spatstat/0000755000176200001440000000000013166520620012116 5ustar liggesusersspatstat/inst/0000755000176200001440000000000013160706031013067 5ustar liggesusersspatstat/inst/CITATION0000755000176200001440000000500313115273007014227 0ustar liggesuserscitHeader("To cite spatstat in publications use:") citEntry(entry = "Book", title = "Spatial Point Patterns: Methodology and Applications with {R}", author = personList(as.person("Adrian Baddeley"), as.person("Ege Rubak"), as.person("Rolf Turner")), year = "2015", publisher = "Chapman and Hall/CRC Press", address = "London", url="http://www.crcpress.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/9781482210200/", textVersion = paste("Adrian Baddeley, Ege Rubak, Rolf Turner (2015).", "Spatial Point Patterns: Methodology and Applications with R.", "London: Chapman and Hall/CRC Press, 2015.", "URL http://www.crcpress.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/9781482210200/") ) citEntry(entry = "Article", title = "Hybrids of Gibbs Point Process Models and Their Implementation", author = personList(as.person("Adrian Baddeley"), as.person("Rolf Turner"), as.person("Jorge Mateu"), as.person("Andrew Bevan")), journal = "Journal of Statistical Software", year = "2013", volume = "55", number = "11", pages = "1--43", url = "http://www.jstatsoft.org/v55/i11/", textVersion = paste("Adrian Baddeley, Rolf Turner, Jorge Mateu, Andrew Bevan (2013).", "Hybrids of Gibbs Point Process Models and Their Implementation.", "Journal of Statistical Software, 55(11), 1-43.", "URL http://www.jstatsoft.org/v55/i11/."), header = "If you use hybrid models, please also cite:" ) citEntry(entry = "Article", title = "{spatstat}: An {R} Package for Analyzing Spatial Point Patterns", author = personList(as.person("Adrian Baddeley"), as.person("Rolf Turner")), journal = "Journal of Statistical Software", year = "2005", volume = "12", number = "6", pages = "1--42", url = 
"http://www.jstatsoft.org/v12/i06/", textVersion = paste("Adrian Baddeley, Rolf Turner (2005).", "spatstat: An R Package for Analyzing Spatial Point Patterns.", "Journal of Statistical Software 12(6), 1-42.", "URL http://www.jstatsoft.org/v12/i06/."), header = "In survey articles, please cite the original paper on spatstat:" ) spatstat/inst/doc/0000755000176200001440000000000013166361210013636 5ustar liggesusersspatstat/inst/doc/Nickname.txt0000644000176200001440000000002713166352416016133 0ustar liggesusers"Quantum Entanglement" spatstat/inst/doc/datasets.Rnw0000644000176200001440000006364313161127550016154 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Datasets Provided for the Spatstat Package} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \newcommand{\sdat}{\pkg{spatstat.data}} \newcommand{\Sdat}{\pkg{Spatstat.data}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") spatstat.options(transparent=FALSE) options(useFancyQuotes=FALSE) @ \title{Datasets provided for \spst} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle This document is an overview of the spatial datasets that are provided for the \spst\ package. To flick through a nice display of all the data sets that come with \spst\ type \texttt{demo(data)}. To see information about a given data set, type \texttt{help({\em name})} where \emph{name} is the name of the data set. 
To plot a given data set, type \texttt{plot({\em name})}. Datasets in \spst\ are ``lazy-loaded'', which means that they can be accessed simply by typing their name. Not all packages do this; in some packages you have to type \texttt{data({\em name})} in order to access a data set. To list all the datasets in \spst, you need to type \texttt{data(package="spatstat.data")}. This is because, for efficiency, the datasets are actually installed in a sub-package \sdat. This is the only time you should ever need to mention \sdat\ explicitly. When the \spst\ package is loaded by the command \texttt{library(spatstat)}, the sub-package \sdat\ is automatically loaded. \section{List of datasets} \subsection{Point patterns in 2D} Here is a list of the standard point pattern data sets that are supplied with the current installation of \sdat: \newcommand{\recto}{\framebox{\hphantom{re}\vphantom{re}}} \newcommand{\irregpoly}{\includegraphics*[width=6mm]{irregpoly}} \newcommand{\convpoly}{\includegraphics*[width=4mm]{hexagon}} \newcommand{\disc}{$\bigcirc$} \newcommand{\nomarks}{$\cdot$} \newcommand{\nocov}{$\cdot$} \begin{tabular}{l|l|ccc} {\sf name} & {\sf description} & {\sf marks} & {\sf covariates} & {\sf window} \\ \hline {\tt amacrine} & rabbit amacrine cells & cell type & \nocov & \recto \\ {\tt anemones} & sea anemones & diameter & \nocov & \recto \\ {\tt ants} & ant nests& species & zones & \convpoly \\ {\tt bdspots} & breakdown spots & \nomarks & \nocov & \disc \\ {\tt bei} & rainforest trees & \nomarks & topography & \recto \\ {\tt betacells} & cat retinal ganglia & cell type, area & \nocov & \recto \\ {\tt bramblecanes} & bramble canes & age & \nocov & \recto \\ {\tt bronzefilter} & bronze particles & diameter & \nocov & \recto \\ {\tt cells} & biological cells & \nomarks &\nocov & \recto \\ {\tt chorley} & cancers & case/control &\nocov & \irregpoly \\ {\tt clmfires} & forest fires & cause, size, date & \shortstack[c]{elevation, orientation,\\ slope, land use} & 
\irregpoly \\ {\tt copper} & copper deposits & \nomarks & fault lines & \recto \\ {\tt demopat} & artificial data & type & \nocov & \irregpoly \\ {\tt finpines} & trees & diam, height & \nocov & \recto \\ {\tt gordon} & people in a park & \nomarks & \nocov & \irregpoly \\ {\tt gorillas} & gorilla nest sites & group, season & \shortstack[c]{terrain, vegetation,\\ heat, water} & \irregpoly \\ {\tt hamster} & hamster tumour cells & cell type &\nocov & \recto \\ {\tt humberside} & child leukaemia & case/control & \nocov & \irregpoly\\ {\tt hyytiala} & mixed forest & species &\nocov & \recto \\ {\tt japanesepines} & Japanese pines & \nomarks &\nocov & \recto \\ {\tt lansing} & mixed forest & species & \nocov & \recto \\ {\tt longleaf} & trees & diameter & \nocov & \recto \\ {\tt mucosa} & gastric mucosa cells & cell type & \nocov & \recto \\ {\tt murchison} & gold deposits & \nomarks & faults, rock type & \irregpoly \\ {\tt nbfires} & wildfires & several & \nocov & \irregpoly \\ {\tt nztrees} & trees & \nomarks & \nocov & \recto \\ {\tt paracou} & trees & adult/juvenile & \nocov & \recto \\ {\tt ponderosa} & trees & \nomarks & \nocov & \recto \\ {\tt redwood} & saplings & \nomarks & \nocov & \recto \\ {\tt redwood3} & saplings & \nomarks & \nocov & \recto \\ {\tt redwoodfull} & saplings & \nomarks & zones & \recto \\ {\tt shapley} & galaxies & magnitude, recession, SE & \nocov & \convpoly \\ {\tt simdat} & simulated pattern & \nomarks & \nocov & \recto \\ {\tt sporophores} & fungi & species & \nocov & \disc \\ {\tt spruces} & trees & diameter & \nocov & \recto \\ {\tt swedishpines} & trees & \nomarks & \nocov & \recto \\ {\tt urkiola} & mixed forest & species & \nocov & \irregpoly \\ {\tt vesicles} & synaptic vesicles & \nomarks & zones & \irregpoly \\ {\tt waka} & trees & diameter & \nocov & \recto \\ \hline \end{tabular} \bigskip \noindent The shape of the window containing the point pattern is indicated by the symbols \recto\ (rectangle), \disc\ (disc), \convpoly\ 
(convex polygon) and \irregpoly\ (irregular polygon). Additional information about the data set \texttt{\em name} may be stored in a separate list \texttt{{\em name}.extra}. Currently these are the available options: \begin{tabular}[!h]{ll} {\sc Name} & {\sc Contents} \\ \hline {\tt ants.extra} & field and scrub subregions; \\ & additional map elements; plotting function \\ {\tt bei.extra} & covariate images \\ {\tt chorley.extra} & incinerator location; plotting function \\ {\tt gorillas.extra} & covariate images\\ {\tt nbfires.extra} & inscribed rectangle; border type labels \\ {\tt ponderosa.extra} & data points of interest; plotting function\\ {\tt redwoodfull.extra} & subregions; plotting function \\ {\tt shapley.extra} & individual survey fields; plotting function \\ {\tt vesicles.extra} & anatomical regions \\ \hline \end{tabular} For demonstration and instruction purposes, raw data files are available for the datasets \texttt{vesicles}, \texttt{gorillas} and \texttt{osteo}. \subsection{Other Data Types} There are also the following spatial data sets which are not 2D point patterns: \begin{tabular}[c]{l|l|l} {\sf name} & {\sf description} & {\sf format} \\ \hline {\tt austates} & Australian states & tessellation \\ {\tt chicago} & crimes & point pattern on linear network \\ {\tt dendrite} & dendritic spines & point pattern on linear network \\ {\tt spiders} & spider webs & point pattern on linear network \\ {\tt flu} & virus proteins & replicated 2D point patterns \\ {\tt heather} & heather mosaic & binary image (three versions) \\ {\tt demohyper} & simulated data & replicated 2D point patterns with covariates\\ {\tt osteo} & osteocyte lacunae & replicated 3D point patterns with covariates\\ {\tt pyramidal} & pyramidal neurons & replicated 2D point patterns in 3 groups\\ {\tt residualspaper} & data \& code from Baddeley et al (2005) & 2D point patterns, \R\ function \\ {\tt simba} & simulated data & replicated 2D point patterns in 2 groups\\ {\tt 
waterstriders} & insects on water & replicated 2D point patterns\\ \hline \end{tabular} Additionally there is a dataset \texttt{Kovesi} containing several colour maps with perceptually uniform contrast. \section{Information on each dataset} Here we give basic information about each dataset. For further information, consult the help file for the particular dataset. <>= opa <- par() ## How to set all margins to zero and eliminate all outer spaces zeromargins <- function() { par( mar=rep(0,4), omd=c(0,1,0,1), xaxs="i", yaxs="i" ) invisible(NULL) } ## Set 'mar' setmargins <- function(...) { x <- c(...) x <- rep(x, 4)[1:4] par(mar=x) invisible(NULL) } @ \subsubsection*{\texttt{amacrine}: Amacrine cells} Locations of displaced amacrine cells in the retina of a rabbit. There are two types of points, ``on'' and ``off''. \SweaveOpts{width=5.5,height=3}\setkeys{Gin}{width=0.8\textwidth} <>= plot(amacrine) @ <>= setmargins(0,1,2,0) plot(amacrine) @ \subsubsection*{\texttt{anemones}: Sea Anemones} These data give the spatial locations and diameters of sea anemones on a boulder near sea level. \SweaveOpts{width=7,height=4.5}\setkeys{Gin}{width=0.8\textwidth} <>= plot(anemones, markscale=1) @ <>= setmargins(0,0,2,0) plot(anemones, markscale=1) @ \subsubsection*{\texttt{ants}: Ants' nests} Spatial locations of nests of two species of ants at a site in Greece. The full dataset (supplied here) has an irregular polygonal boundary, while most analyses have been confined to two rectangular subsets of the pattern (also supplied here). % Parameters for Ants data with key at right \SweaveOpts{width=6.3,height=4}\setkeys{Gin}{width=0.7\textwidth} <>= ants.extra$plotit() @ %$ <>= setmargins(0,0,1,0) ants.extra$plotit() @ %$ \subsubsection*{\texttt{austates}: Australian states} The states and large mainland territories of Australia are represented as polygonal regions forming a tessellation. 
<>= plot(austates) @ \subsubsection*{\texttt{bdspots}: Breakdown spots} A list of three point patterns, each giving the locations of electrical breakdown spots on a circular electrode in a microelectronic capacitor. \SweaveOpts{width=12,height=6}\setkeys{Gin}{width=\textwidth} <>= plot(bdspots, equal.scales=TRUE, pch="+", panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) @ <>= zeromargins() plot(bdspots, equal.scales=TRUE, pch="+", main="", mar.panel=0, hsep=1, panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) @ \subsubsection*{\texttt{bei}: Beilschmiedia data} Locations of 3605 trees in a tropical rain forest. Accompanied by covariate data giving the elevation (altitude) and slope of elevation in the study region. \SweaveOpts{width=12,height=6}\setkeys{Gin}{width=0.8\textwidth} <>= plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) @ <>= setmargins(0,0,2,0) plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) @ <>= M <- persp(bei.extra$elev, theta=-45, phi=18, expand=7, border=NA, apron=TRUE, shade=0.3, box=FALSE, visible=TRUE, main="") perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3) @ \subsubsection*{\texttt{betacells}: Beta ganglion cells} Locations of beta ganglion cells in cat retina, each cell classified as `on' or `off' and also labelled with the cell profile area. <>= plot(betacells) @ \subsubsection*{\texttt{bramblecanes}: Bramble canes} <>= plot(bramblecanes, cols=1:3) @ <>= plot(split(bramblecanes)) @ \subsubsection*{\texttt{bronzefilter}: Bronze filter section profiles} Spatially inhomogeneous pattern of circular section profiles of particles, observed in a longitudinal plane section through a gradient sinter filter made from bronze powder. <>= plot(bronzefilter,markscale=2) @ \subsubsection*{\texttt{cells}: Biological cells} Locations of the centres of 42 biological cells observed under optical microscopy in a histological section. Often used as a demonstration example. 
<>= plot(cells) @ \subsubsection*{\texttt{chicago}: Chicago crimes} Locations (street addresses) of crimes reported in a two-week period in an area close to the University of Chicago. A multitype point pattern on a linear network. <>= plot(chicago, main="Chicago Crimes", col="grey", cols=c("red", "blue", "black", "blue", "red", "blue", "blue"), chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE) @ \subsubsection*{\texttt{chorley}: Chorley-Ribble cancer data} Spatial locations of cases of cancer of the larynx and cancer of the lung, and the location of a disused industrial incinerator. A marked point pattern, with an irregular window and a simple covariate. <>= chorley.extra$plotit() @ %$ \subsubsection*{\texttt{clmfires}: Castilla-La Mancha Fires} Forest fires in the Castilla-La Mancha region of Spain between 1998 and 2007. A point pattern with 4 columns of marks: \begin{tabular}{ll} \texttt{cause} & cause of fire\\ \texttt{burnt.area} & total area burned, in hectares \\ \texttt{date} & date of fire \\ \texttt{julian.date} & date of fire in days since 1.1.1998 \end{tabular} <>= plot(clmfires, which.marks="cause", cols=2:5, cex=0.25, main="Castilla-La Mancha forest fires") @ The accompanying dataset \texttt{clmfires.extra} is a list of two items \texttt{clmcov100} and \texttt{clmcov200} containing covariate information for the entire Castilla-La Mancha region. Each of these two elements is a list of four pixel images named \texttt{elevation}, \texttt{orientation}, \texttt{slope} and \texttt{landuse}. <>= plot(clmfires.extra$clmcov200, main="Covariates for forest fires") @ %$ \subsubsection*{\texttt{copper}: Queensland copper data} These data come from an intensive geological survey in central Queensland, Australia. They consist of 67 points representing copper ore deposits, and 146 line segments representing geological `lineaments', mostly faults. 
<>= plot(copper$Points, main="Copper") plot(copper$Lines, add=TRUE) @ \subsubsection*{\texttt{demohyper}} A synthetic example of a \texttt{hyperframe} for demonstration purposes. <>= plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }), parargs=list(mar=rep(1,4))) @ \subsubsection*{\texttt{demopat}} A synthetic example of a point pattern for demonstration purposes. <>= plot(demopat) @ \subsubsection*{\texttt{dendrite}} Dendrites are branching filaments which extend from the main body of a neuron (nerve cell) to propagate electrochemical signals. Spines are small protrusions on the dendrites. This dataset gives the locations of 566 spines observed on one branch of the dendritic tree of a rat neuron. The spines are classified according to their shape into three types: mushroom, stubby or thin. <>= plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4) @ \subsubsection*{\texttt{finpines}: Finnish pine saplings} Locations of 126 pine saplings in a Finnish forest, their heights and their diameters. <>= plot(finpines, main="Finnish pines") @ \subsubsection*{\texttt{flu}: Influenza virus proteins} The \texttt{flu} dataset contains replicated spatial point patterns giving the locations of two different virus proteins on the membranes of cells infected with influenza virus. It is a \texttt{hyperframe} containing point patterns and explanatory variables. <>= wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1") plot(flu[wildM1, 1, drop=TRUE], main=c("flu data", "wild type virus, M2-M1 stain"), chars=c(16,3), cex=0.4, cols=2:3) @ \subsubsection*{\texttt{gordon}: People in Gordon Square} Locations of people sitting on a grass patch on a sunny afternoon. <>= plot(gordon, main="People in Gordon Square", pch=16) @ \subsubsection*{\texttt{gorillas}: Gorilla nesting sites} Locations of nesting sites of gorillas, and associated covariates, in a National Park in Cameroon. 
\texttt{gorillas} is a marked point pattern (object of class \texttt{"ppp"}) representing nest site locations. \texttt{gorillas.extra} is a named list of 7 pixel images (objects of class \texttt{"im"}) containing spatial covariates. It also belongs to the class \texttt{"listof"}. <>= plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites") @ The \texttt{vegetation} covariate is also available as a raw ASCII format file, <>= system.file("rawdata/gorillas/vegetation.asc", package="spatstat") @ \subsubsection*{\texttt{hamster}: Hamster kidney cells} Cell nuclei in hamster kidney, each nucleus classified as either `dividing' or `pyknotic'. A multitype point pattern. <>= plot(hamster, cols=c(2,4)) @ \subsubsection*{\texttt{heather}: Heather mosaic} The spatial mosaic of vegetation of the heather plant, recorded in a 10 by 20 metre sampling plot in Sweden. A list with three entries, representing the same data at different spatial resolutions. <>= plot(heather) @ \subsubsection*{\texttt{humberside}: Childhood Leukemia and Lymphoma} Spatial locations of cases of childhood leukaemia and lymphoma, and randomly-selected controls, in North Humberside. A marked point pattern. <>= plot(humberside) @ The dataset \texttt{humberside.convex} is an object of the same format, representing the same point pattern data, but contained in a larger, 5-sided convex polygon. \subsubsection*{\texttt{hyytiala}: Mixed forest} Spatial locations and species classification for trees in a Finnish forest. <>= plot(hyytiala, cols=2:5) @ \subsubsection*{\texttt{japanesepines}: Japanese black pine saplings} Locations of Japanese black pine saplings in a square sampling region in a natural forest. Often used as a standard example. <>= plot(japanesepines) @ \subsubsection*{\texttt{lansing}: Lansing Woods} Locations and botanical classification of trees in a forest. A multitype point pattern with 6 different types of points. Includes duplicated points. 
<>= plot(lansing) @ <>= plot(split(lansing)) @ \subsubsection*{\texttt{longleaf}: Longleaf Pines} Locations and diameters of Longleaf pine trees. <>= plot(longleaf) @ \subsubsection*{\texttt{mucosa}: Gastric Mucosa Cells} A bivariate inhomogeneous point pattern, giving the locations of the centres of two types of cells in a cross-section of the gastric mucosa of a rat. <>= plot(mucosa, chars=c(1,3), cols=c("red", "green")) plot(mucosa.subwin, add=TRUE, lty=3) @ \subsubsection*{\texttt{murchison}: Murchison Gold Deposits} Spatial locations of gold deposits and associated geological features in the Murchison area of Western Australia. A list of three elements: \begin{itemize} \item \texttt{gold}, the point pattern of gold deposits; \item \texttt{faults}, the line segment pattern of geological faults; \item \texttt{greenstone}, the subregion of greenstone outcrop. \end{itemize} <>= plot(murchison$greenstone, main="Murchison data", col="lightgreen") plot(murchison$gold, add=TRUE, pch=3, col="blue") plot(murchison$faults, add=TRUE, col="red") @ \subsubsection*{\texttt{nbfires}: New Brunswick Fires} Fires in New Brunswick (Canada) with marks giving information about each fire. <>= plot(nbfires, use.marks=FALSE, pch=".") @ <>= plot(split(nbfires), use.marks=FALSE, chars=".") @ <>= par(mar=c(0,0,2,0)) plot(split(nbfires)$"2000", which.marks="fire.type", main=c("New Brunswick fires 2000", "by fire type"), cols=c("blue", "green", "red", "cyan"), leg.side="left") @ \subsubsection*{\texttt{nztrees}: New Zealand Trees} Locations of trees in a forest plot in New Zealand. Often used as a demonstration example. <>= plot(nztrees) plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3) @ \subsubsection*{\texttt{osteo}: Osteocyte Lacunae} Replicated three-dimensional point patterns: the three-dimensional locations of osteocyte lacunae observed in rectangular volumes of solid bone using a confocal microscope. 
A \texttt{hyperframe} containing 3D point patterns and explanatory variables. <>= plot(osteo[1:10,], main.panel="", pch=21, bg='white') @ For demonstration and instruction purposes, the raw data from the 36th point pattern are available in a plain ascii file in the \texttt{spatstat} installation, <>= system.file("rawdata/osteo/osteo36.txt", package="spatstat") @ \subsubsection*{\texttt{paracou}: Kimboto trees} Point pattern of adult and juvenile Kimboto trees recorded at Paracou in French Guiana. A bivariate point pattern. <>= plot(paracou, cols=2:3, chars=c(16,3)) @ \subsubsection*{\texttt{ponderosa}: Ponderosa Pines} Locations of Ponderosa Pine trees in a forest. Several special points are identified. <>= ponderosa.extra$plotit() @ %$ \subsubsection*{\texttt{pyramidal}: Pyramidal Neurons in Brain} Locations of pyramidal neurons in sections of human brain. There is one point pattern from each of 31 human subjects. The subjects are divided into three groups: controls (12 subjects), schizoaffective (9 subjects) and schizophrenic (10 subjects). <>= pyr <- pyramidal pyr$grp <- abbreviate(pyramidal$group, minlength=7) plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons") @ \subsubsection*{\texttt{redwood}, \texttt{redwood3}, \texttt{redwoodfull}: Redwood seedlings and saplings} California Redwood seedlings and saplings in a forest. There are two versions of this dataset: \texttt{redwood} and \texttt{redwoodfull}. The \texttt{redwoodfull} dataset is the full data. It is spatially inhomogeneous in density and spacing of points. The \texttt{redwood} dataset is a subset of the full data, selected because it is apparently homogeneous, and has often been used as a demonstration example. This comes in two versions commonly used in the literature: \texttt{redwood} (coordinates given to 2 decimal places) and \texttt{redwood3} (coordinates given to 3 decimal places). 
<>= plot(redwood) plot(redwood3, add=TRUE, pch=20) @ <>= redwoodfull.extra$plotit() @ %$ \subsubsection*{\texttt{residualspaper}: Data from residuals paper} Contains the point patterns used as examples in \begin{quote} A. Baddeley, R. Turner, J. M{\o}ller and M. Hazelton (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \textbf{67}, 617--666 \end{quote} along with {\sf R} code. <>= plot(as.listof(residualspaper[c("Fig1", "Fig4a", "Fig4b", "Fig4c")]), main="") @ \subsubsection*{\texttt{shapley}: Shapley Galaxy Concentration} Sky positions of 4215 galaxies in the Shapley Supercluster (mapped by radioastronomy). <>= shapley.extra$plotit(main="Shapley") @ %$ \subsubsection*{\texttt{simdat}: Simulated data} Another simulated dataset used for demonstration purposes. <>= plot(simdat) @ \subsubsection*{\texttt{spiders}: Spider webs} Spider webs across the mortar lines of a brick wall. A point pattern on a linear network. <>= plot(spiders, pch=16, show.window=FALSE) @ \subsubsection*{\texttt{sporophores}: Sporophores} Sporophores of three species of fungi around a tree. <>= plot(sporophores, chars=c(16,1,2), cex=0.6) points(0,0,pch=16, cex=2) text(15,8,"Tree", cex=0.75) @ \subsubsection*{\texttt{spruces}: Spruces in Saxony} Locations of Norwegian spruce trees in a natural forest stand in Saxonia, Germany. Each tree is marked with its diameter at breast height. <>= plot(spruces, maxsize=min(nndist(spruces))) @ \subsubsection*{\texttt{swedishpines}: Swedish Pines} Locations of pine saplings in a Swedish forest. Often used as a demonstration example. <>= plot(swedishpines) @ \subsubsection*{\texttt{urkiola}: trees in a wood} Locations of birch and oak trees in a secondary wood in Urkiola Natural Park (Basque country, northern Spain). Irregular window, bivariate point pattern. 
<>= plot(urkiola, cex=0.5, cols=2:3) @ \subsubsection*{\texttt{waka}: trees in Waka National Park} Spatial coordinates of each tree, marked by the tree diameter at breast height. <>= par(mar=c(0,0,2,0)) plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters")) @ \subsubsection*{\texttt{vesicles}: synaptic vesicles} Point pattern of synaptic vesicles observed in rat brain tissue. <>= v <- rotate(vesicles, pi/2) ve <- lapply(vesicles.extra, rotate, pi/2) plot(v, main="Vesicles") plot(ve$activezone, add=TRUE, lwd=3) @ The auxiliary dataset \texttt{vesicles.extra} is a list with entries\\ \begin{tabular}{ll} \texttt{presynapse} & outer polygonal boundary of presynapse \\ \texttt{mitochondria} & polygonal boundary of mitochondria \\ \texttt{mask} & binary mask representation of vesicles window \\ \texttt{activezone} & line segment pattern representing the active zone. \end{tabular} For demonstration and training purposes, the raw data files for this dataset are also provided in the \pkg{spatstat} package installation:\\ \begin{tabular}{ll} \texttt{vesicles.txt} & spatial locations of vesicles \\ \texttt{presynapse.txt} & vertices of \texttt{presynapse} \\ \texttt{mitochondria.txt} & vertices of \texttt{mitochondria} \\ \texttt{vesiclesimage.tif} & greyscale microscope image \\ \texttt{vesiclesmask.tif} & binary image of \texttt{mask} \\ \texttt{activezone.txt} & coordinates of \texttt{activezone} \end{tabular} The files are in the folder \texttt{rawdata/vesicles} in the \texttt{spatstat} installation directory. The precise location of the files can be obtained using \texttt{system.file}, for example <>= system.file("rawdata/vesicles/mitochondria.txt", package="spatstat") @ \subsubsection*{\texttt{waterstriders}: Insects on a pond} Three independent replications of a point pattern formed by insects on the surface of a pond. 
<>= plot(waterstriders) @ \end{document} spatstat/inst/doc/getstart.pdf0000644000176200001440000035633513166361221016207 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 3393 /Filter /FlateDecode /N 80 /First 658 >> stream x[mS~VdI]&p-_Hv"q0ΝX# a l ek#n@!vP#?@PCHXA:b!4iIP P5(؎NHإ!lHwA 0 lF#JqCh`؍Ń&1!~eckf|eZ B1Gu6ړow[Mid~PPK rdʾ?q:\>vtry?p!ۻ̊p,)/`r?%3+L 8xNhwv}_mٸA]+enN0QѰH1}Y8MCP"2;IAn]|ɌIV&?B$_T-is61hB zÊ}V^Vݒ2;!xfwe>f,ogn>[74Юv;sB(='VkqӞ mOb{T(:V}$YJhKR1cc6$9@:fH=Kk~s>S>Il_ F`ntR ۩AG-6͚"9Í(@>g3@+'Ev&|{?[<88NO`ۜ$ Z{jnI,Ϋ/yrs=-]_+1`9ԹМ__3~&8w.KB4x礦JBUIT49SW%Tt>xk]qbm8jqoz%zH^ T_}0K;lʤk窬HRpثA&E#b7˫4&(6͝moˏ)t'^9_`+V0 &3vºեW5Dj-U;,TT:W¨`Q_BGLk,ǠM20y:}}>]K[Rp3p7'5ҮA#NB)Yф1jjqSczS]s6KĪKJ$2F#}&-LMÞLco[C>o;~hN`6G~'P:/2gu1Mg(*fCh2,ήggQz +_bBS!Ysو(qPLig-7oMOږ%;e_A|?pGs;M~ (pBb\vB >윟lp.o76oPwEQo^oٵ؈vQFԢ&> 1}]NFt:'#Euy1z95 Uz,%ΖEWʨ.~<[j?}'',m3k&4C̭{2VJǍwI &+Akcob8l4mt=+>J# Fu/GN)l N(@S Ҧ =URB)L7Vlb ku:` `>U *E=~mHiQ$h!D][</!lS7(,!s=*wi &< Ԇ4iʸ8q͚J[D:ߪM3Wnt&վH=E!`4Vkrwj+~A.. u!KwY傞G\]nv]7AaSW Pg}xCpVA\)i3=(j-'<ӹnGE~7]5'@ӢuUQ_W&MުfQW &SDz!j7eiW ]bD!D.nwC} 5 szw i1\VKc2/1%V?}9Q_TI êM}Yczڈ^Q սPAQ+bz132O}rsHbF?x3ᇓ׬ȇi_-C^97a='GnVrr& fW@[ĞjYa)a;6婙mޔҦH$rI+ʲ4DMw*˶UeDO*\"Bvi pfʴ/i[-y'{:.Iš=V]s繉9ra+Rz"Kh1Rd)z'V4;)˄nJ6E%k i9"XHkw5/Ё.Yyz)#R?8 8{ W@Um%s+ݽ{WS|ꝫGy\kz&6sQ6=,k>),jA++lZMcu3%RL&!^8Isֆ? s?l>cIJc"mS:h3O JVP)jSR])qQYjv!ld?LV  f1;o )5U<5zJ¿×V;bc%XTXTŃJ^SYгM.Fbwed6ѧ2\|"mٷQH)! |O"v[7{%[cz[~)ɷۋh>*@b OrVU^ڒތ_* Ϩn"[sp.~ˆS Y-z>H1CbNZs>Ә 50:AQCzL^r } U#Ёt7NW(k`sX,~ א].vSnWWF+1ۊy,s!{nvcmԄ/E ߜ^)v$cjG{{9Of??_C5c[ЅRbZendstream endobj 82 0 obj << /Subtype /XML /Type /Metadata /Length 1329 >> stream 2017-10-08T16:32:17+08:00 2017-10-08T16:32:17+08:00 TeX Untitled endstream endobj 83 0 obj << /Filter /FlateDecode /Length 4141 >> stream xZKǑ|_ѷ|?և ?`ic!P6es{({~UY]- ̌x~`wk>>⫼;]t??ϻ_?`B<8,;[ʐsޥww07C Z]6a/h>/41V,9H֒}[cR@2.۸?ʆR"L ߭|G?1<<Bw.#lކ/zx^ijNڡ8w8!.WOlRctdd/]@gkOpg=~՘/,\/e^zbbxz_` TWj3i-MOXz玾XcQ- |3~Vja\$ancCt*jcSH n. 
!'}qXK//[4`!Tmw&I;뢝 G/jt^&oY]y"ýE\x߼ 'bxiuL_hمAy5zGyZ"|Ix` P606`dž"_㰍tҤcJn {|yn6 `x'鳖0Ǯ~|$Czƌp㌇aS ) yǚH.V%IEsW2l>`˵Ըu|Tgx1~;pw,w\|OM 6tKfE$~eÔ |vHHS&Qnsd(A9,='W: ®90f-v9"f%d-xNoO-6M_AEb'p!F.C)"Nc_BuJp=FI m3VH푉6C xbCm Ho\$}vQ{p `P`=L3irj@B.ή2 i bBNh< r7wR )刘rl4 .;|(BF}LQ?elvsoF31̩ tn) 86l&CbE߫>a;?{$/8ehfwA8 mspkOZ#\hU%'ϗ9=l#Qơp1fVKH2n *NN*60g_d7 :POͳVrx[Ё ZbT0gF"&ɟ2]M]rK{L3Jv4܀ï )墷n01 ÚŌ QIƅUB 1Pt*zh O]gʘӣwk(tB݁*~,!%zNO#sIt?.ZrUB->FOk3޼U[TwvSGWP X_Aw&ʮ끔'[A?hrCyآeBBLW 퐜K>\(#%*'al7ZO̷uv>CAN )J f}OiRQ5Фo3Weks6O+NB7#,@<c (F(vJq oswKV;P #ahܡEb`#*:x<5wzi}ێ@ԋj3O3,$̏_egeu=V1M55>8/% 8=Dj3\>5Z5$Igof|c.գiwb M=-T2cU›C :d.ItQ8fGL@cB%\> mM?ﮎ?NU 7 .Zr9#:iMM@^3Q3%(;YYu*k(Fye~f o8f@[Ш-pvk3+"~BWfw12.z6_V&@4w3&*~jpݾ(M7?M`.)iANUb+ÂKZت;kQ<]ߨцhTx뾀݂]ֶB\\h 2U(ݏJLCL SGox[VWgf*B_:G$9OZ}ORU6ۘ%.v=3qFw9$7rB Us`vW[A^717EA,=y M0o5ԅ߲ !Bz |ý;2U%jſLW:ƅR%Xˍ~.'q7kҍɚDžH߶nqo8{ gRAS/Fn}yPYR{f}Ȳb~%3$)3<$]wqRBQ# :IP)K!7?\k |>ގnܫ!TyN&dal,,kmo@"ǘΫPSg:͡n;tWD|Cl.)Okfhȷp"ȢV?'[]T #Wp_^ALxM{-m_(J@ M:'3&J;2x~fH|h|{k;d .]_)WV!̖YI7m]֭ߵԻ+V+rmXMhѣXq-Ѳ_󊖥Lo:=\ uSUWʩo| ɞތooG`Uު #endstream endobj 84 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1615 >> stream xTmL[ۄu[zjڐ& ISR YԂm\7Ɓe}}lll d#,*s!AQS 5U,딭Q}/{KYk:J>9! i A,pkYIRX"<%\{`*I M:b~yF%? $$Ymi5f]֪ToPlޤxa˖+zYVV+ VF*EAXZո=+noV- 抝6):VQh5rEڪ8kkmVYQ`(ט H6[YrMVG"$oD*OX%$;ėdS = V+9[: RZΤHp`x".^&. 
4'l<ޓ.qZݮʱ'[Ԝ>~Ԑp;" TJˑnv0n:vz(۫1Ė rI$h)G +PCƭn1GQ)sQDTVb“W 3\Gd 42T{ꄫq};ӥl)u,ҿu -9=5z5JG@'G!ulP/'AG/N$e OĒE(:%Av o-8?笯 )*SZ:?Owv^V t=.~bԀ"s=Qr:jQ(.Ge?~Ə N|< \kt1}ź '`~A{{Ag&LbRqY/zsN/2̑tlʸIЫs'Ɓ9׏#g.p<-.:["=p>^֚ ~"\)f_K*ͬ^ *hSX3VzJ Gܻ%HRM}LiU ԌyD_^ m.m,!_=CaQ } `sq^z.&?xDSvd juXmځD_l̄?!K0!+Ȩ BQ.ɕ ÐH4"gendstream endobj 85 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1204 >> stream xELgZlOu٢lé(Y!)0s Ttq*ZB[ROkR_V:bt[9mn[4lf8θ]S3z'y|?8'pWfd%D󅹸0O&&OTqCF>mq)o`,PTщ T":eTzM5t+֗8(71=-8Kڒ%v=IWjMbآU vWLo[M^˘9#]%Ŏ RY:٭gMX9;6cX<`/a/c%XfeNllJ6)x]dy£o?ȅtR\۰Sy0{̹ [+ACM'^{`0FXf6WSlŕD3fD1@FH ;a8\uvIQcdFzh v)rtiB&5 c)z) N0W:jd.-PFTO|~{|F+$I "-Fs7PGw X|]%iu]POCs |}8k,[y;%ZMֹ\PjTD D264QZͻZ$Vt^lu5ox=^<D^Slr=Goߝ<\֌ߏqꈈuշ{NJ+V@qu s&c$x?"-m+؋&v(L%ثvv:E8}ڏA|--M@eqVGqӑ(O_2Z{~hQ"iqF5W"U}endstream endobj 86 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2693 >> stream xVyTSW1T8hmQnZ]lUAM(HB PE`R[=-ZXӪ]}s8ۙ䝼sw~]0D5f7IxF$&<+,K1$CF'1qJ,)Rr"*&1>AЙϛ:kƌK2Mn"tT K%9trwL }aaVZ0}zzz4ZjѸ{Xmd3mY`͠z,iO;tbS7q$X0%BeXx X-E0d/3?I@}V1ٰ$&)|pVe1Eҿ\9{BHgϵ[@ML! ׇnw_c.87Dp !H_ݻYu\17Osx)EV(FK~hbX̿fwȇz-Z$0%jCM}baJbfpx4~)HFpa Zѣ:V.v8|hu+t]Cls48H5m2 `}pzѨM)W$CyʒT*<%ChJм Od<5J R$C?$4iCogw޹jBA!kڗ*Fͮv*T;_RL X_zh=M?P:n.kc^B#>7gQZdV=syoK gHzJ}B&oc%O O\KcHwL r3RssuE{WlȆEyh3ůTP2;5fC*grb.4A>0`xcbqpbVUo? 5kZrJfUъryQx%4FiVΒ[ )L#3~)'Nq/4?ud@|SR+$k L>zYnq:X0g|" Sl ! 2!JHVG69dChzj,o5M$C!gT&࿇Vc9C8&.Y{.яѮ}O!Kq玎_R{kkjj!9nG61 9ΓNp{s|70s_gSRr28Uŭa“vnvEmVn.I}D,.7{eAƼoخ&(`-m;,aAϧ'NQкOn(x'dDBm ZB!K\ &sŨ.سel}+jVP 9vcE2 c 9o}:q47.tg#| 7ޫ*s9C~8]%^d:C"K1tnPY A}tgUY`>]%3Ƶ ÉSyu{M-2uÆ*rXQP.,4 ISl_Q-ӉЂ l}WNMSk3js8RI}?~_Ѓ t!z?޿/r5DJ,b `7-Ye}t"øx3 OA!^Pg-.[}AI|%58*I J*1:,X.ۯwt䠆͠2Ǥ 8<G(&9 \~/lЏidinBR.ff`NjK3_2`FI"\b^HUdUԦb[w f7R͛ `b6SDsin! 
)Kn ѾGr#湂J.W;> stream xywxä́bb ddJBLsƽ w[.%mIʖ L!/!9dp-yg {Z{L޽(uM54f$pŚS'~Ϝ_>U@[R%G>̨@j52Lԇ>R072jW;τu-7f U ;/lY1:fЌA%^ۚ5mχ'R`1C }3l0OHΊGVKTVzv#>: #le3a>:@vU&Ȏlr"# @.K?[DlwQ2tjЮU7JOUjT*b@x JQ o3rt*pH٪0\A[CLZ>E)v9u_":zg,.ܨ^uT]{R h-/lp̿6m}bK7bs XLjn/-:x53iؘIE&?E!{mڇ*K=S+؂ݗnVj% E  "&9R-!W[_i/bp_XP*O@IRIZx.N $eVu4 hOobҤf䠑MljM -!NdM:ItDw3fI/iQ"Q0ߋEJNơg9 ďg%W.·̏S^›jTH?B|Bd8,$SdԉQ%@?oryf\jM=fopX7D$Ůh6֣9"1b!#V x%GY&bom{<ǔrS21 Cq܎E ۏLKSlg؉oգIZ޲5Xg *7D }c/x2dbѝN٫jWWBUԿ\,:-;`PE,Mx{Ec5G_6g{iK8 TPJ]/z!XV=S/!84m@04 /bu<\]C,XXn3,p1|Ewta{B#wX?GʟSMEϪc].|6 3#'gcCtZkk e|"$!1>v,^Ѩ{~:s+aQ^4!کCu  `ew۔>Pqpw7 % OjK!3:avJ3~XyMEh,6TiRiM] F)rh2iڇeAʰ8bʢźZ'Nts1xq""aѡ0(1<,޵Jix*$ȫ#잀{Of m. ،Z~vy.K r!rC[Ym(Xlr]0\ OtT+ _!֋УGWX![|DWoKB’?8ûjPj#2P$NYPmJ0<_]~U#*cd9~p$TKsEWR`ʳ#+AM!PKܘ*5|dȥH=V>ּ4u2߱Um˿#>,zLMYg[ӎB[`-[WYY]\Z\gW!j)bZv2Z/voGg8KpIBsٱ%v9zK'X'D23cЬArOz4kz($@T||b-еBC.Ct*\ q~nfFP)mSFatMry58vϾ}+* -mX)A[ձ](Sdo,[bo n Y+QT]E *esZ|nܠmٕjjUQѥ$<(mMݹok)͋"KH4=j- M==DCk#L"MV]V[FB|U6Ft8uǷz$XHy8EEIU"J@StDf J&pn_wopϊF4PLf)bR B1Dd/%z/酞nDIJBr% m+/U;6;>zea$1[JErMV.l@ї$|/4"96[+| $K^OB!S^&shuh-ꗝr 2?yu0"~h+c<1x;#nz&V%T4WZleh2Ss󚰧5kh| Aʤyւdt`I'fK_J,S0eI#EDPk;'HAɖ1螰Y5v gbZAݭ'U4<>ı/TicŢ_!/Z4Bg q22d_P5h;DsFW(c:T}[#,Re\RB8Y(ӒUT?EVJL>dt"RTx7T{u;]+C&\ 67Jc8a7jZ =yF2 }=HPNNPi;uOhߗp HŒʎ9&D=:t',r\WOLBdd<-GD49p; \4Ѩ8 LFE3gjMPKO~%1O_~y@SI\\JD,3aU5u[:|_yHlоJt`WhCԝ $o[,h"wB4F3 R6_ܼ=Z|R`g‹*UmN[^ ̏064P,N p%@bXHҦy 0O\\.nېVlXF)MIBW]Q^W(sq5`&/_][ ^O\[[iL2fA|Y $B^WZULeDi@@DXжnɽkB*CB+Bjj**j YW54PCtLv"a8fԡPL`(K)2 f$+lѼC+h1b• { c`,854xYmG*FA&o. 
޼k:B5͊?}!2<2%|ZuF]Fm{ cXVX(D0^;yh @:uL+QfK!b=,!]JCoPQ,9IvmA&xb0=E"NS Y̠t4 bObxAll$Kc5.%Z:2c:F)a+0U5Uݛbx)nW()dxMn Wֺ$4[p9W1뎢;Zdiido8͗-֌qQû#w޽9U] ,~"x;$&S*5= ZY 'Nt3Xe"=ҘFt8]9e]fŝ,^߿ɹȮBR<4O@vw}hKnDo,Î)Bl&X]Hck .f߂<=Bi|l"񞆇yc1|ăáCp0;J ͉72;e|t7+$'ȝmL#N[U*1,A*bTه}} [@Bsx{~O^(b}zmK6Z{qhpy2Ա.vhru_my/WF?Azн /^̌ufрm~rZrڋ?ur~49ʣO#x8vH@@fM>@ᚑZb-;#8Pg~zs?AKki{G0ƪ# *rI] ]fpD7&o J]3JMꯠW ]'2NϚWwTR|Ieԧ<1,Q_^|^-ƻ ֣5Vɪ{ڏ5a(b #&{bⱽ6tޮ  oG?6+#3rhT~Sx{.a:tZбgx; l;бˇn7]*h3RFќM!]xp|rR.2a¾9"tݢ-#ӗONp ˙w>F/;pJtG-lrb*͵gabէ3C8X]u% =lZgm8n p^\xch ?`φt * l(N'RCRQCLOҒqqʙ{ߡ>XSvst'd# d6}h1Z]dhdV`n;'Yft$#g4}$4CiT,@5Ț=7jc8Kw"Wwرiy`09 YҘx9cTXJrS5ܛ=3DVFVydRaH*ڌo.v*95Y:w!c]᰿H I=yO;,|׷+tkX2gy Hֲp ]فk݂ Y7ؠEz27tP"JeoH5ѓ69ކ zlCukf0t)U{%Y@j4$ ?v{oz2+QJ9W> stream xX XS׶>!prD ǾsЊc*(Zg0@H0( Ș0A: VkUZuvnNw͗#;gganFx<7wcx̌OJ#+޷֐:6 &I)y'෺/`h0 ),> `RC*UkbpXT̝'K`3R%E Tm3{GP@6 A*nxl!F&| '!e(ҥA壃TU}ݓ !LjDZDVERxj=Q8,X׎ܱ~R&$9/Q"`?D"imnQjc>{:E5%']FgR#IYXɝ? ;%KQiIqZ'%*Q!"{Ɲ%H+WE0jU;eNUC{`G c3]gUb,wFvmV6J;"&~ߧ໛]{=E%NUz]#WgF{(uF.9z7ݍ:ɧঞWa 98kԯ:M̃wfYK? -QQގdWdvrA"%2[-|BM&WEiR^ܕ'B;9w%xc&YAupҢ,6kQ-{gUЈC(=D_0 J%35~%(52z3!f4 JWQT^;3Y\]su4rk#+ u IMդAǩ/څm?rEW0e7Q\~*@2]m0:y(=5; vnR0驤*Kװ/.2Aa(5=Vr *{% {a-N~~ )Y4[]xF?>) $8DTwCP54fK.:t x[x~ :I0Ȼ _]%ЃE0׿2m@a x5qB;βB& 낿I}z _DeB5TM( +H؉RzJ:FK}STjPip2kwˋ0}ȑS_~y*Oudr@ւGׯDXm~EƞԬ݈*.ϯ)[QnAԄgl-/ gG"Q47}b鯗FLtWdD-"Q4OSDS[֠4V"=v#%(]GT;Xrdu4qMi1oJ,xoЃmuWA1{|tM=Vڳ Hpj,գ&ؒO O-!9BH\P,ECŊԇXl|ʎ :v"ʉ+gbjdڨvya>/s{ mƑt_,pЍ 7>˟HKcN^&I\++1s6.Ce˦[%~@Өu[?-٘QxU'.QKʲ sCb}%Q_"fPpJrfWOjqU,Ť5] wq!`@4UZj:GlJ'+#=CM0z?X]b-:ݟ+lVSɡa,Ӊo$ lg;+-o6W{9 B&Y*oٻ| Ӊ!'_ot>3E? 
MoӽgM*Q4Ң,nus!_>[ڛ "\%ﮪLSFOR($ 56yG g9!vp0fTo(.Ϋa2&$- P@=X4?$gHvEosnr>BxK;m=zg7[~HW}θcOg\ul÷!aK??Sl=|ziVş]|jhID_ }'aD7Ds2(6sJӡJYZL*gt\6 !6#wsƍGq75(#33 SJMT!(YȩpְD7a<#|z *>D2kj:_z:%HGYQ!܂.`̓`͇FsQ\# QRKy{~K͹KK?0Z85Wч=zcXj9DX%V:;S:r盼\b&EEE ,=y]A-xt5%Ӱ]7DTI1(dZ[x4vP F2>ltrBYفUG'v}]LW54*LP`aOEyI҄x8MnvHUw\4[~X{}~k(xU^B.0OZţU\(HEg)*k`+`o>Bmśt qKY;1G&|a2Xmck 1d!VmI޹K0՗`~[&NM{˙7wKClgBέ(+V=t~jh_%cDH$j@l'WuL;g= RʡJT y*:?'D,Дe鄼8hX>a3l>Įx+< ND8 0߸~c<sŖs.O&q8\2ަM OIb|f OT %-I*,m:py;^"2zScvynB9:.>1 EQ[:t!\8_n>t%Vcnrbmy.?iO\6Sx 2uƕ.lV^N+WCۤF><[΢/_cZieƴXA]L]Uӭb(XHS|SV% #\resC,*5R\0JN1G.]ix5W@?hsĄ[=7 nkroB#!CQ:ZAYo)tDz=LLQZZZ<M 譛Κ¥%՗ddaJqmG]g-\di jN6j(L%po{9+;olGNݿuSx>TĽ -QTUjk[|HN=8q;9+ cN܍кmwn]>%KE!@+5VS9v?^4ef`lqA.#2[4g,͕~z++?dEendstream endobj 89 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 329 >> stream xcd`ab`dddw 641H3a!O/nn߷}O=J19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU8OB9)槤1000103012)ٽgf|;?̿ܥ;~3>,}} |g|Waq6;7<_s/wt\7Xp9|zaendstream endobj 90 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2655 >> stream xV PSg1!H[(XjS[RR@@"$ /a\_Yń0Zf3+ g62 ELY,e1˘L0(f(1ZdR?߮~733^# |d @.;;Q4u{[qJP܌x;_G&Ǿ-ONf`/!Z E5O]ס|w;5,2]9IBX?3%uJKW*֡V/OL!3$ɐw8'VZAz9 g*]BuK{.kAmU pn&(x?qKG~DNјpqz;wd@:8tyc74ATNfMq=܆ޜӜW:EeCP***# ;<ӮI[\ybJQ3:sBKvK29ϭg &@Xv;٭P^l=p_*ŀSIδ !ƶxyɪ8](-JB(5ʺqnZmRG+pP2.|8mƉ7s҄UA X nlwc[o[=D'C.RAnaE͛#zO'p8-7,%x=W*~H;wo7, zV|rgr9p\edQ1!I<=}oN5:Vua)/J<TC^\'Ft(%0ӺxVqe2<g͕9%*U=KbT2m "vm'=!wB#v +j8"xuFzH;D}Us+4R1`wŒeso[282pb{g>!Nb-dɜh!Gwk[GQ3aɰ'X҈SP'=O5MRD1fQ߽NV: Y zK%{ 0 "U7 ;m5 'ҝ[hUs]Zsl2T $E 5tRP_Mw|m4H?ulfZL\!ip1ڂ/_eWeߪݴo=؍:^f6nG˝%V؝\TSjWى֘D&?z^ w\lj©o6ᨤlw8)N%]2GbvAhz`*MT>6(Y_٦2Na ru R>v9Ld{V3U~a~Y~;?M hp+dXWރ2s㘍  T!A?CPZ\Rt l9 BFJkM{rf,#c{kOIʖ$߾z۝;> fBYW T|ч2{⷇}{x/.R[>ϨZG%1o-3?[\&p߬$Ͼw:eH~dlGwq_j fԈ _N FX-3O5+O!aH sOO[2&Bg(1:r/qAʖo%kwpuFNe9z;Vlwo+& ! 
^$a{}S]9NU!z쒕J k ”I0\k]>A*Z⚢⢢ZZZgshZZEE/)n)qAZ۠$nkHM!r2QCcYdq{f'~YbHn*.Mv탒lF"__c`Iendstream endobj 91 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4289 >> stream xX teN ?ZF_AP@QGuYR"v/mhfiMI%I-mi,Rp\Aq\n/9~i 9|}yc2&Ob0)nXD\fI`?JIa0'?8{  )(TKxQ SE-[QO>U9yd~TtrAfzNr~'HF-|!@Nj&/eab^AfԖWM9Qwti GXX-HK3yᚵy n(NN)MNK߳9#s /nkVvΊ%KfF,w8VB6"vƣkKk011360bld<=5F4cc.#0.c2njcOJt;y/NQL#L枆iaӮOwN{ 'ofPD9:k32 v=,q^JRgje:]A6AСi6!R3 si#|iL %R'3>tP璃hڬlOgWjRFd,N}-Ngee*C9&kNq8dH(NS) OS EDbk/"] hդ\(\ D]do6R~4v-=DB*ړ  D9%͠%*`r>YwfDZIj:h2kYk;t9f| Cϣ? 8hzʹ͈q9ھc gP;vJoW\b}Nȳ4Q0,T nQ+t@?^e"gwn4WPh)$pMC Ѣ9+)ǧC μعD 5\vzd!=*%KZ؊VhRA5L?3ZGS!a\ؠBg(!*DPEPo2 hbl>bt5O<k˚灘lW`0.fpc+r C J2&37OA߱2j =j r9V Bjh"O#ah[`:HI))\ Y;C7E3وєFh)JWUu56+c (X3J(XD_@>zōq\E1 ]g!)?0tBD00WQ,aP^~RldM9zF赖4 k@OMyl#ᢸUI HD~ Gs~jS% <\$H0&X @nvAΥj:rAZY )m4BܡFGE~ӹ,Pb9*rUz3Tq :kVJUIckKYt!-Chil X`6:pt׏V{0ϲ+0s]d<-2SE^E4ge5jOV@̍L=͉[B'],}=u TcK+5<@cZACaPP鐢ITH1 # 70})'r~)=wNT;HBq?Afh%4P V[K) 7О_T*7輥e#=0c0M`$rwq/ג/RVJ%~N" :G:?o^S5f \1TWVC@8 8] vOO v7u*Mn]s`7:]Ǖ"Zo&7Y9c]ƍ뷚JdM @1%[qjJt~p_REP*h-T0C?(4 P ̡*Ѳtecf8 Ą,k]שwz.O? ל!⑴~½O8 g@R$IEaV,,YR wdDM8D@/}>LP !nİ"c42'0DZh]w`5A,O##ddͥY=bz3׾GE~ }LjSMʳWlnpq?Zwj9|ۑZJM2sXLpgb6Q4BT'A WfEF#pi*R uh*z=鼢, wȶ )K#ܞޮ_R:=x/0#E}fNMlW%פFOM!"G4Mw|&Ro6`2q$ӑ+Rc6Wya7\xPՁ C|X|"{,տf}n!WtrrՒ'e@ll(}p֝B8koǙ?UFlS(z4 +;w\`HB>{/z֘S.Mr2]iYcڡ3zT`ĄA@zw2PL 2CcQ{`?vX 5=L7Vԯ_,-) [NA!>)67R ~F+4v%ѩ[MHBr$%p,=_G?^3_r7kPf z;!!z90쫿(%z}̼vUx$̮jwu+`7_%Q(ij;Ґ(SE !\bsǦJ+RE\9O#*(sCJ̄E==8A{ftbK2=Fhhuh&E~˔Ÿغ-+'r_-._Heŗ`CX.|p9? 
r+1T2yNW\qHkL9U'z*Pi*łGY{/r&<2*KkXo!!en1'˭RQ/>ʕ7Z zRmh;tBAJiR-zJ:vW϶ 'fPD?K[KL6.~|uru u!N"DfP 1jhZ oGKb3KIclGU82VqCSnݯYcUۛZD V*3++'N0Q'`xN)͉lN8**g ۠QUxa{~˕\>raƿDvK@@= E5b<8^>sY }#8ܞZ%pj< V2yA.]򚭡m8fE%+T ھֺҮIɩBrD/;+3OD1z `62Sށ)DTNXPG(PcɎ){FL~3}6}:lZ=endstream endobj 92 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 368 >> stream x%KKQueһ+!LkF![Ԫ0Rh fR):iNQѦںl@vP,qp ּZ({,mCswiv:ŒhprLE%1"2Z hGJZ2(!Kt iL hi3EHvceEUՐ Br\Fj>a ]9CО a/_1Y VPB>J]|U?w^ dY^5ccͬ%꽾.eN^ҲW/޷ 4؉hFEf5h {EY皭LJfn4n[h[]o;FƏxa _( iendstream endobj 93 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 945 >> stream x%RoLufΤ̲dl_ܢ.ψA h\+)׾kKBGO c aa32 웛}%Y~n,Û}☶ q|hNyX9+/U)/k@5=޻F54?>Cϡ cg&踽T.F;-΋7@|6*Jrծ;[[YsE#5tIoKn/˸fKL !Sd$MxdŘI!?r,G{|F{6aendstream endobj 94 0 obj << /Filter /FlateDecode /Length 160 >> stream x]O10  ]Z2D! }I[.%Mů F UqV&:uxf I>J!􆖠DSUmcm+͟tF{8yp8I.~r 6/i Sendstream endobj 95 0 obj << /Filter /FlateDecode /Length 11670 >> stream x}[G;wDm1~0lcw w >H ֐~߉h'X,ԓ:qo;sw/O/{a 6ɹj^}tNߥϡW߾}gg|:=|7gWKs.ƘTO_y6pzWL-+p:߿&su/xzxGO5x;`Z-ZKIJ1X, |q5gW9Y| C~ݗv_x=Ż3x `%Nsux>|nNo;Alr#5'/4QMJۛW|ڐkX7ܵd瀎{4nx w 4s .qmӛǾ sKЙlvJ@Íp >]?'Pa}xھn/ŗm=&XC) hyY1{S; }seGJAul /.WrF8[Bu'^wԭzWm 457E HN ~mIDNᑏ=FA&xi`;?o_ }i:^dSn11f#֚bv0Ξŕ2m%zzXsJXƍ-7E)*;_e˫ʠ );̜MsB\?}xF-w?A3߿g#:ݷpo)O_/\~r_` O>\3T|tBPgA}||˶hA9`(Z|#@&`(bf|i;Z0᠃@tZ;EHTjv?A$?L d*4vה4u! ձqbWl%6F`( ,j ΅:q&]P3. 4zV4o;VXg E#8XrnFa^XHя&0xG@!PI `XS-z Vl^ #لy֪=eFH.9wE >%Pࢨ-i0++- `V0aB$,(Nc ҵ0y}0O YW;ht?m)> YI7V!sV3i'>\D/ <oꪕWI`F9&]v;x!=wg #4>/qPaN@{t-0<+ĒQ*zѐmH{ЂxN2OZ8+o\ãԡYB0 N6h=80G-:x3XZa¥IX3P |py' aEQ-W5DW8Tt-.윧 >`v&Or$I:bDK>`/D@DT1!FA!ɐ1NޝM53j c3]DU7 R ΨC@y\a`2ɪUS*#8Cx?KW31緈aP倮cȼBW00 ]t#@W%rV`B{oӼr?jJ.; )-h؄Ap.B  ;S xza0֍9 Z/&)oWA*AL̪iɨRnp0ǰn0G֘ `#'Qk\8whLjB<0I̍1謄I >f(d5d˜=0-r=kM8I3(Ut`? BJldbR/Q{)iŬNv\y1gdE=brތ$MIKjxGppseTgF(]`9~J.MV`1Ɍ1zB(Lg{ӖadV($mZ} '?1 |,i sI_'d0 ;&g !L5!CL +%Fɥ<$| ^l(it,3,>te+ bpZz-i0ٖ-F{&R^G+ rZcf.. 
uU]EQF2tև[>3शY]uRc-b >FV)tXYUbYbF>˹`ѪHSP:HMsuFbZ ̇RHŚOT'X2ވ8!1I!EO`.ˉG4T)l/`U̟{z?J*pj%ѳ0j%!cbl `SŁחc ~z` jt fIow||) !RPF@nenŰ܎f|t-BM1w5Vh(Z^/!:@]5@V >m-UpQSQqneT+ՁN6Lr-;$b3*=yR;\B+U{PDLẎ!opv-)q*QæW-\ UWZh>6 Þ-jicW$Χ\w(SP|&MT|Lxf"3s ~M@4Tٮ]œ˳ẢimЇj5 %L%nyVݩ.b\$TC넨 jxm줫-RwMaJʉqm43V_|n)RrLkjpZ[`NO`Bt) ĢWp$榏eX_W<PNQZ~8ƙlK!18"Hm>lSf-$G&CV3ۊҤGO'oM?0 Φ3ЗJ[:B#ɰR dqQBِ}sU7vjh)% T1|$]S^m;w%:?ZBZ}hl+G` 9zW0n9-W E(tLiWUO^[戒$ѕ#~j*s9[|R3Obʡ..&)mXNinO +̪+WY[ų~k 0KjFW> nвy>ċ/qȊ%ՠ&,$5Ew}w=C`w%Z0nRc 1tgN$eΌ29ѝ>aWC/=4@/OC+tҮ0f3S.]ö~V@IT@f6%l r(8$ W ߪ~ R?cRuW#v.zق PVXr=% uuEaϨg\+@_^m.(~v|w`7ٵ.\dQ,5M+L=[.?H96roNwr!cT omU A߫Gci/,kԶR4T%_LSK/Zn,c.ZD[P@knK\KUu}âr02 Q|TٖLo'`MRg+~#/4Im~"wdLBbL;\]#a٤W\r+T+jٺh2P jڙd|dI5o JrW7p` Ӂ6H)T5@`N s9/+c&J7V 8}G+OW*ۭuY\:G$c? L d%(w9ZIObƙ' W dT}cX]4glE%tVjtb&Uq,jS%Zu;FBڢ-2 3l'lbOZae/eW,H2󃫋f黑@uzbONFnٕ#׻>0@J'vOC5 Ҋׅ$O&3qE¤lS'P8r k&&#ST&VvW~mBN.&UFSJq#TY]b]fX&&p\5 dJPY*@Lq}+nkYRWd5A;+[$D5A.sR$dl,Oٶ%iJjQ͡S ҍmȦ~\3nh3wvEJqȞA GnIʳJƟՂ uIU;P>dXn; "a iqLVd@ir`tg",0ye,oٿDW22;֔HR)~ Ί̣X]E;nUbTӆSkH#@`:NϜÎx8wV/M@2tfy$W$1du:Б jx$m$Sa>'Waq>>b/̗p @ZuU(܇`B٠wVӱ UZRҮ$ _9uijgX!d瞶oIpoui*X%x}oe> džy7׷D@d :{ sFgL'-W)/'C. ooe`vr jEc2Υ2Ջj[#;)i?l+͑I7tjy(jk#:O`yѴFk"妖_Ud@8py40eTP7VuqUae5"dO%L,8R\՝ĸyQDp.M9иŪR_ߙk9V Rp1B,9ժp?Yt| țJՏԸ. 49ru}CSDhI cudt L`걥Ȓ:"e>5@<0_f1(HU902W;v}.tEYBq‘KwCBr<mM’k,b+-kի<Vj`, L=#JcelzpuUR -J!#~*-Ij P]Co1&SVo`d^,md-g31i^ W>mJ2%=`w;#(dLxrJi t,s+VodH e,{l _l<{OY;$K. ߸qIJ$ڳn6Z GRۖx`@!ƛӣϓU3eG] |gue`*t9/bNL罙6ݳWtT2$y9_Tpǯtᅹ{6A9f& ιm 6a͋SX7Cc`. ߰Kg*GX 6s:6ULY+_O|۔ ;ɝ~w`[A[,ɇu(}Vv4ç7>#W|~S `p442>ܐ Oi2$/~gN!on}mPpIl, {RN:L=:|XT + p$R*.V19gpA5d #X8w̒Nr>Cb4f!=ևBDS;ֆXBY;lh3##5Lupl.]é~mW=ZM1W-t]/8;.P\ӟOq# *|U9"qC__$(A/ߊ9]Q1\`);ݸ.L>mۢqݑchz6WzFÀ*}ߞ˹\hՄS6,$12M~Y}Y[L jn 8aaAV2%߶WDoP5v dE>^&n'Rh;`fcd?^vN]cެD1nC)҉Sֱ wZ2>! 
ޅM t#r^#ArDw_>C_8#A뉠Y!'譒pKlqo-jfn֙Rz`eeX "NC&uIqTgƪ(3KItt5Wģ=]9|f \~&х{f_Cs]mu0\?CjkjfòѶJۯ:qqa-w۱_O@.!ѻR 9ʐvo?\6I&ub_If lѺ<cbLDce;oX~eSw4*'2ڕ4 ;^A^0;M<~)YCXy``Kb>#a0AnЛ~![UۋYD` @7Ǜ{:3 ONz_/_A>rHbl~zI.Z.*N}mk/9"7cw{|CFj#lP摈aJ8͗+vdk) ݕ⏬kXk4zBH,cH]Yo%pp-"aF>8kCwv{!@ިu0d阦5ȏ FC}ȌNk7r;mɳ6ğWm}rIrj׎H+K^t,/9ѧQ1ѡ-qH,  ^6]P#ʹx5P}ƗbIu} ؟s-s33 /:9m. 9~۞6rUt"ګx ˦.l'q+%kCDh:}xL Zn<s  *nrp祄U2=}1f;R?܀<%V" V.2/)-fB=Q:2įg|z#2c~D5/7G)D[lA/i֡*_p}e'EK y}BOK/ƒ䢳6 I/m8v1O#?ÁwS)+n{*egGɗ3O͞qu/G8Ւtv~o> stream xUTiTWbNٸuHXZeDvEh BAhPTKT(FqoDFY\`'(h$&z-#gxޭs~A&[K9rIn7Uvp( Y eZh[C-x#$("xŽ 1b*aI{I)', IyG,ʰzIM6P ׈+Io-^N3|j[:ޭSJ{9(ŴY9Y7YeFNsU v};X`/g<.X>kD =1r.FDk& k>2; S@7 ̲"7_J_A?drJ/yn=Xx3ܠߥZI0쮷0~"69 MYCynyM$8fM<fenm\-rt]մ'UvT}_kcLi☶UO4ԝJZ .Υ|+bl Ҷ(W)~uFD'n;X~^.έ5^wl߳Bv Z%R.5"ilpobv<uD 6=h<']|0ԺKÃvx\@:%u-[ VpuTh5[<xnz=L -*>P\XXC}c endstream endobj 97 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 525 >> stream xmkRa߳<!S8NGqt'O:TSvo w#T@MjdJǮx?}x2Dz8SJ&5ViEĮyz^//:x*YraI5m<`1U:N\Wϐ 2<>A8*8/\]Izs܃sfv$0o-&߼VE0WM?`ݘendstream endobj 98 0 obj << /Filter /FlateDecode /Length 159 >> stream x313R0P0U0S01C.=Cɹ\ &`A RN\ %E\@i.}0`ȥ 43KM V8qy(-> stream x]O10 ЅUqP(/ Ct;̇:Z?Eƪ@L4D `,,O\g|Ix@w q+q)ZD ĺ;{FVI%0l>nSIT @6榹I*`,Χ`_CSxendstream endobj 100 0 obj << /Filter /FlateDecode /Length 4110 >> stream x[[o\~#@S/m B"ñ$Ke"pxYv:I3p873(=}{H_x$?o| 1(O^їѩ] ѷË,Θ8TuxBk~?p7qA 'gkX.fQ Zx5Ӽ0Nц8\H}o[G.ΑF3D:67SsRnAQaZ xD¯| KCDkgU6K Y61P}zGy32HOWuk<IR/?uz,K熋,8lT( -[B/ۈ)u-mF;5LiCZsvWɏ1a ҿ m*(q?Gg}/+S(۵Rք@2 nňZ#HRe:a!FUO"L>vMC# C4]$upr"nȝ9>RKoZ"o~zon4ru KqF [$ p4 -̦ΪJd!^+'ۖr"!8Pr }r$C*; v I,]rZG7M=Ɣ|]€(nӥ:mLQ!d NRW3ʧ4x4ȵ(%`0)t- k"/:I$аTlq680vh8lXI 2$c4cJGڀ=}h"XTRH@W8\_T*/6KFi_K "n %]抦tpb+IW]6-(yhx>bupJUÝYMwY< R_% ( 5Є]q CpX$(,?dp|/Zs V8{/bS㬜M6 X&;`8͌bjmu[AϺo-ux^Aإ@tm>/h S4\DV)"%lozpB8İ$#XKFA ? 
\}--"U +|U7޵Oqק5s-Cb14n 5>Y&_W`Z]WIugNE2!5$#Jn9ŭ/d\<ΊCq6+ 9WNTNz."~@0.[ =Bt:,^5:|)Cy]֏::~vZo .wCwU~bx@ CP^B".9v_LvҀ$![#E6Xca.6gxYхFZ=hC)g i`:3@,prBk\76'J4aK \WIk%^2yW`?y6v7>-؊ŒƴNIv)еhPr~%Jy9g0N }UppsU5ߗ]u|PmEiFٻl{['z:Ov]uս_@3<%{'-jZ(iox`qE #xR@aeVXJU@AJqQ㢘\@ `Z,$)٭]D ](u`BY0as"|!݉\QF܈3 1WPI롡dca&85BDG叫Da;9(لIHuu=vؑ|Y7EUSEFaL :bv yC@E5MYG>>+ȑOWmvEt I))!H*u[i*5d|T& Y `-XӐ m/yPͦPE: .p3D9#jm-'oIy6vUiZ0c=YNuxg{V/P~G3e/?Kzs*sB .̀qF?e(^$NG4 B%d\ B-z驦5f 7zMJ\4&u_۬Nh `>D/yD c(2%ߵEHcYe~eR?~˞tuΕ۱r-,%.Ƕ1cMz­+i hjf&f_TXJ=R)j8rZTC͢3;^9$-WJuHfhόԢnf+Ev6F9f^\m}ScV=et?]ZciZ\'me:~xlu=߯u`Lh4uL':,]~Y|a~)?<zmg Csju~ ѸY> ̈gSJLu~1c"oӼPy5gL{ndm{㺇b﷘w-R:R! bu;C/KeUU"{~0:P{d8R8S􂺰6{݁Z,wgog!dea%agvYfoeWgbZ D2UVP*ȱFYz{=É=ىAe !>ADb~Wl\ʕr% W \i~|rp\N{}6WftG;d,=8j#&fhJ Q-'d&wcQj Gs6tPWj춧幛T\ʖg“Eh% R Ng /ᨔ%eۄ߳2u~<'^w:eP] fSgwuq+x}\`SZE4{K$#[bD vG#NݧH?3:_/ˏ[endstream endobj 101 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3627 >> stream xW TSgھ1zQuzh(GpZRSD@@ @B|ـ@H’aSVikVZ;uƺ97D ϙsrrO}}0a_~˺[gF{.oㇹdk8 ǖA0a4f T+aFZ(tJЙ ̏ t7U ]$JO&-TQAE"`aTT~~~d7'/L[25"4?C95'UnH⦆ >W+J秤 yMQn^ZqArJ޴-Y,Z2ej\D虳fŰ&=l ۆmv`˱,{[Vc`1ػl[m16:&Fcl6 [bAl,5l6!ÌuVaر>H>1|,^?#qDˆ#&8~5w|ԅC׃ QѼ܎@%9v\ 2yN` ud)j+Acȍ]ّK  j9.wsD54R+]knk9q GżrMFA3X.xź{v]bDB%Wg1(F4"`  ($CdL?n}c[WX6 ]0ʛ6\~txgG'@ע7gV`I=EWBa0LFyQڀӀ8wh ͜5zł]R~$j'[{EN)j+ptXfYCru,,3<۸Mto\l5m6}DsQfI4:sCkq'jD"\Ey]rĂK q.eft5@ t!&Uc&W]$QQEQh_!S\U@5dֵwܲ8Z`;1\;V8>{% F#P ͹rw* K=q$@_ ]O\| ~oȀZSR=e%'I14U} kepQBLNgfEQD'o- 9c.~ϲ98gِ"@1(\p^ר6y0 ~saQ@[:Qh/3nouo%Upw S& &͙f)iHc c͍-ĪkLIc)N=wѾ׿qC?6hnmfpU&%G/ <x'#?Z:^,3_N=f'ϓ-b?KfLGC"f2)#;]'pr.BE}U@GXK,bBE PK{R;هhǡQutۇ->OufQ'oȩ>ˁ~_DfTG> >P7hgٰE s~?DO}U;/DEVQ1=̜WVw|06q[$K)Z!Hٖ٦2A;<_eJYhV!@[/uZlz3Sՠ ՠ9[PHU'3ΚXa8qV2Ͱr6vZ-De\CRZ6"O v\p2Ivo2@l1fPGإMl.p! 0lՕp PQ')^MɽחPF1F⵫v=8 N M,wԀQPW(SSJ@ JJx[Kj r#p-6Uk*$L șf&2Q5]t+QHK@AHnCq}Io4! 
H`)&au쀱 ULUBlɨ&gM'ʦjj;GAзP_&VKK0bQ,-R̘e^ {`zmQ$`s@" \*bTz2gÊp po*|1}2Ӝy]:y|PR[52FUoa=cp>Oae?lwl!='ݳ5_e =X6sgdpS ypFBr-dMBf/N ߀[a6nʭ`sgih}7MYYJyr㗭PH4yj%䕅޾G'^@eT#kwA]?b] 9>?!zْɅuib̈́3ߚvLG'~?q'qv%䏦o[3\QԁZ+  RxycytR1 mŁ%c&HPQZ |VnQAp׈~?/]endstream endobj 102 0 obj << /Filter /FlateDecode /Length 3884 >> stream x\n$}W[zA cI+wY9Ev7G=:[ c-N,:UM/Fgߤݙ\rf7st0OTl_oMJ`71'sܜߜ}7RT6q8ڨ6ixjeo;TȮ W۝V_89&u~ o u^揽7Ozzվ~Tδ*'I9gUɡ( OAe Mo  qM!FlK9O˭}oEߵ%\tνS_&B~xk'y;*nv֪`|nr"ׇ-`ïM?V#j8R qUd8m r?zujZ]9`7/RWpێl2Bo0YATfS"$ߒr< ܜl֖cv$~G3gꑓq ֞X6i# 3+\@uGXEL#r-nd2ڣﰧ}V}XQPJ66-~") ?O_U4ɹۺɥ}J[]~Hpvam,)j<3<GK􊟖\uBbN4āgˎW>w{)7mG]oyh0?יU7"B-K.Ube@7kp.Yn@\ش6Ũr|d̈Q\zlĪ}Xe/s~y`Kd864Ii3pO"tOþРrO@ml^qWWc`i3NpmRڄ;Iud|e2MyeUECܑj6u\1ia/:=Jl(`P縌rO1t ^Cn):5G[Ժ79JfQ96. ߈vTۈ䡻qwq@D@Xmݙ af5{6bͫlJԟSכcX7ЛϑN|ћ7Ym0Wl {ܡYD <8t1TPgkVp'4Rk@9-i9pB\us^ofMbηFm!g1&fmʖ`?6-z7Mf|Ɋ1OuEUotMjݪٙ}XٻUl!]( +_Z:µצ2z1܃wԗMśJFq:Nuh~鋮fm%y*2hEYgg2Vpħy'4s葁X3Ee2(e*p=i5ܷl p RCx}[f.-[:jՓk;Se8nzUn5_ AT%M`3~MS+h,Y]Ǭ϶#.KZi ]i17&>D"kI1EM.s1ώi\JŮg*\|CA&a;SP6ރ@c<ѻ[]ۏT7R?cS(j`{p3kH!Bv{bVxMNUT?5Wl3xM)mۈ}/YnXزX/' ΌV/2'tu=s^CiE3!MB * N50j`&hFC ؓTĒ]3<=EظLC]D6X$ b= DdCAD&@JHkoy\D @`A!5L,b'w[0#& K- <&%"$omSVJR2M Lq@Jk,P7ًЃ]gbf-Ä,&.'H]0w C.nl Ħ(YK)KՍ ˒O csAz Zr-Q*Vc|@$ 5DCDXxz\ Kg3]."(JN5z%PG D]$ׁg P鐜 Kѻ鲀Lt%P3 V8@I`qJZ=pK/JxI$*R[,kg4C/D|CoH/!ѫF4a:CK2=Y QUtr ڐ3z0q]cq kkO\0eS~/bZOΣ/vs6k],Qg]̧Z&,.~e>3b6g6y8zJ]~ǫv#v_Yp[ 9S?_웰<Hv|گ)S/ؕ}&\LGꞮzjm{t8Bcgl>Ҏ{d~1ϧmp-׺~( \r=yCӬ~Hm2vl,n[v@MQ]2љt|57݆:H؆aqlyryOmr45b{ǐV xΔvync;%O3_>G8)VۯXէ{'QD> stream xUUkTT>wf6@M,VtB$< 2 ci:bL+qPGPSD1h@4*t ̏jZJ5(N꙱1폻׷weJZl*,/*\MNbh淰8TCUHMd̀H0*[K*^ώ{4?_RfǏ7RVGUf[ZQnټͮ5Z_j%!*+ Ҳr%6.AP&z( CHP2d֠4e4 !͢H f-b*- 2BC9Qu|;l&~އpGNVO0h ^7yȹƆoIm7i>"s(_4D%pX@A5yQGb C@I'f<]aƾ~`lBC!@-2 9jS}Q,o Ol(׈JSOE\ky ބi6ޔV>x;zK>'Pgާ0{xկΞu$ﮯ{p:*4qc b%((>EA B,eDYz|JshG[ͧ;9NokqHI&2K='zNeW&9&}]8~swҞS/.sI`ђɉc/ܲwNt4;&:'Jji70xfAeI,G) 536e=R`#@!fB m|Mm݊?"ѽDHŹ>?p:jk4;va4udgx!ZKlk+8$E(uhGno^C뾹<̟!^IUvqR82.Gΐ/X`Jb?R02]aI"SWXcYqi4?\kçqw\~qN!<Jå9X-BЁL }~Q#݃h& 58BC}I4?\ 
z߅|n܌]xpwgJ-B~qQ5d`Y`ں7۶B][=W>SzI((_Ysg1yæףrz{iіoCW.Hz>-ӚN<z-]9A_ޒ!BC8R__j({aӭBh>EM'X*WϟpnkZո\ }%]cnxc)S]A@D  !r9EEAOAkdtgX {@C44'l{VwáΊL5O$ˑ%ٍ[:S1 kq|eo0=0C oDK1Idq* rZؼb1,<c( $O < C"ŹM,,&擄b1YXNy$څAnjЗ2:5RlL`'[{/Z^!{Ci'Wss[:|wMаpׁtmendstream endobj 104 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 b C+qP(/ СY:ߝ|pFcpƲ- DeQV-Ɠ剋BwOϲ*> stream x͎-Ir;?O<O2?\;@ `Hp@)fu5R豯/[=N4I4m'v07l?\o߿ӟx?|Oz6KgzkOz/o?+yO/?~J~zoԹjay7j[|[͚F? *rj[FZsJ5Σ˱Wgtzvy.t/U/@zIu]{w?|v}RjoimH:B^姿_}JuԷۺs?+«r5,Vk [Uq8V1ẹfD9cStŇE~ASL=W݊4-kZInr_v{o-[[8±^[=}Mǀ:-ްRm@W^sF_Wߺוikz6xrqhԲ mYGo8.42rKږʁ!32%,|\8=),g l'>sYO&o\ mY 5dxRsN60K!,zi5ca?MKqZ ^޳r#ѹfsk{xbw##^Xx] ^6P%-K٥$Zn(1:K5x]ehk6ׁnAlu0tusE5בz1˚c1.༑eȱME7m9#q;W.hs#rIt`,tf ӲO $+JŮL#eMHRMLB2@o|~c+ôV[, :t6%lxna.aKp}8x!n#fycќ5;r2'\s_9LO02f0gf j=VM6GqcXb+@S˴s e$b|Z\ɽx?Xnb7W㙷eY/3t [^k$y ner\6EsQÇb@-K;&٧a_,U|6#:WbK/lx x2Skeͬ|z :-?ޑ#_77V<keM X+ z!WbA [98ť%Fe`+J7'o699aQ\ t NeoXَsOZp`]Ebml.KC^͋ Fi1_qY|`Yh˔}Wlx|ņ)}Y\p2ͰVSׄy'e2$nӭYY\XmYm;zG-2(ж z2,,זMlxw;x908b=93dxK>*X>9zt^SCn%NCȶf0m2tO˲^ɆW3kL aӞ0k_-p+ ]?5c,IQ-'Z` $t0aìC\E2]Iuf$Og+۫xm CL+9̕ 3Еea,uP8jt%\\ɎW t%W].> {?xl'-qW$>, Ɇ=bKC\s_BfEzfWr'9bzCpRHbmTs$-r#i#mԛ²8CM FS91X{ʃ!1g?a!f=N$fK[?sw f7DX N:w8b ; `vavEN$B|f60+ ƒP .2Mdq6)ӭyE`Y7 yˢzP#J , , k(mxnLzK3Z2.E~&*1I 0}i$d#txb D6k CHp'9߀IrqѴhVs^ o{. Jp%Fy¦Kἒyݢ&WNY̫\ aw^텸,W" %W:wxbg;qpSNl %2-N< :!&'88,"&O2QLĶ8256218Ķ82qs]ĸ2qXL \ Q8-L|]nd)1\CĄMLd61 NnALYȍLg&Ff&Ě4xg&f"x`&Ȅ:[!3!`&&֔ Bp 801<ۑ&@>@ b#AL'!DL̤XSs] SLF&#i@&ő7E" :3UvEDS-5BEHXhmjbzߩ"wmSAM jbWTmcBl@"ol“@M4MM86iߍM؍?86o?^&n R51Kqjbx`3ljf;ۣyAM@jbIMKqe&fY55)M qT/ @hC?$hb^CML]&cH# Qkb35p5Q jrj6ȂX& Y"Ojb[zmjXsP;uܜo&t VDڟuf M%|}@OnX HXO0&RQ%Ty86,C`Y& 6=K 4pC@X 4t( A5;C-+DkNchS&š &}욨 h+Va&ć' x(MlCC MphNYـ& fbz2Bf"p`&@ ^"k5y up` !&s?L4 `b]/90-L`&?910 bYa1 LɅ&n6"&81 '&< '&Np'DRdc2DĺDL`71!}Ąb>&ˉx1qXDL GAL bL Ulb~PfsDՓSZa&?Ķ80&D{e7"L$60Qo`bh~ ` AL$a4&L83rL l`b`ïD `bF90BDAP%q fDX ^߼-GrEKlA%nUU;޼/7/ZPx]KtM.?%ꓗpcKob^HL܎2g&731D 731"nd"$$# Ȅ8r=63-b&/(N^mf9L8[ FHj! 
= AL 9+>‰ bK4: ~j182L~(ձ G&n4LTodȄ2A!kೲF&I9dtl ĸ2M (!bǑ ˎLdOldBLܜ.73Q LF&#MpfCSM n“MlJ± 7&PBB7"8-&BDDx`E&Jr`r76ŀ&ns4nSrM8Ħ$h&M<7EA7!&Jdqp8Df=3lB؄6IݍMT<H~)PpcXpl9 6@J؄ߵM8Prb]&'6!B&'@=808єϴD bPN\v8טBr 'OANd?(Jy9Qorʧ9 N p XF'X@'@ pv"@i:/}H,ĺ4LovD/8;qN(ى;#"؉xV:z4ot",N\G'ҥщ̅&'휄1$(S7DK_89nrwMN;:5t|:QcfN)3 'f9H-qG'(NlENye"t9eDSR~p rȱvLr{IAr4T6˗Lf>%o&,wO S~i/OeiB͆! ",}qE_4̓ |"{Zx ʘSex2`2Ttx@RC v2NCg_YPp$c[h_(Ud4:4dl bPFve2D[PFX\urО;( >e#5Qmq洱Q|(Ac[@9rl`$MƆ9Dyac.E"y`D:%[}#H!HǭF:&)Y1qc6F71?LǶ 3X6b:N `:fLi!ӁH ɮ`:Bb&mq#Lat~X mґKp8QHGQg#EHG~4枋< yA'GmȣV#A5ȣyLPn#dHyNy`pJ+CG`a!phsv:9Uwp<@; !iBZ 8KW*xmq^s/k`~ט^696 tbA T6f(81#M&`%Ɓ ԌHA&'WF$ilQ=CFg*lP `c ,l!ƚJ6͹ FjPgظ-Y%t = r 0AR$6N ?01mjDzl4n|IV d#Dj(ri6Q4 6 le+`#@tgFݳ>lt-6@6"M(zg`V@6!d㮚dp 6gqXwl`DؘU)60[lH"`#AOxR ذʩrR#^âB:lP,p`H3$-ldŤ6f*>m #M A_qڨZІ; m4g6UmlÆ6Ϡ3hCGs@gІ~< '97ml\T6#6SC69#:wJ?$!PGRxmmqj Q x₨ P&Am\ mjʡj:I6Ԇ.i`d0`6M:Ehٸl\Օ:@@ 6.W h 6d@2`6Z8,b6BE""Dc]EUhF.ZG9`(h jFntFd!0̑;ċ#y#B4 "!yt?!8a^G4* Qh,ORŎF.* \3Qb4y0EFꮟ"RqXh_8x? b4Rb4@DDAI(k.6/}qXhrD(3h<$-JOg4`h1ehIBh\.F*) p253;A8 t+&4js $MhȄhh Ѩ!ƥs US"&@4"4z; uN"4֧5ih-2Dh 4P2$4%^NH"4 1lqD'4.-S7WUMi 4gBbQE^:&.M:W=@ϱj"4PFR -}BUj\9s%]&3ш@4R=sE·@-$M,L3cdP` &RfPVXd1q#2Tp1aច !fl @c;2 YUwCcIPdx߰AQ2xO Dvm20Wpʌ̈G.6g@y3Qʠg Lch9"M +gPL/tKRw3# L kN e!6g gFH8<:1WDy3Ϡ4\q/Ϡ~378L:2·An Q84Iט'P#c x1lkLGle)@|u5ҵ"_s5"_c]8Tڴc5&3wBw!bSZp /(K$ϑLkP_Sg/#ԘaS j!acRԸasFacB@|PE,O"lLHpHTxI1! 
qE$l*PIؘ'6 5t0V3S:CL>CƠ6>dBlꌄّQClQPskQvZFi:'l #U:"lHqwT6pF9asyB6!ͦAh$ͦE:>"7VAC"iJA-#ܘ4Y=ͦ"No Gns y|]:rs^+.2b%BnB|'x8q.aܤq _&hMrt3 &O Iґa|V)=xK \̙37HZp& 37p2r9v+/%kM ّ}$>^`ΓAJ[x?K؛||;8 'wNwg9JꬌC8,|q 4tUm7ap+z@8PJ9{8#'RNWq K!!TCE36NS{8S& IEAc6p{AtIl g[p] C8b˂pШǻ4|FrE`pi*x$6#HGNv8Z@p{V 8>JI8Ô@?D;픂928躓2)?EYn\(pjL>\sP^@8;@BNd 87UR & N伃* )-18i8<#'gkqaUk(F G Ni08{G݂(3ػU)dpZ5dpq6/*gp< NBK!\HGpq68R 88Ӕ_<d3RI9؟ry@9U \C('@rH2)x M"pܼǎS9@͞TNqƩtņr {褠M!tTl(hAC('U@9H%KIr, (ghACrFCr0Ӌ!2) )2brP5OW Xjԧ\VDs9p.gs9s$'`~˹ r\VL RM#ɤv2IkC `*)bdRLdR,4IJ%k*SIzrL\:`%&!\J")H H$ 3bybi(pU`")<RHs%6$TOTB $ )օ{D :8C=CC"t=kK`B %R%"TYPEFX]ɡeT`a.}kzA,f/Ohla"ݯ?' u̵'3״ de R1x~A~IXk}}SkU^[Ӯί^㇎C^SZdbkcWHbq_}N5'=a'1T:/kW};+^k!uE~~,$?2 G/CkB 22\?/w8c]1^sG"⹯sc=_ܿ>EDiĪwvbkQ=umc8}L>mqM9wj=kƆ?u(|q[~uǀ/pMS]Riį\oo;@3k&]돿ٗy{ž_UcT{ߚy9O^~׍[Y.aVDZ&BvCo}b{<ᄍ?q/17ٿyX=m*$4 WlOZOh[_GyD 3t |o-Y{ffVa[ezl-VVa[jrl縷cZ+˟rAṳ|=F儭GjH:Zf px9 `ZCcnyl՘<>*#۱[[Aϭdylc=:O_pQt0Z)VR-^u5mM f95Wx Ǹ{nj[՗~f8?6VZ9/oo?OkZX퟽3,7^4iuk[^Ksl(p Y-z~{.?#im^<=.4|nVO?Y)5[b=yn?]^O0$L*`b !XSpZmC4XJ=A䒬%N lgyiay@̤YXj-!TNb?-TxMZB_R-uOH۳G2y{'XogX*ogWGjj X:I?#\O?Ae&o AX4^AbidR3/O&sRfQbPղg7Bj?AFK|%cHST[4B̡2ӳhA!FR~J>4+FO]HE0;XlI'3 dF)=OabTU>zB)_ O99)׋D~2xD)Gamq>1Q'K u$a8@KЃOH~YƉ?,'ĒU @?n!"OHD`/ OOh8l7e9M" 9̫ XnV[-19ռԀ8`Sk B88LP!.$a1)A!4T+Xa2?>vuxtd/Y앷lJerpzz>验A.^dJ~Yn$ 0"a1R)Y,IDė 'L'E>| \4"o?,H)%(֧OXOOĊrR~\4XF)S"#~ <=AJ'ٞ6݂% 9Mޓl_ImE[$D-UI %yPOuevI}ݧ؞ ؞P-4"nbM߃ZOZlZO`EZ`#=X=PRF? 
F7&U<XtfP&%[%7Il=4O' pL[OаHܺA >DD?=z-*RN4H$ʼ!@F'r!.{Tb칠$ e8#$/Ipݢچz<O L3 S{&ni&-!dz2f\pdRRJʐg3%Ip=CPJ]A\_$#ihr zDͱ|Lz~{[PE;| o'jT,F狾{Pғn1[H'<#3tPgשAPjqnΟZxD;ƩuGdqFgDZu(GfIgBE[jRnNj޹ӜƜzFd"Ng Hr7F[B8⣹gF0‘n=-Q"lݢ l(5!n %+eǪcH1gLdQ@l=CZ;gY [XH ''n,E&\=EOIIgޓ\V$\ϐaKMr=  izb%H3VjOg'n1 =f0B_ géGu[k'{G 3VOCFgC n(֨\֩[X: N9ɌB'%n >ɂV:@}r0:^붼$!#&nk}ѿwe,jj(Ƞ;-x9C+םkw`)ks kR[KU{iG"xJxSi&ƭnVmr}xÕ\TɋjCql\1t9]o>}>ïCxsU=>nu*>,|U_=ł9 Hf fX c⹇0ה |NqХiKhĢU 59rAsVdʼn:5C@oX52֬1o"] 鬲疳OeoGs^T޾tOg"J,.亮rpz~w[.G ,woc*nɵ !ܿ&iyPqO*rFMSBWҥ5Ӈ۶ּ\BJ%wQ%{-.{HdȝV̜b, keǺEQ/<5HgM\hSVAȚL-LHK)x,-)~[}_FhCv]z\aOgYet5i:~*h-Y]%^E⋿%-rcRת36L_jz.V*:\jVµGot.؋X[(OQ+WX"8"QRn`Cb7+1z *8d]s-1%lX1;FkղTqe'f F`h?yL{tSIJ1DŽ)Ъ hA壔9, >Nᢴz h4\>単IM}PޑMY+q&hH@s_wțy~ rZR fݡٺŒTOֻx'O}f%H?A(CycHb(A&Ltޗd(RjE_LBTSыӮP YꥨpuOnST KO7/Hc/)g= iy^+~ɢ)nGjt$Ѝ/IvJMz00V:MXX@t|t޾@@])@=@#}ЉoˏIThLV-堝RH\GuH@fAzL0(( V>שŘLAH mzUa_~\x@:=ZpǍt`BP^{G=`0 ]pSn9aM%LDEpPzL pV"%[hqJ봐B#] !bkL St!5rD;,ՠŇG<[3:)yha~ױ9wpJ`uZn 3xf[Ύ\ +lY==Lx.F U1T0,<ǖ4 ODB`c,僺T""3No"iIb&C:FoJj|HEHSVFb0kBc1E@oG'lM 3)\xFy66 gNܜ}/[iXwBv*5%?nP.XXsǷo`qQ(n+~;K~77"e[/Û.;ێ5, ,iaw:+a;ZhPAzVGw)޼ߡG]v[EnՐ:(7ȼ}iaC+rW@E<-,IF\Jyԣ񴰏cJM)hhleKVjOޖ:%\T0$[S?J o%5cr CNCNᜮ?>>!!!r>>>>>>>>>>?>>>>>>>>>>>>>>>>>>>>>>"L!cr r r r r r r r r r r r r r r r /g=և‡‡‡‡‡9o>]ߖNv=z``@Xk{?-~u{Qw7ic- mqH>j֘cYz,_owX4B" l W#q;asvגs|<dz셂y_6S'{fT'?JU&O JkMsePM>uN.++*^BX1i c/ jx!4/(S߯mWvQ}^vQ w!܌:͐+.wE]A\<2#q?{UJ(`fSh1Yzy$ T(Ł\OҜȔВ'STvENd iPHvGhdFԎֳ|,=uKl')xR+,B3;ņTҍn\_%CSd^9K%X~IBb#d.i̾7Y\pdB@f$TF=9%f }n{=%V (M Q;J:H7;%-<=UBX`0:Z0~OFaQċR䨠gAP8@7p1x&EB X+}qp&7M~#Fu/,F ;P=rGp+ФCE AxK N8r{2 {:>t@afsm ecB7!\νE3` J0oba 1!AE%2mˑ$+௓Z7\!T'CuY 4p֍tU! 
}ab?ر8l:@N˜P + YQ  g0D' n^(@yX̝͘^ TARY)U8MC8Xipq]NZn/ٌ8GyQK \.$tgi ӱϮ灆'=4VkZ!;APu[([1o sيC,Sia^mq674M*ۈ/r̲ѓ.>_G 8k\ݷt`&+ 57Q{P@}VG(\raW38M}܈8hޗ OՅ/OhZh:pF8^TֺI*Yx8w_>R߃woLFdph}ćoQ$) 8`EWwR!.7@1)T5xB^ae J]RPaQm}8*,RFVyk*P1P$ AD!T6@gDݮG hTR"jXvو׹$ZgLGMM宻)^9EjB~'*T=T+DT&T{)Lr#@!Y4 TT^K ^z?!T%Q )uP%rZ!{1VHZ^\W|~V(/CgVTKiVH6B%l)YQoB8wT,WGuu^tw5 6*K^I~YJrkLxhTAEՆ J*QRF,ӠG,ݧ$*FpJ5xe嵘 У^-tJ̲àP, Ci}ziE*ъ^+fx5kvU&PUa?f{+^_u^{ۻRQ{  ߅kΧK|Ȯ/2` RP0xP91MG$GutTuSR2pEL8@"" Pim iDp ݊wRsp݊n KG]Hb{ߺSҭ{;!.ARkLM@Wƕ*/_,ہ{xo__J_/(# Ԑ%`91(DsHt%Xsqe,,7*훥[ɍ@Vm %`gj UZSC5 ud.d!ەdqsй|?'9 R08PTH: IqU*w:@I"<j1$G~Љ+nuYIw)D7hWWr6ޯĻ~%MHdLr)D*MN" LI ,hJWM457<8A=Y$dEȝg6!ltK EiF(Sw=(A$wsqpZ^ڱ!Q 6ULoqT}} Gh×jIˡzMg%6W&YL#N$!6;x?8h22kXe3beJ3we}Mz^\!jCI>K^c/(TbxނQh2.o/#;_F[zh4_n dh\Bnܨ˔2A~?_թэ"*X%%U?, G,MݫU|Q̍QD\<ƥ}E`9?NJaD*(40 E\4$:4b|:Ā$y ¼C CnbףC 0} ?@pW1ܨAL h7; fyu_cqu{Wd%!BD ȗA: чAT 0F( y.PWN4>1^x kV7 F,:`R1%-br!fxǺh3C%j3=bno=be@#B&1#MbK92GޱK\H$DȄ\\J$XY' J=% IqY>!~R=zېPW7. 9{ !:Mbn &1~~;MbF&$fk7*Mb7jq{9qT.$fVfILE + I>I['i5ŐP4<](ڡbچF5=uBD@N\ ݈G`ܤ61h3J61Ѹ#`z \]BXW\X%<`iKnʊn% .ٕ?Q9<ȹ+DkSLhEӽ[9HH{9̇Ʉx/[Q rV2S@E 9W/\ݡ!9_+Y{Drքbv\@7GfVV}dA2rGp}9i"#SԳ pn:Gf_#s+z-}d_>2!#}ddžr^s*ǢJH\I^C9WC9B\(A\s@8T:|N@P +.IwƩ4UyR9W~/r.NBro.2k.̺D\9DfCԢQG}kq4ɐ)99}I4hآq\աy{0wő 3};S/p(8|*DZ>8P1q4#`"+&8wF5SCq,98S]6pqoCfގAq{ ǷԿG$8X]t[N8u'8-1)=R q S8@4A@3DD0qqu{8WL'CLa4`Ruis0qz qg7Sq dq>]⠅%oXd88Xx5Ia4/X[!'BqV8HJg9qF̓0P@TE0Έ;`q :Dߨ+aRC0ΈF"hh2c<)8ӄ8unE`q+^Eܛs[AEv>4ݼ?wCZFV)xF2FIoC%h$gʸfD5B^~iB2|OV3n7i:;Ndyz%;p|IL,SpzGכbM4m&yְ/OH"PF/?؞ Q:f 5o|A1\om?1<^/lq~ZF#~z}d;z:L_Oe^Bqr>k'_>hWL[['ZR-ט_s'_tfW{~/֩$ל{!9?>*?zkB1בku^:7Z4V5U>KC׌&(/4yk%͛ `E>1^'ܞx<8u,J/_g&k?)X;//:숅`#ZSZ7rvz][ oeWNŹZÿ34{ˏnL2z 6^۶_}S{c:oÚΓ]^NG X_?~掏m)Qh-\t^㫻Ν\-I}ŝ~E~l ~_y_p+>ºpt#nZ"g{&ÿǝ7M}qPR:믏K֑1ֈYWm1ptËKk8~mǣ0Oendstream endobj 106 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1497 >> stream xUTyPWMR3 l8u D@D(KAx1AE!PPˠhVG3?Njߛ{~}pXRyT% se1ҹ*V/؀t(0e. 
&,K FQ#av(YHJk֬TظiHKe!U!V*P**WB-G\D!1rqI%˂CdIx2*Z%'9&  qG<O F"vqB bˆD8ҎB%_,76mͮq9?zXcLu93&UΧٳ 1 dU>xmztɌҖ4S\m zZ8)}}JOգ'$b_ۀ}a4 l%hr=Axa-D/:Ym@ ;w[':GR@m}]wjx 9 \ZƟ{`]! c::dHM`pF&-)~a΀ h{SW}lo{WCqt):~r^z!+CPΪ4h=UcG7I=jo@5yxԲ5DihyMnjΉT3wB Tbj[&OOZ骬$HlFp{=\v}%E7YbtJ++cmLiF0}MYx \Yrz%ftĆxfJV2HS|_~M{XX2 @sBV!)66}0IMl2ӂs-v\ E4;N8U]h, {S 3񩒸ݧY8'L 5q wAܖ2UTy1> stream x]O10 b C+qP(/ СY:ߝ|pFcpƲ- DeQV-Ɠ剋BwOϲh> stream xXKG&⒌S8pߏqX^EXKe{emb{'$"Kr%ꞙr (WWj!ro9]o F7_c$cP83'9,,Uΐx^ 1S r*D(=K;n5UjSU no__u_o7~|o@ȓBheY46E1YrCPB2ÌpGfV-#Ƹ˥jN.TLtT\RI(֑8$[f!=h&E0F5@b[@B͉|5bw78q]VPJA-зCF$ D$U0I@ZYQF%W bNkr57j[D1UQZF6ptGskU/.zF@zeXII/gC^Ox!D`LAM :< qS> \eRC7lBJ: L@#0zgTGIiB h842q`cTV:^N-Zc;WuS]p@NQtLȘeTaJ3++ -|hdWZ 4q4]O8QX>>U$w iàgC qA(sUک&Fh% DX]UiΜ]$g?wVIVB&'1eyCЭ[%:ڽ a`5B PmV!/N‹Pf;!`1P.?`*@0,Wp|-ƒ7PbYa ^עMWwh9b†-la|`*i)+O0-ᨖLc09yy8 F7/ puU'f%,~{Γ~$k$"٭Ϙ<[$4ib:zy@mX)3Swvc*ҭmL A5"g `3'T{0;Dkgqap0ڨНOQa};:t4Y75dhqH%cG\s@▁7K5PEw*Ԇd mv*X%Iw?Ƒ/5wq;,e/a[?> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5293 >> stream xuO7܍+AE%(.5j\!QUt}79[=мr>f];:gXћљQl6c>nlmmE;ݰ/w_OG矃XPv [s^+ '4}jQY jXl5+zԌB)cT_0$ZB &4_=oU/] Οd,":82.t!:)6__50̝*` W-FD {FB/Reu ʂ۩&,pƪb5?uZǩ duj?;0k$zH}'7)hO?-``)4k© Y Ѵ|d *^20`+U @ h9q%9/!4 CĀv`j~&EXT:G\Q{@;8r @]VZ ֠bX&& BIIz}9wDmI;1yUz`4D_o@4^7zA dꀎyVP5>"~pfFS5/Z L)&bMB0\PWKgOBS}DNbt7#z2ER+P>sQk. bP D}@WիW49;.=@P')Ͱ 42UVrTߑEb&GG P?*4'E|MEIbAD`c# ?conQ.]B@ "(Z[nS 7 w}F@ŋ]g3 >0T~ mpH( *; D *(Z fO?|DXGӸ[ ]"GeEP2% USR>cEKi.Ǫo1w'@B4 ;}FPǵ_\WX&ByS I\6>݂qy :"Gy"%pGsYxMSF P~p)QsRbSO]xwARiz-&*@D3p!$htB:sCgjYۿB@ox4CnȀJ}K|ЍÔj6Ό(Vdžӟ+w#:6( >%Q`kalO't 0J3U}`kIڊN"\!~;R?ՀMCO6@K$+oVaMhƍ6OP!$&Tx_/~P /P& IJ(5K{nV0/O*mcd"}\ia !ˀ MwqV[uR||`7c`7 nDc`Dc`S ko$׭P f"Ł{jc7B_Oُ_KGPPP}hD {l|oY7^{ ρMТgԍN 4Kb!v#^y\o5wI =fgI V1{Ic9uT 'P> 6zv.aFcZh}"e_ Pbk8 t:"TH.pjx(/Rm=Àzra^}BqyT28T~I1QQNO1D+9Fg[Uǵ` nD0@Qz ~RzqE߃?xǪJ!CG 'y߽3:;z.ⲿYG0PQ}T\U5.D?ꞿl&tDa ʽ+H!@a#tL6wє^nI NV)'_؄4G"!D$6~i-?] 
zy7xPq 2hz?+k!K1\Y`8 (ztU2]mAQe`4E0h@#-kFW^` +z4 ZUmZD9Jo* Q`,dY *HtEց Yi2GnKPPF(ëj&: ##z?wn D+v VWNP'I_'xHGӑ_9[#v'xvm'}<m.mwCQC?4>u#P-fST8z= &,|S(_ ؕk]wE`c#kFjxwV;zFg@3R>LL?~G*l ufD>Vׯ EDp1wT}aH}:/,#>`轇@TW=pgO 0l`9,xPwb6|k׬O?m(J ?~@F}0gY;{ @珵 ?<a|0qH}͟ᣯ{KڿK^$Sܑw,1|v.>۬&ny=o1h#AS}t}g\.?;~`%O<ңVwɣ?Xw._u{\u?~< ;qХۯ?RǦ}0F x7K{k9G[qɣ3G{oRU@s } ,N↏P ԩSBzǠi)  k;ac̩VW_r'?@KhGo_6=`_ӵf7RW>Z' >:d9}4+7>ń ܏qתY ދW鑁qͅ_K4|O8ۿN\^ڿGc^LR%˙3g"#ݩt@U\ % i÷jS3;N}lWԓDDG 9{Ӕ]L@b^6ܢ^G@3V}=5s(k®K܁7/W\siM3G"7p  4{oW[ Ο?b[Tz `!nB+~mHF|ť"6P.&u`6%6~k}.\б{v8P(l]uvn͟_ŏKDӋ/D׋YtAˉ55U_7OmV m;l|D\K.aF ̀:fnrWtZUvG'ΑM/_=d* ]i\FDԁ) 0EPlx$DWwLu5Tz*Ea~E^DDנ@{>nM\ > stream xZKovKpNrfðY~=ÈHK.E.r) 9_C.n8_uLwΒR" `lY]]WU|ް7σC_͞Ϝ1vv4iIE:['Oڳ9c.r=2uAsp?_C7-0Yec7Z:EM3vFf!;Ǹ=<ώHKH7\I7Lc`+ŭMsw3^%t  m,IGG85afaxǥ{dgDםtڝ{O=lkh#lˆq>_製Ge5gWi ~Z]^5g6?Ώm/w??߿ofk̄w9&DYkgE* p;.JIj8۫B1-P:dBJ~WPu,&*!dx!&*!PBpe4Q ifyg aJ7B n qyEE"@Nk41| =o1`µ2a~PŰ5ܷ(Dp1~!B'G'[@xC`c1;o@I QAzq?ٞy{L0V*%w))@DqST)4.J*iQHzyx*@JT 'Ř0!(&11 IUMrEe<ȜM<r0/̝]]R"1s+?it@zuKqt ]] Ɣk!UZڃ}(׫9IѾ .lΤq ;C ]q4v]CZX:E{o^9$i71}FWtq7cuN(9%lFNT*ImڎwܾP"L.PA z~IJzı #kZh1Ѿw~e%S^Icu،3-U{30!2n ڃCY<9USa!0Wm>[Y|5-5C5} M Vj E:_G4r%ݳ 8Op\\ .#1e !x+N88#~Bj'Kcf:`"D8ȷy:6gslS/"%' -"v Rg0C#ij+^p"Xme?Pߢ;Du)'u$-S'> oHYfI%!YKΌ,!LH UMGL{|4aEm?L\ ODyVki&Dڮ k"%->kV^ aʖ񸁉Gg[˸1A^*ñ hDNYT"OI (2g^(F^Faca:~+xhGt:eưq`c Ӆq餙uG}1ZWۑ S G6wmT1ͽ2JpF AӻsDӞ^l_e_䚠jJZ,3e@,j6kɺSE+ޱ,bKbs#|$\Da_amВêΛ:Ցir ċ(L{Y␍&i&bwq'dU,F#]N #}aA0y &3zG]iFbg% %68i$s"UwST@}5 ΊO٤㕐k5€vŸ;%G#Bf|2"3\P׬%?ͶOՌD%LL:ySa[J׆QͅE0]绞gFkq[cɷY8xdT_9_E^invE{Eo@_?eZmԥS[mDX^d\@66oFC9NXo&7Uiс,'t2$gLb C_T]Sni5IVhRlv]*ѾjxpCMQʨ Cz9.Bɘvps w'qE b|<ܤM kyHwMqgy՜WR[_>ӝ뺧~ _pS6c&?lTH†hH[Ùlx 6%"왬&V k|յǮ5{m2w9ܴ׸`SM׀-T§Qq7wZwPM% Ćg]X9UWڒ !yd(Z;ƃٿsYUendstream endobj 111 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 0 0~ 8@0%!tpw'0^Gk"GpXhu[@fcA剋7OH.O!.y%:EHAڙXWU}uȪ?&]mgM*)J747I3 B}Svendstream endobj 112 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 
/Subtype /Image /Width 128 /Length 4764 >> stream xEŻ71泈""QxW  jB4Wfͬ9uzvcXzggί.l믥Zk1M:O<ߚqی_ ~z1-O30`}2 *ATHyJPmҭ?4e$q mKPhX7~8W'h]^e@ 72o*c@+<0tLy :h'VIV@8=3(=1 ` uvh7a 4?]8Ge"ңdbPS2 `74.W4RjO :x8&L rNm( A7L úJP|֭[4D9-X++6@[ "e oܸ{[' P8]ö6s 7B ^$A ! ic `6@y8HD^~] ͨu`Nh*S )p5gDV2(H}S_m%(J%2Zwo hs[" Nn.XW6tQ>@2%`_D#d[W (qr\;y+u?b?Av^(€*bɡO+DyȀv(֜ h .{UdP=YFE'~L@c`s.[qTU_ ￟` k;ؚeK 0;w! (?+AMu 2{"k *+o6^"a M̥I n2IPE]O2_  tl%N;eE>/Bd0 [ևIBB *S ]/_[һǠ,}H4H<4||V҃=- CEi ;wţ@6W%hk|!:&#wޡPtt vS3O"O^0~.#&cPXr!l%^݊Q2"׷z{" b{UYbq~A Cpo+iy@&D* U~V?o+8 M]{ak+GUb0Aa$@J&0 I1^u\7" [v#Gf]Qĸq\_{5QX %(AM8Hpr$0 ` ? :#e8 AbTzneST2wZm1 CS&YfE tc W=lji7+yѺ@PFzeufԏ׼>5 }B0dp jGGI*vP1%0QNaɫ?wm/Z#CSG$(3MB/#Yh λyzE\2Ec+0%AڍZDB뿫 VH2*b?fڙ)8'W/A@@lGQ#1/HfDg˗ōs;Bid+togn?$=*%ɿ Ωm?C.EfP0S Ou?ruB#eЭBcj~;9" ttwB Y6n TT?B$ߺj" *ZidM̀Jn:`/6 &>(6-Lk>^v\ L -ꨟu yU{Z8%hEmXԫ?=7"ѓ0{ G= ˕ DNGa՗ΤbGY;O 𧩰:#[[SJA[I2G bH>PeیΒI+rkY7I 0c'] Oe\,~rChm Ż=vnjw`uS%d݅t·%jI Plq(wxA<*jH6 A3~q֍wnV}!=bꎧEDN1,.o?΍"$R_3#0i028((9vİڣ?АBD>~:tUo=B4&R_`(A@"۰o)R_dyJGp6*{f(ȃI?`ƚuG䭊¿;hξQh$GdU$d`Juʠ 8ap# |%Όa:#6TYz + k~a ܰȀ.TJ0  1F\%aXdچmJ&tcyԽ7QIDY> j*H^U!)ja#tCw1|6 h,oyLLрvҫ?mь.p੧aP)[). 
2 q{cߪ?n'6;3u'.ip p q 5͑v47>g_3΂'x@(k7ۤ8!ϴ$A^}ZQ6 'vG+H}#Ǣr $S|h 1tir1}4"n/Tبo[T8d< ?1g4t!v.pg6G5 CB{hvA*z;cxgX X(+S܎D]:I&%oSO,>6݃}Ey>endstream endobj 113 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5049 >> stream xŜ=E_1`Yʲ, aHH,@l{=uw`T3S]u{zjZ)ezmȴ>x`zǴYkNO#lθpQ{tm:x8}ܭ+Z]}ڎmfO:M.}wvP&1ocZSj$]R9&k[>G&c_mn I٦>eژyȠS9EƁ 6LSfk݋b՟:S""g@4i +2f!"[~ cN?2XaS k P!]SPu?u- ̋3e`9yI5TSKH"+)f(t>KwAu8:ր*SPԺ2+Ӯ}O&Ym ՘ +R՗3AW`\t3H?vրn ]1(2?2Ov(эYȮpw@?Qqw%WʼXT._5ߕ_w[FuAtk{*=Htg6@^}`MU`jHA]p@~Wt8up&pN.h4bs+aէG@@@N"] r(f#$`@ayEt#PM+E۱w8JA7$dM+2 a&+vD8g*@c2?GD*XQ<a"8AmV_&.Z^&p Z w/F/tQڕ0f]|%k@Ġ᩵ܕ) 1 D#o.Gv(ԅ@ D ~DO_(xxBNz k`|8@ߧGZFKA (>Q ?(.ǜ!X44{ˆ QݑA$kCtw,L'-Y3e@a-DJ(yg[l" -lD *`spETjoF}|J/sݻM;"|)h{(#g6?QopY( E) BQ=JT?o25 "8$0}B]ݻGgݺZ_@p'+eLj@A4Z_$jْFqrh(w8Qr;,4|GO^RQ_Zu[i:Bȥ ] fUPmE_8/U(DN}Jh&~4:pnyϭQZ jX7p-VJ"*4EgOAV>3zPᒘ./YՀ L9)zIA~hZX)D[U1ި؏[]'#q&)Tw'|&|"y*f)*G;2>q޺&Z6u @J܏R Gd>hQ.Ha@͕:1A?D G@#'z ʲMG`ԠS+`Rԩ|AE[?b5eR >e۠ӉH0@`ލ`ؕ7]82Ha5dPܹcuG+ (rvWhZ{dg!aw;9&"!,MDIoOGV[otN>9)#aZ @1XWXA7o:SD ֥#:OA&ȸ!c޸q6 f!P0ĕ  .+u~}5cEUaj3Cb畠^v _E JG.!d`Y*huYP2>W^3ZP<@]%`Q,ˑQ @.,%$D@P." I{W\[A3ґv@Uc]y+\|9:B2X/X"4!ҥK(coOnr&hqsmu&8#IkeE%A2mL _}PM)^,v+he} 2M1)2d$Š@%zER7tDd2gV3Cp 2&W \?zhvȤ'z饗]cnw. fm40ui t(%mz[҈AT4|`n_! gݠM086B~e^q / `VR]D[ ZT8?'CAWW<*JO? RR=z1Q'`[Dw1>q>;> i} @Eq!OP)+.&J"];gs %NGJ 0~RLPRQkU~( kS* ];yRuHu_ `f맞zj%)}긜.0X7'Yv z @O[NRZ \c)$,H_ f'x'ŌmgVe@wڀvUF&(8Ae8@7jdEwpɻ%`t+"2 l ՉȽk JgNXH">e^tG1! HY+wyeb r'b|:\xj!!py{&HzHnԁqHVXJ9… G+UE ٓ"L4|f=/:T-_EE}: ;IA=ꫯ wٳLU $Utӕ["4].$1hQwc.;;!:6D@ilE/p^0Bwa !Z=)ep|| y`dĩ_HEHP ]yQCZ\3h5H4nX$ m Dwe]=R k7,.N@_ݙRГ| ~:^P!S;6T? J*N!! 
HqF>+H1 p(h]v_GqU4m?vZza @~{|;OA7o\tI鑁IDRAs|M;]ݜpT?€Mc>{i@էx?@& _tq\nBEwPq"D K"1 }v$=u,VnS)`e* xκ؏t8|w$#(dfD3xwo2t 0T~& B}T)_5y~-pCDwG}jJS͒(sDB5g?u-LwQ#自 5lR}f{G:x_,AYu^2&IણG  0J^S pѢGAu ˸ 6ׁ !eM;]ϲ/P0z2J|@e`X`WdgNc[Vmǧb`*A$]87AOcELu&1=Yȭ_,ALT}4;UlP;"M"nIn1(ึ QF.31Ӆ~.̕ KAv4 ZB"n> Uyh4*w´u1OAb$?ԏ>;,7b@S9 [92@vJ2OfJuJ6D5`1C5ȭ'ƎP? ;P}s`rU̝KDt͇N}P>eP/)mS3:$WC4"]CI9իW;i gqu~;h`'#DgnE 6hI]>pş8>>dk} %qZ#: #: (7߈" +S9R@yZ*>5I]*/_tדg-)"и Gs/žS`cNP. P+Uր >wͼ@沐=2/!0߂,6B~v1Q'x1)h_~QD rոۙDy9N1 &[?fMendstream endobj 114 0 obj << /Filter /FlateDecode /Length 5056 >> stream x\rǑ}v{"8_Fɔe[" Y @). ꮬj Pլu6c>8>O6[5ڜ] ~W& gf@ZgF+p NGg|v$J&)g%^_a8ũ@܏ZaB^h>TTixrh>\3ݯa0*ϡD`Li9e;<-*i-`൐N@,tvPG7vYvK)ҰlaFu΃yPOq&[pRefmt3O +z gPM 2Uvy&S⥓l0͇O( +=Ju96 uliS z ڎޥ02ϡ# Fu4f&y(N.N6& ?M _t"·pD foT x:>ªh [\>G8i ~~M[:jZI\X:)t҆r2 o-HbUG7ۤ3dcaoٔS,Y%_`߁юOH`X@T}[w>7%.c!Q+9"ح1nAO7:i?1'd=#n  29Ff ~ĸ"+y܇:FX牫mmh 12x]337Az٢9ΈslI Tg˷5:aR5"ku#q:! |/3D{ nY9 iǴqY)Y98wVAQe?ym݈Vr /!5kpؑ]WX8;!7_*&P G1`L.w-@.len{Y2˒ 7U9ifM ^i8e8B?86wl4%8K8ϼB $`5e5?1}*N(@H[ +7<X ItD2ЉVj&ԸB9/ !DTVFmO\IW /㕉I!E|s?UAZ ]?Ti(R6zNPRXz\U?Frh?j*~aȤV ?R?Uh|CJ'k'k(MW'J >1S 9ޝOnm{go=m g=bq` %V>}rKdK!,amQ3h`~3d&g%Xŋo2-i+#u2M]W֧i+^xrb{8Y0u ڴk&iLP2z/x:f2eDϝf>ĈD=XJ=X-sđ Q̑8& CZ vKU2JN56+Ui"7<ݔ+l\ħUvy92[HVF{>? 
Q]|_bj Ba|)QGpCD{LQvKP#hH-@ɷL :B(}jGJ@@@3 B cEX# ؔ/7Dwlhv8ͷI&L$G(.Ul`naPp|Q8gQƬo'W(V-9QND,edM*4V*MexB`'bt0~ѝW "t+d8.ѧ B즻uw{d->{$`X}m(`,a:})b\Tmpc!4;% >=O1̧z]%bݵd/6G!yjT#Ngx@@g:J4Go6ǁ +]T)ljzn2}p Fd*x;;yP@""ʀ)b#wrtßPC:NysGo{R;[Ϡ 5INyVn7/h RI.f}*ʅ3?2,27x9+TOZq~i') %]Oew=|c'ۿJa/_Վw݅v(g\j9O0|n3-]>EV&c0B6wUfvH%IcU#Q"'jpV@Pj@NdEHd2.վAC68mpG)8`q>w$-A{gB魱h-5;S(Lk/l$S B;»b*\)մ h{k;gX`6^ ]=:b8|sX&v Uv%3Y\FFkY %v*Q,$꼈 ;DcLך rmLAo"Jc!hFۍKp0ktieu@Q*_D_<3r٤/^Lrg1pR3gE^K)d"-!Y* !k{qqƘE7NÚ΃,N>譇áu@9YLV&_Ρ0Z|| 1"J4^1zpu˩ ءᩩ"wŊ&LlgmAhIQ\T& ~QEmN*S[h]iL& 蚮py\v4)Jeu;q{>RކP8-iI~#85G IŢCDk.kK&=X7cD)NiʼnG#h/y}qR6fl\mA c$= WāÂHCx,3`KBnGb1$UW"G_V"P܌kb$;*?v8W~.l247Sr.=χYG^/91 :ʭ±C 8ֲj[JdĊǼHlO*f)vm?Qև~Α 57A" >T-9Kb''ok0Zn)3zsxז@wM,oJ׳+=ЇGZũR O~[ahR} #2hqn%'Z |}&C  L0 $i?Um,6):_ #^jlUiL,3}#8ɾ7m[j~ tZt)}-?0;uC<~U|+%#8FŐg?|Hendstream endobj 115 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 . 2D! }I.|D/J`H_#490Lu@vw=|*ՔC -A#EN?p6MWP_.Xyp8I.~r 6/ES{endstream endobj 116 0 obj << /Filter /FlateDecode /Length 1437 >> stream xWMs7 jC_1I;MSer؎Nl94jUvX{?.@K!eg23غA7NrfPdɺ(L-\>s]?LDf{ oM洞 5N2<_ 8μZCkτ.w̓^?gz^3ZƃiQ4m kIY"thq,cPpL˩s{jl@>9f)?WQX81*(ᴝ]傰YYnku HD߮}kSH!v}@V\f4cHs4!Gs2,I|fÜxHv ^O?oTxWXYXmCPyH TQx E`_sc05P<ʯek%;|Rё1Z䔘*%kx]0䃅 I0Uc@U^aoK@FzyӄrߟOq`i  :Yt }j{`;Y81yf Kgt7ܺ{^18zDւJ2PF(\ p`h38 QEyB{6ͲKWL%:> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 118 /ID [<787acc93186adca5ea66abe4ccb4547a>] >> stream xcb&F~0 $8JC?ﶍ@6[3(@$J"H Rd\ "A$I}"H`4s0 lKoIFl:?d3Ag7b ,vp"\lg endstream endobj startxref 121654 %%EOF spatstat/inst/doc/spatstatlocalsize.txt0000644000176200001440000000013713166361072020157 0ustar liggesusersdate version nhelpfiles nobjects ndatasets Rlines srclines "2017-03-30" "3.5-6" 21 85 0 4677 0 spatstat/inst/doc/shapefiles.Rnw0000755000176200001440000004360113123356342016463 0ustar liggesusers\documentclass[twoside,11pt]{article} % \VignetteIndexEntry{Handling shapefiles in the spatstat package} \SweaveOpts{eps=TRUE} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) 
@ \usepackage{graphicx} \usepackage[colorlinks=true,urlcolor=blue]{hyperref} \usepackage{color} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \begin{document} %\bibliographystyle{plain} \thispagestyle{empty} <>= library(spatstat) options(useFancyQuotes=FALSE) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") @ \title{Handling shapefiles in the \texttt{spatstat} package} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{ \Sexpr{sdate} \\ \pkg{spatstat} version \texttt{\Sexpr{sversion}} } \maketitle This vignette explains how to read data into the \pkg{spatstat} package from files in the popular `shapefile' format. This vignette is part of the documentation included in \pkg{spatstat} version \texttt{\Sexpr{sversion}}. The information applies to \pkg{spatstat} versions \texttt{1.36-0} and above. \section{Shapefiles} A shapefile represents a list of spatial objects --- a list of points, a list of lines, or a list of polygonal regions --- and each object in the list may have additional variables attached to it. A dataset stored in shapefile format is actually stored in a collection of text files, for example \begin{verbatim} mydata.shp mydata.prj mydata.sbn mydata.dbf \end{verbatim} which all have the same base name \texttt{mydata} but different file extensions. To refer to this collection you will always use the filename with the extension \texttt{shp}, for example \texttt{mydata.shp}. \section{Helper packages} \label{S:helpers} We'll use two other packages% \footnote{In previous versions of \pkg{spatstat}, the package \pkg{gpclib} was also needed for some tasks. This is no longer required.} to handle shapefile data. The \pkg{maptools} package is designed specifically for handling file formats for spatial data. 
It contains facilities for reading and writing files in shapefile format. The \pkg{sp} package supports a standard set of spatial data types in \R. These standard data types can be handled by many other packages, so it is useful to convert your spatial data into one of the data types supported by \pkg{sp}. \section{How to read shapefiles into \pkg{spatstat}} To read shapefile data into \pkg{spatstat}, you follow two steps: \begin{enumerate} \item using the facilities of \pkg{maptools}, read the shapefiles and store the data in one of the standard formats supported by \pkg{sp}. \item convert the \pkg{sp} data type into one of the data types supported by \pkg{spatstat}. \end{enumerate} \subsection{Read shapefiles using \pkg{maptools}} Here's how to read shapefile data. \begin{enumerate} \item ensure that the package \pkg{maptools} is installed. You will need version \texttt{0.7-16} or later. \item start R and load the package: <>= library(maptools) @ \item read the shapefile into an object in the \pkg{sp} package using \texttt{readShapeSpatial}, for example <>= x <- readShapeSpatial("mydata.shp") @ \item To find out what kind of spatial objects are represented by the dataset, inspect its class: <>= class(x) @ The class may be either \texttt{SpatialPoints} indicating a point pattern, \texttt{SpatialLines} indicating a list of polygonal lines, or \texttt{SpatialPolygons} indicating a list of polygons. It may also be \texttt{SpatialPointsDataFrame}, \texttt{SpatialLinesDataFrame} or \texttt{SpatialPolygonsDataFrame} indicating that, in addition to the spatial objects, there is a data frame of additional variables. The classes \texttt{SpatialPixelsDataFrame} and \texttt{SpatialGridDataFrame} represent pixel image data. \end{enumerate} Here are some examples, using the example shapefiles supplied in the \pkg{maptools} package itself. 
% fake data because we don't want spatstat to depend on maptools <>= baltim <- columbus <- fylk <- list() class(baltim) <- "SpatialPointsDataFrame" class(columbus) <- "SpatialPolygonsDataFrame" class(fylk) <- "SpatialLinesDataFrame" @ <>= setwd(system.file("shapes", package="maptools")) baltim <- readShapeSpatial("baltim.shp") columbus <- readShapeSpatial("columbus.shp") fylk <- readShapeSpatial("fylk-val.shp") @ <<>>= class(baltim) class(columbus) class(fylk) @ \subsection{Convert data to \pkg{spatstat} format} To convert the dataset to an object in the \pkg{spatstat} package, the procedure depends on the type of data, as explained below. Both packages \pkg{maptools} and \pkg{spatstat} must be loaded in order to convert the data. \subsubsection{Objects of class \texttt{SpatialPoints}} An object \texttt{x} of class \texttt{SpatialPoints} represents a spatial point pattern. Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert it to a spatial point pattern in \pkg{spatstat}. (The conversion is performed by \texttt{as.ppp.SpatialPoints}, a function in \pkg{maptools}.) The window for the point pattern will be taken from the bounding box of the points. You will probably wish to change this window, usually by taking another dataset to provide the window information. Use \verb![.ppp! to change the window: if \texttt{X} is a point pattern object of class \verb!"ppp"! and \texttt{W} is a window object of class \verb!"owin"!, type <>= X <- X[W] @ \subsubsection{Objects of class \texttt{SpatialPointsDataFrame }} An object \texttt{x} of class \texttt{SpatialPointsDataFrame} represents a pattern of points with additional variables (`marks') attached to each point. It includes an object of class \texttt{SpatialPoints} giving the point locations, and a data frame containing the additional variables attached to the points. Use \verb!as(x, "ppp")! 
or \texttt{as.ppp(x)} to convert an object \texttt{x} of class \texttt{SpatialPointsDataFrame} to a spatial point pattern in \pkg{spatstat}. In this conversion, the data frame of additional variables in \texttt{x} will become the \texttt{marks} of the point pattern \texttt{z}. <>= y <- as(x, "ppp") @ (The conversion is performed by \texttt{as.ppp.SpatialPointsDataFrame}, a function in \pkg{maptools}.) Before the conversion you can extract the data frame of auxiliary data by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. After the conversion you can extract these data by \verb!df <- marks(y)!. For example: <>= balt <- as(baltim, "ppp") bdata <- slot(baltim, "data") @ \subsubsection{Objects of class \texttt{SpatialLines}} \label{spatiallines.2.psp} A ``line segment'' is the straight line between two points in the plane. In the \pkg{spatstat} package, an object of class \texttt{psp} (``planar segment pattern'') represents a pattern of line segments, which may or may not be connected to each other (like matches which have fallen at random on the ground). In the \pkg{sp} package, an object of class \texttt{SpatialLines} represents a \textbf{list of lists} of \textbf{connected curves}, each curve consisting of a sequence of straight line segments that are joined together (like several pieces of a broken bicycle chain.) So these two data types do not correspond exactly. The list-of-lists hierarchy in a \texttt{SpatialLines} object is useful when representing internal divisions in a country. For example, if \texttt{USA} is an object of class \texttt{SpatialLines} representing the borders of the United States of America, then \verb!USA@lines! might be a list of length 52, with \verb!USA@lines[[i]]! representing the borders of the \texttt{i}-th State. The borders of each State consist of several different curved lines. Thus \verb!USA@lines[[i]]@Lines[[j]]! would represent the \texttt{j}th piece of the boundary of the \texttt{i}-th State. 
If \texttt{x} is an object of class \texttt{SpatialLines}, there are several things that you might want to do: \begin{enumerate} \item collect together all the line segments (all the segments that make up all the connected curves) and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a spatial line segment pattern. \end{quote} \item convert each connected curve to an object of class \texttt{psp}, keeping different connected curves separate. To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) @ The result will be a \textbf{list of lists} of objects of class \texttt{psp}. Each one of these objects represents a connected curve, although the \pkg{spatstat} package does not know that. The list structure will reflect the list structure of the original \texttt{SpatialLines} object \texttt{x}. If that's not what you want, then use \verb!curvelist <- do.call("c", out)! or <>= curvegroup <- lapply(out, function(z) { do.call("superimpose", z)}) @ to collapse the list-of-lists-of-\texttt{psp}'s into a list-of-\texttt{psp}'s. In the first case, \texttt{curvelist[[i]]} is a \texttt{psp} object representing the \texttt{i}-th connected curve. In the second case, \texttt{curvegroup[[i]]} is a \texttt{psp} object containing all the line segments in the \texttt{i}-th group of connected curves (for example the \texttt{i}-th State in the \texttt{USA} example). \end{enumerate} The window for the spatial line segment pattern can be specified as an argument \texttt{window} to the function \texttt{as.psp}. (The conversion is performed by \texttt{as.psp.SpatialLines} or \texttt{as.psp.Lines}, which are functions in \pkg{maptools}.) \subsubsection{Objects of class \texttt{SpatialLinesDataFrame}} An object \texttt{x} of class \texttt{SpatialLinesDataFrame} is a \texttt{SpatialLines} object with additional data. 
The additional data is stored as a data frame \verb!x@data! with one row for each entry in \verb!x@lines!, that is, one row for each group of connected curves. In the \pkg{spatstat} package, an object of class \texttt{psp} (representing a collection of line segments) may have a data frame of marks. Note that each \emph{line segment} in a \texttt{psp} object may have different mark values. If \texttt{x} is an object of class \texttt{SpatialLinesDataFrame}, there are two things that you might want to do: \begin{enumerate} \item collect together all the line segments that make up all the connected lines, and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a marked spatial line segment pattern. \end{quote} \item keep each connected curve separate, and convert each connected curve to an object of class \texttt{psp}. To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) dat <- x@data for(i in seq(nrow(dat))) out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE]) @ The result is a list-of-lists-of-\texttt{psp}'s. See the previous subsection for explanation on how to change this using \texttt{c()} or \texttt{superimposePSP}. \end{enumerate} In either case, the mark variables attached to a particular \emph{group of connected lines} in the \texttt{SpatialLinesDataFrame} object, will be duplicated and attached to each \emph{line segment} in the resulting \texttt{psp} object. \subsubsection{Objects of class \texttt{SpatialPolygons}} First, so that we don't go completely crazy, let's introduce some terminology. A \emph{polygon} is a closed curve that is composed of straight line segments. You can draw a polygon without lifting your pen from the paper. 
\setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(chorley) plot(as.owin(chorley), lwd=3, main="polygon") @ \end{center} A \emph{polygonal region} is a region in space whose boundary is composed of straight line segments. A polygonal region may consist of several unconnected pieces, and each piece may have holes. The boundary of a polygonal region consists of one or more polygons. To draw the boundary of a polygonal region, you may need to lift and drop the pen several times. \setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(demopat) plot(as.owin(demopat), col="blue", main="polygonal region") @ \end{center} An object of class \texttt{owin} in \pkg{spatstat} represents a polygonal region. It is a region of space that is delimited by boundaries made of lines. An object \texttt{x} of class \texttt{SpatialPolygons} represents a \textbf{list of polygonal regions}. For example, a single object of class \texttt{SpatialPolygons} could store information about every State in the United States of America (or the United States of Malaysia). Each State would be a separate polygonal region (and it might contain holes such as lakes). There are two things that you might want to do with an object of class \texttt{SpatialPolygons}: \begin{enumerate} \item combine all the polygonal regions together into a single polygonal region, and convert this to a single object of class \texttt{owin}. \begin{quote} For example, you could combine all the States of the USA together and obtain a single object that represents the territory of the USA. To do this, use \verb!as(x, "owin")! or \texttt{as.owin(x)}. The result is a single window (object of class \texttt{"owin"}) in the \pkg{spatstat} package. \end{quote} \item keep the different polygonal regions separate; convert each one of the polygonal regions to an object of class \texttt{owin}. \begin{quote} For example, you could keep the States of the USA separate, and convert each State to an object of class \texttt{owin}. 
\end{quote} To do this, type the following: <>= regions <- slot(x, "polygons") regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) }) windows <- lapply(regions, as.owin) @ The result is a list of objects of class \texttt{owin}. Often it would make sense to convert this to a tessellation object, by typing <>= te <- tess(tiles=windows) @ \end{enumerate} {\bf The following is different from what happened in previous versions of \pkg{spatstat}} (prior to version \texttt{1.36-0}.) During the conversion process, the geometry of the polygons will be automatically ``repaired'' if needed. Polygon data from shapefiles often contain geometrical inconsistencies such as self-intersecting boundaries and overlapping pieces. For example, these can arise from small errors in curve-tracing. Geometrical inconsistencies are tolerated in an object of class \texttt{SpatialPolygons} which is a list of lists of polygonal curves. However, they are not tolerated in an object of class \texttt{owin}, because an \texttt{owin} must specify a well-defined region of space. These data inconsistencies must be repaired to prevent technical problems. \pkg{Spatstat} uses polygon-clipping code to automatically convert polygonal lines into valid polygon boundaries. The repair process changes the number of vertices in each polygon, and the number of polygons (if you chose option 1). To disable the repair process, set \texttt{spatstat.options(fixpolygons=FALSE)}. \subsubsection{Objects of class \texttt{SpatialPolygonsDataFrame}} What a mouthful! An object \texttt{x} of class \texttt{SpatialPolygonsDataFrame} represents a list of polygonal regions, with additional variables attached to each region. It includes an object of class \texttt{SpatialPolygons} giving the spatial regions, and a data frame containing the additional variables attached to the regions. 
The regions are extracted by <>= y <- as(x, "SpatialPolygons") @ and you then proceed as above to convert the curves to \pkg{spatstat} format. The data frame of auxiliary data is extracted by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. For example: <>= cp <- as(columbus, "SpatialPolygons") cregions <- slot(cp, "polygons") cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) }) cwindows <- lapply(cregions, as.owin) @ There is currently no facility in \pkg{spatstat} for attaching marks to an \texttt{owin} object directly. However, \pkg{spatstat} supports objects called \textbf{hyperframes}, which are like data frames except that the entries can be any type of object. Thus we can represent the \texttt{columbus} data in \pkg{spatstat} as follows: <>= ch <- hyperframe(window=cwindows) ch <- cbind.hyperframe(ch, columbus@data) @ Then \texttt{ch} is a hyperframe containing a column of \texttt{owin} objects followed by the columns of auxiliary data. \subsubsection{Objects of class \texttt{SpatialGridDataFrame} and \texttt{SpatialPixelsDataFrame}} An object \texttt{x} of class \texttt{SpatialGridDataFrame} represents a pixel image on a rectangular grid. It includes a \texttt{SpatialGrid} object \texttt{slot(x, "grid")} defining the full rectangular grid of pixels, and a data frame \texttt{slot(x, "data")} containing the pixel values (which may include \texttt{NA} values). The command \texttt{as(x, "im")} converts \texttt{x} to a pixel image of class \texttt{"im"}, taking the pixel values from the \emph{first column} of the data frame. If the data frame has multiple columns, these have to be converted to separate pixel images in \pkg{spatstat}. For example <>= y <- as(x, "im") ylist <- lapply(slot(x, "data"), function(z, y) { y[,] <- z; y }, y=y) @ An object \texttt{x} of class \texttt{SpatialPixelsDataFrame} represents a \emph{subset} of a pixel image. 
To convert this to a \pkg{spatstat} object, it should first be converted to a \texttt{SpatialGridDataFrame} by \texttt{as(x, "SpatialGridDataFrame")}, then handled as described above. \end{document} spatstat/inst/doc/datasets.pdf0000644000176200001440001625224613166361176016176 0ustar liggesusers%PDF-1.5 % 17 0 obj << /Length 1690 /Filter /FlateDecode >> stream xڽXIoFW95K$AQEeKD}pi',g{}oW߽fareMAe/2Tzg:5IU{OwJN6yr] ?trӿVlB"8eb컩^!Z|gF!X<4P]?Dې1.Kia +ΒUZVjp7V]kμH > o8+ -=bjUP:]j dԳܛ9ZTYn` c 6_,QE&7IݦK5xqܶ(Wnx̸tQ{uIM3.n:S|rs$t&U듫G0;AN%g3?/F'͇4dIuQdCKS`ƩK=-Ok5biG-Vn* |C"umB9̨M&G6{8nɏ4r99Ț;2gu ۼ?m ^-цi3G>d.^ҥwYT43{9T*RBǑB0̠ \]$N].l |&B= 0>(mGA&bDzɱBwF cb!DC;?n927S#RvH+M5"=ԖHB2]Þ%72fL9 3jrXJAuD9n@`qًwZAM>x Ac09x{ـEK.g0ds9,qB?1@ip̑(4oyhA'] kʔ0G 9bsQs7var@b yo;*Wn I74V 3j@0޾C ~ҋ*mpSWޣL 6&nLy.ǹ,;(=82 :t"G lNhD3U0.h2UlÖd]&*@{!웭gTM/ZoA!ard P-oz{V2d=Ϧfqɒ]Sn l>ǭ#zEJeZ?(h]՗do;Nv-g"ĜkP}"f= |r4 *E X7ȚR&7/6a5<\u"rr/ ^ =JfcL䏘Ahfe_,z4fn;ѳ]2 `b&C#O%bQ2}`R(*?O):M:|ʫ4Jf#Ы[9Mz\$)r9Q1g\f`{(~B,a=OC=u\CsBGIu9А T6ysгK+?SMaT<9 N ӱgkeI&II!wܜ1F {K9ISRay Z} ^Y` endstream endobj 32 0 obj << /Length 3881 /Filter /FlateDecode >> stream xڵ]r7+,og7I٥]E))R!)+ū ID6>}EV Q+y*$ZHQ%zrO ׻?}Gҕrz/XÇ.0b^[_ ^-v~q"JMç>+:?ï U Fʠa#.⥾"? ]_ϵAq2廁78S|ƭ% l^D* #܂wM`Lff( YU HDƀ 8H1 $W)2̖+3;sPQHKBmwpf*}Ϝ M$4iE1.h!Pfc1Ec$wW280</ :NŌ 0X%o8|ޟ,4^bp 贱ϓDIK1$ZhVX%}6ݬTV 1`V+F\}۝^D!:b∫u""NtQ$2]L.JAiE@̈m\L¬ ݥ d6VJh_$ hG|L7HN;j'8g Gr0S&=tCCfb% QDUb7Ӵigg   ƙ1JjB,`ͭ"G5ɕH xvm@dGGB$d$lz1oIx Vb"ᵍ~j7ZNMh1v1, X?D,!vm+}NcVHeF4Y2B_%&W LqTzAbBF2F^O:(Ll>[:'ؒl)\2SLf=_[~m ~R aIxdr5jjP<IEgHhBol} Y+bD1A̐d &ˤjJ6rMz? 
i7֌|=lX閴6640^F Vr;Wsu1l*.W"}RD:կFmMV)%gd+w>m欓r )'( K0 ?Mˈr*eSzm^o"]d ?|g&l!NTy0GF 81*zޢS=r:k=hB12S׎%63fztJlWhgbS:@L[dԭ=%b@/?f}&Ir *ӱ3fS|VZi#/%`*YL<_8i^' 5^:rK io'X%-(2ۂf}ѢeJ ""m8d^s1[d @^A&x~d٬q'T`b;3v0"*\ &igsd " qdӓbxj'(_Mn=ꍰz<V_]\,U*Ю> ׵88:@o%3ր0 t*Ib0M$#b="rr6L=rJ"$?tB6b k_TM74{##cLJEԔh& c`-Q/V$fiN4m6M}nKUk$:7z"JET)2D(0"#HӨy[J/j|ʣʌaQ*:7N0YUUc,'mmurEf$28Pq\Q*#Ȓ PKX)ŀp6=A Hfj0nՙˑnlVcPA9\Bo.ؚʷo1ޕׄJ5EGs#y|T<>}X2|4wSfP 2G$`-*H :Ro7F([ڜNѦCR#I wIra)H+Fij_ٳ_rO!"`R>'m{n w#Eբa@,׳ϑ2H!ϞT%IIHEd#?9Ҟ">4Z|Inz(,I4ӓ%#dB{z~ /_J'V07.*(O2˺\>.GHa.:0뀲qF ?MP*x1uc@o̯sQW{0ptt#1:by NeM>2S\.fFJ4x7m?13*(7ɒ29WG~#әɊ-*4+%>=_Rq^:|6\&֨:g`DB &4cf[}J/[<|tZ.|.l8հ1#;iF*p)A},' BٻRp{L?A>#a`UT#e7'k?S EIҦ}clWqCm 4%v}RЊX o!-$'$ 5/>QBIf~QƔt.; K)=Dv-LDŽ)]?G[{s.l9kͲ?on7dUƽԀtP{[ t1W2>]lwaue=ggY(pL$gerPj#?{DKꮞCۯY,'A6hv%:*4g'?iw|ӝԏ)Pӿ,&LBBcBѾM| endstream endobj 2 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 98 98] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 35 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJG endstream endobj 1 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/hexagon.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 36 0 R /BBox [0 0 98 98] /Resources << /ProcSet [ /PDF ] /ExtGState << /R7 37 0 R >>>> /Length 118 /Filter /FlateDecode >> stream x-1As^1/@`X^` 2E0T YA=qNOc?4M 9,T;SvzI!ŕehV)OkbJ#S|8xh+] b endstream endobj 4 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 38 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 3 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/irregpoly.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 39 0 R /BBox [0 0 226 144] /Resources << /ProcSet [ /PDF ] /ExtGState << /R7 40 0 R >>>> /Length 311 /Filter /FlateDecode >> stream xm=n1 =O0?}n #EJH6@ob%^EsMo? 
'P*fvjqp}]",D <qe]X/pRiuS\ *qX2B*XkӜ\lDU1J卼PX]rrZ0CP$!7D,(ƃ[Q5C2)RHQ5A70=x"EuZ1m=@4)YJ9VL,6}9f,q^ѕ:'1V8'3&R[UXߑX | endstream endobj 5 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 41 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 6 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 42 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 7 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 43 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 8 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 44 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 9 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 45 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 10 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 46 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 11 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 47 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 12 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 98 98] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 48 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJG endstream endobj 13 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 49 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 14 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 50 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream 
endobj 28 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 98 98] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 51 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJG endstream endobj 29 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 52 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 56 0 obj << /Length 2361 /Filter /FlateDecode >> stream xZ[o6~ϯ0aacH.n:}Pd%1ֲSɞt}ϡ(G8MG~Nz>S3")v&(,C"SuVz^nZͻ tu 5ݹ7Z +7Ԛ0ݔv؛zP_ݞpy,5Ь^[lqalir!tMyW#I#[ !uEo4nb h7_IO⋉i6?UA׬/*yLԽN4Eq: Խ'5֍{{=C2*o!y3;Dm&LFʍe^Ƴvʄ_₄,5!!;DUFvgJ~f(0-u>(^,瀭s6eNjUBvS]Qdw}"r2VSFpI1;lq[='XK\uI]9MLERDT 0H]Wz/y) nbH7'Sjh펢Iê4E}{{*g<\@yOD/l WӸ6iȊa}EBׁ_lB; I/#v4=ABA; 鱈) k S` ?êS@ْMm5fYg"Je1v|BDMBʵ۞N}-]sH2ܵⰥ=/!೼(+T4\(e"UAW\SBZqf/6Ź:2K<[RƊR.3&IŠ܎$'k71ȆWdCfv =g P}%=gK8UNm"T1l]+&k%I'X~fNP뤣GM*Ee㟄E9iy ,?ҷ'#(M"LtFK)3pʌ6>j S xC7sg7;2 pYSjxSXT gP_ZΧ}Q [wppc̑mZ)3Q[&Qn-dzcI/IS{t p:pISڱ/C(-ki bL]' h0Ud\}œ߸mLo#_)_=[@~䎆? QF82[8W%+k~4rzW8;붎?5(3-*MTbiJu2BزJ` nE_Ơ?H- )M2mA 2 aȲ!}sJ| U7(\᜗l&GO.eN*~ endstream endobj 64 0 obj << /Length 739 /Filter /FlateDecode >> stream xڅUM0WD"olNmEo,lm#)r cxUG|tTڕQϾTUX`2#-nqѻ>(tv>uy3Uec`W)m4_D_:F NAr e:şѣNR[M@G,n Rj2Jxc6^GbJ>51d~ؑikZӅRS avC7#S A?0Mm"X=P 0{}-_J.9岜^x'axeT=_w/{䨑Yc^.2y/.|^izUZO0ZLai=\1L]^9y9Ƃ4)t41 ts"y6 1~&*ڧm#  *xL .?1iQ hA7-1͍.eGXxJx ykl] ir!Q$"9j*7R][JJs[RQ3yMA`,$ @a9 O`1!W5ȨLcdP}90'@ǧ-ݭԛe(Rd,_bBE`zEj9T3:Ote%{{gBֺsy Zt٘Ɩg@J $~^ endstream endobj 53 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-005.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 65 0 R /BBox [0 0 396 216] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 66 0 R/F2 67 0 R/F3 68 0 R>> /ExtGState << >>/ColorSpace << /sRGB 69 0 R >>>> /Length 4574 /Filter /FlateDecode >> stream x\K.7ϯ%l: $$B2A$ >uαC8ro*s.?>ϿǷ뼮o.7GppQҎOǯǧo~ܡ3WW_a6OY3^8r|?w/b_O[9׿p-y|QϑTYufCNo})ۓcW2֛p83#szxXC9$8C0TφŇΞtOZ҃yØZ 8szH&wA øSk e55TG:0rqp?R9k6`\[ӹ҃)33ūv'=>LDiytg2y\(f!t` 
@8cD)CH2xH2#E(^fzLB#er"q !=9XoE25+Q+n҃m"0KR5Y섗JkZ;L/q7$(dMzڷI>"#ʝH־"D}kQ wĎҁe"ڨBvZMAc]h.ss'C{ZC攔/XlC{}ZROKbf$Y ^ͬ!i?mDwly#}(&J-5*x=`X2E+ /p Y/ļ=^OHn )%!Яx>> ʌ]t֛_u rxh5wHFb3 ~  ./=^O\PbAXV}/xIӚ64#kj==^OоQN(@5f *C{tQ (<}.7kf&VE88 mvK-RlV4ʯSHv7g4o҃͵: rm08Іt`rg(Zb={nNnvVrRnSt`rz :¶C I u:QR_s -= Z3r ;J͑Ln4,b&X7D7">]Vd7(V,̻z72EhFO{ZjB1Ĉq%G񄄶{5BiaC-K'n$r*,#'k '4u㉨" )ӀF i Ii8:5EP:d/Ivfш7JeYM{ׇ<.>|oNXe-t^umn'X8Fk`TVQzdUuܾ6%9\?ah:9bV+[njY%/9tF knt=^g!_6%lu ^u 4dRnEkFg9my on=Sd!dO{! =^ÑL:"_ъPT%f7x5,ƨ:Fkc)˲t`o෮?7+2BT*}Ep, 7_e`eoDP#<B;j5 to`ӺI<X@UM:z7stnLyTe7uP> -_Q-CW=҇W[g0I72y+չ&\a!X"ʜ@yJt&=xq[@,mIMz⶗Iܶ'_ks&t`ps-&^"`v; ?pO?f;smWvmpCu dFqnd`d ܉iKzO0fi nh mHҽ^еsgڲپٶq%ϙ nI2/6V^~IZErwtb<~f0sb@Ήa )Gl-]/g=e~AcvשY|\cm)pa\:6K+k!w5<8 3me1%'3SזƫsS3[5Dg!툮zw΋mivKw[f:1S+KՍs ԒqH9wWrpzoRgvz a9m`>;ZgO;q6TCqg&;UYv49;{m(vM&=cÕoa*}oTL714`.iBFy|c[ޑ'tp-q S1́#+[^ua[F2Gb†#~}13X ){LF q}#U%qIFM\5a*(K^{LmI \['"{d;ގc,Nw;#;oGqx'k@/oj5ζnx+l)U7pJWy2Ͷ$o81f6=xc+߱i: -{[9tvV7ͼ;_e)ui6|Ԗwfۨ3pm/g_t G_!~;N#*Ɏ׷$nnY9I+y .wR֤ĭeMmC,6oR`Xj3ܰ_İᎶ\puH?<ҮwC|ܵ V¥y_[F lVSI;,wǡc)a;dKP}Cmiv kN֖W\P?ncqĐpovz1> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 60 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-007.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 72 0 R /BBox [0 0 504 324] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 73 0 R/F2 74 0 R/F3 75 0 R>> /ExtGState << >>/ColorSpace << /sRGB 76 0 R >>>> /Length 2429 /Filter /FlateDecode >> stream x[M\ 
ϯxn699r,u=>hwF;=EE>9~8p#}r_ӗ[N9_~t!5p8kI(6/gTף_>XWZ>4=>7'xG>yp?ݦ&VgKVHߝ~χHZ +X۟MְtMSŬh^V5=(+-o텶uQVRGϩ֎5vm/JǬ=UשosT9i/.f9`)uj=Ʊ/6~a,[7F|u`u4x-RҪ'>ӹ p95> N~z̈ ˃ 5}pk Z5KjZr3l`No3AWn*.;Z `v"L0y$ \m#UwE] qu+qم[ ̏rmb4Hc#$`+eVM n C|N+5Mg(\ z#=jx ,^xbTDIPC79\6ŗŏF/e-MVm}#NpDҏ1CP$qZc(x9Ǹ!0oO.5ø!r~Q)?)i\ܤscaru1}G2N4?{?}Bmj,7]B]`x- gvgy::VHiEWsvd#~v'ULŁV 4T3ݮ[אP ,BN_5W{>6K(/ZlvM99SX4v#3p0=.P6/ +N|kGnMz;FI)w]SE-lNJe4&3;+ϝH;VV*Hk8 ҃DOeus5&Qщ^(gR މLCIį}J 4.;c w̨ mc {SqTH4)ɮՆ} X='\7w6ةj:m/ѐ8 rb%qEyB+ٞu)nGQ϶8=:D̝-VK@~0Vi΄x'Hzb̜\l0F"A`"y-x]A$$ T"W԰hz6o]w*pzDhXU /fXD`'E٩oM3pZ9k`>۱H>;|rsgZk<1k۩ni$e#xP3L(E|MNP,)0KlQJ#/d Ǡ:8G3t|!2*n)|sՓdWao_쁊T2kUD:`p5Vg3I(V+#G!.[Ox?jsoh齪ٲ`R4_ai4]< %ԍq|Mw`Fv7?z;|MXC :eMRz0| J T"͊dȲ`ڝ}#4Z@<}Wܬ3& /M(E`Ngc buf[M_o^SBfTBWUW G~E(Vvuf>O!&QMg+} —fMrrKŲ|ӖK\hڬՊ vgS၊g#~>"xbP<+(fD/w)7$5O~+%y^żmvʢZV1@#d$,턞Fva+g62gcј;L)^mg=)܄y7ԠP ΃Tgh㻴j$@y}>n/?p:9աluzy,X]G~uuk|Jo== endstream endobj 78 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 83 0 obj << /Length 725 /Filter /FlateDecode >> stream xڝTKo@W@ e1TjDMֈ6(68ӴZӸ*k7oV t`s& ʢiYg0:ҕ3\|^S? ? 
d<¼&76 tʤA5?8V2 YpHϰv /7S3ɬtL$(hu+o]ŦHAv \&A%qQP;u3>#]4v-|wHraX_>{Uq!(:nA҂=WcMy#/Xd ;6֔Bc6JZ*o:8`VDp#E%M AjLe:Φ?蝟4l8X7fvbvqG& _eLnhIQ=cK˔ChLp%HB#0\:pup'n>8 R{Kw f?[[J:5o7Vr9Tڋ6>1M9I<b3nY7& P;/sש2UYICK I z$V7XFL֙o :yPQ NJK8;pGdy FkGOhU"yqh:w?^G65JuANi(?sĠ endstream endobj 61 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-009.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 84 0 R /BBox [0 0 453 288] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 85 0 R/F2 86 0 R/F3 87 0 R>> /ExtGState << >>/ColorSpace << /sRGB 88 0 R >>>> /Length 2291 /Filter /FlateDecode >> stream xYˎdW%l|? KxZb11H眸U7. jaddddNxN b%e^~Wp.X0! ˆkb.Bq+}m6/{DXl.cȂ#ɂsCLe)&!{4*_j:kľ%A7/{<$,LV“}wW'4 SไL+* +fv3d•uH|0{$R^xi-D"5/&3)d1+JK_o3rЀs#t@&u^ft[fΉo*Qb.eTtΥx@=V,eW<)//4D A+ @+^xikoS+䩒!{i~'+):eQ{ Ti n3&ޞNxi@.Dc)}2SՌK/ 33sv~tFr':a]f%TʬuoۂΪ~Q`dg&E'{IURv^U'բSx '{fVmI:yoADBOxMK4SѣȺoZ]e#{>Y:Qb2%ls6-BҢ'zt0!{EufBH0f&I# m=:07Q OWғ|+#;+}ғL!$]BOjbuOx Ll=EWAOSiڞ>nhB|z+"u M%xdyU9>x&w3>axuWN}dw.ZdX X^CH&X`a~DϣV`909w&6Yp_iZ3wrNbrR nV9e :+ IhA}b俫:߿7yg8=R)^:J Vg29•sc` X3m&=h!@>8~p+MC>V{;Z+ŧqOCJ|^F-4OL5qrݏ@ِnR|΢ˆvti%ҲC.>(^wg|f+ɯPȽE6'K6nSU"aˢ1?Y~HGŜPV@@f#tLetMvrG endstream endobj 90 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 79 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-010.pdf) /PTEX.PageNumber 1 
/PTEX.InfoDict 91 0 R /BBox [0 0 453 288] /Resources << /ProcSet [ /PDF /Text ] /Font << /F3 92 0 R>> /ExtGState << >>/ColorSpace << /sRGB 93 0 R >>>> /Length 40817 /Filter /FlateDecode >> stream xe_OQ͸S=m@$kii'v`{{Oc ƪU_SCwQEGO??ÏgJGo>?r{lG?o_ȶ?[i?D?$=:H??OdMG-g?_3pLj"t+t+ ~y.Ľ~E ەᚗMI3 Ӥ"2%“.Ij(.xPG`ic2N`W(kp۪(梺S:JM›ȃOYǃr<.a'( lwFzrPG n>sel бCT~uD,uV[NɇJ9 Q,'VNm̰ب"ǃ_~#9Nт;Տ6/Du> ,3ʼ pr"x{c> Atj*>d8171A'6b߮CNW[B'c^DIą b&tQĊv떎1=9w>w "UodETŃ|.]PLO>ALO "ČČ'Yqu>+ 1y= W[x+*`g,:hwT^m_ns&ua6)w{p#>4Bez'+b_20>X҅Bq.Pxq3q,_D-A.{sw ;g} 1mۛZ#gjSoF?I0{ghwh{OF[?MN;쿟鎟 ]%nj ?Wy_BWcd} {|YUZcݼJ7;wJ]<-Цd,}eWdƌ=ݾ27c7?_NF˖2f#5Ο?OD˜P~9f o)w4l^eIy~e Cl=֛T`7D?h7DAƖ]䇍gCf}aWmVo.#yPc=[yȓ /j88oq};=kDlQΉ둱eftf}nQ0?9Sm P;$kL# _&hRY,|6eq0tTa~8K| 뇥?>qxiC;c!PCЄ1Lmh4ި;=,tXGΪa 'i&,pf#Ce.sEVכirTon֟bOȭZ@o)LJV9^&/Ikk)gX'OƟ6mpƿgHG9Y7+ra|\b~q>9Al~^6l~86Ϳ2~)l%?m}i#ΖPx|-JXxZO&%CIML^F{9H,oc2P/0*c(WE觢>L,;L^G Yrӯ1'?b}|dn^$u+_g7o5`{XOSOɋyE[zayzVnUvOy:r/^>,. F^?S 9YɉMnKfn])̽k̼XWQYWvl+X ܼmZKlgY/K,9ʗ3Jp{6"ɕ (SYܚvyOn[a;7OeY͝#)}1^<{Up򰳣g+Y|f"wxߛg8Lݺ*6=.nPYh=TE>\,'r}`)\\s0Tc}Ų_ycd3j0MiD«E,i@nf u }|^fxx^c3377eV~0z5] }.7AnaoofV/٭(9w,e]ΕYu9I.dEeտ^$'~2ʹ~;v)HfW\qx] ,aZ>ns/ۅl,A`ѯw 0p?w2>w#wZ>͹X}~p)_.V2~oX+G.W}Y{p^:^nqxq^<͸/l&vzS0F7/S`aiʏ%k/.=O^U+CŢ=A#rj#ja?}|싵nwXQuC߂0jdF)a 'a۸: C=қd~Q_a诇1_WJߛr -q9Cx~s_y"5)xs$,z$̫Qз~+>^F^2Pӕ)}ꭁ7{7&2*v3a{3,-^y?F,tپ2YV.g>E:P~Ïb#qx }}j7{^ͳg/ƅ7j?gw397/+Wo&2o{W3ԟ쏬|k|/o[Us㕱K7;u1^%._~ -,_ŁU0_=1>g }?yFyo`c><{η /޾>>56}~fU}/pͷ<{[ӓ=X#a=~_R/6k=5;S?$f'ޤ=PgY3ȃO}RXbvC z5yBXM?v.SvwOiW\zm}e9lͭO?m}P'VlOWڣ_8_(_xGn>l?9W+ρ&j-\l*pUrZ]^f9:g޿/qspїKn?b`a?x\+YYyxx~kq?UXۏoR eRaoK~pp1n|;0]qSu.mD'8O_3 ?oyAOgf{O8/swRIXڻWdiߑe}AXa5FO5$>n.ߩ᚜~3}O0~C|O!>U/Ns"U?ٿQgQ.o9Gd6~c֯B0P2p|o?yhbo=OUM8sV*˜*WxwGU1;q&ԟXL5TZd k}بozn+7ɋagC 1|<԰c|j|hr1d,ߕg|5x.S}r^0?Y/"C?s78?EysG|Qo? 9㼵`XxJ}vds HG8_|U ~ +΋=\.|Ow:g3C=sz/|/jz__ k؊>:΋C[Ϧe2CrMl?m<(v, G=3}7=H?끦 KCSEWE_T]8>iQ0ƿ-pq=pzCzϜ^gMM;YYlO-_[R2z۳>>sa? 
<=L#p2~п[+8S}hE?3nyg ֿ:A2,gɶ~>wҿr=}-;45y_ Cvyݟg~j738R 6^v3=|q~xca|M XoF[ {c.ù4}|?'r~׮SRKؿϵl?w/0M·|s/83o7yϝ9l~T/pac/p˙S㧼sV?>y_OnyW 'Ɩ>_dŽ͇ۺkKE'|~|oxͽ|ɟg,/3NJp ?LW,?s G@ |_co8E=7#c~101<7#B?=ޜ-?>oX^?KїC< rd9_ʔ/KWE'^|~X8[>U=gVb$ثnا)m>z.̧oF߫h褯N+/YFVqCU_뼹\w?^Tݸ Ս΋{o<׿^+7k/>(p~V9._P__>)=L߿7{ 7Z/yY}aG17|_)7+ Ͱo Ov}#}D:n8ec\?k̟ɿoyxY|ݬ_ԧ-=foT6;[+c- ۄ[O* dcg {"ϯ6nfO1i`?Q~逋یsf:yd7؛zyso}=Y7cEcovnjyY|ϊ}3*ؿ)({sLE_+_=/c>}3'.߯3_g*~|ы!?d/POc|p' /caE/ǢqD{I C8oسZye5Xg/F{ekY7N.<ߌͺqs?/ljHuY 1?F7D_tS#䭿~7y9쿯~^ ?B~Cȯ:xח?8X~Gopqn1lO.V /} ꌟ̟;,GZzl0 C|¹[LO/~oOXY~߲`Oy-?|x7{~4m*?/Sy+ di/XzodߎˏܮS>{swӊ/7>3ܗ} %p"F?O5&ov!zQȲg~Ù^?>켜\]> ,?WHWY˭ў0=C}b5:{8SMdOohOs55RKAl' ]儳=9=:3i>79ݾW[:f_y(OǾ'+}=Ց߇QxgPz`7-.O'ulo./ ,E?6x߼,a׋i(r,O7Gu~BJn-O#c/ѿ:rO|VK'371 \wL7aeCTGo;z3.-!,͹,gu,}HuC8W@ |_'㾮}D}m< <=pG;^^sL/ϯܬ|Ri䗜`(<3_C$7/( ҇.)'qC+V/ ܢߔ B9]OqzdzC>҇ W0^L: D^f gƕe]oo3ϜVrgwtxc7 |Jau o!DŽ}B>ZGW~_n-w|dW:t1O=/]S~|+Swӟ_7> ۋ\\g3{bl?dc^>csK󧎼/pʳ0~/_ut+7;W=?r!,G9ʘ̌3 ~2q-r;f~~ۊ |rȳg27>c ~s͡= />3ǾtN͡|{]?Zb}X~ gp*ӗw8FYlgS-;cK]~}͵{V޶59sY}_QQ̟s<7)sΖcy=5^/apOП?S)ʛmǐax_ex@Q>'~2!zaH*(ֈA"[M#0au 7A2"CF8?:eRG}= .B7B資=<qQb}_/t0XXF}/&P0nyginb<㧼fz3B>lT;{*ޣ 4kl_7=6^o{o/9~,_ʸ2 _Xg}_3=YS.߉9`#Y?^ͩ^Տ\[g'W;ѿpOvNuڣBq.) g|_)/o2g?ۻw+/rUO|i+A9yѧ?kd'ߟ.B}ӕL}ߠ m؈26/1.7E -س |_07EKlt#te:wΧrP>8:c~Ffl,>iFa=_0%q{^){X3jf5׌kmh5*E\kx8c \2;H˷՟G8lﳴ/?Y8qg:Gg8B3>:c`iuGuGxog8 \Q_|[x~0'v{}uZn>{zRMxpb}_ .=ƫޝ'p|g򖟤 Ԍ~ zsYc|:0+g8{޽0߯沣7?WWx73z,z*Z~"K`;??x(Z& 9}xr[~ckcr;w7^uT L$3Ϋ 2JRz[BP u578(9\%&CĄ|XNn} p^v}.qG7aUFVtQE]KQJw8vs0TJ7K'ynV:Y7O@Ů^ rjN#V9X),Jo )LwT+ lBlA WӚbțX &h~\\.eo8PByKCREˇ u?k˱dr![Tp>Xr-4您Aق)4 2-3b\˴4yV4)z[9rp\fq i9naV?UT"lfWF[}E|: :.E,=Tm1'wpΓD CJphaHC'vx*m! w,fD%gPd=VwHQ8Cyצ'VdѶ!OMӆՋNc.'}*5:l5$gXr]XLUy:&gI-6U6Q6_¦*`_do<վyuq׈=ǚߙգMJN%z:Z\aSKM̾dc%] T5 C:sA}M[=iEiuOD|*mPRgkoQRRkoƹ#vDžMM8/<@eTx+{ӈKźy񙕏XC8hpħa,ӈNTC[ĚCTV=Fa ο_~Kz6pY}F,D O#ʪy#bl!*uq O2Yݏ_i᎞Z߶|c`ՂMD||Wא gġ1Buz>'GX5O F0Δ`i$#z ?y 7}X8Ql#fሃZ9$>qn3[),خ(*ދ>:όq\ x"D|<,">cVlo`TkMa wfe'*Ǭ9V}ȅ>rQcE\*,Bv ml45r2c_r%3"Azux>F>n{-4}3&?sayþ. 
?A]{c;lODO$\ б_lP꫾'*5:=6'*= nR|'t_ZjOcݴSMU>RU֝ ``&wCnXrH}hZs=/bPYGXvW3$.  MEUoT#Cҁ֝}g~f]CzdpFVGL'ky Da G@lGHM@GJIY ;#vwE*6C ۑe4âpvM8 8#8<~d : ŎuImAZ>إ-:>}8:cdD~/2Y lGJx@oXND^:~CS%ug FmEѶEotEk{xzPrkE-Wcß[3^"5f],{d?zC]abraa]yE( ~6)=o$*g>?#z=m}g~~qɫXrq<b0-??g>`xH,Ga1]Sӱf<Ԁվ1lߘ;|t)˅pDlaolcFq,t٫gmkzhO{q iȩBh;ꨥp\v*( aoA)H==̶8kXSJn|dSm9i,6G5QӁ'vA 1v\->6opd/S_ݖd;UZ+ZWT!&g^>MT_}#36|^ׅv 12@K7T]^PF_m>}:}cڶed>}e|g~ x.U6o :,*Yţ*}`-^v"\wsa6W6x[|3νŷS^z:s{LwAwtR8",;etC7C46]M6ކބf>_9T86bu-+QMsS !ٱƘ? tLWj>zP2kYPXd;6"sX) >yWWB[1Bi!OAMxXYU:ǼfGdУBske5][.Z@Px-q,\A8c c+ztK~Û(oPFV&6pF,MNQ8EᡙzZC2plTʣ<DvrQ YF-tzN)fHF9EP(6*|7j-+Lw)<*5xaVY ^|1zUoזcZS1dM >ܠ3ac8">=փ[jZ-R%_ ]13UAXгdLk*yk,+G]}.&JǥM\Q{Ʋԓ찎谸h,^k,T:4[~&<#>S^.\*{#[u@q>P^PG e _uq V]eTGXG׃zUĬޑEW'[}]{D}^ޞ߭o!QQ8MlQ5dd\%M\d͵xM#ƩVջ<]2JЀgjH#py ?SaMFMs/OG+7ujG"r%⺅n y݄>Jˠ_s#B44!QP)Ʋ@HNO5{b6; ʘpi ֬q̴_8n >Q(ΰu'*gXBhV+˯#.rՠ#7*ɠ'ZvB@93d$bਛ՚o˼Q1p FXP" g:dT<YtcۅboEY ڟhCP]tk(,*Ota*`:ro@/u.L@YMGIffsYV?(jVZqC9pa8 p( Vy?K+țZAIDhm_Cc8;dXm~A\P-vf.E:}fUl0N>ـ}mQ>F Gɪkx)K^c 8ThQ9\z@r/v@]At/pW37'N#IO6p@"99!ؒ1j[Fuٖ爩za,o;Zdx-y aA+gzwxQn&xH+eƕcXvrdrg'L0khWDzsjwlR@K@*+wdyXdPӳ_($yտܠkZ}Rkv^itX.:.Kq .Q2 y9~ҨV 8prk1wPbXA]bQ9ʇ5T$lH Pt:᜗]\1C%UmOY] J̀G$zAus]}KF, 34g[>pv#gewz~"V: wߘ-UψW69bӨpS8XoP`u|Tz˾CFl9dJJi% b'/4SI w!ԅ煩^)̤1NՅ֐0F|qX> ^tg߈b<1SG}^ ?{G8150Ӆ2y~d5<9ᅣ7Qy!j%6AGV^y(Oݚ}(`I!*1Pث۰Zw5݉Y .M83L9typP>PJ-U/?Zo\Q!wL3u @.C^XwUUYԎ{Vϒ꿫vkBgUuuPhnmڤ!ۤE k ]RB"=i~&_ClVuUOeŮNS2ЅeZJw!k4C#C/=༅W,nl&LXV<KǼW-} {i%*Hq4RodOѢkٺa^X%!t _H&\8%|!@[^X%rs߭6hg݄҅ךM`/32fmCO^bsC3uS#>4N1'>2Y4iH~u~gU>Ue;buX٬2n2-bAX^Xu7oDš=bAX "]vbuV7ɦN?p[+<,I~J˚]#u [~bVo&҅OBHO;"彑݈ !=Ή%j@=:zqK/µ^)IFr#yjGoɃ'GL/`c8C.N8m{9kD-}K`>ps@.v?˅= P0X.^&VDla"!iV>?,fu^0kQ^*g&`C{zaXS'jhC{PJzu3hR[uˆE;4,/e\G)m׀!k 1Ԃ# p`EDZuX~( }|!M,[sٌ7!lJc-`Mt>:Jf-bHԑZJ%]4\FjhJ>L| 6h4c` qdSV1GolCeSqp bʱ@Ra|WmD[KD[cͪLǫ XKQw!>]Gаc؅H#~` .>4u s,暮P`OWqqy.? Ǫ1ǂZ^F}! Z ضB^ʶWlAXW 9omqF!/}fd v//C. 
'ÖBhf[ 47VY# E,\:a5pXLw(g8׏eG,ztN}C7pۄ/3\cp;;J~BĶ@  V|6{LB\8/T<H B±BTfA4C~7V%LILm SA]ia]uIqF;w~㴵sY[]wG=j ]GG=*i}y>vApPJr`{#*ٞ ,@4}Az:[ dNďM.qKZʜd`dg_EV[OSuWmߞ!¼/'Cx gb^a{'_b,ih|'~?'~?ɟOhQ~?9¿O~?ɟ~Glkgں)6N` Hy}_,G^OC bc4!\OC7FٺJKs,=x[~=}1~pFоGޜ_GU?6!% }cX!s1`\|'Xlp~/n wğɣeWnV>_Nś'z~Ȟ~>o^V׉mc8v&@aC]VM&ꢊԖL^RԬGxXM=,.W}%%tg/򷰞e ge>}/`a]3]<Mqf8]O8 \rWyt$͛ %5)ЬrRWwOWtNo` fHLqw|;<+@c=ڟۏڧgqN7|뾹 _7ǿ{ULݬX:OQMO Bz%|U?4/_=_[3CW҈Wu3l?1~߂+\mxd3џ_{< Uݡ~TW}Kow#+g;X屏|8P/,w}>q[/)03GVg\o}0Ϗ>r GOzӟcC_[ }C~BdoU16546=ԯ=\O2#7v11^td<,>m=q~}8[zm}u8]>;ܭ+,E"~{j?dlO }}0xW?c~+O X~jxӗ?N8_.f߳9˟_[Vv8]7Ԥ\kLgmc~3W1޿a^_|ŵdߧ9[zRIgYȇu~UUcx>fRs&MEgs爷'Ox*95RdsϺSh?)ò,EKxyˣ~GyGJa >e;t|+|RXƛPחXJ᳎Ke`Wߎ)7rZpٿ8zyYj`}GI諑e~9U< 6{GY$e0ʓ=~kӃp.}~3#z8ju#Oiپdž+_C)_r~}ߙ_?_R=ipv3p/ᙦY/qD;9`1v/rsj1>)lil/ޘ0/Аy~̗WiRGdݿ8<-\SY$$~uW~m-5t.f+=~={_1.Qk􏦯 ' 1M;mha}P&F(Oy<'cO&o鋊П»w1{yRߚWyGKC![5}O b&Ydf; +OK ~:8ȋ>=Y#<ͼP|\'y ӕ3lAfybpƼp1{8"~LaτnlNf<,Cc,Y+̸>K]wΧU_-N.\S %5ӌǒTfF06iu%ez(!N% .Le<'Oif,l1pAGMKQòehދKQ#;ߊXK85ߊ,7i:pVD:~co;%=5]?C~埿Whp/ XU '3=XVͻ3BUnRwHx"QȟZ͇Ky] pE}5{q|;9ߔˣUc,6l>a oOW캃4+~ GO'-?f~{m|U %).5ƧS@;{'pB Ajo.;{xCp} of}XI_EUOeݨEPpky[[xŴxr4ݙ6/>ÈG+;GT[Y>+-Y˯!o@]PFWgqaP Sub$..?kĄީ^ZTL~A+f8ɭ ;;'δȈ Ӵ Wg.퇿 6 _gcf0c2zwœ—#bxCM-tzG&'&5D5qa{grD2d^Y-8igEifVb +;ik!O!S5W!M]mcA'f >] Z8a~v'-j5L_|/_=Y>>?c>vVvb~^jQ`~;Iryp1^[xhxn/c,z`Y~/O>!bx^H_OEyCX,V(y`}ulU,͔G}ZQbC-Y/V7V^yQY~cO=[{:LyUCC{y9<>py9쿯~^ɞ]92cy݋ubٞ\/8'׹(w,?Y'y~y{|#糌¾M2=G ";w9.b~xnc_|U+o=;8/p`ίv9[{=s};g b}V\)[@50J h! [i"w􁃍N-c!}~<#@Hr52?ܰ? p\Q}91`#NN!h~;ou<,1>̀ | ߧq)gX'zG`y^֞>ϗ'8W@yapYl<{ya)F(GLSY y"FI/\ݣw@NbE-9#[@9h1AB`C X?֛ @#Hs(7.3y !% yV|lN)KP./R _//bXmmk?a? 
Pʿr-MహvcΕ‭հe ܚقa E) h .5A&6dKcΗ }^-ό3A Y}E0jBfraYv$$3/|!xO/>X֧<)/?_3\TȿZ^ɐgk]t~"W7rQ~o,׆p|\~Rc7{vR-\op?eOf~?_X'|kRo|Η?&65{3O;[yF{c>aF u}1}AK~IQoiw2C-AK3E~sY}B xdt}6Ϧd{U?9u6}8/pA2l?͓7OSu++쟾v?/ߢ~jЗ;y/d+,/}zNnO";з;x WvQ_YV}3kz߾_2]"'jK~+;<0G{K<_T"?|ʿj}Ɠ/^)fj)!)xDZ8?@ۿۿ\q.h4'/>GV k+q+~[^Z?"78<};yARe|T$jd9y_6zkG"9~{THP1;귥#8DfW_Qn}R@ @,+`zب}>zp=Z'ky=~2~_$>}o$p>Y3>6wG}_n?߿x}cCրV_r,cFv??&TͰs^-\ \[ K)ۗdNZG?_ jZÏ7pzY?Mh^,P&tZJY:!>?T_ȯ%&t4A^Aظ9`?~u<)"?z<b WZ3ȲJe%KU`m&U._=N[G&-;QAT%c[e=6\:t̴\7dVIx6+gI,~~} b)e󑤜 t܂`k4@? ?nL9Zv [2xʙ*^e|߸$E}ןWooEm~o.UΠu%nDtcҒL,A]O9ӡM;>#m'灘R8YxbeKN J2fVeS\:Q=1jlblLb6B4̭jl~nq'dlUFMuJ;eƈ({cYJ}sZ:J 3|&qu.lo}aobTk{(41p٦MȮ\zd +1 rAv#ٱ֧zLY)k5yɷh}:VYrIngue.vznK}x,=p>eI^W%8έ`؁|p?Kc; F֋(6Qjk|b8Cvk*N]5ߊ庱[9Z5mVaIᷜ Kr ">[ba愹x|"SМbz#bӶ㷉R1ՈX0ؗ/M#tTl$c@ʝc6f!nКb3pO[hK :KRcxY|8|YQ3a2p{Nlc!ĄDn] ^ƥVl vݡ[>b)CwV*Ky+C^*$ZUy7 Kgp $O_+N/^df*ghݡ2?1s'•2D72:i-)ھ.zp{%/je1;RJؤ.6rVNۜUe!C5n},:#x sKw %Tl/^ є ߎw)IHݙ}p~*WIZ2NCS71ue |FC猡ŮuhyGCr 5eHڡ46-W$~c1x<48H#ouoh@p/-)VÁ;~Now#t#Сb6p_W~~5p߷CHCCVװa7ЧqDUCo/C7$5,׻NnCR}!H]#f!y"ӮѶ@L#t#jwR#to\{*oG7_^W%V|QT~W;p3NGļmmv^HYгS Y7{{!` _pKğw:~o &cn]yx2ҽqyRqb0'FeC㨯 u/W D  :y} .O3?|o/)5#/Kfǹ*oBeeIker5=eFr7Z)ؼXLɱ9?Y닠uȱ)n5٬)?0WS|dE\I-%I`Y6)O.1[V_^2_Ə|g\>N,ﻱkʙp}ܟWSYEYH%٠ <߯*v=S>m=G`y^=|3DH٧PdKC @M)XX$ae:CFOU< y++ўr?kyS!v?>"׫~j'"bGbaDo&tܼ-#kX8,@"qr9'rb{7cUV2"hDI޿Gĉ ե^GQ;DRnn-Y,-eg`|Y,7+K N=0V}Wu *ݟqKux{0y=y I!o&x'= I|RiyͥxXc=mؼ7 ܎O;Yzw Or|>S`oHzt 5֋}<90~. LORj|ϔ%ܦ{o.~s'k9g}^ZyCnVfV־Z@o{0  9'ˁ%e0B%1|?EON%^_=ߴPC=Ha|ʍbKB}a/qo{kT!…O"X|wY_#fϑwD2TflWu o67DG9k9yok/vH#ĕ?g/_,u+A=Z}><\h]B1c S+F]Qm袇OUS2=Y΅_1B*8Lj?38wKTF< cƏ5~88댸|2#213GqȌ(えp "l;K%( Km#;xIN* ;}#zW=vEq mN뫷'3b'E_kBD~gZ"z0 Ov{|_;3Ō ~uN$lĽr]/͓W'ԯl#ؿ9!ڛ`#@̉ȜD zo|r=@ z;cf}dF0`}FxpnoܾqY="bp}qI3ẍ F1N12o#sj3 nާf1|KD/)E\_tWp#p:z[# >O >#){&#u _>YD$}APD+F &w>#NPbTy1.a"~, eF4c}1S}`x΁R(g=D$A8\# tr#YD2#lÜ"0n|r3fSϻl<~+rN&:V/65NmbX!3=w*_WoǹF:\6./ԟ.W#7! 20D@r? 
/ 3 pK'8u#=Ot-{ՀEyb(Nyq=5+3bϋV9i#v%z,x܃-Ci;ۜmKEp=k9|,b,jd˘/^Zxo'oy+gu8Z8L/eJ0rCKat4sd0Le y<;Ozew "sيur\z6I<{94?ܜSVFsr=9}aݩ-΋G;\OU !sC=dqP%fTδ_\޶[TayG*ɰ^ԑsykE>/WC>>-ƍoz+^O#trj~/y~H*'"1x[f059'+򶷴!BWþ!"Wb}DԊ\D?"܎_D̲!Vm[TAC,_vjtǰ!o `̇m{ǥ9KUywOxQT`5t6`Ep/Ey?7'ϧ'uc]p%_%vSǮW0^)֯x?~9cOgO9ȹs^f=,s`?w{.ahc<[z|'q=/ m1z^T'aLJ֏~27拗ߑ1?N>sFx2e2~;tsOd1 1JeO/!y]Oߪ>DjF={~z~y絨=͐H[a<Kwqb7#!gjK'=Q_ GC}Oa(on~`/^x?טzR{i{s=}:_zb/Ӭד^Ծ"}sTQx?u}jw{U\|ԓ'8޺{ܼJ[1ѻ3< f?{I'O_rVy&mw}-G-ENW}Q|i(ּ\X^_WOC䟾(o#bn_DZ?K~~/kyOݯw"O1俆#?p~mϑ%~Ak7 -&\)dzYOr!R"7>OgZ}15rW{T^7ˇ^Z-?1_4Dh˕=DxCc< c{/Oo㏔|r=-~+x(!%VZ҂CbR. >UFJ%ұzQ_`471v II|EdbV/U` pI,Tq6Uzc8nG%Dj SHG3R }kА*r4lwgShuh@:B~1Q,/l<)  k0 q7“(88pAaQ-2qRav h Yni6E5RZwgz&FYDL]hٹk<Ǿ\ܹkvreO9&bvr"s%?͙Z ٟunlTQ9VݸPk_6ֿy'}ieZMKl'/($OWལ`z8RT)<7`Ljuj oO^djK-lKkk1թ1B2OZK*slWjSxjR_RyP3*sy&O}g>~/ Ty -4QKRO=3c嫜笠ogb3yqSռxSLm܂3m\V/JV! V6VdZ 9~q+#go[=x=ZAieW.%;Uv^Ekv k?;6-w|NS忭n+ӊ2xn+ӊrx +ۊⱲ0n+׊x@?V"%lV&mr[V.i%s[ѼV6is[V>i%t[VFmt[1VNm%ZQVVmuZqV^i%v[VfmZVni%w[ѽVvmw[V~m%xXVmZ1Vm%y[QVmy[qVi%z[Viz[Vm%{[ўVk{[Vk%|[VƷk|[1Vηk%}[QVַk}[qVއm%XVm~ZVm%ZVm[V%p{^p{1^%q{Q^q{q^%r{^&r{^.%s{^6s{^>%t{^Ft{1)[J)-q |%*F/R;倞EцkIqp M}<^1#I8BGay"bK8 Gw$9"CGD)"X)D˻8?ZLH2tqJx$,є"rT^8/ m ?Y1Dl!zdi#KYb1Og VP 2S"րV~z{LOlXyߍp-M4/6{6Gz0TW-KdglGPDцNbE!ŹJm%|ڋpF𵈳B'8"EDnK}FS [ //XyQVL_x|k_G&f@XU+0b4w`qa*׳ÅhaXsLcE "sL:V^_`aBOFRZG1Q6# 4UBqvbmFP?L9&7|Vl>HӬ}Xs[2N>nI45jꋍ1Ws=p'¤B"+s=:Bu5T_W9 5+V {u󹯫ᒦ,,l k03X'SNjSx{#E*ffyϳ/xOfQ2 [ͧTNf4V9ɱ!\͖2uC$YKb$۰'9Q#Z95,|7p})Zeq !ze9BX@5E+9*[vȤz`ً#ooB"k2__k%\%M%\Gs.-ѣC4LN]Gu.إ34jQvi^]ݡɻ|^Cxi䥹҉^Kzh[/]죩_C|i󥙾֗Vy_C_~i]?_'uzp-'׹sjqi\'yqZr<'-9uJs\'?'uz_']e5p\=e2\e%qP\en\arٌ<%er٪\,e\4}a}s<;]es]C=emt"]Jюrzm. ~겮zlˬnzl.^&{l.KNb{l. 
>;l.˻.ڻlk{c I](}U!lߌ= ݋Xpw-={R~V:(+!U`'ʚF:B(-G.)Gvޯ}`mwMWQy c>cf9yՕ_L_['#ؔL=k%ү,@=31e=1t=iZ(?,a ^\C ( N9Ibr#fĭP"uf Et' my.7 T#dTĭ*5%LHS_.)@d`GY|%ޠn"Fc6V{'n[tSiDqWCLU_Yo)ʎn1bci._JFHwkJSiG%T^Ygefef+0ʽ$@I(;z{ʍ)/(иC'{w$qL"N)&.%ɴr#qމ8~-i'Ib.T 'd,V5ACU+4'h U,ݨ)n pOȺ8ne\D E*kc߯ fqj>}J18q++pl3!Mb"bd [vmgҖ%ﯭ;IH<dpFo NM *bFg"S8/ ;̈́tiL "b\T!J}jĦadEmYl!iS^-$> QZє4a04XZeGjb:q?ax8a4̊~T4r)$l|aGˆ;;eWkw˝+-Z[f,1Qa6uu DtA}35 1+ihBwN'o!\w?00jey{˲DlHImj(D:*pgYۓ 勣 wrߥ4bb~VԜ}X6|}#X8ѫZ>_q) lĚs$mv0@eyA,]#CUFi~0 w@HX+#5\࠲ 3LɟaGv^*舿[߆%qo߁{-. m#6/6h?)Jp'.Ml:_h }/V^n,A6Ѩ[ Ǵrz鼣쭣p3'{`/ 2hJHJǺ7[pDZ#CSp>xamvFxWR)u#r\G򡳩{cQI*.pF@ګ<*_ⅬTgTF9R,`yTpn\Ȅ:Vx]f3Bۖly ~*{ [J;V֯w$1ѩe:_Q\A[R_ ֍Ͽ fQ6]AEkUˢ=];g\h83P9NJ gFz:*_Qy.j[ }>~TI_GL뾨_ ]M4i3zEѫwT>r qExګ<*󷭇agGnB o\_\$sPH߽ԪCW72u$"yeY&nenm-jAYp_txrGЍ@ Xe(R _oE(V%>Os>2ڶq`-c(n{U~K EWq!b}V]/f?}r~)S̘ %#Rm/g2*<*[~^L!a4wvb؜scXw~4_ (?^YN#tTʥ5IuF,͟NM#~ß c_T9oވ=L8R~qPˌ{lݧkGa Yc 93[p^GX`V/1Hd3- jA4APpe6}L".}XʁU SxUWaN틟Y,J\ -O(-вa svխ`W*A$*L194V/po_o/xٜ$Z9? Ҧ,Kïg4sxFP4,L{U>B>l wh;vPw1U}s+wbCVu2h.bGe5U/CU-ą30[ aT!_p5-hN`( i] /4D Sl9rR`>upyp08|.,|\X[>nPfu>m$h3Tn0뱬appD6>&>#h/!>sNxͨQ^ +yȮ6Tke"p03+Rt憝 BB$\ _T+iV>greN+?  
iW /.?oAl0#``0D'7~34EcEu`U}H;MP0t01K:N+˶[-mdĐ #[wGCdySoG Q,8auB#6F"#X?L`$吂F2bv!&D 1n-`h~J-ͥڪ={0CG;Za?(ۏ[ endstream endobj 95 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 100 0 obj << /Length 723 /Filter /FlateDecode >> stream xڭVێ0}WD+vc;H[+v!Moڦe=3qK B]̙3{WF |dd2:JiEղ'8`TziWM=|Z'H`I#")d4\DEqb& umcu i!鳘zԋ-,U%fYqʘ\3ܗңJr*&d.y0Yޣ:B@\ɾ&&AltpJ@ uvmE*s|`$s۩ےGȘ0wmtV> .0Iy 4Ê/\Q}Ve7?ĵ wHvQd ]n|BՄqYɰ'_s ^%a'lB3 u*(*Zw^4͔3O~ s6"A. 5~Y8q> /ExtGState << >>/ColorSpace << /sRGB 104 0 R >>>> /Length 13976 /Filter /FlateDecode >> stream x]K%q.m JXJ 1Y;Ή̺=Z'e ^xڥY8_yן_/~/^#|^_?_X>_>g׿^~5}.W7?/gk?_ߟ?"_=ȯ^?Ky_}|ζcyY:gͯQ?k<>Gzg xXbεP!b/J so9asaJX;vi?s )9}g? x6fgGya{ <ۆFۋ5Io/>32׷!ٞmBڮ$cۢƿ x6hm~iTe;>' ^Nf>vGOT<?_^ ?jӶ_׭yU1߹tlE'5n,2'~j;'aw.A%/1[}yYLR>So3Iou>d]]xJ$,w]ľJvsug _\Ww(4v_s;1Avm#OSc߻`C,qS[K١qm]שɵt7ygu \P/6I`Ëf0cy1_܌Y}i3tfWٗĸ~\ȶ^g/oxa_k:.Xc =KO]2i–Xk@[۷-oiNe }dK\:l' C.s-o͘Kl]g3Nٗai뒸h}~3Wx]V)WmoR6Ә"eLQ,$`b `ldy7E[1+Aha( wcO#4y!Έj/MLTeB@kߘq*%ur +k/K$QO \NNj*e!2ݑb_4L^Og[|[4C;S }-Sk{"~)T'd )Trb…HM}0`@o _~vKwT(LhƇU~C١ŠS=Y$YȋI mW_ڠol apF2&_U榙Nj\H|O0DQu)Ċq)^p;cnVECN4Ӎ̷IIB)k{Ğaaa*/$Ou̅3c%0i72:HD *_a_d9Z5 Nݗ(Y# .l5 E<@!T3@$+QۘA^mȮjS=!tBȰ{bU0x5_dc*epZ/uc3!d-"_uW320U;Aw?wrkynAf`^&:+d=fW $ri0ʔ9!mW1MAWu3.iM5gF^$M>rD Rpyˉ4bT=NkrF\Ok@YНawu*3;Ž$iin}痝mƗW4mx(I2s3恪JILvSFFHLYɁE3ceWf=8H褔YAJ] or3Aӓ-H^nzuliRk .=1ۙƲ.;|HtW~䃑]=&7h&9ieDctz94fˁƀ,R ^D(kRSSF +TldhGuBC4Dߩ"Oi Dɍ*5#"M9*|f(PrB$nꫜcj=PcAp3UJ'(uݪBdz<%3]LC@^BsQ½3bpKgi|vb. 
fjfRB->!\(U|o^W"KwʅjîWs55c#ד y*v9_:PdT99H hUܩ^e#ԒhLT]Gߙ֭|S=HJO=Ɉ<|or'1w$JDFE@fCV\(UY;,rF )의EεV;DT 7>Rs;]JeVZgZY>LlUӈ 0?eLѸwn')zAw"2t"9Yl7BIt?Qe&j? G'ԏUiZ,N2.!3Tަuȇe=ިJNhеxβ]f2.7Z` %Yƭ@ZU]gqeU?=J5VQ%Ֆ"lh_Wccbс`:K_2OEd!NԮgrtdzE‰4bkEW5 7Y[&Rr+.!wWոr^CI8)@j){C2%k&X`ꔍq']̗HCUQQEj]=8X1ّN sjY%%nFW"[Y)f닪1zYL٭Tɥ0qSrPKOIQ3:nO#UZMGd9YQrR#ȹj[ 24ē9phUocx 7;tz2Ye6jg7Ʀ%zp1B9QbxlW9"U*F1"|9C'ƣCG9|H{ 8 3/|F**1S]ųb&[sC0v2ml@-;V2Qib# }L:go,Zg`}gٯGt,|Ȭ n?i<bs.<$o@L!pc37d7 x䕭@]4Y|68PnD^Ie/W}F\OyAe; /e( [.,l?My{?=GFX} ^~~4tB|_|/37Վ˵$1u> ^}~ 3: lxG#'%+]˛%?StͿ)f^#uR uR`}uRV%~}~{oCr@>[R~{?ϽyȈ"u}}5m5ϵG~:bmsL_jC-~cbq)/ne}3s뭜$ =h ;_n ϯI!tްn7˅MRc)k=8]\\j}Òeޚ%'m1uKٵ[#Y2z8߇f6,hidKl\i3R]}f̽fXjs_UZwߺGp" f Bٮ4s i.9H 7V5Ue@G3!v'p6ȍTgpT^:Z[>Wt*thWø M$уŠANCM[AoH] O"i(oPMj!2ʠp z8ieΈ\cۤ)MJSh֐TN}Uiz,W]N$dF!$^8[؉ӽ5Ԝ̃}9w/[Ngf83.junR5S*N E轩@zxW]PSɑA@< <4.`0|_@s_3xw]%q74~"U9I*MٯzyrIŚRD-gћ*,[kX4JD\%1Hj VoBFFLG,W"R@J2ժg D ,;}62(ɑu+;JɈGh4XĔF?+a8TUFp(')w MWAY*G E@ 1`0:RdCF*ը1Gq+@e`*qĥQwn~ 6d+.<1KԍLu>:Sح[AU&|ףcb r"Sej/̜u,㠱mf==Ra9^YтB5|k2&)3mYaT{qmC0-۾Uf~]tf&ּU*b(SNzފ|9x&kEn iF-rWz|%ۿca 4Zj&˗.کC@MwB 6bw熾U^kΎk.n8+(VT\XQ@7'W zr:ӱ- Y@RyEFT#wAF?pJ9\ś#:]#Nfb`; tUۈMFiˉEGBMq3kY8 VR`Z`t;frda-7ɽux)͒ol|kv#ngަ=_Xb$iz=YcfS&%ԛ%t+<3T6z[~UYN|sf޶#܊hH :ZL%dV2Z4,o>k7Gw}ΑW֮+IVsv6:l*7ڕ0~d{u7 q&|*#z`RksiG~V9;w ENg47ʦHORvBg"x=S32R٧Cp.e\&}$j\'`u^ݓLE AsW5ŗ39HA&(\5[>= ur0H¹ʁOB"b n؈}9x&h uo(cf3 _U7G>WLQ8r YB=2wB6jp~PSVAʉeY6 0wnF5OQ QJ.f]rbq,J.o+= ۤe~adғ+NM@מ{}(@2IEC aE%snҕwYj` T9LZQj暋#c"%vHñU5Wj@sno^eBcM's"`sBe  |eTF ^TX"e.eno-Nv";<|:}&\@^:?E:a Us>7S\dW4;ҺQ6|3DukUUԭl].o)͑d& ?20C2 C^0˭x-[s0r!c]&o(۔rѝI~ڬ =I|y׭ܳRXOJr;@&>Q霙l\Q4ox5`=xyzrX|#ڃPU@Hw5 'ҙ jLƐUuRϿ!3\"u;ܪQ4#|Pl~u_=;. 
e9셱U.CaNa]_yܫZ(yXNTF.* o:I31UݼlzDӳgEvW?.>15*]m׃%e'=?Cvd閩},GuϏr\2u=;h<,\v*5=* ^EFB3hra0/ۙo6w>4WI6CD dṭYBäDp`A8B3q<~9܏U`)MKE9X<@yѼrШ 9рڔ ֐rI#t=hy`DÑt9Su>PͩAɀ1yձ\%& `0Xu&1S &5ć_gҹ rpar0%0 `H z="B٧Uk*ӖM0Uw+>1BV-uPG>Oa[`T쿖P58ZIۘj GZ d$GzЋA$BEs4䨴͗]=խ@sr(4jO(íQa1> C6ax8G&-<[U.HXK9~nld6n5{)Hr-W3ldՌᏅF2u.q-&eҷ$#r:pF R88(ǭEUmw:UM:'eZ;Pnm]L͓}/\fG,%8;í#Z6jz j- -[-]>{DP8ټ,l}"ʄ͙:h M: d]prT)rēb&ـ4IN,5E>6d wT==M0(>[5]ÑҌwJ.+|&PPOL^1Yrz'1Cz k %da#1pRR%o2GrfGpG='2@ ]/,C9V@>inEYI<+>vʣ<86<ƬOŋ8f9F[G5#RB9$؜$Na=9ټADc cWQ`BuX~YtDH plsz1bH)q07*qD "Q7G@v+ sI .x[N8c,tsE'otGu] kh=,}hLu6eԨȐLrc$s*9IC H0>N?rI2*{^yoaq,wQD- Q4u}WHމ)zqZPN2ZGB9 zBj*7PjFG5و3iV,PV}P6ȰQVCco6LC=AH~A*3)~$ÁǷ62 h:IFϝD2pY?Ac3^MYCЋd#wr0ԁ"bUɀ(oCO1|B<(fVylF&u`T_! }MGBguay0[ZӝkC1%iƯrJc)䡃3Xy0]9iӈJ'4* eLy!k@z  `4r(a6=Xb~zh?3 GfM" ߋg( |^=3 ʠ|ϡ> XA,q,?`y@Π+"q4zy&R#  Pփ>95Mov0">o ySYkJ9ځ@5GĝR/M6ܬ).SJ{[գG3$/Os>DB˟<)*X4=ђAS~?+'#8{D=UtUdP篜ÂSQјtNQ&HMlЫSJ[3D9x1iLÐ7zS^ǓZD̾1 [)O3$k)PКntJd ^;ʢ"5͚;ùF);f/@ o'e]:xJu ,J`83ǐr$Y?o/#bN;ʾ~Pi45R7ey;95bא#nd H2'vd}Dd_TK2-zkf/ztv\S֟%`Q|[aV '#+a14+n<(Ϫz:nʷ& S=;(e;~8^-^冋4c牪F]э|o#9է RH&5ud%{뵆8r0u1~x}m052%5.5OBΖcOpcY =&ٳ0 RJYy{J & X}R:meVM˻*X'Ğz.dO^OopTD9~^0U* ҹ,vO\g;iH,I7⠇L!)e4ɔeu乊vaԥ1moQhH 9{&10 'OMe)J:L) %~J&en2=Bl_رtVmQBSDC>W~Vs%]gȓ&Tj7v&SL f&!xY8r=4IxKҀ2uY|mv>Zv2ZvʎN ]Da6SC4ÞID'l:<``u]S|?#h*G;8hr  5;IQ]֑g%b  C1WLv}Q_yk䘋vbyV'Z͸%96*Ƴ> 9Rt;T0C,(a*jGTVL&MYΛ^/3h0/AW@;m*:WNq8 L&=邮|bMɚabX?%&[RQHmMOs]k.kӐPy-X#d*϶E)'BȳD2^aW, z&/3O]0&2utљD>$z%tf FNuo;VXb*a+ϯIx[p@[SBC Y6@堮(֔k2;t8rPgY1L>|Fގ t֛h=>2ԣ"IM>l͈tC۔@d:ٚ f{Wz8h/&]&ViO"8`"+jBfIXlzf6ne s:T `FyVnwg{OR,+>ec]4}lz4Ӂ591^?0&4m*§L+aYβԮ,J'yC1h@?'Zi S!^ *CxߎfnjY|Gr'}J?J 6xk(-_}#(ԛxVoUy7O6^"boXN_W'GT_[{OQUs~o ~u⏊?5S5\UE0Ht_M_7_]O_#u?jЙT>3zoF}>W9.8 #dؿk{޻]gX^v-89OnFspsM!᛭XK満.Ly~l坥r>t/km*R\`ݒnI[zKwYriZ#P۟dހ%,7o Ե[ޮ_x˒Ж0Gd> 6;,Me)Kٌfå..]Z.._ZKK^di--{ݲ-Zzҟk?I4~nS%w1|Y%-oޯwx.ݒFfqhOn-i#-?%u G<>9T#C?Uܓ&_u> WuPx>O7 ￵9hsp99脍Etψӹd玴6R{*ϸ%kcq-i,{-iamf>PP(귮6a*oSsM|_a"c| 0~ u4\xF%͜8{/qI78ҽl<eoolAOj8Q}(+yz`p[ cAOҟNN?F}ЃTM8bҍ9\ CDe5]xFi'uk䅃Nxns}kpcmswPڢ⠔t N p0eit"ImZdIZL~oM^sٲ߮.;f8K 
+{#0?)>ݟ+q!1WL%3;6We n꡻ ID`U)|dOq:}ґT7?"uلUOH Ap1M #gUMI݂ tDW'3Ui'1\IF䡪4d摒уx .Rě) B>pzÉQA=}Hu)q2# )rRf_c!#ˆ^Dl=l7'AE? #(rBBz,%E5Rځ6Vx=b+ʈ mNNz͋z#j- {KYc[p#_UEb·X;}KxH&9Y~i%m9  0[}UAE>Mc-zO,1nIysT9\1^<@KxI*vb֣\0(~c3Ūh2~Uj,&L>\O@& OMJxnEYxNޢIkn\M1] 7%5ȍA {]Dҩz8my%>!0?_CJNO}c.qbPrM=;!7U#=U!Ejh' >USa] >!=ޓHDθ F0Oh.&EO0J"vtE&:uE[QeSGxI=p 's ʣu^dHS$9ľoy/9z?!D~®MX`D"]7G9#:mxK/nR#3z ٤Z0瑞ѷ) Ȝ7gsD]!Qb03=)p\}74ܓVfuU;|+4uΛ<),$&fˡ![kzc rZ#-Ǩ?ԍ> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 96 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-014.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 107 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text /ImageC ] /Font << /F1 108 0 R/F2 109 0 R/F3 110 0 R>> /XObject << /Im0 111 0 R /Im1 112 0 R >>/ExtGState << >>/ColorSpace << /sRGB 113 0 R >>>> /Length 25412 /Filter /FlateDecode >> stream xKgRS! h2-a,0}gDc_lkZUY_U~?/__/G)_?AO>Qר_|}oL#ߣ_W-__~/~gOOPV|?Rc<_=lcmc?//VOX=//c_#YR=ӿ׏zSůy?|Gô̯^6[ҽģjk лpbƙ 'fo# ч;?&z0{#+7ĺa-lX㐥1{ec82{R~X{~Dv]/=$V&?~H sqw/G*SzӮ? 
pG[_-|%[Y~V -?l75-F*V.>16zp ]xl\q^y9SZ8rm];mduCs υfjB^޽Ļ^aWܾ%z5sԩ%×,YS0ua5'u; ሇO"bliþݐO:R(S֡]ӆKB]!s /Y{ Ips}V ~q1[ǑłvbE8&"&'|gZw\#BG$}]q3v[kj\¸+$Zk; [wPzצMppR:~kũjQ0*`+GzŴҝ*S|iW9r`궥aس5|6H0Uw{ې=_ãc4|Vo$V=|W R&6\`\VUK6gdҥ#aL5֯ϡw>1r/={RN mqΕh#Ɉ+_3Xf9a ;d+|  3!k3'n|m]ҢV_&daZRՅ_9\J]qV/kzm)H_KiNe +;D>a ƥʝ5BH˅#QPT5ZىtJuY# 9J^t5xvr+Lz@zD_ۅqIz8r"{]K{DH= 1,0a"+Eq4) ii H46^!`" :K0 .M *>]uLbU5tԌLDDLGK+P{8fS TWiٯ]~pg7珊7eq!T Ka;D]Wm|@+QHi%nBʫْc8L+xdFSlS@~8rl^7Fc qE ?K$Q_#U"*&o+.UE3a2PJd '1=Id:e+IG 'F _ 5ط Nt]h. l2՜ gK[=ݯWf mVʕxyVh 6BD") K'a&gщ]tFF"fA zx#@5dNK;f?,A:Z8md.[5dەzH#=>{Y0P$_3çyڴ=!dTƻ, xnN̆N2>25ÅMK䖵D 5t⚉ t`UIhk VKi**uC&myRcZ>Kbμ`xGǪ=D) 7fi8(/Y7;aRH}o@ Z:'9`3MX]\]z$"my;7NWO\lwtwdn&mP, ts$:!x+iVfۘ>nVwGn'Ms6Bi.`XP Ws0޽'u.Ć"f uB!(Uvw]2Ez;XH բJZRqo Zq_/+ IV/c'?no%п(d7/Bac.{P"YJ Ne>|1`l@E+/p|8kSʇKZE~E= o̻ق$ܨ*FĨđAIj <حVVz'CR0[MAfקMdmD";rUA_1zduȧ^\:3zxsDzwuEz?'a)?P2;k: 2$rf|d+9%*]mG>r<òXeehC:#ɨ_,֤-wci՚3}s~ĩ5$;yWT9SaW?,@m<\8:"b?7=DTE~5 #s%uV X1K'NE ߢB^|wq5mMtT!<:6ƁU=&:}{hwWݎ'hxID*ervG:~6.JiޕZm`4r ͮl ]y9\cuJ H.&!04F*rv2kFnMnG??v*Aknl)L/VҗEWVT}3ȔhH[tozX:JI%aQĻǚ T`lsMR+ (OA=XOkP,d !g4N%^[ RGQD Q'-$G]j,Yo % at?@ жOtCF wRY?LEhuMg@Pڴa}2%Ap >Ǣe;V9_RQS9M'&ŕE85{Y1Z 9pʮRIBWgf\^o&Ƹ40mY|uΜI:맺\Cb6]zH-z3~9MN(MGW|~9TV A[Zh4?Gׅތ|<-o×xPbvfkL:x'л BDW)p"CAsC-U9mɃT!Y]FG4l~Ddm| ן#bXVAfž9D|3LzYi?x]CX hò^Ge ˳_֊4ͬ`;^e*&#ţf|,d4',Ɨ0MU'Ūw-]-l2_ T(&oZq tjwvN 4lZ:]AU ˨Gtk=( m(jpEOSo͉-CwsӢhY m=QI ڡ[ybRv%4z*5dPeEw;W.ɁVFr+Έ̧k;nhfe g a Tf3'u#EOzg͸{3cRߜ9LhoV[6 ӓ1i?%ϫ[T~%9fj4*j&0^u2`{h˧c ›k R;p\[͑gÓN6#]UNŽᰃ]5БPF¾n:;Ɂ,ji{مaf!F J_WQ, ( o J ّq['9Y@={Mb\>,Yܥ7 8{<+[wtyDbqYAEzqB͢qb'OjNrEafq ö(toCtp/.hۗbZ7;C [xx1zOn Q[lR.;fg.`!j U4feeW2oĚՌ*trI$@/S lLl㢸#Q h#˸e4z4vu#ndnMJ㖧.Np7R)Bv^CʳhTY6ݩ3HYuV$k|R!q]lPprw1C%1aLH}BFd8$!(`ꣳ:4੹G#2^n+dߚ|sW>WknPbfڦ ]ih5[0^S_ eڦy&kk'w,{υ,QUIQ\#ukt"/!=suc0fwu E&iӽ50ޅglWA5Gʕ/2 s䤳->4KyŶU z07 >}KG߄bnpq}E1\%0oDüޕLYs0 |6=VҦ!>gZZEؾs8/^=W`_^ՍxfC -\&p'af~^!Ea&Y 0KaN'0.4ܣr, vI֯ >F %ookIUX8MRkY5G߯oxts6\8A5\_ U:LԚ) BUS`MuMJ]ߝZuwI6i5!bzMͱ 8D(z8*,GOd֫Q\OГD}.O@?uרּ8ߊ 
\+N`D'}gy1QOTO9bXb'N5Q'c2>tLQJJ0-.٧0l}oI?2LZ_({_ARM5wX?nj@-b$>$KD2=S)W&8FD.U7_#5&Յ<ྍϐNzKDoBhPllA ZZذ>MWV_ ڭwu/(_6_HY/w+5#\kb۴%11) f'q2uQ0RkSAvVv5Og'n2@Pꙻ9`K33J[Cin0ꩯ9C/j'zP${[z ;N l|{Gz`:@mԏ>k wU ?t/:VκHM;NHؓ-3BQb%={AȈK1h |=re}:GWF-P7@*31F7=(Fễ0=F6sffytؒpcCbaj[6 7 賥'\uU7 S÷:z6(L 'pؔvBBԢ4MOXy4J|OWܿz;$b\y "}hНj[ !1y}Lk"mjrPě]afxo KkEJ=l5(^+߇s]WOSPV^zzpO]&;QɗGzx72u!cJ+-Uu=f43Kסh;+Ow5;JȦ^V=s!WV?N Juy8Fb\CKJ$9`(ܡ\uniVF?Af~$r"#W\5 vPnI@'|wuo.SA=rJUj=J ~t3%ئmBu-A(nu.[JrڳL5No7^Z*pA[Lu\Em&80m9uL̡r} &%Zt$d1= xuA9(%Ɩ*mC$P1BA -JL,wWztɅړim(C )78+>a+(v%-['BJސq3Ng-,T\j]8 dLgb \8Jiq w2<MOr2*e$7WJN' 6jm{ߑD=ޝ U)U_Y[ |1pv,_\hA\0=JVv 1'e:H;O"bܲ-[GbKV$H\b>RgVAL[x?P,jdTQܹ>."?ԯDthtSCc~$WɎj~ŁE}`LKPēqmx,`NCʚvrAGV+NK.7M̌(,3ˍ =Z) Ti5R-V#,iU`T=|+wi;/֑Mv7mST R^IÑ:M(:@S* W^. ;-6nSnX_rPA6n,d%uCC`׎>` 1g\6mOglM:ȇ٧Qחf"2,Ѣd\zDB-TTG>|a4j?:S![éN(b&X^eꀗʂz0F.QX!f5|3#3- x6ROx2ZOXbDB;3(.#{3a,[&zAL!kVBmK:mbAWS RҦK3E~2xE u#QP1;r,R,tue@rw6ȰH G /`WZ: `X}3w=Ic8Tw]s39ą|45;j8B>Ozwj*Y'$oyFL[B=ӳ7Q2ijC̜J'] ֨i"'"5mTu%hy<*|_7dl#Z%^] "Dؕ?4*$IrM P_8 2sN:wX1qH}w+27+ZmcxEѱM WwVw7 [j w ᨦѝ0LU~'Dr8!#h;礖K"dgdAҚP}?pp L\*2j!OK9'],t`M')T ]eJ5]EjI~~$5hղ3dBcԙ@iSݥr {(ȪXe̍fp5z.]#\L - %\cVfZZ!f6ܪ& JbqBC ^`80ٛj5DwgjbDOLx~r86+b,dJ5lȷ!ISS5zdO5\kk"n$#lPJ|m={8kkNrbF\=QW;I71f. 
[֔ըp7,nJ7T.N;bRKQjHK!+ g~+O7%4]阙*719fD|m#$e6NZN8hi9 pd6ʉopB F,ʹ`:˿>\:'8vfZ$ JUNdz?3\r wimFc{8?=MZ䰶 y?&i5) BLsg=C^YpJ2u]I6=oɈ?[*=~wU^f-]#Dh:}h8`OY W|K{e+I`iCv3Ɂ_k֛TGRo*d]4~δ5\'N"pC!מ6T;ԙwI=FQH\]E.[|zfGj:x%Qt %Q"$EAhI^oV;PoY:bv(&`d>~!==RܟgRf62)ajJNVr0ჵXd5[\ORQji#иz`cưz}H)5uvHIAbVq\NEQF'5zYAa*Ju\M"?|x3]Xn~]֟P)hZXl}[/.BRbychI0iPxq/ru'weGU\ WrFwy4*۲>ӧij>f lyu`m;dȣ挾Y @*N.aT~5\1e]20GF!CZ*Mٻ#P}u 33kZn.:6b{F 51ԌEW\K52^V>~X;ς.pWYi 3LaD>p"QaRmFOvI{e$q%m=p݃brto;C|7m-p<|&NbQ_]!dI}Lŏk& vRÇO DHĻZ+ a;Ywcܱ>atѿn Q]?3ruwS9gz w\qtK׫k"2'@]_g=!4 hL'NA`*uX(λ-m ]Q]xdfhn&A=R pbL75Si x}m2=%+|x5N=nlhg$4D ?'nkԧu&&Ka\ϛV$JQ9[Wxk4b\j K5QےL?WdzvJ.I)E:Qu-J.pLD#TN㰚muWKb4\_2h͇ÔNjjaG_B& j<"m?K>U@RMmG.bz#tzԮĘH߽vQ-@b h嫛l4GWC5#PN .4j?q+JkT:PᲚ\hy7],NłҸޝEB†[XQwݛX-4jZƣCK9VF$ahI J8R DPjA1e"2E( l5%nlBnz(Dn" Qz3M*\=Ñk4M&%LhЌo)I[Kp`v]:Jݢ=wPbY4^)}tdOSgJ!!8JnZ`|1bzUXZ+ғ?\;5{WGcb a}]NXGk, b^3{-+OTzдhng([[On`i Y-0*#gIb:/Bi\$6). zb.>4&2=ړtG `IQQf^EZ`` ?,N#*\}EXD1M#F8T1WHɎ'Yxu0nVFjg0e!Vo(9/ b$n61MM=,<X ]uz=#-ʕbJ9q;V(GWLHgP42悼ܨ*l",EKIbb aۉ5y xSpT* WIOS ,Ul#@ ѮE#=o~g 1* -7^Sk4LaSj Gnp5S #^m-6oQ+vg;KY5[Jy~,GuS 0Q!)%䧦ĶK.XL\uǹD'{T ܶE0b٩C( +cԟk.:ÚsZK'*䶻_{;[ҭ-qp%P=;j1¾m$byv\oFഒJ|aBr%{(b^wjNrGn#ĝ—uG ];5;wC[K :$|<2I@VWa&N֠! #puiPayJUu"F+;u&$cZp|̶_IŅԞ';T`4r&J gL$۠e;Qۯ5oG!c)қ양%h̟P8I~?ϳ'+vz$bE!ꤟN@(Ofĉ}TQ~1W IĪI,A99%-Ȼ~rD^N=î)"yajG^NyU 'i;6Ъ(P!c[:<# ¬̄"z1Dbg)^oV0l0vfجRÎnVX e}úʔJD5mgȧЬ` $\[%ΤViEKofK \fb mq+M67{ut{[xo@d_40wq֐{>4%[A@vjx, lId[Ji ˯#~†|.MlKީ`4^QҲŠ?HԽlXĮ5gDIvd:][Q UĮkMZӢ߽m2&FisU= ._ҎՐ:7r]ʆBxa1ðo݂dFJ00Jp5s|K{~pIQ&e787KdmȔws@rKd͓ #CsJ2JovCoWLq\~y{&&M?h-l ,$FFE'&Ph_:(k1 .\ :rcoNڱONn6n}U?6 m iŎL:wa? 
Ћ&naXb&$a1]WYL~YJT?j=Uیi"RW)Tea#?8g )L7(Ny,oba6:u*AtL_4> /,pc&UܨB&2iUۓvؤ{ KZk'|b@P;fMrs6Si`_݌#e҄y䋹LI,οw)73q  gfS'V&< $|8V͐dxRK7FCnU8w양kNb~`gZ 5_s٩ߣI&rѩޅ rmQ5yِ;"[oNjLGO~]nL5N*,HFn$?OU^KO; 'K7`&fͨB?_P,X54&YIN]ޠwu;#2_X/I9a *%=e~*,$CZU`MZ#'Ԧ IpcKx˺ {[c0ctr&y-Qe 8Est fa; |8oO2V`&,UYc:Abc̈́wvߊ)jh(k*ֈ%!W3L:K6,kNG"d`&7FZv"%EӳSFQ~aUg i(.$7oEr"c5Gr ah^'Pc'Ğ`>Qa^NT#["wvP^e&Vbg1CVGZ22TIKqX3Q/O'A.R$lfdPȞ#LC32\(a oY^pC.d4[t(κ_[=P=:m%Hh6 `7u2 ju籠)l2s3CVĤ.H(1V W$L<4F׬!>sʨ/1Zٶ1Ҡn3(M0ˊEs/ \#u3knrX +̱u38fJmhȄr$)8m!ӱ[HH% wZjݎ':bI\ڐxʧD/gߛ@Ӱ*q81J"S+[;I:K'Vo&,,:oH(%:/>76>(%~jTv.$90qZ{ȟpȓ:Kb0JG &B;Y)ᅄڬFQ)JP~_ʋnuX~iv$O(Tc$I\-:P " T[m0u#җ$8e&|^ %i QO>7>ae'dBk~+ݶQ'JVE^geE絻 [ $>v&d=}w:1ƱzVXARQ)$ PN쨬%W?:2;  >]^b)6J`f2mz_%I5%sʗ$tt Is z\g˨S#TFڭ&zov[FIDBQew5 a+j"wZ2)|#@Uh (:ܭʔTfcvt.ӝWH١N1CVJ 鶓M#f[#};t6d- _jޫ()Ѡâ N"oDtqÐڅh {TFl)4R- K9vmZ$ axM~F=g^#*Q) Љīg7-/pEjםޔ,9z8ɰjՕL݉G>lB6vV싋yTԫ,Wl ^̺td\y".0P0pU~xӢ#S^ C&mu-y+HMZP7KW)hrh'܌*X+G"5hk"6WwzRXt[/+Xkg{8hyN!0Z40{¯ ?a-}83C۞#p9ă󈩲pi I*zx {LpksTAIeBH[4zna &ٳOvIzJ[G~U[5{Z3_@{8~@-KU bQ/$t^b"Ǩt3nZ؉2ߝdA/A}~{~!nұW.>}wUw;MD 0ðaǰL{w3 oAz[T̡+ײO;:2bO|vѦMLԱn"4&t'ehO44O' oN%/KK((n95R4IbH뿇S$OF5L'-(ŒMmFg'P=|+e3L!W߽g5-V / ϻd ӝ>ɕGzd~]s!Y)WՓ'wbT2 AcZR4yweC{ΕNўD̎m`AHPЎNTᕜz]Dj-ZaKab++8%^ܫJu37#ݺӾȒ^6S b7;EI1qLAPtUƉ"^E¬F(2ٷ 6CimfQcF@2Pk$JvC\e}~֒Ay+vIk^XqЃɍ\bB^k>a4n\تRvmXͨw |H<8m4:|;:!M%2%?9'`LĤk[zA`:.5> /bQȢ4pVߦZGtF$4 ҵ4`#2@Br,]5$~'t2fU u'h_Qioc~SIryLw׈}1o*żI A[5LX7+TfLw[CU[;v!DrH=xY'-cb\2W6/R4RqI O9?Hl^N~eNa$4Y"+G$c7ćDg4ٶxƷ Aq)>cq,Ա1 1bc%05:%ki.Bd+&ahV"ֻOa/>JQAzJ-Fs>1ȏ[2Km8U! 2yD4TNr\gN$5Յ.q.|`Y*hO$_&@DP(b7.O'FцyU7 fq}0'6% zfJBj;˕YQ$IcmRavYA%5Βd!ќbov3n}7WE-Ww u5k;|èg5['hWdѤf78D]r'^hʴ7%j Z*[ed}U g3@m$^pc7[2|Ԉ kӀfUeRbEJo*YLt5H<$1 YTzN4؎|ij!MQ\%c >% =YvVnd_eS if,D~[S#bv)S?+ŕ'4Vl ""&ә\IZw0דSaz}w8 MُE#+3hq~bxms\ތ~:䗡,[3{9ݘfXYeNH- 2‡׉$U.fgEI?q^H]ϸ&]Ă?҃w[x&]LL#_JǬdٯ1rL;wEyN:/bL2s^ ۺ"HaV?Hdnih=[3,g%ָ&fSɓ}E|Db3dR0" ͪnIo1%3d& ҫZCbImX҄ dXV8,s_-uV2JbTwXW&"`9MIThې zgXM`] 3i*YbLK0/5/:B|Kl]eȊti^<&;M_7%&ljYd?^VՇ0E'iYrV˴O*Ú!{[D nP/9l;B (zzҒTUR.8ˡmTp{a3A/<·$Euʩ R G44ImΤf*QLΌOl5Bua"א/i!] 
S y%Zj͈cXDXkS rfKbY:N(o;q1bZ)ҋ #Ǥ$Vi["6%F:*38aYU"soFtm%vl1INՅ? ړRL+P\`+ϼ䧢ƮD=nR#4_fnM1|D@B-R4!L$1.ʺ/~Iy.#2ND XWu@^zf* &5|N.c[liP".8 k2$S_< Ht \XP+()3UH<:brypvv$apa JU@ƴdlLw.uvPb[In2 aI5#xY+W~T.$"iwEEٶ1^x7_&$Zfxϓvr+3[BSE֌CjdF$R֨rJ XF*$v$pu]Jf^01"߻щnɋh鬴HӥRno8EפI+'n>’VJc$ny/fjL ![aq2Ky$r3T'E9KAX.ɛ_]l}3*,pď&gIeɻIq.SÑGI2xw$^R.>&^#_P+)(ۡ֨8wMI@Z0 ~Gr?GZ@jWIp#1ḟn-Bl) InKi'7Fk-[B:K3f֜`A#~vy+F}թi=; V!bi6Th.̄%٠bZm0z*Ou+UG.\!t?~=?T Tҹ9VO+bg~cU\N"(|j%y2SMf.Fmw}c5f۴OJ<]Lr:kN5!Ere1I#L%3;lت{#+- SPkfaſ'Ng[]=HFJM"{)h*rpG@y9&`V,.ꮫXcմ4h}mi(kK)b0$azlzXnsݲrdd[İ>Z5RZiDA1~ɌT];L9MDIRK,.`=kpLWusѺhA,Z/Evr-OUd2,F`:ŒK>1Xfo01~  دPG="Ju'lDFD[RP A$oL#BЏ_cTْ?XGw7bIt uXgf*Yq* z7HAJJy^F*!V 9[Pn` &1XDYv, d M^E354#Vѓ'44 ,D} K1Oetf6e5Ŵ^cH^ʎ7z3A2,{Gl  :&@ǼDL\tTVn 'rh݆SwU@7yO:F+6)Ҹ0vH.MF;9$38MRy:SN,L4on6g[M>DVo;yQD]ɯ%D֧RHRK:KOƜ[{\&wr,AB:ANddCc AaH!dMċExp3*g8"D钖0Cq-BI=ы_HoRe|f fWnKb%frw݅MYsȟN k^29_#]LE%{Ѭc ̻4DS4WZHuKa )fzrZPD3z8X$þDPJ aT ,7ITvY_=<$"RI&CrRV/JAGtETtE}P&";Ia aܽR-N'CNP %{~Aj;軛쑗 Z:%ƉDDD"{ҷ;%Oocʤґ,MɕGlq'E-h0_Y"?bo„sb4ΌѭN`kChho;atTD4Etex8Rw(R7??1{:]6֝3sq:$"!%3 J BzSxG o=Z,ٓly\r7 `!1r?MAp&?ܭ:0#.&'ch-*,I:p.ƞ#iũ%5(}ix#1N{$zt,%bbV79DW>8uؐ[MۿjҨ9Q*Vi+R-.&.I4fQgC{ӱߦlLhkUzwk5 ܳR 5gW궥at.d/ ;E yv"9Cs&rY |;q)W:]éÆDauXчJEé1S-H*_.-&tKAM؊H ٫ȓXc&H'9C k D_y-ݰd:*f3eJkte+1gDl*jVʮuo$ԕ{JRq%]HlB+ڎ-<vp -~p|J0$2*3k)V}[j uhQ VnP:Xv}-ܢ/X&Sb|nƄ(2UAG JXEg uy`$EO$kҒ0ϵelbPb$+~mY(Cq}[}HeġߦvvgBjׅf蝪h RESFs/ /5zqmQ(&bn "oVN)^Sè(Nn%;HpK(mPϒ=eg&_#f?Uht-_ 43uk&@U^ ;K{"C#ȭLLrcE'@oc롋F#')¶,Ǵĸ.TWJkV5iPwOy,y֬WJ%u SYE>r)Qdrd ֓e#pza甃C$]RD{7{Ku0hĿ*%iAe57+ϏsH{JDKId!Zp&!HNȔ(Q/gM*8GO2Yqq#D-a;$I^TN+􍆋.R 1ݶ"W2-ud֡:I^[XxDP-hJvt׮ț&@"h8bhFbaN4b=I Qn,lMRc@\AQ#H MZ6&%j,2qΌqlLf`#yɱ̱ͬPFg$p\tD`#)LHEV&«N0Lv/ !bd"[N#:]:K*(p).j+@}BO~ٚDžغ=b`jq\ɥBXu &U5"e=p%1[u1rMx2RQaȹJ tfm#mV5^ɏ{/VC.WnY2yorLCqnǢboƵ^RO6S~]SVaݨVHFgƦyV ^.h>kpfxQA"='޶j&{2OaȦnDL1h9Aӷ1h1>=nd/6`{2!a 9t֙. 
Կv.SeɗܧFC6{1MD9;]{aԡ.Og@%?Fʼn¢*UD7K-jL#(zcy\*`$rAm38$LtӭF ES|0U) {-֯6ەQtWGmWKۭa=|6wp*iWd%IKęE\DmXvV hf%o}U4X &fJj:eGbQknbb^geGNHhu uҩtd4`& ڴp?V,%)k+SZYl^MٹT&:I#C5\!p;OJ endstream endobj 111 0 obj << /Type /XObject /Subtype /Image /Width 201 /Height 101 /ColorSpace 113 0 R /BitsPerComponent 8 /Length 16064 /Filter /FlateDecode >> stream x]UoiE?b;3&'azǑ@:`k72(1B1c!H ' /όRjTvߚ}g}NVzussZ{~zpOg~[_^p˚1wpޒ{]Ë|,/x2ߌ/.wX ovk S3sxr=k .6nUKOs'g{^~zpuM ,}b>z;7n:'ݼeMx'w nMVa-9C8κc?_ݿO\-t>?n nPhVp M]5' tq8c|' Gv@Ex\D>zwÉ̫͝[ht=kZh=k+K [ ]s/_~H3/ `\s$q53Xkܓ p}Lm3)f+_ ׌'}҇8`|qdA5)\9W$:HW {%\n%\U8ې^U)\?Iu88pW<V * d vrT"W%ZGgN:!NseW7w]sЮc>L\ ~ h8ނErG{z0ȟhlbtES\*W $CE?Gh&JtUG+PJ\`ƹQ Z EۓT%zhVWlnВu iT"Z%WB-wKKkK o $^]w^ t"$h?ߵ&ui唏bn94>!j|-$>vsE0|`%PsqR.CȘWҫM!߹"ZJSSd+qEڗj7X?HO2B]h~`<_h57½Na.^k<"9p`a/\2]ʍ% 1pAwGL&OhI|+:ϔ>G*\UXK;\*<RIBqLq5d=RCəS6X&:H?.q\Ēj0GgOM]ZЂMpׄ0 t2r8z"Hu`%<.kN[{kOd1ᘍ)'SHEV]UbEpݪM3SE8]\cXqx1~הM~4RJ 5O <&0$\'JG#+U<8Z8ZjԸ4@3Dr;1~-E8]p@dBk{E[Z.0tz` ;62sWu\wK-qEW\*< 6Q¥"n1>-x#¡jGCk/hQ^GC}r^\‖ ׁ$\p,~'%="E:ad!>v<[R"UZFR. D/xZxweEZ/bv qpxn Hr Y Э \ %W8M  —"Zt##jN،]Utq btsu, <ړf_q-D|Y^\\ L$k~] DA@z"cR,bU!Y?3  &F Bo!Ѐ|u> AN9>o,:`1>%WaJH_< >vs( ݨ"`=t-b?h+X0ʥ雙G/ݟ΍ B[ԸrUI $ \'I>xr<I"vp5R*`}Gk, % *!_8ѭr8;_3/) J h'hkuIR lz-#g^>>SS*2phChX? 7י=@ .'nx(xWBZ40v/@ibfh]ur <+xy95R#E⊳3.Ē.\*cDa 5LH8>Bpdp(\󰔣"%zpv 0K.}]Eo:~R%hcti W hCCW"ox` .Iv 'ݼ[<}ނ7|EIAdF79Z,P8d X F>ApGLCԝ+$ ?P-W-E(ͺvp2~𾜫cmwf ɣ3[Ez88g  NuƓ+@WnqBk \wprMs+~;]mk2.A7stL\8 @ ~JtZIpre q7&KC9=ewn唏_sf4g20PפnZ 2s4MܣH78S(1 :<ou e~SNb|Ct9Z; $\{g}\zʂ]lO~T]Wd Ejh5>nc,x~h(É]B-DN1UgpúW_6Pa5]rb徴|fD~Oq+<X1O=cﴆDxl"gKtM u( ţ3+ؔ.UUD~ G04\\4#.)08 ѧi2""]@ 6 3Zpq5 O9tL9hh_<2RXy6+7O!~GZRN˅-SpiXNlX9+vqEZTc+E<]qᗅ v`ړWÓoC+C)!SVh(\b`ծT ZI}Z֐hiV%9]tIʩct=IU.|0IJѢxĄvKytS8]T h]D E` սi= JPF\y(D}xD,qϓ."kQB%JDZ^R|n5j깈- ckQ#tF7 +_)XՓCe|Yqw"f-D5XuMէaB(TC0=%og`}IdLKEn+KRIS蒅ESr.]t=Z.E!ܙT2PNՓyJEŸ-G. 
тAT~% ^LG11\ͫ^ @xҪJjRBT,_.1FHJϣ03n&+,hKҏK.8a| @KyGmV]+ܰmhZFkQ8مC]hICH<CB:W!bYɅ`J6ѢAOLE>Mplb@+wHݪU99!,t"W 0E_cr1Qhׂ5<"R?IbYMph)\e)wO׾RN S(zWŔxp1-@Ejx=ˍ/㥡tO-h&J.j+^zʇLV.HQ=˙SŇ96Zy)c\UJ0~r PO|p&V{P༯*zk;>_ sM'=ECPv)Y;qᇆKb䊱,fp"7'k &DmbȢQVH -JE`9RQ?lE̽2 N UPXS6W5&w..^t1QJ h+b j,]kpR_ES#\.<2nD cc4.hɍ/=NiJ. 񎖧=(& < sERYe[ɘ{e4fi72GʜW9T+0NZ5~U g3hmhuWũahHdncDD k=S-Bڰgujj qy]hx) 0.G.aZc|11L&$.W?|ϊTR\VH]\zvx{FZAklF+-e ˨qڋo![!hzs%ڄ5mEJ CDRյ~(Y2T,3GN\ 9&$ՠ}T?QGtɥ]!z&}UBk=7|,[Ç->l1Z!1h\w&}](lK ;oڐ:B?fTk%3$UTNRSƤ`dqh#3%Ex' ==..ÖCDW>TLRy,E)4bbHkd?BZ!Y@1)];(3'-D},e-_!i)![և bad=}s,.zT;u=֪X*A X} ֩ghZjjuSfڜ]V@\͙Ŧ4jl Z)̙Yo;N ]O2&-lnrTS<ݳFi ZZ;`*WU_+4^A\ZyK杖\BlrB%/1+;ITPx(;-Lh1~gFUEkM;~zZQ ܧj!O>i*:nh+PӁ߾fkj96_ɍ+-{w6xo 9.\cZ8=x8s;dG֐e;tC(YU3ֵax `{ A;oj!/;ɜp'GB"ZUwgZ ;(Z&R:lO&&Ɂ徍֎vjݮ~==hybR4-OWޛђA\WBM \ W1_2jhs$A=U֔̔] u!>&]CxPǭeeWi!hiihb"M@Ku]຋K_^U:H# ^]m܁wҨ2_:f=h%D.*8%ZetGv[u#E'9pIy'zZxijڕ"Dt5^]yp}[2_Y1?x#pōRV "TXPR'jk4SBj-pyõx2=.ZTA4тfݞX]sZ@ \A ^ũdK,r8˪&Zzp nW@TTgnCR\J:涜${|ٰOinakP/q\jQ jֻ+xD s%ܸ/ٷW.Ւ0#߻+5%E~;ڻR^PbySg.zNr|[ȟ*Z*\e!lصӅfdR#wBRYZ!)\h+BxY9HօgoI&(S N H ؆'}E+7Ӯ@DzRV~ UnG>&hu WuZL,Ws7i(j߅uiwEn&? Ҵv nrjѶ9yﰉh]t֖,[[-KMK΍%t@mvsXW1֗*Wl !(F Չ h|yQm!˷ w(ѪVh=;ڝ[8q"^i__qŭ1IjfGsMZ2]wZp(sEQ;u$Vj~+D:W m"Vi=F_FXEW(jh+'v{pqᴡZ*dNhaxO|ؖcr4Cd>~q_uICkM^_{%|k P9`0E}q~{jn[ĕV*Yr^.>W+\pzY,]=^ZEPUCKCCY&m+( !ůgڢFpG8bf?%Όթ$K[^ӿ48gVXE.h/AXnpГ3AI[C|4N '[=pyz+}8(Y;7lOhIOA(5D/‗< lq>@ cg4^n Gesju8H2WK??!Wl'N#[- ˝ JNQ ׉dGƳA'\$ Ka9cx:s.;ڒ\Xh}58g+H1獻%T-i` N ܺuUE%ՒO }bӥ"VK *O|Xx8Á8,a)`|6ޝ:+pm/$Kh $&9R҂.#Ӭ F2C{VGKsvU-[TXa/qڶa(U֎p9]2U7d0>֖/ b8>g]5|{hdxܛ` YaA~Z9wLO'pp202sxv&'btuVmJdMTɕŵMYj_V U?#b\+jo1rW 2~)ݝ~2F-%Y( ;wO^u57 |B p<6* Ϩy_xz|$fn|-#G2=WJϪgF-T7X-&8J׫}|{XiZpqF^ܰT;gS‚C-Y韇0`"F<>iK9 q -U.|NwLa]H/l}[-(sΞHtzߞUimtǐIh}yq8O9Ę)4ގԎfk›Z4P9W ^5q[@DBDוC ׼q:AꂪG:h4OY}ʐSc V>+}&vC~paSv9CdgPp5=Kg߆w ۓp'pMłW-t2c! 
"ݭp)rD{bhdIvg,VCB/zO\vW>-&q ` YoX(KxZt~>rgr`jZDXR˯EVe8@kI/rЂWƒ*ߏ'u"\U{'Lx`D{_a?I2v.17D`6E~[7[!sw#)]p4=\1:ƹᄩ5DqbqVYFѕ0 uf#ӷ҆O<4)*cq5>NMkL &nZ7dất&rC{Lrם\->s E&b^nړӮD ݒ^/9ZBh1{Y8YsQ-** hɀV- wNcajK1& UL{(U|'yzH2[qi yz8R) ٧tgNQa \K5bh:QwjSj&WɹYyʢŦ PH;Ȋ{$ h !@#ގCᘀ.` *#h/Ƈ*^?e4(z:.彷CYZ7!4qԅxNJU `(L'X!F"Qq,ߠMe9뒶w=S͙EFk˖t5dѰl[2;`UC]x3` P C;>ʶ$8<#%blF c : ٤G(o1m;t-- 9D bAޅmDp_>i?aˆSޚ,K,b_ hY>ZxUǡMܖ|%6\r!>/BB1^IR:|6`t}|x|хx5:T#̔ZЎ.(|d%ivy>,Xl1m{o퓋V~/d gM )srg6tx4ޏ\u'Z2@KkEy=\|2١b?+:NO^4H"|#fZʙ’@EFܧ&0(,,ȕݒG7ECx|ok.¶2!\qyHfx%m"]_V!8v&'gAy!شݢM,c%]-xAET>Չnll4%lx#7Eя2Sh_8 q/n˔á&7Ls Б@ߚDcEwUCnrբ^q';Z&Fé߆:rM#'0ӻ,!VF*1 b h[JvҕZZHnٗIt+UpNҕΟY1ɯ҄Zt'Nw]YyVPl9]X5h8 Y 0(xY[lZx1x*ȁR+H .@&>ؗj.d-|%eV,v =rsB`ed5%z 4sE#f~&Jq0>Wͬz>c >ahOr 069;߲m &CC,q <`hpZtUnCv}R* d8>Ӈi8cl jn-<4M9^B?gҡ$F?XcmdM]d{S|ߎ?.⽪Zo&/:SuTKf]@6W|=9k0" DzV$bY,q\js5 hQ@);R@ .aNGx GYգZTᒕo68NREOljneNf3Waa,x )U{pk5aTDҴ)lQAʝA*sE T>hMԩIwhhRCki*W=ݞ $gx@?r -\p犒Q-55->NJ¶PrhSX}lh`s&)q4ٖ`S [pBIb|OF^կ:YA 5{T[U+9ٹ3Jn1"`tN;+VLA5#$볃B&`Yz/մGD2(-:Lnљ']kGIc܅/q6^l|' T;5PBh#ܐ1۔.P .[f8=,tGr>moL 9f=2\L%~kU*02,\N@<3q,p58̦-iӸ8Z]۳uZlQP~Z{}I-qX lj8.+_Q ޔezo%*kVa'ea5vfgLro^t}9 ᯉ`n_\UxBtaɤW[o\K_FƼA7  k$jS/L>>xwg}y9[!_8>/lVEdRl]JAػR$~8mœN[s9_cڮjGˁ |{el볹ܖ YDLz x}ah,;ۦUfA㽼nJ;Gi\) 5=v9jŠKjeH\ilᘰ2 [Ӳ_+q%kTߝ̔qo xtU܉}g;mqbU&W.X~2 opvy␰Nwd?A&AjR6Qk,P]Ԡ?m~;js6zE.| u9WtD&S3>qu ^IzCB$W&F=wk1P5BK}$ ȫ!3i2v]rr Hc /Vi!NΪ&]2]$A?(i,ƛ1@T#עj:OZ]^9imen^T<˵a9Xy&adj4n\8xɲ. fXRXX|*UNLmTH7^rno)h{s܉݆}*Ra#Dҍ|A@! 
{c{h]uZ@-#ߑ89"Zùj<􀒮 /+,K '>{DoVbuKP/^6MKUR& hn|PmGk2z?W#\ӴN'ySdV^]ڿ6 endstream endobj 112 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 1024 /ColorSpace 113 0 R /BitsPerComponent 8 /Length 723 /Filter /FlateDecode >> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 120 0 obj << /Length 500 /Filter /FlateDecode >> stream xڕS_o0 II0$/H}HM;8\A71TqPjt^ΖN;۶ŏBյ7?s6b+.'J2 %h!ڮ.N|@H'4}b:V_c ⺒PIZp3"3=oA҈a6Вʵ.yT4b1J^ZOH)ü$();RfR*l]h2a{d2*#>~PG ל«$_tml%&+_ sF~{`TϤZ 'ȮJ=wFW<Ͷo8<0 i柨Nslt.ŁV+k8+&c`K[:ap6j9/+-i ^>60Rk$@Fƶ@cjBlrC# endstream endobj 97 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-015.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 122 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 123 0 R>> /ExtGState << >>/ColorSpace << /sRGB 124 0 R >>>> /Length 379353 /Filter /FlateDecode >> stream x}KLݼV+H IӞ̞x FoݵK|^D?ꓮ 9d0֟:~k?~g,;/?,׺??qw?z><׆/ǪϓcWz;|s^ɾzqIv䯏,|+=y>z;q,q+>y~5vtUWzw'=ޙ~V&ZY78Vmmڛ^Oz'DT+V濱lqA0[ٱ]^i4&>=hlJ[6+/y ;yyW ԫxȆYa2gޕs7%/)μ_“V+VG_ٯD+leoLx^%Zm<3O38c֫xGu ag4ϘRem珽ϿG3;5iGKWA?&s ZoDe=XˎOoetg塬/dJ:p3Wƞ𬄃x_ ۄa{G}6~U}JafG'_Yh8Ux ,}Za2oKdJqPOX9yeyiW!=Jl2޻Rm*w8V~^%ZU=Jly쁼KU{k>9ʾpXQC8VwQ+cEqcC=$lg]y쁎 5Xn5Ubq%eWN^?Wlq蕬htGr~$h<θ*X4ׂEg?GUU4mcT`ZvV}a6+ƕ2kίޞ 0 g~ m;=2GW2\lᝪWֶ+Y Vf /6u5Z-?*HUbmΏd?mϨUV`Z Zxª1:le~*FB 
^E3P*c~y2„>U㿩ᰏde/Wߗ}[\IS3W'1}adDZ*}f#^iu_cz*G/`l}Sz'nM5m}g>zqv:yƼohStFoo^{OA2*ߧ7&XS՞Wsfu?s}dU8V-HB<1cw>C 2wZ OʫպGWnHꡲ ZW;ocr EMUV a ʈ;+gW^7'c#vĴ>+9>G?]Bxρ9MqXnH+*Ag~X,?*= '{Wfۺy^NH78c4y &Y.ZFA_?#w1<*y8d.ђ߫g8a em 2#k\݈ޫ`𛾋s2'0w"6׵]x)9@V/X&;#eNϷTK=p1c?:o"4;|%PIq7Vҝd 봊S ~ 1OtA'nGic7y5KnὠVx| O@8_|&?^ gѐz|iÿV̅q3jgO\a23Ggt WZ;=3DY&3,yoNt2oP?D&M֫ 9)kQ9p,2`G$q;o9)K&B̠篳?Q}O.#y{{&| O~ddɾٞגwq1=g~78fp:coK1P~3ո^FO|͘_=T@$a?|%?_m~zրH=}f BB5 *uN_L^j\q/~SSnRs(v;?D\]>qwXەc.K [x<c~*gѾUp=rHPq77id%dhJ6 >xɈؾ/={%/`݉dF6 *^nSf+1}gz'o~lu*h R>s^dϔ wqcIfg{$ 4i$Ȟv >SuZVs%bvbb`Q#O_nAU/ƢUS<"9MkH묶wH(Ŀ.rs OZN٫ZnEASE){UuەE(!$#mީB "FwU3˫pgq Ux7ߕuѰz+dG|ROqh] UqIB6v6ALzh$Ȟ>N3_З.\kB6c50Sd8VqQaau{d_ޝU*.<0v|'fCet,7aoи'Ô;~F}]ƺ s+]ݨUx+U$.j;9kb{>"+v>ш&wʑFvw TH[LKV~4^V~8ɊhИ&џ O f}x$*|"VF\\q/a푟˪k ߃3b.z,X [_z"g硲g ~{0y E/)"nM5#{ӂ6޺cr (c6.|m <4SܸپS&uͶ+eg)3 x_7y߄9V_5Xwm(WH>"n3ulu(!75#VT 3t0k՞W`-{!L־՚5FǷ g{gqAK}t\AxP*"\¡<>Q_*v;wrt+;[}yF팷(w7JpSkܤ:KWNS)ǾMUX+E({6q6Sd`˞4;Y!$R*{@l2rs e{rV5VyUmFlT4}m"\|ܤ'ClxnqƳ`;nE/;~e=HSj3G} +?ɛpwy9hqюN:xk*Π܅=m}qP6z a2FH RgtwVKqY/ í,{t%H୲Alz+}ꜭs@mƯq;2pv:dgڥx';NQ4dO}R.{WqQfS|l)]xONz@~}Ɗ~%"{ƑT%kA3uUPqʹqAfY9X]U#nѯ4z\K\~Fb3eLGKFR _n 4h^*e]GTv9Y*;CdQdrosj9_˿YP=J:_7x'cAx2TXF[q5TީBc֘.aZa2Fo/ {սquz0wH## LƵJH]Tf6th?y*jH<ءЌ2}Zۢ`@ 3㔍cgh gΞDŽ>߰#^O0<[{bE똲a+ c=HgCq' +fomcLN:ٞ:GvWz'X'ʪ2k}<+n~bxmlͱ3$1*^>䦺T#&cojQ7;+eBd+3uY{VΞ^+e kJBO e 4oS/(^bUc5܃|os|2q8)kڏ*K}kh8^}r랿o@?{JȢ nᏌ/${WBU6jP_]cx4Ȝ{ox  UU ߮~u[* ɿK&\MAfv:P(+l(jݒoiGV?{h.%1Ţ/KƵÆރ4XGRq?@5q=#Oxs+mR` wVqS}ِjab'ߦEyЮaӌ= u{>uĊ3l_ uwz3i("n7qpۅVU8eyMΌ -|G SAш40CP|h˓g{g՟xFJM'ꑪ^+a)]8N~7ahV=C:PEڃ^ gϘ;+̤'h\_D#Aqڋt|<:ޒyIF}0E8d1SD+{hGct8?G%Jjr&J}@&w#!G}"\+s)^F㓱h624 Έ1PCX4o<f9XԭUm {+d߼Oq̥7 F*Λs\"h/wT]Rr4cψG:1>pRbF;_\[%@@0?i+i5ƕ2T9AmAigæ{Z."JVo8dRՑr/x-i r2#D)z61>w"{\wBg{Pb0Qh@ Wёn671{wRvC$ʣ"n*E+Ny xh` qŁZ{{d_G%oX%g$1^EbKO 9zj2.[/2&EEgM:RXc?Cw􎛴"B]쟫* ;[VzF1铋}9@{Y{BYS:wОHWqQfˀ.39by1\9Ĺڕ$ۋ(Injl_z :fr/[$rXDzυ8|Y~7u;nUUZE:쑪~XBubecNSnKZ,Z(geg$=XPq|6gV]ͤH=rVj[7,fʘ(pM\믳?Lv7ZgŹ_X+Pl RWQ\q/!Up2x[fh*!5V8eyuN-H' G=^#d ws_1"$_ة']J;PmSN&Sr3uC.Og<6z[sڞQ] 
B0{ލaj{^#i;enyEAtVVBzNkl6,63~ɵsO\vV!fj8=ʥɽVZeÎ8==i!VlF>* J[(* -9NїeWsu- X29iB-jU羼 bL[ĸ|w;3o<=o߹:䘴Ҽ,Σ}=;_ۿs9:Iǿ{9y$gP,XnsGI[gppӾ`MAByڣk29ݴzy{n ,eG"=*V{.Nw{0>IzY;3&eiM]ɬo1$ΰSUK,!kɷ3rjqv+G' b՘Jn+6vKǒ+~]1sNr$)*9|"A2ȧrHke.v[4 fl ;+pW/z_d"|k"? +R%Ts.qsD_|]S- BRN]Z*ߣ:?T1kޫ4'u?sj8O8yܨ] [X T&k01c"?;3ȧusH\ڭVB+o /iw Oތ5}"ƿX˵ ~앬nS>*xɊY"XG yɓJ R-~#Ĺ [ix ~k*j^k1SY{;s΍vqq`πvsݗ{W}"s*U/a =RHG [πnpH!F9v ' \mczPa+,E2&!'Ĩ#6Gµ=*,ƇXrBڊ(GhT XF[>=I r# lMx ΫfWcZ͓35k3J(B\=gXpH=#q];<^OXp-x2X$筪ӊۚla';晙ǁJp58f}PwwCFTf(8dΊX{g+>_v c|[ddRB0q='oXŞzDrFC6x^NY+y=+YaSŮ]Eل gI.Ǘمe}h;ԛQ'm=z0h:1p"{Unyw3G\oJ ?dQq97ʹD6.Q1WQ,;q\?^sҹ0gfu۪=緑 {EG*vT,7x#n UqV`WoJj U.=xmo+pb1TBu`g澊7?U˹b>Y1-9T'.dNJ(r|b/lj!f\P!Ȍe=@c!rfÞ}iߎeW/}pgi3y#p'iC T%2N&PV%<~~hԽUh;29g#ފhoԟb5'KWW3u~kyiޝ؜U oWɧ\&2x{N*z![vكmڊ/Xv9Ǯ^V]o ɤ[Q-Δ'UE^A:Aa.zњ2 rU25Xp/|m-״u\ГCv*xٳMd@b$\*3"˘Ћlpk[UJ|fJ(YŞ꺅`|xdݮAFuVh$2^W8N4 %;8㐏Wws;c(xq5[N\iֲ I|b\J޽:O漻9{ٳTC wt{vpC*̨޶զ'~[2xb|{ҙՔp8,3+ΧP뢊B>^z:tяm%׿%Gb8wz J7cdK{l"ʃciZh#bWv$fbf:ۘF;Ƹ%FE_ij3){4;ӤѰX1c:W8+eL&Q9r3$&u 3죋),aF=N<Gt:=Vu~I}aڽ kT9Zt,N3~EjOOwi[j< .F5%#V!1^NYeq 9U"Ȍen ^%o ~=!U\KXE~~pZ+{E삓vxpMhS:{ZDh}P/T*p̋+4Gk8!v0Tup[V%<|c 펝Zy\@}xk樚dOA*Oˈ|,R8Τk+~hWc-+qTj=I_[v}Nn G$39צ c|oKdiM5Mdb!09s`UhTY"|qOTYx.Wź aN5Kfr70P $R"ڤ8e)L`rntZ013mAAV}]hgNrDa2@oXGX>;6tWgf(h؍ R~g~~{F>1~%=u2d4)kS}d_>GJO'6Aݟ`UR`+VȠ\+s)ϘoT'yE=3^{g{;i hS%7얙2RH/=wB*deb4.\)[WN^>|8CJPfMr:c>рIꔊ2ڊܭ{'6&'6~m߹]C/?N;ڀǮʵ⦊ZPߢ<+Wgc3w/FXnifd|GK` P[9_g*kaV+hlAFɱq*)Y)^j \ Ӄ{xyT5lk<vKOs_i}dOsUf\(w_9gYJGzjϫzWё9YEfǝ(7 3ώ ?O97־SBHf#_JWa{g?(k+vu<0C^d|yMSv@ГW(shm8y Q}iwqJ<Vxm\͠,J(nќe@e{O\nA]uǞnc-!{NANbV?@.oεwǞbIZKycw> SIq2o~廯+n5uJYcy[uPdvGv(3WǔA!/\`R`UUQ, 1ũY]xl3 *v}:z%3סmx.spxf4V|pUZFky/+=1玕eHSwLǷcdF~gE=N5Ϊ缭hg+z>^+=:+Zwt,w>5{A5lSέ/]~JgTEqqł3>}"Owd=sUl^X8|Y.v;qNⰞ]dWWe&J&ff2Q+^;3qq/؀5sN{YjOΗl\bDo\|暪 Wf- nS>8X^%toU[J`z]'rXAߩrmWE_fY^[/ US.c1SDaN;N..yzϭp ꄿA8Ӓb=.Yf9o럟G,/v9Fg}.>]d_ɡd|{M]0#q2RVF&5"ڬVk͵j~3/*Uu{kkN3K$+Z JIQg@x%2 ώe30v 6Ah4~*͎Tu=+P>dZSF1ҕldj\ oWdF^iEkPz8U"^[GcX\fiv*8x_ JٲӋtt%+]kac&֝sk+ޢVd'38R.lXc7ڭbsfZkIu TU"1wU(+"2762HsQuB̨+ F_HPт'KNx5떞W|g~P [-3^ރ'82 
PlҲ͎RhVHs=rt:Q!""g:633T )NS R/>&3[ :/Q3炾AfXr~rcv]p2ܻeb"&^+#E?yh<ыlr.YWP߹yH'f+VG纎Nkp[&3[\yAϐ9_ t N8m)I6[o&fR__;UQTl_ŋ5PJGT*/xJSͮoPם%BWRSw`eD4x6}!D8x -c3!;Ǥ]+wr 83s:Ƌ`'EJtaHu ^k-S{ŽދO6sV];.ؔs2#Dw~};r}U%)pJu놪dm ~SSG'(>6k시8+}gc.RHܫ}U }X2 @_0e6Rv ߤXyir$nlhEb)ŔA'fg]iyxٞv^tБ ؽ@j1īC;?k..PlgL Oe[ao7Wy&k0qwkW4M-\cdoFc+:@o\UcWo[ե~uFT!uEZ3W.q*zz +iYhMU +}{ʌ-O#,Ovv; ]~g`<.ίފٕ1 \ d igSrE`T]"hQ8(wԪ?Y݉NdguP.w@q#L-Di9i U*]I}JA.V;ڀGŚ]*,1)N{f3uCj>iVZjenJ';\UcoUh}yD?isԎ^gg]BZA͕ޡzkϸ7RRUgcȹNe*g {Xq;W{$`b3V':g}H \9nU;ʐ~wT*)c5*ą ɕvjGVs_el4]tS“V2o;i*o ĊWJR?|Ccmy⺉!V)͜52 +#]y55Xi= 㓞^s2c;8qZy; Ǵ{Ky%%+%^7?Zq_xH qJY7Шc\88qWZ8IQݤ[yy=D߹O3#S?D`|ṣg{DZR gX@̄55HnpLhu +WϿث1YI,f 6cjc|ՅϪgHcOP5g{n&-w cvᖧZIUnt(3y;Ԡ;qY/wVGY  x:e1@M6#9YZ{gЫSp>yeoፋ9E=VZ W,(<3S+:IhгFq`zQX^URzg)-g6GAw* koH7!gVq-1NR. /!j%Bok5Kpŕ2Dq VUqZ ɨUr'5zȉdm^c\dɄ5oReQڱ=(' WYhl*Y:pnnc4]ybL Vzƞ%\eJJVJӽ'k^ʊTfߵ/d<յp[VR124n>pwYewA>w( ~R<\tׅT*͜gWѨv&.)Ys')>Uj\[33 + Z? W³n9?Վc]VJcDR% s,wo ;1kcm>MU]VJdpד B^jO;OZTY7:'ĹJ۵м%K<2b0#7݌̓߈6lJSZ:_~1˲'Oڰ^g12*5 齴])ΪN_J842󗀛8E'/8ܕqg7 oQS9y{u\پsf$z,?JV7usmF-Uv (B'(Y+IEe;y*Ⱥ PPtSZꥪXHg*5ڎ(Fy_;8VoB^A Q8X?n\-eN<2se56p\>sPojtrA-3={5kڟ[EtJf-k=\=~\=Qsw}ШiiYx4X\>Ȯ_UGe݈Yp?GGLx~`u:8_gC14lqRltxk;li8\ouX']+Y`3nUsv%d&߄dOm8UƜy~ǍuXVOYXtRnpfPXt&\tS<Qw`UUwEV3ge%ǭ,s w*oc..8!(|:y笄Z N wVʳfc (p[ZiQL, n0.X'CBJxUvpQJ]VO.NS (/Rsj^΅y_~NL/+!+u>w{EVg;ya⳸Hsh,-t XŘk&YW_w?q̥hu|zn03A}S9v6lL:vP[״6;W t#8Fcw-~(2nX)\iƞ״Xf.괃 xEep|̂(fTb"&DyS7GTϦqnϣGY 8A xݩダA% ׎ٕPikJyUw7[rz=yC{<yQŕy5&\ZUߩWhJV%\Q¼W9Np(!8Ӥ^afiix*Le!lF9+#BPPk|fe*+Y. 
't7M+bDʘqSo ZXֺ<*nN!:Wye;Tp_ e]"?8σ'c?g{XmUuuRXѪWլjA}UV9!Ht R-Vm(+zG3"_RjTR/3q+A<9ZKEs9yLo+۸os_W8)9 rySWUyБu:XBNe*$;>lN}~L;+lZs3^3,:z[N7u#Ϧ88iČ]o1k}I#d| U\Ը# {,fxAfGQ'PNYW~ED>_ Yq*em)&ѐ\s_5Oa<$>f5QNɽizޒyC:v^(|>8X.Zf* آM v8Q$:3GЬnby)u3eLQC;\jN;@-a>iNEA:OJ>7;r曵J;Pm.a<9=fUev.Ef1Hn8t(Y<Ҡ\褔2hCU{cWò^v+N}ryJockm=w `~gT XvPDڨj;ߧ _UcZ&k0ޥ hR nWo|ƕN*ewd=4JD trdu'emˏVҩ"+r9ΜÌu^a>AȪCkR1IV˝MDx?C2h705fv1̝'h4Y ]yo;: k ^R/A~),Xx׎J4GF;O$wlU:x)ƐfUɘ+MOÆ;⿩Ӿ.t-orT6ҺfM` Rޑ >zލgZ_zRٓJ(΋B,ƾ;UXcםZU#̳1z@pZhB A*|\vB:,ț;/:ٰW:6z#Χ">A;[=J],ϱ{Nc;*@tHǬ ԭc=ĕ$|WB_"(TȪ班Q..+m\qǠɜ;p/<۫a2!kRTjxg:N9GUgT}Y/ͨc*{Mɉ9l${hSz;+mW߿xAaP6)oɼ$Bzd㇙y T@uOg|Snbް*iYP;tT6uA`ᇍ8÷SuNs̚WZ7!=JԍƁ/:ܱ%.xb65MN8WeWNŽ8R72+e($Ϲ?"xN엻ٲX]$n߇ |wu4‹kr#}+_XvFV.|5}VXF[׀<"&,+muNeu*b>/>wkY]QnnРCDU\^~kIʝ=.'3؎f13id@ѼoT@IuhV<z^`W%0ToZ)$SWIJZ;VN1j7k2NI ,Wq Q;p*1j,l]h㵲V%BYm=;JKW39/aX\fi+O٠@ϙ디x c@yw5(1SDM9s8id\gMF.$<|NUyT~NGBjF!g " nbF't{c %҉9uB Ȼ x~0(;\ua2E&=Y))w9̓8xOT9!;ÒAT=*PfZlU6Z}b~>:w8qKc|9˕k,9N6W,p>ٳ̆}tkkwe㑏=qo[0?V0?B為DwV*Z8) >wvx:8rx:dt7,}-OZXֺ&>z%$C=:5NʊG>hT: TV;+gG;~ƞE:jYص_;pF++UN0bV(Yt'}94GɎ]dU2||0?.N9#u]ReߩR3Ux\"(uvjR2%(fLR=x9C=.eQD"Rrf^q.:?>TgG^wj1H%ݺ^ 8t8ʨVͱU6@Pw4^zsNƒ<9us%޳I3-g6d [>ke.iRl&gf ] AAŤZ6+qo{A+x>ھs-] IvJx&@WX^d qZ/^!uEPU;N^s4Y^23|o;o'Od#Ź˓/UveRO)sXcUӮZ7=9ƌ_am =B!W(8U1bu~rU)l+>OB٨g{6sa"՝E-*6$VVՠnj\:r[UJ¯Sj/M-03Z70WRn(pm|Y ZٟU7*PT)b:ǥ3#<𽬡{Щj5z9kbѝ_42aknjt{Xk^Ţ'wV9N;Ẕ&hlծWڔ L߹$:Hn}kPx:e\{%VFeuN:.LɈ;uvTMJ͉"Ȍ=GЊ|jܱ8"ee}.OJ  b8ZO?\-xp`e=NWʺzM9j4YzHdn:0;ز}Ctr(3Z^P~ͨ6Cl'}/Dug6vv.YN ZgLMfpvw8>&@h-4s+^2eA\Dm<ὒ L$B84ѯzPڃt1t 6xÃQEZVgZXֺbI: !sڪFU'о~*Uҩ Tzy}~ƣ5i~O ŽPP;kAAz%N{{KrսҎƁwI wVHCȟx5 f)";[[Dg032AV:wʆU>:᱒3W3J\NNs6WܯBۮZg ȍw Z&]}(0wFV.bg\ou42Qv.##eDNծRbVf3=8f=lqIhV`ϸ3:jTtj 3T^6] ٟ݉ &0QCF"^)+ޗe}p#M*4ws^?uFx8r#~X/B&9QC5ox]w]dse}YIժ{Ds>7 YiY\fiʕw@0l6gXjl b"hۮWV<Q-Vs䮡yJU+'jT;XW}Ag%͍8V-:Wڟ;uxRTk7۹`*U=8crb|\QJ:FC* .EU+soJKWO5&ӮT*M[5>J(_Sʲ,\)5NU^INՎ ꖀA*U(u/r^v*rWG;wX*X{FAg0Ĺ*y7v#ԫTұpYwV \P>w;$'S 8P|xɸ^rϞU6µ+\8j 3݀ࢮ*Nk1n;(GނvlRlYE=~m^mxK 'g!\4+Xp~O9_t^K|Vub VgUS]b&r $픆$xFabP;@9xSGь,Wm@3wF^glI`8# fJLaӔY:i']Rc6x\9:b 
$[NF_WNO[pރSf,lGY{bwMOW(kA^7\졆/S:.Rd df񛾏3nr XtĬzDkQ+$zkSGKVW41v9c`z*ng(~:lrYVƼ֯?p;?7G} eϪY'."yfjvH:6b?[\\$s nwj$s۔uyNj>”%qRf0{ J7޿VɊW"T(hɧ_ec4[վ.ʕp9ψ:"` 3T)bKυT'oQe}g';O` DөN'7Sm eێ013^:9AYf^lNϺ:J.pc|C@*Ub(r?'҂~E|)جoyx&z+UUU?nJ_=s:.b63ޭRó\oG*gbmGİ6"߲G\Wnwh= jVMN"JGTEE j_V-΍nϐ AFn!'Jt*Ol!+:7*->"[6ċ|A*1*H쓵jVM#k]+Jz&3wr('ksv3ᇝV-2V u(?F'aj6}2eb̨j(>2eVE蔚y[f]NZ푻-x}])*D|R ~.ws Y&b{6&k3γ8՗bӌ=WNlQ.z3>&ݱ u,uz5'^EU,tʜczn,0KURxU>lPnJwU޾.)',\bm>cp.O%lRcTi2yN&j53%^oy<ރI~v)nuIcdnys؟͐l*YHp/Ssgo]dsZ/bqUj=r3s8@ "g%f9NUC!j {<.^Ϫ2k㔄*̉TqLJwX_sɬOPfp\[hV<, ppǩ^YՊ_Hy0^dv;UE^b;$އAn8 X>2a; ^aYԆVw Ue# IݷJUD_ؠ}nwڲ+!sˍ b5tǢ6+^n5(!;l^)<}c ;ǔ𵕂dmG2' WNZ%U:a1VOZhu[5&-q7ZP8/&RE4+^՗|`,ԘD^.v길zTVӮMǒw~*>K,rnfMJGWmg;懮M7֋zNDE:\w`o!5UB' gC^h3.^ܩ9y~wɜD_Vi&2tBQ~geYgc)'뼞X%'ow*}h*xpu2Ъ}Nu>?CUUsm%;?AJ[\cu7cb+8BʳUE=S g$!d0^NB4r)Aqˋ3B=xꭘ2ž^gYU e(Ze' RjDɋt,V6njJ籫GZ j6}ځ\љ䑪g\'3`Y3# (}glJ sEO=dNk9o[ (r$*Vc1U/؜3>5hJgHO@31#GR0yҜ;u{s_l׵^RP!s6u~(!WVUUnX^WԑvvI*,v-:J$`K߸}l )@uRٲs(Hz_hfdfJl/$%kuĩTtWYUS6 f+ѱ6r7X&~_lFʲLxeox8;'}\<_Tڮ_͙rh.*#Dʁ&"Rj̃{+-WuC+oI+c2RiD"sTh*#Yx:kZjo~,8m*QLFNd|.sz,3' +6>H߃qCzcW8t߁؉iUDih4}`}{IKE#qaWI>,ҝBtΗvΈT+лcqg9eVSjV4 'n>Qwj u:fʖ)ZI z0KX?+ Y1q6j똅e;RQҦd7Te, zZ_ʬQ[o+ٙtz)ʦ4cTzkoY[c^梛`7͹'ks}&TTmGQ%|C3}wSg+:71?z7gܰ9g9ei2‧)eńWK.ûϊ/IjsZմ񹘉QHL4\?`U1d"eC<1` ܭCkV`-qdh>pFrܻ,̜9!}%'= 6;Eo E,DJs}F&+~t 8R5fO|EoХp?DJ_ˤjŭNU Ng*NSK{(Ч+Ɋ(r1_ᤙV[׀.c.fLeg)U7%Ta6e5):pYrخZ}jxܮ{긴'̨Q浹 (+L?;b4> n^)m$CPFa޲ݺfٻ:(!u5ooXU{E/翇:3UJT*gBQ !U/vr3m"J/r:ly[mf7~2:搾H1'p[z&|zsZ4\ հ@$ +׼}^gn-.<ڕZ>zYNF ߎVѵ͓Lx扻6+\Y29l7@cYojd{L2bOtΗ*\|j=-Xɮ85k|Ý":z*q,hn&eэt9_溦()([hLZ&7$'Yj̧s93wfu# ;ig۞݇v3J5+g37tɍ{xYWXw; fΪ^V RMg޲w H4pXXf]\|cױR+]pzE!+su]E ":@SU/W9 r'̈́yH^z6c{*fGUA e's6L3kq6LF]/PiIɤyHDѻ:!dHl?Ϳ#NVxw gގj"暥S?ASqV6ǍyXwPR*WI~kS0Oj\CEª%;cKNvY9;jwGUZ;Jy|V?>65;š,#)Ϯ_M1XsrNR2%nΝZCyYN\r|в*[H6mv)W͘_*Rwg@\up=!*h,B ⤏\̤8+v+_ؽ٥ 9wAs&7SbS]^tZkI;o ۮ˦Ej?2s?hb}gĬ*-ljetq\Þbe=PjX2HǻwL ϔ5t+q?p5 {E>swҖİ`+熯9Y1qUaV߳sŷ iTϫR=5hυh9pZ*T ?IJmmD4&Ӗ/7W!&ēԖSȉdϝCɼ>vt\-q]c'9[*:ϸUWT|%9}䌐LFc>y3$>wu]:o)Eϔ kwɩf,C8 չ+j+UϟcwSt%nv.'B{®^6YXf]\gl!˳̺iHe=mW"q~I԰T暕9jxP`-.azO*tY >R=?#|: 
g=g:qZ4$kŞCV@Yoj*:T@YI,,lT)B{t3^hϙcUX 2ŰɒߺAaM_亄yH9r3C_?vDƨ7g*'rKdrgL 13E&Zݪaӵ1-ߊw1slw\ΧsV:* E;S}+qZ0b>~G$xdRpS/zS)ƙ 37:Xd%2+̝>?%2+ e[qiu F J)lPKQ?}]:f ,dǖ tTU\zunUZ+:d8 w_e3ojEQJ;3̉k|q]_ۺϏ+=y?~kR!>S}뿘"?1XV+Sch2Oc]V,W©_IZf5'(Nu1~e=_룹#28 >g۾[r)WzG_䰥R㖒 x:sMEI}T;Ynk+g5Tǜ-NXw^L3zY9]8n `xezTz7 Tg2fLQ<& EP+jʑÈUσ Ώ>sf`>և2EsOtaRMB$++ɈaӘ?SE3i~ZɌ5YgZ`g_\Q`9ӿAPqRޛ ho'ujSVb/h1}KZPM"k\_")cTbvuϙΤ_,yNNȱ.[OR+=AfqU_zl \zU0HZ1fdR1`&HBԪkl홓眛Uᵞ̳5|ۻƣ,n8yTꢏ]d&rWHj=͒,޻e5/~0<>$V8'adFw[UuEmU<ȿ˵b`BeĪ+֯\}t=qs?F{ݼ]'oz$D)LНKr3:l&N܊ԷεゞrQ^8>UjJeIVfdQe,ӡY1gswW5{yC}v@~*3FRv^`3cyZ~n;6i]ik\'8C7 q^SvTD?-{U/[37oAM5{ Eϩ&~#9Ywf1m !bWĿᡉ7Pz?og;o[8D*4U?H- )YDo<su)d CCU1b=]F8"4k n7T[k7:Us.7S:f;YM)Wv}~C*%sPvDfqe+/L}s7d֩eW Pcλr^fS4Gͻ7\3 N!n0sN!@#҅50k?~"3ׄmksBLRac=ž۸&FG"QSoJHgwB}F,2FHo0|r59bq,\xuBL=g{"HjHhb7skIqtibڪ?/jLY-jzoݹl=9}Dgk~:C0Tmދ߱a sQZ-l#(Yq_mz];"6hnd1JfW׸n_Kϱd.W}ި1S?{&S5h6oH ZQIz.'qJ +rR콆MQU ={g_zK{2R ~UvNF83S >;{D&$qg'ȶq?؟sggfS }ׁTJ6Jj#ҝIvtɶN _Zƭ_eSx]db?;f~{+̯ $ GqIɤIxOwh7v sA*Y~MɮPwSE+,7)Y-VCOޜrlb#XQÙ+\x!N̼lsAv4D}gZKv-VqUU˃}?Gp=]stձ@\7'fVILF褬if~[㩭ĉ!ce*ƕ&> 7P8T$[IIϔLS:.@ }I'Gm)QrZJ, /8&3t(Z瞸{3 rS&s[ N9(׏NYDpwBqqg =H nmHza"U5?sz>ru !]6;uޞ 7זsJUY\A2늙N8tB`}}e;R;=(cŒ]u}0>%ʖ):mtþ 9F&̅> ~kYSu~:Y6Oz;ȟPM=x-zkns2Mq8'Vx%_(䏕Oʯ*UH!b}lM@Nwї8ۥcR׳Z+`9MЌjfQ.!G#(./qS3TOsc.jty3&;5py4S*v֕wәOqt$FbhG1D'_:ʵED Έ&Xo2ρc|vU':WhOB\)]MEU~L 9o8>+:.֛ |J;qə(A/qr}g;3bҐI3No~r_?]2Wa=׳roX'Sj>w˹G6Nt/CyeOA1sB_gS@p)D\-*בv3W=v;Ǥz}1k-6}-k sBn2~& sΌ)*gVAg^f)bI y[5xxV*gJ,}@{{w_ֳuSreYfWl?8^qc41)@: J2 ?r~=aEd`_cIqK{<~U4ʚ &91\GW:~C3uV|x|/3k>:hKg3!wZ~:gf\>sqC2qd3v~sO>Mgz68z2X/s}5Iށ `QG~ V6rp}Կ/4/tv' bo(&ߨ8CnWXpY٬N!N:7ƌ0xU6*~.2:tRX}qCs*sϏMf01Ή|IX,6+LMTJ.波?Qd"uC&q#ZAmY%*+?⹢'@o^(M8D?ou8}JUe||NƉ'ZŇ}-Ʋ ݏzezϊ۬APT_ uRQcAȄ!TOC(EHj_V/OVԕzfT[~ew5;#O><)pv rØWWYC/@j!Y"ϙ,3 O˼Uoc$Q_pl $K`_s{J2H/k{ݼ{kUu@PgUON7)8NgC&. 0J;P,L_&-S^S3_h}$lt2O2cۮyb>2^+&&ݑ76s"~\p\ygEA~sC8+s+9'6t$8bz H_锾ɵ _ J2\Pft_ܸϾip+GfQMde[;DN V;4\ɵ k-8M#M;k̮:2 UWU)6xC&bK^4w@'ɴu? 
E8&GJDFq#Yg9(\jknS&/w>EuQRS@jZ#zX넣߫FBĩn מN79LuzG3Vҋ+JIr1xa} y&Dݏ |J]^&t q5sa3{oD"yh^9Vjϯ Zz8v^R׻&suLjľϵ:SYBid32ܞU,Cbg)Yh ŭaď~{0iL-iu 踎kYg@Y\paŸ|f !um761+B[V0ݬ}>TjһKx WiޭmnBU1TO%B`؞ eBĕbYvһ b,.͆Hje\劉ЖǺ wv*lǃyoX'*K]<̚^q ܑxVV:Q{ܡQqYg#{Uc-1-4ę{3m+߬`f==> |e ii% Uag])V|CcsZ0+Vo=k,hWS7=̚AGt݃Vs$s'raf40miDEVڠ&WyzD|pc r|%;\9nߟ6BuKK̝Ru77)Lxg:<1aXwjCsf}P4q'W眪[y") ҿJcS/~Cˆr`2(d !zĶ3+Y?a]_E>p:{"k=쫸{A$2Ÿt33 RY[hϨɵ W7V/jekl( ާMyvRNg9d;Νp'{~7nQF2B ԟu<.^(噆&=gaOV UljIΟ8EM-$;n:<*XlݒɴĈZm iȻHjggu[7ؐTn$Nm+{kf'U?25$q~7Ukv)=Y1In,r~N<D(ʎSU9U9^pk+YYfIo>ֹ/5#WݞX˜}}oq/>n|糢J߀nܩ֩vOCzȄ{S&k"AUgUUxxw{(?Nws!W00CĻ>w[ud)UgIYA,Xwy<2u:}ź#ez|w:`E #ڗTzo*[{qLU=u螒^1?GɊOR+SAث6Z'Q%ʙ}3[1{!qi׆Ӻ:r`|Qmjӈtn:Ӹj>'cwCĩX\ɢ4ƅe3W &ԌƜs\<'nxEznLg:t8?d}kOoI5j~G3檫[urKDmnSLML٦=v;|J:P}1r73Ua eaArbE%ޕ=㟆5 fIY;HHiJ_vWje<6 }./.V:J* s̡BS^s@#NuzL~rv8q/\۞K-F}u075Yj^|Ɨto<ʖ;s\9U|Y=%wbA| m{ |>SE 'Qlt*=é(vvAW-1(X!tm.UU3^xci5ìax=Ess}-k {sȬ5r62E:wDƒY2sc%S\p9qUjC$RPU?9ͦϓ5 ī`cU))?wJ/MX`*&7]HZ1/~U-Ւ=?S]&)wMWb3YϪ;qVN0_ Mm^2ᏝgԳ%ճpAYyǡuz~1$2Yth$wffc"יN\q(s3ɑoiu "qĩ$ ׉|oP޾l60CXmMJݪT"A.f֋ GWtudU)\U/C.}D's&։| ˲C呋 d+NJj/W {\o:8^^랳ΜEιk=mxQ?Xu^Z}\ukSQ.Љ2YӬ9\:)]N~7D+o?pUj[rUUi&yM,qD=rŦ IA 5* DYG O*#V% ﱬxUd;:Q[y|h~Ptwϔ5~ddܧ qZ=Yia*ݗO?Uu7k&Ykd*<ˁJEԭN7vmKOS wG/NYA?33)Y`7Fq7q_+ti|8_fV56Af\}u8XQof+LmΏ7}tD*~N/YL3u63woSp#hy~Yu|̢C˒Ӳ^UO5@_{ 5Tyi̟d. 
(Y`En?B K8SafӐr ZR%p-j$Vٲ& >m3`Μ]ohŪs kB*TkZ*6rô)G,e 9 `4ũ)*6)\H 3'}7+ep*J@90qVC8#!̛`L05u_+J wг-wzeR{+f+;:.nx/Yq7YXf]$gU<=g㝐 ߫6dR+Ɉ_色eƚѬsGTp+xSk:k$yp=7T1GY6$wfd%+l_K>?JO{xZ)YfvuM^=r\+iQD_G YiBS3jݥ&;=]N_1u J|ig>Sūhjn<2fw~r=6ffq/Dj?m=<#P>Fпǐ]e m NqPw4ʺW[6a|#챒X˹c\n*]ǕNŃƳ>wBVU m6:~M昙b=E&@C\x!-Fu?|ۊNۻx2?%LbpC[zdhx0r0qL1 df螱wy 'M'ҝIv]|c~\X>'EsϏ1o_2k@e}Ã8U?Q[NfO53wI:e\YmjT.~,of%n':@Xio>WmtyWO(1'ـN7OBec3o#~BurbhHŭ{ӆe&iΩdN]"6 dRTۃvm^hu =ImjR|ãj+}_ O 3CrG|Iv3Q%E.PWzYq s U<w#=>f,͊(WpS1BsĴaI(mf(.usϢv6>gnw#aE&kO]/aT;D*̬V2?{"O&оj[БH Z+ksXUWaz~Xw [Z#m6nϨ|%+-5Aqg+Oޮowd.s$#<)2/c1A{c9(o` p,;kߙTz915=,>|{g1%k!As#u)IWʺQ\;x}XvVUE}mi2vF+Ϝ)EZO-*+?bu=\2NAnS_nݑ/+b^6 xuҬZ#MȚח{}vab8pDž(n쌞ϊo|z a&p\^cD@_P;Upz#od;wPXfk] Fzfv' F35ޣ,j;Ty|NE ;gV8vNg.y[v> H 4<ǬXqLݵ,KR_<3*bj\pǾ+`ʑjaxt1Yn=?q2 jҳQt)pKy׏]%9CpC#NOM͟`8Sz-kLMgFU:}fvghxCg=gFp tW/eߝG҉V{U(IJDhfv[͘ 32uw[ۭ﬏LŲ*Mu,d:'8ƅOFy%URpCI]O 95Εv n W7+'t*u{PyZov..*?k mʎ i1#t ,[,pL'-]v`v噆3 lsU8'97Sηz>Án^ !z)}$fF282zHw Ւ ~TUO= {Z\~ܿ `/h4YaFFb+gT41=CcU%We1mt7Te3K@筙AR'*9AU]]efI2vKz5֡c:amCO3OkKtYF@=O˼i{> $7L 'sfWJR\Mj{/2Ò={| w>ں,k_MC#K':$oV}_<?_sf SZ[xT[S{jԝCaŘ'n6=&Fzx.$U*U0 lɂ$ޱ)L{l'iYcIɍqE({ӹj˪:Ns#V[ge3(inיNs4`3#ǐ0 :/T̡|k֩uCXz|$u_uL QvcYGko':"8uxn5.{07 .>?#I9`HlfC+JҠ!խUҘE=u&k}rje<}NźS}dve]ﺎh|փ*q5YwN$'gq!+u^ɛUJ?>uΐg"?+gm3_gER I{ڿS AmΓuHuߝmҍ@<81gՋwlhԘĪuqgsѢL$T/ic n#'n8U@> %V8YAԩ/e"k{={5V(V*k^|$J{S )D=똡]D eȞcv #{Ȯ*=ͳ׆B m6ԟVcf.V 7%>n)KYk8]rS *qAvust7U\ҘLgP5d2 ZO+W7IJSiRbVô&Nl!tUE4+6>o*{K.si3k>03z^3+Niu.fHIJI\;u-Ʉ=Xv*` TaUI#G*:ιuҬ :hP500ksQ;-=yt|R~UTp\X 7Ww1o=s22\cI%w:beCo 7EXoUOkvEnn$PXWr췧ěs LR;[-W^Չjxm5;<5 C2|o>wUTnouY۶WLe^鱙k Y`z $"k}-p~0#NUOқDu)_Õg,i@ 'r9޽YQzvܢoV:uLi^J#׏vȯQ4J{&6ˏ89W97g;a֟},٧췍HhtYpW쑼uM؄sޘ9i栬֚U+s8zm\}0d-kkʘ nTz_nA"74Jj!ߩyQS©B'nV}nұ)Pѯ8T`z3"NjnvێCu&)Ϋ Y_{{T ~9!-9kőw Ge;s;Khoե;-B>rY+bj-GZV9s榍Ky㙑(t\UZ]mw24BzwI\lq.C%~k ^=Ɂ5' HaFv /3X%R}WnVI~;Р5|V/wI`_9f4u׮ UV:j`;1e.>ۍSo&bXm}BQ[^RUϹձ#vz>s1zUaL3j]u+nj}j'x'5gʺhDZVTcy׳\뤵1e} 6z\en[&̖LŋT*vGRb ļ4͏SH.󹘫g!!R힗M'II4W.L2E9{&"6C"Ed L t\F@*UyU/v'vJ]|ч[15:7 ,ح6L<3x#Lb>44~jqT*2IKY:m'{y俗E}{ij~vܐgdOr\u1h"U5(Y1$ޫ 
YXe;ge9sC#;m1$#hǁe^  Or?+y"yV6y=M^(쎕dDN͑0[Fϔk̙:#l[*;o7t~!.T_{R̨<(>W}W=S#5n\3^Olyݭ]n\`*d 4ը6uw[_޿ew @x@_5i!s?kwƏbny9ff|J7K9J9T1ZYEc[)>|{u") Y1|,EH)f==*)zx2J/YB|/ӞsLhz=|I]U_(n|0+7+e,W])M*׿ r^aUT۸ocT\_deyoBu)wc$| i+DKQ ^ē+VfrZ|.VfJLˆdƺ1+Y㌯W}g&.s*ĥKvwj9sΙ1󉎻ʠ_=sTZR7bJY7b)'43zәϱ&B`b־sYψS|}Z5iq^6F[fDuJj'mY0ffV9 z.[C{G)yWΫ鬳?W@OVw̵Qi8ˍKh!d͑w7{Jǡ/TcA[S.CHigfT˻ycaos[J/f.ݬ%A4OnM`kfHq$Lƨ͢AVڨE\>Qɮz2~q>$_nXM(Ȏ/=B./M"'4#DeG}nn\gwxpדsçQv蹴Ѳө>mY嬡&ȁK7cc]=xɼZڐUn@_/vuob/Y:S|ps+O3.>_ޖtRQV$~qTnj_iyᬇY]Vu&cv(7n)ў{+M5!8 zf1W}Z){J`t3R3ّ@х;sZfV垚NG^V dy7v$ ;R=U-Zm]*f7,BrʡYJY7g{x[W*-NZ4~3n<;Iosnps;s)T+HLZ":6?{DMWAgT 敺WI;!o8ds~)eʗmrxY8T.ׅĬ+e݈+.B6כ)3OǴUbڙY538=ć3e}o_"';Iv 4˘|'9<G|iU2|\z7mJ|856:w9,w_))~U6Ug%K uK3Yl? mT1u~TMkt3oUa"!dʻJ,jx~+5sgPϦ2{bHruSbDv9ĵ~h킻h'o}\/|r@Se+8jfnJ9fM36ص!'ydߞ^c=2TRZd2^Ϻ>SSk?f #>=43哦̍Q !"q)U :b{`V0r1&mD2/\81a=nHzu>y݂. 7+1G[ Y쾁&=VQIz0?+Y)' ˒*\QnjrpJu~"^MUZ,W\o) lVX.yƜ?SL}\Xvÿr3^ΰ۔pnJqc!d;I {ɴKK~ӆ̡d.p|#8~Z0 ƅϻr|;:ć3eݩ/VSēiMH T:k_<̬ef)Om8/k}~7t泓Uo\L9gƬޗѱt3ج +TjW:?R)Z/MȔIʊ+=?;xjiLǍ|':C!@mErh2Tlƺn&xUvխj Hv+YaeR?q (ALyȿ|XGS s'E[n2/rB56Yu`FMC;trqtuã螘= ً4?i7*s.z8kbS8ZIw$ȷse9}G%>_cq9+!:UTC_[r]ϜƷ>7qJ]ϸL[S]7{IYoj"~S S:gWb/,ްʆ 4&Q{NS>@yS[|L<k՞K5QVxm}Mkx4yW>ǝ.*z1ɴ%~oF-&-bU~dI94pK^t ˤ.8pXiyO&{9m|UyHwN̍%9{F~?oh9=d b$V+bŤ';2`հwg,ȟ R%ji \~q̍Μukp!ƅh<]{elW[:i `gOryKSif\C*[Oge0+_nbtoȮJ˛`|7;S{iV˞l 車.)C"{t S[u}L uHV`BFNHUb[싍`$ɏqC9R zz[ ]t }+Ie UZϔcJX7_{ Ldߟ0qyCn%CVI8Bf N`:2g {P.;kx݂{ fR KWuLzY^?i-Z[9SP*˺mg5}i t;Uq\z^̄ϔ@) j~;"+XM0:@:'YAuh{ixMC,˪g joƙ5X/̼w/ BŞS;3fD}ODT̡abJW޴rtrv `:Ub+ץC;.|NB(7zEl,g[gE8?*sj. 
SΔs"V*3Eׂ)?7+f=XwlݡB!)hV'skU1 YլJc)I~Yϩs8n4A R|S}kX9i *+ ^qhsEa=z7+\1]c]Q(g9VI7']x^xQgUo_CVXnzbj'n>QeHT劇5֏(.Pa>NgX I?s}&ݙr} hnEI~ JJt&pʋD^O3}gEshn\])7͹g91Rr'IJ6/wiUZ<:EȢLt&"*׼+!4g1JV˝cy#a?Gk/`xN^_I\Mmh#B)ݼ{+~7^7N /i"OX8(Dы&*Z>.F5@hYaȬ kC7R9;fGwI{~3+O [uqc\=G;*V&!f:qcy]T*볆۳@A=3d/+i⻵L;(;IuE/R5sDVpnSLJ+8ÓJ{BSz}u6Icf׃;9Q ʺ\Tzt x&>9[?vo]Yrf0Z[ezTZ^'w w[YNC{_IɑU/ Bd0X4ĥ/]r`h椝&V h`.#ǯΥRrMZ/c 7}1qwG{k)t#Aګ񜬸~ dA'c]?61E+$s~"5=DT"S;/3iYSd%ןៗ_(ΌW='ݔntVXWrndg5J|`vi/{{͟݋a1wo&^j{:i, Pϰ7Xm(h7fa)Ycm='ȅX'ӝ54y7Y-s3w]T4C4?$ɯL+& 굉=w}'4ԃ9fM7]> )N~7v`,}|,:)w^'9kJ9Ta_5ʹ1=wSh <=QnjQ*UGRҫJ:qpgu['2KӷVv6`Yσgj*Mb~AO;<՞yiU^VzUmidbS) 4DuQsm='Zg*,qBƒ)D3fǺ )!~!?Y|~xWnccSBT|F٭:(Ϗ񍰪*; mzŬ. e?PߙI{v:wviENT9= jtr4\8%@yyԯRV)uґ);w{+8Le1Ӑn5gޕht "pfT*TIfT6H;%DJYݹ=}cBL-9crV ScKI9|nmt{]vcz{ϟ wjé_|jACLJILyGt Z o㪥(7:sȩTNb=S0sr0fb̂={f8t7&/71WsdҒ˪X͟ˋ}>-:fuNVa]cYv0kj2/Yﻂ Z7SL'|=|N\uZ4ʹ7εϠ\;UJ[uXudeXtyn;9}򛏖a} (Y-s6Q+0լr|]udI;9p'eOB q5o<o=W7$`iAKZ1+s٬wmR9dmF>؍9Iѓ X.LJv] 󙾳)r^Ggy>9gJqSv|b_myScvx l&8˝g'*ma3O~o0|uvRDO:Xͺ+{Tg.nuH3]|yf͞O !!ӾaA+#z_tV6^QlE~Rb{%؆!e]붽*EBmE:W Z@΃A&3VW,W!VvcxHHB3Dv 7X򇝰3UdpmȾ3%Wb,|WHə!6=VI֕>"&+3Fuxntq=WD,ev@\gn#G 9Pɟ[x#=sͪ}}4Tg;O?3t;V>_w1c6o|\ȪWE7ӢEbir|Pxv zyDv9ZQOzZRep=ulc>/{u+Kcfm%)]H1mw 7H[9EtF|^TQW%YfV//2zKw_VO~8E]D1SktrLMhN+pWgV9?A.Ióʻr"==-n&2ke]D^hY|+!fmwXun9՝߆bYtC>#G-Kc1[)PGK,ܱvx]O&NNQI=o3 \ojʫyV7Kiޱ@8+sOt Nlo[JSwr$|hh=#]31~N^㖒w1U[_V ݼ~;?= });N4F^ַ7fÒ4o|U=B(Y97ґ]+ uҬ2(yiҸ5k y.0FO2݉jM4\qWO9% }\e^׬& =UR/鉽nR翿3gUNA'vLF:waf nϐ35Z^[]gҁSL؛[e 8mRڨ>(^ UWe7˧Y8[I6l(nOPf r֭yvXV9E%T2w>F';,C]ļ*,CW3aUU1$ [̬ZTV !zq?.!.digWw }?Aߑ:R4?P W Sƿvw"&Eci:z^q?S?[.oyMcwkv򽇞wu]1ircL#%2+35ugX.WcEsrǼ냪Yjw&ˤf=0V1K_EYx\H$ DJQz0XdNCO봞!o+>/wo_;z=̢r?ܸ\ߌQUx5#~? 
۽"*X#`?Ĉ5U'7?w&eCGvL q?2Uۊvs&Rwu=ǰyUϯ3J?53pw=UOb=lwx+ok[P Bl8;Cvh" t:+1w3{,RM=NT֥3lA+5[4kRzkσoyp>Ҹ Wnd75Nf6ЍyR8aX\R/:_5cU&]XS]C*/VA?$ЩOe&+6l>oσydqX1l"\p )UfAbVsiW,󟧦5kpYM1ܘǣɿfnKc1bpg-]Vc5BaXb̙!sg[.\}&3,ŎNyH|˟[_qG$c9Ƶo,b{&q'+*)۠밓";%X}l =j `ȷOͅXzMU!.D0 ۓL{ܩ7lIzX-j83陫dÝ$~1Wmw?Iqnp{PB1׺3ne#C30g3߅ D;z,8L&ls=j=BR6D< ~E@ѼJȮ5[@}9rͬz-)"Qs7y4#Yh׆1_tY=d1~4]Nn=JUlmP8웳9oI}]`VKؿ*N&6̠iU ld;zZ} `tNe;ߪY.Y=yߨs' ԯL> J5WQ&!v>d_]jsS{1Y&ceVy,xBPZ;ܣyl;kϰ[ldHCo,tS:EY~fO}Sp!}:i&]|$(a2}C5;EJ;XKJ7#+g3c~鵧ۓ}^8Pi&dht}֚Ըv=ifݿq*>}haꝮߩ 8b=k(ʁ^.ρmeB>;(InD2 *WQz& }zZ"mg'XaŎ:V&"=(qѻDf56{sCe9r><ֵ2gzoAmNX/Wmӳ??v=o6j"DC7=vr~w^kxiQ>X6З< U P};|X1dK+͙MWm~tNbC{/jq <﻾>q7ޯVCpZ]4eQw׸◿,ݒ]DŽ+Ze/UcVVgї+g_كN R `W*(&/+qr$S z-hwkNuix[f.Jke5fgΨ b$d> ЬzeU>RRSd3Y?O݉VWz\ xg5]:~h6}=gIS촉%p$Y&c{ʬ3ͪ&ۅ JGXWI7!L2qx~$;DpoVZb{u;E{<, z+#Dq̽R^[EUQ 1ixެ> Lb,꤮g§.N5s%OY,9\]!d=m\Mꁰ{=-ehuwvztu Ygbt(Ѭx(hؗ3&N?+16 9g.&km輻.V?,L-JkOMp+VX0{ bC3غVU59V[ԕ- )'9wNu[Oɧ~E'osy~SU|k." >©_Ig' p bY(0eXO$yNs(sq[{*sv]IhWvܨ2$sGgyے :(ˊJQ=DL:Ht:(ƾe%cm )=>oOe%_z[Yz75:鹭 aIoy3szVOVvp^o':f6}sx_7i&y]~_+M{jsAd^WBWR|ЉqX L)s}qIaɌ)+D\MD)B9d׭@F#.sh:9fOuV:f3_wJ]vEWDŽ"72:}E}q s]f. ŝ5IP]'w#M;k_n\j(* &~z Xz^uaMo4TA,1'rSzKYƩ!ap>uXW]?Ł~ g&iGgo] 'id"b3B[~~wٽTet2T\;coDf0FF&4ȱ>i^\0gOA싑!˼{ku2zyWۊ=BL!;Xtd|0Z-$5&- 7Yh|iI܍ou*٭+h t@I[(I q4ȟ{s= 5cE*ϞQ,:T<ߍڞ`e?-eN%_-D7gW]XeNlu3*g͞-J)9z}x5ὢ;A^sHtZ.{uJQ̾W,+*UΕ}Nڭ5".1H1VͻcF[Jwj5N^&';Q{=IΧד gfG7E|";V[ۖvS}x{䅑WK4j7pdiA:o}3DCZ}CNMZ\ ԼqZ $*;R7UTztJ|E'X[hnq:w[OǿSg_SF1#ZOA!`z6E2; ㆪ:`ENqf2e}Alä*&†Xݨ%3ىWPoI7d2g5^ÙM)Teo=|)؄*U,C]/#NwZO.'q[ϑq! 
es¯WwUfg;oIr0r9;N&&43Qꨇ[}S S8dܫ魗{ؙ3CheMnx$WEI: ڿD)^rl>Q[Foe}D gݟgP2'ܯv}ow`<.oJX2Lw!PyX|Jc@="NńLf.U;zcĩ\coԶȞ]c* .'stFktuNn6s;M=k{.)qfrÓ]6$ w}#$,c]e2Y.gH|q'щ9I+Mߩg|'YP8d{= * ^c6)6˳*ydށ#u߃\+gCa 2]*TS 8e!8%iS^KY} OfBYIiեϼ=q̊Z|z4~̘^JYQo^!U<;s%¹XyxO2) |o[6rs3k(zz&ʓʙULv]g] ˉqώ:+mې1z@ǜ=dw:z=F50v|:FG{\Ű#m`2&;|`w|'lj _W=^gwֻ?MX1ۃ]2ݽuia;oY]csFb+41-@Ž3M̻}cw~OljӍN0?᎑,o|LEH8k YvB9ąʓ]Uͮ)s -#qUVm/Kwdwr=9oLҨN"zE#g_1@>~}8#&S|՞ BOkyH6@_x+SZLk񩛅jcTZ{KWXwoeǾnPpNtx19s5xcf\[iShߏw;_Wn`]㾽NE'+6ٵ1Cy(yݸWˤbzYN+Nc#Z'T+b[VghYBD1Edɦl $&M)x%.w.ZX+ZWL+ŝy=칱n>5bvx?vj\C5[3 6uV Yט&d}(ɠQ3eޞ kШNozV{bDu*p:'H2!sYkW}=9R*kbvYڙLDe) 򝖏醤}fmurw> Ρ:Nc! Lkﳀ2iXĴef-wӍ7ɱf/6˵7! YF_J@&E3藰aTJf~.z!>|~ߛW^&zw1L$O+Y |E >]-5f+ ?TPd*nlM~^rJ+qWS7Lu̓ݫiiG;#I1/J̾Cs8dGkLwUd":у7PIf8_|KeU (׶qYeJLQ\W*'aUTN_OoU6}KV\4՟w.w&qUvnwIwQeooB\ y9]rĬa^-s 8+/w `vfr>@G>XXU$ZzPSnz/z {wFu|ޘLSEME%CܠU̱ק QR+ Q[SюU!T;.jgN*sPL-tNjYgלgҝ:v~Ճ\UԘLRn^ֽ5NoU V1x(5aŕ)ӔZlTUqyJ23twV|^1.MSEwDCߪ=q7 =W!/˩jaz|?G"f|\c_ᵱB_jZ]~5^VfϬr-Sޫ7*-}oP7ÑEtΣ Utq/JjϫM}U]n'΅l]yoUe<1tq4jXv'@_]ꁕY[D2iBh i=9o~W8x W/v\MzGb\ԣ'8¬3xH|7Y"ǁ^y9㚷 f93d.}Vtx{g0yoCHk3EWoE>M2ͥ.e콧sdts3a*ugQ oEuez.sw N |2}܍p*{:=~(n(L90o1Y<Փ&;^TI7/e)VM0/u#C^Ma' "SӱķT+ka:5,|WywVKV24Cwk!u&"Z̳@[ G3=r7+? :U,B qණ*@|vc~n J]VX#)+چrwvluɲ쥐Uf&3# V~|UfܫmvYN2167MיfK37Xzq++!FLߚw|x@!M%ˬ{/`w3! /7+e1y"站wgf=a<鱷 KҙŬ*뷫&iP&E92=Ap:[mZ؍*Ѧ;WpiQ9fOv3ndM}(!Sn&˝ 񨛃;eWYul5>3uXz|,qt2FE r%NH*d}}\몳:X=3^{&trM`v̫6:Ũdց+Sx:Ŕ1 }S~CBN&CM9laΔL m(9S:;=4A21#޻=du@?8)t{ı4Z5^e8O{S7!gGh[A/ŧ]\~YPȜ#/;ZxANuk]'L>kĿUs .s.A8 Z@Ӳ^ՌEGkQ/J\IF'_XR;c%ֹv 7o@bvyGg-<+/;T*z`.;qy;̞ؔ}YRv@{&| -k7彫6?ql["V tVe;y3= 1< QR(rnf BY W3W+rY8IY9[7GXgcEMp~r|%'Yu/+]|='N#(kAp(Rg=fzB%ʹķkqAݗWNtXVĺ*}*w9rD3zdG\n-2s6۾wos3t2ѷ™p8wL"/{' pU~][ёHqFy 2y_ǀVǻbYńNf{뤫AN0F1d+]IR|Sy(8aeժ%+Eq!s,w Yje^bM"3Aj W{Vi*)gL{Vw %ثY-fwvdxpBev_tcx+_|-91/ gw5*j;ߘDf"i]93kQgW3 vFzLL uA{T7~ eiC!羙~JPZ>n"ĺ'L]w+>8I#2;_nj{1')ytFce'3(wDQO9ƅ IKoPIW;єL{ONc`_/vw|7~L Q+O;*#J0Sq'OԎ|\gC8I[q]>f{­s*ݱ!'=&s X ]xuRP2ooP|hɮwSluj&T B6:θ,r <^*L u[)xΠL.&sgwz[oߑkuB31ܻdS5ű\#Z! 
o $ 3W}{iE^z86_;dNU?>\d}3cqq,qsϡcw>㼨Ef,M` 3\O=^@e|3Ƿys0\r+EW'ʼq25K~RòŮQ[ex~@ͱFfZYa|)}eƊxޔtAx6c)Yi#I҈,H|r^+ǃ˸:@sOJiMt/wuk=T1 =P l" f8TT_ }`5  ŌEVOk>-S}u|^M=]nT1)=;y=͚ 2jKuRZG^;bVg~ӯ}&'+916u&^xmg~__DQPv>6S.췥:ngt'qlN)ŜeAcz9{g_ݗ}XuVZeXPz1\ :Ċ]dٻgaN|;e S*.*#Vl?ڊ>@S./߸q8&8i Tf8WԞ$ 7u:Ԯv&C@(6\g! kLo7ПYv7y5w:>V8%eo>N;,uEw'Зr#mvٯ~synm`q`zWeO 9qjSt#J=9oEȤ6 1/ P`˟_jul[\=rzygO>+)0Qxniʗm#&BLwLdLV/HNG,|}t$^¡ua|_+uޯ#vZw*6GƕiRѯ6%DDS~ruw_69̰V)'&[=0;e,jDڌ''2;1;k~!աi/ꢨpg9˜&EbiOg]?fGN t3qFU;h*oMOeoc dY1.߭ߎx|P-T߁;%歍Y.~ׯt7;ǽ&HB=?ǿHw_6gy!g=y_UX)'/eVF/sSXLhz:k cx9Y~^G>iA]bv> ~g-NDOMW{DN&nةk:PY,oݒ_']yyEVcrg0\;U<~:*NQGK2,WDdJqrYpS~ӨW+!-! Tk@ӆb.wI T{EuYBfU3M=$+Tq%uQ&(YO*nTeP/Zk^n޺BvuGBv VPr7{s33]rfq0cY,뾑wWY{ԬZ9Eݦ&?FQYZaÒbkPy?G~M5W܌jZ&p +.;a?1Ɏi]⡼/z7}gUm5W2Uݦ\4_ L&`:ovKʸO`;7^1̳ E:fGx \WD:hG.L"sG[ς*g#F>E,槾2 Dd&.Q}"]Ǽ[kLr7|O$kZSMx{ܛ. SRJQ[9XSuA_IQl~gxYG .LO;~x ʊ;b7T/&q͌ǕA:WlnNSBS&suMzG|vYq-j7惑Z/lJİG;F]yUYE~tY^5B2M$k"\fed{=rA>/w9yn=~hXvWD:}{7e|ҊNA~J_ݭ3WJqC9 ͫ2M)/Q@VbxͩӬN6s.%ȉ2yb _bWdt3a{M;k*w̆X' IN 3PAΧsusϭzw;{!USo57sP~rj~PSNOd=N*=Ay ޻%?gO|~B#P+#l{we.\aN@zYi6׬]M`1aJ7bw󟅥nsA`&5\yR_!mն.JڊX|- l5"F5ӧ{ɏLx ;oNmǔIdj'MVR蕺q5ݒ+wO3Wqϱ=EIyEAy[qc~+hy]3=Y5~H&Ԝn%=5|]hV*S\!5gvb[),j=%PeVcdA ]fbʼ0BX)_]-uŮ 4dJP]۴;"]Bm+'I?.%;Z%Ezo3}=# yo_Trioԕq t1P3"T7uGd&c=SQQjtǦR=cYfɹN\X"3iSnC{abw[)2ژ":e(uJq~;e7ՙut+:?Џī7wpx :/Us4>SC/3 dYa}qr$Y8Bb:B,?'&=XuY{|Uo<Ϩ6V+g:1nAk=5oݥ~< Uܟ_btCWǬʿt+gg3`9, yE w.* 5lP8+٫.4m1%36ٝ}KIJEgx1! 6XIQ|/{D{wDG"NX)w: _&1%hAI4vm~N_[1ξBjb 7>9ɰ8k;O~)?M&E7İpLL|[pv _0%H1viWʾ;N))T亄ݡV1tz ?:1zw1b E`Jcdz;H:|74yx7ݫ)gƳ9=)Ap^! 
fZ-yvn_;=6*?᮶RaYaʵm ;zG#q,Sah*'uf'NfV!\O.NfW b̫ ;>Kݡj6-.+yoSoTm\暕;Ws{m'rQ3&Z-b= nfv+y ubfd'[.5RǍw4Q*TwXyEk:5:޽+(Gjks㝝nq9.d]N[''{Dϧs} Ƥ&xT暕}W8쇝lvXu$ͼA9 6g>Üj|q& :YwΗԈ"NIADE Vek }Ei=L]"o&d*Y5yԉh+e݈ #iw N9OBѼ6ĠfƔ >߰O;DEOMWޓ[U))Si镓 ~nDAdʁq׼NOf=:11tgRRVTg kn>yVn9% k(W$hVGe-+A:ݔTgf:VMv鹼:Zw|zu[^7j);7Fz֛ؔIVe{ytz+N8\=QZD*˜iɴMS".sy[[gىV”ORN]͚yy4q*nw}o;PQﭻ͂'0~*;KPBl'E2 \ꢃbwxNc,껲[Rz I&p*WWc Jz"^ :\) Bc91E3²L޸oEZuvH\xK4,bbT>bdԕ}<܄e׎6 Owq}U(3a%+hRF#߰W$*[e{F=0%eŁONS{=$;l~}P'y!gH&~w;f ynR͞Px+gg}>{9+IN'̞žѻ}W샗V8"ɎU2Ǚ]l>84q:ЍshW;r~Ԩ}%&ol 6r"FU[m~(~w'43șJNH)ۖ#7\6uos9IR:ԱPd5ijNJ}dל0ٛW) =[/~FՑaNaZuhw WЄ/~ 5l]PTIe*^ݠPsv&3 +*GZsC&ng ~R{:}>U|P//(%V +*ۼ*_v}bcYuQӑNtv iYSdiڙ^A! Ί{iGA)3g9v[I|ױ]֝jGDF_?\]>f;3c,9_I|wF4ӢNf;F̏rm4]Be0dNU$v9a;(( ILf gq+t$2 m> Vx}g5VoS !'v9Jѧy\wD>hxAg0Y3`do0U٘W|gm82/`WO7MvfX9l/YW Lزjã0=a>꽊{H?jL7ʐ'OFNЎtϪCɧ4# y4B z=&O7y^W=sXwY88nLkb }r1R؆ͺ%Uq%6"1i q_Q[ŷHivGYu_V<=N f?qüW8Z[ykf2gdg9f'zNq7}5 ) ox"S75|ݘu~'ȑη}d +&{K]hV{ǙBnGDҌB1LOfy>[*YMY7gUV tO>'Yטܱa²VGgi)/3L.sq@FX<57=B>6|g 2G4W?p^״f|Ցnsl۝iS؍.g_6_Du^9}Ǟ4++Wԃqψ. MI<ICP/gXɚw+z9y1-Эnqt1@qr|#EOa}OD ,XWʺﻡ#eTme}U]^GDVhqa•>NatҬf_ӑcĩX/JX4u Ks{dڵN7[pˤ^\9ϜWd_le! 
75XLPy@4L y[Fy8$ }a%{͟9>dx>)G\*}y'x3{q9$^j^t$eour}ax{toL.;A&2.v|HsKIzF\IrkӸAH&Ab,fJ6&̻W2%+"6ˌ#NE0L}ՌCY k, px?ph&Im`!/3y%TtWY1!>f,d}}ɼ}sNetyй)S؈NqC?s^6ٵ̦¬+zҭ^Mop0JĈ 9N`V՞ 6Vy:Bq> lVXc%Ppj;qxJIb/oU!$v2Nqp +F{׋t!O`CUXm<ƻe/9{1Pۃʴ[2+(q7&3SLu%G _ ;;3)/^TDzqf3 ̒m8kcy7ɐe_Se8j2aϨ8];35+`=Qv)z*lh n;S_[l:W>h+%٥.R2E2;5Q~s;z幝n{7ĺog90ωڊ"?kMc?U5Ϡd$j6\Nz A&2 [Y"xdU~Hc6{Eu/W}A.Mpp~ _>5L&a$%ƛ>rͷS?}֦#+;hNwj[W)z"8X4wlknO"e(qW Vksuʥv**Lݻ` u<< kܿna -(Y~Vrߡe%bm+3\+2V> ?Er2.Z/Kb[=uX)YW$=,x %Av,zӷBMl$ m/+B{+੹rL*# ;y#GɊa@DNS9yץcĤ 2Ho܈HrÜKb8Ʋ1b7aueL,9F$72.\V@lvXeZ$jD{PfNrMcC ˾hydg });H/QQUr3[ɾUkWj4[U1!OHo>Nj9WìI D *nMWX[w9Js~ 2 r9 >s%ˬsΛZ.z+t}RiTc4ڽ8sdOΞdaZ4Y77 t~VQRs樦FRw)k#@V˽~rʚf2wk߯Kw=~W+p{yA~+FXHN-jja5U\wIqDu/oU?n5i{/w5⾙a"j=]nBC/Zu:PmcZ>k.>sc01]0Ds˪XWgS;p^R7uZ5veT$Me퉕ݨݡPbuXzOOE:y |ǿS daۙR:Fzi XVi~2 `Ǯ<3A&7,3aw>1~:Q0o%VDĜ'}]MYG~meɓJ viԚo̾f]c>k$R&ұcvje!V߷NG~s䟤}>I0Y.3WgfL"|wlNٿ޻W<[X;;ri!ILF?;S1qBU}gVA{Pho>2qY)uO+.'nBXc/Ff1V+zqkd}\*K`V+&Yƻ1! &'s+j+E[11em)?Ş&.'7>>(gٙ35b9p^C͓XV!$?:P:%Ug{ʱ+,8PN_ c3<evtcu K6Y, =ϙyͨ}(7xu|}`sf8&/e-;C[zKQUuf1G=qG̿0M]ͯϾ"df^DdLJ!N*sP;iw G=`\l2Eõ~G Mb {T|]ZHKnn c~`S\NwVE罂:ѵ ͓ndcjb0g9NJvJXC#uifATjlVVĕPn饷,ff2Fe5V_nGH+2?WC=ܨsK +M5Hb6;ff =LÎXжJz!ƞ*{/P$cNRQc*YL#FvOC.A&[f&3󞜟vWI3c͘.U|!T.tZJl_4?=>Ly>NF*pz}.fjc՟ xG\unḍi]g^ Xuz6Lv\ fqeF.\B"-l({E6K1'l'Lgn?P-8'FMpY|3;*'~zvbJ SQA1W\fjyM_nI&Dh})G'?`06T/N+~׫RC)\.X@ff0c=yp漻wʓ]?Pĉ\V^em<{ YoE;9/ПwC,GDez(&Sp&/j$G ӆ</'Wb\mʰzs gK]lB 3+}*R撘hW:j51`w5S 1sȌg8{jKU;a/Fo}V t^>n^o:wS~^}LNtQb\Woߪ˟^/Z0  D`YȄ'{tFˊqŞrA'S52x7pz#>sKk2!|3O6dr??:T;T URE{mq2eZTX]w҅*}M ":q!>XQ/~|Cc{E0iDk,S/3>}F DZŷެӰw: b8RH[0MzZ˘0yߍlc:5S.8tSH/}H| ݼբ;r̍g2[']ZvZꆙ@>V*Ty[tư[pfe: [6kx+y:S- oLw~gyD1V3/=(͖0II f8&+Xϸ(z-cƅ2gnYrIN !m{Ps˜S }r e6zعHp1iu7=9LR} eN3KJlgfYt䄚Y@x&S4`#NNx໳3W,"8J  ?kY`MϾDŴrcntُj,MR֍Xc'aVJ|_+#ZqeSÝdP&b7ND|OQeҢϘ@3miW:Gx+n{,;CM6/73RX$>\I`Ss٘r*<2;eڄs_VUT[Y؄N}~abﳯgܷϘ3}쬩uYIJ Cm!(G՛_O>9?.}y cqȏ~s|7 [!.T$۪m;GWKb'?b8'/9P& =q8ߺ3ܹ} ǵ iPȔC(gQu=?ݦ&:> YjUa_:nE.s5XU$¦7!;B>uͭ;fa"{WOU-,PzIBv XQtl mWNґ8W,dyXMfkbMōyX)!.TėtPo^nuJ1KсIbOžq) 
.Ӵ~KS;4MPRV>^jލ(졏1(}h_YQG>kt+srw8wLe4UN ⊽b}n+t(}_>1';Na>myhRp@Xl" P`g{}2d~ĺYcoY焛ع=/+a}> /qvI?bUsxn 3{ wN,UN.2U*3~eC3U{}2&ٵ̦1serΔde]nu J9@1Qqz6=CiY=}p1żԨ[G|RطNn]i'ȑ>>&odWCJOXWw$/̪F'"S8zݢ5_>Ǵ@CF0E)!ΊwD{Ȼw3zZ/~|v~so툉s[oU\wC{qC+z4ӴW:}n4&9/CŠo_.3UAyk#J՗3Q΅(ߒ\3VqJˊiOxmC LtŜ,5cQ򬆳s*`j`}پb9ܛPZ=;ݛ27gEuK]i+sS'˔u՚q"ZE[ڛ,UuAf#=;~g f6ڙfevr#'.+sW1h"]ġ){qB9P@BؗS"Ov[eImΐ>9v&#>~@9 :Oqjgfx2QQL&XYcԘhVYyEP8OS;C31qwDŽɁgΛ3?_Q>^>؛wvģ˟%T΅ﰃTOvojsρg;o˾c^Ny 2c!aJsô+iJ>jj9caP=QGmmo]| >_k̕hlFY"E'IM}QNqٓlL78T(z W&׌C$"7ϞnaizۜHqcF԰R@cwBuL䎈?w %}d6ڪmuJb &/Xq%0lg+V N>JȴQ( -WM-!7Ag4kPb~2zQ$R6W)ZVO6 51^m#0{Z2tQMƝk;}i=I3}GH}ّi5PjIQ(e~{.|re"qW[vAY1{^jr~y.?])O(B`5ut Cy*\TZq׊bQۑr\zRhv^0 r6w޸rΩn;&m;w&Qa*fE/p697ٙao^^6$$:Q<-Oc_/$J|E'v+6Ih>w˝s\>k=tC㗞t3P3)^mTgOAdʁk}2Г0|Sq|UIZXֺoj,i#.+sWL޵di}SQ"XwL0ƺB9EFإ/POQ/dp<a+ &jxnhn"<̷Ż95Nd&;3 "{ńLfĬ \ ]֕^M0h3(N&-mо{*wx^AY\PO~ZIǿRzߍEU$(W^wwSH/\t\k#_6qghgن0! 4{B8/%pƈTZd ;h9sd5e9Ўn0ϠfƔf'Q@r]Gr#)άO'D27'0ߤЅ[%yN\pGˬy}oVEMe²JK"%v9yV2Mٵ̦һPs Ǚ=LvW|C \MSyqCmýrWMC'BEO՟QƿxYf@U!4~&<־B@^n&'2;qoc󳇾ߪ3gM#3\sѐŖjvvRnV2 ߸f^Y߈FC0fڊ^K9 ܗ2|E7؍#0i#IJsxޣ'Cp//yQ7v'z"{9dɼwXϺ~Nj'dv+sûY<β]03U)shs{jOжJ)+q`o>J_=wT_?h*sR,>m?cy}~LyYI+&>tC9:*(I9'fBr%oǻ,iK{sq",΃9+^*r3{ɋurob PvUgˮ{+\5N\V殘$9*gl[xw Y~GvP25^zN<*n֯юXԹ۳L0JcD$N8!{bGi |OP)*+w#gɧoj9csDwbKO~*իҷBNV~D0fBs$F69ƈըˊν^@EOy['ݷ; ZNܰr\+32W]Kރsuf!C˼fվ W y|V~WdsZgJYL5Uܬ+퓫٬+z)wShO)*{'N.VښˎTN6;=n6ExNEΤ:l{`%gmkna3/y fsz>u=l BOՏ-u^~C0>Y-{e-]m1n'>(6ߐg3&&agNնmqϗFIL: |v߹"+ +\sf_UmjwP91E$/ԏ[m2i&Ydz320| '0sw:QC{,̴_ֿWϘ(%59]|^llԃ!V$nhG{;dT@L0fBiԸ1.QęflDq:OSʊ{8]b711xk2f20o\c *#ݸWDSqYjk*FQdt{ﶱò~^ݽb'/OK3]M9}|dT@WA2|e.=0w8G<~r K6Y -wS-3R"$qghvK`@J$? 
㭫νsBA.`e!w5EvIza9?31eSP|PIf7WRG=lk7Ȕ{,&\H[3~uX)DA&bF9+L= *r.o=(_zɹ"!U|[q!يKz[ZȚR78yjRi:q"41f*حz70gRfFg).nfmc_ C6nTYx 79_tuQ~K1SǕ}hՃ!JVǧ {YL"5ӫ8tʉ:/SvݪV]FqU.֝;),S k˜XEl>}2W =a/;>ݨ' oEaq#>e:Mݽڀ{{rVT7B-A,E)a&f|^+"H9j s^w+^C\IyikV@vߩ"E&IgyW8ˈ@N+d c+:tUKu}}͎n.Fݭ{EVIYî_5/<ϵS'>7X.vw5pҫPZ'Bu`ȄB.S԰ߎ Sę枘dr2s`ZnQ*g*ጟXd6³m1rN(SBB\'㩿ǿ6>{cY-ig LZ;3**y@4DyuuQxTS^_d+9m3C X2 "8mmï6veqa 5gB$GCxM(zx5ָǎ'Ni~ɨ~M/Q?E*AVGm{!Mfk ッ[{abWϤ^/8hGz+ơ{DZ Odb ޭR}#n%NW '9 x+{:KƷ&vjƤCuu'TV(o3ߺbĥf@.[viO0J_wwñC󛺘sY̺$5)ȳ J,fo+NlzO ~RlY9U8Unt;{slXœNߪۃsb2+ƺNqo˄gLU{ߒ( RÓ`}7t+P=wPqX_vwˊ\)E} `W>8wٟc,:,u@sJppޛnb1945a)ʗ|#q5?RffDe'|yy!ZWn7:MD;Ln`/Luy~gQFAuɎ +]e$U񸪗rߚ4{hL,ysW0ibPfn?Zbjzk\k'ů}YaBOŁcr(̨Ս=c9@՛= qY[%K& Ie7{FZǙM6WoO \V*cr^v{@SmKәt_ yskcox@\O)sw}{e2WL_*֜noD﨩)W~`rWvUnTg M 0hc3>}ru '<1 O.;'pAyK=zk ;1Գ|Ջ`|U/L;3fAI9mfLedb,k/pzWc$"T>ӱ\Mz\Ṳ^mSp;doS]&^S6e>_v'R׹t%*8q0E0apcS5>f ROۇ`8옶)jn`3Ӗ̮vxזSv X?ùb9{s5]2/R)Aށ(*{vy(a->xʔV܁:,n dwnT5[vL~_.,"gʩ+48}u xe͍Va>>C%0{||>%eӘf9}K"}g 5{_Z W:5WgݐgpO~Y9c`?tf_'DO-趌:&Kz6U;vN G-5slxHOTC'.>s̝Jƣ͏# ] JGdH'+VWJ4ME"v.앙|O[Q3Z(Y1my'Om`&y٫g,)+,g^B[ٿbǧ}covϻ5eGع$:@t+/, oIwr[B@x6OV~+~xmǷe[;eAꉏgJ[Zi8k1 z{9OρNEWd4nJ3LueQC\MpvN8lZ2`i+[j{>@ hVUjzn\H7'4 7irٵVTVΥpW>:tâ{4*R)Ql}+,q<ჼ![jUy\z8Ɂ̸8BNJ_HIU*#1.HJ)2JtouGjf#vd hO&2#Wd F#`d v4s?rL{U'[@hs@cb}|cEoT-皪n&^FksՂwr&;~N:VI脚Cy|+y=s|qŬPPi&~'9L-oΕG:Cx1K$̴ymf!7w!V&8G8JE"b%)l#qYopOV﬚7&fw2LRwv?,>&?eV:Id!d|-gјy˔wǨÞ9c-:%FDV=O d P8wd+ﻪz2:rnf/&a(Cq76<Or9Ǫ*3g71f=VZ1: |<~Yƹ2u鞭ڇպcZy=UW,!tg?bO3ֳw>)7+}L%xj]߯Yv-8X2q1%+YL^wJKpC^*QxcaϘf1qĝΝ밅gЬ}wOsE\؍F\{-,k]w1fo_<3*̿AN %˜k…urM=iucHJ=-+W9l>ٿ9Č2轆'*A^Rc~7y&6o5nҙT<͚m72'9hfeܡ_LgL{Mfl֣7XɪdV_+p,dSv|E'G[>wʍƔѻaΐ;GncwU~nզg:ldg2c!{EEhq~֒.)x(Ax!Sk8WEϨYl _A󮺂.3*]iNz]H-Ή̺R֍E9l*{r3u^;RY/L :ԞV晝!ޑͭ!}APT|ӊL 0y|<7B^bB|3}?,{Q,=2Dg;+Ǫꅼа6yWb止M`D5kp*zk㐘1=C$ |.+7Yr{rfl%Gm~㼇Axy^(@9{3zƸva R+-$.`1VVV9"ܳ%BKb#&;>ЈNKoe#:so"E&I2 F4oҿф> X tr%^(@~7t\n(Fq4OѹȜ6Ǭin<,sͬY6on̪^ViVc7[Vҭή/9a'hFLv4Eލme*I}3q_'H&]  YVj}}kΓucd2':˅Y}dTy/xU'7LVو>0Ws5[3Ծŧ$6= ~J[N]4iuudlDfױ>g )GpLXtv!>QvdlBJQCPA2%ջS f xiUu0iWS9hWjгgSswI~ws\dG~4֩JJeJf av=1yQs1L%J2ҪW 4S.\c 
!Mo{uMӺ)Sgٞz_R4lGHM1 A=هQ^ϢQg7X&s;LiLc%ZMÈTi~4;RB?zͬg&TU9Vo撷s{-XfŁMN{^Dƙ371gEGSU 'Oc\ֆ~XL)k]GإLjSV8_=-xu^x;:p$W(O&g:rf$3UlW~)N_q{*튝E-]E;z%Oke+F"U~3x$'e(z:cwW=a=v_G4@Vhg& -9kŬݏ8*:Ǹ#X'mXYb¡=Ĺ1TckhV4wa;K(l%+g=@+: ̼rDmu<155JkdpDgM> ]lujOSEH&'2TefYM|ῃOE{n89y}>6$mo2 E֫{b*(}wZpos2gPu;W:< G_s&BOW DRבyqr82 +{bˎqc﹀7R گ]'Opy`nJ昕=p kպ332;xރHn*g>z6Zvd "S2t}B>P#kSɻM |Vߙʜѭ{L8 Ow޾u|Oc[hvCS D@IOAd!['z^1;a:5Ǭ)232Yjϥ^,\ο*ܟ94j0)g3L2S2s7 Y79)L9p&%/׹E/6dössMޜvY33;7zg37suvA1/y f:@Y$3}QoA'SV?g1Q͠Py3=^χ)3uǧ7s-vT0ǒ9I\Y{ը,[΢1ѭ:V] O Fٿ⧡}3qLwuT?'g{D\ٻesYS̾,o/z؉cw9[8=Q"ݺ[iܽϜi@!xuUZS~Tr풱{)3L ov&yԠ\5my[d'2˾o_T̑%9zurXXtPIVBIIXG~7-!%tjL ŸE|TWA<\mVm6tP3qd8qb ' }  P恘QqNpy8a9o_s΋!IQ>͠~FRV<={ynңBǿUWڍWWdzwӌc`{FS[V>Ka󊜌:ͱr.=ߙm5rmg5l ۸va621m2۫ =F_3l GmkUfI}ս5ZB&K>9g}ފ9N+و>D+iG`VF[VqChmw!˻n YxK YpUܬg1Y?hOdX'La?YϚ|<[,ЩJce=+/7qMo75"$iˢ܃r5 _t 7BnD¢FdI~kж CwBfخ>BT?W!!N;x&'E19CIW]dŠYۢޘKkEkD"^UF5mEYILD򛲐ȒPN03u\O{TL;s",L[,WL䊈WP^lz$Ϙum7gH3'lhZ*9-W=ꠉ UB3 +'z66gs~jzAY)RyU6 qVRL0dBY3Wcf5:7] BV Xٙ:iWjUGˤ^Y-Qv)}<*f= ޗ{U/vc7Kx\egWy8qsQ~4o7~k'l _`JbF}iSyfWά٤KG~+/mDgUyy҃NE:_/SM߻dHMc&!u!#?T)VSњtI5=sG> VM2MyGxzb˘`̄"atPVpe&Ù?)lfs\'KUh[MF߬q%.+sW788 TdW2S:CtOt^&*+REOg>MDNLrRꦒi$wn;= h&ڜ}#S/AaBjcQ~<%g;>c:ZDK}90"6FU=ǬH Q k ŬƳ}uǼNk~d4cW؁02ҽf*yʝEZ kYKYOy/oD[ViSS sDOg:: a,"`03:3ړ6VsDO ߺU-,ǜE> LX|İmd*U.ˍ(BfI]atO8%+jguvS]N{ ҇~Z3)ѿ/م6+< ;6޲W5>x7.1_9+جn3™Qelo8A1jث ePHLJ?e”+2g~\2MMTMև˚wkxƬĿCZ1<`WldZMf6-D-L;_'m9B51!Beb3O߹\>T V>B>sU8zM l&*'&GN+ʞw*zzx.W8m;uqe“]@(itY`q*&†ٔFx=eb-]&t~ؽ1鐙˭zQ)ς^< O; 9ǔFk3syD\`!V9~7Lz 6;ip†coƼgy'zKy{1gZZRslYB}6599>n>A?`ڙBA9HBV 8;4m]cY2BlW~\Wnq:%H299t}LeGHZaE$ʧ^3c7+U7Ko19  A:iAYW[ 2Ŝ^O!.1-ʳ dEN/@wjB`flu~0(ݸ}e)JulΦ{MdVBD{1OQDQeF ZAd[fkXZP.%r)}!;~ e2ȭU~d6PUq;ڒu;;"1%94~> [ Ojl5%}r Z.]b)S%wjywONQ+:I#&;ߧ:nhBR> 1|X^o_g3SDXOK] tT<'̓+;2=zۨ<|γ3267==C='h]T:tk'2n}W<C+M畎\?#UL=Wq-V=!?Lty6OAW&XAɳk~B=rř{eށp:/4~lqTIcmh>_ ČeݩjU6>sWOM:ЋWd?>߫N&,>d{儫ֆ-ӭ^e='N# 9휳ffuh=[Vd5 tjDdb3QUAp,_t?zW L+Z \u{3ٜ-kNnYZS;=L]f2zgMFA~Y)Bb%>(wSzsGi(C7xꞒV ފAO8m0)IPf\U h)2 nguF ̬VFSסe+YzYg\kEMEO~Pyzt֛i֜1Y"˕V}{ e(w[+Cb4[ e!3Lᙳ]>2vJUU=ϞwQe835 r iTQ=~7q 
f?o*x-kwᓕ9.b`1͇oExFًoՌaۅ̮s;r,U͡jjBIQ>Ԍ{r#[i:R*+95Q!;ro*}Ld\7Etva%R4L Jw3ItuJLO.` \H q6̈oXQq7s,Sw@biӫt=O1_7TB^Sd`6:όĒvi%p1w=qIZ/Nߞ@6yc=A [RFvSx^{-}aJ".+qLL+PVV}S36㪴l`쪫h#! 0k|yO͢PՉq'dNSH7;5ʔg|ZEC{Ӗ5Vk7/0#Q:{mW]]mt̔Y !#4ϱ")&i1϶`c<iY/cʫҿB<!39m|d%|W)YU9NHoxWvuXO^9%. iw.~ {U='Vb5梏x4{߱Gԓ9 $+YDƟĉd&i^Y#< ̽wopCӔ8Ŭkkn:pID;!t@*9oiCY ,+cLJu*rg%LNY%3ru3smxoeYc2_]uZi&lq|C),R/L }&vpV39&'<(jNC;2G'i|x ̏ iGI#9he[`U =+زJߚWߨ>g'+q]#],U:qZLlYfS<+v*ެP‰UD5܉Jt@(8#fIV=TPH2%)6jȽ"i>梲M?~[yZhD`.Qe·ͭYFY_'8sFQZ:ȗ~ a&58F}f[UA*VDEFNrMH,L,"*T\4-sBojzTK<=GR rd;ݼX2j`k-VtVg2+/wl_SHjX(ΓLpU9?vZA'25~)(j΄u\=#Q?Xlꠉ^v?W 6*rLˉ .;Sl<\ ]*{ тTʣލʬ@\tUWujQ.0o|v~qf(3>1BjtguB_2_<5k)d%טUUu齆{Pr*Dy̛30 թ+NG;**1Ym>ЫUز(6:ԩ>՜g iϴVH=kYriYweugJ Y O}^Ci&Aͺ+k$9 b"7YuCWv̍{,X^jݦfS_Tß\j)ce/I=hMMU*ԣ]=oy>y)L 1Ԯ 7^)v76\Y5_Q~FOllo O qc)9Vg} DA&8=#j9r'qx﹚PFl#^(@Ϲ3 fQ}$io/1*Vtߍ+,D؞ȔC9+Q8Ǐf9sʠW[D4ҘUJ;PsM:bH-gzZ*!HשħmEU 3Ъ7:%uoB{.mAgA9c;$l ӎԐ99,'g|Vӑt U këlYEkgen.njWetKR苨>83Nab=Ǽ8'WUŒLjĔ4X^iCbxiS`4=zt|7N'|WLwo{!K˞@ [ `q7w#1Iځם@~J# kѮt֎1?ڲeyn{QmlYMUIL.m`ψ8H~vUV=CDA&Xiy~“AitITtL>Uq'W1&2i=jT<^c4WmNTDllb^t\f@{1W My\[ĹM(C^OU{^n2KoaEgK(TD|e+z: IZO|8!3 T'Jmh jfLc !TΆHS=U?n}- ~[&E~7^.z9`%+kaS+j7_Z@h@vih28Sl+2?maG#wv#xY7ɻI BHfqJ<2¿w2AexNwOj4¯Yw|ІC*U\o/҉{VM&zaw jqF Sej|v# ǪBW'R3Zo ԔɦtɃIHcɜ 3#heVc?+q6<؀13XpyX~^M\P9꾈wY;+Шi9wY["l|R;z9{7wY.@cXP(لxG&ٮn3:+|]N#!^1"@mw?j<'9(f]M16qPr|˟-+7WRtdoc#5Nةvh6y`AnߙfMW x& YvžYɳy[O.G\V_nbGˏ5fDw9kpsP`VH2?Ω^lN&ڲŻS)۩j^e*Z<'ud.DaEG3O"e(FVe_ؐ`"~wʭ{.ɜ2oY3gr [8U5U{SGn5X̱zu|a^qϨL9&93Uo7KCR5":e)W~3+Pf!x -9?+2?^`̓[μf[}MU1yxA6dch[Q#,tѐJU*&!*KڎLldfcLUps8˺Fl?Ii<8ː)ulmw )ZE ,QęgoK*=@X!PU>|^a}CHc"c씹Y^;44zW*!ldfY|-yEzi0_h:2}ucY\@ޘ%i lq*rc`N1UE|yojF3Hw3יn7ܱuX$62Z~(9(λ Vu`>%3gϻd^EΔ$R4ȃ UU}6LӲw ݊LƵcq O!FHQt78l1Na^56Dʤ3#ڨ^otc12W2)ˤ٘}8v.'s20FaRwCDc,-lͱͻboб7Ҙ Ȕn=?AXpfT̞_R#RAWX2BzHn@A3kY+B? 
yk Gaq`Ȅ3PlWa^es9u9!ϱR"RgNv̵ jLJΔڞC >#bVxDl۴ɒAqo;aә̘0N" ?.AYn\eN4~/L h7[24ݽWALEV{ Uld'{ލ9Gw06e5Yr0R˨lDfNY̪ra㕛>,N$|y8Zͪ^V[kuk$XO"~Cy}9Bh3e7ճEѵlY%OǤæ&paαw~dvx[7:@?Qegp*Z+^M Z86dZ+X R(d%׻|D޺'LMXFYΘvB7u7YŪUqeV$[D(C}li+%e*-.y uGy˜dUw#̯6ʵ\lOKP~9߃9mt 5ӫ#lI>ʱY.(L1=YH>CXSoIg#0sz-O b|E'𛧦DX1wBG(9K?7<4(a뜘LCeKX˭gs8ךs1N2;:[61ڒnun~> ʊ˟O1YX}te$iaofʗM{foYmU!.()'pCYXղD5d3k-}s!s޼};`+zGC-u=ǥFڦx$k"\C}ީf^{IjyV5܆b|ݔux#S^[o{2HY ~VܧJ0Uvmܚҩ[iLF?h$Ghªܟ\8̩ wSB&޳O'fZabʽXɚo.zKfL[JV]z[gOOLA[:e:vXJrſ$n7:s2'qcDEm5qU%(x +/4\1봷{'u+W-lV}ZŦE;n;vW)L0Al+b56z2%W|Wm裸ſY$x*ՙs~8YMor%XD s']F_T-}`TM9 6ko=YyԨM({2mՕU>"GLwYU2&-hLMK>z9  ZXֺXxpqj!j䆾ʄ-#:mm_w`Z+r#K~=Xv׶_&_cV{gVdBO1 =HLz2J+r2Ϩ7ZF(s8= #gX%*™f,ٔ8S6&g[ZxZyn]*rꚭ`2g {#7Sy3>! טWZ=C)YlscŽr*Jv!NJoT>݋zDIZӢ0eNVޞgr̭jDb$"]hj'̅*Slcgl-~mfS<:2mlZ-Hj ]+ qoGo`KZQ&n9sɜ lYi "X*NY@צc7jar g"Z_"*;2'}D[sr>godÔLrƝa7r/1j9̇&:DIDhIӵy_#sdŕLZ>_ʶ/TV7iH,f6׹Ct[7|Z۝3(1V)+tr&7t<#BXA{r: v7 '3kHkjsblQ1>hQ'AޖeՋߙyAþYe{jQ.J牏fd>ĉsSH G;mZQ-NU 3keZ g:~'zY: [c=5w6Q뙂TuRrpڿ}Q9x)~CUA*f]r,)X9KZU\`2t {`~G76j[dNLf99ׁX gse^4^Hlx_J_NCo ?S?)Djykyՠ&>bσ]jfW{vK68OT5f&9Qu0]h#eLgFEM[;h*!_U1`ܥ|0 :UWV B[U"\B4oia'Tt;OFQmWd%f,GTl;g/cbB'3ڞ:N3U*4f[n6OIT栢#w1П)1At3NfxſV(zBi٪+fKftj̙?cg2XSDnj7X˪]NYǩLF Y=ڼAɢAYDpY7 7=MȍLfUwKm&챊{䟷{V[}ݜ(z=XGh2ɨ.棫NFf\k㣻;smNz،b׼]ܬpeEpGYCXiz#LezM.y {O0dB9 ~]{9\c. 
6fƜjtL jfLYkxMpe΋Y^UuN fEŰ 5" 3rcY3͜_\ONQv^aV]N \cYyJMpI>i^r㕝Nz~mt\~L(Ryh>-3ũIe{PW"/i B4A~iH39j(ms9Ϊ8c'YlP9b; ~[U##ѡVsz4Wubf  bJM<3*6םvC s\=wrjtCٜJ/z+:@u 3pȼحg.@xV,ff-xJb?ά<9goeXuw(3!,JEDWY)S v}:Zk3d ґRxW|U; z FUQj^} ,ge9=٫/N*rcUv V+8hy*|q"29۔RW7T|`oDW&qb'*bw̙D@Dݨ.DLo0SE{KmkC'+/>zXɚukCtIϺ~T)z('L"e(qVee*0Ұ{+&d2#J&aUy?7Ot|P+R>+3)ʞGB,1}}z^(@ wכ]0Ϲ|vA9 JfJUdYe^1U؃r=ig/sUz^_\6=׊'C6VdmvtŒd5jH5Ϊ0ke,EܧPJh %{GOfo2[dYOH' "rۭ/Dٟ'=UVL '9(f ZO(Ykՙw<k=*.~'*C>_Λ{giX??ISu5#hK$E&%o2?fq~NZXbVWoxΊ^[ʼnvJԙi䄑k3v6Di,>SO1i~s1yY4i%NvDAv֬YMGre-jjf[1 Sϸ_u;VjcªhI,|mReF>y9.SH:{>*n*y^$sYfʴ灺A كѥџ|tYK/{aVLdF_Q#yERߵ:@뜝fWVd2l6 +uYcjW64i,NEMESɞS)^6Qqtfœif'+Vq|P部̣nΔ 8"7-lDYBL2 >#13Dɬz65=r(όvyD)>_VZ8EsE Nt\'2aFїjY wU'yp$y?t c͜jC7[>8NJ`7U#FwYc=xuxW+*?xE>l+¡B]^Y5NXQ^a7خL Oд+6"f35ݬr +' fSqU:"wZcI +;#:1id*&d2#iT檆OV:Zꪞ-zs9g9w hS2wc<>YzӨj^TmQJY7L^=sFWU<՞xsE>̌>))7sǑ$=Lnf2ɻ^U*ڬ{_bJ⍢IH)yw" 21998׶ǔpO%q}oYgV9uG||0ڞ`*&d2#=wcnb}9Ȕ3}f"}k2==zr܁l|X?R7Ue u^:{<߳gr MH=ȷq}BQt1VU=,!a Cf<[,^<%^YṼZ[&)2)T4v}η僮e S2s"nyK9Cf]uR'{v~Pz{1b[W]4|UaUU{CB?$xt4Vt>bX "~8li^ͧjZ#82V?*re6أ#ߠ{EFDu*P $+yc9x!; 6׉(l\&G)F>?.T?͆WxFD0miRSSV\ޢ_21';\{ݱ$:Yg.?Yiwʅi#9zr止 wȀb._`2#2de%++Xhd@7ˁȟ1N\Emj7»IvvqZ">Y^~'u,3 f/zML͚%>/7>E79U%G4O 1d$g̙W]QcZ'oNeW0؅ڟ M4&yx2S j~Mb-ڛyE]O{`97=#j̼Ch}EC}#b֬7qC??**aR kix_*2Gu9L|Rs2963g=II_7 ?&9WS~zG  utKUńNfͭJ%+V1'0|rNQb8?oj@ @|0gliUu47s9SQ1$ ={b՜9ֳMlZ1y=D,g^',kh[<5tZkcU/i v#Wo O|Z44W؍v,R ևA]L3ojgVU7d\[i7u3%z}ЧA:iAYlvb'; jφ)E Ur3c^q`ÿrr#ӞyƙB1ASEѷsyv}aٍzacV iz*fg| s.g[+K=H˪xܗ2ŜX kYg7d@3҉|r*bvcTt}9ݛ*\=(c !txC<ιo7ȞƍNk3y=r$q3a=W>I|ϙL)¼Ldj&O ,բ/41ՙc:#P[@GbJ-Sh93PSˣrԗh2wT*|`*ί.˅ls8f2_aS=яW'"r^\IOk{{<]:\g%F *JnOE{XUs{cZ4ȊfKCFGZU5PsBҧu Uu1%z-~>G}W*̌q|y'/%1Wy,3Ϣ2~Wz4+&%8+n:K6Ҙ_տljxyG= Od'>dĜ zieJ MjdW @[*BGxC99v=/{c2!1[{Wܳ`=UĝπfQf"k緇#,v2ݬ^̯0Kg_Z )YYbjFR$d ~zُ 9V HfAXjׂLMb#oƭ)!eGLxJe2yӝXTAѴusQ?n.2Wyt2]XD}0i3 Q`z'e^]I:4=AיvU=Ԕ袊Ʌ^a k28ӄm\M}F}0ۅp"P}su&y!3 qwlFS%ysտQ'ƪS"[\weG5<ھdsosm0cK%.kJ.X:s9?z>o)De:k={zF2 !ߊlYi2bnϹűno{_ˤh g5$X&8STet}Vn"}XJ}lu#]9ʱ{ŭB-u'rCeݎssm\笢F >PEGD )1k9/vEg8\u90^|8{-)OzˤW#^T՜v`~ٲW2'+ e?wO 
Cf]2w,>jfmA>slo6gyU*b*X4#2Y^36ĪKaȌ]$YZf#0+mƲ^tdө{f,{ ;'M?`vߦ`/\I+%R/QƾSUCz?+kJ\hjUyz"߬gwd#NH5z^ya8|VpAQYky[؜V@K^3Cˢ<&x..so 4v7ۅr,=e S*f19+Pud kYg^I~zq%WR=bX}!^6̫ Y픂41eM5bZESfX}l^SNQ[Q~j--N8$Sܞnu/4 Kb3{LNՉAQn7{,+`TvPS&E2e.TMiJ 4 6e_4'v kk.fS]Gcdfx6sFN3uzU,SX̒oVBVOzUgx.Bi4\9Dc$&.\ў)evȨ1%t//*A\ᘩ MMɪR:.ƨ1y(jIFޘL[fb,+=U(3e}FI ̨5w9 cgsE]L*}F ]fb޴HV $,؍3AG2dh9,%ƻ7s|w{Љ=N&Vy݄3i7!X2o;o/#pe'51h\26j[gFX_ڽE4)6'fs#\F8GesrjgA17OL#GmZz's5"&n? UTWr:SV3'}%'sѱMf6"L 7:^'b#&;1%eo+|~MIU#%ҿ\[w`B_dc`C-jVn  r]|~ͪ.uWI'y>qcU0#{KFh2rǜ@l kMcFH{!d(zi=yX9#ij_jԾM;Cq/U5h;BJj2ޛoQW 98*򳉸6}:ccl]^= P-RwْfnOXR1SѽS쫧^2¬lEyjC,SS7uC`Ma3k3giQ-%ݜ sp1sjYd:UnHVZlJfOhm=']<ެ~A B[։F%ad^)[GqCȱUd+M^(賘IVUzh1zo$(kA\):h^q8C6%%S-iE KU)XuLc{P{W:] rcWh8Q7Xgs3V+^b6F+s&2F6 6TFmS?8Q TO!Z3S߬ǎѷOz' /GFuïLg=,oL7bf9oPsEl&JK9Lq(ueͤ;gm*8Sn[ɹ8f}H4m4ԜZA*Q;aۼ ^DNLyE6qWwA݋*iA"&zW})9?6׆Lg ~[N >)1  Y(gOLx]ui(~fl|,8{z׍b5׭ݸ4n ?ip߼/8T.!fbw1lƸ?g;Ύ_2^/q߱U6p5 =Jf]o e^g{*7r=y˭SгJ^ ebXQS@x %S&l_[!9et V3꩓ W "aJUmoc/CrepDX5;APsF=W5F"09\xCe$%\ \bquO_ց#ڊ^t[7=#9ͤE4p Y_`e7L;GY{PVݎ=mc6jWȼB[;dO>v#b2վ=XOz+-?+L|?̑UZz[ #e65^?EZ{~'9qw9{g^s:AT bՁ*h*t{.T9m*)dVm2ONw !uvN:RrxEX$w29w+%G4v o&fe?YqR2 (*wzÍ<‘Y8VܰWTX ^YW)[^I*W4sI7Wut(} icY*mԃTϺdamM~Tfw񫣟ow `/t7>vlʇP`%_*b癩4>Ѓ^gLt.`xD06q]IWVs[6NBZFgFIiOWై$z忡`bQc>y6^Uey7aJl:}CNu3sbo ;{?KYqʺ{>WL&M;kѿyz'6O\ }y09(H5 p;2M@.VBbf{h,&M++~v* n)sIطk3m3ᓙX P>YџwfH>M[ƈ :~*Nګ37tV{Um-Hgqߘh6<1mat>3|d2^1@ާ:=V̱#O+ }9/ G7`AK̼!8P }Y/w2tHݟ'JmYu`q=' ne%myIͻE"T +LvinSHܒ皦bT.t%?;O\5o49Ơ$8'T7zΝsݱ{wͮ೽^X'St!4!'#:)Z>):XEx[{GTUoVn^aeeT[H0xd_lRt{pvٛiw%72o91+b8ܭʅ9[Zwɞy@ٷ]69 ىӦRܧ"ߓ $mVdZZs!ls2X $+-|7ob.Rܳi" 67PV(g}y-ҏNJzBe-({-O(ODt&]iޠL+Wβ2JD/=4ϨS 'Zf& wyspVkY*cy<-Lc! 
k#φɅ{;R);Y]bVˤbz~=17|ҷeŽn f6>:',=Ŋ`j^&[hi+&g2;=,{{ַ ]/5@*;cn)sILL9n˪Iy2]*ą8ϕdnI6O9qʟ')2)q0hL[&K'>[^2}N~WEU2v[Q{|07}W.*i*L^x#W\=EZZѤA@5kf{p׿}wCa 1{Yݹvxo9*{Kb)uciDo'?N<^Ab[DĬz3uȺj m ad$ݽWhC52k~ȃ[f]56!gESYʙe:'vyqVZOX%!̳$E&%iv{˾_U15̳>xZLajpO{&bD1s˞k*󻉠 6MTԳykIžOC'+NGUǨ%sӰ{^{V[SOE[$K"H2!š4Sm:Ff2K-uml f4#SVoh# bڼ;VO+TfUUYes,цMhyE^m"\IQ"iQWEw]cIRGzmcY=1{yx;C+2Y6׽_COfѫxyYa=8HϪ<+}<0>zyUeJN]T8ޮub'.dCC+&S%<=_SU\{dzQY b6һI0V#q6xd?rcab$ĝcH}gB)HL>ƝAEtLm/˒WToj"_uOW,[B>YJUˊ*~ -p> w䮖M u;}yr Ld fM p\ajf86zN][g|L瘡ezm~c~V8++|:=a<>.2W5iҸ27YpggsẸm9ڈ.&ѝVN۞P^{Dzc\q_ 2 ~Dia%)F1Ϸ7<t8@5v[: SnZچ0Gr@`.>s̒t0;g2|ʍΗ;eπ&*jP*ίi l6/Oɳk鸡_]6n2ūĻUM H[O>4rGބ>QT=~{ГXxޘj7&57ք[6D` $NDd2nx6UVm|쫰ߞRz6vfʕbg}sl!ev Mn1حU#exs Zf].%,#XwX"N}Nm)0>K eV =y׆NGpQqn rU:sqѡhJ_2&Fg^M!Mw9+L}DZqϊqV>?slTwm:5{vG/<ẹ߃2}9R@ܣwrna9k_]bmIsDe:}Nz}uPz.p~Ba(}Qٜ4ﷵcv w@_[>i_lа^=+) N/歧DܖƔ`]gqSdq23ɫ#;=l.8TMn~9nObS2{„Tm og)+D_n2F)/s~A+ؒ.~NA}w泹&~g\an_Ӳ_$wOzJ.'p#-:m3%@:؞T`>,_1450%Fu9TS+tIWU-U/J{;V[OmsF_'q]Vx2ćggOu:%mU3qwKnfv+Ӣd@o|gV˳N4xn7*y*S\O(zn6jfV+Y9o:qb9xU˅ * eo{-GpF7rcک;a/E䉕L?gOu:SH y'21#1_g3b|QIF%>!l:)f T{aJ<g=>Ef= 0U-$&Pa5;_nNf R'^#Bl Y2woG_\!'>O %wL-Ć5SdDa)YJ]؞,ndҾ^33' c]*oN\L#"DlM M.yW7V`<ѪhF׈~w"I~cno1ﱪo׭M*P4IG;kaq>N*8!sD'Zc5CS5Lwܱ?ik#feӎ"f M ߈M4 ]=60D+p^1|}¥7ʯ Ⱦy7t *js ~ΚiOYZjlc}I}R 59`YzW~ˆk֟KnL JE``P{k.7U?sgOCgOt&si֑ޗo Ȥ{m$J&R;iƽB}^M'+ZAj=C+J]d}=svuʄ=r/eɬ m-pjV;d!>1:qwBU;nb Dd=?lKܯi{ϻ+uךT&&PmvB|y*!SCY#DUi}HVg<{ͺʮV+WX/%_mDS~ٮnܻt=]Z3oxΌ̄r宰kHLJY~g}ɮqyƄ7dO;*[VјXy~fs?gqk83*~:.Ή S}.Ճ7Nخ~ږ &COVwXsBCVUWX,\ۘWU&V<);j'QT^NuP{k 3˹=FϮ*ȟab5sp 4S|24v=v##-.g{ gFEM}2͔iY(7f;rO͑Bĩ@' m~Vzԫ9=b.衊{b4&V 氎ҕnĞuBDdAwpe[ujZi}1h/܉ޞ%3Cǫr ]O8,{PGx3Um)n ٭dM)Z-c ^vj2qf:?lYENleU(;"dXV2/ϧtź]U$$-.?#i%9@G#^ϑVmVuU\N7Q}o5WὌ}厅ގ1YG=ڝYP T)ⴄfnpQfw]և49ʗ(z7̈́L`eŠ &QrxU꺬IA|cOCuqi؝Fd!>-,Qz&ZEe$ќu{p׶{,75O`@գ'0̹Ip dAd8UA5V| z2üG98ee*f ׶!J;{Z:uܰ=hӦ&eU\StHؽ|Uq,пNPU\L)~r9dEΧ;i!I7КdOcrߧ2<.wh!,M&K]fw p!m0%/Dm.zݬhx?ٛY=S^;fږw?oN9xCng}P ȔCTBk)fF(;F)xV˳#~z ˬgҡ`b2#w\ȳb]PQc*kU@-:s9T_ t=XiDe'6[+މ17rCJeZ]Q1R9+˱2q?p{4B\ 
w&GRtMU]UT¤ZBňWq\<@uv*j̯Nw_PݦuvvFv`Ls[a`g43ɞ]V]7v'7=Nr_oY&˘{aw:}>f#դo?2+{*y-s)ƲgkDb4fn4ƕqX"_`˳oXkC|cf'+cr mnQ/sBW^N DOЊftm\ b2Lez(L0N/Ϝ1¬z㪊s6)? ֆBCLW IxM3cTzus8?v ;Q :` Yp Wܒzw)R*h t"txDe.+WXOg?9ūݘdf"Yk'^|.ЫL݋zUM֯7U6GPf&|pѶTa7A G9ì:H,|:# ME { r+f63gܡmcsR#tx7e׋3m/Sl"5¢aLʽt]ώ0.xqVfE z-{xIg>ZWʤTZ `8dϳ~=)+cRBTqq꘲ +J|>~O߿3ͼ֝ tE%)MQ%긡CeQb߰N<$ݸ*^MU74M\zgWw=kt'~pOfϻS!&CoZ,֋NCvj+!6LYЈDL&cxhM[2”JW g}2Ð>!;G9![ >e)>f -v3!1`յͶ*9nʬ@rU?4^-p陑 L,-f}t.~Sn4/]WnSk;nxnJ 1&&U0Nw*:Wqu1n!Hs*hw)Yeܵ=_4؍>Oxvj*޳"aOjYՈ:g9m*~B!\1B*gV1%ioL(0 **bFͻdWwKide]wg"2.T6088*~9?,4$;mݰ9a2Gۺbazm=kqZO*^b29Ǹ PxCG<9^^q[iLsQCNȮ(SU/N+xe/>nfs:^%=#4>=Vјl,ډI\QUzsA~s6Cذzz@L`dv\ܐՊ?|bώO?ph1sgwC2| Ô\͟?|'Zd~ֺW'9;9ǒNBI]u*qv6eJ,>:USf]<1k ?yNj{KĊhT*OWM"ԆBw7QdH4ODE:6Sah@9QxJju>mùfdڲ{/?{LoP[LXHĺӦiBLBEҨ ]D͑-ϬK6{M, {ҩ㨀0/Y QדLݟĪֳs3u54>}"\Čt PN2w;3@e Ufh7pv w}7̻r˧)D3) /?NKnZ_wYvXk75:ҡЉIJm}fŦ3g}Խ8a?tG<7kdSPYs/!Ppͮ9goiq?jVLD LCz/wnd9?EW2NZj ݡ* "7Y1jrBE\yȼ+f2]P~o=.St<l60@6V%~&ۃZ]dg%y̛Ew {6fƿ+|n2mCБ»[F=%~4ciL+1r'yhN/F&3lYw־'!3$NdG^7%w2NP)6*n;ȝ;;Zj{#e5kv{qLa2Lg S Ms O:}c4ĞcelH( ^]aϿPXu4>SkHVy78nE\V殘*ksθ4p{ʿ^6NۃXǾkJ1Zj\CQ>81*\1OԳ ׸y3Y~m|ø*fϯ:=tVZb[񶢫_YgQ~O)xs όWx㹞ei6 %3;))S2G)]"ޏ~s\ݙngFEALJ}Ϋ^J6n}SfصhwA@e LI;e:.DN;5yZ0{ ˩/jϬ.f^8nv[YYp4ߣjMح!es.97{oP%0>5?Cvr ݛtߐG1ghXTySy3DnǪ忟Ϭ {mvZ24'գ:w`lΥsS14Zyu_sLx$s#Q7v(OLӣ!iGoM?YXYNd':D~qS q%Qe%Jl7TxjeBVEzw]zJO)tVA~CWe~F摅sZhGם}` o8 #^;M։W6C\yޏN=F;gǬdYP}'J;{~v,Ɵ|w1ATBIB{T ؝9f;-h؎:ʉ9L=zÂ<fcTS|ZƬZL&´ӛkV8?vL`}iFx$Bz-l]L&*]<췵v˳Zl1#Äųt742O8"|쳳םl69 7{5{{qz"j,Sa .F=ׇ{n,7[9u{wo𣺺40F7彫0v̅Z0 ^h aqwqK G{vR"6d. XbYW15dYoE4Ƒd_&%wff0m#Joyٰ+jX?;\- ʟ-7e,V˨3GJ.{tOY]cn_v|,\+2V m\UD?tn˸jё~dϡ,s;6f3Qӥ\nt@3דv\Rjzc3q4C;^?Uϻ?>G7yX5;pT+vcm̕bL|铴E9Ϥw4uZU{6Dg;V!CU~BQ̺Py:2ӏ tUP'9L}Demn]E78Ϩ<__ĥ(e7pƈSca`'#8c\12i5 N-j88 rXE M]rZzyJg|dD=:"٭d',rKE$Tj|UU7<8[ugU>f\1q /^8dLPjl{ZrUk^Rҋmv[n#sl#'=E$!n#|"J~om<ُoc_ mv_ݭJ3˶js؈#b=,*; 6n#f=_)ig͚ j:?S A۪nuxĻp &&a dP&kςb6L 0ڹ=/q׉OrD]5הYp%39Оwq U]Cʷ8IJEȷ  ϙ 0nBX/vLӅJR `vjo:1}샲s8VUڶ(R/UeuH V*׋M|6qr}m`ogugNGKA~+zjU,;'v#1uN! 
Tx\@qDZm侑j֯CBf[Yͤg5.~هbNЊSf+z\ys~ӷXV79VtGsu*ll jNf<u:؆3V@Z #Au+ZoS8[iP+}(6(2G^sk3YyUaDlvSP*׫ %*fmHjyVٓl]54enHnyg>f{(YϔKl {8b$Dž N7YÁH3FRNxˎ2 fJ@Bc'+ß#a: r$E9ƍlWȁޯ[_,Ⱦz7 ͉diS$w>)Qw̢[Ÿ|p;dM%oRlOGy~34*=dOė6+1>h U7 Ŧy4Ζ:tUgOa=64)wbw.Ff+z4z?}]pLa/,{]ۺNJ۸vL&{Ԩס.TI+zPE/t${m"(jsgr`3jduC|CZAh|?Ac2' 21ݶ*p!'tWɳDb[)w9RUbXعF=H]˹+9BMYr(jAqwEvv*gV+9?qN) !U=xlP*ѦaγԖPW>u&%CrLp,gĪӄM8+!̌GiMi~pZȞ Y~A6Ubn7J& 7ٗHE@UkfjӛI%*{ewVykE_ 2VϺfl_ Z0My딗e(ıdNu735DYOzUMgXGf܊k'g`WH1.TNt{'YtXIT֞9&q'@M/7wYMyY̊1V?rIGgNmEus^pWaYt糚XKLk\KGnΉY<1ep `1=IQONǵFp̒eV N9 :^A8&_nd9>{8m&5ʁm˻DRC%ҍj^_kZ*$8vf1SNw;\O9MB"&jf$z/é_yӓhׯ8)Jdׁ:>t[ޤ+e݈Gm}$1{IxYv{_²&r bOВ}IoVӔLZq+ww0+s#LPOerI>n,dL9PaNx;U>_;y ??ӨWGmoq _ʼ+iJ\dՃ!Utw:޲ƺSǭ#3w;FBv 7>YἯq :Vg_?}? 8?}_z531? fq*zjU]z=;%Ԩ:ז BsYu }7zJ-Sh ͦHL%D{57/}W&;(cA~CSOojQ/#g*:3ʎGVzٵJaSJ꽊`-/7OV.tIZj|eԻ-"oΧYlE\u^UoPHQG-ꪘpC;cI~V7?jȋ;iTړ~A!>?QZw_C9 ^gԭ yɏ*UNª8!ҹ' 腹 dg&ꦣ uicX':QYǼ'3똡jU~]U^{fݨYM`6*ëwk?mNʳ~13DqU&dup5{?<"Gzq 2Ne˪Lu1ϴVy]U4yt];ZGzmb'2Yow?vuw_kiukyA H>_m4{{fKy*>S1s봍9Fxhf-O)fu뽊RSwPw1wHLĝd빜LtkrxeoLx9o`e.ZwűԜUxcpFbyѱ870qEвo*}ieν=t͸(]l[#`nOdR~`N7s1iXK8nxU욏X}}WxUh gsfojC7,>L} Df1YS}Ϙ*}Ӭu7߯&>BKw9:~xV@> t3\ L(}g^]6Lb z6nxK  :؈>)x*zحt^o=OfSFf~&,Lnnon\2g*c={a6Y43ɐEc=3Wq7u'T݃DeR/ʑ?J~©g_ 2GN#I.taW:jatfovtYGhag2N;'Pks_9}pE /={I1%GɊ|dT 6~v=Vy [57@?+E\QD~w+ 2>S(TpHʊg]RY6 2G_ȡ| <&vW{?X</zJH;Ld:LZrL*'J]O:Dn&AO dN=r bPiuɶMVIue ¦Vְ ais}os(9(|*)jdgvxmA lVXhBCQ6:]l 9 1`G=aU8aW /`N Wľ[O In#4r"S9\ywqc7(~_-i d$Fƛu3i|7b#&;~g.1E)*-nR7?qZ54FghP۔$V:~ jR` %QE[otO%O%'<=v8֛;{G!;1 nkl$ꐟJ7סձc=Ǹ90ޣ׶Ze}] WBZa4g٥.vVhjWUočCTSktXmЊ~؈>0*T8AJ]r5+jƒMQ[1 ߨ(n U{Yr_1ʾm,WG(gv8ӝοiDWyBa_%$;cRr/9Q3'p}$}vLL%{kyfO#ߒҙRL yV>qt< =zĘOYn+ivG%~˫35ƅL){ͽo G&)YkcMEY=mVHjn-֕w̟\4F\͌u콟MObYUz;T17fپsi=هUŊ{=N("_ q`a$.~{⍜8OY ,j3?E7&*M"2Aj3?x[6fQz{?,4(u`ȄB ;^lYLۘժmuI>79fy>tQ2s3\&Qcj{գ>{ػzPkd_+p҉]i L, s*}wm9V}n> v-Æ{?^(mf_+WύfE7Un覌Ep&0LWh|F5 {VqT*^u~!cUVfhAwb!exEuW/vYnuߔFsSz3m=(wRZW򿳿$< z/7!k='`nN:9aݐ#y#Luձ0$ Ԟ{ob&s*T(bG?OŎp0g0}mɭ":2_[q`"T.ɰ5VZkH`,-^aZW/8IΙrNJx}pb?Uz` yGǥdW~m2iv XHs._*iOi 
]{uJqbގݴr`CkF]TԼ'Rڠa_ƚv֬{FluZAΚ5-tY#DXU?No};!`z2N]6cq;Z& Ar8j)*v7Ye}ݢQ3y2M+̮DvX{U']8}J=3?vU$ YetFiUn&F&3Xo[9u `m<{&Z7^ -d`!̕^;v r[ySdl6'2e3ȔC]+}mgw*ҟM)r 70zW=#-.w;wܚ~K)FrgHy?Qr:)}j rwvjn^V;za~VB?=mtwT*pJy_?nFtSo"P[jAeX{_'d_!s7 ]w"o<:Qb]] eujf*Bgv݌܉)v;=a'dfHo穫 $]O5X=XMhs}]l:m-o? )TJ~зnw Ei&B~xef}~X?U/WsI:*~; %UVbo<@ ]LvyTWpUbTJ0z\7">DE2Ntr;Jc='`$>76<4oiK8++f2-9! ľ8HmOC Kw&PzN8jeU0Z1OwaU{*#<@NwhI{/g~ܕϭ.Z@b c5ZrN g n :d5{sV@D&P+Vѻ.1O7UZ|&Գ\svZuky(;'CbYE2sTUV|] 녗 \_X~B ޘfܿzp?|CbwQ|V3wҹ׶goΘ|p5DUZILJ{ec +H[(aQ֛ZNz¬ `Z\d KDIDڿrQvev+n=Pw"g0;q?ߨqb̨M wWu>{jNYtYRffK.':-^[w3[ZOlP>8S=OڼvADekiTf_r*V5Z6D:p8g9Q\ ؜ǺTC=h.2%s+<7u6s.7+OSvvlv+$-.-ML ْnE3`h:Vޠbό {ޅyc" vٔy"d2.܌:[D=RN*EB;nȅXWʺqQtӉhL_R&<՛7SO71a$͹uzw kMMώ2! ]H)ʕE`R3W1b-sbYLrL"{R7t.hݍ8ҟV81ɆӨ lqꬾ;"q ]nDgJY1c3 Ǧ54nj$VO@VCNJw)ѹUX+Mߩ'nĒP*Kex#׳DupO)'^77us{>TiS#4]SOT @n.w/uAŕLu/)awRL"gҘiݹ!w(s*—XƔlU]> i4^*aҼ`f-hѳGM㿤T=UjjPt pODžLru vwqi(ޛ78 _ԚJ{t|r3ٝro_C oa7 nZ,_ fjuSPRni h>݉{)6=\W~IuwIQ?c%mҨs=W "HLgrv|kqݿ v QeRe/YkV޳V~}5&TU*P[} U&@249jv!N>NvOX_[^C e]!U,_d^',zgszPaԤ ﱊRo{)5+zB[uVށ#JGKV͸#{EOyekCJ&'4L;-qI'j7,O/,{]tkCbܟVT *'gUyٿB \օ:OfڏYwk~BcܑFĐ5/7}o=yḒgdELS|abg2+a.;.P6e7)]meo͚cgG1n42]Vˉ OJ%-,k]16TڅZrx}De ݨKNE>%n9PU/0kf3m>是)s͊֞4wU5ǜ>OlNPv,׊\m˜J |3=׃!5uTk҇~2 04 &$31'g%*R˳sg &J*f4n flGޕˤ[&٘j=F9ϱTizBfo=;Yj*)3ά .wYrYd+&1O/`,UyCEMJOgkv3vف*iJnL={ňS5euSƍ7m 8o>@CV3~uO"_]oB]^NXw܉ʊwl&9Xd8M ?q{IvALAxJb 8]d=&"[\pZ8=9ׅ*'ٲ/qQ=(sOYiTGw.qfn̼fRY+h9gW8f:nnzf3 i?Qf|&!o:-imR}8r_#yM.za7KPFQ2gևm"2i~uKH:uȔCTg2n8aoj'hE>{؟@Cm^uMvFnd{q*˿H&(;[ۘC5?kS{#p^ ~]݋;p.~|C05rS6B gsf~bO=rw3R[ _68Lg0mO;%l3-s*t|"s,siU5Hz:.Eg9Mq3bQc%Y8 e!CFX9~wV~"߰{jUᘏtթSO$M(NLxꧧtZ|gL4;!SYZ.b0~voP\;=vpXqf3f pp♑ OetYn=S5Ϊ033+(rNn=.!*HWQ14S%4Af>+YɎoM<&aW'Ϝ&ZucX߹>JujB y`p0w.)!AU# w>d4wYO(PBs.Shc3N+bP3cj#W|`k.uv8 BVro&tb*3S1~vW~PM|}q9C:-wWbL3͑QȎkmDrfYloݚrO"jQ-1XVQ>긼Reج:sg`|'\sL[i];vo:?Lû~!MV .*8!.~'vY)o꽊zzWܠA|5s}} Mu`\Cvj\*M"ˤs5F~ȱʎTzwW3<M֏1M8g9u0yB@-ILS|TCD*WݠJSw3|XQjb]ƀˠVb-/>kNTO&iF®qve蹕 ޖe%s*{l k"7&荒;ITDty~-j?(H %=Uo,dԆpVf*3=68W7ǔf" 21Tf :fq#jځw)7[$0o&&Q7Oa@U'Uw:neÍ7rR/fиz3ZYm>*?ZuI[~V+ 
VwlWaݦYۼ[ʮz|>!~b.Y.q#ubܺgb\Ȅ'hzrٍ'+er. .)Wz`^(@1kYh1;UlW~BG}b ) ,έˠUZ^_Wc'bv0 &;w&h:K }7xNB&0L*u:hO€H> -ʻ}gsNNY q$2iHa׵ b/`wwQF$rG4n̪dp3|SrcWND"RN0kY).{C|E'z Hq̭Y^+c@}}fFZU~[a~W {ʚv֬YyLg-m̌wI]N'r#ujT\ JQf%ˬ;-n*o74v3[5Xχ{f.+qq&CŃy6G i?pz'踁(F>Zs)-{(ϱgf.1рcR+Iq=r-Ŋ\y*P7~r1ee)|Odv?udm=M@{ۈcY*La)9R\29) J*߱c+3ge*ЩzX߿1̬VƸ[etj#c~Zs^]0Dd$f*VT+MMDwd2vp<uP1cfB<3颢!'ڞ95a5󉒂p\ǹB_pK>B~d2:U\dۜcYOs{8vECUV$`=`{ |v{hN4SEl,d*PμKJŕ/t^f3˼gK,;hg 7]m|N8=a~TP^=fBpO=gO;D>lLO(e2 &f uoB93-^tsrqNvdY!*9hC!>8}/'F{W]:RsXJoNN 9&9;$xcgWUo#PFaNЊkRS0ҷu7کb1Nꨡ 9Ǖ(mvvahCHn Wm>ʎ↟3Š䟤Ey'bi֨= d.RrֳsnTkt`)`ejEȽ#nlU՟0F-ȼs5o(zJ[ѡ;ɳϞ}s7=Aq'TNt*zD[A2=v' iaYoIf/Xs  ѡZʂXVv/*ۗiSvZExt#ZV|wO;l[羌ͻ<]7z9?sd^#˾ԏ#n\YlݒWTbLkRdBT9 qӄu6:IJqzw6u/eWU^ttňSq>wwO*zϔ=;7!*BrvU_+'`3ƴd X0Cx*ˍ(?81-BE>M2j8ʄ T Ru]y~b#2B^{DLf"}7k3?4q6#mf8;# 3ͅy[TM#DOw7w$l{3QŮMedpZfm;RF]8֘YDRl~"nhsD,!TOCw^m=g=7aXedBZy>kvs99L@D{&ce_z_w)>80^u:i/ P`m s^cYN߶*@*lrb3󷤒5XH*qY%iJ {dwuuŽqL\9f c9# x'L4etʶ;8EKg]?ohx?w9ޕ9Z1D]wOf3W^]蓿}ŝP@ɎR>FT=6>b>amU vd.n<*"D~ A%`%'Ln`ŜvO;nTK, &s`{2Nqs#NL jE3&i,Zf@9'+szN;V9 Ǚ=L`*bWB% NUf7VC|52R-cYe􍲯mG Mu*E^nжVW,t=l|YW~nceݦP˜W\  1L&=j+2· "S'3ƞ6?d8jKWv]SbT1U)ώc{rGoILEU iToU5鵱K]XPǼbf֒p ~)L (ۼA !oXLOs_F(êh- V-w)})ʴ\E\6c/OfoGn ,Q[Ѵ>vk+|ĵT@'[M"}D$ͯowoNpM6xfTTܤ:J ev[->7QSIuf:ENR#+v׸ͪ;q 4uHEkU)YXe:LdjY ֲO141ꔸ\'fvR (BbNH;.t Mfk{Nx??uofE<1ce?Tc]cyab6'bL00sR9@q< ~: Ao]u U_-e=;knkRw EٕȮ(=";M r`6_w@`=-ewWnfv+GmW؆Ѻ~F+]DM ,bi5ѩbdCZ2uJ&zlzVt>^՚ۮdz E`H灸pۼV ~" 21]7D1%*s&E f/ƕu[LTz+N#VhM#W8A c&c>0-`̄"3P}xJAŌ03W:ZS)D]?V2=(2r~K e(;-ӯ˝=ojaJ+P]JSNYԯ8p3%q&.{Uw:{>26;zV+ٯ:4N2*Dȁ*iJ3͝ס 7ranJ7b sEBOfrjcfh9s{+ 1o\Nxgܨ4=ɌX!5 ]ϡF/_*tރ\07a͑Coދ{riZF Up"ôiZBaPBnޫzlԼK'f+.Tqd녳|+¦Г3 >9W=.ߌ<|9U7} r,&LVwp*ҫL&nX C9 jN"i[پzT.6>칃Ha>avL뎔 ! tՄ 5,vvY&zNFv[\L~+ q'rŮ ^O<_հ><(>\ 3"J~‡DCULl]1:[WavˑMBvP5zĽ*UU'5VFQist{®X.JR֍A9yaV>zP7cjE^E"Xiicݥ!_{u۔$f}BY?]+s\2oWZ,r6e7.]^,9rQR*̵X[̌ w9ߞzd'\ b L13'zRʺZT23*өɳ`hEuو>p5i_Ξ]j|YWSE%_su7|%ʤ_~V\ ;^g/YA('>=q#*nD^rYii5FkiYlvAKU'ߋIzs\+}vWDxfT? 
q?5ӾAgXV\}W缨&:R5fn1PճZK)F䌲qjV/mݐSvg&p4?L ɞFu0~<fI."h өA1jf|34bV }[oaY3в f^3X:i{g.s4;7DJɝf]mu#q$ |_Pi^EYz3+mDZDMݦ&(;"Ruk>rO|Rh_(dǀ5AMSϱ*lgjς`SG5's{l3z?ꠛv{=x;87WI5I[Iq;]g}6"m7&X,XW֤fD*f:^ṙ#oI῔~hNKdcҨ:35>n0>lP*>iK]_uBuL$3E):0vZE:$q~=bNfJ3dE@sQ [9 ATYZr^d0NcEGp\ vPpZ=YYmt7ŪC~K[}jSuwFfIK+d++]FN'ޕB{PFKpK RXH5*ĦǻJ/wб;GVؽCc7;Lbw*Lsvyȥ]1Jfo:p[Fmg ' {`XUD7xdžw*LWǙJ/(!kfgs]u{wq{9s49UwW :*MsoTg~B(?L0z±{W|p Y& 5J#;V;D!/ՄOE4[kQW xt9 > ͝ֈ2q#@fo8RgN кV=Ζ_*IyTE{GvЬ}~mQ1WY+ :޼ bO܋jr o p>Fu59 .f~o-ɼTݝL C}ץhK)1 oA|T5? NU;]pfՔy穕&'+7f5>m6{X3ݚ4?`ڬDz=_UXO8QlRUja3㻯M Uø2+k_gӂNf2j` CUլdRse>!w Ɯ5dv:LXCni$V1_y2I6&:eB ;3IpMU)7:3I.yrߨj?c0{|/F$Z=کoXg>L~JdȟFf08#{E=pVҙ/|0q%a]'ӓ8H9XImن2L"/T {=i-^kRYo7MB 7gYA1TFNug{o@oI-^ؘUWLBo`[ xf{=3J9̩+m&m |t{.NLW^gu[DZɞmwB>QeZgxmktC&h[wh]Xl+N9R9GOtߩ&Gƺ[} Әyı,2anŸW1GC3n+f! 16}`n!JVfaԣ~ d^7ΡVy4ϫM5Ӵ4%+J}R+p@ҙ%.@#~׃"m]Dfj.ZaN*O{z-u9[.g^* &Fwy/*J 8I4}W1jUBG'ރnAJ+֟*}GxVV]=]_4Oh8= ;faN6CS9Ih*T˂\BItkC^Y׾3 f;S4dy#c]iS6uf6 ɄHzN!svs?g׌jp 3'2Zq4r31C Gtf-td:UVӺ͙!R'8Lҍq^qQ@U esҦINS< 0N7 ן6npVVLwg~,X.Ȇ: q`{8c o(39i)21ķd~%jο}CnvoE NBi6j?qTJbۧk.=:'OV+371ŠZip%}H'z(RfLj8KEnlq!sqkƆmr<&Z/L1'UWכ{Q>wvqDZrH3ȸ#l\A |w>T;Bd6Ҷ5ZAňuDe:-*<]; 4˪c6ȡZ= *?FYNcY }̡]2vS83*JпEnw \ < [^]Ÿ+ M™Gu~7iƣ*'W1LFB}7/şT ΂&_&Ph9(A)RYeg}Whid٨;AIKv;gc n>Yg xI338# N(O>[ % E )4ʜ@ZDN}/xgU#nU' S?z4F-U6m>9"H2!h|pww2Ȝ!vٵ[TTv6sdZ HVۛ©_Qj٪om3̕ dn=lᜫ(ItQT6m1]b*uYc's79;&yOyE m.NS2::+NFI9I.g۪L&S g7Ǹcv+3h"rc´F%\M0b'7qP2B,KQ^OT[iպ<}DX ^;wUݨSrǕp<r\b}3j6MӺCUB>)8ɁFu;7Z1=3fXVun=~ uAxSt$;S ;g_cĩO7پ.V]53'sd@OpW~C&Tu\]ό[\SONsj I}&C j|ĩɝ|aFXX7XD y4ك'+7~P&ym3BD0a3=O~B|ۨ*Wu.-OߡG֔ fÍڝ]rORmC?; cуGPZ#+SZL c^%!&q \1QLw2Փ޵#z/;$ŏTj̿O=SJ,~#wxYaZSի8{gv_ʭ).>jrW/ny9 G Sa~?#2(?1R7QcLyTE |Dbҡu1]j `Ң݃evw<+N?#]1UXJ=w波$N#! LµJLA/W;xb|kܐ*;FɊe[Sʵ;e+2ȟ9g}~Rlj&!]2;|SCĩ߹(Qc=ߟVZ?0s#{L]ɐ{Lm>nd 2S1iTɱwz8)A+rcbHq*rMɷo|,|?D/[1E%y`?r3֪\=K(9))aoϨ>?hŹ圽eOP̞c9w=k&YaJgp+6'Y"HY t{c;?Áls9l YY= հO M;k\)n^Z+RavΉIxD{mm[ܰ,sۥE,]|jiO8'Is2L'%;%LO\l[y>bu؎:6H1k6⦀S"<'}շ },O%ZMکOJq 8=\[T]F/d}. 
J(3 `j1Q@;3 \`Eu\7էeMJd1Spb\<:ANs A #&IJZ- X.53wOXFx3I&: 'IA~bYE0a:]b^}T9Yxg>3'tvl,:1;&ꂙ aU7*ڒuHGm?V6y׆\PiHTv+y]+2摒øv׳8]ϸ5[5x35T1vxwH…;Eae7SX6l~ߙ%1{7|"f~)ܡf?1'v:r━.2rx#kԖ*mYsS~cݵōc7'y3qO_OzV{7L='Lؐ3DYpW}Ybq;NŜp6Wd8;}Nъ]`۪^ !7X;1eE|sؽI1IɾVb"qtYgi9;U,?7 tEaEDMb.4P%UO<5XHbʔfSݞ;,Pyɲ.{}Ыzh&Ylo0W{٫;9AKWuRQuR)!B HI7)^^5hrEG!'F3>u&C xal!0LMDl b^ÞzΓuJWmJS[6"8֍f&wx>1n9fwacHylo8ŞDz0WS$븱# 1.TD$z 4-e.)VBo;?.psLrg5k1t:3nŽܧ3ʑoh=fV|Ϧhܽ;JV+~rKʲ3yvWg)dz7 ZtyU'$|0s3T 3+sXjr;U]Kݱz6Q=awl7iE|vߙ2sQk}Vru,{.sdO#^{Ap^dߝ`?pU\ÃfڰY?yuB́Jic5jА\֏|.|Hᜫs'5v<`A,9y" lV;a!gWS4ȍ?(]+:i]nKHߐ01- f.^+iJ]un8OY\llOحDfTQ|42b.`"[{vx2)(ؙf's8*i$u~$O.\SɆL.3ݭjA΢= g9 ;axVUEN:UC.O#G>a|p!c;q= c-R+Rty?9LRVQ/~4q{JY|,,Ǖ7Xoz#_2_mzGӥί苐V X":bnuMnfߟƎZEa"QYcYJg+WAwY§ ܣKn_vbչ+ ;فdwH+Doo _=Uw?dyuyGW(ĐU {h{16 YyCWS]顸*^WSψ}L zBɦ3=ESwpQ"Ov];2GYVѸX̺2c?5#׹91Vܿς=Lb_pܳUͪOsJfao3ҿXϺw+ͪiȇݘ춰?hU+N U;ewQ_p<V]j^z?Sĝ9r wB^]maȞ >sޫcېٔr2g3&c.dgAʄ7j .T$)V$~Y jwX KWUqg]a;]› 뵡ޛ[k#skc gG=ƕrajA'uҕ ^m6gwTPw2G(-Yϗ1׬(.7XZyhզn`vgG'ޗAɼ#句UvRdϽd;QiQ)yFvh7aVr͊XP}^|ǸҸۊ p,*}S/z@Nd8Lӵ նSw<S^=tH<=!.Tt_X|MȞ,U /wUm@#9&_[R6? u/AP{-ۺaU8ɬKdZ>n~pX?q74[p2{Bx#!6==⻨X+-Ra~"L᧻l(tU0I:#a3stiMoNUT *n{VZ,9KI*"GqU7}Mi{h]5"_+Zovل<8+HaV)W@ɰw<{̌Ff0XS=j/dXQ8ȤsTFPɡ^F>7g7wAN t-3!XCK .{G}"ndw~OzG TBXn'.@b1{=:u!IΚ5Sɝ@ǵa^'+mj=+M `ws(LѰ %û}gK,G?Fĕd{ٙrvw]b?S@<;h<3*r]!ah;k\*=gʿq/#^XX1՗ˎsZFZ, }(#$7jl"< *d!F&}bw+!ggsϻ"NţK6/if d Ȧ 1=eS;IOd&3/: r=Ddjx3^RkXb3+3FMhTtwevK6`#[z.+sW1~:7}}oV '4Naô,BAd]&b\yjىy ]:#vܤy`=ǝP_ޠ(l9{Sdezƍ2O!Op9%vSMuWd~5V97eY:1~Q)BlPF0i;m >7(670W(1K;* Ώ+Οvߘ1h77ӭeϔ^;bW'3/5SE?dUOWxcn}ǸxG3f{.sI; 8UIi@!Gwſs#tПDʤQ|Y@[ OG'RkW1C{5Nl]aL ?Yuy 7} O>iA9=q!'+k\$vO0,d9NO%r"𭼏}e߽c[j DOe:*ƕX4qsoT԰ߨcwk& r\M>\9(ܫxbڐf35FZuzf扺K QTE_[A}4zJ1bJٲLAd!4Zt/`8pq%j''N^9Xj|,(}ń1  b~we2:rnG܀LyY}rOdUixk~grֳs ;W=&E_҉r;&[nY̊1I)esɊ9TzK[f<=/))q eֳ[o<׈ G}XqZ["_{a:S6D.d.&}l7V]c5A sȾn3`ҩ)"R>ܩM)#n׃pg5侅:jy3Eı~s5}voʩ:ؽ-}$U JfL]{%Ԑ2%ČTroΠe}N < An?{]? 
ik3uᩌê7y$bwuBԧ}bPQis䥑?{:՞YoW;E.FC5:~|O=eٶ.GRQzqVOVx> {+~F qZ:w=:U|(>xce*MAU;=T4mpYqG\#;'ލ_'N/iCO-vcY I-\Ui5ِT$' Dʤ{/h_2_Gj:v%mB4= ވ'nfv+Yoy p߿v 齰ueyareu0iY/cۃ炛 3{Sd6a(SHykHm1( |ی+A4#3+ &5uOx7_kNH,}N_N%bֆ L0y_Z,=)-3;c-we3=ͱ/`g*>SL)cêG|eHeI|,>cW ?sߠ`F݀*̘UR7q#tUO zF!uΓx7nFr#{=lE{;fsVYvʹVS2f|:(^Oz+Xgzc{hS'<-,򬆳+sF6Gıb:Fq7+F+ 䐖ll)ή*`k#DLdb wNn^nX/4y늮;9,G,sc@vL\`^!3!`VeR Ub2^k lzؽ""w/4u/P{73jV5τSb8vJ=MIENއF|vwԚ*61wBz>63TBZ˔lHiԯz?.lU!fri|Ua'dg;=;aOVLq;n}]NF4&oocw]Ui3*s>uMK=!dl|&w2&hڼN2@VR9ӡbPTc̝)v:eWEɰ-FtՅs.ɜ+'rOeS©_^|{d[43ǒ9Qt0ٸ.h.%/:g[Xy&`g2YG=pTᯓ b\: 1sEcYGkܑ1wYgOħo w *<NJN]gLሓ7BMsT+^"2D=tj M4pUim>lVo0 &Zbq#(xOe -9IB3<1ީa!D5儙LjEw`1TK&ĝ90JpV=i59!\4ꞙ37hP\>UGmU( gƍ\"ʔ]&/Qhd]&js~iƺ5շ ߩ짧JLr|ϜwA_\ 3{wqcSjrXv3FQw6I53'"7u! Yn {"i[IAZ *{PBX>hd ]l[pVֻ*kUE$)>XuƍV;FZ=W)l΄9?ǬWrc~\> *Du;ŊvTNvO;컎$ZNJZXֺoiwX7֞*iQ%̓ ;{~ 4zIu:R9Juwȁ*gﴡ<H+P QgџXuKAxKmu<ֻLzvcyqs~.vvȲN1'n~>3WKKsሓIOߜKUy^+"릝t:nBVuy!nPp[:k{'lF <3O ;e~W5~Ddىew#׵TG}dLd[a81)K!?Ν|G$E:-򳗜GՖgOwEubdxuG:F^wBB}47;U}n^gjodPi˴V `{b}]vۃ*n^M7HEAɌI̡w+ - Utwʷm@V+a"O#UMf,K;\-~lO\nd޻_ʏs&c9V܌ςzPTQoFLvsKRD^r_˜W<͆r@ ;]  4}cӴS󜿧r9V|?s{>fQF4%n&s11 nNxJ qE}gLK; :Qi(۹F3qQ0ފo!=AɌ o܋}'zn&s1YB'\jsG lDfԞG)i H&ApUS"2NK=ߚǵ# rcxd e',kMoYu;s,90;>Ǹr|U Cr.H]̠dƄ;>q®vY;Nxtr_9/fz-w ~kvv\sxsjnKTTLBjfV+9[k/i*R҉u1 ΚOfCo[o# MDzLiTz3dsCV1ytڠ6ȧL6e27U=ƍd.K,p_ g=;\1ub[+S Kԯ(OWݾh: 9ebYS)V@ g11;*ڊx˻MjF8'} RƒX jfLkm_ x$5ˍwk\?4^IXDx˕;?ԕWnv.<̄dւW "Δpԍmw٪*cx䊥ɳ~9x̣6_6lºาA]+UU@[uLx=m7zȍIvΉd"Lnd2#.﵎:{r/unfS]ov3{ٻeL|(IukC3g~OVTϤ~Xݔfw瞘*>iAԗ XaUU}g^>RYȱfx!*;䧼kDQ~YlڜmL&`Eo1aߙ#{ݜ).L\;o4hMU'WG\#+N&r3qw@8Qq#Oxǀ}bB1m2aFSn6 )׊<~;K-C4օT1{ky]/89#X_6 !LY5Vd$f/ڙq/ϥ"zGEaLXqd:cӻ'DJV,]kA 613+Kcٗǘ9rnz Ty,+cvYjl?g`eUp4U)LѰGغR!׻:X93(V]l";uىcՆpM T< +v|߸Ozsʬ@2ҫ; $P|Q5rLhu!:Q1'9({z=-ƻ]ZTz?xý6\yhCE̩-t~V;d _Dogt:Ld"]yzdb8x憗dxpQ*NjrTӼ TZZ>c"jU}™;cdO;{uxx7zwrIv4e.nTB8Ǎq'drϜm;`޶Sʮiue=YrFAj_0~U6jC_8A+=mm> Vw{"'= NdA~ZbНLsC_.ٷ쬫ͣ;3;{n#A!g&SL|L ޝPɿεaWvص]ox.+YYOk)? 
mѸ!3BxW"$u*׆qۚTaZ&P̘g}i#V1O=XO'E |grOg2;قk-"d{Zc?ǽD9fO=N+ؾ &N:-.ɚNMQus$#O1wnTWjN65vLW=H͊g|Z G"| ϙ1ѩ\&^kn6YgNS-a3n.3¦W6}qO{aUU:"VQ²DK︂q/rq~+#̽k;L+gU1ާ (!Uԇr'Y~'r$&Zo+-^a˟7Yꔃfzv93NL:c&89w^LF7Kuj}ƎUvovۅ\چ/]]|;wv;Lt1m;+3 VOK*E[OUWd~"&B|NeR"1aKw D> YfPx%s*/ƣKmuN}ޭx\U`nO$¹Oe.F?>]׆xX)q#z8'>7ih{/jrBdveQ,P&gUiǨ{LGyTp%_pn9C4`R}11bugs[#1G)9(#X}\yHo8\70"6eU4 ݬ|TH^)TDnok@rW901Dc2mi_&$J|#{Keϻe_ ʅ;eb-X `"|*rmEIr˧Sqm3é9VϹvZ&1Sx2of=vF?*B7"k Ak,z~& jqI!Yl1OL5E+͐VW s$-+ >mϜF#!&HM")2)*Hm'ԗ "ۂRyj*ܣDlcY?>c=fP2GzP$!Uoh--LOg::~BXkA-wQ>P4"FjϘB̞ؔj\#wDTz,\xuq4M9}pw7}XNI&DX)nuż1=ԞKM8%$O[|CG\IFZN9[\!\t F>;*?2 G `n}`*!Sq4oPFv*r} <fdօYNwp[=r{We|OXEMng,~ؘ؁4jʢoê,Igق6Ȫgkg(FՕYA n%FɊ9LRO!ejSeRf!LzⰋO̵~ZXm>qJUI]mGpfTlᄏ&xnڷwo ǞF|C= kc[Ȥ3I]&~k"rQDnf2 n{lu^1oPZ;#RշI%O%dꀕ.+/r9ew4։ ggkgZ>^ЖF%g0;̝,/~.ȴŽ-nê; wvw_hly9a 3~uK=nPiq_cr\;) rs mלDόfomokUxUeNE-7msNhGTsfg&yVw?]f5 HɌC3 7rdnju/n`zDKǟ.ij vκf;.pyV;eN U+`?l"]qRkkwyzs)ޫ(IwD;f+\gF+^ OT=(>^Au*9yxc J0Myn+lSjtP=S⻯^ o;&$pv)OI֦4›]1ѻ864 }8/Pٝ{+ZP |cd "So߬w{#;g^j߷xOLyz,yc+~Z˘] k!W,)MSΝ.²9/*?61yYs/ \ 3wc7@[=o0qV!lh9y!)%=NMw5x˩-3tc7Hg0ebќ`0ѝxBlvhT ?O-tfkqß׳ڝ2bYK <)[4٥e#{' ".s ˿ߜ +ﺻpNƶ"N*sPQl}2r dn3#;fU5(Usà~gBwxyުV732TT*&$om91AQ8n} d qa2۹9?DR{rï:7;*qELnr7/u~p?'Yٝ;S8?Rs=; crx.57;>Cx2tGr`@̔c|#3/nu9IamT:_G%>Mal_:+ğfsQ^1ﱊU=W]tSN.D5EdP"2i#V1j?a8چo+w崙O7ޠU=U#GRW6sCvtR8$ZS;=E&h:gxgmUznLer#aΧQMH8ןvQr`r5j9/2{Zn1rN6D=;ZmdcJ&@AOVwb* WNPQjbk\ɭ}#$;KN {S=m3`,ׇ/+ nx<Փu$=pB8?= |0oocgBUi UtIϿߝ1gwSY1=ĺxnSvLv~cY9l}A'V=暉 1\a*ފȻyu%+"} A ֵpAc^E"]=w {,l^\USrL3S;I'ʊl 1_be U8pc/}YGwuc ;dSb ?Y݉aϺ*~6]EyNtO'7 " 0AuǵBz)L)Bf.:~3Pkg %a4TO0}_&] ٜY_wnaZ2K"E3 ,9+33Nabڴ]s݊[mϾ> iTשj~;6+n;ー{E/j3_I#y*+Rwٗ37L ''V'o6J3N):Isko,攜;ۻ#"-`N_[qY>t|NXգ'^(@!N5ʸqcY9~3z48vcՉn ;]V$f$E$i[ic=['_HLӃz{n#CN;4Yu8s$vϙ0-0+|;.4z,;`lxJ تc#Mmuᶑqy<罟B. 
wiL }\yW;?Fڨn;Z`eD<c,͓g'2;J3x,NDx]UNZ]@טe+Îʜ0טŨud(14f*a> އ};?2iWrʟc.$we|q76=M~x&ic}=:2UQ˓jt}_Bip؞j)"U*ǟ ݸv7T>8gh_ş^Q >:UMuj9WNn`-,ϜCwɓ"a7jt]dq*u~ w}֛:w_o{0>6UMrN$H:zV?ԘçQi9n=l(O_;9 eӶ!1V@s{+ǕNxl*7Ydba:|ƭv!XIP@;<|dT d\U|7x6hYzDfDN&n}SoT\4൅Xyoӛ=MV[V@[lozw$`~mɝ.̉ǔ<`YeȬ<^aA5AD∓wo_MBxJ^+{DV3|PI+/7fhCpOc?Pݥ[{k"m(h{ &@K+.7KDD5q_r\W7~j)ۇ}6>ߊ\ꄶڡiLc1m;]l33´{HLsUSt$fg:w°ќlqv g13kɄC1nTOVςx63$D=sw:7kNdQn׸Y 6U dW1ى#NUn Q]9/JSPh]EC\ɗڡ7_cYݗ}{B8$?1 ~׾i}]-cҳe㊝VO>6s7?:Җ54"*י0c5<.Pv/S$sՇ%"SQ{ "9gϜN5'"?q ̲AE*z&?0sEwv dh vƺʞ1WQ9[y*^; ~IfPX&.2Ea3rzLlqN11-*}t}Cgޙ AÚSY'7az܅cfozxTaԯXL-WJc!CTZ.;:们m?WF3Z)񜫈I_Ս̼-QJ xiwgRnQԳQw<}n}3섊7Z%x}txwi6 Yn=M,L9S@ &IE`n!'S;w'vzg=I@5zcW˳tU|0!AšIDX\a=@xèBv#uLtŧnd`QǕܟ=ZO8Tψ-6ǻ٭dO|;fO;wVL!G"'+b}9$,gtXc /ޠ|=Gy9[s'tsSWcՉDʤm i^I(sD3Yݍ|0'{bY =N}7ɜzK!!ƲxM}]%>32Yֻ2'y V9/BQ IJ]6$[oo!.dܫ>5ilB|CU%3ܠЯv f珩֡z1W:aQmIz5)$'5 )aVxuM6Ʋҕ{iJ>٭d~4xaݍlh:{P+]֮ziY͊'sJ^Ó *;۬xCW6ryWEm"53|݄2 Of/f#V3<>9FL{ʸ)&<۠Z$N (#N&7o4]YYqRj)1^w2pZZf/VrX27W}ԋ"?-eu*c,TEew1*}ZO`ȱ+Uu-rOJ;3nx#zU51Yi~"Blǘ - !h$P䟟ДPDNc|e2s䙤1qB,$uh gM}bm6$o/;U>ϖ`%!=fho9}֋:Y'sв}IÍ̈́,{PzT;`Ƹzרgr?b~`Y1JVT¤~Ϲ-F"\Wf~27sNdB@Gpμ}"nN/ Q]mnDO҇wQv(?XU WXCUwנnxQAQTH#hJ>6UG׸)^ъy_z!wLH&:t$ֺ!ąӺnzkO~D.XE4:/mKX8h*5~3dűjr$޽d=bdy*^ﱻ ;Tй ^8C1U6k?;psL'hyw檑1#VUww7,zd;khEM:m@fdρY8XWv}0ht!#̴Q;'P>R㺺&8N'9Ա©.7dO~WvVkDMܢ"{i=;! 7r!V;_N==fm|N=#fESgqvwsXUY̲JߩbJ#Í`~#+XƜHfkw^>7]CLnd2 9A {Y;))aGg@.@φ. 
v+7;a2YqKj_=aqxr#_t.$ _y!Ʌ&M߸;fBrx|iV~G38[븕 ޹Kxfw:LPHĄdɎ_(oF?pN B%_VfY;:ԟ&1kIj6rK܀KJOXꞆ(Ջ6HJ6{s=&.fFf%i'T*Tze^O6,C^ 6na½q*?wX=^:wa[H= T劇3̩oY2rQdff~J83*ep*O!Uڅ3wrPUqʛlou&ъWJ-Sh1yrES\1S#R+qj>к2 {f=޻jkYS$M}Df=sHcjw |U$5T3W=sJ{4(Y_z]1n qCO3ٶ6Pc&;314>0T3E)!vf-x)gO>1Jx ?y?1;cEop`>{dht|@hL]Zh2pG L{j7dQPJTGub ҞF zfV۫>J_{5O˺HcR >z_uR_KNø~v FQW̾mQs3J99%[紿v[{CFXѴm/:c>Qч^Kht &\*HaJp*  pG`'-^aZ`ݹtDFo083קk=w']8+zYszlqCW/ɥ*1D{~\HYͩxwPmC[unp>4>hCi3-+wB U8?NGߴ1́Nz]q9Ty  U'7WU=4ɼatkbz:!f xϳH.2ӻ!玉f1JV|cs$ɗ%<-^|4ڑ#쀱ت'j%-^a߶gN݈ɠdƄߞ9Y,j~oP#t@_p]nSv!hLdD#n&s1*O^8( 5{o:2w=3KY$w⃛{Z|Z8k{QȎAo| ʫrSڦ[wBN?YɺLα>:gFj"9y&uޕfwd?+Yag^@>0râ{o]TOv8gHgI!v>3V̊iˎ}n=LOe:*[ mL RV؅QMȳVUWvWq;jҖ"E,>lvXYaلuZ˖]4fZ*hU፡q:3o;mTJ{rSFY~1Ϥh&Awj*`p^y2s3gȏGŏ_2PrACKZ9i͑;|˲.2a(sD0fBv<ۘXwHŜ$sY|+Vap e5iѥ?bSęf"E_GBǏ2~H0r@ !eOw~SrѬ1ShIs[6Kg)D [{b qٙ+k1II ֒~G zim} P vOkTh=?[]p۪2oƜku `j:LAfʑ]^qg8 YsZr='E׆tm³]݊'bWk‰;EScSQ&#ΖFv;؅mᬬz7eaEv}fa<+Sͧ))i)_^5vhrYV|&);DLIvzvZ>3׆=/3~ܯh4n(qf!۩H77qtSe^y#ֳ:>qZL~g=q]jeLHE*W rCǿU|#smPV,k=?4Po}_ ڊ';9!&ݦl|-4|W|Of&3bJ4XfJ[%Iˮp#}_7J~A#]p&lw宷C\0x̛1w~AZP?Z#ud?VYy8q˶zCeS%{o6'b W^Ngg; yy\+W^{E[;U%*1SU}wkfs>;>]8[笎Dni\Y1ؔɮܰO\U ö>CUZ~?]jbBXT;yۆr9fh9lh[l2!3`ӳlͭKjҬS)'7z:̽p0BmT?|B440S13 ̿O: c&me*h2B-^?|L |&=sfgC, *Rǯ(zRL7\6nU nm+]f/dGjHc㻯*;2U}#&<}jU3D~UG?ۻ EL]QXtyΡLo3X-~*3圈ӹyp~td^ݚi_FjLR撘ifXskN2X .>NJT =qwg2z[{ZyŊhV<|?av>eOu@?]DT&٪L[z_%Qd'u2ÌP}}1\1ԩCWk8uݛ6~9uY?ȳ0uq $>[QjwEӛo{Ko0Am"SXU%ZcGܮûw&ӫtIbVh׳\|`.j'3鐞P!{Byk5 w;H!p[zuD/S&)2)z[[a˱dORfR风m&K|T u^?w[r(T Z_̏ _6;eK%%kxc"1c#$uq2 ĴiI١GmϝveҎP/$vavie⫍ߐ,_[Uq#5Yƽ=n%=G '2ΟߧN 3^o,%Ǫ~v uC;bLm碴t$+t=bm1ک"7'J KAoBPKG\Iej>C=z" a={N*sPqw/'Zpc2oƴSɜ]B&d9ӱ\LRM+q<*g:.qyj1vvYpmYie:9שS7{|TSeIn:@{?:1{o|]"NO.k~YgQrSĘX&>{do5[_Jwk8q+]\y҄5Xy#"sDVsYH‚֑ZnPs*tҨN$*|oV2d*LHQ߼XV7>y)1r"d!LFGƔgF[1 +bf؛ĝe_zkdGmUok+*) 9PIU[ 'hEQ<+q/qc኿{EGwBEO7^mPD{י~tsi6YErOV'[O91‹8̩޺':{Q{bQ%亍9%"D̪i7{0! 
z?ε`1+?a߶-e0\uI+i8gɻQ3>>-F^Ϻ>Sly Ǚ=Lv{OҪMaUx\M',;`e\Muږze&/f=xx=T e(a]p7lDs[}@X bxV\+(b_Z lG|3nLQ2{ޛyy@QYzs'߫cѲ683H Z{]mu iU^W[ ڃȊUVة}+DH}dbϿ9hLc1s_U[Ȯ*MVpYUf{0~Q9u,3Qm aY P`Vz $>{L7og?n URfɸzώ&ĸO+ǟ 6N"E3 tUr]Mx{1{7#[Ȕ{~z"'8wGcPggWچz@qW'r3q+w-Jnx묞h&Aޠ՚YO`*{)\8|$-CWSp:8O70& T1 gCADA&ek#W$>i O}P&fWpXe*а6ǰ!gO}vw]܇>7ܱ*ʕ>M81LAd!֜ͻf7/EL{x6g݊Q;_=57@pR8d_9M!G#Rv*JmRlR'u@/y+[L;LOd39K/D1lePYF^򘈕Lenr!+Zof=DgrV bx_ʣ*b]/V Miz&SBU1nϠq [+]XzWr"TUלSNfqՒ YV,GWgy(3ELWN]5l4Qt vc1L>x?&V2\ܯfx+XV`WNd%fn``yoR Fs֝"N`^:4k;۶bA'p*_o~]v"ٵ,oY]Eɐp5pp@xCOR]GxUo$o={_TE+34 ysEO[ǃ,oLSuc';?`pLf==vv@ſܿ<3[a~A{>S'wUiYcDFA{&1cz=9熩E53N6E+YqYf'atB3_Gs"ި@qg{P]*~+zNdc]s W,nf01t=Ef1*v~ V$ LQdjY.!H٪ӕv汹7vC: BSzŬ2i W#brwg .fl*rƟfԞ>&5(;X]nƆ\lJ ~CCN1!J wR112UgGUaPqT ~'5;Enoܽѽ\J7` { N2oNC#1S}<4t7Nz~3#&=}gayjbs/{rMmL%\w$)E2#&3!,ރ*L=XW=4{O 7TZ>b])mɖ\peXݒ+:#%B)裔Lc?[ذ^%*j[ޫvZ\Z}SzZXxΦBx:w$ ׇZwrg_N1ݿU1&;QgwNɑyduQe08nM`ؑU[w?㪪(I@Ǝ{vjWUŷLsE=@a&05e/UD׶w%:rU}B_Fr8g5<ƕu:ɄA^k{6ȆL.cuW7T;~V?UFNHJwVY9_Q}/d.%NqVcn,KIxrs^B=IIJͻ 8a(3>́3?q@N<3yw\ {3@{SpDzW*#VC!|W[ DrA!) Ƕ Sr)ʪDV(Ŭ%K磶X"8 3YNBo %"{*O?Ww=\Lqyʫ{Y}gi}~\a>hQw7JLg; :~wd=ÖF y_6JVEI~-pbOk4Ya;{n"d&A{,Jm+d(3k+z{$xU@{fo'N9z% !7*dU}mV;ݬݲSiVs*]iNmĸި2 "}~^1_HzW-օl`\|j^G4C=%*5Ǭ)H5[vϬX)1inJ RÉSiScֆL,PC0օ-pÞYiWhu;`߈~DN}uQJ I:mEr|n9_"9nf^ ĄkAOVDŽԑra3w,}ٟoeR *SU`U9ܥ\pNq'd_]:ʵ[1i:@+b^q` !VܨȀ%Ϭ8#aE9֬_Nu?wac"sʝ+ǜG̀HCy|m3穹9f!DdEo(~NM`8h~Qa&.ʎ{OXf1\阓]J6KObP&E_*#6b9Ws8.**>FYe*/]! 
F2 _ZM)Lq2ޝjPYZ~ 0_k|m]T _P\lw(\mύﷷ|ܦF)*p/S0}f@14fv 4Ӫ)$5mt{ mnSm%4ֳ={&5`ѵz}6ʞ=/Ac/q7hBNdv# vwcU%&1Gxg>c6 YY=7{,s`!£v2(Mj':tĊKU&1#ª++ *X to Z@EHOF${ًw&@ 9jE|!DSgϓ~-,]εLVɨynww2?*;/[ \q㝿s1@1^hLcjEɋ!+f5DeDkQГԞ]J;͙Q._iֽ9~xtM%{r!RTgxmWN{"^l>$ODTj: URD.*sWkTHQNmKf8ޖ(;V{]9[QbXϚp k5|Hw;,g#2A PA FEU'V< :QU|PgEwʐWWnb~Sȥ.tU_j^s!9DNB]5Ƙd;5D*w HTg~?FtADK߶b}z넋y?/Sf|1#@E4)6^&\ч^)8z&R$ qBwk&yZ63P 퍊I# aE>?6 .=<[f I5^$+kC۝Ldz%>VFa1-"TUVȦ'O"z1WZq4@vhgf')'(t{d˻p7[ )2I-drvg&/9{Nw:^Uٹs}t{ٓg{ \1g-454ďf>NyL|PqM\Be@>ٴB}!P^4 ԭ]+bOTdۺrm==9ώλ/5gI'kcϘg·~&x;zޞVvBj$ g=y@2V6'I $Y`oSo|SDFOY&Ș$ru9ƝpO9챃f@*]iN]}ʞC̘?> N{ƸC34@vYc= c\sd^=>OYF.;<|N04[ԁ;%׊䛤_CUI!T*ʶu Vg̯7ƁTRܹҨV}Ng>zuo1:{x*~^FwXwwc1ى36tXLUf7x,MJ% s8Yh~a2R, !Gޙse(TW)?*裚yXUZ;9E(]2❑Yy^ۭ{|vX%Sv;5$čfy"SLTs龌Naױ'=5Fq%+>6h3ճ'8;X k6_W XVc{ ]G;ygE JfLk`7i`6y7r Qku/,PO)[~?R5#,}')Ȏ%~xcTL2̉8\! b;E|E,'/FLXSVĖу9 JfLlMKh%;9>gN+&Z2y=ĩ/ L}߃B6-ǿ;+4ɨyn]_v{ksb8Ouyʱ'$ݿe΋Y.=n(=3Ul'q\ޛ`й!ꞳR{ۃ~HDf0ZPBaPP*hRFxkQrH x,r!h3ǕYuko lӽafB<3Ҳ @ EŮ{o(V^C(Nb] #9 VuNvM;7Dg⪪w 9}ixdwӻ"Dx⢾,ߕb%@KN[=)B?<{NܽB fLxBqW0K\9+c.Ԝě>dݴ^IL]6~^sm+^hD* tԎt} ϾwjH/'$$1M_>Y>i֍Ԉ*VUq 8@M@|B&1R; ߊa(sU?j`Jbju;Diq.=pwZp&hfFkU-1np˾9}r) x';͝;= {1OƫYeբ i{ ɏ >fTŔDKK]΅SjBcꧥxe3d<" _nC3A1>Ghud^~Ǭi{ͻ%.Dw@\>ꬷL6LջYsJ2ŜK@,>Ͼz٦+ޙ)L9e2b]uDfڏ38nevw߱ D2Q?DS?]$)DdIS3o3z0Nޗ)BeǹsS*Rƽ!*$̴඲^V^fTPJw=S=aeĘXDg"'ؑB l"FPiZwK$ݝɜg|9cIuDN"z`";YT >V1֬<|NE}o{KNwvYӼ4l@>g9M-aݘ{ MSH|`{t6;I"lN @S82SNYYĿL 4],?uc&+&mnYo!-kCN99^pb T&~6MlLnd2g.} wuA5S~3*9eg<C _|Т:72xwwۯjiL]fi>ZkD\XM- 7,!_Cx ?ߊݷ/m6JUK빪qFUi5w1pw("rjO5YbZVTs*M>"6qEԜrâsme^sw !1j2eVtйvÉS> nSqp/TA#c*7Lᥐ|;DswD>!t gYfP<)aLUZzޞ?Vokf 7T|jKj3sCYѯׇx. ٦M.:b/dbP2c#Jygmy9+즊]W mu\X6)>GWVL3 -uhA?SInSSR6? 
u}|壼+v(q2W]jcqs/ˊkam|}v Osß]А/;͙+2YObȇ>;;L1:Md(3EQ :.ݖ}v{_:qdu.>3vӻ=(т(>;L A3:a2Q :`&iE%S(1N³INƊm\0U)8AUլm 1»ba\spv=q}f*N e)*n炮 2oCVc>i$7ϲ]zZt^j2wa3ƙ[6/QbmN9̩_K{0jU=_c_u2;&;;^G= 1*QJ ǎ<}T* =>:;#</qk#{]cϾ rF$T>z Z+5)0رVP, Nc1 ]F&3hwhxv/i;ނ@Q4 $'+;UVmҺDOj|ņC֒|j;"I`@ҳGILѺ8~ݍWձz?!kG:[9;L]:K\Wes\Ùn_v486pz-5#ÕwҝbފogGpV#;!wUn68֙[K]U:fHN*)|=7tǦd<>G5 q4͎'zYw^69&ʷi:\#Tu^Ӱ\Uc&<{*+Tz{[ߡ]gT+mY)ZЬ&>mCw씨IT?G̒{/ _x }ׅUI+{;C˔wLlHn3 @Nǻ!3w[ug_U%̪·N[l|pq怐.xMUלPiS~CP3AJZopnfR7}2XDJB=M42n.Fp.T:kLېYVRHP+wRU#SGUtI %~/ȀY:L%pſr-_w  2RY1>jxėܞo-҇ B%efa<+3U32vaٿ\(x!wY?1PUu12&USo1b4;j!Npg.u'U-DWYPW.4 WkIGǨ+UR{ÌH H̙Ը%nD+W(&C=+սy3 `_`մuBN '\ȼ!{P[nNtrAK) uHLhf>S1GY.W|d*F^qB9jYgC%a<uJ=F%3!RhTsѝy33\[ՋƝAٜ߰vԻܩb)>MoyH㼼asd}.:O.*D.Hv9kNQa{*1Xrv4 pI8H };})?4oJiz^gQylz?u(H##)FpvXa9[FqfNb9ʁ 2)W%Nz0zyѯ3ݙf,҉SLvx{] 1eEȶ'yo\^3ʬL7nV!ZZf؅. +&G5bAb8&Qy^n=p⼌VE=NjVꆛ1*VrE !CDC՝%c޻ |6#&Dy%cFRA̟9r8/ ߳< ܜdg6vDiU!Fȴո:GKT'yJ&Au|6Nz!J[^LR(f6-J*UFXUܤqdctYyg;_+Ԑ*dyn}_nBoa3e)v{v YfH>#/f/oODj`Oe2EG^PfNYEk#~x[2 @j~L^kR5~os%a6?:ĿhI;]7txֽ;i?y#56~zyWse2:l&2WϪȰpTD$q%Q.zyo*{O0ip+N]vX4K 6#&(ϡ!$^R|e'뢯?9/0`Gը7_ǧ9edSZzcoNÙwg7N9UғuV:H/i+Yzx7;20+4Y>8f&x;3C2}ût`,> 2Z[-`]'Z9x6+e݈3VUQώ^\aO6Qn/Ś6娡s1MB{̵|e'h"2-Cv*=g>Lqt֑!u#r3Lhj&Hk6݃duM~>sc͜*_=c٭D2I֙qӦ. 
;0قV}.;]Uyٓ{ٴ;vsܡ#ZiZ.iY#gwi*7_}Ky2yg÷[fR*Ga:=`eUp9fOy3nt ӌb}dǁ#$7S1r1 eٯ6LUd :T4r/5gY9PO!ۉS],*$FQ.۪ߓ)))Vwm< =.`GAԜYwl6t"[ϼ^u!ZWd7zjx&.1:R&ѕ=( Uܻ_Ü2m_țjժωmQTgbtRwDe.>K8.lH )VzMd21/oXZhYMl"& ם7\ZoR?BT~G UOv^:J>ޥ.t(qi!֬/k2|M d|q0'?#T*e3FZW(Td.D[*S}]1ӻK+\ =9ב`{ߊD^\s"3㰹DOg::)Ţ{j\pwn#̿e6:t06YS'Ǻl.}B3 k5, :+>L>y2wnk<_?۹aҘTES&i~uRU-{=ސ Z.81U;*lonhC,nS m,=Eե;lJfP*k \dVjN02ަ|zZ>~U6Ă'MYѱ̞ e'q_!8ɉQv)EW@dj&O+{ QALևoSTeճdX54\<)JnƉr!"uK1RI多C~52\&Yv޻{ZtمfZ,N 64I73qd3LssN)W8'Z~ŃeIN s.-}Vr)&R{oQf5k5߮6 z~ m+i2P=kce6眨L0Ӟ2iʅSgY>iAY@?-Y.vgs!yKDWLS0amcg0;94㭠jKruA';'7$E73Tf)9pawS2:)촴l\]YGofyhn缗U<y/V̄xf=> z3{cͻD;2Q;g[h)b2+Myg.Ttu zBţct, &byעr;'ݺN8i5i#ރU$yzx]gKLYWY8%/NJwz"βyQbHN&qؙhKlݺ ;#]J;0#zQ7i&-?kywY NQTyCV%0u;a_˥Y,.%d>iI/m2_ڸA/I3eZ)_C"k@"~AS '&j'Tӧ2^F|4~̕xd* xɂ'PlxUv5uTb$>Pw*>:٥e u 4 2LRJmuXÜy$׊p /* .'jpd\ÙӚxUd$^YYљȱjp,l6 g2=[^;(*#Fq>@vZ6a'TǤC{Klnߪ /V80aXFϕ!x2i wEWUO%5W4l=3Of "`)E? gW; +)Y)P\Gc_\GBv XLF=\8\IFtkuc"@ȫ+h2Iz"nŢʹQ'Q_^ò5rzj4VǤC:5^4}Vh^C9;Z&l+'q$S-C I6XTT9T*s#wVg6EfFɝVNTUky5D7.t 7zk@9qLγKE_=A4~&o7{Ct'=>/x1g=w 6 |V+'Hz>˲}y"􋰬 ζ2ϕw}'"Q޿1򮸿Y40ݪ *3ά6غڸx1jPNz_Ո>A'~{ ل9Ec&-dngcbȻ"7B~^D/FPxG!;1+ (RG߈/G5+RGM鳘?>o?pЕ7/,Ý+|_ÞozҲ/}WvW;Y\ZTV.t[i۰SlNPg{֝dfޣziKNnΨz?xo#c"}>Ԟ9ms3c3c}2^Omp=1/,Pz,fBLUfS~sچ>SEkٸ8=%왛(S6Veq'T|uC:;*%֎|9)/p,SapFR\a`'"*T4(x3R<쑳s8钢7Φ*OGdIލF/t $w>t',u O(k5jlcFxFb¯CQM2M Ԥ%Q?Uq'R]d!*og;uىc^? %#=𖙻/dvXt<;3,?KGhͨ W u_ fEMsC]E(1jx?9֞U\ߣ =87JIDX)kC9i:FCFU5w윶^>NwzD a;xD*Mq[ȻѼ:gu3vo;Lհ_~^sUdP[&ػjd =QsѻTxR 8*=>VLx  ?|Aᄀ7|VA@*J+Lwbr÷͜>NWU&);gCJհ٣ +y^ +YgW{X=<+Qmuߛ{.G9gU~=kѱ\|;X|Qf3 Ufƿ|0 e.\O=bϽD̔#{]+3:.:joH.X4OV6%\vw2=^v>c2b= |X_28rY+? kdYw{\-ǁ)8Ęgi}6bw_<f:߅|.~>جbwk2=4].ؤ*cÙ۠<٩N/=IubKk>|c#aȌB<fA"tBg;b2 4;WxQ'ވ:yvl.t\"#BҽSqq~e7挺> wz ۪9t=`M;kudz:j5|T{cGf`C<2቏6Z/V~{fgbJsJ3JϪD=z¹Y>uƵ-#^Eem"zؽs1s ݝILz9o5z5TE\:رv*Y%H̃g`?V}_|;p8jGwuA}a:LӼ*)8,(S-|o{٫U2Y}^nt0 7S&jhc0 IoVRw螸FT;R)3ɞb"yKѷj}Y͋Yϻ"/gnʜ@zN食iDl5uqa)VyӋmE9T@;}Vܔ{/׊o댳T#UKB  ̈́(S׿w. 
V{\ `H_*˪_ULn5Ÿ9Tz:ʞe:N*2^37.nf+ZLttp_TNz5{~Zi2'h#?~]鷵7/6JBw"] ȵ̮ŧnr${7W *58qn(cȢYV9Haj}ŽvZw3㾎P]0z2TպxVnlj291zr!ET+\aNg2LRM+; r`􉞦|Vˤb~Rq\74 1UqX й+ifӸ}v2).OM"6Y_ 9-UXEļ^]Fo"GUd;jk*磝\31MV 3bð:OX.]4?"(':[}hJmQn\]ı;[%U-a\U1ml(foGv()[Xvg*ŦPU%;`/]5I<`2EB0H3b3uFgdF $#YT{0ʪiY8RմEYG+=vJTyu5o|IU>у _Q+bg$ry'9yW2dP7E/ev%ev{F6ExgD>FUt2)t>ZM<؃ȢxUeb]͜ZSs :lG:*u@ 9F&g6T~-/Ɏi>8W0[f_Iߴ|Og=V>n|RmwGU h,g׵EU"3MYvbE2 ¾y{JwSiJBWy<~ʓ]wKqeV:eAǓ eN?FUke}߽|cBd*LL0u _\2nޝ=}GBLV4>TX{1E %C3+fD`9+w;e@?o*/S%UZ9yWxZYx#DWI1IV#vxބ>wCf,Zt:@s\VAL3yکSe'twhd#}y\V?Sţ?r,G=.T2@OxzB#IN pH^iij%aawm纠#ъ;@5ٯ.ݿ=jX uR7 VUTߝtˬK2w>/1/E1{aOO_ۨ1JEŦzՠ_pCiޛ>޷ iL[0#Q[]]3hL[ ܀Muro|]c|VLjw rXuQ(.g./SS}] [O^5-\ӧdIcف0ܻ7NN&q} q$3+H;Vy3d[+_no|yf"S|T_k^*Gݹw9 ;y'uC ;%fv[cwHf /vi~βLr7e{p='}֟k48oH Ye=wa}7ex3ݺ߸In#!œY>VesJyrNI7MJͥ&Qmչn; ZEWKs8Ld]מYbQhdϖqw- nx g")x] %L}J5:dO23 W>1^-9|/[{!671Zy#6v4N"Bg_qx>H >~LsL32޺x[ik .PXF#+>5zbE&KK| 2OO5!O=F<oWDuvLpV~k^H>3ڎ"u}4>v[*j9?!8Ox8jݑ~pzۊݪ;mYDcdڂ)k,{ tn]Ls66K|F>~{Cfu?Oɪ;{Cf0̨&2;L/7Xm'm=kBkhV=nF7~5wn{e~ґ;uŝԜ˅Ruv~gyrșdUW :NY(-wBEqsKn,\Hj2m<3 d>ӳHzOy@{Ps\G<+li-1g;wwOԼN`Ծv]/ 4F {,[ *TʌOG:y>}ۗ<Λ;rRtŠx㉊lO)yυ;c~ǡOvc4.oO3Lwƛ}IxnD'ZPB{U;8N#?/+cr ݝ#t#! 64mY7pC@Y||lVGI&Dv(D;N$xfnw_+2cW ]Nl`sK1U\"ƅFOdB$ƪ HU0yxvցzƻ&ճ(PvG-W+6e+z,7hxdH $s#>sCB63UǤCDz4h_Utƚl|FmMǻݕcnN4rYo;%FW>5&_1 +vxC X';wC!Oet#A+*aR z~߷GA+ = ~IVU=>?Eeۭwٟ{=旉D6β S:x>{0n٣kj OB>j2M*gFk(1T&`VkǬBfl5Zahf::w= e" }R2?֫;VݵZ_W´@KY< 阺T$ߟV9}{0|J'{$#ʞkA`Vj]-GulO g#q>u{-AH-Ǯ'Xt \?z)#O0 EZhUnp9fMs>kCV2޻(*?:'i2nǀ4fUe8 P<:Su̾^?q]XU?.Jᔬ؈NL(#{:?)YcϨˎjo0 9&Dh,G[\s Z9صu=D}&s¹VΥmT}g^ouVk߿TϱVˉl3QȎ珽&bg}F5x~UWL&*U*;%P 4B9<ѻ n q<u[]wC[Iu=[?k4k5auf =k#8xw'Nw=1{aj"FrcpN4UX9üt J8L)lnc+/ə?ka7؃x֘&m*ǻƹnU Ud~:M\tgv=1xW]OVws,T WʾC* 9"~O8zgt烌8(L9{oTGmak?8E;~nTlTPU ѵC[F߄Ig=Fߐ 5dMzń^OgJI.?ٴU3N 0f?̚uڪP;~ۊT)^}|f4kJƱQktUҹ :f,ᅥ`m`6^+hyDE=^t/Lr⎤^/%YTycb]*L1 fb*l^0^ }_J܍B+3W q1N*5|J9+\[3ŝ.3U7T! 
finrpeٓ-"n7X$g7<  ZLm{k#Fh΀Tf.xB1+JZC<%I+j}r-O6qݱ,׉'NUMOOUZzϥ]PULk /χ=V>vl}>nѷ Gh/ȕhL[UҨKXUnxlYH6xBhq>3 ϵ%v,ŒFfS2{c a=N,Ujܢb&#ޥm}Y9fxR4;]9_ޘƟ)3@*ǍWڬvBXrϚp݉f,0G ;K\/z?{]z.1e]{\+ք,؃F3%)v;2GVE}=.WOx!!>nd}wY)S!oコDQ'+7ttc \u' Y/t9)/ya񪪴91ܺg2kY*v|ZBL{j=-i8f [ohѯރ 7>ܐuSFVIOs֐ٟUyt׊g2dZ +gYߩ9>/gĜF>+dz*=y`t856R0CWGj&Rz鶴S=Ys(Liz  b4knLZ+Uկe* rkEЬqn+@zmccӹ\+ЅzeW=װ;67YRqF{8荊Zc2+ב7_ɜXn >NtU@5 g9{`$WZ怰JUv]xdah8ҋ4s;A!/hK'(e%.F 3Ratڎ\v꼓׻,bL_Mo6k{@ʞS#챪nVu*9\ţO^UNUsjO~ϐG'*u.ǹN1n.4QamsNkox'V+yhzP_ {!Gy_:ɵ5녚yrģe,Fg̪S2MxI*Gs|u'LAzJE1/>`/s6֞ȀV+⺨{*3ܪ|cqZU]Ϊ6zYxUĤ$X̲ {o/ x8nT^[2"{wS,U5咾H93\WΧT}]m*rsmv*Kȕo;E)(K*Da&c ks8;/؉ +9!E PI8x ̀`cP$_11^Lpd$}^]OqT_C|ůj{64|ۊP'9ny!u;:~!Xpj^֪:߼amTpVk _6(Sb._~PKZ;))a݃sAtNKTX1Y96f O}P&SSUO9M+,~ ʄ2힐RP6XȞ u$aV$g.~o8Ǡ5 5Y>,zm,;whp eG; }'g_>}?>ARS]Ǭbfhis/K:!Ϊ=ϛ'"ZZ̤1rm;\(J27LϊZoe= ʮVk;$I'ma,I'/"]<<)Ǹ*wm6̌rǾ#7Xt[lOD%ыNkVL~U6~pivYɶ_7ALeMV0_UCGfS>*~/fVد^|E* >9zs;8{MI:nGȨk&ޥZd5R&*Tg|1Mi1%6T3irw; A'dҝ#'* = ^8bU>b z?!DgֳǪX:4ZMNpvzΞ\K<$$gYÖS.:Xzl;!&23,ʮ0Fg߱rG{鳢k?xֽsQtA7h2}| ̤dܣͯ=a0ʅ)Liŝ)qzGdC&=iUy~xwhp^bȵe;웷Ύ)c7,_;iUsLEŠ7β,!%OTbh4̊e7$5M1N2!h#܊w"~QUf2ϕΟ=MB{8mm;"|$(>,3VU~'u/27294f5͂qgr_8𻝣K?JǁtJ./[2vwQD7([61҉/7! 19L sߕJԊYtܣe\iFoex/ ?wO6*%cu1͔+-*7U2}-c LѝMx{Ϡ98#JW Tpϰ(s[7ױptx߯} ou")|N gw2Û^w5b~~w9G_ntwQmv(1^TY˾>ȃdݛ:7^‚^"F#3L:t (ysdLW?Mr'1F ^m.t83{#a_~&û?aע;7`[1G~B=? ]f=~Jb;|3*or-jI"lNwGb'V3U7\:JzCf=OLPr{ߪ:/YeҢd 4{SOz砽C phuzWD |؄1w2îEO/_DZGWIAf1C#8Ľ9pj$] kL 2N9sm\HwuHI+ Uvzn)fʴ{K ]=S_VtZSUB-IlsjxϻwґtgdΓwxg!!seo{EANJrD 'Txx ?`MwcЯF3AEye'6;R{ ]+>*Gbo2[>jS,8ET!vev+ƅ`$p8hqoxV@^x?Ni+''Tq,Wt)!߫K4dyMU!Lg ^N^OT$?'#| Nt&lDfk uY9"3Sr/EXGeœ#NJd=&=:VQ}dg&y.vrYYUs\ձ/]ug6" Vv2 /߹炖hFJOOI1U_`23Ů_ 25.\} F>&GOrLgY9_r;[~/8wR~s%ˎ<-@osѓXHZfӘ89.HLރ.JY&ȸ._*ĩ#Qvd"S; U| =(8ѫ9o3{ǸS0=/eXѪ^6h{y6?pnSܯ8߳HakbMW8F zFm+bpeSŮ۬wUN5,ia3!(G:)\Ow|$"2GCF/W.vLs5UVCYkX=}ߩˀ쭱:4#($|dS| WBS L(_] h sB~UEJ ١ƴV빨Fvn'e=3%bP2cšdO zo;)L ֓{}tXz3~-x7*_`ȄBNїʃȭ⹐SvhU\z'H"S̽`fą~P~#K=].<1^nu3Ƣ|E][L=.Ob3U? 
߉2Mbu S2p*[VјʘࣙL:eo-MT0@UTcwLBC/@e^)2DW4ɨoBhR/+mzs=nA|/p|=x#j"iEҿgى}i2˦&0i,&[>Č mɎ/mEzt֛vLUv2r2߼gTdj&O)e[b 2G\ `pļC0s{3P΅Y]lCA[_kVne].q'D^=i2ψN39.5yt1Uk8 kPuDzԧ=`Ȅ\sh+V݃lx:]eoΛЇf.*@w5wSٵ̦s=.h斲 ]vI395sԊyLl/+Ag#2ӻ+-mZ& X8?pW$fQiW':x*3Sfxw}ٍ񞋞|tE[ d-wYǻ9/d5JaY Q7̺&'֝HBI]Y՜e1\ipY9Qm]NK'T`Q&d Rdo)|;T**?%q[%ogQw((7ɨ;ױeU49",Qyݐ)0BPfmֵ43Lw|)$h\Z3ns}ev/L)\4q vc/f}:cl*,h&/qr7es(v}?fNJ9U `H*9HRm1m׼rRZ9_X`g\+ɚFTOn۳J~euy q]P3Uؙ.4O -՗:vn+h4 y<|Qw=jF'}{yL;g 5ZU0n8B*1;lh(.BSF^}v&b?ct> ZA=^z,+ť6\-„:F(@,є7cⰪ=8ombNg4`^t2`%X^J=)(S JXS5;H+,lwta|͟<M,㉳`]T}RuZvїKl/kcnxzoz֤kȈnJ^'|y,*9:`JTndNN#5]'N.#W]&k|csL+00#W>V8Ktg3Sܻ"_ pbFYb:0tC˻Rcv] svL(#k&{N<{_ӥWʎ64jau2JάWaz﹡1k lKi1m{^ ƱFN: LT{Ц ljwVUg]zr=Pʜbi$+Un=Y1UxtI`TfUΙtE{}=lgSΜkO3u}ƕ,VuO4IAE@PYŪt45L8ʕrzwzA6tu X5Fy'(8߰T s9F{k؟<u/OuU9V (:q_1`Ŵ&-HWQ[Y8ZcshOHc]ӡU6t~ƊWF>w%cF&d 2|װ ]D͔Ɗ%A'[o+yDk*i!nS v},Fa]PťV~LN\ rv6L\j])XʖRݮa3Dj~G=hAѳ/ήL3eIly-Wq!GO69G *qh|x}?MUU4A6Bga]uI*#]uc&)_qoFƶkfcFˏгş7L/J;ST v6$;Jued$MbF܍V{K,$SGkT}O0UI3Ƭ3hښf|t?cL\Nv>)k<< nE,@2(9rOv6FBy,)MNXn"@b>$Ӿ9)S3ig5Z NԾy_`z,jVf+o妷i},HRs3OnT/8HSRNb;9gꌱU;1?N;ŽUCRz:}_2&5΅N;ڌ#uHɰ苜)NI[eTF, UB{ *4"xCf03YV_V݆럠\ӪRjXM9_7aLvv6 J.0EP:GTMb2 qrq&GJTL);X EN\Cռ\&hg0;9霆ﰩ;EUsНg81cNSDoU^8F;K4IN'*%VՌM}93a'''Qr'5R酪0pWrv Uֻoh4avn2{#;h]`؛QSXz҅:83t|EMy4(2+f^&^zLT970xpD(Nq7V'(D]TWK0G!-)H 2>nn?~1Aṱ\eH~*\;#onzGJY*&yJns ֝1zqKooX?T:ve7\ύRJ:Xps9"ʂ ^&b%5}Vۊ` ;61:QU'fwTc<~/;BA|'氦g?ӘXXqJngnI$VJ=2Bԃ1'߄Ykteڸ;F~/y )8RU^gi*0`>5nmg>bz\DҤ1.Bucuöf@Iv*-6m+2g 5?K}}ॏ޺z笺vXg׶#|М̈1( CuY*Y7Yٶ@Gkfk6ΣՌ̸qGTvWppy8jUn;l1MtFv´$cRk)yu7xfE2*R5q*1@:VdCL*B?Z:Aȋ _I g&ģTIQ'dE{ ?K9 O)\fKY痤:c yه:UUGE{IzَJS޾@YD(Iw} _2*)cR]|K'~f6ӽ2xr)cR ^.xg2c&:SQ ) B猟|U}=dK,FLTy`؞ڑP&mJ'WbMPyԠ _c<˔X"8qT㷿KohKBn, '<\Y℀&+2R{p"G-nQA|ȇd`Ffd{9cRN}x=R1c&U T|+ӝΠiVLʿKXcw,'Ȑe%sT,>~ Vcd7`v?9X%7LݮVu[hB'h˦]("v:Ez8*3ƿT*>UjffCEntƌĔk؇fwM'y&)Io<׽EFtͥh=`U\vO 1k9(U s#f~*bOu$k 8ןQcT2x"c*0.& i9Rr$0K]큂}w،Dŗةqӽ؃U56U ='f^w& #4G~=H+q{]cjYET֝>h ]g2wlFG;poX>uu3F F}lq&C8S*GT [ ۂ&(fN,PT.(ꗬ|:Fťbbdr{tg1JwsXQCZEl8h[[sޝQ!z@vKm z(la§3?/&}Nb)`)w;yEu +[FuW[fzToiy L+I9愣䚔K<+;ª;:88W\ :S(/8 
}Y,D;Ih._rUo0wvڝ]uo+GxC̊2)V~5{Ng Vih&b gFX9*gL7MV{n w+3`gtsۼ#&RޥysS~]TdS}jn-K 2V̴aDMϔVT1Xn uS ma˘sљU݌2Hgpb+0 U>kj73eVU-f~"-_AB> !*(f|9)12cTۻeHU6ʊ\],\Nu1svd^E1;\CD\A\r;\ݖyMQRQ# q7*?Ŵao?VePAF\m|y81o6&t$*|KffvTcծU9@6:κҶ۹7#{'8VLgƊȻFb* SWi  '1N\9cEGM9#3> FvmWcgKJUE9cNIf4X'W~0,a8c%R?AsoNY'fbwtdrWHOͦO))#c,,g;w֬ ~N}߻gsu.WMx?Q<,zo93Fb|>P8̱+g8۱"gjkc~^ }zp{σ҂P<=S.koƂj"F;pz7Gf<W>&;s;%ΑHr%\mƷ3}aIDW ٪i7ӝ{>R UYP溧?tMS rŪۗ X6i¶)[O鿉#N`"6hۦO7Vm̾Fx@/*OWVj fiswF]=rvKnugr3խdWd& KU='B0zꑳZ&Pa.*w1S1MĄsW\TNEggn|/&5U^5~ r3խdf@Y| S+" ]n8#?6ƪ 3qPjx{A} i;yWڃ^@1+Ul뤪Uf26gQ żvޫtO@Ȓ)+ƎCEĘkw,73W~`g9^ v|Yn’k ? d;"ZUꡏ{UO5G7uyޑRvr}}灚r, 3F35_FΒEspP=,0NwNFTiR$|_r<%G_{&m +~u#=޷ WR(߭#YڼdĬyvQ,YeXc 6C84E׹qZ| 8jxΫ缣rNS%!rN&ro+ܫ5qy "b=EL! &TOVLk_g{/1A0]S#]U c P=",@;7)IVg d7xT/9d_#ֹB?})ϘzYΜzTو91+o923YX+ͧ=gi`F/LVȁVP|/7VoVhfZXkWRpFg$E'-JzZ]vgکҔg[fqE']-p&[ϝlpFyX4c* +s51*3Gt }}g}";b{tx9,3UH(ݑ{=M@~Fj,>CO3qK%1ףl ~FX_+X$dkq]wt%|K%* Ƙ e"Zn4i{w1 2>D~y)L/} qRWk}Mf76#ťVӄ8{zp8zڣe&p!U6-3P⫗B$?M2v*XWq{7J3+[*{ݵG.&&bԑ>{&rk 9q~rs},cn|xme)k^sF2)x*kʱr,UyuLrLZ N}κM}R/JN:%ޗJᯀͤ{g&Z陋Ȓ2EYu+{JYˑ+sLmߟmR7Y>BG:5s3Y:\(/ZBÞC gsN?~Q>VYnRq5e+wJA<}T+ddd]=O;sf|_vwrw>;zsQ">T{).-^6&t۫]jUJR Fʟ: ЌlAj<+#8벫1%7UYˆjA;XzE>1kJ߷ݿk܂X bUM7ktNvU`eɹz?]9RVae+A&o:kiqX[q7}j`%SV;IjjouMɻ%"GXIs'dџmxyןycFYiAwzR4wD˶O͖tYO{=2YD9;z\;U]= *Ìn{5VSzS2FR'nZ!8[¿vFtg]:}Fktu5 O|U;+t[r[u-0#Ykߔkg3TJ]{Ƭ3utr{qRG~^+X.{k Ue2F ثP/n\oRۜ>bCYNpG"Gv*WEz\G[/{ a (9 0P,r`ՉAQƄyʩ9VRIsTM;o^ n?\ec欝=>S#MPZkjx[c¡׿ r4z/Ssޱ[b 䑟Ա.p.̱X5{V߄g9BMW9CsS2U#sɩ -қ0cP:Rf_ˆ3"޳=s3F3fbZoo״c,1gѯ8>E7B?;q^5bDj֬uk'v yN5h)wpVb+]6ur.kA{>QK ɞ5 J6]'[LO@Iё$:#QJ9 dVGڳΔ*1+}_V8/An82ʘ +ڔyvEGx;A` o7JFXG<Dz>~'6,i'ee Ȕ/0F*A7&SJ.0ks *DyT~&ٯ"ZF|;&+%Y圕>]w?/$1V b$ֈs71r_c,ox5kF2Ly7kٽw]> lqkUYTT$w0M9.(ſP |}ZyWT˫ݓI߇FE*gh'I7|P2ܙO}2S+h8~1k(ر ~ceB QkFaf'RwvqAT.M&a֬gm=`YwX5 6ƟDePNnԃvߜEA$I&\~q3wLhGkݛ7 fh'Iȋ=.!rCɌ| S\ۜȍNvnGv7cfꤌNKܷ3vPyghl V#'3wˈ6agK\z1AvV70C!3Ӡ@xq_c9ωْLA(s,ל;+ƻ&2݃d B)&n/

s\QsAk^<^L;|m%&[i(%0R5q;x488[#V9λgEzVn/ډkÔʿ}A/xg?όw;Dھ.{wwrج+$:P1RvsS,Q3 MC4XpI^MϡUWhZρ]9iE1Q+e8Bʙ*cٛkɄ㳟e>$G0ׅ+Q9Seցmi_s۔E-~<8곳W9FX %g֘Ui{ rgW#jF3*30mzFmµ}E}_nRUQg7|ٕRc#UVs~Pqfߨ T>%o賣;y>&^[Lr#*A ;zWP5Q.跶mG(9SI i'\ȶp3XJXݬ墓BVm^MX6,l.*La7A}fs,Nαlhg sLQt*ouiZSN\~v <"1j⸹E4rcmaڻ6 } l宙"~@o0Jwvu1r[d[3n܁+gEt̕8Vh<(t׾RW =>_lqZVf_O;\;Yެ4H;U-dwtsv(UNI %%_y;(g2H$k?șAY.WəejT81.r$/{F9n:"I~0#Ό6w ZZ' M)aTt<ʚe߰I@~}fT?d _5R_8;+g3Nĵ ܥMP*VMz!Vw|9boʃ|FGiS%}~gp0c_x{xʑT~2,WڍB<0թ(6jk_˾pv,9;f;hA@fL쭲yUj|츯Iʌ [9-g;8TnXx|=R3Ƿ;e;fkRa/P{;5 :dWhYPqY(??6i83ex>&̸19ArY{]Q\X>δJ' b}e*a{>BYc/7=7wtqg籊*j6' ?Husƣ83:7Wi0C1:d\T9^Z3󪾳VVU37g{sQFyo/dn]_9Ys,OPC!%L)%>Go|^U*\I}P'qA5>8;'Aw̼*lyV;F^Hy|#5*w{ٜj6#512f1˰?ɱgF';*'И ̩{όj_"g_|C*m4 OhAH")iĚw@Wbb_JtF[ΠDR҈Z'0EYȘ,t ݍL6;+=XuG9kC_U)'8۩䅩ו{ctTDژosB?8bTf#}dR҃uzfÌoXuN\3U^5 )F= r`Tn(QRY2M9&#;J3S>~ Kgzmskwz,\^ŋ ͟53۵zah륮V]*Wl.歧A]E蚅il.0#GUUz3ӄ<9-՗d/gg[',9'y[f {/TȳRjS>qm(' P5>Vjc2!QJ92KvLH`MI3Ʀ{ݲ9i?{^)Ub؂ !{e@?*gLvTNo#Apt1݈W1/џJwUdeb'5܌{]"ʇq䬜_/u*Z)K\4{(c^KeUrS8GVcó)s9g${LaW=]f]Ie .%o]s&j" 㯶Mפrb"w;6;^ r1Yјi6nrRs?qNgPi#bO*LeXcxmvރί\Ezƪ"='D{r*"ijHFk=쭫w*mاn13hf yWkn1+:Y#UmjG)Pf GwҨݲ*H&SiWhC$=Nӌas;0 yVvWo#,Gy޶XWye: *ҳ1L̩),.Tij߭B58SṦlctMKgmdFߌoR/6,+s.齣FAg$U$07cR*F۸ q~/f0[Ϯ\O#Sَ*gYLT%EIsJf^3CJ\xBV/;?|N@w,Ab4S#;尟9 c4In(MboίU)CGIV;O87ȕёV9߭|F/H1QQzy]D)d@\=j`7AYg[NGmNUXWα{#n7A6/}v\a" $oR S&US+`H!sFߌ=d&ӾC/d_}nOV[WdāH{5lrDGz|qn2sD սD۳;5tMr;O߾wcBE5R1m׵E굪լR{]o_К> d3f<4Y'r'+,hcM=L#,#~;C $H ^e:n7Lz쩔q̂L*X#[}\4ήPU0e)w.mQ1B/7WZ 3Z=AIߘƴHzBSni 9's%zCT60ʊΥe鉤PRɴ}qj;Sb J,JٌNM/ycfVi&߅a)?&n)6g 2rnŪ,\~_gϫv_f`GNJ9 o@W[/x@V"T yJ1RgxN|5gV ?% J W5˅l;]n1GUQ5G;nԘUC4b4`ݿ5Q3#e[vNkThJq0߼)W;pAjnMY\X@j䦒9}u }F;߱{ܚo8<5*'5rToNxN'RUcTOό#<!rrP'DKY}pN'$=5˵c΍fvk5|nZY`^u)͸m>岀Ĺ)*:rlVHd]/Mo ogwt8GF"Tڰ͒%#ֻqޛzkL3ʞ`{sTOˍ9fn~+NɜZN oqͳy$OtN 3B}6E 2>LLUy_p2M5ns|zP1 ',#`.e4-"SfT"]~d\(y񻨰VadFl/L9z[%>wff*hNI2NVn;h8꾳_]uL$ r}QgWO )_џceyP u0Gv%dNMew^| :vzָ^\Tgt?a|Ft?ͽwvoPwB0ɣOunįipagfּ/t..5 g'nnXBMdp6az+vlgo yWv3t}>P ܓ{&h^߇'ُӳ':es4z śY*2cG7("`?QI3Aole.qʁf͘Q+F?^ps9njC#>,vջeq*-TqkEdGY=;X-U5tꨵUY1~C:U8u6aҏ-Mo)n4gpi) j!&QLn6le8{t<뎈f 
v#0x}1|Cݡre}p0"DiՎ3]RyGXfřw:տ14ho^w˾S ϔDEWf{LuNQRH^;m=9FVX>Ug&D>?7@m1EEr\Kĺ9SJ90ipzO)*(O[~q- Kz0 w'3"gK;;-ة=?ᆎ4Jq] /F:bf;>2Zy_^ ,Oe}G߼o3{Ld$VQYD'/0 PDvV`6uoǎ!̻ÌϜ LvDd'ǭz .OԔieN܍Ls?G<}gv{@mdJ*mANN3GX_3bO1"*AZ995|7ի[f;^Vb|e۫2 {7~o84;TRCTy֎wLYEu;lv cl[Lz}#V&ޡŒ]Q Tr7kPh:>L-jz6 Ag?;R{)?9a7[o7XJXRzk݀vm:֜-M8ف̶1_U:ozݳk(o{/EMѼS. ޕQ;۫{6s%gNww͖")1ɝ=@اҤS?L(Pa!z16~S\86)ogwyΒT9ų:O+BV~kHP٨75"c|}hĉF3.f+)d6+}OӁ#n0eXQ=Ӻǚ?+Z$$ן19Ydʷ[:E1"Uk-'-oUrR){SPvNݗi7&ad%YmZzEF]'v3p|ǷwOhl 3V=ᆳ1+(O== 3r,wGщE޳zˬ?٤bkqէe>Ȩ'-Ȩ^MjrIy0#C"//;K\ǦJbUXV՚}ZO<H~?Vy˶)gskGxݔu8|!䚝ml1욉J]=Y7SծU9+5ͅ]wb5ߍ1f5߾p`H"<ޜ(: QBjYeb ̴b§0'REꈪj;3XX[A;z)Y|$U)6l3f|^j*#'5W)1OG9(ba3򯓁z6.WUFe%:HQņRGXE~~\sn$X2d_qgu0)oQjխfLLorT'g7U}Ty8ɮ&T=WU.׺ l ,QJ=uCC uGhC MX2-8s359Eq|;}p>*4Q]I ~Nd&XUa˸q#?>hѨ0:2{wsϹbWɊMAՖ3)0 v_~1{߽,P}oݣ1Šzp͊ HA(a}-Y~3Jf0%Yp.Y {rB 4g9OYxGP/mQeC#DV_qz~IOπ\mv)㶪LH~78ϗЌ[VmMkȜ֫R=#SS<Rڍ^4k#JffS.fLF}=?yQ8q{{-esƱww'hfBd#jX&P`}ԟOa_mR7i:Gx 2 TexPVjL(q$Iwx͎ouA5ZbŮDQP >eZgsTݾY`8ΖLOHi o'A\}Q=&JE}tyOдӟS'rt*k˃= gUND!ٱ|WO=G3`zwcAmL.drBÉq*|- qO&Sՙ҇?85]zZ&$+1Sw(ThVqd1(Q)}3z T] t+tLkRFpd+Ǐᄃ|W9`¡Xߺ zM5+{|N7Ⱦm׭.,Q'U]~7t"7~.Inq(~bUA$W ji8/i誑T-\+*kBnVU@OۍVLHkZYb*+ y熳r4cwu:4J zS\i]t34C;PirO>&?8qhwsa T4 2%ΔRy7+KUtQW]?k^7/!-) Ihɹ tMU#V^Y3T θ8)e*3ns5Y&x*%F͕s/m \=ur,,@tZsbU05r wc]z+%>{n13W83Zk+QwO?AYXHԅRW;q\NgYΘU?_ 8E׫2UyC=q&mz{3mo4_f/70WMPftvwd(QJUd=3?)wi-3 9RfW ߃S~KǜDh1sY'Lsѻ`<\PxQяv m(5A\wyOD U2+}ʁq*c~1zE^yrRT eG՜Sՙs/p\1)o ArLVT'*zD>)DY_Zp aޛzk쮖s &~;#SXkcv>Kp?=ǸjjO酷K;C\y]˻zv}\5vXߧ'xѵ"?ɅQC>]PpwPeGrK1DIwfQ;wLd^x6'hڛ\{X͸oΣl=Cm{J֗gӣGIصvڠeOzƒW찏6+NK Nn|)䞪;s+02؏hst`+q+P3wM)vN}:%mJQ XI~KE,`b~} ?¡댉NZd2{?x}AM5M6H|$B ڮ2v!ζޱ2VM n-J0Q:LleX _)\7ræ!}>M ̫JY+ÿ)E}=z⨅֕U[|YMykʦ5 y }dY-ƅU}p.dzǔnP'gƹ/õfڶa޴gȧNFᛷ<⻝yF\wtt8QUggf7U-Ĭp3ܝ\|7j_2 K팍NctyA۝Wo&Q#ڸiR#1U5Iw55&딜c]1ܡUArV7e=\ءj"!L >(rf,.h_Ձ7AOidz3򪚑HT*I:X*~u_:{2L(\mI٘^F~mJ>dt|Seq T?o>U'?ZffsOŴH3U 5Bu>ТE{Z}e=XfJM)Oڪ9Pig֧3wNvn0/e’%yV#|Ѿ*6GbW]Zf8z͂ڙ1YoU]BOٔ }g5"63 r\OiSV1}?Y{l>V}3pZ\GhC N^ȫJ]Eo۬;+mqVjߕ 
ݥ"qCf,yg<[,qiw'~±{L#OcÖaG}vʵaɤg??jg_;SY/ٚN)Τl:Z]'W&t摜3Z]#Z|JeD٫jժNr1NU:9ݏvR5l]}1}i'sę>Z5eώ}Y:S`2V8}e!m}O՗|6j{nh'wkoH:vBZ@zюn*YT|0<% Ϙ1kn#V`{{AsBa/Ų)>"S~n0LΛ+N_{w%^g}'eZ8cG} VL/8Vd6QB0ŸPJA; sz޾0 nL`)a=Wto!_dﺎ.ߋ]1jOiŒ9TE=nhJn/_0F'5 4@8rC[8-o$e>Ӹ#jQ/wﱴޜ:pNJ[+#ygJ1UVVQ%`;/˿Ք 67&4Ux4;*g2#ƿ\dNȑv;c|郊ٳNlV,}+D+nBc@YEs8={ÿ.DvS @kkiquwXSXuA1K1ISY8T8͇QqѪ Lێqh +++ə߹9>g\H{k5g_ѨuwNwS©"c'IUk:qfegzw@ҍds9jw23&tqcM{{.P85YOI s %3iפg}aZ3&d*yD,9갲Yn>WLeʿ74~74ר?V:f ?>2ZHѭڈ͈7glUm үo*؄S 낝>Q]OdIN>q*_ɪ1{cM70Dgר}UF\쐏)֋~CGIoYx<P~"ǕwBkr#W:#Skh_mH칋3?MXJrlSyg=+4f[ثsz^Em埿?.K\'8x/H8y!E=VI&}Fult݅'u9{}fd9}iӣRȝqIe ,3 ?\T^yk#sFO<&y4qOK:/kD~:/7댿ovs6鉩 x̣+oX?ݟ?푣#Aeͧ<U}]mވ{NulSvūqftx*O`G]\uO6lvR'v_if?M2T8c*TUUMYֳiqVcٌlAd!׵Hw.^T`%sFYϗReQ|\{vfTBJogWJ'?crڊ\4m2vǡVg_$RHfe^DZl1e4FYλ۝~vniuʤ仼?**qRAzu13;f/W=@~<#:.w߯لsz.BA=v)3bݫ_W*}]}b=^:Wyor!k{‹"?M2vjfF$ߐ1J:~﹨U6 Eilx[/LH])~'5~FٌY{WDN!TR]U+젓=+ 1:}uO#ތ caaOG"\pv>*tGkJ e0- uQ8*k쳂U;i/2Tev]L\V+|}"+T̄*|O~ǘ6ʷUfgW 3w4Ą*k:8RR*Eu|a3Kկ2Tw$P~{gx bO RׅvU0]VO0&zFgP?Y'rJ`ʁ**;">:d济;Cfe;ݬ?3o\~Bڍ9R7st;UǫgTH-ԔBqeK-5Nq/4vbP̴)ƬlMՀTޏ=xͲbgs4f>]8#3#sLmA'gHIzԏ{#Q@~G+-摤gTHA-/? 
5F@~ok\m; ;X;&R9bU^ e)L(Mߤ9B>S=J䝡:Sub=Y)#g!F|y+3R?5{`JM]qb{ZU3r[E|^;#QMn%S?>8u|K1Ձ:Us==̭=CdT}7{?͞y`UҌݖ{t}"zs28sPƙwDkViճTW$\Wl޿Kq70^'yV@VNqfgYi~oƁ9f<&gvꃙL  %(g`iUkd+HeopNH }b9*$gkUh`:-Z+HZlèVzۉS2Op2ahpGZzDb41N\bcgx{6e~mDOT~/gBF \=F׭@>7XXGp;Q{#Bz)d L}NP}0OPg˃)쨉̑ۢ{}RA]wOpN>72v}g&nq2J[XsľZ|ЎXRvUw+t9r'\@P+/*NEx} !ʦ|Sѡ/_e}}gNݙ?;#vtu rZϫ;-c=Zf*W:ǧtFuv,eWJ@ÆOPIeG߶:2%bٌwizkޡgLUgj^a;l=w?8W},4G˟DIGʻڳ &AH'3*AL0(JQ[!ԟz/Tw9O|glDg'jVe϶s\P"f˺lR:*;c1=ܞ5;_LUS#]Uc֑iܧ3&dê<˃=gVsT󰊵ٱ>Qp]_t 6!~93|MݦrgOKiߡ~3#eT[ *XeU.eӸiəGM./1S&sߙ+@X(8PRtj"BZY+4+ ?Ύ$W{88s̘߮7LPI/閪K2:^Og ;갲X-M.ߐo#׿{>A3Q' `&vҧgbK'dk'~:[^%7F$5l؍sgUs,K<=Lu+Yo+qFqߪn0aE|ƝUY2n~z,FDIfL*ujH(̅7; ?AJ8J[7潥e!SyM_|︋jdHQoP𗶻IWJi޻ĨyB߶؃F*^UW[9#Qy6ut|9Ʊ:{>5> ewEhUƨڒpqXݻaRd :WaP3%'UFu$?!z[IĬKsToO;Oȑ>~a fa9ԻlŪh1ר3n jׯ?\IgNU"mW+L\4P|c5:Z7Оs NwwlG7n74}:D/+a|#!3fo\DyA|9cPu͘ėi{+0'O#%Xkn>qvOP%XY)\/;59 O&3 *u+ٍQxwN=- #ww ᛅ%6Q$׋}q$:0{l %ks|浓s+䲩Ʈrr7.#d{̂~sF\9Ǝ>Z~voo[WN:9jUοs"@?gw?j"psy͈qUJvRyp{Xuq;:WՌ'~CBl,+ ucg)4bn# tBO[,=9%~+#]&w@P|lPr!M럨7Wkٽ5&\Q*_ǟꑉU%RH`sctw/ .t1!qt*]fJNiGm@ opʍ泯";1 IOOm}ޠ='ق'w?EgBJe\uWfou-=";D]rK\=5 7=FF)Cn`PQs03w̸=kȠoic6?l <6JfEjwMqF3}=n!ڤ<}vetw'8=rTMy!T ƈ%R؟(5sbc,ptY欱vǡG'@]!1bqj9HGuivrk~؁[#g;@9@얫44Ⱦ#ٕThV˾&!)2H9b9'#_|ʳ"^G;ZsRVA՘|c?4ru,, Wku%Yvtmƅ߽cP/X~48~{{_CeMyGF~`ZjC}r JPꀝ10 7jr=߰s6D'yErh)*"'G)ȑ,) q~vSyWLhuX $FC v|_8R>$HZjl3cD`)a~[, 6ٗJRE?CV߃ޅ qn6̕rgբ$N2S91s5AxN|>(gyN?A/<weIi7>/S̹̐|WLgSx"3Fc|E[6 I V7YY[NL?!ZY Ⱦ`yJ߱[ϼ;ķ_VK 8/]jʦ"Уg=+uC?bdK9kxϟζWwo<gpީY o:+M**MpD?ߕogjfmq%G=Z)oSrcači/>0q[Lgƌ||?St~zBA|&K-v+zX 1"LR\mjv:'(Ax8JRE'q#W7nT9)'5rTeο=%渥Jru'b\]?7u^e+׏N?1厴ҤYu+t.kgގ뽈wdF۔ٯ;17=Э6EQ؃!&#[eks>N32t\nwa#VX*2 dكs8VTjX3T?LJ+]9HVy'Ubq ߙ#X7U :"N謫sIs,AЦo' 98{ W0Q-z1\RVgbf,`] MoӌJtQ/'9'Q/c-bgTXLJӼs{=٤C{.A`r?@-Ye-=RDN4˕a 6ydɊ?"t7NQdF;5gwֵf7VT-{oPwyX]UZkC>U(WlAE}Ql'^UUQ7[nxߟ[xwRMYTl1{NlzƦg@őqF/JOö+\%]i(ef1~3bJOc6>n9(U?Nv.1k,uO 43[еIVU7Ҫ)]).+p~&gu.i63ҫm-˕se,:vb4`֢&I ZWQ) 7FEM3bO:bUZgZG`o9ϴqr$]cKvM!؟}x/w uוIϯݏ o5Gܘra•菵}cF誔B\r^ V|G,|m=t2ꚌJC\[QrvB+v )=0w̙5޽WJnfy7S$˦dC!վuSZI0*F}6ԚnC#_5rzl[/ͫJA)P[mY3Wg)`j9=2O8+OGdԮ+: .; | UIu[G7R 
g_$GVqjbG{L*̵9+9qsѽ:]N+WDJ1pf;vv-"lDu ,v聫ǝƹĈE󸁖+|^Ԧ%;-o$һzgnQfe 3(eHoj9䮦5Ez5S>r^/ E?;bJ1p|h =32nE_jd]dK":=kϥONn<kN{6.U7r6ׁ=Q<6\ZREHU'Od)S5tZOQqکy@~v=!R,SFwŌzгk FH}S\@u}$٢gkĹLz׼uXՒ"_An^gKqA锑^ֆ\׿H;D \Lf1zGݝgb%ezefI_E݉^y^g+ޱ$J4\~A$MȒtt_οw?p;Xc@ [ {tCȹ&XOZI<%Nٽn]1f/;|yQOd.&u/3O ugYiZ [sK{u 9Rfx2 #(R*t l;Ii,T:ZXfb*z%ewbTxs9Ɯ|>֋jN3"m|ın>AsϦ>{ho|bx%ioN#9?7{K?9Sܻ"_3*8QOeR.x#q5+zƂ$EGLlǻwDdg^L $uiՄ:'osvJ/W GѴ^g>O,\{1z:u$Ok'ggy?kelV<;y[NN{!r78?/;+HqWELݘgǴfVIaީ\u* e3A(ܥy&},ѹ>̺)˖#V泐?gHJii -wzS35wGg>_Bl?l\q\P֍;dyݟ\hy`Wfj>U6U|uZy][n8+%_{ eN8Vhk" aϹ F.T TFoIϭ 7V1~1#T>Q{'ڌ#H~y6L(taծiaFyjdt~3;5o]Z)K|BK,[/\S׭ XVjW^7lȮ:Ƅ|ku((sN2#<4ľwȵҰ|"ũ|D˴^gOwSȣQ]9;?E1Y܌Wd(9SƊ]}/8KW#V7YWyȞN,ٶ>Ua*@%K0eH y.`e23Bmmy}TTyX͓m3'@tnS<Ǡ7LxvŴtNN5eʄg4 =Dg 9/G=-G[G`Z'hS]LJ ol =9R0tqAqc:$`*.{ՙr~4I?GݧȾlr:KR 0nzՖZߚ##M^UWE>x)8XO~޾'7tp7+]~dN8TہtL(13 t>y .4fta=rz%X*T7+~31!|d-|FqfGſKpE_G"EsQa%ndSou ֦:D+"|":|bFȌ;[2Ձ;Gy_~sND̚UQ Q}i}JƜ~Ft3rQIQZ%zfBF V˝OXUSS[YCݨwRow~k|u%y3Ʃȴ]cBF \gsXNdʊTGH|-$F\nAe&5:4irIW]?ͷQmUzC3V 8ۄBNwGO %ᾞGfmW[|q~qyVV;5 r97~?=@ӊu eAmաBX83R((jѵxnD(MºS @>#;sUydG=cJ)IAS=m72RS zdv٭SwjAH+jMs[ewe ";:` lS'@w9y_AJ{ N[빨'G)$D!Ew7'm=74U^ћikcos9 :8Z2s=cPۣ\zfr-~9z7- 6³Ox@W|f+SDJ gTH-љ "z(JU3fb g3j\M+udB0;#Vt.g{f?qe_]͸1~;Z}|wMzctv,WtU Ƙ\gh)oS{Key7dSO>' nd0fUY,8KNݝ9qO}y{֕v\I_uyQSiUG:+8|m,3e='gwFu݇6$y2ڕZܘOQrwO&8\)sBI+g 6eʼnߨYCWsֻ^?  lVB5 ufGYK@נ-M:c#ƝTH~]qO="YlsZHyhV77 uwbE`.F/ٝ,jJYCN;/c5N,o?hVgDe)wiExP)8O9W9g)nz{?n|g9Ɍwyvm\J -Hu`V:+O:fp-J[Tsep9*ZYjA])+ Nn\ O;z+\? 
_0Xo{l2s7FC/W7,볦RrXyNV*_FK&!uiUn< v}g}U_ijhi3TF^Hy vJqaָƝsTO2gbu\nIe+Fۺ; ݝ>cmTƍ_ cmtQkU-5G| Ɂ9J*eD=DQ+`UR-Hcgerf]H97Q֊:zGopNҪZ>|sq(Jz0=o(?qn8AWO/ [XZ:䎩{ YrBlik=記6d73g!s.˝=5˅I1clayڿtP%RTnp%7m'g ݸq5 m}p*"V؜-Z+8N$nr^aFД٭ouZKe{QO`y*r7<;ye|6J2S:ӞN8F#,b~ED)\v>ؾmzƜozӊvq'yq:eЪ:wGG̡Tp$;9y r…27Is' OG^erw \L3ڽ:Ӽ#!ZŜOĊS^j&\ m}`T rYTR {kּ/G{E L-N5>9Ά:]fB*jI0_UV#|5՘ZC]WkiV:FK4a|xS`GޔzOTQ0#?;貞z뚵]gI5줨SnMv7 چjmXo:ftWg'`&Ċ) WkһWWbt-1CH:tqVT&Ii゜uJrẑ;,.L-t}zz"Qrwt?Ϭ(7bFK-Ju.GlYL1=6I_x_DVϹ6½^lpո:[>?w^S74S9Io_>lܢAJP:{zD~ͬklOHww:UYƲ£¯ >ՙb9?g~v@^uC|Dn*Ɋƻ9Uq&h9f-ނNgR\w΂S]/xArN`΁fbWKjuʆ|6Ѯ4ټ~qZϨ.Y/_d_9.(0 [X̡H?9cL]t|wU6)PzvY-#;ɫ4e/w35T3i=쵪vg&uí `y7Êe;0 w<3~i*3%K-5C_Tss4G7ľFu+f$Z>i*87hw;2:wY`+;YWVT)G[c< ar#iC'\3w$k1)5Sw;ζn<9sߌ8PRIgWˣzUfm}Y؅ ME-3;|pA~?U=n$}l!3`$ ZNY|p*޸܅r965*PoIAͅ.i7HN"ԾxxtO=;# 4ҞV)oRΓpEKn{yFGN1b#~VqJw143y#ߣ+!0 iU|14ErďH;|A>kʘM=QҧeDo%rN٤:^ʦeճ{>X>/>Jw%x>qmnH>go-|&]>3f4XAºyO])]H#ONqI}rTgYoQ:00PKi*1=|iDp ㉫'j~Zgqo38gkbg>x^RYAFs 9%T3`7 kL!yO6?9s(YEwQ_ 6իg߼I_߻ |gN 7ܲ\ɮ5.v-RFVOJGrUCO3P)I%w8SLk?Rɸk| Au?+BTkAM^2@+,T+,Կbeuii1SL< q_/&ZOz@\UBEeAz־ ݃&~:^]vQgަ֢<nh-z/Hslve'Y *|'S>Ͼ, vW ^`S. 
酪&91%=>3lH'pQi|dzd#%@G*w=Koٕ}X1zCSFn-4+D jj+H`VUg5\u1BcL?%g # { 99%r}4),12%Vy6:Df8s6wg۠)wә1SYg<9z=!'jHVרѨkVwOd,:lzNY:cZ)'uǭnvg3_D7 ,99"OY:W+ &|#ߑqN ,}ZaqJ:#:T-1o>4P/B` rT6UXx}^ݫqC;θœXImNہzG7 k'}m5EtHbmKoQJ%Ф/`kzۄsqyorй&xw\Olv#F=ȷz59&)Tyuš#pCA=уxt1`E;Cs8b佩\@1#yqo^"̧HqvKm\]~:2MZ~"ގ0X G?y/]>&Ī_<"g`_2z_:}].DDkݍΣ^qE5aLv) TnBG5F)56˅w|a~߆g-#VxP%vJڽSi'3nmw*uF0t9S9dz?DǎS,YqyS> :Q \M:6I'UrխAQPǀ gPrߊuu%)5s+%W*\Qr5xN(qboYz37.3!|_]/PYxLn(jM&D.ԙUPuͺŽ>sŭu7[7f3Y7*mm<'瀒n/Xs,60Efwk71Ի_3wq|vMdK٭|l{2ȣ)oƮX݈tn^)>: yoTa!*ݳgػ>ɾ5f5K7OF'RTcb^~\lFuz)@xw{0R #P곫+f9FMN5764鵱bKƬr~B3 4lbT{PܙqZe' j#C)H> (*/z!qƤg/Ӽb؃$WwtyKfl8Q*a*e #€o *AfkeaCzŜfRTc%yOӂy1 61࿙:e䘮l"_WëϞɂ5-BnY 4EzNqA):ذ"_3βXH2ά~uCiZYwG7"ֺz(on }|uG_~09N(.1)Q_]zc = NqI(k#s8+yƹc=?7}C9ëhOvJopBZ+*^XG^U :Kuc9Sg,G4p׿t_He]ھᠷtЛH/W/&7Țj=woOP.~L]Vȴ)TLvOǓS-{@^'=~t9iQy%fsƤ4&E&v$8kH+Yx!0d]1$WZ3o} Eh:ޫGWFL/_tTPm&dmޑ$cڧō\]ki NO䑩A[ JGp\)|{]2sfs[Dzڢ{gxz!gUVL 9i7x'![禄yO 2YU߃{:êz+ FvҘ#Y?{lꔻw+V7r6"bfWF_'fdu rk;~?@d / MV}9\36''vtfyeT."ʶv#~[S+[뿘5CR\3 cЭ/UZ2PǎUDQWݛ^M!QGT~kvZf>顲e £;LbdۊH|+yUzP;Qb#hƨ~[rer^ȫJ(T=邿}P=\qژM;׮W0]%P]>n 5:UrQ3V+Nʰϣ>N+eu,i -Xɫ tQ6bF;c0k&cahO2*!Xjr~wӤۣ'P=>j&c ކ7B$h=OO<=`S_?=6sǧUD%ΐF!pFuv'gØU2fgc4r%3+Sk_&(۬{_u5rT7:e a,;xmO~7MG{jJR_LEp+sN%6s~gowyڜ+c5g@̂%ػ*#ąsD538#d@\rﹶ'RDYmO[5Fڙwн*74D'Uv3|+G z&v@ـ|W\zC΄J䜦8vt__qM^z]>>y2ĊvjgSlQjw/)Ϫ,_g7RHγ:Yk+]F\٩yX#s\y>QGm}ȕ݌JkAON4e49hz|X9+oĎV~;fߡ~;Sï~֪cs_4'y͜>Ocŏ)KxtfY֦p\(> 㻠ZsNޣ節;1#͜V{9w*XqZ8ĉYW*>k`h{fQPdh?s]Pϑh[ϳN,[\!{au=Ƃ{w;*Pg2-2O=;`sTOݡx|"#$JztZ}}Jê(0O5:mFux:\ =w5rݽ󉵱aF hz3oTߝmsP=#F7r`aZu |5n%6 P0MbOcÝyVf۟74Mv(sOk:ޡz{gl5odW݊.?|1x*4JTvC"ꃲ{D$iJΠ:\?vMFgxb԰+N;yęf|׎8K":C[rKoִUfe뾸)QJc4-kAFZj]̹k֋?慔bt{~mJDIGj{ :$L2iU8nCW3ZR^LG=tYW2۬c\1!/ gI&֫k'púfe?ZfvKyu'pbEطP=6sAKA?EOGQ %`r(TS~ya+]M9 _+xvIz:߈ 4rQWe{Jwƿu>)|c@L ^ͩASd}PLE)qcv`܉٬|6>ju|`V+"xr.{CrFr1d} "7qfYw7xsę e=| ; 0PSwS1%יٝ^M[|B˽9>]p_ו}G5Œ3ʊzXFk1Ӊ='*M'l-߀ߝ2mdV+ݺzTčIGGcՌcaQM؄qQzjJT~_Jt7.%N9v+߸s=Jc3W[jF 3ϾOֱƍW('b݁fͱLq\}VH/I'U49-_[cd!Lsm }8qjU[TfF5=>Go5XSz1vr{^?sMuFׯaG5aiΈ1. 
$O|fCy|wMzxk+ԧq=w9!_V}Wʺ{ڰw"#IңE`-fDO'ħYŪWጛ\Mi܂95soǎ阪}xϾI\<"#'RFк<\)1U5w1s3NnOxO{UK?9+3)69uQk9vO$>gɿ9ciҤ09sGo]QĞ|2Ʀxo@Ϭ썲5ܪט`9c٫q~]z3^hjwDbIjCb:ng@xv7 ]yF֙R-6Qĝ)_Qx*?ovEcvV{Fzhz0N8t\T}xԏ=h=3?)ዒn${N14F ofߩGx؛*P l>{QHɫ] ywfX]aVʊd\17nuYM/3e?sֆ|G<튙ΔiÔDP=߽z'Y)G[a/L.T|z 媆sV(yTU:ܝtӔFNnw{_ĻqN6GIuGWα>dmj 0s\:Ķ>17י;JɞݒtͲyaGu~+B)g̯gLY1v-*Nv_v_13HTnwF"7SVgw&"Uv~SV7}CoifE`ŮO43U5bt9)v +HU.ZėD+sY]P9KHJWV)ު)̠*cʚwabG3Jm|Zѹ6^gc7߸3rr^XV::y:<1%#m9'e8=HVuUw`7QVu:WY7&_rrAV̭qߝ5fՐs-T9Y)H;Żn^ u߯ϑVZ(} a~3?m5c"-#e6鴹Iv:x![nvy+1wnVAzz̑⮊:;c2OUX#UA( 07Rka:>߅YEesn;eBҟ'jZjCs&]=o¯ ~s}Fj&`P=\._:3fe|}<ȕVz`J)vsQ}z+%n;UofW~ J+N=`K?nvֺ}r ] ʜ7qJݨDΏ<XkU-kg76BIQ2Ot@s|C|/1+LQkFX-cbM5뽪S1WKI+"~Zc=HP=sg9?CoDŽʦ*k z w]M o,fgQ~E5 1…`_I}#&;*RXև 3FE9fj~@g]=rvZJmUٓ%e`Vņ$({g6w9RSN&!^lOTRӗl]J[}]֘@ 2rQ|ÆY+ 3S'UvP\ .1,E%Vx7捿N r䲩uvpOn=A)HJ;{ w}*S JWV+sHHS%5ª4OAuXuN}1n]i ,%r,,r/{ :PUo7hƭTvthkć*qcJ8?hl.29)!iL}Y`=_BGI"(;Vfec=Ϻl'Ą*Iw)N昆lLIÙ5RoZ7I+R-yfw`:XWOԕ*m(W wrb J.S{+:M\J^iL{=UFա)Q&3Ƭ3h5opcQO'Tľ`$9Sg9N_jvvþ};,HQ!NFZfҘg'67A${%o*;nTߡNEbNgtAPn%Эr 0Ų<quzW'pZhf&Qǿ;CS,)4Ck¯fs)js6:kDti䟜y><0r|Gi6OUXD|_W8aZ#3qK%KU+ /iy<AYVJ;gxp;YSF(>OՃ ^z埜` ԓ;tV sw2y5K7 W=]vn?cO wM̎.[?-^lGYBoƐ}ZSl#\7ȱEI#;q"}:2cG%R٦ Y9cSԪ }~0a鑣r싼,*ifiΜ1{~U+?Wt=6#ƝY6*aDbJq[c'Fn9ZXjO;j¥y+o_NԣҠJVc9sr͹Ӫ4:ks[!/ح؃ ~AYL#4SKںjq]zڒ7ߩ@ԿeC[w] R+J/? 
;f*TWJ?ʮK{L9NE\^EzבQ2&,8bZ}h&*FIVYoH}^IզFΦC%Guk]ڧ`)sdz jw_P?ǕwҲ5VZqVFfĐꥪV[姽5R)S%'5YeGL ($BC.ĺqTu}(9sX,=p\-r\4 \'iڪYSčɴE}8.(G4rFlz67z[M]ôѿD)jzX^xW^'39>yXp3b}JVU):g` S6̓I0%X{+j"m)lëcHpvLU7rqWG}i?8=0#?:q|lƽ#VkVVjoʼn`EJ5yW{{A.T,u4==g[>hb7SJbkW H;9 T8rVIZZd-L.f*Xw݊L 2>9l̯PD+Y+j6qf޸,D׎?I9Vﺨi]Iװ)̴1o#G%s=ɒDcSSoVܜ6erBBEr1`_>??[R?IT2M%uv3fӮzΡb= |X?sٺ ם i;lq`I9ks^CL>9~7rv_Īw8]*+|Ym@)Y;6%ըr^};nJx5A9O̜5lVGwJ(gz\ ElFW_??D'+*0)PzkK|"cĽ%~s_I+~d*tgK̗Beo-"w9PTm_4tӛl9<oJq<Gg]=_ىdž|mj(efE}Yq)=}$ݾ1-eXuk{pv4z-?= yQȑ”ƭ:PsZݤdg=w͝qG/1r9'p~.r%wX}WߎivZ4ę=֋-N:nOLKŌ3;nJ)Z*3gzk~#4pLB$_u}L;T_M|gtA`/^bRLR;"=Cʠ*7?k' HD$:9k{pG稞"{~67rR)a&& OW&BѧX*5q _UggT2ƉYM3(TĄ(Coq?gZ)s֙3Op&Rf끪ZU[X׬gGʻW~Z9W!S)չK#i;]?Ӯ4F^l{ws\q,sY|;sjq8тc^HyqV?1)%`) N:T faa:a{LR()Pxg?<S)%c."{sq;95iU(UX~Vdžc!XGT0~>q{a֧Mdw!)uD#g`-&6t ֖ ,%X*l Pk^i$kD3)y|Z8Cߟ;:c+4n_r `ͮ~V&}VMj3s.{ f/MotVqUNo}_^.9.̈NzֆJ+<9 ;(ϐV~8rx4߾Y2c)l(1E^+ΑPRyh}r1:uUj'q?3-k-K?n@Tמ]E$]94%@M%wt].`YO8'~b92YSEFy`誒\=2`o}عZSV hL+.I||Njˎ)'=b,vRj*i+ UkuZx]5MkAD8a X6# :PWsު9f?j_tbF#";kZdZ8}qqęc\20mm?*{o>*1M1bWT|<=vr&He,OeΒeS_.|i b{?s!ܠ> s3s%aP&Pf KI} 2+k {R%Tu`?ʑ^W{pM Z-j6ZkC ιo%Av]CِmKgvO;q G=>Q-qTϥUY^nwнzN|b-}NfM3-k]՜̜ZOӸrt܇k!@IAZGW/$H<ٽ MۧOc8:,uQ(eɆ)wp0OZ{Cb"&W^5gn^V)wc޽gJvP.7G'`ށzI>qc5t ؕPׁUjՙym*m։'4wΉ4'ru$~'XQQz[1߹|“R,SVad*uܨ5\b> [P;]Pm9P{r*!3fT5;?9T_izN:FD_VFf\Z11/n/3ctw'?ɝ3,xHj y{omjtԑb~;=Fkf"b ox:f w*oTaDA(w 9چFIV"p]DG RTO}zN] ڌ69Úz1<}A0?v{iIOd;I[G̪r"=9;*z63369b \M,QϹ1Fw{?+‡ߩ>DŽÓ;C?DBwLU+%Nޖڗ/4Ž:ɗz/̖W2XV([D"F54~gfusi]c/3~v?Jʨ{03/kޣ9OL꼪ZԙO3\yLi]L{luli:,@gaBZJc1׳&'=ǩf>'+5"ÎJ>3f|W|cNwﱎB.ٞqڪ Um hҋu#PC{cX6Օݩf{@m1U(:VUuT$UVn緬6:dꀱ՟Ƥ/|be1j4,0sA,0kee ke3 }qAi#zU|+)'CɜS{5VV Wt {B[#>"=nXyÌQP AqZ} =w uϨx;[GUWotj-= fðG߳\#L|@3nw;:oJ.J%>#]-۬*\yŽ TtK+=jJ_v3=k wɗUߕymcO31UGUYu_;*8C&:tbn= {ݩܘpbpvF ;㉳On4S]7o]n ځj\1{&\HQ ?qv{CW!P,?C]mX/A97gTg'4x gtDEۿ+|9jX|L]}R婕eM+nXWd*->(9&Y7sFTuPVZjJhՁ0{L閑~))8$c٫'3.xgS٫P/"Ǖ(j1LaMU7pqON}9FޣLymD)%ĎC?k;C/"JWe!U?t#wʗP97ӕYW.w݌`PB] O#QY=ڪUk9fn7P*5B[H4Qqz컯BUɊ<"''RFVyYŬAʕh'}LA)i*eQڗ =ꯌK<a ~R=tQw' z-7X5IU{uN< t y:7oݘ35 U3hIMqL1X@ǚ/㫎n>'+lkB/X։0 
0'*M943kjwhTU}U{.{C_̓uf{,w :8a?g`z&XUYn[L fL0v;w\]ZΫwdGzoYA:UYnu=.We{ܕmOjs?SGխrK?p^PNUkMNr\.C_W?ɱ]vc=|Ґ5)ܩaM5v*kpdws2 sݿ% ר3zt}0 .EI G?IcAvAcP0u[ʱlog㺎fg75E:0> -~׃Ԥ GSߕ[z#`b[=o8+f$,g:|nӪU̯OiF0Gi/2uuJ9+{b*@Xw"dgP?nH;=u۸P?Jױb<êrQQrɋ+kuv`'ndk?V+pg|'X(럍<2V}٬vs받neU=ѓ+121hFrɫ)oyљ`TDUd:XdUi:fi}ս1 =s8n|"SfGuދqE;3 + ;֘_"JT\R>:z [*F:X-ґP~ߒ<%% bC>qv/mBLWTbF.8٧95aG%?9k"nT1) uG'tmv Zl6jD9~Wg+Fk,"5^⩀R9c=2kY_G$FЖ}B8cVڽt|)MߵwMAևUQr} uGBF ~YhD|e-^4+:w[oٕ:sX&q-Ų6%ij-{3ow+m6g+uRE$lM8rSƏoCf*z8|BB#'j8kHˁ.ģ]X|V\sʱ ghNcu'[s6s"ǿ}w-^ MŒ ͓FvzFǔ:7wt@mEj0\>9^F]/;s0+Z%g2<z3+?jh՛OĢ1~"gS=LvjjQ}ѷ= }ڷK֊+V,{i @Vhka(Ttp;g7<Dž5Ɋⱎ;w\Ukݽٽq8{GMR!,״t_ՅVv7~6vwʛu_u}֝(i&[ecw{-?{5o)g<Ud^nkdٴ\S7QP GB%X =rUxTkaiJk8͆`X`=TZE慔XV3o+H1mCKoҔ"V(cSx+;TDj嚚bDj-쇔߾aJRL=ݥ RGzeoJdeǖc>x-3+OTw,6tl74v3R?a4o_uı(cf=oߋ#+oW{*AL:}Ns^" #Ό_:X(^;b}nK_2PlF8).;uE=d(W3rJt|֨U~Zqŭ5;*}T/rčI/xsw[bvc=yrAFhoꢪ9~ g&s&Μ6!z0yTW˄cg壚o}'3LHa7]'Eao;oO2 dKUWn;ᷧ0.6Ec9^Yok|mXW긻neZܷf 1U=UJry29{eW V]c3}%tݨeVtʧFh7cì9.dYT+g[\V0{s7[1\V*j3?u3Q|V=M8|y_Ѓӻya+{N+#9y-Q u|f9)xHi9j517w/-¤ՏJk=m?bXϖ*~%U89 0PmSuC\)Tj֛7o8 YC&ӑqɔdѨN% at|ZeΊk*ij%Wz!_h2\kzHupD9+1e.ȱEM58ӝɽN1}%G;yy V`Uaeu)֕r>|%ډk%tPkTۧ7sjvӌNH,O\Kzv_{)]蔭7fW09 uߩ?ǎV&,G ])՞8' =8{UcFtN>:|p㗙#eXb m ňtqX#qLE%c\Tj">.Ùx׉HcdY\mo8u *ZJxYEesVZm'1rV-c 6.q؁U5lY i嬴?3'rCɌGߠ9{3%eh3{ ng7fN lΞY gh3q+f:y?5禀<}F'pEK`ʄgd]'Nѻ=~ڂ H9L{UЃ^z]bi:9h/rO[I:䌞Ce 7ι}X43I\CQV͚>3iۇ6@8qeXqX :x9oy #ѵR~gMotApr9oSo{S&S!NDRfnfw;{9#9I+?K$'ZeGݝcg ~LCVKedRcSx^%!GO"ثcŻE1nۇCU6M\y>^iz,_G;ߍ|9V]z9UYd_+srw}8|~[91 ێ4++)]>O* Ja{h)RU gߩ);g~MIUFEdсW~9duZ5{}Tkξݲϙa蜬vo4e; i nIfB nKHJ>jMnN^n~Zӎ5oWfw~3g_d_Ic{)lFv5?G=ӭd>ٹU TՃmy4`~,3,Dg!tѻ>ПO \Lu+YrQs}'Sݢu ʵUگUBx${wdӦSL2orQO2m9枬•i5<{jzT'N^*C7r*WKǗv.cH"{n8}:jydS]#I@ΖW%\>v% *}v@!߸7KjLK7^՝:: $k̯oG.:YZ}.WF&'HL6>J!K,97x! 
̷ur=WaSN|9im,v ŚC*Hy5IŧMG~6Wr(;,]݅~g('dwhЂ =SXb|.+KFIVw)3\y pomt *1_tV7D/8ɸbčʕB$+Ǩyf6V#Ik qoeJaut2ot{)G#{fq9vtXsQ _e;h;D,'XO<6y_W1QfR/j7ؽ$IG5Nхu]4-f)_P3о7wBO7E D>^z?Z9miO?7$YS}wH7Nw_/ꄬun0uҬƽӹxցuԛȮaEamK6u+zKBO~0uܻzk}-[W%/C;h]WUxJ`8 jwɮO8b/)U'g}vγwg*L/CpF ؼe+XO3lH3PRsl;]S7CTIY~-nhϋЬ6a++3jp`e~SEuuM￝Ԗd=L1\t`.ڼܰ_hZ5tzX%+?Gu83ݱu͌'*]98g.iW6aU+KuJdՁ7nNz>Úq$ Oz wYe*3ZԵ{ddءǿv,Ĺ]2w]}cވ?f}29ƪŅ۱M#1s7鮺d7 c fYg-A2b2T ͪdIʬyLB)*2X5Pv+F{=:p>Qnbdo w+ g0F5=+6*J0?](z[?6x̃(ۨ;v?wevŜGT檖Q>Y\!Txmw͍DQ?!P1h GóF5^smVwZ]ck/%7PN v?cz]Z٣|6{gg'Ƃ{4 =lufײ&ebi8]Hlɜ|mcYq.fœ]uK-^lzyemu3$B "E,ј񧦝5kͻ!Ngx͂bvyQ T/i_i33ukN٩EbX][i3_ɆTvA3L_[{\ nL^/蕹 hM:VzJ^^/{j͋3Vz53/ޔ\}Kٗ:u>뀛_MMuu";jvƺQM9B~ϪW:vOT ړVq+XM&RGIC/WѰLg:6C!dMk޽E\\T9w XRmukZUwoFcc$7US ef #/FrdK7{aWf~zv;ldSjPeo~TKD̝vG/DtsOxmX~Yg*ǣxd}njV{q$lYEa/WE""y_3\(ƞU~W5x'es'<;'!d5סsඊX/\vyt e @aūh3gt| jnFfL% dCDXa (a=x2KYv-%U)>/oЎ'isV %@7̜7b}Xɺv@ W%v$oSQ);)e:;\,Z/[ ׏﬩\/nۗټ~ח_{3m'!+$q{89_{.:7@=K'w_tˊYJD9yur*?۾Ή6xm63=߆RƸ_[i6}wm'VV5(w|x~-d#{1g՚V}Y$ j~ZfBt53xCVqJɹ3p DyK.߭Co8_ve{ZĻ⮖|ػ\Wd؏zͮg𿞝oU m|bX|"h+B)[+Y鍱8'*WwSݕ^6}}'s_FsX3 6b0I>4~ 1T~-sIr^MYO\Wۛbƨ6V;S|5efJ= yus09s+\.֛\K_C[pOW(GJIEp2ug.;5o@1DJvdٲ?OZlב’vZC *e=,5s qwooY]cEuBbb]ңB-YSRRL)K -{ EmsxeUeeڭ-f3+)>ӷq#fᬎxG52z&.S@6nڇBMݯYb r\t~>7aRԎNW&<3ܱZO7<ݫꨊ.jxŧU{$\*ynz1EZq>ufG*ɝ;dVǮc7û7DR>ʗ,1ϧe,F){`UzC5%gm:Ȇ9{#Nn31#'Yk{~7܎LM_]U~ml)?&_}\Iw9@\xlׅY\(Gѡ#Uլݽgu2'jvF,dZp5!Gw7T&^߹짨wDߠww/]Wܪ,kqw<С[(fDCڃBru[z]`0,ef9s8)B|Z1ɩ'CC__̾.ϧӿ^]{.s̱whB4a9uy+v ycvgЬ{&47 2O֝<`/,ѹVld;[W7m0{f`=C3T*y)+z{{-hhMCk-\GIJe-V:c`'a zb3qysT' j78 }ߎ ԩ:Gg9f΁s@PygecEU\6$d߬նJG?1 &׊bQJ=we%+-ר>_߽ e3B<1uΣ}2aݩ2@V^77V1Sαc!ۈ1Ǡ=>QI\6u>{ܠUᘦ<JF 3Z\ǔuP%sS/o˘[ Ţk0^FM`oJ{̵۬f.5oTl *"Z|ݨ+~n"p'H"NF)vJ$ P_ν:EWdB,+뮷.{?Y1|X2q73sΦ΢e1Flodj*d/+}_;fX%|3NB|]/掹Hz_Vd:.jL%eyr<: f/w`o0.θ@0Atlׅ.q/7׬;XęC<b54n9Ug<׿oc䔐*oI&mo{?UY7[VßIoMۅNB<} PkAw#r|eGWi)q`~q2ϕN1n8Ys̚bc!0x<9VxϢg;` WJJVJb6o=qcE}8淛UǝJ?Ow,w' %1gr!FޯYI_e(0󑾈aq\OOMKGTYKT7ttݹ(̎lU[^U*"\;wֻd vQ^Gw {oj*".́*}xm5}aǪ8fm~كj\ r3sD)Md+uOԆ;ԕKj\ /澺F@mU3:_[Vto7OJ1+U&哓OAg i^J+ZӯSM֥*ךT:D"}#GNgʪ[޻$eFR aK{66n@,nfi ecn2j⚽,dj޻=c(X:Y0~k0 
>NVz>6ۋYok}Y>c\5;JTEV]䇔6~]TV:ynGg9nQ^nXA٬s=\݇M * KϢKw@*u޿n7Dv\J8UVCM<{vy>JWgŨ'jяhUd"98m^t\6\0[Fl+mp=ʜR81ϙYJש{DߙSM~=~[pe+`3︪^3NU"=F_qD%#JZXdסsY^ǻZS/?赳$Yψ`֍,yBĞV <H̓gTJnG-ZG63q4|_g35Xu Ձ T_ʷ ,kF8_hqY>;n2TyV~nʸ_띡ϲ_ A6+zot٬2kj*0Ub*Y9g瑌ob3ͼSnxoXZ=O&1VpZP:XX_t$&s0hSt\T;eV³mx41>>/YfWFnu@ߩRл,tex{Iu]̐BV(^ k|Pj.aҖ;A +h-'#_eh#ɄFSe8± u]$ʌεm^9C*P~nͪn,ZFevUv]o >}<(c{E=m~^^{Z~n"+ ;_eV#ouI8zɉ|Cy+e/dխ-Wxsu{ 9gdϹvq؟Jhw!z_ƨe=3BHSם`J|t曙/(nsh_VEĪa0rUZk4Y{߼cCPV|J#W̷6Cf9}A]1=3H!bv #1^x9ޣھd$[4z)كu/Ǣ6")qgDf~w;l6#|XyE`wsF5xyh@ЎҘg\ve`o6;B-duA`< ~JcV"hANoDzj|_oEwAnSn.xb3ϼftovό՟ei﨟4<Ԅ3v]Agy̳sVsկxQr)*_l+zm7{8¹Bnw_/+{:MPf~mB? Ȟ ~3F="X ]]'3{17gJw;*bg7_$s"i v݆3ɜb{:~n6)-lzao- =A+5s3l3Xe#]C qDZ,;78p[g#NzVe;t1Y?k: iɿ);hQ$cj4""Q[o¹B3ghwRv8Kv< 4>>/dofNsoTTM`8V)VL˻'z~3nvX78Je&ﮈ};ܽD]j6ٞbߵ)+:~7~3W'j9X=<<`57%!Z&r5}{P8S7B{PTXeE*F{C$d%˿2r]Q)k(P;kdH3sC[jRtL\.?!WVJtJKM ]16 &yk3COG#_j{ 0eؙsQ +}knb~_-.^EXȪcVx#VVuJ3ggb "9Tl s*w ȯ8^fyjA)"!v1O\'x Mw%#Ơ@qvzNJqPy Qr5dc2ºdzy>M^ Z& וnN_gzނp.ޜ?h1fƐtOW'Gfw3L9}]|ݕ Wm,KeQ}C$w \i+71a.EP{F [uj^nj+> V} SŻL^cؕ~^"^<16F2-}qb&K59媃~ 3ǜ9dּ;~VP_cGLje'Ey,Y!ִfot0Q1%[bTy_w)/c Jr9d.sCu=1nD iuV޳3+Q d3NŽ\TLJ(JhYf,ɢp|Wtс<$;/2VhGMfnЮ(R k(ziqlCs*gtu\ǡ WV(:;ū1Yma=)?[*þKB6ʌg†hdɋ1ffDYqo3yKx|@`S)Y{b+z;Nd S6 :+^Ω0g^ٹgnkL%0ś`1sq$}+(;˼]&V#jw<^? 1s~? ^1V4'Vlj;"_^U ip&' c^d WHԈӒ0!J𻲩l9Fn Q$+ |4Tc>"?3:xjr}Yrio}>c3T̀3'KyL)YkyE7!c#rxjd nxy7厥W3.2*f wb93X\w0(socngx5*wʌ!:2V3z,[dx&㕸GmѿT679bKXj+]~ g//lSR=;Ntvl}S=V%{\+h8zN$v<3M49r@VXyh2aIVڠ?I;q ,#ފu\~g=8;ӛG-w@b&Q 3"U]Y&6,]õWt`Ģg<ۗA<\}6Q<1D5|Q3lR3:aGV#n .{LGsdbF,Gus}ݜFebuSXMhglm' j{ȃxu2+lY|t?%j*nYu|hVEf1V?n抾txf4yk~[䫗G dv+a/q٬'3OE}m` 0k|]qwMBNU~md#Fw?t_`\"s-C~PmYff2# @7^ dC%{nF?L&bV{̓u:YZԖNuиj: [2.c7V}QS:Ӽ\!gDw!Me.jU9qx^ы[2.}<8.tJO*+:RbNOQ,+Y?aޭx0Bg,OtYA' osQkEX ge+=>[5  ħe,VFCP9dnk?!V{t>4/þ֩!:>P?)VYyq=8tߙ ً,TN|zS 7`qdeNE_ZJIJKr=襽G2SNof9,nIwF5DC#4ցε ~gOڨĭ@Be)据 " g2*o4?z/XO 0+|3 - {16 oQ)kkEetXz5Du\7lӦe<1{Ss[wTTCCi8 i3PdweLN[kvR2hti]P@? 
c#hbb1'y7 =l>ڷA2uV;/G|S³6#nc,17cx0>컗>f)w#{vo2nHW73٫Q4?Eڭ̻҇};osC'W:NxzQǜ9dm'V~2SE+6f0{P 都AjLV[vxG|mcYvD 3Q皪װ/Ur4YccCڲf5F0?XKVԮzB=vP%*ywowsV毘ϝlcD`2/}G=x=-eqW'"sc:̈́xXi4ĔzFoJ&e߽o3k\s3ΘP<֡$p]Υs["W I6YstD{63O031vZ5N넷27^}mWveE'+6\uyIf:goyZ#'b*Z&/p O*TM Ӌ70TkY);:gU}d&4sA0>Ê{ |p4!3rprNq=s1p,JuX^Xdb)O 52zKxúGes2C~:!]DRWQZҌE.?̓$;Db6 YznHNmaCL]fb=(:Oe%M#<=g9bl`Yl` l:ԟX:5o[7?Cb*e-VJAeq236F/ʗ+W&6紩Ρ>N;Mof6Jt$҇e@5w#x઴)**T,WD}qmz2'5sq4{~:$ĵϕO32 A-Vbq'۲ރՈyX:p4x?c*N;OPs=Blpx٬p]b3zȻzFsx2eֲ g3{=cGnYycύHnam<|S16EF7 kVA8 %;Uف &9+kn_s%κ@EoWb=sWw{Cr;dJ_K[1~vĤd$fQ,e/ ^*k^l;6+D̅yվ_zt֛H3υ\љߐ&@yzd3f~kb`xp[*Š5rWǢ݊9L>yoT cvy{VٲLZ2?F/n!,:ZTN ]a1Vf <ܲNQJ9b}3[1 w/+n{&sXNCִe<̓1je'wZ^QekF>h'YD8ns=N,z<_g>3gʿN㰽ޑ 5;||$aAV^rv"4 +_ƟZiSlf_*ħe鮂noZ #NVx|^i4Fo`E6Lt㩊>uqV.5GʻfL ٨'Gf}GQ);NpAV%j1hu#)x],FUEt>ݼ٨R=+tt?^U ? XrJ@ى5ϊ[> .쪊qqZQԛ1g .\NdBaq F?f**gA>q$d5#V=aGe_ɜ` 97<̼͞$m!o=ۘ?̆36Un@TOz[SnF'9 ~iV29>=߰kXDC d9 +ZOLR1B^?9e-L4ܟ~pQ {6Q)8Vx_V\bӍ B fY.խd/3g9Yݔe4qO/:4ז/ZՆ25g=^x,q&d)H O'07:jRO:Zd೪~޳{X|*,?=~ܵY ? !+H^ajkA3c.D}VzTf|`{x_qk*'7bL{Q'Vd ˍ:~HOEuWIO3f/jV}L߯h8^sS{b?s>z䓐wwhUMgz&W( tF/S]u֘]2=:ϧ/r2 U8ߕ(DUR ȶ.Ɯ'Z V؝XF:n0gݳ~|<|Yaj ںsFWrĈfƓqsoT*򬆳V"zYyʘ~(o1cEu^ĝu+ nY/6tswHĹd o*$PXJ9g-}_ Ugwu/ʟ^{2Yw#ݛ/ܬ r)my(+Yc.<3rs֕x~ǺCrJ@*R: \GrЩ=܇e3{ȪEWm"f3ϬX[olFzsszQ) vcn=,0.mYլ$MW7 s *nPNbn KwVX \@xw'cPn'swT? vƩK[WŘ搅+ׯCzF-{,zǂqwj\3†y=:!ĐA^̋=W뤪0W}w8SsWL_zw>QP?vS3T;2ٓopcnݲ[2/y! B=bؙy^SF-#J cQWvY̚v2" ;&=Y'6Y!hj~oe56B|Cy̫0/i,bN&3ZdT?z鸅1<\-ʵr\oGNoV*c mcbT~£fnUݯCxL i*Bڼ0)#NfxWv:eޯY?؄4挊Ux7~#01`lxun@z5:_"NF窟07{oR3q7m1GT֞"n[tz5+us~mK`Ƞc)sqϏOBVm fpQɃ7:~ WѧRbof2 x`b )0/^.022h[Լođȟuܵӛ+;2ƘWE|pRtc}\*Ъv$]84h."숝2UC'LLpfz0wzs̜"޸=)Tdî2E1WZ+]W~ sN>l#1k13 ^t[[萛3j1$Q_f#@mZB N]I/y4Y-D̡~3c1U~ndʌT; Mx!)Ud-" z|>6aKw"Nƴ|F;.D7q>DzUj ,q1Aj YCR6hQ|Uܓ u[QZ&b;+̇\5gm$K xoS)GQ~>/mc5N-V=qߩv'۟ԅ^$dnss}Lߩ n/vRmu]NuryZMaD#"oޫ8G_QcANY$9eFuϠm}>k͊^Iz]pPT?~WUnS!˭\:Qnr ;H㲠in+{oY鵉D]61ٟp٢sZ=>q3TۨG2S4 gF̴ ego, 2NV. 
b6 .НA=llSϲek;pߦT>Z4N=+ Y1`N˃27[1V[W7HZ3Z=W)ɢ p%s@UJPuO:qsWD#k qSdױ2=G6\/ǽT2ECyWe!kxC\AjXqce d33[UăL뻁x6|t:t&s2!U|Cz [$d^N}en7lt_`2ƘŃHЎa.uטsesz[l޸ u0T6wSYu`aWR fFVnkg3+YeuϷ9%+nʎ v=\O]1`NnR40V懱DUlVy*E#N (;@>_2n@rد%32};@cky4gFry]y>92h&/'h͉#{uhMC8V֬ԦFMIa"cq_#NƴY=m^Pf3è*T;XVbJj:)&5v iӛ^;"*XN2N jk^*3R4nWeFJ{Vd&&ʟ n33oC7f}kY+mz %+ʮ]b\Zjͨab->cZ 3wtGߚ~$fF٠7TxPL;;p3کzŰd#VM-zx"!7 idŜ3[lT$5"XV2.4Ǯ7$n|>Hv}\i:IT2!%Z׎\Vߍ 04#-w9lԉľ6͵'c[?:Nc)";jEvɼe%1}]5k:wB9;2cھQtO72|wAa[YʹuU7}{-gF=?|u3f)iok\ +o ,:s*3T]iV("&֩4/O >0<uSV?M?uP6wUM&=ہb++agLw N3XQڮ]:MN|ۗ:?Ye6#_(^5qyrǾhHq*6f5I9>G1eeUd : ȨʬV+3TTeY¯X\_Q6{w+mnVb5yR;'"9T/+_N| x/3 Fq矈ϋۈ7|3ͣ rϦ.'=/1E,_eȮfF.7*˺Ŏ>;ONw/Nup K},Ckך_M/9/Q?ݗlTiϧl{?+N7eީuɱ=@o6b3裫9wwDY2/g}m_֥Ⱥx63̱SdPL '!X^jtߓOk0㿸&+pwJiFSy6@myGqӳ:c1ѳdVeN`Cg}eՎ9ɖ 8uĺb|,lsYP]eΑW]qdtP ة鷊%8zAmyL^㓵/|Gs1?3CE݄ϊW:(3{Je'-V}ȞUj#~OU^pJn~ BdT1>O6,ķ;,Cf=+f-2q |!*#G9*yfY]p"u}dlc? avnOBV=7wonc0'a,uqcY W<9Inw*uYW4 n>ᓍةbߔt=3\tڄoZE"b`LON<>ӿW\LiдM`Vc>1zIM2bMa}sB*_+5ТXT;G<]bdj *F 5^ޣw=`RocśYDM YVx`GEw'*bt{ɮTz GkCz&w9_YT& \:@ج2d2{LU'y3UVIMj]ZiK7 .sP\Vu}.͌gı7r`e71gW 0?e?']U<30Ձ>BVF2JEܽ־$ŒDf'qsYORSl沶wr1x̋14>P5`>O%g s/d<|ϿF??H;edw pH,.ff=;6^i33pPg7Uã/!PqgFN5.U:| W|3-|ab5q3e6u~Ƶ9E%#/7oVܹk_iSCƎ׊Q){L'1(^63nwX2ë U,dף.v^#\ֱ\o}:'*q<ٯ^չpt6J*Pu.\ʧ=) k5N,bf xsՃV,5 =3r9K~xe2HGjßקbTje\mLx7.W)8C5qqgƕq7dsm;NuxQy,/#!&db"*eWPC8it f2Fa$b& {̦NXK;jNq^b5ƫ:gUi΁ ld2[m R|լyB<]z`qwCrfסjeT]=YDx8BRq?837}?*FדCLIL+D;7C\ #b"$+ZzaT_?g C}]G.~t5&y-PB<>gZ_?OT,8aƀ2MHvrdRdoHynR\V+^jV*'䮸P;ϔ.[ a1ϤN%Lǹ"{W~:g 2RѱϱCh~-=z8m~;/@O |9dUcs^cuv ԰+ԠPDF qz?fⅵتs>C[Fk1;1TwF,+gb2™}[A_+`2 թ"Ʒ_ns(zW@e+WU[f\fպˏ}骺UiwUW9H4(ʯGo1yls'h:VP Id:2W7 w~>=_qmK:"W$vwvÄH} tH;#Y i9\~u\v/y"i63btb 0ٝByh~_fY̊d|[5HnX{8q_'WNWL Q&5O[O@ӀcEkōLͤ*?yp80㼘i;E@G 7jPs\Naf'={n~oy,%Y[1zb% /32 ItYc='$+ ֞p|Τ;h-|*Ny]z+ĕjWS:^Kj4^spֻɎ;M?:/uKYvoWŵQ.F ̈$~91I8wx*kVQ#Z *ig}~Ǯc&+v~[7]'Y_Isoa?94ؼE %2k>qRU;ǝRdy^oL's8̵M^L[5h'cReF^ m(k4ڊLM^`Ɇf3ꑐ®rF~)<K2#3ƺ+gr@;|G$l7t>_w3nΨ\mmGE#\3+$s_CSy  |0u#̘ qs+:8>`=vؔY$2W)J;nXby2üG卿NoF-{cq5|ox>KZcW|1JD݈ ]I'3|{ІwwCĩPD5rћ,ez}U&ž6RϕxIC5)m夠wd_5sߴi=fVs 
\MkUA(\~X'߻NP2.M#F;3֌zptݬ;" w#$0Ovo]? }NDŽ3r+=jWz*fqJ%]?i#13ۊw6e:t>&sE;uQT a=ko+q<\Gpi)b,8c_ RxW׿ܯ83C/jeWvz#ɑ,+?Yىhq0_9|of?+:ˇ/'Y=g88Q5'giR΋dYQ]cdU&3תg=qd)8t *f9"3#PMe̪Rd"e@z Tژ_y[8q~c[+ԟNvo{fޖ9{O:"MP9<̨6޸Je7,05-=~i@Ng7VZ@9=~>mf$^ɟ M3;nx?$T̼ Vw9;ƕ*pFoDQ3Wwx' /6؏Y^J7!bk.irӏ㞷QO=ܙŎu`6/'Մ/' ȌC9 ~C b}\i+~=SfHc u[&uκ<]: E?Bְm v[rݲk߬`] ,c包cαȯQn>_%<2V-NR~\̕j5|=0;~8p%4ްNjMADf8Z=Xb& Ҙ>Y+F7>t\\nOV7pM_1)~7qϤd#Fَl$/OgJ(Ԗŕ~2s(9k 7~gWQ\Nxm. {25;o5j8;D))L^>v[S=6>cuW głJJo6[*\i1vl9YcL[;׆̈́:3dW7$ßQ<ȁ+Qۿ'DrbKso>F>m:ԁ~gr o\_eӜsM,\|_\Q~Dxs?6VD;/+x2. ۀ֡OZ<->߸~1.udYۉK1,2r`6/򻭃YدrxC|f޹n'Kl5:Q`Fy3WxƘ}<3#/Ub-Ao'/y'z/Κw29ie]cʏWފOlܯU:gV㖕Pf wr+@h_9o&VXnm6CNV3aw/"\gb3xǫ"~:z&Θ76X8e2﹚An<+OBf5xL-.uւP{ lnox;1fkMVPN9x^綫ASVX,武Z޻1Wt=)O+T1>p2*7sq3gP{߽ hYNwXwⓤEju!g{zc}ggw{..Eep}5FBp\?ñ2y̳\i!%_7YCKǏrxT _'W~ }89cd k,Kj vH3`r/"eQ7ujbY mZ BmUqt~F޾b@&2EDTխEc.nq $ߦRZ8x`^O2/QlT^97<8hLVcy{?Yk1:j+@"#zRIVeު亭ou YLgގ/s%+VV0/p mg4ٸռVCׂ,]f [Sn>nt*\i]٩Z]cp^OMWL֋!U߾ aho=nɇP1~W6gF|32#ȍEOo|WE?L/\ڴs VJMq.xk6A%Ō<j\c!v&/*psx+!q{|FĜ=^=/yX5 L'RgTtbRw<řf׻6?759E/0_f]<.J\VQK~y Ľ ҁ"vWih\0>ef$ZV(Y{o2Ǔ9:LhIDZfXu]@8KlŎ@f\YJ_EvL| ;HVgрYʺ0GU28 @aJeWUYܱ6Tː2cɕ}r:^_8w*K>aN:l=dYL;ұl/6M.DxoͪJ!)ˌQ߂y^f'kŽeoQL1c[%/'>{ITEk/~32`1 L0`m:Ua`uW9Tg:ccQEƋVX٥pƞ1p|_nl#׌L{^L_: O?duۘB+TqL2jF? 
ͪWZ *ḿ +_UrwݚYV2L(&8=б3:?x.sp1v 3̇0fYY+Tm55;.5~#}3/6|t8 5i(YPS29*k%å}a+2z'M9sq;Љb `Vgz!Xh+va6e~1>[TZ g3ΚT6R6<iV:QC7ir5<;UJT߸QMփ;Ds5a+xBJվsuWo?%cb;2Wx^EC2$2;-vSk>rXDR̠9z5bpj\Y1.d厫Nޣ^%lҪs 9S31ޣۣr=.HZ'2C&Ze}1; frpT6}o/y'ӽi4Qvؔ@o^R̕ mf̚3'N=QЁ㓌?1zd}F#u"t K'yFb2ob/zn[tgB07#0\ d$Fy?BV|[gG-:D?+dm 0ԩ𝪺y}D@&3qHE[nOԅ-XKYgG `\nT\[eVӊXvDTwVv:P~}*~2wAU}0oT0ܻޒ(^̕6ۋ{=VxbIqYÞM32.zF5q0ꋿҪ|`'C9(' (/֓γ]h# }~:Q^;'o4 uJVH VyI\WPw7̗x>u%5q,'퇎IOBjLczamziBP f̆_~7 ҩG"A)+Fu_~3Ϥe,H'75&|8;/._U-/]SzCxEAw!иjwgs {(L5V *=Wጤ{41&{t;#YR _ޙt iknNp˿Ve69'NlJNr(ݹ_\6 {tG 7}Z_G2(q֜nѩg윓Z6ޢo~9&G,]f}sA}+d gEuš]$i#Q!mVZuVeэW]bcyj6eF>$}6nǖ=oq3b>z23Sf+fJO|=$TdN~HZ糋r9 j#aWe֙Oo}K jX09o %8q;7Ne$UUÖWg]tK ӬX{L_bi*0DyVr]ca8ެo֏+Tq<D8sxm~X9d$~/ݿϨgC:C(8Ɩ̄3j5odP 8̵ lB["A}6OwTݙʬfڈx䚑)vg$M G*[>4Z/L{~mO.\Vu[v93B^42+Y>gL<2U;e&+.}mv팦9~u\PB!qWĆ[ɘc:0 F.3\xÿ}QDxi7\_f(qG^YYY +zJ{u Ԯ'XD'}pW*\a2,Fq{ +poLk^V>ឌkc߆] Ǔ9,/U][\k\|ٙʌVskqm=ź)#FC!0Od2s[zcٿ1[7=VLMMifhϺ Ŋ>kU g?3o$b*:c-wEKt*%$zܕ#"U{vUc+G8n slhS)[#sǃ?G4^]*؊K;VLI%cFx5O>?H>9e3|Lms]GH؜0S3Ybs#նv,ֹG2c)W/wzpJg;3tb){0_X>#p߫Yr<\rL.':uHCd]N̬fjpRTПg;[ 1KŻ XեיUa<[?HR =?_݄ uy+Ss cb57#}sԬjRTU)ZuyTe޾M>ϋy/}zzV}dݷ-EZ|O8u}l{=;X A4=5߫ Rtތ(qryom=+tZzܤVi3&yu~U'&jGM0z lQ߯K31BkA!{>icyl8]:<2wXuŲOY}Q-ϲ]P-{i~;?,QpMGY'QDMкFh:x KZJeXg+gslc;Ӷ %dhqt|HLTJm}JIt̗O+g_"CN~~'e-i||n}NG^3 tS-TwCbiY+EƋ9G媴bO[ist:WrzZgУrʏ֌8Q]qWA^1~1bN,Ghqȡ ZaN2-؝‘d=4>Und2H#nyzڕ7Jx@Rb:9G]áVH#-˖.b'YAz͞V`o"e [e6$5%]]g|J jJFm⻵>ϴJ8Q6c V:=V]l?"=f6~l;ujH8[^v>jSStǾ kY1jz^wg-iJW6w5`1:Cܑ Ksuisƒ+Vc}UEc7*Ks NC`ulM!jcyur5~gszLs~,&5Ynڞ]+خ<Ǭjv =~rLj=GIgfϾ˾2nB-bȢe|zic=gҲ+YP>;iI۰Gz&lz>aMJob>$:wgh5,XE_yt>1lj{Z>/J;Q VhcwI*ɧq:]hh'ywizZlF+6t>b>°w W[A&lyhkUU 2[~&ٵ}򂫩Y=hZMQ? 
vLGpcł[ӣM+W8mk)nY0rͻ}w;[V>*Ȏi:(;<{gjXcTi/_F3O#q zWZ`;WHiү/\xh~ v>PFѽws`Ǫm[~;3>SAXx XPÁI[CZ?-o𗺟(¶QU`S-*ծ534 󬁵+z5n|zڕ'<6B%t|44FK|йV1l=w"~^{=ٻKuxǫ3ܝn{uGN=/y kBnl4䣝ٛ{2f`Tgi?rI ;k3]ņ_64 ѐv,zܓӷnc PpIN+9'7Ί埾#׿ \|.Px<ՔA}>ڶ #kC՛*3 coTgOOKwWmJeў8Ww|ZBӾ*;eoVSr W)|-%_hiZ1^#q ҬKyzPCfy 晲;NNcWE`YW{WL[vQ_u&QkT^t 7w%gPb jbf+"gqSj?o%7D>R(wGc#vǸ{닛`ƤvQ'$ Ҧwq/{8+JY+I6zW4NCjJmñ^⭵c[#0:ZoU>wQȍ˚ֳb3_9,"S r|<7{t* Nhs[3𷤎;28vud#WxY`=NǜO؊%朠g\g']w -ߔf̥E<1Ie>AUS9,{7X7.U7"] ʕ),gmԽϫ't敞io|/LjQd =sjiKF5ȩHV|h?:κS_։^q #:92:˦.9F>>kyVG Lњ,b]u{^cG6zٞ;]gqrДJZ}U|9~3-3ʠ9N,G8 Dh٦/ba9YiU;{Įq-phǝ7KÅDZ㼚\}d:;圀}jѦzi-AN~u?Fjc,;(k~q\u*G6=>u9 0e8~T%Hgg=XUVU*ytnN|1 j˞/4ã c|`9whRtg0l.x`dv)l^}n}'gj٦/rŨo=Ե|Ae(,Qi>h>\qlQq~{=2\9r$:&ALX婣U'gwQ>G]m0xܗߞV=Qų 4f3[k*zV|x6oIN}ъQWUf]ݟ*rsme',FKv=5hQglVC:۾_:iU~<*,oJ'&|wU:|3U<>ӎujVhcwxM=2xsmu=HfхEۯ]> ϭGmPjV zA<ֲ畮Sl&fb-x)s{EZ>һW /ʪ46X=i>E04˪4>FA ye=='W/7hzDI9(ΠEy'wTEGg<,汖x,s{N_(#ϲbqM ~Z#1~{:{yd}M\o{~uʥ=Ѯ5ZblA-Σ/WD5ѐv?ǂwzD;?S~:<<W䤘hG;6vX3~β v"jov)i鮱[DvOPrq5ѐvlLZC'Z]6St#j{|o=G̢Z6`1l5+G3=¿g`W>ki|[6UdM_YnuS۟#=VʮQŴ~!xvoK,UQk'nBVV 9߯vt:W,r~` YG,f&?nI=>!ud,_f5h(z"߹tI^UӮ"qWn&Gv)($]wEwS0&|ת# HB* H4;+Bv&ŭ*$:VmZkn3_> lm8o᫬6\!m5TLU4dP FcS͜wTmjbEY>Hm.s!fMcJ!޷PuB7Q5系P>efy\߫V7k_{Tw ǾTgb_rґbs?4ཽ1,ycm'*OgG$yWrrϴr(w,)6xz ՜}U?z'*/ʁ^yWx6V>ex)sP+S$44kOGvW+%u^l ʮ6erUg8d[{:sӷ@%pPI`PGUZVbW=- gwĒeg΢U[q7gi1$&[q󈫷|E'>іT߬_pB>´> P`$L%~o0}hl0}i}.QA^rq2 1Tb>d&{rSnE 6rŧ(z3rVv]1#6=s( XqO4ܼ[W%w'PHshnIJ\ ?z|hm`Y<"Q;IJ#-xJ{ S׳~cKki<·XmbIKV\nz6}&e)d ӟ M{lq]~5(Hx9BIS0-b3ݾ+ޯ1V=W<#x?f~Ut2 {iy Xy,@Kn-L,zt٧/彐EN}➵o:+.z1jb@&fޱij{\%YKAo{ K:ޓǶ#ޤUeגmcb&;zYR Zdр9u}=F[y:^S Xl힣t/ꕊ)]Z-9߯"wOKLm?/BAlU=.V qMcTzZk 7?EDn)z$h={N< W9E'{ưo rldoVr},!Jrˍ+=ҷgfkSq,x ƈ+uޝg>Ż5LMuE{40Gwձﯾ[N{Zaer$ԗ1}[ x,Oėc c|{wbÝ9#Xb/sh >˶.HBfHln[~;g5cZ~N_"ТzeU3#8ѡZ}GђGCk֪Ĝ>1.V* $ԊwxwI<wpLM~!wY:?*ҧ{hEߡ熅};?/BWQG-U @RR=k`> QD(\E,~Sժ% x"m^K7Klu\9S{Z/|}r޸('wu}AA~o}wNVS2JPZV*o퍏[5Mͩՠ軼f?ꑵؠD-OM[z0lЍؠb^6q_m5KR]=zlVCBߞ?S=wdOvrKUڍ ؿ|OrVEֺ)N+}tiiqN|zW75ɰ]=q(Pi.xǺM 7|AU3_1]]']w~-][oD ]F Uckcb_o ZjG\qXMƪ:b1k|zϣE}=j 
ߨ!QTš@넧]j_,UI5){Cl||dcc}U'T,Y{UʌW߆V>dzZSٓaUɥ[~{<q_C+4ޣZƨo\Ӹž].t8F{r+NiŏZ!$z [cK\.-=n_m1yD42E^_ۇzZǽ{cmB\?yfQ>'Yy5C3Ѷ(3ZT5= -,@6nz`oe e.Mo?XoY Q3j89Ģ/'Se#^{vY,v"dqG:--GO*X.]-֢.&ќ}XYR85hܣ~leDnLuvVZ]#=  BLǭ-1_~zwoj[ح'zhwWݐT5[cŶ5V=ݪ 0f-^Y:\:9o5|{*|4CEXk`m:Zko߾_L~m?컵2>%g+{-:>ZW=_4U/xv#^f-O9\jEM8GTW'<0qk=œFy4^a~Zy(~}O>a>"}MfW}7G]<P݆xiS{>G+FRWn*/e0 Hk;8 +WT0W.x>jV(}=WxR9<>s{/ւDgm?S%)3?,R{j lߪp1biex|IlӽG,SߖXU_ }{T)-4ѣCQbv$]K$9Wmh9iMmslxO]QӮ1=TT5nӮx /.-cbpǝ~DjXxb}W$5hogμ/Go(p1X$ei>UJ&jYPE ><0ܾncצ;Z+l>nR[OgA*aZؘ{0b-[k)vrD>¶(ԡ{JVx~Y_jrZC"3mmrmV}KuZaZaO+[GhIQM+=zOWcBݐk#PR[mN/G A=@W>]S[P:gtK.B}ynֆyʍϴ+G"іrYl࿇gppm"g>  ~{ϗ6DҠvZg}I6 [Y=8bKZ 4+ӛ\q%߯׀o~>I%8iK]~!`FOiKchC?뀶vQ,e6X} ā21U<$ЦZ2D ľ@*(h+}H= ٷy%{J 'xO[[I\@qH~-|/)Nܸ[W e߸A8m+mb74Ëv!Ha<\2F4pe@LhX&{ %kU~t\G߱Axowo) {S z5M}P]7}"8(.y8dϾ[x:T4cU&!Яg 7G؁G&'WeG&7>wzOI׼i(|/46M_y>Ͱ^JG=2_)oĶs냜 |0ߞ:kl~cIsyW|}\1M@Mlಫ}܃LCq;M@`>*&sTpA7d =ܲ?pj;"|~>GaM'wEg>kcߏd֣o3iwQY5k}S>{O{v~Զ9cg<`Kƕ+ƿEdV0QhvZ]!v= @3Ma4 @XDN?[Ә1[Gq| ~u[h+5Nf+C=57J[0:,mJUQ(TkKZ`-*][Tixq@k1P[٨iS_)I Qxbq=JJLa#yoR ̍Ga6nuu:b+$>ZM$YE>b}ǣO{HrC+_ +WhV69[Up:e=> Fw 0hyklFUwbBI>:+T䱐_.%`svx`aþg;w"۠/yfVSIjt}O6W,P.hޛZ Q ~Ղ_m_heQ?TZ[Vzp! CE{lrIf=hɡio(-Ҡg<>wXY 㶼[_Onn hnw趏NG+w> :}Gx9& RC1 |{inͼ(`vגŁ: yf)"<*ƨcD +=XrG='QFeY9<̚/1D#6[of`gנ+iob%Z^m=ᖔPwxvDj>KXSg. 
rx!WO3-q6-kh46 o/뽨5;G & gj3Xڣ7AF {}ݱ+13FCl" Lgm6"h-B{}=Fԙl}c;j iz5Zs7:Ztz4c־.1Zh0`[&mu7_xoѮ0TJT{Sl.VQdxh=G݇5_^RGl Byxh1_c|;MRCSF#PrPh"0 ; ɘVq5VsOזC׵nyZBJ壭z78;mzaNingUST q:LgލJrL?1=Ç{CmܖwKvn5Z0\9|BD+kf)}(xz5_` ֯2?G{C\l5{UmR&ϝ؂yE41i|@ PxGpYT곲?76[ xYF6qOo{nf|wv oUEM+3 v)Һ]ıtiPXQ k (,l[rzMGcJgkf`SIvGvYʋGn[w1_ NߴV@:[;To~,ycyWzMͦ\K<xdg2zrUSfKZVmw࢈-<܅L&R6>6nr+HA-R.WG* 6˾M ICEsξ?erjJSڝ儥DE|lntSg&|[+/!m1V[Emhw5vG-l%Iӧ#olegS ֣`+0V:2]+v{84¥=)AOiApe_k=I6:d9޹[ (Kc+VlBBZ3tt_2qFCǕ+?aO&i7}>K2 Ϲ4븶~ݮ7lohe;>36+Sm{iqmMymK-{C=T(V gX>?$hGh%w}۪+llq՜2iջiG7V}&MvKok#v¸2 [ Ӈ_a8jeg;y6ņ9?Wo gaccHvQ5{I|f癤6㨼'f-ZQRhO+W7Tif9wn?u9h/tRͮ\}QV{U>N23 U'?Wj7g nrKCػl4w{##RgW ϰ;"HgwcL3)+ڜ[h7{V_ue[mAК!ee9/+?\M}D|_=o*s?yZ,ď}C-w{dBSkWg9:)B!C>LrC񆘗4> t!'Sg_a񉚱\i'YYW+옱{s&+ ܝ`cScjb\wv3<2d*ٿnmF|hx {z-uQ_y6g_9E\nj\kw,ő}1`~;n6?94߳/PŠΞ?[7)+3(Zss7 sQ2VnEF짫Vml*gBiH(oəjOti(>] 4Qꣿjs.LI|g.i {NAp+VxBkW'snWV6Hp;+5|Ҏ#N- ?f`7IAg  Ͳ1wKk3c#6Qz`|y13ZzuO}ymZF~n?(,+m1kp# G눱1o~C|;qez-Y5U*whM;WTc+ 1ldSz^ǜVgwPl3ߙ]yzRiz 3f"_MfQv7+l~qix%P[Tše'vђ&l yMosu#SDŽ+9[.-*^>Df/X`*>$;C.0㬋{Te]jer^l/zt;7Ól5k}֣4힕 t~{G`v\}{ڷ)̙9+g@06>npnK@~;L/W/3+f+YH͉m췩JH걊o4f\Moh"zS?zј9s5-*F\}`~I}+l}'m|NK&L](7B358>|mg(3 }1/Wip GgD8j#u/ހ'o3G/g̙3X۠ "zHFrfRv|g7ۦ}^xܑ2&K.3SiӺ->_ഘ` Z?kz`D:/0/i5eTMO 2)r4OW@dߞ^igl; ~;N̒V`?]U秡Ow:5,6P}!K6mPꡁu 0 |=T tcvL]\w͹:CRw}߈{fcv&Fb{v8=ڛ3Gglه W4ڰnasa|Nj,~}wc.8=m혳TفtQY?ݸSavH$YJngg ;<D p}sd%"'TsǼC@oL>9{!ܻ}ds P'a_{f\P?nqԆ'ټCr7`ci?:glSe[3%hŝ&_SievG.°{] ykؾnE 2 U'G&`4ƍ涒92۪(J}w9`,8^ omawy߿:n4Bh~"0aټ$֭kƙ-~I.2޺9PCx_۠5w;+#b߲A9u2v@ +^p0޺9 >6HJӽAkF{wb"Q2wX$#nm-QKzٽH`_xW6hzúŖx7iO,}~a)gkv/rz?$q!Z}!̓mc(G1}隝ːqCo?[Wu|!k= wV0&t3ԁoCսUν#lkWwuws?~o@?/o>)~q__~??&#=?[5o׏r׷YˁQ9_M0e w xyzbGɕ|_ ~<DϗWJWփM,?!zb#dICd[El3Kgx,W!yPyMEEyȓYrNZ.F倣a,m{ heOwԛp|?6= J%#5s`oML C9eM:jc* '2AOćiPAy)D؀(0ԛG-BAI1`3wH?_CLd |$"s/ڍH"7b9.PTtqI?_^-YgvC]ijCR↥M< ^H u}u)Q=FlQz̈]WRU~q XFqu/2ԇ(X ]%&3˵$^ yylVK hgMD:ՙW:e( %#ϯeF]'EL#[|"|Lc ")r)"fyu6 k̈́8jս R7.R?#XFnЙ(TY*N<C["c@bc},<{A#'U0yXJ*J˩WLərQ23a- oͣH-I}QGFy,g[Ma_ywJyPj|an@a "ÆQ$N f3nreNLghZ41'[J̨DM;1̀3,1>\R0'.KqP&w*c^,VM$J*K,I 
ΘlSJԇKfӑ$)W7Rhsyt!S.D:( qPgvX)a;S""&#&H*Y9%{ GuS01&sxyvW 'Ja@Q.8(z0Kz`'`"gB-Je jnI1Ae,gr\]' ԛ"> I%E,MtIJ|Y-bḋ#HB&85*Ԧ=$|p h]px>75 (:̛?/en*3%H0N?S{ <P:j:MԣV<堓 $2J(=y2)L`ݗ>KN[v]]NԞpLadсX!+\|L{gwO"ZΨ\"̜2-2GGb5H'!t\:Wp!/|HJQO-TMGԮaw(bXPfGEO1'п}ldm.aTD"^%l E>O*9kP-{R)8*=\SX yT#3uP"+"ԛpVORP=#g50'T21 2L\stJ}I9.gehH 9. +yEy,{6ȣQ6PSJm#h6u9SH K{s\%]d^@^(|>R[I[U0'UKrX7Yr]˜Ay@&]S.R2lIVVx:e>t\ \@&|(ktҙ)7!q.(dN${)vX#-gzbN<ԇX.(#|l2U9zW뇭qYb!Yr*ZCh2ML ̞xf8.HZKZo9I(oȠRPZ %{FKH IaS˥  oXN E! xAd;K.=/o*AbIDcm,(Ub:1lBEq[gL5q^\ DjJ"PS}S1r2K떡́:%EQTyva#թz5(iffzBH" #Wtd^So,O*KVA!D"KEb!!؃\2?)e2't0J}PVyHrec̊ $@usUxZD)TnhAڵ!qRkpJV.PHfkD _:\'9%m] @ٺ\@Y5t5N:ȄM!( QEuC0.S%%QjL8hS%X<t\ѥ\eT5/(nغȍc&Y!n*1")4ˀcPf(~ &pNgZláE6IRdcnt7i*22p-ҾɇNN9Z 1F2psnc]ƉcP!-'ѹݼ&N ;ߢ[/2ԁ=޿ɩ#pT笺W>J<\InLLN Xx$̲ʘ'Wseji 7}^S*^rLBy?uAFbZJ)e?ԛXT}K0P(=QD!T:H rZ$A#a\-ŝt98q>b4?ExHYDd5<7]WY#0YN:cd&UZ%WЁ^pÐ$qsܙ| }SꞃKm$ "$i@IPQc35|sN/W4*WBW:Ġt1I@&.bnQgAxK=]g>uEF77+F0HQP)Ѝ6XѺ>8 ˩8gj Q !2Y:EFY*B[(EXN6RWg澦L'pLicyHt"3.Cکt^T3VKi h9M'q=: "ܤM&yQZh2E0h5zn=0(K*u$ti& g7M$^Sy[&gN 54u'p}I1|'he Ql ?.1(bԴ-|pVk|.^lPCy.j4TMs!(⥈L&^4^6/İPK>I#ΥP< }"TB]=W!*8Qqt*õQL NK J(*H}]L'`(#fzVoC66>҉L2{j.C2Y'q al>m3 ̦HIF!PTLνzAԨ)-MQ(sZNI]"H,jRbcv4Nd8E҂fRxGYd9yuNT{hBS6K&<tPE#rzd:3rCBQɈqXu` (QrA2bEI] mru`,Mn\,:>uEUM R:> (Co[DMmzݸ\^]Pڴr&&7Wg>^ /5q10Lb&X:٥#U|o%J*O垨ZK D%@2q1As7f 7?.x=Tb_-Uئ4:zgR/ny*.{j";&6f%ke8c:^AJ.vL6-~nPz?eZ GMX#ÝV٤SUs"W]Ga=O]Gvu`FR5Y!F*DIR`MV6)GGUҹp#'+Z'}c9G6&i)}!ETuQWt**O,QBduE$ɈP.QqGNlS6*1 _UGȕ8TՔ9}GT.Nl]yoNP VZr-M䶍$D-kp;DAn.:t9U")hT݄T]FԩF'H33 Bi5MrPN\$;е-FN@VqR&DXΔ v) @GTn2zDM#/AXEb2+,rE`5/Q3GRsxj\;֜ԈǑ, HxxK!>hy\^exțF8%CdpJMK}R E%N \ʴ**ЊTw`_jB+N>mVXH!S2Y٘E7+tJ.Nra/+w?dR4R{wEJE#L( VJmۈ?rRT#p(a2"|IVu( GSm<&:Wx@]Yػ.SJmO䤨zK翇4BN馚}j vmxQ'T8LFZ'rE&Ry8)P'ኁeR"@'Y!XfSRYepL=I^ jǁJ43'{t$@EzErBSiܤ PB(BH)fD+FbSLKr(Qa957Kr$Ga 1QZ8# odA`,p2ǞMx-|R5)&g-f.m)%bChDL%UA,9Q筯Fyu`JQMQ^YE[JC$Z@Vb9''}2v%2h|ujBiH|pq!0:YVuREb2 4/\UpI ^/[b=sdNI}K&V98:/ кĹ,3PH95fi 4^s`4bC+hʥ89jBq3ˣ4@4m@6isO!2)> \@dz9p&LH)y\na9y ,W'a,0T7n(@Dr8蓋eMɉic9:·6u\ZG5r!p`udi UR˫CA) jD!<Ԇ2B -j~0 
pCF'+|"WqjVZH .:AKӸK.hN i\\2.1{nݥB 9h*]NaSQE *x(=Nbs\|EuuA Mu:(Dj*vCu_z8PU+$Ήi!~h />cOqw<*0\B&2n:7I"V'ðR\v YL:^c`"R"..1\U:roq JI#5hKJF@,>Ry8Id._ 6HPWOAC<)_NN"e$z:^p#'i&\*QiAc9,mFжK-ׯ!2~\K<23)%R꒸>@ȳPdBEɌGiX[mO{V2o^"̵xj&!'WpuBIբQnQXspPEBf2 I*}(Ń8#z_b$tz2UIlCćS!&r4DV$JcnJPE)mJ 6j釓I(B(O[r8J2E.¨) =$DI9˿"ozpY,4n6 0zSH \2VIRp4L_!W%CiC&=""`e)nRFCpQMًmFUcVWrQMZ+dNPC*ܜ<_e{`a;BPϞ4J}%e{(M:d9sK&*$A Ic9u1j:puH/r:/ aqxuT6D't0m$.] THNL>CqPC >$tԞ,U8dLɣ.Tp)FhLȍ;rvBCـ)HhVuJ?)Y{JO bP 1%qTPbcGЀgnn1yTo].Sn檛H"i(ߝ&<'aBpʚFz Hl@D6"J{ v GA)AVM&jeYcd"F=n3hAA ɮĦ.BDD!S`uA;(rA q(|OIUȴZĆٙ(2' '72'H3ooTA.*}f4Om[дuur%CO/̱톍 {5ex1O]E tFw #)Q/GLVM,/ۍ b@\gq2=Кj\)T-|\8$&ʨ3tFf3Z_O\`WW=y1`c"5IW\@)JND\,FDŽy0Z!ց%wul& }Z@ >L&䀮2"qP:mqn@.hvHUmQH J -ڙ8\-|A#e-ek`Za\ UNZz(g82K:jω ?1yӑ9I~{Vy$-!uoӐI͙+9gXZ%fsX'Jo-TmP.H\;N 5@ )},YNؼw'`5\r;<6h5Jf. :CeE\íi ݤp/i6#e6Hh\sAyo *fMv0代N-(=X\Yj˨)H66d}C;>:ag !Fn@eEK,AcvLF[?`ˬj pbjƊJ@u@ρ pɚ_etVɪN A$ta_UJ#Hfm^9a5tɢmO-9fQ/{`Ah27"N;Z-KѸd8Xxю#M%%4$ֱۊtt_;6]WYҖyKqmhX-ygnǒ5#xɈ#:woj.i]<)>,U\ZPn_!FH0lRՆ=!oS]8|#;\޿WheЎߏ'VДo/ꉴ@lN|ʼ֧ zx1nOGΖ٨~:r3YEꄕa*6 }к PJ$=uƩ+$rR1 GOu3)E/Rg*@?v1[v6² w\ˢM(֕ RC?&+%)*qsiLbyH]t.N I#U)(;oâPca-5Nqy@WdѢW-0k@,˘"B2!q+μ<_6*c"jUdXw 'MUhE*qI7pEiæX .4}cr$ u:.h>Uw0tjRݥzIjޖx%Y"uh6-r)̼ .Jr _'jP:򪈀]4+ jzȀD{0v^s#Nvfn`e)Y;Ji;W J_+tZW)-bz PL"jTp\"L$.09ym " (*Kg0J|8ln@ &d  wL=}YJqesQvі dhSFdJ0#0Z=}H!vYﰾ39w/zD9?%n{`A%0pĘi-ܑ[6:tV#2>v3+h) RX7 0~f8\'XPEs;eSӄHkTn˛BEZi= XH٨jY4a{Iva<]^;h]%52Ul\he=D2)U:4)J}ٌzУO.};ldQ#HfbGíF dNO&2D> }mtAn 'k^̅dPy(PɶD*Twp~QRCpCr?^Q"n إt4RH=TT1;/ HZvhieB{\N>y[续&siVdԩA/uMȚ&P_(R) Fh ?*uj ZAnG@EOѬ ]>tHڔr T EZmVDYF훷yQ4-]:Ej6HR"s)8:ή)B#Pr[d5%6h*;a7l(TgS Kr&I͔C *@~Ц _f3Q,D+!eA԰+Qϛ8ߙ:H5T5<@Ɯ,`@ NJ$Xu!3ߋqV7Z6? 
+éeMJ=39xYG``BUz׈ ۰;Ҿվ//O1(dRKo!i;fb{cLc>/ rL WȥFFUt:[PCWG QN Sɋ$'RDd淕&5YMm uqneckV4FtĤ7̨pqtb=) Jh:!1Lp}_7+_3V4q%V mF q]G򺌼(u@]!I)T6 ;.pU|c%iaH@y@ `.'|B SF}N$6:e =VE2aZ0H rZr چHc‹Vu0e`a;\X#5ڸҟq@);X*](![$nⱃ'Mt̐vNz({%m9T)-n }0|+!w+į"C^Ïc$nX#RtZF֘--/$0X'y[h/5"vwN 5ӒB.J.58M%pUdNhI}^t|)N4d*jV[Tia EZ5j||5inKkк᚟n㪝}K,L]Gߪk bd"Z(֕aHA.X7Ȣߧ݇UNdR Lvz-TETGe~nKAj6g66Wfg #B {|Ɛ.hd!db|/(1d.Q([v#6q-Bb5J!e&3=mYI>~ )F ;fuVD^lթCDR廳֋g0҆:Q !}ȝG4QU1@ʔ" >$ ]ľ~:3`eS#iaƚ(!n^3̋ \6E@f1̶):h2évac&XTBSw\gu6TVgw(Fh,˴x s&]fU\j4gVX}J"2/^#0UJr"v xu1{Tx`XeEv4W.Hgct6˘:o wiu7 Ҵ2Pc;NRƱ|wQFթmzi6E6iR2%f$SfM 6L\G@8sdzTrP-}r>Vؓy"IY[fEtKv2aQ}6p; Nm3Kٰ;yg |q *r FF>XͅVrQjeT?2׮6pdÎѬShq}}}D/.p#Vs'٬g_cn5&qac_v!D!QHLtBR_yEl΂$5\6R&8x& |}ĆG:k6ȼbBYua$ԇ T0bȼ8qV|74 i15V$1ˌF%_&f endstream endobj 125 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 116 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-016.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 126 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 127 0 R/F2 128 0 R/F3 129 0 R>> /ExtGState << >>/ColorSpace << /sRGB 130 0 R >>>> /Length 5089 /Filter /FlateDecode >> stream x\K%q_qck `  E2HN$A}a_q05'AwXWެ>_˛s/zGçk? 
'.LJۏ}~xABy!S|[ZƔsD4aMGXwxEeICCLmhnؠ7| &S" ɚivf%PnIVxBBf=AQYXm mw|=AG.q uu*x7| %|EL'68a*/x qࢫo&9`_=IlD͋,4Bs@T,O͇-6 M(T:q/ $&%Ć$&_KD݂oAC96j?!(7|D1 TW-TTI %Dtɒ :=Lx&Ӥb1@K@4T(_$`KŸE HUBqxp_ zx%=E_FT7}rMrADiv\ 1Y ArYOo9Lw:4j(»r  {x+HK ݢ#Oi+G9: -:CVl.EԽVAP'),KD=)-Y4W#Ca }v=ߋAA=;!D4D,/rlqquv,jt-(I JԻDWϦ4eV.M 1 (&O[1W,[U0-hcYC(NԉMX2Ł$ <*".?l[aoY(P\[9hu>ٶ c * l:i: p ~=qqx# k ʚBMȫr:>,əZEH8EtlOQod2Vc~T @hq/IM!WξORgD2Vc~khnCR[YT9M:-r2^MY9}/YA9hxx`hImCX/il?x޴BVV5U3`U T0Y QQ=;*DL^ڲ^n S̢^\YyŽ]Hp= +p05? x~:6ǯ8AZű|,jh6j1O:8mDOQS>5jw@:bd[#9BSa~ksT\:C['婯h<'蛌21o<ϊ(iB;*5;!$WTG_5B}ҹٸlr2V}vEX˝U 8xѣh`, +ժۤ[hԮ =N_,Ĉ Q]BVFEc%Du5Vh%kRM*k9hFϣk]+pU welPK R*#U+SUk4lӀuVgAUQC_#e-pLA'GH();ʉjs@8ʣt; {_ 4G_Է5ssvoCZYJL| 6iqT͠6iv lNό ؂b@ENĭ/f P5N[h,oR ] .m' *i5 ,RUr*f0pGm_b6 Y4we-ԕne Jʉ7f Yi%l(X}ɉqrbZ)Z~&ACi+aX_6-& +ŕVW+CeJ=ig~\A |Jߟr%:>Ҿu}5h$oSs>tu4ef ؊=}3!݋έOJϏg;~|>4BbtS{ϲw@clO(2/O:{ q>Y+eߤ7;[$8V^b`We0w}jf?ّ?^]BuWfqڢ/o  endstream endobj 132 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 137 0 obj << /Length 541 /Filter /FlateDecode >> stream xڕTߏ0~k?(}y|Xv LONc י_SN])ڦN9Yzvc5EQB$}7jD`Dk۷Uj%kUu EO٫\8cs vWNE`#WJqܼO4s]2R|D>o>La)LI1%Tp/]z L""K3w}X:M|m0J: } ;^?rQX 5q>kp+N~(${v]](bEEg;GnC' KL뢚p;asV[n|(}hl`HơCTuǐm+jޒXw=-#FpؖP?/˦(s 1~D.e yXbo(".da>9гqҩ:;:K/|E%JX/=U)=|`>^3 endstream endobj 117 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-017.pdf) /PTEX.PageNumber 1 
/PTEX.InfoDict 138 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 139 0 R/F2 140 0 R/F3 141 0 R>> /ExtGState << >>/ColorSpace << /sRGB 142 0 R >>>> /Length 12761 /Filter /FlateDecode >> stream x}ˮ-9r~J#ɜ` ` @@jN͵Ҡnuw#Ͽ}~ӏw#|>_~_}bZ?W??}Oy1//?_秿EshWsBz?:}k>G~DH~IWΟ۩LG͞f!Ӹ]8Z MĹ{c)6^y^U[ ӂk1aqޑ$=S.e!V!MvV<}:Z<;nW͞-{sP *N/͛);3*dcX;;ύ^usT+_|8{psSJɫsؠmye2۵Z25&wVY'uLS陋8тpRX#v(NsTb;<|r NգNgDoy+k\u˿Ia ynN` ^-}r)yսɖwׅ]HywkUX^]>Oy 7ӦSB*gs!5^hM UQ#ygrOX+'O͉bL_qhtyngFUk0|1sBMcbXˌGi޿g1 CSKf~c sbU3}ۦ ǔ(RXj{e`qmq*e^U6,͐gKn~+&tF璀;S|F9=}R>p Ȳn>zIJ,y 1ɞw2^ڻmJX4w] o|Spӹ=J0;ZէQ=Cof.z;6@.Kd}:W-7}D y.lS5Y̷ `Ɲg-Ha uG'8g[a(AJѵOsg4D`wE#u6}m=6Oc,Sup'i>,[t0=29I~R2]wzm\tEe5ݕb1q=0§r)dd21eҹ])O wr)o29/ ~ dZy1]L벖gI5ѱ@pr .bZx.ʼ㘷⋵,UN0O]sdtlZ8'~ӝvɡ p9t? +oi>Go[eJgpKΏ/%XW݌0X"As y.+N% xWL+D{/1}L=nrywܔF!O9_"v7/0y#"M۽.T&C|nLy`SwЬ7'D=:CĨt2-4 ,S. G1]j̭)]J'4 ͧ@uyr`϶rsvZ+}^ Z,Ѕڄzt_r e84=Ff9r F|ai=|-QOŗj#>ܰ. 2r2a4ߕ@pAV LpɎpQqC0sqsr:oέ+5?Z+Mܰn0:9-f+4C0J2@xB,'=1̑{ ȃArQ בGenTۆbrSR~\F>}q(T˭&/ϞGwzwyzɧw\]Y‰יne$gT:u+•-\b!8 s v7M 9 ~ϙ*Ȍ汻+d^'2. +OR/vPpr.+&Ċ>߻ٗ`DK7Uw7p C;LKn\ƅDvFvά.NGGBy)%yRv\Iy@[,L>Vt6w>}@h h#ӗΰ?)5ULfwWBܰ.FN$8OqһiGM5亝6*W%k ǘR_F&T7}9_Jް%dh4sꅀ)lQuVgɬ}h:-Ŗ=naQh?jϫb?bEwyŒ`Mb#h[<6Kwb۾'kT?@ -1ݙ 5Ԙ=^/l=YsyN MK1urZp=%(rJbFzKǡKߘAE@Kg:o>gKz8T,sFfgFL26W55F+Y,`E` 4jLAl])Oz#.h:230j6_Ŗ=4b :Ra]j&gu32 GAb"plza]3#煆425e"<ȃD=Z@=Njh:0hd^Hh7ZF76b!*7,R15+?8n<>pq{BCl8yoqbFNޅ-xzZC^Y>8|BK5]Q;xi7]?熖 Մ} 7s21I3u]trWVbw00|ۜQUb FFCx֜G@Kl[1zhlX4M죳kxoty{h՚`7V;jx.+psyAk~wCn ZFD 1[cB+22*3h՚βlhM5 y35;{: &;|,-Vݹ[.4Ē|Ѿ VS:Fib9kF҅Xi;zrֺ D~~!Zպ%Z4{ExhnC$5T 4+U u N9,I !҅/-MFC%PlQ%,V-=z@!'9$@/VUŖ^=P9*Rt?Lo^=h}Q?k?"^#쇋-!F |Ecl[WК%hz8u&/-8K31ҫ4|ͣZK?H2! 
| bKO"ţTvY>BJ25bŖ^gPbj5Vz]KLd0Bbҫ}^.P\H:/mMa}XOj<[zIHPM<7"Hg3ҫLY{N>;kq´z8[meCصZ("Ŗ^=账!;%5XgXlLq& 8`"z,bK> \ɴZUbK`Jf -zyP2iPg_:E  <S*lC9-֢\|,ڰk *45$a`&~TȶMc(m-z'a/4j\gl\MXa%TG]l="BM .]}^=#1 FbsғW?TQ7W?T1-` z3ʨ;ǀۿ^R`K/mE'40~ѫHop^1rf2_{&=q" %\lHC,O[)R7sb$mV1*yBK-%7m %P ԑJ.NU ?ch˓%⌎f!Vkf=eH h?IJQ1R/J(=]qqO6W-=t$ ΒK4]7%d)}SV푮QŖ^oAA"+34^.g9Ŗ^'f;c@pg ĕ*SC w`Kha>s4Dg۷~ o#mWV E_nUlaK/qD7cſ#8Ŗ^=0|5>cw듶aƃ_vW`wuڵ{W2!X4j)AŖ=W;g1Qj4Rjҫi[GEu-쨭 MbKƮͣ8$r^3ivLbS3z`P*Z_ZY-h0k-haa.P3/- ̳92.^'ϩiD}i3 a=p%Kz)Al饣ҕ$2ECu ܨW[z)Ix Id>-'/f&CɧVgbK/1 L4xC<'~oK"rsBkyD+=M eMz&7_ly&d/+lyXle#>(T0m.zfXyBCV1mVŖ^oAQ=K[Wz'`ڮbKo{6.[ЖXRaŌ} ~ѫ/G <~۝#ti(/[zr@B*AE`KIV33U3Xbn|\>yW*kCo`KdE.Ǥ؃-$龨zgn~{off9ਬx@/~KIxWEQMKW/z#?Bz҄$?lՃNr .++Cҫ*XVp-KM.Ŗ*{d{ $_WX5.F=Zl/ᔷS:0--bxŖ^:.Y=b rI*iZ#BZvŖ^oBRlÞG/z4-QTЕXl%ӬwWsٮ^`Kod{gWG^^ e *^$7О|^=(hMeg˲rG~[ 0l+-ELluPCDjIqkc&7~oI5,=0 Dz`ɂfUֳu.rlR n޲,vXlijAzZrBeW̐@ZqZtYj`Ko4w%---`K/; IoZjp^x.ړQJ͑YCiAklm-Vgԫ-an,' rSE 'e*"_l=yU0=aaKowԳ {G_;bKsН,9 "$b W-zjz-z@E}gyj\lՃ g#<}˲>Ŗ^="`#IU5DT(!v߶ G/\,jlO+\~0bHORU~1^ xϦhc ,W=(<=l --Z[zݛ4,c[$z \<8[zceb 2$ E}?.SEjf~]S^Y3S/~sO*CJ2l| -o3%WlBorZ꼶Ko4v1\l鵣FWJ\WT_d @zzЪl O_ބf|W–=(.SB%2 3ҫkK2 J}" XluUZK4H:#l~k-P-9O :X[f`P8i0ŖގA[3t=-z`nlv_Q[My*lAfܵvW?;sgl]}~kصO_N 8ܳM_[xc].r2lS$+11rW4aPO[zM}zc 4ƕC R fgY^ׂv-YaC'0Ɗ@(}ELʋ_!x? 
?9-tg+YlՃKOByaC~Ζ<1$;ɳ.~1l1\lC=yXYWay \gʞ,YX?`KX֊_$a,ka, 0V2ҳrEsH̅EjHGc#JE@] wQ%lţ.5& 4Ŗ=Zp.Vh[5.~-oc"GF v>M^ܓvL/Y& "6g20V3|3rt3x)5x/!Fݚq-P j Q\/iYU"&ҫD*}n^8!Un`KP?(*x.,Zaea h]lCxɴ2-if5Vhk5~ӛ=gТ$t'=y^"Qkj6ժi/z08H#:/zЇSY"_ fMaeplfbNi&k9<{B8fu~ӫ$IZDyx7C`zoh7Vot?!0 ~ѳȰ]eu.*Ŭ_ƮczXCuM.ւ=8,ӞP!;$hW'eG`qou-VQQ?sanlu)v}z(hȲK3a:E0wd1 &kEf2yl5Ta[Vܣ;I!7ofTެfVҳxTSi[)Hn4-8@POƺbc[z-zP|˶CXzXl鵣VBᮞ?P-zPy838-U‘3(`bKz P_=Zzӽsvp 'hE;f3+>ݰ?)BHΰd^=Ȧ*q?A7k%ߵ@dj- {?j$ʨ+y^39X^{dY/ ;\n_AwAwAE`1+77Ni[z#_9rD VfS=,l5\C|\Lg0uem7z.[z$CDq|jC[z^F/б-~~UثT~}:qg*S|i?&{*ze7b[P~< Z,Ϲps[d!wl_{ Ȯ=6(`Kϵ6PUVV UJ-V+k(R?^u}9; A%2VT4T0EB-N.ll^[zͤ}Vc6Ŗ^t'Ua}R@[z`٬EZ{2,_{RކY\vRRR%r5U{4ң1;ߜ`]GQ~R Q.\;S^;߽3v&ߕPmwNةNw:#;kt wfhuz`|7>4 wF6h|5G#pmA⻑gCWW?4 0`.{dQ&CyAN@*'ULiucQ0_ G,eEpFA!-odj&rlU#JyU6(TN+ aTQ J" `1PlEA'3*"spXX2<). 4?7&Rq10?GyW!ƁH̏t86!z[!&aR XOA+byRtU1Cџzv mQ^6TtA7SVXaUE7zAn=up7#I8p~jy I8V%E ^Q`CUPޒ{}WKoU}8 z[] j^d؅¢`9Xql|5bQ =PlSTI3QgAEشPggX[}`-Ujr¼(G,}^XJ: Sj~N.%*ͺIfu,fKtU VsB>>8Ng3i}\kO¢` x"x傺AEJnX{X֕nܹga+&M*,CC1ulL *E(c:gp]LOTYV Z~. w$Mu~XhRD1aRD- ) {]XOjg %VFONZeU#( |<@>9aq]xg> 6bZs";;6Qiӕ( =p/\꾑GĒaOp~aV%+Qwzd"J$0z'NEvi^֊^9HQEѕfQ |ݺ9 ,›K19'<O,>Ə"&^~*?4׬OY6&Y)#,6#}t"(켤@g$abU|j;Cd:<N!2p;6Vǜy-t|ljYD$(eThV%ƌE4+ vz%F6J}S G E0|jV2?H Sl!] Gܜ CU݅fVI⸪{om7~t]Qq$Eo =q,j>,ӕO 7E}s{ϥD7&EVsjW UvG|3]*<QNB A,O=SX{+W:}1~*&b*+!]+=mrS"1kCQ %҃ ;)/A|li&uu2/Ak,8il |Eɵ>%7g^B(K&In8#O,MwōhJ_?F k@V_cg^ͤ .@Q{f=JD'qh_19^~Jox? 
*VςIl8Puk<˜jP/}㊫Fir8;M<5ƚXPk3<<3~Y<_BIL"hDeYcTJ]P1:WTplUX !y.8%g+BAi!x` ߌ3PEL$D&lʵq+X*mZS0kD!K W(hBWO v?Q!mL :)?Gӟ]t"z{VdU3Eoͪ~f Q;ԧ◿}~˟x/|BZO`Mp q~ꄇ4fuG};: o&4?Ƈ endstream endobj 144 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 133 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-018.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 145 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 146 0 R/F3 147 0 R>> /ExtGState << >>/ColorSpace << /sRGB 148 0 R >>>> /Length 5915 /Filter /FlateDecode >> stream xݝKmqWٞ$B$)Uk]Dd|m!֝k5g=G/?~ۯ~ʳ?~ۯj?>yٟwQoO>_CbkYGgHڟ\dzH׏}|1>zY} /7~oo[Vm=[}|l]+[?.^om4b.~tJx{}V ފ=խq[˳kݏ}>:<|-OVAqӾ{w/={cÏu89z-2:^GůcmnGJQ۳Z+R<7:W=jkx]C=NHϻodN5uw/2uw_5P^5$_SN!3*P\7N43A_`؍UOG+(6bsO 6Y]E7]8,B3@JQ)ul?;&JE\fjƷoxԓZVQs9 P]՚:ڞ`h FegwGU ?7$۔o!^Y`C ؇./\]7tꢚ2Ařs@6 TWA&)!,_N6rh;Xu ?܄Dh:S0YmDg^Ḛ2f,:R"x$.f %?nƙ'=|>@I#tePXd|iH$ KN%p=P7R$s!2[w1 =_DR FP$N+7F5/:3Aŏe%;@4/M<ݳ(veAqegEםC ȮKS*=xMUe`sˊ@Z+Ƹ tw2ĵus3Zݕ 5Fv%iŷJuhډҨ~.S_`f'#rӨ_ͣx\~gd) МzN;0}g'젲iA lFBlW ͓m@Ev›"Y=ہ "ѐ6%i^HtݢLՃ7oA\< XJm'!;}GCebgLJ0Z:S`n5Y1yyя@mVMa_|EI|.LgjBldv;lO#8+wPi Fؓh:g:,^$QGflv!`xBƲ13*)HMLGݷ9/ظٙ&A$aR"Ҁk+‡+) #a]׷c}{wA,BЇ7} :"l~Zm!]Y@$^]t9M\- D0aR: TIP_y9\3]cFwz"`RHߺQT['b]4_i9Æ[ٞ0Y+7:,d4}Wy K0>{d-' T*Wd\j t]1fxSd1K6r>P!zYϼV,%6t]8B:F; Ȳ%rF݁@[0^[QQ6Иkup!U!utl ъB8BjHë2AttlHKpT9 ,JDk@emaX]9] Av!+-.jBT?JRDbx``u 3E2ityrl>u$-MZx$ ud@i@TCCH'fA}'.A*MH< 
jvaGX@*yY8{YDMGj%h8萇xyZ$EZ}p ӏQQJ`\|=-iPio܄+u30Z eВb'  &u~)UqMV 62.I<.,(َAGH>mlϸqV`A($UIewpْ0JMʬ˾p@|{"Du[2}a[ǸU. Q)kdVw*%1CJ,L>9 4NQ/3AI![`x ِK,t_Wu@ÊE$"F/v'Ydꬬ5dHAw6:)(q˼ kdaqCGZ]\9WJ20[xQ$Tp@?/j UXWBtER3uY\"˨ {p'餪nvF[x8ݧNd\DHnQD5mR~)O@VD61Ϋ;Rjc%tl PJT`YELŢw~ȈʐR6NwI[a⢀`!Yؕ3 }2I}fo+lL C"th_5%§=obNqRkZZXhUo-LJi+9DD w2/u;*{tQqU'I+­k)=6#>"Od^q(;9VW .x|/hTndŁoOX^0v|7|}^ P( 5{IٮqgBѓ&aȩ~df+454Y78GT=$"y^@FMIzw}]8O]l]AbI=(5=WgoDNb3wD{m`;DC ^7]G`4P~4u[̮!Afmqh5ur炒tF"gʂSw>csVi*Z1Y {uJxxYHl>*Y$A*`$PۚNBҺO[5(5(W9>9yVu( #6QIb'mCF͞0z%^T LWA>{ L endstream endobj 150 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 155 0 obj << /Length 833 /Filter /FlateDecode >> stream xڥUn@}WX<9 6B'$/N({ɦM E̜[9'~N~eNN~L(J4)8Vλ~ ~F gA^'gWqσ\9[7J?#g^9_rV#k~8JCQg~aNz*p/zգ- VY4S/B$FTlyaSTQ>^n{n/QT)HC:Ұg{c~BOmz_:;hOGbĮ°;k2w[<ÃÍeFvYul\QdE5 $A=qs(C>E[xa-?#!ƍh: ڭ'# TS8ԾPR>Lg&DZKZ߂8GT 'T˸ O$M,4QYdi_|.Q.3P,ф5 h[g`1S0E 5qޠ"vG]9{9h[Д `bX,z~?{$ՈvfkJlV\4SBңӆ;A!M<<+I2[nx Ι -:oނSz.^N|LՈhBF;x`6~GFJ#WʱI f']mfnH> Pf> /ExtGState << >>/ColorSpace << /sRGB 160 0 R >>>> /Length 9900 /Filter /FlateDecode >> stream x}M,I~[ ؀F x15]m֫$Gq)4$"$w?}_~o㺮o_a7-!|0}񛏏~o'2HQ^7~7)՟O[i7eTB??Mj[_?-|^oǨ#@v1b,$b ~(^k` .oiE b1@ϋ(W0IȧO>kGցr[y 2)D^Fe+ }`!" 
"2U@$?(Fs7:*]QOIB!)$W1gJ Hϋ(Wց\/kF+Lh$'I&*Dd FAR`}"2\ f *IF]jG@}d׫(ˈ,I&u(|B$ \LU2/2²$3]וHHCNߪj=C#}"v&`怈WI?&z^@ 0%F֕D\p@ )r"p٫,+a HA@.ܹp\`ˑ -q!Εp]z2u][VM`Ò,+o!Y OZгΥt2JZ5h/0Xԙ,J^@J ('9B>9T U,6(͢ erz4tf"16XI4Iej b`$ E{q\NMH U )z`WP_ ʀaɀo!](5R'#Q0R|Bla,0h@Ǟ OqBp: y<8* cY8m"= W}\HtE^h] *:+'vP][he8e|,Ex~w*)￙"B`SQTJ* |&@Wa)VeqMuO"#ќ{;eDȓ1 ;~>M0;-eK@SIۂ; !3uD\fhLp3\zD2 ϽBQ:hſ X X޿ -0kcuUkR&Rp4]O`Y+8F!0yNMZ H>䃃WSwgt :MbgQ|E9 t ưQ^}lDI|ީHkgO{LM6"NRILPMȁ6"&I˔BB|յ&oOgFVhR`"ck|x<>9 6]dp!Eʙ(;ؾ ot3s4 d4l( /.'>X=MIXTDB'.?0pZWt}U%ę@+9A"~&V0R|_`ޙNOxMrg{YRS疪,P8{Q:d,_Q,rTu-7c'יMAq3W$279_H$sh tbn  -mGurePrgSޣd΄5U>L;_p~E()#S72籤nFS,%z awE2aWWYZ\$L4mvfλ1Y$j97KjJd0æfnLlMkVj.27!B5YóaUه/LySKGEgFiJLu98D1]xnW;\{!#4UH3rHuQZP/;qx_\EwU k;1ʻ5Dx=c,ur5 kb9z{ZBD L2F蹏+*[fA ݹs.0VnмV:L5ʙsT= Hwc44"}$=,&ڒ/@iнqm&271jv 9Q'RDOCߥ5bΜpȜ񰕱@DO?S|wMlK^a&B(ܜ~\dpa-1mG4ϗlR|LRӔ~+}ǯٞbD!)_۳lu(6v$ˬtA $J 6u@Ѽ42Vs6R-gkͳ;8UFUl4j𙠊@H!)'uF!é/U N[o&"{&ѭZa^~+P sAo^m2WʲIk%_[ V Ԧ&o-lo2[ul6LZÈ!f冉USfpw4D`IOZYZ϶.ޥsqx}Y鄂mE46eptd.I,=eX{j_Şm]F{ko%uЙf [  ꯈpdw7E>_k(PhCE?LINv[5-'m%|o2[vǺVmn"kw`b$͒x "[Is?_j?+Tj7Pk5x',)SL`m%]Dy_evRb{^[eQ/CW21bT[bUb,^fu|Su x%EmXZbBqdGiguk OzҖ2yki*q<[#cq@D}Vd0۟ɬȬly2D/ Bm*ZS3V?,DT #Ck:igIyB`5LkTDgKI{oiU@ ;?_j`F cʌ#ϒjUVR޺Z ]@OZR϶J{kФ14lkx˻t1 }1 $VRdfOL֒-lR{u؟ͬCcb ȪJ]q,贵_kM\9%ux}km^ɟ@[MnҊ@/exV%1(dd_kX%!= 97[GvKe=ՂeC$wze[W3J!x=s,N51e4B 0Q rel^̊Q>z[s?w6`!vR_yCv_ F_^):TQMEKG̙ߞڼ{ζSnO)T/.:ZE\`<*l hAj !\>UfF (z 8ӢTH!D9Ex֕ gB#`qm$P @9<" 7+P&նsZ,Y<(7 Ǥܿu%9 P Piu'2z`Wشby<1/p񼱐,ܼJ[N WQa$eS~p)#Uw`ata" wr\퐚wjT:Ev{͐vq)3pK TN}t8WJ3 *~]dE>;ȟhd6-owXXSrzs  >ˇ6y dž]t0QyNVWC@> $vi*{uyBlN1~1C]otMN @=1 YMGVCjEㄾ"3 BQAnJNzS ¶!^Ndw4083e 3+Zs`DڎP&<\n+Ø@ؿS]W@ S;O 0π9mĦz;a02g3u`A;tNΦ 3 ƛ/^p}EaHD>ˋIu?<C\ko&U`r/%6L\>`7kiφ+"g}fsKbU,XLЊHY@ yAG ~hZlE!>W\o&)F= p}w̾%Gօ0:k;[mefpC?fF@` \rIl'ހQ/!Cq~ r (BRC}rDPEʝ@ਜ਼L8:k<.3tN/Ft- |dY`t/lօAzf)ՑCxc@[^>nt6Ȓ̔M>m$&n3m4?*_lkS NϵzwlCI%)1-gC X T5{㺃P^p/6Q+Al,L b!03w'3fzܕԋ }9?ۉGa'!6v uY YV`D!믠S7XΓT)sɳ&ÿRNG:B08g$2 rJ*DKX8x *} y0!vZ k K16ovvaAtVIa$b>xU=0v;|})!W` KߊJ ۱y< ?b #=p3py,XUKEє:?9v,Jtk $LܙCpvCjuӹиWsla8 
*Og聁si_W>=ӣTf@nU][Gr+xݟn6c:Up^wNwf_A}_蝛^ߣϻ^žnzF 8~+R< fxOvڏI#@fF3#x^]-G'tV~j5pA3Zx \5H)΀s,p^68ݰpLxKF%=X>-660y[IyTio\AٕQMyE#HI!Ĭ7G%{޷v)"㚴?_j#e${ ɷaP$}ylOZ+'NiVriOZRP}ݷvZ(a oxb.ZH72j C'"k"2iq,D~֒J6}ݟ-%>D8YFpWUlXʼn*"h-7ɮ6!ϗFvt8?\yfm_kM!*ʤ-ܕ;y'X|ݷv>|c5Pc[~ ]J>mRΒ {yeOl?[J{'lB]fFpVN'-J+W"OZ+ˬ 5f䭥yONJ{kmA?>_j2 pVN{u۫jQ7} iMq1i#Ǚ4i?[p?vDnRyu$ꧮk5O3i,S%5RhMR~ZIy믭H5ZEGl?[vV OEbLO5zJOq?S\")NEΒV^ktͤ Ԯ&o-M1g}kC g}DWzШn zvF8򞂛5㚴[K*vTiobh&1qXK?)kUYRޯxkU}k=EEE%k͠!n|zpbLiI%TI;Kyș4vev]_~o/*}n?ZCNli2Eϑc,6WfP/S>O>zi endstream endobj 162 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 151 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-020.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 163 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 164 0 R/F3 165 0 R>> /ExtGState << >>/ColorSpace << /sRGB 166 0 R >>>> /Length 507 /Filter /FlateDecode >> stream xVn0[& />ڈHR] ]bB.wF$<{pux9>[O \F"PZb)>;‡?hQ%mư-TcP(v~4p- ~oG XS^2՘˜nTmW=ߗF43 %j8~H #l=]:ZY#UHKdv+61BbA/0Jƚ`1E+ǢWT~XlTbb JSgrEO{V9qEW-+=ڣ|צ5ZRT}Z՜yFU|Z_}VK<+òGjnVK҈nuu]Lѧ]fSbiz6nH}J uNӨO2s߳mN]y0;Ÿv 6i|v1NȪw] VK+ns~EYt endstream endobj 168 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ 
emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 26 0 obj << /Type /ObjStm /N 100 /First 832 /Length 1525 /Filter /FlateDecode >> stream xZOF~bޚ*[HMiA$j"πۻ3=́9@Up/zgvoؓ"v4&mOFC GΠI&(xK&RH,Sv`5E %R+=Z1ъ5d5dƜF tN؋%#H+{Ҍ-@Zc> H[Ńjl[L0K:;IC׊1/X>k,@P|9Z(1s\fMɠM8Rs1d=  ZЈIFLtnYE 1sƨs1 9/@)anBdY`T8!B9cGK[x:t,x$#F^bƜ$vDA͒*drʒCa-f$Pp )@c8D Q)Qvj"&YyZ>VAhb@N$lj#d1#г4K6L* 67O'%e;Q9^Փ kwM=e#l>V<#$?)4i/^momnR-QHp ]5c }^qJLI]d+rδ.>-m>g-w;Y׽4^b(/nڦV'- ciU=ےm\^ۦ|&gQ=8:5և)VNGfCNǣ>͇K;?,Cv_lS.0}6(JoEa4eW %NTcopﯼvD0F(]%(whZe=.5}J4H~\?y^M~4VuxX"=ܼq55|ϬnQyٰF`ʎiŬ-7_ݫ*TV*ZNϑ|ӃIjM[(]J&-uD(oV!B$u&LQ2,&/>zv{ʯUJf09yͰbX 1l+1wX>#9CԽ[3nE v8/D)t Ly5r07? "k-OKc˱&IJӤeޅboҪ|qsJ_F5ޟX]Okb}҄X5}wrX-h@r= endstream endobj 173 0 obj << /Length 992 /Filter /FlateDecode >> stream xڭVo6_!{Ј!)R6`odYV&)MxdW2`0(Q}ǻ ?TEXiEAuXƤӎplkz?ϋb<3zuycM(dkT T`aհ?.odnH!zVB< <6D$aU᫪;L-|C\AZ3W16G/{2 bykO)0xN=UGǯNRڲ%ΏJz ?]A .]hXʦGXS8X%š•Տ(5cAbcgMUsstEgHy<69qMw >Pmy]p^=Wp;+Rԝ 5Ѐ^wz6o?U'hu|gXЌĨv~eٖy>?0՛8ʏ^8tݫiXW쩘WPDg=-LkS׵>k i<c5ϞYw-r]w+Q_ endstream endobj 152 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-021.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 175 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 176 0 R/F2 177 0 R/F3 178 0 R>> /ExtGState << >>/ColorSpace << /sRGB 179 0 R >>>> /Length 6552 /Filter /FlateDecode >> stream x\ˎ%qWrf+[ ll@V^ZF $[w3j]]ydi_]__lx~g`<_^~wy1Dj巿{Ś1)_Ƈ5}J˞{JupT>|`02ddb(aXS^`XQ 38%<`bj?6 cK>]ajǖ" z5^2v{ 1 qcl0bxIpƖBd3s{ 1'Mq2R1G~G<\0hHslJ)Yei9"^ʔQ8u/eh8 +3LdCm}(dTƖBX05R#+%.+XSA]<5?j`l00 *GwiHJG|c9#Dm6l2*cKDH4ќ>{3QOzl)sH[--3#{O1f 0QΠE'3tЭ#T(/fl2*cĨ#G"hƈ[JYef5)20]Fcf 
7J26|e{)SFes.ACh]Fe(ͨMFefeB]Fel)'jbڬ\S0+II102*i%f:O1o3˨-"OJëjzŒVqkukhKɨ c18G!H'L^ʔlzʞi2e4mV& JYeTƩc@b)2׫k- 4E[1~V޾-(c? q"U$ʘ[40m)2R2Uܓ=*ʼŘ)zz=3H X,T9K3\5AD2Tq)py,\V.1n'z)3}]Fc"~ZA"z==M@y~?=撋~=!?ɳ6NZ6zTۏ )ǽC|M[4|n%HZEC- &"?p"a 2Z-<=ExYe#rׄW`CGi6?#gB6=?/ Δ;i}lo q;*hi)m'Oq þS]uw~Cs//i3~{=r>^\׷ׇ?yHE|ӟ-7?ӟ^B_Q z!JӕՉ!p7c-^ax=70J^<4Ijq1wZ<KftWnXbwl;ZAHzΖ8ya}}e9EExϻbt _/-O|%iYERbYT xr;Z>-مj:B;*UA A[=cVyׄ uBE6pf^) oȽ[|+VدR|oƭx#Dqʩ>I_7Ɨؚ@ x͑noW-]N8d)rOM2Z"שE]񶼽!M{3n-Fa擽H9z}xaF}G'b-C E;#Z'y6>6#/Xf ^ÚCp\8`,#G=Q]!E1d2رT=,S,]#O^!C'ԉYt3<0 z> 8 .#y38,\A)eS0sac~|pc.&MdRU> S<>+?9M,g\m8ˡفY' 2Ge7kpxV9>F`xaσ-]gk ,cy$/-o $Kvbnk^ .w'V7 x"=Ȫ _sq 'edyx9f1ˠ), IՔq|.e`)c܎n950A`vI`+ӗ"_7Hݪ{O9 ]'2"@R8-X"t7ښJ3S8>}^ܧX:f8MH :3M|IǐI?~`*DuVK2|*{`~+輑Ys{! 3n4#"a"߬71YG-LW2Ͻ4eq鉇W>+Itx'D/24"(\c%x;?f~N\h: p@NfY9p-դ}dzVfω7Q="߻w1'@7ș\[vnTmy%fRu`gd*awXsωkd_Fvfѵ~I ߱1V&< Hk8sw<cؽwƉʖω}N~2>xOڲ"14Y-Py3| E*vF|G\[X<D 1²o%54˴ً0s2e` $FƫbX>5 Xgyt681yp#ȱ@9q/q7e/tDg=Xlkw&6KTρiTw,yYEnm.>,ur>KӗtiQAnzvi۫`DxցW>k^ޥX1=f ^lZ>5m*UEuӂ7'hV1*p/+1皛} u9ޫ*ɥ6 2]䈂ئ.qz>]ʗZ iuWXk˟+ȗn;q쎍cIM(~??66g˔?3٭O]O^B77v$GCF^_? bNDUuՉ}^^F7ސc7>h_? 
> endstream endobj 181 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 169 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-022.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 182 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 183 0 R/F3 184 0 R>> /ExtGState << >>/ColorSpace << /sRGB 185 0 R >>>> /Length 16858 /Filter /FlateDecode >> stream x}ˎ-Yn~ΎcmI  [aMym̀Uږx<~|k?>%u9Lgq0)s=?;sڮ} >qoǴ|l\?1xut,s\=g1oy>9>ş3~q^|_2q~=Z=rDZuWc=}O}}y5k3`-x:k~~e%?o\>?|xGCk|m<b<=xI_匯??zW7_y.A~{ǴXoV8yX;uk׏L\{؏-z/\lbin灍cb>sxR;[ka18ƅ~x_SgӞb6 m‹3s|7eѮ%~x۸eza}cW~޶=7:Gx~⃖nc?q[z9ko)_|<~mM31.\kp˵2_-O{S8ecKl7;׏,G#=ײl&;2vϏFOG1kySUc07ۂ=[Pw(qŜhY|qřQ8F3}W8cū('N}Mݎ?ٸb[V-cuL#W۹=u e7mڸbQ:Vz-%Y- w~d[rO(>g~EWR)شwuN47]#0_e%'<Бcʘ683 Y2 B{u+1 ݞzpZk8#eicNF!제زgk'F9Ԯ2vssb#y\w've_N v9uF\9N%Oh-ΧXul_YX\s>1c<~ŽsZ93l>W\g`υ=p`-O6>c(5rlgfݏ頔+w+%M_)b+3WNt0_cӏ.ۿOzi[gFUb51n"ˉ$p-9]csvrܭ>H|?H0NH`L #*3=6.g^<8C.ٸ"W)v9m\q.,1ݴ+"˷g?{fTO@+#)ѮWf9q bFDkVD@\9qF(Fu-퀒f׌m{e rG=^uMOx/'m3\,D v0"|/-{pp-8+p/aA"/m3m<.~ Xmu 76BC֍P>v E*x,HOE1tJu/ߏǒ9uX {#Xn<1ف*0=wԙ6m\q!dO(5>xk;N lۙY ՟qFT>#c.wqڃ' g<sc]f(İwn!J$mw%H>^5^ۆf};=ֽև>,4^i:^XҴ=xSOjʾJcc\D1Wl& юrQWꌂϨqeFV1xPx#a6QϙHkyV>u`{ `>Շcgji32Qky.m{VōS~y:Ʋُ4YbISWKhWҔECYJƿ&=5?c=v טJ+Wڮ1=@ cc`Ov{g[|pwl\q2.h=;F6HA[_*o+h[~̵Em:c?}nun9Ź_w<6rءy,byϜ܂{5*ckԃOtlOGgmm#8i8>cA)8H2O|׎_ tO+!V/8xv \[5mEv-[ 3ni`>Q4Wf![ "Ng,/xVPϲL>[!{X#%2Y[SeFn{Ve{`=+wgWoavYv(Wb 
0G̣q8.<6np.-nԻU(H=8-+T/Aki+9o> G- ~_1"5y(FLJb`kjRvNS8me82Y]deSl'y[t{.}aA*>lc>Îa>c `XEZ°aes #>h`zOvz5m\qi;6eە<##ǹ mj;^TK5q¥۵ Aq7{AΙ .uhp΀A 4@ΎZgG%pm|K'!JiQSZNF8 6 Da*z+%lJh[ ϔ=S?Slrzγ< ?lj] 0ti C&[w97J̋l\eĤӜr`^ WڭuI'KO{.a85# ;7)·\cۓ}_kpaB&%0&`5qmsgO9$F~])GcBTJqE(feH`F'!+@Vxq^+Y/@?%o*HZDн^{1bE טقkd{Pn5 M(WV!{Bnq[B޷Op1*փb̞Sgdǔ[h)(6Ɓ")Ahn DW {q=uRөX};-:/0dpw>mmv6]l5/o2Z42а6Ҋ xnm2h>ҹ>.9\,mu87f_oך o-~_ynu>ТR -TAT>sTASM LmWPos@"ƲPIp-<+W=:>yW?1@8o# չY#,F@8U4/5 e мPŝ!p2] J Lu 8QD dQPYZ5C3kܫ/Lg@[%,8g]E%4zg튡}$u㊄}n5 { N^Nvw33ӏS;Me|L<0W/._ oQ'G_*ojMl`W~GqW/yW)T&l+gcnxAw 8ޡcSqwvE\Ljjz2p#@E py2Yz0!P>O֡nx`bv LOџG9tW3\]1쑙yGXV+TWv+T[ݾ.P̭u!_Y,D<γ`י o `ؽ7@Z+[۔Xȭ ŏ@JOuYA|/?~ ̍R:pEv<\߰].pێ ~) pN.S$ڝx.2^:_|X}쿥vZ[#,Vg5|B{Df:{c _Zǜd}x"S@(B"S0'/=ZФ=.T4<=#[O3G4ۮNmG}> s`6#<#EHhG~B-Kkᵃb /r6qYMY1q/Zu@(խՁ'%H.Hu2Ckoۛ Pi!wyAkᑠTPuDB:2b`2RJuZ(ja[([mo$xm M㬤)ZUGgȮ,cbelqAQly<髚&ᬪ)H$b~T?1,u Z)L(E/O~Rr#)o%7H ̆< :Hgm]ZKvz+[yWNݳl்]].<6-oI?@k+ɊoпmڢmM?Cd#(o}=VӼj'=b'Q{#RQWPokϫZ{PyC}F݋~Gb8Z M돚Aqu)pAs0Kv^Fa):L>@t%-gߒ[^/%OP0bhzf:zf<[ϬR~y{y4H.-J;qmzddj/xg>r6QL*4Yۅ͘XA*ܦcՊ"ܯV$ՊbuDP-/U]nڇ8ܢ~ߒ<[Vﯙ6OrMOr铲ܬ6>@ bk]=P~"z[}7M'W`H'7UY}{~lR\vjˍM~e~O퓥e?m%M`W^+GuiW\k!. =  Sa$"*DlUZ玲G I+xWNjP-ԮZ7b-RjٵFH-Fl#x벾l-֗tE@h85_QW9OQԧv(ɾ܈qo`S<0ԣi bm!<nVMu 7qWvℨ*NHU* *_O#U@^Br)=IO8u%B2պbZWhE[cXZ"TQǝ~"D9{}X@#l'j髅;}p'VX?3MיB:Xgm$Vk$~5|nk r ahm0A ͹2v̲NF 2}ŀy|fWZKmF;3@n|򆯪Zwm"g[s+zCE-}yɾlٗj>4zW;S]B"2/ʄ #}pwkD?1qgx.įhRM1k2-cM1I0. 
$pQLLv|u*6T% UԆL5/z}.^YiB+7Ucd6Awy~OrV+B+MG/l$xBoAy"Vx`edJ22]1oǮW&yuIk]|$8ʒheJ[Lib?18>$%qO3P3lyIVsE޲lh/ږ-xB I3h+zĐd%1$Xq<àv8N/̡vzq(Lt!y Baqz,z;#Ϣx皙`|3> OpQ^]ԝj}QPW~qv%ƜMWŒL̄ < 4[xUεs}0Z{>{96Fyis5}T\N^4;;h+2ש+HqhpGG#[zm!rƜ6~wTNZBkjJ\εlZ:ZJKG\w\$6ĦkWS3^i1Բ x@DdnDd#KDv/`ƕ2WRvq%-Ha:}'umD'@2Az`ڰ^ya+p=0WsxNibcvu-~.Cܒ!e?,X޽䰍>zk_ N`X:aN gsa -rv-Ηtͽw57=5QӞUlmϜ|)0Ki/]<[dz,q$p|IB7ʅ'bGO5أK31cDz=s^GYz]k1=ϊvĜ9qak[ze7= ݇ty*l+Iu?+)h9%6 1dٮ9O7Ψ"QNJ`&mHM>Wg]=kUED5I(F#>"Z늱ky+.Q _!\3 ]`eD[ɣZ@Cкh69NVl(ps4:n~7)R$ D((V:" î '|a l k`dX l>BG7vCn4@#`3 ,P0,1 m~3(RD %;@u`@{O T?&dZJEv/\ηCl"c"&a))w ZjEI-c"Iɘa0CrO``d`T2ܙ WU,GZp#5CG:eXfI[J.JDE֝pg->y 1%k5?0 % 6~Dm3JSv{B` /,68IZjꢖ`tj+~1jBm@|>AQ4Zꠑ;h86S9δ!zo Q/YAʣ_~YϷ& V3E)J]Ot:@+0}udMWg;Yl+B!]0$f[}(a'Y6:\d _~.T2E"qbu11Fv*RpEF#ki9㰹˱+FU4*)‘`IH>fk{}82~jdh)Pj3T}`XT W:* Y <nvƮ#bnvƮ:HM^;vu]`ج!OybR*ZT+ik$^cR/)? [@An nm;ihxebc;i`Cؑrhv~35B3ʙ>r&WХݑ][R.jD?98v9*2''\1o1d*6b')GVـMU6hafpXyg\bnj\K!!JBwZmؽu5nkz.`-nxDIcu_8V?hhN/9/]^\Ű΋qw8RG~j> &d[MLzi^ACL9M;X3e|Wi#O |{b讇+ˡRAPOf}3IP'?c.>uB,%dak< Y|/R{i BPiߧs0OaV#as"))+̹Ƭ`5fZ|<){Q[վa,M 9'l9aq:d9yb9ؙ` 3vd,n:7eWq󉳁Թi`u+ Jɕw+C4qFwuk||rGq.xxM>'x R :걝S6Hܡ"}O`fqG~c171?2E ٞ/ iGOcYJ; ml"+pB[]Yqo]0cv:ZAXcKܮFN:k'iKܘRV ]4tRXT:zI:pmususB~0Z5hK1MS#zwGq8[g3Gou,R yEZbk!,P+)Cvݘh ߼0M_4n@gbggɵ|vI}VRP+ywCɓh#SUk/R qxNpa'\A[u3:A@, Ďrڀ[څ/p|Kŷ &֪@JZ5*VeJ}uXK^ֲ*V#C⁑Zxd-@4!R⇔&Rwq6k<^3pu5G 15$R@"~[O `y.ToWܘ$I=T"dTzt%{':ndF@MRB`MRB&)X1&)VdAr+F"=08$<%>2s S0ĸ 7!ݸ B}h7@*O9K2yj:zdWG/}Dr"DSDnhZkS4WUKќ^2gFQVKX{a߫&^gf|a扰{D-oa[7ڗ,-{fuUU=\祾uI'yWõu:ֹpGdjlOgߟ}zڽ}{ S)Hwl]Ar$avIއ xuN yWK]o_$9uIr$m_1&ލp7vn j}gG=t{JF\[WJݺ 08'{ New"m.Ud  1,pkhjh~n솤jAak$Q*iY/`hy%vZu^ЪY@+>1R7NQrR7^M ևUϬU9dx#>7Ӄ*87΂t)qhnmx 6E29v/Kl `mXq6̋0v^FyhNGI_,@i"Tjp>z$~`r47A ݛ$]bj%JkJM=nכ 4B GTtGTH:#*֞kLFD}<Πb=YjҞ,nYS]GjLv 2M(܀sr8`9) J{VTY h *Ԍ+"U58f\ڭuϾ6(W.Dƕ`hQǕ9oҶW#'$$f t51n4Pߍ_o_W3$vN93I,i ^ֺzU,YǕ$};)/U;%*@oYmwוۚF\TqC&M2،#RT@D SD@ovH(OHR,U#KR8JgCYPK^M3]W$h7rŲ|VnY),YG Z$I+BJq/9H9H{ 9ڲȡ0L52 v{o.fxv5_r:I)F jΆ6՜ ja4,9$5|5[@'HuEmQ8ʤےz^ߺNK]uZŷuU+ҺvEEZ׮*D(}[r[&8Y\;"k]n8~떺*E0H5UyYEQ7*Pxc7 e^P 
PA,ϩ!}N k<?3@ *[ ְ{ Bl86F4‚R HR rU\TC.}[q>?u} e-gmVDӬ6_kE8' iD:,rJvSah `(]eh{qBKA׻VnfPv#L}B2U'܀AIOtxq͟8_#V*蘟Y!6`&$cO3]юeLdø*2UOqшqg~LףHk8Ah㳡 |> }SPt>,&)#r7yoWnO+aOӿ> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 192 0 obj << /Length 1030 /Filter /FlateDecode >> stream xڝVێD}WX+$i/H 2+A 'c)C ;OڱAQۮ:u9]5&3TVk⯅J̒"C~AddѪԥV(w*eVWFM[z\^&iY=£:,m?7 O!dN|$ @&r9\؋>[綻PrlDvjY2\ 339hއ,baG/#tVYeen⋴YT+m (ޒKJݴ)ofxw9qsvH0r,\AހFU l=ݾ^=_Uq mq뀂^=ъh#4ѦLڪk' 4;')-U8Dmt ļ"&[ 4ğy.ND`/x =VJе^>6D$GwuG\&1ټX|>yfEsz$r3pÆiK{\aBP!B+:jȒ,Gnk 12ȶFsjSsmqp-֛å&xbw-WDj3rFPaK3&F:$ )'gK> /ExtGState << >>/ColorSpace << /sRGB 197 0 R >>>> /Length 163199 /Filter /FlateDecode >> stream xˮ-ˎֿ_U oѕ ,v] A%#UAF0r5>kn}X9HƏ?m_/__KBW./G,?!.iwWkQ5p]ՕqIA٫| ՌqundE`mph\Eq5}Tkܻ~wmpvzcSr5V 6ݪ5eF'Ƽ@ص;pk;^ꍵ LJM>Kq&/4l&e/<ݿ~?(XsaϐU\o2/#T-,s)?TNB%xuR(^{tA,+|_5fmb=aX_dn73^GG!O2~zV)CL5c@̽Dg|jp#k=Yz`}>w tX O;-w꼂Ak+3dmx(6چ+t?[+#畠X]Tٸ^o6^ I\hG6CJ423@= zwUUbl#m?V|YS0/zPDl?_K_gް'뉘S} LI^j-4jqj8x\ߚX=쯩E4})X 0'㷛}da5؞iNj8tp ViD`uh9^3hIx CzeV[5k-I~o;g{2~W{C=x)hc{ /uYE߶e~t~OWg/ <$[N_ =i26᠖bބ]ϟti9tHX'):z6^_r]1g%9(BZs'9$OT'6,UpCu^.z[~~4L`\{DNp nv]g_fKhw^ U'V9]_l|־u}O[췂~a_x=|qWy_dwi<)okQ?Pړ;y'F޴SQI2WzX9ׇW`2 5r7 =!s°sW&pie?`۾#Ł8pPSa>g؆gP+n#2*wGd?j<&W5u_ϗcZ*<7 c]7 .\"'z/BOVgJ+aχ?P0ǃ8vW,S4`\${^>/b?ưp| fF# k_xy9p>=(0laπ 0Aϡv! 
,ҿnLFPuwñE̷aD ߈s@:aыbGO$ /ÈugcZԫoWeN2^'7BF na꬜:^QfaqS}:%a]0B~|^!6g60k{fY,f_Y?]q|60Ū{`~7{Qp!h/!UhѓeE_O sgegG=J3]C'^:k\_/X櫑jJ㦽O0wXRłGQ,FW\m~8} &~P?csGkQx?o˜jϭ 6/X?'8CЗcO?WWk039 z|*\i`]?a5Yo;멍ڸ~fJ}sV{iQwlH{Q8(~bѲ+~7/!XD^Q&V4/ORDRVx)9^CW7K?I*bFyQ9ҿ ˉ_ks=NRQMqn-/|/Gq~'8c\رy2͗YD|7JcU2AeVeO.Kx?wy~oe=OT=x8Cp,U>Uʼn7c~r=($>R_GkEz;cvŐSٟ>or?A.[{bUiQ~ 8s?xw0,>Ps d#qܯDQx{#T(~CG' )Xg1_:Orm ϣc=[Li'w^/O;LAn _g74aU_S3 Dػo4'ǒ=_}@6#SS{Wh+!|Wk-7NOjki7Y<}`Cwb__[2ix|/:_/EW V7Wmv]?%njG_~_χЏ0c)x6bo2D"=P1y?6Y{iZy>|"mx!N&/K :7ioc߯|{of?0y9Do~Ϡ{r#0Eza[9B%cr?,|94D#:j%BE|:^ѧiG2$C~ !^y}g{kf8~̴?&)ʟFWsLI _o(Kp"_*/|ya@w*oun n<$ ڴ^]G|lx^)//Z^o >G 0S8+"gg jOu~þ~-A>"כ_3/ڔ.XWx&'|wZd~IjG:Ҿ@yd;ƫ', l_~>~^LCzxN|N i?Z?dU姊~>Á}`u=$RJmo#?K1~~.Oo/I?Vk|@!|X֛8Nѐ#L'omKwt;ixh2_^#!yO&?jfuJKKqP3=_Вsp}h?u=82ߧƿ uR}ygK| WJ|<SqPx_+>߫=ioHxz*Rd|`C ӢJadzv"0WO&^D2#g_}y>OW |/q<äy>.pL ~(FeRٞR̿P@9߿`"[zݬAD} l~*Xߢ^!T'W+|X\.hOU?JW )kUϛd=}K0K5wKTOq=pt-sV|i|oo?6]5]"&MËAUoYgpm'xLQ_l4Sqc3q4wo.'4+G? SCҟc+Â*} پWHyt{uXso-_f%fyц| /q}h_7O0i[2y 0\{0⹢'.a~]7|h󓎀40_PgɌKϓ?(;ηY0>/Ogؘḓ.|?YZa`___#mc{(K^a4]#Q<OE1,Y>Jԯ~ۍt&7}2δGr~D{~BCw2:K~=$ģE1Ư6迤8s>a?`_y^VT,@]{\oA ':羮K Ƨ6Õq~UW p~Q\9_*8+F|3dzg|o :xqo s܏<Dڒ`]G|s|w8Ғ4T}b9?K'F7QCr2 {?Tbl?~F9ggc}@J0 |^=gNCMQ? 
F{5_u {ΏV8/r>n=j:ԟ D߳[gk/|< ?Oa W>s}aRs72c3Ng~Tݞ4>t~{TU{l335Eq~6Řٞx|`)}°m|fWE0I,#(竦X7 Ѩ2GG5v֭=P*;ܿ fqҔ뚏#?jOoC?ag0^>/ġFyÂj=!I73MWf iۓ'Wno=ϻ\]0LXTy~N\F>6sԇ@ܩG?qsOx!ThkjoA :%C3?^8=Wyi \OR7+[x$Ϭ}Q1|~x)8y|@U7_5]#U,e*|Ҥ̿_W j~}k3Rg]Ay=(F-0PQf2?O`s|WEio=%ڳ'$Oi_4|دϯ= 1ؘK'e?*oSίC1D0>'I>1񧂋oQji|\ |ofEqc`#pE=|٦YҰ#CR>` 2SԵ?+[@o~)`)1m|C NfWu DsPTRx*)+*_ >fsav^H`jϖ\Ϥ%]dGl]`k~U7&w%CׂY/P`~Dφ|pE;j>L/yxO~~=Ozho!#k" 7 ?%,)t{gKI\·~泤>*Q",/a?jS_gb=`/@UA5/ZL?||Se/͆%"W bOCu/ܘ_u̯J}D_CaEߓ|C~]>Z?%_tJ~[_17L"~x f߃5AW/[\Llؿ9oDVd9^j|/9ğs}> Ni;tW< 0dk~O7y߷E8^y0_;Ԛ Qe|p~̗=,wɷ<5^A]}4{C`~i?.X~9jgO<H k>5ߵrC>qK 7S?婯Z>t;5A>߫!qXD!ϓԳ_jOIX,UoLh!"XcS{*U!Z<(YIإ g7;~UmdSƗ'i$/-ڴ^B:"-{QGz(t濖,m|C[2~[/5fAͷ3Z(G8X?UM}*TdbxBQz|gfϮHd&27@pg*:}Jb(jW/u Ϭ,6O?$z1{eŇs-037bXkig?W/i=ݏm?b=/M:Hoxk~Aqgߓ(W;%Ak_c}]'>w{YIda>WͰ_(gdך^$E}heho1(XTsoywmi=Cg{&Xwyi^O=S8{h9Y_J~/?7L-wo KLO1?ԓz\ OA"ԟPcZ7bi}Vs&v]/ (R/y7/f/J}a@.3 )6k=8(XDv@|%<{t籌P0I"ƆWWN'y vBQE7&;o3Z_ # [=ltgxb`ttn_>kB3PX}GSO㕱~bG~NSa6խSdmiRd,'EVэp}*0f=jzYm{V[$5g5Z;mKo0OT,AO&!ynXfjO&4xV ti5"d)hi,@ >I}d9|l\p𘏡 |eLJ_C^"E>ۋ0 EY $q(1Y:=MCyH ?->yZP5Gx4r&pLF8'Mdߧ?]p>m0Lyp~ <5&K aĥL1 o7@Ov,d$LvR7@n.i͂6qkCfBތoD]騠7܌6^to؄+53nfĞU]x Oc]L3ÎfTVvM泟v'<(1&uw^*_hZR4RvΒshYlԛ5C0f / .pbVe)&55p ewh(uu0 ,QZlvh4~ ?LOFD kuiBa凒XҺĠ?,7Xݻ%q]CRWfFtGP,D+ߝ~WךgcaM* ØDTF`Ldj4HШކrةnV eqoFAipafton}hqo#әz+gj6cjӈ8\0<A<î^.p]]9V50WH@,0^[{@Q hkS̫ݸ-U5Ѝ$DGiVa?-a ҍYzI817L: B=a+%Gj`}v݀k_  Z^K 2:G ^۲JX&UpbFQ$aOk9!OgK߂'ˆ5(&N3sL莳CUt]?UkgMJ-IJ'8XF#D8H"FHذ}ё0[2¬ZV cuˤ# 1yLN5@Kd)Y]74f<+s&yB`'eea ;đnD>EgHZ<\n5A6eaRn 5*p@ف0Z#a}wjO:cl@RFiEpǮ "u$ˇhhxt؏n8%$Wsgbs1@u%slmwbAۜ0GFs]WoӞ Hv =+>,'>en4HxsSRen `hiJWJÒtfd8@\4_Y82Xw#$anE]rqn%AdW2Ye%5B6]l/pņj ٱTO$/pa0%ykyU؂lU@%md?EG Y? -/C)J>ߵ 笌I_7$p50r vILH6&nPl+ dZ+vZ$VMkU>Z*2X,sQ:xrW[tonC:V RZ?KW䱔%-},j(1Gٟ D{WSD"(Lmmf,KL_`0cYjZ߾g^hg_8ׁyT#IU膷dō לlZa7ZYҸi0¬'k99iƜ3(0Y G jh~(m.h-nJ2j#k.?5׬.%¿}kuBzS_]'?z- GZh wuw7:.\_NG! !Y *& !J8C $z Iq>XtxU3^&:EKox,SQiϺYl I-K82"Ų%`1/yiqet;+~尗zJ 8lzϧi_?&Hh `2&SAg!tiZX5:-% yߥȧf&-(Hw$IS`/H)MP; GW:S}=Jy1 u2MSDXw䥵(j-~= Z[x!mH{%8 =# +%:y n+1H$DpUvN%zgC@dEY2HH. 
H?.}JVu2$eEB1ɁWRc bBMsLjc}z-˄qb^#nD&$B*O[<5:aܭ*XCv#Y+.b/5-$%Sb|9Z#V"~V86ȠWMW6,f@6h5P^}뾜YPyI$ `@c㠗t0213 u8@=gΨW*^]"/i!{@ HL3bLA V< ca//-*WZŠfv8(y{E3p҇ڒy8b[y!Sʼ2}ou w8Bn` :$S iGǑ˷S]0RăLZ<""U^'"tc똬*j8j c'k^_l,"a 2"u[VN֭ז9۪mY`/3҄e2{oS6~e?S߫paVu| "^_euUчcr7(= ou:?PCοp|,3l \fX6p`t—l(QҒaSBTL7BEHL#+,,M[sXn ܱ;g@ej$ jUkNHu"aHEk:oE[m"Ia&LK!(-&%-;^̏VȡKb_Էa/V0dE g:\/T G? \uV0\g尗85m=`3/a;vmxc9 6fzK{56D _<xL6ǣpu#@' =2nhB?z,\^~[~3^QB?ׂe:#:D͝5{D^3,;b!8у4ƺT{1{ƁNw Nr2\N{yZ{7!i^f*R'v 3fV4wˣXbc/b'%@a)[qf =4rW+J]iTg5鸛H:^~;\ϛ޹-w_,6p _p~o~iQmcim,U;'}jI_}7椯}hvD'o䰗3\6Cͥj3`/ZUK"j9_KL|Yup[85E'stlNJf9;h5CG7K.ޮͶ0A^ũ9tA?P3bK/N ,SLl6xTk#9;~ZjqrcSdc"3ȓu8 $}n`>YSzn^Q"d`;\4-VonQzm _*LǮ*'ύg*Rqo*xUrũ܎xs;$2Io9\rQ}%h-8 ܾ6W94?j!ca/| z @4||W$  I/NE_1ΎRpc&.[ytr[Ygl^#,!h;4a=oh[筤y+Tc1 3ܘ;Nly֛;:tc/TXޛ V&N*Klu'׏Jf4w3Իo7QJ?7BbeVՙ~ah0XNf9aHꕓcLF25"f5CK 3\Ag"l x|_7p8Z諒Hh[B-FT6J# 8:JQۑCWJǹB_:;NN46RT ;97EE[d JL>fL9 vڼ ;߆#}}{4t=}ncx{^~˃]^ovp/vy58w]nz]O{c+ #/Nww϶_mѯ69 ~‘.wGt^\,6S(q[,PyGݦPnӧRD/|_hRIL/a]K%vMZhwVV+q0n̈w˿ؑ +5z+I$X쌴I.zڤ=S_Xwg3S-9EI1 W6$ =<$$}D1/,d'IPMg)Ag7tOym *%ޖ[ xf33g4D%L)Ҳ{}GCkHncfѷi/ev/z'hw'8'4'G{Qg^S-<}Ow6/kBAJ̅W#w#gF>g쬏byKs<+|㱒k():G9m~ű9`z6җ ^¦ < 1Ǝ1C{e' q,q΢~1^c~1ŏ (ܡmCڦO׋D_LeFD8B';÷GÕn'{ڋP=YFĶ*>6—m[W}غ.] -̉#i{( xQD0 [A"T"Y/©_5Cͻn{5fٺ~g$Q'(9Xʀ8rp.oj]fft3ڕA6(-<7<8OPBᑥ #D?fWkig/֘wWgUK^b%߿XOb I.],7XO";V`%&Ґ霌I$ޅJkO@N` 8˿dBV3Qi De꧹= (lkSy˿Н*=ۆ`6|6|x1|b}j峭 GuG-GoؠBi395crjʤ4NqNI~͇l\~oh~>UDݒ& KE:5c,Pc5h 5G޺[VOᶐ8_iL;BNRbtb_#gW;<Ƿ$5 w){Ò5pݬgZtm&6Mlv~Gq\dPiF>6+XiygL}rh .:h(ShJf|뷋,؇b?{`~mㄔ};KFoLi(b_ZVL&%j4gCmfuߌ%oZ7Wd$WaSf⨽q3o*j:]^D:dGQa/Ɗ׊ BFTo? t_o{k[y؞XNڞi_51N/DUu+v{6l>tE&ۇ{yGtHG7:/i{Z_L|<샽.&`bBzzx|bq{d|x!֮+p4O&aVVx˔V +sہyaX%[cΒmVΒmN>Hჽc4TۄVRɁN53\,Z-mVn|'${JeCX~%"*Sf8V0XǷ/~j_8GZӠ] ᫤i1s[bjܝIlJO^J&>J*eaJ9 77E7͠;f|piӶ>n0*w5dтs}iLrm85 &m/u~˭wޞ %'@VcڈתmO'`;c'YI'M$%$%I _dhA9SwL.y$pox~ݴ=vӟ#%,ĉ)%EJ`wi-(r-:Q;Y. 
wR-{w|Itn'|Fz3<-m_i7_v'O4WO0tjN'4 tĻjaf5jYM ߰4p=\$q^vo{pn{[O>y j\oXv{ֵ} Ga(=sQ\ e> A[B3m7,חXsjN4㺝dZi'rO&eioͬfŃ,tr>d;fJ:jB%j8jĝӏsy 6m> x9&Ӏ_Tӧ ,[toLcg]x SZjc8˿Cq08U2r8"ywe 4`/B:MԇNS/F*'*>('79#.y>!eTnʦ!x;)Wrv nwXhŗ`?ߔy>`>/WS=q5Ϛ$J=kc5Ҟ}iԓ\"J#@=vEO9}:}{?Îwޑ{yrZu}.^uA;]l&Y鲂e/ygJβUesPK S>JY4gzg q!|Z9>z6vͧ|cOL  Cy+~rKzS W"%ێVҒm;:IK߰]iHʕĮQg0Jg]߷njZQKt˿8.ӧ]?vYCOYliC%WHWrU܅t >OgmeK"#ו ָ;ו1Skr3DGx.xpۃ?;4V,Ҳۢa;JU%EJ㿎k`W6|MWlo} j.ygoH˷tWd'pՔ:ved\ed! TFVcI@I@^rtWk˳n|ڎmלTo @'l{D%ZKȡħ8;!f t39~ߋSpeCFav; 1͇y|2͇y71~';cNnoǶu->Upy9( Oٮ2"{JjHҶvzyP.kD}ZkeܽV~|l$۠+^'(A ,J`x]y52%e>e>=8R8uLuq2e۞BKhiT)ƝTQ-y屛U&!q fwLH&T7ĩgX?3z}=CpiU4*bʩsrݫFo"*2רnnרwt+0g.L+0w\[bcA% x|*⢮u=6~W"[ ]x\Cc/mQo=G KaZ>1Lkz?\)*T'PxX§g3g3 ?;W-2޹j#WmMU5WDƫ %NJ Osy"<"H/6o?ț/l۴^ i)<|Nî]WiKb{/;wr\ۣ^o=:NJʊoXQx!KAR~yon<\--eӒ'Wܹ4^5ڈ+FVڈFV #+$bRd:oN$,ӹI+GC\O!b--ZYh#Y=m<KTW;@>VGV>v?UPhӰ8ָʒYd>x|*_h5/V t  ~MP;kzwzo|ot=e;jG[nC<]lj\=38!>8; YEi폃yZJ=-yܞnOKb)e-\kAUzfFߎ.;aM#zK /QX㰗ʞU8GGOȽ'Ǡ.N=So%0cgPQ$8Zb (^~ sźl^(^oqsx㧗V Psw%3H|aufn:P;2F`N o7][p"`=Jڵ,=^2ȧRvWeRuOE$%hIqq4Lbɨh|QڋާgujZ?uuh>CЏuhz:78-iZu@'?ՅIXu!x3wJᙲҺ^ud@>V77߯Ľw3~~T3SM¨&ȍF(zr2BT d6^^xc*WxGKOCt9W|[l.l>]->")pnҎ׺I._$X/_ R".wّSwz9rj9GѷHkluww %q|7'g 'w¸+@]ig;_^*ŀoJO.ަ: / zL|E; Q]\ xR*9#t#o}> Eezڶ T_G`/Z~f0+],/(,\3ޮȏO' sǗ[C0+\G'u 'U >|efz].0Uv|$3bWٯ_#\eKd&9ט N5:a |L x^,0KU?t[{4^3qWۘi ii' $Ȗ$AyKKiӮGۣݷV&Aq5pe{yrqnwz>oǺ ELP.R{%M+o˙F7{Ã6dy-֫Uyk^!wY/V>vՙUj)$тR;U\̍|/k! 
j?t ݠi|U[ .w[/(YXwẔ4n6~~fjFj~W3rKh-fx1Pc710xzy_nyA?8vq `rH//& Eli=>l Ltq곛e75`MucpQއ4P6d߽iGmc1:gbbo0WphИwƑag.1=gK(gKl.+I@@m"L~!i\U `Nڍںqάm m9QQ߲҅l~J_ϺƇMe@ %i|NPܽ$(Yd,HIzag [La䱗kn;ƈWdS8Ի0\{ynIg<:#:*O{Y&<0wA&9 STgvojQEFyjQE)Yj6ZAфV06uRneԙ6olł}iB҄&._.;kaIZx_($%w5dQaڱZ`- ݇u:Zm.ip۴6m2zG2£'g' 5^UTb'VQωU>$(~(( s$03Fk ^ő)Hv< qL[B㙶ěxY>e|b|2zwxF_AG__hm4Hm$Hֺw vm:a2ky̺fm/FhIu$oI{ߒDm7ߒ[Ts*T1vÂ͖3?㋣IR`ꓢ\.yg<: y,NeYߧa IFKm:FWmFW9؉B_ b};Қ^IL’(>8ϑ fd&aG28G2;mË$b /W!~##!{C?Wq*&a7>7ˌe⍏ -K΅WyM &(s;˓T^o^M>:Ջ$TŔӜ/ֿ#rY'[O8_i/?G [-p[ մ| KjnLwڲ5x.zwYd[+A]Ib qCi_fo8:!8Jc/[aR"/Q4צhޱQ09({ю2eN<5qMM֖?˿V:8ѫ>zuKhxʺ[:[:{y}H>5ɧ#|곧rEy M-_D/於Q#2_F|E17#ka쬖Wwyjyɿhﳔ9[ԥ1 KcRXkkyUGn2L-QtO11x4sӂT}d^83WNVnHcz~%`S|d٧`wیaq6l31^NdaXa/b bƜb9%9%6s4񷎅O_d!qЀ6zܷ 7J튥}[r8t9xoNYt9Oo"j8 b|J:Kq;;P<-MȖ&qȖk-)~6%_3_)\B׮{ _wtC$oj9Nas^ُFN~yֈT|T3!ce dFqeV!R)'>,SO$ė5~Sʁo@R0OTs-O(Vv˿5z_I}'A…'>(%]n?^ޅA^50ȫՑ7IFntVrJ^޽]G|-BZ,A[BmwMű6yyp wTBI+qaUV55ei5ȃ LqqXb;i-= IrNRqTw#B`31d}ClIU7Y^RV Fv-K7asF{gsW+ܕmJvo(1?(g% ZeZ3D /-l\bֺX}n(;blbKl'w͑-#8riO{dovpEFKduc, )Q`/R,hRA >Aٕ=PnK8$:U%_m_([r9*#uD@ +Y>7 8C2׌M)ɞG$yq` À!l NDwCXvb2,$Ib%G[h۔f]6wuǟ#;xH)-l|ň˄&K-y F է|7  a r斶yƼyKZFVEP6z7sj"k8ql?o>-@W`3KMcc 1(lwSPt9>KD#ѤhR8e@&5 c^r5׭\5S/H+-Z9͹mνns s j-e`\G +CTySwӆxw=u6( &(? 
!q- di\-eiZbF ZlG mU|%kX-qX0,&E<;REؤ!VM ߶訸}4J`XM>9XyM/Bb;='ywn,WeDf:YD|1VqQO@:x.U)(i Wk| 7Ԇ"ZߠRԍ‡,UJҼ|:okJoAV䵠!s8 _k%hc(tԳx(MjZv6Yfjd+še+_VJe+|sGV.VGAtZƖ؎-q' |. D%;c$6.fds77 bF3 ļp&ՏvvޅjlW}s:8j1QZ8j7)QJKM٨&Dݦn%KX~fm?zC ҩZO"V׎PkGZ)lMwike2X aVCQ̤Lϩk[ \紃HU7_瑃M|o#_(J_δ9,t,G9>0`e>6+3@ /6՛(MǾzw ʻd|p>T9 ?Z·?NVuZ[=Bgr ݼ}lJoX3_WF^R!u~g7geLba4{e1g*篓oJ fsj!|悩5vxUxUx,b8jG333>CG&kAo~5JU,OG\֥]c.aO4 x5M6Zl&TlL[1oILW*P~hض#aeˮe{/m~5rX0j]i*"Еf.l[]iWWÝD[55@ce#ʴQ bnj1vbqg]l""[%7AAsv"=`MOX4.9u"ٕ4=_:&~M@K veU; ZoT7ǣFV0\溆*nދJA{qąs5ڡ#E˦>U79iFa-a-ZZðJ˞"2͞@>V3>S9:!#-͍- ЖmOkYcc1ߍCČ|LyrW5%ZGiey?*>P5>h:+x#GgEk`y7`yri`}ZuĪϊ)}'6}mKv,2NdOށĶ'TʦOH0Ʌl8 /ޭ:Kӽ%ΣuI{ NW2tClqlt$wK#fcb[χ,[4+Jaňv#scQSRwt\xhဖ(Va@pȒmp1 C0 /2ZA웬iGF>X샴 M?j):B L#=1xaY+^؈ۀ!5aW/x,eb@J'Di&$}9Ox7k\C5demoi3$xϟ茜'GOgC4Q۱S<Ä:7hTHu$ //7܌mrsrã6hFшшE5AK8Ar&kP/7VÃ1~4ƴQ%n$:? s] QlegRZ w5v"ᮈgJnܾ ëpKAϐ=efƢ]i,C,f X wn_;תDQ_ݳhV{ )R~IBʵyj:~xOSZgz񲻠k}+p u|/P:XeV<ꆸ\Ygwϱ]cנ2^cgGI8_dD|CЯ:xo:x+Y\Y G4f]DWWC؆fl'u& a8l`iG{S vO b˚ϽS>PV+ S,%xŇF| ӿ3i1_ nB'ixQ.I3+8LB~ *'d-R^zs߅ :G'A. d!x#D "ǒ mEAk:[Qpň:Tn ,jo6I*{` 71̓^ZֶꄶOm{ޕa#Iƈy16yoy%G<.9pVA꙰VU8?(!6qQ*r%h&5FMD)bw)2t=#?g%D3CO;æYTv3|;(0)0_emq`|b*؀2xV^hA'6yɲ-Bx৞H[d[Tm#Z)G!F|nQY|nQYHBNr(ۉ⋲rdWXZ˔eo{Lc?zl7˯#Z)zFfK-[c 5})̓NrTCUmozHhHڀf$ `nDk ƲN?%zϋnJz.*.}##JuC =L鵘s-2#:7 +xB(abpAx$5 g|lgSj{қbZ!#5 =7DbC)TFZX^r.y>) ayC<癆a ul: ㆁ?R;NV 3rcZF ĈY=y-=yE! 
&M]1VOA/xZ69Qw[x뺺u]O+FD9*DNUBtTCԍ""QO|Y^κs;Az_bHsx鋼2!x&3يJlUR<[-]3K77;y")=j۽mBZԑ&;"/ T7nyYGuͬc^:M c1Xܴ!cƘ3bq%Zux&I>Lqz".޴V[KhZ;B֎q_wKHEoؐlZ8Y1 -dnC Y7B xp=[8_ݖ``V 0w#ޓ7(9(3j0 {pCLJ2'TJ_I ~5Dq̷7@dblrԁץ ?E8OO?/qUWxW;Lk‷gb3ow26}8Z(b+¹XElfnCO+?]6s = Ue B#Zg[˚uG)֔X>"4"/+b *hTiPZ#.7\khX>QVz#V3iU3 `Zb-8‹9?*4 M34#y ExM,APC '06giCy^Q^=׿běMh$.EkQd@Ks; 5 nkqVUĵ|-GZK\'ҷx\B KM{=gzލy{7ʳ\{GJUdFjϔ**ю2 O&@xxx"aPAyw=g؀65 ԀbBh4wYS#mBf ƈVt9?V{ԇpsE@P,;#C;CkG{.e>wp`7Lw9/9Cx'b<*z.|!qT2<;75q06lޒՆFC&NW!#^JT(|lw :Kug8ϰ盖껼m]Ҷf"V@X;P8150}ശԈpz{醴u{RwQjJl+OE]4#l̾k̾wňj5c_u>X6Ձ qޏ0x^߳S B`klOʒ7(kڿfwUQoP7Uoem^{Py4 zA7ϵ s={| kkVә@YDǍ !^xqc<-=x/cN_SIBx0$ :3@5MAj/N`Mz"H-lzll <~bx{ ߂`Ʒ`oj:{:@dT<>2tUtxv<*tȮ)Z--%K%sY"Cٯ2CӈPul+aFӡ!]TXQ3YeFig$lt!F8'j[^j̋^b^܂Dflr!ޢXH(K M)<8kQi9u{~3P2t:Pܔ;gJ JS=cѨybY[׻502ukE@FU}eQ_XY a+!rLuΐ+~E<_e@E[#ޟc{S|ܞ_i8kM;*]R0qxZqcMfXǸcױ{_rڂ[Q?<$j42 ~"UIiuIUf4F=6 zK7ކ0!Ty8y{Xb߬3ޙ-GХ7y,.e4&L0.!_W@ jwn4Nmaw _olB#r+r WKWd+v <#gaX1f;[jEXcpte1+M%2iRm[5bENr[EŊ@?UO'LNmvBd7vB;!ʩ8>|q9e놤r"l)×'wQC05]D*4W LXasV鴔7Jmd62 wܐuKv([􅩮3-uY+ubO>j|M?~w7% %Q8[xc_]ܮ1/ר5oU\UhWqV\2"5+b !CiO`9 5-\,[J7L ܧ`% S HX \,%ROGKVJ1)98) דS LCֆ"jwH^Y[է~!}6ybD7Ck*u 9Ŋ37]i- b1O;}ܘ8n\O%/ mIe|"+u^K)VRRR+u80' n#~xbpb%O+qHu[E prJb=v}ENĞaJfb۰MsŽ#PB5o::ߒ÷թ"m< c+7+ +Fٰ;il؝\05Sll2ؘ`cϓ=:=Mav0-^ܮж+^=*){ X ^Є^ 4ubbL|m]PEzVVo[Y?ӫatyzgCC [Rd5 a0ĺ!چk®au fz@47;@D*dbDt; v_]f=^s/nW(d26! alu>=c{.fQr8p-w9. k_4$[]JnWqR[_QU{E3e ݜp7'm7?ݜ];̑5|F߮ѿh҂<=\>h.ψsyo @IZdW̬ybMؐ'z'ƣ_^ϴ) 뽸_Oqnv X,-y*{i뇳bl2b7UHMe̔ڄ0EoB3P), ¶A:*i*⢵{ȱ#\\TO|]NC%:G}mhUudЪ#BY0PQx  +rT6GG5_?k+Y' U+ ^}G^.TQK~ܮč|ĶgvRn5}~/rv aZ!\ rvX9 F %( {X#_DV!9*ʍgZlm³&z}FI5'e֛%J xo9G{ o'^TCx[mRVWj>uDO=YOdP/ Hu-,} &CM3ʯ#ʯv;$ų ,g%y|Jsz>uO?a!SƇOM릆!V\^ǥ^Yb2=R!K)ѐGQ+ƈzz~=~m5/#{tV"L f>$l#&Qg 3Yߒ:oiloɘvn8-վ&T5j"08Xy-YaY\LH c?a_|6ϹQFa3 ?KӺEgfGc133efx:7 $J$R1D)+3_7JaW!GuJ7Y`.V7R+BPxqYZbRQ5 m.eO'9BMq~c*S(XOaxh:sƫEfpYh[6Vd>M#[h{^ί6tv׃݊porx 6i~q.}J~DO詠Ba+F'e0ial+mof9^x3+,MI>;#1fg qatz6o*J[R1e:z߷$>eOZA@?IgzKRilc4b8B(p&o i7`>?}U9PhXk1gˆtXFi;IdIacU99$C!9i F&:AÈ^8 x™^Gix+^[u/OJ. 
F FQ9iT?<x|~>l$֢[:."l- ygQwbGܬ!^&/MW3WG읍r1;w6:2I z#Xn9\Zxksq>u͑g%|1 xcX`GsQy³CL6t$bz R\ ʒ3gdNKOkR6b?i[W *i:((a9{g[geÐh}^韲?듟,݃@;SϚd@=3Q swth lεބ18pnWh@/^wf[@BۿOBlKtIME+F:eoϐ\f*W~Μ_/s#xeFWrDW:+*Sd}fK̦#^3 _9"MDm(тp89q QtdMN dQ*Ooepe+FSUa E:"6Qˤ8GQ 8/ԗen^>>G r-$Idk#ێ߰rZn#n֞,况KbێZ+c#۬A^8s22RG40FBLYMj& ClPco8B' s ,vvw6Oc$)o 4B aCx#'EM_^O|jQjVBkAaAxk57i)6HНFM] a1@pv>笉"63ũlh-h8A<)z RģW2dQkQ#^t>}6 f,jl VTt xk)[-兇uxըd^5)YC{9eMe+F<e%6҉1D2[722[laen#JqϷfwseax)r!BY9bt ۼ@z9|]`ui/;7~YebZR_"Ե1=B*Q }Svx#Y!ZZՒKV!6jC{ii+[#[HgMN*PC7|~at3ti 1&CL] 1l0ssEߜ.tDP23Bc^ť=mizӗbNgrN,P1FT^6)/8U߈$ шN)1)N 82}H׵$+U"1 KK-!Z II9CjɛC쐪ba0F%eaj钚,+tMB,MGe=:qGq$cxD&C+d!TcЩ1+Fym5wTc;Ɗ++TiPo :mlB.6<#TЄrbACJE<~E5>5NXȹnT^yRDp;_u9^fF]ή 5ǩܧnkSG]qWZ(?Φdcc1MR7rfM¥ ndew#cs#+ BY 9y׉6FZ XsFܙ#k +wvP [4oz?>:ŏ5S=Qф5 FEfp]b̻bPy u&Ί)Ghi^툤t_1EH8R^I5Gy+lXCk瀪_I#~V.xO2Ekk4&1^$'c]cz7@)YӔ଴iJp+F+ݫEa835#Cfx}'K5 9CdA!9ᤧUewAdo+0/fcT]0tO͌_ҡK47F&5[3Z賅\$Pc4*؆,ҌӘ؀w.3wp_/m~8 {(D3&_8N= x^ߙw+FTyTRHGxXHG~GaMaEWkSVb=: 3: ^/}L3 Z9 r3gi7%u([SauuǁYJs?Z#q5zW soiSaJZc J;:ǣm;Yjm=ǭ6~;]-Cϝ`=vDiᮓ#dM%mWv|zpxi")P)WK-*UV=!DIX)$6Ѻy* Q>`#xTa1M ;LNw!406:N21Pj2􁚓1^c-Fʬb/p!5>{%>|X1Н1mP1ǖlyqxL2h1 Brx!FCKcK>P8!B/(,G0܉r1cdzjRcK^α9ԃF72z`Z~X@xe%t xgдԵƤtNijh@#wuIp_H w^y7Raw:AC}7ĺC49P!l(>ac,Rdϴ:(76ļEDJR!J*0VYCOiOXҚbk"<vE5rm1mqrm y7%kvZnƹK ژTode.lJOHT,CdTb#{3}.<V&T@T36zX0c)Wz ](޾T TSS!PSS&Rr$ār9#^ ¶o`,s`rxgŒYbɌsϩ.͕'$7ߍ+mߍ҉-My-bݻb^l^mh6rZ -3eh^EZECXw:NtX,ٞ LhvEpV7]l8ņxk,f3~kkXd'ˣddy<`SxP}JG\)3בPXEEǬ}Ov9K` HqWIgUf}\ [dIpŒ"yʴol)Syl碐MQƈ`e 3-#t# !ްAm9DJ?P&+eiExYZkmQ> Sz_k*mɒs xl;sgzijrIgesy+s ϵ"s۽2_3S?3( Ymn0`nr/FXd}.8¸p.4n2~( (X4iBvetHo.P;8kN{L'ѡt/) 'fo>؏8R\yFJuG9ĀRFz_Т=ėᾗn L1R)-2}29iv`pǕ9lw#xXxwvV X2> i:E<7WO̬Q:ҡ-JvSϚ*BBZ#ވ#"Y`aMkcG^Ž"Lj+PY\@Tқl3R -N, r\)#80P!lN^WK #Vן{Mw&Ea;3Q#h7!j+u(ۑA݈U(v][,C~Nξ1Iq]6AMNL߳Uϴ.g>NX̫,mpU;*1g 62KM(@oZ:eLQP?ϛK3WBMi[b͈<&zq^6T8(:Bˆx*W./rPr")jňkYcXb)#Xڟۂ&9`z隴-ĈnqBMboф)'-e _8yPySASpjfel6MmQK%{%;aT +$iB!Yy^H :5چp !kXn#:@9 Î I/'~'5)~b}aoa@>&9S@'Ptt aCL[㈥Dfn̍#t>sq> g>E%bߓzqZ$θ^};c=6RfKCK\< |Ӱ*մƊz 9,d 2OFFhH`B}J's͓y2|Šr+[E9Ŋߔ(|89E~~QӓBh Em|L 
U;}Br~n{<@A5Gʿ8\"D7_p/Fx :mlkܜ?cq:ƠACryy_zR/#M!8(45:WAo\cI7WZeQU!ŋXbtnU[w喣Vܥj[!l+6ȷ݋,UT{&>(>OV4N.88厜66$3hCZ `Vkѷk5v_ ^kxz\QkZ^Zרka9,E!l'$SA71cNRl?R/Ț6ׂ~TΟr`XoyOWdo(i p6Zz9ԫNUMj/)VX{>O>A;ʙI*ʦ!*Reo,! NΎ10,oOTۊLBh NEԹ? 64ي?K>gte[ÿ;lX- #np?eS($k^*r1*D b!o,B S.+Ⱦb<ԓTOc?tRBqJBҗWbJ7ֹ6չ6e\{PT~hxQUT2AkO4Nm\)n;GGVtRUbHIxO󽐼R%k8۰4m4os"pbW+v1w ]l"рU' +*ø1UJqߤu6=υ6#`3e'$zГi)K:U֩j[Ϋ=C~pk% FCW""ִ{Zwn}^{|0[]#,CtXC8ﲕ a7C<{ԑF=7.os);ExGQ>Џ 9Hgh֢@}RpE@,y>˲ Z XhyƉ;0ǣ[qolu5y(ahL"qbhhB=3#'\*6ci "+&ls9H\Qñ4Y՟kE8Zhױ27yE-~pOݗ0i4mr Qab e*hhP@d˜x`рkMS55o7pm .r>36o]aY( rd߬F))]QJcIgB,]LB<;6aVLmfj33)3"r빮՛%&ކ@Ʌs>77 1I^bԹJI#GM =ʆ&n+ƈ\lβ9m, 0UO cJ=Y!͌u͌uF\D(pt.~$(#H 3:mzqژi(hXXOۆӶ!N'w\̵n n n }| awC< #pÙ5k,zHYPLj7fS ͮ#gٳ< sˢkV 4!x=~D!V''>O@-}񉧈O.aטm+Ĉn\|afD C_|aZFIDj. B}ie[AƳWedK[69c6 N; Ni;~v !zȀiOP8ECmOa!IuVf ʓVx sJF|%# ./17/+u[1Vixj2Ew!P;PନœX,Xi7+:0GCny֭EvΓқ5ʙrj>8[LeTIT!l*5F9n&},#~#v#Jt#FFf667Ek[xeŮ"km70eZg ̹Q˲#77oulUfujmsS_\$ᆰ'YXy,Bs=r-™h\>J n墫|l%?Am:mnO*gsE-"J{ogݐl8w?팒/)l!UBmnYmXbK* M_@LU b;_Av F/v BZ: #yݑ87L@$b#bMU":zxcN\1-ޘs\@L_8lD9;n[w&ؓ5F 6`do=V4@EClm4J P]t >h% ;bKyp!xlf 5qsV8VZVXxTU,UjkUJv>x.4ye{Frul|BV9!JebbC$2sh9Vvxiv/+cָ^57Fؽl 7$z`ԫ--8bB ye42WvT߸zI+/3f9 _8Lm_G8L-d=5.Σnט5*EJAV^U_Х7R~[ׄŏDu0;]ɻ|2OiSA6T8r7+yۅO#9t y;ʐe Y!+(~EnftQE-w0/&$DHU#ߌS#hQ$:("%:[ 6DڔmڦlTe6(SW1,fDb2ysȡVuU1 $$J>%Sq{tfn$,CAhE7vnEϹ+\;o|v vR6\k{ ׮\<+?lxUdt5]pzBG7LQD\?2Gh\XR=h,4 0ms$HP=і!Fcb\C//f!y9QN+/2Z3_Vs[N\osS&e¼)2aٔ ELV[ i%hX8 z"+FCΒL gIGYgIa% L;^ڋ#( FZ^e=NR#-|ѴesOͶ>){,S*˔˔|d9clS&4+^(.TUg oSVlN?l+S϶j;l[r&c[HN YE%Ь e^hدyN z@P9#~Gܸq 7m7nFݮkhOٯ]{kw.Tj#Щ"Tq0.־k}S k JZ%YWL F=,YoBZ3KE|3FՁƾA`Q;քq GXjn_Y Gd> VL`QsKb! 
c}dr냷#~%SFYz`J=ݤ|LH7)2 "R=R=t*'Hs==^w _8ņ^k>t^xQϝHL_1z7yY2MWLEK~fu\Q}G}chN5f7CZdxۀm@z̸?H<6f|}\ZA MгAг/R!9 B;sxs׃[#.N}3;B(ބ9[4cM*#pqPIMwx"H-9bMi" P6i "ԿNю0FHLzEBU`g79l!|U{UmW*2>`S' V`;}PdB݄#$XtD$G|]+Ϗ3 '(&F 4b@Zb磧X?TW~q:$<[GçjgH?6HY#X#a΃ꗏ+ eX) avp;j#|m8W3p?\"|Fl-R\\iZE=G9s_7w҆qǨ-=eN4=Â(‹vVE;if#$畷6>JzAcYkr?*Vn2f ]b{b%MI1M!q4PƛY;Bx"733Eqe6bXFM2 H9rC!01GExyD0n~,\9oϿv۟ceYYnbߔ̨fn2bd4d+NqN!FYL` 4>*f/O*:v3GT' iSk0Tew=r"Sh2DYD˵o%o󒃩bb_;= f z,|(%U  MeExSYO*inhc##&[27%%#ԒZVb#tN aC,R+)2j%0CdoItKw bKFwPcW&uOKe US0CK额!.:eٵsgߪ2B=TFXTF,~R1??hToňw7]tptm.'~דeQ=t1#n @B۝;ODmouhXJQi"ْ骝Y_sgs爹4Vレ}nF}3s_̒+/'Qނ3L%_v3L%1{Qy7)TK|MvEv ""1@uHG6O8}C_:67 c\Jb\9BWp+ +C-&{ <^0*@Иv6]4<99f<3i93wl$l@gW b?XtXT V?Cg;$|&r, .ZXI⧒YQjDWEH2UKg߉繝o|3MŧW*RSdUGd!kb˹!_5`lbU=uK?}]+F|FŮ[偖M}F7AmM |}ۤ0UF7Hbj}bIZldxR"JE~$Kx\^g/ux\z\~ׄ8!)&DǡvFqzܩ5Pō otEZتǬV5Yoi8f%nFrȂ`b*ĦfK @ăq.HG%_)a]خ]x^ugq= / zb1SEȾ!>!ַܹVTD 0vwL,G&al*Mj=H>+o36-[ޛeXW0F\c 0F\C ӶO?y|v7nթ_Hd_2-ʽ8"QVR=TE%Sx9J%>GKV*FXƦ:̮B:N(„6 >]#rG;\iiO挮97+[H_w&mlS2mrĦi(tC[qLjT(KPC71ew{߭#l2d$ٸ+$-#3;AZKa䩅:.Evb>*O٧'4˲>oˆ} eDW!aq/|pxMĉ1:}_)g 3n=̴%!Œ99cl;7e!Raڟ˶}'X z;@+ {Hu+Zӽ*, :s{$Pm#rX =BRle)#pG2dhn WHiŒWXHy~E5m*qJ +S{KQ^}#Yڢ&|ڢж#-35ѓ%i|>ޘKǑB@EN%uMRMd1~R0q¥Up+R2O7ڎ!c0P:i#<.(jӢM6Emdu.&vӵd*ey| J`văA>ƋRH!)EqrO;;ߌ!E8R 뇷' }\@H.?&sw\s´rg 7^M8Vy,XsV1p9rܟʹOGS9OxrF gKxF-$Y1BߜɄCEճ̪)+Bx Q $qjP@'ZhO$qa Gs^<&$zNOZv8MIB~/.f81óJ1]c-" 1$o<6o}mQ/:_d-eT dň6 &}O OX ^_|x!/"7ese1-ˍڲ?)R|zhcs"^f,⃽tY{b<9Nn'"ih6*f14<' U-{N~ZNBњ>XiM`,1m, Gv#\ݎp#X'J߮$QE%1#`VWS N)-T>7e*_2,J]?c+tml*Zѥ Z o]Q⪢!(q4|'-ɇqqaʇJTYSڪS)!D S JE!jz{놦ӎк1n к\'肠8H;Ǧ&<ӳzxzswѓ ;KccEh76F P[T?1On-On-O%'s[˙ D |%<^aE z蹰w-ɧ5SEOܟ3&OsXcFNV(So lpP6}P^ƈU(DM FP+Y"xE{ .<ˏXy(@'{Z"F6ӁzU]Vx#KJ\ ,=omԶ8QۂWuPj*!v bpnS/Y/0͔PcijlxJ-~T?K/cnB#[\u%o9?i;n䭼w3c u BsRc?ѩJ윉cċY ʆXdeCYzZtĒJY:P"ҥoKE}ZdتW 1gN\t%#FFtYh`1ILw#!5t;@cKe~-NpU_WeIŠ|+EWDq;1FA-7': 77ors+ϟAp06XŤ:Ep4C^ %pBɔBI|/i*ݢlR? 
q( x ux)zKI fm4 Bc,U0e~LjW=bݙMg.f aoWX"  -2BO[#^eϲ84JeD1SC,1SC܈99[떛X1*{{&m4{S>S4`NҠxEoy7W|Slu2!DL&,M&,Ľ _3<.?yyo?}##y9s_w/rmoJe@inJ2Ju#^VKůؕg=&/vDwC\1FsqyK %BS[,Z۞tyeНY_W>Xu܇X]FG۵丌m&l:ohWK{nr:Z{w^;oOR-^AUNQy᭓QNFN?j l}djb_lZ֒ jIZ3sfVs\y]<&Gǟ|ճ C4ib=vGf"udAJux 9֭:6#`Q7&U4*T|=Gߔ󇊱0d#Sl2[Wv`]_JHo}/j%5kbL( 4Nw[O)pw`WSr o㨜&1"5K8Feb!+>4uqnN'ܧ8j?Q?^ؽrH".!,^eya*0dktjUCRvU૆ǪRORr;Ĉ׺c֦笍9kݳ]POHFNCit6x^zd-B2ݪӃ_u_upo-v[tny< -_6bzR:!F"TU6 ǻ]-s(^.#&#cB>w dOƊϦo30+: vx|˧24fY۴c4+*Ycp{-W;4UNy 1=4D!CPA :6AaLj݀{=GQuGqȨ]80cZֲ{U7j|ҋpGI$T cQZB:o@k SCm˴B+XV¢|7jU_i0X{'YvbSAڿENF*)e 1o}ɣw".Sč%{E6HUW{׫zwϪS{k=!惷Vr^iF>+>[d~ɜ~3]:R"%_ _`0jӿA+oyʽK/V)<5EmFca!\돎@ #eJ}ff V* q[/Rےt<+bNjkxֆߙ;S~gkNv.V^F ZV!0tQƾ'p5db?D!QƎO;$Ot"tEo-5*4³J7\RYeJVf^"P&Gf=?# 3^QSf{? ̐Gf|f d6ɗ(Hd FųӃJ,O-CJ _z,z,DE=c)g)"n8pT**ӰRT(nJNnJo)Zmցgfl3&l3tz,3Qhw= OwGq1ABIQDWbkṃ6|mN_,}}MTQwOPE[E{TNOmt_p -p0ˬ]3C4ĺM͘Yv)YEBAr!@'OW?U16A`V35A7 Z"!rls3;lko϶|ήsmzisۼBԟ_H5mrF~Fp:?UV1'`Br儌+NZwœH=!SEx)BxYy utz]:Tk\1ekP;4B0gNˤjLj絻*}gj1BSu*#vSϪI:SV]Oҙ1u2Qөew^[OfgRBxc9>f;7~(uJق1a 2 ǡ`8evUv_]g~ǴNbI==tkl_5/ꅮtㅮB# ]ɂ].UUΌ-Mq' TkњW;;(;JΒgoI3Q᯵k)\-6vˈ2C!{G ?|Tg&Ѧ]fd5z1|Œ'Vym̝-br,El:1Jۚxx>& M-2wyC{.5\?nUl]$8&Q1!CبFİGa#B%CZ|4i_\{wgv)=Txͷ2q/U{J2*́p}JK)b!WaaWCxN $ % DIg.[#X5VDPU0E14O|/ ǩ֠5Jk86_ۑSބ2M_H(;ܽc]m> LK /i;%Y~%:,ukcjX9fkSCwrj`_@=G.d_]Ⱦn-௫W+@2T)CW0h$D,Ub)bLjiߋS_\MzAޏΊPSgE)!o-AӮ.#|X>t N%c ,ud(BY>cAkԼ5|)~㯪>AoЕoʧ-tGkb{?;1>w!;-'5h :^Xҡ|hG^9}Sjpe%`UVMν*SJt1|Z\򡧯1ÿKߖ6:mkkk^;ip MT+qCL+LFqV阷ǼU1aU׀BXl!vް0Z^t)B.E=]WՏIMV=:el)&*KPCV6I/So)ԑZT+&n&;u&Q3NNI::< 4*ンxQ\wS_/O7lwg*ʱwnT 95{̫ϦqΔ]Ӫa*a)O#^]kq,"g`xj]X;b_jJ&QeUb 'cu0/YG|dšx3 3Y[{sktkonz?ZA&_c.6[woOۿsc.oPwh;;#ggY5+˾ΙLi3̔L o<#Ӳ嘖Ϳth!J.[gÑVV?ܩ-xR tk{vڞ۫ct0"d@^f d Uu e& PD>Js?u;"lb!n! 
̴LoHWbLj~78GR(+ʇ|1ٝʙ=xv{#We|r5v(D$U_iΥ5JsѷMJj?UR TRM4;eͰw,O}j"^{u+mpQ`!u%(ɯ9FƑ;LJ2nO<_U4*B WWUD2C w+o+Xп˿DżIQU1oF1o5Ui9#;Ex{sB{l-h'Zak}V{;FE|eJ&JRE*)ӫGO۬}2)}^"Ĉxq[j"|rh3I͓O;O>JR'qg]xiC|I)xe 98ƃї>ڥ1L]:?'d=gٟx=YW/Ax܎cr?8&y0+&\:]pEɽ$ĈΩ9U{#su>37vL0ih' fY|#zFY[p:=b 7d-ݑiȴ GZ pj:@56P_X/)k<x_0J~M,^Y)InM.0ݑK4%rg1icJn_NLrOϧ5jِ5Zc?._"+ZEb5 }xGNf rͅBRSmyFYwN$6Uc3NSBbFz ,iS\bD:{wUEҜJd5'tFlj;l]clX3ĪwYOXLWؚ12?l@3&Y'j:QЉrgKtd U,oiyj *byusO |<=AX%%.Eɑ;ҮfqhegIT8X} A ;:!!8ZZv֫1;uLR;ɟ[}?O܎Oܾ8ֲܗr6⍿X&Z,_7$*B#;=R!AVEߛ;FS'O'7"8 ?r;n\&0ckF(D91lYq,lٲ?[nk'R eB1ĵ4ÖZ]Eh7[wuG1Ew=  z6)0 )#ǫuj]uzLj ]P_%Vq_%;F5-.xY q' י(d};T'',_='_L#^GRxkr멥jĂ$[P@ r0 "PE)ѷo!kFS$=Le"$*޼_Y;L '5& # '69r`ܳjG=z= 鍡-V:mZX#V:)-Cr(e^%,JE(%A8eBC}Q}3̾{!E N; ki'ƈpT Y%=ʇ F^Zo{@2=9]w*X wT5!GX]LlO- g4!}6y% m*$yhk_nIڇ7FiZp켅XXcJ!,J162\;_qnJ7D-,J}ʀ!ʀ!LeY]zTN&7~ߑ3ƐB/g<-)p^;/<3tDc=uf \qE)(7D[p(\QF4~d~d~/ TZKQ*G>RULהGSM)_7"2`ֹYD *7(Y4ѷ )uIL(yJ6uZ*/XDkP5R5čZ/hpF5EYM3i:2g?Ȝ sG\VNXV>X#ȕGF:fc?].M\Hc.jE9> O2hls"8'kos";KWҩܶUn[*/J7)>l>x<9^ ;Ү3ax-o[댵[ mbX7I4F*<E=P`ԏrx=[ K;5jDd[vGٰm(yiT& Ti'~Ms({ouvٵAJ-B[wAj;u!.[ȃيOcڼg,hx,hlN6 z11 1^mظLC̎]nl-W UX*CX88J{u{UxW!P_Cټ:BPd6]drxmx2T:hoT@Ay2j?B$صI3 Fx#} #evAF< $sFC(b+oy^ܨ'jw3=ƈ]E6X29{rZasQy!4$1FM$-A;T6)ĈXT6| umԄhÍ-7Xb8* U@[BcEdRyK:3J `I@dhH @Q,8N N#Q.k!^QM9]lw)x.~qKɇ{&lz~qs:p";kY׉J:ngW<+%3@V Y\JT n^,9򨧗GGE/X&,[rek!Pߖ"fOK%mG -v@;Q.EyX E7^xϬnLJ5j#)Pc/vUw ;xzx{֎!> ĈPl֦OT &IC3|zn\P+MӏwDcE۬mV?^U^f˾yyˉ%D-Y%<ye߈uvmU\&&.fO_Y<󧗏?g^Vrm3?5gkkoLjd) ;RǿNѥ~bܥl;}r:P=/ bO?|럋\.3N0;|<{Qf䮛Y8y^΂U'L?|w'=Z쁓̋E=^̇88K#~UToj,L)y-|z,_?\|0W~ʧ?l_]>< b7||E.1q`n7NhF>CF3HJBS^$E!]UDCŧʴO7܇: /U@]_C,]_CoCվVZmU6č5o(D|xͨ+FFn^Qd?J:`QXGa-{ YP;]=_|O_BOu&?Yxo^Ї|˭Gr?d~N?Dꁝb@$+NT^Ky- a,"s~3E?;ni8Y=g[f/1::"ktDL!bڿ1]4z޶E񧕈70/4]z҆&҆Uo+!ջpH4CLnBX~=4jDlv#v 5Wv n9b=hp5)bzoϥ5Ʉɹco1l# yzbw]ƟӖߞ]J室7wOFUsgs3W̻l1Gw;-?msj8k?2l E&hա!7^hiS L-@p%Pn2(Bx}d1QYvb/J(ܥL^dS&L2o8LyJ/EH,:lCpgp\7.Q<|\Uö5 1KKxf0`sl^6ج?D1*锌Vƻ2ɗ& %W}:-/¢b_yeemw]Kgj`laakvؚasbVϘG=3 mշlq<S̔Δ`)7VuB>t}͈%!XOUST08b>}f",e, j&ryPb[q̯|nȶ^J2OjIhC+.2 ^&LH^&$9[zpQc1ɼΖ Kg" ƈܠr8WUٶU㛼OJP.y.i޳TE_T/j(Eeapndapy 
""\<נX(5/׽^{]h>[`Ch>(P l\{с˅;gVMraXEwHqHxHĖWĖk-k|',jp^wͩ.]kK+g«uLxu. H P>xN8oQ?xEuV zqճ4s8Y&#X$ڀmr{m#^5尉5v,_o15e>epK 1! IaF cZc[íւ&SQ=Fb8k$'k1=!ָAJϰJd8f0~G;G.i|]JmuJ#G؏lE:4Ѷ0#*b)/mp_6+SU53;lx(h[sxpm0͘`Ӌ9F %& Lu(D-iaISK'7E41k_l]55޻?]$II_KZ뫄cīR젰E**CQ9.f?uꮛK7?'&N95ݑ*̦"(#Qy ƈ71q3G;sx1ۘ;ם?d1 ~J.|=vfvtu(Jr,oV~}v:O[޳68 ;bG1 k:1:ԙ_hlZE"chEi,7d1/γoV`ʈ^2kUQ-K(GP}%,ZDӭeQu U+g/q_;˙i4-f藗֔mEnQRܔ ZEi|kE: 4"Lc_dsY, ֲ2*k-+Fk.,~bLsӧxH 1MsĖns`hW&>u'#qגB$g &R -':vY1T^7w5gM2oX|6Nm qG`v U1|f"Xbʓ.!5Њӵ'AƓΨEDLj>UXZh*d H8_[K ^mFʩ mA$ϳbh$mhD3SOZ#L <[cwoҏvHzXYN+ql9Ez$ TcQXw"Q{sMA6((PP|?M1dJwh&(ѲlcFQU4:@ik α3D; w*Qu3Ry<*c}$ (?U˒Q)cCDT6!U63Wv%7ecxjSᩉS~ʆt/\IY g{iO#FpvEFpvEEFp Fyuˁ M6q'oP+>ə cq3ϭ!u259`XwY"b3xrY²N8 WוݓRQڅXbj5b&Md3)&c4oNM+ga瓅< ;~a;lK#4(WjќN@8:w<:Q˨cfc$"o2j%.nP`y NNtf_<ڏTϽ&עzsOs뾔8+rX/N14>"$Nď)"Ak>Qƕ2`X;!vu5\=Db]Ii5j~o +{ID\c]Ʈd721u6BH3\`b==69;ۈWmDbȭ2( CΧ;F sh|b:'S712 dO\4G̎&fQͩP#UQr*x ™p!Lp&Axd̸{<27#ʏ{?{zŹpgYϳi l:?)tޟs>פjoUV͙g's=*"[$Ĉ;$F@cAȎq:<2(s–FOH1X}g6>6{tYھ#$a;b!XxReΓ]E,*/UN|Dq>|矕hš!Mg%5_h?YxrgLjvTCY j:*Փ")swk7fkE1u2v=HuTqTE_gv¬X²}e粭}4l_Tkתʋ#qژƻ6Yz.'˘ \N.'i|hQx3j WfnN]qgGzpsCԗ;[u}oD)}\7xS'OV^U;ٍ[t%r-㸋L5aSٮEdΰ8փV3sp57R2WqR6;0_\ @I~@^ǛW%#eF.X`9X$lXdތ\F;FC?aЎw/X .>?PdeyMQ6_]v,j|' iFz2i[!L-1  yp/hG^Whגztڟstm<lkϹ-#^kF܎q;j~BiѫAy[>וEƬ!dYc徑} Ǯ1`+YTg: []?>R) O, 73Bٹf qӽghۑ:"cg6AxV ~{+^kWZ*YFk%|me`|ppO'_9@` "ƈ.:?do^㻌o_ZnOP=oy9T1 y˶odƥ&JB.ݗK(Uxv{ޣNy={,Yi{q:Jn$hTdf%cQ"(CR1`8A DjUlq'e%s^l+σ ,Su[AEۢ7ݖNU,D*t N7^D"tJiM:7xV?I9R]bwRs-ÔcWt~#ļH\߿:/e~^xxD}U,Flj/ݚubfP}Ẁ9sW<Wp^qהowO kEzWHrMx )kiz$Ĉ.'4' EB90F-hE$/SMM2en̊ #n F'r='ôW9jB '5a!v&t$X!pcbltDa]K-#ڒYf'{Ž>ެޛs8^Kaߵh Uh\*^_UDjFT c_Ԧk-Qk2uiIe 5] U}VO[bsc_"B"1rךXdu |Zي۪m9Obu~2wW53*\vm--O!u^F3H\ y(v6;7J8Y:ӑ:j/f;Ĉ_Pz=sXsXኳH^T7JI^,v5`x{z1[~Ng6\'4kfxjRIDLNV0H[bS0yV)C~^0}`dfndN5{|qf5aJ>DA(?.ekh'Qr|l+ĭ=/T B4c{f-Q.EAl\ :M%DognڿR4Xq:_?eI6WjꐴB>+i%Qv|E?ZԶ #OmY-+iDQ-+L4& kgdILo$Uj?cqudh0!:)_7TZlOMH_Ny5/ask>Ȕ+Ĉ뇥_p f>AKaNAs!Ȝ6|͎G7;,6+roQ/OxN|?>AxJg]a`9dd=̌1/Xb-)#GVKpG\v7nZih{߃==|{p|dubgӜӜXӜx||6T #j.D^5㯵":U"-C+Ql<$EzP*1sRvp 
KfLjrۺ<%=ڽ@?5f:krtڎ/+1D~,"qdef'/9'v:[y{+LPB-J!Z?@C b|<;Ih;bej^ab#L~z> aŚyV6^P72A~EqKjʒMGJ4Q]x]h / .J ,;F5{'Ha;D񔙢#>C9>C3;~9PگϞ0]M1"%P|p2tX&*5;fv[H #Qk"J,(J\6^O)69:۔sƋxȎYLޜ/W͑{|:8|:<|O{ aSX#^I۷Eb[CAA {@xXty{9[3d{"c1pkAk~-/ɼ e6BX|!v_áJbf**%d~v].Op$ ]<)WnmTpbውם `zߔQXLxI8lCD!K[x][Kkk:ںZnjyWDZ}UM*llZyuZ4iiLxz!QD(B4j XWNNvҪKԤUU[MZ.Vb:,(3ٔp/$'d㛊D>Ru[8Õp?m$y@5^Gi B;_t;#:#\Vk1ÅrKm">MDdvx5 o:|RE+]];Fgqi3|B|%, Ix/-QoEq PV޵]8+@gs`|38`Q#uj?+DuXbLbm$=[ygJL!#b)d#NkKmG5®a]]ږ{eZnǪ>󝜙ӑ+/Xq흷g0y*"JkE6Rh5htȌaU᨝ )Qpn!Fk0l*"M7e-!X!$(IyK?gЁ5K$/hͶZ#֬PHoTofTC?sff~A,#O=kVU?a^N:4d 4gp^Ԝ^ÙXZ N )A 6 -kr\-{G2wzW~qOL>q zsG8ܸI;,:mvOiUKc뀻 ŏGMUC*V5UCo1ooQߢ4LB]h7՛l`tͣtr cP!@/x}OjcB_K|6lR/Ce ~UH`J)iLW%%zf_ C ߁j$α=79rx\h.Zւ[V/ռ2"$[IwJ \O}JD00$I9,<^b5B  UYƪ *޼V}ڄV}j\̀$*9nre@ J"S> m8m",[c +DWչ{pun_q^lnjn,`5Fm, wWYqC|'h{!K:r % GB)͑P ,k-&˃CdO&CK1֦.bcKZі%oBk-ڲlPt :Ux YoP 1cO.t"CxkfaC~eYqixAΊH ~$W%fo"$G,9ȊK,'='d1[9O\> YL|抷|: j!9 ~!~}G_B_BW. N/NN6w،f|=X[7,-.) T&Y 'kIA`y:gJaO'=w#HW.AЎGPPPuP P }sN272,KFX75`c EfJR{jg6zO艹]Ra+~{;Pߑm㗳N"C&  "&z+%aժO_zns҇svZͰXF Uz)b׳ R{?*<gD oDNn7Lnu1ɭ-1ޙ3$a"ά%ʶWlOAٽ} FpgC,gCl(yA s^Њ|,x0ޠ,#Ly(z;FsxCvlJˊ5Ɯk5+2OuphcDwxoTk2p6JMR&vLOM;֤!_֣[YG¯6XF~nDqIcQ$k%"NYC'EW Y$0m={= CFByi3 +B 劸#')wC*aa=Dk+L㸞zzdZ2$v$ _hPOZ8")DcSB*&Kry[DZ$;j6VXĔ(+5Y bc*Ugb!FА:-u=- *}[/ɼK>æ!l2 c+}Tm m Ub.PJ ְ@^^bF/|% &m4[N.`B {3߫߫ݎnw}S&JZbN^c/|LB-#srէ:O ĈIF><@:=t|.g2<L6DBy&;^;}":XU~KX"0Vܴh.$4bw@[Vx@VoyAm͡.78 ix @D%vXD"8@5oX,,Bꋐ%!InLӥɃ! ^GqƫʄQT EGQ'kdD9p-u F>CO}jNey/^KW[xVܭzxؽu$r͙>cp>ޛ>a p]ZݝzS͓pt:Qeu#^\sRIZSQv%OmtQ:@'nw#⡜2ooCP.m ĂN":tĥ 5x݈e3ۍaMZsQozMO7zZx͆Zx͆:CM ;4InĈ?ZтF#^^ᴗ;uʦL>HcR|M:ĈϋKGϘbB+w%rbNvE/V;w8pq8f)?Jl9_|`LPhyaQ @+.oQu1jux1!Fdu.sufXBM ^xO4lҍ ;+aᚇ 8l oE/|Q=ڽvxMH! 
H&XxLヘV7=1dGiN8c#d;ۗ=Ĭ <00<6ƃc;=v/>1&4Mi!ӴwS!l;),[Gͫ|Sy՝޺_랇W 5#mZw[~M5i˷EHL-\#]ZN[ĈײaN 1SL<>"OĝT1]+{1ݑtmLΎ{-O4Ń莚 o Sa'Q3WxO#]ţ2a%q>ʫ䨟AKϟa4RycMn ?}ez[|gyP\bL6wJdf&XW&uhav;-taui^+BLY,Y;%)2n=hzPBS99G3}tә8HE깛lܭs7=''sq~ CȿlB׬RGC,GC|aA{H$.=*iԺӡu3M/_hs;ЩB6zg} =*N\GŠU Q1(3UL?S%7{fj;锎W6"S!4`mc0-3%PԢui+m=k;FZ$i2:+jѿ543s9'= Ĉnf* D:F<}Q7FeX;T>ĈWڀ9ACцGm/ )KeMJV1tÊohmڟԈ6΃9k67(cێ/,uQ{ UbxYLr4Ѽ\q\y^uO_4s<ǃ:=uo1 7rL91 9ߙe,R'eErͿ=>>>%F?hz]vU{/%Vpd͉d̉춹omm+m6M"QVL:xK̤:j\uR* Ss][I;M^)_%k^#u.=1y79̟[l(_(WlxEx"nGS-t7-vx3]m7G7Ef.*xS՞5>K\~pXa<_?cx218IeZ*8f=Wc52~Pu(i#^9 kj#K,;DØ<$_LuʼЎr$CLIo+|ބy Fv΄v%tarJc/Q}A':IRsKJr =B 5"5R}kdMg4QpLÛ$Z*f\E`kLjZwy" C> Uو.sh M00FD'CQbvu+Nod5F1`ZU**.G-TĀf+y@ҊawXZxVOWBFu Ně) w"e=]{ `[n-Z;F'~D_ܗ~B{jߧeUqyuLg)_\n~:>̭-/d 䅒d]3ܢoTCP>A_l coYzWg'JY 7oP͂K]!Z{ha=9KFRgeVE:*XǡDDݸ)7v[l d;2o%)sf˚pqaЎ}m#ҎKLuI͊7*kD˖5eK1D1&D1&ܘ( "TGpWa1g=MN1 ꉾIbR%\ir?>tφҭwreI4XR<]6  q(2vga|B 'HlmkvHxcJںRn]=L&#ShqdXYl} +?[W>]xf٠ /B\>yM:m6~<lo6~vzB:=:;*v65U>iQAx" ZeZ7jq8fP1Ӆ3u{F0%_W_>Oօk s"8 3˚ -kE+. [ޞ:'nhW"%&.V^Ad:5Cfԣb7V9&-Ջ\K|7G]Q}^o>og7}ՊH ԊZMZ ''T>=aS*|GAtc[}D \BO$E'"aIlp_;392^۹ o3Ce38fɖEtΚYKkmNY5nt2Ih_H 7ZyċFqqq{V]:m?-uZ'K,%²I,~ v;b}G2mdSHW[vd9A4r23o3u].{T#jי:lYoYd=H7EߚeǷ, w0"_O 1v:|$%I,<5"WbD+o }f2S+(8DϹKiE #_?IiEbaH?T;,]K_06q^cW#^faBs0JXs!̾w1%L ߳ѫ{@ 9!~Q(0|i.orع:27M/]d[s9=qfXM2W/G}>eNOc?f3NOk硪sc_$Wٿz q|cJ2Kn-Se[Dce' p-L㛵=B)C$~'!Е!U&ؔulb(f)-xyJbKT<;6yȞM6O5yySco'O] 6-ĝMr[qNdooۄɬмA KZ)V9SZi /."[X}&.%٭w*EDsXeȄ7U혮tU~GyXm|J8_1y/]D3%dTe*2IG9.} +||燂8PP㡨~(K#-Ƿovv۾nX~rkЧ~q|y~Eom<(JӿXQDx>xXBKߒhm^QH^)Ifkgr`T$ke );q<51exSe-i4$Vb(M6cvKL j3 oiT.95-hQ1=^' Rx ;刔:L`Ra>:_F-kTZORa\V&p÷9`n^?b[]?V|;>\aFᮅ7b4cDE|BajOmaOJ1:UvrQݖ2Ln,] XFRgY 〥ɹsC=|*&37?u==涫^ʩ`W-&ۮz`W-f :;ƺƸk%6FS2K{I-Wu"Nxu"/v.JhW@٭RIQ/Zrk<" W؀J)T8*CjCe+~ g*'GS5d%s bGSԽ3.6H:3Z|ddݤ_l4W[#j\wlF!bXJu\,hWkן.s@D"SeKu }:8kwm ЙջB%RA"A s{q Fqv>aj~%_5*OY#_v7w"X_j\K {د7 V4oI¤Bu>0::=#xψNdlψ_qJ {AD~~r[4m涌R,y.Áu91t`{ 3cfA]#. 
/2]v W"]@yޛO 8g}1՜QsHjZ[{8=MCOFx?ywe칊[>*OV_!M e9oHM^&ѩ+*|p>%L*tzNNkvԦMCƻ୯[<շߣns lۍ?ofOthGaZ qXS\Z& e'P+!pG=rM5t-#͖eidj:c${s} #H~M0ȝ/;rgER 앿:fl$h-7s?v!82Y8F|?ɉLI4(bPԱ*VOھɢ zkUwj %L^ä7`ȉc__1(<WU]|DUk9ߪ=~Xcmtˤ 78Hms.'UUc| u!VÕwܗK-ɨ`Ju ,>^Յ+oDٲmqorJKVKd &8,!ϯoK1Rz~99/&{Za70C 2wnhpC mW1j@иr L<%T|}:*=Q QVp_\ 7X\ ʏ*hx)z$1_ަ aTX!xV5=QQs!ht&ZThN`bS/C۲`o^Pv|w}JJ&;B8Ў+twok7+̏T O7wA| ν`h7xt\#]c"X kl+k3cִN-t vFt13&1_}LPiƜSEsO瞘'MQ>^q2+_Ŝ}s [Mi3O7jw'']+<~w G:*̉Rr᱅Ͽ?ww].,:"E^Ϙu! -{snEx]4Swǜ*t~U]TSH%?g*(ΓXIKҪ%d^PfJ,&PzEq퐒\A-eT=S.#Dri\~==Lw(Ȱ< xsታSt-ʘz(+c'j.=ܿvۨ38`IZ|ufbYP'rP vmvmZIs0TO̬=y2ZgqOǼzr0=[gGI$X(b"<,Crc`v M-DhH' *SXhp"?! [p 4uשrEW**WO̟|0F#݃rcN?YQ\YHRk/ks8m+[`q㲉%)E;s[n2[ D;]e~w'vBl6ˆw]2 lO^+، h3(i @if-z]mEW,6*2d YƂqYa ':‏aοC0K+@+;`hvzSO<',AGPTZ(^ӎ}]ŕ ж*A7}kUHSSצ&'rҹ0n/1c,XlD")dŌ9!!zDη!y҃x+οF!Œ L*LB2?[BF1-Fq )_tsQˉBrs4NMްˉb)eٚ2*x,w)-i(U=8IP+^(Uk|I=_;eqs/=Vxq7-d~cɛRŢ%J9㧻NUÝt]AuV!T,7qdT:E x7Ȋˊ=,[Y eצ\KVh&ŜWLS_[8˿~jehMh.]F?h}G>㘲IY=^\N Y xLʠ0M>|c^aoU}ǜwm^tGb#zݑlBAf`?U=jw ӨYkY;c0>ѹߞgףnl_N UWEe0%`[ʠqU\ʟɯNs?4*̟\;8r/ӞUn`%B{\ ʗF1l(7&X1?#6yp#t=Dž.L4-ز`,CCv9; 3@S:o>}q.Ⱦ8Ao})BטLu 5Ų?Z1oO4ayS~f>yS375X m5^6ѶB[-`d]OwWlEPFJ } v_&H[~.gS@^7L|s|ڞKH+v^KW[mQh]Uϵl"SI#zFVgsqS"LI;wUEUPUc}U[ s6`I-9;I׍( gwϋBvsrX f\=ڶ.`_ۻ{{i3Rǡ.T˭NNtM}I3Q|0q;J~}81݊b[W`kS``b{6P/kbϳ\$-^eŀuoq׫eM c! 
EO)8s9ko}) yTAz?\#tKO/Uu)N~XeO7CD&4v"x9I: 2cϠ'\ZMSZ_1`ы3p~/00~W<|\kY VD:kU _1Yje2ْlsnvYo}2> fejQf֖-i `xYҘҶ6 ))IaǜfsaQr>t g;_ 49Oxi+V2(o'Lq7-&OP:ٙV3T8h3(P6Xޕh)a3o%]gy?IO F$Aj(!1IOs>~ |Tk-MCQHxtv*} t!TaFW}2)azӉZ?Z 1i֦.ǻjsRhv24rWH_\ ; @ޕw]'Rݻ?H-b9**- d QJt'q%5d?0?xȬ*os nNZP(ԙAkZk4rsap!z 󭝐 e쾝 eHt8lRU}<@:!C>F9 l{vWm'XlcSk߸@LLA2gnZ_hy*;N)1/)ZLfYT<Ŝ$=)np\M%!u2F/)<*mdD I69YQfCak;,_yg]_6TVV_} ٯ:*xd-.]h͔KVE$ kF5nb3֟{0_{ ;SSP/8uNǗEP#KL0/85L.v=ZTKhuiSq$pVNږV`.)Es1vc0$/7(f ;ⰼ^Qc;Nx\,ԛyek\K%r)_.˥Csm)9P[ ,5Y}}]ڇidև؎_2/1K#y2ƫNvRK nWuPbƻ]A瞼OsyzK`XZ( OyWT_)ٞS[6':P6gnE;;FVzÇ2*2Trq@oGA9 2K^W֌;ѽRm9TJ t8|<#vG؉?`2 +tnl3۝$,sw/^ǽ>94'0'O<O`#wDT#,^wD~k5VW&SK4YSKOuQ<+dws3QaF!%[Cw)%kLC[vyP?oX] _rV1MR:AƎ9p(AR15A*/|ʶ]*FAVs r7"% w6"xQ+f|T@01ழccCqfWP bB1cͯeQ;bz v, cW|{Š&}s>&Y4 e;K91a"@q6u]lŜO8A,LZ듸wyWS ϡ)nάЩBٟ"kQ@;mJWt1>2HOoT+`2l;/}>nO_Z:2\n{)Z/W^ -X?oc1_lrQ<2crvh }y" k$[9%x˷'WNKoc=XBJ]J!g AFrY.ۖOX~`3K?;=,ecη+'|WWN^tt v﮵0ŏ -rE盺T27+, 24ORziȌJP ,1jǺhYǺv; .Brz8[)V.ecÆ1UlŜ_\e4Zۘ[f|4ga˳0w=Z NQèUZ0헢|z Yį"i0&X i+i} qn#2;~(v<^8 KK7Ii#XUPrr (A8|\Ȼq]ǜf5HeAвhۨS1k7g^;}UPu%eh2gf3U5c7w` ug|K'I+5i X3[ϟ~0H{3ǜIUlQAjȲk7_Vz$谬Gѣ\]<]N1cnКY̌UhFH_#L~ݓG=oâ=.άHVoYVo񣱑AɆfdx=Q,vZ`uMג!BdD3K6T6cη?]%(% r\Up8wld y}J{*k#Lnfv~C3B7.uk ekݭ˒ K%v͊cJF9`- ˹2|n2J &M`7 *NӡU} ?Wf?q3c}q3SF^|dža>0$>f>^g} <+ Ɂoq ~_rh!wrhJο9D԰vISL:Xxi*U\U?Th^F@SH:2ɝ+ɝO'ɝxɝgRP{(2'fTڶ ٖ:CQY *yf@ 0"ːF7ib[u8[OrirdyO2g s GELi9䨍ӓFгCQ@6ҵs13R8D3vHLh*1⤪bkX 51it?t%O[!eP7;\5%*rq]6m?wĈ`@t5O^ 9/ծaLҗ\ݻ|$#$ V@=)?__TNXޑ+DL'H1K׺CuZM+~W`䣧JHp FQts]WbM`t Ӳ:>8,t~J'8|@4J{}jłIrM|j2b%֌99W{MIO\S[丌a!9Bqs=VR !M)kI3-9X:R2Y M۱3FFYDeR>vH<[cɟ\M=p5Քq5%}Wᠬ\(AYÕ\pry-M)6vHɏ|Ţ| bNbѮxhfS<<˗xEאJ~qkd"ͫ2iwvړsN 󾙘}#v7x<t8WQS^GM|~Vq Z[zzo0Nl(Xy: pV7%s(;QaFTs҈cch7F ǜO: ?:_(ad^ZUD{s;ȩu(vS'3&hŧ٧1)hsH9|,j׏4Z/ e;x ە Tv˕kfr\MdrI9vUҍ(Si3e5B]?* zI&SMW_eo3Һ*%ϫv5JGۄ{>$:rbf(jeN1XQG3ѵ5)9fџ3˺] ^g x%u02 tY[Zm3aӤH-l0la .)C8)>7On!PnyBa Gs>ܹKd(Wwj'w w/dpl^![0nC8Όp2y(峗b|}=y*@uT=mCRK0L-gh&ghKP 2AU pdY:3NUi*Bhs (6 ɚ0_zz[&8!sF;kHLPD3 ec(#1)A*(|#>if6ilS 9#*Ѫ C>7ݤ3Jae7,~!с)}A;d\\A$=|;yJ7O 5/iÎߪzTgWx*eQRgYJ]} 
Ԭ3i3gƱpqjqߍ0XJ5mzVeV3k~+XrwXH1t7p=nr ,bj %mb&MAG;*ʍ=`]~=,_X #1+EDcSqR 3/}:][Ҭoŋ-́mتn%bώ9y/FmΨc_cYn&t,[k&oskl~Ykk;`X[C`m3~UͰ'EZog*HM3͂_3z@'wUޒ@E]oRP~=M?[QC_a+k5~l#)c =)Ypɐ5K|?=;I]&¨gFne\wuu]SBSW^}xW(MX)os!:k&P)Zc ˰RwIWU,Ќ HO٢ɗB(]9;$-R*|hsŏ` Lgsn^u [Uj2s飷I"*[|VpwD?3߯ڱls /[!5B|F;>_u( | agl^t&dCřDldIE uZJΪ!}U[hOH[E`.sk@QNgG+Xz:X?麍–ѦkR>`:bzrn܎9~qv^*wUf{uOf.y~ʺ*gy(Яg hɟY(D} x ‰w#ԌI8Ϻ -HƤ3Lp=k 1TE( %ŀѦsyG;@g:=]'tzfsaWp:1|/rS8h7U▣݊]1vҘ 4&hLk3&{ `;ָ45ncw`SFUۺ:X7KyA:dF1I~nn1g:Iƌ!\6#~y"w#0\gQ01.U+OWA϶ f1> ᐑ*ᐒ3ǜ?WɭUr?*jD؜߆m5YE)~VђUT>8˿- ©~G? be;,R;N2ه}7:m?贝!%Fjr@4&GP&nj\ʅ-Ӱ&X3cM:㳟u&%6?ޱ>ZL |k~\aWh<k1r(IXގ9GE-86.zuPt 1-`8% s1E(Y*)B)|J;~V:xUBM$ _ 5?UGMI* 6sԳ+8u]s!1cp&Z&^0C~W-;hZU r9|ËbJ"|bqKm|fww1pl˜6CMN2i-~0ຨd?NAЩ鍼cο[`v,NvNRũ 7ˇQ\(.UL Ԥ,s#Mes>굂R^X=ǜ+(^-gX?0 ?U!ǜd{]q88XEnp,o;.?uSFB XHSm!Aن8 e_X;s{E,%òȰ`Л_(F>AG&Pؽz;* Uk8 ^b}1E^lOZ'B*sz,YdzXYҡa rh~ ^⾣Ϸ6J*,MN>8Kc[%c_nܻxcbwk/8\%ْ2D %\VIc{J%X7{a gl·E[Rm#Es>24)"!EZE9WPGz1k7 PSnodxNQraAlP T,͢&;vv23)[Hi^v]tl}R--CV>W&^;L!e9?G*z]eJϥdx@c[ھӲ!ξ+cN;H=d=$\ҏ=_<.Tۿ-M+#3Lycη%u1dd] Y2YK!0@*ܧkM1篓~J[z[YKOc;$ 2 ߜvwh%9,%[H.á1wCJcŁs2 8*vT(2"i\rp!Jܰ R 9w~2 ԣ2d,?w=r j'JvߍIl7wH7Eԫ&'Z2' jL1.j &]{?E s}C^-jW˧vɈO;eI"%9'DetBBtu;#+ˌs&|Sp8N8S{֣ljz{T[=$/[+"ivtѱG1*a!`DoVx9hOۮ":wltwQ9^OF^wǜUx+J RbdO}75UU9c}@5ѻHvkR؏I!a.߮PBN 5+׻RUL&/%x7o:OLj%9:#t0s;s90 ԼL(B04(K~Z'7 :dj*zFMeI;fYm|9P^QxJ$HyDQO]qq^$,ſfء 0{j%_ oU؟=oX##+ZL;={{ڏN]WdrC~w "7ҨXM=oXpU .-625XQ N 2L 2`euJtTCݔ.]퍧^ggڕ7RZ?~o[Ʀi|t+3z+$Qo2 'דY82u PThߊ9@( nPP/CM\R:&O^sםoիCmv:Q۲]jwV0 ^p1?q8Z _H7Lڿf;PI!şNM  u6-bߴ7-KzkZiޛo0ze֠ek26kPǞ,55?3fKMFw#jw#jB3~(_`c6U@+׿4rSî||o5a7uBuB ouBuB uN*r<I}>sbcYƚ(ȍCt:{CݻMݻ9)~N9s@kuΧe?i]l(_!fsn~j2 ԥ%a SQ?]r>3 j$N8+vʋVZTCA[[0+ǜHc>hķv@:$D[:)}0Ȱu`˃E/WB QX}^Dtxuؿ@OK?.z4{r/g_i' w,5iIC:IKsaL>d(O?:fa[ 7j=ȫ' 4W%אCc/eN>}%Voh s`eT g'Oc'Ab*J8| O:m⺖U}Y[خ؆V@ M(]H"B0f貕р?[0|}e G~>zO0gO8OYs>@ Lp.1![=oǜߙ4rZߑx)rm<ѤkTa˿+!y.LB*w }_^6 y|>JܦŠe|@h bVo z!%CԷ1D=:>F͌vz I Ŝowjl~} ݯO)X.)i-:5v|,uU'Zɿߥμӄ`@4Zix|ւg*ET J1*ŲLdoAWث'6󉽚bs'VָqDW=^oC , Y̽YAq^P\ri\I(*s`2 :F^Af9(~lD >i\:#ي 6t|qF':0%tcP"yZc| 
4=˜ƝeJV[dVZ`>CJ^hϦqS4u`L1ߗ1>>fd`1NٵCJ~MնYwU9p%+/pjb.x8 )cmA)oLEK%$q,ʴ2(xeƣ|P[oK0<1ux.M}? bW*FYn.\zG QB/W;_J6L&N!:`מjϲjC+vvTJGQ0~+ RZ)A͟QWK KK%w~bR+{JV/Ŝv24Y&g"49 }>QR|q$;x^:((/-\Pӝ2Ep~;|;pɝfWř[A ͮvfsߕ7mkږⱶ;6ehQ?ETֈD4~ *`ZQjW&BSPډcJwP7|)8z$ɡ40OxkqF&'({I}M_rTKK^O|UuT'{-ǯwX xP(5{RXU'aBX:ǶedSKF:%#?לA3zTz eic׵#dė.]|Ȏ9}q wh3B>@1,5XW_jyF1cz]L}SC"gOܓ" ks5H" +u>?U.f{Gudݳuih v4k9ѺFMo55qv^Ek5q .nz"DiջypzzxaȚٶg0/#;!Ŝv~ ۱v~J te;ǜvlt#|Σ˹NIଘ>C/yHKOCC{/_99w7m*z8퀺ډl"U#b Q&%KMIǜkf}ܜuk'#}\Y&)W+y߆R%$tW$tW)|+xZ]_: ??/8z|<ʍ480-dݫZl!75dsBw,ODʞ[sPrk?7GcgLW;$7z ڱ?Vq:u `9_ֿVO){?|: {h25Ra@6͈񊛮pl]]qG9PRVRnͫf| I|qٻq.G:|:Q~DsU=QAzɝzRg}c vI,ǜS6\rh^X#Ћ1jBGI_Otq9sx #a9G-k1߁" GQ'd1.t/σٌ CHʌ{0 ׿ݽ-pݠT^[c"ֽ2uo̪cǜ -*ETBAp@"*7ѿ}Z*÷TuTRgK=%VkDs#Ngu),8/&tyqܩ?)Or|8F E"[OXv9ߠlp tp ֻ^GmA ŀʫ  ɂz %Ɵ `Um'(|} @5Fo58j@,*T1짻ُlhj#VyiB{)襸Y /߮O|>9J>Yl"2M䓥pǜrӎpxA.f?Y93ajI?}rOcο;PH!5LL??tm1`ʘ4hX Y[ȐueLC֕ Y>\جi?D1Y:yB1.9*G]~u>սW =yD{\$/ڄ~O&$'|&٣ k*x(xn zǜo+J8w! @)U Z!Fm]8d]lmoȺ(ю9f=:/AAt^^!%1K$y-$)AAwƏWk\ȊȒ.,;3Oc>cοYv:- l#hD7ڐgZ_O>PF:%5jLBXJCJ;8e b~ s>vU% y2w5\ܡ&0+1[]IsG5FHsG’9=vtkѭ[[NuI,\b_C=>v||ZuÉ C]>N;4v e?ic:Pu9#Qs8vwԝ3j=< X+~spϋ!a%9QKA'̎9fy{/I==*@ZUBȌ>j]} ǜ'V33`u?[ Xt87]=#gk|m՟JV~D.(B-9 B  ޥbjӕ*<]):[Y֤7j85j_;:M? }gQ? LUy :y57f-ؤ=ŜӌK^Fdzo2۳fSxpMbǺZyVޥ*9W 0R1 X" 3cxη8T0SĮ ET~q&^k$-\4I:5p8ZUU0]9|/xk4wpPdv2|9(G﨨[<„eIGUKZWk S`-~ҏA?^e:Mj󭦙8Z7IO?9(5r@nQ#Z{Ɯoǂ lQ;>7GcjSZ,YF'@z݉9A-4ѝ|8?/DFľ߼0>*8D,T(Eu(|G'+=!%Q chη¹VzFcηTզ|9 /dWڜ%u?9BOSkOm6Ӭ4cj?S,dÑ.T3mBBMͳo1>߃ehD 9FC4] "cT7pEp%s7[e@`^~3匧ϊw_`_f/,PN}НV? dE۳mO#i+i+~HfjX,2[sϐ`XbH'߫-Vژ4A$SV`8FBÏR5D_YϿC.$mo\WlG1݈3R do`$~rhLAk'khh ){!`@꘣_0{`<Î93-}g\Z*[PCɜM vx1@R54rRjGkXմvqhT>AVE9bo;][~uLkak`.T1 F:8|8+r7zbŽK gF]hkKPV{ ׀;R{uzmfs>hFk5J1 ';xWޱ(|@?D t 9agOW5=3L2ӫq13;pznNavǪUkG+c=JVG~.Wwk8&$xRUHϊ/ЅKgfj$Gв-36@jtU2se< [Pb 0*?*\lRK'C~WAWhqC;wz:FW7; `kYooO`>UL {vpбB/K\}? 
5@lB!+cCV0U8(3{mS):C;|I&OT O|^Y c\h} 0OXX+5FڨX{*"w쮯",c 6su&P R1.Բ}\>^CWP7IdڋWtv89Aou'1غBaο'2XBx*t 鑁߸3uXW6U{O'a[BE۝R!%[ r3Y7D^81q]hkhǜo*~]Y !s~ʎ9{;‚AXҫ Íd-c'YMyM)D0,]Ь@cOetEHzP7Uv5*G] Θy o ,mxkJ^ɰFն`N9Sl+`܃x$9~@L2ά oD6*6Wt`ϋz檘MsYHE^a`Ta`,H5* Uȑc=BxR x ėw:d-O0jH8 =}h>@@4bο)Ib~j.KS<+vvhϑC;coWӧx^U= f+['1rur ȳÞIE 1.,҂Ӵ7K hC1 V/⮽8~m7Qk'y|khv]CRְ ͡zr+4 Awt[ͼX=Sc12 6;?8+H4K塊hiexXxVo< Z ׵ c=XqŜ*9AMC1W3ڭXV:vh tdz e`4` :LVoLȌ;w IhBym7aYB*s>U巹C=> )Y=Z%F'btR/9`l6o^m0ߴ!%ghλF%Jg9foLYE~!ֵ!,(-~f& >R}ZVLjI2Yvw֞A1~s8W-I;|\AP:OcX:]~qʎ9~qvӑ^{<m FՇ!ɥ6]"L h;ifp>Pzyޚ6&<_p1u'}J0C_kDOvusJ|6kžW| {~n/?p8}5 ܮԯXA!%!t ݟZ'2vXSevjKcql:+s=J8U|8))G7i8 F >x]"-܆siN+pW׭+ms5:(8~zҽtwNgbj: ۍ%JPH4╵G>!ޮxB4NEד!Ġ];9@;ݴRGQ1x!T B ?T|C:PáZX@֙T b"܎9aw•V&k15(?Չުv+|}fzN5` ;ݐfCIb20b~k*:D[洱ceU+ǜs[@خdl35}[g[,o%>?djbpK1%L no-poIh ޡ^i޷ay4# m@.+a./},2t6ٖzyo\c_ϰ'z{Bϋc7d0 ԫt|L.7qr$5 rw˄#*jA, ~Du]S^TZObk2C9qc(Sc(WP' eѥ(^e4 bDZE *K(g0s>K{b99t*sM).*ŕaeR^RUph2?;qh(^h4oR5FRs&Ka79ONEF4H*co!H= A:|=xp20 $'LƑ;(@B !`B3Xa~-'BYj_[:οyUNm\h(2s]\9;!Yw U X3~]0zT*-`B|6H%k'UAs-@!RAsu&\X^/hkui:W`PL36LfLާm+6MP$|𬴋wƮ0JD\DvkxF?qFu?c5<UrNƥ;WܵSg8v\N9EKg﹆Y?*N?53bd k}`X:77QE,:^1[3[`$Ehfk[ujLŠ9XϘ1y-Uo%Ϛq1262iT;^`Ө4>p I?G)AjIZKU)vC9\6\t_BMψ0byb<#Nhߍ9+Fєկd&>;wߌe]> U >RmI~<_#ke(K+*:zwBJY+zk>ʨhsOb& =; ް/:DZ3V؇dTb7X |f"(K5 D|$*up p;Dg2N8e u[@a. w#[aw-􂇦{]>w`vJ=J^E0廌qtf1ߏjlFYnK7x,0e_G-)&8Ahڣ:M=F]c"M&@Mʑ 6ݨCӆRfh DC~Krp 4Ud=&H[U%sckrҷnv26—u2;H;wchiyʾ#8i? Wοp gG_,W|trv;]KH-R 9f}Up*!=>ÖO.yBN{PehpZ6KTi L8FTl2L;0~|;5,x=%?ҜyT<2<2ͣNk&5U\eWEHgV$I>" R]YbUX3ŽF)Ӗ'`SBG&iH9^x.sbi+ ] <~AKS_qgj @~q12փU3P2r_9üTaƖ!g2`nhL>H]86#FF5=Uuk!BdW&cǜoTU4uKi {1ۙ)B|fU,y|'%߆U&߳0?lPR[ cI n;vA;\mbIi[lxƼ ǭN=jkFƞSXepv|op0]Ԅ8^Лev~nR6bT_ b.b]_Fl꾌&}ǜSf;3ʼTal.vI]Ϩa aϨavڗ) ~9VEx\OO ftWܱtWܡ뮟T@r(∎zBIic}MBFͤb.zF#GE5'V~CAa9B=IJYѪҌ@i6pd9wI$\[r+fqpNc0weLx3jv?;Œaz}iasb[t2F%5EGfe1~tm"_Ve$ bJƌcm $e ~=66ϷHW#߬J1烷U GRٚ1cOZ)>+!'XVcHEQ• y.,?3|Lڦ\>U_#%;E'V&x:kaUxD7Z3ڮlGYBB3i qE%UA!%c8F;K IȎCBvKȾ}bࠁ䡁 &\mDi:r?֮$qWS8w *rC-R d5 R@ U۫wc;H7e?wϿcvs;"1f M$8. 
z68PTQR%ƶފAv.`ѨыǪ2; VֽaLr+>[--)/~iZD@n+w]un/׹^s׹LG&Ov*ؖ$Ze#q>nG:ab\J|Y<ܵYMěp:8[đQnH2kHl!FuF ҮF5F\aq`=V V04юRXKw-.\4TuT>TcɎALAƯEy08lrf uul.-.r]g 9yd??`Ҿ..g Y"dڗZM%n&'ol>8s%РH+ڂRKfg|޹Pw6Qc%h$ B Jk4Te5Q^w{sLۧϿ.ko{\jq mZTU_z( !2fq:4|>46y;xgJǛd\t5JOVjܑWmԀq3#OȪ)O5G|vaE5^[>Vuf=lt|Q{{1"|jTS8N3x>*׊OdG t[kc5X*nbQ'c>iU['IIVu"Ew𤮢+v*W UmFAa.&;w|zN B'& 2?v'ӡ-PvK#0PojU6lʆ ]h+?K8lx%@3M7I@wӅ0fJq@3%, h9_`~o\`~.HuYPX_bUЕ^\Y]|}> mլNS j= afO>عqϧ&|sjA#_6͈>旨0Y0w_%c'TC#RN$yݠNcZ͔,^=O}$'[P^9*,&1GSʨSvwӌ=`mM-'@@ !,A?th@#8cXI7Ml.i{l WZq(QĩH{<.k(yY;etu=h [ʧiӗ)ZOq?}>LY|fGge"itxlο,]>OsDo?83.}yK}>0O&9#dM[ 'Lf!L:hjf)pX*܂_UL~I8k3*^ xMλ][yjZ)`ݾI `-O@^eJsk(Q>&P}6⧃Ў|wdd.w3Fa^tGN/޾g-/rxC|CLܿCCChE{Qu"Vv-s ~cS̀X\f[<'T7A -< zJ!CSS*SY!; }\W< NTA+e@CR=pҹ 큣 ]gs:Ud)!&%HMJX"%H=2xdѧ@ENJ8^ׅ߾ۅN2#l̨ԧJ8ߟ- ݍF;t2=8!'#Tu( g|{N2qUժͣvR6p(_#pE` WuZyu*Q]] `Bq9ih;Nh4,$B ɬDY4l!@]IjRkqZ C-BTIcf4TсhW9'ȉ`U@N\;(]\h%SP"EZ#f:IG^ōgr~[]EA=B ϫ3&VcoYPRj֟ jv߭SI!?9D*%1ky{q=vc~v"R3 )dfb.6BL`¶Ġ⊏紏 Hf(mMmaȎ۱U+ D324y. w>lCoiOD'(7!x*!(<$qYTRot`_EƬzB wQu;/;uKG dCC>I+-:bHMju'i:mbY1'.7O@h*.R|% W+(UC|w~πjkkHinl܇PtCgF3b- sv$D\sI_qg>WQ(ՃT\*Zn.7~tmS,CvE~MC}ad>)ִE3Dsymi-3z9wNٳ&Cp7ʆ|5- wsp@'Gt?!*N,>m}g#Ee۱7Ӡ 2U>֍.~}Y-]_Jח|]j/* QQS}{G\ vNg/UM`Sp^۾m_}(gq k+3*WǾTzm>֭X#\;"m9ٴw>VKFgait5ov95RN gĩCp: WbMځoЀnrXTLڧq(m۹ Π%ηտso\G8W~t!qt*X,& Ңdh 7E:@ zdڱϿG޳/5~a]fb-X DOBBIeG:룝~^-l&AspfClGg=tO:ݍv T 8J+:|>]bEz#=fGf%S!:R킐j?p4osic^' ObvvX^KƙϿz!!om ي|1Dl@gdxYAl /5zfZi׉$ PV|qrz"*(Qu>f`TNyS]ꤣ62VYCE#BR`Hv#a'S:<# AMʆMB);tgZ>k50]ݤ4G kaR*]'o!,bu5y6 7o>sفPӰx4jHBcfx. Ƃ9qǔ~ ?We`69]e泽b%#p(w=ԛxV~Mrs'9S }YzgS͉@ 9,f944ZVL\fTܫ3Yͺ?JTVn ls?㝏U;@&mISIygbK]WϺZi+~1A+!2ę`d ڱIz8&ɫ5>nF!-s,1sGjBޱT.kPkP`4{,#&$ SC;tN=+;Ɋ{*P\1"@Qc5q,91?(Ae~qxFS&Ns8\o ,\LiRc`gpiRR!]/+}>.-QOC|Run<&hP19tF> .p~ CJ_Vp_SPU sIԂOÕ̉0g?gH$w)Xa?>s-iڥ̯wonHSQ? oo =zߺ׷^֨Oߦ2]ty3/!UǜK? TL ѣaG/_g-LIȗ,"+7 7]"5 ?TObd̐75c8r>,'A-˴3zBKجoi֨Yj!830._wD^K f$Ϸn{Og>mƿ`S]:|w*j7.E1-ߜ^g /(x~boW4Cbʺa,/'ʹ&{g- TϿģzEj|-%eç^L8 ! 
8 Hp.t(Ƣr?<*}occ0{P9!C0x~UcQ.߻9tٙ*ry}H^ԕŒB[/##a;vจg}I>װa'axiLةLP }8̈́K8G9ݨCm)0@7%X<>`Rp7NC9ϳfʳ#|CFjjvOְ;lAw.(קvD,#˄]'1Vė)߷.ߊ;yguG]/@-'C%|CKf {6"V V Tņ\-bA*MqWmN`UJYG9x܂QKL]er`҅ .̻ߙy+GD|-Ge7T7Gtbw V%5/'~9yk kҨ ;7^-K %[ݽXk}YF"/ WGG*b&{i-s(,u%'= JWj]4@ߍ)L(v^Ɵ: 3CmbCt+"@~$sHn\_إ&|wG=bׁLnj9ƬF2{.õ/ӿ (D1LB >…'pPq PϟN<~οCu_M*4&lCl57;AnƝJuc&|)pך1_k+y*q̧q?~K.${]cKtp [+B;աwkj K5Pj`ߝ>[g -v;S|1/|Q~BS!~=29WĽVz_2#S{~R*ۣxK'ciD~ste.71w?>SӮ:ɝ.. TS8>"x yNmi9a⾼T'{I[uӷZU)|VWUR6[EDb.~ \Gc |dq(? &|:wst~p}\/߹] w?R,D?az0%RU.`5|NǤܛ 7~Ʊk_B2W(جY5黣zz|*V~>~|k?~O(q1NUHÑ}:=綤s] ݃d%qI.]rΗst '{lZ'3,3ƘQVjurQת? O19”)8SWi%w(FS@Laڬ>fgvēCɡ=.׶z/1&(Lj. ;Wt}We ȩhOU:|R0~4?_ UK >ԟ|xU]ҞEE*M_&Y]woew]%RIz,|2sND7OϷ)d 4dRp&n({?R T.=@~2fo )Q˩4)4&3 r)Z8ϪlUYg:\9d yޑO9x?7͇ h=̔r& #t ,2dꡈ ԓ-A3U$asR ~E v dk I"/"MVw\z: Z0I|%Dipm I+>oYF%giFٚFoM#H U_׈R[@LgBwJ8 Rm%qb|(JP-89䌲Fu>D,E.ލɈ"߱nP92`90ڦxKXOUt-WE տ4OL?)}A4UBc F5bM&Uߎ}>7ykЧepsٱ'RHҪ=Gyj]=0]#.;K DVhtA0>FxK;%]Sߖv=!Q Ȼ n AuAz;>I밢z䬕i/" \E[obtͯH_@6\\BO\1[t0+^uVtN%1F&e(" D*ҤuF EX 1xZ?J9} Ѫ]9ߵjK\7W 믩kqM3g(pH׃;OE>N;;;oPh:j$cVW yYItAI(z;P4UP.EC=/ECwkzULnC] 4 %Ÿ/ )<;\uPޡol~ãw1El祡m0S3Rb~p+9r#01[z4[U .C>*Z:+_sU:0ѢJ}b_?ꁉF.H[˷Ο= TC`T->8T!le qs]H9WP|RXR^H۩@Zw ;k#۩#BKGhNYkk?7=<=7Or,slXlBXz{4ggwxM>y}֦(;[@%,1tAq5_76a:W0,2+i>QԞaK*ls磰ږ*kƲU .$;.%jJ ޙ|&'7\Oߴ-ߴXmiAΈ͑kV ">cO^?O,/.v*HaK.;}>d8mA:)VɹG0\T|-*|ũtVF|T1 tVWѮn}1٤ Umw+&Y[K 'lL3L{S;7O޼p̿QTC=z  фh 023{r읗co{5,PҘ{jLk95`^y-ݰ|+|>i mU0jTB(hjJ3v_qSaz5ʳ8Ы1yfE7̼Ri=dM2:YƸ=d2LaSlÒqr3C?*xӥRJ9)h7|$)w]ޠߍɮR"\hHkuh->Oke}n& _WEc,,f(aעy4`--iф4RG!>1r_3,Y?weJaI*$Q2.֑ )&F:Hbf~}I5R&SH=V|?6tD9C q9jSe;Hi/َ}fC@Dtkhu"Nci eA_2FA']N`P,h^ Vo~•:1bs4S=&=, wUwpyx5ݎJgthpCi&|;ij{qxij>/ѝ(r 0-,f14ج-+JҸ;FӞgRAz{y8wtk8ڄʓ}xgmǕX;;.?b;MOу gB>%L V*(uMTgy>⅛.tƃZ]Qȿ PХP`ZO E1]sz(u Cx'Z-2\jx!QCJGP`=c>G×GNkTL`X>愔1pDsZ?.#\߻I[7pR:פunќSRuUԻ}ox/o/&GlWJqo4y&W&X ^87Bh}?y(+ɮATF7Nn"L0xp}>Ax|=/^~@Gp't> KB9Ѝ5 ~S:)!袢$dTl(rWr4jS.zL"$oL|kEM|{:D_Ӻ*U|Cf'R5*Ś[]S.\/ S/QTF-Pg.H 9Ͱ(9iZFTeSB}Tr7cH7!<c*fl~|bxʐ x( fw&8 yy|Z|Z銗멜JBj +S6I4 GuKfC7E^xw)j@# +ȿCZ>Ȟq;0bc^ V%'S[{f:U/'FĘaߡ '5Acu'+A? 
dQ-XƲuPh%s ] Ytv쵛kPK}t;2݁R ZiT8OgK*w*˵<^hH 7U] .wwHT9[$AaޔcEk]4w3ߨ]5nq}<ܼ`vKȞs!.#=w={(DƧ?.Jռhco_x)Vuw>Ae _t>f5(.txz竧{uHN `i0YϿ@"(t6AF4S hf.GZyPMF|M)lBy68P[4xْ9@Ϧ)Qy!_w^yzyQ;ݽr[.wo=^ڄviKA1("t@W3|\2d!%V~JL "+wWt}RrE&{uqYfe!þL sk%TC c%ްTK(i.yjJx#S٩F֐\X})(0 'g4৻|HۡK9@8^=WhaX*I&w>"(T.D}#*6;<@iѡs<cCc2^i# ܎aƤS6s-m[5aV.i13Pˣr͵+tѰU:JzR͗8xhATլG$(@X. H& xkCe(M\s;mRAjH6TImJ;E˾Q[Ek>V~YeU\*>;L<42'(U A+Mk]V?s%J9@@tX9~z9oD)>9?pCd"Oi.wT?Mie镢={T y> =\<9>xJ&'G:AR2Ywؚ4H'<7h UR~!#+4iBBg# =XFS e{ 򺨨 gh/"*z%4^ȉȃdwU_dK]Imyk#\Nq/P<4;@ep̿bɭ4/0.='5Avot':0Rnt5^ ^-Ag+ڜ1ӷh_sm5 } {N;cx1w:Oܡ[rc$!; k]A׺D ^x<'Rdyk?p,\09Y h#_B65#2+<1z%cZ٢wYoj&Fh2cƽ3}DGQy4|9䟜Spb 8[ r]5`1ԇ-م.)(Hѕ')tЎw[:9 up}XejUK׵j!jIsUF#biIw== h\q\v"ޘ"8P"XQgeşDR){Ovp;ܑCU*D^u }!(9h.w6XsGݣ}>1D< `GRcG eqСށ g2ք}>[h:,la ฿G>[ Gdv3IlA.q.w@aa㱼p5Yv|Wk<Bd3 kEχ0` _Nu Qhe5M;@\sn2?U)E>9uNB#;0f4Lv qdp7((O4I<^I;ߦ^A{Juv蒟CkB .v"PeÉ+ccD?,K#fҶb[SEP;tP>ugh΀yHiYqRH}U\hnclŐ:cxCyPe/DNQV<Wԡ]|C) HbRHsPMzg.c~x+>񝥮Ԣ,΄*V!(QyIDzy;j<½t𝦜qNgi hw)d{GϿul.@ڱW%|G}YKf.:'NM+Wէ9|;yHŻC_Ta<Td#_LhGOB;p@obQzWtTRF^n&PY|NcT6BP[˜Ne6ܬ[A%J-clSC { ,^w1rmo݈%xƁeͻۼK οDzο([Rwlޫ(<?F\'uov`LL?d=E;A?Ő#`'yzȧ3I/<,%(r޵:d4f&oCrA_wrhMEsx"4҇#Үxiߔ.BTN()/y$EP\!$bM@ $XS- ꫀ/U 3gV(dzӒKpwȾ+S, Fo%{5v}1˓?h%ˠ%.bnPbaMcT1ֵ2\^qSV 9 hJmR} Uc{f@k?L37cr4dhe~tWq72a!33i%Co < I?Mx<\넽_1͘4sQ/ӌy!˖kk;E<;>zIđ͖J-u. 
"9R@OSb/6/،2IHY*v:`"{g&H7Gr~ Gl*(hPM-Mv|Q,+V-dGWHudTe@f:ޱϿg-%| Zwk](yM0J@N8sp' t"(l4Cl;я0ewÛ|1f]V5\ώ]Zjpe!%:M;tҍرu/"@K:_ cYrdP\;IW;w} b{"x!L{Pbu:dZPtzTɨ:a1kcr_az mW^p  fO#i#U#\‰xOzP2җJ)^1Q ϵ^'}u3%u_`;|dȻ(bΩ]Ǝ}>?&N}9cR9b>cq5b*g,/T+׌%,x,GWHgYm; "T.UWK??J]ևMJjB`1%AЅ:H0u'9')oNox%oRqJd7{ԽGڹC㏮׉ \S,ȦUjV:\|%.-~i!K s Nĭ:G[ ]z!OR^;-滳z"SE=GL=ziԦ!=7 9Y7%km"kk]co_یJW|u>jv MIsmw|S!aId]QT4\=o~O4f40~9)Q Y{0'5L| P( [I~ul ]2* Z׻\-W@ծ%ă~L%4윣x">\ɼZzM4l &s 4|}`o7Sr e.gVƧ\xyKV1*>Zj^-|~6 :P+6JK] tg"vC@2qߎ}>?VWL͂X޹b?Q-tMx N4^Y¼ aScf`*CN[$?`\{nrSnJ|| q?|j }MBg|7=Pȩx>nդ߱7šVڄ wO=/%̩{ts(cՇ>^{i/XD9I̤ANj#ugwv{-+2xNw8;рrmлg;7Ϯ$%+LjQF˜I{*R"Mrh?e|H+S_n Ýp <_η\Pӂ2,iمo;U!{*dzUȝϺ/7|TkT)1FCUVf1Oa\3`Ux +0]؀Ur3 I-+֡euiZ@ DHSX}>aK MɁx_ܡKY b2fPę,68Hv+NRHups/xݬ;twz7@HʦDL8f7YA)G(0Q6 |Zp cDcc3 (hˀ8V45k[  wnS`G9s81w(|2j΍ ቌWNj+R(c+ ب(ne9FGoɂG_п`W]Y9#`v7 <\oԊmAp$Vl N#L ͵ZׅEs-3|Qp \OĶmrC&?ʪe[Z(R ZhR74 Cї|ѫїqiv5tn]gvs++FIX,>LsO"U} d>Ν;ypWS! JW(/Ry;yޑ8 ٸ/b$^!̿q ֘똱s(#G2=Ab ͺHN&<+Zjh";tF JCSW>kMI' ;t 4= 'i\Hlw?2PR; ;AhVb:AhYvU1h%f`Jk|k Dί;FT|WxA]r4^0@w3i.cpeudE+#i2=%Dvr**н0}F8w*WD0 x/hI]M=58 ]2KI+գQ{K}5LjJQjo>TB?Г]q,h$BK捐A@ފ}TG-~a楆?'&=}!Cm[֘O,/!)1 ZN(D^|y?5ϻb싀q)*T rMf([E flR :|"8S3J$ D>x0>Ɔ SZ؀9@rU|*|p]ңƵ+XEf H_͑\OI{oq-3E@OEOnD__X蒄׃ڳj(;tw!vR-јnzUyn( Bć*l7^{| ޣ38w~!>u3N\ME^EЇ<^v>/az~hu]U磘[y0Z [+vs!5S-}QO +N!{Zh 08G*y>ZPJ휮v'(%TQWa/C3w ]J?fӸpLv|+/zmmx{YORv9%vR8!%פӦ(Q-H|C&~Ju3Gn27"6|W}9|m!>%BRUCR'(>PI *ad}͝3G?Ο 8T=MFD-{PN6;"VR|>Z&|D\m)ǡ,stю}݄WEX!4AaWdl l >4./#c@KPO]K' %43lK C| XS ǽJPO&F+! 
kۡKfhX#d@%vrȧN%ץܡK&r]Z Ki`#\w7Y3`ZbJ'԰p\~}*n|RH%RV [/A/%tU](MKB_Z ԓ3 H< BmdɭƐ֧]M i߽C1ڕ6  ܔ2_l(ڍ̑6+zUf;t6lv_6&ݒsb48kG=Lðf} ]Fe r A*qј(uҊnWMr?<6ȿٟ2&uDYh>xi\tsE9,F~Њ JF7<\ˣjRUb\;sݞxeǧ2G3ew]Jw%mheg$sJefU-SiWܘOxK^2GwznH1O]:Y YZXhv,ZNN8@\ H2i1/ H-3h,sB}[g>7)Oq@'/NM|Ƞɣ!4's"WѴB|yVoFx q^V"X]xZ.IKˇIˈIJ&J 2V{IcrK}Dԏ)U# T%˗4MjGrڴY?;*|X) 4ITxfx$ Hw!7-#=6.v=V=&"7},'4ͽ1(!|#p26}1F$= ]3I8-W$x!ر5H6 -}p[]2!QEbT.ס] hm+Aj p>8a<w(+(|3@}OUσm?}^-m)hKJ Q\DJH/y;afdYt骬|BPoE)Kdg=mOAx ڱAyT͑n'B|Θ!&ejm:;+n;ՠV۫j{> I穂4.B|B"!vr(e4w'F]5uQw+uHVNoqMUox{@Q0,Xzr9^{rE0nnl!3cCp-s+e20a)ӜnpqϿ l-+^:s T/ptB(+SX`s2]jT9]o |sW]:# tiDl^oQ6T) a:a>\[l\wJ95B-+.'Ko6]֡- ?`)ڴ HC͘vj_qn d9kˉHN+Z $.khLte+~ЕݮT^Ŭ?yz(GPEZ1@7(Ԡ ڇ~vl=tM'*"z*a1w80GKVl}x*.J2 ];vp)=^y);ߎy4kg54dZUDbVE$ ʏ+ /wOYl'N~.[U#Tl4Ֆ:&Q7 V>D,n5"o>w.xWx iO0cāOLy݂d >ΙjZy|<|H"J 0A' 7nwx>ATpOT+!yf5]^`~)ds}؝0죜pU}>بS t}=W+m^mdo?HQZIh0E ]w]oBkZg v^b'$~\li+( ?G >G|==?@OnZ|tyG[GDž>:hGE+&A}>1x>wf?v⡼9%y8߀}w߽woo LJ&BdԀ#օ˰.^N;<87=@K>ؐ ,Cxnmy[ Ҷ $tuٺ CMi5bO%npZ nG绠fzyH  կc;1U`!WDI1)Xy=K̿/*lGXgF6gc<ӽVjUtη~l|?-VO7Yӝ8?Uc|gd7 ZRVpTNd܃#ؤ:'p}'uK tb?nx3$B+!Pd!HLh>P0-҆ihpϷ_/yK?=ɦTO InI<{꠵Α" "C'F2:FW G!(q "`>=f#Rk,2Nu4fmlgLnJv) 4r?ȑ cB31Icbޗ`H@C%Kwjʉ;h|465{4W1>6jͫjZ++ٔژU>VpP>kZGyi':!^aöIh9>6},Ɯ9;'1\OyhM6 +VGFb/ ls8^ ~xdPHxv!gR;G쓯'Q5ӣM+w: Z>lDžA5AyN=PԾ&G܁JZ`ʘPH9g.R>m |9^xS1n3YeGj0=E0=}=zuQ8]Ҍxk5\Vq}ݵū{r4c俣?-BBe37уp4m0l`%֦\j1,gO,lTO)U< Gdt~S`^#t߿m<88Ósj &dx<4N %DAwh 1u*ؕ, d4RMqӛCI!c S"8'(3Pi\l|dݓ$?\"k0\T +{Փ.DUNǹU}9<%(Bypuc*߿4s)A/*'}`xL?onRe\&-I9)Rc&m4eJÊG`ίS_4 +7#!˪ZH We/qH_>XoYHNJyՖ)4C0g3ϋd=3@8΄|q&.dǟw{yEmm63Di&iz Ip E,@AJ Y@z@؎:av0d/vvxˣE>`YyT1 \a@&0o3NFo]1+~VGt"tg8; ūކzh t*%paw蒗ߘ3Q".zT;4j؋8--lVG{AئRPՄLUí@=pQc\`b`5]jjͺ5]4ޚ.o82rkO+2d; ߷O'yZ=O7 i, -[;PuDz8.|fm2([3ެ˯׉DWweC_AS(@ iډwZIe%d\Vu ݯcmf㾚v?S0] rK0@0I6uL\=;¹zc2V[V[?3g(PPPOݯMk^tcN5\toYO@\nD].(eB:Ubn"Z9s烁'(zj.$e/K >;@-BGRk|v 4Cd"ϊBxp"R-9|Ԗ*8{CIQ{]My Ab!5` u.qϿAA r @[tXhnc]뻜hPܬȶ΍R zy]ȶq{ &nLArJ*4~s}z%Z 0ڡK]\0FI"'x|2{4"L{6CcL Yu\a7qzh cCA'hqs{^ 8\ (\ +>֣mӗq8vG>)T6!_I;^8{rE 2!cqn77+7$k\ٳ7=ڿO@p&JTm4'(}@^}>۝.S ŝsr9w@us5vڥЬ!ߨ 
[nfud%SNČm͔? Ess{rDTPl*Ř*_sb/Αb6G>[0˱,Ln+{cTSdK3(v BLq |h\`āJrP Abc/1/* ¸ Ca7V p O^K1]+8kMm"db>*.l좶pi3G< ҡ6!}{18C,Nz`\&KW~lS~;@\̀]ic9 *AƋu؞/Q7m[&hK]Zy42G"K₠w/*CKĘ7+b?藧J4#˽lH{d8ˉ:#+Њ}> t{:=#N͔RܐW-퀴.hΰ""Q MJquiϿүˁ+Jc2vҥkVerݴutFa9@aq}#'̭VϿYɺCXiF2fCg׊zo oK+E'(.7A nF5 ;#]1F=EI0\W| qwP6hixV q Fehh+&BU2v_4HJ|@җ4C$Wy=tC05(T ׈nWTbL;ϡcꄱ>r7EzDrN4d%sO؞ݗ'诂go hRgi3¿= uD҅m6jܚ:ݘGϿ;.A)*rGEGqX{v`i Z7e =}G|;(@ pGk>~Em~\Iy NJxz]nalSK+盂Mxߑ #bP ) *lVd005{ rFZiׁjxY B@R;tϗW*_g'.'xR}B{FC|=um˨k~BO8vz+@3-ăk>!(WW3IW|&)fE i'5?]v'YKD߿M1E=D",}մ㝏whPy d(u#j?~}>,'-kJH;@ TvYTCPi&^/뒩ߋ\f(;&fX}aaaڬta1ъl؛Px$^1xDC&)|7y񉚧OTFo瘗1ї d ;$E,}͛}Y9oje=ؗ`Ӄjb0F$\|O^Rp2iu-!0xjp1fA"V{ؖ;hLH<)ORH&BhXJ@<͡n)lMbrJ)l|RPB ^'O,-x(m8Ż&"|b6%C}͊DVԙ2Ί2̊2‡鱜ODOV*U35i\Qc:@CM5nĒ=hPl[`K/DŜOb<+9/2]xZPh I3}9duOJ:H8zΑog7apLHBZWb'igK DKP"X`c}ǏگjZ"Vb4kŵcO3ua5W#ߨT'RXώ]- vږ`vyŮE%8T;>ɇ.ߞҏ75`=GHdT"}ZgaWr+aG>gywvYzB?G֧F訔)Ĭ16FdN4ϗ +#ή-Ts `!)ErAȿ;A&( Ygn0W2x}4\ڡa+O*zݧ++8\ `r gDٰpW~5 Gjoj9f\C%-G5ȴZϷ5b:R\(<6 AjVtʂ}Жgr;)+>*s&G\󟟤^{ LF|4,ǰ3D *e}E~>Q҅J/Tx%O{Xb}K%eӣ\iAP')㑘"H !dt_2WȺʑC>+dZW M;s-K$9r{?L Vt PUYKKk%22" (TEG=B qm>)gSC0L)ʊ8s1Kx0y" 0c "gNeL5^f8?2'(HFb6d$ g$BPMyD=(&TS2/zq`vEñp_4moH,'Tj31yp=Ɏɱ}B0nRPOtI,rWyK3`޼-Mэ |ZES`H޴^(0n=B*\隆Y<~W <jF@:#MQ_3%ǦQTjn`n5ӆ䯓B㺿PfG8=#Z}JI"O/z"?#h a^YWR4E@;g*o4l{[2iKkTw o73Gp@AMGZǛϪ$yt6zC)Y+y L50}+ۄ<-SYlXb1PTәҡrq%G)FJe排#l38{$ABiݨ*#jq&AJC_ 'v2Gb2I&AqiQ> #:ͼn̹5ʊ*$Sh), t nzx_omr)wK"OI2DUǴ9K8P6W-Tx^Dϑ7<(LWO,>ЀF$cjE~vww:gx'/> O%"@Q,_w^_pX)!)m/L&W1^I1&f?9Z)iꋡ&~ʶ@@kξ=xx|!&Nho3ыM )o?UYA+1h6#L.nJ[9Uɼߐpa ׋\0k$YX5 8*  k#̼i%k`+&*WoPNPrfr;De%{pW+kU&=* qӲ nH?m<K 8뺩$[7PxpA DHk}I@@R#0:ʦ SOj7P%u?NLXIڈκ m!-)k%$VDVgاhl-j1[3l@^4GѰOWD6nqw-rީz>J1iLdeX)QfyQeFr$N&\yDBOXY&§zoƧ\.wb"yU+{%Y4D k ),s" NDzY.v*f我ar&&7O^鮚Š;VjV4UULE* Xn^] oTBy 叠bkhcjH4,rCvwϪkMKqQL+ I$'2IyiNQɱgdzHrj83[-T>(>ʺ} k?0σw%Ր! 
6?8o}$䔤x}'%Z.G8o.ˇPOhN5ߏ T~5x4q.Pq"٦>84EL ZG " ǟ;.c-ŔLU|/UaPi>[c:/fhImnk[ ML d|NR[M2sjR_BrDxТ"³(Nz,:`8Ff]l}p9O1R@ۅcs q9+4Ht; {& V)UmoGkgV)Wt\:cE~-Jpa_ p[COFmrP ]/ h-˭-cSq%A"uaՒiNՒ W/϶N2 OXgQHf1!WѨp:nhN}p˔\49B*#"&^IPlGEjqilHTK3Հc<3 %1[@oHMyp+t{t$|*qYetKH'tCT(JKr\͟e ,cvLfm%de]'n麊B:عjUǝ1v,63'UZ +v4<&;&?eqGڣԞ8c1:ז=EPa,zs f&1C4@Yן71okH*&. Rݫf1r*w߃cΨUB >h&ψc<_aOt'Wn{ 8{Hpxl$1K%I C j&0 SJ7Krծr.[UfJw0Oϼee}|G8}lV?r^RX-[<_݆QU/@]8k {w 8p}y6e`֮Eq,ݲ皞8c<3bffgHT90?A:^a'ILw^c+Kw{Һl,;A@4y^-aE·g%NxęOJ.%/wN:Qc˃Oz1^z 8JO_ H')/kVuj n+ZS#D)}WSq흃coDA}'Z{` tk}Һ˵3@\MOb7;)pU9>Q9j5#Մ#OOSꌳC4\AOһZ>kAQyKeg| x|DFe3lz㥱cem ^[:Ѿ aMM.;֪iB6x85yP9o57 &[eX;> P׬ѧ!k҄3k@dw勰82Qor -\*j${6gu"&NDurt(6sozQcG.㕭^|1smR-O"M=α~\dj2|t)alu1v?3n;rkM ז^[SshxMu v1)2r?O厯oUISܰ_%]4#U+`ET;3DIpj)l>ӈUpwԑqR{h#s# >|x;r⿙lr>4ɦU=MyeZ GRoQy𫚇D~ՔtRS?&cX/]ӔCJsFrEx~a*j(X 8=HH=,7r# u/-bK#g\8kLIj jFpՅo^nU\*c?u$(fiLҔ1&OSccRi kϔV;6* 0TQu,CEU9[p׹L5Ob5 x}kUͽ܈rwX1ޟqJmrK%Vfd╭kLAzt..|/-(q _݂f^-,Q·Z:u3N.1$/6d苌3;ΪeJ ߉; gR#OVgǒ]Д.'84wUuvnE~Q-2CQws x}r*[nUYKnU_ђ2lj2;ΐ981bEļeZ?n/~>l9x5ki ]R4U9 2?/~W)sRA<+^Wl_27#G+^cprq9 T˦a1oxs{fo6_98?\*HcIYwFpPA-xqW;wų1~t׾=,51;"N+"Dwe4.z8!pWE6Ȁ\$Y% xT14Rv"I1^;U%3+FS3;#eOE fDӔT,+p|5gӅp;O|x|Lcp;: wjc *q|bMo<Ӌ.g0c`p_aI&hv x :W -.HvF'qB$c7ln~2]΁ES[5CY'dA8N&aYIȚ?uIJm%V& Nͧr*Cz(V]#h_@ 'nr+7 Pv~*U]nG6 ?0X`šOˌl$S #ىq9']+Zsf㛎vXbw Ta\1/?~Vcp0 pWLࢵmBqDjHqWYK ugq18o|Ȓ}p<׬} v l&Ku#FF39'tui]˂5A׬k1^}1}atQ2Uvli$U=`tpFe-l-~$ERgvuzCk- s#s܎]ۆ$=t>d//U:uX UxΨƘ*UnH3% z6-1!<[IAspm>[Ef{ Qf`)'rP&{䠮x#UGTRfH%r"Q(Fe|7"tK45& V+Xצ8U#c{b<j$D_p"1Nä4.3xQ1]yLCŅcxJC*ˤ 4 V"\Exe,4ϭ#z1zd[AAs|둧S%)*`U@)@kYB`bH˭;]|>$b +P ί*uXqw6pv5hyXFjw*$T4_}48>C4` ]:^H:3p|.yWi"aJ* a?,sˎ8+6 nb]݄ ct H]]_cɐt򡕞Xo 7C;x>t>bSCTJjI1O#5-ܭ\hp#Sun1H ]J~_D a3.LJi5Aր,SόS)ƝCVݖ6~?5q kӃ7Cg܋#ܯKu%ٸ~*)q1q[xHD{ GVj8D#-Yi>9IG$h~EL5fs.S]c'l6q սlk<ж "NJ:  8=HGcf|]wwV98&Dݒ2A( g3|_[ƓwV[BF8̳Cxb*`EQT[O{/K8jy7qNݒvRu3QUY,W%1f&a9E!9J1^;Ry)ӄ=:ku>`~IQE6n|b;fqi\vj"=[ӿR腭 wr|xM " I:5s^ppT-֝v}E 5]rn3Rr7Yv>x]t^w:p.FE^#vF9ʶKs vmc48evL ߄a.|| 3tW-a])CN9d^axm';q)ZĬR6kq Mm^! 
]p(t0\{<ָ0Jk!7d:c)'_!Q/nq61EQ\XKu!R\?&kʉ YA n-Q![+ƋYOݼR&ZJ0u[^0kHak;兵d)w ,֡νMPnq 8ƿ_^Y(^*^(O@#F?x= WpnJ]oQ(ڣݹDkv'-j^16}z ۍEr%͐4Aan]$CMhh:T>#_)ڍ4Bpp׆ -QΐoYp+ *"†m܅֍ x[58™q$?U͹&M3뎯y?IjI)VX R9gjӛԸ)v~(45~fL7cR7 bn] %= 8c!܈Qɱ8;y#E-p=81LQB YaL]8Ƌƾ\Ŧ,.E!ja6I}@@N;5󂯭$gK;.ϼ&-\C.X(l3cNMZIf8cַ4Ԋ8;ItuRv$[=ie:L-}amKO w+uϲ,Jgumgco$'2*Rq+YDMa5-b:/"/˟)C;'fXIuT`~shYK/%ɳcfA:eξ|Ū5_K}>aE +;ycoi PuGdysr1^ӴF)~6_8?Sګ$;k:}iP?691$'WYmrbZ͠ pivXirpPv[eac(賒r4OYSxm:?Âk8pQ$xҺUҗXk"; J2?i1ha[1^XR}kL}W17\^$ohѿFz%v~re|b5HeY_@O|eJ_7m6uDimJ$zv2l+\9" dbOr70[tzfHP}ӴGJ `Q^d+yr%<~j[TbPݔ,j_d-R$ƅ*v3 *k$bv|)Զ/~+ԺG-)ؖCbޞ-hñ']ӆn(Sph䴰":;3-\r2!,%+h-7꼲. lܾ5VXBX Zt$'8SZOGCć5ߤxYI,/AjЉWǽm8#bxkOR`VE1EfqY:b(I·xMӒ.p<.Ji>ޔ x=G@E@:S.|ڛ:cu*7/1~1QppWU! dV/g^;1=MQme|?u܋׷r9>Myu`{#lA28ʃD/͢nZ3 =]?p?րı́;M9.%=ˉxjDފrq^#5ΕkS&ux4/|==ʒR-%:Y=[u[7ּ)<{`aCOp@B-aǺ+aK,g5(5ɶeMd+zLbh΍<%/zPnYчG,%ҁncf˷`sx}ҫЍ:#ۤb_^gݫؕ dkmH1^O-ZR8ͧ4E://T^m6Fsw0s 3 \eȜ.:scrG/uO!DxJowz; KadA;э7<;N[ԇ>G1đnXzVib٬a+o62á^v!#.IU˒qFzXb1%43c݁fY'bc>uJ:*}"3-6d ߦ QjB R&QM w8$B[J':< +CKu8۵e3L(M/ʑ[<]GaPg4,QcCv*g:dsui ~4g NI[b^\"Ls(dI<=w/Uېxjr7QWfVC4qo~q~OPO6ϝ]R, <ȱ4*cS^󋣙UWyT3EtܿMlh!My-rj8{mJoXI(~13i Ej',UѾ&tJD{i/1Rs-,j4KFQ#[W%4kj +7|?kkl- l-az{U`  SFSGᗬ!c;(&0q g3O<|wʚy?UwS{8R~2C?>3k"M"M {ShHa؆ 79X ExDEc-:Qd(.ZSh.&[SWܦϪI<#p=ƻ5s[3n͜n,cđ)7 4"0Kf}iŒ/MAo]Li+<^M6kL4#&aRF[8P1d"$4 ej&3c>jvM"WfS1acqs䟊gK$$I͚7qJ[4 0'+m@2=E$?* uZÞN FXg!Xr(p1x-e.&uKNg+8zW5pxQKG $:Q x%紣-(-)mJss~tKs6wJKA'6mO2̾kː"|)e_Qs:+_TpY` #/W~irrSՇuOx_p%Y+3u\8_8ƿ 2̏ъ[Z5j: G4xדS!RٝMD}Plh\Y><'U?>j}JX߷;_X.>8?V{:>-drB^|to2o p$J&77c<TX2l:ljK-$̯{4 9d@ 5˱~՛oR[YƟ|j ӎƅ3uNk:OrD ^8ƿ/a_)V1h^4O1NDC_ ;ɾ+zeW,e< q D"G1㤆_舯_?|}-?^_`:FW4o!]FU_Ͽ?叿2kC׿ \"ob7 endstream endobj 199 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 
Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 188 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-024.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 200 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text /ImageC ] /Font << /F2 201 0 R/F3 202 0 R>> /XObject << /Im0 203 0 R /Im1 204 0 R /Im2 205 0 R /Im3 206 0 R /Im4 207 0 R /Im5 208 0 R /Im6 209 0 R /Im7 210 0 R >>/ExtGState << >>/ColorSpace << /sRGB 211 0 R >>>> /Length 997 /Filter /FlateDecode >> stream xXMo6W>doJnhm !؃7Qh9r[+"yC i$\x>#(mЍBλ?7]%PU(quokP0b#`~ %(pn ZbT>3eVէ/^6;L^AVU):TX!qj : h8vӖ]c%F1aJi nc;BL5hU,)pV}9-pu[JyT%8߫q1B+}6B }ioW-cѶzk6=89m-l?- %f46R ߠ41chC/fFœ[ ϭo׳ĆV*Ab"ȖMw=Ug݃zҥzX]/Y=H s4 <ѫC͠b~tW F B pV?s*P0`eߩfa41" 41g4gCKmf$ZPsF]aٞKNX:]hKG ÐL߷ϲ49 25[+zIш4΢Iw+,ekT6Z4;Ě楯(rL";.`Ź0iqQ9}>bM{ٶHqVsC ODDJ> stream x p^u$$NljmFUeCsaF[˔"J8^\4 3Zar9\. \ C5vNb{̝w.^~<9[^y˗FՑۿ>r럍O;8c_yGm޴mGF'ÿo~w|?`oµ/^WF!ZF^:md?_l'|}_G>G?ӿ=ٮ}{۶}_??\k}ד[߹g!W<5q텑trΰ3De~8OWxL 3t#ó3H2Jw\3 6okxFnj6`ox/~W`ibUg33G| sdnf*|_z?082zz% rɇS|Wr?A;6?# 9-C7@ չ v#],nAO~x>o.p]*&g?aßG>?Y.#/-kN?`tH>ytpG?0`ϻwxK $̎=@+r*}?*\s-v@fx@GG2쳱ÔM1laq<[d0TH'qȶّ>߷ ?M_mbMXx0(6v \Qnu9J-f{PdqbC#T aQ΀O9s:1`ɗ E~~f#5?xq6wx{ <3^ o56<2h!ؔ!L+b U 2$8C=t ;xKyXr'6>;KE%89F )UGW䓱b^&AŴ*Oz7&5؀ D66yBl!ʎ4, j RtMX T8!3oOQT"%?|/ظ=O&r$S1 <|ppu99^z~GS{R\bp@1K#3r8 o\`|ɑ$fJI(`D=XO?W?)Q &}6ZV ¿8aV; n0l< '3g[r'r ^c&C2 Ȝ_<;cr-4rf?@ !Qy}L%?AqB >q/#V<O(L绢DL}ⷚ{G6l@4q`f[3pf_R Jhq6>D~C`qkSYwj^ ptXFORh15L";Zwڮ1hxIM͕q冊T-*$Èes4qiԐWlL¿ ?gT@aJ33,+E 0 7ɫqf|p'GjK y <nw'Jr*ŭq+0PdLYGL=b6}Opi_/}{ %SCiV!pBP8l@bbQ/7*5c}D2bkqR(ĵP<dJc)68c$ 2Z EA fܾTlP LCU+h(qBpc^-)N+6-?x< xa*] m9^4Mh$ ZD8p& ǷOʅxGKdH9; աa ;r!m)aW%6R H0XhH]?BZϤ3s>ۑ`A(K h! / ݷ4="34^1" p:Q'K-yfCBXi#ǴrkΣL#+Ɩ_*u8,")@hqL.# /:&TC h? 
k%pu%t800,h]F#zNy9?IxnGCqc|<Kh / T5l0a37a~#@՗wigEVW`T`F +@h-oT\|3C.Jr-lɗmmz㌿1T]22@P_%8lۭӷܶցa!K FRjM5俼((- ;<,<ܪu#S"`OƁ} q9;*8_~SMX  x%8g3@(W~aK]6'af#hI~H Ra%0P Pp@ӡ*Զa8?ɻ]|>x9d!}Abl7}|CG0S*A$fš2T cS wEp"!ek`j %ç0Oa*T!h8i0 "ƀ*2UzlB J(\ÊP\:2|k|P1L0\TgT⪙fKMVPB,DrymN"ph=CL=N}ǘxerf#!7~F C{ < +Ѐ#8/y'CyOIm%-d.궙pj|"&޽>_?oD x5 xxlyR 06A!!޽y4(jN{ ၖ.Jm9fb:edh@UF]U?p]P5?D`rΙ U;J;FJPPisvԙx.DP+? ǣMԞT4.jw0\͟{cInl>fLvhu P|r$֓'†7ì5-DYf_B !^8^d6 0 O=،w!9T3S_[;!ys'ui/=~80'z}BZWoXR/8p~NGrA5 @ ysj#:Z:[b35þ#sZƃ0RzmpR&2r&w)b i92lBdCvS!,ئ-O P#0ګlm@^ G&2eBGi.ֿkMacƌGn[L( 1 P/F}E§l?Cpͪppm/o:'C*]6Z C}( xhYgajJO#ۂP%Zk&2/E 5<`}͠-hbBEh҆`جո&~$@51tq~4ʎ9kn8X'҉[ H?Tn-eS~.\o_d7"UDkv6J8; 3C,4¡-יRzym0;q N+ӫ&»Sa Y'0(XD3&/G6:>8OcϏF3,=H#^"Ɉ'.eIg=-7CJA7Mi0@b1h^:f[4 _9!zc\@I/5Mф,*%T< `n sO3N@r:"}!\3բɈ3r Zž/ B5LnJA a5r&B3F܂7fW^%fh  Þ\,†"ONJ/7N~Nr7ڟX2}  1@ $h *8;u8 vus+O y3Ŏ"Ncp٦2RP)>SX)Hא Q+ p "5{h^lKx=Zں:8yjrF'Tyeh]Vc7bVwtb `T~* S(&M?iѤpU]Jfפp(bd ͈!>U) p RmɐA4U hIsFLxb:Iga;4fz<жi4@Yd|sާqޢ^\O- -6ɩ$U~;hER{č- ^+t}=Os-xvK B_^[O#̇ZyJj;ү] ɜ {NdfSA1ŰTss!T'N\]d1 #p5REd^xR{n#QeFS^UlYw:;|5#8y)5þQ4 xFw/Z0a#v8!Oxd|FQClTaMأflRHL 7>&-E!9VdyX#Zz ځ dH9g(T mQб 3/9y:&R G-b߄\7Vu=lFKfeT1D;9bLAh"Khf:+6S%3dH -d ׺Č+ajޠv2C )ufO~e3l]^ǐYd `b0Tf=?A] #*]3HVRn29ŬƆ@z!} 4005hmswErD] ; "\$NҘ)Ch1AeBGmݨI9-jbV6VK pVjhV&;1nV\F] b)L351ĎF8y2&S4FeR|] GlHfS/аK!|KNase 0;|oGHC=PFkcHӗ EqnUOLH󧉱nHjRB pmmOLb,+}_㵔 e)59JףATJ+qRIދaxv`YIi#? 
-P4NGQF 0LDSR[>Yq&es*aiE(*zk񍰦@qQfTlV,_ aks!*rYՕ|dLW̲YH= L5'G:L3knJzh'ך *;݇ 6d LBG}+BY[WҝwHy·@S$ k8q~("c~lT^T[&THUM/DCI{㜺fHcPY6_RISEYɮ8 gZՌ] rʫDC_##ϧ.[E`BHiA}!i@Qj5+[[="BsIPUhWe\ȷlt֠g)&2y%Y.R;Lx!gA9f$LZ0e hjLWNNak K9<6}@ g-<ZXc/O+XCTF'UT]iz}"Hfyp3FiRk}#ylde-Yw#JTpb'=66:9 3p-fIF^|bW8xdW˜&Pw`PqToޙ ON>1!beFJy$C|<^U6JTD݆xoFn *ϳA@Җ[>W2)iѤ/,SoA%uYe^ bCSs;h7mn({~#~ίZ5LJ!5SQ#X%U'gE?# GTRڙ72bԏKu"J>TUx"NZ#K3㪕&-"6 qA !$܈hQJݏ3jt/ڡKӨ6%r+ȩzʃ@8E&adfOխ pWxsZ?#b>?[(cnj_b(Ȏ8&EF8U]P.5]N򌫙1e,dU:•d8 !Q1 ^\U^D]g:X ϮBU1b)CZ3cvf" I52kԾ|٦U7vCicv!ʹzJ0VϕDHjQJJ9J'Ew5nz/Vt䚿VFl `Z-A4[k(Pu?)^K3⿦5\Dד4ͪ@TW~ӱ~0QP/_Is`OqEc& 2T^7 &Z=M: う:I5c3k;6 9 $6T%ŁAECsW wE#Q46r2d԰i"qEp]2i%A6$UJ)MԈxj@l "tTEQcjƋla^ITnҮ.p3v&vElHj怰5Anl{'T "(Y{n⪤ٵbh $/ŸR\2 fŵ80 <1 'đ;J'ذҌ!>㐀=Z7O4`yi_y){ָ2\)mPd).6}uGZV Ng󊩬 *hlT I3Гʴ#XsD=4bTfj1laEO}$ 'Ukד,BjoP(J ) B!g\to倓E#kCEʂq'A;+4Mi`|yWВXQX3= a tRIGWəP$5ˆ1\nVOcI.7ä''p-Nn`kN3Hծ!hh蓩CV]bV "  ,}\lf+X&f$"}I\Y2+΍}i[ kdNz.R0{#%mC[Oh\ Z&Zo!:f;6p(UQ+\\ n93{\ڍJN.:pDqG4R5w|UE)#lNMyq^r tӚtھOg|{n C{ {>=% ssȅ)%%k [ ߹G3gFatZ/[ b-2-mz1`TT;"<Lj0Xɟ 2sեi8VoqqK8V~+w;9Flq%Cb>!J rUջSr՛؅VI8g7d2t%e8֊26L_7T`1`@oNhȁ"{. _VxSjkk KhܹY'OͨAC`}u 4<eX*>`Rɕ~择/U\viۣ#saއ/m}Ư83<3^HĒFMc:XTJC NMgICXX-TIX3nq ph15 B<|1|#S̪) 7)G*ɦF"V@Z %tϗw >g(ew>~نT{s !0y*PX*H]b\۠Zm3֕9"z=l-F1M /=>[Jfo3< c &ـv5Ye w/g2t#5Zm"_֔1k"*leh*k"3[==2sLۯu6;h=k[#"5ԃ DKD! 0@߳qOP ZEޮk;|d`NQm-SV}V#pRF򯬔*eCIK68*. 
h,*0WKXaB?M=X1}CIneb6TYJQh]l4H"u_S*"Z5$JB`\4bTH0ev- %&Q9`mr{wvdB!@O(;>Q2PK;P(Ad?QC37PlWX0udQӧ*i {18mJpRY|1,רP)X%_k60wKlYQQrmm #UВS5բhYyfFJי2**[9Yjl o0w+Q,mJ?#L,+.#*`ѨgJtf)3t'Ӵ>F OT|0hJ3Nda칌W37KEWJ'7 SšVXڠl[K 'G( 6q5t_ B"JF[ S(LZNԘ"iT]<,#`1¾gb h)([6ciF Wf(g07n01ݬ~1;"g34䇪-f9Y5K.Z%-KMp"b@iqU%?NXe-s_R]RE43\ P9 e& c3YkPB(q]}ڕN1F@ah[mk#vǢf59AbWݶҫf@nu1F|l#s2C|J{YjVթ΅\*"-r+*ɄF_AQ&pL)\tʢs1/vv&hY!b]^$65MQYpp~,vv5?nEفM7&poڣƤ|څW'AcYMjlkb &P83\;%XJFʑDb)9#L)9֔zsrfEZoVw ]+KbK^U g7 e)AWS>uRKbAVg15}2Bu8.8"X# VY?6W6D!jck7Dَ='pcMFD*0{ sQ[0`17@tLT2#D3&ŔW7v3' 0b^.u^.39Lhy23Ѹ^L&ݩ-֭W[p5..ĪHŬFw5|h{[i"4rC~wmʇw*;!\Rhnӎ=-oo0xe\#%VAA W{=+fR5+Zƹؙ*_Jb@`Cdew?X5٪KVl .\kv6Wʐ!p5YQ!N2RA5fTgobljC'"-nWT1kO.=* !j谯f\nuד uO<6pDCakICKb[K܀u"l5u{J0%nr+eЗewX˹!`]kԤ}.W@Yrư1b )^nܹY$>_jQ:܅y^HwƂvCN܄Ҁ:m᧓0Q ^TRb2`*m* (S7rָS6ٳa#(oJ'R(ЁnrO<&@;kv_̃sV`dnVe.$T_2{"rwJ։b֔­-j\[ nS{  "-j ҘrN}I Ӯ ,~y9RFs[J 6 yFbɘ%=TuIQK$վCgMuOȯk(T +rV:x Px(_ }aوjkD)fp-ڦ !6znI |V:B.OVm+ W}&8չj)hJ+ic՘V<5Zjg%kig^Oڳ=㣿Ц>ȋW2%R i_I;NC8FΩ,V{*6B6j/lBmY @SYga:|)Qؚ g{<åkvVi߭ =lD*|Φ:6.e.┖%'kQ |lJp].4|5$/fC9)&ׄo 7PIXGZ†\`#fXj ס?7S_v̫-2KCt#:PƧ1=[ "e^O>*~K-5UȪlZX,CAU#J+x}mN lgn5L 1k,C5 C_hoSDW/eU1f4ouT-VN$DN|Ҿ Ukq)(Mk(ci[l(՘ i0uksiC&tֺv<Ԭݢç%zu c/EU=mK.هhɲ >Ԓ;ŽXָPe`pu#y=_͎&ӵptP R,9sKۍ* p6K_+-P'UkQU"PpQ쑿" ~ZH/ZŮ'ٟ*foq,Er8Ӻ*3(QuGt5$FF 8ԙNjǕVdTUKj.>\ZMK{uu/ԚYЬ֧u͉H^*:mNZϕ s\AeQ#F61lU O-)pywkF(RfxGUhIte.n9}O< T @}AR:sVm?Nr:~c=؇*3(,/ꙛdZ^LE | WC+XBETM^Ȼař k]e3V.:Wcn7dМ63پO 5x^ԍ s+ Wְ*)ޥ23W)J5)aLs+0XY[cAb YHs[7\nJzv4^P:R1<#T'7|,T.Ϩ[]ތz *aQr\P@^hqHsG*P{z<+.eeLO3i c]_?ؠ-RCK>[y/E&>ć^Ӻ?:<)v[˷Pޥf9˨֭5&L'ԌղOt>r%[N$]͋菬TnSP]96VI"kjhkt)Vx \mU<Ҫdx-(բ -gRJ҄.y&dME$"yU?n2 "f5 ]>CMeY2$ȡuG\.6]߯G9ZFi)˫4CaAuA! 
XDXwj4PM{=ORVj)٩[M`BӇBkSt6t Uի֪J.QyVTTKu[M[Po4Zh(uG*RLX3x7͉T\a_US8/%ZM%k@hrjfc?L襝̕YfZd]ऩ `l<".jS 8.aa7G)M*+3V덼zQsi6^4UvqS\Me:F+P=+iuJRX0TfXx?%>=WQĭܮj6-}-SZkcVLh %(j`*֭v$.V?TNҙ~v/ Sh4/ø6䑏ޏ n@xXRg6Z@gpKV/ k8.~ݿ~GJ GYZSM0"¡2"z2׭)~|媽;{),T֙ր,$Ab0EM6~bhKh*c C`eіJv {,_"0eL|Iĕ%1zd36Ǜmvpl(O?c#bW_En\~K;Uu`|7%k~W3t]J[!g(StZ[C^F1p͆P|f.FqQEnf;K'ѝKaˉ0;BED NU,3(z+&(ca6ʯ5G41e9C;9`˜o34n &9_ 6,Y\j}~k(Um1?kowi a-d&\m%r~nsEWK V E*{D*f"tYuAO r4l6D]DAݤ'Es7=Y1j&FTkOWr cʪcTV%8?34cc(A 's:}I߲%+p+޾iL.U?鱪knӁk٬ԖiHb+tj#9(p͵{M;*MU*լWO?]š)%ȝ͜u9ݭsd^[D[L,tx =IS2-Ch$O!)M8ʫ+ȆΆ QH&,0YeyWll5-1ʞTjC~%w)U/eUQZd)N^+LjMy28l}<9+ ,]rJd~Jz(<88,M=:k );Siuӑ՝ֳ+'J=s*vQAaaε\U f! @J m~9=r +׬qs6L:3 v- w) "ʑt_O;p5qiG9og(*܃/lQn=[̢䫾gM[fYSs6%%|2[p۪,R* /g]R^5gBٸlDM6lMl@:юB\5eN-ūx)dU}b+'n].1,k׼C@ F{ĦI7lwt_ =?KW kR/ł)V=4 UդHo [/td EO͚ɅNzhiX;kDזmWv8kiiT`OJB}};jj^F鵩V9]PyfOGzÞB7rAYJ[+QZd ;GOR_vݨF{.;S*bURCW8+Uf'QY|WJ{> %.2v˫VFnZJ`kC>R:KKj)olX9+'RD՞6^,*x~[bS7jiJYZI/wn?+-z2[-KB] f;\_7la^z K2`}_xko[_Y񊎵,!ĕV:pU G!y(G>O?,7̥{e6.]txXFgђt#' 0A /,Nł+srn KdA\SqU}ިy\OWUZ,Iu~S\d)4qy(ow~D4X~Smi7̯x•lQe/g^ԃvl7pÉ&N0_Ӏ<|=aFS,G?\'3. x$0pmW`4<ړO[*6v{em̂1ӥ\M\)l4mUGf UUumͨ}ؤ=TUc5^%hSbz}SfS&gy2J4Z׻V8?viIqK4CD\3W<3yp)'Zo+. >8J,3BGL$+bL:=r'˹R.0e*͋pR [_,bKxi?|8?7!7B^TvG^ͲLHU~1kveIM۴Ėr_5,5".8kpsOg [&r cuw`Z}@ivN%w=iK qliZjcWVĈ-'я5ݽO G'F.$,sTϾiȮ`¿ W7.H^rWҦй\I+#oMثy",llUTs( z4|7m_) tmֺ,fWǖ:(t?Um U!VfԎe/`-Z(T,b>4m<bcxBpi7U3tE(rhWC2Wz{aUŊVBiz :mlۄ J2r ]G,3uc3U ML;90W|9 ߪlxΧNf:-Y GG2]z4Lbb#kAˬZZb*1./߷SXI*` U_h2 'HeL޶cau}.2pE=Ts|-(@U֚Asz{c\mExiLCRnKҊfDv 3avjꐺw2>JK٪M/4jPWaNTKI6Ui5f̽":B8:t]<NL]dB+KƒCH'llt2[WiӹmB@7}[qCǾCL׏T%;`^ yAHGRgjf!e\SUyIDWP`TEadZH.*Gc1l`_-칖Y~ߞ%V :  6 ;ݷ,(X+0fKI`P njiP\z1z:ɍ O^ժVLr U[%n|5 ]B]irfɸ%H*Ԁ:[cAs./{1 s[*Op,e͞xGK5d/'\d~M`J7.Tѭۦ3Eo۵ՋSuEIƩx*6q Ѡ~"WĴ*9Y$VWo! 
]rH*ȦW7e/u'{lC飿3@'izɺ7ɣ Jh!~Wx4hj:2vPwc㡪^%UTv gDtb,Ϫx o-׏t+\ɸ(SVztUπ KqHҀtF*d!*b]Үjvgdk[uU-X-lP԰a=w5N Qc|6N|ϗ>{0;Sb&/Qz dI-]_erێ;^y7`IQxǕ߸FK ==ÙF9=zFtaluJB6kk!-R-nYKM#SZ3Y'R>w>ʺ5$ؿv"k+Y`>٘eJ W.آAQApT9;m߹dx@@oF>b/ ZMrhq.uɗ"ZYPK$Iq(fW&; '{M蚽SNx2̟0EYp$eysܝMm63W}bdе{ Y?ŒL%DL;l}bSQ+޵o| z D2!rNvT&+h{4.v.#z{B W|2v{fSzV,?3H1x(JKi=;+|eXl2/FATqWK!+#Mŗ3'BS; łtv5(+` 0~iRklu^z=Q Mkܟy@)1! Wg ,tV &yMRYl|W]W \WpUlm6lHsn C"#EE a١8!|r J4Š"Rih-ec)qMt }/@[Hӷb)Vzܳz!.$0T`y)1w+pQ=L'uRcE+Rj+,`;-⯣KuM\yG8sPz/޾+(@ DAFLUF? .B+U/ЧHn Mi<\Y'5'5\58*Zxzè,q&ljD3V3zT?6B*) *i\pqEec(6`<2ZH{cM&cOk+e+X:mB3 D6p5Nk O 8՝)=Š11Ca]51 ^,N^VTs]Ot؏d$ +"w W`cSG6/5,F&>|."B9}Y%?yOUJ'V!zM7_I%9LK Z(iRUGhBDZ1Vc? ѵY+`TȨZ<&giH$$2?AQɥK7HV\urAȏ\|T`蛄.Unb[;c1;'v kGn ce_AϠ ŗi qUY~jU?ҭ| w:K@FRŔ1P ;?fcp>UkٖWWW n/,óЀ(G` 4yp=@*J;kԕʄldBYiUz{-z%K6λ)foS0{&:sJ7Tr|Y8L=C|!IGdžI+WQŚ@eD9 &-SBFW~T1+=AV"+p *ˌS2oz{ـdx9FclXY3[!;4Q +s8 u{oǕRxVcZSyi0EeUƻEfUC:D^$6SB|)~_^UX[yjg\ʆ}7]Y5ҁ--/~'l'fU[BS!D~O0vdids#햔LڰU9U5\*=*Wb\_{!Z&D> +D*-zm"=tVN6gRJn 'MZ+;ewo8+]y>+2~!x2Z,VFkGdj4ш;ςDw 32;IgKWZSAhCQD2wLTZUUО*\h+07:*/J:L0P K2 T Hx I:aPR*UNlcE2 'N\ qlk,!{Y (\:.ڋRMwM,c e0Q_.ݜ,ϋR{2-vf3la>+7c[pIZh]SQ&vxh!9rUZ^V G" i+ݷzG W +$V#~J(.!C3XZ_T6 э&gWvwUndg,/'S#]H%fnBpQs-(~'{lw8ǻ^ռPsOR\%vxW֟Auo_4%,jITh'^16ꌮKOc_"ٲ]T/+MUbp5dog\GT~E({9Ӆ\HOoOu/ARuRa9D`@BQg4|yjmhݪ̘$&A:d]#ITDSɫ[)6&~-ζwMRpmIdOky6 ĔKZY#T|JuEjLOޟ2&4#( (GMA}WMoS5Y6مbj%1dC/2)=]`XYZVϙS/g[9}2PVpW]Le}ѨnWhq_Zi h]CRmijDhʂRMl>wqU~߶DGpVB~θm~# YӢ3twJOes#:IInQ%4b[Wc.-3[JP[m ^1}TIfFT5seO&KfȽN6;؞tXClm̪eC  r-g!P[:hqs#~ Q "NF׫wdcab [)fA }ZV6-r}+<'X!эURۚVH*bW1n69r6)YgᤍM^0]T_p٭%j*wUR۾4iǞ?h@ZHO:otW~1tZ-$q=y z@բyJ .+`onnDtAxG^w%ofQ1aCᑍ٦RQ\TءJ"xǺLx$vT}P XTg+Ъ TN>TO=^J&bij~bҖSZUYҶ2?졠^/RV1.ť-[B9JJQyOK?hl x{ZaAF~_doT Ri/2(W!Z΀ۆMaR4ȮY=`=*[3^:zɌ0TޤMЪW樲PQOd6ոj>Q/q[uufj{H=32)Grȷr4"I$Z\gM㍘7-dޏb?-_n[IKv9 NT'vpxiC.oB'6%ePaVl`Xgg{(!5Ek]x.ROgg-V}ڢSQj,kgBj]k6\71el2>4+;ޟ>A)TenA^:'W%yVjQ8+>#e<ʬ3-q+ 6A5bЊ[P\5yնL4+i BT'#~(*z:d+\*[D!c4@jmW5׵ jJl-:)~Waw} (h+vjfM}yFfn}7j[Sդ7`cr }-ק0=rrqL+gTh +X]BlpW˩+h2Lw 3OXe|M 2-F^Xw^ž+Wtٗn  
ݓ-{,per4垔0{i<[3ѸM=Ѷ;d?9i"B^Ehe>ײ5rj5z,{-B.̓+u MT9]ۃP>#)qHXf30t2?nZ ^ 'oAalYӳ6ɮAE( {_#6ZمLѵ0V\w2*Y}{1)*NbХ=2]Ղs\⋑i\Љ{>H'5[e_VPK]q"ܼPN=8 vdztzd3nsp#(wqA)4ŭF1õ [i+\TqߒIzH} вVuS2c,FX+uLsk9ke4_4[JUhR$BǸZF[Ԛˮ2?n^7I;_lA*Q|+~ĸCER 2\K\{XKu!v֙ږŊI  TԸG$ƶX!E;1YaOXiBUUA!ֻ,eY(K{ϣ3we{b° D?KO^++Ŋ]STz> {vخTP4 >G~vuP; `bR̔&Y |D" @V_鉕 d<7Ikt+6=zUNN&'բ7QyQ|}VŎ]gQ~v(NVݨD%h$`+Ճ2" !A]w*uu"-k0V/&¼G@ o +e=bkʣH˂P"SzPTub"r/'|Q#9&g'SHI▗[{OP"r:D5ueB9ʢD.ӧuv>2JX"v_g>3]냜\Zʫ~)P_d@8x,[=މ::bx\);0-|q84lj}1 Ή̌Dk6յE~ `l5О#Bq{r5B-PC, #8HL1psa`X.p#׊'S醥,E13Vމ6C|4B?tPNQay8\6Ձ6>yG}HArrJΕ\}&ۖ.vc.:Ր)ɕcߕ}VfR J'0"Œ[-?jBd(ء'79Enk#oȼBS9cH$bJ:Lݿ~M؞M<: twۮHɁ7jqI{ؙl3S7. eWβ!bf"UbPݼQyL%v9su=BRt`(.b؈2{mWWŪ)OCLw#BI'J2mw^ ky4O[7>b_D|~+Y%6~˘F݆8 (AXĻʨ/G;r`f6`E0ۂ}< (f&2sޖj\A=9,CFؿ $` {SO`XY|Q* u`ӮPB8vPx" 2;$i Ru l~&sr] j3Qd(jʻw#u c0|3|Gv P쬕2ΤRYݼ:JU@eʹz[`K̾<Ns`7#5Hlo2ǝɬG>>`AT(xِkՊkhRk᥶5&0_˟#o\ Q8>GKOF~@Z‰rXA?+fɢ뎥LIpئ97wsPfnn9RPP!z"ҹe8v(`qE1-&JEH Gn19]  sh?t"!/CR~~?ULܨ0FQa WUe#c z#&+f"+=hWU;eibHP$Le|c>jzOՠ8d@r~os91NY@%٘)"ly}񥚓5_*ҹRZ*޼\h4wt!.׮COZ Q_ze41zh_9j4b ׿Od`JSM( ?Rq7jv0{'o\" /̝iTxJkIgu^D ([.M?.eTätDf`r\w,jL SE%RX<̅Ҙ/P xZq0x\;SyN@;KF5|}ih}@W P(dA@>~"0Nz(!򺘖m]JKv~+[jlY9֠v%N[~$s+ !=7:cehT>QX7; krraQhEMxM P(:%>=R;# CeMgաHB$j>K&Vx!މI1 pxt`JE{J韊/~1ß"oy]?*>ja/C87LR3-y-áޱx@XI׫10.5tk@i&c"Nrw*KVzJm]U^d\ 滃@0F5UEng"U'ryH> Y4Y }5ň&Ͷ.gV7,uϹe>b4uT!+])*9i_=Jb ,eGt5 &Desr4ېPi0{7C[x5ҀdQ%1iqChf3Qu@#.jN/3Z5] Ӎ$/}^EQb&w(ox.kF\]B?w =E@A dk@=[a&<ɹN&1#jX !o˧sf`X{0PQJ˟G_* єHQLYVmT7*:_b "&4 "\ ):* @pȲFtS )CҮݦqLf5Mb +{#me<'\3jQ3)@m ulw̨QMN'D O$ÇNScXlHUx<ІU3v7iXhؑOM=ڲH/83nWઘx23e[lju mXZz }/kM|/6RJ$H-/ Y u&&0ΞN"KDz(Iɝx0n 5?n7O< $w]6J U kePYqSpP`GA,{v%[(XrVyENr[Ah ZKs=QŚ_F>`}R3Ž>R)+Tt3f5و1Չ~GRw鰳nf \%Sqν{/j/2pPu'MÁ'1yJl\/ Z]SQ򓱬v;3~>N>,n%' Gw9\VH:BX1魈_3#{B!7"'%+p{,}:fRs1Pbb@¢شuAC29/B=#WB k?WQGٮЪ,h$Wi]I?HPg@_Me* aGQQ{5cJ 3vj|2c dz%Y\"la~ {ς1vjNbh0V~7Ue`-Eɴġ-uaWؒe3S1q:GK *{rdc3Q &go@ V~o+bǜt켧+ﻁOp:U7'nr4յ! 
CNdZdij\:MHTΦKiyθ4͝#ycVf2 nc4q M7%ݠ A˧@A;u8t+ē8+G=nT`_L2@DK=102ܼz A[Ģi4L˜=f:pr[9i~m0E?>9 'x{wҮCYmZZ*m炢kgu-ل?C[1 MK˃8QTã0W6~޶LӹUx'Ȝd)ڞ8h.P\(Xޓ_4 :SR 1$;c+G1(M§c/@WjVjq{'jW&:V_Pxg`92W[xSWtb؎.bupk+?S- 8JOWQ5`(HzApO7_b)S3r΅ˍp2ΰbt>cpi:ȦX: 0rBN@&/5'(OL0oJaxrqe+riXuCnwXce2i6 IC~KPh+GwdDZhITn-Pk3 metN U@=DDMA0 &+2v,zB::ȅe;BJwԹ7 d A!s¨XPӱedHdgDI9'8 @U#k:yZ=oi}Y-<+ 43cF jz,9Zٺ`T*ٻY(1~σ(7Sg0yHV`<n9:5gʬR=x"!r6DB8ɒ:uu]H*Ϟ&ʅǷ]QYYF(dIcnYgfB_HLE.srڱ(Rtw <+#IQb6;8|޻Y*/m$ IbbF( R_8xzIבzvgw:.wvi86Ƅ|$yb%ڠ0WEExgSrg 3}`Pa5!C<|#&2L[ҽq0Yx+}P1SDslIF%W??qU'_ʽZױqZopX19EWwv dx+Y gt+PQw3$IuIN.Yՙ FFb(sU6Zldf 2 ׬gK4N1,Є.R- 嬸SLZ @O-zHy^WMD-/7_΅1O9ӥ( {_1Їu.13#'xrXWZkYٮtӨ"rGOM$Π]+'Uc(+2pt]`-|7UvjLO2sR ע@tn7:\אlLUx )ZDg!?OЭJat$3E !cO- !F)It5`V|MΒ~s1sXvR`\c[Okr|B蘕 ϫ.xQCQY BXWiWnsf S*pc]N/܀<z 0,O1 u:G׃GR7xO^C :\ 3,8 C'+l.+RfnBg8WB,b4Cր{Do>Ys!W6<Gg~OC5gԅnWƩ>ꉤ\ obW ;V@][trҮBȈN1\L!0[3mӣQ.qk 3tf :h O &PYa"ݧSZD֍ qfOCeyQ7L w]֡n:yx(M~Vr0j\$Hk.c[cpc0FkP{m BoE~%وBU< LkPS Fn-|A Y+uڱi?1Y ,&Z0@`@@d(@~ 挑Rd( ut( $mT:*E97]2Vc =t<+H~#;iw!|g4V\'TN"cJLVJʤ1; z"6'`< q)t,av% wEz637I\kc~5=X47gKg^L[cu]>ŴhZ񹞔_ʷFHX'4O@Gԁcܧ!Dys~YZ_ _x41_m{c7]vjF{ʰ+N ;c0:.h鍓Шuˌ| |5Q g˲YQpE!)TmDO&m* "(R &X~.uxB+B0SGf+P :]_ (ey*_Rͯ+KrcBsOw$Wz1"+ye+{k?Z:m S _`WDg[^#ܘ&yJ-fjw<ʬQN!̻N)e& *?%+Pk>\do|Pwyvy˭sq\vatv ñ䈞bi8gzha TH5Y+1T'4}2dz)ĚrTzSqeq>`dl nXP]/'tfkddV bQfg!Z $FG7LI"I.Z8ٰ+YmᾨFY)b|^ a c&׃]n{u_*EЅO"1bquq1*PnaL%i-*^*a?b\Pvs5?^T2.ؽ>sdQWԉ (Xmvu&WݺƲ1A60pjx z J$qZI3W~Æct¥' +ItѴNl@ ֍#=D#O;k/ {=ukvKQ0H_(^;\Qw*d?LHvueozPƆi (T%8r}> dݴ=̼\AdÖGZN~3U",fӲE$Z{P8T/ַ#h± kFZu\ؒa͹GDߢͫաq,2i[2v1?op8&6oܵy.#RJ侔b|\P@!<|q }Qmj ]@,yﭪDC4~ΰ9 8lɰI)0#+mZANsfx+ ` ZFΊe$N]Jۉ 2 3c$$t:` ?`Z5cҴlYT{ĊvzqoyjX+#[* jʺ.}4"J{ɨ6eZD{sГ8cxFB~̹"s ~#V^C9CvLLl"lr!W!'atq\q-xfSgt/v&}d= 4(\m %㉰riIϜ5+M6pkOUNXSb+]a9WvG6i)YɜN[?=bn >˳eGv%sglKFR _tv[(~g`f0UF[qyo NZطf6nc˛xs۞~r7ELEet9\'қBH+@jӰiz&ze&,Fwy0lƞ2nT]r֦ת׿<zv펤]%\APQiOF(KZlBv`J{Z6^%ovsDI/mMj{Niv&c`{7u:ts,4G$f:ɐGjA@9z<F kU̻XU#_+ޚߨߓQ|daҝKK\O |sǭFu63ו z+ܑGβGGZP)օm.>PVӓ0C,Z$+=]*u@Q|0 9YSIwb2f"T1d.ӕ{O3ގ!R#pM{0n}f]( !B@Ҥ$22:"N!P =>1+,OQFwaTIB4D# 
Z.ƸT1m׃}9z, Fr[Zknye:^.kSdfA!( [Mg:h*[zE`dM9 ǀ?F%HE.ht6ٙ;] dDik${Z{~_huE,]5+8 _HT ~Ird?Q lvXf(Mfk:|n`4lK8S뒑oGQFV=+FUTc OCbu>l k{QL0YEO9J.K~2+d!üU:cR s0[GMOFe e,"sx!{r̦t`Ú ?]nĉFhR =<*Uy8.Haw0/88d赺 J}eWrrUL.XDKϪCj (P/^<rrx(KIdZ"PSJ$glZeG[Cy!ءqfXZL:,Z?g LT[f*-e͓zkt"ج_fm+CKK6>.RX:]qR*f6D(^uƺSj 󄺞 H>Qggk;Y`.30=%͕~gN8:{q O&G\u2()XUyWpS9/<%RPĨn%Q_;V P\R<?3gi=sFWj#c, +gu#0< R>,@ȁ P حbm%Yth t12y @rNQGF^W%wP*݄BY'PQN F%f”,n<9[都QdQjn*Bu&HvrO% 'ama9w \5cvYÉFt<M\J>-℔.`\z`38l P.."uHb9ƘZ>ʹ.i ;鶝Q2(/\ ׎jZ'8RDp:i&s儂N|If#z̭7 AIH> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream x< Uttw!ĉ<$H R@ RL4A&ymms-\.33I.XPBAPH 6:#ʊyܙ#2wZ_k_]U+?zx{Yg/G+U׮]>~BwzW4^Z;v5z~[O^ox?_;㇁JOJ{[g,UKxwsewW#7_uwɫ/7?}Pmz>Y5hGgeYsna MbMU.ohT__}7~Y9}6o-lv oUۗU׬-#;ceWKSՋg]Z5]t7x~ov>9wK|/1|f/Y}L쿽wt禪ky?_ӻ|JA;i͆sz_{t-̛F;<\oU miO|5~WX֏FU^ߢ6ey˟Q oѪeUT ȋzT w^U*nU՝K?ZQg)/MK\؟ xū(WVg@w:5WoIJ9UR*c_cQ뿨Zý-A ~\]uuR:zzw0g')P2{$ )mC`urOU+3` Q4@mr3~Pô6ڭʳxRPF{jWX!ò?t+Zk5}>_O^BtD6_E>F=1:{]`X-Ȼ1"X5 -ZmiE6BZvѢ16-y5퀷ɳhaHlh_U' w ~̟UIՀ2<Z<Z&셞#+Z=Sw a[ɋأycrՀ7mu'FG1@~9ߥWt;:jt˽v _ JIVG&C: M]\-s$1 m Xn]Kë5PpiWkAk4 TWJXm`摘$cw݀h':xVUUo}Ǡ6t[Xm01>vxzVm9]7w䕪ꭌBCoXXy.7A tY%sK/o{"w=&?mmdYw~dL;#E_؝0pv `8t>7ͮ*CS@WEQnLkWEh\xl~eXy dI; 0ϭ=\AOɽ/L|'V/X(c{1g`vB ֍+r0d;Nde t32XMB;ųt*qmT_!7PX|l{ 0j?} "IIҐߞ$C<_;vbk?b wJab_Kַܪx2vD|'2ا%9:+-kUz, g@Γ8w"BOFxvPY=ts폂pÈ}h =~Zl_x^BS ׆{=z5w\ aE#bAޮ5AoFr+t^|rMuKtvpHaBwp;M~xd a=u㉳ЈsDW h|L^͞4N5Փ% /nĿJ:]B`+58j|zctg0FGVIi{0@gu0A2sjn'тȨkx Iii6 4iWeTXu_:(TXUߗ.3ĭ=`Fy;w"M?JStO^:[҂CM+ceFN_bu!ʈ&~ BDPQu|iίCH0 rxC·p 8 Rv$5 -}TCLHa猎qa# jMWKEoP \ _-E]ʭA;] gM [!}qj ݀"D 3981{fBѢ#vEAgpgdNW˟#: J!#/(+cLoCˀkt :~]"k'_MU 2y;8d;A4CrPNE/|`DhptRY#^V5pn n7 YF?-~7. ?idR-4) 15P49YKBb%160>'C 5tM%'WMue0Cu|&@N&xB T)qmh! 
& GߔXp0 #FW= 64`uZ+OyL&AEAt@ybBJ@ |Ѥ"Zxy: %s=+eBh܄ho^a:y j.DyPvӺe!BWݣ\2#^?|퐰#v~)!-( 繒j ]usVX@I4 -@ބt+|; M!Um =7gt>gBNx ?=^1vT"8Qh8b5mEx{Bs-h}+^0C';c7z=g,D@2A)<@I \ _?_`9r>&& JkNa4yOtc.H9' ĻuC0Dq3j'"޽vz5f>ʼnv^)t^~-  ucGR2ÁWoη`,FJ*p]wD͎C65j D:oo %00i7Xxꃃ߽x;i@??Gaa靰X`ҷ|rN%,_w>QO ,@ P}dђFJ1BI8 &X)~~\b#|q]b4"RxC~@Z4 ԙ |I1V?a f PI]9 >K<ׁ:ɯSоpzD&l3%_ Mm)*Wr<e~b/? (QwSBػ%Yk7osڧApܸ-Q_VgSljY0ULP̿7#I*!=Z㸪&PD5%SP uCЫ:i0xTmӦ륇Бhg0Fa Hu1ZʹF|hE-DGM!,Ye%zv# 5'L$qP&>bKlQ %x ?.t!8*`W-&I3(uՐ{{=VE8U8A7| 'UYͤn2&^\rg 04߬jC:,z䏄 x 55FFVx7&.O#I-Hd@=Vƕ}V+|l4@,"@=xu1וX0da E0^\6__-`5:\^4m,49K` bi]]OÇN0g-"tx2tH>ہy@} Gx}|\_UxKHA#`EFy@Jx?-OFgKB'qzA\7}ș0,EV ixg $V{LJbP`ueN%_z@BX}v )M5(DWt^0\|saK+rpdlg1` Ҡ r&E5R %6[t[>_~pqLJ13%v0FdN?D-{т3$ʘ uM9иXhqѓmb*̈́g1+6H\{3{^6ߔ d%\>4wY|/lS J~!vyhGDɐ'Kw,GMj3k<-dTzTnq\5v^ Xvsk;d\& B%9#sF`L1n Bt'gLf>i_ ܨa22RFa%Ik/bE#Gr[))Bni5\i^w1=voH/րO̓' dkxu Xoڻ ohd'i7jQ FYM c4U- gݮl] 0F-ϛ'v4"o񵴺`TNԛq mnC,{/2^'gsՠnSD.Gh߾z7fbXu )aL#<úw='y!_l آb-\1:BzĄ0i`chKMZ_ )A/s<ˢ])fY&_IZ0uX3hrd( iOF<˨4uDor;ċ޽~W,>?@"vody:R3o9 _z#TDZP$XD`jX&$=_% {rtqy$q QksARNƄ$Udv2^ HEIm) uB\]5{nqF/x"]HI-nm܆h7ӝZ8_ {#0MZ5{mrIuwdLBI`饇fm( <?MzS'T&qaD"vӿo=!!>P솈T!sh9/1p3o̅׹]EmH =d06%!9y6>?#"@H72Dk/[ϸ3GbD\_W'|(2Nx)+#I7ܮ[Z~՚ x3N IaHGv0. _Wގ`8/1ź?!mm B>uΞ.x?ZBT7,$_0yl'9 8?\!0`GvqOu*tO/lyo$Ǵٷ'AmLGl_ v!?1t׳8WoZg>TYa,tހ`i~hȌkrDy1$72uy3vޙpk[j@ 0\< a-eEx"! 9sw"60? ƨ="gAN\SEeh ](8'fr͒<T_u)hqKX6BmF(݀I]cE e}:j$m|܁::1*q'Xf4}׈@=bB aWRc.Vqm tᭀyug tQgE099Ir`آIb)Bf@x2B?ԣ NO޻cK!W:t5'm{o\|rbtp8^T b?"=Tck-ظavfM.ṀZ.Ŝ|3TucBiwڽ^c?Į;K9AzXSjx:LWM> !ޥiaPOeX4Ie#WHcd B>NSEt]MxN>!mpKkP 4Wuڤk81erxHE~ƷB)_!ظ+cS菈ܳ'&`[Z]4θQYA_:{߯+s'AKlT^?Z|ҥHOc{ƈr=W;:h:zB{`*NsCAs#ʽPcdy'-¸i2p$;yCzy/1@t6C#Cq:]$s#;O@syCuު!p=J~qqHjps!oz> ;Va';ouH;oB\erm."@ c`1h1(:0E L@5~V|6ךᚻs`!9gqjV.Ѡ@cnUbnKP!&pt.T{`S4e8ː@ Nwk+ hZwhǦ5N'tPg`nB?.gΤ3ž#30/ Wp{s3 moBw'I6K|ip·5܅N?$P\| > Bn="f==1[*K'S\kv,k0m!ғL.ոf']7q%uC njf΄ 2Q+D,!G Iw-{{) utJ(s%D5=]12\bHZpjlW#о`F uK <}+K9tyoPREŘ{v*@Fi/Bh22m|]6^܄$6[!.>-&m{lBO? 
׿ePD>I:jCq&!UDG0-_@t8Js5sPGc.,|$&Β|2 s-G8 uPSW2` %K'<ǞI& ϭ}Zy?hE͞Nnv%@8@>~3iQ@hl:/yZg7-¨26RƩ>`E4s&2шP4|{i( 9O̥)WZ(\i;3X:8\KrnǟjsY (ɱVNhtAS@ysƮ'f&"Z@Zw5W-_ӀD"aȇDZ+:,:GFE6a8\!^`c \VsO1ijҊEVwjw f ˥BS_U@ܻcm\Wg@9p>9O'N1gIV0!4D6Wu }U_0v>f&iԱ=!pSCw=-g~9j{#zHjv;LAhűEP4Bk;fK9{: y1$idy+c?3 ?h$(Q8G/y0~ g|9}~a8mOlmt52nnw g+z~,Yȁg DL?cxF!F̨,柯A&I 0v2 9@aq0RčN2d>tv+I5M#)8ƨ9Z7e40F@QD'@P0(G@Y걮C_zbK,C&!:[ДXY;TZHW826mC]'9gK+^>V9{*w^nK G}h okIBF,9ϗNXXMUV݃73an,\"}ZB5Ѳfӷ0}7vSAeGRXCb:I,x U ͥ، Vs16 DwBie71Zkt z56*SnNsy@<>Pu}B .%ExأUd0yxH`C4gjzô <<~W#" 7Zt]@GH]$,Et3tb`ԑ9 AWyv*8&諮0bm!|vc'i˓o3@gFTCCAgNǡ0=qDG֦ ց8BHo|Xce!>% :p)Ac\,THlK ޠK{P@7"6T\A{y"Opbmq:a!=2"Pc_/NHd7#[62귱A_=7p34M%B~##Yy5xig̱VGoUPLv< Vy\swݘ$ VD7~Mu9_h]dIf>]_ ۉF#hO y'TLr3v꾭Εr)5*tڕBM/t Dz%-pB,A7ЦyG5jKomCi'`G=؉&;q$#9{eVL$a-w}` )Xz'ty#|یI3 1_] 8z۽K'@4ȿ9!F8P2'tD㫥|BA'w艰ݮ.̚&TC쁬t@@P+s̙s̈́8,䆍 0 z\B-yfc!<ƬKH,O1rчpH>cu @DJLӎɸt5~F:/ٛmp9tCK!9 dբ H,vUX9L^2b4n4Pl|E 3gD98Ѻpf N`8| v$8 s9Cb8+(65;D.y#U}\ʱ#dxIJZ lA|sfjsHQ=f$è!vOA5qhL 8uDZ؀u~q<"y-IWV4CU~־(vL@g؉:(UL؈%&P'OCx\s *V=bv1y&:/NTYbϷ2F0]74pkW6aA.Cg:cw h0ʵ]OF<ӞFP K\{xގEzFSwk7 F丹u8Ǩijhϐ5p݀ly]O~,9}DKܸhetRnp1Q'9Vf9$Jcif= yT+#27?V.*`#5"aꓴ#]6-|7Nq=ʼnŚY4)t&ڑ V2zy>N,;5juO \̙x|dhJɨ-c+mP+B#~<:1pMsHƸri'Рު6QH-^8t}hnƋq~Lr^A:'2W46 KUD+ƞrJGb8s[ P}t}9 ꉳb!%bXTc3m1ymZDˀ@c=PE ITA?=iIF,N"Y,) &ܕ^'Y7p:`*@kWsmqBUC]b'՟Ԙq nozM`W*Og ytIƇe͘P%#r@ze' N\f߻F 7~P&ZR>ƴc E}³r3xoALU:T__l-n NTr2\(#(Eea@]7cGGGX\XP $pOG-cAu7NoM.A'a* Jپce~+` X7}"j=CrpZ*6آI6ug}N|7GG.lퟍvx;R0K'/LAFB5NecՇ- Cn(+#Vrs$pZ$W3Fr%nޏ9InG7)D5*@YCe1* FD>q59Ic? ؟e= tޚ+rK10NIhѐpl7ę6A3'Qܖ37vOge{ZVݓVD;: !vWKKQÿlkhOofmy^^SQ#DKY$_& ,ì[C@1S%"IvkyK'ml9P4huyNIoB x Hf(N8Q#ʓHUz,K'8;p3z}E!%jM aɳuQ hf ?vnJɳar$J!d  ehᏮcKG'5E(uN x F280^mmV]X5'B/C! 
1DiXE5zK+NzFv >tk48=<7rYEDt%Osmbŋ:c#mѷ鏣/n|GhEa~oAȪ{ЈbYwGp `bxggwҚ;F4ꉳv6zO9)fޕRإKEEn@YG{ ƣ1P#C| uhNQ {@bAG-{lL!΢z  #|^:nnQidǦ(}` MnƛYܤqa]>@@~j{J2P~G)6h# t&V9\\!a_U0^]wڗe$k'bJy?K`0FV$ʲPA` =cbGy9`czag^O)dlhcj$$_4TpRy#^靘<xetU<)nbQ;G >.f`D|9\\愧*bеcQ L{`[rӥ8_Nϵւɳ&A nXE/F?A s>8O"HloK(ZݮIQ =*`2g:mrlGeL%Fz>lL2W(/W-zN[˸8,vuҼK@THx1qDn,9@G0jșb:?͜ݑ/Ė Ēø~"?!Þ{n(!X)yèFb)*qM»uPc8zpK1ja]O""c%ù :*k,AA pQl4 T`wp@8Xn~4MTbc" B8RRMN2xŸ7`{=Bk$Cی}u⚨Ÿ]:)dH`̖_b Dq*+'qGQ*v[Z :Phh#eGhQz+"Q?uobiSRKcih_ i7kw.q7*F|m3WG*(θ5{3qJ[t4 ^-AŖksg [rFtI8fo,9nsCD9Ѕ`GF|pxݖ81u"W`kX@=UʪNg='rAbTe@8.>( ryi%|Cp2Gcs>a- 2L Ubo΋4 nѽ_GL<7}qbEH` T=x%6hGή E{Ʃl$+n ߲wl6 !W4ZZyF[?&rzz!Y Уbk ;#buLMvӠ.@]87Xy΂>g I6]uQu k'#+ ; `UVХOJ1`kiXf4a8z$8S4x ϻ%rF2b1o4];4ƹ s/lC󱛎Rrxj, =̼%T19둄<GHxx*h!ͱr(gQDcR&$a?Ўy/& εg-yP21&^"~/IkP@]K)*V>s3 ۴Rr8OD{ :.P.|5gzHȿʴ%ua+Na'MIV@J$0&B^KA5> zVb4<H1]!Q+,?4"nY7#T;!GrfCƎ)PonB:@8WG&|.\舘|#5anް<-r$4ZƁ}W_STLf;r 5!=9)-xSy Q:qN~{w#F0ÑyaM&|hL0f_lNkg޴Xҁokussn\GLe*/>óy#&GvA{ \XŲ`}_FDo;oRBw-t(:[EEɬ(<ƕ[xADD쯏3Nw$*8M\T~q*>$<(<;5DlTac(7]*ڡqv_wޘ7eh[eU?ܵZ w[g菢r{ROb9{=rޒSnݞOxJbM< ؜ͅC SEQ,QlrιNwϞNiofbԔ:UL}8 p~Q-sO#:P% srp0^$D pyIiKLxL$eg&qsq#$ x stZgW9#ڏ.BԄ_ q)(':J J9iݞu9qEÉqR^eN:`ZBq4!]9P]ܐw LpDT#HzÓN6飑!CP(TtY5}-{>\ ߯ v+H,wf)$ĕX! TKqO\G<_KtL{:Q`x|nǖ=@,t|^q5<-8 }~"K0u* 0/ yjqRcoHأjSXcsog5:@ޯey?K.3I@~se!I R(q1|y`.mq%zޯ*]ϱok1)?nkKؕRU{i,O 7Cd7ݼ,2w2{XrE^Q>o舂PNy9]*cx};}e츝^X}B lMZȗjn*R0y3^KqQ.~Ƀr⋮D-q>u6pzo \_+$o㯌jTx_U44<9;sV?=XΑOkP-C;G!f R`HAzLIfOCj"'=R VE9s_*a-w{?G2jyRumAi @_sKf4gD'y4 xgFtǍ۳*[Iy2*e4)6U+?^P,yq. vJ- a[9XX#Qg+ 5y_KLq xt1_eIi[^v 0 yӼF Zg -+#>88ՂyA6snc?7dΛ&'LS̙y<(onq&œʼnudGXPYw9'X]RүGx~BMcy1/.#W{=T:5FCT^:S%p{*{f -/ۊ*D&^B/j=rK7|zwRlz?~p%u|= NU9Stk^hK̓h5raEHeU7#IZXFvXEg_Ghe^>o9}_fDbGphn*ߺΛehЉAwr=Ob~ؒW͒`29GTC.П-'̢~=g$^Jw`ђ^obnIk:$+MG )KA CYxqzv&a&fb#$@szH(3hGӓ~gXχfeAt+kw= ZMF-@e$? 
cU2}~ˆ8iKr&F^v׬H =oJń]Å}ʐq`m#v"U\-ب0iBZwԾ_<#1j}QCkaY ]WZv4o2ZSLy1XyoP ݈BmnDBm.3`aYH%nwLpXbyv[uBs$QQ^P]VqEx5sYK?H7+x2idԺT,{bGnpqZTZ.8iRF7H\J:(O2bw[< ,Oq3ojqԜbd^ʍ^UgJ0<}6P98X M\]4iulvOmeI-|XZq /Z[B=7;_LA$yɵ+{nrtx ,HsE$w#d>a`xIhhVz DـQoDD;+7gX͇^uj:k`# EQ;M,,h Ìr5ZkTǣV{V`xtqKzl$ _Я9fQ>T)v @$GIu 6E7ˆs {e)EuwwP~s8QP,߯CsSBbTuF3gF9͂Z0dPOy#>Z#T~=.jFk,|qTTdܥe̸nX/E\,bzZ@d)O|_5t7b-5Ѕ 8Ą=gayhs(v|G\ ڋG!#^uՊlvˢtƽ~Vl)kc/-=n-涎p&Ǣ.v4e'SHmSbeͺӵ}JsfX`9=@ۜFyI~ AZg`h Uz*7N{Qu8,S,ҍ#9 ?w1= _Qpސ w⮥,ܝ\* NGKqIB1ƨ!GOS>Ei8+ǻpj!~ܧZ-leZ_ǤbgQD[l{D3lm+-%0XX,A/'u-˹%nCBӯ*&p**`L&%5M<; o i>UuzBGkjUp l-^1:Hu9_tb9Q.ίΉ2vW[q 2Ɯ4Q Zb_! YwC,?%DQnm8; ۙOyùMa'H'(w_lN%w3VXH-; (gRY{ldH~o9Wx+ }X=Z|Nq4Fq:>wac7DA'CrxoɃySe8(7Nq==l ֢*9&RtPx,# 8Y@z;&A fd\*IHlz9+H: {k !F3S ͸Z>o?vas&a81ͼaBl}[KQQҬRZ{gW` =Xt w=mƢ̙Qu'R6o .Ll#wM z[ZC8BBTLG8 ?p;1ҙFb[)a^tij;\tR4>6D: .Llڈ7#b.4*EnjFx>! 5c*y!)Nj@X:B55y!<6&F&/Sv͋Mq@^崙H 1^ s&ʨx7}%FGWKY۰ XdK ~FmO)׻N:YrGKprF &<]d&;8J޾Gt15܍7e^$/9JE,guǕ]Y^O)-{݄NTEp4~vyttv.-ldHcΪA;\IeAnw]iPM!JG`SY0Q ~;n>ZIWy 7 F`Οbb-%-@n7W/Od EK*khs3 %K'A Ǔ̃H\Xg ޲oo&mQL$Lة:qŒR9 PCҭk qxt0Vs#xt9;]OT֡X\U;DWaI/FYE#AS95+2DBixZߜl+T݄Y8@F_:1?A$%\UUA{}ӄ}q{Z+;Dm\)Mnf6o |[[#84ξt鲿BHz+9oaj6n -xI]`X7P{M )#7R:ZwFd8 ym8ʞ|>>a a 59>R#lA u.eu9c MMGfm` eL,w5ep+ Vg 5J;԰?Yidƈ]6"GvO]MđTŸֿ ]#uI0l!㿑z$9bCT dEvem{j_EnKh=yit+Z/}퍪Q6MU-;/^}ܚ9΅C+ 9Kr?@\huDv6457ʬ#«{Vݫw#NVfI^l!w?+~c{‘_Ul2y%mG:x ߴN?c4EѪ+ 6_! 
i@ E?'yjA5j"|mArM2jV()Gj/QB?ުz횗+-) $z>j.x0nSx<ʤ Yq䓈kW<Hz|-^E14;GpG@`Ϫ9&ijNh`Rl)Ԯl7m.(61+֡d؍ x?4X0\`H3ujO>\8˹#hr5]o3WC@yƘ"в-"$!YI6]\.g UMCUk8 }܁DZ^s՜fNb>P.n:mEqp t>LBũt/ݫy59HW.S?T$yx:8JՠUnkԀj ȴ_u ul_=w}c."8ͷu_ѿfT5Y4XaIJ}W~VM@Mj5T+i`LUX@z߭^`Pc/Z6J /x/}s|~19TB^jٮ @ax1پJ} +fkn<zDafe}ީ!MCsAS泩j%TڦPnp2R-)GY5L1= !>יє*:Rz;JIBVCis4'붢Mk2KBx>;aUm9ҷ-jLH(xsrsìmA+o'So ],QH G5|P-0Zu{i,ϗ~G4DX7NApU容׵ke텎`NGk`z3ر.OWx^a3:\_]N#6d!a | u$讼wVLjX j%rMB3 =Vro̍ %?)Oo QPr@@Ucע 0K=c.7a`)AYkPuAq<*1g 7ѠlnN.|ཊRtԲ-!}\ /]M\N5n^(Em\ڨj߿ԐǺOo٘g" }#\ƣN@-M^ܧ*Y劳dYh݆WFU6&*sC?̢:!}u[2s]a38]zb Z9w3ԯJxu<ĩ]3mOIy*_X9M[v<ۆt!X"໰T=-+ 1#$/sI-(ρzw+Uk [g]I5zN\%Ym.ʁv/,N4aKz.x6kһ뎯Dx@T "ðwPhuz{VmiRTdb; -cZaAPe{0 8JgGn1.*R 7%;U\"&b0UDk#Neq@U W ;6s|~CF0ZwZY/%3|ZC% >5_G ( {bX% em_G͑Pϛ O劜+6 eGEd+8VizxKFW3TƸ.hӘ?YKΜ4|^r7nkzۂչ~lyu:#:t\#Yu՜T[&X/ *6p ^ٛ-]XU,#l7 `Ѡ&-A@6B ̵ZS+)/W;8[&+tj_OX>k9ߐڴFqÈIzFdaPHk['TT-,lUuΟ̻3+8S8t6s]Ωh2:d>cIQg#Z il7:r_WR4Sjn Ϛp_k50篰e9(LET S{uH&'^1M9p-c6=iwBm(6^!\6HNxqMִ?##W$)t!) W _<QXu6CձcHdɵ3#s(u>oUr86DzDT뿝6\*W礟sZ eN?0G^=2=6U·TޜMfZ+/8~SVhs*qw:~`8K̋e5!f|߁۝r`H8 PˡB(jsNkeX^0A!T {7GʵQf? pkotgfW IUmĉ&]7@"/sc&WlVh5V{;aEV-5m,(g@6ToFIsbAE o9g3rGEQ*΅˺CwjzB".[<, w@að*"pCR p4 'at`zq0$E+$e t*c@/>^W/ 2qu fє:mȒ!XI&(*7͓ lT2u^0Ab &n痌$IM)@ɱbZ.G% p hlR,(4ͽ/B}=[^\ _j9/8mAnⲳ6#.XsM}dpuUw P+iv+f/[z0uaʣuK(8pPFloE\:etòXrua<+ CqW͚}ڹ(hC`@T5/UglY5)2pMsUg*/:VsA @M8ڪ66HΎ!,C5(Esf>VX?If:3NDvLYʢ{ LO>l~lnFuV ~iGZ22[99Q3tjj+!ޡZMFngVnLC 40PH+6 ci8y8 `[  ;zpį Z_SjX[,δkw4r(֣nk yޫ;и.^}8s)G`3m)PaO[Q˓3r%ju;p᛿&.$U?oɣO(Q6:Nd>twktB)KF7SΏ %aדcWnW03j^[{#z3(j|RnsoX6{mo"'{j4t>gmjMJeeB3JAaSoX 9Ѫ6G?fO6 S>⃐_ Ϗ2N?P{*=@2yF3@FH;/R)a=yq [tW59`6%;:J hkGLgLwO ΔdP~qK7<&/AcX g*3H9Q|Uv0[}ͧXs(Q7wsSc &ˈ{hR\aqI5|wT!{2~U'tEHA=t_\UbԬU2pР4j &b h)ja}IJŴDC Ҿf<V3M釴 rG>{pBo$/fz}v]=͞Njs9^P_Mhr jQR u+5?xdʄL ZΤ!w*=H:,G}a`eO;Oj) !D8ȃ9ݦzfҞ}՗5eVDv. 
OzlwMUSkHqe}v4埽FV[ ?=yQ\PޣkQI;dߥ^mɉ!@xZT7 c K䖫.nV]NOe]8ր@~[=f b{sR"mق*e:~_&[|4H_w_7O:7rq6ݡn‘AJ} =7Q,]{,0Ag5c_GRuLQJ(<3#!)$qj^+qh^L[KF{1:Z@u С".[{Rm85v$-@.׀z&T LKwh]oܿ#` 2ZS\C5ϥ9ôRѿU0[g(/3-s3==GT8QwՠUyjkbl*_pW 8TMr*j%iɒb}c)zYɒ.Ja<933akIS&WvcTEq¬ <>ҽb(>r5c3:CnL.6˦$M.J)yXᗶ~ުve ;\yqG"l|[ÇU>08a CUJq2TE/jU6zP7cǠեG k/}d:k1Rsq ca^lgDy9.Ly86h.yd5zXuUTvoGӳQJd9ઋG1#yh/zh1E*-@{ v9l">.9ߜ$%4 Q-AJ:8r {mbpMVX7')pQRHkV/ ?27#ί|mUx<6}Aŀi˲dq;eV51F Z3Td WrA| ^鼕6_ib ἺRS bgET8DXpb&)XRf`Xyމ|#H8:@J%!cG_c;J"|2oO쿱cfJ@46NQsNwjl51 nڠ}|3%Ub藿6]1HLiK>m֡Uo̸RDi/,hWݎZf /5Hhdl#ެ{5:UwaՊ~ɩo@ݙs4& _-l\jæEҚ_t$!le~z=ܑ4EtwOr7]NjpQ:(tR* #dpVEsSjUHCZmSOGh_] ޿ JBY 4TEa*4T)f8vmّ.%; qĒL'#}dBwڳTI4E"q dV]j6 S^r!+ ="sWGu#P'K./5n^]&Wz/-؎=ʻnwO5bJUBu 4KdbW{P,m72ʇżq'|QIy!77x-(V>bY!^dfF c1cc/o=ru +[`)I,>3%Ap|9,؂/l84UDXڂ$[ 4ߖ^͋j)ٴxojĵ7"TҲ=Ik54hJ6$ȕCı&!)ɴG/ {TX!V^0Y<p>09jh-SK! َ!xR w6m*duLUr{$kj8PՐp| DI}0̸;vJL6ț'a;s^C.tN L%Y%Mb_G菄T/S)x3sگt 5\9":aӔ{!~|F$0HŹD{û̉@wUyKc2OXeDw$R!QU&6E&̂"n[gSӽFx_I*(2r<=xOpܹ5_{wG[5ӃQ$/`RazQXɨU7G SB`{wfiv{/YfڎE;7^#)㚹yLx]k.&~0$měR{< Kۑ8Zc̀I>mA0/LM>u~RN*a@:?ڨ^Ξ*S>c+յ*K8_Y=>h$k&\ jԤBn ϟ~ʔ7C}h2 Ƚj,P4Mq-Կ0Qォ[7׌uÕ[U5&zhrvd^V+oCf/*̩FlIx AJ=VMg~΢]7 54s1MjϜ&Sώ@qݫu!^%Hii=:~ʵ&MI@o[v:cT6l 6ϕ^_CP{w#WDQ[]7H$\q[%OUF#xzW2Vm蜠6<ޚr[>3^Pu陣CxWR}4Mvp,ݔ' ]?άXd#N*7;7BM ǽ5k&X-`, wFyyT ,Jp/R g,/Q`@CۓmLk:*[3Н̜9`wgPciuvW3JKIבAE ZFd܍gM9]8n+M$8VT5U"@ o'S+x ne1Α(ՅwlZAewb8*(k$Cx䓃ST"+=!u5V otZp Xd48,T[]l1]yvm/0'īc:6]qXX#u`5yxJ+k. A,P59L/5^tFO[K q@{s*Lwg4m O?'}6WڎmmޘݕWW>` #3Q{}dG=c;g*тjӅVν|` s3&pѨMª^wLP@y &xVGF,Lj9쪑ai| I nZslӠZOd리ҷjbtIArfI]7Ȭ޸KgFJ*?oSip5 f^'uX ={>t߁$NZlpO $Vv2 c模i"ӣ.n-ґVΝlpA- =COw݁6񳔾mlZB!8BAΚ*]atnMC~65ӣ^Y]y+ aG GpzѦ :]51N$Z@Rl9"^oU!ˁq Xu X2GXQM5oHZBЧN_4^"-O·ˈVw$6J}ǍUx?myDz:x[CQ}rÑɆc2WLi6 4|HG̊Dz; c_GHi3{p"4/Ug۪Hwю GDC0Rz \ 4"i"d cxz? 
XɩikLPV "4\j!M9ܔz{MpRقsȢMO}t̷>~2#7 HQ=a( %1rv| /򉧡FkP3`;:˰ݠ̈ᓕޙs :K%}.jO߯#7O `]7=굢\&~byQCJ3uzhgU~5mФtvJGG-Z}- PT&v2o <XiuNJmuIࡹLtZt oPJ򙿦ZtPAZ2S[:=K{yë]-G]˫q_bڠ uMaD'ɿZ`("cN |0Av'V+l[*x[>Ue5h:@o/O&zP:^!J ?װl+;/ tZ_V GZXI?r}^&Gx [֎^<_u0\TFvzCs_)ѬjM t̴Pײ; a mB}*ZMjf8*sosV]1 eGMK?p%ZN!yXp^.xgPeL"f~kBm4t +jM5JK`E,2f:ۑ6 1K0˨`| jhK{LnQlTju] 9d$xx +NҕuǾ'LN. ,2SuGa|j)h1s0_xL`_1+fӻql!O SM %|ob IuIS̲Jh 0T=lhmUK[eVspSjYs8֔|l>c*dbQUIO.D.ګӟ4HW1`` !i*5mӛ[1vʣU) eß% /x7wTD\a3ӦsRҊ 9)If:CʱP'~s [-F9í'I B" cٔ#}2U V33!ouݜa <@㻨6 f|pɬeg& W|[29jx4hև2Ȥ5ErnsMx/wy8l1}x @^c8Jd,vd\w2 DHdZ?\5EHN9?WzlJX\@d2 1#Gt.N'xBw4rp jud:*d>sT ,ٵNtgf񖤜NS>vdqΛ:Q5--Mbp+Mɷը 睊UImIeKWaig^Υy,J\{qk^fV~l҃9!M DH+h<eN̺s.;j =uLTy uP=`>*F(;dwewy'rL@(@&_⟻֢q6`(Fn8$ͬe*QPv)g YF K1ۃ׵.kX7EdJnQL%jMNT&[jt1!.7 ֣cUE|]+Py8`@)qp/`P h8@Ntm5|.#^?\tr@Jk=[ Ct՗kP2O5S{= B`3 Q_k sdtqeZ8p/R~ tW-GObwz9}7ۦ:iVNWi^9Z'0/Q<^!|zھ%_trc@ͬvg&3d_鮡}  #0TS:jZQ^NyL7z]oŨ{һY%sN l>ڄiһn =&jFRׯĭ&[ ҺWM 2éf|žR7a.~he 4L0fϊ[^Ћ#TjhrV2,810?oF*k@rXi6[5G)W9 0U46)~fV=S.~=xAWI϶@ r[^%]zQF/2X4KF#~yW|0}!}87ps/+ i <#GH,SUg&[a)t,Q '¦a/),@t}>frc׃,/<{!Uˠ$VsD]73Izx}^ܨ+:CMٶ6mS Z=m8:i Åk8DH4>@Tj4G=kBHVY:L d0Xq[M'n@ |1nr9u0i)2:y\`;Ϩ}N=gj\SzjRl;m{ϬׄΎ%0Lu@ WB+6< Je =e<[> S}R ڎ7 aӜg&%)q+1Wwjk~\[g]Y&ѭ8Fzڅ(7OϟŠkZM?5@fgupAj a{N:/d/U .:Yϣ8l)>=(F=,?$iNۙKp~2"WNiq&T'U׾|j\8p-CR,OdCnX~%aašXf tMzPjWTvCSt2;Ou Ys+M67qv@u' <h3^l}#3\'krʚ$xfz5*UKba)W#U77êr#:S{zVlϐ⿦^ꀭ5t12[gmBAxՑ0lh}v !]F|.ԵKs xT-d'ߴW ́U*1 (g;48\TڠC<_Ws"I5"TGtuLwR~i x}Tjg]A&@gt|ѭJ@:2?_QGt_G'?_) !< )LhڋtwXTW zLvZ*p eЫ>#3x;끒%gwTͺ_)9M_֜LhFpKf:U #ª,x~ZNWM˕It]MR77 s1I`Rz;whYO+c-?cJ^|m~;ѽ8> G*7g!-5TXqd,]CTـqQty 2ٕ3~-~齮I8|=SS}`tXz(M\ƿH<)鈬1QW𼽔QD іDa l7fUۤNkUɀB̮eI3dm~z,kJsցĆ#jJ~FbjG?}zkoEڡJz{ދV TLͧg0ɐ9׉Dux=֨4-aX\M-AVyqu!lB2|q/{m'XY0Nۚ^Z3knؼ{+m6w kP T:g5Knon+,t3z!oJ)OJB6^"-N5%b&}B%"O@+ =gۆ2ڑ-zuRy%qnJcfxi}2y͸#=7DTULXJ,klB;:ru~%2D<"zr$tʲ2dJV(th Jx[v}0V9p{Ch1^]Vit<4'̒|<4vӦ<*|F+ ^{tr3sFLH6l)yFMGPCV1U Ji${.7^/ncayi'5Qϣ%f?U ͐/[ \#(-]J]43,s*5K+[i/z?lzhѠ#_e4< O`Ja%@!qveMNk=0]j\ j֏w3<|wMBwiŖ2`LX}Yβ F 4`,qU&  HN)tԤ+y\y웮J( ՇymT%{a*tcdGs+e>b "='=LW]g 
E鹂YX/uO{>U4qf9YG<YvUB;F\%ԡDu8V'Q鎛-P{✇W{ 0ǑI;r݃UG/1G[Mj!{ҽUTXDuy2~A4qJ܃Ok}}J=xi}Q`F/ơ? j4xrmW_5gwk~'}q%r+j:G{ŀ*w}=CşT)Ey$2F$\^G;]k7'K`͝"D,nV#78S{|&{h|KC`8n~*UP }6geW˜`]7`b_@p413vM(iVݘc߂ < ]R!u \U=3un\&UFMs$W-ZL]yt"Q;Y<&_9Յ;ʸdTKZ=` a2@)A@}‹ VTt$`1bQHcWt_x|~,<*)&m4W$U3hU8Sw5 n'iS?Rd+jF[:j}4Mq|xB)f'.#H#f%̑O"&lL͜VsXC[V{2a^ XYݙay5,Pq'&@lNGL<V{2lxiò <x&rVrHwy8 &"H5C*Pe(}y 8}։PAy529q#kbX^zXW=-#Z9^)ba>lq`9J{1f#F5/Wv cߦ@~UYPOzhsK_~l\Kf2eKvqWu8c!,Kb珫v>I!J>4pDL?q*gϔG cݧښUKF4`jbd]P:n)G UA[2 |Ȁ:S9 ~j%joa1*s%`pxUU"xE\Uúj" DBmG34vD^-jDZ #lDo%1Tnh&^y. d?v=HC5Oi~xE 5ؑ}$^jwN+_L6*֔Nߛ?yGDJA"AUCKMgڹC zEZG}A|>]wQu !8fbbu>dƱi c`i^ l(a1I;TF`{5՗hhi 1HMo+ ݩAnPjG>ga+ӌ%]P^ L/hYߪYMۀ)&ݶTA5P$W'2͹Т{XVxJ6eH-=r¦H;Uc_æ$+baރR16x(:A9z`|]JxAsuՔY gt,szQ.,mn"61[GLJ\Fe~颪^df?di{JVzGd9y/0:w%rʵ򤹂7~)P.һ~µ#1T7Wx)Yt2L7I\wƐ%NNld,Kv3m:'Sˠ"yj K 79|Sm Quj6rUGq:W5^⣛Ⰿ* k /h6Vݫ rԿڰmHlbSڣY) *cgtԅ8|=%ӘhIݷyw9fٻcȶץ|5TTtJ 1i_F $DfL[uQdV@Ǔe.<^exa!X*45 !HkȘb#:uBȇWekPMkMKO*')?|?gS3tpɚ!w=߬!G-UCtP̦b++sGiseK "w)6'U(0ӼM)d<{s;Ɇ_y5< {˅l 2fMs_3U9|6q n,@|~ -b{~FvcZ=7~] 33]6R2aߑ$ ? @fj~ׯ@wHH?jjV!ϲ;j WRMAfzҋT7tV5vvf=Xbw#=āgC^fhG%:TIӊj-yM1FG?x)ϹE禭ZKjOyfnҡF/jP39.@DBګHY8;Ey *&!*KVNP5IC7= *[/+ubż6f 6_j6 *v۲]NaQոٯWTsJt@.=aArm:#E_}sĹJ'3Pl6 !D7du[U5}2mew)u4j]Ν""6)xx#iaQ[0 {WíٞR8M(RVCaَ!|jEdfHWO"U{e+0z5O$2~e~R SYғjwj.W5bk 5ھRvA<]%)3GxߕH/t5F|@%]l! x\.8Q/$;܎!EXoJdŗnFpτNލF!xw[ڹl˲Vզ^U:=䌍ukgN\hJ%ǥT»C)x45؂fw3&w@:+< {j%qp)`;*ibU-kKxgV֤+0h(]M3Cv(`PyҚ^>1xRӝU1['t\C8S xЦaQA\aK"ɒ mscM?$w> uf댝ük+3]HlFu@pXUN)J{ Lyݰ:cTN05$Tb{Ƭy*εl^ ]h4 zfV&zA3mnT\y,&3X!nmz) Xތ&6 BXws2T^N%I7rˊqf3ffuA .IN뷠Ƃ PKsR ,D1&O{=?ƕfn X%z% 2 y?i RA^|!hR3! ןR?] 
;3yӼծAmXƿ7*tD\3l?5pGcrW (8g '/ %@լ$ʺe&oDhȵ*M> s1>9V}Hʚ[7!o*i LKLA=o!Glh+j-y.{_+$ifyc io~MtVֻJiJd\ #T72V*5|<"z>^<#L_{y|gSM >\4mdt3V{ jhL}MIۢ&-Ze7p'{ k^ ݕ#LA._q97$0|F*C2?>"D/=wuѻ1{hR4x鹭|,kRN[%i.5_e6Nʉr?G!şSDZ zSp.AvKBNz5F0RqInh_]keD8wU{捻4/zxmX.FXbϼwkpXW*3 @"!,cp^!d(_j+8~Yd&ʩC<9,kF8Bg鮓i∨ח(oT#L b3`|]L*hUZ1~ z߲_w >㻾TgyĉRr韸׉uY5Z27)+ou=0nx8TùQgU!⣌ ϮP@^{ 0ܡm[S&*cWuhtgnm* ]0A^ϝR͗= =}ecy@n̑cf:Oߎin,Zov8ݧ߿rpk_13j5:U Y)U|l.C 0qScYuU"BW ;qkf_2v?@mǣ; WW$p 39&hPCT`l惀,Zj߻ϏWC\>8x.}5 `Xaj^U@!H -F`AȕZ? ^%X'#o֐ʚR'4%5/< =Np6u;f΁!Ygꩲ*F.}Xd ;I!;Qp*G7tX]s/bE8:Ԏ̘q7Nfvؕ4p$D k5clol+ŒiV*Q $ߴWݶmMH RΙ\ަ*^r~' +]D!=-߮pcRfvriiKd{ӕYf_Tk~ܴ&%3K=A B9ŒUڲFR )32sst2C'#[M=C0 pZ+nY| ;ZTsT1 ikT~e8GĦﭽݝ^.d6GZu%!fe]D4qaę"ޗA-fx6):?rN5m=x_/EEf.;@(!ZKow⤭l<6c=9fDVHåpǐj/Ψe8w>Tc횂DkqЕx&ZzJ.OOFA)) ,fhM1pÊe];r*xΞrHt.L1sdձqKۦH,d,[yY]0%$U5<.[,^|:ڙ(^QCk /Z!?xQ']$bxg{kjS]@\P[cۯ>c ؗr(&3}AbUU[V/Ur/z!E31T왌[cݱ#ٗ MVyA0fLvzbE ryPӫM*eu}ޔ%n?@5<<liOff7V+JhZ/KF+ޠ%4'E"^X1pmJY KY[|Ysu ːZ@u '[O_lvzjεЗVt0;]Gӏ7)>)Α~i Y $n̍iskkKMnX7yÈnš#RT؝'08vN`*q&骱?_ ]@U:FS_zDzxHkEɥQI})?I֯Ql')EI_#UL-$- ]eyX+v&Uζ}d*$[!Ͳ*Dm]m!!ٗ';.٨~ uɎ!*VGY`rU$|$d>}7wjkoQ]dcfh_GU337'{wT{+,Yt1¨'dtp$4dBf=Ξa1ote[952k洺D)±eդE}Q49o Ƽ'43>J&z~fe4kM@jv5JO:ŸQ8ou&XaxLZtDᖦdw4#16!dlꭝ&E adQR&<'= T5T[Hn?L@s6𘘔$-B: Df͠:+zzGU1ۗۗ16Ef7 =#\]d >vrZLK_}72uzs!j>mIX$»@@xwN LF,tF]p'ί|(!/:N09X8dYV sqKxMgkR)^!A֘E/i[kBJոKA*y^qgV-,^ձɆ15NHd11x\Çu*Ǽyt8OZcq Ir6c@Xۿo u2I:Yipd!ܖmAJzsD {jh-נ:5ѩh~xi%ide)sJI<   *d"EzM_qzFˎ2| ͬ6JQ8kI<{^p{ZTMAÍqhߦyfwDʚU5ZsL|mi$Cz[^Yg4O(H6בփ95!fhr-徶\eTȝFıae'ȶ~PZ@˂q uɪtp1dˑTpaP,.cy0P3 e6,i;ynP:qF*A.--tpE"?:UƯSCJ;}du`&/+4ILoס>x "m@x7`fV~R<*z+,qK&!ƞA݄7$2_yfsoH+x]&XEDԉƤ*v {D^H)2>ftOJہN]VA"m=ȄnWSu4"?,Tn /]`Nsdt"o{nh: lkaa<-S[d*S6DOn|}.Qtufd0 }1Wަ0/HŴ5\cr=l'j! L_} xG R*X}^lP޸-(dU*滶]  *K1m` h歆5 H: {CWy.E⽝k7 *-|ip7Pl70+!Jtݖt.L Wؘt7Jyr=igzGARk`/LreXa ?-dP`Zggi'2q؂cSY]Fk| HOy9z[jo}@L .높\<K^Nmi aƿ` dn,^D<_f5`ղ=y/2fz IrxelPCYѢb0KÈIыR2̀FreC^}̴֮ȝ+G*"\SXͅDaCV '. 
ᗂqXԀ%Ji5~ A٥}ݖO,w|y翓O!ƨĬ뽝)j LH1F=1k&9t^Umш.(}$H2eeVm]E>P^-"q %.M5 LOei )j^H)=XkX`}lX^'8?2>ĵ%D4)0] kMwxH(Nxեy~|{'JGTɩnV_[KW_ym#+CABԡX7^j"9׍bHUm5+]+Ls1zNԠ|^+A1mП9έytjCF%9_L1⼁=Z9<_3[vԅco9e$O.g3=kv6K#|OU#׸؀!˜ž$0spob%LAAlCT ;6{{GOuQ{] b#A=]0ȃ}\7 8:#8)U'Us::T.z&ujO S6]srݪ*n0CwAuWR?g$:r<-ռ D~E ƨ1q0Ȩ|͇ k gf|Bx{@1[pDtTLɕuD#sQ''KQA:ohҊx24Jl@<%f+D`PUۺw`=(CιA(,br< scp P',2- ʦlP="*{ Yn7a~PSs&2Yۑ ,-[-!9Υ1P\ltm;\LLSV`=z? 'ݏ<$>q;#wkz^(iNf RTx:ӻzG2PޭAtRp &-X{K?k~00\nAp5A19J5pzsB/$DRY9QӞ9#uV~,ːGB::}k"?jN1BOsyMď vv~ ~5cL=҂_!v zq2qC Aĕ5.Sۃ^뻡3f!P\{XԒh>&Mg}fbS 쯲胍Xw:x}9)]3aI໙qPӅ.WTX+sݣ"斐h̯{m!2bcS]{ U>pۂ2Hv.A{>oǾw<T.(f0G),ILYmlc#0UPe}C'vf_~"${v+۰i]=ܪql!{:>}IU.z?^)u@k#j [ cX*] Wz@),E*TodjvOo}}m <7 Sy͡!FHѾ*,4f#`DPlo;=L7.~#̣*5ÎwZ jY7{(#3VlZ}j@`:Fo& 1RfcJ''/{0jS]04G|"/ 蘗18/neGՍ(ÖBm I&54BB9d+ 䝾`t3 +̟ o;?} TZ!%I 83dpCR<م㿴}_*T_h@~ VՋI~V}ߴ6u23kVC Xq)KȭTa\aLݳW2Ef1奄?{+2Df{}9zD~oJ aBd78ArU:nD@A*cK>q"Ii./.}W o WN2i%2S(@v7Wғ\z߾k˶v_T;L) 4Dņޮ!HG=i}0{֫4_(,o5ba戀{S?B9@qڟ7͈jzU-3h0SbDw}^CeQDPR}b?xo/߄Za B#A`{N7D`BXlG\UVarVљygE.<ɼY;H0"<\o3 -Q(dGVƯ3cBA)5 ooF*(w_wB/-jQz?"~FTb@ו7nLˌ{z|rP4>K?{3'ZU fqO߂ +z3zpJ^4XA}YNW5ȳfL+ ]eU:wyϬ:#'B" aC77C-_,vq&8Hx=O!*9+n2\qST crj+Bdlo4JՠCluFy+.as|j1BS ҡvhCR_Uc1"=˹7q[ k)Fݰz$,iޯ/a$,chK<AN=U mW"EM3[' Ұk\ 1ء^]Vc)?]hRoSܟ3v\U1b G:5/ø=Pw DM |Wx嘴014hq +$\݄"G#ЎK_w\\ JY./DbC9yx~F{MKEFUk2/dgI92P[?(L\4]!8Ʊ$Ҝ!$6WD-,uEfMqޯ]է"`4EۗCa=P\_ɲYR緮{H2׀竎ۮdav+m8P xڬYれ#fj,2)V˙5gf+efCr{-Ra_{-Z Q1{ha{x@d"ȝ胴s3)Fu;ם]Tq"Mh[6- cw"td\|D +׭` 0 ӵJTb.0Wu(Zqbgk>-fױ i"f[9Â8.+ bY<4-.| ~.C7)Ofjz?Om, h12gk hIf1Eÿ=C%-81'$VGTܣ%JGkO\8~TvS)2?`YôA}]]% :a`$&}ΪK| UϷ7('tk .v X61B UU7(Z'dD(ɋhaë <jJdD'YD:LN$n\0lPλ4Myy ͽǥn+I$_)Zڐ="Wx3ׁnՠaM{ `/2勎o.|\$bE!HיL N uWikf,Ev_*)~r%JZsi% ߾?y?];ҿ+I=<ÞFXoa]q)>^6Gdw3/F7ّnp8b^ڑ$t&ϬH4*cّFLJ7`ea¯ XsL+4US"XtL#.e@EK}9ULE8k"G63&ؿip:e?82e.fM;^a4{r4dRuc![Noʂw%_V'\+ert"|SPu_%$ @"/R{Noe"᪾7c7Z_M_Ƥ;5aDuWSk߭ ~\]:gh\}GV-n_>x_8uO_&zx~ pqX_Ɠng/⳻'Xi7 \c.#\}=?cagBUa@!*J8Ҟ5 {GIi5Tid[C^4 NhBS]br,2h5[ZQ/< IB/\RI8#U7ZViyKj{)|OT` U {暜nc|rMI Fc[ ʻ߫I=>*?ˇkT|JM9 q e, 7Gp|gH?]XŊSoLAυx|?YDRX"[/^Y:dҐ#EDM^I ں#k VnB63DhG 
M!C!cD>ojOpu]>Q]ND Kؤ˺s~@-sGvjSWX=鹫TkeaRaݦ9م 'Hk9./njżwj2بFTCs\[8vWS'rAr ȝAIZR{$b1RSv[[tT=K?@E^n`Vg4PIH!iyЗ]j6Bx>$ NN(<#<KҷC--ౙ^mD!Mj>xC5e+cƇ[uUU*A0`vrhnuӚK_{퐴{zTT6iV(&xY*,">\a.,@x#M ang:zG-&!:tHV 8D ',. IQ]uU /PYa0qOvjv>!Yug/F]!/H^ʱq:e|"Z.us.9Agi8 | QaLP'$H{Z,\ <\wn}cװm2=kȻ'RB%ڗt]30EŔ*,gg^b! .נ7y,q6-"jzsxv}>4mrs3fj7#6Px] \X6s\x'|Bw C!Bz<.1k|ؼ)55vT~yմ<@ ; j/>RH/9Gk}K-ZC{FCwk[׍Fl XL̨S4ӯ_Aɟ͈w3vL͚U}Adg%g/: VB8^$ # HU~7aiFj5kՌuh>4ǡKNᢧ~tguxJ aJY{ 6h㮠ҋ&rJf)?-XC%35K܁4eI(wUӮzƌJ͡ i,A<c1ճow?x5gxQe6DAl+G+߆gMr#jf|] ɪ.ɓQC)^E3FFTS1 oU@NQ™bxRzA_&h둴zb{o,gS(?n 7!KFGZ5z ϐyȾZhN7x <H4DxZ Xv@{$<\X!>ix>/&޾ endstream endobj 206 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 1024 /ColorSpace 211 0 R /BitsPerComponent 8 /Length 723 /Filter /FlateDecode >> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream xt lUyn>=. e(ĤD#i10=40=N $@$BBHRo2um]EP !!@  ~cͻ;u3#Y%9^]kkh>>M.Fha?Z:MOHh6?ۨ/_ūgF?8Z-?u/]zZoMnM~Zݯouq]j}?{enl;fvg-7ͺ-KOFKۓhro9<ԕp_]?=Z{۫#n[э^S߻~Xin[;xxj|7蚺B3gy֯uW%=VFeD_1P[Vq;gC?iucYd݀hVIetoZ mwo?|t3?7Zu7[km_}PQZ~R ߺAtdrVw{eIOt/Ț|Wˋuo~.ڗ'I/Ѳ\K/z}Z/im쾾NW"hI}PFjӵ>znR/ YӃK<>eSkU+:b%F+~xOﭭgםm%?,ֿtŶ4C}=ݏ|hqߡ;vߪsqV?WnL grQBc?>zO Y=GnaN:Z&gYoDm~̅]}\j|a{C>=ّzߖ* A\v~o]:0mh).Ot1P³Wb\Q׹*@S,hGRA=ܼ\Cmh-O^^ק&64zRɧ6Ef?풽6@}[b dUW6'wl=EOjJK Ji=%;yњJHћ\f8o/%ȰUbq]V"]~їfJ&ZҢl%udh}.ѫT_OWoCq{%a-]@=ҦGgRk.E- ٮVO[seo޶Y~|~Ìwkez_ϫdÊV,Y: e k}܆ DEJl#m/YiCť:)k ž."IֿRVڝzvJɰSĸIw?wu ɀ`X: FߤdT )ґԸNn@-Hw+YXTXW/\ {Zz m Q:?#v~/|>H0[xw.[Y$T籔Ճ⾀׾Wrߦ0X\m%*%O\uf&qJKlJK?վԡ6N6ޞ\h}T+(D{󘋏0(nZm_Oћank?W}G+Bws=Po*Jʰlr&YR\_W.t5zj+Kҭ bׂ6$)=91?UlVY$h9RIViu5IɹqeӕӀrǟWK+N<3}RҋPͬP\|WI5ײhy oGI.>޲2k}|+ s9DstҍȪ¥]?:˺`Gʳkr)7P2P@򕎿nFx|>zFo# T@o߮畐Pia_ $uD}ZoӋSmAo4eu=)u-JG !ѥt;Ba#h.w-ϥvSo(}hn!F',?FW4}5a i`3WoL4RgJu Wv3s,!P+2+vp/Y;>2yQVD= ƲujOqb5z܄}7x z[yX NNP-]/j?%U'}V8@Am[I"8IRS|[54/YL=ˡNYM\>k8WA"cKB 6Q=fL),շKf7IXk/ j(kaGZK88 A:rXԵ<0O@ Qe%pk;p uqU@i9':30[ka̒|u ~ꮻ=[hc_*b.dO͐gBa|xw9@45؛'兩v ,_IS 
-nԲB;k7Gvꐐc}Pҋ:kVBSzEޱdXجNh@BۿwK*`{eIraZIu+OK1<9=˾\&PiI .# ?|tlI8'hGc/AZصQf]ցD,nl ZΑEZ6FV#c%S)ÝL-rKw΢-qb@;nufkre~v,-Htd'B-`\*{25yZQVY[6lo^'q]f;PZ"-DTޮBWо_;k-,91sIo5:\6eZֶMϋO*:97牁'ⴍ>UDY:8/)2LC$\vaatq$a-Ot =L-Z],6pBa{mߏ,Vaq97-i!jbk;U^u݃g|{wiPSKGGN^hIm(џǥ,KA ۇa JXW9MJ$u{wIZ-;UK Z=iNrɵh˲GBŠ !jM6at.y$)Zo-N]7 B/[{mԻ..ܥ_:>lE\|>kux4wrZy#K!.vaO^L#4&őԥYUPd얕ξԗ&1q&xN-u@4H੏`vt=s1֭>=PȺǪ0OJzϣȞ^-H}X Bs;;ucpHrۤlk[tI{?t8E7N58ϒTEQJ;N-`p^;>1,>gj:`T⾼K\r:RG|Ǻ"LNo9+-Q)eAMGfr+6yi$SJaKur-:vDR󾻰9\[n2,N7LrtSrm fR J G^+4+X%\-kRu^٩c2k '唀vVꋛ%C,#K.?FGOK!0IVpl-$yFj9997;`[׾QwhmCD7S6}6K ywJi$KƺeX 99g@n$}?f,Ztu * pKS~y P .Hg&VBZ_%׿|} J~iDPXgׂke霸7@n鱌Bb7'~"v4,'2SRw?  nED+sLY؃rٜ;VO@DR!a੕\z\j߉ݘc-k{""rRBD)?:*oo /tX _/CWVT_}i~L:?TvIG*jYND+|ȉGY{ <_Gƈ P hI-g#ҋR8\'/#XyWLa5@t,ClNNͼKV {+(iX ' BOb}^&+cr+.Badg*VPX߆LzmZVA)9rEV_W_ b/Kz?Ե K.,3{0Zϗxyj{MgKuX\H.-E3_xA=ݶL '8jrbMGj7r3-Vr+َ b+_)?4Yx1H&#Ts"|:VY7R.bK2yIq>u0jUmlM G;HcIe\6IRM߲\E8`Uf '(g!p~pxMU JpW%M\*Hs׉(ILN03ޓZIwɭpZm)$3B=SZNSIpz9HL`zGS/6B9"+wL|Cj);29v$GQ` 48{jaϣ%r`;|#V'8%rD>JBj%HC$~/;f mD5#ւ5ɋ NjG8ұ9N;:u}$jx;3B[9yI|U: (%兒*{y$\.|y2zZ@|e^!(%z90F'P9 hIKHf!^RW;Jr)^'S n$;`б Y,bJ,:+mTž3WfmGklhx P aZ4Z bPFRl@D+X:.Rctou{BW/gZyJ=:)*GX#veoҢIl$Hz Pe@DuPp;J3}<ԍ OV>J&YfbYARgDh.u huis̡IE~N A_D⨖HÉ?~"^YxaWto2[$'Ry+&o?D=_df[«ÞbgnSz^vzh{Yc8-::ŃܘdR7;'7t' y9Vz[o9qHqY^B~ ۼڤzF:)̫K!'t[79%:1@[;⁅|bKG䅵PkYİrF$4DS!UJLzy/EJT>`#"Qg fƼ)(݆F+b%dOGRuR܁`u?BrfL4Rߨc4BR@ҝ>o@,QA H\vV< r!7忝vEREB')OݕnIU'{RQǫlrK"W-+ʗi$R*t?(Ũ捑ǚ׉{4D$~!Ǟr|asnɑ|/Ђ,fy5G0UtԸr HiS-(Yvo3i)_ׅj{vnc)Ejy3EeO97C[[ü@j51 -Z-D#++P} _ՄVΥlxSh,? 
%ڜ}0''>Uq,-WbM$p#)]¢ :J* o?RDRpx:S0RAx#\wsr-?r6L4DyS\b#it{upHqtLwQb;i'Scvqv➘ &ɺ4p38 ϓM`xtK م)ʫ]ؓs~Xy}E(H9taGS5!ɀ|r77c @-s)T}T  Uǟ7CY7ڱI0̙O12PYחZ5n_<~I8{Ԩe=??ZA0sMtT~'8Pz_:WL` qP«S|- W͎.y.0\Z)Yask VBVO'%"ͪİNNR.GR nÐۈKvEJ`@r1f6*W)u6/听"P;gX /{3 8:pV VP!(e9,?I15[&?R'""PWVK&7F~l~Ytڂ5%'&|}\H +'ɗHxZ7aN +>IK , (IVc鞳~Ќ DaVoW8MLzXhDStR_/KMKӁ$tQOHvP1!ǪelsM( G=n`@ːqv{zCR5Hwz@!RQrS9PXLJXvq*Ԝlu}GaW܊CSR+a{ W$gFB7/ADZLy G=#:n}QײoՏE?)&\j}`JVuK'8Wsl6l(ÿtpQt+n5!t1%ݺ?6$,veA*[@-O7RJcPq9>Pq:ѩ3H^ܔQ(pD`>gZ1C+1 ,]%.$et헔$H̬`Lu=J:w/jZnÑG@QBNWG}5 ,ߋ.S( 7:R?Q2fDbҐ(1p "5@=ݭ\@ݝtא۾d۸,o$-nu!1ۛxSJj, ۯM/,2zohH|&ڊËsw*y^}K;N̰tҏޠdGx=={$V?E2׺no#$FXU1=lb)zӔfzB;jxNk%劭9&Irn+DirOI0pH% ~OR4Fl;P(N,caožAgbH=-ii2dih /hS,cIݜ`0FT0emR$PvR;leb@[Q6̹ >v}Von^׿8!Qz~׽AhN  tqd p4W( ;.z HK"KAZtR jfj.Yj ZDi rʹ(3hme~iܐ!E {r-i(lӣFw1l^ (r^ A1`̂Lhy\z,U&JI8Rh^vItޖ$骓u4tL8Q\z%_T.8HKiu z"Vi9]}[oJk;1S'RtDCZM"~#U+m^RYTTɡ9dC3(&X.HϽGv6@z4AE ?O#T,,tړڃӯP'Y*C_ms~E,)vzY4ᢛ׾F}Q+A F\/YD%EΦ^1ʊ0,oPpyd|^2tRqy N+<[j38`W@sT;U_wPmg}+jU)@ZT;bJ~i\JȠlX*I[.t:J%vI;KdLEMa"D\gg=MGrU/^r#uxJ;$Br@/mSjqHǁ94TݼXh4kE!w9Cj30qh z 3PL~4Gi[D@{'(7&Z[w8!j)!@ E֬ZRh0,O> մ6SNou dih\p-iPيUq9\"hmUu]͋KvHn|HpK;~EkG}Ouk\tO+ xSΥ>6zRIb4;=,\+$ivʁNގ4V,[On[LkZdI:n..HkwT@66.Ux9SOe1etf#Ci.R(^cuϦTUxo! r~;EȽN+.l_歒L]}KB-](d uoj6 6IsU ĺ7PT-$z`H ոx.-I%`0`OlqĀV~ʺiz&2U VjTe58< UhTGjzx&0@h%k1K|3uyD[ b/iLF!^?f`V+7L%xLfDW2zQgzΡkY.$*w!_)awHlYqzxD㶛 (yNˎ{8k%]]\eP!:u!ݩ: 6#%̿7. ے}95kSa|Qh[j7;?&rb|[ZG]:}Qy5 u?Ew*WoLr`EL;mlh$ M[ )V8X)聬)$z}K) ;+{ga?St yhp''HLؾhqpb|ŷɅ/d6j1ٹ wr54)r%)=p MٯP) 3v>i۰\'Ar3/~c3es(O7^>W<˿sz;9ї>PF6\{]MR Hb2tgeech , CNY}{"Fx F n Sӿn{)}o^Ï/)Hl,ˢeԋcD6?(̢Û]MMQ"ٹ`Zwչ8@ۙvK^8@ k`5^ع,h~tzg$!JN\Z4lG_]/sF΂cR*+G7:jQU |xbkS &e2%`+Sn 3z$ qH$P6PIߡO߉{e:| R}1oQ* ~C!BKաʑ䤮9Mr} $u$y$EHa8e3gxcӚL(hSKC{^8ۥ2 ?S(kQIE,ޞd;H/CF\^wi('Ʀ;/= JC82EFwOҼ&Ɵ9)M:=>4̓ LE;j{ \AG%Johk2DY[L23pЀ5A[<'*fPB +Y/HmFA-!Q#g"hy/v@#?u2ro)uӭW&G̎eev+vaT$.zx@8!<3$mlk;@VObqVT.qODi0L6<}KF˴#h0Q>9piz!0Dhj/ִ[9a | zj݀^G4b r6S``r. 
7BjMPE̻ޖ7Lfa6xI5;L=u[n}>T(@#4Šl}EꪢǏS~A"$ 1JBYEo-?k<{{rpTD3,~~uvu#/rQz6e0|2)M(̽!?sm`Ly3*X^tyPͼN0|0&3|UPnx\-D 4$!ݸTjpEF替%KG7eU{v`~'xCr3- ϬDk0P9GPw+:y)#JM郹6W=|8q'0ZiatcLj3J$zOܠF B0Q3mHuɆ6{kٙ=e(.8ԣ.3w\kG9߆t{iFqnvIČ! m A98t68ģk%XPfrʚ[F`Xg:}P;R43Ҩϐ"*i*')K MeH{d4U߯S0I@J'2-b 8;SWǸ2=ֳEtʨ1l3%' W[lrm緲Jhcg=LW9s^Ng\Ե›7@w{7Ic3e@_'4hz@f8[W&1^q|FK+Ы:O^&{C/<=U=qA(t}"''r⿽ @ũ'fB0PNB\i$ozuYs㯦!Ē hJZ"G h{ؼXPVJVá/#DCb[zw n`.0$bMl,m.rCfx}x2 ϲO;$\7+懾QsAtʹB0EIS8X"ҩo݌.'q*!ERBtD\.Hi!x`r,ITǏqkAt,n+O3K-vue:~<>}Dg֕22D}*} Um]M e^(y֏?r2ThZl=K"vh/1I֟xj@Ҹ03 E_.K*ga/yCx/x3/,jwϝ ΠD scq.hܦmTyw®eD>{":\,!kHT߮{YdCWQ”}`(젌DKkuUos3@l:;:A?pWFxC76j۩N ǂqk@|`\5ozi x-HF \nw0qhgah`v"X_Oo))ψ1Sڰ6TBT.E&2LJ LR&6Ylg7]i |EBr@K';FXifIBʯ pT2&Iks*CI< Jd"'svsFHITlћ%f6pɐ!+ :Zm΃g|!n෵ 4IւɉDW \Wr݌vOFؖN8$m^brCކQ!WLb|\85aڸ B>2\SܶNg_;Jʄ/P8D|h ]3tI [JX$*Ȱz]pNgfn_UJ䄂o߬`ȯ{|TRIxY@sw?et.߄bOwF jQO'mi\Ę6NSiLk#}&dJ2{`!x}*E@2$_ -=vc̽n1kŢ*)&]hˋk:^G(ItbUN\[@y#m)Li0ftznvSq AQ ׾097v2rI: %ci sa8[ESaS1D,N ML bs6}I}^Zg- ,h{C&ϸJFD2!GIQg`Af܆j՛NS;9wh%6 S~-a `܆ȸ2܏*8!?*Ps%G/AvHaBM_-,+/'{G|F+&KjM&+nH^: Ηl#Dk%'G͖qÍ'E΢^{g*)XPSٝӦQZqG= $A2{N-BLO&diR'L8qAETZMn)p2%}!{ 8wGfrJ'E>J- mʜΙx>'RSm$] DB6KtەFJuA=JyoҩW(u "-et}*h6' qEd67 hP5U'hM0X7ͰIC.x`ctC:VFR֓L:mR:wEAk b.ex>-S, )L5jĕM?:Eoʺu(@34\&Iey { #] OҕhhsƙRؾ?w _A^l$ƭ$͋5`=9h HS3ZM~- pM=ERIU}@KȜ<%Tzgl|e). ncl0x0>턩+,YY## >qcB񺸾K0eF\?4Zѽ 3G:YWJzuQu<ڣd3qqήmhsk!>vz\@x5< l ABŧY񉑶n |)0ratʹxg2ɉo嘻*LƩJu0ԣ#R?]y+j0nKW\&ejqKH(:U j-]HjW@i֓tB1 񘿖:=Mbk6{1 6vu6n lj%c/VVS. ;62H7Fz`.i8OAb|\M7YiK(LoyPWZKdT-#thI?wɢݼy'W)ChҹSe:vY! 
BD Q`Wݛn }a 3{Ͷ^1)r4+:^ise}u"ZQs v(4gWm91D nY9N?±mQ1YBs*4j֠%d¬]_S!S~wxqVτtu#b"A ;$@P2pvF8§ Ii0 5k*^5uҌt1EI"w3X쉄P3Y='D gq͎2MMyM1g  L<ӵ`-bJ.H#ui^ k6)#; %W4ɤ]N7:!A23mV-Q$ ETS/^sxXwq=(%G5Zg4:Q0v?3AIm;R)tJKoWsij7@/JV%KzAD-AC[]5lH@NWxz0|'+npr!]M;[zbwNZvI $к#$wFhg)<>I@7K{O!ʂO?6zG׺UTH8}ew*%ҳ{ŭjk D"L^ pq̽ PcĬTJV7_m$8y $M9>} # a}f1zg'S)Jack&8NԸ \lHz(e;<݃gj:0}V'[f1MeM֐b&CVFձTWV:}ܴjJ;F.Eyj&H@P/N[$is:/wSØ% ;eHk{/4_O?{oIA#/1PQ%iѷH$ru]Ƨ`U~aWNj9q wZF/\tގx 4c&HSz)4Bl^GS| 72CUi8 < 7YAP859{)\^f0qu\$$᙭=P,=Y9gb,kx zuu:&w )>v=NH$*7\JVWZ^B Ϭ'!hEҥ7KHyN(Jmg8>]¹a\k[AfDo KxI{ÂWsc2YI=TL~t67HzG=LC5 0/ 1z RM7-=IF+Bt}zOߛyg3YUI]1FHdߺ`Z6&Ұ woWφ@_gl <#[3jAB|)uaÎq6dfM,>f\ۈi/m n?|0i-V Z ͸5GҲ>6~*Wݢf~+ahݚVU&0zmVu[ñu*DyE$7_&*I͗o:#%f{ß|;$izͿ<ٳ {CƲvA;Jr):YVXE֌xtFًr@u̶֭ޠĦ T1rz`M7Ok݇3gIh RqP6gBtZo,®ЯwsxO;sM3NItMyΏ{צ$3+>`|dwaEu69RkI7åc)D.<N")U7O7D,~,T2ѻݫ덉ts%IP4DGH'`+^^qZ{NkۓCl;D&U&Lo]@;A8¡^==vx Vh HU.vG\R;hkW$4,}zvZ)weWۄ-v6ODӼŌ_L[--J$o{#A/9z,SJ _WZ%VOEwqmi \Vv#pO+f{HT(`=Q$!jwr8J> )韎b:L&U<O֔StYKzgQrêx#X@`;,dd7F9L[A#u0ݻkZN|mp5BrxM:- 初X.VKyd%K 3^dy6|`t$gizvuv2D{Y3iSgˉcV'Iyy ng 3/*OM]s;gX-M0rA]{׬{~;mԵxOOln#X@K5Rjc~ {vpŠyx?4H h+a7Δ] qf/~ExIw>7o׾qYE UE_|T+007&f0Ǵ0=!ۖmfVl: ֚L5`/ n!๏Ɵ9yu*TOA$Mm IA[WV~NAx&$EjXj5Q|O1#~F(5hkU[J.+m"ڄv7;&zs4ɘ!c !1< $ qjt4.f ~꠯Z=>O*oǭgB!n}Cl,?vVSe?L3HXK[JT$Q@(@.-C3i('dV6)DA=^;A?J8tI ~qo,<'Gr.PP6ˋ1SDc$CҔը55tAQeK;]5P޳f[[uuH҉ne:>n7#EC5ܬ&&G& = ikݲ >51FiF; dkK~!lj36Qss2SQXq5"mzp5O,B X܏39_G Pw_EV7=2csҾ[pFo'G3YLI?mTBrQ .ʍ6R'F.Mڔj!!5%o?hQg q㋊0͵X~2 {0L jo9ٖ=Nw\g'b/ \62a܎~>Ō֪lg$ Ծ@9w =UwTs9_}^g0LFPΣC)OP 9j'duIRPd%ӞB2$hR!lDf Eݍ}~tG՗b]L(3)>xĵƍ5^B{PO .'n_@Tb&hcTw*{+V!ۦL.B)mSC*ӡ\7Jo `!K5+J%WUWҦ ?!~TJ)4p1b*of#itl:$Wߪ3ZW6T+OfY_rG/dSme?L.T Ddou^6xađx-3 Ʈk\jS}:>1.snm8 ;x9p ;1]ju흫fNQBCe"]ʽ']*&B׻fV=ni{*^wDW;:2EqЏWj$M_d{8#HTG) b֋#uC}w>gwJ3jZe3SS<ӒaݽuӫδηFP8Γ {xWg䈖/z?WjSQCE9nB[B;$xz%.[pH Z37_V<\IrB1]'(H;K}Cֻ%K$+]K1r6=@3Upt G~:L4 q8n r4eep:KrVf;26%:KTеgg[a !ThYE'>ǔ:S\ ^ j 8W= Ji[KfVmEklYxr"h~ő{\FjYO|h^ 3m5V2[ѩ)ۯR$>{ޯ.u* MuK^SnE;n|:Cj T4^v=ѠC{AKoC[{؇>$簞{?A"YrI#ZO 2g} e 
;OY-ݥUy7qkS?vئ-{){"H8n5CBw]y0{قkc4M'0աh<ƭzn[7;¤ P_7*wq)7 Xh 1݆h{C~U+W`vGW bI^Ò4jB355٦Ѣ̇T䪕V)3yP8f\t3O;sW݅O1B]Hqa2niN.Mt+ 4{ĜHQ^lk*_K؍z TS6|uqr>͙6aO9|&~| G+O`nà<oF Ϸ("+Oმ=;N^/x+U/#qn.W6ӯ`Z?.=pp 73Q<x&G>&`k%EW391Ā~$[sĹSS;+{%`:ViS\3;#wX P(XrЄf[ ;oj%a} B9m4Q-:_ Zh" ̰Tʊ0 6,]w걪e(Nô zVc$Saᾗe6BIHL 89sGYbhɮ_ bm븾|E~ 9mSqƻslIpWY%>H28o%ɷ82^Gqn]Qh!0v i&2T61(Vr6>}Ѝ|g~+q%athn᪸s'P'K?e)2<@d)y@yWѢ- Cl,f &˭.ID[Rmi_}i*"};IFP_d(j߿Fi3 `ksfWR&PVB7)%檄Ho@>N$Ы1kJnk[rW'{sV֛VA0@{bM=;s`Ow^!vSGCBBwa`oHM8 =ڥ}rq1Njݧ05 ˽ZZ6b蕾ŽF-%}|@zH}_ / l'sm.U YӝfԼ;dNZ+R.[!?wD|tMͬ$3=BPa%H)*sq q&lArA%d :ʚml.H=Y|O5 ӒV@э |iU)Hg?&2n K9e*"ele lraX7J;Kh=}L]OY2 XGV R-P=n=La\G>zG\a[ѽgI/IHe%HI"YֳOVe' gkA:g:ЭaNnq:JƫV%MyMEV5O>)cʎ29&:a ,CE3+g >D?*o=,B)Rq"*ak7pϽGN"墶_'B>,22kI=զf쉓s* pHtO/҂,19՝GàvcvC:m58wț~zpOfj侻Ϡ սr59e]eZs|L@BA6>41/{Eϴsɹ H# u>zoR92Z)<#֤~N"WOݍ*:#wp]Ր԰m߇$`=Q:qAY 9dwGG'O UEGETCCQ# ,09akC SUK;d>5eb;+z&S\t LfAa+q]q;C2< vVشlfnݚR;̚H_?%Ju5^WxǛ"f#ћ鲡~֓` ґJSu"8 ٍ`$ⷃ;kTrS<`SKEjAJ]Bfj4vh+;8hCpfg']c gQӌ\62_}菖Lmz#>,AzȽϷK=vD  5}Ƹ5mCSB2InQiZrW$h7UW.-P|k,փ[Vu/`>xmUh> ^լ+W`L|ސFmnݤ敠"򤩼o$ަׯGɦO &cJL-Ԫ9,+KFL#GKBdy[F;yD>B|MdwRo1^ZY(Gc(iYI3!5cŖXoB#P1/S(EtjIܸz#\j$PAveAaކ4CmhG!z3ŴRr1h'u8>A;4 >Oฤ2B~2*D/8B\G 1Fơ;#11ْ7߯fu݅KN B/:RKktt\Իٞ.LW=luf!Y+ ;[mV 83YQa_ ƥn_ {2熘"?!|JAbSUҳ]!oyE<Ш/{?] #rt*{dš,Q19#/|œznz(?ҧɛ470F:6 G7Lx!ǹ8ǩ)'%q@<żJB?)YZt+q!#0,AC)d7L[$xP^d9N҉xcobdp;'b"$*8ik6 '^ E_Ze^ $E3QPሓ'D7 =3,X(m@cذJ] =}.c$a2˳ Ӂua?s}wB ʶxa`۠W{nA>kkwNE۸gHhmxeWf D#j/It&5E%ヸY,dgkj$=7,W i\RENqqgL 9xէzX{5%]3mhi6㥊- 5ڤ& 7М)Mgl.Qu\[.ؕ;h?ȬmiupoZr,DKft[J_t=0F^fPADq2G JBeG}g=Z!oz ?wPgy 1/% vԹ; Qk%hp;vzu9J7C?5< K8.2A\If5|^#eVO7Dsy^y?Sb^!y(YrJW l 8T)>(Bdb wPφ;A/ D׳m>LnE!LB;>/'d>F#M>V{FkqOwt%Pg/T0'*3. 
|Xw* İHJ̤'^eu;J'.zK\jh4c^sՐ ֜M0(w:)q^zcS!EEAi=/3 Qeč:M {](c݆,׸ )O뭅+ЩݍsZ,$r8 07B-;IA$oz>&z+7TAkA!ja'霬_P`ȍRX9f&L(mAc3c_}+`׀G OFWK[g Shj^P6OIͯY rEfUtQYWon'>ia;/VnxzilLP^ERhbUK#~uBtl g-@^_Azk;A7R4cMl^OJV8]H8jE/'d^o%vւ}R0MC3ByR hJ0XQӟ{tLsֹ LZ296ڽhPyc7C-jG}mݟsk39m#b&T\57@t& KTj Z~+;iFCJ񽽹 8-z{q]6aWO|x>7˕-*CgL쩔VC5 RjM2x-+mɶG ~Rj|w^jޓ%!݉ы2$i8 Z<3w=Lvw$3En\!8NޣnGy2tr؛y:9>qMwt~>1IGa1#֞dgk4٬v[1y6%5{A35d$n ؕEԼ0%d J.˃h;+"z)QU| z)GDZ{oԖV+T4\L:|1q@Yu>Ab:e18بc \߫0{e!IF YԭS_yF՞p0ʁfG48eJ&`½"~yKw?Y7bɣWFo[.zBv0cU*bSඤycӀO*3CK8O{wbbpw :i ڟ=xh5KΉn\ʁh?ٽt\f.O;:^oz,ȳG)8U -P5L•@O{ 6]G8MZxAiTFrLCwʳ%tO*yҵt:$mFX8ͬ1|7A#^,,V~Xߋ ;[f$"+/t6 v5ǀ}2(3;s&%w WB{Eh,NȽSNЧ_AH2:gy[1g¹IWfa/M3_yRl0:?ClOP7<Ff*=^=}mO%q%p~:S Ԍ;ta$Wnt SN )F!N,cӦֺ($!}ZLj%)-.vE2Y-}{6vr|6nil-2StZ +Ap"U1[>aYOˋn0 |tSR^Z1~S12O&SKbRie{k67 uNk,KFƦ+`} {{n1o{WlåsfkfTTN ?'Gt!/-6po)u ۿk~@3ҸC 劓@x9$lsycS(DMҁr: {~22$FX;(݈V9F:V`ZbOj.=YiFieC8<S5bwK5x?!5nw%$>uj= IɈͮ9 _=ׂ2YFIH3?л>UyXڄgXC {<; eЕ00AWiUVKqrT¢e'ح4=CDQ,`I% cqIw˙:3TXEE֏4[{DX.y\Tq%33 pԡX1\ZQe`ⴔJ3jyuoq9K(Z ④GۑLH:Yd Yit/<}~^CAT3+$ê\nY^j>r.C::DK\zӎ&=~)COKa12Njz =".]xkqwHypU6")#/xj̉VKyRK C0Lټ'1 c҄GT~:Ƒ\Z-KTf`B{O[Bд~'ۢcT#ٿ˯wLOwSV;4z9SA%4 9uETvb;8-ȬQZ=ms: E0 @jfwFx9,d{C/`dK5876gbOL8A0W.k3_3PڴCs3JN[|ڴĚRz #悥Mj.̔H14!kWCgK:ue|tnh/\½ ϭtohwnCZcS2E1Io"vםc1Zjሰ偛?b c țY[rgRڱ *0 aC$GL{O]zU'( =2ȂgXزr^!K ԒyTQGavU S] E/6ۏo%{o9be<q\`@?ѮG[>|۱Sm&_cBhYT 8 ڮKܖAi/58Nt aDΗJ0-ͽ9A*N>JB"3t"G:Fafgg_T&s;RU+ZP!h0Ow>Y,ov%/e9jY` zĹmn"5.qKra&l˹h)0%%0h.IKC_@rӚ_"GYT kW!fgd^ݻW-i F_*# ЋešGipL@ZfwYPH&(wF};FAOI^C;0v_Y곚CKEp;moy7&*Ph ?q$Dew~7dP&۞A56bVOS=V̽ltVvF'Zxp6e0dva{q0sK1!KO[HmZ\eYQO 8 :\[)2SUweQș]gգӷdߏb?Jt99ʼnr ojS2m$&ڭ8ۈeu(b}L"yO֭1`/E9#?Y$Zޱ-zdqY:Us׈0vr'5w8NwsEէag>)3Jt2:p'San;aGa@{דreRhtaKsȈC>Ƭ'; R((H +ń {:|AZ"Ǟ 8i|.?qSQBfj~p+jM-Խ[)~3huViʢ/{d5ۛtUqXߋ&մT:thoEm%c0h~~!;'Y}JuTw R}6d咫GTdz4t8bz>M%,PI B*2kqdfi]s闭Ergz,/ i3=H??~0zڔu ;{chL-،#ma%зx s7%,f\XRH 5Mʦ_Df .vxၬEQc&76>E48_z/@pk:PXΈyM u"oRS̨2 {ݗGXl>;f+S2/%LΠ |=^لT'UMUL>G復kU Ap;$='٨?W!=M2"@ljk%R`0gy?y+t㍁zQ9䏸ͬՑokM$ӄڙq 6D&?ೞkperW,Y>mΎkt)> t{n+kջTz6 ԉ]Q&$&LA ttI83mSGCsV=fP򇾻bPX@G2 
\Wp~{={pF9x#a#=^ii̻hcvH xq YƸ avnG׬dߩÿv;kMmjD,8lU[(C⺈7_1˶4>jum<+b#7YٴCfbB5$Z[FS}TpY:dF(y ?ןv(VK;uuwX >NkAr\a#3ih04B->җRe\;3z_U;S)^~bivzwS'en0T2Jk;Lq+.\VAGUZg<6A0 H)R3&I3#쬳tW/<>)){HVqG0'һm؇}e3xч#{FP +=li=(jrr {I + baH"?F?Wx1r,B_m9j[?uu}.J/He@sw?<6U + 2~GhŐʈt[Ͱv'zb9kڥyzvZ!oRn4nɝ6dꎬZġJ?;@z>PŪ1 ]]kFi>*IX6nB7Ok.elW E$%gcz\TfA ]tv>ֲKiB`{h 7MQ Z\ݯ^[p=] ] Gy0b"\jn]8K5NJx#\~;ma%@<Ɩ6Z~[wz%g}jvzmRO9z[S2B V#R(2^N)ŔhC`3Lu(T BtS|c+e+u:zVKo^ DBR*Q57",قFz1Ktd <9|GǨY>01Ȉ䤆OFF#\_Z} Z탰XBPW>qǜMysxX2X|y7qaNƶK=iح!1C;4ͽs vͤkW_6յύ6uw2VOk52%yɵvs*)vZG>A4;m!A ;ħq}nkt n^W-D'^LZP$_S"ֺjṚ#MiU7伭)p:2-@(%>fG0JMR&ў4 M{A]%1`iGּi d];c&a7"e`]h$C?ƟcH]pR7˩7xls{R1I>mĻDR' U3qЦB-iҦX=Zޖ*. hIG߫/~Ἧᙜ|g@yCB4>_-=w/3Jcߥ8O /|tVhVò4dٽ\u8A h^V]m cl"\ZmOAޥYn6,Ӫ7ӐG="*heyѐ]PҔYwOx+䕭}=x eמ *@Q}<Sjy}eI^P"k/ Bפθ^ynrS5K")@7YJ4+k)FsmcnzJF^\?׽Åb*)C ; >/0 \;謦Aý_pam|scМw=qIjOCyOTahjLroROUx - j2,`bilNc[;-` g5I+/0Y.'T4anP/E f*#:B .WƇ| ,D$$;ɘDXdAܡ'n̰=^*f<}T~˴ĩ|#ƖPmZ:<+DA~gF):Jfy@Js+xӦfiw!>n@'[:ׇonZɋ~S\iaGʐ^%X~B04>G-E.mc]+%;n=6o_o]sE=Y},#IVBu,mY}$ޚ:|ݗn'H!cǨMe}K%A6o㚭AռO z s#Y'L o̡vRS 9'hdSqVĎ&Wt:z-t %Tb$hs[\}Sziеz݆ܞۓHO -cFyf@5ZW4۠B+Y=:?"m" RzQyR5W#uZ{K)ց;`jtۍFabP3[p&M=Fxa/O*M!DYwRCQ  GѬL7=xׅҶ~0{]+AT ?WD8X kNXkW{a,rT {tA@e0a3)M \_"?4Ӵ|NtfkyÎr7bMcy:`ޅ7-g % ݙ*+)Y,>5/t(Unj }OB]|7қf5;/\ec<6%>iYu)TlVDE(!_T]tѴ.]~/Kފ6߼f~ڥWVSI(C1)Ӯv9{[4-f>Rt✣zlNw4,oWuS?foxlk6U_[]WۤKiJ5&& C lE"L'6mbFvý[mWٛ::ȕJcs~)X A\V|!L]2tMS<4߽H,M d2j< \+=˚Xuh"SlizBmL\v8)(-ۥZmBQ%|zqZfgA{XƗ9 >\vԏmXGxLgr4B^b A‘+{2P_6b|K{v״}wV-)pn_Fw++׽*̠GX7D>:$+T5rސZi'vG[(p>)i(!VzO[kWn{l8MH13sHںڊfş)hELEd .ȧL@"| w(hS|jvl/K}ʖzHDNp _L i&+Xդ0IϒCYj\"pùr"0xa%&@X1X߽.to`)OJcE*hWDG,)^Yq™~!M "[ѾE ́Q?WQMS~}@GYS8;(/Xm#eFV%6' ݘ_#sԆ^ڣ FfL~i_;@ah2& bCGhٝ0">)Ōz[+F1RɈN觬[BH-GS0EK6|7Au&X>b6Ml)Veb)eF{ ZW3  ѤxS."a>)xӛ/tKDA8et%]|fs7y69g K3Mb2T{  [iXMT]9K5Ʊ丹NVq~u#_JGOx3 >&X"1ZiN s`gGo><`{oMiGߊ IbC_ ]f m saڦz3$Tp ?@O!jîGn+4tf/#*;8a%Ȱ29nu9o0v^Ӈf>*Ƹ7k_P2Уӡ#?܎>t_R%CRP!baiFx(u!*!UfhK魚6Ԡ%܋ W2:mAxZ@)0g06:bfmN8 6kz٧GZ>a.J bA>F yl@?Aa-XAB yr?hCFi{7Q$Ȍ '1rٞVg0Hv&; O402X v,uϙώ<dm)ãht< 
GYńMok@d]ZrҊnAaasB526+.jӬD9V<::Z$Ȝx~Cӟˣ}r/߂9fO]zOBv\P~  ;}/o N'f F|ӛ.+ÒCM0oٙ3ֵXo㝑\egy<Ҵan{O[Juuw-ˆ6)Pqc%ߩ"6xa7"5A[{ު+4J|v$==?]y/2b02@Y؈TC 6ʓ>9,KkAz٦M@K*ܨ_hJs; y( U:[muk']f0`kH!=YFz?j{aW7'UyYRv'ݤ͟- Fڳw[&a2DkM'b js`3blO7#O5%)>w8intX~c%pdOC䫺lO= K7*#C/pRRϏN8WXaKEM\qTyhqL;,նzI ?-jWAe%+Fᕻil{!ea[*tas 'RQE53uXYet^%e^_"COI4a1xiEMwYM[il_}++` I$=_W' kgNj'TZ^rpk0SX1{[n 馄ռnAۍ75x&VJQnfP`f8^#&`ިE _v%]5DiW|^Xm*v(r*׸T'}|{VLEzSc:^uNY>*>Iwg56+56wLw_O~a+bmr{AR![H'9C e>cҽ5'wKozAyi=YR1y&+-'1tͻd(+'cibDSbeƵoȊЊ"+S;ODGC `:V"㸔*yfN1Dd<Э+7_svB#v7hMM6y-\bcӮv1aӫ{K?{⛎ObdhcYa n2|^jL[>r6qa@u;$^ yGK! mƛ\J!/7YgI3ӎi,5WSvR ^fɩ]#e? ~otU: i)ǡQJYug(Qu=w/W6Բ@OM5IőUzZ`?-ukR$eV֕xF3Bʘ,+>nO&5  -nBZ o;j2l\IX{g;)vgiemG0Ҹz;bb֮tz Kt8v<:jIO+O*O)OGh݌ZMD~Pxv{.K_CM:S/3rbmCg J<RKNg8 U->  *!&]?zm|?z:, wkhac`'E؍]* zq:c5+o\X%K'MI 3 az4t1țDh~x.8-.\i([d3jљYݢ-:@w8mޞc~a*eŏKFmRS!Id+2 nջ:†gc%:7p'(z'|1:>.BǫkAs/F]wqp xxFWO_]Pvu$nVaVr峽̡XK#gcs^0I!)`)znEȫ>ݖuf" Ns w`_aד>Mql@oP7U&+*:ܕ&Ra?r8?}{u5d-Њ߲U$Da# 4f&eFj0젞E{C@ݸ~iװ v^vIgq zdИGзK:0/y=+oxzϥPEF>SgƤPmJQq- bĸFb '%'00BZKG,츺ux{@NPP#sZHs-gCƧ5L,o~<(F0o( CrqCكoo 螌̂M"^5^k {pC*qvH{ @*F>F%]/[U}6AUNB[ɛ l (8}yA53>&2><^*!q >~S=%3fS)6Pf@t]VP\oRh[~?85%J2\<$$/f#o6`$4z yQ~ir#?7[6V^U.IgF >*R#!тykuiW;l5.;q|篌IҢWD0/Ҋ=,5==*uW 'AbܑN{V#X[tLRs?Ι%*^~vJD[a)Ta+_t0">ڲZ_}+H20J~ǂrM#'\Hߨ0F{Ơ uө5=+ BO>,[I_}OkCJ35wbU( /J} Y"=7EA-j~n] :j,v1;+t,BJ(R-W]6[9v{Tk'GT^ .F_^U ~q k3>> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream x͝K+;e.s#1$C(]^O8CL#m.R,PHgs6><=?煟 ~ww}<_W_b.x'*gc9A:8 l>Y5Rr3(%4hpNmժѺUݪgMD/HxFT+~S8z;G:ZEV=C+20X + 0:-ԅ,˛'( )Ze#t}ѹ8krhU.nN8=NQ7TP0 zS9V%pt 2pVLtHn=S>|~V:I\R3bHS,&ɗѲG)la8!SĕT>˦i(eyУug~_Sbo"GΓUVEaw4CElv=wX=?NYi`CJ4/6d[hIM%T2V: izL# !`T9QA嘓㳲S/A0s(c%q\Uw֒,S*Y`jW/ 'gtI>^}F>D tZU9 $ ef'lKEG6Y/PG, ϭAE;y`J#Ueܚw'uI 7lPٙrnݨ'+$@,9ΟYH  ux ש807|'lpQW?w))1 *Ω:]'ܭd-" %; 6E4>DT t;+z 64tUTѵg-'T!|YqpA_gT+MU&:f.#G_OQ(t%NVJti @iﰺn+oسdZ?D']6& ЛNnkz3~\ |pĖɴW *K*k KBr.'x9ѳma Tra\W(fI\Z)s&]4pzȿe:ۉY8H7gǼU,`ރkQ 
dhLf,@UwRry&MT fu`#GP{+ʮ `IYW@% ъurxp{RiQ;1%Tzl'brM8%b)DqNPp56c&,}hKuܴUnn5M*l c!-,W2Fl6͕)0'ľ$ NZQАмm#,6vΜ>mWK]:<Ԅ2 PKt1kX" D\ l7`dc0_>v0;UJb9*apHWaNNs )W [EPE1aKX"䮏f < ha?`x%pheN99U=C?N;KWMBnΰju_U 2VC~iA[3!g/W@_DVIAWڄ O2vhɎvI@t(3u)Xhϡe#O*9_fqm7%Y cN0'Uk]lȟ4x>=;`zѼ?e/&}UPְj>A0e$,4D9 M]m$Z:~>& u 5:µF J{|<^<if>q"y04Ǭ16S?%H+,5u7ge \ԌZ5O2 *6lRKxHs䖣xڏQc2.!]+^t˜*bu' WBfʱsq5Lnkh<  y ,~*6ڈ|Tـ`v e^qT7J =8w3]y/*G@F6y!ϰ v**w_*>3Jś|=L BVܵ0L|#؂_ܵUd%xiAs.6k$$j%hs)‰rh++W 3דiwJOkS-O\,fi zLm%JxOlÃVxmi*hop0h*MZZtվ KJe&P(<&J[|lgûX.0v;TVuٲ䱖:5 zxϨTcef̨%h}h&f2E q<{ÈAW?+ vAf&Մz<*^_]j  P/z\̙y4a7LtNS?gRRԋE'Wzf߶ٌRrHN<}>{Q+7b WD{z*xh|`YC[LXhv 6mh[XMY5qp0$1n}P ["z7&=뇚B:̈́H:e "9@'TlCvOIW2D+QV>.4dgPBhj*NӸFyܔ:D,6baeĕ>:~VG18-7i8Q~I2- ^mUYH2<=h>(]v+uރ_[&s2hdJ)ky9,!4r7^0y9WR(~D!0H9T0\vb o z` 5n W9-),f,[E4O b^@蠘on4Xbr0z 3L۪Rm[A) juBpHh0ψ2VyW9*jf;* :;# H`kLx.>lښ +-ɲ~o.8G&`&nxv0>#4C_fZ& %Mhъ03NN1xifB.?b:_PAЈ_Do *ޟVV#6N u A4J#oir)9db<,!1H|;<~ʮE/Kc |ӯ.ޖtُP08o~71k6Fmͭ>RMyیxɘ ,IcE9dTo9VTg~5r%~edtl4" n/Q 篸pP|nӧC 9ʪI_ J$3 6p]#9qjNWcK:UrKno3,5j g {PύLͺk| HLdq97.RkhP)['w5tY ܥjQK}Ph@V3 >%1ncDW}WAHtJ>%rFu}[J T>9 FA0 s/ _MԵG#P>[o=W1> ;z5xyzϞ~Z_C}eblKƃ&~~n|ݶF?T<[U!8WצZD~ WyI@4 ffP7 <*?T=–A Wlٳ̵Ǖ _rxxEXȉ]`N]G$&E-SzGد3~/)Y+N۹@h2A_>՛G!rmUJS~ҭ ȉ6(䣝<88"mN5(rU(RO_m^⪐dǣ(H¤Q~~qJURɇk.( { Q.)վ {C6jROEu?A!LJy]/ɶR:bS{,BXE\eXV .Vpө~jF˽i"$٣@H GJYW$. e=I!z7 rF2x 8*L O,)ݓg6=ls-J-/4'K!C,Y%P@QWgls*x6%r~͓MnbAD)%!0SOFcUPX5jqQSOt>RhQ Zs9!N^{I%fSBnSS#]p}kcR2jvNrvZiKSR'z$_BQPݖJ3s,07/YmZ@g M0S1۰ZY~)j/yCCJdW`ihKyt`H324EhE#[[*!MV*n[tlKQDw43R:q~ڧFvcGi YMe wn y׶尊?\!GDia=N)u$ #"DH":6W6c잩J#: 2JNs=q4w4N''d&eq c>\2hxc '>&-pV!f|gqP}?ι Y=˦B?PBQz32v;~+|ebe?N"͏@V]s(!7-'S[4Ah/?z*'_.m_w"}=xT`!` GQ>;ǖq56r72f!KhQFҔ#V{SսX+/AEԨcB=YEgꬌȃJujG d V `iG׫}MYhg+NA ")pZqsKg #TK訅.aVڍO9WWS6>ꪗN{ ҆!?# _~3OaY Β4S32s!lܽS ق*F;qK%ǞEPC{nPrcDwV3C d_IQ O7A+SLo+\Se'B"[HA'0ߓ4a AcĘnVbƤtԨ>X{Qt.L/4ϯU+*fv_IyY;PY(b +ZT l4Lr=0{YWg^oH!2 (3kv]W!VGWE7FlF|u.6LQ xqlR]Ih 6k@eHtW(ٟfY+>7 5hҺ. 
&Td!յQlk o?628=h~ƕ*ŋ#WATj}IUؕEOjFz9L:^i6%KRY-WV)ZjU,J K"ID`>ΰW:͸z%TA*Õ;6K.$m ,+j 蛫J/aWH[AUyh&t 0aIBw@K.|&J2GZ#lDM:Yg⫽a*`W’ Xn[7`&r{UYe0KHFz~Xp/2RខVUvO {cj|-[C0ӿzb=q,wpdVE0!VjiB(zeH_ulZsikYt2W*BBvzSP25~kwN{3`o6ɦa8#+Qxם8=I WY ޶z98@ uޱӚzm4ōշ>¡e>^6"9 59Û5!C"uWDo q>ޖCuzڝZG! ĵu_WYWb_~V9)|m* _A0$}~4>xtN,VQ;)̲'V:ehiiy>D{əQYUvMN؟ɤd9;_X]I_<8 SVcXHuWrZ{%.VfpcQʎ.'fhs{kY)؆>l2Uf4agTM%v&,7x9#ꤻ )xA􃾿(uV|qžVa|1Y{#+9p ɒ(c=#2~YǿxWb7$BPe񉊿ھ$rό'eema&8N~PȆ8H=W; 1^;diMX)0v̥)MQǓ+ 9_m79fFͩm siNk!a_H*cRьW]h75O8{m2GӍʸt/;)Bz4ZXqPa>DRZg&@ M2[-r(E`}MASYvԹgx >D_lk ތ,YG_isMƴ(]@mqVX[KDV:l= ~ g褿P VW {˔W焲Ogs"ذo8XB/ Y,AvcuٿN ܚo؍rnvL竭@*1#]w{~f ʤv9DV?KTT[y+dUb|Cw06| E*0SM u6 GcNqRt."-Iut»+P}'VP0)} h&1Քö#!3۽x׾ig.9ld#VuCؔ#kHT6 bfH!'I|EO{a; LӨ)8&"M ؼ8j0׼9)*( v7F6Fང+5e_9gphP Ft #+$ [C۬kQ ]B膭y9 r ;g4]0&\vH!V)WjS\*&rʭR\Tݡ5%v+WSG6Tz+1eZ@Owj h^Fvelh0ѣҖyl@5>r F@) `Y+Mzzڜ)\^tb4ѲA geO3Ge= A"vkjA_x(܍MOt1Nw߰-6\}f]zHtL! j1,Di+)a߯ %3-Yص Y,B%6t(w{qRx~Wft|"Re@Zo_Je18n|Ip}y _BQrs>`0y*b"_?rFш6B~{_;0QSi(fE\WљKznU*PAJ&NޟX ggZ`A:J3sqTBcf*WC2 0APmfĎ$|6)Et)rcγIqs_PymSc|zPhR99TU\j 6b?xGJe5LGX#ΫόfG`;1+2)oLcR>o e)1sSњh~As8%r{W4|!TcTZ y>rR`B=r)6ټĊ5}Y _W7KSa\DZU'#̅Ue„H~G@>Lb,ABw#ǼʛqW=M >6b!btqUڻ~` |Op3mgq`n?9TU$%FR(ʇcCAjul ah!=ɰi:b&ܤZYEWW5x;I)NiZxV po.Q@{ ,\m #tDGj>۹f/杲}Pm6"'J>݀dOb%\$H6c3Up)4<:D]c13.!W蘁j4UBB5`umBf dOJ\ߔ< XcӜN?WJJ; rt TJɶIY`E*sBk& '>dz|(aq<)X,}@Ls]A=#2,>MaPf)KP1!*P\Rt3Rݪ)8g>vEP+HcPc10P *@w?*qWK=A:|[ b/X> sZ4 O4pS`xVY|>۩/<jJbg7%``9y`4S aTnm }O4+'C.FllkX+\"Aku00{q3],VNU%Wa,,Ȋ#[y?d6&_tAlv-Q W SH ݢ|h-jYk yg9#Y1p!r R+J7pQK.3@BaO4!dG,*4/Vt09b||>li8w) h j:̍Catkݜ=r]JּVs9d*"SJF?cs ` 13 Z5Z i<܆pZ@+@q<|\ \lRwQ {\9QMЯ%qWi"w!fU峃c XG4**fO_Y]ڇW#KFFf[0†lk 2Ngcʺ@ ]Qe?R:+9qr4kCi~<1;wxc7Ӷ؍0kBR,5F42B*c$Td8ll25xChUvu֪}_*䃐—nɽAfu h`|&* o{WW9A A:A4!zg"ӺI8(2-u DoTbksdpkhaa`\l+_-2V fLj[9tp!EK,Ȅܽv-iv*Po)Wnx|ۇ[Mr;o )C=jE{@;s p*h:phdH?EC8Tv5Χ :L0I_]01ńo] JPpkCV/|96YQI0y1a/Pp!֔Fk%Ւm6#؈!\0Mb⊺^Z&?TX )WI\W5Cպ`8L$P3fi,߇ -47B[ZUm+55)T'BD}eHjۈ+L=qf]7lrŌ.K2xZN6H0VK An($i)ތ:vFzn9X $l xÕ~{wSVW6ߕ`>e(IpZUH;t͛p*ӛ-b>l_SF!p.ͦ+ Ј!$)= hQ?w۸¾sFmh'ij"&)clo72~w 
Uo `o-VG@>HhX%wӾ M5d#<~n!]if XǣADсҰcϪQVV<`H#uQá^ w[8 e 0nZ WT P$Q7 KJKnjF/ڰ!<}PHpzߚ4w#eWk Srf ļ<|%> DY1VlQ*C^W#WYGidiʀR\Dc&5, Z7 F}w[hSu _.7bf]U4xZ62{:7R+}JJUMDSrc1Y-Jq%jRYA7ޕy%J5RDWJ"T3YըJqSh;jͣ) $1D&wy=yh.eVvCyaSW{]ݱdkua`.`7Ly0OCxz'H 5tN󶌭G<ԦF~t3tͶyFOq'`|ihcb69T8+`tph6ٓ |ռMy46Zb"!kʽ估0Q"p J`#AڶNcĘe0fA6Qvc8‰2R;J`{hFifxK3z^V?\Ќˡhx2ב+&XyӦ*gv[ nө{V8X8)!"!L[,k NZV3.J֚2 f\+5Lf7u"qd3G6s7Ԓ« 20kS>̔c{Pusy%SV~Ý&Q?&hIa* /I8sɤ-K=UDf`f[Xd ϧz_Or 0(|a%TNǠ|0})NX+*45&y'~- !|ˊˁϦo9ɸOzdܑ_c *QȨ U!"NrE$OisjÜ|IА{C:YB٬f1-{2jiM\iټ~r}!ddDV3ODl ;NoqlRan"h˱zZ Ecy4)7}2o7Wx`VTMDT8GȔ26Cp뇛p Աœ؁"$c]M[ӑZzwr Cfؔ JdC|oq O:"Sq<`ASW%Ԓe4Fvl<ǎKqzX_Z@/[ƕ|:wQýQZ4VSad]j )wI\ b|~xTqz>Xa;5¦p&L%0C;(:&n@mHU`r 7MG"+Q f.[^@ke#\q+2jP6O2!h%K?!!Pe>䤿W Z޽ F0c`6 ?xɃ'+xa(6R 0 Z}Ӓuݒ( rZ)@qVsCa#ظd-kRkHsʆ݀wj pBj)#֚͏̢ endstream endobj 210 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 10 /ColorSpace 211 0 R /BitsPerComponent 8 /Length 39 /Filter /FlateDecode >> stream xw_^-e3ιPcJIK+ endstream endobj 213 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 218 0 obj << /Length 495 /Filter /FlateDecode >> stream xڵSn0}WX<9z|K"T$Tf1Io@眹NZ3+pU*8.8`uC G bV05fxMDU֬)_;m@mGcy:PRF7y擏Uů=w+}ǟ< m#A)J_e๟ -=]!Cˉېu8nd䃿M"2咩J Jl8:/<>әADQ~8!|ߦHxF.b]ӌRkn>}F;J毙7׿3š:%ù2.>RQ1\P2> /ExtGState << >>/ColorSpace << /sRGB 222 0 R >>>> /Length 2775 /Filter /FlateDecode >> stream xZM$ ϯcriՋ؀p999aC~RL`$XjQ")XTv|8~_ݾo>~| gx_n_}0_sc?͎llx:?onyG:gwϟ P3s?{*gXO?;~{F[9}?pfwf[LߊisgmxyM&4xt3wl* 
Mޏ4>]NGv#ό*_.>ҙ`{)g79A.3^\7vv/Y:چxgGJ8!^9^*S(~m\΍,eGlg5 q3.3D\g3d\M:^ bg Ńđ- ;x #NiCppq0#-,W刭6!GqۂJS{gO;Cp剑ISerN Z<=ֹHd8p"v ClH3`8H%$ůGчv|2_Z3xp=."kG펒/|31Ukf"W-wv8 gR`ha<"zM^a!i]D S4pWPrmRLЊhnˏyŦ].zyc=O(.G{6hD3aE Z&dxItXf8{A~T ԯA `tq#F|S I`*N6.QTPl1[vP?@˴q,\{ *hB-f:^F|FQEJL{<\RШm,7ϙp?EbxTa\3n|D[K<<"^䃷hYu :#B/ ` , #Ͳ([P'AyPZ̀kF`PIS5o0'R$t}B [2xƆ Esp-삏 1KlQH*/[phpzX* 4tw%5gnDK\#5nߥ1q MYrpSWNgj$s^NyYrcOFYO,(86&6QcBzjX9LfeGFpMJҞ#QFĞxSӗoƺg?p1j#нL0˜ ?`M֤:mQ!600*[\_ӃcbFmArN]G| Q|ya,! O3AU LQoIØ!f:HSb%&a@FEo}M!BYTbRtH:g]u.~LbيB:^c!/4x8L@sd}snRa>-qF{34EbR΁3-d$-Lʹ`(N.%r.F5xlC_ِ-sw8Έv)?imF:kH_i$lTr$ks",v/>L0$EE~Z ˌ+q@截ң u^FiQ< af<}1e[.t=:+OjCOT+=APt:w("_X}5beVBr+90DZܧ1<ӗw牖&?XM!gx#y endstream endobj 224 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 214 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-026.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 225 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text /ImageC ] /Font << /F1 226 0 R/F2 227 0 R>> /XObject << /Im0 228 0 R /Im1 229 0 R /Im2 230 0 R /Im3 231 0 R /Im4 232 0 R /Im5 233 0 R >>/ExtGState << >>/ColorSpace << /sRGB 234 0 R >>>> /Length 3365 /Filter /FlateDecode >> stream x\ˮ$WɈ|odKxbX fXX{Yu00gB%άxY}-)mş tl?^>{l ;oYۏ/j;>}w?OO_Oćqc9?_l^V^ŞaMmfjy_-9u v (k3{3K}kh(7˔,n >Q|*[>;#<,]9Obϱp5O43=1Pܘ~ EN[$sO PRFA K"+I g%GJMS4i 1 n(?%ܓ<&mKF&Y[ * +L,ӑ=Ò$9A(09R@%N 5 y,+4:g)bbcIùx(d.u=FP5sĂKybxp 
ϟa+J@+-X4&^8#L*$u//[RYcIcj5"VB Z҇y!.~*3'0{YDcљ3㡸H ė$\XƉ1_qEO"#q#{)ː4#-dJwR9UT&X)/[=+lA&Y)RWX:\ 1;PeODeA#ۤ Bp)**֣iEaj0V 8i+pDql=2p6S2QbJ [d9DRKL;fh*EyJF>pkDREQQ0Qv茭E1UEaE8D J)ha*u'%^"Y;ґ"l6)5NK,RoQ/#>-^8`NqRdM 3GٔBNgRjw24VTJLf{/X^HH4U&·g8<4;")>[ۓ,d1AJRG E5ꇰR"BC2Za6qKںHNS楃JN [;aG`/wYGaZ{RGU>,󹄓C>p]c2=Nqe'|ge>{\Yse'|a'|ϟ,9\d |Xc2e~}qt&#UEyޯ{xgl(+wyq^yUi 耝 o1^*i^PΛO ʥ;z=9FЫ6NUj03-ϝGh9;9O,hL/@l`kOaGar h`T:|-[4NgmEĖSR[$hچC@Le%9GEqOX|%uElI+>BR3gey)C^XUR5ާ1M+奨8J^HdrT~݆]#q"+U.J\' mH Z%[WHv-o0VȪhyZU1*X EK q`) v>WКbYbmȏ |u endstream endobj 228 0 obj << /Type /XObject /Subtype /Image /Width 39 /Height 53 /ColorSpace 234 0 R /BitsPerComponent 8 /Length 2605 /Filter /FlateDecode >> stream x{eE4j%cf!cD4Hd*r(%T<"""!"hJS)ҬҦlN7w9p g}yZzz'L+6S ?vCh^_~e :ۺ۪"CǕ!;²_>|?fl-W@1ow\y)-]>}kW //b9̉pwW?(nxgΫ;u_t%p렲eK~ԥov_8㝧t]=\uRcşO>e?,nxZ:˽>{*ΏpSc/~l,f!՜uZY-JrSC\r}-VTdqC|G<7S[܄/͏s1C}yCǔk/W(-{byzuNyƮ1\Ql2\TLs'R6dsa NObCwg4R?+Eceuڠ-l2[3>[.p{kظh 8<;jNy'Sߍ<41I0b=p)  t]Q(n85@>(!㝓iyYvH;:9?Gʰ,;R־Mz}yQkntZL`ۋ[w~H{ V512_?$&IӞA 8p R#(hGE% >*S+Ph`dyGp҈я A/ 65suM]PCUW*h5Q5×) 6ɐ鄒GXJWV|S{>AM1 YjͿO89Whd ׋.݁2~*c90ҋKJdVhXݐ[@k IQ#LXYTQڀ$P^V%c,##U" b (> )n)_j1ʟ 1OPHQWM?2i6~C 5ACQh'!Ҩ_x0RC6CL&>Y8 㦙a),H,Fћ[(1ϣγ5"鲥5ʩ4f=$a#.H&ap*FH&I-+OV:2aQ) )\0ZOtՆI.z8ZjytIS0gM7Elʍx+Q?%x{rº˟$F0fMg}&#ʌlrn Xȭ NXjէm;&ԆL0žÆ<ݫf`NƾMSͫ>eW|~Ūƅ=*k RE mbmً-5ujlp(m-q6݁N;F)e<Ѷ|;]g %gL"]AWT38TFqFd+3c@u#ËIQ,z #^jJtqi]Vt_StYRtU[dL턩*DDt{ Nu~pG5t!_=5tQqu{q,'XkL΍ې ;pطIMNݭ :{Ioas&RXp[T2D`J2q%Ƀ&B&#/ݻRxF:h2 `24n˹I=ln7Ie3{W|RfIjni&<&~űaAͫ.#Lttܒ7DhXN镼,\竢 TݻL7@@0[5&Myz4eb˶aCmϠS̱USbDp\+˫/B'SBtK+P˕du9I\"j٠+ܰA..n$Q'I{1:Iw77 'kr(ʮhP Լ@8lL Mޕ=?_$$~ Tw6eЩxX?ȸBr)HDU?JՀƓF( u/acZLD'݁êN4N3eP΃ÚN56L< SŰoYۨDZ[PSum)~Htrm4w ɪN-YÄb-7mXBaa/vZ endstream endobj 229 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 1024 /ColorSpace 234 0 R /BitsPerComponent 8 /Length 723 /Filter /FlateDecode >> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream x}VeÇh" RHeCD6JD*(" 
" (-Y1F&1PB556dSY3쾻ܳs=u_u?}=wO}~e3~ݿ;{llVִYYgvWt\}Meݶde.+Ns::/ͺ بNo=g팳Vobuk[/Yz<\K(VyHhв|+c,Qesum6X"TՆ5nw?}9llΎlenc;>ry[YYudU)XKE n{6lx6{PZ'f.[bpN0+?tk.wM~#lzТaBuלx%墲 *9 |%y,Ol I+=ڗTRe[0~xx=p\7=c׻.7RD'77lE°Ұ0wPduИ 5RL2ԏ|j@., BD%OUKQOfHiޕA WqxcU\luȤLnQ)'WT E6 قx:~vv6UD5(.h"+pEԚXVoW~v;mz(i_E;08AfrSyˤZmFiK&:FA) 0[iC+L>GpjFw||U8zjd&řGS|o %vxT, 3Qr->iiCSA-9tcivB)̶xQj P0$>7|Ac4N)™%sdBI%kb0icy*82yIt&OD#}qhiyDFZƜBr2QtyJ3LlTcqtdYi}:ljO8vsb͓6+!ʆDA@ #w =WD\¸ Oc?>W06'J}x2|h-)",|#Ÿo Ogon4ݑ4ᆹ: Y3xL-SOg)ҥba0Sܷ(!-cd{>^]>jCQs`Abp }4CoJ/xuy8 owKEtWxFăF`>sy@>B&xx. E3;%䔹Fu[q9Y}eʢA"oix@ݥ8ة)p)U*OǜKkSwKT7 ˆֵ;}K})\ A0w91 !fR!Ge $3 4*.DZ;>&f1.ZDLa'Iv-aT*hW4LV,"2W0[7E BϿ;ދR)/5-^qtiWcp~!v6Ʈ.Mxۤy=i+GϜ#8TP>S'$*~瞪h tǜQU[x=M'k endstream endobj 231 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 1024 /ColorSpace 234 0 R /BitsPerComponent 8 /Length 723 /Filter /FlateDecode >> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream x}eD@ٳ(%CD!CH4jD$& 򵠢"|D@ceQQ:H@@ TSSiSM6}N;9{+f9{u_u`LG;m]v{w|ֵgÛ|[9xkȟJ-?mWMEr!?h 2;^hh˧f0T ΚE{|셚|O!?X#WjDžp||wl+{sWޞ׿dGon^]-.h7׹Ѿ勯.Wm&s`ΟPDE%[Wn')w_Odɦ,ڼ+-;z !4]1ZJnGv xלˇ&k=[㲜Ào|?jl>_*|ɒD wnjdyWv쟗nA,?;(,ӳbh:*W`ZHe [)/ZWW;Oϳ>g+?ݢyI(I6?(=ق|WxwǂP<5oFWgQv0_WSFxg ٗO̜0PV*J%*dɖ4g+ٳe<-ksI5d}n]^ÖIbԸn˽/eswf mzUlYe; AUA 7ZpBɕ'o+W=g'wXn[ExFw2Y~6$xUn? 
a|mgW~ue!Z$Z/_v傳׈o'.OvyUkcټފ+egS2KvLc%pXƿUZ6/0ϴYs 2d)q~S1@ҿz/mIސ&/ qrx!؝#8M8{ E!Jgy@cWӊ2.(+({.X v4*d֙M\8D{ Cއ,vsnj I%!DŽ\Tw ]GvRHdʂI O21y@+}p4!l5 b/iZs87RX r0/PT&= C'p$NcQi6[ogZ`|q{8Ob4d'H rARϡ 2rKIO| +h<0v "a!Hup㫜SN2U!m!ďF&@r&o{ M n%B TG)0P҇ԚT8o@f!14\54 iJ-=G6iCr!#e/V;B +F u/P<^*}%"dd*TCs۝}rgB 6³ !&dXI0!4x3׆ `B(F"c+[1ukVi%-7P4iit#qnibi΃\A7UDiTlDB]KW^mL@.j@ehdRD*d\.j#Gv`lI߶@ҭAiC4_8gwڅ<UCnm^UoƤ-nܚL:QGxYՖ[b0ںeW^l0%=~._#uyڬƬ7W'&$NT /^Cfksuq_q{ l /bhyUqAvY-I<ȤIIIOKzK endstream endobj 233 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 1024 /ColorSpace 234 0 R /BitsPerComponent 8 /Length 723 /Filter /FlateDecode >> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 241 0 obj << /Length 895 /Filter /FlateDecode >> stream xڥUK6W9KRC,Т9h,ɱ6ofHl(QÙMhĕliʺNJEi|с!!C" _N:{s<^)>.c\ mt*L^T)VUSNi9UcLZ3Ҥ,}#y!_0ɾ3Q6Bt_X j嗻gĭQX-ڤES>@x=ajd߼*߾˕-g氈u͗J $z ܇ޭm?#- ]ۛcRFJőҢ͟EgEj/ꐇ9[*hRѝq ]n.ry%?;z ss-4/BoXA_k|(ٵS~fFk&m/y+? 
endstream endobj 215 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-027.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 243 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 244 0 R/F2 245 0 R/F3 246 0 R>> /ExtGState << >>/ColorSpace << /sRGB 247 0 R >>>> /Length 2642 /Filter /FlateDecode >> stream xZMHׯx3"2Ȭ@ivè{$>Ei0w+lg|xnY~Xdra-)e/oc"yݾCV%K_k]^ޖ_-7˧/q\tm6_o_uH՜f*3O/}ۚa}fYS[w-1 avX2΋hEQ|y*  *ZS+K߾|feᠲÑƹCNt2ǴYãb^pz#Q8Y + 6L$4<bJ NHVV>P` n1@aFwqRՎX rHHI0<_p@` n=qW.qԻ//ON߫ 3(i-6YX_*y`g#8H1t h2U9v y! (Y)<ɓ8k_> Fz_BC/,fLX@:pKxK^٢ ܂la Y?ɳ~@ҊH^ ew 'yD pR `NABE4 " lAZp_h@7`3#0 =ꃽ8)G$Xkl|&eNx\ B8)6u@ XuİDk+h~QEXprR8Wp>˳~09)܃dVx6g+ =>zKx,v*YĎ$ˆ3Z9>ɳ~ȻaF"5TxL |+=:x8 vI9Wڀ?fFO8qI/QߘcD3M>)Z (Jt,6NZY~&S1Ɠh1Ԃ3:taAHKQgxbP"O,n͢F%RX6^ۅI/u?U[=Gƈ̔b>~$aU"w s-o8"I9 h= P1=$ RqN7o0L'; 꺀q iPAE grNmDAk KY 6 _ G N+3"'A8:IM0 }ܬ}k8HZ'qRЁvk0>\n]R9ZY,?*c$Vv$i\a[p]-c -]>I67 ,G| ~-{,XT]a!1<{P>ֺ+c0 F瞵a#e`\>(Ы(;K}: a{@itg#n25hYP1ckbm8a}lw?(qO!LF!&#]>LXʘHㅒ)@c[œ|?̼r֎n5^Hjoc&,WO:ĥXc9$Re%7>n@b]_0I#`xj oq0lH8)դ ؆P QCj{ZxB#gW|wN%w1x-f@@\5ipG<7YyjH< XH\^FZ[d(e_0j:?^Q̜&qRld&U(Pbyh xPWvV [smfyֿpn&T9p\+ݞw:>?}dpbn n,ߤ endstream endobj 249 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 237 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-028.pdf) /PTEX.PageNumber 1 
/PTEX.InfoDict 250 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 251 0 R/F2 252 0 R>> /ExtGState << >>/ColorSpace << /sRGB 253 0 R >>>> /Length 16727 /Filter /FlateDecode >> stream x}M%Rݼ>&Ϊ)FB%J &`[w22t}_,Ԯo_߼[,%\Sx+!?_o_/[x!_?-߾[|9V~o9߾7 kt|828[,p{wqL#+%ǻhM~k8dp=A8!H良>9vdpĊ7^8ǯޏɱ#c\c5Aux{p^߃ǎ ;l}8=j=1qS##8xDr8w>>Fp|XZBjűnG c=p a4k)?4V!h\-bcG16p$B1x6{5)i酈8jqq\88.DwZx;f~zڅ7:[ol!k[_~xK:ecp$A ]b~9crvC</Y# xD0@%y͡wq8cg9c,z7܎5b޿c8µvD'#酀CSS8%Cĥű#Z_+!ki>C1M; k<{p1@&OmWzV;{4m#c:dt[R#v2Vp$hžGāsolDž#+55vdzUnq:MF N氍}۹Mlʡ} iw,mB)"í^҅`gzrveҼCtȓv;t{ @[HpKBZT;v{/|hgC6ziJ;4%ӵ"M)C=!LSaBLvܑ(Opȓ{Dԛnnu"/ 5mcʵd#clHA#c)pdqؙu>CpހN/IM}dYMc3siwDZosz<7v`@ϵpp9Bל6GJ]s <weeGN^89]c!S~?3; 2dR4 #}3!S_f$̅L}/%a.DI6]p&Fv,'l~:rYIC hzCL:d2:tW_Gt@N 9/Yiep\By&<\3\c:5w=q\cG>2m#}ǎ D+fGt5( k%w9Zթ:dpZ+=1|GçN 4Tp8Dhwi@-2̕ :~4dKs}Ek/D+;+䟖\yDk}#Z"O;pzu/G$6CtV YZ?z%,-oyjq8؃f8ۓ|;,T pXŪ4Ck!=6u8*uf8 %hyu4zuix(vyk:q<4ͳy!jȃ5tLA}vCāsln8DCvG t8p :B1fn< r[V`7$XQռ;B)pte.CqJlhŽ6YkA"JHt!9ViG\0'ı#8 .iJ!:<n4yGp7w NAhM>q4Y ~82T7 s8<ǎ =ၶCA G?r<..sL8v,r#8LUE`UbGQ1pt ~SH"p f,t82e#,]E5ީCgeG.= ,7_'+=mu.}죸X٤Ҩ^1 hBKOX1h ؅H/#iXsyDe!8OjpP,tp":` \6Źuc;2O0vGdD"0Bm⩖!+D۬ _"cG=ȗ!4Io!߃;"+:^lu#DJ_WVl#ਊVWt+p(g\Q1q0!chAGBc>CA,3w#Zq8Bii[G_p,>Ghf(c);YƌC$+!|;DĽC!ouzxDqPJr!a"f|P*8d}n8D~ZH'+ͯŗ٤Coglz;} ݻ(GAjS̆ 8vD"zюjz\;BQ:z̋_hGd yyDxBp5#ʃgxY=k="k /Ӆ5<[,@Zqiꐙm9vi~\,gk[B ܼ;Nʃur~IhAf:Dq±ls ft-&vDЊ ttr-\95HWƱ#p"}; C~#~d//OY_X]"V 333t?6 GI)(Km|kneAj++!zlGȃYzC(ti;pϡ90C_2\y("OXpl y&`"G >cG̔T=9DBjg}ssB0Q ݲ/D6y:DЋp0y0 zHkV'ĊEF"Z;EA<;m!jQ(bE(C?qpXC9âVv4a4nqȃK(Uf]5.+`efh,:DevGIq{̾?WvD$G3;#G/Q|T #[ڎ(.vvvf_Y&ٗ#d~BE,#zZF("xnPfDž( ;iU>:DMfȎ7#3sK}q\.4[!`VҪȌh&-]_B͝1do}G}jIZoh~4/Gd!j{q ~="nUOGtcGɲqOU7I1v8qq!∪۲GRU }^U5Ue%>8H>.dr4Jv~DVey^qUepȼFQy]c!F9őQav=<"W׷ݑɁ(Gˎ(@{wܯ#cmBx+C#?A+CB:2Dvd$F9rX׸q Dž.ܯq!x1c(1kԔ_;u|y3=ȴiR #ybGΨɯW~ЅH]Y鐕/UAʄY'tla /N!ӎB-~Z9GɲvD!(G~*p"g ayB`fQl-y3`[^C=ee9A\Qs(:~:Ӄj#~uUЬQoiU%;"1=V#3&I[!{~yap\~2<(1=wb;䡟8;D#xȃ ʼcD² <"rxM>"؞TؗsraEUa0C\}y{{p+!A$!% KSTi#!3Ţq2¼b"}k +'JŲpvA7~]|t*3'MuH3׌F7 
.{׵*/d~iF9{cb/9]q{Cؕ~ϡ>(Ec-,ZiJFKgO:AA4-`Ԡ:2ƺf/S$~YnNMuTU|}z[Dѩ`ݣmWnk(M3oɐ 6HȤeҲ,$&W齅٘3vaJ]+6- S+irYlvc%3~qt}{gpTs`!{Y%1ۍmީ|#*/z7nz nuRC[?Ϭ%lBL㏤O *WdPGyvN7˳\OWv {M^}t0:/>S4w TP={ >EZ_m\␰8_$1r1t%o ƨ`fA]AC=re#rǨ~^G#DE17gd@cLeKl͔' @*]U(wmiNNeڇ> Tؗiն^xW[,X}rړOgD8 ^MT'~3N> ( 8ԧY٪CU2 wT<~]^w 2P#αϱɍWG} Vxs*wUeoj4*2 6Y`g4|Ҳ"-{>9?7$}/-SJ͘q ;:}L,OZ4䟍I+zb-Aï|~SwrD$:{sVorLϼv뤥pw3\I%+ QEYϜYf(^і22Ѣ 4L@W{{o7vme __K~F|V]@o9T RTrqw~'}bZA۫~L`̛4? &1MᖙB*!"ào>Q?aePwr!"X=J;#֟Z08L,vjUV\]??C .z90*/=.};Z=/#)!ӟkAMo:b5|+k6> w?(;RPVi&M5 zcןFeGM#I17~L^t68ۙҜ ܐ@Au̼J̮gGB줕T7+Hn+hY+rT>wDEf̗{]@ |۔Y4eau,nΟ#F4\]h:,2)eZ}XȭO#5yC+tV@/_LMr#yw-WmkOvMA(|k[=CӾݡ:oH? :GՋ1Y9 '!JI" H]8?59*:VP*էYgEois>75 _%yXxCw< ,4{5C:zD 㞬>Yt`;cѝ=%Qh݇ D2=#z?pf5$[EóƔn.ݘ3 ۍeZ`WDf7# <'UԂ:9\$I6|Z4 ~C#WP3RfJ//NAڋ_^My8hҪ.nniI7=Umu|&pY9f~-/2% k3zѺmM d)$ F*I.F#nuwE:IіՐ5!~}ztwJF~ş]h!Lj2?8ҊUSI_#fwe3kj+vMWc>40^B(ik8Lxg6Fa"f"s¤T' 11LVewvih8ǔ9~>9(N %^N0Wۃg.w9scԫwr.zokը?Ìy297~!pΡь d]/z{S^-SL,R#kю 7aG}> J9miAY<Ɛ^, 1:NC &҆>GᚁY5C&/PwΚت҅+&4**NH_K4+.G?vң ;lWS]X+(9BhOu= \'OS;} 6zM*T/23Wksq6x'ғ8̓#Cnl֓/4!h25w"qE;fSmr@5@shhmSE yG0Z9MpI0OrSIv4x> ^ʨVkrỦVMڥ yOtDb(gg˺F.5ʮRǽ蝟\Cb3$e!xB|MF/ߥdv+2  7]MМeH;iУ[Q5ƾ 5 !.o/]nuLvts/ME 6 .F[tӲmo>{h+O"c`h Pmsբ=?> ڮ9'57B!5gaM(>-j2Gw~CT7! 0yw~Cjj$rSr(4$kV)zS0I3v҉x}!ί{: *&  EF;/$4 twlN iidb/NvJ|mMd[I&C+N:95i$DwzUKitCBh] 0SѶw)MM-t8OAR3/!R; jy;g8#'<,|XYcUi* fJp͢Iv>M"2QS@*XYh 4r9:t6MLS;,SR8ha5%E5zZy_N_:$BP{z :2x걥E;~7 MdRA 2Kj6z ԦP haw -u^d*zsg@NMᚁQٓVP*e)p'wuT{(LGL5ӧQɨ?9N]E_P:,opϐ8|X#r;N'jggPdW u}kwQj9HM5B ntF,87MSΜ; e3..,Ikw]aΡC^q}ؔvwGa3MGe~YF8ŦiRwmMNd,su9wz翯H#N($Io56HXyJ'OvFk3y1Y1B7vz秚:Tܬ-4Mr"11JM6dFv'}3i-/dgК'CǃDR1ZB]j1c}a) +DD¯Vi!8"BLl"AJ2-є 9+k=ZT8}܂k T_fgksјkX( \lhiDRfTch,-F]b(3ѳAf0jHGah/q`Lu.4^| F;/ewS_~Mшp*+h5yTD9&}gg+ Va53\|ў Vt>vV>SB ٕsđǢ%n4zu/eWnRBhqtFAJ8"kOy. 6f#O>GgQ GkȪsz=;gn%确P+V?f$\DlFwzw}|Y+NŇ?50$nNwM);w7r`γġmOe+ex:MEyHC4I5O?"쨢8NsAcɢ+WwTp*S'ml6f=gWMK6[`6 B;?5L yӉU 6I{K+)">,ZQƂ:8(У9퟼=]:U,(?\l{kotZggD%e~I}!aFpc0ʕ;Xcۥ)Mu(_IQViD-A . 
]Bf AiEE6{8f^@yN>[9+qQ!Quhy@_VbShjOCQ>:ogӆ6/ŧ^ŧ72齒ةM:$6HDI.+MS .{]VXM؊l>W:]5}ю_dv;ӫ.Y VF;~\EUJjn ],L*/NثђI'x[ zKǢx@DC`nԗE!{Blz_D"Zb;ۘWT#&}s u&t0>iKP贮CjU0ɰM1̢k=u =z;GS: GGC{U*r+^YI|_n/A{,hD^!tL}R[K,SƜB@vڥ*83\a1\fow~>E.2NR+QS}"GM}D;/w EUhމe+3܄9bS=NzgF6:Gch7tW^_ٖ634ڂ`TTJ&~VXjYh5+ܽ o/gX;<ҲEik_q:ٶ8@ES_Gfi!(1UHϺюy^OS'cUoN23R.e0i8czSA lv,ZЙ<૳BGhzRn)lȤ,sQ֨EHnڅ4) S.K3}n&rNSɽY 6zbh^QRRanS.+K`̠]R~GEFoB %yrJwzUwC|$RMZɵ.DYX+T5.Pf}v_hrEݫә֗? I?j*h9i*ƛ)|evsN:t|IjW_Oh*ӣWwtlWYl1NξyS ~2GcF)+ˉʋiܜhǯS7_B>PM%t;}ܥ*8FG=yuw~=T䰆JeF;~ a׺17_]eƛdHw7R䘞_D3yz֡SPpu_JU:SYH zqg>VA]٨'խm&j>gmOKkE͵2_:rb9m5L3"zj4e}VrଜFd ueb<>N+ F^ ?UGכJQR/z\g+ kʂ0}; 5iO:ՙތ-?DEd~g3]7eJю_}pLO8"MoG6z痊T ?ԻlQyvC_ [B.U1>h(/ @A+\>Ghrke*,(q:֪&OT1PLlYULS]ѣ=[vL>#<ۢי.psƮ ^k‚E/8)4;Ek)Dw\*L87"ڠIt*e B>fZB 4A>֝ȧQԮIxE լ`юW*/ǼBcڲ<'i\kf&A7!Ba2َ{X+:Q&Heoʘ' [7zۋ`їj蝨,_P$ylp]$l*Ȱ;+#g ݛ%gM]_4TvBZ{|$ȝ_̲ҏ /P"}`w~IgvU׳S+ ¨;s~LZ:*j:N;_Y2O)F=UBS3Nv2g5v2C@y2s w~J3WyGSYO;+&C0 ooX, fTY<_6 Rem"X]ί{,Gӈ'Œ]y*l@3kݣ<3wk6zשճN9)&+1o:~橉jW2~Hͳ2-lBC0[\/Mw;B-r5vvRYEԒ$QCcTgHӢW#)wW{:wTV[r۝ZoطE$z,fS]ԱˋRQ|gjBKg䲹h/y3ʡͪvU.mw~EO߾jb#E (Yq0ImJWV̅чl, YE{~I/fd5x0)2G/KsҩCNЩ& ߙa ў_URe6;Uy1CvEQx5m%\> ,adV|*+L_!۪lE "ͻZ*z=]ot)b~uBAGU6 8'"UMWw zai}W5>nM'aMVМ`ů}.f,].|+pAdF;~BݩɳKs_~b)~8j~oX5qU2Ox`ȧE;~>MY3tShft;=~~>Z( <3K)*,G'1y #ίo'2*t&a1O3nKe&;X8oV]iSdUV΢;UssVf*YurFkؑ,0,\30uюd"4arƪpZD}6zjNlMv5]{jfu; 8oyVycP? 
ij69f} bG=?ӯ;Z$N?rb+ThWUW}''="#J)O)N׍jq6I6pq/(`p|8"Y":JJ<8 E¡v'nJXb_nF)'ю'fOR3MSw."̤sQ$XFjDӻjxF`ioZSsɆ_RKoMkP|K]/zՊ#0Cgc (ue(_W'V6e:kFEQkт.%;?7KDw[Rm*Zf: Ff1X3Vɘa1hǯoqs"1aߢםEڨ/rjYi>_P5 A4tt<,zJE}1xc/Ծ[i,'LZnqh:hz_uddNc(d>DEfh?}JU\mga,_闹7bJs;HS7Ch^ 񬚰7 [_HhȟO{cg*4ۇI23x':+W&RA}Kbҹ"5Go/OW_ǫHUG4;Jau6ǟe?g_RLmXOr r?O?o?X endstream endobj 255 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 260 0 obj << /Length 970 /Filter /FlateDecode >> stream xڕVYo@~ϯl{x} q$<I#vHmhTmcg *H XiEAuIj7b(Vb6n l OI|"_5%jG'J@Aè.qF6_õӺq֮w;ۿgf M L0Ķ\%$aa9gß%7S)"fǾuH:ARP9[YNws?M;g㊣B?HYAVxs@3g!*G93S)$ߥI׎|s>w%\oXjH܇-f;5Aӧi3!}cd]uǑ"py πz UuFrw[-ysUc#}6 NcJvx<MdFfl|WvnYQR0tdrbӱAx//Ve- =ypL>Е΄Tm v]vævK}׺E  \qd6ZhG2XZG?TN9JZG?Xy Vx8 5PwG6޵SN|{^>r/h佝5NIX΁=?+_g? 
.K ^%x% endstream endobj 238 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-029.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 261 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 262 0 R/F2 263 0 R/F3 264 0 R>> /ExtGState << >>/ColorSpace << /sRGB 265 0 R >>>> /Length 5566 /Filter /FlateDecode >> stream x\K,mޟ_d[8pYYד |ɌH dqiJE_"+pnG/o_񷷯?{_|[8S%[$(!O_*){ɷ}rۿ| o~ox-rz5c9ri9{Gx>NjW}VA$ dϟ"姹&}Y iXӽu:u=Fj_ г?Wl:ix/C9b42䀿bޅcā`5gKxY Yeڵ[h`V}=k6q RȌ>q{6i+ħml7 w׳|Ml!ٚOoI +BCV{]ڐ/;zPB"jiCNԨMܷ%X{9x !3˽-#I![0"  ;=de7ف,nǁTQ0&KFҖf0=D.?Tsʽ_{[$|}d2?98 Qdhi)m}Z[DΊrܪ[h+[Ƕ~pw|Wa'_ӍH!ܓTF UUHb@ۆ"4cNK!0a)l;3:$hB3qlkwbĭs(d|mKqXej.Q8)3; \3䔙KmW.GlKԐM6ʄ\b{MJ Uq R4\٨X\`ۣ9U+[A8HSy W yEŽ#)X_B%.3Q<x3PioKaHn DYBlŒ?+JF#r%%Liԯ=T=*k 2~B5G&w+ (֡(0ζM =R:&$"}J! 1 Bikޒ0\h1i+ħmlwNw49|yqh[)hLa#|?./9>{jdIi}]J_|>,`d6S3mNM9qUHNIr71@J;:O-;W\_͊N]kVZti W?VH]OS}n^cc5Fy@o#] ~-EaP+]nF=ҩ6Z[& IW%B1>i{-(.~6ȣo|50 tJUwii" }Ǡ>̵7FpM@B.:/9 sI4ZZK^w8-/TU@ Mf6u.1q R|[qI[!hSQ$&L{ט$j@ ɀ- U{7?xTms ͒ofz"3\ҩӱ2ߪqRx/%ӱq>u7>s/w\wڏr/} .,5$ie)KpbxLք.:7KNр>@o i-F!{ڡ;/_ˮ _'mCG^ʚ_I˙ +RM| W-]yѳ;d5*rrqPc'#&1XKxQ==f~qx':0eG xXvJ!ȫY([)G *ʼ%5Hiiov861oB*!ulwNwTeɒK;OS! 
oߟZd(Lg*˪JSG)m}ZOō N:mE9U:c%8R[XSTV*oҽ[t)Iycȿ ǭjmTo|8 Vg柬e࿈Jэ{z-p3 [ aYK-KX7^F1,X4HOn坷=2gK™s\I[!]<S}Gۭ/ ݗ 5; fv5bҦ58^u񐈺V\jd~ *s;RéX;"XmL=gKi6\;J[i*%sTI񌨳-j!;wʷxhV^41DϚ9b KMa36ɿ~PUŌ$6AX>49B H WF%%ofNĪ)gRvFRA!fr dBTbQFT&A|₉6;<SҸә3^8&Ի3,s zFޟ p|~5k:D|ZPP'n6_ v.0`=dX ۚt-J'Wnuim &V9mtse;5~)x B+Xʠme8?i~ukУN[n|ÕKeVHEǶ~`P=}ŃsKbCwnH6m~<S3HL[cJ!&!6u!ޮڏ6q R|[1Ǎv&vmΩ^T@.jڗD^4@;jܭ} ,EO-t!W4iʕ ͭf?Ҵ5HifK`N[!MSPU* St` ޙ GwH_zY[5\9[Os9UK^P՘W^5YԧGpܩWԨiu=fX6\@^l_!8O|ᕀ x>F>mmWj][]r{;]?'N Tr#ZhA8{=2QfwB["- )r \|)s͂DNa2!Vs:>Қ?G ayk,Q1;[=P]%I&LDoqe8ޒ ,r i/mag:Jɂ^Y \R3IQ:mK2˿mA$Uhfȳ6O*l?E?˽7 Ơւd[:ea͡?Sʡȶ~0N{ ;d ˏ֏VcPe^VN8 ,.5x̖ ^gi7uhx Be6i+[Ƕ~pw|rtU}qOe]?o]_0 Vf([~b,3ʲ_EI;-Q\ qDZ3p⺥1r %*OQ ۷h3EV8m* _MX-s5oЏsWL ?p9z 5[0ՙ_RfV*NK$Ғte@4QoBX;,u S[u5~HCX\ fAYޟ)m}-pP\T @B/񽔳# ph [6~+Xk`lp0I!2'}s̖{]mfNMmc[s;?L9ըX.8+!x1?;"P 5?fKǵb8jX,/,2_]סY-*Ew\d ;_{E j>/*.]WPZSvLNVF!!ޟ[XDP*&>>`o#S,?2q"m}Z[2K7S9m-ΗV FiE[:C5Z_O! c?Gf)m>-Lw\|Jl{S"[[&7+)m+0bCFɠOT8rVd9#3R endstream endobj 267 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 256 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-030.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 268 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 269 0 R/F2 270 0 R/F3 271 0 R>> /ExtGState << >>/ColorSpace << /sRGB 272 0 R >>>> /Length 48209 /Filter /FlateDecode >> stream x}I.9rݾ~]JyJ 
hBBH+Xjق3Ή\-wnL!c-:뷑>hz6?ǟ?'۟lxۿǟ*O}O6ǯ??~??5^oQ?|վe??ǿ?~/?DoZ(yo-VojoG[aw?>tߤ~:C|K?3_Gǟ=OW?b-ϟp[Yf`|/Ә60C0nA=/9-ݐm3myF>+ɟtmR]˰me~=+F^l\{m)[zmZ۷uH~CgYpw{kϬyccܐ)u|+e׫sn#}i/jՂ3W3rny|]L:gpbt com_?|>\=wM?Wt-fuZsT\5| fyA~;gݬ{ ł[.]A}wp(^ Ânh_ o79/ھm˭ٝb&p5v纙y}osfo;Fܮ:/_7p3æs^w69& 3}Lه}Z]3n-gҍl6I~Ǎn{mn=>jY"W$/pn8P6|7鞶?CV3W W׀a9pW}`kui20n1'64>=/>Ó5k-o5i|ِ=c=f+`Uծ <ɼx.ڳϭiI~rpF}7'U^ [C>.ͣiCmemcn>y65!D,N@PwRϋ9rb&P&- !tCB9+zvg.|~{;רN!$̎9 jζQTT?ᙚvS%'4AT mjqDp jí7\A'D a| #[<T< !vh<(㷆b?AA#bOT@qyx۰ zY6otugj'C?}΍uWOEQši]cM&EeI;b@B9PρNE=́;.!~c7Ç_rO YU1J5J'JmMX%&oie Ɔ"]-_R۞b5b)vJlp)a'XI8 Gf[ޒٻ;7a#i)vTX_F̕Q{8Tg쳇/ȼ;46a!'`0g]bn&^m~~wl>6 NAN܅1)DGctfUQm( ?9"L`mod,ȧ>IwF@C8Ӟ!|a&gֹ;lcܖNVۄD IlI^ 7X10THj}$VOUì o/p>c# CxmlUƂs1Kd4|,ke̒z:Yk Xdh5Qڹٌfsfj> . u~* آ;8U~ /5s kM~u+J nxYު7<,dګcX8uMV :-Lm7#gҁc^PZ^qM1ڠ@ɱEXc9XGXY1ßMKqtv "M^u[a lMQ3KؙJ|o*vw}/}; }V(n-ҦRz>L[)p6MAl!VeՀq,C;AѳV0Zi Z$܂:(.g݋X fۮXr7^u Ĺ+ م_#$ctuYYeVgF/{HdϭU˿H}nGPۓ-d!&[I VZa_. ;Ć\hN?}zXAfe*8!glʜgLګ`'lw1cKW j@S o5K8^/kk%)޲׌"[hHn."S8.3,xS,9\,d o;~^r!Q۴]q}.w/CyEۯ*f16Ye^Sِ֞s5bYhp&=fsf0߷~5أevEݿxz۫,kM ~ظ pv*xBΣlk!3i%OcڠLqXĚ}~Os`mM^5Lf/?^QºI3}GUX fܰI-}yhJzcXxI<,3,{^-/,b8vKUx~"/x3w?c `䩕ڄ1KPJ~1}2>'}+wA߷PϺu6`n )ql.n]O(]bCW/n*Eb0-%Z1LU7fۙNBxP(/lݏ^yW/^X1.i]=\ZWO [I2l"{^)(~WUCPT v ,@S!ђ|6QR[;GLy@T'E%#wuE sop[纗-p۫EQG ||WiIygno4KCe`GS%ŁƳe$5=%nkpļL]82,fok̸4S`q YA6̂NA>2 %*riN8&6%$B r#Bmn0*\ߟט_ke 6*|Hs 4U`"Ϩܷv?U:/{}e8 -Yo1;Ŷ`]д=ŰK6w =GoD;t1Ȅ#A-%#b#ŬG4L #P-xod%82٤|dfIv% y{2 X;f ̛}#P 6qJj̜пL V!wӎ5|B0Kp.8zș4646c͌X%$.,Lq YqVgF 8V.AQwJ 35=ʞD7<1K4ۭϬCN謵/?k`%vfSJTP3dtE%X!㧎K*E[ښz^,Y Huu4nmg!Xk[7E# r 8]_xVL_]>!>]n Ө\!3v$ _Lu6|5D&_j-}%W/뙗 U6ҎNEPAG mBpL΋VQ8'.O;_D.XWN(=Wzj8Omlnض]Z*2x7) ]`J,C,~+,=+6"Xtك{^4_;!x)hL@ýO0|+nC&ձSJХ[5hy,Ab WP=| BKdG1`RCf è"f:Vx]I*<%1܊ LG {Wp.pƖ qcvC 8x2!KQ3(Q<3 O"Epsc6EDw6gZA$=` e`vo]0JPqHȆug x '~:\HdDl|-%TA{ÊC?Zai kU^U>uԤ:@D9*4 Y*k3=+Yg3Cp {(fǷ STec_KDj%hTe߀7i=̤Ď-k2F{>03=Iܣ۽ۡUGf8{ ~2d0}Auc4=dQc#ɳ&,A]XG.>`SF7Jpr9VŖ#0b>J2n[;|Q&a/]-0_3 __ m 7B|OL͋^)V9uiVYzm1W^1oj?N(BZkgT=O? 
| %˴s>&VpJA7|(+3|]pE+W0^76i p~oX ;`!S ysa[r^rQ.l} SѻQ61+C_@/zHڣEEnɞTהMH,*{;GKluȜoOȹtL^q 2C'w` - crNy5<P˧p EZ>l[P`|m/a@ۡϩQPr1`/bDž#Л2B2;=z@S;=`YmRgCa/F?K/!~p嬂~FaL>q 'g~~(3AqC2=!Bl6yCg_w}x,A/wViۗOͿ!Τjۇxq/ln7tTiFm\QơzSCΫ׵FOir_0Gfkzj밋dRGnj u%hy 0TVep ?6*xcq2jy35-Wx84{ۇF %J8碱8B9 ('fY $:v֩ᶦ N3t3q+C(J<oLUbwur`>jQK;{J< J| ~ZwhOu?u8~HY2EX rrĹ?u qnNsձ&Xǯ:Gׄy~;U8gt--}p}n0FɖamU42D  }}=ȮLߡE=` 5#GAތrP5bLNB Kl~N؂ə$XP`Fk҄nߡ<hx=,"_HOhQeYXud6-Wygp~fu|\/tcR#% K 7P];\< ~';9Y5,yj7dZ89 g <7ԏh>!GfUה'vJ u7xYSf^({ 3\v~`Re|0/P'1KǴӡ:CMTxI8VK3[%bQ99+lft5`.aAҚN!$\[8N|Lk#:ڋ'LШEnۖhPɻ0CC`nZ蠔1z;wlY*,&Nf%i&(uV` oÖtZZ`􊠖nfGB3l]3Zo$-FZZXK(xSX0;f'Iz@E%h+[?Uc^Vc+,1Yp v̞$ 36XoYlW|piZՎ>Վ:u9YrS%6T99QQ-/1KuhCdž ggW}SQV@KX&<r|VdiE_V0tӱ:¦xKdwHg7Bbv੣+x~>q ͏=Z;)M*778~A$ q2+,G҉YVA_U谈ub|*x1~f~ok㪻uLc wv'8r^dxڻB!6yt5867aku,zX#[x ODpIRSy E0l0+'?$tǟUM~.AIYG?`Fkg_SZ+uKKNkǑ456HPw'ımDȅag0ZoǕnxz'DC:3ތvoCUYH6&C%o*]T 89}Wiv,bϫ^unOڦ\8sJSiS<ٷ]媊ߓ7&weY#Uef:v1iMB#Fs/lpsL󮧶u --~jԱrca*.;HD >6B~!u3FN㤁M|?͓"pŔQjiO8@T؊LLu?U:u>u$FJTWrނ'>p /޲#pG} 4 i zZn=8U؎TfaϨhKҞ[Di!$e]-UX-OSB Yӷv%v$__x z9@db+=%f:1+=+^A֙O&)`ybyGgDaUΈؓZƟzb8i:N:_waa,gG zT<GA'xG hEji`K[h /qHX=u—V'UN=i` 1֜j9O;G]U; 1iSuC8VouxJ GZX񊿉n#'L2-Me]% ä~X{PfoL{8$t!+6K-$5«Yi< <*J({wº ;f !ZD !^b%Jvι؃}"I-YZw tu$Żj͗m3\CpW85 E7k0"1Kt>~z56Cl"Z7G+ lB."o9}GsqԲK* ^ʪT Ale2X*px۸['p+& `bwly.tB"kKHzL)xenxTƬvEWρW؄?L |o{՘xyseu,YTh]{ݡC>% CwJ~0 Cj%ppO.5tn |s<K, 6ChKH DX @g* <\#Aqg{5>Q x4nlvAonz;V;c%x %G\9 'sȃҠ~R E cVN,O݇o60fţB@8$ 4qUq G8%lEXoٌ)Q?^Q&x>(~ιx:P@9~1áHz^:U^"K5w.+b6D!Y_U1S~ v~(VĘR0f1@ *AJO<=zBCOL5dvE_w#֫9Lqlr}&u^EfGy|,?^qVwMl^&ثÂf7rܰċ^n}{aitbw-NJB6 CeܣrgǪ#ѵK82B|{V sN[kByJ">qK%f; )A>ϩf'b=lAۓ&lK8^rNjCm|c*zKi :lO6[]=7)P]s9m2m271K4>i4L&b ZZ!q26&1[ 4> s&>qʤؒ8f ؟XPb-WS1K%IǮ;R-Qo404 Jt,A2| ehީA'ej0K@X^Fޠ2ܑEL^[hxg'@t^(3y:Yjfqo4z9K#GhoeIHtǹKGu~׭s -/.9о/9 /Ncy_ݵұzhAi[?]ԳK8R_j?$SNõ=2-){]jdtu :(ly!c|28>Tpgqӱo7a֭[n&=(d+KsY3,F F#A]dd$T[y=dsީn8}kLFcMpQ w|PEJwݞ#֩- )sJ[,,Qi8NR{Ϗ5?-itL:M~-o9.GcLisꨰ}QPuGmpwXhcO<%NN b5:}%6\2c1o;y\Z.!.)-a,J)$ Hn$}nOQ"DJ[n(I:-lp\c{N4KZ 
#Q_\4/آ9fʔ)efӵw0`ĚvX%JX>LE3NY&̬Rp2>p٘Ҟsܤ={peq YD8]nn(wE˃ew>ڙձڱ6ȺG!9\@7[8/'u[;`w09!f EhJ{wcOr:p:VKO{ =vo(E-㽡wKa;POow:}YuȪ1bħe,{^NwtrI,۞ЄfcE J8G VNyD.9cѪ'M>bh鐫S4tKq"/=f@Y]jkTs hfAR-M\2 .qj?6T 8sH!1 a#AM%GXEn;$IXs==K887򟐃ekbӤԭ GHD SM 0<11ave{&;=p z1%Ŭk=o/ɥ(wvZ?J'48ZDIm)IQF%-J0Vh$V6i^o7Xy\iGbe;O`zzIYc 뜄mYmҿ׬Sѿ* ɷzru bV=DI<8ǾwS%2HŠIºY]%ϸyPJcP Fڄ[rm!so^thO lQm:ݏC!2r 0 S琜4%1zVЦS'eJ%n'0f]*{X82;ŏո)`)jp=Xw1{9L.t.Ʉi'1 ۄ1KtGq3!fqȴSlx7=ysd<*8)m^zҵLSVIYVk{|1ǴOai ^!OK=SnWa햛9L!0!whxٍhOUnYulOe`XoI<<n*1"WXXFopdA4?ON![4ۑۇk6LvlyeJRμaĭI%pޖLL^&-fpJepnKxc׫DPzn !b:V$~x{oggIez{ᴹ#{Op%2Hh#?H&C5~W}Ѝih6un&@+i2Ms\7a\:*l|r͔< V*o?Jvzmő ~z\L7iYj652kt󹱗#tn4Tb6XD㒜 \23U3mFo_ W$I?]guK/a@UW$mWImiFo&C*/R/tK6; |E/XtR]=c"M3"/̴L.pxC Ux{;_8ֈ$~3,nf>4 r2qVÓ3\h^96df3\}ŒY܍|!w1Hsɼ9)m ƝzZ]v1+E)7nɃ.nGܗ ظ Jܗh߫I[rugBY*]W:X.S]7,yyI^HgKw3mh/nȝb7NG0ϼwe0/gص\O<@R/3p.θ 9Ǿy;UZ?p۬ mJvaSr` oЙx눛zqqfʝ=0cW}smYG璳R!w"z\epbQyM̿ӼŪ훍_W6Ӗ^}3c oz00WK ҾpvTld Kzn朼#ozg]:'c^f\ptG>6u3@+!enۤLܸ៌7*\&烼K@D N<՜ͮo;4ɯq2pU3~T Ƚ}̚yߍǗLvژ7#^[hf],Y&aW2+5ë;Eu7e1]Ͻ?yѫww=|0Ee+tܵ*鄭8m.ɭNdeYJ'; 77;PSSi3lNԚ].t04_ ikoΘJWImJ_{G#?6گqҕ$ WUR.Xy\%г>}u dh $KKJؙ ]";3/Mܲb _g7rZk|cBڟu==1[歷Nma\dF;KEnR7;Lxr! r| r}E=w|$Ѱk|=MnT8q?q0JTXȐ%GhԚ_+[1"NiO K-?<:1K[l^*aK$n[[GK#4f=l x# ux $_:fZ i̒=70KT> %xO1i>ʘA[wfD@Gpp/Y%1{(B%j Ɛ,ՁY(\RxI}\`nӗT& 䶷,aKP%Y/Gh[@OT.KsS3<><˚QœN _]׏:_uxHw!(PIf8v$ьQE^ csoI%z%]'h,19M0.:<̬^ªcs%pc{I\c'8ؓ:i,@$}[=?0Dz[]8Yt9m0`VGjd@FI(8€Q0ujY&:z4,Jb2!<4,# `TO$GPFtCu/o 8dJv6sB%&Lpwra Q \\/K'`qzp"DJcwJɉ)eOJ;-#6F}@G,tk?-ݿx5sہiL:~RjC5h,-u\aҿe#(ɎY0Ô-:K:;S)Vrvz7rU0fv,?k HE%K4?o{Y'-٫կoIpl4#-*LeF0Tp-3:^2oa$U!܃hYJ?u$hX`R%z;8fW'É"4IU u3dH0%J̀̉K l&Drݚxm?2Z^xݿn'g<xlH4~ČYzV 5m|=йEB/&!=q eB'c;Ʌ}j5|5~M'2nYqqEqSR"u3v1Tyop_D>#IE*lԡs@[qJxAJGPzW S|ɛ=9-OJ,e୤w# Н%X+ʞXR&Sǖ=qgG'DG-: 8m]-G N}8F}rR!Z ;k;{xF FkF{ڃEmA~žw:dbw#n~RGMȝX)ɣc`R!w7IYt&WT!Ά[kfݢ_"8YwS widR"wi1~y3e:xp:k].e<|B|M|8bziW0I<8ªEYoN> _uL:G+ rrad/:s3`Dx~'d%fY]xe?L$G, V-u|,.ŽLa|"nKH"`O6JTܕД>2Jsb^x55 LcT&%:u”KC8!]_kJLhDFlމlO$]p t'T" O"H-Zڛi E) %(vaÞX%r=؞|2ch,GjIv;xH}/š/L0]B͟ 'k[J D'ۑqzW. 
e"bfb&,ɑ6%_|&ŏrȼOӗCR=)G!UHۣx iz4Ť3OCS^w%I0sqB&U@jO8R./+Y*?pb>gQgOڱ!/xEN#4njj۱ڱY0#YXKaiοq|IMdq#]qJㄒ`0)E^yn9NLwUepj̠ϟr V`Aݔu\9wCV(oFuE½^:f%{V2򀲅|~I_1ڲvd,O8(ݹBU`\!d&o~Y)~&(W^zwkq3I(g į*bˀ8C8WqsZW!ixho݅V+{MNW>򝮜aLqIm-wKǾ +l6ݰy.[7ی{u6oso̩^m-9of7rjwg?lvtO>oxJ{ߜt{{wl4Zބ>99&8X JUkje7ьe;[>Z0-9j]+jb.jڴf. |rw%wv,== Fns96oW!X5ŀ->fͽWq *Ḡ&VPVF~u'4Q]}k _]+T39]2FN|M /*j]&ox;3Fe|]aK F~چn4$=9GDnM!0d9W#{f+fZNeݘ=71lu=y/VB'4 խ% ]7MjA|M^g$k \S*@n.dl0z0pȭܝ/ ӹݥ nF~u4'Xa. `le Tk0+I\Bk#j=ǸEP]Fe%_)Xn7C1t02J0vgi _ i&!r.hv1õZ & o9@}ߗ,"_8>ܩ; 契Pbn{I) erQdzyl5~%64h`o9kU0XWT;v'|3`NuY3oa8nlfFok42c󃡉fDZ<8آEa8GU Lp$aEmhH.xsy Ge: F1ڠ@p#^SNVtt"^N֮f Y^^5WNP^b8@1Y܉@Ga;ME 3n ^U.C-U5({." >::a[\)&/=#KA_G+~>K a-9أͿy-TX`dHG[f70!,)!G|'OX=FN7ƦzS$9ݟ,[z}Ewg";qh<gE kd"gVL`}ڸ\[l]?B#-3b`ߩWkUe7woop2VwC?K9D?(z}aoaE 3춯M_ rqiJ7K#hQ.mMk;"^60`A3ܟÖ<8[!zTνkcq[@y5{е=;~$lm:|3"q,7Zt 縭歅Sfn{x;'ꜩxNXomOq gCas5^ѠP{5FPgnPvu>ȽI~WZW.\Fu-[zmZ;]msc##H_ԛI$%t3zǴEb{tL&]k_ߵ]œ&}9oI ʘ[y`H\O_蓁 _w1*W{vI4YsNm SK6rӞQ_\&cDvTZ^Dt97 y-G1"V><*JN-ŷm&Atxkc T-҄8J8xt߳FIuXO[\9HG3_( B <>GW\~Pm*|O1;mY.XY /wx;,w w .:l'b1z X AlF='h!# ?0G#.fQ֞qx.؃(8ޗb,ʶC{Cݖ@;z}{,PG zG C mޔXn487j3H+}Ŋ/B"C$e^.`|)E&\myNVAʤL N%lRw'WDxPW];cwUq6A#0O61H]-a3O2c'0P2H:NwMc`,YUowa}3]"`n;nWr#šٓ5q1K8ь7uVaO<.D u F:F;h1x?eTr8_+j^Oa"y° M}jaD8Lg"X|J, IʴmWQۧ_q[|h#C^.LOf,p >S3CXyl>ENΌk]@%f ZM#@pwጅ*cٓ3 nםo,NJ roUB k XjXVy MjZ"+G5J&Ep^Zg$Х&5/\qïxjUmxr%q]@se4;{/HM6p.Sf7^E3mbqwHW`MQX"pcqx:k^Sv ]팪qJ7XeJ~3;Sfc͍P$ī·$x:vɕ<ivhc u fаuL1j$c ^jc :^o {pX K!~~&D[hAhySJXwLMOY6gfd,ca7o`A!O<p8>:FC~ /~ ?X5 O8cTn%Q+gC 7'XOű2X&Ξ^)Z)/VO=-b7ugv㒥#aԈ*yD Uf_f2Z%[ g~c5p մ}[Xf665;.1gOmsl=3Mvqˍ} 5;#?+/ꦲn÷6E(2nG{{BpJ]Wƽ8"kBF僂a=j1v:(WT2d14\ᇷ(VlQ^X>U.-S@"EځWt>trUPƴrⓎK6z=OZQg[yļ_>ws\G+\9Ʃ&.vQ :.Se}Rsx.bXkn%O T“"Q1@ ᚧ=T4kx9Z5Mt:<9q/<= oI,Ƽ:u8䁝b%qm"qNƘ>#O {L9>}Qg;KHsasA!Yib;a^66*7hS3Iz=O\aL9c?lif σgoDT+GAW&l Dx~1:Ex~mjk>c.=`9K64"-l9 H|WhC*.+&-Mi1{N+Ϻt'5r͔ce@"}ҍ&_jwER| 'Ʊ+ 7)D.>-?G8>0/4#S"+ =#RflDY!qu-(jpCvsa^8=wIT!̠>D =02S`"'#N{bβW̘\1 } 8ueC{=O~Ǎ:itZAe{^1@Ќ8W4G^!Bn-&}^Cgћ$&t!C>1>EɼK 1%b 
ӲE~aou7ym.f>u;E_98czG.ɕK7D9=K_\oOxo<<{wP:tʳ\G,ٕx<(t:i .ORxsJQO,1,3(:vޅ,vuc(J``VQhHI$<3Hxx_Y"k-c$7CV0 y$'I$—4t+s/([IY<'v?m~>rhqC)+uGF;GYxQaHcƸo#&:<~&vvU-O!dc_ƞmbb%H=D=Ԯ{?nc~x0m7u Υ34瞆nRpϪu`]1SК#_ݔG34 m@Elз90~gÕu8;/n~RۅgRa|xSa11^' ~pV'!  QL<*d*^lBog v%.`$DyaCq OpGl v.M)@_a BQK:Ŏe-wX%%ZuY?6_p 8Ƽ.\3s"Tmx¸"ؕ?x64uMF0'%<؍wǔX<Ԡ7yM qC&C9:Sk<8mS}zRnB#D)yTl119Gx}vmaqiiU:@b$D9N 1@3o (^ooZlcm2C6]06W*3B#D%_a TIXHACݜ+ \NW N+Yx@;=ᔍX/]شtaI5fzIr4rL{쩮>{Riy[Xmѫycȷn|nx>sK"L_";\> xӨa7WCSp+G$h]"vM~s'5#"j*eNW^uI p6D9S FUS.\4Mت5ƚTU.;ӲF_zށ-E#kŘWP Gg1_O=#+)83!*|K ʛBX.X~W< JR \՞+<=Ɍ'u%0m;u|NCl k}w~HmhsƟg9d{|Bg'Rb1^'bMe|Xʂ_+R~ψɼXs+%VWsbu:`9͕`&ͬXQ&EVa!-^{Om kżSn{;1/;Xp0F]B\a!cm"bLm_3}/)d;"dk۽9@#PFL|xnxBGDɋ1{NuN8okbKolOḎ3Ӻv ~ w>e{U>9z^o/C>?=}od/\. a{|l?\e0um7L?{}b5]{uB': 1\ $~Ihq_HQ_x5$7¯A4#>cv{Sy_;{_(ETo#};b~ i+ntIx=I!6r?kt#ex9tsT|̂H? ͫ_wчc"\5ADk_}w||qߦ s|L53/^epik:[u|$P7TߟF~0[&|e/yz_bZ;##b˳b3v/ I_ C%-ЉӴٌaZ=L'-=j]=]w9%oR_W5)c>>vዛm?uJe||R"Imdg?v"|57|<7 %.??kQj$J!0 a2ZIexɟ \7q|oAOYIfx΋^3ܬY('c7ܺq %d9HY7MqpXYHQPxÈΑ>LQAaF$X7 Ymk7v2l(shrӔ@Nǎ@&n8E'Ѝ)rpn;1BᅄGXbˀm&s`9q(a[ԯ\_?xU7 >q_dSNvg5[&v0LFbG!6zck oc'2ONdfhI(QX?.Ag,Y?_P?vzg 8ʫ>,RE 'TUяz=v»~ 3|2*Ԣ_\$a(G},,}T_ R4Ͽr`A.- +L4&So6ArLA2oXd9zw94AOd//k"N/F]Չ0MBCQ' ̧uTw9'~ þ7o9b~9oNXD:t1~dDqlH22#iP--۸?<cGv|?p?pw:Ħhޅ=a`@}Ahؼ805#GR }!_,,Gg?`,l0nKPau``wމs"C  kQOrĴr?\ߊ4ޯ`7_?lO;er}zcCFX\*YtG W! 
ÝGEpSH48c03P ±()RX_iL}?8?࣐~姃oqy|0ߍQ9IAPVW9;|6aOLCʾsK,gwmn9P|} LCͥeP"Yfpu#LFq\8B'AS5N;)?~~O:`{s0iA,B3̰P9BW(Y3a_R8N=_7.|c7a+EQq<,A?#TA˿A0sz0\|>ta=ܑmAIj(_ $ /"7,HA_D/lY'n _1 l7 N 6r“3DNN" *;7F$ZiqSD+70.ᅭ%WL0c=wZI+~U~ z$|T`9agQ|p'd9<ɲ=Opr'qqS\ NIb8~<>B0l3U0ǿvAEʏgh0Rg0u&G\ 2%~>VD+S̉΁!<vsS-˯uDdw2r䒭1ģq#,A#s:.g"D|{v45c{j;%gM+ۓL7}D@" pB~}p{(Hr} kc> nS a $=U.[ԧf3 t}|T!u ޝl,mRy%V}cĝy{cBƄ.<&ySx=Vh9 a 'Ga|JW WuP.b|O8|Q|fh^(zӷDLbpcݹceoFrZg1`p7#YAN','hqۇvGp 6[i!&t\- 7>06 4n״&i tO0/!?))[O9/cLcc$'7dfy<GyG5sI{/䮋QBa+b^LY ~U>3(POO+JK0"e~P3gpVJLl>ݣ pfe etQrЩ|Kdׇ\m$cR))EF#IAc15ijļb.Mw/fz3>'UZ]1Jf6 +W0Uc<uƯ6 NEof PbTe S^2#d Yc}_WQٺ+Lcee dmOHSBWOSeW&ݝmx[ޗ+Z͕JM\rmE_*lS#}A2˭C]z!^W]VW ܣȌ L +pM&̬qY-+=~Mk)浌-k_-U?S[V jQfR[?-W¿EmWS[rԖIeԖa{n) bO?AݾQz"K\[맞3 FOW}MPoBSe̵pӇ ۨ sxl0ϧړ_"pOdfBtOC.S6W&ipkoܻ# 7_F'%??x1"qӇFSb$z<1K&Zߩ\/_Ngdtd=ߖ?u|e[雙:R ~~vW$>NIp;O~?.ekpFO[dYԺOVtQsƑy0" Uà*+<R`l 0F憣xA9a:{X"C4h d9¸*@x:>Y^'=0̈~Uz+DsHR6qClvU)EꛚR:CNpdyVaEFa4M0Ma182C؜8bo9<"y>ظЍ4^}s =hO25 (MzQ:!cjC5  d9m;i;i؂.G<)PC$x?Fd$Y?~׹1 ?><_Ӻt?} f @'~ Y?P6j3zD[tO7fS1a2~h`%d9l;E1(ox=tN]^Î{·)^EC06 f}o䲾OFT$w,(e<\wT(HwHrS{m  \t(h}pPۆŸ o؇̸b@-7/F,<%F5aVpcCcˋ"={֏89[ر?1v*#o<`Cgp"dkXϮY?g ~l?*GBc}19i@x?_Eʱ"rk!n(8oB#"zzG#F20`*'L'>WG|3d6CE |mqF?SWܿQ~Gڟ |P]^0\fԐ@fƏXBz2~fD3Dlgz1N{' qR,_|,P9)nLhPrzT:`&eƲk(:uۉ Ӏ@OQ<sA)!OEFCˏnC_{ zߪZ"@ v|A˧ x} I7NI/=Wp~+*#:B" 'o`_ ABft}CP\NqqAQ9aK`j5' NT7vOƈ8Vid;("too/Gc7p|2/z]0tDld uA7 # 5?΄k/py_HyE weƗ_p'd-_Urlp?:dˏ⁣%#EyD0q|p\cOon1##pu~wd F DXÅ]T $E*/ LȒxo凅FB_ 竀C( *{l0Ewa3䆿aaȗi 6)4:Cc~)=i|!|KrX)ww9t`0PaT(j q. Fۛ ˑ4rWW7B!rA %Sq{ZEG(=I$(YɑD<_V̙`f<_{}go9\̘Oy1O2&cJ(I%d9aIT_(L ^8;0@o~o=JpEnd !` FˇꞮO'EO_j,]E! oruhƇ3>q] {|2mhX8r<@ىļ~Sb/CqF^؇d<.k}G#,,WoEA+{;>cs#o}csHP3`\U\?܅qS8&㕟:K,Q! 
.%Kmq_-3-ˆW eq: :boc_m9e0B8gbd46}m/-T:KщظyˋUJ-=-pw?bҴo,-9iGBҭ[;HR1g>^<1Suc+K t\^7Vme(I6x0QgS !M\7HQsC)+Rmm\:*a앿yZg_PEӨ/1yEq-i]FSAmPGԧ۰OҫEmE9cF9EQs轈˧]>i51v%n\ kpY[vdXdAƌT<<os ,9>z/#:V:7ϑtʛ'$~ǻ1Y^GEg?,M?1nW0 xOW,MNrW7))Ԑ(4S(0EҶ96l12LE^x^/Hdg(|1|)鋺Τ\P!qm`.eي)w2_mk޼m6n6<]*3W{k&}K{߷OS_neۑ]u0'WָJmR: IX24_&|]'.Cae.} 'R2eTY̨8UBE _k!6B*궀%luR_6y}rOɟ2#Ppqzގ#=ʌ*x1^FɨjB&+TL/ȹof3B,;7y讥%͇LuS^BCҗ>颢G+\٣'+\Y#Xix$+\\~rSCԹGE439{O7O;_1 mLXRmL6_mh-(Z8&k_Ͼ(ysZ: 91Ufx2nhuvjO+S 31V3JeI 3:8j7c^A!ckv Uo xlg1ybWQMfI㡼壍wNa۰/SA۽n7AmqrX6>5e~44BQϬCRxZZ5-w,VcaM׳Mɞm=fA1Osh%ͳP=FAYc,91~] >icҏV2n#-qp(t]F1vO[oޥ=HZyy-EHj'LCNo#r%%nyEil#Bh,Nճ_Ҏ:԰vfw'AW'Zԑ);g>|Z0S&Cej+v:nx\k9g1p٤+:>};D,v+YLz:XOwr3[NdQHJzWLWFc+Rj9v=fs0|3f:SW2tFO~aQaf_07f5ni` dԂfv(.Ì`2&  :rR+nȕoIQ{#˽S|Ɖ{Gj37oD_s y燠՛R4$]!oj|D_+[mvO=\8=W3?sC%vdi@>;~ap|aԐv>jW1lU2W@)~UsDx}"dӨ=‰B`v&eOKY:'z5^jil}#ڿZ˪8e5L. =Wr+o;~><~<'2:~َvi>g23ߩá $tng{i39G={6i?Mڇ ߪϓ־T*ߎ|ÿ|3|>ߖJ:WIL< //_b?Mo2|Zk+diR>VC.37edƁQ92OQ/Z^X[ ?JXA/WtdLnzuZH>W?Ve_&їQvDxg[LFdH5wT"]ċXUݬVDwh?K Ooh~kaB%aQ9ᱥ1u%-y 3naUE0d~|0EWW=XS= qY@ B*&'a ,,/(.lmbUM@Vv+G UO=*EפY+7[lƾWB#ҕ9â0+~E¶WlY_ 47j%dg,:s~[Wh+ z|7n; fbNBς!0D6҉ m 3YUB^I<~6?c-N s%=r47ϋ9o.s#El/ Kr?.'IsbaY'[/Šp7 q`!d9ER91pCU묿ntT(ShX I_|@Ir1Hz8~AO8<>D*XRA\z>9韱~+YQoAm!F-xyoC!''d9|mҟQ:%ME}x50~;eϤ6!p?!BFa aKBCh\LE@&/b2ĆC$w/>++>  Mra;0# EoܟS0c–z_A}o?^Ȫzz~\{`μF YPQ)v2/:<=x߳0cA=|3$&}py{?̆ϽaWpEϛ<Ƨ]zs293p1ʭ߱OU 1@47+P#WƠäZ#wt6x~ S5/' S5 f]AME卸.a[n*|}b3C-:_7|#D!_λ~MjrL[ q Μ >ϯ9?UR}[L?gې8pBS—|; ?'߶@.cc-o897?/P5\YvZg~~?XˣC_O``6fYrpgB Czc9}R ҆C<ϧ$"x}{_̟%u Cp)1]|]M]̕cѾ(&d9Mr^H1U|[ѿJ\l2i+BU|ڑUӤo&d> oOn|\v9lC 76Cg2Bn.L?Y%3wp՟|1يU8kCg,#><dA {h8`!1 ҵe,9y18ư^+,d9T9z¹}կ~;P ˇ {>ЌϪofrDGwo 39b|O"=%M尯O-\*p^i`Fo`b()ү k@Rw:'((xx R7|/&@~tfMܬ*?z ;&lf;ǒ _ԯԟ 峘G qK,?0G3ӟ@:ѿe4S|s,S J'Y__/,u✶ Cle@I@>e! +Qj*=*3]eEO"[ Qzۧ`#-x7S>05#H0hx<Ư. ǯ?ùUdBңn\L*So8ҏ囎QExwA!XWBwW#aMJ?,\E),w|\d? S'ӓԿLw~U_`a~u\{7}dJt08~5"*B0bKI֌!r(D֣[ڟqԨ^-_ք|Ҝ !>,i{e;.~ihzO:Id\]) }>lOx۷T8^)A7۷@6@n!Yw3.'i? 
N!_'8ej@h?,HNhJE(̝M7=[90ӳ w(Cr$fV'Oe\,?Qh-0-A#Vx9Q!/\䑓 7ܶ # Yϻ5}#ܶyu}\~<Xb8X"ܢmY"_)2}q [Gd*֖\^ TK4'˙7ӿ3}|GϜ/wyǓBozudvNx ? Oʯ\Ow檉 }ωr'gl)rS?G{ɯ_ʯw+CG|*'W?oǫ|+O(>а^<2gݔo\`WCy>~PZJ Jx J􂆙^0& {緾r–YiV*;3,Jcس-BRY׮_|Un/X Y5V%,{\e}~GK:1]hFl?[UEx~>?Ƞp-29=ag ؙ^1,OR&?jܿ!#c&'R^׿)oO0B|X#KQd94?NhrEw2I3_}_C3נ"d9FD9:XнY}d9 =m1dyRZ3䓜^_~E>#/)B*A_dd/b2s guRNx "Z<$'>!lҀ-cQLGlO T9}\r.Ű_4,ƱE\BcFp\ No035ņ>] N^#% =ghdynw™ N?*I qߧtd_ތO A +K5Nk00ӣ_G}wWy(70Ҽ7d_[jΔ|>q?O۳\Bg]P|GyPߧ7ǃ?T/?,Op N|gj_s~[? lb>ۯg}#$c:%p|lכG׀@cq dk8A:`F G84x|xfz),`q#/O"3p OYstdj]_}u߻mQ0Ww9$a`F_38qrL阰^GymL`CVi5޹QuxoyW>-5 F{iDwa[bqHmK3%N%y]-$)aϸ4 {6]4>>h_~0RJ=GfCz8ƼIϦLJO!SR,3cS s 1.ijUEY>a^e![%o ~,>@E7]1"4MW+KQY)v?ӧ=q^flCv?8L%mgao& ~TaJ58S } |XyRVd.jCIjgN5SC=MT$]tW)W]j_ c2YuOa &nNuZ: SߢWij wEQ"AX⫶5.D057n0[ PZ"<=*#UW¼7 * >^:%$b/onb:|/}1EU41 RYxzzF:ãv>2Gmpkʠ@x Z}oX`A7+\ϸ-R@d/\ `/Ү)+AI0}q\ ũ<{88f O{i,惒:\n"T7J˱ί`Y@m6B8q~ɻÿ&+lft{ynq;8Cigqn\guY=n'{:Xa+($xl]H7족wbI{wģɭz~s}/fHϰVG~žA{3q34þgN\VZ$= Β/t}/~Z2qqҌ &ӌ'4-m,q_m,m lÆFsxZlY4[KK҆SÝ| rs^13!=.qj.S>2͸>^i-;xqOHʐow\iq_"1dq? m1ʻ DFWԹ:E8b. 
!)C'=M'y}:WaЯ;xar^arL'm''?"<"R tJUUuul*_wګ6+kCo6+ iƷNI_̖uݩʤX.9ϕ }|aY}\mO )T,Q⑋\2E2b6'9%_뛉3$A"U8x>7grQE ~_%l~Sךp,u[ؽ?Z1 _v#xc!Dz:o|ڐZ{M2/ywi>NPcm0za| ?N&}/Nļ{4_ƊQG11JD^ޤHf8*F,e8ep,{N2RM͞I04F힀)mJC@{7Xg!qKlL)쉠7wI޾Yi4%h>?l^jc{~{E·f61m@Qwh!#7go L1QG&\y.h#o.¯ͥN z)A̧u`{*8BwV GÝOQ8b3xHc m0a|8Ϯrh&<8 ^GjH |~[Bcz ::G=LȓLٳd³ؙɼ8mһ]ѻ%ZO'.5v^2}!vU¯ܳЗ*yGm3,Gl;j1., OL3> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 279 0 obj << /Length 1193 /Filter /FlateDecode >> stream xڕWYoF~  0{pyHmZEzk iY5%"s-Evks7;cig*+]WYQD?'*M3Ka˭o gHL&"_id*uiMЛ*4Z^G녉[X{XX VsM^E⍎ RHDS-WxWvּw_pX|Yǯ"yzݑqs.no9Xc>9c :rL@j2UDž)rlǹ#UÄP ?/-f;dtY|Z: rYһ-ƣ)na!`T(ׇmU_k4 JxBnVVe]L<:=-NhzЪztAp$ 1~Lm7#DG32\פJg lQЖ%ЖEc<]؈k)g<QY7u3$ 1}7bBU.7ӄ%ǓiUy2ID`(y]Dn렂%{ff^NUIRQ5rK  ݭ;&G6e+]4@'8@<m-`95ki A:jD?1xawY YF0o0bR` 3LQ̾vN(i0 B~r҄7?Iܩ̖$ (KFᱚ*(~K>F<L̂Jm\nCkCB>f,jޢb/wظ-ve$6)<;Z#Fke\ Eb7~V$IAq%lG*(2y]bؔgcW}'A\NzkbIbV b E=謩`%lôcLLGsJl/vH(R uu\W+ >0]Lw4g 4 vÍsÿD10[;G$HpPqP@0]=<nx8cPZd23v&-=y`& JCIdž2`q0ʭ`pk+ endstream endobj 257 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-031.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 280 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 281 0 R/F3 282 0 R>> /ExtGState << >>/ColorSpace << /sRGB 283 0 R >>>> /Length 1856 /Filter /FlateDecode >> stream xYˎ\E W2]6D"LK,P($ɄL}q 1yrq]nI/w~OO?}YzTr)%]?'|<ؿl:H2Yyt~tN_}/6s|U%=M.pp?t~IY9~2^R&_}ϿŧO?N%=Izw'C~sjrL!auZ Ⱥ\%+I/y >Px^HpfϢIfBE!UaQ\yn|'=W[!Yʠ=U47- 
._yx\.=aIפ|Qn>k?j1rlxny̤mwotb[?#^AuۧlVc82|8 S*ȨIϙx嚫 uǛ2'p oiL;\psJs T-Wlq>#8#Q;q󰦺ѳ8?셤#u;bT%GdMDk隷Eypc%ăǃF?<2S"@{=A1u*?6|'=˕1( !d؏cJe]tDXy>[#.-6+n"x<⪍\RB̝ Pfh@*l+-ZG68ؐ&fØ.g qhH<$ϡj H@]}vIIox{FoH)p|mJv+"vÊ0~vWؚw<Ю[dxM6 COais~ e'}SP@90˃iݠId⥈r1 $:cRe |)˦vO 5 1]w\>9Qۨn8t"}`mSn;d=~1oTmOwWсџx!(|+ԅ\|Onٞ{/, FQ1|pu]l࣊`$3`sH T]'@D κ_>`%+8+Hbho:ee,tt43y;z0:dxKX Kh$m}DҦyU #ABH@- ;EM\!XnSH;(8#pxCr-EP+hr\7X,vllEymW4<|$iY&l#%m XhZV<emz$Zaw+7Ca츍kyrt*xq$Sr] +{eҲ87G4m>L| V j_Dw-򆀡B3sN"d%UQ{V1$ғ{-P]ΉUTȵ{RME7Y94:#V|<뚏ppdωo(pvΏzSV%CL-p]osQjpcYgM!(nmTKlT}(0xE.Rÿu?B X7Q7P~Ľ;< rݞnˢ9ǯB9{TY>m5\nnNC53wF}X"T9qQ{#!64'yuVawk@wpVn Ҍda-(Wʫ, *:X endstream endobj 285 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 275 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-032.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 286 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 287 0 R/F2 288 0 R/F3 289 0 R>> /ExtGState << >>/ColorSpace << /sRGB 290 0 R >>>> /Length 11005 /Filter /FlateDecode >> stream x]ˮ,qܟ` ` xax!2 x]s(ւ ޞꪬȬx-޾_/o}y 㿿|ۯ5[,w KqV~O?߾_s:?~?ۏۗ?O_S%ؿGn d?=oy0o_px;?[.꽕[ >퇷V}7h6n){L 0s}[lxi|eኇp K̈-ׄ?pFXfoqጟ-O~}^(^9 m̀%>E O{Z3/kP{nPsҼg|oӷm~ܾe7_㽞̅tLIfXFl||{X}2~='|4WVĸͺc_;0Ը8\7xc!NO[֒Zs V/7~7M37>{Fl;_?3_γcaٺ&S7n~MLXyd:N[&wL_>` ׵pMk u_k=9u0 \\2ve /s^wZ|u0E{[Z/|mb}]+|5rچy*맯_apGEF-w/O:[)Mx+)Λh1omk\e^!ܹBWx2_Uzku,;S 6ӍYްn~YXq[[ug[F܆nS-a+% . 
_t#_ k_we,F1wܟ iwlZ:c#w"㣾^!>ZŖϾltK֒e1Vy̺4ĜO#n<pG7o;J%<4/0"m)ݦtڡ0+ږڎ^ZŢ[WvO1 &)~;۸"*Z rewvVxb?c!y[ f'ŴY[>vmaYnmiͳ!`&n'qUxv|.cg֘g:E$Un쌉fP.0!4IJ߰C뮡qÄHk`}|B9гٚ6n7w]^A:Xpi-QҶ2{\qy'ދ2ye'2@@}=oCxUR$;K s\;v8`'rI1mEBw!YVخ1H,I.|3=-k+]ֆϞ·92ky+OeMj]&3АtLko0k΃a˻(FEؾפ/qNVԸ;i4!o$kvB" -]ˆ{^fS!"tF,%F)$Gs̨u{m? b|Y)\koϙl|s+ >+znAw'R˜ǃw(4yEaw/Bx$JCT kP:#?ˆ'F>E)BĄyKĺ 0% XFm5Gn(".E~ 1x&ߊ9i$QXxg34"4*1^3rN+R`6yX7nfӭ_I a^b$Gq&v9r#xDf7G oxM֚.L#p܍-G/ӷƛ!JTdb#ޞ;E%26x\esE:ȱ[~,1Fǚ -.jAԀ ~ٙo+1ڣ9v~bk^z * rNښڱ:vbMAbz\w2OM]!nah;!O74'a]Qhc}Kzբ[vS ?/^<gJKvm2_[c&9+!مhHkP 33k~0XF^ax.{6s0k{JzcU7A=MT:^ &`õ{KtID&|]a8ӥ_Q’A{:tمhwJܩů2fQ{[;HɎ6JQC;?3u&V*6+&R:R#qEzZ=,Nd1=۔Q%95lI8&m2u r?ָiܯᇮHeʐĞ`,!tNkjb 1Y~iϖ>[:L3)50U9-iDZn2Ω5;sLXWLS9 O*V.pjYl:bֆ\c =[xrZxLp2* gbFXF$0XAl!2|n^N:^2'bˬ6ǣ?DžJ#®`E,"tqlͅ/p,&~MXXrƽll\ b'\_tW>FdH❠gdTTv@Xl3(øM85*w["CHb*qO ttQZ||=`AYg-# w *vԞP:Z\kP,+b>1@rؠ|ñPo e VY£LrPAj}dDwuHu 늮Ok~;D|=ϗPܙa"vwӱ͕ĹVUF(Z"DSk2o oxmFk#Վ & M| U)tVD[./%q,IK+J&k '"aJ.l2ʧXz9-wagӃϒ:#P# k.UF`mC%3-NV˂Qi[!;@I E?Yg`weWaYD| 6Te܂5 Jd 6å\Q֭M_ˑDyEȏ`C)tp%qgTދQ0gҠb9V<2x 4 #dHsu FQi$ŖrvkuWAb$p.<$rByP:nav̭:; MDxT na~G E<^6#Kvec"[ L!."Lв[u_[dxx'EndsEfRީV5nGj&N7uEA[j}I5S(p.CIѿj!D&"J\ϸUeM@&Dm(p4,X [zG:Sabpriܦ_H毳W])gs9F~%ꉶI؃Y"B%bV+pMl$ɱfR5[H-[H 4UMbO|T:FzecCZ@Ӷ+)ӠtaqZ1 3 窑ҭضtc$6 K4Upӣ}At$k4Pݦ\616K 0fluwME:!AQ4*.о`B-7+.t-ՑfYQs&/Zy^e®թ9:VB#mfxT6f{)?om`V8R@f5<fEQY*A06jv2i};F92fRDrִ:!āDEBaqDMtIFҠyZuiQbp%aY)+ S?aMdiTRhUԔ}l'-7 d bTU#%N5b}UraXgL쯗`h*;05+f yr1G)m!g/ <;G TuMFppTQKeu(MFu+N}-e) sQQa.W㡜.νv!.uT l έh'?X=V_#MИNBWQNlW+$vQ0aO# vRۖx>L{lEULX}^XəU=JuEh /☶N>Vur@?ZJuYҲ?=fa1毒Sp~V>( pPLXOR>tӧ@p_vL #L_e[CykaϰRI\ ù\Rʱc-:SmĚ&y#ݟu [2yAY$;LQ_SG4=QX&c804@e,]\v pK%wɡabEf.)LC5;$1 Ftq'4#7PgRq 2W:~(ͩAScOߝAi\^؅+[<$IsI[GQ4 ?@Ne4/xǺB&GUxz8f-+ձ9r0K (uڒЧUH6yB_T1,߁m`fM/ LDI pժ+9fntTxEݰ~bywaPXRbj 8=8֮ζlFaPl l̪Ҥuae g J<*?*iD]ḻ;5t\ͬWc'lhSf?EWgjڝfrؾ#="$Ԯt \ٷ>YZ~-x͝F7!Y5fOx+E|a=r҉kVm33QvrEbVΡX8c8kdBTMon[G..~7Ȫ) j4RTEp,ݘ Uc{==U@?.u6%*K;NᚺF2lr[/v0W!ZΚhyL5"h-#H\+[-$Z&0HY-GC@^ens2x^;/.\O3&"K0֧33AZD/hHIVsgp%X H$aHzddKa]Q!*zqP|7V`Hձ 
%|ht7=5V/&$tmvI[9Sl+a۪uR]ںcQ$-CǨmVNMBGa]"S[ZIk}*BD:b8r87Ǻ #vQ}4{ܔgtԶ.rR )Q)u0oIp1( a%-7( GLH耯d3z4 Cx0!#HBaG< /$>ӚE+{U܌xl);Iǫ>P:b G*nvTN5p]©.fnykiksK5GkaXLB0,㒒-!vRJj3;>F ˚gtk+1uc'2mW 30ق@0z`j0m;GnK8U4l(+Z.aNfCa3pCj/kuEWÉ&qc8 WW`;,2ْxdtp5ܬhѱ`K,}(K:[̛◠SjM񳊁77\<{zJ 5 =Z3RaT]{gYD =qU 5B@%WK|qw-f=x|{VFvLG5?]:X&V0~}̵v-8w<љ <z-S JޤY0ي|;Ę+tnXP M2*jGquƋQ,V,@B_ѷRtqt|h_sw͹5B(79z{iѽL8+&C0[:^;|˪EƏ- jR{ S dO߽F,k2ǕeM`r#18!ˏ:QARw'}"Qԍ=S&8TxIcXfVcOTkQ8l"$['T7 Wc4EOpJ(̱N o0'I7Qr= N5yq 1)@zy5D }tfDfrݴ c"7 e6 Hz7Y)b60ֳhT5賢=VNHF9U v-xwNmy9{B-Qa]u|Pw=@vnKda';q4>4 T*(+؛@4RZm!=ZHr51Kӟm0T@OאgWҢ6/NyWOǙɻ͐yn.lm%OE!b%԰qjޠ: ԸRt?9&B~J0onҥcl vu'赪1mI'Qe~ZgrkPIQ*smCvzTP uzNS8٩Uٱgwt<.9w[-63(ߧ9n io_\?OuSҪ,KPG疬C=*³_;iztᜮuBw~kVt<) h*nx q~ }*~t2 ^Z(GA0'Ta^tP`qaWR7u[jgb<4g>Q'{D\Vh+᷿}-ݏSxpLs~t_~#/H//ܨ endstream endobj 292 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 297 0 obj << /Length 746 /Filter /FlateDecode >> stream xڕTn@Wpɴi%H,'"E[Cg&0Om K`_U= 4L26pک(]X4+r$ӏmMX̥Ѫԥ S4TQȄ <<~?v jM !(vFQl D.Nrc5'۴GX=f~ӼZ*6;%N՚gN8 fb05?/Wg->:D) adl1g endstream endobj 276 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-034.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 298 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 299 0 R/F2 300 0 R/F3 301 0 R>> /ExtGState << >>/ColorSpace << /sRGB 302 0 R >>>> /Length 3750 /Filter /FlateDecode >> stream x\Md7ds򷷉)H'@I 
@{9vuGbWSÎ~>_}~|±~_&|?̏)O#8[;^^O/^:d6Z~#UsX=3]?ޭ|GFr7G8%p3u׷?_oY"kTuvA3ak&G;sGw%aph_0Yx/39:ǣsOrx~hptiZVk;SƏ3{fm3:Ɲdlnd3zmv4;:N,Ǚ=nL=ck=p[ZgG{N0PkX fl65>Ll4<͛?Tߪz׳&nF'x;.fnHӒ{98u鬎\w,L]E` SމiΧp 3p4_ꕋ^­qy2La8v\: : .bYTऊhbط `HuϠ)Aɺ+QBZ"g\$\GN]V^haFA a#^XSPh V9BgU9dݞ'L5KmJ9 8Dq ;Nn10ԇ.g*.ۃM4y6jq.-;PW_$:B͗|synrZ j82O~}U$cDCd?29S3˛/uQHYQ.nR1yo=Gm5<"AE+ʜarwԻk[>y\\͋/k \,O--ԝFS̱{XypM6%EP9xdO23doKt͗g]i]…{nKWOj\ e齧y'Xqܽb|axr7YFʒ:-o~<߼%0|2C#Wy7e\ @S n-ZTl7eN*'+ٺNEꝁCܕmN3lBuWn7e&>ȋs}}{c.7e6%s^H.14"˛)u!FJuKܕz2rl 4vGj.qSf 03AVWBKeaCܕZi ;h|7db[/cwi-*i;v=kL:b=]yV>w^==$DJ}fW;ଵ< sMYƾ?+Vx߮ђ꛸)uChgWklMMYNYخa@CqPMY+ƒ yHS'T+,ӎz'~w}ɄyUm&Lo  AOnY`x|,a\'>,9%@c3sYVy^/ ׈٫qvi m54&? 0((2^cEqㄢib]_PtE1 ײZs}SdyZb5anM;.I_OH"reQIK-/q0L ](dg(A̘sO{h4p5@ȓOĨM7i0īBlӵӾ˻²Z?QNpQM'u^?1*̙ $YR7|1~G]_O3E`Q%^lFR]_O(1FeICH&%?o-50K%w>Pl ~d=vy!)v?6{]_Oh|MoL쒟! V*Ċ H0uy]AߏԷF߬ * 篗 ["~oƮ/31fv%?黿mѰ'o6M,2+O'[_y|7˗|?+j-X .,}o{O췺|7k endstream endobj 304 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 293 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-035.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 305 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 306 0 R/F3 307 0 R>> /ExtGState << >>/ColorSpace << /sRGB 308 0 R >>>> /Length 1904832 /Filter /FlateDecode >> stream xO-my}wyΩ-@tK d2d7_ w#ZUW\c?+_oǿ_o_o><~y~ǿ˿ۿ_?7o}02ۯ_??o?>L?Ɵ_?ן{<x 
W_o6oo?>?ۯyϿ??~}'?oo\8Os<﷿>~~=:?O߾_Oχ篷7w٣!)7{^<;x2ǯr<;S)wr<;c?~'O=|2:f>>|2:n{OF>c>|2:d?t'O]y''Q5z~|Gē}x￾x7҉?x~}}Ӊ罕(~z>Gq,~?y_/;xON<ɸFO%'kd?<޷ɸFO}{=FO5z?<ޣ=>d<ޏOY]o;dt>t'?Ydt>gՓw;do<޻)Ϫ'OF*)Ϫ'OFʳ8XUOf?Ydv<~˳`yV=*gՓwq8].~o@GqQ|ϕr<;S)~}߿-i?1)i7x@x6:>~|1߭)i?|?4E͏c°hj~45?~=|ϦgSlj~65{_Mrfyyy]>my^rGj{+ʕV>y|+W[^|reO ~)WjBj,J(ww\eo}soo_o<{{={?З{{=zo'|1?CAv=ޟ:8tOَij~_Nv̢wS>fj 棩hj>~}c5M͟OS}xijOh5.ji"ji"j>>D<>D<|B}xB}z.z@<T'Nȭr|&9v9t@n'o_? wr? w߾>@Alj~65_MͯWSnj~75M棩hj>棩hj45?M͟揧Sh55! Ҡ&ǘF;<51c5s*Ǵ59Ʒ}1c5j><61c)jsOQܰ&9`Qhj~x61?MϦtގ)jj~yJEwSm5x6: 'Oo=r@;^p@nSBo:Z8 w_qrQ{@ά[ $nj~75n|45MGSߏ?1?M͟揿?|]|BV ZZ'Z'E'Z'Ƿ>柢Z'Ƿ{,j~45?MuߪMϦgSjj~55_MͯwSnj~75棩hj>棩ij45?;Ǐ'2O± =!GJX~cȑZ91}|=>GJk[nr-O;~r{혜vz)ʱc45GShj~65?MϦgSjj~55_Mͯg\Xnj~75ߞqy~45MGS|x?M͟OS3.E'Oh5ЃOh5j>ފOh53.Oh5j>ދOZ'Oh5OZ'Oh55?%!yfH?JEEґ<`Q=JJ^%&yf{ z=JQE(Q5KVE(e5K[Q,j%0y@StkrNv@G;)Ogn 58nPav 82},t~[:2Ұ4lwmخڰݴ~4l ۇAAenA݉A݇nBO0:0:FFF{3 3 3 3 3)s;*s;*s{0enAenAenAenAenAenAenAenAenAenAenRɘug:w؞-O1*`JaqݙgKEug:w؞- aG9ҹavPavP܃)G8*;*{0AsUøL2;lƵer`J2;kƵerwVaQ av<FFF# LL理A&wA&w0:0:0;0;0;XqL<f3݃g`y1;6*;*;l;젒젒젒젒at@Zn>H=t@n }tr7p NRJ'wOa tr7f:{BJ5 ܇On`QS%fykqrfyuqr7Y>m;XN } cr7pmcrfr7W6'T-/W6=¹R87@ Vpn>kk@_+ P@_+c'l2Vum3Ĥ ˤn/^&uOT^/^&u}2{B%un` ݄I}: V=V gROWLx&uI@ Ƥ `L3I*XSR77c&uJ5 ܧJ5 ܧJPIfn`QɤndR7j2h5 LZM&uO>'0h5 ør7*ra@n:{BqO0n  伀ør73rja¸EMq 5 ,j*{Bq 5 ,j*XT7yq0nd7C-ZMq&øVa*h5 ZMq90nd70n`QSa¸q70n`QSa*XT70n`QSa¸EMqj{Gս@gȯh {0qo7{P7-{`uo7[r@XTh*n`QS@~7n`QSBS'To7T`Qߍ{@v&{*ڽ@ndo7jh5 䶍{V]v&{EMv5 .{EMv5 ,jXTo5 ,jXTo7n`QSzEMv?soZMv rϽ@ndo7jh5 {VzEn`QSf{ bo7 ,jXTo7^ ,j v5k{EMv5zdo7jh5 ZMv+ndo7jh5= Ĥ ¤n&u}0 'u9n&u}0kI'}Zͽ{Cn{C_{C{Cn{CnkEEA@BC@D@E͵{âaQsް}oX\/}oX\7,j}5׾ kE͵{C}oh5 9( 7{׾ 7{AVsް}oX\7,j}r}5׾ k׾ kE͵{âaQsް}oX\7,j>y//fom2doo~ !Ǖ !W !l ~5|79l ol _l )l 9Il k/E5|â !rG6E͵uâz5|â˺aQsejZ=|C~Íl ܳ7{6VsߐAj\7 7{6E5|â aQsߐ/߰foX\7,j5l k6l k6E5|â aQs߰foX\7,j5l ܳ7"/foh5l l ܳ7{6Vsߐ6j5l 5h߰foXlA{6E5k6E5|â aQ5kО aQs߰^l k6EgoX\{6VsjZ=|Cgondoh5l ܳ{6l \7gokeʞ !C !C =|C_+{6V2 e.ݡ7 /&nIoI_pMbݐ*(ߐCT(pIbZ |C_+{E͵ |âZ!g"P7{Vs/Z7{Vs/ߐ3(jeZͽ |Coh52 kE͵ |âZ~ 
|âZaQs-߰oX\7,je52 kE͵ |C |âZaQs-ZaQs-߰oh5kV󱖁oșoh52 ^7{AVs/Z!uP7,jE52 %ʉ2 ,'7,j2'7,jנ |âf{ /^2 5h/߰Y:(߰YR(߰Yj(߰^2 ^CJLaްO|OTa>תoSw~ >Hz0]ԥ?l!fu.'cwdwd`GAVzAVzJ0:J0:J}`LavPavPavPavPavPavPavPavPavPavPavPރ;;;~ΦߏUa <γ 8 < |0UAUAU*0;*0:,G  ]S1x`1x`1`*,*g`b}kTl,FAAAR Iatat<<<<<g#+Fld1`b0;X, **gb0;b0;b0;b0;b}htt18όRDObp ]t186H] gF*H/] .P EMqXT1YZ\ >ayoq18Y^]\ ,jXΐJ>C, W |B}苗苗@_,|r8SQ@0g}90 ˁQ* ˁQ}(pf8p=a{"1 h`PQ@ (p>h>E0E}e8(p`QSQ}+(p`QSQ}+( ,j5> VQ@(pd8j2 h5>a((poQ@n: -1G(p ,jXT8(p Uw8V`QS5* XT +Vd8j2 oZMF&VQ@n8 h5>|q8j2 XT8(p`QSQ@n8 XT8(p`QSQEMEO(p`QSQEME5,j* XT8(pd8j2 EGO(pd8;ZMF&VQ@(p m5,j F5kEMEO(p`QSQE(p`Q1 XT8^,j* XlA5> VQ@(pd8j2 nG&VQ2,Ӿ6i*h Lx}9o ; Lx}m_C?Q* c* X~հ9(p 'U䤊'T +*rRQuBE}1 XT +(p`QSQ@L8 h5ZMFO(pd8j2 h5VQ@(pd8j2 XT8(p`QSQ* XT8(p`QSQEME5,j* YG5>EMEZ͇V(p &ZMF&VQ@8 h5>q8j2 XT8(p`Q4@>a: X,!PG5Qf{ b5Qf{ b8Y5KQf: XlAZMF&%OOt;O tLQas(&p0Ea`>g:lS?lA]avP0;~>[:2FAFAF(0:(0:(0:(}ydxd`(0;(0;(fffffffffff>""o;`}|a Z?8 a{u>)9lˇa ZC?za{0x>92<*젂젂SqT0xd0x`0x`0x`0x>5N0|0  `0:`0;`}>` 缆A͆ld0xd0xd0xd0x>52<2 젂젂`y62|ld0x,FV `0;0;`0;`}`     GQAi$j Et0 Ei}5J:H) >@` g ,j*XT08p>EMf&pO> lՁ> lP!@_ld d8'',,T86@&  s@{o>z\a71h=v}51{BeCo`QSV`QSV`QS*XT7ٞ@od7j2h5 = @l9ȁgUo 7=/v7;r@Np9g9o  ,j*XT7o`QS*XT7o`QSEMe&V@8h5 ZMfOod7j2h5 |VEMerEMeOo`QSEMe5 ,j*XT7o jo5@ۺ&2ׄ)p`QS52ZEMՄ xׄh5:jEMehZMք&kn5@ɚpdM8j&h5YVkV5&mׄ&kEMՄ 5U䶍kEMՄ 5U,j&XTM 5U,j&XTM8p`QS5jEMՄ ZMք&kܨtM ZMքQpdM8j&h5Y r75@ɚp`QS5f{ bM8p`Q&XTM 5U,j&XlA 5kkEMՄ55jEĚp`QS5&h5Y ZMք&kCwM8j&h5Y> kDz&hY>j65@ϚpgM8S# rj5@ϚpgM_hWaM80 &ׄOV `\k'TF+0 r5=uBՄ}&XTF+p`QS5@a&h5Y ZMքOpdM8j&h5YkV5@ɚpdM8j&XTM8p`QS5&XTM8p`QS5jEMՄ 5U,j&!ׄ 5U>jEMՄ Z͇jV䡚p|&|BՄ&kV5@ɚp G\ P5@&h5Y,j&XlA 5K=5f &X, Qׄ%#p`Q&XlA 5KO5fI&X,UQׄ55@ɚpl5_N dؾ$9iar Wcؾ8R/0Sjx~ð}`5 avP7avPa<0;15<25<25|0AAA S SSjxdjxdjx>Q}0AA0;0;0;0;0;0;0;0;0;0;0;ffFFhccx?拟a/c8a{:v>9lSaƘ/cSkatatavPa,0;|0;|hf/f/> 9at)_52_<2_ `y62_|ldx,FV|0;0;|0;|}`g_AiŁk EtEiŁ}5J:_H)/>2@| ,j*_XT8p>ŁEMe&f|qOŁ> ~c$0_|B }}2_*h@_|hr8S@/g}O`8З'T8З'l-ŁD s@;|qb!@;| /,)_[*ŁEMaEMaEMO|q`QSf{3_|@&ŁV@|qd8j2_|g8j2_|@F9Jq 
7!6#'T8;Ɏr/@&;B.G9q !d#ā3t8q`QS"'T8q`QS"āEMEZMF&#āq8j2Bh5!P@qd8j2B)G&#āEME5!伊#āEMEP"āEME5!,j*BXT8q`QSOSja~-q gY6.ƁCs8_ m9'9q s6>Ɓo6䦮ƁLs8q`QSm^EMjzz5:ƁEMEzZM&Ɓm@ɶqd8jmh56>ƁVmm=&ƁEMj56䞎ƁEMj56,jmXTj56,jmXT8q`QSmƁEMjZM&ƁtjZMqd8jmh56lrm@ɶq`QSmf{ b8q`QmXTj56,jmXlAl5kƁEM5mƁEĶq`QSmmh56lZM&Ɓ_Vmmh5/Kn;_ȭy-5Ł@K|q F/ԈŁ@K}lWa8п Łq8p#r@8_|Bu9|q `/>]q80ݮ*_XT+nW`QSŁp8j2_h5/P@|qd8j2_ &ŁV@|qd8|q`QS'T8|q`QSŁEM5/,j*_XT8E5/,j*_|B5/C@Np8_h5/ZM&Ł`8j2_|B9\|qd8|q`Q1_X,ER%J|q`QtI/,j4ŁE|q`Q1_X,R%S|q`QJ/,j &ŁV}$y1_SxTwTwdx<6Tı1{9la6=F=F=f=iLiavPavP)8*{dL3m<δ0;1a\w0;øLLia\wô}`Ǵ0;H0;XLLLL'†AA` SxdxdxTxTx,?VLǴiavPiavPi)7*m<*mH6>Ɓti@q }t8p NRJOe t8f:m|B%(J56'Oq`QS fykqifyuq8Y>5;mqOƁ_> LP@_Ldd8'Łzq> ˞}<З=Ł6^h(mŁ Ӂ@k@q5a Z@k@ (,(P#Ł8 (,j*PO5(,j*PO5(>ŁEM@( ZM&ŁV@@ 3@@f3Áuf rי@:3]_g9p 3Á@pf8 r˙2ÁEMeOp`QS2ÁEMe 5 ZMf9pdf8j23h5>2ÁV@pdf8S! ZMf 5,j*3g 5>2ÁEMe 5,j*3XTf8p`QS2ÁEMe?Vs@n9P/w(>Ł s8_:Pȉ$9@q we(>Ło(֬Łs8@q`QS\EM +w*s5U:ŁEM*sZM&Łn܁@@qd8j2Ph5(nŁV*Pm&ŁEM 5(䶍ŁEM 5(,j*PXT 5(,j*PXT8@q`QSŁEM ZM&Łt ZM@qd8j2Ph5( rȁ@@q`QSf{ b8@q`Q1PXT 5(,j*PXlA 5kŁEM5ŁE@q`QS2Ph5( ZM&Ł`w8j2Ph5 P@pdf8j23g9pdfǎ?yܲA{A7Q g\ |cr @θA7OV g\ 䌋ā{|j5 ,j*XT8q ; lZM6&'T8jAh5 lr @qd8jAh5 ,jAXT8 ,jAXT8q`QS āEM5j5 āEM5jP āEM5C @P8nZM6&āV @q ; lP @NAh5 ,jAXlAl5KV f)AX,qQ7/q`QAXlAl5Kh fiAX,Q75 @qd8|~$fxؾ`/t#O0jؾT/(/Ӱ}4k@1lw iwa4t6A4탴avP7'cxdxd`    繇AA@0:@0:@}`   LavPavPavPavPavPavPavPavPavPavPavP)P<*;*;2P<2PA?l'A1;}td`G?AEAE0;0;}f=~batAatAaX0: DF'DF'D>x_by>{ldyTm,FAAAk atAatA)<2<2<*<*<g#+Fld` 0; 0; fDfDLLAavPAavPAavPAavPAa0: r -t9p "PA@ r mt9>:ϖRI)'T2^:H3D>EM "3'T9e`Q8| r`Q8X,DI`9'A}<'A*A@_ "Pq@_ "d9cD'3}3ϳg93|Bm<ȁdc9p=a{:0hMD& "PA@k r>hMD>ȁE}cd9T9 r>XT9 r>XT "5D,jg6d9j2h5D "ZM&'ll&ȁV@ y|B5q <Fǁr8 nr@5yC79q G<,jy|B5j5<,jyXT8qd8jyȉ7&ǁV@ <lZM6&ǁ q8jyXT8q`QS@yXT8 <,jyXT8q`QSǁEM5j5<,jyXT8j_: w,RN)r)@:=4SʁsrJ9S-N)r?)*fM 䦮SʁEMJ)5R,j!XTJ9r wJ9X`QS *XTC,XdJ9j2o՝RL)ZM&SʁV)@#9h5R>RʁqJ9j2XTJ9r`QS)@n8XTJ9r`QS)RʁEMOr`QS)RʁEMJ)5R,j*XTJ9rdJ9j2OOrdJ9N)ZM&SʁV)@ɔr 7RL)5R,j J)5kSʁEMOr`QS)RʁEĔr`Q1XTJ9^R,j*XlAL)5R> SʁV)@ɔrdJ9j2ȭy&SʁV)@ɔ 
RL)ZM&SʁqJ9S#N)ZMX#'T9g@!F9Wsx \#́qr@8|B9Ws`QS>vBE5U ,j*XT9&0ZMF&#́V*h5a0ZMF9osd9j2h5a05a,j*XT05a,j*XT9s`QS"́EME0r"́EMEOs`QS"́Vs|(|BՒkɁv@;Zr C\KC%P@,hXK,Xj+%Z KԵf霺Xl+%5 k'l+%5 kɁER=u-9Y§%5KԵf{_a-9jh5YK8joOZm bzض\ɋm/ՋmKbJضPZmb*ضH/5k/<^lw6b۝Ŷ9a}ŶƋbbvp_6}bvpY %D-y^K~1:ZAԒ_b$AԒ_|m|TR RJՒj%= 5Z{-pt^K`Qsa^YZTK,-%_Y^]TK`QsoDU<6}A*h nsUqO24/ Y 6xA__pOd/{Y ns`_p lqd/hM55A8p_К k|m$Y=k|ɞ5}_pܳ,jY n3c,jY 5 5q5`Qs_g/h55DVY ZMd/h55D8=5DV r_=|AN4)|A+|A*|A+|AN)|A6)|A+|A+/ȭ}/XC,j 5P{(V ZM/a/h5 D(V=|AP&B_3 _j"| `Qs_//XC,j=| `Qs_/XC,j 5P{(E=| `Qs_j-`~z@n7)1|A~ͯ]֧ƓwJ _@J _ J _J  Yu`QsO _'/X,jm 5{bn\ 5{[+pO _.XZH _j"1|A~í&H _j"1|Am&Á{bܶQbV 5{bE=1|An(1|`QsO _'/X,j=1|`QsO _'/X,j 5{bE=1|A&^Á{bV r/R ZM$/h5DbV rG ZM$/X,j $/X,j $/XÁ{bE=1|`Q!1|f{ BbE=1|f{ BbE=1|f{ BbE=1< ZM$/h5DbV r\ ZM$/h5Db8pO _j"1|A&_/ӇpOCQ[4QQ8py]0_0= 5{[+p_.X{^,j 59Ƣ&_j?/h5DV rE ZM/h5DV 5{Eͽ?/X,j 5{Eͽ?|`Qs_/ +/X,j?|`Qs|] ܢ{Aa{A[ty.9".}n彠/{ty/] rDH] rDH] G`Q}GE9]9] 5xty/X,}Muy/X,Muy/X,Muy/Xl彠DV] zz]]a kؾ\Ӎd>j8l_aR WjؾPSwؾLU/0ǰ1l|9c}c~fuOc _ ۇ]A,]at]at]ރ;;;;lFF= q냩;;{0uyAuyAuyAuyAuyAuyAuyAuyAuyAuyAuyA`.0;0;0:.0:.ﰽ9{7;dt=a\?|-a\?|q}2|qcw+Ǟﰽ:l0C| {SwdwdwTw>f9;{0 A|A|0:Ȟ0;{1a\w|q 㺳{0|qug`;|+?|A|cwdwdwdw> 6;;lFF=z { {zz`y{cw,?|VL%avP=avP=affffOB|@ F|{v t7R{B {4=*XT7o>l|B| A5[{',-5˫{EM|Ȟo|@*\esӁ== %do>TK}?xdoex7&0{vex7Ћn*fx7p b3{Bwb+û  c*5 ,j*5=»EMw ZMw&ûV@ndx7j2{dx7j2h5 䞥#bsr@9A"Gpn 7="Gw7;r/"EMEp&#V@Nv8h5 ZMFpOnd7j2h5 䠀#V"EMEprd"EMEpOn`QS"EMEp5 ,j*XT7n`QS"Eylt V@~u|n - Nq>7_]:ys9y|n =EMu5 ,j*XT>7nT`QS*QEMuN|n`QSݨQV@|n mv>7j2h5 ZMsK|nd>r@|n`QSEMs|n`QSEMs5=EMs5 ,j*XT>7|n`QSV@|n w=V@(:h5 ZMs&ܶq>7j2XT>7^ ,j*XlA5=EMs5kE|n`QSf{ b>7|n`Q1XT>ZMs&V@|n 7 shع C!vn90m &rS>CL 1e{Bl} ,gH)@8ew9Rm GJ=X|Ll9R ,j*eXT6pO`P)XEM%J5 0SV)@ɔmdJZMl&SV)@8eh5 LZMl&SEMlJ5=REMlJ5 ,j*eXT6m`QS)Rr6m`QS)*eXTertq@_HBb6㰁q63Bb6㰁 㰁ERwt6Y5'ca1㰁ERzt6Ybrq2h5 Tϗ@a 6l_arL7aް}K5l_aBLqa2 WiؾHüFo{ǰ} t~LamcMc> :1~1lfux2aAaAa80:80:80:8} ydvd`ǐ80;80;8fffffffffff= 0:8F?}2cqmG;qmƵe8Ƶe8vזa-ñø ~`/ ñQat؃);2;2;*;l0ff=y±±S8Ƶev>7k80-!a\[a80-øLqa0;aӢ+8aAacvdvdvdv>72;2;lFF= 
`y1{cv,8aVqL͵avPqavPqaffff"Ba)@8  Fa㰁$r t6R:{Ba㰁4q*VXT68l>{Ba5[',-5˫㰁V@@LbJZa5 'T. 'T5x ܇}!1BbzYq=a1d5Ɋ Uq zAz=Yq=*+ Uq 5Uq ,jOo5Uq=*EMU\ZMV\&+V@Ɋkd5jzd5jh5j2ȉ2ZOPk 7j XCq5rCۡ*=mZPk j 䔙CEMZ&CV@NY8h5j ZMZOPkd5j2h5j ƼCVBEMZ rޡBEMZOPk`QSBEMZ 5j ,j*XT5Pk`QSBEMZm5*w5_@:a'^Ok t5SNrЉ@8/ x dp5;NPFEM%^J5x ,j*XT(k`QS@~k`QSmF'T5Q`QSm@kd56;h5x LZM&^&%q5j2zB%^Kkd5k`QSq5k`QSEM%^JPEM%^J5x ,j*XT5k`QS@kd5ۍN1h5x LP@kd5j2='^&EM%^5Ek`QS*XT5k`Q1XlAL5x ,j &^J5kEM%^kd5j2h5x Lr'܉@[ s'lOAV=tUj ]T I`Q5u9j ;\T <pQ5.Pݨ@wUOnT`QSE{7*5Ս ,jXTQ5j F\T ,ZMU&'TQ5jh5YT ,rjE@ɢjdQ5jh5YT ,jXTQ5 UT ,jXTQ5j`QSEEMU*5UT @EMU*PEnzBM9oif4RnhY7 伉릁7q4RnhY7 Ԭru@Kͺ ۧTMS*릁ER)t4YB5Kufʺid4jnQ(M6n:l_az ۗ` ֆk5l_aJ `ۗiؾJE5Rt~;S;ǰ}`o onavPwavPa0;œn:n:nz0MAMAMAMS Stdtdt>{0MAMAM0;0;0;0;0;0;0;0;0;0;0;T7fUfUFY7FY7AM0:Ⱥ0:0;˧0;˧ø,>˧ø,㺳|:l?XYwOqY>㽇a{t>7|z0OAOAOAOswʧʧSnTtT`=˧ø,g׆qmY>Ƶet>4kT>ƵetזӃ|:O}`0;0;Xq,,,,gӆAOAOci ˧ ˧StdtdtTtTt,8OVq,3avPavPӃ)E6|:|:l<ʧʧʧʧʧ at@Zi>H]>=ʧt@i }t4p .RJOOi t4f|zB*5U> lOi`QSfykqfyuq4j|@@,PY@_+,ZaʺZa4pR ~i>ˁ}<ЗC'l!F@xF} gh4p gh gh4З=C  ܧ}34XTh4h>HXTh 5 ,j*4h5 ZMF&CV@ @hdh~r@n:yBE?g yF9h  Cϡ@84qF P ;EMF 5 ,j*4XTa'h`QS@~h`QS ;'Th4N`QS@hdh4_ ;4h5 ZMF&Cܔph4j24zBF)hdh4h`QSBܱph4h`QSBEMF PBEMF 5 ,j*4XTh4h`QS@hdh4[14h5 P@hdh4j24=F&CEMF5BEh`QS*4XTh4h`Q14XlA 5 ,j F 5kCEMFhdh4j24h5 r'ܡ@޽}/*,PE@nvnAmp4s.rE=h9 A9" Z EMAOR`QSE{k*5Z ,jXT4"h GJ\ ,ZMA&'T4jh5Y ,rE{K#Im;QOTXK] 06>AOEР"hd4i,T4X"hLSEЃ*4U i,T4X"hLSE`2MAe*yE`2MA!AO] O;yP]ϠɮgGJ HAO]Ϡɮgd3#%z=Mv=z4{Mv=egL$ iJ2^]Ϡɮgd3N?y~59_*3[/~8_ۯ~ +8__Y2L/*ckʘ7W71oP/&c1oP$?77g~q~q~q~xǸAF?ǸAF??Ssdsdsl?cޠcޠ)9 *9 *9 *9 *9 *9 *9 *9 *9 *9 *9 *cޠcޠc c c{xod3ǸAF?ǸuA>u3w P9;|ppgn ÝAOΠpgdAOΠpgd3gw=M;e w4 i*`;e w4p4o +=@<hO {A~hI i*,Tz2Mpe4 i,T4XNLS=`zA~-hLS!`B8 i*,T'i4 ;c@&{AO=Рhd4'whΠ7pglH`ِ!;|pglH`ِ!;eC wpA;e w4 i*,T3XpgLS`2M;&ÝAO :y}^a3i240,lW i*,lW i*,lW i*!ÝAOΠpgd3i2l; w|=p;T!Ggpoz>.|e+|42ͽp;ܛ4 2EM`ܢ2EMχe{oχ3 y|r;p;УFvrG C@_k>}͇AA|;}͇| ;}͇| }͇e{_vNa }͇e{_a|iD_󡧉CO}͇&=M55zk>4D_!sU}͇|ުfpk>#W5U}͇<C>W_!_|k>4|iD_3L}͇|CO}|k>4|X5i}͇e{_!̪Lsk>,Lsk>,42ͽLsk>,42ͽLsk>,4 
=M5z?i\Khde;[;e;ἲiv> e; ee;[;e;򨁲M·|ġlC~kl2=Ls<,ܳ=Ls<,ܳ4l2=Lsv>,384l2=_*Ls<,38=Ls<,38=Md;zv>v>4|i"D󡧉lC>wP|󡗀ò=,ao>,KK͇e {|aY|X7%͇~B|3742=Lso>,42=Lso>4|i"G Oo>4|i"]C>Q|󡧉fpo>c7zo>,4ۧ7i͇eS4fpo>,42͇eS42͇e{|afԁ2=9CO͇&=M7zo>n7zp9Iʜ$\e^|'*s>p9z(s>! 9ʜyHCe·[!i4T e{a^ e{a^|u{aYzXwie·e{!OD󡧉2COe`,'UT|i]2COe·&ʜD󡧉22ͽLs/s>,˜Ls/s>,˜422ͽLs/s>,˜422ͽT|X9zk>&5{_gCQ_!O3A_gC}͇<52S_!OL 4K O}͇e%2}͇&=M5@AW̱|W~A1_j|N~-K1_B|N 1@gzq_c+~q˜77Wc'sd'sd'3u2ǸAv2ǸAv2ǸAv2c ;c ;9 9 9L1oP1oP(L1oP1oP1n1ṉ7 c ;c ;c ;c{/ǸO64~ɆɆ揱9}9}9F 1 ͱɆ7Ȇ7Ȇgjhqlhqlhyjh'ƼA54ǼA54?S{lTCsd's׈̱;cFdtxL1^#v2|L1N9 v2㌟˿UdqdqdqdƸAv2ǸAv2bc ;c ;9 9 9 9 9 [N7Xb'3u2ǼAu2ǼAu2ǼAŲƼAu2ǼAu29 9 9 9 9 rdAѝ̃dEw2\;Aѝ~T6I(Kvt1'y., 94] z.=MF.dt1gw.=MF*|qt1i2,Tt1X>;0,Tt1X>;0,Tt񠢋2MEe.4gFe2MEe2MEe2ME?dt1i24] z.=MF|dlG.et1َ.+1g_6Ìcޠ^3ƼAbǼA\c c 8 8 8 8UUL]1n]1n]ű$g*y*y*~77777777777ng*y֍y֍q*q*Ͱ1n]ԭUUUۻuc c ޻v\v\16Ǹ]6Ǹ]6埕88&v\\\L1n1n1oPűܘ77gXyj.ql.8"5bsq׈ű8k☯8 6:~ocޠc`1n1n1nű77~>ldsqds35ǸA6ǸA6ǼA5ǼA5Ǽo\˿Ul.~77777~235ǼA5ǼA5ǼA5ǼA5cܠA~R6!xP bkts1=O9I7\ rn.L7,Ts1Xbp? {P`x2Ńs2`x27ϖ~8K:`?[A~T%;X Jp?2wP~d.7`p?2m`LS`rAO`dn0i274 z Tn0i274 z߯7dZA>r0^nT0g^n- {E)|aO" [A>s0iE4" zlT0iE4" zl`- aLS-`Z2M|aLS-`Z" iE,T0XaLS-`Z2Mej4" iE,T0Xl=M&[AO/Ok3+A>Tu0Gve+U" O v0o ]1 W |aOO\1 kW &+2MU e*4U1|ap .f0'$ܡ ;Tp 5CAp0CyHá i*T,TR72M e T&XPaLS9PaLS`R72M e YOq0i2T4* z T&i2T4* z yš Pad0i2T4* i*T,T0XPA e 4* i*T,T0XPaLS`B2M g ?=g gWt0 Lc ~fqlc1oPw~Xqmc q q ~777~pvdppdp3ǸAǸA㲟)88 *88 *8cޠcޠcޠcޠcޠcޠcޠcޠcޠcޠcޠm)88 *6 *6 288 288ƸA?Smdppdppdpplq gc.c{meppepvvV~{eppdppdp3ǸAǸAǼAqcޠcޠ)4 *8c :0*8u`TpׁQ)*8_ LQ10*8Fc˿G q q q ƸAFǸAF^c c )*8 2*8 2*8 **8 **8 {Ĩ7777777~3EǼAEǼAEǼAEǼAE3cܠAQ~5!:*xPQ `ktT0=:*܏9IG r LG**,TT0X`p?zPQ`w2Qs2Q`w2 ܏@ '4`p?CjOHi~7 * ڟR e0 e0X4L=M&ӀAOi4`dҀAOi4`dC<_O A>er/LyÁ w:&|_O AOA&AO_ z 4 i*,T/GJ.%`ِJ|!eC*ˆT ,R/X6_lH%`ِJ|_LS%Y z,=M&K|AN˺́ 3vC||__;wPy/ c΁ q/$Á_d/X_LSy` i*,T/X_LS`2M=e 4 [;e{48_LSy`AO__(;4 z =M&rU _\߁`* UQ/XA>r/ *(~`/X_LS`2Me 4 z =M|8 _dAO_d/+=Me 4'e 4'e T/X_LS`f_L}`/X_L}`/X_L}`/X߇ =M&AO߇c/? 
crw|Ln` &w7𠺁Ap704 y`2Mu<,T70XnAge4 噃4U i<,T70Xn`w&AOnAg&AOn`c z=Mv&AO`2MueT70Xn`LS`2Mue4 i,T70ȃ[4 z}X w\ 0A_2`}X AFq/W~5a/Xރ,,0e_\_\;ރ߇4cpl?6_dl) 8_bbl) 8_1_gzy_Cck~rleL/ cޠ^ӓcޠ^~i1ni1niϔۏKqLqL~47474~~3ǼAǼA?SpTpTpTpTpTpTpTpTpTpTpT^3ǼAƼAƸAǸA74gʫqLqLqLy1nǸpǤ~wgRplϹqL qL cQr/gJy񿠧_dAO񿠧_d/G>=Me4 i*,WE9`*UQN/XrzrU ^\|$^`(,/}Ȝ^/=M&szAN9 (9w}Ȝ^ U*szA~^f9W9 wN/i24 i*,TL#XAei4 i*,TN/X^LS1`rz2M69`b2M4*,TL#XFdN/i2=M&szAO9(^l xA~(^l xU/+}3wPQ 8m(^4 i*,T/X(^LSQ(^d/Gl z=MF*4 z=MF|(^d/X(^Lg/X(^Lg/X(AEe4 i4xFe4xFe4xFe}(^d/i24 zA>[v1/g.yA>[v1/g.lż -wPż -e󂾑X ؃y2Me*$FLSż`yUk i,T1j-2Me*ZAe4Uk i,T1/6yAOżb^d1j-AOżb^d1/#%.=M&yAOżb^LSż`y2M,T1/Xb^LSż`y2Me*4U ii'e*=Md,}X;X X.yż/y{0d,y@ż ϛ yA,lX i.y2͒r1/ȣP.}b^d1/ȣP.lX YwcOvl?Tcul?Tcy|7ctw~qlӎcn7ݘ7{XYYL1n1n1n廱d77g*ߍq,ߍq,ߍGm?SnTnT3ƼAƼAƼAƼAƼAƼAƼAƼAƼAƼAƼAU>SnTlTldndnl/0q,} 777^Y- ycd1ol?;NՋZxc,^a1o ycʽbc yc yc y7 7 7 7UULcn?33cn?3:c|wcoY}g*ߍen-wcʿ,ߍy*ߍyƸAƸAƸAbc wc wc1n1nTYYUU˿,}V`nTnTnTnTnTnTnTnl?wcޠwcޠwcޠwcޠwcq1n W]p?-];][t.5|] OwA廃*K\wU i|,T.?=]LS`f`f]LSEٯ ki~R#:Xkկ eI~uWp?-Ojs_AHӲAHׇ=M낞&uAO~]duAO~]dCV| ]Ot\ +tA>q.3t BW| ]-\ 9+tAO=MV肞&+tAO B4Y iB,TrqS..X~|ˏO`),?>V..Q˝\\}\\w6sqA=M₞r_׍ CsA~u܇ ǡ th_d%CsA~y\ CsA~\dh.i24,Th.X@ELS 4i*4,Th.X\LS`2Me Cs2M*e TTh.X@ELS\dh.5 z =M悞&CsAO`*4,?[ء`*4,?[g\3 ghBsA>ph.۞` i*4,Th.X\LS`BsAO\O;344 z Th.i244 z \LS`f{\LS`f{\LS 4 i*4,l i 4 i 4 i 4\dh.i24p +tA>!v.'Į}BbW|B ]O] +tU +tA>!v.軌` (2MUeDLS`*tU@ iB,T (2MUeT.XJLS`*t2MUtB4Y z=MV4Y z=MVEQ!( {y=EypDEQ<ߠ(C>S(\p=4{i" D!\( Da{X^o{X{a{[p=[p=,^oDoADoAz=䓛_/_W}{=uFu߂{//]luFu[u򣴺oOݷ&o=Mtz=,ܻo4^2ͽܻo4^2ͽLs=,ܻo42ͽLs=,ܻo-o4^2ͽܻo4^2ͽDC~COݷ&o=Mtz=,?۽lC~ݻov=,?۽}{ݷ}{=mrݷe{a}{Xwiݷe{COݷ|$[GCOݷ}{iDC>P2ͽLGa}{XǣLs2ͽLs=,l}{XǣLs=,l}{Xwi=,ܻo=4}{i6~F!}gtbC>>V1!U{Ÿ||bC>>V1!U UOz(=4Q Ÿe{a^{XnIe{1a^ e{1a^ e{1a^{J{1a^+yXJiŸe{1!*=4Q{iD1.Jz(=4Q{iMT{iD1bCOŸe{1a^{X{1a^{XiŸe{1a^{XiŸe{1!BLs/=4Q{KbC_2{1/q}P{KbCQ1ad%bCQ1_MP{Wi`(=,,(ijqyKŸAz(=.=q=M~6>Smlэ?7>Sml?1t7>ӽSjdmdmdmlYq,q,}";Y~SE5{1+,ocWX~~r6{己 777gy,ƸAƼA~cޠoc oc1X~ߏ己@Θ~'|1xocX~=~r:1Ƽ:777~ ldmdml? 
6 6 _86 6 6 6 6 y>SmTmT3ƼAƼAƼAƼAƼA6 6 6 6 6]~ r.SA巃*E߂\oA~ 4I(]~;[t-evP`o2M߂Ѓ*4U~ iO-.,[\~ i..4Ua z,\p?OGV g ~:" ^a.'pUp?<Z},\ pAOY,\d.i2 wPY,\d.i2 !pA>fv.,Y 8 g|,\Z apA>nq.Y,Ae႞&pAOY,\O5 z4;~[,A˽~[+`Wo {Eo ^a-{ۂWo mo +ۂ~[%o mA~]~Auto ;Cۂ ?S<ۂ&mAO~[LS`2M,T!X~[LS`m2Mۂe>4o iw54}ipP`2Mu&mAO Ft-i4o z A`!(vP`!(,?ւ?Z Tւ?Z;Z_;Zܟ 4Z i*,Th-XZLSZdh-'PloZ z =M*4Z z =MւVݡZLS`f{ZLS`f{ZLS 4Z i*,loZ i7 4Z i7 4Z i7 4ڇM󲯈ymA>/s-m߼dL(׉ :D[rh QmA>u-i2v2M&ڂ&mAOJ4 i*,T-7eJ4h;]5x况~G7ԏ6NcI1nUTEYEYEYE7qq}*7*7*~3UƼAUƼAU>SmTmTmTmTmTmTmTmTmTmTmT3UƼAƼAƸAVƸAVSc h4 6 6 6ƸAVƸAV>S-gjߩZ^*V~U1+^aml?YWXE*~{dmdmd3UƸAVƸAVƼAU~Rv|nq,XY>R,T,i5vϲ5܏Z3AƂ~2^<[ceЋgk,>dk,ų5O)AOXdkZcAOXdk,i2D]Ȃ|YO5" CdA2p,kt! o8DȂ4;Xߥ8'Ă|XO7 {=nūœX_9'*Ή9 9'4 z̉=MĂeʉ4U7i*'vP9`2MĂeʉ4 i*',T XXLS9 TqN,XALSuʉ4U7in4 z̉}sbAO9ɜXd1,X*aT1,X*} .} vPŰ rw1,[Ű`U [PŰ`a2MÂe*4U z,=MÂ|bŰbXd1젊aAOŰbXd1,] z,4U i,4U i,4U ;bXLSŰ`a2Ű`f{ObXLSŰ`f{ObXLSɫ`f{ObXLSɫY |O\ 򩗋aA>r1,ȧ^.c1,ȧ^.@Ű ȺbXd] 򁬋aAOŰbAÂ&aAOŰ`a2M,e*4U `a2M*e,T1,XbA,e*4U )*4i*e,T1,XbXpOY4U ŰbXd1RAOŰbXd1,C.=MÂ&aAOŰbXLSŰ`a2M,T1,XbXLSŰ`a2MÂe*4U i!Âe*=MÂ<:bX`\ Ua1,bX`\ RrF.yŰ_0X Ű_0X iY,,m#ÂemaAOŰbXg\ ;f4[1}?05c8(g`l?6c)L~:qlc^ccc7ȼ7ȼ7ȼ~qdkd3ƸAƸAӱ)5 *5 *^cޠ^cޠ^cޠ^cޠ^cޠ^cޠ^cޠ^cޠ^cޠ^cޠ^cޠE)5 *Y4 *Y4 25 25Lɢ1ny1ny1ny=Y4 25 7gJЌ~v#x1cL>bk_cy}#c _c _c _)5 25 2c/Ǽ~4jfkl?7]35]3^c5^c5^)5]35뮼~Fʼwͼט7X^[ۏ>q{q{ƸAƸA>SkdkdkTkTkTkTkTkT3ƼAƼAƼAƼAƼA)5 *5 *5 *5 *5 r{Ay{E罂\^Ay~1I:(:Wt+e:uPy`^2M彂ƃ{4 iO-{,[ i.{4 z{l{s}p?sPy~'  ϼWp? 
Gv  z{#,Id+i24:Wd+i24 z{dy i8S 罂|W ^A>p+{y 86罂&^ z{=M潂&^A>,p+i2uP`Y:\>u|2WO`Y:\ z}p>v^;\A>r;uP q+7t͑ Q8stTP s+ȯF #]A>4s+i24 zt4 i*(,T"]2Met4 i*,T+X@LS`"]A~5HWLSA` i*(,TP i24 [;G&#]AOHW= IGHW= d+o' d"]A~qHWз #]r *,"]2t4 i*4 ztLǑ=5#]AOHAE&#]AOHWp;4 i*,l i*,l i*uP`"]2MEe=5#]2`"]2`*S2`*S2?S/<^A>r+ȧ^{y z9y z9~M`^A&0Y罂&^AOyɼA彂&^AOy`^2Mu'e{4 ,T+XAu'e{4:DLSy`^;qPy`2Mu'e{4 ݉`^24 z{Tw"i24 z{yHyɼWd+i24 i*,T+XA彂e{4 i*,T+XWLSy`^2M彂Sjl?7_VzcgjUqlUqlU'H?SjTjT3ƼAƼAƼAƼAƼAƼAƼAƼAƼAƼAƼA5v>SjTcgTcgdjdjl?< U;c [Uc [Uc [Uc{cgdjdjlo;V2ƿ;Vc!1ر߯cXcر=Ǝ1v#˟{1cXǸAvƸAvƸAv>Sjd~Pzgvgfx5}n#v>S7kYcLݬ1Lݬ1Lݬ1לּ5Hݬ1o5 5 5 5F"qfqf}n7n7n֘7n֘7n֘7n֘7n֘7n֘7ngfyfyfyfyfyf3?S7kT7kT7kT7kT7kl?9  G2Y rfFwܣYd`t7+QuPݬ wnVt7렺Y2Muef i,,Z:X>,,] i*p4Y ~WLp?6ܟg0A~l0ݲ~O0~nY 'ZQ~6WAOAկ&WAOUdWA>q*G!\0ā~$!\Dā WA>q*C \x΁ 8puPUd*i2pS&W )STˆ )S!3UAo7LUb* 1S(g~+Q)VA~?UNan7|U ܰ 27탿x=ٰ 27 ? a4ٰ zlX=M6&V2M5e}`Vհ iejX4հ ia,T*X߷ia,T*ȯ)ܰ ie}jX42M}d*ia7hnX=M6&V 2UA~LUU .c*軌 t*軌T=3UAeT]LUeT4ff*S,T*XLUd*i2S#gT=Mf&3U zT=Mf&3UA~LUd*XLULmf*XLULmf*XLAeeT4 iT4ffe%4ffe%4ff? 
]}gWnXlW ]a+7|vUϮܰ  ]acU7|U/lX=M6&VAO ɆA5&VAO `V2MejX4հ ,T*XAejX4հ:BLS `VAV2Me+4հ ia 2M5>d*ia4ٰ:Bd*ia4ٰ VAO ɆUd*ia,T*XULS jX4հ ia,T*XULS `V21 `Vհ ia4ٰ tVAr*†UW VAq*XV,6~`*ȃ[nXT* 6emVհ iVAsC6&VAs`{ņUd*iAagSaؾ|ؾVΎ3O=#<~ؾؾg +:VLa1oPa1oPa1oPa1oPa1oPa1oPa1oPa1oPїV}}VVڎq +}7Ȱ7Ȱ7Ȱ}VVۣ/?ưgqǰ~rq?+JcV1x14{agcXiJc1na1na1naVVL?yɐ 9~>Sil?S1Ɵ=8ԘKcٵ105ơ>SjCqC}8Ԙ8Ԙ8Ԙ[=bjUj,15 25 25 25ۏqCqC}87878Ԙ78Ԙ78Ԙ78Ԙ78Ԙ78Ԙ78gCyCyCyCyCyCg"?SjTjTjTj,# rCAqCEǡ\PAq~.I:(:8Tt*e:uPq`P2MšC4 iO-C,[ i.C4 zCxAqp?YrPq~$ ɒw8Tp?n GEd*2܏3 Aš&PAOqC=Mơ&PAOq2O6ͩjNͩ 9s"7 9"7>ds*EnNͩ~>䠚SA>ts*FnNTs*i94ٜ zlNͩA5eCjNLͩ`ِS!5>ds* 9؜ zClN!6|L9x79nN>ͩ H9c7ͩ/^O6ͩ ps*ȏnN=M6&SAOͩTLSͩ`~y>XA5e`S2M5ejN4՜ iejN4՜ 7e`~yS2M|LS<4ٜ zlNŜSAOͩA`R9*{娠娠.G},Qr,J{rAe*G4U z,G=M|r娠rTd9QAO娠rTd9*] z,G4U i7,G4U i7,G4U:rTLS`Q2`f{rTLS`f{rTLSi?>&|SOx OA>r`)('|ӇL<ʉ :OAOSd)i24x:Sd)i2,T)X@LS`OA>su)X@LS`O2M%=,T)XSpJ<4i=,T)XSpo4xSdAOSd)cN<=M&&OAOSLS`O2M%*,T)XSLS`O2M%eJ<4 &eJ<T)XSd)(N<yNʉ OA_&*L<yʼn`*J<lox  I9tP_0x iYL<T)XSLSSd)WN<lox zL<=M&Ӈ{bM׶^f]6;ǚmNmvXpl5]۶zm;)xm[kvH6kFm'Xӵ|5op5]Xk kk kk !=tC2׼=$sD7Xӵ5n=$sD7X5nk[H7X5nk[HbMc{~5]ێ]k{ kk:X5c5]=XӵXk{ kk bM׸AĚq5kv_:쿜a<C"Oxy#O׶׶3cZ5ׂێ]kp ߲eG]`kᨱ=uaG]PmD8=uCk Q׸AqG]ێ] "um;vD87p "uD87y{87y{87y{8jlG]p5opG]p5opG]pԵH k G]p5nPᨇ\Q#9D{8!pCQᨇܣQ|9Ir GpCRᨇ\Q=LsG=,Q|=LsG=,Q4i. 
G=,Q=MnnnU{8v^vv^wpCᨇԇyAzG=>NF<܎RpCOᨇ&Q=D8ꡧpCOᨇ_+~{r*'nY?<,SևUp\=5e2WMA|ȦC>fSj|ҦC>kSvF&g2WMzi"sDꡧC>RꡧC>Qa{a{j=Q=F/6zT}ѣz'QT!?G{T]zT@=|C>P!S*J_Q={Q=*Q='WzQ=4ѣziGDam{aޣzXiLsQ=,{T42ͽGLse{aޣz/UԣzXJ2W{aޣzXzQ=4ѣzԣziGDr!cTra3CHN=C~ǨCHNC~CHN=,ÞzXn=9-S=9LsON=,ܓS=M$zHN=%-S=M$zHNCOɩ&S=M$n%zHN=,p+9LsON=,ܓS4[$iɩzX'iɩe-S4[$iU_D!2 _ ԆzȇLjC6C>dR!2 з PIm|6COm&P=MzhC=4цzi P=MzhC=,P4h2ͽ LsoC=Qiтe{aކzX{aކzXnP4h2=ZLsoC=,P^m&P=MzhChCOm&P=MP=MzhC=4цzi L6ͽ LsoC=,P462ͽ LsoC=,P462ͽ L} Baކ me{ꡧP Wm*hC=UA!ϛ \ loІz pP /hC=,loІ me{aކzi D!B loІzi D*z f,u}5';;o:۷:Lu}c}cJz5:tT3աƼAաƼAաƼAաƼAաƼAk>SjTfTfdjdjl?/: 5c Pc Pc Pc{fdjdjl/|GTߝ娱p,Gr~.oQ?r?x5r~oQc)1n1n1nTL?kTMV|cFJRcFJ*5LU~,lJuUD_'XY_k~3ƸyƸy>S9j̛W9j̛W9j̛/65ͫ5͗YYYYۏq,Gq,GƸAƸA>S9jd9jd9jT9jT9jT9jT9jT9jT93ƼAƼAƼAƼAƼA5 5 5 ,Gy*Gq.GBI rAܢQA rTp?$] r.GT9*]2]:rTLS`QAe*G4U i,T9*XrTLS`frTp8OT9*f g~%ݲ[~C-Qxjp?7ZT9*f z,G=M&QAO娠ri~?SY N;+uPYoJs2Mes2pnC֪V>ת|ZUO\ G#|ZU\ V?תU ڵ VuPZUd*iV0ת&SrԜ KԇlN}؜ 9%cs*KT0ݜA7`A5ͩ ?X9@7|.T9uPT_ 91nNͩTds*i94ٜ iݦTLSͩ`S2MN~LSͩ`S2M5ejN4;2M5ejNuS2M5ejNTs*XTLSͩTds*o ݜ zlN=M6vͩ`3՜ v`s*ہͩ ts*ہͩjNŹSAlNTjN߲9uPͩ`S2M5&SAOͩ 9ues*i94ٜ:Tds*i94ٜ q7&S2ͿݸS2M5ejN4[6ejNTs*XTLSͩ`f{TL=/[dsꠚSA=ٜ SA>ps*gnNxͩjNxͩ O9-Tܜ zlN=M6&SAOͩTds*i9uPͩq/r$Imp@!?wTM[UD(cwٞ&SAOͩ`S2MejN4՜ ϿS2Me!4՜ i9uQ1`S2M5O_Ts*XBLS1`S2M5|Tds*i94ٜBds*i94ٜ 򉿛SAOͩTds*i9,Ts*XTLSͩjN4՜ i9,Ts*XTLSͩ`S25ͩ`S՜ i9,(RyHA 9H%c*K UЗA 8H%cb U& Ry~A Rn UL}cꢂT2Me R=M&TArb Ud*i2HuQA UlA`?1I5G;'; )I5ul?7ul?wLI}c}cN|ǔ':zLI1oPI1oPI1oPI1oPI1oPIcJRyJyJqLRqLRIǸA&)3 2I5 2I5 2I5'qƸA&ƸA&,ӱ)sr+r5b ccxu`kl?B8~y105Ƹ3cXc1ny1ny1nyc5R`ר05ktt+gnA>tt+ȇfndt+fnѭ~2颢[A>ewt+gn]Tt+i24 znѭtV+u鬠 /鬠 / owױ.ױ:Va\ чXAs_XAs+;.cu:Vd+i4Y i,MSu`X2Mec4U i,T+Xz@LSu`XAqq+X:VLSuc4U i4Y zcy3u:EVЋg+3 XA/ `yV,^`YX `4 i*4 z `d#,XAOE&XAOV z `4 i `4 i `4VLSa`P2~WAޏw*n\E?C&7Uݸ ~Wո !WA>dr*WWAOUd*iq4ٸ zl\=M6&Wո zl\=M6ej\4mT*XE5|ULS}` 2M5ej\]T_!XULS q,T_!XBLS`WA>`w*iq4ٸ zl\]T_!iq4ٸ zl\ݍUd*iq4ٸ iq,T*XE5ej\4ո iq,T*XULS`fULSj\4ո iquzm*E `}= z2Y `]l  8uQ`fVLS `4 i*4 z `yXAOE&XAOXy>͟M25G;)5/vl?7ul?wL }cɼ}cRcyǔ+G:'zL }cޠXcޠ7ǔ9֘7͘777~,td똲7c Xc Xc 
Xc{fdkdklO/[Xcqcc)5$Ώ1uL 1^?Cc~Lg15/{Y?tCOqLgqLgS:kd:k)]0t_yaVkO1_¬Zc~j珎)5ō=Y105 25 25 d:k̿~Ycp*' .Uyͥ1/ĥ oT RUd*iT4Y z,U4U iۦoJUU ie*U4U iT,T*XiT,T*.U4U iTuQ`JU2M&KUAO ケT4YRU\ z,Ux^²TuQ`JU2M&KUAO Tu}e*iT4YRUd*iT4Y nKUAO`JU2`JU2`JUU i*4ouwݛ {SAvo* {SA>qo*;My۽ p7uQ ̸7&{SAOTdo*i74ٛ zM=M&{S՛ zM=MeM43i7,To*M43i*g,To*XE eM4՛ {S2M e4՛ i7~2M74ٛ zM]T i74ٛ zMTڽTdo*i74ٛ i7,To*XEeM4՛ iAM4 eM4 eM]To*XTLS? bU*W1Q} b3Ff*1Q}>1FcTAqbT21`bT i*F,T*i2F4 cTۧ7ƨ&cTAO1Q=Mƨ&cTAO1 AO?%~K ǔ7;5;g<)G5ul?~7ul~rTcɻ}cLcH)G5Otl_~uTJrTcޠrTcޠR2ǔ~qQSJfdjdjdjlOɌqQqQ9cu[2VcJSj]1_w%hxݙ:T7~:q`j`jl?8%&~11181%ƸA&ƸA&sLc~J &|+/L\1q5~pWcِ@C{Wc Wc Wc Wc Wc'ȏ)5ߋ~||毎)5׃Wc~=(5C71__q_q_GƸAƸAS{c Wc Wǔ:՘7՘7՘7՘7՘7~WcޠWcޠWc`l՘77U+t*_r_]T*-: r_3AN GE寂ܥWA._4 i*d^T*XULS`W2M寂e_4 i/X3Bdep6=0FʠƈUp?d*1b_s?vQ~F( F񵋊X=MF&#VAOgY.*♿ z_╿ Qe_╿U,^`YW8rmjcPm ˺Y9 ѹS::|T6Vp?buQm Z.4 zlc=M|:6Av.SyQUU cSTA>p*ȻNQy)9/-) oz9EFUd*i2E4 zLQ4 iei*E,4 i*E,T*XULS)`RT2MULS)`RT i*E,T*i2E4 >STAOA J[-RAA o9(nTo z Jn ݶ JnTLSA`RAOAɠT>2(4 z J]TP*i2(4 z JyAɠTLSA`f ʠTLSA`f ʠTLS;]O5yq :[ǝ-NA>p);yq :tQq X8ǝ&NAOqɸSd)i24w z;=MƝ&Nw z;=MƝe;4i*,T)ȧ^;4i*,T)XEe;4w N2Me4w i*Yǝ&NAOqɸE&NAOqɸSUw z;=MƝ&NAOq`N2Mŝei*,T)XSL} b)Xپ1,T)Xپ1,TN2Mŝe;]M} O_5QAo诚QՌ zClF!6QAq3* u}(d3*76<5fE5eC!Q2M5.,T3*XfTd3*i&7.lF=M6&QՌ zlF=M6&QAs3*if2R:X?@ٛG{c1G;Sojl?A7o|l~tl1yۛԛ՛u9Ԙ7˘777uٛ:777uٛٛBԛuWoj]=cM75;RclJ xٔugSjl?a86~M1nM1nM1nM\7Ȧ7Ȧ15ƸA6g|m՛ۏ9g|m {Sc1_-{ScM皎75jۍ1n1n1n1n1n~|~,Rc3c)5 )5 ) ݔ GRՔ rnJF7ܣR(htS*Q)uQM wTtSꢚR2M5ejJՔ i),TS*XTLSM`R2M5eRGp?ܟIAM~3聱)Iٔ z`lJXI?jJIqp?VO]TS*i)4ٔlTSF=jf5Q2je`Q2je.*,V6*XFlAf5RՔ RA>zuS*ȇnJs.A>sS*'qnJdS*ȇqnJ8M~墚RApS*ȇnJ]TS*i)4ٔ zlJtMt)ȇ?y; o8[ǟ|S ƔO3_OAޝr)Ȼ?񧠧Sd)i24 i*,4mT)Xi*,T)XSLS`O2Mşe?yW`O2Mş.*,T)XSd)i2 +ǟ&O z?[Ɵ-OAr)2tQ o:n *,mD *tQ`O2Mş&OAO 8t}e)i24Sd)i24 OAO`O2A`OAONAKu~v9{N8y/ :p Nk*T'E%Sp zL8=M&&NAO ɄSd)i24p zL8]T)i24p i*,O4p i*('ei`~Z?XSLS i`N2M%|8SLS?,O4p i*Q'&NAO ɄE~d)i24p )NAO ɄSd)i2,T)XY~Z `N2M%ekN &ekN2M%ekN2M%.*,T)XE%eiAv. 
4ϚU zT*i 4Y z,Cye2Ye2TL}d*X>k ,TP2Me*C=M&PAKrb2Td*i uQe2Td*i 9&P2pr*XY2N4K6gg㳭1!>aQ}>^[m'gy϶ggۋjlQ}>^QmGP?^Om5F7ר>"^܋0y{3n5ϸAԨ>ۊ0qQEϸAԨ>QD곭7g jTmU$7>&RgȱT7>&gJUq(U}@3n϶㐟} k(U} T7Rg JUm ?QDjl/U} T5>-/(U}k>-/(U}k>-/(U}kRg(U}KUmG>~>QD3nϸA>Q{l/U}_oIϸyRg^̛KUyT>Rg|7ϸy>QDg JUq(U}2D3nT7Rg JUy{3op/U} ϼe{3op/U} ϼT7>Rg^,?QKUy{3op/U} ϟ(U} ϸAr*U=c?U ܢJUFr*U=N>$UzQTKUKr*UR2ͽTLs/U=NR2ͽTLs/U=,KU4R2ͽTLs/U=,,_~I/ngHbvv/ ѭ۱aon=,}{taۣ[dn=kѭۉ9z&{ys ѭa&[=_O5\knG1{_u=nÞzX^{+ÞzX^{kn -|lC>V!]p;O*zgjy +zjy=N&CP!e[^=Mzhy=4zjy]<->U!(wz{) }=YB_tCWp+< )7zț* }=7Wz}=4zi"DazXizXie{azXie{azț* }=,C_4Wp}=,C_4CO&B_yO&Z^=Mzhy=4z}jy=4 -CO-e{af2ͽ[^42ͽD롧C>%Q+>kD롧Wpoy=4ziD!DazX>kD!o!oKUzۥ*r=Pzȧ*r=Rv\yTE^zۥ*r=|z(r=4QziD롧"COE&\=Mz(r"COE&\4"2ͽLs/r=,ݦWiEe{+WiEe{+WiEe{!Ls$<,+ 4"2ͽ?UziD롧"Wp$<4QziD!ED롧"COE&\4"2ͽLs/r"2ͽL} B+ؾL} Ba^zXپLs/r"2ͽLs/r"2ͽL??&?im_~\Ͽ?y=,}{ak4{W6f}T%Y}TG_=,l4{X>#hLs=,l4{X͂{a4{Xz=44{ȳY Guz=44 A&f=Mz=Az=,,0ifAeʁ|cllٖcc J)i6{_Yc kl?;)i6~wlEٖcJ/1oPI1oPI1oP1oP1nI1nI=4 2ivL1nI1nI1nI=4 2i6 2i6>И7٘7~fcބfcބCcfcfc1nI1nIߗ41&ƸA&ƸA&ƸA&c fc fǔ44;̝|ݕu/=̝+71^{;uglם1Sjc1n1n1n1n1n1n1nI1o*i6u;٘w٘w٘w]Mel̻Vl̻.2i6]3i6]3i6<~rdldll?< 2i6 2ivLI1nI1nI1oPI1oPI1oPI1owA%ƼA%͎)i6 *i6 *i6 *i6 *i6 ϘLSlTlTl,1444 rNAIJ E'͂\fAI~d8I:i(4Yt,e:ivQI`f2M%͂J44 i*i,T,XYLSI`f2T˼ 1O!҂ޠ7RZp?vXJ zCߐbh7Z )d -!Ђ~'1Z>EЂ9_]T,Xg; z]T,ųw; ūw; ūwvQ`Yzgxw4; zXJRZO]J yKiA>w)-y KiA>u) KiA>u)-.#OUJ KiA>!w)JiAORZd)-Ǣy1 X8vQ1 8 Ђ|ZEC chLC chAގq -.=MЂ&chAO1ZLS1`2M.*,ݦZLS14C i*,T -XZLS1 o8,T -XEЂe4C z=MЂSE΂^{gAY7; z}]T,{>΂e}I΂e}]T,XYLSYd,G(]l'; z=M.w4; z=M΂YbAr^~>Ȼ΋ywy N:/%c^,΋ywy?5 b i*/Ił&bAOyɼXd^,i2/4 z̋=Mł&b z̋=Młeʋ4,i*/,ݦfALSy`b,i*/,T^좚2Młeʋy`2M5 eʋ4 qbAOyɼXd^좚AOyɼXd^,x΋=Mł&bAOyɼXLSy`2M.*/,ݦXLSykb25y`b25y`b i*/,T^b2Młeʋ=Mł5 a5=Ϳk_e 9҂B+A{8i24n ؃mAO`m25`mn i*,l_*v} b-Xپ1v} b-Xپ1,T-Xپ1,Tm2MۂeJ=Mۂ&mAsbt[d-i2vQ鶠t[d-i29Fۂ&m2Mۂekm2$~^zdndnl?*= 2 7 2 wLI1nI1nI1oPI1oPI1oPI1oPI1oPI1oPIcJyJyJyJyJy'pǔO&ƼA%ƸA'\p'pL rNF'ܣpwt.Q: wQI w$\tp2M%eJc i* ,T.X$\LSI`p2M%eɳy`A^\p?؋ ?G؋ z`?;$\%( , Jn /IQp?|QI~20Ep?E܏_T. 
$7gG7\ gG.*&k1_+kE1~)X^+kE1kE1`y(&w1ɘ\dL.iLnmwI.AbvQ/D^_\ zU z,=M&zA>UvQ/W.E*yE C,|^7T]ԻJf^wU] nzA2pQ/i4Y z,=Me*4U iwQE`6m*4U i,TQ/X^LSE`zArQ/X^LSE*4U i4Y z,yE*>^zAE 梁XԻ^wa] z},T ,TԻ^LSE`zAOEɢ^O\Ի>N4Y zd/.yĽ %yҽ o@ H₼Y^\7݋ {qAހt/.]T/.X^\܋ z=M₞&{qAO^\d/.i4ً z]T/.i4ً i,T(!X^\LS ۸,T(!XPBLS`zqJi,T/.!4Ji*,T/.X^\O݋ z=M₞&{qJz=M₞&{qA>r/.i4ً z=Me4JiwQ`B 25k{q25`zq25`zqՋ i,T/zq2Me=M₞&{qA>w/.i4ً z=M|^\d/zqA>w/.i,T/.Xپ,T/zq2Mek_.A4 .A4 e4 e]T/.X^\LS^\d/.ȣP]l_؋ z=M.4ً z=M<^\d/.X^\L} b/.XYbi4i`{qK2ijA =XپWi45NvRWzl?9_2W.~slc/~}7_ݱ퇃wsL]-gl?c wc wc wc wc wc wc w԰ۏSnUnUn-6a7a7ٰvٰvٰ(Snl?>\T.XjCwٰ Ɇ]N6Qj<گ vJw/ȇU 0+Ap*AO_d/$! %;pT/XDLS5`z2ME|^dT/i24ջDdT/i24 izAOQɨ^dT/i2,TT/XDL} bT/XDL} bTbĨ^L} bT/XDL} bT/XEEe4ջ^LSQ`zAOQɨ^ z=MF&zAOQ ;4ջ^ z4 iA4ջ^LSQ`fWQkz25Qkz25Q`z25Q`z i*,TT/i24 䑣z F&zAOQ=MF&zAOQ 94 i*,l_ iҚz2[sT/Cq4KqQ k:w} bT/XپWk4K{>͟/، OU33ۧ3/glؾcG3'3i0cދb}c:Xߘ7(֘7(7X7X;(7X7X7X;`֘b}cO)7XߘYcSh`ol?:M07Xߗ1ƸOƸOƸO#c'c}c'c}ڍږ|mY-=*2k[{eo/>)7O|qqqqqqqStoTtoTto,17 *7 Stol?j< 273>~~dtodt{c {c {cޠ{cޠ{cޠ{cޠ{cޠ{cAEƼAEƼAEƼAEƼAEƼyѽcyyy {cޠ{cܠ{AѽSAѽjKEG\{Aѽ~4=I:(ݻ^tt/e:wQѽ`{2ME4 i*,Tt/X^LSѽ`{2MEփ-s} z[-s}zлe/HEлe/2"ݲOd/? 
|AQX /*c/Wy 'M~ 2K鼋Jd:/ y[`٭y[.*,U:/Xvu`:/i24 zL=M&yAOz~AO=)AO~Ee4 iL4 i.d\Ap\0ȃ yq ȑAp\0G q~XA>r\0ȣ ^T\0i2.4 z q :.=sc`_ A>Ct\0Ȼ ^T*&KKp\0[ yq 9.4 z =M&2Me 4`LSq`6m 4 i*.,T\0X`LSq o]:.,T\0XEe 4 z =M~` zC^T?0!/n!eCC!AxQ`2M&AO xQ aW| `\ +Ap0' o5W EUey{ `d0i4Y z=MV&+AO EU&+AO`*2M/.*eCۆ뻨NElH`ِr}թ ),R/ȧˆԩ7u*M`r}A>r/i24 z]T"i24 z|Ź\_d/i24 i*,T"Xپ0,T"Xپ0w}_a/Xپ0,T"Xپ0,Tr}2Me]T/X\_LS\_d/ȧ=M&s}AO\_O z]T/ȧ=Me4e]T/X\_L} \5`f\5`f\_LS`f\_LS4 i*4 zyǹks}AO\E&s}AO\_̜ z4 iA4Kk͹`fɭ913e\_' ؾ1,l_ iZ,,e{ 2m?v6϶Ul;6϶E~ mc{mmcl;6϶>϶~l[g[nkmp~ ϶g 2q~>s[q~ "7 g[n3nϸAdg{3_=vRtl~g?uS\#;n;yv3϶Dg|B"3g!x ϸ]$?vE" g.qHϸs1#{yB>3_={yB>3_={yB>3^w?{mg>|gۉϸA?q~ "7|g q[6?>g,6"-?>˿H ~}"-g9=-v 3i϶c{Zlg ҂qH iϸA?ӂy{Z3opO ~ iϼ=-7lOӂy{ZplO ~ ϊH ~ i=-7?g`gTZ!WG1rJ CnQi\҂GnrJ >( iܥ҂L{Za|Xn݃{Za|Xiiӂ42=-ӂ4~p;~p  rZA_+>k9Ç۹~ gp~ gЯ n?rDC>oQ‡~ Jp;2ЯSr@Z_H C|҂S=-p;oO >,kH >,ӂ=MzH >4|i"-DZ𡧉COi&҂=MzH >4|i"-ӂ42=-LsO >w=,ӂ4`zk|s%>ul}U|3N> u򬁺y@۩|N>Yu{w𡧉CO&y@|CPWw!og;|G>Guֺ;𗖠C_Ww!k;7|i;Dw𡧉COe{wa|Xw{wa|Xwiiܻ42ͽ;ܻ42ͽ;5|Xwi|Xwi&=Mt҂=z&H >fz&H CmVZg2=-̤}CZ i~CZa|i"-o{Z!a(-B|!J >5҂yCi|C>wPZ!o)-ה|kJ 2=-ה|i"-DZ𡧉COi&҂=MzH >4|i"-ӂ=MzH >,ӂR\R\=\=+Uc}Uc}y?^ cT<,o{ayc}H|TXCO&b}=M{⡧XCO&b}PB&b}=Mz=4{Xieb}4J2 `X2 e{afX2ͽRc}4J2ͽRc}4X2=DXC>UXCO&b}=MQb}=M{!*Da{Xiؾ}cױ}c{&wU/Kƪ^G\ @zA>wU/qU pzApUzAOUɪ^dU/U _CWB^O\ yzAvUz_ZzAvU/ۈyUɪ^dU/i4Y i,TU/XEUe4U iiTU/X^LSU4U imDWe4Uջ^LSU`zAOUɪE^yA/ :λp^wa z  %v &pE~b8/Xp^d.]T.˿wAw.y D8] &wA$s.țd]T.X]7ɜ z=M&wAO]d.i24 z]T.i2wS;(,NϩB9 %Ƞ\ J TI!X^ yA 8(KAɠ\dPJ AOAɠ\dP.=M傞&rAOAɠ\LSA`J 2A`J 2A35r2A`J 2A`J  i,TIᢂr2RRpP.GEdP.i2(SL傞&rAOAɠ\dP.ȧ=M.*(SL傞&r2Me 4 r2Me 4נ 4נ ]l_ iA 4 iA 4\LSA`rAOAɠ\g?ؾ1(4 z ]TP.i2(4 z y,Aɠ\LSA`fĠ\LSA`fĠ\L irAsPbĠ\L} bP.XYj\4KA%\LTi.?Oa8F7c/xL/B_c`lSnl /94}cqѱ}c{阖?l?): *4 2F7 2F7ǛƸA)4 2F7 2F7 2F7ǛƸAƸA)4ܘscyc ΍+87x;sc ΍n?Y4~^ۃsc|bpnY sc.sc.sc.scq1n1nc ΍q ΍387^޳uW`d׽g187Ș{ybpnם1087i<~qdpndpsc sc sc sc sc sʿc ǍyK ǍyK1Ƹ%Ƹ%3kqK Ǎ')7qc q;pܘ7lP1oPc ǍysqcޠqU:ƼAƸA\q q rFܣqvt8.Q:wQ wp\t8q2Me (p\LS`q2Me 4 i*,T8௿ɪ\p]TU.?'rAY;Ȫ\/AV~ *KUAY ?KUɪ\dU.i*wQU~L5虰*LX\3aU.虰*܏^TU. 
z&2U ~t  z=MV傞&rAOUɪ\dU.i*4Y z=MV傞&rAOU4U i*,TU. i*,TU z]TU.y"U ͻ*%cU.ȳAy2U ѻ*SzWyJU h*wQUɪ\dU.i*wW|T\7] ֢rA~[vU.GvU os*wQU/-U u*=FW傼\dU.i*4Y z4U i*,TUr2MUe4U iiTU.XEUe4U r2MUe]TU.X\LSUɪ\dUrA/U\wa] z ]TU.ȻVe G;V~bUrAI*,TUo0\w"\ NrAwU.yU D*W傼\] &r2MU傼IEUeyUɪ\dU.i*4Y z=MV傞&rAOUN[5;mAGN[w~i Q+P,V XFN[w~i ;mAGN[dAON[d-;=Mvڂ&;mAON[LS`2)`2)S*;m2)`2)`i i*P,T:m2M e =Mvڂ&;mA>t-i4i z=Mvڂ|.N[d:mA>t-i,T-XN[LS Y,T-XN[LS`:m2Mu.A4 vڂe4 vڂe]T-XN[LSN[d-]l_i z=Mv.4i z=Mvڂ6 vL1oP1oP1o|c9mTgt9-9D.]N r.Gӂ 'rZt9iA rEӂeәi2M.,T9-XrZLS`i2Mӂe*4UN;KQU ?9_jU /Y_̪dV-'jA>U ɬZp?YC-A>U z̪=MfՂ&jU J jAτYʪ=fՂ jTƬZ3aV- (<'^dV-i24U z̪=MfՂ&jAOYɬZdV-i24U z̪]TV-XZLSY`jJLSY`jU i)dskAps-n$͵'kAqs-snD͵ ϻ]Ts-3=ny͵j=M6ׂ&kAO͵tV-gnΪyY ;:WigՂ|ZOߜU jU fV-ΪyY 84U z̪=MfՂ&j2MeՂeʪ4UZLSY`j2MeՂe4i*,TV-XZLSY o@:,TV-XEeՂeʪ4U z̪=Mӂ,}YN FiA_l.Vӂ,V9-X.vrZC,]T9-!ӂ&hAެw- y?q o;zт8ZG ~hAެw-țyq`hArh2MтۯY8Aт&hAOq8Zd-i24G z=MтG zԌ=jт8ZZU`2jт8ZG zԌ=jт&hUz=Mт&hAފv-i24G z=Mтe4UiO4UiO]lRG iO4UiO4U8ZLSU`G i ,T i24G hAOq8Zd-i2<т&hG hAOq`h2Mтelq`h2Mтe4iAGؾ1,l_G i*,l_G i*vQq`h2Mт&hAOq =8v} b-i24G8Zd-i24G DhAOq`h25q`h25q`fI>9,,'т<,85q`f8ZLG ihKq`f@94G r5vkׯ06매ǘ_yaVmOI1_¬jc|aVml?(xLY~JpdVmdVmdVmdVmdVmdVmdV혲jcޠ?771Ƹ Ƹ ǔO2q]ǘ)w5b&}1/1w5 *w5 :՘7vɘ7rɘ7XmdjTdt*:w܏A9D.Y r]G箂Y 'UtrWAҹ E宂e_M箂e]]T*XULS`rW2M宂e4ULSWiP4(/ٯ~r=Kv~Կ*'׃d?X>4,}W>m,}fק2E͂^ ig> f,G"y+ 9|4> z =Mς&g2Mςe 4>YLS`g2Mςe 4i*|vQ`g2MςYLS`g> i*|,T,i2|vQ/e/ ^eA_2.*_T˂d̗%S,w˂|YR/ ݄,y݅ P]ʂBY]( eAp,Ȼ.y݅ oP +ʂe*yʅ*4U( eAOBYd,iP4Y( z,=Mʂ&SbA)ǔXcJ,ۥN3> }O) o:%RĂ^SbA)ɔE }dJ,i2%4 SbAO)ɔXdJ,i2%,TJ,XiL432q)$Sb2q)`~>X>N2%,_TJ,Xig/*%,43AO)ɔXĜ zL=MĂ&SbAO) 9%4XĜ zL4 i*%,TJ,ȧN4 i*%,TJ,XXLS)kSb25)`Rb25)`Rb i*%,TJ,i2%4 Sb Ă&SbAO)J=MĂ&SbAO) ϸ8%4 i*%,l_ i*%,l_ iSb2R3rJ,WN]l_ iAL4K)`fI9%vč iSbAN) X);?;cc~czm-??쌍D[*z_caȱ8|8scplq쌍q쌍=1ncq쌍q쌍q쌍M1ncjSgV-1_[zUKlV1^[xms^]?jSglw|_bgll}1/3c쌍q쌍q쌍q쌍Ƹ]vƸ]vƎ3vL ՘_yaglO-1_ؘZ*c~彇1^?v~:ccy877777771uƼAS~ݯc~5k~_c1^#vO5_c|`kl?; v5 1 1  r~AݯyEw\_Aݯ~3I(Wt+euQݯ`fWݯ`_ i,T+XWLSݯ`6Mu.,TK.% a)K`N,%0'܏Ă^sbyK`Nﲄ_' ~?MWwp?,Ӽ9e2MڂeJ4: itN4:T[LS`BGj zL=Mڂ&SmAO~7i2vQT[d klUc 
j^klA/5؂^kl\&[K`-^ M5~;u&klAO5[d-i4Yc z=M؂&klAO5[djl2M؂e4Uc pe4Uc[LS5E؂<[g=\c DklA_l؂\c dklA p-[\c klUc z=M؂kA>tp-ȇOy f9cBׂ|PZ\Z yZdp-i24\ i*,Tp-XEׂe 4\ i*,Tp-XMS 4\ i*3ׂe 4\ZLS`kAO }\ZwD\ 1vQ :%cp-X.kA0zׂ#Z& dp- ;sׂqZ\ 򋿃kA~wp-y I9,Tp-{R]Tp-XZ\ z =Mׂ&kAOZdp-i2vOiAoٴ7lZMeӂeCˆ  )}MgӂlZb6-7fӂ~{c6zAOٴlZd6-ȻΦ=Mfӂ&iAOٴlZLSٴ`z2ٴ`z2ٴC!i2ٴ`z2ٴ`zM i,T/ࢲi2Me=Mfӂ&iA>r6-i24M z̦=Mfӂ|vlZd6iA>r6-i|Q#Gd+5{]/prYV( x̦4M i*,T6-ȯUM i*,T6-XlZLSٴ`4 fӂeci2Meӂeci2Me.*,T6-XlZd6-i27Φ]lM z̦=Mf.*4M z̦=Mfӂ1,,&gӂeljr6-XYrMΦ9MgӂiANٴi?ݓh} mm~f>϶l{@g|h}-϶{c{Zlm-g|]BZlqh}gAϸ]>vbc{g~kZl6+=h}o|篼?>kZlmgQ϶~qh} 7g Zlqh} 7b+6q}ݴv3>}ۻimx?ng xMit>~kݴit>g`wݴϼ7nCPݴi=[T7!רnCQݴȦi9Juӂ{7!wnC.SݴM{XYQ7aM ݴe{7aM{Xwiݴe{7aM{iY6Ώ,Bq7Zp/=?P\{3 Bqv7Zp/=,ڋkZp/=,cF42&Pŵe}k42fPŵe{(iŵe{hŵ&k=Mz(=4Q\{~icF=Mz( ܋k#z (cP\{%p;l/R(=P\{{|)nn$nz(=4Q\{iDqCOŵ&k=Mz(=4Q\{iDq-iŵe{qa^\{yXiŵ^\{XiL9c{ʱ=qPrlrlyC9<CPv0rlyD9c{i"Dk%k5kkyLŵ2Pŵ:PŵKŵ^\{[*=W*CޢUqCOŵ&k=Miŵe{qa^\ ŵe{qa^\{Xiŵe{qa{2ͽLs/2ͽLs/2ͽLs/=4U{gQU{ț=ZOyVU{gQ~VU Q~@T!+7=U{ ti]ݴnCV7!owM{ȏ=wuiy[ݴn2ͽwM ݴe{7!,D7nCOݴ&i=Mtz=4F Hz&H=LF ie&{axXfhyRi h 4C!ܛ=MzH=4F{;J=4F{i"D42=Lso<,lF{X7i}H>i}H=,ܛ4>iMF{X7iMF{X7iM&h=M{$zH=4F{i"D!GR4ZpO=HJ=4F{Xiie{!TaF{Xiie{aF Aiec?{aF{X>!LsO42=LsO=4F{i"߾+lF{i"D-zH=4F{i"CF{i"LsO=,lF{XiAH=,,$iVhyfIi`421ie42NR-XIJ=,,$rJ=4F{i*is7ml_>ڱ}cdƦ1Rݘ۟۟cjfSml?8ǚMczu_cK.:ual?8*1nM1nM]4 vL1nM1nM1nM5 vL1?ꦍS7ml?wLݴ1?ꦍShic|MOS7mnMkic|a7g즍q즍q즍q즍Ƹ]vƸ]vӎvLݴ՘nژ?mvo3篼6ݴ16;n~ol?7 6 6 6 6 6 6 vLݴ1oP1oylfS혚jcA1>lx6cͦ~w;jcTzgS혚jcT˿9lygqnB7Ղ E3ܢjAM Zp?$T rn]TS-]2TZLkZLSMj4T i,TS-XZLSM`jAOƒO6kAZcp-_7\ z` =0ׂkzcpk20ׂ~cpk2Mej4;DׂeMk2Me坢k2M.*,Tp-XAׂ&kAOZdp-<z ]T(i24TZp?ͦZO6j,oO6jA?ljA?lA܏ddS-i4T zl=M6Ղ&jAOMɦZdS-i4T zl]TS-XZLSM`jOLSM`jT i,TS jT 򰇛jApS-n6ՂT jApS-^nyM !vQMɦZdSΦyٴʦiAr6-M @gӂٴʦyٴ t6iAޅu6-i24M z̦=Mfӂeʦ4M i*vQٴ`i2Meӂeʦ4M i*,/M i*,T6i2Meӂeʦ]T6-XlZLSٴ,vQe N2?g-e`yUF2ZЯ ,ye NkhAާv-.!e S}jтSe ?pтO2Z]F h2Mт?2Eтe*ye2Zd-i4YF z,=M&̂~>0 d,}NY0 ~fA0aK fA0avQzL=M&̂&fAހt,i2a40 zL=M&̂eJ4`f{ƄYLSi7hL]loИ0 i7hL4`f{ƄYLSݿYLSi0 i2Mv?i2a40 k'̂&fAO ɄYd,ȯm0 zL]T,ȯm0 zL40 i*a,T,o0 i*a,T,XYLS `fǠYLS `f21 
`f0 i*a,T,i2a40 n'.AL=M&̂&f0 zL=M&̂&fAp,i2a,T,X>1a,T,X>1a,,]"'̂e%MYG0>1a,l0 iHf2)rb)9a,,"'̂fAN Yt,L1}=zl1~ƆlO\O15xa6"Ɇ1<cal+0_36ƸA67Ȇ1uƸA6ƸA6ƸA61uʎ4H1?GꔍS6H1?Gj9bl;eccꔍ__);ec|}al/1n1n1n|NN1uʎS6jW^{)f̘N٘?5cvS6vu)֍GƸAvƸAvƸAvƸAvƸAvƸAvƸAvʎS6 3 S6 EvL-Os"O͎dll?2;S1^lexMEvL-1^lfl,E6 *-1 E " 'v[dUrnFȂܣ[dXdt,QEvQ- wYtZd2/[d2M.E,T,XYLS-`Zd2MȂej/z} ]T,1T* z} BeAPYc,z} ]T,Fʂe ]T,X@PLS`fyPYLt,X@PLt,X@Eʂe 4:PYd,i2T4* z AO =Mʂ&Ce* 'v~* fBeA? d3TO^lC d3T܏E:PYp?LOQcAOPYd,i2T4* z =Mʂ&CeAOPYd,i2TvQ`Be2Mʂe >2Mʂe ]T,XPYLS ]T,yá O 8T* tCeAp,ȓyv Om9T9#ʂi(4Y z,yEɢXdQ,i(4Y i(,Oe=b2M>Xރ(vcQ,Xރ(,Oe=b2Mb2M>X_TQ,X4SɢXdQ,W\ z,=Mł&bAOE _qQ,i(vQE _qQ,i(,TQ,XXLSE sQ,XXLSE`b2Młe*]TQ,XXLSE`b21E`bU i(,TQ,i(4Y [i.A,=Mł&bU z,=Mł&bApQ,i(,TQ,X>(,TQ,X>(,TQ좊b2 rQ,ȳ<.]lX iA,4K2E`f(vt\ itbANE XtQ,i(t876>}l1)i۟űI۟cJflSdc{^WX+E6czI_QÁc؞ _M~7؞ ";T777؞:1)E6H)1)E6H91>GL9bll?vL) Sdc{l/L)1E62E62E62E6v"v";1ea) 3稼076HY1?G1>G̍ut1ucѺ1N076 276 276 276 276 276 27vL1oP1oP1ooo)cJjX3E6=~v5Sdcq1>Lzgll?I;)cJzgl;Sdc`7)1oP1n) WYp?"vD[t,5:E" G&SdA)JKȂ\Sd" i" i*EvQ)`Rd2MȂeJ4" i*E,T SdA)J>Ȃ^SdA)~091Eϒ>Ȃ^Sd,ycRdA02E,TRd2M%eJ4GȂeSd2M%emSd2M%.*E,T,XAȂ&SdAO)Yd,wozL]T(i2E4"Yp?YO6Sd" f,')~b{b,')~2!Ȃ)~" zL=MȂ&SdAO)Yd,i2E4" zL=MȂ&SdAO)J4" i*E,T, i*E,TRd2MȂeJyǝyÝ |Sqwʂ ;eA=r,ȃy SL) ;eACr:eAO Y:ENy) t,o" ;*.*EKSdA~QY_8E]XȂz)Yd,i2E,T,XYLS)J4" i*E,T,XYLS)`Rd2?mJ4"YLS)`Rd" i*Ev>.6mDƂ~XX] fm,+lƂX] >\!_|8mgW}т6ZF hAv-nym'mjym om'тejymj4F hAOm6Zd-i4Y8 g,*Y\8 ~ gA&pk gA&pvQ?z,=M΂& gAct,ip4Y8 z,=M΂e*4`f{'YLS?iwR,]lX8 iwR,4`f{'YLS?YLS?iU8 i2M?ip4Y8 ΂& gAOYd,of\8 z,]T,of\8 z,4U8 ip,T,/ ]8 ip,T,XYLS` gU8 ip,T,XYL} b,XE΂e*4U8;ȈYc,ȯ1>0b1 z}]T,1b1 z}yLjY/4'F̂e4'F̂e]T,XYCy#f2I`f 9b,,!G.#f2r,i:b41 r9MĜ΍1bt?*^Y8c0{pLţ:/~6?c??cL1>Oώigc^':עhl?r8uegqu1ncqqqecShϑgc~T?ۏSlϑgc~T's#vc|`m7khc{m=,e6]6]6]6YFYF;21UjR3稼6H1?G嵇e1>G,ue$1sc11N6 6 6 6 6 6 vLe1oP-1oPe1oPe1n1>g|~6~w'gc~F 1~a-_lт&hAOi4Zd-i24F zL=Mт&hAOi4Zdh2MтeJ4F 獂eJ4F4ZLSi`hArhAq- Nyig4Z璜F ThA9p-SN=т<4ZgF4Zd퇮MgA~/Y\? >ς rgt~UgAޅu,ȯ]? z=Mς&g2Mςe4U?YLS`g2Mςe4U? i~,T,XMS4U? 
i~vQYЏg8 ~gA?B ]T,}}2pY/gAފv,}}>:\0m wӂ~M b7-;yQ ;vQQ 8qGՂ^jA/Q 8]GՂ^jA~mZ7U 򆕣jAްrTj2MEՂaZdT-i24U z]T-臏i 9, ,hAs-}N}e34ZW6h f-i24F hAOi4Zd-i2,T-X`@Lb-X`@Lbb{?4ZLb-X`@Lb-X`Eтe 4 4ZLS`AOi4Z84F zL=Mт&hA~34ZdhA~34Zd-X4ZLSi`hA~i4ZLSi`h2MтeJ4F4ZLSi`61i`f4Zdb{WЏ-_A? ]T+ǖᯠ[ _A_ }3eWgWLg+XEeWX3,l iΎ_2͒qb8,,_AN Wt+ _A}`cglP?̘,*~U}c6i8*VUy`^< 6Tecz_vc^_~*7*^Y;27*7*7*13A1?*͎5A1?y`kl/15^c{k!{5u5555FvYvY:ױ?8}4稼5H1?Gu1?G9*/{9bkl?7 ֽ5uu1nu1nu1nu1nu1nu1nuc{yjy{y{q{dkl?zLuxOֽc|6MO7FW) e+ǖ ozx7EeW3a+0L ^T+0,3Q"ȻYxy7 f9uQ`^Ar+i24 z x=M6~ ~]>7FW/]A_lt}uQe+i4 6]AOFWd+i,T+X4[6emLab{FWLa+X4[6e|ۍ`~,o/,oemd+i/nt=M6&]AOFW_4躨FW_4 i,T+XFW ,T+XFWLS`]2M5.,T+XFWLS^e*?-WU Bb* ի/$Vի/$VX Bb*/]UL3f*XEUeUg?\z,lY iW2͒qbɸz,,=WWANի Ut*WAKr*M[Fv|a>e/|]$mg%r ի϶m|Mmc{3>^}3>^Vy@3?{fll{l+|J}k>^zVD3nիX7g Wq^)^^^l;5W>W>Wi>g[3z_ PVme:g|@Vq([}}lgϸO>>QVc{3?G5e>sH>sT^CPVm>kVc{gۙ:g Vq([} l7g Vq([eϼ37>g VDjl/[}''Vm?㉲g|U ܢVFr*[=NT>$UzQlVKr*[2/V4Up/[=,V42ͽlLs/[=,V=ͿS?,˽УFG&Vpob=zQУFvG&CM5X4&Vpob=,ܛX4&Vpob=,+642TMe卩X4iܛX4&2ͽbܛX4&2ͽb3&COM&X=M4zhb=>4 &Xhb=>S&C?ehb&C?ehb=S&ph9D.An= T{t*} rA9J..݃ rA]T*XYq*XEeA4Ճ i,T*XA&-OAɧwE%-OAɧwSp?n| gv-OAɧ~4|SLS`"-2ɧ`fySL64P'e\T)XSLSL>=M&&OAOɧSp?4|HKd) eLA?l3ز܏T^l=Lo_e6Sp?7n3}ٳOAOm6Sd)i4f zl3=M&LAOm6Eej34f i4f itQm`L2M<6A旂<'R'%_ {痂 KAq~)/=kGA v(-kGA~QLvtQ_z>]; ׎-Q\; K׎ѵQd(XQLS`jGU; iv,T(XQLS`jG2MՎe4U; ίkG2MՎ&FAi4 򎋃FU袂FAqq( A/$4 fFAqq( K8|.kgF^#JA{sD){o(!RK`D){o(y[ ?|.E嗂Rc~)1_ sKA~v~)1_ z}/y o9tQ`KAr~)i24_ z/]Ta) , K Ro}0>YX da)듅*,}}4YX z,,yͅRda)i4YX i,/Ѓe] K2M=Xհtaa)Xհ,/Ѓe] K2M K2M=X__Ta)X_4 Rda)o,\X z,,=M& KAO oRda KA~Rda)XRLS` KA~RLS` K2Me*,4UXRLS` KI GNRУf')QI bv5;IAGNRw.I iwe7NRIޥ,lRI i;I2q'bɏ,,w;IANӝ NRt').;IA2I Mʼn~QcU4I?ԛۯ14gl>c(y`DilFl#Jc~N9WEll/ac{8el#Jc{8edDidD)c #Jc #JJ:ɘOx*4:PҘOx*r2k=4k1^ %S(il%zg(i;CI?c(id(id(id(il?5}24}2tLc!+!yPbCy` iσc~cHc|COu1c!'kc|-` id id id id id id bHcޠcޠbHcޠbHcޠbHcޠ?c(il?xLtkc|JcP~*q̏~~L1^ %zg(il?2}PwƸA\CIbCt(~J r%G) 'PRt(BIAҡ PEe_#e %]T()XPRLPEe %4J:x~?-+JAG͊RУfE*JAG͊RУfE): zԬ(AG͊RLS~*J2MUe(]TE)XJLS9`fyWRL1uE)XJL;uE)XEUe(4S9ȊRdE)i4YQ 
z(SAOʩ=MV[V.cˊRЏ-+Jvٳ|>YQ gӂYQ 'z/{V~V"i4YQ z(=MV&+JAOɊRdE)i4YQRLS`*J2MU1`*J2MU.,TE)XRc\Q:ȊR%\Q +JA~R3aE)C;(yd:`PR9w@%̈́CIJ ̡ p()%y͡ r()ȯ%J z %=Me %4J i*tQ`BI2Me %4J i*,T()XPRLS o:,L-@l!y-.}VB Za )k- o鹅}VG/7J CIAτgPRJ CIA~v()0LJ CIAu()%=MV&+JUQ s+JAOɊRdE)ٮ(9ɊRdE)i- WEUe(y ɊRdE)i_-j!_WQ )ȻYn!]T )ȻYn!y7-2B *c ZHA_el!=M&[HAAs )i4B zl!=Mej!4+`f{oRLS2i&l!]lMB i&l!4+`f{oRLS2RLS2iWB iW2MF^EA-hQc(ȳyVѢ`f{8ZYG.4IFehQL>-Xr4KѢ hQt(i:Z4- hQ Gn1Z-cU4I?ԻޱۯFcM:#99b^1h4Acz_6N1h4NƸAƸA)p2 2h4 2htLAc < T䘂Fc~<4PiixM3htLA=h4A1^ A1A1A1A(ɠɠ1)htN0h4Aq1?uA1?Ky( y`hl?z6 )h4;OA1nA1nA1nA1nA1nA1nAc yJHy y y yJGq Shl?E8ǚAkX3h4c͠1x3h4AoFc44 rAAx[t(5:h4 FAA K\F4 i4 i*htQA`F2򯒃F4 i*h4od(Q3htQAG͠QУf袂FAAG͠QУf( z AAG͠QLSA~>F2Me ]T(XILSe`fyWQL1u(XIL;u(XEe 4U69ȠQd(i2h44 z AOA*dy(ŹPCA?,CPp??_o'>Y g塠O~! z,]Ty(i<4Y z,=M&CAO塠PdyC2Me*4U gqe*4UPLS`CAqq\(C ylq hw\(%0.Dž< άBAlp\(oc DžFq SP_8.-ZDžP_8.3 =Mƅ&B2MŅe 4PLSq`B2MŅe 4 i*.,T\(XP7Z z ĸP0 Za\BAp\(kqƅV BAp\(k|2<X >CU zC,y <lCAo oѺ<-ZPLe[f.*Kpg-DAYw,Q% CDAYw,Qлe(CyY4% DAOY,E6?-CU ??OPw\Pw\ 7}!,fAhl?8ǚc*zgAh; Bccx 4I BA~/! tQr.Fܣ Bl_tA(Q tQ wPtA B2 B2M. 
,TA(XYUrAbwHPp?n HPлe$(2tQwHPлe$(2Oy[FqwHPлe$(XHPp?JwQ`"A2ME.*,T$(XILt$(XY{:,T$XYހ:,T"A2MEed$(i24 z=MF)ϠHAvd(';@v Oũ;@A_ӬA~{PЗ ;@4kp?܏ =Mv.4 z=Mv&;@AOPd(itQ`:@2Muec32Mue]T(XPLS Q|b5O|{]`OLS ?;,T =A iw`Oлe'2e{`Oлe'2n F=Aq碂=2M{`Od'iMjHM6yV Β9cwsx33 23 23 23 23 2sL1oP1oP1oP1oP1oP1oP1o :c|y`Zg:cy1>LSZg:c|-`Zgl?Bz{Ĵ_ Oi WNp?ֹ_4Eu\:Ai~ /I:(ֹNtZ'e:sQi`fi`: i*,,b9sNdZ' zL\TZ'2nֹNлeZ'2n 5-:`sлeZ'2,TZ'J4 i*sQi`:2Mu=e卧:2i`2i`?mJ4 iqiɴNdZ'i24 5&: O@NZrBb '- +Nr GKӁ/$r~ Bb 9AO@Nd 'i24 z =Mr&9 i*,T 'X@Np?,T 'X@Ere 4 h37A~M 37Aq懎yƱ Mp& j.*Vc5Av&ۈyƱ \p&ȯ ƶc5AO`b52Mje\T&XXMLS`b52Mje4 i*V,T&;=Q=Eh(MЋg&ųG٣ .{4A~v&ş33d- l.dnY K6U z,y % od'Wl-K6A%`J62MlMLS%ձ ?:V4 z=MjXM\ z=Mj&c5ATq&ț*\T&XXM7U z=Mj1VsQ`)VXM c5Aq&Ȼ<}0VXM c5 Za&i2V4 Vc5AOXMd&i2V,T&X%4; je]rL`b{XML`&X%4; je]Eje]rLSKXMLSKiwAOXM_8V4 z=Mj&c5A^ԱXEjѨc5AO`b52MjeQje4 i*V,T&XXEjGsQ=gM3a&虰Gofݣ z&=h {4A= M?|w&oݣ i{4A=`f{MLܣ i{4K=`fi2G4ݣ r9Mh{4AW\أ 5M%h4Hh>"mg_o1%>avq}][c^ZЪl[g?۶8j>Vg?V?j>Vl{]la|*^C泭7Vg Z5c{3nG3+>h>h>ێS==xmGMh>z4D3^|=xGs =ϸ3h>У;Cgz4qgь=7rݢ97,-3Ds,gnќۛ3m>ێ}k͙xM97g 3qh| 97ޜ77g>gޜ7~3opo| D3>h|Z5CDg|Ъ[5C3>h|+U_CЪl;zM\Z5Cx9DjOrj<ժy=Up;T!GVMpo<.ժyeU[54˿pj<,[5ULso<,,ҩU,֩U3|CO9yi"GD&hz<4yi"Gp;Dvt2=GLs<=GLs<,s4=GLs<,s44{Khi<,+4{LhiӦofy2ͽ1CO9&r4=Mhz<܎c>4Qy?3Bqv/خgnCKLgZAqv>!Tq桯gnCnnZAq&gz(<4Qyi8Dq桧COř&3=Mg{qa^yXgiře{qa^ ře{qa^y4[yi"?sTl塧CO&b+=MV<4[ գb+=MVie{la[y?*Ls<,c+42=Ls ?T塟OTQ{!*C?<*C?<䯫UE U~A!b*CXUe 2 2 2 cޠ~)> 2 2<d1e-)c|nYLsb~n-)TLsb[Sܲ2 )c!ϟy9CS)+ bJkt1%=9IS)UL r.LS.,,,T1墊)2MSe_AS.Od1% z,\T1%i4YLbJd1%i4YL g*&)qd1%XbJLSŔ~p)2MSe*\T1%XbJLSŔ`fyobJLt1%X DLt1%X 4MSe@d1%i4YL z,=MSʃLr?܏]lf$ HM5&ĴIp?W6 1[Уf&AiɴId$i2m46 zL=MM&&6 i*m,T$XIp?,T$XEMeJ469xZrn[;kW97W9w kƱ}ؾ1>?_1rL/c+~6kl]O?celOqqS|bd/c+2濻/cc ?^x12?F\sGcq#.c#.c#.c#.cA1n1ncxc~<˵Ɉ˘O}̏g6qɈ~2k&#.qOe퇲xM32k1n1n1n1n1ncy7˘7˘7˘7y7˘7~p".c /c|xw^qc|x9w^32睁L_{xj́~V3:xE^\/Aс~-I:(xKt%e:rQ`fс`/x i*,,B:ryb+[_Y͠wE_-/A[_-/A~2ݲ܏[_e4U '.,T%XE_e4U i74{O_e e4P_e e\T%XKLS=M_&/AOA^~$kvKp?q䷍[^o*nx '.X[лe/AKb% rQ%7KbJ,!XeC*ˆTb BeC*EXe*4Ub9xtYg%o p z7\..A~K6ro7q_8AWFe3~¿*=\9 oI;G#\T&[@s4 
s4A.9 M˝ s4A~_MLS9`r4 i*G,T&XY^ i)he49Ldq梊3A~vq&i8sQř ?J84Y z,=MgLwy\ z@j.Um'jVM [5A_+lyɭj}Um'jVMݪ iU,T&XVMLS U,T&X?pZ5A2p&ݲUn٪ zly oUn٪ zl[jVMwܪVML6}7˭ z=jh5s4As&`=jh5s4A9=jh&s4AO9 o:G4 z=Mh&s42MhesL`&X_;4[h.4[hesL`&X_;_T&X_;4k4k`~4 zRh&s4AO9Md&_:G4M?7u&i2G,T&XMLS9 xMLS9`r42Mhe=MFe~V YaT&:*¨L 2A?+e2 Q vT&/ Q`f%8*sbT&Xf1*,,GeeRNpT&Xof{Ũ r9MGe2AX 2&L9Fe4N1*3EKllkcKwc5_[9=3cR;=[gep^;283cz_5s[ck~jkl3c 3c{(cdpBc 3]1(Spfwgx183?gkQ1^+Q1Q1Q1Q8¨¨1EeGc~<1?cTf̏~T?dz\ʌdTfl?|5QcʌsWc5ͨ7Ȩ7Ȩ7Ȩ7Ȩ7Ȩ1EeƼAt~TTfTTfTTfOǼA`~TTfTTfl?awLQ1o7Ȩ~m;2c|ۏygT昢2c|ΨwFekc~ѵP *|qT&!:*sQ rFGeܣ2pZtT&Q:*sQQ wLtT梢22/22MEe.**,TT&XYtT 1AOݘ~n2isQݘnLd7梺1AOݘnLd7&~ z#2Muce4Ս G.,T7&XnEuce4Ս i4Kwceo4{Lwceo\T7&XnLLS=Mvc&1AOݘnA_)m}A~-Kc%p A_^/cp=" z}\T%rQ=T~VSJ<+˳J<+l_p? {*S =_0΍\>+N.!Vñ )sl%[Jpc+A~ EV" =ȯNp6\Kw\p z}OW] /U /A͟>22?-6?Q<1Ǡ/cklo=ˏر>}c ?_w_ug1}uge;/#ulD|ퟧcqOэquceϧnbYcdel?}5#.)2W^=͈ӌqqqqq<˘7[ǼAE\ƼAE\ƼAE\ƼAݢ> *2 *2 *2{L1oP1o/c/c/cI1nx&x&x&xϧq qsA~49x9nܢ/Aс Kp?$x rT%]:2x9KLt%XA^e 4_O^d%i2܏F=M^*4x z T%i24x z AO~j7XKLS`/A^e 4x9KLS`/2ҁ`fzKLS`fKLS 4x i-d%i24x z =MX~0\b z&,ÆAK3a%6 ?]Aτ%*T2%觏ɔ`yL Oɔ`yL Oɔ~  Lj~V)ALyU Zr%\E %WQUrG?亊ZT%_\E 烼$Sz>ܲKЯ'K,A^Mv%.z>tl% Tl%K3ql%_[ c+A^ql%%8[ :c+A^sl%wc+AJLS4[ i)V)VeCʱ`b+ֽA /9?1[ z}Tl%b:[ z}>VJW?[ 'K,A^q%ȫ<.T%ȫ<.y%߂,d%ȫ<.T% K,A^q%ȫ<.y%`J,2MXeK,2MX,T%XAX&3-A^Tq%Q3i z̴yQř /8i z̴=jfZLKi9LKLS 9>V&c+AOJ[ Jc+AOJdlb+AOJdl%Kю=MV&c+AOJLS`ni*rP`ni?茭l[ i?茭4upLAgl%X[*,-2M"|P`niᠧJdl%ț+[ z=MV&c+AO otl%i2rP tl%i2,Tl%XJLS tl%XJLS`b+2MVdO%=yϧ{*A?ܲs˞J?ͻrP=Ӽ{*A4Jo{Te~T/K4ۗ%TeTeTS iS r9MT{*AN=+$y=K$i=' ʵ\[bזqmyA\cX][\זLLڲkP-;͔k|mfʵe͔yLvo֏k˧hֵ3r097Sq{3r77Sbk?"6Sbk3LǷ6S-bk3c{3c{3c{3rc{3%6Sq{3elo\&f5nbo\&fʵ5nbo\&fJlm.ʵ5?g}wQ9[笼.5>g{rPG{%vQ--g}wQ}wQ-ǣq{7wQbk7wQq{%vQy׼r\(׼r\ ]kE ]k˱E ]kE77Sq/{37S-gʮq/{3%6Sq/{37Sq/{3r7Sq/h|#[h|!z")rh|=rCN͔9J4S^\)rh|eLLEE32͵LLsm|XY"׷gQ>\"~Y,ʇ^ߞE۳(/Y=׷gQ>,ʇQ=rJC0Y4,ʇek׋k25aEyq͢|XfQ>,\(iȢ|XYA"aڻL|D2͵wELs͢|XLsϢ|iY==򡧹gQ{ ˇɿK'z {߇ |rUz {ŵ|Z!\~fp9Z!2$B!AC^GC` ɇY ɇKC4 ɇ ɇk*$ {C{C{C{C^EC^EC{C| 藼K>_!_!_!/S_!g~ɇ %.% %3K%i~ɇek2 ~ɋ 
4'%z{ClO|ȋdH|!Q'J^\%{H|퉒==Q'J>$J>$J?8Dɇ~{C?=Q! Q! Q߂{CDɇDɋkCDɇDɇekC^ D25Qa(Lu%iDɇek25Q(LO/c/Ћ%z{C/~|1|1|_K>EOK>EOK^\%i((%z}{CoO|ȋH|KH|퉒=Q׷'J^\%z}{C^FCOsO|H|i==Q򡧹'J>4Dɇek24Dɋk24=Qb'J>,lDɇe]i{2Dɇe]iwݾ&J>,\LsCOsO|iy/#%z{COsO|i==Q!jDCOsO&J>䭍H|i4Dɇek25Q!orD25Qa(LsM|i ɇi׾+$WH>O|ȟQ!BafBaf>WH>,,wңBaf|XYGCN9MTH>4Q!DC; ɇ<Є ɇwi|_fdl c[llcxLb̏A_71VHC/cTCw*$c|mY!7:ז9ז1<ͱSsl?}5fgOcdddl?v5 B?>VHSd̏O1?>UHBB2+$c|B2+$c91X!y욥cid;K#c|Y~Fl;K#c|YyLTۏ>9+F1?g3FNc|4J#c~nl?4-K#c 1n1nTYYyL1oPEy*y*y*yzTidTidTidl?}J#cޠJ#cޠJ#42 B2-B2-B2X!yL1n1n1n  Ip?)G+$Am4-B]! r#]AN G AUHܥ+$A.4_[WHeT$X Idh$14 =0F*4 z` Th$14 z` A~5>Fe 4 *4,Th$XAFe 4 io4˗DFeK47EFeKTh$XHLSy z =MF&C#%܏{["Al#zAhH/6["_["T/6["9"HR$X#q~(<"ȃ~yя t㠢A>GB:qPяށHG?я~ApGz-;["A^uKzCly- /޺% zCA7 9\HKp.$΅y˹ '8 r ΅yչ /u9䧉s!A^w.$ ΅T.$X\HL|H9r|H9,,R΅<e`̅=0B\Hhs.$1rP \Hc.$1 zs!A^Op.e|U~C#243HC#A?C yš 84߼ yš eh$i24,Th$ȋ4 i*4,lW i*4rP`B#2MF*4,ThADңfK$Q% B["A^qK$Q% zly- /z%rP-\HW z`̅=0Bs!A^Uw.$ȫ΅=0Bs!Aʅ=0B\Hd.$ȋ΅=MB&s!AO\HLS`n i*rP`n i?̅l i?̅4u{lLYf.$Xc*,2M{P`n iؠ\Hd.$ț z̅=MB&s!AO o?t.$i2rP At.$i2,T.$X\HLS Ft.$X\HLS`r!AO>B*/΅1\H<}s!A \H?;΅f.$XYnuw.`>\HL}b.$XYnyw.$XYzw.`ݹ`fݹ \Ht.$i:4 :\HG :\Hp (ӃcW5~_}+Lcy}c~L1n}c3#3w`fd`f1}܎ퟶcvl?5 232 232yL1?>eFSfdl?e2#c|1323#c|132{ǔ1D2%2ז)1L2%2ז)ǔyL)XԘ~`Jdϙn{sVL9cJdl? 5S")%2ۏACPc|2%2yL)1n)1n)ǔus7Ș7Ș7Ș7Ș7Ș7Ș7~1DƼADƼADSJd?o)tĔ~0l[bJ1DƸ%DƸ%DƸ%D`cS"cܒS",XsrJ$S rNED\S"A)~+I:%(9HtJ$e:%rP)`fkHLS)J4y7ZHp?X 9ZHb-$ rP7ZHb-$ ˆT Vxc-$XZHLS~hj!2MBeT-$XZHLS`fZHL|t-$XZHL|t-$XABe4xZHd-$i^n~˗Adl~svd#藌͏j~T A-REuhqPE /Żh^.ZT"E r⠊AzEhH-eiWAf[uyc #knay-Fc #9qP gPFBCAk١ gPF3q(#Key!ǡ`BA^v(#eT(#XPFL|9q|9,,`naO0[$na!0?IzCla!0[Ak- ƃϿt>dE#YeE#[V4FW"\mϊFo{V4AU4&+AO`*A^$sE#XFLS`fĊFLSh4UiqP`*2nz e[2A e[2-CA :B2*B2PFc(#17/=02C2z` eT(#1h2&CPFd(BAOPFLS`n i*qP`n i? eli? 
e4uchLYf(#XC*,ԍ2MzP`n iРPFd(#z e=M2&CAO os(#i2qP s(#i2,T(#XPFLS s(#XPFLS`BAO>2*e1PF<}CAPFgCAi8ؾ1,l߇ii8Xv(#XYnv(#i:4re9M2K012NL0ܧՂoa1=1?}\c{W>2cBcCcǸ 2g7P7Pc?mS[cg~fkd(cd(cl?5 2Bc~| e)1ǧP~12c(c12c(cl?9C)c ee(c-Cc|mϏe(c-C)Bc1?gPƘ3;笼sP~Pj2S(cl?3ۏٍ'e(cl?7 21 2Bc Cc C)1 1oP1oP1oP1oP1oP1oP1oPc ey ey e _ AGF,8ԃ5A^tb#Nl=j&65Aڡ PF/C_eb;PFBCb+j2\PA2e e4CCAO- aF3a ZA-gA0[Aτ-gF3a #yxg_t_mЯ +As DŠFЯ +A*hy% D+A`W44Yzh4U"+2MU4eh42V4ehTE#XFLSh4UzLl%/8qP=j&6AJl=j&65A 1:5F'6hy17ĊFbE# l zCh!V4dE# X8Fש]7=MV4 zh=MV44Yzh4Ui`*Ui`fˊ`fˊFLSw4_^V4ek*2M5,]Ui` zh=MV4ɊFdE#i4Y:W4&+U;W4&+2MU4eh4Uf;W4eh4Ui4YcE*AFO+A?}h׾;٬hy O;`f+2}`f`fnW4enW4+AN FtE#+AcqE#+}?bEcl c[llcxLb̏A_74ccl{=c;clcTXcz[b}c[b}1}Lcޠ>\c c cI1nTS}c̏O1?>7K1c|1c1XOu`)cKc1,eu`)12S)cl?Q5l1?gwY.Kc|XۏQCT1OPG@ScTq,eq,e?!V4٠_OV4+A^pE#ד_OV4~=Y2+A^pE#V4aE#iqPɊFdE#XFW\i,TE#X ,TE*2MU4ehTE#XFd #i4w=M04zla=M0Fx 9wLz&]=.-AτgŃ]=. sRsA^v"i2woT z]=M.*w4z]4i`ri`fǕ`fELSw4W.er2M,i`n z]=M.͹Ed"i2w4v6.&s6.&s2M.e]46.e]4i*w4Yb"d]b"gE`yW-~hE hY-~hE`fâEL}a"XYv"XYnv`UE`f[E Et"ih4]EQ\Epw-21? mlsؿd=cq3uwcLOʌo]3ic]qg]19>Ɲv1杩v>ǼA.ƼA.ƸA.ƸA.Zc kv1ǧŘjc~|]Sb1.cb1.Ñc|v1_.S`c|Xc*Z}h1Ew؎y)db^-Pc(c*ZgsPcYͱ~R1-yc c h1 h1 hcޠUUUUUUUۏl=Ř7Ř7c*Zy*Zy*Zq]q]gƸA.Sbdbdbdbl?5 v1 v܏e9C.kUr]F.ܣkXVt"QvqP wEtj2Wڵ`jU8GkAo~/ vqP7EbjAo7Eb"' ㍵A.e]4Ugv,T"XA.e]4Ui/]4@.e]4˗A.e.T"XELSхYz]<,ET[?B9K3,Ep? 
G~2KqPY򺨳A ,Ae)H,E?9KqPY~?9K&RT"kRyY :K,T"X,EɃEX8Ƀ~vJ.F+]yҵ Zv{+]^a"i:hA-E/AZyA /8hE ZyA Z9EL|9hq|9h AAA-灃A/A Z-^A/A`YAA Ń٠FPt RA/) a85 0^SA/)F0SAH14zLaT #i24i*50eJa4iLa48FLS)`Ri*,TAv2-;A~+oлe':AwNFлe'#ȋdy)[0^SA/)Fp=1%0x)FK` RA^v #Na=M0NAO)A0&SAO)`R2M,T R2M,l?8~2,l?i`fFLST #XBe/R2M,}AO)Fw94zLa=M0&SAFd RAFd #XFLS)`RAFLS)`R2M0&SA?CLa#SA?CLa 1,PFo^0~20<Fo^0~2,l_yi3m^RvLk~^[s_RvLk: vk=vtk ׶Um?Lkۙ׸Ad2^3qd "1g2^p_Lkx3y{&5opd ׼=vkld ׼=1g2^Lkx "vF5 )=wkRqgHaz;C 5L)۱R960EQ)ܣRW9I0.rJaER)\R=qL%V b j=..n.z&]E jvq3AgEۉC⢧Ep]\,k4XVp]\,k4Ep]\,k42ͽvqLs]\,k42}Oe{!..ie{a&j]\3EΤE?C]\]~Ž7/jQ˛]\6]E^3V"Q"..nw_/7]\]EE^tW"/vqLs]\,k=M..j?~EWPȫ]/E^zT"/>vq..EWPXR0{ "/(qKP "E^ T "/(qKP "E^V #0.CJ)eCJ)`zEA)^R=qvP 㢗FpOa\¸% qK@ ?¸*qU c?e&d\Lɸb;=t2.:yC :=t2.z&d\u2.:=Mt2.zd\4&:=Mt2.i0N2ͽqLsd\,l\ɸXw2{'bɸXw2{'bɸX.zHa\4¸{D #0.zHa\4R\@e).UA_d).UAvE*R\, yZYN,ES+Kqשi"Kܳ.zR\4Y&=Md).iYe=4,EpR\,ѼXg).ieR\,lːXߣyL-Cb~fpR\,ѼXߣܳ4{4/ih^4i"Kqw)KqD⢧,EOY&y=Md){"o"S⢧,2=KqLsR\,ܳy;4,2=KqLsR\4gYVY~gYj_y7/E"(Kq 7/Ebfʃ,2Yede).i;{XYnOV",ENSY9Me).:,E)Q⢃R\ܧ3K1?-6?Q<1Ǡomc_Xs?gֱ^ߪc;1 ׌oӱ]:IcYqY19wcTbT1OxqLV1Yc~|JV)Y1ZzLɊ1ǘ{Ɋ1ǘۏT=cJOg0=c)Ҙni-c~|ugBb엞O&$Ic`cpBml?o2JONgs)!1xL 1n 1n ǔu7Ę7Ę7Ę7Ę7Ę7Ę7~1%$ƼA%$ƼA%$SBbTBbTBbTBbd^bl?4 2/c c c c1ny1ny~):/8 ܢAy Dp?$rKT^"]:/28DLv^"XDK`A"% ϵ$ Xz ,HTA"% XG. !łDdA 2M$e*HTUi ,TA 2M$e*H4Ui ,TA"XDL|sA"X>A$e*H4GxHDO#Y8HDp?FS gu~2q=HD"gG"*UZG"-đD߃=đ 1p$"AhHD?8;G"eD4i)ۺ['A?N'~lOmAٞru[woA'fDW-ݞ-DoA'ejO4՞rAKb;/B%D/AKˋ]>sy X;/4؃K%^A^q^"X8/8Dp^"%0/,KP^")%72M%qA؞nOT{" =u'nDb{" =؞A^7q{"i=4ٞzlOT{"i=4ٞi=%='ejO4՞ilO4՞8DLS`՞i=,T iu05N~GVp~ogGz#y} /Ѻp/Ѻ%Z&GS}>Bd!i4Gi,͂2M,͂2Me#4Gi,͂2Me#y/2M,,͂Gif`n z#=M }>Bd!i4G$&G$&2Me#4G9e#4Gi4Gb!*Gb!g}`yW~} Cw!G>Bo^e+2}`f}`f}fWe~WAN} >Bt!A{p!}?G(Ӄcзc$0-*<@c|l?@2L{#c{lwo c|?0~`W1UUxLjƼAuƼAuƸAvƸAvKc c||*<˜ c~|*uSWa1vcWa1vc|e0OČf`l?5dÌf`^[ s0ccj#0c1~H1 Yc [c [e0 e0 eZcޠ2222222O<77cjyjyjyjqlGeƸA S`d`d`d`l? 
3 e0 eO9C [2rnF ܣ[Vt QepP- wAtZ2W-b3W\Ap?\A ~+frAb3WOT s\A e4+*W,T X\A e4+i*W,T X\ALS`f\ALSwT X\ALSw?\ WAZ@kǯoKo$v+ A^tW ȟ8@W)w+ojwbAuh@B2Mue 4zTt k9$0:@< y]сdt ȋyyсߟdt X@LSс /4>iHpPE /H\$r .yE /H\$r . H䇔A^wvߺi20WpPg\A3a 1 s+߼= c O0 e4+z)AO Bd!pP Bc\Ar+AO Bd!1 y9 Bd!ipP Bd!X BW ]Ai,T!X,T*2MUe T!X BLS-`fDBHzL$}MYʃJ$ւ;Aw"!3f'DA%DBWHzL$;_& AOJ$=M&& 2M%e_0XDA%e_0XDBLS` 2M%e_0XDBLS ogr"!Xe_ 2M/,AODBw94HzL$=M&& A[DBd" A`DBd"!XDBLS` ADBLS` 2M%& A?CL$ A?CL$ 1,PDBo^&~2q'<DBo^&~2,l_yHi}oc|՟}kc|xL11Hylۏ}cG&y< 7tĘ7cޠy;< *1 *yJGyJGqLGqLGgƸA#xL1?>#U=tcLG=tcLG>c:bl?2Nst~ml?1Lst/LGgkcǔ}Ǒca4cJG'ƸA#ƸA#S:bd:bd:1#ƼAݔ: *1 *1 *1 *1 *1 *1 *17zL1oP1oPǔzJgƸOf%SVbdVbdVbdVbl?q4}2+1}:+܏9Qg%rJFg%ܣ$\tV"Q:+qPY wDtV⠲2YJb3+ͬDp?ͬAe%~fV⠲AJb3+܏=TV"O sYɬAe%eJ4*+,TV"XAe%eJ4i*+,TV"XDLSY`fDLSYJ4i* TZ?" #mp?{刃*G^aV"ȋJ ANAe%mAVDWӝ8Dԝ򒺳A^dsV"XDLSY`2?eρ,>?疵Uyȵ[*~nYe"KUT"=kA^t"+Ug"XZELS`jA^tݜA^qqs⠚A^ts"+.nNz9D՜͉ /X?qτ͉gA5' Aτ͉ ո9:ܜz&lNTs"9,3Qs"knN DLS͉`# Eлe"2Hn_RT"2H) Eлe"2Hnz Ry!A Ed"i2HqPA Ed"X EMi*H,T"X3H,T⠂2M)e RT"X ELS`fk EW z RLJ Ep&~ku7A8H"A^v⠂A^v"ȋR=M)}FAOA A)&AOA`2Mݝ,T⠂2Mݝ,T"X ELSA`2Mݝ,T"X E79H,ݙ2MݝyPA` i̠ Ed"z R=M)&AOA Xr"i2HqPA o[r"i2H,T"X ELSA os"X ELSA`AOo ֵ>*)׵>*~X}b"w6kAYkA2p"w6k2rwk!*ek2rk2rk˭U4ݲU9M*kANӵ ZEЍ*~=8GЯ'cAqT#ד1?jG>*fQGdb2M8eqpi*,Tb2M8eq4i*,T#XGL|1s#XA8eq=M6 so#{]p9l A6~; 'd#kq+cBcAA8]A^w#ȋqT#qye1 9,T#XGLS1`bA?,uij%":~Yg#' /WqP?XKA^t#:e*u4UiK:3A^t#1xT# xy9 _lg<|v2A~9맷 ?f3n|!6>A^r#̸8Gv#X6GWi,T#Xǃl|p7>-AwGwA !7>-AwGd#Cn|=M6>&AOj|=M6>&2M5>VGLS`2מ`i,T2M5>eq4_{6>:GЋg#߭-]ՃučG¹¹A^8w#i zl|=M6>4zl|4i`i`2M5>ej|4i`2M5>΍`i iփj|4uOkLS=M6>&AGd#i4zl|yAOj|yÓAO`2M5>ej|y2M5>ej|4iWx:HO< U eA~Y g$'u =r܇:HП :r7 2rC 2Mu`f&:HLܙ:HLܜ:r{ 2r ANu :Ht$itSu tWu>͟dlclA1h{FXk#c;yl?F3.~Lőcxl>7YyL_wƼAGƼAG71oPő1oPrƼA} g1oPő1oPmƼA}fTqdTq1}UUYYq,82ǮȘ#c~*Sqd?Gcqd?GC?ƪ~Tgl?a6U~Jgl?\6U_zXϕGSe*2ۏfecqT:qq<7Ȫ7Ȫcy_yTUdTUdTUdTUdTUdTUdTUdl?"cޠ"cޠ"*2 *2 *2 *2 *c,S9d;c9d;c9d;c9dl?5Ɲ2ƝOw95C!U ro.A吃*AN GrACܥ!A.吃*4S吠_OC~=Y I~=Y9rHЯ'!A,T9$ד吠?jX IO|QrHd9!2MCe*cuU i,T9!2MCe*4U i,T9$XrHL|1s9$XrACe*<8Hp?`xAisp?98HЋgAA\8H98H   ! 
^;kA*%lA8H i*,T$X8HLSqj|X Ua9$W /rHЯ !A*,yI吃*rH%] ʤ!2MCe*4U i5JC!A^s9$1.T9$ .y9 _lC|] v9!A~|x_:Ί=0fE" z`̊=0fE>HqV$1+rPY H?{8+,ԍ2MeEeʊ=kHBwV$2+n z̊yYʊ[fExH z̊[fE&"AOY /9+4 z̊=MfE*+4 z̊4_{fEe="2MeEe="2MeE*+,TV$XAeEeʊ4i,T9$Q7rHp3m%.=jCrHW]9rHW] !AO~Vd9$irP吠rHd9$XrHLS4U9rHLS4U i,T9$XrHLS4U ix.4ukLST9$X]e5i4Y N%C&!AO吠rHd9${\ z,T9$\ z,4U i,T9$\ i,T9$XrHLSoKH/#Adl#Adl9KH)9Xus$O6G4as`͑`f%͑`fHL}cs$XYus$XYnus`A͑`fG͑ Hts$i94 HGJ Hн܎~=$m__k[7!׶׶S=mm m c׶=m m m m c{5W2ym@xDdl7L^5op _^/Nyצ׼Kk2yܿ0 _^/Ky{5opߓ^k2yD5n!׶cqۿ푓 r򚟗=r򚟗=r򚟗=rvol&"'k|o"rv"'m^λ׶Ӕmg^ێ~Cki׶3nc{k׶m^N푓׶cq "r2GN^9yDdl ׼=r7GN^k9y#'y{5opQ#'y{5op푓׼=r7GN^k99k۱=rwk"'qg7{;C5LY"'96EN{"EQ9I*rrT$GN.r\29 e{v_ODN.Dv_ODN{_ODN.D$GN.D?j9k ԋA䢧Ip\,#'4`p\,#'4Ip\,#'42=rrLs\,#'42Le{$GN.iADN.n /5EN.z\^~Ծ޺#r#'xDN9˅\|EN{"*rrW9 ["^+rrׯ9 E^V"/y)rrLs\,#'42=r2g .D_OQ.D"/ )rѯ'(z"rѯ'(y1SyGQ.(yMSye{bGXQ.iy<2Ϳ(b+r/(yiQyb%Y.v%Y.%C_2$Y.%C"~(,+rџ&H\ ,rg,4J\,ldXپ!rLe$2r,r,4J\4di*rT"$EJdc,J\tI.0IQ￱11=±oǠ[k1Zk?=ǔk?:SFc1lG8?1c 5ǠD̏13D1x:OLS74uwd'i]t=My&c||e1e}Ϝ037K~jl?k1e}ƸAf}ƸAf}S٘7Ϙ7cN6 *3 ؘ7occޠy&6 *3 [ؘ7`cޠyycޠ>c >c >ci1nY$3&1?c~Δs~1%eg[&~Y4>cѱD~nY_z>p)3#ۏ퇘ocǔeǸAf}ƸAf}ƸAf}ƸAf}SgTgTgTgTgTgTgTgTgl?>cޠ>cޠ>)3 *3 *3 *3 *3Y1{Sg;cg;cg;cgl?6Ɲ13Ɲ9O95{= roN8{ܣ=dt'Q:sP wtOt'X^y~UX Ua'] Uam]ژ7Ә7;1oPgcޠƼAƼAӍ٘7Ә777}.vddi'+Mc|=YizL14Ti>!*McYi5}n?KLcױ}n>k,1׌%d}>z>WJLc餱P}z>R7v~L%}zdididid1ƼAƼAƼAƼAƼAƼAƼAƼAsq4 4 JLcޠJLcޠJLcޠJLcޠJLcA_c1Ƹ3Ƹ3Ƹ3#ncKLcܙKL|[Ss)ȱJLA% S{t)x r.19J^T)]bK+ R/bKXkdl)i2bK2MŖe-ﳃ/*,Tl)Xҋ-4[ i*,Tl)XRLS`bK2r҃L&A>qr2)1O'Uc2E%Ipj>?OdR>fk?GF3E_Wn8NA~S_NA~SDwn8m:ځ7Sb) 1Ao7S2x zC <!OA>p)ocx i*,T)XSLS }_>(AτgS3a)ȯL] z&?=֟ O2 OA,l O2M՟e?4U i?4U i,4 r`NC90i .u*ȁ9  r`NC4T{P~؍zQݨ r7*ݨՍ !w܍ qQA>p7*܍ !w>ٍ d7*XnTLSݨ`Q/,T7*XnTLS%`Q2Mu%RAQ tT*Q3* zԌJ}R/** xG>QGͨTdT*i2*4 xG&RAOQɨԋJ=MF&R2v+TL 0*,TT*Xn i**R2MEeJTLSQ`R2v+TлeT*'PJQ @9*(G-RA>uT*ȇJTG ᨣRA>uT*xldT*i2*RAOQɨTLSQ`2ME^TT*X@LSQ`R2MEeJ4i**,TT*uT*X@LSq i*.,T\ i2*4 * zJ=MF&RAOQ a8*4zQQ a8*4 i**,TT*XTiTLSQ`R2MEeJ4zQgSJyQgSJyQg¨T3aT*0*) G^,GcT*XYJJ4KQ w8*bcT*X ilR2R.pT.pT*XYJ9MGRANQ TQ!F 
1*tTQJJiM__L?ӏcJgWTj~+*ʨvF3*Rc}l4?Ϣt7v4?ϡchzLQ1nQ1nQxcizLkcޠRcޠƼAݩyOu6 **5 m٘71oPQ1oPQtc6 **5 **5 2*5 2*5vzo>0F5vƨSjbTc~Ϥ>? gSc{285v0c?z]Gy)85v\Oxc)ψGtc S)85 285 28ScޠScޠScޠScޠScޠScޠScޠScޠSczL1oP1oP1oP1oPܯ1*5-1*5-1*5-1*5vq ۂRAQ rSJ*G G/RAQ rNCA|LC1 O1 PA|LCya; PA_LC}e3 O4T>4TdEeJC4 '_T*X4TLSi i* ,T*X4TLSi`P2Me>i |n4Tc*x sa4ԋJC=0} &x{Qi}X*x1 iPA1 42gMOK_' pt+N` A8XA>p+￝ p'  z`c=0w#聱 XA}>Vc+,cX2Mec4 i{?V3a+虰,3iD'X2O$X/,T+X>VLS}`XA>~p+X>VLS}`Xng>dz\YA>p<+ȯ r}g>dz\Y/*>x/[A~-V߽9֋nݛ[A^n٭ 8wonݛ[A_n4 i*,Tv+X֋n4 i*,Tk#XVLS٭ eg^Tv+/ zn=jf5[A>pvEe5[A>pv+gn=jf&[AO٭V: zn=Mf&[/*4 zn4ۭ[2v+VLS٭`f`v+X֋n4 i*[2Meen4ۭ[A٭ 9=g|VO znt٭ 9[A>sv+n٭AO٭֋n=Mf&[2Mee/4zQ٭` 2Meen4 i*,T~!XVLS٭Lee/4_xQ٭` 2M&[AO٭ ;4 zn=Mf&[Apv+i2[Apv+i2,Tv+XVLS٭ %9,Tv+XVLS٭`[2Medv+j[Apv+^Ϯ zqXgX]?z #5ϸ $>>>}v}T}v}R}v}P7S]g>>>>>>>>g XqH`}v o>g>g}7x5} Lyg>{ϼ;7x'>}7x'>g XqH`}vDkN`} _N~YϮSӟEV3jYYYϮӪcwV3wV3~ j}v:u@kj}^d>}kY;u2g Ϯv]gϮݟ]>7g ZywV3oj} Yϼ;7xg>gZywVL؝ZywVkj} Yϼ;7xg>kHg}ƽ tg Y]>^{Q:k:((Y܍YE~|)YENRERx0]+:֢_!Ա~P uE^c-U+xױ}񢎵uic-^gH}񢎵iXewkLc-^wkLc-iu]Z,ӼXewkLc-iu2ͻXyױ=M4Cs|0բguhnzxLZLи ލEP:k.Z w:kO/ZwJg-[n=Y|`4Z4Z4Z4Z &[&[|֢֢֢"(ȯZ,Ӽ[ewtkLn-iѭE~sG=-mEseWD=D=Dw[f3Atk3At+xG "H(Gn-iѭ2;XyG4"N(XyG4bzQѭENIgh[[pDE~\"w\"w\"w\Wu !׵E~O\" Tk/r];׵/Ze\"2)׵+E~\" TkLnt,i޹2;׵Xy4\Wu-i޹2;׵Xy7:4\bZ4Zjy"VkыGkыGkыGkOf -E/-E>QkOfZZ4Z4Z4ZCC=M=M=MwkDkDkL'XZ,Ӽ[^e>-2ͻ[^ewkLnybZ,Ӽ[^&\|""E\|""Eȵȵg*r-Y\ȵg*r-Y\|L"Kϋ&\&\iȵiȵi\ewaL.r"b]fX,Ӽ\ewkL.r-iE2ͻ̰Xy4""ZE2ͻȵXy"wkLNE,iީEOEEOEE>WkDkDkDkDkGJTZ4Q EE)QkDkL.r-iE2ͻȵ#%*r-iE2ͻȵXy4"b] LEoEoEoEon规&Z~JkB`M(׵ !׵ !׵7y,IE7QkL'Zy庂ږX|mZk"_[*X!X5v`c787878}1ݮycyc=;1oPq1oPw)cޠQƼAݡy?zLq1oPq1oPq1nq1nq78ccqvz}Lᬱwc|pc g}W8kYcp?{ߓᬱt} h>7vm5p}m Y)5vϽgSocIOcǬic Yd8kd8kT8kT8kT8kT8kT8kT8kT8kT8kT8k>YcޠYcޠY)5 *5 *5 *5 *k gq/ gq/ gq/ ggƸƸ 'pVq8E܍Yb_gpV>$ rg6V>6V/Xf/X/6VX/6V'XjcӜA_lc=M^T+X6VLSm}dEejc4zQm`X2Mejc4 i,TL6V>L ׂ}F(虰XAτm}2x^T+x '+|6Vd+i4zQm`X2MsN_C' ɭ tr+ȯ# a[A>qr+"NnVD q'~ɭ !wr+XVLSU`i>ߊd+?/f %/f Q3_A_|}90W pge|4 i*,T+ȯĝzg&M3Q&ԤyW3a+02_Aτg׋| WO 3_2Mee|4?|4 i*2_2Mee|3_ANA>Ty|b]3Y d=,]{|RzXa/zXa/࿕ eZ,זi wZ,R eZEł.i 
?j 1bA_L]bA~XLSɐ`b2MłeJ4{Qi`b2MłeJ4 i*-4 b/*--ł^uZEł|Xr QbɃ&bAOiSɴXdZ,i2-b2MU$eJXLS`b2MłeJ4 i",TZ,XXXLSi`b/*-,T#XGdZ,i2-ł&bAOiɴXdZ,ȣ3N=M^TZ,ȣ3N=MłeJ4 i*-1łeJ4 i*-,TZ,X؋J=MvǂcAݱXQw,(=pwpw,聱; ;Fvǂe%XLT= c/;Fvǂ- wǂ|m uw,ݱQbw,cA :X}??c%_#s~W1pc6vZݯc i{|L11[멘ٯ1f6ד11=8vo?Ʊ1'űSqd1ƸAƸAlhTlT1yyu3 gMϘ7٘7٘7٘7٘7٘7cyyqq{ǸASldl_oӹcicic1n1nHH}i)6-)6-16ƿ'icޱX}o>k ׅS m>4vg)6v)OƸAS md md 1ƼAƼAƼAƼAƼAƼAƼAƼAct)6 *6 *icޠicޠicޠic Cgc11 }gl>61iA;tBgAA~ 9tBgdt,QeOseɖY>!ɖًjyeًj} e%ȖY>!ZfhЗ [fAO-2 ie,T,x{Q-`Zf2M^T,XYLS-`Zf2M̂ej=M^T,x \YK`,x χ\ًʕʂi},$x{Q}(x seAO\YdEʂeʕ4+ i- w׮+hA~ ZOQ\A 9+hA>IqEUЂ| Z_஠4UA i,T4%X Z|3ߖO o~ZmO Bb?-ȇH}!iA>Ir?- /$ӂO iA~O~ZLS`i2MӂeiL~T z&?=v+*ϋVg~Z3a?Eӂ iAτO  ӂe4O i[ӂe4O{Q`i2M~ ~Z||ty-.ey-X3 /$ׂ\^{Q /$ׂ\^{QmͶ6ۂ~Wl qmA~gf[ m/wnf[Ol Ͷ 3v-l i,T-Xf[LSͶ`m/,T-Xf[LSͶ`m2M5ۂ&mA>%qE5ۂ|f[Ћg-ųl m/l mA>s-ų4l zl=M6ۂ|f[d-i4l{QͶf[d-Xl i6ۂej4}m2M5^T-Xf[LSͶl i,TAfق|d,[e{QY Y;#kgقlA>v-Gβ,[e 򑵳lA>v-x}d-i2lAOY,[LSY`"2Me^T-XFLSY`l2Meقeʲ4i*,T-_w-X,[LSYe iQ,T$i24e lAOY,[d-i2Ygق&l/*Ygق&l2Meقeʲ4e tl2Meقeʲ4e i*,TEeق&lAY,[c-(gقO 8bI8e z`̲#l2'q-XY$βyƉ`9KÉ N'҂9H :IDZQ*&҂R1t苉S_Lb"ŖHn?~Aa?m~e϶u_/6v1v1z6vu̯i~_Ozic'ic9vl>d9vdq=~7~7~}1ݬy꧍y=1oP1oP(cޠPƼAݟy;OOOOOO{L1oP1oP1n1n|7~c꧍q짍q˽ͯqkOq쵍q쵍Ǹ]Ƹ]SmUm-=^ۘ^ۯ6ƿ lcޱ}Nolcggm>7Md+ݧcټ@cjgF3c1nMddd{LM1oPM1oPM1oPM1oPM1oPM1oPM1oPM1oPMcjyjyj=&ۘ7&ۘ7&ۘ7&7[v޲6v2{[wׂ ^wׂ|]{Qݵ cwׂ ?j]{Qݵ}1IiAmYO ozZ>%zڋyuzڋ}!zZ>FigiAOUO i,T=-x{Q`i2M^T=-XzZLS`i2Mӂ&h/ϟD O6Ђ;x dE5Ђ~?@ G#}E5Ђ!}42n=M6Ђ&hAO @ i,T-XZ>s,?SyQi tZ-+NjA~9Z_m;Ni 8jA>gqZ-oV i*,TZ-XZLS=`j2kTOAFق9e d-蟓Q`9e IlA_([e d-KQ/AFق|l([_;,T-X([LSQ`lA~eZKQ'%~}Zp-KŶ /Aۂ]l{QŶYl ]a-wŶۂ|b[Sbۋ*=mA~0Hۂ`-]l m2MXe*4Ul i,TEۂe*4Ul i,T-Xb[LL9 (:rnA>Qt-ˁ9/܂s sn/*[Os snA_̹=M܂&snAO9 v;4s z̹=M^T-i24s i܂e&9`rn2v[LS9s i*,TE܂eʹ4s{Ŷ vۂ|b[]l zC,Ŷ۟ vۂ|b[]l mAOŶb[d-i,T-XFLSŶUl i,T-Xb[LSŶ`m2Mۂe*4Ul ]l i,TEۂe*4L z,=Mۂvcz?^ؽ]ؽ1kc|=]cꮍv]?t1v?P1nݵ]]]ϳ=[1oPݵ1oPݵt1 6 Nc}Ƙ71oPݵ1oPݵ1oPݵ1oPݵ1oPݵ1oPݵ1oPݵ]]]]]q=777}=yS1=yZam>x4v'cЏ6v4OƸAvSmdmd1uƼAuƼAuƼAuƼAuƼAuƼAuƼAuƼAus6 6 :ocޠ:ocޠ:ocޠ:oc ;oc|oy{}l-;oc|oy gς|{y vEuނ|y "qEuނUk d-?}3 Uk Zp-ȫUk 
r`-ˁ}JEڂ)/ڂ&km/,T-XZ[>Fjm2MڂeZ[LS`jm2Mڂ&k/O4͵6ׂ :x ]asE5ׂ~W\ އ)}E5ׂ1}1gn=M6ׂ&kAO͵\ i,Ts-XZ>4||_>  CG 8w͡#pA~\`;G|܋4w i*,T.X\LS`.2MEe44444C|C\uA+L0Yd] uA>r.Xޕ &|d]Ld]L4 ;'eJ4 i*Y,T.o z ?E^OQvWd]K`.%0YuA/ɺ`YBa.)&|d] i*Y,T.Xj i*Y,lw5L4 i*YN9M'난uANɺ [un _x6~ !zA>JtC/X^x5LpC/7^TC ~9A~#1|^'{/sw}/a}/X^\ i*,T}/X^LS`{/,T}/X^LS`{2Me=M||4ߋJiV Za/N4_ |A>>v/N}04 zL=M<4_d/i24{Qi4_d/X0 i; eJ4|2M^T/X4_LSi i*4 4{/*i N=?ڟi24 |/X|2q/ȓGN4K4i_t`/Z1bOp`/2{A{ @{A.ށ {x- >ޓϮϮw ߏϮ+&]3{{]̯]{ z_Ogײ?wg}u3n7g {]Gϼ7xϼ7xL| ޷yg]{yw}3o} ϼ7xg]{q} u3n7g {q}vP~{f9O~OyOyQoyg~g~g~gױ5Ϯ35ك]3kyf8v _C3wJu(gשϸkT ?G]?Q%]>:~v:u<|~vs:u3nU±J7*g c*g]%cw3o~ UϼJ7xW ?*gJ7xW ?*]%yw3og|P:%?uDno |3T E~& x\ǂ x ޑAD_Hu(t[W$0xG9wE9xEw$pыG$pыG$p:#yE/b \,Ӽ#ew$p: #ew$pLHb \,Ӽ# +| "K\|:M\g:u-xwDE>-UpDpDpD0M.i2ͻ;يEOkTE>PApѩ oT\t*.I |N"1VAp oU\3+4N..iޭ2ͻXy4b]\,Ӽ[;ewApL..i2ͻ Xyvg> ];m%Z%Z^^^|jᢗjaO[]-\B0xW 4jb]-\3U 4jb]-\,ӼewpO& ޥE?wioOYKOvsf7aE.7,!L! RpL.ia2v0b&\,l7K.ia2r0"0"0"0"0"0"9'iS;\C&~wE>~Vp;\by?"?j; ޽AEE.Dp'z|"w;\#OzAp5.i޽E>SpL-i޽2ͻwXy4a.i޽2ͻwXy4b;\,Ӽ{!.1b;!.BB qbC\crw qb|L"+ 1EO1EO1EO1EP qD qD qD 1x=M=M4be12;Xn?C\,Ӽc;Xy4b!.i1AyjD;\yjDE.Ԉz<5a.Ԉz<5"OwS#.z.z.z.z.i޽2ͻAXywpLn,i޽2ͻwXy4b;\,Ӽ{ewp- 4b; ޽2ͻwXy=M=My"GEOEOEOEOEQpD0xyXLEO2?>"b;\,Ӽ{wXy4b;\,Ӽ{ew0x4OE qѻE 1XRE!.b-b;C!.z!.z?%[wSE!KH12͒/R qC\,, #_Tpѩ7$ V@IEސ4\t I`K!iȣPJ.rJ.rJ.:ir+ews˱|LFݟ?cyL1 %Θ_k1 %f(1e\eXx08v?ǸAǸAǸAǸAcT(qT(1ly %yu1 &cT(qT(qT(qT(qT(qT(qT(1ǼAǼAǸAǸAc C)8 28 28 28vJJJqu: -6y6y6'jSqTqTqTqTq>= *8 *8 *8 *8 *8 Mhc6cdq>'>v9nq׌6׌Ʊwhc6co-53)8vaa>8 2c c )S6h7h7h7h7h7h7h7h}1EǼAEǼAESqTqTqTmh}oc|m G|+m pEELp1x2 c^|}5xA GP\Ӌ/*h^ rN/bлez12OGb>n^ ާep*1XbLS}*EeJ/4^|Q`ҋAO _[ tk˺b+Kg>A_+ H>VR>4YW z+=M&/iӦbLSu` HLo ΃z~ |Z  "W$ 6tE2/] fE27`yU KlW$ ",TEU$e #4F i",TE2XdLSa`*2MU$eH4U i* +AO9g #=Mf-&A>pq2i2k4 zZ=Mf-|ed?mZ4 i*k2Me-eZY`2Me-eZ4 /*ͬeo6A?GAvWo6AZ˛ޘ sY 9k33g-eZ4 i7f-eZ42Me-eY et2i:k4 rZ9Mg-惜惜A>s3xb1^A>w3ȇb1^遼[l2~Y|Ql2~Y kA>r-37U kAZfПCe4U iY2eU4U i,T-3XZ拪e4U i,T-3XZfLS`j2͟2M4Q GaJ3cNi}1UƔfWSA{pJE4ʘ ؃SAqJ3諌)͠ɔfdJ3i24&SAO)͠ɔJi=M4&S2vo”fLݛ0,TJ3XM i*R2M4eJifLS)|QA 8A1~ 
򠏃AqE1p& byA 8A1< fd3i24 iiT3X fLSA`:D2M1e b4 i*,T3X fLSA )b4 i*2M1e b=M1&A.r3i24 z b=M1<\ fdE1<\ fd3X fLSA`A.r3X fLSA`2M1e b fLSA̠Zfǒ\ zԬe=j25kA %Y z,*2<1Z%`ZfL4\ k2r-ݼ|ke>6/nyb+%y9)7/\A e_A̱?g7z~dn*ƼA1ǼA1Ę7 7{1oPwcޠ#ƼA1ǼA1ǼA1ǼA1ǼA1ǼA1ǼA1SsTsTsdsds>: 2c c c cQ1nA1nA̱7 7 c by by b'jSsTsTsTsTs>= *9 *9 *9 *9 *9 M c bcds>'>vOgq b׌A̱`w c b3co-'2c)9vaa>8 2c c )9 )TsT11ǼA1ǼA1ǼA1ǼA1ǼA1)9 *9 *cޠcޠc c1fW5s2x es2 u2x|!}3x 0 r@I9!$_TH2 1$ g_TH2x zC Ilp/XdLS!}E$e I4|Q!`BAO!}"ז!}-DI2$?!}R2xxQ!ɠ/{$I}<"xG{Q!}'x z I=M$&CAO! iiTH2X I2M$?忖 2ծLU|f_%||)l vEe/|@e_ :{䗃^Aˠec2ȯ jg/e '4NzQ`I2Me^4 i*{,T8)XeLS`2Me/e '=ed2i2{4 z^Xˠed2i2{4 !AO I2Me/e^4 i*{g/e^4 i*{,T2ȧ$^lWo6A^f?g*{lWo6A^˛ޘ s 9{1g/e^4 i7f/e^42Me/e et2i:{4 r^9Mg/ANANANibr`iD5 (X z`iy.5 OX z`i=04_TM6^SA/ z Lm6|df z Lmf C͠? iiT*XՋJm4 i*,Tj3XfLS i*,Tj3XfLS`R2M6eJm>Țfg?\|Q5 ~ĚfkA_Hiy5U BbM3ȳiy,5͠/$4&kAO5͠ɚfǒ\ zi=M4&k/4Y zi4k2vfLS5`f`M3X拪i4U ijAO?eQ:Ar3S#pf9)w8<'pyN I9)w8<g zp=Mv8&;2?m4U8 i:2Mep4 i,T3XgLS`:As3XgLS i,T3i4 ;AOΠgd3ii'w8&;/i'w8&;2Rr3XgLS i,T3XgLS`:/,T3i|#Ar33 ztxF:G:\_tKEO~<Bc < ?<}rSs̯ c%7Z_cs5 c|Y|LϱJ/:/*{LwcXYYYY=;1oP1oPt1 9 FcmƘ71oP1oP1oP1oP1oP1oP1oPTUUYYq,x>7Ȃ7Ȃ7Ȃ}j|dsds>0> 9 cޠ cޠ c)TUUUUy*xy*xy*xy*xy*xy Jc*ic1nα9vvY|Lα}sm0}1U:c/B'#c +c +c +9 )S6J7J7J7J7J7J7J}1U:ǼAU:ǼAU:SsTsTsdms>]7Ɵݵ}D.ߵ _6_Tm3}2x , ,1 fjf63^jfp>O/,T33Xff>y2M53ejfffD>:73~m Gǂ|$ffЯ-X~$Tc>𢚙A_lfSB>:AO̠ffd33i4|Q`6?mjf4 ާejf= ze3NqSA~gSA>zA_HUߛ_ t84ȧ\ t84$ [ICV Za84k !w84 i*=,TzECeJO4 i*,T84XphLS`¡2MCe 4 i*= áAOРphd84i2C&áAOРphd84g=MC_T844 i*,T84Xphk i*,T84XphLS ܸ 6h'۠A???GOA~? 
ݠ G ۠A>s4qn4 i ,l7hl4 i4Aej4 ۠ANm 6ht4i 4 rn9MA۠AN AX wJXѠwhлer4CNyѠwhлer429A=Ҡgi\# z&=H {Ap4g=H_T4g iП`4iiT44 iG,T4XiLS=# iG,T4XiLS=`z2MHe=MJ?󯚙/*siЗ_53\ y(΁ 9 As 5Cq@jH mlR_T 5cfy(΁ k:4H z 4H iiT 5X@jLSu`2MRe 4H i*,T 5X@jH i*,T ERe 4H z =MR<@jd 5i24H z yˁԠ@ yˁԠ@jLSU`2MR<@jLS`2MRe 4H}Q`AOԠkH z XeyX́Ԡ@ yX́Ԡ@jЋ)2R^O)9bi9,,2R<@jLHEgNN.2stt[⒙ 9s䆜9 rCΜ!gNܐ3btQ2v`c=SdcʜCǔ9k}ɍVט9k_kfNSt ml=1fNǸAfNǸAfNǸAfNǸAfNuaS6ce9u0 faԭ˜7777777cʜyʜyʜq̜q̜ǸAfNStdtdtdt>e< 2s: 2s:v}99}L1oP1oPӱcʜyʜyʜyʜyʜGǼAeNǼAeNǼAeNǼAeNǼAeNǼI9}L_c1eN3cQ?z]9q̜>}m>%:v}Ft>X2cѱ/3c1n1n1nǔ999999999N>77cʜyʜyʜq̕s[?s2.9 @}0x# e4ז`ym _T4x ާj}6x}Q}6x iiTE@e 4}})ȧ x 18Ϡ_!f<2x?K`Eet3o * /*h? /* @ s4 *c4諌Р2@ =v4XTLS} i/,T_*XhLS`2Me 4 i*,T4Xԃ =M@&AOРh9 z =M@&AO 94}ݛ0,ӦMS i*,T44 i*,T4Xh4 d4Рϟv$XOAQ,РO@l7h Qhϩ I2M@e 4 2M@e`2rht4i:4 r9M@AN ht4i>i>i:-ӡA>u:4#NxC^<ӡAp:4ȣNxC^<ӡA/ zL=C|Zth3a:40L ӡA>u:40ҡA$p:4O0Cc:4XthLO\/*,T+XthLS`ҡ2MC_T:4XthLS`ҡ2MCeJ4 i*,wE<y]ѠNvE dW4ȳ<hЗ A3sW4cf} +4 z=MvEA_?%%hLt x2 sW] U:苭 :h\ r A\A.u}DCuбa@W0v1m`Y0v1ASccVtZkft5렏:v_ic6v_gce6v_eN`c c c c c7:7:cTt?cޠ7:7:7:7:7:7:7:cyyqq݇ǸAAStdtdtdt>< : :vYY}Lu1oPu1oO٠ꠏ: : : : :vVUUUUU&aX}Lб}l>9}:~v@c' :v5{ݿH1v?}s}xdtdtd1@ǼA@ǼA@ǼA@ǼA@ǼA@ǼA@ǼA@ǼA@C: : cޠ cޠ 6jcp(\> +6f> pژ(\>Odjcdh>OS/,T ?ߕ ?_!1 kw3Wm}'/ އd3x ߂},3 e3x. υ&ۘAOm̠6fd3i4|Qm`ژ2?m4 υejc=v;TA( gÙA~pf_8Upf o3 }='='1bA|u1bA|uՙcA~yXg_9XgW6cA_ٌu4ib2MejAXgLS-`ZP2M:eu4 i,T3XXgLS`b2Md3i24 zu=M:|Xgd3i24 zu̱ΠX~`b2M:_T3XXgLS 98,T3XXgLS`bA>}p3ΠO:~? ڏ"\A?gd3`y? cA0#%:|XgLS`b2vXgLS`fAc3XXgLܠ94 ru9M:cANӱ Xgt3i:4 ru94i=MBQ <A(p 4諌Р2Bv=4 X zCy gX}Q Z2MEe퓚`"\/,T+XzhLS`2MC_T=4XzhLS`2MCe4U i,T=ACQˢ]>~vg1ϮcwY g/_|v:7g ʢq(eϼ,7xE? eϼ,7xEg]ˢ] g]ˢcwY3o.~ e]γ-^xg[γ-^ˋpAwt:϶xg[~.^g?x\~.^g?|F4x'@‹aE~a_$@4\ a9Xx:x= E_H.^=M$@wtDtDtDtDtD4x'@48b],ۦy'@4袧߻ =E.kAF}E_.A [}E~>ha-{lA4i_-Eeh.-C4x7II M$][&24IMMEIO4I4&b6;yewjLN^&b杼Z,ӼWewtLn.iM2;yXy7I4&b$],ӼewjMEOMEOMEOMEOME>PtDtDtDtDt$]4$ {04I4=eBM2ͻIXy7IwtLn.iM2ͻIXy7IBMEh.DtOA_~? 
sj~?עϟvs7IQ&?j$](5I8JM2ͻIXy7I4 ewtLݠIXy7I4 IXy=H.E@tgT3])LˋpL}}fȳ .4j>Q3]颯OL}A5]3DtBtOU3]P3]P3]P3]Ly0D5Eo5]3]Lb[,l.iQ6ͻfXyL4i.i52ͻfkewtL.i52ͻfXyL4b]3],Ӽk&R:]BNyPE_H.zH.zH.zH.LR&R&R&R;ui"ui"uXj:],lw5H.iީ2vWb: ީ2;u.Dtїjf5E_.r@tїjCE/*ce4v_EcE4v_C)5v_Ac4v`~S:v=xCcc~5C>0:±"O?CǸACǸACǸACǸAC#}1oP1oP/7x7EǼA: _1oP1oP1oP1oP1oP1oP1oP8q>x7x7x7x}yd< 2: 2cޠcޠc!Oyyyyyy0kGc#cg`t>D9Ɲ1"c cɱcG'Y GzǸ3F@ǸAF@ǸAF@StTtTtP1oP1oP1oP1oPбhcyy>77\gS:}`+x> 9#ɲ薚Efof7lGG-G* D<{n8_ [ [|`+/p>xPp>b8_ 3<?Qp(??g8(GIB>v3+Ėg8% -p>![|$? ES8@yP-/[ɖgizly&[ɖgizlyT3\`LS-p4c\u|J\AB+u|T\guu|L\g諌ΐ9s!9yPO i*_L;wK`4X= h%z T4X V@C/АO\ C?X i.T4\W@eL4: hLSp"S2MU@e4U i*2.T4\ hLSp*2ME>d44Y =MV@COd4'&+ hizdvk hLsc4\?p4\ hLSЃ4U i.T4\ ly|zgw-σm[(1yC>0B(| ^/k~]5nk C׸A>q }^}_m׼A>y }~j׼A>yvkޠ~] *yT75oPkޠB׼A>?S75oPk C׸A>矯q }~5nk C׸A>ͧq }^6x 2yd3>y }^6LkޠB׼A>y }^6 *yT75oPkޠB׼$y^M]<^N<qKy~Ե͇?S||÷1k c׸A*G^8 #x~ }5e'0p^8d p>sP p> |PC&04D8VΟApfi2zL`&fi2zL`& i*.T3\f8.TC&0C>?u:I&0C>Du3#,'0C>@p3Uf 8 ̐f}/fLS}pv'>fLS}pv'>fLS}pr'>firc!>firc!>firc i*r.\>\I_oyW 0]av3qg7C8pv3fw͐ y?0 ?069C~q3s=&giyPpMpU iMSMp i.T󠚜2M59ejr4 i.T3\&gLSMp&A59Cms󠚜!b3\^!59C_lrs.&g苗MpxM/&gizlr%x`3,͇gg?S|6|||f.1eym>m>vd3,yJY^ *eyT75oP)kޠR׼A,>S75oP)ϔ *e[tQ1?,q14j =MCOƃ 5&CPci2ىCPcLSypB2MC>s1\6MC>s1\PLSypB2Me 54 i*.T1\PcLSpB2z 5&CPci2CPci2z 5&C!?q14j_|hGgp<;ߜ?4d24ـ =M6 CO d2g7 CO d24ـ =M6 C~ diype2\݈.T(\݈.T2M5 ej@4Հ =MCB,9|cW%Яnye1˒cȏ!\r AK/^C_,94[e*94[e*94[CN%ǐt14]r 9MCN%ǐt14]r 9MCN%ǐt14]rSp~{Pp>Ο7jyH8k|j)rt=Y{ =MCOd14Y{ =MCOd14Y{ =MCOdj2Mue=4U{ Oe=~v 2ISU }2S!p8NA S!9QSU )pR!dLSp4l?SɐZ\З !0r`a2dˁɃ*LX }90r`a2&COprK2M&e*L|dLSprKU i*.Tn)\dLSp 2Me*L4U i0.Ta2\҇,L& di0z,L|ldi0z,L& dQ\ =M&n,L4[&e-4[&eY 2M&e*L4U =M&CB,L|dWЯeny(Cea2d?\ /^&C_,L4[&e*L4[&eJY4[&CNӅɐta24] 9M&CNӅɐta24] 9M&CNӅɐta24](}P9@ymP9@3P?{tfgك9@3nʟqPDg!qPDgqPD78( ʟM'nʟysg78( ʟMl ʟysg78( cEl: wmEl:(Ϧ?l:=|mEl:ϦX?Ng-b?l:`x"9Lϼ9ymEc?Xϼ93opEc?N\]c?ÿgONGSNϞN&OsN=<=?>L~8= ~hcӇO¹_pXaC>St!iʇ4~E+zW>4ѯ|i_D򡧉~COʇ&=M+zW>4ѯ|i_ʇ4xes2͹_p2͹_yʇ}_'dVʇ|>~CTʇѯ|O 
ԯ|guW>+ùԯ|LsW>?Vկ|Ls<\9+ù_poʇ|R~E+r@!ר_З}9_Зܯ|ʇЯ|ʇ|z~COʇ4es2͹_կ|Ls9=\9ǜ¹_pszLs9=\9+.Ӝiʇ4es2͹_pܯ|LsW>\9ǜ._D򡧉~COʇ&ɋ=M+zW>4ѯ|i__D2oѯ|LsE29pv~e+.Ӝiʇ4~ET(ʇ>Q|p=%gq33W(ݸ+} +\|KʇQ|LsqA2͹Bpv es2Eʇ*9MU(rP>4U|iBT! CNSʇ*9MU(rPs29Zpr eܸBpr򡧉DCpPDC ʇyAʇCRԠy ˇS)˗COˇ&ʗkzLs@2͹ˇ4^eMʗ?4es2͹|p\|Ls._>\9/.Ӝ˗iˇ4es2͹D򡧉éD_x/N]~Q|p._>\^|pye8/gʗdCL@"ʗ3ˇCOˇ&ʗCOp._>4Q|i|ˇ&ʗ=M/.P|LsB2͹|pv'esߜ&*nЗ}9 nqˇ|qˇ|qˇ< C_[>4|i_'կ|ȓW>QE+]A!ϫ_կ|ȓW> +yx^pW>y+.Ӝiʇ4~es2͹_pܯ|LsW>)Z+.ӜipW>\9+.Ӝ=M+zW>QE+zW>4ѯ|i_D!*_D2yTQʇ&iʇ4~es!*_pܯ|LsW>\6͹_pܯ|LsW>\9+.Ӝ=M+E!V_pjԘyRʇЯ ~CT/$+BB/$+̯S2%?~CU2M+A~y$\'rm^gjC]qmǵyq|6gW^OI_ԯHkk~ԯدk|دLk{m>.|mxLT1e7~gW^ٯ _ym>~\կ _~\կ xͯxͯx&xmxm>z&x&xmxd|<7Ⱦ5n}kk ׸A?S75oP}kqw xT7ϲAy;^w xT75nMkkkT6666kMkkkkVTl>sm>w xm>sTS7gj*^T ~âpt9{N C8? 1*p~ΟcăJ'p~"Οcp҉tbȇDN'4N i*.T:CCOd:14N =MCOd:14N =MCOd:14N#sW1 w;.ܮOveTW1\LLS]pmʟ[Q1\C>xqCC_e5|\c諌Ws2**c1U\ci2as\cLSգpr2MC>t1\QLSգ54U= iz.T1\\cLSpG2Me54k i*.TCCOd14k =MC~\ci2z5&s\cȏTk =Mn54fe4fe5g?4k i*.TCC~ "coE 2C~"AC_e{?*2xPEW2C_e,24Ud 2MŔe)4{CNEƐt14]d 9MCNEƐt14]d 9MCNEƐt14]d<"cLS1pro"Epro"ci2s\c1z}5e54Ie54I..T1\OT(cpnz<6<LkkB)xm>|BC1xC)xm|ͧڽ6_ʵ5^ =^zLk C׸Ag;?/kޠB׼A?kޠB)x߃=(x߃|)xx1xm>z)xx1x׸AFq2^G_e 2׼AEy2^~(5oPQkޠ׼AEy2^^e *xT7((ȵ5 ^K;c`|wl8rk8?\AM8o 1?y \Ň 1_7F gAp> .Tˆ2Me #󃷃 #4F i*.T1\0bȇ*#4F i*.TCCOad14F =MCOad14F =MCOad14F<0bLSpˆ2MpLSaF "0ÈUx "0 È! 8_f#>F aĐϩF aă*4ap /F i.T1\0A[Q1\C+ #F aWÈ2C_e #T1U0b諌ad1C9COapC2Me #|<0bLSpCF i*>.T|(\0bLSapˆ2MŇe #4F i*.T1\Ї #&È0bi2z #0ad14F =MCOaĐk8z #nF in #4 in #4:0bLm #a> ۇZU&aO> i/.\n?><~}.\n?> iY &<jbݲz&-!-лe51CwjbݲΙлe5񠪉2MUeˍ2MUeˍQWe҇sb./*A_>C|1s%AUB_ٌ1c JA9f14c ,Ji2z1T%(\,1.T%b2MUecc iMS1pb2Me14c i*.T1\cLS1pbRci21dz[ TR/<{[ ^py;)CL`o1+^&!{&g{biP{biz-To14[ =Me۝{2Npz2N_-sb\-~bˁЗ{[ }9r`o1bˁŐ[ =MCO*3aR1 H'C|L*;'C!;fR18Tcr J'_C>hr 0? 
C_ |@`G9U: in'2MN*.T$\@`LmT=0\OT=0z`KO\ } d=0%zAC_Y =MC>r=04Y i*.T=0\z`[4 i*sPp=2M{e4U i.T'\z`LSp2Me|z`iz&끡z`\ =MCOd=04Y 끡z힚pvOz`LSpvOz`LS4٦6MCO_ Za 0hT%g@`k }0Za 0@`LSp74; ejބ4; CNӁt 04 9MCNӁt 04 9MCNӁt 04<@`LS͛pr@Ápr@`LS͛~z`QzԬT$Y =jùsPp.c5\ =jCp5U i.\R] i.\R]<ܥ.\Rz(.R:% wE)pyW 0J]a0{%`0JxP)?0% QpNT0!(ai2Jd04%U i .TA0\`LSwA0\`LSp  z,& ɂ`i z,& ɂ`i z,& ɂAe*4U i q 2d$0 G*hd$0C!GCّxq$0%H`| i*.T'\H`LSp"2M%|e4 i*.T$0\χ&#H`i2z0d$04 =MFCO 8z inԌ4 inԌ49H`LSp?ʩL^C i*.T 1\bȳ͎!4C i*xP1pb2MCO1d 1RCO1d 14C =MC u 14C<b!&c2Me!4C y1pb2Me!4C iMS1pb2M?d 1bo1ĐgaC }1*c 1Ub諌1ĐgaC }1*c 1UbLS1ăK1pb2M*+67>NԵ]6Lk~;6Z;ךk|;Lktm>~mWҵBL?l??߯q~5nk {c5oPkޠzixwj^v4ixe˦5nM4ixe˦@5nMk ׸A6 ql~5oPMkޠ#ixT75oPMkޠ惮׼A5 yj^4 ~p~OxΏOp~OxΏOp~O*'o*'o*4U< ix.T0\Ae*4U< ;U< ix.T0\aLSÐw0\aLSpxz,&aixz,&aixz,&Aeʭ4U< ixΏCO'!xxPu'! x./A?Y< Ðv,2d0\:Ae4U< ixxPÃ*a|*>Y< } x/AC_,Y<ApzBΉcЇ`?Їsb9'weN >>{8=kz8=8}pz Ήӳ4esb291p esb291pz@Ή4esb291p|+1p|LsN >\9'/"1Db𡧉CO&=M$zH >4|i"1Db𡧉COpN >\9.ӜiΉc퇞&}"1} CH >%_J >{|K|C>Rb! `8P..A$.Ӝs(|LsΡ<\9'.Ӝ sN >C_H >%Db/A$Db0} "1З =M$y=M$.ӜiΉ4C>Sb2͹+p esW2͹+p|LsN >\9'.ӜBiΉ4esb291pCO&=M$zH >J >4|i"1Db𡧉C~COpFb24e=5i]pN >\9'z>`sewC~`8WVzC;\|++?sewéлEE2=*iΕ4>4U|i"TE!CNS*9MUr>4U|i"TE0+iΕ4o{U.\~۫"p\YyLs>o"З*xP|8]¹"pJ<<倊éЗ*}9"p<4Q esE2V4e*re*i4[U.Ӝ8\|Ls<\9q¹"pyLs<4Q|i"p<4Q 0CO&*=MTN=&*=MTN=&*=MT9pves'+i4[T4esE2͹"p\|Ls>\9W.Ӝ+i4 es顧COS顧CO??|~ |sbowRH >$/"1?| H >$zH >{%zH >4|i"1Ή&=M$.|LsBb291ΉW;'._˅rEbr!͉H >C_HH sb/$$BBb/$$zH >4|i"1Db𡧉esb"BydZ |ȿZP(!jAp>_-(PCW(!+ B5 >(p |ȣ >\9.ӜC |Ls>\9z>4 | >4 |i"D(𡧉PCT(𡧉P`8B=M.ӜCiΡ4PCT(29p |Ls>\9.ӜC |Ls>4|-j|ӥj>C_Hh>C_Hh>RBB /$BB 2͹-4;c拹kvm~o׵Lk{um~ԵLIϔSIkk~=טדIk|=LIkǦͣ3~_^___ 2wd|3 *wTwl5Lپk}׸3f'Lqg}l5پk}׸3fGqg];lߵ5nپk })wT7lߵPg] *wT7lߵ(5oPپkޠ}׼Aeq_o#?H ?G8wMIA8??a8?_ IA8??a8?_ IA8?=Cτp~Ճ*Cp 2M@.T0\Ae*4U BU i@.T0\aLS\ i@.T0\,& ai@z,& ai@z,& ai@xPpg2Me*d0aȧT.T-%aGU.|a¹@d0cSC>r0..AeT0\ZJLSp U <a|*aЗ !@d0%aKƒ*Y } @z,|ai@.T0\aLS\ iI.T 2M5ej4U i@.T0\&QLSp 2Me*4$d04Y =MCO@z,& ai@COƒ=5 2힚pD2힚pDU i@!;ǎ`#9wz}sg%<`Qg%z}sg%\֧JLs͎`LSp;tG04 9MwCNtG04 9MwCNtG04 
9Mw#.Tg%\vG;p;p:+2w ժ@! ZC_H,Tz#a pT0 /$ù Bb0ai@.T0\aLs3v0\aLs3vrgaLs3v0\aLs3v0\nAe49aLSݜp9ai@L ai@z,& =MCOp}& Ause۝1 2Mus@.T0\@xp3f0\aLSpi@.T 2Me*4U i*.T$)4Y =M9z,& !H/< «@./v'a* I@g @g &@g \ =MCOƒ*& ACOd0\I@.X in\Y Vpj_,˅re0\.$?d0a ƒ*X }!@Bb04Y =MCOd04Y i@!k/\<`ȿ p 0_dk!zCqk!OֻXk2MeT 0\`LS5d 04Y yv5d 04Y =MCO5g'] =Mk`LS5pj2MCt 0\`LS5pj2MeT 0\m~`诖5_-k!OzBb 0` 5k!OzBb 0` 5pjU inwƬ4Y6av]߭Tt6WN}g*]kߵ5f*k,]k5f,}ߵya_YL?pH?n?mq,]Y wm>6~^U w;Sw5ſk泠׸3>Sw5ſk׸3'?S|.75nſT wT|3y*]U wT|t75oU }ݾp"?j'IE8ΟT' O*>d0?gIA o8.Èd0?=M CO}p0"4'A e4' U' iO.T0\>aLS}>' iO.T0\>&>aiOz&>aiOz&>aiOxP}ph2M ed0]&C+L|vaȧN72&C>u0S4'C>pr)!9A.T0?1 i*.T0\A%*A.ߧ}*Ad0%a;N } 2Ad/A&C_L&!5:AzL4 i*A.T0SG'e43:aLS9prF2M%eJ4 i*g.T0\aLS p2M>d04 =M&CO d0g4N&ai2AzL dvOaLsf0\QLsf0\A%e}a1Az}L ƒ ^Z >&*Az}Z ? ^Z ).~g3A.Th%\v04 9M'CN t04 9M'CN t04 9M'CN ƒJ4Z i. BO d0)Z'CO p92Θ p9 i*A. <3A.T0\aL4MS p*I2M%eJ4 i.T%)4 =M&ùzL&!CO /< ~b0\^x% ^  3 ? 3 aȓN&ai2AxP d04 inwRL4;)&eݾ7/O.}pʶ߼U>' }OxP}W2 CO}d04' =M CO}d0\>ᇬ ;W2\ w <<`ȓ ^+!Dŕ` <`?Qqe0\AUe 4U =MVCO#] =MVCOde04Y y<ҕde*!G2z 4U i2.Te0HWe 4U i2.Te0\AUe ~Ȑ`/!_C!k:$ZaH0`k! 
C!k:$ZaH0`k!pB in7 {Ixm~Ooɵ6!um~;ƵL Lkk~ ׌k| Lkz'ql^擞柨׸Aql^ 6L?Gyj^ 杩x;S+wV5kk[x;c+wV5k)k[#ğxm>@|d+7Vgj^ xm>Z׼Ayj^ xm>xT+7V5oPQpL!?SpLJB8Ο܅g A3p+?S pl AY8?*nf0 *B Уf0?=j6nf0\Z p2M5ejT0\aLS p~uP p2M5ej4 @ p2M5ej~aiAzl&aiAzl&aiAzl& i.T0\a82!3!`诖pjD ͙D QgC_H |`p |4A%QB>sf0\`'$ 4D i*3.TfJ$.ߊJC_e, |`諌%WK2$*cI0Uƒ`諌%O"] =Me* 4U i$K2MUeTI0\*QLSUpJ2Me* 4U% i$.TI0\`LS%pD$z, &Kɒ`i$CO%dI04Y =MC~`i$xpmfI0\6$.T(\6$.TJX`1z`бJcD =0*z`D =?JcD pkp(2ײc!X`i:rc!X`i:rc!X`i:rT,0\$JLsXײc2ײc2M%QeT%#\Z?d,0WX` d }!1>(9r˂薺z&S؈x7 %v뒒p0 {!=X`'cVbEMرbEdXرfy2v,0X``Q<;XT@J59P7EM%n&cV@Ft,0j2h5 ZMFd,0j2ȍZMJ5ۓ1cEM%nT,0X``Q=3x`{2f,0X``QSbEMJ5 ,j*XT,0dQ`QSɢ@X`d,0pOZM&c<^XIr ,=I1X.b·')˅W,X` X` u8-ZM&cV ZMIf{b,0X퓗OQ,0S'/ceO^,S,z1YXz1YX`d,0j2h5 ZM&cEMp_ :h;P@nu/&0s\}܎p_ 7:s5Q@ 5 ZM@p_d/j2h5 nD&}*݈ZM 5 ,j*݈5 ,j*XT/p_`QS ,j*@T/пm@t/Ӂm@O= t`/; t`/Ӂm@O9m ,j17l>lk0商W|~Ôް}0_%{x]{8laJ %7a8l9l1 { { {]pTboTFo]RFoؾq]bFo]bFoؾq]bFa K K K K 7>Laatat)7*7*7l0eAeAeAeAeAeAeAeٓۗ^*G/KR|+p_?P})>p__xKRʑK[})>p_ܗK[迖]@Ł;}#pu`Hz"Db/]MF'~Į_`QM$v5 ,jw~EMucXT/_`QS]~\q/_`QS]~d/jh5 ZMv&~V]@ɮ_d/jh5 _`QS9~EMu d/H _ˮ_`k( Z~\q/ߏ Db/r]@ոw%\2t/_ l ,j*PXT/߁?Hu@Az$=WX \a/suU \a/su@:_d/:_`QSu|\t/ P`QSU ,j*XT(:_`QSu|EMe5U ,jXT/ Y ZM&|Vu@r/jh5Y ZMu@:߁uf{e/ P`Q=XT@ށ*Z6.эU &l%@kށjZ/DɗJ"ք$;{|ށ*5U ,jW7{TӍ@^ tc/jH5 nRM7{TӍ@ށj5U ,jW7,n5ˇ{EMDjD`k{ {k Dbc@%= Dbc/p^8Dbc/pz"^dc/^`QS{XTc/Y;<X,On5 ,j'c7*^`QSe0XT&2Ldc/j݈nZM6&{V@^ w#h5;P@nuc/jXT&ٞ ,j s{EM5ɘۓ1{EM5j5 ,jXT'^`QS{EM~*ZM6&{<^ށk{V@^/<{X.|{bc/\x5˅oORl  =@ =@pc/ lZM6&{h5 l5ۓ{E$ށ_>\ ,2zm>\ ,I2Č^'3z*Č^'3zV@Ɍ^dF/j2h5 ZM=d] ºw!c.ܻ yM۽@n=u.ܻ yT.ܻ ,jwh5ٻ r{wV@]d.jw}ZMT.ܻ 5ջ ,jwXT.ܻ ,jwXT.]`QSzwwXT]'{wS]{wz:w] w]{wr{wwX{wѻv?l]a{ö+>lo ۛv޴m߾7CoG6>lSm޴M~va۞aö>l}ض vFѴ0:݇m{nަ0;>NMatm۶~@nަ0:6݇ >NMa۾h}ض)uަmKA>M7lo}tf6݇mm{ަ0;>mv}tf6݇Mwؓ//" Hn߁{mm-҅wۺp[p{/s}}.\X?)= }û= }ûp{9'zxz~wBO.DMDžޅE6?û.,jޅEͽwaQs]X{xng{¢û.,j= {B䨇waQs]X{x5@.]h5ûjwDBޅV= &zxZM.]h5ûj.,j {¢ûp{0= -H= עwak$ȅ\'TB w!w'zxrZ= bޅ\R/pO\ȥk.,j= LޅE= raQs]X{x{BgQƣjw!סTƣjwGB]^ƣjwGB݅\UB݅EͽjwaQs]XܫvrTU {x¢ ܫv5΅E=saQs]Xܫv5݅E=saQs]Xܫv5݅EͽjwaQs DB݅VU 
&vZMT.+%U.]h5QjjwDB\RB]`{E¢f{E¢޹aQw@TvDžU RUq5A­q5A.p]hMvDžkow\hMڇwq!ߍ??{¢Y>\Ujjw!TB݅TSU vRMU.]H5Ujjw!TB]^;.,jWUˇv5ˇv5vDžEͽj.,^݅kQ U=p'vz"jwV ܫvnU =Pp+\艄݅H]h5QW.,jU {.p]Xܫv5˓vXU XU {¢fy2V¢b ܫv5˅EͽW.,j- {B݅VU UQU &vZMT.]h5Q[UjjW..ZU.]X[,5ۓ1v5]^W.,j'cTۓ1v5݅EͽjwaQs]Xܫv565݅EͽjwaQsU {x¢޹jjwDBnV.} BB݅V T.,~]X.|{Br݅·')T.,~ DBPT.=U yAU \U &vZMT.U &vZMT.,j')T.,j')T}jwak݅m]X&RpE2@T.DBBO$T݅H]艄݅VU &vZMT.]h5QjjwDaB PaB.Ua.p/]*]ȍ*]m*]*]* y@ y@ '_ & sr sZM.(]h5Qj0w!ک0wDa.p/]Ƚv*]h5Q.,j {aBSa¢^.,j {a¢^ sZMD܅?G.>E.]h#"wGDB.UDB܅6 m<"rrw"r{D]vWv~au~Y{zooP훱6lcؾtخŰ݊6N6lY UӝjؾSo~ߦAކAކAކ0;{f'{F'{F'{oF'{{zo{o{o{o{oSmؾYsdmda w=LavPavPavPavPa~avPavPavPoT}A9p_P73D/( ʁrf&p_P>P}A9p_P ʁ*Q ʁ}A9p_PT).U`QS%\ྠ\/sr/s|lf6s@l6=٘ uzf1w`l5fc.\`Ql6j5՘ ,j1/\`QSsEͿMM5j5՘;PsEM5\dc.j1h5٘ lZM6&sV@\dc.j1h5٘;PXEM5j=@..w2@~w).ӁU y=X jKq\kt).+K.F ] ,j&l5U LZM&[d-j2h5t;PI@ɤ[d-j2O'&nEM%J5t :nEMUg[`QSՙ3EM%J5t ,jmj*XT-́J5t ,j:@&&nVI@ɤ[d-ot LZM&&nVI@?r-j2v`{d-ٞ5t ,j:XlϚL5Uy mMhXm DWT"&)mmhXm I}M7ϏEM)ն@j[ t-jH5]m RMWmTն@j[ t-jvmEM)ն秫mEj[`QSmm*WXZUj[ZV=Xm;P@O$V=Xm sDb-p/iz"j[d-j[`QSնmXT-Y]m;<X,OƮ5Um ,j'cWjY]m ,j5rmEMFjZMV&m6j[d-jh5Ym r۠mVնUm VW&mEMFɘնmXT-ٞYm;=XT-j[`QSնmEMe5Um ,jXTX&Ld-j֮ؾh5Ym j[`MնrUm ,^նrۓm«@V}O`-V}O`-'\m 6mW&mVն@jہZMV&mE$j[`Q=IvmWV_j[`kۇ+me"WVDR크z"jہz"j[d-jh5Ym ZMV&md^-[2W;Py@nw^-W Lyy@nw^@x*Ϋr#j-Z 9h5W ̫ZM&jZd^@y@ɼZ`QSyjEMyjEMʫ5W ,j*vjVyW ,Yy@ϼZg^-3hW NOm?Y ,sE<X c<X cV@Xdy,j<h5Y ,כXSW+֞% E`,{՝%;PY@_>f}%;PY@Uw,{՝% qg9Y@nms,(% ̒ZMf&d,Yd@eY@,Y`QSYdEMeYdEMeʒ5% ,j*Kvddy@-5cXfy,R<hY ,rc@KXfy,5]#2.6l>0ņ).6l>;la`|3ņC1lL9laņ7a>aa~w} }j;ð0l,;l- ,jOAǼJ5:P_ _˘W'c^{'c^HyT+p?LĘWXDb+1@ɘW`QS1b^EMżT+W`Q<;u`y2v+Y ,j*X,OƎy58P1EM%(T+E`QS @ɘWd+; yZMƼ&c^V1@s+j2ub^1Wd+E`Q=3XT@ży5ۓ1c^'cƼy5 ,j*XT+I`QoSoSS1b^EMHZMƼ&c^gׁkc^V1@ɘׁarm{]ڪXm{Xb+\[^W={]uzڳ]ur+{]V@^Wd@&{]Vf{Xb+ ,SI@u+L%thLr$ t`+ӁI t`+ӁI@O&&\VI@$Wd+j ,)Xk)@VMխU bu+WխU ,WHխ9*nr?[ܴV n[Vխ@V h5Y:Pխ@n3su+jXTu+V`QSխ@n3su+V`QSխ[EMUn=@vV h5 lgZM&YvVd;+jh5 w@~+հ /ϰ} ۯ7cj~e[ 0e0uؾp>[uؾ%a]Z:lY0AA{9*e0媆qܙqgjؾjǝ)W5\0;sU8U w 3W5lߎ0媆\0:\ÔffzrUrUrUrUrUBrUrUyc?''uվf/Lk+ 
L%}4ppB9if5 L5BfPNfXT('P΁g5 ,j*sYEMr&ZJvGe@ Z oGe@ Z oGe%Ko,kV%7 l7 V`QSefaXTY+n,k5U:PeZEM*k5U ܗ*kV`QSeZdY+jh5Y ,kZM&ZVe@ɲVdY+jh5Y ,kV`QSZVT!pm dz-5Y*e dz-5Y\w<+ grn Y\p<++eg5n,j*@VU[UUD}ت e*Г@*UuE=y٪ e@t5U,jUXT*VU`QSZUEMjU5ժ ,j@&[UV@VUd*oaܪ lUZM&[UV@p*jUu`{a*yت ,jXlCU~>CՁQzȘ ݄9@}~nC|} _>?:tXԡfs*j:GH5 QRMsTT9@U t*j:GH5 QU`QS9sTEMQCEM9*G@2GՁ:U'sT9 z"1G'= Db*j2GXT*U`QS9 ,j*GX,OQX ,j'cQ5˓sTEMT*B`QS ,jXTa!j2Gh5 4&sTV9@Ud*{М QU 9Gh5 ,jXlOQ5:P9rTEdՁɘ9rTEMQ5 ,j*XT*ǁQ5,j*h5 QrsTAQZM&sT*q`{b*\$VU` T ߞت ,^U{[U'U{[U7VU 7Uh5٪ lUZMT*jUh5٪ ,j')S9JQۧ sTxUg*3GurT69@Ug*j2Gh5 QZ4:S!Cũ@;ĨԁJ"0*ԁJ"0*ʎJrRT 9*JrRgTdT* JT 9*h5 ,j**XTT*T 9*XTT*T`QSQRVi@ 4Tcқq* `LC4T 7: h `LCZ0a%_aaf? {TOaaa&ˇi67E }*۷'TFYFYozf0;To:qgiǝa~aw֟0;O8? 㸳4l7Ӱ}T}{O OSiTiTiؾQa? ? ? ? ? ۷ ? ? 5S-QxXk#_TKRj@Eg},p_J _Lྔ/K˃})@U})5p/K*:/5 ,j*:s*UEMEgJU`QSљ3d*p/^zȘ 1a'zȘ 1a'zȘ ,C&LXn„Ub&w&lw&n„U`QS f0aXT*&LX5:P VEM%JX5 ܗJXU`QS Vd*j2ah5 LXZM&&VV @ɄUd*j2ah5 LXU`QSVd*޲RA ʉ+UU +U@Bp* Tr)ϕlWJU W\ ,j*XTlMԁjMrҭ@;ٚ dk*S@a59=ٚ dk@=ٚ ,j*XTk*T`QSEMJpT`QSdk*j5h5ٚ lMZMǭ@Tdk*j5h5ٚ &[SEԁ:Q!ck*ܚ:P'=dlM'=dlMT@Mؚ ;&_:QɗN󣨩EM,j:[STӭ@T tk*j5H5ݚ nMRM[STӭ@ԁjM5u>Y>ܚ:|й5XTk*ԁ:QXTk@,ZSdk*-[SHlMCցHlMz"5>Po'[S@O$=ؚ lM5՚ ,j5XTk@jM5˓[S'cحZSEdT`QS՚ ,jXdnM5UH,jh5ٚ lMr'[SV@Tdk*j5ȝdnMZMTk*;ܚ lM5UH,j'cjMT`QSf{2fkdT`QSZSEMjM5U,j5XTe@l5UlMZM֭ &[SVU8=I5X.|{bk*\x)˅oORlM ٚ =@ؚ =@nvk*hݚ lMZM&[S5h5ٚ lM5ۓ[SE)T`r[SxSbZSdk*Ƴ5hٚ:P@Tgk*Ƴ5h5ٚ lMZM}ߐo]:= RXS)X ,)@_!@t*N˿S! QrCT,U ;D-Qr+CTJU 79Dh5:P!@ns*j2DXT*U`QS!@ns*U`QS!BTEMTbk*5u`۸5]]nM!Tk*ݚ ClM!7O$>l|ضqöƇmÐ0mma{2öa޻ՇmöIaަۦԇm3öͧa{2 QFmGӰ}"}ܓQdԇm{>dԇqܑmЇqܑ'>dԇqܑ0;Qc0;Qmb'>la atɨa{20;'>lo3lOF}ܓQfdԇ=avpOF}ض0;'>"afHF}Qkd԰=afl{y},\-f]^t]^Y1nn+]nnot̅:Jׅ:ۛ u=Duzzmm5pO\XCT5Յ"lc.,j=DuaQsO\X1pV\!CBBTnzCՅz2.,C&Q] BTnom.!v7A¢f DuaQsQ]Xlw.,j! nՅE=D.,j! {¢./,j!=DuaQsQ]XCTj"DuDBՅV! &BTZM.Q]h5j"DuDBU}.hM]h5њp{r!\Ԛj5.事ZSZM..ʯZSr P=p!_A5u!aԚKjM]Xܓ ZM.i ̨Sv#t!v+gJ;]hv#.H;]hv#taQsO;]XN5ŅE=taQoSs/^i {¢^ӅVi &NZM.H;]w3J;]h5vj"tDBӅ|㠴ӅViӅEӅEͽx14]ȗ= 4/G4 }O@BholK'|/=k?~!_A>CCY>hj*t!TB@ӅTS MRM. 4]H5hj*t!TB@ShCY>h ,W 4]XM5@S~ ¢~ h0]? 
=W` _蹂Ӆ+h0]>Lng`/\A\+h0]蹂ӅV {¢`7ӅEͽtaQ<X~`Y~`7.,j_5.,jtaQs/\XMTj0]X5rV &LrLZM4.h0]h5`jt!7QtD)po0]MTj0]h5`.,j_4.,j taQso0]Xlh0_4.,j {¢`7.,j {¢`L5 4.h0]h5`[O` l_`jtD)pO\h5h ܫ QiY4]XF˻Khhh@Ӆ.  }@Bn|VB.UB@ӅV &M{B@ӅV 1 # ;h.,ƷH.,4]hh#.4]hh#tDB@ӅV9@hx_Mr_MQ }{b_{@b.҅ܪ҅kY{fBUfBnUTfBnRfBnVfBnVfBUfBUfBnRfBRYYj"taQs,]X3K5ܸ҅҅E=taQs,]X3KZM.`X-!]]z!]F)p!]]z!]F}F?gؾgp ªѰ]aanaa o k1lmoOao^ϊj4j4j4l0UAUѰ}0;F8 㸳j0UqY5qghǝUaawV;Ѱ}0{X5FY5zF젪F젪F,ShThThThThؾoeThThS7_,f a! f i5kHS i5kHḀ})p_ ܗ}@M})p_ ܗT$p_ ܗ}?p_ j5UJ ,jRR`QSm&XT$Id))CRR};CRR}I8CRRUJ ݄I&,%&,%5݄JIEv7a))RR`QMXJ ,jtJIEM*%5UJ ,j5UJ:PJIEMRRd))jh5YJ ,%ZM&KIV@RRd))jh5YJ:Pr4d )1_SC 2cH\0t )5!>Ɛ1@.;ȥCǐT o.C ڍcH\At ); OKQf(RwUH6Qf(RwtzG@KQf(Q`QSEM5; ,jwXT@ZM&{GV@Q ڸwh5; ZM&{G|Qd(Q`Q= wXT@#߁u{Gw/<{G~Q~@}O`(.׽w %{Gw`QSGʽ@Q t(jwH5; RM{GTӽ@Q t(jwtzGEM,j+,W5; ,jwt|5u7A=@AzwtzwQ~@#+g=W; \a(jwXT(Q`QS; ,jwX,X~; ,j_5ï{GEM?P5ufyu(V@Q Bwh5; ZM&{G Qd@ʽ@Q`QSᗽzGwXT(~;:=wXT(Q`QSzGEM5; ,j~5595sZMԽ &{GVsL ߞ4 ,^rۓF+i@&}4 =I@4 r' I@4 LZM&T(j2ih54 ,j')&SI>'Jۧ Fx%ȤQg(3itF6I@ϤQg(j2ih5?o } BI@n u(H4:= 0iXS$(4 ,)@;& &z Dd(;& t`(& Va@r(Ӂa& +&DEM 5& + 5& zȘ:4T 8=d P 79/ٷ v}vv #AӰ}װ}ݰ0E7a.avŇ{&hz0EAFAF{gH0;H Rqg$hǝaawFH0;#A8 34l15Hа} o{#A #AS$hT$hT$hؾa     7{   }q1b\a 1.4cĸ1b\hؾg<[t(p_5 ,U}2p_?P}2p_5 ,U ,U}2p_,T(p_ ,Hrz`QSDEMUU% ,j#XTG@U5 ,j)j2Yd(dQ@d(dQXd(dсJVdQz$з&l&VdQ`QSɢf0YXT(n5L5,:PɢEEM%J5, ʣEEM%J5,z EVɢ@dQd(j2Yh5, LZM&&EVɢ@dQd@%>|P W }>T>(1h }rAEM5 ,j*XT>( ZM&AV@q>(j2h5 ZM@|Ё3ff>职sЁ|#@t/#@|P~@E= f(j @G.A5u:Y>t RMG#@T@P t(j:H5 RMG#@*X9fq@EæEMEsk`<@Ov~=9P\= t`'p?z:?9@Ov~ׁz:h5 ,jXT'ρ5 ,j[w~,Ϸ5;?EMu~֝:?XNO`QS:h5 rs;?V@Od'jEZMv~T' 5u:=ٞo ,js:?EMu~-;?EMu~5 ,jXT O`QS:?EͯfOd'j-ؾh5 Ad'\$O`:X.|{b'\xu~O/<;?'{;?ܦO N{;?V@ρZMv~&;?E$ρ rc49=lr4l߰5lSf>{syؾaRqؾc.aaSfdfdfؾa Y9P'u}X hI@_!h%@Ba&Г%@O^h2%@s&/\ ,jn`QoEM t&jDH5] .RMhK4T%@M t&jDH5]9P%N5U ,jD ,jDXT@=PG0T,z:D́:MK4U Oz:Dt`&Ӂ%@M`QS%J4EMhT&M`Q<ߺDs`yu&Yo] ,jDX,Ϸ.5U9P%O5u~@h:?X@Md&}\ ,ZMh&K4V%@nq&jDsJ4Md&ӁE|M`QS%U ,jDXlϷ,؞oY ,jDXT&M`QS%J4EMh*5U ,jh`QSG&K4V%@nt5%@Md@kL`:Xm{Xbl&\[fL-c3z36]юrc3ZMf&c3*6h5 5c3*6`5 
,jO$f<@Ldl@f&c3V@Ld2@%c1vɘ␒1SdL :s`e2&8r/Kwr/*r/K 8h5~9P@q%j2XT%K`QS@q%pW\2l3l3l\2lrؾEeؾgnT.8lߝ2l߾3lqؾa* 7 =o)ɆAKAKr0;UVua[IqlY'<Ʊea زN2c:0-$xadؾa 7r uatuN2N2N2l_0IAIAIAIAIoAIAI $8~, _@2T cdǏ@2ɰ}0CX yXN(=u}5 p_Q W}@%}5 p_Q ;T2 p_Q W}3p_Q:Pu}E)p_ &$*XT$:IPu$EM%J:I`QSɀEMI&$NǓu}1:IǓu}1:IǓuU' ,:I^%зIlI5$EMIV:I`QSufհNXT@I5U' ,jNXT$p_,jNr$E/5U'y $Vu@:Id$jNh5Y' ZMI&$Vu@:Idl/@.H #l6Fč@.A1ȅJ7Fa@ qc$nrҍrJ!ք@"u(B K!քU &,ZB K!\v)$w0B:XT)$RH`QSWSS@RHd)$jh5Y ,K!V@RHd)$j{B&K!WBK,} cF$/@9P,} [gD@lfD= 9gDN@sF$cE/{~5ˇ3"T@H tF$j:#H5 ΈRMgD3"T@H tF@eD:fXTF$H~-H`QSu@ ,S2#H3"\3"̈TF$p?H~3Ӂ@OfD&3"EMeDʈ53"EMeD3"E|H`QSfyuF$ȁʈ5u26*#Xw8#@fD&3"ܑHdF$j2#h5 ̈rG3"V gD&3"EM ,j[fDʈH`QSf{eF|ˌH`QS2"EMeDʈ5 ,j*#XTF$H`QS_ ̈r3"ÄZMfD&3"vdF$\[eD˵arm3"*#@fD}m gF$Ӟ@n_vF${  gF$j2#h59P@ɌHdF$X ,B}*ڇK!>BRHc)$rJ!@RHc@>m{6bzܥG wq`d#7 F }norwք7&jorzF`QS@q{#pW[u7{c[cZ~37Ru w ۍ|v[:b2wj=La+a6aMZv ư}0:0:fvƱe{cǖaa[7s8lo ز1cư} 0_xxa{cd{a:7?11l07A7A7A7A7vA7A7ٟ%0;8lv ߟ㸳1fqgcǝ͎1fǰm0{xX:XjV}=)Gd#Gd#ǁ}T#p7[ k[ kx`QSjEva#G`QjX,jqjEM>}5U,j5U8PjEM>Gd#jh5Y}ZM>&kV@Gd#jh5䚴\t#FrzTg9:,G 08P'Y@.d:ȥL7Ty#&5D7FM`y#&q6@F`1A@.kEM,jXTy#F`QSA?@Fdy#jh5YڻV@Fdy#jUx7&O7Way#З@_>7u21З@py#o3\fy#3@Wsy#o\57:Xfqy#jH5].oRM7T@F ty#jH5].oF`QS'*o5U܏5U,jqNW2hkSe6q@ϸF~@5O6qcg\#3h5,j*XT\#Ya,j*X,kXa,jGX5k5#EM5T\#EM=Pq΋5u^4j2h55&Vq@ɸFd\#`kF 78h5,jh`Q=2XT\@5k5#,*XT\#Ɓk5,j*XT\#F`QSqEMs,jsd\#j2mkؾ1h5kFd\#\[5˵rm*@5}mg\#Ӟq@v\#[g\#j2h58Pq@ɸFd\#,)XjLdɉ@;DFb"@%2v@;DƁJd&2m%2͉@nms"ADF`1A2tf"9.cΡ@nFq"ЃŁ ]r3Ce[.E :tXBϻo'\#O~*~o~?V۫r?U?>{ö\~~latiUۚ~\s?r?n]خƱ]s?kƱ]sa5W8kql\o*~M*~-?5W[-W~6-WavpU0;*~r?f\["lU0a5W~a5W8kö\㸯q_s*~}U=kʽ9@߷,^ ^y߫7,^wz󂿿՛xv޼/{^/߫7/; ^y+/{-eOJ\S/h5הŅ,jn),jn)tᖲxv 5/XΗ`QsKY`QOQsMYsMYx)x)<5e5eqᖲxA皲x߇_з5eqaլ)rլ),jn),j[͚x斲xfլ),jn) 5 5 5 5 ^g斲pKY`QsKY`QsKY|xA,^j)k暲xA,^j)k暲xA*>pmN  rqoN  ͉K9\bޛ/ȅȽ9qvfcoN W#Y z׬ roJ W} z׬ z׬ą[V=kV=kVg%^{V}Z/XN`QsJ`QsJ`QsJ`QsJ`Qs;kV暕xAf%^jYkVFg%^jYkV暕xAf%^{V暕}DY\/ȗ {9 qvj+k9=yrą[9=yr z+|ɵ#^r 5cw/X܎ݽ`Q|?A5TA5r RͽTs/G /H5r RͽTs/G /H5rą[9۱,jn,jn ٽE 5S`_wYtX/f%.SÚxAO5+S]e%^tX/ z:YtX/h5׬ 5 5 5힕x斕xfyݳ=+E|g%^e%^Yo 
5ą[VI,jn'%/ܲ/XNJ`Qs;)VsJ\/({V暕xAf%^jYkVeJ\nYf=+VsJ`Qs;)E|f%^e%.ܲ/Xܲ/XlϷkV-+E-+E-+qᖕx斕x斕x斕x斕x斕x斕xv6kV暕xAn۳AkV暕xAf%.ܲ/h58 +}r#xr8qZ/ɻ!^ГwC q䦸=񂞼k=y8 Z5qxA!^jq,jn,l,{xd?5Yn&kZqp8s8s8`ϭW^Åi8`ϭkk" wS][n]nm]~ͽ/{}[ð]oƺ]a aaS`ư]}Ͱ]a.n0l:lb3Nn0l_0 kA A 0;AqY>qg`qǝ僇0;8, 㸳|0l=T>{ avPavPa|0|0|0|0|0l7|0|Ǿ),/ۆb0b0bTLF_XL{ +,ٓ6}A-p_P WJWJoցJIྠv@+%7}$p_P;PuR/ j l-ZMZ EMEZ,jt`QSTk!ЁEM,jXTk,&Kq=*,&Kq=*,&bBGńoo,&n,&āa Ev`1!bB`Q0XL,jp EM*&5UL,j,jp EM*&<ń@bBd1!jh5YL,&ZM& Vi@*;mE A`wf p8me :NrU/x^: zAǓ@'^d Nr@. ^[ EM2 ,j^XT zA`QSN>@zAd j^h5Y/zV@zAd j^ȕk &O/ c N_|Lriu+Nri@p 3i@l .i|A`QSG:X,:y~HL=?j>?Ti@A t j:mH56NRM *mXѯEM i~zA~l'gm<6@z:hY/vT ]/x m<VEM Ezfyu zA`Q<º^XT@ 5u@ :XQ@zAd ;\/ZM &V@np j^pzAd zA`Q=²^XT@ 5#,^XT z5U/,j^XT zA`QSEMl ,jdkd j^Mqؾ^h5Y/zAd \! ,W=^Xd W@O^ =yY/V A@O^ =yY/zAd j^XT vSAX[ Af Ѓjz g, r ^;αc,<">l9lwl0l9l0l?">l0Scbobx۷0 <">tD|0;u0;?L-qyD|ǝGćqyD|pǝGć/#WxatG젎:">l}0fuD|avPGćA4fuD|o# /<">0# ?LGćF_xDa:">0޳xDa#蠏+Fྡྷh+V%7p W4iKn]+phӠ@~}E#p_8PsMV&ӠEM,jx p5u4Ӡx`QSA: XZM-Wn=d<[!}&CƳ2-?Pg=d<[ ݄glw-_v7@Mx<&<[Xfly`QSgΖ5u[%J-?Pć}y<+r>5!@_x?P˅y@.x }<~>{CEM,jx`QS:Xyx!@CV&rEڇ&ZM<h5yh5yqC|<u}?P'h}x<>~Nr}߇C\!@l!@m!5u&4E໧|~P!@CTӇRM>H5}X!fy!#5#5u,@ygzl?1 =1>,@ggc|YΎ=1fc*9*@9*@GTsUsUsUsU ƜAǜA111n1ǘǘ=Ƽ=-5c51f~ob&EUM~ $7 MNob/&E_dĂ_dM*;AM頣EU{m'h:t4YcDS`&EU%*;K4UvtDSe`ʎMU%NMݙxݙx~Yt< k k aЯ ,@tpq,T:XtD&,T M%*@K4Uh%*@_T:XtDS,@M& AG蠣td:ht4YckA^t9ț;1}!kA^v91_T/,s7~lTeSA?l*ز|QM`ylU NAuS9Ȼsn*}ٳeϦrDSM`M5%j*K4U;ȦrdS9ht4T:l*yM堣ɦrdS9ht4T&AGM,#=v/#2r7]F&G]FnAow9苗e/^_0~A>î{Gpq~#E:>.^}H{EU ~#h^t4Y/zqdMՋ%{<U/hJ,TMU %K4U/h^t4Y/7ׂ~Y/Ex֋~Y/zq<(kkoЯ zqp,T8XzqD&^,TMՋ%K4U/h^;MՋ/^,T8XzA֋&AG⠣zqd8htLX JV]=nп' A,^Ta7ߓ o{Q $\ Ava7Ȼu.yn.K4U h,Ta7XnDS,Mv& AGݠn\ :,Mv& AG o*t4ɽNnw c'7臏ܠ>vr+NE1Nn ;A_}e,lo:K4vh7vr%j;K4˛hCv,TjMn%K4U hv,TjMn%K4U hv,T6XmDS`jAG۠mA?Kv vjvV۫mgџmVm'џmU϶s豽v v϶#϶U϶~3fU϶c3W}g^lQg mG1/ϘT}?c^PyA3U϶v^3g c{3gp~ U϶c{3gp~ UϜ3W}? UϜ{ U϶P9C3 UϘ3T}?nƜs^9C3 U߱sg1g cUϜZccD}3>D'şDg~5}Fzo7 n7 nn7 nn{ !s J"{X"{X{X"{X"{X{XD CG%[u硣CG%^x|Xh%۽^~Xh^~XhLJ%{ a^~XMG V{%~Q%~Q~~/AkJC&pЯ (A?,KK4kJK4tp/A?,KK4ͽDs/A? 
JK4ͽDs/AD CG%臎&yCV7AyX߇QCS!}# h߇%{a}XhUA:>t4}hD!DCG߇&yY߇~ᇼC?h?C?h?M~5{a!9w~{j?kK4vͽD]9Ds/l<,,oWKRAF L2-{s!y~0y~0CFSᇌM52j?d4%{aa~X7LO%WA4:h?tn_ܢ9йEs!Ϡ~=5:h?tn~h9Dsa~X7hj?,ܛK4Q5僨K4Q5h%僨K4ppo?,ܛK4ppo?,{=K4^CGᇎ&yCGᇎ&M4:h?M4{s!~h9Dso?,lD~X7{sa~X7h%{sa%{sa~X7h%{sa~X7hᇎ&M4:j_~h9Ds87Ds4ZAs4mj?X~kᇾV~k~kAT|E!C~CstO߇&~5,<,<b1Kl2'XdϯƘA6ǘA67 < V^dsk?vө<}L 6c|؀~pσԀkc|-`c|-fۘ3%W2tS;ߘ pS;3 /vpg{ =~_+3 /Zap{ :ljM6&_t4:lj_T+3XvDSM~M2%je^TS;XVfDS`M5%jjdbYe;ǖe*cزت}],c/{Y?],cK4Uh˞e`Uh,T;X2vDSe~,TM%*cd;ht4Y:,cd:E<q:[_T:uݷFAѹoM%[K4շho,T AG}렣ɾud:hoH&AG}렣ɾud:[d:ǖꋪTygѕ[V~lYe:;T_T%WJu7]g:X&KD#R,T%XޑXh,,HTMW+AF`4F`4F`4Fӕ Jut:hRd4]2T_T:X&KDS`*Uh.,Tk [AG頣E&[AG ݚͭ頣tdk:h5t4ٚh5,Tk:XY>k5,Tk:XY>k5}|tk:XY>k5,Tk:XY>k5,TkZM%jM_Tk:XNSDStdk:ȓ6&[AG頣tdk:C`&[՚ح頣tDS`ftDSjMK4՚h5,Tk:XtDSjMK4՚h5,Tk:XtDS`ZM%jMM&[A~uƭAlMM&[G֛tYo:Ԭ7j֛>A~1CzsСf9P|QCKyvuʻ~ 3~cΠcΠ.1gI >~z4,|L1f 1f 1f hYbyl?0:~\4 < |L1gP 1gP N15ǜA5ǜA5ǜA5ǜA5cΠcΠ?zcn1ǘAǘAǘAc c ncncn>v3v3v3X^ns ?v7ﺌeczlrL[6c|n٘s~pϭԘ c|}accΠcΠcΠc/gKEuݥ 8w~:A>zhgpy~3 7>~:gpt4ѾfdG;ht4ɎvdGʚMu%hFEu%*kK4UּvDSe`ʚMu%hK4>ȎvٳcˎvЏ-;eG;aUGbEؠ/{v?]hK4h˞`:h,TG;XvDS~,TG:Mu%hdG;ht4:ayv]þv:ay35싪ay&vw]kAu ;sFװ<r ;XvDS5`jMհ%*d ;ht4Y:aMְUvd ;ht4Y:ay5-j~VX6 At;g~VX U L [w%*K4U h2%*K4{ Mj% wt;hhhhd4]2.pM AF*pK4U h,ӢBDSͅ` Y:,pM/t4Y:,pywp.pM& AGwDS` Y` Y峦 Y` Y` Uh,T M%jmM& Aùt4Y:,pM& At4Yྨw'.pM%*pK4gM%*p_T;XwDS` M%*p_T;XwDS` M%*pK4Uh,T;ht4Y{5.p_l b;ht4Y>vd ;ht4Y+_aװ&kAG5젣so}}L1== = = = c}YwZ|wZ{zA>z};Nkpz~5i wZ~zNkpt4hd;ht4-wd M%}E%*K4UwDS` M%}K4>vЏA?Blo_T{;G^ GHe{;Wdxe{;Wdxe{;X.^%jo_T{;XvDS`M`DSjoK4h}vd{;hOu PWjwAvy/vy$jw7]򖼫Au;IW<r;XjwDS`MU%d;ht4Y:vMVSjwd;ht4Y:vy)u~>Y d)<{.yԥOƒ~>Y d)<Ȼ._TQ'ƒ-Rx7] h,TQ'XY h,l,K4U hwAƒKAFӥ y~0y~0br~0Rxt) > ~w19*9*g1,1,]c'KTc>YRc>YRc>YRۏƘOǘA1f%1f%c*g9*9*TRsURsURsURsURۏ ƜAǜA%1f%cI}dI}dI}dI}l?cYRcYR?3Ȓ3Ȓ1ǘAǘAǜ:Ȓ3X^YR?wƜ111[c c}11r1r3r1ǜAǜAǜAǜAǘA߃;*A>.xQ Y߃}~:߇>{pC>tp}Q~:߇:,_T=ht4Y|:,bd=h~Q`M߃{AMug%^T=XlDS`M߃%*d=G!/#{po#vA_,}A_,},l/M/,T=X{DS`3XE߃%*K4U|?{d=h~ oSwۃmn{Ly 
yn{mNAGu=OwۃN,T=XY>N~|t=XY>N,T=XY>N,TMuۃ%_T=Xn{DSn{d=c8wۃ&AGn{d=bwۃ&mn{DS`f8n{DSK4mh,T=Xn{DSK4mh,T=Xn{DS`Muۃ%Mvۃ&A~AMvۃ&KA'% az_p =$tXB?|L~gW._T =ȳ+Ѓ۞϶)+m7?g g^Ql{Ul{Ql;"DE}l Ϝ>͍53gwϘ]?cvQ_l;E}}lm~좾g.mT1 v>3g c{m3gp ϶s3?sg^_m9{}3gp_Fj5T?c>QmD3϶ϘOT?c>Qm۫1|>W?c>QmD3gF3X^#Qm۫9/{1/Ƽv/tl yA6g 1/yA}l,T?cQm۫9{3gp Ϝ3K*?nQC>U*?nQ?܊0ϗ ӓ==Annnnñ{aCG?%(?܊M:(?܊M:(ͽ0Ds/?܊0Ds>,;0Ds>,;K4ͽ0Ds/п'jп'jn{`Q{uD/An⇾Q{K[/Ah^{Xh%{a^{>,kDs?,k?t4Q{h>xl<^ u~_?t~%N߯xndl/CW{a*)Ds:&hF&2xhIsV19hܣ/!1.ۏs8=cI؞=c{2`wXC1Lp`l_s1fK: j`R1{9JNS/cTw׊ZcV0kc.#Sw xpa 1fc C1fc ; ja2~gƜA-#9ZFs0 jal? ja2qal?|rǸ0|ra2ee1Ǵ0|ra21-#1\Fc0 H.#95c3-*9gZT1.*1g\Tcθ0=EsEsE1挋 c1挋 Ǵ0 rQa1-*9ZTs0 jQaԢ˜3E1fk A>^[7/jm!߸iBpqs鵅~L {=>mp?S g*t4t4pM->mBBpM-M-\T;X`{%zK4U`MU%Z[hjm!X\[E-{rm!WڃTb+A_\[Bp} rm!Kk gKk M-\BDSk M-K4,Bp?Ohjm%Z[hjm &981|rp'ƘON1sk$'Ɯ1gr~kŏqb311Q7]ǘAQw\ǘAQ1o1811fcc (ibŘ311gPccΠ(ƜAQ1EpS~QcNyO(^71 KQC^789F'׍!Np?Ĺ111:::u㠣11jK45F,Ep_EDSM`5F,T>X&|DScMQK45FqcA1OQjN_l ({?KcA_/AQ} r"v r"XEQK4՜hj"X1`(vDSc5F,EDSc9Ft49Ft4M}>)A?#FR~`\F 0V8Q0<(F 0.q`K480%/h_/hjsG$9<"G'B^959<"E-rj.ry@E (!959%K4,"GDSM-rK4U=EEEEE Ot4t4t4t4YY?ttQW@CCC 䑓W@5W@5W@%W@%*|K4GK4U` MK4GK4# h\ hG,,d4d4d4d4d4d4FY> hW@W@W@.8,1GGG1G1G1E炎 > >b>:>~GGGGDSCM }K4>}K45,, jcT~g:ƜAMu9s51 jcl? 
jcTǏqclM?|r81|rcTɩhTɩcc>91|r㘦:ƘONu1sk$:Ɯɩ1gPScicl7j81rc~c&>c.'>{c.'>Ƙ]N|ǘ]N|3ȉ1f41 jcǘ31gPcΠ&>ƜpcgcoC zoC A> [}Q A |+An~|+o]6H6H6EmMnMnMnw6H6E K4 ,6Hpo_6HDSM \6HDSM K4 ,6HDS i{pE  p8$3 p8b>9>9ᐠOy'C:C'C>9rQ!M5΃%jK45,pHp?hj8䢆C% hj8 C&C毿˯r&EMyɑ`U49Uɑ52N}qr&G89Uɑ ɑ D=9,ɑ`f{HDS ɑ`.jr$hrJAzy$c/y== `GrQ{$A'{$Az$sXy==`JMK4G,IDS{$Mr$hr$hr$hr$hr$#0MMMMjZy뱒 O*'p$XjD}NXIDSՋ+ hj$X`f`J%sJ%K4MMMMMaxU$ߓ"A\ $ܫ"A_\ HЗ WEWEWE*rQ"A~Ië"AG"MK4*,ԪHHDS"MK4*,ԪHDS"*,ԪHDS"MK4*,l*rQ"MK4*t4*t4*Wۼ*r}HAyU :*YWE.vWE1|c53&c| 3g3q3c91s5c2ߖ?1gP3&cΠfLƜA͘91oƏ91s5cc1я111Θ11c>9c2|rdl?4c>9c2|rfLƘOΘ11913&c gLƜ1gFrdԌɘ3X^#9q2&NƜkM'NƘkN1ל8r~{~{s͉s͉c8c9q2 r&NƜAM98s5q2 jdɘ3Ip?Ip?Ѹ~~{5qdrQ;/A_y K9KDS;/M\loy hK4z,loy hjv^%Z8*wyd  =䱌G`8G`.j&ˁ#0Aa{&SlyN/K4UhA#0M/j&X`9t49t49t49t49G`&G`&G`&G`&G`.)Ÿ=п*Mп'Wi<*MK^U J WiJ%U/AyU ;J,T9XrD}p*MDS`fU`Vi.j&XU`Vi% Wi%Z h.\ h3,,\Jd4Jd4Jd4Jd4Jd4Jd4F`4F`4F`4Jd4JsQ2A~E2-7-w-t-t-t-sQ=ؠmm 8m 5mmmmm`e%ږ hvޖ hj[&XY>y[bhm`fhmse=-m m/An} r[&K2A~q2A~;2A_ܖm mm`e%ږ hj[&/ny[&Xm`e%ږ hj[&Xmږ hj[&Xm`e%ږ hܖm`e%ږ :ܖ :ܖ ;ޖ>Ss[&hr>&lyJ OI<s=ycc9st=)cE 1A~1|LD=s=,,}MMMM>x[Ǹ-3Gol?ۃ7eԍc{ȍ;m=mc{Ӷ؞=hce< j[fԶ1+9ږs< xԻ31g%z#sW2w1gPcΠqƜA31gPc qƘAOc qia>3}1f8c3}1f8cI3}1f8Ǵ0 jg>~pL8cΠƜA9s3(9scƘOqƘO1c>3|1c>sL8c'qƘO>}1f8c`y>Θ3X^#3 jg,S8cεq~8c5qƘkwiglO?\sglI?\sg>~~>11csL8cΠqƜA9s3 jg>3~bd,Ol.Ol̆sabp_ ~WaAn~Wabp?L휠휠휋 : : : AG9AG95h,vNDS9}5ࢶs%hj⢶s%hj"X`s Lnz1vNp:& :& A| u{=:au:A:A:t8t8sQA:A:35,:BDSM K45ܿM,԰E K45,԰AMWyqy՝!\N'~^ N/Wwmx՝Z N/Ww%};՝};՝`fF%};M͌\NDS3#AG<lғA?qO6||OrO_cOOOOODSc>MK4˧7K45,,c3/J| ;9gؾ3/J| ;9gྺv3gp_Wwau3;װ՝ϘO||bu3;1XWw>c>՝3ϘO| bu3gFbu3gFbu3gp_,X}u3z_ݹ՝Ϙk|\cu6ؾv3;mw?cs՝϶1XWw>c3՝}u3gp_Ww>s՝Ϝ}u3gp_Ww>c'$CG<ML<:yhb'Oa<,'yh<>DsxXOaI%} a>Dsyhol #6:}za#(o9DsE^_CyT顇ZDs-zX>Iaa>ZD}h}af$ѢF2-zhj!ѢF2hd42["={oay>|h%zXϽ<v.C?% |.Ch!].v:%zhb衣]&vhDK4]%vhDK444AL=C= M=t1=ЉC'C5M=W4=ЉPߒxL=t1=^PpzXO=,ܧkzXO=,ܧhCK4顇%PpzXO=,ܧhCK4GXL=,ܧ}za>=CGCJMGX ==c Jօ<[Byu<кCgh]!o]j]!ϳ.'mZz =]u =,,Z  =,,Z  =,,#Zzhj]!uօ2Zzhj]!us7kڱvٱ=4=4ul>ul؞cdAs:tlO1M#:'tl~kǜAM9:1gPCcΠǜA9zs.> jzh;31gPcΠƜAM3Y1gPc ƘAN9c iVc31fCc31fCcY31fC41 
jzh~ƜAj9s5=4 jzhl? jzhЏg1fD?Y1fDc.gƘ]Tc.gƘ],Y1fD3Y1fDc`y,ј3X^?9K4 jhX{cή~Cc.Ƙ]Nizhl?rzhl?rzh~~1M1c9=tLCcΠƜAM9s5=4 jzh3顋Z*g=AfC碖 1dPp? K5=dz<=܏!Ap?<>܏!Ap? : : :顠顠顠>ct49=t49=tQ MMK45=g .jz(X`6.jz(X`6% hjz(h}#PsF5G5G5G6GСhQСhQp:- :-hAGFAGFt4ht4htQAGFAGF5.t4ht4h%.j(Xq`%%Z4 hj('XEZ4 hj(XE\4 :\4:x|r(ǖsG5wY玂~l9wtQsGAz(˞sGA_; ?#q(˞sG5weϹ`f{GĹ`f{GĹ`f{GQDSC,Ĺ`X.j(X!`X&=ٱ<=RgpB RcEm!} r )/3x )ȯ3x )Cao!} r )XD]B hQ[HMm!K4?----- Ot4t4t4t4n,ϊYPR'J YPR ~V87J<䙨.j()ȯx()ȃQ%M%M%M%K4,%K4,%K4hYJ h,l8,PE %K45,PRD}PRDSCI1CIM %K4,%M%M%M%M%M%M%M%M%]PR<J AJX>y()XÕ,<ɡO%]T<ȗ%R㡤 桤 y䡤ɡɡɡɡ`%J hhJ hj()XY>y(bEEEZ4 :\4 :\4 Kq^4 G^4 :\4E yEEE`%Z4 Kq^4 hj(XE`%Z4 hj%Z4 hj(XE`fE`.j(XE`&&r(ț-X bt4gt4gtQMK4gG .j(X}`5.j(X}`5%3 hj(h< %%ZB rRЗ7=%%/.!]RQḇDZDZ:}; :};iDZDZ>-qQcG>MKK45-,QDScGK<M]QDScGMQQS 2n(o(yt 2n(]ԆRP LJApC)*Wn(j P hj%X踡tqC)X踡tqC)X踡,ԀKDqC)XP hj%X`f{RU|@4S'|4EM3Ri _j<t49䗨<t49t49t49t49,4SDSLLMM3K44O3],<t49t49tQLAGLAGLA~LA~LAGL5䗨<t49t49,4SDSLA~LMM3K45,4SDSLMM3]4SDSLMM3K45,l59,4EM3K45,4S4S4S_=Rw~t 5x})l/y %䡄חS ϳyח j}i19Z_s^> |3w1gPKcΠǜA9zs4 j}޸ǜA9Zc4 r}il?>ctL!c חƘA/1\_ƘA/1\_ۏ~ƘA/1\_:1gPKcΠ֗Ǵ4 j}iҘ31gPKc1gPKc ƘO.,1\X1.,1\Xc>4|rail?c>4|raƘO.,1\X:1f Kc ƜɅ1gFraiҘ3w7ǜ]-,Ƙ].,1\Xo~}~}˅6˅cZXc4 raƜA-,9ZXs4 jaiҘ31gP Kcw/wj~z4+6}ڗ BT/gT/}>}`f}}}eRARAR5t/t/uQARAR}v%}hjv#X}`הȑ%[tJ!?fGZgi"oKFpneK]Tv#XTLS}K=M&RAeK/|8ToR՗ R/ RdYП&Ki¾Ee4ۍ}FǾTLؗntK4ۍ}`52vc_*X~Ee4կ iA|,SL<2UY] f*(.S]T*+e L|2ULSe`2veL,TT2Me d*iL4Y z,S=M<2Ud*iL4Y z,of+[uQU`y 7[٪ ;[lUo6UAVy٪ ;[uQ٪lUd*i2[4 zV4۷7feU2MUeU2MUeU2Me.*[,T*XlUL}{c*XlUL}{c*XlUL|{s*i:[4 rV9MgUQ `ym,-_ Vgږ/KNHy;!eE%읐 wB*i2! +'&RAO ɄTdB*XTLS `fTLS `fA<Tc*1 CPAr*1uQ! I9 )gCP2M<TLS!`BP2Me A4TLS!`BP2Me#CP2M.*,T*XTd*i2R|S?n=],m|SϖzX n=˯[OAE׭ -,,e%Œp)XYn=9M[OANӭ St)_~z ǭcil>ٱ}z:w\*7q? 
Gقb\*i2.uQq`42vd\bE2.,lHƥ.[$R2vd\*XML" i*AsQq`42M%he[ ,Oڲ<kT'\ LSA>uy*X^[ <EY SAqy*c.O4U i],l=S2M .<,Ty*XvA&SAO婠Tdy* .O=M&SAO婠yJ{&(,3Q!XffEee&jVLԬ 0UAτͪgfU3a*CUnVy,ͪjV=M6&UAOͪfUd*XپY,lج i*,lج iY,lج iYuQͪ`U2M5e{U2M5e{U2M5e{UANͪ fUt*iY4ݬvAkWU wE`yW.w|r*X ])_\ õ ?0\U]=M֮&kWAs*iv4Y z]=M֮e]4U iY]4U iYnV=6.YLج z&lVylͪ fYLجfUgܬ z&lV fUП`lV4լ ؠU2M5ejV4լ iY,TꢚU2M5ejV4լ i}lV4լfULSͪ`UAOͪTO\ g~婋TT] SK uy*\Tߺ<ge]TL+\X.O4K Tty*i<4] r.OSAOIؾڱݱ}cd)=5vlQwlؾֱ}ǔ4ۗ:ul>cJO#7:Otl_+1ǼAƼA> *=5 f> V> F> 6> *=5 > > = *=5 *=uLw1oPY1oPY1n1n驱71eUƸAƸAƸAgYc Sc Sc1n1ncʪyJOyJOESzjTzjTzjTzjTzjlzTzǘ1ƸAƸAƸA!c Rc Rǔ:7ȼ7ȼԘ7X> RcޠRcޠRcOcާR?ƄɄɄ1%ǸO&_ǸO&ƸO&ǸO&)!5 2!5 2!uL 1oP 1oP 1oP 1oP 1oP 1oP 1oP 1o7_l4ƿ;Uǔߝɪ1ݙ+Sjl4ӿ9 YszQ595&.*gUsVA^YJׁsVA^ Y]T*i2g4Ud*i2g4 %ɜUd")2M嬂eYE嬂e4IULS`")2M嬂eYw%+7dޕlVb%+ sJEUe4-JVL"Yɺnd4-`r92vd%+X\EUe4 i[$+YY e%+ז O W|xJV%yǕd}ٳiWƷ^[AO񭋊o=MƷ&[AO񭠧Vd|+XپN2,l_' i ,l_' i*,l_' i*uQ`[2Mŷe$[2Mŷe$[2Mŷe[AN Vt|+i:uQ) _['.*,R *,mVJ)y;u /{'кVZAO ɄVgМ zLh=M&&ZAO `Z2M%e˒Z2M%&+YA/dV^+YAFt%+#_dV./We d!JVRd4U 4+Y2MUed4U i,T%*Y2MUed4U ivd4UɺJVLS`*YAO ѹun]c[WKí uc.@[WA\UIuuQ .u*n]4K,í`feuu3ܺ ih[WANӭ Ut*iu4ݺ RZWZWml!mm[Wml mmkl޺lmmClgLgH?66϶~h:?g޺oy{3op ϼF7?g~ oy3opo]} 77^>g ZWqh]}=DDjlo| u7g ZWm>ѺDg ZWqh]퍗ϼu7>~4[Wy{3opo]} ϼu3opo]]C3nϸAu7g ZWqh]}=D3nu7g ZWc{3nϸA> ZWysϼu7>~[WкD3϶gԟqh]}Fg'ZWm?O}u5>ѺDjlo]} ϼu7>g޺y{3opo]];_uAg|];XuA3.`}=8;XmO4?㵉gkjd=F;yFíCYyqYypK<5FC^%jdFCO&YD#롧FCO[⡧FCO௵CLsod=,YEpod=,)4DJpod=,)4D2ͽLsod 4FCUz^*C#+7*C#롯24*C#V{y z^ie Y4w4 xdz}`= ^2X>d{ׇ C[#gz}`=,4 2=p;Ls`2=Ls` "D롧2=3A@gCہz h=L <CC@vCO6vsE@af"l7Wi+ZvsE@af"Ls/<,l7Wi%zXti%e技COo4򌇺\yC]/?cW2t-C+w-C!ܻ\i.CR!W!T!gL$ATzXY *zKDEe% CNSE^9Mr*z=4Uz5Uzi\w9]c(4IyLծ̱}c-Sı}c `ӝ{TkTncޠ]cޠncޠncޠncޠncޠ]cޠncޠncޠncޠ]cޠ]twUUYY۟qvSfdkdkdkl0 5 5?fYY:Z͘7jט7j 1UƼAUƼAUƼAUƼAUG cޠ]?j7j7j׏5 5 5 5?CYY:j7j7j1UƸAVƸAVƼ9jט7X>YU:272׏5 5 5|Le1ne1ne1ne1nec*sq,sq,sSkTkTkTkTkTkTkTkT_7z5?#ɢ1z5דE1cY۟XX WX.*ܟ!Wpܓ q+kǹ 羂{% ȹ /!.*4 z}]T+i24 z}Fd+i2uz2M徂e}E徂eJ4WLS)`R02uùya{t$_.*PXp}}2vQO‚> d(,p> d(,XPXpI4۷‚e[ CaAOXdE,i"vQɊXdEB'AOɊXp\TE,i"4Y i",TE, i"vQ`B'2MU"4Y 
zy&gŠX3aE,虰"i]TE, =V."WĂ +bA"=MVĂ&+bi++b2vseEb",l7WV.++b2vseE,XOL\Y isQ`?2MueʊXdE,"[ƊXO\ +bA>wE,跌 A"vQ?MX $+bArE,XXLS`* 2iŠXLS4U ipɊXdE,i"4Y +bAOɊXdE,i")WĂ9.]T,X*y$岋*y: E\,br \,eAq,K岠rYd,i\4Y.rYd,i\4Y. z,=M˂e;5e2`b2`e2`eU. i\,T,XپS\,T,XپS\,T,XYS\4]. r.]T"ȗq _>ɂ<8Y|j_˧EWdAmt,Ӎye;N8YWd' z=Mɂ&dAt,i2N4' z=Mɂe4' iC4';Eǂ~ -c,ȓy2.?q:ǂ4a,Oǂ4a,ȓy`c2Mǂe4 i?v}c,Xپ?,T,Xپ?,Tc2Mǂ&+bK "lWĂ wE,r\ y+bA>rE,ȟ] 7FWĂ|XL$J\ iJ+bK`fI"4] r9MWĂ+bAN ctE,i"'aEllql>c\'9/rl1U5c۷8O*bc3W8xLw1oP1oPciyyay]yYyUyyMyIyEyyyT=gT=gdEldEll2 "vL1n1n1n7Ȋ7Ȋ<`dEldE9cޠ*bcޠ*bcOTUUUUU1VƸAVƸAV~1n1n1n1n1ncqqSEldEldEl,ys1oPcy+bc +bc +bc/TYYYY;7Ȋ7Ȋ1UƼAUƼAUƼAUƼAUƼAUƼAUƼAUƼAU~czdEll3דcdEl'+bccxM"6?w1WĂsNjguE,Xm˟V OI{o*bA^9y"XpOy"%EUĂ&+bAO=MVĂ&+bAOz=MV.*,զLLS)4 i*sQ`R02M`e+bAO E`E,""{8A_]TE,OVĂ>Y =OVĂ>Y i"k2`fVÊXLS L]l&Ƃ-cA[&Ƃ-cUA zL[&Ƃ{cAwXLS`cXLS4UA i*1vXAO O3? zC!ǂy7Eǂp,_\\. IeA>r,g.ceA> w,XY(.4K岋rYLQ\. r.9M˂eAN rY?\4k>͟Yw±}cf)k6/vlpl>ױ}ǔ5ۧ:/ulؾcʚ+G:otl1ǼAeƼAe͎> *k6 n> ^> N> >> *k6 > > = *k6 *kvL1oP91oP91nY1nY7Ȭ1|ƸAfƸAfƸAf_ǸAfƸAfǸAfƸAf͎)3 *TgT瘲fcޠfcޠfcޠfcޠfccޠf?Ƭ7Ȭ7Ȭُ1k6 2k6 2k6 2k6*? 2k6 2kvLY1nY1nYcʚq̚q̚ysY1o|2k6 *kvLYG1oPYcldldllfcOc fcc fc fcӈ1nYcʚq̚q̚SlTlU61eƼAeƼAeƼAeƼAeƸoLSd5gc1LSl5gc~<۟=Sll8ɳ9y;^T,?gU$XJnMg< q,kɳ 'ς{& ɳ /!'.*y4< zL]T,i2y4< zLGd,i2yvQi`6_mJE%ςeJ4YLSi`42uɳXZpϧ1XE҂~K el.*XZW6ciA_ٌOW6ci2}`bi2}`f>XZLSL>&ς^g^ z}L>&.*< z}LE%ς^gAɳ`Yg2M%ςڿLSɳ 4^ i*yvɳYdb;O?XZg$K z}>҂^ciDE҂<-XZc,biAp,-1#ciAOXZLS`6_mXvf,-Xٌ]ll҂eXZLS`fg3,Tbi2Me*4=` FAOov [n` lnlAq-n=6؂ l`  O,l`l4'l2Me ` ` i,T lAO [d-i17؂&lAO [d-Ccn=M6.ɱe4ij;4inݦwN1LS `vAqv2M%Ա}ǔW:tl>ѱ7cyJ؍yJ}Tn|Խ|ԝ|}|Tn=||{TnTncޠMcޠMc vc vc31n cJ7qL؍qL؍qL؍ޏqL؍qL؍?܏qL؍qLSiTnTnlMvcޠvcޠvcޠvcޠvcc 3uc3uc3u?LwLwLwLwLwL1eƸ3fƸ3f)S7 2S7 2S7 :fƼYLݘ7L1ezǼAe~1n1ncԍ?qԍqԍqԍOƸAf)S7 2S7 2SwL1oP1oW௲AeƼAeƼAeƼAeƼAeƸAf':c|kL0g53uǔkLݘ_keǔ۟.zgnlcGA^nq.X*j_E  rvA^Vnyaa]p yuaE5삞&vAO j=M6삞&vAO t zl=M6.a,T.X]p\T.XPLSj4 i5,,Eܰ;L]Я3u=y+L]Я3u WH^c e2uA_}2SL]p1}2S,l_y i*S,l_y i<4j:L]c.1SwQɛL]c7AL]pO\T.1S )S,T.) 
i*SwQɛ`72Me2S4 z]l}>f^3u z}>fy"™LEe<L]c.Ffꂞ&3uAO`2u2MEeLvf.X]llfeL]LS`fg3S,T2u2MEe<4=`"OAOguO.{rA\ {r{rՓ 9 '7 n> ^> N> >> '7 > > = '7 'wL1oP1oP1n=1n=y7Ȟ15ƸAƸAƸAǸAƸA_ǸAƸA䎩4 '7 '7xyL=1oP=1oP=1oP=1oP=1n=1=1=cOn;cOn;cOn;cOnl|;cOn;cOzrc{rc{rԓٓٓg{rc`cOnT3nlv;S3،،،=f+wfwfwf`;c3qc qc qԌՌՌՌՌՌՌՌϑ/ɍm>'7=y䎩'7=1ɍ'7?@g{rcsO.?= rNu2)U2.}ld\WqA^;Ny8>A^@Ny 9wQɸ "'ゞ&q zL=M&ゞ&q=4 zL]T2.Xd\LSɸWd\LSݟ`? i,T'XYn8wɸ_!&{~&Wɸ_!&.*,qdE%ゾx e2.苗ɸ^F e2.Xپ0,T2.Xپ0,l_y i*tU*\c.聱 XLc.聱  3U z`=0Ve`LLS`pU i0,T pAOU*\G,?ۧ&csAGA5csAw*6q 5cs ̄csA\dl.i26,Tl.XSL}`lb*\L}`lb*\L}`l.XSL}`l.XEe14Uc i4Uc z`KtAq.ȓ.=0KtAq.ȓ;.y%]cJtAD,T!X>X i>eJ04gKt2M%.D,T.XA肞&KtAO%]d.?.=M肞&KtAO%]g\ z,]T.i.4U iD,T.X]G\ i,Ty%X]LS% DwQ%`JtA r.XJLS%`JtAO%]d.iD4Y z,]T.iD4Y z,=M肞&Kt2A,4D,Ty%X3%`Jt2A,4U]LS%`Jt2A,4U i b.X]LgKte u.֙ʲ:SkL]G| w.ޙ /{gL}`f>L]LSyۇ9@y@%ap7|~4\ wA>>v.G(;wA]'wA  t.ȓ LJ=`f ~w$t\ iwAN ]t.i~4] @wwm?~mc{mm~Mmmwm?~mm[llg?fٶ϶gwcM3op} 7ϼv7߬?[goӟyM3opE ϼ=p7ߝ?g}D3n϶ q܍ѧϸA>DϸA>lm3nϸAgwm?{큻ϼ=p7>gl3nϸ3>gwqg}Ɲ!pgwqg܍큻ϸ3>wq} "p7X>,u} Ѡϼ=bw ϸ%D>lwl}g۳ϸ%D>l{g} "b7G>g#vy{3op} ϼ=b7wm{>{g|l{#g|l{8>=~ >۞;^S!W-w7Q.`^{X*{ᖂ T{ KT{UR!/{!/1zCO&w=Mn&w=M{a^{Xn6ͽ~Ls=\2ͽoLs=,,ETˇ4C|H=܎<ˇ4C|H4i[ꡯl{lFlnye{af>42=L}Baf>42=4CoB7]pO{i>{J= eDOio5H=,lj o5H=,lj{iO#ÃA=by OaSY؃<,vʂ,A>by4|`Yc! Uq"zy&BLJ<C>S!?K@񡗀C>S!V!u{|Xi`)JLD{|iT!CNSLJz;z9M6ɦؾ}ztۇ7=nl_>cj:?7ml15[ۧ6/mlc'7[,ncޠcޠncޠncޠncޠncޠcޠncޠncޠncޠcޠt99tt۟ql:Sldqdqdql>4 8 8?tt<٘77ej:yj:?yj:yj:폁~1n1ncq[bq[bq[bql~3-8-xL1n1nc6q6q6y1o|8 5-c6qK6qK68?m|qK6qK6ϼƸ%v83vǸAv8 8 8 8 .TqT񘺍cޠc c|m۟E=bqc1G6Sqc~m۟Sql.> v?nc+t1?мncw/S,u寢ncp6{Q /+wm r1r1Ƌ6y4mncd1i4m &AOƋ64m iO\ݦMSe`\m i,T+XYx/ɨcЯ-u ~mu e񢢎*O }3xQQǠ/{Fu g1Xcp4ۗ%Fe:4ۗ%Fe2M2ux3!Fk zC:!F9:!F!E9?u i*xQ`bM2ME24u z:=MF&l,>}9AX| r`18cwE4. i],l_"X| i]4/gc$^ed2cSy$} ϽO7)E)<>e'ݧ iO,T2X>e}o6Ϳm424iM9Nq3 {d39{yϠ7ļg'r!A9 rDɤhc 9A>LsR4ȟ z}L>&E|hO, z}L>&Ee}J4XfN4KI htR4i:)4 rN8I uLб}\c}ZcaS6tl1jlؾ}Rǔ 5ilؾ1eCOؘ7l1|ǼeymwMw-wT6tvvԭvT6tT6cޠ2{cޠ2{c c cs1ncq̆q̆q̆OǸAfCǸAfCc c ǔ  ۟S6tT6tT6tT6tT6tlcLƸAi1i1i1iб441AǸAǸA) :,AǸAAǼ47X>=ژ19ƽ09ƽ09?xv4w! 
9d!L?xj< :S^}Sc):jl>}Mccrc1oPcyyyyyyyݎyَyՎyy]vTpTpd?HޏoΏo,"c ol8۟0}rTr=rrqqTrԝq}q]qTrTncޠ"cޠbpc #c #c1ncqTqTqTOǸAF*ǸAF*c #c #۟+SrTrTrTrT1%1(9A1 JO1(9Ac JcPrl?0% ~Fa! J+! Jy31oBc4r`4r`4rlLȱi7h(~`4r`4rl ?%F#)9%F#ǸAF#)9 *9 *9 *9 *9 *9 *9 2 yLaȱ#!;c|{0~fgÐ{0?'Srl?2 !ǸA!\ÐtACtrFU1Xj?~ q1k  / ~cWۏA^Gn?^T1i4~ zl?AOǠEej?4~ g!.,T+Xt.,T+XWL|p9aȠ_x!~ g[~ g+ G0E!L`2!L`2X0dp?,lߤ i* ,lߤ ioR C4U1;0E^ÐA/aȋ*3!^ÐE!^ÐA/a`Y^R C!0Ee*34<0dd2i2 4 z C=_`[V#寢jdWY *c52諌 OVW#Xjd\ zF=MV#&AO`2MEÂeF4wV#e wV#e 2MEÂe 2ME.,T4,XhXL}7a52XhXd5_V~؛J/{AM^To2듽 O'7$&gl{:G7?ggܣݼg|Ctg|c{t3n~!n~D7?Nq|D7gy3Yl;f9! g m$ gϸ9?G|ƽ  gш D3nAα=79? g܃y{3ozB3n~!v3u~϶CVCslu~ou~:?W϶J3ϸA:ru>܎< O:?|·r|kG·z|x H· ·|H|i"D󡧉XCO·&b=Lsu>,c+=Lsϱ=,^ i,4|Xiu]Av<]Av<]A3<we/y>N=J(y>J42ͽp;L}Ba^{XپfL}Ba^D3ײz (y>P <z (y>rC/%χ^JCHR(yZ2ͽLs/yD󡧉CO%χ&J=sڋ>}X^I" ЇEhh>C_h>A/5@}}yB Ї&=M4@zh>4}X7@i!e{afV2 `V2 Їe{afV2=42=L}Aar{iyч~ ~}}tG򀖺yDKч,l+tGi}XwGiuGiчe{waf];Lݮ}XwGi4hp>,ܻ4 =MtGz>4}i;}i;DwCOч<&CO}ȳ>,ܻ4R2ͽ;Ls>)8uGie{)a}XwG<;Ls>8uGie{wa}i;DwCOчT4X>jA= yr,#% >,4@hp>,4@2=Ls>s{B{ a} >4}i"D @COЇ&=MBz>4 Ї&=4e{;ܻDw>uG茺,Rч}ȣ3>uG,Rч&;"}i;Ls>,ܻ4iXЯ XS^!K򹃊A^,}*>ŋbC_(>ŋbC>U4Xm*>,,6Kr*>4U,}iXT!bC>S>͟wұ}cѱ}c Stl(:`llX:c[?c?? =b4??T,U,uuuuwU,ugu_uWU,U,=7b7b7b7b~`djc c c c1n1nұ7b7b1ǼAKǼAKDzT,U,U,U,U,1G'c|Xp|ˣc|X=Gc,`yt,?7dIpKƚcdb#T1\{7^~TcG;,y*y/*y/*1GǸGǸG<:^X^q/,q/,g.by1GǸAGǸAGSytTytTytTytTytTytGƸ-c{}oY%etl?5{*-cɮ1[VIStVU1>_X%ۋqyq\~% G?Hp?y£!?x8<h<qx4 Aqx4G =MGCOdx4ń&ã 4 i*<b^Tx44d>4d>4p?-Q|hG /*Th+<|̇&󡡧|hi2ż&󡡧dx4<.£q'5ãq ˧ d3<hZ<5hO6ã/*<£hi2<z &ã1MGc*4 i^ i^}=hxL=hxLS%{Jn/*<T-<[xL=hxLS%.[9wUҐg^\% yUwUwUwUU% `4!,WICr4 WICOU𘦪1뻌U𘦪/JT4<*_T>4<6t}0t )t )!CcCʇ>|hȯ&󡡧|h·&󡡧|hi2ż<|hi2!<:T/<_xLS!9T/<_xLS!91MCCls>4<_xLS^a>ACCOd>44'=|hȳ·<|h|hcm<7ӕ·<5·|8!5·4 iCcʇ4G󡡧|hi2򡡧|hi2ż<|hi2ż&󡡧|hxLS򡡧>*Cq2*C&w2>*CsE*Cs2a1*Cq2kr*COd24 =M*c U?7˱}cۻ}c C3ɱ=77d:9IBU;U Ͽs!dO!?J1W%C~\|QUɐW%C~\|QUdU24Y =MV%Ki*zJdxLSU𘦪~EU%c4{QU4U ,1GÑ/*GB_x#C_x#L pd p G~&0B?|Q#C? 
i*T82Ö́4 i*fc/)#c/)#COmзmзmзmзmp?ڐoېoې&lCO!C?M؆|Q-ZH1M!d24ن =M!COmd24ن =M!CO Gǵ GǵUl+<‘22cpdSPGȈc#8~`0z8<ci2z8&#Ɉc8&#/*#1Mc*4q i*#1Mc*4q i*(#/*T1y0Gc*4#1Md14q =MF;!o;!;!o;!oς<&Mi|QƐOwc44;1Mucĝd14i|Qd14i =MvCt14i =MvCOd1 _>C\<\+\ޘ7@7@12ǼA2ǼA2ǼA2ǸA.;> ] ]> ](cr?.c1tcr]1w+c V3X9yLʱ;c1w+2cq,~Xf+ǸA+ǸA+SrTrTrTrTrTrT1X9`c VExhc V1X97x|L1>C3ʱ4ccޠcܠ!W`e 9D7)JJ/IrnR!g&et7)CnMդ 9w7)CMդ x6)COMd2釞&&jR4դ iIM^T2d4|QdA3?4C_qA3eA3eA3eA34CY gA3Ǟ͐o]|M yg4_TF34 =Mf4COdF34 =Mf4COdF3?>crl?z01B9#caۏŒ'a0c9cP`30cI1n|L1oP1oP1oP1oP1oP?~bɱ=yBc1?&Cc Mg||L1>C3ɱ,cCcޠBcܠC!Wds9D&_Th2<*B!'dȑ;4rM;Kwh2|Qɐswh2|QЋgh24 =M&ui24z MdxLSB~|E&cʗ4/{Qe1Ϳq|"0\g|Qu1Mcu1:ciz3:ciz3&!ڹz3&댡:ciT1<:V#*I2C$s1C_xCrt1I2Chzu yx]ǐou|Q]ǐou ]c1u yf]ǐu >vC]u =MvCO]d14u =MvCO]d14u i\1Mu_T1wc:4u i^:4u|Q]𘦺1Mucw61Mec:4u &]d14u =MvCO]d14u =MvCO]dEuCO]d1&/*%cxLS4a1<㋊>4}|]ז]!_U/U?:سcϮc:x4u g1϶϶ ?>?ێ#}Ɵmmg~}}}/yWϼgg7ܿR~ _(?>ϼ8?>ϸA ϸA>ϸA 3n}Ɵm-~ 3n}Ɵmg,~ 3n}Ʊ3op3?^_}Ɵy{gg7~Cgm}/]O v^gq_϶"?ێl;)/vLg)mg~ _8  _3n±_3op?~ϼ_3op~϶7?Cg!_%x/?Cg۩±_3>C/!l;4v|_3opTCPr{^ ~ᇜr~ș_.C.]u ý_!~/ЋGCO=M ?~i_~a ?< ?< ?܎ {c{c{O+{Z{Z<~_£_/<n/?GC_x ý_ ~ ý_ ~gn/?< ?< ?^xL%~4_R_-ڢ_-n)k~Aks>c~أ_Gc{p=M ?4/DCO=M ?4/DCO=M ?<gcqǙ><7DCO ?D\Q ?{` ?qp~ȗ~i"nᇞ&z~xLs~xLs3}xLs~xLs~xLs~xLs4 ?+{pYACV/П ?gyK=M ?4/DCOyQ=M ý_!#_1=G1=G1ͽ_1ͽ_!OM_1=G1Ϳ}tJp~xLs~S{c{CRc{c׏_ ?</|_߾e)n!o)n!O_*n!N)n!o=S<@ Oa>ȧɃ[)=M?<?Cb3)ƱcScޠRcܠS!Wc9D_T1<* R!'cȑ;rN1ڐKw1֝b|Q)Ɛsw1b|Q)Ћg14b =MEmi2zL1cxLS)R~EcJ14{Q)`1Mc_T1_L1L1))b L`1o~&0R L1~&0T1<ci*T1_4_RL14_R-|-|-|-Y9.k~V"'ПlDO6k?٬-OO6k1Mdm14Y[ =MCOdm14Y[ =MCOdm147;g<;uc 3X:Sud1i vы4|Nc;/-;Nciz4&;1MucL4i iT1Nciz4&;Nciz4&;N4&;NcxLSi1MuCOƐi ݝƐi ſƐi }?i y ȝƐi yÝƐi =Mv_T1lwCO:1QNcxLSi iz4|NckNckNckNc4ǵ=[4x$i g1Ǟ{vCp14i 9MwCNӝƐt14i yV?=v3_1uK3_Œ:cul??1ZW8Tcul2 cԗǘ7N7/1oP_cޠ4ƼAuǼAuŘ7N7N7N7N~dd1uǸAvǸAvǸAvc ;c ;c1n1niii_?N7N7Ncql7=&㿝Ʊ(Zzl?2;;cZ8oO[Sql?'1lZc-8jii|L1n1niiiiiic4xi?g1g1燾L1L1cgS~>#3)b L`13)R1i*T1<ci^b i^X[|QIWWp?jk?-'BxY[ emE%yBxY[ emACOdm14Y[ =MCOdm14Y[ =MCOd1v82g823#Clp8#pdgÑ1ͣpdi:rGÑ!pdȓ 
g8}JolvcۯcJmllcU/~1U%~dl(gSUrl@O!?Ïew˘7o1oPU1oP*cޠSƼA}yJyJ>/1oPU1oPU1nU1nUɱT7ȪcJqJqJqJgQƸAV%ǸAV%c(c c *9 *9 *9LU1oPU1nR~al>}uaqrl?0`qc>89ۏ'qSqrl?1Op3889 89 89 89 89 8cޠcޠcޠcޠctSqrl?y0~D1'Cc,NM1?'xXόc*N!,Nycޠcܠ!Wd/9D'_Tq2߱d8r.N\7] 9g'_Tq2] i'_Tq2Y =j'COpz,N&/8Tq2 ?J|2*B?J|2* ɨdi2*zJ&ɨdi2*zJ&ɨdiAOQU11ΨdxD𘉢2*z&J !v8*`JdĎJ&ɨdi2*zJ4 i**TT2u#c}F4F4Ս|Q?4Ս F4k i^?"؍ iT7Av#COd724ٍ =Mv#COd724ٍ =Mv#_T724ٍ =Mv#cF4k iT72aG%CKrT2yG%C/QK`T2$G%CqT2wX/z J/1oPQ1oPQ1nQ1nQɱ7ȨcJqJqJqJUƸAF%ǸAF%3*c c )*9 **9 **9LQ1oPQ?p~al?0I>p~al>1#ҵf8rl?3ۏfN4e4 i^O0c*axLSʇ|}ˇ/<ˇ/<ˇ/<ˇ!_|³|agˇ!O|r03Qr04]> 9MCNÐt0)}rY7ۯ~yc/~e 3sLuñ;?cT7?c:Pc\ycޠ9ƼA}oyU7U7|L_cޠcޠc c cA1nuT7Y7Y7Y7ۏwqq';ƸA ǸA SpTpTplicޠp`l?n𘪈cazl?i0k*~fle=ۏ1V=bql?2|<*~bl?2zYE|LU1nU1nU1nU1nU1nUTEUEUEUEUEYE|LUıucTEԏ8cqUı!">*!"*7x|8 8 r"ɄCtEUCnUĐkt1]E ׿!'*bQ!w*be*biz"d14YE|QU𘦪1MUd‹"4UE i1Mecʆ4/pW_T1""UUUE L`1܏~& "~&T1<*biT14_R">*b )"UE )c1UП2VBO?e"*bO?e">*biz"&*biz"&*biz"&L&f ]q&c`j3L|c21L y<Đ/L =0&_T21`'COd214L =M&cJ&4L i*T21?nmßm՟m#~}R}P}Nom'#~ _?/m3op7oܿ5~ mßy{pl7  mßqhDg۹qhmßqhDg چ?NSDg چ?RDg چc{g67 ۆ?g}϶c{gۻmCbgfhl;p϶&?N&6vgAmL϶ ?Dg چ?67ϸA  m3nmñm3opoۆ?ϼm3nmñmx6velol;3? چ?Cgۉ6!h  chۆ?6+T‡ n>T(R  rZ*{CK/ ýPB noxýPCO=M ýP1ͽP1ͽPv   ^(5Ba ? Bᇾ(~}Q( B~&PvC?P( B~&P (~xLs/~xLs/~^(^(p{c1ͽP8EC"Bv # !B?H~/$D? p~" "z~i"Bᇞ&"z~i"Bᇞ&"z~i"Ba7><7><G?Q|x lo}x l"B""m"z`{CUCO=MD?4!Dc{c{c{c{c{c{0~1"B1#"#Ӽ~1"B1|@i i At? ̇^:$:,:z} ~3ׇ<><>t?<>t?<^|jxLx0>ȣu41u4  Ӽ(~iPBa ?4Q(DCOyJO=M ?4Q(DCO=M ? ?]ACPSCPSCFQC ?I ?5 ?9) =!P!IP)({CSP()CCPCPCP(П2 ? qp}7}!qyEq>>CCqBVq>>C>4DCOq=M>4DCOqp}i"8߇48߇4zՇ48߇48߇<+8 |zC} !q!>MooBCoqyDq_q*Wq=M=!_*8߇48߇4||D/<.RqL@C?>;t>4TCNSq9M>4p柧|c/~uSol6cۯ~a)7`cul|cgul?S0R9}L cq1oP_cޠ<So7ǘ71oPcޠ|cޠ| cToTododol?R0 2|c |c |c |cA1nq1nq 7878cyygSoG{l}c+jul=p~cl:a1^`/)7{L1n1n1n1n1n{Lᾱh}cۏ217coᾱ! =p! 
pߘ7x|17 *7 :rCt~/_܆{7< 9^T~/F ^T~/L =;p?z4 i*T~/܏^xLS{/*T*zc}*=^z},< ^W.^ {/ {^iz,셞& {1Mc*4U iTa/<ދ*4U iTa/ݽ]w݅݅Dž?E݅~&v]gkw!_vr݅kw!]ivr݅|M]O_Ƣ~ +vcl?j}LEC9?c[T?cр8cO1oPcޠvcޠƼA}7yfUU{L_ cޠvcޠvc vc vc1nETYYYq,ڍq,ڍ/ǸAƸASnTnTnlvzךzzl;jwctl1=k~`bl7:OK1ҖXOJ%sv7YY{L1n1n1n1n1nTUUUUY{LkwcTv7cn!=!ݍݘ7x|v7 v7 vr݅aCtA6nțf]f݋jօnօnֽf]f]f]}Qͺp'_B>zܬ iYT.sc} =Ƞ]1hz} څ<]7څ^v/*h򵫃vɠ]i2hz څ&v1Mc څ4 i*hT.<݋ څ4 i*hT.Bc.BNq.4Y =MBOU =M_{«,y Ʊ8ӷ~d`l y0 a7Ø7,ݘ7,cRq=,7,7,7,Vdndnl? 2K7 2Ktcޠtcޠtckǔccǔ=bnl=j?N{l=X_h1g7ߩ,S9 c sv)g7{L91n91n91n91n91n9ǔcL=d~bɺcJ֍x]1Y7{d~h &Sn &`n< B'Ń켽[[ ۋ꼅B켅R-_[N C(!?ĺoC~hy uEuc꼅4y{QJ1M%c;o܋ззp11"po#p~8#ӄ 4a.ӄ"p1ME xxLS"pAvBe-y{QЋg-y xvzųz켽[ųz=[iz켅&;o[iz켅&;o[iz켅&;o/He-<[) SAB)c-Bt-r{Q)*r =MBO)d-4r i*T-<[xLS)Rn1M^T-<[xLS)Rn1#Sn1M^T-4Yk =0B]k{Q|AId+52/a*lj&YՂ^2AXk ܵ ]k z`=0ڂkmA^p-K=Mڂ8Zp9~Nт8ZЏ hA?=8ZЏ hA?2]т8ZЏ hAOq8Zd-ȻS=M*т4 .T'LSq2Mт"8Zpb=ein0\XOp hein0\8Z7zk탼h [Wd;|; [ld Ar-A  9 [p [p [plegle i.i*vPA2Mق4d .\>d .T-LSAd z =M*4d z =Mق [d-i24d z*-.* [d VlAp- yA ou9!*-.ق4d{͵ O1\ Gas-͵26ׂ<`ws-v7d9- o6 rZ/ .4pvP2MŃ4 .\>]8 z,T,ϛ~X8 -c,ϛ~X8 -c gAe,%?MX8;YП&,iYp ge*ysp ge*y󃌘K=jF̂5#fA`?zԌ=jF*b1 zԌ=ȈYУf,i2b41 z=MF̂&#fAOɈYd,i2b41;S20Ê= L20Ed,1b1  1 z`T,ȳ?Ĝ&#fAOɈYd,LS2MÊ41 .T,LS2ME*b\Yp"fein12b\ك/AƈYc,gG1  ϝ1 z`=0F̂#fAށp젚aY5Â\fX+ zlx6d3,ų zlyͰ o[ zl=M6Â&aAq3,ivPͰ /\Lpz2eji1aei'\fXpz2e li'\fXp{a?d<A 8(\~ł| 򺌃bA^pP,ȧA 2ł|;XObeb/ł4/ł4 .\ .TP,LspP,LSA i*(\XpXpbe =ȠXdP,i2(vPAɠXdP,i2(]6ł&bAOAɠ؃lyOͰjڲ=)7Â'fX7 g3,nyOͰ /ݸvPQ I\fXdP,Eł9 dP, =DWNt0DWNtynDWgWNtiDW މ ʉۧ ]eJin&Lt=M&&]AODWd+i24 zLtT+i24 .T+LSɘ2M%4 ߉2M%c4y1GX:W _A?+,T+g寠W'.yBW _A?+,=M&_A4Y:WG.=M4U .>Y .T_e*in,>Y:4]\ O1]:W.W,y%W,4] r.9M_A4?e_~OXySl8c/Xecʂ'v_;X?}LYcQOXPWX?O71oPY1oP_cޠ8ƼA}myʂy[6o1oPY1oPY1nY1nY~? 2 `c `c `c `c}dldlq̂q̂=,ؘ7,ؘ7,X?o1fX?ԏ) 6Y~8=Ϧcʂ~.=֏N1JctߕW%~Y_`c `c `cdld1eƸAfƸAfƸAfƸAfƸAfSlTlTlTld11L)5o}`k_ט߇1~0_c,`kLy_cޠ_cܠ_A` 3\~yP`? !A W9Ig3\ rpLg*4 .T&Lst+i2uP`? -c+跌`? 
-c+跌p1%4a2\A0 3\epi*쇿epi* K[~2nY:Vлei+ݲnY wVлeiJ[Awփ,m[&K[AOVdi+i4Y z,m=M&K[AO*mTi+ȣ3e`Jz K[AVL 򪈓XA^Rp+I O'|>:$VpXeWX'4'4 .\: .T+Lsp+LSIJbi*\$Vp$VpXeJb=$Vd+i2uPI$Vd+i2/'&XAOIזI ,9uPI_[&$Vw 򖁓XA?LbyI ,9'*b%'4 zAyTAZՠ<[v *ȳeנ<_q *5 ٲkPA-kP52MeD45Td *i4Y zA=M֠&kPAO5A=M֠&kPeAi*#\TpjPAJ\Hp2"پ蘊 ө_BLE 1TA~ Ab**3t_k^kwƖ^kkk|zͯk|ޑ9#w^kXƖ^{5nP9\rP/]䛯E/]l՗.r/{}"gEA`/]P_]lG)wpPR}颧Rח.ԋ~P_ <2ԗ.-C})Kt]LOԗtџ&/] Kiet^\K=M4.z}h({CׇE ^JۑE ^J?F ^Jh(]PitDC颧EO &J=M4.zh(]4PitDC)Jclg.ދˆzⲡID&7LEo73Iy¤LEo`$]12ILEO&2I=Md..왤4{&2͞ILg..왤4{&)3IiLe=tqf$]\RL bG=Ď.z&]L;JŎp3A");ȃJŎ.z& "Yt3AgdQ2$.[ɢ'b|._:O[gˊ]䉧bG)v챣|;'[|;]bGd+vtObG9MŎ=vtqf]\|(v\t;LsQ2;LsQ2;LsQ2; e=vtqf]\|~-`?cA *ScޠƼA}9yjUUzL cޠ*Scޠ*Sc +Sc +Scde1UƸAVƸAVƸAV77X?YYzL1oP1oPRX?oL_Tgc}?:7:X?ec1թ叱~cccqc_gcޠT?:7:7:7:X0 N5 NTc Tc Tc Tc Tc TN5 N5 N5 N5 N5/fjylaj5Tc_k֩Z/9Syg1թN5u~%;u1oPu1nu W:ՃlL&1|ݘ tcSAnLv1uPO62W GS~+O*@짩A P~2@짩A P~2@uPO?*@G TA0@\UpT~\Ud*1@uPUc*1@A80 z} P>d*1@4 z P=M&TAOUd*i2@4 z PTS՘ 25LԘzgT3ac*؏w1SAτjLyTП`lL=M6&SAO2M54՘ .Tc*LS2M54՘:TpSejLi1\ސP!TAo /:FFb*L5_f4cd15ر~(XciA:ձ>~Oa?c)Xcޠ;ƼA}syUUzL_cewƸAƸAu1nETYYYƸAƸA1nE1nETUZ{a-k_xL咱_T~-1ղƸֲ~#`_xL~Se_TGcX%cޠ%cޠjY?Z7Z7Z7ZXv1 5 jYc kYc kYc kYc kYc kY5 5 5 5 5דE~o{E1,b++c|=Y|5gET3"iǖƼAƸA\X"V_}t+wE*bf"A~?Y g~j(.-.bճ ?]\:"V~"V~"A~?Y [A԰uPE?jX E2M4U 2Md*Dkص zCZ!vL4 kص:Ub* k VAkAO]Z=Mv&VAO]ɮUd*ik4ٵ zZT꠺Vյ 2>r*虰kLص z&Z Au<%r*虰kuP] ܵ ]ɮUd*ik\UpVeZik\UpVյ .T*LS]2Mu&UA.:]L z&LW=|? [Z GaK+(jiTK+觌-2< wK+#N~ )cK+觌-2w\8Ip$ .T$LSqɖVdK+Cni=M&[ZAO-ɖV zliTK+ni=M4 .~7aK+LS-jii\n–w~ gK+-  gK+-? [Z0[ZA&g[Z?VtK+i4 rniy- VO竈-qliqli;c [Zc [Z5 c }1n~Y1P~Sgk_T5W ~w1u͙~qfk3cޠz(cޠz(cޠ:[?777X?kzL1n1n1n1n1nXG_Ov1l\W3clfgk4;[cBc ;[c_\ rb _p|: VXAbT+ b`? ZZbT+K~? d+K~? d렂XA bQ~_ ҃ bQ V5 bi*\ Vg.TA`XAoA7 Vb+6XAoA b!X2X~4: Vd+i24 z b=M&XAOA A*uPA`? .3 XAτAg VT+ȣ X A? 
z b=M&Xe bi*\ VpXe bi*uPA2M4 .TA6<t*%yؼ z l^yA5;^WA y䡠WAHyؼ z l^T*%yؼzͫUK`*%yWA^Sp*ȋ n^=M6&WAOͫ ݼ zl^T*ȓf74 .TD$LSͫ2M5^;zcU'^ynUt*i:z4:UpWe7Wo$G4o$G4 .\:UpYUpYVյ .S*O]>uԵ ?Եz]ǮUc꠺VAO]ɮUd*ț=SN %שN\~us:U\ #S/ԇ~?{drvaa9|F>,w>,gG>\Y#S.Ӭ凞fHX;Rz #;Rz #a9Kޑzv>zGC/w#?zGrT?zGڑ=ޑ=ޑ=ޑ=ޑzf'>\|py?k)؃O~Ӈ |G>}^yӇQӃOz=كO.ӬiӇ4ke52|pf >XO.ӬiӇfo:}04>Nz&gқNz&!tz6)>LzC!Ot lӇ~{C=b6}7g>ݳMmov6}7g>ݳM٦ytlӇ٦=͞m٦=͞m6}ilӋ5!ORmpfd|LF2>\YM.Ӭ٦ylӇ4k$e52+g>\Y .ܾ"{eL/2Ӈ~zC|42Ӈ|Pf/Led!l>䓍2ӇK>A%yi\_c=Jb=/XχXχK> %yꅒχh|>#K>h%yꅒχ/n`/|Lin`/|iχf/|iχf/|iχf/|iχf/XK>zK>.Ӭ%i2Zpf-|<|>\YY4?kpf-|L|^g~>3?뙟kC|=_yf~>3?yχ4{COg~>d~>4{COg~>4{COg~> 12?z=b|bd~>4{e52pfX3?.ӬinϋC=왟χ~m{Cq"_۞Яm|7R|Ǿg~>c3?yχ|DCN9Md~>;2?r|i4XeShceXQcjWtc~=zL c)XSh|?cޠxShԷΘ71oP8cޠ@cޠ@fThThdhdhzL 1n 1n 1n ~cdhdh_zL \w(ƸA'ƸA݉Ԇ?O~ob_&S;hd;h_wyUƼASbTbT; ƸAƸAS;hd;hd;hd;hd;hd;1ƼAƼAƼA~}~;}1?c>X3}>?ǖ4  @~k>A|y r@A>`? }``~ A cϠlƃ~ cϠlƃ~:xPo6A~"\lŃ*D>!ƃ4 .T<(؏=d@Af>P$f>A~ .o@@~@ z=M&@AO}>Pd(i4:>Ppy? .@>P'@~xyP} 9 d@gG @A԰4 zi\>Pp@ei\>A4 .T(i@AL z&=<&sAτ}  } f(7}f>Ѓf>Po6@Aл@A4 z=M<(u(itP} OK .T!LS2M4 }2M4 .ܾ" .T(Lsd(LS]ۼ|:>P/@A|@A|9@A>d>Py*>Pt(i4:>Pp@e˗@U ŝ i] z?Ó<_*=_*TTAn͟cec*|c/X?{Lej{̯83?XcLe~<1oPJcޠ8cޠƼA}y6UUyL_Dcޠ8cޠ8c 8c 8c72c*q,q,q,{c 8c 8c7272c*7nߩ3Hq, 3֯Gq,0:cX3k 33/˜7Θ7X0 j cޠj cޠ:?Ƣ7Ȣ7Ȣ7ȢX0 3 :c :c :c :c :c :3 3 3 c,BcXW33e~Kh83e1>G,k-,9bgY1'l6AN8asP ('lYSM< rAVj1OVj1OVjRJMSu`?;1~G +5e! ~2DM'C4~2M'C4 d& <M5 q5 T&i2D4 z =Mh&C4AO!Mdk&ejLٚ -ck&跌`?*.|bL.ybLt1&i4]9bLp1eW.gA=c?C>y |Y? ry(Lt&i: 4pg1f'c-yLŘcc'~3ߊN7b=bXDXt3ϟ'Ø7b̘7b̘71oP_VcޠƼAcƼAcӷԘ7b̘7b7b7bX1 1c 1c 1c 1c7b7bX0 3 c%1ɚcɌ=֯eq׬Ɍ{=&3oq׬Ɍ+=@0j2c:X3 &3]&3 @0 &3 &3Iy<77̏&3 &3 &3 &3/FqɌq<7Ț7Ț7Ț7Ț7ȚcɌyɌyɌyɌq,͌+HcX43ogifK343g1>c,͌p-,͌cif_1fJ3A.4sP (fZ݋SLX riAfp2Ofp2Of4LS*5`?74~G K3e*<ȘLoc2Ae1&vAe1&sP12d<9Od4aL& c2 z=Md&c2AO1ɘLdL&i2&sP\bd/&wbz1A{1Ջ ?0؋ ^Ld/&LS2Mb4Ջ .T/&LS2Mb\^Lpz1؋ $ʽ Ϣ܋ zC!b{1AIsPR> {1AMc2A=:&c2Aٌf3& c2Aٌf3&͘LynLПC9ĘLdL&i2& c2AO1yLpein_i*&sP12Md4 .ܾ" .TL&i2 $L? 
0A(L0A>eNyT$LN9 S$LO0A: 䱩0ANI $Lt栒0eJ=A: i0A: I0A8 sPI Oe9C34A;C4 r95C15j5߬^jA>FX4ߥ~5cc=ccr+l͌Yakf_{lfkf [3?dA%cq2&)sPɘ`$vYl=dL. p2A_~W~9K0A_~W~ (rP`X~{%ON`%ϛd%wm]~Wv ]~Wv9KpyWv ֠?0v m`?j mj=M]&.AOmɶKd%i4v9a\]˻˃l¶KT%ȿnw% ]߹.A`4v zli\Kp.eji\A]4v z̷=[eη9S|Kη9M[-AN |A[4oyPA=c;_,،V`3oZy`f_zL1*،+VcX 6cޠcޠcޠ 6cޠ 6cNǘ7c*"y*"y* 6c 6c 6c 6c7Ȃ7Ȃc*،q,،q,،q,،q,،q,<͘7͘7͘77Ȃ;Y' 6c\>Y‚c*،Yaf 6cc5 6c|VX1l` 6?H*u`_҂D4W\lVZχ 6CsP`? e&זڲCU:4~*OJسC k2~&k˚LЯ-k2~&k˚LЯ-k2U `? 3{d~Y Ҡ{d&4Y z=Md&k2AO5ɚLd&ȿv&זM_[6a~mل jnس sPM M{6a~ل ݠ&Ld栚0eji \&Lp0ejT&LSMސ)!c*i1Ao7`Lb0&Sc* h  z yx`Lgf3`̃ f3`Lo61A: 1A11AO`LgY=Mc*䁖1e i*\}E2\`Ac4 .T0&Lsd0&i>F L+N"05L,NS3A>NyL~N|:5LϧS3ANө Ltj&i:549L4 xͩJyLglNyL[NyAfl88䡾8A;0Nt'i:sp a}sY ՜[Z{z5>ڻZ{_kcZ{_kok} jk}m>t^k9cZZy՜׼^y՜׼^y߀y7ث9c5oWs^{55n՜׸ATs^ke^Q՜׸ATs^QyD5v=5n՜׸ATs^k7c^Qyk q{1aln݋ q߽7bkw/&Mń׸wo;?ϛowϿDev5w;{ev5>,gZbT^㳂kvk|VPY땥׼^xՎ׼^Yz앥ڽ׼^YՎ׼^x앥c, 7k *KFq, 4+Kq, 7k *Kq,k`, .34΀1Ԓ^㟇Zk5G rczk.G[c'Gq*{"H.kt.k홋v0}ݞ .CVbsx]n\SxE|]ˇxQNj.Cb;׿`]l.svBuO6DE|]ˇ>vw/DO}Zvv]#DdOt=_>QdOtD袧>Q.z]4'iO <;P_>$.C_>$.`O])Dd#A E }Dy~>Q.z`]!Dy>EO ~? EH] D~"At'D~"AtJ]DQE AtD袧E(AtD(Dyڣe=AtqfO]\} "AtqfO{2͞ D"2_Q|E}.z\ȿW砢>xE}.tIQ<_R"E.^Q\>9ME}.r\4i*sT'أ>=rU]_+tE=)tO]r@yE)T.U9L, c|nXsX9疉ǔXXX7ƼA%S"dT"dTbǘXXXXWƸA&ƸA&[~abidbidb1%ƸA&ƸA&SbiTbi\~ab&~Yǔ5^5^5[>51׉ewOD~4/u`?5U' 5~1Ś OAA~ZM d'蟓a`? d'؏Ay@OWП e`? 
٠?9 ~ z =My&A;y>テAoI ˜ (I I>ewe>AOIɤOd'i24 zL=M&}&> zL=M&}4 .Կ.T'LSI * .Կ.Կ.T'LSIJiYeC!ƀAIt':n rNySI o8n rNySIKI oI94> zLq>>~\b@c}8c}7c}6c1ŀfdb`Ƽŀ呱96 *4 *4 *4 *4 *4 *4 *SǼAŀƼAŀƸAƀƸAƀ1n1뗖ƸAƀƸAƀ}1n1s??ma1na1na~a廟a1na~Woghgh_&~ƼAShThď=b114!1G !c|B{c !s!~w !s!1oP!~1ƼAƼAƼAƼA1oP!BB1ƸAƸAƸA}1n!1n!ǖߗBBBzL!1n!1n!BBߗBzl}!c1ƸƸǖ,ǎFe˾~&P`:f`}Qu'VT'O@h;ʮȖ(8gmYcI׉-LP PwosVpOp?_ZA_\T'ˁOd'i4Y k=Aw'k=Aw_]T'ۮ} sQ r%ZOЗ k=qd'isQ`j=2Mze4U isQ`j=AO_E .* z}L) _8 z}L>||E|^S>A{q'XOLS)O Olv~7f'X۝!v~| Od'=Mv~.Snw~e\T~E5w|N z&l=6w|L Q;A>vs'ȟ`n͝ Nts'i4 rn9M7w.so|N 餛;A>ts'nH͝ s7w|pE5w|pN zl=M6w&;AO͝1 SŊ`~*,Oy| O|?U流T'X A`QbEQ'X E+e 4 z =My&2w~|_O z=Mv~|Or a;?OMv~eHwLd'i4 z=Mv~&;?AOOd:?AOOLS`>,T'XO8 i#2M};XOLS49/Cw(WE=0F#@A b: z`=0F.* g#@2vPL12,T(X i*tQ`"@2M+.*,T"XbEd(i2 vG&#@AOPd(=MF.*qG&#@2MEe펑`"@ i*,lw]lw4##@ zC uc>u1nu1u1u3u~3 ;4Hp?_JU ~8T 'W]-EՁ~@ ٕUWp:Pp? A_]T(ˁu:Pd(i2,!e|2_f|+f|.*og|V Za'mZa'i2sQ`2>2Me|e4 i*sQ`2>ϙe2sQnjOc'ȷ0njOc'1g|.* 3>2Me|e4?Aip'%9OK`'% ?A>v'RlC _4 1?AOjڍ1 96x~21 9t''1 8xcA>v'' p;4 z=Mf|&3>SAO=6wېA!E0eCJˆ -A!E0eC`ˆ ),R:(X~),T:(XtEE0eJ4 zL=M&AAO頠tPp/ =M.*4 z?iAF.Vѡ _9:tQ!5CAst(W =jF5CA!vt(7Ďjѡ-C2M}B,Tt(XEEedt袢CArt(1: z`ѡPct(1:tQѡP? i;FFe펑ѡ`C2vPLSѡ4 i*:tQѡ`C2MU9&CAOѡ?4*Gdt(i2:4 |CAOѡy>ѡPLT9 i*:,Tt袢C2MEe펑ѡ펑ѡ`fcdtbcdW r& 9wBAcqW(c, 9wBAcqWb+1w ~CcS@`lؾ}vc '7/nl~ᘢCc[ۧ6/혢CcޙCcޠCcޠcޠCcޠCcޠCcޠCcޠCxqSthdthdthdthl?5 2:4 2:46:_E. !13d4ǐ1}~CFc2ۏW+! chl?Y9c`ahǼrј71ƼAƼAƼrј7{wy881őƸƑƸƑƸƑc Hc HG^G^G:8f14 *4m=8Ҙ78Ҙ78Ҙ78Ҙ78~qTB#cޠHcޠH?8787878~pdidXwcididHc Hc HGG˽HʽHc #Gc3rc"hpQ=~9 '?{~zD Sp?܏33L8Aفo~m#Qp `-Xp?rQ=~v  o# g 6ˁ=}9G4# zld(ݲ n . 
i-@ !@Amw6Ppcn zl]T(X6PLSm`@2Mej]T(X6A|L6Pc( |]6Pc(  @ z}lm`@2Mej=M|pP3a8(0tᠠgpP3a8(0c~|ppP3a8(` ᠠpPp z ]T8(d8(GP C9ᠠ 0t8(AA>u8( 84 r9MAAN pE2_ :[|pP -AA>Ov8( 2h|pE|pP z =M&AAOIpP2ͿI`zD2M.,T(XQoI# i,T#XQLS=`zD2M1e4#>FLS=`zDAO=Qd(iG4# E1&{DAO=c=Ϳ8Ȓ%`3OjX2 KFAs(!.}9dQ]2 ݲKFAqbI͒QLSKi,=M&KFAO%ɒQd(id4Y2 z,]l?Y2 z,=M.d,҃e*4U2 KF2M}.=Xϥ4U2 idtQKidt%*%ƒQc(聱dbKFA%ƒEKFAc(XY2 i;Fe*4#KF2M.d,T(XEe*4U2 z,=Mv^A?3 儻AAݠnPc7(ȗ]T7( we4 i4_z,$šPcq CAšPcq(.šPcq(W<.=M|Pdq袊C AA>q7(ȇ0]a7( AA>q7 򩴻AA>t7(ȧݠ `4 r9MwAANݠ n_< 9*ߖQ j:*cMG|P BA>bwT(72*A>gwT(7.**4 򱰣BAOQɨPdT(i2*?t4 z ]TT(XPLSQJU4 i** GeJU4i**,TT(XPLS`B2ME.*U,TT(XPdT(i2*4 z =MF{ i2*tQɨPdTRAOQ?.* ? _UtQ<[EA[v(ȗ/n}UUVQ* ݲ[EA|qb`>y,lU4* zl=M&[EAOVQd(iUtVQd(iUtQ`>y,T(XVQ* i2M}e펑`ZE* iU,lwl]lwl4#[Ery9 8Gtr(%8G.$sDAq(%8G,K(e爂!CJ9_g}mmzc?ۆٶ϶}؞#lg>vfcl}mlm[lggٞ#sDy?g#sDy{3op9ϼ=G7>#D g rDc{3n9ϸA>#l;7g rDm*?#S?D0gz"UH}Q4REc?5Tg*l;:Vm*4REqH[,6H} [,6H} =U7>Tg`A3opO} {d>1Dhl} "c7g 2Fm?1Dhl} "c7؞13Fy{8؞13Fy{3op} ϶y{hl} ϼr?g 2Fq} "cv43nϸAd 2Fq} "c4g>1Dhl} ϼr?XD3nPۙЇ1zo}!y }n 馇ۙЇ馇Adnnog{ vp v%gnnnz=<>>1DR!*)O${R!*)O${R!*)O$zc}mW J =$iIe{RazX'iIziTER!B*)CRׇC^PR!_0()CRׇCI|ѠPpO =,ۓB42=)LsO =,ܓB=M$zۿ?' 
C^zC =z7Co!7O&z7CoׇCO|FCOGR!()Cz"J =I#)EC>!VR!))Ozȟ J =4zi*)TR!CNSIB9M%z`J =I|C>bTR!))zȇJ =n%nA$n|此í@ܓB=M$B=M$zH =4zi")plCOI&B=)LsO =,ܓB(LsO =,ܓB@Ie{Qa^xX'iIe{Ra^xX'iI^xX'iI&B=M$zH =4zi")pCOI2M$򝫒B=M$zH =4zXw^xiڢqo8zȷ{j=ڢq_[4ųG6H8z|CxV!ql"qLs2/G=M4zh=48ziqD衧CO&G/G=M4zh242ͽq8zXbae{a8 XX7z*D)ב-H=Mԑz#=4QGz#=4QGziD)בz#=,l#۽&H4۽&H4:2v:2ͽH4:2ͽH4:2ͽD顧:CP顧:COu&H=Mԑ`H=Mԑ{!DayX5QGzXב{a^GzX5QG {Mԑi{Mԑ^ue^S]<\C.RW)X~RW!E*="urC*=Uzso*=7ug\~~4ool1g7nl?rL]}rcW4mlؾ}jcҎ4杩4 4 =0 4 4 4 4 tL]1oP]1oP]1n]1n]47Ȯ1uƸAvƸAvƸAv3|c Jc Jc1n]cO>4=1~cOR1{=c4Ɵ1z{"ƸAƸAc`'biǼrOӘ71ƼAƼAƼrOӘ7Ә7XdididZMc'[Mc'[Mc'[Mc11cj5ql5ql5SiTiTil?}L1oP1oP1oP1oP7V1ƼAƼrV7V7V7V~udidXWdididZMc [Mc [Mjj˽"[Mʽ"[Mc`Wt){ g.c2kp?Op]T) ^q~+w?TSpU|Q/~j ~RMCp=j)>.;ROy\ E|R.*^廢RpQ5݃KA`p)XRLS`K2Me .]Tp)igT3Yc z1]T)i4Yc kLA>w)i4Yc z1z514Uc i,T)XSLS5Sd)=M֘^kLA51>֘^kLA5 Gט^kLA5S]c )kLAO5 vtQ5 x#ט³5 |1 5 ~38טSt)i4]c r19MטkLAN5/*.Oa)N5ѩ u9.|TSOj eL5Cj zL5=M| TSd)i24j zL5x=M&SMj i*,T2MeJ5`2M1eJ54j i*,T#XTSLS c4j i*4j zL5=M&SMAO^zL5]UTSUj zL5=M&SM2M|TSLSa<]ab+<ۑ}F廢_T)< _Y;+kG|o#O2M}>X-c)i24y z<=MF&#OAOSdbSd"O2M}>X-c"O_3y i2M}>XSLS}LS- PU ͩ PAwTлe*ȗ.@[- PA*@[eFF`fe*XTL݈,T P2Me*@]T*XTLSTd*&.@=M& PAOTQ\ z,@]T*(.@=MeJ4ۍ( P2M.,T*XnDYnDY iQ.QeF9MG<̑ gSgyXB<yfɑ gS3q)t<yΑ~tؾ}z4ۇ7nlؾP1Eɍ79ؾ}lcK;ӘwӘ7Ә77Ә7Ә7Ә7Ә71EƼAEƼAEƸAFƸAFCc #Oyyyyۏqv*/PU !Pi*\2Tp_elY i ,T*X2TLSe*C=M|z2T] z,C=M|2Tϻ] z,C=M&PApP2Me*C4U i ,T*i 4Y APAO٨lTd6 QAO٨lTd6*F٨lTd6*i2lg|SOS OA /OA>p yOA>p)a?ye;4 r?9MǟOAN Ste?9C_KQ/_:tfXA>s+o6XW+Xc^y 9y籂|6i24ǺXp?q BL]T,GNb`A>wv, `  A ߁8X/`2M%eJ4 i*vQ |ٕ 0X z}|Õ vX z}|>V|EUWLS`*_2MUe|=MV&+_A>w+iL AW&+_AOWO] +_AOWd+ծ|O\ ق+_U +_A>`p+/+_A>lu+ |+_A>qu+g |˕ lW+_ANӕ Wt+i4] r|]T_Br,-`A>s,Gv|}/_Z^&YpD\T,ȇnf,Xj7[M`fI@7ɂ园&Y $ lI/ $ zl=M6ɂ|N&Yd,iI4$ zl:@d,iIvQM`d2M5.,T,X&YP$ i,TS%X&YLSM`d2M5Uej4$JLݽI,TS dAOM&Yd,iIAOMj=M6ɂ&dT zl=M6ɂejM`*2M5Ueif{^̂kfۿ!̂wЂ\;hAt-ȓ`?+v;>}wcՍ'6Onl_>1uc۷6Oml_1uƼ3uƼAuƼAu6ƼAuƼAuƼAuƼAuƼAuЎ6 6 6 6 6|A;777~udmdml?= 6 qrL1oIc꧍yK꧍yKWqK짍qK짍'Ǹ%Ƹ%ciԯ3ƸAƸASc ic iԯ}icޠc`c?mT?icޠicޠic`c?mT?m,u짍q짍c?mic;v, [f2 j1 
ceO\T,K-`Zf2M̂ej42sx&CgA>Tv,G=M΂&CgAp,=M΂&CgAO _u8tvQ`Bg2M΂e 4: i*t4: z ڡYd,i2tvYd,i2tk΂|Yd,i2t4: [fA2 }̂U- w,XBJ0- w2 [fAYO2 u,ie42 rn9M̂[fAN- E: CgG5΂|Yr: *c,ȧ_"x#[Ղ{jA>)t\-1,S\-XZ Lq :,S\-XZpod\-i2Cl.*4W !jAOqɸZd\-i2cAOqɸE%^e4WKLSq`jA_q\-XKLS`j2MՂe4x i*,T\/2vȸZLS=MՂ&jAOqɸZph=M.*4W z]T%i24W i*[iՂeJ4x i*,L?h\l3nt 򕵃n=Vnt 4nz FлenAAwˠ[LkϠ[LS`fמA`:2=nAOAɠ[d-i24t z =M݂&nۿ =M݂&nt i,l3vkϠ[LkϠ[LS`n2M.,T-i2nt[Gt z [݂-nAq-2nt z ]T-2,l7 ]l7 4ۍ(n2M݂eFA`nt i*,Tn2M݂e =M݂&nAr-i24t z =M݂<[dnAr-i2,T(XnDt i*vQA`n2v#ʠv#ʠ[L݈2v݈2,l7 4t iצ\X~:n L) w\gaX4Ny) w#)oSpcS2dlؾ}vcrǔ'7/nl~FRpc[ۧ6/RpcޙRpcޠRpcޠ!cޠRpcޠRpcޠRpcޠRpcޠRpǔqLS nd nd nd nl?.< 27 27b;̘7wռOcǔ>>>>qˍqˍ>;ɼ7ȼ7ȼ1ny1nyc{y^y1oPy1o+2/7 */wLy1oPy1oPy1o+2/7 */7 {EƼIcznwaznsǔx=0=7Vbsc;x=0=wL1oP1oP鹱ǔϝyޔ1oP1oܛ2=7 2=7 2=7 2=74;VM;771ƼAƼroܱroܘ7XM1a~Fsp?[K3*E~X:!LJp߶S gTփlF\ϓ݈ kjPٍ7wn]T#. |F\p?4sQ6₾V؈ i,T#.XFE5 1 n z ]T@.2 n rA>xv@.2 n z  4 i* ,T@.X\LSɀ\d@.'=M䂞9?Suw˺A-rAuw˺\O] yrAuw˺\лe] rA u ew@. qw@ rA\ϐ t@.i: 4 r9M䂜rAN EUσ{sA \O֜ ٚsA>qz. :=s^{H/?u.ȇ=MF|h]d.Xh]LSѺ`uA>ct.Xh]LSѺh]d.ߎ]T.i2ZoG낞&uAOѺh]d.&uAOѺ4 i*ZwQ`u2ME|3h]LS`262MEe4 i*c,T.X5 i{MFed.i2Z4 z=MF{+$i2ZwQh]d26AOѺh]LSѺ g;Z,T&XMLSѺ`uAv.XVEG͢]4\ G͢]4\HУf.D5vU z,=jeVE`B"2v+]LS!`f`.ih4Y z,=M킞&vAOEɢ]db`.ih4Y]LS!`f`b`.XnX i*$,T.XEDe*=M-vU LvAEwˢ]лe.:.[-vAE*[eFEFE`fe.X]L݈h,Tv2Me*]T.X]LSEɢ]d.ȓd.=M킞&vAOEɢ]'\ z,]T.b.=Me/4ۍ(v2M.h,T.XnDYnDY iQ.QeFE`v2MeiiH.nx<q /ȓnx\[xAu /ȓn;?clgg>v6϶}ml[g>ێ-϶}mkmSl[;[xy{ 3op| -ϼ7>gy{ 3opo} 7gϸAg Zxqh} v3n-ϸA> 7޵yϽYN̻zy{3 ϸk4>]wgۧL>;%qh} 7ggK>DCol| {L4>Ng`DC3opo ϼ77>=&zy{C3oc77.h}ƿ zz@Colo} x=4>>4>zz@Colo} ϼY77>gzm?˽)zy{C3oܛ7g zqh}{DC3n rog zqhDC3n ϼ77>˽)zc ϼrogܠz3p<bp=!>܎n?>p;p< qO`U{j}x | zUC P=.nZnZq*p;T{xXie{=.Q{ݢoU{\=nQ{ݢ/T{G=nQ{ݢлE=!_)q4z2ͽLs=,q4zCO&q,V&q=Mz "D @CO|4@C>R @COB=WEUUUeU|tzC>S=!eU=qNUUq9Mr=4U{iT=!zCNS^D=M"_qTI|LzC_=M qGp=܊k/zivo=,۽umC>S/$ io=,޺{X.uԺ{X.u\H{CO|]po=4Ѻ{Gj=4Ѻ{iuD-D]p<,[w4]p<,[w4C%QayXnie{a޺{Xnieee{f&Zw=Mzh=4Ѻ{UDzhCO&Zw=pD2ͽuպ{Xnie{a޺{BiK/C{!ZAB!a(pC}ucc7nl?-yL}mcSۗvL1L1oP1oPa1oP1oP1oP1oP1oPcyyqqGǸA)7 27 27 27?0 27 
2wLA1oP1ohʘ 褐ac ZQo׊)cV 1^+ Zaol)2 27 27 27gdod)c`oeoT4e, ySoToTo, y y徕A1oPAcowao~x=07>e~c;x=0wLA1oPA1oPA)˽)~cޠ~c`7eoTo, q q q ǸAƸA{SƸAƸAdodoToTo, + yޔA1o_| w.?s܏sNp?yQ=~"2#'"g..e_d//.*c_K >8wQR8c(EK C8+4 i*,Tb/Y z,$% Y z,V% պY z,=j|Ee*4U i,T 0X`d 0iC\&KAO%A&KAO%`t] )KAO%`d 0KAX 7\% u 0g.% u ] KA`4] r.9MKAN% `t 0ixQ%,E"% F(|`'KA>u 0G.E{$.=_+A>{; EP`EP`0X ^T`0X }}20,קr}*0,קA>t`0XOT`0i204 ٸ z l܁`d`0i204 !`d`?2Me ^T'X`LS _80,T'XOLS`2Me4 ine톛`? z =M&AOX z ^T'i204Od`0i20,T`0ȗ 4 i*,T`0X`pϨ\T`0i>ߖFAq}05X Ԉ} Bb}0G_X`2ME_e`/2v`LSї`f`}0i>4Y z=M&AO`d}b`}0i>4Y`LSї`f`}b`}0Xn?X i*,T}0XEE_ed`0i20xQ ϛ804 z =M< `d`0i204`d`0X55 i{Me 4۽&2M/*0,T`0XEe 4 z =M<`d`0i204 z ?SE<`d`0X2ULk20,T`2Me^^`fd`bd`0X`LS 4 i*0,T`0XM4aga&Xp0 <4ŒGr0cNy9MU4aǴ& ӄO' ;>}wcՍg8)M8Onl_>1 c۷6Oml_1 Ǽ3 ǼA ǼA%^ƼA ǼA ǼA ǼA ǼA )M8 *M8 *M8 2M8 2M8i&<4747474~}dpdpl|74741}ƼA ǼA OUlw._)73毋Rc(8毋Rc6BMxm28d&Sc6B?4 2xL1n)1n)1n)ıNc Sc Sǔ}2Scޠr3c`Of qTnRcޠRcޠRc`Of qT q,L!yJ!q$L$YILfx08f 3cVI9s1ggn:MǠl6|cqt j:Gd1ive}m RU*2XJE^T*2XJEV`않 ^`않 ASr+,T*2i24 S zLEȠTdd*2i24 ՑTdd*ZM2M"eJE^T)XTdLS _O9,T)XVSLS`R2M"ej54}`ZM zLE=M"&SAO zLE^T)i24VSd*2i2,T*2ȗNE4j i,T*2XTdp\T*2XVT e_("@+Ehw5,P^lw5,P}}@(/ ep}}@yQ`@2MUe`@2vWeLSU`fa2i@4Y z,P=M(& AOʠedba2i@4YeLSU`faba2XjX i ,lw5,P4U i@yʠE(<ed2i@4Y AOʠed AO`fdbd2X5Y i@,l,P4UeLS` U i@,T2i@4Y  AOʠed2i@ H(& U  AO`d2veLSʋ*P4U i{M(/{M(e^ʋ^` 2M(/@,T2XeLS` 2?>7~<J_v2[^,8lWa B[^,q--}qcӯ6ml>}i<777Ȱ7Ȱ1nac [q [q [q [?0 2l9 2lyL11oPa1oPa˱C)c:o.?Ss?8)d4c)9c)9! gcQ1n1nαnc 2?6 29 2yL!1o39 *d4 y8ǼA)9 *9 *9 y8ǼA8Ǽr?77xo`s sq17A9x19q1^Gsku8#9)9q1oPqcsysysd:878787Xi=-cޠc`esdsdsdslP7878rO8787819ǸA9ǸA9)9 *9 {Z9{Z9ǼrO8787Xi .*?܏s]4E;i~6A9jsmZ?5ns^T3|g6gp? uQmZ?7ns'E9|{6gLSm΋js=M;3 ?S :#7; r`3HwΠ/; r`3XpE;e w4 i*,T3Xpgd3i2sa;&ÝAOΠpA9|6g漨6gO ۜAmΠO-ÝA :s7|ן{KAo%Ѡ7ĒhO\ KA>pI4X`,A`YJ?hE] KAN% htI4i$4] r.9MDKAN%ы*=Ϳeh_[fF/*3זѠ̌ Й :3!gF Y3A>vf4.*30AOѠh. 
z' i*O,MSy`DiMSy`*2MUZe4UiJkLS`*2MUZe4Ui i,T5XJkd5i WZ/4Yi +AO֠Jkd5i WZ&+2M̂e4UiYLS`*At5XYLS=`*2MUZe43 i_Xi ig,l,T +AO֠Jkd5iܣQAO֋=MVZ&+3 z=MVZe VZe VZe4ۯA43JkLS=Ʉk_!&\ 1W . +Ąkǯp /^&\<~Eexp Y/^&\/*,TV+XVL0,TV+Xnyp i*,l&s7&Dl3nϸAv?>d7g bc{3oNg&, 7g&cywv?g`ϼ=7X~'A3!;x?B!ޱ=1x?5glgBc{3^c~ ]x?.By{wl~ !ϼ=7XwCy~!ϼ=7XwD3n!ϸAx?>7g Bc~!ϸAx? ZeǪsA|%НA1;l0:!],] ~,] +] ~,] ~܇x?.x?j})xp}9ܞzp1I}bA~E;,7U wxxpA~];,wV}p}A~qܾwxI=Fz Fz}`7p} FzŚFzG} Fz}`>>XGz,j#5HE}`A&Fzg}jbA&Fzifo!>WU|R!>E goGD}o's+ >ȯ4`E}0A`|rE}9">{s}Rk0As}jj0As}jj0A>;k_r Oq>vSCL ?! ?h0 ?08S+ ?_o~jbAaE}mvAa&g~jb?K0E}澈?X,j5a0E}8p~?X܇,j5a0E}> `Qs~jbAa a&7P~jbAa&~@iAa݃E}> o=X܇,jQ ?Xܷ,j[w5a0E}u`Q 0E}f{a@ ?h51 0VZM ? =h51 o=h51 0pu0V5 ?Xlo0 `Qsߺ{au?Xܷ,j[wX ~?x? Z*~&hA2/)-~zav/ZUav'Jۍߡ~:m8l90-M8lpخôT<jxR0;UavPK젖A-Z*f vA-Z*FT<rx0:ȥôT<rxR0:ȥas: A.FT<ro<3jxRyôT<jmr0;avPK'ņA-Z*fT<jmR0;avPK젖A- \*FT<r0-\*?C;r0- \*FT<rx0:ȥatKŇim,IT<jm,IT<j0-Znf6$\*fT<$\*fT<$\*ƿR0T<?.aƸT<RaƸT<R0^c\*fyK`9rxRaZ*fT<jx,].Z*fyK젖rR0:ȥatK  \*FT|X9rxR0:ȥôT<rxRarR0;Xλ\*>wT<.A-˥avxA/ ^q&ѓ^& pD?z?D?uq~G 1ܿo7Y1oP#Ɓ :#ƁHj8iy8p)pB?E*?#? z8U4諌j8W}q8y8S{8W}q8/q`QSǁEM55Xq`QSCVǁVǁǁVǁVǁV^2n#%6FKm@dF'FQF;1o#_m@~(m@6rj4_Aoo=@}s`s/$1 I{́({́0{́B`i`4XX.$1A1Bs s s s s s s s s s M?Eͯ;K΁V3ρ3ρ3ρVK3ρVK{VV|kܻӁ_Hkjw: w}pw:M{w:N{w:#zw: w}pw: w1wU@@@~@?t`QSkEM ]ڝ,jjw:v/t`QSӁEMN5;Xt`QSӁEMN5;Xttt ڝܝޝܝܝܝܝޝܝ,jj-1ܝ,jj-1vwZK ,jj-1vڝ,jj-1qw:f{ttttt tttZK ܝܝ,jAܝ,jAܝ,jj-1qw: ;XZb`QSk3؏&qBM %bu_2.V%bu藌Ձ%kg0.V y۠/`p:`\PՁEMM 551Xlg0.V551Xlg0.V551Xlg0.VZM.VZM.VZM.VZM.VZM.VZM.VZM.VZM.V_`\\\`\,j3/lg0.V5ՁEMM 5ՁEMM ZMRqRARZ0RZ0RZ0RVRZ0RZ0RZ0R_Q"FԁEvb(u`Q9JX(u`Q9JX(,jj:Q 5JX(u`QSԁVԁVԁڣԁVԁVԁVԁVԁڣԁVj:,jjT3N,jjBR55JXl'FR_N,j#G/l'FR55JX(,jj:QF,jj:jr:jr:xBRZt5m-}}}66}}}¾nz;ϯx;jzla]$9lrخaK9lwrخai{خaav-LC Aa? 
5r{٨atC 00;!avPC'0;aavPÀ젆A af5=j{0;aavPC젆A af5 8r{0:!4=r{0:!4 8r{0:!ac Aaaav'0;aav'0;a4=jp00;Xޓp{0;Xޓp{0;Xޓp{~>LC08}0^ckCg0^c>LCxq{,]a!avPC؇i{0;!avw9=j{,]afyC Aa?4r{a!atC 0:!atC؇i{.j{a!avw9=j{,]a>LCG{/Hv\~vK~b~ܟv ܟ ĺP#ف#s}ہXrA;p;%~yAB-hw+0/h]@~Ʃ /ü8pv*\_ ځpA;k{A;*zA;W}qA;_yA;p{A;W}qA;/Ԃv`QS ځEM-h5Xs`QSśV ځV ځ ځV ځV ځV rA;_yA;oܼ藌 ځ~ɸȏRȷp^оP ځ4 ځ|@6v 8oOH݁'@jw` i;X.^v=/WɋWūrj5\Z^^^^^^^^^^^^P݁VK{Soh9=~aϕm<m 1h51my~f;aU//Z"z /Z"z "z "z /Z"z /_XWgH5~a9ax߄v1^۵j}0-ZDf>r}"1ati}"0:EatφA.\D?w6r}"aZf>j}XaZDf9j!r"0;EaчA-ZDf9j}"0;EavP"A.\DF~чA.F~"A.\DF>l.t"0:Eô9$\Df9$\DfyчA-DZf{.ZDf{.ZDf{.ߏi}~\Dƿ"0^c\Dkxq}0^c\Dki}1.Eavw>j0-ZDf>.чA-EavP`9r}"0:Eat'A.\D?w>r}"aZDF>r0-`9rr"0;Xλ\Df>.чA-Eavvk]j-=p/p~}}r-='}}r-='ZK,jj-B5XZz`QSkEMMo55h5h5OŽh5h5h5~࿿~'x ?vz ?z zg=/6wbsg=zg=yg=o ~vw;=ĝ@ɝ@毆k/iO{UX^mWE|mh=*v, t/ -5H5H5H5H5H5H5H5H5H5H5H5~W劉/{ ?[{/r{ ?a{ ?c{// re// _@_~U X~BC ,?!!^Ub` i1/W}r>E{>/W}r>/W&W͹@U@U@~KU@UM簾V-WC ,jj>UVZPEM5 X*|`QSEM5 X*|`QSVVn٫j>jr>-{>jr>jr>jr>jr>-{>jr>Ef;s>Ef;s>E X"i`QSEM5 X*|`QSE6EM-5 5Hz W&W&W&W&WyGW&W&W&W/"i*|*|`Q *|`Q *|`QSE6EM-^UIZ$=b_2_@d\KU@d\Wq>d U@6*| o=* o5j>@ U@ U@ U@U@U@U@U@U@U@U@U VVj>f;qv*|`Q Xh`Q X~PGքքք}քքքj=п8`~,js/{`QSEM _fYe ~~~~~~~~~P|VEMͲ5۹EM _ ۹Ev~PEM 55X{`QSEM ZMZMy$_@gZM_@[֬-ӷ_=y3Wp=Sgye{>Wgggye{>Wgg/,cNpB71R^}@~E}~ց oɼo߯:pe?>.e@~e )}es?W6y}es?eZƿPEM-5X2~`QScEMZM.ZM.x/ZM.ZM.ZM.e@~e@~Fe@;2~_l.2~ ?/2~ ?2~ ߧzB-#;/ͪ{VVE簾}MM-5Xij*XT <@`QSy@~<@`QSyEM3T,jjR9<t j:H5RMTy@<@ t j:H5\<@Ҕim r}!1, a@~^@ ?1wX җ4e ?İ@`Q?\PՁf`inBUˏOsǧ@_٬fu :+Ձ@_٬fu j:oZMV&bՁ@@`QSsEMUMMm^@`QSsEMU5U,j:pEMU5U,j:XTu @`QSՁVՁ@@ opuBU&Ձ@@du j:h5YZMVZ ,j:EM-5QՁo/Tu o5U,j:Xm`Q bu f{@`QS:h5YZMV&Ձ@@du j:po&VՁf{@`Q bu f{@`QS:Xm`QSӎv&0f/ .ԨmM` &0fh3ܗCmc61}94п3MȎ\Qf;1fXԨ텊55jXԨm`Q3,jj6َv55jXlG; &cV1@ɘAd j2fh53ZM .lG; &cV1 3,jj6َv\؎v5ю1FmhǘAd 7:II@Lf ;II@LfB% {I@b +9IXT $A`QSIf;1IXTB% J55{EMM55h5$L' &VI@$Ad 48Ih5$PI@>m$Ad f;1IXTB% J5I If;1IpEM% J5$,j*IXT $A`QSIVI@$A sB% &|FI@$Ad jAe5|V - 5ˆ25Yfv!Yv{ck=RMsTӹ@C [s!j:H5{=\X{ck=RM.,gM~ 30ls]4:lr]a'ӯav2:Lav5[xrrr젦UAAAAAMR= = = = ۟F{8Latatatas s 
sG"AA40;0;]4a= f5;*0*0lpTaTaTa40;0;0;0;0;iatatatÔ{F{?j=2pqAAAO= =iav'aa40;Xޓ00j0AMf{AM= sr`yO0{ƿsw`0:ba#:ba0^G= ua= u0^G=0;Xδ= =0;0;0;Xδ= = L0;0;Xδ= = = = ۟F{F{8i{F{F{8LatatÔ{f{f˙ʙavi{f{f˙avPÔ{f{f{8_;D~JAOa\DF$!,js#V@D_lF$b3"@qD_lF$b3" !F${g#EvzcD"D`Q,j*"q"EM 55{"EM 55h5HaG$&#V@ɈDdD"C8"h5P@>DdD"af;1"XTDBE$H5 f;1"q"EME$H5,j*"XTD"D`QS"V@ɈD !sDBE$&#|@ɈDdD"j2"h5pHZMF$ef{[,jye$v-.iBYvI²F`>DK|$%@^.itI#&%@>F tI#jH5]ҸvG].i\X*iܙ4>l6/?lrؾZa9aaV^|ئm6~&mZ SaaiaƇavp/i|Wk?%{I{IZ;l/avp/i|DI J=}at%a{I JFQ0:ƇmϜ~DI J=nat%(i W?%{Iö6%}u^0;4>l{X^0;4>%avp/i|KfƇ}u JFQ0:ưat%c0:ư}u JFQ0:ƇmPDI J= Jf'?$(i|W%}u`yOƇ}u`yOƇav'AIw@Iw@Iw@Ic^0^G(i|#4>J=DaP0^G(i K%uưaviQ0;4%{I^0;Xδ(i|Kf˙%{I`9Ӣat%(i|DIö'?at%aLƇA4>1l/i|DI JƇaviQVδ(i|,gZ4>%LƇ1l/i|KfưatPƃӌn\<=v{e큋}p{ ,Ѓۃn\<=Av~^xp{큋s<ݏv+ۃM}>ǃۭl{79|p!psX^}7p`QstH,js:$5%܇@C`ȃ~U yЯ !FvC`ȃ~U y*혅`ȃ`ȃ`ȃ|Ey#!5`ȃE=`Q y~E}8p2.RMe\ʸ⩌˃TS2.RMe\f2.Oe\ʸAθ ͒1 3l}]4 7; gl}}2c@_\,󏢦26EMel.,glTs26šrp:'8 ,i3:8 ,i3:8@;tNb:'_29_oLZMs&9V@~tNd:Bs9V6J5 N5XT:'tN`QS9*XT:'tN`QS9EMsJ5 ,j*h5 Lys.lW LZMs.Ԅzd:'j2h5 ]#NZMsP,j+L5|C9EMM55~9EMM55XT:'tN`QS&m9EMM5 sP?@tNd:'j2h5 vNZMs&9V 5h5 L5 sm9EMM5 sPP&J\tN}Z0Fw-;vctBZ0Fw-;ѝ}r;Ђ1h 'ѝf;12Xl'FFwZPѝZ,j#;EM5ۉѝNdt'j2h5 ZMFw&;Vѝ@΅Ndt'j2s;EM5ۉѝ ۉѝf;12X|`Q9ѝ@*Uat'Я ;'ѝ@*Uat'*Na0_GwN`QSѝ֦N`QSk*Xt`QSk*Xt`QSkӁVѝ@N qt'j2h5 ZMFw;Vѝ  LZMFwZ,jc;EMEw.Tt'N`Qݹ ,jc;*X,5ݹPѝ;EMEw5 ,j*h5 OGw.Tt'j2? 
ZMFw&;Vѝ@>Ndt'Y ,ju}Gw.,5ľ;|$ѝfwt':sa{N`Q bt'Y ,j}Gw.,5;Tѝ@N tt'j:H5 ӪRMGw;Tѝ ;|Zѝ@΅m;?a0 :vh??а_w0m/a57lWܰAtJ_nm]b0P%%%MchԱd0;#0;0;0;0;tffFF?=2t@ @ @ @ǽAAOz avPavPa͇)4j,}X0;0;аavf˹avPc@@@@҇AAAShdh<0:aKFFF??242t҇avPc`yO0;f5>j,},If5>$ Kf{F~Sg=OF~SgF~ZagF~O`0^+0^+ a 0;a 0;0;Xέ 0:0:0:ϰatatʹatatFF9LavPav[9[f˹avPav[f9LavPavPff@-*?z??߲?s@3>S3>C*?~????Mr@S3>EMŁ<#}3oGye*Z߼X_SE-8L&Ci:ܡ J ;q.T(-j2/%J WBZM&CiEM 5J ,j*XT(-Pڅ 5J ,j*XT(-V`QSa@PZd(-[:h5J;-@dl=[h-@dl%c -_ȯ/B wn]Z p -޹h5B lZMjs< 5 ,j45 ,j* XT -Z ?Zv -0H`QSaZhEM![hEMA 5BnRM[hT-@Z t -jH5B nRM[hT- B _W~_gˊڅgˊZ+&W%+jYQ eϊZ tE-_x˞ErX:(jGQYKErXrE-YK5,jÒ+jEM.TE-|@`QS UQ ,j*XT> jh5YQ ҮZMV&+jV@ɊZ vE-jv*jV@ɊZ`QS*jEMU+jE͟*jEMU.TE-Z`QS UQ ,jXTE-Z`QS*jEMU5UQ ZMVy+jAZMV&+jh5YQ ZMVy+jVE6E6XT#&F`Q bE-XT#XT#XT@V&+jV@ɊZdE-7红h5YQ ZMV.T#jh5YQ ,jA5 Vjb5 Vjb\Z`QSM*jh5YQ `Z0Vy+j@ ƊZcE-Ђ]Q `]Nw+jEvbdE-N5U P!EMUCȊZ`QSՐf;1XT5$NZMV&+jV@ɊZdE-jh5YQ ]NZMV&+jXT5$N]N5ۉ!EvbdE-?>VcE-7컢+j+ UQ ͊ZlV+ja@_٬5,j+jEM.TE-|@`QS UQ ,j*XT> jh5YQ idEB&+jV@ɊZ srE-jv*j|@ɊZ`QSf;X,YWLZ`Q \Q ,j>+jEMU%RZ`QS K*jEMU.TE-Z`QS@ɊZdE-v*jV@>ZdE-jh5YQ ICW&+jEpE-Yr]X5%څ%Z`Q7\Q ,jA5 V%Z`QT8\Qt8\Q ,j+jH5]Q :ŊZ#X!XWbE-),VbE’pE-$+jA?z3l/].g.ްݻaoauvScخܰ]ao1lm{pڰ]aiMͰWcmdmdmdmod0AنAAqdmdmdmױ6x80h8`8X86pNll l lASmdmdmdm40:20:2۰AateateTfUffUf?b~l  `96p1 l`96p16666p166vl l]ateTFYfFYfFYf , ,0;Xޓ6p1$, *\20;0;0;Xޓ6p1$, *\ = l6, 'l6 lx6 lGևZamSmنZa0نrnemT0نAنAنrnemTm,VنAنrnemdmdmdm0:20:2a20:20:2a* , ,20;20;Xέ,Vέ, 20;20;Xέ, *20;20;2a* * * * _A.T .pl:p>+p5p#p>BZcw .pBG'.T .p?p>+p:jh5Y ,ZM&KpFV% )jAygSVi 0 L5 0 ,j*MXTBJ5?x5 ]_9@j^XI r/8?V[ M.T/`9XT/|% '&ߞwL.T/!諌e@_e,*c/Tw~X .+E*5*]_`QSe~EM*5UPe~EM*5 ,j*h5Y ,{o&~藌e@~=_ r/藌e@d, X~e U w`._&~Ve@ɲ_`QSe@~_`QSb5E͟b5EMj*5U ,5 ,j*VXT/_ p/XM`QS~e@_ t/jH5] .RM~Te@_ t/jw~Ve@~5_,g˲_,+J%~Y eϲ_ .w/_˞eErX:(jGQYKErXr/YK.5,jÒ~EM%-.T/E`QSI U ,j*iXT"jh5Y ].ZM&~Ve@ɲ_ 2p/jw~e@ɲ_`QSI~EMy~EM%-?MM.T/_`QSe U ,jXT/_`QSe~EM*5U ,ZMy~A,ZM&~h5Y ,ZMyo~Ve:-E6e:-E6e:-XT%NK`Q b/XT%XT%XT@&~Ve@ɲ_d/wh5Y ,ZM.T%jh5Y ,jA,5 5 \_`QS~h5qٿ@_c/S;h }>fmٿ@߅8_12XlIf8_`QS ,j*sXT&َ5 
,j$EMen8_d/j2h5 ZMf&Vٿ@߅8_d/j2wEMen8߅8_`Q' ,j*sh5Y Oe@X .'IJ_` )i苗e U ˲_/^$~ae@_,5,j#~EM%-.T/E`QSI U ,j*iXT"jh5Y $.ZM&~Ve@ɲ_ sr/jw~|e@ɲ_`QSIf;X, z_`Q/\ ,j~EM%_`QSe Ke~EM.T/_`QSe@ɲ_d/w~Ve@>_d/jh5Y ,IC&~Ejq/Yr-.]X-.5Ke@>i_`Q[\ 䓆.]XloX ,j~ERqqqq/YR..:jŲ_ t/q5Wc/Ё5~~waɻȇ`] tne vFۅ:wNv.M|0v`>Lua߭v=LoAaK'K'K'K_'Kt:~$8~$x'K'K'K%avPavPՎavPGavPavPavP%avPՎtfUfUFYFY , 0:Ȓ0:Ȓ0:Ȓ}=`dIpdIp>0$8$x,`9+$8rJ젊,젊,`9+$82"K`9+$82"K젊,$8$82$8$8$xJ Kat%TdFYFYFY' , , "0;Xޓ$82$, ?K8L%avPEavPEav'aIpTe,IXfUdf{cp<yyy폺,,"0:"0:"a*,"a***"0;"0;XΟ,*"0:"0:"0:"9atEatEEatEatETFYFY~z9q;˟G [ZM-&㖁Vq@>edB-<㖁Vqf8nX,}-gD㖁ERiq2&[5 ,j*nX,-[^X-[5 ,j*ny▁EM-[ZM-&㖁|q  [)N-&㖁Vq@ɸed2 :nh5 ,j|㖁ERrr2Y2B[yA-;Ȏ;-:%W?O mlUzE$Nfi 9nyA-_lA[4m2Rr2XYB[XB[4K]qˠl[]ac2 ANqˠ{[yqˠ[]d2. /Aeu2: /F%_j5Ϯv kc]g?v ٵϮ1Ϯv}~v kcwkݟ]߻]gײϼ;f—q_~}"|gg'—cg'—q_q_~}"|gE3oT#3yeg}7x_~ ϼ;23v_~ ϼ;|7g —]Sg —cw3nϸA/?>DD3n˱;<7XCcw3o} ϼrg,׊_~ kE/?g`VD3oϼ;|7x>D3n˱;|7gߔ7D3nϸA/?~ϸA/?Cyϼ;<7XI"ϼ;<7x>= —ywx3oܓ |7x>= X~"`g.]?vErX~"`E3nϮScw3oX~ ϼrg,בX~ ϼrg qX~ "`u3nϸA,u$qX~ "`9v,?DrX~ ϼrXD3o\G"`7x,?u$ywrX~ ϼ;`9v,?gywg| ϸA,?Dg q}ğ]? E+\N/^'ml;xg[N/r f.^ԃw0s:jπXf.`u@}D0sD0sD0sD0sD0s:i"&&&!EO2;x[,Ӽew03x34`b\4`b\N,iiii"ҹ"iE0H|tH"Qs:ȗ\#t.^";ҹȗ\kLt.%E:4Hb\,Ӽ#ews(9H碧H"+ҹi"ҹi"ҹi"ҹi"ҹ't.zt.zt.zt.ztHb\,Ӽ#|ЯHb\,Ӽ#e?mwsLt.iޑ\,Ӽ#ew3xG:4Hb\,Ӽ#ewrLA.zt.zt.mE:\Ez"`ùG7p.p.% u8([E>Q3xw8<[E>QsDsDsDsLp.:ewZjLNK-i2ͻRewsLp.:ewZjLNK-i2ͻùȇ=p.ii2;-Xyw8_TsTsTsTsTsTsTsTsTsTsTsT3xw8=姿t."#ҹ<"#ҹw\t.;E' ҹw\[ t.;2ͻNXYL\I=XyG:4Hg\I=(Ӽ#ewsL,i+)E:4:Mt.iu2ͻN#ewfL,zt.zt.5E:=MD:=MD:=MD:=MD:"&";ҹX\4\,Ӽ4ewsLt.5E:4:b\,iӼ4ewsLt.iޑ2;ҹXyG:4Hb\,Ӽ#ewsLt.zt.zt. 
+E:6EOEONZ4\4\4\Vt.zt.i2vHbNZ,lAt.i\,ӼIew8iL!ҹXn\,ӼIe62;Xn\,ӼIt.zt.zt.zt.zt.TE:=MD:=MD:=MD:w8iDsDsL!ҹXn\,ӼIe62iE:4Hb ޑAt8>t8>t8>t8¾:^:^:^:^:|a_E`DsL]NùXy4$:ew*xw84bݺZ,lp.iޭ2v9bݺZ,lp.zp.zp.zp.zp.zp.zp.zp.zp$:&:&:ùXy4$:v9bfDsLt\E<(p.S?ep?ep.S"ùȗ\ E\,l&p.i\,Ӽ4ew&xw84bY4\4\u8=Mt8=Mt8=Mt8=Mt8yHEO\#u8=Mt84K+F2͒QsG\,,u84K6F2RQsLp.i|:ew3X2p.i2ͻ;ewsLp.zp.zp.:ùiùȃp.zp.zp.zp.zp.:&:efbf ,I#u84KHETsL\yAu862vbfùXY:GpKH2;RsI8t8CsiBt8'DsyBt8yUE' \tEg :<ʪcp[2οk?g쾼xLc)?$cyLwccd|ccp<xLMS1v_y?kTidsc.۞c.۞cW˶cvv|Lc.۞c.۞c.۞c.gs7Θ7K1oPcޠ.{ǼA=ǼAut777ȶ7ȶql{>7ȶ7ȶ7ȶwql{ql{7ȶ7ȶcj2y:m1oPMddu$۞cޠLc`ds,בl{yj2y:m1oPMd|Lm1nmϱ/X|LM1nm1nm1nmϱU|LM1oܓ9 4 {=ǼA5SsTiTi,$l{yj2y垄m1oPM1oܓ9-9-9v-|L1n1n?u[cޠZc`cksTks,ulmqlmqlmqlmsǸA6ǸA6+ulmqlmqlm>77cjmyjmy庎u[c`cksTks,ulmyjm>77cjmyjmyjmyjmyjmgǼA61n1n1nͱ77q{^q_۞/m}8x g^T3x 'g>gm}=x,{Qm ?xn{CAOmϠɶgd3i4 ˂&۞/4 zl{=M=ɶgLSm},XgLSm i,T3x,{MMSm`ڞ2M=ej{;2iL=Ѡegžho'{~{A}Q= _WqO4x )D_TO4ȗ )D|q=`z2MDe4 DdO4i';=MD&{AO=Ѡɞho'4 z=MD&{/',TO4Xh/',TO4XhLMMS=`z2MDe4}Q=`z2MDe4 i*k4 z{'= _pO4ȇx=ѠgO4? q{A>rOED|fhO z=MD&{2MD|nhLS`*X2MDeꉾ VLS=`zA>rO4X VLS`z2MD|hLS`*X2MD= htO4i'4 r9MD{AN= htO4i'zAO=ѠgOEt'= u'h ޅ{Ayh 4 i+)De 4˕{/+w',TO4XNL\I',THEDe 4yQ=`B:2Mt&{AO= _qO4i'4 z=MD|u=Ѡɞ{AO=`B:2MDe{2Mte44 i',TO4XhLS=`z2MDe4 i',TO4i'4 .D_lA=MD&{/*4 z=MD|=ѠɞhLS`f bO4XTL',TEDe @4 i De6=`P2vĞhLS`f bO4Xԃ=MD&{AO=Ѡɞhº'4 z=MD_T*i'4 i De6=`P2vĞhLS4m{2MD_TOAD^{A=ѠǞh z}>D^{A=  '}]N',l4 iIDejvhLSͮ`]2v9ɞhLSͮ`fdO4XfWL]N'4 z=MD&{AO=ѠɞhdO4i'bdO4i'4}Q=`]2v9ɞr=`fdO4i2[˿SϠ? 
q`E?80ǁ 9[q`3ßA i?_T3XMLSś ix,T&i24 ßAOϠgd3i2 ?&ß/* ?&ß2q3XY:3y6`f8,,?e%9gLS`f8,TҞq3XY34 i*,T3i24 Dß/*4 DßAOϠgd3i2>?&ß2͒:r3XYjGXzG4K 9,,#?<6`f b3XY H4KK`f)!9tKϠkr 4d3!ßAw  ̩ßA  ng}C?_,u$?2929vq >1n1nt!<292929r?ǼA]yyu; wTsTd1]y y q q M7c q q q 977q q >@Ԙ7X#zL1oP1oP1o\G29 *5 H?Ǽr7@Ԙ7X#zL1oP1oP1n1n1n&1n1n1n= ßcޠQc`'asT 1?ǼA?c`'asT j,$ y Dy垄A1do;O?}2n>> ~c07vrgA1~V{LA1~VgA5~kƼ}͠ߘ7ߘ7X{LA1oPA1oPAay ~c ~c ~c ~c1nA1nA@7Ƞ7Ƞc qSd|t$EE9 7 _q`)OH`>ؑ}5xByQ ?̎AOH`d$0i24 އP&#/*4 z=MF!H`LS}%XH`LS i*,T$0xByQ`O"M`"2ME2MEem/U ޯX ɂ*,`Ͻ] ߧ A`o ;.` 2Me*4U Y  4Y a AOɂ`dA0i cI& AOɂ`dAEe*4U ) 2Me*4U ii ,TAEe*4U|Q` 2Me*4 i*d4Y z,(Y ς`ύ] ɱ A>sA0< AY A>CvA0'c.`/3 1& AOɂ`LS  ,T&XMLS` /{,TA0X`\ i{,T&X`LS ,T&XMLS?tA0i 4] r.9M AN `tA0i 4] r.`L\f9/oy+ Fп0/w|`_ `o8/{ 4 i,,Y 4i,_,Y?4 i*/,TW#XY. i2Mu5ej`LS]`AOyɼ`/8/4 z =M&At`d^E|yɼ`LS]`2M|y`2Me 44 i*/,T^0X`LSy`2Me 4 i*/4 z >/ &AOyՃ z =M&A`d^0XLL1/,T&Xn i2͟6M|Uym2vļ`LS=`f b^0XLL1/,TA&AOyɼ`d^0ȷh z =M&/4 z 4m2vļ`LS=`f b^0X̋ 44 oOd{0i=4 zly`d{0i=4 ۃAO۵&ۃ2v`LS٠`fd{0XlЋj4 i*,lךl4 ikMe4۵&ۃAO`d{0i=4 zl=M&ۃ/kM&ۃAO i*,lךlخ5 ikMd$02n zH`лe$02G|ݑwH`лe$02 G/i24 3&y/*4 z=M񌠧|^LS}<#X|^LS i*,T>/xxQ`OOy2Mewe4 4[ɶ^~<ȧnf[E|^пl^T[/nf[/ m|'m H,T[/X^LSm`zA>v[A&zA>t[/i4 zl=M|L^d[/i4 zl뽨^LSm`zA>5t[/X^LSm`z2͟6͟6Mej4 iz2Mej4 i*,T-i4 Ѷzm 㺭\|V^?x]|^O{Qm _.p[/Ggn=M&zAOm`zA>Ds[/XKLSї`z2M^T%X^LSm ,T%XKLSm`zA>]s[/XKLSї`z^t[/i4 rn9MzANm ^t[/i4{Qm`fr[/ zAV{QQ+l[a[/ȧn\m0 m q[/wD i,TT"XY. 
i**,,YnX.i,T[/XDL\f,TTEeJ4xQm`2ME%&zAOm q[/i4 zl=M|#mɶދjFzAOm`2MejFz2ME%ej4 iiiz2Mej4 i,T[/X^LSm`zAOmɶ^/b b[/i4{Qɶ^d[/ibn=Me4mz2MNe6m`z'/,iT[/{,lAl4; i e4mz2MNd[/i4 zl=M|mɶ^d[/iz'AOmɶ^L,lAl4; i ej뽨^LMMSmO&zAOm v[vɶ^d[/izAOmɶ^d[/ȃ n=M^lךl4۵&z2MveZm`j;/,Tm'XNL]k,Tm'Xٮ5 i,lךl=M&zAOmɶ^d[/i4 zlخ5 zl=M^T[/XNL]kbd[/Xٮ5{mw˶^лe[/ݲzAmw˶^ Ev-zAmw˶^;4xQm`2ME%^T[/XDLSmɶ^d[/3n=M&zAOmɶ^ zl뽨^ zl4K m`fiDe%^L" i\z2Me^LSmK7m`fIG,T[/X^LSmɶ^d[/Cn뽨^d[/Cn=M&zAOmɶ^g zl4Km`f)bi,,<^L4K z/ e6m`f,,^, Ѕ .X\ B^-/^,%Y>Y{LWc'+xc'+xc'+xcgo7Ę7k1oPWcޠ#ǼAUƼAU%7 ޘ7 7 7 }~d1UƸAVƸAVƸAVc +xc +xcY1n1nT˵"+xcޠ*x7 7 7 kEVƼ˵"+xc`VdoTo,׊y= ޘ7 ޘ7 7 7 7 cqA7 c*-ϘSw.x|Q5}'ȏkvc>jvA~f?f4Y z=M삞&kv@DdE삞&kvAO5ɚ]>4Y ifD4U ifjv2Me/f,T.X΃e4U o4U if:if4 /y٩݋J٩݋J٩]/9uk9NSwA>vv.X]LS`Rw2M|݃L=M|]d.i2u4 zL,Щ]d.i2u4{Q`Rw2M|4]LS`Rw2MeJ44{Q`Rw2M^T.X]LS`Rw2Mׂe*=M&SwA>v.-kvA>u.'5e.'l5 fjvA$]\ z=M삞&kv2M|]LS`2+2MeٽJLS5`jvA>q.XJLS`jv2M|*]LS`2+2M5 ]t.if4] r9M삜kvAN5 ]t.ifjv2r]пf2AVX š]O] ykvAafkvAY]f,T.XCL\ff,T!XY.\{\f=(T.X]LS`fr.XË4qi*jv2Mee8=M삞&kvA]d.if4Y zFkvAO5U 삞&kv2Mee4U e84U if,T.XӦ]LS5`jv2Me4U if,T.X]d.ifbٽnX z=M^Ta$if4Y zkvAO5` #2vĚ]LS`f b.Xȋ4U i0,lA4mkv2MFe65` #2vĚ]LSY z=M삞&kvAO5 _u.if4Y zٽHd.if,lA4mkv2MFe65`jv/f,T.X㷿] z=M삞&kvAO5 &f4Y z=M삞&kvALp.ifbd.Xٮ5Y io,lך4շyQ5`62MmeZ5`62vɚ]LS}`fd.if4Y z=M삞&kvAO5ɚ]dvɚ]d.ifjv2MmeZ5۵&kv2v\cv.1;sAٹ\Gq rgsAٹ\ 򼃳s2Me-\LSٹ`sAOٹ\ z=Mf炞&sAOٹ 8;4{Qٹ 8;4 iꂳs2^pv.ȣ84K}ٹ`f 08;,, ge4Kٹ`s/s2cpv.X\LSٹ`sAOٹ\{Qٹ\ z=Mf炞&sAOٹ 9;4 i.s2͒qvqv.XY yٹ`f8;!;g^lA4ms2͒ qv.XY!νX!ýX.݇ c>\?x^, <>\=+.Ztӊ}V#d/1v1>?6vjcocG?Xcj>?Uc15?U6vcj~y-uS2v-ɘ77>1}1}kd1]qÍq=K1}1}1}1~ϳ7 oTaEߘ7K1oP|cޠpcޠpZoTnTndndn>> pc pc pc pc1n}1n}t7>7>cÍyZ}1oP}ԇՇՇ˵"pcޠpc`Vdn,׊ÍyÍyZ}1oP}ԇՇՇههه{L}1n}7>cj-qÍqÍqÍǸAƸASki,$Íyj-y垄}1oPԇZZ= pcޠZKc`'anTkidmwvc6;;o6;;oc:oc6;;ocQw;oc1~6 6 vƼd{L1oP1oPyyyyyOy;oc ;oc ;oc ;ocy1n1n77c꼍q켍q켍y꼍qnp.x ޯ3/YAF܀ އ_T.x }h0xyQ ܀ އـ zl=M6&pAO }:!ipAO \d.xNzl4Հ ާej4Հ{Q `p2M5Հ i,+2M5ejWej4Հ ޯ4Հ{Q @\~;g=^T .=^T .ȧ=|3́ q .7 q2Me 4 i*hd .i2#@₞&qAO@\d .ȇ=M₞&qAO@܋ 4 i*Ae 4 i*,T .XӦӦ@\LS`q/*,T .X@\LS`"f2MÊ&qAO `;ϖ :#Y|8@\?[2Y|8@\{Q p 
.gu=M₞&qAO`qA>s .XrILS`q2M^T$X@\LS 8,T$XrILS`qA>s .XrILS`q@\t .i:4 r9MₜqANӁ @\t .i:4{Q`%/k0MWz\п2|G p=.o6a=.w\ e4Mik0e&45q/ke4U i*,,`4MxQ` 2Me^T=.XlBLSلz\d=.wy\ z=Mゞ&qAO q=.iqAz\d=.XlBL],T6Ee&4U i,T=.XӦz\LS`q2Me4U i,T=.Xz\d=.ikfǽnX z=M^T#i4Y zqAO`2vz\LSE`f b=.XNj4Ui,lA4mq2M=e6`2vz\LSEY z=Mゞ&qAO _u=.i4Y zǽGd=.i,lA4mq2M=e6`q/,T=.X] z=Mゞ&qAO 44Y z=Mゞ&qAip=.ibd=.Xٮ5Y i*S,lך4|`252MejeZ`252vz\LS`fd=.i4Y z=Mゞ&qAOz\d=vz\d=.iq2MejeZ۵&qAO7@\b EqAo 8K!qAo ;8 4UFiq2Me =M₞&qAwp .i24 z =M<@\d E<@\d .XY 4Ky 8,,e@\LD i*,,%e ĽXj 4K`q2Me =M₞&qA`t E₞&qA`t .i24 z =M  ϮϮ?g뿫c?]6T}v}gGyyg}I:>g :oq} 7}97g :ocg :oq} y>g}7x_} myw3oݝϼ7xw>yDg :ocw3nϸAt>y:i7g :o]?yDm} At>y;oyw3o\7xw> :oyzϼ7Xy;ocw3o} ޝϸAt>yDm} u83n7g :oq}vD3n7XIyyϼ4vw>gZ,$} ޭϼrOgZz MϮw&g6v7>M;Bmn}lwT'd+h}vh} 4>ly?߲=&gdlyw3on} Mϼu3on&g lqh} u 3nMϸA4>} 7&dD3nMϼ7?cQSWnC.xwt\]E ]E."RnojK-i]2ͻKXyw4."K7.ݢ."YKiKiKiKiKȇ-z-z-z-z.bݥ[,Ӽt|.bݥ[,ӼtewnL-i]26ͻKXyw4.bݥ[,ӼtewnLn-iEO]EO]E>pWn?[tY]E>BVn(ե[]Elѥ[du0Y]E>T.xwVt|^.ݢ.ݢ.ݢ.bݥ[Ku4&bDY,Ӽtew.x7Q4.bݥ[C$u4&bDY,Ӽtewn4ե[,Ӽ(eweLҽ.".".".".".".".".".".".]-iM`Snѿ2t+C.x+Cnѿ2tt|SB]EKott|kG]2ͻKXy45tewaL\K,`i]2ͻKXy452r KXy4 C-iA2;ȰiKiKȷԥ[4ѥ[4ѥ[4ѥ[4ѥ[[@-z."RnDnL2,ik0t4 C-iA2ͻKXyw4.bݥ ]2ͻKXyw4.bݥ[,ӼtewnL-i]EO]EO]E.]KiKiK[!&t&t&t|AM]EO]2ͻXnХ[,Ӽ[!e6]2ͻtew+dLn,i t4mtew+dLKXyB4mtew+d]EO]EO]EO]EO]E֫.ݢ.ݢ.ݢ.]n,z-z-i t4mtew+dLKXywwnL-i][n[n[n[n[-z-z-z-z-򤄢u-uv!hbfEnLN,i QD4|GѺ2;Xys4ۅ(uew:gL]"ZXys4ۅ(u&u&u&u&u&u&u&u&uv!hݢhݢh]-i2v!h]]"Zi"=% =s^s^s24{QIɤ]d.i2iL=M&eJ#2M%eJڽ]LSI`vċJ4 i*i,mmT.x,T.X]~/=X݋J4 iLI ;i{QIT.1ivA>w.1isNvA;]O i*i,T.X]LSI ;i vAOI :i4 zL=M&킞&vA>Nu.i2i4 zL=M&^T.X]LSI :i,T.X]LSI`v2M%emmJ4{QI`v2M%eJ4] i*4 zL=0݋ `a M;LS 1LKa R;LS^T.ȷ% tAOa0]d.X0]OR i*,T%X0]LSaE i*L,T.ȇZ4E i*,T.X0]X i*,T%X0:L4 r9M邜tANa 0]t.i:L4 r9M^T.X(ʋae +cE1La p. 
ۄaM |?a`t2Mea` 2r 0݋a`fs.X"CL\=(,`?t2Me*20]LSE` AOa0]8L4 z =M邞&tA0]dE|a0]LSE`fc.X"Ë 4Udi*L,T.X0]LSa ii*L,TEe 4 i*L,T.X0]d.i2Lkfӽn z =M^T$i2L4 z tAOa`z 2v0]LS=`f b.Xȋ 4 i,lA 4mt2M@e6a`z 2v0]LS= z =M邞&tAOa _u.i2L4 z ӽHd.i2L,lA 4mt2M@e6a`t/*L,T.i=sAO\d{.i={n=M炞&sAO\Gܞ ibd{.Xٮ5ٞ i,lךl4yM]k=,T;'Xٮ5ٞ i,lךl4 ikM炞&sAO\d{.i=4ٞ zl=M^lךl=M炞&s/=,T;'Xٮ5ٞ{]k= rA{Qyf^.ȓ/rA ͼ\" {y {y`r/*/,T^.X\d^.i2/傞&rAOyɼ\d^.o; z˽\'_ z4K y`fA8/e \Ld! i2r2Mem4{$" iJr2Me4 z=M= 27 27v{L!1o\17 *BpcޠBpcޠBpc`cnTn,| y嚏!1oP!1o\17 *BpcޠBpcޠBpc Cpc Cpc Cp)7 27v{L!1n!1n!1n!77c Cy垄!1oPa1oܓ07 * BpcޠPcޠPc`'anTj,$ y Cnc16Ɵnc1nc16Y2X26vPA1~nt'sˠۘ7c y y =ۘ7ۘ7ۘ7ۘ7}piTtttt+q q 'ƸAƸASmdmdmTmTm_͵ ls-xzQ}H!ȟkohWbނ; cV}`Eg z&L=Yg܋JL ' zL=M&SpAO)}!i2RpAO)\d .xozL4 eJ4{Q)`Rp2M i*,T .X\LS)yLS)`Rp`Rp/*,T .X\d'.ȇ>v^T'.'>v^T'.>v|Ν _`r'.WK܉ ;q2Mue4Չ i#~wd'.i3Vw₞&;qAON\d'.ȧ=Mv₞&;qAON܋4Չ iWwe4Չ i,T'.XN\LS`o:q2Mue4Չ i,T-X Zd'.i ;q/skw|rN\a݉ 'N\Plw|N\Od݉{Q _p'.Dz=Mv₞&;qAO`:qA>u'.XFILS`:q2Mu^T$XN\LS ,T$XFILS`:qA>u'.XFILS`:qN\t'.i4݉ r9Mwₜ;qANӝ N\t'.i4݉{Q`%/k0wHWN\п2v| lp'.ov6a'.B܉ we4Hik0we $45;q/k0we̝%,[,A=`Yz/*,KP XAK`-%0{5β%,[_R̲%,[d-je z̲,[84e iG,l<̲4#xQY`z2Meقeʲ4e i*l266Meقeʲ4e i*,T-X,[LSY,[d-We{u04e z̲TFd-i24e /gق&l2M2eY`R2v,[LSe i*,T*#X,[LSY`l2M2e6Y`R2v,[LSe z̲=Mfق&lAOY _u-i24e z̲TFd-i2,lA̲4ml2M2e6Y`l/*,T-i2lAOY,[d-i2;β=Mfق&lAOY,[ e i9bd-Xٮ5e i9,lך̲4՜yQY`32M5geZY`32v,[LS͙`fd-i24e z̲=Mfق&lAOY,[dv,[d-i2l2M5geZY۵&l2jAW WƸZEW gՂ1q`1 j/* jA0j2MՂe=MՂ&jAZd\-i24W zy\qɸڋy\qɸZL4W ijAq\-XYZ 4KNq`f)*8,T\-XY 4W{W i*,T\EՂe4W z=MՂa\m.ccocC_#c%2vm yLck\_cjP=9yzL1oPwcޠOc3FƸF1t4ƽ06ƽ0ƸFƸFƸFHIw#icޠyƼA]yII{L1oP1oP1n1n$7HcqqqǸAFƸAFc #ic #i)6 k7FƼAES$mT$mT$m,nyyڍ1o\16 *6 k7FƼAES$mT$mT$md$md$md$1EƸAF#c #i)6 26 26 26vII{L1oܓ06 *4 {FƼAŖS$mTliTli,$y8; {FƸAsam?sa1sam><&Ch&Chc1~6Bg!8? 
y =ژ7ژ7c y y y y GƼA!1n!1n!1n!77}6hdmd1ƸAƸAƼAƼAƼH.P?oԂOaZ>O} +O jA~j}\*xE_Qy7jAoy7ļZ>W{Qy7ļZ>8<ȼZd^-i24W z̫AOyW z̫=MՂ&jCd^-XZ>,T^-Xڋʫ4W i*8ZLSy`j2MՂeʫȃeʫ4W o4W{Qy`j2MՂw~Z4^{Q _p{-ikA%Zd{-7^ &ׂ|c p{-XZLS`k2Mׂ?i4^ ѭkAOZd{-iC\ׂ&kAOZd{Eׂej4^ k2Mׂej4^ i,T{-XZLMMS^ i,T{-XZLSe`bAOڃlZ^ kA>u{-kAZ^ ѸkA>v{Eׂ|G 4^ zl=MׂejL`2Mu?ej4^{Qݏ`k2Mׂ|˂^eA`Y/;X#e/I˂er`:.2v9`YLS, i,T%X.', i,l 4q iI˂&eAO`Yd,i2X4, z =M^l =M˂&e/*X,T%X.',{]NIdAV$ &Y($ `7ɂIoM OIndAaId/I,T,X&Yd,iIn=M6ɂ&dAOM&Y($ zl&Y($ zl4KzM`fI1 7ɂe%A&YLT$ id2M5ɂeF&YLSMKM`d2M5^T,X&YLSM&Yd,&dAOM I4$ zl=M6ɂ&dAr,iI,,17ɂe&ً&YLD9$ hd2Rp,ȣUnn$ i 6ɂe&Ye!łn ,b9,A0ł.!,t ekH,CbY,xӳ,6v#1v1!/`*#5v< k`OzLu#5vyzLu1oPcޠOcU3Ƹ1et13ƽ,6ƽ,cƸƸƸ˲Xwbcޠ\ƼA]yjUU{Le1oPe1oPe1ne1ne7Ȳc*q,q,q,ݧkǸAƸAc bc b,6 k7ƼASYlTYlTYl,n,y*yڍe1o\,6 ,6 k7ƼASYlTYlTYldYldYldY1ƸAc b,6 ,6 ,6 ,6vYY{L1oܓ,6 ^4 {ƼAՋSYlThTh,$,y8: {ƸAc!l>S!l>öcX6vg1~X؎3B?c,=Bؘ7Bؘ786 6 6 6 6vU3ƸAƸAƸAC7c ac aca1n1nTYYUUU'_҃ MnN)ł]-LX~?_bTnX>0 bAomjw?jwbAom}|$ -bAom`ِb- zl=Mł&bBd[Eł&bAOmɶX>4 i-*4 i-b2Młej /-,T[,XXLSm`bm`b2Mł2M^T[,XXLSmɶXdx,W z X8<n{Q _"px,2< xcAX8<,Tx,XXLS`cAX؃ =Mǂ|Xdx,i2<4 z ᱠXdx,i2<4{Q`c2Mǂ|XLS`c2Mǂe 4 i*<,Tx,XۦXLS`c2Mǂej4 z =MdxEǂ|ZX cA ǂ|nXO ٲc/*<+ᱠXdx,i2<,Tx,G4i*z,Tx,X؋^4 i*<S?ǂe 4 i*<,Tx,g4 i,Tx r9MǂcAN Xtx,i:<4 r9Mǂc/*<,,Yn[a[, b/Iom+lNbAX_lXﰸ-;n4 iI,,Yn4$i,wϩEuϩ>X G _)q+ ;_A #W%+%|=Mv^T+WJ z|44 i+%|44 i,T+X׋|4 i,T+XWLS`:_2Mue|=Mv&;_A׋횚WdE&;_AOW4 i*,l|4fikjve 3WLuɵcI-oLr-WkeC*"H R!X0CLS`:_2Mue 34۟A|4fi? 
i* ;_AOWd+iQw&;_AO׋ 3=Mv&;_2g;_2g;_2Me ve|WLSzQWc+ ;_AWc+yw׋v`fd+X6JLN,TEuej4|n' i,l|4F iIv&;_AOWd+i4 z|=Mv^l|=Mv&;_/,T%Xn'zN2)Ly`S^Au+/Ϯ]Wg|cg \qHr}v8@k)@3nI~3nIϸ $>&????@>ϼ3oNr} I;7x'>$g \qHr}v=7$؝D3nIϸA$>;D3nIϮGN?DkNr} 3$>$؝\yw3oܟ!7x'>\y Iϼ;7x'>$g\yw3nIϸA$>\qHr}v=7$؝D3nIϸA$>D3nI;7X&A3o} IsAcw3o} ޹ϼ7 \yw.3oM$g Zi;=z3^+HkkiϮG[?㵂gVz3^+Hk}ki;>w>2S6X>gZ]O| iאDZ3niϸA>sDZ3niϮ_>DZkNk} "7gZywZ3nP٭!E~ ]E~ZQ٭ZN/b~]ETQvkz~i?XZNn/^Gwvk:2 [בEoߝZ٭iEoߝ ٭Eo٭Eo٭AdI٭EO٭EO٭EO٭롂EO٭Z4Z4Z4Z*X4Z,Ӽ[CewvkLnbZ,Ӽ[C;Xyg4bZ,Ӽ[2͟6;Xyg4Vn-i٭2;i"i>/?MEPkѣF+x7y@MEMZ)5=j4yNMEqRk'ZZy54&bZ,Ӽ\ewk' DkDkɵiɵiɵiɵiɵyZ4Z4Z4Z4 M2ͻɵXy7\ewkLnr-iM2ͻɵXy74&bZ,_OZ,Ӽ\ewq*x74b]Z4Z4D+x7q\\ME~d&עBhr-膚\\ME~t&"?V+x7yCME~2&ע&ע&ע&bZgjr-iޱ2;Xy74&Wnr-iM2ͻɵȏ Z,Ӽ\ewkLnr-iME~x&bX,ӼewE59M59M59M59M59M59M59M59M59M59M59M5wkLfɵ \ɵ \<0&"Lɵhr-\<&"LɵXy74~bfRkL,i۬xCeo{R~ޯ_􅄲֢/$yjDeE_H(k-BBYkZȮxV1"XyH4#YewdL1"i"i"i"i"i"i"i"i"lwg-zg-zgxb]#Y,lwg#X<>֢co$Ek' /_+c-$X<>֢/{}٣XewkLc-zc-zc-$X&X&X&X&X|H@}EO}ZCc-zc-i뫏XYޯ>"PkLd_}2}4˫Z,ӼXe}c-i}OfyXewkLc-i}2ͻiiG }EO}E>Z>֢>֢>֢>֢>"RkDkL(Z,,5 G>bfIRZ,,] XgXe 4KB}EsZt}`iǜZt}E~Zt}EZ%o`kwUF_t}_DcnK} }r 1fffSabƼAƼAӽ7cq[v\{c1~oٹ1dj?_عv󅝫1oPcޠ"ƼAuƼAuSjTjTjdjdj~sd1uƸAvƸAvƸAvg9ǸAvƸAv8ǸAvƸAvSj,Y\y\=՘7՘7՘7Xعչ};Wc`bj)TjTjT1uƼAuƼAuƸAvƸAvƸAvSjdj~lzd1uƸAvƸAvƸAvǸAvƸAvSg,s5 3 IعyL1oP1oP1oM՘7vϘ7X&ajdj\==ٹ6cZaj~t ;WcVع1vSj ;Wcޠ:W k1oP1oP1oP1oP?cjdjdjdj~ddjdj~Tedjd1uƸAvƸAvƼAuƼAuƼAuƸ'Hoλ B֋Jw!+RzQ}>R %(!#CJAH)! 
Y`w>4Y|c>+xz` z` s=g|Vc>+1 YAc>+i24 zgg&Y/*4 zg=M泂3AO`f|VLS`Y/*,T>+X|V~E峂eg4 i*,T>+xii*,T>+X|֋g4 i*4 zg=MՋ\=Mv|ɝUd*is4ٹ !w&;W2͒p*XYj\Xz\4K q*XY\;W/?ع i?ع iB;WUA^,|)j_Od>j?{5|ixM35v:5|c< *5 *QcޠQcޠQc Qc Qcc Q)5 25 25 25v?8 25 25v?8 25 2Qc`a>jT>1ƼAƼAƼro|Ԙ7|Ԙ7Xm˽ QcޠQcޠQcޠQcޠQcޠQcޠQc Qc Qc Q)5 25v?< 2Qc Qc Qc Qcc Qc Q)3 I$GyJ<|Ԙ7$Θ7$Θ7X&a>jTg,05 22Pc15v?93X<3Pcsc<3Pc<3Pc#c<3Pc<3P)53532PcO٠2Pc3cޠ2Pcޠ2Pccޠ2P 7 7 7 7 7 $7 7 c@q@q@y@y@y@y rND(Q/*6 rND{^T"*xTr"ND{\QAr"*x?+'&Qt>Q0zc?*xFz}ԏ z}ԏ IG~Tc?* QA~Td?*i=M^T?*i4ُ zGAO`f~TLS`Q/,T?*X~T>Q2MeG4Տ iL4Տ ii,T?EeG4Տ zG=M&QAXp?*ųQAZp?*ųQA]p?*ų8<~Th܏ Q2MeG4Տ iqd?*iG=M&QAO~T,4ُ zG=M&Q/,T?*X~T4,T?*X~TLS`Q2MeG4Տ iQ2͟6Me#~TLSu`HAODԋJD1QA~DT; 8NDQA~HDԋJDy6ĉ ?)w"*i24 zLD4 3s'ej4.i*,T"E%eJD4 K'eJD4 i*,T"*ӝ i*,T#XD:4 rND9M'QANӉ DTt"*i:4 rND9M'^T"*i@g*/<+PAp*@3&yWmӋ e e%:tu`g@sMAgk :>\S暂07avic7v_}c7vZ/t+=v_xcu7v_vc0v_)M0ve0vcJݻ&xLw cM7>f A5 1fx14kA1^c =kcƘA5 c y y q q q = 7 7 7 H7 7 47 7 c y^1oPǔAAA˽3Hcޠ2Hc``i, y y y y y y q q q = 7 h7 c q q q Ďq q =$̘7 Ҙ7X&ai,04 * 2Hcޠ0cޠ0c`1oPI1oM 7Ȝ˜ceOqqqq=˜˜cy?e?eyy!ysFc sFc sFc sFcS1c sFc sFc c sFc sF)g4 2g4 2g4 *g4 *g4 *g4 *grcܒ3HAArE>ܔ3HAˋ sANKr)ȟq Lg^T#XYH 9H 縃&IwFRFR~ iS#E5&IAOH zl$=M6&IAO}F>iIAOFRd#)xzl$4OM6ej$4HzQ`I2M5H i,T#)XFRLS}9XFRLMej$FRLS`IAOFR|^&JAup@)Xז y OS8e RЗ=3HAq) }3,ef{EA DW+ z`=0<^Qg"+ z`^Qg"+ z4`zE2M<^QLS/iW,T(X^QLS+ iW,T(X^QLMMS`zE2Me=M&{EAqv^Qd(iW>4+ z=M<^Qd(X4#{E2M}>X^QLS W,ez|LS`zE2Mez|LS`zE/2Me d(iW4+ z=M4;4;zzQݡPdwAv&CAOݡPdw(xuzPdw(i;4 g݃&C2Sݡ`fPLSݡ`C2MuY i;,Tw(XPLSݡ}8XPLSݡ}8XЋ4 i;4 z=Mv&DAp(!F^T(ȃ}0JDAp(kQ O9J96G<(Q&% i*J,T(X(QLSQ OP8J DAOQ ?v(i2J4% z=MF0Q(Qd(i2J4%zQQ`D2MElQ`D2MEe4% i*J,T(X(QLSQ% i*J,iTEEe4 z=MFpQ ?w(% Q 8JADA~d(ыyQ ?w(KQ(Qd(X(Q;J,{e4% i*JD2MEeaD2MEe4% i*JG4% i*,T% r9MGDANQ (Qt(i:J4% r9MGD/*J DAm% k(ыz|_[F2JQ GXzPLS`A2M >XzPLS.`A2MՃ&AAOzPd=(iy0׃&AAOzЋz|d=(i,l,l,Իe փe=@P] zC !AAo : zC !eC1,l7 4M!A2Me`Z /*,T!XVCL2,T!Xn  i,l7 =M&AAO@Pd (i24 z n  z =M^T (XVCL2b)d 
(iזo$փڲ]׃^kڲ]׃>yL1n1n1n11n1n1n1ni{ƼA{SgTgTg,i{Ƽ{1oP1oP1olPyL1nY1nyLѓ1oP1oP=}L1oPѓ1oPѓ1oM`Ϙ7ɘ7X&agdxgdx籲AwƸAwƸAwƸAwƸAwSxgdxgdx1wƸAwƼڏy ;c ;c ;c ;cc ;c ;c1n1nU>՞> rArT{S%/*G9W{S%A. w`OtE'e =M6y^T'xz}l#A6y^< z}l=M6y&<ydE5y&A4 il4F6yej4 iO&OLSM`<2M5yejc2M5yejc2͟6M5yej4 zl=M6y&/Q |X f'Cgy5 U,T'XOLS5 W k>AO5 ?v'i4Y z=M|5ɚOd'i4YyQ5`j>2M|5`j>2M|e4U i,T'XOLS5U i,iTE|e4U z=M|5 ?w'O] 5 O|<O\yQ5 a|Y z=M|ey,5`^P,T'XOLS5U i,T'Oh] i,T'XOLS5 ,T'XOLS5?t'i4] r9M|k>AN5 Ot'i4] rσ y=A `O_>{|gu-|v] ]??.2 >.Ϯkkck?fkğ]_ٵϮʟ]kH|-!uS3n ϸ%n>[BfigR7cw3n ϸ%n>~g R7]OC} "u3vn>D3nϮg>DɼϸAn>S7yw*R7ywfN| ީϼ;u7X~"u7xn>Tn>Tn>gS7yw3oN| ީϸAn>DfN| "ug R7cw3nϸAn>zv3nϸAngS7cwf3oN| ީϮG+g3#yon>g`ϸAn>+DYϸAn>zL3nϸAng R7qH݌ݩϸAn>goS7yw5n>D3nϮg*>D(g R7qH݌ݩϸAn>S7yw3oN| ީϼ;y'>>YBYN/rJ"w"FN,^ǎ9Wn5XnKnLnwaLN,zHuxCf:YS7^߿;ux,^ݩYݩEߝDfCfCfDfDf:i"uS7&R7&R7&R7EO222;uXyn4u0;xn4b杺Y,ӼS7ewf:Xyn4uuLMMN,iީ2;u3͢͢͢';YN.t:8D'xwpyBE_"YKo,:8<"O\Xywp4|BY,Ӽ;8<{ :8&:8[EOEOEOEOE~ ΢΢΢΢N,i2ͻOY,Ӽ;8ewgL,i2ͻXywp4b 2ͻXywp44bY upY4Yupy0@E Pg,:8< "C"Ϫȓ,DgDgDgL,̀:8ewgL,i2ͻ;8ewgL,[up4bY,Ӽ;8ewg Y,Ӽ;8ewgL༨""""""""""" R7<[EYWEYQ nyBE_H,EfYQ n}"uXy,}MEWФYwM2EWФYwMEQfGԤY 4iwfGԤY 4iM2ͻIXy7iyAM2e̋ewfLn,iM2ͻI4ewfLn,iM2ͻIXy7i44&bݤY5iѤY4ѤY5i6 MEOMEOMEOMEOMEOMEAPfDfL~bfBfL~bݤY,Ӽ4<&b2ewfLn,iM2ewfLMMn,iM2h,zh,zh,zh,zh,4&4&4&4㋞&4&4e 4i4۟Ah,iޯ_,lIi";ȣ,z&,z&,z&,z&,該3 3 3 33͢͢͢͢"%QfDfLN,iީ2;uS7ewfLN,Yn4KAE>KMbf32gP&X^"BQfљn Afѩn9jnSgfvcAK;3edc3/3;2cqc3/~μ<;2c1ߕ2cü7ȼcˌqˌqˌqˌݏqˌqˌqˌq<̘7X~1/3 */2cޠ2cޠ2c`Ǽ̘7̘7X~1/3 ˌyˌyˌyˌyˌyˌyˌqˌqˌq<7ȼ7ȼcˌqˌqˌqˌ qˌq<̘7̘7cJ{yˌyˌOI>̘7ǘ7ǘ7X&a^fTc,0/3 2X31ccc31c31ccc31c31)3-13-121c31csc1ollPT7L̟13 23 23 23v?0 23 23v]yL1n1n1oP1oP1oP1oP1o)0*0:!z ާۃ2/䆝 rN/^TB&x!rN/\2A ̋*%4 i2Me^T_&x!z#AO}ɾL>B4ٗyQ}O}O}ٗ z=Me&2AO}}:i/2AO}ɾLd_&xz4OMee&22Mee4՗ ާ_T_&XLLS}`22Me`22Me`2/ii/,T_&i/4ٗ z=Med_Ee W/'2// eٗyQ} Z/%ȾL>Z2?S} / ee4՗ i/,T_&<ȾLd_&Oݗ z=Me&2AO} ?w_&i/4ٗ z=Me^T_&XLLS} ?w_&XLLS}`22Mee4՗ i/,T_Eee4՗ iii,Tw%i/4ٗ \2A p_&ȳɾLgTܗ 2Ap_&ȓ*˼LUܗ 2A_=Me&22MeX74}+2Mq>X] i2vJdw%i4] z=MvW&+AOݕʋݕJdwEuWez|Lbcw%X] i2iyj[Xl z ,[bKd%i4Yl 
z,t-AOŖ`-2M[e*bKLSŖ`-A>bKLSŖ`fb3Ŗ`f3Ŗ`fݺ`w%+ANݕ J>IvWa?2Y>̱{c&I݋;?vqA9vqczؽ?c1sxvP}`1\1~AyL1AOČ? 1,`e~f5 2 2 2 2v?3 2 2v?5 2 :(c`}ʘ7cꠌyꠌyꠌy;(cޠ:(c`}ʘ7ʘ7cꠌyꠌyꠌyꠌyꠌyꠌq젌q젌q<77cꠌq젌q젌q젌q젌q<ʘ7ʘ7cjWyꠌyꠌ`>ʘ7vŘ7vŘ7X&aeTb,2 g2v?5Mg2Mg2v?5Mg2Mgz&c{&c{&g2Mg2v?8vzL=1oP=7ɟg2 g2 g2 g2v?>0 g2 g2v 33yL=1n=1n=1oP=1oP=1oP=1oP=1o?e?埧ʘy埧WqzwPi ʋzw%xQ}7K Dt%]2AyQoi,T%x}I }-#)oлe$%2[FR^T$%>fHʃ[FR-#)AOHJ>4IyQHJd$%i2 =MFReG*#)2#`")2MERe/*,T$%XHJLS`")gLS`")gLSI ii*,T$AFR&#)AOHJ|>mL[-c{EW! t^ \+A_lJ;^ f{%x}Q}1xi1Wej4^ i,T{%nW d^%3LΫyyOUdEL 5G͚ɋk&A5G͚IУf$k&A5G͚ILS5`j&2ML^T$XILS5`j&2MLe4U3 iifLd$ifL^l=ML&k&/AO5ɚId$]3 z4V`j&2ML^[e4U3 xk&2M:XZ4U3 if,T$XZ4U3 ifj,T$XId$if4Y3 z=MLɃL=MN|é: zLЇS'AOId$i2uN=MNeJ4: i*uR'2MNeJS'2MNeJ4۟AL4۟AL4KA%A 1 wPnOnA r[wPYMK2v/hؽ{>cz{y?q3Eӻ|d_d~ri_OE8Ư'"cz/"c6v_hcu6v?2v_eƾ1EA_ c "c "c "c "c3 c "c "cIc "c "/2 /2 /2v?"cޠ"cޠ"c`¾Ș7Ș7Ș7Ș7Ș7Ș7Ș7Ș7Ș7[6Ș7ȟ/2 /2 /"c "c_c "/2 /2 /2 /2v?9 /2 /"cޠ"cޠ"/2 /2 /2v?"cޠZcޠZc`}1oP-1oM¾ȟ!2v? 5;"c!2v?5;"c!"c!2;"!2"cSycAHyj"c "c "c "c1n 1n 7Ȇ7ȆcjqlqlyjyjyjyjyjHt{E(?]=2yQ/i=,T{$x}ǂH>X z},AU z}s z}TyǂHcA$ 4Y z& "/ 4Y z,=MDޠɂHL`dA$X~0 ,TA$XHLS}EDe*4U i ,TA$x i ,TA$x i "266MD& "AOɂHdA$i cA$x= І "AXyQ On ͂ȋ*y|lDU ` "ApA$XHLS` "26MD& "AO ?SwA$i 4Y z,=MDtɂHdA$i 4YyQ` "2MD` "2MDe*4U i ,TA$XHLSU i ,TA$XHLMMSٍɂȃ,y  DX p "A-pA$..yU "AwpA$諌ɂHdA$XH'\ i ,TA$XHLSU i ,TA$] i ,TA$XHLS D ,TA$Xs.9MD "AN HtA$i 4] r.9MD "AN ȃ쀼H)y OSUHW; ACr$}1dw!+ Yy! ?u" CAWCA2p"} YE C2M,e Y4i*d,mT"SY<ȐEd"SY~1d4z Y=M,&CAO! ?u"i2d,ێe Y4xQo;i*d,T"SY4`v,T"XELS!`v,T"XŋzqLS!`BAO!ɐEd"i2d4CAO!ɐEdE8i2d4i?i?im2gC2d)=,^CA/!EK`"SzY,^Cvkǐŋ֎!H1d,lv Y4ۭC2MN;Xniu/*d,봃ezvL1d,봃e֎!`O@:d4z Y=M,&CAO!ɐEd"i2d4x1d4z YELSi[;,^lv Y4ۭC2MN;XELS!:`4:xF'w AwxFлe<#㑎gy$xFd<#XxFLS`/*,T<#XxF 8,T<#XxFLxFLxFL?t<#'rg[3ܭA}L'"#c+xL/+cO >OE)(} |׊A^-RCA^0Rwy8JU(E~>EE)I /G)/AQ (ŋzzLSQ`탌R>F)aڠ(Ec"x z}R(E(E(ŃR>F)^AQ(E>/4xQQ(Ed"i2J=MF)e#2Q`2ME)eR/*J,T"X(ELSQ`PbLSQ`PbLSQi*J,mTAF)&AOQ(ECOoz.غxQ}T43n]]aE.w2a0<Fd#ȃ1ay,! 80^T#2ay!Fd#i2,T#a4i*,T#XƋ a4i*a4i*,T#XF0i*,Y 0CAN! Ft#i:4ra9M0CAN! FtA0^ÃOa#0<F'8*c#諌! 
>9 0d!,9y0% ?w!苗%Ury^ȒCd!i4Yrz,9yj%ɒCd!i^4Yrz,94Ur@K2M7X4۟A,9<ȒCG\rz ,9^KA/% Xrz ,9ڭK/[;!ŒCLڱ,lv,94n`fc!Xw;CLSvi2vkǒCLSvi[;ezsLڱ4Yrz,9=M&KAO%ɒCd!ibc!i4YrxQ%`,lv,9nXri[;ezsLS%`J/2Me*9yԙ`l<=jfkLGC'yd3AGË<=jf53AOC'yz<=Mf&3AO :4yxQ :4yi2v+CLSyi;,,v!XY^ CL 0,l<4yi*,T!XCLS`2AOCGyxQCGyz<=Mf&3AO ;4yi*,T!XË<4yi*xge<4yi?yi?yi7;<y>ޙ wCu!:nyއ=1{"cq4{c'}S`{?v?LGVcϪǔ::::qLqLݏAqLqL<77cJyJyJyJyJ yJyJyJyJyJyJyJyJyJqLqLqL<77cJqLqLqLݏqLqL<77cJyJyJJ=7WyzU77Ә7X&aϘ:1~:ǔ:1~:xL1:AOO<}Z}~l>< zLqLqLqL'ǸAƸA#c Sc S)u0 2u0 2u0 *u0 *uu/Y[KDT'VcmmUCV7= avPS젦AM +[~.*EahZa.Ve tzZ!p  Ϩ)BԴB~H%=H)=p 4  XԴB`QS kjQ@{Q.6r!r!p e8P /2ۿ4@2[2[2[2ZM2׭V j!jr!jr!jr!pn e,jT25W*Ge,jj!Q@255X(C`QS EM2'EM25555pFe,jQ@Q@Q@Q@Q@Onp?n OB.ϓsIh V<95995 9sI聚s,jj!9ԜC`QSsEM955XԜC Z<@9ZM9sVsVsVsVs<0CCCCÁs,jj!9@x!9s,jj!9s,jj@955XԜC`QSsEM5=9@{r!sɅyt9@99s<C OVVVV<BBB ZMn ZMn 5!o 5 ,j*X9sc1as 3~؜9A ;\=ففuv2>}v2>}v lal[_--&ll9l _-7-2:\Fف>;pg.[/etpl9}v l}v2;\fف:;p\g.evp8wqps[:sp:spr}:sp:spr}f>'.Wf^6?| 9 }RI|Rbk39x`Qs9`Qs9ruVs9g>~3}__A:sA{|3>sVs9x:sA|j3Z}E|پR5י5י5י.׭/\g>X\g>X\g>X\g>X\g>~||p9!`Qs9x:s:s:sA|j3>sA|j>?o |p|r/Q .\.>\>}{@xẁA^`a/\7>ȳl |Cy 5 5 5 5 T澁A*`Vs@7>h5 Z}ჼZ澁Ao |j 5 5 ,j,j,j,j,j,j,j,j/\7>X\7>X\7>X\7>X\>X\>h5|*{i'|?}=ჼzy w_O oA^`=ჼGzy Q>h5Z}=Vs_O>X\>'|'|'|'|'p]O`Qs]O`Qs]O o溞溞溞溞A^+`=Eu=Eu=Eu=a >H5ARM'|jb=T XO z&>H5ARM' ;6>h }VA p+ gc6a+M >hcl|&c >!|+ gcX>VAX>VA|jcXcXkEuEukEuEu|5kEuEuEuEukzEu8Eu8d5ه>hMZ}8d 0Ak|7>5=A|j,jE>X\ӳ,j5}8臽|W>A?}8ڇ>+ |W`QsxaMj`Q&|ւ?XlI,j,j5Zppy,5Z}8Vs܇>h5Z}8Vs܇>h5Z}8}8Vs܇^|ւ?Xl/l,j,j/\k,j,j/\k,j,j#>h5UU}UAê לm*AޠaUAêm*A p]W>hUZ}UVs_ oа*A |j*AGbUVs_x*AGbUVs_`Qs~~OW>X\W^ | |~OW^~OW>Xl'/l',jU5U5U5U5U5U5UZ}UVs_ ʱ*uUVs_ ʱ*A |j*ALcUVs_`Qs]`Qs]`Qs]x***ALcUEuUEuUEǠ}UEǠ}UEǪ@ |jb8dxa] p8`4헜ӳ?a?a40l8laeaU{e0  ۿT A oA _F90  f50lkA  f50j8`~v>j8`p0;avP젆A  f50j8`p0:atp0:azF90  F90r8`{A xA  xA  i8`T~T~p0;a_f˟I80 ^090?kl V;aaF90r`80:qa0:qat3at qat A f50j`80;qi`8wq)*=*0 # yjyFf50l?;F=H =_RD#w ]0B m0B }0B~H%=H)=p*ށ4X0B`QSiF,j@&IKՄ@KՄ@KՄ(8Rs5@&Zj&Zj&i&&&&&AA#C#WKH0q p_ &A wB`QSEM#EM#5X>B`QS8>h5ȓ#ZM#ZM#ZM#ZM#VVVVj!}y 
}G,jj!}G,jj!}Xl >B`QSj!i}@^Iy!?#R>B 'y}@Ly!{G>B h5h5h5h5X>B ҼX>B`QSEM#5pG,jj!IG,jj!}GmG,jj!}_}@}@}@}@}@}@}@}@}@}@}@}r q gI^9x wn@Ucwy݀@^Xy7 V 1w߻j7 ( ZMZMZMZMw&wj,)3hؙϖ@vg>?[vgg>?xvT4U* Ǟ@^S3k wgg>{v&S򁼎qJ>ϓ)@?O<dJ>1Ny2%XRaJ>1N:)RxJ5?PREMy|`QS*EMJ),j*%&SV)@ɔ|dJ>j2%h5LZM&S_ИLZMTJ> l`QƔ4J5?PREMT6|`QS)@8%XTL~`J>3/L|?L)@^S8%S80%R80%SV)@ɔ| )LZM&SV)@8%h5?P)@8%h5,jXlD0%XTJ@J5/L~`J>~`JKSEKSEMJ|`QS)RV)@ɔ| #?P)@ɔ| #LZM&SV)@^:%h5,j*%XTJ>J5,j*%@J5,j?1%Xl bJ>YNRMS6S<tJ>:%`E)ߧ̔bazg9lZ5l?VzR'5lP4l??_Ɣ0>#s/aÔ  SSJ~~372%?2%?lFFRRR͇)%?*%?*%?*%?*%?lfffffffffFFFR Sat))%?2%?2%?2%?lbFFRRRSJ~TJ~TJ~~0AeчAeчAAb,&a|~{&ϴ}L4x 1}L4x O2LZM&ViEMJ5,j* XT@J5,j* XT<4x ,j* XT<4x`QSi@^9 @&Pi؁EMUU0nx Ob O@Ÿwynx?D@ȸ3-wynZMv&V@nxd7P=@ʹȳ$yv o? vTO;i{ځsO;gIiZM&{d9/*u}:>fmhu}:΁|sc91w.7rś~d,7Grs \n#c9<2mrs \nك́6a@yrs`QSEM*7ų́EME.5Un,j.7rs`QS@xv9jh5Yn,7ZM&́V@rsd9j|`@rsd@.5`,7~c9rs`QS] ,jXT@E*75UnḾEME&0uB:Mg ΁?+:s?+:¬:¬s?+:ZMf&΁|sd9j2h5u:HY@ɬ:HY@ɬs`QS5f Y*XT9~`΁E0u,j0u,j*Xla0XT@e:5u:ZMfy:ZMfysd9j2h5u:pY@ɬs`QSY΁EMeT9s`QSY@n9XT9s`Q1Yfc΁ERv9j:Hou>tZuU΁Y/:>Hf%?a?a-Ôu?a?aӰ)2f1=B 䩅{|m _Gȗ =@ZGȗm{~dGGm GGm`yd={|m GohT6yݣ ,jGXT-.m`QS=@Ls6Z\`QSzEMh2=գ ,jGȗiZMh&{V=@md6jGh5٣ 穈k`yJ~qa #y*X"*X"a@%UÈkjq QG\JZfYx ,} *m |_k ߗ z  V@k ߗh5Yx ,ZM^& |kd@^*ׅ@k`QSaf XT5~` E _?Xx ,j_?Xx ,jXl~XT@^*5Ux ,ZM^y*ZM^ykd5jh5Yx ,Å@k`QS EM^T5k`QS@wXT5k`Q1fc Eu5jHo]x=$]x օ/.>+^9l5lX7SujwXxgb2l^y oaðdlda*A^A^7 SuTuTu~0^A^A^A^A^7 Sudu~80^A^A^A^ SuTuTa***b0;0;0;0;vduXx} a{@nXx} a{;n~;l7 =7l>scu^A^A^A^A^0;0;0;0;0;a*j***o^A^i k Et@  х@k~WH%]x .Va t5fzZEM^*'XT0j2O[_-de@_-(ho ?ʿ[_@{kd@_&V@k~h5 ,jT_+Ek`QS ,j*XT5k`QS+k`QS+jl5 ,j*h5 ̿ZM_&V@y6j[a-۰M؁[as&p l犁M~xhoن=Pm@k h5ن=Pm@^l h5ن lZMa&۰EMaj5Ն ,j XT66j5Ն ,j XT66l`QSm@ XT66l`QSmڰ<0rlZMay6ld6j h5ن lm@6ld6j h5ن=PmڰEMay6l`QSmڰEMaj5Ն ,j XT66l`Q1mʧ XT4j {ay16lBljm@͸ ae6\nm@^q ;.aT6j h5ن lZMa&۰EMay6l`QSmڰEMaj6l`QSmڰr66l`QSmڰEMay6l`QSmڰEMa۰Tm@6l t6j H5݆ nRMa۰Tm@6l t@a̿@v@_yjk g;ȗο󯁼r5ο*Gk~d̿1ο1X|k :οۄ \_J5 ,j*XT5˜ ,j*%XTJ.k`QS@/s@_ʿ}V@kd5j2QX2Qz`?LDi?LFX?LDjDi?LFY! B?B +B +|3 igB +Bz*B +V@ i ̸Bh5Y! 
ZMVH&+|i id@UH@ i`QSf *BXT4~`+E+0X! ,j0X! ,jBXlaBXT@UH5U! ZMVHyj 遪ZMVHyj id4jBh5Y! @ i`QS*EMUHT4 i`QS@BXT4 i`Q1fc+Et4jBHo]!=d]! 䕁+/#TH_?̗mez^WH_=meszeS: җBe˶/m+ ˶om_'B:ll{2: */ޮB2: 鰽B2;WH_f ˶sa{evpҗB2;WH_yҗB2;WH_f ^!}+/{evpҗATH_FQ!}Dt^!}De * */BetҗATHҗB2;WHҗB2;WH_`+/{evpҗBetҗa{e ˶ 鰽Be[eE˶ŗmݽa{e[e1˶ŗmɽ3 ˶˶l^!}+/{evp+/{evpҗB2;WH_a+/{:evpҗBetP i*nRDUHdޅtQ i*GUH/.//^H)U! ܓyKUH/ɼ {¢^!p +5dޅV  -*[TH/N$/ -*[TH ^! OJ+۟Bzi_^h5Q!jBzD"B E E EͽBzaQ}BzaQs {¢^!WH/,j {¢^!p;WHEͽBzaQs^h5Q!jBzDB V f=sav;}xK/^]6/p;tp;ٹp&ƣ_zvxvsvsG4By_zD4p^[K/^h5/j_zD¢/K/,j {¢/K~Eͽ_zaQs^X5~Eͽ_z!/K/,j {¢/IDB~酼 QB~V &ZMK/䕈ZMK/^h5/j_K/,j {B_zaQs^X5~Eͽ_zaQs^X5/9/,j?_zaQss {swT/6^[K/O yo~酼QBܨ_z?^/K/K y~i/j_zDB~V &5~<S¢/K/,j {4p^X5~酼R¢/K/,j {B^R_zaQs^X5~^H5/j_z!TB~TS RMK/^H5/j_z!T4p^XZMM/5⦁{B^p(nz!_+nz!E*nz!E*nz!7D4p^观~~*^观~~*^7^观~|C~|C~|C~|~@4p^W^X#t5݅Eͽ_zaQs^e^X#t5݅Eͽ_zaQs^e {B/SB~V &򭗊Z0A/`(^hPЂ.`(^hPoTЂ!y~^f煖 }8 y!_g(y!_g(y?~^{BD?/~^h5oj"yDBV M &{BiSBEͽtaQs~^Xܣ%5EKD?/,j_" lD yaQE=yaQs~^Xܣ5E=yaQs~^h5j"y! ܣZMD?/}ZMD?/~^h5j"y!߾+yD¢G?/,j=yaQs~^Xܣ5E=yaQ1 A~^X,HE?/~^Ho ,?E?/yE?/~_D39l+0=a?a|'5lP4lM=lJa|F~PۿX+ *atχ)9l/=2929l%=2920E?AE?AE?7SsTsTsTsTs~1*9*9*9*9*9*9*9*9*9292920E?AF?9 Ssdsdsds~)62920E?AE?AE?0;0;ja~~~~~oAF?ߏχݰo?F?~l؞aO홻)9l/ /0|0~핰avPavPavPavPχ)9*9*9*9*9*9l\|Ju:u젢젢{at@Zg~H:+RIG?)PH/ ~B]`QS⁊~5U ~g@{g~ho Sa3O@?F?T bG?T bG?@!v3y *~5 ,j*BXT3g _9XT.]`QS|灊~5 4'-c2?>&-c@ըc2 )dN`2o1 w@Ya@x) SZj)T2R3Lh SZM)a@0ed2j2Lh5 ?3 S0e _9Lh5 ,jXT20e b:LXT2~`ÔEÔ_ ,j_ ,j*LXT20e`QSa”EM)&ÔVa@`w@)&Ô|0ed2j2Lh5 SÔVa”EM) S0e`QSa”|}0e`QSa”E )A S5KafZX')@Jje v2pgraz=lZ5l0U+Ojik~X ۿtj+gu~2Z0U+o]AV+AV+/\AV+AV+j0;j0;jaVVVVVVVVVVVVVVVVV>Lata0:jTFYFYFYA VV>LavPavPʇZ9Z9Z9l{젪젪젪젪Kat׳erY|a{0kY|a{+k~8lW7=5l= mcr^fUfU|젊젊SrTrTrTrTrTr~0-Ae؆AE؆A-A-x蠋E/"hyktE@e }t2p .ZRJ-T~-^hH3]OgZ͟ʢed2jhh5Y O&Eʢe`Q}hXT2پRY ,jhyEM-*Z5U ,jhߋ5U ,jhߋ5U@e F ]ZM.&sV@^8wh5 ]ZM.&s*wXT2e J ,j*wXT2e`QSrEM.A]5s ,j?1wXT@.&sq2W8]ǹ@ -sq28]e œ es s2j2wyrV@ed2j2wh5 ,j*wc1.]5 ,j*wXT@.]5 sEM.]5 ,j*wK*.]5 ,j*w ]RM.sTӹ@e t2j:wH5 
]RM.s*wXT2ed2]L1avP1avP1a)8*8*8*8*8l=*8*8*8*8*8*8*8*8*8282820AG cSqdqdqdq~32820AA0;0;a99999~s5c`030ѝa{+js|ba{&j~^7l?㰽5l_i ecqTqTqTqTqTa999999/0;0;0;0;mt1:RDTy,.:Hs 9ghT1@J*Kic<XT1c~!ybEM&cde12O[T12hos| c1@{˘ci_?9gV ?+1@ɘcd1j2_+ZM+1fJe1c`Q}2XT@95s ,j*XT1p? ,j*XT1p? ,j*xvEM9ZM&cV1@ɘcd89P@3Yz ǁUz ǁ@Xz l 'p?C ǁB2p o}q`@ycd@y$cd1jh5Yz ,=5Uz ,jXT1c`QSJXT1c`QSJEMyc`QSJEM*=V@c J\z ,=ZM&KV@h5Yz ,=ZM&KXT1c M\z ,jXT1c`QSJEMA,=51 ,j?XT@anDcFd ܈ V@FjDZM6"&V@Fdd#2Fd o܈ ,jXT#2Fd`QSՈ ,jXT#2gYnD5Ո ,jXT#2Fd ܈ ,jXT#2F/t#2jH5݈ nDRM7"TӍ@Fd t#2jH5݈ nDFd`QSE5 y$T@2W#Hռ|d : ȃ&$y5La{fW_7za{Zi~;6l?5'ⰽ4l?_ k_dqTqTqTqTqTa      /0;j0;f0;0;jt1:QRDT+.:H@  gTT@JuKiXT1b~wEMպ&_e12[T12ho@| @{bi_? OJe1j2h5@ VfJe1پR@ ,j*Xl_ 5@#4_{{e8|˶A4×m^F8|De+A4_F87_f8|v8|/{evpo×mo^_f8|/{evpo×q2;7_f /hq8loq  /hqet×A4×q2;7×q2;7_ /{evpo×qxsAeA˶ ˶sAp8|֢yٖPzv#jhϚzv򲭞LuavPuavPuavPuavPuaavPuavPuavPuavPuavPuavPuavPuavPuavPuatuatuatuć88l?;:0::0::edqda###>LuavPuavPuaTGfUGfUGfUGfUGea{i*y a{i*>LUa{fmߖ ذek o8lO5 /ʆAVAUAUAUAUAUAU0;0;0;0;0;a*J **ۯAWiU EtU@%颫U@b~DH%]U *X tU1fx`EMU*}XT,j[V-]jeU@U-UhoYU *[V@{˪bdU@U揪V}x`JeU1jXl_*5W*EMU+UXTU1b`QSU Z`QSU Z`QSU ,jXTU1jh5YU *ZMV&>+L.bbr1П&Tr1П&Yar1p?;}V\ 5ˣ(.П&{(p< L.b R\ L.Sdr1j2h5\@b X\ L.ZM&&V@޳8h5\ L.ZM&&*XTr1b [\ ,j*XTr1b`QSEMAL.5 ,j?1XT@%ȪϪb ό\U 䡑<5rU1F*Ϫb \U ɑ<:rU1 *U@ɪbdU1jxOQU@ɪbdU@U&VUsU1b`QSUEMUTU1b`QSU@tXTU1b`QSUu1ȧ1y~c7.1^T1<{Ac c?x|Lcc 1{A>&w11T= ccOc i* ,T0XcLS= ,T {A>r1ȧ1'Z= lhc O1c\ccП 14c z1=M|zcd1i4c z1=ƠE?g4c it,T1Xcc i,T1XcLS=`zc i,T1XcLS=`z2Me14c z1=M|E&{A>su1i4c z1=M|cd1XcLS=`zc iA14ccLS=`z25=`fcLTc iP{c'K`= cX~Ku^/zc+5Pc4?__cq{cSgl93?6o ƸA8?1ccq1q1SqTqTqlDvL=1oP=1oP=1oP=1oP=Ʊ7777777777771ǸA1n=c1q1q1q17,c {c {ccc<77~rL=1oP=1oP=1oP=1oP=_cql?~4s1Sql*ml?(:~2hl/Sqlr1ȿߺo]r mϒcd]rcLS%`JAO%Ǡɒc\r z,9=M&KAO% -4Yrc\r z,94 i,T1ȧ^.94Ur i,T1XcLS%Nj*94Ur i,T1XcLS%`J2Me*9=M&KA>uJAO% Ⱥ4Yr z,9=M&KA>s1i,T1XcLS%Nj*94 e*9^T1XcLS%`fĒcL} b1XY~.94K%_t1X%c ]r r.9^,? 
Kcczzl;Xcc*9Bbl~%1F,9(OCJcGlbl5Yrc{=gl/BgZ1p/1㔱5'Ac{jc{jl7YkUkUkUkUkUkUkr1XZcLS`j2Me5^T1XZcLS`j2Me54Uk i,T1i4Yk kUk z5&kUk z5=M&kA>s1i,T1XZcLSƋ54 e5^T1XZcLS`fZcL} b1XY~54Kϵ_t1X%Zc]k rԮ5^,;+ic'c 9?8*/)WXrǒ|blD?8&ƸAǸAfc Kc Kc1n%1n%c*9y*9y*9OȎ8 8 8 8 8?UrUrUrUrUrUrUrUrUrYrYrYr<7Ȓ~10 xL%1n%1n%1n%DZN`dqdJcޠJcޠJTrUrUroV8 8 8 8 ??k~1|frlۣ;c{ej?k 9v~4祎9mVml?ۛgTYYUUUUUU<77777~0uL1oP1oP1oP1oPȱLjt2ܯ  r.@F ܣ /I(]Xt2eyQѱ` 2M E e4U<d,@x ^< d5ųyQȠdЋg A/ȠdЋg2=M & U z?*@=͟aadd2X>Y ic e*@4ۇ1 2M /,T2X]LS` -XdLS~,Tv2M e*@=M & AOȠdd1ߌ\T1Xv`"Ay cp<#~/~;0܏@9Sp?g z<^T1KG&#y őǠcd1i24y i*,T1XcLS`"2ME/*,T1XcLS`"2MEr1XcLS`"2MEr #AO _y z<=MF&#AO ay z<=MF&#AONj<4y i*]#2MEe<4y i*,T1X`L} b1X`L} b1X>  Ϛ\  O\^2 w3Ct#3XFfLS`2M52ejd^T#3XFfLS`2M52ejd4 i,T#3i4  zldڍ̠Ffd#3i4 AO`2M52ejd^T#3Xپ,T#2M52ejd4 62ek2RGt#3XYnd3K :nX~U#R=_϶Ig۫Dfl`~m/gk0 g|l{3g0?>q>| "7 gӗϸAd0?l{7 g 2c{3op`~ ϶'c{3op`~ ϼ=7g0?۞} ϼ=7g0? g3y{3op`~ ϸAd0?Dsl`~ "3ṉ=7 g 2q`~C| "7 ؞3y{sl`~ ϼ=v1g0? g3o=Gmͣ϶[϶϶K: vV%lp}̱=$l+p}]F}Dgۑg[|(43nϸA$2?y{"3opOd~ ϼ=7'2Dgy{"3opOd~ ϶=7>6gmGIqJd> |$= UܢF%2rJd>rJd>( UܥL%2{a|X'2n=Ls=,]G~zg>̇C ̇~;o3|v@?g>4|k>4 ̇|iOꟽl?{?>~f}RL}RLsg>,lg>,Lsg>,܃4~2ͽp{X3i̇E2ͽ܃4~2ͽD?󡧉~CȮ&=M3n$3pC ̇~;o3nWxv@?v p;uz]=̇ہvD?33Ḟ&w0g>4|iD?󡧉~2ͽLsg>,4~2ͽLsg~2ͽLsg>,4~2ͽZg>,4~2ͽLsg>ٖg>4|Ȼ3zg>4|iD?!/h|iD?󡧉~CO|X3i̇,{4 3iL_7V*o>䕕ʛ7J͇Ry!T|{+7%əRy!T|iDy󡧉CO͇&ʛ=M7{y󡧉CO͇&ʛ=M7z(o>,˛y2ͽLs/o>,˛4fp/o>,˛4CަLs/o>,˛42ͽWj*o>,˛42ͽyQ͇ʛ9M7r*o>4U|iTy!CNS͇ʛ9M7r*o2ͽLs/o>,˛yO2ͽLs/o>,,_T|X7iA*o>,˛Ls/o>,˛4 7i+|Ke9@Y·<P!Dy37@M͇٢?[7gCZj*o>䩅ʛyjCZg*o>䁚ʛg*og>sA3n3zg>#k3n3/g>|Gg~2ͽLsg>4|i_|iD?󡧉~CȮ|~CO_eg>4|iܳE4~2ͽ|X3i̇e{?a|X3{?a|X3i̇e{?a|X3i̇e{?󡧉~CȮ|d~fpg>4|Gg>4|iD?󡧉~C>T?󡧉~2ͽLsg>,L} B?a ̇e{?a|XپL} B?af*Lϼ~f/V̇|~CV`E59cÕczyl=cscg"Hel1~gOS3c' ed?sd?sl2 9 9?><~7~7~޼> iq7e o48Û2M7/*,Tx3X`LS`›.XfLS~J,TxJ2M7e o=M7&ÛAO͠fd[ښ_LЋg[ښA/m͠϶fpл϶fp? gPB/ų܏GTp? 
zlk^T[35&ۚ Fm͠ɶfd[3i4 i,T[3XfLSm`ښ2M5/,T[3XfLSm`ښ2M53++3r>3K+3/ ̋yr|fGWg=M3&AO̠|fd>3i2yQ̠|fd>3i24 zg4 .`2M3eg4ϼ|fLS`A9,T>3X|fLS`A9,T>3X|fLS_t>3i:4 rg9M3AN |ft>3i:4 rg^T>3X|fLS`AL:,T>3X|fL| r>3X|fL| r>3X|E3eg4 iAg4 iAnkySE5|f'|nkd[ښAޠϖm͠lke[3k 5As[3k 5p[3k 5<{p[345/yd>3ȃ 3||fW |fУf>3ȧg |fv>3Q3Si3/*,T>3X|fd>3i23&AO̠|fd>3+g=M3/*3&2MUeg4 џ2M3eg4 i*,T>2M3eg4 i*,T>3X|fLS`AO̠|fOϼ|fd>3ȧg=M3&AO̠|f* zg4 i*,T>25` i*,T>3Xپ1,l_ i2Vt>ϼXwg w|󙿃e>sl+Bsd>sl|2 29 2yL1oP1oP̱13ǼA3ǼA3ǼA3ǼA3xcޠcޠcޠcޠcޠcޠcޠcޠcޠc c c ǔ"qgS>sd>sd>sd>slj<|7|7|13ǼA3ǼA33cgygygc~TrḺy4_ c=_&2c-4u`"c{Vhl/pMc{c5ƙ i*,lLd4ȼDfLS`2M%2eJds`2M%2\LS̋ 4 i*4 zLd=M&2&-?!V05+A̋`=jV05+b5+4_3CG fp L)QyQ \\ z`^T3SW0&+AO̠ fd3X fLS`*2MU0e`4U fLS`*2MU0e`4U `*2MU0e`4U 2̃`=MV0)ЋJy5h? !SA%)Рwh SA%) hлe 4XE@eJ4 zL=M@|hd 4i24 zL)РE@|hd 4XSLS)`RA>t 4XhLS)`R2M@eJ^T 4XhLS)`R2M@eJ4 i*,T 4i24 ѼS zLh)Рhd 4i24 SAO)`R2M@eJ^T 4Xپ1,T R2M@eJ4 @ekS2Rt 4XY"}^,>>| gtb]ӵٱ9cscz-rl>c#c}"thl1~}ϕcjcLhdsdsl4 9 9? YY<77jcޠjcޠjcޠjcޠjcS1oP1oP1oP1oP1oP1oP1oP1oP1oP1n1n1nc}q}ǸA>9 9 9 9?YY<771>ǼA>ǼA>c}y}q~g0S tlO;3c~63׈%бfflO(3c;^Oۏe[=3vL%б^cd td Jc Kc KcޠJcޠJcޠJcޠJcޠJcޠJTUUUU/: *%7 *$7 : :q.B@ZCt rAn% h{t 4_9I@K r.L@/*!,T 4Xhp?hLS `JAO?8 OFh{20܏GOF~ 20܏G~ 20zQѠ߂ d` A -hd`4i20C& z =MFOÏAOGы 4ۿ i*0,lv``4XEFe 4 i*0,T`44 i*0܏e ^T1XhLSѠhd`4i204˫W5/ނ~UR-X^k_j˫[*,&X~3zQ ֠Lg51ܯȂA փL=0&X+ h?xF30,?xFh WE o 3ы yhП 4 i00,T`2MFe 4 i*01 z yhd`4i204 z yhd`4i204 z ^T`4XhLS t i*0,T`4XhLS`2Me%e|FeJ4/ = ы=Gr}4.Gr}4.G/>䅗Ax>zQٝ ϼ\ i>4Y z=MG&AOѠEG&AOѠhd}4i>,T}4[XGe4U i>,T}2MGeyhLS`2MGeyhLS`2MG ht}4i>4] r9MGAN ht}4i>zQ`2MGeyhLS`25`25`U i>,T}4XY>,T}4XY>4Y z^T}4i>4Y z~4=?kAOӋ*ynEuKs4#*wK![,?uKp4>wKTq4KwKp4#*G^AL>QG|hw}4>s{G|8hw}4?>,Sv'XEGe4U z=MGkLOы=MG&AO @>4YhO\ z4U i>,T}4ȇ4U i>,T}4XhLSы4U i>,T}4XhLS`2MGe=MG&A>w}AO ۻ>4Y z=MG&A>u}4i>,T}4XhLSы4 Ge^T}4XhLS`fhL} b}4XY4Kы%hd] r^,k>,^/ c+5Pc4?__cat cßcl81?8ƸAF0:?YY۟q,q,SatTatTatlsL1oP1oP1oP1oPѱΘ7777777777771FǸAFc TYYY۟tq,q,SatTatTa cޠ cޠ c1FǼAFǸAF; alFGTO =45batlJcU~37̎0:flϗSatlYY=777777771FǼAFǼAFǼA5ƼAFǼAFc*yjɍy*ɍy*y*1nЅ Whp1 r.^TC.-0] r. 
'htarA҅ E5e*4U U i!,Ta4iA& '6ip? '6ip? YAy& Ay&6io^I~MzmҠ߼l=MI&ۤAOm~=4&6id4iM4ڿW~& z?+?j^l^a4X6iLSm`f ۤ2MI/M,T4X6iLSm`ڤD/X6iLSm~,T"2MIej=MI&ۤAOmҠ6E~=Y5iЯ'A~Ac4X^Ol\5QEPa5i4C wmAO=ԃ zңf󢂞AAϠG͠gpg'Vz=j=/*䝕AAϠG͠gУf3X>ogLSA`2yˠgLSAϠɠgd3=&AOAϠɠgd33=&AOAϠɠgd󢂞2M=e zygLSA`2M=e z4 i,l2,T1X~Ke3i2 z& zyFgTzyJgTz^l3a3ȃ*=w3Ȼ77;p3 7;|AY ;#Ng9|,gw31wg9|,gw3?;I2Me9/*,T3X,gd3i2s$g9&AOYΠ,gd3Hr=Mf9/*s$g9&2Mer4 2Me9er4 i*,T󢲜2Me9er4 i*,T3X,gLSY`AOYΠ,gOߝ弨,gd3ȧr=Mf9&AOYΠ,gU zr4 iAr4 iAr4弨,gLSY`25Y`f,gLd iңKgY :4X~tw,7,19_ c{Nil?8~,0 흠R`ӛc؞W=uLͱ4+c{қc7717ǸA7ǸA7ǼA7ǼA7ǼA7ǼA7ǼA7ǼA7)9 *9 *9 *6 *9 *9SzsT*mT(mTzsTzslQqNoB7XCtziAn f{tz39I7ӛH rNoL7/*,Tz3XfpbfLS`қAOOA .gp? .gp? OA GA弨.glv9~gy]Π.gd3i4 AO]΋r=Mv9&AOKG]Π./v9e_:r4 i,T󢺜2Mu9er4 ioer4 7z2Mu9/*,T3X.gd3i4 zr^,A/Eϋ*z=^e W;=K?Xgd3i4 z&ld=62 <A;LȼFfGOnd=62e&jdɠ? iyQ`2M52e`AO̠Ff(nd=M62&AO̠Ff7)nd=M62&AO̠FE52ejd4 Dō`2M52ejd4 i,T0XFfLFfLFf<7OƺfPk^T]3聱5ACEADy u \ 2u͋4U i4Y zk=M5&AOu͠ɺE5&AOu͠ɺfd]3i,T]3 R5ek4U i,T]2M5ekyDfLSu`2M5ekyNfLSu`2M5u ft]3i4] rk9M5ANu ft]3iyQu`2M5ekyfLSu`25u`25u`U i,T]3XY,T]3XY4Y zk^T]3i4Y zkyfd]3i4Ar^T3Ȼ7w9<.r3 w9|n.g ] /8 42 :yQ ;S7|suz3i:y}G^slM>^϶m/gӜ=j|mgۃ϶Wu@^gg؞lpl{|v7gۓϸA5?l{h7g c{^3opk~ y϶G5c{^3opk~ yϼ=75?| yϼ=75?gy{^3opk~ yϸA5?D^slk~ "|3nyͱ=7g qk~=D^3nyͱ=75?؞y{^ ؞׼v  ϶϶϶,gM؞ll;lK||Ɵ5mm5϶#϶϶H؞l |ݧ||g-g qHh ϸA$4?y{B3opOh~ ϼ=7'4gy{3opOh~ ϶+=7W>gmͨϸA%4rJh>ܮrJhCnQ ͇\G%4nW]9I%4rJhCR ͇\fLsOh>,|Xi ͇}͇1Cl|-c·٢p;|X~{k>܎9mfpk>}͇~ۣ9CO}͇&=M5zk>nDzkCO}͇&=͟oiFo5i}͇e{_af74fpk>,42ͽLsk>i}͇e{_vLsk2ͽLsk>4|iD_󡧉feC]ʜ!9nsB72òK!ʜˆòK!ʜ2C~){ "g?[,g+,H9ˇ"glo{,n׈GY>ܮmC푳 9ˇe{a枳|XY>,s=M,zY>r=M,zY>4|i"g7 Y>4|i"gD򡧉epY>,s4C(gLs >,s42=gLsY>,k42=gk4COʇy[bep/V>P|#+Hʇ<2R!όT XF*V>䱑=Ls/V>,܋=M+z(V>4Q|iXD򡧉bep/V>4Q|iXD򡧉bCOʇeiʇe{a^ ʇe{a^ ʇe{a^|ț-+iʇe{a^|X+zKʇe{a^|X+/XT!bCNSʇ9M+r*V>4U|iXT!bCNS^|X+iʇe{! 
U|X+iʇek4b25Hʇe{2+iʇe{afb2ͽXL| R򡧉bCO^|iXD򡧉bCXD򡧉bCOʇ/zh]Cޠu>j]>5Z$\ˇ|Cޠu*V>RU|%X7*V>Q+ g~+z (V>MpTʇ3g~+ie{2+iʇe{򡧉bCOʇ|bCOʇ&=M+z(V>(+z(VbC>R򡧉b2ͽ"Ls/V>,܋pTʇe{a^|X+iʇe{2+iʇe{a^|X+iʇe{a^|X+z(V>4Q|*VbCOʇ|nbCOʇ&=M+z(V>3W+z(V>,\U|X+iʇek4bep/V>,܋4b25ʇek4KQʇRS!*JS2X~tUf~cO}l=l)J9c1c5F):0J9??OcR ~q1ǸAF)3c c cs1nQ1nQcRyRyROc)J9 *J9 *J9 *J9 *J9?<(7(pd򘢔c c c c31nQ1nQcͼAE)ǼAE))J9 *J9 *Jki4'dxrl?DŽGTlk O@l?)<9n3ddxrl?,<777777771'ǼA'ǼA'ǼAeƼA'ǼA'c Oy}y}y Oy O11n Wdp? rO^T+-:< rO 'dtx2_A Eee O4  i*,Tx2XO 2;Al߂Al_AlʠoCNUo_CWʯDZo?_]T5XULS`fM`fM`W2K2M.,T5XA6Q&AOMԠ&jd5_!=M6Q/4D zl=M6Qrd5X~Y+`feGM`22M5Q/,T5X&jLSM`3X&jLSM~,T2M5Qej=M6Q&AOMԠ&_*>T^kA5~yQ5ՠǚjcM5X~dM5X֧b:ɚj4ۯ=MV3~xQ Z jp? 3ՠ?Z gA&0gSZ i*,Tj5X>Z i*4Z zLyjdj5i24Z zLTj5i24Z zL=MV/*,Tj5XjNN4 i*,Tj5XjLS`22MVeJ^T&3XjO oa  a z`ySkWuyWku^lc5:wXsD2MuXe=MvX&;AO֠kd5izQ֠kd5i4a z4nT5XkLS֋4a izQ`:2MuX`w5쎴yHk[.'Z\|k]p A>r5ȧ.=\p5g. ,T(XE\e*4Up z,=M\|kd5i4Yp z,`נɂE\|kd5XVLS` A>su5XkLS` 2M\e*^T5XkLS` 2M\e*4Up i,T5i4Yp ׋*=M\<p5i4Yp z,=M\?g4Yp i,l_Xp i,l_Xp izQ` 2M\ek 25`fI4]pXr.@ /$Kcm=?9SctlZ3D%1gtq6ȫAgtq68A^8A^8{ ] ًJ-4U i84Y z,=Mg&AO٠Eg&AO٠ldq6i8,Tq6{kge*4U i8,Tq2Mge*ylLS`2Mge*ylLS`2Mg ltq6i84] r.9MgAN ltq6i8{Q`2Mgek25`25`25`U i8,Tq6XY8,Tq6XY84Y z,^Tq6i84Y z,ygldq6i84Y z,4U i]mOߝ s sA>}w6sng|ld]l] 򁬋A>rq6G .gpq6.h -8,Tj)XEge*4U z,=Mg|ldq6i84Y z,٠Eg|ldq6XDXL6Mge*^Tq6XlLS`2Mge*^Tq6XlLS`2Mge*4U i8,Tq6i84Y Vً*=Mgpq6i84Y z,=Mg|Zldq6XlL} bq6XlL} bq6XEge*4U iA,4 ge*4 g/,A>uq68{:kcKk,ՎQ1jWcl1bl3R_jEe 4 z =M|&CAO!ߠɐodBAO!ߠɐod7i24 i>-r$I蔺2wt7ү%Rp++4 i*,T7XE|e 4 S|e 4 i*,T7O i*,T7X:4 r9M|CAN! 
ot7i:4 r9M|/*,T7XoL icC2M|eoLS!`f99,TB2M|e 41!`B2r r7i24򽨐od7i24 i|&CAO!ߠɐod7XoLW6r r7G^T7ȧ!߃l^T7nխދj<ۭ -z|zVo6[AjVo\ i&_pZ2MEej^T7XVoLSޠVod7[n=Mz&[AOޠVo zl^T7[n=Mze4 i3Wzej4 i,T7XVEzej4 i,T7XVoLS`Z2Mz&[AO _pZAO _p7i4 zl=Mz| Vod7XVoL i,l zej^T7XVoLS`f;,l zej9Mx/sA>Vu7ȁ9{W9ޱ?̱?cJ3cc!gl1~~w~s8ČČﱲ%f|4ƸAf|V2?=77Oǔ۟݌yyyyyy3ǔד۱1Sv_Ofndv43cï1^܎f2c51enccc1n 1noW1n鯒?vޔ&sǔ>_kTcǔ_k?cc5sc5scKc5sǔ?cc sǔ>c sc sCA˽9c7X+VxǼAxǼAx)|6 *; *;); *x6 *w6 *; *;ƸAx\s E΂ܢsA9 opO9Ixs: rLx/*t,T7Xop/r2M΂e4 i*,ӦL'܃e8eWA_HL}!1|Q /$&Bb8`pp6 `f `i,Ty2Mׂe*4ON4N4U^ iivQ `2M24zL=M&&ME%&AO ࠧppS=i2,l?pLd2M%e'c&eJ_T8XpLS `2M%2M%eJׁeJ_T8XpLS ࠧpd8i24϶azxƃ/"z/*zۏU iai",l?24z=_+dv8ȷ2/0;_ͽn0;_ Sav8 o_Tv8A0i2Me&AO ߴsv8i2;4z=Mf|ᠧpdv8i2;4pLS`ApLS`2Mee4i*,Tv8XE_e=Mf/kApcv8gZg| _uvb[Ap:;|Q`2Me&AOᠧpdv8i2;4pdv8i2;4z=Mfe2_m_Tv8XpLS4i*;۵4i*;,Tv8Xp:;,Tv8XpLStv8i:;4r9MgAN ptv8i:;4r_Tv8XpLS`f99;,, ge41`2r rv8XEee4ic2Meepdv8i2;|Qᠧpdv8i2;=Mf&AOᠧpLS`2r 4*o#NW~/gW~|EU~|o8] 9+Ac+AoD#NW~e4U i{Q`*2MU~&+AO #4Y z=MV~&+A>Gr7i{Q #4Y i,T7Xo8] i,T7XoLS`*U i,T7XoLS`*2MU~e4U z=MV~|EU~&+A>}w7i4Y z=MV~|od7XoLX i,l V~e^T7XoLS`f;,l V~e9MW~/4+A>t7ȁ{߱?̱?c*~0cc q*7X,yꬍy*y*c*y꫍yy*y*1n WppW9D/]r.G{!(I8(]Ztq8e8|Q]`2M{ꢊ2MuՂe*4Ui8,Tq8XZL_iO]`f9,,ǘeMdi4zKZd^jAOy頧ɼE奃&AOy頧ɼtp=M楃&is2_x2Mej]T^:X6_LSm`f98/,,'祃ej4y`|i*/,T AOy頧ɼtd^:i2/E"i2/}Qy頧ɼtd^:i2/E"i2/,lǠic?K4ic2M/*/,T^:XtLi*/,l 楃eK41y`f;1/,T^ AOy頧ɼtd^?~e^:3/♗ZlЋg^:3/_otЋg^:3/,o?2/,W-6X~e^:XZlL˼td^:i2/4z,Hί AY ,HgA:{AXE' = ,H4Uz,H=M|頧ɂtdA:i 4Y_& AO頧ɂtdA 2Me*H* 2M5|e*H4Ui ,TA:XoLS`  i }6U zKAet:/4}o祃|y ߷u^:7nئɼt%tLSy4i*/4zK=M楃&AOy頧ɼE奃&AOy頧ɼtd^:i2/,T^:Op^:XMW2M奃eK_T^:XtLSy _v^:XtLSy`2M奃|y`2M奃eKANy tt^:i:/4rK9M祃ANy tt^2M奃er r^:XYAK4ic2M奃etLSyK4i*/,, 祃eK41y頧ɼtd^AOy頧ɼtd^:ȗzK=M楃&AOy`2M奃etd{:ȧnO_T{:ȧnOpjOT =(g~|a _p{:ȇnO4՞?jO4՞i=}Q`2M&AO @=4ٞzlO=M&A>r{:i=}Q @=4ٞi$,T{:XtGݞi=,T{:XtLS`՞i=,T{:XtLS`2MejO4՞zlO=M|nE&A>w{:i=4ٞzlO=M|8td{:XtL؞i=,l ejO_T{:XtLS`f;=,l ejOKy b:/䆜X~(T^ /}}^mgۧ5'z?n#mw϶g?mg l?۾]C3w"g;l[5?>l{Zvl9x ggV+ r֟m||k9r g7g rc{3opY 9϶ys֟y{3opϣ~ 9ϼ=g(3opY 
9ϼ=g7?ӟsOO]#=wgϸk?㮑ӟqHO]=sYgVl{b9ϸk?~]#gwgSϸk?9ϼ=sz wGzlO]ƿ;Rןg϶w>g C3^H]?g[3nϸA?FqH] "u=>D3opO] ϼ==ϼ==ϼ=ϼ7?g[SklO] ϼ7?g[I3nP\RCQx{-*ukT!íVT!Gupo=.~e*uo42=up i2ͽLsO]?,S42=uLso=,,ie2ͽS42ͽDíD:7zH]?4뇞&R=MzH]?܊W=MzH]2rQa6rQa e{'a |XY=J]?,,'ie2ͽS42ͽ8CO뇞&R=MzH]?~#㡧upO]?4~i"uD=Mic?{af;ieϞ~X{a枺~XicR42v Ba枺~XَAH]?,l iAzH]?4~i"ul?ߢWr@/t{/tr@!HT/tr@aϷ`?,޼}X~Ea޼}X~ECO쇞&:=4~-Rv#u_[=d{k vO]?eupO]?eC_H]?J]?J]? J]"uD!߸TCO뇞&R=M_zH]?4~i"uD:ie{!_Ua~Xie{a枺~Xie{:iA[{!+W!U!UC_CaCCuM ix{Xi뇞&R=MzH]?4~i"uD:zH]?4~i"uD2=u2ͽLXi~Xi|Ze{a枺~Xi|Ze{a枺~X/*uT!CNS뇜R9MrJ]?4~i*uT!CNS~XicRr Raf9)uLsO]?,, ie2=uS42=uL~XicR=MzH]CO뇞&R=MekzH]?4~i"uDa枺~XicR4i.<Ͽa[y~ȇk?Y嵃{^!v+d~ȧ^k?= = iye{^!-(Lsk?,=Lsk?,=Mzk?S/zk?4~i"D^!z)D^;=Mi%Ňe{^a~k?,42=Lsk?,=Lsk?,42=Lsk?,42=D^C+vpk?4~w~i"D^COy| COye{^af;!Lsk?,l iy~Xiye2v B^(h[B|8CgQ\X?#?9*~oc}bl?1cG?:~l=wc{wVwg{l_=ͳ=?cmcXQ*r=2Ucc{d{d*c`XUy=lPU1oPU7*7*jT{T{T{T{djc#1[K,LqOqOS}z`}zl9M>}ܗX۟_wwr%֧Ǽr_b}zTy_oe1~]X>2.,S2.,Sdzl`&c6Y_-217272"YY>r7272727bۘ7X?L}|`z,X>V?L= ,SyJy*Sy*S c*SyJy y*Sy*S1ne W2up^9D/*]r.SG{\(IL(]$[t:eL}QI`2M{2M%قe*S4UiL,T:X$[L_\i*,,eJ]T:X$[LSI2ud:т&d z,S=M/L4Yz,S=M{*iL4Y2uLs\iwwf9L,T2Me*4˱e`f9L,T/XY?.S4U2uLSe`}Yz,S=M&AOe AOe*S=M&AOe AOe`f;L,l e*S41e*S_T:X2uLSe`f;L,T:XَA,S4Uic2v b:X2A&AOeꠧ2u w:ȗ4i*x,T:^4i*x}Q`2M&AO 9x4z ^=M&g읃AO 9x4i2Me ^_T:XuLS`2Me ^_T:XuLS`2Me ^4i*x,T:i2x4E/*x4E&AO렧ud:ȧ^=Me ^41`2v b:XEe ^4ic2v bݴآlZpMlZ_lic6 6/Uۿcc`l?lُ>0حߏ}?nwnjlwcVczle5n 1vk1n1ncÎyczTcޠcޠcޠcޠc?yVyVyVyVs`+Zc؊V19=?Vr/`+zl.8{[c=?㽀1oP-1o ؊=V7菱1=Ư'Ԙד1~=٘ד겍xM1=kQi61=wƸA6ǸA67715ƸA6ǸA6ǼA5ǼAƼ}c c`}1o|_aczTlm,WؘY;7ژ7ژ77؞ݘrnLUCtcjAnэ t{tc:gAҍ鋊K7\W i1,Tc:.1,T\-XtLS`2M5e4ō`j2rqc:XE5e4W zlL=M6{,i1}QqtdcAO頧tdc:7&AOjL49Ǎ`Jz՘i,TI2Me*4˱Ǎ`f91,TI/XY?nL4UһtLS`Jz٘zlL=M6&AOAOjL=M6&AOAO`f;1,l 6ejL41gi1}Q`2M5etLS`f;1,Tc:XَAlL41`٘zlL=M6&AOv :l?k,_!u+u|OZ}kˮu/A'u/A_Z~g:XmL4ήuLS`fi]렧ɮud:ik4ٵzZ=_q֩|uꋪSA:uºN:u/At:uNKSd:w]Dש&AOuꠧ:ud:wd]zS=M֩&AOuꋪS4UiN+S4iN,T:X:uLSu`2MթeS_T8X:~=o|j NWAhtuлe:_t;g.b E*b E }QE b| "v:\2Me*b4UY2Me*b_T;X"vLSE젧"vd;p.b=M&AOE젧"v\z,b_T;p.b=Me"vL\ 
2Me*b4Ui,T2Me*b4Ui,T;X"vLSE`AOE젧"v//}QE젧"v'4Yz,b=M&A>v;i,T;XَA,b4Uic2M/,T;X"vLXic?"ŖDf;g.bEfb #s=EoToc`l1}c?~loOw0_cw1ec5c36ƿcX_o5"r=2kEcc{lc{Tw,cޠ*TUUUU۟*y*by*by*bq}⯪cX>Z?#ֲ}L1~Fe#}'XE>ZeO!ǼAUMǼAUMǼr`-{TjcޠcG8}]X1U9=ρ1~lo's`e{l5aVx/`e{l+aV=ƸAVǸAV771UƸAVǸAVǼAUǼAƼc+c`~1o|?be{Tnn,ߏXU;7ܘ7ܘ77]rlWCterAnѕ v{te;+AҕKW\+ i,Te;7.,T^.XvLS`*2MUe4ŕ`r2rqe;XEUe4 zl=MV{<-i}Qyvde*AOvde;&+AO49Ǖ`~Ui,T*2Mue4˱Ǖ`f9,T/XY?l4vLS`~Yzl=MV&+AO; AO=MV&+AO; AO`f;,l Vel41i'*2MUel41`*2v be;XvLXic+2MU4Yzl=MV&+AO?m',J;,J;, eo,{A0XwLXi*,l'e4 eɲwd;i4Yz,{=Mv`Aw/,+.{b;苗e _w;]i&ߠw;w]J&AOeɲwd;w}]z,{=M&AOe*{4Ui䫿.{4UHi,T;XwLSe` 2Me*{_T!9i]﫻.{_T._p;W]h-AAw/G.{_le;X]LSeJ4Ui4Yz,{=M&AOeɲE&AOeɲwd;i,T;_bp;X]LSI`6m*{_T;XwLSe 'w;XwLSe`2M|e`2Me*{ANe wt;i4]r.{9MANe wt2Mer r;XYA.{4Uic2MewLSe*{4Ui,, e*{41eɲwdAOeɲwd;7]z,{=M&AOe`2MewLSe`f9}w_A>`w;~_T;~ 9g|wLS`2Me|wLS`i*,T;i24AOwd;i23:g&AO`fI:,,@g||wLS`2Mee~4wLS`2Mee~4i*,T;Xwd;i2LOw//84z~=Mf&A>!v;i2,T;XَA~4ic2Me/*,T;XwL?xVof;Ǫw]pfbk83g{l~U1Kzl zl?b?ۿ?_ce<_c>ǼAD> 2=q~sׅ1~]|rm2=2kcd{l1d{l+Sw,&cޠb8y~y~y~qJcl gX12>ϖce,|l9ϖcXtsxa,|,ǼAYǼra,|Tbcޠc cc1~S,|cc X?x/`,|x/`,bc{ jd,|d,|loCqqSLod,|d,|T,|TDo,ߏ ?V1> G+ߏ #ǼAUƼ1oPcyyjyy-1nб WXxpO9D/ rGƒ{)I:( J^t,.϶ɟmgvA}c]̟mgGMfl?ۮ϶*lv ? ? 
= m>n|kg!{>gYr"{*gn=+-m7x"{v۳ğyE3opϼ=K7{}7g?gcϼ={7g?g$gc#?w$$gܑD;c{3~H=T;cރ$gxA3opO~ {y{3o{Dc{3opO~ kȥ=rc{.3~Fȥ!gK̟ўK\gO gO >?Yqȥ "VD.3n='7\g ry{.3op~ UȥUȥ UȥUȥ Uȥ ϼ ϼ=8?^g^ sy{.7\CP[!\zp>OR{S8q7nX>=w5í`5i{XY~Wca{XY~Wca e{\a{i1DcT{i1z=M4zhCO&=M4nݪ&=M4ƃ{af91Ls2ͽLvOf9&1Ls=,,g5i42rfPca^ e{ca^DcCO&=M4nRDc<zh?4i1p&41e2ͽ1L4xpo?,42v BcaXَAh?,41e2ͽ1>CO&=M4zh?4c=_3=1?=i"zL=XَA?,ܣ41e_)xXَA?,ܣ=zLs&?,ܻ41e{7af;!zLs&?,l Dz?4=i"zDCO&=MDz-DBC=?/(z!+z>C}"zGCO&V=MDz?4=i"zܣ42=z/)+zLsG?,ܣ42=zLs?,42=z=MDzEkEσ{!C|Z|Z-?K?+׊?K׊n=Xi^{XGi&=MDz?4=i"zDW!۫TD+*?,܋42ͽLs/?iEe{=iEe{"COE|"COE&=Mz(??z("C>S"2]TafI/OUDXY*?,܋Ls/?,܋Ls/?,܋4"2ͽLs/?,܋4"2ͽD"C0"zp/?4QDw"TDiD"COE|"COEe{af;Ls/?,l iE6v Ba^DXYAJ?tW#]_xϮ~L~~SylKol >ۯ?1}c7_oc6#/cyԏcޠc c㲱.4Ư c0O>2Ư ʵ<Mɏky *O> *O> Uv`|l5M0]>M0]~L1n1ncJqLFǸ ˏcS1ޗ.}1oyt71ǼA`ǼA`ǸAfc5???f1k>ϏY)??f1kc̚¬b!̚S|lrq̚q̚A1nY1nYc*q̚q̚yʚy*y1fǼ}Ycc`>Ƭ7T7X1k> RxLY1oP1oP1oPY1oPY=7 :k 5 E%/։ N_T[/XJV^x׹A^Ny;1fA^Ny;1~Qm`f܉`z2S2M.*1,T[/X^dbu<wA>Qt<w<ȧn _$pAHy/yE7σej4<i*,TQt_cc%6_acvLۯۯcOl~l1}c55_Rc5~>77t1]>ƿ;c3]>3ƿ;tJ?ˏ;{L1w.yJS|T"xT"xlןcޠcޠcޠcޠc cc1n1ncJqLqLS|`|lp:M0]~ܗ.۟t77XKLyJyt71ǼA%_ǼA%_ǼAf'cl5??[f2k>ϖY?[f2kc̚¬J/̚S|l/q̚q̚1nY1nYc&q̚q̚yʚy%y{8fǼ=Yc{c`Ǭ7<7X1k> xLY1oPY1oPQ1oPY1oPY=:7 :k 5͹ x_ǃ勠^|/=!uQ /Hǃ$t<͂.t{22Meeǃe ]Ti?~Qὠxdv<rwA>t<rw<ȇΚY ;k~QY Op<5A2Me̓eʚ45i*k,T<ȧAΚ45i*k~QY`2Me̓&AOY T:k45z̚=Mf̓&A>tߘxmj>ߕ<{2c1oPcyyyd|T=xT|T|Tq/,q/,ϿƸǸϏx>ƽx>ƽx~L1טrb|,,ǼA`Ǽrb|T v,,ǼA`x> ; ; x> >?z1>ϝ51~>ϝ5c };kc; c~L5K6 > >'ƸAǸAЏ8 > > > 8 +XC?Џykcޠbc`7N1ǼAEǼA%ǼAǼA7z+t =.,_lyp/O]T<+ g˃z-Q / g˃-^_W2C2s2M.*[,T/X^_d|dz}AOlyd<4-ic2v b!v<'nMjM _^pߴx2i>߱)<5c#1oPycJyyye|TxT|T|Tǘ4^4^4q/Lq/LS|{a|{ac c1EIcŤ7XYLyJyŤ77XYLyJS|TuTuT|d|lf?ϝIcJsg|;cܙ4۟ԏsg|;?Ƥ?w&Gxac{ld|d|lϕqLqLSqd|d|T|Tq,4?V1i> L+4?&ǼAǼI1oP cJy 8y7yJyJy1nI WEEV rn_T{<ȉ=rnYKw{<ȭ=~Qѽ xw{{22ME.=,Tt/X^d{u=i2~Q ;4`i(2ҥt=4K`fS:,,J؃e 4`i*,T=X{LS`2M؃e =M؃&ARE؃&A5{d=i24`z h{LS`f;1,T=XَA 4`{LStc=6AwXzp~1~)W1`l `l? 
?۷?ߕ~O>dǼA; > < 1n1~]QׅcdG}lEg_5k I< I< k1oPM1oP1oP1oPُqg쯏qg쯏qg쯏Ƹ3Ǹ3׏>Ɲ>Ɲ~L1 ܘwrc},:ǼAgǼrc}Tv,:ǼAg> ?; ?; > >%0M~Lm1nm1nm1nm݀1nm1nmc}`}l]e%ُ>7ƸAǸA76761/ǸAǸAǼAǼAu/Ǽmc{#c`6m1o|od}Tr,fո<6777676؞frz`CtD"An z{tD='ڂ#AKGԃ\#U iQixQ`2M&#AOh z_T=0i24Qz`dD=i24QɈzdD2ä#2M/*,T=0XzEEԃe4`f`f`23#2M/*,TD=XzAFԃ&#AOɈzdD={4Qz`dD=i24Q=MFԃewσeĈzLS`f;1,TD"2MEԃe41`"2v bD=XzLQic#2ME24Qz=MFԃ&#AOĈzdD=i2~QɈzdD=XَA41`"2v bD=XzLQi*~Q`"2MEԃeĈzLS}`f;1,T8XَA=MFԃ&#AOɈzdD=i24Qz=MFԃ&#AO4'ic#1`f;R8X>qd=ȷnn >ۃ}ۃۃ|p{c=Lp=Mp=7nz =Mۃ&AO `4ۍ`ni*,l7F/#2Mۃe4i*,T nn-~ۃ|*Adp{Y;nnJnۃ| _v2Mۃ|`2Mۃe =Mۃ&AO$GcAR w2j=QT"3!ZMZMZM_@@@@@n,jj0nPEM 5kj[n,jj=np55Xp{`QSTTTTTTTTTTTj=f9yr p{`Q<Xp{`Q<Xp{`Q<Xpn,jj=f9y=f9y=jr=jrB ZMZMZMW<h59h59h59h59Xp{`QSEr p{`QSEr pf9y=߶9ԓj=_x=ߜz=ߜz=ߜz=_x=_{=_{=_{BMK}O55X{`QSEMM55X{`QSʓEMM55~&|,jj=jr=jr=_z=jr=jr=jr=jr=_z=jrBMOZMN5F'e&ӓߓEͲf{`Q,fz=&|,jj=&|,jj=@@@^F||}OZMNZMNZMNZMNK}OZMN55Xl N55Xl N55~&zU@zs=\uW~|ئ6?>l_ئ6?acOc~~6?=y?=x?=w?=vU?fO1;F?^ .|gX|V^X|vc;ǶǶ'}c|^cc7n}leؾ1; +u}cvp_|?f˰1z3,acxgX|=}c gX|/acWwg`yacvp_,:,Y}vؾ1;~NeXvck5` ck.|` cka ck/X69l_6N1:5Alۥ CX1;x~M+X,XV71;X71;[?fΏ}sؾ1;/u~;mctPkBm?r(m?H Ҧb܃KR-mc.m?H>X+`Qs3 7,jc51&6tp[{jb=p3|jbAM}AM&6tpzjbAM}fHMa`Qs3|51E Q5ˇDm?X ,jOt5M@l?h5ĦVZMl?].jb=p3|jbAM&6.?h5`Qstp\`QstَAtَAt5MEv 7,j515MMM@l?h5ĦVZMl?h5؎AtjbAM}AM&6,jc6,jc6,j515MM{oS`QstَAtO?Xl l?Xܧ,jc6tjbAM&6tjbAM&6tjbAM}>]`Q؎AtَAtO?XlǠOĦ{`{0bAۇMy\ ojAۇMm6%hAf6gm?hĦVZMl5ۃ5m}澍`Q=ttoc?X7,jؼŦMyZჼȯMyZ"6t8My_B6jm5MyZ51E}AM&6tjbAM&6M&6tjbAM&6,j `Qs3|>X7,jE} vk`Qsto?X7-om?X7,j5M ԦTSRMm?H5 ԦTSRMm?H5 ԦTSRMm5MMr ҦEr ҦE}f9i1A5M{`Qsto?X, m?X7,jc6tjb=ptjbAM&64hAM&6tjb`QstYAto?X, mc6,jc6,jmg1K?\ jAk;W?\ sjAk:u?Z,j355E} `Qs_?X^Z?X5E} A5&Zjb A5&X_j A5} AVk5˶,jyM?ȻZY66`Qlj fYEߦ`Qs_k55E} `Qs_jb A5yEkVkXjb A5&u?h5`Qs_َAX?Xl ?X5 5'1G1 S4kvFۅ|6e0:j}p0;>k9? 
ƿOV^_ ƿOV^?Ї51a0 $㳜cWԆ_1avP0;av5<j ~1F?9"?~rD~0a#iD~04"?~rD~]0:3#`yFrD~0;Xf5;g$GA[,Hf5"?jD~~b0ā40?.q`~]0āaat%K/aR0:ȁatat Ӫ0:ȁatA=T=T=oq8XS90?j90  f5:j`~}qڐg.Ԇ| | | |PHw!H{!)77/xb dސw=OPEM'55h5!h5!/ZMn_@ @ 5h5!h5!h5!πZMnZMn_f /Ԇ|`QS㉁EM'^ O ,jސ,jρސ,jj<1Y> zC> !XԆ|`QSrC>jrC>jrC>jrC>jrC>ph5!&7&7&7+VEMmWڐ,jc7 /Ԇ|`QS㉁EMm51GEMm51EMm51Ev |`QSrC>jrC>jrC>jrC>jrC>jrCv ||ڐܐܐ,jc7 6 6 6/mjc7ڐ,jc7ڐ,jc7J,jc7&7&7&7&7&7&7&7&7&7&7&7/Ԇ|`QSSɁEv  f;qC>f;qC>>a{r}>r}>ҽ>_| z}>r}>r}>$@yVVVj;\,jjB5XlT_\,jG*Z,jj}>jr`¦&y] Kk&(uyj`>@޹|`QSˊEM 550h590h590h590h590h590h590&&&&&/x`>e,jj`B-+550X| v{`>,jj`>=0X|`QSEM ARMRMRMRMRMRMRMRMRMRMRM_ 1Er |`QSEr |`Q׿PEM 550X, 550X, ZMZM_@Ɂ@Ɂ@Ɂ@&&&&,jj`>YA,jj`>YA<0X, 58? g&ߗy>_Jx>zBn} n} n} n} n} x>W[+ޭPEM 65[Xn}`QSEM5[Xn}`QS̻EM5[vڭ,jj>jr>jr>_z>jr>jr>jr>jr>_z>jrBw&we"ԻEͲ@2n}`QLz>YB[X,ޭ,jj>vڭ,jj>vڭ,jj>jr>jr>`[a;q>jr>jrBZMZMZMw&wڭ,jcwڭ,jcwڭP'4m NۭK;lwvخa ;luخ4c=lU?a#0as?=savPfA}f90Xƿk 5g+wۯ 5g+wzlI&9ao/|l0;IavP`ys~$0;X^f5[??|34']NFw9i?lr0I4i?r~aFw9i?r~,ONIavP`y~r~0;Xf5{&Af5i?j~~mcg9w?l1q0gF8w?q~~c=0zƹ?ƹasci~>7r~}o0:ȹ4:r~0;avPç`yas`yas`y0;avr~afp:jt0;a~0: 5ZGh} 'iGG/Zc 5h} EhZk ȣh IPEM55h59Zh59ZOZM_@@ h59Zh59Zh59ZZMZM_f/h}`QSkEM5^Zk ,jr,jOs,jj1Y>y> 5ZXh}`QSkr>jr>jr>jr>jr>ph59Z&G&G&GVEM,jjB551EM5^Zk ,jcЏF,jcG,jcGFh}h}h}h}h}h@@ 5Zh59Zh59ZXl 51EM51EM51EM_oSSQ 1EM51EM-,51VVVVVVVVVVVj>f;qv h}`Q8ZXr`Q8ZXMv h}h} /{>W=ZKOx>@@^h}h}h}h}hZ,jG*GZPEM-5#G*G\yE@Zyڋj1wh^V/[.Th^]k//Ԣ}`QSrEZ,jj>jr>jr>jr>jr>jr>jrB-ZM.ZM.ZM.ZM.ZM.5h}`QSSEMM95hXԢr ,jj>E@^}`Q󷩩E@Z,jj>hXԢ}`QSEM-A/RM/RM/RM/RM/RM/RM/RM/RM/RM/RM/RM/_EE 1ȋEr }`QSEr }`QSEr }`QSj>EEE@E@E hh5hh5hh5h_}}}}}`QSEM-51ȋEM-51ȋcEt%9_tmg  5Y/// of///of{^^,jjB-55YXr`QSEM-5Xr`Q󷩩/r`QSEM-_Z\\^\\\\^\Pj@fYPr`Qz?W-X,K^,j1U/5˞Z,jj?Z,jj?Z\\E/_؎A\\\PVVVM@q F;^9lrخ4=lwrخai{8la۟G 9l@۟C0;Au09af9q~=j>9XyMs~3j>9XyMs@4O4 =rۿ?u5avP`yMsԀ0;X^f5??rXܿcF9?^s8[a580zqai80:qav<[9?g+AqavPc`yrXaf5<j,x80;qa avP &80zqa"3gcF8?q>z Aw A?LC Af5:[Vo9?[Vo9?[Ztf-AqavPKv\AdPےzb??DO~9zb??IO_m@0='/Զd *gO`EMmKZMNZMN~Vj[2jrb?jrbBmKZMNZMNZMN+iVVj[2Y>yb?m 5XԶd`QSےjb?m%'3'ږ 
,jf,jj[BM55XԶ䁜/ZMN_m@ɉ@ɉ@ɉx~`QS&ؿPےEv ~`QSےjb?m%m&ĉ&ĉf;qb?9h59h59h59h59h59a;qb?jrb?jrbBMZMNZMN51Ev ~`QSEv ~`QSEv ~`QSjb?mjjb?َA,jjb?َA,jj:َAؿPEMA51c'ĉĉ/~`Qj@J~ z?Rs?Rs??RsBF-5-5&&/Ԏy`Q=oXԎ,jj<ٞ߿=oXl[5cXl[Nv'y7VV@ޓ~ ozb?jrb?@~ oL{b?w=&iO5-X~`QSVVVVVVjb?jrb?jrb?jrb?jrb?jrb?@~`QSےEMmK55Xږ ,jjb?@^~`Q󷩩@,jjb?ױ=X~`QSEMMAORMORMORMORMORMORMORMORMORMORMORMO_& 1Er ~`QSEr ~`QSEr ~`QSkjb?֎&@ɉ@ɉ 5h59h59h59_~~~~~`QSEMM51EMM51c'&t*?Gp??.?璣^v.9Նf wJBmhK}S)@~@ wJ _;%/LN SR)R*%XԆf`QS)REMJ 5,j*%XTJ :%XTJ J 5,j*%h5L gZM.TJ j2%h5L y&S*%[#N ZMe)f|uJ F,jWe)fYuJ @`QL:%XTJBJ 5,j*%XTJ @dJ j2% MN \؎AL ZM&S*%h5L ZMyk)@ɔ@`QS)f;1%XTJ َAL z@^p- Z@kv wŇ ۭK7lw0-v?a~ -eavPk^Za5Z0}pXyM0lka0-Z㳜aaaWm0iA-Z5Z0;EavY f?%awyڟXFYFYFY<γ$0γ$pJŰ_:/;Fzg`Ԉ0;X^ k00l0lRrXwٿc  aa|=0l0 ߒaa|=0lpz``_ 0:Ȱ0:Ȱ0;X  ̰0;av<6f5<g2젆S`0;A A _ atуatу=F=F=F=_)F=F=c   }S`ؾR92z02z0l42z02zp}AFAFAEA-彘у{1`y/f^0;Xދ=f5;bF_\;LуavP{^0;at}1mN/ ȿ; t }N6c .T /e bv /g &0bdBmvZM &j3j2]h5.LsV@t ,jͮmr}lsk>Hͬmb}lcVAmR}lsccۏc۳n4?~}cvpH1; ?F vgcF`Xy#@wzGccǮ?=csm->]=@1 Ǯ?f}cv l=@p ۾Xwپ z@c|= @+z@` _|A>F ,d>f3}cv< G?f3}x G?f=@1;>}1;|"@0l|"@1: ۮ|"@1:e|"@1:Ƕa{c |l1: ]ޏA>F `y/F`Xy/Fcv#@0#@1;Xދ x?f{1뵺a{cvpgw?f{12^߯PFA(#g>ȗ2nӮը@x/HeKRTFm?AV}?AV}?AV&2nKpZMd5e,j{F澟`Qs 35~E)F僌2555E=#`Qs 35E}?s 2ZMdX35=#`Qs|g>XlǠ}?澟`Q?g51E=#0&2ZMdxxx5lE}\x7h X5,jkj{c5Eͽ1pRM5j 7,j5E}@f91DcA@xj1DcA}5h Xܣ5E=z`QsV=xGE=z`QsavPA}cl ῤʳ̓aa9ypXyy0l#Ɵwô=l / /sۿa}=avO0jIx,! >L=avP >0;atU@WjW _ U}i6)W .T /+W r /-W 9@\5\9@ɪAdByZMV &VU}.jjh5Y5PsEU9f9MX|r`Q&lZM .T jMh5&l&lZM &VmEM 6A`QSmf9Mpa9MX, j51m@6A m@^v КMhM~&&?Gm@k6jZ  s=0\ke ݲj3. @^{p . EM Z ,j*\pEM-v5.,j*\XT pA`QSEM EM  \pA`QSV@pA o68\h5. ZM &tpAdB y@pA`Q:\X,ҍE2^pA`Q:\X,5.,j[  \pA`QSEM  5. 
ZM y 1@pAdB &V@pAL &EM pA`QSf;1\.K3\paۯg  3l03lWgna+{3lf폳?̆ϲal$?ӧavPA A73a+foǽ?p0.8< .߭Ɵg7a90\0l8L#pfȰ.gF,``H0;X  Yp=5OF _ƒZa`_+ kQa]a|0j0F S`_+ kQ5F5F5fQav<5f5<5ƒf`Xa Kf5<*j0+j,yT0%KF<w <Fw<Fw<Fw<ߌFw<FwLav_q3x0pA 1G oA ]5,壃|8juA _<AWVQ h55\5@ɨAd j2jZMF &j3Y>`8jXԚ煊5XԚ煊5XԚg`Q|p Y>r8jXԚg`Q|p 5 5,j*jXԚ灌ZMF &VQ@ɨA~A;j2jp<&VQ@ɨA~A;j2jXT pXT Z ,j*jXT p}EMy5Xl F Z ,jcЏEME ĨA`Q5,j*jp VQ@ɨAd j2jh555ZMF .T j2jh55,jcEv b A`Q5,j*jXl F \A`QSQEv b A`Q5,j*jXl F &VQ@ɨAd j2jh55ZMF &VQ@ɨ5Xl F .l F ĨA`QS ܁Ev b  5,j*jXTBE &TA xhY<,\<8xȋ.Zj-5UyŃ 5HXl[PŃyyA`Q=oY<毣wAb 1j[Ab A ;j>*jXT 5,jj3A`QSQ@ɨAd j2jh55ZMF .T j2jh55ZMF &EME ysQ<Z ,j*jXTBy55,j*j5XԚg`QSQ濦EME .T A`QSQ?A t j:jH55RMG TQ@A t j:jH55PQEr rr r YA55,jcEM.51Qv/T vA`QSˁEr r j2jh55PQ@ɨAd j2jZp j2jh55ZMF 55,jcEME A`Q5,j*jX, G /.vŃ@;Ai?*ڡ.T xhX<C, ŭ@ަp+!_YȯJP#ڭ@ަp+!_<oJm y­@ަp+!jXT+!Z XHh`QSZ EMj%5J,jXT+!_<XT+!V…j%5J,jh5Jl%N[ V@VBd+!j:n%ZM.T+!uJl%5N[ E2VB 븕X,{n%5d[ EͲVB`QSfu+!V…j%555J,jpZ EMj%ZM&[ V…VBd+!jpZ V@VBd+!Jl%5J,jc[ EMVt!Л!\VC:0ldȰ]a!i}ǰ]av9]aG?lby5l\ ۟VհYu> x7*0j}avP0;t>1WaY°V0C8< C_Ɵsw4=! 
ۿ=L9a|00j^{,AkY0;X!4< L% ۿ,1uT0JS :b*a_GL%T0JFJ8Latatav<˙Jf˳avP3`y30jfy,rA,T0;avP3R ?\GfJ8L3fAgSadadada~mfdad10~20~20l,sr%a9at9aat9at9ôZ<2020*0jx,!VާCf4si>0;yavO30j0>0;XާCfG5A`3)TQ}h6;jp|8jȗ|8jyuA _ \5@ɨAdByZMF &VQ}.j2jh55PkE1Q&[ ŭ@^p+!߄oJP#&ܭ@^p+!_YJ% yIí@^p+!jXT+!Z XHh`QSZ EMj%5J,jXT+!)J,jXT+Bj%5Jl%ZMy­@VBd+!jh5JE&[ ȋ>n%ZMeD׭fu+!}J,j1]eO׭fu+!VB`Q꺕XT+Bj%5J,jXT+!VBd+!j{on%\؎Al%ZM&[ h5Jl%ZMyȭ@VB`QSf;XT+!َAl%A=V…m2$[ vI rч~ 1l01l'a۟Wհi5lX ۟U0;wZ чA}f0L7ݰQ`Ga~`Ga~pF(V( ψai{{Q_c_K{: `Ga0;X( fsav<'QFX>6񿏍acca_cl, ka|0l0165aj, katXFXFXfsav4jh5YQ(w&+ EMU;ցEMU(\* EMU;X|h`QSEMU(Y* Ev bE!َA(5UQ8@ɊBdE!jh5YQ(\؎A(ZMV&+ h5YQ(51f;XTE!َA(5UQ,jc+ EMU.TE!B`QSf;X绋(5UQ,jXl V&+ V@ɊBdE!jh5YQ(ZMV&+ V@Ɋ…(55Xl V.l VĊB`QS݁Ev bE!…(5UQ,jp* EMUdE!7]Q]QWm<+ 6@v+ Wys@ϊBgE!B oѻpåB`QSX<~`Q=YQ=YQ,j1C hPB 9:hJ %ҷC rPB /<:hJ/X8%C ~P… %5J\Z ,j*XT(!j2h5J %ZM&C V J %ZM&C V@PB`QS@^w(!B %5JP EM %C EM-5XT(!PB o:XT(!PB`QS?PB t(!j:H5J%RMC Tӡ@PB t(!j:H5JPB Er r(r r(!YA%5J,jcC EM.51ȡF/T(!FPB`QSˁEr r(!j2h5JP@PBd(!j2_Lp(!j2h5J %ZM %5J,jcC EMP…PB`QJ,j*X,  %PBc(!PB /x;hJ}? %ھPBQ(BmC @PB{ N,kr'db!_;ȯɝXPۢ܉@pb!g;/ X 'yÉ@Bdb!B`QSۢEM%.Tb!m EM%J,5X,j*XTb!B Z8XTb!W-X,j*XTb!j2h5XU '& V@Bdb!XL,\B o9h5X,j]'e~׉@rb!Y6xX,j^'e׉ E2B`QS X,j*XTb!B`QS V@B o9pa;1h5XL,\Bdb!j2h5X$'& EM%B`QSf;1]Q$W/~( =k0l`.aZG+0l7`.ч?a?Oai<?gavPAUA6aZGfmԇa珱0l*8?#V+ VgĊa9°0XQ柑ֵaaaw0 VAk90;uav<'XQfsatacEaXQ<}(1V5Ɗ0XQ+ cEa_c(0XQFYQ8Latatav5U;=X ,jj3A`QS@ވu A`QSjе@A t jvH5];RMkTӵ@A t jvpjEMA`Q\;,jvX, ZN,jckEM-'_A`QSɁEM-'51ȵAd jvpjV@Ad ^Ad jvh5Y;5U;,jvX, 51ȵ 1ȵf9vXT YA5U;_Q)@B )@v !j2h5B? 
G)@y?S)@Bd !j2h5B,G# Qw(;tG.zh CwD!;Q7(FGy@^pD!j2h5Q,j*Xzh`QS Q,jj=4B`QS" EME(5Q,j*?(5Q,j*p" EME(ZMF&# BdD!j2h5Q(# V Q!G&# E2B`Q:C(5# EͲB`Q :XTD!YxQ,j*p" EME(5Q,j*XTD!j2h5QEI0lGئ6> ?lGP?=>=>=>=>=>fOp{DcvpG?}܆ǶCǶe%|?gB .~?gB ~?>>1 z udY薺3K:nLب*+RAH˶y}@ev| 2;h^fgB /#/;" mi(x!2^(" /Y" /°=2^("0l("2:`@Dev| 2;,D^f>/,j;{¢yaQsYk;Yn;|^X,߹\X_S_S|*wpaQsV &rZM.\ij"w|^h5;j"wpDHV {HE=wpaQs;5E=wpv9p\Xw>/,j;5E=wpvn¢;.΍^XlA?{¢f{ s5@.\h5;j"wpDBA`{ BBV=wpDBEEE=wpaQ=!wpaQs\XlA\Xs{¢;.,j .,j 1 {¢f{ BBV &rZM.\h5;j"wpDBV &r{¢>}aQ=!w;;o_XlA\Xs{¢;E=wpDBTBK \?'PB"VBK g\ o ;.b.,j 5-r_{ ytU ypP f#wpl.1j.!V.B.,oy!A yZ yU yU=wpaQs\S\Xw>/,j {BV &rZM.\h5;sZM.\h5;j"wpD¢;G;|^Xw>/,j { p.,j ybU E}¢;.U.,jkj yU rRM.\H5;j*wp!TBTS rRM.\H5;s5EA`y R¢fy R¢;Y;#_X,A\XבE}¢澎|aQ<)wpaQs_GY;j"wpD p\h5;j"wpDByrZM.\h5;j"wpaQs\Xs5cr5EA`y R¢fy R¢;Y;%/ڭB Zj.(p!O+pF(BK.?Vg%\h9;P…g%\hP…V &B &* yU.䯘(\S#(\ȟUQ?ͫχ^ȟUQ?ͫp!`WEB…<5…<5…V &* 5…E}>¢^Q+ 5 {E¢^QW.,j {E¢^QW.U.,j {E!p(\X+ 5…V &* * ZMT.(\h5QQjp!%pDE!p(\cI(\h5QQYwUQYwUQǒTQY6xUQYfxUQYxUQW.,j9^U.,jpaQs(\X+ 555c* 5…V &* |* 1 &* ZMT…V &* ZMT.!*U.(\X+ 5c* 5…E(\CT(\{QYQ{0l`n]i9}خ݀aaZN7a?HaѰafu70j9}Խ0;;i9}m0;acEaVV> XQ[VG}`Ea-hw+ S7S_0~0jA{,( Zfg+ `,`EadEaYQxVaOΊcEa?VcEa?Vg7cEa?V0^( (H{\Q?(@*B~6d(ZMV&+ V}/jh5YQ8PEB`QSX|h`QSX|h`QSE]B`Q|ݺX|h`Q|红X|CȊBdE!jh5YQ(G&+ j>4jh5YQ(G&+ EMU#́EMU(* EMU#X|h`QSEMU(F(5UQύ5cЏ* EB`QSh5YQ(ZMV&+ Vc+ V@Ɋ(ZMV&+ EĊB`Q=XTE!XQ,jXlA(5UQ8P* EMU1* EĊB`QSf{ bE!jh5YQ(ZMV&+ V@ɊBdE!jh5YQ(B`QSށEĊ1f{ bE!Uf{ bE!(5UQ,jp* EMU&+ oYQԬ(o \Q,j3Ů(55Xl(>oYQx + ~YQ8٬(+ < 1jWy@!tE!ٚ 3ծ(8+ <(5UQVW ,jXTE!jh5YQ(ZMV&+ VUQ(ZMV&+ V@ɊB`QS@vE!C(5UQ8PEMU(Ī+ EM͇55XTE!B ϮXTE!tE!jH5]Q(RMW+ T@B tE!jH5]Q(B`QSfy rEB`Q<Xtr`Q<Xtr`Q<Xt(55Xtr`Q<Xtr`Q<h5YQ(BdE!jh5YQh5YQ(ZMV&+ EMU(5c+ EMU1c+ EB`QSfy rE!(<ۭ+ @͌+ 0YP°T0 % a`(a~f~PvJAd峀avP#`,`(a, % % ;C Ӑ030l5|Jc % P0^ % P°x0^ % PJ돡at)02020P0;!av|00jHy, % R~B 젆A ) % % ۏN % ,jvXT L];,jvX瓦RMkTӵ@A t jvH5];RMkTӵ@5U;,j ,A5ckEMM5ckEMM5ckEMMA`QSǁEMM5ckEMM5ckV@ZM&kV@kV@Ad jvXT A`Q<vXT Y\;8<vX,A5U;,j Y;8 vhY;ŸvޮZj-5k@klo ?Z7~2l\ 
?U*ӝ0;avP)avPA=LnAz ۏp1}` a峀)aa|BxX,` a~dq} 0~00l?u3l?E;laJ! gSfY0;av|00ׅa|]Ixua&a30.$1fd&aMfd&a~cMfd&a$ $ $GZ ,j?GZ=P" EM>@HBd$!j2h5I%ZMF:hd$!j2h5I%ZMF$$5I8P렁EME$T$!uA$5IO~5I,j* ,jǠE1EHI$ZMF&# V@H1@HBd$@E&# Vf{ b$!I,j*XlA$5I,j F$HB`QS" EHB`QSf{ b$! I$ZMF&# V@HBd$!j2h5I$ZMF&# VI,jj;I8=1XlA$5XlA$5I8P" EMET$!HB`QSX/-5;@!;Swy@KBf!RBiAXT!A,jjT?>oAx SJ5:LZM&SV@Ad@&SV@Ad j2uXT g:,jj3REMg`QSRlҰMH\-װc1lp ?7avPZj̇Ad żA_ c gk>fYT0 aa,``~PvT3 AU峀5avPk`,``,  Zׅ5ix_ šc`M d`M G7d`M 0:Ț0:ȚT3FY3FY3fk`~``20;XX3fL< A-?L5avP젖A A *A eavP?;Ț0:Ț0:Ț0:kzSaTA>Uk0+xXAkaXA\z*cKx0l?0UAVAVatatavPavP``a^avkr 0;X5XAf={ VA A?LavP+6r s p ^A _p @Uw@ ug p?7߽@~WCW{V@^ ,j$ >RYYOWyՁ@gu ]OWyՁ@&:ȿ pu ']OWyՁU,j:c55XTu @du j:h5YZMV&:h5YZMV&VՁ<@`QSsEMm5U,j:p65UJW ,jjn3@`QSՁ@tu @`QSՁ?@ tu j:H5]RMWTՁ@@ tu j:H5]8PՁE1Ձfy ru fy ru fy ru U,jjj8fy ru ]ZMV&jj8j:h5Y4VՁ@@du j:XTu @`Q<:XTu Y\8<:X,A5U,j WY8 :hY䟨:֮ZjV-5Ձ@Yd V&VՁUZMV&<~@du @`Q=:XTu X\?:p]_-oe˻._W-oe;uFK-oe˻t&]h [o.[>;.[>:.[>9.[>8.[>7֛zu\ui2;d]f[ui2;_]f׻a{Y2{Y \{Ye|@XeZe˖areɮxe˖ð-?exe},^їre`etp/ \|x_,^e/ \~_,0l/ \ks/ \ks/ \ks/ \ϸ^^[,pakY2:.{Y2;Xev|?e{,{Y2;^f^kakY2;^f׵Ze˖,^fײev,p,pr2:W.Zku2:pxՁa{u2^{u2^{uRZl9t:prf)l\Ձ^[/{u2:W.ku2;N<_f}^+!{u2;XC@X٫rW.ev܇Ձ:V.evp]r:0 .˄ˆvC;|x:A|{;*F;25A^h|eVso|f~YX`Qs|`Qs`Qs|`Qs`Qs`Q|ϠfA;Eu4E}vh vk;EEͽA>h5v\j{;Vso|j.?h5vk;5vk;hk;E͵| vhhk;E͵k;E͵k;k;E͵f{ Y1wm|jZͽA>h5v c{;k;Vso|j5c`Q=5v1ho|>XlA{;E͵µ`Qsm|vk;E>X\,jǠA>h5v{;Vso|jZͽA>h5vx`Qs`Q=Ǡf{ ,j5c`Qsmpm|>X\/\,j5v@{;<ӏvy/{;vo| Ϝush|?vy1 O>uhpm|>ȃh|f~>X\{;Vso|jZͽA>h5v v{;Vso|j5vy555vk;hk;E͵AD;Eu4Eu4E͵ I`Qsm|jAv&h|jAv&h|jAv&hpm|>X,Ah<fy B;Eu0Ev`15555c,j,j >h5vx:A>h5v ZͽA>h5v`Qsm|>X,Ah|>X,Ah<fy B;E͵fy B;E͵µ^nv- D5{;zo|R [?k ZꟵA>h5v/\oh|EA@; 7t^f~Av39oh|;{;Vso|>X\G3?X\/\,j,j5vk;E͵`Qsm|>h|>X\/\,j5v >h5v{;<\vx A>X,Ch|Yl ϽfE;EͲive5ve5v vk;E͵`Qsm|>h5v O>h5vx{;Vso|ǯ,j5c`Qsm|vǯ0,0l7oq.>LCpa8lhydž,ϕaʰSe0݉ ff54>l44>kԭcX`_kVwgfXazgX`~,q_kaaaB0zgX`p0;XwA GzgX`,; uaXaׅaaO0,kaa6kaaٍa6ka),02,02,0AA0;Xf5;A aavPSX`Ԡ0;AavPaavPaaavPaiwTX`TX1,02,02,02,0l?31:0. 
0.сa~]=&сa6kсa):0l?4&NSt`~ odt`dtaڈFFff ={FVQf= +( сavPc`Gat`f5=jz,(1t@ྙ~ 8p63"0~ t@ F@^NX\yU:Xff`Q|=8XfJ5XfJ5Xff`Q|[8X,_N5X,N5yEM| V@t@d: j2L@t@d: j2L5܏5,j*p63J5܏t@`QSEMmf5,j* ,j*XT: p?XT:@J51W@t@d: j2h58=1h5Lt@d: j2XlAL5cEM1Et@`QS,j*XT: ,j*XlAL5,j &V@t@d: j2h5LZM&V@tJ5sXlAL,j J5cEMT: t@`QS,j*XT: j+X/ H<@ +h pP ,j  ,j &Vh5LZMg NZM&V@t@`QSEt@`QSfy r:t@`Q<9XT: Y,j*pd:v+t@f: t@ [;hLZjwN@Kt@U: j*h5x *НύNx @t oN7tK@p: j2h5L5,jj33tJ5XT: t@`QSEMJ5! J58PEM&V@p: j2h5LZMy~@tJYfjLXj,jZyfu: YVk,jZJ5zEMT: t@`QSEMJZM&<t1@t@d:@&V@t@ _9h5,j*XlAL5,j J˘7s^ʇiJ|Fa8l)a{8l qanqanqanx S`d`d`, 廃qavPû```0;X;f50A fw젆wAavPqavPq?80:80:80:8p0:80:@$'Eqa~]= (0̯KGa`Md`~Ta J 8FFxAAAAFq{r8= `Ga`P0;XQf0A Df=goگ O}~<`>ȫ3<OR`QSj?+Ԍ`QSEMW5˧g3EMW5˧gگڿr@-555@.򔣗yK~藏KcX,,jj2Y>kX,jj?r?jr?jr?jr?jr?p?h5F(&&&cV[EMmx,jj@P5X~@m55BXe`QS[EMmg%,jj?p?+X,jj?-@-@-@-c&&&&&&&1[E-1[EMm5c?P[EMm5XlA,jj?X`Q=q?jr?jr?jr?jr?jr?jr?jr?jr?jr?jr?jr@m55XlA?=q?X`Q=q?-X`QS[j?-& {?3iyo|Ԙ| {?l#1F qn?Яys<@'~/^Hys<@,,jjn?<Xd`QSsEMZMZMZMZMZMZM@ɹ@ɹ@ɹ@ɹ@ɹy sEML53X~`QSsjg2ysEML53X~`QSs<,jjnzn?jzn?jzn?jzn?jzn?jzn?jzn?jzn?jzn?jzn?jzn?jzn@55X,A?<yn?Y<Xn`Q<yn?fy ~`QSjn?6v1sEMm5c&&n~~~ O{n?jrn?jrn?jrn?jrn?1sEM5c,A,j 55X,A,jjn@?s[ZjoI<SўԜԜԜ}T-5-5?&E@dE@dErQ??Y{Q??Y{Q@MI'k/'k/Wi/?/4y‹VVEM-55%XԢZ,jjJ2EZ,jjQ?EZ /5XԢZ,jjQ?E@E@E@~~~~~ xQ?jrQ@-,&eՋE2E@3~`QzQ?Y&YX,^,jjQ?YYXԢZ,jjQ?EZ,jjQ?jrQ?jrQ?`{ ~~Z\\\!*/ZM.5XlA\,jjQ?XԢZ,j5eK2lwdخȰݐi{ǰ]av9iav3b ۽b=Lyv) ?U 0;ۻavPs젦A {A דs+45͹aa~=5{"a|8?l?~0_ gg'aat3 geat3 gAZf=gV8?{"?q,Df5!={"Z~fAMGf=)j =~ M~H=`H~>H#=H'=RKRLV&Zamlse:/sle6/۬y&˶a=6c^ 2l ~/>2;|a0 꿌/N'/&-_}G_z[ l;+7axbPevpE~,-_fQrbPevpȇ_*2˾'_2? c/5xMcle۩xMcl>2:A_F12:򽂱򽂱}Xev|`levp},+܇ec/>,2;XW02;˾caٗ}levp? 
c/ _z c/ct/k]{02Z 꿌5_/_ _zw l;z5lv&e12:Aaz _F12;/}_f}  3//B}Pevp~ǡ_f}/.܎^H5!MԠQ#Qn#H _H'56rz!ԠSE}I¢$_Xܗ$/,jK5'/,jK _Xܗ$/,jOL _Xܗ$A E}Ir /ԿjbPBA &/܎^h51/I^a3Bۇ S>l_X7/N^X7/,Xž}3b߾v7p̿̿E^X7/,jnG%/,j5 5}3¢澙aQs̿jル7/'; o{7/̿jb3B &6/̿jb3¢f{fE fE fE f~ྙaQs̿o_XlO:̿o_XlO:̿o_XlO:̿jb3B &6/̿jb3B &6/̿jb3B &6 E 15 15}3¢澙aQs7/,j5 &6/̿gٵ!Ok3BfVZMl_S̿g/_ȣ̿d!j3 f/B_\ſ/f/Y yUlf/._f/9W_Sſg4_Xg/A/_X /,j5Y &f/ſjbBY &f/g/ſjbBY &f/ſ_ſA^X /,j5Y}¢>aQsſ4aQs߃A^Xg/,jf/,j5Y ,ſjjBY f/ſjjBY f/ſjjBY fY ,EYY 1H5- 1H5- 1H5-}¢澅{aQs½Y4aQs½Y4,VY y^ZMPf/ſjbBY &f/,j5Y 1H5Y 1H1H5cf/,j5cf/,j,@,_Zb>}cB{ wZܗ/X|!O(kB{ -/ϱAj/_h5 omYe-mYe-_ /-e-_cZ|V5 E}>p_O>^Xܗ/,j5 E}¢|aQs_|aQs_/5 VZM,_9ZjbB &/X'r|}|!OhB eT5/a1-_X,ZYP|aQlj¢|aQ j¢|/_Xܗ/,j5 E}B &/>- ,_h5|}|VZM,_sRZjb¢|aQ=a¢|aQ=a¢|/_Xܗ/,jsR_5lk4=l7k.ְݫaVӼ]aSvF ?aNv2 ?rӭ0;avP浇A!|浇ANkpa0^?L]xmr~?9}0MavP`69}?jx,&AM&F?9?~1Y}O}D4U;;'?Ή?Ɖa|o9?-'D0~p?{80MF90MF9??N젦jljavPS`D0;ڇiT0;av|qT0;ivD0;?Ɖat 'ANۏ1 F9rʼnat}}(DA>{"a|8?l?D0ۏ gaa,|0:iz|0:avP6rO{"垈+Df=AR垈avPc젦AC/˨tCC2j ~ '!v^ A>!v^='Ϋ_TW0'Ϋy ;A^p^= Ϋ=MՃ&2MՃe 4WzLS]`2MՃeʫ4Wi*,T^=XzWi*,T^2MՃeʫ=MՃ&Aq^=i24Wz̫=MՃ}yc}sw7nl>Sۗ6ygou8 > < ~q1ǸA~\l'~\lxm>&8~k&c/91xm> =; k1oP1o\lyjyjS{v[ 1]ο߅1] Sc?o?L1n1E¯?ƽ?ƽ?ƽ?ƽ?E'ԏُُ?~7~7~7X~? Ǐyjyw8cޠZc`~7V1ǼARǼARǼ;1oP1oPcǏyǏyc c c cQ1n1ncލ1n1nc?ލ1^1Ώ+RΏcw~ccΏ~vc1~;? ;L1n1n1oP1oP1oK;K;? {)v珕{)vǼr/7F7X؝g>76727z|,zz|pϷ܀= \r_T=>#~*2M52ejd4'2z|LS`fr=>XFEe4x]/{8s"X{k4cA>v=G֎_T0G֎ڱ Y;#kڃcuK9 > = frԭ1բ1쬵+kc1쬵+kcY1^ :EZ[`ll}L1^y*yc}TulןJcޠjcޠjTz,ɒ1@T t KcVXrxZ9Me}g}gʤc<+cޒ*cɌ_c}[b}[b}[b}[b}l?1]Vޏ> > ~L1n1n1og},YyU&~VǼAIǼ򻟕1oPecy*y*yw?+cޠʤcޠ*TyUyUy5VǸAVǸAVǸAVhc +c +}+c +c +TydXb}*c~ʽ+c|Xy~*c1>lg+c1n1n1n1oP1oPi1oK~K> {)Vޏ{)VǼr/77XXyQ>777{+={p?\yrA EUރ`w=nݭ ~ڃBҿƐv܏!`v܏!L?s܍c}l1???ǼA\Ǽr1> *: O11oP!1oP!cq_;O?cy1| S(t?Bc5~`)Z:-)?-)?\5Ǹ%Ǹ%Ǹ%Ǹ%#)cfc c =c`g`~Tt, yy垁1oPc yyy垁1oP1oPc y y c c c c1n1nc徎1n1nc q q S~l?b7K1?1 ()?~c1~0?gc c cޠcޠc`bXb~,R +R y^1oP1oK1? 
|L1oPy1oPq1n-tb q=}t=w~d<7 Ng/*䧊3q`2{L|8,T4XY>gqi*[zQɼE̓ڠϼyЋg< z̛x/*o7g^<7z̛x̓AOGyE̓&AOyɼydy`2vyLSyʛ47i*o,l ̛47i̓eʛ47?ȼyd`A q<ȃ!n=Mσej4 i}~Q`2Mσej4>i},T/XyLS },Tzl=MσzlyfEσi},Ti}4>zlykAOEσ&AOy>zl4>iAl4>iAl4>yLS`25ikGtwk~L}c;?jؾ}ct:{lؾswl1ݫySUMUmu: XF2r>)2r>'52[*cq1^c,@'TF5277X1ǼAUNǼAǼAǼAǼAUNǼAV>cj15>1^x>|>돵1^S!tTm}Tm}l?HkqqqqgƸA֏> > ~L1n1n1o܇> ǼABǼr7B7XCX[U=7B7B7XCX[UU[?77> > > >Y[Y[?VY[Y[Y[?77o܃>1הּgE>J~*c1~>Yg+c +c +cޠ*cޠc`cXc},`+`y1oP1o܃> *|L1oP1oP1nЕ-tb q=|}t=wҕ~=7ӕ NW/䧊+{p?{Q ?Z\ii,,/4G+2MWesƕ`YLz},>Ӄ۠bzc1A~"<~bzc1=4TLz?*_T1=i4YLz,=MӃ&2;`fbzLS`fbzLS`fbzLS*4ULi,lYLi,lYLi,T1 AObzd1=i4YLz,=MӃ&AObzd12Me*4ULX2MӃe*4ULiEӃe*4ULbzLS`AObzgf]L?S 4YLz,yz ϗ~Q q?G/Axr=&_T 0SOyQ :(z'EA>q=g_T=X(zEi,T=X(zd=i24Ez=MFу&Ez=MFу&AOQ(zLSQ O8,T 0X`LSQ`U i*,T= 4U i,T=X(zEi*,T=X(/:4Er9MGуANQ (zt=i:4Er9MG/*,T=XY9~| r=XY9,T=XY9,TC5XY9,TC2M5Tej4 Gуej4 Gу&AOQ􋊢=MFу&AOQ (:4Ez=MFу&2MEуe4 Gуe4 G/A4 Gуe4 Gуe_T=Xo&A=A O ~QwbzG2]LHAwbzлe1=.yZDyr9 9Q ԃ sAτ9 N:5ԃe@]$mg%v϶l:>.϶^l0>ۮ϶O϶blq?gށn?{khƟrvT3|hk ϶SCk8l;1?~|[Ck3opo~ k ϼq7?gZy{k3n_O?~gۉz>:?~g~=~gol;gi?5kvzG3so]ϼϽ?]C3mϸO?>vDg'c{3nmϸAg qh ?}y{W3o7wE?}y{Wtlo ]ϼ+7X{vܻy{}lo mϼ~ mϸA?vDg qh{E?vD}lo 7vlpMϼrgqxۻm?&}l;=w?NaMv3~.7g y{3op4 3ty rg`?C3op2 3t?$}{̟y{3nP| }unG}T!IunGT!Nu߃{!?U},hQaސ}XY>_}XY>b}X7dz(?8C(?zp/?C('JQC%IK=͟iCODICO%e J4oAi%e J42[%e{I=i%e{Iaf-2ͽLs/2ͽLs/DICO%&J=Mz(?4QRiDICO%^RX7i%e{I!Ls/?,K42ͽURXi%^RXi%&J=MDJy&V%nWIa|Xi%&J=Mz(?4QRiDI=z(?4QRiDI2ͽgBTRX'i e{Ia^R e{Ia^R*?,42ͽLs/?i%e{Ia^RCNS%J9Mr*?4URiTI!CNS%J9Mԃ{Ia^RXY,_TRXYLs/?,,_TRXUiA*?,۪Lso>,۪4 imՇekJ=Mz(CO%&J=MȟJ=Mz(?4QRiLs/?,K4 i%ekJ5H%ekJ425H%e{I=i%Az(o{z(?[ԃ{IwCrTI!9лEIwC%<C==tRf7zpϬ?9 ezCȬ?Yȃʬ?5e2!d ?>X ^ |n{p>s{5^ |nC)Q!GJ`j?,42ͽ42ͽLso?,42=>Lso?,yD e{a` e{a`iD!O;DCO &yI &`iLt)`XYҔj?A<5i>4KR eR2ͽLso?,42ͽLso?,42ͽDCU=ؾD{po?4`iD!ϽDa`XپLso?,l_`X7؃{a`X7iAh?,l_`X/d}lձo:EQ%//cG~~t#<_6cU3_4c~-71oP1oP1oPc #ca?V#FSc#=V#FcaStl?@8w~"ca#u7777777o٠b+l#?o׸_q Std~d~d~l?2 2\? 2\L1cS~U~UcޮcޮcѬ_c~e~e~e~e~l?s52joQ1nQ1nQ1nQ1nQ1oG2j? HFǼAESǼrɨ7h7X#M=7h7h7X#M}$cޠc`d~d~d~d~l?r: 2j? 2jG2j? 2j? 
2jLQ1nQ1nQcڏyڏy *x?.^ʽc c cc ޏGJǸˏq/ ޏ?է˽ʽc`wcXwc~,n ޏy Ryލ1oP-c ޏy ޏy ޏqn.i|Vi=DoAYn}մM!Ij}LS5`fq>XY>EܴzoAB_T%>W~=W}J|/+,{/+A_?AOG~4Yz4Ve +2MUe +2MUe +2MU/,T%>XJ|LS`*Aq%>XJ|LS`*Yz=MV⃞&+AOJ|d%>i4Yz=MV⃞&+Uiw,T%>XJ|']i,T%>XJ|LS @,T%>XJEUe4Uz=MVyՕyRܕ _l&CAq>_T1s9y*! :|gCAp>'_T>X|i,T>X|d>i24z =M&Cz =M&CAO!|LS! Q8,T1XbLS!`BU@ i*,T>4U@ i,T>X|i*,T>X/:4r9MCAN! |t>i:4r9M/*,T>XY9| r>XY9,T>XY9,T6XY9,TB2MaeJ4 eJ4 &CAO! =M&CAO! 94z =M&C2Me 4 e 4 /A4 e 4 e _T>X|db{d=sn_T=i?ރ<{d=i4z\[CA.u>jy!|c>ȳy@! Z8Czd%>ޕ#ޕ ۻs{W|nJ|GJ\H+A)q%>4Ui*,T%*2Me4Ui,T%>XXJ|LS4Ui4YzyɕJ|d%>i4Y+AOyɕJ|L1]i>+As%>XY"4K'ӕ`fIe,T%>XJ|LS`*2MUe4Ui,T%>i4Y+ V⃞&+AO=MV⃞&+AO3Uz4UiA4UiA4UJ|LS`*25`fJ|LTk2!? c1cOqL1 ]/:~e~]Lwc55_Rc5֏1y;u_kďLr0?Lr0?Lc1^+ď|ǔL77XfǼA=ǼAeǼAeǼAeǼA=ǼAeǼAeǼAeǼAeǼA=ǸAfǸAfǸAf)? 2??7L7L7L~ed&~d&ǔ?LLL1eǼ]eǼ]e[LLLLL~\ke&2c 3c 3ǔ}$3c`d&~Tjt,ďyJy>1oPcďyJyJy>1oP1oG2? *? HfǸAfǸAfǸAfӪc 3c 3}$3c 3c 3ǔ?L7L7L7W_eJϏYǼr?77~Nc!1n1c c1n1n1n1oP1oP1o2=2=? A珕AǼr?7n7X>777|g_`V9=|pA_N9=Q ?8#|N4ieSe>VBb 3A_H}!1 |p? |ҏ2zҏ2AO}34z4g<32 |LS`fg>X |L}3,T22Mee4i*yge4i* |d>i24z=Mf&3AO |d>i2Q`2Meeyz`22Mee432Mee_T>X |LS |d>ȣy k94zy :QI O{;d>#7yy O=8>g3Aq>432Me4z=Mf&3AO |d2AO |d>i24i*9 ge4? i*,T2Mee`2Mej OӃ|?Nt;= OӃ|?Nt;=ȏӃ wX='$a ;! 
Ճ 9 OǼA9> 9 9 OǼA9ǼrZ7Z7X?YkYkYkYkǎqq+qqS}d}djcޠjcޠjcޠjc_e*qg,qg,qg,>pXd^S}l?:Ɲ>XdXdۏ[q,S}T]y,|,+|,y垏Ec垏E1o> *< {>ǼAُ> > > :qzpA~8FoA)_Tt=G׃?G/*2)zL|P8tzp?tAӃNr`:=ˁ~=ˁ/Ӄq/Ӄ~'5AOtzL}R3,lL4NiOjӃeJ4'52M/*,T:=XtzLS`Aq:=XtzLS`NzL=MӃ&AOtzd:=i24NzL=MӃ&Ni*,T:=XtzGNi*,T:=XtzLS #:,T:=XtEӃeJ4Nzyu:Eуw<2 n Avr<[e _Tu;Gn( )Vv~pfzl=sLw+flL1뱿 ǼkǼAǼAǼAǼAǼAǼAǼAǼAǼAǼAǸAǸAǸA|= |=)Y>777~ddzdʃc?82濋TEU1_bS{׃cTOXcX*Vx== = = = }LU1nU1nU1oܛ= {SVǼAǼro*7B7XMYUH<*7B7B7XMYUH˽)cޠc`7e{d{d{d{l?9: = }ܛ= = }LU1nU1nUcbybybybyby =}=}=}=g>~*tdM{l?|L5@ɚ~PydM{dM{l?< }L51oP1o+}+= {Eִ{EִǼrȚ7N7XYU>77erɻeBA.fbv^T1;/*f!bvp @uL|}`Oz^=jv{ٽz^=jvQGuУQ:z?^=yud:i{,l^4-2Mue`2yuLS^4սi{,T:Xu_ܽi{,T:XAv&AO렧ud:i{4ٽz^=Mv&AO렧Eueʻ4սi{aAwe^4սi{,T:c^4սi{}Q`2Mu&Ar:{}Q U{ؽz}^yt {}Q O{} Ϻ{Aw$s:ų{!.ws|zL>=M&&APs9jN>=M&&AO 9|?= }Q=GtG܃{A>v:Qك{Ar:A4Ճi}Q=`z2< b:SL'|s8|A>`w9N>砯&|s؝|sLSٶ`2M%eJ>4|i*,T9XsOߝ|i*,T2M%eJ>=M&&A q9i24|zL>=M&<RPs8XLe%,qQ%r`8X.erP8X.e<',q\,qd8i2K9)g/[f&AOY=Mf&AOY O8K4%i*K,l4%i[fe_T8X,qLSY`fe8Xo%i*K,lM%__?|ӏ??pUSul&Krl" rlUqLYj/ƱZKW1eǼAeǼAeǼA[Yαkf`c5ʮ%Oqg)6M㮙%ŌO)K<]3K|)g|L91_G:Rxl? 
k:bxsc3uĜ7Ȝ7Ȝ7Ȝ7Ȝ1ǸAǸAǼrO˜7Xi3=-scޠuc`exTrcޠucޠuc`exTn,yy垖91n91n91n9T7Ȝ7ȜrO˜7Ȝ7Ȝ1ǸAǸA挏)g< *g< *g< *g< *g< *g< CƜɜɜ~Vrc191c'sc91919ɜ1ǼA5MǼrȜrȜ7X3>V3˽"scޠ"c`WdxTrcޠrcޠrƿqp?X\A;g;g,/3w8/*g䇀s`}p?,4G~z`L =0ocZ8(-܏=OcZ8>&2ǴpL}1-,lExT7XOxT7XOx<oGzQAxa7i4 z=Mx&{AO=ޠod7i4㽨oLS`z2Mx<%oLS=`z2MxeoLS=`z i,T7iA"x^{ 4{A=ޠog t{ { {As73Ky= uSx^<{A^r7s= {Q=`zA>q7XZLS=`zAO=ޠod7i4 z^T7i4 z=Mx&{2Mx|:oLSu`j2Mxe^T]-Xot6ȷ̅ ׹po Ae.^T6ȷ̅`y= = A~pCdn/p,,`.4 i{0ne ]T6X ^LSA`fs6X ^L܃p4Y z,^T6ip4Y z,y۠md6ip4Y ip,T6XY\ ip,,`.^,`.4= 2Mne*4U X 2Mne*^L O8n z[ot6ȇ.څ Yp#kn|hmЋg6G.څۋ*څ` 2Mne*4U i,T6Xm?*4U ip{Q` 2Mn& AO Zp4Y z,=Mn/LX~ ؃˯Aτ`ʯ2_e&*y"`ʯ2_c59.y"׋*=M_<:kd5i4Y~ z,y>נkLS`fc5XkL,Tʯ2M_e*4}˯2vkLS`fk׃4Jda?^϶l{q|gK|0umgv~]mG,gۥv~]mB>.ӱ= 7ga?,g үmXv~l;uX.үm>v~lE3g>ێ}";؞~Egۓϼ=7_?~ ϼ=7_?Lg~ӯy{3opO~ ϸA_?~DulO~ "3nױ=7g үqH~=D3nױ=7_?)?l;=;'c?Ͼ'c?Ͼ'cdgdgdgۉkH~x!?$c?D23nϸA$c?ۓqH~ "7X,H~ ϼr/dg,H~ =7g> g`F23opπ} {a$c?dg`F23nϸA$c?l;I7dg c^ϸA$c?D23opO~ ϼr/d؞ܓy{23opO~ "5;Fj؞l;#wgձ=5v<3϶cqH~]#5vb3ٱ=57&?=&RcϼrXDj3oc"57f?=&Ry{jvlO~ ϼ=5{M؇ii|}WN؇i|}X^=܃y+p;mp;{܃y)+p;m>x f#o6f#p;o6fևiև~x_xCևA뇞o ![{!O9(B{ay 9ԇsyA9ԇsŻPwϡ"/r}"DCO9ԇ&r=MPzȡ>4C}i"D5Piտe{aC}aʡ>,s42=Lsϡ>91Pi9ԇe{5Pi9ԇe{CQׇjpϡ> Pz}ȡ>C}ȳʡ>CP{!̪xT<}b*>QO򰀊VӇ|HCӇ4Q<}ix܋=MOz(>4Q<}ixDa^<}ȇ*>,k[42ͽxLs/2ͽxzQi҇|4CJ>k4CJ4CJ>e4C>oT!(MҤy+MҤ4͒Ҥ4͒Ҥ4XYpO>,ce4X2r42=VL,)MD4ipO>4&}i"MD!+MD4COi҇&Ҥ442=ML,)MLsO>,,7KJ͒Ҥ442=MӤ442=MLj&}XIii&}X_&ңFGnC҇,[Yԇe{+aJ ԇe{+aJ}i"C}ȇʡ>#kPrZ9ԇ|hC9ԇ|dC>V5PrZ9ԇe{aC}XPiYe{aC}ʡ>,ܳX4KK9ԇe{aC}i"D!Z(D5gBC&ayLЇ3yREЇ }+Ї }ȓ*ʄ>IeB{&!O(M }i"D&LCOЇ|LCOЇe{&afB&a }X }XgB{&a }XgBi!dBi!dBiЇe~Ї40fBglyWglq)84c+30c~]Z/رz/ױjS&tlT+ulP_!cezL1oP1oP1nбDɱ]fB:c.3v Oq̄h)4v njB):2:۟yʄyʄ@ǼAeBǼAeBǼAeBǼAƼAeBǼAeBǼAeBǼAeBǼAƸAfBǸAfBǸAfB): 2:? 
=47L7L7L}d&td&Hcޠ2cޠ2cٕ ϮL1eB+:]cʄS&tן2c_c&t3c L7L7L7L7L7L1eBǸAfBǸAfBǼr/L7X p˽03cޠNc`f&tT2cޠNcޠNc`f&tTi,̄yʄy^1n1n1nб7L7Lr/L7L7Lo1n1oP1oP1o 3zL1oP1oP1oP1oP1ooo1w2:w3'rLUΠG*ggd\ )A>#u3ȧr^T3X*g\ i*4,T3X*gd3i4Y zr=MV9&U zr=MV9&AOUΠ*gLSU ,Th*XTLSU` rg"8|f/ A O3||fOA t>3 O3|"|E3e`Y i,T3+XYj i,,w5g=M3& zg=M3&A>5v>3i24 zg=M3eg4 i3eg4]]2M3eg^T>3X|fLS 8,T>3X|E3eg4AW/z}s2E>l7.}} 2yQϠ/A> d31} :GeC4W Ѡ_!6D/!+Ćhh/ArC4#%n$ Ѡ/^6Dej4 i!p7Dej4hLS `2{ ;crgB|LLhɝ )3A ;crgB/*crgB|LLhɝ i*,T&4X,VLS`2A>`w&4X,VLd i*,T&4i24 f ›AFqx3(o}0f Û oy͋ oy 84 z o=M7&ÛA>wx3i2,Tx3X0 i*,lw o4޼fLS`›2vfLa0,Tx3X0 oSpliWfla(^8_c:_c:9~W~B›cޠ›cޠ›ƀ汲A4S5c zq h0)4WC)9 29۟y hy h@ǼA4ǼA4ǼA4ǼAEƼA4ǼA4ǼA4ǼA4ǼAEƸA4ǸA4ǸA4)9 29?q1oPcky y y>q1oP1o'39 *9 d5ǸA5ǸA5ǸA5rc c }2c c <77X<77777~<73ec<ÛcYc oƸy7cc<Ûc<Ûc 1nc oq oyc1o2y29 O7ǼA7Ǽr717ǼA7ǼA7 z3 g| A̋gy-: t2)ˠ_x,~ᙲ '~ᙲ g2D I~H6epw_p?${QY`O򡺳 GeJ˨dZY` zJ=jf% "f#:=]ɠ i*Y\8(*X?ʼn")N<~cGEeN<ylj"owȣ̉"2'<ʜx,ڏ*X\|6qⱸLSx,.T)LS2峉e*%i.Mx,zL<=M&?Ti2X4x,zL<y}҉Ǣcdi2X4x,.TⱸLS2峉eJ<i.MxplcqeJ<~cqeJ<y#eJ<i*eJ<i*X4x,zL<ygAO=PJeVC忊vĬdq*+EJ}3+YaϬdw8+Y]#}l?d,?ϔeѯ SE*LY~eѯ SE*LY~eoLY^`oLY~eWߝ,.TʲLS)2M,4,SeJYi*eReJYi*eY\-EO"VYw*ZeG],zUyݵ"VjE^}wȫUyݵ"o qLS2M4U,.TUi%U\\,.TLSʢZ?y׈E5ccX,k,cwX,ǢcdK?=M4,.fX\cq72M?TLS2M4?in,.TLscq =9ދͿͿ/j66pGkl;rL|#56OHl>Ly?}ɝxlwclpL|V;c16=WCxwi Ʀcj<ƼA5c5И7cT1 ծyj<ƼA5cޠ1oPǘ7vMd1 x1oPM7|.gS2 lJƼA5%cM7ȦddS2 )qlJƸA6%-ٔqlJƸA6%)ٔqlJSS2 )lJ[>)ՔyjJSS2 )Քw7ndFb5cF{b5wndf726Nٍdñ_Ep,džcq?_קcq?8\+η绚?Tñ8J"/oJ2M4UbP%2M4Ub<cdiXM.1=M&KEO%ƢcdiX4Yb,z,1~cqJe*1iXA.1iX\cqJe*1yKe*1iJe*1iX4Yb,3%"op聱XXb,Hdl -ycE~b][,cE~bcl1[,cE^rlȯ}[P2Mױ2M4[,.Tli2X4[,z-=M&cEO[,z-=M&cEOŢbqbE^$slLSA2M4[,.TlSP="ZiSP="$~X4S,z)~bdOiX4S,{EO=ŢɞbdOiX\bqze4S,.\>~ze)iX\bqze)y{e)ize)iX4S,z)y{EO=Ţbѿx3U}bl[PWl~O`lț4[,& ?>IX}5.1~+,1XK*1XK*1ncqn*1y%2M4Ub,.TLS%"X\cqJX\cqJe:XpWP*b3a+"y%UUE,JE^ w+"yEޤ*b7iX\*bqE^CwLS2ͥ*bqY ,Ua-W_W" OyۃkEaXkEtZ`7ݸXe&keinX ,.T-Ls[Z`qjX\Z`qje24eiX\-c-w _Al _@lS-6c?>jdl>$c☂hcl>c_|$S0 *cM1nѿc˖ͷŸ%Fb{1Eb]C1nѿ/S/-1Wb7y u1oPѿ7_T/ *.yżAEbޠ1oPq7_d/ 2wLѿ7_lN;Hd/ 2Wc 1nѿcļAEbޠy 
*.9weļAEbޠ1oP|BT𘂇1oPØ71cޠ1oP7ad0 2xXD--*>x9 +7[ce+7[c=|kJ?TZ8V\f^qz*W\f^qz+z&L=}i|uq58[4Piɴ^dZi2W4+zL}^qzeJi*W5Ni*W\^qzeJyzeJi*zeJi*W4Y+zC}z^w5WX+zCyy+kiYƁ"oUp ȋtyā"t 1WM y΁"/8Wׄ}@^qyE~W@^q 5e i*W4+z =M&yEO@އ =M&yEO@^d LS"v LS2Mj4+.T P"ZiP"$<+sҁ"$<+@r P2yeʺ|@^q.eʺi.?+.T֥Ls@^d i2.EO@^d <=M&yEO@^qye i.?+.T Ls@އ i*W\@^qye i*Wq4+.T C4+.T i2W4+yEO_**Tz^ѯ y**T\^cyUQ=C˫z^oz^иW{WEaW4YK`%0WgE/Q(߇F^|*W\(_q.&(߇yQ2ME4+.TLSQ";W\(_q|*W\(_q|eOF&xE^vKn}^+xE^vȻ)+n 7 "p+ny%ܙ̿1V[+2ۇʿqȋο}0V[w8V]#οy[qoe8oeʿino̿i*oeʿi*V\3V\3V\[qoimNȿ]~צpmd׵_m_}οd׵阼6צ#t@^T۵h6צcWt$5opο}۵6k oe/H]n~ƽ vmz9k}/׸$ޮMoצN9v{At˵zӵ؜x Ήkӥkx Ήkx 9kx Ήkx ΉkS "vD7[lN]x6] "s ׸A$ޮqH]x6]߼ "vD-6ys78'bsN78'ޮystA36'ޮysN78ys78'ޮMWدys-6'ޮys78'bs78'ޮys75nk o׸A$ޮqH]x "vD7[lN]x "v\Ix .$H]5op9'A787y9 o׼9o׼9q׼k߸ .$H]5op9'A75nk oצhqH]x-$H]x "o׸A$ޮqH~ .$H]s$b9 o׼kx .$H][lN]5nktWlθ]n"vm5nk}^׸]db?.2nצ[qȸ]vq6u\>G"[>G"v\>G"v3n׼s$2n׼93n׼9f; stN‡=k{֊stN‡=k{Nw.]`y\%ˋ=./\"{s,C(=C^IWtŇ y9]%\"{iDD졧CO%&Jdşms2͹Dp\"+%C%4es2͹Dp\"{țT"{Ls.=\9Ȋs2͹Dp\"{i"6w)6{ 63Al,dC~ͩdC|dC^V2!/(_+_6)3A2!/+ {/ߕ {ȯ +ɰ4dC~des29p {i"D2졧dCOɰ&a=M$Ês2졧dCOɰ&a=M$zH=\9' R%.Ӝi͎4des2C%Ês2!Z%U2 J=cJ=䁤dXqdCHJ=WJdCHJ=Jde%.Ӝ[9pܲxLsnY<\|P22͹epCɰ&a=M$Ês⡧dCOɰ&ayMAɰ&a=M$zH=4 {LsN=\9'.\>~(p {LsdXqN=\9'.Ӝaiɰ4des2!/*p {LsNdes229D2졧dC^U2졧dCOɰ&zb'/zbŹpyɶ`=\^'V{bl=\^'&==R=nCO?g_R==l0{8xz}h=0{8z}h=0+ ^f>4̊sׇC C ⯆es2͹ap0{Lsn=5.Ӝfi 0{Lsn=\97.fap|_8C;{zrO9rOy ]Cx({zȫ^==u{.ӜsOiι4Sq==\9.ӜsOyK4es29p{zLs==\9.ӜsO?g<,:elGcl'S'6cc*.6u\lkpG\l>b 6lTty*:L1,3[^wb=1,3c??6^2Sl~S)םe|[AlZ?6_?2SltT) Ufyz : Ufy*3żAbޠq,3ŸAb LTfq,3 J1necz : Yfq,3H1ne721=Ufy*3S1oPe72SlntLe7cޠy*3żAb̘721bޠL1oPec*3żAbޠL1ne72Sd) Yfq,3ŸAb L1nec*3ŸAb L1op9'a) .$,3żA=Qsbޠy9 L1oPOS) 7'crN2ST) .$,3żAbrN2Sd) YfͷĸAb Lǖsb L1nec*3ŸAb L~ .$,3ż圄ec9 L1op9'a) sbޠLTfy*3ż~_c}KvRlM%}>Y_wĸO֗x/Sb'K1|_Jd}) .Y_ygE֗bޠK1opRT}K1nН(w+Dq[Cus8߭T#vTŹQe׎ fvT\^2ՎKA֎~X;*%cȫ̮̊sR8`Vf׎>TivT4Y;*z=M֎&kGlTLS2MՎ>T[ \;*.TLS2MՎ4U;*׎4U;*.TCՎ4U;*.TiYTg7~,:P_b 6l4q2Ztlym-ͷ'2ZN|gB-E46_8hQ̯w$+ 1EbEԘ7cޠE1oPѢ7hQ1oPѢ7hQT( *Zpd( 2Z-:hQd(6_q1nѢ7hQd(6_qŸAF7hQTyżAEb%cżA= pT( *ZƼAE)Z-yS( 
*Z-qŸAFb E1nѢ7hQd( 2Z-qS( 2Z-y9 E1op9'a( A70Z z\I-yzE1oPǼA=sFbޠE1op9'a( *ZsFb E1nѢ7hQl-q[I-qŸAF)Z-qS( ,T( *Ztl9'a( .$żAEbrNhQT蘢E1oPѢ7hѱy ŸAbM#1na70Ql_$ 2LtLa70Qd( 2Lq ŸAbޠD1oPa70Q\>2L&:0ѯ9S#E^tZ8߰:9QuNu K)Α"/v:SoX*z:/)7,yi)zL=Mu&:EOiɴ·JilTZLSi)b:eJi*S\Nq:ENq:eJ|Nq:eJ=Mln%"8dS>l%"gtɦ3llMW\);ol%"ϵ]P%2Ml~dS\4.=Ml&K6)ꢧɒMdɦidS.=Ml&K6EO%ɒMqJ6e*i.?\).TɦLs͇*idS\MqJ6e*idS*l4U).TCl4U).TɦidS4Y)•K6EO%ɒMdɦi2sSMqy=zqy=hS).27Tד"/;sS E0sSqgnsT=V9E^[v=+y9E6\)zN\)& sI2Ms4UP2Ms4U)9eiS\zNq9eiS*3!_qL +6[QGVl>bqL|LC*6Q>6Oȉqkǖ׏[^?Fpb1=pojc'6!+ f|1!6pL|56#ļAEpbޠ"81oP7Ncޠ"81oP7NT' 7Nd' 2sL7Nl9Nc #81n7Nlxq1oP71=pyUyz4 Ә7NT'6_>yS' *9NT' *qĸAFpb #81n7Nd' 29Nd' 2sFpbrNNC1op9'a' 701Epbޠzyz9 .$ļAEpbrNNT' .$ĸAFpb #81n|> 290qS' 29NY6lp9'arNN\Iyļ圄71Epbޠ"81oPcc6ĘM[b&69M[b&-1fwtĸ%lbc6fy:lb171l~1e(rc6)Y"I:fSo.:ȘMq2ya1|sQ&)7bHq |Md̦i2fS4)z=Ml>ȚLg6Ml4)¹c6ei*fS\Mqb6EMqb6e|Mqb6e=QU) ncȋz.uK7E~M.y٥"/tS.K7EPtJ7e*yVMq5/.T馸LSMditS4Y)z,=Mn>TitS4Y)z,=Mn&K7e*ՏK7ezԼLSitS4P1" l|p̦c1"lJ1"r̦c1"lȚLsMqP12M=+]\g4OiY2c6EO1ɘ͇zVi2fS4)zc6EO1ɘMd̦i2fS\Mqb6e' l4).>a0fb6ei*fS\Mqb6ey12Ml4P12Ml4)z=MlpMd̦i2fS4)zL_lnˋG͋ˋ}zc馸*|MqyU)./J7E,yܥ\A(}"/tS+EOU)z,=Mn&38) Nfp0S[#Ee}128Ee}128Ee}1S\ N8S\ Nq28ei*Si*S\ ·i*S\ Nqل2Mep&c6E>͇5p̦goJc6*fSM7p̦c6E^șț4)& lm8fS\͇i*fS\M).ȚLS12Ml4).ȚLS1׊56=^Ml>b|S$6<؉͇Nl>rb{1jba||S؝9 b|؝o`gl ׁݙ?L[ b:1ugb|WAl? ;՝yļA=՝yļAugbޠ31oPxƸAvgb 31nݙcĸAvgb71=ٝqĸAvgbu7Ldw昞y?g31oPݙ7LltLݙ7gaR4z)zl|&[/EOK_R4z)zl=M^&[/ejiR\|p른LS2[/R\KqZ/ejiR\KWz).T른LSz).T른LSKd Pn=M^&[/EOˇjbR\^l=O]\^[/VC^ˋKqyz)fknG~b nG&[/R4z)zl=\'gA`w2S! Fч C0E |Lч C0E |Lч C0E i*SilTC`4).T{).TLS!).TLS!2 C0e in\X)2%J̇Yp% Ϯy ݕ"op%Ȫ+1Ev\)+1EJLwSd•2MUb>T%LS2MUbJLq*1eiS\JLq*1yq`b(|C$6! 
Gl>T{^)+R7n=+ERKWrJߙR7VnyʇjiR +ez2LS2MW&+EO핢Jd{iR4^P핢Jd{iR4^)zliR ni2M=]\/:*E7W)pp^CU<W)pp^/uW)kU<W)pp^y"q^CU4ΫiW).ez@LsJq-.\>D8R4W)z̫|t&*EOyɼJ#W)z̫=MU&*EOy2MU4W).\>D8R\Jq*eʫi*R\Jq*eʫi*RU4W).T^CU4W).T^i2R4W)T=MU>EOyɼJd^ nWsqYю=+e j%R^)z l~b{n닞&+R4^)zl=MWs_ϤA&]1RI}Lcҥ8w3>tPIl&]>tPIl&]&.eJy .eJi*R\Kq.EނKq.eJ|Kq.eJint).TҥLs<6i2.Evt)z.E^wҥȻFt)K']hKw8R]#NYpҥ^.*R\Kq.TҥLSI2M%]4t).TҥLSI4?痊˵ǹ64צ9rm:HMȵ6!9qm:>Mǵ6צ؜t6צt\\ޱMGg?s?sH\kb_k ^f5ΐ6]ƾ2;CBtgH5"!smttSElN\[_6]6O2׼9!s2׼qk k k5n k 2׸A$db$dqH\͏k^ "!sDBt75n 5opN\Ll~\78'dysBt/6'dy׼qk kek͏k^5opN5opN\5n k 2׸A$dqH\ "!sDB75n ؜ "!sDB7 !s\I ΏK_s$dy׼ k k krN5opN\s$dysB7 !sDB75n k]׸A$dqHĖs$dqH\ k 2׸A$dbsB78'dy9 2 krN5opN\s$dysB&6'dysB78'dbsB7?oPFy8=0uy8HDfC^Qft#C^Qft#ép!(3R3#CO&2#93Df䡧HqΌ<\9gF.Ӝ3#i٦9gF.Ӝ3#iΙyLsΌ<\9gFJ2#iΙ4HqΌ<\9gF.Ӝ3#=Ϳ僼tͿst?F(C*пDQ(C~(C~(Cs(WEyk!<䷀apXmq.<\9?VpXe' S.Ӝ}LsbCOŔ&)ڇ&)=MSz(<*<4QLyiD1塧bes12͹p CŔ4bes12͹p\LyLs.<\9S.Ӝ)iŔDbes12͹R)iŔ4z(<4QLy8=D18?MD1塧bCOŔ#ŹK@1ᲄ v(<\0?!\)%ŔbC/Ŕ^)M Ŕ=MSz(bCOŔ&)=MSN?4}Ck[+AVCZy_IpIin~Dߥ8]]Ҽ.yO.y.p.x.yS}]G}]G}LW ]|S}4esߥ8].Ӝ.i}4es2͹PÄKl/`lG1%\bq|.G@l>bc;1%\bccc1= L淓cCoɄKl~'o$1y|='.1>w͗c}2rLy#w7+Fcʻ~|(6qLy7KT% Qʘ7KT% *wyz2 2wq̻[0wq̻ӣ1ny7ȼKd%6_q̻ĸA]Qʘ7KT\yʻļA]b%cʻļA=J(eT% *ǼA]Qʘ7KT.1oPy7Kd% 2wq̻ĸA]b .1ny7ȼKd.1ny7ȼK\Iwy9 .1oPy70wy9 .1oPycʻļA]bޠ.1op9'a% *s]bޠ.1op9'a% 2wq̻b .1nyc9 .1ny7ȼ1]b .1nycʻļA]brN¼˱圄y70wy9 .1oPycʻļA]bޠ.ǔwyʻ.E^qޥ8_ Py*(.ލ̻PA}w)nyy|Fqnh{7ˇʻFdޥi2.EOyɼˇʻi*R\Kqm|]y2M]4w).TޥLSy";R\Kq.*R\KqR+EOE^>蟖aZa>TE^>kyQ1)1g=c4E!9FSChrMY:FS7Fy͇{c4ezTLs{O`Ls{O`LSdi2FS4)z=Mh&c4*FS4)z=Mh&c4EO12Mha8FS\'?TLSO,i2c4ezbLsMdi2FX.z=Mh&c4E^+pi2FS4)z=Mh4).TLsMqb4ei*FS\Mqb4ei*FS\M).TLS1).TLSMd8?]4PMdi2FSP1MqY..K>1FS\G?T,AP%ꢗMK`8?_c4MdCh&c4EO1Mq~i2FS4 ~=?:lSlzqSMѯ'7ElZCѯ'7qS[ 7Eհq7EOMq7EތMq7ejiqS\M7qS\Mq7qS\Mq7e46n4ո).>qS\}@c㦸LS,yÄK5E0R͇.yqԥ" bQc*C 6Tۏ?_s+^s'~?*sL_?qb|XƉo_5{2Nl~:A!1XwMlc;ӓ.|*6rrL|U?|1ob[obޠ7iT& ~UyļA=iYqĸAo-;XqĸAoI7Md& ~/ƸAob 7iT& ~sLOƼAobޠ71obޠ4yz4 ~Ucޠ7iT& ~sL7MT& ~YqĸAob 71n7Md& ~sL7Md& .$ļ圄7M\IXyļ圄71obޠ71oP7~Uy9 71oP7~YqĸAob- 1n7ͱ圄7Md71n71obޠ71op9'arNM\IXyļ圄71obޠ71oPcļAo~" 
~S/Mq8y|WAosǡkI:Mq8'F]BqN|MqN=Mo&7~S4Y)z|Mq7ei~SNo4U).TLS2MoMq7e|Mqz2e=Ϳ?i"E;SU-q4NG8E^vȫeU :*S):Ed1S:Ex9S\^2=^!Gu*Q"rTS0Gu>M).#eۻ :eۻ :ez :EOQɨNdTi2S4PQɨNdTi2S4)zi*Si2M=^4)rTCEuNNJ:*SNNJ:E~N_9SNNJ:E+i.0).\>a8S\'?TTLSO^i2:ezLsNdTi2󡞼.z=MFu&:E^pTi2S4)z=MFu4).TTLsNq:ei*S\Nq:ei*S\N).TTLSQ).TߠLS}ɨNdT8?D^4P}ɨNdTi2S"Pt=w ;2s;e&zCwL@wq.z&,=wskw0w&;Ź5P4YPŝNdqiS[EOŝ4)./rOC|Odi2S\Ow8S\OqrTLS2MzVOqZ=ejiS\VNl|Lŝؼ<ؼYROli 'K=TRO4K=>c*[bf"/QSo)Ω|3Hq8R>Tŧ8R&+>EOU)z=MV|>TŧLS2MU|4U)+>eiS\Oq*>E^uŧLS2Mo>TŧLS2MopB/U(?E^+s+}|S_E^wȫfy)q*[ǁ~=*d\*/ǁDS\?TLSρi92C?ezLsOdi/z=M&?E^piS4)z=M4).TLsOq?eiS\Oq?eiS\OW).TLS).TPLSAOd8?^4PAOdiSZPO= ? 2s?e&zCLyq /z&=sܠw0&?9nP4POdiSEOχ?|8PqY@e d%0T*z Ɓs%0@EI1T@*T4*zi*TM7i*T\8Pq@eyӍ@ei*@ei*T\}cLSq2@eG;Ɓ4:8Pd;8pSeU)?EaOwXSdU"o.rC2MOq?eiNJOlflelel^1U|b&c$c"c %sSQ!O_c'׏_?zROK=3bHl~"M䘊 1Y9'_c/;cj<1Xw'_c]%TcT"+ܮy5"TSOW])Zk~8S\4=MF~&#?aOdi2SG~&#?EOOd䧸LS2ME~4i*S\Oq"?ei*S\Oq"?eyQϑ2ME~4P2M4F(z=MF~EOF(z=MF~&#?ߢPb /.>T\6?TlH 17Pb8 ~{ciT EOU*z,=M& @Ź`P4Y*z,}Pq?"Q\dX La =0BEaưPqNd=0>TX1,T{B*,T4*z i*,TC i*,T\PqBei*,Be i*,Be i*,T\}bdXLSa2#Be'F4:ȰPEOI"ocqå$Q$*63'>TNyDEf$Q'I$*+'4lTC%4$*.Tw yuqmSX(6/+6+6*6ByTySyRyQBK̯D̯D Q;D1 ST"םA_wb1ww&{Dq Ÿa5Ɗ[b}2VtLd(6sL|B4cEƘc{b<+ļA=+yżAŊbޠbE1oPǸAƊb cE1ncƊb cE1ncz08 2V+q櫷1n7X1=+yӃ1oP7g٠ >XQԃ1oPǼAŊbޠbE7X1=`pTbE1oP7XQd( 2V+qŸAƊb cE1n7XQdbE1n7XQ\I+y9 cE1oP70V+y9 cE1oPcżAŊbޠbE1op9'a( *VsƊbޠbE1op9'a( 2V+q[b cE1nc9 cE1n7X1Ŋb cE1ncżAŊbrNXѱ圄70V+y9 cE1oPcżAŊbޠbE+yŸANJbXQqlbE9Qe+NJ+*ٍ"]9VT(cE\)wtKŹbEŹS4+*z}XQdi2VbEei*VT\XQqT_\XQqmi*VT\XQW|+*.TLS+*.TLSWy"/;sTEsg>T^y͙"q8sTQϝ9*&3GE^osȫy4<>T ȯ=H*z& $=؁"s ȓO IEτ"v /H*4ԁw04U((.H*.H*.T IEO@Rd i2T4HP@Rd i2T4H*z $i*T&$iPt樸7y88sTQ3G*sTQ3GE~oQ:sTQ3Geˇg49*.\>D8sT\'?T樸LSOi2C3GezLsQdi2sh/z=Mf&3GE^iqi2sT49*z=Mf49*.T樸LsQq2Gei*sT\Qq2Gei*sT\Q9*.T樸LS9*.TLSQd8? 
_49PQdi2sT8.zl@*z`l =06>e`'F6Dj 20=^@*z`l 'ڋ~ciTc EO @*zl =M6&H9P4@*zl }Rqg22Ie'd=iT4YO*z'=M֓siIEOz҇'=M֓&Ie'yCIe'iT\zRqIEzRqIe'}zRqIe'in5YO*.T=LszRqY2MՓT4YO*zieϚipwȫ.yǜKTUiwcݥ"sw[]*^w>Tw[]*^w*RqKeswOݥkӼM6ڴ]6-4kӮMݥkӨM6MڴwVvn+]nvn+}5h+]kRln`\k5h+],bk ,צwk5O׸Aqh+]n! mk}tmh+5>V'JJצF[t7Nln+]nxcmkӍ8xLtJ׼tJ׼tc tD[7Rl5nmk J9k J׸Aqh+]._V c mkV1_5opn+]5op~78?|J׼tmi78bs׼9kVmkV mk J׸Aqh+]V tD[75nmk JtD[75op9'A[7tJ׼mkV .$h+]Rln+]5opn+]sys[7tJ׼mk J׸Aqh+]nP tD[)tD[7Rln+]V J׼t\IV-$h+]sys[7tJtJ׼J׼tT[!Jp^JJC^R[tQ=éקVz8p_J=NMqn+=6=Mzh+COm&JŹpVzLsn+=\9N.ӜJi٦{VzLsn+=\9s[29p*m48es衧yLn//Ad9=ezBzNŹsz+{9=u=fC^W!WXs zbOMbO=bO!ĞzC==Ҋ==w==ٰbO!ĞzC==ӊ==䷃==ybO 4NebOinoo==\9w==4{zi"D顧CO{zi"D顧CO&bOiαGi+9ǞpP8ǞpP!Şs!ŞpP!T!T!Ş.3{zLs==\9?p!B4Gs24G.\>D(peˇŞz==4{*Ώ?4{zi"D!/(D顧CO&bOiα4G.ӜcO9p{zLs==\9Ǟ.ӜcOiα4es29'{zLs==\9W%s2͹*p\xi"DCO\xi"D顧臞&bOz`=={*?\}bD2{z l~2bO=0ĞN?{bO=MĞNm&bO9D顧COS⡧CO{zLs4?D=\F=g.#p "УF&GLC52QC✉zQ#D&8gzD=4zLsD=YD=\9g.Ӝ3QiΙ4LCޛLes&29U3QiΙ4LeۧTd.Ӝ3QinRzLsLes&*LCO&2Q9D&ꡧ/zOL=pAs`!+0)0)0)0)0USySySy%\4w3Qy$y#y"y!ǔ8LTlFl^Fl15b1)cJA;c *SP1e *)|1b1LAtbT 8Q{;|8v>T 8v&SPEO)*zLA=M>T LS)2M4*΋4*.T ÿTqBe }TqBe }TqBe =Ϳۋ?F^;uDȫH}J*2#RR|G&TWd*}";"U5}G&#RE^qD #REo*G#REo"vDo*<7ĈTbDo*{EGTDi2"U4*zH=MF㎒9$ nePlffcܯ] pFeHݏ#R?EEg:"7NHtpD*ȧ#RA>TOG|:8"WH%#RA> i7H4 i* ,,H4TLSI`2rTLSI`fpD*i2"4$@dD*i2"4 #RAOɈTdD*i2",TD*X$@LSH4 i*",TD*XTLS`"R2MEeHyg`2M5'.*",Ts"XDdD*i2"4DdD*i2"4 zH@=0F#R`XbdD*X_TD*XƈTcD*F&#RAOy~dD*i2"4 删ɈTdD"R2vȈTLouT/6U cޠbXcޠbXcь1oP1c7yaS kT kT kd kd kd kd kd kd kd kd kd bXc cXc cXcޠbXcq1o|&a kT k,IgưƼAŰ)5 *5 *5 $ayay3 cXcޠbXc`L777~ad kd XL771ŰƸAưƸAư)5 *5 $a+IgưƼAŰƼ11oP1cayayaS kT kt +ȻYa; Y oi9d +g]VpSܳ. w{q(" ۃ{qbX84 za]T +i24úVLS1`bX2MŰVLS1`H2m#4GVLS}`H i,T)X2*hygŒV3aF+[hypyv &:g~cF+3h=Mf&3ZAqF 3ZAohݏ3ZAo7ČV߄;h 7ČVbF+ĝ Ig,ƌVLS`f{ycF+X^ޘ iqɌVdF+i24 zh]TF+i24 zh=Mf&3Z2Me~İ<ȯѺVT9wUhtpF+ȧ3ZA>ѺVOg|:8䗧h3Z2Meeh4E3Z2MEe"` i*,T!XY." 
i*,,h=Mf&3ZEzh=Mf&3ZAnrF+i24 zh=Mfeh4Ei*uQ`2Z2Meeh4 i*,TF+XVLS :,T#Xt(1xl|)2?sc]c~ܕOqWǘtw)2ǝ鮱i=?'1?c ]c ]ǔQ[̘7ʏ15ƿ'\ǔߓy1=ߓyȘ)~\cq1>ۏ19<sy}ycsysy)7<7<7<ױ<7<7<1~dkdkdkl> 25 2uL? \cޠ~ ? *5 *5a?<ט7lw٠~ ? *5 *5W:y)7<1ƼAƼAƸA&Ƹ3&Ƹ3&)5Ɲ15Ɲ15Ɲ15Ɲ15Ɲ1uL 1 1 1L Ƙ7X>w05 *5 LpyJpys\cޠ\ǔ&ƼA%Ƽ 1oP 1o|`kdkdkdkl?0 25 2u|`kdkd\c \c \ǔ&Lpys\cޠ\c`ט71%ƼA%ƼA%)5 *5 :])'MJpL 72c2Aޟr+T1Jpso[stQ 9 zLp=M&.*4 zLp]T+XWLS `\=`,T+XEe4i*]]'ej+4VWLSm`J2M%nbb\Aޒu+țs]T&sy:} y@} o'~yc+ȃs=M&\i/e9ƻ] [#ǻ]A Cw+o0 ]AxWߦ;ww)`2xWL1,Tw ]AO񮠧xWd+i24ﺨxWd+i24 zw=Mƻ~ r\A~SWU9䨝 rNp9j'.*䨝 rNp\A~}WLS `\2r)WLSY`fp+X,E%e"4EiK'e"4˥\AO Ee&\AO W zLp=M&&\AO `\2MeeJp]T+XWLS `\2M%eJp4 i*,T+{Np4UidqQ `J2M,&\AO zLp]T"i24 zLpd=M&&\7AO EeZ `r i*,T. i24 Wd+&\ zLp=M&&\d4 zLp]T+Xٮ5 i*,lךmXb+X~߶],1,[eo. _ iWO{f~3Ӟٯ z~]T+i24Wd+i2,T+ȳv~4 i*,T+XWg i*,T뢲_2Mee~4u<_2Mee:ٯ`fg+XAf&_AOٯ~=Mf&_AOٯWd+iLv<XX' ScArj,Ny̩JyЩ σ95Ge0llklG똂acc5?Tc#5?P ;0ScA:۟ ``؏16ǝ1> Selz' S0ll?r4 26 2vLa1n1n1Qy qo͘1fLlkfLll?2ƿ5cbcc_|gLll?5;cbc|3&6 Ď)&6 *&6 Gc cbc cbc cb{cbc cbc cb1n11n11n17Ș7Ș1,9yQ7ؘ7~Vbbcޠ~? 
Gcޠbbcޠbbc 1oP1cQ7ySLldk#]H?F15Ǐ1>~tSk̏"]c| ak!tG:Hט7X<#]cޠ"]c`gk3<#]cޠ"]c`gkTk,tqtqtqt ƸAFƸAFyFƸAFƸAF)5 25 2uL1oP1o\3u\35 yFƼAEƼr=Hט7H1EƼAEƼAE)5 *5 : G-tL 2s3]p[=7sQ~ <-~S9.*KHAOHEE&#]AOt4 i*,T+t4X i*,T+XMSt4X i*tQ`K2Met=yc o&_AO oMV.~W^ i,T&i4 ._AOj Amq,J *^`Aa 8 ䷦}`Aa0X; ,T,o^ i,l/ 4 #`2M@2 4 z =M&`AOa =M&`AOa0Hv,Q; c`Aa G0Xv`Aa G0X_: W4 i* ,,4kiKe5\T,XXCLS`fp,XXCL\ 8 4 z ]T!i2 4 z ya0Xd,i2 4 i* ,T!X0Ee54 i* ,T,X0XLSa``2M0XLS`Z iu,T"i2 4 A0E.&`AOa0XpvG)`_zL]lWL=j&ł5b"QT&łeJ\TR,XFAZ)GͤXdR,&bAOI"zL]T i2)4 zLAdR,i2)vQI`fJeR,XXL]`m㩌Y<ʘ㩌Y<Ҏ`y<1 dz]1c,2f1 _j1 ɌYp=Mf.*c41 z̘]T,i2c41 i*ciDĝeʘ41 i*c,T,ӈΘ41 iiDĝeʘ41 i̘41 i̘4gf̂eʘd,i2c41Yd,i2c41 z̘=Mf̂&3fAOoEêq``c~k`c|-`ll?1Z81Ƽr]8ؘ78ؘ7X41ƼAƼAƼr]8ؘ78ؘ7Xu9`c `c `c `cM1nq1nqc府q1nq1nqcqqSlTl,匃+匃y府q1oPq1o\36 cyySlTlt,;3  o8o,d,gn8Xp37 7{)X^`8Xp/0=M&` z=M.*,T,X8XLSq :,T)XSLSq`64U` itQq` L2Me4WrXwO] aAQq9,ݲnY >aAIu99Aw9,[|.y尠wrXLSrXd9,[-.=LfłX߲:+w\Ί=Mfł&bAyqV,[ gł&bAOY wV,XX98+vj2+,TK$X^5 iWMfłejdV,i2+4 z̊=Mfł&b z̊=Mfł&b?8X_9Wc]T,o 1`ANq 8E`ANq u,ȯl i*,T,XY i*,,o莃4]8XLSم` 28XLSم`fyCw,i24Bd,i24 `AOq8Xd,i2,T,XBLSq4]i*,T,X8XLSq` 2Meyq` 2M$.*,TA"XDd,i24Dd,i24 zңfbJe,Q3˨U*`2j`2j?XF=j&`AOq8Xpd~4 z=M&`AOq4U*`2Me*q`hO\ݦٮRO6hO4U*;g2vYLS`fJe,XYL]s,T ;gAOYd,isܳ(AO=Mv΂&;g9 z=Mv΂eyҝ`:g2Mu΂e49 `;g2Mu΂e]T,XYLS`fYLS`fYL} b,XAv΂&;gAO=Mv΂&;gAOYd,is49 z4 v΂??ކ{dmF?N?}l;v`tl}v}{̏#g|l#-zdc{ga϶mg?#l;7g zdc{3n=ϸA>N7/y{3opoÌ=ϼ s c#t>cg|1l;#t>cg϶sgu:gۑ:1;fy{/3nϸAt>1+U} c7KD3nϸAt>NH| c7K,U} ϼ^gK;fy{/3op%| ){ y{/3opϼ7w̮G6>=D3=#O>=G}4zd9gyF3>#{dy=ϼG7X#{dc{3op} =ϼr}g#,} =ϼr}g zdq} Gv33n=ϸA5zdq} G6>#Dll} =ϼr}XF3o\_G7>5zdycc5zdy{ll} =ϸAzdlGp+<n7#{un[YvV = [YvyE{}ziGD,z=4# =e{a#{X>zd42ͽLs=,{dyGT=e{au =e{auzXi=e'S~ţd]U2{;*==z(=Q2{;*==X̂{!oêd7T2{[,*=,ܛ<4&CO%&JfyE%&JfŘJf=M̂{!S졧CO%C~SC~GCO%&Jfy@%e{!P,^5Q2{XOiWMiWMiAz(=4Q2{idD졧Yp/=4Q2{idv>[3{3ʂ{!:S!X:֘7XYU˵0ZcޠZT 5R˵0ZcޠZc`fUkTUk,¬jqjqjqjw}ǸAVƸAVkaVƸAVƸAV5 5 uLU1oPU1o\ u\ 5 kaVƼAUƼr-̪֘7ֱ?eZU1oPUcjyjqjyU~;袪ZvPpe>LpSyUއ އ w*{梪ZNep;=EU 颠ɪVdU뢪ZAOUɪEUej4U i}IWej 4 i,TU+;j4 i1tQU`C2M5ej4U 򆥓[A3 Ln]lO&SVJܺwH VO&gVw 
^[2Mce=M&&[A5qr+i2fNn=M&.*WgNn=M&&[ADqr+ȯq '&[AOɭ o)8,Tr+ȯ2ܺ^5 i*,lLn4۫&[2M;24 zLn=M&&[AOɭJn=M&~U r`j90WZA~uV_\ r`j90W.\ U`fy[vU+XYޖ] i,,oˮj4+ieWe\TU+X^ALS`fy[vU+X^AL-4Y zj]T i4Y zjyUɪVdU+i4Y i,T XEUe4U i,TU+XVLS`Z2MUIVLS`zU iW,T i4Y ? zj]T i4Y zj_=ǠmB◱`ju]lu\a+ c]+`y_ƺ幢__T+X+zF,q6Lj #bc|`l,g:1ƼAƼAUƸAƸAƸA*ƸAƸAz0 6 6 6eYY;7XޫXU={ `cޠTU=U=UUy*S`TX+_cc*_cck1V5+_c|5+_c1>Xsc|yz1oP1o\5 uL1oOAUƼrט7ט7XwYU.+_c +_c +_c +_c1n1ncz1n1nc|q|q|SkTk,׻|+׻|yz1oP1o\5 uL1oP1oPc|y|q|yŕ~*_>Rpt^Mpyޫ +_gp\T+ ~3c{J*_=4Y z|]T+i4YWLS`*_2MU/WLSY`F2MUe|yҕ`6MUMWLSY`F2MUe|yoep.* aA0WÂxX2Â~1]XÂxEepxX7 i*,T'X Nd<,i2 Â_%L.,7u.[.,u.[ł-bAޜqY,ȯ] Kł-bAe`f{IeY,XIL,,T$X^RY iTłeJdY,i,4Y z,=Mł&bUr`.]TY,ȁ,\ r`.ubA~aXsY,ȁ,\X}.4۲b۲b2XLSq`fy[vY,XBL-,,T\b2Me+4۲b2MemeɲXdY AOeɲXdY,{n.=Mł&bAOeɲXLSe` 2M.,,T\!XXLSe`b2Młe+4U i,ałe+4WXLSq` AOeɲXp{dY AOeɲXdY,L8i,v.$.$ v&v&Y26ɂ~IvQ?gYlf,Xe1E5ɂY4c&Yd,_I,T,X>I,l$ iIvM&Yd좚dAOM&Yd,iI4$ zl=M6ɂ&d21M`f9~pl?:'ۏSll?d7 1Sll?:bۏGCT,b~zȉe?b~Ny_X,)Sfl?<Y,Y,:q,q,Sfdldll?tLŲ1oP1oPc*yy*q,1blec)1>F,1bll?3LjŲ٘#:b~l,Sl,y*8q,q,q,+U,q,q,Sadldldll?"1 X6 XvL1oWX6 0 *ƼAX6 0 0 X6 X6U,;7Ⱥ]X;ؘ]Tuc~va|`]bc|>.6 kSƼrmʺؘ71ƼAƼAƼrmʺؘ7ؘ7XMYU˵)bc bc bc bc1nu1nucڔu1nu1nucqqS]lT]l,צ+צyڔu1oPu1o\.6 .vLu1oPu1oPucyq'Gt oy8=o]Tz,. y o8=v cNfp\Tz, ~'3g{cAO鱠Xdz,i2=4XLS`c2MǂXLSQ`H2MǂeJy`6?m"]Tz,X(RLSQ`c2MǂAǂR鱠S ? 
o:= ǂ~1= o:=~1==UǂcXLS`:92Mur&cAO 8=4se좢eA~hYЋg좢eA~ hYЋg,3ZG˂FѲ q,3Z- icF˂eeۋ1e2MQeŘѲ`f{1f,X:AF˂&eAOѲhYd,i2ZvQѲt,ȁ9ZvQѲ hYs,ȁ9Z䷀=eAѲ hYs좢eA9Z,,oˎ],oˎ4۲e2MemѲ` 2hYLS4U_i,,oˎ4U_ieG˂&eAOѲ/=MF˂&eAOѲ o9Z4- z=MF˂&e2ME˂e/4-hYLS`e2ME˂e4- i,T,XhY- i,T}ᢢe2Me/=MF˂&e'AOѲ/=MF˂&eAOѲ;⠧hE?X;55f55fٮY4خY4 )ȢYOA.l,SP?忨Y<S`~4Y4 z,K=M͂&fAOE)d,ih4Y4 ?z,=M.2vM͢YLSE`ff,XE͂e*4U4 ik_*45/͂e*455f2M͂e횚E`fY4 z,=M͂&fAOE@ z,]T,ih4Y4Yd,ih,T,.4U4 ih,lX4 ihvQE`f2M.h,T,XYL} b,XYL} b,X>h,T fAOEɢE͂&fAOEɢYd,ih4Y4 z,=M͂ecf21E@0yo# ?wnHvm Ad?9.ԍO#cc*'ci˱x~uc4~xl?6+>zݘ[clc~lU1c 執s?"7z7z7z~mdnd@c uc uc9c׍y*y*SnThTn ^7]cz~kuc|XOz1bnl?8Lj1N^7>uTucޠ !c uc uc u{uc uc uTYYYʌq׍qS!d,U׍y*y彊1oPc׍y*y*y׍y׍GEƼA뎩2 ^7뎩^7|`uc|>^7uc|>^7S1>X;z׍yڔ1o\^7 ^wL1oP1oP1o\^7 ^7 kSƼAƸAꎩ6Lju1>F1bmnc|Xwcĺۘrƺ_'Xwnc|`ml?1 k7ƼAƼrƺۘ71ƼAƼ?8yqLu1oPu1nu~{';Uw w{A% woyu ovQu~W7w] w/4Yw z=M݂&nAOu4Uw i,T-p4՝ i;,T-X[\w i;,ӼG[LSݩ`S2M݂eygu[)]T.?pA؅ pAzp.'pA??م FpAުtRDAޭt.Xٞ]T.XQLS).\d.{=Mv 䗻՚pA/]pA/].\w>܅ ^w].\Ћg.X^م i9]l/4 icveŘ]`4م z=Mv႞&pAO] ymA~i.\spA] .\s.݅ DwpA]`Xy[v.X.\L- w- ,,o4հiewejX4۲p2?.\LS `2.\LS `fy[v.i 4مEd.i 4م &pAO].\d.i ,T.XELS]4հi ,T.X.\LS]`2Muey]`2M5,. ,T"XEd.i CAO]jX=Mv႞&pAO]k젧.vh\7q7qGh܌]l܌d4.'qUKgf4.X%\T4.XZBLSh\d4.$=i24 hE&qAOѸh\p%=MFゞ&qUKi nFe47q2ME.*,T4.Xh\L]pR4.X.),T4.X. 
i*,l܌4;h\d4.i24 zJd4qAOѸhEEゞ&qAOѸ`qAu4.Xh\LS`fh\LS4 i*sQѸ`=2͒q4bh\LSѸ`fh\L} b4.XhAFゞ&qAOѸ=MFゞ&qAOѸh\d4.i24 z4 FecqǠ-y*--lVNXl;_vt^̏^l;v\5>l;_muϸA>QDg xqšϸA>Ql;7>g^xy{q3op} uϸA>Nw|϶qC93>~(}ymg4?rg~y5弱_CPyq(} 7rXyC93nϸAg yq(} v3nϸAg`yC93op| 1>^+y{}3op/} ϶2y{9ol\C3Pxq}ͣ7>QG3nu϶9q}ͣ7>Q,ן} O>:^xy{3o\7>P.}T>^T>}*p gɊ|@3>P+py:ϼ7W> g(߲r 5n@mn@{v DyyDv !Wv v $n@Jo\\p/=4Qz{iDCO^z{Xie{!o&Ls/I=,KR42ͽTz{Xi%i%e{Ia^z{X.Jo=sZLd/ \p=_ !w━{ѕ{'/2pE!)䔁 cm9ei'/2p=Ls/=,C=Mdz==4<D#!T#!`R#hFC~ˤFC?Ј{qqqqt@#ayX^ш{XhiWj4i1eee{f&q=M4zh=4ш C~k CnH{ )R!7 C~ C~ CnHܐ2p \p=,3p4;2pΫ 2Ϋ 2ͽJL*LsR<,,=,-=,ܫ4*2Ϋ 2ͽJL*D \pR<4{i"D!*D CO&2p4 2ͽJLs 2ͽJLs=,3p4 2=LsR<,3p4 CުVa^xXW){a^xXW)z=4{ \pR<4{i"Dꇞ&2pڥ:r˟]# l=,v\]# l\.{X{9!i儇e{9ᡧCOz=4{zrCO&r=Mn儇&r=M{9afTG@a^NxX.{X{@a{XiK_{@afTieR{XiKuiAz=4{i" D@ yi" r=MzCO&r4CU@a^yXqiA=,8= Ls<,8= Ls<,84 iecr4 iAz=4 &r=Mz=4{i" D@COecr4 ch?;nu t!t]tu{7.C@]ޥ{X6yѥ{7t4;o,|ޏŽÏ3 O`3cAcq"|l?>2ۏg T'cZ7>al?Y8 ZwL1nպ1nպ1nպ7j7j1~ƸAVƸAVÉTUU9jݘ7Ϙ7jݘ7jݘ7dtc1>F,Ӎ1bnl?7Lje#L7tTtc|`ndndndXybndnd0c tc tc tc1ne1nec*Œy彊e1oPE1oWL7 sLe1oPE1oPE1oPe1oPe Θ72ݏ"wL11cȍqȍqȍqȍrƸOVƸOV䎩"7}"7}"wL1o\+"7 "wL1oP1oP1o\+"c,qK,qK,qK,Sm[bm[bm[bml?0-6-vL1o߲6 vL1oP1oPcU`O m;U;g w{$ w2lN@ppQy~' w,J^ _0؋  % F^\LS4Ji*4ً z_=M₞&{q=4ًPBd/.i4ً ^\d/B 2 `B 2 `zqՋ i,T/.X>,l؋ i,l~,T/.X>,T/ {qAO^\d/.i AO=M₞&{qՋ z=Mey ؽ`52MUkec{q2MUk.,T&XjEe4U iA4U iA4 ed/.i4ً^\d/.i4ً z=M₞&{qAO^\L} b/.X>w} b/_>~ciJsA\] .>^KsA )Ee}=`JsU i4,,ez׍t1u?O~0~l?PyLT~rl??OSnl?H9ۏQg?F#p7F1uƸA6ƸA6ƸA6c uc u٨٨YSnTgTucޠ?cޠucޠucޠuc uԯۏ"cnuc)1>~׍c_7Ǐ`_Cد;~_Cدkuc uc uc uuc uc uԯٯٯٯqq׍qSOg,c׍yy}1oP=c׍yyy׍y׍ƼA~w1uƸ3vƸ3vƸ3v>c;tc;tԡءء;_ء5;tԡաwcľ#ݎ6Lj}1>F컍1bml?J0Lj}_'wnc1NvL}1oP}1oP}c껍y߰v[&Aw̿,)q7/8K~"߄ŗ~"߄]p ܋/&Dp q~"ȯ 7!M&Dp oB z=MF&#wAOEEe4 i*rmGed4 i*r,T.04 iuQ`6MEܑ4 pG&#wAO|,]T/? 
xAX xA~^l~f-E^L=Y^LS`IAO^S4Y;^B, xA~^.J.\a/ xA~^pw.\a/XNL,Tj'X^Y i*,l/,48 x2Mv4Y z,=M& x?^s/ȁ7.90 xA u/n] r`.90|s x2Memm`fy[v/XEL-,T"XYޖ] iqQ`6^L-,T"XYޖ] z,=M.4Y z,=M^d/i4Y z,4U i,T x2M/e*4U i,T/XELS` xAp/XELS*4տi4Y z,@dAO^d/ȳ.=M._{;,T4y2ME#eF=M&yA3s;bv^d;/iwQшv^d;/iܣAOvEE#ey2ME#ey2M.,T;/Xv^L}v`;/X>;,T;/X>;R;/Xv^L}v`;/XvA&yAOv^d;/N&y zl=M.4 zl4 y2Muxe4 e\T;/XOLSj4 i,l i,l iAl4;v^d;/iwQv^d;/i4 zl=M&yAO`fv^L} b;bv^Y z]Tu/ݲnY |{Awu/ݲnY z W.nY iw{V.,Tu/X^L۟g_9Oc113cϱ~sl?L~xl?9O{}cOJc {}9χqqShdodol?zL1oP1oPcyyyyyA7c˖[c|leo-[~c1?jSo/l-1n-1n-1n-c=-1n-1n-cjqlqlql'ƸAƸA4 8ƼAƼǖߘ7v1ƼAƼAƼAƼAJc {}cY1ncqKqKqK'Ƹ%Ƹ%7-7-wL17 :7 7ƿyߌ1ߌ8'e?o[.c|޲wL1>o՝;~7?vLo;Au ._pS]~"q?) zpSx|_ /rڡ~$7P`po8Ipor~$ M} &Ipo=M&CAOP`d(0i2xQ`B2Me yǡ`rb2MĂe 4 ΏC2MĂeʉ]T(0XMorb2Me %CAOrb&31xQL gb0{pN -A&01kAދsb0ȻqN ^T*rN 4k iM,T*i214 s'& A0N q e^T0ȯ' Y8a2 18a_:N4UA i,lo4UA i7 e]2MUe q`*H' z=M &z},˅A raߕ\\. r}.> 5w0\ ˅AN‹*4U il /l e=`#2raLS`fyv0XE e4{˅2raLS`fyv0i\4Y.Hd0i\4Y. f˅AO rad0i\,T0XHLS‹*4U i\,T0XraLS`#2M e*y`#2MG.\,T}$XHd0i\AO‹=M &˅AO OǸ\4Y.LGeCʄˆeCʄ\T-1X6LHl}^a-bZbl}^a-bZb/o%ZEeBe k2MeB.,T&$XLHd-1i?/+%=M&k z%=M&k=4YK z%^T&$X>,T&$X>,T-j2Me%4e k2Me/e%4e%d-1i4YK z%=M{&ixQĠZbd-jAOĠZbL} b-1X>,T?)X>,T?j2Me']T-1X~RLS`fZbL[,T?)X>,T- kAOĠZE&kAOĠZbd-1i4YK z%=Meck21ċckAO"dJ1_8R zL)=jSA)ŠG͔bУfJ1Q3[RbL] 0,TJR2MeJ)4ۥS27iB퇴_BSjl3?o~c/ cӯc)8}0|-0h3h 4F28gc@[c|lh<@__h c|}aqdqdXycqdqdc c c c1n1ncsy=1oPq1o18 *uL1oPq1oPq1oP1oPƱl7@~i[bcccc1n1nc 4qK 4qK 4Sqg 4q &c &17dd@e#A>4A>"48oPHcp?\T1G~/'i?e C^T2ܿj_UÐ w a~ Ð.P; @ w]~( z C=M!&ÐAOaȠ0dd2MEe C4 >Ð2MEe4 i* '!e40dLS`?2M!e CÐAOaȠ0dd52/] RW#~TX Qa52ț2Fս A t52[F^T4,ȻF4 U i*,T4,i4Y ;}W#&ÐA2 -ÐAރp2o 0E!aȠH CDb2C5ÐA~0dLS`"S20dLS`f{g2X: ,Td*X^ i*2uaȠ0dd2i2 yQE0:  r}CyOa 0ds29 !;0ds2F!Ð i*,,C^,C4{Ð2MVe=a`B+20dLS C4Z iT!/l!e 4{ÐAOaȠ0EV&ÐAOaȠ0d7 z C=M!&ÐAOa`2MVe C^T2XJLSa`2M!e C4Z i* ,T2cC4Z i*rQa`B+2MV&ÐAOa =8 4 z C=M!&ÐAq2i2 yQa O4iQh%XJL} bo2XE&e 4 &/AM4 &/AM4 &e 4 &e \To2XJLSddo2sM^l؛ zM=M&/*4ٛ zM=M&خ0 i*c,lWw?qsl?>`cO:_tKۏSsl1~wl)~wl!10#c8c"pcocc ۏq `q `Sldsdsl?{L1oPq1oPqc 
`yy `y `y `y `yc|lc~ `esl?=ǖc `1090ǸA0ǸA080ǸA0ǸA0)9 29 29 290<ژ7X[{cޠbk[[{cޠbkc cy1nc `qK `qK `qK `Ƹ%0Ǹ%0)9-19-1yL11HyLA1 w ~TXydrfrl?5]3H9]3H9< w w 1)D'[A>JU^,sfsA>XUA>^U9Wo\ep?\T2W*eU=M*&[ղ ߷ۃ}E, w2 :ȖepWnY[AVE,~+( &[AO-ˠɖed2ie4ٲeLSi`Z2M,eeLSi`t2M,ejYy-`t2M.e,T.X_eyQ-`ZA~ed2ie4ٲ w,ز Qa2G- ﯸenYˣe/lYy- eyQ oe,l/lY^T2XYLSɖed2ȯݲ zlYo˖elYy;- v2'[ղ `,~"e- -e7nY[2Mueb4k<[2Mue5-`X2ϖeLƳe,T [AO-ˠɖed/A̠w n] r`y# w fu3ݺ W0% fu3i4] fLS^%7-NH$#? :O'ِtIQ<]<<] ,j+EM_i f`QSfyڻXT@U0ʿ5y]/Ew3jh5YnK>nK>ZM0V) &RZM0/Ha^h5 S5݅E=yaQsOa^0/,je {¢¼0/ZR5݅Eͽl0/,je {¢^.,j)́Ha^h5¼j>;ߨ :r\ 9SN慾dd^KN\+Q'BΚy/: nN\S'3po];u2/,j_t275ۯ :5څV &:r:]́*GADBD4/GADB (y!WѼw"{DBN+y2D4/]\(PDBN +y!'Ѽg.,jY E=uaQ=Ѽg.,j"5 {Vk "ZMD4/h^h5 1[D4/hV "rC "[E4/h^UE4/:"[E4/h^H5 #5z̅EWD3<ѼYh^X15^ {=¢fy+yaQs {=¢^Yh^X15^ &"ZMD4z̅V &"ZMD4/Z"ZMD4/h^h5Ѽj"yaQsh^X15fѼc.,j {D¢ѼG4/,j {D¢Ѽ;8Ѽc.,jM2 RD¢^j"yDDBPDBV &"ZMD4/"E4/h H &"5z̅E6 BD¢^ #5z̅E6 BD3 ѼنAha"50 {=¢f!yaQsnja"5z̅V &"rK"mV &"{=BV &"rK"ZMD4/,j mEͽsaQ ѼG4E=yaQsh^Xl D4/,ja"5E6 gh^X#5E=9 &"ZMD4/h^h5ѼpK]h5 #ZMD4/h &"ZMD4/,ja"50 {¢f!yaQsX {¢^ #5ՅEͽbuaQ ѼW.,ja"50 {j "ZMD4/h &"ZMD4/h^h5Ѽj"yDDBV mE6 BD3 ѼنAh< ~o7/D~9ۋ D~B߂o^ȣo^ȣo^[ } "yoA7/,j7/,j7/,j=yaQso^X5ۋ 5EV܏?9߾~~aj ۏ O ۷ϴ w4?Lmaavaqafa 9lLmatmatmatmaNatmatm·7ۜx`y909AAcsTodcs-xmk}0^[66ڲO9+xاoek>0^#)[EO0)cr'ɋ@eB@J9P/b9P@e +@ey@*e}_Oo eEM*&V} "j>3nٽ 'T2p' }B;p>P}B;^k-d2p_k 촻ZKrZKh5ٽ ^ZMv/&V@ed@u/*5ս ,j{w/*5U ,j{XT2(^5U ,jTwEM*57575ս 䔶V@ed2j2IjG19E(f'brQ@NW;(f`jrQ@.9yh\5s3~jpWo6aVay56l?-3l?,3l?l?*3l(06dd{K}̰}{Lɰ0:0:FFF7Z [ [ [ [SksTklTkajmj jmjmjmjmj jmlmugksؾ x;[x}_0^w60^w69=lmwo[ [ [ [Sksdksdksdksؾrdksdkajm0;0;Xlmj=LavPavPav<fFkf0^#630&Vdor?{>؛AFٛFٛ|z_@^&'E@^)',},@^0'}K/8P},p/G *N575U ܧ&V}>jV9}B8p>P9}B8p' }B@( @:GU<9}"QkZŁQkZEd2j2Gh5 QZM(&sV9 ,jXT2e  ,jXT#.e`QS9@.58GXT#.F܁Q5Ո ,jXT2F\`QS92Gh5 QZg5a2nUrͭ@_l*}٪ $[u2@_l*Ve ܪ5|:POEM%mVe`QSɧfUXl *J>=@Ved@*&[V@Ved2jUh5٪ lUZM*mVe`Q ت< ت ,ja[V>c3P<1诏 @}L`fl&0}g3S(N` f`Q1XT3އ ,j*yEM%0J`5EM%0} @fOC,}#h5<泯0KO*KO$m>IO,e>IiQg)9I,e~ȹQg)}@e)@,ed@e)&VY@,ed2j2KyEMR5 ܽEM5 
,j*KY|g)5;PYrpEMR575? R>Y@,ed2j2K}g)}_0K,M0Kbg)9_,e g ],RrY@eR.c2R5 EMM5YrSE`2T`Q= ,jEM,ed2j2Kh5jS7լfe`QSլ ,jYXT2އج ,jYXlClVZM6+fkV_P% OLav<ff3avPÇ)Z9*z8*z8g젢 #Spؾ{gfcྜϾk&}/ ;=}9@}G>/c0,jXT1c`Q7&V}8jxVf+2p 'GՊ 'G}r4p 'GT+2p 䌣[ي 9Vd>oVdd+2jh5ي lEZM"&[V@VjE5Պ ,jXT+2nE5h ,jXT+2Vd '݊ ,jXT@"j5h ,jXT+2snEZM"&[V@Vd gh5 IdM`H2&0$yk$9dM`H2&0$h *C#ȅ$G! ,j#X<0 IZ_K@NV&9Yve2j2h5Y LZMV&L5W ,j2y*EMUL5U ,j2XTe2J`QS*d`QSy*2XT^%ۛn\| +V@nqe2j2h5Y LZMV&ƕ@䁪Lr?+V*E6 be2ʁL5W ,ja+a+E6 be6 be2نAL5W ,ja+EMUTe2J`Q*2@V&&+Emdde2j2y*V@dde2h] L5W ,ja+EMUmd`QSyU ,j2XTe2نAL50*E6 be2d`Q نA2@V&&+V@dde2g\ LZMV&&+h5Y L50f2XTU)نAL5UU:PJEMUTe2R`QSUf2XTU)نAL50Jde2j2h5Yex)T2п S7}@q2bܧ oEM)M}E&>e`QS}է ,jOXT2ޤا ,jOXloRSZM)&>w58^9l?p4lFSk~h~h~a~hؾaWO ۷  |{FF|^ SkdrdrؾeaW { {=LavPaavPavPavPavPavPaavPavPataaFa [7ugr;Öx|–xΰ0^w-;aaardrؾtda [ [ [ [۷ [ [>Laav<ffaavP!Ƈ)l9*l9*l9֖ܧu}4p_ ܧT2p_ܧ}wS>~o! ܧu W-$~NEM+W5 ,j*_X@|ed2p]W|emOj>zږj>Oj[t2pX} ۖXu2p9ն l[ZM-&ۖVm@ɶed2jmh5ٶm@ɶe wmh5ٶ l[ZM-&ܑۖed@-Ym@ɶe`QSQfmXT@-50m0mfmy`mXl -50m.mXT%Kd2jm-n[؆Al[ZM-&ۖ*h5ٶ l[ZM-m@ɶe`QSQfmXT%نAl[5u9PmږEM-mĶe`Q ض ,jmXl -j[50-j[>m@ɶed2jmh5ٶ -&ۖVm@ɶ*2ZM-&ۖE6 b2نAl[5Ud ,jaۖEMT2"S`QSEն ,jXT)نAl[5Ud ,jaۖE6 b2ɶed2jmyږVm@ɶed2jmh5ٶ l[ZM-&ۖE6 b2نAl[؆Al[50m>e ×* _̌×,/}U Ua@/e0 /yf@`0|XT2^ ,j*|XlY _5U}p|eA *.O>,|pYZ`Qsb~V1?X\,jU5*^ܫ\?h5* *^ܓ\d?L~p2%5eJ˔).S\d_&3?L~Hf~p Y\d?)K$3?|pY-xܓ{2VsOf~jZ=A'3?h5d|`QsMf~&3?X\:,j55k2E5AN#Z`QsѽpMf~>X\kt,j5d9ωdܓ{2VsOf~Hf~j=ZµAk4?hM9!955{&{O\AOkN\AOE 4_4?X\sj,j9Zͽkd}_^ 'P PAߟ{8F:FX,o(q~&h>X,o(q~&h^8?X\4,j 5J,j 5J{Vs/qpM|j%ZͽA8?l8?h5^K{E͵暠`Qs-qp-q~&h>X\K,j%5kE5AZ`Qs-q~[-P`QsM|&h^8?X\4,j ZͽA8?]@(q~j%ZͽA8?h5 %ZͽµAnPCVs/q~&h>Xlà暠yZ`QsM|نA{m8?Xlà6 K,ja^`QsM|نA{E5Aµ暠`QsM|j%ZͽADm8?h5^|ᚠK{Vs/q~zQK,j 50h/q~&h>Xlà暠yZ`Qs-q~8?Xlàf%5m8?X\K,ja?kE͵8?h5ZK{< ^K{k"Vs/q~j%50h/q~نA{E5f%5D k"E5µ暈`QsMD}نA{E5f%50h/q~&^K/\K{Vs/q~j%ZͽA8?h5^K,ja^`Q  0h/q~نA{E͵Ag:?Kg:?Kg:?tKf:?_=AAl?5l>l? 
05<[ڇڇGS5<,5<0:Ȇ0:Ȇ0:Ȇ}0:Ȇ0:Ȇ0FFoffSsTlTsTsTsTsTlTsTseΆP0^w6<9l?0ם χ9ם alxugs~ `{6=}f0p_g7l>3xƁ}!p_ ܷ[ *3K`}!p8pXྦྷ%p_g7g`QSEM> |5 |ZM>I@ |ZM>s3Lo>{ꟁLo>3L3t3p}ꟁLo ';] !EU ZM?&럁V@gd3jh5YN[ ~ ,j7AT4[`QS}@g?'럁s3}ϩ6WoA?9g '] 䴳럁Y젚j젚jSStTSmTStTStTStTStTSmTStTStTScSt;xΦ0^w6E0^w6E):םMaقaaSt{FoF} M SSt,?6EA5EcStTSajjjǦ0;0:sN}i#p߫ | {}A8p/mhྴh} n6}i#h`QSEMG 5 ZMG@ ZMG&]|&e4pyg}@J}9pyg* g9}Ti>J@Ρ:UșgJT4j2Uh5* LZMJ&SV@Tid@JJ5* ,j*U9~Jj5 ,j*UXT4N5 ,jwREMj5* ,j*UȉWJ&SV@Tid4NZMJd41rz"@i '9 #@ic4P]`02rXT@EN5{ ;(rj@N.c軌@er@NtcȩnwL}cz:v4w;EMUi`QS1 ,j2XTe,5U ,j;EMUi`Q=1 ,j2@vL&;V ,E@05&P}軌@e,..s5w \t5w \@ .RMP T@j t@PJ5˻ wP?Enj`QSfy7q5ρ*5 ,j*X,&.5 ,jwP& V ,ZMP& \}w5jh5Y@ ,ZMP*5 ,jz EM*5U@ ,jXT5O`QSfz EMJj`QS?V@j ,h5Y@ ,ZMP& ܳjd@P@j`QSfXT@PJ500fz`Xl PJ50?XT'Od5jȝ.؆A,ZMP& *h5Y@ ,ZMP @j`QSfXT'نA,59P EMPmj`Q X@ ,jXl P*50 Q5j>iàP& V@p5jh5Y@ ,Vd5jXl Pmj`QSfXT`@P l5:P[EMmj`QSfXl P l=@jd@P& V@jd5jh5Y@ ,ZMPmj`Q X@= X@ ,ja EMP& <@_OP}=Y@ d5g\@ d5\OPSj ȳA.5U@ ,jXl/h,5U@ ,j4P*j`QS Ej`QSf{Ac5jh5Y@=P@jlj<ÔG"XSu~k Ob ?Lya1a&a!aa):l?N(7l?4_:L avP avP av<@f@F@=P }m#p߬O +J}E8pmjྶ/h}Jn>}m#j`QS EM%PJ5@ LZM&P@JZM&P&*gR}}^:py}^@Qy}^:pyG 9>j>/Qy@NKZMQ&V}@>jd5jh5G ZMQT5>j`QS}w5 ``QSEMQ9>j`QS2XT0 ``QS}yu5jh5G ZMQ>jd@Q@쭣&r@xjd<5r@xjd<5j2E*ST;/j2 S5 ,j_K`v5_tv5ήdv5'] ?]=P@α;XT,Y`Q=:] ,j*mvEMJ5ۣfE`v5Y`Q=:] ,jGEMjdv5j2zzC(U-gWj2fW}1軌?@eή.sv5 ή.sv5wT@j tv5j:H5] ήj`QSfy7qvnj`Q8XTo(YM] ,j7X,&ή5:PzCEMzCEnjdv5j2zzCV@jdv5+ήZMfW&V@j`QSzCEMeWTv5P`QSEMeWʮ5 ,j*X, gWTv5P`QS] ,j7XTo(j2h5] >)gW&V@jdv5] ̮j 9h5] ,j7Xl fW j`QSf1z`1Xl fWl fWmj`QSf1XTo@eW 5 ̮ZMfW0@jdv@&V@j 7>;h5] ,j7Xl fW 50zC*XTv5j`Q ] ,jaEMeWmj`QSf1XTv?ʮZ??mjdv5j2"ήZMfW&VU ̮ZMfWmj`Q ] ,jXl fWzj`QSU^*XT+W`Q ] ,jXl fWmj`QSU2h5] ̮jdv5j2h5] ̮ZMfW&V@j`Q ] ,jaaE6 bv5j`Q d 7Yloolb&k/6<&k/6lwM@Gr5d ,jXT5d ,jXlool5d=PMEM5YME&kd5jzVM@&k毿d}^u~d~ajx ۿa aa&:l?5lߡ?l?5lߟ?l?05Y'[~itGMaatGMևW7:::l>:0A6YA6Y&0;^0;^dfիfdfdfdfdfիfdfdfdFk^0^wZ:l?0םև:םaugu~*a{kZAZAZ^0:^0:^0:^};0:^0:^kfavPav}D>g/BkྜE}@^u* ܗST5p_N W;rJ?@^JEM^*5Uz ,jXT5kd5jϰZM^T5jh5Yz=P@ STx>OS*OSTx>O 
l>)[g`(Or}*=1 l/63Ї3b3!f`y@Gr6 l`QSf{{c6 l`Q1XT@e`5 ,j7f`53V@ ZMf`&3V󿿋ϱ^_8jO wop40p~?Zl S w~m~La~r~Ha~ajO( OV' UtG-aate[ [ [ [] [ [S pd wd wؾwajjj>L-avP-avP-avP-avP-avP-avP-avP-avP-avP-at-alug w;[Cx}Zx0^wpG3-aa w~*cd wd ajlllll>L-av<ff-avP-܇;;;[Z[}ZRI$pW 7J|ྵ.p_S:P-}M)p_ /kJ& ܗ;j5 ,jXT 7n`QS-ZV-@nh5=P-@nd @ps泏 ܗe}!p_fܗTb7p_fܗe}!p_f8P}!3N Qb7p_ftV?&V@ndb7j2h5 LZM&v&*XTb7n`QS@.W8XT2d`QS\pb7d`QS= ,jXT2n`QS@!;h5 LZM&v&Mvb7j2{\pb7j2h5=P@{neb7Nr܉@{neb72ȵ8'vTD12ȅ'vJ>Šn V] +UWt9n2Vt}ȹ~Wto~Wt}{*wE7R]`QSf{XT@Ut*5U ,j+EMn`QSf{XTE7ndE7jh5Y=9 ,S@9U8'v} 2[@߂LNtb7\vb7TӉ@n tb7j:H5 NRM'vTb7T`Q8{`yqqb7Y^\ ,j-X,/.N5Ֆ ,j'vjKn`QSmREn`QSmfyqqb7j2h5=Pm@ndb7j2]NZM&v&V@n`QSREM%vTb7T`QSEM%vJ5Ֆ ,j*X, 'vTb7T`QSm ,j-XT[*j2h5 ,'v&V@ndb7{ Ln f9h5 ,j-Xl &vjKn`QSmf1{`1Xl &vl &vmn`QSmf1XT[@%vjK5Ֆ LZM&vՉ0@ndb@&V@n 7[;h5 ,j-Xl &vjK50R*XT[*n`Q ,jaEM%vmn`QSf1XTb?JZ%vV@n 8h5 LZM&vT-j2h5 ,jaE6 bb7H[`Q ,j*vEMEn`QS"mE6 bb7H[`Q ,jaEMEndb7j2{V@ndb7j2h5 LZM&v&E6 bb7نAL؆AL50E6 &<Վ@nM`7&!wmńjՎ@HXlv5۫EMwՎXT7n`QڱXT7^ ZMwT7jh5  ^_Ls6A٠7S]bg833m  X ?<@; Z_9mm˶/e m_m }l;~v~d6~vl;<\˶#/ /΍l_yvettgol;2:mǗA_F6~DeA_F6_F6~Dev|ێv|/{evpom2;_f6~Dehmmtx6_뎶x6~;/ļ=h=hl;2: /hmm3 /h/6~,?_fm2;_f`ym2;_F6p[1p/.v\\mp'ns.VL.ܶ@\ip[ nynn..vm u {¢6/,jm {¢6/h_h56p[¹jmWQm % &Ɓ{BV9bJM@9C2m4pO&_ ] ] ] ] Y Y Y =|2t!'Lp['/V.,Z͟='/?{2B'/H&_h5Lj"|D2BdV=|aQsO&_Xܓ5d\Q2¢=/,j {2B(|aQs{^X{2¢=/,j {2BNM+|D2BdV &rZ &Ɂ{2B(|D2BdrSR|FOB|!SR|FOBK\SO9pi^hSA)_X{]@/Pb_Jr6X% 9?Qb'JreB% 6\PBs^bK*1_X܃5E@¢4 K5E=hxaQ=:Pb /,jGJ5E@¢^b/(1_h5Qbj/rJ́{B|! 
Ubз JDB߂(1UbJr[% y |!TBTS% JRM/*1_H5UbK5مEs`yqQ¢fyqQ¢ Y^\Tb'.,j/,j |aQsO]Xd5ˋJ5مEV% &J́{BV% &JrOJZM/(1_h5Qbj|aQs/1_Xd5s^b'.,j% {¢^b/,j {¢f/,j {,p/1_Xd5مV% &Jr'JZM/(1_h5Qbj|!w|D9p/1_ȝd*1_h5Qb'.,jaJ5Y^b'.,jaJ́mE6 B9 PbنA(1_Xd50% {,p/1_Xd5مV% &JrJ́mV% &J́{BV% &JrkJZM/,j mE=AvaQ Pb'E=AvaQs/1_Xl /,jaJ5E6 B¢^bنA(1_XKVg/1_h5V ~Vg]h50% &JZM\߅V% &J50% mE=waQ PbE=waQs% {¢뻰نA(1_Xs}50% mE=7% &JZMV% &JZM/(1_h5Qbj|DBE6 B¢f؆A(1_Xl /,j% m* |!O(iК |5ABkLU;u}c]^:PJՁEMT:Ru`QSJՁEM*UZM&KՁ*Vd,UXVU,UZMsP@uQ:jqh5ٸl\ZM6&ׁVո,jqXT:u eܸ,jXTs5u`QS@.ϸqXTs5j\5\ ,jXT:u gݸl\ZM6&ׁV7ոl\u oܸl\ZM6&*Xx:v  `xm<؁\t@ULm<؁\rXrn@fQ]ƌu 瘝,3ց˘]ƌu ;lޒcǖ%vVþJT^ɠ3ց@5c,jj 25f Ԍu`QSkEMA5/X5Xl8cXd`QS3j 2f&g&g&gԌu oBzAX>gy'3ց>9cC3ց>9cz:gyz:=cC3ցT3ցT3ցT3ցT3ցT3ցT3ցT3j:f9q3ցEr3ցEM5ˉgo{P3ցEM5X,'.,jj-YN\eg&g&g~[uuu uu`QSmEv23ցEM=,j X5eg,j X55cXlA,jj 9ch59ch59ch5?2@ќa XZMXZMX?9@@f u`Q]q:9f u`QSsj:9 Ԍu`QSsEM5eg3 ,j X5eg3BXZMXZMX?@@@@@@@@f u`Q]qvf u`QS3ցEv/u5u5u5u _43ցք3ցք3j:Кp:h8c`;u`Qq:f;u`QS3ցEvfԌu`QS3ցEMX5yg,j>XZMXZMX?@@@@GVׁVj:jr:jr:y:oj:jrAm\3o\ZMn\ZMn\ZMn\5峿jeZv;2Vdذ[/:[a׿n ߰2cv^3brͰ[/:͚aa{5Ac\FeZF!=c-~ֱ/ \FeZF=r{Zɗi{0;/:0;avPֱAcZf:j{:0;uavP=Np{:0:uaEױLױ c\vQ4Np{.q{20:uat_uat ױAcF=r˴=ߍ\f=ߍ\feZf=j{,=j{,3kT}+~,AMg7X'Xc)5x?1x K 3 o>k}?~/x?1,jj:AMg55Xtv`QSفEMMg55h59h59x9 ~PفVفVj:;jr:;jr:;jr:;sh5xL bv_]>_fʃo)͔>߃/}!w|sw&wԮvhW;j~h5h5h5h5h5h5v,jjW;]@v`QSSEMM5XԮv oxW;)\Ԯv`QSSEMM5XԮv ]@]@]@]@]@ݻځVjW;}h5h5h5@[+jrt;&GÁ@y\ @\ @NggE@=ۋ܁:诓܁E@]ۋ܁˶}r;Ї yŋ܁ 7yŋ܁Vj;7[X԰e`QSÖEہ܁EM [>E- ,j\,jj2~;p;a԰e`QS܁EM-rZM.rZM.rZMnަGv oozt;78=裌ہ>8裌Gyyt;GGyݣہ<<H5=H5=H5=H5=H5=H5=H5=FZ ,jsn?XM<X,&,jjm.YM<X\`Qxt;5X\`QSksErnģہVہVjm.jrt;jrt;jrt;Zxt;jrt;jrt;jrt;jrt;~PہEM55Xv`QSہEMn56Xv`Q\ytAn56X܃,jjm.@@@>@@@@@>@5<h59X\`Q]qt;5X\`Q]qtvf 2ہEv2ہEM=Z c~]qt;jrt;jrtAZMnZMnZMnsnZMn56XlA,jjm..8X܃,jjvuP^:v`qH؁švuֱCz؁!c!ױױ؁VӮW>Z$ZO^:v:v:ZI \\,jc5 ױZI ,jc5ֱZI ,jj%Ac5XJb`Q]p;f^:v`Q]p;/:v:v:Z\\\\\\\\,jc5 ױl+\,jc5X:v`QS/tvtvtvtv %tفvفvj:;q:;q:;9y:;9y:;فEMMg5I!~PفEMMg55Xl',jj:;N 9h59h59&&&6}$"w"Z\\Kv^g^\~P܁|̋܁6܁6܁EvE,_Xj󟷼L[nr vev5jذ۪a9_-aRsB _vev3.it0:-at[ L{G{G[ 
Fe+F=r{ʗi{^0;/Ӗ0;avP[젶AmyfW:j{Ԗ0;-avP[ nyF'=~eii{0:-at[ ny%nyQF=r˴=r{0:-a Any[젶[젶L[젶Amyw#Amyw]=!x KԖw|W}-~&~|5 XԖw`QS[j;-,jj;-@-@-V^~P[ށV[ށV[ށV[ށ@Z_]ureZ,jj<@u+V+V+V+V+;W&W x oy9[797Pၼf8 ,j,jjAM55X g`QvDx`QS3EہEMp55f8,jj"@ɉ&2EMM=&2 N5e'lA,j N55XlA,jjrAM55X^DxDx D2VVjr/jr"V.,h5h5f&&‚[ށEva-f‚[ށEM6>-fm|P[ށEM655Xl,jj1.,Xl,jj h5h5&&&&&&&&‚[ށEva-ۅ‚[ށEMmy5XԖr;q;q;q;oxvR-@;-@;-hh7BȗXԖw`QS[ށEvR-[ށEMmy?-,jBny5Xl'~P[ށV[ށV[ށV[ށ|m[ށV/}$r"<ĉ5>8ȗ<ȗ<j"qJϰ[a =nLyv9r\|0:q/Ӏ0:р0:Ѹ0:qa/ ŇATAvef5:j@4.>j@uԸ0;qavPŇA f5.>j\|Ըxd}ma LJ/B /2 /F_8<>px|0aat'ALJAF9<>~n0:/0;X~orx|0;X~orx|ix|0;av0;av?;QvZ}0~A@5Jx?x<   k?QvE e}_;~PA55JX(%,jju@e^I^I^I^I^I^I^I^IP+EM5˹W,&^I,js55X,&^I,jj0YMX8裌||3>8&f'&'P,jsN55&P,jsʆ_&}8G?vc?vcaDr^$vإ]o%]إ{Ǯ~rǮׇ~a b}=0cts?F1cK6?F1ctf1:a^vfa{=cvf1;xf?fa0{1 }0]/`}0 Տ ` `Ǯ7~`3 ?F1cK?F1>f1:af1:a^1:afv,71cvf1;X~ob=>f1;xߛf1;x,71ctP { ^7:^O.ޣ { ^7:^O.-Cޣ ^7:^O.-]x݃G^H^,\x݃_x=Tx/,jޣ h{=ھy/,jޣ h¢=ھy/m_h51ھEjb=m_h51ھjb=&F^HZM/m_x"]h51ھjb=s"]h51<^7^&15kxϽ/nM,M/nM,nM,Ms { y#As {1jb}BZ=j~VsϽ/{}s &ZM̽/{5E{}aQ{_Ȼ]{_XԼ~5߅E{}aQ{_^{_XԼ~5{}aQ{_XԼ5E{}!oAh}BVs &fZM̽UBVsBVs ¢槩ij~MͿlڇNlBM/=1M/)S aJa/WL/&_țǒ|?; !`g?BBޮB,BުBެB_; &vZM/m/wv5-ۅE{vaQ¢ex/,j[ ¢fՁE{vaQ¢ey[ ¢潳jbgB{}rN/,_=¿5¿75¿Ї FBP# yj!A/&_CP# FRM/_H55¿jj!B{aQ^\X,'.,'._X,'._XԼ'5ˉF5ɅErE{}2_XԼ'5ɅErE{}raQhB{}rBV# FZM/_h51¿jbaQ_XԼ'5{aQ^\XԼG5E{aQ_XԼG5eF5!{aQ\XԼ4E{HsaQ\h51¿jb!^BV# &FZM/䃕_h51x/䃕_h51¿yi.,j /,jC¢=.0.0¿.0.0¿.0¿yi.,j /,jC¢=yi._h51¿k?]aB_5¿jb?_5¿jbaQ\XlA_XԼ45eF)Ү0|W)Ү0|W)Ү0bJ``J``J`|F]h5?0BiW{uè ]`JĔB)Ev)Ev)E{uaQ]`JaQF 5aԅE{5_XԼQ5aԅEv)E{uaQ]`JaQ]`JaQF)VS &)VS &ZML/_h51jbJĔB)Ev)Ev)v)Ev)E{JaQ_XԼbJÔB )vƈ) S -Z0L/{:_h0ЂaJÔB)|EES \¢fՔ¢=yO/,j3FL/,jS¢=yO/,j3FL/,jS S &ZMLS &ZML/_4j~dڇp/+0¿iO/>>1¿i!_[BG5/_XԼG5Y F5{aQ_Xlg5lg5i1\v_a?~In}|G v7>ݯ }?v?~hiѰ[aG0.\FeZF?rH- --A.\vweZf4<ji˴?jixB0;avP A- Zf?j 0:aXW _F'eZF'?Np aaat+A_v+A\v+0r i,?j,?j˴?j 0;X~7r 0;X~7r\a^oOB}k?~V/x?xzP zZOޏ޷gB`QS EM-?Z,jj?&&;V j?jr?jr\ZM.ZM.w^&&B`Q5o;o;f܃x7;fo_m@m@m@m@m@m@Ѷh?j~h5h5h5,jj?m@'`QSÁEM5XԶ y?Զ`QSEMm5XԶ o^x?jr?jr?jr?jr?1h5_EMnZMnZMn?m@m@m,jj?mMͿ75ij.o oSߦf 
ކlfQ_uq!B`QSSEv<Ã4@WXyw5 +𠦺@<CgB_ȐC&0 9gCV!@Ch7j~4h5?$!@ɐCd!Nr,j| ,j|}P!fy}P!fy ,j| ,j|$!fy!@ɐCdA&CV!@ɐCd!j2h5r 9ZM$!f;gvϐC`QS!B*XT!C ߙq!b\yxNhXy}<>VZ+@Cc!N'Yy,jIVC,jIV<5$+EMUT!C`QSf;d!C`QNh5Yy<>YyC s!㓕Uy*V*EvVC`QSUy,jXlg5<QE 2,,c<YRM,cT1@E t"j:fbEM5˹cs,1V\1V\1V\T"V\1V\1@ɘEdAZM,&cV1@>Ed"j2fh5Y5,jj5ŃY5XT"E`QS1bEM,Y5ec*fXm`QS;*fXm`QS;V1@ɘE t"j2fh5YZM,(cV1ĎYZM,ڹ ,j ,ڹ}P1vn21ecEvĘŃ21f b"f b",jj6@ɘEd"/`8f` b"j2fh5xP;V1@ɘEd"/`8fh5,jj6.,jj6^E`ly<{U"|<٪WX>v^E`9իB*}سWÞ@UZM*ޕ{V@px*&{Vf;g"٫,jj`8٫,jj`A*,jj`A*,jj`8٫,jj`8٫,jx*B*&{VիUZM*&{V@^Ed"jWh5٫,jx*<y<{EM*U<^E`QSz|ƽ@^E _qv:^Ec"Whث 5U>*m{b_;d"N'٫,jj9N'٫,jWXlU5իxPzEM*tzEv:^Ed"jWzV@^Ed"!W~ێ(bR"/j:fYb"9f胗1@Ye"9f7˘ŃY4,&cEM,픇1b*fXT"Nyx0fxߎY sL1a0(f1>HL1a!2>B a+H_Űv4>4o /SbѰvx4*f1f12f12feFFF?Y _F?Y ,L c crL1avPL1avPbbbbA,A,AF){(Ű5a2J1[F)oX Q/SbR Qa2J1~j0J1?_Fv02JeR R R R _FF2E);QavPQavcbTfff8F)AE);QavPQav?;oifx?Nx?x߳ zP5qmV}A,{CXcfx t ] ',jfXT"ŃY5U,jfXT"E`QS5@ɚEd"h5YxP5@ɚEdA,&kV5@ɚE}C7jfh5YxP5jEM,wS7oWZM5=kە*x 5 &Fd^#j2h5kZM5&VF(h5?kZM5&Vy,j*XT^#F o:Xs`QŚEM5kEMm05EM5k5,j*;8kZM5&Vy@ɼF ':h5xPy@Tt^#j2h5kZM5k5,j*XT^#F`QSyeEvļF`QSyf b^#ƃk5,j'XlAk5kZM5n]1`;c^#&0hxP6y@C`^#7k25!0X>MX!>F p^#n[5}|2h5kZM5yy@ɼƃk5,jj:F`QSy@>ZF`QSցEMMX5ۯ5,j*XT^#O8XԄu`QSyVy@ɼFd^#8|y@>F25}1裌y?y@ek(s^#O9ȣy@F t^#j:H5kRM5*XԮl`Q8`97q^#YM,jjW6YM,jjW6YM,jjWA5ڕ ,jjW6YM,jjW6YMkZM5Ԯld^#j2h5SzkZM5&Vy@ɼF`QSyvekF`QSEMZM5&|yeVy@ɼƃ kZM5&|#y@ɼF`QSEvļF`QSEM4ǧF`T,_;UgA#||*hXl4@,hfA#G6 >Y^.hZ͏&GǁVNY,hZM4Tf;UgA#f;UgA#U,jj8U,jj8f;UgA#f;UgA#NY,jj Y,hZM4TA#jh5Y,hZM4& V@ɂFdA#NY,jSu4l,h5U,j EM4*h4m |g$ @ǂFcA#̸hX},h>4}tf;dA#9f;dA#F`QNXTAA4*h5U,jI4*h5$ V@ɂƃ*hZM4& V@iƃ*hxY˂F sA#/胗Ug.hZM4*h5) EM4TA#F`Q`;aA#Sиa[t˔v/bݰ27ְwkzҗ)1>& aI#˔v4>/% JoaLo Lo Lo|fɇA7A7Ww 9,0:0:0:ư/Szc,0;Y/Szc,0;0;0;0;0;YavPat a11~jGLh a[T=bB˔=RBcN01~wjN01?'Fv??2eJh Lh Lh Lh FF2%4 avP avbBcTB˔fff*&4A%4A%4L avP avP /t^A5g Z*x?kx?<x o=F`}C7~B+~,8~A5﻽c0sEM5k5xPyEM5k5,j*h5kw{&*h5k@GG 5o6ff#۱6;}{,6@+}Xa#:}V{V@Gd#jț}ZM>T#O?XԦv`QSځEM>}9>,jjS;~XԦv`QSz|4ǽ6}5}ZM>&{|Ƚ۹ {Km2>m2>}ȣ̽@e}8>yH5}RM>{Tӽ@G tA> ,js>,&}5˹{EM 5˹{EM 5˹{EM 
>G`QSCEM 5˹{EM 5˹{V@ǃ }ZM>&{|lн@Gd#jh5,jXm`QS,jXT#G`QSzEM>}5e{XԈp`QS#XԈp`QS#V@G hu#jh5}ZM>D{VI@>Gc#ا"`~PՎ"`bՎ Ev1jǃbՎb_a#ا)]̰Xv55Xpd#jȧ]x]̰h5Yv<)@jGd#jM\v55Xl3v55R#|B sOS3X>!9Ω,_0G^9}2胗a@ s-19GKʁV%@QA-)ZM9&Ev0G`Q3XԒr`Q3XԒ s5XԒ s5XԒr`Q3XԒr`Q3Xlg s5 sZM9&*h5 sZM9&Va@0Gd#j2Xlg s58q9 s5xPaEM9Z_[uhoLz[&=-|gIۉ(I@{ˤǃJz[&=-I@3G`QSցEM[5ۉ(EM%=DI*XT#G`Q2XT#NDLzZM&=T#j2h5LzMo|BJzxˤG _s#9h5xPI@_Gd#G`Q0XTA%=Jz5)S&=?01~nE~z/q_Oa7> aI_ǰv4>o& /ScZҰXv411ePFFF?} _F?} >L { {vLavP6ԿLavPzzzz6ԇA>ﮋ. -;!- aWe' -;!U'd2~j222~m`d' F F F v,02e w;!:!`N0;Nȗ222q |:!:!:!_NNȃޏ>2T'$~!~1x?0xߧ{PYrX rÃ޷rlG5 ,j:!EMuB5 ,jXT'$jh5 o!ZMvBT'$jh5 yP@NHd'$jxBZMvBT'$NH`QS5 ,j:!EM5;jv5mjh5Y5yPs-@BbAUM jx?xߧ o!?I}.~#7\5 ˪IG bjh5Y5 ZMVM&&VU@ɪIjh5?Z͏&VU@ɪIdAUM5U5 ,jj[5U5 ,jjXT$I ojX.v`QSUU5 ,jjXT$I`QSU@r$jjh5Y5 ZMVMy7U@ɪɃ򖬫&VU@ɪId$jjXT$I`QSU&EMUM<.X5 ,j VM5e&EMT$RC`QSf b$RCd$jj\5 ZM>Ԣv Pp#]9>kv/d#hXk|nƵ@>ǃZ}ZM>&kuGdA> kEM-j5XT#G fp#E}5Us }`G`QSځEM>}ZM>&kV@>2ǃ܄h6Gkj6Gk>X]QG 2> k<\}RM>kTӵ@G t#jjEM 5˹ks>ĵrĵrĵrT#rĵrĵ@GdA ZM>&kV@>Gd#jh5Y}5U,jj 7ǃ}5U,jXT#G`QSjEM>2ȵU,jj8eU,jj8e@Gd#ϥh5Y}ZM>&k|._Όx3@HLG)Z#} 1X$3X$>tD`94VXXmd:"j2x3h5LG<@tDd:"j2wLG55VXlgLG5U,CP"|u!Xv:D`9>UB!}|u@Cd"h5? 
Z͏j7jh5Y,j \,jj7Qf;f"QU,jj7QU,jj7Qf;f"Qf;f"NY,jj YCZM!T"jh5YCZM!&Vu@:Dd"NY,jn!T"oXT":D`QSu|u/d"޲hoYC[!Q!-uUC[!-|uJ,jQ!C5ۉ(EM!T":D`QSuf;e":D`Qh5YC<:Dd"jh5Yk1D[ENGO)菏@|LG7#}d3#@LG7#VJNGZM#JG5EM#T:"tD`Q1`;b:"·Z:8?vc'0J֏]_{sǮǮǮ~:$ď]]ou4̏]ⰻ+c 0?v?vcvJ<ďAt%~+11:ďAt%~~ď]/]]r]JDWǮWC]{s{ݕ1;xon?fď+cvJ71B=BǮv~#!~#!~z_=Bb݇1>ď?ƟCΏ?Fч1:>ď]Db݇1:>ďA!~c?Fч1:>İcvB݇1;X~WcvC ?f>ďcvB݇1;x!}w݇v!~~mshwtbDɅmshwtbDɅmsh wtb9# '*aӻ wt"N,,jщE;:xG'5¢XXԼ wtbaQN,N,N,L/NщVщVщ;:j":j":j":3j":j":xG'5¢Xxݙ wtbaQN*щE;y'5¢XXoSN* {vHd,n/~"x'2D!ȅ׭a^,8%2G  ` & & & & & & Nd,;j~DB Nd,;x'25D¢XXԼ yXE;y'25D¢XJd,,jމE;x'25D¢XXԼ w"c!o)j"j"j"j"7Xh5 ywYVVVVE;y'25D¢XXԼ w"#]!.XXԼ 2E;ox'25¢7XXlAHd,,jyVV|CVV;oj"xX/k,u5֦ m mXhXhXȧ=X{k,v5ZM5ZM5ZM5ZM5 &w\c!o+yX/,j w\caQk, k,,j ¢XXԼ Pw\caQk,Ik,,j w\caQk,k,k,k,Vks5/^] q{Uv25(C\A5(S\c!25)@5(S\c!T\c!T\c!T\c!T\c!T\c!T\c!T\#k,,jqErnF`97Q\caQ(yO/,js55 Ern¢=!x55 E{BxaQ(yO/,js5ZM5ZM5 VqVqVq|QqVqVqVqVqE;yO/,jq;y55¢XXԼ w\caQk,,jqErFXXԼ5a;y;/,j & &  & & & & B FbFb!UbFba]Xng],,R߻wba]XwwZj.ZM.r.Y*B &B &BBBBBB¢潻RXXԼC ˇp.],,B;Bbar|!.] DbO.DbO.Db!GRb ^h5?w";t|C wb!_PbaQ^XԼ5¢XȗXXԼ5};ty /,j E{_xaQp#tNXXԼ"tj"tj"tx.ZM.ZM.ZM.ZM.ZM.ZM.ZM.ZM.5 7B ;t(ty.5¢XXԼC B &* Fc!_/P#RRRRRRRy>/,jޫ ,EͲ ¢]XXԼ+wcaQ`,,jEv ¢]XXlg`,`,`VVV;t|Bw"],,XX>;tПB nB nB }"t/B }"t)t/)tj"tEM.ZM.ZM.¢f;AbaQ]ޡE;tNyl<],,jS.)=+ϗ1pݟͰa'eZEaTuʗ1>Xݯ a*Tv2>J `a` ` `|ŇAV0AV0AV0os ` `|Ň*Ga;_ 0;eavP_ 0;eavPavPavPat껃!'+{ ưa1#V0/ V0La9 0`cN11ew+ + +_ 0: 0: 0: ư9atat/Sc,XfUf*V0AU0LavPavPavPavPaC*_ 0; 0; Ɨ11x? U w޷T!#~2x?x?x? 
]) XT!#Bƃ*d5U,jXT!#BF`QS@BFd!#kh5YxP@BFd!A2& V@BF}:jh5YxP EM2XTQ!ƒ*d5UT,jXT!#BF`QS E55+jxߡ CrvUUCr7O6X AW9U@ t#jh5YrZMV9&VU@*Gd#jh5?rZ͏V*ǃr5U,jXT#w],jXT#*G`QSU@v#*G`QSUU,jXT#*G`QSU@Zt#jh5YrZMV9yU@*ǃrnVU@*Gd#jXT#*G`QSUEMU9r<.X,j V9r5eEMuT#.B`QS]f b#.Bd#j'P\rZMV9?2{j3;MZ<-{=/d#޲hos+yy˞GdAmfZM<&{V=@>AGdA<{EMmf5XT#G jq#6y5-y9ǃ܄=h6G{j6G{>QG 2<<{<yRM<{T=@G t#jzEM<=˹{ErnG`QSKɁErnG`QSKɁErnG`QSKXRr`QSKɁErnG`QSKɁErnGd#j񠖒&{V=@ɞG t#jh5yZM<y5XTA<y5,jXT#G`QS=zErǃy5_X~y5_X~ud#jG2yZM<&{Vm@ ƶƃ팑m@ ƶFc[AM#E0M>F`LSE0MZ058|m#Vm@ɶƃ lkZM5&|m@ɶF`QSSEvȶƃgSu*Xv|F`9,`;`>#g|!>!|FA3}27BgZM3&j8j~4Lh5?g5g55LX0q`QS|&~P&,jsj3&,jsj3휚@|Fd>A3&V@|Fd>#j2h5gZM3휚f;f>A3EM3g5,j*ȧgKK|F p>v|Ff>#R3h{gZj3-5@K|F p>#-,,*XT>#|F`Q2XT>#RgZM3T>#j2h5g<|F`xP )X>!3 1ȗxK#/>xeg=3&*tg5,jS3g<|F`QSf;a>v|F`Q0`;i/ư%age+vrn~Ӱc2?a̴W>>o|#yȰ8v82>Lma[#Cx0;ƶ0:ȶ0:ȶƗi|d[cd[cd[c_FF?+F?jk 5S|ʇAʇA5A5;a1~`Glk ma=b[c0~Q]Ŷ0`[Ɵlk 5A5A5A5b[cd[cd[FFFv?=1ejk w`]Ŷ0;Ɨ11111~h{T[ff25A5Aw7'Tw#~#~(3Qx?3Cɷne7'g8Tw#x?^x?/XTwAu7n5xPݍEMu7n5,jh5n޷&h5n&[V@Gd#jh5l}ZM>G@Q#j~xPZEM>j}[EM>j}5,jn}5,jZEM>j}5,jn}ZM>&[V@G oh5xP@}w#jh5l}ZM>j}5,jXT#G`QSe[EvG`QSf b#fƒj}5L,jXlAl}5Ll}ZM>D[V@Gd mR3h S5@yG!@KHf  Zj@C |!@>Hd$j2h5 q@&C *g ,jjl;B EM@C EMm55XT$H qA@ @,j*XT$j2h5 17@l& /E >yP > QH 2@y9ȇ/ QH t$j:H5 RM@C T! ,j*X,&ư{ßư9a4{0;_0:0:t0:0:0:ưAatݍatݍ,at e>YSwcY22ffư{Rxvu`[c0^5^ưa15^0 FFFXy?b[cd[cd[cjk lk lk lk ǜA5A5>0;XޏffS[cT[cT[cT[cT[c=h?15A5A5>0;0;0:gGT#9o?i{d.A59i{d.?T#*1XTO!j2h5 h#!V@HHd$$j ȡ D)݁rA$.{sDm< "6 hY ,r@r ?gZMD)݁V@ɂHdA$C.ZMDTA$.5uJw`QSt5U ,j 9#D:;S*5U đ " XTA$cG.5uJw`QS "V@ɂHdA$|.<&,?EGz *cA$Ы@2D\ x "\e.RMD "T@H tA$j H5]yP "EͲ7qA7qA$Y&.5U ,j "EMAX,{D:AD::3eoH`QSgP5@ɂHdAAAh5Y ,ZMD9HdA$j h5Y ,5U ,j " XTA$H`QS "EMD*5U ,jA.`G vx;m<6@;`2h v<`Gg#3h|EMmXցEͶe#m `G`QS,j*XT#ٶ v5,j-,V@`ǃ vZM;&o{,/*X^;ˋ`G'|~@>h`G'^ vz}2G&vZM;T#8h5,j*Xl; v<`G`QSf0`0Xl;l;Gx/t@evwݯt@vw+}lv/aE>~,bؽ?`ǰav?1*112121>212121~n`dcdc: }`0:GaScHHfFxkĘǰ{h~cxv5bc0_#<>ǰ)? 
<>0'fFFFXybcdcdcy y y y 'A0;Xޫff{cbScTcTcTcTc=>*10;0;0:Ș0:G=jBH=x3x=G=4xOޣv*xOޣvt=4BW }5xPBEM> }5,j*h5 }_ZM>T#j2h5xP@Gd#j2xh5 } }5',jOXT#G`QSEM> }5,j*XT }ZXXj  ,5 'J³Y͒{$/<%~,yP͒{+>(y, 'ݲ%'YXaEvO`$jYh5, lZM6K&%V͒@fId$jQ$jQ$jQA5Kj5, ,jY)7Kj5, ,jYXT$n5, ,jY%EM5Kj5, ,jY7K&%V͒@fId$nZM6KT$nZM6K&%V͒@fI`QS͒%EM5Kc%E1͒ 6Kc%EM5Kc%EMT$C`QSffI`QS@fId$LnZM6K&%V͒@g2h/NMԡၜr$+rvA@/M4 &^ z90hq*M=HI Y4 ZMM&&VA@8hh54yPA@N8hXԡEMXT$I g4 ,j ,j*hXT$SWGˁk!G˧×Ty$|:|9+@lG9H g]yX fy$j<_,ZMG&#wy$j<X7:ET\$,FǸH`Y ,Aq2.H"^z90.ȧP ZME&"d@G'3ZD'3?H ' ,jdNf,j*.XT\$王5u2s`QS'3?H`QS'35u2s`Qo ,jdf2.Xl[E:ZME&"*.h5 ZME&"Vq@ɸHd\$j2.Xl[E5 5 ,j*.XT\$H ' ,j*.AEe\$3.h rqq@ϸHg\AEm<"6q@ϸH  ,j,j-,"EMXl[XEhH p\$q@OE> 3PE&"*.ȇ> 5 ,j] "EMET\$H`Qjyj ,j] "] "EͶ_0^eG _4ak>~ɇݏ _4a=~O k?Ȱ0~a}oy#݋0;_0:0:t0:0:0:Ȱ{dyddyc:}0:GaTfu0;?0;܇AG5byd#xXv5byd#aF, #x`yd 0'X### # # #+U, , ,|Latatata0:0:Tf{##`ybydTyc* * * * * 'ׇAG>0;0;TfUfUFYF2:*I=$I=>xx=*I=mx_3zJx_3z4u=m$w T=XTAUI5U%yPU$EMUI5U% ,jJh5Y% ߽ZMVIT$jJh5Y%yPU@*Id$jJxh5Y% <*I`QSU$wJXT 5U ,j@XT$*I`QSU EMUI5U% ,jJXTZMVI&$VU@Z@h5BJ=J/6K(K/6K(,<J/6K(,/K(xZ v %!P:8C,ZMP&K(V%@Jd %jh5YB ,Z?*Z?*Z?*<J`QS%J(EMP9J`QS%J(EMP*r>%J(EMPT %J`QS%J(EMP9Jd %jh5YB ,rp%@ʃ*rz%@Jd %jh5YB ,jXT %J`Q} b %>`J`Q} b %J`Q} b %Ã*5~,j*XlXB ,j*h5YB ,r%@Jd %jh5Yɿ{g7T۲ߖ @N97T9J*^Hl| @/$6TP `*V @渡h5P lZM6T&*qC%j*qC%C:YB d %.r%ۮ%@OP&K(@Jd %jQP&K(EM`{b %LPrhoW,J`Y%TB K(^,z9JK(|%@Jd %j򠎑ttGH?J ']B ,j,jXT %.5ut`QSH?J`QSH5ut`QoYB ,jf߲Xl[P:F,ZMP&K(h5YB ,ZMP&K(V%@Jd %jXl[P*5UB .5UB ,jXT %J ']B ,jAPe %ƳhYB ,r%%@Jg APmI p$j2vb'|:ñ@I`QSf۸0vXTAN5ƅƅf۸0v`۸0vXlN|s/ʏ]Ǯf~"?v]b Ru%~zL|!?v=cǮ~7 C(?v=cv>\By !De}AP~"ct!F"ct!a?F!ʏ]ʏpʏp,5BǮ!5BxBk~xB1^#P~]1^#P~zx@x@Ǯ)~ P!wB1:ʏAP~"2W!ct! C(?FB1:ʏAP~1:ʏAP! !w`yBBvP~!wB1;xP~?1;xP!wBvP~!w B(?FB1:uPӅ(k/.,T^c Ql_]PYxM,N^ پwAe5vXx-^S*wAekjaQ.,,jEͻyT¢]PYXԼ * wAeaQ.,,jVVw & *wAeDAeDA%.,(,(,(,_h5QPYh5QP s~gyT5;y 5rA]PYXԼ w`aQ.,,jEͻy 5¢]PYXԼ * wAeaQ. 
DAeDAeDAeDAeDAe5jx ZMW^8 + + i핅КК lw0W^q + }C{eaQ>txaQ^Yh5^Yh5^Yh5^Yh5^Yh5^Yh5^Yh5^Yh5^Yh5핅V^Yh5yW5¢^YAW5¢^YXԼ+ w{e!GB^YXԼ+ w{%n,,j핅EͻyW5BNjjjj"j,h핅Q{eD{eD{eD{eD{eaQOSn,,j핅Eͻ>>>>yW5 W5dD^YXԼ w2baQ} B{eaQNF,h,h,+ &+ &+ &+ &+^Y핅Q{%>|!^YWrB핅^Hh,BB{e + ^Y腄B΢jsh,h,h,h,h,T+ &+w{e!G^YXԼ?_XԼ?_XԼ+ w{e!^YXԼ?_XԼ?_XԼ+ w{e!G^ + w{e!^YXԼ?_XԼ+ w{eD{eD{eD{eam핅巽^腄J}B/$Wz!+ ^YȅB I핅\Hj,j,j,j,j,j,j핅EͻYj+ e¢^YX,W5¢f~yRxW5S5S5C핅EEͲP{eD{eD{%>|D{eD{eD{e!^Yh5^Yh5^Yh5^Yh5^YXԼ+ w{eaQn,,j핅EͻyW5¢^YXԼ+ w{eaQ| R{%n,,jއ/,jއ핅E0E0V핅V핅BV{%m?IYX~۶@$eamCHB/$DRz!! V$e!GI lDRz!! VVVV$eD$eaQN,,;uH:YXS' E;u2|DEVVVV}NBN,,jީQԼS' w$>'zaQN,,jީVdaQ>'zaQ>':N,,jD/,jD/,j]*R' 9 m¢fۥ"uy=VV;uj"uj"uj"uj"uj"uj"uj"uj"uyN5¢:Yg:YXԼS' wdaQN,,jީVdaQN,s5xGRB$eaz k8W$%m~IY赂HBDRHBDRz 6,,j/") Y mH¢}¢f"yGRH¢IYXԼ#) mH¢IYXl_DRZMDRZMDRHBHJ,,IYȇ>IYX;K^,C,c,D$e ") IYIY(IYh5I #) ") &") w$eaQm\IYXԼ#)w$eaQ8IYX,IER5KCEͶqA$eaQm\I92&'[Wc:x~}=ݗcP˰b|L/ŰJ g렃?Hʰ0~^`}wv?-07|LafIIFIFIFIFIFIv |L '?8H0;Hʰ{c :x/珯w|a@e*xPǀʰ{}*xPvϟ+c =a02~$bT@c |122221TATATAT * *`yc@eT@e,c |LavPavPavPavPa 0;PfPfP*** * *_FW iW bA{ &|Pq{.&t i W ]{0bT\%? ,j*XT\%J`QSqW ,j*XT\%J`QSq*Vq@ɸJ]d\AU&*VqW ZMU&*wVq@ɸʃ]`QSq*w*XT 45&,j*MXT\%J`QSqEMU5W ,j*XTZMU&*Vq@ɸJ=!h5WyPi@ &,Oc%Ђ1xZ0fY-,*he `̲?{,Y{">fY:8}̲ZMfY&,VY@,Kd%j2h5e ̲ZMfY&,VYe ,j*XT%,K GOe ,j*XT%,K`QSY@8XT%,˃ʲ5e ,j*XT%,K gRe ̲ZMfY&,VY@N8h5eyPY@8h5e ̲ZMfY&,E?M?MMeYʲ5 fYc,A̲5 fYʲ5 fYI<,K`QS5jE1YjVY@,K GÜe ̲ZMfY&,VYg?͖@M,J[elz-^elzA6[l *c%onZM6[9fKd%jh5l lr͖@f˃jr͖M,j-EM5[9fK`QS5uhz`QS͖-sA5[jrT͖M,jXT%jh5l ̲~Jd?%~J`Qm\O ,j)E͒p?%Yj5Kf۸XlS^Sdɰ|Lǰr 0v_av_a%|tOvݳð{r}o)ß~ʰ{hT?/c?ed?ed?c:w~d?ed?ed?e=?21;?22)ΝfuOF [)S?e)xO~ʰ{Z|)xOv=a xa?e! 
}22222>~0:~0:~OFOFOFOv |Lav2222)))))aavP221SASASASASAS2S{AS﹘{5> T?% ]{0BASA{ & ﹘O G5O ,jXT?AS5O ,jXT?%~Jd?%jxh5OyP@~Jd?AS&)V@~J]d?%j)E?MMS<~J`QS XTC @`QS)EMSj5O ,jXT?%~J`QS h5O ZMS&)Td?A5&)V?c-Y{(2hoe NJ-,Ye ̲[fYﱢ-Y@{,K=XyāEMGXlTfY&,VY@,Kd%j2h5e ̲ZMfY&,VY@,˃ʲ5e ,j*XT%-β5e ,j*XT%,K g[e ,j*XTAeYʲ5e ,j*XT%.βZMfY&,VY@,K ^e ̲<,K '_e ̲ZMfY&,VY,E?MMeYʲ<>1Xley} b%>1XT%>1XT0AeY F5,jA̲5̲ZMfY9q,Kd%j2h5e ̲rKd%'-nrAXζl dA5[l dlz %fK Gl lr͖@fKd%jh5l -V͖l -EMXԉEM5[jr͖NT,jD-EM5[9f˃j5l x-EMXT%fKd%jh5ly0l ,+ZaAVfK --\+nraV7[Vl Zq%jH5l nRM7[-T͖l ,jX,; 7[,; 7[efK`QS͖faXT%Yvn5lyP͖[,jfaXqEͲp%jh5lyPǭZM6[&-V͖@Lh5l lZM6[&-EM5[j5l ,jXT%fK`QS͖-EM5[j5l 4-EMXaXaEMh5l lrd͖l ,?S͖39 fK-^lz91m7[9f˃mfK-^l<&-V͖@fK Ǵl ,AfYAfYSeq젎,hz22׏Y)2׏Ya~̲ ce=>׏Ya~̲ g,˰a02fY>,ǔefefeFeFeFeXycededcʲ ̲ ̲ ̲ iAfYAfY>,0;XǘefefefevOvLYavPYavPYavPYavPYa<0;,ǔefefe,젲,젲, , , ,u-.ؿFNSJDL=xOScJD=xσ<*x޳eEM%bJ5 ,j*1EM%bJ5 ,j*XT"&j2h5 &1*h5 LZMfi&4VY@,M ~ <,M Ϝ ZMfi&4VY4E?M?MMeic4E1Y fic4EM51c4EM51T&&F`QSMf,M`QSM@,Md&wZMfi&4VY@,M g D4; ,M`;d&KY d&KY2K%,M 49t&j2Kqgi&4VY@,Md&_ZMfiT&`5uh|`QS5 ,j*KY0gi:4>C5 ԛ4*KXT&5uh|`QSY4VY@,Md`&B:9kYus 4^+r8Kȵ,M y Zq&kY@,M t&j:KH5 RMgiT&,M`Q0y0 ,j4EMeie,M`QSYfa8KXTAei:Q>e,M`QS'5Y@,MdA(h5 ZMfi9,Md&j2Kh5 5 ,jijij*K4EMei5 ,j*KXT&,M`QSY4|YJEMT&B`QS%@,Md&c[cT?&޲hoُ r@ʺ`{g?&޲hoُyP~[c&1V@p`12Xk7F&b`Jڍ"1d"&Ђ1h `LZ0&b1V@DLd"Ah5 L5 1EM%bA؁EM%bJrى,j  ,j ,j*XT"&N5uv`Q ,j 2h5 LNdt'j2h5 #nVѝUkZMFw&;\Xdt'j50?{X#)::q *c'Ы *c'Ы{\OWs=E\O`QST5s=V@\Od'j2h5 ZMz&s=V@\OdAz5 ,j*)z5 ,j*XT'R5 ,j*r=EMz5 ,j*)z&s=V@\Od'cTZMzT'|ZMz&s=V@\O`QSr=E1 zcs=A5 zj5 zj<\O`QSZ!E1Z!V@\O  ZMz&s=V@N,:9zw2迓w%\O s=*%\O s=d'K@/Az9\Od'CJZMz&s=V@\O ǜ <\O g ,j0,j*XT't5u~`QS5 ,j*\j /ְiar 5N=Wi=Kvv1acJ gݷa0;$_$0:$0:$ǔ+FFFvOL L|LatIatIa\ǔf+f`$/cg-<aL|LIaL eg-$0:$0:$0:$ϰ{&vdgdcJ =IavPIavPIavPIa|ǔffffvOeJ|LIavPIavPI)3*3*32323232323et#FwFwvdL at at;G?y{&j}PbYu=xO>Xl ,j*\Xl ,j*\AEMK 5  ZM&At;(jh5 lZM9ZAɰP Y P_"0,>?Sa@O> B^ z}2,qS&BrX(j2,h5 ZM9ք@Qw|&;>V@Ɏσ:sEM9XԙEMu|r,j};>EM9Av|&;>V ZMv|&;>V@ɎOd'jh5 ,jxw|r:>EMu|5 䜱;>EMu|5 
F~"0@E`'듑@[;`p3O'#?*O'#?EͶf'6܌5u~`Qm ,j fp3XA*XT'O`Qm ,j*XlnF~&#?VyP۪X~[u|T' @/$v| S r`@/$v| Bb'jȁ}w|&;>ȁ}w|&;>EMu|mO`Q ,jXlv|lv|mσmO`Qm? ,jF/5.;cץJŏ]q@*uzǮKc\@cH]awǮy)?vM#?!ct ?F1:ϏAD~~31:ϏAD~D??Fnu1^w~1^wuGx1^w~1^w~z`XyCxA`yC`yCv~w @?F1:аЏA~"4D @?Fk @?Fv~?~wk{1;x~wkv~wh1;x~D @?F1:ЏA~;oٛ"*c]Џw){SD~QawTǸ1D~A״k6>i{5H _xM,o^d ״kv5 x^ iB*PZx ,f5¢ZXԼC wp(-,jE;8y5¢Zh5Zh5Zx ,VV;8j"8j"8j"8TYh5Zh5 C wphaQ-UijE;8T C wRbaQNJ,,jE;8y5¢ZXԼC wphaQ-,jI-----ZMBBBk`nDphaQ} ϝXh5*ZW~*Zx ,+TBBH-cz"Uxz"UЋHa`{_Ah/RE }EETBTBTBTBTBTBTBTBTBTBTBTQ*ZXԼSE whaQN-HRE whaQN-,jީE;UiJ-,jީE;Ux5T¢*ZXԼSE wh!gՔ*Zh5*Zh5*Zh5*Zh5*Zȩ5ZMTBO*Uj"Uj"Uj"Uj"Uy5T¢fTQ`T¢fTQ`T¢fT¢BYXl*ZXԼS(whaQN,,j)E1E;j"Uj"UJ-H-H-H-H-RE&cC9-+BB-+B)c9-Eh/:F1ZŋB/^tZMtZMtrbLVVVVRhD(-Ж:F w`aQ,,jEͻc-,j}EͻOyw5Bcxw5BNcy 5¢1Zh51Zh51Zh51ZX m<:F m<:F m<:FcƫcƫcS--------EͻcY }:F e¢1ZX,u5¢f'cyw¢1ZXԼ;F e¢DXX,uZMtZMt&BBBB΍cjcjcjcjcyw5¢1 ;F whaQ-,jEͻcyw5¢1ZXԼ;F H:F w"daQNE;y'BZMtZMtrn\cccc-:F=<vAhaVrrۭ5b5-Fh/6jD }Bh!Ph!PhDhDhD(>}DhDhaQ-(whaQOS(j5EͻFĪ-,jއ/,jއ5EpEpEͻFy׈rX5EpEͶ{ChaQ>} jD &jD &jDwhDhDhDhDhDhDhDhDhaQ-䔻jD w(-䔻jD whaQ-,j5EͻFĪ-,j5EͻF>sG"UЯRE !UЯRE !UBm7TB/^z"UJ-Eh/RE x*ZXlq5nEͻvH-,jޭEͶGhaQnީE;Uy5nE;UvH-H-5kD /t׈BԈzFk5|0A5Wh jD VP#Z赂BBݫFjFx׈r^5V5EͻFv-,j5m¢faFv-,jjDm¢faFYb6}QY#v?D1쾘Sa})Wrİ:~L5aUv_aK8N.0_?7a`1ՈS{ʰ{&}T/chdhdc1    ŇAֈAֈ>0:0:Ѱ{Fc 1  ugh;kDAa}_wֈ3xY#Ѱ{z;kD>VX#{kD`ych, }L5avP5avP5at5at5at5?ֈAֈAֈ>0:0:0:Ѱ{vudhdc 5avP5avP5avP5aT#fU#fU#fU#fU#vOO}L5avP5avP5F4F4F4F4F4F4F4F4F4F4etW, ߧ0}e01>01qp4kw]? quw=xOA:;\ '{70xP{= }G~xϾ3EMՏ5U? ,j~GEMՏ5U? ,j~XT(j~h5Y? g_&G~h5Y? =Qd(j~h5Y? g_&GVU? ,j~XT(}y?MMU*TQd(j~h5Y? 
VUZM֏&G^d(>~XlJM knR_[v1@&e7) , Pݤ@/{vM lH&zٳXlH&5u`x`Q#h5M &ZMv&IVݤ@nRd7)jh5M &ZMvT7)nR`Q#Is7)nR`QSݤIEMu9FnR`QSݤIXT7)nR`QSݤIs7)jh5M &ZMv럩nRd7Au9nRd7)jh5M &5M ,jXlMz} b7)>`nR`Q} b7)J`Q} b7)ʃ&5^ ,j*XlM ,j*h5M &rHݤ@nRd7)jh5M 个Id7xM +nR ')MzPݤ@B&rݤ@nR/Id7)Ћݤ@/^v&IVݤ@ζh5M &ZMv&Is7)jIs7)B`QS=IEMu9xnR`QS=zEMu&rDݤM ,j9Ew!5M ,jh5M &ZMv˿Vxvԁ6ݤ@n_nR w7)ƻ\wiI4ݤ@nR t7)jH5M &RMwT7)nR`QMzM ,j}IEMuenR`QSݤf'XT7Au&5M ,j}IEM5enRd7)j Vݤ@nRd7)&ZMv&IVݤ@nR`QSݤIEMuT7)OSSݤIXT7)nR`QSݤIEMu0IEML9уmQW6G^ =QW6G^ 5Vf۪3pXT~ ٶ 5,j:GEMT(Q`QSf۪3pXT(ٶ ZM&G*pX~!/у z0pQ G|@;pQ G^+ ZM럩QdA9Qd(Q`Q08 ,j*pGEMmуmQ`Q08z08 ,jGEǩj\?v? 1Sa}ݗyİ"La%v_a;1̇wax1Տݳΰ{|T/chdhdc?    gA֏A֏>0:0:Ѱ{c ? F,# ,# ,# G >20:20:20:2Ұ{z`i=20ޗXFf{#H`yodiTc*# *# *# ,# ,# ,#}7441AAAө H HSi,,# *# )4잔HHHHHavPe441AAAAAAAAAA_;`~{asQ|U^>uGɑeɑ!YYol]/W*T7€qbg*FuW1j?{XϞ}P}I;}xPO;}xPO;}x9&& xi CդQn i&,TM*XTLS5I4U i&,TM*XTLS5ɚTdM*x=M֤&4Y zI=TM*i&4Y zICɚTdMjR2MդeICI4iP5`2M;eI4U i&,T#XTLS5`jR2Mդejw|ȚTdM*i&4Y zI̠ɚC;&kRAO5ɚT zI4ckR2ĚTLS١ Ua*W}mUЯ ;T*WP`CoT2vULC,4{;TAOUd*iC4١ zP=Mv&;TAOUd:T2Mueա iӦULS`:T2Mu.>i4Yv i䡬N2M}\|LS,T)XS.;4q2M}\Ce`>.>XSLSe e]v ie*;4oR,;}ȲSd)iPeɲSd)i4Yv z,;=M&NAOe`NAp,TNAp,T)XSLSe`NAʺ,T)XSL=,TCf2ז٧>e)-OUa=٧} ۞٧>mS>4>4>4ei} i*,l3,TᡲO2ep)#dg} i*,l34}zCߩS,;=T)ov`)ow)ȋsv`)ov`)iŹNAOe*;yqSd)XSLSLSe*;4Uv i_"Xvz~`)X~`a%e`f%e`fi\v`~?Sb~yEL%_O wL'cO`{NcgUvk,;q,;q,;}z7Ȳ7Ȳ7Ȳ}b= 4 c Nc Ncag*;yWq}<ƽ4ƽ4v_=q/>}}<ƽ4vV7Y}&Ocޠ>},>y>}Ә7Ә7777g}է1nէ1nէT}Y}Y}Y}EǸAVƸAV>Si,>y>y>y>G˟4 4 4 4 4v*y>}Ә7Ә7g>y>y>q>q>q>q>q>q>q>q>q>}7#1nL׼L՘7XIXU3 Tcޠ Tc`y&ajT3ƼAƼAƼL_s*xߓ}v!xߓ}v!xߓ}XP ,x9=@onLS` T2Me*P=T*XULS` T2Me*P=M& T)i@PUd TAOUd*x<=M& TU i@,T*x<=T*XjGLSՎ*P4Ui,T*XULS`2Me*P4U i@,TC& TAOUd*x_=M4Y z,P=Mfd*XX i ev=M橂~ɘ %c*x1OK %cTsكi}蘧 T2Fd*oua{c*藌_2P`#,_>uώT|ԑώTTHyTH͎TdG*i#P?4ّ zH4Ց `>>Xϟi#,TG*ȋVwe`>:R2M}|LS?,TG*XTH42MueH=Mv&;RAOH=Mv&;RAOɎTdG*i#4ّ zH4Ց `:RՑ `:R2MueH4Ց `:R2MueH4Ց COLgd*/<#SA T_xF_ g#SAL`d"SAL`d* L4/L4/L4Շi_ i,l32,T"S2҇pd*SbG_ i*2,l324ّzT?TЋgG:RA/ώTЋgG*ywvG*ų#ّ zH=MvvG*i#P ݑ zH4Ց iؑ i#P`:R2{;R H4 
H=l'#,l'#,,!w4;RcJ:/؟__w쾠LcOxz:Rcg쾛ՑkHqHqH}7Ȏ7Ȏ7Ȏ}< #5 #zc ;Rc ;RcgHya5Ƹ Ƹ Ƹ +1nԊ&؊&؊&؊oǼ }.}Yyod+j,lEy\1o75 ZQcޠZQcޠZQc [Qc [Qc [QFƸAƸA>S+jd+jd+jd+j>ييL1o75 5 5 5v VԘ7VԘ7VԘ7VԘ7V}< ZQcޠZQcޠZQ5 5 5 5 5 5 5 5 5 5 5 ZQc [Qc [Qc`y&a+j,$lEyjEy噄1oP1o<5 ZQcޠZQcޠZQc`y&a+jjPdS}lՃdÆ}lՃdÆ}lǃaCE6c}=fzTy i*2,Td"S2MEeL=Td*XTLS`"S2MEeL=MF&#S)i22PTdd"SAOTdd*x<=MF&#S i*2,Td*x<=Td*XFL=Td*XFLS`"S2MEeL4i*2,Td*XTLS`224 zL=MF&#SR4i22P퍠Tdd*i22O:&#S2TL=12,T{#XX d*ד}lUЯ' T,ד*Pz@.U?jX iwAe`f{d*i@4Y z,P=M& TAOUd*i@4Y z,P=T*XUL @ T2Me*P=T*XULS 9] i@,T T2Me*P4U i@y TAOUd*i@䡧 TAO*PyqUd*i@4Y z,P4U i@,lA,P=lA,P4c Tc T2ULS`f{ b*XNCe4թ i e=M& TA^D@4Y z,P=M& TAF@4YzؾY U7.P=T*/ TA@I TAg@w6 T@w6 TAg@4Y z,PyUd*i@4Y z,PyUd TA@,Tv"XDLS` TA_@,Tv"XDLS` TAb@P` TA^c@,Tv"XULSUd*i@PIGUУf GUv*#c TA GU.P9M TAN Ce*P4˯.P=, @,, @,T*XY~p*XUL*ULS*P4U i@,, @,T*XY~p*i@4Yz0Gd*i@4Y Ud*i@4Y i@,T*XCe*P4iMS` T2Me*P4U i@`@,T'XCe4 z,P=M>dG*/<;RAّ  eݑzT|T /jPEhc *XA}Tc *ȿp *Cvנ(>Xi,T *Rנe`>(jP2M}P|LS,T *XTץA4A2MՠeA=M֠&kPAO5A=M֠&kPAO5Td *i4Y zA4U 5`jPU 5`jP2MՠeA4U 5`jP2MՠeA4U 5[;*LE0TTЯ SQA*LE0TTT?0 SQ SQ2TTTL=0,T"Xٞ i*,lOLE4xTTL$ ש `RQ2TCՠY zԬA=T *QY zԬAyTخA=j֠5kPA5TخA=M֠%kPAO5`jP2kP2Mՠ,T *X~` aU5`fU5W֠eW֠eT5]G?^鱻xu2캷"ŋ]/ϮWgשϮg5zqv2gϜg]7?3Ԡ~ 3n5x3n5qAD gϸAԠ~ 5v/~ 3n5]cw g]xsܟ3n ]?PϮKqKHqKH[BEg*R?-ݟ㿯"3o"3oy=ywEjH+R?ϸAT~ "3n򞊊ϸAT~ "5vW~ "3nqH:7ϸATϼ򞊊ϼ"3oH+R?} ywEg׍ϼ"3oH:7xWϼ"3oHywEg]7ϸAT~ "3nqHDEg *R?Q7]7ϸAT~ gT~ gT~ yy[yywEjH+R?ϼLϸAU?Yבuu?>}\ b:R[]Z b:R[YU?XΡ4b] 2ͻ"XyWwEjLH-i2ͻ"XyW4ԢԢui"+R&*R&*R"i"i"i"xC-zH-zHb]Z,Ӽ+R9TH-i2K6ͻ"XyWK2ͻ"XyW4bX,Ӽ+RewEjLH-i2ͻ1ԢԢԢԢuDi"&*R&*R&*R)袧bf{ BEjL="Xy34cA$b#1m/6S~Z.b#1S~Z,/oE"1CHL-iH$4V_,loHL-zHL-zHL-zHL-zHL-zHL-zHL-zHL-zHL-zHL-zHL-zHLb杘Z,loHL[މ2wbjLNL-iމ2;1#Q%4b杘 މ2;1Xy'4b杘ZͨS&S&S&S&SUbjDb*x'yƬԢԢԢԢb杘Z,ӼSe1`{ BbjL=!1lAHL-i $4bf{ BbjLbY,ӼC4e12;Di"1i"1si%=M$=M$=M$=M$y8Ԣ ?lO-k"՟ EmџZISeL]1n]1n]1n]v[gޟTvƼ.Ә7@{*LcޠL4 4 4 4 4 YyOeidid3uƸAvƸAvƸAv1n]1n]e{*LcޠLcޠLcޠLcug 4 4 4 4vĎy2}.Ә7.Ә7.g2y2y2q2q2q2q2q2q2q2q2q2}.7.7.Ә7XIe3 LcޠLc`y&ai 3 LcޠL4 4 4 gvƼ_q9{N> wAn='*XSLS=94s iP=`zN2Me94s i4s 
z93ɞC&{NAO=9=M&{NAO=}F4s z9=T)XSLS=}FP=`b2M*iT)XS i,T)XSLS`zN2Me94s i*V!{NAO=ɞSd)iO&{Nz9=M&{N4i,lA94c{N2M*e1=SK`)%0^cOA/}{zSK`)%0Oƞ!SL2,ԧ4'cOAOSd)i24{ z==Mƞ&cOAOSdbO2MŞe`f{d)XSLS`bOA^:,T)XCŞe=4{ i*,T)CTǞ&cOAOSd)ȓTǞ&cO{ ==Mƞ&cOAOSLS`bO2SL=1=1,lA=4U} i ƞeޱSd)i24{ i*,T)XCŞe=4{ iiӦSLS`bO2MŞe==4 i9P`C2M5&cOAO{ S_xƞv)[ǞSCj>'cޠR 1n 1n ԻِِِπǸA6ƸA6>SbdCjdCj>L 1oPƆ}"9-!5vqKlHqKlHWcR!5-!5-!5-!5vyKʆԘ7XSِyc`yOeCjTC35ƼA5ƼA5ƸA6ƸA6ƸA6>+lHqlHqlH}7Ȇ7Ȇ7Ȇ}8 !5 !Rc`yOeCjTCjTCjTCj>L 1oP 1oP 1oP 1oP vTC35ƼA5ƼA5>SCjTCjTCjdCjdCjdCjdCjdCjdCjdCjdCjdC35ƸA6ƸA6ƼL†Ԙ7XIؐՐ3 RcޠRc`y&aCjTC35ƼA5ƼA5ƼL†Ԙ7Ԙ7XIܗ T>VG>%^>VG>%^>ާ*՗ /i}K=T_*x_Q4՗ i/P}`R2M/,T_*XTLS}`R2M&RAO}}E4ٗzTd_*i/P}ɾTd_*i/&RAO}K4՗ i//,T#X>Ce4՗ ~2MeK4՗ i,T_*XTLS}`R2M1>d_*i/4ٗ zK=Mid_AO}ɾTd_*x=Me1}`f{ b_*X>FL=/,TCƧ SAτ}OL z&O{gCŧ SAτ`I{se|*`O4ۛ+S2M}zL2>4 zO=MƧ&SAO񩠧Td|*i2>4 zO=MƧ*>,T|*X\z\ i7WƧeO4 i*>ɪS2MŧeO=T|*XTLS`S2Mŧ`u|*i2>4 zO=MƧeu|*i2>P  zO=MƧ&SAO`S2Mŧe11`f{ b|a{ b|*X iB,lAO4UyTLS`*42TLSTd|*mǧ&SAO񩠧Td|*3nǧ&S2>}3>ז ozT_[Ƨd|*o{Ƨg|*i2>4 R񩠧Td|*i2>4 f񩠧Cŧc|*XELS9`S2Mŧ<(u|S2Mŧ*u|*XELS`SAO񩠧Td|*iiLPY z,SETAe ] r.S9jTALTANe 2UtT2MeW_\ i_\ iL,, L,T*XY~p*X2Ce*S4U i_\ iL,, L4Y z,S=T*iL4Y z,SyY2Ud*iL4Y z,S4U iL,TT2Me*S4U iiT*X2Ce*S4U >\ i*,T T2Me $=M&T2>SA;>SA:>ז j O 4 O ۞ j񩠧C}{d|*i2>,T|*Jǧe0`>=XTLS +iiԇ4a i܃e0`S2Mŧ,T|*i2>4 zO=T|*i2>4 zO=MƧ&SAO񩠧Td|*XT7O4zT7O4 i*>,T|*XTO4 i*>,T|*XTO=lAO4Ui 6&VAOMɦUd*ii4ٴ zlZ=M6 6&VAOMjZ4Ui 6 6e1M`*2ĦULS`f{ b*XC5erU7nZ=lAlZ4մ i 6inٴ zlZ=T*ݲinٴ zlZyUnZ[6-VAMɦUnZ=M6i=VAOM`V2nϦULSMjZ4մ iw{6w{6eݞMݞM`f{g*XYGnZiVcJ:/}5?vȟ1vc+cyUC}qzW*XULS]4ջ iwP`zW2Me]4ջ iw4ٻ z] C&{WAO]=M&{WAO}a4ٻ z]=T*XULS}aP`2M%9w,ӦMSI`zW2Me]4iw,T*XULS`w4ٻ z]=M&{W.5iwPIUd*iwOO&{W2UL=w,T#Xػ i*O&cXAo17V> zCa!ư!^b bXAo17Vb +x߈1`f{e +XOiw^ư&cXAO1Vd +i24 za=Mư&cXAO1CŰea4;/cX;/cX2VLS1`bX2MŰuu +XVLS1a4 i*,T +XVa=Mư&cXAO1VGa=Mư*5cXAO1Vd +i2,T +XVL=1=1,lAa=lAa4ccX2MUqe11`8 i,T'X i4 zaiNO1=Mư&cXAO1  za}Uz]c*/{WAػ xٽ ܻ ;ٻٻ ; ݻ z]yUd*iw4ٻ z]yUdzWAw,T"XELS`zWAw,T"XELS`zWAwP`zWA^w,T"XULSUd*iw4ٻ zd*j{WAڽ GU]9j{WAڽ ݻ ߽ GUt*iwP`zW2{W˯]4˯]4ջ i_ܻ iw,, w,TzW2Me]4˯]4ջ i_ܻ 
z]=Mw4ٻ z]=Mw*iw4ٻ z]=Me]4ջ iwP`zW2Me]4iiT*XULS`zWAч{W2M5ej2=T*X&SLSMUdCu>=pu*[ڲw_-wi{WAwi{WA۳w_-w4ٻzzz]=Me]y;ULS,4ջ iw2?m`>zW2M}{LS,T*XU]4A2Me]=M&{WAO]=M&{WAOUd*iw4ٻ z]4ջ ׽`zWջ ׽`zW2Me]4ջ vҽ`zW2Me]4ջ vҽ1` 2Ud*iw4ٻ z]=M&{WAOUd*iwP` 2UL=w,T#Xػ i,lA]4UxUL0ܻ ׽1`zWAOnC-{WA][-{WA ݻ ڽwUлe*ݲw4ٻ ڽCv*iw,T*Xٻ iwP`zW2nnUL۳w۳w,l]4KɽI:~~1?S~c~)Wr>_ϔ_űEp>;L/_?7,cc3%sg}l< *iטL1nI1nI1nIFxdjd37ƸA&ƸA&ϔkLZcVc]ĤĤ};-1iVcVcVcVc!?+LZy=I1oP? T&ƼA}|gJZyJZyJZqLZqLZqLZ}VSLI1nI1nI1nI rdjd3%Ƽʤ՘7՘7՘7}VcޠVcyJZyJZwcޠV)i5 *i5 *iVcޠVcޠVc Vc Vc Vc Vc Vc Vc Vc Vc V)i5 2i5 2i5 g&ƼL¤՘7՘7XI3 VcޠV)i5 *i5 *i5 g&ƼA%ƼL¤7UM 9,5xI 9,5xI rWL_>ǹ]`rW2M*w,T*XC宂e]4 i*w,T*XUd*i2w&sW z]=M*w4 z]=M殂Ud*i2wP`rW2M宂C宂en4xULSэ`4 i*w,T*XFLS`rW2M宂e]4Ud*i2w4 gAOn=M殂&sWAO}y4 i 殂e1`2ULSэ]}Ub* 1wsWAo}􆘻zUb* 1w􆘻 'Axc*Xy isރe흗Ud*i2w4 z]=M殂&sWAOUd*i2wP`rW2UL2w,T*XULS ` i*w,TrW2M宂e]4 i*wYsWAOUd*i2w䁬sWAO]yUd*i2w4 z]4 i*w,lA]=lA]4csWcsW2ULSQ`f{ b*X(C宂e4 i 殂e=M殂&sWA^;w4 z]=M殂&sWAކ;w4  U_>殂1wasWA9ww6sWAg3w!sWAg3ww6sWA^;w4 ϹUd*i2w4 ϹC宂>XVLS-Vd +iP-Vd +i4 zla=M&[XAO-`ZXA,T ZXA,T +XVLS-`6M,T +X:FLS-`ZXAV=,T#X zla=M&[XAO-Vd +i4z zla=M,T#Xz i ec4c[X2M1e1-` i,T +X i!sWA;w4 z]=T*i2w4 z]yU]=M殂&sWAOU]=M*w=sWAO`rW2ULS]4 i7t7t殂e `f{Cg*XYjI=w_jOW3u;y~ELݎ%_`~~}/}w3ƼAƼAƸAƸAƸA>+laqlaqla}777}8 5 ZXc`yOe kT kT kT k>L-1oP-1omǼAƼAS1oP-L-1oP-1oP-1n-1n-1n-1n-1n-1n-1n-1n-1n-3 [Xc`y&a kT k,$layjay噄-1oP-3 [XcޠZXc`y&a kt +nac}L'xc}$xc}P-w&/xCULS-`6Meja4zVLS-`ZX2Meja4 zla=MUd ZXAO-C&[XAO-V> zla=M,T +XV{VLSE` i,ۿ`ZX2Meja4Ui,T +XVLS-`4 zla=M&[X,5iPEVd +iR&[X2VL=,T#X iP-?=NJVcE+x>V^+Z/PNJVcE+/OȊVLg,4{6+ZAOɊVdE+i4Y zh=MV&+ZAOɊVdE*Z2MUe==`f{fE+XVLS`*ZA^к,TE+XCUeh4U z@=T*Pg2P k T|m ۞ ۞홁 ۞`f=`f=xfexfe4@4y TLS`b12{<3P2Mb&3PAO z@=Mf&3PAO Ϣ z,=yS_! 
W v]z `ͥyYz Yz yYz Sd)iPSd)i%KOAO*=ySLS`" 2Me*=ySLS`" 2Me*=yfCe*=ykSLS`JO2M&KOAOSd)i4YzzeixFTm݈ QAu#*ȳe7Fr#*o$7Fr#*e7Ɯ^cNA1 s d1ǘSc)ȣrǜ&cNAL;4szS'ӎ9=MƜe94{6cN2MŜ*,T)X޳sz޳s ilƜlƜe=1`fWFL_YTzIϮs]K?kw?.vn.azvMgk% sgϮ?NvR?vu3oK=C]g R?QK?Q7ϸAԥ~Ɵ!KD]g R?Q7Ϯᱻ.3o.a<>[EEqCy*oYSיׇԢׇu&(O-il4O_,l(O-z(O-z(O-z(O-z(O-z(O-z(O-z(O-z(O-z(O-z(O-z(Ob]Z,l(O{6Se=2ͻL5-i޹E^̽y4\"/i޹2;״ȋ94)_,ӼsMewiDiDiD)x=M=M=M=M=M=M=M=M4\"9kZ,ӼsM;״cN4\bkZ,ӼsMewi'5-i޹2;Xy4\"O2k ޹2;״ȓL=M=M=M=M=M=M=M=M=M䚂1EOEOkZ,Ӽe1`{ BiL=!״Xy4crMeZX2;XywiL5-i޹2\Ӣ\"oՕkZkZk ޹EEEEު+״Kl>>y\Ӣ\"/kZ4k ޹E^b+״i"״XyWf4{6rMew)x4\bf{F)޳kZ,l5{6rMe=2Ai5-\S|#9\Wc>S>bؽ}/YWq/ ~?}Y=vLz 8vUy55ƸAƸA>S>bdididi?Ckkkkk/?SiT>bd31n)xdidi>rL)1n)1n)1n)xdi,L9y彘)1oP> bƼA}lgJ9yJ9yJ9qL9qL9qL9}VދrrrL)1n)1n)1n)zrdid3Ƽ^̔Ә7Ә7Ә7}RNcޠRNcޠRNcޠRNcޠRNc7gJ9yJ9yJ9}Ә7Ә77Ȕ7Ȕ7Ȕ7Ȕ7Ȕ7Ȕ7Ȕ7Ȕ7ȔgJ9qL9qL9y噄)1o<04 *4 gƼAƼL”Ә7g gƼAƼL”Ә7Ә7XIrr SN3̃ m۟}> zL9=M*,T)XSS|SLSY`r i* ,T"XomJ94r i* ,T)XSLS)`RN2Me!>d)i24r zL9=M md顲AO)ɔSd)x=Me1)`f{ b)X,DL=1,TRNAOL}4y z<#ƠSd)x1=Mv[S z<=Mv}mLS,lo<4 2Sd)i4y z<=Mv&;OAOSd)i4yzSLS`f{Cga{Cg)Xy i,T)XSg<4y iPQwbQ/6GAyQ/6Goos ?z? i*,TBG2M%TeJ4oo4P z=Mu(i4? z=Mu(io*b(GA? o? y?y? O? zyQd(i4? zyQdGA^8,T X@LS`GA:,T X@LS`GA^޹P`GA߹,T XQLSQd(i4? z=M4? z=MzboR*wV)/JA:Y *wV)\g<wV)ȟ *3Y &8PY`J2򛔳JoR*4oR*4U iߤU i_pV)XRLSY*4U i*,,I9,TV)XY~rV)i24UzRdV)i24U YɬRdV)i24U i*,TV)XCee*4U i*,TV)XRL_J2Mee*=TV)XRLSY*4 i*74U z*yRn.yXR_6"ͯKAVaKA9KAP69KAOͥ`KA^,ԇ4a2M5ej.yRLS6,ԇ?MSͥ`KA^}(Ts)XRkn.4a2M5ej.=M6&KAOͥj.=M6&KAOͥRds)i4\ zl.4\ ͥ`K\ ͥ`K2M5ej.4\ ͥ`K2Mej.4\ ͥj.4\ ͥRds)i4\ zl.=M6&KAOͥRds)iPͥ` 2RL=,T!X\ i,lAl.4oxRLSͥ`K2҇*yRcV)1PYƬRcV)1 JAx;U z`*yRdV)og&JU YɬRLS5`f{[fV)XCee*42J2J2̬̬RL-34wٗ8rͥ .\ :]Kc_ݻL{c(?:_y_ű{ck8vc+KcOɱ{9v̎?!?Ssi><݇cޠKͥ1nͥ1nͥy\\\6ƸA6ƸA6ƸA6ƸA6\yyk1}`sdi>$>c>coǸO>Sidididi>c4{Lc`yfigy}=1oP}zLcޠzLcޠzLc {Lc {Lc {LiƸAƸA>Sidididi>ccL=1oO4 4 4 4vӘ7Ә7Ә7Ә7o٠zLcޠzL4 4 zLcޠzLcޠzLc {Lc {Lc {Lc {Lc {Lc {Lc {Lc {Lc {L4 4 4 gƼLӘ7Ә7XIcc3 {LcޠzL4 4 4 gƼAƼLӘ7_JO CÑ]n'fxO0wmck}P!61x5Cx9=T)XSLS!`BN24r i*,T)XSLS!`BN2M&CNAO!}a4rzSd)i2P!ɐSd)i2/̂&CNAO! 
94r i**,T!XCe>4}i*,MS!`BN}i*,T)XSLS!`24r z 9=M&CN.7i2P݇ɐSd)i2/z&CN2ĐSL=1,T!Xr iP!`2&&CNAO!}64r z 9ɠɐC&CN{˕\W%ڥʕMҧX pVF$Q.Zd)j2O5,j:CNEMΐSd)j2h5r 9ZM&CNV!@ɐSd)j2h5rP!BNEvCgvCg)n 95r ,j:CNEM.T)S`QS! r:ݤ@&e7)п-Ibu7)п-II>M DI*Ӟݤ@UjwJ5^ ,jIEMW&IVݤ@αh5M &ZMv&Ihu7@9D4R#0i@U:q"}~2iF 4RO94Rd)EN#ZM&HVi@4R 猜F L#]4R F ,jjC6,j*XT)cGN#5`QS5F ,j*!8.T)4R 'F ,jjCHEM&HVi@4Rd)j2h5FPi@4Rd)j2h5F L#5ijjC?Vť@Oy<]\ حK<..xR Kwq)yqq).Tq)R`Q,\\,\\ ,jKEMeR`QSťfYXTqB*.5U\ ,jKEMeRdq)jtKVť@Rdq)#..ZM&KVť@R`QSťKEM.Tq)R`QSťKEM*.5U\ ,jjjXTqB*.5U\PťbCEMņ&KVť@Lh5U 䐣J*{2[G9R U )ȨR GU Jj7@;[G*rQv,jj7JEME9IR`QS5*5x`QS5U ,j*I2GMQ@ ƨRcTBE-JQ@ ƨR U hJQ@ ƨR _pT)j2pG&J*pG&JEM^̨R`QSQ U ,j*XleF.leF̨̨҅R`QݖUM$M HI_[&#|Ið]aw70GqخܰnҰB8=ua& 缇Wa(0;nn0:n0:na1 & & & 5ݤatݤatݤatݤatݤaa& 1 1dS0#.JQatMatMa0˦aj* l* l* l* g]6rgSi,p6A_>{8J/?LMavPMavPMatMatMatM=MatMatMTFTFTFTVl* l*0;Xl* j* j* j* ')SSiTSiTSiTSiUavPMTfTfT:LMavPMavPMatMatMatMatMatMatMatMatMatMTFTFTf3 J`y&aSiTSi,$l* j* 噄MavPMTfTfTf3 J젚J`y&aSiTSimM[Ptq6=LJy<~ _PÁ{OfE>mtO@f u)ֺXT)S`QS毦毦OXT)S`QSOEM&OV}.jtOV@Ӆj?ZM&OV}.jh5~POEM ~ ,j*XT*BJE5,jX,PJE5~ ,jXT)S`QS~ l?ZM&OV}|8jtRV@Sd)p<l?5cOES`QSf{ b)Tąj?58}3jh5~ ܧ;&OV}3jtOV@Sd)p<,j*gXl7t5 @Sd)jh5~ l?ZM&OV@Sd)jtOEMӅS`Q~ ,jXl7tj?]S`QSOt NXp /ĂS i]p /ĂӅmÂSO^9Ӆ*e).8r@,85U ,j% NEMb& NV@Nͺh5Yp ,8ZM& Nu@F=i 䀦#M*HS 4i |#M>i )HӁ4d)Ч #Mv)j2_V 4ZMF&#M\r)j2t"M^r)@`QS "MEME9HS`QS EME4rΑ i ,j*:Gj5i ,j*h5i 4ZMF&#MV@HӅ4ZMF&#MV@HSd)HS ?i ,jȃS  Nu)@ly]p v).8:S C.8:Ӆ*85Up ,j՛ N՛ NEͲzs)S`Q\p ,jX,7*8]S`QS NEͲzs)S`Q\p ,8ZM.T)jh5Yp ,8r@ɂSd)jh5Yp ,jXT)Vo,85Up ,jXT)S`QS NE_M_MM9S`QS NXT)Qd)jg_*ȼS w S9bS w N>?w NuBko9bS`QSy@9XԾEMkXT)S Ŝw ,jj_5Py5,jj_NEM9,S`QS5w ,j*h5w ;ZM.T)j2h5w ;ZM&NVy@ɼSd)S w ,j*tNu)S`QSyNEM9bS`QWSWSSNEM9@Ӆ;5w NVy@ɼSd)j2h5w ;ZM&NVy cNVy@ɼӅ;5Ց,j .lA;5cNEMu$1y:EļS`QSy w ,j*XT)w:y@;hw `;]Sc)Ђ1hw N*w)Ђ1hw {;ZM9TSdB9TSd)*M`Qݖw ,j*tNEM̼Ӆ̼S`Qݖwݖw tk K‘@d:#M_i0l0 0l?)0l?vqp~G0E헳ab6l_Si>=l GiAE0FAFAFSadididi!4 4 4 4 4 {SiTaT0EA-r>9^34lFpFp_{pFpFpFp 8 ΀0;X 8 ,},wAm~NNN N N N;N N 
N)4242424lFpFp:LavpfpfpfpR0;0;0;0;Ӱ}wT0AASiTiTididididididididid0AAL€0;XIpfpf3 NN`y&aiT0A_xa,$ 8 8 噄avPav.Th*14h } M#Ё}:B`QSՅfg34h5 MZM&CSV@Tdh*j24h5 MZM.Th*T`Qݳݳ ,j{6CSEMT`QS  ,j*4XTh@v9SA:]V:\a):]Sv9S u ®Sv}Xl+ v&NV]@Ӻh5u :ZMv&Nu@v=u ߖ] u d)Û:rl]@:d@v} S]@Nh5u NV]@ɮSd)jy&w&Nȡ&w*5U,jXT)M:5U,j XT)S uP]Ns)2@`QS]NV]@ɮSd)jh5u :]Sd)jh5u :ZMv:r,]Tѧ@S Mp)>GiO4ѧ@S G} ,&(ȋOH9tOEMEeiӅeiS`Q,} ,j*X,K;G>5ѧO*XT)S`Q,} ,j*X,K;G&OVѧ } >ZMF&O3v)j2h5} >ZMF>5} ,jOEME>5} ,j*XT)S`QSѧ毦O*XT)Ӆ>5} ,jwh5} >rѧ@ӁL70tS Rn Ms)gMv)sN7]=92tS Rn \Mr)=ڳ;44t L L Lx aididididi>{L 4tnauaH0z:0z:Ӱ}wf0ՙk֙k֙k֙35L`4;LvBfuavP;:0;:0;ЇA֙A֙A֙+w֙A֙A֙Sidididi>94tL`4444lxh5YP@RTd)B&KQV@RT>h5Y ,E]RT`QSJQ*E5U,j:qJQEMU'N5rإfYXTu"Y.E5UPJQEMU'd)*jh5Y ,EZM@RԅNZM&KQV}V;jXlA,E5cKQEMU'1XTu"ju:}5hX DZmKQhX },E>q@RT>XT!C`QݳY ,EZM&KQV@RTd)*jh5Y ,EZM&KQXT)*,E],E5=JQEvf)*Rԅ*E5U ,ju {Oe¶N`)Ƴs.T)Ƴ)soh{ =x9oS '{:@S s{ =ZM&{OV@:¤S =tPIUYƤS2&.T)gN>˘t:I@eL:,c)#N:ZM&9Sd)j2h5t L:rI@ɤӅJ:rI,jjNEM%9S`QS5`QSINsB%J:rLI,j*XT)j2h5t L:ZM&&NVI t L:ZM&&NVI@ɤS`QSI@Β:XԾEM:P'iN*Ht N:'iN+v)&8ȋNH9tNEM%eiӅeiS`Q,t ,j*X,K;'J:5IN*XT)S`Q,t ,j*X,K;'&NVI t L:ZM&&Nv)j2h5t L:ZM&J:5t ,jNEM%J:5t ,j*XT)S`QSI毦/Ϫ;XT)ӅJ:5t ,j*h5t L:r I@ɤSKt Wa)N:rJI@N9賌I@N9tv,?AGP<(9r@N:tA>c<=9u<(br"v,j*XT<(j2h5 ]xPd<(j2h5 ZMƃ&AV@xP`QS@9XTFDct m yac{ǶЏy>F61:<а=1:<<@}lGcv,~>f?f˽y}+a{cvp}[q"1:< @ʽyA>F>FDctymt}"4l}{?@{cv@<@{cvp}lU)ym{h؞@{ctyA>FDctyA>FDctya{ctyA>f3 @y=1;XI@y=2l}=1;XIs/y=1;XI<[⩿A$>Q1: C CH}l{`XqP95#z_D9}1yB)GRۘ<#zrD{AZ=ȓK9yz)G`Qs=XsD5уE=G`Qs9{#z,j9{AуV9&rD{AуV9=GDAуV9&rDZMуE=G`Qs=9{6sD5E=m`Q,#zYV=Xij9{6sD5уE=m09&rDZM=h5#zp~j"G =h5#zj"G6Df{ Bf{ B6x#z уE=mDTA;ЃvQBnc!DЃvQB!DƴC =i?B5XEvETAЃVQ&BZMD =h5zj"*DTAЃVQ&B{Tz ;/B5۝Q{Tf"*`Qs Q{T4P`# EA+ H=s@[H=hozsJ=It" DA* DA4ЃVi&@rHWEAkQykۉσ>P ?DBAH DAH< 9aσSAN6DAσV&?rI&?{A:`Qs"EEͽ`Qs<ȑ',j[?Xܷ,j9O^y?5-,j{AσV&?ZM5ffsx>03333a<9La<󁙙a<0;X e]0;-.`ffԖ·)33*33jKatatatʺatatÔFFFOm 0;X e]0;a Aef)*33lf9LavPavPatatatatatatatatatÔFFf3 33`y&affTff,$ 噄avPÔfff3 33`y&affTff,$ ?g9q36Wgfg0emyfmyfmVfy;yoͅ9p'M΅9p')M O:7o.T&睛7<ܼ 
,jyXT&M`QS͛7EM5o.T&M`QS͛7EM5ojZM6o&7dddB5o&7V͛ ռ lZM6o&7ddd&jys7EM5ojCyXEM7EMXEͲv&Yn5?MͲv&/T&M`QS;Md&jyh5ټ lV͛ ~d&jyh5ټ g&7EM`Q=yXEM`QS;_M`QS;ZM6o7v͛@;M>lhؼP͛@;Mb&y!6oa@_ؼ ,jjfyh5ټ lZM6o&7V͛@Md&jyh5ټ lZM6o.T&M`Qyټyټ ,j;/7EM5oM`QS͛ ռ ,jyXT@km5swBkm5vw&.>km5@\wkd&j\i_k&5V@rMd&/Mke&Zk˿H,DbBk}"\Y rMO$k9rM \ 䰔5V@rMd&j\ȱ)k&5\)k<*5U 5EMmoXEMk*r U ,j\>kL{װ4l?Xcuag~i~Scf~Ov/;aE05fſ̰}nnTcccfdcfdc0%?131313 6fA6fA6fA6fA6f#y13j/avP{0;࿺w`f>5߁ma00Gl y6Ͱ}p#iSf#iC@\ݣ@^ _PGC93  IN OKGt5 ,j*XTD'N`QS  ,j*XTD'N`QS":V@ɈN>h5ѹP@ɈNdDBEt&#:V@ɈN>h5 \N`QS":5`QS[_N`QS[5`Q, ,ju#:EMmX,mGtBEt5ZMFt&#:V@ɈN>h5ѹP[ZMFt&#:V}>j2XlA5c#:EMmXlA55`QS[Nca'ЂhX '- ;hX `,Z0v@ N`ݖY ,jjf-h5Y ,ZMv& ;V@Nda'jh5Y ,ZMv.Ta'N`QݖYعݖY ,j2 ;EMvN`QS U ,jXTa@vm ;vaBvm ;wa'.>vm ;@ȑyvda'jșev& ;V@Nda@FtbD' Ft9NN Y #:>W ˆ΁\aD' #:vD'sr@ɈNdD'j2h5 |#:V  䐗#:EMmXfEMEtr6S,jj3":EMEt9΅5 #:EMmXTD'NdD'j2h5 ZMFt&#:*h5 ZMFt&#:V":uD'L=N ta'Vv ;tȅ@:N ra'.!v ;š(ta'7veQ΅eQN`Q, ] ,jX,Bv*5ˢЅ ;XTa'N`Q, ] ,jX,Bv& ;V U ,ZMv& ;ua'jh5Y ,ZMv*5U ,jE ;EMvmQ΅*5U ,jXTa'N`QS@Xijpv*5U ,ZMv9Nda'jh5Y g$șv9EN G] hk@*NѺQEv9N u\ 䀷 ;V U ,ZMv& ;V@Nda'jh5Y ,jqv*\N '\ ,jXTa'N`QS@NXTa'}*5?N`QS@h5Y ,ZMv& ;V@Nda'jh5YعP@Nda'N`QS5U ,j Hv?}*5c ;EN`QS U ,j45c ;V@;΅*!v ;hX C,!v9EN V] C,Nda'jȡUv& ;ȡUv& ;EMHN`QS U ,jXlw^v.,w^Gt]eaD']Ft.,;,Gt]gaD#:ΰhMk~a:Lۏ԰3l?N4l?J)3l ';Waܰr":aEe>77*13232s6FFF#: #: #: #: #:f0;MSDgԦ":#:e w`Dg>/8#:x13la<a0G e=0;X ڰz, FtAmX}":":6FFF9'FF9Latatatat0:Ȉ0:Ȉa e=0;0;X 0;0;0;0;ΰ}oTD0EtAEtAEtSDgTDgTDgdDgdDgdDgdDgdDgdDgdDgdDgdD0EtAFtAFtLˆ0;XIff3 #:":`y&aDgT0EtAEkc?<#P@t'O`QSb?EM~5Pb?EM~5 ,j*h5 V  ZM~.T'j2h5 V@υ5 ,j*A_O`QSU*XTU @`Q, ,juc?EMUeO`QSU  ,j*XTU@~&c?V@Od'p\@d'j2h5 &c?EO`Q=1XTU  ,j*pb?EMU ȞO>h Cv= C!|{>|~b'8ƞO`Qy ZM|&{>V=@ɞOd'jh5 ZM|&{>XT'\5۝=z>Eve'υ5 ,js <vBUy<|wU@=*Ob'hX [rUY rU@*Od'jh5Y9U@X ?U@g_>WXPU@+\a@Vy}sU@kAEWy9&*Od'jh5Y r`U@*υrjU6`,jjw 5˺;EͲsx'΅ 5 ,j*X,>w 5˺@NdxBw&;V@N ga ZMw&;V;EMweN`QSf[1s;EMw 5 ,j*XTx'c5?MMw.Tx'N`QS@Ndx'ZMw&;V@N ga] d<wu'3r|p|p|p|p|p|pU@șBWy9t*Oc'js7ltΰ}ns~_{vP?atatôQ0:0:0:0^'FFFFO0;Am;6fΰ}.k;ya<a0G 0G y0G 
]5232s;`YO03*3;;)3*3*3*3*3lf9LavPav80;0;0:0:0:0:0:0:0:0:0:a 噄av<03*3gwAwL0;a j j 噄avc`y&axgTxg,$ GAwAw+$ )$? ߼F'F't1N0sB?C?C?-+%~ '8ȟ u>Sá] u>kC?<#P@t'O`QSB?EM~ 5PB?EM~ 5 ,j*h5 ëV  ZM~.T'j2h5 ëV@υ 5 ,j*U_O`QSr*XTn @`Q, ,juC?EMeO`QS  ,j*XTn@~&C?V@Od'p \@d'j2h5 $ 5cC?EO`QSf{ b' 5,j*7p [>ք-@k–O>hMP-@k–O5a'КOZ|&ʖO`l5͕-@ɖOd'jh5 lZM|&[>V-@ɖOd'jsZ>EM|ʖυʖO`Q\ ,jXl7W|j\O`QS-Z>5P@k\O 8p'8hM &Zzs=w@zs=v'j2h5 ZMFw9΅r6ѝ@N:ș0Gwm<;6ѝ xFw9N  䈘;Vѝ@Ndt'j2a1Gw&;*ȉ1Gw|=5 ܎;EMmXEMEwrѝ  ,j*Q2Gw|=N`QSѝ@Ndt'j2h5 ZMFw.Tt'j2h5 ZMFw&;EMEw9N`QS5z`QSѝVE@:"υ*!y鐋Gw9N c `rѝ  ZMFw&;Vѝ@Ndt'j2h5 ,j*Gw\N  ,j*XTt'N`QSѝ@n9XTt'5 ;*XTt'3ZMFw&;Vѝ@Ndt'j2h5 ZMFw.Tt'j2h5 ,j*XEMEwrѝ,j*XTt'΅5c;EMEw.Tt'N`QSѝ@NbtBEw;vѝ  C!Fw;ut'C!Fw}yct'j2h5 Ъ;Vѝ  Ъ;Vѝ&Evet'΅5 ,j;/;;0 ; ;0saQѝ߿ôIp /$5l?Via_Ci~G0Ew헏a1l_:Stg>9ln GAEw0FwAFwAFw& ; ; ;x`tgdtgdtgdtgdtg>Xw;6fIa $Ttctg;ѫa GׅN;p!Wy**tB<] ,j*XTU(P`QSUBEMU ]P`QSUBEMU 5U ZMV@ɪЅ ZMV&B*h5Y ZMV@ɪPdUBU 5U /TU(P`QS U ,j*|XT YV 5UEͲJwU(TU(P`QSY ZMV&BVU@ɪЅ ZMV&BVUU@ɪP`Q=*XlA 5>,j V \P`QS*hMX & -ZV.TU(К*hMX & -ZV&ʪP` 5͕U@ɪPdU(j*h5Y ZMV&BVU@ɪPdU(j*tBEMUʪЅʪP`Q\Y ,j*Xl7WV ]P`QSUB*t @>l` t@>l` 6P _p(/ t @|Om@Nx @Vm@6Ё,[~9O p] Oe'޲s ?@{O ] D ?Gs'jh5Y ,ZM~9OdB~9O`QS5{`QS ?r'=O`QS@ s ?EM~9O`QS5U ,jh5Y ,ZM~& ?V@υ*ZM~& ?V@Od'O +] ,jj36s,jXTBm:HP@ O s'9H ?@ O sBys'YV\XV5ˊ?EͲbt'O`Q ,j*s?EM5ˊ?EͲbt'j2h5P@Od'j2R&?V@Od'O`QSfY1:XT'YV\V5 ,j*s?EMr?E>;XT'Od'j2R&?V@Od'ӥZM~9kO g@'Ugge'v.r΅@NhX  ? 
e~& ?V@Od'jh5Y ,5U 䤊 ?EM~.T'CT.5U ,jXT'O \ ,jXTg O`QS@Qs ?EM~9Od'jh5Y ,ZM~& ?V@OdB~& ?V ?EMu*5U 4 ?EMu*5U ,js ?EO`QS U ,jXT'jhXP@;ObB~ ?v@;O 'Z] D ?v@_X ,ZM~9OdB~9Od'J`QyY ,js ?EM~O0 Èυ&O ' t }!uD|>].l;\ۮǶc5ltcۑvv>mGiv6mca6mcvp\ "> 7DctAD|>">|"1: ">ͿG|>fM?fM}=s">w@c61G|">f>a{c<#D|>m|y`YO 1;X|f">?=1;G|>f ?FDctae= "> 7DctAD|>A}"1:ϰ}@cvp|">{g#>{cvp|mN{g#>{g#>{ctAD|>FDctAD|>FDcta{ctAD|>f3 ">=1;XI#>=B1l|=B1;XI#=1;XIDctAD|>"1:ϰL "> #>|"3l| LL?[zoAc<fx> 4l}@0Ƕ7Bk2@6 QUojЃXYU z?Av߶\lT zp{ ,p.=y sUjЃh5Q ܫAZMTՠ&AZMT=M>h5Q zjW,jՠ{5m=p=XܫA5A^ z ,jaejЃEͲzW56xY=XijnjuH =胍PzI}zI†Bu%") PRAΛ+)CH =h50Ѓa9\0ЃUA) CAۇ0@}=hz =)K䌛@ZM=h5zj" zj" ț@5m,jۼ?X@50Ѓ8jЃ ՠ9jP^ zʪ=h5Q zjD5AjЃVՠ&AZMT,jՠ9jЃEͽW䜔A5jЃEͽ`Qs=XܫArNJՠ{5.xW,jՠ9'jP^ zWAZMT=h5Q zjD5AjЃVՠ&AZMTjЃVՠ&A5jЃEͽ]`Qs=XܫArPՠ{^ zW,jՠ`Q=`Qsՠ{5^ zjB5(p=hP zWC=hP z VU zԪ=hP zЗ7T=h5Q z=h5Q ܫArUՠ&A5n˃EvE5^ ܫA5jЃErU5AghP z!TK@ՠ9~jЃѠXs5\:L;ɰz0l? qva`~,b~$Sh~hۯa߸ g3}m>> 4 4 tvFYFYFYk@ @ @ @ @Tf0;ShN ?dg>%5N{x3 ?aw'rfq@;ĸOb@}>vq@;!J}9Od'j2h5 OdB}9O`QS[5U{`QSq>+r'ڪ=O`QSq@N9s>EM}9O`QS[5 ,j*h5 ZM}&>Vq@ɸυZM}&>Vq@ɸOd'O 1 ,jjj,j*XTBmH5] }.\O s'H\ }.r@O s's?>e9υe9O`Q,'] ,jX,I*5r?XT'O`Q,'] ,jX,I&?V U ,ZM&?t'jh5Y ,ZM*5U ,j?EMe9υe9O`QS?XT'O ] ,jXTB*rY ,r@Od'jh5Y 0?V .jxHMoCxHMoCxȁl#r2m@ݸ9z>8ts>?j#r2|ءF:m@;6RbB9O6뎒l O}d'?QK}7$2I)"96Rd)jh5F l#ZM&HE6R`Qt`yc)6R`QSmF ,jXT)`n#5F ,jXT)6R F:Pm柦 EM6Rd)jh5F l#ZM&HVm@6ҁj#ZM&HEM*45F ,jȩK*45F ,jXT@j#56mF ,jXTZT(К0hM?:P@kQ5a(К0? 
ܸGc(п`ZM&Gu(j2tGu(j2XT&5?:PGEpEMa)),q96R&8l xGaO'440 AAAq44444l|GZZShT`dh gņѰ}q GøV?/Zaa Zah GøV?Gav34wA5>wA50AA5>440:0:t0:0:0:Ѱ}kdhda:k|,  :k|,  =LavPavPavPavPa`0;?f?f?zGG G G G G G G G G GShdhdh,{'ahTh,{A'ahTa b b eO0;.0;X$ b eO0:0:0:Ѱ}dhdaeO0:0:?F?F?zG `ٓ'ah,{6Ұ˛a4v caj# ciHEVl# S8 | K...yF[ҁ:j,..R`QSG5U\ ,jXTq)R`QSťKEMTq)R`QSťKEM*.ZM&Kqdq@&KVťU\ ,.ZM&Kqdq)jtKEM*.XTq)ҁ*.5U,jX,eR`QSňf4XT1@*.5Ux KVť@Rdq)jh5Y\:Pň@Rdq)j)h5Y\ ,jmKEͶ bq)bD`QmX\ ,jqKEM#*FZMmK6ťU\ ,.mK6ť@ҁjMGť@Hh5Y\ ,.ZM&KVť@Rdq)jh5Y\ ,.ZMTq)R`Q=?Y\:=?Y\ ,j'KEMR`QSťU\ ,jXTq@5U:PQ@2F}U -cT)8ȏ+Uz JQ@N;_Fnҁ&rݤ@NY7w IքݤhMM &&rݤ@_7w&IVݤ@nRd7)Cp&ZMvT7)p&5u}`QS5M ,jy$w:>&5M dIXT7)I&5u}`QSݤIVݤ@nRd7)jh5M &nRd7)jh5M &ZMv&rݤ,jIEMuAEJTQU *RMGJTQ@;H5U *RMGTT)j:X,*X5U ,jwMG*5˻JEME]QJ*XTT)R`Qk:XTT)Y5U *ZMFTT)j2h5U *r^Q@ɨRdT)j2h5U ,j*XT"Y5U ,j*X,*X5U ,j*XTT@E*5U (JEME*R`QOSOSSQ@ɨRdT)*ZMF&JVQ@ɨR 5U *R 5K1VS [MUt@L}Wj 䨢[M*fXJ{R$?Cr}@9 CۿՇ {Pd*޲hoه:P}@{>Te*jh5ه CZM&PV}fIe*~Rه:Xl?C5Շ ,jXT*kC5Շ ,j*aXT*>T ܇:P}Pt*jh5ه CZM&PV}@>Td*juPV}@>T`QS}EMCr}EMC5Շ:P}PEͶ b@C5Շz Pք}Շ &CZT*КhM؇ &C>p*п`C}@>Td*CZMT*CZM5Õ}PXT*YOƣT<*#ƣ1bl_a5B+>l_a_)5l!߇)5l8 $ؾQ Q QSbd0,5kaaW-?+R#؁4щ@DV mt"+>:ϹRI')Yl@zDV t"@ X,O':@%:>Y NdDV`QSYEM%Jd5:PYEM%Jd5 ,j*h5 Ld#؁V LdZM&T"+j2h5 Ld#؁V@DցJd5 ,j*pYEM%JdDV`QSEͲq"+Yv>Nd5,jYEM%>T"+DV`QS2h5 LdZM&&Y+VLdZM&&Y2Vf1Xl &J|56*XT#Gd*p$ d*@O/L}? 
d@?SUSj O CWV@Ud*j2th5 ]ZM&CWV@Ձ ]5 ,j CW CWEd*U`Q= ,j*tuBWEM ]U`QS2u`{t0tT/Q @ζ;@T*s@rj@Ϋ9To6Pd*7@l9FT ( PV@Td*j2A6&P*i6:?C @5 LPEMXԡEM @r ,j*#:?T`QS@Td*j2h5 @ZMT*j2h5 @ZM&PEM9T`QS5u(`QSPPP?@ԁ @RMPT@T  @RMP*H5 ,jwM,@5˻PEM]PET`QS ,j*XT*Y5 ,j4PV@Td@&PV@T  @ZM&PVPEME3]Eԁ]PEMT*T`QS@:XT*ԁ @5 ,j*h5 @r@Td*j2h5 4PV 4՞LGtT 礜 d:*@O9tԁٞLG:'R9ttT ' +hoV:*}JG=@{tTe:@-Q@{tTd:*j2h5 LGZM'fIe:tT`Q2XT:*tT`QS@΃9XT:@To)O9R'{K-~R'{Kao)?5-@tzKV@R`QSEM-rԽEM-5[:PzKEM9R`QSzK=c C1Sb)1tbLv1@;Sb)1%Sc cLV1@S c 1S c 15 ,j'/cLEMŘT)S to)j{K?]ao)]bo)pK}0lk0W|[0l=lSoi30l G?Soi>2;l_a/0:0:NF[F[F[0:0:0:0:Ұ}a- j' j',640_>6A_>6A50;0;0;0;Ұ}&lTSaj* j* j*=LMavPMavPMatMatMatMatMatMatMatMatMatM444= J`ٓ44= J젚J`ٓ405A53A53'aSiT3c,{6A53'aSidSidSidSi0:Ȧ0:ȦʞMatMatM4405A1'aSaeO¦0;X$l* - 5coi>?׌af- ۿ k[u0[߃<<;[uҰSR -to)p[-{Kѽ@R>H%[ -{K4ӽux`Q%eT-RJ^Azi&ZB/\`QX\[B,j-Zͽ%A>Lgj--Zͽ%A^>h5[B\?h5z`Qsm }>|8µ%`Qsm pm }V>X\+ ,jmZB,jZB,j5-5׊ זkKE?Eͽ%A>h5[B\i[B/\+ {KVso }j-.|j-56ho }ٶA{KE͵f-5׊ זkEE͵A>||AϽ%µ%AϽ%AϽ%AϽ%~-~--fo }p.[BO[B{KVso }j-Zͽ%A>h5[B{KkKE͵%f{ -`Q=kKE[B,j--5זkKkKE͵%~rrr >ȯK CC0 } } }c }C] }e!4A>h5 ǻCC/\CCBC,j`Qs }6 }zkhE54A 454 o`Qs=E54CC{hVs }jZ=4A^>h5CC{hVs }> E>X\w`Qs=E54zzkhE&*DuD胔RjT>HQ! FE胔RjT>HQ!zZ! FE [**D,jTT>X\+D,jTT>X\+D,jTT>X\+D/\+D,j5 -5 -ZͽBAW^V>h5 ^!+D*D{Vs}jZͽBZ!`Qs %|YRQ!`Qs %|YRQ!zayKEE kE͵BZ! 
HQ!`Qs}V^V>X\+D,jZͽBAW>)=T>h5 ^!+D{胜CVsp}SzZC{vhO}tH}3KH}퉢ڡ=QA;'>q:$^V1>hD9ND9NDkE͵f{>b3mj'3,̢MɻG> D> q/k郜c|/k)?׈9bǿ ׈ ׈G>߄=A&r Z=AG>h5q`Q=qG^W{E#N,j5j8}݃Jv*}Sz*} i*} i*} i*} i*}Sz*p *} i*}Sz*}>X\,jA5נ9@kPE5T`Qs *p *}>X\Jl3J,jAZ=A#A#Ak>hM נT5كJ&{PGP郌 AAl*}jAZ=AL#A^>ȑi>h5kEp݃J,jAA5נ&J*}e=A} K-Ar A^XtP94l_ ;lSa~U}~Mt~EhҰ}| }>I9l_S4i>;l_!a /h0:h0:h@FMFMFM5h0:h0:h0:h0:hҰ}a& :RI)J$@zR tP@$X,O*:I@:IfR`QSͤu}`QSͤIe|Hsm)kKR kK\-r9i^ז\[ rpm)U[ rpm)Yo][:ߺX,﷮-5U[ ,j[ז-5kKEMՖTm)R`QSfyum)R`Qߺh5Y[ -Rdm)jh5Y[ kKV@Rdm)jXTm)C`QߺXT!Yo][:ߺX,﷮-5|,jXTm)ӥ-5|,jtjKEMՖ-ZM֖&kKsm)jh5Y[ -ZM֖9Rdm@Ֆ92SL)@ S b 䴓SL)@ Sc)xN1Ec)xN1r)EM-S`Q=b ,j*Xlg@&kK-ڲ!Gז9jK}k[U[ gm)˞@/{֖9NR g\[ -ZM֖&kKEDbm)ٞH-؞H-5jKd(tZMT(j_h5/ ZM9N~сZM9N~Qd(B`QSEt(~Q`QSEEMT(~Q`QS@Nz_XTAbԄ@_ȁ` Eք/ &Z EUw(__0&EV@~Q Ǘ/ ~Q Ǘ/ 5 ,j+EEMT(~Q t(j_E_t`:_i'aE?M~~aa ۯ}ۯۯ//a}ۧa ۧmk|>8l_/F/F/z: E E Eø/F/F/F/F/>LavPavP_(?q60F(F(F()l=Latatatah0;gh,~6A>g?EEEF(F(F(zXyQ4Q4Q0>Q4Q4Q4lF(F(zf˳avPj j F0;F0;F0;F0;FѰ}|kTaj j j=LavPavPatatatatatatatatatQ4Q4Q4= E`ٓQ4Q4= E젚`ٓQ4 05A5!A5!'ahTb,{6A5!'ahdhdhdh>:Q4Q'ahdhdaj l l=LavPavI(zXٓQ4= Ezs`h>;a,0#_4l{~/{waʳa`ht(_RD݁t@~Q }t(p RJ݁@~с:;Yb5up5upw`Q5|,jEEM5T(~Q`QS͇_h5/ ZM&EVV|ZM&Ew4Vf_Xl j>56_XT!Cd(p@}W/ ]a(w@}W/:P@`_ ~QuSd@&EV@~Qd(j_h5/ ZMT(~Q`Q=/:=/ ,jgEEMY~Q`QS/ ,j_XT@5/ ,j)j_ȯ_/ /~Q ?q(_@-E*@d_iD9 ~Q r(j_h5/ r*@~сr4w,j|EEM9~Q`QS5u{`QSEq@r^w,j_XT(j_h5/ ZM&EV/ ZM&EV@~Q`QS@6_XEMXT(~с:=~Q`QS@9>!7 _qR(kq@ǍV7 Zq(cr8nȵQ ׊F*nXT(Y^~7::nX,/57 ,j_Ǎ5˯FEMōT(Q`QSqfyu(Q`Q:nh57 Qd(j2nh57 |FVq@ɸQd(j2nXT(B`Q:nXTE!Y^~7::nX,/5UQ,j*nXT(5UQ,j*ntFEMōZMƍ&Fs(j2nh57 ZMƍ9Qd@ō9)Bh5E:P@,Rd)}"rY@,Rd)j2>gT"j2XT)}"5E ,j*Xltf,R`QSYf{3h5Y> c(.r/^*V@Y> f(+@|i'Y> ,ZMQ`Q=tX>:=tX> ,jGd(tZ0T(Ђ_h/ `Z09N~сZ09N~Q/ ,j*XT(~Q '=/ ,j_XT(~Q`QS/ ,j_XT(5/zzZq@;ĸQb@ōFvq7 C!ƍFdw(;n7ƍ&FVq@ɸQ g7 Q g7 5U ,j'/FEMōT(Q t(j:nFn0nt`A8nf B8nF߄a_)b0lr5=ln6ljShؾ3'0AE AE ć¸0]7O z`h>8qa\ ۇq=0n0ōq=0n4qa\  Gl^0;X 7fu0;X 7fu7f7fu0:ȸ0:ȸ0:ȸ{F F FA F F Fatqatq av0n4 avPqavPqa 7f7f7f7f7o =LqavPqavPq)n4*n4*n42n42n42n42n42n42n42n42n42n0ōAƍAƍ'ah,{ƍAō'ahT0b,{ƍA#0;`0;`0;X$ F 
eO¸0;`0;X$    TAƍAƍV$  =Lqatqatq)n4* 1= F+{ƍ'ahT bdaYŸ0#ƍx7{ĸѰ}}FShN ƍC+*ƍwqatq@ZQ>H7:PzEǍiFq}>6J:nH)7:PzKǍiFTfy9nXԩ*nXԩށE4s",R`QSY@N8XEMe"ZMf&HVY@,Rd)j2tHVY@,Rd)j2h5E ,j*ȑHg:>"5E:P'5E ,j* ɿ1An&rPͤ@mL:Pͤ@.$7L Br3) ͤ@NȅfR I\Hn&fR`QSͤfy3v3ffR`QXT3)YތL ,jX,on&5L:PͤIEM5ͤIEffRd3)jtIVͤ@fRd3)cn&ZM6&IVͤ@fR`QSͤ EffR`QSfy3v3ffR`QXTb!fR`QSͤ@NXTb!j&5L ,jh5L l&rlͤ@fRd3)jh5L ؠIVͤL ؠId3)jtVͤ@fR L lIVͤ@fRd3)cn&|Ed3)fR L ,jXT3)l&5ͤIE@g3),R +E _ V)W跲H*,R/H^"rY@I9,R/HVYGd'W] b'W]@_!v}S'dO ' $>^zXT'O  ,jXT'O`QS] ,jXT'CZM{ =u'NZ{ =*hM &LZ{9 tO w'п`LZM{&=V@:h59P@:h5 ,jXlW{JtO`QS@tO t'j: =ց=q' {,j:/{o~#Wnؾm~نWaJ ۗ}.tؾw0{#u0{A?<0y Gְ}hgg>73333lFy<<<<<<<h59P@tOd@{&=V@tO>h5 LtO`QS=*XT'tρJ5 ,j*wX,{etO`QSf8XT@{J5;x =V@tOd'j2nh59P@tOd'j2Gh5 ,jm=EͶ b'A`Qm ,j*wp=EMZMx}m e'זC@_[x}m9P@/{xo g'pF)˞ ZMx&J\XY?J\X35̅EH\h5j"=sDzB̅VZMg/H\h5j"=sͅV m̅EͶ Bz¢~EͶ Bz¢~aQs?B̅B\+̅ۧ ! s2{@B/^d.ܾBЋ xp B/^d* &2ZMds &2ZMd̅V &2{@¢ٞ+ 25s {@¢f{ saQs {@¢ 25̅E= saQs\X2d.=rVvϸW r7rh^ 9ͅ^g_ϙʹ \X=s!?\R(g B9[>r.# \ȹ}r.ĪB9r^P ʹ ʹj"s!'ʹj"r.B95/,jGs_XC95P΅R(¢~4Eh {(¢ʹ\  {(Bt)saQs?¢f{@(¢~4@r.\h5ʹj"sD(BPNʹj"sD(BP΅V &B95P΅~U(¢~4Eh {(¢ ܏澰r.,j 9 P΅Eh 5kjY_Xg;(s!鱗>~*s!鱗>~*s!鱗>~*s!j\ȟe}.O>RMe}υE=saQ ,{e}.,j>5υEͲwP¢Y\Xܳ>{¢g}.,j>5υEͲwPBυVY\qWe}.Y 9υVY &>ZMd}.\Xܳ>5 EͲwP¢^Y>5AY {¢f;(saQsY {¢^ܳ>5υE=sDBυwUBυVY &>ZMd}.丫>ZMd}υwUB*sDBυVY 9υ)TBυVY &>rUY=saQ=Бg}υE=saQs\Xltd}.,j:>5υE@G¢ͅRBΨhm.T6rKEhs!TS]*\蕍ͅUBhs!TS]*\X܋65ͅEͽhsaQs/E {B?S&p\蛍hͅSB)Zso65f#ZGk.Frhͅ.UBN*Zs!g'j"ZsDBhͅVњ 9ǨhͅVњ=Zs!j"ZsaQs/\XlHDk.,jњ=ZsaQshͅd ZX\ȱEk.tњ lABG[Dk.gWFkW`ؾ6a?5e9l/a(Ұ}I>Lњaܰ}=ۗ}5cfؾ+qؾAFkAFkAFkAFkq1Z32Z32Z32Z32Z3lNy5F4lEƿ 54lChͰ}9N0h3N0h3lF'y66sOj><fu0:M -t&pUt&pGm@*M t@H/ M`QS'9M`QS'95AAfy:hXT&M`QSA ,j*hXT&M`QSA6VA@ɠM>h59PA@ɠMd@m&6VA@ɠM@d&j2hs6EMm  M`QSA6*hXT&M`Ql{ ,j6EMeM`QS ,j*hXTx ZMm&6VA@ɠMUd@&6VA@ɠMMd&ٶA 56AEͶ b& 5,j*<h5 b&WA3@_!m}9PA@/^m/x e&pB)ЋA*A@ɠMd\a&j2hh59PA@ɠMd@m 5sAsAf{0hXT&ٞ+ 59PA6EMmT&M`QSA6EMm6VA@ɠMd&jj2)|p9 (S8v 'ז)@~$@NCșDwr_H@~+N cwr;9NN G Za''k@vr&;9Vt''js:9Xt''::NN`QS@΍XYׁEMuXT''NN '9P:9#s'' κ~ ;9V@NNd''jh59P@NNd''jh5 5 
伭;9EMuXYׁEMur5 ;9EMuXYׁEMur55uu`Qmκ,jm?Tɟ@O t'j:H5 NRM'?Tɟ@O t@%J56ɟ6ɟf9XT'YAN5 ,jm?EM%T'O`QSɟf9XT'YANZM&&?Ud'NZM&T'NZM&&?Vɟ@O`QSɟ EͲ r'B@`Ql9l ,jm?EMeO`QS ,jXT!@%J5 LZM&9Od'j2h5 Lr ɟ@ρJ<@ pρ Z0{-=u'Z0{-=@:s=8w'pO z ,j*XT' 5c=EXf'jYvTa'cf.r̅U 䘙 ;3sa'K@纰 Hv9fN \ ,jXTa'N`QSU [NN ' ;9es:9喩_vr9NN + `;9V@NNd''j9Fwr&;99Fwr&;9EMHANN`QS N:΁%7N ' t)@` t )@` g?p#W`ؾa?R87laѰ}=L)aܰi0l_8l_?)aRaBFFFF0:0:0:0:ΰ}haJ :~cgӰƿ S8ّ4lq=03l0p#øyR8ø0;0;ΰf{S8`y/` gԱ`y/` gԱS gT gԱ S8 S8 S8+L L L9v#'n}rBM1@_ %ȱcF9vsn} r&З n&n&n&n&n&n&n.M`QSc7E ۫nՁc7EM5۫nPc7EM55vsn ,jj&nZMZMZMZc< ?OQd[ hhȏyC'8N {{C'qC'qC'N`ojC'7y::ԍ7t}9pC'}wor΅GrN/nܯ7ty:V:jC'G/XT<:xt`QS:EMm7tG5,jjC' @΅ ,jjC'ἡXT<:N`QSh5h5h5h5h5s6t&7t&7t&7t&7t&7t ao5,j*XԆN`QS:*XԆN`QS:&e=ӈ /ȗcyV1EM55s??c@s>&g<sR ?&п8 1Pcy1V1V1V1V1LM=p=f~J0, uaa3c]{sc:z0;R0;avP1cA ZfUjf\fUj>L1cAA \FsX{1 cAR0:at1 cOA \9Lav=avPavP1c폢z0;R0;avP1cAavP1cz0;avP1 cA \F3r=fz0:at1 cz0:at1`'z0;XI3t?{ *垄1Ji=fT~T~,$\fUf= cArOat1 cA \FsX'z0:at1i=fz0:ô3g?{VI3{ 垄1|W\ Fϸ,3l?xXy0zeaqa2e'3.ea2`y0;0:e@Zel E̅J*E/F/G/fe)e.TR9^zY&fzYB%2EM%/ԲL`QSIfyE̅52EeeZ ,jjYB-5,XԲL`QS2EM-5,h5,h5,\P2V2V2jY&jrY&jrY&jrY&p?h5,h5,seZ ,jjY&p?eZ ,jjYB-5,XԲL`QxY&Y|,Xv@`QxY& ,XԲL`QSrY&jrY&jrY&jrY&jrY&pdN̅\ \ \ ?h5,XlA\ ,j .5XlA\ ,jj;B-5Xv@LO.@ e@\P2,)З e} rY&pR/A.\e@_\ %e@e@e@e@e@e@e ,XԲL`Q:pYef{uL`QS2Eee.ԲL`QS2EM-\eeZ ,jjY&e@~ 2V2V2V2VG2he揖e.ԲL ?v<&ܤP444@7i1o7im7im7imk| k}9p&`MkyӃ5V8Xk5V8Xȣ AOZM\@`M`QSJ́EM 55X3w ,jXT9ky΃5j&@`M`QSf 5EM`M`M`M`M`M`ͅ      ,jj&g=XXT9Rs`QS5EM \k e5,j*:X`M`QS5<ӁEME6ȃ5TӃ5TӃ5TӃ5TӃ5TӃ5TӃ5TӃ5TӃ5TӃ5TӃ5TӃ5j&f `ͅ6ȃ5Erk6ȃ5EM 5mkP5EM 55XX,A ,jj&Yn =XP0_H!/ 5X.^MW0x9苗0< @_ [OZMNȕ@k•@k•@k•@K z%Кp%Кp%Кp%garV^yL+/< 땗@K`QS+/EM5ۋ+W^ŕ+/EMZMXOy'v'<@I %y$ǒ~ȓG ɱ@ ɱ@H xl$'=6WPc#96Kc#<豑@H fyl$jrl$jrl$jrl$jrl$G<6h596rFyc#Vc#EMmA5۫F & & $O\X SLOzk!M<dȰa1l6a >l?2l}?忷k~&C'W~Wհe _R+j~A ۯÔF92r2dd0^G F92r2dd0:ɐa42*=h2dykӰy0~ ?'>L/KGqxq2dx#N uɐaa8r&C:d0;<0;ɐavP!n&CAM ff f=L!&CAyAN F9rX{! 'CAN<0:ɐat! 'CO2AN 9Lyav=ɐavPyavP!&C.d0;<0;ɐavP!&C,AMɐavP!&Cd0;ɐavP! 'CAN F92r2dd0:ɐat! 
'Cd0:ɐat!`'d0;XI82*a>{N J垄!i2dT|T|,$ f0f= 'CA%̇rOɐat! 'CAN  F9rX'd0:ɐat!i2dd0:ɐ42*T>{NVI82{N ʓ垄!uS#iN ;NsjdȰ0ɩaS#r'F0ɩaS#`y0;0:詑@Z詑f EȅjEOFOGO7F)F.Tc7^zj$fzjB5vS#EM5v/H`QSfy%ȅS#ErF ,jjjBM555XH`QSS#EMM555h595h595&F.HHȅ    OZMNZMN\F ܳjj$F.H`QSS#EMM5mFS#EMS#EM/H`QSS#EMHHHHH@ɩ    ?h595XlA ,j N5,j N5PS#EMZM\^&ok"V&rD}qM$pQ2*H@_e\Pk"ʸ&諌k"Vk"Vk"Vk"Vk"Vk"Vk"jM$5f{ȅk"E5Dk"EM\5DZPk"EM5&XԚH`QSk"D&D&D&DD!Z9\5@~|k"VZMN\@ɩ@ɩH ?ੑ@>HHH ?橑 P;$I`j=;$COy$wHyj;$<3@_H! @_H! ewH&wH.I z$o`QSvH! A>5 ,j*XI`QS;$< CXI y$o`Qr$jr$jr$jr$jr$jrwH&wH&wH.T7jr$jr$@I`QS;$EM5CXɅ! ,jj$@I`QSǁEM5C! ,j|XT8YnCH5CH5CH5CH5CH5CH5CH5CH5CH5CH5CH5CrvH! ,j \XnCX,A! ,jj$YnCXI`Qy$ CXI`QS;$ErvH6;$V;$V;$j$jr$jr$jr$Ch5Ch5Ch5Ch5CXI`Q}e XZ,=.ԲG`X.-{rG/.{򸫗=}9p#З=}9p#jr#]h59h8h8h8ӥ>wwwwt;.xG >{#K=ӥ,jj#;,jj#@ɱ@C Ox!c<1{=y:cj!<1{-=ty:cVcVcVcVcIi6%?=0ؿ lEYǶ+cۅsf>]6ۮmǶkf؞Qv|l`>F11:YZ, f>F11:YA2|l{4le3ԏ{_>sk|;>=nd}l{1^sc01^sc00lsө{:cvps>=>f9}cvpO~C01;SsNĜ >F109A9|ba؞NĜ >F1 >F10lO~C01;S?f9}caNө>)>0ls>f9a>1;9|bctssĜ >F11:9A9|ba>1:9A9|{9|{9|rO9=1;XI01;gs{&cvpD{9|rO9=1;XI01:9A9|bcA9|baX' >F10lsĜ s{ cvܓ`aX'`',$sĜǶC+4>F?1(հ:9O9|l;E19O9|l;@19a'>F?11;X^11;L?F5 -ԜÃq)tQsF9`Qs>X,s,sxY^5`Qssx9`Qssx9֥ ,js59}>`QssxYn{4`Qh^~Yn4`Qsys59{{ sxjbA9&>yĜC&>yĜÃVsnyjbf œÃEv9{f œÃEͽ9X#5Ã<姭{9}o=Xܷ,j[h,j-{ f ÃTS[RMm=

}<yD3<yD3Hf9xC4sV3ZMu?te`QS EMyҕEM+*5Ua@/T? OXT2`?t,ZM& V@ɂd V@ɂJWZM& EMyt EM*5UP EMy EM*5Uij5Հ ,j  T@ t?jH5].RM T@*5U,j /,A.5m EM6 Er`QS U,jXT?Yn\,jX,A.ZM& h5Y,ZMy@ɂd?j3n&~J7&~&~ O!y}@r>B!y}@sBy}@s= uGQ@>`w=uCCGyQ@>`wBEyQ@>`w=uyVQ@ɨ{d=j2 |Lx 9<~a):<7t qۿaWso4g~ 0??Laae7l|~u /__ c~]_ x=0<>2<>2<>2<>2<>l0LJA8g`0,?Lqa1X>?#ˇgao3 m9lmO0X>lGxO0X>*X>*X>l0ˇAAAˇAˇ   %ˇAS|T|Tpd|d|d )82X>2X>2X>l02X>2X~`{avPqavPavPa)X>*8*X>*X>*X>l5*X~젂젂)X>*X>*X>2X>2X>2X>2X>2X>2X>2X>2X>2X~ `'a|,$  垄avP!avܓ0X>*zBB`'a|Tu,$  垄atatataىatat= )X>2X>2X~r`'arO`0;XI,f9f= 忌a4'*iˇOˇĞOˇOˇgOS|d|To, J BR U F飃I@*`y tB饃4 U ,jWp*]`y`QSfy%wZ`y`Q;XXT<`y`QS ,,j*XXT<`y`QSV@`y _`ydux/$yͭ@r;|ju[݁[v;ϖeH̭@{p;ϖP@{p;ϖluZM֭@Vwd;j վAVjRMGpQ@GtƚQ]q>lr0l}aac =wY2Ʊ  >l0v=lW{d{f{d{d{d{d{ac=20>La90=?׻a9sЋݰmR7l'g0=lq0=*=*=l0AAAA ` ` ` o"AS{T{Tlnd{d{d7  ؇)672=2=2=l{d{d07bs7S{TlnT{T{T{eT0AAS{T{T{d{d{d{d{d{d{d{d{d0AArO0;XIff= š`'a{TX0A5A5rO0;0;XIff=      ؇{AAS{d{d0A3rOa垄avܓ0=*9{'؇iO?+ ` `3''S'؇)=~2=6iA߆Ai؁{^+":}ot@v }t;p?H%`_[ t;f:}oE v`QS ,jX,`_X^,js `5,j*}؁EM `5,j*XT;j2h5ܣV  `ZM/T;j2h5 `@vdB `5*XT; `5,j*X,=`5˝؁EM*XT; `ZM&؁V@v!@ʪZM&؁VCVf b;n,j*XlA `5UPEMeU&ׁeB5m<ׁǖm<ׁ6cK6 ոl\x6m<ׁǖm<ׁV ոl\ZM6/T:u`Q2f˘feu`QSfeu`QS ո,jqXTB5j\5 ,jqXT:=r:jqh5ٸl\ZM6)$7&qȏùqh5Ըl\ZM6d:ѝČuf 1ggy2@_+XZa:sBE&}0c8c} 3ցČufyj@u: 3ցVŒu /;cӣXZaBeymEMX5A>g5o ,j*cXT:GP2ց_ȇL0!;́|s ͻ0;́|s G0;G05a0ZMv@sd9j:|aIO:7UZ}N-Sˁ|JrL-wwkL-avo avSvŇ}vr=2<^3<2<2<2<2r9h7Pߍ@>9u9ONhS7Ǎb́EM5/T9Fs`QS@Fsd9"hl4ZM6́|ō@Fs G(n4Fsv[{Zڏmoolsc;۞;ئǶ7?= &e0lc$mlcw?FwaDctAt?FaF̰1;=gFEc;a{c٢1lh?Ƕ1>Ɵ"aKǶ0>/h4fFh~1;>fFhSFhFh,$h4_F h41:F h41:F1:F rOFޅ,$h4]ȏrOFޅ7?f.ޅ,$h4]ȏrOFޅ,$h41:F ۞1:FrOF h41:FharOF|,$h4<1:FǶc.k1h4l+}1:FǶR F {cv1;>Fh~Vz"j4?Hh~>VzJ T95N TAFs'{Y^h~FE=N`QZX^h~7,j`Qso4?X́{h~7,j{h~jDD9po4?h5h~j7h4?h5h~jV{jD9po4?X5F[.po4?X5Fsh~7,jGG{fQ 5FEͽ:&ZM4h4?h5h~p ǃVDAFVpXܛZM4,{AKFh4?hh~p`҃FAKF&=hh~jWQ&5FEͽ`QE9Ef}FEͽ`QEh5FEͽ7,j{Zh~7ǒh~jDAFV%5h4I95?{A7?{A&@[AA+8?ȏ) ?<p~Wb 8y}! 
?sDA7h~GBhd\r@A_h4?yFYyBT}971Q5,jq{h~7a=5,jq{h~7=5FEͽ `Qs=XlD`Qs DAFV&ZM4h4?"h~jD9p=h5h~j`Qso4?Cj4?X5FEͽ`Qso4{h~gh~7,j{h~h~7,j{B5j4?H5h~j TAFTSRM5j4?H5h~j7,j6HrFErFEͽ`Q`Qso>X,Aj4?X́{h~7,j 5,j6H&ZM4FV&ZM4h5h4?h5h;ciЅ~Ђ 6 O <|??s* V~!ǍqUVA>RVA;7*Lg۟: ;lP{v۟eeLgU:luaJ:-K:lwtخÔF?FFFF?v젒[~J ϝYa3=ljj0ܙƟ;sgV{0ܙ%z^": >?zuL atYatYatYa; ڇ)a7fff~v젲젲젲AeSV{TV{TV0eAerO¬/cV{dV{dV{dV{dV{dV{dV{dV{dV0eAfAfrO¬0;XIff= R`'aV{T0eAL;p>=ôy@>Ls¿|:=@Ap*>l0eЇA%A%AeЇAeЇo ʠʠʠJ:fЇA%S}T}Trpd}d}dw3 3 3)982>2>2>l22>2~`avPavPavPaSÔAffAfAfA?fA?LavPavPÔAf= 32迌atatatatatatatatÔAFAFAf= 3`'a}Tzu,$̠J垄avPÔAf^f^f= 3ҫ`'a}Tzu,$̠̠̠̠" ̠̠VIAFAFA?LatatÔAfXf= 3臕{fЇrO 0;0:0zaaa-SSWSS0zaJgj * avPatЩ@ZyR N__ ]t<6:uH:CjTҩ@JjKiSX,N5PE5٩REM/T/y`QS :,j*uXTY3ɚy ?y ?y fK5@_a_i/A&S:P/Tl>loe,ە7lna*fm6lwmخڰݴTBFXBFYBFYBFYBFYBxJ젊JJ%Ї%Ї_K,000at%a؇s=0:atG:8>>lv0ЇAUAUAЇAЇ ****:S }T }Tupd }d }d wK K K臩:8>>>lG2>~`%avPavP%avP%aMTBfUBfUBfUBfUB fUB?L%avP%avP%TBf5 KJ迌%at%at%at%at%at%at%at%at%TBFYBFYBf5 K`&a }Tu,$,*嚄%avPeTBfUffUff5 Kʬ`&a }Tu,$,,,,O# ,,VIXBFYBFYB?L%at%at%TBf_f5 K臕kЇrM0;0:0za%a%a -KKWKKsWK臩>̞)8*8[ЇA5AiK{g-"~t%@z }t =pRI)K*H/]B._``Q|XTBJ5'Kd*5UBP JEM/T =z`QS%JEM*ZM&K{2j~JV%@*ZM&KV%=@h5YB,_z`QS%J UB,jXT B*5UB,j%JErz`QS% UB,jXT@&KV%@zd =p"j~V%@zd =p"jXlA,5eKEMe{2%XT7lo`Q]hoYB ,[-KhoYB,[-K% UB,ZM/T =z`Q~a{Ke =RYB,jXlo,5UBP%JEM/T =z`QSJEM4KV%@zd =j\B,_z r =jh5TB?*Z̤D3!03|F̙@>Lz Ϧ;'3~}23d&=ЯOf3V@tByر@t<3vh;c<ygByԱ*EMU5;<5U ,j*vXT*9*exʫʫۿFWFWQ92~R 'AՇ}ۿF;0 AՇAՇG0;0;0;0;[avPyavPyavPyavP)avPyav=ȼaʫʫJ̫̫̫Vd^}d^}d^0 AՇAՇAՇweAՇASp,2>*e8*>*>l s[()>*>kՇrM¼0;/c^}d^}d^}d^}d^}d^}d^}d^}d^0ՇAՇAՇrM¼0;XIWf{f5 r`&a^}T0ՇA^A^rM¼0;0;XIWf{f5 S ꇕkՇAՇAS^}d^}d^0ՇAE]rM¼a嚄yav\0>*˘PFP?|2>.1>l?uXLeB}]bB}~0k]bB}]bB}~&k]bB0%ԇ avPav|2>*8:H Pm  NF'{-J:H)PP@zz tBB ځ*XT;0Y>uP|:XTB=j5P,j*~EM%J5P,j*XTB=j2h5PܳV PLZM&/TB=j2h5PL@ɄzdBB%J5PO_z`QS *XTB=z`Q\8X,W>N5P,j'J_z`QS z2h5PLZM&&V LZM&&V f bB=.P,jXlAL5P zEM~2!1@zc=p (1~B聶!@zc=p (1hBP!@12h5BP!BE퍑!f{cd=z`Q12XTB 5BP!BEM 5BJZM&CV!@z \r=j2~B||!@zQ=j(h5B,j*~ok׼Ϗ:9~3<|@>;z O;z %s|BEUz q|=j2C_z &;ȓxm<6@Rv|=gD__z 
:XTu0``QSEM5j_{`QSMVM@ɦ{ h5tlB7]M@p=7Gtnn_Xݻh}l]~l*cۭm1Mm|lc76k>I͙m|l3ff}AcۛǶ{Bh1:6Ƕ5m1;wm1;w?FfDch;h1:6Ƕo/?Ff?>?}q1:v~쟜f۾?fA{cۗ{cvpom1;?f6f{ctmA?FfVDctmAهďA?FfDcMA?Ffw?f߃hď1;?ݥ?f6f{c>lomrM6`&Acvpo_6 h1:6 h1:61:6 mrM6ޑ,$hُrM6ޑ?fޑ,$hُrM6ޑ,$h1:6 9|1:6rM6 hh>Oq\1;XIfVIf,$hf]B}XLEct mmT?Ffvnct m%?]B}ffӄ3mJ1:6PmۃQm=J ]TAڨ6QmۃTRm){Az64Sm?kfU% 5(E6{`Uf(Eͽ`Qsom{f,jm{A6Vm^VmDA6{fjDA6[LA6Vm`Qso?Xn6Eͽ`Qsom{fY.{fY|f,j,jm`Qso?XfjDA6Vm*h &ZMh?=PDf Bf B ~.f'6E=A`QsO?XlAh?=CAۇ6<>6mm>yC=po?7Fhij7F,j7F#5#56Eƈ6Eͽ,jm{=po?X56Eͽ`Qso?fjDA6VmZM6||Im&Z͟Z͟`QsoG`1~?X߫WGA>|пO$H@~AA>{r}o5H?g|Oh( Ϛ*'g%S%H?h|# ?+ *G=O1m<5{:|',jy P{:|',jyP=`QsO?CJ?X[5 H?X[|j"DAV&$H?h5|[ZM$H?Xܓ5E=`QsO?Xܓ{|'aj%,j{|'ql%,j{|PRM%J?H5|j* TATSRM%Jij %,j %e5e5ErE=A`Q\)`QsO{ ~Y.|',j %H?h5|ܓZM$H?h5|Ǵ|j"D}9\sLX~9 _?ȣ?C?br@,A W,>p?b5;5b{,AW,AVU,AVU,AކS,AFQ,AVU,AT,AT,AR,AFQ,AW,>p?Xc5XE=`Qs?Xc5XE=`Qs?;Ċ?Xc5X|,j&bZM?H5Ճ_ MPAPA9UAP۬kKS~~i~d=m䗱?lWgnΰ]a79lf.Ͱݙa2vcS~}a~1la4???l0A?*ۿ =LaS]aS]aaa'[8~?~?~?lsd+~d+~0:V0:Va0;V0;V{ԊfefefՊfՊ9???8????8??X[atateFيFيFيFيFي?L]avV0;.0;V0;Vajjjjjo jV0;V0;XI؊f5 [Z񿌭atatatatatatatatԊFيFيf5 [`&a+~Tv,$l嚄avP]Ԋfյfյf5 [젺`&a+~Tv,$llll8 llVI؊FيFي?Latat5 [`&a+rMV0;XI؊FكF_؃?|n??l?uXd~}a~~4k}a~}a~~*k}a0==avP9av|n?8H ݃ks= B F{{m.JH)݃P!@z| t?!5'{EM/T>b`Q|zEM5B ,jXTB5Ճ,jXT>|d>j7:&{h5ك_|d>jh5كV=@5Ճ,jLP=zEM/T>|`QS=fq>Y|܃,jX,?5ՃP=zEMed>jh5كZMg&&{*{h5كZMg&&{Ev|`Q]XT8.؃,j*{|zEMe5e{;=@|NcBm{=@|Nc>z~cd>j៦f{cd>ك1Xlo5Ճ,j7F_|`QS=zXT>|`QS=z|^=@|d>jh5كKZM/T>(h5كZM&{EM/ TS.׊J嵢|`y$X^+_6A_| @w>п26+c>Aڲ~{c>A'?ܠV7/T>GݠAV7m<6 @} UA#nx6/Sg>ƳAXTBj5ՠB7/5_ ,jAXT> ݠP Gݠ,jAXT>/t>jAH5ݠnRM7T @} t>jAH5ݠn_Y.ܠ,j 7/,An5eEM2 ƁEr}`QSe ՠ,jlXT8Y.ܠ,jAX,AnZM6&Ah5٠lZM6y4 @}d@6dB5r`>/6y @t>/6/ll} A~9AXll_} OA[nV } A[nΩs} fA0nAXT>}`QS EM5j5ՠ,jA;n5ՠ,jAEM5jZM6&<}d>j:3ț_XZTgvfBe/~5?La}aqaW[w_$ɰݜa8voS:tnͰ]a3ve0a~Ohdf~df~df~aJoBS~UqUqe~e~0a=LLLۿFF92A?2Ar9S~TqTqT~T~0;0;0;0;0;0;0;0;0;0:0:0:aH&A&A&Sqd~d~d~~gd~d0w$r;)A?*A?*A?*A?*A?l2*A`&a~,$LJ2&A&A&A&A&A&A&A&A&S~d~d~,$L嚄 avP)av\0A?*;k&AtS~TJwTJw,$LJ嚄 
avP)av\0A?2A?2A?2A?l?02A?2AX&a~d~d0%A&A&S~ST~TrM0;XIFF_?|n2A?0A?l?uXd~}a~~2k}a~}a~~(k}a0%= avPav|n2A?*8:AH w ^ NF'{.J:AH)P@z} tB ڋ*AXT{1Y>YP EM%j/5,j*AEM%J5,j*AXT>j2Ah5ܳV LZM&/T>j2Ah5L@}dB%J5ϥ_}`QS *AXT>}`Q\8AX,W>N5,j'J_}`QS J2Ah5LZM&&#V UZLZM&&#V f b>.,jXlAL5UZP JˁEM2@ |ce>p&'Ђ2*@ |ce>p&'Ђ2hXP@2h5YP柦*EMU/l}5{+EMU*2XTe>5U,j2XTe>| Wre>j2h5YZMV+V USHZMV&+V@|`QS m)|}*S>ԧ,2˫L}@>du_e,Ͳ},Ͳ} q>]/e@Y/e U [*|e@>Px}`_*^C؎@Kx}f>DZaX-5*^Zj65,j*^s5Um ,jXT>x} O(:^EMyLEEMUd>j2^h5ZM&?ZM/T1j2^h5,j*^܎5,j*^XT>x5,j*^ݎ5,j*^XT>x} ;^XT>x}`QS_x} t>j:^H5RMT@x} t>j:^H5P柦EM/,A5eEM%2ȁErx}`QSI ,j*XT9Y.,j*^X,AZM&*^h5ZMy@x}d>j㙽-*{Xm @w>9 {<~!1{#_}_H5{/l_&~ DnƳ&~ 븉nn[&~ O:nLXT?&~`QSMEM5j5,j{n5,jEM5jZM6&<&~d?j:{ߧ<+} o:{_u0{;L&/7oa1avq\ݚa4vg ۍ9Laa~7id~d~d~aʕk;ÔfwfwFwFws:2{...KAfAf_0;0; Ôffff-:*{?*{?*{?*19*{?*{?*{?*{?*192{?2{?2{X;atatÔFFF&FF?Lav0;0;0;a 0;0;XIf5 젲atatatatatatatatÔFFf5 `&a~Tw,$嚄avPÔfff5 `&a~Tw,$ۏ8 VIFF?LatatÔf5 kfrM/c~`ȴ0:a峑iatiaatiatiaavByÔ%Aȴ0;0:} -t>pRD/T1.:mHN@*} tB54i t ,jOMj:^}`QSMftBJ5,jXT>J5,j*mXT>}`QSi@ɴ}d>pύZM/T>j2mh5Pi@ɴ}d>j2mJ&Vi ,j*mXT>p?^~EMJ_}`QSiEr}`Q\8mXT>Y.,j*mEM;ȴ}d>j2mh5LO>ZM/T9j2mh5LO>ZM2if b>s`Q]1mXTB;5w,j d>^?=hXP@;z}b>^?=hXC_z}X ,j^Xloo_X,j75EM/T>z}`QS U,j^XT>z}`QS@>uz}d>j^h5Y#&^ȇ\ZM&VmJN||iT>,/AKPi@>Hvm6@>B} {hؽ v/T6oߌٽ?T>ݽXw-5@K} c{ӰZjv/T>Gbݽ5| ,j{XT>ݽ,j*XT1}`QS@QtBu򜢻EMOv >}d>j{h5ٽZMv/l {h5ٽ_cd>j{XT>ݽ,j{XT>}`QS ս,j{XT>{XT>}`QSj{H5ݽRMwT@} tBuj"5e w2ȁEr}`QSMf r>&5D,jX,A5ս,j w&V սZMv&j{h5SS1PGyQ(~_e<(~_e_>Q@wBEQ2<(~ O9ȣ_(~`QSQEME5,j*XT?(~ o<;XT?(5,j*h5LVQ@(~  ?ߧ*s0EQ ~3(/ ۽9L1a5vi ەs WV0:(0:(0:(f?LQaz..U]FSreeee0:(0:({atQatQff{z젢젢LAEAEAEA(AEAEAEAEA(AFAFAF+G2?2?2 {H )@9#fff4:LQavPQavPQavPQavPQaavPQff5 `&aT1?2?2?2?2?2?2?2?2 `&a,$ 嚄QavPav\0?*{¿¿`&aTw,$ 嚄QatQatQatQaatQatQ5 )?2?2`&arM(0;XIeV>F':|62?N0?l?}5N0?N0?l?x5N( y(0;&0;X>fFɽ@(AEGiQ=H%_d t?f:jE(~`QS5 ,jX,_(~`QSQEM 5ſPQEME5,j*h5@(ZMF&*h5ZMFid?j2EME_(~`QSQ*XT?(~`Q\8X,W>5,jG_(~`QSQ2h5ZMF&VQ UZMF&VQf b?.,jXlA5UPQсEMU2QCO!F/T?1hCO!F*7F5,j7F/loo5EMEQ*XT?(5,j*XT?(~ :r?j2h5ZMFVQ DZMF&VQ@(~`QSQ 
X~*|T1?>U,OS@>eb,ɜ~5aN?К0hM'sք9@kœ~5aNB5t s|9@>b*򈶋< b~e1?޲hoYamyT@{b*򼬋REM*ԢEM"JE5U,j._b~`QS@bt1?Td`Q}ڳXT*@&V@b~d1?jag1?jh5Y̿P@b~d1?b~ O{XT1?b~`QSXT1?b~ @p1?b~`QSEMyEM*BT@b~ t1?jH5].RMT@b*5L,j /,A.5eEM2ɁErb~`QS U,j*XT09Y.\,jX,A.ZM&h5Y,ZMy@b~d1?j_ rN?9 's~ 2cs~ 2at`N?/Ay9 ,jOb~ aț.>b~ ț.>b~ O#B.XT1?b~`QSEM*5U,j{.5U,jEM*ZM&L1avPavPav\0?*<kArM˜0:Ș0:Ș0:Ș0:Ș0:Șa嚄1at1at1FF?L1avPYav\0X&aL,$2 +`V>F'F'F'f'dFdB5yۍ@u?޲hol ,j*Ny V@Fd?jh5述}ڳh5l_8ed?jXT?,jXT?F`QS ,jXT?5XT?F`QSovWc)m0hǶ Ͱ=͚m|lsc2ی|`4c cA|b4ct۾|֏=:l]Aew>_{c|=`Pc|=`PcxzzzǶ/?F1(1:AmbPctx>(1; |lx>(1;;?fx>(1; |lcvp>fA=1; |>(1;;?F1(1:AA +bPct;?F1(1:AA |l1:AA oS |w>fA}Pcͯa>(1; |Ƕ{6}Pcvp,$,$.à >F1(1:AA |bPctĠ}Pct,$,$ܣ{4cv\`Pcvp&>fhM,$ܣ{4cv\`PctĠǶcĠrMAA |bP`>(1:AA >f4`&rMArMamg8 |[ |l;5|acbcɬwawam>  ۇ> |/,qܓxj-; E0@^|.jAڨa飆ܲRI X,~x1#P5a0E}> `Qs/b>X܇,j0E}> `Qsx 0V0VZM  `Qsxp;] `Q\hf0E}f0E} pxsx>sx>D,}o,o5fxvio` xК`oAk #/xhoAk & Z 1}fDE}`Ϟ,wG y~@D:13!]\Ã<(y@9' 5 h"A`""px75Dă6&"YMD<Üx'4_eYMD`Qsxތ1`Qs'",j5}">`Qsxjb"AytFRM@<ߊV @<ȻZx+W  İFѰ^ϰư%˸1l?p4l3lga*ۭK3lwfخ̰ݘô1pbtF1rb İ{ô1;{ji!b=1a~:̯X…a|p!b0VqjZB0V1r!b50:ȅat oA.D \8L5avP "avP5avP5avP "Zf1j!bTuB0;avP "AXA.D \FqX  "A.D0:ȅat  "w/A.D \8L5avʅav,A-D Z߮^,jj!"Y.XBąZ,jj!"h5h5h5h5?h5qV V V sV Evąf BD`Q=5e"_EM2 ք 1ք j!"Кp!"Кp!"Кp!"pr+Кp!"Кp!B-D [~BD`QS Eƅ ;" EM-D5;"ZP EM-D5q"Z,jj!""@"&"&"&"&"ȏ"&".BD BDBDBDBDBD`QS EM-D54545C?!RJX*E`qH!RJȧȼJX*E`qH!R7Ry2RU@]p"q"q"q"x"O@x"q"q"qBE'+P'+H>"gq,-,y{{{{<=@'EŅڳbYZjY5|{EM\ ^=BEM\ڳ,jj"g8v] ]\fE`QScBLF ψzCBmh74yF@89wQ=Ǘ/?[! 
.o؃Byd[!~9B{8/_/ˁ$5}@OrIy$q$OS#' ݄$~es$Яl\}I' ,jj$}I' ,j:5OrI' ,jjB5OX>I>I>I Iy$jz&I x$?xO\ et0Ɣ ט2t08]2lw31NL08]21N .߱F9]2rd~ft0:Ô f5]2jd=a.f f f5]2jd~cft0;avP%2젦KAM .f5]2*<rdt0:߻.F9]2r0eAN .F9]2lQ=rdta]N .f5]2l3}KAM .f5]2jd~[t,$.f5]2kN 嚄%젦K~KAN .F9]2rdt0:at% Kt0:at%`&t0;XI8]2*>kN ʺ嚄%젲idT}T},$.fuf5 KAe݇rMat% KAN O& .F9]rX&t0:at%idt0:4]2*>kNVI8]2rd~9yaqd~9yaqd~q'Ig=$idC8O2!'fIAyAϓBϓ@y ' ' ' RIϓRJϓ\.o @@@@@>@ @w@h5@h5@h5@h5@XI`QS $EM-5?M͟M6 ,jj$aM6 ,jj$Ϙy$aM6 ,jj$Oy@ZMqZ0Z0Z0 Z0Z0Z0\yII Ɂ6 !w򈱇M-M-M-My&9lXT@ZMZMZMZMZM\>9lh59lh59lrV&V&EM 򼼇M6 ,jj$aM.԰I`QS&EM 55lX԰I`QS&EM a55lX԰I`QS&&T&T&T&T&T&T&T&T&T&T&T&j$R|`Q\yraf I`QSf I`QSf I`QS 5lXT)>R|`Q\y$af IIɅ6 6 6 6 C6 6 6 6 6 ,j)j|xޓ(j%п2N'Qy ԓ(q%п2NO'QnI 'QnIf$J`Q}q%IOJ OzL%'Z=rTyc*<1@cJ yL%9~؁ >>y~7w.,Q}o֟7w΅ s6wgw'/_͝aaMa!a7A>lf.Ͱݙa2vc0͝a+q~a0:͝at;pisgT|T0m jahg.柋؇ig.bkS]a|mrg_ߎ&zS{_&zAn oBF3l8rgVabf3lsz⠶z7Am kf3jgV0;.0;avP[=젶zAm bF3rw2zAn 9L]at[= zAn ^ Fs`;[=젺`;[=젶zV0;avP[=젶zAm 嚄[=젶zrM­av\pgV/V0:ȭat[= zAn F3rgVaF3rg,$f5 zAm 嚄[=6rM­avPig0;av\pg0;XI3j`,$F3rgVϰ0:ȭat[=kn FszAn 9L[=젖 rM­5 z~7w+G7wh+G7w'h͝a=#n {7w0psg 3#n M ܋ћ;*6HHH2 ܹP@z͝@͝ ,jO*o5P;E͟6wܹP;EMm5XTl:͝6w.N`QS;EMm5XN`QS;V;V;@͝ h5h5s6w&7w&7w&7wV;V;js'͝6wjs'ijjsBm5XN`Q\xs'Y|XN`Q\xs'͝ XN`QS;rs'jrs'jrs'jrs'jrs'px!jrsBYZMnZMnZMnO6ZMn5e7w2;EMY5e7wP;EMY55gXlA ܟq ܟq ܹP;6;6;6;V6;6;js'oR  7w ,j7)n\ޤXloR ,jjs'ޤX΅ ,jjs'͝ XN`QS;EMm5<ȧ<h8sqqq.8N 8N8N8N8N`qH8~{8N`QS8EM55X8N`QS8j'qq ,jj'q@>qq ,jj'q@>q9h59k<h8h8h8e<ȧ<h8h8h8sr88||8r&jr&jrB&o&oy7<@ S޿ ܿ ܿ ,j45ڿ YJ5~,j*XM`QS7Y˅|5^w ,j*zX,{%}`Quj%}`QSf#.EM5^w \w \wP.V.V.V.6lv>U4lwel ݆ۧniGݨaPv: m:L-at-a6rvDl l ?:L-avP;A>L-avP;?A5X0>`ߧ0>xw<4,xO2y`9Lfei6XA6Xl l ?%F`F`9LA5XA5X0;}j j ?f`f`f`fxTe,`9L avP avPA6XA6XA6X+`F`F`9LA6XA6XA6XCl l}`y0;}`y0;aj j j j j ?8f˙ avP avI`f˙ avP _0:0:0:0:0:0:0:0:aj l l L0;X$l y?3 ,vL0;2j0;`92j0;X$l y?3 , , , ,ٵat at ʙ at at `F`F`9L avPr&ar&a嗱rXy=be,y,x`u`eCy=bel0> `g,`y=beat @ZK>kH`Pˇ, @K>H%` n\Á @˅Z>X,Tn5|B5XZ>XTB5Xj5` ,jjp`QS ,XT%K`QS ,EM5X&,V }8jr,V @˅jZM6X&,V } >jh5`P ,EM5X ` ,jXTB5Xj5` ,jc,Erq%K`Q` 
,jr,EM5XjKd%jh5` lV 7lZM6X&,7Q&,Ev b%َAl57,jc,EM .T%A`QSyf;%pKM`B5Xm,6 @K@KM`B5Xb%p\Rl5` ,j,,Eb%K`Q=` ,jr,EM5Xj\K`QS ,EM5X&{(\J J'{(﮸J'{(z凉G {(~԰XT%J`QS=z(XT%J`QS=z(EMP+P5C ,jXT%_r@P&{(ˁV@XJc,%1/8ȯ8hK }>R.@XJc,%sq,@R&c)V G9j2h5K 䬷c)u,%j2rb)u,%j2h5K ,j*X K ,jjr`QS{5K c)*XT,%s5G9^K ,jj򁌥ZMR&c)V@H`۪0X* #XP@H,Fba$#.Fba$7 #FRa$.rڅ #EMF*BF #TӅ@H ta$j0H5] .RMF #TӅ@ȅ*5[=Y0raygH`Q3va$E؅v5;cFڭ~ #EMV,jjz`Q3va$H`Q3va$j0h5YP@Hda$j0akF& #V@Hda$H`QEMG} f~$3Ώr@_lG} d#u~$!G.l/̏9H`QD2?Xl/̏Z?ʏr#3s~$<ΏvG9pH  䘙#q~$;?r#q~$;?h5 ̏ZMG&#V@Hd~$j2?h5P@Hd~$H`QS#EMG9H`QOQ@_2G9H & %c~$З@_2G9H&̏iH&̏\H&̏5{0G.l ,j` zw2'͎@NǸ7;9gz6;.1lv>y5ls>2lelv ۇzZWiݨaPv: m:L͎at͎a0:fa)<11l05;AfS05;AfߵzVcy ZsScڝ;̿k=aD7;{iw0y y y ?FF&71qvy y ?8L=avPsAff6111jw0;0;X{qzzvy y yV{11qvy y y ?FF8Lsޛ=avPsޛ=avScTcTcTcTcI0;X$y y Lž0;X$y y23 {3 6;wfǰ}f0͎a~fajv f0lv 5͎avPjA7;in EtB- nvF7;飛d t#RqK7;ijYm`QXԲ ,jjYm`QOSOSS͎EM5;ZVXT#fDžjv5,jXT#fG`QS͎@fGd#p lv\fGd#jqV͎@fGd#plvZM6;.T#fG`QS͎}B5;jv5P͎EM5;fG`Q|,jX,7;jv\fG`QS͎h5lvZM6;&|dBlvZM6;&7&Ev b#َAlv5?َAlv5EX:51Y;,r\,G/6rb3,Y@_lf9.T#!f99,GCr5sY sYf{1XT#ٞCr5PYEMe9.T#,G`QSY@DƅJd NdwDƅJd NdwDF`jvo{&250'2}3۞@Ld5,j*XT"B%2Jd5,j*XT"#DF DF`QSEM%2JdKNdDFd"#_q"B۾P@~Fd?#j/1~3&V@~ƅgZM3&VgZM3.Ԗ@~Fd?#gr@~ƅgr@~Fd?#~F`QOST?#-EMm ,jXT?#g\~F`QS@νXԖf{Ag?#-h5gZM,R-@K͖EfB,-5[-@sehٲlYղX[6v"E`QS-Z-@E t"jeH5ݲnYRM,[T-@E t"jeqZEM-,jwnY\XޥeX,Rݲ,jjQw`QKu"E݁E.-u_E`QSZX,Rݲ,jeX,RݲlYZM,.T"jeh5ٲlYr-@ɖEd"jeh5ٲ,jeXT"YA`ZV0 +t#m`ZV0 +Uw#S`ZV0.l/` F`QXl/`G9w~p\>'29DF jNdrω@9ȑ/'29WDF ?`w"B%2Jdv'2&V@DFd"#j2h5LdZM&2&*h5Ld5,j*XT"#DF g?,j*h5d?#}DmceHG|l7ǶmFی&6>ͦa{:ct 鈏moHG ~"1:tǶm鈏}}྾v؞~鈏ɉ]ɉɉ]ɉ]?kwsɉm} $']?HN|"9A$'>F HN ׻~ɉ=9ɉ}cvpON|ɉm}ɉ=91;''>f{rcvgGrb؞ܓz׏A$'>FDrbXyώ HN ׻~"91: >Drctɉaz׏ɉ}`yώuN`؞ܓ{rcvpON|ɉm~3 {rcvI,g$'>feHN|"91: HN|"91: HN ۓHN|"91;X$HN|3 r&Arcvp_1;X$HN|뿇ɉ}cvI3 r&ArctɉA$'>F6y1: ʙɉA$'>F''>FDrb؞ܗ|3 !1^69> 1^61^=㽉<ǶȏDb؞M!>{ymcbԏA!C< >H>Hx6* }TmA*<ăRy}/Ryi^Cy^=`Qsߋ`QsCx#C!,je!,je!sjG3 [ [ [ [1atatʙatatԪF٪F٪8LavPLJr&abQ 1+Q 㿝=a$0Q Ű}s?(Sb?(cb>|Xy 
`b&at=@ZE>HݣPX{=@E>UH%ݣQ\E=@ŅZX,Q5B(ZXT"E`QS=zEM(Q5գP=zEM(Q5գ,jGh5٣QV= գQZM(.T"jGh5٣Q؁V=@ŅQ5գ,jG^E`QS=zGXT"E`Q{ܣ,j{EM(E`QS= գ,jGXT@(&{V=@Ed"pRCdB-QZM(&{&{Ev b"َAQ5$>َAQ5$Ÿf;GXԒ@E%@_O(.T"ד=@_O(}=٣ܿ?E'{GG {a"Џ(QŅQE`Q=jأ,j[IQ5գ,jGqzEM(Q\E`QS=@6DlC6D 6D`wj[toA!} od [m@߂lCd"з EM!jC\6D`QSmEM!jC5ՆWv܆,j XT"6D`QSm@~ymنlCnCZM!.T"޲ hoنlC?m@ hoنlC[!.T"޲ hoنlC6Dd"j qZM!&;v"SnCZM!.T"nCZM!&EM!jCrm576D`QSm@ qEM!96D`QSzm6Dd!КhMiP@kNC5a!4Zv ; Eu9DNC Gii: EMu~; Tӝ@NC t!jH5i4RMw; Tӝ@NC tBuZX,is!YXBfyNC`QS }; EM-PB5i,j}45i,j}4ZMv&; h5i4ZMv9bNCd!jh5i45i,jXTBu&#uBEm#Ut!`8>Fm#v!8>F.lٌ8ȈC`Qf3Xl-@vm@;Zvڡ?څ| dȱA,9Ņڅ1cgy $p"dȱA,9fE gy\ KdXT".YZM,&KV%@ɒEd"jdh5Y,YZM,.T"jdh5Y,jdXT"E`QS%@θdXT"Ed"!Urñ@_O*}=d"3KUz2VXb~0Vqb~0VG cTɉ@~^D QPɉCurb}0%'?Oɉa9a/crb>4lv>b2l~:Lf e6lWmnaJN gLN LN LN LN LN ?:LɉavPfA=LɉavPfA%'A%'Oq8SԪavPavPavPavPavPfSbԪavPfA/A/ퟳ_U   OWA/A/_ _UϬSbԪavPfA/A/_ _ _ Z5;*13 )1*1j0:0:0:aL0:0:aZ5;212121l~dbd0f˙avPavIf8Lav8Ű#avIff˙avPavIf˙avP_0:0:0:0:0:0:0:0:a_ _ _ L0;X$_ _ L0;`901ja_ Z}>j0;X$_ Z}>3 VL0:0:0:Ű}ndbdr&abdbd0/A/A/SbԂataayT0=ϛT0+LU ۧ^a0*^ab*퓫5Sx0U1:UH 'Z)SjGk ]t"6:UHSҩ 5^:UH3P;ZITE`QS;Z/T"EM*JU5JU5PREM*.T"TE`QSREM*JUZM*&SDkdB*&SV LUZM*&Sud"j2UqREM*JUù*UXT"TŅJU5,j*UX,*TE`QSf98UXTB*JU58@TEd"j2Uh5ܿh5P&SV@TE@TE`Q,jcSEM,jcSEM폿POSS6Ev b"ד d"ד@_O*oOz2UTŅJUQTE@?jf{0Uqa{0UXl*JU5ۣR*UXT"TŅJU585@SX?5@~95@eQ.c"wkkX]E2(}FXT"ŅQ5U,jFXT"E`QS5@~5jEM(Q5Uwi\85@E E<33B*-5S@KTE TE ѝLUZj*-5S*UhLUZM*9TE 2LUZM*&Sv"NUZM*.T"cNUZM*&SEM*JUrXΩOSS@N9UqREM*9TŅJU5SEM,,jt*ZY| dF\lD/dFrو@_2f#}ɘ,Lو@:ȱVg#4a6"Of#FBg#Tو@lD t6"j:H5FRMg#Tو@ląF5:Yރ9qaylD`Qs6"ԁEوVP5{0g#ZA}EM,j*X,,j*X,FZMf#.T6"j2h5Frو@lDd6"j2h5,j*XT6"ląF5455,*8horp"28ho䘶8tp"28qa{gp"28Xl N5۫>*9TEc"1UhL;ci w,9SE g A?hu,u,.T"jcșBw,9E }ܱ;cXT"#XZMv,&;V@ɎEd"jch5ٱXZMv,.T"jch5ٱ,jcXT"E`QS@NǸcXT"E`QE MԈ#،\b3rE Gf"@?P@?s@E ?s"c,\\E:lS9La$}|j>0lPN2F.Sϓ)ôPvn۰]akvՆ0z0:0:0:0:ŰfPvBfPvTbTb)*r1*rq\ \ \ \ Z({"Z(;2r12r1lTxd0-FFF<2r12r1lxdbd0-ffv"Z(;*r1*r1lwTbTbTbBavPavI8LavPavP eAF.AF.AF.+gF.AF.AF.Batatata0:0:aZ(;3 #"`90r1*rq"Oq80;X$\ \ L0;0;X$\ 
L0;/cbdbdbdbdbdbdbdbd0E.AF.AF.r&ab,gF.AE.r&abavIf40E.A-Mf4},gF.A-Mf˙avPKӇr&abdbdbdb>72r12rqX90r12r12rq" # #)r1j5/cb>:7555)aAFFϧV R R A}45":Hq6E)iA}/J:HH)PX饃4A 5YR5B)XT" E`QT"MEM) R\ E`QSAEM) R5 RZM)@ Ņ RZM)&*Hh5 RZM)@ EdB) R5i/T" E`QSA ,j*HXT"Y=R5AErq" Ņ R5,j*Hq VA@ Ed"j2H j2HqZM)&VAVAf;1HXl )Xl )E͟OSS[ZM,mS*eh LY,H,mS*ehܿ#S~H1eXl),.l),!ŔE`QS)f{H1eXTB,JY5P)@ EB R\ E H E)}0H{A@~A@+ R^a" W[,j*HqEM) R5,j*HXT"vq" E`QSAEM))d"j2H/8Hh5P/T"R3Hh R R?R3HqA@K Ņ RZj)-5VA@N:HJ)&VA@ E y4VA HVA@ E`QSAls"ijjr`QSAtB) RrA!5 :EM-C>q@q q@,>!98D V8Dl!}g3 CRM!Tq@8D t"j:H5CRM!*XԺfy?8ąE~qE5!!ZX,,jj]C5.:8D`Qr"8D`Qr"j2h5Pq@8Dd"j2S!&Vq@8Dd"8D`QSq*XT"F^`QS@ye"U\ZE`۶UFbB*}#V@pViD*}#Vqa{ZE`Q`"ZE`QW1@{Ee"`\[v.-;@*hoge>B39|F ' so燍|F|ƅgx39|F ޜpp>B3gr`@|Fd>#j2h5gZM3&V@|ƅgZM3&EM3g5,j*Y3g5,j*q q>#3.gb3|F/6r>#@_l3b>B3b>#!G0)#qBE0?'f5lI Ǹ2av5[x     )S c2avPhS c2avPavPavPavPavPhA2A2A2A2A-=LavPhA-FF2>2q d d d ?FF>212q d d ?eH%]\ ҡ@ŅX,OO.zBEMmx ,j*tXT"E`QS65,j*tqBEM. ]5,j*tXT"j2th5܇H&C*th5 ]\Ed"j2th5&CV ,j*tXT"pyPBEM..T"E`QSf98tX,'. ]5ǡB*tXT"Ł ]ZM.&CV@EE@Ņ!h5 ]ZM./ ZM.E`Q,jj|`Q,jj ]5C>51v F5aBU0 +ք@k FU@k F5aBU0 +Ec#O0V0 ƅ F`Q=X,jXlO0V0`\ F`QS*e,[eș,m<[6-@ϖE Eg"ƳehٲlY(nY5ղP-ZEM,jY5ղ,je/eXT"E`QS-Z[eh5ٲwܲlY\V{/T"3th ]K]rܡ@Eg"3tqB6@Ed"Ӹ]rҡ@Ed"j2tȹ\.9EdB.9Ed"j2tXT"E ,jjs`Q0,j*tXT"]\E`QS@L:tXbf{Ag@F'OatBE'Oat"S]D A(]D2F'~NRMG'Tщ@D tt"j::H5NRMG'.Tt"ρEщ {G'彉EMm},j&N59Yޛ8:X ,jjs`QSщfyoD`QSщ2:h5N\Ddt"j2:h5Vщ@Ddt"j2:XTt"D`QSщ ,j*:XTt"D`QYA51)8 F}` F}`rT@΃;@٬`r@*; !V0+EMU0g#޲hoY䰘+@{ Fe#s`\&Ldd"B%29yDF g'ɣCqo?JdvDƅJdv`"#Ndr(Ή@N99 '2.EM%2JdDFd"#j2h5LdZM&2&V@DƅJdZM&2&EM%2Jd5,j*!'2Jd5,j*h5ȸP@N8DF/6Ldr̉@_l&2},{c"#!&2r"#;ȡ'2.T"#p`O 7)1li '6)_Dư}l>0luv"=k8l0%2A&2A&2A&2A&2A&2D0;uD0;uJd Jd Jd Jd ZG{ZG;2121ld"0FFFz?2121l~d"cd"0ff&zZG;*1*1l$tT"cT"cT"c:avIf0%2A%2AFFF8IFF8LhA&2A&2A&2!  
i0;X$Ld Jd LD0;DaJd Jd Jd LD0;D0;X$Ld Jd LD0;X$Ld Jd2&2A&2A&2A&2A&2A&2A&2A&2A&2S"cd"cd"c,g&2r&a"cT"c,g&2A%2r&a"caJd Z>j0;X$Ld Z>3 ֿLD0:D0:D0:Dư}Npd"cd"r&a"cd"cd"0%2A&2A&2S"cdb]bb]bb>q:.1gqrssI9at9a/s"p#9@ŅHYG,@*E tBq YL,.rEM, ,jjk`QS9rEM,Y55E`QS9 ,j*gXT"E`QS9rV9@ɜE>h5P9@ɜEdB,&sV9@ɜE>h5Y\E`QS9rxY5,j*gqrEM,Y5˱9f98gXT"Y?Y5P9rEM,d"j2gh5YZM,@ɜŅZh5YZM,@ɜE`Q,jcsEM-,jcsEM-P95}f;1gX2@EbB.[v@;E@;EbB.[v պ㍭f{uqa{uXl7.j]5㍭揿fE`QSZu,[u}.m<[6@E HEg"Ƴuhٺl]+%n]5պPZEM.j]5պ,juouXT"E`QSZ[uh5ٺ䰿[V պ[6@Eg"&p"n]x.m<[6 պl]x.&[u"Cn]ZM.&[VguF.&[uG.&[VZEM.9iE`QS?7sB.j]rѭ պ,juG.'Xl/l]bŅ*V/bE` X{Ŋ@eXL+}X{Ŋ@+,VB+yX{Ŋ@bE t"jXH5].VRM+TŊ@bŅ*V59YaXqaybE`Qp" ́EŊ645; +|EM+*V5; +*V5U8Ŋ@bEdB+&VŊ@bE '=],VZM+&VŊEM+*V\bE`QSŊEM+*V5U,jju}`Qd\W2.T%#З@N@13W2}X,c%@?MXܸ+EMU2d5U%PsB#jNh\؞CLhZj&49;ƅjZj&49;ƅjZj5b_B59F 90ix?k^9?g{}@sA<59F g?׸+k5,jq V}@ɾFd_#jh5kZM5&h5k5,jXT_#F ǒ,jXT_#Fd_B59F/6kb45}f_#\}@>\Ϯdr\Ǖ U>Lmc@ǶQa{%c4Ƕmۦt>͏]JǶ鱏mct> 7~l3c<ئ6 핌AT2>FQD%ctAT2>1;o7+Fڏ1;W2>fJ^7~1;W2>fJྑv^7~i?FQD%c *d|1:JǶAT2>FQ6,1:J *{%cvpd|lv^7~i?fJ^^+{%cvpH1;X$d|i핌1;oD%ctAT23 *d|1lH1:J *d|lFD%ctaFڏr&A%cvpd|3 *{%c^+{%cvIP+LJ^,gT2>f˙q*d|1:J *d|1:J *J *d|3 *LJ^,gT2>fJ`91;oW2>fVr&A%cvp*1;X$d|[?f˙AT2>FQD%c( *d +gT2>FQD%c^D%cta{%2)> r~ea{c~(S|em3eLmv2)f7cDe$QA>KTx6L }TmA*2ŃRe}%ReiJ{^xpG 552ŃEͽL`Qs/Sh5QZM)(Se&ZM)(Sh5QxjL),je{m3p/S5A| =Z'&O Z'ă~!>{|"=xٞ`O6l ۇav{.    SbavPcSbavPEavPEavPEavPEavoqPEavPEavPcSbTbTf=vatEatEal0:Ȣa;h1h1h1lFYFYS ,Z ,Z젊젊ϩSbavPcA-A-N*Z *Z *Z ;3 젶0;0;   3   i{0:Ȣ0:Ȣ0:ȢŰ}$bdbd0-r&abTb,g-A-SbTbTb,g-A-r&abTb,g-r&abT◱h1h1h1h1h1h1h1h1hq  `9h13 젊`9h1h13 젊h1-h1lef˙EavPr&ab6avIXFYFYFY,Z ,ZV$,Z ,Z ,Z0:Ȣ0:Ȣ/c-ᗹO"RQ@ZZB =u-!p %\ZB>#W! 
&4p %5&4ZB`QS}FBZXT-!ZB`QSj EͿMͿMM%\ZB`QSj EM%5UK%ZM@Z…%ZM&k h5YK%ZM@ZBd-B%5UK܇ /T-!ZB`QS UK,jXT-!ZB`QSj EM%5UK,jXT-!Z%ZM&k V@ZB>h5YKP&k V@ZB>h5YK,jck Ev b-!]Ev b-!]XԮv51j Ev bJ!pʅJ)!S v)(v)@;Ĕ…J)!S *S 7J)\oL)5)R ExcJ!…ZBd-B95ZBd-!jh5YKk V@ZBd-!jY~%\ZB`QSj EM%5UKhk EM%5UK,j!d-!j)k&k V/B nR,!}0{)@;[}0{)@+L)\B&S V)@NBh5RL)\́V)@ɔB g"RR V) RXS V)@ɔBdJ!B R,jj-s`QSkJ)rT) R,j*y5Z| ,b\p,b\ ].v @'\|\T@rA t j\H5].P5KH %5KEM ,jH C{EM *5KEM *rAd j\pV@rAd ӈ.ZM &V@rA`QSEM .T rA`QSEM *5;rA`QS@ʺ\Xvp__Q UK䰘k @KZBf-!s%rX̵@KZBf-!R9F-5k w%5ۻT UKԬ%Zj-5k 3s-!RhYKԬ%rҵ =RBK͔B MR) R(S tJ!з S sJ!PN)r[R sj a\ڰ}b>3lJe% G҆aw"EME \@`QS"EME 5,j*2XTd @`QS"EMEdd j22h5 ZMF@h5 ZMF@@`Q,jc#EMm,jc#EMmPp5;َA 5,jc#w3.Td 12hC C !F.Td 12hP@?ܿ#XTdxcd o 5,j#EME.Td j22h5P@;2h5 ZMF9@dd j22h5 rޑ"*2XTd @`QS"EME9@`QS"EME r6ޑ r8ّ@@<A`ja* X A ] H X Ao$.T 7 V@A ] 8@Zh5Y ,r@Ny@h5Y P@z@h5Y ,ZM*r+5^9A`QoSS*5U  V@_.T* W@_~S*N8<p*  p* 3zNRMSTө@TJ55Y^˜ 9X,eN55Y^˜ ,jj k`Q9X ,j*XT* T@`QS@:h5 LZM.T* j2h5 LrЩ@T@d* j2h5 ,j*XT* TJ5 ,j*XT* T@`QSP5 ,j*qWZ@X ,jjg;c .T cfFb 7'3s 7'6< liOav=Latatatatatag]0jO0;=]0jO0;v0;v0;v0;v0;=v0;=l l LA ӞatatataX0:v0:v}dd`d0 f.f.X:]0]0l~T`T`T`ԞavI.f'0 A A F.F.F.8I.F.F.8L{BA A A s!  ]03 `9]0]p`9]0@0;X$l j Lv0;X$l j2 A A A A A A A A S`d`d`,g r&a`T`,g A r&a`T0 A A r&a`T`,g A r&a`d`d`d`>8]0]pX9]0]0]p ̭@F P}Z'p խ}B- =VK2A $3p MV@>O\V@ tj5 ,jXT+ %EMj\%EMj5 ,jXT+ jh5 &[h5 l\V@d+ jh5 g&[V ,jXT+ p{PZEM.T+ V@`QSZEMj5 ,jXT+ V@`QSZh5 lZM&[pd+B-lZM&[pd+ َAl51i51i_V@`QS˴ZXl 1Xl B\V@b+ h ܿ h Cl\V@b+ pZ~'+VV@`Q= ,jXl7j\V>.T >ms9@Β;h}>ms*w 5,j*XT @`QS9@;XT @`QS9r7w@&su j2h5P9 ,[}#19@);)Q}#19@H\@o$&sV9@9p sV9 B9j2h5sr j2prr j2h55 sEMP,jjr`QS9ߦaXT @ Ǫ8{/@?п>_K@;{yg{o Ӽ?SH5?jzo ?E,j,jj;i`Qxo`QSIE{Nz5?EM,jjo ?jro@ɽjo@ɽV{9V{&ZMX5B,jjo`QS{X59EM59jo`QSy$s%0pr$s п}29@9:I2}29@ߟ5,j*pa{@`Qf@n䘙7[nho?cfho?r@N@z(ho?'_[ +c+B9aV@`;@ +9r~pG@q9?f+ V@`Ql5 ,jp [V@V@d+ jh5 lZM&[h5 l5 ,jXT+ V@ ǯ ,jXT+ V@.TH C mC6uG9v%ID2KjV2 w_vd{9j@ x>$hxH &@`1|+ [Vm\Yaxn m>[ac>G:L7틏aLnؾ/_ܰ}1l! 
q9l7.at7Ӎat7AFyc`䍁at7tc`TsT0ffuc`ԍavP7Affuc`ԍavP7AfFyc0 l_d?jVM@ɦd?p"ZM6&XT?@PMEM5/T?`QSMEM5j5,jXT?`QSMh5lZM6&n7jׁVM@ɦd?pZM6Ħ`Q=,j*zXl 6^_`QSׁE b?/&E b?p.T?hClYBb?vM@;ĦjM ,jazc?پ,jXl_ol5?@ lrlc?Ђ1h<@ lc?Ђ1XS?Cmg_l`QSEMe5nEMe5rBld?j2wl߭ 1:ᥳ 1 1 1h5r2cBEme@\e@2*rs2c?XS?2 .5I,j*XT?2 wH._2`QSe@\?e ce@?>r?lv?lr?BTe@2*5 ,jaE/ 5˿t\,j*X,q?ׯ.5U,jXT?2 Wz.ZM&Ve U,ZM&\h5Y,ZM&EM*5UPeEM*5U,jXTA92`QSe@H],jXTAB*(5Ug/g,OSᩀ@Tx* 5 OOEOx* _<pN SੀN5u*ੀf{T<O[ <hoy* 5 <hoy* T@ >p9@|* - O\SnT@`ө@y* B p˧9MT@ wگYll ,jT@`Q}y* SEM 8&OZM <h5y* jT@䩀@SV.ԩ@SV:XԩN5u* / ,jT@`QS:h5ƅ@ W#AZ&#hMxG @&#hMxG T@ ݧ穀 u* p;/mT6?P>>P>>MP>M> P>M>.émmۓmma{c N O|T N|T N|TǶİTܳS{cvp?1; O|>f,~*cv O|Ya=1;g9?Fq*ct>s>Fq*`؞ĩA ĩmA ĩmsA ĩa{cvp?1; ضt ܳ{cvp?1; ضoO|>fS{cvI>f,T~*cvpr~T N|T&A ĩa{ct>Fq*ct>j>Fq*ct>f˛>fSMSaT~*cvIp*cvp?1;X$81; ,o ,o O\S81:S81:S81:S81:SS81:SMSMS&T`yT~*`~*cvp?1; ,o O|7 N|>f˛>Fq*ct>Fq*cct>Fq*`XyT N|TTe?F{mt Wmt,FREn= ??@Q=6zp?Xܣ5E=`QߦߦG,jQ=`Qs?XܣZMD?MQ&ZMDVQ&ZMDVZMDQ{pQ{ܣ5E=`Qs?Xܣ5E=`Qs?Xܣ5E=`QsDAVQ&ZMDFZMDVQ&ZMDFZMD,jg53Q{f{!`Qs^Q{^~ٞA?Xܣ53QF p?h!Bx!B?p?h!GnʃE=ؾپG,j7D,jQ?hܣr Q-Z0DZQ-Z0D`?XۣrRE=G,jQ{G,jQVG,jQ{AnVQ9vUAVQ&,({ UA'UП2 )C?p/?OZM(?)>mr\ C?p/?鐊>}(?Xۋ5p`Qs/?X5Eͽ D*mj{|y r54TAs* 9)GPiTA~K>OiTA4TSi=`Qss>X,vP?AiJ?XÜ5˿9,j;(`Q_M,ji{[;H?h5j"H?h5j" vJ?h5j"DA4E=`QsO?X{,ji{,ji{A@,j=`Qs/?XZMH?ȭ݁V :+[<+[=ʿ~AntA}}&A~_'/8lzؾ 훙a0'fCa\f> 7̰}5lavA'8L y'Fy`}at A'E}avP-avP-t`T t}avP A'fu`T t}avP A'fu`T 0'ffFy`}aDi}Fy`}at äat A'oA'Fy0@A'fu`>8L A@A@A'fu`>fu`}avP A'fu`T 0'fu`T t}at A'8Ix`}at S t}at A'/A'Fy0'f˛ A'f˛ A'8L A'fu`,o'fu`,o'fu`,o'f˛ A'eO0>0:  O0>0:  Op O0>0;X$O07  O07  O07  Op O0>0;X$O0>0;X$O0>0;X$O0>0:  ۇ  OpXy>0: 2_P };HG}I pN^fྜྷ $z1ʅ$H[}I pRX_PT> $XTB3KEM]oSS$X% uI KEM]$h5yI pIZM^P&/ ZM^P&/ ZM^$O&/ ZM^P$X%}u. 5uI K@`QS$X%. 5uI KEM],j@`QS$X%yI j@%@KVk% $h5yI j@>w $Xl ^,jg/ 5,jg/ 5ھPJm5,jg/ 5uI ٞA$/%@;Kv/ k@;Kv.%@;Kv.%@@5uIKEKEM],j7^<hx,B ! C<U! 
C<hx, rcEM P:XԱ5u, c;X@`QS:XԱrcX@䱀@.O}, jX@䱀@cE.ԱcE)㱀@i}, cF )㱀@x,B &ZM m\t㱀@cX@ '<>hx,B ǿ> }<XӱzMcEMA5u, cX:p r{G=@@ JȵS{@ ?er;{@ =@{@`QS!f\X{E/,j*X,=BE/,j_X=5u 9h5y jh5y j@=@.| j@=@{VX=\{EM,j@`QSX=EM,j@ ǜXT>8|XT>8{V&r1{V&ZMŜZMP=@{EM,jg5uB,j@`Q=x =f{{E =@.| j@=@{\@=@{V9= 1Vc@n|, 4 O.Ա@n|, ? rcܠX@ C>/  ,jX@`Q}aѱ55u, cX@䱀@cV&ZM <h5y, jX:h5y, jX@`QS:XԱrcEM ,jX@`QS䱀@cX@ W>hx, &X@Mీ@}, &X@Mీ@~IX@ >r?\c_6h3lCvه틚apؾ/!af>2Oi#a}_8l_ ]qav?0:C  灇AFyH`!at3tH`TsT0ffuH`!avPAffuH`!avPAfmeUm 4m@6c?ا6`QSm@d,j*XT96`QSm@ePm@Ɉ~ MGk:ȝ#\8ȏ#88ȏ#\9ݘ#88ȏ#T ,jX,Ԏ_X,jojGG57#EM#"*XTD?4G/TD?~ 7hZMF&#V ZMF&#ܠ9h5ZMF&#EME5ѿP"EME5,j*XTs7~`QS@#,jXTsBEj5 ZMFAsD?j2h5ZMFAsD?j2"ܠ9h5,j*Xl F_~`QSf{1a{1Xl F/l F&;h}>v93s'?h}rN^>vt'?ʰ~ g.r~ '_.*r~,@]˅@}\,jXTa?پjTa?C?EU~`QS h5Y,ZM& V@~da?j V@~`QS EM*r~`QS EM<9s?_hCl!ct?hGw5;}"wzvׇ탚ap>Sؾo Kak>0eLͰ}5l 3l LLgiatiatiatiatiax0Ae0Ae0STsTTTTTsTTTTT0Ae0Ae0AA틩atiÔFFF句AAatiatiÔffOSTsTsTTؾ9fffff˛2)?*?*92?2?2Xy0?2?22 0:40:4aJM40;40;X$LJ40;40;40;X$LJM40;SavIf˛iavPi_40:40:40:40:40:40:40:40:4aJLLM40;X$LJM40;40;X$LJ40;40;40;X$LJM40;40;X$LLLLG 7 C>B"}d }rB);Owv(BR> 7CtB"n v,jXT(?P~`QSBV@P~>Z _P~d(?j2BV@P~d(?pZM&C*XT(?P~྅PBEM/T(?P~`QSBEM 5,j*XT(?P~`QSB2h5 ZM&C4j2JҁV@P~d(?p_ZMP~`Q=,j$Xl *I_P~`QS%BE b(?P~`Q=? C !C@>1hʿP@;P~b(BC Fʿ}1Xl_o 5,j7d(?krt(?&0h rP~M`(?&0XLP(?C\:XT(B 5,j*XT(?P~ Ǿ5,j*XT(?P~ gP~d(?O5&C*h5 rP~`*X~ P~ rP~?H P~?H _P~?H P~d(@-[h`lr~ g(nZ0/T ?[-[-@.,jU[EM*,5,j}[h5~ M9q ?qp ?qp ?3-9r ?qp ?jZEME - 7[EM~`QSfv ?d`Q|SXT Bj5,jș[V-@~d B&[V-@~ gfnZM&[V-@~`QS-ZEM/T ?~`QS-ZEMj5 ,jXT ? 
H5սP-EMuu&[V-@lZM&[V-@l_~ gfnZMj53-ZXT ?ٞAl_؞Al53- 3-@~ dn>m[-@.}l>m[9Ձ-@?-@<2-@-@<¿P-@ϓ-@հ[\u-[EMj5Wj5W [EMj~d ?jh5lZM&[V-@jZM&[EMj5,jș[EMjZM9r>К0wrQ9whM&ZFt>К0wX4Q<nr~l_~W-au[NS Aj[af>:L-af>#/ia𗱅?l_G4yװ}>xO̰3??AAAAf՝f՝ e}H{6_.C d ,jLXTB*5U,ZM QdB&Ve U,ZM&-jLh5YPeEM҅*5U,jLEM*5U,jLXT>2}`QSeߦLXT>2,ZM&Ve@2}> ,_tsd>jLh5YǞVef{LXl J753eLXT92}`Q=X,jLXl dBve@;2}eC,!/T>LhXPef7/l_o,5EM@ηp@@@@1>@i,j*>EM5,j*>XT|>Z5,j*>XT|>;[d|>j2>ȡV@|d|>8VU|>$9t|>8Ab|>$Ab|BAb|>$&2>hPI@ |c|>#p|>Ђ1>8>h`r |`QS@@,j*iXT8|`QS@BP@| Mk:>%ܢ8>ȏ88>ȏE9>e88>ȏT ,jX,Ԏ_X,joj.57EMU*>XT|>K2/T|>| dZM&V ZM&\9>h5ZM&EM5PEM5,j*>XT6|`QS@,j*dXTB 5 ZM$s|>j2>h5ZM$s|>j2>\9>h5,j*>Xl _|`QSf{1>a{1>Xl /l d_>{0-}@ ƾ| `Z0-}@NݗP"X2EME<К0[hMl@N@klrl}5a>hl} ׈rl} W@5,j*[Xl`,j*[Xl`5,jo0f&V@l}d>j2[h5ZMf/T>j2[h5,j*[XT>l}`QS@Ԝ,j*[XT>j2[ȱք @kl}5a>К0[ք@j̲3[N3[<_;2lc )S~!t aHg>;LIaBg>Ns˱a21i?l%ۧ9ذt 9b~d~d~d~d~>9LIavPavPÔfffffffffffs ܗ*L?9n Pa}$/p)v>p_^"c Ga}{70}`QSa ,j*LXT>j2Lh5GBVa  ZM/T>j2Lh5 @0}dB 5܇I*LXT>0 5,j*LXT>0}`QSaEM 5,j*L Va@0}d>j2L9&h5 ZM9gd>0}4,jgEM0}`QS ,jXT>ٞA 5,jg2Lva@;0}b>p_!*LhC _0}`qHa?7aE0} Gr~0} 0} r 0} 0}0} G5PaEM 5,j*LȍEM 5,j*Lȵ2Lh5\a@0}d>j2L?!VaU>$9t>#7Ab>$AbBAb>$&v U4Cl!9r{> =ȕv␊Ɓܬ=XT{>{ *5U4,j=XT{> /T{@ @?\@J| vA>R  " H].Rj/TA>tb`Q|ߺ auA>Yo],j*X,߷.5N ,j[*_|`QS_O\P | h5Y,ZM/TA>j h5Y,r|dA>j h5Y,5U,j XTAB*5U,j XTA>|`QS EMFtA>Tm`QS U,j*UXT6j h5Y@ɂ|dA>j h5Y@ɂ*r|dA>|`Q=X,j  EMĂĂ|`Q=X=X?@\`,Z0- n hX`,Z0)tAB`_,5_>@k‚|5aA>>rA>,  m ք m XTA>|`Q}QA>|`Q} XTA>پX,ZM& V@ɂ|dA>j h5YP@ɂ|dA>|`QS EM$sA>|`QS_\@k‚*r'|5aA>К hMX@k‚| \d6]R@YP/hg/9~l)c?᰽.mmfmm 6ll|l[~l[|lۆ}l^ǶqǶǶYǶڰ1: 1?FQD]ctuA?eu1;u1;?f^{cvpu.1;?f㰽.1;?f lh}.?l/9~.1: ۖY.m1: ^.^rK{]cvpl[|7 M^r,o?{]cvoqpu1: +o?FQD]~^rD]ctuA?M>FQD]~^,o?f`y.1;u.1;?f˛u.1;X$u&A]cvIP.1: .1: .?l.1:`y.1;X$u&A]cvp7 {]~^{]cvIPM^,o?FQD]ctumӏA?FUpۨ=M{%A ඲}!6zp[>O*n+=඲}IUV{me`Qs?X+?Mͽ`Qs? 
?h5Qp=h5Q+ZMT&*ZMT?m?h5Q+5JEͽ6> +5JEͽW,j{%^W,j{%^W,j{%^JV&*ZMT?M6&*ZMT?M6?X+nkJE=`Q=PgJE=`Qs?Xl T,jJ@TJv*!T!TC*!TJšK<7T*5*5JV!VA.?O!DAV?O,?=j+~~U!{>p?XC5E=`Qs?XCrΪE=`Qs?XC5*?!&BrYV!&BZM M!v?X~{AHM!ABA$ !?H?h5!C?hkB!B{A*!`qh?yB5V(`Qs?X5E= G ! zު gj?1Z[j?Hoz ,HZ[j{fJU=|`Q|`Qs>X,_j?X#5WZ5{z,j?Mͽ`Qso?X[zjD=po?h5zjDAzjDAV{zEͽ`Qso?X[5Eͽ`Qs>X[5`Qs>Xܣ?Mͽ`Qso?MZzj [j?h5zjDAnD=po?ZZM,jEͽ,j{`{`Q=zl  Wj?hzЂCAίzЂCA \Z{)y`Z C%AT,U6K-*{%AnTЂ`l?٠*5w*{%^W,j>T,j^W,j>T?h5QjD%AJV&*ZMTJV&*5JEͽ`Qs?X+rfJEͽ<PК5A%ANT#*U&?hMPК ׈?X4+*X ?ȥB|pG>XOg h2) ?ll P}HOsEM/ij45s,j*h5s̹@ɜʹZM&s*h5s̹ZM1Yd=j2~rEMʹ+ s,j*XTBʹ5s,j*XT={`QS9rEMʹ5s,j*~ sV9@ɜ{d=j2o+&swh5s̹ZMuxd={> ,j*Xl Ĝ{`QS s,j*XT=ٞA̹5s,jgs2~rv9@;Ĝ{b=p!s*hsC̹_{`qH9js79rV9@sn9@b{ .Z/T=  \M*rb{`QSyEM*rb*B魋޸@zb{ u=+q=޺Ho]l._b{`QSfJub{`Q|XT0YR]l,j*UX,_.5UlPEM?M͟fh5Yl,ZM/T=jh5Yl,rb{d=jh5Yl,5Ul,jXTB*5Ul,jXT=b{`QSiEM9t=4l`QSi *5UllYl,rb{d=jh5Yl,rb{dBr=jXT=ٞA,5UlPE b b=ٞA,_؞A,b{ V.Z0-@.\l`,Z0-~ŁEeSd=ms9@s69@ns}̹_{ Yι>ms :Xl_̹_{`QS9rEȜ{`QS9fb{`QS9fbd=j2h5s̹ZM&sV9@ɜ{dB&sV9rEMʹ5s9r~6@b{ W].r'b{M`=&hXl@~I(,_X.rb{,_b{?>l9Luaΰ6l dS}x a|g>,;Lavg>>/w훲a>lG; 퇩8>>>MۇAۇAۇAۇAۇTlfUGfUGFYlFYl/AۇASqT}T}>9LavPuavPuavPavPatf,oۇ&a}Tq,oۇAS})7AۇAۇA+oۇAۇAS}d}d}d}ؾFYlFYl?LavIXlfUlf˛avPTlfUlfUlf˛avPavIXlfUlf˛avIXlfUle,,,,,,,,,b0:b0:b0;X$,Mb0;b0;X$,*Mb0;ba***Mb0;b0;X$,? 
Mb0:b0:b0:b}:s@Ӛ}H]*8aB9l ]ww=pӚ ],j45]ܧ5V@z> _zdw=j~V@zdw=p_}ZMv&XTw=zPEMu/Tw=z`QSEMu5],jXTw=z`Q󧩩zdw=jh5]ZMvddwB&V@z>5]ǻEMu_0q`Q=],j*L|EMu53E bw@v/Tw=h]C@;zbwBuv ], zcwz`Q}XTw@/T==:Sa==?Sa=={NSa==_ @]O@a~EM5UO,jXT==CO5UO,jXT==Od==j͝V@zzd=ŸV@9?@9ʙ@yg<3s~26@,mK6%@J\2_dhX2P%@;K6%@=]2%@L\2dXTv7n`QS%J~/TdHo]2&%@R\2.[K\d}K%@zy uBJ5WKT+%~Ey`QS fJu<*5U2,jdXT>>>>l 0;0;a***0;0;0:0:}6~ i0:0:}6>~ 젪젪a**g>`y>7 `y>pxOqIp8>>~Xy>>~ m0:0:aM0;0;X$0;0;0;X$M0;0;X$M0;/cu}du}du}du}du}du}du}du}du0UׇAVׇAVׇ&au},oVׇAUׇ&au}Tu},oVׇAUSu}Tu}Tu},oVׇAUׇ&au}q,oVׇAVׇAV+6p}S@ }JH\Aܧ*Oi+B)m s=p UA,j45UA5V@ z> _ zd=j~*V@ zd=p}ZMV&+XT= z>P*EMU/T= z`QS*EMU5UA,jXT= z`QT@V&+V@ zd=pAZMV/T&8jh5YAC@ z`QS}XT= 532XT= z`Q=XA,jXl VDJZ0&-}hH`L_Dzc"=Ђ1~E0%:!Dfc"=DL_Dz oNDz*LDz ǗNDz0H;`"B%J5H,j*XT"=Dz WN5H,j*XT"=Dz NDzd"=9'&V@D?遜>9~ \96@Dzg"B%m<6@YA@YA@%+a~*vW[@/]A_d5 ,j*XT= z_y t<39q2W>2W>l ̕̕۷e s s) 8*W>*W>l\0;40;40;\0;\}37 s`y0W>* 87 sҀ)W>*W>* 82W>2W>2W~Xy0W>2W>2W~r s s s0:\0:\aʕM\0;\0;X$̕ʕ\0;\0;\0;X$̕ʕM\0;\0;X$̕M\0;\/c|d|d|d|d|d|d|d|d0ʇAʇAʇ&a|,oʇAʇ&a|T|,oʇAS|T|T|,oʇAʇ&a|T|,oʇAv5?z/T<!@ɐw EyZM&CށV!@ɐw EyZM/T;"&CށEMĐw`QS! ,j*Xl /l ĐĐ y_wb;1h !@;Đwb;1ȁC:Xje;_o yȐ yr\w y[-Cށ\9qCށ9hoP!@ y[+!B*XT;w`Q}2XT;پR(XT;پR yZM&CށV!@ɐwd;j2h5P!@ɐwd;w`QS!BށEMr3ihXP@;wb;S(W9r;hXC|r/t;1jV/,AWqq;IjV/T;p}+vJ|ذS=l 7li ?ŰG1lI:;Luah>YoatuatuatuTFYFYFYAAAASTfUfU;LuavP%avPuavPuavPuavPuavP%avPuavPuavPuavPuavP%TfUf0bFYFY׆ASod|d|d|ؾZFYFYֆAASoT|T|ؾ:LuavP%avP%avIXfUf˛uavIXfUf˛uavP%TfUfUFYFYFY?IXFYFY?Luatuatuatuald|d0&a|T|,oAS|T|T|,oA&a|T|,o&a|:0::0::0;X$M:0;:0;X$M:0;:aM:0;:0;X$M:/s;pG|S}ȟ+߁Bak | ܇5p}r*߁:w>u |ZMVVdBU&+߁V U|ZMV&+߁ +jh5YP*߁EMU|5U,j}*߁EMU|5U,jXT;w`QS*߁EMU|5U>@wd;jh5YwV U |ZMV&+߁$6jXT;p5U,j}JE b;텪|5U,jXl V|53Y7hX`|Z0Vuc;Ђ}*߁@ |>%ˑ+I*?^Uka5bt, Z L,T`{,l>V&CR! 
yg;v뺸3 o\ o \2Mcej4Ui佁A^>nvs7;ȁ r`f90we`nvLS`fy}<ig2MenvLS`fy,T72Muef4i乎AO젧nvd7AO젧nvd7;sw&AO젧nvd7;XnvLS`i,T7;XnvLS`2M;ef4`2M;,T3Xzgd7;i乎AO젧nvd7;i乎AOfynvd7;XnvL} b7;XnAuef4 vAf4 vAf?nAuAo7nvw5f!vAo O>klnvЏ7vd7ʀ'A8AQGͨvbjyvk`Lyx{q7q{q7q{q7q{q7q{q;q{q7y3^ܾ ${q7y3^ܾ k`Lykq;ykq7y3^ܾ k`LqYOy}Pp9//j/5AZ3t..../sss%ekFCOsqi5=ͽZkz{ŵ׸?4^p9kz{ŵaZLsq\ָ?,\ki5ek2͵aZLsqXָ?,\ki54ek2͵aZ5=ͽ׸?4^p9Xk/E=ͽ׸?4-퇞^Lsq~Xָ?,\k/E4ǠaZ}qqXָ?,\kiA{2͵af?\_\zC{pCohn {=7_\zC{pCohn?mn |X_l=af{=ͽ!o?۽{SClKDS]m7?~oj;m4?"m7_\iM4צekS2͵ahjX6?,\iM4צ<]DS;[z!oܛz{SCOsoji}y޷W?A*W?TȯAQﲽRZ߅Re{CǡR!/CPˣR!E2͵aZjLsTXV?wToQCŵR!J*r`T/QTȁR!Je`هJek2CC2C25afyRagLn= $[y*ɍy3 cޠJrn= n= n= +7Szn= n= n=q[?7Ⱥ7Ⱥ7Ⱥ~6 n= nc`Lº77X>n= ncޠcޠc`Lº77X>n= n= $[y3 cޠu1nu1nu1nu1nu1nu1nu1nu1nuTYYg֭Ǽu1oPu1o|&azTz,IXU~Lu1oPu1oPu1o|&azTz,I(;?πT;cog{l/}_~~\:?j>oUr'Wʁ~@w/42Ẃ~ ܏idp?G>w42X@wd;i2ς&z t=M*4z t=MYd;i2}P`2MiAe t4>@wLS`2Me t4i*,T;X@wLS`2Md;i24z t=Mfd AO@wd;=Me t3`2M`,T;X@wp?#>@wLS`21`21~~P7@wb; 1OуAo t!@wpi*}=iAO g@w?[2iU;=A܁ ,m@Ae t4i*,T;X@w7t4i*,T;X@wחt?S;í@wd;i24SMovLaս{A~qvУfzA~{vУf;ȋ:%q;o{Tej4Մ i,T;oy{r`a90{Aa90{2s;X\L<>X}a4˳=`Bp2s;X\L<i}P=`z2Mea4j=젧vd;i}P=젧vd;iՈ{AO=젧vd;i,T;XvLS=a4i,T;XvLS=`›2MeayvLS`›i*,Tx3i4j=젧vd;i4j=젧Aq;i,T;X>,Tz2Mec{ ec{ &{z`a=0{A~z`a=0ۯ˺il{L~~4 *yט~Ly1n1n1n1>CSzT^nT^1%ǼAƼA%ǼA%ǼA%ǼA%ǼAƼA%ǼA%ǼA%ǼA%ǼASzT^nT^ndzdzl?~Ly1n1n1nndzdzl~Ly1oP1oP1%ǼAƼAƼ1oPy1o|&az,Ig&ǼASzTzTzWn7Szdzdzlj~L1n1n1nmdzd1%Ǽ1oP1o|&azT1%ǼA%ǼA%Ǽ1oP1o|&azTz,Ig&ǼANc c c c c c c c ׏)y= 2y= 2y= $L^y3 cޠc`L77X>0y= *ycޠcޠc`L7_hۙ;13y=v&ǔH1~Lvtl?j+*n(~mv pE;_[9W+2XAU| #ɊvdE;=MV4YzhTE;i4YzhɊvdE*2MUehh4Ui}P`*2MUeh4Ui,TE;XvLS`*2MUeh?ȊvdE;i4YzhϠɊAef&+AOɊvpLzh4U2MUehTf6XvLS~|P`*2MUec+2MU&CBAτgPv3a(;= Cz& e=*[,T(`{1,lO0&CA9ϖ ee(;ȃLgPvo{~3䁺CAf:۞ e4i*,T(;XPvLS /5i*,T(;XPvLS o6~Pv)ќCAO젧Pvd(;i2,bA9ٓCA~Pv_:>Pv߹:ϡ /Pp(;XXlLS`B2M[A~yvr ZAn- 7vr ;;&- 7vr ;{,T.XYona,7evLS`fy,T.XYona4>vLS-`Z2Mejayvd ;i4>vd ;i4-젧vd ;i4i,T ;XAeja4i,T ;XvLS`Z2Mvdb;i2,Tb;XA~=d(;ד ez2Pv3ey:PvЏ~0UCA7DtLl]ffb`i9A>vp?^׉_ǔ_ʱ!3crvc8?_pl W1%G~4Kc]cJlKc~4 
*ט~L91n1n1n1>CϞSb{TnT1%ǼAƼA%ǼA%ǼA%ǼA%ǼAƼA%ǼA%ǼA%ǼA%ǼASb{TnTndb{db{l~L91n1n1nndb{db{l?~L91oP1oPn1%ǼAƼAƼ򙄉1oP91o|&ab{,Ig&ǼA%Sb{Tb{Tb{Wn7A&ǸA&c1nǔOƸA&ǸA&Sb{,Ig&ǼA%Sb{Tb{Tb{,Ig&ǼA%Ǽ򙄉1o|&ab{T^1= 2= 2= 2= 2= 2= 2= 2= 2c c c`L7X>0= *= $LlyJly3 cޠۏ)= *= *= $Ll?ۙ;13=_cJlץcqLl?~X:ߕ7ڵi~yPi~ohWAi 4wp rȜQ4wd;i2O႞&zLs=M*4zLs=M)\d;i2}Pi`2MUAeJs4>4wLSi`2MeJs4i*,T;X4wLSi`2Md;i24zLs=Mid¶AOi4wd;B=MeJs[`2M*l,T;X4wp>4wLSi`21i`AO9l }P7nwb; Aov!vnwL=>ov4nwuve;-Al`٪۞vynwvg2Muev4i,T;Owev4i,T;CPw4vynwd;i4z0g¨wzyw^< ^G^<[^G^ *08 *0cޠcޠcޠcޠcޠcޠcޠcޠcޠcޠcޠ)z> *08 *08 2z> 2z>"q?777~8 2z> 2z>qq?77~cޠcޠc`L77X>0z> $y y3 cޠ)z> *z> *z> +7S|Wn+y3 c c )z> 2z> 2z> 2z>qq?7X>0z> *z> $y?777X>0z> *z> $yy3 c`L7_c|d|d|d|d|d|d|d|d1EǸAFǸAFǼ1o|&a|T|,I==gFǼAES|T|=(o_@~= c|0P>߼@c c+ ?@~:_@7@7Oy^]~P~.WAN Gvyp rWn,˃hrA rn=M˃&4/i]~PvydAOvyd<_=M˃&.i],T<_Tg2ceG@,Sv4X֧hc<@ AyFȃ& A~@4Y ?y.=Mȃe*4'ȃe*Tcю+Gc?7c7__TcaTUEcޠE1nE1nEToYYY㳇E1nE1nE1nE1nE1ǼAǼASQ~TqTQ~TQ~TQ~TQ~TqTQ~TQ~TQ~TQ~T1ǼAǼAǸAǸA1nEToYYYۏFǸAǸA{1nE1nEToUUOSQ~TqTq,IXUogǼE1oPE1o|&aQ~TQ1ǼAǼAǸ*ʏq?ǸAǸA1nETYYYOBǸAǸASQ~,IXUgǼASQ~TQ~TQ~,IXUgǼAǼE1o|&aQ~T(? (? (? (? (? (? (? (? (c c c`L¢7X>(? (? $,ʏy*ʏy3 cޠ(? )dVj~<cƏj~<?!Wc|?c~< ? ? ? :Oւ7E9A h|p? r[T4>y9,S4>ߠ?h|d4>_S=MF*4zT4>i24zCߠh|d42MEe҃4i*P`2MEe4i*,T4>Xh|LS`2MEe?h|d4>i24zk砧hA&AOh|p?z42MEeT89Xh|LS~}P`2MEec2MEd>!v ؅zC!v}b> P]7.|l=؅.|L=؅?o4].|7pe>-Alم]`٪ ۞]y.|g2Mue4Յi ,T>CMwe4Յi ,T>ȓMw 4헥AO~Yb>i 4مzy=h|LF埩sR?S u4>ZG~;0A8ဣA8,T9XsLS h|#uA. 
h|Kp4>3G h|,A) I?h|)Ge!hr4>XYR4R i2Me!bLS)Ń4R i*,T4>Xh|!=MFマ&AO=MFマ&AO Cz=MFマ&AO`2MEeT4>Xh|LS`2MEeTU4X~Je=-A^68۞ILg={0'уlp==DI$zLSI`f=I`Di*,l3~$zL${<2{<2M%уeX~gL=!ԃbL= cAZ8{1+^aL=+ {1 S1gS1 CSwԃ^ 7_g_py{5o|&A5op $ ׼׼׼=| ׸A_pD5n׸A_pD5n=7k y3 y3 y{5o|&A5op $STks;}m+V_bm;~F5!׶3׸k]#vwbm^~D5n!G7k_i]i_iȽ)~q; )~Sv@|S=G/rz_iݶ"~DvyD=2MD/nW=MDڃ{HEO&"⋞&"=MDڃ{biXG/nG=~Ls_,#=~Ls_,#4H2=~Ls_,#4H2=~Ls_,#4H "=MD/z_4ii"~qi"+=MD/z_4i]n_4iXG/n4H2=+4H2=~qe{biX>!~Ls"~q !#!D/zC_i]_i !#!D/ "xCbf{!l7D/i"=MD/M"~?[D/gHE\*~lHEi="yHE^_*~o{Dڃ{biXG/ie{bicLE/ie{biXG/,Sii/K=~_~{#=_eӃ{"/i&"?s_M"y HE~HEi< P_䑞"yHE(~_,42=~PQ"-W"_lu/V"_lu/D\|a:y{p_sHe9{o}!c@S,ql1o1eO}c,U<@~6f>7pk q q ?X7Ȁ7Ȁ7ȀK q q q q x)> *8 *cޠbcޠcޠcޠcޠcޠbcޠcޠcޠcޠcޠb)> *8 *8 2> 2>߀q ?/osd}d}lppۏ>ǸAǸAS,qT}T}l{L1oP1oP1o|&a}T},IpgǼAǼ1oPpppppp|&)> p3 c c c cU7Ȁ7Ȁc y3 cޠc`L€7c y y y3 cޠc`L€77X>0> $ y 5ǸAǸAǸAǸAǸAǸAǸAǸAS}d}d},IpgǼAǸ?p=ƽ0>ƽ0>h?VY q/ c c cE0c^Fۏb1C1na1naFF>rMN'σ\8Q9}䬜>A.J9.σJAσedi*},Ti*},TzLKꠧyLS~I,T>i*},T1}4Ӿt< 1}~P7yb< 1}ܯAoJ!σ5~Џ7σeJ4yL=ޘ>zLy!y?[σ2}ϖ >A>myNyyo{*},T-;glcj 73Ofl_cꗏjc cژ7~7~/k엏q엏q?V7~7~7~Y엏q엏q엏q엏q엏gs_> 8 cޠZcޠcޠcޠcޠcޠZcޠcޠcޠcޠcޠZ_> 8 8 _> _>jq?V7~7~7~'1n1bl?//L1oP1oP1ǼAuǼAǼ1oP1o|&a|,I//gǼAS|T|T|d|d|d~7_L~7~7~7~~; _> _c`L~7~7X>_> _cޠcޠc`L~7~7X>_> < $엏y3 cޠZ1n1n1n1n1n1n1n1n///|Dcjgl'ӏ;cY=_K~*=gc|c ~<;ߏ=qlql?777_o] .x[s<ȵ osAN]~?.xs< d<@=Mv&z=Mv 4z=Mvmnd.xLS]`21].x!v zC!va}b< ~P]7.xp?.xLS]`f{ ~=iAO9hxgYg4<eJ4oL4>wLS`f7Fe7Fii*,l12,T2Me OJyw?!~2 c>mo^ֽwg{ݲAuw˺w3{ywg{[ֽك2Mսe{4#u`Ui,T5X{4Ua iG*2MUXe{?Ⱥwd;i4{=͟AO kdi;[B֥mYvo$H,my+v.mFbi;7KAO_GFA5ejt˨>FwZ`AGFw0nt=j65AOFwYnt=M6mփ2t x`f; 3Aw>X{xyw5f*C~8co}=cxS,ol~ۇ31Ռo}3cd<~62_y xy x폰1oPƀ7Ȁ7Ȁcq xq xq xŀ7Ȁ7Ȁ7Ȁ7Ȁ~cޠbycޠby)= *7 *= *= *= *= *7 *= *= *= *= *cޠbycޠbyc c c7Ȁcq xq xq xגc c c7Ȁ7Ȁc y xy x7h)= *= *= $ xy xy3 c`L€77X>0= *cޠcޠc c c ޏ$?ǸorǸ_L€7Ȁ7Ȁ7Ȁ~; 2= 2c`L€77X>0= *cޠcޠc`L€77X>0= *; $ xy3 cޠ1n1n1n1n1n1nlۏ+gzlZ1nwlhZǸyFSzl?foYǸAFSzl?U~L1n1n1oP1n ~ܰrlnX us:_9:7aܰrynXٰg~A>ݰzlXT:ia4ٰ>ud:ia4ٰAO 렧ɆA5ejX4հ7հia,T2M5ejX4հia,T:XuLS `2M5ejX4հ~ 렧Ɇud:ia4ٰAO 냊=M6&AO 
~44ٰia܏ejX4հ>kLS `h2M5ejX4 6d:ȫ?g 3z&T=f 3<0SL>LuD~[`22c`{1S,lO0f&3șTg:<3A9S,?xe~&0S3 $Ιꠟ TT:XLuLS`22MeeTy!LuLS`22MeeTy+LT=MfjiG3ˣƙ`fy8S,Tt0XY5T4 i*S,T:țge4 i*S,T:g&3AOꠧLAe&3AOꠧLu!T=Mf&3AOꠧLuLS`22Me*S,TAƦ cAτ郊M=Ʀ cA967M=Ʀ*67Mcl:'c2{c2MŦ*6,Tl:X~cl``f>Ʀe>ƦeM4}M4>tLS`bAO= ܓ>tIC`O:X~dO:K*~WGyٓ= Xܓ>= vO:ݲ'nٓ= ܓ>=w˞AOpO:XtLS=`z2HeOz2MCe*4#=`2HU14XbhLS=ٓzI=MO=頧?,LUjU t:AX*AU~; ہU w:ȯ]*td:i 4Yֿӣf2jUeԪBT:XF*tZUG*tУf:{W5AU蠧*td:S(W&UU蠧G+e8Ǧ26t{M] fl:f0cK%α u~M?>}?cbl_>vۧ3?&yLa}5cbl>}1)6=5_wj]cޠbcޠbc#lTl16= 26= 26nc cc cc cc|f16= 26= 26= 26= 26==7ۘ7cMy yMyMyMyMy yMyMyMyMy =7ۘ777~8 26nc cc cc cc77~8 26= 26bcޠbcޠbc%cMyMyMy3 ccޠbc`L7X>06= *6= $MyM?77777c3O1n7+IiǸAƦǸAƦSlz,IgƦǼAŦSlzTlzTlz,IgƦǼAwǼ򙄱1o|&alzTx16= 26= 26= 26= Q ud1şǸAƟǸAƟc/qԱwdyl?O1n/OǸAƟǸAƟ1n1nǔ:8kCS g0tCt:E\4A: }Pa 70tt:_9?0tp z C=M* 4z CT:i2 4z CSҠ0td2d:X0Ae C4>0tLSa`2Me C4i* ,T:X0tLSa`2Md:i2 4z C=MqdAOa蠧0td: =Me C`2M*,T:X0tp>0tLSa`21= o3aAτag0t3a:=  0tp?/ 0tLSa`f{1 }=i'AOa Cy(0t?x3 a A?0tGCy0t* ,T:X0tLSa`2Mfs:X0tLSa`2MksA&A9 4 C=͟AO~)|Pa Oz?djA2 9A8 9A>0t7CFb:( p:p:XZkLS s/~%s9ȗ _2&rW@n"=M6eŧD)SˆZ:SrСR^5g_ڶ׶k۲^ۆ5'^fdzmkۦkۢ^Jm{zmkۚ kmTmP=^ ka~m{ c0 "7 ؞<{D5n׸Ad_ ׸Ad_a~D5n׶=7'^؞a~ܓgy{5op0 ׼=7'^ ka~3̯y{5opO׼=y7'^a~D5n=y7 k 2̯q0 "7 kk 2̯q0׼=7g_ێ ka~3̯y3 2̯y{5o|&A5o|&A5op0 $0 =7g_ k 2̯q0 " ko0 g$=7k~k~k~HaO):):),@5> "~ k},#E7X~/G5opC5n";'~_kk|m}m+-mmmEJxv5>_mïqH "v`7k c{5n׸A$~_k0TU|{"^,, U|偡2PbށXY^,;p42ͽ{W^,;p42ͽ{Ls^䍑*=MT|/z^4Q ߋ&*=MT|/z^*=MT|/z^4Qi{Ls^,+4 r s {ѯ ryE~E*E~E?0۽r@nb۽۽Xv/i_ _۽X~Bn7~Bnbf5 ݋e{nbf5 ݋e{n7v/i݋e{nEOuۋyE'n{Jn{_n{B"Tm/[im/[im/tFu^oUXm/iuۋe{b^?UX /i‹e{b^?U uۋe{"TEOuۋ&=M4j/so^7j^Sj^ѨQQ{ѣF67j/zh^ѨoNըoNըQQ{DFEOl/^A6l/X+{6l/X+{+^{ۋ2PEOۋ&=Ml/Fۋ&=`{0 ^4XG/:EQ#D#^tuۋn,)m/CuۋΑn,srvlcێ۷5?ezL%}VccklؾT5?=scjl?#۟qcޠcޠcmTn; n; nJRc c c c|n; n; n; n; n;J=7֘7cێyhyێyێyێyێyhyێyێyێyێyh=7֘77Ⱥ7Ⱥ~7 nc c c cY7Ⱥ7Ⱥ~7 n; ncޠcޠccێyێyێy3 cޠc`Lº7X>n; n; $ێy>777Ⱥ7Ⱥ7Ⱥc3 c c 붏$qKcc먏9-9-7Bc::Ɵ::Ɵc::=~6_kc~Xۏ[Tm[VGSutl?SYYƸAVGǸAVGSdodutdutTutT\ﯹ:\ rks hsu49WG:\ rxKY l;J$9,x=4M3/eZe4:S<*j\>VG&U z=MVG/L~4Y zshdu2hLSы4U i:zQ`2MUGe4U 
i:,Tu4XhLS`2MUGedu4i:4Y z=MVGddu~AOѠhdu4'=MVGe`2MUG/*,Tu4Xhp?OhLS` Ͱhp? ͰEEe3,/aѠ [Ԡ /}1,â5!Ee 4ââ29İhLSaѠ_AX ы+cu4[WG:,2UG4au4$ i,T++X$iLSI $ zL=M&I&$ zL=M&I&A8I4$ zL=M&I&2M%IeJ4$=$iп&I/*IoIҠ+Ly$i/$ $E%I|$iL$iL4$ ų2M%Iek,&Iek,&Iek2M%Iek2M%I/*I,T4X$id4ii_+ z`y^i ۽ (W+ /+ e{Av4ȣJ/WnJe4+ iW,T474 i*W,T4X^ioN+^iLS ߜW4+ z=qאﮜ$$iU$ ݕAIҠw$iлeAIҠw$iU$ 򵪓AOIҠ$id4imW,m WzQ`y+ Iҋ˃^iO}z:ۇ7nlW7Bcϻ}pcގ)*4ml۷6Oml_1NO8iTThTtTtl:5NǸANǸAN)*4 2t: 2t: 2t::NǸANǸANǸANǸAN c y y StTtTtTtTtTtTkTtTtTtTtTBcޠBcޠBc Cc Cc71NǸANǸANǸANc1n1nӱ񟧿|q y y y 'T:::I::I:I::I::=77777wNǸANǸANJS`sl?]Kchg lGXc ~uLͱklϯ뫱dO˫jl? ۏƸA6ǸA6K1n1ncɍq lq ly lyȍqoB7ܡ6+): YA͋o9I7l 6+e:4߼fd|3i2yQ͠fd|3i2o&AO͋o4ˇ2M7/*,T|3XE7eo4 i*,T|3XfLS`2M7eo4 i*y͠fd|3i24 AO͋=M7&AO~k4 i*oeo4߼]LS`2M7!2gK`39/X z ,s9eK`3_]!2gR,s=M9/,T3X>X>X i)9e*s=M9/ k9eΠe,sy<2gп29W2gП&,syL2gg.si2E9e*s4U i,T3X2gW.s4U i,T3X2g.sd3i n9&˜AOeΠ2gQ3W.s=+&˜AO΋C`37v ;lEe;|lg'Aw3#g;\ i*,, .s4 i,T3[9eJF4 i,T3ȫ9&˜AOeΠ2E9&˜AOeΠ2gW#.s=M9&˜AOeΠ2gLSe`ʜAO͠o^T|3 g;э }2M7eo4 i*Q7ej4^ i*,T|3ȗo^T|3Xf/G zo=M7ʜArʜAVu3wW.s=j95˜Ae΋*s=j95˜AVu3ת.s=M9&˜AOeΠ\a3X=  A?lz^T3XA5=T󢚞,A?lzeAOMϠɦgd3ieAOMϋjzytgd3XbLSMϠ{w ~]c3AW gE?/2yw ?Dw :gp?{c>c ~w7"GwLq}rc>}o6[ۧ6/혂cG~4 *3 *9 *9y ~c c gc c c c ca1?ǼA?ǼA?)9 *9 *9 *9 *9 *9 *9 *9 *9 *9 *9 *yL1oP1oP1n1nϱpd󘂟c c c c 77~>8 29 2yL1oP1oPϱ2똂cޠcޠc`; cޠc`; c`; cޠc`; cޠ GA0܏.a3&=& i*,l5Lz^l5Lz4G 2M%=e <1 T D1 T i*4 է{Al gg?[><q3}ػcg@}^T3޽?LSϋw3XY{>eT4cgLS`z2M><q3XJULS`z2M><q3i4 z}^T3i4 z}ygd3i4 z}4 iyΠ|vc3AոbW;~Yjgo] f3'ΠlV;<^p37Įv4U iCv^l߇X iCv^l߇X iCv4U iCv4UjgLS`AOΠjg Nz>&=Op3'=|g/ z}Lzyg3Nz>I g;,S󢒞Av3XgLSI`2M%=|gLS`k2M%=eJzIϋJz4 AOIϠ0gd3i2+N9|0gd3i24漨0gd3i2+N9|0gd3i24 z s˿œ0g{? 
dœ*,œ GPa`y d3Ga  z s=M9&ÜAOa  z s^T39&Ü2Me s4[ʨ iǤgU;&=1t]IϠLz]Xdbi8ՈAwXF89vl1%=?KՍ;|>ctlؾck?ASۗvLIϱsl?N?ƼA%=I1nI1nIcqLzqLzqLzI1nI1nI1nI1nIϱv똒cޠcޠǔ<777Ȥ7Ȥ~08 2yLI1nI1nI1nIϱLpdsdsl<77~|uLI1oPI1oPI1oI1oPI1oI1oI1oPI1oI1oPIcJzyJzyJzqLzqLzqLz+'asd3op)9}8ci~vLIϱ3&=(ll ;~6`c9~CkLz`c)7Ȥ7Ȥ~6 29 29_qLzqLzSndsdsTsTnT\jg3t3DW;ܢ,9yQ jgt3_*d39LW;bd3i4Yjgd3i4Y `AOΠjEU;e3`U i,T󢪝2MU;ev4U i,T3XjgLS`2MU;ev4;jgd3i4Y zv jE&AOΠjgp zv4U |2MU;ev^TJ/XjgLS~w͠+lo[a{3[a{ڛAV fп7]eloʠ?0 ͠E7ejo4ۛۛ2fLS`ڛ"0g9sJaΠ0gK`3%0yÜA/aΠ0gR sy.0gs4漨0gLSa`œ2M9e s4 na`œ2M9e s4 a΃ s=M9|0gd3i24 z? s2aΠ&ed3iv?jRd~A I  "I`wQI  f3'=&=1эA>v3'IϋJzIϠl&=~ f3'=|}gLSI`f>Ĥ}I`f>Ĥ}I`f>ĤgLSI`f>ĤgLSIϋJz4 i*4 zLzygc3'=<^p3Nz.IϠǤgw5NzyWgɝ 5>%=/*kr'=eJz4 i*,T3ȗNz4m i*,T3Xg/GgLSI _:4jgc3OW;|jgc3Xjgc3OW;|jgd3i4Y zi?%=几g BӁIϠO&=/*,ϧ|*yQI`y> SIϠO&=&A'84 zLz=M&=&A'84g3Nz=M&=e4 i8Aw :88t1Рx^,!@7q 48#cA :hЉF@MccS tlؾSxl_>c ?{;ؾwl>}i?vO1oP1oP11oP1бoT =777?ۯ): *: *zL11oP11oP11oP11oP11oP11oP11oP11oP11oP11oP11oP1cyyqqc c ǸA@ǸA@1n11n1cyyg[IIII=77777w@ǸA?뱱B瘂cQ~3wcG[bslOc ~bc~-6~)6q ~q ~7bc c c771EƸA?ǸA?ǼA?ǼAƼA?ǸA@/*D dA1 whp?$ r^T 4ȵ:^ 7 dA1~4 z=M@/*4 z=M@cd 4i2zQ1`f6F<2pc437F<4pc4 Ѡl6FL ō /_ ō`bh/T h?>gD9#䁉3AwF4' lgD<"rF2A>Έ ^LSыvF4XYlgDef4듭lVLS٬`22MeD<@qF4XlVLS٬`22MeDh?6ȱ=6϶O϶1~mql}-Cmm3l['l㤟my{3op~},~ qk~ "N78D3nqϸAI? ⤟q~ "N78g ⤟mdc{3op~ qұ=N7I?8g'㤟y{3op~ qϼ=N7I?8g'㤟y{3op~ "N78gg c{3nqϸAI?'l;[DtlgUgqgϼH? 
;yg{3ϼ=@7Xށg`{y{3oϼ=@:H?g D3nϸAH; <-D`(hl~~]}ݪ}q'l;SlRl;R㤟m'jmjmjmimG'l;NlMD3nq϶ϸAI?'lHD3nqұ?1g ⤟q~ qϼ=7I?}?p@{!?STvkT4Gr>$}G?p@{]?p;|i?Dhp>4}i?DvDhp>,,>,?Ls>,?Ls>,42ͽ?Ls>,42ͽ?Ls>,4( =MGz>4}i?p |i?ܣ=MGz>4}>4}XGnw42ͽ?ܣ4COчoчICVP ч"пTDnךC`"p| TDz>4Q чe{Eaf@E4>0P}X>0P}XWDiчl !1W;J>j%FzCH>} !1J>} !1lhO>-42=142=1LsO>,4CU(1LsO>,42ч&=M$Fݷ=M$FzH>4}iч| COgod>4FCO})4F E!o }+G@ч٢??۽R>͋Rg/>y2ͽRxQч|gC}S!oH}7>䓭C>> ?O2ͽ?,O4˓2=L>{azXGiч1QazXǸiчe{!M}i?Dhp>4}i?D!M}i?DCOчe{aD4G'C}ȋGݲ?S^GE!/r ч~x}ȳGݲ42`ʃ2`ʃ2чe{afʃ2ͽ?42ͽ?DC=?:COч<{P!}>[LGz>EyC`W!_?D4G42="Ls>,rTчe{Da{XGiч|9hp>,rTч&Sч|ɤhpO>O%F%>$Fz}H> ч^>$FSч&=M$FzH۟H>,?aO>,?|"1'FsO|ч|"1Db!}i"1DbCOчOPbhO&=M$FzHl2=1L v2VOD{>tq҇n!N>I8C '}$?8C^(NUHI)s}chol1=|c`(kzL}ccPS@hl_Gؾѱ}cB)\: * 4 * 4 *\: *\:dy åc åc å...g$åc åc åc åc åc1KǼAKǼAK)\: *\: *\: *\: *\: *\: *\: *\: *\: *\: *\: *\zL1oP1oP1n1nұqd¥c åc åcy t@1:Ts?O:L39ߏfs4ci:L3yL1o|fsTcޠcޠc c c k:α6fl/c)9[g1c~sLα%F8[Tkl:~5iWZc~kpZc77~5 29 29eqpqp1n1n1oP1oPE1oP1oޠA.ԁ~H rt @E:AՁ~=x~f ׃AցΠ@gdAOΠ@gd3o=M:& iS:e t^T3X@gLS΋ t4 i*,T3X@gLS`2M:e t4 i*,To AOΠ@gd3i2ܯꂞ& z t=M:&.i2,T3_4 i*y _;O̠Be㮠Be^T,3臗̠^22Xfp; e,3i24˼XfLS`f{x˼Xf'e4 i*,T,3XӦۓ4 zlpyg^ zlp=M68&A4 zlp=M68ejp4 iyQ `2M58ejp4 i}2M58ejp4 ilp=M68&Av3i4 zlp=M68|gd3i4wu"eD4wM;A AVX~+wyzg1w^T//3U * b\ rnp^T3A g'$np O78|> L ΋jp|krA>np^,ϧ2|,T*XgLS ΋ R4 i䉉2Me R4 i䱉AO ΠgdAO Πgd3ȓ78&AO Πgd3Xgdf2A fw5lؙ͠Of6/)'3A?lyWEe6~> x ;,Tf3Xپ0y}af3Xپ0y}af3Xپ0,Tf3Xپ0,Tf22Me6el=Mf6&3A/8y͠f lyf 3AO j ƙ _;yg6&3 ռ32Me6e 4 i*Og6e 4R i*,Tf3l^Tf3Xf?<߲>f6|fټf8 =3A͠fcf2A͠f8 3AO͠fdfOglpOH ` ?: e, W `yx zlp=M68Op3i4 zlp=M68Op3iyQ  zlp4U i,,@78/ 2Vd3A 5@gе9:n2tΠ t^T3!A :@g_?:Վ۟ͱ}d:?cZS@hl>Ա}W:91otlؾc ty y y ty tcޠ@7@7@1ƸA:ǸA:ǸA:@7@7@7@7@~vL1oP1oPc ty ty ty ty ty ty ty ty ty ty ty tSsTsTsdsdsl?<@1Trl?C:/0|9g˱hs ×c|V50|9gc _y{+×cޠ—+=ZcxAʠzec2ߣ>+e}W{`Y2M+iOٲ{ٲ{ϖ~Pϖˋ^س{cep{v/~ٽ ~AOˠed򢺗2Meg򢺗Ar{,T2XeLS`Ats޹2_gWlg T6~ڼfoQ q`k3ǁ͠6|fЏ[A?lm8,Tk3XfLS͋jm4 i,Tk3XfLS _,Tk3XfL}ek3Xfdk3iu6&[AO͠fdk3wnm=M6揪AO}Q1XcL}Q1i %\ ^• /&\ Jd3W:q*A +Aq^T3O8t3O8⼨g d3!ΠO8czl{۟ٱ}cX)h4Ouldlؾc 
+7:Otl_18ǼAƼAƼA8ǼA81oP!_csdsdFc Cc Cc CcdsdsdsdsdslP;7718ǼA8ǼA8ǼA8ǼA8ǼA8ǼA8ǼA8ǼA8ǼA8ǼA8ǼA8)9 *9 *9 29 29Eq qS󘂚c)yLA1nA1nA1nAͱrgPsgPsl<7_29 *yLA1oPA1oPA1nA1nA1nAc'*ccsloU)99*c ~AuL!αA8멱xjl:~95NwSc~kk qq qq qq qRc Cc Cc771ƸA8ǸA8ǼA8ǼAuƼA8ǼA8ǸA8YT3t3!:ϢܢC rq9H8A8YTt3_9L8&CAO!΋ q=M8&CAO!~P4 z q^T3XY>0 i*yQ!`B2M8/*,T3XgLS!`B2M8e q4 i*,T3Xg_-)~  1.܏|` Ab\0"q  ~wh&AOqɸEe4 i*.ie 4 i*.,T\0Ԏ 4Ӧ2.*lfa, 5 /Y '7 ~, )c0觌 _~YSfaO2M5 ej4, iYxQ`2M5 ej4, iY[Q7 ej4, il4, zl=M6 |ifad0iY4, zl faQ.ii2M5ek32M52h/A V;hj |caпl /_6AN8hUS AOgA_t0Y`o[$g |% Y‹ 9K񇳄% Ar0|%X$g eAr0XOLSY` i*K,T0g e 4 i*K,T0Sg &AOY ,Ee &AOY ,a=Mf &AOY ,aLSY `,`g.A/<e~Y dy0G -<ee*4//.,4//.,4e*4e*^Ty0X`LS`dy0ȳ/<4Y " /r\ ֺ<4Y " /r\ ֺ<,TyʃA<,T[*XTLS`ʃAty0XTLSm`ʃ2M|Ee*`cy0聱<J|`cy0聱<X`cy0聱<J|Q`dy0i<4Y PӁ` ?X ʃj:<EUy0X^/<,ʃ<4Y z,ye`dy0i<4Y z,ye`dyʃA^<4Y il,Ty0XYf.^, 3eb`3f k1Kt_Y  [b0X~Bzc0% :,a7f 1K0~,ؾڱ}cS=O}c^zl_>cŒSۇ:0cJ?azLY1oPY1oPY1oPY1oPY±#sTט%%%<,7,7,7,?#%%%%%/Ԏ)K8 *K8 *KxLY1oPY1oPY1oPY1oPY1oPY1oPY1oPY1oPY1oPY1oPY1oPYcyyqq7L1eǸAfǸAfǸAfc1n1nrd&pd&2c`>L7L1eǼAeǼAeǸAfǸAfǸAf3cq~y8GOca1eñdlwϝ)86L~4_:Nǔ Ϝ+ilq`1n1n1nid&pd&plj  <7L7L7L7՘7L7L7L`pa r9Dg S[t&2Aљ L`p?;L`pa r L`d&0i2xQL`d&0i2ܯ&3AO432Me/*,T&0XLEee4 i*,T&0XL`LS`22MeI`U$ =Ϟ_p? 
{~A/=x5[op󻨞_plAO=ɞ_d/iwQ=`$2MeV=`z~2Me4 {~2MeKLoB^T&0w?b&2Ar&0臗^f~x e3A?2,T&0XL`LS`2 i*,T&0XL`LS`2A)w&0XL`LS`f6L`LSL`d&0ȗ=Mf&3AOL`/ z=ゞO6x?^T9.Xپ3,T94 z l* _f!l7^ O+Yny_3Ar&0ByL`/ 3A>+83 Yq&0|V XgeYq&0XNLS`2U i*,T&0ge4U i*,T&0Sg&3AOLEe&3AOL`=Mf&3AOL`LS`\ _%*%2/c~X )c 0觌% _ce*4w/&,4w/&,4we*4we*^T 0X`LS%`d c z}yW_w5ye_r z}yW_w5ye_r ˱ye_LS9`rN2Meұ`rN2M圂e4 Ec zL=& }Aτɾ _:' }AτɾgdE% }Av/ȗNɾd_dbgbg/X~cU`!*_'|A9u/XOUT||,ϧ|AOU*_/] z=MV&|AOU _4Y廨*_/] z4 i,,.W.|2r/Xi:tyɾSL]b/}AWИ d_%4&.*t ɾshLDc/D'cAc`lؾ`l_>cʷ3|Gۡ _:&CAOn?ϊϊ`g<+RE0臗^V|`<ëEUUE0X`dE0i"W&+AOɊ`dE07=MV/"W&+2͒ sE0XYay`L\ iz+AØ `=,&.b11tlL VlL ^Tb0@A'ژ :`p;{ll[g۠?>+><R|m˟mmKl؞lg^m#l؞l[g۟[mlgzg6y{b3opO ~}~ kH ~ "17؞Db3nϸA$?'qH ~ "17g mjc{b3opO ~ =17'?gy{b3opO ~ ϼ=17'?gy{b3opO ~ "18>F3?ktg|ޑl@;}c{3>H}ϸA>Do|A}mmvmm?mc{glO}6tg۹g۵g۱؞l;ll;lltg }qH} "v7tg }m=qH} "7>D3opO} ϼ=7>t5{!tR!צt_pO={)p;"DvSvDS!ǧt졧tCO龇&}=MnA=MzH'4G}4t_pO=,}4t_pO=,}4t2=LsO=,}=?{vn{"p;y8 =n{<[zCCQT/z=4{i"D_p=,S42=o{Xie{a{ =,|4_p=,|42iMmz(>4Q|iD!_D0M=Mz(>4Q|iLo^|XiE^|XiE^|XiEe{a^|X򕹊4"2ͽL5Ee{𡧉"COE|"COE&=Mz(>]z(>4CO5gO=,So4_~2=܋42 @-Z60@-$PK!o |聡%CK!Z"Cg/*>䑃YEܭ[FE^|ݪpk[=nU ݪLs/>,܋Ls/>,܋Ls/>,܋y"2ͽLs<,܋4"CCD𡧉"COE^|iD𡧉"CLD𡧉"COE&4iE&r]D.0r"s]~ |G~ |ȷ>wie re re r4\2e{.0ie{.𡧉\COAr=MfIYR.!o? 
|>4 |s:NP.!_+Ls\C`W.a枒zXie{.!b*LsOI=,SR4\2=o1 ̨K@𡗀"C/E|i"CQ𡗀"C/E^R(>lz(>z(>4{X~{/>{X~ѿ凰GDODͩGs|ѿe{COѿ|sCOѿ&=MDz=K}EzCW2Rafx)&=,,1/Ei4[ EW*:`"C'P|RCcj("CP|TC~}93}l!3O>b;?_#?j>elؾc۷=O{l_1ǼAUsƼAUsƼAǼA1oP_c-pd-pd-9c kc kc kcle-pd-pd-pd-pd-pl?^;lP1oPnjcޠjcޠjcޠjcޠjcޠjcޠjcޠjcޠjcޠjcޠjcޠjT U U Yυe1\Xsao?7get&~TɲMl7 7 w|Ga~'1эWc~#qLտ?'c~sLտOVhgl9~3:c7777~3 7 7qqSjdodoToTjToToT r1n.l r8nl v_p r@n9!EWdn=M&}AO~4 zl];`f`s/XvEej4v_LS`}2Mej=?R;%ȗ%=M KoL z _84绨<_d/_W=M.*4 z=M&|AOy4i*,T|As/X.ELSy`|2M|<_LSy`| i*,T/X<_LSy ߵ84 z=M&|Auq/i2wQy _84 z=M&|2Ϳm4 i*wQy`|2M.*,T/X<_LSy`|2 .*,T/X<_L5y`|AOy<_f z=M&|AOy _:4 z=M&|UO i,T=|2MӂeAXİ_=M.i2~A8)~AOaɰ_G-~A޼8Ջ~?g:~ ryEܭ~=QnXv_LSa`~ i*,T82Me y_LS}`82Me y_d/i24_d/i24 aɰ_d/i24 imۦAWa/|v_O} }A?elSv_o 02e }we }we }2Me }2M.,T/Xv_d/i2 v^|A%9Y|A~8 v^|A^9Ŝ|A~8 ve]T/4} i*,T/X<_o1 i*,T)X<_LSy]Td/'#{A> ^ #{A> ^oٻ^5Vڑ^W z=T |ۇ1 |j,OH^~xY |, |U W` |AO_/G] z,=M& |AO ۻ4Y໨_] z,4K`fic{{e%_L4\ iL |AX : _Q(Btd,(c |A7X R_Н2wLcؾ}c>cۇ>|l|W> |c}cn3{lol>}TUUUU?jǼA~1n1ncیq,q,q,1n1n1n1n |cޠ |cޠ |TUUUUUUUUUUU;ߘ7ߘ7߯7[1;7}1>Gc|X;Z#7爵(skyc kyc kyT/˴.alۯ7Gd-ol?oe7Z~'3ɌW2TodBfl?YYYYbƸAƸA{1n1ncKqqyyUU;Zޘ7Zޯb\ r3]T-/ٸp\ VY W,Aǵ~`\ rB ?\ z=M&ky 'i4Y˻Z^L|,T-jy2Me]T-/XZ^LS`jyAOo ^pmQAn4_A ^p( xAA~Q[ ^x z} =M&xAOA ^d/i24Ļ ^Lox2M.*[6e*A4Ui*,T/m4 i*wQA`x2Me 4 ՇxAOA ^d/i2K&x MxAOA ^d/i2,T/XMSA`x i*,Tx2Me 4 i*,l bx2Me 4_ i*4 z A ^d/i24 =xAOA ^d/i2wQձ`fkЏc2MU.*,Tu,X נ_,)dJ/?!5Szۨ ) )G͔^УfJ A [ TA _; r}y E\x=ĻXקLLSA`x i*,T32Me y ^LSљ`32Me y ^d/i24Ļ ^d/i24 `A ^d/i24 i*,ۦL=Y 0ĵ ߾,]T-/ȷ#Z^Џ kyAakyA?4./.4./.4e4e]T-/XZ^LSZ^ d/A _:,ϧx|*wQA`y> i*4 z rA ^d/i24 򽽃xAOA A ^Lt iTxAw/XYzU4KA`fV4] :Z^&8t‰Cbc-/kyU 'Z^E1n+ H?tl>}lۧ~L?c>cS-ol3|l1muۧ=/jycޠ6cޠ6cޠjycޠjycG7Zޯ7 7 wLu1n1n1n1~7 7 7 7 7S-oT-oT-jycޠjycޠjycޠjycޠjycޠjycޠjycޠjycޠjycޠjycޠjycޠjyTUU5V1wanwV1>Snwc|X~y9ջ1>ލq?{ㄱrjyce~6%3cU1Zfl?oe7vZ~'3ɌW2TodBfT-XYYYbƸAƸA1n1ncKqqyy 7 7 7 *û^p/iܯOQ܇{xJ*ȉH WRA~V䧕{xAO=^p? z=M.,,]4û^LS=`zx i,T/iQ. 
%Apzxhp?( {xA=~P+ ^{x$>.4 z=M&{xAO=^d/iwQ=`B2Me=`B2M e4 {x2Me]T/X^LS=`zx2M|^d/i4 z=E|^d/i4 z4 imۦ^LS=`zx2M.,T/X^LS=`fk{x2M.,T/X^L5=`zxAO=^eQvYd1eȚTLZU{Upyy/Nd/i4 zyB^d/i4 zὨtXL} Q:,Xt؋4 iA>,l~dI/ .K .ym%^lKzA^^pI/+4.{xAzxA^Fp/c8ܐ{xAn= /zxAn=]v rCὨ^'4 i*+,T/Xދ4 iee4 i,T/k =M&{xAO= z=M&{xA^p/i4 z=Me4i)d/诓= /{xuzxA#^Џ {xA?y1= /#^Lދe^l?,l?,T/X~pa/Xދ4 i4{zxAYr/ȣy&{xAYr/;Kyk= zy= ӹm ! z}&AE{Q=^d/i4 z=M&{xAO= i*,T/X^ge4i*,T/X^f4 izx2Me4 i{xAO=^d/iQ{xAO= <=^d/i4 i,T/XMS= i,TEe4 i,l izx2Me4 e=M&{xA˺4 z=M&{xAк4 z=M&{x/*,l~ i*zx2Mec{x/A4 &cyA^gp,/cy/g /58X^ cy^'pὨ^ = g^3q//Ὠ^3q/xe&Ὠ^'4 i*+,T/Xދ4 iee4 i,T/k =M&{xAO= z=M&{xA^p/i4 z=Me4 i)d,/ o8ѼcyA彨X^G|2X^'cyAq,/[#|2,l?0b`fO54O54 ij i*by2Me=Md,/ MynX^d,/ Myɱ 8乽cyAO 9];RX^4i*乽cy2Mee>4 i*cy2Mee>=]+N3y ]/&^l}L d.GɻA&ow[)k7v_<﵌Z[-)k7mYF}evcm2}eTטoqڍqڍݗWƸAfƸAfS#jdndnTnTjTnTnTntE:N7q}+AՏ ˎ[IA~ [8];tA8No%8݋q8]dEe q`\yQq`t2M^T.X8]GU{c.x q '^Hg>   z}{!A8N,=M邞&tAOq8]d.i2N4 z=M^T.X*CLSq`tAj9N,T!X*CLSq`tAo9N,T.X8݋4 i*N,T.X8]G =M邞&tAOq8]=M^T.ȓ邞&tAOq8]d.X8]LSq`6?m4 i*Nt2Me4 iA4{Qq`t2Mect2M邞&tAOq O^ z=M邞&tAOq ` z=M邞&tAOq iA? n4Zt2Mect/A4 ei?܋q`.ț y7 o'?yġ  `ˡ t(.ȫ&ŽP\O₼oP\'yP\7 i*,T(.XP܋J4 i*Cq2M%UeJ4 i*E₞&CqAOP܋ =M₞&CqAO //84 z =M₞&Cq2Me 4i2P\78 ܡ_^T(.cr~> d(.瓡 op8 ~> ij{TP\LTP܋ee 4O5 4{Q`Bq2M₞ey|bE.țGy\лeE.țGy) ~"+rA /Ź"8Wey\LO*rA",TI)XRLS`*rAb" nAJ[/nA,y^[['nA?,yۋ*|QnAOE %Z:nu!^lC,#Ȣۋ*#Ȣ[/.#[<*[LSE`nAOEɢ[.=M݂&nAOEɢ[G.=M^T-#k݂&n2͒Dr-XYH.yd[L\t i:n2Hr-X[Б$ނN 1byq-soA{{Q{W̽]bEނ[+ނ_1?7ve>>.|v=$]y]|v=]ow]Og1v>ϮwϮSp]Og?g1v>gSpyw -3oNg RpqHϸA>D 3'#7g RpqH} "ϻSpyw 3oN)ϼ;7x>gSpyw 3oN} )ϼ;7x>؝Sp!g'roqȽ}v]zDmν}}"g'ro]W?>{Dg roq=d>ݙϮ]w>|vl=0vg>}vjue}=+}vgue}veu3oĽLg 2qq} "uE3nϸAd>n| "7L]kD&3nϼ;7xW>Lg߲;7xg^S&.xgU&n`ekG-^׎]V&nvo2qV+x];Z;2q|Q&nvȷeGEOEO[,,o@-i9oiw!e4L\-iޙxueG j-zh-^7Qy\n-^gME2!i!xDYIr&rME7!i!r&r&r&r&r&r&r&r&r!Xy4bݐ[qrew!Xy7wCnLn-i 21 2ͻ!i!i!\5=M4=M4=M4=M4yܢܢܢܢ\f-iA?wCnL!Xy74Ǡ!lА[,lА[,Ӽrecr8!j-ƒr܋j-֊rv_Sm5v_oASm?zXe/w@+ 6v_?ƼAU~U1nU1nU1nU^7*7*}cdmd1őƸAVƸAVƼAUƼAEƼAUƼeUe^ ;ZCׂu^ qF:tx-k:No*Z;k/ׂ&kAO^ iׂe M8,TxEׂ? 
c}$ׂ&kIGZ>6 ޗM1,TxEׂe 4Ǡ^l^ iA 4^ iAXY>9ɶcA97ǂh&nyͱ bcA^ps,CF7ǂɂ Y1HdA<;H YO6dA? y'A D8H YL< ً!ɂe!^l?1H,l?1H,T,X~b,X ً 4$ i*HE&]+ zԬyǵU+ zԬy͵ Vm ʂY d9,w9,XOÂe*rXLS`aAO尠rXGE.=MÂ&aAO尠rXGE.=M^T9,SLÂ&a2Rq9,XY":.yrXLt\ ia2q9,XrXM0twq3?b,`A7{Qq`)tqSH88_f~ggkc<7m~:1Gc~3xLq; *6v?)6 *V2 *V2 *6 *6vEy`c `c `)V2 26 26 26dƸAƸAƸAƸAU)6 *6 *`c߲AƼASlTlTlTlTlTlTlTlT1~11c|q/|q/|q/|?b}nY|q|=78ߓqcGc|쾚1vxLq}-c쾕1v_xLq|}#c쾐1vc<8}cTgTQqq7(ƸAƸASgdldlTlTgTlTlT ޗXV/ B<p+iy׋j|}-$7&_ZHgn|=M6&_AOWd+i4 zl|=M6&_AO׋j|42M5ej|ypWLS,/4 i_2Mr{LS_2M5ej|4 ii_AOWd+i乂_AO ˍWd+i4 i,T+XWLS`6M5ej|4 i,T+X>,TE5ej|4 iAl|4 zl|=M6Wd+i4 zl|yWd+i4 zl|WL} b+X׋j|4 iA?j|>,l i,l iM`Am`A^q, yuU F`A޵p,SF1,Tp+i24 Vdp+i24 Vdp+i24zQ`fVLS i*,l~z} bp+X>1,Tp+X>1,Tp+X>=ĩ?ө O̝ ҩ o8?TWW8 ҩ / BS]A^r+XTWLSխi*,T+ueV4խi*,T+ȳu&S]AOT׋Ju=M&S]AO o84 zLu=M&S]2MeJu4zQ`2Mv~zQ O cEuWO6;^A?xd% wWO6;^2;^/ iz<ĎWL<ĎWLS`fy`:^/,T+X׃?d,XLXw\{QNX\ `AXd,i2[A| n1I[Ag8?9V  ~[/*,_[(,_[A:SVVO[A?e n1SVVLS`[/*,Tp+XVdp+i2Ɍ[AOVdp+i2Ɍ[AO dVLe i[A:,,e3VLf i*,l6^,&njdaS+* Z/tM@ljVЅ 6nt%MyWW2voo:/c~O|L_8vߢñzLoc5v?Wc;Tcmݏ&8v?Pcz[cޠƼA'1ƼAHƼAHƼAƼA1oP_cokdokdo1HƸAƸAƸA^7777}#1ƼAƼASokTokTokTokTokTokTokTokTokTokTokdSk?6+dcZc5Zc5v_M15v{Zc]15 5 5Ư-Bcjq'co,Sk쾈4vV/+wSk?ު5vS)ݷSk쾣0 *3 klqqlqqlqqlqwƸAƸAc [\c [\)3 5 5 5 *3 5 5 knqA~+ 74d+xW<- )nqCA7仃[\PonqX" zlq=M^T+XY* iZ\2MO ~ =x[} O zlq$AZ\d4x' <-wWO䡀[\AO-Wd+i4 zlq=M&[\AO-Wd+iZ\2MzLS-`Z\A\,/4K2MejqyWLS,/WLS,T+XWLS-  zlq=M&[\AO-  zlqW'\nq=M&[\AO-WLS-`Z\2Mejq4 iml iAlq4 iAlq4zQ-`Z\2Mec[\2M&[\AO- o4 zlq=M&[\A14 zlq=M&[\/,l iZ\2MecЏZ\/Alq4 ejq4 ejq4 e4 - o׋jqyy- p[\A_%JW9 i2MejqyWLS`2MejqyWd+i4zQ-Wd+i-&[\AO-Wd+XWLS-`Z\/,T+i2kP׋ uyLPWЯ-C]/* U+Ǟ{PW78 g+X~XbC]2C]/ i i*,l?,1,TEef=?/G o;m'WϮϮs[]g;g<ݹϼ3om}v=Icwn3oΔ| ޙϼ;7x>?km} "7؝)Dn3nϸA>{9r[qm} "7g r[]gs[cwn3om} ޹ϼ;7x>gs[ywn3om} ޹אd>٬;f}Ylg}@63 u3 u3 7lg Yq?G?`?g}3Hq}vI캒0v>!}vG캎uaNq})Ϯ]>!)ϼ;u3oNqg R\qHq} "u7g R\]gqHq} "5vwv>D3oNq} }ϼ;7x>ZR,VnkJ1ubr[-Em-Gnk_E(xY䛀r[6֢֢Vm-i4Vm-i޹:Z.γDѻEnklS[&< Pn+xc"ZnZ.,P@EOEOEOEOEOEOEOEOEOEOEOEOZ,Ӽ|Lm-i޹E\)Xyb{ewnkLm- K243޹243_,Ӽs[ewnk 
m-zm-zm-zm-zm-`AEOZ r[&r[&r[&r[&r[ewnkLm-i޹2;Xy4iAm-iAm-i޹212;s[ewnkLm-iAm-i޹EOEOE PnkDnkDnkDnkDnkgm-zm-zm-zm-zmbfb ޹2;X>ܹ`s4Ǡ;Xy4 4bfb _f 1HE*6J|-_eWN|-_"EZ䱨_p"(K"J|-iމ]X,Ӽ_ewk'J|-iՊ2ͻZXy'4"Z4Z4Z4 މEOEOEOElPkDkDkDkDkLN|-iމ2;_ewkDkCTZ*^~P E^ Qk|wkO6*^~QZUy1DE?٨x-FkL05vc1<}Wk0v`/0<ט7}yaTטǸAƸAS1n1n] u+x߇x}U'ȗՅ}U'Wօ_[U q|] Wu|]z<.|=M& _/,,O _2M^T+i{ާA}[GW _A}%c^T+x(A8 iރe=Wd+i24 z=M濂&_AOWd+i24zQ`~=XWLS O iރe`_2M忂1,lz} b+X>1,T+X>1,TE忂e4 iA4 z=M濂5Wd+i24 zyWd+i24 zWL} b+X׋4 iA>=L忂e4 iA4 iA4zQ`_2=;9_4m_ԟX >cArrE%ǂkXQ ɱ b rcA^rr,ȋ)Ny-ɱi*9,Tr,N4i*,Tr,XX zL=M&ǂ&c/*94 zL=M&ǂXdr,i294 zL4 i*9,TrE%ǂeJ4i29cA^pr,藏ɱ 򦊓cA|Ld39X7U 򦊓cA?Ld39,l?19,l?19byɱ`fyɱ`c2c2M^Tr,X׃>؃R#k|SƒY,__{>G3SYUy?U=G3Uy?A^U|r~(,T+Xٞ2e)S.X׃ =M&w/*x4 z =M&wAOl]Gֽ؞f낼l]=l]wyBu/*[,!e<&wEe낼\l]yl]l] uA?+0[,T.ge4{Qٺ`u2Me낞&uAOٺ | z=Mf낞&uAOٺ " zֽl]g=Mfe%l]L ,ٺ`f19[,,E&ge%l]LSٺ`f)39[,T :ml]q#f^T.uAW{QٺC[b.u;;Gcz W`_ؽŏocMv~b;@:v?crwc8v7ݘ771oPocrwcޠCcޠCcޠrwcޠrwcƼA~1n1nǔ/=ݘ7ݘ7cݍyݍyݍyݍyݍyݍyݍyݍyݍqLӍ70Mtc|]4_~c|64>0M7v_A4MƸAƸA;TcO1 c37vߡ{L)2Lݗl;6)7;SxczcJ}fY3v_yL)1oP)&ʘ7ޯ17 27 27 27vqLqLc Sxc Sx)5 27 27 *7 *5 *7 *7 :+^) SxO/SxOSxALKN;/;byҝ zL=M^T /XYxeJὨރKA}Ee%w]kwA}%S^T.x+AvnY i*,TA&kwAO]d.iv4Y z=M&kwAO݋4yiv,T.C-e<4yiv,T.-e<4yxQ`22Mee4U ]d.iv4Y ˵݋y]d.iv4Y z4U iv,T.X>v,lX{} b.X>v,T.X>v,TEe4U iA4U z=M4]d.iv4Y zyL]d.iv4Y zݽ]L} b.X݋4U iAݽ>?e4U iv,lX iv,lX ivjw2Me=M&kwA^x} ?eA;FzAL12Ǡ_4m~4m~|~?3+GiA?L`;} /չ:_T1țu;y} oacww AuqEe;4w ʋ2Me4w i&AO}Ǡɾ;=M&AO} c4w z;=M&2Me;4w|Q}`2M&/*,~m|Q f9kc?3,cЏ=A??4K?4K?~Xb1X~Xb1X,]LcLSY z\ wO{xܔ bS2WM /V)J7%d/AuSE5%~xٔ ί2),T'/X^6%ejJ4Ք zlJ=M6%&/)4ٔ zlJ=M6%&AOe ] eY ޛːApA?˾;y} /A^s1ȋ!;ycH;x^2(c8 9 xԛ>1oP1oP1oPe1oPeȱ? 
cޠʐ27272c*q,Cq,Cq,C,Cq,Cq,Cq,Cq,CWSrTrT1!ǼA!ǼA!ǼA!ǼA!ǼA!ǼA!ǼA!ǼA!ǸA!;c|]X|Le1.,Cuar ːcM1>,Cdr$8geȱM!l 9 9 9v߾YY|Le1oqoC1v_Xo=}{b[bmr3v_yL1nɱB}1&Ǹ%&YY|L 1n1n1oP1oP1oP1oP1nе _Q&Y {|Y] {|e] em2x_ wm2ȇܵ}'ܵ˓ddm2i6j2Ļ6,Tm` M5X}Gej,>CУf2x_ D)ʠGej䱄S/*EU & i,T%XLQ=M(&SAO)ʠed2i2E4 zLQ=M(&SAO) i,T2XehNQ4` i,T2XeiNQ4` iR2M5Xej4 i*EцSAO)ʠed2i2EASAO) )ʠed2i2E4 i*E,T%X>1EbeL} b1)`feLS)`feLS) i*E,T2X>1E,T2i2E4 SAO)ʠed2i2E乼SAO)ʠed2i2ER21)`R/*E,T2X>1EbeL} Q2XeL} b2XeL} b2XJQ4 i*E4 zLQy)ʠed2i2ERAO)ʠed2MNQ=M(eJQ4 (eJQeLo6ͿqWgAg"8}/3up gygÙ/*U 3pfW ÙA^q8Ee g4 Ù2MejA4 i*=3&ÙAO̠p g=M3&ÙAO 94 z g=M3&Ù2M3e g4|Q`™2M3e?m?eLnfԜ JnyA͠+Ln]ar3X+,&7~`rE%7~`r37 &7e,&7e,&7_l?f1,l?f1,T/X~br3XjߋJn>\w o:m''7~ br3ۥNny o;9f|Q͠^&7eJn4fLS`f{x i*,Tr3i24 zLnfdr3i24 zLn>Ȫf'jyvmԬjyU / AeUEU5fw\\|QU oW5HfGbjxV5^cU3i,,6W5e%fj4KU`fɷ,iTU3XfLd\ ibIj]PcU3/tʏU͠c~jؒjyU͠~j’j%_QŏO}f~{ߝ7(c8 9 xԛ>1oPմ1oPմ1oPU1oPUͱ? cޠƪ7Ȫ7ȪcqjqjqjjqjqjqjqjSUsT5mTU1U5ǼAU5ǼAU5ǼAU5ǼAU5ǼAU5ǼAU5ǼAU5ǸA1cX|Lu1~}cSscu1>cccs 8gu̱119 9 9v_YY|Lu1o7XYIݷ$ƸAV5~9vY/vSUsdUs쾳3v_yLU1nUͱ}e쾬󘪚cޠc51oPU_cUsdUsdUsdUs>YYǸAV5ǸAV5SkodUsdUsTUsTcoTUsTU\ Us93xy}K'ȗ}K'W a3- c3|] ޷t|]|<.g=M3&˙/,,O˙AOq}cEUaw8f10ZBлe3xߘ `q̠w8f1/*~  i ,T&X8惌c=M1&AOq̠8fd3i24 zc=M1&AOq i ,T3X8fac4U i ,T3X8fbc4U i 2MUae4 i* AOq̠8fd3i2yAOq q̠8fd3i24 i*,T&X>1b8fL} b1q`f8fLSq`f8fLSq i*,T3X>1,T3i24 AOq̠8fd3i2䱼AOq̠8fd3i221q`/*,T3X>1b8fL} Q3X8fL} b3X8fL} b3X8拊c4 i*4 zcyq̠8fd3i2AOq̠8fd3Mc=M1ec4 1ec8fLSq`/*,ii|#jny A^gts3dnny /RF77f R/*,Ts3Xfw{ i*,T *XfLS o4 zln=M67_Ts3i4 zlny͠fds3i4 i,Ts3Xjn4 i,Ts3XS ryAΠe r g [ g29179 g& rn gL gL g09eg09ej4` r4{QA9 kA|u؞l:Zg7Z]|Q oV:~ZgO6kA^vE:~Y i,lO6k2M ef3XZgLSΠZgd3ijAOΠZgd3i2䩗/1?g5d3;.c8f7 /*@18f  Hq̠w8fлe32,T3#11ec4|Qq`2M1&AOq  zc=M1&AOq  zc8fGbc=M1e%8fL q`fI9,,61e%8fLSq`f9,TRds :8f)41_T3"A7|! 
q̠|ccGoc7vv__eO} g~[7>v?csv?-|TlmTlmTTsTTsOɘ719 29 2bkc c c coc c c c cE[[{LQ1oPQ1oPQ1oPQ1oPQ1olPQ51d11ds c|VYas c1>+c[<>+cYasdsds 5 29 2c`yfsTsdTs0vtv=}feTs|3v߽yLQ1nQͱ}1E5Ǹ]F5+7ccjyjݷEƼAE5Q1nQ1nQ1nQͱ ~dTsdTs>|L1nQ1nQ1oPQ1oP1oPQ1oPQ_sT3WQ}AF5e _:G5e _=G5| ޗi| AvT3x_ YvT4;4 zjfL<Վj>nfL1Bd73x_yQ}c!i/yvnfd73x_  2Mf4 i*,T7Av3&AO̠nfd73i4 zf=Mv3&AO̠nf4 i,T73.w3e 4 i,T73ȓ/w3e 4yQ`12Mcef4 ̠nfd73i4 H̠nfy.nfd73i4 zf4 i*,l|} b73X>bnfL} b73XnfL} b73Xnf4 i,l i4 zfy̠nfd73i4 ̠nfd73i4|Q`fnfLS i,l|} Q73X>,T73X>,T73X>,T7Eu3ef4 zf=Mv3enfd73i4|Q̠nfd73i'w3&2Mu3ec2Mu3_T73XnfLS i,gOg| ϸA&?eqM~ "67gobqM~ "67g b]7g/ecwl3oM~ ޱϼ;67x&?gDP3]r%?g|캖%?g׍< (uS3>J~AϸA%?7g cwP3o'#(7x!?{| !ϮCϮ3ڟwv]sC]7O>vudQ~#DuGgMϮ&cw3oQ~v]C!D7g BqQ~vO "D7gg BqQϸA(?Cyw3oQ~ *6WF b=E'Xy$=M$=M$ye@=EO=EO=EO=EO=Eث'i'i'i'i'{eczewO2x$4bfd} {ecݓ\,Ӽ{ecݓ\,Ӽ{eczewO2x$4bݓ\4ѓ\4ѓ\%$=M$=M$=M$wOrDOrDOrDOrԓ\4ѓ\,Ӽ{eczewO2x$4bݓ =2ͻ'Xy$4 $4 &ymME^\Sl2x&ywME&Slr(\M &yREޤTlr\&^bewhLnb2Z,Ӽ[FewlrLM.b&b&b&b;6i"6i"6i"6țgM.zM.zM.zM.zM.iޱ2;6Xy&wlrLM.iޱ2;6Xy&444Q\u:(F2x(yN%EQ\7%EQ\u:(yN%ED!(yS%2ͻDXy(7(4Oo(Q.i12Jewn?E_[4,yV EޢUr԰\kbym"oѪa[jX.~Ѱ\o5,i 2ͻaX{4,4bf}bݰ\,Ӽ&&&aiaiai>̙aț*jX"aț*jX"aț*jX.:=@rDrDrDrDr[jX.zhX"԰\4Ѱ\,,45,4K M EaXYrhjX.i"e%bm%bfia,u45,4[ Eg ށE\t`4"pw.]C2Z \O_{L/ʍݏ/<m~z c3vo=}F3vocS7ژ7c \yy \y \y \y \q U&C)T9]3T9]3T9]3T9v_ioq Uݷ,Ǹk*Ǹk*ǸA*ǸA*kBc Cc C)T9 V*ǼA#ǼP7pnG_[+cޠcޠc1oP_crdrdrdr>ϴǸA/ǸA/SJodrdrTrTBoTמ%_/|i\ 7Pd2x@ q2x@ 1u2}%g V/ X/_,AOˠ勪_4ˣ*K A}_E]e/$G,Yp+[?`4Sq)˨]"=3 '1pˋ \'L  i,T%XA.&AOˠed2i2p4 z \=M.&AOˠE.e*4 i*p#-.e*4Uv i*p,T2ȇ[\4Uv irQ`.2M]e \4 9AOˠed2i2pS/.& їAOˠed2i2p,T2XKLS`feL} bbeL} b2X_mc2M./*p,T2XeL} b2Xed2i2p.&AOˠed2\=M.&AOˠE.ec2M./*p,T2X>1py} b2X>1p,T2X>eLS`feLSˋ \4 i*p4 z \y8Łˠed2i2pyQˠed2i2pL.&2M.ec2M./*p,T2XE.e \4 iA \4Ǡ4ݰ Lհ XA s2nXy Xa!K7,<esܰaaK7,e ]T2X RLS`2M5,e' z \^T2s\.^A/ O9p9.I1pM s:p,T2X22MeW;.&A|lXy ºay07,~ذ f2ȳnXyL σa͆eǴݰ_fٰ ia,lW62M5,ejX^T2XeLS ˠɆed2iayQ ˠɆed 3A>r2w O9Syg*/*S)=g*p2i2S4 zT=Mf*|r2i2S,,13g*egLeON i325s2XYfT4 i32Me*/ƙ32͖9clbA UVLdl2'M]clb&26_'c>_Ec>~rW1c3:ۯ>ҏ>c|l?3_u'u?H77[~ 
|TrTHmTrTrl?5 *RcTqTqTSHmdrdrdr?sۏSrTHmTH"cޠBjcޠ"cޠ"cޠ"cޠ"c #cd"c5#c5#c5#c11ʱwH~rfrfrdrdrl?4 2R9 2RyL1o[x˽1oPcz X+?,cc1`9ƿca1^c X'Y,5ƀ~el?rL1^c XWcޠcޠcы1oPcrdrdrdrl= 2`9 2`9?<7Ȁ7Ȁ77 7x|a\ O|m\ O`ϟ`ϟy`ep?e IW 5ed2i`yQˠHEY7Hep?-sQu~$! 1ROxʠ7Hep?-3G*/*Rܟ2A>(p2X:KLSu`" zT=MF*&#AOʠHed2i2R4 zT=MF*&# i,T2XHe i,T%XHeLS n9R,T%X:EE*e4Ug i*R,T2 T=MF*&#AOʠHeO zT^T2G_T=MF*&#AOʠHeLS`,2ME*ec#21ʋc#21`,21`" i*R,T2X>1R,T2i2R4 Ȁ#AOʠHed2i2R~G*&#AOʠHed"21`" i*R,l>1R,l i*R,lU2XHeL} b2XHEE*eT4 zT=MF*fo6{Auo3ȓmyͽ͠l6~ fo3Cmyͽ͠Cmyܽ͋m4 i,l!6em4ۼfLS`zAO͠fdozAO͠ɤfӜ gR33Njy:I͋JjyI O8'5|f&5>OqfqfS7s,Hv3ǼAu%Ǽrd7sTW򘺙c ߝMͱ ljΦ~fcٚh155x9ZSSscŘ77~bTS۟\qljqljǸA65ǸA657 9 9 9 9 +fA65 _ 75 S75R gLX ruS31 uSbf zlj=M65/y̋cAτ~"z1A3a63 [~g3 DL_;yQl7 ~g3e4Ջ i*y̠lfd63i24 zf=Mf3&AO̠lfd63i2yQ`z12Me3efd`z12Mbef4 2Mbe\T63X^LLS`2Me3|\lfd63i24 zf̠lEe3|lfd63i24 zf4 i,T63X>1,lͼ>1,l i,l isQ`2Me3ec2Me3&AO ;4 zf=Mf3&A>w63i24 zf=Mf3/*,l i*yQ`21̋c21`21?,T63X>1,T6󢲙2Me3ef=Mf3&AMq63i24 zf^T63i24 zfyT̠lfLS`flfLS̋f4 i*yQ`2Me3ec21`oOM ùyQM ù075<f'" L75<fsԼRL55ei*tQM`K2M55ejj4 򸎛AOM͠ɦfdS󢚚AOM͠ɦfdS3sRnj=M65&AOM͠ɦfLSM`2M55/,TS3XfLSM`2M55e=M65^AsS3snj65^A/M O̹975e j}bS3Ünj4 iyjǦfLjǦfLS-`fՎM̓ n? 
S3-_6t4 sW$J/WaNJ<^ig?+ ){A+ Ĝ{AKr4X^iO1+ iW,Tz2MJe=MJ&{A>*r4iW4+ z=MJ|^idzA>t4iW,,:Je^^iO1+ i{2Rs4XYBv4+ i{2MJ/{2s] LAY ?hBVGn:DN1Jkl7klߩߧm/czjl?2_Tc55f?oc~}jcޠncޠnT+uuwtoU+UU+U+ۏykc kc kTY+Y+Y+*JǸAJǸAJǸAJǸAJT+UU;Z7ߘ7Z7Z7Z鏱H: HzLE1nE1nE1nEұ7"7"~pdtl?: H: H: H: H:Y$Y$="7X,yry>"7*1IǸAI;c11Y$ߝEұp?IIcc*ctl?48"?IHc)1oPE1oPEұŘ7"鏱H: H: H: H:?Y$Y$Kq,q,SpdtdtTtT\ ohWGI"A"A^y5:OyA:%hp?)Ur]:4Y zd4y~墺8)'J_; r4X.NLS]`2MA|(-r4i2zQy 94 z̃=MA&2MAe4 i*,T4X<1y`fGr;4i4 zl=MC|vhd;ڡA>t;4i,,!9Ce%vh8 iۡ2Ҕs;4XYrn4 iۡ2MC/ۡ2͒s; ۡA vh=@C.Lб;vvf1Uc;5_[c4McuLo~YgZjl1,c~lӏyS;tM~-ڡcޠcޠccޠڡcޠycޠڡcޠڡcY1oPc;td;td;yc ۡc ۡc ۡcYv7v7v7v7v~ڡcޠycޠy=>w>w>w>~p;ct;ctl?c8Ɲ:}oq샎qSt,:AǼA52ǼrctT#cޠ?v:v_CIc~]O&g:kбP~Zڡc6#%ƼACǼAC3cޠڡ?v7v7v7vyd;td;tl< : zL1n1n1oP1niw<4~ 4HW4HA^ A^yM:*yY:z\=MC&+Em_xV>Q w3~T$o]gp܏U+EmQ w3i4Y z|=MV>&+AOϠgd3i4Y z|=MV>/,T&XgLS ڸ,T&XMLS`*A>q3XMLSy|4 i*o,T3XgOs\ z|=MV>&+AO 4Ygw\ z|=MV>&+AO`*2Mme|4U i,TbgL} b3XML} b3X,T3XgL} b3Xgd3iCqW>&+AOϠgd3G|=MV>&+AOϠEU>ec+2MU>/,T3X>y} b3X>,T3X>gLS`fgLSϋ|4U i4Y z|yÕϠgd3iyQϠgd3iW>&+2MU>ec+2MU>/,T3XEU>e|4U iA|4 V>e|4 '@| Ar43Ny B:9+'@<Eg*'@<hLS)`2M%@eJy Рhd4i2zQ Рhd4i2$'@&AO Рhd4XhLS ` i*,T4XhLS `2MeJ=MAAms4ȣm!AAo} KAK!/4/^l,l,T.iϥzQ}Р_!A<>hR WH}Р/^Ax ȴA_}Ü3 z`=0CA 9: z`=0Cgl;Z77G97g qh~=Ds3n϶֟qh~ 9: ?Ds3opo~ 9ܛWs!_5GnH}"y+Qч!}Qч!"}kRч, Sч&>ܞ?+8(C~8CB>܎,k14h2=O$|Xbie{4a|gf>,k14ZLpf>,k14Z2=Lsf> E3zf>4|i"D4!Z(D43G3Ʌ=MD3zf>4|i"Lsf>,k14h2=Lsf>,ܣ̇e4Z2̇e4Z2=Lsf>,l|i"D4!*D4󡧉hCȮ&Ẋ&=MD3zf>4 ̇ec4hfpf>,ܣ4 D3c4 D3i̇ecп{4a|X>G3i|XG3i̇&=MD3򜃢=MD3zf>4 ̇&=MD3zf>E3zf>,ܣ4 D3i|XG3i|XG3i̇ec4 D3inܣ4{fhCR4! 
)|f>,E3yhḢ E3䆢4h2=Lsf>,ܣyḢ&=MD3zfhCȮ&=MD3\=MD3zf>4|i"Lsf>,ܣ4hfpf>,ܛi4h2=Lsf>,ܣ4f2=D!OX)}'|% }K@򡗀CR!)}K@aYB`ˇI!}Lj2җ4_6CBH_>+CT!G*}/җ}"}/җyrWˇx|A/z`H_>|!}C!**}CC ˇ<CUT: }LsO_2=}LsO_>,l?|X/i|X/iˇ&җ=M/zH_COuˇ|ĩC|[>IO-̒n'=U|3K[>-}enT|3K[>,Suˇe{a^ uˇe{a^|inD!#nD򡧉COuˇ&Suˇ&n8U|inLT|XYf[>#N-i4KLuˇen2ͽnL$T|X-%s2R:S[g[>tuuˇΡn9-=L_wJ /αd~iSrl0rl?c%9c[?_ƏW~flKqlgĘ7cޠʗcޠʗcޠ~6ypL1oP?ƼAP>#aTrT)nTrTrl?5 |c,_q,_q,_S)ndrdrdr?XYYYYSrT)nT)ʗcޠJqcޠʗc?woNqũ^~矛Ssg c< c< c< cq1n1n1nαD7B1:xesdsdsl?b6 9 yL1owYU}1oPEc*ty*Jy*Jccfsl?5ך1֬wgZ9Zc*mg:9kͱH~c{Qis{Qisl?1潨c,mq/,mq/,mq/,mkc KT۟q,mq,mSodisdisTiNMy6/*ߩ _6LmC+A^NmC+A^Nmy=:yI:Efp?O4ڼXL6&H_a-HcxQ 3 l7j(3"A~bp?tQ ~F$ȯP ixQ vC1XxHLS`2M5 `!2MC.,ݦb5xQ `A~bdC1i4P zl(AO ŋj(AO ŠɆbdC1i,TC1XxHLS `2M5ej(^TC1XbLS `!2M5ej( `!2M5ej(4P P zl(l ŠɆbdC1i4P AO ŠɆbdC1ixQ `fĆbLS ŋj(4P iAl(^lP iAl(4P iAl(4P iA,TC2M5ej(=M6&A>vC1i4P zl(^TC1i4P zl(` ŠɆbLS `fĆbLS ŋj(4P ixQ `2M5ec21 `6M5ej(P hApC1ǟn( ȸA-7TC1ȇn(^Th'A>wC1gn(4iy7/,TC1"n(=M6&AO ŋj(=M6&AO L4P zl(=M6&2M5eo4PbLS`2M5ej(4P i*,TC1i7<:bK`C1%P TArC1%,Khڱxjdžb7)6eW;6eW;6&3Ams&2A$y͙ 9Lb'3A_$yҙĠOf>I0w81c~rc)1n1ncqqqN3$4 aK' L3$A^nN3$A^qNy9iϐy9iap?44X>' =xQ`U{0a/=ܟA~C`p?sQ~"ȯ ~.=V]ۃOLS 0v{0XFLSэ`ڃ2M`2ME7.=,ݦ`_=xQ`ڃA~`d{0i=4 zlۃAOjŲۃAO`d{0i=,T{0XFLS`ڃ2Mej^T{0X`LS`2Mej`6M| Eejl=M|`d{0i=4 zlX`d{0i=4`L} b{0XEej4 /Al4 ej4 ej4Ǡ i=xQ`ڃ2M&ۃAO ź=4 zl=M/=4 zl=M|2`d{0X`L} b{0XEej4`LS`ڃ21`f`LS`f`t„Aq0# a ;L π7p1ǼAǼAǼm_1 +ÃcA1 cx"c~/kH_3Esc~ 1Fsscs<7H1EGc)~"cA1n1ncʔqq ձ`U/8_p?e_p?_׎cGcA^@HAO*r c Y ubi_p?#7; g$ױ~|bD:܏4 _e\4Ui*,T/ȯ ir,Tb2MU.e\4 i*7Ž=M&cAO_ ;4__;4 z=M&c2Me\4 i*,T/XEe4 ir,T/X_O inݦELS`bA>r cAO 94 z=M&cA>r/i24 z=M.*,l i*wQ`z~ V>/X`y 7}W`yϻ|^ >|^!?d>yAO|E&yAO|^% z4 i4ϻ|^LS`y i*,T>/X>X0,l, i*,l, r.y2… b|VB^'$\ yAq!/g.]Tc% yA>8v!/G.4U i,T!/XB^\ z,=M& yU z,=M& yAp!/i4Y z,=Me*4s iwQ`rN2Me*4U i,T)XBAv<]o;wAes#wv^;wAs.sع z}yԝ ڹs=`:wչ is,T.X]LS ڹs,T.X]LS]d.isw O#sw.siDw4t.苗 |s,T.X] ݹ is,T:w2Mue=Mv&;wA>q.is4ٹ z=Mv|h]d:wA>4t.is,,i#wen] ݹ iđ;w2R9r.XYBG4չ 
iڑ;w2Mu.;w2=r.XٺG]b.^ku8V7_FcU4?yۯcW~Aj/c3_;c{?csLo~ٌ@flfKncޠncޠjucޠjucޠncޠnTuu>tUU1uUyޠju?Z7Z7Z1UƸAƸAƸA󏵺1n1n1n1n|1ƼAUƼAUV7 b4 VcTݱ{+wc|Yk~l5+wc1^܍zgol?sL1^,yK*qK,qK,rƸ%Ƹ%9c xTX֎q,+?;XYY;ޘ7X~v7 6 ,y*SoToToTodo-x{:~|l-xc|oYO%1TON戮ݏd7۟L'vƸ]쎩d7?ϑ's p.b] z]T./d] z=M႞&pAOu`p2MEe4U i,Tp2Me4qi,T.'W4qi#.,T.*d.i႞&pAOu:\d.3=M႞&pAOu:Ee p2M.*,Oт~Wa`{*vQa`F KPa/AтF hA_ } 24F0Zd-i24F qhAOa`h2;5h2M.*,T-X0Eтe 4F iߩF iߩF i*,lS34> gAn]T,ȧnl }qς|Eu7g}Fς|YLS`g2MςejYd,i}vQYd,i}gς&gAOYd,XYLS`g> i*,T,XYLS`g2M%~ejd,ȳn}[YogAcq,c,n}݄ }1ς}vAFʂ3) ?) r`"eA_}90RHY) r`,ˁ/̂& fU0 z,=M̂& fAr,i`4Y0 z,y Ou`,T,XÊe*4U0 i`,T,S].4U0 i`,T,i`4Y0 z,d,so.y*yb ~`vQ O̹`̂|YO\0 e,cI.4U0 i`=̂e*4 f2M̂e*=M̂& fA>Bq,i`4Y0 z,=M̂|Yd fA>s,i`,,̂exYO\0 i f2Rq,XY7.4U0 i f2M. f2q,XYZ89M͂ 1ot`yPkllcvLۯT~mScl kl~lۯ,~E~=ӝ~}~TlTl~fcޠncޠncncޠfcޠ5cޠncޠfcA1oPycldld5c fc fc fcǼ7ȼ7ȼ7ȼ7ȼ~fcޠ5cޠ5ǔ7fc|7kͼ~l5fc|7ϗfll?8y1^{>ۏSl4gcޙgcgcgc11鳱<w1Ƹ3c gƸAƸAώ)}6 +Lycy gcޠXǔ>>>>5c1̢+;hcɲ1̢}glʛ*ߩُ1o6~[g}ƸO͎)o6?OC>ǔ7QZall?3kř W)LYv,y;S܏fd,rΔAܙ LYp?֝) rΔ=Mfʂ&KdQDp?[7.]T,?g %~JdlAߡD܏\T,-TȂ w,oS]" iD,TR wE`yWwE`yW F绂~W |W7 滂 kt+] G绂&]AO|Wd+X|WLS`]2M廂ew4ﺨ|WLS`]2Mew4 Ɍ]2Mew4 ] zw|Wd+i24 ]AO|Wd+i2uQ`XYF VU8,V+XF VУf+Q3S 5XAAG E&XAOA V1 z b4 iR i*uQA`X2M.*,T+X VL VL VLSA`f-A`oOӵ SZEղ|XZVS] kYA>Ru-+ȇVe]T!jYA>r-+gWe4U i,T-+XZVOR] ze=Mֲ&kYU ze=Mֲ&kYA>t-+i4Y ze=Mֲee4U iuQ`Z32Mղee4U i,Tk UA|,W\rU'\ dUA_,W}e\is*i2NuQq 9N4 iXT2+q*'mS4Kq`f8N,,ǩeS4Kq`TKq`fɘ8N,,%ǩ~Љ#/LL϶+Bl{"v϶l;'v϶l;"]cve|l.>.϶bl_ wϼ=17'>g~Sy-3opA}gS y3opOL}SאDb3n=7g SqHL}ƟHL} "17g SqHL}ޞS y{ dlOL} )ϸDO}> ?vj3O}϶[}@~ g ?O}!+?Ty{3opOS} "M74g ϸA>l;74؞D3niϸA>Nc} "M74؞,?EyTy{hlOS} iϼ=M7>l;[7lXUm>&@~lg5>N}={l;7؞l{vtgI=?v3nkH=܎jQA!l{A!OTPz~*(=? 
J̽_TPzG*(=, J42ͽLs/(=@z((=4QPzi J=Mz((=4QPzG*(=4QPziDA顧2ͽLs/(=, JLso<, J42ͽLs/(=4:z'RGD!+uOߕ:zK:zȧJ=wD(~qA(򹽂E^4z}h=y5z}h=$zI$z&j=-Me{(7iMe{a$zX7(D4iMe{a$D衧&CO+`CS!(X܃EyXL<`Qp=!*8EU|`C_=Iie{!l)XLs=,܃E=XLs=,܃E=Mz=z=4,zi"XD!l)XD(E=MiE4KB|`2͒Paf`(XL0,zXiE4`Q1,zXY =,ӦCDP3z?o׌ֱ*۟1 /~ӿ/ۏ+o~?_6cIۯ97[7ј7ј77[1ՌƼAݿǼAݽ1ݻǼAՌƼA#ƼAݵǼAݳc2cޠjF?ƚ7Ț7Ț1#ƸA֌ƸA֌ƸA֌51n51n51n51n51ՌƼA#ƼA#f4 1 f4֌Tc|X3~xjkFc1 X3㽀5<߱5c|/`h,?X3Y3Y3OqqQƸA֌f4 f4 f4 f4Y3Y3:ј7X~f4 \3 Gy*ShThThThThl?c,GƸt4*^X:^X,EcA~bg;c蘊EcCwb1c.E?vC?t( G,.*g3 ~ sFECv(rf猂 Qv(iX 'A~bE?o:], P.XܟuEE t(B w.\]T=(/[] .׃X r`=(ˁ vu=(/\ r`=AA~zPd=(i4Y z4U i_t4U i,T=(XzEՃe4U i_t4U i#׃e2M`6?m%AY zMAAOzPd=(iw֮=Mփ&AAO*L},\T'XfOD%g’O3a'/] z&,=|sQ%`,=M|&K>A~Od'XOLǒOLS%*4U isQ%`J>2M|e>|e>|e*4},4UA||tOoK>U 9K>A>r'g^.% GsQ >J>A>p'Gj.4U i,T'XO\ z,=M|&K>U z,=M|&K>A>r'i4Y z,=M|e*4U isQ%`b 2M|e*4U is*Wa'gˎlٱ2z ٲc=A>[v'諌\T'= Dn z[x<Oлe'ݲs= =sQ=Oc'i x^{t'GE Q(x|Ex< O ){r'苗= ~,T'XO i,Tz<2?mx==Mx|Od'i4 zx=Ex|<Od'XY 4K= O,,xeOL i,,=xe\,Mxe%OL i)&w~935ccܱ29^>_$c52.ۯcc1_cۯcz+cl?R0_ce1_tuuu>Θ7[7~ncޠ;cޠcޠncޠncy1oPɝcrgdrgdrc ;c ;c ;c7777~ꘒ;cޠcޠǔ81fu3Mc|kͬ_kͬα{:c~=zgVg;:c1^LH)3}*3}*3}23}23>>OqLSrgdrgdrgdrgl?4 23 2sLɝ1oZ߯rOdBغtQ #Lk[ A~w~Pлe?(L \-[> [\|-ɖOd'inm[>AO-j[>AO-ɖOd'i,T'X,T'XOLS-`Z> i,T'X,T'XO it4oej4i4 zl[>AO-ɖOd'in=M|&[>U 7[E mv'XlyV'7ElyEly}E*}b'i4Y khy&<2Mye7ye*\T'X"OLSE*4U i,l,l,T'X~{c'X":SGw|NO" 򙗣;A>rt'^DѝTt'Gcpѝ :,Tt'XNLSѝ 94 z=MFw.*4 z=MFw|^Ndt'i24 z4 i*,Tt碢;2M?e4 i*sg 8A>!v'3 8A>!v c4AO1 O8FsQ17Mb& 1F)hc4Ao 7sQLco&聱7wf{3ALXܛ {3Aߤ؛ i7sQ`z32Mfe4՛ `{32Mfe4՛ z=Mf&{3A>iso& 7f|Ef<:Lݛ I{3A>|qo&苗 '7,To&XL_ܛ i7,Toz32Mfe=Mf&{3A~Ldo&i74ٛ zŽEf|Ldo&XY4K 7,,fe%bLLt ܛ i7,,1fe\,AfeiLLd ܛ i7tڀ1X?ьc_:_C+hlcsL({lxkgl>_9csLo~ٌOflfKncޠncޠb4cޠb4cޠncޠnuuuӽ{Tf?uuۏryc4c c4c c4ǸAhƸAhƸAh11n11n11n11n101hƼA1oPb4cޠ1F3k_khc>c|k~g̯u11^ьzgfl?t61`1hƼOhƼOhƸOhƸOhc'c4c'c4c!11cьqьqьqьG{ƸAhƸAh)2 Gьyyc4cޠ"!c4cc4cٱ11cegьƸ3hƸ3a#2g~̱~ r G .*;XKp?܏k G Q~ %r#ZQ~b-Kp?rO{i?=Հ9LO6`ـ d& 2/A_?Kݙ@# vgcBк;\Tw&PkZwgV؝ 'ݙce`1AzLc=&/`] /c1U ;0c1A>r=&i4Y i,Կ?i,T=&XzLLS4U i,Կ?i,T=&ȇ4e2Mce4lc4Y 
lc&1AOzLd=&o] z=Mc.,o1A~=zL٪˛zLo61A٬E1A٬}b=&1U >zLd=&iƮ=Mce4oo4UzLLS`1U i,T=&X~{c=&X~{c=&XzLLzLLSrZ&XN\TZ&2A>rZ&G]NPi`22MeeJ,TZ 2AOiɴEe&2AOiɴLO zL=Me&2AOi`22MeeJ\TZ&X~GLSi`22Me2Sib|$@Lлe &2Sib2Ly |3/y z&̼f^^3/AOp%%0y 3/y z`̼=0f^3/A>v%1y z`̼yƙ v;g^eʼ\T%XKLS`2/2Me^%q%i2rQ 84y i&3/2͒p%ȧ$μ4K`f8,,g^eʼ4K&`2/K*`f8,,g^e4K4lufl1-B~uOk-~e`|l۟W~A1ǼAǼA5`ƼA5`ƼAǼAߏ3 > >87̘7s? > >u716`ƸA6`ƸA6`܏qlqlql0c 0c 0c 0c 0ccjy7cޠ15`ƼAcfl?3ך 1lqZ3ך (Θ_{0cـ~:l'+?؇9>̘>̘>>>ؿvهvهOqSfefdfdfl?3 3 sLݎ1obfTc,?؇8>̘7>̘7>̘7>̘7>~B؇؇qg+;cfl?5Ɲ3Ɲܟ#)E5Y|ud GRsqGɱIDOiJ8Kf6GQDeFd }}?pgT%p}}Z@5Yil`K` /`K/-~>l `K>@? >JO^M@*L[_}J! N)c%pRjS/~ʘz ܧ$`K`DG~& <@k`K5a%r@k`ˁ r}[6#V@`K`QS5l ,j*XT%`ˁ 5l ,j*Xԑ-EM[`K`QSG~:XT%`K םly -V@.>;h5l ZM[&-\Nv%j2r+)UWgW-Sv%2eW}˘] -cv%ήӄٕ@0OfWTv%П&̮ZMfW&+\uv%j2XTv%] ,j*r+EMeWʮJ`QSٕ+EJ`Q1XTv%] ,j*r+ٕ@.;zgW؁rv%;Cή؁rv%CήJLeWJ w] ^+EMeWʮ5] O5]y +Vٕ@ʁʮZMfW&+Vٕ@n9h5] ̮ZMfW&+EMeWʮ5]9PٕzEMeWʮ< @n29h@ CL<9@n;Grr$~# ּs$+̑r9@H5H ] #vő@;H \] C,!G#pq${.rő#8XTq$H`QSő#pq$H`QSő#Vő@ 䶍"\wT$Q@D8*EGETT$3rGQ@n8*=GE|2*nGE5 "EMEEH`QSQ"VQ@ɨH Z ZMFE&"VQ@8*h59PQ@8*h5 ,jC掊59sGEH`Q6wT$YΛ;*X,G ,j**X, ,j**r`9{H`Q?wT$YN;*XTT$S<5-w=ugl/\-e-bzly.[6/[ecw] _5˖ |;^#{2~5˖YH.k@Z#5ۼetw\Fwe˨etw\Fweetw5^#5^#l[5^# [+wl\fJevk$H.k2;H.k2;H.[SK%Tr23uK%aTr2wK%AJ> |pe2/µTA^J>|p2/J> |p2/e^8 Ry.\FY>LYp-|pey?.82iµqrw4''7N>'{o|pqA?{k~Z j.[7)\6_خОNОNಫA?{:,t./\~ϵq7N>7N>7N>Y4N>14N>7N^6N>%24N>+hp=_AnqA7N>X\',jk?X\',j5kkE͵q8`Qs=_8`Qsm|{ h|`Qs=_8`Qsm|hZͽqA.iqA7N>h58'"5'{¯ J>X.Z*/|R}RޤRJ>τTµTA&쥒ZͽTAJ>ȥZJ>h5RkE&J>X\K%/\K%,j5R RkE͵Tf{K%,j7TZ*`QI5R R@H> #b$b$/\c$b$b$/\c$㟹H>-"H>M"H>ȭJH>X\c$,j15s,j1=FAH>h5 #c${ܣCVs|j1Z=FAH>X\c$,j15 kE5FA6^6>ho r " "M؃N {06af|&r/r r5` `k0E5`Qs f|f|3>X\,jZ=A3``Њ`}``=` ``=```|r5`k0\F0E5x`Qs f|3>h5` ܃{0Vsf|jr)Z=5A.E#A3>X,g`QF0\F0ErV5qi3>X,'`Qs f|YM#xa9:`4,jf|3>]3`/Ojÿ흺E:X~rTlӺ#o9@?w_ȍܨGN Q@?82lwoخްݼaxavv ۝{&ۍ 7lmخ۰az6lWmnڰ~=K0;WavPavPavP/^=\0;avPoOcTe1avPo5^4~s- s- s-1atatatap0:\0:\t}dede>0:>7/ces-xk\˰}gs-xk }(D̵ gs-Y\˰}l? k)2*2*22222lFwkFwk ̵O*{@Zi}Z%p gTS%\>Z9k ,Orky s-Z>'@?̵sh~k \ˁZMZ&s-VY"d%pr e%p8][^}mz w˵U%p! 
䂴C/~z9P'-Sl[ƖK or ҙ[.elK r ˁ:\&[.EM\:XT%K`QS-Z.XT%K`QS'j5r F[.EM@,jr`QS-Z.\vlZM\Kd%jh5r lr-k ,Vk ,Vrmk ̟\K\K{Z3rr-~k ̵ZMZT\Kd%\K`Q,1XT@Zʵ5k9Pr-EMZef{Yb%\K`Q,1XT@Z[.nK^%䞑[.5r@\qK r9P-?jr-@ 5Q%K`QS-Z.{x r lZM\T%jh5r lr-@ɖKd%jh5r ,jXT%ˁj5r ,j@FWJ ]9Pѕ@#9@vUJ 7 U 䦡*4tW%[fͮJo6*4tW%[r?]*XTW%J`QS]*suW%J`QS]*V]@ɮU z*\wW%]@n<ȭ"wUTW%pWr=]@?J ׌U ɮJ w1U ,jXTW%k5U ,jr*EMuUZMvU&*\sW%jh5U ZMvU0ǮJdW@uUfJdW%Y3X,U 䚱*Er]f9J`QmvW%J`QovW%ʁ匳*Er]f9J`QS]@ng@vU&*i{OQrVrFvrJvڡA'c-Gݬ Clye7kCȲ5CnԬ 5YXT&fM`Q}_E͟f ?. 9Ͱ3&?$gd ۟4?cp ?L/'c` ۟a'x^GA* * z f0vAz>?Lo ;;AughAr2vAvAv20:0:0:ΰ}kdagdaa* , , ۧ7avPeavPeeuT0v>3ax0v>3/<v񳀅a,`ag? X gs?OaT}a ת ת ׬ ׬ _kwkwCY5;S}gf}gd}gd}g>530O7avPavc}gTa ۇ?WF?YF?Y,<2ϰ}rudgt|'p GT|'}.pi GN>4#MHS9P})pi ܧN X:Ӂ+N`B<}}1pG ˾N^uy@?e_@5`&:V}@EͿԀy <+, <+,+,2O>O &tȄN-:Lru @9k˄΁Jr }V%NS.pB'N5 ,jTx`QS :EM%tJN`QS :EM ,j*XTB'; N5u*Ur}~_>JN>JNlVrds*9~Y ZMVrJNd%'JN`QXT%@Ur5U9P*9EMUr}f{b%'JN`QXT%@Ur+9ٮJN{ގ+9q%@UrJN q\9P?r/Ǖ@渒FWr5U ,jH<@JNd%@Ur&+9V@JN w\ ZMVr&+9V*9EMUrJN`QS@@L9P@z9]/b@L W 䖵1r &^rˁ@8 Yb @L`QS1EMb rρ1EMb ZMb&1d &rف@_b@L 7B9Pfb|2s1\.u &'1ܣs &@L`QS@.:XT &@́ 5 ,j*h5 rMʁ@@Ld &j2h5 1V r1Vf9@L`Qu &˥5˙\bX1Er2ׁ1Er<ׁ1#5)]b堮1EMb@ rɁ rVZMvg&3Vݙ@L 7ܝ 5)^wg 35՝ ,j;s3EMug5 vgk3EQauOKЇ ?@g Fu4~U>F@Fi1}*p Ǩ(́cTU>wO\[l˵U&\[l ,V9rmy s6d>=Ǟ9@?ӓ~왳 pJd&j2gh5 ܧ&[7eluO-n[7el!eluӄŚ@X+b́*rŚ@_!kT&g.3\u@^5\u&/5EM,jXXT&bM`QSŚU ,jXXT&ځEMk*rŚNh5uB;bM`QSŚ@.tX@k&5\v&jXh5Y ,ZMkFbM`|*bM`|*˧bM/|~T bM>bMlkdXs5~Y ,ZMkbMd&bM`QXXT@k*5U9PŚ5EMk}Śf{b&bM`QXXT@kZMlis6*g+M 9P9@n 9g lT&r6r&@r9r6EMl͐r6d&j2gh59P9@ɜMd&j2g=:l&s6V9@ɜMd&M`QS9r6*gXT恌(M  ޕ4ܻr&!rQ@]9JȽ+Giw(M 7B 䶪4Vu@Ei5 ,j*JXT&{t5 ,j*JXT&j2Jh5y 4\Pv& ʎ"0JȍJGi(́@.(;JQ 䊨4~> 64EMEirEQ4EMEiT&(M`QSQ@(Md&NZMFi&4VQ@(M  (M WD 5Y\Gi84\u&Y:JX,r ,j5 ,j㹎59u&YN:JX,u ,j*JzGi(Md&H(Md&HZMFi&4VQ@(M  5)^Gi 45 ,j*Js4EMEi5 Fik4Ey. 
a1l5t:{.a0?Yk\ a4Śa, g>w"kŚa,`f>>7,)OJ=ݙ}R*p 'I;s3ԁ+zL`B+zL>Fu1 9cTd=&ps 1}*/1c^c&1*Eh5Y ZMc1@   Fi Fiѐ@(MM`&p y 3\rw&ݙ՝ 3"r ݙ}>&Ksցpw&dw&sցEMug5՝ ,j;s3EMug5u:L`QSݙ@n;X9Y5՝ ,j;jwgLdw&kZMvg&3Vݙ@́˧L`|L`|˧L/|~ԝ L>Llvgd;s3~ٝ ZMvgjLdw&L`Q;XTw@ug5՝9Pݙ3EMug}ݙf{bw&L`Q;XTw@ugBe6LI2qZ@eL wj9Pi?Jri@n8--EeiL`QSi2{# LZMeTZ&j2-h5 Lri@ɴLdZ&j2-h5 ,j*-XTZ&́J5y 2*-ȍ-eL 7 v2ܕvZ&rci@nl9- eL 79Pi2EMeJ5 N2EMeJ5 LZMeȴL ׌ 䚱2Lri@ng8-s2{#NdZ@eLOeL`QSi2\tZ&L`QSi ,j*-XTZ&j2-h5 ʒ2Vi@ɴLdZ&j2-ȕ%e&2*-EOe&2Erhif97L = ,jóN5Ye2EMe2EMe,gi ,jN5ˉZeJr=i2-h5 V2*-h5 V2Vi@ɴLdZ&j2-ȭ"e&2Erif9q̭́2EMeJL`QSi2E5ifĴL`QSiݙao9l%W|Xatz=Lgݰ?tgnaz?pm  yԫ0;0;0;avPSwf0;a<;3L0;avPavP/ȿݙatݙatݙL0:0:0:̰}|jdwfdwa:?;3;3lz^Afu&azfu&;3ݙa }`wf>4ݙa >w"vgݙa,`wf>[75I3I05?MavP͏avcfTaj j j j j ~?coX ۧL]jVef>`:W3?4^~[6e_-}j0p GȖM>fYhV޲9\_+ĖM`Bj+M`Bjs[ek\e>@l}2p eUZMl&[6`h5ٲ lZMl@ɖMs `LZ0&r`LZ0&r@ DNc"'pqy C7U9P@9tX~>P@.9t觌@?e r ¡@.;t觌@?e 5 ,j*tXT&́ 5 ,j*tXB7EMn;M`QS:XT&M y C7V@.;th5 ZMniOr 䊫C7)tX.B7|Mnd!nd3t' ZMn&C7\u&j2tXT&އ ,j*tsB7EMn M`QSB7E>M`Q1tXT&އ ,j*tsB7EMn+8 #Wpe ΁r@s*83U &+8r'r+*8EMUpH`QSh5Y  Nd'jh5Y +8V@ Nd'jXT' N`QSU M w 䮗C7r&rC֡@z9t]/nM 7% 䆬C7r@n 5 ,j*tXT&e5 ,j*tXT&j2th5y C7\3v&k"0tMCń 5@.z:t瓡 䢧C7~> NC7EMn rӡf9M = ,j*tXT&j2th5 ʒC7V@Md&j2tȕ%n&C7*tEOn&C7ErJסf9M = ,jӺ5ˁ]n̮C7EMnC7EMn,w ,j5^n r=ޡ2th5 zwn.wn.އйjssDBͅ\lUBͅEͽssaQssaQs {¢޹ ;75ͅEͽssaQssaQssaQs\XlC\X;7{¢޹TB.+ssE\#Ep΅QBn(Gp.㟹Gp.>"8rG ΅Q¢Gp.J#5@Dp.\h5 SDB@*sD'p\=Ep.\h5j"sD¢Gp.,j=3ss!Թe\2un.侃:7r+W _ͅ/SBss!Թ[\2unͅEͽssaQs\X;75ͅS¢޹wn.,j &:7ZMtnss!׌չk\苀ͅTBngswn.䖵:7r;C fͅ\TB?\ȝ6un.,j {B.zssaQs?yaQs {¢޹jssDB,ssDBͅV &:7reI &:7{B.zssD¢f9ͅErW ͅErW H:75˩\un.,j h:75M`9ͅErBW 吮:75ͅ\Wf :7ZMtn.z:7\h5ѹjsXNss!Թjswn.V:7ZMtn.,j\X,xչ ,xչwn.,jssaQs\X;75 tn.,jA\X;75 vn?wn`~`?Lg]әa39l$OxޯOa?'azf^=s3s3zԹf2=Uz7azfչfu+0;a<0;_0:0:AvnV ;7 ;7٪atat0:0:Ͱ}azKfu9y9_ƖͰ}2i5[6xٲ% ff5[6ya彇-a|ٲ-a0>leOٲ  }2p  }2jeh5ٲ9PM@ɖMd&jeXZMljV|V{&r3 80s uq`"'Џ9Pׁq`"'Џ9L&r80@nT&Kjq`@nMn80t]nMn80tǁB7EMn M`QSB7EM,j*tXT&5u;݁EMn ryܡ2th5 C7V@Md&j2tX.B7)tsB7)tX.B7| rߡ@.:t'G@?? 
iC ZMn&C7V3 5 ,j!n M`QSB7*tXT&M`Q1tXlC 5 ,j!n M`QSB7|ΞgxWpC w\ ^+8ȭ!Wp9 ΁T';Dr@~}"Wp5U S#EMUp Nd'js&+8ܻt'js*8t'jh5Y ZMVp5U ,j@nT&ir3͡@n9tnM 7 fC7Ls&rס@n9tsB7EMn 5 ,j*tȍ-n 5 ,j*th5 <@;t5cn} C7p@nM 3 䚱C7\t&'C7is&M`QS@.z:tX B7*tXT&Md&j2tȕ%n&C7V@Md&+KZMnT&ZMn C7Erס@.z:tX,r ,j35˱\n 5\n X:tX,Gt ,jS5 zC7d&j2txnT&j2tȭ"n&C7V@Md&[EZMnC7Erס9^n 59PB7EMnkC7E5B7E5nXjo'ro~n~6ll6lhQa*3*3]|ԛÔf>%|azfff={> 3132320A&rA&rA&rş7atatatata,~=*0*0Z2&rcKxkDΰ}bi59x+ .=L D0>L yg"aJgXytata0:|Ê ۇOysys~ٓx}+pE ՜ }+p gTs&p Y}l,b>:9h5ٜ g&3*h5ٜ lZM6gY@L`QS͙})jA_ZM>cU ZM$jɬۃZM$j@?HAbV&2+PgeLeV&2+ȕyge.LeV&2+ho 2EMeeL`QSY2EM,j*+XTV&5u:cԁEMee=2VY@ɬL ו ZMfe&2V?Mi˅W&\xiT&\xi˅W&A@.;HȅNi M?~ gO{b&j2Hh5 rA@ M`QSAf{b& ́ 5 ,j*Hs4EMi 5ۛ4E& M`QSAf{b& ́ 5 ,j*H@i~39럩L 7oܜ9P͙@97gTs&3Vqs&+nr;͙@n9XTs&L,j9@6g&3V͙՜ lZM6g&3ss&j9h5ٜ lZM6gj5՜ L ˜ ~2/sV&r+Y@9+2ge_L V2/sV@ee5 ,j*+XTV&;P5 ,j*+XTV&j2+h5y 2\3vV&k"0+m8gérY@ng8+5cgetLOfeL`QSY2\tV&EM0:?R>0:XͰ}ic5x: }`fc5)a ۇ4񳀱a,`f>x7 2l},p~ G G},p1 ܧT&p1 G} ӏc>OWӏ*dh5 ܧ&C6h5 ZMl@ɐM`QS!}*ߦ[.8D/8>NOq|؞Oq|dLZM&pN W LZM&pN Wb LZM&p&8p'N`QS  ,j*XT'ہEM%pJr n5up;N`QAg'j2h5 j8V @Nd@UnT&\[Un˵U&뢮kM`g&krԕ@??Q&ЏO{Yb&jrh5Y rԕ@M`QSf{Yb&́5U ,jrs*7EMUn5+7EM`QSf{Yb&́5U ,jr@Vn8*!N 9P @8Ƚ'pT'8ܐq'[2Nr @n8XT'N ,j*@&p&8V  LZM&p&8/t'j2h5 LZM&pJ5y +7r4WncM 7\ b+7u&ir3͕@nrzWnM 7\9P*7EMUn5U >+7EMUn5U ZMVnM ] 䲰+7rו@Xrs*7u&;rYؕ@.]r瓕@nrXT&M .] ,jxg`QS;T&YwrȥKWnMd&CZMVn&+7V@M ׇ\ MׇX 5ˉXWnP+7\t&YNƺrX,c] ,j5U ,jC5U9u&YʺrX,e] ,jrUuWnMd&Md&>ZMVn&+7V@M 7|\ 5YZWn8+75˙ZWnM`QS*7E5fM`QSfM`QSf w5qg~Od~G7a:=l1l{0߉aWa߮0lt8 l ?WavP/88^A?L avPo7avP avP avPc2^8 8 8S`dgdgdg>52320%A&pA&pZk0;0;>*023lw0&pNxΰ}i8 a,`g? YÔy8y888<8<8 at at a0:ÔFFFL L LL /N;P}}8-pN &}8@q}8-pN ܇ ܇}n2pz 8d>7MsYV}@>N>h59P@>Nd'jeZMqsYEMqmjwL[G} G;} g'pg@5y&O? 
l l3M}(j@uN \ `,Z0uN x] `,Z0u-:lpY'K.5U9Pe:EMu:,XTY'N w\ ,jx`QS2:EM,ZMuNdY'jh5Y9ܕgwM ,wEMrW B<宨X<`'nrM@`5yO{b'jh5 lrM@&O`QSMf{b'&ρj5 ,jsvv'{tN`QSٝ;EMewrٝ;EMewZMfw&;dv'krٝ@_fwN 39Pٝ@n: gwfN W' N wڜ ,j*XTv'5u4*Xy;Vٝ@N ZMfw&;Vٝ@.9h59Pٝ@/1h5 ,jS5A\gw:N`Quv'Y:X,gr ,j*X,s ,j*s`9N`Quv'Y:XTv' <ٝ@N Ν9Pٝ@N t ZMfw&;Vٝ@8h5 ,j5^gw,x ,jsrٝ;EMewk;E5ٝ;E5ٝ;E5ew3la5l]0?߫aߩazΟoӰ. o0#}x ?;Oaߴa=`ԷavPMavPMavP&0;WavP/`k0;&0;&0;avP1avPMatMatMI0333lFFy < 4Ma0~3l M05y05yy5yy5yy6yy6yT<<<\SmTہSmT[>O]OSme>@z}2p &{=V}+jsV@^Od'p 5 59P5V pC f#(7}%7@l69@l6T#(7@O> AV28|mc>61@M8ȕa|mc>61@O w) c>b@|5 ,jy`QS1b>ܶp'SEM2,j5uV1@ɘOd@|}W9P1rW ,wE1@:Xb>( |O z Əb>Q'j(sb>V1@ɘOd'KZM|5kc>EM|T'O`QS1 ,j*XT'^ ,j,|5kc>EM|T'O`QS12h5!t@~fE \ 䎒K?M%~ρ*T'{K.rwɥ@ź&~*5U s*EM~Od'jsJ?V@Od'[.ZM~&K?V@O`QS2sb>s'$r1@.;Ƚe|O 7 c>\8w'{ˎr1 ,j*XT'O`QS1@8XT'O`QS1@ɘOd灌r1@;O \ vc>*=W|O ׌ c>~> Nc>EM|ru1N5u@|:X,Xy c>V1@.9h5 ZM|&c>\r'j2sb>\r'j2X,w ,jru1f9O`Qu'Y:XT'YN:XTr1f9O`Qu'O y c>V1@.;sb>V1@8h5 ZM|&c>q'j2X, ,jXN:X, vc>EM|5 |kc>EM|kc>EM|kc>EW5z6Qyv7_̗meۭjv#_Ǘme]jv_—mwe |WoUe˶ϕm+/lPIxܿ"2;9xܿ K?/['xK?/{evp62;x6C2;8 A~^FQ^FQyDetm\/(3l/2:˶)a ^:xKwt2; ˶Y2#T^AmeGl|?'Pz?'Pz62~N4l +@/+@ ^z+@/ΣmetyT^ VΣ4lΣ2: *@/&^FQzDh^6y,Qz&/o#*@/{d^z+@/{evpm=[f( A/B;9B!eۨ4'3;pv\=Ŏ.bG{B)vt!w; cGcGrL QхrVBn)vt!;ǎ.䶭bGZMĎ.]h5; cGZMĎ.]h5;]h5;j"vtDBхV=gt!3{:]=.䒻zFr'\= ݨхnTBn7gt!3;]FхEͽgtaQs]X{F5х R¢3.,j= &zFZMgt!W3]苀хܭUBng.nzFr#D= ڬх\TB?]=:.,j= {BNgtaQs?{aQs?.,jy/,jy/]h53K@]h53jgtDBх\RBQ3K@]h53Y)gtaQSVBNgtaQVV¢f9хErdY= {¢f9хEͽgX.gtaQ_V¢f9хEͽgt!3хV= pQ3jgt!3jgtDBхV= хV= xzF5 gg.,jc]F.,j= {¢fхE5= {¢fхEͽgtaQ} B¢f?P8v4l;=lpwy~x~G=_h5YB ,jϗ5UB ,jtJHEM*!eyRf})p Ԭ/Zj֗@KRf})p Ԭ/Rf})Rh5Y_ /ZM֗LRo3Ie$rƙ@;[LRo3Ie$r/ƙ@;OfT&)$5I ,j~`QS2Iܜq&)SEM,j?t$5u ?j2h5I >3IV@LRd )з%@2T )2-S ) .![R`e*!ӄ%@n%miRd )jtJHVJHVG%;KHV%@ҁ*!5;KHEMT )R`QS%UB ,jXT )XB ,jw0*!5;KHEMT )R`QS%h5YB ,!Z;J{ $n(r P ?S @ntJ3P JsC);n(r @¹XTC)n(ZM6&JV P l(ZM6&JvC)jh5P l(ZM6&JȍJ7R wP ¹JCwC)n(r @nT=7R 7*P:P JEM5j(5P 䞎JEM5j(5P l(ZM6ȆR P RJl(r @tJsuC)$n(r) @k @n๡XTC)R 5P ,jp`QSTC)ÁEMl(ZM6xRdC)jh5P l(r @Ɇҁj(r 
@ɆR`Q}vC)Y?uM7 JEr f9 R`QS f9R`QS ˑh7TJEr0 J\rwCl(ZM6ҁj(ZM6RdC)jh5P l(r7 @ɆR`QvC)YNt`9:R`QvC)An(5P ,jXl_P ,jAl(5P ,jAl(5P ,jAl(5P} Kv -K0  0lS0`~7~~a1]aҰOaz10;0;0;젾T3+/c`id`id`aH , , , gȆAA0:0:Ұ}ra0*"1*"0}Kff 14a( 砆>04a0Fs{Y0~04lFQ柧QFyEyFyFU<#J<#Jɪat)4342424oOH>Oyt^>vKt^>id|)p G@ҁ/ZMƗ@ҁZMƗ&KV}0-j2XT|)pL ,j*XT|@ŗ/5_ T|)`_S>X`J~| S>@?H >J~|:P@?H >ZM&OV@ɦS N+l:rM@.¦S N宨7S?0t:PM@XT)@`QSMf[ƦӁj:5,j'Xlt ,j'h5t l:r[M@ɦSd)jf?dAfԁAT`٪ArE5rU ,7[5@!נ6Td *jh5Y:P5@һh5T:h5Y AT`QXT @ՠA5U:P5jPEMՠ5f{{c *T`QXT @ՠA5Uz kPV5@Td *jSܑ ޡ;R9uG*Hr@!#u:R3Ց F;RJtG*Hr;@n(#XTG*HZMv&;RVՑ HZMv&;RvG*j#h5ّ HZMvTԁJEr[թ@]9Ƚ+TT w 䶪SQܻr**۪NEr7ȩ@;mUT**TT`QSRQEMTT`QSRQEM&SQV2nTT/SQܭu**/NETT wk SQ\v**NEd**NE5 ,j*EO:#X ,jt`QSg&SQV@,9h5 LEZM&SQ\Yr**j2uRQ\Yr**j2X, ,jNErөf9TT`Qv**Yt;XT**Yv;XT*r۩f9TT`Qv**TT 㝊z SQV@;uRQV@;h5 LEZM&SQ*r**j2X,翝 ,j#NEX;X, VSQEMJE5 kSQEMkSQEMkSQEM~SQSQv -K0e  0lSa~7~~a1]YaҰOaz10;T0;T0;젾T3+/c*jd*jd*aJe LE LE LE ۧxAAT0:T0:T԰}a0*1*0}Kff 15aLE G>05a0C}YT0~05lTÔ柧TÔyyyTO Gs2?L3?5lga|eO*?O0j>O4I>O_>vS_>nd~*pF ܇Qa@ԁOZMa@ԁJZM&SV}-j2?XT~*pv ,j*?XT~@O5 ܇T~*T`Q(zʄ 1y+U>+U`B W~x g2y臗ɫ}*/W*yh5 L^ZM&&Wd*Зm@2M5~}ئ c*O6Tor*nSf@6Ul56U`QSuEGmVm@6U Aܦ lSZMT~*@O}?-;neɖh$jS*s6OT`OOTo5OVT w }M OZMTV~*j~7hO=Ac~*j2?h5zSEST~*T`QS*?XT~*T`QAc~*ޠ1?XT~*ޠ1?XT~O5 ,j*?u SV@Td~*j2?mTYt*{nS=Pm@n/M F6U/M6U ݦ Tkt*nS5զ 䮷TVm@6UdjSZM&TVm@nMh5٦ lSZM&ST~*۪Or@]9??;?T ޕSܙq~*;Or[*?XT~*T`QSSܙq~*T`QSSV@ԁOOOT wN ST~*;Or@@@OT`QSSSEM}:]?P>wX&SV@e@Td~*j2?h5 _ O=P@e@T`Q|2fpSSE qCO5ĝ ,j*?X,w~*Ɲ ,jό;?X,w~*T ԁOZMxTd~*w~*j2?h5 OZMUTd~*Y>ST`Q|;?X,-w~*[EO5 ,j*?XlAO5cSEM1SET`QSt~* ma$vG 9Lav=v 8Lav3b ۽'a?{azf1M5M5cajS zf1l5GavPmavPmavP1WŇM5M5Mu T T TatmatmF٦F٦ϜavPmavPm11ئ60^Nxئ6հ}iTaM5wx/`0SjTM5yyyT><5̞{ZV>@5}l1p u-cb>O=P }l1p[ 'j 'j}6j@5&Z^dCjZM6&ZV }b/jXTC+p ,jXTCjh5 ,jO=P ZEM5MwQݮ}"(Wݮ@_v@_v}Uz**vja]Zov=Pݮ@nWd+jh5 V[ܨp`+?'[T!K@n9- z[ܷp`+K@/AՁ T`+~C`QSf{u``+~Cd`+j2&[Veixַ}Y f}+X.[b} ׷V}+!ַ}b}+{jorG@Vd}+j@շ[V[@V 78\ o5U ,jwoo5Uz[EMշo=P[EMշ[E[T}+V`QSY oZMַ&[V@n h5b5 n]tvrKݮ@nj@ukȝMwnW w ]t+nW w vZMv&]T+jh5 vrݮ@nWd+jh5_r@_r3C_/>' 
䖵dt,zO>Y`r_}@@Oi =P}dEM5' dEM5' ZMd,{>Y w' dT,;r{}@O? O>Y wN' ,jOXT,v,5OX`~d,jOȿOh5' ZM&dۛdV}OȿOh5' ,j:dEp,{s,Y*5K(}fiOXT,Y5'{`OX,%>Y`QS}@XOv dV}@XO@&dܱp,jOh5' ZM>Yd,Y5KZ}dEp,i5' ,jOXlA5cdEM1}dE>Y`QS}t,'~~/4lhϰ]3lwgخΰݜ{3lfnͰ]a+az<3lf}ѣɰFvLA= z f8re젞EA= [9dTlTe0;a0;cldld0[AˆAˆAˆR e e)22^62^6l;L--bTeԳ0:Ȱ0^#͆1l6lkİ0^#͆c`x6S5Rl '>a }ѳa^=;Lѳa^=0Fφq=0z6l?0Fφgø=;La\ G!AFφAFSxh,  uѳavP=f=f=f=f=3݇%҆ci)6.16.16.prcdm=cdm=cd0Eֆ3FֆCxcdmddmddmddm><~26~2v"kcʇć퓠x~}4!}-p}jph>$/CU ܇D!}H4p #\>$ˁ|䁬Z&kpV5@\>h5Y{ :V5@\d .p 5U #5U{jpEMT .\`QS5jpEM!濦濢& tWM`.&@|m tTc(&@hX;@]> tT.j@h5Y ,ZMvg\[vwI;rܿtG.Gz!#@u\;r^HrCfdz#@u*5U" ,j{<;rEMH&;rV@#h5ّ b#͎\/6;r=PrՑ ,[@n #X.:r#;rܦtG.DZMv&;rV#h5Ց ZMvg\dG.\`Q{cG.Ց ,j#XTG5Ց ,j#Xlؑ ,jwo5Ց ,jwo5Ց{:rEMuȎ\dG.j#h5ّ r@Ɏ\<s.r'y*O`< 審<] " 䮰t[p.;roytQp.j2Oh5 =Py@<]d.j2Oȑ&tVy@<݁oooȺ]]/bjr@ {>5־ {yDq//rԽ@M {yv@;^^ wN ~{y7q//^ ,jXT//^^`QS@XT//^^`QS@^^d/ {yv//p=P@gm8^^ 6{yp//v//듽bBEM5,j*@ E5ZM7F&{yV@^^d//ct//j@7F&{yEp//Yo5K½fiSX,y 5K½zy, %U^^`Q* ,jȝd//jȝ^^d//;3ZM&{yV@^^ wf 5K½fYҳp//YrнzyEM1f{ b//^^`Q=XT// ,j<^^S{٦6^ͭa{eY/z6qmRlseR/یz2 /܆.}^h6 ܣW> /}DBۇᅶa 9Va &†ZM ]x!.څz]x څrX Yv^+h cv^+h^赂vᅜ_P¢f] /,j {¢f]xaQs\h5.j]x!*.j]x¬^=Кzx5ABkzxad^X4٫rO &{BP=w0T/^T/,j }5a^=W/,jÁ^h5Q=jzxDBPBV 95g.š^滰8.,72߅š|}M:P兜yPBN=(T*/Br\=Ty/bBr9(T*/rPB.*/B5P兜QBPV &B{BPV &BrC &BZM*/~y[@ƨbW,/ 9WBnbyW6*n/o*qy!Ǯ#%J\^hJ\^)%./`H\^hЂ!qy!핸;J\^i'%./,j=qyaQsO\^X5E=qy!wN'./,j {BVˁ{BXzP兜OPB*T*/|BrU PܱPB2*/8BrU {BX(TyaQs/\X -{¢^hZ.U^h5VBPV &BZM*/䟅j"T*/䟅j"TyaQ^Yz/ U^? 
+TyaQD_Y/ U^X,*/,j PE=TX0 U^X,*/,jB5PܱPr BZM*/fB{BPLSBPV &BZM*/fBZM*/,jLB5K)FҊQ¢f(Ty!*/,j 1 1 {¢f{ B¢*P兮!Tyn Urrخװݮa\vSxinְ]aWvSxi.հݩaRv/ÇeخӰݦaLWa]0= zffff2Y0*A= zLC0;P0;0;ǓavP'iavPO&P0:P0:Pa n U U UۧA*A*atataazff:L젂[G9 *Sr UPa UP0*q=0T9끡a0*q=0T9lPa z`rdrdr>92T92Tybj`ydrTLm, UP0;P0;P0;P0:0zP0zP}2x=c0*3*3*3*}2pyX'3p9ߧø<q=0p9kƵ0.q0p9lƵ0;X \~ϗ}}Hl{aPs}aProePs}aPs>=09{vrpp>-,?+pp`9U ܇W>pp>Q@U:\ Y s3jh5Y tVh5Y tZMV:@Jg`QS}6Jg`QSXT3Jg>Q@U:t5U ,jXT3p[ ,jXT3p[ ,jOSSd4p Ynx >e@2hgJxAm<ˠ hY ,r e@2hd4j2XJxZj&<-5Hw3ۘNxZj&<g 2 LxZj&<9g ՝ κEMJ95 RNV @Ʉg 7q Lx>P@XhX tV:m+T)Jg`1A@n`+I+V@neh5Y tZMV:Jgd3jh5U Φ+V[*E֎*T3Jg`QSXT3Jg`Qc3ڱXT3ڱXTt5U ,jy +V@Jgd3jȉW:&+V@Jg<[j}M5UX ,ˡep4,rho ,ˡep4,<t4ߐk\# 䔊k;p􁪑,k5@Hi ׊k\+rh5@NF)H&kV5@U# ZMH&kq4jFh5Y# r5F_kq4 r5@lH9i n* @Sq41UbJ_Tic41Uh* Su4اTi`QS*UXT4Ti`QSRu4Ti`QSRV@i wf?>>e43r5FșH}i \# Όk^Hr5@nFXT4;35 ,j*@H5 ZMHoH&kV5@id4v4jF@HoH&kERq4Y>߮5K5fFX,}H5K5j,Hi`Q~\# ,jFȝHd4jFMCHid4ZMH&kV5@i 7 ]# 5K 5fFr4Y@r5jEMH15f{ b4i`Q=FXT4X# ,jF:i}F9U:lwoخްݼax)5lnnݰ]as)5l7n.ܰݷan`3lwmخڰݴa0{GavP4RReA=T0;avP0;azf*f\f2e>R6Ø*F*F*=Lɵatatata<0:T0:TaJ LL 0;0;az2f\fT2Ø*kTaSx*kT}mSaFL}a0'*=LaO0U:ߋR)U:ߋRü*T0Jtø*T}nS)7끩atataT0:T0:Ta uavPYavn2U:*wRRRR)~2q:~2q:~2q:l7F?8=Laaaa0yfBף~gg6uvfS+̦l0a\̦c60eSq1:a\̦gՇl0;l|ar=cnu>~rQaata>0z}0}=cnu=cnu>>(Baδсq5pǙR~-pBu5pGLk W3~>j| 3\δZMfZ&3V}8j2@eZ&3V@Lk>jh5i ,j*5i ,j*@eZʴ5i GLk`QS2EMeZʴEMeZʴEMeZLk`QSOSk>`o w z0 Za5kx Za5k@8h5x ȞjbO5r=@;ĞS 䆫{v=@;Ğj OS {wO5з7Tn5=[V=@ɞj wS >oф@:hM[ &ZV cT*hj`D@n:;c1h5[ r3ֱ@jdl5j2@V&cV@j f[ ߊ5[ ,j}5[}bEMV>PbEMV}cEbEbTl5j`QS[ ZMV&cV@δ8h5[ ZMV&c`ql5j~ͯX,j~55_MͿ8h`QSlA ,j3XlAl5 ,j 6y5cE&o`QS h5cS9 o ' ̋ST7fYNr :@~ t7VNrϩ@b9h5 LZM~od7j2h5 \SV@s5 ry@{˼pe^8 ry@{˼pe^82/ȡ9Hpe^8,j*/XT^8p`QSy@8/XT^8p`QSy@ɼpV^8{NS2/ȉ9MDp^8[ r/y@/$9,p ),j*/ȽHj5zEMujZM&q^8j2/h5 ZMpd^ ry@ɼp`QT,jq^8Yj\ 5Kyfir9/XT^8Y\ 5~`s9/X,}.%p`QSy@E:/| Vy@E:/@&܋t^8j2/h5 ZMpd^8Y] 5KyEr^8{ 5,j*/XlA 5cEM1yEļp`QSyt^8йMd^8pW煇 7l0 ۭK7lw0 ۅ6lm}l U6lyfH3fT^xT^xԣ0;Ôf3f~;LO02젞]A= ǁA=| ‡)82/<2/<2/Latyatyaaz<fIfI_b^xxO 
5b^x>:׈yaO0/Lavn2/<*9M慇A#S^xT^xT^xT^xd^xd^xd^xd^x><~2/|'''ayaN~k? B3u<ׁaLu`xSx:0 :>La0u<^0;0;0;U_iaiaaJ+Lk8kC55yavPḱ0y>?Rd܇{u9Y'a@.p'a}I@.u'&́VI@$s>h5d~́VI@$sd9pL25ddzJ25d~́EM%J2T9$s`QSÍEM%́EM%T9$s`QSI@E9㼁Qs,?J{o˜sWc΁^e9?P@2Ɯs>1@2Ɯs䨐c΁V1@r \]ˁج.?P@Cr/6ˁqu93.r*@߇X]}fXT}/jh5Y]ˁV񒱺@Ur5au9КhMX]&.?PˁEU3r`.rdu9j=jW&ˁV@U].ZMV淪ˁܲvu9j~XTu9r`QS@PXTu9U],jXTu9XlX],jXlX],j@U.5U]>@rdu9jh5Y]䨏ˁV@rdu9jW&Tu9=.5U],jXTu9r K\],j*XT5X],j*XlA.5| ,j V1h5Y]?J~ˁqu.r@ø@Uk%r Tu9K@.AW9r c\]䌖ˁV@rdu.ZMV&ˁV@lh5Y]lknw0v;Ft5a#:c}nD?P@kFt g܈䠏сܲv#:w06&сV@h5,K-5ҁ@a8 b-5ҁ@K̀t GҁT@:t`QSҁEM9t`QSҁEM&ҁV[@Mf9 t ;~2Ёq:@r@n: t ̜x3Ёܷw:@5? ,j@e5? @ZMf td:j2h5@r@ 3ЁVf9X,4g t`QTӜ,jp3ЁENs: t`QԜ,j*Qs:Y:j@5KJ2Ёu@f&3Ёu@ZMf9 td:j2h5@r@ t`Qt֜,jԚ3,5g t 7G,j*XT:,j f@5c3ЁEMe12 t@: t7lWonް]Ô5k7ln.ݰݹÔ57ln۰]akaz6lWmnڰy=#0;avPavPavP2dSzS0;ga0= @Yzvf2lOfa@@@0: 0: 0: }Vndzd0,AfAfÛdTrT0= Yz*fLa@5b~aF@5bz>7׈aX0^#f> }t@ 0'*+z}*=y}*=kEa\+LDZa"zq0=ka0&Svs   atatÔfk*젲`yMe"zTv0%A%A%A%07k559aiÔFFF`fZzfZ0A~K)at_y}0:x>*'FF>3=2=2} ڇ)=*=wf+jYavPYa^>L9a\qz`{1渇q=0=la}søasøfS{T1Ü9}lTw>߹S݁;_R݁~>B@yp;pDI>B S݁ 8h5LuZM@TLuZM&S݁}d;Tw>BXT;T,j*XT;pR݁EMJu5,j*)5,j*)5~R݁EMJuZMZ3_#߁v |_B{Ww`%( d;듑d;듑@9h5Բ}w*_x& >p<&@9xM`<&ȁ,9x b\M@ߤX,j EM & V@^h5Y&,r@kx5a<КhMXwE&*r@X@xd<#.ZM& Vh5Y,Zo9xV<x`QS q<x`QSXT<x`Qc<XT<XT*5U,j~ V@xd优q<3c΃ZM&Vy*h5̃ZM9BcEMU=P1*~EMU&cV1@n9fh53ZM&cLs_BeF(m/5BexP:(l}(K/}a{e>갽t2{e^+{ e\+(kmS/ZAe\+(l| J˸VP:Detm/(t>l/T_f`yMEevp/K/{evp/,(^t2z˶Yk·k_FQ:Feg$^FQ:F|^:K/A_FQ:De J/(+(t2:t2: Jþ`y}Gevp/?:2 (lW҇}a/l~ />{1@!e\G(lİ~qs/:B!ev@!evpou /{etdyVpۇ˕RZCb)~!/n"WLi \ i yPZ-nVj"~DZBVi &{ZBVi &nZM/,j~55E=~aQsOi {Z¢Vp5E=~aQsO_XnS5E=~65}aQsOi {Z¢Vj"~DZB ~C_X~C-О4Ћ=i|/z> x]Ћ 9v*_햊z]Wk/_諂Pw59PwB6 9QwB稻~oR_^_Xl7)t/_h5] i mrAi m/ H_hVܛń=~!'V7)/M i &ZM/̃ZM/H_h5VZM/H_h5PZB/,ji {Z¢V9J_X5zV/,ji 5[;/,ji 5zV/,jiH_h5Vj"~DZBSZBVi &ZM/ZM{Z¢VE=~aQsO_/,j {¢f{ BZ¢nV/,j /,j /,jH_h5Vj"~!TZB(/trGi=L|/Ͳ>VSi=~!קr}*~!͔Vc>J_A/H_h5Vj"W/H_h5Vj"~!gϔVj9yvv{CwB;[TwBN~BwB;{/<{#r% /BBBN*~!ǯsʹ_^ȹ_E=/,j-j"/DB//\BrjD! ! s5 _ȩ/r@B/E=aQs_Xl/W_XC5|/,j! 
&BZ=~!n?[>pcsPʹ_/rroY9 -5rrJO9 9{PB-+~aQs^X;{¢1w /ȹ_h5sxʹ_h5sj"~DBSB{sxʹ_h5s *~a?όx`t>1멲m13:d>j2fh5ZM3ZM}d>j2fh5?h5,j~55,j*fXT5,j*f၊5,jXT>}>3XT>}>3XT5,j*fh5Zh_j2fc70fX~CDP{Ø}/c^Ș}/c^gWRUa0ff$h􁾞LrTI@9I1!'9($}f>R3Ih9q@&&;qJ&9,$}M`>&0Ih L?P-b%9;$}oRL&$}d>j2I) '&VI@$LZM&&p>j2IXT>$}`QSI@8IXT>$,j*IXT>1IXlo혤,j*IXlo혤,j*I@%J5?I@$}d>j2Ih5VI@$}d>j2IQ0'&jj>5,j*I@%J5TEM%J^5cEM%1IׁE$}`Q=1IXT@&&VI@U^򁜿qX*/,a@2| W\er a@8,ȁ&Va@ɰ ZM&;vmA lw0 !t;>>nZ EvӖ 2I-Tq>п[&e>}NrI@8Ie$}=^L5EM%Lr:I59$} cr`>ˁI@/&9$} g?r`>ˁI*IXT>$}`Q 2IXT>$,j*IXT>j2I෪󁜎9peu>3Kr@θ:cW-su>3.r@n:XTn1U,j*XTn1j:h5YV@|du>j:=:W&Tu>{tZMV|`Q]ERStu>Y5KSEVtu>K\f+:X,EWrYr:h5YV@|du>j:=:W&E_tu>Y?D],j>vu>|`QSf{ bu>X,j:XlA5U,j V%:`2N&:hr$OϽaWa+05V`ؾ `ؾScuؾ+`ؾ4'av/ ?LX젞AAA=[ z:LavPUH5W?;ajԣ԰;9ثF٫F٫?Latatatax0:^0:^ajۇ0;F0;FazTff4!i#҇e?ז-,ڲe?ז-aԲk˖0^[ڲe?l [e?AgP0gP~5kqe?-a80#qe?luĖaj:b~d~d~>=e?e`yf~Tv,lj0;0;0;}׎oubW?γW?lF٫F٫x^0:^0:^0:^<{`yg~T~,ۇAA+^0:^0:^a_AA+嵟at/g\'[{a~/l–ajd~&[Vޣe?k-a'R+Qزf{AUЇ-avPԲfU?f>F_?z @}@jO??y`Y> pI OBIO|@II'[p O< j$@ɓ&O?bh5yu@IV'ZM$j$N?bXIEM@XIEM: lu`QSf{IEMe՞'5u@`QS'd@>%R*;З'$@M$@_2KƓ%S*@Vtk \0r?ЂhX`r,U@ϸ r m@_O}=lz@E%l7 9m@jlZM9~d[?jh5Vm@ɶ~d[?CnZMj5,jȑ#j5EMj54 EM EM~`QSmh5lZM&Vs[?jh5lZM9~d[jrvmEMj5EMU~5cEMU1m߁EĶ~`Q=XT@&Vm@B~ ! 
_V\ \+.r139B~ 'r\Zq!?jh5Y܁V@B~d!?g+@v@Nu9l7]t> Jol 5&OtOx`@&OxN<} j@9&O,jN,jN,j@N_QrԹ^H< z!܂@܂@܂@/$[s s : s -,j܂f{s -,j܂yn~o599s< :> > >> > jh@Nh@h@h@hEM59 jr5 < jh@nqh@ɣ &&< jh@nqh@ɣ   &&,j`&,jf&&,jp&,jv&,j|&,jhfihh&x`hhfIihfih&&8GZMMwM@Mh5y4A w}4AVGZMMh5y4A 8}4AE͒ER,MMX,MMwMX,MMwMģ 1G5u4A`Q=h&,j MX ]nch@ױy4A< ЅlM?M0l0}}%d} ۗ} aJۗ} a5l]aak0; AM0jԓa:`c0;a-0=R :`Tx0;GawsԇhatG <0%AM0hatG ۧCAM0hÔ<Fy40:ȣ 3yiTxT0=* JzLf4ixmy4}v-&kˣ sxmy4}}-&kˣ xhahaha'>LG Ϡ gPx&uģ søx40#M0oz\G<0則qhatG <`>>hatG<0;X^y40;<0;X^y40;?l'4G G G# ӑøy0ii0la 45# s9`yo# A܇ކG *# Ae܇AE܇s0@~7> !ߏCxȁ\> !pJ WC8(?c9?|Bq@.=cm$ j@C& <$ d>$ jꐀ@N ,jꐀ ,jꐀ@EM15>,j XT<xH@`Q=@C& Nr>i@(qj'Uq>;Nr~޹ mq>1 4|bJri@;4|`qHm@N9 XT>4|`QSmڬVi@4| w`1 h5L?Pi@4|d>j2 mU&EMJ5EMJriEM2 @94|^HLz!1 o4|^HL?Pi@/$J5J5EMdomOp= 2{ U]}rt@a{ Gg\,j*{@5= ZMw{d=jh5Yr@UޕVfiXjj.,eB%N{`QSf)XT=Y25U`IX,B%X{`QS@n~ V@n@&Vu=jh5YZMw{d=Yj5KER5t=۪5K@n{`Q=XT=X,jXlA5U@f=a f= b!f8l_ Wa} W}*} W} i8LOZv_A=a zfduA=T ojT}TUv0;Gawsԇ>>~ atatTFYFYav8aazTfUf4!i#0;c~;a,ug~>x<םea0^w ?l8뮺a*㽇ea ??갇 ??3;ea\G,ǀq ?ea:b0aq ? ? ?lIFYFY?Luav ?;pAaS~T~T~_~qwy`~g~>@?γ ?γ ?lFY?Leateateatea0;z0;X^YfUfk? )ateatek? ? ? 
`yg20;X^YF~:{]wxXu`Q~ a,0 X{V(?Eav'bQ~Tz,Xf>LEavPEavP=av'O?;ݩON՟ bs>pM כ;\q s> X?rp>pTO o.zp}@} ^+,}d>jph5Y, ,?P@}d>jp'jpXT>pO`QT*5U,jp*5r,j*Xl0X,j*Xl0X,j*XT*5U,jph5Y,ZM& VpR3鯩} '\,?܁T3#_)x9 ?} T>W@U􍑅@Yvcd=sdrq@;ĸ{b=drqjȡ29^{ #wq@9h5wrDq@ɸ{d=j2@&Vq@ɸ{ '&w5w,j*XT=;5w,j*@5w,j*XT={`QSqEM5w}̥?P@_l9K\z/6s遾̥b3\z K}**}b.=\z`QSrpq.=\z`QSf{`.=\z`Q=X0XT.=,K,j \zd.=j2ru@N@{:y 'A\'?@g8@@9y  ܐu<К0@@!y5a<#CrhEM*5U ZM!@yd ZM&V@z9@h5 ,j*@XT< ,j*@XT<5 ,j Yy  wy^H r@n:@yT< @/$EME2@~@1b7Cx w`lrx@/1ȍg7x ,j*@55 lZM6xdc0;avPR0;cc|dc|dc0IA6ƇA6ƇA6Ƈr  ㇩O:1>1>l%LJ0 j0 Ϡ0#LJq?>lu0#LJø?L a\Gۇ·ALJASt,javP ffF廼|/60ɇø&6}~䇩M>ma\lz`|T|><lj} atmatmɇAɇAS|d|d0ɇAɇmɇmatmaS|>i=ׁma:M>ׁma0^S|l㽀mag+&{`yO60;XM>I<Dl60;0;0;X(Ca[_T!39R 䤙ぜ+r}r@>ȱ-;s}h5Y䀄V@xd}ZM&V@K>h5Y,j>XT}XT}5U,j>XT}s&<Ђ1#΄3|BLx 9#΄5ǻPxEM&3V@>p&fIO 2Ȅl;1:L2NN 2Ȅl;51:L=1ޗ K=!1}{*t؞=!1}{*c\cHk m?5ǸƐv>c\cHSCBct A$?B?FDB|؞ ,HZS =:lO =!1:ge~-wΟ=1̏q=)9\Ǹێ a{Bc\H q= !1;'?e,$?f` !1: Hl;1: ʾ A$?F'?FDB|؞e_/@Bcv < CzcۋǶCzc;Czc~Hl{c~H=Ƕ7re/Bzcv쥐e/?f^,{)/;uq~h?ȏM]-'.%۟o? WrK&Ѓ\]J^4C@A(?:ԃ^(?^(?'{AOD| ZM(?h5Q4jh"ՃVEhDAVE(?X܋n/R=X܋5yE{^4p{*p/?X35EͶA~v5(?X35ۮE{^4jhDAVE&ZMWDդ};WFA_lȝ?苍y;!}yX{An:j"D(AS(AR(=pO3>ȳv ?ȓU iyNyJyC&BZM?h5J@Jj"D(APzJj"D(AP<PV{(J%,j{(=p?XC5PE=`Qs?XC5DE=`QsDUEAJz^QGUQR " /`Qs?X+5E=cDEA|8z^Qj DEAV&* *ZMT,j{E^Q'P?X+5z^QW,j{E柢&DAw   z DA/AD__,j 5@끿lϖ?ϖ?П='<%CH?cJ?cJ?gJ?xJ?cJ?cJ{[|j"DbA>%QbAV&ZM$<%HO&5KO|$ETbfi*`QdX',j5z`*`Q$XYJ?XiXj" *'H?ȧJ?h5Xj"DbA|$V%ER Tb=*`QXOkXYJ?ȧJAH?Xl?X',jAH?X5 $,j XЅ_$t]EbAw~XХ_&ˉaqؾ߾Ssؾ`ؾ 0%*>0;_V)>g0;U[aI5*>*:0;R0;Q?atatÔYFXFXFX&FXFX?Latataٺ{iTfuTf0TfYf3iԏavP?A@fA&+3&և &և &և- &և &և &ևg &Sb}KL}ÔXO)ÔXOaR501&և癇q1>kaQa\cL01&ևA&ևA&ևgA&ևA&SJu,Lp~,LJ0;0;0;X(5zXc}>fۇc0Fq)c5l01fۇgq1~øƘm5l0;l0;l0;X̶ʶel0:l0:l0:l0:l0:lael0:l0:laʶ̶̶l0;l0;X̶V̶el0:xCF_~affozL0L%aaa{ 3)?,fwc&wc&~,{7f+{7fwc&~TTy,{7fAS&~TLyTJy,{7f8p$@^ wMy=q|BeZyQ_ @_ ,j*z LOe@B,b>pQ,|B>2}/^,/zL^,zL Ve@2}d>jLh5YPe@2}d>jLh5Y,jLXT>2Y`QmyX,j}EM*B5ۖeЁEͶa>"t`QmyX,j-EMd>jLh5Y,ZM&Lh5ϡbjrN$?~Yc ?ЂEhX`_~c ?Ђj5@X,j{kEM%&k65YHy&5 Hy5 Hy5@q ?jh5YZMyb5@~d ?jjV5@~d ?wh5Y,jXT 
?~ XT ?5U,jXT ?~`QS5 L􁖚e@K2}f>'\,_2} qLhY,Zjye@q>2}`QSefۥLXT|BU&mJ|`ѤY, +<_dz迓w{<+^z9*^+?Е@ީ]!v|>?C&y@^p|>OVP@>t|>k@or|>|`QSEM5\jZM&|ZM@|d|>j2>h5C&&EM529>XT|>5,j*>XT|>|`QOSOSߢ&P@B |Bl}/^_l}/^z2[ ٲEMeml}`QSY9952*aքe@Ap>G\s^0e@Ap>G\PeEM%&Ve@>p>jLh5Y,ZM0e@2*aVef%LX,DHef&LX,D2}`QSefI(LXT’Qt>YJ.5KLe|Z2,ZMe U,iVe@2}d>jLp.ZM2}`Q]],j|B2}`Qd]b/l?X,jA,5U,jA,5U,jA,5U.:2},:2},:\g?l?gؾP1ߢ,} Ȱ} Waʢǰ}y Wǰ}q wgذ}e ư}] 7Uq~ 5*[?*[?0;])[?70;_\a5*[?*];0;Y0;Y?atatÔFFF?jd~d~d~d~~0ff=LAkAf javPAk#xݙ4t0^wG;3M?םiaaL40{{)M?r)M?);Vq1M?l?D=kia\cLOc0bq1M?2M?2M?l??2M?2Mr`~g~T.v,L40;40;40;X{\?&+L¤0~.L,0?ø6Ƶɤ́a\L0M&AeQA%A%^Ga~Tu,{&A&A&A&gA&A&+{&A&A&S~d~d0%A%GaGa~,{&)[?l?i>N0[?lG0ė f {f'a/1[?`ٟ1[Xٟ1[?젊`ٟ1[?*|J:`ٟ1[?*ÜC er>I=@^+'yP}@^0'%SB]yp>pE*7'/T>kWlÿP1@e ?П-cgl_? g ?˞1@/{/T ?˞1b2h5ZM&c+oV1 ZM&c+oV1b+oEM_ t`Qm,j*}bEM B5f1ЁEͶYb ? t`Qm,jcEMd ?j2h5ZM&c*h5'JV -}@{~ec?p.޲hoؿP@{~ecB5-fXTsB5&| &{d=jh5dlYVMEM5j_&{`QSM|&{`QSMEM5j5d,jXoSߢ&kϖ5@>t=8]sg˚{?[/T=˞5@/{Ysw5jEͶYb=_{ ((X?_{ ̸hMXsE5@>4t=Of\sn5 Us,j*cXT0jh5Ys{d=jh5YsɌkV5 Us&kE͒t=YJɌkE_Mt={`QD]s,jX,eA_Xꂮ5K`5fi :h]C̮鞳*h]=gv@;zbv=O]f):X,@g/,@gz <;X,@g f EMe EMe EMe2/끎2/2/gׇ92l%?dؾFKdؾBSsؾ>cؾ:cؾ8L+cؾ0bؾKiavPavPavPA>:LavP?A4ߟavPavPYavP?AeׇwavP?~ ):2>2>2>l?x52>2~ 0;,0;,a5*:70;_P~? 4*:c}~nt}a}}a}~dt}a}~^t}a}}a}~y}a0ڇڇ\aʵߧaʵߧ\0}J=a\c̵Ofc}sCøƘk?L)a\c̵̵̵FkFk?L)av kfBf˾avPÔkfkfkf˾O0~̼0~̼aʈS7->ifއqM3>la43)>*#:*#:*>*>l?J; 32`0>2>2>2>l?=2>2~X0>2>2~2 3 3)>*> 3 3`0ØyF'y?LaataÔy>F'yq2N0>N0>ld(3fއ0;Xg̼Vg̼eae0;Xg̼j e0;aʼj*e0;x0:_^W@^|y_{ /T58HP=@ޏ - ,jh5_cP@e<>П-nlv g<>˞m@/{/T<>˞2h5ZM&vV ZM&vVvEMmx|`Qm,jt|EM*5fJǁEͶYb<>q`Qm,jEMd<>j2h5ZM&*h5ZssڝM ۝M@/6ؤ_8 r`>ˁM դr`>ˁM դr`>w`Qݩ٤,jXlwjyte@`uYBye@JtYBye@MtY>\},>ye@ɲ| P,h5Y,ZM/TY>j,h5Y,DVeEM*|EM*_|`QSeEMd=&0hr \C6! 
r0mC6!@ߤrI1~ByK_[H^2V菏@|+`wBy}@>`w>݇6>݇P}@>is>}@q>>|`QS}EM%OJZM&|>ZM}@>|d>jh5ه &EM5ՇP}EM8}EM5Շ,jXT>>|`QS} kj_QM դd>ONݤS7}=٤dB5}=٤,SM@jؤC}7MEͶAc>jQ<>_?*ȇ/z2G|.x| 8ȧҎ*XT1cd<>j2ȇ/ZM&V@x| 8h5P@>|q<>j2X,1Dx| :X,QDx|`Q,j*X,}D_XNy*Oi@>/s=П^Lz24z :4z/^L5F$F,j|4z`QF\/lqm74z`QSifۍ3XT=vL5FN:4zL:4zL:4zSi_wӇU4l/;aؾgؾ|S{sؾxkgؾt+gؾ8L?fؾjfؾ/#aOavPavPavP?A48LavP A*߼oavPavP}avPavPa-{T7>>~ L 釩:>>l?[w~i Y~` y50;V~V lOiKlKlhKlOgKlKl]}tMTFTfԆ{Izz>>돽aa\co}~׈ aaFl5b05B50 6܇>aO~`/>:       >>~`/~X/>;xi?LaaND0^wvڇo~ ug};;>Na㽇avi?ifaviffavPifffavPatН iw=ߝ@^~wWAi;t  Rw/Tz4#wyKrBuJ5= qψ{?xF3G}O`=p=,F/T=FȈ{d=j2h5qoZMF/T=j2h5qoZMFo5q,j#EͶb=45,j* Xl;)FJ5NEͶb=vR5>@Ɉ{d=j2h5q_{d=j2h5q'q'evg=׶{<^H̿z!1ʿz!1ʿz!1XT;̿5,j{, Ӷ. #. <{c=Ђ÷.ؠ <{d=jh5YxP@{d=j3\.ZM*5Ux,j.5Ux,j*~ E,z?>fyY@|̢_,z [9,zW6^̢쟳'd~~|̢򠆳|d,z?>f1CC΢ؐ|x,zW6^̢?Y@,z t=j:H5ER*_:RM3gN_{ Q8gNS|^{ N_{ 9{S<{`QS)REM*}5U LZM) bLySV)@{d=j2ȇNZMJ5b,j*~REMJ_{`QS)REMJ5b,j*XT=J5b,j_Eh5q F/T=&0hqP@{M`=жv&ň{oR_{`Qm~CԿT>K WSR<^ڝQ>QBeʙ@_lf}wgpԙ@>r>\l5ݙ wg kZMf&3|}d>j2sh5 3V Qg&3EҮt>Y3EҰt>Y25Kҙ2E͒t8Bd8>Or`8>ˁ@/nr`8>ˁ@>v84t8>YaE6t8>o,j-EMmp|`QSf2XT8:031n3 1nap_Ua2l_b?cW?bmчkؾS}tؾ kؾjؾ9L;a/aoC0ɇAȇAUAUAŇASU~6|TU~~g?LUavPUavPavPUavPUa|TU*?*?** ` ;*?*?l?x~ _`*~} 50;]*~s XF_X?XF_X`F_XF_X]F_X?Laaag?J?:;øXr0?'ȇqJøXFYFYFYFY?L%avXfUBf˞ava8LavPavPavXa,r0^#?׈aaF,5b90UK?}aO?la,r0;j0;j0;r0;rt0;XK,er0:r0:r0:r80:r0:raer0:r0:ra*,,r0;r0;XK,VK,er0^wS9~~|;WS9~~ |;?םa;*xa90r0;X`,V`,eraer0;X`,jer0;La*j*er0;80:_ev4 ՜4nR7)Kt@*R 7;KUEMR/Ts>Rj`QSW}UPA@_}UUa>p+WArU7a0H*HEMd>j2Hh5 ZMw&*Hh5 ZMw&EMw 56Aff1HXw.T>Xq`QSff1HXT8ٶY 5+,jmEͶb>X ZM&VA@ }dB&VA@ }d>jӾ:Ή=~C`}?vy:ݝ չ d>득 չd>득 չ,j*CXT<پ:ع,j*CXl_ZM/T>G|M/T>|Myv)@;Ĕ}b>'~9Fy)@ɔ}d>j2eRV)@Z z2`ɀy :`?z2`~u<3< yw*C3(ΐI3^̐z}2Cȳ(ΐ@g3T@ y t_qB&|ydCLڦ灾1}~EͶ)վ:~?[F/Td;П-lYTٲeϢz=|w/E_lzQ&+v@;P{ 9Cv@;P !ء@P{c=j ZM&CV@>s=j2~B|P{d=Yr5Kҡ@>v=Y555j,j8C2}h>`L_X:{NY >cr-@ y ;}~a 9}X,=f 9}9N_vL5ێEͶcd,j*}>t"@Gr>t&@r>t*@>t=<Gc_m}l?cm}l[yްm}l[uǶ5m2ll[pǶmm ۷cvp]}1;wч]1;w?]1;7N?f.Evܻ藡1:. 
h 78g{cvpi1;0}?>fcvp91:fʾ%4?]B3ct mm?FL7?FL]B3cvio7?}ov4?zC3c\hl;1?4Ӈ Џq1:f 1:f1;Xh Џ@3cvŰ1;7?ff`O~fxL5B3c!1^#4ӇΏ?{cO{}LJx:1;:?ffLv cvL{ef h h+%4?FLD3}LD3cta{3cvpo~ At?k.xmExmExmE-//ۻ]B}Xg1;XY+,t?f> ]=Y1;XYa{cvpO~ڏBcvpo~w^ܻRu6?HqEp{A.G]= RA*.z|w2u z;.E=`Qsv>h5._J*?뉢z^TE}=QTEDQA_Oՠ׃ՠ,jE{Q} ZM(?h5QTj؃VEDQAVE(?X܋n/=X܋5 EmE_cmE=`QmPTgq,j 5,EͶACQf۠`QsDQAVE&ZM(E&ZM(?h5QTjyX !ؾW[Пrлr끿}Xx[Ћ=/rz",j{*f^An~پW[jSQTК5AQAVQATQA,SQAkEyVXEm|t|Zjv<~v-5 ?v<v|FvC<$,iz}'L TAvTSRMjw?H5~jH5T@y ٢?[tllEП-A]E]N]N]Ձ.Eͽi`Q}uޛց54EEyEyEB=p?hE ੋ ੋDA.V]&ٲZMt.|.V]%.ERTA>[VfB`Q!EjQ/6健yF.j:Q>Q>Q>Q>lF0;F0;0;F0;Ff?Q(F(F(?LMatatataatatF(F(;LцA5EA5EϳavPMavP?͆A0flԏavPMavP?ȆAfc쇱_>_~X_>_>l?9_>_>l?l<_~y_>z_>l?^=a\c|~z凩9atata z`k_>9凩_>_>_>?kae|-xm=Rk0^[vSsm~g|/tB=avP==fff=f=f>avP=av=F=F=F=F=F=?=F=F=?Latat=f=f>>ata 0^[vχo 0^[vχ/ e|-aav=?=f>>av=ff>avPM=fff>avP%at@ZxKt ULn.R8K@zb rR=)*XS1=bz`QSEM9&K6,_bz'遾,zdbz'[ _հ~EM*bzd1=jh5YL,&ZM/T1=jh5YL,ZM*5ʶEͶAc1=텿mbz`QSef۠XT66h,5U ,j EͶAc1=,ZM&V@bzd1B&V@bzd1=jh5i_WWM3!@w=Gb{`\C쁾'0{C*{C쁾'0~BEM*l5CEM!@{վ633mi G7y @1s<@ Yc7/Tt=}0XT;پt:~p %!ȁ?ڇ~`)z}@O ݑ@FVP@k}5a>\G(ב@k} ;rh\}dBE/s>j2rX,=QG%)} Cf5KWԑ%`| w88 _R_Xb| |`QT t ,jEMo|`Q,jEMm'|`QS| >| @| |[58W+?>;ltalͰ}?a\-/az/ajoqrؾT+uؾPa2=LavP[avPavPavPavPÔffaT0;T0;|0;T0;TkbT1U?2U?2U S S SØ S S);2U?2U?l?y~ v~ I70;s~ !7g0:ae_0000aKKKLߧ0}Jߧ0}La\cLcr~~R~):ka\cLL?FfBf˞avP)avf?LavwqPa `309?}aaÔF_F_F_F_F_?Lyaa#x0;80;aʫʫ=eƌ0;0;XnO9Vn0:Ȍ0:Ȍaeƌaeƌ/U1זa+)c?l?>זa)c?l?>זaxmk[YÔ 3`g1cXg1c?}3}3`g1c?;}3B)c?;;}33@R7g)3T@Z} sBUR3 UI ,)cXT>}`QS*Vᅧ.U4a?p )Кքu@k:~5a?p'КhMXǿPu@X_ u U,jXT@&Vu@:~d?p7,jVu@:~d?p-jXT?p-}5,j}EM/T?:~>Z`QX,j*7Xl> 5۾ufXTn@&Vu@:~d?jVu@:~d?jh5Y,jS<2yտP]@_2v}d XTWBu5w,j/:vʻ5V]@ɮ~gyh@>Vt>Ћ@/^f@g3T@} t>j:cH5RMg3T@5L,jg/,+5,j*cEMM 545L?Oe?Ђȇ._"c?]nE@ "M0E@>vB%T*5U,jXT?"d?jGE._"d?\,ZM&VE@.ZM*5U,jȁEM*w?" 
x/T?"`QSEȁEM*_8r`QSEVE@"d?jh5|"*>mhX},vEbWFb\?x3r\ 8e\=s'0Ǎqso$J57sV@}#V¿77o%5<j[ @h59D!jr…XTF8>`~xz=a禇38L3F8>`=a4>`>O a8>`~| a\c0lCa015  KF9>`Ôf~A%havPavwqwqPatߕ?~r\0q'q' W0~r\-a O+8La 0;q2i\0;0;0;q Oeq2`+r\0:q  W0l?>r\0:q" W0r\aW0r\0:qi\0;q`+r\aeȑx8`NaI0l??׏# x8`&0^?${G _"{GxHav8లHav8లHav8`Tby,{)$fUW>L# AAHavPMat# i )\yt iG+_bWH)=BukL+P55 yEMukZM+P .Լ@kyք .Լ@kyք  Z+&Wpy.q^WXԼyV &ZM+Wh59B+Wh59 jr^A`伂o5۾ 85۾ 8_yEMMMemyEMemyEMemyEͶ㼂2r^A伂@yV &ZM+P &ZM+Wh59 jr^A`QS Wm|xݓ}9 }9 /RxA {A/6'bsA`+}⤃ 5 !N:,jjtXԤ&5W$'55 پ"9 jrA䤃@y"̋ eáȣyOȣ'pf$g$4g$g$g$⌄@BȧX>!Hi1H9 HI H<, jFB/g$55#zFB @ T3g$RMHH5=#!jzFB @ T3.ԌE H,jjFB`QS3.T> *aXԌ柦Ǫeȇh9!O=!O=!9]!y.l_Xtft@ V&+ZMNWh59]!jrBt 5]!jrBt+55]! EMMWwOW,jjjjNՅhDS Dxą"XHLā*k kk*kC_XkEgt?YkX,F_XJ5U,jXT?ɬʬ;ʬ{ެެ_)-5?l\aT}?a'z>a</a:oSؾȇk|ؾća?LavPavPavPavPavPÔffT0;T0;0;T0;T fT1?2?2Һ S S S S S);2?2?l?ey~JJҺ~180;Һ~ 80;T0;d=c~w=c=c~|w=c0333SoPbuq0?k9a\G:b0%Vq1?9at9aat9at9ÔXf˞9avPavf?L9avP9avP9avF(Y;- ~)?a\0q=0?aya\eo0;X6Jq0A%kA%kAAg7eTv,{SAAA퇉AA+{SAASdd0A7e7e#)?l7aS~|w1?# =Ô{`K1XK1?`K1?X<bŇ)?T<S< gRP)gRU  Hc=B5b)gR[Pf55 EM5bjZM\v\v!_Ch88B 퍃8p558 rp@@V&ZMP&ZMah598 oEͶ)ȁEͶ)jp@`QSUofXlB,jXlB,jXlB,jM!5UE>&ZMh598 jrph598 jrp@@Vh5!#P!#"GȳGh8 qA<@7y 5ø8@bHø8@bH a`/q0;0;XK 1*z<j0;avPavPavP$A f~$AEat$AF9@b~zat$+]F9@b4@bat$avPaavw9@q0^8L ݇:pİ4b0^{1A ^A.^Ai0 8b,{"88b,{"88b,{"fUf˞ Aŧ avPavPav8bTrz @ZA+qARTHY="pk"zD Hm=B5X A5 ,jh59"jrJDÅh8%"ЂqJąh8%"ЂqJD㔈݇@ )S".Ԕ"XD\)EMM,jjJāh59%"jrJD䔈@)k9VS".Ԕ@)VS"&DoZMN,jjJDX`Q9%"Vu`Q9%"VXT:o5ێS"jU5ێS"jU5ێS"m)EM䔈@)VS"&DZMNqDZMNh59%"jrJD䔈D5ijS}sD@jD@|C'~OZM>h59|"xD O55|B ,jjD`QS' '>Xl_>h59|"jrD@@h<j9Y@_@`@>@l,jjE`QS/~^RMxH5="jzE @T/^RMxH5=B ,j*X,_:xqa^55BExXT?EM ,j*XT?jrTƅȧȇ'PUQ<Q<Q|8Q^eqa[eȃ!ȃ!qFe5*㨌Fe\QEM,jjTF䨌@Q|8QjTF䨌@>@QV2&GeZM(XT=QEMʸPFe55*#O<*#t{`QSFe55*#QEMXԨ n,jjTF`QS2&GeZMh59*#jrTF zTF䨌@6 NS/r ۝C6yVC6@C6.Ԑ@@>@>@/rfSsF`QSC6N!EM ,j;5lZMh59d#jrF䐍@!VC6&l\!VC6&l55d#!EM ,jjF xF`QSC6XԐl\!EM ,jjF xF`QSC6XԐllZ?59#ǒ<3x@qx@_OP9y9}=9#ǒ<#ǒ<#g\)Mm~?]mmcۥض_mmwm7mmax}<>cvp1;s s|9>f>x}<>cvpcvp|[8$-Mzd76_~U33$A 29>FDc~ s|"1DctyA9>F؏a~ 
1;~g~ 1;~{cvps ۿT?F61ztƱ?Fϐ;>Fϐ=Cc y9>~\Dc۰x? 1s|ls| ?{y^AcWضmctyA9u{cv <e]<,??_Ɵ'r 9Dcy"1<xO#1ȁ s =xO#1ȁ|l1;X`ȁ|G_5r {cvpρ|l1lρ|G_G_9=mcvpρ ۏ܏,k@>FDct9mȁ|"2Dct9a{ct9A@9xFcX=m(cy"mȰ=mDc>FcضEcg=>{YFcvm,kd= em~d`Y 1;>lz|G zA}MmCAm eɃ6yw6&rGm9Ƀ&F6yM.TAɃTSm&RMMjAUbUQTEy*ʃQAΨ C*ʃ?QE l'(rtFU9:*J^EyOTQ,jU`Qs^A}pf:)uT2esuT2eT20}˘L9LugL ۧ3&SS2e=c2e=c2e=c2eJ SG:ߓɔa 0L dʰ}|&S"^a2e&S^a2e&S!A&SA&Sӑ`Y02*2u)젒))2*2*2u)젒)`YS)`,+W,_f+gFFFFFFFF9Latatav[f˺avPGe0;؇AefAefJSffSTffؾdTffTff>?*3sbfu0;XIFFFO VIFF9LatatÔfu0:Ṵ̏}/aJ w2 ό)aÔƟS25x2%3-S2}5x2%sR2x2%3-S2`Y0%3 S2 S2`Y0%3av_fuaJ :d~`Y0%3tat)@ZZL u-&sj1յ@ZL eu-&pߢH_] \[T)k1ֵ uw`QSj1EMb:;&k1V@Z̅>bmk1hX }>b](@Z̅ʋܵj1XT-&ŹZMb&k1V@ZLA*jsj1V@ZLd-&p߻h5Y ,j ,j$k1EMXlIb:"Bb:">#m9ZL`QSG5r,j$k1EͶd-&#d-&jh5Y ZMb&k1h5Y ZMb&k1Vj1f b-&jD;ٙ ,:3ؙ ̅v`g&з;3܂L 73ہ@v`g&\5ՙP:3EMufmL`QSf[D3h5ٙ ZMvf9LdgBuf9-"ΩP=۞=_o- Z3MU+Ŕ rhс@9'?ߩB  ?T'kP@N9[@*pw^Oe'?[e'Qr@g'3RrJ@[۞?EM~?T@O t'jH5] RM?T@O tB5 ?EM.T]$O`QS U ,jij.XT]B5U ,jXȁ]nP 'U 䬀AAX~|f7vg!*w9DnP g nP`QSݠf ,jtAEMuZMv&Ap7Bu&Ap7(jh5 ZMv Yw&AEM5%5 PMAEMu YwjJ5Ք,jXT7(nP`QSMAEMu.TS"nP`QSݠ@nPd7(jh5 rVݠ@nPd7(j |,r>š@šTqv`q(:.r>š U 与COpq(vq(з CEMP`QSšf{;8XTq(,ZM&CVš@Pdq(j8h5Y ,]Pdq(j8XTq(P`QSšCqq(P`QSšCEM.Tq(P`QSš U ,j8XTq(P`QSš@Nu8h5Y:š@_29P g\ ,Cqq(Зš@8A<9Ѕf9P`QP`QP \Pšf9šC#] ,jS]P`QP ,:P ,:P ,:PC,:Pc,]ؒ3,rJš@'?XgX\{0lB -Kp+0l8 _tW\_a/Ӆ?Oaiؾ?SqhTqhTqhTqhTqhTq0?AASqhTqhTqhTqhTqh:fUe, , ,0:0:0:0XFYFYFYFYavPeavPe;,0uԧ0;avP>YA}\fUfU8L_UatUatUTFXFX}|dg_XO ̇AV~AV~Ӊ.+?.+?.+?] +? +?DavYfUf˻avPTfUfUf˻avPavYfUf˻avYfUe 0:0:0;ϰ}K0;X֑ :%,HV~A?33lr*?NfUfUSA?oV~AV~AV~AV~ +? +?oV~AV~AV~Sgdgd0U~A~K>3l[0?|{^Sgؾe.e’0ޛ, ɒϰ}0ޛ,0ޛ, ɒ0;X(, e’ae’0;X(,  e’0;a* :,k|A?H ] .R|.T'HU] .[?K>4% uz u'ںs^,jXT'O`QSG5uzdBRhP@ XOcBz-c=@ XOཱྀ#Ђ1hP@? 
,jc=*XT'XρZMz&c=V@XOྷ(j2sb=V@XOd'ph5 ,j*o{ ,j#c=EMXl+Fz:gBz:g?smXO`QS5ۊ,j#c=EͶbd'sd'j2h5 ZMz&c=*h5 ZMz&c=Vb=ff b'6 w[ iS  7Oa(w@P @el.c( 7}軌 f[XTB5mmЅj5 ,jXlk6j5ڄ @Pd(jȉy7&@ȱy7/jS*ſE!0gsF*9@ߟr'sFpO9Q : @: ([Gt(#tndB9GQO­@yFxUXI Ua%)ce$]JR +I-s%),$JR$r̕@NYW$5UI ,j ]I $RMW+ITӕ@JR t%)jH5]I $RMW+IXTK%Ye$]Xe$5UI ,jtZ*EMU$]J`QOSOSS@XTK%J`QS*I:j_r_B9?R ;W W }@W hJp_).l9R`Q=W ,jgJEM.T_)R`QS}@ɾRd_)+]Rd_)+ZM&JV}@ɾR Rd_)F`QS}JXT_)R R`QS EM+5W ,jXT_)҅*h5W ,jh5W +ZM&JV}@wh5W +Z3AM_ @8߾;KWKr|)!/]R ǯ_ `KK?_ ,j*XlƗ/5۫KE`|)j2h5_ /ZMƗ&KV@Rd|Bŗ&KVKEMŗ/5_ ̒KEMŗ/5_ ,j*tKEMŗ/r\KEMŗ/5_ 8KV_ %c|)V/r@9"Ǘ}_ Kt|)E/]Xήv|)/5֎/5֎/r _ ,j_ ,j*ta9f9 ˑ֎/5˩֎/1R+/3RK/5Rk/yR'/{҅R _ t [s|i 5l`.ar`خ4l`~_asi~~ؾ;L~H?a*n8:LavPavPavPavPavP_f_f_?}S|iT|iT|iT|iT|i:f_e/ / /0:0:0:0_F_F_F_F_avPavP;*0uԧ0;avP젂>YA}\f_f|8L_젂젂 K K)40404w0zҰ}L}=c`i=c`i>>141t3gK.K.K  K K0avoXfXfavPXfXfXfave`iT`i,[e`iT`14242424242424242tK K KK=!`YG24*^0u$KKKM )4*^0*^0*4*4lfX:LavPav|04242424lFXFX:|04242tK K K)424l0ƐS iؾ=e#Ɛ{S>b i#ƐR>b 0Ő>b i#ƐQ+ Ɛ` 鰲` i, ƐA` iTc0ŐAA` iTYat )0> XK RR v)BiKIإ@RRO#2H]JPhKIJIEM*%5u}`QSZM.wc)jtKV@҅j,ZM6&KV}Fdc)jtKEͲpc)҅j,5X ,jt KV@Rdc)jo! 
l,]Adc)jh5X wZM6mR`Q5X ,jcXlkM6\R`QS:EͶdc)/mR`QSf[kXlkM6Rdc)jh5X l,ZM6.Tc)jh5X l,ZM6&KEM5=ER`QSf [@u ]@np)?>vc)w6N;]@:rNSlv:5z]NXl!v.T)S`QS]f[XT)C:ZMv&NV]@h5uP]@Nh5u !Zs5ivkL@p@V}X 6 W}X %c*OV@np*O&9DU G O@ f'9DU  DՅJTrщ@߼LTrwUEW]~U/U?ӗ@_29~U ܯ %c*O9~U ܯ UEMW5կWRMUT@~U t*j_H5ݯ WRMUT կ ,jUX,/:.,/:W5կPUEM.T&~U`QOSS կ ,jUXT&~U`QS@ν_h5 Ua*#_[rq@/8n¸U VFt*s5[ՅU  ,jVE`*Յ[5 ,j*nh5 [rq  [rq@ɸUd*j2nh5 VVq %EMŭ[]BI`QSqV^p*BI`QSVEMŭ[5U( ,j*nXTBJ[5 [ZMƭ&VVq@ɸU j [ZMƭ/OV!0nǭ9UOƭ.lƭ9U cPq@ν9nǭ9U`QSqVE`*U`Q: ,j*nXlƭ&VVq@ɸUd*j2nh5 [ZMƭ.T*j2nh5 ,j*nXT*U`QSq@n9nXT*U`QSqV*nXT*U  ,j*nXT*U`QSq@N9nh5ٯ Ua*YWr6@N_#}Uد ؠUt*GW]Xv*cW5W51Wr( կ ,jݯ ,j_ua9f9 ˩W5W:~UCW:~UcW:~UW:~U1W:~Յ-`~U ܯ t@ [ƈݯ{0lB -Kp+0l8 _tW\_a/Ӆ?Oaiؾ?SjTjTjTjTjT0AAOԯfկfկfկfկA~U U U1_5_5_5?AAAAsŇ+vTOcTO0}i xf:ug0;0;OavP>WAA4ӗ0;0;0:QYxwqu`iN a; u`0> Ya}ľӰ}?0> wfu0;XG; ; }ľ0;a; ; ; }ľ0;`y44N`y4wFwFwFwFwFwFwFwFw:L}at}at}avP}aavP}avP}cfwfw:L=avP}avP}a>wfcfcfwfw;0;0;XIwFwFwFwϔ; ;VIwFwFw:L}at}at}wel8 ݇ɆӰ}aj8 wa †Ӱ} 0+l8 †Ӱ}0+l80+l8 †Ӱ}ae0;X l8V l8 e0;0;X l8 j80;0;|0;X l8 *7 n8]W/u~ u)p)HpP @ZS =v)&oGn8Rg7.Թ4 @:Ӆ:W?S`QS NEMXԹV p l8ZM6.T)jh5pP @ɆSd)joGl8ZM6.T)Yn85pP NEM5d)jh5p l8ZM62V ,l8ZM6&N&@ɆS`Q5p ,j&NEM5 mɆS`QS͂ p ,jYXT ֚l85`a`Q5p ,jYXlkM6mɆS`QS͂p l8ZM6&NV @ɆӅj8ZM6&NV @ɆSd)S5>p ,jXlAgGsm4T P*LCrP*LC0 XJ[f1 P~`0 ȭV'X}t+]czXXt+Ccr}XEMccRMXT}@>V t+jH5 cRMXT} ,j*wX,oA.,oA->V`QSXEM.T&>V`QOSb ,j*wXT&>V`QS}@h55,kzA<9V GJ\ dY+דe@Έi'aYaY+3.k5ۣef{԰XTYB*k5U ,kZM9օ*kZM9VdY+jh5Y ,kre@ɲV`QSyZEM.T%V`QSe@XT%y ͂m ئǶ6>]av?=>]mcRn~?=>=>=>>=٭=1;g>fܳ[ܳ[۞>ܳ[{vcvpn}l{~٭ːDvct٭a{ct٭Ad>Fn}"1: [n}lW~=s2l3'1;~{cvpd?X?fՏ=1;gN_{cvpϜ|"1:$x691^#$>k$Ƕ!1^#$Im>\ʻ Im[>\{cvܓ\]$'>f$,*$>f$`yW!1;'>f˻ IBcvpOr]$ \Hr}"1:$ \Hr}"1:$װ=1:$ \{c~=1;'y=1;'y=1;'>m'>f)y=1;'y=1;XIDctIA$> "1:$װM$ \Hr ۓ\Hr}"uY'Y|Ƕg}lg1z|5Y۶X|^#5lg}^#1z|Ƕ~G>kXy#1;Xg} ~G>cvpg );Y{9cvߑܣg=i U>m;̃TX= -V>Az|փ4Y=RfgIiYi{>z' |փE=i`QsO
WA҆Agӗ0;0;0:V0:P{ha u`m>=ׁ=a0^ІYa}ڰ}'0> Cg{h`y66{hzh666{hzh`y66{h`y6CFCFCFCFCFCFCFCFC;L=at=at=avP=a·avP=avP=KfCfC;LavP=avP=aVCfKfKf?AІSzh22CFCFCFC VICFCFC;L=at=atͳa۷>ٰ}0ٰ}00ٰ}0aj l l 7:Vl a0;Xl j 0;a?glTd,p6φALA7.TB&鯛gΕ@*مjb7鱛g4ͳ}{@ ev,:yvR4ͳ@:م?MM5j5PͳREM&jP[6ͳ < l[6.T,޲yho< l{-gͳ < ,jgEM5.T,Y`QSͳ< lZM6&gVͳ}[KdBU3&gVͳ@Y*jyXl Q6m!Y`QSՌf[yXT5B5f5U,j(gEMU3m!Y`QSՌf[yXl Q6fYd,jyh5< lZM6.T,jyh5< lZM6&gEY`Q}yXT,>< ,j-ԁVb[ Y `Z0v-;k@pg-ЂhY `]Z`L@?Y ,j#;kEͶbdg-Z`QY ,jv:kEMu]l+Fv5ۊ:kEͶbdg-jh5Y rޝ@څrޝ@Zdg-jh5Y ,jXwSaQϹt3mv=Փ ,Sr= Tl.\OTl.\ODZ @ql.j26h5 ZM̱@\dl.jDȽA.r*%@$D]'Kt%] ] tKt~԰DwJt?S%@?jX ,jXT.] EOahذ Clr հ Clr @jahذ Cl԰ 丱vt.]`QS v @] t.jaH5ݰ nRM7vT @] t.jawvEM݅]`Qyݰ ,jXT.݅jG5հ ,jaȉw7jG5Վ ,jaXT.nZM6Ԏ rr U wt.cfZ w*w.MT=X PwEc.ٞ`5UPwEM&wV@N@~wwV@N@~h5Y ZM&w3s.j~XT(]`QS 7 ,j~XT.cf57 ,joXT.]`QSFEM]Q`QS@w~ Y fg-3r˝@ոڅ9Z ]Y ȗ;kiDw9Z Y ,jXTg-ٞC5Y ,j;kEMuZdg-jh5Y ZMv&;kV@څZMv&;kEMu5Y ,j!w5Y ,jXTgBu5Y 䈐;kEMu5Y ,j!9څy90ZL]Z gyR ,?rSjCR ؠSjtJ-cN5iN]XΣwJ-YwJ-Z`QKZ`QSͳ␚g< ,< tڅͳ@]< tޅͳ@^< tͳ@G^< tjͳ@dž< tnͳ [ͳ@yggٰ݃adn]]ai.?lҰ/}u.4l" Hְqtg젚g젚g젚g젚g젚gy6y6y6l0;0;0;0;ٰu8y<F<F<;Latͳatͳatͳa|y6y6y6y6y6l`=L33KsTcfW0;oavP_젾/A5fA}[f<f<f՘9L_33 g g_ڰ}x{hxCʻ=aF0^#Іw{hw{ha|N6 ІbmTm,*ІASmTmTm,*ІAІbmTm,*ІbmT헱66666666vzh {h {hzhYavP=avP=RfCfC;L-avP=avP=aa j j 犇ASKeTKeT헱6666lqFCFC;|66vzh {hͳaͳ<l O6φO6φ;0O6Sldldlؾfl,i6+i6φflTd,i6φAeMSlTdTd,i6φALA7.T"ߪCHD ܷo.T-;HD s4A@ څPRw ]E`Q󧩩 Z`QS D ,jCXT"j2vJA}?2vhA@{ څ [-hA}?2hoDPAfY<8XTB 5D;A@ Zd-j2h5D ZM.T"j2h5D [&hEͶe- 5,j(hEM-.T-E`QSyf[2XT" 5,j(hEͶe-Ł ZM&hVA@ ZdB&hVA@ Zd-j2XlA 5ghEM-3AhEt6?Rǁ+ߊ7o[`*@H Fb-7oܶ[o$}#1 ~ ,j*Xl+c.l+cme[`QSf[3XTB 5~POS~ ,j1oEMme[d-j2h5~ &&o*ȝZM&oV@[`QSoEͶ2f¶2f-V9j4ޅJ>9o4^ 'z }L>mxbmT0݆A݆A݆>bmTm,#݆L}ĸ0;XG 2݆A݆A݆A݆A݆A݆A݆A݆ASmdmdmTm>=*6*v1nn)3*6*6l>LqavPјavPјavPqavPqa|0;a 2݆A݆A݆A݆ n no݆A݆ASmd0؆Clat!at!a0a K K ۧ+T؆y0;Xީ 0;0;Xީ ʩ0;0;0;Xީ 'S!}E vBCl!@j[>Hb ]E uw-ۅ_5?MM 5տ,jq_ZjV-5+mt|fBU-5+m Ui ԬZjV-5+mt|f-Y5•*mXT-JہZMV&+mV@J[q#jvZV@J[d-pSh5Yi ,jU*+mEͶJe-E`QRYi ,jEq*mEM(jQ5*ZEͶJe-nEͶJe-V5բ8@J[d-jh5Yi ]J[d-jh5Yi ZMV3f b-E`Q}XT">Xi 
,jEq៦f K-glg˦\lg˦\lg˦\o{6}۳)1ngS.з=rVM Ք ,j)Xlx6.lx6mϦ\`QSMf[dz)XTSB5j5ՔPMOSٔ ,juF.lFF]`QSf{02rXTBE5 ZMF9݅]d.j2rw"wV@]d.ZMFh5 ,j*rw"ZEMErӑ"ZEMEZ v _Rp.r@88{ ۽\ GO s*8H^apB 5 ,j*8XTp.]\`QS@NS88h5 ZM&sV@\dp.j28h5P@\dp.\`QSsEM9\`QSsEM ]\`QSsqp.\`QSsVM@ )-rrS.CTnr:M@ )!*79D\ ܔ trrEpS.1n5KeM@NǸ)wrcw ,?[a-!a-1a-AUb-a%Vbb-E:ľۅ-ž۟w{0l-Kp+0l_tW\_aEӅ?Oaiؾ ?SmTmTmTmTmT0݆A݆A݆OwfwfwfwfwA~n n n3666?݆A݆A݆A݆A݆s3lTOgTO0} f5kw0;0;0;0;0;STmTmTOgTOgdmdm>,;_ƆaÆ0pƟ5n0?k6܆g͆۰}am><;nx6lG avP avspfp;L avP avP avspfpf; av`y6wnn at at at at at at at at pFpFpfpj j0;0;a j j gSmTofTofTmTm>Y<vz3z3n at at at a(0:Ȇ0:Ȇa囄 at at pe ugom>E>םa ȇ񺳷vzkx[ڰ}Oa0>{[og8[favPuav[fF9LavPUavcomTeto-]E vo-p}H[P@*Z %wo-xt{kdH[ ,jvJEM?MM,.To-E`QS%[ fo-RZj.To-Rh[P@KZfo-RZjeYZ`Q,[ ,jvzkEM4Zdo-jh5[ &{kh5[ ZM-5Vf[XlT45*: XT!NC`QR[ ,jXlT?XlTmZ`QS[ ZM&{kV@څZM&{kV@Zdo->[ ,j 45g{kEMu3: XT!NC`Q}NC`QSf K: E\]d.j2Wh5 ZM&suV@\]d.j2Wh5PruE\݅3f b.\]`Q}1WXTB5PruEp\]`QSf b.j2Wh5 r;suV &suV@\]d.\]`QSf b\]`Q}1Wwa b.> ,j*WXlA5PruEM3ruV@\] 9Wh5 ZM&suY\]d.j2Wh5 gk593/ ÿ-Cw6 ` rCw*}h f>š@n"p.Ђ1twBwI]`QS7EM rۡ7EMo9H?3Ϳz 1wzW v.T\/ z^  ¸^}0XT\/s5 ,j*XT\:H5 RMzTq@^ t\/j:H5 RM.T\/T`Q0׻0 ,jzEM5e^`QS zEMqREp\BrzVq@ɸ^d\B5Ԑ:Y@{,_e/0 䬺|v/2ho g9,_eHe/5 ,jG*|EMe.T/,_`QSY@,_d/]ZMf&|*h5 ZMf9,_d/X`QSY|*5XT/,_  ,j*5XTj,jngw{X {sy/3K.v`yv;!d9Nޅ*rfX޻P{EM*5U `{XTy/!.ZM&{V@^dy/jh5Y ,ZM.Ty/jh5Y ,jXTy/^`QS@,XTy/^`QS{XTy/^ g\ ,jX̒{b?/S]r@ݸ9~^ Tyq?Rop?/Yrf8ȡ˿dܲ t-@g{ز t-@{ز t-@'ز tD-@gزز ,[vIewaKeܲ{0l-Kp+0l_tW_a Ӆ?Oaiؾ$?SnTnTnTnTnT0AAOԲfղfNfղfղA~[v [v [v+4e7e7e7?AAAAchTWhTW0} f4+h70;0;0;0;0;0;aj l l gSA~rog[0uζ0ٖ>Vl Grc[n?Ӹx-7rAaؖfՖ;LmavPmavPmavؖfՖf;mavPmavؖf;mavPmmatmatmatmatmatmatmatmatmԖFٖFٖfՖj j0;0;aj j j eS[nT[gT[gT[nT[n>;ffՖel l l l aAA+$l l l c#n><׏a~l ۧwqx؈Fܰ}c#n!l g+16c#nTe,16A\S#nTeX0;Xcl J2wI}݁ t Ձ Rdw;pq mv.>w T:pXT.TaBu*l5U8J'dA.X \>ہ U \o.TA.з rX \`QS rEMe\`Q󧩩\`QS  h5Y ,ZM& r}dAB& rV@ɂ\5$j Xl[m}˂\`QSf[߲ XT:BJ'5N,j- rEMm}˂\`QSf[߲ Xl[J'Ȃ\dA.j h5Y ,ZM.TA.j h5Y ,ZM& rEĂ\`Q} XT:!>X ,j*XlA,5NP EM3 EĂ\`QSf bA.j h5Y ,ZM& rV@ɂ\dA.j h5Y ,]\`QSf bAĂ\`Q} 
XTA.>X ,j w rEM*]\`QSw= wa bA.\`Q} h5Y ,ZM @ɂ܅*r' rV@ɂ\dA.j XTA.\`Q} wa bA.>X} XlA,5U ,j *]\`QS rEĂ\`QS@ɂ\dA.;\ ,ZM& rV@gtA.j h5Y ,ZM.T@mr\ 8/>mr\ w8/}>.)XT^.܅5 ,j*/m5 ,j*XT@Oa..T.S ^ .T.wt00]3 ]0]2}1LXT.5 ,j*LXT:LH5 RMtTa@0] t.j:LH5 RM.T.R`QMM ,jtEMUem0]`QSfY8LXTEB(5a  ,j*LMZM&tVa UQ Z_h5D+d.з{wh] 'ݻ {wػ ] @wz};wwa{RwXlOj5ۓOSSzwEM&{wV@Nֻwwa{Rwh5ٻ ]]d.jwh5ٻ {wV_EM]W`QSzw_v.W`QS v6I@Ϥ] |vr.3iwa3I@NE;i>'.T.N5J]]`QSIvEM%JrRI ,j*iI'&vVI@ɤ]d.j2ih5 LZM&&v*ih5 L5 ,j*iXT.] ,j*iXT.]`QSI ,j*iXT.CTN5 "up.?V9Ij] '\ ,up.drպ@NZYW.,W%j] gy\ ,j^uq{=s=sN=sN>=s>=wa=qb{– C{Bm|l{~lcv>)QMm~?]mOяmcۥv_mmOmm/ma{{cvpo}=1;>fܰ=1;>fǶϰ=1;>fޞs^{{2>FўD{nޞD{ctA>sh}=1:$ɍc ['rz{OCɍLG{etmzܻC5z3/jevp_˼+;2;^fޞ{s/{whޞ{ܻC/{wetA^=wr^;!2hȽlRVhȽl}#4^ m/}x!mfeА{#4 !2;7^f˳ !2;XhȽ ,GCev<ѐ{r!2: r/hȽ!2: r/hȽ!7loȽ!2:ސ{62;7^fܰ2;7^fܰ2;7^f˶a{Cevpo흗!2;7^)흗2;7CCet A4^Fѐ{6c2: rʚ A4^Fт{62^^˶Iߗ:7lo-:m|X>B e|m|X>B e|2;XЂ{,GhYa{ evpoE}c9L- I}Ղw6D~!V .po]HՂ"w!UV mRBڬ܅Y-q!V ¢ނ [5܅EͽwaQ󧩹.,j-Ƚ],ronx.Ƚ] _h{ sox.Ƚ mPBۅV &roZM.,j {¢f[!ؖAȽ]Xl 2 mۅE=vaQ-{ۅE=vaQsϽ]Xl .,j &roZM.-.Ƚ]h5{j"vDBn!TBۅV &roZM?MM.h]xj]}(j]]j]h5тjw!w!ww!wD .V.,j- { .p\X[p5܅܅EͽraQs\X[p5mj Al=П-tE.p]q "N.m8݅܉8݅8݅з=tG¢.,jq {@.]H5j*Nw!TB8݅TSq tRM.]H5j*N.,jɣ eIq8݅E8݅E=ytaQ,)NwaQsO]X,/K]XܓG{¢<'.,j45хE=ytaQs DB8݅Vq=ytDB8݅V BBnQ.p/]m1*]M*]Il.r݅WP;TT u1r݅EG¢^(]Xij8u5r݅V &uru1r݅V &u{Br݅V &ur[ &u5ׅEͽ\waQs/ {¢^*]X;^ZM.ȗtZj.Ԉ]n.4trKq -5tMj. trQq=Nw!e.\t58݅E=NwaQs]XtrFq.,jq]h5j"NwDB8݅Vq &tZM.q &tZM.,jq {¢.ȗt58݅E=NwaQs]Xt{¢.ȗtZ͟܅mSB?X ]! 
.hs{Bi?w!Gԟm]6.sՠ܅E͒kPB?waQ4ԟC܅?wS;]s:܅?wO]s:\` ?w!'rԟ([qny:l`.]at.?L~Oa_at폤ai@GSnTnTnTnTnTa ۟>SnTnTnTnTnu8?؟FٟFٟ{s s s sô۰}aZL uW'h:A:b:A|ݵ0_w0>{|go젚o젚o젚o:A)66lF|F|Oul2ۆs|gam>0ۆ#|ana\0l v۰}tۆ~`aj j j v0;v0;Xl j v0;Xl j2ۆAۆAۆAۆAۆAۆAۆAۆAv0:v0:v0;v۰nfffnfn{Z9mmˇ63366lfn{Z9Z9matatata0:v0:vnFnel Wdm?O6؆3y05؆dmi6؆c++l =۰}a{ a`f l`^amT+aj ʤ { atН}:HIY ;kHUY RWw@Z ug@E*;kEMuT"Z`QSOS󧩩HEKi˛ mO^a- mlT- mWXh;P@+,^a-Y^8\h ,jwJ5ˋ mEM T-B[`QSih5Yh ,ZM& mށ@BہJZM& mV}@d-^~Yh ,j_J5/ mEM T-4A`QSif{e-4A`QXT ^~Yh ,j_J<@B[d-jh5Yh ,B[d-jh5Yh ,ZMmB[`Q-Xh ,j*MXl J52XT 4A`Q-Xh ,j*MXl J52@B[d-jh5Yh ,ZM& mV@B[d-jv mEMmBہmB[`Q-Xh ,jXl *B[`QS mXT-B[`Q-Xh ,jXl & mV@B[ w h5Yh;P@np-jh5Yh ,ZM*5Uh ,je me mEͶ b b-ٖA,5Uh ,je mEMT-B[`QSf[XT-jh5Yh +& mV@B[d-\h ,ZM& mV .MEm@np&5nm@ns@J-5nܷ[ wnZjlRXT-ہʕ5v ,jn5+ ,j*WXT-[ 7vnE?MMJ3΋\ w86\/6cs،b36wbs،b369\C5 ,j*6m5 ,j*6XTl:6H5 RMcsTӱ@\ tl.j:6H5 RMTl.JQ`Q96w`y{sl.Y ,jRX,oo5U) ,j7\`QS*EE͟OSS@nql.j26h5 JQdl.j26h5],rKtDȍC.r3Kt8w.7KtX 6%@H,؞,53%JtEg.݁?Mg.]d.jD.؞,ZM&KtDh5Y ,ZM9q]d.hW`QS%Jt*XT.] '] ,j*gZ9]f.RDYu9u] G\ ,ؤf..r%U ,jVU ,jDwJtEM*5U ,jD!T.] 
\ ,ZM&KtV%@]d.jDh5Y ,]d.jDXT.]`QS%Jtms.]`QS%JtEMT.]`QS%@Dh5Y 䴓Kt)q.п'Kts.#|.r%@D>9] G\ 䴓KtKtEp.N.5KI%@wlؓ t9=3aO.aO.'rbO.)'lA19t\Nزbݓ{0l*-K090lW`@ 0l_ap~~0]a#iD?_ԓfՓfՓfՓfՓfՓ{zrzrzrԓfՓfՓfՓfՓ2AA0:Ȟp;;[eSlwDcl>hV0l glV*ax0;t0;X*fU yZe젒!젂!`xaA},6:HH$ b)d*HH$ I@j Y t@5 5$;PMdEM 5 =˯U2 g0eQ.c,wSfe2T,wSf˘2;P)@eL5u`Q8eX,$N5u`Q8eX*eXij*eh52 LZM&SfV)}輞,rX YhX C,!ahX C,ra~aEM*raEM*5U.RMaT@rX t9,jH5] .RMaTU ,j*X,/.X^ ] ,jB5KaEMUe2XTU&L`QXTU&rY ,ZMTU&jh5Y ,r&c= 7Ts,п'cܵX G -XoA6mͱ@Zps,з co6ہͱcEX`QSͱ ,j45۷cVͱ@X w-9v`v`s,j9h5;Pͱ@Xds,j9x7&cEMj5;PycEM59X_ͱ@KX ';Pͱ@KX ' 䌨css,R9v`ͱ@Nֻ9Q7Ts,X`QSͱ@N;9XTs,X`QSͱcvrs@5jrͱ@Xds,j9h5 lZM6&cVͱ@؁jZM6&cEM5j5 ,j9A<7j5 ,j9XTs@5j5 ?rͱ@_97X  crs,3.nrͱ@9ȁC7jX* n5KHͱ@θ9X,57~b0+ b*+ b0+b0+bbqV,]flu)fbvav & ۟yva?l ۯۿ ?l$ ۟HҰx8zb젲b젲b젲b젲b젲bSVlTVlTVlyb젲b젲b젲bOqPYavPY_Ƭ0:Ȭ0:ȬÔF{ދh[0.J dB<̟9xo2+6&b^0ޛ̊ ɬ0ޛ̊ ɬ0;0;0;dÔffFFO=L %&%&3``qat at )6.16.16lR|Xy16*6```y16`` at at at at at at at at )62626*6lff{/``SeTlTl>0%A_A_A%A%w`Se0;/cldldldl>j72620%~3_Smc?f310eck1f+yfck>gk1f젲`y350eA4A4z\U u,pS nRS71@؁jRV7` }0κHk;P `EM5T!X`QS `E͟`Ky@ d ,py @X>[@߂ 3;P@߂ d @} 2XT Yd ,j5aEMT ,ij*@&aV@@Xd ,p <@d ,j2h5 '&aEN@X`QS3XT ީ;Pɭ}">ɭ{4&-VoA&} 2[ɭ@߂Ln5} 2[ɭ@Vdr+j2h5 ,jwj&흚ɭΚ,jwj&:k>ީ ,j ,jΚ,jwj&:k>ީ ,jf{fr+j2h5 LnZM&&[Vɭ@Vdr+j2h5 LnV`QSg5;5[wj&흚ɭ[EM%Jnr,ɭ[EM%Tr+V`QSɭf[1XTr+ٖALnZM&&[Vɭ@e;h5:Pɭ@g;h5 LnZM&&[EM%Jn52ɭ2ɭf[1u`[1Xl &Jn52ɭ[*XTr+V`Q- ,jx?[dr+j2M-NnZM&&[Vɭ@V X9h5 LnZM&&[*@Xlˠ^x {\qv`+ i?[:P@qr{\!W ׁv`+W`QS=^,jXT+q5^,j*XT+W :P=z\^w+B`QLşWd+{) |ZMf&3_V |ZMf@W`QS2_EMe 2_EMe]ә@W t+j:H5 |RMg3_Tә@W t+j:u2_EM\]ә˻3_EW`QS!fyt+K`Qk:XT@e 5r ,jwMg?Wd+j2h5:P!@Wd+j2m1|ZM6!=3nX?61 @N @ߟlrۚ`ܘXO6l_l5W`EM5 `Xij6&`V @nLpXd,jvrIV @Xd,nZM6%5 ,jvrIEM5jr` @X V d` @KX G Ъ` t,Rv` @ȡU7T,X`QS @_XT,X`QS `r@5jr @Xd,jh5 lZM6&`V @؁jZM6&`EM5j5 ,j@7j5 ,jXT@5j5 d`3s,#_nr { H79X G \`t,nr ``ER=p,s5n5K _W%,yW%,y:]’W:,y:ÒW#:,yRN,yr%@tX:XK^v폯av & ۟\va?lh ۯۿL ?l^ WҰ{t8zJ^J^J^J^J^J^SkTkTkyJ^J^J^J^J^avP%_ƒ0:Ȓ0:ȒTFYAaTy^AwPf#V>bukYխaXխaX$>LխavPխavP5R3R355 [9atխatխa00ְ}zp]buk]buan Kn Kn gVgn n y0;0;Xgn 
y0;/cukdukdukdukdukdukdukdukduan n n n ۇAUAU0;0;TcfUfUO=LխavP5avP5avPխavPխa0;TcfUfUen n n n ۇۆAVAV籂5<: 0: 0: ְ}a 0: ְ}a 0: ְ}sgk,]VAe# 0;f0;b/s*f\R6׬kVe s@լ)kV5@U>H\ ,fujVEMլYU`QS5jVEMլ%50.;X 'V>O uV>蛗 Vo^vT+3:?Ye ,jw:?Yi ,j ,jXԙd+jh5 `ZMvi@ց:?jh5 `ҁVf{g+ ,j,XMXn/K,KuY ,KZj-5Re} @fY*R,o,KTdY*j,Xlo,K5[*REMXlo,K5uz`Q,Xԑ,XԑEMXlo,K5uz`Q,XԑEʲTdY*j,h5Y ,KZM&RVe@ɲTdY*j,h5Y:PeT,jTTY*.K5U ,j,XTY*T Ǥ] ,j,XTY@*K5U ,jeREMmIJTdY*j,h5Y RVeU RVe@ɲTdY*j,XTY*T`Q-X:-X ,jeReREͶ bY*T`Q-X ,j,uREM*K52el,@&RܸTdY*j,h5Y ,KrRVe@ɲTdY*j,uR:EͶ G)n x TӲI?-T&U w Ișm7}#IM@r*ܤ TVMۍ&U`QHlR5 ,jIXT*nR5 ,jXT*&U Ǻݤ:PMTv*N@`QSMOSS msLMQ 9-<\:Pa@np+|B sd+'\?:Pa@ߟ sd+! ,j*XT+0W`QSa@p+0W`QSafyv+j:H5 sRM\Ta@0W t+j:H5 s0W`QSufyv0W`Qp;XT]%Y^ ,jX,/s5UW9Pa*EMUaw8h5 sZMT]%j2h5 sr\V/UjW?!VծU 'jW QYuW}蛗ծ@ns+(\ jׁ{ծf^a+jW`Q}XT@Uv5ծh5Y 6 Wl+vZMV&]*ph5Y vZMVծ@jW`QS]EMUT(jW`QSծ@np sra@V:ho sr\u+s[l2ȝsra ,j*XT+ks5 ,j*XT+0W :Pa\^s+j2h5 sZM&\Va@0Wd+j2u\Va@0W`QSa\EM sra\EM s5:Pa\EM9X0W  ܛ\.r+ sra@:ȹ79\0W H \ u+0ׁ0W`Q p\ERGp: \* \.0\.0&\2\q+el}&~0װ݃akn]Ɂa'װ]a.Zk?l/Ӈ۟Wհ4l?00;00;00;00;00;0ff?}00;00;00;00;0װpT152520was*V3NjaW{aWiƷ^a|kƷ^a|kƷsS|kT|kTao ?6FFea ǣ25lk0^[ư{xmkkְ}tؿsz+ư1av<_ff1av<_f cX cX cX cX cX cX cX cX cXS kd kd kT k>[6*5*0EYAŰAŰ(0;0;ְ}aa a a GȆAŰ0;0;/c kd kd kd k>82N52N0ũAƩAƩC+AƩAƩ+AƩAƩ T T`y2N0ũA~#S4Ƒ}x082Hm ܇i#S*2Hy >LR G "S\k eL5 ,j*2u"SEMEL5 ,jXV?Eg? 
Ua*Wa@_@_}U:Pɀ@?0 `@`* T*d@`QSɀfyr*Y^ ,j*XlC [5 8PaVEM%ȰUd*j2lh5 [ƁVa  [ZM&Vqd*V [5 aVԉEưU`#d*R3lh [Zj@KͰUf*p [Ud*j2lXlo [5[*VEMXlo [5u{`Q2lXԉ*lXԉEMXlo [5u{`Q2lXԉEʰUd*j2lh5 [ZM&VVa@ɰUd*j2lh5:PaNt,jTT*[5 ,j*lXT*U nj ,j*lXT@ [5 ,jeVEMmİUd*j2lh5 \VVa pVVa@ɰUd*j2lXT*U`Q-:- ,jeVeVEͶ b*U`Q- ,j*luVEM [52azr*\ pWX ]U Kzȍ^rLի@Ձ.c*2V:?U`QSիLU:?C^5U WzXT* \ ,jWE͟OQի@$Vd+jUϩRl['^W'^*-"x멀W` xQÀW5 xW5 x5  x5 ,j*XT+{G ,j*XT+YxRM^T@W t+j:H5 xRM^T ,j*X,k,keW`QSfY;8XTh%Yx5Z9PB+EMVeW`QSfY;8h5 xJd+j2h5 )&^V z2\ .X 7H v` .w9.X .X wT ](.؁]fb,.X`Q}] XT@u5 ] h5 .wl_WZMv&`h5 ZMv]@.X`QS}`EMuT_).X`QS]@%qϰMF9OhX V ԌZjF Ѱ@S;ȡUG-5a6 .G9Oh؁5 ,j*@G5 ,j*XT4,hX`QSѰ@N:h5 ZMF&aVѰ@hXd4,j2h5 ;PѰ@hXd4,hX`QSѰaEME9hX`QSѰaEMEhX`QSѰaZ}`tQ,.rE@n(X>9X 'w] LbqQ,.rrE@N(XTQzpQ,Yj.rEfI>8@ a*F a0Va0^ajaq4,Fl=7a-\<.[,l luE˖e-?l-Wtr/[.eez/[I-O˖eWe(l]fhevp]fhevp]fhX Ѱ lyѰ Ѱ Ѱ˖ܣa=v#^o [.{2G.{2~o˖ð5u]#^5F.kde2zᬰ5u2.w?=u?=u2)w?=u?=.=.[7/[a.+κlۼllκ{zg]f=.?vpg]Fpetpg]Fpetpg]Fpetpg]Fpetpgκκl [15u\Yak02;.k8e*l g]f`evp \fpevp g]X]fpVκg g]F5uY-~=uYak82:.{8e/<petpg] κ˖پκ`y"dA^A> }!2"de탼Yp Y}!r"dek\AˬhAE5dz`Qs Y}>X\CV,j5N|QN/ZͿN^zIk^{Vsy}phk^{k'E6k'kE͵IxZ`Qs$|v>X,/gy}YP`Qs$|ּ>X\k^,j55ךk'!py}j5ZͽA׼>h51ZͽµA׼>h5^2A׼>Xlˠf[e^`g3Z`{Yl/k}ЂeZel/k}Ђe.so/kp-k}jeZͽf{cZ,j7ƽzq/k})f{cZ,jGʿp-k})zq/k})f{cZ,jG^Z{YVs/k}jeZͽA>h5^Z{YkYEH5ײkY냜EYE͵Z`Qs-k}>Y\>X\Z,jee5ײkYEͶ Z,je52h/k}jeZͽA>a[>h5 ײ9q^Z{YVs/k}>X\Z,je^za[e52h/k-m>X\Z,je^`Qs-kp-k}>X\Z,je^`Qsm|jeZͽAAYVs/k}jeZͽA>}3(k}jeZͽA>h5 m>h5d>ȁnd>Ϛ?V"d>n}n}зo=An^Cvf9zaܳ[,jGf>X\[7[,jG`Qsn}npn}f>mn}`Qsn}j٭Z=Ag>h5s5Y|N&)R5^{zt^ b,jr b,j5ׂkAE͵ µ Z`Qs-}[gP`Qs-}>X\ b,jr b,j5ׂe& b(}j A& b(}j A& b(}j µ d`Q,P{aY fY d`Q,P`QsM|YA(}&Y^>X\,,jI525$e^ b/\,{AVs/}jr b{AVs/}jUAh}/// ̠=AnL@{샾d{{~r!c>{ac,j/=`Q}5 k{E͵=A>h5g/=A>h5 Lc{{m`Qs4}>X\c/\3M,j5=&c>^>hOrrZ=& =AN=µ=`Qsm}h}>X\c,j59; k{샜D{Vso}jZͽ=A>h5c{{Vsopm}jZͽ=`Qsm}>X\cX/c,j5k{E͵=µ=`Qsm}ch}khpm}h}kh}|k{샜mF{샜mF{D{B{샜mF{샜mF{샜mF{E͵=R@{E@{B{ER@AkA:>p^={A샎^ }P>{A-$łذ݃asfn]ɁaG̰]a.۟.k?lo,ʰo7l=LavPavPavPavPavP 6 6 6lLavWavP Xz]aX߁ծat0Gv 
ۧ&?Sk#V1aL}jװ}Vr>A2lGfUfUf}j0:rTr0^:\5ׁa,W=La*W 㳀a|\5lgU,`a* *W *W=LŖavPavPaTfUlfUlfUfUO *W=LavPavP_Gat~?*W=L_"԰}j-P٫ PxmY]=<_Xk"԰}j-Pgӓ>lq;w)MLP ?vT)p g@>ܝo:P}"qMEM7|S`QSMEM:?&MV}"j2t7ZMaV@|Sd)p 7ZMaE|S`QS|S`QS5u75u`QS5ۛMEM775o ,j*XT)|Sd)j2h5o 7Vud)j2h5o ')&MEͶ b@>X4i}0Mta &0ZF #Lք}6К0hMa nc@E&#LVf{c)a ,jf{c)N?PN,jf{c)N,j>F&#LV@Sd)j2h5a 0ZMF&#LV@Ӂ05ux`QS"Lbt)S`QS"LEME9S`QS"L*XT)S`Q-a ,j*Xl F&#LV@S a 0S ga 0ZMF&#LV"LEMEmӁmS`Q-a:-a ,je#LEMEmS`QSa ,j*XT)ٖA05U 0ZMF@Sd)j2h5a G&#LV@Sd@`mS`QSn^ 1?7z?Oy@B7rMov)7/Myo !9k|S 'o 7n^e)s75o MEM[XԹEM7ro ,j*Y|:<|S`QS@|Sd)j2h5o 7ZMT)j2h5o 7ZM&MEM럩|S`QSMEM7|S`QSM|S`QSMEM7rNjMEM752@|S t)j:H5o 7RMMT@|S t)j:tMEMU-e|Ӂe|S`Q,o ,jjX, Z52*XT"E`Q,o ,jjX, &MVU7ZM&Mܶ|Sd)j2h5o N3Wo/sO*;%ܓ CI!{RT wJ'hؓ CIr{RToIؾyٓ ,jo^I57/{REMTO*T`QS=@ɞTdO*xܓ:}'h5ٓ INdO*j'h5ٓ !&{REMwI5Փ:PzREMq=//;6u`3@T #96ubS6@Grl*Mr|ٱ@ԁxƦɱf3bSEMŦ9#T`QSbSEMŦMrFԱ ,j*6QǦ&cSV@Tdl*j26h5 MZMƦ&cS*6h5 M5 ,j*6XTl*T Ǘ ,j*6XTl*T`QS ,j*6XTl*:S)=gT*TrJϙ@|Trۙ@N;SRg9 LU 3UEMeTX2T5K @B9SX,9gT*Uf`*uf݇a*Ѕfl"f9LU;1TjET߿T =?g %x+0l _ۯ2lK?l{.2l ۿ_xÔfffffz2U2U2UÔffffT2fAf06~2E5~2E5~2E5ߍLQ LQ LQ LQ LQ (00:407FO LC '2ESjfj>9l=x4԰}s> 2luFffeL5 T0~.L5=La\jυ)4&SMxo24&SMa7j{)2*4*0TAAc0SiTiTiTiTi>2*0AA~#/~IY3r345I aN2'߁؜@ 'U9 }R%||J R>6Oc|h:tH8@>6Ȼ HEM%J R`QS HEM%: ?&HV } j2tL ZM&QV @Rd)pT L ZM&QEM%J **XQEMHEMXQEM%J 5@ ,j(HEM%J 5@ ,j(2h5@ L ZM&&H|_d@h5@ L ZM&@R`Q-@:1dX.N~ CF b3d͐Q/6CF0g/6CF Ü~1dtBF~1dh52 ,j7!N,j7:;2 ,j2 ,jN,j7:;2 ,jf{{c(j2dh52 ZM&CFV!@ɐQd(j2dh52 Q`QS'x52 ,j*d17 52 ,j*dXT(o52 ,j*dtBFEM 52 ,j*d7&CFV!@ɐQd(nZMT(cZM&CFV!@ɐQ`QS!BFEͶ b b(ٖA ؖA 52!BFEͶ b(с 52 ,j*dXl jZM&CFWw(j2dh52 ZM9Qd(j2dh52 J`Q-2 ,jrrTߕ@r8Q ]9:٬f(Q\9:P{f(R\9 䘺+Gv(jrt`Y9 ,j;,jrXT(3î5uw`QS{5U9 ,jraWT(Q ]9 ,jx*GEMU&+GV@Qd(jrh5Y9:P@Qd(jrh5Y9 5U9 ^ W5U9 ,jrXT@U5U9 W5U9 ,jrXT(S\9 ,jrXT(YARMW+GTӕ@Q t(jrH5]9 RMW+GTӕU9 ,jX, W, WeQ`QSfYrXTk!YA5Z8PZ EMeQ`QSfYrh5Y9 Bd(jrh5Y9 4W&+GV@Qd(Q`QSfYtJ܀ReY)޲hoYV e@{˲ReY);*]V -Jo{۞eJEmϲR`QSeUV ,jXTY)jh5YV l,+ZM&J3h5YV ,+ZMe@ɲR`QSfgY)L`QSeJYV e@yjm1l:zXa'A g g4CYas2K0e,0+=LYaW%{YaW% 
,0+=LaW%f%z2%젲D젲D)K4*K4*K4*K4*K4lf%zD젲D젲D쏲Der©uFt>lͩ}@s*'p @r9} p T΁Jρnv*'TN`QSR9*XT*'TN`QS[,j@TNd*'p@T΁:l=j2h59PZMr&S9Vw&S9Vuz`QSR9J5uz`QSTN`QS5uz`QSR9EMr:l=TN`QSR9EMr:lLZMr&S9V@TN>h59PZMr&S9V}`+j2Xl` KM/7d 1xKM>cKM>c 7~0xs7~0xh5 ,jw0oNz,jw0o:9 ,j ,jNz,jw0o:9 ,jf{c&j2xh5 ZMo&7V@Md&j2xh5 M`QS'=5 ,j*x#o 5 ,j*xXT&F5 ,j*xs7EMo 5 ,j*x"o&7V@Md&sFZMoT&oZMo&7V@M`QS7EͶ b b&ٖA ؖA 527EͶ b&́ 5 ,j*xXl obZMo&7v&j2xh5 ZMo92Md&j2xh5 ,F`Q- ,j*q7d&r@| G'=c&r@ f& 7?u&j2xs` ,j;Nz,j*xXT&Ө5us`QS'=5 ,j*xToT&M  ,j7EMo&7V@Md&j2xh59P@Md&j2xh5 5 7EMXT&M`QS ,j*xXT&K ,j*xXT&M`QS,j*xs7EMoeM t&j:xH5 RMo7T@M t&j:xH59P,je7e7EͲ r&#eM`QSG52?P,jfY9xXEͲ r&j2xh59PGZMo&7V@/s&j2xh5 ZMo 5u~`Q,MͲ z`QEMFvTd'[7 , Ev}0{@*rd'[7 NFvӑ@*rd' #;7 Fv ";EN`QS ,j*XTd'j2h5 "GvloZMFv&#;*9h5 ZMFv{ʑ@N`QSɑf{`d'H`Qa0XTr@EvNr{#;vd'3h ,Gv@p;h9N 7e9Xl3XTd'N e ,j*XTd'N`QS@:s";EMEv9(Ndd'j2h5 ZMFv&#;V@Ndd@Ev&#;V";EMEv5 #;EMEv5 ,j*s";EMEvrۑ";E?v'ry@: Hy}ɘ ؽ<w'ùry@;ȱ{y597.13l"OC=L at a}c>0eoAeoAeoAeoAeo젲7SfTfT0:Gٛ_|́:9pc |M>r5K` X:P}*pK}*75*_i>aȻ5EMk|M`QS5EMk::=&5VoN&5@|Md@h5 ZMk9 ZMkEMk5EMX*_XEMXT&|M`QSN,j*_XT&|M`QSN 5V@|Md&j2_ϛZMkV@|Md&p ^ؙy ;3lٙ gL?[vfٲ3gg&p3 mLo{vfTg&з=;3ٙ ,j%vfeX,j%vf:`9^ؙ ,jՙ ,jX,j%vf:`9^ؙ ,jf{Ybg&j3h5ٙ ZMvf&;3V@Ldg&j3h5ٙ L`QS,5ՙ ,j3Iwf5ՙ ,j3XTg&335ՙ ,j3s:3EMuf5ՙ ,j31wf&;3V@Ldg&]ZMvfTg&[ZMvf&;3V@L`QS:3EͶ bg bg&ٖAؖA52:3EͶ bg&́5ՙ ,j3Xl vfQZMvf&;3xug&j3h5ٙ ZMvf9Ldg&j3h5ٙ E`Q-ؙ ,jFq:3VBM ] T 5PfBM ] | 5~&Pg 5w&s.rRԅ@B́BM`Q=X ,jh 5EMj98BM`QSG35u4s`QS 5cv@j*rمf,jPXT&jPh5Y ,ZMj& 5VU ,ZMj& 5V@BM`QS@ܻPX́EMXT&B́*5U ,jP= .5U ,jPXT&BM wPXT&BM`Q,\ .RMj 5TӅ@BM t&jPH5] .RMj 5PXEͲ r r&YA.5u~|`Q,\ ,jfYPXPXEMX, j:?>YA.ZMj& 5@BMd&jP-d.ZMj& 5V@BM`QSΏ,je 5EMX,ˠ52VUU Wq!U!Vq}s8?Y *N xJdV*N`QհXT'jX ,js8EMUqZMVq&8ܬ*΁U@*Nd@5B&8VU@*N zh5Y ,jXlo55 ,jVqj*Y U@np'xVqm<8B*N 7CȉsWqm<86Y 2WqxVq5U X8EMUq5U ,jȱ^WqT'*N z] ZMVq&8VU@*Nd'jh5Y *Nd'jXT'*N`QSU88w'*N`QSU8EMUqT'*N`QSU@NXT'*N`Q)j)z?= ƆO ܸ>_v'3nZ06|97O 4e7|9O 4XT'O`QS n5nr f9d _LOLOLρLO t'[gz ۟v o23l lΰ}u>0es33ljO ۇV;9yaǰ?+˘F'F'F'O^ L LaX c&>n:L}*;ulaT&;ulaC͵b&8\!aCME5zuPScC͵b&8XU 
5598XPsX 5598\!aCME5zud&h5Y ZMaVud&h5Y ZMaVud&h5Y ZMaT&8XPSuPSu hjjjjQ aCMaCMaT&8T&8T&8T&8T&ȓ&0A:Ljyud@av/{)Ku?eg_e]{_x^Op^_<|gy2;ev;wc,_fY<w/,_fY<|gyןÿ!Ady2:,ޫ"Ady2:,_ƿetY"Ady2:,aHeC?_:wB/5BB/{^N9k_:Λe#h!i!i!i`eFHeFH|이xx;iy_:Ne{'mס6_fI6_fI6|'m2:M=ב ͅ#wב _G>.|uۚ;is4҅wN#]DI;isEⅯH.Q¡;isP󝴹pNI ͅCw¡;isP󝴹p~DBͅZM$m3D׋ &6 &6ZM$m.H\zxDBM=¡;isP󝴹"1N\8|pp|'m.jG_8|ppN\8|'m.j65#/j65I ͅCw¡{j"isDBͅVI _/.H#/H\h5j"isdЅVI _W>ͅ?6CB|H\:Xul$m.|w66F&N\;I }g#isPspCH\8|rpCH\8|rpCH\8|r65߳/jg9_8\!$m.jg9_8\!$m.jg9_8\!$m.H\h5j"isDBͅVI &6ZM$m.H\h5j"i|'m.jg9_8|'m.j6p65I ͅCw¡;is!9(isP󝴹pNI ͅCw¡;isP󝴹j"isDBͅVI yGI &6wBQBͅVI &6ZM$m.j65I kMpmpAH6I kͅCw¡!isP 65I ͅC͵ B¡;BqDBͅ<ͅVI &6ZM$m.H\ȣJ\h5j"isDBMpAH\8|G(ͅCw¡!|p.F B_lp>ù;s/ }П }@_ԊS/s1 yZ1 &b8@b8ZMp9΅CB ¡{ CB ¡{ tùp}P=¡;sPù c851 ydY1 )51 ΅V1 &b8ZMp.\h5ùj"|p.\h5ùj"sD B΅Cw BW ¡{ C ΅Cw '}Pùp\O=ùp\8|p.jc851 b851 ΅Cͱ R B΅TS1 b8RMp.\H5ùj*s!T B΅TS1 b8w ¡{Cͱ R '8A\8 p.jG_8 p.jG_8 p.jG1 5#/jmb85#/jmb8ZMp.#/\h5ùj"s!LS B΅V1 &b8ZMp.jc85#/jmb85#/jmb8 R ¡)sP4V;s!?QFB_2dt.%CFB_2dt.[\bѹp\wFB?Mѹ߹*s!RFB?M 52:5  ΅C͵CF¡;|gt.j3:5 &2:ZMdt.DŽ  &2:ZMdtnȅV &2:ZMdt.p\h5ѹp\8\k0dt.j!5  nH 2:2:u; sodt.x }; s!SFB~K΅<΅  2:5퀌΅CwF¡;s!OE+sPѹp\8|gt.j3:T2:wF¡;s!OE+sDFB΅V &2:ZMdt.\h5ѹj"sDF'\h5ѹj"sPѹp\8|gt.j3:2:5 ΅CwF¡;|gt.j3:5 y`_ ΅CwF¡;sPѹѹj"s睴j 1\] yZX]s!s!O |w}.Avu}.iau}.υjA] Ղ> >H>ZMt}.j>5] OpLW¡Vυ<υC1^] M@B >x>υjs\>Ϡޏ{[?&>Vc|}Lc3cko׏xc?[{1u}>fTcvP]Au}>f1u}>fTcϏ1;젺>|jEÏ3{~LkV6/l|1 6x>F_}ac􅽜Oz9^G|?>]Q'{9{_1f|}cد3k~L7cc8{1k>?_9}}ԯGp>>)Ak>fկ5?~;4js}f">3|$LM}f">3|$L߯;T&>|$);4C| i;4:4:4:4CjCjCjCjCjjzjCoVgM&.h5١9Ps׃Vd&h5١ &;4ÁjCjC|;PPSs׃CM]?PPSs׃CM]5ա 5ա 5ա 55w=8T&8T&8T&8T&8d&h5١ ZMvhVd&>AMjCV5 k2A#&L}#5OdEAߟ}&sj2Aߟ}&jU k2Zհ&jjrpV5jU k2(Lp1(kUÚLp1Zհ&jjrpV5&k2AɚLj&&k2AɚLj&&k2AɚLj&sj2(J5PScCMdCMdCMd|LpLṕ5dM&h5Y ZMdV5 O&LGJ\ ZMdV5dM&h5Y 5U 5U 56565PsmX9pmX 565PS5PsmX 5U9P5PS5PS5PsmX 5UqZMdV5 &&k2AɚLj&~ |m[{ 1]ޏޗc?O? 
c>c|*1;b<|*c|*1;OScvP1Ax>fԢc">Fi1:; |`pct we|}Dc({uއ>Fg>Fg~Lљ}}c3{1Eg>?):љ}}:\ާf>>O3|}&_K}x ~!|8P)@}x ~!|O*|_Z)r :Z!\s-*j*j*rR.R.R.R.R.&S.AɔK)h5r9PCσV)d@ =ZM\V)d%~L&S.jypKpK@\CM =55@\CM =55<8T%8T%8T%8PS)PS)PS)PS)PSC)d%h5r ZM\V)]}j2r&S.AɔKj2r/< ;/AB /K}!蛗o^v^ A߼}r:/A߼&;/Zj%;/kKpZjjpjjqp<pj%;/kKj&;/AKj&;/AKj&;/jjqpKpKoky 5588T%8T%8T%ȳKpKpKpKpKy ZMv^Vd%h5y `;/Aˁyd%h5y ZMv^VPSPSPsmy9pmy 566Psmy 5y 56PSy 5y 5BZM&N*L}U8 0qUa$cPN}U8 @ &N<Ɂj`$syNy2ωPS Ps89p? Z0OT$h? Z0O~c$h? Z?Gy57g'AIjDOV׳Ps=? 5578\>OCM =PPSCM 5? 5? t'jYOCM 5? 5? ZMOVd$h5? ZMOV? ZMOVd$h5? ZMOCMOPSCM 5? 5?9PCMOCMOPSCMOCMOCMOPSPSPsl? RMOT t$H5? RMOT t$H5? RMOT t@OCM566Psl? 55^;8 OCM56PST$8xPS㵃Cͱ r$8xPsl? ZMOV5^;h5? ZMOV r$h5? ZMOVd$8T$8xPsl? 55^;8 O OCͱ r$8T$8۠8J8f%8Jq r%OTG b3{G9pG 98JpVok8Jp8JpVo8Jp8Jp8Jj2a((A8Jj2rA8Jj2ٚ(A8JpCpVo >k8JpÁ > )Aw9u#*)rJٹ".y&唠o$S\7)A~drJpn$SCy#jr)))))A|v9@SCMS~|}c?L?d젒5J|*Y1;d젒5?d젒5J|1%k>fTcvPɚA-?cvP+s6{1-^>F_}ac9os6/|0g1lާ Lٖ~}0c={01e[>?)7yﳅ{1e[>>y~}dcC?lGyeί﷪_ '﷪ʯ﷪ə 5p~@WWWəL?>9s+++*j*j*j*j*jjyj2̯߯V5J' +JjjU +ZհjjppV5,-jU +F JpF kUJpZհjjU +AJj,& +AJj,& +AJj,Jp + +A\ta%8|PSCMVCMV|JpJpʁ*****&Ӆda%h5YX ZMVV a,J/]X ZMVVda%h5YX 5UX 5UX 566PsmXX9pmXX 56PSPsmXX 5UX9Pdy$[Gey$+.}#<H7#Ary@MFby$#T.y呠o$GC͵2fy2?MZ"GAs$zj'AKIRv򃬝-5k'AKIRQ$ȳ߳T<Ɂ&k'AI\; ZMN\[NCe$8ԘPs=oY; 55@NCM 5578T$8T$9T$8T$ȓjvjv&k'AIjv&k'v&k'AIjvyߵPSc~CM 5U; 5U;9Pc~CMNCMNVc|}Lc3cko׏xc?[{?~LUAU]>fUuTcvPUAU]~LUAU]>fUuc|1;젪.Z -{1^>F'Xn`ct喏-,|N/ccVH~LI\߸߯7|q߸߯7@=7{}$~};0H}$~|9PuPSuPSuU5U5U5U5U55;h5YZM=w*Aɺǁ{&jvj{&;d#h5Y8PsCM=CM=w*jjvp9jjvp9`{{{{{&AɺGj{oVu5;h5YZM=Vu4YG`#u=Aߟ{}|d#uUd#h5Y5תuPsjX5568\=CM 5תuPScmT#8XPScmC͵a#8T#8\=CM=C͵a#h5YZM=Vud#h5YZM=Vud#h5YZM=VuU5588T#8T#ȗL{ajjqpGpG<]5U5U8PuPSuPSuPSuPSu v{&AɺGjx=VuUAɺGj{&qjmmjjm?^FgGZM2Vd/#3+eA^FW{A^Fjj&{Zk)?,m?=@6Xv`iYv`i#ہo?*my˥{gǹ\8P#fVdi#h5Yx&KzRj'5KFIFpjjlpJJAti@6CM6aJjlpFpFj,m&KAFj,mFj,m&KAFjj| 5U5U8P#fCM6CM6ԈPSPS{*m1jjlpFpF=X48\ 6CM6Cͱ ri#H5]RM6Tӥ ti#H5]RM6Tӥ ti#H5]RM6TӥU5598 6 6Cͱ ri#88Psl\5598 6CMc>PPS㘃CMc56ȥPS㘃Cͱ ri#h5YZM68di#h5YZM6ɥdi#h5YZM6VPSPS㘃Cͱ 
ri#88Psl\8pl\56ȥPSPsl\5xp Gu8&p 3Ak G9wp 3Ak Gph}pc#8\>f8C͵c#8T#8\>f8CMe8T#8T#8T#h5ZMf8׺d#h58P d#h5ZMf8d#8T 8\>f8CM5C͵c#8T@e8CM5CM5C͵c#ȓxn^<b@-.j jƅ.-Džcʰ 1hXu  DB~:ȅBuzԠroAA.-:H?Gu yV]u & wBUWB:ȅCw¡rP] ۪\8UgUp9pr!Ϫg gG:$H\sJ\H\H\H\H\>q:cO{Kcrco>~P|-t?{?#>{_v.ޏ{?>ǔT:cvP鐏AC>fT:ǔT:cvP鐏>?t!J|jM1;ޟQ˘i1^wA>~|9P%H}d$8T@KAg#>ucccT2}K}??Kn~}y?+c{{{{&1:>1;Zj?Zjuju|TTcvPA?fzc,r|זEڲ1`co8W?~_>>Y%p\c# {H~俍'A~| HU? H-#/A\? _y~]IpɁjjxj~/V5 fTcvPKA-efy?m1-4>~5c|}fc#{sG~LMOk|36Aj9c|)s@el ϺǰC *c| Fv&>Mw3626*cj*cj*cj*cjj{pAɌMj2c| ZMfld&h59P߃Vd&h5 _&36AɌ́j*cj*c|;PPS߃7nTi{pاaJ5aJ5aJ5af}*}*}*}*ǃQ3d&h5Y ZMjVepjTsf&K5Ao_hj _H!fl e&>ˌM7/36Qo^fly9Pd&h5 5תPsj 5598\flCMo5גPS 5 5 5גPSPs-y 5 5גd&h5 ZMflVd&h5 ZMflVd&h5 ZMflT&8PSPS _9cjj{p2626Au&8PST&8T&8T&8\ flCMelC͵ b&h5 ZMflV O8cM ZMflVd&h5 5 5 566Psm9pmAhOa&&}1Fsb4Ae}1Fh<Mwc4AekmMpZ>_?59ɌM=Z<Ɍ́}= z2c}= z2c O9c| 036j~sjMj2c| ZMflCWPsd&8Psd&8 5598Psd&8Psd&8PS598T&8T&h5 ZMflVd&h5 ZMflT&h5 ZMflVd&h5 5 2626j~spMpM8cjj~sp1cj*cjm36269c36AMj:c36AMj:c36ÁJjm36m369cjj(yp9AJjm36MpcMp9c&36j(yj2c&36A~rMj2c&36AɌMpMp9cjj(yp9A8AcMp9cjj(JlMuq}lm7AMqwnm7AMq'Gn`dd&8\I6nCM5nC͵d&8T@5nCM5nCM5nVd&ȯܸ9p-'ٸ ZM6nVUZM6nVd&oܸ ZM6nCM"C͵d&8T!"8\I6nCM"T&8T!"8T!"8\I6nC͵d&8T!"8\I6nCM"Vb'h9Pb' pbxc' pT'8\7pCMpT'8T'8T'8\7pCMpCxc'8PSd'h5 ZMpVd'h5 ZMpVd@pVd'8T'8T'8T'8T'/_ 5 5 5 59PPSPS ?r'8T!8T!8T'8T'E ZMpVd@Vd'h5 K&pVd'8~[qU'ߖu[qJu[qY g'ȯ~1]dcr:A~mNp\1] qW 64aW' :&:AɮN48ݻjI::::A7 ht΁tN 8tׂW]霏돽o돽S:c{c[c51]⏽wc_F>67)-1;t9?t9J9J|*ǴT:cvP霏A?f11^ws~Lk31^ws>tο쏊 {އM>>/ccCP{eއ>~c|"c{-);O|}$c%{z1>F}K'w8AG| v;s">A;|8Pb#>A  p'>cPSPSPSPSCMZMF|Vbj2s&#>AɈρw&#>AɈOJ1h5 ZMF|ԸPSPS⁊NpN/8LPP'8LPP'8LPP'8Lt0AA0AA0AAxH))MAuVAdP'h5 ZMuAɠ΁. 
&: : i2/ĠNп:A߼ 'Cy eP'>ˠN7/:* &:Za0j:&I  IZ0j*s:::Z0j*j%::Z0 &:AɠNj2 &:AɠNj2 &:Aɠ΁ Zj*j*5uCM 55>8TP'8TP'׼Zjjh Zj*jm::1 &:AɠNg ZMuTP'&:AɠNj2   kĠ΁kĠNpA A 8Ap@_h0 ^a@eqW ^a';yYfqW ^a'8\+ fqC͵`'8T.5 5ZGmg3Y:P㴃㟩P==ہUU v`U(ہUdU(ہUoVX BdU@Z?T ZMVAɪPp kPpqZD*jj jjvp jEBikPp  &BAɪPj* &B* &BAɪPj*  3CMUCMU8PSUPSU5N;8TU(8TU(ȣ jjvpA  kĪPpPp9A BAPj* BAPj* BAPj*tBcЁcPp9A jmBcPp1*jj |p1*jj |p9A &BAɪЁ &BAɪP@* &BAɪPj*j*jj |p9A jmBmB*jj |p9A B AA~pPЂ1` -AA~ZpP߯8` -AA pPV9׊Ps 5 5׊PS 5 5 ZMV s8bd8(h5 ZMTY#h5 ZMV is8(h5 5U5׊PSePs 5U8PPSePSePs 5׊PSePs 5U5RKM5~#e(ho+:Pe(o1+ "NJtñeHe(o1+ "NJT(8\TƊCMŊT(8T(8T(8\TƊCMŊCHe(8Ps=R+ ZMƊVd(h5+ ZMƊVd(h5+:Pd(h5+ 5+ 5+ 5+ 5+ "NJCMŊCMŊCMŊCMŊT(8T(8T(+ 555+ 5+ #NJVd(h5+:P d(h5+ ZMƊDZd(h5Y۠d(82GqU9 0sUa(O9 0sUa(3GA~?0s`)8Rp\1 r@ zAn  5l Rjl &HA~RjjjjtjtHA~RpRpRpRp(t $l 'a)B HH{{{?޷ckc{_ᏽ揽ޗc2?}moSc[cvP A5~L A5>f@15>f@Tc'̏i 1;HZ}jW|O됏?}a;Ǵ}a;cenއu~8By}}cs:{+K{ >>)Ssc3{H ~Lͥ?|}cӌ{}dsctͥA6>f\T_VSWɭeʭj Zr)>rZMdj5P|ríJCMCMCMCMCMVd)~l5Bjl5Bjl5&[Mՠd)h5j:P%Psc)8T7)~WEݤ*&UQ7)8"qUM nR@uちnRp<0Av~`&&IAnR;h5M:Pd7מݤ_{vd7)_ݤo^vyM }nR}"蛗ݤo^vT7)h5M ZMvCמݤPsg7)8PSݤPSݤ50<8T7)8\KvT7)8T7)8T7)8\KvCMuC͵a7)8T7)8\KvVݤd7)h5M ZMvVݤd7)h5M ZMvVݤd7)h5M:PݤPSUPSݤPSݤ _ƹjjjjjjawCMU CMU T7)8T 8T 8\ vCMU C͵ b7)h5M ZMvVݤ C&nR/M ZMvVݤd7)h5M 5M 5M 56ݤ6ݤPsmMA6T~ ho0 FAp(ho0 [6<~Qp0 [6 e(=n@pڳaj*?pFAɆQj@jU AăGPT)8~E*ɨR'J?ȨR'JAߟ*}2pGAɨҁ&4MSpSpSp<<1Sp@p<&+OASj<&+OASj<}Pd)h5Yy 5Uy 5Uy 5Uy 5Uy +O*O*O*O*OT)8T)8T)y<||<<yɕd)h5Yy ZMV>Gj<&+OAsr)h5Yy ZMVV?kTp2%)-c*[T 1-c* PALɿOS?h]ǯI{nR`'ݤ hT!]|7?`^{^{cm$쵋䏽v޽?A^HkC{m9ݫc#wADADwwjU6-\ۤZU]wJ yT&U]2|!ojU\jUɡZՅ4ZՅ||VuPsT kU5 ~ŅV &jU _h5Q ZMԪ.U &jUZMԪ.U]2|DBZUݯp9^sjN]yḶԅھSknN]8;pḶԅcؿSͩ ǰ7.`8ͩ =ќj9uDsBԅZM4ÅVͩzg9uB띍ԅ l@4.BsB^4.Esז =xќЃͩ _/.EsB^4wԅZM4.jw6S5ͩ Tc5ͩ TݜpnN]8\S4ԅCws¡9uPsMyМpnN]8\S4.jS5הͩ &SZM4.hN]h5ќj9uDsBԅVͩ &SZM4.hNͩ ŅCws¡9u!?9uP]pP\8|7.jSӯS5 EݜpP\8|W(.jeS5 kԅVͩ &SZM4.wf5.hNͩ Yͩ &SZM4.hN]h5ќpnN]8|7.jeS Bs¡Z9u SwB{ԅ&ԅ(ԅͩ -Sr{SrS[4.hN]hoќwԜ 5ͩ :Cݜp3\8|.hN]h5ќԜj]gj£su/<:W_8.suՅ_;W \]g:Wsu \]g:WՅV _}D* xj\]h5}w.|Vw¡IsuPsͤйp k&ՅCߝ xp 
k&ՅC_8\3)t.j¡IsuPsͤйpѹjsuDBՅV &:WwBՅV &:WZMt.\]8|w.|Cw¡s|Cw¡s|Cw¡su!suP5?~Ps-йp kՅCw¡Xsu!TBՅTS :WRMu.\]H5չjsu!TBՅTSsuP&p9A\2H cՅCw¡XsuP&p9A\]8| ՅCw¡;LpPs,Թp\8 u.\]h5ѹ ZMt.\]h5ѹǜԹjsuDBՅV ՅCw¡XsuP&p9A\2H cՅCw¡XsuP&;W5a &:W:W>t.}\]hйԹmԹsuCBۇՅ<Յþw*\]8\It.j;W5t Uݹp\]8|w.\]h5ѹgչ $:WZMt.\߁ &:WZMt.\]x\]h5ѹp\8\It.j)5t @Jݹp\8|R.j$:W5t @ʅC5D¡;rPݹp\]h5ѹjsuHjeQށϺǹϺgfϺN._Cg]\g]33g]!|VpϺ`Ϻǹ Y5 |VϺpg]8|.jY5 |օCw¡z; uD>B|օV &YZM.g]h5Ϻj"CM.g]h5 Y5 |օCw>BR>¡;uPϺpg]8|糂|օCw>¡;u!O*uP#p\8|.jYĖYZM.g]h5 {$ZM.g]h5Ϻ'Ϻj"uD>B|VϺpܲw>q }ːϺз yK }ːϺз 4A>BS>BR>B?Mʼnjz-;?8kk&U(v2a4ʄ]8Ҩ(v!O)vH#UL؅?avP bA>__Y0zaӼ0zam95F޻~L޻({<콇Ƴ{˘;!Ξ{{o=콭ǔ;콩{t{1{92=vdz/F;2=vTz0;&avP2ǂ A&>Xw c^*=|U9=|E XOǂHA>< Xp9bN}PPSPSdz,h5 &cT$h5 ZM>Ij2=L&cWdz,h52&T0#8.aq FXp\3c|S{k FXpO3~'aAO6‚Vd#,Zl}Pd Xp\2E`, X{O'3`>'3`AOf>̀ߟVPs? 5 5~>8T,8TʀʀkV  5 5 5׬PSPsj 5 5׬d,h5 ZMfVd,h5 ZMfVd,h5 ZMf> Xp0Hp Xp X̜ 5 5 5 5 3` T,8T$8T$8\ fCMAC͵ b,h5 ZMfV :̀}P ?:̀&3`A Xj2j*j*je3`\ fC͵ b+h Z0>^WЂ`uŽ w`u-{]A ^W;^*z-j2ACMU&CMU&Vd+ȭVu&{]AU&VoK_A2>[Wp2;Wp2'&,}4a+ K_AW;AAWj|xjWpW;暃j'C5c+8OPPS?qj'C5c+8OkWp5PsX 5?Wj,}&K_AWjAVd+h5Y ZMVPSwăCMCM>5U 5U~>8T+8T+-.}PS?qjeK_~>8\ CMCͱ r+H5] RMTӥ t+H5] RMTӥ t+H5] RMTӥj*jeK_ Cͱ r+8T!8 CMCͱ r+8T*}J3J3cWp4Cp9A.}&K_AfZMVd+SE.}&K_AWj,}*}J3cWp4Cp9A.}}p,\ 52ȥPSiPs,\ 5fJ_ AWgf\ >c+\.}yfƥc+hX >Xp^:  }PPSPSPst 5 5KPSPst ZMĂVd@,h5 ZMĂVd@,h5j2  &bT@,8T@,8T@,8T@,ȃ` *  }PPSPS (: jTjTj* j* 2ĂVd@,h5J%AɀXj2  yd@,h5 ZM>Xp2Ă) -c@,[ƀXG 1 -c@, bAs@,#d4a@ ywϿ#㖍,[&2+jqFeknŠZpܲ57aE-8&ZpuE=={#TQ;={?釽a[ ca1~{~? 
avP*jAUf5M1Uf5C?1fUQ;jf~Լ k?at k?9atV{a-7c*޽r{7a+7ޣ_aG{o>gTX;콙^{{1{:vdaǔ9vda0;avP kgW ~P} jA)ՂZT_-tr_-ުs_-jV p_-ުjAՂCMaCMaV}d_-t}P}d_-h5W0AɾZjߛV}d%-8)?EL=e(S%-?AU҂cL=d%-QJZУd%-h5YI O&+iTO$h5G|+ BɄZp\!~ jA^&ԂLߛL=xP w=xP ZM&>Zj2LJ߾CM%ԂCM%>v@pZpXpOuǂ~;s Xp5O`w,8cq?<ݱ5aw,/I&cAXj;&cAXj;}PݱPSPSݱPSݱ ?1;j*pj*pj;j;OwǂCMNCMN>XpIpIp kXj;&cA~ uw,h5cA~uw,h5 ZMvǂVݱdw,8Tw,8Tw,8\kv>kXЂ;`}Pݱcw,h rcApw,h Z0vǂݱ ;A5/CZfw,8Tj^j^&cAX ZMvǂVݱz-G͋!3c%-hXI Z0V><@pJZpJZ\I TI AV҂}&+iAJZAJZMV҂VW7؃V_U҂CMU҂`5׌PSyPsYI 5*i1j*j#+i1j#+i?JZj&+iAJZj5cd%-h5YI ZMV><@j߿jjACMU҂CMU><@pJZpJZ۾]I 5552PSyPs-XI 5UI 52ȕ t%-H5]I RMW҂Tӕ t%-H5]I RMW҂Tӕ t%-H5]I RMW>JZpEp9A}p,\I 52ȕPSYPs,\I 552ȕPSYj*kj*kje+iX&+iT"h5YI ZMV҂V &+iAJZjjj*kje+iX r%-8 W҂CMe-Cͱ r%-8T⃪Z&ChA ZBBhA Z'B XChA ZЂ1` yb!L!#Ch12j*j#ChBhT-8T-8T-h5B ZMЂv@ /<jAisB-ȃCNL`BuńZ'P LjTB-8\+&ԂCM%>ZpZpZp^WLJuńZp|Gp^WL&jAɄZj2L&jAɄZj2L}P dB-h5P 5P 5P 5P 5P Lj2%2%jjT$8TB-8TB-NʔʔJJy dB-h5P ZM&>LIj2L&jAsB-h5P ZM&ԂV VRيV|-Z7 O9f|-Z7 >:4ׂ~1Aׂ]p $c )c= z 1w&sw^dQ"8.zEo" ˧c /^#-{[#-G6[xƦG6[xA ZMV-PS-}p^pճ5ե k.]p\Bg.8tqm ]ึǰ_/tv=٥ tA{vV]d.h5٥ ZMvV]d.h5٥t8ttA~xr.8T'8T'8T.8T.OP  |P]PSPSPs٥ 5 5<]d.h5٥ ZMv^.]jKAuh.]jK&tA.]p.]p.]p}p٥ ZM6*  mrAnp@.mrA\&0  >H)Cd@ ʁʁ&rAɀ\{} ZMVd@ʁ&rAdzJ鹠dz.8Ts!8Tz.8Tz.ȍhNJj. 
sA\j2=L&s]dzj.&sA\j\AÚA5\PSͅPs5 5\s k2=jj&s k2=j&s ?\j2=L&sA\j2=5dz.h5 ZM>Bj2=LJ?lj*=j*=A5CMCM>Bp\p\ 5\5\52PSͅPs- 5 52 tz.H5 RMT tz.H5 RMT tz.H5 RM>\pHp9AN}p, 52PS!Ps, 5 52PS!*=j*$j*$jesB"X9=L&sTH$h5 ZMV 8=L&sA\j2=j*=j*$jesB"X9= rz.8 CMDCͱ rz.8THJ &sA \gsA \ sA \Ђ1=`LyzL#s12=j*=j#ssTz.8Tz.8Tz.h5 ZM<׌鹠dz.h5sA\j2=Ly2鹠dz.8T'8\3FCM|C5cdz.8TJjjk\pfLjk\pOp\p\j2=L&sA\j2=IV鹠G-d.[h]зѺ-c.[h] Tuqz1ZlGi~t0;havPѺ&AMLѺA{?}~L젢uAEf5/?}0;IuV}hFy?Oqx=d0^OF{1M$T0iF~Ls8/cx;t;cޛbLޛ{o>콵~ާT;O {8콥;콡VG fxdx0:ޏ\tdx0:avP{A ftZ/ȫ^3J{\N;Nyݝ w9 ^30gzTZ/ǀzAAɴ^j2|q ZM>nPj2L}PݠdZ/h5 ZM=.A@ Sy1 @^p }5T /8y1A = ZMV{_Mj2At>vFp\.c/8zWHd/^Ѓ /x/xA^6PA^6V l&xxGPS PS Gd.8ǿs?٣ !]p d.!]C= ?Gd.!]jG&{tA]jG}P=PS=PS=PS= )GjjjGjG%CMtCMt>]pNpNpkN]jG&{tA~s.h5٣ztA~s.h5٣ ZMV=d.8T.8T.8\sj>]j29frJ} f39V',\7ɹo6sq+ɹCLbr.8\H&>\p Hp Hj29Lɹdr.h5 ZM&> Hj29L+?NT.͹= ᠾCp ec8(s.gc8(sA}d.]j2s&3w]d;&3wA]j]#A?;|PGC5e.8T!8\[fCM>]pCpCp;k~]pCpk~]p2s&3wA]j2s}po ZMfV&3w2w}P}PSPSj*sje3wT.8T!8T!8\ fCMC͵ b.8T.8 gTә t.H5 RMgTә t.H5 RMgTә t.H52w%X9s r.8 gCMEKCͱ r.8T$8 gCMEK>]phIphIp9Ac]j2s}Pђd.h5 ZMf<]j2s&3wA]p]phIp9Ac2șPs, 5- 52șPSђ*sj*Z-3wAr-3wAr.ȃ&-3wA ]Ђ1s(gC0e>fk]p]pf}PPSPSd.h5 x3w\3FfVd&3wA]j2s3gVPSPs 5U 5׌PS*sjjj#3w12sjj#3w?2w2wA]j2s&3wA]" ZMfV!&ly 'xAO6ly O2d-^G xT/n- 5 5 5[ PS Ps 5J 5[ d/h5 ZM6V d/h5 ZM6V  xA^jA5CMRCM5CM5`pKj2`Ly *`Lyb 8`L-A `' 12j#12j*A%CM%CM%V d0ÄN~p ZM&V *L&A`G ZM&CMŏC5cd08T(8\3F&CMŏ>`pQpQpfLk`pQpfLJJ&A`j2L&At0h5 ZM&V *~(~}AlyF} f>`pd0Au08\H?>`p>`p>`p^>`pLp^&A>`j&A>`j~p" ZMV}j*"jjECMEdCMEdCMCM?Lp>`p>`g 5 5 5 5 ,A>`j~Pd0h5 ZM<>`j&A>`p>`p>`p>`& yT}M`0h <>`G !> ;J"?%>`p9r"yT}Ps4E 5GV}PsE 5Ԏ}PsM 5Gb}PsTF 5Gh}PsM ZMV} Oq5c0h5 ZM?8*(yT}dyT}d08ECEq#>`p>`p> 5 5 5 5 5/t0A' :`uە{]?u]kNxc^a^wF{?:}kc!^/?''釽g9=?cvcvcv=3cv=/c'CA$DgctTA1:1:׹ޓ?f9?f9?F#<#c{{csw?,^;ǐ#cOvp׉?$w;G^[k{ct9? 
r"Gxػ r"G;G;G;G;G;G蠢\K(*x!v6^a ^x!ǪwTBWE/UTצj"*xk΅VQ;tDTB`pj"*xDTB` v\hR#*|.RQ _[(CwT!;t ZjD/Ԉ ^h5j"*xkwυVQ;|Z.kp\.Cqm׻ 9 ǵ}ZDB{/GB{/|m9 =#j"GxDBV9 &r5Yjs-wB?J^8|/jӀ5 i &ҀZM/'!/H^h5j" xDB4Vi &ҀZM4Cw¡; xPߊp.&]8|.jӀ5i Hi b҅Cw1)N^8|.jI5\i b҅C5DB4Vi &ҀsҀZM44Vi &ҀZM/H^8gH^8|/jӀZM4eH^[4(4,4eH^[4㖭Ҁi4~ x!I( xPp{\8|=.H^h5{&j" xDB4`j" xDB4ƣ4Vi x/Ҁ5y G-5׋?DžCw#Kw¡rx/j+5ߕ Cw¡rxP]9p^8|W/jrxDBV &* ZMTDžV &*Zߕ _ CwWC¡rxPs-P9pA^8|=Cw¡;qPs-P9p{\8\ T/j52 kCw@T/^h5Q9jrxDBap-P9jrxD0{\h5Q9jrxPs-P9pA^8|=.je*5y kCw#^8|=.j52 DžC͵ B¡rxPs,T9jrx!TBTS *RMU/^H5U9jrx!TBa]9pn\8 UcCͱ R¡YsPs,T9pn\8 U/j5w¡YsPݬp9A^8|7k.je*ZMT/͚ &*ZMT/^ȳA^h5Q9jrxDBCcCͱ R¡YsPs,T9 e*52H fͅCͱ R¡Y|W/j5ZMT/`^gߕ -***Z0T/`^hP9GT9p׌ kƈCw¡1rxP]9 +5ߕ V &*t*5cDBVrxDBV yO &*5 } y4Y} yN} C1#^Cv#^h5Gp9B+?khjڊ5GpE} >C͑]Q¡xPsGp9+#^8 /j 5׼} &ZM/Okއ>V} &eQBSB>bGGjxPS¡樴/j5}໏xPGp#^8|/j5}xd#^>P>A>a+{Gx{Gaca~@?{ㇽaacf5]?xT0;avPS0;9avP3O0;>avP}fA{>jR_>at} ?>at} A} A#FG<}"4>xT4/-?>a{>aޠc#hw{amv&>1_>a=wy,au?>au}FG1{;xdǔ:xd0;>avP}A#ft1 A]@ |ol bC{cct1!bY@ w& AU ZMV, & T[tVàd >apapjc&;AɎajc|o ZMv ?jՖ kLp\Fb08z#cVmd0aώaÞà=;μ=;A{v Vàd0h51 ZMv VP󟥦ᐂᐂzu08C A~cqp0h !v{JЏ7V*8 &Tp08g`p ㋃:L:LA~qp08T)8T 00kR`pSp& &A`j28V*8BVdp0h5 ZMC   ~Pd60 l` :Zg} l`_m zA? rAn p608T/#8T/#h5 ZMfJl`j2&T/#h5 ZMfV w8xzzT608T/#8T/eeee Al`j2&w h5 zAl`j2Qd608T60~PPs- 5zzZ1jjezZ1jez?l`j2&Al`j2 b60h5 ZMf?^Fj2kl`pAekl`p^FpAe|PPSPSPs- 552PSPs, RMgT t60H5 RMgT t60H5 RMgT t6,5 5 X9je"0X9j*je"0T608T&8T&8 gCME`Cͱ r60h5 ZMf?Lj2&Aq60h5 ZMfVd608g9Acl`pLp9A~p, 52PSPs, 5"0A2` C,ˀAr0ȓ/.ˀA;2` ,!?&,kR2`p2`p&,*~PePSePSed0h5Y ˀ\BVed*&ˀA2`j 6VePis08T(8T(8\BCM>2`pQpQp&,kR2`pQp&,***&ˀA2`j ,&ˀAv0h5Y ZMVel,?kl`К0A2ք5a60yl5a6l`К0Jg?l`} 5˕*j*j*j+z2j*j+Al`j2&Al`j2&\/WfVd6yPSPSPSPS*j*j*a_gCMcCMcCMeCMe&h5 ZMfV Ϝ:&Al`j2j*j)))Cl AsS0h !6vM -) <7~)A5<`j)j8;$?P`p9"%n S`p9R%n kRȦ`p&l Y`p9%n \`p&l &Aɦ`C)l &)7<`j)A5<`j)j()6qS08TS08TSj j j j j j cp0A :`}88xꇽ/akc ~~~~na1p_O? 
pp{~3?avPs젂Af51f5}?1M f<j~Ԥfe F<28c F<28xdp0<28xdp0:atgiL3젂ALU:kFS}g{{1{;Wξۡ{C/c6ޛ{o7=}|S~ cA{o;2Fxd#<2Fxd0;avbAL1bAsA  `!2sA6 A7?`Cι s^dn0usn0h5NA`j27AŝVdQ* ߻wEcQ* }* }* [$>`pا`pا,(}, mAɢ`j(| ZM?>Lp\>a q{| AlFٌ= 7?= zd3&Ah`j2V/8nzf/ͺ_4\ ff/ͺ_$~A?X~Aɺ_jAC}Pu tj*j*jj7CM咂CM>_p\Rp\Rpfo%kƺ_j&~A~ q/h5Y~A~s/h5Y ZMVud/8T/8g_pUA_ז7~A_[Uz3WP>rEÞ ?;yCM+CM+Vd/O&~Aɀ_j2A+Vd/h5 r~Aɀ_prEpH *W|PPSPS*j\j\j*j*j*j\j*j*j*j*j\ &~Aɀ_j2 yd*W&~Aɀ_j2|VPS;j*j*A+C͵ b/8T *W*WkĀ_prEpA *WkĀ_pA *W ~Aɀ_j2 &~Aɀ2d/h5Aɀ_j2je~Z1j\je~Z1j\ACM+CM+C͵ b/8T"8\ CMCͱ r/H5 RMT t/H5 RMT t/H5 RMT*jT/8Tc_p9Ajc_pVKp9Aj|PPSPSPs, 5j 52d/h5Z-Aɀ_j2 yDd/h5 ZMVPSP󟥦~~Z-X9 r/8 CMZCͱ r/8T j&~A;Ā_1C y 8C ~A;Ā_g 2jI!~~2j*ACMCMVd/y}pM  ZMV* &~Aɀ_ ZMCM5C5)d/8Tc(8\BCM5>_pPpPp& kRȀ_pPp& j   &~Aɀ_j2 &~Au/h5 ZMV1 &~?F_& lm}At/gnm}\O66 EA5F_pRSPSH7CMXCMXCM5CM5>KpF_pF_Gv 5c 5c 5 5 X}AF_jl}P9d/h5 ZM6^} ˠuT!x] AONx]_ z|2^j&uA~v.h5 ZMV&uAx]( ZMCMUC`.8T჊66|PPSՆPSՆPSPSPSPSՆPSPSPSPSPSՆd.h5 ZMVd.id6&uAx]j2^|4~j2^j*^|4~px]pxUm5 5 66kx]pjCpA6kx]pA6 uAx]j2^&uAx2d.h5 Ax]j2^jeuZ1^jjeu Z1^jACMUCMUC͵ b.8T!8\ CMCͱ r.H5 RMT t.H5 RMT t.H5 RMT*^jSjeu Cͱ r.8T$8 CMuJCͱ r.8T䃊cx]pNIp9A&uAx) ZMVd.!&uAx]j2^,5Yjeu:%X9^ r.8 CMuJCͱ r.8T䃊 tAk>]КO&y} uO& tAk>]Gܧ >׼}Psا 5է 5׼}PS}OjOjO&tAsk>]jO}P}d.h5٧ ZM<>]jOjjyt:OjACMuCMuC5c.8\>CMuC5c.8T_'8T.8T.h5٧ ZMV}d.h5٧ (tA>]jO}P}d.h5٧ ZM>0C&O yF} O }p?٧ <>է tAQt.6>]p>]pޟJ>]jO&tA>]jO&tA>}d.h5٧tR$ttAQt.8T$8T$8T.8TJy|}PS)PS)PS}PS} OO&tA>" ZMV}d.ȓ&tA>]jOJ|_u.8S.8S.hX ><]l},mvA[v.x.`d*yed.8C5w0~ 5G ePs0\ 5GePs41\ 5tePsM'Y 5GePs1\ 5G"ePsM'Y ZMVe l5d.h5Y ZM>8.yed*yed.8 C*}PePSePSe OljljljljljlteR,]b.vv/a~?aagaGa'a[c񇽟a7ӏ&wjrk|?a4)ac0a~ʇ٣a3ׇA͒X;&wdMT;&wdM0:Ța|&wdM0:Țat5 krϯfÇAq5W5SMDw޸t{#&w{Ca 򇽷ރ||krIw{a3jr7FY;&w{at5 kr?at5 krAfU;&wTM0;܏&_\ uM.!\0QU r&qM.߻?{`B&krAɚ ZM>`\%h !>`\p8`\p8`\ C C qA;`\Џ7Vd0. }P qqU2 + Dž_«d L`0.gqA? 
7L`0.h5 ZMVd0.h5 ZM~e/nA_>݂?]v c-Ͳ[ Ͳ[#ef-ͲUv 5Uv 5Uv 5Uv w-8T'8T'8T-8T-ePSyPSyj*j*jn<,&nAɲ[\v ZM>[_\v ZM݂Ved-h5Yv 5Uv 5 5Yj}pg >[gA2ق>&z_ق>[У}GlA>[jW`قV}d-h5gA>[j1}d-8T 8\/قCM >>[p0Ap0g 5&5&5g 5g 5g 5&5g 5g 5g 5g 5&AقV}d-h5g ZMق_ZM>0Aj&l&ll}PaPS}PS}'?>[p0Ap0ApA k>[p0ApAk>[p0&lA>[j}p-g ZMقV}*L&lZjelZj*LjelT-8T 8T 8\ قCMقC͵ b-8T-8 قT} t-H5g RMقT} t-H5g RMقT} t-H5gl:X r-8 قCMu:Cͱ r-8T#8 قCMu:>>[pNGpN;ʶVdۥso߱Dӿ!RMI@hE2 r-NG`QLg ZMT#jh5g rlV}@>[d-jXT-NG`QLg ,jX, , e>[`QSfXTAt|}@k>[5a-К>rlք}@k>[5a-;|g }۸}fXT-}5g{P}lEM&lV}@n>s6c-jh5g{P}@>[d-j=PZM5۸}2E6c-̃5՗ ,j/Xl>m>[`QS}fXT_&>[`QS}@>[d-jh5g rlV}@>[dAe&lV}f1ք@kx[ !:Ƚ^ZlW m܆xۃrm܆x[ 78XT-x[`Q=\o ,j*XlWm5Õ@x[d-j2h5o ZM&mVÕ@x[dAm5o ,j*m5U,jXT-xۃm5o ,j*5U,jXT-x[ :h5o ZMTm#j2h5o r[mV@x[d-j2XT-F`QSeUv ,jXT-jh5Yv =&nVe@ɲ[ w<Ƚ{.ZMT-{\v ,5KKQ5-!/EMT-[`Q5Yv{5Yv ,j&n&nE6d-YB.5KefUXlcM&nVe@n+u6d-jh5Yv{+\v =&nȽ{.ZMe[`Q,\v{PenEMwenEM*5Uv ,jln5Zl׮}ޏ]ǮCcבv~캤캢캠캞}.]Ǯ̰uu{i?pG{i?pEu?p~캛u3^:Ǯ=?{=| D/mK1:^ڏA~c|ctD/ zi?v'v1޽ܸ3@x̾Wnk]+nk]~ZxttB;tBސn[h5n[h5n[xlYh5n o9e#' )kO$DSvGNS֞H-,=}[XN9BM}[ o }7AmiDmDmDmDmDmDm l lUg[uζп'l } ζЗ l  } ζЗ lwm/AD--,juEͻζy5:¢]g < wmaQ-58¢ l wgaQ,,j l wgaQ \Pg[h5Qg[h5Qg[h5Qg[-u|:B:B:B:B:¢]g[XԼ< ԼlQg lWAm!xg *Cm!_xg *Cm!3ζoUg[ uPg[h5Qg[h5Qg[---YVuVuVu\:B:¢%XXl5,A]g[XԼ w -,jYE;Ky5:¢]g[XԼ wmaQ-,juEͻζyg ζjζjζjζjζB:[%Xh5Qg[h5Qg[h5Qg[x}}DmaQ-y5:[ wmaQ-:=5,¢%XXl 5,¢fζyg 54uE6 BmaQ DmDmDmDmDmD-MPg[h5Qg[h5Qg  &l &l m:¢fζyg 54uE;K٦A-,jYζyg 5,¢fζy54uEͻζYA-----------uEͻұYAil e:¢]XX, 5J¢fζyW::¢]XXԼ+ e:¢]XX, ZMZMJB:B:B:BnPmDmDmDmDmaQ-,jޕE2 RmaQt,,jile:¢fζyW:54HuEͻx5J@ZZZrl SBuuuuģ:BPg l>5۸uEͻζ}-,juζy5:B:B:B/S-Pg[h5Qg[h5Qg l &l &l &l YIuVuE; }-,jf%5¢ l w@faQ,,jql m܇:¢YXl>5¢]g[XԼl &l &l &l &l &l SuVuVuVu; jζjζ٦AYuu܆:BRm5A-=\Qg[hMPg[mu!:BnCTm!7Ԩζy5:¢f{ζy5uEͻ--------+l &l &lwmaQ.f,,juEͻζUg[XԼ w1caQ-,juy5:BUmaQ.f,,jŌEͻζyr[l &l &l &lw1cDmDmDm!ζjζjζjζjζy5b¢`[Xl }wmCmCm!)!!!`[y -}ܞB¢f<(y5[`[XԼl m8` lI5p6DmaQ '`[X, 5KBEҿPmaQ '`[h5`[h5`[ȝ $l &l &l%BnSmD--<ZM5KCEP-)y5[`[XԼl wmaQ-,jE;oWζe( :{W߿gm}dv}\݇c a>ڇTgv_+}}ݧ~NTYA0O 
9?0O7xj9쾒>9go;12sؽyi 0=+k +k +kSemdemdemdem嬬 ~LavPatr.ޫcyh5Yn ,=N΃=8[཰%К0hMg{PqlE*X4Q-h8g q@g ZM-Vqg kS:SI`9e8[`9e8[`9ej|q@Mg ݄q@Mg "ZM&lVq@8[d-j2A&2kiX Z/$&X ڃJBbb-k* kE?MM%J5X{PkEM%j ;EMvTb-N`QSf~0XTa'نLZM&&kV@spb-j2k|Zdb-j2h5X L5X ,jXTb탬=eE-o\Q{P@_H+j* @GvE-o]Q ĊZ/$V&+jV@uE-jh5YQ =|@dE-jh5YQ 2W&+jEMϊZ`QS45UQ ,jxAU5,jXTE-Z`QS*jEMU5UQ ,j*AV&+jV@ɊZdE-F{dEA&+jV@ɊZ@ɊZ`QSEMU=,jXTE-k\Q ,j*XT> ٦A5,ji+jEMmĊZ`QMXQ ,j*AV&+jV@ɊZdE-j`h5YQ =|@dE-jXl VmĊZ`Q54E6 bE-|5,j*Xl V54*jE2 rE-jH5]Q RMW+jT@Z tE-jH5]Q =Z`QS5f`X, Wi54jE2 rE-ƃ5U,jX, Wi54@ɊZdEA4&+jV@ɊZ 4h5YQ ZMV&+jEMUi54jE2 rE2 rE-YA5U,ji+jEM4TE-j2hJ ơ@Zp(-&0hJ rCCiI1`1XlC; 5ЎBi*XT(-PZd(-j2Ƚ^=؆v ZM&Ci*h5J ZMqȡ@PZ`QS-f1XC5r ,jBiEM\j5Ўf1XT%PZ`QSr ,j*XT(-j2h5J ZM&CinPZd(-j2h5J{P-@PZd(-٦AJ & ZС@r(-К0`{2hMJ fBT(-h Pڃ 5J ,j*XlW 5ÕEpe(-j2h5J ZM&CiV@PZd(pe(-j2h5J{PEM*[5J ,j*EM =E`QSBiPZ`QSeEM rCiV@PZd(A-&CiV@PZ w:h5J ZM&CiEM*[5J ,)XS(-1hJ <mCi@PZ -;x>T(-;J 5KqBiEMlJ ,j =؆ 5ppfN2X, _PZ`Q$,J ,j$CiV@PZ 7:`N2h5J =XrCiVJ <&CiEҷp(-Y=PZ`QSBi܁PZ`QSBiEM 5J ,j*J t܉@J 7:6>;>1҆rv_c /a>>S?~}La5k=@Bv,/3 žY*=@ɞY _cgh53 ZMԇ&{fV=@ɞY W8gh53 ,jCEvg,?5!3 ,jCEM}?Y`QS=zfEM}?Y`QS=zfEM3 ZM&{fV=@Hr,jgh53 ZMzfV=3 XT,ك`QS=zf\.Y`QS,jCE6 b,54=>Xl mĞY`QS {fV=@ɞYd,jgh53{M3 ZMԇ&{fV=fgXl ٦A5!fgXԇT,5!fgXT,٦A53 ,ji{fT=@Y t,jgH53 RM{fT=@Y tA^54=4=fgXT"YA5,ji{fEME/T,E`QSыfgXT"YAZM&{f*zh53 ZM=@ɞYd,jgh53 ,jgXT"YA5453 ,jg`gX, ^54=gh5Zʱ@kY w8vZ cgք@|q,w0l>mY`QSf1vXTA5; ZM)˱۸@YdA&cgV@Y w8vh5; ,jXl>5۸z,*vXT%K`Q; ,jqcgEMX5;{P=bgEM&cgV@Yd,j2v}ZM&cgVc ZMmYd,;; &=Zб@r,К0vhM; &r3cgY`QSbgEpe,Y`Q=\; ,jNXlW&cgV@Yd,j2vh5; ZMlW&cgV; ,jNXT,:E`QSbgNXT,كS5; ,j*v}5U,jNXT,Y :vh5; ZMT"j2vh5; r[cgV@Yd,j2vXT,:E`QM; ,$cg@Y 9vh; }>ٱ@ns,1vbgܞYd,\F`Q '; ,j*vbgEMm8bg$cgE6d6d,ن5K±fZ8vX,i m8Yd,j2vȝ=؆ZM&cgօcgܞYdA=ϱ@Y`Qt/; ,jcg*vXT,ك5; ,j*vXT,Y`QSbgr1vcgћcgٰ;va>ڇ;v_+}}ݧ~kx1Άwpw La n}vކwac{0_ e 0:;F;F;F;.cg cg cg cg cg=ӠoTldlؽqqcg+灱aa< y`l>b}N}jؽ/cm5lm[Sm5lE[ESmؽpؽ9`ؽpؽz`m`mؽuؽ_eؽc   }LQatQatQavPQavPQavPQavPQavPQavPQ)6*9Q@7q-^b Qe 5([ *GEd-^KQ{o .G&lVQe{PQ{mNM`-&0lE  T-([`1AQ2lI1h5e ks&lb[`9b[`9*:,:,Yl 
Ŷ@jXl Ŷ@b[dA&mVŶ@be{PQ@K([f-R3x/FԌZjFT-R3he{PQ@K([f-([ _8XT-O`QSQl|5([`QS5j>*XT'O`Q e ,jXlCF&lVQ@([ ߙ8h5e{PQ@s-j2h5e ZMF5U ,j*Avg=@/vݵZ fr`w-Зk|Zdw-oT] ZMv&k*8h5] ZMvݵ@Z`QSfSXTpAuޟ~PݵEM5] ,jXTp Z`QSݵkEMu |ݵ@Zdw-jh5] (w&kVݵ@Zdw-ˤ] =ZUkEMuTp Z`QSݵ@sw-@`QSfXTp ٦A5,jikE6 bw-] ZMv&kVݵ@ڃmZdw-jVݵ@Z`QM] ,jikEMmZ`QSfXTpAu 5,jikEMumZ`QSݵfH5] RMwkTݵ@Z tw-jH5] RMwTw-F`QL]{L] ,jikEM7eZ`QSfXTAuo5,jikEM7eZdw-jVݵ@Zdw-] ZMv&kVݵkEM7eZ`QOSOSL]{L] ,jikEM7eZ`QS] j6i@drZA5mjܿZ 7 8hV Li@ߤV{ V ,jjEMmhǴZ`QSiV ,j*XTZ-j2h5V *lC;&jViV LZM&j܊ZdZ-K`Q V ,jXlC;0:0:0:0#Y:FY:FY:FY:FY:v`Jg KgxX:X9, w y`lKg x\ؽ{ؽeؽtؽc y/7cum;kE]Sum;k3V׆?ڰ{0ڰ{i00ڰ{{+˰{uT]v-FY]FY]v/+FY]FY]k k k젪k젪k젪k젪k젪k젪kSumTu/h"@NK3K:_I{if 6'T-ld I{[ ;'UÁd LZoJ=$[ས'&0hd{PIbl%*XLP-$d MI@ߤd LZM&5=VIU;z=s 1b .ńb-WTp cnI1cnI1h5s =[d-j2h5Y] )>Vmk:@ZcuAUmkյU] }>Vu ȗ35U] ,jXTu-Z _ӸXT'σ5 ,jXllVj53յ@Zdu-j7ZMVTu-/]] ZMV&kVյkEM|&j2ו=@gX-,V{PE@ϰZ _;xmȰZdX-j2h5V rmjVa@ɰZdX-j2UYZMTX-˽V ,j(XTQ Z`QSa@.tX-@`QSEf1XTQ ٦A 5U,jijE6 bX-V ZM&jVa@ɰڃmİZdX-j2Va@ɰZ`QMV ,jijEMmİZ`QSEf1XTQA* 5U,jijEMmİZ`QSaf9H5V RMjTa@Z tX-j:H5V RMTX-F`QLV{LV ,jijEM7eZ`QSf9XT}Ao5U,jijEM7eZdX-j2Va@ɰZdX-KV ZM&jVajEM7eZ`QSf9`9X, o54a*Amj܏ڃ*  a@.wX-&0hV rjI1`1XlC; 5Ўaj*XTX-ZdX-j2 R=؆v ZM&j*h5V ZM-a@ɰZ`QSf1XT%نv 5~yPa/EM_mhǰZ`Q V ,jXTX-ڃj5V ,j*h5V ZM&jVa@n&tX-j2h5V =KdX-j2Xl mvZM`;d;-&ȍ}nri6@vZM`;-N }b;-vZ`Q=?N ,jXlO]5@vZd;-jh5N lZM&iV@vZd;A]5N ,jȍ}n5,j*vXT;-vڃ]5N ,j=n5,j*vXT;-vZ 7h5N lZMT"jh5N lrsiV@vZd;-jXT;-E`QMƚ 5V ZMa@ɰZdX-j2}rjVaV <&jEMu;mɰZ`QSaV ,j*XlcV{5V ,j&j&jE6dX-Y5Kafv8XlcM&jVa@nuX6dX-j2h5V{<V <&j*xZM%Z`Qt=V{PajEMajEM 5V ,j*XTX-Ё0bX-Vvavaj6쾢zjy}1iD1ӆgpl Ɔ~LCa=e}KvQ7ac !ذ{70_e x0^}Latatata26262626261AE2҆[X3rHvc@0k҆X7 lWvvvUv=vTk% k cm cm] bSmdmؽ!aؽFlؽc . 
{ }LazatataRatat)62626*6*6 cmSmTmSTmTmWyh\y [཈4+o+Օ@^+?[R/+o@[d-jhXy WVm+oXLP-[AU Ty +oI+oV@[(jJ+o6bB{\XLP1&W㊕b1d-7)V}b-7)V&+oVUy ZMV>Ȑ[."Ђ1hr EZ0-Cn*hr ` =[c-Ђ1xxP!@(r-[`QS!BnEM!AEMT-vP`QSf{,3XT;( ZM&CnV!@Kr-j2Bn|[d-j2h5r 5r ,jh5j &ߎ"@wAE -5[m|WV[ ֻhj l[m|kV[ %h5j lZMT jh5j lr)[mV"Eve-Hj5),jj5),jXT-V[`QSZmEMj5j ,j*RA&[mV@V[d-Krj lZM&[mV@.s-jZm\%V[`QS"EMjr[mEME 54"E6 b-HA`QMj ,ji[mEME >V[d-jh5j lZMl &[mV)lZMmV[`QMj ,j*RXl 54"XT HA`QMj ,jXl j54ȭ@V[ t-jH5j nRM[mTӭ@V[ t-jZmEM=eVۃeV[`QLj ,j*X,  z54ȭXT#G`QLj ,j*X, &[mVlZM&[m\V[d-jh5j l5j ,j*X,  z54ȭ4ȭfXT#YAn5 sl>̱rslC[ 85αd3[`9m[C̱rOslEMʱ5c ,jslEMT-[`QS9@[d-;c{ޘc ̱ZMT-j2h5c ̱rslV9r2E6zc-L`Qޘc ,j*5 ,jslE6zc-L`QS9rl*'XT-[d-j2h5c ̱ZM9@[d-j2r2V9@[`QMc ,jisl699@[ 9=Pαmsl69@ns-Mc M9rlEd-[`Q=?c ,j*XlO&slV9@[d-j2h5c ̱ZMlO&slV9c ,j*XT-[ 9XTA%2ʱ5c{PrlEMs9EM%2ʱ5c O&slV9@ۃJdZM&slV9@t-j2h5c ̱ZMʱ5,jislXSq-hX\ >:mkŵ@Z w}t.>Tq-\\ ,5,j$kEMTq-Z`Q 'Uq6dq-ن,=؆,5pŵf|X,%Z`Q 'Y\ ,ZMŵpŵ@ZdqRpq-\\ ,=Z ѹh5Y\ ,jkEM*5U\ ,j}t.5U\ ,jXTq-Z`QSŵkEK:9@c 5ȱud:?v;cAuusl?v]?v]!ٰ;c؏]7T˴aXǮʏ]n)?vQ~캡 Ga?f1؏]rǐ\1:ڏy$׆ɵHDr k?{k?F\1:ڏA$~"ca@{ ɵv5kʱFrǮM{?cڏX#c~󱾓k!6NصUǮ?vm9k ]NɵyDrǮ?F{C'rl?vJkذ;c96$صFw~Zc9{D'rl?v-1۰;czȱDǮ?Fc1:۰;ct9ȱsl?fۏrDmXG"cvα8x~9ǔc 6 yhc[x-"]ȣB^H2U-α-䕪B^ʱ-VDmRo!/X^ZMZMBl^ d'ŵ컸컸Zx}>@>B߇P\[h5Q\[h5Q\[x-0Zh5Q\ 3H퉄BbB{"p] ŵńDBqma1N DqmoR(-M ŵIjjxZMZMzКZݰККxwZtZtnBknBknkuCM[ȷ=-,jݴEͻyw5nBQ7maQ,,jyw|5¢f{yw|5ݴVݴVݴVݴ|!nBnZM[7-----,jݴEͻj"o1F -h :]i "ԕF[ȷJ-H-H-uh WiViViVi j"j"j"K>F[h5F[XԼ 햊4¢h w`aQW%(y75&¢F[XԼh wmaQn,,jiE;y54¢4B4B4B4B4B.QmDmDmDmDm!r)j"xr5h w`aQn,,jiE;뺔F[XԼ w`aQMF[XԼ m4¢XXl 54iEͻ 0iViViViViVi6 BmDmD-n,H-H-,jih m4¢XXl 5_ m4¢h w`aQn,,jih wmaQMF[XԼh e4B4B4B4B4B4B4B4B4B4B4B4ZF[XԼ e4Z`)YAJ-,jqE2 RmaQk,,jih w\#N-,jqE;YAJ-,jqE2 RmDmD-k,H-H-H-yZMZMZMZM54¢XX, 5¢f)XAJ-,jih w\caQLF[XԼ?ttriw0`M@7m!7}+M[hM[hM[XLhC;t&nBn>S7maQ-,jݴEͻنv-,jݴyw5nBnBnBGR7- M[h5M[h5M i &i &i &i EݴVݴEͻنv-,jݗE6C7maQ7Hyw_5¢fڡنv-,jݗEͻyw¢M[XԼi &i &i &i &i &i wOݴVݴVݴVݴjj٦A-,jiݗ-&+j j wOQ BkBkBkBSTm!w(w0D5¢f{"yG5QEͻ--------+j &j &jwTmaQ`,,j+jwTm!)yW05 ¢U[XԼjwcaQ-,jQV¢]XXԼ+ 
wTmaQ-QEZMDZMDZMD BBBBUTmDTmDTmDTmDTmaQ-,jE6 BT--,QQQ܁BۇBۇBۇB[VTm!w)!xGrj &j wcaQ 'U[XԼjwTmaQ-,j$jm8U[XlKCQEPTmaQ 'U[XlID5KCQEPTm!7*6QVQZ`N"j"j"X-U[XԼj wTmaQ-,jQ܁¢U[XԼj wTmaQ-,jQE;yGTTmSW-7:6>;>1EՆu}LWbjK}}Laoj5y uv17a}c}v57i5^k5޲;^k16^36^31ӆAӆAӆAӆ~p0:p0:p0:p0:pڰ{4fNvcpʱf8mؽo5ixNvcpڰ{__cvov/ vorvoZvoqbl5cl= 5cl5Cmڿw/vovLBmx=0626^d1چ݋%]چˇ]چ]چw]>P۰{0:P0:P۰{m0:P0:PjFjFjfjfjf'CmBm?AچA2T'ơ{aj Cm<> Sy:BmRj P[ུ ޫy:x Zo?4O6Cm> ދ}j fAV-l!*XNBm1A}b-j2h5j ދ&Cm`{"1hj ,&'Cmd P[`1=j ,&%A}b-7)}b-j2h5j{P@P[dQveɢZ'jɢZ'jɢZ`9*KTQ-\T EjEM*򅐋jEMFmiܚhZ 79hM{PѴ@ns4-j2XTv#ن5M{PѴiE6d46WѴfNhڃm8hZ`Q 'M ,jiEp4-YR5pѴ@hZd4-?M{ 'M ZMF,mGѴ@hڃriVѴf|8XT4-hZ`QSѴidhZ`QSѴiEME5M ,j*XT4/t,?avav)}6>4#3fݗǔ>v_c k}aE1ݗ v_{}yLcavW#aavWáyatyaty)o62o62o62o6,͆A͆A͆A͆A͆{?a0:L03f>V3e}rx̘)cLٰ{0^̔ n~6zeL DŽǔ0~L w w|L abaat at at/̛ yaa2)o6f5fSlؽrqflؽ{flflؽvfcʛ W+̛ ̛ *̛ ̛}LyatyatyavPyavPyav[7X2o6*o6*o6STlg<(p;hZ05Ѵ@uG+QiM Ѵ{M W6hZd4A%j}M f4-^hZO6i*XNid+x yPѴ@߇M }Ѵ2iVѴ@hZd4-^h5M{PiѴ@hZ`1=M ,&(XLhO$F hZ`1A2iI1iVѴ@hڃZMF>.ڃe-Ч]@2vT-Ч]@2vT-Ч]@ME W/<.Z  hEMu5E $w5yP]">EME|A.Z`QSf{бh5E ZMv]@.ڃ]hV]@.Zd-jXT-0{P_` Mf@Nw,/0 ` Z0Z@Xw,0 ZM&fV@ɀYd,=0 5f1`XԗT,/50 ,jT, 0{PfEMr`QSfEM 52`h50 ZM&f\Yd,j2`h50 rfV0 ,r`QS_,j*`XT,K0 ,jEM}?٦A 5f1`XԗmĀY`QM0 ,jd,j2`h50 ZM&fifV@ɀكrd,j2`Xl mĀY`QS_,jifEM}?٦A 50 ,jEM}?٦A 50 ,jifEMeY t,j:`H50 RMfT@Y t,j:`H50{P*E2 r2 r,YA5U,jifEMU.eY`QS0 ,jrXT"YA5U,jifV@ɀك\ZM&fV@8w,j2`h50 ZM 5U,jifEMU.eكeY`QL0 ,jrX, &f*`mf6@p,0 -@p,7)lC;mhǀY`QSf1`XTA 50 ZMЎ@ɀYdA&fV@ɀY n8`h50 ,j*XlC; 5Ў,*`XT%K`Q 0 ,jfEMX 50{PfEM&fV@ɀYd,j2` oZM&fV` ZMmĀY`QM0 =؞ @nVr,&0`h0 rfYoR &ŀY`Q=?0 ,j*`XlO*N5@ɀYd,j2`h50 ZM&fV@ɀYdA*N50 >:*N5U,j*`XTA' 50 O*N5U,j*`XT,0 ZM&f8h50 ZM9@ɀYd,j2`h50 ,j*`XTq"٦A 5U7 ,jnh5Y7 rfVu@ɺYd,]7 <&fnxZM*k5XufnXT,ƚ=ƚ_gnXT6d,ƚ5KufxnX,%mɺYd,jn͡=ƚZM&ff܁YdAu@ɺY`Qd>\7 ,jnXT,Y`QSu@s,Y`QSufEM5U7 ,jnx̆݇`}`Lao?w0c a%1メ:?x}a%<쾂?a0Aϰ{0_ƀ0ˀ0ˀ0F0F0F0vJF0F0b\ f ffdlf+nja>a 6\|ȳYར5'@嬨3g9|ۃg|x/ ,ZMTg)'@lN>,d|gd|XNgU> }@߇X> g|h5Y> ,ZMNVOz=X> ,b0ńDb,ОH, gI|gI|h5Y> ,=Yd,ǖ@[&T,ǖ@[&T,[%}3Qx/AxP@!r,З=e( ,j*QXT,( ,j*XTA%5 ,j'eEMezDYd,j2Qh5( 
K$'&e*QȷNZM&&eV@DY`QS2=dA}?Ђ1QwN=oZ0&܉@2w,Ђ1Qh( s'܉@v,j2Qh5( L=oZM&&eV@p,j2QXԷDY`QSPXԷT,OS󟦦XT,DY`QSXT,DY`QSeEM}LZM&&eV@DY ׷8Qh5( LZM&&e\DYdA%ʉXԷJ5( +'6`QS,jieEM}?٦AL5mf1QXl &6( LZM&&eV@DكmDYd,j2Qh5( L54f1QXԷmDY`QS,jieEM}A%6`QS,jieEM%mDY`QSf9QH5( NRM'eTӉ@DY t,j:QH5( NRM'T,E`QL({L( ,jieEMu,eDY`QSf9QXTA%X5ձ,jieEMu,eDYd,j2Q:V@DYd,KÝ( LZM&&eVeEMu,eDY`QSf9Q`9QX, 'X54ȉ ({P@w,&0Qh( LrKe\7DYM`,ІvL=؆vL&DY`Q ( ,jeEM%mhDY`QS( ,j*QXT,j2Qh5( n'lC;&&eV( LZM&&ecDYd,J`Q ( ,j*XlC;&J\! }@߇X! .NWr?+dG$+dEM! Yd,jBh5Y! ZMV&+dV@ ك Yd,jB*dEM! ك Y`QS*d XT, ك*C5U! ,jB]5U,j XT, Y 7Bh5Y! ZMVT"jBh5Y! r+dV@ Yd,jBXT,2D`QMX! ,jBXT,jBh5Y! &;W&+dV@ Y &BMvZMVT,\! 5U! ,j&+dEMUT, Y`Q5Y!{5U,ƚB`kBXlcMV% Y`Q8\! ,j+dE6d,jBh5Y! OWlcMV&+dVK@ns,jB*dd Yd,YZ5U! ,jBXT, Y 7ٹBXT, Y`QS*dEMU5U! ,j[R\>x :`6>xc7>tSl}m}؆Gc /a1  [4nvv_c1 /avWavWatat)`62`62`62`6%̆ĂĂĂĂ;?A0:Ȁ03>Vf]tx0cƀٰ{0^ 7v~L"]aaY7f}|?_:ލ1͆˼[Qz`l͆z`/W!)n6fgSl?͆y͆?ٰ{=0:ϸٰ{ 0:ϸ0:ϸٰ{0:ϸ7vF7F7v/F7F7f f fff`2nrelTlTlTl~{=X ϒg@(y\> Wl|Yཊ62ufT,wyp,^2A&gz@ك|2nɸY཈*q@OT,OT,^f0n[ fd,з&fVq@ɸY཈*j2nff6qbB{0nXLPJ&:CqbR2d,7)}b,7)&fVq7 L1Qe>BL1Qe)Q苗{}ƒJex( DYt&/eEMezT,LO`QS( ,j*XT'ٞ+L5 ,j eV@DYd,( L=DY W:Qh5( LZM&&eEM%|@kX5ah,К04նCcք@kX _p;4WCc14h5 =/ZM&CcV@ph,j24XX`QS_PXTh,/5|Th,X`QSXTh,X`QSBcEM}a ZM&CcV@X ך84h5 ZM&Cc\XdhAɡX 5 B(~`QS_,j*4XTh,{M ,j E6 bh,٦A 524h5 ZM&CcV4@XdhA}a?j24h5 ,jiCcE6 bh,/54Xl  5BcE6 bh,X`QM ,j*4X, CcTӡ@X th,j:4H5 RMCcTӡ@؃ 5U,jiCc_w\vJ~>~[ODn 5ȡjEX`QS5fy rh,Ń 5U,jFX,A5U,j &CcVU ZM&Cc܉Xdh,j24h5 5 ,jFX,A5U,j ,A5kCcEM(5ȡ24"rCc6@XM`h,  nmCcńchch,)miX`Q- ,j*4XlK; =X`QSBcV@X O84`[14h5 =Xdh,j24h5 y&CcEMmiX`QSf[14XTEA58=ؖv 5Ҏ**EM =J`QSBcV@Xdh,j24h5 )1&CcV@؃ZM&CcEX`Q14XTE fh, "a34@ Ccذ ݰ}Lav_aev_)46쾝?5аfv[cZ oa}<쾍w&V>[xT0lԢc0ld0ld0c atatatataÏiq15>ogk}m  㿝aav.  
7`߅ᯏ)5>W7{=>2>9>1ۮGCG=~`k'j|LQa0 6U>(0 (ذ{v]QaʟQavoQ) 6ޯ82 62 6ު82 62 1EAFAFAEAELfc(0;XɌ}LQavPQav?v𿿉?~.~ΉuN,ȏ9@~Ή{e9;'O9{l`UvyP9@>s S9@ɜXdN,h5Y GbX?2PX Gb؃*LŰxo9yPŰ@?MX ӄŰ aVŰ@bXd1, h5Y {PŰ a6ŰbB^a1,K`1}XLh+,Tv a~Ha~Hh5Y ,=b E`AE} E`AE}2 Q{/wG}2 Q{KOF/>5 ,j*`EM%uJ5W`EM%uQ@(Xd,j2 ȟ# =(X t,j2 h5 ZMF5 `քQ@k(X5a,?^; ȟ &ZF#`Q@(X`ZMF&`j~d,j2 h5 G&`EMM,j'`EMMPQ55 AEXwAE5 ,jj~`QSQ`EME55 ZMF&`VQ@(X 8 h5 ZMF&`(XdAE5Q55 ?(X`QSQ@nr,iEMM,j* XT,[{1 X4f{ b,^ ,jj ZMF&`VQ@(؃5Q@(XdAMZMF5Qf{ b,iE(X`QS5QAEXT,^ ,j* XlA5 ,j G`TQ@(X t,j: H5 RMG`TQ@(؃5U,j G,A5k`EM#5QE(X`QS ,jXT9"Y^ ,jX,AZMF&`h5 ZMFQ@(Xd,j2 h5 ,j* XT9"Y^ ,jX,A=X^ ,j G*G5k`d,> 6mGm`6Q@(X O8 mڎF mi(؃mi(XR5ҎQf[1 XT,ٖv5{PQ`EME&`VQ@nqc,j2 h5{PQ@(Xd,j2 # ZMF5ҎQ'EͶc,Ƀ5U< ,jǧm{- ,j`EMO5{Pœ`EME&`VQ@(Xd,j2 ȃ`ZMF&`VQU< ZMF5Qf{ b,Id+g !wyϓݯ@~L_ۅtKͰKͰ>ۅEx75¢vYh5 [h5 [x[h5 [ Ͱ [xZ Ͱ a Gv7n-Ͱ~O4O4ZM4ZM4ZM4^;ZM4fX`^A3lM@3la1}p_ a {Ͱń;2Ͱ~H)4!fBfBf@ĽBmZ-^wko$ĽFBka {-={-􍄸BH{-,􍄸BH{qE;yq¢YXԼ8 ¢YXlxĽZMĽZMĽZMĽHŽZMĽB(BBBB¢ZXԼ8ZZߠZ_ZZߢZ_ZkZ] BkD+Mj"ѵj"ѵj"ѵ[$Zh5ZXԼg/,j] l;ѵyϦ_XԼgމE{6¢=~aQOSMy'5DW=~aQNt-,jމE;ѵy'5Zh5Zh5Zh5Zh5Zȭ&Jt-Ht-Ht-Ht-Ht-'%ZM$DB~RkaQMyϦ_XԼ] wk!A)ѵyϦ_XԼg/,jމE;ѵ lAHt-,j޳5k] 5E{6@$ZM$ZM$ZM$ZM$ZM$k] &] &]lVVED¢f{ BkaQM^ZXԼg/,j $5sJt-,j޳5D¢f{ BkaQNt-,j $5D¢fy Rk!Tk!Tk!Tk!Tk!Tk!Tk!Tk!Tk!Tk!Tk!T+Nt-,jEDW`y RkaQ)ѵyw5k] YEͻ㰰ywD¢qXXԼ; 5HEͻ㰰Y^Zh5Zh5 ; &] &] &] }YVVVVE;ѵyw5k] waaQ)X^ZX,AJt-,jz%5 ] yC2~Bk~Bk~Bk~BWk!LߵКߵh}wu] m݇~W`[ߵ}w-,jEͶCkaQwEͻߵyZMZM]m݇~B~B~WZh5Zh5Zh5Zw-w-,jEͶCkaQ.,,ju] w$w-,j咅Eͻ\}w-,ju] wdaQw-,j\y5~B~B~B~B~B~BRkDkDkD+.,w-w-,j 5k] wdaQ!gZȃC{-􇍸B؈{-􇍸BiSk!Og(6^ Bk![*gZȃC{HĽ5¢fDkDkDkDkDkDkDkDkD+}E"j"j"xǽ5¢fD+}E"yǽ5W\XXԼ^ w+[*y5BZXԼ wpaaQ{-,jq< BBBW\Xh5Zh5Zh5Zá{-{-{-{-{-,jqE;^ZXԼ] -] -] yN   <}DBSkC+Nt-9:%هD¢yXXl+F$5DWZXԼ] mňDW`[1"ѵVHt;ѵVKfW(ѵY Jt-,j] mňDBDBDBT+Zh5Zh5 ,U %] &]wk!)ѵj"ѵY Jt-,jމE;ѵy'5DBSkaQNt-,jމE;ѵy'5D¢ZXԼ] w+Jm]7vK1%n}݆ݗm}>Dװv]u}~Lka:WݷNvߨf}gJt gZcLt gLt gLt}La􌉮atata|0525252525>81-`1Ɇ0=v!{9a,ǔvWvZvvvrZ @SegNkؽ=R[0=R[x05 S[Sjkؽ#pݛ^ajkݛTA>ְ{0:0:ְ{0:0:ǔFFfff󚩭5S[`y^31AABͿI2}|W8xo hX#AEh; 
7kq4,a@%5˂Ѱ.*XT%Kd4,j2xD{< GhX?2FZ Gh؃LѰ)xoyPѰ@?M ӄѰ2h5 ZMF&a@h؃=ؾW  ae {ѰbB^a4, CѰ@? CѰ@hXd4샌=hXoh؃Fb4,7a OOFb4,7a@HFb4AE5yPѰb=EMzhX`QSf{3h5 ZMF aVѰ ϤZMF&aVѰ@hX`QSѰb=d4,6a ⎆wqGa3Ѱ@:hX w4,<;a~1h5 {P&aVѰ@hX 7m8h5 ,jjZ~`Q= ,jjZ55-?i*XԴ54545 ,j*XԴaEME5 ,jjZ ZMF&aVѰ@nq4,j2h5 ZMFѰ@h؃raEMM,jjZ~`QSѰahX`QSXT4,hX :`{ b4,iEhX`Q1XԴ2h5 ZMF&aVѰkaVѰ@h؃h5 5kaEhX`QS5Ѱ5kaEMMOS1XT4,^ ,j*XlARMOŽ@~*eSq/+{YT , |`zY|`{Y|`?3 ,jXT7!Y ,j.b&5MxP EMu E^Vd/+j V@^Vd/+;w eZM&{YVzYEMu E^փ fyp/+Ǜ Lb&T+ Lb&mXܫ$V 7;h CI@|q+{՝ ,j*`[1XlK;&Jb5ҎIX*XT+$Vd+j2cNb=ؖvLbZM&&X*h5 LbZM&yI@$V`QSIf[1XT#ٖvLb5xPIzEM>mi$V`QSIXXT+$փ}5 ,j*h5 LbZM&&XVI@9r+j2h5 Lb=Gd+j2XlALb5kXEM>5I@r+b $V?l&a3[Nb쁓X sI@&t+' 'l_Lb5 ,jH&&XVI@$Vd+j2h5 LbZM&l_LbZM&&X*XT پ"z}E2XT+$փ 5 ,j*EM%Jb0XEM 5 ,j*sNbZM&&XVI8LbZM&&X<$Vd+j2h5 Lb5 ,j*pXlALb58xP@^Vc/+G }e>m{YH6O^ְ 0+lb Maayɏ5>6޶<>Z1>*9>X15<5ݧ$;ݧ}>0z&ְ{0zG-a歏5c SKk1cKkؽcji =Ɩְ{0cli =Ɩְ{w0:ȖvoFFvFFZZ [Z [ZZZZZ`yγgKk,y>Vli ji jic_;:rYޤ@^w=~W oEy3xo}P{\ H偼) m~W`QP ,jE]EMuVT+J`QS@~Wdt+П ]T wSa+П ]*X>%͌~`0]~`0]d+j2h5 LtZM&cVz}u0h ,&& ʷW]ńDW`1A2]~H1]V@D Qm?S@ mv`h+B[ʡ@ mv`h+ޖہ@ m=V`QS7*XT&M`Q= ,j*pXlOj&C[V@V Zuh+j2B[}ա@Vdh+j2h5 ,j*XT mfh+6C[9ݡ@V?la3ȟ m'kbh+!&C[j|dh+j2h5 n&C[EMM,jC[EMMP&55aA0XԄB[E?MM m= EM m5 ,j*XTh mZM&C[V@V w8h5 mZM&C[3VdhAq̡55f?V`QS@n!sh+1EM,j*XTh+z1XԘf{ bh+^ ,jj mZM&C[V@փ5@VdhA mZM5f{ bh+1EV`QSc5?V`QOSS@nthV`QSfy r*Wѫ@^!Gy r*\!Ey:zțѫ U o^Gy:zM^5UF,jX,qG*#5jѫ*zXT!2B`Q ,jX,qG&WVѫUF^ZMF&WUd*j2zh5 ^5 ,jX,qG*#5jѫjѫfY;zXT!YV^FT*[l ^FmWU :zh Cѫ@q* ,j*z`[1zXlK;F^5ҎѫW*zXT*Ud*j2zȃ ^=ؖv^ZMF&W*zh5 ^ZMFѫ@U`QSѫf[1zXT#ٖv^5UxPѫEM=miU`QSѫWXT*Ճ*z5 ,j*zh5 ^ZMF&WVѫ@q*j2zh5 ^=Gd*j2zXlA^5kWEM=5U Vvyb]@ǮUc*ksZĖV]յ -w`d*U`Q-'ٵ ,jkVEMum9ɮՃm9ɮU`Q-'ٵz-'kXlɿOQ4ܵ ,j샻VER~p*ٖZZMv&V0T]؟ ao*M2>0>1%Ćݛ>հ{埧հ{_ǔ&vvWvoiFǘvoTMvTx1A5T Sjؽo1&{cj1&{MA&>հ{30:0:հ{0:0:ǔFFfffs <T`y3AgjTjTl T֥@^og ^;{xo uwAey;{W w)U`Q, ,j5WEMKeaU`QS ,j]XT$j2{h5Y ³l ϲUg,e@_xT*\x팁~&lV~&lgVd*jlh5Y ,[ZM=cVeUz};lhX ,&o ڷVńU`1AlV~HlVVe@xU r*3^h WT*Z9^h W{omy9@9r*:z}1XT*پ˘ AZM&sPV9@Td*j2h5z}1h5 A=T`QS9wsPE]T`QSA5zPA5 
i8XrPEMyP9@Td*j2ZM&sPV9@s*j2h5 AZMA55?ԃA55AmsP9@r*1h }A̩sP<=TcAyz9@? ,j*XlIA=T`QS9f[N2`[N2XlIlɿA5ro(|S,%T`Q ,j$sPV9@T 9`[N2h5 A=X AsPV9 )&sPE͒Zp*T`QS9f[N2XTAA5 ,j*XT*T`QS9rPEMT_砆g}yWcA Ͱ ̰|L9a1-VwǴTvs[n} oa1-Rw00?00ǔ|F|v?A&>5ɰh0:0:ǔ|F|F|v!3KSfi}jؽv}`}*c, ^  |LaaفaѽҰLǔvhvvovg3KFSfi}_>0c, =Ұ{ǔYvo{{aNaǘY{aNat)46242462421eAfAfAeAegfc90;X,}R`U,v`f)| aAfbf)!fbf)j2h5K %[ƒ-cI*{K %[ƒ)@{X҃%[ƒ%=XR`QSy&EHe,)I`Q=RK %ZMƒ&cIYұ@X҃%Iǒ&cIV@XRd,)XR`QSy2菌@XR Rv,)cIK ˎ%eǒcI~0 cIX҃h5K %ZMƒ@XR`QSSiXR`QSST,))EMM P55%^K ,j ƒ Aƒ&cIV@XRd,)j2`{ b,)j2h5KzP#&cIVf{ b,)^K ,jj$|`Q1XHf{ b,)*XHâf{ b,)jjFLQ w(ƻjH]5zPU@Q w(ƻjxWiFEͲu(EͲu(jX5U5U@ɪQdA ZMV&FXQd(jjh5Y5 5U5 ,jj}`Q,a]5 ,jj}`Q,a]5z,a]5 ,j%FEM ,j%F#QbAUGU@;ĪQb(jˮrFvU@?X5 W5U5z- Y5 ,jE!FEMUmQȪQ`QSUU5 ,jjXT(jjh5Y5 zWlBV&FVUU5 ZMV&FlQd(Q`Q- Y5 ,jjXlBV*AaEMj\DVm@6QdA lZM&DmP`QS ,jiW-\X ,W-\X ,W̗w'\2b@r烬e''\y-\X Nd2q@q'j*8< Nd'YϻXT' N`Q-\X ,j*8EMUp5U ,jXT' N`QS*8+83<1Upf}iWf}a> ΰJ7rd}_ oa]5쾩ǴvQJ7 K7 K7 K7Sf}af|v?އAnʆAnAn>0:0:ͰǴXJ79bSf}gؽrؽ|}Hlؽc* w\χ ST@vrv vovq K7'Sf}5}}5c* =0c, ~LaVaXv{aXvFYJ7O K7 K7O K7 K7SfdfdfTfTf,yn>V, 9savPavPCr',@~Rޛ%a9ȏ{d ?1gpT'gpp',2828EͲr'YVS5UA,j%38EMUT' B`QS@ ΃)M-K7,~}mY eAn}۳txH m̓*g&з=K7d&jth5Y ,ZMnm?VUyPY@,=Y , nxnˇݞ,[YtK7~tK7V2fh5 ZMl_T&?8fh5 L&c6V1 5yP1EM'ɘM`QSf{j2fh5 ZMlˣc6V1 ϏZMl&c6V1@ɘM`QS1d&:I~ g&I@$M v&;Ig4~&0INama5ۺy?a5455?ǏA9~csDǮ'?F1:>ǰct}s?f>Ǐ>ǰ>Ǐ>ǰ>Ǐcvs<4~ D 챐 }n 1챐 }n Yx=򑣲B>tTXxs  wcaQ.{,,jeEͲScaQYc*{,,jS¢=}aQj"ޱGЗ񎅾|w,,Cc/ }g#ޱw6wcl;Fc  & & & & }4 &w#C6 ˇw,,=~acnq;C?~!ޱ!;9xBB;B;w?ڨBxW6B?P+ DE{¢f{yO_Xl7T6ZMT6ZMT6ZMT6EU6ZMT6BBBBB¢]XXԼ'/Hp,Jp,OJp,B*Hp,Jp,OJp,Jp, ~` g. 
V V ܗB¢={vaQ=0XXԼg E{¢={6Np,,j޳g5ٳ wcaQNp,,j EͿM;y'85GXXԼ wc  & & & & 'F V V V V ܡBGXmZJp,,jc5 wcaQNp,-%85 XE;y'8rwcaQNp,B%85 5 E{,@$8ZM$8ZM$8ZM$8ZM$8ZM$8k & &XV V E¢f{ BcaQ^XXԼ"/,j $85ȁwcaQNp,,j EBBE]XHTXHTXHTX=*^, *^, *^,CJŋ܋E=xaQ,TXXԼ'ŋE{¢=xaQ,TXXԼ'/,j & &VŋVŋVŋܥBBBB¢]XXԼ'/,j EͲS",TXX,K;/5 s`BnUTcB#a,VE0!0!0!0rr UQ9š{B?ސXXlB05ۢ9(DcaQ- XXԼs mQ¢s wcaQa,a,a,o0ۢ9V9V9;j"j"j"{~Xh5XXԼs mQ¢XXlB05FXXԼg/,j35ۢ9E;y05¢XXԼs wcDcDcDcDcDc!OJ(j"j"j"xϋ_h5Xh5XXlAa,,j 05y 59E{^@t+onBmѭXXB=y{Xx=,b }"7/b yzJ<BPaaQ=,,jޱE;y¢{XXԼc yDEE{6¢=zaQ=,,jޱaaѤ}E"К К Ol_',&',&'1',n |BOX=',',,j',,jE;پ"OXXԼ w>aaQ',,jE;y5|¢ߦOXXԼ 'Rv_av_a 6l6hSZa}7Li)0ݷܰvpcZ /Z K /ZcL+ /L+ /L+|LiaiaFVvFVFV  S"aؽ~ؽiؽs}DcJ$ w; wsOg r~LaVaaaai<{{Lư{ݰ{o4|0^>|0^01xoռa)0޻0& ^xo20& S>ad>ad>aT>aT>a,'||?00>VOfOfOǜOsu>!|O@~'1;{_T ?kT>!q>! EM'5O,ju EM,jŘ EM~PnZMF% "0x/ E`!@ߟ [}2"?AA ZMF&#V{IdAE& 6bB{RXLbB{RXLhOj !Am ~H d!К0hMAmP@B5a!К0xo & ZFT!O0FcAE B`Q=A,jjy`Q=A ZMF&#@ƒ @G&#V@Bd!B`QS3&#@HB?F0ȟjA䏵 ZG`! Fq53 F&#V@ p!j2Xf{`0XA,jjg`QS?T!韁EMM ,j*XT!B`QoSS"EMET!B`QS2h5A ZMF&#Bd!j2h5A r#VAF,GXԤ"EME%&55)7B`QS@ntAEXlA 5k#EMM #V@Bd!j2h5Ax1h5A j2H @>j2,jS*eX, 4 eA`QSSkeAd j2eZM &SV)@s j2eh52LZM J556YhN556YhN8@c Ђ1q@ 8@ H9ȭZ0T [`q qq/@_x@@?8nAna3Xl_:XԸf?qEM,jj~`QSXԸ55n?qq??쾲 ;쾮1v_a5v_afԼaM7ݷܰvpӼavB[ -P1F'8oi0:y?~La^atA?r4Sؽ'{ؽfؽp}Vc?ގ=[3V8>&0T1vovovvvovtf&4Sؽkؽcqga.1™x]8Sc?៧#>ɇLa֢a79SMF9SLao g?Y|Lat3? 
g?j0;`~Lfg|?p0;fc_m)>xoxP--ti޻w)ROwOP4@>Hakl~`56?"Aͱ~ql~Coϓ|@<9>П'[^yr}?OηP=UQEM,jG 55:5oh59>jr}|@V|@V&ZMηoX|UZM5>#}9>~z} @~&p} a=An 3#&GZMah59>ٍELf7>EMn ,jjvaXf755>EM,jjvc`QoSS#aF55>@V#&GZM&ah59>jr}@ny@j} =y}`QSRXFrG55!5 EM,jj} yaXԄf{ f{ &~#&GZMah59>jr5#&GZMPR&GZM,j ,j ,jjBj`Qq}`QSR5#F55>TӳԬ@^xϚ@^xϚ>AϚr594Ϛ~AϚPCL@n@nߦfYIy|`QɡEͲ@YV@YV&gr?gZMΚ5h59k>jr|`QSX,+)Ϛ,jjrh`QV5Xl+FΚ9k>V5XԬf[1r|`QSԬf55k>jr|@mۊ&gZMΚP&gZMΚ55h59k>YEͶbf5ۊ5f55k>YEͶbf55kA͚,jmjj|`QS5XԬ@YV&gZMΚ5C5h59k>jr|5y;jr|f{ f{ &o5kg55yA͚ g?Ys| Ox5h9k>'|{=k>{=k>s|`QS5XԬ5k>YEM͚1 Ϛ,jmjj| Qx5XԬ@@YV&g?V&gZMΚAϚ5h59k>jr|f557Yj|`QSc5AΚ`5h8kA͚`5h8k>'{=k>Ђq5=5h81>З}81>=1|ȃ``ܜ񁾳91AM\O͉EMM,jN,jjb|`Q}upb|`QS&551>EMM,jjb|`QSX51>V?'~Lu}Mݗt}E?|=51~} a-7741~aNavB  NF'81c?Npb?~Ӣdؽ}at? 'c 1Mvvovv7Nw 77}LS`R{{R?Nh{;4Tax]8c ?ׅSp*4~Ϳ':_ v{SݛTa79~TatSݿb ?r*atSAN ?r*0;젦S?V8~, 젦 y*|ཙAM73R O73 O x HE<>gHK<AMST55>EMM,j558Yq X5>jr*|ཁ"N?S 8>軌S}q*|ཷ'w?8>wS&ZMN h59>@h59AMBX>HTakTs`#SˇT{39>69ĩ5=-x|{/<Ƿs|{ཋ$˅5=ަ<=?5x|{ gǷ53Xloh59=jr|{ @j|{ @V&ǷZMo,jj|{`QS&Ƿs| 큾ȟ"==?Fz|{ @?8=oV&ǷroXf{&p|{`QSs 55Ao,jjna`QSs XƷ55=EMo,jj|{`QS?V&ǷZMo h59=jr|{@ܛV@nP55t4EMoV%o,jjh`QSCGX@n5==EM  ,j o,jj9=jr|{@V&Ƿ?^8=jr|{5t4jr|{f{ f{ 5kǷ55t4^8=j|{`QSH5==;<=C,!a@^>aP#5yg{{ڟ;Cyg{{ y{ wy{`QS#5X,!a,jjf`Q<=jr{5R3jr{@!~!VC&ZMaXFj5zCX,!a<=YCXH@!z!!C@n@ !C-rrS4}0>a,j#5ۊCl+Fa,j#55=VX5=!EM ah59=[=b@!VC@!VC&rwZMa,jj{`Q9=!EͶb?!EM a,jj{`Q9=!EM aPC55=!VC&ZMah59={==jr{@!j$u@!E!E!EM,j a,jj$AYI9끖sysyss-5ZjYQYrYrYԜXԜ55gAY,jjz`QSsysXԜ55g=S_|Ta 9ݷnvߌTaES͇ćM58|}TaS?  
i2>r0:ȩTatSZMN<&* O |Lχݻ[Nݻݻ{>&=]P Daaa)Dmax=9)c>ד҇zrR4)}9_ vo&v{҇;uat҇ANJF9)}S0:I 'eF9)}iR0:I '>jR0;XW8)c{҇I++>jR0;I7 ޫHp<΃L`;im,l =M΃&{AO w4;?yO;z=M΃&{AO`z2M&{AT<_x΃|Hy);a{A&wg{A>2wBA&w4;zܽyLS`fL`y{ZݯD)k|=ۓݯD5HvxM݋'_ێ׶ǪF$k~D5n׸A$_ "7dkS׸A$_ۓݯqHv "7'_dk`dXdk`dXdk~ܓǔ5)}q{ܓg9)%/n/rUJv_䮔쾸i)}RvL"dwpOv_ǜ4d2rϧd2rۧd2=}Ls}3AgE :=t/n/.z&\ECzCEO닞&:=Mt{upU_ܞ^,o^X޲-Y$\,o^X޲V}q{^QstpoN_c5/zhN_4ќi9}DsbޜXe/iLA~WwA|Ʀ E>eSzA|Ц E>U?0 /򡯂vR_4i"H}Oi"H}Ls/],lR_,Kl=H}Ls/],Kl=H}Ls/],Kl4 2=H}LsR_,܃4 2ͿehFEӢQ|h_FEP/$4{"cQ|}!Q|y.C?~dwdewde1Uv;KcyTVcQTϧGsVc)c*c~1ci˘77W1G=bwl29uT{:#ySwc`wl?؏?';ωywyǸAyǸAyǸAygȌnp"|P ݷ3A"0ܟB[ftS -3BG _;Ɍn'32v}2,T3XٮOft&3AOݠɌnh zTF74=Mft&3AOݠɌnLS`2Met&Ael)oA>asc7gln>nz aAelin|=XП&l=M6v&A>uc7i,TI,X>M i$vP`Jb2M,TI,XXLS`2M5vej>jmO] zT6c=MVkZWk&AOڠjmg \ 2Mee4U iZWkZ,T6ȃ944U iZ,T6AVk&AOڠjmd6iZ{kZ4Y zT6.iZ4Y i{MVke^`q2vjmLSٸ`fd6XlAUke4Ung=&/gܞ ۳ rp{6s*nyR /gܞ rp{6Xlܞ zl=Mg&۳ zl=Mg&۳ATq{6i=4ٞ zl=Mgej4 ige4M۳M۳2rlLS=s3ٞ zly ݸ=4ٞ=lnܞ zl=Mg&۳As{6C7n4] i{Mg{Mge^ك^`fd{6XlLk=,T{Ofd{6Xld{6i=QEg{Mg&۳AOكj=Mg&۳AO *=4ٞ i=,ll4՞ i{MgejT{6XlLS`fd{6XlLSكj4՞ i={MMS`ڳAO٠ld{6i=4ٞ n=Mg&۳AOكt=Mg&۳2k۳2k۳2Me:e getT{6i=og毱:~LױH~l?4~Lױ4~l?4ךTz۟͏gcccq1oP1ncv+c31> ?V`*c~Tg+caT 3*cޠ* 7 7 7 edvdvl8 ; *c +c +cޠ*cޠ*c`9 c+c`9 c+cޠ*cܠKA.ͥ kp|P9ܟ39^\KDs5܏@Tz=kLS`ft`#t5XYn ]z zAeAeAe11z|0i˜냌i˜kd5i24s OQ&cs zT5?# ^IփJ> $kJ>ȰjpyPa UêAUV O*ܟ!U êI|)V <;ܟa êUưjLS)`ª24V z =MU|"jdXªA>qX5i24V z =MUe 4B i*4V -cXªAe 1sUU|jVV -cX5OU|AŮ4aX5i24V QêAOa`bW2i°jLS 4 i*vuPa`bW2MŮe 4V i* A>u51zPyg*G=222s1Ac ˠG1uGuG1u˺߇?*n}xիᏊc,cp{TgcgKc%1%ٱw͒w͒~tlfIkdc5Kc5KcY1%1oP1oo,Ўg@;ڱwhSv; c|Y}L1,Ўڱ0c*ЎeǼAhǼAh1n1n1nڱi77 vdvd1hǸAhǸAhǼAhǼAhǼ?hǼ?hǼAhǸAh\ An 9s6?rs.:h ms6A @{P` 2r?rGmL@ ;AgAcA³#܏I AudL`G6vddG6vdL`G6i#4ّ Ϣ&;Ց z>jf!Ԡ_P "(B |,j!@>g3oB=jLS! 
CAO!Ԡjd5i2 P&CB )CAO!Ԡjd5i2,T5XtaLS!`2MVR~?YI=Jj'+Aٕ ,%WR~?YI rzPͫ?jXI z=MVR|Jjd%5XUL}԰,T*2M5ej^T%5XULSͫ`*2MURd47҃*<ҠlKXiK~Y, f47 PX#jKb4XLYLS`Ap2MK|zbiLS`f{biLSY, z,=MK&AOҠbvbid4iXzPbid4XX, i7Keʔ42Meʂe`2eU, iX,T], %Gbiv4'.T(*QXL҃*9jKA XSK&AOҠbA&AOҠbi], z,=MK&AO`2MeV`BQ2r+br+biL X,T(AK-A IXK-U, LAҠwbiлe4:.y&`R=2v#bv#biL݈Xz݈X,l7,4U, iQKe*T4XbiLSҠbid4ȓd.l7,=MK&U, z,=MK&A$s4iX,T4XnDY, iX,l7,4U,=biLS`2v#biLS`U, iX,T2͟6͟6MK&AOҠbid4iXpK&AOҠbA&AO`f5`f5`2k2MX,T1iQ-WBnAB-[?  Ku`t g5ozPMנ/{6]2Au5cn=j6]52vٳ,T5XkLSM O,T5XkLSM`A8w5i4t zlT5i4t zlyxMנɦkd5i4t i,T3XA5]e*y4t i ˬu : [Au4jn}u : nyvҭӠ/^N[A~`Z: iuϏ[c+;cZc:ck:8c~=L/:ۯDZrۯ:ۯ1E1E_ct{at{a1NǸNǸN u: :?jI>Vw6R:}\lc#ul?;1lsFcj31~: : Oݹ1scdwul?7k1^+쮎xXV]ۏeZawu c1^+쮎y΍yZ}_eSI1}1ԇ}L51}ر>cÎqÎq>>>?#هۏ=>7>qT؇ههه۟-qÎqÎOǸAaǸAaSvdvdvTvTv,?ه}fv,?ه}fvTvGxl3t<6!:܏\T<6(rA 'xlp?r* r@2Mc*,T<6XYn=Xn i;Fa~mه ϵ~mه G.~mه e6gA_}ٳ A_}ٳ4ه zAO}؃=Ma&ap+xlpP?W9ȇ'A>r<6''A>r<6T<6Xxl0 z=Mc&AO 94=xl z=Mc&AO`2Me4}9&oiRfg*; z  9;:gg^AH1;cgg*\4 z=Mfg|ldv6XpYL}H1;,T젲2M˂e Tv6XpYLS`2Ac|xlo Ae1 A>ew<6ixlLSŸ`2Mc?LS`A>Zw<6Xb\L݃=2Md<6i24 z=Mc&=AOؠxAゞ&AO`fc<6X i,l`4U i{0ce*T<6XxlLS_t[AN ;{P lO杀 ѼAN lt6s"N ؠld6i2{P孠ld6i2y'`&AO ؠld6XlLS`flLS`f `fld5i4Yy @+Alp5izP O64Yy z=MV^//c[/_O- 1kKnlSvlƼO&T5kǸ kǸ kSv`v`vlLڱ@? >V|tX,`v;KcgKc|Y"}gv;Kc11~tJcavTsTvTvq,Bc*B~,>B1rcX}\c,Vx;kܱ1rǼAƼrwsS$c|Yβw?uSY1SYwl?0]ʺc5˺c5˺;];VuZ; ;?U5uǸAuǸAuǸAuc ˺c ˺c)1ne1neTYYUUwu+?Ywu+?YU]=a3tY7!LTY7r.3Ae 'np?3*] r.X2Mu,TY7XYn]=Xn] zT7? b7b7W}~N#苗}܃}>}>nd7i=Mq4 zǑ@ЯAqq_!qq ܏ } ܏ } {P}`A>1q7i4 z=Mq|~>ndA>s7i4 z=Mqe4 i,T7i[bY7G.Peݠ7IJnbY7c .pe X zC,e݃=Mu&˺AOe X4Y i,lo,4Uu;nLSU`nU i,TAVnrIWn~X=mO] c6+۠lVn|Rm] f6+`*2MUnC 5nb_[j_& 5nb_[i_^k\+׼Pz\+׼|m9dx7rmk|]zpl_׃u|^ksʵ׃-xmkAkZ uk`6z5{=|}fbukkk~=Z5k8\kzk3Z+h_u׊ckV|{7֊yk7֊Ǟ?g_[_Z5np_[_Z5np_[_Z5npZ5np_Z5op_Z5oOXk׼r?׊c~b_Z5nWB2EC)$/)$rH&kEC)$rH"er25e2LsMXY$2~K"ek/Ek.GM>׊?,/Z+p9jϽVZ+^+^+}}}}=ͽV׊?\iZ^+ܛ7?\-6?\-|p9!֠9rnC>CsC>As‡|ȇ|lksaek2͵9ܛz{sCOsoi$=ͽ9ܛz{sCOsoi4ek2͵9aLO?qV׷׊? 
Ԋ?CLԊ?cLԊ?Z^^+Q+~q}i=ͽV׊?uԊ?4Zek2׊?,\|/4:߇ekŵVaZ |~ksC>WGsC_{sC_{sC_{sC>_GsC>aGsC_{sC_{s2͵9aϩ~4De(L]ix?gmi=ͽ97?4foi=ͽ9(ܛz{s2vW7?,lw5{s25Qafٛi4]LsM6?,\i9M4?4~qM}i9!u9!g9!&rhȓhhi=ͽ97_\Soz{sCOsoi<=ͽ97?4LsmX>,,?LsM}XY~9be:=+ܳz{VCBVC2@VCOs f?)d?4ܳ i=4׬e^s X5^s X5ekV2vg?,\/Y4׬ekVCOs iYy6 Y۽ܳz{V5g?4gܳi4۽Ls}X5ekV5+aLs Xf?,\iY4׬ekV25+Ls Xf?4ܳz{VCOs  iY==+g_\z{VCOs X~ ڳi_ekr2kОLsMNf?,\i׬ekrCO9v?2'2УSz{CzOQ)y)==ebKSi?k0w?w?w?w?nt?w?w?w?nt?nt?w?w?,l?tek2CgXv_\;i4ek2͵s!^9;z{ŵsw?49=@COsi=ͽsw?,\;i4]9LsXjk8׊?akԊ?AԊ?׊?!P+_@ChEC_+{C~OeԊ?P+~q/CV>˵e_ELѱ%__T_kZjc:_V<_cދjcދjſZZZcq/q/?4Sxl?;?&X+&X+{?V>CX+&X+ۏz?V>CX+&X+OyqqZcy*y*yyǼAՊǸA#Jc|X2~L%1f,5c1<8u˒cexl?c<%1^,NjǼAǼAǼrݲd<%1,}gxl?S12އz_x<zL˜w*yc~U\~\1xu9惊5?S栯Oƚ>kd9#50ٱ栯Oƚ>kiƚej;4] c2Me`fa9XヿiŚڎAO栧Xsd`a9i24k>cd9i2,lw554] c2Me`ڎ2vWXsLS惊54ki*4cr}1>\{A> w9'1>\{A= ep^{AO=栧A5&{AO=栧sOcz1=M&{AO=`z2M5eg{2M5eg{leg{|P!sd9C1y=栧A<sd9i4cЍ{Alp9XYLS=`z2Me1l14۽&{2Me^=`zci,T9i4c<{۽&{AO=栧A&{AO=栧scz14U i{Me*4۽&{2M,T9XsLS=`z2Me14ci|P=`z2M&{AO=栧sd9i)Z&{AO=栧A1&{AO=`f5=`f5=`Ҙ2k{2M1,T3X4AeJc4 zUi;c6e:Xi۝1A_Hl@}!}PĠ/$6؀Bb:ovgtL4b :W_!<@tB D}2@t A_ D}2,l?i*,l?i*}P`2Me D4;D=M&AO D=M&AO @td :i24z D4i*,l< D4ci[d93.9=jKA~S% YrK<.9M KUr.9/ %e_ELֱ%__Ta_k*cu<_Ə<_cޒ*cޒ*ϿcuW+A_eX?ȊuW+A_eX}b4Y&+UzX=MV{bdT:Ɋup?/W b/W bC0Wb܏U4F ib +AO렧Ɋud:粮X=MVbYW&+AO렧Ɋud:XuLSm`*2MUeX=/_gٿiyP`J2M"4>uA%&w:1q4zL\Tv0i2q4zL\܉렧ud:i2q4i*q,Tv0XY~f;q,Tv0XY~f;q}v:XY~f;q v zL\=M&<\uzL\T:cN\=M&&AO O8q'ej4i*q,T:XvuLk2q,T:X5i*q}P`2M%&AO 9q}k2q4zL\T/i2q4zL\yX̉렧uLS!`fd:X_Lk2q,T2M%eJ\4i*q,T:XuLS`i*q,T:i2q4zL\=M&&Au:i2q4zL\T3i2q4i_i_i*,l1q,T2M=e{T:XgLSq`f5迊{>ݍQcABj 1+ĬvBnY/^f1ˬv/A_jgejLlwj4827~cs;藏 ݻvW6A_lny ݻvW62c`2c`i,Ts;XvLS vds;i4>vds;i4;nn=M6&AOvLS`2v?vLS`fbsA6wi8e|?wd;i24Y#AO|בwd;i24i*,T6XwLS`"2?e!fl"vxO?g'A>t;<n}}vxG:A>Rw;Ã< vxr}nvxd;zAOvxd;<n=MÃ&AOvxLS`z23`z23g23?h^d;_㏩>_cޒcޒ...c겏qK첏qK첏_WxL]DeL]1n]1n]%vǸ%voeeۿ`0->->`g?.777.7.~lvT}T}_`}c|lL1Gl=b15>y`9f~~ls1o|N> : 61n1n1n7fcjqlql?f7f7f~@15Fcޠcޠ)9 > >/zL1oP1oPlllllyjڏcc5S}l?1 > >llL1n1n1oP1oP1oܟX?c},gl?VlA.9R7ۃA5ۃ! 
f{p?D\lrnCDAn Wf{LS~堚2M5ۃeVf~&?/׃ᚠ/zЗzЗA_,5A_,=M4Y^z,dy=8zp?ߓ~R#f.A>uy=ȧf.A>uy=Gg.Ty='.2Mdy=i4Y^z,4A׃|zdy=i4Y^z,4U^i*,Ty=XzLS`)DG]Ԡ_[݃|AuQ~mtѻE g=gA/{݃t,AOA ;4tz =M4tz =M݃<{d=X.jLDb=X.A݃e4E={d=ȧnWaAp=諌26ۃlxA>Lw=諌26ۃlicu7{6ۃe4۽ 2vof{LSi&?j=_&AOfvof{d=i~Pנf{d=XMli{6ۃe4۽ 2M_ej4lY2M5ۃej4lEgكAY ;3tgكAY ,{=eAY ?ez̲T*3i24ez̲ Y,{d=i24ei*,T*3XY~,;,T*3XY~,;~Xv=XY~,~Pzcy=E.yA׃jϿl߰߮F߬ߪzLm}ߦ]ߤ1515S}lǼ%5Ǽ%51n1nlllf~"|[I3Ɲ>Ɲ>Ɲ> Şwƞ g{c{cƸ3Ǹ3y4?7N7N77~vT}T}7~p;cy1lď}g#15?ԈF~z6ca#~Tt,=lďyjy峇1n111~cǏq/Ǐq/?~~~~1Rcޠcޠ)9 ? ?zL1oP1oPԏՏՏՏՏOEy? ? {S?~l?p2 ? ?5ُُL1n1n1oP1oP1oܻXwc?~,n?V؏݈rnTy8!݈ǝ܏;G7q 'F|t#>w rnL7ejS7Ոi,,n,7n>Al}9O}9P/6⃾؈/6⃾؈r`#> r`#>iPF|d#>'Aylc#A>s#> yA>s#> A>s#A>u#> i AOF|d#>ȧn=M6#d7⃞&AOF|d#>XF|LSE`2M5ej4Ո?F|{is&Ld2~˘-cz>SNT5X޲?M?|П&Li|dz>i2=烞&AOA]&AO|'YzL4w itL4w=|LSy`zL*LTz>N}1=U|WApz>N}1=U|W2M|"vo|LS`f7az>XMiqx?Gmdz>i2=4?MzL=Mq4zL4۽ 2vo|LS`f7az>XmLS`J4i*=,Tz>iKܥr}yx] ۻKܥr}>wv郞& z=Mv郞&A>w>iK4٥z=Mve4 i4 i,?ݥiT1Kإ@Ap>KP] OpKإz}>v<\.}'8ܥi*,T>X.}LS]`$2v;.}LSA`fd>X Aue 24d z=Mv<.v;.}d>iKPAƠ.}d>iK7w郞&2Mev]`2v;.}LSAƃ4d iK,T>X.}LS]`2MueT>X.}LS].}d>iK4٥zyZ].}d>iKP5ՠ.}d>X~ b>X~ b>XjL.}LS5Ճ4US izP]`j2MTe ve4ۯA4ۯA4US}].}dAO].}d>iK4٥z=Mv郞&2k2kۯA4ۯA4եi_إg>]_xv҇A³KgA~] .}П .}LIK,T>XإiKP]`2MueN]`2v'.}d>iKP].}d>iK[ܥz=Mv郞&AO]`2MueN]`2v'.=MvrA~] 6K4٥{WwFե-w=Хm{^ޯ׶klo0N2ƽv5׸tk :q/׸t_ێtƽ?V>mkR}3 ׼7X>D5׸t_^m;{A5k :mZk׼7wk;mk;c{5op ׼7w_Na c?{5ng {_;m\^ʽ;mg[^Dl 7k;y rk`wCܻ7EP࿼Cu/ngX.r_܎WxEQ񪋜:9Ju/nǫ.r_2Xw/n|{bXYnD>/z_܎]n-["XѻEw`E鴋&=M/z_y񽸝''$tNy|`E>Sv"+X|$`E>S`EO&=M/ѱ=M{"+XD`EO&4`2ͽ{Ls_,܃4`2=X܃4)OycJ_C[/Fl/F"(u|QV$)]ŬO` ?{,Vf U9م~1uj~!?B߇0uy._h51u' 4ujb~B{NvBVS yES &59مE+S l=uy.,jsB\S O}=u24uW*B_e_ȃ_ȏ5uW*B_e_XԼCwMgL/,j޳ S S l+jg{~gsZ͟lsO/?lBVS &ZML/_h51uٞM0uٞM0uy.,jgL/,j޳ ¢=uk~aQ_XԼ5TSS i>M/M/G_H4ui~!BB~4OS mZMLUB~4VSB~4VS &ZML/_XԼ5m҅E;[S 6¢f;[S &$//`XxZ0k yCk -Z0/`X_cIZ_Z_XԼg55E{~aQ^_XԼ#5#5DžEĈ5E{1^_XԼg5DžVk &P#ZM/XgZM/X_h5ffjb~aQq\XlOX_XԼg5#5{~aQq\XԼ55E{~aQ^_XԼ55E{>^_XԼ55Vk &ZM/X_h5fGfjb~ĚB5{uĚB5E6k m5 օE6k kfyo.,j¢yo.,jAX_XԼ7X5 /,jAX_XԼ7Xb~ĚB5X_h5fjb> šB5VkB5Vk m5 AX_Xlofy/,jAPLL/~U0uЯ "_WS 
`~ayUccEE{~aQ=fa~aQ5E{~aQ=fa~aQ_XlY_h51ujb>&-_h51uxO/[4ujb~BVS ¢=u0uyO/,j,/}X_h~!5<5k */Wf_К}f_њB~Ck v^aZxvXjR _iy: _a4~ݿ5ac}v:ݿi~}ft?jq~0:ȥet?>N>r~\v$Ftբ70a+'W_{xXGr +H\va 0avP+ü 0;EavPVAZf?j|q!}:r]BX0ąat B0ąat B0ąa%.?ϸ?A~ƅavP+`q!%K\vF0K\F0K\F?.i=}lz0;i=z0;a!i=z0;i=z0;avPݧA2\F?h=}f)u?ZgiA\A\f?j=,u\Xyz0;X븞\at>PHE=¿,5YjZ?>HY=x zRZOR[O55x*z,jgIO?/@~@~@9h8h8h8x }ϴ>ZMZMɁ xB )@~J ?[@~ ?@~} %0{ ?y g|'&&&&,jj7,jj ,jj ?j|m5~ "xZ?{Z?КpZ?КpZ?КpZ?;jb0;at4|q0:ȥaAatK atK atKE \Xr,A.Zf}Kar0;_5aak gΆ?Lk ?Lk /Zv2{A?LkA=LkA?LkAZf?>K6jq0:5atGkc:Oyvњô?>3rô?r0;5avPk`yvʳ5g72/Gq_a@@ x ^OI/J/ާ)9ыEM-PEM-5G-G-G_Q@ Q@ Qr?Ђq?Ђq?Ђq?>8h8xi VV_l. 5*xQ @~J ?[b ?b}D%0{?yZ,jj?y?jr?jr?jr?jr?8{?jrZ^\\\\,jj?QZ,jj?j?柢|/Y3oG-5<9@ ?v_ R/h  /Ԇof_ b/XT_ 6|&V}@p_ j/h5 PV}@ɾ@d_ g, 5Xl 5B ,jj 5@w2>By@_d| З K@_d| @ 8>@`QS[/T% 8J@`qHd% hX C? G@;Q@;J@b% +V@J@`Q=aXlO55*XlO55*XlO5U x*EMU5U ,jȃN['<@ 8!HoN['y @$pB :!HoLPV @Ʉ@dB  LZM&&V EM 5o{' ,jNwBPv@;J@ Ic,!V^J@ hX CT 9)Wyŕ15U ,jXT%f{(d% yf{(d% yX@J@d%ZMV&+V@J@d% jh5Y 5 Vm+/loX ,jA5U ,jAZM^@_^@_0o8o8Ca0CE@`Q=f1XT 5,j*XlY 5,j,&CV!*h5 ZM&CV!@@d @`QS!f{b @`Q=fq?Ǘ3y|[[-o-o o5vavWy"_iz _a;~yݿ0vtݿԆݷ; vߌOEOE~#'#'#S$`d$`d$`}y"'#߉'ɸ4=^3.0^3.0}qaqa/V ׌ z1^3.0^3.0*.0> 0Ajffv fffFw4l%Fw%Fw%Fw%vmFw%Fw%x..S`e`EatQ'`2J0jv,OF A (0;(k,vF,F,yt)X0.1X0.1X0 % % % ݆~`0;`,f,f,vr{젂젂S`T`T`T`T`}m,u    ?  O ?G)X0>,42X02X0 A A A A \`s`yca幎_`A}*(:XBvD 颃*XxX 餃Z t Y,,j*XB  5@ ` ,` Z0 `Ac Ђ1Xh,8Z0 w1Xh5, GiC` >JBGiC` >J,3p )Q@~`A ?jt 5,ZM &V@`A ?v j2XB V@`Ad j2XXT EM  5,xEM  <^L Sҩ@_Lr` 8:Nr` ЗS:^f{` :,jj8!@Ad G<:LZM&S/qd j2uh5: &SEM 5CSEM P"~REM ?@p N^A 8uK@_Ld GW:} 2uK@_L5:^\:xx@_0WQ< Я?ӯ ~U}`< 7 }`< ~g3Xl,jjN8~g3XԜp`Qf< x ,j*XT< x@`QS@~h@@`/5"H]e@J@ vY 'P\LJY. 
ZM^@ɲ@dY j,O], ZM&VeEM5ˣEM5ˣ/,Pք@kx@ ,9#%Z^x@ 8hM&x@ ,9#%5XT< x@`QS/l}5sEMf5sEMfPV3Z ZMy>&Vj53j2h5 VV3V3V3_x@`QSEMf5,j*XT< x@`QS/T< x@`QS@x@d< j2h5V@x@dV0 A A Z0;Z0;Z0;Z0;Z0;XX+FY+FY+FT+vQFʳۏjS`}:id`da     ٍg7 Zʳk̵R 5tZE r/T >!H!]+OI ik @ZA`Q<'VXT5U+,jE7 /Ts К9hMx ք́@k@5as >hMO@`lGOpg19@~ @ ?KtV 5 ZMf&VY@ɬ@ ?vV j2+BeVY@ɬ@dV j2+XTV EEMe 5xEMe Z? ߖA@~ A ?t $ $ A/$ yAj7?$,j*HB  5$ ZM yA@ Ad j2HBZM &VA@p j2HXԸq`Q=~0HXԸ $,jj8q*HXԸqd N d 'O$,N } 2HKA@@q g $% A/A } 2HXT $x _~h? .P/$BP  C8 C<@_H,jj7~q?uf]ơ_,jj?@~^ LO?^ /OP<<c@^^&'&'&'1'&'&'&'&',jj5Y"<Xk`Q?~x1(0x1(0>?x1(0:bP`#0:Ƞ0:Ƞ0;X ^Π0;Qavf5J;{9젂SP`dl`]bl`]bl`}fp]bla K K H' ާ)cE@`QS*6XTl j'hxz6=@ m{ @@}x07)}bOgc!06xz~@几,P ?ul > G gIc<Ա*6XTl :6h5 ZM&cޱ@  ZM&cV@@`QS~ 5,j*6XTl 5,j*6XoQ󏦀C` ?6  @_ld Q0Y7 }}a @_l5S ETÆA`QS aXT Ad jaȃ*nZM6 &V j9jah50l̇V v v_A`QSρEM>P v&wa'/e_2% x?&<藌/ Lx?wn@M8ȣ3 5xX`QSEM 55 f{`QSE /_/Z 13=q?jr?jrZ 13ZM5xXl`,jj2ٞ8X 5Xe`QSEM 55X`QSEM 55B 55X ύ{?jr?jr?jr ,jA,jA,jjt6q?j?Fg_Fg ,jA,jjt6q?q?r?jr?jr,jA,jAa{f{m y/l`K@dq?/K֞8  Pp?ٞ8Xl`,jj,jj?f{`QSE @@j?jr?jr?jr?=`QSEM 53 e_@P @~I<<j?_ {?'=:lgIP%.G>v)2^%.?>vˎa".7>vˌ]b|7|cwcj]u8x>ͻ1{}^b5( (|^~7.|:1zbǮ(|1:|xC'9Oq?vǮ|?O ݋zDc9FcZU|41^Ө|4wc?zx1;x/^^8]/wcv|.?>Ɵ 1\PDxEDc6QDDc6QD?_&(" ("|1;X~?1;X~?1;x/~("|˹wa]DD-c 3>v=C-a]K=C-c aw-c 3>FPKuc%|޵aw-cv%|޵])ݵ1;xݵ1;x>f3j w-cv<1:Z j ?w-cٴsݵ]>F3]KD-ctA>fZ]K,|% +|%|g>g>^ZBjZB=D&.:x^G#RH^G#IJ^G#RL5sj w-!%,,j޵VA;/6 b#0A~DXA~DXx\[Aסʅ!!>_,Y :@x /P-OX<ܠZB~ ZBpP-a!? 
V-!%,,j޵@XVVVVhXVgTKXh5QKXh5QKXh5QKXh5QKXXԼk ¢]KXXԼk w-aaQ%޵Eͻy5Z¢Ϸ^Da0,YY"°З=" yF<ҠB_0,eB_0,,j%D5" w!0,,jE;°j"°j"°maXh5aXh5aXh5agZMDZMDZMD" &" ¢f{XBaaQ# ¢=xG5E{f{ C@a(4,BB_(4,BBQa!Gа' }}а' ; -ϽЂ_,,`,,`,,`,,uZ0tZ0tއ¢f¢f¢Y; wgaaQ,,,jޝEͻIyw5¢T!^^ OQa!?`Wa!2*Sa!2  yаW yx/(4,(4,(4,*4,(4,(4,(4,(4,,jޅE{xaQ<0)"?[\R yB)"xR }#З=R }#jRXȣJ),,j wJaaQN),,j)E;RXXlKH),,j a )E{C4N),,j BBBPSJ!=,!j"j"xo.H),H),H),5ZM5 хE¢!y5BTJaaQ]XԼ7D5¢RXXԼS wJaaQN),,j);y5BBBBBk+uDJ!&R y\)V)V)V)E6)E6)E{waQ BJaaQ S ¢=x5߅E{waQ BJaaQ]XloRXXloRXXԼg|"j"j"xZMZMZMZMZMZMZMZM5 5  5 5¢f{¢RoQ<B`{@Cga_OtDg!,,B,,B~uCՠзtV¢f{@CgaaQ=yw¢YXXԼ;  EͻYXh5YXh5Y; &: &: &: uZMtZMtZMtZMt5¢YXXlh,,,jޝH),QbR wJa!+К BQJa!+ܕRS y_)<宔BBVJaGRX?°۠a@n7v3gΰ[iz-ΰۛa6nkݿ°[a1naݿw0쾏 ,>^0Av0:0:°YfSOq?8-/lE 1N0mlE lE lE 10N1N10:Vİ8x_b+bKlE jE a?LavP[젶A"A"_ fՊfՊfՊfՊF7?1?1>7'A'A'A'A'A'< `ydayЍ`^ UscvmnL77&3/Tc">3Hܘό;7&i@D onL5՘PVp,*@h P<@EM`"&0@x PާM}b"7)(p*@K|N\=RnifK#>$BC%cK#/[-i:>$ȏ[-XTK#h5liZM4&[-@ɖ 4&[V-@ɖFdK#F`QSЁEM4ji5,jB4ji5,jXTK#ߢ)dLt/Ԙu_2&:ٽމ@dLt%c#gI':}7a#w&:E DG`QS*XT#DGd#j2cNNtZM&:&Vj=j2h5LtV_DG`QS EM-PZhw'~GBwP X~Bwe#;}苗;Z~Wݩ͖F_l4bL4b;5[SEniP-fSXTKji5,jXTK#F ?^wK#F`QS-Z3H5]xCp#?W8yCаt OW8y}c/p+>] G OW8^a@ Gd#j#pZMV8&C@ ,i;:S@.b" .?1C`#\1} KE@_,b<"F`QSSEM1*b5U,j"F`Q=Xl`Q=X U,jj 6)@"Fd#'\xa{pa#jh5Yx`&VE@"F O̹h5Y,jj 6ٞjX,jj 6"F`QSE@t#)`*b5U,jXT#"F`QSEXT#"Fd#jh5Y,bZM1yE@"Fd#jB .ZM1&E6Ef{"F`QSˁE6E_"F`QSˁEM .PE\,jA,b55XloX,jA,b55@1&VEh5Y,bZM1&VE@"Fd#jXloX,jA,b b#XT#XT*b/ )UjZ_xz&T$j2kh55y &VY@ɬId$>h55 ZjfM-5&|}V ?SA]/Qw< RyR*[ʷP-EM[&-V@us%j2h5o ̷PQ@|Kd%j2ȣSηZM[5s-EME^|K`QSQ/T%(@`QSQ@|KB̷'|K O8-'|K/^[y@q%/-iEXh uv4u8.y2 j j ɰy6Oy6Oy6O0:0:ɰ.05Oݧȇy6O7at͓at͓at͓ay2y2y05OA5OA5OA5OA=a2P0ô#?222>?22>?200;XCf#0PAڑfCfCvfCfCfCfCF|&.0606ô=.2.0mKl b4.fFfFfFfFfFfFfFfFfy, , ,`60;X~2jo{,Sff=ߩl j?h5n G&-/W9XL%_˟K/&_K/&_}90ˁɗj/&_}90B%_}90@&_^K}1Jއ}90xF Y '_}9B`y=@LM`&&h؃ `m{06=xK 0`yH=XT&e܃ ZM`&{0V=@q&jB`yx=@Ld&jh5ك ,jX.x`QS=z0EM`P=z0EM`5Ճ ,jȓZ5W|IMbdjZM6hyf j=jAC3n،4V @Md&Gܠ ,jAXT&ٞ5٠ ,jAXlϚl5ՠy4EM5hjZM6h&4<_Md&jAh5٠yV @Md&Gܠ l5D,jgM6hj"P 
EM5^M`QSME@M| @M ;9P51P+@s& @-G@-@ @ gf}Kef&зTff}O`f&gȜy23'03XlTff^L`QS23EMefifQS*3XTf&/tf&j:3H5 nмPcA4<M`k>5vX~j`4<M nMc %NPy )@{p %>P^J =8K)@_LS(<J =8K)R(EMPJ5ByaJ`Qv` % fJ`QS/T % 6&S(V)@s JZMPyX)@Jd %j2h5B a1P&S(EMm5B ,j*Bm5B ,j*sN5Xp`QS)R(EMPJ5B ,j*BPJ5B LZMP&S(V)@J O;h5B LZMP^1@Jd %1XloB ,jjL;1XԘ B ,jjL;1*XԘv`QScځE6)ƴmS(E6)ƴJd %j2BP&S(V)@Jd %j2h5B LZMPmS(E6)AL5 PJ5 PJP)@J [;J_lPb3BPb3)@LS(1S(1XlOoL5S(EMP^J`QS)R(EJ`QS)f{zc %j2h5ByR(V)@Jd %B LZMP&S(V)R(EMP)@I O@vȃxP@t$&vclk'< U; fNy8׵@u$u Nݧ`$Vv2kmװ[i|حְ۬aXnaUnN v{j'n: mvfT;vAN* E%aXB%aXByJ(x=2K(;TBvt%auPAPAPǛ0;0;TBfUBfUBfUBf92 ׬j}T%eT%eavPavPavPavPatV=њ0*˰k.,.,Ӛ0*0*ô=20*/cededededededededa w8,`*0;5avgeTe,YefUey,젪,젪, +Syedye}|zdya* , ,ȃRNBz?Z(gD慊l#2v@;ĈLbD&1"ȓE!Fd#2/TD&>w|#2/TD&L y8"h5 ZMFdkc&W65gc~&?>gyx@ @_ 3/T~&W63Evcd~5 ,j*?XT~&L O8?XT~&L`QS_L t~&j:?H5~Hi^@ _iMCnWE>X^iym@r&7 iNLLvb^NL OSț;1/T'&)܉ NL/vbyfɝ@Kr'&)܉ NL`QS:1EMubļݩى ,j;5;1EM $5۝_NL`QSɁEM $ZMvb&;1vK]?v6|~;"3|;"1]Dd>awDcDd>v~]Ǖ?]wޏz@DctADd>vRvGd>fvGd>f#2`:~TyٿDc0Ͱ{cVi>ka]?kaZAcיZAc1{cv< L1;x4=j1;xi>f0Ǯ.|a;L1;xi>f0{=1\c1炸ΰ? 
㵉xm"K_㵉xm"3SMw>ka;|"1: ;|"1: ; "|gw>f3 ;`yFAxcv|gw>fΰ;1;xw>f`yFAgEcY]DyQ]Dy>FwvGy>FwEctQ]?FwEg,ϑ|Qawcv|Qawcv|Q(,ϑ|"1:( <~"1:sGy>v)ϑ?wctϻ<?wctfs$ԇ>ԇ>ԇ>ԇHC mC mCw}ha^g_XԼ5¢]ZA-----Y5ՇZMԇBXS}hD}hD}hD}hD}haQ-,j w}haQ-,jEͻ>xׇ5¢]ZXԼ5¢f{D}(-,jEͻ>'U.N߉B;Q< G } x3U< Dh!ȩxGTDF yQQ;j'F yQQ;j'F yQQ<¢5ZXԼF whaQ-D5¢5ZXԼF/Th!Th!Th!Th!Th!_EUQh!f)j*w*cayUTQN-,j ¢f{4¢=n!M!MyDhDhD(N-H-H-H-H-H-H-H-H-,jAH-,jAHAH-,jAH-,jiE6iE;MxZMD i~&Zi;M/6D b#M/6D yZi!M!>4¢f{zChaQ=!My4¢&ZXԼD iE;Mٞސ&Zh5&Zh5& D &D &D &D yYiViViViViE;My5DD yOi<4Q&Z3J- H-qt̯D y.Ui;Mg~&ZsJ-L4Bb&ZbLMvl v0mv{7nmtn v 70ݶ ev6}86aj gj j j EmaMghz`aj 60^l Sh}sݷa&F&F&v5~DD젚ShTaTh0;gavPO젞c~ 㿁ݢaa7[05:bh#v:bh}6|#v:bh},|#v'A5ln0;&-fff-f-v;aThThThT`T?uS~ў02 .Ӱ )0uugi=2 㽇]aֆA2=L{Au=avP]avP]at]at]at]at]at]at]at]at]at]cevSddi,`2 ]avP{`yciTi,`2 2=L]avP]avP]av<0u]v]vei}Fea2 2 2 2 O2 2=L]avXT^+=r|}5[e{)> u/$}!|Q$Ǒ--dƦٹ?䓧2 4pAb{B}^ Ab{B45}J:V% t ,NU@&2cϤS=N>L:#Á>L:]S=N2ȁ.'.T)pp,?v΁>HL:΁/t)I}>3= tzO4t)jh5{ =ZM9Sd)jtzOEM,jj7}`QSzOr)jh5{ =ZM9SdB9Sd)jh5{ =5{ ,jj7}`QSzOEM=]S`QSzOEM,jXlwM.T)S`QSfk¢fkB{h5O1៿ &Bh~M`B9m"VM`+&hX ,b]CX 襋Xt+C`Quua{`+:X ,jXlo,b5U ,j*b]"V`QSEXVE@"V ] ,bZM&Xh5Y ,bZM98"Vd+*L`QuXTB5UPE0EMUaE@\V?x3I3@er׹@DӾ~T }uac+s.brE U d+#.b]"V Xu+P.b5U ,jXT+"V \ ,jXT+"E@"V t+jH5e9G\V g ,LȔo ,L@NT:Y;d+x.brE@~ոu,brE@uVrFE@-XEXEM*b]ؾRY ,jTZXl_,b5 BJWtUb*#|NW]tUb*#|NW!Uv@;tU G UEMl,j*]XTBl,j*]XT*ӥNW5=EMJW5 ,j*]XT*tՅJW5 ,j*]h5 LWZM&UV@;]h5 LWZM.T j2]h5 ,j}EtU`QS ,j*~XTB5?,j5 UEMJWZM&UV LWZM&UV@tUd*j2]h5 ,j 5 kUEtU`QSf{ b*tՅJWZ[L`*CZ]خvZvmVkhص Zv9TUص VEvc*U`Q]ص ,jkuVEMuZ5Վ]VEvc*jkh5ٵP]@ɮUd*jkQbw&VV]@ɮUd*U`QS]fڱkuO 8zhM XWu*c^ZF9qU z tW*zȱ^G9]U t*"F"b*)"Fw5U)0lhϰ]a=)0lwgخΰݜa8xհݚa4vgsUU/a0;x0,W Ϣx/cjUx:La<W y`j=zU)a<W ۿB<0^52^52^5l >LavPavPfffufufuf8Lw_ư0ߏaU3ư011l5l3ư01x* ð0;a [ * * [ [ =avPaavPaavPaavPQavPa_O_Ggak}ak}ak}ak}a0 0 a b / b wX 0; ְ}\T0 A AS`TkTkdkdkdkdkdkdkdkdkd0AArwck,w7A rwckTk,w7ASkTkTk,w7Skek>?2u.XatAFwFwFw b b 0;X b b 0; 0; a b b b  0; 0;X b bxհ}y}aj}aj><0^50^u,O2;Ub,x0~g1^5*^ u*@>aǫ   G v*pYWUq*pY,j*^uUUZOeOf@= ߓ٫}5G٫} 
;G٫@Af@Af.T*G٫@Af.T*U`QL^BP`yJmV`yJmV`y*V'S[~Lm@0O1 ir5 YaM+p䘙kZ>+icՁwtM+g5}?C*օJmr,ҩ@{Vej+2ho PS[@{օJmo>EMJmr>̩@Vdj+j2h5 䴢S[V ȢS[V@Vdj+j2XTj+EMJm5 ,j*uR[EMJm5>V`Q]DںPR[EM"V`QS@OBOBh5P@Wd+jȁT&_V@WdB+j5 |_EM+5 k_EW`QSf{ b+W`QXTBj5 lZM9Wd+jh5P@Wd+jY&_EMo57XT&ͅj5տ ,jXlAl5k`~* 0 1RT Sa,Oa@4; v ^~T Ӿ~T}Ak9ׅ*xșU.T+g@10*x5U ,jXT+C.x5U ,jXTH5] .xRM ^Ty@>O< 4^yEXZ4y*șSdBu9W 0P@h `pr@ڹw-;\wu+W`QS:\>v5w;\Oϖ}@Sr}@Se*|SسOȹ7}٧ g*Ǟ}Tj1{`QS}Tt*EM-f,jOXT*>U`QS}TEM.T*>U`QS}@>Ud*jOh5٧ Sr}@>Ud*jOu"V}@>U`Q٧ ,j{U`QS է ,j*bXTB51,j{UdB&TV}@>Ud*jOh5٧ SZM5}f{ b>U`QOXT*^ا ,jOuTV}@>UM`*&OIlmT6}@>UM`*SK}@IOXlW;S5Վ}TOXT*>U`Q]ا ,jOXlW;&TV} է SZM&Twu*jOh5٧ SZMS5է ,jT8Ts*YNP]U K LPr @N:A9F'.T*ӥNPr @UcBLP:'UBLP:suVM{QG} =l;TA5lό 1ό aj< aXTg?F.T)#Mi ,Bu)瓑}*9瓑@OF@OF.T)瓑"M*XT)HS`QߦM65mjTC'T`QSzSEMjTdo*j7h5ٛ MSɁV ՛ S ?-cT 1FPr1@$ƨ}r6pAb*pDcT>Hg?S@rqQN7}ظ q`*ǁ@69HUW>l\]U`QSZXT*U gܸ l\ZM6&WV@NXqh5ٸP@Yqh5ٸ l\ZM6&WEM5ZXT*U`QSWqXT*U`QS~U`QS ո ,jqXT*l\5ո l\ZM69bUd*jqh5ٸ l\rՍ@Ud*jqh5ٸPf{ b*…j\5_,j 6.lAl\5kWEM55WEU`QS ո ,jqXT*jqh5ٸ PWV@UdB5&WV@U sݸ l\5U ,j 6*\U`QS 1qXT!&BL`QqXlAg&X~d `1V #c+Џ@Z;uX~ Ӿ~}1 p9+օ `0ȁY.T+g@N:; `5 ,j*XT+`5 ,j*XT:H5 `RMXT@:V X|خcrFuU ,[uU 䴯3Vt*ӈXr@UM`*Xr@8cXL@N ;ciagX5}I1cXl_RX5>j2)'RmsR*O&S} dR*mNJ2)#ȤT RtRBR,j*)XTR*SNJ5K=]EM%JJ5 ,j*)XTR*ԅJJ5 ,j*)h5 LJZM&&RVI@Ϊ;)h5 LJZM&.Tw j2)h5 ,j;5REvfR*@`Qݩ ,j;pREMu\T`QS݁EvfR*@`QSI ,j*)XTR*j2)h5 LJ]TdR*j2)h5 LJZM&&RVI@ɤT`Q1)XlALJ]^ ,j &JJ5kREM%.TR@90ԅ0T'P~ C]0T'P~ Cy2 eU0Tj W PEvAc*.h C5PaPEM0T`QSaf1 h5 C]0Td*j2 h5 䌨PVa@0Td*j2 XT*0T`Q]T PYrB90TM`*Cr$a@9 uPt*}CRM] b* ]b*нqikOkOx?a?a?L0iO/awa < Ϣ0, <2bawذ+0<04灁aa < gچ<04l0:0:Ӱ}0AASiTaTinnԵnTaԵc>:O14lDόόa< < ei>-;疁a< < e242t`K14* pOOOyavPavPavPavPatrGK+%%ataataô]bj>?.1 uP쒖P`0 5ji0;0aZ?ji0;0aZ?* 5{PaataataataataataataataataFFf˽aavf:f˽aavPaavf:LaavPaavPaave C00o* 5lF:LaaaaaFFFO C C00;X C C00;00;0a C C C 00:P0jƿCM"2r_03S/ 5 ciCMxTP@~n*wS)pMPM}69Jlr`T R T wJlr T: 菏5 Uc S?>֘WG]c f)pf)'5}8'5 Uc ,jXTB՘15Uc ,jXT)S`QSjLEM՘15Uc ,j*"s kLV5@Sd)jZM֘.T)j蟖5}pvXc 9pSkLr>. 
r5@$֘˟S`39uS .]c q`)ǁ5@֘}Xc ԣkL>18tjLEM!,jj y`QS5jL@s)jh5Yc 1ZM֘9SdB՘9Sd)jh5Yc 15Uc ,jj y`QS5jLEM՘1]S`QS5jLEM!,j-kLEM՘.T)S`QS5f߲XT)jh5Yc L80aQ G0P}0 ,j*aQE'XFEM%J50 ,j*aXTB%J50 LZM&&FV @ɄQ 0 LZM&&FjC~d(j2aXl[&~˄Q`QS~˄Q`QS/T( EMmȿP 65!?Q`QS @/;aXԆFEM%&FV @ɄхJZM&&FV @ɄQd(j2ah50 L5kFEĄх5 f{ b(Q`Q1aXTB%d(3N].hLy2aɄхJy2aɄQ'Fu(_5L @0aXl4&ƄQ`QS 0 ,j*aXT(.hL50 ,j FV @ɄхJZM&&FV @V:ah50 LZM&&FEM%J5w²}@,o".T(CZ9Q 7 8Fo!G9NQ t(a(}(::b4l&G2lga ۟ǰq ۟ư'gq"F'1la7İ 0EgQhE~#Faj=u"Foa0FP)b4l0Ѱi#F #F #F1ff8LavPavP7avPavPavPwavPavPt0~  gFd0-ϓa<  fh>:ga<  ۧVL3p42pt0;E)p4j0;EGG)ThThTh"_ƈ0ˆѰ_F_1F_1:L FFӂaa0ˆa w#Fň0;X! Z0?*btZ0?*bt ƈ/chdhdhdhdhdhdhdhd0EAFAFrwch,w7FAErwchTh,w7FAEShThTh,w7F~#F)b4 GkFShfhf0EkFkFkF5#F5#F)b4;&#F"F)b4*b4*bt"F"F"F`c2b42*tbďJOO0*43*43*4;*tvT(B  ܧP>GP>/GnP?!v tA 'nP>{at7(Їݠ}6Їݠ@^v@^v.T7(nP`QSݠ ,jXT7(nP`QSݠAEMN5 ,jXT7(ɁZMv&AVݠ@nP>h5 Pݠ@nPZvi Ab7(p䌗A>Hh>-nP>ȉCw}haBOP`iUPš@I8PC>H, 8E}X AbqBZXԪCEM9-Pdq(j8h5Y ,rvš@Ѕ*rš@Pdq(j8h5Y ,j8XԪCEM*5UPšCEMZXl7c*]P`QSšCEv3fq(Pdq(j8=&CVš@Pdq(.ZM&CVš@ЅZXlA,5 BZXlA,]^X ,j *5kCEM5šC8XTq(Pdq(j8ȱi&CVš@Ѕ*ZM&CVš@N8h5Y ,jXlA,5UPšJEM>.Tq(G`QSf{ bq(^X 1Gta1GGQsD*GGс? 
rTQ@u:*ȑ:GUèPj вBtTBE-BtT(o [F 5 ,j**yOG 5 ,j**  RMGBTQ@P k5 GO9*XLPT(PM`'r@?TX ?r'c,r\@N]9aO`QS U ,jXT'jȩ.˟ׁ>8O \ q`'ǁ@9pυZ{O`QS@X^5U ,jXT'O`QS?XT'Od'jh5Y ZM9Od'jh5YP&?Vf߲Xl[Z.Xl[Z.?EM-,jj5\>EMr5U ,jh5Y ZM.T'jh5Y ZM&?V@Od'^X ,j .lA5k?EM5?s ?uvAc'ϓ@?O.T'ϓ@?OmǶOcwǶf؞|l`>ُm'cہz>eO|?˞ m_ۆ ?}q|lz>}k|l++c7ư=mnc<H|lR=H|"mp؞W+}E~SmcvpK}+?f{~~61>#>gǶΏ!3l_!1>#>g=Ƕ!ӏ{{m|"3l_!1;Xn|+}Bcvp|mc{cvp|ۅ ?>F_f}Acaj>M 3l|Y|?f}5fWajrwCcvp\ ?|"1: ?|"1:ϰ=1: ?`!1;>f =1;Xn|a{cvp|=s?.??Fw'~>FwEgǶ)]$~>Fw61.?`G"1;'~퉟=1;'~퉟=1;'~>f=gA|.K??{ctI }h5 8ZM$pO΃l`;HH<>)%pABmdmmdA$$pܦܦAB'pOQye}>Qy*<΃>(eJJ{Y^y]*+*DTÃVQ}gVQ&25-Q햊̃E}gEvKET澳ja Z>J5B5:525j0;0;kaz c>8ϏQa|~ 1*scTf2¨̰}vCFeC 2 2|0;Xn ZL~2ZL>**3**3lfffe  V6   11i0:p̰}<}%cA-f݆avP AcavP Acavfe p0:p0:p0;Xg ~p0;p0;Xg ~p0;pa 2cS8fe8f>s?2s1.1.1 ']c]cC.1.1)3{$11)3*3*s111 }.pON\J>R*'P(q r%n9R/'P?b%П(@P @BLOH }B%P@^&Py@^&P}x@ )}x@P (EM%P.T%J`QS (EM%PJ5Un,j*XT%J`QS 2h5@ LZM&P&(7za;v9s>s_>7#xJ>#́>\xJ!U'J`TVPe@˲J/*>,ȑ;U}xYV eYBUZ)XJ*EMU9JdY%jh5YV ,re@ɲʅ*re@ɲJdY%jh5YV ,jXJ*EMU*5UVPe*EMUZ)XlqUZ)|*EMU*5me*Ve@ɲJ \V ,ZMU&*Ve@#h5YV ,ZMU&*`*EM-,jjq*5UV ,j U.lA,5k*EMU5e*EIJJ`QSe UV ,jXTY%jh5YV Ȯ*Ve@ɲJdYBU&*Ve@ɲJ ']V ,5;,j U\J`QSrXT A`QXlA,5;̮a3ra1J6+*r )~d\ؾM~O 8)*tAF>AF>\)1j0;E؇)1j0;+0; 0;0;0;E.6젮5.5n-,왖]gZv=1F21F2lF#9Lˮ3H3H3H퓫 c$ c$?O1at1avPˮrWddԲ#fzԲavP1avP1a0;0;/cTd-""lg˨aZ=ϖQa|el ")*2/ Z="젖A.Ũ0;e`g1*2{"젢"QatQatQatQatQatQatQatQatQFFf=Qavܳff=QavPQavܳf9LQavPQavPQavPQ_ƨa ']FESTdeTdeT0EE]FE]FE]FE.".")*2{$"젢")*2**2**r"젢"젢"Q}.pnO\H>OR*GE"q rT$n9*OR/wC.T7$! 
Pݐ@>e\nH>znH>nH'!Td'!XT7$nȅ5 ,jXT7$nH`QSݐEMuC5 ,jXT@vC&!Vݐ@nHd7$p \nHd7$j2*C`T$p OFE!0*|2*3hh'"r>O-|2*r"VQ@Gk/@?L.T$WN.T$j2d!*L&C&V!@ɐId$CTZML&C&V!@9dr652 ,j*d>LXB&EML 52 ,jUXS$اVI`O@VIc$Uh* }lrԭ@VIc$jUrhZMJ&[%Evd$ٮl5D;ٮl5DBJZX * ,jjv`QSKj5* t[%EM-,jUXT$jUh5* l\VId$jUh5* lZMJ&[%V@VI`QUXlo:l\t* ,j7Jj5ۛ[%V9@̑g$Αg$ :222rW G@S davPABAB3!!a| VY e d-!i0>[B2r;e d>j>/ @0~02j0;XY Z<{!;@0;XY 2BABABABABABABABABS dd dd d,,Brb dT d,,BABrb dT 0BABABAB~!)222lHFw9LatatFwFwFwϧ @0;X @0;@0;@a 2:tP}R1p #v#ٹ}6ٹ@>i:dtBE7Wat#F7.Tt#pߢg.Tt#p ܇.})ct#Чэ}/Чэ ,j*XTtBE7n5,j*XTt#F`QSKn5,j*XTt#2h5nZMF7&_dtBE7&Vэ}2|2э}2э}2pO  }>gs}67э nZMF7v>Ͽ۠:?t~ڍhPПП!!%B!!%B.n@17&B9tDH`QSqڍ{!EM ,jj7n`QS!r"$j2h5 LZM&B9 DHd"B%B9DHd"$j2h5 L5 ,jj7n`QS!EM%BJ\DH`QS!EM ,j&Bڍ{!EM ,j*Xl+L5 LZM&B9DHd"$j2h5 Lrщ@DHd"$j2h5P5VE_DH`QSf{ b"DH`Q1XT"$^ ,j*XlAL5P!EM%B&!V@ ;h5 LZM&B.T"$j2h5 LrfӉ@DH`QSK5_DH`QSKZ!EM-,jj`Q1XlAL5?^9@ H G)] G g$CwCu#S}\GM`#gP@'0o>;BEM>.T#G`QS_G t#j:H5}RM>CTӡtC̡@d;ȡl>9G chCu#}rpˡ j\m6]@FM`W#sRjv5m6]@qK]sW#mEMm r}@ >Ac ЂOpZ0 -}@ΠOh',in',jO6 ڪXV ',jjn`QS[u5'hEMm ,jOXT jOh5'\>Ad jOh5'ZM &V}@>A`Q',j##Evcd >A`Q'g <r <'>A g 'N` ;}fIOXl7) \>A`QS}Evb >A`Qݤ'ZM .T jOh5'r:}@>Ad jOh5',jOXSd 1A1'.,[ 䬀*AY'r c,Nrx a'eT @A t j:A.0A'$2&`>1l?Cp#0lavS`Kav?LqA&A&S)A0)*A0l8L avPqA]A]A] A] Afu)fu%fu!f f0Afwd`d`>42ApVL L L gՆA&A&cj  i50;0;}80%AfwT`T`>-9*A˘gĔaĔ0>#1%pV3bJ`Si0~O0%0{)Ô Af})avP+`r_bJ`TJ`,%r_bJ`TJ1%02%02%02%02%02%02%02%02%pR S S`/1%0SR`/1%0*%0SR)%0*%0*%0*%˘8L)at)a|0˔aJ L L 0˔0˔0˔} |eJ`eJ0rdJ`TJ0AASJ`TJ9%ȧJ Ck S|N |N  b @ \ @>O4S] @>O90 @ ] *XT 5,j*XT @`QSA5,j*XT @`QS d j2h5 ZMa@ Z8h5'wC#ȝ{r>T#ȝPa>ϻr>>vr@ɝVO;C3j2%pRV)@ɔJ 5C3J EMJ \EMJ rx)vh5CBڡXREM9@dJ j2%h5L r)@ɔJ r)@ɔ@dJ j2%h5,j*%XREMJ 5P)REMڡXlAL 5CBڡXTJ ^,j*%h5L r)@ɔ@dJ j2%h5PSV)@ɔ@dJ j2%p5kSEM.TJ @`Q1%pa{ bJ ^,j*%XlAL 5,j J \@`QS)RV)@ɔ@ L ZM&S*%h5L ZM9@dJ EĔ@`QSk/TJ EM P)ֆ56<^,j ZXlAL Ȕ@M`J N 9nZ@ \ gZ@=ku- \Z@6k>t- سnسXT- Z5U ,j ] RMkTӵ@Z@ t- jp wRwrz;9@P{ Gl?,?;mw_ 6;mw(w h?Sz6rJ;)=,jjk.@܅6.@܅9]H¿P\].@fy~ ? 
j~`QS\X.vr6˻X.vZM܅h5 B܅h5 ?jr~.@]V&wZM,jw5M/l7),jw5 ?nR\Zapi}?!.'ĥji}?!.@Nyi} G>ЇK}x>ЇK9ji}`Q]y>ji}`QSKZZXlW.,jji}`Q]y>jri} >jri}@ɥ@ɥVK&ZM.,jji}/NSE7Z0n47_Xz} e7_7Z0n䠏7r;tolFfYdT7RMohhuM۪_^w?lq?ͰƟd>z0l?"rǰ}`~:c~6Ӻaouc~.b~*atAuַZzַ; 9 7 5 Z:000j0;0j}0; \w?lmF0oF~atV \w?lF~ôuԺavOqP[ӺavP[Aof~Ժa/ara~?k?LFa˽ksm}r;x޹~Ժav{~Ժav{~av{~,fkAF~atkAF~atkatkAf˽kravPkravPkravPkavPkAfqmaZ[?rm}yô~atkatk]Fw~>\e܁2@yw'q|rZXf6S5;e܁26S5;e܁V˸&qZM.P˸&qZM.\h5;jrw2@e܁V˸Ve܁Ev2 ۭ˸Ve܁EM-\'Wjr+JJ@A(Wjr+}R;G+}R;P^}Vj5Ņ+Z}Vj5R;ځEvqJVj5Ņ+&WjZMԾP+&WjZM\_{vJ@ɕځV+&Wj5R;jhe%WjZ0R²+O?RB?R;Ђqv FR;J@j+/,2R;Yezv J@ځT+Wjz5Wjz5WjzݙߢaǶcۏ'^}NǶl;?ێϰ}ǶWێǶ mlUǶS|l;3ێ̰} mbǶm{'?fwcvpX|׊R1;_)>fucvpL|W}wr~N~{'?Foct? |b} mb mlPľA61:}ط=l;1;mlIN~{'?f}؛1~f؛=_7c̰7c̰7{ؾc{?sm#abocv_7cvpw1;X/؛1;,,f_؛1:؛1:؛1:؛1:؛1:r}o``o7{ؾ7cvpߛ1;؛m2ct{{?Fw7ct{{?Fw7ct{?Fw7c(.f{$f{$f{{?fa˴7;pߛ6%66 &ڛ6%666[6~pyp|p-pzp f?MI> >h=MI> {,j{,j{555555555557AZM~jboV{F؛/}jboVZM,~j?ͩCR9}>TmNmmAO,~p|p#zp|K|bVKXR&j?h5TAZM,ܗj?Xܷ=Xܗj?Xo{/~/ܷ=Xܗj?Xܗj?1-~o{o ܗj?Xܷ=Xܷ=Xܗj?Xܗj?-~jbVKXR9ͣZM,ܗj?ȑ-~jbVKXRR6RRRR}E}E}E}܃E5mrR6R5K,jKXR9ZM,~jbVKX ҴTAZM,~jbVK5kj?XI-~/~^T;aE55kj?Xܗj?XlAX`Qs_/~/~/~jbVKPj?h5TAZM,ܗj?h5TAZM,~ZR5K,j񤠖j?X?XK,j,j,j ,~^T(f{ R?8{Ģf?ȹ/f?ȱE~_ڛ~ڛ '7A΀io>؛ h7AN#joE}o 7ARM~jjoTS{ڛ f?H5Aj5Rj9r0D䨪Vc?aU~~_{"#ȰA?2l~<~_?Xپ'Ai3=fr]ۯ,j{"b{&au`z&}VAνiuAl~soD jAl~gM555M5MrF,j ,j,jD&&6QD&&6Q?h5AMZMl~jbV,j6Q?Xl7 ll7 l~nD`QsD=˦_˦M? 
M?SeaҲ9ea>eX6OM?!-ܗM?Xlw,~/ܗM?XܗM?XܗM?Xlw,~/~&X6IJ&M˦X6IJ&M?_vkV˦X6IJ&M?XܗM?h55IۿQ<(?e/5ZM~QԚE~QԚ&T?9 ~֚`Rk,jZS ԚT?H5A5z3T?XS 5ua˸z~a9Lkgaa9Lkyaa9LkAFzôzԚavPkbԚavPavP׊avPavPWavPkA]'A]&A]%Af0"Ayfqatk T5 T\S=rM} hatk@ T\S}<ZS=jM}0fqԚat릇uӇ{Mߝ릇uӇiM0?%;#[mW [LK{J 1|$bpsޕcq1^Mcnzl?7돹cMcnzT&p,!Myy>1oP1o܇07= ǼA1n1n1n1n1n1n1n1ncMqMqMy>1o܇07= *7= ǼAǼAǼAõǔ?~wenrc.sc.sǔvvvۏqMy>1oG27= *7}L1oP1nF#DKp?7O_T6:i 'Z~<ltp?܏4Up?xQ~1 ~lࢲ2MeeF_T6:XltLS`2MeeF4 i*,T6:XltLS`zF=Mf&AO~*i2}QYڠltd6 PYp?Tv],C} } ;cЗ cp?;} }Qe/A&AOe蠧2td:i 4Yz,C_T:X$ZLSe`mPYLSe`D i ,T:ȳZ.C4D i*vQe`h2M%тe*C4U،AOe蠧2td:i &UAOe蠧2td:i ,T:X$ZLSe`2Me*C_T:X2tLSe`h2MecUi*,T:X> ,T:i 4YAOe蠧2td:i 0&AOe蠧2td21e`mlXiA,C_lXiA,C4UiA,C4UiA,C4U2tLSe`AOe蠧2tG]z,C=M&Uz,C=M&As:i ,T 7X> ,T 2MpeJ^T:XnLS)`f2tL} b:XnL} b Aq:X<2EL;_|4G^A>Ls9ȇi"0Q(rGE92ME/2R4t9i:4Er"9MG 3tiQ ٣Y:c72c1?bkl<_Yc/:bDnl+jlJc c c#c*&y*y*ӝǘ7R71oPcޠ8ƼAoy*yu1 >cT1yTn1ƼA ǼA ǸAǸAs c T*YLYLYLʌq,&q,&gdƸAǸAT8 < <9b7R7 =|<9u#fcX>u~lT>u7bݘ7X'X>Ucޠc``x,,y*c c c c c c c c T>Y>Y>c``xTx,,y*y*y*G]|< |< |cqq_ǸO֍n<}n<}n|Lu1u1u1u ɺ7XY7˽"cޠT7U71/` &4Vp?}Q~P00 ~ Ip?(܏WOEA~5~ c*4mT80qLS` 2Me*4U0i*,T8XqLS` 2MS`4Y0z,=M& SdAO㠧ɂAF9*c8o~+諌~0 G )G I~0諌⋊}1RUHqd8i2R4)z=MF&#)i,T8XHqp?,T8XHEչe4)x#2Mչes]T8X:WLSu`"2MEo; xAu7y} O{Q O#WvoϑX*y #/;_|s$y-A>Gr7H}ޠw>oO i,Tb,,ot7-Awm ric72c1?>ykl;_YcԱ/:>jl+jlzzc c cc;;1oP=1oPwcޠ9ƼAqyu1 Nc}Ƙ7>71byyqqǸAy7 ; ; ; ώqqSOoTwTwl?1rL}1o߲Ay}1~+{`wl?5}1^Gǰx;}ޱ#y;}1oP͵1oO; ; yǼAyǼr?>7X'1yǸAyǸAyǸAyǸAyǸAyǸAyǸAyǸAy; ; ; yǼr?>7>7X'ۏSwTwT>>U}'>>=>>>>~{dw,y^}1oP}cy G ٔ~+>op?bϦs\`vp?6uQ}~6%1 ~ap?܏M#Eyeiۦ>Eye4 i,T7X>oLS`2Mye4 iz}ޠ>od7i4 Ǧ&U z=MyO]lWA+`A_eÉ$Mp?U>op?OÉA_e^T7諌}ޠ2y&AO}ޠ>od7i4罨>oLSa`2My!`2My/*L,T7X>oǣ i*L,T2Me S4 iIy&AO}ޠ>od73+=My/y&AO}ޠ>od7X>oLSa`2Mye4罨>oLS}`2Me4 y/,T*X>oL} b7X>od7ia+y&AO}ޠ>od7cW=My&AO}ޠ>Eyec2My/,MS}` yec2Myec2Myec2My/,T7X>od7iQOy&AO}ޠ>Ey&AO}ޠ>o z4 iA4U+>oLS`j iV,T4X>,l iV,l=oܠVnd+ZAOܠVnd+7'n=Mr&[AOܠVnd769܋%Jwi-]O>q7i2cp|lnm z9`r2Mp/Vs2͒s7i:4 r9Mp2tF9 n9ԯǭܱۿɱ{ۿc+wl;_x/~э~S+wl4_oc6_mcvLm1n1nܱ1ݛyjۍyjmɘ7ݘ7[1oP7$cޠnGƼA݌yjۍyu2 &dT+wT?ƼAƼAƸArǸArc 
[ԶOzqlql<ƸArǸArm7 ; ;8V7[ؼ=V ؼ~dcؼ=z`vc1^lSvcޠ:fc`/`vTv,lގyjގy往1oy; yclގqlގqlގqlގqlގqlގqlގqlގqlSvdvdv,lގy往1oP1oP1oPcjގyjގyjގgKy; y; Oد>ټ=15oǸO6oǸO6oǸO6oc'c`Wdv,lގyjSvtP_p?GONCյ 9~r*G.kϑC}(tp?]T6 M~/?mLS]`յ ik,T6XmLS]`2M7e4յ ik,T6XAvm&AO]۠ɮmd6q z^Tq3ik4ٵ=Ȯmp?u]ekOk}k#1 aW ap?}k{Q]۠2vmص z=Mvm&AO]۠ɮmd2Me4յ e4յSLS]`Aer6XSLSA4t i*,T6XmJܵ z=Mvm&AO] ϗk4ٵmLܵ z=Mvm&AO]`2Me4յ ik,T2Mume4t ik,lصmLSA`21]`AO]۠ɮmܵ z=Mvm&AO] Hk4ٵ z=Mvm&յ iA4յmLS]`fĮ1]`fĮmLS]`fĮmLS]`fĮmLS]ۋ4յ ik4ٵ zy]۠ɮmd6ik{Q]۠ɮmd6ik:wm&2Mumec2M%@/k,T4XEumeJ4 iA4 vmeJ4 vmk$Dk{Q] k(wm/k=wm<mܵ zy] Ok9)vmsc6i14٘ zl=M6f;炮u N{d>|9_<꿥wB[Zp=ji|:m_\ Au`f ùN,,m8i/:2s6iN4] r9MiζNtu :mt○ӎM-c78uڱ;:~ٍ.nlKcz/j/cЍqӎqӎO*dTnTnKƼAUƼAݒy!u;2 fdTnԍȘ7ې1oP7!cޠcޠ*tt1 B7 B7 N; N;?Y= 7:7:7:~^cdvdvl?1 N; N{L1oPu1oPuڱWVfOVfwVfǸkVfc5+c5+c)11ٱw1UfǸkVfǼAƼ77XYU;+c`ygevTeXYYYYYYYY=777XY;+cޠ*cޠ*cޠ*TUUOzSevTevdevl?:}2;}2;>Y=1UfǸOVfǸOVfǸOVfsc'+c`Wdev,̎ySevt+6 jw{} b+6X>,T+6X>,T+6X>,T+Z2Mbej=Mb&[At+6i4ي zl^T+6i4ي zly>έؠVlLS`fVlLSY͋j4 i*yQ`2Me5ec[21`21؃l?ɓMn^T+6wnyɭ؋jyϭ 8)'b5[Au+66jbej4ՊVlLS`Z?Vlt+6i4݊ rn9Mb[ANӭ VAb_, 5G]|JkOIukOIu iKOQ`fI9zDu iANQ kt5i:tQנ{9MG]f~Amg|8m5`?^϶3.5϶gv~]c{ gۅv~]mW^D 3n5϶gcMg^kqcg^e>;ϼ>3opkqyg~7߽| 5ϼ7߸| ϼ7g jm?Qkqq~ 7gۉϸA`?Ql;7g jc{-3op~ :VoQg;Cg꬟qg~;C3 u϶#?qguϸ3Y?g`yE3op~ [Y?:g`yE3oߢ7Y7:g ꬟q~ 7:g ꬟q~ :Y?QgD3oߢ7XoQg묟y{3opuϼ7Y?Vuϼ7:gۑϸOY?>Qgl;m:^gD3uֱ:g'꬟q~D3o+7XQgc{3nPuևۙQ^g}{x{"~9 uևq!ۙq^g}{|9z{uևe{a^g uևe{a^g}XYiuևe{aލ|XYiuևe{a^g}Xw#Qg}iD:COuևۙ&D: v!!>NH=0C_H>=<=kp>C_H>4`}i"DCOև&=Ls/=,42=L6_>ƣ2ͽ,42=42=Ls/=,4CRa^zXXiA>,=MXz> Xz>4`}i"D!ϩ)DCOև&=L} Ba` ևe{afk} Baf2=L} Ba`}X>!Ls2=Ls>4`}i".`}i"Dkp>4`}i"D!)Da`}X>!LsoU2ͽULsoU2ͽULsoU>,l`}X>!LsoU>,lQ}#HʨCSF!!)3yTNՇ,342=zQՇ29MeTrʨ>4Q}i*TF!CNSAdT(2yRՇ<CPF!Z(*Q}cʨ>) eT2(>PJ ԇ4QJ}iD)R2΋R2΋Rjp/"iB7CoIӇ<C>sU7CoIӇy@I4}CTJ>q%M򙫒42=iL"iLsO>,lH>44}i"iܓ=M$MzH>44}[J>44}i"iDߒS!Jw,i0Nz>2NzPBӇ|(q-%>8_PBӇ|(2 S4Xa>,,0N%2RS!CNSӇz9MNC=;}iwT>//{c+30c|];=~ulXkzc3Bۯӱ2ۯcꯍq읎q읎 fTmTgƼAƼAˌyu3 .fTm̘71oPw/cޠzcޠkt2 6 6 w: w:?;=777~tbdtdtl?51 w: wzL1oP1nc1ӱ(ww~@g;ct;ctl?3ƝwzL11L 1o߲w: w: 
[NǼANǼ~7Xo;;1NǸANǸANǸANǸANǸANǸANǸANǸANw: w: w: [NǼ~7771NǼ_eꝎyStT~,rdzc'{c'{;>;>;>;ۏy^1oP1o+w: wzL1n~hZp?܏XEUKٍ~4- NJѴidp?V܏U- &`U- ijT4iZ,T4XjiLS`2MUKe4U- i*xҠjid4iZ4Y- NJ&[ z=MVKZz]H_jip?jip?܏yA_HyHHp?jEUKX- Bb4iZ4Y- z=MVK&AOҋ4 iZ,T4V iZ,T/q4X\OLS4 i*sQ`r=2Mze4U- pAOҠjid4iZ1 WK&U- 򬄫AOҠjid4iZ,T4X\OLS`2MUKe^T4XMS` iZ,T4=4 iZ,lX- iZ4Y- zyҠjid4iZ4Y- 򴙫AOҠjid4iZzQ`fjiLSҋ4U- iA^lX- iA4U- iA4U- iA4U-jiLS`AOҠji']- z=MVK&U- z=MVK&Ads4iZ,T4X>Z,T򢪥2M!eJC^T4XMS Zz} b4X>Z,T2X>1L& 8äAr¤Ais4SCynaҠ0i+& äߩ0i& i*L,T4X0:L4& r9MIäANa 0it4i=FGt3%0 z L} ݩϋJ}yȩ ;J>eJ}4 iO>eJ}4'SAOϠE>&SAOϠgO zL}=M>&SAO h;g~w@/d;A_ hw4?/3og/3?/3;2Rr4XRr4XYbZ^,9-w@eht4i4 rݹd4%;AN h^:Hҍ#c6jc6׌бb۟~%1EB%cU<_c5<_-  Ϙ7h٘7h1yyu3 &g-Θ7h٘7ۛ1oP77cޠnmƼAEBǼAEˎfTlTld$td$tlj< 2zLѲ1n1n1nб7H7H~ad$td$ecޠ"c cc ccɗ11ϱ~e{abc ccދNc9c}1oP1oo29 *9 M>Ǽ719 29 29 29 29 29 29 29 2yL1n1n1oo29 M>ǼA>ǼA>ǼA>)9 *TsTbcޠb??889n!1n!1n!б1@Ǹ]@Ǹ]@):2:2:2: *: *: *: H@ǼA@)c}E>~z#1 }Yp?#f Ĝ~(1 7.*O?E2M>/*,զgp?,T3XgLS`2M>e}4 i*,T cAOϠgd3i2=M>/Q4 z}db ~B/ ~B/'􂾐 '~B/ ϋ}}!1gd3i24 z}=M>&c ir,T3Xgp?,T3XEUne4U irsQ`*72MUn.*,T&XMLS`bArp3i24 z}=M>e}4U i*,T3XgLSϋ}4i*,T*72M>e}y0DZ`*72M>ecc2M>&cAO 84 z}=M>&cAs3i24 z}=M>/*,l i*yQ`b21ϋcc21`b21`b21`b i*,T3i24 cAOϠgdbAOϠgd3ȃl}=M>e}4 >e-^T3XbLSŋ}4i*6>/A}4 >e-4 >^cAgs3CA}^T33m}y2ȱ 9 |cARs3sj}yRͱ`b2M>e}cANӱ gt3i:4 r}9M>cA]t3Ë}yDZ L8 >nkm / 5ejk4{ۚ2M5em͠ɶfd[ښAOm͠ɶfd[3ȇ/nk=M5&ۚAOm͠ɶft[3XۚKim͠/5|fUw[3ȿ_U|pYS ~nk4K|m`ښKm`fiPyT iۚANm ft[3itm͠lk9M5ۚANm_9xck7tc+7p?ƶ5c[slgc<_c|Lmͱ~~ 15ǸA5ǸA5et3 6 vL7;cޠacޠntƼAyu3 6 f͘7[1oPm1oPcҎqlkS#ld[sd[sd[sl?50 9 91t,T2i2t4 ЌCAOˠed2i2t..&CAOˠedB21`B i*t,T2X6>H,R2X6>H,R2X6>H,R򢊔!)eC*REʠ"eg] z,R=M)&U z,R=M)&Ar2iH,T2X>H,T𢊔2M5ej^T2X&`LSM`f"eL}^a2X&`dt21:9/G' w:2ӑAȘ GtdF Iӑ#Sn+MNG^T:2MNGy`ґ2M#|tdLS`ґ2M#&ӑAOȠtE#&ӑAOȠtdd:2i24 zLG=M#e퍎Ƞ:d/u2ȇiC³ :d/uAq2缮C0uȠ',T2XޮX i,loWC=M!&U zC=M!&A>q2i4Y zC=??G U뿓rwb79W\eO OU'w*?/>?/?;W,,='*eU^,M'*e%\vr2XYNU9M*sANӹ \eE*n.2W4 rU9M*<ŽScK:c :ד)˱Bʍ~1,c>_c= 2e9 2e9 2e9?;1oPI1oPIcu4 h И7۟1oPI1oP>cޠn|ƼAyJYyJrϘ7$ט7$7Ȕ7Ȕwd\c Sc Sc 
ScY1n)1n)˱7Ȕ7Ȕ1%ƼA,ɱNg~d?[f'evrl?`2Ɵ-ǔϖ1~avrl?Xr1;9/Ny=1oP1o1;9 8f'ǼAe'1n1n1n1n1n1n1n1ncNqNqNy=1oP1oPcNyNyNSvrTvrU6 g);9 2;9}2;9 >>OqNSvrdvrdv򘲓c'c'c'cޠcޠcޠc`WdvrTv ǯ{ 2@dp?j܏_CPp?~uQ~* ~d`p?O 'e D2M"e D4 i*z,T 2X@dLS`2ME24 z D=M"&Pd wAOȠ@A") '悾V '~b.k~b.{'悾V@d A_+ Dj۵@dd 2i2yQȠ@dd 2MQe D4 're D4@dLS`6MW.*,Tp%XE"e 4\ i*,T 2D=M"&AOȠ@dO z D^T 2D=M"&AOȠ@dLS`+2M"e D4 i*yQ`2M"e 4 i*"e D4 iA D4 z D=M"<@dd 2i24 z Dy0ʁȠ@dd 2i24@dL} b 2X@E"ecT1X~cT1X~cTbT1X~(UXc'k z5=M&ck z5=M&cAr1i2,T1X>;0,TKb2Mej]T1X^LS-`fXcL}v`1X^dzAr1#&1^T1SP1y= Oc D{Ayr1SO1y=`z2Me1{AN= ct1i4c r19Myt0ȧA.ڲpkaà/{K=Ơ{AU= ~-9ŗ$:_4c i({2M/0{2&rb,,"{AN= c@t8= ct1i_S8?^϶el{U?^k1~mg _O?.϶rmgUv=϶mWgv} 7g zmOϼU7ϼU7} @y g~7>[ϼ3op{y{jly{3n=ϸA?~ 8>cD3n=϶q1~ v 3n=ϸAVgcg?ǟ-zgg)E3lcl;`1=E3~A`XyC3~A3oǡ7?{zy==ϼx =ϸA?cD3n=ϸA?cD3n=Ʊ7g zy==ϼ7gc{c{3op1~ =϶oo?s{3n϶ӈqh5~v3nƱVg.Zc{3nϸ]?}$Zy>ϼ7X#j|{ngł{vhvVvvvV,ngnnngnF{vvvvh$niƇۡe4V2ͽLso52ͽLso5>,[4V2ͽ?7VCOƇ&Z=Mzh5>3=4j &Z=MQ\|lx-zţp;p;p;ЋGqvvvbp/.>Q\|ţp;lGq񡧉CO^\|iDq1ie{qa^\|yXi^\|Xijܫ)Ls<,ܫ)Ls<,ܫ)42ͽOU\|iDq񡧉COŇ|fCO^\|*.>4Q\|iDq񡧉2ͽLs<,܋42ͽLs/.2ͽLs/.>,ݦiՔe{5%iŇe{qaf2ͽDq񡧉C6Qq񡧉COŇ&=M蓊=Mz(.>4Q\|i܋4 i^\ |#{ayM>҈ {a {' пF|CsJ#>4F|i"D1zH#>4F|i"痔F|i"LsO#>,l7H#>,ܳv=Ls=,ܳv=Ls=,ܳv4M>҈4M>҈4 yHÇ6pyQ!K Çe{a8CNSÇ9Mr >48|i*pTp%‡|C>xV!_D8dC>W2!+M ! |J>%Ԉ>5dCQ2!T*ܓ4dC>T2ahzX'i&=M$zHdCO&=M$zH>4 |i"D2𡧉dCOῇ~{e =C BayC2P!) =ŋC_C_=,4 42COῇ&=DCOῇ۸=Mz=4wDOT{顿OTD!%DuCl!CZ~|;bwjCCl!(6,i؎b42R9_4Ks|),ibQlaf(6Tl!CNS.!6M<CNSb9GE1W}l|lWD8c=c}L%±ll0 Spl:1_c?_c Kc Kc KccUUiUi:1oP1oPwHcޠƼAy7Uiu_4 h=ј77J1y4y4q,q,hǸA4 D8 D8 D8Y"Y" q,q,SiT(xL%1XۏugĢF, ':3bQ𘊂c(8, '9*{E|ј777XޫX{cޠ?E1nE1nE1nE1nE1nE1nE1nE1nEc* q, q, y彊E1oPE1oPEc* y* y* SQpTQpTQpl (xLE6(8*vYvYq, SQpeQpeQ𘊂c.c.c`dQp,, y* y>E1oPE.`p?vQ]~$ hq~ 좺@Xp?܏;aLEuY~)U gJ. *Nbp?S,ݦ.`LS]` i ,T0X.`LS]`*i z=Mv&AO]~)i xQ.`d Bp?] ~/ų 3_Ћg𢺀A/].`p?v-].`d𢺀AO].Eue b4 i ܏4 i xQ]`2Mu<.`LSɐ`! 
i*,T2$X.`LS] 4 z=Mv&A>Aw0i xQ]  4 z=Mv&2MueJ4 i ,T0X.Eue4 inT0XdHLSɐ4 i ,l i 4 zy].`d0i 4 AO].`d0i xQ]`f.`LS] +_B + WH ū_\ }2ˀ_ e/i24𻨀_d/i24 ~AO`~2v7΀_LS 4m i*vQ`m2Meۂen`fg/XlA<_ 𻨀_' ,~Ap/%U/ey. O8%ŀ_LS`~? _t/i:4 r9MK{A>ui {yA>v//G]T( Y<s/zy2M|^^LS]`zy2M&{yAO^E&{yAO^^d//i4 z=M|v]8w.Xkծ Bb.'nLj$ ]ݮ Bb. `fv]LS`fv]d.i]wQv]d.i]䟅ݮ zl=M.-v]'uAl]TF(K/AuAa_[#vAa_[~m Va ~Œlq._o ha%]L[ in,LSa`vKa`f8lw\ ivANa ]t.vA' :ư]t.i:l4=a2L)z7DcOcblI?cލ!'Ncᘢwc__ co1nѻcލqލqލ폘6jTHhTHƼAƼA=ywu4 i]61oPѻ1oP!c_:[1oP!1oP!1nѻ1nѻ71ƸAFƸAFƸAFc wc wc31nѻ1nѻc q Sd?c90l7ƟvcI1;ݘ wvc ca1.`nl?w3 *l7 *l7 #Ƽ~İݘ7ݏ1l7 2l7 2l7 2l7 2l7 2l7 2l7 2l7 2lwLa1na1na1o1l7 *l7 *lwLa1oPa1oPac ۍy ۍy ۍqc y֤ߏ 17 #cdc dF.ȓy}}2#4 z]TF.i2#4 zy8Ɍ\LS`ffF.XEee4\LS`a2v͌\Lp3#4{ ){o{ {oA>w-ȇ#[{ ؉{oAw^-ǪΫXy :+Ղ|ZOWZOW jA>w^-ǪΫy`;2MՂeʫ=MՂ&jAOyʫ=MՂ&jAOyɼZd^-i24I #iAב/F҂I Q#iA>p$"iA>u$-ȇ_G҂I r`$-X~S3,T$-X~S34I z]T$-i24I zGOG҂&#iAOd$- LHZ'#iA_} 2wI /h}  ςZrA-?] ςZ/< jA,K. jAcA-ȿvT@\P i j2MԂeriRY .4K% ZLA\P r.9MԂ jA׺XP łZ.Ԃ jAN EBͱw ZpA>w .Gnh- C&e* 4Ղ i4ق zl=M.4ق zl=M&[pAO-h+Ѷ :mh[Gm ]T-ȇg~Gۂ[mAѶwh[LJe-Xh[LJe-i24mh[d-i24m ψ=MF.mߧ*8A}2Gۂm d-KѶ \h[r-KE*,B݂KnATXt ¢[?݂KnK;E1/ .n2q-X[LDΗ29_4KME`f v$E\t inANE [t-nAXt Ţ[Э3݂nANE*{m 4_c3:~SAnl1U5SAn̯ r?Ƃ_35c:[nl&4ōᎩ 7}l6vY;7Ȃ7ȂnǼA~ƼA~.pTg71oPwcޠƼA~ƼAyu7 7 sL{cޠJ?cޠJ?c rc rc1nc*q,ȍq,ȍq,ȍǸAƸAc rc rTYɂ_Oz 7)ɂ1z1k,‚i.cYU rc`y_aAnTAXYYYY?H%[tJQMELI :_ ܅NcӔ7Ȃ\dA. r1n7Ȃ\\‚\TA. 
r1oP7c*żAbޠ r r?_'s1{<M1^+,W s1^+,W s<\ s<y}+s-,ż徕幘7\\[Y7+g劜tŹKwP]|VMKWtAuփY|AvYcgq>Wq3Gե+z=MvKW4٥+z=Mvq.]qtե+.TLS]2Mu4ե+.TAv銞&tEO].]d8+zTiK rHez+zC #xFq>jX;\b@ 1 W6Ā\b@85,ɀ\d@re*'i* W\\q>9T\\qr+.T@LS"y8 W\6igqP2M,4ճ(.T@LS"; W4+z =M䊞&rE>|w@i2 wP"; W4+z =M䊞&re igQ\\qre i* wP2M4+.TϢLS2M<&\qre in1 W\\d@i2 W䊞&rEOɀ\d@3L=M䊞&rEOɀA4?;63W\~eˏ63W\2e*W\2eU \W3pE_eyV2fʘ+zTi2W4+zy \q2pem33peTLS2M*W\Xqcem33pem3{mk+ؖ{mEpȣQ[y(Ž"Vڊk+ {mm{mENӽ"^[tiV4k{I"9vP"9VSi'ي|l$[Od+򩴓ld+򩴓lE>vȧNDI"8V\~PqleJ=M&ي&lEOIJ=M&ي&lEOI$[d;ȏN+z`l=0ӊ|.vZpvP" V'n1iE20ӊ|vZqiE~vZd;iV4N;vZd;iV4N+7ӊ&hE~V<ڃ̣0VΣ}1VUZ\>Z\>ZTGqŸAb h1n}7>Zd- G{L}7>Zd- .}`- G{L}7>ZT1bޠh1oP}o祐cϓ8 lP?i1 lPi[uvZi[uvcjxuvcjżAubrOvc=-i1ope;- {Z~ Zq>W]]+յpEյ\]+V$]]+·\j0fq>Y>*VV+z} TX1VV+z} rEa|N8>Zqje i*V\ZqX2V4V+z =MՊ&j\dXXEOaV+-| ZbX8,g7!Ţ7İAՊjEoa|H!Պjboo =MՊ&jV+.TQLSa2MՊ2MՊ4V;Zqje ya2M"4iՊ4U(.T!LSa2MՊ|ZdXi2V4V+z aɰAՊ|ZdXi2V4V+z i*V\BDqje i*V\AՊ4V+.TXLS2MՊ4V+je i*V\İZqjEOaɰZFV+z =MՊ&jEOa"19V4V+z =MՊ&jV+. bXv̰ZqQV+.?v̰ZqV;ZqV+.WjE_e }1VaAՊV+*cXi2vPaɰZdXi2V$Պ&je in i*vPa2MՊ4U;Zq`e*in =MV.ӊ<vZGN+vZGN+PiEKq;ȃ)n}V!/ӊ[vgΣ\y"8V\DPqheʣ=Mъ&hEOyʣ=Mъ&hEOy<ڃ,y hEo7Z\@+hV<Њ("?s V hE>%qLS"?siV4Y@+z,TiV4Y@+z,! h9+sGd[FΊđ/$FΊ9+Bbȏ9+sGΊ1+ŽYfcVY[aǬ ;fEV1+,w. ẘ~{cǬ7 v. ẘ4\;fei.͊4٦[Yq撮ppǬLsWcV41+r9MẘnJcVtUR]bǬV;fE׺1;87-~?.ct6 es29pq?Ls]x.z]4?ivD"̫vD8.g=M.z]4?ivq?Ls9\\9..Ӝgi4Yq]\9..Ӝgi!4es"ϐvq?Ls]\9.. 
B좧EOU"ȪEV[d@V-| E>U"ѩEvB"Ls\\9..Ӝ[d=M.zh]4"+-&Zd=M.zh]4ȏה 7v3An")7v*7VscLWxMLE!7v%Ls΍]kʍ]4i"7vDn8.zȍ]4i"7vX)7V-e\E(h]j]UE_eh]UE~E~E_eh]o-I-I-Evѿ.E~YqIEvoh]Zdť)eKVB-4eK[B-4YqK3 .3 3 aըyL1oP7FMd, aŸA6S& a0ql1n 7ȆYl~0ql=FMd,ׅ _6bc_6S,EMM6bɐǖ>aɆYl>ɆY&f1opf1opf1oP c, a0qlŸA6b f1n 7ȆYd15b f1n 7 7YT15bޠf1oP 0yjżA5bs_15b}b~E1vb|E5c-׌]/碑u.Z-h1^T-hu.Zd1ubޠzR1opf]7CEy=4h??|ޯ8+y\T;Zq>W]T+E*~E^.Zq>wPE"/Պs+E˵hZq>FXM;hZc41vPѴhZc41V>FӊAhZoi*V\hZqieh=hZd4i2V4M+zEOѴh=MFӊiٌ3pJq>hY/Ѵf4_6ihZq>\ Gӊ~b48g.zi*V\Z@qiesSei*vPѴ2MEӊ4M+ԉiei* q6MEӊ4M+ٿiM+.T4=MFӊ&iEOѴhZGM+zT4=MFӊ&iEOѴhZqieJCi*V\hZqieT4LSѴ2MEӊ4(.T4LSѴ"8V\hZqiein1V4M+zyѴhZd4i2V4M+DiEOѴhZd4i2vPѴ2 vъ˿_/V\uV\uъ˵.Z hE_+y]vъVE+zTiV4E+zy].Zqhe/heTLS]2MuV\^Xqzae/eEoQvP/6ʊ<FY'9(+,eEpˁ"ObQV7~Ѝ"cQV4(+rn9M7di:=E>*riȧ."b4VSL4VSLƊ|XO1]+KcE>=piLS2MƊ4U+z,=MƊ&KcU+z,=MƊ&_EX*WW|\*KWUWկ"?rwV~7aO]*KW|կWdiuPկWdiU1{[Peث[ a/$*Bbث*/td"?qȏF*g_xhKE*=KK2%vWq]eKS2M.]4*.TWpLs+U4*rnw]A^\E}>Wq>W*LJ^\E1U\>Wq\esi* \EO}>WdiUϢ=M*tPْ=O=+[R˨p>ZlmSMq>Z!;YѣfQCV4!+B&;dEO=MvȊ&;d!+.4!+.T8+.TLSiCV\Y'h!+.TLS)iަY!;Yq:dEfpiCV4!+z=MvȊ<Yd:dEmpiCV4!+z=MvȊ4!+.TLS2MuȊ4!+.T:deiCV\Cq:dey2MuȊ4!+.TLS"C ;dEO"CV4!+z=MvȊ&;dEsiCV4!+z=MvCV\YqR m7kEkErCVY ;dEtkvȊ&;d!+z=MvȊ&;dE siCV\YqvYq:d!+.TLSiCV\&Xdjo;Xыgj#hNyĩ"85VƊ^$Xl~rqL=$Xd, 2 <Ҙ7$XT1ݑƼAedbޠFcޠEcޠDcޠCcޠ221oP1oPw1oP1oPI7cyļAedb `1nI4 2 221nI7$Xd,6?5qLŸA&b7$Xd1ed~I&b#&S,I#&b)ǖr&bb:b,6=:b, .LżA%~I7$Xd, 2 qLŸA&b `1nIǔqLŸA&b^$XT, * `1oPI7$cJżA%bޠ`9`1oPI7(1o/\OlP97ZTǘCq̡ŸAb shCq̡=Zd- 2Cq̡ŸAS- *Cy$̡żobޠP1op\Χ*VOӃE^AԊ"/ ԊsHBj`ׇCj9VTHȋ!✲*.THr[4J;ϩLq9l? 
<Ȳ[ї nE_,} V*} V%_% B0E_=]o]q>|P/$v芾ء+Bb8T.Bb }!CWAu4`(.TLS|(LS2MuCW\]q:tErLS2M8CW\{mJqini*Q\~CW4١+z=Mvv٭ȇ.Le"?8w٭LS2M݊4Uv+z,=M݊&nUv+z"VW֊̕"?se\Y+de듕"?ue\Y+W֊&+kEOAU֊&+kEOZV\m)ߖmYY+oAW֊XY+Bbe "?peȿR+eHȿR+lԊ~mR+eH wCjlCjE_ }3vpW8V\+R+.THLsX8V\%dcmK2ͥfZqpHi:V4R+:1ŐZё)ԊL1Vth!Sa  cH\Cj?K76cޠn=cޠ;1oP7ȐZdH-6?tq =NdH- 2R͏c Cj1n!1 2R{Lc-_ ) Zhc{2h1^ 1^ x=0x=0 Z\ޓD1b h1nA7 Zd- 2Dq ŸAS- 2Dy{2h1oPA7 c żAbޠh)Dy c żAbޠh1oPA7 ZlļAS- *D1b h1nA7 ZlĸAb h)Dq ŸAb h1nADy >żAbޠh1oPA7ST) :zVT8+\g9zvPѳ|;zVg=+늼=+ѳ|gE^&Uqgѳ"Zq>6vPE"ZchEE"Zc8h,z},EE"Zq>q\\"Aъ4UD+.TAъ&hEOE"Zd8-z,EP.z%EP.ʥ烒2V\Z'siE_̥E_̥}}2VO}}2V4"sX̥]E2VOT. /$ҊK+'K+Bb.riE_H̥}!1vP2Mu4K+.T.8=,.T.LSʥi*V\\ZK+.T.LS9ʥi*'Q\GdK;~0V\Dqv\Zd.i2V4K+ČsiEOʥylƹ\Zd.i2V4K+.T.LS92MҊ4K+.T.LSʥi*V\\Zqrieʥi*V.Ҋ4K+.T.LS2MҊ<\ڃ̥=MҊ<\Zd.i2V4K+z̥yй\Zd.i2V4K;\ZqghExV\"ZqYhE/E"Zm]D+z,xъ^5 .5 5 XuuwuoUy*<Ҙ7bMT& "Yb +bXYqŸAVbS7ȊXdE,6?qŸAVSX؏߅Tz`E,4=VbVbXcXz`E, .ɬ"YqŸAVb +b1n7ȊXdE, "*b1n7ȊX\ޓYyżAUSE, "U{L7XTE,6sSE, "UyżAUbs& ^6XTE, "cŸAVb +b1nqŸAVSE, "YqŸAVb +b"UPyżAUbޠ*b1oP夘7nR|Ba)_yD^׌cŹ?vP|" ;Xq>,ZXqâ+rLs;\G=MVĊ+bEo!VĊ+bEo| "V!VĊ+bqooTELS2MUdEi"V4Y+z=MVĊ)ݢK Q\^!%Q\^!%Ɗ+Xq>~ cE_L}21VXq>\X/cEOXdbi['+Gʊ9'+'*NV8Yї dE_E_} 2NvPq/AɊ';8Yq_\8YqdErLSq2MɊ4'+.T3_i*NV\8AɊ4Uv(.7n\'+.T١Lsqai2NV4'+zyq8AɊ<8Ydi2NV4'+zi*NV\Cqdei*NV\8AɊ4'+.TLSq2MɊ4'+Șdei*NV\8YqdEsAɊ&dE si2NV4'+z=MɊ<8Ydi2NV4'+zTLs3FJ*1V\vXѻeb21VI\'Ɗ-cEwA%Ɗ-cEOX+zLi*1V\݈21V\A%Ɗ4+.TbceJ=X];X]+ċ[`E+[`E{q {]"EuP]"EUHwUHw|.W3*c~w,]"U6w4*zr= Ae"?Op2XE~ V'8Um`=jf53XE~ V'8Um`=Mf&3XEO`=Mf&3XEO`[eUwAf+"o՝*Bb /$f+Y* ~mY:UtUkUѯ-KWEUtupitUeUї=KWKWeKD2M4KWe*]\j?itU\%)UqRpitUt凥;?,]]aKWE~X*U5'9t9Rk 6m_.צ5sd颌צxm8^צK#6G^.׼971G!7k"X97kkӻkӛtl`6}7k "X97k "XM׼978Gbk\yob_[׼5op}}+yk|78߶ ׼9X_k\yD5nצ_q`k "Xq` "5n׸AD^bq` "+z~Ĭ^zFqnlyoE51צ'q׈Y]#f51׸kĬ^ysbVqY "f7k bVqY "f7k bV9f7k bVy{+bVys5opYkzcV9f78Ǭ^kS%6Ǭ^kzcVys)7{zcVysbVqY "f7kS5n1׸AĬbs5n1׸AĬ^zD5n1zsS")T#OO|E~̯E~­E~E>[V"@)tOy ttTkLIjL7jLjL>Ԙ.z}1]11]߼1]1]#jL=MԘ.z1EO5&jL=MԘso;ט.vR5 jLjLy;E_H1]E_H1]_: .]ݸK!TKy7E|.]\^+ .]KKy7EEe\?\*4o+tq\*.\Wp2 .l\c 4+tE.)EWe\ 
K]Ap{C.]tqn1.36xL?lcv}?6_)/|u#6_xL|aļ bބK?R`p)M0K1n6ߥcc .揙c K1n\q .ŸAbǘn6 *\zL71oP7ؘ7[ؘ7ؘ7ט7(HԭkԍkmkTp) * Xcޠ 1oPQ7Rdp)6b K) \q .ŸAbÜ7Rdp)6b K1n1bSp)]3\+[[\q .1wRl}fp)]3`\1b K1n7Rdp) 2\q .ŸASp) 2\y{+K1oP7c .żAbޠK)\y .c .żAbޠK1oP7Rl#ļASp) *\1b K1n7Rl.uĸAb K)\q .ŸAb K1n\yJļAbޠK1oP7TLT(& >e$1I?kLUc*͊y"/(טsjL(dWkLŹtP5|K5\c*G!T\Sqrtp\c*.TA֘ kLUc*z&1=֘ kLb3a8-z&1=֘٢Xc:S`1i kLEO5SditP .sRB .sRp88.epAx\*ep苗|苗Rdpi2T4\*z .=M4L5_!֘T+SѯkLEB1CE_1}tP5/^֘xYc:S/kLe1iTY/ט4Uc*.TLS52M՘<SqjLe1TLS_/.ԗ4[֘4%2햇5SdiTQט&kLUc*L{\y "?{s¨ȧ^N "?8w¨ȏ0zi"?vrڧ?9O;9SPNyOѿ}Di"wڧ?).i"?wڧi2S49Odڧi2S4*S\m)i)i"j)Bbڧ i/$}d>{~X9zO} S$yzO}\zO/=E_\zOqg{4U).\zOq=AS\|w?wu4/DSt%(aH =EgJX):^zO{ذ 1Ǐflc|Q>zOl٘7zOT1ƼA{bޠnbcޠnacޠn`cޠn_cޠ 1oP1oP71oP1oP7ccy*LļA&b =1n7zc*LĸA{b =1n7zOd'6? YyLc'}>Yߺ~lyd'}u>Yqo]ǸOVxb'+<[1c' YqĸAVxb +<1n7 Od1Uxb +<1n7GUy< OT' *<1oP7 OlO< OT' Uy1oPTyjy+<1n7 Od'6b +<1nTqĸAVxb +<1n7 cļABbޠ*<1oP7 OT"$ *cOf>3UO Oq>wW Aex"(gx󹻃E3<9sP|k)*S\ Oqr9spʔ).TisPNK`i%Svs%SX)αvSK;EI K;EONdii2sP1|2fyqy9or}2Sɘ΃}}2SɘN'c:o'c:EO1ɘNdLi2S4)zi*S>N/K;U)ci藏_>v˧N\)fi+lvSNqJ;e*y.˥2Mv4U).TiLS"OhS\NqJ;U).TiLS_..X).W4!v&K;EONf\)z,Tiȳ3.=Mv&K;EONqJ;ejtqJ;e*iS\Av4U).TiLS2Mv4U)K;e*iS\NqJ;EriAv&K;EriiS4Y)z,=Mv<}NdiiS4Y)z,TiLs3"S\g[v;NciSWv^K;EAv^K;E1S4Y)K;EO2Mv4Iv4U9NqJ;e*3y":sP"8S3Wgx<|A>isGbH)m*lS<6E:lSg6E)M8lSM6E~6M@:lS49Md2).}90OSVMq>vP͛|ȭܼ͛9Mq>;WMqn篫TLS͛2szyS\Mqi2sPAg N3a0S=qsՠ0SL)g Aq~cw0qdi2S4)z =_vxqyT)%+oWHvxqy,/k9E_u'm4E8Aiq"vp4iq"? t?k)oiAsϩ)@i&49oTbQ31S)zLySLubQ31S9a_[&f.pym7)fYeT֦ț%gm~b֦xgmYr֦%fm.u֦wfm^<6:kS\|Y2Mem4:kS\;i._vE4Y cm`gmns0kStY]`֦F6EW])¬Mefm~rb&6_/cc/ѱ|LYt#/| '6_7yLY|ļ?dbދ6?ƬM{a&ƽ0k61Yc 67ȬMd1emb 61nYcӍyļAemMnT& 7 6 6 6 *u[uSuKy2<٘7,CT! 
2k_q<,Cd& 2k_qĸAfmbWc 61nYǔeqL*ǸOkbW[#qL*ǸOkb'51tMlrdǘqLĸAkb 51n7tMd& 2]yL7tMd& .LļAkbޠ5)]yJU\t̓L}2]StM/5Tqdi2]S4)zL=Mk&5EO2Mk2ldW|W隢_!kis38TU\)isȏ])>iu"Y҉ߜq,ĸOtb7c'K:?ƒNdI' Y҉q,ĸAtb K:1n%7Ȓc*ĸAtb K:1opydI' UyL%7NTI1tbޠJ:1oP%|1tbޠJ:1oP%7NTI'6y*;WAt݋ٹ"7ttJ8+sJ8t*S+E^yNi*sP)2Mt4+)Ji*S\NqR:EO|IJ虰SL)G, ;;ŹPL)z&B3ag:;e&΃=Mvv&;;U)GIϩZNq>Z\~N}uWNjY)ΧYy/ArY)d-8r.z=Mr&k9EOZNd-iS\ZNq>\\ZNqiS S+ZN\)b-苗/^rSZN/k9HAr c_km^n>%}}c_nM[bw6%f>Ǹ111lL̼6%f>Ɲ1npL̼6%f>GǸ11ڔ78&f> kǼ)11opJ̼63ymǼctK1op'|n? N7[ySbcOxm 78> Nqcbc_]78&f^ Ǹ111npL|ﯮ3Ǹ111npL̼6> Nژ9&f>W_[#Ǹ11󱿿R1sL|3+>Ǹ11󱿿R1sLǸ111npL|3Ǹ111npL|3kSbc78&f> .cbc78%f^3Ǽ)1ڔ78%f> N})11opJ|3Ǽ)1󱿏|3MySbcqcbc78&f>qqcbcymJ|3Ǹ111npL|3MySc78%f> NyScBsb 3Ή/ǜ3/z3_Sb >6}l┘¿O4~!914)14.\91sqJ|2)14.Ӝ3_i/=b__¿S__BPBP)/{ |߇P̋c( >4bC1P/\~)眾59b.NߚB_c( >P̋c( } /%8bЗ¿BOs |9bC1_i/4Pzc( iN/T.ӜB1_Ls \54LsL|tЯ혘Bcb yjN|_11~m33_~L|߇/N/914.Ӝ3_LsJ|2)1<5'fp攘eSb┘eSb iN/\,/\9} in7Kcb =11昘BOsL|!̉/4)1<<3'f3_i/4zcb iN/\9} in7Kcbv4&fp攘eSb iN/\9%fp攘eSb iN/ 914.Ӝ3_LsJ|2)1<5'f^3_i/䁯91昘BOsL|9&f3_sb =11昘BOsL|9&f.N/\4&fpДeCSb 11ИBb3_ /zCcb┘BohL|D뜘B3_i/\9%fpvS8&fp攘8%f뗟>3_ȧs v\3_ȧs o|!/䱝?|Z;gOkܟBߙ3Y9bs( y4b.N/}{es ԂB~5`-/܂B޸-/9`.=7^؟8g s ?B/\|z|!|%A4i?Bc y?4ip9/qWo84_Kp|/1NU4_[9N8.\;i.Nq/\|w|2)NsqFe˗r8ks i.̝4_Nƿ/>p)_s ?\4_^B382iu1N0N9N 1ǏflcE|Qk1ݓ1ݑK.6_q[lӽXlbޙ41Lqc&Ɲ1NyLqw8Ml~q<8Ml0 2NyLq78Md&6dcޠ41oPqtsy1y-y)y%y /ļAǼA ǼA ǼAibޠ .8 *^qĸAibc 4)qĸAibc 41nq78Md1bޠ41nǖAhb 4 1n7Mlqd& 2@p1hb 41n7Md& 2@q ĸAhS& 2@y 41opyd& *@y <MT& *@O<MT& *@y 31oPy ļAh~7Md& 2@ϳĸAhb 4)@q ĸAhb 41ny`ļAhbޠ41oP7Etȟ"Fh"Lh*@Su^ύy9@SύT8+s8;Mq>XMq4).TLs9Mq4e i*@S\2M6f1SO-dc8W}1S\(A5f}1S[ 31S[ 3U)·˿J1hq՗򋾐X)Bb)8-Bb)AbX)Bb) RLd)iS4Y)z,=Mb&K1e*e*isP_~..T)iSkRLѯ-K1E}r)ז_[bkRAbY)g)8[?RL\)g)LS2Mb4U)K1e*isP2Mb4U).nX).ԗ4%b&K1EORLX\)z,T)ȓ,.=Mb&K1EORLqJ1eRpqvRf2f*iS\RLqJ1e*iSq+b4U).T)LS2Mb{{c's/1|7}2k̽ĸO^b's/1n7Kd% 2{q̽S% 2{y=7#{yʽļA^bޠr/ǔ{yʽļA^bǔ{yʽļA^bޠr/1oP ATr/1oP7˯1{q̽ĸA^b,1n71^b s/1n7Kd% 2rL7DT% *{yMĸA^:RѹXM^.*RƊޜ{)ί]T86VEKq~mr/"<^4{Kqr/eE^4{).TLS2M^2rQ(d8<{)d8<{Kџ's/Ej{9EE[ 忩hKq~ 81 
|ї-E_#E_dˁі/F[m)G䋞&-EOіhKdi2R4m)zi*Rm-.TLSі:\\hKqi2rQіr-Emm)khK_[F[.*RehKї=--E_i*R\hKq-EhKq-e\TLSі2ME[4%F[4uLs{Xbi2R4m)z-EOі-EOіhKdi2R\hKqNinK\m).mhKq-ei*R\hKq-EPhKq-ei*R\hKV9rіhKW9R4m)z=MF[&-EhKdi2R4m)z\TLs6іE[^-E/і"t%0Rm)z \T%0RMі"_um)zi*R\=1R\h/y:R8m)Wv-Eі"hKs_9R.+NG[-EJі2ME[.*R4m)G[.*R䏀m)G[|bt'FG[;Rَ\PKν(Kcr/E> 8R{).[;R\ַvG^Vڹ2尰s/{)+K_!^|Xrs/+fu苗/^^x{)9޹"{)eLs9E^4νi*rq9KqrչAW^4ν]Y?ܞ80><՜?Yμ:Rt̽];a s/EO{|u@l?6c1u76_<|ӳ\lb/|혞bΔ{ygz5^bs/1cʽĸ3^b? 2rL7Kdr/1n7Kly1=!ǼA^bޠr/p{yz0yz,yz(yz$yJļA=ǼA= ǼA= ǼA^bޠR2yJĸA^b s/ydR1n7Kd%6#q̽ĸA^b17KdR1oP7Kl>kL#1nI|7 2tGxc .1nI|7 2kLĸA&]b .1nI7ȤKd% 2tqLS% 2ty}I7tyJļA%]bޠ.ǔtyJļA%]bǔtyJļA%]bޠ.1oPIAT嘒.1oPI7˯1tqLĸA&]b.1nI7Ȥ1%]b .1nI7ȤKd% 2rLI7DT% *ty JĸA']:RIRYM'].*Rʊޜt)/]Tҥ8TVEKq~좒."<']4tKq.eE%]4t).TҥLSI2M%]2R0Kq>_0Kџ ,џ ,f)Sa*|*uܰ(/EjW)z̫Ey̫=jU5*Ey|i2R4W)z̫=MU&*EOyɼJq*]2MU4Weʫi*rQ'&,f)Saאf)SaOa?Y.*R\>H,0Kq~,E0K7 Y4f).TLSa"]rLSa2MY.*R\0Kq,ec,e:[\=f1R4f)z =MY|a0EY|a0Kdi2R4f).TLSw4,Y.nY inY \f).TLSa i*R\0K>9R\0Kq,e i*RKPdi2RP=MY&,EOa0K9R4f)z =MY&,f).ܾ b^ϓy?OU|y?OU9Rs*EތW)eU.wG~0K0K[Y.*RC,Ea0,ey^Y|pLs90K d{/+^)I핢l/r핢OW>^)d{ȇ|W|w{2堩+^).\R\+e˙SW..N^).\Rtg9{zpg9}zp g9zp 핢sl`{+EG^):?7 l36b/spI+26_(/|516_R<|!ƼAuYbޠ~]7.Kd嘺,1n]7.Kd%6H2 e9.Kd% sT% rL1oP]77G777AԣrԃrcrT% GpLO1oP=7Ad% ςǸAvYGeqĸAvYbY7.Kd%6qĸAvYGeys Ƹ3Wb+m;c{%ƝƸ3W~wJ;c{%Ɲ{ǸAWb +^qlĸAW^qlļ^J\ulļAWbޠ+1oPcjļAWbޠ+=cjļAWbޠ+1oP하7Jl~ rL하7JT{^qlĸAWb +M7Jd{+1n7Jd{% ^9JT" ^yjļAbܠ+E ^)Jq~ȿ+^)Χ΋gE^on7.R8+s{.sWgE^ynirQ2MW4+핋jiR\Jq+ej=MUq̫g^8/ ϼEUŹP߬+Χ/R-Ιf]q>u^ ✩(JѻeA *Ew˂JѻeAiR4YPJdAiR4YP)z,=MT4UP)/}iR\E-.TALS:[\gdAO?T.R‚Jџ *UP)SaA|**/RE.}`A *e*iR[F.iR\ET4UP).TALs{bALS21ɂJdAiR.=MT.R.=MT& *EOɂJq *e:[\=frq{bALs{bAłJqłJq *e*iRKJ.iR\Jq *e* *YP)z,Β *EOɂJdAiRt.=MT& *EOɂET4oXP)dAϓ"߉tAϓ?OT:RC*EީW)[iU.g?a" rgMY|6qgY^<,f)a0KqYra@,e˙^Y4f)a E`,E>9RaEX:R0K',E_ ,E~0K',eISY.*R\6uLSaˉSY4C\\:R\Ϧ 26?}6]MWgt-~6]!B,y5Y>fD%6Y>fD3naϦt~ "70Kl| "70g!78Y>0Kl~p,yCg78?0 Ώ˟ys3op~T?ϼ978 bg+cq| "tP3na+D3naϸAY> "70gAϸAY>fͱϼ978Y>0g+0g,qg|6;C3 aϦqg\C3 aϸ3Y>fl: w0g ,970g ,970g ,y˽aϼ^0gf\u| 
aϼ978Y>^oaϼ978Y>0gflz3op0gf,fD3naϸAY>^D3nafD3naϸAY>fD%6Y>\9W[.Ӝ-8es2͹R?\9W[.La#R,G塿0Cfy/<,ċs!-R <=a' pfyȷfyLs<\9Ys29pfyLs{B29p$0COa&,=MYYz0C0COa&,=MYz<\9Y.Ӝ>\=I!RܞfyLs{B=I!p$0es29pfyȗfyLs<\9Y.Ӝ,ia|]Ia ,=MY%Yz<4fyi"D!^R塧0COa&,=MYs塧CQPyȗUPy/< *GA塿( ?TCɨCcCPPy{"):y_g(uV8N7mJf)uTNSN;:Y͑+p4GRO|P$!?IykIyERZA$r,'kIy\+ZER.\*pIy'cERYJq<3:(D\NrЗ :(} З :(:(:(} З :(i4Ý<\9wPOuP.\xR\NyprS.tP:C~' ?[g9ypgˀ!tP:vP~ȏj 7 *_;\lK:6bW혾h1TP9Jlc|LyBqlcU/cz‹͗pTA% fA% YP97ȂJlqgdA% YPq,SA% YPͧi; UP9G7JvvԣvԃvTA% !; ; ; U8g7@TU YP'c *Tq,ĸATb *DwdA% OtǸATb *Ty*ļATbic*ļAUb +)1n|*6-XIͧbc+)7+)1nJJ[b%%6̎qK1XIqĸAVRYIqļ~JJ\gļAURbr?c%% UIy7#UIyļAURbޠ*)}7J1URbޠ*)1oP7JJd%% ͉qĸAVRYIqĸAVRb +)1ncļAbޠ*)1oP7JJT! R_+JJ_WR rE~ \IJJq>Y^_+bt%8 wQ\W+)䊼,]I).T%*)ei.+)UI).T%LS2MUR4UI).T%zEO|8Vc_ײRъq/$Cs h(nd= h(ίJRſ zHd=iR4Y)z=MC&!eW%4U).T=μiR\zEy-.T=LSAB.*Rז \T -!Em)._[B|uƁ/{B)g ׸)%B4).T !e i*R\=,1R\@Hq@Hd i2R4)B&!)B&!EO@Hd LS2MB4%B.nK inK \).).T LS2MB|ǁ2MB4).T LS"q !EO"_q i2R4)z =MB|9ǁ@Hd i2R4@Hdȷ(G_>f>1qQS+"_sȗ(كKE%"AEQKE5Y(H<|T?y<!@EBST B|pq K @Hї !\:R\u Ls9>@Hq!E>S;rudrhб/:(r`x:|wˁ/:(;t:4u\\:Q\'trб2LcEg3u}ʟ,Kr?l :nN0Qtu[wbu,' F|G_E?ylAl1}cW;6)&pLOk|sZlcޠ21oPhGd# 2qL7Gd# 2q|S# 2A= *87GԃzczCz#zT# < < < *u,cޠǼAq|ĸAf>b7DZAf>b7Gd#6Iq|ĸAf>b7Gd㘎ǼAe>bޠ281e>bޠǼA54(o<iQ\E]-.T磸LS:Z\Gq:e܋lg&;E1G_[v>ڲQז|;E_|}ٳQeGqQ[4|iQ\Eu>4(.T磸Ls{Xb磸LS2aGdiQ |=Mv>.Q |=Mv>&;EOGq:e|inK|\(.=,Q\=,Q\Gq:e|;e|iQ\Gq:EAv>&;EGdiQ4(z|v;EOGdiqQZG_:"Qu/jE_u֜kEF?wsȟQoRܸF?ws_Qo(6nnk77Er͍"mFqejniqQ͍2M57 Gr\TȟD8Q3E>8Q䃋E>8Q{rL G.A >iW~834]N=4EOf\Jq*E>SR\Jq*ejiR\E5T4P).\亡rq9Jqr0 2l*ej*P\K(E%GJѣf \B){$P5K(E%GJ?p QR\t Ls{e줸o{e줸o9[IѥNn0vRtgy=wE7~S'~c'E7;)ŭI?j{L7@Tr I9LJ{a&%6q/̤3)1LJ{a&%6͎q/̤ĸfR)=|4; 2Iq̤ļLJ\Y̤ļAeRbrb&% *IyʤY)IyʤļAeRbޠ2)7L1eRbޠ2)1oP_c&% 2Iq̤b 3)1ncʤĸAfRb 3)1n7LJd&2)1oP=7LJT&% *aqΤ;RRc_gR.*ROI)/5^T&8XLJq~2)"/KgR4ILJq2)eLEeR4I).T&LS2MeR4IHq2)EO \(ί'*ŹP_++'֋P)5/A6Ts8{ /A6Ts8XCP)zl=M6T.R4P)zl=M6T&*EO 2M5TejirQǂ4P).TC墎iR\Jq~*e:\4P)kˆJ_[6TڲRז "_qC|mP)gC˞ "_rCWlP)gCLS jiR\JqĆJq*e*EO ɆJdCP)zl\TCȗP)zl=M6T&*EO 2M5T4P).P=,R\=,rq{XbCLs{XbCLS 2a 
2M5T.R\Jq*ejiR:ndCiR ;n=M6T&*EO ɆJR4P)zl=M6T&*P9Ik;)-c';)Bb "}sȷ;)'܎nN}c';)'܎Oű"/I|q_8vR/_;)WN4;).T줸LS=c'E;)I?2pȇ_N|p' N|p;)ױ"0;) ñ"vi2vrQ"vi2vR\Iqb'eIqb'ei*vR\Iqb';).T줸Ls9Ylc'eZN4i*vRedigRn3)z[L6=" rϤݲgRn3)zL-{&NySEgʣLy}"gy:LYN?|,gkC0Qt)Ly\ Ly 1E@b?czPͷ|׉7|ωͷczD77Hɯ1qS$ 2qxa$ 2qĸAF@bAcļAE@bޠ" yzyzyzyzyļA=ƼA=ƼA= ǼAE@bޠS$ @}ԁ7Hd$6fqӁ7Hd$ 23ǸAF@b # 0sd$ 2rLcޠ" 1oP|" 1oPcޠǼAE@boH{aq/ĸF@b # q{a$ƽ0rLH=7Hd$ .,F@brb$ *{# 1oP7ܳyS$ *yļAE@b1oPcļAE@bޠ" Hd$ 2ŸAF@b # qĸAF@b # 1n71E@bޠj1oP7HT$ 2)/w/# e"\T8./y1:R_Hq~ٯ+EE@~E^i*rQ2ME@4i*R\Hq" ei*rQq2ME@4]BHq>^*/n_BY.Vq>]BHq>^BHq07XRBHq0s !EOBEB& !EOBHd!iR\BHq~MLS2MB.kq !e*\ԡ2MB4U)o^T!LS^4ui-R¶Hџ "E*l-R:n-R\>Ea-Rcn"E0).T["eji-R\=f-R\HqŶHd[i-R4)u E&") E&"EOmɶHd[LSm2ME4,E.nYlinYl\).).T[Ls{b[LSmji-R\Hq"ej ")zl"EOmɶHd[i-RAn=ME&"EOmɶEE~T,]/Ep"E/E>"%Ewp;Qܿ(_iQ\Eqe_\TiQ䷵_E/"ܿ(_A/W"tܿ("tݿ(_A/&տ(EqfiQ\Eh_iQ\Eqe_iqQ2M/4C_\\Q\,uLs9\EqE~A/&[Eo-"tȓ(nQ!([E7>-FCh< ?|dg;>5gl4}|~6wL7FCT cޠ 1oP1oP1oP1oP1oP7FCT! hyj4S! sy7FCd!65ql4y7FCd! ϚƸA6b  id! pLcޠ 1oP| 1oPcޠ;ǼA5bޠ ?\`aq0ĸ vb; g`!MpL7C`: 1ޗag?c/pl/; 1oP7ܗay0ļCT: 1oP7CT! 1y0S! a5vb ; 1n7Cl~% a8Cd! 
aq0ĸAvu; ay0ļAq0ΊPY_w.PΊa(/]T8tVCq~: "/Kw4aCq: eEu4a(.TLS2Mu4aAq: e0io& ,rq~8z}Xp(gDk_>Y䢯l1AYp(nE& EO*8=M& EOɂCdLS[q e*8\2M4Upe*8iPCqihaq4u E*,8P‚CoP‚Cџ SQ7\p(aP *8iP\CqłCq ec EOɂCdȗ-\p(z,8\T7.\p(z,8=M& EO2M4Up(.Xp=fP\=fpq{bLs{bLS212M.P\Cq e*8iP B.8diP[B.8=M& EOɂC P4Yp(z,8=M& HC"t" Eב"OHEEɯ# EБ⿺i(4K G# Eڑ2ME4i(.TLS4=?[|N8of|yM&&ܽ"'ݛD߄7Q{EDf7QM=&|toݛ(zMo{EOM8DdoLSL4՛(.ToI&4՛(.ToLS2M&4՛DqzeY&..Gdݛ(.\Nɺ7Q\uoLS"qo {EO}hك 3{pqS?Qco4<Eg\3{P)(g>_$e>/p"{t}6}ҟMtl?>Ϧ+ClcG 6csltc=Ϧg=M7ϼ9{78ȯ!{7g 9{7g q| q| "{7g Mbs3op| ك؜=ys3op| كϼ9{78g>g=ys3opg=q| "{t3nك|3nكϸAd>=l:7g MG;?=D 6/ysX]l| Nj?şys3op|6g|3op@x5d>v~ "7gϸAd>Y͙ϸAd>Y͙ϸAd>v~ "[,| .Ad>grDf3op,| .Ad>Bl,| Ιϼ97gY3 978g>5d>YDf3nϦ7V>YDf!7g 2 q,g 2 q,g|3op,| Ιϼ978TfCYx/2 kB񵱇Ӊׇkcy1*pzm8gN=鵱Yx862 iΙYxLs,<\\,esf29pYxLs,<\9gs'29pYxLs,<4޾4<?^y85.n_xNGN<>Qㇾ' p:pzi1@Chx88p:_ =Mz4^@CO& 9Dᡧ@esfes29PO>\9.Ӝ es29pzi8.ӜO>\9|Ls4<\6`ThxO|HThxOThx|*s!@Ro4< hx i΁4,.Ӝ inY4<4hxi"D!_ Qᡧ@Ce4<4hxi"P =Mz4<\9.Ӝ inY4,.h(nY4<\=f!phxLs{B29P i΁4@es292)Dᡧ@CK@CO& =Mz46O'&Æ 245sSW(MsnjԈqljs1nM7Ȧ1%yjjļA55bcjjļA%y:KԈyjj31oPM7FTS# ,yd7#63ƝƸ3v3b͈qgfĸ3v3͈gucǖ{xT7# {1oP݌7͈yfS7# ͈y˽݌7nFT7㘺1oP݌7nƯ͈qfĸAv3bnFd7# ͈qfĸAv3b 1n݌cfļAЏyfļAu3bޠ1oPcܠ="fp78X݌=|8XnFq~="Hw3{(ίyYQ\nEu34(.\.Ow3.Q\nFqe7iQ\nE4(.T7LS݌2M2QZ,Ë7̋ۧ¨Fq~78S .SaT8\\>E5s8xQojrAq~58|QQɨFdT4(zj=MF5.*Q4(zji*Q_ +.TTLSQ:Y\FquڳLSQ2ME5[(.i2M,.TTLSQ"_:Lej0QGj0Q¨Fџ E*j E0(EE5a0Q Q21Q2ME54,F5&EOQɨF8Q4ո2MF5&EOQj=MF5&eji*Q\=f1qq{bTLs{bTŨFqŨFqecej\TTLSQ2ME54(.TTw8ȨFdTȗI(zj=MF5&EOQ"_mrTi2Q4(zjE݌"nF't7㢺E݌"YnF/s7fK w3E݌")nFqefiqQ݌tȟQ{.nF:Qχ\('.nLF..nE|ō"Fdq_4㸨E8S9TrGї sE_qGhq} 2Q%Gї sEO9"М(zq\T(zqisqreqseqi*Q\Gqreq\\(.\(.\n;Q\vLs9GqGdi2q}49˿Hq}<93qq4l`å-Gѱ850Qt9|<[s8be?A?|96O86׏[lxc{L|K?7|ωͷcNT# FkqĸA8b sǔqqĸA8b s1_qqĸA8b s1n9|4r1oP9718bޠr1oP97GT# *yqļA8bޠr1oP9718bޠr1oP97Gd#6qq7Gd# 2ǸA8b sxpd# 2qLGcޠr1oP9|4r1oPGcޠǼA8bޠrhfT# *y:I΋21^Ոq/jSV#ƽ0^8F{aV#6q/j[Yj1EYYjļŬFTV# .,f5bޠǔՈyjļAe5brbV# *8FTV# *kjĸAf5b 1nY)ՈqjĸAf5b 1nY7ȬFdV㘲1oPcޠ1oPY7FԹ7Fq~MȿE~(ί 5pV㢲(rq~MȋYEe5kE^j/*Q`-tVLSYji*Q\\j\TVLSY2Me54p(.TVLSY 8i*Q\Fqe 
8i*P4YɔGq~8J/7;/nK`ʣ8t\)/|6q(G|w>|lvgw>"xGdiqQEu>ST:Gї ;E_| G`|} Q%Gї ;EO(z|\To(z|iTuq:e|;e|iQ\Gq:e|\\(.\(.\΄Q\ w磸Ls9GqGdi|}枝˿H|}O޳qq?,"?xր` ;Ew (Gq>w?|(6ұ>96̱ rl#͟plәOo:o9t777Gԍq|ĸAv>q|ĸAv>bq|ĸAv>b ;1u>bޠ:1oPc|ļAu>bޠ:1oP7GT# y|ļAu>bޠ:1oPc|ļAu>bޠ:1n7Gl>8Gd# c ;1n|^9 831oP7Gl>+zL731oPgcޠ:1oP|V4 y|ļAy$|ļA5:^G;{sg#ϝc˽|N=ϝ?w:b9=uxa# .:bޠzyuļA:bra# {z1oP7^ǯquĸA:bkH1n7^1:b {1n7^Gd# qL70@T# y ĸA:E( ^Gq~ȯ{(獋E^uw/Q,t8 xQj{eu\TLS2tzeuiQ\JCqzeu\TLS2M:4(.TLS^GEO/Um 緙KAы4)o3痂EO6sd@i2 rɀHd@i2 R4)z \T@i2 R4)z =MD&"e Պ4).T@Χi* R\EO-.T@LS*ED4u>LSS4).T@w).2MO-z "E y2 RɀHџ'"EQHџ'"T@"Ej)VÀHѷD4)D&"EOɀHd@\)z \T@w])z =MD&"EO2MD4).=1 R\=1 rq{@c@Ls{@c@LS22MD.* R\Hq"e"e =MD&"EHd@i2 R4)z "EOɀ/rQ"FHu#!Eۍ")FHt#ȟpRO)'n+z7B!ejiR\F?Ϥ ?.I?rȟIKR7.IKRϤ%)g4.I<$EO].Id䢺$EO_4g1wrQS|;Iq)wR\|ʝ}e3wRs'E>1:wRIW6s'E_̝=MNfƹENfƹIqi*wR\I8wR\Iq;q̝Sr *w;Gm)wyJļANbޠr'mT$ *w;yJļ{Nbޠlp׼{Nbܙ;sg$ϝc˽|?ϝ?wNb1?wNb0wITr'1oP7I\=̝ļANbra$ *w;5Nb s'1n7Il~* 2w;9Id$ 2w;q̝ĸAN)wUyʝļANbޠr'1oP=7hEEKk8ZR_N,ohEEKrb%ċ-)/'^T87[hIq~o8qhIq%eEhEEK4-).TLSe2MEK4-2Dq%ei*ZR\2DqEOђhIq~չiN7Q/nf8$]_ *o^FIq~I8T_.z̝痤5s'E̝=jN5s'EOIdr'EOIdi2wR4;).T8V\Iqr'uLS2MN.4mqr'eʝW)/*wR\Ӵe:M[\Iqr'EIqNi4mqiGi/*ZRhIџ %E*^%E*0ZrQђo}`%EhI8ZR4-)z=MFK&%EhId䢢%EhIdi2ZR4-)zi*ZR\hIqh1ђ21ђc%ec%einYi*ZrQђ2MEK4-).-).Ti2ZR4-) GK&%EOђhIdw]-)z=MFK~ђnGK%-)'܎MGK%Eђ"hI?v_;ZR/_-).TLSђ%)]:[T.E-{AwI|q:%)G#wI|vȧwI].IdiKrQ].IEOђ"Yv|m-)._[EK.*ZR\%E_hIO}3ZRehIї=%EOђ"qi2ZrQђ"qi2ZR\#ei*ZR73i*ZR\=,1Zrq{XbLs{XbhIqhIqrђ2医%e1wGK4%FK&%EOђ_t%-):"hIFK.n1f_<g--):ihIQ FKZ0ZR/GKbe?A?|96O06tߎͷ|c|[9;vlļ3ygY$1aw01Ib$1na70IIb $1na70Id$6>0IT$ *LrLa70IT$ *L&y ļAIbޠ$1oPa70IT$ *LrLa70IT$ 2L&c $&q ĸAIb $`vd$ 2LfǸAIb $y ļAIbc ļAbޠ1oPa70Il>&y ļAIbޠ1opa781Ibޠ$1oPq7|Q2MN4ȩ녤IqR'eJ\TLS2MN. Q\IqR'eJi Q\*Ddi2uRߞ.zL\T ;(׋A)/eųR_.7_^<;(Jыg ;(E/JdiR4AJdiR4A)z=MvP4A)ίiR\E-.TLS:[\Jq:(̋inqiR\JR\e:[\AvPt@iɂJџ' *E, *E,yrQyR\>OTհR.. 
*EjXP)z,=MT& *EUJdA *EZJdAiR4YP)z,iR\JqƂ2 *e *e*inh,irQ2MT4UP).XP).TAiR4YP)UT& *EOɂJdAȗr\P)z,S'E۩"oEN S'Eҩ"}I?vȟp;uRO:)WNN S'eJE~GJ&\"sgwP.hoѹR䳉;(EJ?|lm6tt_l;gM3l>w6߬wg *wg *q(|{ *q(| 7g *M'fcsA3op.| \P *ysA3op.| ϼ򙿞s 3=gz5D 3~=Ql:i@bs 3^Ө|k5xMt:3^Ө| t:3n5ϸA@b ϼ78@>Ng|B3op>!78@>gϼ78@>g|B3opyF 3op>!k ys 3op>!7g j qj {g|B% %OwTK>^1l:TK>jgix=ZTK>垌jgP-Ւx=Z78WK>{2%ys3op'Z78WK>{2%q| Z7jg Wq| Z[ɨ| Z7jIl| Z #y/sa3e.g\{y/s #7  #Ź0p:9pz!M\y8I+Wakq.<^r}GKŹ0p\yLsT)\yLs.<\9Fso2͹0p\)ν4esa2͹0p[xLs-<4Qyi0pz/Hq-<4{ AINokmmvMpzQmԇV&y8( $}MD䡯IZA䡧6COm&$ŹMD䡧6COm&$=MI.Ӝ$.Ӝ$im|2͹Mp&)G_.Ӝ$im{ŹMp|2es2͹M/lMp|2es$6COm&$=MDD&ED C)~<\sDߞy{""}O@D$Cy{""=MDD&EDzCCO&""=MDDz<\9GD.Ӝ#"inOR')DD.ܞ)nOR<\=I!"pyLs{BD29"R#"i4eۓ""i&""=MDD EDz<4yi""DD!QD䡧EED'<䯲)C[!""9"?VD!C[Ͷ""Ad9Se9_{e9yчIY^Y|^!GR!W!orQlnv<\>fC>w4;nfCO͎&=M4;L͎&=M4; 4fes!wP2͹p fGq{C2 ͎ fe3i.gxLs9fe p5;.ܞxiD㢚}͎/}͎>afGqk9񐧀xͭ<tp͎͗Qlc}Lslc?G|LwI76~cc "6b^t60kloĸ7b t606b !6b ވ^Fd{# ވqloވyjoļA7ވyjoļA7bޠ1]Ј뢆F_54bׅ |V6k cjhxmɆF&l&1n |V6 81oP 7Fl>xL 71oPcޠ1oP |1 ЈyjhļA׎ys21oP絏Јy:ЈqlhĸA64-lhĸA64b t^; y7gyNf#ƿ;6;bϪxϪxfG/c㘚1^lvļA5;bo1oP͎7fGT# .qlvĸA6;bM1n͎7fDZ6;b 1n͎cjvSC#O cjh1~:k.a_t Jjq~ٸQt_T 8Y%E0E\(ojsU(Jp Ls\¸^*aiQ\E/.T LS%:_\FqJe*aitqN=M0&KUࢧE/z,a=_4(ί^ܮ264Q_]elh線5Ѣ264[E_elh}q 264(zlh=M64&(zlh=M64&EO ɆFqe2M544иejhiqQ'?4(.TC8xQ 2M,.2M544(*744uLS'?48ȆFdCiQ4k< 3Em(kLEe2VU&|m%f2(g& 3E_d}3Qpd3EOdҍ3EOLFd&i2Q\LFq2e3%f24%f2.nKdinKdi*Q\=,1Q\LEe24(.T&Ls{Xb&LSLFd&ȗt(zd=Mf2&3EO"_q&ݳ(GY>,Q{E="[E?vϢEgQ䏢ݳ8^Ɋ"tyE~dE:YqQ%~Ɋ"uۛEɊ"LdENVi*YQ俼NV=M&+&EOɊJV=M&+&EO󟿗i׸a3vQEY][a+vqQa+vQ\>l.(1˱"(>Edi2vQ4(;.&c(;.&ce:Z\\EqbE~Eqbec7.47..nOo]inOo]i.(.\[;vQ\v좸Ls{zci2vQ4(>KEtg ϕKEvg ,a\ ,ay%uGq[DW"%q{pyg/ն$(DV!+ $A?:-m0 gb!l lf f aa6 6?La9sð6a- ݔ[Cq #Np #Np 0~a!l!vF{0 0 0 0 00-aA-aA-a%0;%0;0yZ \\0:υô0F0F0F06at at asF0F0q0;0;;L avPuqT]f0f06avP avP avP avPuq\?0fU0;0;8ra"ra"ra at at .\\,ϟ?l. 
)g)=2E\d{e0~ϸL6 ue[~vp"-)2E[.S2E\~vp"j".?;LfLf\\\\O*A.SA.S2EaZ|Ea(rEBB_\a!Q/ /B| ssx i s jj}pQS Z_P]xᢦ 5pEM/.jj}pQS 5ՅZM/ZM/ΧZ &.T^h5Ph5p9P8p9P诓  K G|DЗ  ӻ9PK rЗ  } rjrjrjr4C4C4Åf(f(f(\4C|4pQS fPaᢦ 55pjEMM3.jjp>5w 5U.j6,\4Cᢦ yj 5U.jj@N3ZMN3ZMN3ZMN3Zh59P-.4C4C4C8P|o5P˞ rЗ= }sGy 7).!ZM.!ZM.!ZM.!QKVKj ^B(\B(\TZZ%EM-!QKZB(\ph%EюKnv\B(\PĶ^B(\\z[/!.j.ɭ 5G;.!ZM.!ZM.!BKg.!\Bkg.!w…[%B6'^B(t [ ˏ'd\B(%ك -%8L ͗^,@0݀o!^|a-0z6_a~ua\;ε0\;8LY|Շ}|͇͗|AwAAAAaZ;Z;Z;8LkavPaۿgwZ6WatatasFwZpV .W .W .W /r r l W W SyfZfZ6Wi < *OV V /j j j < s$W ôZfZfUAAA:W8uj x p>:WHIp᪉ ܺwG/.j*C.\@ᢦ.T\%EM-.jj pQSrᢦ2B%B%gʐ & &.T\h59P8.p>9Pog Ån'g spaO^ }}r&'gL@OL@L@L@LəBəBə 5Ph59Ph59PNᢦf 55pʷEM.jj&Bo (\L@|B.j|+\L@ᢦf 55P#- (\T3WfpQS3r&jr&jr&jr&jr&jR3wؿp)/\C#c;ؿpm< }2/ؿ'\cB_/T_C &cBؿj2/ 5.j*/\y_=0/\y_=0PwL߅3 }9. ٛB~ᢺp./)\T.d\>̅>0_ʥ} y}:`PK!j0GB^+ 瀹} sᢦBp\h50ZM̅V &Bɀj2`.SBkºКn.Eͅ&/T\hpDus5a\hMX7us` &Bɺjn.ïBɺBͅ|u\h5Y7.j*+\T\]7.jn.\ܞX7_=n.\ܞX7_=n.\ܞX7.j.qEͥs\$v 5>ͅVusd\ȷZ?sB7/\~й.t.e$ &]t!O.tKsM?\겋?aK6_a 6·e_ _t oasz6_a6_OwװJ [c׏ s_?&̇)= /а /ϰ /Ô0IdF0$2aL0%avP sT|08.1)KLD&.1)>LIq]bRF%&as"F%&as"FA&Ň) J젒99LIqTfiA%avPIq؜ȄA%avPIqTRfi幎IqTu0;8* L 0:Ȥ幎IqdRF82)L 9 L Ô幎)r؜eeז)r|"-S0La|m"9a0EÖqLR0;cf"gS0:92EL.2)=ˤ82) it؅ ap|:pk.T4\H ef!5p4|"¹,}p\f^(pQSpᢦ %.j*.\T4\hpQSQbᢦBhj2.YZMF*J, & %Z)J<os(ەܸ>ƅQ +qS{S8,ܸp>Z+ql27.ܸj27.̍ &sBfn\h5ZM*7.̍ &sEMƅAEMƅʍ/T{SܸpQSjo 5.j*7.x]ܸpQSqᢦrEMƅʍ yĹqᢦrEMƅʍdn\h5ZMƅVqdn\h5Z_-j33Zd\/*.[ʅv{Xb\-CB_ yȡr!O8T.ePBʅYh5^j22-L/TVh5?8U- @߅e[ ˞jUTQ8+ep>kZ˞j/{@-ej@-,P & BgZh5YZM@-,P & EMqEM*P/TqQpQS*P 5U.j@-Oj]pQSjᢦ EM*P yRjᢦ EM*PdZh5YZMVjdZh5YZMVjVK& {pnK,P eZ- B?rZ.P }ٳ@Pj!O!@-eЗ= Bj@-\TWͩYInJƻP Εd!w%yN+BJƻ,:WAJƻ,\T%Yț+BJj,$/iBJj,$ y3~c_YhWZ0iᄇpL}e" W.,\S_YhWZ0iᄇj,+ &Bɾ+ & WA}ed_YpQS}eᢦBv_YpQs{bd_yȾpQs{bd_yȾpQs{bd_YpQs{bd_YtC+ 5t}e!黯<}ed_Yȓ+/ܞW@W^t_Y}ec0nBt_Yc:sFBMߞMߝ{I.IM΅ͷM}l=h?6IlǦǦsM?>6mÏMg?6h?F~=D{l{ ڢ!_[4Κ-°!_[4kc|m~l:k1_~l–ch?F~! {!Mgcvp>fcvpn?fcY!!{!\ ~ν A4hÖ,4h?F A4h?657h?Fz!°!ߋss%4Kh?F~l>F~.!o}.! 
%4Kh?fh?fcvpcvpn?fh?F-_w?Ɩctr_awA{)ܗ~Є-6RLIT,>8eaN b) {pCErReNY؃YYS V\,>9.j⃋s.=h5Q,>h5Q,>8M|jX,ӥ&&9]zjX|j"g|xN' {rGtp' g|xN'=9ӡ}O@ 3>{r&r&r&r&r힀AAp\9.j9ニsX8.j9ニsX8.j9ニst0p\Ԝs5E9g|pQsA.j9ニs3"g|j"g|j"g|j"g|j"g|j"g|j"g|j"g|j"g|j"g|j@(1Q!J|!D\y2IQ⃾x%Q<(A_ŋ(1|(ί4)33۟,;Mkp|3y@M9ー(g|׊rp(g|׊rpAA^+䵢AS )g|q.j9ー+g|j"g|j"g|j"g,?h53>h53>h53>8usCCC 3>6.9c3>6.9 9 9|NPDDDD a3>h539|UD3>9.j9|U3>=1"g,ܞ3>=1"g,ܞ3>=1"g|pQs\ܞ3>H\\j$ݕr@ZM2PX=1"gP9.ː3.?3> 9ː3nryT9s0FA) Ba 젢0;h!* h!* B0;h!* B0;0*Z  C0:y!`dF02  C9  C~B0;0avP!`T6G<8d@FAataAataA)  0:Ȁ0l0;,f@fg10:ȀH0-#0?Tcsytaa]bx60-3atr?sX?bUX|BsV8 sV`¹ +U`ׁ¹ +`pQSbᢦEM 5'ZMVb|j2XPqRdXh5,^8j2X,?jM:%cX8ʻp0u,KԱp>wv7aX藌c|*wBM:n@VcdXh5:ZMV݄cdXh5:^Աj2u,L 5mj*u,\TXBJ 5:^ԱpQScᢦRԅJ 5:.j*u,\TXԱO:.j*u,\TX@VcdXh5:ZMVcdXh5:ZMVcdXh5Aj`_' ,$B /TXP<< Sj:u,ĩc!2z!&N y9uP Ωc!9u,:AԱ?:SEMԱj2u,L &S upj2u,L &S¹E㊚0,$ &#BH&$ 5I.j*Pdᢦ"EMEVdd$YgGVdd$Yh5IZMF~6a$Yh5I^H͎$ &#EME$ 5Iّdᢦ"EY۳&#EY۳&#EYdᢦ"EYd[9,\\+G|GH@FVd! GnϚ$;Br ȱ,t%Ѝ ;%Ű6mӉ6ow?a/,ffӉ4rY|w ?.;LaaYYY|c /Ұ=L'S0~X{Ƣ/l>TeE_a'xݲ +T' 0:Ȣ0}avPE_T6>LE_TEfUA}avPE_\A}avPE_TfUA}avPE_TfUAUatE_dFY< , TA}atE_d7<}atE_TfUA} *젊9LE_TqFYuLdF?YuLdF?YuLdwj0/~ B+~ i~avP_\~N;l kj/0F/O~a{.3~-3~at_ܑX}Sg?lNHx?s%Xȿ+¹w)_֕ p' + ܕ`!/WsUȫǕ`!WsUKȕ`ᢦ*EMU 5.j) &+^d%xBJjP]Od%Xh5Y ZͿQB b/,]݇f_X8p/,;p> W S/,}} &Bɾj/, & Bɾj/P}ad_Xh5.j/,u.j/,\T_xEM /T_XpQS}a|B 5.j/,\T_X" 5.j/,\T_x Bɾj/, &Bɾj/, &Bɾj/, /y>@r>Awс݉Bޅ* {ո+l.TWn~V_ᢦZE_j 5]֯pQS_Y_Y_֯pQS_֯pQS߅j 56ʭ_! ;_dWl.ܞ5@|vZ6cZ67a-la7-lh'ld'luSZ666_aG:) ׮D,_;0~LcaڙD,l,+l>돉X9aJca͟D,l>FA&b) u<aJ젎A=JyT"fA%bavPG0;D,* J젎A&batXd"vD,2 LS"FA&batX؜?A&bataJ0;D0%bavPXT"6g) ;?& 0{-.9.9S>Fw]catX؜]cavp|,* fv*0~{ ,Of`a{2 s/1;l/1 P ,lXx_bҟs>VȯXBt>vT>V87:ܜ]s>V87:꜏s>V87:.j*+\T>V|pQSFᢦB|j2+ςZMc*( & mZMcVXߋl m۲Bp>zuvb[VhؖG.nRl m۲UM`[V8 ,MmYoRld[Vh5ٖZMeVmYd[Vh5ٖ]ݤؖZMeVmمj &۲BɶpQSmY|pQצڲEMej 5Ֆ]pQSmYᢦڲ|˅j 5Ֆ.j-+\T[Vp>RBej 5Ֆȶj-+l &۲Bɶj-+l &۲Bɶj-+l &۲ ՖZ8L XBRZ!dqVMN yZ!d\tV_/:Y+a;'k5I~ yqV[c un8vP yqVȟ Sб[! :v+OAn)حpQS[! :v+ &cBB@. 
&cBحp>?ۿoZjfr܅ -53BKL'Ý.R++\V&w2Ejer -53BLOR &3BLj2+ $Lj2P\!ZMfr 5.j*+C3EMerS*3 TfrS*3 TfrS*3EMerS*3EMer*+\T&W^d&Wh53 Tfr2-l~6Ua:濧Na*T6$X|͟'|O0_aGAaIݰ: ?:LW|R7^ ?:LW|R7 ,SfU|A_aIT|A:-fU|A_ai0;+ *NA_avPWTfU|A_atWdFY||gFY|A_ , 0:+l. 0:0_avPWTu0;+ T|AavPW 5Xf k`a5X\]k` ׬5k0z,l.5k0;|g FYu{ƪ0a ^o@ _;KTkg)ƯXܳXJY,[Y,V%,bag 30;ras\;2pn, MuvʴB~_];2p.77i]O!ri]O!/ui* 5U.jL+\TQjL+, [Veڅ> &˴B2BEVeZdVh5Y2&L+ , sRn7)i6eZ|N&2&L+IiBߤX&2@iVeZdVh5YZMiVeڅMeZdVh5Y]2jL+, 5Uο/\kSSeڅ* 5U.jLPeZᢦʴEMi U.jL+\TV2pQSeZ|pQSeڅ* 5U.jL;eZdVh5YZMiVeZdVh5YZMiVeZdVh5YZMiL+\kQ‚/ y5܅ y5\!nu W_v+O#w.T W_+/^]W 5U.j;5\!g]!o yctGwN(莮7Fwtr!o yctGWȟ 3]!f+lwt 3]dGWh5ZMvtrdGWh5ZMvt fGWh]R+ c+\VGWHBut ZjvtV]!ZMvtV]dGWh5]dGw:B?+ 5.j+\TGWȷ 5.jnO.ܞR.jnO.ܞR.jnO 5.jnO 5]pQS]ᢦ:BɎj+;WwtnO[GwsL Ǧ[>6}Ӗ}lcG-lcashSM?6}IԏM͡ǦC>^ǦWc!Afcvp>fc!94?fcvp>6DC94C }Ρ}Ρ}"4Dh1:,l>F}l:1:,l>F}"4Dh)Dh1:,l>fcv}Ρ}l ,cvpK>fcvp>F}"[~#BG1:cS1:-l>F}#BG)*G1;G1:o?}Ojas1~?}Oj_xcSC1~_}A1~_}ߗ^xcS1-:kr^xc!^T|8=WBۃS _ Eor(z{p: _Eo5QthA=8EA=8yEo.jۃs=9$.j=ɃVۃVۃ)&¹'yj"z{j"z+{&&&SV=_8r!r!rNǡ rrP ܃v܃qrr}{C.\A\A\A\A\A\p!{j"{j"+s&r&r5\.jι܃s.W8r.jι܃s.W8r.jι܃s.trp\Ԝs5\E9{pQsN<9rs.=9r \E9{pQצEo;E}N=_,*{p: AvQq_=_0*{p:" ϭpq }.jq߃s=9}?j?Y:Ϸy>!zO"y@~t o'KNA**ATU ~,ϫ?ȟ*OU秪hA>ک|v*䣝hAAAp>DUDUDURߺ-B>ߺ-g &B~B%Vld?[h5셿op*-ĸB_Hk }!1-[ .BR\{BR\[j2-k qmd\[h5ZMƵVqm!8-k/T\[wk &EMŵk 5ͩEMŵۓ1 'cƵۓ1 'cƵۓ1EMŵۓ1EMŵ*-\T\[j2-k 'cƵVskd?2l&lOܰðð?< T͟uCaجar!l0l6?l0UasFYAV r젪ܰn8⠪0;*7l.l젪0;*7l.l젪0;*7 *l젪0;*7 r젪0:*7 rSFY5Sd{0:*7 r0 rSfUAU r젪ܰ;LUnTQfUԅAUavPUn\AVFƿ'0=Y- ܰb *0UaVXZakUn\ZakmvaYކkgyƯm?jBZ)ka^=Ln,vf5ݰdF΅^b^b7oy˽nu,vrcfUb7 P0:BB…Ʌp|ƧFPp!r!\H\R,…BBvP ]" YᢦEM…* 5U.j*+\TBWh5YZM…aBBB%tVpd!|BBj., rPpb!\hXΧ.no,  Tۅ퍅pb!\8+C,  BXBj., & BBjp., & UZM…Vpᢦ Cᢦ EMƉEM…*/T!\BpQSp|B…* 5U.j.\T!\8)\T!| EM…*d!\h5YZM…Vpd!\h5YZM…Vpd!\h5YZM…V* 5U.j.\T!\8a,_ Z y6q!3𸐿vx\8R,䯇 ydq!8<.QDžEmᢦ .j*<.\Tx\pQSqᢦZMDžV4&3O!tN B~0d@&˅`r!?s|B~0d9Y.;W'˅|;J.*. *B|pq\WɅ|pq\דUr!]%Ur!]%Urd\h5Y%ZMV( &B*Ԯ yשp:3_p:3._zB_ } g.%ȞpΟ.T\\ KP= KP=sᢦzBɞjg. ̅V=sd\h53ZM̅|cឹjgP=s!Xg. 
53.jg.\T\ 53.jn/ܞ3.jn/ܞ3.jn 53.jn 53_pQS=sᢦzBɞjg.1/ܞ3ZM̅VLE}?p_O@yTIc`6N6g&l4l(S`6B6*aa6:z9T ? /ŰX͡Jd`FX:* X0avPTT,fXAas,fXAavPuT,fXAavPuT`fXAatud`}0::l XAatud`6Gatud`}0;:*>Lu8p2*0;Aavp2 SffAavPav8',)'? j iU!wC~BWB^n 󀅼 ܐBpC~"B^ n 5Ր_ȲpQS yᢦEM5䅋, 5YZM6V y|Tj!PedC^h5ِ_Ȳj!/l &h^c`~B Ђ10/(^Z0 {B p>Wh.m>態 d`^h5ZMVyd`^h5_ZMV &BpQSy|pQSyᢦ .j*0/\T`~EM態 #(*0/\T`^pQSyᢦtTᢦ .j*0/\T`~ Bj20/ &Bj20/ &Bj20/ /T`^pQSyᢦLiEƑB:iJ yiz!4t^88-/֝WN yiz!9M/!EM酋J 5.j*M/\T^4pQSiL &B4?noݭ]Bt^ݭ@w[/䇁 aB!v^G;wy x/ , $⽐OR./TS')|r^')6{M`^ȇ||w^ȇ|V{d^h5Y_:jx/, &B>x/?ϚwR+_|.!wH|R+_苗|/^xi݅j W|r񪕿P|ᢦZEMV|d+_w:n &[BVj/l Nǭ|d+ZBq+_h5.j/\T+_Vo.j/\ܞ_=/\ܞ_=/\ܞ.j/\ܞ.jP|ᢦZEMV|d+_χ_=/l &[ ZMVs ^SfކAavP~T6avPaA)h ǰ?_[5c0^c x15ư?lZ=~>aV1_ra{O*r -/- @_[ @_[{.O?96ar j .OA A0;A0;A0;A0;AhY @!-<8,+gJ`>{Wo |/(\C/(\C/(]B^/(B^/(|A!/[\pQsy4|AᢦEM.jjpQSJx 5ZMZM' & .T[h59_Ph59_pBBBjA|dBm>n>nGQ/n6(}6(^mPhmP8,}6(}6(Z.jj@nZMnZMnZMnZMnZMn\mPh5mPh5mp & & 5mP8*\ԶAᢦ .ԶAᢦ 5mp 5mPmЅ6(\ԶAᢦ 5mPm4[ᢦ .ԶAᢦ 5mp & & & & & & & & & & & .ԶAᢦ 5mPmeMߋ2^E(9"\UB*B!+x**B|'P3 ^E()V"W yzƫWHYyBx u<>GM t@Jy H!H!ȅ)\ԶHᢦE 磦j[pQS"SE.\Ӽ-R[h-Rh-R8-\Զȁ)))))p1r[jr[jr[BmZMnZMn.jj[p>sVmEMm\mEMm.jԶHᢦE 5-R89P")\ԶHᢦE 5-R89+\Զȅ)\ԶHᢦEHHHHHHHHHHHȅ)\ԶHᢦE 5-R8-\ԶH6߿}n@dI|Uz q q ^B)+/ݕP }{JJʅ &P &P &P K(VK(Vv]noЯ!.ipqH-Cm)\nKv[ R8ݖ!.ipni/nKᢦv[ 5rv[ 5RݖBݖBݖBnKnKnKnKnK!zjrB=w[ &w[ 5RݖEM.jjpQv[.ޯppQs{ݖ +m)\ޯppQS-.jjB.jjpQS-V-V-л-nWRh5Rh5rv[ &w[ &w[ &w[ !w[ &w[ 5ݖ5Aa&>6}FIMPl2|l&>6տ~mIy cn ؏M1Ǧߗk0 ؏M,tǦctk0X `>f5y cS6|`>f5M1;8|k0s1;8|k01;8|b ctk0X `>F󱩔Lؼ1:5A|b cS)1:5Ak065|k0Ǧj?l^>f絆y cvp^Tk0a `łkc &l^,v|lZ,?k05x__>65׀ŗkK?ciuc`c`%l^|#,|s?|`>F'1:5M`5 |N` ctk0}N` cvbb cSP1[1;8|.V|k0a1;8|k0ǦCZyj LQk0̃tQk0Fh51S8OLj'j'j'j'j'j'j'j'j'r'r'r0mAnat=i'r'r'r'l6 9L=avP=avP=i_#j'j'lR8AmAm[ avP=avP=aBFa 9L;aڹƯ=?_;7z0^9L=a=?Ö{7z5p'_rFOؼ{7zxFa uč0^G9?_G K K 0môFFF6 K K ԅavP>avP>avp's'j0mAmAmAmAm:蠷} i} c>jۧ.zۧ6zۧ>zۧp>YH%SH)s} 饷} i}.ԶOᢦ} 5S88P>)\\smE>Z(\\SEMm.jjۧpQS>Z(\JEOO|jrBTZMnZMn\BmBmBm|Oυ)))#_5Sh5S8Gpkrۧjrۧp>\h5Sh5S5s } &} &} &} &} &}.))P>V>V>).jjۧpQS>jۧpQS>+\ԶOᢦ} ujۧpQS>)\ԶOᢦ} 
uP>)\Զρ)))))))))))P>)\ԶOᢦ} ȅ n.jjߚ){yQۏc>*[G<ၣxy! yB  yVG<ၣBǁBхܚ yjG<7䁣 5pTEM .jjpQSG8*\Qᢦ 55pt  & & & 橡&?&ҟyvA|_武B>axBm5[M|VS!୦B~学B~d୦B>zVS!୦ Th5Th5Th5Th5Th5Th5Th5Th5Th5QBiS!tBiS!ߴy婐oڼThoThoT7m^y* ^y* ^y*\y*\y*\yPysSSS!?OSSSSSᢦV 57.jjB<.jjpQS+OZy*\SᢦV 5TEM<Zy*\SᢦV.SᢦV 5Th5Th5TȏFTh5Th5Th5Th5TȏFTh5tV шW &W 5TEM<S;<.jjpQS+O <.jnot6+O <.jjpQs{ĕEM<]EM<.jjjrjr婐({mW &W &W.SSSS!?SSᢦV 5A\y*\uPi*l6:l Is#?Pa/Gf?l]a -6"&lV8lma WuaatPasF9F9u&&&4f5f5f5f56AM@AM@AM@AM@AM@AM@AM@AM@AM@AN@AN@AN@ 0: y:#r0M@AN@AN@AN@atPatPi*j*j0ͷAM@AM@͓i*j%j%j*j*l:LPatPin$_;'s0͍kT<7Pi*'x0~} yi*_FxF8F86 ˽QavPQavps:*j:0MGAMGAMGAMGAMGL蠧 i SQj:.z:6z:>z:p>eZH%=UH)=u 饧 i.tT4EMMG]EMMG.j.Opy.tThEM.j.χ*\Jᢦ 55UEM.jjjr:jr:p>_h59uFP & &.JtTtTtT|jR+Uho+uv -w -w nTJ[J/nܕ*ܕ*@[J[J.jnTJ]B]B]B]B]B] [*w &w &w.ԮTTTᢦv |ڕ*\Ԯԅڕ*\ԮTᢦv.hF?ڕ*\Ԯԅڕ*\ԮTᢦv 5+U](_ᢦv.ԮTᢦv 5+u w &w &w &w &w &w &w &w &w &w &w &w.ԮTᢦv 5+U]t6Rڕ* !l]>]g/~fs~f}9UȳW*kΧ yZj\=USX*kxk\ǵ.ԸVV!z\pQs5^Z*\Ըօ*\ԸVᢦƵ 55UqEMkqBqBqBqByrajߟžӶ?ͮBͮBͮB}m?w6G &#b|؅+;q#bmG ѥG G N#bmG ]BBBBBBBBB2- _˅8yVwtFF6 nAmg76ZZZZZ| 7zczcp>[Hv6 7 i7 7 cTkkjc^zcfzcBm.j.OX+\څX+\ P荵 _wDZ%[tJ%鉠?W= D3,,X i*,,X i*,Tc-XZLS`@2M&kAO&k zl=M6.*4X zl=M6ւyZp?nXZлec-ݲO\_lol[6ւ-wZp?qnX zl4[*kX zl=M6ւ&kAOZdc-ivQZdc-XZp?;,Tc-XEZej4X^KLW6M5ւj4X i,Tc-XZp?;,Tck2M5ւejdc-i4X zl=M6ւ&kAOZdc-i4X zl]Tc-XZLS`kuLZLSXA*[p?}}`-ˁѷ~b[Зo }90oXA__pe.oyy O_B7WO: ,,) ib,,+ ib,,. 
ibuQa`*V2MU&ÀAOa&ÀU z =M/b4 z =M>z CAa [-À0`лe0R z Aaw0`L2 xa0`d0i2 4 z ^lo =M&À z =Me 2Me ^T'X0`LSaj4 ijT€2M5}ej4 i* O240`LSa`€ z =M&ÀAOa0`d0i2 4 z =M&À i* ,T0X0`p?,l?1 ,T0i>0)_ _E2Q {A_e g "諌`fl`LS4*g ~1a1 c0藏 Ϙ:c ?g <_aW63Aw2A_43ە͌aL]41 i*c,T0XaLS`21 z=Mf &3AO O`U0Xgx6q8 zlx6}A_5l^\c t1_S8)k Ơ0k ŷe5) ŷe5x&s) z5=M&sA͹Ơ\cd1i24k i*,T$X\Ee4k i*,T1XMSƋ54k i*}e54k\cLS`rAOƠ\ct1i24k z5=MsAOƋ5I&s2Me54k 54k i*,T1X\cLSƋ e e54ۏA54k\cLS`rAOƠ\ctk~ b1i24k\cd1i24k sAO`r2cs2MeǠkM:y|t2XY>R:,T,XY>W:,T,XY>\:,T"2M˂e*=MF &#J=MF /X4 z@^T,i24 z@4 Gw-# z@[F -Ƞwdp?U~2n ʃ-#A`f{Ke #AOȠdd2i24R z@=MF /*4 z@4 #e@4RLS`"Z i*,զEE e -4Z i*,T2 i*yQ`"2ME 24 z@=MF &#AOȠdd2i24 z@=MF /*,T2XdLS~2ȓG^T>2˞`/!t>b왏 i˞`2M#eG4 i*,T> AOȠ|dd>2i2דrZ%T[2ˁmɃlK}9-dЗےA$u[2snK^T[2ȣnKy8'K%dE%+%deĜ NV'+q :Y'+A& :Y4ded2i2Y4 zLV=M&+&AOʠdeLS Ided2~gk˞eЯ-{AYڲgo?ܳOٳ Y\{AR= e&Ynže,}c2XK,}c2ųg4ٳKd2ig4ٳ }Y=M,&{AO=ˠɞeLS=`B.2M,/g,T%XeLS=`z2M,eY4ճ ig],eY4ճeLS=`zAO=ˠɞe.u2ig4ٳ zY=M,u{AO=ˋYR,&{2M,eY4ճ 7Y4ճ ig,T2XeLS=ˋ ,e ,eY4ۏAY4ճeLS=`zAO=ˠɞe7ܳ~ b2ig4ٳed2ig4ٳ {AO=`z2c{2M,e ,e{0,?nk=϶g=϶g CNguv6gE7,?.϶[mwX?ۮ϶c{gϸA,gۍϸA,?ѳ{y{3opY~{y{+3opoE} =ϼgv3opY~ =ϼg7,?g޳{y{3opY~ g7g zc{3n=϶%qY=ϸA,?ѳD򳭽7g zc{3opY~ =˱7,?g{y{3op} =ϼg{c{3opY~ u97>kg|кۛd5C3fh]~.g[+3`z_Ot0?Ylk}At0? :m>g|Dsl`~ ϼV4w0?Tg "Fg.mmϸ]42Fg.qhd~vF3nϸ]42?ێ~ Ghd~ ϼ772Fgy{#3opod~T#!WFCQ|-kT#!F CNṘT#!Ffpod>,,0|X72{#at{XY>i,5|XY>nLs=,,9|Xijd>,܃nLs=,܃n=M42zhd>COt{iD#3zhd>4|ipw=M42nǁzhdFĊ-FĊI`{KE#wFĊ-4[*hd>4|iD#󡧉FCO`{KE#󡧉FCO|iD#a||X72izX72izX72i̇ۙO|X72ng.i̇e{#va ̇e{#aD#󡧉FCȮ&=M42zhd>4|iD#󡧉FCȮ&Lsod>,4F2c4F#Ͽ{f_!5B]v_!5{]/^5E]v?>/u͇xQ|X>L}A]a u͇e{fogm= ·,Ӭ&{a ·e{aD󡧉hCO·&=MD; AOS__χv}>,{!}*r}!!ryVχB:EOT(! 
*/ }*?*CBg^BByEЇ%BBz>4 Ї&B=MBz>4 }i"D(PCOЇe{(!SPCOgz + %FCU} UAb!hpO>o 7KJ>>hpO>ò-чe{[aY|C/ч^<DbCOч=M$FzH>4}i"1LsO>,:4hpO>,:42=1LsO>,42=1LsO>"%Fiчe{b4'Fiчe{bCOч=M$FzH>4}i"1UbhpO>/w}i"1LsO>,4CwPba}X'Fiчe{ba }X~ Bba}X~ Bba чe{ba}i"1Db!o)1l?!1DbhpO>4}i"1Db!o2)1Dba}X~ Bba}X~ BbarbtlvLѱ8Gc]1~owl;_tǔ/~Wwl{ǔ7tǸA&F)1:=77~cޠ]cޠ]cޠcޠc1olP1oP1oPcJyJyJyJyJyJqLqLqLSbtdbtl?2 21zL1n1n1nѱ=k6 21: 21zLi1oP1oPcJyJyJycJyJyJyJyJy1oPcJyJygZ3M:ךi1LS n5Ӥc|&c>/oe5brc1w:Ưsǔ;׈1~;#i{2sc{o̝=1o'3w: >uL1oP1oPcʝyNyjN1Pww؞㮙B=www~ xf uf ul?< Ly?e?eJyJSc*1:։ NA~h_c'F*;1܏w_Tb4$Ɖ~;w'FܥA.ӉыJ4˧:'FeJ^Tb4X^L|sbb|hL|sb4X^L|sb4X^L|sb4XE%Fe4 zL=M&F?zL^T/i214^db4i214 N"iod4z^T4Q? /[G5~hУf4cz4-2MG?4 z=MG&AOыѠhdAOѠhLS~ 3XhLSыq4 i?zQ1`2MG̋?m?mq4 i?,T4 i?zQ`2MG?4 z=MG&AOѠhd4i?4 z=MG/?,T4XhLS~=X~ b4i`/u/A|8EI' f4+A_ٌp}b4X>1N,iil' i*'xQq`r2M2kY ;k1LgM<i_lfMb3k`gMv56ț5WcAlp56eySؠjld56i{Qؠjld56i4Y z=MVc&AO`A`x1ڠ،+=hb3F1ڠ،f6_9F{Q1 CTmϪc ū,W)X߾2F,W)X߾2F z^T)i2F4 zKlh&cAO1ڠmd6XmLS`fmLS`b2Mhe4 i*F,iT6XEhe4mLS1`bAO1ڠmv6i2F4 z=MhcAO1ڋoh&c2Mhe4 Όc2Mhe4 i*F,Tb2Mhe4 i i*F{Q1`b2Mh&cAO1 o:F{md6i2F{Q1ڠmd6i2F6h&c2Mhe he4ۏA4cccюDZf7o%~qlc;~ɍ[c6>~[{l= 2F{L1ڱ771hǼAhǼAh#^^VyюyюyюyюyюyюyюyюyюqюqюqSvdvl?3 2F{L1n11n11n1ڱ=7 2F; 2F{L1oP11oP1c yюyюcюy y yюy?e ySpTp,?0F;Ưcׁ11~=}jkcccזc ՎkP Sv_[jx1T;gyj7ژ7ژ7XS;P7S6X=P7ژ77Ȉ79F,, i*q,, i*q,, i*qxQ1`2M%&cAO1O%&c8 z=Mh/*q4 z=Mhizd6?1~"8ˁ1ڋ}90Fmp?~S3Fmp?~S3Fmp?mLS3F,T cAO1ڠmd6i2F4ީ z=Mh/*F4 z4 .e4ZLS1`bP i*F,T6mLMS1~2XmLS1~2XEhe4=md6i2F4 z=Mh&cAO1ڠmd6i2F4mLS1`b2cc2ccmmǔ g61ڠ_xh}b6h~O`6+cA'0F,l iOR4d iOR4dmLSA`2I;9x?sA}1VNrU̟-`3 l[Au7sn}}ɖo [Apt7ˁ-`Z i,T7Xپu,l: i,l: i{Q-`Z2M|4 zl=M|&[ACvot7X S`{*\~ d7K O^;\~/. 
} 24ot79y~5E~5o CAҡ ?oF ^CAq7i24 z ^T7i24 z =M~&CAOߠod7XoUw7X4Yz V]+A/ࠗ pK`8%R] pW\w&uEg~p),A g]T?8XZQ8+X `V k}Cg?8k࠯/*4z=M~pd?8i4z4i*,li*,T?8X~pLS`2Me42Me_T?8X~pLSࠧ~pd?8_4z=M&Aࠧ~EAO`2Mey7`2Me4i|Q`2Me4ۏA4~pLS`AOࠧ~p7*~ b?8i4~pd?8i4AO`2c2Me e>&0-&dxl?+$6ǼAƼM1oPYcjyjy*SxTNoTLodxcG$hc<$$$$]qlqlGǼA5ǼA5AFE5fq#8Fpp?Fpp?Fp׏pxLS`fFpLSj4U iO1n_,ci2n4U i3n4U i5n4UFpLS`ʓAOࠧFppAO*O=M6&U zl=M6&-=M6Fpp?FE5r`#8Hީr`#8Hީr`#8Hr`#8Xީi|ࠧFpd#8i4zl_ll=M6&zl=M6ej2M5ej_T.XFpLS*4i܏V^T#8X]LG+ej4G+ej_T.XFpLSl=M6&AOࠧFpd#8i4zl=M6&AOj4i,l?,l?|EUa@8žpg€p_0 @|U,~`@87 ?>f1 ,l>f1 ,li,liyQ`*2MU ecֿ@=ϑ5d8'T8".Eu5^'U0w|Q*yܹ :wQK玃~7a8w掃ʘ;دsAt8XqLS4;i*w,l;iot4;iot4;qLS`r;z=M掃&sAO ?;w|w{BA-j!ˋrj!y-䠯O>Bd 9n!y.-S [AO-䠧E[A- /<r7BWn!{0<r\BƖ[AO-䠧rd ZAO-䠧rd 9i4Bzl!=M&[2M{rLSċj!yk rcb96;Xz`L,=0&AىJ,=0&m2M‚V %U ū,oJū,oJz %x/Jz %=MPrd(9i24Jz %4Ji,lJi,T(9XPrLS`B2Me %4JC2Me %_T(9XPrLS䠧Prd(9_;4Jz %=M&CAޡ䠧PEeCAO`B2Me %yɡ`B2Me %4Ji*|Q`B2͟6cC2cC2M/*,T(9XPrd(9i2-N/Jz %=M/*4Jz %=MPrd(9XPrLPrLS`f1`Br(yl Jcͱo?ߝ~M7cyLr﵏kql~L67P7P~}d(yd(BcޠBcޠBcc %y y y %y %=ƼAǼAǼA)< *< *< *< *< *< 2< 2< 2|L1n7P10ǸAǸAǸA7P7P10ǼAǼA)9 *< *8,T(B2M7eSCe壌C2M7eC2M7eCC2M7/*,T~3Xfd(9i29Dd(AO䠧PE7&CAO䠧Prpw AO %g2O}90|Q/J/wjJ/wjJ҃Jiwje %d(9i24Jz %=M&C;5CAO䠧PE&CAO`BheLS`BU i*,T( ~2Me %Ji,`B2M`BU i*,T( CAO䠧Prd(9i24Jz %=M&CAO䠧Prd(B2Me %4ۏA %4ۏAW~BAEv(9 CAUJPrpgAa0|Q0J .Y %4,/Y %4,eG4,e?>0PrLS`214_V< ti*4z B=M/*4z B=M&AOA蠧 td:i2,T:Ȼ$B4 twIiۦtdJ:i2%4NI=M&SAO)頧ɔtwJRAO) tLm ˟EurP`-X.ւrh"ؙA`ڇvؙr`gkA_L=Mv&;Aޞrg:i34ٙzL=MveL4X iL4X i3,Tg:XtLS`:2MueLyK̝`:2Mu/3,Tg:Xtdg:i3}w&;AO頧tdg:L=Mv/3}w&;2MueL4ՙ;2MueL4ՙi3,Tg:2MueL4ՙiؙi3}Q`:2Mu&;AO ֺ3}tdg:i3}Q頧tdg:i3Hw&;2Mue veL4ۏAL4S_Ag%l;w?mb?m7b?϶mg϶mޙl?N| l?ێ϶CqL 3v3nϸAtgޙ;ӟmTgFۈy{g3opLK;ӟy{g3opL ϼ37w?gޙ;ӟqL 37ޙDgTg :c{G3nϸAt?љlk8~ 37;ӟy{gzl~ ϼ3M;ӟy{G3op~ ϼ3M);y{G3oLhPƯ-c{3~mѠ_[4&{r/hWuR?vgѮ@-7vޮ@3n걽]w?vg`vg`vg|,sЮsc{3opo=~ DZ]7g?g`vgoA3]?ۂ~z@zloWx=]?Ζx=]v3nP _Uv"!ǭupoP?ՠ~ȅAp;9s5nrjP?ՠ~VȹAL| Qaޠ e{af4u|QafH2ͽlL|.Qa^6}XY>ALs/2ͽlLs/>4Ѡ~iApw=M4{CO ^6}iAD4<4ѠDL4n'r@:7r@/4nՃ ꇾР~Vwj4r@vX/4iwj4i 
A4zhP?4Ѡ~iADuSADupoP?4Ѡ~iALsoP?܎]>,4up#>,4up#>,4epoP?,482m{aG|X7{aޠ~X7Ѡ~iADCO ꇞ&=M4zhP?4Ѡ~iADupoP?,42c=MdGfv;g(2#3/<2#3pWtpL?{2_ ')diORL')diORL?,K4')di242ͽL}Bfzt-Agiՠ~W ~Р~X^A@ ޠ~ <}ﯽ\™ɇ^|%Óz hmC/^ZZ6CIL}@kae{kae{ka~X>aL}@ka6ͽLsom?,[Lsom?,[ۃhm?4~iDkCWkCaUkay [yZ퇾x~퇾x~FCVk;>Dkvpom?4ھC Rk!C)Qk![yDL7L7L~2cޠcޠcޠ2cޠ2c1oP1oP1oP1oP1oP1oP1oP1oP1oP1n1n1nctqtƸAf): 2= 2= 2=*ǸAfǸAf): *= *}L1oP1oP=yL1oP1oP1oP1oP=9 *}w٠2c{t,?0ǘז1~m>tkLcRc{mlϾf{4Sc3=7ugRc3=Ư;Sǔ5iǼ})1o|_a{Tr,WU<771Ǽe Sc`[ Scۻc>x=0=))=)cJq1W\A싪f9EWEU9 jvp?v(]rfcA.`fjvLS싪f4b if_,\iOf4b if4b if4bjvLS-`ZAO젧jvp AOj=MV&b zf=MV&_ =MVjvp?gjEUXr`5;ީYr`5;ީYr`5;r`5;XީYi}젧jvd5;i4Yzf_lԬf=MV&Uzf=MVefÌ2MUef_T1XjvLSj=4Ui3^T5;XcLS`6Mej=4U;cLS`Yzf=MV&AO젧jvd5;i4Yzf=MV&Ui,T5;X~ b5 A c3A c2,m0vЗ=g;˞a~b0vL}Xbb0vL}Xb;XleL}Xb;Xlm2Me+eAO׿>{/<{Amy(ֽ_x~go;$E~O`o;SmyFֽ~,T 3i gxd2<1f;c6c2aS}l~cl7,1ec91nYcʲg8ƸAfǸAfُ)> *> *>*:,7\7\7,7,~hT}T}T}T}T}T}T}T}T}d}d}dc cI1nYcq̲q̲q̲1nY1nYcyʲyʲS>wT}T}lυS}T>wT>wT}T}lυyʲS>wT>w,?0~g0&ug}_w&ۏ);Ư;c3~G̹S}l/10>Ǹ 71Ǹ Ǹ ܏)>^7X1> G̹yjyscޠrǔsbUb=7l|?b},ߏs scoa}sc{pssǔsx=0>~}b=X>,TK4X>,TK2m2yU*{/{yϙ L|33Ar&>{yę 85ge4z=Mf⃞&3z=Mf⃞&3AOL|d&>i24i* x4L i,զfşݾ S`z*,_Om~_Oz2ד9 (:[ sA04z=M/*4z4ÿEeԿۨG;es2ju esA9GEu 5sA9~Ĝz=M&sAO9`r2Mu es2Mu esG;es2Me4ns2Me_T?X~LS9~d?[b=M&sAO9~Ĝz_T?[b=Me4i*-1e4i*,T?XEe4i*,l?1,Tr2Me=M&sA$pb19~drAO9~d?ț=Me4ۏA4ÿXz.9sA9 g~g"j gA~&׿N'")?{!0>?~bl~bl_>cJSOCw89sƸA&ǸA&39c c ǔSTuTuTTl?6 *? *? *? *? .TTcޠcޠc c c ǔO&qLSxdddl?8 2? 2L1oP 1oP cJyJyJcJyJyJyJyJ1oP cJyJyg&؏3Ǹ Ǹ );M0?M0?x)?$ ם 1~ݙc3L 1~ݙ׽|?bL 1o|?b,ߏ#&ǼAu)? ; {L 1oPY1oPQ1o|?b-L 1^L1^LSc&x=0י ~{rf2ANʙ G~pG'\3=9.g3=~L|wf?XEee74g/4˧ge74Gge74ge7_Tf?XxsLS栧~df?Eś&3AO7=Mf&3AO0&3df?ar`f2A_}90܏_l}90܏_l}90܏}90,l4?~df?i24z=Mf/wjf&3AO=Mf&32MeA`22Me/*+,Tf?XEeee4E/*,TV6XlLS`68(,Tf2Meeedf?i24z=Mf&3AO~df?i24z_Tf?X~LSyNcf?藏~2Auf?藏`y ΃ff2A_}e3܏_liC_liC4 iC4~Lwf<~L}bf?X8s/AK@'fK@_21,_2/9 Av?4՜ i94z yց@d?i2Q٠@d?i2Z&2Me32M/*,T?X@Ee 4i` 4g0e 4g0e _T?X@LS =M&AO@>,/Cl>,/f?+ l6ny AO E5&AN~`,A=ȓdy"<dĀ  OAtAA>9yA>9yn' 'Q AG5y̏j$pQj$I2?! 
~TCw Q 2M|ezTCLSjz|TC&pQjz|TC&4G5=M>!iQ AOjiQ Ae^ԣ G54 i,ԣe4ӦӦoLS=`:,ݦw1ԉzDLSO.:4ԉɧN=M>u"iAOOz|DS'&:4ԉzDS'&:,S'ezD_QO|D}JS'eʭ˨ۧT>u"QAOjУS'5:?u"~DS'&:4ԉɧN4ԉ`r2)Oi2)O>2)Oi2)Oiԉ`:,S'.2M=u"XN=M>u"iAS'&:4ԉɧN=M>u"~DS'.AS'&:,S'ezDLSO:,S'ezDLSOi2M=u⢞:,S'ezDLSOiԉ`:qQOi2M=u"iAOOt:qħN=M>u"iԉɧN=M>u"iAS'&:,S'e >ubhKA?X"%A.r ~DK%<K?X"%KoR#)BHml?4g11>bl?4/yl?4xl1=bl~Xmlؾ1wl?6w#)c1nHcJyz$Ř7GRg&cޠcޠI1 cޠI1H1oPH1oPH1oPH1oPH1nH1n8GRq|$~Pw#))> c I1 c91nHcJyz$Ř7GRS|#)ƼA=bl?}LH1oPU#)Ƹ >b#))=Mۏc'cY1^|||O4c'cc؉czw>vb;;1 9|Ę7Xcޠc`NyMc'ƼAǼAcޠ2cޠ"c -1?n7GKDi%c<-1c<-11nޏr1~|D#.AƏr8~|Dp~8b9 ?>" bG47p?>"XG\#e47r?>bVG4ws?>"X(yL|K#e4u?>"XMSi*J,T>"sG#3~9ˁG}9A_||Dp?'SA_||Dp?'SA_||Dp?'G4;5,#AOz||D#&4흚z||D#.AOz||DLS'>ez|DLS\rLSiKi2M=>"G4Ki*,#ez|Dp?,#.*,T.9XG#&4G=M>>"iAOz||D#&4G\#ez|DLS8'DCAB|BDt AB|BD/Ϗ} A_|BE=!"苗O"#,ly#,ly`2OiN|QOiN,~BOiN|;ד`דGyՏG#z뎒#$ni"Ea ϗ?qKiuPm-j.9o5G@:X\r`QSsɁV@|D ^GZM#&j.9j2h5G @|D`QSf|D`QS,j*XT>@#G5,jhG54#G54#G|D`QSd>"j2h5GZM#y)|ā˧|D`CG/_<|D`)@#d3'@?G0aCGZM#&*h5?Ӗy*ŁR*t"YE ݳE{gǯz<w"7Yz=@gh5Sd #Gq.a%@<%@d%@Źȣ80T #Gq.a(%@ZXp`QS%JV%@Fd @0&KV%@Fd #jh5Y,aZM0*a.a51|JyKEMm 51XT #F ZF`QSÁEMm 5U,jȟֺqJEM0Z0&KV%@Fd #jh5Y,aZM0&Kh5Y,a5U,j]oXmD,j5x>2vho]@{Ee"0]Ҏ]ZM.&cV@E`QSV^(cEM.(c]5Q.]5Q.]E`QS|0E`QS&cV@Tڱ@Ed"j2vh5O]E *Ed"E`QSbm|aQV{"}pA{¢>m< z=q_O'.ą<5U{BhO\ z=qay=Dྵ|jОjO\S'.,j[5 &ZM'. ZM'.hO\h5ўܷ/hO\h5ўj=q!OR՞j=qaQsoO\XlО'ąEͽ=qaQsoO {{¢ޞ>=qaQ}@C{¢ޞ>=qaQsoO {{¢ޞąV &ZM'.hO\k'}B߫/|$jŅ >IZqaՊV\T+.{=Պ V\h5QlPjZqD"pV\h5Qj*Lq!~Va y0ŅRB^+Lq!wqaQsEjwqDB$\ &zZM..]\h5ѻ? WBE޻? 
WBŅEͽwqaQ}JE¢޻{5ŅEͽwqaQs]\X{5E޻..,j {¢fkz5E޻..,j &zZM..zA]\h5ѻjw..]\h5ѻjwq!XԻjlPe_Պ zZq!_OU+.jŅ|=UgIV\ȷU+.[FՊ2_Yw5;{?qk~5l~;{aS0l??Ga] aYް]aaEްasIa10:0:ô<w1w1l}zzzzzavPavPavPavPavPavPavPavPavPatatatw1w1lFٻxŇA.A.A.׻ { {Ӿ0;0;ô/>w1w1l?}zŇA] ] LJA.}avPz)g>~1~1l?&F?F?xv;;1l?׈a0Fd kNư7Q5b'ad {;Nv2=av|c'c,f=cd ~:젖At?LavP#&;[11lat-1111lF~ȗ-F _0߶8P-n8E =p #P720_ ,jXT @0ZX,jnaXX,\,jja=YúXz`Q|u #ja5"jh5"jq&[V-h5laZM0ߙla<-@laǁ~8P-@?laq` #p??Sǁ-NF0c@?la5;5[EM0Fd #jh5laZM0llaZM0&[h5la5:ja58PҁEM0jaIZEM0,jjR:IZEM0Z45/ja<-@Fd #jh5laZM0&[V-@Fd #jqZEM0F~O0K][~#F~$G-@?laFA0>Xl\8}pa #>Xr`Q}pa #,jjj9fF`QSS߾ t(#/6C~PF oTPF_l2b3X^l2Լs߇ѴCE͟BEM2yP exա@PFd(#j2q&CV@PF oY e5,jOo e58PBEM2T(#PF`QSfPF`Q}zc(#PF`Q}zc(#PƁ?M鍡Bd(#j2h5 eZM2ycPƁ e/|$PF`+XC >I1X e<@'02PF  á@PƁCV@PƁ eZM2~S]C^s(@2y\PF 䉐C<w(#eq2yPF Ox C^s(#'BeZM2&CVZ_h5 y@ / y@ /jnh> y' 䏏> 䏏> ,j45> ,j*|XԤ> ZMOT$j2|h5> ZMO&'V@Id$I /> ,jjR@OyI`QSҁEMMJ5> ,j*|ȟ;|XԤt`QSҁEMO 'ij*|ȟ;|h5> ZMO&'V@Id$j2|h5>9P@Id$I`QSO XnR> ,Rk7H>2|h> @KIf$V@Id$j2|h5> ,j*|XT$>2|XT$>2|r`I`Q}Je$I`Q}Je$Ɂ 5> ,j*|r'EMj5h5> @Id$j2|h5> e'V> e'V'E)'*|XT$I`QS'EMOT$I`QS'E O l b$Id$j2|{0Ol b$j2|h5>9߳}pa${. 
Ap$ | # H ϯ | # H tRFp$W5@]$ȰO{ $Ȱk ۯO!x G`~8lۯ&v&g 0;$0;$ȰaJ J J J J ۯ_A%AA%AA%AA%AA%AA%A⠒ 젒 젒 IatIatI) 22 2lXFyԇA&AA&AA&Aw Ӕ0;$0;$4>* 2* 2l?L~ 젦ԇAMJ J ϑA%A)avPS젒 Sd,'||~'W3#{atata)0:0:4˘ߦ?La/M kȰ7&5bfd_#fF,13#Sfdߗ%fF0/132Ǩ {#3#`0;1av|odfdÔf5>jaʌ ?f~˘wtatao ̌jh5Y E!jr6&k!VAh5Y ZMB_4<@g~X 9P@?q`-$p ?SǁNZHB @?5;5k!EMBZHd-$jh5Y ZMBlԬZMB&k!h5Y 5U o!5U 9PہEMBj!EMB[U ,jjt;j!EMB3j!45,j@B&k!V@ZHd-$jh5Y ZMB&k!V@Zȁ1XT#>1XTfƬG`QSY2h5zZMf=&i~3X G`·ORz/|$ŬGx ~O`# z=Y@*VY')f=&VYz§C7?G \䥫As#>n~V͏@^kq}G }x V͏@h5l~ZM6?&As#jqAs#jX7h,jT-%o"9P@3țwF=wF-5;#}d-$>kXLn`Q}d-$jh5Y 9P3V@ZH \ k!V@ZHd-$jXT-$ZH`Q}d-$ZH`Q}d-YfZH`QSfZH`QSU ,jX5&k!V@ZH &\ ZMB&k!V@Lh5Y 9P@Lh5Y ,jXl5Y ,jrj!EMB5U ,jXT-@B5U ,jXT-$ZȁY2h8P`laZ00?m/t #H`naf-@,HonaRM0,I0yF լ-_f響\5aX ۟ac00no ۟adon]aäZo.at5ixT cT c~>00A0A0A0A0Gjjjjjjjjj k k kS cd c~;0-~a a a /xA0A0avP5avP5i{T cT c~00A-~ZfUfUaco ۏc}c17~ K1lׁa' 0,l 5a_60ױ1u,l0 /ga oe=c aa ga ga ga20_\ KF _D02F~5>/kx5@wG >\,jqjEMm5˷20,\,jga5X,\,jjc=YXa5Xzd #jZM0zd #jq6&kV5@F~Hh5Yx k3YP8qj~XF~!}`{f #Џk흚5@?aҁ~X,jwj0a<5@Fd #jh5YaީYaZM0T #jh5Y,j5U,jqa5U8P[ՁEM0agXVu`QS[ՁEM0aҁEM0Vu`Q׭EM0Fd #jh5YaZM0&kV5@Fd #jLhoYܯ[T"pn ,S׭e:޲LhoY8Pe@{2Ee"p?>}g"پ۳Lq`n2E`Q}g"mfn2E`QSLXԶq`QSƁEݞe2Ł*Sf"/6~Yͧ~Y2E_l)ˋ2ŁSLXlX,jLXlX,jLqE͟f2Y,SZM)&Ve@>} ;[sKg+l,Ui,JlE0x ~`"oV ي@lEd1ي@lEd@e+~)u|"'_OD Kɗq|"g̎O|+ɗq|⁌OZM'yWDd|"j2>h5O@āO@D`QSEͿJHOSߚ 乎kp @0yF o"\O\SF +]8P5@?ea5@븆XT #?xv #jEM0&kV5@ƁaZM0&kV5@Fd #jh5Ya5UMkEM-F u\,jj8jEM0yF`QS ŁEM-5U,jȳ0T #كkV5@Fd #jh5YaZM0&kV5UaZM0a5PXlD,޶Leo,޶Le[2-S*hoLe̩@^p8h5LeZM2&SEM2Je5Q2Je5Q2lD,jLe5,jLe58PREMmrTF`QS܁EMmrZM2&Sp*#j2h5LeZM2yTFd*@2yTFd*#Mf TF`QS,j*XT*#TF`QSR*XT*#TF`QSRE &$gB"_x&$yD 4 @~ᝐ{rB"CNH= @'8!H58;!!'$oWNHN3 z vivf &givR o&iv> ц6=LS #avPSSaTaTaTaTa~9*0*0*0*0*0*0*0*0*0202020A헮 T0:40:40:4ðudadaffffxAASaT0;avPiavPia0;44=j*zTaJ3 J3 J3 Watwk_˯LV dÔםɊa|ݙ׽=Ɋa~eR o,ŰB|R Çk,0~0g f)}Ya|ޙx3K1;`,0;X>k0K1j{,5f50e)A-x~ƻAf)aYa/ gR$@ǀD~vh} HDc@"1 ]ؾg3 Xl߳8}f@"پg3 Xq`Q}f@" ,jj8 f̀D`QSd@@$b3 @ H2@ Hf@"/6V@@-}@J; XT@"e揯Ux V@: h5 HZM$Բqd@"j2 h5VE* XT@"ā H5,j* Xl,jOo H5,jOo 
H58POS}zc@ HZM$&V@ɀājD FD`·ORlD/ >IX~O`#lD=@'6"&V')6"&VՈO7H@n9 q<u@"[H:@; ȓc$T@"[HL: h5uV@ɀDd@"j2 -$&* -$&EM$ H5MEMU&a*=@V:=s'JONO5XTz"LOZM'Tz"j2=h5LOZM'&V@Ddz"D u,jj@'yD`QSEM-5,j*=ȃ 'Z ,jj7D`QS@L8=qEM'y0Ddz"j2=h5LOZM'&V@Ddz@'&VEM-58}d@"اD`}d@"1 h8P@ǀDc@"dH@ǀDd@"j2 h5,j* XT@">N2 XT@">N2 q`8ɀD`Q}d@"D`Q}d@"ā H5,jj@$Z,j2 h5V@ɀDd@"j2  $&*  $&EM-r5I$ HD`QSEM$ H58PEM$ H<Ӿ0/31,g y D lg d;'@9!g d;H5@X6䉐3T7g svIgdvE o&?dv;r &5cv1^ /džV0-1j{,{f50%A-t~ƹA#%#2.10#%#%#~t"@%"b8ȗÉ@%";@&ND|ĝ!O|Ν!EM%"T""fDāEljEM 5w!'"?P?.LDZM&"@DāLDZM&"xd""j2h5ܯ&V@LD?s;P@?LDq`""p)>S3ǁNDD&"@?LD5;5EM%"DDd""j2h5LDZM&"lLDZM&"&*h5LD57JD58PsӁEM%"JDEM%",jjn:EM%"jn:OQ@DDd""j2h5LDZM&"& ~Yd8PE4/6 i_l @,2f@ˋM@_@ؾEXl"Yd8}d!پEXԦo`Q}d!MUd,jj7Mf"C`QSd@bE@,2E@,2f!/6 V@}E@^;XT!N]dx VE@^h5Yd,2ZMԎpd!jh5Yd VE EE XT!"Á*25Ud,jXlXd,jOo,25Ud,jOo,25Ud8PE E͟& VE@"Cd!jp ³X "C`·OR,2/|$"CXdx ~O`! ,2=E@"CdIE@"C ?*{GOB &]ᤣ k2XT@ =5z,j*XT!Á =5z,j*XT =C| =C OzC|zC lyyC /zC t!j:p`Yw!G=RM,tzs.Be$-i-\rrqrvbe-Wia-7i-b\xqrvbE:zXC-g-U-Gak2;.k2;.k42;.k2;.k2;.k2;.k2;S\Cetp=\FCz˖[z[/{2:.{2:.{!l<z[/k2;.[NZevp<Ǘ5p\C-5_fz[C5p\C=pC=pC=pC=pC=.{2:.${2;XN`ee˖5ۃ k.[/s`e|n`e|n`Clll/{2;X>ປ},_`evp[ u/2;e:=l/\F'etb/^N >Gq %G\˹ ׸뎸sqD\o"|O$ 3D K>X\ /\ ,j,jo+|@\E}q557 >X\'?X,1W`Qs~W`Qs`Qs|Y'?h5ָ Z=A^NvjqZ=A>{jqZ=A{\\.^>a+|ЏWrN>a+|p}a{ >~ЏW`QSq5׸BW {\Vs+|jqZ=N>h5WxW {\E5XE5WxW`Qs+p]`Qs+|>Kp+|.G.G>X\ \x?X\ /\?X\?X)jqZ=A>h5W {"~D D% \4?lO$|p_=Ad{"k"~7 ɞHr;nO$|پ퉄ot{"EnO$|~پ퉄5ב5ב5ב57=:9M /\  {?6a'|&fڄA>h~ k?&>#b>X\ ,j,j{?Vs'|ǜ'|jZͽA^~jZͽA>Ȼ:>h5~k?En'|^>X\ ,j5~k?En'|>5~O`Qs'p'|>X)jZͽA>h5~H{"k>,퉄H`ڶK{"k>,퉄 {"~D {"VsO$}X {"a */\+a*,@kჼaDჼbDჼYB< AჼoEჼpEkZA OQA+{< AVs |jZͽAW>ț%T>h5 y ^A`Qs]`Qs |V>s:T>X\?X\+,j'>!AD u    yHDyLjD D'>;F$>$>X\ yH`Qs`QsM$|&>h5DH /\ {"VsO$|jZ=A'>h5DH ,j:5:555Dk"<@"EuEuE5H O-HxH`QsM$|H$|jZ=A'>h5DH {"VsO$|jZ=A'>X\ ,j,j`:C`{kl|Ђ Z=tAZ=tA>X\C,j5'=t:`Q}bC/l:`Qs |>15  :CB{Vs|jZ=tA^p tA^> >h5:`Qs p |>X\C,j5kkE5t:02B{ -+ >+ >! 
>ȇ-E|x2 O2 O2 ^ >H52 De-D-RM ^Xe_n _a+=l{^a%ذU3% _a;l?N0j 9ćI0lf$$j j oA5 A5 A5 A5 A5 A5 A5 A5 A5 A6 A6 A6 &0:&tdaF$F$F$ߛl ll `&0;av|~a`$f53=jdaj Ze  b_wv0@>0K~@~ }U_~@>:g|z~@~GX*u`Qp`yCw? Y,jj:Y,jj:Y,jj@Z,jj:j|[@GJh5U@~@d? jBZM&3~u~~@~{`{f? Џy흚@?籁~,jwj<@~@d? jh5ީZMT? jh5,j 5,jpf58PʁEMXԬr`QSʁEM籁Vvj5>p? ܯ4WW+@;@;hh_i޸Xloo\?q5>޸XԴh`Qq5>iXԴh`QSӢEM5Rɫ666񁼪j|Mj|Mj|Mj|`1Aj5oR\䙧WZ,jj5!@@@y5>jr5>jr5>jr5@ ZMZMZMɫVEM5G;5VZ,jj5@5Xj|`Q}j|`Q}j|`QSEюEMVZ W&W&W&W&Wj|`ڶK\,_[mmǞj|{j|{ZMZM>,q5>jz@ 8x>Gf䙙ya@<20| = X VV&&&&ya@a5 cZM5OX0|`QS0|`QSEM 55 h5ȟy5>Я Wp5>7^<{5>*ZWj|05&«EM^,jj5>V&W&W&Wj|j|j|j|j|j|j|j|j|`QS:^,jj4 VZ䩅W ,jj4VyjXj| O-h5h5h5h5h5h5h5h5h5V&W&WZ,jj4I@k@k@8{臗灼y_ΞWEUy`yUGΞy^Ξϳ灼y`QSEM͞5GΞ55{~f ,jj@͞55OXlj|Բ0;eavP젖͇A-\6Fl>raZ6Fl>l?Fl0-\6Fl>r|~09r|ô :j|Բô :j|ԲBaZ6f :jtԲ0;ea]0;eitԲ0;ei|Բ0;eat ͇A.\6Fl>r|0:eatӲ0:eat`; ͇A.K_k.;ʇߝ ΅a0>\(ʇB0>\(ʇB0>\(?|B0;X>p|Ԡ0;X>p|ԖôP>jvԌôP>ri|_?.2/H}H/;_t'ځًTًv mbx }bx~ȷU/5~ڡ ,jW/X`X,^ ,jj6YhXm`QSEM-5Xm`QS;V-wVmbxbڡ \ \ \ `&&gr1<ԁZ @?\ /Kl\ N@?\ /K8p1<ީXbh5h5h5h5h5~`{bxbxbZ \ \ ,jj1P݁~zw~)wjXlo5\>p;jXc`Qp;Xc`QSEMw5@t ݁~9݁~90t@w_lt@w e=}6'>ā@^+z;6'ڜ ՘&&&d@w@w@w x<h59X@w`Q}z@w`QSj;@w`QS݁EM t5ۧ7t5ۧ7t55Xl8X@,jj;r;jr;jr;jr@mp kX|,_y܁~ȻQopr܁~'܁~h5h5}`< _5{f;{f;V䍜gy@3ہv o<}fXlh3ہV3ہV3ہ1g&g&g&g&gy@ə5 lZMl55Xv`QS3ہ<.v`QS΁EMl55XoQ܁y7q;/7ye @(܁Q7q;`>P܁~7w +op5z; 6>P܁V܁V܁V܁V܁V܁V܁V܁V܁EMmp܁EM R @x;A),jj;,jj2A67w`QS܁<wwwwwwwww,jj;AfkGy@^pxD:qD:qD:qD@H>H>H#ҁtt9"Xl߳9"X,jj 4%5"Xh`QSKV#ҁV#ҁ1G&G&G&G&GcrHZMH@#ҁV#ҁEM-5lH55"}F,jjD:F>P#􁚂 y)5觌SЁc{gs0>c{gs쇕{fg =Z#fg ="}枇A }枇AO3H<H<O3雧4s~_R:O3R;O34́t́n =X4f,jj3Y =|`y34s`Qz9fyS4s`QSEMM355X4s`QSEM ~Z?f4GZ?f>PV́Vj3jr9jr9jr9p? fjr9@m?wns6Yns~y`{ns6ǜq6Yns~ǵE6v@@@@@8w&w&wnsnsns`QŚ\`QŚEM61vm>PcEM65sTș@$P^|I3Ɂ^`ih/p ͙@?ٜIoads&ds&9p? 
,jj&ds&9ٞl$55XԬO;P3ɁEM 55XLr`QS3Lr U<3Ɂ~93ɁPKȁ)y<%@y 9^BO#2/!Kj 95r h5h5O3&rrrr &h5|y0%@%fuZB,jj 9^B,jjV7%ZBI䏧<ד3Ɂ~=9{&9?9Lr r@ImgLrj8,$I,jj&9?Lr`QS3ɁEM$55h59h59h59|f&g&g&g&g&g&g&g&gI佉g\I#@>I5gyM@@@ˆgy@w$g/y8Y<[Xe`Qy8f-,jj8.Z?-@@wh59[h59[h59[+ZMZMr8p?o ?glqYlq~yy`{lqqYlq~ygŁ~V8[Xlo-~ g&g&g&g&g&glo--->PŁVŁVŁE6f->P㦁EM55[|M-,jj8pe ,jj|8r_@ p_O p //ד~=/ד [ y} ,jjO/jr_8jr_8H>P{zVVV pp`QSEEM }>PEM 5/Xlи/Xlи/XԾp`Q}@p`QSj_8}}@}@}@}//˗}p`i_8|! p /+ pl pp/Jp @@cy%8Y^ }WC`.+JZ +<Jp h5{%8jr%8jr%8jr%8jr%8 ^ \ >P+OJpJp`QSEM55*XJp`QS+EMZM^ ɕ@\ нTz%8?Jp_O+j%8o5\ )W3t5ȟ{%8VZ \ \ \ >P+V+V+V+V+V+V+V+V+EM+EM@wx%89Z ,jj%8 ^ ,jj09VZ eWJp`QS+lJpJpJpJpJpJpJpJpJpJZ \ \ ,jj%89fkWl pppCCᖇyM!@;!@;!@ny8>5z ,jj@ 55Xcpp d!@!@!@!@!@CVCj8?pp`QSSECEM !,jj8!!@^x@ ߓC#!@^x8g#!@>#!@=x8g>z89>|w_a+=loy^au˰UӖ%\ _a;l?0Z sV gj0;!a}0;!avPC젆A f5<jx0;!atC A?LC } atC AۏA~A ~A _=LCA f5p7/GyyQ@By7/Gyr7/Gyy"Q@@ջϻ?Wn sg wwyG{w7y ww&ww3WZMZMZMZMVjw7 ,jj5vwyvZ ,jjw7kf{l/^=PSUy{CC/^ \ ͒{boߤț%/5Xbo {Z ,jj7jr7jr7jr@-ZM.ZM.ZM.ZM.ZM.ZM.ZM.ZM.5S /5x{yjv ,jj7@^6x7vZ ,jj7 ^=PEM-VVVVVVVVVj7jr7jr7vA\=5 | Gy Gy Gy(o5(o5(o o<ȃ ZZM7Wj7f(o`QSEM.ZMZM     O=h59{FySiZM55Xl\9X( ,jj7QqI\ɒmp|OԭeK[)RK^] '2MEyeQ 8PQޠNFy<(CEy `A{p7KQ /AGy ؃A{p7i:4 r>,VGy<(otaywPݫFz?ηgϮgI:u5v?ş]g>Ϯg׳;v>s{Gy?Z}v|v]/8ywg]>(gyw3o~ Qϼ;7xGy?(g q~ ";vGy?:7(D3nQϸADy?N} "7(yww} Qϼ;?(g{yw3o~vcw3o~ Qޱ;7xGy?(g q~ "7(g q~ "7(g cw3nQϸADy?w=ϸ]t?vq~";vt?vE@DXyG@3nϼg`yG@3oNIϼ#7xW$U]x^4 uuPw Qw:ȑXfrwp#|RwLbH\,,Z:eK2ͻXY^],ӼewwL.i2ͻXy7=?wwswps~gn$.zF⢧F⢧uoDwD7x74Sk Eע lƣ $dƣ $/$tp}!X^DwDwDwDwDwD7^]4]4 EOEO2b],Ӽ;Xywp4nn{IxYh-zh.^$wl:ݢIx\nѤ]nѤ].zhnѤ].zhnbݤ],Ӽ;yXy7i4&u|0x7i4b<[,Ӽ&ⱋI!X>I!Xyc4')c4xl.i2;;x좧x좧x좧>"VljЇ],݇],BT>좯OayFM}EQv'>ч]>"Oi{.r}.t<ߣ>"o{.<>l.Շ]3ayH}EO}EދTvDvDvDvDvՇ]4ч }E&WvDvL.i}2ͻ.iU2ͻXya4>b݇D6ؖ>"ți0"ți.f^<ˣ>l.E }EQvL.i}EUvL.i}2ͻiiiS>좧>좧>l.z.z.z.z.z.i}EAPvLN>"OǨXy'4b݇],Ӽb杰[,ӼvewvL.Xyay@}EO}EO}EO}EO}EO}EO}EO}EO}EO}݇]4ч]4ч],ӼewnL>l>좧x7x7xl.zC.zC.ⱋ<0x7x7x7x"(.iy2;㱋ewpL.z.z.ⱋ&ⱋ&ⱋ&ⱋ&ⱋ+x좧xl.ⱋ&ⱋewpL"Xycw rA}dkH4}lLS`fyervaymrv6XY^ i*I,,Q4$ 
i*;,Tv6XlLSI`AO٠l>>4}$a$aC% &AO٠l>>4 z>T0X ه_ll l eA_H4kT6x i*,Te2M%`eJ| i*\,T,Xld6"N3P 8l<+:'`*kAar6$N4K i*4 zLy ؠld6i2P ؠld6i2m}'`&2M%`e2M%`*,T6XC%`eJ4 iORL4')&`eJ4')&`eJ>T6XlLS 24 zL=M&`< `y }a6 τ9'`> d6 O9/:\C\BAxv5Ȼ{y6! O8P!! i9q\&CAs5i24r z =M\kdBAxv5i2,T3XkLS! o<;,T3XkLS!`B2M\Ȑö\skr}QwNr CAޞr5%0\*CAq5XkLS! :,T5XkLS!נɐkd5i2O&CAO!נɐC\&CAO!נɐkd5i2,T5ț4{kMr i*',TN.XkLS! 8,TN.X\LS!`BApB2M\<+kd5i24r z =M\&CAO!נɐkdBAO!נɐkLS!`rr2kCۯA =MX^sc z ̱X<kc z ̱X^sAq5 α"kLSټʱ4 i*4c ẕy9֠kd5i24c sAO9ևʱy9֠kLSټ`f{d5XCXeʱ4c i*,T5Xc b5α>T5G9 w8P9֠/^X<k'c u5ȋ9 /^XsAlp5i:4c rα>,=NX2v`^}g}e^=G'~Vc33X+:v_V?SuTuTuTuTu>7 *: *: *: *: *: *: *: *: *: 2: 2: 23XǸAXCc s?Smdududu>*8 2: 23ƼAXǼAXۘ77}6gʱyJyJyʱyʱǮƼAX77ϔccccccccccccrc sc sc cOc!1nY՟): 2: 23eUǸAfUǸAfU#?+̪q̪ݧǼA Ǽ^̬7jϔU,U wV5Y}:ȧYՇʪc),CΪgU9 j> PY}4W$gUeʪ>TV5XjL2969,,/OΪ4 i(gUeJ4U i*,TV5X^LSɽɬjdV5x z̪>Tr/iAOYՠɬjdV5i2=MfU& i*4\ GasA(lˏj>2Uj>(Uj>(UjW2\ zl=M6W&AOՠjds5iPՠjds5X\ i76Wee=MS^˩\CѦrj>XN z ,SwA/ԇm ,SwAHP})XrjLSԇ}4UN iO=T95XWLS`ʩAOӠ>O O~? c4X>O,T2MOe>T4XiLS`fbUƊiG\1 +A_e}bUƊi" r ny- EZH%iJ" H[" " l[Aqq4ȻnyK-Ҡid4iE4" n[AO-҇jy-ҠiLS`Z2MH[iLS`Z2MHej4"![" [A޶q"T4Xiw" zl=MH&[AO-Ҡid4iE4" zl>T4iE4" iE,TK-X~ ba5-`f5ҠgPCJ CAτ 9T)J CAτҠgPi`* CA1T,TPB2Me =MJ&CAu4i2T4* z =MJPidBAu4i2T,TP.X\* i*TP`B2MJe 4* i*TCJ~* C* b4s>T4苗 a8TJx* u4ȋס Pi* r9MJCKҡ 8T4*}XW*Cc!{c ,?+c~>U5v?cs?v)T:]ұ4z4vBcޠBcޠBcޠBcޠBcwhTtTtTtTtTtTtTtTtTtdtdtdg q ' ǸAJ87P7P7P}pdtdgy y Lq1oP1oPұ*GG**dy L1oP1oPҟ)T: *T: *T: 2T: 2T: 2T: 2T: 2T: 2T: 2T: 2T: 2T3JǸAJǸAJǸAJ3c Cc C?Stdtdg q q Vދ**Jyy彘1oP)T: *9Fiϯc tOc# RY^k# v4x4 '[1}bi:4#}iLS1`fyqayq4XY^# iE,ӬC4բ i*F,T4XiLS-`ZtAO1Ҡi>24#}]|AObբ z=MH&c4_d4i2FP-`b2iECfL~1}i3A?̘˃:1 d4x|:1 d4x d4`f{`̘=MfL&3AOӠɌida{`4i2c41}id4i2c,lo̘4[3AO1ҠOHÇoKA?ﳇA?|2FO|2FP1}|)x 1҇K2MHe>T|+XiLS1}b2Mŷeo4# zL>TR4N 0)h<JyIчv}}2)'E<\h'A_L=M&E&ApR4i2)4 zL>TR4i2)4 zLyIѠɤhLSI`fTähLSIчJ4 i*)PI`2M%EeS 2I`2I` i*),TRL=M&E&AOIч (),?Jl¤hWAzrR4#'N}1)UƤhWA>ٮy uT54۪=MVCejE4U imUWCejE4U i,T54XjY }ؖjh7\ . 
A%q54Ȼ$VCT3'Ϡl?T3XgLS`ڟ2M?ejcO{Gb54ǖ ZPР[VC2v`^}Hg]0v`>:3=cL1?惘ףT U U U U  =cޠcޠcޠcޠcޠcޠcޠcޠcޠc c c ?S5td5t>Q8 3ƸAVCǸAVCǸAVCsc c ?SlT5tT5g*yyj7R٘7R٘7j7j}&kT5gyyL1oP1oP1n1n1n1n1n1n1n1n1nП: : : 9wXcc?Ss[bs[bs>}y[R}o>< *7 *3?ǸA>|] 9t3xWit I>sgϦ Ph/*.|U އB|eq3XC>e*|4+ k 22gLS` 2M>*,T3XgLS`2m2Meڂ& AO})iPGGχʴ=M>& AO}.i4Y|L[LS` PiL}&A?B1#g>g>I0g>Ig/$eLS24 z=M?&AOχ}ϠgdAOϠgL0,l+=M>y}ZXQO aO A?e,|SpA?e,|>T3x5 ާ~5a Q>,T3XVLS` C>eja4 i4Y|g'\ A`3`yT $ U d3ȣ8.|y…ϠO>>Y z,|=M><gd3i4Y|gd3i4Y AO` 2` U i,T 2M>e*|4ۧ>eS 2M>eS 2M>,T3XY z,|=M>*,Ror`3ˁ) O98 r`3 Bi73Qffw LAq3󡚙?ZAq33nfy 4 zlf=M63&A r33iP oT4 i,T33Xff7* i,T33XffLS̠,d3-g1{,C`,fw A?by`Ẏb:,f& i*,T34 i*,T3i24 zb>T3i24 zb=Mf1&AOY̠,fd3X,f iPY 8,T+XWLSY`A[v3XWLSͯ`2Me1,Ce1ebyoY̠,fd3i24 zb=Mf1&AOY̠,Ce1&AOY`2M5e f1_ i_|,fO6A?byY ;,fO6A?byY ;A1PY`"d2MEȂ&AOY oT:4 zb=Mf1&Aިt3i2PY oT:4 i*B,lob4|,fLSY`2Me1eb4|e[j>le j05* f'|f/ jyA O8 A`84 rjyA ftP3i: ANA͇A.(cH(cDP~}a{c8Y?4Qc0]UL1oBͱHZ5vbcޠbcޠbcޠbcޠbcwgTlsTlsTlsTlsTlsTlsTlsTlsTlsdlsdlsdlgmqmDǸA6777}qdlsdlg ymymLa1oP1oPͱsymL1oP1oP͟)9 *9 *9 29 29 29 29 29 29 29 29 236ǸA6ǸA6ǸAF3Ǹ F3Ǹ F3Ǹ F3h7h7h}g彑1ṉ7}yKfyjLa %1| '||0f>E|930f> Ø/'*0fLSȧ c4 iW11ea`f2M1e c>T,X0fLSa`˜2M̂e =M1&ØSdfAOfAO˜0 z c=M1&Ø]d3i2P`˜2M1Ї c4ۦ2MV3jfA?|flA?|fۛA_٬fA_٬f}eZ}eCV3&AO̠jfd53i鰚4Y zf>T53i4Y i7V3eMy}RBPOЏ-`Џ-A?[3iA_[>T2x g҂왷|e>Ϥ}3o,T*XeLSy}&2Me A4 z[>T2ȳ[ 0oe<[yyˇ[}}2o-T2i2o4 z[y?yˠɼeLSy`fTüeLSyˇ[4 i*oPy`2M-eS 2y`2y` i*o,T[=M-& KʠGHe'p #AʠGHe ܡ ;At2Mu(<e]ܡ|elw(<e)ܡ ;A|q2iC4١ zP=Mv(ed:At2iC,T0XeLS C,T0XeLS`:AOʇf2[EP};` V;A~eܡ f2țP>T2!v(<eLS`:A*r2XeLS`:AOʠed:AOʠed2iC4١ zP=Mv(&;2Mu(eLSPy`"[2MEeP4ա &;2MEel4ա iCM`w(C,T2țP=Mv(&;AOʠed2iC4١ zP=Mv(C4١ zP4ա i*,lCeLCu(~١ f2(Pyʠlv(~١ f2sPyʠ_ء uʇP4 i4١ zPyGʠed2iC4١ ;AOʇPyGʠeLS`f{d2XCu(eP4ա iC,T2XCu()c2觌 o:RSHeO#A?eTy>ʇTjHe #A8R4 rT9MG*ٵϮsWcv>b1]gב{6]Ϯ|vmgbg:.:uXjX~ ϼ;`7x,?gW3oX~ ϼ;`7x,?gyw3oX~ "`7g cw3nϮãqXϸA,?Dg qXݑϼ;`7x,Hg]Gg#]yw3oX~vcw3oX~ ˱;`7x,?g qX~ "`7g qX~ "`7g cw3nϸA,?@3nϸ ,gqX~v+X~M `uX3o}vKX~ ͹gT.Prυ"qE>THud:3Dܑ"_\"xG*3|PrLTHb\,,Tk"eB2ͻXyG*4He-iޑ2;RXyG*4^b 
[4\4\N?-zT^آ-zHe-z^آT.zT.^=MD*=MD*w/lLT.iޑhT.iޑ2iݦw&˜; aE?ec.)Cs:^ aE?ec.^Sv1~5Asѯ&c.^'?j0 ˜&˜&˜&˜&˜&˜F0梧0梧0fc.zc.zc.zT.^wi:b9Hup:xI[%HeT.^3i ޑu&mї "ewiLT.iޑE(RrL9-i92;Ri"5E(HMbQ"OI)5Sʐ\1&yH@E_eHM.*CjrWR&R<.䢧䢧䢧dNM.zHM.zHM.zHM.έR&RewjrL}6AjrLNMb杚\,ӼS;5Xy&4bflbflb杚\,lM\,ӼS;5Xy&4 R&R&R`䢧`"O( F.zF.r>"o+;m>.v< "()s">.|Ӧ"og(i"i"i"i">.z>"og(i"Xy4b}\6e4b}\,Ӽewqe`{>Qv\ʎ;i{*;.^ʎס㢟OyTe]v\K ʎb]v\,Ӽˎסb]v\,ӼˎewqDqDqD1x=M=M=M=M=M=M=M=M4"XygwqwUv\,ӼUewjL.;.ieE9UqLV-i٪2ͻXyyTe]v\,Ӽˎs㢧㢧㢧㢧㢧㢧㢧㢧㢧c.;.z(;.z(;.ie2;[X~ B1~ BqLDqo\xc "޸EP1x7|"޸ȓ 7.⍋~Aq/57xbݴZ,ӼV&⍋&⍋Gx㢧x㢧x㢧x㢧x")޸i"㍋Gx㢧xbݴZ,l7.io\,Ӽ㍋e]o\,Ӽ㍋ew1xio"o*>[$~}\YewqO6<+c>.uEPqg}\4}\4}\4}\4}\Ye9Me9Me9Meߧ"(i*,o>ǟ)2vh>k2vg^}g؝X3vg>3fNݣ73v|3eǼeCOc}g>y>y>y>y>I1oP1oP1oP1oP1oP1oP1oP1oP1oP1n1n1nǟ)8 28vgƸAf777}rqdqdgJ[y>y>Li1oP1oPDZϔ}}}(y>L1oP1oPǟ)8 *8 *8 28 28 28 28 28 28 28 28 23eǸAfǸAfǼ;O18M08M03eǸ fǸ f?+>q>݇Ǹ fǼ_v v1i}9ȇiLJJ;3&hӎ|C,/N;?xc>4䫀ӎ2M*,T1XY^ v|X^v ie*^4v i*Pū`Ҏ2MeJ;4U ix4v zL;OAOiLJ*^=M&ӎU z񿩴cCɼɴcdҎ2MeJ;Cv i*,TҎ2MeaL4 ~ f2x f2'Q}l/'QȠlF!* DiЯCBQȠ(dd2i2 4 zB>loB=MF!& zB=Mzc>Gzc>GeA_+7>T1x G˂VXo|zc>},T!)XzcLS ,T!)XBRLSƠzc? A(7>T1X~T1XzcLS4Yo z7>lo7=M<@bC {Ap1ȓ!yv=ć!y{= C=bC {AO=Ġbd1M&{C {AO=`y2Me!y{=`y2Me!=MV FUàV ~VX5 zW U w0XU iCU ~`0țe2MU U`2MU e=MV &AOUÇ=MV &AOUàɪad0ij4Y5 z4U5 2MU j-NW e94s ij,T0;4s i,T0Xaw]5|aLSU (j4Y5 z=MV &AOUàɪad0ij4Y5|ad0ij,T0XSLĪk2kAO 2 7: ~. |ÅUe )c0țy _M. Մ‡ jpaLSUpad0S=M &ÅAO pa. z >T0S=M e24Å2M *\,T0X.|paLS`…. z7G9 z},)ys% ۻgXR KUR z},)y%Ň*){6K2MEKAN% btI1#%.)9MKAN%Ň%bGJ\R r.)>,.)}hd=v2vo쮝ݳW7vc67vNō݃0Lsk'cݧL1L1oPƱLT`U`U`U`U`*cޠ cޠ cޠ cޠ cޠ cޠ cޠ cޠ cޠ c c c ?Sqdq.q,0L1n1n1nƱ77TU`U`*Scޠ cޠ cȟ8 25 25 8 8vU` cޠ cޠ ?SqTqTqdqdqdqdqdqdqdqdqdg*0q,0q,0ywǼ; ˍcˍcˍ?Sq[bq[bq>rr}{[bq[bq>#:c\vUg ge`ylUg ާf]g Pu}5+u`Ug i,,W5:cL^34 i,TS2Me34Ug i*=,Tz*i4Yg Ǹ& z3=M*=4a:cd1x0 z3=M,T1X:c>aPu`2M,T1X:cLSu`i_i=vƠg®c3a1xj z&((Cf~`F1%0Q z (f2f^3A`0PŠ_0; ާl}&x? 
zM>;|a>e=jwO`A}2B2Mey ǽ`B2M5e=MV b𡪅ja\-|ja Ap0}Zja Ap0;=MV &AO‡=MV &AO oZZ4Y- iZ,l0X- iZP`2MU Z,T0XjaL}`0X>aZ,T0X>aZ,T𡪅2MU e jad_t/k~AMw/=T/G׊k~ATw/țy;5 s/țy{5ɚ_d/] z=T/] z4Ս i,T/4Ս i,T/i2}UkaZ v/Xx`C~M`/H}2Mc`_LS`}2M&}AO`C&}AO`_d/i24 z =Me y3`} f}2Mue4 i*-1e41 i*,T/[b=T/X`_ z =M&}AO`_d/i24 z =T/i24 i*,T(X~ ba5`f5`:F?dY/ǖe P[^w\ gY/˞e*}ٳeϲ^4\ z,y eɲ^dY/i4Y zAOe*y eɲ^LS`f{GbY/XCe*4;z;z2Me*=ѻe/ݲPMw&_лe/ݲ]i7-|AMw&_w{&_L۳,T_t/i4 rn9M7<0&_t/i4{X"iny`M &n&ݫcfL݌{cw7cؽֱtϴձ1v/u}gG:vot>3v/gfyjyj햟7 7 7 7 7v6ƼAƼAƼAƼAƼAƼAƼAƼAƼAƸAƸAƸA~7Ȗ|Z~c [~c [~c [~cI1n-1n-;4 7 3uƼAƼA?SoTwhToToTo>9 3ƼAƼA~ߘ7ߘ77Ȗ7Ȗ7Ȗ7Ȗ7Ȗ7Ȗ7Ȗ7Ȗ7Ȗ$lyw,7-3Ƹ%Ƹ%?+lqKl݇Ǹ%Ƹ%7|d q/xq|8L>ϼCC5 /V7cywPCEɻ}L$x` z=TT.x '&r D ɨ\>Pɗ`r2ME<\LSɗ`/2MEei8-sU Ź 8PŹ/$~8,Tqs2Me*=Tq.X\LSŹ`f\L}`q.X\L}`q.XCe*4U!sAOŹܿ\_ֻ87.y~Źz\{\炼\8] zŹ o8M`炞&sAOŹ uq.i8PŹ uq.i8,Tq.X\LSŹ uq.XYLSŹ`s?dT.o fG䗻宣rA~\З=rAqTrA_y#Q uT.X\Q4{\LSQ`rAOQɨ\dTrAOQɨ\dT.i2*4 z=MF傞&r2ME傼%\LSQyKQ`r2MEe4 򖘣r2M%|eJ4 i**-1G**,TT.[b=MF傞&rAOQɨ\dT.i2*4 z=MF**4 z4 i*,l1*Ĩ\LĨ\LS 2PɗNނ[7x d-K } 2%[&x >oAޱp-i24x z =Mނc[doAޱp-i2,T%Xx i*P`o2[LS`o2+?翾n\}9Pݸ/vゾ؍ r`7.{}9n\ЗqA[v7q2!ݸ`qqANݸ n\t7.iw゜qANݸ nr7.c9Mw?_}77v]ؽ{?Savg>v|>3m|n; >v=vqOyj;y͍y͍m77 77 77 77 77v7!ƼAƼAƼAƼAƼAƼAƼAƼAƼAƸAƸAƸA~7%ٛzsc {sc {sc {sc91n1n3 77 73qƼAƼA?SonTonTonTon;SSn;SSgjʍygjʍygjLM1LM1LM1M1M1M1M1ֱ)7 )7 )7 )7 )35ƸA6ƸA6Ƽ{rc`M1o8b; )35Ƹ%6Ƹ%6?+lʍqKlʍǶǸ%6Ƹ%7|dܔ qS.xqSr4L>O3TC5 aݔ [bݔ i)PM`r2r)\n4Ք i)PM`r2M5),TS.X\LSM`r2M5傞&rAOM}P+i)P1ɦ\dSbDAObDAOrAOM}0i)4ٔ{\LSM`r C5ej4Ք{\LSM`r2M5ej4Ք i),TS.X\L64 G}ndpX>rOaC5Ȃ}&x GA0b>r->T$x i*,Td2ME@e4 iACfƂbf, fƂY hW*U v+xez=T+XWLSUɪWd+iPUɪWd+i4Y zz=MV&^AOU`^;,T+XWc4U iPU`^2MUWLSݖ`-2MUezyUz4U &^AOUɪWd+i4Y zz=MV&^AOUz=MV&^2MUe4ۯAz=l,l,T%i2pW? 
] E͍2*UpWW]A2p+;wyᮠ2&]AOᮠpW z w=T+{w=Mejn4]2M*,T+Xz i*,T+XpW?Gy=6 XW{`A_e}M` *c,諌=`flXL}6a,XYH9M{`AN= X r9M{`K0= '4{X>ϮZaC+?Ϯ`Ϯo?.Ϯ+ؽϮZg?Ϯv3no?Vg 7x>Vg ]7x>Vg yw+3on} ޭϼ7x> D+3n7VgWo3n7Vg Zaqh}vD+3ny7x>V<yw+\ yw+lnp}np}np hp=C3>hp} g| A4g| Aknp} 7_ \]gX?,,\yo|5v>D(XyC3nϮSԟqjp-'Tk?\9Ejpuve:x_N-^Ńwk:xuoZ.SkLnpbZ,,W\Xy74bZ,Ӽ\ew+x74bZ,Ӽ\ewkLnp-zhp-zhp-^Ǫ=M4wkDkD+x7=?wks7=p&עnp-zWnp-i 2ͻx 2ͻXy7wkLnp-i 2ͻXy74bZ,Ӽ\ewkLnp-iS@u`:xO ށu`:x[@u`:x[@u`:x[,^w>c:Xy34|Ft-i2;Xy4@ ]BkAt-Ȁ];еkE޳UkwZ@ע} еȻ t-ޙ]&]&]&];еi"еi"еi"е[1 t-zt-iށ2 2;]ewkLt@bZ,Ӽ]e]e]ewkL}@kLt@bZ,Ӽ]t-zt-zt=Tk߃)е;v t-];еGWk7Z-]AESkZ=Me]&]4Z4Z4 ށ2;еXy=]ewjLt "E~"Sfk_) E~۬Vl-*Cfk6+oZl-^2;3[ewfkLl-zl-zl-zl֢֢֢֢֢֢֢֢bZRewfkLl-^)2;XygwfkLl-iޙE*RfkL,iޕ2;XygyHZ,Ӽ3[U֢֢֢֢֢֢֢֢֢V2Md=Md=MdwfkL,i_ _Z,l!XyWV4ۯA(i-%]Z-JZP %E_H(i-BBIkTZ-NeJZPZ􅄒֢֢"WIkDI+W&JZ&JZ&JZVX^QZ,ӼKZXy4kט7>?Skysysys? 5 5 5 5 5 5 5 5 5 5 5 3ƸA7>OIqsqsL]1oP}1oP}k2 5 5v\cޠ\?Sgkσ:[c~:[c~P>5灝15vZ}(ulL1n1n 5vErvƼ򹜝1oP1o|.ggkkml݇ǸAv?+Glqlk$}x*xpL+1};x*O#`w>_89$?Ti+xއP!}?x_|F>,T#XCe`4i,Ti뇌i=TL+1 ;P1/ƴV r`L+ˁ1/ƴV7 zi=Mƴ&cZ zi=Mƴ&cZAoqL+i2,TL+X>D0,TLbZ2MŴei=TL+XVLS1`fVL}`L+XVL}`L+XCŴei4!cZAO1ɘֿ$V7ל X n XAޞr+/Z +'$Vw| XA~$V_84zW&XAOI$C%eJb4 ,'eH4!XA(LbŜXA~1$V_9U$V_9PI2&I 3v+ ޙ`X i*,T+i24 zLb=T+i24 zLb=M&&XAOI$Vd+X$VnR4 i*2M%eJb=T+X$VLSI 8,T+X\JLSI`XAq롒X2M%$Vd+i24 zLb=M&&XAOI$Vd_eLb=M&&X i*,l1$VL$VLS`f5a [o2lC r`*Ȼ[ya 8w*ˁa/Ud*/ z [=T*/ z [4iWje [=T*XULR3lR3l,T*XULSa`iz#Uw\zUЏWAr*^}zU/WAސu*苗ի/^Ve#W2MUe#W2q*iz4] r^9MW<+Ut*iz4]zX>^yVի իW??}a)5ֲ35vE}mݗWc5vE}YW}gLWc kYc kY?_yeye3ղƼAU7UU*T-kT-kT-kT-kT-kT-kT-kT-kT-kd-kd-kd-geqee1n5 5 5 5vYY$cޠjYcޠjY?SdT-kT-k>13ղ~z՘ի~z՘91?.WkWdj&Uc6Yz}~t&U?Sj&Uc U?Sjdjdj,Yd֫ƼAիƼ91oP1o|NPps>PY}9x އ}PF>v7mÿpJ#x ٶ}P?7 i* OiPY}*x ާ4}+PVx>X[ PUO=T!xާeBy+Y`2MeB4!P d*B=T*ˁY $: }g r`*ˁY ): g&PAOY,Ce&PAOY,T72 zB4 iB4z,TLSY`P i* ,T*X>D0 ,l" i* ,l" i* PY`P2Me~,Td*i2 / _IY=?--צߺ6dMrpm*My˵ um*o"\ kSAޡum*[ MkSA~TdmjSA~ITdm*XTLS`jSA~ITLSfCtڹvnFQA_+lFQՌ Za3*/݌ e7BͨwA~fC5ejF4Ռ zlF=M6&QՌ zlF=M6&QAOͨfTd3*i4ٌ is2M5ejFCLSͨ`QՌ i,T3*>nF4Ռ i,T3*XfT7|܌zfTLSͨ o4ٌ 
zlF=M6&QAOͨfTd3*i4ٌzfTd3*i,T3*XILSͨ`f5ͨ`f5ͨ`J$2k*Y o7: =g^?9 265 263ŦƼAŦƼAŦ~Ԙ7Ԙ7}`g JgWPg Jg/ϮԿƠ?x1(5A1^ JGDx1(3x1(5 2(3ƸAƸAƼYA1o|fPjTPj,gԔ5G}&:XME}b)xgaTc!g> ,T*XHCEeT4 i*R,T*XHULS`"U2ME*R,T*XHULS`"U2ME&#UAO}b)i2RPHUd"UAOHUHU>L4(R,T*XHULS}"U2MEeT=T*XHULS`"U2MEeT4 i*R,T*XHULS`"U?0Mi}P V4X`C},$x `]ii}=x{D>*4U V鱥o>>=SΝS[tK #HX%B|}NfR}S/+rK4%\)DMh`uAn(U' Vlm Vh d=iU !};`ہ@4X*5 V& V& V& V7*Ѥ*Ѥ*Ѥ*͉ V& VK4e \D`DS`DS%2XP%2X.є*pAU%%2X.^"h \) e \)UM.HUIUID7Y]mYٷuC]]M;蟖W_, ێ@.r DW Ks;_\huC9c+Ѥ+p_K4 \)W wv|.єR+Ѥ\zȥ_\z{@.=_W >ގ@ >ގٻ}_7+p_K4 t4 t4 t4_&_&_&_&_&_&_&_&_K4 K4 \)W\)WM9n(WM9hJ̎%r|.є+p_K4 d%f \)W +1;M:M:M:M:M:M:M:M:M:n(WIWIWM9hJ+DS% :hn \)Lc_+_l t t ;Yj@'@'@O+_ t4_ t4 \)mEaLWM9n(WM9hnc:n= \=_K4 \)Wyvh % ,u`7,@֍ , L, ل[g`K4% \IQDsYDs1YhZhZhZhZhZC222pYC2pyo .|Ϩ?rb)+6O)6ߓ[b"obɔbXlR.{ыwbF, RVl"c)+ Rv1 9s%+6`ŜA 9"s%+ JVl"s%+ JV3(9X1gPrbΠ`ŜA9s%+ RV3H9$+ RVlc)`ŘA1c)+6, RVbs%+ Jv19s%JW1{+E^sJQU E^xPU E^xPUl>Z E^ȫ^byc)* RU\[)* .ys%* .ys%* .ys% Z8Y <[uCICсPt|(:p>MwCIs!APt|.p>CI烞y4]|^|=hJDSJDS%|.є+p$_K4% \)WMIhJuCIhJDS%|.є+p$_&%_&%_@GJhRhRuCIMJMJ_|Χ?Ѽ=hJDS%|Χn(WMIhJuCIhJDS%|.є+p$_K4% \)WMIhJDS%|] 8F<^BOw' xz|#p>8W7+=^BO|P tG|1p>DS DS %Rp.є+p<^,-$_( \~ In(WJY3ZȒǒ@+|^+ %_,- dcWIWIWI % t4) t4) t4) dbWIWMIhno|.є높|.є+p$_7+p$_K4% \aPDs{à+p$_K47 JhJuCIhJDS RhRhR^/\ d dh=X [D#Z&z@t=M Ou,x[u,ֱ@_Zb:}c,:ֱ@6*u,x[Yu,Ѥu솲riXIXMYh:DSֱ@.=m \))E} rjX vX,Vn(XoX,b@vβ@vPb%.єX,ѤX,ѤX,ѤX::::::::.єX,p.єX,pb0$pbK4%bK4% \)X ;:hJ,DSb%.єX,b7X,pb, t4) t4) t4) t4) t4) t4) t4) t4) t4)b&b&bK4% \)MMhn( \} X,p\6K4A]z@eփ:ԃ:ԃ؃`r d)a=XsK=XsK=X =X*z@փ:ԃPz@փ:ԃ.є"pH,p`7,p`K4G*`7ԃ.ԃPz%҃.є,pg&a a al1a al1 dnwXtAwXt`;, hDS%kaK4,h."MMMMy@F@F@F."y@F.Yv}6_b+6b+6_bu+6|)6_?αbftϼb|17rK6.6.=\l>Rs3HXPs3Hn( 9Vs|brsn( Ps3(XU1gPαbΠcŜA9NJ9rs+ 9Vs3(X1gPαb cŘA:NJ1t]Lαb cSŘA:.&X1fαb cŘA:NJ.1tccŜA9NJ9r]LαbΠcŜݔ^bYwSzŊ~WWzŊ~WzŊ~W|ҳz.&X1@+ +VW3+V\M+ +V\M+ +V\M+ +v1yŊ93xzYTkjY %+ RǠԁY %+ Ϛ·Gc %+ z1JVDS%gMn(YYMhJVDS%.є,pdeK4%+ \)Y %+ \)YYMhJVDS%::ǠMn(YYIYYIY %+ t4)+ t4)+ t4IV8 t4IVh솒.є,pde JVDS%P%.є,pdeK4%+ \)YYMhJVDS%.є,pde삔G#o(YY|,p$desg8' 
ϟΝ %+ dlYY|,pg7J$p>N8 .єJ䆒.єJ$pT"K4%+ \)YY h=# \o};G >@@v,з}d# diY };GhGhGvCMMMGhGDS>%K}dK4#|dK4# \) # \)YMhn/.^"# \)Y%>%P>%.є::UMYd~O66v,Mf-m2 dsiY Ky;d&@6u>troZ wvсGZߓ@@ @t4hl@ @.@ t4@hܥځhҁDS%r.є-T;Mjn5g\Zs5gN<5g\ZsvCixjXs@֜@֜P%Ҝ.є,Ѥ,Ѥ,ѤҜ:Ԝ:Ԝ:Ԝ:Ԝ:Ԝ:Ԝ:Ԝ:Ԝ.є,p.є,p4g$p4gK494gK49 \)Y =khJsDS%Ҝ.є,5g7,p4gl9 t49 t49 t49 t49 t49 t49 t49 t49 t494g&5g&5gK49 \)'OMihn9 \} ,piDsV5\9NK4gY%,pV5\9.ќUf Iep2kDsV5\9.ќUf Ie8.ќUf Ieфʬ YCG*&Tf M:P5t42khBeф,pV5t42khBepYep2kDsV*2kDsV4\9u.>Aeф,p5tna+kV֐d+kȞWl,d+kVй!=V֐=le XVй,p5dc![YCGK4]Vpl+kDsζK4g[Y%# [Y%#Vpl+kDs5\9.l,ф笡 YClN9 =g =lx9!K}yz5\={9YC5sK4gY% K47x.\H5d49khs֐є!)YC7!)YCFS-Slͷ(62۸|O\LpL1fb =g1ŘAzΊ1]9s9+6׺U笘3(Y1gPbΠL8 SsYs8\.8J 's7,p>8Oo3,K49 \)Y|.є,p87,=gѵPH|t-p>28 dlYM)JhJQDS%qI Z JQvCI}P@6@^- %hW(A di Z K}PhRhRvCIMJMJMJYXhRDS%%hK4%A$hK4%A \) %A \) ZMIhno.0(A \) Z %P%.є킔::Ք i,lH \ v6^!-= ilJmH dWjCZ RyؐԆ@v6{My.H}Z-i7>-ז@_[YZI}Z W]֧>-#O dwe}Z >-Ѥ>ҧr k}ZI}ZooaO \)}ZMn(}iH t4iH ֆʐhҐ- i7!-Ѥ!- i,lH dA`CZldA`C eH \)CZMMMMn(CZICZICZICZICZICZICZICZICZMgJMhʐ8;ShʐDSʐDS%2ݳ!-p iK4eH \)CZMِvChʐvφ@G@G@G@G@G@G@G@G@GʐhҐhҐD{M|hʐDS@6.є'pt>K4A4.єԧP@@@>- i:O tO tO i [Ȯ@JȮ@G%p.ܞԧ.є>ҧ.є>-p>%n[hJDSJDS@G@@@v֧P@@v֧8i6i˰O s@>-!hJDS%iK47h.%MMMMyR@F@F@F.%yR@F.oo֧>/ |݋͗|Ջ|bzxwNjaԮ㵦v| (6?.&]FPl>}Slwtc]+ RVl>sSv3HŤ( JVv|bҮs( JQv3(ZW1gPڵbΠkŜAi׊9Үs]+ JVv3(Z1gPڵb kŘAj׊1Ԯ]Lڵb kCŘAj.&Z1fڵb kŘAj׊Ga1Ԯc]kŜAi׊9ҠsA+,IQVYD Z1fb5hӕŘ%j.& Z1fb5h3H Z1fb^G Z1gpy3( Z1gpy3( Z1gpy3( ŤA+ JV3hEZ|/p>08 LZ9Z8䥵"R·XSY_|RΧRѿ, H \ES%RP%R.є"-piK4H \)EZM)hJDSJDS%R.є"-piK4H t4H t4H W:TP@G@GJhRhRhR8 t4H t4HiK4H \)EZ|R.є"-pi7"-piK4H \)EZM)hJDS%R.є"-piK4H \)EH i7"-p>8wxZܽP@VR "-p>8wJ`8 B瓓섭H \)IM LhJDS@V]3 iiO OK O DZ DY؟@HF?-i,uO DZIZI O t4O t4O t4O d7dZIZMhn.є?.є?-pi7?-piK4O \~ПDs{?-piK4hʟvChʟDS ҟhҟhҟvsi-W deZ ;/zYr@\- j`-W dZ {XZvCh~o^IZ/Y-Xjr@V ‹@V Pr@V .є\-Ѥ\-Ѥ\-Ѥ\톒::::::::.є\-pv.є\-pjQ%pjK4%WjK4%W \)Z {AhJDSr%.є\-j7\-pj-W t4)W t4)W t4)W t4)W t4)W t4)W t4)W t4)W t4)Wj&j&jK4%W \)OMhJr%.є'p1r%\rJPSPSs>\-Ej5j5jp1= W|׻||0ՊZSVl>Sl~(6)6滿|bo1c)W+6) RV\bps%W+ JVl>v1Պ9ps%( 
JV\|3(Z1gPrbΠjŜAՊ9s%W+ JV\3(Z1frb jŘA.&Z1frbb j\3HZ1frb j棰ŘAՊ1]LrbΠ$iŜ IҊ9ac&(I+LPV$3AIZ d1fIV$K]Lb %iŘAJҊ9%iŜbΠ$iŜbΠ$iŜbΠ$i$3(IZ1gPb~vJ9Pl>/Y8 KgҁkyUkk7x-p>8X 7x-p>8d2-^kK4h~oєx-p>rChJDS%.єx-pkK4%^ \)ZMn(ZMhJDS%.єx-Ѥx-Ѥx-p>{hRvCMMn(ZIZIZIZ|,0Ѥx-Ѥx.єx-pkJDS%P%.єx-pkK4%^ \)ZMhJDS%.єx-pkxPZGFks/CY[8] {JȖZGn(I|d$p>8 OU/x-p'K4e> \)ZM[vAn(ZZRvC}#QȢ@<Fx-7k(^ dhZ }#QhRhRvCMMM7YhRDS%kK4%^kK4%^ \) %^ \)ZMhn.^?(^ \)Z%P%.є::]?@hGx-Mk,^ dkZ ;]Z^@H۵x-k7x-p-K4VjtҭBn@V vnʭcv[-Ղj\٭۞nʭj&jK4V \t.є[r]@naOiiZvC0YXȂ@֧J@֧P@֧.є>-Ѥ>-Ѥ>-Ѥ>ҧ:ԧ:ԧ:ԧ:ԧ:ԧ:ԧ:ԧ:ԧ.є>-p.є>-pi3%piK4OiK4O \)}Z =hJDS%ҧ.є>-i7>-pilO t4O t4O t4O t4O t4O t4O t4O t4O t4Oi&i&iK4O \)OMhJ%.є'p1%\JPSPS֧>-ui5i5i-![iK4WiK4]i7\I֧.\I֧2֧2֧2֧2֧2֧0iiii7\>S>3H}ZL1fb i3(}Z1gPbQIV3(=G1gPbΠi\ŜAӊ9ҧsO+ JQ>3(}Z1gPbΠiŜAӊ1ԧcO+ Rv1ӊ1ԧcOiŘAӊ1ԧcO+6hnCX1_O0NJzR=VדbTz|Ni.&X1T=MŤ+{b cŜ݆bnCX1gPbnCX1gPbnCX1gP걋I=Vz3(X1gpy%ddqq|)p>j)ZK854QZKvCi#I||.p>Ci瓠)@&ZJKDSZ%[4%dK4% \)-YMihJKDSZ%Ғ.є,pd7,pdK4% \)-YMihJKhRKhRK8i t4%d&d&d7,Ѥ,Ѥ,Ѥ,p>nhRKhRKvCihJKD{d7,pdK4%dK4% \)-YMihJKDSZ%Ғ.є,pdK4% \)-YMi.H e O}Εv|"p>8W7y,ݪcҾc,Xm O}Εv|RΧ-S_i@b VhJDS%2.єy,cy,Ѥ,п dU%d˯"-Y ͯ3D#{F߿=o3~yFWͯߞ]r??cٗrY[5;/׉d~>{Xc??????_?sZa߾/_?xoxπqFgtf1gt<33: ¯Yft.3ekbE_%,.9/O|pT_úu/Vc˾=\\ {|'Z.߯_دcB__˛ƯUU Wz-wg炾W~/~^]#{/ﱿ9G/읏O=-`-`ht[ޑp { o6e[_Ȏ-`sAX^s.ȎMle rd_kE\ "|Z.y~-E8 2׿`n/._Ⱦ_~-#Zsed_{=?=+ ed_6OZ\Fk^2˻8l=˻O@2߯ed߯ed_vy266sٹZ%vA{y?{kyO~-~m__ {{H{{H{{H{{H{{HCR:Ա=!ulc{HCR:Ա=!ulc{HCRllzNpx8^FY>(ge_> zd%uhgu;h}]u.b2..ؼ_6kmO/~܁\Ldyu~~18#Ұg~g~g~g~gg^cY{bk^s^b˳Ŗgg;ϋ-Yby%K֚7\5oM5oּZ0*ך7\ּ>Lv`ہ ?25/r[LXN.s n1a9yC} tLXNS@DŽdGrHzd,'=2@dGr2#;rd5o\>ך7\.k.ϵ Zsyεf Z3#c葱 Xkzd5=2֚k1\F p:Z5=2֚kyyC5k1Xkzd51\F #c葱 Xk?ZKk Yk^=yp{c+3Wgovyv`葱 yp{cy  '; yzd3B0FZlnC6WLmhCNImhCImhCNJmhCJmhCNKmhCKmhᯡ/І|hCl蘠lgb3pyQ8h*˟'N< φN< / gCO [LPx8:&(<N8 ?p6tLPxλʆ φ φ φ φ φ φȖ WU\xU˅WU\xU˅WU\xU녟҆WT6P6P6P6P6P6PECedsp򮩪 ҆҆҆ 4p.zdJ碡GG2hhl.zdJzdJK l葽 φB(< ܞC(< 9³ φ5 φ φ φ 93p{ ܞC(< 9³ φ φ l葡l葡l葡ll3p8\xTS!qP%[L˳*b*<d<-v-Jf,<d<-A8#UT(TFJ=UT( ,X҃:ܛ(=-An1uT[K,Jpo2ؒ]ufzP[l.Ѓ:bZbӊduTrVcuTZa;anCNean:@-pa7Peanۂ;oچNڂ4iWմ 
v#J󰚶J6pSJ6pkJwM<=66<=6ndxdxdxwx#cxwxCU`ȖU`%A]yAӕam2OW 9OW<]i6<]i6Xi.oo4Kmzd4Km7W =2V J8, TpXj[amR@U KmU%6Xi.oƮ4Km.#SpXj[amjڂ4izd4/RنЗ}ambyXj=w6twdyXj=w6wn)C}amedC}amA*&gڲy˟e2zds-Q@גԀ?pKP>pIj.I@ % $5$|Ԁ?#k5zds =\@l8n2q8zds =Jzds q=\ed.#7?#k5`{{`̕JW+_e~7Wrs%1Wrs%8Wr?#+͕Jzds%1Wrs%1Wrs 27`?п܀@on~77`?#}HôNHFmG20U`HT}$# S+da>HFڌ3b4LUG20m>iw4LUG20qF.T1]ΈGbfĨaZ~$F Su5LGb԰Y#i3^Oc NÖ̳HpڌדGb>6cY}$8 j#i걏aZ }$8 R#i걏aZ}$8m6}(8T=Z}(8}>}(8}T=4ӇC G %?Sa˃΅Ӈ.:NJ8)}('?З҇r@vJ)/;\\SPN=2vJ)p.#[pS >˟]?.sU2`>p>p %?sUK\U}(|0WUJ8GƪC[e-Zh|V2rm"Ж@?eSЖ@?e>P+mԊC["Ж=2vJ2jE->eSЖ*SPkNCt)}N;>Z@҇Zn vJj-GNC)}|`2чF蟖чF6@F|hsm:`$66{jaӊ\ =lz.6WCVjarf6M+bs5TlzgjaqFkyFabak`z3FP=Cm08?FP=CmTly6z'jz6z'jz6z'jbڨr?6*6i6z Pl^<3Bm0æ?L6zv3F 9%F 9'F 9)F 9+F 9-F 9/FQ.U8GN:WNC_!T8sGN习C i p|pO }P|pNG*y8ǰyOpˣXELC_x1 y^ !OU8KT.SLN0U8*%ph \ G G 'p^.K׆|hikK׆囗 XA4 x4 =24 Xtm葡 =24 =24 K׆斦G" w"iܢiܢ 5wG Q~ Q~'p{Ԡ 5wG *P4 3 3 3 3 ĮjbiNJ슩9)+fiNJ슩9)+akbZKӞĮ'%vŴ>)+ IXӜ=3bM]XcsRWLIiX᜔SsRWLIi^1U8'y7csRpWl<񺰊9)+dUI]%bN 첊9)+*஘V-'wŴh9)+*஘,'wO rJbN UŜ9- gӂ@U1wbN"m9mh ԛiC[\qr-%l }ض.Og-g=v.E"gY }ՙ0C.Tndȅi}[!*m y\ַP9o \ʻP9ok葱P9o \BB崾 -,TNP9R ԖJr]]#vK}]%ٹ 둆ꑆꑆꑆȶq#8ꑂGq#s=#C=#;kq/׏PԬ*fET1ma_T@ET1-D^T@݌ʋ [Pج*E1lV^TGS:flV^TGS:b9լ*fEج*ZbjV.fEuTe{:a؞z'/i+:ړQŴyQUL@_TGӞEuTEuܞ GϗQjO^VG=yY/՞ T{:j催V{Z eTA'/KX SˍeTr @߼ݼݼݼݼ eWCOVH/ BzY1a놮^g5r? +Y y\! \^V\!j eVed˳^`j+P-X _E-X%l* ll^ؼq eSC5N_^76/kzdll^8ySa)󲩩2Fb)󲩩ReSSJMM*e^65y˦@2/Uʼlj T)LLGR&#c)葱J@.@2/Uʼ T)*p{e)˪GReUC.@2KqnoJ_v{87k+/;a+aӇbsMie^l6}.66K7?7CMTl^?'j1,&*6DcP=̿rߢ&jb}rߢ&*6o^Ɯ&z&Ql޺<9CM05æwb3 5æ&j)&j9&jI&jY&ji&jy& kG}P05BWSC_ѩ&4A j_MPr{ *&O4A } j j葡 j葡 *&(pMPCMP9c&9j+&(pBj \ \GΟFR#C#CpS#C8E8.?n耡)2>'p޶4t4-[.9 =9 =9 YnD}EIM'[4QD}йE}йE}йE}#C}8' =2' =2' Ieds}#C}#C}#C}8' =2' =2' =2' =2' m/OB'Qn/OGGTnQIC Ié> ka5~7fTi7+rMY1-iߴӒMXi7{ؼ}nVLː7fqMsy|yEa7gTi7{m ߴӞMXi7+vbi7{MS濧oMSWd𦩫roDxՐwKM]ZRm ԒmSWo|mSW_4OC7^~_է/} t:``d?-pyy^jd˳5FO<\m$+Ț%c耱f tXzdY=2,;n y[VB;mEp򶳭!/km y\l T-Pv.>,o;zdYv.>,o; Iyۼu浆u浂,K֕j,pK˒ue }Y+k ϲm]YWe-C? 
xPzJpyl>Jdwߓ ]:.Mv&FQGQ ^XOb.7XYObn7X !Xa/VC{{8lj蟖aVAvZ 0W˯z*pU\OV_5?V_5z*pyks=qX}#c=qX}<@vT8j$8*-\緶pJ+p +^N+^ +æ˗bi/bi*PaT6bibiGbi*pywq4pæ,6Ul6M8lj葱8l ,6zd,6"*8n TUpaUpйeUpaUpaU*T8Pt=ltfcft [2A:7=BŴ9*A8wn[i3_3fcat [#n:naPZCkv ju1aPZCaQ>j3X@aQCG@}8, \5ȸ?, \ޯ5_?,jas~X=_?,jas~X=_?,jq~XԐ? : zK~ؠӐA'P[@}7"aӂؼ,67"abMKϛa]l^v?llr3,b]l^<_s^.;ZTٱ-GmUY댐\< =];mgrXjُ W zdLW$nzΏ+L2v~\ap?n I@;{Bp SySy~wBr {ű㖒$T$b-% $b-% 7a 9v~5P㮑c]#)]@f /ߐI?7ItqIA_!fҏM*]yoPJB/j֏O z $T$E$ E}O"$T$qIAO"cE$ cE$)v~\D"qIicdGqI@&ˏC*Y~pz`1 Ǎ! ƐJ7|Ǎ! ,?n ٟd;'X,`} uXBSxH^?a]ReHap.}2$ː!=oaa\H@z|8X!=> /a;8#LJqFH `}eJ rJ rJ voc 0 8 >}*H TSzO z!=.Շ0aVzpذD@pE@\amA_!6aӢF[1o58ᰨ)-RX[wڂ\C k r )M8, <6aTT 'D !-}&HrEA_[$}m䗨܂ڂڂڂڀkN+am>- - - - - M8<&ڂڂ0iCXamA amA amiCXp&>oNokN-ڄ}X[am.M8k va*-ȑ)-؏l@_P706| r70)l2moCu >l2uưv"7[-afE0nZ1l6ܛ k7[-{Afa"p1n()9nH w"Ƌ {+2H8l(j{+ rm9HXkފH8j{+ zdck" 1ݮ8v>!t1ݮO(u86ӊg AvDi3ia;nHw "R&nHn92qPAvCAP1HB_[F y;.Hd`"b킉 #킉A &*N~:zd & zdj j2sHv!vϡW<>>YvCad&ŬvϡGƬv!I1sH8mRj zdc[ vz`P+qv+CBűۭ n2VV߽?2kO)Y?kdmTpJOŚdmTS1_6~*ּ 'kbQ~'kq}jq}T[sbCdýGɆ{j J&>M6+}TlW^cdǸɆq5o㾐DB1 ۀ酜#ڄ}RBPԾ'/ARB. $I ^C}RadfaFB_>o}/BЧ/ldlzy 6KOOz}RiIo@/!}!a n~ c48,M$IDR{`jS7O3䅞gh>O^ym|Bϳx_ =>}Gg 0iamBj<0^}B/>}f8<؆/0Ȧݤcv>M؇/0Y8&}{ഛan҇ oI szOۤ^&}RB/>}a&I 6 rdHj_؏lXQa ?%a7SO>LI؇) 'a$,XSfdJ¶!) jH$l"Rf`8ҵֈbmF`Ԑl(o555cԐl(o55$#&[M x1QjjH6GL?l(o8^Z 뚩f 9%ƛ-*5lq8P1َနov1x@}lv1x@eh] /bjpX66dƛ/y2EH`xi3Ex^ ҷہrB-#z0Rlq8p)>r9oެx8PyfÁܬxH3+0꽆z^qRYqp8,H0 0z!8PfDBћ*>8Pf =2ћ*>8Pafā 7[&<Fo88<Fo8Kaf =2ћ-N #vћ-/Fo88& 7[vћ-/vToV<z If ToV<z@%՛/͊rdN7+~Kź`}v;$Շuo|.a 'l.a 'luߣ=X%l0ϨO0Iu>'z`}LeHF9 >ia.H `ý4:p 6+H `ý4ːFֽ.úW/S]SR]sR]{LM>8 Q\kDs>p.e Ȇ=WrA 2>ߴWr\Ыoarqg:Zd} h켠d <0 >Zd}yCA v ;y>o()y* T 'uE$p'nAE [P|>O8܂g@D EH>ᰨ'C"] zd!ZA |A |A |A |@D H>H> ">G4D$_K|H>aF6&N " zd zdN "nH>ᴛ O8&y}A->Z>aC^_ y}.O `/?s@XR`a#a]x/a]x/B`}xyX/aQ!3To XC`}e Cs6G pA#H`} l  Aa "?{29 9  ϙ v LAZY~A.e ,W^ zd}QÚVY~A >E_%죹^}\Շ}*}*o(i}#v$ުe;NH8#a.Bo;Qk@؎>Z؎>Z؎}SlG-v$죜^`>~/-y°Í$ÒD z0,*aiP.M>{ba7iP.M>yFýIKr'ذhI.ն%\ΓPrQN}rQNAĮrQNBE9 ].)yu\. 
HcqѦrMiX|EB!^C4 PeE ]&%IGKBY%I[pM+~iiR\i*kKr)4)_.4|\rMSBE@kN1嚦fLr嚦;/(!GG\.*kksT*1~ Z\dpM,|.Y)|.Y%\dUk'GFt+pg]*a] Ht+tg]JprVB+*ܽ\P >J\. Hs*"rUA/0 EV(|.Y% EVM{""Ӟ@s*'P\.*Q$S>˔r>S>˔a]ϔ̃a|aQ)0'X{㌮0]\<_KKK֗= /{:t)ذO= `>l' SaC:{29jrHa v/J 'LPs LP6a>&MPAO&( dOa!H)SwdOA/Ȟ^&=La#{Ga/繆GS>d-y>%SAnJ8݂Z OVA RGU#J! h! h!1WA/0H^`Z zAj*ȑIj~ Gqdre m\,ᰍ˕%&VG VG V L"DZA/0^`i L"-ao+ zdirM$DZ>4/aCBnyA"GV#H+]i}i}i z C"-a/oi ="-'@%Ӟ"GV#HK8 i ="-'@%i}i}i}i)$ riʕ%]YA\YA?|*؟͞gvC.>8nv}-`Jovӧfa}wCfa-`Jove!el7;hقqFl8#Z`-[0Έf^0Yy 6l7;Ѳ 6l7; -͎øinv Z۝l 'NNJow7QvvyZvyZA;۩i en%Tt<-^&HHzTW_/6b ?;<>mnZpxڈn+@#v.tӈ%hzd4b;Cv^鶧ݱW#ݱPFv^@J5z}@^k z QzݮKHv^BERk^+!Gf镐#uo]v. m]öK .v aaKBXЧBv0٥n!,uKv02$,nh_p,nJh_p(n&v0=2 ឮ-N;5Tt R.7L(Ew0nEwܰ/<rÂTt N rÂȦ݄vaAvai7]npMnȨn&PݮEL(w1ZĄӫZĂ^p lveY~ڴ;XoR`a<úW`}~X뿋a]O`}~X1O`zCwg8#`;3;3;3;3;3;3;3i; v0'L[~i 60m-pôw/+)I^Wwz^yz*U U{KG@` zd^/az%_ zp`~/SiKؿ>-u V ߌe zF6m50m=2L[A GV#i+`is@}TӖO z i+5Vk אL[AL G&Ӗpxȴ%:ȑx}@xx G^aK+S+S+S+o [^^F댂]vMؽÿ9욲{ < ^.O8횰{ <>k%o^>/AEK8x ^}x ¯^%^$N^iÀK8mx / ^A Gp0 N^iÀK ^=&{}` +ɴ+/$K%Rx,i/Jӧba}X*yX.JSXLbqd0 `fő_FaXy@aXLl82bqd0eő$#IE,G~ba<JŒ`}K)Xlo)Kz[,yEPMBŠ*af\Pj60 rm`B˵ k*X Lc60wgW˅ \8p/z%_%P_8쯖 r%_-#&:W 92ϟgxt|-7=|-7=&6_M~.°ۙ-H&6c;ɂtf l@-rdadrf .2˽}@j˽ mBmr/eiP[L8mj˽=2 /|0|;X-R&j_{?ރZuIoaa.:+Xu:%Xڅ:3:32x~L8L9ä>{$f%Of r\rK7}2wK=2RA nGT#[*-[*? T+n)a.X+n`LLWhxH<xJؿ x*+T ^x*E T#x*8l zdO=2SA ⩠GT#x*A<[Jػ}QÆ[*a- zpK9lRALn)Б[J8N8=` tABV*t +pz:J%C悾txJ؋|O} }O {TЗ⩠o{! zdO=2S鶇xJ8O >x*A<xJ8 O =)a/ A<S^<ba}|Lri0{K$ {k>_l7 &I Lh(Q-6$Zl & LẀ$I:O90$pU&!apUZ ZnK8NEYr^B&TAMx ܄P*h />#{ / UrM^AY嚼S*h&/ B%J'J8\>}-W'Z+GO\͗p艖 FZ޷W7{jD| h/ =r5_i'Z+-W%>zjUrM^BE5y k*XKH`&/Qb\W ZK8 kxHzyq.kú`8 03h8#h/wY I_! 
ӿJ8, yJؿ*=KD^+a^#*8@O{VA ^GU#*kk*+U{oIؿJ*8\k/V/VA_>x|Z}#i[}8pdadU#*k zdZ=2xVA *aȠ zdPW=2UAL G&uU#J8M (8\B#DH'}@h$vNe /h B#t;@h(QЗB#t;@h$n F^hH8x8ӊH;>O8YT5`_FYLa, 6kc` U__FYuza+fW0eU_ƾYLza˖B0{tmhf "d,DveYB |DH~cB*jiI; hI;dC-pْlw%PK(KݡP_j=2ZZPK(KݡP_j)Bo,.;K8<-BRJlWy%^,% U^=ϥj+t RJlWyخJ8݂U^=2J* ;;lwn%۝[}m;_۝[y;;lwn%7{έfݹU#w HݹUas+ᰍ;lwn亵wJ8l۝[ ۸vV@zέ6nݹU#wJ8l۝[=2zέ;;lwnwJ(]px";lj2wخJ8=kNO$z欄R Y vsVCݜN6c/Zi3=J8-jڃ.B]T EM{EP`*E{]UGK]wgH҄/CX 'u yM;X%`}I~>U<lj$. 'uT<.ֿƵ$0ǂ0 R|,}2e2a vxe OS`'㍄(m x 遀 Yz p+K$?j zA(=P z06a zd=2聂@A z @AOw+ކ"/ / /ؽn'n$ BIA !A !A !A !A 9~>/ˇ/r| !/˧ Wr\r&?aY#S_9~/r " /u-r^ #vj=2=2=2=2=2=2=2 0>/8 vYV@=2 }Y+Q}.L8=t'S̈́CQ}>OG+"O8=W'[D Q}>O8 zd <y"pO8-M `ImVU7fa}YuL)fM0%ћ5_$z0 ʚ`J7+k)ڬ ʚ`J7+kl) dJɔ:S/@BNvO8n)k{'| >!o+U?}@L~m'p;ޮ)5Ĕz'Rdۮ)1ޮIOJudY'uݬPIvMBE u4 uPx/@9 o>q-!+=k/Cz9a<䨇uW[Mlљpr `lA^<6a` v[mGlA؂^C` z !-5؂^C`lA lA lA k2ք MA_!$N Ii/҄;hA_>$/CR8/!) =1j>F-ˇ/bԂ|Q !F- WbԂbԄÆ55a&쿅 <&쿅 <y")-ȥ ҂^HJ zdHJ #6c%=2$=2$=2$=2$=2$=2$=2$ 0>)-8l.SRIiA Iiѡ00`%CYс1a AL&/XX݊57Q6v+ q6v+YڇqqH.Ya]GrŚOdm$WٲŚ8+Y-uGr8> Grx}$WGrx}$Wg7|{g7}x#<}𽐋 ׁúA y`HD iq`z`H#zd} 6Gd_!/23z L^&}B/>s<_ =>{/|Žy;{/|Ž8{>{Ggq A݁u/탺A }m}PB^A 92u_[/Խ#CPw`ԽuA }}PBgԽKA 4ԽK^A ;5zd}PB^A =>{Gu/Yu/Fu/F6| Ku/6{gq/lCfq 㶄{ڇ~>=/퟽Kbgť_oK4.~iL1¥_'c b<F|~\/) d dz8_r2E|~>?~\/)dpq ~_ 3 8~p36~1o:nBō ?'dxqKqO?@GƸ'şş8N/^ L/@%F~>.^ L/@}^Ӆgxw/gxwNA'| ;_1Q;{1nL ϸϫ_ ϸϫ>ƍ^+/Ƌ?>pxiqx-Dq8+q_P'dx/y2n Ƌ~!Ƌ>p7^ ۸ƋNȸ/qǍABq_P/Tx*n #Sx+/Ƌ?uNDOL?}? s/T.Ч\0!s]N/?7}s|ú/X?'y>~8ú`p>XSsGL)#<玘!,>ø>S!<燘0bx~)!gAς\> v ů3a_kGg]"p (,!,! 쳠,ς^ > z% ,蕀쳠Wς^ >gAdi_ӊM+V5kZ״i#N'>NpC8a!PSA\ M'=2=w ႞ ႞ }:\SA:\SA:\SA:\p&t ȔȔ'épA }\Ч W\} r).ՇG7S+- - - - - - -!Mgoadw2ހx zdx>oEc z -؅ w0 06tgaer_ƨ揿)f`za}r߇_ %X}¦ZS4|G_hf4|G z07L?RhԿ?`t؃)` n=|>L!=ùC"ϝg)1a)H9קۿO//2n / r]9nm/*P >SuP}gT z 1^C ?nkdЗwB_>_8݂̿pt ^>2'e ہ2'e =O&*]ƐPy2u+3H?;iNw6߮(vuDfZ:0%iꈂ Wm!]P o@s "@vDA.0 } }[H8lo-b8~o!} [(8LW} =2_8+̿C} % 7)_ /t+og>ǿ a]`S,XFs"XwoúP/XgԇyF}~gP=X!T?3B~gP0aB8#C~a `><ذ"86ο'T`>O D U/ؽ%( \tJ v JzA zA z =O=O=ϟ;; >}* TDX_Ka}>/yMw6ӝ>tg#O8Nw6l ;a}ggAI~3t O'? 
zH vߟ GQ>H8=D_1A/L2A̟ z /!/8 #> bbp@}_$?aR_kHI~A!%D^ЧȽO{A "^} z!r/Շ=aC^pQ{m[E E=aJ@^<{G*r/ȕ}:/VSԴXLQbua}ԴXrXQa݇s>X~>yF+N)r_82F'qF+N)r_8 b$'/֘s`XcL5&/֘SLXcLX}0cŊ`^X}$ذ1V_89c`\ϰmkS^a֖X XU,yݜ/Wd\Œp/Ww\Œp/W$wrK>pdddd SЧ&n>&0_I8 3O &'0_.I8 j gpR6ܞP9 `= r{N9Xn HsܞPr{NBe`9 gzqŅB)$T($[NrKPA> 喠rKPiXn *8lXn *ȑ}M;:ÿ)\aΧ rOb\SaOa'9,wsXI(܁PaWrNb\VpZ4e5=O岚+rYM` 9_؍gV _pXw룶ú-X룶ú-X}k룶ú--X/šaQzpg qF qF qF qF qF _pZ0a<høΠ:Vֿs:8up3x@{03/}p'akT>h瀇uP_&Qp/tDANC:`œp#_9g鈂tD>z,AG~~ڄM) 2 z%@dJ(pxKd$ K"#{Id$/^} # ,GAOQS(r$XVˑ #-GAlQU>*)8tgC$l(}Pk $a)AȠ@ zdag>+8\ f%1¬Y)+RЋf5JA/jY)`V #Jad÷$O vxD\pڇ@ #I8lx Z9 }ra\K'Wx'O9@gسO}rɗa]Ck rXNOa ͗ {RppAQޥ (R{M8. o\. ]ygA ޥG32 []R&a/e z@2)e)SR& LA LA LA LAxH')RWMi cS)yKG}=2蜄}Wp pz@)AȠs zd9=2蜄)AȠs #uNad~< < \CA$ r I%EPA!\CA=2\O דpx$r= \O$?״~_~;P+V/rpM+-QA^>D ׬~_|h򱠏oP+鹄ӅK빂>蹂K비J"=WWvS-nq҇nzm>AxvgdaۗQ=p;3{L a[0w;܂[aۗQ=b;B-4.`vkþ/J{طl{}kq]S=[=O]lZS=b aaoPi*jxŖPQ.e7m.m.whw-{\Ԗpm2-{\Ԗpx-{\V˄q[a{VGK!-Y=b H! ?_8moTi zR=K8moTi z%P=K8moTiNUȤ%^{/ބBp *z\HP!BF 5%BF . %BF GF #{&(zdp #p zdp> < Em w0Emlty\VkFqQ[ay\^qiZaOy\V߷>i%7>i%|NBLmtz4: =lNad _8:3- ۲8#ز8#ز8#ز8#ز8#7bqM5yJQa<زxe`˂ {lYay0kزxa㵆-;{1Ed9 X.pK%XASB,a)5[VklYA!87 y%v!9^&p^L z@i{ !VKB XA/^&b B,ᴽA%Xad+8lx{O{gv}]A_xxp>+ VB,a5A+k !VB-X.J؇= zdb {!VpK8=: B}y$Yb {!VkHB אXA! \Cb -UB,ᰥJ%T Öi^]iMJ8Z+ ViZ {UZ-VA߂Z=2hVad}ȟpxJk*AkȠ zdadlAY^ b z%@%XAgz~–-+[Ve  `g˾P /mfMc0U5fMafMaxl4'5N5qF^5qF^5d6kzm4Sh~6+li~6+lV?`GY9e[/Af`0mVSY9Lfaor0ov80 vq`BEŁ n&T4]Pvq`Aϓg80 v`Av`BEۭ 'v`A!V4? g-v_A/KeeB-]9p+ zR mW&>j^CBە+*ڮL(-]96 j/TZhvbmZh/7<?(wi*v5Bnk(?3.K(g]npZCtFv=2:rv]adpx} QB(GFg#3J޶)ir0 mv`B٦^CM_(۔k)3=;JEmw &OĄI݁XV* TT>? guQx}6XֽY냳:]8TqTqTqT{*poBSMh`}L|54aTuO`}χ_&MOc_yODd rV2Q9-LT>-D`LTᆕJؿn*5Uk&* dSAYȦ^M4!dS@Ȧ^ M z%Dv`DD*U &*D%>}Wp'𞶚N[ V>+a%3V1a%? 
9}T}T}T4 ziBS҄* MU#?*8lz@S%J8IIS%ޤ א4UA!i\CTۛ4Ua{J8loT`'^CM+a/ zAp؄ pZ}pX=O8'VAOUS*8LrX zdpX=28VA + VAVAVA!8^CpX W8*5-VA_[8J8ݟpX;rXym6K)y,<7K?7K_;6K]fa}nY"xXY"xgDY"xgDY"LkD0f~0ߣ~k/fߗom, 6ܷ[|~k6 㵦, Tz/rv_B}oڮKwF/bqk*ݮ+Pamڮ+ڮK kFv^a7.+GK]Wæ.Kϳ2T =nwNe; T;N{v^A>*펽^}TX{ mw%^VC^kPX| mn+Qam%7>ZFwW "j.ҧQ}w % #q/ 7)+<+ÛvS^A>+fݔW#n+Qam7%a@ZKݔpx R}^&T Rj/h+JlW%Tt]ipvٮ+{jgҮ-v]j]iW#IQ$t R m%ڮ H)]PRhF/v^A>JÕgF/v^ٮB{YÄ 'Ruq-1XֽB볨ú?XtXAw;u/OzsXN>S{x,>Xzs a\/>qix`= ep;zs'ap;u`}r랆_&&aDu)+' SN&aD僄)) p=$a7$L 1I^C0Yw *@TRYciH< v_v #aJ(Q+ΣG瑰wM[*GaKӧ=y>WH*%>$RWH* TJA.j\R)R #7P) M*% H$UJ7Fn:̓/WuzrXf,u=a,8pJyH8F)zHؿ!Q ]# EA/^& 5-q⢠qQS(@\$D>H+装(装(RnDA/0>7pڇ&LOH z*pϖ}xWS'H8} v_& ; aHT z*=xI}QO /$BAxI\ $B^" GP#'(oO$y}Y b},_ |.Ks'"/a#/a#/!/!/!Mn5݂k!N $BDPSD( !pDHK$BA !a/ B"$BiD(I/D(k;JŖ`x.tŖ`2 [)ݹLo^[0\l &ph.t0\l &p˖RS@@p+`(e>\l 6 {xh.6xS {+B ^T ;ɞr?Uf'B].J8,*KU}˫KU).L%֬%喩òD2USD2PrTBEJ"\. HpH*$"J"\.J8mRN%ӝMp+*tgS"\ HOpC)>>.w('ܡpy .w('ܡP JH.w(%'ܡTp<[9𥳐$BB^[K˥N 7 K˥N 7 K˥Ny;X"\.uJ8CK #D\rSAZ'۔P!ަÛ=ަ^   .%^a .'\vTæ'\vpx \.;*a\.;J8< px ~G ^'pp5uP ;rTArTLp*X~\U#~HQ?\*kKp+*Q?\B]Q vv1`}:tX9-X wú`~8{a úXˠ000t5*AM8׫ø&8kj0j"ذO@M /8 j0^3úGp>' gu/H8IRs&i&Jj"ᰬ&o(yKM^*>䪔}(epXa >}(  }@ }@ I0`(5pj N[ B>+ PHgwN BA_>8DCHا'} !C(!$BadCMpxw&Bi|y yDA^>iט>}O Tsͫ ?sS]Ы9w^s'쳞6rN; r6r6r6rrr#[!>O8mNӆO8m">/!>/!>O8='|H v yiDB^KȿP yA^>%yѮy(@Fs(@] Ϧ@ f?З1>tgf?P0l[<Ϗ^` +L^`o '^/c|~W(B}^`>f?P0` ,f?P0 7IvYOA^T'ݷz f.Ie= ە; 5vNi0]SזyʝʫoW$T$}8' #9 I.I9 Ǡ#9 =✀t1]SSa^}8'8 [yn̫ow$nAշq 3ݍP)nJAnw$T^}'7~B_!FҷoБƿ]px+s$}&σԄ\??G} G/b/Tpu/ ,'ݷ_ WHUB/EΘow$^ᝑ)Θ^&ow$owȘI8<)1#@f }@n)5ČvMiaF~ᦠ3 7nI8=?nI8=?n)1#ppxw ~&12So$T ~&1~?|>#O#Ś+Ykܓy!YkdZ9Yk^y& gڌgg8>#vvhgt_u8SKvyFG<0{W(SiJ1+B1+B1+zP3BaoBaoBk^#R0D/l"}B.E "9?p}rB_>9mЗOl wp$+_ЇNwY8ew~t# =>9ayu`#O }}rB_>9/|~_S#Tqq~_/D4aDB߰#9?^k'/vWY/6qցӽg }}Bmwv8>?p@iv>?p@>aȿ#zd} @Yȿ#}~z@/y"V! o%rZdʅj])s_uId%>j])s_uI}%3Zb3Zd?l߻]S(3`\qM05qdkb)n0y`<z:dJ|q,$7XI&oTLRO0z:dKu>ɆRO@oTbíÒqXZAb% } +Y^~@}-6$d؀r}؀r9Q_l@9pZL?3(}z}ʡb= =fHD};兾LS&)/T/v$d. 
bɁãϹb L/8</$ig/K '=?yBۡyBy>B%Y =lf}-/Ș/8a8_ky02%Y}- 'Ș'SLe^d.sRvd-˼K)b́ó)b =22*_l9P)b́J?)b }Lg>3N{Sz1_9p/Vϼ#cX=s1_9P)b́WS/:/6G,~as냘ú &X}ú`}PX?,AaB>? ˾ A}>?3BPg>XcA!a\/zA Bø^?X!CϽia\D 8#qF+ La(OؿD_{oK Y~@dމa{A.eE؞pZCÒV'.ȝGI|A I|Ae*/!/!/ / / / I|kI|A.%L2Q 1 $ W\}J v_䰕좍}UFO zdH zdH #O% 4G$G$G$>tקQ 4*>/k-HOo=2=2=2=2 ΚvjxN opک N;5AA/Mx^4 zi zd=2xN opک H oȞI*H*H*BacTXl gq'w)Yl &ظLȋ;$wI*,6|babN0I`ѥÔw9PF9PF,Xl ֠`A$#[IF|dD0eNQ`tϝb(B|JIT,6Xl0 h(*IT,6U FgEŇ *&ÒXn" H\p؛m qۂr!NA:B`\Fa'ޖ qmy'd\S#QH葭ɺaJ' urNB%U: U kmSw&4X)ȕ`i\kpxjX,;??@.a(BTk rd'rOBɑn#> -w$H#_-GF9\T#Y J8C#˕A)r[ H9\pڇ(GPN*rK7Dydt.˽I \{ʹ,&%sYMJ(ܛT#sYMJ8t.˽I=2:ޤr.˽I \{N2:ܸpzй,7.yҹ,7.ҤsYn\J8=\ ziҹ,7.sYn\J8=\N:ƥrR+ \rd2ˍK %drR<> L>: Aa]-B. G uP^}Ua]e2z!sg!sg!sgyw͇ 0^Hx] yu9k0]Høv!y{˕\ǵ(ڥ[XeXԊ .p'N,ٚ, g^ngt a/@@mot.a@qBƌ cF@1#͆hamlXam)8wDM)rtHvX&Щ)ؿ:2X@ RY@ #c R~+`8ܾ^+J>J}J~-b#V**:2h@G8D6}J]@GȠRiV**:2h@G !URB*aCH:l@ !UpZ 6 tR B*БAH:2@G! 6s 9R})pNΩ:9V8TpZ6s tdpN Ω:9V8Tps*;!lU lU lU`g Smտ_Wvn+L-7mݺ6ɺٯl3mWL~edW0cF4Y7m&3ڌ{cF7?og[o&36>n1#Z}6Um3Y}6Um3Y}6Um3Y}sFj[6Bva/j7Bvfj7m&vfCaj7m&vf:Mvf:MvaiۭSm ZVP}`K Rnؿ>nT(n7[+8Yn<)n7[ېR큒j:zGm^Щ)[\y6ni .E۴m9ni+8,(q9'/vu7 r0+a&3ZÄ~#ݓ/pL~vO{mH?x'_AWpxݓ/Бo+8L{ivO o+8M{&c=`AGwxAY2Y,Ix`A ,n/XPfv{2 z,n/XpZ?io td4 O N ufv@> N fvӺBx`i]YA0Б, XpZWhow ">m8| #Ұ햆NvKO.9Oúú úú]úMz'yXiY$cFpN0f'y3<Iƌ$cFp1#8ÜQݲY$cFp1#8ØaN0f'y3<In;ÜQ$'y a/pInx_$؅N)8-kZ\^tdkZ\^?*.*} r@[0,.8.aX=?wOWY҇MzrXT675n`^):Y_pXEY_p3Bf}a1#>ƌPp3Ba1#>m҇ØJcF(}80f҇ØJcF(}8a0gԗ>< Qp 0^;Dya(o8 1#7ƌPp3ByfZ͆ k16b7l6ZUYD77Ge7Ta =tÄ@@ J1kQ}x=P1R@~x#  ڢ`}oQm{j@[T[ޮ~'1vM/-8"Б"pI "TT[#1f|z2_Qq=px~td*~k#=<\.GZ>{6i쑂Ӡf9AkԯiPAҶA[jk} tdZ>4Y|K#cM/|KAm-!k>5Yˇ޲&kЗZ>/% ޮicMA35YI ,4SpX& #sMVAF暬3k8<.zv o` r` z`  *.*XUБ`#cVAGƂ[ :2l=P[TUplX?],*XUБ``8.z[X/XUplX?]@[ :2l=pZ?Y@[ :״f끲}*d5W:e zh& zhӚjd5WAGj\ :2Vstd@d=pZ:Xi`MVA͚5Y6k :ldtج*XUБ&NKk8-zt&끪*8D6l&k>5YVAd=pZY@dtdkZj8zj :2d\gU5Y#k*8Z>r(ozqX6w7UaCݬ<n`^b< \[j0^;j㵣0^;jf 8SndƌP˵YcF:0fZØjcF:0g9nrƌPu3B-a\1#rƌPu3B-a\\9az㵣N0^;6y m6lQ'0ϣNhaGf<:͆yuBCfCa>)}:YDa(=ڬW҇1#ƌPzt3BaG1#ƌPzt36KcF(=:0fңØJcF(=:0fңңÜQ_z0 ߉2͆9eB 
s2ʄ6d m6(lQ&0'LhaNFP&0'LhaNFf6aH%D?4`o9pTBȡ(Б(Б(Б(Б(p`%Dyƣ(7%D(! G Q ?TBg%D %Dͥ@G`PB8D6=+(!*o.:2-goJ}PBۇ@>`Ij;I %D!iB UBTBTBTpTBTpxUB!J A(! R Q ǐJ9TB1@! [*! td(! td(! td(! td(! td(! td(! td(!*ؗ%DCd QBPBPBY^.8,*!*@G!aqU Q^/:2:2:2Wr#C Q#C цD}QEN:Q_衉@Mzh(CE5E(Б(Б(Б(Б(Б4ǣ(Щ(Щ(Щ(Щ(Щ(Щ(Б4ǣ4ǣ4ǣ4ǣ`_B8D6򨄨 QBTpZPBTpZPBT/! td(! U NJ %D %D] US SO oj}\\qi#ދG֋;~X$^<n3mx,fr/ ҋmMLc6;~XccFyXTpc6Ӗ܋mzLl[Ya>ٌan0QO_'7|Z-^>j^>Gm:@+:@+88zä:@ td*X'TpL_ȆF/Vppg:2V|&[)cgF/VP{O/VpzXalF/XalF/8DdېF/ ]+ ]+( k%A_>v4Ic NǮic N+AWdd0*\aarGF j+ r ¨ ǐ+ r ¨ 7~ؚpPAx gPAx gPAx :uBTP!2 "H } :2 td*ثK(at :2 "H =P#cPAG:uBH =P#cPAG:duBx-dPA_' zhN& zhN&8٬@ td*X'TБN#cLjN35}8Ԭy4SLjdOA?)ǁ>fjVTO!a>>XyӢjڧ#cO\Ӣjڧ#cOsy\a9^=ggNnosmo=ú}úo䇡h0N߉;{sX'7fX>c E;q,hg^,ƌPs3BaE;1#ƌPs3BaΨ۬/9h0fØvcF(9h0fØv6vcF(90GfaDf͆97 s$o6H< 7 s$o6HlIƌP|O *s ہ= >0 Sv})( l'З@>* C[` v ӕv e; e;Cdm0q}#CMO;B5=5=5=5=5=SMODMO#CMO~)Б'ۮ(8=)0:2M jz ;L 5={aۇ@>'з5=})ozįiZ6xiZ$!iBMUTTSpTSpxUӳ!jz A RMO ǐjz9TSpbPMo}P|;@!)CM2zP`_|8D ko6DM#CM#CM`g. ko Jఖ`toȆL7{P|P|P|SpZP|SW 7 5U5{KA}Mp)* C}MGk=P_SpZQ_!kkkkk6D MoJh}PBۇ@>&#%4()8ͷ()8ͷ()8ͷ()8lp`_B8D6lpu QBSpZWPBSpZWPBS/ td( D N Jh %4lMKh@!&CoquX4tIKAF撖%-!8)"N?qd~e~Nmon< ur~n~^V}UnoPY/+c(890^VƌPq3Baa90c00fŒØ 3cF(800fŒØ 3cF(ج/800fŒØ 3cF(800fŒØ 36:fl6u(ج>0sa@T+TF QEJmr,.`KA]F/ueR]F`EP1@>e:2e:2e:2eqCdd h,*: oH@EGG*:=P葀@~3## @Gs𢢣`P8D6=(ly BEG*:}P;@jTtw=Q4iZNp4なdddd(8C*E@mr$h##Afsѧ@>TWz"@TW s+6DuE#CuE#CuE`]pU]Q(8(۾@G!aWuE:2TW:2TW:2TWxTWm_#Cuņ((8-((؋@ZPz" `(CE (      =(8ͷ(8ͷ(8ͷ(8ͷꊂ|ꊂ|ꊂ|ꊂ÷+ Cd÷+ EE@G蠺`_]P]@UWfjUWfjWW\U CsW ^>w%#U ])8=Z|JG.!.RP;])8=Z|JesW.RpzxYpܕ!2l\>ZU6dU j*e.RPF*yU V)8CZ|JvX/RpXj #sBAF檅  ۅ  .Th/QPNQe/{K(g(@>H(8}tdtd(8}"]|dž(8}(8}tdO(8ͷOͿ|FA˧mX|FA˧mX|FA˧m:2˧ml/h/QpZyi8i}O+{ZyY$p7 td,(X$PБH#c@AG"EC@AG"E :2 ">]$p7 td, :2 td,(X$PC@AG"E䥸Qvo7a7fazf:s\_u^nOcqyz/3?zYcF1#xØa^0f/3?o{ØfX:0Ox?{l7%Øa;füyi_15K lP#pX02%2'2)2+2-:Q@DANr\`_@衉@M:2:2NCdl Q@Pp{+o{1M@>JP`_(B@xJ7~P(QpP(! 
% % % ; VAiBD~w'БP"plP(U!JfJlB@!JP"w((@jJ:2J́Cd$ ^{8D6|zIz#X/8-tb`/z`g N z^:2V=)_[v]Xo.Lo7u3٭7ݺ3Y7ݺ3٭cFo&~f[o&ufo&~f?vbØ-Şb͆vÆv]ެ_?lx-}%]Ȕl//8<˗2+LvrS7Ԇ._n_PvrS0T?CvrSzT7-MȆ-M LRAGF/]БKtd/F#Ѿ|`AA:2&FM#  N# N#  N# =MRݗO3(8MRݗO3|Ai|AG)^n]Pї[o?Hї[ooHxv/d;ݵ9/w.8|')^1Dxv#S]lxGvo<e >PfpUriwAiM K݁~ʨ /7ސrLMmxiwi6ܴ;БQ^n]px6ܴ;plx6ܴ;БQ^nȨ /7.(mxiwi-6ܴ;БQ^n]pZ˨ /7uZ^j&_#un#_wka.֗8_!uCN[|+:męՆ_ƌZmeΨu˘Q ˘Q ˘Q ˘Q aV6<63;ه1#XØ,aΨެ>2ƌ`cFL1#XØ,a0f˴Y/߃lxna6[W8&`^6[8&`^6[ú@2%@$Ȥd T Ӓ u@:a}0PN@K Є Є td0P *p"f{ a  Tai*o:l@ a6 TÆ t0P{` T4@T a T#*o:2nW4@7r T4 ={ ={ ={ ={ ={ =z*ЏT~+:БAO:2!~+:pl(=ȤTa*8`SVR Bl췅7!} mp GVG<ֆpW}pW}pW}pW}pW}pWݮzi€*8=epW *Б]:2@Gw8D6]U#*8MRpWI `U`t NU]:2]=P*)w_bgo΀/-;n-΀i|3frW7bgʹp3΀i|3f_ x3Z bgʹp3m~/vLj3frW7?v-f{tW㘠Z(ޒ; n6P022YÂ(kXpe aab5,8L7YÂ{#5,:2X@Gk+@G_ 5`{kXpd 00Lְ`o 8v>4ai5,[@Gk` a4f0X a NsaiN5,[@Gkة5 td 05,[R~Rӿm5l5m<[7S`7aج_v6maBYo-Lf^a0fx3;0x_`vl\fFaW0x_`"}<&rᙆlxa"7߮؅<{lxa"7wcF0uo;u dJ2I&2ID2+@%ȼ~o zCd^#շ tdб :6БA:2؂![pX"c ÁCd"7tl# tdб{Ƞctl# tdбCdp!!tla u:jVl@ͿӚwZj7j9`p 4nWaWaW[ tdpPi-o:2@GWj\m:2@GWj\m# tdp `o:\m# "MG41>P622ڂ(W[pj \mab-8LrW;ڂtdp{j\m#՛@G[pc :6з:6EIJ:`c=c;QTpc :4IAul# tdб :6plc NtбI :`ctl# f^:2@G@ׂq qݬ00ôYo`^죺vZ^0[G~~a|a̝>i1x c cva:ڌvt3n&sb_͆gž }M=9}f3Fsb_Ø}M71nm>徦eN_kZP徦eN_kZPk/5-(sZyٜ>P i{#9-hN :2ӂ洠#9}x؜6Бќo j{Ҝo j{rۂ2/-8g؜6plxװ9}muןa8W> 5ق}}(d ox-O![#*wz}nib}n ]u7}nAmU41RȾU41RȾU7БQȾU41R~=lt'mlmAI[[yt'mmAGF[[БtdkmmAGF[[pL"4k־܏7־܏0k־܏0k־܏7־܏0k־܏7pL"~־܏7Б־܏ఃe[@ ]$˝| zoS~oa~oS~/w-8}/w-8}/w td/w-8}/w ">J;:2J;:2J;~oi~o#~y_Cl;úUw~mֿno> aBuj~ ?^جu faݬ_c1wxއ]w |'aKp x0{Mx?>~xلlx67߮ ?|XFloo ƌ6뷅7w^&?Ȕdyyeyyeyyeyyey 7p\pm@> 7Б:2X@G[pXdy  ,o4pCd\# td `oy,o# td 7p4 "dy7-84|#36|#38,^d /2 rBf9Б,:2@Gwz;~,o,sa7Y0Y0Y.z,vNf`O`7<.8Bq#<.@Gy q#<tdǁ 8БAJ#<tdǁCd@ "&FJ22@F&y\p% qab<.8LQ*y\( `/Pq#<tdǁ xCΑ>ソ<.8}{yT ;Up$ 8q#<td`/q#<tdǁCdy\p 7<.8}{y q`j 8БA?P~`f݄7;//=z_azw0mn7;6o7;6oe͎͛ٱy3m$ٱy3-ovlL~͎d6ތ~ʛ ~ʛ ~ʇq,oƱDwfWʼv072{MwfWrfW͆ͮ̇1#72o72o72ֿݕ$M j&%yn\Pgo7An\Pgo7Aސ&ݒ&ȁ&)ޒ&Yޒ&ȁCd o7A. oHv䂒o7A. 
:2J޷ 'M Jబ[9plX-yn\pX-y8VxKAϹ[9БQϹ[yCJ޷9-(y :=- N-=-=qn\poqn\poqn@v|Kv@GFvӋ(=- N/oltdol.8ݲ9Бݲ"JvӋ(=-2n\pqnqn\pqn\pqnqn\pqn8D&v qnqn\px}es#-Ȯaִ.ػ{H:O{H:O{HfM{H:O{HoHvӬIvӬIv@GFvӬIv!i֤~t#~t#~tA{HfM{HR9gߟ=Mجhufa٬ݬ`=;o=&$af=[77f}s=Ak>|0]͆ 0v!7aΨߦzar09|slx!7w7Øaݻf9Yi|3>{Kج0ެ/>{Ex,n B7![ågprtv 7Б&jY܂T-[ "kY܂Y# td `oq,n# td 7p "`Y aq ˰,na!-8,ŲXdY܂Ò,[pXeq,n# td 7Б^?N׏qi98ЗsK9.ozvF9``79.8>ソ9td0{s`q#9td0ǁ 8Б:2ソ td0ǁ 8pw Cds@@F&sd qab9.8L2Q01^~e $БH#9td0ǁ 8Б:_k?ι$ҁ*+8L{!]p% [Ua!ةd,!]ҁ B:БAH:2d !]ҁ B:БAH:2!i2.8MBH&c邽tdҁ B:3{!@9炽sþxCbbz_tfެE-Mߣؔz3M7ӖŦԛbS/6~{誯/6ltY룫zaU_lf0/6>I~\l}s{3_l0Mct`ooM7McFt`oM7Ӷ&؇o;G~#wstk#wstA]!uI:r@GF}sta*90_8D6ї;G:2˝ JG_]P;r;G:2˝ #rї;GX˝Ȇe:rJk}st#9ZG_Ȩ/w.8ї;G^˝u/w.(]БQGtdAW}[t՗U.nr@W}[u&]nФܭ:БU_V]pt՗Uތ/wtdt՗Uތ/wtdt՗Uތ/w.8U_V8D&=r[uibܭ:U_V]p/w.8Lv՗U22ݪ ov՗UI\V!]nՁrÛ]nՁ#.~FSAkX:l(^wfra~waY;pa~wfҬ_n]pZ:h/7.8-4뗛}:2; NKf߁CdA~w#Y;БѬ_n]PfrA~m/¾>?fnO`2-Uۇuvk~~gnmֿl[Ƈ~ a faެ90wo6avx0agF~YlxV`7zs3?ƌ`cF slf[Yt3?{5lC`7뿏cFu/%`^plX >БA:2@G]_pZ >БA:2@G]8D6H ~Cӊ]_ >БAv}d[o\l~X]l~X/.PL[oWHz0J7tz}3m ^l^}.^LbQ_Ŗtز}Ytز0^;Ŗ Ŗ Ŗ9#ɡ͘vI_l羙vt0fD~f}at0fD~f ۹o&pì/wH/  sr /wH/8LV;2K Ӣr!av:!=БQ_^Pr !=БQ_^Pc;:!తY_8D6,kV;V6r@GFu~Cza:!Q_Ȩ/wH/(u~CzA O^^Pr^?NTۧ:/O/8-Tۧ=4ίk;FNvr@B;~Czvr@O;^~i/wHtd;:2 N/;M/;22 svr@Ff;~Czvr!27!=Бю_^px!=Бю_^px5D{X8eW/ I#]ܚUFrk@M#5}æܚ _nM8D&#54[VA˭ N 􁎌Frk*H#5}ٴ _nMh/td4[ܚ _n0Evú}zxXvno/:Y9qXcYoYbua[faܬ:c0Ww~lag{[la|`7#͆~9߬Q9a01?ØLa&w`c~01?ú͆y &C0fXJYo86|| dC>>2'_p cCd'_p7td >`||#td >>p "' ||a/8QUJ>NV*Z%_pX||#td >>Б:2@G_Cdd @nߧ:@o_w0||#td{||Yo"Ȧ_22@F&_p ӛ||az/؛@F&8D6MD#/؛@G||#td >~B'Y_N`/'d}`g63,t߄߄daq/k*_ ?plX\ ?БA:2h@G_pZ\ ?БA:2h@G8D6-Ch _ ?БAv}d7oovX$vXnvL7o7~3h4͎ifGvf7;o-5NI0j7#j= 9 9 0g$ufߌ}^DofØ-KnvKnv?ٲfyfÜcF,Aab ɩnX_PboHa}aƳݰ>7ݖvüga}aݰ>pllo7tdݰ6:o7tdԷeo7/ [vca}a ݰ>БnX_pXlo7td#[ N+-vgݰ<eo7/(za}ea}AmnX_pZ(o7/8-oHW|i ,{KW|-]WX۝,_pzY>Б,ow/8,8D6,ow/8`v;f0۝]WX۝Ȥnwߐvg@GFW|֮vg@GFW|֮rHW_ynX7cF1[&j=f7FYl0 qLP/L7: 1# sb{Y/w%Z/w/8N r@Gki||#6_8_p͗;6_8_P{ h|Aiq>;{k)||#6_8_p͗;:2j||AiR/w/(mqrR Jy.w/(mqr!||A-7/8-M N rk~j5|ork0_n _p&4˭dӌ//8Ҍ/td4˭ No4˭ 
No4˭ȴyJ3>ٌ//8LR6˭ rk@Ff3j3>pL˭7_n hƗ[RmƗ[:2TҌ//(3>yҌ//8,t6˭7^!r r@ r!2R~/w/8 N'r@GF~R~/w"O{{#^^P{{i~ ?ޡaVC5nv<7obyXco"aͦf.־vlwXU k,a+;{wVE F=9F=7aQcQcQbbbQsF sF@3z3z3ۏbmoiKKoۋ%㥷a/mxm{XUl[{^ cFmk^e3/U@!B y% ~`t_a_+/M& ~|mW$L E}0WBQ8̖P%Ì E}0gBQ8͚BG+/tdBG+/tdBG+/tdBG+[ECd7,Rz }`ABErs }zCh>Bߡ^4at`Q~>{~>{_z_z_8Dn֋ !A4!#h>p !wMۍ/dd_8D6kB4n#5kB4#E:^4#E:^4#E:^4]K>pXl]:%a6 .`l#w_л/l,ׁ |`H[ K!aK>u_zw_z|D.%#]:%#]MKd%]ٻ[w_zK>u_>?<0Xk֯K}zq׼6ӇR_(zߔh^]^L{Kˋkwy1Ŵa|_(?L{hPP/Q/&AG}3 >ņ疂zz^^lxn)[ >aHgz1m,Qߌzz1 ͘b ^BA=ㅂzz1}/_/6K8^(d A/BBT;^3b/Izr\A/B/H7u 'bnva7u 'bnv:2ŮӅbfn]}bJŮ_fn(7u@Ů_8D6^.6B_ bc/.v>pʓu&P.vBjoyvAM֯Б{Kd׍-@c=?/ipɀh.v>p~]}tq T[^} euR2ۯۯۯ~]}p T[^]p:iuZ2گ_;`%@ٯMВ~]lr}p~]lr]'TiO+=fb4n(xÁnxz,^^ 4n86n Ůi/v>p:i/v>p:i/vBKFxKxM:Ů_hh/vBKFx2n84n%II>+ol;RuaðYYY_>s6뿩> nf-񰮀Y]7 uNf0j 0j 0j{:aQ#8ׇQ#8ׇQ/n /;އ>a\p} ͆f{>Fa֨FxFxFxo;އQ/l.6띰o`7)? Na\gp뮤 jaNaet<ȿFfx Ifxa^s"ЏsK'{.8x '{.8 N @K9В{d-@K9В{d s pѕ\{.׏S`/$\;a/K$\F- r%h`/Z2jt%h`/AE^d)n"{"{`_d)A({`_d @K{9В^d-@K{9В^d g傽h`/v†{a{.W)2m {HEs5i/`=z%{$N{y{9В{d- {.8 N//炽8H6 N= =N{x{9В{d;`=Z2xρl'cYo]=6뫳ua]A`>o!*XarxXg*m~0~0~07 Q#χQ#χQ#χQ#χQ#ӛş{c0vӇØ>a\01ٰެF`Lfzc0kyF0F0F07èèufoz?;a}0i}Xwl8`Zo@i}Xw0Ӂ/ʘ$c:9'A.8,9A.8r [ A.8 r  ph Z28ȁ r%h Z28ȁ rA$nr U@8*P 䂓dp r'}=pdp U@epAdp-@K`_Sdp?Pj-ZpxY ꓳ'g5Oj WAhkpZpIY-Z-@Kg5ВY dpV {Ղhv N lׂn۵`wv ~紛v-;7Y-;^pV;p Y-;SPjY pV-6AY dpV-@KUV<ʂ t~«^eWY*nUZ2xWY*-@K6y?p^͇{5֟{5y/jL?FŞˇ=k{.֛{.Fhd^칼J^Q??Lͨ̋7yft]\Q[E\Toz3MM΋7MƵKb͆&~ӛ qf29/L&~ӛohr^7L΋7SbèMͨMͨOBL֛zza4@/l8h^o}' Ћ?Nԁ?;M8/w.8lOv*/YސN>ˁkT^\pX.w.pws.w..(rÍŁ|q]\\Pe$]\8H6\]\Br@zwܜ$͉ Nܜ8o͉ 7l{Z2z2z} tvڇ@7+>]n[Pu t x.7 d{} t-} t W[pZ2pɆ[_klz˭w ;WCÁnc~=1xpA{7{]1xioᠳwio%wio%wioA{wioA{nHOr؂YFOr@MOr@MOr؂.-8e. 
d.-8e a]`:mjϛèèèza]mFmFmFmF+5èݗ6 8Yp5}è|è|è|è|è|͆ >f}!0j0klYg;g;g;g۬FFFFv]7iø]?L6RA%Hdnt ʑȵ#K'GN ׏,@ Y: NF Kp'k͆^ NK{ _ `_ }+z~uhZ2x/ K$W @-{%b^ زW)@-{%b^ زW %@K{%Вn:W %@K{%В^ dWɆ@K{a@K{aȵ ޵ ak!&z k) ʚKSTpک읢@K(В) dp-,@?xX@ @<,@+{ fOO17}K7֟7֗Nn;ϓ6SiftwX_o0jR~{ f~f*oFXߌq)f_iy/f* a, ٰ//aԈ}7%oL}7S)f_Tʿp3o0oL}7Ӈ;Q#oLͨKQW7{r g*7{ur g㷻Uow; R.vVp8\],z<~Yuyv)ng}Xططjzݲ}X:au ױo*v]u)طuEױo خcn׵!طuZ2ֱo*8\\Ǿݮ+В}]W":v]dUu삖u삖u삖E }Xp] E^`xw;Tyv@k-6-+CvjVo7 evcN P~n Uߩz>H6Iuf}1鰮PY}ad0r{p~XwZl*6Oè p~ _uŤC~p~5B0jaè Q#7?Fa֨/FP8?p~5B0jaaè Yp~5 Q#F(FP8߬/FP8?p~5B0jaHq`w=@>U\U cD}m4o ā iqU7 )8".t?ngBF!ąǍB  ݏlxPA?[_CXy$+$cqd??跌G W d??RpH tqqOD0>OD}r' uaè Q#F(FP@߬/yfìQ_@?~5B0jaè è Q#F}0k4( Q#F(o6(oW-F(FP@?~5Rpz]7D=OU@>W[yQ`_ z%Cp^p8O~O-JŻ`g{; N+u}]>pk|Wݗ]fu@/0- u@J~8/OE@} V>bhHU*Rl-K%C>ВhhPd?l_y)-d}HE@K}%C>ВۂktPE`_~p:!*^}z\ O ~gw  /t ({^4Z2xdY/tβAk:ˮ,h"Z2Pw{9|a*m~ A契f*=l).?LBMam_7OuէaGجT w0>Oyج/= w0>OqÝ8888ج5fBIlس$l; I8 Nfpø$u '0k4p> NaNaNf^'aa/Y$F$F$F$vs9 emC8 |r\$vö.'`_  '!ВI(8r  {`h=AZOxR{)zƒ 6|u "пE' @NU- @/M^0(- @J<HUA ,TED UHUARy*Æ!aȃDhAZ2x D%h~YD p͒hAZ2xˠ(82( E;=^ņ0(4aPzi ʵ{"В(؏ dAВ $MA d0E-L@KStf)؛" H%)h`Mg6Lә SdC"3Hdtf_ldTڬ?T޿ ^YLum>6KnfdltX_aao0խ^Y:ϓ6KɃxfi3]^Y:ϓ6KɃxfi3^YL/,m&eTzfi3}lt5}f^/L^EKxپ0vy /m6^/:^EF/e͆=^E {2F^/L^E}чKxݾ-Kxݾ(O^E^E~^/*8l^/*8^/ $)hi4Tpxl4hi4!Hu'@N ;1SiQhx݉നi4Iu'UK*6^L *6^L *6^L*8l6^L *6^L*8l6^L d4^L d4^L*Ie4nTp&eR%eRfuˤ i4nTph߃dǢMSMS6M^/^i*(u/^i*(_d/ Z2_8UPSҿxqtT*@uǩ@KFuǩUSdUSSEAKF%=|@?{:tйCgƟY_;;v7~u%a]`eúfgf}ﰮY r5s5Yo +0>OX15OX1qÊ999٬/F`l-n9vfr_o50A ͆} vfþ;bގ8ødGv&ݕeC|#\eGv̂%;` [,;"В(8T# A;Вû'#`187@/Hc$FF ߾l#.G\@?w.G]˱!\@K*r9\}i#*r9\@"#plMrv(ؗ6-\@K#Вdp9-\@Kw9Ɇ\@K#Вdp9 ,ù" `_*8+@ HlsEȆ@: $+ -@K `oZ2X H YoH%h`Z2X {,h`Z2X H tORp'H `oZ2X H% 7@Kg%ВY)8etYv,Y_XY̜^Ya4s{{m&3gfZuX_4]u55b߯d,LFb߯t^u5ž_Yꨋ}6Sehf2z~m b߯TZa4sp@3gf2spuF3goa7i,l'h,:k?ahaoXk??Z?Pǂ|gbA>ræhi{V-Yr@,g--YuKi{V ٴni1!]%ܱ;v6)O?-wڐ|r[FsjWͩ^_r[FsjW%9ej78Wˍ "-ejXas,ٹZnVp؇\-7 $!;Wˍ "F`UZnVPF`\-7+(jXA9Wˍ 7F;Wˍ6spcsY-,XAKFClYA{Ӭp[ihh-4+(ClYAb= [ihh-4+(ClYAb= [i! 
f f frOrOAAClY%!,В[iVp~ACAG[Vz-+h=}t-頛ܲtA~W>a]1fg; mlu/úهg;mlu5oa]|0j0j0j0>klYg۬/)g 0>klYg;>a|FFmא? ~f_vPQ[PQ[PQ_ BȈ N2S`_j s'W_.U\Ö"jCT~BpRvW"` KhR5\ӢKUpZp6K!\nX< *k.U`w/z U/! C#*pxӋ#*/@[Q FT%[} "RROTS.ULH=RROTS.U %K8H6Cr \ RD .U%KhRZ2T .U%K8H6RZ2T .U%KhRZ2T .U%KUp8RwdA'jCT .U%K[ .U%KUw-\@K*pwz*В dp-\.Uޥ dp-\@K*pl~*8]?RmtKUw-\@K*В] = VW@[^ NG$ tDNGGzl~-v Lbd~-v L?bd~-v L ŎQ#_7S~3jW_7SzfX&x5ͯnZ&jԋ7b7d~-vLnMh*c?P䯰sT?>p!v 6X QA?>:G#b{N /={sZnWP{ٞr@^zN N={d@s*hhH-+/}%ܶp<ېZnFC*} ҭZ'Dj_[/Я ݪ~~V-+8t{T[ V-w,rr7çݪnMnr7@ij`a[ 0plؤV-w,rr7rTh`A[ ܪnV-w,85V-wܐnr7Agj`%[UВѭZn2Xp8V-7,8tv  ݪ&nrr [dܪ&nrr v Z2UM NUM NUM-ݪ&ݪ&dnr@KFj`%[dt[d0ВѭI+OZYˍ Lhe-76 2ذt~;ϯuXWWڬ.g>a]]ae>a]]cu:^6TFFF> >f}0>k\sg 0>k\Y:|è|è|ro23dtvE N}Y1+NFW ל@~7 Vpx^s`~p w.X% h`w.X] !\@KvM ,X OE,?`m~"+ " E',',OOXdd lCXd/gY ŖHRlgV ŖHR2gLY ٰ?+8`6V/`Z2g Y%hZ2g Y pKhZ2g Y%hZ2g YV Z2g Y`W-g Y? d-A!`Z2g Y%Vp~?+g Y%hMg  Y%hZ2g st\+؛k^C0`z \+80 N+̵ smCkiן;ӎ)ֺraMXʅ5Wb+ָᰦ(Yʅ5b+ָOb+FzW.\5]0jԻraԨw습0;vJ~u0;vaԡw¨C؅Qޱ FcFzǮX[DZw`{>}!#/lΧ ; \ v6ӁEQO[EB{}_hUzaWЪ^ZB{}_В^Z;pzzE꽾 ^ZBK{}_{}_{}_|f~~W;w~^߁ ^ ^OBw`!~?{ ɦ ^߁ /ؽ^Rlx}_H۾bB )/dpl۾В^߁m -Y}%뽾/dВ^ZA -wp ;p88NAx} r)d)`}%뽾/dMBK{}^ZBK{}_8H6\?z}_hz -Y}%뽾Gz}_hz -Y}%뽾/$wt轾wt轾[ -Y}%뽾/d7mo~ᯝNFgp-/_8H6t-/BKFײ%W_IKb+-L']ˋM<Ox~]ˋM<Ox~Ox~ ~ҵ -]TMϪxW-͋?+V4/"{L۽lB]\pxr6o..8<<з{rۄݻxCз{UUВф.hhBdѾ݆9-m m7}} sA}n\pGm_nhh_n\ph_n\ph_n8H6 oadoa.8 ?piO=׺% [d Ɇ$?.;-cniKene ]pR9;Џ۝ N[*s;gZ2nw$Tnwd n]pzV;+m  n]pwnv[@JϹmn!sjZ2n.(?v[n.n$Hnސm GsjZ2n.8 nhɘV;pL#ҹm-sj:V;В17pu[\1pu*h[\z3pu*hׁdUр--[\Z2Fn.8] :В1pu*#StD27pudnv@>nސ NG$s8 ?PAA z%0TPNGC8[|xXW}ܬݵ7KuFf}ⰮTaݲ7CuFf}a>Tp5B0jPaè30>kFp`:88Q#Fl\Y${5RD*(SP*NsU ; V=%oB @KPA%C В!TP7-ٯO_o C!Џ)@?> ۽R"LJCWkF6Lр7Kyf>l^o&aT!}ؼ0kfhY#7vlqCwlqCCwlqC@>w7ߐ ʑ{j#WВ16r Z2 Z_*J$NhUx(8=*<'Pp8~UxpUQȨBAKCAKq 9 N#s 1Z2c(8m1<hɘcxk#NjLሇ6pRf'5Fx8a3Nj8pRf GlF7Ϟ0~;<":Ux4DAF*(8(Tөǣ!6dh(e= QPvǣ! 
~Rpl68UTEAKTEAK{>8xF'nn1(8c<<yc7-n1(8c<8H6c<hɘxF qc?w x0<6 x /Gl&hr4aԁA6Shd Jy/Gl1r4aԈAF /G|Q#<6Fok~v0Y3l5&Y;zFKp@Rpx1Dy=n8zFK%c -A3Z (g^1Q^h $hhDy=pQs(--#,'2zL2#,'2zLaD@K2Ce N#,'M#,'Z2FX^O)8:(hay=Q&В1zLaDAaD@KKAKL1-gi3f, oy=3zM -g6c[^Ϣ d̷E8H6m̷Ehɘoy=n&z2z:zLwQ7Spk:zL tʷ7SPq3e7!-Z2[^)(c2F_)8|u8zL%cA|q32zˆLRP)T [&ZlfM^S)8m̚RpR5y=L%'&TfM^S $TfM^S d̚hɘ5y=Lഥ2kzJ%c0ӖʬN[5m  1,Dy=pw)z KW<(ǰ2Q~ &'ZOfM^OZِYדV N&'fM^OZ ̚Rp:t5y=i%ߦQ+EúfxXw)ެ/8ߝuC:Yo_aA9go"r5BH0ja!èB*Q#T6MèB*Q#TFFR9!r5BH0jf+r5pY>r5BH0ja!è;!Q#[6mè-Q#[F vqUR`%R vW pK`w*8S ā @KpK%C%В!hn)8` ^1[ >c pSpSeC[-- ᖂOᖂ PᖂʆHnsJ &~H#Rp)!/~H]A%В!hɐ| dHZ2$_ N'/ ɗӉˆHZ2$_-/ ɗ@KKDBt"!Rp:|)8HHN$$_6D%В!hɐ| ɗ9mHz ؕ N1/y /^&HZ2$_-/y ٴ#Rpڌ|ɗӛpK %S%)Hn))-L@JpK %S%pwbz'6В!Rwb-- @KpK%C%В!hn $2[-"~|!.9wM qsWpڤ);w^v]iB\`M"M q )8mR" q@KH%C\$В!.h ${}X#hRp6+hRp6+hR#hI<&k&I pmVdCM N& A 4 ZOM N& AqIBФt\1h8f3xl&coqlf,L߀ck>Aű5kX[F44Y9h8f3Ml/q7)h8f3n6ӷ⸛è&n6Sdqfr#l⸛4Yw|q780k⸛4Ywa ,LfԈAͨ&Q#M,l84Ys5Σg8PYTpxPYHPYTpxPYTpxPYHPYTpx#PYhPYTpx/PYTpx5PY8H6, d ,*8\PYTp7:NAK@衂5C+ˣ +ˣ N+ˣ-+ˣ N+ˣ N+ˣɦCCCCCCdq@K PY=8~ˀ PY=TpoPY=衂~ˀ@KƀAie@eyP%c@%c@ey:<]:<]Ӆ wMT R2T T@eyPAyӅ ].!*Ӆ-*Ӆ ].TPt÷*Ӆ-*Ӆ g*Ӆ6d@eyvNA9~˳s6d@eyvNib@eyvNib@eyvNW<*˳s N*˳sɦM999M99M9d |I]&h0Åіa>іa> -|--|mYSpp;<'В1 N" qG)@=+{glL-L5ř:8SØ$YLř:pqaԈIͨS&v~L,Lv6Sdqfrly;Q?Ll⼝d ,Ly;)e8og3Yv6S&v6SnqfJ,0Ll⼝͔2Yy;Ys5bdqfYŔ*L~pVaZÞ~& #/C #/CyYTpyYTpyYhyYTpyYhyYTp$yYTp'yY8H6, d, *8nyYTp:<4(В1RВ? 6֓ 6] 6֓ 62a<ئ*_˃m6ddyvMNo6&˳k6ddyvMfdyvMW&˳k No6&˳k-&˳kɦ[*&˳k-&-&-&-&-ٚn̦,O _l򤝂ٔI; )˓vMYSPٔI; /O)(OxyΆ̦,O d̦,O)(OxyNAy˓v wjgS'Z2fS'N^gS'llJAKl0['l0@?!fS!fSzQ3px?y~*'EúfwXwAݬ/8Yzú!~rXw5ݬ/ٮ_è'Q MDS6)Q[DSFmM9"rE4eE40jhaєè)Q#DSFFM٬fa֨FM9!r5B40jhaєz0jhaєìp!p=9{~6AEa 0GQ'F 0|@!Ea 0{!В! h dZ2Da-0]IQþ(L $&Ea 0" h d.ۊۊ?-> !.Vi@KUAڥpMVeC] .Vi@HHZ2]-. iqJUAf%Ъ hUY dE@ʆҁ}Q:Я b)]- "R/ZlR-6b) @KXJ(8H6K)8K@KXJ%C,%В!hK)x !ʬ(ȟJէJ W2+\}ʬr)8H{ŁdWhɐY){Ł @KJ%Cf%В!hɐY dȬ Dz2+ @K{8*]Z.;"!t!B }"Ъ t }"В!th$C d]Z2.-B @KE%CpU`>1(؇. BV@EUAtC`E %)t؅. 
oB8=fjrXW4٬7nX:j^rXW0.V:Cqe|5B0jŇ!tY8"tqE0jaè-Bè-BQ#.F]F8!tq5B0jf}0kWF}0jaèBQ#.F]lۛQ#.F]fs ͆s C0j;cp9a;97n6 vB@ { ϝ@>@)8l 6E@ @KH%C$В!h dvH@ { AaTS*! @KHaTpW`h@HUA$Ъ Rp+! V@H_$@- @KH„ )8mo7@ N "hU@KGhɐJw }u.z"h=xZ2d< 5ARGRdžxZ2d<-2 @KG>qU#br~ b\`qr)G bdh(ػρ 1@KG%C#В!hdq 'b 1@KG%C`*qvVfEBcC8 N/bGUA#s+ N/bG%C#В!8H8 N/b"hdqZ28-b 1@KG G>Ʊ!bV1@GUA#Ъ hU(8(8"!Jj^$%5 /l,8e36.NgLY>IY6Strq:aԈIY6SR͔Ը8e3%5.NnL㋓[6SRT8e3݋/Nn92qqrfJj\ܲ 'lB-)qqrf*^ܲJP'fT8e3%5.Nn0&5.NnL[6SRdE^ܲjN'FԸ8ecRìp115bRc3jĤαw8{sl g!'l6L XolGJ. SuxH:ۑu)<^gCFJ.)8᎔\h)<^ఓ;RryNa3wxAaCwx@KH:mݑu ;#%Z2FJ.)H!9SéCr<$'Cr N'S#]aj򐜂Ӊ!9Cr-!6)C.O d˓el úkf}΋۬٬yq=Q#F6s#Q?F~ȍF9!7rCnd{Cn0jaèr#Q#FFȍF٬fa֨ύF9!7r5Bn0jaz0jaìpV!7pV!7aȍF9!7pV!7pV!7Y0dJ6?3Cp> SrXw_ SSR%k[IpL(Rh ZJR/%U *BIhɐT dHZ2$U-* IᡤJPR`8H6!J%U6DR%В!hɐT)8%JN%U ၖ I T)8EJN#%U 瑒*~H#J$U-* I@KJDBRt"!QpZCwB# Z)5xG%C`_$[ "hdwZ2;-xG%[íFُ@Ne?;(;T#KSُ@.Me?4$@KG>d~Z2d?- ُ@KG%C#pl8d~Z2d?-ϰF "%=^}@Ii@ޤa o!& y}$Ыys-8mț& yˋdž~!LJG~|~z#G­G>!Vُ@GUA#Ъ hU(85(g?"!~^$e? /7'xnN~9?oN~9nN~9ܜrX-9O9e3@oN~L=7'|إ/)rsfʅܜn7'l\/zsfoߜre.͔ 9e3XoN~L_6S.TZ9e3nN~9"vsfkH尛_F̅ܜr!7'l&wT90j\/ s!7'fs_F̅ܜp15b.d3j\fh $7g<5 $')ȧ~| Spڤp=gCMn)8 l°6۸&z1lr{MW3 lSpz?6=&Ъ0lr{Mɰ6&&Z2Mn)8HlHN5aMXk҅5'}R֘t:iXSڬ5ײbm41銵_>iXck?{6%aԯQ>PF@I%aԯQ>PR-†Q>PF@I5%aԨQ>PF@I5%ڪk5jKba֨ Q>PF@I5%aԨQ>PR¨Q( F}$ gU()6U}d>PF@I5%ņΪ>PR Q>PgJ@IXsM-6+}$n@i#P||aYqp Pr`T /^8283Z/_h@Z>P򅖬|%%_h@Ɂá@Ɂñ@Ɂ G%%@Z>P򅖬8A8B~%%ȁÙȁé/3#_gF+ό|W|BKgFВsό8+}f\;+w[B?5uSB?>MAiQ9ۂzQ9/dA9/s*_h=ʁme ɦETuS)TВ9/d}N -YSBKTl/d}N -Yb!/B b95O!/B bB.jXp | p R_hZ>򅖬|%C,_hZ>diZ>򅖬|%C,_hZ>zpt r`b9p8:b9 |%C,_8&y>P)}`B} zA/_-R8->KqL,Łm ,Y/NW>Kq`BJC_Hɐ&Kq8Kh0K1yPS!`b*>̲S `bd?{Plm,ţ-Ŕx4ЖbR<RLGC[jhhK5bЖbR<RLeGC[~hhK1e) m)䣡-Ty4%lhhK1e) mٌYGC[`hhK1e) m)&gЖb*<Fx4p1KhhK51f) m FR<Rl8ǘx4p1KhhK5z`cK?g0 )rã!1ņGCb 3|!UrᐘCCbZ9pH́|[:v|-أVf߱a|2(LÏ\N8H.I+~$&!{0?SpyJ#1_؃q˳=)<܃q/\"[Y8H:2`~$rGb ..`~$ {0?Spyk.,撐m_s)=:fq5/f\ nY~ ,Kdۣm=:f _ۣ=:^ۣ-Rp }񁪨~裂Ln{7}߻BGƾ2}~ 6>wKd]ƾ|#cn nw>w󅎌}߻)q}v@2 ȶQ?̍()܈r鞂͍(BFFOT#{ =Q?Spy =_؈r鞂jD9tO.FO|#c#{ .7~'!Q?Sp9~= 
kr鞂ʽ&B^O|SaIB^q徏ï|oA}~ͥrt\a{+_ϮTgW{+s7?Rp;'{+_ܽqٕˊwgWp^Oi62alϠŦrʇCh 6Mf+| 6WMf+|0th?ŦKCEF1fbŘ-:41[thŘ:41#thcF(ƌСQC3BF1f`s!3 ,ŜܡQC3BF1fbŘ:4ҫ3BF1fbhyC#CƇC3BF1f`s :4Ř:4> :4Ř:4MG`Ŧ뇩C!SRFCLJ :42-uh4d^H<ԡpy>C#,:2th4tdhСБC#CFaJ Kd#K :4C#CFCG˳K :4#CF@th4BFɡh+BhE^hE  G:4nth$  {Z z;:2v$VCGގ_TLݟH8&:lv4diR 6z;:lv${;.m'z;n'z;#CoGCGގ  z;β#CoGCGގKdiDhѐ#l22v4ddhѐToG)ގH<ۑpqH8v4tdh\nh苀nwh襉nN<-pH<-phT-p2oQ oQ Q@¹Q zQ n4 $>P?r J'u~ؼ]~جI?`Ql~;Ewb&9E0?`L쥧/?Ql.~?LZ&pQ`~#(ƌ&pQ`j8(F0?L/6ÏbSA~QbH6Ïb|?Lo6Ïb|9(F0~3bG1-? ~3Zl8(F1f6Ïb[l8(F6ÏbcFlb 61?~h#b 6-*h!t 6.,h!r 6.-h#r 6.|l!8F m4tdl!8F9m$\en!8F%qm4tdl!8Fm$\kn!8FCGm$\n8\F.e4E`2nOv .-?p{:Ks ?pl{:KK #c@BGڞl!HA@j$\6F[O4@wK ŦyiTl:qi_ 6M`s(\a(6U΃`w;N`q.ƿƹ:l6Ÿ`q-8Zq.Ƶ\͕`s3q.ƌ`1#bƹ3q.ƌ`1#`se3_98cF0Řs1f\8cF0B}1f\8sFs9܄q0bƹ3q<7a-M`q.ƌ`M'`V9l1#Xb?L`'}S`zy8默3[I$TN0 Շ}`IŘIŘI$TNݐYY>py4Y>py:Y>pLUǓR>pyJY>БQ>pyVY>py\Y>plydY>БQ>pypY>pyvY>БQ>PB5#P}<; y y ;)TN=(TnIx^w%@x^wCGFx^w@Bm1 Ug~<< m1 h[On8WnOneBxwΦP}<黡P}<;vgS>p)TOn(TON'}7td'}'܎v'}'܎v'}7\"y<;vP}\6czPV?pٌ-On(O@cG?QMw|PlDC>l~ {7f]>:죋GR> 6q G.ƌ࣋9lcFŘ|t1f]GcFŘ|t1fl.sFKa1g4b>3.ƌ࣋1#b>:\/ƌ࣋1#bh.sF>࣋1#b>:򼅏3.6 -#`.ƌ࣋M`>t0LI>!snȤ2+LK>!N<.'sѲ#n:2膎 >#N<.+{r%%pyhGn:2˳K>:N8-:2膎 >'|tCN ;ziG7҄n p{G' |t@膎 >#nnk5_ۮ p.7N8z dn8un{Lv¹P&! 2 'LvC p 'Lv%mOБd7td0 LvCGp' LvCGpl' ȶC!Ld7dd2 Lvd7dd2 LvCF&ݐd7\"Mv%A'&;N1#8b3}DGcFpŘx1f^cFpŘx1flNsFd1g4;b3/ƌ1#8b<ج1#8b3+cŜ,08b3<-b8`0x`s;칋Zs }pۤ؆ Y>oȠ:2iȠ.my@ #>oȠ:2wiȠ:2KdwiDmo(}ސI7dd g}ސI7dd yCF&}pl Ȗgy@˳L<,>Ow n=\[O.U?-W譮\CpVG2 {[#c{7b{7.g\s rp{7z ݶn "|2)6~!M[mUl2)3IUl2)I=7 6zΝ|=Nb6wJDD1^OtJ Nb\(ƌ)QS3mWvcF(ƌ)QS3BD1fNbŘ:%ͅbhsFsD1fNbŘ:%1#tJcF6I1#tJcF(i1g<)a(ƌ)QS"|GDb.x(낎b7 @Caxhի\xh8.u<$ߝ2-u<4d^xH<py&!\KnБ㡡#CCCG  :.u<$K ȖDŽ:. u<DCCG  :.O u<$K : 454^Chjh5^Chjh5NNۮۮۮۮ|>h^>hD>v:O-4t[H8+} k@-4t[h<ѷyo#CBCG|jзpl;ko! n[ zw'z.s!h襉\!hȥ\!H84dd!hCА!#SA%@=Cpy PA¹#F`ЩSAACsAJ8w4CA' }З }p>Hj}p6 :}BAC!t$\>@Kd3L2&Ql>^=? 
Ŧ-1\)6`"\)6`ln(6=%bŜ\PŘ Ř 1#4cFh.(ƌ\P3BsA1g4`X*ƌ\P3BsA1fbŘ 1#4 Ŝr_Ř 1#4cFh.(ƌ\P \-ƌ\P3Z h.\ah.(ƌ\P rf@sI SMŦje/G@xajhU&\jh8$\t5 $_q2-5 4d^jHjHjH8n:24 4tdhh$БI#C@eW@eW@¹pl$p$M :24 $\}5 $\$pA7tdhhA-:EAݶqt$ܶqt$ܶqt$ܶqt4C@C_4E@@C_$6F$6F$6F$6F$6FD@Y5th<p@ 'Nh<yD@CG> } gGБvvo,{Ch^ r%H7JvO8k\ {CF&ސI7\"[[i~+=J'\[ivoȠ:}C ayB7t X>y]v_8ˮ$B 'p hx E˄/=W^zBБ'$lv|vg'$\^`. ^@«/Ulr3_E_Pl܃b+6͵b+6MbŘ 93 1# 1#cF/(ƌ_P3BA1fbhvYfU3BA1fbŘ 1#cF/6=9QŘ 1#cF/(ƌ_P \-ƌ_P3Z /_a/(ƌ_P rfao+*p:'\6'q! {p٢dNoP ]J=l:2 #ao`:2˖%Þpٴd叆Kd%Þpٺd°7td0  {maOȰ'K  {CG=ᶍð'ܶq6Þpan8 {mDo? $zm{DOmo =ᶽA'ܶ7H  {9=,:lS7 {C að7t0  {CGÞpV3  {@۽ ydv'mwC^>\ e22Kd&%۝p٤dv'\6)&%۝p lwCGБv7td Ȗf1ϲ˰'\q6.ÞpeΆ=᲍K'%zC/1K=,:2H۽p  N4v@'%z^D+#Do :2H #ZkaDO\xK%c7__l޿_l.9?IocI9? IocI9?$c?1Ř%cI9?$c`o&`:&93Ds0I7S1t\}s0I7Sq1Toxs1gbÛcI9(L7Ds0UmL57cFov%c9@b̈13'h==>[h==>ah==!ףh==>qh==#~{z|e~{z|e~{z|%e~{z|CGF^b+:2'\^Dl?py~{z|C4oOOm4oOoFoyO`oyoFiFi^`oyO`oyo(ߞP>)ߞaSZ=> 3` nBi  C!3R==P g.lǟ7亵z~{ymKdVcVc󆎌 p&Z=tZBGFpl)Z='td/td/tdt ]C.O7tdtoHPrz {(}CGBvӢ= Z'n{Z7E{~C!Z'n{Z7tdoOoohpyŴE{~Coo8ZZ^lz6M`Va0ŦBUlVhŦ`a/ƌ`؋1#bh6Ŝl؋1#`a/ƌ`؋1#b3a/ƌ`؋1#bh>  {1f^cF0Ř {1f^cF0faVͺ3 {1f^cF0Ř {1fl/cF0Ř {1g`؃-ga/ƌ`?L;|mRt.Lܽ  ,ߝp >N/ X;|o:2,ߝpKd=-ߝpw7td |w@;J ߝp7td |w@s1Zkzw7n-|wC vvn |wC]:2rP 6|wYB5t f@Æn:2 ;l4:2 y"w'}wC^\ nrw7\"[V#;w'\j:2 #nDT:2 #n:2 #pV vIG&vC w7_ k#n=|ww'n{ |wCGБw7td |wn.J_xg}8=^Ӂp*QwNeT`:*Ne/6éŘuT`kNeT`݇SكIwNe0éŘuT`݇SكIwNe#T`݇SكIwNeTb̈p*{0éTq=LgéSp*{0FsFeSكIwNe0éTe>Lp*{0US}p*{1fD}8=rf>^-géŘ-hk};pm7tds.mik}&E1}<=p;Q/H|֝p0lu7td4úJ%Nlu7td4ú.E3䄎9#ANh'\6:2 e'A>p9I oTh# ߢ' ' =zx(yCGF|<<'P/%oȨA>; 2'FZ/o8U?zyzeb3%Xl*#_M\lzTŦRl.QJv1f\A=cFPŜѬ9Y=cFPf\A=cFPŘs1f\A=cFPŜ|6b3z.ƌ1#b3z.ƌҫ3W1g4b3z.ƌ1#b9\@.ƌ1#bh93@=[ P&Kp[d.,q- r'-qC&"Kp:t'\nY󡻡#%n`.,q%N8 .-w,q>%KБ7td #,q.Kpr7td ,q@XseZXkaz7%K,qCA an,qm%NmR M 8I%n :2iȠsեй }msB6M8܀0(cې?Eƶ!m62m8Ս.2ۄm'cpl6 Ȗ;[6 mΖMԗdlv#m`l:2ۆ ƶRkm`l:2ۆ ƶ#m`l:2ۆ 6 m6t*0 'gp6 |mtnCN܆й ;szA6T\mÀM8܄ۆ:#mȠs:2܆ :#wO-߄N8) SzN7rzl~;]l~RN.6t'n_'nQuÉT:LÉ'nSpv0'n>LpN.ƌ'n>Lpv0'n>Lpv0'ncFN`:N&|8q;Jr8q3:LpNJ`J|8q;jć1#zÉ3={g< 3.+ 
[Ɋxun">ݐaYNSV">p_'\n.+ Ȗ̊xuCGFE|<:r4">p9[nȨ_'\VRn迖xu,dE|<>D |<ĺ5-^ԴCn-^&CN(w<9j@n4n)h#{<92S.) ld0'\6S.OaH#{<#=œPx sB ) z憎Fx s¥c#{<9 ˑFx sCGF#{<9Oanhd0'=œp9aOanTBs#{<9aonI#{<9aonG#{<#=ߜp0hd77td4W=ќP^xDs^R^˵tUÛWl|j6v%͆UPO6q5ޑBJ6V} d=Xiƌf!یB3l3f4 fh͜Xoƌf!l,7cFmƌf!یB3l3f4 fh͜x4O6 fh͘,d1Y6cFmƌf!یB3lQ"5sFBjF!یB3l3f4 fh͘,d-gY&[ ,tm>Kd>?Kd>}Vj .w\ j\GW[p<!W\:\:ڂ}W[p`|r^-jή jБͮrp-j /tdBGj u/_;/\W[p,\~jήͮͮͮ gW^&B/~j%/tdM8ڂc ;gWB~j:6 t򯅎BбGcp\бG\`б_D6/\"u:Yu:Y~#u:Y~#u:YMj }f5|B_Y~/߬F+~V_?тc.=J8ׂqB=/KѸ~Þk׶a:caֱ_0/Euz: VpԱ_?؂&5؄BG6/tdBG^10 g7g^&qB/gv۟?AʳD  I N +_~2}믏I7_= n6 n6GuGG5Ght2<$L d*x=$L/4I'x4H:<ƣA nƌ N&_ht2G dՏI'~4H:ޗ nƌ N&_ht2b NפGW?$LEGht3g$ht2GW?$L dՏI{YV:`#eJG˖/ׄqeopW?8?~8 "..%lp:2 .h .i㠿Б?]p9N???~8 'A\-Aq A nqMr8/\~8IQ.?X.?]Pգ/Ey~8v#J.Tr\:r?py)z?py)z?P7'EoBNބ^\>+ℼVgE"~8+K#El~,6_oŦ`".6-oŦ|ݱa?".6*ŭbS< llcFPŘq1fE\AsF(fQA5F1fE\AcFPŘq1fE\AsF"6+b3".ƌ%`-k [7켊ZƫzZC㵆B-k Z 5ز@[(`R1^kY҄ˊ%MY҄@CX҆9, rɒ6䢓%m8.gGY҆ #%mD!eI.HYҀ ,iCGKp9Kʒ&\N jvCGK\Xdh:2zi®6لRagrv=p6*ggJ%'UpۤɮG6\m'UpۤɮG6'UpۤɮG6tdd =ۄzrpɮ6'rdףj.ؓ]MڄˣÞzTmC.{Q {Q ɮG'UБѓ]M9ؓ]pM]el8q@>!ͯŦelOŦLTT6:^? t 6˧bg~ 6˧b3|*ƌ 1#ȧbh.sFs |§=b\>zf1 b\>!|-C[r a>fS9@c>ŘOlar: gӐ?CN!NC8\pr: tNǬS[N#it]_ mpmB4i3k^6 4mziB4td6 gmplM~gM~#Gj:2x )ᶛ#%vxnpyGJ #Gj:2x{vCGpz!N8gdjT :HLE) 7 囅OC_>^> n!|ni |22٠7e.p J8۠2wTM`[b%T 6"ͦt 6b1fSTMQ1fSTcF0EŘLQ1g4bV'\,`ubV dlV'ؒNc:fS9c:ŘNa7 J4*q-MC)\U7 R!*qp:.%\]6*)p3 Jn& LC>^}p3 ~GC> n#I8Kdۆ7p0f4tdp3 LCG7Б$6 ۆ7p~%nDf.op3 LCG7p~%nfN W↎=4t: '4ddr3 f.$\"ID&7p9$LCer3 L¹Б4tdp3 s; MCg!nz%@4Jiq+&,nz%@4Ji8l+V'auV'NCNX^: 4auN qp[C7 5q⦡-MC_[Bp3 }f.Whv3 +4Bp3 fzÀIjfn#iȞT#qӐMCA7 'qp7 5:7/Η W/Η+6>8_|yq\|bǘcF7/Η+ƌ~|`7/Η &)|`zxq\0I}͋׊1?c~7/_ {l)|^VR8-ؒ-ϋsۊ1? 
c~>/m csۂI8t^֐D򀵄*Ѿ<`-tyyZk }% NB:Ǥ5OyyLZBU?^PN1i UxyLZBU?^PN1i t^pLN'ٶa<#yyn[màyyn[CGFܶNm *ܶ˹Nm dܶz~yn[|nܶN*Б$tdt:/ka$cX5#3 g4h\g4\3z7h >#H3>#h322)h?Hʢ,ʢἆ?Vo*6ޜL/Vo ת7|xsW0R9+^ޜaTo &e术`:93xsW3A9_Řśs>Z͹[T{sևQK9w+D0^Ϸux=3ޜlɏ:`Řuƛ3Ig9è3ޜVk:`t|sX07g}uۓRg=+ oOJۓ.I댷'y][ꌄoJ(hoJc.;uc:2ꌷ|%x{WB錷|5\"x{Wm7x{WCGFnB:1_ ucU1_ uc5:1_ *:1_ cuc:2ꌄ:#J TFLlIՐْ=+-c22[|%\"%y{WCGFK1_ -c:-3ߤ%y{XcK ^&$okeBK ?&\۞n{϶';Ȅgh? z{Z@ڠ5tشAoOjFpLZ01iT 5H#kF,[e&p \1^1p*z8.L`2ic>&p \1]1py8.L񨷆ږzK񨷄*~zK<@m~!m4)eʖOsKĶeҖOsK([v<-!ۖOskhˎ%\e۲in eˎ%-;p0hˎ5tdenm4x[CGF[v<-޲%\޶e ɶx[BeOsKmˎ%\Ne:2ڲin 3mfpL5UKh:2ڲ,#-Kh22۲| x_C^[۲| x_.m`[v<#-;pl>pl>pҎG5_KvDv<0{:Px_C!:~ Ӹ\^Ct^syK%n{:~ \mOm,\:p~^lzy6bC.^lۃͯŦZolveŦHT6cFpeŘ\Y1fWlveŘ\Y1fWVÊ7o+uV >|؇[ 6b.ax]ÊlaŸvÊ1#bs}b g} )!po7 VCAZ beCj8Bk5j5kZ#jȠ:Yk%V%Yk5\"n{hZ VCGБAk5tdZ V,p~nD.diZ VCGp~nȠN/ 醎 Z#jD6. Z+\?kȠ:2h Z#jȠ:sVvJZ AZ+r;Hk%\ni Z+r;Hk%\ni p.Vv ZZhk.zAk%VC/uH8[v n.%y9A-\poTNml {C[x#ImlB%UZCPi E p[Pi E pVi Lz@5tdPi TZΆJKPi ;*-vgC%l-k8ٲ-k8{ebpFc|813p81l`h *-;LpFc0h,ƌhg4Q^NYhg-I;LYt>L&- MZ0AZxhg4uI;L&pFѤh,ƵKv81^jg4~M ƆҤbL<=mҎ16':PxX`eװ':WxX`e':PxX`Bya EMOt<,#':p[D:2za =zK<p9 l=zK<p9' L퉎6tdD.{a T9xX`CGFOt<,0r':Б LxBGFOY"Ol otP^H n)_?'&6~:pR'&6QxbbmK~:БQ?B oR?O/leBt<0v;P?O/leBt<0 „ˋ„۝Mt<#~:^p6\"[l @?J fTlz Ŧil.2ʧClOŦ\Pl* ŘS1fTA?S1fTA?}k.A[㚀 6+b PZ7@i㺆*u UJ{.JسO`*5Uj*جq BMuUL?%K ~NS)\Q? DQC\E f5 p[`pA:N.S jT.4AM.(pl9%\krA:2 .(Б% +4!5\\h+p.'7zi8& ^zQC4tdP/ KCF&ҐVv!JC^[ٕ+ ymeW®$\nٕ pdW.Jv]I+!P.7 M%,Pz AI8}zA4i8B$ '4H¹Xп;$\,mu}?I} a:ΦV2`'{sŘesT,^*B EG0U` 'y8!/$ yTx}8!/'y8!/Jx?P2c -k4x8ͮEȌcx,]@ʌc.Oˌx<-rW<pgzHG%"xGI$- ף$.AQ u(^,|^hgU%+Q C ף$z%y=J" ף$.>GI$n^>GI4td,|^Hݼ,|^hDݼ,|~v>{Dv::_7tdw{%oБ%G? 
/{ZuВl|k6Ԅy׼fCM(lxK6֪ gd ^nl|kƌZu3f4ת1VlU7cFs3k͵fZu3V݌kbc':v3݌}c7ox?uf:v3s;X,\N+s;XnP.8ٹ]p<]_ .0J r͡9 gY[H:8Ne.SK_/CJ:Ϲs.q\>(q\N?(qn]pwK :]p|*8^}oW_e2׿ W_e2׿px*8d\BG6׿Б/\"[qP.8"/G yQB^xԿ˽w^Ar]pWPN8׿ %/E_0׿p\.8оvP)۹p_gw^ s z%/J_0׿Б/tdsv;׿ n7\.ݼsv;׿ n7\N8׿ n]p#_c \BGտW:ͯW:'IBդdzAԑLW:8~5#^ &u$Ӌդ`,_MHդd\MhƌXItԑLIT5f,_MHd]MHd*_MHd*_MՄd˽ՄfMUv']ž\b_(얮b_p>-¿ dq+XT߬_T{xa}Âfi=.8ثaՂXp/țJ wЂ{-p ؿntdR_a"A_,E"A_,oE~>,"EEMӀEi"4``C,td3_,8Ag> B}=`/\=#CPБ("[4{oAF p @SAA Tw8<GPAW E=P-ꁂrUtW C=P[ n0끂?}=PwꁂPz}'(Ptd@^Ëz pzxQN/E=!iv@=ꁂ @끂 @AGz`W@W^ܐ4o479_!f(!f$!f.nH>G/nH>F/nH0v7L+W7LV79;!fZYa[.^ܬf3u7Lj6ӧjogwpq\ܬf3]ܬf3u7Lj6Swpq\ܬa0xv=ga,`aX; c{{pig3Kˑ˻r.>r.>ԸOpcOpoy>6ٟ}l*I}ʹC҆r.*Iӓ*Iӓ*I;$NO6;$"lV9wH f$V9wH*X\!)pxctsyU7FW9wH *种=af}sXY}XnY Y }XnY}XY.zX2Y|XvY|3Bs3Bs3BϳYw3Bs3Bs3BϳY2~QwN'D8=;ug| Oԝ +ԝ}YБ,Pw,Pwtd; vug`_w;-ו^-뗺_o3-%fZJyqKT%fZFyqKʹxcxi-6J[ƌXw%f#6cF;_0-ǐE^0/;_0v֝/nLu[mŅLu[m-6Ӳ?vc͘͘͘͘=u0ֽ믫lXfXcFkr79Ys?L븛1wֹ1wֹկC~|y?ar~y]]cH}y?r~ޗs TX+3pLUo#cvxq~0%o6фm˛=6фm} ۗ7{ /oX75 ۗ7{ܐ˛==Iޅ˛=td,l_1p]ؾc!ibaf}y@m#caX:2U "SaXؾgA.l_3po]ؾgA .l_sC/Yw 3 7{cW8t^~@ q O[zyK@u[/o8=_Rd򖪁ӓ-U7dާj_4P {on_#cާA~y@5/}8=el_4pz iAGO {"26/}8͟l_#cާ>- iO7 +?Y^>[ڬ^6úe~un9~mnigr9{߬_9\6WucFcFcF7cFcFcFeᇡzެ8WχD|%Tϛ˶z>aoG|#Tχ9B|#T1fZzaD-0Fl#QKo6Z0fZ0fZzaD-0Fl#0֭aCeY} Tևuo+ه@e} Tևu c0| ߟa벂A`κo8%juͣf o5y/ahQ3tdh :24Cd}3_Бׯ~;.wNji;Cq;_;*]wn!"pukG ~SA]  :2t ]wAGdu`XGc]uA_x4}XGcؗ{.8D7em0`|*WF>6pL}l` |ٟ?Cd-o 2u.-pAG8 :pCāe_]/*ā}uUa .Qo8pp':+Rp`t* :t :~pAG#C\Б."$tӃxCtӴ8 :2t:]w: p;=^tnn|Xrsw-/ny37[L 7[L7b7RƂv˛iv˛ivˇ1#7[L7f̈mo`y|sTܦy3-y3:a,onἙ[8obΛi5ΛXfZfZaeaøЛ .KBY:oY:oY:܆zae|s͆qmcF,%S!N՛dY%7l[YV.0K,7?8>l0 [@9nv)yfy߸y}} t~{as~{as~{򂎌}LW!/8Dŋtd8pOa–8Ė8Ė*nsNw+stdoϽ!+stdo8s?wAG ܁Û+so;8:pM8ԺoM6η7 ʷ.TZN/.kzqYj]^\Zpm;[[dnolooFځjooFځjEV4Бtd :27ތݏ޼o7M܏޼{Cw.;];] :wސ]Տe;P] :2w$w.؏e;pz؏e#c?~{킎]Տe;pxw?~{킎]|wT?~{dd}9qú~mq96{nm}~5nmb9"ج_F6ttCUäaT^p ק 7D8j%պֽͬ og}u/{0@uHHAGֽ#C^p_)8DַZaP8E@__}}476D__-ݺPtߢ :2R^AG`.8稯 :2;o;/yA&k4&tс}QЗmtA^>ydžc=vA> oy.-o oz"}`ctd#C]Б.8D6<8Dַ.?%wAA( B]G'lmzo0}/nn罙Vnn罙>nn07Lk{7L+{7>fj>nn罙6cF0rsT{3"7L+7LMmƦ6 
fjonV3nnۀo5ʛۀoMf[TZ|afS}sk͆Mͭ¦ 5[ƌTZ|afS}sk͆M?6Fv|ae}sxfø6~a}s͆9mfg}Mu  [b}{Xc?=p4]c? طONطOFOطO/Xc?=pxu}{CdZ=zjok@G0:?ז=!=!= ڲp k;oȪj1vtM}4M}4M}}ئ M dnS ??Zdd~yN.㻠-[{|5wA_[Ʒ)Ʒ.=;P=㻠#ck #[ td}MUvt(2z~=a=0?>a(7ߛ ͆f{a~@0߇1#߇1#ߛ ͆f'0(߉b0K(6^B1~X0?z05/ȟּ ZaHWkW<y75/wշOA^?}{ aV߾!з݇=pշC~#C^Бo/8D#C8]K}{0}#/jaM}4q4 NM}A#C8,QӝSAS_[ f$4~A_>4y䃤 $5~AF #SS_0#}#8<{;'쀂v@n}C'쀂 v@ 퀂 v@0j퀂 v@!aԔס쀂4j^}eWv;?g. DWv}kʮoBپ އ$AAG頠#{Mk^&4 5Mi@S_=`N=W=`N=]S@5lnAF`ٛ{H7a{/ʿ7fZ>|so蛩ysoʹp#ͽFͽ7Ӓ{oϳ7F?Qxso$7fZ)|soU337PxsoØK7FL%{oͽ7:қ{oͽ7j{oRfH 95dU"f̈2f̈2›{o6U3~a1#o0WQFxsØe7l(#gf\E=FͽR{oRͽ7Jo~ Jo7~LR{RۛOT bMIRۛhw ދ>pk-} 㭥/軏R{^KoE_БQ*x{/SR{"T^ ދ~CJoE_БQ*x{/aTa#t'u&oo8MVRGtd-5i|{x_!o(_Wf;N75͊weo(_Зf;N2͊w/fE YlV22fE #Y8D6LdU%>翠:o8v2翠:o('}d/d0x{Cdr2@GF'#Б]+m  BmO(8\!պT蛚6Goj6G#hs:26665M9翠 o8=9翠!o8=9?pzisAFf}6^6 6aGf}CvXYߐֵ/ 붇umfaf}CvXYb{Xאmƌ`sƌ`sƌ`sl/ƌ`sƌ`sƌ`sƌ`sz0g/> v`W 0^Oz®8va+=b~e0gԯz0f0f0f0f0fba]0wl;`Wl6+6]q3]q3]0wx,{Ø-,Ø-,úzK0f K0f K3oޒج$>$PPB8R(B E`[7o8 + w¯( ;W~E0ʯ~EAߚ+ :2+"pxq_9 ~E!)8D6˯~E0ɯ='"p>БWW mߜ&hӤm#ptmNMmmm#QБA(Ȯ+ȮCkWi^jC(S`+ӓ w%_@-k w-ܕkzYR'^ z)  /^ [/LKAF& #RplxY |/ؗ} Kς=_`WXm_!2|]2|a/2|CdH-_&e :22|[{E2M`ߪLSЗ2MA_o oVx4d 2MAG#LSБA) N di^L8+ 4A){2M{4LSdL8=i v2%ddi 224d}dg/5,rXx6a}ٶx6fZ_<e3}u. ųQ62({slQY<e3-/4F9QY<e34glųQ6(Ii%~3fDf<$,G#LNjl&f<ʹdxfZ^<0i6sFi6cFiMl;(,r3LxnfAfܔ͆2)9aLxnä2) sesS6 3glpxVf* *LY%|$,UųJ? 
,dLA&\`af U-|0ZY>$pO-,rRп)'èjaf5),r8fO9 W 3˧tdfO9 ^-,rRpL)'kSNY)'SN4>WzOCb)z!1}SSY>$&p",8M{)Ȩ,8|6Yyz@GF'Б]lM54_lM5ԘȦٞRc tdԘ5@:NgpO> ԺGFi䣂oYHj@~(@-8=T*T(@-@ P*J%@-8Zzkv OV- O}юZ>)px|SA`Ox jlG-TБюZ>᩠#|S0[ Z?|fR 3e,8|Y Z>3æ|fRAM1h̤MbIŠ3!bIŠ3 MC*Šc IgX@?"NO6ݟc 6|,R|Y>6x7 |Q|Y>ܨ`wÍV6[úúoi~ᰮ۬l:[Dجoa^7WZØ Ø Ø 0f0f0f0f0g<98]}10X:9 aft5s,ex_95,Ø,Ø,Ø,Ø,Ø,͆fKga~0?l`< aaaf5sXY k0^OX3u9f501 eoSRfL0ʌ F`1ن&m/a68 qf%|*s)m[h3 L>,m&px#67 L!9(8D6KL`tdf :2h3 LAGm#o?pl?Y[LEaK\M J.ˇ&cMC Z|R0PYY>n)p,T5qK5ν|RAGF f@s/TpZ^>)pxs|.R|s,8LRUE*ǁJ#M}@GF_%БWY>V*PRӴL_eXRӴL_eXiZ|TAGF_eXSƾR}@GF_{}N U8'WY>>@_eY{}@GvO&e#Q2]'e#Q2x:O,T8D6oԟЮy@5OF5]軏vM >5svese,[8|خY>@GF2՛ QY>H-pR,8 VoR+蛚AjRY>H-PAjË՛ :27tdtSY>&,p|z|LXAIf1a%f1aË՛c :27DŽN&՛c :27DŽm~%vy^6v!X;-,8h*? -l֗u/"u%fWfa]ɻY sXY_ֽ o/֕/1#4C9"aaoExB9'wBh9'w 9r%-^r%-1#-1#-1#- c͆BfXeaв0AhyØĔ ֽ~n}AL9̿^<ӿC̔ ̔O<)7aؓد<]; a ᥠ/<^LK0Ix 엞 ~$# !td^ :2/RБAx k sx[|!^>) oM'}kB>)[IAߚO :2'z$O :2'䓂 IAG#|RБA>) N3/i|8ͼO6|RБA>) tdO :2'Ӝ $p!Ns6iξ9쫯7|RБA>) uqAGl)L)حNC*̔~ÆRaL)at0SB4L 4F-40i`GSM`_GSMMO#<ʣ)ddhQM00ʣ =LMAF&]&&h :2x4}kYБe`S aꐁ8L2p.9p:dNS atd0p :28?ptdN0N`t0p :O8' NA 8 NAG#SБ)`td0pQ=MtszNB 촕ᅞ@9;SSzNw{jtUf(5oaQk?EbMe~kVʺX.k*3b}SWp N1^{ kb?{^{ kס쵗zդKuUb.j6Px2C Ӵ(3:2 2C#ٳ@ Oee(3:2 @) oRIx@&T}PImB%!з @&T}PI"@) JB#Ȩ$:2* JB#Ȩ$xO#5}@AW=D_!}@C}W=D_Z "Fj Fj H_!БWtd}@GF_x}@GF_!pLB4RWx22 }9p+\(z^!i0b8 P 6bPБA1(ȠtdP :2(}ERБA1(Ƞ"Cd J1(ȤddR 1^A0K1x)bPI1(8D+5 A`_БA1(ȠtdP :2( A0?&W* /UA`_=`׼l#PБ?(* U ˾,t :l6AA p^!5|?WhAG#PБ?TӐ pRlxOC= 4{zٿNYP)6J8P)Q*E`RtP) vrB4jBxlaL-8 %^<8~UŃS_^<8尾5ʹ)ŃS6Sk)i%ŃS6ӧl&[P6-v?~zV"ղ@Fv?gϸ>Бtdl8PEKラ@}4ּ} bG+Ď>߷|td5!iHeGiHeG;@GƎ>Бtd!БtdM__> #sGc dd_> P؂@-|l_>#cG;-(:2vWˇ,3 Y(迖ˇ,mzX|B0ˇ,3 Y(X|BAGka0 x`@5\/8 R^>ء!z/8 R^>ء!z/PБ x`^;td^>!p|CAGF/ M#5N#5:{x?;S7ӻ&EATx )*|GA`^>#P˧xN8EO(;gm1|GA^>b8[ݬP7n,Uu:a~nvb8٬/5,4wb8cFFݬ/cF@=]EAٲ- ޖmQruJ* gszA<- vo0ɶ쿾 :2laHm8 J-Cd$"_,`[td- :2è'bC'"pd[}'(;EA - :2j0- :2l EAGۢ#mQБ(`[NS$lim8M-6mQБ(`[td- :2 bC &-!o :l o Et~=/C͘SϹfo

) #QraQrF2F2 :2%O(h>)#pQrF2oA%^?l>c#pm>c glm}FA}4Jn8L6JnQБ(}F0-(}FAGF@GF$plm$p^?={ap|ny,, zz`_n~= ,x )'@OAG&.8\ahC?4 J#e>(PϷ4 J#e>(pi>(pi>Ҩ#sHisH#i;O*kK B˜N#5 9זǁN#F[_ȁVȨ":2HCdӤC)БQE tdTU@GF)БQE tdTUj9plt"=pt"} U@GF)БQE tdTU tdTU!2uCdӗUT@Ff)YEAE ddVU@Ff)YE "^?"":2H=H*R#Ȩ":2H*R#Ȩ""PE O8=H"C=H*R#C=H*}*R#Ȩ" ӲU7tdTU@GWiSST(1:JLNSST(1=p􁔘%@GF)БD:2JLS#iii􁔘%@GF)БtEHMi8e8S#8D6}@pzEi8=pi@NSvyi8=PS #tT?tXlNuOa Y:Yo8֙ Kcu ftXžY/Nb1#N1#N1#Na`10^XLbڬwut]XLޅtX7m/5=LRn싈 S*k O THJ@ #Ta,!T|"< < )cҫ7GzyHa^6= T_^6= 7z8oUБyH@}#}W>h Z E "Цy iӼ}VA߷iO%iO@GF&БѦy0@}}Xl+hӼ}X4;Цy06ۇNm+hӼ}X!ivMa`m f4oVБѦy0@4oVБѦy0 6ۇ"hӼ6M #Ml64^?l#.Y(uN:7@9f}cssM:لsX7+n֯Bƌ$즠(k#y 䝂>s4sT~Άs ؿtc !Ca0eS,NMA˦SeSЩ !X68&p`l˦#eSБ)`tdi wB)7l4;!H `8?H :24D "M!^ O :24D "MAG#HSБA) Ȧ "M4@"MAG#HSБA) ]_AG#HSp Mi( #HSI  224DL"MAF&!&pxH!DC"M!&pxH8~H ^?$iV8̼lWM0ʲ kaeז})ص²)`tdl v-D0ʲ -ae[6,Cd+&g :2X6, M4² at*l :X6 ,NMA&pPe!,~`tdl :2X6,5MWie8MWl M4]MAG˦#eSБ)i8Pp6Sп NAN(8'pN_WPp+(8Spl8}]A Nu'Wp :2(88Pp{#@)8?heϮ6x~fϏL+QG= Q0^æ|W0[Z?8z a q|U0aY>᪠/<,p8Ct~O !:?'\tdt~O X,pUБY>jC:?GQNc壨 wY>jC:?GQIg(QT壨|U> |UAGFg(@-.E壨 QT壨 QT@g(QT@g(QT壨q:?GQ"S|U6N22;?Ԫt #22;?Oa뇝;?^?l| ͝@?+4w͝@GFs'Б tdiFzM3͝N3͝N3͝N3͝h:2;N#isS(Py4RSy4RSyD9gJ ">(

3+U\nižЅgFQ UG(/}j qYz3{߻$2Ƒ+8?;吏ݗʟ.}q_ԂLÉ'W|r3@Őߚ2<)Q)Q(qXܓvU$sB X:Gy~.6AC_~%_6u$6a8_릞-qq})_TXsOː~,O:?+8(zdJXߖ_w{`1H1A~}ԳyD܅|dXWUly+l1ʆ2v=缭!xwbʗPF _pc5O=d(usq ԶزE$.<lmE;(+pLlJ-a# F$SͥV%\M9^ɽh-+7mE5蠿P}3M=M\;u*VjtZg_'cX/*cި~JTRsr~U>jy\|+/:GC[,eTw4c6ڻ 8wb >k&}i?\)V4cPÎt}JלA}p*c`:b98_.pn\o6 8>At9ĚopJo| @U(Wi-Ze}+/K;.|$BbCr;j9cOh6 Q!mBC96gc/Զo8ٔ@^ozah(keA\$T8;i\\oĸ:~tYW[Ӻ󩓏}z\ǹFzvZ Ist}YzҎ%7l<]Vs-Q=i\F;j}xs}o=!ֿ)Zs6 ~?C|m_ 0S+%~݊[73dOl[3~K|dpt Ww1y kSLe$P69OoS#-ա3rԻvqTY׋Wh,j}h>W[z<ö̶\5 q$mf"[}aW=Čs/̷bjYXw HHv-Ņtͳuʳ,{\\'<𬸶"u9n6y>]kx9ν׫-{- Ooz"].&Yiyxru<hc~W&s=}st(/a p0/n:oP- 8t h֗8~$040`h-̽39q,2/1Hnt3|9 c ƋaF,ҨɈXw on($qΪaQ=̀}, dn<ۊ6X86|sVO3Bi4 wsL9cc&%҉tRM r{'-[Mr3t׋_9h(ZlT7{Wˊ`6.hl7|C3YbO,qW 1~e+^f^o7yܻ+*-Sĩ_Ơ79dPΕ=-{}?!|c܎Dfei/X|*РY%RC{b} ??]XUX}- ]þ@b<OXL" 0Nκub<vp̫!]nh`2t*XE1<{eƁ;&$ g>\H~h!anbuHƸ85Ot͙<@P)k6^ͺ1?k\&4c} ְdPo/PjK*Wb=t8 ~%ZIJ&2N#*Ԡ)qjUk9($K~fU#HT՞rZ#Vj^y{HKc"-`8mSNgu Zb[)= ]%nCqLL{ 55Ǒ~Cѫ EX͗=J\}JO<>4 %Sy튱4媘~]s{]jGHٽ9~_n9ˌ; bD X|b덱aFUDMZVcݺ!(B;A'wa*)jG^Jt>-ڱI7-Y${Ω[9k-6eAp43+\є˙e-1`f(s&bS{H7jF_5] Vv4їofhs}]G|U/[Ψ%KRc`[qx[Fc3P; 5O%m/k4;Vsm' <.߽|뮭"9)} 'fn͖pRi-y^讥ioe|k<ͼ^y66ݖ}3vV<[0Vwy<$ݵ;+>Öek Jk)hV\/K\_}xpWO/~Gz4ksucC`/…z`wnoμn &~w 3>^k|*{xU#G$qz_U[qMYAL(=aV(XNeyΫ< Uo~I>1ߌjO~/=Mu<+9TI)>aDKE8NP1쉚oyby Bp#7ȜU Jsq#E9qjȡ_ɛ2ނ;Xrk\cPmⲾ:9x p>vQpuuazW,%Q͐xPM>j]~Aчl_e@Q+q{X20zxQf{wONu**c26ڔ.ك#=y/|_t8=A]kh6wsjcsϙIZx@d/1w=Z0eV(pw=#*%V2 g3ss=GK>[rs},:|MqКmSl [y%0%/vvq:Rd5wӾ-\g%Jjj% ݄s Z3ڴU/;&9 ͧkDeisZcUS0_+.֩OS=V=x^48WϗUzo=3zinUK+|i;O]S|r@3w_>LSo6NxiԯfnNվӌ{\کXUش}:||ޚndK-εԗ'̎\&~}=<ש8L+H?>>Wix9Iy"h/cZvdd3ǿ]Dga+ipaqM1r0@127ŽكQq6"<;zaMmbO\<}>#͛}n?}4bhP/=}XI@I 笭A$܄Lj.ITc^u5`&CN+/imG8hU}^\'c;7 H V.`xxb8ӯPh $?mRM,BƴY91s0.d>]dX W-Bd Vk~&>[E/\:R)_%-υCQ^XE 繐FZhO~U?{y?9 =qm ®a G|]ʦ5z]8U<ʍ|A'~^*Hʧ }iҘD=h2^S8o>* |Nb{/a6DU(ɨ?t^vNjɘX>*|UWL.w_Sp:0%?NU %&vۥ(.!aCyOkި*@UaHL3c[fhMSj8B$|p<#Vc/<7,~p1tQwGf\71B(L@ }.v. 
Ck#qQ§NZ CfɯkJ-4hK)#;Y"=@DzRKNpO/r"<~F-ȞCI/ qo`P|]^Q%KSuʎCm+]/ޓ%sy`'K"8WsO̤ jqbM0c߭36TXLK^o߱bɼ"]A]c_vλ/WwFϷe)zۓ22hOӤܠ_\}I+Oz#mzΉ{Z-+s`D7[>ZsqW%ʙ]RwcY1óxv]EǐNkPneR p!s˓w,}]B*Er6rNuAB)?qu!_jalNe#M2;FN>ig: Qv'$sa4Z;Z,g-\=n#mCP29X=Mկa5Oi/0T|Ou2ǃY,Ut|?!SJx3q l[Qxݏb#\TZ"Hڋ0g+9{8”}9 TR18>U ]<|h<RKF+Wy8wțG^7J}K>/|_uAテx=tr19EoxD" 2lSݱS)+~ t"/cug>Dd9BvUwnb-UFU#Dܸ+PV6f89 s`oaέ܌ȴkUne;ǜ_qB@s~8Py@؄+ǵ1h {#rw4w(C`K3DLqogDvDw_:R~dԚ#!٫@WPDn8p!3j&"fڊP 2ϟ>?|k|(0 U;"7M`J1`9([2~ gǻ9P3jVgh)x}l]9y¾tm@=W(YjbdSZ|bNzel݅ǩg0CUEt7]T9V1|~#;foSEqVl>HyT~A x}^9"!4GN򡅱|"exА}x֗}a=}/~ Y^U6F?O7hB7pˆMo~؂}/_\({< bFwFi`W~sG2m_-߂~(1#RR.>~\}\E] Qp`U="!U}i^[)7Cx wB~lazӈ/efh6i/þfT/gڛH22\_DqaallP*9Т!Ίjq]aڧ B;W&5fS-efR^9kXز/}Q$_&~Ġ:-*kr7gJB:T6{OTG2K5%G'և h:gSJ<Ȣj)EqFte({+qʲ^eyfrܬ\*AsM{>S:C:C>en_}'qZ"$b,3lyAl2JyuKMaQ1ԗ%崺k7ŋk6q!ŵPlo/R%kFIXʎ@ߌe!S ul9ߜ[GV,u^`eߤצ]F_t*JJ`Nɇ+PXDx׳Sޔi )dE,r;|r2XMӞ(kORЀ]qFp +;"C|?/^LRG? \[PԕN\2T`. ײ\rr[7K^ݮ.(1otґ[k[|X)v`q˥Q@ˎIr\S˥R Y3x~+lWG;fJ:c:}<8Y5w{_6;1_fC` HX% 7E.%2!I|,3HǨfc)8.zHED T-B O0+"Qy-1sXnxK{.`+:ʺ>?4 )_(PF^|{@i*~xK!9.#uv#Ń"hGΎ :/o\M@p)ÁuSZJ b/r&R! 
+SpJsɶ\;O\t9X*o<ꨍU5,6/t\60&+u 8˥t!B0J< QGODNѤl0DQ >O |@u?t\7MQTRW[Ӕ{RETl5m)eɔ<kZQQC[+rs@ g%ş" 頻S^&Tߒ'B:XqAz;n}&GŪG9&@% Mf#UW^ Ŕ$5]|󖢂,MH[ gw>7Qx&7 -5ľz$O[ʉa׳5IJY^I;Q}o&߯oǺ"[+-KKK5?To?ͥyp ~X.xx-} wi￞k<'ޚY ^In{{GWa2cGS]b9_5cbByiywݾ\yE}t\< K} M)N(h@CB> h(HkȭPq>7 (rrقL$?p8pRm?*=x-&GD*- t %˖AV%j(Ǡ -60 K>^i4_֠ 2>MyeO98pɳϷgY1}|p6>)&AVPʹDx!NOqᢒԔ]7OM>QX3Oz +ntEr K~_U;H ե)%a|Ηԯ|Zu6$c:/~-ߧlu'N:] z֨>;]TsQxDH`jBB.L02A%b9&vbR>*eM[8Ppz TFs @pÌo4H)t2ݹ2Lw 3M-f !n蓡h?g2` V)s< *[&z '(Xοp/E2Lr?&qp$EM;Q\`x󎧾s~0 Ix uǻc> t1 0A2~Ea)*%.0b8POD6c%S9˻^Wwn1CmA5rL-|dO 9_N6cA6#+~>gl$FKwIMS(L/|5C$/Ikjs~e8M[c8`\=_XN64Ny3;8cL0W Ay+wovyOymtA_l 2m/$M="۔Yuzc'3$ )Y4EĄ1=ܔLBCȿNYaBa#v:!*HY`BCq~#dz/8.a<04"T\by:!.:gSbRӘLudBrlb\\u!/ UԔ7e$kǟD͛IZRԄȐ@PF' 6rWʹ򾍒0Ң/Xvmڢ }B~ʋ)!^nr y>!0263kAo[IJK8n3u?-d'R&b-I`6탯W_ Wx2iѦ?į?;/k,~S)׺O|r#Мo}w\ Ll6Qn%ѲmD$iΏ& R_M4fJaY|E{OgVslObܰ7O{n>q'W,eLڻ&9Haonj#{2K P(|1 L4~gm{./߷}bLq_'h8>"; Ʀ/ɾn ޾ _of~RS#12   rR?Vɀ y}ÅeAYF|hIVƟ.O9 hB$~\`-@R b U)|?| Ws=#j7!޹/2):YxF!ʼ+uS~q.~u7^t^8J8.HS! +wF0 e/l鞪ZqyU^ GP ǖk@/s;-OF{A*`<2],|H|~yBGB]vM&9N6d+OXs2L=߮>1>=D.[p?<$D&{h v3RanT[>ݷX̲ͮwEfL!׋o$ױOrUFl77~ÝSM'Z7p'gop;X%tZۧ7@6}[+^lkc v z"djݘ>şKKWڧ8[k!5D8!?"(Kixŵ 6YN=eɶ-&~n 1-" Ъ՛onxXn)Qއa,( ^8!q#=S L/7i=dbeTldTvޘ׷45q#M`TF/;Qn8@\W)/VG/,F&ބ}j;LVKaR*cJ? Aq\|oOpX#м)c/tqBy(tזr'֧チm<9Ӽ~i~_9>o=)LNAfh6dV҄0ڴoBCGIy3Tz|&AOJ5vʛ5vp G[.>}oye٩@|'0Χ`rr5뤳Ve "9au+$&xӹઞPq[0e +;. 
6\FZSf0F ޱ1JrJ9'ܴ5dr?+d(B;{O&KUrjaJ[`d`I0L0:` u$dL7w1?0bnsapɢ|5ܼ Ҥ39l%ׄ݅`U_,FMs1V,Nـ;?| _jቍ!}oWSL^I'~ o02P/5sqL+Qpփޗ`m.Y|*6C~JNI{g]}r<=R.wИM}1-ױ:+2aL๭O}Nh> p/9X\1/4QpNRS_hfoY[ޏ1~W n@b_76O^?{6+/ܣ u5|@M1KeCpX RLg;hGT.y0'>\t'k/v۫l&2ǫws2y?\X %vBEWOq=*nfǰOyKJh~6VcؔťJ]MLXtv,ęFdu2ѓׅ#q)YPs1gp Y7g!ϜCgql)ҿ)ZEGx_ILta>hRVMrRfp}qkrL3p8:Yz `lɍ 2D m65Xд6__jI+g'${)7Y%fo:礮ǨӵG2WaFid(ިyWxBFFUN<L&qH:0H]pFt}b:6mJQaRojX^oq;ݨ\͸bFcw.}l:".A`?Kv'I'9~;W .>hI>2VMpU2‚_x.xθ:PyIK.$ dNe;<Tr #0K:K4w@r T`sgQl`q輜]A F؂S}Kމ֐Y.h뫻NcPkYEmBL\렭8'Wb!+4q7x!NelU,9㕾ˋ}T_cɽ;7cgXł]P=g` 1B[,^3)--sO)0"]rӺr@r|Fh;Y,Z[,:$o'708i<^Q_,b3Sj:puWˇϷ}==_ V=D%"?,Ϙ*߂\5F##6H8QnHtlr1n|Ũ#Mٜs<"S: 7[#Y_gD;/Icȿ̽/3y+|M@ݬɧqFw,]кE\BDAYj@EBf1 ZGodW F0!P.PyΤ枟Lob+c@ Ay" !|!x|zmQZ<FН3u,Ui{}ƽ-S!u+sɱnBCg'=PiB0_#i2MmQcwF3=Ħܕie| Iʤ9Ա::L,AUJ{dQ,XH5 & <xp|",-[G5~.1be){dI}hc5@ O` kӳHvapg Ŏ=m7Y)J6XWlw2~w9rjKV(xXaHXEq!R)`>n5V l ~{sqO^o{?uYݛ/}|=>^~܋{M (~v-n 1w%=l I.E'r\ZV{$d|AXMyy c R-/,d#z/B{M»ʱ]&1_Ee*}XMqF,:0kL1pCnAG$)/7Q:mWs㑣v;6ᗞ Xe:}0DN{oy qKqz&E{ q|M& Io&#Gu]_V&㤦$5޶.| LԚ\^r#Xiz٧43+^9$)r](Vo4?iܶX_PdE.;%wnd݉$99izm"tr'kg$Z1YH$tJC+oԏ!SVxAt4{fG~b1:WK*QIqx`4:|e mq7 ô : ˭:"Ȉ:`m;\]=d1W3T ľ;9ݛ#7m2nv4EȱνŽ0+|F6[>#Y󵕢03L<>60~jO'!0Y}o4S}-7$N/moV[-VLBnO eʱ2Co]]ȇfX7Kui`z7s2l/2h4( /A>_ˉ#~LCamଭ͚o>7˰C33ֹư{ G}+avTloTTA-ǥԧ<ZtZlJH= F+FiXEVXXXPfa; \a[iZ=lfZ|7 6/`. MW&3oK"j$U{wl+Ph'>" S__&nU&ӝ6`}Mv}j&s%qȖ@17\w+1ֻw-J5> T>UQ ۦ4Ioscz.-G{Qdp Xn x,';U>0Vhkznz6!soxSNtn |.h֖ϒJf@inÁp2twmH9)HkCeniP7Qtt;kz]j,AATs{7 r]8Z2FgE31dEа9[k VtO5_<"l3Tх9t:_L߻A{FuvC"vlB!1cu"ׯ]lY um SL]*q"|tLEap\ӕ~1bY9]r\Ѐ?tCtvU][Dܮ C}` 8[7R6*pcbS1i"+@h¸G&o*8-c}˼$^SçrM.g?vZb ޷CAtbws1SKH1@Bb ,;#/nNV {RU+NnMDEJV#*IR83(辶jN@x祶KGy Nj$}[<Ռ'?259o\u=[jc8 fr٤JC(wa3;Va8F3wSev/\ue.0/Bop5[?4RFCbP۹~1}9 `{W x#j1̫ Ɗ_ ` K>h~( ba<. $v8E 5d~? l-[GoKh;aݒd y6:ҿή,@4lu.' 
a[~T sd0>D1.[gknl=;pGT *ȉ>Em{hOP(K7uGk鹁 qM&W0&: yT9T58H|2|aa BVnehwcbZ 䃷*RUF S{n.F;D6opx̾tp\^q!쒮[(zbLT6})f\(+"Fpܠd x!}nnjtv߬擑=ZhΖa`U=VCn?$:quKat˻FN W!8{nngL5,sz:?#`mVWX`ÄD%c徣|˴Ng r-1 ro}rbr Ά嚪tww_fwX}a%BNT;5Xȧ/s详+\`)]Cn>9x'S,wVp'rjt'͓ wVwQ<n( ᰻dBSj4`5ؾF}>ƒ RY%wGԽrn9kFVseMy,v9UĞ3\?o6BR4%H6 u,wz %*,^ͮHj%>6ǹkS!@ܢ5z|)΍ 8 }_9E\ i`ʕX򼆼gm~uF)C%Nbt\stagΔDh"I \iY$rn'B[-iurI&zj&%W,].>T'TFAk?}`5Rș/+r k=-:SA2Gѧ櫓 Tnq bo4]]95D.BbLq}Eۙ>ք)NcZNUOI+IRMS'鞭i1G<+\]4 qbS,ȼP7AaF2 S}ѡߎ_^;,;oA^Zoxx W?{Sn)/}KI۹d<8> Lr 0p}Y6'燠c"PD9~AM,\ |a{SVj#S! ̶ͥR"r{4CsR9>R)Pɘ2BEIc$a:ZL_Qힿ H /e'Z)Z_8p)~>Ld }V`yDe+?2"䠡2'MVz4Wߪ VP@s/"9U ݀>=ymeAvǔGE8|:do?7ӼC~ur$9˺ w`PaS/0yS}ּ_Mx?hMpq<r۟EǏ2(A9'yD#y;R>_0ogX)C< 1 k9g `ܿ֔ŊoN{ 6xh<~-xGM>~g_{yrG!V4=S?YﳽU3} Xm+G| ͑1>P-4|rRNKwXf:Af8 9,'䘴$\{iou~L_R6P+TeǹqO[P@P}=x;)w')ÐI9xo|2֯9C| K 7~?G(ӟ4w_szְ9z:੹}iN;@Ϛihx=#5>TTq!yJnsa^/y=pリT͒I7}|D:>~ 8yyg ۘ\u ,"!{쿢2ưJM$/gPV.?C1Zm eGbKM D<`@ˁi4˩8owl/Mv;z^ }Dzm;_ @oאrE W~n&v,*D_%t'흮lƇX؎7oF)7ɾnGiAƛ"ig/Jug<^u#s>7G]P)iMԔ7%!;=s=55,hnOD*WƋIESэ| O׿UN>?롪 ^/[-~FWF}>/=o|0xJYFT5Q|3xR;E V53~@})+>B!O2`AvglSٿ5>cxO3[0??7m\l~r)˹?y_ZZxd Oi,}V#q=Oa|N4R?ZRw|R5>kY"|תx5[۝/?f}_7x}xPfR㓂W3ު?)sq'7jS[6dsPO~^@q=0^Ubzf`/es~ھo5g}J1E=4Ȼ?8*S* Sr7i{W\m+_y3:KrR߄Oؔ5> ":앂=ޙ !_uMyg9`U;a?X̧yJCykoG<~/?F=X oԀ jnrH6*BQL|Q6dj/aUMW'yi1/G4Y?"X1i&ڔǪ#/Q>pM@b h)#y^Q1/=2t>o09 h.rƤLY4o / G #AfM^{yɭ 4eZ>s9YQ.p==vBR'w/=-"|^lG"AR*Ke-xSn1UP?Ѷ-PfۦH=Xo[-S]|.=cC.pɩ?(nDsHNqd}xF*#$J9OAxhNu)'u\B(B7jʄ+YFLoǠíŎVb'i3V\[+G!9AjLEHjhϧEsn#.A>8mFЈ_%yc'܂~z (I՝߇s[r$ڱ u>r||@LDeFCrs[ *Z!LCrϣ5m2P;"]Li6FKyiX@ }?τB$fAѡCK<w{yO3Zy/ xa8|0n] %qHt˝z$Qč㯆/*'OAhG<<(7#sǨuB>o<%OGzLݙA<4xy0== G_segjGf]Kׯ^:G]6#@FcLAsyp&Ԝ@ZP)c5'w?L~h_.$!ny~swܴF>(+Sc_B5=3ޅe__&/\pxt/kPO>14z4Cc5C9f|cƐdc 45(.`:'V1ȱ9FѹL1FDT )oʜ#)FT+frL=6]`ppF9;Q\9Bzz$Jȇ g="9!C9>CQ0jH(= ۡΐGa*72l u$5^F5&tPN`$xYNjXz! 
z,5ܑgL  I3fdbh;c}qǜL[c\hq6@ίC3ב Jr 4]sF鱝~GΚi˵#;gLD/QV;dbaZlFh1j,yP]fS>CD;pO\5~OysWcM ±~钲q7ǁ!M} -^ƈ7č:pC"wɏy7u_۸9@b͹,bk 2}*rcܟ1 Ev|b91YȩqkO㝯lN p+oئ,mTS9u@(PMջw47h)yg3F9)4Aa1Lpűis`sl3,`ghMvpôLCV%#% 2S(@W}qG/Jض^2ѫ9k/Z_IN+\l:X}R¶l_]+@^N>.ݚ>{R 98wZa L#6 ghnp $*Kq罈]flq̙7=8Y4`j_1>cPt)O zKu)7qhur#J\,޾ x9\}~5eE 7Gx.˓y}eƭv΀ ~,[_8ޘg}u V=>H= 5Lzs|Nuߒ,d6wݝגc|yܬXt*:J/9,zqyuD"Fr'7i%p AC&r~Ƈ?eaadq$1HmE?c3ut2fPW틱>#ݑMl"GݙfYጙ>0 p5B'pXZ䂺"BAXU2PKYYЬ5mARl6.1ɋ-/oNYKtsօw}F~vyT1>V3 NE#Bх#wQ+F!WYә2eW".iߕ CO6<9š؋! MumvkUv[Y73nca(fy`?hoZX:s#|~j! `osFŕF&ylX2p֎Rv1Ř״q*<lHU1? 0TqL 2]S&΢l"]Rlbdda67o h7Wjj\pHc"mCtFG!qT89.Sώx+^q-@KBǻʁ7>}7+1>כ?Sz߻,O-ه zElt#qޥ 8ͭKr~H,U :Vb{2]I:+cVbb {\3# ht{xlRS{H6q#vu4ЉƬpbh.sD1;>:B0FvzՁf۪՚wu`3=Hw|7Vj=>l.;M=sNNGK|45=/܃e') sTtWwff:8qX&SX]M0hWFyE:3e%>Q{cW7os] 9@ պcՍ}=6>ful3l.~AlIs(CGY'~uϟ!͙L7Gczr}9yD3{՛7Wm XN:S~a&t^+ \ie [aW1Z3͏kue=ք|\ϔo®>Aҝ,W;s}zʪMC:*7՛sp4a._bGGխ}>QMw!,|&G?|H :;ئ}<[W1?`W:C9WEUvS!UlUn3> /d6oʾs sGs_să Ӕ/&[޶19tRdI' r<-|fq3f|A )x#'͞҉nsP]~F⑲E"IӇ#,g3ۀo-Et{=H6W]VN>O08N9[U{%UAV̦A1ft;aLs!c^| XI |KVwR7a锳EV_sȿ ׿x9{]_lϋ9iyx?+)/KylZ'#aqc2!_x[,e/d!?:do׫[8xB W=`Wy)+@2xCoͱLG?+Wד>^Fև<}Ɵ7 ~p>O ۖiŠ2@*5?wfyqߎK{87^63gs<wlcΟ8 or}?Ɲpcq籍='wA@ы6} IJhpF*nxkL׳Brp~5\Rn>w,x֏>l^Up()ʑr2{=r֦(/e稶ߢ(cN7o\ FvI w˞(j_5fX'7N6[O.Zb7%]oQ^=/}qH-~ֱaOQ//P;ޮǭېgΧ.܌}~ӌTD ZwiFTW? ]Ox|)׫e3ޟ|j9?(}]OpiϾE=>\9ԯ=l\oކ<Y`uVea8YG(wZ~4ͨBEu 0s]Y30/΃_3^I'>ÄT6AiXOyؾzO=7 (װgyXp#l_=sS]#r} הGAߏ=~xg/oܨ?,_us??2$p|n~|=VoV2\ t|c=)/{!OQot?9)̞+/W{(+ zqm<Y_jٻ;nE3HUk\c)7њ<i_ϭ}>]]F >ă{}loG$: ہ@o>/&:]>\k>>< b{O}>ZUuuFQˌP&oה=A÷.DBޟϻlc~OJgK_t}@xZcW7~?Y$dC_ߣ^\7B|^ht'|ˡM<r*u9z2)^h]#OxFz \GZW϶ʊv&RL^}׿WP1H+mCnr?9Ήԃ'̵s?:8V+ɾ?xsq`7)!h/gDH]r0!||~ ;N] 1:PZ-sg?<p̯Drۑ>P%!*Zd4W 2ut2cS<~]fHT+3eIy}A2q6|. 
txm78haTB`1'kk*ƑbbY8Ęj2̰ǘ%/*1#D%_Qf[%üu)Ou WV3&s%j.~dp+~~utCX3ǻV2VYx:ѯ-Blmd }RrX:0l=?:>8P!da%j$02k%3V';F8~Y[x ky*CzFmFP<zpd5\9W3̜:pk3crZmҘ+yG@=!ƅʛax̩s<:"XTmTnJ:?+o9~_#p 1iJfʎ-4^u_瑜溎@'SO~V~&1ӌ_Ĵ0ceiefqe5s+f|$X5f̑CeGpz[uC'U䛩Uy ~0ѧz?":")s-7?VǓ\SNs=z|gxcFu!`]O]Ǟ|,+|_a]U2C_ /_.Ϗu:?2>1m FA9@;o71g 1sQ^ ϣy O^Qo`ܟxo<JvbͅhcK#[e0t@r8;1_O6~º؏M=nGG׿Sݑo|P?5 'ƬYd?h~]?/$fnϏ߻R!A8`ӅC\|'`rA ~`8?[u](/0x{>2|7waur~bu׉~a|wr UEW_W~i~r@|{> %ίo?͸  ~d/,vxBM{Ax>) oA1~>.ĄhzRG^ ' Wø_7 "'믺+It~F%wy~ ",O҈+"aBBU$;O1g͏wv s}7ж? y69!;,6:(؋X5ݏ%8%|/񎻹J:!6\9ɏ&nXy|nK[<||[dIEg&kꘟq?_ݺ\E,=T!ntPa'kqjUUax 1iNnS>2$^5a\UIҪD2g9T[rKobޏ[n2?Xugc|>?*odCu\;qeYxC۾~m7}yXĴowe-{gkzqXo/6,>~) ٿZh=ͫ! [;ՑJI@ K)׿x5߰AѺFQuu؞ߟQ8;>뚟L^`=x A>* 2ý>~_ +ߗJYܑ AI3ЕrzrVo0WP_7]BUi)w:^xfwcc:sX_vP~:y!z>vL`ΗU]fNwϿSQ^6B +UNP YJ KE?U8\Wv"/Xzo%} /_ Λ!3OOs­M-,s?}KU1x> 3MUx[!X^ߧ?4Ea{95 菏n'_؆#kᢓ`*PM# "k|kfY8f|#NuEoE8aOW}+weLlj+p %ղ+}xɫZ'1}+J 8: ?ޏ6*V82{gioļzI@Tvf'Q-⛖3~Nq!%L[aZ W%`hY_ċ͒!^c˿ISX9(+~=}=Tsz Jq\`c_7ZHMY ˔ٟxAnwVVDKMqb%E!;ዸ_L' H+_ <9/._6b0KR]_$_|fȮ_lkp~^Kn>|ꚿ_gvV_`=gxi}ٞңw֗X+6_|)_c%_Ii:_n O/~G`St>Zw,r3-Oy۳OJW"oc߿}9> GJΏԯgQl|g\}B>o"xުOu< 7W/+\@I1"y"~zyA5w|OЉ'eMC/Og9^Ua>S? N!z? 
muK73s>0/w/S_/3O.̷'EE_oGqo> A |g /6ޟ7?xӾȰ|s3q׀]/Ho{~'[WPD|r~F|f~i8kӞb6qi7 瑁|WruzD)w/ݿ/ /$._//>5OvyeO|W4̐-3T6/|ɇwx۴lōf}z2df>f~=Ob ̿*~y?dQ l񥐕_>_>}ې'Ai]zYBs`W_ 3wk?=^><S+/?JX_ES-}ՙ)?!Yf}{#sm~~|be&?NyE~Neo_^_ $尜){Z?7u#P{߃6·߱?|_0O>x?ad|z|^S.Ue\{c|?/1xPq䃙{_ZBr̰@\O{%IRcU\Z>Lc BI J?&~0)S%ʈ@.v92ü' .^!2 r\֛C 0ɇ]o9;D5iSV>M>kO >g_o}y5w=n;)ߧ>wC(s>[_pj_>o q]o}=||=r< rjw7u=Q|PWulK;חhЧWTW)So~+8"aؤ4C\k1:Gl:; =cShKeJp< XL@`6JB3^d~;ެ80j dr\ŰG`*3ΙiaR̉3#Ws#b7gU윋͹O]N^&ʏ5 E8ˇiYj7+D.>|CqVRJKnܙ,RSmЦ^VV BEXR,yCY5t~-$6y6<9zxqq[Gq0ws1Π~j %;g="O~1 *;y/%dtn(ՙ!$p硗fk(9uW{ЈhŜ믽X5X5K_*˄%d}UM-瘰Anc,dk/&my\*뀋E\XfIr`/!ԷkGs푭l!?~.Z6Qֹeۮ;y⣳i\s{dz--R~NNAXb.e>dӹ^/8Z ?&CVZq*yu#g }o=rI멪UB_gufLZIڕYA>{ ~d6O]>l ]|Zٿwtv;3^I/Z_?{=0P|Jxb~Xxu>b]|Х2yx5}4v36{Z$ke}ZObHXe.|3O?FjjMuZ3eb޸sQkͰghʻňM"ae: ]~ePwdp@ÌO 2gֳLL9>v1 +ajrxnmx蔷Jl?xSxNN?p;s\%v\Qكrhv9,1r8j^%6{qv9S^j rV)\ʔ!V0We VV59%Ѥ%r jbYjǖn[2Sa^^ei)*YJX~;D.FKrΧ{ǝEQe{Ss݃FҊIʼYxC2w22QwߘvFۗNkٔ1_d * )S޾^y\/٣L<^ǿ|y?S3}֏}Y8<%K_7Bf:ʌ}Kc5TG8뾿bQxew({Ș[bڠ¡y?VV~]~jV+?XК2nza0@,G uO|ϑ 5b꘏|_w@ zC_~ݥ_6uV4=z8_릝-Re>/:/c>鐵Ya7GhU&dR]a}[aa,g7_ 4xa2-ʇr|>|pv-|#*ne,6BSQ5 "y?dJYE})m\~}7DC1aeM8VgZ{vpĆ gߍ8[ge_P|4&|;|}TZվ͌vEh y:θK gЏ$盅1[ (U[7yU?!bIn}^L` Ox2"a.ysGu\g$ЮɋcQ u/|zGdZ>@/{ce#演'7nO0F%S"cT+sqR(jFh|찭L'*|ۋ/>/*P>}sľOwr:Fw]qٕg e<(gQn'S^2gM[p+kߕXYcFjSsmOi#U{>{itg<snj[F$_՟~_Uzg!?c^"}值͇YUXHWdu zauKfI;ΗK9wGePoJ. 
$ĭ#Oz/*\OZ3z|k>ngnQOGzq덩mYȧaz^|""X֛.?yc9oR>\=ON~˟zxBz{RuGohw֏TlӝuBbEqkE& cF?\8֛Faz>ASӔ-J_ǧ^X+["F71z>U'.WBt&-X˅xuЮ|$…wbNJclv;Z9oe7 QUֻUO-Ȼz${9ǶcߵO9zכNVu΋O {P˺2뱑nM9:D?l=o:(oCCrW.y<*C]_>Ϯ}/ߧӚ1ӕ%wq}B>0[qkMU}Osz+ lMӼ9c\ ]͗\~_p_}G>2ճa%Y䘝`Nګjb)+?3x}qo|ӛϓr)#^~4W[S󩓏}{\:s }/ɷѓQ9\obQyƖlT=qce: <{O;`=?܌(3+e?Vn8D% ~ק!1q޿o}O'{N{mIΛY_@L3AoFVc{m|/Z}`SE9Azik扥/ףe{%i gn~궯z `9Brǹ&Swʲ7\L,4ثĨOrUd/V_{ߚܛ!:?9{ы\~ןC7 |qW~݊[7fOn۾3~OKIMu =_϶+8Y1Ք ߜq~y1}Ώ8QLdULibYCn`6%wby!xbWYԋWA5o֭*[hg /2[tM [(,W;0,^Ѭ dyXŠ|-Nʪ0HWɰo0r;wEo2fE*,Q{#( rxo`YG(M9Nm^>3&f$\TmFB9h/"c.0JѦ E5˻_cԇFQ, sb0`1f1eȫJQ2C-׍"aT&faER؞f2OEqX70YQ^ߪqV%n:B4;BQ GLA9-^AbFyFsTP<燣n}z\ I?o9O]EݶQ:ݨc8 -|.9}= ]Ld2'/n͔VT%d_</T+wYi,poc)e[}6=,n/ZY^'1))wyx][rN#)KSy=W}?X ] Dzc]rT#' =~^EBS | Yc Ͻo ;/o k{b zM{1_/<_4n2Jw̧A]d^b4cE8_޽NH8'濰Tcfhc):ĀY7KRku828I9g0wZR4#sE^+eijxHHG v ˹ +BFs4)LsMcJ5 Weʮmz\;anO~j҃1> C2q[C*NgoʌE8-zX U"3^쟺\(S\xy捾6U顟জZi]NgBW2OT=={}? fa uܙJ 1PRsP:<8*H9TR,ɩAHyssH$=ӑ kSG)YvAw$GhBXH}b6c=\k #jv6'6IqULaRv6ȝ*k՚A(?-K_kR]=7شmѬ=>%DLbU}=H${(R0)OQ4'ŀwe ODSX$qg4W: )\cR$+SD,٣=>P)j>pBPn̊c9Tl(NtHhf uOϔ_II>۫߳!7ՠ?r̍]st8F}"jYڷNpebT=R]CZl{a7w3x\X(UWLGpS5ȪJkdfxtR1K'p Ubnh' ج&0k r@fii1kJ]n0|1Y1~k2ceYoA⌐! zTFO4^){JGv c~-q *S~f0m{gOOPC)ju_(psN={9p\՞gsٙm'tbkuz]"- <طibݵ;LЫ=Yr/V'z:3L:K:39(;W?Żӓ/F4͌wݵ)D*B?D.\3ۮ7Yg^XG cϟwhW/YTYX2pt1X\No̢ƀd-RzB'igi[nJM>q>h'bayiQ(dYM"KbyڽiUs52jUExSNV돯3If V7#6=*n䔱#(:岾6;Ҧ\q}flA *XcFʃE%o.}V7 Bd?M||I}FzG=S{۳S: =P{q]/{ʣϻq2{txDg5?{P0յ>~Ͻ_gɓ7 Zc{Nmt*n1lDb_?1Kң*oF%{OpDwxSV1|63*7}L|D tES3rқySAsQf{1FerނmPue2M #эQƢׇ6²Weߖ+ Yz]1nSUc f CmO!&龜icU$꘴?E0Y(>AöϳQl_oiRԴ;P>;ApRw_N\2͇H~Z̤i^^TB&9Ǹyx03M{1K vG/zeQ)\փn.5yz4Lg!*u`&ҫueJ30ZUeA6`t9'"cb"u`Q*oCGx zL~[@ߊ#Z֡lY1y2lőE}#)а(o5\۟v] nɤNn/} ͬ?>EJk.ۊ y<5cD*2eo  5f$tEs'oTpD~d% Y׽ނ+Y\ҟ>3b? 
uZ,ĘUC{2_2V[U-9`4j}K֫Z.91󹐥elբ T)ϯeVC{B~&>~sF~%, ޟuy|_+C﷋r~QzI5o"򔷟5Pyʺ!w}1 YP®0 R<^SrϞS[Ȣsy=0$?;ש]S4$2(/~+"kʸ.^81zu?%>a/Uؑi_5ߟ4kh#Ͱn=3U/&Rn( 3 68jB5+W#GPӊ?OXwcױ>;|F U;wD׉;7!bigZ÷CwɃ;+QGzP.?78bFSV/|Y|RԦoSC5N7oOJ&8q5{\W{t5fgnљY?SjuTz+4}5BɴqNU9K{J2Ǵ!$8+CW̮dNhɾ޸_שBwW YRHhu6p>upN7ElAٜB]MF>eIҽAf*ѩRۦNVnCÍ_U;-plxqwuu/QE:VJU9T3VlgZz%TG+WHk%+UCVOʹ+rYW$$#eX5l]wds~Z}X߿ٴ\>^/*uw\IUSTZ&̵ۥ d~_kZC;Yj0QKW/9ӫ3zU=Ƀo =Ŋ"m<|j}S/}OT^5G~ҿ{}FW*djCcq^̔0j/8\G;ygt#q7Ǧ .{Npwcg4#>]ƅ w\{c{VDʋvF~\cx`n9ؕ'Sy҉ꨛgr!ڔ{@qPgѳsRܶa|[>ēAOȷUKח6|}t o[>sr&&wcYDó~vi;K<촴@2pi4;`TadcyſokxبH h8Ngsba]F!'t8970RI7dنF?}|b6ćY.[H&CmlENXN_-籏<#%9CN səA0fl|@\WCp>.1"C\!?:E2lG7c.Pͫ2-5h;ޘ )N@+)6: y{N}zanP`s(8bpFᄔ' -48_&VqOY탖 iQ)#LC|>q,{R[ ȍU(h^i B2cARJ<2T]AW)#z*uJIY ++%c߿nw]yc^뿰Ƕ8±#Y!}e)~>>{(7×K#dE #x)Y#W\3AxqD;ZUF{7&*7_yrMr( eg!C޼C*}?y]޶w=CW8+7{Y&^9)ZhCMP =[د8߇o}֣B~ߍXE d + lkpY快B:Hg"}g3\mi9uZ9^߸Ǟ)yrĪulX3SʨV[l(fyow)>Es*a +Q8f63\pt-7*L&o=E(9s|SG4uI?t.Rb`ENɇ+RusD*Iy}N׳Sޔi9d, ;|rEҟr`M Ԟ$s{BЈ]uVzp,+o"+C~__ e/rbR0˩?KUf ,Rrd r-. ΅ {i iT&Nw];ſ>H{H[{[|h)Mmm8howo02< Ƀ?U!Ɩ{ 3~Ŀ_}ſ,Z mj/uȳ` mG G lv3!!`qlsu,[ŵϣ6fD+(:2ȷoJ-WyXM7Զgk'ZT obUׄ^twY'TCR_XGE*4B+bP]\(x[07Ox=ٖQ~Sp5L1j$§c`b@#ٌ;vΥ,_ݞNK5uڎʲe2^b^ԸBЭ4S Cn yx>ԏLj@/I7aeS\)SPKЇsUl9U7 +UjqyVKj~2ޏhg0W-߰O9<P)邉p2,,jA t*oɋ ҸAq}7(Kpߣ]!<Ɔ| &[ɻQKe W|B1%6 1& LTo:c~T\pE6rSw̷B>`G? פFؗ@}SY1?lpy6-Dcx}|+rEt(;VsZO5>Ӝz7qs=~ֿC;4ڿ%W}Oz<~?9I]Bxkf*xz}V&QCs?˄E?;w 7 (ΌtrقLz?aweg5(6h|JL~k:K&K-!t ٿD5[,P`~ :/]POIui׵qu8_JJDEp=`GT _XW!g'j?y>+aLL\&滯g2,ZdmI4Jb9.$әm SZ$LT^ŊqW Rjg-JZ+|IZ˧_{jH_W`KR> RcA}w⸣]A'V[j{x U (  DSXY f؅%KYI)-xd_\nhvOggzLrft$~hсLy=]s<aZk'pҋE!f}WDLJWR4CD2fg V)? 
2qҿOpB.`X!m_+4S)}"ns> NƼ%.pG?z~1K1n PzJ9_G;Ӱ=^@4{)foIj\+)Ӟ(߆'eʷt]B(gyKj΍"fȾ-ȮiBx1i2<}=l/4^1aa9Dvcڳ>cẸ%X.֧4_sw:5\t;圯2!;xcy?$C+7R>~_:Y8* NSAQ|~ߟ=YNpc@:<^X-|y|E$=}?AP*ǶT GDlS)GOlkAlH`pl'Eb$ q3 1WG1&$8jd!&D<`1o=/)FBLrBSDO'bG~Y.&98EQ7A&WgQtS !O")#x[$e/GO=߫$ؼBE4)m?ӣ P)`}5&~ކy"*oZ~ϖ<qf&]\Q{zI.$y Mϒqhš xmIzkJ$t-H%gR!: @#) $$r=7!Mj/( c= !Jpb!KJ-|x\X?=@P' 6bWʹÍ`7M_!۴LΩK2kc~lNJK|_Np4=Ew[ߝs>XrE/$@wFKI}rJ1I}c}SICRc9f=dޓ*-r'_n؛'=9<'WLWeMڻ&59ao#z2S(lߘ&~>~g\w;gڏD𱔯:y>yP Gt;R+ؽ +nJo|Z|,Ҕ LDA7zy < rRyR9f8?WX {8/ `MNyl=nϫoŽ`?#y}ކDQ>5>O _F? 8ϣ s?b%j).?yl?ҟO M|>zu<13&cK,< ³5:.)W]׎ja\gzEPTkģj"aQ 'Ί㬢 b.G>~{=} 8J`l6!tJ %[qF^>avs`P @([e FByj' l0nյ=wU@omz4&l AvQA²}= cדLrVFl77c9qaDֶ uDY9`A,>%\($En^V27;DO$̮CNZ2l|)v/_>5KiF=.7;`H(R_qrMF8dǨBLĿM×u|M;@ }cJ`Grmu{,{ܕSLϩQ'HDlHDI0߽ߥO oir4{5N /OBhU U%;[ajbx{}?/MWy%pmYuw?aUxi~;(D,TS[na*HF* AN]Fn;88ۺ{rF o)3ySX3p]~em_a 7=d:q 081C#yY=3eM+0gM+4x__y]8gi2l=m?%c\œ  nyX*}aXj]tr4y6/f-gFzHa髸ht2DSeg#˹[s{c:; c'gH٨خ)'y6 NΌR.Sl/>X+"ISj[`Ik[}yvRj~tK"<_x_b[=~[ݞ%+oN<{*4'cH9W05DbR =LT/f$Ul߅G8WC:M]fL> yyqinnM<7߷D4yۿߎՒjll/x|Iyx<}irﴯ{۝݆`X=6Q{ov_tdPMHqǷ=>0e_߰>{i.F߭0vۧ}nn-/iMck·Qx[Y[gzwag҉[e< /|7vVesHNI9w̢i7)/B{bR[SF:c1\ϫqVqSC']YwuM@|wܔs;|Ъz^ZBh?X>-bmZ[X|l>@: a?~Ǿ9_Eǵ4(Vx+BOc7>ۦSR9~GY 6;ؗ xyb/ C)$ba4}ƚؚ@gz`={ަڷ[S%lSO;ccH}\Pd"q7Ŵ׉iԇԕ A3rb @L>h>l/91Mo4 <+ϩ/_3A`[1}p#b_76cO}?x<[ ..r~Oc y'sRua8!Zm%4<# r<;~' \t'yo>v۫237wS2?TH$%CH]5ӊ>N{T8N>u>]v+S`XEE{SU,OH.BX XĢ^ǧf\)Snb`p^8\?H:)TXPQ"7'ܟ'f7Oz5-12 `[ _bMr.M}Aqy ?qw)}ً39Q {7jd.uZ[~ L+*zf}cx" 1jt}8 <DPG ߸:k#ͫӂUWCgOUC0%.E'T'|BO(s.z w|sF{Kf<ک{\a/GoWwbw5}K4`)*f4ui,kE[p)O%9U$RhK-egDD{hА(Ʋ*eʳ {'_@GZxn}Y񲭱qdAf"; |P޶S0>̄ơAu@k#uYWu[.Բ:O"f~ 't']ت\a(u6N^U1S7j}Seja{yƬ`.+bvM=b6'F.Nۍjf6|Fw:УqVZCY6ѓ?Ny8Jz:=QG{?{&|g|Mxf.R)3w9߅e-e9 Ywd|c i26#7c#^aErA"2$W/$ύ]rTXLR}΍j|8M~_8C|sn.;s]};O {]w)}\#…_)[tѝ )dz[|[iu_-:|(O1sdǺ0H](\V4cٮ %8B$"Z~)lk24+GX ԯ^]^o8w:̈7{ nנxLy|=PxNA9{-k1fbF?9 :`s>;'/*,ң@(;U : zUWK53;|U_4پm|}W 4Fe}c4>j.SAphtLn:֋\ ť/;{;1~2y4W=9Qt<"G|Z?4;1]G{$| op.W ϺY\zAr큋97rnz!ΩY8~Qub _u~TfQg g)[Yu!}G ~A3Ery6|%xUEWTȿ+RAyRW<مmJmt_%F$G~Za?q>#܋\2利*Q|/-9G 
kU{ڼaQmOtyܟ1*΢d 7$W1t(ee<+(ъ#J"a]=4ٿ_!+sXE[ /0u˻Q ,2bp\LH2HGt{MT?47X\3_1 :PƋLS󎤾mveވ:y`<#h$nwAsi,DZO i@My;q9Ǵ BU4~&2 $gjɯf ^xnfa{Z!pSm]R+^!l,5**5z/o}Tft(T A=xl[ą2vNO:E%p{>ORU_g: wNr_ ? ݏus0xw;.7q>%z0xӗn} 1~M0 /2jkOh nf27Ibç"5 ex+FI5,7R{]]=OԯRLhr]-#u~{[}Bqt`H/)w{?^"):5)jSV{ME{1nJxkTp* WoK4d6t螄[ ˮܱ "KFcW")%~Q7t~W=?0BAuO]l`Qebzi_qB>fq=?RT P 8z($MÞlovApwo [ lݚd4KE`ɛbn₨:ɞU7ҟz{'H79& N4CV9MX'>"ҺL8"v`Bus!t?c_d #{:a)eV1ZӅذJ; z*[c!w{+&̙y1 3^AS;!D؅R@ Ԝ@0Y2UbZc:ЧU{`Ԉ4Y4 Ɖ8iuȝϚyiU&lsiӄմ=:.=Dye4EsY9\Ulc*u+DZV[u],en\X: 9t5ށڮ |C0[zʵ`ˣA@O]/'.-fYC\6t<*d\hyke 몀Gh[`-;6ujv hhH-'cU@HUzل<]:= k'^0#>8HwI~6|Qrک1P'zs&+mV`v&] 0 X, :11B/ܱ7l`Y*ѡv rβ>=ֹl| OqMOUSX޻fkGy+*6G\fh+T}$T*O`%B9A&uu?~hE;B3t:ȆEt ]b[Kծ vJ>$6zq }2bywSd55~t| ꄨ+ojKbjZu51} R#tiC۩3MGS.zV62ok֠*o7vSARt jQRkG0J˚5$-;Ab*JLֲLfᛃ7ey :̲#~jچ1[V_Z6YD\+wwCyejI,сe%pC)EIHǘ4nR4FjH1]Ȍn5_We=6d"]s"f-vbI,AR\X~vK+\ SA[UnQ]]VnI~o-H危BX䰡ä0Fe tUcd!Y?w24քlmPf,ʼn2ڟd%; 0~hB<[ 4՚3 GKÏ r66G 1CMiiR[8u}hr=BL m0"v$74yVL {NLRׄ< 46kZ`>6r aǚTc+ BXv8ND׵'bՄ΢ŢIRA,;s)sjRoڴVxnTך7VT6L}9 dQs`űs"J؄̴CX3K!4eȵ[H0 .U^Vy_Ry IXֱt@Xݗհ^. 
GH3nb:QndSVm5&лeL V&$j%i[D`㶚,>G:V(/ͪφ^o}81'Ƅ5T53tK8j6 2c L+Ѿ ǃ[XtG&,4{(s?s̍O]XH1Gv<6sRp57̟Wx+#:FV~lJM$Ye;e&P+cCGa#M+Ḗ@{&/ɿ:=PjUՊbC'L= XX𼱒29JV ?+ް1et .k;'0Xqϭ'_'U7悱,0k U[F`hx-g=ŭ:(oV@O-(kbVV) ?]kAO:P<քcEVf2;O+qpcLi^1G#&Wi?TQx,Vnk: ֛x3ǮΞSP"Q kX*zZhjCƲ<ܡ y/0m+gμ5Sʲ,$ƥ1nj`1 ֊ נ̯!С40yk~`FS^:F@y_5l?N3F/3x=@M;鿰.f 60o{P1i s{vJGۄֶl|6I1oh:4GɆc<*l6g?6G-s̾[,cm/`Ͷۊ<,Wf^r˨n bSr`giŇQTgj@IZ&i/ :8nRb.֥s(j 2Q3LGK.zK=N'v GHѿe{!%8W~I; ;IvcOA@BqCCs/}tIߡ OAAj ˂Co(42J\}s#i3s\1>op!:ߪA#n< db}S&_,}ʿ#$vLY?$yߔPRWw8ߧN-$,P.0zI`ip<Љ7(`)K=Hm'IJc(56=m& z@,Ͽj0; S?^.TXMUvÿ#7]QZqIgT  :=ku-b|+v.n>馯MCIe}64^+E۴pLk`PVj49ج41/ ؛jyXh{EOFXc͙גT+)~zEcUW:[s_:bҵ G;+ nUh4Rt^<OIcJ~لBe-ѹ-Kھ~!wӻko;7 oV= w|bKʹmmlP>cHH61=\8}t)t6fIxF1ڿs BVhӽ_/ژnɏRfa+KSdDv0& ±Y Jf9\:Lr,aB4Z q:PR|dh!\e1Xr49"oVrt-84ߣZq7%=%'r[cm_%aGc)CEڦI 1H9bX†B|wRhv]A#Z$*\Z#O jWTTM*AM;>wڄ鸕>iJZi{$Pt{Y-Uڭlzrt T킴<>vhN_`9Hǣ~?gg4>hz;s@~qVڞ6hg]"OtǴFtmn+hEe`v6/JR͂ɁH!C=="BiԛҵrjΖoYJ2[WZVSk7h{7']aE_i@Km+fb2=\V#ŕSiF3~Vc7/OmSg,mGg|])T FhM]6ܞZӛ:+(ϐ8zUW-3TFk-?|xo}l&c^H~)׍/?3:/?n ᩲfE(਍_{x;|zM4 nSmFaz2KoV&qm%Lsp.aO(MP7|߯[KRtybt ~g|6kF38bjV3YuZca7K@>=!4MJ3S¾EN'4a^,'t̵>.5918Y+}=ilJn+ Ae]r(YcRΆJkjs京Kyz̆5 t֕7 mo* c1Jbf5:}lʷ4A4o);v6atEoMOAw0%p&7J3 0Ơyy (MɛPL>NUATiS}kYphLRt)Xsamu8 9 5-4{AFu% I3*שGj(B#VDAya Jk}0K-hrc=Ct5L" s|ҔtM+|zޔ.k{a;JFH~e..Tni 4ɿΦ^ir Zj\\^lN^Sl)0`?Ϡϐ?-a 8Qu]Z6]P_iKSj_=GfNn#{BgqU]Hۈ]vkl(&׶Zie!=RBfn3g tt'vJniq ~}) :Þ!2ht8hFgc=kg`},w3i {n,bN1ouR=W٧ 2 ڰSIe-JMҘe_1Nݻ͓<' C4O(K /L5J iRrBa6#ޱ vGiwg^"&K׀މgTW/a7zKECҦɯkvO] ,xF ѵ^Nq)pXҮ;t|oJ&]ޣ _FۯHBQ{텓{9ʯHÒ4!ڧѵ@ _ۮ6b<)J4v׶*Qڷa=ͨ](]k(N^u..OI7G4YJPd! 
u>cS_9n毜Rw6ThlEq/H:ĭ@irf!sAjT{2DCir`R1Zk7\t7_: G:̆"OA@`Xƛ<[3;gkwbEv9KC!JKrKKoCܲɫ3D/"mQ|:_!=[CvPoMUCva&-IwV޶o|iL[ 3@5fzz m)ϐUOьgiѷ_#65z8+J&Oo\i{l\Bk\hHemW$lߌB2z̐ca!tJ#<'+J<^"JWg_sy6HJ{0͜; 0nplzlQUkKBھEǬJ_J`Y@i;@""^n1dp&nI+W0z Ux/Hz"+&.c(r?&nr3H3y~E4f^Hir\ ,67Z݇j(Ӿnv-gR:q<݁<]Em[FESj9od7Elkg鹎p0)m؜m%Ӭ1V AKF1h3tɨۦu\Coh;TisJq76ǔoI}n;[#oѾo;L>=!߲Wkf0LӁ~89>'|1nWqғZllh0 669-+S9??͜h56NL_!#z3ir׷k^?$).U #L*mijF_ϳgg!'ƻBEB-$ZoΫ5o(yxZl8Õ&}HwE;I׌ҦkW|<^(hz.}F3SQ5Icy)mzXUFTYvXfb8M;QUn j҃BҬ>c_MkK>KvnkEdms٪SipTC|p9f-q\gq\{ IwFu'z$H;>}ܣJ3ĺv2zv#3pé[iD~SaD Qf(mk"!q3rIz:G "+r!4/*= ^СeȇxkTGR$Rʇ'vn<_r~3ݻ"i}![neSƛYWH/󶿈p V "<.!3B{y,wyޏ#T 7{5f# vch6_D} /B_z3rHG]Vq/Q?{028tUqx:fBTG=-W1v{Vs9^@>xv&@>*| Qw{^D82鏿jIIl!HqFB[DGBqՎ LAyN&?MHwɕrZqrȍ-О9+CGgph}$"*ݒY6p.Hg.r@6FIB&w:Թ86 ]c҈]GwD8@[^7[ZZB7q#p}V-$v5! ׾GBAzl>\;ctP>Bʱ؀BJKΓEt$ro Lg8u6;,|v~=Ws9SL S,ba5^dADP&8oa%:+´nkhP3+ڭo ] gb,WRjGB8N^DmF8UqۈeY9{#' S NqwzHw Qӄr_x6YBȱ)@xk[^pȓOz!/ aqU zl}foU, z񌖌F6#jlG MěC뷭Yh TZoXxP2h0 ߌt%zLZkBzT{BxV6@xMOB|}XJ4IKq,cF=!:X/v|Гx|uBh3R"z<ԣ@,+7`v%8.LN5ȇ"rk:ο03!qVje$B/Ga$9+"ZKܳQ>-/B?q`6'!!m5B+gG|Gly$EBhg,?ٟCQ v@+j{]BhU] %c~e#ʼ!1Nq4޼k Dm=}mBxM3<lBԞb-2B{xSEEq(MK23^cYTF>ʶmB>VUc!Jɽjk !Oʻ Mc:x*!;G g>u1!QU"N+gI~:gngJ4qwsdyxt[CwٵuM@+}/¯<=E>f:sM!v{D9^C^7v 2BY}r`Eu1E?V3꫱ sA \W=9Ks1Ap„Pfk8b]Hg=#=&%r@+3u&C!k|)@{w>xѨRC~"k}^a$e3r 5@J[txuž9T\95,΁c1Ӧ7yLj^q@( W"ù9oE(4Ӧ /םބQ)@ۛ&Rj0Q nɷn/wGT%D5ÿ0uC/|UEDM@yL^.mF(k&D8#9!J38]1#9IȇS׭|Ȉ;&!mrXiB>fܳ$ Xkcbywj> PQ rhfI܄|Tn,!ǡJi[#."B2"]Ty@ d/:!È&NTzA7"4;<E>wf=y|^cW7$c+WGB>ɨXQy3S3{Pa$KȇȇXK $]B>wT9xpz4]*!""YMȇH7Q!!p߽q"п uߛ$9nm^)Ɨ/-x|N U<žri !4N Z"hQcQ͏swʹu^{4DŽo1$ Qg-~%+QX 7*~')_o<&JVD.WmzU}Ռ0Gs嘘I 93߲:M9+3gx |k`ۄ㪌|.A=/ӌ aMȇՌr%ktH>kVPBȁ;Qx춳eE1?= !U"WcBA^ 23%S ݑU{k'ؔ^$Ê* };`cj!!+ \-gU!!*}:a r~$Y~f'cl}#Z.Yϛg#9+9|@{|yN:r9p[)B !Gh53xr vgyb!ל_s?!9#%nI/BN/Yx4M9puc !1V"ʱAx@7 |=#ʑcQۯ)5O;qB<^`rq{M98X}7 FI٩x@2^B{_s+KљE>LZX4L8^{ _-* lT.gR2U^$rޥVy'נc.ˊOg̵~OE/.SQ>!u띹_DsH[hdĊz:@ت$!Sݨp"W}F uc}r`Jcpz%!TO7φ#GwF>jѝ&!ܷ[We@ȁGtRI9}ɷ8pgpV$9j,ac b1!䨪K7DlBWܲǹmgBNc߇qJ ˙ܻ]Wx15/]vG{o+/B 2^2-jU$ځC<?++9 iXN*}rvIZez9f Q"*!}_٬I-7X'w9 
!^[%"޸6|fHȇHa^|Kƹoy݊;eR/B9ͤiӋ3|CEw`V^@!nd#:WB}KbcpUB>T.Y&byι|9<]iBiԋٹ2GB>+6UB>T6v+Hȇz3;!˦%C'-"زM:Qbb0qm} ~9 ns9^$LB j'n8)-VVuvж+ҳDք|y 5"cJN.lcl^CgY5τ|\-:!*=K|ӵF;^3TmzB>ԂVY`$R;9?9.9X݃0;X)Z-W (_D= > GȹG&YiBC.7P/r拨aSs ?r |8MpB!S4#(K"䐳Gc9Sz罇E!Gǜ|21Kg# zq(vx92xȋ|Cim11!Y_?o*91bv"MJ9D"]?;!AV~GN9T:PB!a_QFʪŤ3!5'D8`o3 3#ETOxSwHl=M4}jn[xSҢů" ET,0iB1ieaW=0E,v"r=g>ހs@x-N"OC}s]#X/; /pwx!d@oZLVC?{=ibjKދ#~gjs칌<BSp*"{TBhMQv~96-5/B#jCRyLnǾ=Cׂ~Z׋Obg9r~-߫']c*Mmӏ5z%=Um *>No&q/ /d7W5yq7bVֽzs !za4ά/^D#UxQ&I3B͝q($,+U=#ZHd'Eh!LjEh8*OR n [ ucsEY 2QZ&G$BhABv@hw\NbX S5=@FM2Z?>W;,rE>_[!>hq>XOzbW-)!`|^z2!Tk!>7_[oOʕb GMn hڶ/yVܺSsnQ&uF!__WξD#Wfrlלcԥzӌ<+f+r&܋Cm*+X -!:M$DW<=r/'vi"\zr \~Y|BC剕Wr#e_4fqN׺̈ D˜X끨6hS}&kޕ:#w=wCae${ȇJׂ~rlƂy|׈'3r C"Kt:f/ڋ 2Bh=QC$!w.qJȇ%&$G^k !ǦL޵/BC=!'R !/y96OC^ctD 5!op!75||l5HJٛ&Zߠs#_h/Bu6_Zz;r OE&W 9h !Fyfۋ)6*9&@7 ?PBQ3pWwB}r*@1]CZ%b^V&!o;Iyr rx8!Ġٕ}YQ%b Q 1Jt&{fn !ǡ+w6|rݿ:G e'{0Rc t.Qސr4%FWMrhfJ @bH2M_r#/.`| ҒcG MRJůDO&y *q+ٛs7dQoQQv?czK@؃oA#Qi6|m{ M|mEhUuBBU!Zdv'L9.3Wl{Z&T~rD;=BGU$2lLvn.m)aKޣ&O%9[^F5/-_r[) !HB0Ûr'LKG"jPξ&gҞ"2B5/:r`e{oFȁVrZ#32ҢB9DW }f=Nh ϺB{vB1 ㉗rrf/³Rv3;r@4 kpbJ Ϲ^!=]A۶jȋ16SBaGUPJzrBTJq[.rRjMY! ©v "Զz/^a=^@GCyd-/nF>d-ѥ*!;Є|k\f!Iȇ fIȇL H/B+ȇ|N^CVIѝY|"Qx9NjcS6zXgE%c ޫd9XҔ/Y4Tcz09^Vڄ|uU9#B$!~faFÄ|/Q>YP_,WE>2_"t:5{2!\- QsGϔ@>vDi\E>t3\Cᨐr0YGv"3c7VL$CަE>hfOȇ\bh'4x#qpd>!5&_T%Nȇ3lL•,dQ|'r2|B+e%r5WC,e"_NJ{HيA+<鉑A":o^"$Sy㾵PJOVɋpYac_̔@)39|u*#&!X cm3#]:odKYqB='k,!< ^œ<4 ,'鋨hԋNG!c]ӄPJ yb QyZ l%R9ºfB!aO%DvφёqجP{ND9hG%k3H[<Bc~M#㐔o/#Bhxr׾p f1/BU_D8$$ eD8PnDhˢHȇ~r;E!خ'cGIe/r#x+cU7M~7Ӷ2giE53?ڣjSBC)0'DC+j2%$~E o" z}13#"=?#qn|} @!Y9wZ7Q n| QXNR{1KCc_\ܷk 2߫h,kdJPq@b*Lr̂I(Wx8QՃ4SyQMԃ$=E!c?*#qCV.B6؄3=g h5<(GGԿߨ9xBN+v Q2 +"3BCUH9&o<=!/cuJ9p !ޑGĐyyn^xM3o 5w(9m?q/o>oȤEm]Hm~ n/B},|ggpBYR庼YḐGp72!#J{ǜU 'Z%ɻ-'_EОo"B$hnuJmy c 9^t1Eht::GℐC)0s1zNTvY^@1t},a-`:A!3%q:G !i7bbҋWI9/o31cՋcq|2zB>MpBȱZ5|fµoJB=JE>;^WE>xOlr^;seXp6j^h }wj zg'c{_lQEES=Gë&h=32½/+}/3ӽx k*. 
ʋJ+knE,mBh듘#Njwiɲ PrƋВvQm]"mz,36!]z9,O+!ìVl?V8GԂ+,O=Ec}*"]zJNjg!Ʃn?Mo^STp^Bz֋PX^9mD愐cT[q $nL~oD~ro9#xp %.9tg15>!شܸmEh<'!Ԭ$LڔdfZ-2uzPC?Dm' !Bzb6r@&QkZQT#r.}s!GdcJ!h֗AbOݐr@t|F?Z6ZƧY,틐QZ,G4q@+K q!$%9 D)=->!8OÔ'Jae|ۓE7ޫ(q&-7!*OY߰\-#K܄羣9]?3Uo'B#CsBV9WfrVi( oڹ=2YZz9D9D+އ&#K*޴{9ӤeF>w^2BGdC X۔2BSsBIW%eC@oZNZE|Ц$3"3z-C,jQ #S1ȇX" w9$rX9n&9DY[ l/gR>/BN9K|(k!6Z"m}!ʱ p uH^qU!!Kat̤Q !fŭ y|ߢWF> :g8[FQ)= Vf,vmQ)-#_-ǵC"D;0@;rBY-(< +~%[^g3#)G {DLMBg{B}ƈ9|{3!xF 1/΋uȵ1n-\_D3f?]yw1h ] 9x ! ;"ꏈ vw_ DxF l\HgXg\}1OǮ~"xfC>H|-돽 PMȇ׼ yvzs?Eȱ)Ջ=E!GBoq@T3J?xy}Gxf-zYd筬]ۍE4^lcj I jei?\ xgFBrnxrT~ [N _ʂkxrHM wBѸ֘z/׭[H^WC=tЯ.g9`iV+I!b2#<3^ @>nG4z-+]΂ӌ0:l.E$Y1pȇjzZ|D+NgCf=D8}*ۣ"3"TX-'!且fqިTesɵ| s'y/&y5!L*!ՄLH9Hȇ{ߞrl^E>Ԅ!X6GB>Ԅl!x!T$<zh=N)#چm$v?P T#O@ȡa%^^J9 ϱ}.Cj gGF1;8,IQbCr}oAHzy8B:$D=E8F,}F>Ϙ:EB,iM?M9Od$D8P_~2BB61OG d}BȱH]*!u{ @BqTD8ԘI{!JV#帜o>rjX !GMGEyw֋ҳCO9 &r\zJk_}@ !G#=sࠀu޴6# I!G0,U!(xr,>sh^c -"K4k/B7}:8M+SFC'VQ\4>1pD9&d^I@x@Q'Hr#P9`އ&! #npBFrdm+}ƅL3I'r5o&_9fW#@,8V 3nZz+N-4";9:iK90Q 8!2mW4#h=^DvgYLMFȁx@/B;kl/Br uܣg/BE$ȵ !|:=dЋP-8z%zYsG >C $daŒ^pJ9V4kqWZnK܄RF{I9ɒgO >f*iK!| ?j h1ܥAb;fMgFag\(=c y]A{lF1?jD %sk ?on7L#ᙑPb^ޮ*EL4Z#ndi\Oc6k|}C o,%1o{x$Ux\\'1 =쓨B`DLDfWo+o5+EI,v?1"Ŭ %;% !"}sz]!C!b9`hM2g/7 oUsq3Aͯ%N*N6"A[h>;=W{lZ؀CƆ"_6؏ I'e$,L^׬ -'M/Z] &ƎxA2+һ c(v?a%]-Y41osOJIɨȍ`x{#1'DF033:|kH0[V>N-[,24|k+|\iyNIa]H%__/?$Ԯ/(*ͪ9>KZkD~S5ypmA=,v#bP1_ZhQKH;1o5lՕੳ5d|DE+rJf#7?o=W*4[ߢc5cJ$83/AhA?ajӜ4ledU;Av1_F̩ic; oyDH[N10f}$-VN[nxuƲd]Է(yyYUm\ɶ4lKylU`F":8F,ʖS#VZ3M 4":KQ|&|5]RQ 9 rI3CG> `e1R2to/=H?~0o'7ljك@nşAX1c68"N#:* O: 'Uf$C4vX~hĨV`y+?FٿO+oH57U2s] 'u*1 f+8sH3F09yl$rޝJqພMߣk(H׵$ⅸ*0ĭ[MNIx3"y@SSv ES 4?J };ŋ$qu5 i%I5w-6 <[I|}) ï}~jgZJNnbl#wH>G#݂TaA"Oo#mߢg\sDyd%и6L<* i={$fo8٫Ey%@lwzK\[IZ|+"հY17PIۭ߇homZj( [;U wq4H~ŵD[Fx6W: qDY#pkթÍDn!E[ >m#]In  F5xFa-b`#$ntVH#ցuVK:|~t#~G#аs~ixkԪJʵ{4b_ىijUFLYZkw4 <Ӹ&9o4n_Zo4urUNa6bx *ۉi<@dh4|rh64n?ݮsMhJueK{ՙӝƞƇ+G@6kQ$K3+gsFM'5_ 4,um N/u]~r'8UAv>jlrKHuk*STV5Hak~wUn4|y=O>1%p*t a]t\;Oz[@#w^$x1؟ o?ߋx巐WuR$ҵ\[k#72|1.$|ӯ?9lGI6z9$Рqu$0}]?\]!yR. 
=}rBx3 4,s6YY͢hkWmT7%аv>5HfZCE hg];; goAUNf<E[Vx5{zUMP=)^S/ -@$yɬCԶU1@aI h"–LYXJbE/WEIfU@ Ug@| I>'~ZF ƁLV$4Y.A2xa# Ng>gcz6 m ?$@IhYcFIQdI$8a5jdforȬ,!:Jdg=1%xةA:A L-ƃAVn'<~5 J|7 !*O#0+~oO5YkƔ@v* 4bX)y>d%0ԕƎƃEq!45{{7(Ƌ;e锘ƃO[6e@~ܓ$: ~ i`o4b^6Y 4NuFau1Q3@ëTE'8Q!5 9ʇ<_^x}/@6(FW6zuL[|Hxݳޙ1ӈi sgFƚ>-XjFzrK#jhuA'Ujdvb09GӸ*yv#eSPbνFL~}Z=y(YO0m>U1FLO΀1 uG# 6gF&v0TzT#s%%3U[|)Qndb˕d#c42}ϯld9KjdQWoidbӁ$#5J~z- %uƶFL4^Ւ+aN6z1'γƅIH<X4Hb:zP$ȇDOγIb&~u>F0;?+:4.\gKn$ f䪑L:3E=HaAQՒ7Q@Þ6jݣ\~- 4]c =f?8Wîeop4̳ުze'uqʓɲ< ~Z?ckf:h؛۪zT'gv le#xOuj͡/C i3囄v{b]* hj: ~ljghXԛk4pnVguO0ڍ@ 8gO0=FUgu~q][ƥ [UH$~ W o 1꯷DK6v%^Z+FXJmU]NmUF==??on,ڍ{g|#xn\klOݭΖsB7--9S9xV|#X}&3>U _`eQޱ{&Wi @{1 @#qzmg4oG4bȈۘ҈i g6 9W1-[k 5b0N* 4,ZyV#J|%аsk&46x9n4sQ dk]Ja+v15Ha5'~+\o6XAP@86ɕ?b4N/ 4$sK#1IעjޤONWH(0R'$аa=s@?OwP^5]O @>܄PױmIQ$x`A>sx?Hd0f~g#8An$fT|(ΆHa/6/=x$mw#1?3pKĹ^ٺ9qCk],ύ\9DZ ˷2K^kw' ||ӷ2w (|& .(Yl;]E~ߏ.ܭ.`b-kE? _}զe֙V2.e_AA~g)?ȗ+2*@l}~إMj_i^M^i-ƮQ3Cc _lb}8Fh@ם1!Àt@H{|>9 UjHߎ 7szoN5@8DBA Y0ߒc$2d (sV+y{; S7ǝ{ `f>X3WAlaˬ. 6{sY&2OPsHo:0.`r)ڏӀ=iܼ?HtkFvHv 9 }8{\ikV^wާڽ9  _5fe'cX\T}3 /͝+'f!)/0$v} >.V/$i`đcgUH[E/闃Ӌ.C 7sT]'f2^>|&{.g&|D+`zͪ)>f*Y:y֖-0pw>b)u ݝ)/A̵VqoM`zA̢Uc"OynIhm8lЛ t.)c _d*ym˖FLGtA@{ QsãJ^#Qӳl{צv2JEELgG3*/}LR0Q/R:K]oͳWo/2B/c ~#诔*0=%j LϪEL~WLϹ]뭍 Lϸ5 )/0=w5`z6;Ca 4^ x?﭅ EYQ65(g3s \(77<L1E+Ng*|VIq_z~# ܃_rk )|e)/Mx{7ChX9xkn"qo%/0= \ϒ|5T^]jY!2޾~(Ǚ@Q%||qykegSTFfO^[P렼ݛk.+rB Rk!= 1*ƣjf7_D4S/!_[ i\~my;JϨR$ _ a֍< D^1/CrIBdk{. Bl]~_sŜZ[oB~bʿK mۺa_n"UPR ?<6Ӻ,) pޙM! ^g?1@5]~>̇R`#k \}.Mӗ7wGr|VϺ[p- |9*zx1!}L >\<'@~~?@=s%⧖Sճ^!H8s##*ܺ+") _ &)/0=@d}&Vȗ~*]yĥ׿.' /wasN<L|ؾ @^-|w`ni(m?7K*^.M5@@XMZ~ @B p1p _|>@)w?=y?@~\0;:ldrwn٤(ͻKm^r[s=g//C\TjO-4`rwD#P>oW@(yȭ p~77>@2~!wj@ܱ[c-w|dP0}_z{`zE6馼8k70wQf!/GRRA [`?ܤ;벒L #WZ9qG|q qAD/O?,sq?,"`%߯naV,spa L_AwdX_*Z  __$%V@nU`_| /֜>+~!ͧK>!msĿ@~pP)r`ߖ!zn_ @n*@Onsr3W' `? 
Dq_[6]R~ pퟂ|}P7noݝWڟuדȗ#K /^p~_鮯΅ pإl<+e>؏& wEDC,Gju*|˝W40>~eO\_>OLWe!K_2w(U~KƧ.cWmuYŹ藵KղJi .s/XZWG~;i@MA|?^O%sqstf!п).50s"= .e~\Y{ W/)j# ^׋Z|?owNO&?;S^t@u O ݜC_UY;=uԟ_ͿI%O7  ?~\2D?b~Ma:y /*}_*}_UI/r{P0v/o~Q3ܦベcV5PI/Ú_'_֝/car`xVs#a}cMA;Ǐ}Uׇ>\\  _ݛy{/*Kͱw@~zsqUb|-[ `|pSM1~pqӓϬ/yB)[k1?a_?ʼ@ߏx 毷]xD, 5׊ | 3ȗ!¢y s}@Ƚn(03?J`fεiO?֜/? @k@4s(9j}EA\J! c88*3;ˑ^͏*φq%o3#O/lz |C |u/݃G@KR)@^HA؏.+# R_+ 샛+O-r]c D\|i9׍+`h#+ Xo@'sARPukY1؛P󏫊't٨ endstream endobj 385 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 390 0 obj << /Length 422 /Filter /FlateDecode >> stream xڭT]O0}߯h v4Ƙ> *¿njbޏsndtgZhFK·à T_~eDsn#=ѳRi6<d "eϞkWuO+^aA"-$jKz!O!E"Ͻ9)KBpM'<e>V q5E_;@j*.twN[);k}`Q'm?Ғ[YaMyү*s= z?ޭ =_~Q`){nkB"U>ͷ-Rb3==Ы㊾Fm1tJw׊M3aKXǺ9 f'ލUSTǓ.*:[$ endstream endobj 369 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-044.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 391 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F3 392 0 R>> /ExtGState << >>/ColorSpace << /sRGB 393 0 R >>>> /Length 46639 /Filter /FlateDecode >> stream xɮ-͚$4O09}3  &)9q3s_g;l9?WHmX|}?ď??ǿ?Ǐ ?B˿?y_#CvIG%oW-3OK=rp]?~?yO^?/G??%1P?g5k5q7cB3,!z1G`v+n5"1tjne=cEum^clеG+q7Sf)Ѭ?*2%Ns9xT0tcH6ֵ/*e5[LۏׯU"ig>c3lyG;|<sY/>FQn1םջYќU,]{돜|*}uðsgËf u7GkRSd˼U[v4u\ϽIfj=Iay5Ja5qae}h=1S?,sƸm>}T{blO5vnl>V_z=_?lܰ^]WMZ7Qk1s׸Ǽl q~n q~Z7(j5V{}g6y׷jG^"߷&Zh״k<mp<[cϖ1 
c_3Z9'I|߳sy5krv8>=@_[{|W^8{*^,}׃~xZuHi~FϽ5Y;_{<SW|X_ˆkzvd o]VN{v{mx^:x~܌<9qkXM7 /ZxՎjwupu.%1߯cg-rk|n)p0^wZN1GH]LޏWׁ?=m΍ vzxr77Rky}J˼&ZhDHsuǹzqyU,dW|&=Sl_? {ޚ&>5Y ؿ벼/om}￵=n9-{i(>~|߶#5w/ֲx`_Χ}z||[ORN|~11uT?u}Jj&m|nk?.I^651I?A{g u0b}gy  m+hkdOcձ׺?v/DHn{ zA{|nkHk}Xc\yΓæ|}mu056~kzX?|y_Ώٯ["w-c?=O}ۙ{_MO{*|mm'QϷkڟNj~k~gz뱴^g^vx,fmL9Z-Mװζ׋8m|O:}'Mb٪:2kfٹ-3ٳ鬇/h]z|_χma~Y7xv?iD}:T;m?筃t?=τkgm|ٷښkZt}[:߼e/zy~Ozxcv)w‰v?wm,0Q3 Xy`ve]|osL{9?9s?eD/2g~U,vگ0o[CyQ/;~;|*k#/(s~3@3p1_]6?6q~4_x yC W:97Wx9Sp|J伖? یa8ϖY)lg-6ٮ<~;e?p>x/<=a8U܏rz!'FSS!>b{]aWrfia ᯣwO_?RC=ڈwFC>~xU+yMG^}~xjmƳis#8v;/q}7Kyp!/˶.{)M m<)p޴ծk 6Kfܨw6ȟaiJ`y`x^/ aWax!([׬G_xr{l}ڏqɕKLP̴ڈ#zD{}g=cG5* m)g8v=z c?&zPd`|q}{^`ԝӶ5VCV+&>41sɁ?V 5ߑCEAdIC5iOǹ4U`&c23sϤ,<>7;Yf (!ͭ2r&'2u'`VD4R}QM}\;>P2]^pgyfNU ?/ |5mc b}fٲ[m۶ *Ph7j 6۱Fb߲m[>mݏF=gSm:g2W,׷o/eO. 7-8 ~F;(n)v-ys?'7eKA TOˉ#v1Old<Ƥ&ɉO}Mn~Ϭ-QF?=^yv^=l]*w}8\)رSIO{>LnVu^3{A[=y*ŚaS47Mc4Pt_,ђG7&/v}f,jz?Έ7xXx_iGw#d/jl.GDO x}V<#h+h6]}w-v0DspН/N1~}agY83\h6mw{gy{H>[dg(ROvf D0uGg~hF9h껍Ol=OUѝ*~.]:ўr~~~vX|?ze0?{>Ī; {vٮ!;;vU4dCѵtCѰ>*Z~;$g-Ri H.O6߫O3ވOj|etD=-ՎgwG?6N6hk%EOy_ic":{ޯiߋ+y~[lWE_cGaFtd`MKPpm^Vc4/;s-=$|4 ks{^UAZ MYɯ\ݿmBN~[_}i~kpkZ`ʆfRcjO;1XZZ\>16׵l?"uBdk.|<OgV4a0ڏ9 972[Yh{ c|~4K|>q/C:Vxn]?N3~zFNp_M&c2U>OE֘@[! 
7lE a3jӹUg)w '['+5V'v>fՌ|5N9146&L-q&ڹ}nL[Fp-m'+׃I'SOW/|5-k+=pT9U<<~m 9М\Vu>49F"kWC5j=q*=xm93Ьu/BcR _| ]tWv~%hW>jv6nxgs)XCmY!AIa30$vLgfj&ܞmu[&g%@@l M;s+Cw}}J.ÈlM Y}P)U7T9獪"YZvC2'ؾs8DqXߕKMhNݡ ~pB/8i&6PwȤ\+l (Dٔhd kW U'URb{N8WXOl+'7}Me"A[E8S%Ǯf'B&پ3 iՓC<ʆdafpY Q8lMiMk9ß4c#Ms"~kE,Cٿq!K#n$AN_$i>\ !y m G/׿?RKȂfS!ɳS$K'0#  ڙr\)1~l87HQ?G:RG?_eT 1_; 3?o Ud|<IOc"v~,*D"[T*O1PQ\'@s4%_1Hmӥ4s*lN$<|ld12ޯZq4Ŀak"pXzv^vΘDWKkJbt.`v 17o_B},]C&3H[grPY`ovA~y #1W{ D F*Xz%X*zt*h<>m 3)3GϙN um%d>B=m s&XA12; s]f $u't3Iu1E51~u٧lf*첍 [k򹂊J}8畝{:Rϙ+:{igBRvTlN/|.LąNazm1aɒi?S,iS)n (ά%&*yB$1-<7K݈Z*eE~tjubr/죓 H gLɌnӋܝ,D~Ql3"pV^Q y.Lnn}1@0>76,AďNZ42ʱU:v\fWvd=Rk5Cهoqr;;6 [WlW6)jVb*Gc!TB {@`r4-6a Rv<޷)3ܿdkŝHB/oZK4> v iǕ0v>cn ֈ &߼N&pq9<>s*9Px \Wv idf}29| L" ]`bWCv1 LYw`HdX^ᚘ;<;;Lⱉ6UAx&p$clK1 L {bf1[*4[0sMĽ[Ok,qqN!J5`; /ionI!4\h=A=Ya@S@k'k)?-F+<;v,ɧ,v v%h\rA0XfBc!x &])M! H2Ln'="ZWW)&J [,G6N=&5䩈N[_D!&I3u#3Qy:m UƑf}M=},B)&膔 gk^oez<&B!B4ɣS;8=hO{(iPqΏB06̠fF>}n2rk/}Y.F,t~"L篒Ҁ0cVHDDpZ&2⊍-bvYi|*q@\y a>kg=\&FPV,hpBz+Pޝ3%l̵b-Q "MfK`MrE!c˪.?oK*fQ6P&Aщδs+I6!BcZ]h'D`cVd0 $'AǛ#܄ge v3:Ce]I*ˡ| <{>h\Y6mY 3/<s4"JJA`61d;<2D&eKI2 7~ljH"ÿ*t`3EA4݌f M m]4PaC)34>% ]hm"kF{#h ]äO 2`crMI{t5I->?Ǧ%DDVfUKV:9{]usʣF0ЬMJu0bLy?Tfэl(|%옜ӓxmխi}l0ܕM1LDJj*'_Q\PBfrt9mdaq$]1z_;QL Yd@ 0ǞAs>;e'w󞼲 &1DُMzLi$2|~%5EƢCf[G";}8ªDd0q՝80?m3 Ǜ-o~+H$2fG)Ks,, ]i'Bkɞ7I6x&FId6LaM.-gVbfl}W3OtFi^F $'Zs9OhI}"%t[B*AmDCPZq'ـHxf2uJ֬2kK\߼7Hx2xBڷ>6B5)YD xyc9plCJK2 w*¯=#[|l|dU  ݢR[%/crr*PD+lu*J\L9?"9o"rE&|G%0M= gaHLTnDU\#,C{dó:G"9"TnЛl#)X3yNPͩHqWZգ%(UEL:եE v.qErr C5;l4ΚCޢ':0\[z5BH_:ׂ&&s[x&n"l')[y<229F́rvdiɯsCa*.DRī&g7Sft in?֧@WbD̲`p-OS!g+M[]6Uɳ1f 2HY85byw1GuUU{t# 𙪤4j! [$N9gg If DW_Z1,ԈB!se4FPHQ$c}RwؼP CgH:Ifsj^"6`AœMGUx `yE%J3O βKU#fKepHFlMbЩ.ՙ0 2M*r<&Kb,ˠztfT_ e@D.`2o(*d7BCl+L:[)יÔq DQlR0vP}UQ*u@ز_; iL*z:wem31k^c *}FZiXƕ\ͥ:t +Czݥ5c7(E@ 241 ^k:(K٨s'`8;D^b4łےԄZ7;m]Ƕ P_׀ t29 X# ?Nmn5d0) f!Qc[]:yLz :n? 
Yx.LQt~'u,fF?Kdӱ3¶B…)A`Drx)V)H3UT-M~iOz[R%Ss$B1>tGB]'P]:⥌Ru22Du:Gio-IЇ2YfD`BUݝ+CU&g]o+d oLX~Q6: 6?_"cǞ٬Uv<|ξ+X_z~fp:.vu^d'B3 3nR՟sFN>c2<KaHk5 +*VeL&WVGIBSCb_:Ә&IcQӋjzEFQԐGE U(P$g2-Z 0g %i?n;FfeXeJ]6LEl+xʇ{,\lN*SbH͵/ [ :@%C0Ap;F^a  MfMX +}F7p7BN3 (3J`9,٦H84çi Sr̓ɲ_$Ju&%?Kg9?ÄXGOL.i/X9-_;Kݰp*c7&هM%$$4Y}SG!6Sp X4sݴ/uށKCw63|Z,{-&rt-¾&UTbupH: < \eXgQމ,U˜*.)H}rݯtgVxS2qK,k,܇|*B5m "EO| $ww%WV8ft>C.|x[DISdL-POKP`.7Ln%*o|@9883}+oH򇅤7΄8al߯,jө}3$Bp(u^Q4ֆ 7194A-?@Y3s1zzOI l%> g=Nm,)Dn"K`{1zzUC27=l7>u*$q2@y%4.S4~μLJ}`" #:o&$6*ƭ#ô8DD"d [*默qSu$Ə4=P9򒿊0n$D%mZ ,#dU4k}FL"S:,wB3 =G$LB%U._MV2t [ WtJf_ SH\8[ t=Nj$5سPŹW#ҖbBm4NbGW=r h(l2 E8oq[Į C+U+5-R!Г2oܕ$i,ځZWF[^1B=8UA&mC5GѸL]iHJ.2D~^ATeI1T>zr巪dbp9D1/>T(Su\n8cPc*K9Ů"hu> XXZ-gHmoNntT[6>ɼ wCTNi[6]eD38JNݥ.06M'o9Q_0KDxaY*|]V<dU2䑙}gWa/rX]86:iOib+90+l;x9 8SS0L%׵: u[YhWxNbZWl+!<3f OX2;X @g'سA@YeE(0Q5޳33Ws;}U4 ^'=[{NdO6l}-U" aĊD)LR>tKAR ܙ3W AI⯝*Ĥߒ ojb$C}pp2ÇYB0٨v&bݪ6Vg4De0Qtcwq8}@%5Jd0;0tP)U"!TYa%愜ʞ5P`M]%ia'BJo<^{\ҢEƀG1_J-pGX4PFUƄ 0o}u*OFJyt׌8")N%J6XT@Nʌ0|n 6l\Ӧ*Fi&hR֫gD7"V<պ-.댄i\n.1kzv^nB1, Hj,8u.~SOb;k`ldInjU/:lTvi ȴVPp 3ʋzVN(,}5ôԞgjh@0ڹwUL|KlӋ @Nҏent\0OBxlkpVk6pԙ\g"k<ت,q{E(  ՙ&o_Q6"U|_Q䓘+68R&4* DjD}q/b y `4!9X1)0i2 Ȯ'7m E2ԸpB²pM-,*awUhv2r[w$K%/%'+ȷN*ZoYʧS9CHWœs2R-[A\$;ٜ&5hc4F'H?c_* }'9%Æ!OdQZ_+ja '%5U"YY8~MHr Vf;:&(K.הY:؝.*j$+Qp'awe~}gQ_cu:䦋0 D6e 'V7qw-Ol#9G3$=?dq?K|\]PlSLm_ο"of t.1L 4*P=*')YBchS ͇-̜q0mJ=-Xy>0j#Ʈ5;TG2X۫n: Pݕ aƿc+),[J'668*2Dte?HPY*<$_l`m: ({f  :C"gmӅ〡$)˧Yh8Aqd TvJeޅnm$l)bjo,L-YJ)Pۊ'wRxUư̚lr|& d捖ml$3)-$n{AډZIZNߙ'e ]1&!~ccMK@fMݲV4#[TW-K[E<-MUAf*lrW yE08?|.@Gep"Iejβ0nshDhLۢze|/tiPMKցm)[VoGྫྷ"щ޻mNY8mG2M5cZu[:y%`GnN.CukȌlGSV8##KԶZ9k[xv2MId 摒Ž5sb#ڝU J 'qC tK3'wqgr߅`FJ/*/h(2d]FYL)k隔}tv٥Ma[fUNZK圠 %7d7Em5:Z4.yt =}2K }p`d.} 4'k< |T quY7H<V}Ҕ2N.4Kx"FuXĴeq"G֗lAL0f R53\2U9m~,_yϼ1;F!SU?->H;'AJ.Aiy@xu[W9D+ʼn):?j$-z#X}my;yhc[їWfL#2; hWM|>mVի`PK80 "^M xX&6T^XS})HM@*f_h9:?+|1ɀiʴl![ ; ;^g:o|dǶ  G);;ꝑh^rqi$n5Y ik ]mԫ~t-$9,=1\qGű!FSUDR"j]˦HtTeǶh!(UkJboA9NuzIzY-~ldpc~{7C#)T$]X) sQ\\1Bي0 T}^Y7Ζ̓4IsZٺ_ڎ>) 3(SO)rW 
>&ҠؒFfLIbΠSȸYuK}p&mz>^zuT4d߯;t\/8,ߴ1>뫺:JL(}M̋H˚n{R@)/}^(apJ0WIL-0A_EAȮ2#PS<qՔP%9B}-DB߰uae6L *sٜx0;'!(/S% ȴD ![S;ȼLѥu:$-uDruO :^-ْ#ύj01٩bvIU3W\ }k#dv-j%|$=  W +6GU> w (O/j|W5C|RЬ$K= q5] mU "`>]5E/F.&1IL774YK13Ł집D6w]*S %b ~l^E?eUVÕF omxQM FaOeʣeFhс)I u6W}a*]S Wm$,NfIUAZd^zH%`tkRe2TF"Um ]!-9ٲJX&Hx fѻ ƢH]A zRz3RbhaP>M̜ر y&:%HjՐ7@Ou팮ͰWLl)-zOCȳ$6R<6SC%aevuL`2aL[w:l: rPoWVh&x t 3< x=PY(Ac[G9\ŦkK]gD+bf56ƗjԈ@ 80ݕV&٢0 'BB6X"m˩ O=`c Vq$@rN8U]0BWxS5`nL):~=mׅp#[MHYry\"r`d}]!xl]ـ g#ŇvA}azzsݠߚ[eJOLuO[z"_l]\::wu96[c 6ːNEP6`9y;{Ugqwnk D;elW N]yv<ⶈh%}Vn4h)Y1&=mZTnMU~LjvUЕ>9U/y_Zu<+Gz4%~NYSDB`HؠbC5^V 7G_k~ .5D^."x$Lem8; S,X[^ϳ&xwC)1 \iVA*ڦ$%e@9 8gqYAF'*D- 73XxB4)ceo uk9A~$#kF H 6#A0Ne3G[݋]Q)_Z2YW5Oѕt1S0-Uydzt]bliQ$Z4$ h]T'vP Ҡ9;1;抂qdDNь=^۩nd>{Xf$IlZqb;nd7DPLgR!y[eQ.\ p[ӆ~yHndUhR YA.mF B(MuÄ(^v37ADlD9!k]xί|-تõVx @ f gd)&d aRZ7ݵ2EJ<`]:08Hg5rc RV=C@ƺXE+T20&ds(Lvdaucwd ER[3&&XlFry 4{*~i1I2 wSQcw8]iXQft,MaU+1LM`9H_~@gdH#̼^jGZ>Zo^EZeZݢR|x4ʬgQq3!@bfa_w@Û""#qFWA" N̚J[;ͬ8%T e휃TJǛ_JAz+$QgDw:9vNwBfTH8X] K YO8VblBZt+hN1o^q+LcQۺ8wi7+79ȃ#@# c[L2nMl1(CU3]N4LK#$'K" Rլ0M0U&B&-:nw2ꅭl]Ѽ]P ! cEΡWoPdN`: u,n#"ǁb٢+q"A؂Z;^cɉ|UÛ7|]þ5n[QVbЇ Ia}~X`#CXJf& ]8Ӽ }+o"EqVp\ƇXPՊR;o77zvWQ1KZZӪs]sw4q30LS* Ǿ:ԏ{Q4vb6 {J@V]`wP?:Н}ۮG`duiFf Xsq.~.cK$t"i>k51Y-dp!9FeaJ\d=:$l*~Q5A8Lawy2FHQI,dA4$]ƍ Ezїb-:&L 0F:I`ڋ<;\8ڋ2@aam6t+o}uPzin-Ylq8ĨgyuZ'NwWW)xꊕ\=W 1N =}_i=| $oȯ\ך ]2 N *o,1\YKZxH/ʄOV*U!sVDyέ;:)) eZSk[$x$ҽ q)cF;02[f,)0iJºAg{u\ojIV8+N$>X4:{qOށn B^SWϹ?`9Eo{P-ʯ'oHbEUpo#"]9,՝؇[$MVM]&NXv$T7-ߺLXCx7 Cx[ &!f ژ7ޟEܖ"ܹ~eum L-A/FVYjԤt1\mV dJ.`cQpycҜAѠ]f9RE+̙K:hUSӊN̲.`j rYvBqIB 0toRA!uh[m5W=t\Y߽*UmW*\h)9)@DS}:]awm3V&/S> %BLTPqP8\YTYN"WiZOcM gR(]/a﬒).ZVJ/Mz.͓7H sh@^xij%5SQj%U8 J&! 
Xa7v9&TrwX49tS"EdvaH2LMLp`UMټz at1( vś}\y))NJY~%ܜvZoMTnN}yX!z~&swj1B/3\\|26cS_; eWqo GѪܹhyc !X Y@M=u$-7 ֡ާvb}XUr*gT\ʮtl;(`d޳qp,.O9&mʕ%&L*T5.ʏ:k 3ԢU+_.K)LVѕ2"AڔNԹЄoڊАC~$zX6Γ `&KBi(tDIx6y1jMTJ22df9ӗf H/*K,~˷Y @e/% 9MZs0W ዒ:v} T31h,cM0@`u_PBE5gz$3M h{7n =7!DE+ֵSggKcQ1`冊B9|U%زؾ a!wZwD{j2kZR{ 3UV lle RKN +0}-ۜ%+⿘3-*1"بB'P"H5dDhv LNJ}VL5Q8γ%b|\ r!I^+H9Au2kǵXt"6AǂVC,fv_EӈT]W ;y{* +7T&Bb <ӯR5)|R`ed &(*w(Kg3s7)-o~%QT9e@QfEoN f+(Jhbܖ}j$d]E<;/=)RHP?#T/i='*E_zZP@Z'ܘdH/ђ4I cZ"ͻi$H#mS.9 㪙Ӱ uX[joTzM]3rd(35USn{J* w}T]('M7 wk/&c\nKH`ͥ0te8y5~o?Ԝ7=ڐE%"q]w\ wou6+HÖVYBNKK‘kVQ  m@y2@/q_eZM$ \ΊlL6BVQN1@QQJw*~fݪQ~s%KwϓV)y=k*0>Id&8j7qTC5N,-u)ܰ98&5n=KyvnxrioK IDv[E`HT5SV 8pst|2!粯p^;c*o+f{%D2& &0h+Jsy\igcnN;|d1x[21cӈZ7(q)}'`sj6;9- Aɸsϔ)80N2 \H6}]b.+R:Bb%e9d5>nAM*~ dQWRcNRA'J~Yp&1ѽ=o45M,/n̜.[Wu\EWa #g%8.2@EŠx:Of/\耒pA0 yQE Ul; pe^|7BYƿO>Ρ%iKF]Q)GW ;;.yE=v)NA86kos'?/]޶D  r/>n%H L8*BB\xsOHE+'%f,"2mFL/r~ .Jsp%f^u'\vǛA6S~.saaId繾:_,KsԮWB·bbRY0Do۾Ⱥ,n}',vC%LA-mh õd`Ր̧5hVp ш}Y-fma V6mup& Wfd"|+]o <: Frd xMjb=T3p *F#pNN{ GG):)n[E6B__ۭK7 V.kv*vsu,GVr*C*c+SWJMV0я/FNK+LNy $YRݥmMᝦNI#+$; ]*(пS:!mi1"~SZ9$Rk/VbC X>"Vΐl6k}XTD&κifﯼ2 j5k}!۞XLȠ{[6ct>_%8+eS"jեɱ[j͕gư*hou5:v[d4q);61v77ElRT',(i-cX*-G4ИͷA &2E*Dg )aZ{[1B#x!xdt^@lAw%_كT"7l-IpӼ ⷸە22Է;y L粧t#|}ZU,C䕍k:b(J`21+`Ә'E"dUIѷ(%R8BM9&tN|U$;vԃc)gMzy3 6/AA{_h&ypPv8Up{qU@\'GOv`G2fԥXFiB1ˢM5P;KLVuM8+4f*>tP؁IwYSQ= pw%@s::#"xޕ8`L jCpt[]A^\ϔ,(tBi¾`/kiTVp5aF|̮ ^tLSh:jD#@m7.W^c( 5iwcBzpLDv٧U-eЪEښIgY;[p4ykq'Q ** @~GQ묲-p ʄ$`jQ ,;bdibYB /m玖3++:Qh|.h!0+vZEVU3:WqgFxSAf#T*EW^[7ؚeIT32[ӲnKNC=Kia ;B΄729sQ-WlA*ͣ(~7 %؊df]S-%gnR+ vc|^E۫:ՖydEL^ (h-4T*E46Gfx  HA|ly#6VV30ۮ5hP.0iӐLI3gCMI ) \Vx5]SW I@YdºL[W)$WO~F0OŸ_u GJpJv4K!F! a{ȖEM4{:QR%&>}#%ql7/%b IZ8I-]4,*UU&QݮhXG5% U*.Soh [& .^^*3h% aXRҶRӕUQj~׌l*bpU

?YWLϰޔOʆ\rX-.i64*p3$f{K9m㳍i8%Wg&l[0!҅ OHzNNr:AU^ʻPQZ8UHF6P}KGJm:󟨒Jʡ|Vg&NOIȸLBgN^*ߩkf7=*e}Y`N,t]; wSׂ^m>i O#m$!%tn7!"4L +In8%E W=Ƅ QJL46yOl,˲SZy:5>9O3;؍OH}mƖ2w]N֜TO4Yt*b EvB&Bh0-m=Y A,#IxNͫVTOt<'ޭGWؚ+WCvᲭz*w4#>6;č)\ْd=lYE"?7VMՖ:}[fQ(:윚Rn]ޜ0uK*X5S"Dh''Ï%%LJI]k e2xF1I:% $noXVM*c-H㦈$EӪ G?9z1I}:׃ \4ȹ]Јb}is2e h!!bGD5v~tͤMh`}[ Nu_:pNjj7]Zow^}fuj|ܖo Ѩ(c]Tr.5V/T\A.8$ԐzboٷXtR%LWՠ부 P-=df7Qu>Ầ]T,3]X[j`P!:=%h*(&L?SJ[53>o/I N.51Ќmٗ wo5$'YhUWlBdt2eF} tCDNdX 23a).V(K=A@FR1zJߐoU'A,UkgqMs3~[+"CN:v<\>$xG} $3ڱk[X; ;>\sOA A4Rsm,/T9zbkzcFq&O'nZ}b+Ď?<~e(a آMp[@.oXp |ɥVgR iA j#䄨Ap@Ū]>)2&@NRl@Q@K r$`W %ȶ0@=Zݵ i)E}UDK @*myLiYSFb1AD#?GpeمƺBLJ2/FXCɓ釛=P W?<Ƣ]XDYHb1+V)MԅG'&̡nIUa: !IoΔ9y4UBodu&0r?_H]7׵yrk$9:Ra;>$^_'R퍈 <%W I z"o4a<xUf`$r=V!}qTK#nM!4C3A R,mw JՈ\w ]yJE:ckB5\dcw/Ӣ\zեb҉smnp2AxJdkb4; 7Hml &܊4Ssr!433u/`;< ͵|W%"A@..l5:,2Ț4Umz ` NXމ[h̘yLHcQ 4ط$ۙO+]q ckeqO#]%y Zf iMp 2+Fh`<>I/f͛e-)𫦦{ `݁tl^ wK e roVaQ$XcPY85uIV;Lr׿q36\Mo?J/[EALfM3;(vQ],c`<)tW EhR߈N&%M |pz7W%%dޜ4rtDݫ&6 BYNQ4GO[XuFqưY-^r#emNS&6Lfx7ptbU9]ĉVUu%: MOLyfXMwF/rX4eƀq{&ʃQB9 ]nϤ.°NkӺ+x w.٣hVwdʼWPhmEOvie3A*Tff7|fKG2*]"y:TN@lWx&N`*_;űVu1`Q3L+e #H&TC%vT:7qi/hsB=@1 ȋYTxs$=kbôۥ&omzIT֩|YD ;9ݐ(sCnO ad诓Q j\ AW7-{22.ٳAQ?x~_7MWeN` n;1q-N 8, -K2׋E'iv>N ItyQ(N>{P R۶H*ܖ^"`&e!w}Q$jZiF3yvݖR%uGH.pk1Gၰ` F~3LQnQ{/7FRʥ8sZx cs~S9)8P̏ZJm( ׭%]cEgDPnRkObG)uojU([MҰ+Q\X_FتU9E7.x%݈Ajw6!rʌ7h 3!ܔ |mW}QB0ܤC+woErxH}&%E}cɗ2`c6W`2c A(d܂QGgw&գk:4h"*Wmy qnŢe~9i K8}waLN*P+gFH R8 1fptǟ|/7W=a ILgZ*^Gnխ1yҡ -ѽЕkɁZ6xFeul;'Ҡ.(p9rZ `IHU]iozʅ--(GaݶƠ?iwo48({VA1PY]4Y[$8.ԨDWt[ ƧqSQSkDqdyt 2( +([&}Hۚܞk#$>aotLEbcEǑxBzХyyp% !+d`q"'L?p`w0˾_x&=Uu((,QZ/`g1iDOnżl)m,jV~makA+?nɝ >KT6޿7ƑjJv'Շ8jx/kZt(0\6P6x= kPo/BŎ½Ex Pb<^ v1? xy {toI>NJW+3> )od!3fp2|sg."!P .ziVY`Gލvq]?mA2<HNcGIӟ? 
J1Yw J#&!ƛ?c WCg!L}8XBd2S0Ow֑R+Z[Kox,zG1Hɥ>~XS_4r²IX .{h)PģC] k?;V'l 0y֍o> 15a )Q΂eQG˾6*)N2, 3tYQ4&0m2_$72J؛gM``tDasASm mN0g JfT04׹#lDM&A5ف\G.H '#駳 Ҍ ~ֺ7ZBwfHŕ>ԣH- X$~SʗUm3:k Ǧ9ǣBH"R{u5 O/=~1%1|CCW.\Ód^i1%O?LZcVh ̴X>iE=}mnZH8o-I۸jsN<50րicM-_c8LkeSMNe+4Gǎ[#:}-v}|\W{m&29I iFZu"/S̮$Wȍ.x * D*9QG񷭣/d(BIťģ{Cx[',^W=fu؆NRzIf~{nL 6E6̔ȩѩ:-=L}øTj, ~]Uϗ[}(-=YR]QReO7Isk!\FYt[4+#&l> B+JmH|R*Q]߄UM^6Ѵ%_r?x?{R=WTD\`ovֆv_9cVs!yM.dnP ! ʴP7k *KSeU+ kG#TLljUOxMw&Y:b[O37n #UcVQ`Mm(d:wB/x,:'F店8ts/KQ A~߉z? P& GA~f H ~]^iWosA%O[͉,尙CN:鼌Q4DA 428:EVsVBq5~]_̃ݩ93>ȜVWf< *CQ DSnY(*`+ A@QE(_蕵Zţbq!z meuAxֆf_v%'$Jvi!i<ʦvO+)*g10aa($2bq#n= nz^ijr$o_ÈbnCϦ #jHPFu%0?G~D܆2SV߉=\$vK| Ee߿k`V-FmR"JЭ0+eIWGօG#qs1T+Q&hY B]󅬯1h~˺HTUm4,GgwPӂ(2$5B'|8'D$kJ($C?K(x>fS7%30}G3o/Ov}Ȟ}fA?<Чhh%HJtDnѝ^'Q/& |p zSZ`$3?5LN"MT鹀>ڃ3V)AWUmYM{*Kt2ߟ^Bsԥ<^%l;WNu<:ibz6G[$'=o؊^'Aw> WvsL)T;u1zM w;s%= _6rVwIS9Z4mSl7ZK-@p)rWՁ?U]RleQ]eʠ?di$L|#"!(ky[!0A &9,,?s&2k`XaK`poY5g)\1꡾ypHr2(  3ekg{2e==<@`] :iǀ:4ejX2~,^\ =A~wÂG6HB^fe:ոiUL١|Ԁyf'f ҄O5[99;3r'9ޭ§!KWofX;T3O@ry+(ejUD"j5pM5sֺ7J|9LT>r*gÂ~b6a^!G[ï2O}P܍2c /.eR;FѴqyZ= ֝6u S,;y66+X,j]\uxEޘWv-><>ȲʨLYy;T=J07*XΏY%-yt*晓,,lSpaTkT*Zi{ >cTP&Q{BTlB|ѯݘ JFPz8~#¶[lBUDqٯZa ۠ɓT$0"\´dxPX6΍e`ឋi>B2uwAxΨ74f酅.<+:1%QzuLh*~ڛ"Ӱ[6~@ $|J_dбM 9L 7$::GIwHMWgQcɏ2h4#[!$)"6rNg-;U:W1&e-PԼ' Bh$T/7a!R澀J:]*P?{+%"s͔@Al*`f1:=V ;q"q0HZѾ<Vi%jP}ogh\I}3P8 r *l!ֹG)D[6BPH{<6c(h4sUB16{Q]y[ '/w|!V*aS68+>V2~X&!4GXpcVŒԀEEWhW}DIŝ s+ eMAʷ79Ej>Ap}~jEsv^< 3ëс>LH*Q^ 9K%5[22dE(4D {\eJ'A@.Bn6SdMC4jÖjnQcM}2tQ[>i:> ۑzE~Wk':b 7x:<<7j@ ٚB uf_#u0#F l(V/Zu'$Kqv1]RŦ*6hs앜JG|{: TOÒ }BpQ{&h7xpMݧѢU~3(w#f~&O~wzBx[%SUn5Ԍe;~t6ǟAGF  FPbx(ԆH#gIXGSAYO%Qe$ +¶A $塝X{g0oQQR im7U'7[jQnj#% ROYb4?_ o @ 5 P~@AE#; PU8@^9B5Sn78;pF qws+e0Rxpd_#WV$[BhcT`c|6;:/*"{< jN!zƲAIOE/ж-.}3tw%@b@lV"!Wqή3. +Q]ә2 ݑҞT,~$_؃j֣׉\oߜY U+ꃃBb;b7Õ6F277/ dI8.A u#nM#ŪgZ*=BW&  "j[P/'I"&SQ[~٧"^8,C27ڝ&T&o*RHs :plb\a J AT] :C@>Z2Jc WA滭/ }]ߌ7bq_WT*_P.WҤ3byhDؘ'spbPc_;WdWs07BKWEۅuU䨝eZ+pCۧPIXɮ/-J-|Zۮ˯+ebַrt$_V/뻚Qa04? 
~G ]Q,Lcx9CP IqI4z m&oEq]\LGe  6`yPiTᠲץ,TQDfBR:)TWl>quGזl*(Ì1-9/£̠.­K]zND~Ŏ6~hd{:-v kU6znզލ0T.Emt-1~y%jţHJيśQ2W@we]Ib_$ƹjg$7b:I4GtE&"4< `cca[xLONWxl%!;55WJ7:&U̙ΏQK>?~Ph>X9vn?AixIJ)z= *BA`JXT’ifs,, kz]|nZ R/͓4$jPn@^Ƥ([P.am !}N(Hڄfx~VAA١~o7Z#e NHCd^tI՗S+lIU*+kYoUR3_3}@ȵܟW,ZYEҦ =͕PSq U!rGۛ@׽RZ{$}Gi;%"v:`AdL. ^mz(u%}|VEiKAJBK T6үYbUthP)̆ QgT-j;j=$ͮdٴ `R# T/o\&Q@#rk5?ۨǁtV 1]<ސp]u\i10' NnŷW@,V{v>y=_Y}Ͷ}7^Gal68w6(I` }l$C`aRY}W咐deA|F>#d*j^!~3$19_=mIR(踚W#[oC '`b:U 5z2DUԎXwRLNBr*R%gAZ8WJՄB3whЯlt)r˷HuGQF `>WiPAٻw=~ [zl4!#}^G`#`~G'CtMR?&'n:'^]R~R H(ٙn^B  z)]Hq'F6n*iG=~7n rQAJXo)3&ɵo;e 9Tn9݈Vig%zMM{+m IyWt <l QS\:anj:NF#Sl&aRNA"n:mxҍ~ֱ|EmcbZGi GqZ0%NX!; d$hF7Vv V; k4gĕSo\jW @'* ((ax_^%QTVJ2"1*Tl,?i`~ = whhx0o}T@?BgqO77My𱱬k ~7Ek'T6޼+rl=#dnuPa5LΠn}2z3H\N"@"!L_T|fV][ ))/T\ꐽ.+5ۍ"lk?{.E:TҢ:3DP"$~9H("a2[[#>W`{@Str^AUְqR8^_!G1UV&+~|{eCl:C7Zܯ4lGɉ_;oAܵ:ÑpЫ)6 sV/λP+Hm]N8ͷp^G j"N7C/l* 7EWe8 UO; /: p[7V˻[ĨߝSŠTkg;(aaHxA6+V؀途 MP8XϠאfv:bCHwH թm&Yxlomzʙ|2[-C ٍR-1o!CZEJ;NǺȩ,{ܢ[ 3Wj\6alCo2ɪ|T%'DZD;fo9vYѯsw? f۬oD ý?c\ķH6"lGzU2)V `IJ0΋fYZ9f(XŖ$JS]_? HGꌳT6-(B wʖ+Qj\Xl(g3Pv/p0μMAeErWn9K(WASpٗ&q!,3V!2@K`So=NaZLQS Ղz}-`0*Ur&pJAsHcD: vޛj; o#NYyho4|H-j'0Q9VOW6r^ ە5l A!?Wx!8ӜR= L"4ԕYJ/cQUtbOQ9DB8qUH{. #^Րwq0 AdfX r˽JO"XyI?u 5X}[b@K{])*+ %襳m6tC)~Zh(sŭmo.O+|o gQ{YE ޅR-;{.?l2f*OdAw^aΑAQqmFhG"3'[zOtrs&0E. 
/Lrt4oU_ 5;^(ewS^TfO'Se',gڣ4n7!_F%.Pp@PD÷6Z- T6 6T ,El>x PAa \saWM^۝cǼAILb IT@{ +her,!lrBev]Z`L~OtO8\j j28%tܦIEcN"QPhW+(_~k蜦9M]"вi/FKI A`(坶 ]rq{>K)ל9ßb#9'x YJLs VtTJV,磗oORg$"XR}.yU>>4mpMڅ Zu"ȼyv7| OFͬESF&_-8LVaDV،=s=7Udr 1 | l*Ti,}(N~M`u%ePxpMWMSqt42Cg09(W357֟rVhTJ VNS">19'> W^S.$+<5I۲9ÉxWo֧5rPAE{fܶGF[:.dY?kjA:uEֈ %xR0~WvJJE?&cAౡ^Me]oV:VBj72RKްm ?A>65AA(kbd(.@r@$4Rdc:d,oA%| 9,fZ$XeP(ҠwЛǍM7%\&bv[* R vAͰP;J+]/vϪ~Cm},w[M܂; oJt¢v8h1VSnM ]^@Ԓ5cὉ+j5&LzQ4JJd%jgPQڙ0|5Y!fJ"UR VI$.2IՕR_J[, ^5YRju=JD0&*85χwBkT/1"JFA] 2N *hk )- &]Yꍾ0ՙU-kUjxn0J,]|4p"a7PtP:xX(=oN4*QOuoi#i4iZ׌43R7\/KGd@B=U \/?i@__;xjB亩|3)g/CEz.6֯qztG˕R$l0qpi7Jܹ3$kO >Xv٪S'~[mE JI[5!:sX.,ÕZOB}8Cn(g(=~F JLΞʶ&+iEͅ9 9*~ou.*E=jHmQg!}'y9ʠ!69~oTay4RCs.kڑtSthAd.7R Z`1b/wh&>gYGTjŷ3S]Pe"< 7tk ]:p`HYyQ!? ?~`:ZxR>t{{* alJ~8V+XlAI:1~%%;+qJttR(ewׂiQ.$fNwKed(ٝVG;i_W&4Cc F E\e݂]-W=T,$M&:nPpm}[&f#@ܺۓXʆ4< T< N4$Iy%P5XjBh?Z&ߟ%sqh >UU@Y~SX1Q5~eЭ}Qh/V,9+tTe'g_etj,VabTk3cqi\Dۧ )䟭WKt+э!4grc'oF$-:Z}j6Ur Q8K?4ߪ$]w]EtZoFR!7 \@m_IT/ۀMϾ[[DD8xblI5ϴLZdQpg~ }+OtJ 0Ic<[?t _G6L_x\i }HZ}}4$G ӚZ&1N$- A H[V;"nG=@Z\IdY\ l6p}Hm?jAyeH'6 g>^pq=Ze'J@Wj`Ë 9s+QP AI?;FN*uw1H!3?\xh]}!&ic$Q,7~ "_SKr%URP.y~KTݨ3v]T|Hh/dOKz5*V{vm oX0ۄ1Sx`3vP+=w:DN$[S%#1J$"W2q/]VF=(S"zRK[njTւOb|A VZ]fJ, d+T2U&n5I|k ä+݉`A/ExhUZOa"FS;HrM2 P>U # P`IoEnjok!kKH?I7Jx9xTeUvzR3MZ(V MmDEэ+\pMSP?>fetv(c;)Rlj%˖+AM^ J,ɍ~d#u[QXD4_2(@ yPNaW2wnh&NcՅaqyyVOTaUgwY; "QaYap3910^Ko?k'R ,j&y `ރva5b;L |Ŀ &g("LƒyQy<:,[qF迬S+|O < 9ȯ~&ʝۙݭ T^W>E29?^+j6I8~_*i sAYlj#g!_٠x>SU^Dޣsm!-pCl|]A}UѢ7芔5ۻZu`|)zOHۼl= ~FMS$#jO3`U:ŨK5zq0 Ky\+o_c~$(PSSpFրbR=sb^m`y]wR])/xWSD;ˤstQ֑SZlLuzO=XOd[M1U&>°a3ƖYkc&֣5٦ƭϾIU$EjW ]>sko2nA HՊ PX_;f$&t tӳ=ۼ17kS/}zz{ yά/>]M_@IoZn5tD\|Quۊx.o,A,kKHՄ @ ABY1a^oB'W#xaZx=LNKVF_1AtqFWt2J2AV^`4~aK'PjN G7$Nb L*-S^2 8r!КnE Pw6ID;<$$MB"=yl]/\}w >n2$ˆǤ,& J U+@h/Dj P&\=i T4ԉ4#W5EڰiRJwKۡLBbwJ%OF ,sQF݂TEƿ.MgAڧPʑSڒwHʈÉl)Q&g0 ]3O6`n<*A,{pѳ4Nor:_YMH AEr@l( cf[ۏ{C9eC}쿚p@6M=/a3 &N&jm!%X)&,* aEL3m?=3 endstream endobj 395 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH 
H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 386 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-045.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 396 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F3 397 0 R>> /ExtGState << >>/ColorSpace << /sRGB 398 0 R >>>> /Length 105977 /Filter /FlateDecode >> stream xɮ%v:ϯ8Cr^@IQ$ ۾#bqTUDxJK}:_~# !|ӏ?W,???[*q돿|ş?_/J?㥬/~Uv9r^YB^_[F9WgnV8GWP)^_W?߾ӏhi柹~\~ w\ j\g?˫)*qADz>  ~dNi嵕`U^[^]}Uo?g3<7H?Kg']q =?3s *qGމo"}3bֿ C+h_"(/qU|^>Ӌ҉cppZ p$ y5d\?k"]o¯Xjoi \Ss {-W3e~-4֞%=jz.m_^酙"a_xQ-{[/Q^Szjx+3&ã@,\O=axDc\υKϼ?cu˜o+}*o28?^xG/~gjf{_>V}+&f3\qDzj߰w=1=?[/9F'~ ?_?oma`88#ƫdLiUd&<_L8yUͷ셛˜OWg~_~,J_MMpk~ް~UQC5g V@`U/Fp c4^ 7]틚Ok>.܈&塿-^'(k~0"zAzi? _^OR5˳)~0} T x1UϵEì_VA:?^rGl5տyM+_'pycq~_;u?~œk>}>7t`}p~_>/\Dy> [;ܱ76tN#rj?6//g=j<_mv]x78iu{jO_lz{]|_4?Ϳ^t?_xhbx^k˜ȟYyU{|O۱¹=kjF(\? 
U] 9ǽ h_ W^ߪ_GoZΫ\%}IяQ=;gzOa7[ 7h]T:ې?k~s'o_ga?#}0~C~|>__jMa(4=l~7u^P^'}o^j#֞L/!%Cc]6MX&]~8k+}奵-Q^X,R%za|iL՞|JGWu~-j/}Ty60׈1~quM\UA~?v%l_igv% -˟ȟ'k''> c\-Wl_Tџk{q_>o_}AxR}p?J{rD@TɇGzNyAp'MC8dž3ʋ_+k 3!z g/tŕč&?͐',nӌc_;߄|2WgyO?UܧNHIC /=ۛu=u_'蟶>1#7&IFǙu \ߌmeV{^o'0\Q,qIg}k>p[ݿJ/oݯ 2?n]W6ٜ_8=IXqzpuS!QLI)g7O޸$1ah$ȼ-W\7")$\0뗯I_8n'sFz}y+k{T߆C+^i/%ܰF"5qo<_@K"VjOQ?ΟxHP?a|o[ZukY5A/$+ek P􂺮/8zcŭ=m]ՂBpe4/<onǛfĩªכf;ހ6cy(~;v=Cޤ%x_:n{{F&bɴ&u&Lwj[ul_?'2' C:8N2c<,R ile<*kyA~OgߘC;&eqYdw\YffP˟7ãrh:oT}'?˵+|#LN/kg֟xwPsq>ul_?t 2=_>]e2>'eezEv|ê_Rc{5RKhC}^/zgg\!B eKQ=_JtqU} 뻥^xrm_Qj=RfF,s2 Jy$إ'eqwOP{ O8}ST~W=(x->m0O4SEG =y~E9B{ wЁwtKLs>|9Čjz[ v()vГQ?aݙiHfcǖ SICl5HC5pVJ,^Jr83X.x/x ˳猆#z7ڵ>S_#f`RUB>{,pV>WZԞUշjZ^C>uF&ZRMTZ__i3~Uu'MSw8Glc|4۳(Nyݞ -(xp~nOBz"һA'^@}$X,<.rn<>c^cJ {-qU했wy:qVYG3pW˦sr9n{k|8>˰vq&udpY6o53mW>>#3df݋Nl0w߲-;͜ݤ3: iۮgn VC_`s%&US:Ҩkqð9ݛ;:4|5}6NͿAl܇=Eh7C['@P[Lcx5w>yMK%(Ykjf;UНtNA ^ߜp K.l~nANF>D椻p^A+]?V%ͻf-c(Ç_Vȳf{Hy;~Oxw춼:;,b[  {M8l~愠a/Ǔta>^Cr}v FܩaN'8xBr6 s˙C ݙ ܌kpv)F Y^n7CHX+y2 #¹IL8݇h1{y8fpd5Yo~ko=YyoƓ̹>&F/C]6e{gn٩bWCp֥0rfLsf|!q*C1yo'=ؿ6g=9qh ]3D舝o&NWq%؝Dz"醳M^~WLmST9|ߚm3{ v mzMprfMͪN:j|8̰E: Uxo؈мԀ]Z f2gnjҁR3Ia <W2?\,vO Yj$ᬲ'I K Nb˻6tƖ})S+ҳHvqڙTi pU?gsOr"hXof~[4-"JG[!ÚY8, ދΉ`GV(@ ے+8߉Wl}~@dNz/(Ѳ95>#oeNt5P&q 8>D^OjoTځHw\iw_?`8im4 UQniL0aYCvY ˜-5(ꀳƨy!vs-yVJ\e+5gA~c gp`]\FEiOnP:7e]L RܻIFq3 PiN"'2xa Qރ8 TpmTb]f,`"uw%h+Г%@ՋmFt?BBּ|L,ޠf%)5?Ozk%TNO""_[ΉmTϒGB 2Fɟ$R>Ja1<b7ƫkM%E @|%‚~xɇ%Yɞud$N-^o S]Eec]D<>ܷ=_WIL}f9e Tw1R#lB}NK^׾Vނ0 m&'cN>1]CYwdi }&<L.-$k͵2 rU޳7fT|q =wgȉVWX ͐0LaTL.r"0y \sE%fIf!UE&4Y2fokui*&$5Gnz>INTx#iA_ЊsRk1%Osڃ`֐9y_ #2)qY=q7l nU*B7߿axV@PV:ٷ`H:Ww$ oo~ EwzI.hke[;GD;WvFţWR^y*kFjwTO9Z Dt٠Uh]OpL2av_RJ|MXwB.cl=ѱ/.Sl)DKsd5 Z7o'(;r^W1V>oZ4|w\u^SMg3!z{u<τH&|`ݓ|ˏGqy+aޑAupK] {&Xp׏֏\x7\Gs:1͂9)hh+AtD-h5ŝ&[C6e6GKR躨1UA\ ތMgH_]ݒa~wLӾ(εYɗwUG8_U-ru~xux} GdCkS8:HŽ,amWxmiO#e^Ӂl|ɝ׬i#Py"Ln9a'ᙣ) zL0ǡFgvq`jC'|ԴY!Zdn_????گ?WOd H_|}q} Nsw3z?﯁':j瘪?8eoǏ׽ h$/&qD#n$(DDg@| #?#~ao"-t<z0`VGɍ$G=9XD>+is`Z' ‰IW #E|<2B Y"\ԞaYĆ"ZĻYĐHC1^8M7?jD{4 9r# !8*ѝFYAHrg3HQqu|eD<`HE:x"j'."tJ ?ƛ^1ςp K| ,"j0) ؙ''\a. 
bF&՟4N%xe\XFx2f/! Ve0kH-Ca.^o FaaS0T4QLdza{ m}gMY8t7޳0f>?T}w{~՝15~Q]+-nCY }@_NSӥ?2h_0}Y; w ?vbJnJ!69dfB!^'®s\v:SO9V?>+.\ )a(ʀ OxUQ(eWcgYjOCcq!@kD%\o|ozmp5k>f9k~T/,t ˑ 塿CD:|+G̮|~\eUxi?@^_)\'e%ZNPUQluK>771&5wWy $WU"̨X򦖊đ.c`jI#ZQU|?C~\~YԮ# gh{/~h] T_O"&:/, |_H?c3j/b'HS@p}cm cݎt{c~dv}0]ɉޘ B\ "+*>AIm0uM |z7}Ej?6/ F> 0颍GC?k^ %\Lo8qFque1bXtRzBC%NY_9I;? ?ޠ9~ߐ?kKMѵ^"?u_Xf?@Ҵ=ka~?![^^#jЀh0s|@қ+_>hObR9?(:T{ jCߟHoIZ*p" H~!Lj_}>RK~~OQaFHI "IL$R>6EIҦa e L -kG||F=nj3ojDɗuG!0#|iӒv{X3q e8$c!pS4p$N?5D>0ݨ{U wv?4 ȝ Xi$.fNo߯fu>ҲU2Nθm]}$qns¼?0?(h}B!q<QYA(kՇze s L 6}Q%sO ~&saqoDypj}IRG/WSQ:Oy[Lg:&KM !/>p 85|> РtwyU4Iu7w&8_LDfax F{:ä {_P*}syt5# c81=f#XK:pa{&q!M~IJP~ex7<=>8 ξA6kAːb[q,˓NG$2髬aŝ՞ HYg?߁g砟^{C޾CxhX:*I/Z߃uCn?U4OYV1jӄs;c-}> SUy]1{w>#_eͺ 3lվ׉4w7vosz~twkeNL2UZ>)HpO_,ˡ f,c<⾯Ao DL)jx~7.7ATobH$ȼ-o~qt #[L(I劣q[t4u$B•[sz-Ԭhd}m"Z^۷z g>xwƨW`bnFYUM.zw}kUhi]1/X W<ԟ9ʟ|U(gJ7UVۋWdc}lU߾vq~Xyª\~fy?|^^;O%MW׬^quwߗ|mJb.ޖWl{<@S~ٿ\%z7wnth3b@N?V -N8;$$ {e##pB:iCIN-@ >N0˔{3(8 9I.7= G`e0Wx:O\C>ctN5=hEx_r$Ёw:^a {ci *C}Byė1q^ v)vГQ?aݙi8v #>nj[w5#xk7S_#d 6 JV,>{!O+Z~ocCgS{ WWG<\x4P!V35Q`ppBy]Hȁmo&i=;4{vhRӐn I}sb9_>d9'DO2,y8[ : :+^XFprv0Ml'zM-O6dj." r8oebYҋdZD8Q[}SB񼽹􃦄^<狨vj[GG"vV1dt]vIʊ^7b<9 `ȳ=MpOh˹,|s<ߏ< WHjAxB5ΡiFt}/8~ qeGn<:=1Bw ȮN γǻN,I?.o`;exNן\+)u7l0\VYܔ˅|Ppv xo;4۸Cc< -.RWppޟ5zw9x$^llXw~` C눌\Z3;z\vXP7w8IUWVy{o@^vSx4|wroϠ>n^aS_-@/p.w:<0MZKJ?dHտ9/*ANXʳBF"}%n%&5`~{oc:gڰ7!GWDpip@we)Mܷv99aMwF3` Ȯ,:u8t7^dTz,t"$7)Z\nPL)ص7x#& u<4hM4Q pZoz VU`=Ɲdkh4ppP5{HD]Fܳl#)=ҝ?&8q{YݫG'$ @T+pF ]91KG=̡6t 0|]~ 0Lz>JDA=\ED#wgdS#JeMN쮉bL*{R;lYav"8]g!OJI\ e:fx+ӄ Yg{=gc+84+ƕD]U=ЌYesvF*n[+3BgxL =sBeL%(A WPr$:*I={ :r:W=UUO͆H?#y>W,b5WC|K-{ܽ/hgaVt6F 8q V>T=ӶoLo}! 
)^]-zW'Tx"WPxDc#ҕ+lΪDc)6nV9o8wųOeW _\mm߬1Ԅ>O?xGdEՄ3^Yl"5bC#(.®{=I~{D fW= = }$ Yݙ }8b5+(3؝QFӾS=qYs_:}wY ^\_2R*l,#3WFFUY,# g"UWFi)#2y(/n ȏm(?J:l2IyT7zTG-FY4|p  _2qS}:zGZgG']1"<)V \o 6HweP{FeG@b1(HOF SBUXF\) 1JK 04xwUPU}`tDuˈp^A%E982RU,#E|XcetD6+S#BJbY,u*rdqB7"N }b _ JS2B֗22?YFFWT622mYF"h(w0࿱YFb9XFp7Ș0S6ȁ>Įc/*6ܶ؟:YFv9XFVo֏2Yw{} nUd0 Ybr22.|1ỵYFfK; bϠ>UP,#?UP,#QK7Lb9AD 22aYEgedfQQc,=c?Mb86+f$sXFf+"b̮?"Lxo 22AYF&{7DT2rH߬_f2ed}PYF&(%dW,#WuLD=gr` 3vzLeQ7J6MedBYF&X6YBAe9?zI$i°˳eZ22YF&o 22yN2B22h~iE"=h50tef~YFoeDXFn'f~YF| 7Ȁfa2BYEl}>id 2Bg2B22 m֑&xm4 lAVp ,!Y8F XF]LV%M7C`kPed4dedOL. -ki.]fY#}s}22|4nY{ʱ=d j/ r`?X3V!H!XFn\Y_Dd£KJ#~;XF -n'ߊeQ,#V?hJ9$V;(OedWBwkvx,#6k,eЛk G{ 0oedlwͱuj /"g_ˈ>b`,#226l?22}p=m7622XEG5`YAUR 2{8 ؇oed fyN,#c`amE7:XQ22XEsFtSW0b]R~~/Ͽa>bK~~$z2,MP5VdŽsDG,#,]GQFWIdDDO3o$'Op`IQY\x^ņ>eΊ%w:u]$DdY׸DҫbRcbD,ˈObYVgea,Ŵ%Ȓp,ҩP,#&MHi/혽 b)6?b2}mTŒL^Vec00[PDX̯8;m%YFJ}T,# xj7m,#1 ?p][,#tz_d)@tH' _%%3^2ȏ4)n3hB"fYE'Y2R*"xev-wxcE,tد*gL'T^J>51>2b)YFJRq-t:*Yu^'KzjXF lR\۸6xԟd!I_bSO8R_ZUxUԋeda0,>zRS;xT< *sy}T[a܇2b_UpF8?H,# 7XHb)YF#Ci`!ˈMXFNԾy(.y&A2bfQe#YIt,<߁F3-=:i;;1fa Avv=C^W\uڞ(IBѭzP`(bzw'Y5Ƈ7OM4T҇Y@^d1 ԸX@ s~X,!|$kȞ^O,O@,| s} u|.{|Ao3- IN㕎_2b+,2rXd!(z'T J΍z LтwrFѢic-@ymŐep,#.9I+|o?#}X5# JyMϽc3掝{D5}Sv 2I+jkE8~_+UD#kzPg'h6?{=v:\xHd%'z'kO>>/cN5uNt`X-ˈq$,#&ˈI*bx\1Y;4,# BД>2b ;U9V7Ͽ77͚`2bx,#z.O ,#yeNW<VS\\e f1\/o,)=i,2bHIfpLˈg=XF4beD/YtXFlˈ'^6nj=O?{Z2XFtŠeDzH/VQ?NcN@W"#:=M++ps*\*2Z#8>6<޽s;,{Ȳqp%wY`s:M|^T2QN/y!P@AEDrO.I;S,+$CpvӗN?/\ƙ2ږZDƭIN9?&v3dVZHZu{(n [FUt8ئl"ly!ѓ!!>;@}sP{yN\!Ǽ\fz agi /vsG{ެ?F5SF:C1#K.s}̹ &̶alr3^}G'z\{!sȅ>^nh7evM¬=M&ȯ]O /ٶv͛ N+0 [ U6C nNn,Bd#;Ƃt~S74)P!{?qkX9+kvbDܱ>$re {$S=% Nwc&6mgːC_e\DRw%C[Mć$jN=!X]mc⺱.pt6ZEht')v agog(W'k&V{(9vE}a&0GX,ۂ;&\apCi;`eNur:dj%5GSd~A܂M5^M^dBS UjAU'E91brX}nEǵ8NrYA^1F|T \𣷍6dMjC(h c儽nX2%2`>Nߚ2X.H!Tz)3diC+lP51; ~N8:hҝ愎 ,FDlqmYMj@\;ذJ jG.}+r.KR;Ĉ}"CJvIabeѥ3\i~r2hi'oQ܃S}ӽ~B m5 00oVmб1ʌm7])=nM[˄-8&|ܻ bvw*B@q <ŌqgRd-,CÛӂG5y \DW$RGGD g1\=1BX*t ds7A`\]Qꨠwo+DyԊeXz&BTwێhxn4xp?&2\UfuGXct?*GȯT lHX:3vo2w"ŕ,%X?[D(wy}wkĚQqNV 
ɇ팷9e_l׹%5yb"WH!Kc=beøތqit'ضv.-C#v&1 jrF6o} wNe^\^>7婶ltiI {tPVJz<3zjϻlL͹["b|m5`2NWD;b RqΖb\sz7 -bIs&7׮滜{QiȌ #%C|5*Ɛs)pMo:BRqkR|^XQ}cf{"g?5dX8Aц3 Y)j`OF#+:bNfXjizC/.VKx4y+EQ,?#B"MVMJ#̧ R,ܡ-(V+ XQJpt@2R17KU?>`"fXxnQW ^R0X*S+BlVP&pڬg܈Y~Vw*6BLQP:MyL>yȢ¨. AWs$!+6uj` O` 7tWDg]^QE /'V kR}%\Lo81Y)*Bbf6+} /Yft{ʐRT0oV 'Rh=B BXy{2W\ܘ,RVw^QF9?+3 ^X)}$VOҏ\v#˜R%V +,aijqڟJaQ c ^asyIKFt<z^WFS5|F~'##w~A;SĹJ"TV0qs|6vD?#i>tmx`>5;Rw[>OIJQaS#8.̈I<_bb6D'Rj, RT1JayRp p.X)`XԎTsU銩p#V~Eox(/}cg؇B?-WT?ɺCVQ ObXxX)c3&kޓŒesm" /hM+>JbI+ gHubf8PO++_:=chU^=ӊ+ݾ0D,_K0_*6R,\aՅX-JXpfXQ;iGhˏ Yd1d˛ST@}M]_"ׁ6bX{', uP JatO d`S=m_yqo~QX),ڬ?RY\V{"CLߠ߁u⌕Mt'O|?6A&ԧ 7Ӹ\i?=Qw:>̺TJpS[d6S}k FP:Q呅>>YZ>a_V쯇5< yX)oϨ_p7蛛8믃٘6dW,,IXJqck/SՒ |2FQM<V zMUVJ&Rp.gi:pzRʵ>qׯnٯ}m 7k{sr}lL/Š/V XJa: FY f3Qb_N=t}1lV,*M֏7ڞ!h?VOo8RS+nY)Gf3{W\ut|6XTu> o oN ub͐=c(Pғdv, C2O2 |MX$Nh~ WRSm$t̒,[ET(pLgY)s (#ڱw}Ύ2%ET}+q#+|gsacq ~08v|8<0mqZ^zebߘG Cu+K=_B{\/Y/Zo}E'|mb{ȟ0{bk0y1l1~g;` RN_; ; G b޵6"c{dw7vܰhQT1~wNǶbJax|mVnZrR >Fl9s?aΠTy;fޮHևբi蟲Oo&+D >a57~hdPH?e'TSk z#b,/\3Fy O|ϗǎarxmP{[|1qǨ`0V>X)*JavPqCf4X)4bB/VYr@l#Px-/}R6+=_2߰Ok`*]+7lփp9X)?BR~Re|^V:JX)$+$b:Jqdu To~Kt.$^ST$yhUf=67>5e9A|Wk2%vK3O<'b$pU OQs؃f9Nuޝaxw]S}_#)sv#مl)E{` >Nbk kǽ mS>&g`_Kx:}K}LD*Y *vZq+1mi߯!Nܚ6=*!1:a7-^wOvKZ$3{52lOM["}{i3Avܣa,Xcjɉ MR찷ź :-y#z(a&cT<fmYZ^R/tR@rY4mev7͉̽MmsY@\?z@foj>&6t[7^n\!YC9xKm8‰%11gjMzCB(Mc =l"-KN hk.;B0W6{WXh]\MS='>6并`7^,LLugd=mA%F iUC]=MHXN0 w<+,*ɒtb=B6p8k?5O#O% hV҈3Dr ~^=ȉ w ;nM;?cDy6`hus: &9o9槦\yaJǾs|=Xݾ=="u' 79Wٞ\g}Ӕߚu q L^~n}1ыMbDEZ=,\xs0l^`otiTětz8 ǕcZQ !T ;N=hs #)(Uj];[ .j;#DGWUY\kCP!&+HOnQݖ}i_a}F p [7B8uŎ%L!뻇bmR6.xF1ojqx5ZPk!o8^DvsAdF=qV Ș5bУLiҬBhn㯭`ωB>t.-n !Ą-y*4s9q&mK3QV @LbTAIYo"޻ ]Vշޘ}hUZ*ߟ+H&MbHtO0*Ai`RK`z ܔ?NHT',P5EԦ!`T cS;[%j;,UYk+.sW FgK8d{~>U[n~n 0Or@pRXI)_kv,~KTN7N5V7pTp!UcR {hIgSU_E|]vACR R>~YҲ3 8lr:wl^_ߗsuenk W:MEr|W̷ѷL<ŠY'*_:>[\ChM7ƯqS4V_ҔꧽDRX W`qqMaN09#^@+zJ2``g}$ ٷjfbe`_n8и48췮 ش:u撣$xUjFFD5M@/ls$_k,12[ jsGGxR#bLBG3JwD#9r/ 1 6!~UzeJ|˦j>rc~g {_^sdg>:ߐ{f >D:sɃ寍7 _LohD{*>1{Y&ҟtԑ ﴖSbHbm;[L|z{̇8>|x_xQU\z~ϑ[sQFo8F~]fo#_Y8 
?|s~3-@:W~6ɛkGӊد<2+_!QX/)dz0yr=y#?2G񚜟l~{od>Cy꘏t GdGnd?}=e~q"7̯ #milqlHeO}}˦T/3}Crh޿e`~ҳg?;W+~n/]Fy>_,V>a2+d_l7sL0.O\@V3[<ٽlXpG쯳>CyUѾYɏ'dv}ϳ|9v6O{VfZ<&+;Yo+]f[1[;#_뭛ׇW;+<_XZg^|w믗ﱿ.'>\~s~]v]Kp3yas;_Ny;_jܯ;3%^s޿wy">*Ͽv˯~1~vv|[rد3Z{IgwD6#7'Vs~~ߧwҴV~}_>-c?~]to}o5Ot|2S2IsNnw29ɘ_\ﯩ=fxS^RA{+ﲩ˚'nog6թ~krǣ/_3%?}70yC eәl3[Grf-17uA?]v^[CSQh#ևT r\,/ly`sP/Wp_r>sq9HLE0yl|%vQ\8߭ ]c-.Ez<3ۇUNmr)}48od_}E65gGG>>f{v0YMHv -O>(}OAh+Yvv*7^nՑ}~<}9Mc?orb{7aOW ޷wn3gUߴ,X g{GrA;G9m.Wd'ymo ّ֠z3zx sor /|n0aW~gj?3Ooٞ`1X:k}OR68y%/?y8C3[oA?!أѭny>ʋba:rǿ7/+/y꽼 Vٰ{dk?CK7؞>8p{8>yqLa8 O;oO[}gvw{Fl<T_~tշ}Yo/W7WQ).^IrVފ^ O_5/nlۛ ՞Dw5=I7vXq]5MFu5>q#MhƵ1θaojό\j vnogZx{\pq" ]^QZo2Mpk@"0E oHxˢ7Ԙ/]F#6楑KѾA#=,d_->?'__)]o@Aq W/oOAzh 6~lqӲi6 P';\!/(?F3?¦楏N"_6n: թCBLJC\ :ӣ} ]:D6} *|i_)7|?~H_/׮1ZO oBGX2:;9s~r^̗*S ھ>dW}?y׷ Ook}cM7txƊr~/wm~}c2]Sчѵ܂OPa{ݙy 9+_uGeKwxkH/y 5/[,rͽn鵾5?{m%lh￷[p-qKֱ3ɡ_ mֿr d&ֽ֜=WwuxMgr<Ǻ)yo+.ʠbgB_ Wzf.s~|߼,T3wyoz}}o27}Dzzo\^='Q.r2th|iYʅmoa6̇<_;e֯r}orc1_r/_1~J5ڷy ;x;ګ*,nHo2?䬘\MpM[X@cS7frBnA'~ߣ<`g`D${6wϾڱ~=~#;OCqdc}`|剰~+wk=2|GXRkNH14jmov6^*h)t? 
&i⒵fO5bQ.UZ\ќ⛖@}`d dUqX38=HYn7kԶUm~{('s?N8dY`7;ǙڗXB쉸z <L8pFɁ-,iܢAxsn O2\\",ru}(O[M`"0R3=Emic.!$ZRMb&;:yKDmQ!V%DJbB|bS֯fky Ŧr|+bc%DD,7?X bYHijUO" &-;^}R'ۙpN|iEJ,ކ-U_.N*'Ԑz ĮN87}MūkObdž|eC,y#mFS_Û:sK~[H-Q)7RogPܣv9",, &wqehz/zW=:YuDgK{VPe~}=OO5MTdQۅ(TMj /׈S/WALl @m; B[:L!wM{J/u3Ôﲯˡ39d` >qʞo@Y*r~RTzpN0\e6z:mT+"Sڻ yks+]r&4%,h}Ԉ„G(q~fDv+/ʣԈ3Y[Cx) l=Q3Vl/hz'N264|f/`MU&.KIm{áf¿=qHdy(d;;(?kE9"Z}}>3..2>&f,A9֝ĤҚ_-WzM"Uf[ Z"~ڇo# *pn{^?l< [FA vwop+ s$-ޠpRgDeiYY-O\є?0 ?j z"w)ms(j9u|m`(۽O8[\Pt V2AL>5y 1~5O9$H>herCXRmĘv>YK+od2Q fV}>fAfdbZKe2 pG14$4r@_S7n.a+|WRHzB0#o ~k_dk!w-8* @?ޕ;Ď 5owG ң֕=t *=kߩx)Ðwy7وWv^hqѷhoLb n`]̵ 굱SƆѕs= m= O*!UsG ?,λbb;qt}kQBfMIDPJl?POvɖ] Z@v"1#V](SBȅ*/J!ZlKԳojt5QrѰ-ۇ LbZNjg^I֞| =؅(j, u,-%R[O)dL~%q=}2+(C&pIoK : nH";~!xUvZfX^}k#>KփrKS{X߿\ih=}W) LoEu;^л~11*4@ZWۗ~o0朋xfI|PE2mъuB]^=nlhZB6㶅M绲vK+Zv^.$uRan)?*rlsw@GsPzצ^G[:oNfʨ_} |.6> Ʒ Bo.o'ͱH`eV|) ;{BՄ~U3WXR%+4}H2Q [|0&  vJGY5hcԗ ǝT1+I'HXb%i2ՈSqP]s~tkfwn/\< S_u 0I6嗍ѩ`6!+j#5.ܯ$Nc_{?lrk}WG:&t5*ۯ_a zxqA} |Kt~raBY?tgb支 */;;VnaV6QQ`!#~682q`*?r ٷ{)a w.PUeJ_Nyl8ٔ׬+sl]eC407c=J+WmSytRߖWG QTph:Wx@f{?ygҦSiQ}qUf'7Sb%DBꪾD˯Uh~t.*U6oJD`J"2`= R{7WEҜKDrK"MgXT .|CsdJ& sr!ֹգ?JCѥ,˳NTCΔi>?Ȥ [`mn5Emn탯?o^prTz#fB"Z[_E6:}-^S/N.XgU$S[0:pHqW9&kiɪdu.N. ,hrqҌs5bv%Br8@^EKa hhص1iHF9&!: /?h`h%09^N (h7؍eon9vfo;h7h XfsN yfH1Я54G>ߓϵ4Gv &#ts 澻9>;aFOnNsK4G'h-x_] YG^Jh 9t[fF㨟[YO[} b8  'BOJl #;Mt<=?tg'-.oA4U:?AO9Aa.O7<(Hs|䅷YCn/Vt3;ȽC{avwG#w\1_=.4CIs|d#oƊ8극^7sx#;.&սW e~~o1Hs|+.Ye}9rkx7rj+ )3\cY8qMyB-Ѽ[}s#hgZh 뗐~A6Ÿ.gOow7Nk64ʱ_kKs$ͱoxfnsbϓ4IslO?As|d,;<Y~EmWa}dgB~-\֘kB̺ZS<t-josh [by xnЖT~Ł@6_fW7^MT=8>V3imA9徿˛Nw7j߁RX9^]Թ^2bx6gW%C8i3Li+ y9@y?}/iQV9,im=cلtp'w$JFZV ݿiҝrW+`-EI؅(?O? 
r<ޣ`G1~N}<5߻Sxy]~Mc~?CF iAX?!d5ߎ}X/)G6P spd-<ٍ4ړ$!Rݲx/vw"i h/hwfT/(\<#hw"ͯ6IVP>%eLrp_Hs\:-qwMW%quh7}ᄜ?4OAApuBabǾj: 5)]q.RH2fvј#8U^xh>J#w'(tqAͱN2R65Xq$ͱ8/V Bu\O?{Kw#Jnb|_D?i*`Hkl-r}W99xHka1} З}o, beD9ӡt0gX\Iߓ'\.}HauɍER( 1/y[Hsl2iW9E{llFiO ̛M7}x Isl2B4G^пiNs|d)L_?f臡dx!~y{~X0IϨo3\^џ 93Tן߃kƛ4ǖʯ?As| `~44Q[a9n?Asl #q˄=CGڿ9i_ё4G^p{n|"MM祷'<-|Ʌe⼛p"qnr#g2oyo'w6N+| ;q}xEOrb$ao^~tws=^o7o{5~\}>a {ON95ؓ;?^j=;`Mle?? a hq%ͱiDU'L&µpZ:9d`77WE7oǍ54{|16`Zo2MpkCEa:џۿh[|"@u;d l hQ~oqn93/:IFƿ78 BnbŠFcXWJL)D:XDF0_o+_3d6#1CKD4:}渹!iG;g.9l8h@7-ˉ;VlP';\!9~eh#1} <K7Ӹt-E)Lrᅬ(AE~ %߇h~Cب5ڄf? :Y&~ =lxIf}ˬv$=ztί^X&3?]1_ګo1wh_̏4O'xԷ,ook}c ǖǛ6V[~oߧ% s{Tl uo#3hAy%NP9]? |[}`e@2oNC\B74wyWYq˯_rC} c3ɬۼ_ YAcq/R$^"ͱОޞƽWZE~Ho-={9>AƧ3}7i;_Qo27|/A;^c {NZWM.}H/5=O2iV̧us鵽)?i_VR6#O߿4Ƕ"iLMwn 969ͱ}]Ho2wZAVyQH!96-NMf}ϊ!ͱ'hW(9Ѿ>sZc)33:>h;Ou&+lOA2MFy1?㽹W ^Sˈ$ͱɘ3|cŎ+j!mrEGȿǎ71cGrZcGc^ŗn47 `ǘ'hM~4Ylf/j2.Bg5ˠEA͘jIhQՈGFUIZcUiP~t(ͱh_D8`N=/5&G koc/X.׼G[~.&,+ N9J^=c|)[,w,4Q޼4QF8iӛ)9>Mrz}rDV9y4'hq #NkZHaAT :~G<.Httiw34++桫)bNABUj9L=u1k$~8=؂'ңʗލ>`[y*S'\ξ 휠 YU >p9v ʀ%'L+ g ]~&Map-^EAL,b-4qïm)xz:"Zdb^&p̌Ӟ+lx gSU<6"d=&h#Rcwh*s݈cYj?*˹~Lo 3^eF_G][OW()Kv#kSBbAXtN\63{dyWּ^ lq_b_mk_C_G%zSQsUfMp]8_CgaW- {d P1gF^čI=Ԥ̏)@,v"Y|K}Z]Ì (56hl o)hg~]q.Xc2؉Վ1N  t P;$ڇ Ȋ1ҋb%Ex.w0K"Bn?wJj+ p'p:wGS+$ζimƖ%p!wf+Jdm}PpQ!|rtC;!E"֧' Ǎ `jڻzg/{kPΦ+r 9K7o |:Y*[SSxH|:^}^.]]IP|(GRo@H[SpDֽqOx]pIi6,H~R\R[N7xΤl߇D7.gMfRY顢RM_;k~ebRkwjr)եB}8ENXur@#:Rl-Y1 K+ofOᦊ En;@(AUv"F,d_QS@alLP%H7ڔ t5pל Uq8D xA]K/'bU0Px-9*#CDc$-@l@, aC60$-iF 47񘙉6PhGl 8?ŨV9cɝI @i% #B|lɂūuB<3N܌K%Bu7FB+lV NZVE%}(N(rZ~$`Ja|*\ _&awAm˟MY13)3 Vv_&ŊU9"/҇ _2k4յԍ@ Zi[lp&H &EYfBIRRrʜ҄R%.40!m-^)򧩕ݿN~|*s"_.DӛdLru3tH͢Km=_>AHq}p{0-/_"QGvȩ\I) є 07HI5qU ;LP 3܇oEd~8If9HwͯUDWF׭k -i92N.52i\>yȾ&:~82e@ qSi_ P{F}pv/3`?Xʤ'qPpnHG)><\܃X;+џ`@3) 3n< /I@߅Wm0Ю2{W*o06yh7y"4DIs{pa%}*{GsO3A9KH!/ԟ`źq"БۼF {_uy{[0-7ޟvS7K!d2'~L_Gw[yAC~?5ؓ;g۳}ƃ6q<E!(XdZ7djx'n\[8'R7L vo2n9~2( )cGmL桋{t{db}2g}h94~ìs/oE'Cn.`xdR G5dϟu><3/3-IF?W_P',wnVz _LZ_Xq8ÂymfCΤA^wycyh!l2LhM`+!vZ03=.q{:lS0 fyY9tn|>%fPul *A&3Q)p4Dg^amȗt >D 
YΜ_,?~>_uP%lRE!:f&ׁE|~{{or|?z?2QG.o:lxd.b}|S铌\پ]^9;PיF̗*S uOCghU/^#F~^#$g~_#Wy/޾O5K5s}^\gY;m;Ա+2Ľ.Z4-yga=fSt0dL@#PQ0_e-?ȿo7otiMNQ[wF7ᕓWxV^3 u/2+lOkJ!C߷r}Oq&Av&d/3?g dh_i},+|WyyA& /v|Og \QU~Ď{#;3O;:_q2!y0 }|'b|&O;F?OM|Gb>Cqch" #5dRrC~qb]z*hD 8V.z= TI|O%W=a(, Z/QL_pz$N*2dOWk>_]=CwϗN=5Etf*-E^9OS!oot, <-R(j p46`フ YŦXV"RhB562շPn6+[q`oj.S-ŰdB q >כc&k'J"˗ N؁v]q*P} |m棞%6$e[%:µe@n:QRsD/NPۗ߳&ODx^Kzma)hW+9._p^F;ImKqE"%с~LXR.1V,] $~.9*6J jSˮvžToA.ŽW01= oV2#P k/0ž&dm-$"bBwEKZ~9%P|P)P&]W_ qDbvHYZk 2H؇=y*<"_]i? o h6_*5p `_lJyo=J}ƟD>;y!OxSdl\}\C ύgЁj,RM_AyB,}GÕ$kNgD5aD !+JS['}xI>Cj7م0RܚkSPá嵀\[p"qH$'L‰cr7iR_)klAC`CW@,Ej~+_GKj>`d_l }MJBOTB(ƈGb"9>\dA=7ucDT%Hg([U܌WrTk~M\l 1OM K:q)~E ^=k=Ni$OR]މQUS !Az}q7(4/ w7 |O{j2cSZUq8Z"8vرvv"i}—D`!uL5Kv.]1 shF׬tP-g<"Pֈ:~(q)ĕ!n"پu“.f?)*x C1w uԪǩb@chP& e9t3db9t̟稌LyqQ)Ae q7Ʀ H*p/e&(/BY MsHDcy84-nj4vEi &T]1Ƿ _2.OQfW'Kl߿ UR:l5qN@ W>@-PkũbX h*2q3O[?֕Af![ |^- hi"T]r&hb Be*#NKՔ8d |$㗒y φ1HVp)T ,j<2;L(Eso%Lp"Qd]Z7iYFGd-0Eupb}:x@_Qͧ;!5kι.poļeVJ@ܤk'oe<2B\]}al> bt8$ff9Tծ(ItuLSGGw2^ם?Hj\:0TZ;I4ծȅZwŜ 4B]+ؓMz Xz+L>|5ۑ@1oW%婿NP!D4D23 ⬴ 8 4V*Ư2œقc.W^C]d*\ +w }vAԥ??d+q[L‡^ BVHҵpPĘjJAY R|]}zo^yLY$7Z}S}9Y҇tSf <{I2 YikUD??n&2\N.ȅ8Ba ڱ`/2LV+;q"xǐ 40 OnTx8 * cv?wg:?v>qU$cw6N? \%?vh݆'P}؝&^C`!㑓뒧~@Cw>e?;p cw+Ow/c~tfޑ?N`6*/;l@!Z꿒'^5 ;{yP.@r <{d>Yˣ yC^!WyAް/:Rᑛc(9]vA9rq?CSf9[?vc-O?y 1xd$L_ ǧl?N฀2旳?vzB9z}Q: =xGG_l hrE&-3'36 ?;{sS?hr?(?7ys?X.zAPY?cszP8AK]{h`4/ֿ`}g6;~r]_O7AMpG3?&`~OGKǂ<:o iUMf'硍~IG-˔~?MG/ds<;d<Oȸ28Gǎ8`{Tu9cGc?9=&Ku;AFzs=`uUCEpG]>=C?h97;h)?vt#ocw16%|vvw?[}>~ O oddzBO. 
>~Ky:1\f7s@d<M6xpX_P Vr5CQX.]ޜ/@:'Z'mQ&cױ[Pf62 8؟ = GKMNyo?]IG[xk?hGaEg슅QB։JG=Fj/@˾y/?Zl?o,#'`k4O߼.q%h~c{~sѮ^ۅDn_cx\_QAgҿw:v]2+:߯`%Ͻ>-~?.d~}>L]tRisb~FS%qeG?DSg@rLc)w"鯜keR X!N~xHGBa(7mr?}ۧpL-|xE:|L(eo8eo5`ڃH I9~7~?ݞnώ$O77<wr 7@D&S;pZ@;ZhxHj ˈYtxag{49K_|Anﰸyҽ>w7kZ Hk/jK@=a~oo2옻ۿh\|c$ۼMv>7_C_c|/,ny엃rAz?M2S]n|A%j<||B//r~3쏴 a9k]t&B]6Y1owyo32d3h-fZ£oo@##m0d0eʀu7MrGĈM<@!ul *glZ{6lʹQ']'% /dSo: !'=G6HuP6)h^oԙN1tP +QF8:v,x}_# #Yr@y^b|}N&#}_^g1ҫL[_gT?lOCί%ٮYc<6m<~o7oԩv/kb=GO/>cqRg;m;z2G K?=Ѵnಊ[\^or13gc,w-M.?wk~[2oF֞. d)/m?x1oo5~l{Ƌ B{&Sk{eg#SfM6[:ǧ߿unl&W9;cgW79ZcKF|ޘ_嘏7cQ!/Ο2s$/Nzhoa}.9_7lI?i1?O/apvQnEG?_;;hrL?h\>;lxM`k;o lwE"}xwiɬY1d4n'#d)Ѿ>GWئS({f9Cv{[7=쑅;˛_+^#]^>w'Mίr#vT" ;И']FXoC>1gҎyc 9Ì퍷[{'r&ae]0129"5dcF^lq&rRZCh.o/].:=oL_ho/5?@x.dd o2~%GL [X˫9Cǎȯ[ !#?xil< @gt"'l^g2rAOm/xqoK˳ џXaye{d^-K ǻ2Arw 9"5О{ ۫<GhW [ZD'n8c`O?ROG%ۉOϨvQk /5=#5OBAeg8 /'E 6|G3񌏴n6g98&$>;wP-N;nNϛA/"q7&tB #/X#j/ (!`9 ` /$P㍨Q͎u›۫vqHH$CYBj0{?:a9澲Qi[UڱD`3/[ͶE$-n_> hv z|R$\FqR9aFƚ8I YSxPDy6?Q3{!vL%:1LWG&؄ 1l4E'&<-H!DSzg֟v"LLC\Ob뾆/5Bi,NWq1vL#47Fh8'fbZyXp߇ėOȖI/jj1}ffє BTٽ +YO} 'b3?!x~yWu{d2qO|\e3yV'r4ǩ|jpv=Wzola|#A1R IQk{BḅL'CպBIT }MtFXŋ~bY} |>kr ~bAi}I/T#*ó PVNd)fFY2ۮH Bkb{[Psj7e+ I9lƏ hg6KM ٔPvjJ#CK%%icO V D")x_OR iƦI=jg/ǒzx%o[0ϕ'6!4Uf J!CL)jۀt΋]pKsJU&y՜Sjwl>;EeήC}b<,vjwܗf `oMtmU!wΗƴj0Ly@aV`DеkР Ċ2LFr_qtBT}֧fWwjGlsK.<œd=d T3A/AePԈr!3^ߗ"JzdL0Z*V)_Ò {\?ɅljMաW32ir&@$َ]HWf[ja8 |JQ(QA1/7ڲXUI$ҪxɎcn7Ke~w-4-@xE!t4nmՀEFC-4ǒ) uƔ7TǢ~yLBWn#!+=$:R-C8#h52*HİdQ;=&E33X/δGKBg>31`h}\1_†B?b IX׶sh+a2'J0#//~0dz`OaD* `XĖѣJ/r:b?,ȡ'-䓹;:PЦҿTtc1O*{Þ[\,a8v `?%6MܮVDIx"l$DwOr"t踚Ěa$W O Ďp1rwp}5E:WCC㊨[=;68gໄz(cy GᆖG6\>IHt/gU5(5 qEK~ߒ2v7E4@Ĥyht#h9=%0F D`^mD`ν,,l5ބap.UQCK$hY}@x;H" c="ӥxB e;5!z8o{jSXܑCvK6C3#0@Y)̾0g &Xb33-G Λ`޳ȕKOx,K &S<,8-vwfa n t܍x2Ni$Z+gŸvQWhU~dѤQղ ?<7g]d};.yBNO`.VF#Gy#\ȑ)-x_Q ʡ"4£_D#YE #M>ߪks#ˑq6|"7H3w[,‘G\پ~ QU>?ÕɍGhONWF`"GI#/׾gMD2yrE}PSu tc`ۭlpLG>[o'd׈E Yw}ߓ^eBI{WS7M뤹hes'mPM˝;a|LӢ\\fkȑ'd;i"j:kj6IP3:akF r ԧG9J;ij6ddԷc1 mHQC;%'s:Ϻ?yǂ|žt+w$#=f~7u?2BiF?1_3rlam27cj}c?d4E|LK}GM^m|wfOxA#XO}[]误ׅeW6yh= 
((*ޟ.~fo'+m&?/M3pN=byt<;<~7I`_3fye&>'iM^< n0@憼Qߏd{ͦ0Is^o7g>Cdl0%ΦhTO)3Ö?rgv}O\s>0|ls{c?K>4YϨ{9u޳/; # ZL҇ d/?~wK>qUw _Sg\<ꐵ^z;iFECWK?~ZM'k_kRc~v=eg-0K=\{|ifC=>K߫rߟN:MUߙ_[_'r7yg|:IQ 8n2t](чv|@ٿ߂c?>'ۧ|_| Ik)//̂kw;~kY ΰڟ5-o&֑}d؇l&|~|\|@&|J΃d50â76cVɘ_n9.OlW?U̿`U}]eOܘߤ}3U7yuM[-`f/l*G}7조/m^~M[y|ίM {2vᇭr[o[)u~oA>K9I.{_?MFܮؿM8_8?~%$ Q~_~ e5'o;aϜe; Ss.o9iaqh [ۧ|{쏫r~O?ͧP_|oXBe6G|>/ lwLg?wCc|c>Z)/ wCCr~?Hn?Q{_'˹#i~ =7_aJؿȇnr}s~`b 7]_Ii12M=`\>(o.ɢ=zj o>Z4 {3ePnW<(כV_Rax 7`_*7v'=}K{ ۟rRlߤ|Ha'UٟKgkEW 5>PrV}d/OPym'g1~EV?0e(h72G[K-ka>z{$@)(޷Џ|4_6?i<]-7?K?G>-ؗ!wY;>{܁lXܱg3џ-؄d}(G38ncP|W}fȈ!l4,R G }H,W}7r} 6|_)7<ކ1~Y6W(壿>OK)C?GŽ6){>_Ƨi|>?c:^O_a=֓t7 i3,/b}>_OK>Sl}}}lu?]1F}v􂥔)Z0Qw˩5R&%ק[F[(CLji79-`lϼM[.}~ )Zgz5[xiHUoi~cs~Q_ ~{ͧQ?-ϯm~b׿dg5ٿ훜?^>~9>^,#zG>S[3~&/}[([ݿPT0w~`9l//r׏/n;#b|ɼ6}k׎f_];Eˮt&%ku1dgwzU/ _w 9kGYAo-K{>3 @nG\{wٿ!Wq&w_'\e}{o?!О'L|'Z1y>,oZѻ3쨑}-r XK՚l_CLܿ]Zƴz;j/yqfi1Y#OH˗寠-{ւ\,{6{^,ceɈjDoHcYݭ~x*oI/e5f3투O*hІ^0HEZ+of}xڎp>߿x& _{i|_H%Px}~z1e;qtJli's wG v_oش#5鄜Malߪh=->ɲ<}oc,l/rk9_Θ/ڑVz|~3|_xR[NȁnX-7ҧlFNkqfsQt>qWQYO[x|R;܊k :<bx' v`HH'ygA`ƪ( G# zdbBZ)h9"2B,'>SOlZF3 i{v%Þzl!|bYw.<̙D7ĕB|G(qv⎞VX-艐rFVE?7;p,D׌tŝ# od)EzBVg40Vĉb)HNAݤ?6g ӪZ~-,b8a#{i^IX 3yaa${֬@Mav{Dc'ɞFK9WL]pV\R~3eu}0XJ\#\e'rypс`P~""E4C a2 1*]9#yܒ']@^#Eg-ZJ@r做嶬~.K9P893I"I[iY\*P]>Ӓsk"A'E"xǎ/i TZ=a̝:{߭0f_Kxt"fh<xBx6 r?ͥ]>CH f!; Ҍz#mnQŻ #U`װEFlDr*ӈg? x$ lx2hAwo;5fdAh@Gz7'U=ڤNw)Ƒ6K$fddԴwu*4~r؀v@6&Q9l9DFq*#4k~8Ep;Y}8QƓm["*!*#/X ۢr˝r+I٠>r8ETT}ϭɤ )?癨8rh9{ʑ'*#wL=Epd7=qohnPuʡ|+*S8̀؇EpƯ}kK&6 w|>~1@%@ۗ\6^Ce`]9irhLnCjݍuR9X{/|+/w1Q$?mPbb82Ϳo`6.v6?Q9yzf! 
ʳ/8'Q9wMO@yI_H?>6}`|& [08IHNNj_BFo{\a7*m/FmPlQ9οE=-*Ɲi/٤v NAؖʁ N尋;oʁGN少ɶo<Tg|+rX;CөaKu}l,c:qQ9[y77[_TG6{mP.`r82||eFf=}8S}:y *_S9y{}E gVX*sEp%7گ+eRc?HÑUϓ3&L=0T%rT~<_%WGPgF$*3A+*s>'FT&2?|GdwHX}Lj%}#پ%zGջsBÑwwڡIa_A-uo=khP}Lj['6k<*SVr}d[ϯN濞K)8AxsYX`yY\E`Qh|-а,q+_,#Pe9b y\YNj)xBE6TN(?=}~z^;j-z(_[g=q(z5H0MP_ӍQ9Q{dv l [u#ڪo@c_3d&E_8~q~~}~;b>7"I-k}a |r0$.Y-1BY6T|?errٌWl >]4rGO*}0B:rG||/Y#5pg~32>ll&eQ=GGCT>$s/D"=e0lkKVdW>kKy!؄#G*Hhest>>>;Q;hjVgW>{Ȭ_+{!\){I-Q}OG{LNj\4~I> l{޿gzֻی縷7]|y'[zϔ6[_o|޿mUo#} WעS9~ݴTw٭xoI Mxߒjz2y8SJ`Dm :dhv0y8g^VGR+&CmjQ5r=Zv+wy~iS ݿ*ܳN`5> r_Ƴ+s,}NQd'QA7SKFy֭QYSEdoz%s~sw|V?c|JR}OnAw,_MRv(oעoݿ7峾V?>e=_^5AKN"O ryϭ?nq*}Q9nWv{_ !ڗDTfw@ywǩ~?O|vY;qcDK"7dAggU2joBy}?Gj ?aHprk}: ˟{ $?aޟ]kE8Q9gyӊ^>@G܎xW*oblga},Hk׎{DܿM6;mUOI夂pFfB*l!Ag"4#)L?N`-eOzAv:ؖeP[oҶYʥȤӿ$*J3Q9T9uC$[Dys8R9'VM?y~<"R_$o'I y|K2vh=M>ɲ^ϬzԣP/׉a/ )~y_k'UT1(O'l, 7- lG:z Z"2e eo O/~J)8i;pBUendRb+ac*і#!n "o軐WpJOd@ծ-6F\ci6\(4dcszK][Vb9[m@1]4B( ۋhi!5 'S0"QV{?D"][ц Y}r!#E0_!hD'"+G>DNC0ov[`Oó,UbFX9,'cHU*gָ3jdO &B Z Ethܸ7A|,YF ͪ 0a$*u2+@晏~=$B~ fm!yhˑ2|Ѿ.X`-N$`&Yb| 1b yXF8esx#3~I^bVYo$x";fpٸa|OJ UÕucvmXDЙLo`se >%LXd+AXpeLz-  >&Q!&Y gfoG ett I&IV\vJD[*OBٍ`*AB\t-DDhe[DZM"> 06@Oflaho2BtsePAx ) ?@4i!݀` >5O#r)숇 w[2#H3s9y u|;`k ѦoDD| ]lyx#vՀ\J`CD9CD޲N}2"'0SdCid:Q@E #ߞnR7WG@q0o7"<ѓy\Y [%e㟛e5y)|cZ}G2,#8hNXrF<#  6SAtq #\14OyM2~rI:&XLm}-J[;&9m ]~~FTLKt yЌAn[V6c|&=bTMbK_Dz5B'3{z\hՠj3Q­fA"mbcPNoz_N BKĂ{P/dzO%`}v`h}.]HgZDQ{JVd8ѹB<{tj? cOQDZʳf_K9o֏5D#dQؿC-xDDVlD =K"h.^6Pl}d'21a8+56tM]Y\`~?  
PQ''\<~{#U=xNkx,B 0ν hc\?:@f'O sMVS1oZFdƨ󅼑+Z ^iOD L،qAU S+c͐b.2K?g:8 <BF;oP/1%'  ;lʡA>GRS?5`GAB"gäkxG~(&BS8ܤLn $.L?,ƽЅRQ#'r(&G6fhȷNCM'\70qM#DžE)ƈK9"{B'k/@pGkp.s-O_ ¹Fw G 7K8%B(U2aV:'\8r\%c IabF`n9X`}e!7 pHs0O2W71JN D&aڸL΅#+@ u.mm>~4sP67q.4Ź`20N=D΅0Eq.4bs]>@l1ŽP򇅁p`q,rNFN6q-}  |&Y0zm\h4\ mG޸*q.pGq.U$OGSF8l9|fBBs.C ٔι0֋sa!W9hr֝sa QRι@ kK/v΅Ć\{zI4Masa#39 Ź &y\Xs΅S9s`?XKW4>r\Xdr΅iO/s.㴶Ź& Lk<&΅"\ιsaWqFt=!Sx#Z968gs(l~q.!Õs.l$qgw/`>mrBsaé s.096|+ι8†g9s`p^ w\~hq.L͙$ e~K?nx1\Ȗp΅0M8ű}\`s.[ d78sa96?\jsa\_r~ŹpHs@s.l`X;Fs.|SDUč\X9q&&`c!9sA ι9?:&s.%Άtœ ι@L_\_s~8‚g9֧s.,:sa#_UG88 F}S e=b/jۋsa s&~q'@ @s΅5 \?ι.΅#W8p/%2:ͰIzo8ր緌|\XHq΅"ޅIN}J G‘q0B G6`3@3,\h#SU? 8lmk(^ʰs~a}\f\Xxye`\8_g%Ӂ Vq.+Fr9MZA`e-w ޜs^ TιHrOQ8z$΅5@r.΅L\`s.,sa 8{8xp'i#q.|cq΅Կ:1g??toW\X@q y!9sa-`` }\X@sqOιwQ?ߓSϿ?c{75uw|0O54> :H"6/sO7|;xLFq?wq.~8߮;cVz rN YAFҕ}~~+΅cϘKr{Ӟ}}'N‘9.DNڗP&~#d±_"Gٳxۥ1aH >f\hRsȆCq.}oK`:鈳DGʓa\s,\r ,"BxNNr.}T GnOn+ιpA[{_7`?X0>.OĹm|hŹZSX+QN΅#3@Hz}ȠBs.FT9N{s?zIı`M_=9CьM8Zg*$9lntB>zS9_yA!y~r>̈@q.A[3ևdy"ɴ\9 79)8‘ M.u8LI,rŹ`?ι`ϧ\P}\8lp8(s إ5"}$‘cvr,DFgbyq$4gsD~?syXO^tEs=@d/vR&g0Fq.y~]<9>KK#ɰC9?ι`-*oxJ.xD3qs;sG{/l;ԟE!BD!: cwsdoĹа0ss7"΅#Ob O\MϋCB%t&0_FqIgN\ιpn=+>]XdcpLv`sAYUggۧeV|,dqwg)"A>"O>pmf!_>~i,]6K^_'ޟYK۠\y!f؄7|s7}Mͩ}8dj9~r64WK=d֯w Ο/%DR>OSbJ11+<re/?ƻU~?f<ǽ=Ĺn[f}>_OX_2Wfiێ> ?s,2~_E>"aV=s.{9[o}baƼH.s-¸őB$^]?ιGK%sXo2&-ZlLB췒)ߓca"݂ПnޟKVPꗬ,/\?ˡӌ*ly}}CfKFy`l7՘uɜEg5š4p+v_r\Dj!c!s"4}X[[ޭd>s?(KE׏|˾[_^WŊ<5sSƊ$&{pqqO eWҊ'9}\{tqL|AĹ`vwQ>.fl~{Բ s7Whi__LvxoYvΠ3*dᩚ3i?r`"CaN?ι;x h{);~=Ϗk 8n}EOogy#Za~ ^ΌuX ?ӀE ޾v;߫rohjs.cr. :5kzfFeٛ?ι`2ݲ'Ymϓg}tiY{r&a{~gC9ssz=rb d<0֧HBLjs2]Əs.xKx\r̙ ^'߾\Li|8炕c|wz GsGeWv="75~<rEyDΆ>}qq'r(%/}Q}q, ̪2>ۓQ~Yߧq Y1@Nt|Ծd\pA}:aeX-oNtXg;bKŊ-p So_c4Ï7B7di%7z>|יNA6B,+DY0Vy=#SE0\Joz&q7&Dg)*{bω`BP-9|w^"* E~퀎z8 ,NeEl (Ԝ|)s O"0P'k#Gl%4A: Ua$83a "_wFp^9]p?AUdO٤e~֠Z:j yK"+5:*SxCFT2t!%㶅'! gF(F|or+=WYb|+E #"+FZXA}GzW@_Lp&0ᣖ}hq{jf,'-Zn lU]RKDS)>orXZ܆l'vk،>'b&S3 +*!!Q#5 [Cdث& k<G8NGFIvh4g? 
HKW  بg/6C3+\ٓ:95 gqH':W@jkv@>3a9,m3鄢L@j )NteGHE^!*cDd oyk sDm5z23)mfL6ѹ)MYqIJR@UVR_# ӳ)j a-#n%cŋ+IF-+5$بYԄU.;bE'#A4x{O@1U7h ojV6B-$]XO5oU'["HFjIk':RZo 0f;P#Dg6¹ `=;vG!jRNp*쀭&K33Mw!M֏[FxzCٗyw6lm()0Gn5סZeм犸 pUYQ޷~bf*P'hBmrC!c#\ƣI%dP͈fG9J9eM@e l5Mi؇$ޘ,<Ƌɽ -uqD-̒3"lri24cFUCRePxt͐L]meO.,hU PAঝ+0mΝQ>[A<B3C[Y\fNOգs2zpɼ [:³,!#;l%b{n<|5~ve)"*HĂwQ}FgnhWf߂}OO"rqF32K/؈}W{0^9Dk|f/d)܌Mݡ+oz.HGW}1hzrc1#T¶- F'OY'uC+b5)``{<3#[9oB /7:8*Y[kb!"/A [H;{bMN?l-ȌIx#[eYD 6y8R+J}EBV>)kyqYoGn;E2oe wt1"~RpGʹ#y1~HGq1}|ԩ )dd{2HGfqG}R6c32la2c8efzB 7NC"v|n  T,#Fh!TH'><,%}KlHD~T |IϪw)">-O0 9}"_G偛n7|^*#O )8Rp'G;I|~oHD ˕qf,:yAI%MHX09^h{; )ȝwZ6H&TeNnrjۏ[!KHGaHDN\! $k< )s>'nwTHGސǑ;'(C|aIT"|}ǯ~) WW4#Edc"w@8RpĠ# H B >r]2#b8u-!G 1C1\HG)L{~?@?|6p>Lr5MN6KhX"l78Rpg; Io)oi?ج~]<єԞȿk=6z<&e_q`QW+}Om)D|߯e>1R!/`;?lr )DžI$N$:!_d*Mok^Hv )בl!ġ2]HG>;c|)y!.`;ׅyR)'0c|UR˸Lz+~Ϣ$Èa)ȆlL#)r![B )r!GRB 1/N !v+}JHLB )6>[6>} ) DHGnv)!y )kRqB6ye0G)賓W*5a<,cg )3~)\HG6@"vt ) g:s,BdJRͿt!unl !j>Mo^&NǐmG N0!kS w뇼SHFam'!!`8Ʈ(' ^+_f.BR58`ڇu )~U[k_:O>tJX~~"k' R{>~W뗌 ߏH@>kP+:OII%o(#? zZHoO*B :rȑeE;W?q+೿zypde/,Uۑ;}~&dlw̨RpgN;1C@/-T#OwSgHa GM-Lў~E\.ٻp0~a!wJ)x~>h(G^C!9 s$5 H?'Dx s1!/5Q#woT)^&#gؿZOXK*K2 &ʼ_W=/N3pe IHN(zY')|OH?<4>!Y& )bS:L`^q`k[_H AY_EBߗH#wDdɖ6?<{/VB=@~}OH|o!~3UG^iz~=8ZW2B&;'}?" 
uDeӡ4ڛl1 BW s| eB G )؞G5^>OfPL+ !c .!w@8R9u^`&RPG QB 6r77s+W!ND>pR~!cZHHBDߐ\?؀)?)G(^";?q_ɑ;e"7I+)XrH_&weBNoz[|J`ӿ@3~)ޗlD]$O\O orv40Pʢڀ#ʗ,ƥyYMXC^.Cc\q/G貐M8>1??{о>>g{Ų[rQ?Kj/5dG$or^d!%k~{{i&Y}ܢR0=;2<2oLzߵp )X>V!Ϣma)z@4ЅԂ;?e )b[|JgOW<8*``S.a[kw SԿtBrpǺ|6Ο)wʚQB2ܿoMB#{SnA|S.@`߸_0}~~19=ݿe}66_5C_5cgӾɶ'H?; jlq{y~\;Kx =?1z,o" 9?}yǗHվutAyo_v KP!z^us/B1R;8f)`dAE,{Bޕgv*S=Dz~\#lMo:nI<)H͟]OM?)`}ݑ=ӅHR4.sDž%gΤG}<Ṽ3W# UeϬ\3M#RHH&ۃE+tNeN{TY}噷WW@.3 :8R Vd_<I󫢽Yg{cS&22|wW?}Su"z_|s+`?'"_=-ۉ[ PcWR *ݟBZLy'iӜNh=sm3x4- bAF (' xhqu=D\=ё1<'4iPED½>A.Ipr?X +.Hމ2#xN9(1oY x!vܯ&H O9m WY;#l/\[} n|!?4`O2e,^Hс(xՠ"6s0 jρP2WlBz:8yg[k&W~mӤX 3Bʱ .g!3™^, 3 k4;90`@dpttP_/嗹A2e65>R^HmlQXRB3׊ qJi./3 7drlV>,ȓgzk?dPׇXť#oPAu(N= 'HHtL|;\R0sᶱ 9fq>~wexv|)@0g\#C3"%Q:bd'6%J!!ggq>6wN̡@)M B*<4a =2ķlI:gjo&pDf{'K/EkRY: 4X}@)2L$Q.$I ϥiQONbD_e (2I:" K \vn#^ғhjILh T= HdGhQ>,|`F ̦ᐹBd(dT.]ʉN&T46춣憫l'nm*U`P. / LJbr?gҲ1B4X"ݟNLb;Hь|#~PX•'5AtKlpPGG?[?L8dCh~0=c%Ga <@]kn3 Q-jPAs@A"?JDoIOoȤloKVHG:Ӥ׀Ȳ|)̞商#ԑCkqg˿im ^/σf'a ,hu8sV|p3bu:@HGu43FFZPUA7\wfovDETo>:d"[@N]0*PʒZaKt .5C OATV@NގxC?"^@H)u/ sJI}^IY+R+t`fFߗݢx$RnR؃@e?Co1hN"jx0M*.3Q*;8c\׎6 GgZ}%1a.3dg朒䴱0&}S=Q|_r1z'}Wqk-L  /H_ 4H"ڶFhAd'N4#:؟IY9VKZwʎacb0`i`E3' a*;2SZ+dYq5Lʊ,-ޢF^x5ώ%qR?FAu0g"ތP4'ĩ##%deav?=-(OW XkYc U!!NȂ*5N0v;LgZRc; ;t6Hѳ}DVeN#>`tߑkĪ"JP4ŊfѠԎEIL}vk):鑎89).RMYGIM^S@zD@pjG]&0 6xJGi _aGcgMT3~۬*6\G&|'ʐ*b$J=k!hx)'sOr8qسyJeMi6Ip3879b0٦$U55c1Vٶ{'l9[ߵ :w#j+?ؤY7qgu߯@h#a1iϮ b&aKm?U7v`ǘ4ciߨC,㬪3lsaN͈mb?bAp>ʄ yNΜlI#d$RBQs$vXg{w NtK*%|+:Q)1.LQg$I15.EWp/ّ9ؐyc(*RbA%vkR$1e>TE+TFU?uCjx -n' HflD5⢑:Iy}!fC=ψL2fW_A?Gz3Y|JYUhn{b/ԇdEs"unǥ "+3@PkǨ~eH b(JV կ,vuFVLfYwլ( p :Fq]=5 MbjZ۔U"o|LDY!B$W-n?O{J4v䅟yff\4TRP~b1mٍ,$NGyӣg@m *MG;JTs} 7_ug]3WGÙ %Ź=Ԍ;q#_ZDBۏ-P8vnoǹ5́s[/κh+N?91嬘c}8`zfHSǔ{.257r9CsJXLXg]}k;lG~(gL?κHK5we~xX>LtgEX #j8L;lݘcϘr nt+g #Q8rĚ#jlxftǘ1fUB?v _:䟥 U`etAXc "stEPcPa ^G|NXC{\P}uŢ+K _¿%bc; {GӥHսams9^x]"J V|K-5\A+.b?'|cαޭk[w)mrjMD?*L1q&IeogKFd,.&՜:W|)v-K۟׆ ~O{im|6膡6C-?xj?okιU۠UzP}JTjjRl e4mTsGWx:R8iҶ=N),kF%̟f*G 01YSs%5WSz=av\FsQdKaV\G1-,_44\fo/s+7=UwSNWscEKl;?0w: /-aEIb 
eꉉg`U7g֍;vB(]͟j6rcx% lE ]Ԁ!1^È^(n|ؗPs`X$ӕ`C0eNןz2Nin/o.q+g4t,Dp=:]4 g,)\ xuy斠?V]= H\̎^X<af\f1XYx:ܷgAW׷cMXX612~J 3ӿW%y(#`rhĶw`p;uR nV d4c.;aKbM3}# IpJbC\tn<$w6(ɍ̸KLL}V^z3_l&_P̡Ś|n[VUvtFnF-xXR2~~ǎY}gaGYC9!xɣ8)vϕ2SgJ6Fi*g:wjLW}u!FӕX'2~T?`)M\jƔMzL$K4{8Z/sץ3:x}!<#sb.1ScxYS'[oo3.5Q'>D[+l&9fy;ML?+sa^!(/ m m_xA_| O+V{/8ǣN`ѵzcې ϣcb1XFM9! ޏFtx9,iVh( ګ(Q7M_(\GM:H~OF@&|:5Vf\KX=%[c8wh ^Ǎ^SsAHJ`H6~,Ϭ-&R㍺4 #~;! sWn_3ި4F\˴M~P\jrixžc/,5hFy`~QXAQv茣{1T_$~ܟCn} _?Fs~Y_8co}1AANcGU~hW<0ifx~%ԛypWCo@M;3zϷ^I~cR{.%'YƉ?̌gC`feJSMaF θ|ΚK-{Jnщ>*j׏GS ƹ;򧡏$ ;M5|H܈ptƍ*(I(Pcg1Nx}(P;| ٣tt/wo3-(gv?'/C"#[1cr;} b zH81֍}]Z7Tg?өard`en-NM24DFj!yY." "cPL;y!{_Jfo4rS4q/k~1hp3yVD ǾϢӲ1buF%ω]YR'md{lmI>5F/c,h=YLtfp/ub{Ԙ%y}ix2`bۻXR,>C$W!I'bɳO7Drʛ7UN~ņ< 3 oׄk6D~kÓ X,?538Fi%}콙+T`5B`OMad"zm}ynV_,g'E{iigʁN:~yζFZ.q~}F:9RYKl{k=2;~%@"r/t7FIH>GE<3s_pšπC. rw Z'gf:izCR o{@BHO*[ %&Y_pbi >ɒ ;.%QH0-.@' ߚ}~&7fӥ8"nYNV2wO4$*m%xx5DT91 L |كoZLjuN2gXA xH }~?<O?2?|[A*.i{䞙LW'7;ld̜LxdX{Vu)ۻN&)ZN~Xpx~ 1z!}d%bmnj%R;[PcԗT3hqvQt^g/]zzs/" LEo!D!y:,;`4._'ʡoEi6Ay$Ĭ-*#BpcK3]X!rurdm +1KDدW*!|)v׬-XBf2W6OﯳHKdW*v Yw:wKC`8J~c0/Кu)k{Á~߅f剺}lC1up,/IP[_n;͝r?_utcH&2<ۧ^#Ajc\'ӿ<NdwЀμhc_'1%}W|KVM=LC;z+Bc b!I[$Ck.%PL' f .'&Xfۦ dngeRh .+e$^MP}> \FL`̼d5"JYs 52PjF  (.xnA>J.PjM2̛WCXJ9Aӎ"'ԻA"]Ͷe)u՜Br7e[V: k Seƺ\~'[N!GY*cl A±`x#J8D) rNVsLYx, HIC|%Jـ?D)$J^rQK:(Hd'M 򁿗(e (`JyXr@E%4Jy^(Pjcf#Jy8+(ak@)D)9;Q8RJhe>RVcM0rvav<oԉRct܃(aZNf!JyخFg\6QxE%gFMn~QjgaQjE(H "D)L vrbH.D) > ҈R(Q (eD)@)D@)9QjFRRV/g01 J9GCF|\(x#ϼQ6,(e-+{;)P(8w;+'sJY9R9RىRV/l(eWlQ'Q"& s?RFsD),s[!JYM;Jf'JWD)QjA2FIqM2 JyX _f=D)cld'{/4k!JWD){(e avOC-G) 2jan(e8cG(e(e5w%JYMD)RaTa6qn#JWD)[tJWD)9*ɉ9Q4RV Q0Q.D)Y|Ы|Ҷ$E(aV ^aVQ8bd!%)c.J`D JVD)(ea|0lCD)(_)Q+K岓51!J|(sR>R6+Q=l ԅR>R679Q0D) QcY}.(aӢQ6Q^'ϳ Qʸ Jy-JCÌJ@)Jy5Pk(e4B04Q}Hc\JRw2=Cݏ DJMR20qjF#2հa oJ[(e4EKٮ.lSw*p[h}0#?@)AJkmƍR<r%J<(e?la.G!D)h'J7RVm,B2E|ThΗ99K[RSץIKKGZh~ih47Lb7*s폲PjnArrak<ոץ5Qhe(e_l&_p7T(e5GUQ5'ɿ+پqzgtC>d#J9~T+QʏKu>_Eǝ:Pᬧ4J׷]}Vb\4m S[|1 +I2M,El5w}]jqR K$+(e4Eb'&Jf!J Q0;Q3QX"og(eQʱ\qEswa6mD)*Eg!JaBr3؏/ ѐ`'G_0!Jx+L&x(e)dcA/&sfsZK[/>.Bk 
y>&rJeg40"c:i1VKFB+jmylrކ®\]ʂbSZyyNǮK ;WtatӱMÓc6~[xuwĻ=@4ӵ j 1 7j@_`E t*ȹZ;0dkB Vjkoy..Y=[w/k̽md _Xi-%oGӥܳrx#Yk '~c= Bv+AHNY)&M3,.$(_ 8NH@/@#a~~C{mJzw/#f8#)4Y?:_b7ɜl InC1R9A{b58 !;6M!̥z3FeXdrv ,@5wgI -c CޑdYx|Hg3I(_[ 'k=WҗdBv/-2uV';#%t,߾`82OD wJ?yw!(I2nl<䣼-Ek'(R3٩hsfh=3аye$n?&~}ا,6g8yLZyKe0q͉dw\f'quL*vRh6g ??r0bKY_ϦGugqkd&h .Ğby鄩C+-v 8m'֙FGVȸMf9&q)đb:1,mO tl`S1xBv鳬ۆfaK?5g;$!7iokG_mc7R*9϶I7Q~iN"%9)*5oOɔۤ& a1{e=w^&t;`b̤1lV3\B-e^r9`s`%uw8!uL!_7A$wUrޞϚL7k~`W_$r2|^`/Xpo~V,@A-O)UMqثKڃ9vXBY4^PM& XK5 v0:WR_l,ukNok\ӹͬH yGobeܰd\xđW`AMٙY.vKpM(wv|6$ar;C6!b!;~<'2Rw+t]M)J͜˲F6XgA+Ui:.ǁFV*;S}?7yi}VM>:}Ѳok'1rLWn w1gT7WINעC]%Q2w8UFHժ Y6mcK7Q w~}#uD zTD$w)q0mlnb3s" ҾmtYyf|ɽ|F4at I,,alae .z?AP2nsb7Œoǒt;s~Ӟ2Ye q+'Nd|*c3WXܔǀ"%ݼF8/fAJ,yk (Bz:*i:?_t+@&J68`ݬ_}C$"i*icQ‡Ԡʔԉ'ǒ)`snL3zR5K7<<<09o.e8h]JJ#@ 䦼?$M/;)[/2l̀_'Y;|ts 9H+̣yNl<Mc%g>:gx>&[զIA|JJsln̔P0q5Mi9<<Ӆ R0172=/ K ),%٦VL K ,),%XX-Fa)A+,`Rb`iL;]$SXp42 KgRr<{cvhA(sN]a)FJR"⣹ K xF"_*~H=nC+,u@a-:톰-SXG),uEa)ǦR]Ѱr K;QXJ2f K K؆:b(,e':Hɠa(,e} K"(,uFa)QXpc %.T1]a>, ,:Pf K?a+,eLVMRLa)YAEa(]ǹu^aaG KtQcgPx-6v<+,em Kp(,u/ww+,u,.h:),e;u2 KVER\Na)JRpQXʺ/ KYFa)r),e37 a)AcWRX:d KNLK 2b),e 2JR_+,eT2 Kah+l-+Z.JcДrS KyKڞRyLR^Da)' K ~{a)ȢSXjX _ K a)JR[Perlx5(,6RCXJШy+ԔMa)RYCIcbR:E׷RB8Al4%*ɰ6o: P )PXJ - K Ps PBaaB!,zծNa)R4O{a} K8+,[ Rp(,u+,%XQXJ Ha)Na)r-iPX&xR\IŹU ~R KY{_L),%ۯ_J KYOHa)wR{.n(,%"{T09$>U;Ea)ٮ$5@ WXz KvlksFDuy uR=]y+,žoJGr5Ìx/X!8j+,In?9bk!,xre8Y gՌVf__u|#=Ǥ!L;Ɗ@JHt"\VU62CXj5v[ 7a,CXjCK B#Hgd"6d-/Fu?P{RZ<gj{.,5&BXjLd0RmK ކ0G2'D٧iSsY\RjCaa>r3&,R0 pJa0ch2mKY}Y),5l'7ע-ׅ旚\jr 'Fax[= 儰RXJMәbiM%(,RsM K `n KǕR0 Ia)}+lR0>0/I4= aa~74BXJMzOԸ0FBX  Rӄ|>H]RjڦU#f먿B qE(,u7GPXJ3(,50lP(,渚MjJa) HĻTÄc߽#c k85Ĺ9\ʋnn X3!!3RBI*370GUAOx?&=RZ}K,RB(CX (,57?GL΢kr7R4q(,e1 K0RH*|+ 7-aSb"8Z?~-BM/&R<~BRPXyx?OWx=k׮'}?+}GK sN(,in*a)c_a)|K %\a)9&R{h0/7<,7Rux{?n?خmRXRLy6,ki(,%iqU25s^Xc\a)+z]M˴.x? 
K!^we%lӀR 56˜Uҋ_\a)mĞWXJĕ͒&;m[m۾mVPXJ@X^qK "I9_戚cΒrDBg*~lNnKGZWhjr_BK'I+SBX]t!ćyzRxHBq]^WXJlFۖﮯg\a):R"oUw͚3GOL^+,1 K 4),ll_9CX~TsrNy(poQ֫eNwPu*A:2S(,: g72Sq3lR ">uyJJus(,5(,2avZM KBa)Rd]X fTܨEor>>zD49$RX fTum4ngBa. KhTf K4 CB5/ij&?U SI*1M+jB%ePX*>h]u.[IG!L)B,Ea{DiTZfy>N0iK<Kۑ ݩ$O59Nv K\l KʫѕFa ϣR[fLPa)9RpTH4b/ K!dSXJMYqT:`N Aɛy9XfR0R1|\*a IVpבf{7jPd;R(!,sw/e)V7Mㅀ+ptRI*1knRXʋ= sKEvEM¤{(,u/7 RbCXʫhK=̨R),BRsL K(,* f𫇰԰SX PX ]~T0WtBs>Zl1 a)CXǺrrKk:SF_YIƗn)%Teq;Y|.ƻȩy|$cF^ ΙZГِ oXPLRdTFz)\)3bߔa'y#VcA++yGME~~S4 h# a,k`1}WE7C+0#17iQJ/~ ; ߏkSL*Ra6g RHSLCSaIl'K[D7UYT+M%iߴf!p<)$Ts ^ӣGaKNRW~W8ϓodЕ>53vw.#-k0ďڎ~Kg I|wKIۃin.<䳎N7]}jwgϷEk^)vh~A#  ˩@0eqJ1g}OuXmۂSf/&X-E[`"RfR2)wEz<ׯ͊YC͘CX&clcYR"4+/N;i1c TbIj)34p-R?cVvN 곈 x&$%'᝸c"ee!Fh|ab+'tj]1XY'_&bcHVhrс&cg203htr;LؗCF0DȚY_0_SI6EXp=o˵ $'hXrl`|68bdM ؝w%.QS^\IVyPk%<=X&Ĝnc!rk9i#- 'ڎRE 䁌?g܈i8M#2oC_: RsY)eйa:'@H:@dCr*PD_d{9&*ůRx aw)#>Uq⎌ |8[t$A rzV+ [M4={LNYg '꩸xF6K\|r ^'}:3]9'zF:8}lcX:fBykw~d*W:ҏxA"O9Ɋ&}rڙӉZܲz=SN._M$z' $ƑKua9K$ef^ >L{Nr"$Ῡj<ʶH' \-Wqɕ"Rgy(6V:>XS v 2a_-f/DZՓrkdrh.fZ!Hj1fPRJI%tL0*:08+"7/YT rZqsBH>rg|@שveK2GplIٝ@TWgA/5s!pnPtۮ-US“2;`Cv/tRbDrQ9] k$X~v̔[y3UGkh=5紼*SuEB~'oK䙤mt V? \ms tLEVؿD&OvR@|N+lKH&("ık"dv K`S#asZti>٘`:-2`j8<ӢBfd]5G$lw d;-`>`jZ_jfu9ǜq]9aVvf s2SyN\~MDh*ajIخ> ÀNKI.Ʉ9kqstva.vQ "}2`SۤZN!lwi `jJ']c"/s!H0`jV7Z6\Xlwڒ.`ӳxeVlw.W+~ݹQ] )tݏ ad;-f`9lwjZ#vb8`s 4rخw𥚧;7a0'ajMܦ1?^kZ+ ƪi\W/ 4Nc+lqۍ_v՜]]o!]w<\NZ7p-`ӔPaj֟݀a\7۝8[t q45-5U|2 ۝[=۝ خ;Յu۝/ 4ǀaӰ۝"pvBjHPs a<U6vqJôAJUsvx#!0Wͽ U<=wc?`"lfo,>]ǹhQ:Y۝)\i؀N۹خ{™g]]\f5UsvTisր⾝]ܨ sNff+uSa}l}]!l7:z]fi¬z+EY'a su3躉!1Jnd\!S%lf'lWͱ ۝VfYK5zAxXv1hnva ab$+v)Dž]5&]71$lWͶU va6vu?iz#۝=خZV6m*`]t)lwl!`0 a€ s=f_egM\NM! ]5m28smvp lWyՂBخ `(ؑ]ԫB.r۽io?LNخB.c]o8}=݄ND9nah]]zخ7D= ah] s;ݡ]t}]nXݰ2sDh1/$l+W^ز %JxN8P-i{5zť]ܨחngO.Kk wS*aa .u3{ nļ0!KAy+`3e_.za{a8Lvy*o`]i~x.S Z^vq),rgdNQ.x6?Bخ]71׼+ ǀAvmSlw۽G1Y_ܯ^_j &d݉ C'lara9۝&|]pخoVlW2۝W]lw~ QsLi}"l!K wpk2=vF9ixìIQVJJw!d^%i9a0uL˃cBخo\3z!,!-*;:j uRۍb옕F.>"l7>پ<rG#>܄ ٰ+SgJ6FipʪQt;lf%]73aXdۅilj$]73Vv4R`k<:&9 ۅىyHM3ۅy۝ .B~. 
S^XFQE$lf#laƥڥ^S )qQ_x}xAcF_xQ_1axB.6vqC/f!]71;_&sY'k]u,aHÀDžt\os {|#ʹluFnbJ+A.ʹݺ1a =l3`0 AB]hBnapEG .VXGDĂ*躉9a0X놳W۽73E}aӈAMLm<$]7‡)^k%]71 Իo8B.6aA{A?va.vvah E.A.Mn\ގȼTW%]7invc`s×Lva^n n+gnv7([w4wgY.os N'lwXcvY~pm;y.Aӳ:uas0<^S"crNlf{Ns//e& 15#Fi<02 ;d;mon]rndWW&l^o4Xa]}Q7!lf%lf;f]4I.AnuqnB.aVf J)`>l{9`nbIPI؆a3nø7}l+"' ^o!pҳDs|SQsOf:9 N $2NJSfnKjǻ/@-OАp~$5 y3K ߊb!4:[֖Ws0z8CUݩ}_ҡNHHp/RA<@ɳ~I=T2Ci Cö3}N p'9wӔ> .=fQOIz^拡s̟Sv&)#hyyuc2-r&lC7⺿|i__p_55I .=ғ; óm\HJa8%Ipڦ eKLIeo6!9*%,xmbrgp<9kp1_jrC9ӔSXK~?L4e%c@EI6Vnmߐn62kxf:/@ʰ:M s629+Jo;[&{[50oɻ۾doI8Vst½^è֟cy= 9߳ymtcj֔ӄJjw8Ϻ9ySdGv"q8dȦίϏuMTXC+oTFiemsI===X(kGe~6~ţ"wkd>W6Y3gW_4Br\9cq&H;@/^̳L ̖La;rj_08|=/$Y~":9c|W39_{ǜ6EkZ}`qRcݬ??d{ endstream endobj 400 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 405 0 obj << /Length 865 /Filter /FlateDecode >> stream xڕV[OA~Wl%qǹ&mԤЗZe$R܆VC3gshx IUZ щJn*/`0 ny߄``-aղEb!$=GᕜxG.wwZNKɆ~2x-&ن7Z%yM?YXV3"\،i9kJedϒxo,+@d9sU?MJr>wՏ؅qjFZJe[3w j$hM3Vv4&Q0\nhu])2:єK%8k=jF+/%O_HwJjMx)V!MV&{NzR҈x y6mw>NduMX-ȗlr3@[.;-o fi}Y "='[3${Uas)āxEBiM:`5Ҳ4m{/C\ 5t!{EN.l nhUӢ"?ؽ.3*Y ?9 UQ=KJSa0K˳-g`!6ݝyl W'O"W+bgBg%D$ Kp?KءHzާ oaӫ]f3{%*;YG]"D(VhMør?ʇ).h)ꎂfeNi6[/3G endstream endobj 387 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-046.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 407 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 408 0 R/F2 409 0 R/F3 410 
0 R>> /ExtGState << >>/ColorSpace << /sRGB 411 0 R >>>> /Length 9797 /Filter /FlateDecode >> stream x]ˎ%qW%i` `B¦([28YUMfwg9"Ϭxo//>~O?}!璘On<.i~xv#+]PKRןo>ݧo)>~7!|/_YFן*;+RhbVhϾ3_k}onRCnZ=#Z~- `t8&{!.C,j·P=C:fk$+3*g6¸{dG֣q P"` Dm1Fa=恡@P}> [waH p$X,|Qa#mՁFV< fSksao `i qБ![ n*V,ʶ ^$''[~0mX>IhK(5牭hY؅0VJ0 B,!ۜ1SKixhsT'kf7 .pڊ8b[0lc{h|niSb}ىpӲh#r%ΉXKaN 3 <0#.)k^sĖΘh#m'ш;Wn?桕xMAX@8&{,`g),m"b*jݱX@0WzBG~._Ld2|~";uw `#6Xy^KZ_Ɵ_mпy?/d{01WV}/Ƴg͟ nVL$we7as6Tb"6ǣp22f~;!>]8q z-#8좷ݏf$b΅ Iawؖ7c1? ~Å=xqthW8ak8}?vdyhmx<cnS%s,vװmc=XgaolNǡy o)||_]@FOv>5/Rn}$;)z_OJ(;ƫyYmcxa2&i귍 Rd}8z7B\ON=86l#9A{5oyJ%:~ mdDŽ6Xg}CoI^ >YE1<8ɿm{>6>8lMDs?jƃF},oۂwW;?>,;cb?9,\M8-өjUd#C>֑S`8c7_6^9Ƶ EZ}=zqj Va%^ll=fd;"ɳx?;s7M?<q^=9s.&|OxqR0E? Ts~*OXq~"qyZv?pnľ+ǧTɳhO}˙XI'Y< s^2-I/6P[<O?hA/{ru牰@E(Տ_Zg_~ce\1Kx^|/a8Lǘ\oW]9 02ecUM_◛˧%aEgEs_>볤(B̍y󉊍q9tDtRItLACLɏO0#F1;}hNo[;SHoMy~C>lǸ]|BP<EWE ۜ*!gnY*oJ5hW/ { Yϑ'/8 FLכL `JI9N GG} f~Of/R.p*Ie|X>]GWiGU/O6k=]0ˤ\+]>oҧ aE2@z<)~gWWQfH:z#)ׂؗ#,( yWLC5c̚g`=9̅۳tןAɟ/1Uwc=8 G?F?o E j1žP܆ϔjo]γ @&9|+sh"|>R-8qp$:(S`15o5mfMʪEE 7dI{)UÇ^hElݷPltȊt,^l Y2T]}|wĚgrhFC{ 9ЋXV)5zZX܁,_} B |fKWhEPDܣKn߱8lYK>)fQ:LX=p:B4~?kD$u,7kHV\6*\Q-6H7s9ȷvRbo?|E1QDZM;NgIJv9WvH6얓zyO3D,;-ʓr$9eWڋAr"];ܔ&ؼKoOIu; j*+uѾbWݝ sX =g,;7N,=3I/|z+N˯UjFj[ vwK/EiPSfǾ^|=Ko5}|y{P~_5ҮLУoϹ i+QI˂4H W]ӳՉdMal 2k,OG-k|S"G;qV"i<^,k- >pH~h <"E󈜈|&zgsY5!K,j ,k>_q>&U.oniky<ĬyEP` wY~c7=4%,$㱿 ךj7 mT^alph봮;Mf5MeECͮd_mD@yEPin ()P.$ֶՌ1]w@ږtNuzZ%kQ7dwFa.hlmdT`ЛAc Q6R]7"W;\.-[W9eձq:<Е5v~ե|V@((^Q=*O9t}ڵH2vu5R5.zD3w0 VPU_[Q9-/ 6s dha]vBbNE{T5t`-}s]*f]!&`/͡mzh\b3O+)_4pmr`㖔Yxx+u؎Y6pM58Z\VI`2a1{E=%U:5PonXXKW7_띳WUO+V3ˋ-up%46let47&\ e@ ]i@0Vn(@/@8{ O$ˎސ r#eːí5*oˆT BM]w@5.5l&PP_w!:q9qdtXC_UQLaUE+ErɪTn!\HH^Pp+Xܔ):38V%Srߧe[ LJO[j ϰ BD lˊYyb%aNG! 
"m);`ZOmSQi^Cɱ{UMh+HIU\f/ 1+yd.r5.A$F˔yƐyѴFw/z2yVNuӒ΁e`_g:eYY9zQ>ŒK$UJ*1brChWD}-BzSЪG![%ar5cYo҈a\hOufsT(p,a<1p1/u|1-/b@<7{yM2gUݛSt L~W``פxn1,z27F0Dn02ݾE=šٰv~\ښ3 q&֥~xP UK邯})#ĵyuc%#Ֆ,K1-YfNډ7V>؜7O BJ!yERn;PwI|*AN|P%wlKo/yf (!ntSp7r(fH#34.Gf0):z4b%;0\bK#_߱#e(=Q:ۋ5tJ'q_Td+sFoo6Q2 ܒML`@5$mJ"_A( n#i0+D CbYIS}=DI2af(4B*qGևN821o\.䥝JOe(+R,(K"˚!|g 胱9N->JfnS ?g i+3Ʊ@Wl$m&ay-i#ԡ'6J+S\n ic+̽8E$)x(ʄ` @Ɯ#nCy2uHGzd2|D}a劯<(^vfܮJ"+ht3"N|cspVZТlrv5z+t:~9)܊}Fqc=PmOslsn'gQɛ"Va+R7zf'}gH f(ܯg)I3yg` ]-3o,.k*{c=`gE q[ ^)Jd# 8Σ{J2"O߱) c?X0^klW>@AG ~B. iBlHJezryi7ϵ% |Y8uIn̎VhyWњ e((]r ߕ+jlH=4wtn{!6~KB`tΨix7fQ``Ɣ1W6w\d4v&|!Ȼ");%.4+9ɮKAp8gokTP</m=͕Zp}z[鎜Lx{VZm_o' W@[$ZQV]JAVCAUщ'~BYf*lxV+>#0WFJm2逐"lE_.*+yҞ4([>*1(M4EY~&P.j7kyf#V3؝"2b늹Y ?N^_X. DND{+#Ok}fEB -^](˅B zx?=7fL!TXP6|W{}*ۛCSѸLF. r`}9EgCfH%sЫK*8H" rR;X A 4r~&ީYܭ o[)˅".hIFfBl #[O9!V^xF%S | ҿ#AEIJwu FZՊ},ʷI4?GS|6bgg]~$gHs޴h۪Hωü!n/*EX8Bfr_*1fR.dH8 mXޤw2ײe\fꖕɢɟw}흂LN- ׸4m8qJ3ob‹ P‰}pn灹)R300CYs-XSbZ!(mv@h\<%yM2$eEZ]UƯ1G\v:qz[+]%t]ĝ "BgwȢE"أg"?I40ѯAd|72 pSLԅ0;3w&5z;_|ƻ5ф#$FNs?r'9ou"2]7vm%uN㋾44blBݷlaHbɼ$r˪ґ$/ndL9E_iu㗓B%$OeVf1vh&gc?(K~thϏ fźln͘QFx0EvNFN|@xt y1 ӻ^@e`^H-X3OzF쨞˄Gh}+(/ -F ޭ\I;JC_)Ir2*Ee0ҟR藓B%Hє9EP軄Q|B_N PKyR)\м ~9)T֗;'@רB*NQܯõt6ƒxPu"ωQg+N ~9)Tb_Wl:4"*-}W[9aDzLͭ򝱥qq(cKNlѽyk2jmg(a~cx; nF*|HL̘9#/]tG>Ǽ,N^|R]@'(˅"I6SDcE#E~_$@hxC[_E~/'J76VTM!y/qɢVzn>egt9Wo.gHmFU"yu]_.]y2J~1YL(ȠP^!u'kN<%A؈zf~@IÓ Ê5 `r r`}96lw["]̑×(wo3@mݱr$^^]ŘYo7#7V8ឱ![=q+i ~BiǏPۑ]= ]O6:8p¹eQtD{Vwo @%"iÿtZ+[\44DM=ty%>RR5PNjne%>. 
rR:fp cu%pU)??2?dC?2u*h(1f=}[ ̺tT6?Fx\ӎ_.,|v4U*{8;M?GT&*=' IQ&9(>sM]L}!DWזikX9~9)TxСӚ+,Spt8C|aN鄎/v}w+tAe{MHO3ZƠ}!E߹k>PV)#ׁпvM^a)IA1owkė*gDA5Mw +J%/n3:7$]u#$9s ?eMA~AkBg9lU Ő aլ ,3t!~@fg?II_~{Wc p>Ϭӯ~OO^ސ K>&=tSǿ7Ex>7*_k>_nO7d5jE[z7r} endstream endobj 413 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 401 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-047.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 414 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 415 0 R/F3 416 0 R>> /ExtGState << >>/ColorSpace << /sRGB 417 0 R >>>> /Length 864 /Filter /FlateDecode >> stream xXMo0W.]] $8 NPUDįgI@9$~~*/ER.op==-On߾x*orr~zy鳨z[^FRיzy$XN˳=vzj*hn.\oW Y>-*/q].z7W&Rf2/KmݫMG{a|Y7t#0p`P|\4Wa*n~OnyF[*a,\s,%eFr]5͉DK^F̲|"|Ekm1T]y*ȳV%- :DąԔӽDrG[|Ό^=i1pkyV>*ehc|1C# ,'ҭIϞlٶKK`fEdc"Ίt,$!ztVd sOBXg*DMkm582v8f08JVG%Af/PJdZkAu[,;Ft ;4Bj=T ȶ eըQ=jPZ0!S!E9{Ps^:13FlXFgmAG,>q> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] 
=>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 424 0 obj << /Length 895 /Filter /FlateDecode >> stream xڕUo0_M<$vI `C}c> /ExtGState << >>/ColorSpace << /sRGB 428 0 R >>>> /Length 2874 /Filter /FlateDecode >> stream x[ˎ, W2rQ3Kq x, gqa#Rp3!UԃZ6>lܾ~َ86~/>㯟oQٗ{hߚE?營??n/nyڇڇf!oo}vl~>ܤV;T[ M}ñ}s7IaWícشB<5ߜ٣͜/M^~`eGk#ʞC9a˯v6 Ө{IFr4{oT/]lG/;s>шc|o#ջ[bS|zqM]؋ŝhHuXԕrT 7c{4-rΜ`iX^=׶,V0Xu_ }Չ}uB$okROV~ CrZM%&FT!cGiJbt%@=iB U`gaWg{zrNU"{gJ>/Ȇ7F7V;zo4q2LQNu4cFZt_ X5c#W;gEPD- E$ 4>q)p@Ū> +ymj,sOLȀ]p`FCǨcxٲ{%`!1z]I "O0"/qi(ZShv,B#PB6 @5ƆiDaGZ'&4il kQ;|% sA4GN>cP1KCd^EshJN%=&g$ 1 r >bg@}kb"d,qY$ٳe-tAsd{]JL$"aؒgN8?#/K|J$ȓ8aEh$AʕC[ivwF#B9v0AsTrN(kfF>Ԥ8[a|RNYC6=JcgBd-' HDc&c) H .1+)η:P4 IV b^)d>"L)}$0:&`-B^S+ג F)0A8#ŭJdB:!uR?PdT<7cX9XQҬR 4ru ˱.'AT$zrD{cSH%Z멝£Nj\6ztgΦZĥ[Gr^L@;-a ܂!sIXXagqz‰KGn_=8ϝ|%=w*("9ظ#JiJbt%0CڮrANrx``g]ezrKh 7=c/Ӑcqf4`!suBoypSz =ȟW3n^%ՐE*r^|ł1bT/=r0b7eDCOxa_ɝZJ&$_˩cۗTN"Uw#OV.{탹5;}h].Ǩcx2Z;L.%^WC9i@KrF+zBf-W̥gHh4~Lx&ZENL< |c|Z/}۠y̗<<AeS3a% bDUjJ UCڇKpP O"4huL8[\'ƁPv.%^WC%iA$撩VƪCqۓ؜X, :C#aaN6"|Ahv0Ct1 dőG. 
Ȉ]ҢdqyNGFJwvG4cQ1*f.A))+plDT"z;vCa,p Ś a ܥαk.X|eq}b?S0S0[K9SKe߹a+_"S5f~›kLQW}Բ-'c1I+7C_0(/ ݧܱA8eb;wV߉v𘴏1~]SȬ{E Z}`녴B/KV|(<(ɛkO,'M#J"-&o|ǩ'<DP~d{3TM4ͷ#k7KfqSCt)u"63JÌk/h8Ϝ> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 420 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-050.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 431 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 432 0 R/F2 433 0 R/F3 434 0 R>> /ExtGState << >>/ColorSpace << /sRGB 435 0 R >>>> /Length 19392 /Filter /FlateDecode >> stream xKMv6?(M~A^x 2GQS{viAs_?ɟ?~o?ǿϿ?~PO'/_}>߽_f?{~?Qgִ%?g__?~˟;߲UZ?~u~M'ט_b5_]M_%I3:+ԕXyzV›ƊO~W:W]\R:Vw*Sj?yOk3~0W/뫔,]_}o_5nN]_sdW=W?y_{VƵVYOmo_ݖ.}qnٿCO6n6lN~pe\?8.Ogpu6Ο rq(y:Onu<}?+zSW N9Y?Xz?B]qH?u [mbWx=g{6?x#מ'40BJ~Gc;vώXψd=7}[~B'k<Jfw,:~>sEϞ埼N\9Ħi<,as?5xm+;z:rWjo!<}9 9w#Z;ۉ:P}$ av+xկ>󴢏fܒn蘱ñIDct=Q6-qַ{ FV _Yvrjvw `qXsH A,$ZzmB.[L'MXuR_يvavMa_ނx Nsk:'-8>4KbZڐlWLx}m}kjwgba [uWlpV s918| q_;\}9faaj7S}f'.~egaqZ\7_Znb[T-ݘ-[~7v>좤$~ƯۻJp7ÇLl-xlv+o ؘ8&24W= !|VG+(IX㰻\ksbj8-Kbo}L\.[Pf8ZL_S&IncC[/0?vH0vl)ζ$|E5u ca,P[ĝ;è0Z 0jH qiϿ;b zC q/U>8Ci+~m֚8XӃl1=aΙ^ c_$X ͍6&9Fb8e;Rj}^mPvQ`hNa7W$t/8}b>0Z,]|!]\gZ#~u}iqu_&JbJ3J}t'h3e>$Q=ZN8 )n%9/. X5޶>nrq {=>| ~sYz\:t̅͟ٲDzWnlyV:mgM(Yhbm6Owvl sƀH8T'cоq}묟>_uC?g8zi4fm!W`.g\U{Aq^kR6Q9ǪbڑGkk1a{q #E8G {ƑD}!>VQtFR ԏķ8>{pJF?fѢ})6IkAmh9_nEd8Z/Q);SF>T4IXy>Zj#|%)9G)- Os%(ʦ {#MB ;TU_tRfbF 4$q`i~m(bc2F~_8xGn AЏעiͮsF 8. 
b/X"T/Bbҝq5/pŴ y7X>]JW1%^14vp5@[0Z4c^Izuqu]%{ Ѣ.kh\\k>)&r uKF^ ; D0hm;1Zl;*DZ&i{:nt[|FzNhOCr8- .hႋl8W7nǚ' 5F\onses\r> Ų~X-kmZSX_%CL$8Y4i8'-ɑގ; D?3ENnjU_Mނ/"9Ԋ6aA'P؇jBbWX+yR~܁> _Dk䓡J$F =͗ |9:~69GkS-&XyG g;!#ohji-q!ߞ^ueu$gd VU Y<NqMkXhu>fWx>H]ӽlwyC+9Q7 K=. 9ZM+i0ZȨc^O7Zl6 LZZ{Lǚ6xKR81R;W'Zl-0<\SXLK}K _; Z[k MsQX#ݮu{TW.# аF-\`4ZӇQF-ՇQǹ6FYκXlcM)sҚ@\Ia^blPb#7χ rLl5~6V2x8}l/ގ{%\-mү2Q0ZUT+?s|]=7_h1-V m:c&bogpsƚ+6L߹^̸8Llk,*u[3,~ ' J\i#l(6m.j}\E8.@dNJ5X_tX[+˙Ǫ/6W]uV}g}Tcǵ$cvs3xs[a^ ~;h8"V%܇]tE_}ksoah!vpo~+ɰL{ΉѢ+XsCzX_1ytXyc0_3AӜLh/Y5ib9h_Z1KPhv ] v?8.`~KhnanJ/]J-gqb9l csCpu 1H OyioG>QOOÎra%bw9vy_eHs a}ھLaiw]p,L57vl9?^11VIޛhEHĥ\) lZEKzup5]kt7Rn 9[+OlـMl{k"UeHU}TM>!hLnX-[Z4{؝`qh\Gp#XԜ8۽vNa0}c>+>䩢>ҵ%3]?si]?c]_U%Kc̅hLFZ!4aQ(g;CA}\A}wP@Z'X$\NLs[oæyqXzMxo-^w^wc= wg5~#oϚsL8ª1bK&\8[2aD炳M'o݃ڐv"/uR]/h ±bͣۻvvbslpXvn \lh2b[dJ "Yt9c{O le$ cFؒnm`K">Ñ-)lnl.h1,Oɶ+\F 뻮Bqsn>NQdmEXk}x;RH gy&0idM)0ș 7̹,rlqxcMKp.G8֚|ВZhXvFҜsQewq&Y`qٌ-[yLĪCY-@|7n)Ч /ևKqGVuC?ه=pт/΢51\^j}EV@[ʖdi.Bbem.h1p>&wxx=?e?qMKkOS!)Ȏ\-,zsg[V' -1ZLx"0"\I`-g:yvX$aE "j>H (mhG}= #@zc. 
baS+PPU0_ePPSExW[ta *pA`d+f9٤+qxf47x$gl`{ƌq,hik7_d=ŷ5,q]Jʹv9Leo6.l :bt ce׃xX_և|o|@t~C;iHm.T12k3fi6Ҭ$XB2\GP:8Gؿc8)A5!~S\_}F +ZΘqϘơ3q3f<~ִY]87kJ|?i%%kadQ&\ l9 .,){FF2IтQR@\Sh%UV6[t(prfl#34@H{ ;)R98]0ZgcZEƍԍWhE$B7~ΥG7CDL'S൉k4GKjQG>H}Ydx rόVbw5>sD}HFF  4> 4h!m+tkbh)P7'9Y^ N: PD tݧ xhh'#]8BBF gl_:|k_ؗQтֈFhgъ'.EOkF\0[X%l }k|GߴΑ*NR3ɷ{>Q>r#W;RZ?1y~b'( Ϻyyg=pJY F]L!FgBq9z DDze/ /43 AbplΝ!l>sW,"ύ>b_t͜J!KAI q|7&h|$ ߘD%MC3VaRBwaեwHZsA޿c&C!.鮘әt?a9S6!V #S3NqXjz61oQ޽eN-?v}Q3*-|xzg-FJQC4oRDB$FZع4[> '֫.lݗ rGvCe{_,MFY&boQ\R"i-kfs0h؛Ѣ^p aºQ`K"6;(L-2/w猗w֧;q〬!~^_~ۛ<~ō)bAsIύH|V@w~szy }߹"[c%aa5;#l]q9fH-#G`6DaWhk\# LCHݠCvjS̖ؔ\ Cj;|FLF)D(A_iZntĚ"ށ-:"Bat4ӻ4QVe"Cdh)D qG !Zk`+> vPStE0Z@۵+o6pMs6,'[+(~gȮtW-#Yb}(,8iq[3Z X_1zah v#*mV'Xs8Is8~-k \'$>_?fFn_A49V(sjX#bG@#Gd+2 >8#x} JNy?bNbXS_vAsd%>c3Ld2#:u#:q:LO.O(.{i~#}sH𬇫'ax]gmfʄ{[$W-h#3:۳h|{y$Q˚EC c_aTp![Y[>&|Oȁ$D}J|= c Or ]s ekH6-ێ҆Ib@a1p'l'/Oze~r0 +y9[bcdbjOW}seƊux2g/Zx_e.n\c:$O{Gh:<{KhKXViR9 GZ:x[![9_J\ hr0e1ľ6`;KtF _po F"JUR-I(i`6SClG =tc>*4&0Q^scBRFYJ>cGe !>LDcE$l-4\k@Jh`ٯREk8%Y@FPb+]9Hݷ51[It>?2hl\}ϰGىIƶَƶ^5]͍|1u } ;Tg>jdL373Oo@z'!z*Ms;PV7!Bxwypl:o\y''y O܁J'X˰JZMIo[tbpWBnJ2P+9 n<~n*ܦd1K{2Nf[u6f7K"aއ}EVꁕ]Xq\*\8Ϲpěy0'~Fч l[ԪZyG8KZzas3>k^1[^EVԄ£IܼQk82 Jy;+k2bj{4:g۽k%ܓha.1Z8{gwlkw]7Z4$r Pgfs/I&&F gf #6J%z'bl|^1Zk/mVՇ\Gxj=Ѯƿ`c)x[ݞlQ y-㰙y-RKݥzȓZ a]i!C?2óKOE/=̽SKO["Kr1-@͆q%x1p͍z\z\ߘ Yg]&FPxfyv cwZ1Uy uv+TzV ʱ%M%q-g`!JbPzVmXk5"n@[;ȴXH{d}*òN!2 F.5B@15՛}VoV30esG.d@+,֡qNt,Rh9paZH"[U@~}M=mW{V9@k!M3{C/k?I}c[&OSvy-WiL\%4UW} vUPꪑBVk8xTi8[O@ /%[sibREآh@Z-W!{oGjG< j?"gیE M ,+l0j۾f6"h0*Xox33@,2USq3}?3AK>5+VKy 5Tc0p6)J)Wp>GP<@n$vwǐF|39gPk':Ηy/",R'Yxǘ:w$eJpdZm9ݦdx/wOF.}?$>,ՕpGő~韵͜-OGv}hjViFW%$tTVfd{({J!蠒ܩHe5h]P}=pϠ@ 9bO]JH:]Mz 8Ml,D~w["xݐȔRFq$BXs160%$ wvP A/CB~v/ Io4~~?BDDqK!28' [ia^ CWZ/u]fz;%h[5"sYHcG𹈬3S#/.du< F廕l6`yA]\{o6j3;`ҘWfNn c0Z |/*9<`Ƚ2^y^߈zAVd /QՁ⼿Qjݣාȃ `VE$]CO]tsow.%TI93X$[ZȎ35W r9-s d5HK#F GQ%&y>r9!5eJҷ5wMRv{+Ǩ"%gNkq88}PYF*s=z8#8ꃚ#/ڬ,"CQsf sFWj:*U$&)bh=#S!4%Q*D.B.ms3ܕ}n.sC\oBFYQ0յ0Xv)tIl>XRo;j:(j:re[[=62~Df~V>W3 
֚>nG5\\%ަߪeH131gXgXxpCXEN“Tx]3?ӍhFL@Q⅘u1XyonuSHXPAηnd_(Qdy]+lYþzp}}`,/#/Ә5]}j!#DC(trWqbp 8X8cyXC6bpBd%!~yE\Ѣ x+Uy-Cy=u G5?llI,ۇo!F qǭ:H6f ΠIX(Ku2jj'zL ZǴ`XdNwc.xٝC/b$TF 11m]EzS0$\̳N&daKhA&$S}} ad;8\P&F P-yKmgßg-~(p]*ȓqKAM]-2>;8mA:1<HrbZBc]HKhv ~v-\"]$3c}η>>>h(fpo*xpi_lH!D- WJj 'AFmG[r7z3)-av FNJK(|v25"Xs`@l.J6cpN-? 茕9%7hvOhsD(HK3xD{lQ܌M-n#!p곱/q^ `\CV^BG{ ʖJlrư /OwRpuJ4[ݟ6GW`QnwuZwl@Z$mTHU-* i+ ĕ8cu_#~:D T$I Yr$Qd.0I L_ic}1յ y WEqC Yj,]F?n27 vp=Z9( }`Xh= (8G#qA/`—w!e447i)jHħ:ng.\2S [jEͶ;·yaX!Eg6,^ =x8l%/՛#1!Hc+Jp^)-:N2psXKO Xf43 xo˂+ *m7y^87/ $73pz?#!) ET&F џUYxඐ%7@ڗ$BA|CC 竦ݒ.I*;GH)Jr}UWT:MAᅯJ'ˡ w0TJM.s js2z k F ׀m c:h0qe&`aъ+p%A.D 5}Oe+>qte:8t"c7_}j>ф&:xFVSkXP qVE;E銳rٿ(>/vD>ȽHP;,o7ȘC"\24Yf,R>/P1SpoeO:AĘeIZya ':C7 /1(;bG!S(d#[5e-.3%: Z7'ܒQ]!( 3{73*{Hg]C3w >)gx[ Eo Mp( P4Q )2MG7Ky UΩkod9Yym̂C\"w/ISj'k@;{K} ]; W5w=F .!t}0wsU~o݊T Fufdܷ: oo,;n#- 6Y7'+/,kҤ 亪~WSw6q=rT2"2L)^/EMТ<L/nǍ-+xs߇>Ŝ;C%r{WGM!dbw~# M/#j'в}v{1e( 􀚿`9;.ޠƬi|u4ZͦEᰀF.EedDJ6F;~Y˝zlǞi!ii Bbh_l'w?bҠ3*݈cTQ Z_wL8Ẑ-O?{ҥ[18mF r0m>\!g|.O;u [vgvʑ jM g{6?ۓ\=g-ڀ>. w gw&5z*'BoE F>yҲt"UGdKn"id"{Ԟ9y 5y );zN:2-wB|A7f_GX;oW=N:yП%%EzmBuPNxDUƋL_&5=pБNQm8]e?,X_ }gqQZ$mm!3·EC.{$fyQ&SPzeX{7 &; 7?3LAneB.).f3z-4N&YO*G_W ͤ894/~o|~Ycc.oBͯ΄Q'694@tBE).YqfF7II /Bͱ?J;>ɏ8# $? }z y+Vz@7 5\{ْEpp:<6<P3d\a yp`"WLo睕iL+'u'?}n|>R/L@+95O;_([*O}}mFXr z{e7}T&/J1(_ FC[Px~f>F_BT63%^JV}=*Gזe{겋;5VQdȧqQo.qƛ{rhagb.(ܟsFa.){PZI@mr"6|wӽŻ <7; Md~r@8 'C%yݦA6 #+a.Nw'hˆ]#_f&=kG1l?jLd´܄ )|uKe;yN&΅ME[Qs 5=(:i$\Y Oi`V9vaT='?c_kP<#[e%|O7FvC[zҏ湙&Qpnr"-;HUn\we! Xt̅zVFF crk43rѨ'.mzWKW;#JW1`-[痀}O(E eij7p7C[0,\ݻ ሺV7Zυ{S[, ?̨m@p-8t6רog zjEE  &*s*ݳZ|j{TJn`{4H`G]J$Kf. 
%7?bCPG`^ 6/ "-,1YgUw1zHmQQXI΢Da} A|@0yo\资il汫]͟iBsC 芥M=yC#?5W&#\H<.&R'BJ\_\Z3R 6ı=AOܢ[0u9洛-8Dr`ԓB /מj}-#N 󾌠qeEg_;- ^ᢪ`I(=< Z,Y~:l~ 8 F(VG{FIei)Bw_V\B KP#c.^nCˏ!(cOg)ֶ"8%WdDI%ȪC\ҍzt,{#q ~@9"+e0?8 ki1/ Otc6ĉr5'55hU}Ȣ+Fl=)tu} -j?$% On*0axMt K<o??\MdQ|_?ћ?}w?fO?/|!'X?y>_Ф_?}G?k%q=T_٠}Ee endstream endobj 437 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 441 0 obj << /Length 1244 /Filter /FlateDecode >> stream xڭWY6~_!yWI&i Eߚ} ^3-%yA @5VIYg y%{цg Z&|ZA/6iB{:);{ -ƾuO0YS_ zKXt+ !3"/g@m Hև Od>X[d#S=e:4 ʌ9 Vr*<&*(Ӆs,KNؔn=j9%@ n[;ujTa8Zp "$Y|J %ga[v=rFFxNRCW69č:Qw.6kx+ȇmujXvS`Vy枏){[' kBW. QQ 4SpVJ)(J@I[=vzd$#1|Q]3eό1bݹv(ڎj4UQY Ll" XϢ(a[ 4 Z>¤c'&˳ HkU":?°.u,W ڣ?+ EcEA\Ҁ jDw*sS5'K5'>j~(7*Elyܬ|QA_1_aKx;-?@G9D-1c0 iU˺'=8…' p|ڪB$)DLJ? 
endstream endobj 421 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-051.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 442 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 443 0 R/F2 444 0 R/F3 445 0 R>> /ExtGState << >>/ColorSpace << /sRGB 446 0 R >>>> /Length 1198 /Filter /FlateDecode >> stream xYn]7W⣌$ptv AgW2 v)K:{Hgggy\)7k.WxQ=Zm~0\N]g).QY)xsoh[ZOw?ܟmoo*ofiQq7ˏrY+n'A=Yѕ]fOoOWGNr/V[;}сubmº/ͤ@&/oë.}5h}(>FZ,f F,潊^ > ]::'W!ڸ 3 q{mR1t.V8@8H2ZB5CSC贷*smkL[ ::)ccS2r4(7usp+NAgY~./%" _p XgC}e=W\&9]j#FO@'T|H5|Ȃk`rтJL^ :qs:E 4q3]J$/(k2a[TX&@^DJ,|X1]*e0Q Am dfu[ioiiwfb2{TZ6Y^FG0¼ey2E*͉5)O#RN59s/Ka,KjP~e'0ȱ!pt=nNH8 OTDON`g$LyN6CM9} pmw7bptb/T$l-  k.:5z駮e!{t ]Ƶ~st7$u3)!Cꘔq>`LhpP)L=wLag7(@`c &<2ws&hAX%VG}<|oХTN95 t s7nhMHk}*Au2PZx@E3-Qh &tqiD"'k ejT4(hjZ@Bw@v_[%}r7V5myˤ ֔j; gQc=g{"<_~<{ܧ{/׭ނ)ЀGgjH<>il-{X}rgOR_ endstream endobj 448 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 438 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-052.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 449 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 450 0 R/F2 451 0 R/F3 452 0 R>> /ExtGState << >>/ColorSpace << /sRGB 453 0 R >>>> /Length 11523 /Filter /FlateDecode >> stream xK-qW1` 
`2/@>,҆azڷ8;]rwUZۗ/^SziFӯ<^Gyӿ?|ۗ͟_6/]C#~WKb_~^~O߽??/ZK.՗U_sһ?zڛg//?_no_U9o=$~G_?/8/1/_/k_?W=cw??eSwJ/?饽v[k>? 4njZuzUx3+xY_8u wy-5y{ş.5.`쳱YkzdQG~-`hd59[[uzp2]_L`~>k)culr齏u#kA<36>lzLwXh:E mpxv9d]G+O_kaM<c73M߂+}{hO9х/)B-^؍Ϻ&}>p+:N; z?w lN;ǼF'A>{egZ :+D2/yE9< n|XQ t Kp d{̼96E{6~ {mA㻟bB{'^*aZu`<ɖ9GncNdu &9.A=}TiL Yu8^a G@%uFGj>ύNF_nA'mg:kDWW'PyQʷI n6C|hBtMr\Ys[v0p{˽{\FgqK{Yd]( ]s*w'E-' )QpҜ 1NjSǜG5vIQv!Kt,?<to:$(} nOBH6lsByi;R"]81'ٓM~qHOF.` 0uZ}8 C9^oݎ*9Άm2z`}ч N$Sk1:2ass~D.ę8byټjQ׉wq+B|n;C;qzaVg5ݻ|x16}]_ëT\)"=dͧ.(\q~x(I L+ی,(<VOlfڹPA#O o&|8Cq&ҩ&#K+ Pr5Cn҇0D]=!| _d303[ *̈f-㽉.8{F[hthAI;(9g#srW!Qv1PSjtfb3ţ:SV]V]R62myQ;/D/U': uiGN05yV*@Sw\hev#:n% Nժ[ _vī}]<0vp%x\ȫWl3Rm!pDŚ!Ȏ 0Cf!W,1@Kj4CȕqZ6 (-XT}ABN_<_9v>] 7p9̘fqf?ΓcEn!`WfQ{g+9YZZ) 'Y3]NnXYq ͵ NHё[. !1'^b<~ǘTm zfHtktd^\`En)-{c;ER)E+ ;Q Vw6z$<pp ٝQ(t{*xd.0 [lgb+Cϋb+Cne~62Tj~VٹJ0 ?iǸjC*.;FiVHM&]OHpx?;afFޏ*s>* B cVgh]^QP&1uBṠvF'd8+=mZ6E'a<<gj]y-cgw[>yYQZ"faO[f/\L>Eyͧ:OS:O߻igTT]=9*F5v%ޗSh}n eP.-"DOW&STn.{"EПvH[nk^ep̉՚{t|v5]洔 VniJgmeW&vw[+=90XD E5JrEwXtiAބxL@pfy\KgPF훖% [r>Zj>n[rh݂2p7ЊN@>o] V}:&J3T:/ !. 
h_mZdt+HA9 P"`X͡+3>%)`#ݖq-A&{O&0cQU G]YZRˑתWZr:~ފώ<ի*2rֲGG}do4 ^ ~%h=mvцGgөs~h𻌎 <~SMMOgwOM=5^=1nC=5o7V9}?P3+׫Ɛr_6b(nv4R ؛U= $]+sd6dy%GaR1I˙M݁"yx7~^6([qA3ݣRoәTXs|'7l‰foz+>3D{+B[ɜѯ^^x~0ns vɘJZN0]> D+e/戹{TDA2jt],n y=|f:1n3rj%ab:=Bm @(8QˮJE'7] _6Wa*jݣ%o,TSߩ1ڰso3Y[&$P: ]EVM,9Tk(W9+)Xjq*Y"―QbgN;Vn ,iǩgPh ']VmO-лZpqpjWlnv'wkw?\"ȁB`.B:"P; R3G'X A-jW"کiVw%\8E`+wOAc(2e^0LU-reĮYNLmi]T冠#%Vß^Lω{sJlyŧw7%GE řIފƚA$~u?* ^s*o& c/~5J^G틚>VWٛo3`ɲ 0BS hG5W}+LGlF%@xwy㎣jWpՅ`+f_hV8E{P吴پT| $8$ g ZI %:;0>0%|/7ho+.o7,5YO"0~a*;7~%|4RE )3n]ɕ\9F` 7EftLӂH(.0J(T؈}%7T@SCPC 2Y$u_si!bT"2LW{5(Zܮ&\Ycu޵ȤсyȚT%_RA/MWz˚ ;ӇJ CHObTXb-c#DˬL~Kr ]'1ЪNg.ʚeB>UqraAd=t"{LMuߚs$MQ)gP&CHc"b'̓4mM=9%Z;EJ5$#yu!Bz'4!4Yiѓ7}*HPNw8һ${Βrz=BK~wwtz۽pINZ@"|ttOGOHJHr+ hGmV樝(q SN*LFB]͗[dNW .;uݦ?Ρ;^77JDm2#THOVz@$:bƎK-4zI)AF6kDZ%Z2kZ|NxtJd !ɟ3Yot\8/R4]v\aMJ{+ߢ巢\ԋ+ S5u8HfW[wVh #z-cBux S6A'm Taw$ oݳCR]30T4p˜|cK9v}N59@:$^tZSWd~wC[*G2]38.9j{UXgRl!:K\K&єSlƂbAx"Ѫ #2ڰF3+a-(JXPn 5,|&&zLb;Ř|K#v1EF/ʕ F.fL2E|W3t#W:SYբGcƢ ݣZ=VG"si,'5ʔ$wa+Ls5+;Xr|Eߐ8%D" 4ኚFhk-Ѥ "L iVԝ ou Yic÷u$-AZ-&s\bz%VciN ]%n׶Ug *.U' *_TTxz $كtCSs u 60& d(FqWASZuޝK*Y!9=vʢdaWg"c|nMWs%b5;jkha7:D7TCtVs=EE8B˦E1Wa3}!Y 5U{ntڴ,2AhB'h}3ٜ$<0)i˸#29J;R`dg=PSkCQ̓>}w G2њ74qis:j]ra@ۗlK0ɼa&x+ ^uTIGG_؈2G`*Cr+C|PW,Z"VvvLg7&`Z~g)?.<iرLUҚBA == 5ՙ BjHUCY*ae+z1kbkϏ}Q[3:.CDo@^ 4ZLik|uսQ& xC@R2 xݿQv;[Ku#$r ɒ@%MG$\q=.sOdN4M(QA ?|òa+]IRcJUDDCVcW&$|QnFʻF8S2$RvW=U#ͷ!r ꬫ믗{=r{}?b+#{,l{wuwXՈ_('kClQ%i붭X&!7Ycg&Xp/fkVYW2u1% TLc֜2w)M[W|[&ٺc-,Pxr$3ԒzhȚyYnB1{]Bp.cgdπbq*@3 5eKZckZ<}Cnq:7)2@fnXPTt=)~ ԝXB\h3w!>nVi')>R&0ȿKO>n;4prK,M?%r?jY1=^"G4tK4U/?Hs0D];ɣQUClHF9xwqM,u[K88V25BQrg k "E>jk.,(`yRPBivTrL%JS R50R6 QPVA-TϜJhLqP"=F'q=t lwh 4{N/2WؒmaS8&꯻xĈj!z916>{ү҃ O@n $~3=yg҃ZWO݆,DU9̪TmN|HE&F?֎ a:BJqbktP9k 0l Ӓ?/:$!oMĝUX:_/a'F֗.;Mꪨj==wd}ɰߡ^*~8nyI&PPS-PdWG_Q2ou 1qǭgS'aӪs4G: ?S.|3IlG3D]љ}!ދ6ö%*2Jt9sIN |vHIJNzJ1:霪5Վs1O91j-YI 2vg)g^j'V >?hؽ)[R&~]fp3E-Qi ['3 Lmt?ۼt/ooɼ[E.ow?mJh&RzG8)t'OtITl-NJs}Ƚ!0ۛ d?/w qs GR“͹DD)oqIkNyV&6Lk!CCK >oм;BWo#͆?sY: 9O}UD|2zwA`UϺy6[5\I1C@;] =>:NjjT{ZYF aAlfY" V 
Rx=L|.w!)݀)g3FĐĠ'TI3*@W)yx3֬FNU)==R }>&#Fɨm Ꝿ#QU(&!:LjNEHXuA%`ug9z2ONܟ=L&)};MuLPb. ͮ%r4 b T͞xg w>xKѰ§ 5smOIr=%ǚBV֛_*3J6SIθf,- 7C-T3-Q^{i婜FETJvt|Ǵ7QNWȬ%Je9}dRL\7stNj̃H JFLJET-pj3'mF+>V%b~Ͻxgy)Y0xoy+S\-\qۡ;~!9$좭9[^$w&ϳfOS4|R8N"HjV jQqKƶ+"JUs|ey"k^j-5Ri] c7 Tv;~6i0ԌN-Deq{.5jm S̑Rm(BNد:=e 0-ON2CwpO d-tyH3:C m(D=زnlaֆKVc )Y g;HK-. ڑ?e 낪&Q+g*ky[aV ×K endstream endobj 455 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 461 0 obj << /Length 936 /Filter /FlateDecode >> stream xڭVo@ ~_!w$0='ChQim?rlgh:9Γ6Sg&ˍ [V5piv1=9L_VJ.6*|ngi~LXbplי.j]#6a*\堼cņV7t<<S^Q4{ \ksBdU*=̚&+-V5Pp`:zCn %&)R^7O\(,ϹtoCgs.`).@`ɴlQݑ6O`O&7}BFƻ}+u.*Q]!-ݥؙ]A;́MOKw2 -nCז ws ˘0y4V$@b,dQ 0U{+T%'g?L I=>7>4#gNTj;prˡ8W=䊫p˻`şwJ?-0@Di-#ޣ~b?<'Z&<4j>ȹiF]~hi endstream endobj 456 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-053.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 463 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 464 0 R/F3 465 0 R>> /ExtGState << >>/ColorSpace << /sRGB 466 0 R >>>> /Length 1052 /Filter /FlateDecode >> stream xYnG+:$RW*6@d.@P$JAI0]5K 87uWt8ݧyӒ)^^>}[ӗFݛtbNƃZK*t_~9Ag_?\=~N\Տ$9]}~w1nq-kB=_?ҷiϻ ܾJ|r[8«R/bT~]*wm>o-K֨CAWH%ΰ*_<L(bR]BQu' ܄p)^ <"pR23CSnb eKSb w&IjfGHĕ*m6z7d|4\LIP^.!˰RYq<r3Cq?l~(?XY iSF$Cc´t4psSH;pem՜i3{pX; 7R\sǒnf (}pXvW׸W{uwnfWRnq ZZG˥[0cN>|hϳK첖Ay zWˉs"7c}F!RFeG p/ýwT>/G ~ʗoܥ#hu^Gd Gmw)hT-5gAFǼvb;ը#ug nB ۖs^n(\Yhmgxd-uךyY" 
Ф/N#~> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 457 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-054.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 469 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 470 0 R/F2 471 0 R/F3 472 0 R>> /ExtGState << >>/ColorSpace << /sRGB 473 0 R >>>> /Length 1844 /Filter /FlateDecode >> stream x[nTGW{t=%Q)x, d P>!VBg|8ugI'<9$>J./_Ok$/"e]:K<9._ui{BǛWNG穕tvJ7WIj:|2]e:>`m^a5^,nɧɋ?⯽χ/SII J8ltC[.K/o^X"Z&7./u,4Y4z ſ7Z1 ;#. V۲w(IݭѴ[-ng,FKw5܆[R2Usͳ1vG$MᦔWUmv+bf_[ ˅Y#[f 0ڦ :ǎ{(c)m~i[*;T?)MWBQfQ1 vevw#CF֥QL;b<|8čUIԫ))kIf9Í<9աAŊ[ܽҌWTճCd!TgV@ceAht .<*.V% <ԙ6xo0nKdø#D+aJY|ޝ  q1 3,Zػ*xph$vCO%)=Z(7="Nj/LȄ͘h YRkB;hP&bgNn[Yvf&0qZEjyJ96Ȑ"aO{~GtB1( e{|-Hk{7__ON~9]}_2O endstream endobj 328 0 obj << /Type /ObjStm /N 100 /First 892 /Length 1383 /Filter /FlateDecode >> stream xZKoFW1>4}FnI5|V"] oD!:Q-h# vwf.qB k tx& AXK9 ^xrx3HܞD2EaAB[KD|&]B;V&L<[0JSaF Q!$0Z 7 $L4P0X`2 9Lc#ߖ4)pOyl`'њ4AΌ)V%IWHLHЊ3'h%]J/F㜉) g !@o1RT0Gc ] #F|E!!/#iBCy%pqC!X |V ` FAx HS>%IUA1.DޢBJT*yQklT H ؠ ;}C1-b' ?br,E#&Y$ĥft&rȆ"9Vw? \"5%DIVpq2&ȕEQڐ88SqxXw]f!_MWȷ˳ɍ?33|^.2<* }S藫og϶5iM~NȗGG2Pz6'k+}K|,C k U'M@]\]!r|jy["_@U'Nouy yp}y䛼hs)Sh-Wℋ屾N1MwYG<]6eœoڃ̵;ըxF0//BZvj{Szj>r?^o1jzVv=!Г' LG!0X"uZ™t7buvXފXXMx5Ǣ"|]Ċ}[ #ދXfioNĺG(<қ_8r c[3`1Hw"mՊ!=ĝ2 d-ћћHEb_!'7̲N?,i2 waR'|?#mefw}h;׆mƽܗ_p! e7|bYӝ3Yv>VXsfx}lIuEj "!j'N%6zuHW4xIu9b Wa  6uC+v:$n;&o$V? 
P endstream endobj 476 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 481 0 obj << /Length 528 /Filter /FlateDecode >> stream xڕTKo@ +FQÁyJ}joU=Lۢ.K z> /ExtGState << >>/ColorSpace << /sRGB 485 0 R >>>> /Length 7260 /Filter /FlateDecode >> stream xIeqWܥyJ0  xax!LHD.y_C$Zj#P95U}׏?GYʧۯ?_?/ۯ/zQKx>w{||>;}sʳ~w(N;|S"|s=o(|~hc=ymϣgϯ?/_<>|XV%Obo>$7_s< YӇ[M㇟WvcOyW<}jآy՟+`GS |ǂ¡]wsxx+Uˑ@G&A3z oy1gpf{轰Ƕ{,&sƺp1%ng]Ȍ ӻ"v~toX#7hb'/oL]+8gi냅;Ӻ/?:2 o2ru {x-.^~/MWhKML|ĴȿkE>h}g#KxvYh%j9,4u}gEXW@.ìM|R.2KXW_/::k27.g9+@.򸾛) v>]b |uDRamjuÆfu|ӏw]uWlJ2'1jd,:瑗^\ 8m\rǒqr'r*؉F) \PcÛJUɶ sq(0ӑ90%BB}DEdγM{mIۗZN*.]7s D'94uvpIQI]E{t#(h d.; 尹i]". VqɞXwcV,4dbČ|wU'okT8:BѶvnBQ:Fh &]*y3ٺXX)Jڴ'ye3pYpG2RX .NR~\QQQyXQV.):2|>ۺzhķ2aRPӞ!ppRS8 n>=h3$J۔N(CGJQtI|Z8yr#k<(oYbZQ>j_gFz~~w)w{~_'5/~hyoZf`,OMӱK",톱ROo1$U|)^1 D^^Ootu`zy嫒r4Uuf M}p2 ,ǰ8'%9͸vVUH6kvPt=62F>UϣMvTFfelM%RjG] =?!QWV=6j(^4z@Ȧ&^DvwC=#7dǍe":g' e KÈxkކ򑐧:1N-Gg@AzDf-[+ofw6#4q+Wxo'^HOž=)zv\@aGPyc:SJKRzݓɀ.?L! 
*$ȑqF2vp#Ln#RSP65ZjVi))C>n9-Wf6 t$jCyulm_xmyRڒ4"RNM^b$B˛(WA4Y l*+(*FEfqe A3u?3 ۽ 2?#K4E1Y0N/?Ի#gjh&JeU ?^áVD&/"zjA![똞W#&qb2UJDzr5֒{wr:Z "鯜zGzb+DS])rWlg+3'c4)ôєˊ\V|_Ka6 mEKJ4xxPWȴyI.Z޶} h_ CZ7ށv:ކ`mDU%e6N<a[J:lضZ+#3Vbc!+S#a$k7fwj=l[뚬m3\J:#l#/NQ%qP(XzZXO?c"*b * 6ڋj}B!$MBLc> JpJq;0k9,9&hD!:F%(#kP3Dn0Vu>2f$t^`M),D;@)1aUMg=I"o}eQ;OECv7k׶.1Y+!C}b!HYT^hHj%[ e%z"vkXaǼ }sQ˙֒UKܽ`mA|\ eQѻ4p& V wY0s hU=z]U"˭R@@.+HүA\"{D%-ztIRwe]˱%@eƣ]u軉C0 bE("UڡtV6/(o=4jqZ{1O8kâ#m#Qx{CYT1tHk* ȖE EG+GM+--~CB++~W^I#ղ> #D hʶܺ*GFLUE费4?рp$"9p]SXغi;#~YKHbe}XuVJZv{i EcŮ ^0Z놪+ʞԚ:akԖ~vY4rg>SV+SG |j) Nx#FDKh,lZv-Շmx5T7H~+34¡W]06LF $`q.0{l u~ +@\J5VFЙP5yBt-w]M(xSɮ^ҨLfӞB^I_]}h"I!vdt &ƁON^6ˍ2(lFG| "4 \D's}dc^5"e^c3\3 H*["0Ǹ0rc֢n.MXܞC(*j< #d՘IaͮMK}i%^vBzݸbą a[r1ׅuRw1(Zݝ8ui;:-$~%TˡJ" I輺3dH2=4RBBΓ{! R~nx3tx=F^$\&,![Tu(i5?'+,3dž& Kc=}ŢBHR.jiTFV_ךY@ﱙZ2|ZF LpDZXuLcD%m˝N6CT|A9_{WH Ɂ|?K]G?XDA5v,_`bv  {u50:*⛁ѫdXR\ű3rfKAt46S3oc[]BdծsSD.ί,'PRW>o#uwPVC!#Bwfꛩt@"YJ@ Ym:j19t`yx%OR\SPQQ*dݣZti_bjW!6 JYڳ UHӆ-.P== Y7w>VJB E{_t@ZwtJٵ4szN2heu1]ٜCff#^]4LY};&Ѥk_w'H${llk55y ~kfzw+E.5?vE~w]zXCr, [[ϱ_nT:<2̹ot1 $Rm>}Q(iҔ-䴨N&.4+aTUuvyLM.KQ$ Z(Dy%w<?]bL<)ʳPʐZ PN]-BDGV4!~:%o5ܽJpzz PS>Dflt|*^Jt$ f~y6>3cQH7uT EP*퉌&4~u+º 5_[B}|eKĽd# R Yga< fNgf6z~Oo\Bך V@/Fӱd;cB1H{S-h182&\rǑ%ILܶ%Xi`pSdQ1_ 薉M?YG@ zBAR ru+A`ڪjHJ%H gY)9Fb#׺"ʲ`A۱ۉӢ٫T`3/^M#@ȔDFmO2T8boZyw[4s(_*Iqc_A"e?UO=yj#.Bs_2qzu}ZS;XVRw]] U`L]L[,<t4OctQŷY\ab1y[AV*(qŸeI&qnp1y- RNÏeGAMq7#GΆqX,7?b:lUԩAnS#5вX(+_"mn#I.M~"c\>,"Ot/7gLRYߟ,8'cj1ݵ$b]ao,d\;nomHW-1s9SzVePZV,8Tً. 
Mu!qDit딦Jb,dgb&+9=\͸ ^jc%uZ,n9ۂ8-*QEï;Ƣf>ټW #HijM[d3-u:W]y*T<Ŷe`P+\$&tb@ϡ]J3C5cIO %F 8 Z5hi.1˩6>B sh+C՞0$mk)ݸTs}7hMG*zfʃJdW|ɖpֹgPlM6(ܮȼfOJ-Do[UYFE %/Wv&6KVZ&K$7yˁ2Ŭ4ߌ`99d`(,\$";&xyà(Lt}†~7WY2KSܺ#xk:bbDFrQnV $B-̀ -\_@8^2 BtV 2ӸլĐ؝Y{ђ*DصCK8 !j ɶ?LTpQ6"gJz7 > stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 477 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-056.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 488 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 489 0 R/F3 490 0 R>> /ExtGState << >>/ColorSpace << /sRGB 491 0 R >>>> /Length 26837 /Filter /FlateDecode >> stream x-I7ϧ8CrcJB"@@n"!͆z{>lque?~u:篭_OKk?Z>x/Gѿ:|]G/Fh{Z|׽=$opp~m~vW}|Fo*׎xCok>o)rӶg7<Ћ}+P M 2~yU֯ n5|Tj_> ~u M9^^gׂ4' ߳$?۶]q,D@mkYeZ۶_d_~'^YU|ͅS' |Y*<3gF9L_޼mEh@ҮJ _}=yl0WAJ>=7Svfc|J&>]23oJ3Bjo{p[;?47}|6cmoFhkPuqc=MVTgW1\/~RY*t\=3Y&W0B#t@ov|lW>TE?. `=>G;^F\x\ 443`v ,1yElWY+{Avf>KV{͘jMcǶ cDxv;O$3`1^[Ӥљ-P{|`{vg cxn0=m|" wCS]N$}V/~vh_{jK~LG;XgSlزbN>r=#~gcw.:y}@@ǁ{| 3ԁ^7=swxf׫7OZeM.LX:?g~kY gV'{v~Z-{ҡx7GC`I,<~+\ |N.;m456P+}4UmlK<i!zIя6 s 3ے9YP-lo㜂_+0 ^6/{5zشW۾ zv巕] jRYzQiCCuf _ӹ Lj:<*>O8Mϕ({:HT/X/ܩ rY'&ijt&Ɲ(V֥Ah _vi71W!3hG']'\~O?K|f}aa]WZd[xY1 =ϱ߻*'Lr{AbĐr[=|xjl'̬ٚ&5+xxC>m3~g&(6޲靚il5yO=ֶ}OP/` y`>3F(i瓁igjBϞ[J p&Nvщ$ppv<㶏66``p sWǾś|Bںwӂ_vzSQ`u|l ЖʥU\ge(~ 6[3R! 
P&1/$iOplrplܩ?k%kejn\ ?Wҹb rk+@[ԔAk }@=g *FDl ӂTl6y&x'AE@TRbW IJ<ߓ_6 ( 2to9Nt_ŎxCzZ=9&"PfB0!y0 < HRX+ldaJ\S؜02 2+fZv9kD?lxvM`d6d:$#ك#뭛Qd{rVJ3WV{č[:\n&ݩ/޲^3Q*@%`Zy ^b-um6\r˗#N -v|RR(˜%hjqBaIJI,NqOh~'$)&=j& x:v'$;=-vX/NuKB\Iꨀx0kxH3|6g"g  h$UV#DvOUT! <,3 ?C@8m'sZ1Qq{ ؑ|iJ&f$e@HYڪO0- :_6hdښE SkekxF4} q#e:9X}:5`:Np}Ԫ62ֿ( v Cp M;:PHs'r}y ۉUZx`f=XiIdA75_u^ W{:՘2U6TL :覉: WΕJ@Jp_L ? 0M4/RKfr\d'LO횐 i/Ge}+EVF !Uh9Kw!gfpNu> ـBʋ7.*Û1dIJH&3A_Ū4qѷ`Ql,Չl|m:n5iO94 AS0}gYRfYbTp? ].NWdO[LNGvMZe'&ЄuL[*6DU4d2 +=K¥xK,㊛7ZM4֓VkI_E󞥐!:B7+q0 um4= u!| (OJsfZt Gr` [f_E*~.}c"' Y\!)Czof"c옓΄srG7냂)Fp8^ehƅSJq4X*m-^=J<{й2Ǡ sg +e` Hg055)4Nq+|R9W(R8:$9FiR[\;ӌ_?pJ'Zâ_jff0h #Ѭk2VA~v}+x,ItSq_ ERxc~^ﺒ^tֹO|K!-̞abNG20S=HpX5c[RV[3;6'0[X TU-\1>e@>dй24 q`)`N=)AK-^&ݾ@Ceۻ XƂiND}(dp9 )2C2Yǧ)/ GkP._Aϋ )m2![ijr4Rj>nf xjq{j  "CD+i9߳pCd)lt\Vdh 8j)` ;ԡjMFbR-5ۇ(྿ &GJ'FF[3.rGNpWFL`Sz&|o j$ f౎.3{] H%~3Ɋ28C%Ox'%х[ RtWY=쌻=ߟ7As=MPOI7ȒŲ ּ7<ж,xXlPO}U`}]SbWX}Ͼ"\>X\ǙqI:&+4`g:O$Rʎi"crv",qR}GjcELpa#W~vn Ag)D{4Zۓ/7ACQn.CF&Oȍ)^?tta Lsb50izD{ f|s9xd_d7< t,Cѩ׼DHKAv*.FE]]MѢ'L%XG+ ]J^dB@p:4% Đx5.s?k[Qa SP(PTg:C&,L*ڀ+2`<傎EOU-s`΢) zܕ rj#TO$$]/}իL d,j\c! VyooCs~sY{adp澷l%5O$dp0rb8Vذkr'i+aRʦi6)0 c55}(;ek0Sbb[FN;5f*XTkLZE6ѼMD„jZ(%MXc✼>=>1x:%E%X-В  b{rx˒j\);&X>k{6FKt" rXQ${{YwvjqIP~3j,Q`/.cƭK@.7<_nZG ˹Jb{rKHދqjx F2YXXlP\l[ V<5ro4@濴m]w*_!+PNT01XszaxLTȯX'@vKnS17q P.Ҥ⺭@--alZ[JZAu_p6 m T$u 8wd G QyVq ]u[iS0mQh c R8x'ҟC. 
PIVsIͶQ`cWԆͪǟ,;{Õk3Q{h;B^-zPV 3'sPIiPɔ)/k78*uSD}#QE+a3',}D^*[m&P0|Uӕ#DܷQ!~{N 3@hٙ Z{|wZF{g:kuJ;D '_,"`n6,38lC8fSXBP;T{tmpJŧf*=WN3Huh-C y齟 Vi)+q[mB,F><=&sZyh*VW_rb%w|dXD辨V~>a>R#Wn]WkJG/Pd\;jn5/[)/n߄gQ1z„3riRc#xn$0 g _ϙ+Ac 6izP<β 0ו TjpTr"psRn]S/tw{{0a+,jCF.+ǰ`;7GFԛ ~87vX=ԟU1j@(P\OPVtrR0kA/!L TzU.k*@ceRіEMo:}&5c33tosaPmpRј8^$I_2J:EH!ZEGˤεJ:a+úAmJ, <._HTYV`I쓑ok~QAEq+aw`E^hPEg^¨}y:{( JD`SL 7a- *iaW܉N[;`&yAA/KN{9x0ftoSR1D۷kYV:h5ɱ+˥%26`V6H˜:)-iJOCR}Bů+ڏ5>!8pCCےa@'&^Vvr̊6UM'b@ wK4l)EӶ\O8;/XtXT%P :iO{&VdĈLZ30{i?01d2W(T"FL$E)\_:ٮav hE c's L]ܤįqre TPe_:+bvaG:rN0F?]`ʹT)*+L˃\Y3)TC9]x(W03A쾳f:Mj!PWP/Da@&9t!k7Y>&y#*qiuLEظB;3Ƿ6>i_eq=5VvTGZ2L 0kUf3㾢0ȅStK-1È$5FHr<ܼR0l&#ۨ!(&C0*F'+UlԁyMELyp9#ao  E802(oݔ7%ʡ6֢ l[7PijZczDP؋C#gwGjIi+ic)bP \y@/II{miYњ>"WtIEM=_ՀԴ1*S2&H}w#HixH{*7v 2}|:@T0@# >HoB M[{^G=i+ece- +u` U -/~g`=*y2D-< 03:(驝\AM`Iiym:]h3֋{ lRB}ϒf)>{A;:jڶ$9|;kmKP 3փ$woћ{26"FFú/&P@ijizOI"ZvQ}@8g\CX Ev-Vɺq ҵB?Lݣ=R+P1Ls/bX"906+^2c'kVQmEh(ª+ejD}{2Ǵ)d<++tHsoPkvp#{ĪḢ:! =46aܳUU:c W^HKRgw%cMS-{n%1y;fL;iuf0ۢ^>xfrW&^)HϴEjhK '߰͜! 
NrLi!WRv!!@9w7Qq-3xō%3WX %U-7bZjaM/h[%!> *y U5-b62/E5>4f( D!F|eM" )3͢ (H"izMNXq?)dw`>9%PD 4$Ȼօy2ʞ6ZvK$JN28 {N"bҐ=,(bEV)[.WX,ue^n DRrIT`ЂZj&&L TYmu-E4ܒtfT'5i̪Fի88e,G͑lB;B-1LBXHoWB>.墴LR{ ~r}J|"RP0< !PMͩ =A.PAfa0xV5 ?v0ၜgWyW]H.-v֑iY>u+14mmoG 7Emf oOR߂R7v Z+Pf:˭&h;U {~Rvxυ̎@A"ՎY&PHjTXlP )f}:%jFcnIL=5$"xp8WtbZrA"7Z2Q8=O;0G_`OZ,QOfiz(SiLfB T.Z ۅ;(-M_K'^]+E|RF:Eʌ˦uk7ݻE5Gf&ʪّ+KJ56O!M󴌼l0k ( ػS0d`'y+W-%3Yr`W,^ 3Lרs#{K%Y7ҳoEvBU~ç_.mkk0K,՗Ө[G6=ة`itu kBVG_>V.ڴw/|Q+yzR$c'ޔv63NVu*6BA:B,P=|N˙M 4 U~%Ls.^6j}6phf7gEpPY ,s3i$<0MHpxȴ@1.Z'pUQ1l#@,$HޘNA| U jEeaLO~l(8#pNSU),$DfSQ@ vOM 0 RUi=I`҉NZB|AUYbhr!~2dZdn̤p >glkB fDl m-,%$PxDUx/NiJ+e /Qa$bO~Nπ4v" l_6f&&d6I+uVc:ͯѹtW-S`sЄa\F3M.x;- FK@y-mxY7n:V>S tT,H,S;Mhi:/[t:( Թ:del&uQ h޶_4-N:ð_I1RN]I3X.lVk*͚ܪ<`$"ʒ:klel%PQ_̽^x q)IߣS_dr5ߵ6T+[I]]B0g%ps@Q+q܏'g+N¯Rڷx?t۪`(t,$[8,w9`pfͿAqw7UYګ+ms0ʙVh`{!7 I3Q )D^ڟOa#HZp3|~XU—`4$w1.\9@V78=+W-OタԾ|!&bUvq(JP0ō]eeG S0Xz[W;a,*f3iA@ԩ,ĎJEl&bF8mj' iD`:I[s 8g/2#ÈVizHF`tTLxE ise5W&O҆ze ^H,مG5zˌ42Q~1#aiX )Yʤ#9Qq[>WI!x?vG| e+|ˊ\ UfV4d&Ynpql7p+ʫ` *R i7xϑ*R> Vίb97_UB>xȹq&0~$JǦu98OǶ'Vi䒪+U5h;ᗞIG a1 L؃s_ZEȓ#m$5.Slt)1.gF''Ir~c6bu~1H-O{eo=5)HFDa7P޸I[rҴY%=/ߞvB{BҢՋE%d cn7%bqO8`͜>΃"Lq OCA(M OjAM& fC']>/7֡C߳D.: 9h7d`6ugquܑ+J-kdoCJH7kŒٮȒyΥ$%o|%WBnt)0;rI4nngJ_sڿX?Ajw@yWw}g}F VG>`#`ɤ~;F&;/Pbm{N5n0}Tuՠ+ه t>:΃&e 6g+Vz%(pE}Fz3!8ɕ6X\&}<&|[7:,) s5ؿ~xO`_E~9%-*(|vAVQW^o`t3v81C>  h;<5n1i;j;x=;qƒὓHt7X[<=[Y#'Xq[qՍBf _ӤXLT-U6n7ZŻ 8țZ$l/,'G&H8^gULMdXө9ב"Nݰ3x۝7'ೋO t)5594xl}^ /< dgYxMm7ܤ/Li0_ @}q+`F$`u6c@Bz2\9 t$sj{|ڙ+Il;hpX؝Jʾg_٧}2bo'hS,*pG: $hߎ_T&eڵzxFZ @H8di~$e]Bۢv@\{%vlΝDI%F9 &jNx 6$i8n'qշDb  R@~tt#1^wep|#=7`a:oog3XA;+x FU5cڋO;j ½:m[ -H ҝ ~+ 0Bbb<l>!p4IEŃw t+d"M#p@G5'|bFC. 
S{%6xx0B:%I&cg A * OWTP3ySVoYR)t'iE Nj ε-'gN Sa9tc*{ޞD+XǗ2mZ7TᅵwzbaXl&owDLz=aGzNH IEc_0RVxmE I 5ߙ8H[>1.eʕ9DZGfXQRJ 8M犈Y\R#109+p8Ӻf%[s;&:ʆ]ʕ %SE#bD5;l, Nmki%U2t+4ݽi Cm16bhc{!+X 32=үT$cFDHs0s(pL|2w77VF$lB,0q%~ ~tTFA`K}vPICX-bT+Y&=ae4Mڧ(Yo5g,OfCTj0Aw=@wUVSq׬ٌ7N5=58%7&=>QZmǗOکi5@|09K>F'?AXc'HnUޞv߾ߠI}/Za8B [Z <2^GsJ(FyAu9L^Z ['}D6CWb5f|N%V Ѐ#:27K{҅ڹs VTB20&uhNb~@dMrc#ޅ^ ?1F<`Xwb rt+ JY24Tk49t zX#C:!}#_ f*i~:xH x, ~f0n ^W j+p+2WJ7 <*,7fb 1^fu5؝L& ,Pg5hJSN4Ss+i(s%VQ s|J$UGruRof ."+WZ,8x}.[o;e(sd`l2CaxKTR{f߳|]fz{OV uFh ᑍ/=AX\{k~\{ll$P4+B]@ʚ\T'-gCwEcJi;X?OqSS# ka!ǦJ3WUdBcbRCPr% (p9WO7iwWvi}J{Z)1^X$'p F#~k"B-WB`Ţ6>Utj%h_I4>DG Sk~_cLNS7e®?Ao$8V#J-?t:{_e!W yˑ:=UoL P} am!T ؽ{$sDDWJ@+zO_ǏuT䈰Ba"_y<@o^ۆ|74jC+dK4Il|xDB^xhʧ(LfMbe9͛4TwxyMN퍂o"Ȧ08!SgyZ,HI`ɿy-G<"بQ=lQ$;}4|^@<:΋Bs>,tl\ ڙ^zu oKMrMMye#[D,!7S_~&$i;i !cV\[W\Fw;C ~Fj \i8Jǜ@Ir8:a4OF s~!!3g2hN'z.B* ,[N4\"˨ *0"Ws/7:f`WIcM Y0Lq{x$O3 <4`dNh%P3gY6pOEBJtb[vf }NWB[D>#gy~SKp?ľVH>7ӽ6kTV0ww1`N>B8s&~_1v1-/Kޢ^6Ns?ibhG'zӨuBSvQCŠ]C"iV"T 3đMq&uz~$,PMhOPD'ɝHQ'_$ ΣcHFnq4,Sv5’H滻:CV3v$ [(kI%^1w`ImIyp'ϡbׇ'8:c#G9tu"UpX鄥 ,zo؎ &  Ux;UBQUQ(S?> 5Ɗ-!S7 zJl8\7Wϙhd.TnbQފ0BİpN1lK`~)F94 EX{&B%vQ`8%d#gH CiX1NisQ>+ <0@Ĺo@Y^W!0sb!AXT}i";|F#ݲg7kEm#U$@hBqj:F,dhqIF65"&-ǧ'HI93 Lۨ,\%, DUo>mOjp*|'fj%K#c7|tg~8DQrvLT\` ,Lw'g*pVXrRc(@F{<~Dh>^wSwԁ}DcAdS0>zW6&ZN]d\"&pLDgR}MԾS_\ UߟO9r~lf5F mjhYJj< ,Vc#y֭c*!D +,| >= DrMZR*j7z鋚IG6V? %Il9&^[؃7xs=thX({#dCQӴ${ysӴYxp9&C;j@7 v_\gHȖ-jC8(7f{Ɨ&$i0'ݶ\HkN @ ^bs~hlpjM ttNϺQxg2 Ғ/?~믃Vns°ws'L`3Cyoukч1,٢ xc-6?:,kqGhb)#G%_8ǷNY*JC9 آ\P 9griU[t^?ʊ5jd@KO/ڄR)K=aK^/ڐZ1&yi|7[M[Pn5T-N.вim~v#}9vZ#>}ZE M ghNSs-"Vbc)HаGU3Nc0*e[wd$'=m͆XQHIV),RM3|j F^\cHoL+6j= 6Z5[j%9#pPՕ=0nϻBLuD5<*HVlQ}Fzmܭ|'zkjrጰ,a980sOBI~#E0ͮ.f=)rhh{ hnUGW֟(: Y,т\JZBWC|pm8`GRa NU>P@-kǏ갣b̰J4-Q)aȖEkBꭤOZҜf-:KB/FH*; 2%-M;Iqc/Mip J<Ɗ^"c  C >z#:+֯/ 5nk[KJg*ɡ)[IWвqft&9 rJI7V뗙TJ.ÖP g"xAd(r9'p)#\\n/֡ %0ǠaLaFT?GU&ۼe; .,'Ĩ0fEoZ~(ZFNK<ꫴ1aN;aG[}< A-`C- ~;,;PӢ./j^bcc! 
qo0 }z:9I=Ѽ.է &AAA P+~*8'u U9wbӷqH$0oR.I8r&)9 ִ(PCm[O&f7I  AG`cP¡,)jwTRhI#PD#6) #9 L4F W6d b´.R;:,fZM4&m;^Sa?|x4 #OE^訂5 (PFͰ ؉%xN#kQ4G"˼W엙 t9,+s㢾rH` `"!(+P6ͻT?]<@|l\$)9ojhZN N˼/2BI#̈́>S@70kDK zn+=u' Qzސ莪l+d H D2:QI tC/G-ļy;T=6p5:n=$J%=~K+y'Ӹ4Wn(/1H.*CѧwlŻVɫƄb=2I@4xWrtl92DF;Hxū)iL? U_V.'|/JSn |Ƌ/k-b/" Eg9NO-P^}DoAq;w>i0I|Cz;;-R9>,xQȾL&7)gdszyLB NVf!HRYƹn_~yn{>ϕ#PNT!xUOYEuby j?Z( ٲM̘@j 3CpvlOs=QiRR6FFojOYT%#{QUI3$IpS`KY0}Ǒ`G[|XeaBc@}eM.u }}Q~&|=#Cz6\/ Ձ[ F_%Rލm5yTBK0EjM?>='`Nt>XN:.#+bWC14;k~Mb7|\;DnՐ oݤu =w¾WI *.pJI{&t4,0-_8+))^eJ:qD PB`7vaaR{<$ =TkazXKRzCLU"A@ɷsʳD1͡0M&lR!ibm({}~E6IiQl+YC.L?M7ID.9li)Mh;-}=uafW k yl==ǹ)7V%5ؿqlbA."L#{DhQ »Uh_5w{T T)BФ*ף!yfKݤ#e.rp)gBsD\a{P]:]̡ӯs,>Q_k 尹1{JSRtcf2U0[.F DK>0ZZ#nt28Kq-b %#T4[vC ͝D׆CEUF0pK !D:QsI ZN:>J9Z PoiW@4j[cܜzڅsSN!O#`Fa؊U{w9 tvێ . Eç]pYũ.am ;0 kEx @PBΧJՊ--֏G`uHROk#V,۞>Ωlk c` 3ީ8g _ꤊajx* P" +x d<`2_\B_~7`W;jNYmSX:20$Uy /VLiȓr;;BK $DlV\:0R> O`AFm -lbY`6܄WtL{؃\S\e{܋b)´-i.(_tgT4R4XI"5O 0hd֗,7 <;x}ERF(V~{ 9 ݫhC-V S-1{|DU Sͭ.< X1G\fϋC0 4^=-4 XJ3Ί=;KMnR%g grôJzMaP8mA18'_q tR Zr2>Kٚ(Fx[VtHy5E'@BP .]r3@5|g8T5.J ^4]Q&4桁O'<`VnXHۺwpJ"Eax\JX@ iQ[ParSP0)OM&sw4}thx;%9U{㕚'P UpHJ3n)mv[/\Gj q[ijX" o&5}WdP Vny-嚟_/Q#9`QT}v2 g~nG]dvp8a/:r{Vo[%I\N:zڕ1, 'Gzx9ge57 ϕoӱvXchru+&xF|=컆 ܶM hf^ _aT`EPi8뷑%P&aˮ$T-.B Tj.0S(V;%PE~O[ANYtjo+ExQ?x~d(ʦ43V< - W0ŜiE6Qx T" rbJRl0BӾ"]v6N3-`J('PEN^KicR\ Y`)ld>]68i먾v`ĂD[nhU ucT,hxr: >փ]sl.b[^ =Ȗʎ8:+swirXja/ŕ=QaٯEr_װKx@UUj=#517e)u@ >3aZ*gD])nK%JV"ӛ˽Adݔ0-&`oYTM%7F(UפXi96<niV&na8K~%ǹP0\ZKYeKȵ,g@AdDs߃}7˭@W[]05z1Mw[eļ\r@ӾMFvM; 8 O!p_KвFGgҤ6(ѢVC"4`﹣:uS#mBj cM1]o֝޽5yߩ yZ$.,LC)&F)HTM7ܶE(vYPShbg4:8荿}Mka}IFR& }*dž]ecuJ:K18I 'hw0^JT~86EHB5nœ@NJ˖[Th]ʖFMAl+aPoYw豆ۥu{HaNe6I#͂v1ݤ7Ttz\f;݇"I/'MPX%'(5eƦe{ڞ t &dJ`]vI.Ί ;R#@ii );܍,Yc=PZK7[y(R?Ab*Y9ȱ1M̑=dDx{[6ҖވL wɰW ^ #3Sji+SQ=vz F5+Ci?ď7rv5*`}fQP[ ҕfwr~tUuvu=grA{,/%֯U '&)cEԒjs8mגfJYC;F#pEC5pe8NX7FAsL[% Çۑ+'A gO3w+A;~4 wq͕Á Q\m!5IeԬE0/{}'^RXL(rK.p= ,ےBm2wDJq({pP{!ܦٺH}ݰ Y_5sO7C(b=¯gٶ.j龌RmdoLB+?]a64jw%h|Q]'@z$:M{"ƿ\_bZJ/bu}Vʩ$f=a=RZB1y ^sI۴7S"ɲ&g9oӘ)ǎ_/W??"?W~GVg-oۏ_c 
g???-?~om?(?__NiG_<򷋛Y,U(ϟϟ?᱓;<'Jt2| _-RH>~O_H|NMg>\?3ώj~wu㴄>'owW>c?G ?엟- ?矝7kDŴ?쬻>J,{w;+yYr{$c;8]>p 6+|_{<~{?>__Eij?Un V^䫝Op/O)f./-sy9rm'*νD? ??oUs板^V$fgLןX5߽w>dzܿAy~˯/#mw=cTPnjAO獎=Vz`y9\Tg^nywn(?o@\;?պ*G4TS힨h V(뎣М;.8uGѡgi_y*._\gtaAKG7g /@ endstream endobj 493 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 498 0 obj << /Length 575 /Filter /FlateDecode >> stream xڭTM0WD"-b+ r4e%ɪ|fqFFN<7o*"?e 7,+9~֙&An~>b@Vё䥈m5?C!-D1$߫MBZ7Ij#I %~&J ,8H5c"s*" 3˃eo_q&D`o%*uI & mfꞈ[N3vGE3 .ܝ[ #pGqK,SeCx! 
)=P [ xwQVwII)%r^0| DefΫvDKH_l/{ám 8dac;vs39b8UB^6ϋ)eO$E: P'g\W ']gVq?i@#zΉƠdniA+*8ʖBNPȹH2 HB̽ \: endstream endobj 478 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-057.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 499 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 500 0 R/F3 501 0 R>> /ExtGState << >>/ColorSpace << /sRGB 502 0 R >>>> /Length 1770 /Filter /FlateDecode >> stream xZM]_1x3AdR RUs "(WYt9]]ݽ*+>~wOo?ߕ7ʗ|||O?r[}9ӇR)O?W?g{iWy)^lcUǼ߾~__Gӧ2k,lܮ֧yTӿ-ߗی9߽xyQ݊Èk|[l8.}D#j7a{jx̺bY.WM0!`:sq}ݤ͝9.=lnC^'-`r#p1XgebVJjf5Mvlxh,Z[.nہ1ŨyD#6GֹyNCm/ ?t >3;JHM7Bq9:CqvrDrQ0àQ`&vVw"$5Č@R3&ɐ|Gnt7Rtǽ)NӝzT(!k Ff݊0sqp]"%גStk \!_-Nו( @i}:~)n bӕ`G%3cI$jUo_Rg !dBϨS=趚Aj֕x-lU [é6QyGgӭ;8KgW) v(#7\8g촸x6ay9F> ܺC1vCѶ"sI 3>;88Πx_#%Zڀxuj%]չQCa)j ƷHN`F{$<“1)ĂQ~5Hp_]"쐽~~"FѮPN_Pm M D;uwTtS+>C3],O*`JÓJr:P z ft d\Z:x cx,8U1Ky;d9gǟZٓt DYQeI_Z/"@F<'\80Y) Zp !`)݃ 88hO"pVҭdqpTX>iA?<bMQ2ĭ-jg _O@3+.P<8i ٕjrlqޡ~*56BJlcpm2&A7j/Y-Ev 44Hj[jP qה"W߮n>$kq8 8KOr2D)NR#>C`}'ߩjt;.L}\0{S8-+c1td' ;%mm$8Rk!gNS)~n Jm29ijWTV,:aB:7q{4ŕhdvNsxT_kDzA vq]"+L bm$bt_i@D* $r`N)ˣץI$Xz_>>@WO#~ endstream endobj 504 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 494 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-058.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 505 0 R /BBox [0 0 864 432] 
/Resources << /ProcSet [ /PDF /Text ] /Font << /F1 506 0 R/F3 507 0 R>> /ExtGState << >>/ColorSpace << /sRGB 508 0 R >>>> /Length 1607 /Filter /FlateDecode >> stream xYM6_%lL؂JHDRPP-k'ό}ĥx9snfbuqz{ee?9X?eӯo<7/^&;M_-noޚɼ92xs`~}03Ms처Ou;>#3Pǥ_zqoƃ@=u\#=`=u ,_u|k_0m:.M2~2TG~[PE6@1zh2N@wnk90U Рȁ@TGcȉ@D6@) PA] # s$zA0KCA0Kq%SaHV#Lj]()A0 JJfxJ.k F闦QrOe0KQr. DХ(9Ar DҥY! Af%0 \gU$ R*ʀ.FI\T!]:$N9h F*%IBI B*sCOe0%54rN)1ti5rN؅\V49'L\VCtW֨A0DsY BvT$dK9 B!]Z$N9 B!]:$N9h F!]Z$tT!€\()s)1%'\C%'B. B]$G. B!]:5h" dVʘۮj$![AeRХ(I딃 2K)He@V$ ]()AHe`.Ts)1FTr|aHV@rYe" h H\VCt@ 4hp2+A0#Ii%IȖrPC%IprPCt9IprC%IBI BPR2SrYc0R䛝ReSrYc0RХ(9ArYeRإ(9Ar 2K B*4%yp5^©IPνGf+}1wpy ʻ/F>Pw;eq1<3PVѯudFjl^۠BdǻY3kf ?Y)F_3 -dVDɬ0gɬ0Mͬ?Jfѯ%^E׻勮w_n~+׏vybSsx~OYo<ߞ̓oezWv߽Ll?]{r0]ȧt'^T/qY~{Uw|q.a|?Cb~<̧[zLZXۚszcO hyecky|Xy7X>szqFӹi.]xy#r|_r"=-:~0^>{{g)u endstream endobj 510 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 515 0 obj << /Length 620 /Filter /FlateDecode >> stream xڕTn@}Wq-՛:6< ZB\Dހ8MD(-_\[QEtә3gf&>6KDu^ɪrG\Mmj?! 
b6|ͯbHѥ)m u"E:Ԫ[/X ֥ߖoW ?F$]}fNZ>1?{6N3SL3gt,"xO3P;h\W N+T}UQl$^b!Ui;T7`mA'-D0׵o$uA4+WBj=ӫ3u %Ϩ0Mxq'?0&RM-nTɳk -۱> rQ 2jկ&brnךּ 7-iqkGVl-E!Şr23.lYN F{oG٭t8嬔B2DGhM,l]ˊyL!\\G.HX_˺0⃉>Oo9uJmf!xnz@t> /ExtGState << >>/ColorSpace << /sRGB 520 0 R >>>> /Length 5461 /Filter /FlateDecode >> stream x\K^7n]:3_3A[.Yd\E/IP:;ucY1R<Ļ(Ƿ/,>KBw?oG,APq(q^?޾><<7oIG1|_yx/?~oANŰ֟#KJ߯iO}b6_{}~ǿǷߏ/,KbJGJAxM=+J,IjJ"ܯ$-b5#pEz2kH2dY 2I֮9-*e9_?upUYu_+媂_6y\U#ׂ#;6"zM5^W"~HW7ȇ: p5! -,?1~jW|Yv6 WkIoʰ8N !nJ6 JL3`_>;ᯈ``g~E6F/q{}JX{ng}A#9wL,%?p'C1ѯѿ{F?s |/mG׼.b1e?{8W qo+(r~fݿ5g}bnk~Qln0ǘ4 f'=޹aS*PydGDK%H%[%O#-mHYjGej9JQB5:إRE%zk\f: )f+ף&ƭ|ڊ=vMy[mWN-3ZVnwFZ&t䲰{޻ޣe.w7lXZnZn\n^oXVZoZVoܮ{k/Qx@C`*,{z>=&).7=:3|^k=e"Sq(8pEOg"H6}B~]"w8]Ën\q Ў$7xtuYyQٸQъGe-d${APEs+ϯĆ+\;w>Vpy10r2_BE ǯX6-.JKb^<Q(%rb%p-_}/dH\č1pa*1ѐcÿ1EMm %fE%M+ٸk\55I8[]W`ts8,kM H3o K>ݣ%xug߅yskt8ŵtj8 R$6軲7уty0JX3xl]'Wy:j Sib]< g@F\^|M-5;y|# J)5W`+M+JDmcvuysySo |0OyU̶OBĉc*h?3q29cxes`a׆c8KLW28&qu~e`9ںp0%3N'ciU$ =G -&p4HLfWѩ)9'!=_P6ms,+ZG2_'љK| W.SGg_49C7szRX *m9W`_PͱO<,Q0u(RMu㢯VI]~ ?t ny,Xs Թ*'jiqs]e޷fP<ڔzDvy=\HݵiFǾk KL^LLԬ&ś #B'r_}uIspm`_edgEb7,2Uw]> Tpy% 8u|<SõESp^Qq WSe^rI视*i"+.GVއl'79PCǃOMmI*pEu*@%NP7(oaQ %)ّPhIBkI#0>WQ-gۙ-R88B, y%ja *ĺLf5B ?:{ÎrEOȩN~'ViE^(4v)>ĕ;'`RPxH ĸ7-wIXa\ASJAB_yimEG#$(b^y ҽ{o _ lu%:j\lV[XJC:=_Bj`O y(^,k/؊$:t.CayB\3,l,YuPM- a=ư) [|fK%W˽Z9ы̽^]za`O t_MxH@S8Jj`)H:j΁BaF`V~+ց#it_yγv72b~W ?x"`ͮ߁-Rh3h;ЃѴZz= 4&PG?sL=欜Ձ-Rxqz8\Cs;fa&/;CX1K1l_)Tn k6/ 3;>5rņJݭW$:HRg`O ˙=x>#quC`_d< 7N ?+Ĉ(T;xD ~ڨU7Y3{#3WK;ULQݩRr 282&j%ͪ\Fw`{;]<J7O֊[ԺzHZnxƩZLWnDG& l„qX#*xMZ:d>V`A(taxۨ]5ϡ}6Hy*vE덫5[|Y)e饼j-dcpZ0|94xޔݼqL{O[י[YhBj7M36j*W5/:mTO~7z&-[~Ʒ \`MÁ-M,f+]yOgဎg(gd,[mZL1u%}ދF`ڋ~W VO^HX~=y`) ڽ×I; yTC7Q ɕ O#+ka#/߰W`-5^ j/ЬH;So gqhyEb1r=W䅐kX, Y, dj;SCgiv'|8fpf:Rhwq%]eiR ~Ă5E <] n _0Ϧas>,lFy֠#iE^[gA %P.Diw$Hs=[= Qߔ<-aہ-ΚPh5[|]FF ŊaP˸.s@]Է0+J>`[kް;~7P:_Af$O:oyMy}p6iO*I{b.\qlAj(@fӕJa lA3-ISJ^/}|<-_=Nyʗ#n~~_oA w!Ĵ|ݻM+2J# fZ=z$mw\3! 
);߿y 7e_ :k endstream endobj 522 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 511 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-060.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 523 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 524 0 R/F2 525 0 R/F3 526 0 R>> /ExtGState << >>/ColorSpace << /sRGB 527 0 R >>>> /Length 1765 /Filter /FlateDecode >> stream xZM5ϯ.ʟW"@B)d%S yM{θw.׫WH.HWOW?~Ux|K1_q{Oq`{ٞ(Zx\f<~?ߗ՜n%*>??v ^ ޾m%s¯7 M6),|ǜ4ֆo7e9<} = iIQwjO>}k@ZB1gL9}4em1u=v7jC \=t/9hS>K@aR7X KK@10pyn<d:-x+qiIUہT $cR]  ̑ YcX8RtcvusHhY . 
[oRJl.돨G:fua5yMk4 h| 4ט\><8OSf*ۥc@!N9 3#iu$(^pӊrCy?<wL[P2~ FNOfuB3XmB|K;c` iK12Dч޸,' ?a:>KDŽCuǴͩ0u%m;j?B\_ 6VzIwg6]:RМ81c)VզVNT2j,BMY?./q̹I:6' 9avp4ߒH嫶<'dL^ ._\HsuN̼NJihQI8 Bu%$Tg|a6f9&/zO2 [7)̒fco._}W;.~!m m)]4R|VӡSLYdO[gh 1OzuJ?B?x>\gM.9/vYX3W3laҡ4GBs|fy JrˉGZʘUKG4 \g?]t 5\(yJc(b;Gc$<>f{@axGq1xW_-vƘG8G5m γޛH1fjhdzlQf]dሚd7X193v>{!=9Ǩ4{ sU4[/h> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 534 0 obj << /Length 747 /Filter /FlateDecode >> stream xڕTN@}WX<%HHT"mmJ\ٙuLҠVx=:RӑdV)'~TIShSi%XiVqbs#[ZK\@*~TW~݄ 'FI;*PT:ڤy&揸:W1Ďilkzzp>M;+tWG]bWro0B77~ )&咇e ^DCVڴ)b'J<_b+,i, -ZiX%O^86~%%lL=W?#X0p'kJ\u7dZ '2NwDW)3 0\  Yɴ vѵpoCB 9fG|u*cUHm6=*^չjh'12?I>.2JҠqݖjB$䌚ASXSXF_ۑjEqd^e􍎝3ԁ44XfGT]2|)>Ә؎zK9=m]YRW -:aG8*ޛ5=L_ӜݿiZZ4vMPp%y™K:CW\e}J$ h6ZS% endstream endobj 512 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-061.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 535 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 536 0 R/F3 537 0 R>> /ExtGState << >>/ColorSpace << /sRGB 538 0 R >>>> /Length 727 /Filter /FlateDecode >> stream xXMoA ﯘ#\x>H%+q@ Z$}w7B|!Lo??ۑp$܅o^tv|<\SJx8V?RzY%(Z‹^nz5/3Z{i> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ 
!i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 530 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-062.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 541 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 542 0 R/F2 543 0 R/F3 544 0 R>> /ExtGState << >>/ColorSpace << /sRGB 545 0 R >>>> /Length 14521 /Filter /FlateDecode >> stream x}InKr3q Y’]%1@ L-W "sꕰjp8'2wG_o___~?wJ+㏿?r>ne}?׿?weB봿S_vacjKUwA~߿?o ~Wc?+}?1뫷Ҿ~#}_.{+v_s}8k*jX{_Æ1w5\{p.ߥ.{wO|~ =7*[*c\okQ~m .ൾ矀m:h?Z ~L5lz4o}o N c۷,#l?*x#;lvA./z{Z&yӰ'6Az΅_OJs:پ>j᪌ユe= ||URs0a&]CknTr7oM>6m~'w{Jq;-Si>^zs;n hral*fm^% )^8+ÖV)fis/CuF'v7=89glR*ܸ {N]"Ր6.ƁYPY#e[.48=ұK+qּYarnsF$5hy!4/|"w7,4o޸MSh^;/e~0Vx WUcNq렴Ml2B߸b%sbۨ,vtay)FC],5$چ])v߇2w}7P2*N]XI^u6-*W Ful\J0jrjg6.B.FΧN:I<լHW)sQijTbGqV"S*ve!Wpl8A|3WwZe)5Z׏ES2&-ѥnlU_7 mХkͤLS!VeW<#KHe'|G$ZgwƚHec@(}jLxa%HylqHgnB(Ȕ CI3jۄy&uVRQ{1:$jSf2C:)GY& b@/^u\4 .ii@3ռ _z}2fH:LY gפ5)Wʫk/SlH>fWK&-ҜA XW(:BAUK sHmvx_b:Z{m&R(ޕ{ ~]0w|]U 8LS! Yn,4IJ^7Ne!gOu҄œDx—!ҩX:WJB6mr!iKU6Zѝlo#ϣ9F uh8ju8!K.+!.]ɛ/Jg"?.,~CJz׀$5aU@X̆&7* aNmp#69ܦH:T7V&-],3>O%iFޒC~7E[L6 aDj|VuUj}M͗DSת*w]L~^Mu_+ΣJ{,Y{^KRO˓B]Uè[&/IZ:A Y!ܸX:NI֠ar.{bDnvWV]1 ӆ:νW핱}Gſ}_.MH \'Yb _7RY 93[953DG67Qqa#:@#հy/9.#ZYBf)ss{TqKzާ'Tw#zRLBX!ZGXB&$u)Qm+1ѼB:Ej|]1߼JС6yרAZrL]Y iELm=T^9vtgUOޓ)_*z^Hm HjМx]sÍC,( VITj&%fB=8!G4LFr;#Xξ-YoO] `U>TI!?oB t9N#VCR|pe(UW21IJ \ID k44ay \9~㲑 %9OC &F/vSmh>Umn}`$ne*To .3~e,$YFc0xIٿ.ΩSu$I7.KI*nu9HKcN=8 =Ɛv4֨CߓC.c&Q {\KsB6I rc]1*S̚vӈBS1]ظ-i&.E,]$]#ԛ+b% [vovdq`3I3Nz+ۧ#f L$*?.tSᶻd׻gh߸!wg|鉴C1v$ Fht~eEto!Ȫ:1;*+4a%yv^v^R\|A(y=Q1Ꭻě! {9FJwj@+(-bBg XjZ!V蚥 [Q~KԔکC!HqGdAOl..ΩC ? 
b2<.Y4>ਥMD]S}Z ;Nr6J\e.{a LޑzɩבMy" #Tu/,sm<ͫ$udö'ͻC^x^<]xD讃͊J^3u;ǢW*VRQJo}~%VbP$H| b$2nX#Z |襺ΐoy9%'C:BJ1gOU8<VV VG8:Xe+U6&|=k6^ASǩNZn^lW&#@Bl}䯭dL/QJ&@:q#|cx%J^Y ,|Գ%# E-oR<`𪢴L gM+1+-.yiu#tn nڱ\rw D7iS2i%I!FYR$S/_Z\FP:)tM|ogR J[7YEqJ ZڱAMP\ʫ ;xw_nd/F%Ղ{ޞHIq*_`\5 p6P)Uۇ}Gsx&Jbpmjw҅Sgt"/.r)}g0I .:7ft񺑩Zb X$vW/3>uL2sȟмE}/t̗ۮ*%E<&AiU(8tvD+dzTIaMF*b۴'x3iɌ:Y vbZWv{u.;wֱV"mp "nn1l\xcs[.,JV>YY=iNzұ!}bp|XB2~O+S<i*Z)tPˬfH+7uU+xebɦ:Em$3,}vlDZ7]\%PX:*s.vҧ.tuж=$+y*O QVkA*X\}#?;ڔҧGiHv6"_7NU%H d)\sokas a}.&-\A!M?+۰>/p^^tHW,{j9t,_"i|B )4҉ѐ8욚ϗ.#DKa7K_V;n\F,%5 o}$zaN3ƑTMތ'^-PוhW}!@Nͤi0 HgϢ.llZAjQtoumg@ܥ\u͎M6Ҿ)~.C3Zk#y{jx&&ٌZ^5rQO46OAD4GA]E Gb)EfeIDh+|[TG0L4(F' +vL^ 4!ċ0bD+fIP_Ur䯑c+(T|[԰(CDw׳c]+ǡ WSo@uܩ301 >PK|TWd?Kt*Ansm|J1^eM(:Ʒ2j,n,q|%C/i%$TⳒQLq=kV %<*c( [?iIwd$ːBD:udaN_$(eqZ,ҦéC4HYu$_{i6*ZO֜z٘EuPmB8*IEZTUqZ[aYS<#t2L݌Y}zsA}51;64Y"MtEY $ ^ (6SKզ6QBT!ߴ4(#|}xJ`4Yv4{`LZ9`4{kbR4T_5c]>rq i2¿ax4fJnPm:iV3cJ ^h *s%fY;+9ئ_܋V +t'bN_WMϞzM8vzIצMWYMm&E;FUY2HJ_WiF)bWa8#$(Cm\rU}SqRը3'JXԼ2NrK0VЧ9hFǚ`ȽaהfN)[]siI.)0zb^:.]jz4^$ΚJH֬4#Ak~NG[+v"I;ϛTKe+fi{UOYfjL[Ig? 
RTjf<>Ut8v!,\egaJ.Κa*&r/8*v;vMWQR"2證:`4T5@.ȑ>Ǧn*#yjK@mz [-y*c팊"!qr}Dk^d+41m/b4 7%Jŝ*+ gd18qZޱMV%Z$Iܨ(QϼᐥJ~RE7;,-2=-K[51i$vɶ8{VD҆y4j4gU[74h<y۷5QYsX72mv.WbpbmE(JWc28(7)% quǠ?m@|/ Žr`HŽ\Qv G_~ :F٬N R8Ђ/,dtzt ?Yd^G|`{O&5[VZ,ց)clߓ- G|i˨q\IȪQb) pGV,,[G @D<>{*9!]ZU (c ] CX}zc1\;f-*q8{,^OicozUY L-Ja8X;=8yvzgPU_8 36 `wf p*+V W/6.z`]tt pĿc@-JmO@(c@rAb$|ޕ}/2Q '/{L]6~IX`ŃG|)6s7=Ee1V p3(&BkoԼ`G/n sy cԉ^8vp~rX#So.p/ovYڥ{m/e#>zh4bSbM?ZW0&FҠ!# 66u1Ҿ?D1L$M8QA'} 7WȬ 0pL|8u@NM'IT:[L{ 1 o3"Ɣv#>z>lŶmȵ0xG|S-J2/w ,*Mf,ȉS[ݽ=GCTX^W%Ӝce0wonb@=&a{.S4j8u5}pcȋ!bg ÂQEB{nnp,뾘z(#v7+ TMv/D5~dJ̬Zŕ 9⊨';y&,:8X?,ݻ٪{1 }p-#]X盠/V!5JЇ 3q8J?<+IeY q(SY8t13?} ̕lO_k)w/p1xŸB"?ՠykiC%Ì:|7+]ݫ,oﳘC&U\, gpY0+EICQNYeE:$zz)kg/wpX> p)|ԕ1{`e4̳,nX{Ԋ|@ zid}?4jGKa ;|JXk\pĿ+ >\o^q 8JIy{7BDK G{K4nhPy#"fvPCcEW<Ԣ4 oskكIbQ2&uҤS-s1Pe-nB*+8{\pctzH5pV_8,臌7{oQ7u(y=(C_D[5->ʢWռzƣjd:W;KpY0"4W)> ߰>-x'(;6/.ҙ^~'|?}~ef-Qn8aU~0?xd==3{ yr ^0|t7+0gA By'm_u{?:@}W2_^8{O%T֨^5H;rJs&;ߤSi¿/<4㬃=̐@*GGM(wU/{0@lT/OEܥ6#%7D}%}5y\d% pgGo+_YME8{a֞<6wiqnGNsvv>2D;ߋԠb-`}=P6q ȱZVx= vRȎ֦tpĿ+={@Kɘ?h {Bco/&O} Eal@nO.޽wŊ}~O5꡸nO#=tObE\:#,R;EGB=x$ř|f0?oP K'g%Ftk_8 ! 
/ pE9ؙ8; bS!Ls|I0x87%SQȵSܤiT.Ju\B9+'0:<݃сZO/ozi\{qu'yc^8s6id7z" 1W=xKl+XxڦprN'1J†Bg>~4{HEGvsG|Cu*?yNb]pW,^ƪ8ST8{,Zr^>}zO|_*R] =o/R#yH0 JIO fA)#ݖ\ynKNV#b87gqja8{:fSфwAA4uyֽ>^JfX{Jf渚N5|<0o/y܂8|u{e`qUWvlGs*Ÿ€?8TĚ.U.>p~lf!bё@D7+8_ e\ +%Frwg^˅?jG:'iBH7pNT͉:h+< a?rL֟PD^8;CĜu$wyG|d];k { B~1P77~QfE81` F| vc]|Ͻ(tzս?^u=OTe58`e"]:{tjL8$F#ςb?- ݭ2ip{ܑZXU/o=П*Úw ()>GaG|VWQOБ:^8uYrw"\A p 졒)ݫ6cV8w8Q' G|2Q8-PCYK/#y2oaM~8{ӽ6wsfk=pĿ+~}+ׁ|[!dN/yqy{ ~YrY  oVĿ{¼pH̳ܽ+?z/ʑ@kzdG{`iw3G|MV<ŋ1JuaX)G#~U2c0h G{&r8\<$w7q >zw;m~vo^O5נf *t Upď=}3SCB{=Tʑ؝K⚊TCHA8+yigٮ 0o0/-ZRq|w8\IjT =υ?V Qn“ #YIl򸾒p5g:('˲d>fv=~$4y.,pĿ/,+ sH(yxp1nFɨ)G|_8t&|0"{[h=k/pNr\Tg ;T0;[S㙡knsZ`@f -F9KmIk#>zWBH&n}chLU|FWa p rv!i;fG|;o¿jܵE :'"<޷0O| ƒN %Kr#eA+5Ҽց#&v Q紒G||:ĘG|%h+ Gy0ܖTyifG3$aD`1/0\ZJZ *MR#x@t=i]3$NVu{Eߋɥ>-qJ>oYΛv#ឧX{ Y(/_8{~0}8_8{ ryt&ѫmNDa)WbSZe~{I`։NwF DFzߵ G>3 Nf~,0 ux04({ŵ?fgpgx|ۀUs#LJR/X&|I^8xNK>)t$PTv#0S,wO-f G|LBj!{NXZ~Fw a~r8XM3F /Wj#|^8{~V=m[Gόo|o2]X)^JpĿEbu14OTh^mw)X>?|R%DG\N ^OG3j%E$\Kc_0b_`/18tNZ9ѝ'sgN#I\ěcPT.c' oQ qΛ#yF9ܛ1\:B[;UD%%% :G|0芃!QQj6])%e?} NoT^PuG|C}FoVxM؇^8=wܱt.]h3gwJ"V7ecԅ| Gu{:pܪQ9SvyxtZuǎpNSʲ-1>ub 0}cc@c(p#g /} 0RS^DPa(P9ܞ>ӏܝ w5#7q"0r/w7N`O(ّln ;Ĵs9lCw{R ҫוÕ FK9fv.k嫎_$ۍMiA /]m#>{sŜYc%"~=P; O'ŹjD8,9M<< V1#ك$/z~@<ܮ Eum0 h,p/00?(Ϣ, ]4 a/rnGw{q >{=6gA%I+e#=Q>k(UU=Ѷ; p kg\ZSu/z|mi[Q,!\f2di' i89V*ȏg?~N z zugi0 rz+ˣjPD*@hyPĈ;$O_~sW=pᇧ ~;M3w)?;Yp1xՁoj^/^8x~rG?Ezߧ<gW+d6UWGQ`0 o?,꧞>Lr/uЁHu*"0 uap )#BzNXP~8E^R|niǏ#czF Hb?ﳠB*r_dR#drp)!kO)ރzNZ1ﳠ#QFi`o{p1P|}fu:CaN0 cipteU|['XIPr5 >rxː%8 ޚ&z G `@چ>~s6rs `@fk4mv~8ZԽ`@F||ZRu-x̦~F!8=(*Ku"gv?H~r@i+~)3O5.Uo?)D: endstream endobj 547 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j 
MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 551 0 obj << /Length 960 /Filter /FlateDecode >> stream xڥVMo@WXG^wm#h%8 \('qN촅zffgi)6v7oFz!|0tISo86w!و}\k}'_F g {2YIovƉi{ÿJ*a-`UݳOgZ$ ieH`m`[ڕizU0CЀQ(:4Jeh*:_O+{m{ .Q0.F e9>.o-Ⱥ!~ 6'DioJpڒa!Ħ!/"| D7|X/CcEZKd4:h >)lyU\gR|hx/s f?%[X'?9GWy`6%/$:7Xk~p:}i$(w1 g<։&$mq0*ArshIFm-j?H8 *Ai ,֪h 8 I{뼽wT+N;8#u&Fg> /ExtGState << >>/ColorSpace << /sRGB 556 0 R >>>> /Length 6423 /Filter /FlateDecode >> stream x]Kq_H HEx!x1e$3S4M #٬:ux|u/_~w珿?q~~f?bcqx8ޏ?_ǗLJ^\[߾:qq_K㋏x?߿+WG9Sӷ_O/X/>m7F[Ooo|//KK77Kk<ƙڀç#nB/G^o_ڄlo.$eu?j 9V~tBX#^?/ LMۑS>m=HWV3rgGVs'꒕k)dSn ;󜑫)6a/kS9VۍHc:SYRNi0-4 v?嬕)Mv޼J)|7!ۉ+pK{h(>4QT]elK^/m"I3(,eh(Ḛ`8z]߄\ U4W2&d;pU{LȆ)NԶœ.7!z&C39sQhnF̍ ٌhT4'`7Cܛi i.8d*ҋě=-{c`$D H;?LHcr @Ɍel#+O|ƳW7r*89nޱu)ɏ _pSff %Lͬ8@&v"YLTS;a&d/@Ey\;oPX]ee w\ ƻL5)fjG6 ˍWpfd<BgGr)8*!VEh;u`7,41zw>[k7j;B◔kfh AW5!HfOCNW(wO~. gQq6`l.gAԶY3; C-$.xYTTnGȤsTΛXN'i}֛GG]( $)0Wxq67E0bDU)£;}c9͎ͼ]vI9vHadwe-f4 ˚ۣeSfJ]cPrNLKmyrMKm;MY)Sяlv*ᗝ[F|#m]txׁhI9 F:ͣ9 pT To#7RT(-P⫵\Gw6ۺ$}b*qyL0[q%O-w@ȩ]Ӽ|- ~g^_7/`HYBh!^̈́<&E*3YHZK =UwsUdfW>HvkߘAQ4w7ρM;bZNQ(s(=yw鑦 ):'ը6õo&L i|j<8tz-'/|ݻ;UwH\VxgE3wcB/]7-sB)"=MZfJ23^@4[%Šu&B[ v%["k]Jo$u^ւ=Q|fSꢹ13Df.ώ)l&7ԫզ"FI\>Ä3v -V%GUy4 08<W gDOOPB6ģrd:*Y+,ǣ;^C!τY#4(EfȐѓ"Yrك-NZQD)u]\IY-RȽ4ۛגj"iJyTx"*NsӬ,s`5kH3{OQpupgShozP@ ON^EDRyøs -S& J?"YnI@L;4hA$%1kES(yPmj7ǖ%$yQgamIs*[\ɘ:GLJLG6x /}38D\`sQ(K[Y~FE4@iP /۾U!˥ J>"j0swqV_ʸѺؾ`T Zg̈́W^x|PLJ7\k9G$'`MuTDF>+v֦v&5{ #ew@VU"w6DV;/ ȅ6Wtvm Y |ZsuhImԩ^Pa:]"zZ^Z[*kk#uM*3=82p%X}J$LesJe1ⴉV95n9",5c*c6x M XTѨYʳleV*BMq BRsVET[2>l-z@]6ZjQrRJUj+I=,)}Ղ~LÊ],n)Ԟڨ3vi:k/h.lVR#{GṁZBρf_<ˆ.{OCEw;`-(t2;؄8ܴp뢖N<ص$pBpu4I5Ij^6,FCթLK$a;RK9A9%(*00Szjdy>L"q̞իmA ӥpp1l7"g'v5zSL&? 
R]uvqMώXMm}`e1 un OI%˅գ2$ K]7 9ѓ;Ku5u`5I$0luy%l9M㧹P334PuZDO{ɺFTJVR䭆%_8¾9cH|,P_(4QrwVP>*& ־1Wշ.։*+Fq|P+h.$TG4soc`hGF9K|d6QX, 5;iYHafeZiبMƽkgl)QSh*cyn3&#ou!2^.#+ ՜*v0_Ohb冽@CËVhh)UmtqV]/}}BAyXFUy{W!-u{Aloh^ocBCK5Du }Ʋy#BI_3VbUgu;\!-3Z E8sn)f3* fH@ CuXUGj&ZPM'>%9x/i%bbR'dlQ08 ]M/m{_8_S/*]B3+c>6]k[aeԛvCRq!h(W M%kHk( O:^nj=K6Z_7'ײ٫(IiUXq`OTdra~jq8I }p нYY ]-Z,Eg۽{XLs-5̏J>L#QxSj20_mäDgP? V]#CA ߺi3i`61E:Fń‚w@sy{>eRvȯZwoOzk׮@wG)7֪ ٗ3?Mw>B,#⇞b-rQ'Kbak1uN6ssQq8]{ $o([C0-I"0m nS+7p\+FUq AVsK{ rB`+[g#$\c`2vwh>ィӱI#,hk~jIkۑebJFIR$pv3lT8Z1Dq:WK;HZlPoO2XE$~1xݒHj?IJ0*G>7?]#]H(oçBD_> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 548 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-064.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 559 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 560 0 R/F3 561 0 R>> /ExtGState << >>/ColorSpace << /sRGB 562 0 R >>>> /Length 1231 /Filter /FlateDecode >> stream xWn\7 ߯2("6A[@  +jAbD<|}>᰽uK1._n?(Gߘ_#Q4bkp B 퇃 cnܟvJ ^z ǧ׻/pr]Jd#~_ݠ1/Xm%}RcpF[-֌IN[+C2qk"`?( ¾4]j8t=0(U^-9 zԕ3ʯi:7*ˠذߓ^IlU]82ˠy;^:$q0š|VAɗc:f7M<<!'a2^a@ +c7D]~i7+Tq6ˢ;?ؑM( ˢ}K(IvP?S^VI[HY`GlD?vvd.Bԕ6Z.ԧ jgWBY#[Y@a mJb-Rg=1_KnC)P!vfW+8qW+KTO\6B 'WpV NȔ瀯'Vpl_zjyħ_FLj endstream endobj 564 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l 
*?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 568 0 obj << /Length 933 /Filter /FlateDecode >> stream xڽWM0WDZxA pȦiMJS>=3q,a.ZYqc{潙7rvښDfJ,ID^$wN9Y%_rala0v;;=?.^66*RH:jHV]HUAƇbƧ(͛14L֔3:>Fh!k:)]h?B],\ \ Ԙٞstsb[> Lebw{&N\[_@vCW(!ء;jJΫMs"F'PGP$#iKV7}+8wg>7!æba`3c;NvK]",v r @pZ޲,Ȥ8NkyՐ'K! ?r q*fϫɴ& ̚B ' Lra'IiL(9e}t26p)`AT5u/og@[3fhۡŽNo>9LJꊏ܄^@N6jK*r3J "Ɯrzl)2eƅJaŵf2?XQh<}a Q ^Cd(FMJ +&^,Rаtl'kxY{+9o_~0S vNke0Fk>+zR>"y޴A SV@΢0M&e)u6jT]%؆vYK[8y! .$a׎:Ov5y ifj=nYp 2̾y0k_4fI:FFT`Uʁ%1k,٫ +;Kdbs:Q&3aLh> /ExtGState << >>/ColorSpace << /sRGB 574 0 R >>>> /Length 1264 /Filter /FlateDecode >> stream xYMkGﯘtx{>I@@9c@%ǒ?ճ!,櫻{% W[RL)Iz>*-n/fyyX^ngSxyfyb_Az~c)X-ëOAJ84ࣿat:Ηm-Lkڱ5۶lms {pW T`8f>pχ˛7rfg6xq+JzRxH8ղ"#&k^h}?M4<'nC#cմ"-J ŒQe| ,c.MgjmKQW QKgZzJK;o13;Gg2p%]4j~t.^:,6݇/RG(1SG1b%\MkW5UM#@Zh;C ֆŭ{"`L KVDZq&pԃj<^KLƌ->"u֨IHo%qFAܕ *V|H] g$\L>`4#..#Qոban.^ًBTrܘͯpEEZ ɐ*/l8JY0m\zKF+?:N o4{'q:2u ,>#68p(()PgQf 9 $*q/ ^KNkۼ(\X,~oX8sbѾX,<c W (Wb߿QSV&ܹAZ5L%6s?Uc'*)|˃[=1W6ڏm1L+Z{sLO$cvd/$,BgKe7U7!cvl oiT Gۘ78 I-5Q&<kCOf^Wl /2͹&k=TB|0v[e3xn2nd!^l}3YA\4 =u2,DM%3Q˿ˤ endstream endobj 576 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x 
‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 577 0 obj << /Length 117 /Filter /FlateDecode >> stream x31ӳP0P02U06P06W05WH1*24͡Rɹ\N\ \@q.}O_T.}gB4PS,{h 0T0pzrr۵& endstream endobj 581 0 obj << /Length 155 /Filter /FlateDecode >> stream x313R0P0U0S01CB.cI$r9yr\`W4K)YKE!P E ?0? J!DH" @ l%r38 H.WO@.E endstream endobj 598 0 obj << /Length1 2163 /Length2 17867 /Length3 0 /Length 19150 /Filter /FlateDecode >> stream xڌP]Ҁbݝwwdp!$xp}$U[T-ݣe1'%)( sfdeb)jXXؙXX))-mSj,Av|s9č @vY+;C#@)@q1r3 b t41(9[m#@&@gqA#`lddr4eY:[TN@GW)௒FO PtB df l,MvNK\L5y=_2`{sLu9{ `fi(I39;3L24q7r512~7;u# dhidiWyf ;S1- -&õy,L*ŞY(#ow9:&PdK^=`^ pvtxS 04q-xheyo?V_}0S󿏘YFNINK%W)* rx1rr8Y\ld@dr~_x/`rD8"E\f? `C<f7 YY0r=zޣ+!^=?zEsQCK>uлbs1/qL@6 _[?:gf{{f/,~^ { ?,ep^?=z_+p~13s/{rp=a{vN6/#}wsςr\ߓpξWlxgL\_;_MWA&AVuA5"n(hV<"$Vg}r-<ҋ'As+J&-Q0^u~y{pH!f5d+x,e rƽ[{@xa5s,cF^@)K_;Jf I=el0)(לq7k؅w~*R@.# #kcIMu*)+E^ j1$AYѥS`ɟX$GGv;Z&-ݹ($HV5,ečn ̆ ARITk5Xuz~3Av\[F>LKL[ D'Sidq*5v 1t)Hva$Uݘqx:g3d8FSh48Vhcnlvc;ޑo3?a*K>=r 0I%a4Uy+߳7]K (VYǘ ِsUCz=ܢ=l7V^TIE]j OsJdP 1Fk/ 1o~%"O\-Q Jg4HSuL;ջzt;%T\Al]io2?o2 C$[Ӡ$ZW#;mp"P'{ wXB+ e7/ &*2Oâ^Wzդd"BP:= c#pt:Vg'(=G"?odN/t(]Dg5Q1Q fVIg߻&sg齳 r^9^![ 1cXu28b'XK|).1 eb"݊%$)nG!%@\o뻵<]9Cpa+DP2S0 `H?luN~>1AkG"C,0)ޔ] J҇g] *^1Fru6&{%K@!ϫ#9+/o>']u(…ߣ+{~<=񎳦?2=a&@W@C|TTޟQt~)'ba] JkE u~N5H(Zαtg%>2ZɁy8Р1?]~ؤ)ĖRieaW]ƫP<. Ta#:Cx ~ n-aWIf̠fYC~+ `#{ FwZcNH7فSһ譫S+ T|jQeAg96\*pn1lx1pCSe!s\q.U{CFާrU )fS-=/2lp&|=ǏS}t4J«z5y}M9<ӭ3H e5/[lE9!o^g .f2Tqe3(wOz_TYwq7 Z"A5]AYي5ٟuE%C;7)m5:1Dt5$v v9m&EnnOKp?kdt] UucP}|y{(yhsH"}f>v{ܺ[UXPiN;0i'<&0HvJv*7wfD+bL(hco1|Vt;cYG~X!#+N~^rEUzYo@8:ʳO.΋~)kPa8-{dZ΢z"~@\wK`;-u-`5npI! 
R6LG䂠ddjkJJ+>DcU6e?$yocBjĘ$H lx|(ъk0p+d`I{rǐ 'vQ໫:𹃁Ŋ,~eR:I'ZSg*g^0xPb#wW%ũ俌{.2T%nskP[C#~l]'{&48A&~S&{c@͢º˕Cf0]Nm24ɯ# mӳrL3Y%{`N6Ksii^!& #;>9EwƱJ jHim!6V[ of|=5oP45E4DkETj7B2yDURbl UPt ~J\{T&U,ɇNoYت$^7G[x/'U[vdp^n O8ޞA[>y=;=x!g`%Z)gZq&EEގj3l13ڭHn(:.;^Gȷ{jg[GXh д(1FT(qs}战葐-;sCv M7 \W LO?*>%ϬMIZjՔ۶ݗ)n9X[ -I}Tz~< I<ɵKc 8ע#!HeWA]r;:]O$0ɠ#lHHZ5F ÀG5ɃΜ9鼿tT (`k5d*W7][{B{maTdo^+zloTEv6OZe}WAl~,MuI|p5޺\уiӳA)+ɩ&D.KT{xc2_ ӴnkwPdGCE(yҔk Mi7'bB6~Zk(q3?X~#[mȵGӶ`}'_z,Oni1q y' f0X^_iӼRnz8aFu md5p",(!d r VǃJ^47`֍Q:%7:Ú5l ?@hi*@Mb22dfF), Wpxiy OSkNS)X"څ!~cKpD)Ԩ½g1wlaTږZVy\-܂~׷TXW:憛Uy 4%okRBBF}(;kޚͿng|a:ޙ0/"kz_`gJ~$>Y\V,jZ:ShfhHRi`pkTnl4˔P pBG&>LU|`$ +p'CpYAEM0޳ʥw_38Q Oui #jqi\ղ2ep! %x$rSؚ%"P5FYv-_g\l{6S¬Qѧ3_>:48folݞ< +붻}Y`@;K?7+xbeGONex4zl{::LsRBw@&$mg4qN ]vě'~EfuD_ OM(bXI.,0_ Uu?z)ɉi8$X%Oa Y *]|"T%ҒA"!S}Qھ> 6w8X#<}^x~A}CQ}H'pWQlo+ް}c7Ry .Ӌ@6yLF< 4qDihTg^3#> eġ\|=\%˥͎z&p5qr(񀻏rErћsN!I*U#*uô/p/`̀7S,frb+$=sp916o,E"lf6^*]:H˜ϰ?b!w1C#٧96|Qi@g‬CׄOl$lj*NVʝ>mt!~J6ƗRPȼ7aʩᓧoVN&Pi,j8vGv;9y?%  C ,GRwzNP0!WWO/ycp1d)#돃9>^e&jfkߚʅOl5r Y`bS#_Lolm\^@_wKp3Se7x~R&OInv!0FE[smxfCF~$e@{ō#Tqv)sĕJ"Y@q,h˄ Mq}#{Lin ?fa{Jxɍ/(z(-4x(Qy!|Y>eEܞ/TPoP͜AЊ!ڽyMM";Ǩy&dnݚwdʱmlɶ|!ݷc9RJȿzMC{A-KHTznz Qh. z SKZ#"kT՞G7v~ k|q՟j6.Dĉ$͘04K`ĩTOshe2gQ}k59)m#Jr:ugtܠ"x]YQEP+Zar]erd6rsFDgQlا'q/=@L5q=za_ 9$J Tn(!COv䞃,sp< QS:0f(TS PԨ Ⳓaoa"vSRA.]vjГ4&?  
H=&`XL(QS,8{rFrW*4?zL\Nd #]#!V ֜r>ðR^hE*]hQQ}uj;TDS@[(qZ&b3ACw.XZ !ZRJ!Tk6ZH14/2L5 Mj6"[ ^o9:]A3Db yAl%d`lfy89CPѮfd9 !ya)ΗLԆQC35V4Iq!3&nL.38ʈ 7!$r6-Kq'Yvz?~K3i.<}*aS;Kj7"lZ]f"wrF[gHu$.nLEHX)r~jPG 68#R22$,e:2=eq"[y~hhD VΗco|ܯ Sp|+[8DEU*[Z .R޾SWft RԼ~]9l>aքMVx}}n[_x%NjV!o&}n 4ͽݙvW2I"fI\jj+Aj[0j XɎڂ4)i)Fhljљ^zzȒ+a2b"oOpжKkI#0R9: /gQ"~DGYa:h#{S?ʋ%ٕp5_/;IJ¾?xpVsOAjkK>G4e[#w:V=X42/ƶ&4/rݛ}:7ٌ4r`%/򠙂E +MK}x*f04נlT/>V l8;-D86hao"m`٨yr$'o!w؂SJ/MpFph#T%@xʗ,(-Nh9rW3@.N?P(+@@ oD++Lv}[' _e$n:60gx(l33jx;hXyE6gi:%H TEQ'sW(][/ڄadaT {zp@{i=m[Łv5#[2 NڝFDxt< 5䫀t0aFD ψ-i?=7ミGr"NՔ/،R@XLCg*ml6vQۆ BOݱ^&b$YC%.?)6(*t:BTp)xx Xo۷kEܓbD .6ĈGvIϣ(4,hQe*j9|q:~,& BFύimS FaZ}9sعHȋTœl(h6*SJ1Gzb:DAUQ}Oe ;矦ֆU>6_]#%7';]xFac{]ŽŲʱU(6nk^^7U"YR͜G._-tX Q E%_Eؕpa ֻG$KzHs`Ƅlۃlku^EvakMCR̫I8^ i;lqL./*%\\pT^ߪvx0DE0VtGciESr.*\l^O`u?ײ# &rgjf,svU0!1:axCwYⶵ5ۢ- C@Et`q4:{xO/N՟V!zaPˍye׻$kbV"fU0tJ0vy|uRy|iC,8ɤ.w5놔;ssVg3qrM3ԗu});ս#({6YBݝRm~aR[ 3ƕ$L|=__e˿#$Ah=1 ?6P 0G$!$Y/?m:@H9u"r(USCa:U }mXL27 x30we'HOD;|it}5Rsi93?Gg;Ţ뫗W͋[>n4QwAFOD ҶwI] t~ %\NL<OلYU?>]00ec`ly yx^blf B{+yV>iis]/~ۢD`uqUbk s\ "lJ)rrv̅=~Y wNǣY8y&eC$T[Ls] K_]pՆnJ?ep/u؟ɹ1Ar79ldkk$lSoLa[kϺb m܃&V\:}g`\ =oHq=-S^ V_K-VA.=w]nN{H+4 Zi`( jzA}!B {iTᤄ|J arty Y5V1'Fv,a{62{"ͰN1:B۾xB⑑9z|V/j+(7>>Rlߞ@c0z1pJ+c_ݒ (?H ?P gR]fmcډA/j#LAX!4슭id`h^zvD/1iHQ+XMO.8߯@ʈyl Dg4vJޔ]&à j?œjZ<&1zEu)PXJ>1R?qX|k l| i˃lMȲH]ӊddVmYخ4I9%A<\IP5[⾬3p۵\q]* &)N:]~<=a>O +"e6q=yAA>SXC65@lygگ Q:N t!ʟCCGf(KJ9z}OAXCa5'.^L͔j[ma*5Bx:`m ܰ@\;!(8j/ TݥiADǒEZ>Yxҡ_^ &B]Z[w`پKB%΍_[Y!^f 呟NX[|4 XGWfơnřrHS5kXJSpa `fR%9B~/7 iy IP4&_'m'`w^RIf(b] Ȓ `?R1vjE TO1+$Q/?8l N|%-^X<5==`|?t1 AԌ3}X@\#!iP˺Ph  흵aqy;Sbҥ=N NkXp. 8ojc}oC0x\ y L2WHf?5m@a|0IޚD0!۹ R~6!<(*u."SS[mUr2cidA4=Cff0'Є R= b(O/Opjq7? b9?/D<>\im&TSx73*V1^auiYiB/v\W1R_wZ(w,g+UXœ&Lk8PcflSފY:`ӈ%J=$ƒ{&QNqdJuB !$J^)k$H6C]r;3×҅Wђ롐UsxlLtˈŒwȟ?6J}nbeuxN4jTXFk0LwxbnpIl\jT ~}%uB E0FhU0&uye/i%]<*k.VqʮWP;,IءҬ buG3ZV9..y kRLr8W<#j9ƅ!N[X4q58eBGҋQ8󇙠N|v^h :ہiw:`G(+5\D8Q :W뽹dq\bbGOͧ l R8(2@;$sf3zT}2pwCP*cط 2jw&Fj~?aoQ쒯ueXY? 
ZٶX/{\qy 5iu[{7*&Uh RF X>%\lzߥOza2&I[W)G~b~SFL~lK"x_}R:y77it7Z$( 8esѓnC`0f=s3"Å4kZ"\W)OƱsԎSڄH[En$Ga `~ :Np-Q!1@]L N.g6Ԡ[>GXxT,b7մ-qN%xxA.hR+(0OF󩭞I}laUMs6<IHOMF { W0e\oIYKU{=V9}k4lI]ʾ\1-%Rl0X9Z9\[1W-&n}XQ2B(o,ӅGٮj2?btXKfD #2m#-ͷV){$Isc~l ;b)g^b (3?_*T# qK-[un[X ǎBr|<+,sq=fOhǤG㔟 ,-o5-AmSsh%1(rVOīKo+Ò>]9,e5^ecvp8z(*f#SeOm6njnMUwn;AfR|f??:0-i0:&d7pBGXӛ{q.B^y*=%Mo,о: FteQ{2'tM yâA>qU\hB5w8voa^ڠ)CS65ȁ4dj=۠qNHߗ?r6  BE%v1^ᥝCQٱv}@IVvb=eOĩM_iXފ03QU_n!ޯ/X$LfW+X"4H) ϢuEy{5M/Ƨn'/@9xEgt106Uo'Nj۠ 0©ũz RN~,rv Y zx^4 }ުMx .Ɨ8^'~׻ ȃKB87qg),] 5Xn'c JEQH}ڱIJGUyZu]NdU3܊Ud#%瞼쒧iX¾7{WB8_iZ fGTgr]o7 ia=Fhܤ'fЙ)#\ڗP>Du8'9؜I'ސ|}U|ޏ[#s#_^&2dfud/whLEruƢT|}P\vi  7{5sMsS_u$Y*!Xa&{_~_&|),z|>G %Ny~G O6-I%T.?vcJ)h(J ( /BX-+ W:05KZO/ FoyssyvHtQd5W)K$8u>_Hm!WToX滑€Z}Ö'=iSqUC{eʕ7Hq oB-=ɢw,ӵ$8IոNǷM"e\нJÔu&{Ο6AM)CQ3I ,|)YC86'߿xLFJM[!e+Y_xGP*)KI5oAyB~!p+_'3y35 >)dsVX9rJ~vd{0WA¢vk5ܬǠyXh$7AȎ?'O\ )kq-fB0-2NX~a?:X~~ջ,.ZCIU.@45wwsVN5ԩvLx6e h&[6(taRY>/uϔF@}`灟Xӯn˓B}G=xWD/Ss_gR?oϝQw03"«'XIo7J⬀&-bbpp~9Sld:5IhZSЬ&ENwvOa@@3HJQ˜]QRl< ,-­Bfb=)BNiyM}E('M; 2 >W[d( mޑ/KcY6_KJfS?5tksZ@\7~?, {{ou<-\j 2z3LyE[iIaj{Zk DbGbm*稈Cz!ɄM{KE&?\=Itp,=@sAOSg4;-o?vwEd63Y?7;¤#:3R ]Q7{,MtQk%j ?K3jY5߶'[֕ lyNPbjՋ+GwT\)+nutLiWΕ3|U 1);T(va(@b' *E˘;.#kY7ޫENw!v' uo;%ťM^N]K3rlcK&W>(-"_=x'ظ&fKO 2pb PaPTJ-ݠF,/QO!j{f 2{Y0Τ9?b;h! m \~m %LKjGa7;4C?!.p ZQ ل{UE9ۛQ_f }(>~ts9?bD.PbѾh+دpU} .Cԕn}nXTnev*tW *iA< yڋ/5ǀP0^_9 {nX8E\zBݏCd'fNY+Hy1_$&x\2YK$帧-[XIxL.\@xsX^.k.9O( M#`>!/Lst}l(3_Tr$~q#b 7m $#ΜUm7> kbO%%+KR,9<_P^[nq,xl{4~5[&a#dRbf6s1QE Y.koLz0]V@ ~',yD(v;O k֔{"8!-W2*h&TŒ ,˲'Xޯ^퍭)_|KubjTHWD7[9ĺu*t_#2Xk}h-+OlOܾӾ!-i#丌:b;r&fNB%jGS,$8zbڬhi:uTAYwALCLҭ\A'zu;0pekㄠE@oŠ*^ 1!ꄔ_nn endstream endobj 474 0 obj << /Type /ObjStm /N 100 /First 898 /Length 2837 /Filter /FlateDecode >> stream x[mo7_ !(ۗ6%%hym*KJ7J\b7,Ģf3Cr^*U)2зS'T*zKV%۩=}{UATH1Qq)_*e|"N14 F%>L6U2C([< UA(k#! 0ۘHʦhPAsQQ0^9k 8ΒIyrC0Y`U4`H+4KSH-41K2ΌEZ2,ʱ:# 3H+[khM. 
]kiPHhIeZG @Ѩ'OKh, B#A+x#X S&; <B Q`H=$"bNHSX _'=#R1g Lp&y?&"BiR.J!y2e[ Lf92e(Pdd ;͉gn@JsUpKNd2d-%4YBV*#H\4Y&*J,G@ &8 \VPTGF/,H^oL3?#^5Qh:?{_ѷ~ ;}3дI=O'd Ribv;*' (@X oմ>6f>iVՒ+UuhYsx}hgj5gCɺmd/NgGNg''\_6$C*d[}2!VӕIs44'٬2zV/=aouYO7G!TK' A\Í h1JQdV}_D>! ]QFr1yݴ- űت㫡~Y5#}Hy#Y-Kͬ`Q $*bia&..Guۨoj+CYe [|)P}J}/ϧzn/^z,~ltc-zGgC!T دa~w? @@q=pEFm ܻb7@=(-鱋{P Qv%(x+]P0pPG&񏯣$zr,;s6X7%l ͝XnW{`VwX뭀tǏ(wp(~HWu֑$;|XoA F߶;V9bM`J?솝?vΰvU/At "{^)*f\$P"qlP%™D >Td\R9zpfzҞW {Da}g86 d;+ved$_諠pqq**|0*D288iѻ(iKc׵Δ g1N %JqTNTތ2%3`v -P{XGZn;]ԧ΢wX‘eueCJ!JTtU8]d/@TX, z9Z2%ϐȞde`c2e&<rlTb6GS*n9Ns+->a>p˱gAy-HG.U`#<̀\,=|,BuWڍ rS!\CVCW 8;' [B(H˩''1rBB2{bDh" W[N}NK MqCeLNH:pa ݵGC=fl)hx1&k9;,lO8xd欠9Οڊ-/RY2)T'# 0zNҝ-2"2/!fb:yFd˅YڎV"zc|KڥN[ IIӕ>"^ǖ]q9hSnY8s9V 9p81q'F6sn[bX2Dt7Hr[ gN?ᚄcşJz{P\˕ kgQi⻁c юs|gݟ f)qe9»MG"nv+"P1-J`z&1 WS,-U\,N(/mn9:loAlGxY ;ĝf/:5p1cFpUZ$oK% ^Yns'V9|C +O"G:V8aG9}MԱ}|дo?_fgnomC>ۋP_+O=e }?k0t[)syp -xޚEtJK*.N* gG ~rє%Ummڀ"z炤ǾrIJ"o{&jQz?G)<y; endstream endobj 600 0 obj << /Length1 1713 /Length2 9763 /Length3 0 /Length 10857 /Filter /FlateDecode >> stream xڍT6 (!%)CK5t 00 0t7H(HItJwHI#!Hwxι{[<{?/'h UpDrpqdd yx|\ܼLL0W8?vl&}(C D5G@ rsxECtD0@ 숀`3::y!a6ů#""G8E¬p_ 8Z^♸(vprDHr<`m ~ P;@ k sӡhFB8 pqC@H}u*@ 'xxNWD0`++G'0 XP*+&.`w0 ':'k>+$Յ=#wcG@dWPsuG5 = 9Aܛ@]B"3ie ]@ ~?'G'P?5"ݠ~>v7@`VK O{3O|H'^~<ߟ+ ∀{C㊁*:*SF)~G b֎?۽? Υx\(?B7ය!**H Ovb+~ wT蟫+O| c(<MrӮ{0Tipps~{M/GX9B~o D+ _G\Gp~kG$~@B|?HT#anPD@?>o{F _ ym@ؿ}f }fĿ}fǿ!=!?Qx# G O9^.+7$c? ZaL:Z} m>Qyp I3$r [.1Y/"OA V埝Hl7TcF4&h5]^]kž! yDͩ+{dM)M_3㫢gUg5rA1Wz1&ALY1\9i=Of *f/h7|.K;#9̓Qf$e)8C }2Nú2ke.D.@5R!_gRFiDbBF~Mٷa)}(1j8[Wۗgq'uL̪$9Քff뺝OjlW.1  @97։!_@fWO-l߉c}DB`/SjNۻ&DS3 #0++52ո#U{.J[n2 ,,EYKjna)|,<-D^O&JM\Oha+_P?x_fV܃'YpYb'y(D;xTP: 2qQpf<TNx4`#"yϺϓ1PoA&ų Лq e0**LOf7&=.KO Pᔤv:MWm6-_ ?8)"l|]l8[^ek cZ^'jjWPhPٗ׋Ɣ@ȑoCh4 X+'5 _㴍m3e<[h5ADo=l-_Bk:>K&9g~`,C[hpQSP\ }&6j}\aVŕ>)F}F󫅹h;h'=;BXd 2 Z|dC^N喷84C99GFBۈOq_%=w V\7(hJe;:,HlGUWf7qV$lOA;$B 0nl z[K, deJ%*i$LQ_\J)]HLs)w6^7U! 
ˀAkyG7JR8.;~nIjӁEBTZqm4Dt`}K./kf=Q18gQJ3/Sjؼ˗t9qȄϺxVż^0]{əU?y>[:X1D@ "auƝ8p5+\3jDCJ0s xn>h@45<먄^KŇuFb%@"H ݚ5JsIrBKkQ-9epBlSyJiؿman5vwD:bx,he L}Z/zׂ Ȉ9t.nѵJ6,}z3 2ʭݍ^ 9_޻J%"\WX}:l=F RRM`|@Չ=lE(zO/Wb%u6+c&79ɒəV0p!pNCMC7\E9_#UPBk1o>2uűثL2!18$3#(>\?8P_;$CP7(Ģ\v;mԊGSi>~ 5j׫dQYͳm[ViRmA?Z.vu$ (wf Z_B$Q+RK#QA??۶k+TdN?fNg\ ?m<;Sk,Fz7Rqǽ.CBGti[T5P<`E$%&^ř1]mqZ6Rde=kD= ́uhVR\LF?Naap5|H0zSxMKz;C&C7jH-_T7~Yѡ"_rHz %2P݁鎢$^2]&E[f۪5L M,,W1"%ZYyg"iD2z[3Be\2J\0=>*3dD_1#WύR(4K~zly1@R>0-<^D>ݛ];6~% \[U̘%I+ ^r^ޜiUlӏn:V~Ҝ5׺;*nѰ){b*yM{;UkƲFpS+Ea+9tK5Sↄ~Jl d2Ò^R潨EW+-72MCyj/,9B#T!`_w f%ol|J]0*M[R&[}"̸=sa'2qIХ,}ZC<@60U*Q\f.D3gM/ːﱵxetH.@T死$؎ )V'iK)oS`dú.X)5Qp?e@fD7 %ř .iq--{ AWi@/ĆsN,-L);,1/C_73h#o2a?zV*!(줙vۀ}ĉוil]k(4 {VhOX-x@ղݭ$M8OM&eaӪYS^8^E !G>Z~ŐvSUdOM7'M1֚9u;mjf,>Ӄ{ԭ*oQo?',T{8LPlYEuU罨>|}5)ɥ&7z\0&3I/ +fwX!譬-Tǂӟ ^LcٸKm V =?}MMw{T~|aOSBZcZf2YiW/#f GΩ?v3MJťeeC~T7cwS=e.A`,8Ѡ ?ܖP"ExԘ>̓ NʉO/ \;,F;L6S%th .Y{Qlގd?;\iUGo@Bd`Մ!I#Yj(WJD&!+~D5O\5?yNK`TMbqJX?=3}B3 ǿӷo  1M7lyj#YJ=X$vXySoW. 9}+M!ߦ1rv $'=ԣd]s^?sl^Eu/nxzq9հ*t\ܮ?YKr OM %bGjkq|{γz-K^BRյ\Z䂄d ד[F-@ZMr->AV\A]|1I="h 將Rϐv`F҄o,y<*b596T|sebsG?#yecqJ)#}ӳ-b_C|`(R~',lj@dk ` B(WSM\=_Qg"]0",_#m/x ZN77;lZJ!3PS络wر [E6=vI6_+)zhzaJ-NWKb:fm>=x1»  yqߔı 09jt|S>1ݾ@)YE1uutq&.A4*O`2JD/KKW◉PM Tg$zIځ@VւhN iB&{M^ U|vZ撄al+{\RαKgjtEQRR`SMϪ!El{iVr-cL8*s+n^`u-+2cy^[H=5ζVkuu$HZX[3c^WU|AOLOU^s":HȱA >y`Zc(~b+4IJ@7JR>!H\%GS?l7"ayԼMBwR3),)6_ڷ}v33t:JKyd^XDE\U_Z#Z"+Wjx_@<͗]8&4eu;͔*/)Z !cVUKxP1>pMև9_ 0{=.@z2?tgoku];AE5=NNSɽx<'/7%;([9fBz,鲑NAg|<[ھ:}Gih:b2ԃǼ)r$L3wFhsѹYLIj&T7lUڱan֍AJQ" r#G5X?88z|)f 蛐ֈQ:VNu@k/xYnɦ4;u%<ط[aʔ`-\§s?(b}.;Sk>cb&r;?7mxԒh0S{qV?-\Yb1< D.{ڸ@kA" j ӹ$·nw^U9^E-txq]I'veMB o6)-M꘍&X9}^JIAl~&ɗsӛ1Z6%3Az$v~iwДmV*"|&]tˌ%B {PW]yOws?Š?\ 7ayia@2t&Ij[Kٰ9Lg/2 p^4en{fxjp.]^G@eJ)~byv\0کw 9Mm._cpZ])%Fn JǸlӒcrqaCżKgҀ'%CKMa՚[P]ڻp +ٍh:ɺ}9T y:56 }rz;c҃vpjl[)esF3\)4t60̀OH5f$$logFصvR*ݙE ^mN Qd/JMlx‘ɢ1 eGKo{F$JjSoH0S݅c_Uc$B$gS4+O]jT  [ɈH9h. 
̵YT>D!+WXtHU,7#T[QN*zTXXCAlƖ&/I 0'1@WN } gU uy"nMc7#eI3'+nXZsw<$0bʕK|KftH$K%ƜFSRi)拚Ѱx%ů5^ U,,^7A3Zm}/lnі3)ّRҲ3v[3e!/ ,OEbj2W8DtuZ&6o NaSc#h]P'[mRS7ud6]+MErѕ)]3yϤwq(XJ'c‘:K ;\Sm?`QjD$`i=1%%jR&#vPYG*W-vh(m4sw5#'sbh2Y;. 9QWSv8J'_ <'W˖ :7i^l`< M\th}?*8IAA&WHpb:ZП*8ݑ64VFi? ӬK?ZP&]' C]V'oiEߜ * 9`蚲92 jyP[B+Rexɤ1çԟ} t wʜIr*6!w^?%q1>N 75`kR8hj=8#/ 2MGM?Cmye|2PM ] 3r(YᛖYA+ nTr\-zڇ#:̲.@ i_Glp^3(hAbAb tüJ*y024eWA}c3گZ:ι1R(;G.hTTxWe `OZiȧtY0FRN>&T2w t)X}MD=L>OݰBeJ_2zpdOnG+>=`8|e\#w ;@S"r[Z륉8w="nk)tD{,f|i%ӓvu>I &P!X^H@Q@!L4/5v1\rSzL󩿢URa^ *l>^\ diOO6l=i˥^"zqB+i{ xCpj eg T1;[mM-xRi5j9!O{a)mЌ7뙖cXCٸq, {T?ow2#Qw禥F3Q2-ꐒ(ȉO[R2rv1<6^d Pr&R,Ǽ7"<7ؽ./1L,Duf*d^}f%ʝ~Co|_!YzbHHiZg[0tTvnrs\/hߖf!k)M;4~Wvj1j|&0 EM(i8mX E63rŠhBF$I1ٖ9 BZZ]FVbqqgڒÊ:s',ev˝Fp%= ֻWo23pcYtޛޞ 2aݙfˏnR(Ou:i0IYTUcdi=dp9t>M}n4}>MA9xӔl@=j-m=P[1} S+bm %@$uũޡ/7=\ؚ} >ZjJ 4ZlUl= Rt8TտsAB=<ǀy:)M2֣"gFgá0 =A(hlɡz1ï!SvG @4D%/~y9o=Z.իo~tqlPh!PLՈ,y9MKE>ă<!ZFdeAC-s<"Εu% i;M1__f^{`dGԺȺGIi/ JT\˒3a7ib~aжN|$"B`[.G-vtv?R 73^.@-B z53(9P|#Rʉ ?"(+ ~d8ALEye4cx&7]xI^;8SlWmp]GmW}hH+|+IJq_ '`kmw{ǍB(pb3Ԛ%zo86NRnۅgmR)w?5bAJR[UD'eeA/9l/_je"նְI #/ے;|ݠ,BNpE 9%CP,Y&KƪdF M/G7ӓoq~,9Ovl|v6?b&85gūtJOO;N i uF7Tmp xXnE^R.9bGM endstream endobj 603 0 obj << /Length1 1513 /Length2 7678 /Length3 0 /Length 8679 /Filter /FlateDecode >> stream xڍtT]6JHHwwww C H%ݥt HYs}}>ڇ^׀K (a.>n^q/Wpc2A0P\!@ҦD Zp@ 'EP0@A.ޮP;{rVOLLw8@ a- tAP_)X%qOOOn7N E!nWi6WoxLC{-  NP憌q!5M Y'q_@yCav[ɍBp0/" zN@$w@l@PW< Y V;;C`7_)B]! {u0'oh m5vw1AC$!MxA!^QA^~ ䷓م `l |݀Oǿ !6;( ?ّf)W@>+Kpo@Y㯞ý|.~!^@w] :~U/O9$l'ӆ# GB ,!?KRvwrM`P'?)H#G쿩&?X ;W Dﳄ)C `](df7'( wzb\|C! 
膜8[!șw J0k@WW7H)䔂!^ ]-- x~~#QH x"@!?0+Dv@ ^4$PtQ.Gɵ1,yta5ѥ4n üF\'Smgi3ǾtUނg\tvt6oNS3L֡d$ӊvDʀ X*:f,%WaǹQ}aս]dgJ QBW?߲S~YGz>W WQ[&x#n*ȊO^; ;|.k/Wɼ_ Rw~,m Rwr,=nߍVfS~qPn^U<#dEoS'csMu*HAWP3qG +^݄OJt8!3sv4-KJ'Vu& )aG)cfJ|ŭFhܩ*U<EGwHqޤEh/Y}'!Y^sl{TʾQ$ "29GV2_osrԓg/---PM"D1fk&zv **E_ػj _$6 uPA"f'4*Gw)*Gu |5%< 3˔|6yc }\LY&؞z_tBmD U.S+uA-ّn_.$,iy`bZ_ה.OcMX ,_/QXM6^XWmОz$ޠÁQgMwB.#(_ޗʛe7=#  OG3bU_ʦ^| +$ Ӊom`v'-|J½2j>$[RO%-?+ z6y:6Xf`dnaj7yUKÐfy5nȒL]SD#kh5^Ns˅d؂{*gVjiWzÏa9Y2^gGx-O/?|B"#z3.o#Na'ūnܙAb^Vt[@AԏQIY'x qh!ATI08UoUJUQ E,'.Y~Д0p]4#9[.ypjf+e+l3*xIӕ}XE-<ꣂ.KZ r+$OditO{5U $qɼ ILuy=VȪ89>cuidYXJrh_D$Orx+ `+Q~6. 'r60 WuK9>:CiZ?ˎHH!&, ,20%xTI6#L)<ȓǸC LJP#VΔ"~JSd_ -RsXM85tY 0PZI-odH}'/sБh-whI!`@5woDbFPҫkKbzUfGAړvtsɱp@'}RWLˆGw'}ьۏo* s(ktǤjӏf !~#l4Ev3 |Em;9,r? C˷uGK<'gj/~=x8}TmGS lkJ5(?Íamf6A!i*(-}.aT J= ?la{RX ߛ*:CיHL+GhE=%"ґ~a`j2I!k4!z蒌d1LF{|ˣ3Wh gjuKН/}~.xYSpU:^ÊR Q[DȻ`|N4+0>s"BV|x63Mx6FIRy=^zDי- O&ٯ_#DŽe'hh5ocvbb$eE?:|~Q,sڪ]FzS^{@))Ed0*wN܇oCՙXa(lsnVfb:P0Rt{4r:xj i!K[q!;T7;OYǗ<_hd/ ړ*<k3SƪvQ viV9}FH:I\Dxrsi@͛Jl.3{|W^Z=?R([%*ϘfIίWR2] ehaU9l+\6Gft'Sp ozf%o{d$޾<(ʢ{pj-NAw6(<]ܚL@x_I[%JAפ2pc zT , |Ô/>M{a}&, ;[{aиvTlLk]i$ǩ~yK૤IZ^J idNNbLK1 6v]Mv"[;nd){ǒ{6sNav*cYiW5o& q҃qy:".F1m%$,"I⌢U617H^ fQL#qtyx6B'ԕ d ^S-$T56}O?,~O_|QԒۻkKIGrI ܐFKʌgt80M0a =^l06Qod +e794WQM;V*kS{[N +UűMtO $_$tݕ R%N::;(ӛffJiMZ7 έۦ#7ǯP' 1585HCϕ}3I8xE>mwiU~tyl4'X?3|j$~4M뵹k_yۋZq V͜Np:Yz9HCNwW? }$͸ lݾWy՛Uf;D1i7`bKgB :/ad;|R+MTsIW - 6N< pZ#1Jl}-,붣S<ܣ?SI=έYJ?݊]f+=X*<$4Aͥgv(NHcL֌y"r[D'h8MnBwLqseDdcW&ydҹטgU 4>;#Y+47|#`) 0PPBc=ܪX˻:^#YFyM(<ʯ ӟA m|VdsQ^5eŃu1eYQ̱ST]c _=BwTN[Fy@Wl-řzpq}Cxy^WA+w4#QNK6mXf;q} ϞZL|XtrNd|XAcHkodV+ERaLcDvSR0+bꀗpq^Oc#9Ȧ3&:+g }+h!WUqlo5 Lt"ˬ`ID~@X:=y|܈e}(L <$̔Y7lSz"E-F4ÖJ34)7Xt*y=tHVT~ U&"+49<{7ͮV˴ g3+T<82zC=Vr<NI`^8.Q] Ļw]JU -)ϋ<&?֩KQvRifArI7o; (8S ^B\:#u O]7LW_Tzm:RYia?LImqԴ=kbGet?=)gop*rؐhCY,%Z:q͵l6<|BZ!6g^lA! 
ٙ" Wo5_~zQ7_v#E1\L!;9gϢ+`9^ma"I{n}Uc짳OTO)I!ĸCR blCviww4Rx^Si=RC?'L5/2ex}N.u`?}eҾfx;_a7{eiizdpZJDX8Y1 U}ie9jOxz7P;wf$hggUtk7f1vӏ?\ݻo madzbpS.tí; K [p]8k ݀H@;"3B ÝDR<,a뜓a8@% l"wVD}) uV 5@YδAYCJtQz hfjZ01k)3ԟky>"/flcǦvqӛrdL1٢roH Ye󗋖(Ln\Q p0> stream xڍT 8T{4nʱ%۬ftɾ3)1slfa$Kn!JqS7KJۂtvI3hgynw(>&6tNa Lph==_XzTLJ9l7; =(@6*dx"7'-8@H#`:\9lֳpxppiXK3$\:`Âx0 d b!i `H]C.Ā,> 20"aACs| fĊ qX,-<|9/&fEB.vB. ą  P8h!Xq (.4ċ1\` @10B|0qKQY~Zd̽r}^DP T^@۵ԥgoNk][BN-z.|G>H).Sەa$Peh[lc Lnm|@> ַ*A)lPs@+6~fC}MF:z{rZG^wh/}Ooںm+pӔwt1fMì)OSfmCiWօvK;N&_Q[>cYHjMu֐SnRigpQ}[bOU_:U{+W58>zJ$qQI_:^U좕\Bk7a(&gӉF7nrYaVM[3:2lЧk.[0#w'jEi=Ueg VVIJIM,ϖg. ߀;kZuRs ELU:Ӆ?kMܣyc?ό^C>0ܙxmKC(ۘ)SUަ^lyfjD5ۀf79ʴP*ڙ ;]䆞xӦڻ70QƓ|O;rz4 i}!>#yIxOoշz*s?6bRۥucs%55TχLp[PW\u@:thtxG:ʶ(lN %摺0:N,Ev;C- e뻇=2l+JlZHiMʴ)^x92]U*eW9 ⟳rIٳkQ]P[>JzNǭ|0f U/8$֦Gv>Us"@OTˌo鏑7JgӶWP(pUbڨ}L/ նWW~o^htq\+SyJ&iάS]am:r#\.8>l~k*MC,5n]땠|A\EA6D>" 6wdlc)l$wܙYDr}[ >YͬRw{ku6inC%~(Dmr=숆Z6vPv /Nx.7 endstream endobj 607 0 obj << /Length1 2605 /Length2 22750 /Length3 0 /Length 24222 /Filter /FlateDecode >> stream xڌtk-6۶mٱ ۰il6hlsӞǸwd=3zzMA bh ttpc`ad)0Y((ԭ(4.֎M@6q7P@`eef23O /@utQ9:yX[Zj3 _{@ h:hf t5/'+ = tx (ne]̀ ws t8@MFt;Xz?0KO/"kML,v@<=W#(r T?幚X;2Z* b@7W_ĭ]f{3} ks_E;1i8X;e ~,nfff.62bE `*gmu5\܁~:XXfnSovh7]@4{,_~3IANKSD:z|ؙ _C_ed+`[-Mg9hRtM-@{9@,ϣW߄b $ng/mbomOhh@ Z{i+fZKh*i4Wv3{Zk2;k t5Z-3[У4fV`b dv@)Py~ G_7`eqD#.o `xL".fo`XLҿI7b0F -rHoҢ(F -J"nE7iQ@Z~#E7/|&"62{'ЂzF f"4cagek2@{, `W?@[ ? /hl3Y~~;q(]+o'+ 3l5Z?%;fq;w;zm:-~7Th_s= rgwG7;KŁ|. z6_?P@/¬_Mup]'ΘŎV2 KtMEzˍH`"ac 8wOF1;pX}y"{ a ԅw=; o|.K΍~#dinS(HilӌR(7BZ3/)Wb:8(|_u3>+ꬮ为8h#sE}^ęI (i֪U# m,Y `*>z #R%̊ft*6[ ~JˍfM"Ϗ﯀iDCN>ۏIMwC[~ZBB,VzKiI%no;2( O&!x,y̺ܐoFN8ޝ+=Sl[FD3z T|5mO:;õb0NH&MW *a:[/b/&?'gޙ+ e<,h‘B}0_0 scy훨!OG2P\amJ)R"o nfaMɎE=H룑F; ojeWb K3iʩ<^ٷkǼFBA^!St$×s '~V%y0 W<>C M'=ԐIV8"B%jp9ݬJSI2v?J:pwx0r#s>neGpS+Ǧd}Mm-@B6>V&ԓ+eF3<~fVF)TyJг5n<\Q_!-olQfv\8_ՋxۙDGmMQ᪫cNlG{~ALύ Vǒ5@ѳ0~$6o- |9L7m`*P(GށC“p⫔]U6s̓Cͬ)\lҺrTG^kcN^)B2$q!tOvZuw]+k6/<. 
zWܵ ;rxjymHIXRd`n_yXulm#"/9*N%iĴ"zo+r˪2l5qJ #r͔&yN_MDe07!իn_3) ͮi`9b^S4ZKoO IJ5.}ij*VV~kdT?xIr~<ĤDޘf(*vJVGP=p>e/UCC;Hc:;_<[#a lN6<5eI\QLQ>o,D%Qh tN|WJF`Tذ55[FCʕ[ӷps% :b)/.L* 'ymӦَβ3cGY I~L̰ v{7Mg‘|@q4$NܽQJ\sJ<"xe_(D-78|T9TS,"31ʽ;10ʼn ʎ9 .f*5wXӯx]bp=>"3lo/7WXDJ2DsYM=ny'@<+^踜\ slX`Nv DEb?OZXDziz_.NfI*tD r3["FР#V&Vb.Lv[ 6ai:BM_nt]5|ex|Ү7e(Ok-} [%|i%dckU9ҍrxeQW1>hu?X"!ݐPk%~|Us%H5wT }hTV`ػ$R 6t0^_% _dZL099$gcP' Ue O•*I֮-ڦv9:65Y2~y*ܴwŒL^5Aw 6Yv1 T7{DL8 Qn).3Sr99äMtEAFբ>C2?^ yL@=lԭ$w&.q 4׎$̷)ev?K`3mHN}ӃcIK96P)qdm}ue? *TGr+rm%V%1Ġ }. iuU!ߣ*jH*jb~(kN'Cbmd 4ssr-G&ޅlUCqJ-YFr(=73 :\Q`۔D!# V B,1bk2&8.z \&i߷kWq-MǺ (>h5\*km7* o߃$ƚ0T~.Uj<3{Kvc/ 9fG~vĄ]A2K{"[5Qxo C vS0DMA&{'E97Zw4K&҉e뺍SQ~={WEPID{720:;ESrf GDi #2jP,˩&T+=V%: ;\^M8 :R3|D<Qœ:`X!+kxpw1A6rP\1]0v'f+*mz(+S+/_h,&%}Ҍrdդ)_![bO `[U%l&e"$-[St_Hr M^f6, HQ,\'4njtzj (lFIN#_aG?E)T`LG]D%f%L|<\y.WjRZπ",O;G75 P.`dKs:6Klz7Qe=X"٩yʾR:ρH>s27:T7Ψ?Ir{@(v^&wH\Lp3s~Ob0qA_-fךW"M)gPRy K}A7t'kH>EƱ|ϖ!h7fCNP=l[-֜gUôg*~{F4Q.;'b8Ye?k *9ҠjX ݘ"_֣F6êXBv-J,y z*h -|*Rf!j^ߵ}:yzM' otԢ9j> N ᐡ =fII`8t+\,n0j\6`\R[|\oZwVD1>]_,; {QzD9_gȉ{,`1\b,Gۈ.ON#"|l] y`l4?ʽA^ȻZu);\?'3;""U \F$ E6q;1OR> k(sNV]K< hc1gU6M-BZV:F-jL^ZXl^#5ߍPR:+6A"ќT gycuz{ .#i/_6puSh#}k[> :HP-qT2xA1MJ+ƅԱCTCyjHHcAd_nh{c3 "ub9GjN|+XɞP8mIlsM)Yz=,WRuCbn7VȥD>nl@Դ}yaA~-+mP׹cMqٙwIMM~b.}"8osc IZnbԳ$oo R`;{ݧ9_Uz'2TL>]ؘ/Q4@3 e+htzT6Blfve܃s8eOH+RQ[W)t\HO^Mw Kb)Ciw0W4[kB[4J[E/1WKc9@L 9!^>JX5}sCnEg8[QWgTwTڱ5fQ8eSox5 0aMcœi,oK[,\+ H| F|O)?Dt]7!oX vOoquDdh"-{2n@]H_nLzx>EɌ}KaicDu~BZXg[kZrMB!f u@4l3 _qw.ϑdN`Ď0<.q>~jF[Xau'|EU[%Ǽg-%F2 ِf&]5NDl!혴(3{{x %D)I?$[0;z߈K8loӏ YaQ"fYg{yq[ #cg + ,2:Im 'L$'99;AZJďPic0STj'O~v5oOL1yrEUKJX Ӳ$[&,AߞE3 iD*^u:sV]aJ' >Jlo(@gcno֩|?3(5>iucЩؿFka&/^2ԉKtI;5µϒy/7)2o+KjQћ>}v/I獖tx9E ;^#6KB1~䛨XTC8^^3L2`iLMo^ >e64r_ʽ^A=)%2h/mئ`?mCrU=FhL~?S޷(L^T'7~>}.Vą?91׾xbOH#5;^*&˱]Gl6s`iVPAOTX\:-_ M⪻aNP($xv6l)Jomܰ87 'o'2Yq*K7q*is/? 
KU>$O4]K Fd鲴Y7WbB;v '"I3F|SJqަVfay!9R7^elذ.ޛRG6 :O4O!?dưf?WxR4(q\Sb[/-_/lNu%$UU@"XZf[%r\Ei-lY 뚎|{|iVUiTX wPq INߴ[5ߌt!1_LmjF{Y Clc|'&vt=U⽽quJ\&z:t]s$YW{R{$ӎFyYmc5S3H,/W0~!d!8Xc RuTҽ ᡧO(E=u3;n"!؉!7;"bbտ]҄m }9a2Blcf4e#rhB5 \TbF04'b~˷Ιs\.BPQ20J+\# ѮO6,Z3$ FTgO&=?<?ڛ[bᒿi:,ZlƄXldVSnh7]Q!d6_(?׬d\/_ [QoQQpFDak"0= zq$fZ)q&ۤ{"q|[a_uR9[iȏ0vH ^}M׆@('A8܅D&TKȦ3BskpJObyEO)taUd2FM &J}AߞH^d&djH%+XE/wlg,y[7N/vqve#:'`\-4ncX윥Mg_9i%Ao9M>mA;8R".ch$)*}w/4 @CR9IN߿WV7̒! j*|K~CA匪‹hxFZʉ'bGdHtEj }{S&"ί& Jg2CN|Bs #~>ATD )xdSFy3~H-!R$VW.)ގX˓0VmC}Mz>kr[Q& *(ja."YmIluS HƪyMREZaڔ}8<+óE_U ls!džX adZ79z #o=7:/|S>%Y`ߓ b&72[p~6q@#t2ފ_dA7R κs@c %z+@FTǎa1U4xv۴{"}!E8-Hn;ʯCɬ u$_ 6O6Դ0զ\2 mOX~*Eڻ-vjgonuܵ?v2b|$|2ɉ?bG|8RμUL 쇿t܂9TS11sh$Vq?4!&XW5^$OP:ש&tJg?o\bB@ c"m\7PzPvȺ hT{;ri$eoV)!6;"p!.Z#=݌LGZ/~WkTPp8Y\Y|h&1l@xr伄S{e9O14xg Q)e$̀ 1CfœhS>uV|ohwg 9nZMw1]7Tc⾻\9Im %N]2 \CB  zAUhACe!"в%ojɈ^\|2? Ov>ѳ.WfR` R@*.?e%s?s8y|7GB/r͆{-.@Ki˕?GpqXDgCZ筡 ;{&j,%7L$, fD4`hyE2!fMB|t9/b;\xA%n~OŚKth}$Q{}lF_ |Ú kNM/ܴX2nX}nk l$ 6Nغ-7pl:8oo_52 &;y҈-#Uϩ6Q!"R_R*ziu~ն&*lb|op2"nHMDWBx!8uq㽓8ת/zuک{z H6pUj_E[52p^⚬(UFĞv\t14`ұp,!~=2sYJHf_KV5@my9; $Y['pv+Bw/Yl&C'I0L="W@`KѠ.$D4F R~|ʏ%z)]F1QM T3'!R K(Z$Ca]D']q^Ϫ@J|4}욉x$B0SL,H5tԇĚ=|=KHhِ:*89/Q:( cFBJ2bekPK:K*?(,~+pF$hԣRJ?@"OG9Eq"qʚwCW#La~SMGi #.sz|a;0#4\Y} tT?./];^Z戫1YDy=҈df[r- ܃b#9 af{+jHɽ}- EѸc WZ3TLI3M&գ`vbT8caƠXߗ0A.oA Y!AqcXh欂#) gޏ 9\{`I2j99_"8cIA)JA+Z ϻ%#_ҩ3:g̱klP_3P v@B" Ǘ#m" ٧Sd%{b+q=guqL8@+=##i&VG޼FE^A_܇ןN8xitlK̈́ʪ.KVWhwp_]T `WnFQ&G6zʢD ܨm*1 d<-e Q^yS$|OD:cni&*'㥮A=(מY,b[sbsGSkxTnx/M4'垕|69)zͷ QS,V+ݣTU:ᢘ> WɋWtgR]-TBւk3gFzwj5!QkNh.jU\Q@|/}a&^ۨHꄜG9U05xXpm97.<8\DܰK[9g5W^/u#{[WUuy]#&J|j UF\;r\Y5gRVdO-;,ב咲-Sj` )>Sx?٠ c!M3(zT1 ĞYRɴY;k( wnR1ZQm4HΆAC-J}#߮[HŦO{;Og'+BhĠl* 6fg'g!1qC3 R\9\~6L`f2HObr儈BbA{2qvSdf2#i|t-)GxvcMzRMD(tP7CIʩI89?'a*=-M'iXFC{V}zθF9䮲BA(a$R$ʾN'v[Z%uMG \TLˁXG g&fMurоtQ3T* R Hcz25J b:c{HѱO韦fґXQZP#o ӫE{ۗ<ٲM>!߱/>m!v}ʮ.μ 3(gUDsﺕT2ѧ)J A&ov\w߻4 .APr]HP[ ӓ,1M`}ָ jv:n6FZXAGݤTFSu@=۠D`Vf4c[y6C }]5UE_.̧%)Jf[-)'xTѣE2Ɩ3> 5ȝa6&7q$;րx׉1Sz)UQHm P9Fh'# Kw9J{^$3DTgW26_^C~TmQ*?E}NQoAP|~0" Fgatc 
Ij)3Kj~*UON|Ҽ:7OOt1Brˏm)(Ն/!"uzG m*%Ko,u: :KRmϘ7޷Dfo_;OCT,ૂײa$L;bMr!%ס̆8ܣ~MZЏd}Xذ2YKI,$٨o_5xU5Լwmڒކ6b!u\7͸}D=ryƤOj#.% tVPSn)3-1<ݠ 9Y75FJ|r'xdZJduFmcq<4+랒]xΞjS܏,D(r&pЕ@jVؼ@7HLj;…69O<oƓ k+{ ÕÕ/\fܓD2W=a .\$GTT( !a$f8|RG9w3.L;B6b\d83K1%η>0(񥣀({wa-e<^l5"0_f4Š._s1v8 ]rpB֮eb0 j{y=df^178 ƭl?i&'y k6T}쪜\'68o?+eMNHNM t 6BCLqdฯz\Y֟\p8/RQ2*<'n`W`%Ҕ6@Tj; @{%gp\ӆcocZ4D{p_XuB")"YQBu3R9ts]ǰRZF\曨I߀Itgԓs&H 7qŸO7QSQ&8<ƿG-47OBp`i?ʋ#%"rԨ iIlS춘vm~b:Abt)sIg A# thF)04kQ)9 *.m-l遧/ Y 'ޥ?橷/y_i+$K⨧="7wjo5j $|m𚽞3\夾XL} +kf!`^'|@)ҢBsAd@P*zfi!u*Bm"_Ը@ }UW4X~hrֻw D4/ȸ7gXY3ުXϊ} A,CWlCD 1S W8_R,a H _!3D) 5Z!U͊37)x$u;p4i( ݎ kO+t GvT/`a$ ·^mr8_sGd[)b j%* 7b?f6p<|pYWo?O̢!nAl1W/߁0˹jV6uH #Ы~ H[;v_m(z19ڴEy#}W87ܟ/bG/#/|$$[gf%N⾜s:p[L.bizg #og1ZQƞ꧊@̇^#!kɖ|JHq FEQXb})mQ_2g!BC~XjkB*XuO||ƺPwV|:8>L<Ҧ 0x:u)xk#zQ5hO15ڹ/Ɓp5$j>HWE! *r~! 9qMڌ:U0b gEQ^N"c3ҡ OL5r֍a$VqV`vϩd$؈%`P"]qV2s]m$b{"Y$0*J۝6jY@kL\TQ%s*V;̥#vf<.Bm_{5/})HL1:_<㻥BZjwlcrL8`(B@݀HA!8)@21?8U|Q^ p`;?SE0Q͓o FX6VLgSKleU6M!stf#l&bgQpP|8")Sv( 8X,޹ˎ:]ݓ -= OFGOٶ-%}X><7DtF#M<^KFy7B,N?\`mUJ=׬DG a0\q5 /P7} J37ffksL,_\ \ BV7iM|SjGK|) bZd}#c[vZz s9G`C.R' E] 6nG39ӈ:ކG3},KHAW*Z4~k2e7B̞#ml_6hd`7Y/yM;6!/K$¯GF;RAV5/g8лv~:'7ŵ694Vg1ӵId$oG&ͬ>]<2 +wO"E%Cz.]wWFI/'ײ__kaniqmy| †SE,굔Ki? ,Ϗse*k E~BVn!pԉr͖ #D7Bqx739U\B0qو3XDUB n`{lyY%Zs"Bqzc}y,)[O\}bklDmav\[dX|^7lY t"0+`Kp|ډ*-4U{^@mE;b#q,DB'8\ɞ1M3Γ+q"j9ۇU;4 jD:Q]- -D lכ]L+:Y0TF8W>rS%~S}F mDڟ$y`LJx'e!8zg{HvĖ %s!{uCδ ?F-gy.b&ws`+CYT:5qw"yZ_LPG,;[>r < YF).FF/Sۼ* ϨV&~"8,#Mٺ/ tdބ]~Izayҽ+0 .n_1)6TyR~dS-Ɍ|[~G >rδۘEnb1f2{B9kp03iO"oӶ/(\" C.jC]put*၅ O{\IiNh~l}K%فCU{0}HH+&tZRf~@:MV?MMg#:L+ 8xa}ƈ 5Z5{ Mu,,]PMKFQ8p+A!p2CO%dFۏȹ[57`x1;=mƐ]3_M0^3.;}U}QU:mj C>>5JQ"h,m gcӢ~nAΌlFws!ky T}8t#*䝥2@1/8ʼrF6X\uMV=`st^yu$kv: W<[x%(x8a#4#z8 3ž)p|c_!Գ1#\\syevqꛖ1"Go_6E6m3Ayv\i25jiAdGs%Op|BW&R&I3d%\6"1n踹S1a) A};DX A4j OqJcEס<b$?zwNAU⋦, :ъlIR6$]~(͍ Hn@+LU)濣g 2\U[834ISG1#HZ(L9uoG .thb~5 Ы|C"b4$Z# 2Б8l0lw@=l+ڪjSiG8|I*Wݍe D g#Ty^$pw[/Q. 
U Ф.yy9 H*AЏǏ!K^Ͷd^kvgG6]HĚ[6л{=5YT)Hf'CcHi@Z-͐wwa7<}Σ*k`fF~$MDzy:>6F5}r rD]?jթ\">?4zeY cclI+d$fB #˩KІf^*oFW`3pPh'_o%#sӟ!q:*E*Xh󄊋:x>G3 /ETpe)@Y ;(Ln ql`Pol((`no$I&DzoeV;g^E&m0iYK!P}3R>6bXbH)z7>Dw -~r!9|{"/F ؊LأCғ7~n\'kfmJ/v%0WE o/ 6KwS_cfV<'N*1 qRkzae/*Yȁґ8-d >C!y_il6ru9ۗ2s| KqjetA|Y?%ذI`vK5$-Vz %6}<-MAsx oLnui(gA? cV" |rY\`FCBlF/ԲgNz9L%&¹u{Ṙ8qUtWMZ4'^C#Kb|L;r2 / g>G=,yQ}1Pk"\+opQʆ()uU58cl,:nr9O֘dn#қdX:huB7qf"aJ #X?H";L33Y^?6֦QŮ:lD-3TɃ3ܭUAT(핟hZqH(%gUrZA{)bUd@/? ULˏ d#XP0Ui>#+8?)]ָV"uQˆ}زy(J.DyF5p`R] 2CX M۾$=_}{q ʫ#r(  ȁ py+W.uzmfCtVІf[Q>`̂yk\r@D6Bp&SNȃa¶66Ea|Qw3kR+ s`H5kSA-ݟj%p *yFF=% j-v,uu*)KZw~Hlt*c>ܜ6sc3ZZiR\̢%o5:x*5zFC#aY >dFQ$50 -Ď2} kPA1τ])T f3P"KC[ښCt>\< Z">-qMwNijQ&.Ps]k.3Olx1P9~SSBDcSxgQEO E3юY~(Va8?Gϝpޞ5IC_(7<>xQF`KMoK OLNljŸt"d.8ås{;[/CL0^׶l'CTNpdJn2C=ϟ˛)[5?KLHUp2"7|QSmzn6:BszV]{*Qg<@_ŷU6DQZ_3ޠq0'BAEVܐoQsƭjYLҊ͓Vjm& "*P^M vU+zgj䋆@ֳhPW-u-b$X;0-4qq~FG _dn5'O-O9I6iܭz*)'3 /RqP$ƦbOFO);YYF=dW?6fM3naRڧ^2ɀ|d/Vulֻ͇ĞӥW 6$ gTrbsݲgullc_P&ܒGaLn}zsC;Fh p endstream endobj 609 0 obj << /Length1 1688 /Length2 9894 /Length3 0 /Length 10979 /Filter /FlateDecode >> stream xڍP\.%h[%h 4Ґ-w ' !ᑙ3sWWF]M,uad T49@ 7;ȅNG q%FÜ!PH '4N Ptpr88@(CG@`gt:)G'O)>LNAA~?`\lO-@-G B0ظ8 qpa֢Lw @ -vƎNж8)rrq'= u~pZa-ew@pYX8:85 b*x@P߆ {g'b22r@VBzj-`'gvg9~y:e>i ltN XAVtuЁB޸2y#x@ ^ _o'G'S`_ r\``_+sr,!.s5O'1O4|`|'^P{/˟txqظx9~^ UUjاSOn͟`w,U'҂p xf.-H5.OWq|Y% .=Z}gYRba'Y^2{ }8=m?T2P G` OtxyޜOh v˓ =_# DxE">?H!78dF\Co$#'˧k9,9DV!OR9A9 >U|/_)?)c =]O `¬pm}pm;ΗWSt;ziLl N{ldkkO[2W?[Q5&'tYqC᜿ܙU-f!s൏HTiC@ i/ر|&Y5XY!lj6lߴ~_pM310}5`ٞ"qkw |2]SSڶi w YׂdѴ3r[1Mw12kom?JT +DGkFs%^P WbʼnqPZKS>uZT^ /`c//6fd޽0Yֵ?t vMR=S51Dt@c ?|=~҂@,X7G[ ;(TܽZy8p܇XE L:C  z;~|1$­5IbU 7KLUyȖ;gs[lr}Z' 2giKTf ]nS}ƟVSVtyoHڵw|v%11!VzV,=լ N.j4JŸQ*jUnFWN%̞tTObię<}RE8h1m+v#Ldܱ|\1D~LbERrSt M62ˮ+-U/( |2{u -ƪK6d!q-5MQQ^wӹشPclv8mȢEB߫$TuN 6,OHG[ ̏kILU!C JJ%"x(|L#Ø@ ;`, S-d1}zwxrxҔB&br[yΜa`DXL;3-јj){Qlo&huQs}"U"akD"v5<QfwtXe)|Ƙ *#A;NؤAZ4Uq;/|fnD;j7\Cj Uͮ_p4(/席) w`X,ۏ8rLٸrL}GGo}Šy5Hf.qR+4QHWǨٍxTT0OKLc":v$ѡZy"4x?k F Uan>"|r .oCD7cHj/ݒGv}9+$j𞥤'dܯ_]0qt❝Tٌ+J 
g4t Ly22(8LU YҞ$)O4=r{iB,i:5123rQwsI- {_ WUa}U Ejͨ| E7C3уCUվ.zFԴ36(65詿YJc\5,>8n-6QZ sP: Wo=VNPi+#wZaHx^ c0PcYx~ʽypT(0>)(bޚ~x \ZbkI0ɬ&Ums|ޭKܔ nK'̌CJ\QEԜF],fr71}  ᜊLd5ISFb2N117xF5w>,P~U2ȫai5X}7y=:9S%PnC$y1zC;ũ{d~ H?_*\-@ADyvb=Nc?7\)iL9S}zK̄-į,;#pZxH@#.( VrZ#|Neڊq|Zz_g-N$$@1!E|x%/@ɗV bӶ. BQ ry9(p#M|HҰDX`׀_7մ vx*|A*˩v:ID8 [SE) EpU.V:SmMpz &mţv' }%Ş%ĔZJLg3\xTSⶆOCs_j>}>kZxa;vp1r!&qڑ4(ɑ {Kd !b*C%N𥥴׳w_{m!>j{2Zm^qC4M\֧|Kc4x=2Nl?kP3&vh|eD+AkWz&c}2~T=.$9 1#I i]E]4q/}?vw/hp 9hBDЊ?A~*̈́}HzD޺p(KX}%MzX;9 m(m[5% %Y&dԴb1-l('oSzڞ&Ox&F;P:gE5b0ȿ|CM>볋/H{ Ə_- saIAj详 .!yݑ(򂻯U\jDX=42;f ,!,vk'@SNG'AmDR)N ޭhpٳє;,j붏:nE?#sX^hy{Y*Lȧ)z=Ҹ<氺@Bwu۞и򕺔oò)+Bap\rBmT'Pê--l0֦WLplȟ[J6.]r Ļ%MQ)5{PKeIF~z"3GhᾔǀcWD5_I\U<-0ƪo{.kd?JGO99SXsq]P>_YN/*mj%оii}0@®cꮺNS.nǓ<* gC Zx?bE=C-1:H]+]:5w)P:pZ~Ls^TOh㖮VZA0'"QEcnl;*8w)Ř0@EIYmyF \rfc澅t2e>DkGPI}^Uhv4g!}f KFg".s  =k +YN8H$~_˘jʔ ӛ-:YJ ba j;5 @, ΅;5Wp}HPh/tj&c4y[4tq=(H"9z5g6/?StUa8o8!+'g_p:zee3. rFwPn\#v/Ij <Í Ir׺T"ȑrGP3$z&F*s K%[WxWo b7jvIpꭌb\b[wH:$P7EgsB6}?7o="]2!}͈Gr8kb^&tbFa,u0hl0܈4rTBlɛ38@tsЦ^ZmqR0 >a,~\h%Ŵ++'uŋl^\ oJP7 3Q7[iy_z^n|yfE^Y&̷rӟkZnXgر2`Mn/s3tiؠoO+JM-JeΥډ`ݮA4=Z%SeZFDy(2S8?ێ?h r'j>,\kMӽT"^%"^@WȂee~fݏ(͚k07گwSXFh@_ſȂv9%FCYH|(L-zuhа(z}rBzVv]5J|}O}'|KܨiA #ur/V17~^5n!w&Ņ AIeyOBT)p)Sn1F#r@~k0s[VJETC}!@DAA攷TbXZ,.˺6كًy@ڀ]{;.Lz ^"Rnm%V<%-EjLưN8c(1Tqh# }Bx2*Oiɽq/Ţdyb NM*'kU@irʭO{6DQ>J{_߱2*mM{Έ"gLH|74nHBd3{JH?)*a)UYijpUE3ұ_ct' g_7B9wl8P%fֺ?-apUB('w5Wf[l;8ȬG݂JOoj 򿜟d^he tFMwg䫮9-zb;cWS2јNR0 J# \WpYzMTR:.ȷ #_d4 jPJ'q޹zɒPA((1^zU1fq ݖw/"6=r+-ǙtG~T:"-/h'"=t/vp;KkƏ0@g28~~vfLu5ue+g?(}7l,QBDEsdVQW :x;`P9%'FdKǽ# OЙ// ph0B(3=NCrG4\P볆jq̐8e29 ]+}hIш'ꆝϜC^[YOC|T hMd/x5>l6 RkI.wNIg/4=;0^LscHm+%PDqX.ܢcĢYM,9eSw D&<]'KFmMޫ o|3o/{C=pERԌtLfza;3ʛwÇ˄:yĥl:Yr_*{O} ɪң 1Ӆ6[i Jrڣl6p}Y|LZo/ޥ7JbR7=Dcd$[E|J :#XE8On+wS0H< U }sg'd$qezspO, Ul,W!{LŨe^J8Wc rٛ XfAwFÔ8yXk8$Wlk7]"PS/lK,s{ ̾]#YĴXe(Jת}W'e[y6p*D(GƦYiS1]t8 o%SK2ba%X_saq2~byn'EMDSg䗲uTL__Ḙ'-Yg[d#qK?>xFRZBKԭׁSn74^@aK;={Qe3ȆϣSiQ[ >I;Xp AUmh:xjߟK$0̘HV ikRE7%']t(Zt3lന%75|'B˂ޥՖ\9 `?b<σw+œlBy*qׄ '6S|΀gS"pG->XJ_N!jՄ˿Zu.W,W82bZLhӥ,J tcO ִIIr 
C<rjƟmbL$SS$UfFIQT``c|5 hήe#jL̬lX00|>84FXsGVh$N_[մ̏a Fwlާ)֌O4aN!D~m $v8i-Jb[:dTlVolpJ}$<17Uxӫ>zW6Şؑ5ad%kB퉺,}nXnJ07jb#V*(Hr B$D|)K\<] Laɟ@V\jCAo)2Viw-(\'_΋{-ݳ#&$̲ dj_u4g58f{@cpQv4{]44 [Ν4K{tMGӧ6V+{X&4#9Y+=;z`Pg;.p/kC޽( 725 }#M_RQi~7T͋bP0>)d:}pO ̕ЕJL,|0; A1%V<īM;bzW&r'{en2pʍ~h#ѱz3hh̋ؠTi(LlbP;8M`l֊Kf?Wb "xHk%zYxx]:n3Z;Y,#gF\ɓZN"]!aqTɇBcKeRj$gؕ{|bʯ(y~Խ P}?8FfH3+|MϵuEK/'m[qtB1}>0# fu1E5n##7SڸphQt"ј0hBoɈk1/`pa~z{] 2i@ 2-L^d5a ;g`cF/euзS=臉q\k;M{Aet%m-GeWK2eT1`w+NGo{3zPE%/eEs*}Vfҳ!psJ=H7Sox_*_ JKPgXy'Ii\׉J,H'#_ *l9'd#rzEwh#=`ˀe;9 W4u7Cx&`+CsN7O"&=GTì {+yp Se8qc>r6[K/Dm?Ys`Y?3)`w.߬,Hl3v}{hh=H(FG>+}4[I ָN,iI \\g v|>d^}{R+A1oh1gHeujjQO\@J̐ṵ1iC{K<&א$5M0V&ChW?i*i~ue ȓ̩OoWۯ틆Dwp":y1M`δ_˜|,j6V?{= endstream endobj 611 0 obj << /Length1 1526 /Length2 7823 /Length3 0 /Length 8834 /Filter /FlateDecode >> stream xڍP-C`mp A !. !CGrνUT|{uWFC]j B`\iU-.''''7=یAruC! H,`O6 O (;x\\œnNN 5@0襡ޮ`[;2z0Y1$@`+ @frzZ ` , zzzrX8q@]mŘ`@ rY~7 Pp=@]jp `+)b r<-VT; U"׿;' lCl6`G@]NcX@-ݠO`G 'Ÿ-rn sp;n;.BNN  w}2`WӶ{:Y7Cm~7a ԅ]A2SLق`>NNN!>N m~ yj=`Yx0Ww?0`+d `'d~:|WI{\ߟ?< qU uTY>))  ;*8*iUl0;I 4YB;M[7mvYwؓUOS_>诙UYݝ׫xI㿷&YkaVv/!sC@P7[?ɲrx9ܞz^Rb=a| WW o '!q|FG  { <b>Q~AP-~A? |8Dv]b@?Sn4/n d8 m$dOcfYtmwAMf ^sL܃%t!D}S=jD;_wf ZS;_&&~JSSH|w r@ߩD.Q@x'U_u4|~G[2]4{n쫠Y|9RZ;%  A#R+~{u77sWuݺȌH)/F|vSH|J>5sp+eGa|Qa2 WFٿ\f| CA;~-&]M?Pޫ[I z@i8l=ٱ)3T" &a=ҷ~l^_;m d\{ q4fē j tdvK31C;mk%#q ~y;/kB}Ƌ)o#$Xj+?[l}l {OuV QJÖo%צtoDg3pcC=]{E*;Jn\o#`~Gl N *ǰ 9->-KP.wWKZ^;f3Pl>/1C%S␵??9]ɶ-kpKV;>=˯'5MOl=Y 86q:MFe'qQ-٬y1Jb-s+Eсwz:8fF%+ t˶EϖX9EeKY5R mEYWn=۲ 5gYÀL8n/LD_ڡv+-YoםH4.VK"sJcKx'TS&@0>uY14IwK9v.v\Gf-.i+@9P#'TKQR;d3޲Eku{Jxoƭ*O @;:Xz(U i응Y:9Z^t<vɕG,& 5:Gtӆ]mD_jd'5x377`EwvOCu XNJvZZ0m\[U,Nh7/ѣ:z64 E_вG!4;Rs fgT5c<wø,=R 7AFN_f.E`Qhl#W eԛLonYzsQ׹1 7r# |81$2hsP-k V$wX/ 7E tl4^/YP-#"`Z2Rf'GE jJÊs,7 /A)!35hN'Hk _);벙i>ϰRlEB)2J'\dB \|Vr 1ke&="[iZ lcv-Yzpy+/j5\]wlr'އ/egn_ s]f0ԑ$OwZ/)$5{`y1MTR$uwȼe߁r7`DO }[Wh%Nih<}c#2~ l"OL}iӂ ni;NsЭ0j F$'p$rnmjrnX.O5LZ)4,EpXARq}tߒxkP]-t" ~j) \VdV[']eb `Z^}6Kq1/vS-dEJ`;`F#ULa{-kIIVH(hb}sZ B2å[UjZ. 
."#+) M|.ko]Vg-YbgS8U1uH-_ ۶B%y uEQtzjMӲSIJlg3n+@61(j" H(GLNt}Ť*BeC<ׂE"?2rz?n ;ơ؛Px4b^9u#?/D=|bf->~q%-v>?waNV%"\gSܛ@F}9g UxLsf76v$ ZVxçu5 &Yʘk/T6v/|0t܇m,KFck4s$ ^UK;& h~bsTՋÈ"cZhO%r;Ycmvh"[/|䈗@^N@ p'IۀS\:Cq}JQژ{WYŝLKv'Y}:]:xJ-BJQR'.CYӼ mȆXs\h4\5ZO0/Vl3Q?gY!c'tȘv-^/ug2C UgW#L}rpJȑF%enɐP79+Vu[r%Gviwǵ>A&IcBC*e/8C,Z h;9KSӨUi]HOӲ$^L}Ix0 {A$Y3`Xkk >.S~CZdv^+x] șs:,z$Qݶ@~)=c (Whh89+wɈoG׃ ʥ'A8}o&/VDMk4x\G Do(}_S꿤^OFWMe!!O}>vDK}"=e&g+" GfR~+4~"ZORaVS(3Ҙ+5Wg`XrFI:z6?_nr 7?J`DCAr}T6?M!hb,;;z':;LMjz鐅g71g7^n> XxfڗNE$,w'1:Ś 덅5-CieFq]*ھbQ!i<9\'P&a1ęBiμ7EvkueLp.~)[^f 짝'L 0Q58CFgtDL r\)hVmQ=-m5Rܹ^OA* ~a {KޞJ]:EzsS'ECC޲% h k!}33JbVoq!'FXg^*MNJ'(J#g %93,rԦh_-rĒv|J g"|v9,ײ1>ѕX#VyO+4孵֯_X^3tb <0&%zi 7I\s/X$)<V>VZopGOM+i)w̲ak=6yP1ά-3Vg6Qqgh[562 l5AeOXE*Ge‰"!10ק.D㗳?_A?7<,&uVo&{z‡Vn H󄌉"|hE?GlrPUlP@ ryF^Uxa)P+xR}E\#;S"lRKTT?AMߛZmΔJİTSRq"`U)y~AQ]T> stream xڌstmv&۞ضhv6n[mipY\{_P)29%]ؘYbrjjlVVfVVv**5+W[ /3$7vQn67??++Dg~@ `tAsprtE_)_;=@hhjl Pu0z w,,v.t+WK 4.`l8f*˿U@H`ke wٛU9#diw_Ʀv^Vs+[ @QRӕ`lohl7v7561@RD` ?:[90XP%.r:;xVfK1ssdQrrʈ!Y]\:,y9d-02~|\݁Wg7ߊEll3+SW who g+O.+h?͙僶 ?Eu098l<n>n:R2O"؛;/Q?@=/]{e5쿽vV^FTM࿷Xhff2Ơ63'V.V@3%+WSɿb0hLm@׉ h:VJ؛:9v.n+hع>l4zk, B?3qXD `X$ 6`@> O?T,*ȧ `Q@5Ax(?AA\A3Ai8m#co |Ǧ .PlS[hW[bg'3']Pf1@Y81 L@7cJ/nQ,x-~?S@[1K/GK_ //:- 92uуZinFBnc?ɀ_jP [Gc?=BڳϡphWr ?ꌓ+3`Ij9`Vwa/ "s̅K!6\jw?Yv}e,Χ9a,}XL|̣%5峸8{ǝL}(R5GIyw.Oh 4ELw#=DD_BLC۴Ƈև2ď@051¼#@3՜JZ7R$ P[Ƹٗ)hX=h8ڽ6qFF}*i3>*ٜ˰eFww' I]p&Mo L7Nr NG$$B@RH..ϑ.Y ̻[Ơ8Ϛ/?~ļihlRc c? #%U2v|RBc~xrz=}5fK ;=Lt\v!IA5 9L 50CpYID]Q$SPü(z8Q'tXRH]}j_ ԵN -Aa)> =űn,G >PVڗROlJN:ifozy,COKZ ?04'It{/*6e 6h,HV^ʶ-e%9O4NҕsD[?-$&yռ '/Zd!nfs|[,eA+ǓWk_P.H7_}m"WSQ^L%IY~eI>:|eà22n(V,F.q5b~- >u3 XG9 nA)X'Yiyp|`I7sen tC2fz.vW1*NCb>J3%NNYwYƭ >C 8GSg*Khr~dJw GD-5}ļZV]z3Fs:@<|LJT| E;kW<;׬zV렋i>]Qgb憶/؆%Bq~JwIgĶR5]33rvCSc^uwοCh&Z0{4\Qll;pdc^n?mў d½N}hW3/ T;n M/GZ(tXR gNڗ!gd_u4B/vli_PUkJVwv^ }xwvN yʆC>u% ^J+"$yd.R i,.룖%ZP дH ף!(rb5?qCʡ'PT*z`&9h nl^_m|h1"h4fѽPp zzvF4$W~uLoZڌ#ԘY-0\~4|hֽq}G'wY o~;G?[ha[͒"fDwC.|Ќyw'<}!IN;pW={9}u(u^Hl7g|p1s{RUP~"^ŝO1!؅t0$IrWvA70Z.yJZ9јTKܮ~. 
+E\[iR~,pSזI\=΂gPdP4e*)hAɑj:lae(gTw ̙P,uF~j<P Q_di#n:u ȇnVt"SY[5̻нA&шH !a~0|<>]#JV}cEݼ9|:%;U0 ܙGWݛ LD,yُp0"[z8S:o!ޓfI&H>!Ja (sfXF?sv'Cr,u 1]1Aos@|cmW<h+j*ؤ nd]" '?,4}\ҳi͝j2X{gWe.cW_G>=τ3tDE&2HPٌ;BjUOc'oK>w5G, R=_/uQ >@!de iVh$—iBꚸl%›<90Q&#mX[XP>ڮF),ê%Q$oҫ+W\T˨ D4Cvx䷲Gm.VB/F 2Gb ;a!z>d9G-V\o_'EժPOjX] w"ADtCC)U?չevBO%6p530GH.iҺ\Pr=7hƕdŁ QTr}4*Œk%a"[vrMsE8U=R!&9(xp_;/P։g{EQ#| %4(4o[}bACbx ~ua-뻀8+/W7EI7ԭnV+X<6*繣[88D]$ID%m?R& M2V2鱛m]nzdlq"%0=`{.4v$°/x 2%m(3ku^)O_#MPgS{ǝk}+? ePWez5J/&pL7hk᯸E݁쐮,#ݬ(3GZHďmħ +^9,tB-8&m@Q{-k]9D(1 b{S8,e/ycվ/iB3; ?7R `5;56;+``K|ԃVrN:\t97,ƤS{r@(!NF-b#9ىV^l>juf>I&^A !)*jbzh~1$I|izcVQLu 3 3ƄwF®:[ѭ=ڱG rtMFgC5b( N+9pNR:جRZ]|z`[P^EmrB CƷ Ss$#ݺ]v4n{3/6q[n2R?૕~C~;}pec#л%ez"8 X2^\)=gӋR*(v"9.4W4(C ltI|˒ʊ{ ]p=A;Jڴ/`3i|ݚ=3Ȼ\ʛ1?K7lkCZ~2NSׄڱByK*]>&A+YbY漺! |}(ay]VX\c:^v7HDeme3A>|o&Qp)a ?b\>Sr~Ok ӗ fS'}^lmGumHYf43#ªE'O9X`R5<_鉎>IS0fJ2Ʊ<qU(z؆r%o})u:g;RLkzVn,4C75NM~ƒW[˷P\`!,}'Ӗߥf6 #EyI$Ea n!JV=6X_=R% h̔sv Xlą*lՒnCs4 [ҭн׼'__p)]~ ^(f>+BV=Ȓ9JͳwMʧq 󨑶O9ORV(;wC&NpT:XO-Ni(K[UP{U{ 4j30sz{{#hۓ2Flg 6Jٝ}_ru7ըҊmSmt͖Ȝi5 C1)q0wɍ:S?6ɫ 4N12?uAxdJ®W cm'qBǻ/?\͑PQmA7Z>Fa-l!Öa&.PI3e4tmbOYV@%J]@KL1>Ruڜc* :+[TBĔ1 v/28 cWHKT_ ,((bEҴLwEiM0 l^WJkkƎzĹ4vH >:I2 ]'m tf jzX˥y-JvƢ>2|{4)b ] ?5^g݌ #zQ؜!4P u{4PVs(+Azlj?oeCY4P)ܼ?J?Z__H\zXWD*_"S<%pmxhy7zWHQE퐆llxp1{u9juD.ϪUxHKxp"6=0ȳ<5TS5Csˌ2h{Hp3=+}I+GJFWfg􎷢hZbr?٠B? T8a;w&ٶ]%,{ K];Yɘ`zTQVOLUzsw*j޲b~՚&%c`{A!lb1U6i$E^4fF0xU NO Iu kݕB$j $^[bv%id&0}Yyb}o '}jteEg Ɩ 0T$! 
\u֩oUj܍xiХop5łX~ "Ds}wJ'"/C+GvN$1d<-y㰝\iyf͔p( Ȅ7|{r 7љ#SkiZ7KOowtM1nVK$Ma*]ȣ"gpF"c4?`5)}㊽ IEY#pn3ݖZոw9{~e \_ĬU=]!RGsSr+e %wqN(f~vCb.NXTun) bL:b Yrt{WLpjt Kʪao>":S.Z6K(ZEQ*KڢU&8dTa j|O,P;N)ǫmV{3FZ4}'9jjnOE-Tۛ֬+oRh1U 7م {s  <Yp,b8fDd,ѩ ]r=CIM9ƫJPIK^?fE׹dff2J7cV·S3 #^c3`ݖn[Q>S@gjLIĔ|_HRۊl7YԻš0$~N vGV8_thD<(_ѺbŭTi77̾`%=<-I߼rJO+u2=W0`^}nVZ"$}0CT>G47oiòApÞCrmwa(ӷ Dx \qX% IdKQ/QȄƆ䡱/onl8j31, OSi(\a;~1=+GX;'ޭ(i2Bzn tv.Ii6~,"- XcYyZI^Aݖ/˪nZ!jP|(Yr)c6g#чKVJ|muՒQa"/<[T(M=I OT8n4Hݽzʺ'yp`'C֧`Ah/8}5L* 9x`O;t50Yʜm[ Cc)U 1Ӑܝ#Y!&tIqFk\`KAoOK˻Ў\k8W;F4M}鶍ךMQd~``+ 8rRPց$Kϴn2}Ey{_;R-$v/;*VA039&iIf}ZXxz'aH$)G:-)w xnnvݤsaq~I}901zoߩS*1yrj}ҕݶ@3l]S,?>ʎv 1uZ-k Цhyu7?Aͅi( ^9!N҆mwH37G"hnĶv},pHu.wФЅ]{`S?r1T{RKoewR-G}N*cG Ӹ܍it ~ge+ڮt@%(N Y6TpcxPT` _>lg͵'Rr/^|(7 )<)X :\B"Ь|XY|ӳ[$h~RyE?X)v9}te y< m2r>;JcH:Y?t>bdD 77}aQd>5ɣ;M'sg#O[u~䶈M=?ڹw C>iu9ҵ% ٳ:8kTG/QĊ&ïu6s-m}2u'TZS: eb%&9˞hxa6 >֕m&UEǴ@Fz^p.` !que>s;1{_<&jS9vȃ!@"+ߣdqogFoϜuq;P79MXN?̾--_1z#SFR? on͢7LvEFNN2GIgpOu8ui.Nx~x:VtY?^~>d{H]Yxj)a:] }`Pr{}|%qe9cMZwEo݌<%c C;b'r:aR($Ͽ,ҝ-|O:}ӌ y .ph/wړ6B/$Pp~שU%s% JHj^AGE+BWN"42zxYSQ%sKr{{if'}AuOPwCU T_=_i?c^j &p(/xŠxO\L"9xv9+IrQؼy)(Ai|qF}BHzUӧs؂bHz&A5(zv 3~mjTo!D_8K>n\'3M^\}nf{{(WeEQdEIk^.DŽj,LSb6W= S T`k41:bNM,G c Sh[ AjΡ5A4Ɛb?e`LJ?'mZKwn ~wW:tv?/#L O~SNU WrtV>h/W"s`[@ZUC JlI35DCj XH'P"X$~A͟: ͗j+Z4ٷџ{%}GbG|bv>dKs@%Rw5Q~idZLէ,wG2g! ' "%ȱۢ(xsFyYk,zygyTs|#yM4dz)*ߎ8 .85>@>eSmR1Ha#^^;@m;IDfDdV>1NWijY[k %z,x(~nt_QQRAHt!BC[]= eıa$x6B YhvRB}[ _X9? 
ٜ9 oȞ~{k˭yZ.yFUg+?VY2H4|x77'0S/"?Ag|4~}*|hIsDb$=x6l=ܽMfcW<Œ.͝xQ*_nАjԙՂw ǔBKEϒ]L}] nrBbP!6sz[zO28[>>+:lΏkjVV?IEY ҇iN<r!{}o$.1:i&JI!noPmPn *e~q080)'bAbI؀'r:CDs"={j\r 4; 󅕓km+^ xK&B(1^ɦJ 8 N: Ȭ5!tS:ZַŷW^bPI=tl:;Өh:JI >9_`-eaRgCi%b Ia( Cq`[e~-pMy.(V`؎sMyx +Bt)q=&e ۹?[3q4`S6a.'A?JٸC 7Ax]zU0ˋ)bt!zG 2FibeJo^j,jY[; _L"ZJle9he 3EĚ KpF7 "ς+dbkrƔbOKN'tzJڛ"#* ۲1 ~J;_xDy-o=0D)u-;N>V>>ƑH1CN^lIk4//ûJiLZb~(@ R@hQ0ntnfWJ^9vb22FuTn Cw}KEU"SX7$tl\4*ތ@tE<1Z„!C-1e޽HpN$$ '?/5 ` fEv]Py(G nL:Śvq1D?^__ٍMVPȱpVl0[tHBY[iYKyk=_`Ud+1`>6SnOe!<:^޻p-=u"с^G 7_{Bu'UVۤi=}ָ9n#ՙJSM v˶H kpH|!N[;U-7ugv#?ƿ'%6`c1|dPJ,?vQ?qnbI|oEMƎ-)m\9\;H wrpUj\57)yU%KK [ ݢEٝ%}и5dBۇs*4{>2םh 3I 1?Jer'w'CM_F}& o2Gf4Kө/5 eX{5k$wyGKhͅG?V{km/ Hn٤*Yu/wWM%1dd霉$H}[j RU?yh^N1DPG` nhb!=6*V&5;vD `Ơ=7Y#.c|G%:[m-YV,K9AT*!]2%;Dh^`'r2}}먪bupH6ONY~ zΈ0S+NЁ 2myG_6y?o_ڌk0Ґg&L.>aY-`'TSeI=p$ A[6sa3k&3h@ZFP ,i?--\k:V4gGZ (>BmE* &4j+K"oHJsh<\ 8Ewٝ/fZqZe(s]򬹅rhB8a9LȪ_6KmC1mA%Isf(QֺG.&sH>JvB9:PF&:k$^GI& {l:-P5x mRx1OVUf5vYg+W%eWdĜvKCY/"X4Z&E|$YW<#q{>wΰ(ﮰ L{`𫍲7s4kV(t7t،Z]79/g >-^)yҘ9XjdW%}q3Lu2+Mmu"Lq8%e'g)mbAuomTSI%yXbgG2 pf'ݩ ΅ Ш /j|Mȃa5 9rS^1'?XeyuzLo[UK'-Yi#爛.n~OP5^wy\H4-CB|/`9)E<4⌖͋ `.YϣeN2*YDrLb4E\ <0 4aAm&Q]8oo<90 endstream endobj 615 0 obj << /Length1 1608 /Length2 7875 /Length3 0 /Length 8911 /Filter /FlateDecode >> stream xڍTk6 C7HJt 03 ] !H7H+4( ݍHw7~9=}Ykڽ}{a搲YaP7'P @ GŠ vC`P?,d\ ĽL7TAJn^1dAn+*'@ ñd`N.[}>̖,naaA)G ` 6FxWf1[Iݝ焹H![vq[~ P9nc ІY#A.`b ]\V`}v @ X/vpsr'޿AA0G'XCuyNZ29a 7dqot@^J.'q#ׯ0cZPW}=>\{(@a qv+ms/WfF@v=,m~%tVrsX߷X_Xppqzo X"`b_] #=_Lf:xk^K077 |;w*BaῪ?M߱`1hy.$Hᷞ/q➸%PݯMm* ZE~6$.[i@/_5`pȯ ~,=))堖0_k/<\xs7\Ppߞ/Lx\ZD /pY Y\?}`?}d=_Ȱ?}d? 
xp!B.?}^hr&/.0l5= RҝcrsDAW8?q2Vagis2d Jֽ㶌$l\׊Gnq Fa3(BIgnj7q"λ^ _78 i(RYȑ4ˈ1.F):/o#!1ҟ/=WbRdtw%U( %"'R{q\RQL$w)+fljffJK}mڄ*QB"0ywq ;<`[jqtc$~Q)jfb BWQc< hy@PTU~r\k> -HTFVI\ {硓we+Sxӌ+ޗQ<$5yVxaڄ,N,q-}*M,}}:2ekie,p ` j" Sgw*bf8eN{܈6lۛC-KO6vXo=, 7u_:I%0E1>7YΩ .QcXleTةOdYf%-s8!M4ىGSJO9YL.ۚ|FR6!kvpc^h|yh*euY;Aiqfe*.ĉv0e ))'y9B z9,VWQmqTq^/q,gR@7_1$1zZNk|ZQ-ԝLH؊6FGߩug{ wL5*l-ߌ-, >?e.cAij9I+kD8Pq 'HְnMխ r ƪt7&%a[vє+&|DS$1)%u'! z,IGM$ yW%`VDI^Gզ滋 X1k_Df'9dhR G#(?sݦY;Rm 35rvb9^Lpx,I~JmK8[ZP %4m!Kɲj!_-ݧd6~gD,K|ݽ"8| 0z-.:aڄӱU r`r~%R0@Ӝ"~N U'ą-2̽ԈNLN.C:׉jQ7;vZ!,pF9^:dAV^0؝DfNH' 5)9;>_ҕ6RE0`ɥ"|<&)V9sAzv=+ QO#PzėNg$FMRsgBț.1Һ]$ N(u_ y >y?џiz~2HK/GfxVȑUkoi#(uʘvIhX8GգR}]MENR!ķ3v$U^%2/o64 gOgŴْ~5:" ? 9FfHNј6.>DJ}vGu֮R3sErL9|3dc?S?*#5t6}K&ƲK^xz(Rst>ΫInQEafA#4 KNVoAy6oӈ-O+cNz7S@ x1MenM)O0tTb?x.]SPH?Јv@†ػG$n_6/{7_L߮M6VHo968dEo-L7ѯN\ s}27%10f _|<,I%Kn2@1>Բuzb^tU Չ˿h>Ym(In%Aˍ{s=\X{+A}9>tNj}Xk8SJtS8RELs4Pb$AiKEb]UCe=Kv~J Ywa)x`AEU׮a`hz {Hf@,$Nx @wK%l/w5pĊ)Gԑsn:p)Cb7!K`&N`FErd|9^ 6~ʇϛދ% A>˴ƐL@R#9N2EJ9h>ퟄK<g~UTBFdQL+r x_ߡ䚤Athw ,bJ>q&0̛L0׏#\`5WNBZvDw[8=gAŦ*,ѲC(snHhG=|{͛[޸Ќ5*qYh .a_O:V1"kޯT.3=(.*f[dnz$Dx3 Zj];q1qH\ (ju c\ЭP&?P`Q1Bs)#b酞~˫kY& LE(:.VISQ;CU bg"SnwI 5Q.gZ/JVv :Q9!w Ԍn.?<̽{ޖwǀLMcCS55k|%jW2ci,*K658AǨWA:1_FLI;a^&M|]{Q#ß΂mZ&?ҝ5fYHSjfouKPn,ND#žZO=tq&bq)74LnG$/WBk8ʇoʒ oS[)3,sJQ/Q J&b;59>"[N9ִZjs8;MيRYy{ 7kkSlM˂+rmGs>xM B:@5rdLN :"[j!3HQ75y\XfE*FC&ښ2Lǘk,׳KS%ܠF Yb5BG0Uc$(a. !R` v L*JX$i< >\C?^@/Ѕf<Wdmp ;율e!ҸPeTEjyL9,\ |X+k'/? 
|*[ 'ǓC>Ox}ZTW\)ϺQe?l<7Cuc=k?WAC:9nAPK:u òO_:fu`sPQ\廃WCL{~S ~ԩjphN Q۪U:ͣ쎑~dƎ"/j` ;`b|CG³Q :7r4 )Zw,دKYE뵆Oi/+l/ƾj0M36ӧkZe8_g4R|k,cWbԜb 1KEDjc4\>]"p;c|Kwʁ&=˞rZSE*B,r9Z;hk'M m<:>N[,SԔlɝddz<"V5NDKE&8ُ]f]&'JnvXKpl>I/aZ ‘q'Μ|WrAx6׏6258Kam"IMYZx'i3pZ%U#k)zGQ%e/(N*xF^8{ݢ s{Jziq-Ɂ [U 0+WB [uL-\dV/=x%R/ɞ|EyۣP / nPLv({aجhK5Ky:כ8}_նCe~]fOL*Mm\)2>< ;~LoAGhWw7[D(ŧQlUUev!4?%03<.%Bz(~FQMX>d 5({|?&4™['k@HбcV]cዉר__gJVOu~ۆ-F?76{Ҹם\WFةy+$_Zp+$Oo0i+K뗈B-7w·Rj9LJ2AçSR9tw_޸?Ѵ]_[S^la)É?CdpVzAf"m?j g6L~BiD'kj?Ƿ1LפP`[飼Yܐe(;^vUK耞,,U 2-"uDf:v[ ZGl|MT4 \:y åoO) r\ Rppy>gKK!s~ʂ\TsSVDيdBzݼkiWd}]I5ޜj/b >Xu04Ιye1 оL`">{4)w Kze6TJB1 2-q'81h3Gr :'ζΟMH3/}G 3Җl(R\2"? 9#d:1ʔ;Gt=|A\PX6Ssf6YL1@]Ҝkc( 굸ֺZzn0Rׄ$1_L[y2$oڎ[%iUbqSy$׳d..Y[a];|^Vn"描Pm,o?.04"%g  MNj>iе?M= v_ ։(8ȕ2s<81 S3Sf-cUU C7ή| y2ň|;; "8M<|qC؅f~,k#UdLccr+ EjEzf^gЦGƯ_pRpVG,XT!"9VxFn6+sS>F2&*0z$3gVћpOtR+qWrZa*m׀Ї,Z[.á}NKpUP-z父H?CɸVDG-xBVs5K|,:_ګwHU\q,`_ַuPFeTc&WO w=L'æs54%>6;4J.ݸRU HLWaZw6W?M=0Tցv?Gٔo{) ??JHvD^Yg)V(WOAs]OG=or yb0;Ìݮ.5M|rѱz7diԑRv޾Ta9\rz.3`АNu_9EVrT Xqr Y.1,[lD8.]oM,hj6^HCXC-Oi2\j*EBUQxV.%s+ ,Ԩ=㫯[BK{o"Mm>MM֦ a] +^ǑU_WkR$^.54 潼Z1Dя! +hM [ʧ{UE_moun hd^E׃!|WJN TA6ud07㇮$^H"z\RSB0Xi-u.UUn;Ӷ(vCW6頑;ƽ5dw\7Ud+9N惫_g8>ۑTq5݀#M+rh`غ%TBeID]ӀY}TIBIs$oHπmn*P~gGB#}Sѷlט +R13(D]YT+tLsKt'wbC9Co LUBmũ\q\|£=T.ETTmu endstream endobj 617 0 obj << /Length1 1413 /Length2 6039 /Length3 0 /Length 7006 /Filter /FlateDecode >> stream xڍxT}? @] "1! )]Jt+)] % Syy߳s\}}vad*A:B50PDLoj Issprbn ' D'Ah>%@)Y@\LLoE,@ Ew([ svAc22B*PO.PwLD00EaP?\ɻ>>>" wY_CL(7U24bn 0E:}@PF z0:zC(⏲!_Er/G0oct!`g jꉠ}BKG!1 o r(NT10U @DP0EYQCChaP0~ A}r! 
Nʀxy#`:`D9CI1))I  ` cj @z0e@`NPq =A D  00u!#:9c ؈az0kX U@UU/ @X\ ,#%RiiI@?`:'$@O>_kAe0 7I1o3XJH Qp; ^h#1oUKՇB`^AmPA8c- -"v҄B!F04k!Q_7 JL0̒0 CC#!M\R cf9I@} *@1&LA''bn2Q0  D=0SBB0 ׿{yzb$$CP0<,"NG8jwl@$=F Q_.gj4UrALΓҏc)RYlpmx,"{%o>FmݓJ)82 *{؏9V#)>U2y:YP3qSpztVeŞ8n}9}:d MMiё<Ci!!}O*H 6),5֒,0řd!A^dFLo +ˀZ6R%dj' a wB.MP7O_i.XTNC]dX^նLr`{ '~:k:*RiM ^:uo0%~_oƖvӷscj-!ΈxUsdڇAXsA^Y+gSWͅP&#TT 25!<2$1_P"X.Np%wt{6\Z(iy|ng;) ӼHGQ+C~*ȞÇ{[coFR['͍[i]ۋݬi@jg}giwy{:;, YMr4OJےfϾg5֣6q>g,p\Z3$ 'U,Fx;!&.J,``MJ 7JD/f_|bbB=k)39kKV%Z`z/_@48ݧw|q ͠S-U{k8Y,O Uk @6<^n/OUmHgKϫW}(1^V}ksFZB n 1#٬Phy]|KE!g137o:bn;Q1b$9,:~4iS}U۰Hy/[;<*J~و7ѥ`9AFN2}=4io KF@Tкy3TIcXO wڣL~ƥmbҳ3dDZ$Ͼ?>=u Twy&Q1;bJ܉Woy~se[0۝zp1W ~T)ϛRÅ>!5-oa &|c[}dG[O'wI-6Wkkv9fC@ثՇS t]ErIW^vX*'R KވlušN j[QqࠀaLG,90`Єbxiq\@%AE֑ 7kq3O6&{@9gJsB$'"xZ TO^2|dSѫd6OWr% 2{7dH(隴S<ԇ߮\yS&rԔ8J|83ǥFKL6[dvJԸ],LZKy91(z9~==hd+b=;/MHE\=7N <(|()̽ALm? @ Fe{7Y?u^6-~FQPoEKnf`d@sA~[5!sǭa`ܤ7{DV<=9z6vښ"|wTGk4߼XI{+ZsJ^Ĺ҆D-5xnqjI6T_Ƨ nmY}^*K:1[/{(| ୼=e(a˨:b}м?w/"-VmabqLL FW4^e\hUHo{ՅwOh_`&Y#b@INX>n.҄I0΄0.1օU}J*Xӭ;=%~f9 }MCW90-lϹ!,Fm9X=e + *pX&~srNQҵ@׵]HRX73IN.\]DE0yy+?eݵ^J hHz `VGߙc*F)k5Ѓɝ]c y@;j0޽z=؇g^]A4"ύ[`,L/FPؒ~"VI8,3?o`CnUUcq bZYWĶ$*,w#fh[zT+~*;{+v&3p[oR=%m|J ftP zZ%.I6&e$( EȎf93pۨXq&$o=[JCf1qrx7٨\WxWe&Xl͘vG!ױFq;]#[%@v~^=ElO|}*ծ/&GW^.On"c~t6O? YQ;)T6qs,dxv:>'%L vѮJ|-X;|ʋrI]}<: >J>|'Tƃþ̸ݗ R\ƣU,E2񗮶TS +ŻEJ! 7 9l?w+D]bj&]> !bN݃< H){ȸM)Ek„;wrs㲽WK{ vj-VKѡk|2z]";ܸIfv,j9鍦',fi5SLAQ!Y쮛/'?S~nk{WU Zy.DPBܝ`t鬭 cuzJf2NciP[$aլwcfBl4*x$(_SQdqK#݉(n4X_9! ̜(F ZG89ˡ֦l'ڬV!;F2!'\o2O,ȵ0d4\Haq'XsLdbM?4wI7lXC@R.8~p>xOvTtN}/6ns4nފ-rj@G7Wl/r* ,k<;L'.Ջs)Nm_@JJѮHLzKw" U=(.ԥ\߈^?p)vOj!]hyTU]ޖ>@JXKC>X 8eةQNO_wT1~+릅:S ]#2yρj#5+u\G^-SZ Gԧ.ߝ٦ΊOSZ||>vFj.>HS ls% }ge[R^{@ͯ;lv*DCk3,7I}2k]6*y \9.\(zmN1De߁o R9 YR8fC[J햟6oRӖd:JW9x|ڏ+e Cp6vccorf_zr(.I|kL|j7<.[80p8Iyo#=B3U穔k뎂\η\s,o^zx铴(+By^>ȂĢM/6YCE=L._Vr27ygi@S%/PeNxiʾd}r1~tv\g *t8$32x3bQ,=o@Su~AK TЧGRY||&;d(>ݪ8 y:زIE*k I {T ٍ痯wT$#:+qyJWcq٫ H Lf%3+jO<+O! 
||.B4~YWw-,<XTu(/~>= o8u~?\R1!Y>Pۚ{I6nl;U4*vqrULgљx\oq{5&w3K{0xam\> stream xڍPM-w  ][p` 2kp '{NpOOv{kh4&9HcbHh)pq8999ttZ`_vt:3G3 l6=8@.v.n'')@g!+PttR`ks-L.AA~? gb?w0h:XA0*spssc7;8[20 rvY P5Met9;}NqX ʀ _>;׿G!0d3 {G3b ہoea0V@3;s9ϭd%f p; P9(|2K){{Ec`g{p}7׿bi KGm w̳ ?6k ) w ?hy8tra~z[нf O#t..%0Y!lYx,?.fOW̡c.7;%%^lܼ6 /' ^w53G v_[v[k:<+` 9y9-?\r3OT w$bglwijr]`S< 5* Kz`f ~V4;'_v0TT,lR_v? 9@0Y{2 W,?]2 ? 0sv6@g zJKbpC`)g>+g?.!OP784F AſsW?~g s+[>'8^ϊ>7r|\L?\O_'n9/[,g,^WKrc9 ;8Gݒ!uVzssƝ9'1+f XdGik)D<_ߊde=/rUvRjo70XZ}v.>L/W6G( 1F"QH1InSSޤy7JY)D?t)趺dHb[>0C5h3OO4!&#JPId3l 3KO*e(*+#ECT˞\30gM̘J)`J#hT<5a6 fFrF;G '>MN>7ᰜ 8(y;~>\!im,Y_P\X,zWxf2m[qIvce]܋>d\"4\={17zh$ 5z {Fy_dqL]3lQ3el,^V7{*^[SzAPw=}=72Kg^=VlnywZVW_<4j_W b3/Igw ͈H19M2,SM%>LS+Q_*M)1g/0# !r0Ҏ蹒1~FxT=j QO"xxnak$^/LsGl>lSQYx#MlҷmJ5^h\VxTYޑPwx1~aWզ"t!TKhS8R!8]+e5K'25 -# 7{aܻ2"9rA(}c??*ȏs,TϸȕTrL@fi3/Ďdm``_ZU63}MC1~nmS]K.HLS/|B_ښa(| /A.vq2Q&cM+-LlFËf93:ht%.?#bRwxtk=xzJϻ-LqF,~nS1z!*r# ^Ӵ^<-c>-{ZXŗז[x9] #dԔw˥q%bU2t 5/9EAnN}!G w[!Ty>vN q vcvA2@U>M-2滖WCJ\L4wҰӻ]-% Qd!yo*FV-zvVylsn})Iz_"N5QId8yRG^gO.[D5M9MQx Kw lpQ%6{kfm-2v=K+WH>ӊ `gil7ʜן͍|4%Ri\7egmXKˡ.BȠ4 f얈h}?g*}>Y>&-ʯ(/In|Y`}Iv JQZꐌ:%4IiICiay[KQ*}0ZD˺Jx.G%]R@%r "@7.RuޫKjpiٮę7˿qzh)3Q{/ . 
U[q)4M!"( ,5m:6, l`uqDZ]F9Y~pv0@ tytK!YP +-HR`ecXSlؓ _it:G('\uN~A]Fm8tu4JLآ;o# QZi O= :5:dPGD[6j1.AnYmqAzXC]:vb!DpK2W|fFUk]`-Tk0sG@f}CMgx{ʯ*:.|]jG{@3W}5T(#qFqIØ0$rD 5ӎc-YwG=Ztu$muM*WqS$]Do^{x25 kMc:Y#mٍ{ZG:%xG V?=R<n6fԝ+S&l-F/3м4 <(b3"t١aؗpǮ.9;XdrBj fs-T|e,A`{7::QzzOrjl[o`m#qr; +eo>ӚndeSik֫C#+<:hKg6zҋTظҹSHTx1;^)zz`9gQ+U\+0Xح d4밺{T؞'h q6BijlFtƔe%O-Xq: UIab~{ԅټD^uc1~g&u }6Dd;A@?=KA&-7AQlH˚Ev*?J JXQ?j5UxdEVh~F⧭kMT+g\, g!/jKܷG#Y7y9X\y$2 {DP?Jj=~S 1WcV*ʹ\(TiȶW AMjL8 PL1gycXe'Z=(_?~.nrtF$ͱZPϸm`%Fq\yG{F3@]ke ޡ38z?ZSF@o;$nQk<>ށՕ[N%p9F8qmݿ$v}ӓ Xvum<=Z&^|Z8Y 5ڦEtRxQ zݚs@Z꽽շAtۭ@;?¤I)’L|_rߘ@[WS].d89nOȚTU'bHaִK(גMཥ`@$h=rbO?N>5"-:CX=yfGEBi /ẁ4ш.079@ h/AmpΏ lіkVșɃ*v2 5'#!4[{f)2wxInSKpf"?mg|BVƓҾ&[f ˺м1Kj$T=[tCŌm~LKt;\ۢ*{ Be^G0[_^qkqEޭD./?9^ c߇W `_Vb;%I t/!H7Jvlzp=˛%PD'u MTMs,-[O&$&Iwwu1`Qu7w%X&MHjK{w)J5(%J,QD;~L, i᪫lMn\0(1AҋKeIJd:Fq7SJkʉZ;^= Dqo+ QL#w4oUR٬/[%guU VMcP2 B"0Z2OϘgdp&45G`oJ SQD8'z/z;(#/aebRujV/.SЖV;/PɇӐeJ|?:R}:MZIзe ޸VaYh"y?1\M> [tW1w I˧66pK겏cCY!@b<'-?'fѳ*M$u2܅Oo5&Ԇ!U H%$(ؼJs˜ξ6n!rx3oh4 HMj#I*u{en$mtO:< ݖj^&n}iб  c2Ǐ("/7ޫb^o8R<{x­C;Gll`+C&/Ӗ-+"ev/2Q3aDGG'Q{‹:K׍ǒt$S̀! U}7=-88£5Kxj-4'T2\k$ @GUI}|3<ߘY'KPB%1|[Ji ZeKD(*Ps26Z0U1˼ZOMS$9?r5&EoynIpռb!/ $9/>4:¼y諫Bz`^WlsAQ!"sV^|5J+h]ڂI$b+\Em];8.>@ŜJ1ѕ>;\4W^ )E&QþLUM$5`1aZR!?=78 E ;<<\p=K?26nf *KfzOY,;M jyˬM-uMR7MGs/=}OLV? gkx#>3Jᒷ]El^0: |'zm7_dy7]#SuܫWo8&iډPO3ѳ;/N:2^$!Hw4UN7OiĕQYrgߵ|תv№ _b(CkIk,:#D$GH&# Z:DY>2{o] i;eL635x$v3,e}i"sTw`5~VጏzZU鎒x2y)t`6O_!'ɇ{lJEѶk3 bOms 3Or# Iq7Ĺai_ ZZ_~OS˜?~;w3aD-r-p 5B^Y|FwZZ,,l[t]U͸&s^ĽFj-^UQAD<&{t@ uuՂ"\sZtV:Ьi*s~^WNN˹l''i\&ǥSz{ &ak4jwvChUW4wCitPDqI{QƔ~:m{zIarw+ nYuQJ#g@l7&-Stm>,JdŏZL{D^^k o|['0 ۽|^n}Z=+bU*.V{A{7b졉[[i9'VBYJSŠOO*'"~97g&`˦b\d#hG1ÚɞǸ\k^]%e s#5wJ8a4OE2Q f-zVp/wm .2E)Dx{"L#,:%988 OA[7;0[|wb~`}EJZS5L'po:y_B=xذRxLGk 'KވZhJFM2P;/Joק,^Dk/^W;4R =|]{?9}]&ҾNخO;U2i!pS+|F풼Ql"Ƹ|^^d^ G`;2 E0c0%hոz@7#{VS'?tN aJI`|Xkq:Xozŀ69JM{drp 6}W4.@GF :ClƣFQ=š1k}WhHmdEoJg8jMc[O~zԚ͙qDe`;vR,:o\&:i̕Y.IZWsћ  v1 c1ü7gw7%N l}T׻ -K}1T1U&$~/8*\e+EYSvIW_h^z j2 -#MR{euZG7 GBix$k|e 21 w^yҐڪzwU^G7o*h$c B7͑opB/:y! 
r: !1weSgky&\)caf<&S@#0G,32 XdH_e qwDg;?oΏ|]v0PTxPoMn=tr2ؖA% Q3YIۨ;/p~~Y  ji%:k"Y\4J"ި/ 1uxom\h8ly$Zn=,'6.Tټ:#(' 7A':܊ȻXDl>!P[PHr)Zz x/ѹ}rApl`y/Ҟ$OƘlݓctQml2u^kW0ԌQOPmUaI0@.ʊP>0G,L$RPē[d( x?LTo/J*Fgp-6%0W8U=97>w>/DF4n?xBH8X`.B [`lD,+q¸$se.O2 =Ln]dYPI }sN{"`L xa(c ߱bNʍTqs{2>3" qv@'r$o`GVBG"Nvhq]A@h\X Rdo'2yQʑۡXYT,DѱBH^vYQ7nkj+{xJ}* 2HdЀpA-$+d!/r]0Dc6r kZd;5U >0'\ǎiOfmuN} Tkå$]v5|mu*w °˅|`n汱UmrkEȡxUeF~Q`Γg Z](ex|EZLd}Xh.]X q $ytnQr9%i(I5ּ(^! zJBCnSP5BĥGC?Lytn^Te"CzE1DCe?9_bV6LKlNr-{(t& 8N 8Wq{_mtA0^7lD ^Աkg֕K@ׇߠYAim VAxk)jO:;15'ڗ(8醰&t~)n >K*$SrԾs|z$O tgq8z:B%Jy}Q憹N;|ae+97NJSaj'ha6a,`,N^J/Qw2{ΏYM#QTQo-bUK~ipt"WNz~#<+?UfXB_l$P *>hQbҠTI`!*Gwl@퓮owu#q8sPbk_1X}ND/ >:0j%x2w7Bq:ow&RѾg1ۮΑQc09XJ݂֕N?`I}+əQ ݄zg\v*fb3JVB?^ua^5}O'6j0pМ@JU:NցZIWľ3qkkۏ4W#B endstream endobj 621 0 obj << /Length1 1947 /Length2 12678 /Length3 0 /Length 13887 /Filter /FlateDecode >> stream xڍP c 6Kp\ww [;!8r9jYݫmJ25FQ3{ `aagbaaCTm#RjAv|b;o6 cQ b`errXXxCwHL9{;3"V??4V^^n?@'@l }hjlP7F vcfvssc2ufweU3hcd-ј) j`7c' `29ؙoj e_d tGdg``DcgxcWc֍Ro= cF?Ҽ-G '۹{0}vnv^A ;3?0sq`ְ9e%漙YNv6t7dO'|oc}@/D/gcW "++ d -@vd3or豼ɏ翿ffog+fR{:^F6vv';7 YTAwHY;s{_;v[4-s)ٿGX8YL!4GWߎ\llEۂl'1^lUl=7T^bJ7sXh 6&IǻNd1XlVy'(|:vՌQܤ 썄!܅;ћ))yxm6Lyo# n㬵KsV%.,귚>qwsWX7< G,f}iM"Ta#0TtP6K(9Y7>E#qxw3yk^u%UTj|y~dڼJWbm Ui+c# aZTz TͻD!*>>] 7KVLbː6ō+ӹ~YөIŞ:YCarŪw0b=?ba/U1Z'meskF>EZpVjy%qvSj7Qx)y_Ybn\un5'eH&h1zPv}OC}ՠK%Bb_K#y;c8XL!e9.\NPt:?gf}̍}Vݔ%ly$knl(NwYN[1{>T~mP%\G_0K+Տ̊$򚊻5AK9> 1^cmF.5,98׺^ 2[J_ e*1;%7 )n'$/ {H:nƞGDL$uG*8åLض;aLXc'Mreò"{=4ND1_/_;J=,y*38v]*P{-6U3y$^/g ٦z|HcN `zStKraW_SCQ!f"2F9׳TlEF4< 3MqI QsU\U%ߩfVrp-zE/<,vL6L{$WJE"6(]L8A cΆ.ۧB$tX|,7:˜50"hIM$FFߞGȒTQbSMK>du8J;ϖīػ<#ۥ4uuC* LkmeDFlU;uDJ#"`|A>&ФޘWYesJiv*Bj?Zœi <? +uRzH94 ږS&|/d)ѧN%TybyULk 2e,_IXkp5هHG6y-z1N,пV^Vx6R҈%;z5`'%z/H˽z2fPwRGB(Nm OB pox*8Ӥ:[c >IW/zj}up}啬]ZrI&idO5X{9N-;˜tJGeD\DM)$ݞXLZ л+>1]mq뇵nCJV(0/Jh3S%Z?*_V_5אӔ\`y_qpcDyHllDW{m+&{Wyw=l? O@􄜟e-EX5`j:&sCna2z7o'2Tz13GBp#T3|c+vr J(NYɒ* $:)YV̬bohj1EJC D0RDծc$26.KaKZ f1ctIdx,`VE(hmHN . 
V- V]67o.z d]S#`\au*_}qdx͎$ SRwm,Z$'ܨda';U0;G/pU!_RReG3nwLrFy׾KOX?.h7kQN=J&̷yWm."8R癑 mXҦ>J C[1>(sxŋ!D "ܛL֍cR9ʞb}k3€@aU;R3{w CqbD[W}3;.;C^SFJF"$G& w2)z*ZׯJӝᛗA⤩S6}MkhsMȏ*Le@2v>ǗVp{'/A* jM]*c#̹7SRӴ#)SwoKpE`VI oI=·;m%T.āOeCPD:I&Lg ̴s=cG뜏I2xTν1gyd^'1#W=pBk6ONvJrG/QW󤪭]u)[uvG|8\E[qW&\ܥx&OIkoXr XM#X䃙&{QλM[gs:k,l|cS8ujv/E‹ 8e7npۣ&5],~%ΏZ:CIIy6Th&Ȝf7`4swG1' }ۘ]^kR.ߡ}OEvl\T.5V!hFFaTd/lLpo,Q+*8አv/Ε\Q`$pyۊWIӻ!u4OI?HLpzj-ypFG*kK.RvB ~lnLD>$LvsIZfk䧾)ydP&0MsﱗS)ȃnoD]z!t-``MVNCd.Xt,\h҃YhEP6wL*-ё(ę`YcCuBlv B3l}nt1.@EP)rLE=Θoi%q*(碒I]uzԀK/JEMA@So!MG$/$26|0?gQoӫo|sy`ǒ).iɠxKD^%0qbo` ty򧜿wK/; YP2"iak=hԖǭ܊XVi!+6eV^7p ?N[F۰d5g0y֮?b,Y T6'kghnpڛ8 vhULv(LGoDCnQ \OPF| PMM e$8<Ѳ7 )F>G%Yxc´k~ʽ%D#6k9ʴ eC2ۼ(&<ɢ+PAJ ^jk<E@;(b;O@ڵ)`+a+TDNzXr8U%ۂ1*&z {v.%~s5&6 CL(GL->K󽦳ϵ^?+2]O־lNx.] <"MB\~)\p@enIK8X<k}[[*\a#U)Klբ#ʌT9R G8fi<_4kZiU Jc_]eͤ"+^:*;I9F]&)׸^ oRj7>]+C-F)s̶"*P ٖz!8TI*=_d13_@4hy|z> G%Y}Ygy=@0FOYKbϵj<'^ :Ur8mNa{ܴTQ ZPvh-n[B%L&3|}5KfFDS &zo\AL]Ӎ=@(8Y sGpln>ʗk+4؊=K53.Q;{OzYwS ʗ:U"FK;MZUQ+%IjP1lPq0TB'ZؼeL6֕^kNFk6wiִ[lr$1i8Q˃I|%,}.txyRQb 'ǻÔHGHsf}}?XYvr<t}l_DPÏvLb/CyfO![^L  ;'.j3MOK \ua5S Ɨp=x(z`S=qFNaR)~:ӯ4S pmc Mok%/hkҀuz1H4q9FUF MM23vw' 컶/j fd &+4vZ.6@i$ɉ? {I3nZ@-3CŦ-++=)?vLwzrkm11.|n$N~|j^?zkpAD.?cB A{f+vA.F^}DĖ \ow [OMA(j^fLz?LR6):0\mfvXo8E@eK O*f'b;DfXz\Zktle'UMQvnv9d('EOwYB2^l);3YOBew>&{4EfP*:)3׌U4&àzh>;?of.5{xpiJ9;;D7f=/}!b"6^CcV$6>5aWՓ&ϱ}Lpwxsf*<ǒn6zaGCݛym$"h#gZf' - -~'[0Aj&"JCz+>h_RuC \-Yh,h᭐}qSd=eA=JpgmH&-.lIJ:yi{ڋM´hlyI~I׳7wQeT=#?5 5# aג 4Q[ s?VVI٬{xb':U[xN:,_.eU ϓsVj8.}E<5'v$;qWwHe¶|\QITʪ[y4 R!K<%˔_~$ r>:`a{T=0XKr>RTslpb^[9K<xWAKk`s<:^c>15aJ{-c|)caA>}9/|lm\hb#o '>k$J98[RWhK>kF'# /_b/Ug]ފHM$7!'ы; yR 3^zO /G joZF-_Se5S ޅrT۪[UNEfXiXuSPMwx-Nw9:?,rHH+XT7T |M-$z,껆Riʘ{tlO;n 0`mc>ȽO]Y(Ml6B**|'р~䤝Q<@W#gWɔYX:}^ gVB1;Z)a@e^e!M K\HcȼV*=iV]JO>.οB?e#]d3 Ew}m=S+b/fٮU!}2Jw. 
g(we{߽ L0vىa,GB%頋Rit( rq2^H V+tQ^9#00TS pK5; ^'2~1Tvu>5I:#AEJa3}|$]%m/) l<ήslf iu ŲH -_(U-=6,Zcc:K,=Ey:ʌꗐS #+ xvJn=ix(mS)]7Q .ċ2ǕP 4|oz`J;E>3 R4p "4LXx)7D-(ߓSZ破lVy.bPTVK&GmH|оnzMTw `] y@u¸eݫ$h8e6gŋRDtڼʨ+$j~\Dž#^|jĶcN\9 ;3:BζOZH$80u6%JEӱ?_>ZŶ aXzI5Y' e\J,6#5exyAC$a7^& |t2t1<E&O <m0-|{߼?#kVj$X+LRa̳?Slexi零sPymVCd쇐Zx!&;1ˋ}ۉ}M[cJs88~KbN| ;n7ym;[%a-l9xyY6D48[}]˕wr7)B`D 6ENyP,hﻸd,&k s7AREw 3&jpG:_[h֗bc!"}"xR5@Fs9CFOaQk±^eFϥl<{'yD6WظNj̊,% _^\]}xwbF &ѐbK qp\) ,s).泟38B$Il8[ƚV5cΏwr<*;_d$}ѽ?f؈U^ވ=~F&p:1GIbLkS_-õbkTlF|jzA_ |>UgU|/@wŷuU=7Iﺧs0$Ѿϵ9B(Y9a~?s0`d0ICR`@+`z_ʠ\ˮ}n7[uPɯc狦^bXC\ ¶UM}nP$j]B[~j621ia\µ cT,GR{=@u[p[v'O? ZC/0Q[Gݲ:6D-C+ NrK\Gl)c}vEMլw #t><<𬮫&fVm[b>\qBsss`,zV67$<⊢?T؉M_=@ G>4WR;8E_*[זp-E]}6CGJg:$0wдMY̞lOE7EAhYd/%,+3g[P}{;ciHTvpvr_X١q(8x)!pg^ʺeYyimMvvwn|uتS5z)0TU^\.Aj"AMƔ^xS0dmb'+oW ubCn60nt3b" PD}nPl1G^ 1,;|*/DW೚_A숖Ċ3hLn#*KG~[8qSYpP¶%%,Md[}V菪U]tE<6x\ ͩ `r٢xR/ bB|| )Cv&UPVlDyLf $vkm DH$mO8+Q'%WewQ$u+[83=FZ(DoJmWD],EI赃i,d\()ϽGRO1ʉhREƽI %K 4I: 6 ⹥rHil?|Oe^#jnwn6v~j s3/$L}ĖUHΗat X+`cL! * S; ,iVS/ iTvmBR 8(=.&'+,B'zFg14'J 2HdAeFqFI.3$NEc{CWP)}Ez,Fy:1*go*/N*zsi2iZđ%3t)mc^cI.b:P?IKdNoKk U>Zocҵ!lR]* 3=iΨtBe #< ;qO5qq{q%$ƝQF)>""{ WlG!h } e4fWJnKIǂbPUu\?ޜ^ ?Ư䢬qI$K/$P\]PX+nƃUtE^Io_mZ!8s*V:kazbIo]rnq(TIz?M̭MT˿yq儙Uj6Ѱoр-^k~YÄ ͛zmY3BY@i W{;Qh{[&رAb+4ȑB_$:T\I怬1Z+;Ysڤ |'j̰>.pjB~N2Y,42icDt`6 G.yN1@ gwGLz`[bF7vgV6ҙYR?*?Uq k>~}2KP\(AwM+W&n++F( D$q6[J$F{Z*6x2ލcUk:1u2u'Rҟ`vH-&cC4_#;\\m09eÊ&lɘ?-  endstream endobj 623 0 obj << /Length1 1518 /Length2 2602 /Length3 0 /Length 3577 /Filter /FlateDecode >> stream xڍU 8T)"Q"9dbkc43ḡ13]#!d)EeIH$[mE$EɍR8Q @c06dR`O`S(#8ӉT[f A`!L]jʦx2A2@WZC L'9&@ *  dPv82@w0;@XF[^ D-;Dz@ |2pv[ HK@? TX.XaÕXD&f!XTKa1[Hf고2A"4PZLKmHW50X@*//tQ(Nmm "%.Kjp6H*^p! 
02% D6Ri_!5H.CϤ(~h1D/1zǎ!@&tt=.{'u /O` MG+ {,:[JE~3ٗ]?/E4wEVAvw+Alh& u/=Hj&@$eE INT6̍jץ=@':th_6h~(l=%H'--@`2 0$-]]  m# Y&1Dlt&lH1Zt$ qBG@RBHF)!!z,DC dsoLhM⇼|`HzA'+nY`*'6d%=1FsI]FHӺ}ϻ?LW57>O%߱pAZ#vMMi$MGifϳǬT?]LU}o;VzNmQ[[fUg6J)ӶrVcsgh}]G8#;>aߕ5zGOSSy莥Bƙ _ԪJs4ڧi4̷ud>yp^t} WӎDs7M'>>\LHgZ_l6njfj !o}eȓ.\GzW^9whCbU*S9/b-YFxI=s1;G~O}>ݹκtgs8:=G^1[yu*_Y{'p}9iT2'J &hLU8O1aR@v>m9 ?{OS=mjT~a0e": hgH^Uc:!]۱Jvh'>EX@/ _cF dXjS΃ɮ *˖%>/æ18&uʑj֙f [j^BL19s x?r N׏Z,ꥮ=]qd"je3`pJST5`(YW6pCK̚4 eB#Ob2}w)wu^JJϯN?d6#`٨@i#-o݆gvErOJ&Ѳnlz;$BH`L ,*' jY%^E ۵%52Pp{u9}Y8CJ[&|()ђ|R5*} )dG[#VY79w8{O tn̈8p<9_hA21CLo?rl}t*wD}c!\'<U8o\u,RVI&~]D^FF$|3YChF͹RCm5gh9l7Үձ;u"ԍo*k8T_+@bh{Iu>oQHꓼkR<蛎Z~9cTtgp6.,Kn5m \o3%:y"т&apѦ}? {JabQ"flj&wj>KzvL+šA$Œ^^jFONK0T}x}RGc㩄%Rmӛݻ]]Er{SC֚EKN='Gpn< U kC>_ڳYSnܯw(Y+iբ$p+d endstream endobj 627 0 obj << /Producer (pdfTeX-1.40.16) /Creator (TeX) /CreationDate (D:20171008163158+08'00') /ModDate (D:20171008163158+08'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.14159265-2.6-1.40.16 (TeX Live 2015/Debian) kpathsea version 6.2.1) >> endobj 602 0 obj << /Type /ObjStm /N 33 /First 277 /Length 1555 /Filter /FlateDecode >> stream xڭX]S:}ϯsY}۞t&Ц  $|NL"oM -]YlDR,])#h* WHOac(agI3$L+M88 )ᚉp_d.`&'Br|Ae}.3")T#O$}[ɎDJ%Ձ"$}b8/Qӄႊ9Q;BIv$.G;JD4Ji w<{!K&b||sA^gD/u nqЂ" :&f$:;ⳉg8u,}2Iw~B;0\ShЁ%41 ӡYa! 
L C&)?m!pyU&8'hOՂ&@Qϫ@ ģ h VHd]hpyG&^ y*5HY**-kM ^UF i7ْQ%Wi>]F}ЃO > a2>3@ 6 SDaO8M<3 e?[ U  1"RG.8,+`jzi~8?*YeO9w Dnۙd/phԘ<Wp ?omҤoOGa$MfٿntYr%}aZڠ6~Nk .'WW ĕ(Ք|O3Bz"V1ֆ`q|GzýԴi:] >|'2 '.H n?c=40}0AԄ|,"^eh[aZD'=a5+شqqQL kYe:?:[FY(ImQ<=O=R"e\I=Il&.ׄ]ϤfZ./_vvDS fX!3K 66Z!|u=vj "4[kahex ׼*/u==/+3]_ u?vqkgl^+k茮^6^GղutybyK֤'UIzЯ?u5gA >h_H+V'y L_;ϗrnAʯtu5IՔ'Raxm2<˒=;8N3; fʟA;{m+P()Z-͜wF\RrX]SsBȍ1rۯp[[#I7Ɛ~ѺtfAt^`MJhg[u> b\w9^ endstream endobj 628 0 obj << /Type /XRef /Index [0 629] /Size 629 /W [1 3 1] /Root 626 0 R /Info 627 0 R /ID [<1FA3FA8AB5C4A0A75EAA25D210D52702> <1FA3FA8AB5C4A0A75EAA25D210D52702>] /Length 1700 /Filter /FlateDecode >> stream x%h%w11?M4%w߻3QcLbF55&MH][ʤZ:V07 me}ډ2XJD,|&++]<_/j1wԎ15=8H+̶CZjGv+ "TO #ԎP;BⒹ ;~كkUJ -ɳ#ώ<;j%Sȳ#ώ 0) a[i,counts] <- a[i,counts] + b[max(j), counts] } return(a) } z <- getSizeTable() zutils <- getSizeTable("spatstat.utils") zdata <- getSizeTable("spatstat.data") zlocal <- getSizeTable("spatstat", "spatstatlocalsize.txt") z <- mergeSizeTables(z, zutils) z <- mergeSizeTables(z, zdata) z <- mergeSizeTables(z, zlocal) # changes <- z[nrow(z), ] - z[z$version == "1.42-0", ] newobj <- changes[["nobjects"]] newdat <- changes[["ndatasets"]] + 1 # counting rule doesn't detect redwood3 ################################################### ### code chunk number 3: updates.Rnw:82-87 ################################################### options(SweaveHooks=list(fig=function() par(mar=0.2+c(2,4,2,0)))) Plot <- function(fmla, ..., dat=z) { yvals <- eval(as.expression(fmla[[2]]), envir=dat) plot(fmla, ..., data=dat, type="l", xlab="", lwd=2, ylim=c(0, max(yvals))) } ################################################### ### code chunk number 4: updates.Rnw:93-98 ################################################### getOption("SweaveHooks")[["fig"]]() Plot((Rlines + srclines)/1000 ~ date, ylab="Lines of code (x 1000)", main="Spatstat growth") lines(srclines/1000 ~ date, data=z) text(as.Date("2015-01-01"), 9.5, "C code") text(as.Date("2015-01-01"), 60, 
"R code") ################################################### ### code chunk number 5: updates.Rnw:1906-1910 ################################################### nbugs <- nrow(news(grepl("^BUG", Category), package="spatstat")) nbugssince <- nrow(news(Version > "1.42-0" & grepl("^BUG", Category), package="spatstat")) ################################################### ### code chunk number 6: updates.Rnw:1916-1917 (eval = FALSE) ################################################### ## news(grepl("^BUG", Category), package="spatstat") spatstat/inst/doc/updates.Rnw0000644000176200001440000023463313166356056016022 0ustar liggesusers\documentclass[11pt]{article} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} % \VignetteIndexEntry{Summary of Recent Updates to Spatstat} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} <>= library(spatstat) sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Summary of recent updates to \spst} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \thispagestyle{empty} This is a summary of changes that have been made to the \spst\ package since the publication of the accompanying book \cite{baddrubaturn15}. The book, published in December 2015, covers everything in \spst\ up to version \texttt{1.42-0}, released in May 2015. 
<>= readSizeTable <- function(fname) { if(is.null(fname) || !file.exists(fname)) return(NULL) a <- read.table(fname, header=TRUE) a$date <- as.Date(a$date) return(a) } getSizeTable <- function(packagename="spatstat", tablename="packagesizes.txt") { fname <- system.file("doc", tablename, package=packagename) readSizeTable(fname) } counts <- c("nhelpfiles", "nobjects", "ndatasets", "Rlines", "srclines") mergeSizeTables <- function(a, b) { if(is.null(b)) return(a) for(i in seq_len(nrow(a))) { j <- which(b$date <= a$date[i]) if(length(j) > 0) a[i,counts] <- a[i,counts] + b[max(j), counts] } return(a) } z <- getSizeTable() zutils <- getSizeTable("spatstat.utils") zdata <- getSizeTable("spatstat.data") zlocal <- getSizeTable("spatstat", "spatstatlocalsize.txt") z <- mergeSizeTables(z, zutils) z <- mergeSizeTables(z, zdata) z <- mergeSizeTables(z, zlocal) # changes <- z[nrow(z), ] - z[z$version == "1.42-0", ] newobj <- changes[["nobjects"]] newdat <- changes[["ndatasets"]] + 1 # counting rule doesn't detect redwood3 @ %$ The current version of \spst\ is \texttt{\Sexpr{sversion}}. It contains \Sexpr{newobj} new functions and \Sexpr{newdat} new datasets introduced after May 2015. This document summarises the most important changes. This document also lists all \emph{important} bugs detected \emph{since 2010}. 
<>= options(SweaveHooks=list(fig=function() par(mar=0.2+c(2,4,2,0)))) Plot <- function(fmla, ..., dat=z) { yvals <- eval(as.expression(fmla[[2]]), envir=dat) plot(fmla, ..., data=dat, type="l", xlab="", lwd=2, ylim=c(0, max(yvals))) } @ \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.45\textwidth} \centerline{ <>= Plot((Rlines + srclines)/1000 ~ date, ylab="Lines of code (x 1000)", main="Spatstat growth") lines(srclines/1000 ~ date, data=z) text(as.Date("2015-01-01"), 9.5, "C code") text(as.Date("2015-01-01"), 60, "R code") @ } \tableofcontents \newpage \section{\pkg{spatstat} is splitting into parts} \pkg{spatstat} is being split into several sub-packages, to satisfy the requirements of CRAN. This should not affect the user: existing code will continue to work in the same way. Currently there are two sub-packages, called \pkg{spatstat.utils} and \pkg{spatstat}. Typing \code{library(spatstat)} will load the familiar \pkg{spatstat} package which can be used as before, and will silently import the \pkg{spatstat.utils} package. The \pkg{spatstat.utils} package contains utility functions that were originally written for \pkg{spatstat}: they were undocumented internal functions in \pkg{spatstat}, but are now documented and accessible in a separate package because they may be useful for other purposes. To access these functions, you need to type \code{library(spatstat.utils)}. \section{Precis of all changes} Here is the text from the `overview' sections of the News and Release Notes for each update. \begin{itemize} \item \spst\ now Imports the package \pkg{spatstat.utils}. \item \spst\ now requires the package \pkg{spatstat.data} which contains the datasets. \item \spst\ now suggests the package \pkg{fftwtools}. \item Improvements to \texttt{ppm} and \texttt{update.ppm}. \item Correction to \texttt{lohboot} \item Numerous bug fixes for linear networks code. \item Now handles disconnected linear networks. \item Effect function is now available for all types of fitted model. 
\item Geometric-mean smoothing. \item A model can be fitted or re-fitted to a sub-region of data. \item New fast algorithm for kernel smoothing on a linear network. \item Leverage and influence diagnostics extended to Poisson/Gibbs models fitted by logistic composite likelihood. \item Two-stage Monte Carlo test. \item Dirichlet/Voronoi tessellation on a linear network. \item Thinning of point patterns on a linear network. \item More support for functions and tessellations on a linear network. \item Bandwidth selection for pair correlation function. \item Pooling operations improved. \item Operations on signed measures. \item Operations on lists of pixel images. \item Improved pixellation of point patterns. \item Stieltjes integral extended. \item Subset operators extended. \item Greatly accelerated \texttt{rmh} when using \texttt{nsave} \item Sufficient Dimension Reduction for point processes. \item Alternating Gibbs Sampler for point process simulation. \item New class of spatially sampled functions. \item ROC and AUC extended to other types of point patterns and models. \item More support for linear networks. \item More support for infinite straight lines. \item \spst\ now depends on the packages \pkg{nlme} and \pkg{rpart}. \item Important bug fix in \code{linearK}, \code{linearpcf} \item Changed internal format of \code{linnet} and \code{lpp} objects. \item Faster computation in linear networks. \item Bias correction techniques. \item Bounding circle of a spatial object. \item Option to plot marked points as arrows. \item Kernel smoothing accelerated. \item Workaround for bug in some graphics drivers affecting image orientation. \item Non-Gaussian smoothing kernels. \item Improvements to inhomogeneous multitype $K$ and $L$ functions. \item Variance approximation for pair correlation function. \item Leverage and influence for multitype point process models. \item Functions for extracting components of vector-valued objects. 
\item Recursive-partition point process models. \item Minkowski sum, morphological dilation and erosion with any shape. \item Minkowski sum also applicable to point patterns and line segment patterns. \item Important bug fix in Smooth.ppp \item Important bug fix in spatial CDF tests. \item More bug fixes for replicated patterns. \item Simulate a model fitted to replicated point patterns. \item Inhomogeneous multitype $F$ and $G$ functions. \item Summary functions recognise \texttt{correction="all"} \item Leverage and influence code handles bigger datasets. \item More support for pixel images. \item Improved progress reports. \item New dataset \texttt{redwood3} \item Fixed namespace problems arising when spatstat is not loaded. \item Important bug fix in leverage/influence diagnostics for Gibbs models. \item Surgery with linear networks. \item Tessellations on a linear network. \item Laslett's Transform. \item Colour maps for point patterns with continuous marks are easier to define. \item Pair correlation function estimates can be pooled. \item Stipulate a particular version of a package. \item More support for replicated point patterns. \item More support for tessellations. \item More support for multidimensional point patterns and point processes. \item More options for one-sided envelopes. \item More support for model comparison. \item Convexifying operation. \item Subdivide a linear network. \item Penttinen process can be simulated (by Metropolis-Hastings or CFTP). \item Calculate the predicted variance of number of points. \item Accelerated algorithms for linear networks. \item Quadrat counting accelerated, in some cases. \item Simulation algorithms have been accelerated; simulation outcomes are \emph{not} identical to those obtained from previous versions of \spst. \item Determinantal point process models. \item Random-effects and mixed-effects models for replicated patterns. \item Dao-Genton test, and corresponding simulation envelopes. 
\item Simulated annealing and simulated tempering. \item spatstat colour tools now handle transparent colours. \item Improvements to \verb![! and \texttt{subset} methods \item Extensions to kernel smoothing on a linear network. \item Support for one-dimensional smoothing kernels. \item Mark correlation function may include weights. \item Cross-correlation version of the mark correlation function. \item Penttinen pairwise interaction model. \item Improvements to simulation of Neyman-Scott processes. \item Improvements to fitting of Neyman-Scott models. \item Extended functionality for pixel images. \item Fitted intensity on linear network \item Triangulation of windows. \item Corrected an edge correction. \end{itemize} \section{New datasets} The following datasets have been added to the package. \begin{itemize} \item \texttt{austates}: The states and large mainland territories of Australia represented as polygonal regions forming a tessellation. \item \texttt{redwood3}: a more accurate version of the \texttt{redwood} data. \end{itemize} \section{New classes} \begin{itemize} \item \texttt{ssf}: Class of spatially sampled functions. \end{itemize} \section{New Functions} Following is a list of all the functions that have been added. \begin{itemize} \item \texttt{fitin.profilepl}: Extract the fitted interaction from a model fitted by profile likelihood. \item \verb![<-.linim!: Subset assignment method for pixel images on a linear network. \item \texttt{nnfromvertex}: Given a point pattern on a linear network, find the nearest data point from each vertex of the network. \item \texttt{tile.lengths}: Calculate the length of each tile in a tessellation on a network. \item \texttt{text.ppp}, \texttt{text.lpp}, \texttt{text.psp}: Methods for \texttt{text} for spatial patterns. \item \texttt{as.data.frame.envelope}: Extract function data from an envelope object, including the functions for the simulated data ('simfuns') if they were saved. 
\item \texttt{is.connected}, \texttt{is.connected.default}, \texttt{is.connected.linnet}: Determines whether a spatial object consists of one topologically connected piece, or several pieces. \item \texttt{is.connected.ppp}: Determines whether a point pattern is connected after all pairs of points closer than distance R are joined. \item \texttt{hist.funxy}: Histogram of values of a spatial function. \item \texttt{model.matrix.ippm}: Method for \texttt{model.matrix} which allows computation of regular and irregular score components. \item \texttt{harmonise.msr}: Convert several measures (objects of class \texttt{msr}) to a common quadrature scheme. \item \texttt{bits.test}: Balanced Independent Two-Stage Monte Carlo test, an improvement on the Dao-Genton test. \item \texttt{lineardirichlet}: Computes the Dirichlet-Voronoi tessellation associated with a point pattern on a linear network. \item \texttt{domain.lintess}, \texttt{domain.linfun}: Extract the linear network from a \texttt{lintess} or \texttt{linfun} object. \item \texttt{summary.lintess}: Summary of a tessellation on a linear network. \item \texttt{clicklpp}: Interactively add points on a linear network. \item \texttt{envelopeArray}: Ggenerate an array of envelopes using a function that returns \texttt{fasp} objects. \item \texttt{bw.pcf}: Bandwidth selection for pair correlation function. \item \texttt{grow.box3}: Expand a three-dimensional box. \item \texttt{hexagon}, \texttt{regularpolygon}: Create regular polygons. \item \texttt{Ops.msr}: Arithmetic operations for measures. \item \texttt{Math.imlist}, \texttt{Ops.imlist}, \texttt{Summary.imlist}, \texttt{Complex.imlist}: Arithmetic operations for lists of pixel images. \item \texttt{measurePositive}, \texttt{measureNegative}, \texttt{measureVariation}, \texttt{totalVariation}: Positive and negative parts of a measure, and variation of a measure. 
\item \texttt{as.function.owin}: Convert a spatial window to a \texttt{function(x,y)}, the indicator function. \item \texttt{as.function.ssf}: Convert an object of class \texttt{ssf} to a \texttt{function(x,y)} \item \texttt{as.function.leverage.ppm} Convert an object of class \texttt{leverage.ppm} to a \texttt{function(x,y)} \item \texttt{sdr}, \texttt{dimhat}: Sufficient Dimension Reduction for point processes. \item \texttt{simulate.rhohat}: Simulate a Poisson point process with the intensity estimated by \texttt{rhohat}. \item \texttt{rlpp}: Random points on a linear network with a specified probability density. \item \texttt{cut.lpp}: Method for \texttt{cut} for point patterns on a linear network. \item \texttt{has.close}: Faster way to check whether a point has a close neighbour. \item \texttt{psib}: Sibling probability (index of clustering strength in a cluster process). \item \texttt{rags}, \texttt{ragsAreaInter}, \texttt{ragsMultiHard}: Alternating Gibbs Sampler for point processes. \item \texttt{bugfixes}: List all bug fixes in recent versions of a package. 
\item \texttt{ssf}: Create a spatially sampled function \item \texttt{print.ssf}, \texttt{plot.ssf}, \texttt{contour.ssf}, \texttt{image.ssf}: Display a spatially sampled function \item \texttt{as.im.ssf}, \texttt{as.ppp.ssf}, \texttt{marks.ssf}, \verb!marks<-.ssf!, \texttt{unmark.ssf}, \verb![.ssf!, \texttt{with.ssf}: Manipulate data in a spatially sampled function \item \texttt{Smooth.ssf}: Smooth a spatially sampled function \item \texttt{integral.ssf}: Approximate integral of spatially sampled function \item \texttt{roc.kppm}, \texttt{roc.lppm}, \texttt{roc.lpp}: Methods for \texttt{roc} for fitted models of class \texttt{"kppm"} and \texttt{"lppm"} and point patterns of class \texttt{"lpp"} \item \texttt{auc.kppm}, \texttt{auc.lppm}, \texttt{auc.lpp}: Methods for \texttt{auc} for fitted models of class \texttt{"kppm"} and \texttt{"lppm"} and point patterns of class \texttt{"lpp"} \item \texttt{timeTaken}: Extract the timing data from a \texttt{"timed"} object or objects. \item \texttt{rotate.infline}, \texttt{shift.infline}, \texttt{reflect.infline}, \texttt{flipxy.infline}: Geometrical transformations for infinite straight lines. \item \texttt{whichhalfplane}: Determine which side of an infinite line a point lies on. \item \texttt{matrixpower}, \texttt{matrixsqrt}, \texttt{matrixinvsqrt}: Raise a matrix to any power. \item \texttt{points.lpp}: Method for \texttt{points} for point patterns on a linear network. \item \texttt{pairs.linim}: Pairs plot for images on a linear network. \item \texttt{closetriples}: Find close triples of points. \item \texttt{anyNA.im}: Method for \texttt{anyNA} for pixel images. \item \texttt{bc}: Bias correction (Newton-Raphson) for fitted model parameters. \item \texttt{rex}: Richardson extrapolation for numerical integrals and statistical model parameter estimates. \item \texttt{boundingcircle}, \texttt{boundingcentre}: Find the smallest circle enclosing a window or point pattern. \item \verb![.linim! 
: Subset operator for pixel images on a linear network. \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}: The mean, median, or quantiles of pixel values in a pixel image on a linear network. \item \texttt{weighted.median}, \texttt{weighted.quantile}: Median or quantile of numerical data with associated weights. \item \verb!"[.linim"!: Subset operator for pixel images on a linear network. \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}: The mean, median, or quantiles of pixel values in a pixel image on a linear network. \item \texttt{boundingcircle}, \texttt{boundingcentre}: Smallest circle enclosing a spatial object. \item \texttt{split.msr}: Decompose a measure into parts. \item \texttt{unstack.msr}: Decompose a vector-valued measure into its component measures. \item \texttt{unstack.ppp}, \texttt{unstack.psp}, \texttt{unstack.lpp}: Given a spatial pattern with several columns of marks, separate the columns and return a list of spatial patterns, each having only one column of marks. \item \texttt{kernel.squint}: Integral of squared kernel, for the kernels used in density estimation. \item \texttt{as.im.data.frame}: Build a pixel image from a data frame of coordinates and pixel values. \item \texttt{covering}: Cover a window using discs of a given radius. \item \texttt{dilationAny}, \texttt{erosionAny}, \verb!%(-)%! : Morphological dilation and erosion by any shape. \item \texttt{FmultiInhom}, \texttt{GmultiInhom} Inhomogeneous multitype/marked versions of the summary functions \texttt{Fest}, \texttt{Gest}. \item \texttt{kernel.moment} Moment or incomplete moment of smoothing kernel. \item \texttt{MinkowskiSum}, \verb!%(+)%!: Minkowski sum of two windows: \verb!A %(+)% B!, or \texttt{MinkowskiSum(A,B)} \item \texttt{nobjects}: New generic function for counting the number of 'things' in a dataset. There are methods for \texttt{ppp}, \texttt{ppx}, \texttt{psp}, \texttt{tess}. 
\item \texttt{parameters.interact}, \texttt{parameters.fii}: Extract parameters from interpoint interactions. (These existing functions are now documented.) \item \texttt{ppmInfluence}: Calculate \texttt{leverage.ppm}, \texttt{influence.ppm} and \texttt{dfbetas.ppm} efficiently. \item \texttt{rppm}, \texttt{plot.rppm}, \texttt{predict.rppm}, \texttt{prune.rppm}: Recursive-partition point process models. \item \texttt{simulate.mppm} Simulate a point process model fitted to replicated point patterns. \item \texttt{update.interact}: Update the parameters of an interpoint interaction. [This existing function is now documented.] \item \texttt{where.max}, \texttt{where.min} Find the spatial location(s) where a pixel image achieves its maximum or minimum value. \item \texttt{compileK}, \texttt{compilepcf}: make a $K$ function or pair correlation function given the pairwise distances and their weights. [These existing internal functions are now documented.] \item \texttt{laslett}: Laslett's Transform. \item \texttt{lintess}: Tessellation on a linear network. \item \texttt{divide.linnet}: Divide a linear network into pieces demarcated by a point pattern. \item \texttt{insertVertices}: Insert new vertices in a linear network. \item \texttt{thinNetwork}: Remove vertices and/or segments from a linear network etc. \item \texttt{connected.linnet}: Find connected components of a linear network. \item \texttt{nvertices}, \texttt{nvertices.linnet}, \texttt{nvertices.owin}: Count the number of vertices in a linear network or vertices of the boundary of a window. \item \texttt{as.data.frame.linim}, \texttt{as.data.frame.linfun}: Extract a data frame of spatial locations and function values from an object of class \texttt{linim} or \texttt{linfun}. \item \texttt{as.linfun}, \texttt{as.linfun.linim}, \texttt{as.linfun.lintess}: Convert other kinds of data to a \texttt{linfun} object. 
\item \texttt{requireversion}: Require a particular version of a package (for use in stand-alone R scripts). \item \texttt{as.function.tess}: Convert a tessellation to a \texttt{function(x,y)}. The function value indicates which tile of the tessellation contains the point $(x,y)$. \item \texttt{tileindex}: Determine which tile of a tessellation contains a given point $(x,y)$. \item \texttt{persp.leverage.ppm}: Method for persp plots for objects of class \texttt{leverage.ppm} \item \texttt{AIC.mppm}, \texttt{extractAIC.mppm}: AIC for point process models fitted to replicated point patterns. \item \texttt{nobs.mppm}, \texttt{terms.mppm}, \texttt{getCall.mppm}: Methods for point process models fitted to replicated point patterns. \item \texttt{rPenttinen}: Simulate the Penttinen process using perfect simulation. \item \texttt{varcount}: Given a point process model, compute the predicted variance of the number of points falling in a window. \item \texttt{inside.boxx}: Test whether multidimensional points lie inside a specified multidimensional box. \item \texttt{lixellate}: Divide each segment of a linear network into smaller segments. \item \texttt{nsegments.linnet}, \texttt{nsegments.lpp}: Count the number of line segments in a linear network. \item \texttt{grow.boxx}: Expand a multidimensional box. \item \texttt{deviance.ppm}, \texttt{deviance.lppm}: Deviance for a fitted point process model. \item \texttt{pseudoR2}: Pseudo-R-squared for a fitted point process model. \item \texttt{tiles.empty} Checks whether each tile of a tessellation is empty or nonempty. \item \texttt{summary.linim}: Summary for a pixel image on a linear network. \item Determinantal Point Process models: \begin{itemize} \item \texttt{dppm}: Fit a determinantal point process model. \item \texttt{fitted.dppm}, \texttt{predict.dppm}, \texttt{intensity.dppm}: prediction for a fitted determinantal point process model. 
\item \texttt{Kmodel.dppm}, \texttt{pcfmodel.dppm}: Second moments of a determinantal point process model. \item \texttt{rdpp}, \texttt{simulate.dppm}: Simulation of a determinantal point process model. \item \texttt{logLik.dppm}, \texttt{AIC.dppm}, \texttt{extractAIC.dppm}, \texttt{nobs.dppm}: Likelihood and AIC for a fitted determinantal point process model. \item \texttt{print.dppm}, \texttt{reach.dppm}, \texttt{valid.dppm}: Basic information about a \texttt{dpp} model. \item \texttt{coef.dppm}, \texttt{formula.dppm}, \texttt{print.dppm}, \texttt{terms.dppm}, \texttt{labels.dppm}, \texttt{model.frame.dppm}, \texttt{model.matrix.dppm}, \texttt{model.images.dppm}, \texttt{is.stationary.dppm}, \texttt{reach.dppm}, \texttt{unitname.dppm}, \verb!unitname<-.dppm!, \texttt{Window.dppm}: Various methods for \texttt{dppm} objects. \item \texttt{parameters.dppm}: Extract meaningful list of model parameters. \item \texttt{objsurf.dppm}: Objective function surface of a \texttt{dppm} object. \item \texttt{residuals.dppm}: Residual measure for a \texttt{dppm} object. \end{itemize} \item Determinantal Point Process model families: \begin{itemize} \item \texttt{dppBessel}, \texttt{dppCauchy}, \texttt{dppGauss}, \texttt{dppMatern}, \texttt{dppPowerExp}: Determinantal Point Process family functions. \item \texttt{detpointprocfamilyfun}: Create a family function. \item \texttt{update.detpointprocfamily}: Set parameter values in a determinantal point process model family. \item \texttt{simulate.dppm}: Simulation. \item \texttt{is.stationary.detpointprocfamily}, \texttt{intensity.detpointprocfamily}, \texttt{Kmodel.detpointprocfamily}, \texttt{pcfmodel.detpointprocfamily}: Moments. \item \texttt{dim.detpointprocfamily}, \texttt{dppapproxkernel}, \texttt{dppapproxpcf}, \texttt{dppeigen}, \texttt{dppkernel}, \texttt{dppparbounds}, \texttt{dppspecdenrange}, \texttt{dppspecden}: Helper functions. 
\end{itemize} \item \texttt{dg.envelope}: Simulation envelopes corresponding to Dao-Genton test. \item \texttt{dg.progress}: Progress plot (envelope representation) for the Dao-Genton test. \item \texttt{dg.sigtrace}: significance trace for the Dao-Genton test. \item \texttt{markcrosscorr}: Mark cross-correlation function for point patterns with several columns of marks. \item \texttt{rtemper}: Simulated annealing or simulated tempering. \item \texttt{rgb2hsva}: Convert RGB to HSV data, like \texttt{rgb2hsv}, but preserving transparency. \item \texttt{superimpose.ppplist}, \texttt{superimpose.splitppp}: New methods for 'superimpose' for lists of point patterns. \item \texttt{dkernel}, \texttt{pkernel}, \texttt{qkernel}, \texttt{rkernel}: Probability density, cumulative probability, quantiles and random generation from distributions used in basic one-dimensional kernel smoothing. \item \texttt{kernel.factor}: Auxiliary calculations for one-dimensional kernel smoothing. \item \texttt{spatdim}: Spatial dimension of any object in the \spst\ package. \item \texttt{as.boxx}: Convert data to a multi-dimensional box. \item \texttt{intensity.ppx}: Method for \texttt{intensity} for multi-dimensional space-time point patterns. \item \texttt{fourierbasis}: Evaluate Fourier basis functions in any number of dimensions. \item \texttt{valid}: New generic function, with methods \texttt{valid.ppm}, \texttt{valid.lppm}, \texttt{valid.dppm}. \item \texttt{emend}, \texttt{emend.ppm}, \texttt{emend.lppm}: New generic function with methods for \texttt{ppm} and \texttt{lppm}. \texttt{emend.ppm} is equivalent to \texttt{project.ppm}. \item \texttt{Penttinen}: New pairwise interaction model. \item \texttt{quantile.density}: Calculates quantiles from kernel density estimates. \item \texttt{CDF.density}: Calculates cumulative distribution function from kernel density estimates. \item \texttt{triangulate.owin}: decompose a spatial window into triangles. 
\item \texttt{fitted.lppm}: fitted intensity values for a point process on a linear network. \item \texttt{parameters}: Extract all parameters from a fitted model. \end{itemize} \section{Alphabetical list of changes} Here is a list of all changes made to existing functions, listed alphabetically. \begin{itemize} %%A \item \texttt{affine.owin}: Allows transformation matrix to be singular, if the window is polygonal. \item \texttt{anova.mppm}: Now handles Gibbs models, and performs the adjusted composite likelihood ratio test. New argument \texttt{fine}. \item \texttt{as.function.tess}: New argument \texttt{values} specifies the function values. \item \texttt{as.im.distfun}: New argument \texttt{approx} specifies the choice of algorithm. \item \texttt{as.im.function}: New argument \texttt{strict}. \item \texttt{as.layered}: Default method now handles a (vanilla) list of spatial objects. \item \texttt{as.linfun.lintess}: \begin{itemize} \item New argument \texttt{values} specifies the function value for each tile. \item New argument \texttt{navalue}. \end{itemize} \item \texttt{as.linim.default}: New argument \texttt{delta} controls spacing of sample points in internal data. \item \texttt{as.linnet.psp}: If the line segment pattern has marks, then the resulting linear network also carries these marks in the \verb!$lines! component. \item \texttt{as.owin}: Now refuses to convert a \code{box3} to a two-dimensional window. \item \texttt{as.owin.data.frame}: New argument \texttt{step} \item \texttt{as.polygonal}: \begin{itemize} \item Can now repair errors in polygon data, if \texttt{repair=TRUE}. \item Accelerated when \texttt{w} is a pixel mask. \end{itemize} \item \texttt{as.solist}: The argument \texttt{x} can now be a spatial object; \texttt{as.solist(cells)} is the same as \texttt{solist(cells)}. %%B \item \texttt{bdist.pixels}: Accelerated for polygonal windows. New argument \texttt{method}. \item \texttt{bind.fv}: New argument \texttt{clip}. 
\item \texttt{bw.ppl}: New arguments \texttt{weights} and \texttt{sigma}. \item \texttt{bw.diggle}, \texttt{bw.ppl}, \texttt{bw.relrisk}, \texttt{bw.smoothppp}, These functions now extract and store the name of the unit of length from the point pattern dataset. When the bandwidth selection criterion is plotted, the name of the unit of length is shown on the x-axis. %%C \item \texttt{cdf.test}: \begin{itemize} \item Calculations are more robust against numerical rounding effects. \item The methods for classes \texttt{ppp}, \texttt{ppm}, \texttt{lpp}, \texttt{lppm}, \texttt{slrm} have a new argument \texttt{interpolate}. \end{itemize} \item \texttt{cdf.test.mppm}: \begin{itemize} \item Now handles Gibbs models. \item Now recognises \texttt{covariate="x"} or \texttt{"y"}. \end{itemize} \item \texttt{clarkevans}: The argument \texttt{correction="all"} is now recognised: it selects all the available options. [This is also the default.] \item \texttt{clickpoly}: The polygon is now drawn progressively as the user clicks new vertices. \item \texttt{closepairs.ppp}, \texttt{closepairs.pp3}: \begin{itemize} \item New arguments \texttt{distinct} and \texttt{neat} allow more options. \item Argument \texttt{ordered} has been replaced by \texttt{twice} (but \texttt{ordered} is still accepted, with a warning). \item Performance improved (computation time and memory requirements reduced.) This should improve the performance of many functions in \texttt{spatstat}. \end{itemize} \item \texttt{clusterset}: Improved behaviour. \item \texttt{clusterfit}: New argument \texttt{algorithm} specifies the choice of optimisation algorithm. \item \texttt{collapse.fv}: This is now treated as a method for the \texttt{nlme} generic \texttt{collapse}. Its syntax has been adjusted slightly. \item \texttt{connected.im}: Now handles a logical-valued image properly. Arguments \texttt{...} now determine pixel resolution. 
\item \texttt{connected.owin}: Arguments \texttt{...} now determine pixel resolution. \item \texttt{contour.im}: New argument \texttt{col} specifies the colour of the contour lines. If \texttt{col} is a colour map, then the contours are drawn in different colours. \item \texttt{crossing.psp}: New argument \texttt{details} gives more information about the intersections between the segments. \item \texttt{cut.ppp}: Argument \texttt{z} can be \texttt{"x"} or \texttt{"y"} indicating one of the spatial coordinates. %%D \item \texttt{dclf.test, mad.test, dclf.progress, mad.progress,} \texttt{dclf.sigtrace, mad.sigtrace}, \texttt{dg.progress, dg.sigtrace}: \begin{itemize} \item New argument \texttt{clamp} determines the test statistic for one-sided tests. \item New argument \texttt{rmin} determines the left endpoint of the test interval. \item New argument \texttt{leaveout} specifies how to calculate discrepancy between observed and simulated function values. \item New argument \texttt{scale} allows summary function values to be rescaled before the comparison is performed. \item New argument \texttt{interpolate} supports interpolation of $p$-value. \item New argument \texttt{interpolate} supports interpolation of critical value of test. \end{itemize} \item \texttt{default.rmhcontrol, default.rmhexpand}: New argument \texttt{w}. \item \texttt{density.lpp}: \begin{itemize} \item New fast algorithm (up to 1000 times faster) for the default case where \texttt{kernel="gaussian"} and \texttt{continuous=TRUE}. Generously contributed by Greg McSwiggan. \item Fast algorithm has been further accelerated. \item New argument \texttt{kernel} specifies the smoothing kernel. Any of the standard one-dimensional smoothing kernels can be used. \item Now supports both the `equal-split continuous' and `equal-split discontinuous' smoothers. New argument \texttt{continuous} determines the choice of smoother. \item New arguments \texttt{weights} and \texttt{old}. 
\end{itemize} \item \texttt{density.ppp}: \begin{itemize} \item A non-Gaussian kernel can now be specified using the argument \texttt{kernel}. \item Argument \texttt{weights} can now be a pixel image. \item Accelerated by about 30\% when \texttt{at="pixels"}. \item Accelerated by about 15\% in the case where \texttt{at="points"} and \texttt{kernel="gaussian"}. \item Accelerated in the cases where weights are given or \texttt{diggle=TRUE}. \item New argument \texttt{verbose}. \end{itemize} \item \texttt{density.psp}: \begin{itemize} \item New argument \texttt{method}. \item Accelerated by 1 to 2 orders of magnitude. \end{itemize} \item \texttt{dfbetas.ppm}: For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. \item \texttt{diagnose.ppm}: Infinite values of \texttt{rbord} are now ignored and treated as zero. This ensures that \texttt{diagnose.ppm} has a sensible default when the fitted model has infinite reach. \item \texttt{diagnose.ppm, plot.diagppm}: New arguments \texttt{col.neg, col.smooth} control the colour maps. \item \texttt{dilation.ppp}: Improved geometrical accuracy. Now accepts arguments to control resolution of polygonal approximation. \item \texttt{discs}: \begin{itemize} \item Now accepts a single numeric value for \texttt{radii}. \item New argument \texttt{npoly}. \item Accelerated in some cases. \end{itemize} \item \texttt{distfun}: When the user calls a distance function that was created by \texttt{distfun}, the user may now give a \texttt{ppp} or \texttt{lpp} object for the argument \texttt{x}, instead of giving two coordinate vectors \texttt{x} and \texttt{y}. %%E \item \texttt{edge.Trans}: New argument \texttt{gW} for efficiency. \item \texttt{effectfun}: Now works for \texttt{ppm}, \texttt{kppm}, \texttt{lppm}, \texttt{dppm}, \texttt{rppm} and \texttt{profilepl} objects. 
\item \texttt{envelope}: \begin{itemize} \item New argument \texttt{clamp} gives greater control over one-sided envelopes. \item New argument \texttt{funargs} \item New argument \texttt{scale} allows global envelopes to have width proportional to a specified function of $r$, rather than constant width. \item New argument \texttt{funYargs} contains arguments to the summary function when applied to the data pattern only. \end{itemize} \item \texttt{envelope.lpp}, \texttt{envelope.lppm}: New arguments \texttt{fix.n} and \texttt{fix.marks} allow envelopes to be computed using simulations conditional on the observed number of points. \item \texttt{ewcdf}: Argument \texttt{weights} can now be \texttt{NULL}. %%F \item \texttt{Fest}: Additional checks for errors in input data. \item \texttt{fitted.lppm}: New argument \texttt{leaveoneout} allows leave-one-out computation of fitted value. \item \texttt{fitted.ppm}: New option, \texttt{type="link"}. \item \texttt{funxy}: When the user calls a function that was created by \texttt{funxy}, the user may now give a \texttt{ppp} or \texttt{lpp} object for the argument \texttt{x}, instead of giving two coordinate vectors \texttt{x} and \texttt{y}. %%G \item \texttt{Geyer}: The saturation parameter \texttt{sat} can now be less than 1. \item \texttt{grow.rectangle}: New argument \texttt{fraction}. %%H \item \texttt{Hest}: \begin{itemize} \item Argument \texttt{X} can now be a pixel image with logical values. \item New argument \texttt{W}. [Based on code by Kassel Hingee.] \item Additional checks for errors in input data. \end{itemize} \item \texttt{hist.im}: New argument \texttt{xname}. %%I \item \texttt{identify.psp}: Improved placement of labels. Arguments can be passed to \texttt{text.default} to control the plotting of labels. \item \texttt{influence.ppm}: For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. 
\item \texttt{integral.linfun}: New argument \texttt{delta} controls step length of approximation to integral. \item \texttt{intensity.ppm}: Intensity approximation is now implemented for area-interaction model, and Geyer saturation model. \item \texttt{ippm}: \begin{itemize} \item Accelerated. \item The internal format of the result has been extended slightly. \item Improved defaults for numerical algorithm parameters. \end{itemize} %%J %%K \item \texttt{Kcross.inhom}, \texttt{Kdot.inhom}, \texttt{Kmulti.inhom}: These functions now allow intensity values to be given by a fitted point process model. New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}. \item \texttt{Kest} Accelerated computation (for translation and rigid corrections) when window is an irregular shape. \item \texttt{Kest.fft}: Now has \verb!...! arguments allowing control of spatial resolution. \item \texttt{Kinhom}: \begin{itemize} \item New argument \texttt{ratio}. \item Stops gracefully if \texttt{lambda} contains any zero values. \end{itemize} \item \texttt{kppm}: \begin{itemize} \item Fitting a model with \texttt{clusters="LGCP"} no longer requires the package \pkg{RandomFields} to be loaded explicitly. \item New argument \texttt{algorithm} specifies the choice of optimisation algorithm. \item Left hand side of formula can now involve entries in the list \texttt{data}. \item refuses to fit a log-Gaussian Cox model with anisotropic covariance. \item A warning about infinite values of the summary function no longer occurs when the default settings are used. Also affects \texttt{mincontrast}, \texttt{cauchy.estpcf}, \texttt{lgcp.estpcf}, \texttt{matclust.estpcf}, \texttt{thomas.estpcf}, \texttt{vargamma.estpcf}. \item Improved printed output. \end{itemize} %%L \item \texttt{Lcross.inhom}, \texttt{Ldot.inhom}: These functions now allow intensity values to be given by a fitted point process model. New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}. 
\item \texttt{lengths.psp}: New argument \texttt{squared}. \item \texttt{leverage.ppm}: For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: These methods now work for models that were fitted by logistic composite likelihood (\texttt{method='logi'}). \item \texttt{linearK}, \texttt{linearpcf} and relatives: \begin{itemize} \item substantially accelerated. \item ratio calculations are now supported. \item new argument \texttt{ratio}. \end{itemize} \item \texttt{linearKinhom}: New argument \texttt{normpower}. \item \texttt{linearKinhom}, \texttt{linearpcfinhom}: \begin{itemize} \item Changed behaviour when \texttt{lambda} is a fitted model. \item New arguments \texttt{update} and \texttt{leaveoneout}. \end{itemize} \item \texttt{linearpcf}: New argument \texttt{normpower}. \item \texttt{linim}: \begin{itemize} \item The image \texttt{Z} is now automatically restricted to the network. \item New argument \texttt{restrict}. \end{itemize} \item \texttt{linnet}: \begin{itemize} \item The internal format of a \texttt{linnet} (linear network) object has been changed. Existing datasets of class \texttt{linnet} are still supported. However, computation will be faster if they are converted to the new format. To convert a linnet object \texttt{L} to the new format, use \verb!L <- as.linnet(L)!. \item If the argument \texttt{edges} is given, then this argument now determines the ordering of the sequence of line segments. For example, the \texttt{i}-th row of \texttt{edges} specifies the \texttt{i}-th line segment in \texttt{as.psp(L)}. \item New argument \texttt{warn}. \end{itemize} \item \texttt{lintess}: Argument \texttt{df} can be missing or \texttt{NULL}, resulting in a tessellation with only one tile. \item \texttt{logLik.ppm}: \begin{itemize} \item New argument \texttt{absolute}. 
\item The warning about pseudolikelihood (`log likelihood not available') is given only once, and is not repeated in subsequent calls, within a spatstat session. \end{itemize} \item \texttt{logLik.mppm}: new argument \texttt{warn}. \item \texttt{lohboot}: Algorithm has been corrected and extended thanks to Christophe Biscio and Rasmus Waagepetersen. New arguments \texttt{block}, \texttt{basicboot}, \texttt{Vcorrection}. \item \texttt{lpp}: \begin{itemize} \item The internal format of an \texttt{lpp} object has been changed. Existing datasets of class \texttt{lpp} are still supported. However, computation will be faster if they are converted to the new format. To convert an \texttt{lpp} object \texttt{X} to the new format, use \verb!X <- as.lpp(X)!. \item \texttt{X} can be missing or \texttt{NULL}, resulting in an empty point pattern. \end{itemize} \item \texttt{lpp}, \texttt{as.lpp}: These functions now handle the case where coordinates \texttt{seg} and \texttt{tp} are given but \texttt{x} and \texttt{y} are missing. \item \texttt{lppm}: \begin{itemize} \item New argument \texttt{random} controls placement of dummy points. \item Computation accelerated. \end{itemize} %%M \item \texttt{markcorr}: New argument \texttt{weights} allows computation of the weighted version of the mark correlation function. \item \texttt{mppm}: \begin{itemize} \item Now handles models with a random effect component. (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.) \item New argument \texttt{random} is a formula specifying the random effect. (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.) \item Performs more checks for consistency of the input data. \item New arguments \texttt{gcontrol} and \texttt{reltol.pql} control the fitting algorithm. \end{itemize} %%N \item \texttt{nbfires}: \begin{itemize} \item the unit of length for the coordinates is now specified in this dataset. \item This dataset now includes information about the different land and sea borders of New Brunswick. 
\end{itemize} \item \texttt{nndist.lpp, nnwhich.lpp, nncross.lpp, distfun.lpp}: New argument \texttt{k} allows computation of $k$-th nearest point. Computation accelerated. \item \texttt{nnfun.lpp}: New argument \texttt{k}. %%O %%P \item \texttt{padimage}: New argument \texttt{W} allows an image to be padded out to fill any window. \item \texttt{pcf.ppp}: \begin{itemize} \item New argument \texttt{close} for advanced use. \item New argument \texttt{ratio} allows several estimates of pcf to be pooled. \item Now calculates an analytic approximation to the variance of the estimate of the pair correlation function (when \texttt{var.approx=TRUE}). \item Now returns the smoothing bandwidth used, as an attribute of the result. \end{itemize} \item \texttt{pcfinhom}: \begin{itemize} \item New argument \texttt{close} for advanced use. \item Default behaviour is changed when \texttt{lambda} is a fitted model. The default is now to re-fit the model to the data before computing pcf. New arguments \texttt{update} and \texttt{leaveoneout} control this. \end{itemize} \item \texttt{pixellate.ppp}: \begin{itemize} \item If the pattern is empty, the result is an integer-valued image (by default) for consistency with the results for non-empty patterns. \item Accelerated in the case where weights are given. \item New arguments \texttt{fractional} and \texttt{preserve} for more accurate discretisation. \end{itemize} \item \texttt{plot.anylist}: \begin{itemize} \item If a list entry \verb!x[[i]]! belongs to class \texttt{"anylist"}, it will be expanded so that each entry \verb!x[[i]][[j]]! will be plotted as a separate panel. \item New arguments \texttt{panel.begin.args}, \texttt{panel.end.args}. \item Result is now an (invisible) list containing the result from executing the plot of each panel. \end{itemize} \item \texttt{plot.im}: \begin{itemize} \item Now handles complex-valued images. 
\item New argument \texttt{workaround} to avoid a bug in some MacOS device drivers that causes the image to be displayed in the wrong spatial orientation. \item The number of tick marks in the colour ribbon can now be controlled using the argument \texttt{nint} in \texttt{ribargs}. \end{itemize} \item \texttt{plot.imlist}: Result is now an (invisible) list containing the result from executing the plot of each panel. \item \texttt{plot.influence.ppm}: New argument \texttt{multiplot}. \item \texttt{plot.kppm}: \begin{itemize} \item New arguments \texttt{pause} and \texttt{xname}. \item The argument \texttt{what="all"} is now recognised: it selects all the available options. [This is also the default.] \end{itemize} \item \texttt{plot.leverage.ppm}: \begin{itemize} \item New argument \texttt{multiplot}. \item A contour line showing the average value of leverage is now drawn on the colour ribbon, as well as on the main image. New argument \texttt{args.contour}. \end{itemize} \item \texttt{plot.linfun}: \begin{itemize} \item Now passes arguments to the function being plotted. \item A scale bar is now plotted when \texttt{style="width"}. \item New argument \texttt{legend}. \item The return value has a different format. \end{itemize} \item \texttt{plot.linim}: \begin{itemize} \item The return value has a different format. \item A scale bar is now plotted when \texttt{style="width"}. \item When \texttt{style="width"}, negative values are plotted in red (by default). New argument \texttt{negative.args} controls this. \item New argument \texttt{zlim} specifies the range of values to be mapped. \end{itemize} \item \texttt{plot.lintess}: Improved plot method, with more options. \item \texttt{plot.lpp}: \begin{itemize} \item New argument \texttt{show.network}. 
\item For a point pattern with continuous marks (``real numbers'') the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now be vectors of colour values, and will be used to determine the default colour map for the marks. \end{itemize} \item \texttt{plot.mppm}: New argument \texttt{se}. \item \texttt{plot.msr}: \begin{itemize} \item Now handles multitype measures. \item New argument \texttt{multiplot}. \item New argument \texttt{massthresh}. \item New arguments \texttt{equal.markscale} and \texttt{equal.ribbon}. \end{itemize} \item \texttt{plot.pp3}: New arguments \texttt{box.front}, \texttt{box.back} control plotting of the box. \item \texttt{plot.ppp}: \begin{itemize} \item The default colour for the points is now a transparent grey, if this is supported by the plot device. \item For a point pattern with continuous marks (``real numbers'') the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now be vectors of colour values, and will be used to determine the default colour map for the marks. \item Now recognises graphics parameters for text, such as \texttt{family} and \texttt{srt} \item When \texttt{clipwin} is given, any parts of the boundary of the window of \texttt{x} that lie inside \texttt{clipwin} will also be plotted. \end{itemize} \item \texttt{plot.profilepl} ,\texttt{plot.quadratcount}, \texttt{plot.quadrattest}, \texttt{plot.tess}: Now recognise graphics parameters for text, such as \texttt{family} and \texttt{srt} \item \texttt{plot.solist}: \begin{itemize} \item New arguments \texttt{panel.begin.args}, \texttt{panel.end.args} \item Result is now an (invisible) list containing the result from executing the plot of each panel. \end{itemize} \item \texttt{plot.symbolmap}: New argument \texttt{nsymbols} controls the number of symbols plotted. \item \code{ponderosa}: In this installed dataset, the function \code{ponderosa.extra\$plotit} has changed slightly (to accommodate the dependence on the package \pkg{spatstat.utils}). 
\item \texttt{polynom}: This function now has a help file. \item \texttt{pool.fv}: \begin{itemize} \item The default plot of the pooled function no longer includes the variance curves. \item New arguments \texttt{relabel} and \texttt{variance}. \end{itemize} \item \texttt{pool.rat}: New arguments \texttt{weights}, \texttt{relabel} and \texttt{variance}. \item \texttt{ppm}: \begin{itemize} \item Argument \code{interaction} can now be a function that makes an interaction, such as \code{Poisson}, \code{Hardcore}, \code{MultiHard}. \item Argument \texttt{subset} can now be a window (class \texttt{"owin"}) specifying the sub-region of data to which the model should be fitted. \end{itemize} \item \texttt{ppm.ppp, ppm.quad}: \begin{itemize} \item New argument \texttt{emend}, equivalent to \texttt{project}. \item New arguments \texttt{subset} and \texttt{clipwin}. \end{itemize} \item \texttt{ppp}: \begin{itemize} \item New argument \texttt{checkdup}. \item If the coordinate vectors \code{x} and \code{y} contain \code{NA}, \code{NaN} or infinite values, these points are deleted with a warning, instead of causing a fatal error. \end{itemize} \item \texttt{predict.kppm, residuals.kppm} Now issues a warning when the calculation ignores the cluster/Cox component and treats the model as if it were Poisson. (This currently happens in predict.kppm when se=TRUE or interval != "none", and in residuals.kppm when type != "raw"). \item \texttt{predict.lppm}: Argument \texttt{locations} can now be an \texttt{lpp} object. \item \texttt{predict.mppm}: The argument \texttt{type="all"} is now recognised: it selects all the available options. [This is also the default.] \item \texttt{predict.rhohat}: New argument \texttt{what} determines which value should be calculated: the function estimate, the upper/lower confidence limits, or the standard error. \item \texttt{print.linim}: More information is printed. \item \texttt{print.quad}: More information is printed. 
\item \texttt{progressreport}: \begin{itemize} \item Behaviour improved. \item New arguments \texttt{state}, \texttt{tick}, \texttt{showtime}. \item New option: \verb!style="tk"! \end{itemize} %%Q \item \texttt{quadratcount.ppp}: Computation accelerated in some cases. \item \texttt{quadrat.test.ppm}: Computation accelerated in some cases. \item \texttt{quantile.ewcdf}: The function is now normalised to the range \verb![0,1]! before the quantiles are computed. This can be suppressed by setting \texttt{normalise=FALSE}. \item \texttt{qqplot.ppm}: Argument \texttt{expr} can now be a list of point patterns, or an envelope object containing a list of point patterns. %%R \item \texttt{rcellnumber}: New argument \texttt{mu}. \item \texttt{rgbim, hsvim}: New argument \texttt{A} controls the alpha (transparency) channel. \item \texttt{rgb2hex, col2hex, paletteindex, is.colour, samecolour,} \texttt{complementarycolour, is.grey, to.grey}: These colour tools now handle transparent colours. \item \texttt{rgb2hex}: New argument \texttt{maxColorValue}. \item \texttt{rhohat}: New argument \texttt{subset} allows computation for a subset of the data. \item \texttt{rhohat.lpp}: New argument \texttt{random} controls placement of dummy points. \item \texttt{rLGCP}: This function no longer requires the package \pkg{RandomFields} to be loaded explicitly. \item \texttt{rMaternI, rMaternII}: These functions can now generate random patterns in three dimensions and higher dimensions, when the argument \texttt{win} is of class \texttt{box3} or \texttt{boxx}. \item \texttt{rmh}: Accelerated, in the case where multiple patterns are saved using \texttt{nsave}. \item \texttt{rmh.ppm, rmhmodel.ppm, simulate.ppm}: A model fitted using the \texttt{Penttinen} interaction can now be simulated. \item \texttt{rmh.default, rmhmodel.default}: \begin{itemize} \item These functions now recognise \verb!cif='penttinen'! for the Penttinen interaction. \item New arguments \texttt{nsim}, \texttt{saveinfo}. 
\end{itemize} \item \texttt{rmhcontrol}: New parameter \texttt{pstage} determines when to generate random proposal points. \item \texttt{rose.default} New argument \texttt{weights}. \item \texttt{rose} New arguments \texttt{start} and \texttt{clockwise} specify the convention for measuring and plotting angles. \item \texttt{rotmean}: New argument \texttt{padzero}. Default behaviour has changed. \item \texttt{rpoispp}: Accelerated, when \texttt{lambda} is a pixel image. \item \texttt{rpoisppx}: New argument \code{drop}. \item \texttt{rpoisline}: Also returns information about the original infinite random lines. \item \texttt{rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS, rPenttinen:} New argument \texttt{drop}. \item \texttt{rthin} \begin{itemize} \item Accelerated, when \texttt{P} is a single number. \item \texttt{X} can now be a point pattern on a linear network (class \texttt{lpp}). \end{itemize} \item \texttt{rThomas, rMatClust, rCauchy, rVarGamma}: \begin{itemize} \item When the model is approximately Poisson, it is simulated using rpoispp. This avoids computations which would require huge amounts of memory. New argument \texttt{poisthresh} controls this behaviour. \item New argument \texttt{saveparents}. \end{itemize} \item \texttt{runifpointx}: New argument \code{drop}. %%S \item Simulation: Several basic simulation algorithms have been accelerated. Consequently, simulation outcomes are not identical to those obtained with previous versions of \spst, even when the same random seed is used. To ensure compatibility with previous versions of spatstat, revert to the slower code by setting \texttt{spatstat.options(fastthin=FALSE, fastpois=FALSE)}. \item \code{shapley}: In this installed dataset, the function \code{shapley.extra\$plotit} has changed slightly (to accommodate the dependence on the package \pkg{spatstat.utils}). \item \texttt{simulate.ppm} New argument \texttt{w} controls the window of the simulated patterns. New argument \texttt{verbose}. 
\item \texttt{Smooth.ppp}: \begin{itemize} \item A non-Gaussian kernel can now be specified using the argument \texttt{kernel}. \item Argument \texttt{weights} can now be a pixel image. \item Accelerated by about 30\% in the case where \texttt{at="pixels"}. \item Accelerated by about 15\% in the case where \texttt{at="points"} and \texttt{kernel="gaussian"}. \item Now exits gracefully if any mark values are \texttt{NA}, \texttt{NaN} or \texttt{Inf}. \item New argument \texttt{geometric} supports geometric-mean smoothing. \end{itemize} \item \texttt{spatstat.options} New options \texttt{fastthin} and \texttt{fastpois} enable fast simulation algorithms. Set these options to \texttt{FALSE} to reproduce results obtained with previous versions of \spst. \item \texttt{split.ppp} The splitting variable \texttt{f} can now be a logical vector. \item \texttt{square}: Handles a common error in the format of the arguments. \item \texttt{step}: now works for models of class \texttt{"mppm"}. \item \texttt{stieltjes}: Argument \texttt{M} can be a stepfun object (such as an empirical CDF). \item \texttt{subset.ppp}, \texttt{subset.lpp}, \texttt{subset.pp3}, \texttt{subset.ppx}: The argument \texttt{subset} can now be any argument acceptable to the \verb!"["! method. \item summary functions The argument \texttt{correction="all"} is now recognised: it selects all the available options. 
\begin{quote} This applies to \texttt{Fest}, \texttt{F3est}, \texttt{Gest}, \texttt{Gcross}, \texttt{Gdot}, \texttt{Gmulti}, \texttt{G3est}, \texttt{Gfox}, \texttt{Gcom}, \texttt{Gres}, \texttt{Hest}, \texttt{Jest}, \texttt{Jmulti}, \texttt{Jcross}, \texttt{Jdot}, \texttt{Jfox}, \texttt{Kest}, \texttt{Kinhom}, \texttt{Kmulti}, \texttt{Kcross}, \texttt{Kdot}, \texttt{Kcom}, \texttt{Kres}, \texttt{Kmulti.inhom}, \texttt{Kcross.inhom}, \texttt{Kdot.inhom}, \texttt{Kscaled}, \texttt{Ksector}, \texttt{Kmark}, \texttt{K3est}, \texttt{Lscaled}, \texttt{markcorr}, \texttt{markcrosscorr}, \texttt{nnorient}, \texttt{pairorient}, \texttt{pcfinhom}, \texttt{pcfcross.inhom}, \texttt{pcfcross}, \texttt{pcf}, \texttt{Tstat}. \end{quote} \item \texttt{Summary.linim}: Recognises the argument \texttt{finite} so that \texttt{range(x, finite=TRUE)} works for a linim object \texttt{x}. \item \texttt{summary.ppm}: New argument \texttt{fine} selects the algorithm for variance estimation. \item \texttt{summary.owin}, \texttt{summary.im}: The fraction of frame area that is occupied by the window/image is now reported. \item \texttt{sumouter}: New argument \texttt{y} allows computation of asymmetric outer products. \item \texttt{symbolmap}: \begin{itemize} \item Now accepts a vector of colour values for the arguments \texttt{col}, \texttt{cols}, \texttt{fg}, \texttt{bg} if the argument \texttt{range} is given. \item New option: \texttt{shape="arrows"}. \end{itemize} %%T \item \texttt{tess}: Argument \texttt{window} is ignored when xgrid, ygrid are given. \item \texttt{texturemap}: Argument \texttt{textures} can be missing or NULL. \item \texttt{textureplot}: Argument \texttt{x} can now be something acceptable to \texttt{as.im}. \item \texttt{to.grey} New argument \texttt{transparent}. %%U \item \texttt{union.owin}: Improved behaviour when there are more than 2 windows. \item \texttt{update}: now works for models of class \texttt{"mppm"}. 
\item \texttt{update.kppm}: \begin{itemize} \item New argument \texttt{evaluate}. \item Now handles additional arguments in any order, with or without names. \item Changed arguments. \item Improved behaviour. \end{itemize} \item \texttt{update.ppm}: For the case \texttt{update(model, X)} where \texttt{X} is a point pattern, if the window of \texttt{X} is different from the original window, then the model is re-fitted from scratch (i.e. \texttt{use.internal=FALSE}). %%V \item \texttt{valid.ppm} This is now a method for the generic function \texttt{valid}. \item \texttt{vcov.mppm}: Now handles models with Gibbs interactions. \item \texttt{vcov.ppm}: Performance slightly improved, for Gibbs models. %%W %%X %%Y %%Z \item \verb![<-.im! Accepts an array for \texttt{value}. \item \verb![.im! The subset index \texttt{i} can now be a linear network. Then the result of \verb!x[i, drop=FALSE]! is a pixel image of class \texttt{linim}. \item \verb![.layered!: \begin{itemize} \item Subset index \texttt{i} can now be an \texttt{owin} object. \item Additional arguments \verb!...! are now passed to other methods. \end{itemize} \item \verb![.leverage.ppm!: New argument \texttt{update}. \item \verb![.linnet!: \begin{itemize} \item New argument \texttt{snip} determines what to do with segments of the network that cross the boundary of the window. Default behaviour has changed. \item More robust against artefacts when the subset index is a pixel mask. \end{itemize} \item \verb![.linim!: More robust against artefacts. \item \verb![.lpp!: New argument \texttt{snip} determines what to do with segments of the network that cross the boundary of the window. Default behaviour has changed. \item \verb![.ppx!: The subset index \texttt{i} may now be a spatial domain of class \texttt{boxx} or \texttt{box3}. \item \verb![.ppp! New argument \texttt{clip} determines whether the window is clipped. \item \verb![.ppp! 
The previously-unused argument \texttt{drop} now determines whether to remove unused levels of a factor. \item \verb![.pp3!, \verb![.lpp!, \verb![.ppx!, \texttt{subset.ppp, subset.pp3, subset.lpp, subset.ppx}: These methods now have an argument \texttt{drop} which determines whether to remove unused levels of a factor. \item \verb![.psp!: New argument \texttt{fragments} specifies whether to keep fragments of line segments that are cut by the new window, or only to retain segments that lie entirely inside the window. \item \verb![.solist!: Subset index \texttt{i} can now be an \texttt{owin} object. \end{itemize} \section{Serious Bugs Fixed} <>= nbugs <- nrow(news(grepl("^BUG", Category), package="spatstat")) nbugssince <- nrow(news(Version > "1.42-0" & grepl("^BUG", Category), package="spatstat")) @ Hundreds of bugs have been detected and fixed in \spst. Bugs that may have affected the user are listed in the package \texttt{NEWS} file. To read all these bug reports, type <>= news(grepl("^BUG", Category), package="spatstat") @ which currently produces a list of \Sexpr{nbugs} bugs, of which \Sexpr{nbugssince} were detected after publication of the book \cite{baddrubaturn15}. Following is a list of the {\bf most serious bugs} only, in order of potential impact. \newcommand\bugger[4]{% \\ {} % {\small (Bug introduced in \texttt{spatstat {#1}}, {#2}; % fixed in \texttt{spatstat {#3}}, {#4})}% } \begin{itemize} %% LEVEL 1: always completely wrong, broad impact \item \texttt{nncross.ppp}: Results were completely incorrect if $k > 1$. \bugger{1.31-2}{april 2013}{1.35-0}{december 2013} \item \texttt{nncross.pp3}: Results were completely incorrect in some cases. \bugger{1.32-0}{august 2013}{1.34-0}{october 2013} \item \texttt{cdf.test.ppm}: Calculation of $p$-values was incorrect for Gibbs models: $1-p$ was computed instead of $p$. \bugger{1.40-0}{december 2014}{1.45-2}{may 2016} \item \texttt{Smooth.ppp}: Results of \verb!Smooth(X, at="points", leaveoneout=FALSE)! 
were completely incorrect. \bugger{1.20-5}{august 2010}{1.46-0}{july 2016} \item \texttt{rmh}: \begin{itemize} \item Simulation was completely incorrect in the case of a multitype point process with an interaction that does not depend on the marks, such as \verb!ppm(betacells, ~marks, Strauss(60))! due to a coding error in the \texttt{C} interface. \bugger{1.22-3}{march 2010}{1.22-3}{june 2011} \item Simulation of the Area-Interaction model was completely incorrect. \bugger{1.23-6}{october 2011}{1.31-0}{january 2013} \item Simulation of the Geyer saturation process was completely incorrect. \bugger{1.31-0}{january 2013}{1.31-1}{march 2013} \item Simulation of the Strauss-Hard Core process was partially incorrect, giving point patterns with a slightly lower intensity. \bugger{1.31-0}{january 2013}{1.37-0}{may 2014} \item The result of simulating a model with a hard core did not necessarily respect the hard core constraint, and simulation of a model with strong inhibition did not necessarily converge. This only happened if the first order trend was large, the starting state (\texttt{n.start} or \texttt{x.start}) was not given, and the number of iterations \texttt{nrep} was not very large. It occurred because of a poor choice for the default starting state. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.40-0}, december 2014)} \item Simulation was incorrect in the case of an inhomogeneous multitype model with \texttt{fixall=TRUE} (i.e.\ with a fixed number of points of each type) if the model was segregated (i.e.\ if different types of points had different first order trend). The effect of the error was that all types of points had the same first order trend. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.43-0}, september 2015)} \item Simulation of the Geyer saturation process was incorrectly initialised, so that the results of a short run (i.e. small value of \texttt{nrep}) were incorrect, while long runs were correct. 
\bugger{1.17-0}{october 2009}{1.31-1}{march 2013} \end{itemize} \item \texttt{rVarGamma}: Simulations were incorrect; they were generated using the wrong value of the parameter \texttt{nu.ker}. \bugger{1.25-0}{december 2011}{1.35-0}{december 2013} \item \texttt{rCauchy}: Simulations were incorrect; they were generated using the wrong value of the parameter \texttt{omega}. \bugger{1.25-0}{december 2011}{1.25-2}{january 2012} \item \texttt{lppm}: For multitype patterns, the fitted model was completely incorrect due to an error in constructing the quadrature scheme. \bugger{1.23-0}{july 2011}{1.30-0}{december 2012} \item \verb![.lpp!: The local coordinate \texttt{seg} was completely incorrect, when \texttt{i} was a window. \bugger{1.31-2}{april 2013}{1.45-0}{march 2016} \item \texttt{lohboot}: Implementation was completely incorrect. \bugger{1.26-1}{april 2012}{1.53-2}{october 2017} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Results were incorrect for non-Poisson processes due to a mathematical error. \bugger{1.25-0}{december 2011}{1.51-0}{may 2017} %% LEVEL 2: often completely wrong, moderate impact \item \texttt{bw.pcf}: Results were totally incorrect due to a typo. \bugger{1.51-0}{may 2017}{1.52-0}{august 2017} \item \texttt{predict.rho2hat}: Results were incorrect for a \texttt{rho2hat} object computed from a point pattern. \bugger{1.42-0}{may 2015}{1.52-0}{august 2017} \item \texttt{envelope.ppm}: If the model was an inhomogeneous Poisson process, the resulting envelope object was incorrect (the simulations were correct, but the envelopes were calculated assuming the model was CSR). \bugger{1.23-5}{september 2011}{1.23-6}{october 2011} \item \texttt{linearK}, \texttt{linearpcf}, \texttt{linearKinhom}, \texttt{linearpcfinhom} and multitype versions: These functions were sometimes greatly underestimated when the network had segments shorter than 10 coordinate units. 
\bugger{1.44-0}{december 2015}{1.46-2}{july 2016} \item \texttt{nncross}, \texttt{distfun}, \texttt{AreaInter}: Results of \texttt{nncross} were possibly incorrect when \code{X} and \code{Y} did not have the same window. This bug affected values of \texttt{distfun} and may also have affected ppm objects with interaction \texttt{AreaInter}. \bugger{1.9-4}{june 2006}{1.25-2}{january 2012} \item \texttt{update.kppm}: If the call to \texttt{update} did not include a formula argument or a point pattern argument, then all arguments were ignored. Example: \texttt{update(fit, improve.type="quasi")} was identical to \texttt{fit}. \bugger{1.42-2}{june 2015}{1.45-0}{march 2016} \item \texttt{markcorrint}: Results were completely incorrect. \bugger{1.39-0}{october 2014}{1.40-0}{december 2014} %% LEVEL 3: substantially incorrect, moderate impact \item \texttt{density.ppp}: Values of \verb!density(X, at="points")! and \verb!Smooth(X, at="points")! were sometimes incorrect, due to omission of the contribution from the data point with the smallest $x$ coordinate. \bugger{1.26-0}{april 2012}{1.46-1}{july 2016} \item \texttt{update.ppm}: If the argument \texttt{Q} was given, the results were usually incorrect, or an error was generated. \bugger{1.38-0}{august 2014}{1.38-1}{august 2014} \item \texttt{subfits}: The interaction coefficients of the submodels were incorrect for Gibbs models with a multitype interaction (\texttt{MultiStrauss}, etc). \bugger{1.35-0}{december 2013}{1.45-2}{may 2016} \item \texttt{F3est}: Estimates of $F(r)$ for the largest value of $r$ were wildly incorrect. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.48-0}, december 2016)} \item \texttt{kppm}, \texttt{matclust.estpcf}, \texttt{pcfmodel}: The pair correlation function of the M\'atern Cluster Process was evaluated incorrectly at distances close to 0. This could have affected the fitted parameters in \texttt{matclust.estpcf()} or \texttt{kppm(clusters="MatClust")}. 
\bugger{1.20-2}{august 2010}{1.33-0}{september 2013} \item \texttt{ppm}: Results were incorrect for the Geyer saturation model with a non-integer value of the saturation parameter \texttt{sat}. \bugger{1.20-0}{july 2010}{1.31-2}{april 2013} \item \texttt{clip.infline}: Results were incorrect unless the midpoint of the window was the coordinate origin. \bugger{1.15-1}{april 2009}{1.48-0}{december 2016} \item \texttt{intensity.ppm}: Result was incorrect for Gibbs models if the model was exactly equivalent to a Poisson process (i.e. if all interaction coefficients were exactly zero). \bugger{1.28-1}{june 2012}{1.47-0}{october 2016} \item \texttt{funxy}: Did not correctly handle one-line functions. The resulting objects evaluated the wrong function in some cases. \bugger{1.45-0}{march 2016}{1.46-0}{july 2016} %% LEVEL 4: partially incorrect \item \texttt{density.ppp}: If the smoothing bandwidth \texttt{sigma} was very small (e.g.\ less than the width of a pixel), results were inaccurate if the default resolution was used, and completely incorrect if a user-specified resolution was given. \bugger{1.26-0}{april 2012}{1.52-0}{august 2017} \item \texttt{selfcrossing.psp}: $y$ coordinate values were incorrect. \bugger{1.23-2}{august 2011}{1.25-3}{february 2012} \item \texttt{Geyer}: For point process models with the \texttt{Geyer} interaction, \texttt{vcov.ppm} and \texttt{suffstat} sometimes gave incorrect answers. \bugger{1.27-0}{may 2012}{1.30-0}{december 2012} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Calculations were incorrect for a Geyer model fitted using an edge correction other than \texttt{"border"} or \texttt{"none"}. \bugger{1.25-0}{december 2011}{1.51-0}{may 2017} \item \texttt{vcov.ppm}, \texttt{suffstat}: These functions sometimes gave incorrect values for marked point process models. 
\bugger{1.27-0}{may 2012}{1.29-0}{october 2012} \item \texttt{diagnose.ppm}: When applied to a model obtained from \texttt{subfits()}, in the default case (\texttt{oldstyle=FALSE}) the variance calculations were incorrect. Consequently the dotted lines representing significance bands were incorrect. An error or warning about negative variances occurred sometimes. However, calculations with \texttt{oldstyle=TRUE} were correct. The default has now been changed to \texttt{oldstyle=TRUE} for such models. \bugger{1.35-0}{december 2013}{1.45-0}{march 2016} \item \texttt{Smooth.ppp}: Results for \verb!at="points"! were garbled, for some values of \texttt{sigma}, if \texttt{X} had more than one column of marks. \bugger{1.38-0}{october 2014}{1.46-0}{july 2016} \item \texttt{linearK}, \texttt{linearKinhom}: If any data points were located exactly at a vertex of the linear network, the weights for Ang's correction were incorrect, due to numerical error. This sometimes produced infinite or NA values of the linear $K$ function. \bugger{1.23-0}{july 2011}{1.27-0}{may 2012} \item \texttt{Kinhom}, \texttt{Linhom}: the results were not renormalised (even if \texttt{renormalise=TRUE}) in some cases. \bugger{1.21-0}{december 2010}{1.37-0}{may 2014} \item \texttt{Kinhom}, \texttt{Linhom}: Ignored argument \texttt{reciplambda2} in some cases. \bugger{1.39-0}{october 2014}{1.40-0}{december 2014} \item \texttt{Kinhom}, \texttt{Linhom}: Calculations were incorrect if \texttt{lambda} was a fitted point process model. \bugger{1.38-0}{august 2014}{1.38-1}{august 2014} \item \texttt{integral.linim}, \texttt{integral.linfun}: \begin{itemize} \item results were inaccurate because of a bias in the distribution of sample points. \bugger{1.41-0}{february 2015}{1.47-0}{october 2016} \item results were inaccurate if many of the segment lengths were shorter than the width of a pixel. 
\bugger{1.41-0}{february 2015}{1.48-0}{december 2016} \end{itemize} \item \texttt{predict.ppm}: Calculation of the conditional intensity omitted the edge correction if \texttt{correction='translate'} or \texttt{correction='periodic'}. \bugger{1.17-0}{october 2009}{1.31-3}{may 2013} \item \texttt{varblock}: Calculations were incorrect if more than one column of edge corrections was computed. \bugger{1.21-1}{november 2010}{1.39-0}{october 2014} \item \texttt{scan.test} Results were sometimes incorrect due to numerical instability (a 'Gibbs phenomenon'). \bugger{1.24-1}{october 2011}{1.26-1}{april 2012} \item \texttt{relrisk}: When \verb!at="pixels"!, a small fraction of pixel values were sometimes wildly inaccurate, due to numerical errors. This affected the range of values in the result, and therefore the appearance of plots. {\small (Bug fixed in \texttt{spatstat 1.40-0}, december 2014)} \item \texttt{predict.slrm}: Results of \texttt{predict(object, newdata)} were incorrect if the spatial domain of \texttt{newdata} was larger than the original domain. \bugger{1.21-0}{november 2010}{1.25-3}{february 2012} \item \texttt{Lest}: The variance approximations (Lotwick-Silverman and Ripley) obtained with \texttt{var.approx=TRUE} were incorrect for \texttt{Lest} (although they were correct for \texttt{Kest}) due to a coding error. \bugger{1.24-1}{october 2011}{1.24-2}{november 2011} \item \texttt{bw.diggle}: Bandwidth was too large by a factor of 2. \bugger{1.23-4}{september 2011}{1.23-5}{september 2011} \item pair correlation functions (\texttt{pcf.ppp}, \texttt{pcfdot}, \texttt{pcfcross} etc:) The result had a negative bias at the maximum $r$ value, because contributions to the pcf estimate from interpoint distances greater than \texttt{max(r)} were mistakenly omitted. {\small (Bugs fixed in \texttt{spatstat 1.35-0}, december 2013)} \item \texttt{Kest}, \texttt{Lest}: Gave incorrect values in very large datasets, due to numerical overflow. 
`Very large' typically means about 1 million points in a random pattern, or 100,000 points in a tightly clustered pattern. [Overflow cannot occur unless there are at least 46,341 points.] \item \texttt{bw.relrisk}: Implementation of \texttt{method="weightedleastsquares"} was incorrect and was equivalent to \texttt{method="leastsquares"}. \bugger{1.21-0}{november 2010}{1.23-4}{september 2011} \item \texttt{triangulate.owin}: Results were incorrect in some special cases. \bugger{1.42-2}{june 2015}{1.44-0}{december 2015} \item \texttt{crosspairs}: If \texttt{X} and \texttt{Y} were identical point patterns, the result was not necessarily symmetric (on some machines) due to numerical artifacts. \bugger{1.35-0}{december 2013}{1.44-0}{december 2015} \item \texttt{bdist.tiles}: Values were incorrect in some cases due to numerical error. {\small (Bug fixed in \texttt{spatstat 1.29-0}, october 2012)} \item \texttt{Kest.fft}: Result was incorrectly normalised. \bugger{1.21-2}{january 2011}{1.44-0}{december 2015} \item \texttt{crossdist.ppp}: Ignored argument \texttt{squared} if \texttt{periodic=FALSE}. {\small (Bug fixed in \texttt{spatstat 1.38-0}, july 2014)} \item polygon geometry: The point-in-polygon test gave the wrong answer in some boundary cases. {\small (Bug fixed in \texttt{spatstat 1.23-2}, august 2011)} \item \texttt{MultiStraussHard}: If a fitted model with \texttt{MultiStraussHard} interaction was invalid, \texttt{project.ppm} sometimes yielded a model that was still invalid. {\small (Bug fixed in \texttt{spatstat 1.42-0}, may 2015)} \item \texttt{pool.envelope}: Did not always respect the value of \texttt{use.theory}. \bugger{1.23-5}{september 2011}{1.43-0}{september 2015} \item \texttt{nncross.lpp}, \texttt{nnwhich.lpp}, \texttt{distfun.lpp}: Sometimes caused a segmentation fault. 
\bugger{1.44-0}{december 2015}{1.44-1}{december 2015} \item \texttt{anova.ppm}: If a single \texttt{object} was given, and it was a Gibbs model, then \texttt{adjust} was effectively set to \texttt{FALSE}. \bugger{1.39-0}{october 2014}{1.44-1}{december 2015} \end{itemize} \begin{thebibliography}{1} \bibitem{badd10wshop} A.~Baddeley. \newblock Analysing spatial point patterns in {{R}}. \newblock Technical report, CSIRO, 2010. \newblock Version 4. \newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/} \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}. \newblock Chapman \& Hall/CRC Press, 2015. \end{thebibliography} \end{document} spatstat/inst/doc/replicated.R0000644000176200001440000004062613166361206016112 0ustar liggesusers### R code from vignette source 'replicated.Rnw' ################################################### ### code chunk number 1: replicated.Rnw:29-30 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: replicated.Rnw:35-42 ################################################### library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) ################################################### ### code chunk number 3: replicated.Rnw:180-181 ################################################### waterstriders ################################################### ### code chunk number 4: replicated.Rnw:199-200 ################################################### getOption("SweaveHooks")[["fig"]]() plot(waterstriders, main="") 
################################################### ### code chunk number 5: replicated.Rnw:207-208 ################################################### summary(waterstriders) ################################################### ### code chunk number 6: replicated.Rnw:216-217 ################################################### X <- listof(rpoispp(100), rpoispp(100), rpoispp(100)) ################################################### ### code chunk number 7: replicated.Rnw:222-224 ################################################### getOption("SweaveHooks")[["fig"]]() plot(X) X ################################################### ### code chunk number 8: replicated.Rnw:253-254 (eval = FALSE) ################################################### ## hyperframe(...) ################################################### ### code chunk number 9: replicated.Rnw:279-281 ################################################### H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) H ################################################### ### code chunk number 10: replicated.Rnw:289-294 ################################################### G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]), W=list(rpoispp(100),rpoispp(100), rpoispp(100)), U=42, V=rpoispp(100), stringsAsFactors=FALSE) G ################################################### ### code chunk number 11: replicated.Rnw:323-324 ################################################### simba ################################################### ### code chunk number 12: replicated.Rnw:337-338 ################################################### pyramidal ################################################### ### code chunk number 13: replicated.Rnw:344-345 ################################################### ws <- hyperframe(Striders=waterstriders) ################################################### ### code chunk number 14: replicated.Rnw:352-354 ################################################### H$X H$Y 
################################################### ### code chunk number 15: replicated.Rnw:364-366 ################################################### H$U <- letters[1:3] H ################################################### ### code chunk number 16: replicated.Rnw:371-375 ################################################### G <- hyperframe() G$X <- waterstriders G$Y <- 1:3 G ################################################### ### code chunk number 17: replicated.Rnw:383-387 ################################################### H[,1] H[2,] H[2:3, ] H[1,1] ################################################### ### code chunk number 18: replicated.Rnw:393-396 ################################################### H[,1,drop=TRUE] H[1,1,drop=TRUE] H[1,2,drop=TRUE] ################################################### ### code chunk number 19: replicated.Rnw:409-410 (eval = FALSE) ################################################### ## plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL) ################################################### ### code chunk number 20: replicated.Rnw:425-426 ################################################### getOption("SweaveHooks")[["fig"]]() plot(waterstriders, pch=16, nrows=1) ################################################### ### code chunk number 21: replicated.Rnw:441-442 ################################################### getOption("SweaveHooks")[["fig"]]() plot(simba) ################################################### ### code chunk number 22: replicated.Rnw:454-456 ################################################### getOption("SweaveHooks")[["fig"]]() H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) plot(H$Y) ################################################### ### code chunk number 23: replicated.Rnw:468-469 (eval = FALSE) ################################################### ## plot(h, e) ################################################### ### code chunk number 24: replicated.Rnw:478-479 
################################################### getOption("SweaveHooks")[["fig"]]() plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) })) ################################################### ### code chunk number 25: replicated.Rnw:491-493 ################################################### getOption("SweaveHooks")[["fig"]]() H <- hyperframe(Bugs=waterstriders) plot(H, quote(plot(Kest(Bugs))), marsize=1) ################################################### ### code chunk number 26: replicated.Rnw:506-508 ################################################### df <- data.frame(A=1:10, B=10:1) with(df, A-B) ################################################### ### code chunk number 27: replicated.Rnw:521-522 (eval = FALSE) ################################################### ## with(h,e) ################################################### ### code chunk number 28: replicated.Rnw:532-535 ################################################### H <- hyperframe(Bugs=waterstriders) with(H, npoints(Bugs)) with(H, distmap(Bugs)) ################################################### ### code chunk number 29: replicated.Rnw:558-559 ################################################### with(simba, npoints(Points)) ################################################### ### code chunk number 30: replicated.Rnw:566-568 ################################################### H <- hyperframe(Bugs=waterstriders) K <- with(H, Kest(Bugs)) ################################################### ### code chunk number 31: replicated.Rnw:576-577 ################################################### getOption("SweaveHooks")[["fig"]]() plot(K) ################################################### ### code chunk number 32: replicated.Rnw:582-584 ################################################### H <- hyperframe(Bugs=waterstriders) with(H, nndist(Bugs)) ################################################### ### code chunk number 33: replicated.Rnw:590-591 
################################################### with(H, min(nndist(Bugs))) ################################################### ### code chunk number 34: replicated.Rnw:603-604 ################################################### simba$Dist <- with(simba, distmap(Points)) ################################################### ### code chunk number 35: replicated.Rnw:617-621 ################################################### getOption("SweaveHooks")[["fig"]]() lambda <- rexp(6, rate=1/50) H <- hyperframe(lambda=lambda) H$Points <- with(H, rpoispp(lambda)) plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4))))) ################################################### ### code chunk number 36: replicated.Rnw:627-628 ################################################### H$X <- with(H, rpoispp(50)) ################################################### ### code chunk number 37: replicated.Rnw:657-658 ################################################### getOption("SweaveHooks")[["fig"]]() plot(simba, quote(plot(density(Points), main="")), nrows=2) ################################################### ### code chunk number 38: replicated.Rnw:677-679 ################################################### getOption("SweaveHooks")[["fig"]]() rhos <- with(demohyper, rhohat(Points, Image)) plot(rhos) ################################################### ### code chunk number 39: replicated.Rnw:696-697 (eval = FALSE) ################################################### ## mppm(formula, data, interaction, ...) 
################################################### ### code chunk number 40: replicated.Rnw:707-708 (eval = FALSE) ################################################### ## mppm(Points ~ group, simba, Poisson()) ################################################### ### code chunk number 41: replicated.Rnw:741-742 ################################################### mppm(Points ~ 1, simba) ################################################### ### code chunk number 42: replicated.Rnw:749-750 ################################################### mppm(Points ~ group, simba) ################################################### ### code chunk number 43: replicated.Rnw:756-757 ################################################### mppm(Points ~ id, simba) ################################################### ### code chunk number 44: replicated.Rnw:767-768 ################################################### mppm(Points ~ Image, data=demohyper) ################################################### ### code chunk number 45: replicated.Rnw:786-787 (eval = FALSE) ################################################### ## mppm(Points ~ offset(log(Image)), data=demohyper) ################################################### ### code chunk number 46: replicated.Rnw:799-800 (eval = FALSE) ################################################### ## mppm(Points ~ log(Image), data=demop) ################################################### ### code chunk number 47: replicated.Rnw:817-818 (eval = FALSE) ################################################### ## mppm(formula, data, interaction, ..., iformula=NULL) ################################################### ### code chunk number 48: replicated.Rnw:868-869 ################################################### radii <- with(simba, mean(nndist(Points))) ################################################### ### code chunk number 49: replicated.Rnw:876-878 ################################################### Rad <- hyperframe(R=radii) Str <- with(Rad, Strauss(R)) 
################################################### ### code chunk number 50: replicated.Rnw:883-885 ################################################### Int <- hyperframe(str=Str) mppm(Points ~ 1, simba, interaction=Int) ################################################### ### code chunk number 51: replicated.Rnw:912-915 ################################################### h <- hyperframe(Y=waterstriders) g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7)) mppm(Y ~ 1, data=h, interaction=g, iformula=~str4) ################################################### ### code chunk number 52: replicated.Rnw:926-927 ################################################### fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group) ################################################### ### code chunk number 53: replicated.Rnw:945-946 ################################################### fit ################################################### ### code chunk number 54: replicated.Rnw:949-951 ################################################### co <- coef(fit) si <- function(x) { signif(x, 4) } ################################################### ### code chunk number 55: replicated.Rnw:962-963 ################################################### coef(fit) ################################################### ### code chunk number 56: replicated.Rnw:1020-1021 (eval = FALSE) ################################################### ## interaction=hyperframe(po=Poisson(), str=Strauss(0.07)) ################################################### ### code chunk number 57: replicated.Rnw:1026-1027 (eval = FALSE) ################################################### ## iformula=~ifelse(group=="control", po, str) ################################################### ### code chunk number 58: replicated.Rnw:1037-1038 (eval = FALSE) ################################################### ## iformula=~I((group=="control")*po) + I((group=="treatment") * str) 
################################################### ### code chunk number 59: replicated.Rnw:1048-1053 ################################################### g <- hyperframe(po=Poisson(), str=Strauss(0.07)) fit2 <- mppm(Points ~ 1, simba, g, iformula=~I((group=="control")*po) + I((group=="treatment") * str)) fit2 ################################################### ### code chunk number 60: replicated.Rnw:1176-1178 ################################################### H <- hyperframe(P=waterstriders) mppm(P ~ 1, H, random=~1|id) ################################################### ### code chunk number 61: replicated.Rnw:1185-1186 (eval = FALSE) ################################################### ## mppm(Neurons ~ AstroIm, random=~AstroIm|WellNumber) ################################################### ### code chunk number 62: replicated.Rnw:1209-1212 ################################################### H <- hyperframe(W=waterstriders) fit <- mppm(W ~ 1, H) subfits(fit) ################################################### ### code chunk number 63: replicated.Rnw:1233-1234 (eval = FALSE) ################################################### ## subfits <- subfits.new ################################################### ### code chunk number 64: replicated.Rnw:1246-1248 ################################################### H <- hyperframe(W=waterstriders) with(H, ppm(W)) ################################################### ### code chunk number 65: replicated.Rnw:1271-1273 ################################################### fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) ################################################### ### code chunk number 66: replicated.Rnw:1283-1284 ################################################### getOption("SweaveHooks")[["fig"]]() plot(res) ################################################### ### code chunk number 67: replicated.Rnw:1289-1291 ################################################### getOption("SweaveHooks")[["fig"]]() smor 
<- with(hyperframe(res=res), Smooth(res, sigma=4)) plot(smor) ################################################### ### code chunk number 68: replicated.Rnw:1303-1306 ################################################### fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) totres <- sapply(res, integral.msr) ################################################### ### code chunk number 69: replicated.Rnw:1312-1319 ################################################### getOption("SweaveHooks")[["fig"]]() fit <- mppm(Points~Image, data=demohyper) resids <- residuals(fit, type="Pearson") totres <- sapply(resids, integral.msr) areas <- with(demohyper, area.owin(as.owin(Points))) df <- as.data.frame(demohyper[, "Group"]) df$resids <- totres/areas plot(resids~Group, df) ################################################### ### code chunk number 70: replicated.Rnw:1340-1343 ################################################### getOption("SweaveHooks")[["fig"]]() fit <- mppm(P ~ 1, hyperframe(P=waterstriders)) sub <- hyperframe(Model=subfits(fit)) plot(sub, quote(diagnose.ppm(Model))) ################################################### ### code chunk number 71: replicated.Rnw:1356-1364 ################################################### H <- hyperframe(P = waterstriders) fitall <- mppm(P ~ 1, H) together <- subfits(fitall) separate <- with(H, ppm(P)) Fits <- hyperframe(Together=together, Separate=separate) dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together))) dr exp(dr) ################################################### ### code chunk number 72: replicated.Rnw:1381-1390 ################################################### H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) ################################################### ### code chunk number 73: replicated.Rnw:1419-1420 
(eval = FALSE) ################################################### ## kstest.mppm(model, covariate) spatstat/inst/doc/packagesizes.txt0000644000176200001440000002073313166361072017063 0ustar liggesusersdate version nhelpfiles nobjects ndatasets Rlines srclines "2001-08-08" "1.0-1" 109 196 0 706 1370 "2002-05-17" "1.1-3" 116 220 0 1140 1370 "2002-08-06" "1.2-1" 129 237 0 1786 1474 "2003-03-12" "1.3-1" 134 242 0 1955 1474 "2003-05-05" "1.3-2" 148 257 0 2024 1474 "2003-07-28" "1.3-3" 148 266 0 2034 1474 "2003-11-12" "1.3-4" 148 261 0 2033 1474 "2004-01-27" "1.4-3" 166 296 0 3641 1437 "2004-02-11" "1.4-4" 166 296 0 3641 1437 "2004-03-25" "1.4-5" 166 296 0 3646 1439 "2004-05-23" "1.4-6" 166 296 0 3689 1514 "2004-06-17" "1.5-1" 166 300 0 4255 1514 "2004-09-01" "1.5-3" 171 311 0 4636 1514 "2004-09-24" "1.5-4" 174 315 0 4642 1514 "2004-10-21" "1.5-5" 180 319 0 4686 1514 "2004-11-15" "1.5-6" 180 319 0 4686 1512 "2004-11-27" "1.5-7" 180 319 0 4687 1512 "2005-01-25" "1.5-8" 182 320 0 4770 1512 "2005-01-27" "1.5-9" 182 321 0 4805 1512 "2005-02-16" "1.5-10" 182 321 0 4805 1512 "2005-03-14" "1.6-1" 188 345 0 5597 1517 "2005-03-30" "1.6-2" 188 345 0 5600 1450 "2005-04-08" "1.6-3" 189 352 0 5715 1474 "2005-04-14" "1.6-4" 194 358 0 6056 1544 "2005-04-21" "1.6-5" 194 358 0 6056 1544 "2005-05-09" "1.6-6" 195 373 0 6385 1592 "2005-05-25" "1.6-7" 201 392 0 7727 1644 "2005-06-07" "1.6-8" 206 400 0 8003 1644 "2005-07-01" "1.6-9" 207 402 0 8025 1644 "2005-07-26" "1.7-11" 212 406 0 8213 1643 "2005-08-10" "1.7-12" 213 407 0 8279 1643 "2005-10-27" "1.7-13" 215 410 0 8531 1643 "2005-11-24" "1.8-1" 215 418 0 8539 1643 "2005-12-05" "1.8-2" 229 440 0 9031 1643 "2005-12-21" "1.8-3" 237 446 0 9175 1643 "2006-01-09" "1.8-4" 237 446 0 9207 1643 "2006-01-18" "1.8-5" 237 446 0 9225 1643 "2006-02-23" "1.8-6" 241 449 0 9315 1643 "2006-03-02" "1.8-7" 247 457 0 9627 1643 "2006-03-30" "1.8-8" 248 459 0 9662 1643 "2006-04-18" "1.8-9" 259 446 21 10144 1832 "2006-05-03" "1.9-0" 259 447 21 10396 1817 
"2006-05-26" "1.9-1" 266 466 21 10861 3069 "2006-06-05" "1.9-2" 268 473 21 11409 3487 "2006-06-20" "1.9-3" 268 479 21 11941 4140 "2006-08-03" "1.9-4" 273 490 22 12435 5619 "2006-08-22" "1.9-5" 274 490 22 12493 5560 "2006-09-27" "1.9-6" 277 494 22 12573 5601 "2006-10-19" "1.10-1" 283 529 22 13124 5601 "2006-10-19" "1.10-1" 283 529 22 13124 5171 "2006-11-06" "1.10-2" 283 529 22 13194 5601 "2006-11-20" "1.10-3" 287 540 22 13425 5684 "2007-01-08" "1.10-4" 291 554 22 13591 5684 "2007-01-08" "1.10-4" 291 554 22 13591 5684 "2007-01-12" "1.11-0" 291 562 22 13728 5684 "2007-02-01" "1.11-1" 294 564 23 13614 5684 "2007-03-10" "1.11-2" 301 574 24 13860 5684 "2007-03-16" "1.11-3" 305 580 24 14106 5819 "2007-03-19" "1.11-4" 307 589 24 14316 5868 "2007-05-08" "1.11-5" 307 591 24 14373 5940 "2007-05-18" "1.11-6" 308 592 24 14390 5940 "2007-06-09" "1.11-7" 311 595 24 14506 5940 "2007-07-26" "1.11-8" 312 596 24 14552 6055 "2007-08-20" "1.12-0" 319 619 25 15246 6055 "2007-09-22" "1.12-1" 319 619 25 15250 6055 "2007-10-26" "1.12-2" 322 623 25 15684 6188 "2007-11-02" "1.12-3" 322 626 25 15767 6188 "2007-12-18" "1.12-4" 322 626 25 15814 6188 "2008-01-07" "1.12-5" 322 630 25 15891 6238 "2008-02-04" "1.12-6" 328 638 25 16334 6446 "2008-02-26" "1.12-8" 328 639 25 16405 6718 "2008-03-18" "1.12-9" 331 644 25 16606 6718 "2008-04-02" "1.12-10" 331 644 25 16649 6771 "2008-04-11" "1.13-0" 332 645 25 16753 6771 "2008-04-23" "1.13-1" 333 647 25 16812 6840 "2008-05-14" "1.13-2" 339 654 25 17057 6840 "2008-06-24" "1.13-3" 340 657 25 17182 6840 "2008-07-18" "1.13-4" 348 672 26 17527 6840 "2008-07-22" "1.14-0" 354 681 26 17923 7131 "2008-07-22" "1.14-1" 356 684 26 18052 7131 "2008-09-08" "1.14-2" 360 688 27 18087 7185 "2008-09-26" "1.14-3" 362 693 27 18194 7185 "2008-10-16" "1.14-4" 366 707 27 18427 7185 "2008-10-23" "1.14-5" 368 715 27 18493 7185 "2008-11-07" "1.14-6" 372 726 27 18657 7185 "2008-11-17" "1.14-7" 374 730 27 18671 7185 "2008-12-10" "1.14-8" 377 734 27 18766 7185 "2008-12-16" "1.14-9" 
377 734 27 18772 7185 "2009-01-30" "1.14-10" 381 741 27 18949 7186 "2009-03-02" "1.15-0" 384 750 27 19212 7362 "2009-03-31" "1.15-1" 386 752 28 19292 7439 "2009-04-14" "1.15-2" 396 772 28 19880 7436 "2009-05-13" "1.15-3" 398 777 29 20141 7524 "2009-06-11" "1.15-4" 399 776 29 20176 7524 "2009-07-01" "1.16-0" 405 787 29 20774 7524 "2009-07-27" "1.16-1" 411 814 29 21433 7524 "2009-08-22" "1.16-2" 417 821 29 21863 7937 "2009-08-28" "1.16-3" 419 831 29 22060 7941 "2009-10-22" "1.17-0" 420 833 30 21881 8705 "2009-11-04" "1.17-1" 437 875 30 22900 10614 "2009-11-10" "1.17-2" 439 880 30 22943 10606 "2009-12-15" "1.17-3" 442 885 30 23193 10606 "2009-12-15" "1.17-4" 445 890 30 23640 10606 "2010-01-06" "1.17-5" 451 906 30 24283 12003 "2010-02-08" "1.17-6" 456 921 30 24795 12003 "2010-03-10" "1.18-0" 459 931 30 25073 12333 "2010-03-19" "1.18-1" 462 945 30 25464 12439 "2010-04-09" "1.18-2" 463 950 30 25631 12475 "2010-04-19" "1.18-3" 464 953 30 25720 12475 "2010-05-02" "1.18-4" 475 980 30 26093 13417 "2010-05-07" "1.18-5" 475 981 30 26117 13417 "2010-05-14" "1.19-0" 476 982 30 26205 13417 "2010-05-22" "1.19-1" 479 984 31 26286 13556 "2010-06-09" "1.19-2" 481 996 31 26653 13667 "2010-06-16" "1.19-3" 483 1003 31 26733 13667 "2010-07-15" "1.20-0" 483 1017 31 26926 14009 "2010-07-26" "1.20-1" 484 1020 31 27107 14263 "2010-08-10" "1.20-2" 489 1028 31 27728 14466 "2010-08-23" "1.20-3" 489 1033 31 27869 14564 "2010-10-21" "1.20-4" 493 1040 31 28237 14805 "2010-10-25" "1.20-5" 494 1043 31 28377 15160 "2010-11-05" "1.21-0" 504 1067 31 41301 15160 "2010-11-11" "1.21-1" 507 1075 31 41714 15554 "2011-01-17" "1.21-3" 515 1103 31 42975 15747 "2011-01-20" "1.21-4" 515 1103 31 42985 15747 "2011-02-10" "1.21-5" 515 1103 31 43037 15747 "2011-04-25" "1.21-6" 517 1107 31 43211 15747 "2011-04-28" "1.22-0" 526 1148 32 44006 15831 "2011-05-19" "1.22-1" 528 1154 32 44235 15820 "2011-06-13" "1.22-2" 537 1188 32 45006 16282 "2011-06-17" "1.22-3" 539 1197 32 45153 16269 "2011-07-07" "1.22-4" 550 1218 33 
46696 16269 "2011-07-24" "1.23-0" 562 1244 34 47694 16496 "2011-08-01" "1.23-1" 564 1252 34 48014 16658 "2011-08-11" "1.23-2" 566 1260 34 48313 17035 "2011-08-12" "1.23-3" 566 1260 34 48319 17035 "2011-09-09" "1.23-4" 571 1269 34 48747 17243 "2011-09-23" "1.23-5" 575 1274 34 49128 17141 "2011-10-11" "1.23-6" 579 1286 34 49508 17141 "2011-10-22" "1.24-1" 585 1308 34 50154 17141 "2011-11-11" "1.24-2" 588 1312 34 50604 17839 "2011-12-06" "1.25-0" 602 1334 34 52015 18351 "2011-12-21" "1.25-1" 609 1339 35 52235 19088 "2012-01-19" "1.25-2" 610 1338 35 52774 19120 "2012-02-05" "1.25-3" 613 1345 35 53004 19120 "2012-02-29" "1.25-4" 614 1347 35 53302 19423 "2012-03-14" "1.25-5" 616 1351 35 53720 19506 "2012-04-08" "1.26-0" 616 1356 35 53816 19169 "2012-04-19" "1.26-1" 617 1358 35 54498 19261 "2012-05-16" "1.27-0" 630 1393 35 55787 19363 "2012-06-11" "1.28-0" 632 1417 35 56384 19363 "2012-08-23" "1.28-2" 640 1438 36 58566 19372 "2012-10-14" "1.29-0" 651 1470 36 59711 19457 "2012-12-23" "1.30-0" 666 1499 41 61344 19806 "2013-01-17" "1.31-0" 668 1507 41 61446 20094 "2013-03-01" "1.31-1" 678 1562 41 63783 20536 "2013-04-25" "1.31-2" 682 1581 41 64501 21117 "2013-05-27" "1.31-3" 685 1600 41 65545 21773 "2013-08-13" "1.32-0" 695 1625 41 67120 22151 "2013-09-05" "1.33-0" 701 1630 43 67397 22218 "2013-10-24" "1.34-0" 720 1666 43 69219 22867 "2013-11-03" "1.34-1" 720 1666 43 69180 23340 "2013-12-12" "1.35-0" 745 1717 47 72110 23491 "2014-02-18" "1.36-0" 757 1753 47 73946 24042 "2014-05-09" "1.37-0" 781 1841 47 77585 24633 "2014-08-15" "1.38-0" 803 1963 48 80709 25191 "2014-08-27" "1.38-1" 803 1965 48 80833 25191 "2014-10-23" "1.39-0" 824 2015 49 82274 25554 "2014-10-24" "1.39-1" 824 2015 49 81990 25554 "2014-12-31" "1.40-0" 839 2071 51 85832 25637 "2015-02-26" "1.41-0" 861 2135 53 88407 25650 "2015-02-27" "1.41-1" 861 2135 53 88407 25650 "2015-05-27" "1.42-0" 888 2222 53 91600 25650 "2015-06-05" "1.42-1" 888 2225 53 91658 25650 "2015-06-28" "1.42-2" 890 2232 53 91985 25650 
"2015-10-07" "1.43-0" 939 2342 54 95950 25802 "2015-12-22" "1.44-0" 949 2378 54 97522 27569 "2015-12-29" "1.44-1" 951 2385 54 97745 27569 "2016-03-10" "1.45-0" 961 2456 54 100964 28122 "2016-05-08" "1.45-1" 977 2478 54 101981 28124 "2016-05-09" "1.45-2" 977 2478 54 101981 28124 "2016-07-06" "1.46-0" 981 2490 54 102484 28310 "2016-07-08" "1.46-1" 981 2491 54 102573 28310 "2016-10-12" "1.47-0" 988 2533 54 103848 28679 "2016-12-22" "1.48-0" 1017 2611 54 105733 29466 "2017-02-08" "1.49-0" 1024 2629 54 106522 31029 "2017-02-08" "1.49-0" 1024 2629 54 106522 31029 "2017-03-22" "1.50-0" 1025 2476 54 104021 29413 "2017-05-04" "1.51-0" 1029 2501 54 105229 29430 "2017-08-10" "1.52-0" 1035 2518 54 106162 29416 "2017-08-16" "1.52-1" 1035 2518 54 106170 29416 "2017-09-23" "1.53-0" 984 2525 0 106672 29418 "2017-09-28" "1.53-1" 984 2525 0 106675 29418 "2017-10-08" "1.53-2" 984 2526 0 106797 29418 spatstat/inst/doc/replicated.pdf0000644000176200001440000141160013166361222016453 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 4169 /Filter /FlateDecode /N 95 /First 798 >> stream x\[s۶~?oN g:N\b;uNdJrןoPI6-(4@X\vbiJ0KY*3$Yf9eeLD0LK2%QTjtfLrRE!ô_]J,q2&T-:3JL)hJ'T %1u[> M$s2ILlA7b\*4X4"SS 60O W7Y@C  (}3-,Og}":Ͽ,8\~sAfyM=ɿ,@򧟪*wl1)(t(]VTs q3?>O ɳ|X>8ތ{{ {1_-| Y%4{9,nIm~IJH23dDH@0J%)z6,Z|won[ {<7|G|§/Wh%MQ z7sZz$B_y( eI)zɯ@af ?`\^E>:'_an ǛW u7H5"XG tHe`Qm7f):0\\%*_z9_n)Nr]>N^?, i&')'ufuƠt# :n-6\K XZF2lpi>savB<88IbaYoeb2z)G699$U4,jS#( 5Qi*zHR̤OMi!!% CѴR-*'T:L)]-Vj[}~C ~53~yW^|1ʯE~F( BY7FxC!зU3͇_xm/++ᙆ.ii{"~ MkhBWK!rK>8/5:z[OB}oBG+U>47 0p7?yd|p5ŋPߕ#aZ (5XO"UR""˷'e):w:iLH(_sD%Q6oT\Xup c Gi;GoF(0V#F|& \eK=m(zxj맾YSj#յDWxՆW_Fe]x%6S-h,{,tMá>1{lHd0 Dž%!Ng~R&a~R"&t/|.J)Rh nk|ʧtcuk@Ԭb{PKo[VZ][BY8RQJF5i^N[#)<yO)p:?Ȣ.U.*鷏fWL<)1Gv3] wFG@Z@E%tuկ*p%hUXNcV5? 
A2iaMKՀ$A^Bɇg/hxQ2 6p#:';u5uj|w-sB.1ڴv䰊YEONm3,$E<=/CN{ ˋbS (cBzFM&#.mdnU0 ml-Jcw Nj`w+^#]듏ߢӆ薾Y17e,FBlMHe3^mD3Cʒƭ2mHi:PAүb+XwDm/u-5-x d<*_xv>7=Jo_ z||I/,)l "U`Ҹ,ux+-nG/}p+;3R vS٦ǖNj&AMRݫ *LBV2  Iؚl%᭤cD![`s]E˘α7~y:Elj)[Ne+mȖ Rs,mN͝93Cؘ7h]uk _f#o|;WS|}KDGҋ<+NżŐOj[=VOT9y,[lL5ՋO.<]5i!On]{\M|}+7[w6Z ˜lZe5`W^oc>k:e~|`^MnRdᄈY%+gطԴ 'De׭ۯOh{+1_(>TLd`~ /]}w>at4 m7iw6ymz+DNW(Wڝ~Y5u@/7<3 P/*jhebڽv ZItM7Vf0P?mN>EJ(_xK_#~Y|4K׏| h[޽A-+NĦV vаKCQj&b]O [Xl'/NN_ 5_,T:l"p>J-2_KE,hϱ5GVl}:GQvTckjc>N`Q<ifb]hFm%j eWSRDߠF#?X6@^俸SE<0f($`DFṋG\czElhGvlۯfםlGa#mGoDvDpUrk{13߿-/+:M;0wijm琻ID*q=Y Zq5\k,ۘAuvˊ+j44V'uֵGs~2YZZ\weSb%_xGY~]cGGXC0Zzuv-+jFx[\Z_ݕoɲq}逪ݟ-Q-VϽO_7sP܃uPۿoڛoCx|Y'n$ݻ k+<'k\C0}]ѣ=F_N~= SՈyy$ eWN"zz+gB .|z(T>nu x{Vk_^8CB<Ђp TIrc|Q}"޿+ E endstream endobj 97 0 obj << /Subtype /XML /Type /Metadata /Length 1329 >> stream 2017-10-08T16:32:17+08:00 2017-10-08T16:32:17+08:00 TeX Untitled endstream endobj 98 0 obj << /Type /ObjStm /Length 1984 /Filter /FlateDecode /N 94 /First 829 >> stream xZmo6_M; 4I[4mtM_;,ۯs%KrȶL;xbc| 9`BS2pn oF,sWiArz[Ҙ. ZYΦy:3x4/_}d,ĢdL-xv>J3F|<ӰCc ;EktEoһL@sRlUQT(l~i+DpTUiށ @ ̎VCpۏ=‚:c<&ѻןLGya|@¾!d|]HSdn1tUˣU-#'*(ū"UBZ_^fZkLRpw4f)m)3@[2$bGxz\1x4*2aB >O|>(y9]~΃x$ctS?h]20Ww[)+i]$D]20 -Lzdi/^zxzjti6^Xh>g'!*s!lZw`Cǀ[Ym?88&7շI4nI5/9gѿ| HnǓ*cikxj\s,xdn'77]m2>ϓQ@ 9^\M4Iv=fQ6OyGWstiä8ٝA6B E+ ̎Y=d5 Q *C֍^>R [ovXf {m [Pw=^? 
U.7Br:UU.fsw8 Aͦ, s6tl܆V$^c <-զayբ{6>WOkC6FGZP;mDqtEaDQ^j!eº~u^VjFQ[8V"R="RmT]#*9ٺoh9ny?L;sŬ^0TTp ۦ6'?Z|:5$>TTԬ)5^U!?*+^kBeժ5g݄6V/Gendstream endobj 193 0 obj << /Filter /FlateDecode /Length 3060 >> stream x[Ms#av7I9'aU(R"h̠gKJdSr@~neFZin~wGTjnV`@^Q-:8"*z7:9ڌp{lFS3xkJ)oy1d-J`!dB v<^;ÃGRl2ixpXOx]WB1?h=_3sH6q8% $mqبWud;"p;E_hb-ww\В>∷"csW<8c)9 )Xasi['sOśʰ(be{vV>lWDc k4:J+M|vOT`p_aHƸTuçŀs=* JjeA<2 Z3&PcjmC/aw5YOc[JdKTۯ'Hny>O8MtXoUu^_  Kоs2TsR69v=We, c0mCPs Mfo '+@ͫvBYs>m1m騗ljMy}}a b>XdCŨa0dP1>7Pd6m 1 xG<*C..ZgS ;X#*`2m4PQ+ nrM-Yn<;.z{z:wz xiZ{5\CzvfB}&ѵ&J9,ϕ2z\pWټױ7UR wBz]^H%UDtVHo]Brg NFf|BQ\*lK!L(5LX\I|t)'A/<:zē(hϻ]5zJ%Ȃ5S5Z5q%JL29=Z<\ʲ@zD&`-`b1ˆ4*-k8mRnvwQ=E{<Z-"j5 4k\lnaJymz;R^0NJDdgt6n8Ë(, EϥH5VG3%0X(fB /~.P]^ui*OJ` 7h66j-] nЯ"8%\Y'9zS 8d:6dxؠ Pru\ WhrR! suƒSl1QDD!\ӂ$/<㨔9,XrpLmm4@op@ywow si 81xIBbCaxB/JpeʆZi,' fɍǙA\Q?nE/lojzLUO_c50V`a໇V.ATGoUts{[Fn ~ĺ4JdoAe;ov'rpu'.gN(1% O @yN:jݓ]|т|6 Eo;aށJGLͅlLyc$$ôBקP8,7*->J86~g =,b#n)M}Xn}.(ݸqɠi-\a"mB\ikcXO׽' ЁKz0{cr-qHpa~;孩oou3N7^lK9I,4(*E?i/EF|P w}?T[1֏*@4t@TM=|ݎAT6:G&CTZJZ-A 6ߡ NUUɇK)7ko5Ϥcs=ǒkgd2հ܃mq,97CBRJG ArGX= Hnߞ~} C;@B 3 ͝A:ɻMqSCN^@t-?6%_®?!rS6t !x v)ɤۓ$xڀ˞P%d/:9P5?>R Wcy;#6}m4{n6*b.;BR}3]^^KLo3FO7)Kk۠(6Gb'pK Þ[sٞ6Z!%G_b1nQhsaRO-5$,S BcKWhmi,ܱhi@ s̛z/NOBw8SY7~|fH?h@Κ0Z^1v~2xO endstream endobj 194 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1846 >> stream x}U{PT7J8z鈴E>ƒQ |,ˆEX*v]XuV0+(XL4Lhx.=̴W;M?ߜ3s|}CQI􌤵OV?&Q1+*r",-;RE $^\~ڕK6)"T.)JNؔH2U"(ߨV ReYJ[|D_SH2erm@*IKrR]k% yJUjmNj J'b'$lb3xH%$"҉a"Qu_$$$)⦷Bus:j6 +f^hȖU+P~0t@ C},0Ujp#hB&'7Ty``5TBƞ]#iƏ2(m8a^#FPADįF/eϩPSMGr[-N׈g۬Cʢn4nby "SA &1 YHD޹c[7M|:  k;`+@}zjA D? Zu#@b`ͽ;9(SqRx)(4@+l81%b1oKXX3 >:=-qD]WQ5s<By9+~ps RY3B=0gKR\f:Oe~w8Z*f=Rcpdv%=9}l Z[PHK˲W)zZip92:u7uI$"0>nj攽 1VD['>v/+vNK&k{, ~~Z3Hnۗ*~a Pшx`̤R_'rþ'7}%3 'CyޘvwK!q>>? 
DZ.ibT09|<21{!Ⱥre#@&vٔF؜NfMzn֞3'Z"س'\ 8 Ϳ?BwKVÔu%ICos 5vJPxh!F-`Ɇ2{hA P>{<--hh[w~.F}fjlq^Ή6bycc#-Ŀendstream endobj 195 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1421 >> stream xM{PWw ,vn2mU,<2bSPр@@yI pA] T<)y5AmOI\cK}%ޞ IrF+SK2]&Z'Z$LyL?4ITH50'6E'S$±Xb!X(&Ŝ0g[-ǜ-fe6%PJtvvKZ'EYfBxӕϻ'q\M\[zݵAٖ`k(Qt qK8Iu3INȗZP0p3 P$y}60] e5Uts.8xƯ|c)3_U~A!yRH'QSnT|F=;=6C0up88b[endstream endobj 196 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1728 >> stream xuTkTa`gHflRsj4$5ZRoQXqewvY*{dY,n厰,]bS/xGOC=hlc1M|L9swμ><I$I*2=zNzb¤QsB 2h ] YGyK $Œ$Ψ//0(W)7lHX\40W%*TA2r6P0^/0t][^^F)Yo[,/4(3A_&S&kE2]í'i5RWi z MbV\R2 cmb%b ;8VD#,w+ԯмzN{7_|pzR vK^1VTDIAK13(w~X+ GcBxW,L S!5>: RKg+p:fng5{I-S<ٶ`_֦ _MGI&WZ!f,%e"q_cG~he.YlduSaɮq=Ҋ3TDIKooP5db~lFRwt[t |9/k+jD)>YAhtm -雖aD[QܠQ,08>Gn~>#m֢ދJ 梟rQboıW̓f@ ݔ^c]0[*Jq~]`UP0V=tlUG}L~cJ:l: jۢq8 v68V zAsZ׮F+&{m;4<w邇{H_Dp5f|-tF$\Xk$FG x\RtLIq3v`lY@ ,vDc tb[1 غ % %硹C¢cZgٯ Y+\C1Wa_fN;>uy@^GPVZ* YL-Ht?'. y>?ߋCa'ݝ|2b[#)^':GVTZRW L1&ZAO%dh/Ol7ًtn4t%JZ*Y_m/8/ -@D"k'ڠFmݴ6.| Nk+dPlp&')J؁^mqiNW7r2+-52<)gaqB_RY' z<*둒#mkshܢO<-3]K߾Uendstream endobj 197 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5776 >> stream xX XWPiK@M5h\Q(n "l }W7vEEwIDĘSxy6I&7F1={02]e1P4HscGCߞ Æmzq#ÓEzٌ>f̙3&Li+w{ 0/0]6kzEی4###'N ;nMoWWH0^6ݻ0?(<+fEWH0. Z84li;ˣ<Zc'ߵ~g5sLl7eӦǜaF0jf͌a2:f=3qf3ƞld2M"f2,f%f)3yyYLcJƊf0Cٌ?ӏ˘3d1<3aަIgz2 =(=lY\Z̺_skb^_}{}=ޏnؿO>GL4pвA>?7Xnr5 )i;:@1HqX.tL+B BjRr4wJI\^P piN)iPOU'+.1f+8%J~<*HRwXu6Z>nT_$\Y_2u.jL__^,W[1O :$y}g'`Ay O2seWSeq.jڢ%E[KUR2[n^1^C_[7>'ӅŮtW(&YwzkOj:GlEgEPVk! 
d%K%QV0 ɥ qO hbM|eo/`dG(/[ԁZաcNj8]S `37 CyǍxQz.ʮ$I.D69IxgK.kb 8Nܑ~WG(Y1:U2Bs`Fنˮ~ |2}_杦eZ^[ \PAsjAEnd7Z<,w0!tf pί$dÆe{\VU|7gOKBp8BkoɨλTG,q0 =?/U5nt_3&Ym;|})-dq# 2yIi@M}^cv:%0lKؽ8T :sϘEy7]DJVG&b9n T"ֺ*$a.NTtOqb.3  O.є1܄mG3z6`*iU޺^?:]n<#++Ϭ ([`0G7]TĒ.k4|/C @; Z C$}ΫhPmLAGzѠ̥j4ڢ K%"#kYܕpu>VR \d>gMLB\8B3o((FSS7?[=d>;ﺐ֧= ; 6'߱F31×QҲ9,:ddhP)[%zEZv\MYݛ=͝ ʨ n4-}UG#xBTxo8޿_6=14uQKOSB:%Ҥ|fazUD6D jЛfaUjt:mjp$(쬨ةƳ5* ;@Xܴb ݫw;*MiuUruZQ 9D5fEnݰkEx#hA:gy.f}1'uΊIO(rH: 󭞑f/⪣8hQ@f:-~lI8(ʇ8SJ9c/zs"ol:\U]|hA8Y:y*_V).ҐD2zĦ\ N'^ q^ׂ N-<&?͹JLѵq8J**k `!xw0[@K [T:}jIV5k:GQvE;H,mL7lkʦ (wE]ԃ>+[e؃ 6tUT/jZ5]uןלNi4m)OU#m?olOn_({&}KF1 4$xLgם %^mՁǐd ph**i 66Ñwin.ˬ´ 4^]jN",Ѳ,esx~KrK - t=.zʅTdSӿQVOŏ=c4ޭD%.s Y.o'd|(rY{ !ZK7GЖm;8Mg/r4w:ԌV0m(2 a)mQ[]TޭsƝQA}Ο݄/Ǒ11]ؿY/L'f'C*p<[ *j#NÚ,u@9.B9|pRK׏&VX{dL`S۲ 殐Uh+=ד禮KA!5?gtQ5R$b#Q5.?[Wq,Pè֊-g7 GYX?̆ѱڸdJBa9]Ó58nVݱo/M"&^}f)p.[|7:x %Wl~%z_ /KөR\a$H9KH*7ڕ.~,nVKC ybo +Ro7)++X\]V|XB˂4[\m?z4RyS#M9kku+?|ė +CA;.@ucBuۚe9 'XV8\վ6eWT7D-(6/9L,qI d˺|D-}顺 q4{RϺcw6 dԭɩL-k'i E)XJ4; ٻ?4\b4 Q×TVW5aݨO样~Ruͤ|4!`!CB&;hiKaOImWVw.?teڒȹ !SQv6n "i#EM{wHK8PvUJ<0PyAa@l3Gr‡d yw9u& P cC~1i'Ĝk8Bҝ>]a7KP]xǒM~k% r=aA;9-B!*េVEj*?uV~Al+~XMeqJ\geXeymWI2$SSִa|J-opgDd˜#) y ~}/ٲmaSE~2OCvH̕?ȕ8Sa.)ߕ wdz}̬ Fkj(-a~?aQ$'QA )DamI>Qj:3i7Kްg0aZl)^#_:| k!9Q4 "t-RsBre[ҞA;}$aM3Q^(ȥB D|Orz>V!6nnAݫ"|!;jMZ1eزƒǦ eyF-k"BRwi--` 24k_7$Zs*(_,?߼[uY[UP}kywꅺ#-U}|gXTA.Sž_ ta8p Lhok LamF`NW ?vcMDFz<]_z[_"@̺L(`qu&(-<?W#)yZʂcA,m^RUNj7·swH+5>q%,l.U፷U`K:f\p2}WyM@=~]݌O'p@X\mp; (nW&5^VSoi`CCYEadtAi fݴXYusS);)q? 
Ԑw5܄<+ێcԘ*8g2_S UR^"D:]?[-s|֤ uk>"UWpGכ?Xܢk>f~PuU:5OhbadE љ)rn M]?R'SS=އF/0"O+ܪL{X1y|:)Ȗ,V썊>B3jG5g+Z4^endstream endobj 198 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 8414 >> stream xytWä́― 6)B!;j{oݖlIW{/l `Z!m ycddn߳9>Ѽ{wwT@ _v$_#A~p-;־'[A!s6u%“DzD؏rm?yL4~ngk @"~]gDY>!M950|BpGFد a08(~kp,H#k}wqƻ3{#Gulbq}0e)SMNokJQmj-5ZGS(j4CmRy8j35OmPGDj!5ZDMSS%Tj)5ZFMSP+J]ʖRTESPk=ՋER@/D}@YST?j.՟zb7Hj7TIłz Kz {VVEz"-e2}ZU/T=}" nzO/z?~恸ocg/|g;Vo;p@7 ;*x7k($)eIC ġ n9iâvmO^)##WX' m6KtB~hr6P com79BG&hS֩f7[V`"+| KE|xV8<C=i',&_Ԡ>*r^,}3r8_~v#jpL~50]W0?x*ﻐEbݾd6 mtɼ< {HEkpI;mg2so-&#RtU#tT⟽~={3`ad"GevU/8sĚWȌ$6q[#?1?E?~Ŏ 8COvʽa/VTop:K6c|$@w _;lӧ'31JwUMd/n)<qjh NoǙho<Wئv՘/(A$yomF+~Q"9n sY"y%ȁnǛl=ŒďoYJ4{$ALd'@R0XJi':,Ɲ"Ixă%(.lYz:$pz5n:4ď*<O.$mx 3vaFQ'C{ȸ-wovtClMSpXG8΢sVAH<S"~2\#(=|00>pߏfuԵH)NFWq}PIw#O=Dh(Ńdyλŋ}q)9J&03t ~21$OeѢ?:X̼z_}F f^;eB;h}ňF!|[yC`]WOߑQYh,` 7:PK(I:Nuw5?n^}qw*z}͢4mag4[o iS"! ʌuu.<^B*jO<`B;\oDÌBz,c+пj5(-UqP AwyHMGY]1`\W95 ;6ưQ1m` ba>} _GqBUF0 -t gFTzgz_ ܑX q!.qቁ U-wQNK)rɏKSBbHOJNWxָAu>%R~]hxԔCa&m sߧa8p-6 94ޝ9!/ze ;5N@+,pO,N^4Ä6ןd<҂Q`Ww]4<.'Iʌhл:O@ҹp 3ahyH53~t:#ljk.ᎇZ KN̄v,8h/gl&*Wi(UW܆@s. 
X[p_} m% JA4oe?J>H!|.x!=my3q\ CφIh':{kj?cѯ]6IksUf<`VWdUumWd ,.|ĻzיCY ˙+ez~IP i.[Q笚Y2HMөgMdezmc{:y!Ʈj7p)# ՁEOC0 6+^VGP'5@=&A FO(vu7V1X,w{qg.\ɗsv2jN;:+=UDC%F{H!Vٝg aIIXR'MTgX*C4y-eZ$k; ե:]tAMpD)̎l!]Z9XFyH j+A_l3)q+{Lx=G6)>R,7l#83= ;͑T.;7i[áOZoY~!?hnNΑ7v&:TO kRY6[tАi볒s ."`Wo`{F^d$(BԌv250"T %EޠI脅^@|lRB|l?&[,W(ca;8C:|Ԫ~x{`@WiL*VeejS  ~HYa,_rr\yVء>1۰[;hMD-c!amˢ ]nB(ZST}(n|yPf .RȍrP>IN%J*7Zp/ ~ޕMeihbwurfAFlHv<1 :} Er&bJE2VRBGwTҶUKEFY$'I*eZBdHSf.jr@1dIE>hL6-!O ]p2SFJHiBS-ǏWrj`fQ<ԼX4V*M!&,JWܜ1q?"pX%0\MX0u.~9kʛB$o&p (pnjvpZ7i$GfB&]>hs2򑪽㢨,Ǯ+;Rqp)[0oF@m67LxLa DsH[EYʛPbzD"\7YJH&@Ehqɸvg̐z.]BkA?BA.U:X(dFطX }OJ(e1[.fW mQ'Isv޴(!8S~ 9gf5!k&QFԊA*VU(*b:tfZ!u~!9t'hLg,2,4q'0  N΢:A c&%sC}HXIKVSɐ}GDSpIeҪU =#d\xv#nh,85~Pq=4le%脅lѴ;9y=!gC{ŐZ%G'@QY][l;pp_ 1hW:mυ(glꃨO ~K_aҢ@,TH#O5[Z{N +hޕ6ĿPO+uFINAEM30`dD?.LNx-r`?))BҸ1̼ J&D&0$<4#YʹֻjLXgq$!.|q;U1.a=F~:|!nCXIRM@d8>ώXx]M{'LwXy4OexF5{J]40)tuភ0| sf'!CfscV)-0Jyo:9ŵG k%-Q,uk? %RSCVn-'M6hAʑ<ȗ%RTk 8B3LF'N.D=b-fO=#AYGH G dHaR?Na}fKr2f찌 +/>:;@}įd9s儁 iSTmNӒ..L 1j:"mRScUgй3HB.g-0(8 '.z߀urwheLUS9n-S)1uȻG] NÈWH҇jel?#hN ܚ3FU^ (vGԞgsj돘|x'ڍQ<]rk&%ᖬ?jyYs1yg\/KP8CWLU.88:2tW>; =k+Ã*kk++k"YGsFsgHD^z4,tAC0 C9Q'?ϡł wz(4:XQYF>t9i4(+-ێNWB[/ _jzBU9}擫CZA]TLz\FOms^ #X٪4iY0f\~##7U'׷!V}]bkϩ6YզM%>/E!WxYF`,жK ?7YO6;*SQ,"z~ XWonӫ 7_=N8Y ,{.|9$a P"8,N*D\B8OT,28U.EyqC#[,阋2X4H+BdmR!dg|-z#MwST~Up#!¿`s;=EL֖.,κ{EYX a $/;L'OrfT&5hz(b/qȼocH ̧^tf[Wp8˦mpy4͝qjuŒ< n!Klg+[̌tekJn9}gOԝg9I;prK!Btq°mkb4;C9bil`n_Ԛ4禬¹9E‡ hԓ)V`pXp@T}hWazR對ywTܿ/P Bft?!b;ч+m2 WGuYgMM!3ILbj|CBCKC35Vs|MNfo/:Ay_ƗQK0QYBT!}yQO L&'C*[)5?eZӎ1h޷Pi#q˦X$ݽM3 ~*h[ж}+-22u 2S5/ `a]4}Y}}3uy)Tr;3QaIi*dE. 
Ѿ裧}E`?sq͚04i[h8MOoί:1|k2 s S.yoZx(>kp,ܳ$ڿ:-L:]_FI(~$59k$蜸Rқej{2r۸1-5V2!El M r Ӆ|:a RڢVRu1~_f;OΕŧ'%)8%Fi'M/Ӛ}A<2oh5vF9 >}7VP fa9{޻i{;y 3_b=`1;)x' 9Zz_5B=.rbɖō[d~ (h2%rYV&[l*@WU1ļ%N;{D ^׎ǟo"2Β—+5dsgs&ʾx&!QxjDo`kl5wu@[PR]哢h `yog_!뱟̼ Ӣ4*LJ"b ψwB=y49!R A?O\ i5sV3D՚tCǙ-lDPgIV_E*ELU qL ^ѭEA.4 i ;Oz Ç8rk0}g@J~*I;N3!r#g#]#gvSH~+)HVl%I/)DHfi^{|Ѐ6~jBQͦmcQO1%ǛCi>f)2SU$[";lvԧh$*JA")EmV_t&q[Nt63d[ kĜL}2m1D b}+Po6;]ohBZlǣlw [tQxUk)2P#w)oAšmcb> stream xYXT>٣bc=jj,5Ʈ (DHY";w bŲĵޘhb4Q| e]o<0{fgyeփDNN&)  C{pﵽ9# -%OE"Ber߳4sl'O8vsUN>{CmGwz}B17p}{lr[G@7Y%~r宾n&,$w:pQ|ߺڰ(`lIO!ۖnw۱mΕ<=W{9yYa3g;8vޟ8)bȩӦS0j5NfQR#5ZM=j-5rRzj!5@-&Pj"15LQS({j*F-SrjeMPbj052ޡ8'ՋMͦ,>/ՏO ,͔HmXjeE ̨PwQUzTď>0+7%%`<{^ }bo}~+otKs˽Ҟ҃G c{là>Xmjiif``ك2anP硅Co}wQ,on/PiAw_T#xA'gc)>X7ܧ]cDr"jTBV= Wf [J_jT_'BB_:slMwl NZg:pYIY@m2P5*[#BuP8ң/4:S迂Êy5yME/Ix0{ JYpsE~8!C>_G΁ ^fx@U" mpyH"fX&'9^k.z~y'ZpZ~FS, -wm75#eX`Z 5JZj D*/m6,B0p \6X9''h tb|Ȏc3l6a4?ǟîx;]y BY`%1YCUu: rTS`FƦq%1^Ch|/I?ƕdTSV֊%"cQohbL|"#e^H=^ / 滶{Qt% ܆~1. -0"]e F`yK QZ%lVHe>euo@3At}&)㓹--(CL_=Qҧ-q_EsMɸҤ=#Y}NqJS>~&$䆯Oҗ7j3d)Z8BIX7R_|;m6lZYsQ0_5^cO ˌ4=#8EfwlmCj>) H๮U-;}2qLYax@wG̩WGx7=;"*8S983M< H2YU{˛O])3,e˪!ž!gAMWu][oP'!Ig TH># ICM'G@1jWbdl2[tjE.Xb5}cF7`4H|O5 2TPحl%7c]p쌆=h@OUq QA>:kEBXج7YC DҡBoS*::X+

^ov[7`L HV,!jrPmjW4.#Kj"]qVا~B3!4FE'j&@ud T6(Y63RB)٨ - pݷQK%mE"%$+⹄ȐM3}OԂh>E|G1Ñ -~3L^+ Ub8,Q~ ~$9bO̘JR"f+%y ޼"|;Gf 23Q*.L20)[4h:cp5a$el-:8]Y:% t !(!U`j7 39]Ԩ`3n\ҕ;tWyuGi2R'Җ:m[`ax$N[ѡ:IV̘hͶn\qe1NB0QFHۅMJEݣMFOaYbU5Ok*I2S s:$@Άլ 5g<;о/G WC+4nwH<nM? I޾x",]hA Y.p(;`]0>qxf &d)Cc?@wQclS&rE^<uG yL2ڡIj=NsI9;ŵ>D<[;($^#MHH#;2-"U*ԎYr!ۮ4O]uX } ϟ/Y] O/e%#]z#GN=i-$6-Yނ'oօ])ȯٳ&zs3Q1͛ī(b*:<6w0(DwAhfګ0mSu5\dk$,P$Kw>Qх*5JE驅 WQ)ihPU96bM2o먎ab-*Ae|\A#93Ϣ1RЀR3RP1ѓaMOBژut/rO+W?Am6,>g9ʠcT) DP;-uCp__|`[xH_nH#{}gbITU&%&NE?9rU48y5iA滹p// R/Zz<!Wd|˼K|k!Jg U'BW% G\Aٱ\T\έ/,bS, f 0l1Ǘq?a alVT_T[[^^uנ5-P&|@S+goHι$RJTޝ} hl3 r𫤱an=j~(v`WGR|FtTˉЫhO&jT UboqM ,?9fu iU䅻n'@h(^ fPL jO0͇0?J()e1`yPܟA,rҧ¯oE:сu~K:-6JgtٛzB?7( &b8sxeH;- &-Iӻ>R7\Qv@˸- ZO!Yjm휝ڟ%dzN~gHAaMLHc8^ݕ k:aV9bڐ[ʮ1$i`cI:dSPNSw9i-ϴBoK$`(1ƯrȀd0rcaVJBj7&Sz\>R/H{#_e`Ӂ4`V'Ă?#K96H]xg?$&ؠʌX[)P 6MD> UYYgT6(Oo3<*IeCY:-,D =VY%FYy%hPI+:;O<.{0JXRWe@ڋTTtBqMDA!kg.u)HfM߾?"hg8m=z{7:;:lY~UsgL{cfr<N+ t 'ݵ8_ V•_|zZp\o*i=#IoNH EU(4Gg3249A=r]fcpF%nrO .N$2T Sz."Ԉ,֍3pL? =8tу ~7;o:\+W^tHV%Db53:2,hD `6V{||v44Wf|s7eH W}}~gU+_8ug]:ehf98BⒹ3NJ7D͂Xpl^ظ4%"*w"(Wx[Dx(IGy'Mfj^E0_9EE7z < 8/+Hى?[ؤܙwWF:Cm~Rv_EA͏=N=51E\D^|nf`QFDɨC&FhQ$r~ Wߕj5V|f%LVS}õ&<e#F]PZTJ>PE,l: SS>CuB^~)v(sQLezna)daH0NFx$Ѩ0gPh,I6gilæ𨤘ݡHD4rzhw|eۭN73+%5 Jsɗv&>xp-pI`&m!ۻ2#_բ 8UP6SQL U%*lBYEBRtR# f:^O-[A0{i4?^0F߹}=< ǽ>|W H"j˯DmstHr*83ĢGY%FnDPvS1t.ğ_`cѿrˉOt+ǮyŽfϜ6DSQRX;b|y N> zmEm l,sgX-o\UT+LݝsSxoǮ ;@#U1OV-_l9yLlo<%nnhWBX_|ZI @ȇB0E_ e‹‹232BKzHu>Tj+JEu娴SnQZZTwdfFd^9{8ICї@3>ҿ@T0vbt pZF::Pz@[:x' IB4=#7i8Xz?]`/>O"ɋ#|cwf1AIx9g$tfml!>TH~*uI}1$cZw!?9+endstream endobj 200 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5054 >> stream xXXTg־+"֫Ć&h1bEQTP,C&E"Ma@*Ru71Fn4Րsc d}f~9=dA?F&V6Kfϑ~$cc[Ȏ1]`,ccFb<4 f2߾8+Af.f/X`9lάY ̖x88y8fv~.nAfz`̐N>3?<,3l[[@o:'7aw_[[/0|.[`"hU!N׆ظmn&o{ Ϙ5{y?a3xf31Mla0X1әm2fff1+Jf3Yc016:f>31ea|f3 c3<3)3bޣ9f ?ll!(+o5fPh8b^r܏Y^77`у< ^8eCچZ=5З6n2bvdezH'C9p'Qu>KYx|)h,:hbܴVep;Px=Yd_;^X 5;wt44/lb9oAF>K*-Oꕥ' p=8rpLڣh_KY/br+k_)ά,_ A&'ɨFkd #bb Z%o?VB ^. 
::UeL JDEtz4#Sykܤp88Oh[/eKO?Ӟd`F*=M|MB/8Lc ]KyUg sMhpi8*?G >ctFz(\@x'E0{Y Y΋X8i`8KA:\c yf2TzQkx]:0uaVzZ;+N|=ǚt+Wѯ+ϥW6Zw?=]QEvxg{byyƮ,=k pB\2F`"׉fղ038P\AzDb#Ĥ踄$i)@F$(+tNG am ?!X\0$) t- GNzB0=Cr.P\癶9kMl8]%(O[#3$Е@J-GA"~0 "Pm^V:- YX?H'6w\O~ h nlct/ƗyM:)@VD\E[b?h? l8z2 G $nW5tn˅B={=(LiaOP[R|]#N—NI },ԳlGGꁀ9}BeH ϵˆ)xE'V9Kh ;w+Y88 (ׯhxƁzz85 { ez+hbVzӄă q8=V35ly[YMB'Y:zEz9 gUuܯM(|fo=Uȧ_NUG҄:Cd̳IOHQK /J8{l< N$-&FAt8O~=$=45L)2;pT''4-BDйduDΒd 1#F NU% Eqaق.m҅%s]D> rXT (-ʵɐe][zms֝+25h($>ECÙ_UU_oJK76lsYd#UIQ惩B f8SvvdHNPBCцN*EejJiannْv/7EEŭ[G7R=:;)k}#L[a3l0avJo1^v9PJ!bvz!:u%pfT`Lm9vQ Kvx]/+vPħf\PN8N> II$ LJppHGmۇk,tOH:ez9$BDisx{noqݖbZ$MT YFr9D%qIupɎd#; FQ&%EF@TVQp¦m&˩ aɘog$]\:eɽy,"w#0a.y&>={PsrǏo^#3YW0|G|LXbXJvjh:ȎZ^-Fj``g9 'QlRV嗧h! HnQIyh984Xp.wtE6};˹S?ԃʄlwIx`MllĵsP#8 !Y@i9#9/0yS`cXS^ ~US4Hee>wNkYGx\=h@"#Fn9ջmsA䇟qvYhڨRjO+rnlx)9Os DEj [` 8w0:Hh#ث9 cdm^ᥞn S=dCv3Ěnd'S2iзcɥH:|\*:BnwPjz1reh')N|gfh +L> HUg8'Y7 3ӾޏU *޶ ۸Q{8=]8Oh \4zFI}|_ O\8_pg1]xw]bu5Sʎ3S ){Q |H_e\ڸG QP_ XYصy3Bht^Sjrq$YKfyg Ui%%P-kyt댯1aN >*͟ݪb o_}ZTep}-,ƷmwиDѕO$_Ԧe5fCH:Ѳ;/M$ 2͓ &>lECI #glu\?#3Y(#`܄km/oB-\ȵIt1 6neWKX)sUEcОx.%J۬N ͽ2V_^CVTZ-zQgs0nDPhQgC#[T/ EQVS_rٻ;Uh<՞ZܳmrV) 24L 8C\/d>r!VaN0'L:G>3rgoWq.|KfV T~zY]%Jb'T+:US=]hn Ҁ{>>;vSGiB4$D., $NI// qB(^o]2iIշ^)Ԭ~> stream x\]sܶ}= ̲xMi['IՑJde VV\s\{tq~qz} ?Hr9X|uJENuQ/\ nqxq&.WQV|G|ZBI\ᲈNi՜Jf'ϩO5hࣵQϴuAQ1nDo֩)/H n44ZԍhC暦uYl0>0.1[I{2}%s|VJ֚-Ms}J LjXp@IE3khєڅfCK0sMei|g8t= u?U[ IQ֪Hfh9ͮn%{`BZd~1lIwqgzZ,M.*i kpS _29@3;!5t}$f#<qrKqibeew]9 E`,ZZO{jRPC9*誰f Jo*'s|I>xN wVl}i9n0%K&DvmgGr(ۆ>DI Wc9 N4iЖEaC6|Hx lBXiQ]~$ μ Wr9'O2+'34ǯSs*9^qjH7$XveJ[*}|x [q!]´4{F^Uxhp6"FzAܕtU":*𪀗_ (Lv`>uBM%bOt6/fxV&g}'Ea >,f; Xe m2c+`LX^zalLԝo$Q(iM6e^W>`g`Uv$T'aTynDjߖژ≳DLi/'嫭9N9rQv<0Dn Ҷ+l*h/q(5Oz572A9hIB'PU pC![Z@ .GՃ,||2[v* Ёhbu(^M,DU6yKtş+Gn%ϊ1qư0{m&Z;B.K1d.*X`{+n)ΜGt-en*Ζ⇽FW9K7,(m_Fm(1k . 
V5oų;%᜞MtE/l{֮m"=1/D.*nGKM`;%T]!/ j,;Tgyա_߉{py0WM1E'`8E1Q7-X[3A)9~_&R8˔2㟎I }EdwqѧErYU+l=DZr=d܎Yn}^00>Q\8RdGɶ==5m7RbPJ+X˪>ऄ8^z&th,OJ O{u{ U蛩 g|}ޅH/ž3H3ofvExek|ܓzq|xV,8?~x[+:xи 8UIW1LKve>zy}z*Q70=ZU!ttaKdm9K?\f:>چC䥜kV7H6rxT~\\#QB ?R^ѻXuhC3i?Ex6]0 M8 ($Iev3fM.BLH0C/eq,sGR;^QVjs QT,*|s}Lr)1%]MirMpuQljxq8n? 6<QfiM{AKI"rzWk&8C.s1x;5 9M*^Ҩv4G/~ŴZu*c&jaC+43xלcJ$M*yë췕&/]՗*cR~TpM.SJ{endstream endobj 202 0 obj << /Filter /FlateDecode /Length 6459 >> stream x\Y~#& MpRawvUL>HPsRNӚ(ٛ DUZeXS‘t0_}Wtq/ xQ~{/Wp>ڋKs!ws:/>zgX_N f49v遼/.;//n_?9Z{-ڟzff"Pbpf0s\\糫z-SS}ݘi2>^೟O{M\N]o?^GהJ)nCK!;a7z  oDo!gܛRN6OvGϥ\(H.7Y /tT)&{ط־;bk[Ҙ@8O" *︋# 𫁹}/."o99`lI,S{"Dۺ$_ld-PWq\DnyW?9IL{^9-nrr7ئg/_w>J\⍑Rr9Yro3HcV4FVipRm1@, S`R3I?:0]]'t͜Jl+PhaS`n|Q{^n͏4%CWaUi'>{~m)9ژKsaY?CIuMfsc{swft2hIĒ<gI+e(b0%faO TN^ݟ` u^}| #}&E(&Er 45uMԜ|0X0"h!h(%lirѡ~;1 _D@sM 9jݭY4Pn:6#w~@ 'xHqzL6J;AyjD"%E5%ڠn#-HĵщAJ[kVKRykh8ĢL`%@ .1,\y&`qvhm @:gI}B2ux>V09vJ8v)fpcP !H(~ &ZqX=a)F lS?!s€`TTbX`<11X3Ha'Xm"W~BsE8,J/r*w*t'RHCؓI}* J| gX%h4!U rbpOiC\BKRƒVIt|m -UK̾H}g ,ڢ0OICr)+ -5h[Vч͖D0W=,HLS,nl9:<5)͌]Z:RH+K yQЬ҂!u }h n,o]mwJo%Z.h!Ļj E7[ D6DGmZ -]R84]h{yMPwBP֪}F&Vk WIx/nA io;<:KȫvYx%1% ׷0.g3C2I? /UM6kjdCJz:QpUV\w_qG,k \E{T_a*)1#MPΪ!0 gW}Sp:N32jzUAD휖FU@֍^Kp df+McfިUƺX76W$*4$ UyuڜQnYθ6% ꛽ز'< 8<ْ`ڌ!VGVDj tLd2e!v8WS\uco!zZ)ir}rNJgS'dt-a8+\T}YP1 84˛P6D[-L `m6w7| ؖIQVYkYQB>c|X}ڮ%|9wڛ.ux)օ9A[]JT>juG19BuW{. 
D͍ؗGb/I-ta{Y8+ݼl ݈^d nżwB?yf=t%{y-CkWbL3Wz{~<]3(qO`|U:)Xgds~'g$|@O>>r7 l˓8BIYT&^V%o[Kf=b=0n u?AWSS؈ i|Mz8z1+ A-"Ʋ00> mH[+ 8ȁd k*Lx2N"]ְ0aQ9M=/lcpSd%gle ot"(s8u2W-9冸HPKl,C/;]?QJ E…A@~"Pu%Wn*@͐lU |uhVUoKB@c7u=VHD͸8JHS>6$Faۙ+S@/:jrI(TlۄQdH JuE ~ǭDKmj}B)h$զ-S;V0 Fc5T!3H%p( +TF' >Sl$&%.骦p鐅 ŌK3g9t5qԹ|욼 n6P-'Oͼh "q@3Þ` F%FR* ӑ;b2{ -eYn#5xa Tu=~V{p!1&5F`Jt5UO]؟gfVcwH%Y 9O1i(劐l1k;q5,H'HYhni {'w}Ez bN*.W$JQ _c-.͋ >yrufFa3uW%^/DI]vr;eb^Pj kr8ZjdK+#6< #޼VBʷImq6?@Fsp|6f3˵_/y A'8+ \rʇ`3rՔ:4MPozp.٪7N v?b9rg2ro%ZhDpmMX\ϜR vN⣰3(Z rMR8kQ~[6ߨ9j%_# &2^xQf#x*4vKl6Tul1qnbt`gFi^)1oM6~LԞIp{@*6b]B:>aXx|{È4 !K Т1PiŚ3ބIY*sfSknU a"<0SѯH!t[ dB:ncQa5"ͭVZֵ@T'k@Ϋ%5WͰDUix2kh屔-,rrcp3A& W{&P{k1Ra̓%$IG0%Y 3/FL-*|ړ  ky ]De1!xūy=>Hg ޛ$eBp?'O /֭R,;F"/ f?`S_ 0p_Ο Jv&0=#wXMp>mT%nIJi(zێw cR&[ܘTOUiƣBie谦^Ig#òGXg(E:T-h:s>$unǪnx ɸ|Hbr\*k 'F6(cI`,6X7* %X*d*+^^Ȣd|P&}<SX[#i4FFDʞ,ׄqq-|T i:x?*lQ~;haCK6AtL?dDysq~hFz tmzFRXMe4m>TDVi@07W GD3HE,rq5V $ƁyIEı`LbPZRM UuFSp-H]3|OTՊ"Z$DV苏Xo.ϻ.gN?ų!䞰snR}Jh_ЕW&-1tnI3&+IDToF)vpp=~3F?M39WV6'm-ZJe[Yݵ>@0Vnmi&S9J mN7|$glj]KI}Yt F+L. ny;B ! q.GN+8iN"% Q!9飊u6'uXtr "7jġkƟ(7|_Q0x3iMa*F}ЀN҃ת%Q)!\?bY9Zh)GhPYzߘZ>> (yѻY-D`?n7ԝ5oC`61Hg,9`7?YG"kphkuvX?R7 nI*HԮ'{l,!J! EldMUŅ̓^JVn->kZQYȣGb!;H?<1[\sm[nf1s6שl̕QX-'iam,e4O8 F䤕i\oIEwkInJ.U|-&o;lHB2;[E%B܋XjWȰM+$|Kq "n8 I5FDl0v1&jw;4؃g{ROp D V Zא7NP6}Fܳ^T sHK~\Bh'2rj`5Ysvo][kÃkiVa Wx?RZdb GgmNػ[c:#=9l%d솇*VH/)Fm$y\G?TW0Kp 2 (j"tesr<r*{1囱3@Y^ٔjם=oD߿Kfk N?rD.7o~o'Dyˡ*OPB@q-Rşx2 ߊb3aZu*^jO\.32՟rEY p}rB9ӭwW"8/ kyq N]pcTtPM %m` ?i)ysB"eqs/)0LO!/]@P.hM%|}=Íi>R[|(j=^+ UnTE s.l9ޞU20%N?hKƛ^t?&KYا#_N18텍OeՖ#5kvJU[*bdQ{PծgsXgcxendstream endobj 203 0 obj << /Filter /FlateDecode /Length 159 >> stream x313R0P0U0S01C.=Cɹ\ &`A RN\ %E\@i.}0`ȥ 43KM V8qy(-> stream x]O10 TA%B2D! 
}I쯗+(n H0hYhqgy⤼M2:B4^!#(֘N?i fw6MQ5fh*q\B in X3 IQSendstream endobj 205 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5569 >> stream xX XSg>1K#]\ZNǎvӪuPBBBBY$d#KPPQ RUbiv,ν@{ۙyx{w91jӎK:aWF} I\4!jtdM&NJnU$K*OH97f%˖==擋/29N*4s{J0.=kg%-d H㟟7L0s[\Z43.vK)f>/R%qҙRbb "YF>s,JrV̦q'lnLڕ|ΞID><1 &BG-V1b6A$vnE"XE, ‰Bb xXK,!&ib31 !LG+ gg"cJ%$=WM &X'>8Et"&O@ԔT75xc3MLJ@$8c3~0!PYWv~ di0繩zhz-f81[ T5x VxM<7<@yn1Xvv~8`h_l/-(B N<& dײ8^ ӯ0 <?6 pQ6Zŷy56OI0W4>-1JhȃLroc/<[t*0)D2o+"Bcp'|+!r;.lx+1-`$5E`+1=6Jq^^A`{0Μ G ڎ|Ã07ip 8H47X6i lxc|{Cx 5 U6QÝAjkg4Fиo>ZwdfgWd^XvkN_Amh oSv 8o!: *er,}MɈoƽ["3#wew;3!or?cIZC, tP#@7Lb (wlGv}) yr<$<2^(s"`2Х|gWSM%a'7ҹ5ܪᴚ'8Zxbx9ڊ#!@,{ 4~?yׯF[l9tQ]Zq·Z8CڹC/\buto\z6 ˨1%B3h4` sZ* 3H-sACf>%_-Xr29[)@QZbThS)7 +-T-ZcC&yR9y&shQ?Ĥlva,SנW0 rulA%VJN|)>,CA{ckv49-T 3_yZxg7tgє=?NR$yь BVJ2S#6ſ@G>y2E}bwzҵvMАj{v]БORAd wKC1qQ ( ^;[X  jm&R,`^b;Mz֘S;yKr^Y H ԗ;шB/@KL3om`%2YN5R/=Y`S )@FnhBy"0? r6i$X:uMJ%mkn*wʯv/Uva@Gdȧ/ Lۏݷט-\&A|guȚ)1ehF%ښU&>4ܥMӮAfdF .CNEhA n3 ҳhq.a&tCgQuV=l!;nDk_' oSr{/D'i,/ѳG{1%=f>ͯLGd+ߪ1 B=ֽ yJ4ABÒWl[CP,VdkO\>`뭗 H_{2zK1eT'nx]߇K_3}RABЛ֒cdپNI?淚/D #z8;:#T*an}F=EP(V`ڎՌjg ".>@_1.NCrQhe'Kyn ɭr q.)rN68}e0膏O*=&-7HfHo]%f&v9\AUo;I`-aQ@$ŧ܇mugk驆3G P$o/A^C7~4&5M E1T`&&Ow}Ehߴㄙ"!eKk;߄x,eMm2q:e1]Eׁ,P5(W11uW+(ğV<؟#t0b4 r%z2%~˨,6h3/7egCC^~l0 _Qv̴Vd=KxϿK=g2-Xwkр?u1![f2}JJFg:F\f-s?,e$nEmCxdIonOԏZfӺzIV.xR NiO_o÷c'`!1EO?t 4;b~}v1b X2b@6e ڬ>ʈ)̹6 er$`y_[AW9nf5|nJA?:>VpqE.sW,VV4D7KO9Ir*[pn :N5U) Ioĉ_FVfJg/6Ik©I$QL.)Rg"d}i9NFw}WLt$i1KAVuzwtp)XȾfͮYrl[;z ?~Zp @Fy1rZG:^ɴGyE]kegFXH.P̨ԔW?]=ONLvLz?F2L2uދs7i3Z*44 ˚muPMzR,!Pf7]hK/:ǴOMt4+4~p-.43EĶNL?w!X Ki1= Ǽ+'S7rb!fLE;TMn;(-tnX~fP''$], $*5+IӞ5o_E! 
mkz*&1r;eg-='PWq1y# Z4>֌Omi?n荳ΩعßjWNUMIJi+u Ix<^:0YplvZgX_s',K t6(q?pxety:\DuRRi]2ɢ$ -^kK[km&t}gt OAS=ap#PK(ioIkI;}PԄV%8WoWp\/F =M-ߩs@FEي/PeNeD"֖V7V5؟ZA\B{/q& =7PHZLNMɤΣ"Ǿ,Zw?B=$/WN?4Td '?7+|Skk)-r<503L\Fmzl)rRboKQI 9[E)MZ˵oے\aYv t~7׮L5Im5WR:)nrdG/htYho{L* J,:nWV)OGwJ[GhѶ0C \Z𲫶w+HH4Vvx"vEFmlu98({[W2$l]u=2E,@hcky2GL:hC,@5iZdFN\{MCc@g͹yW0vvw_zP`t5~hjfahp2K7ۭNpF]j* $)`Q.7pRez=, h5hb q km`s񓟰I-a\Pbxˇn@8XkN=Α U #\a1#dWυӕQR*Ra.IxF z|2oa6\P* ԕE';qByVz\ґ]:phomdTT>9ǁdVU,w4͡P6s i䲆ZOCy/*)٣8}gk~TTz ?M4qEz;)ykٯ+7ͻAœZ[Uԏ\'ISt\Pzzq xT^Fd ZCMRۏ\g (ŝțgUw@qv+Tr.NN%Y= -Nf^Sewγ/_agD=ʚ? g&js0!KP'>!فO{L4Vֳ 1deh $11_@j)*TBPϼ]ZDu xt 0(RQq &R>]=i5iA/G)Rendstream endobj 206 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3769 >> stream xW{tSuO bљLU0"JG@yMi$ͣ-m޹{wڦMo[Rr By.ꌜk9/V_(9G$~?+L&J3ׯٷff{YXm0Gs{ ?AFrdTqz6k,C xɥ a'Ѩuʼ*CZ2y[j%%Cղ7LT"xłyRCIf^3 Vk+ y/4꼩Mjj j]zmZWYPR_mP4UJP+K5*D2rj եew2c<8sOu^:;Z ޹.7/eln:7U<&Hѷ/>9']p&{s5 ,;H )B@SD7㟀8.*[K}4xyP z7iZSeH* T۩48Ko$]/jI{` mC"EXV'HFʌa;2J' '~v\qk^7/->% lCm9Q0`b“ "”뭕zazE,|KwQ!Af̷^G?R)-wEd)^ ńE"z08vzd0>sH':[B=%CϢ܉ϠwB+2'>`[!Q|a/FXh%tAրC1w6AX0Ecp'牧7;SSÐ ;8oe `/?}0 Z"D!VyRj1vu A;l|~!\?Hf~k"էqA9zs!t> .Q7ho][$QuKT{eOB;$jB]u+CW>2V4Txo"y: VReyk@ `boBoJP/O ^뤽v7mӃTm ,2}pvl A;9S(tB'*֬- [FhL tA "teZ8ivIS_0 ,c4^7yԁ&r'F7AgޕGFZ{o>ƶiʠ.R(eg(F DeF铛buʰPrL)5/yf1d+-_F+]V=!}0CS}ZMC$YELZw!n#E CǷf J@O cP_6:9d4WW2G-4Û+r{aQ @a_m@aN wUSh*G}*qVCUUu#Փ!WNΑǺ1Bɿ$]_ L i(G+"QVPF偒8`<1`X֕hJ]{\T 3[5|*/2$AcjKuuuv+woނ]pO,IX۪tₑW8u<3 _vl]teBd± oQh?YL@EIё7ǿ,` jXlXROpNI&HC7cEu8q@y٬q`eX ӣ7#L /JSʟ?Du2ח8qؑ!,pLN 0w7m/[j-1x*,U1nԴO܇@d_ka.Ʋ ` L_-2`մ mkO+d:g$RcM)-G~fuVr[ΰvz;OA~>'rT)Heyzcf#`ЃanT9tn〘,WeLyU&pXYU*F,F.3GVbzUF5ཱུj?@4=e-'YN":@="nF3A$9w qD0b=vM)zNpaIJVpq,́gJ6Q2+;4ĻMG7eh)yRwwwttcZhGI)C!qzG1^e}1k)Bzp8~eX;J;5,nhiU6%޽x;qZ9WAyH ."q:҆SJk 6U8Q͇聞}Dh`pbDqy+~?,(7?VرE;?bk,{8{l4eAaR\BxAôH.~ ɻ[/.ۤ.!l}ļR\\]!c}WQmhu=q2׊Ɗg/5B\+.7^=N.&s!o_n݅6Yu-혯*+=&mk7R=v62 ]CKPVl$g.\ӲWQL$eѨEwDլ^VVCQ2a䬿sqhXL 4caC2{_/?[>FCĢT.COZj" 8E]Τ/sEa9#e~ld5_i-v56\Рmjli 
G#!2{)8YuM/>#B<#pׅ$ɂ݇|GHP.Q-F'}q!aB/}VӫvQeva*ǥ.e,y|:9K4爏bC~Rcɹgf#%-JQJԊw ]a[盰W~zeӓ/QUjL~4G!GTEgξv9{Y9²endstream endobj 207 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1652 >> stream xU{L[ǯ17 *Kxu*K(ISmIڪZ4%i 70ļm/`y1- $HUdRmڪM]*[4\ltt`y@ (()-=unO;Ky \ݣ۠Pٗ tDϣ_(Q-M͔do>kǏ= 9rq \ SJJeT\!C=UCILQo:}PTw@B5K*K~A)-[kJIՒRZY/dmWR-TYaeX%v;þ!`;xX>vMpTE^[?_m[ɹ6͙^+DRtLDeM|ޑɫ>ThkHc+W,>|xI⌕6]keFF^G5^LU5r%a[t@Mijp㖐3CD9ǃNCkm%VY{ةSIk|B ORxQEI!ynÂd # fn? SLO}DWmZCeͮ4A\vѮXyz~eJL&('r3ЎN$9Ғ,ܘ?|d ⫵ٟvi 241iو,A77}t3' X_ءuAgdr;:성ZWQ8eIf"S128}^L!uMNF[HW'FhPj#GgS(aq妏eެod GC I3hző _qdf|`*`V2?m 5'Oj-͎g N"g~w!]vu|V.,q=c0_*BK!"{{eeTjp#,3lhk"8#&gM9;fĆĖnqۡ#ݛz=➄!9sϴPp.|dɗ晣v0J;NJX`?2x>4;<5~0qJHw+Sڊ BkËRĤF ^s)t ,#DeN*JK.b_mT3_ܭjW%UlKzz AId#|W1Nn[|_W(U> stream xcd`ab`dddwu041U~H3a!nnne?2W ~/^$Ș_2as~AeQfzFFcnjQfrbobIFjnb ZRaQRR`_^^[_nPYZZTWqr-(-I-ROI-+IM/J)I().Y000000v1v3032Խ#녋|gB~choܾ+'/ɱtFEyi]nތv?zPn߷O<{>kڽsaE^{sIw-GºyX;rN}\}HuMy9~mWsfVǻ]aJ\~e-b+r3˫ns+ǖuKvwN}Gl߫Z.XfEyl2+-Rùy20Pљendstream endobj 209 0 obj << /Filter /FlateDecode /Length 4540 >> stream x[Yo~WV r !<0ȲҌ"ɛu~A~vXla[R/$6Yd]_S1Sߋۓɟ^=ӫisq{s < )O]ԧ.wz~{fYt9ц87J=<9L=렦 6eW_aH$z3Gy7{$QNj3)9DE99mKOqCOaۍ5]\CӖhe8@C Btsu@g" %"=p0=;+pyO$ a;iFpܭۼ%/a("\Lp !J N9~A+!aWeKk9[,Kz x2h|H|[s$!?t:2*)&xHՙqޝI=[Q hFeKB:2M1Gwp5;DzOH)x/Iǎ^ʰ\R#chO̗>@] w r~zEtxw=.4#D7I3ʩp9Xk!=щLr[E㢤2S+BcɨJ +ו#$|۱7Cޱͮ9*8$:Ι|-hm69L0hcfet E&M BgFY( ^OR,*M"txOcUno%3t֒c*.*A[\Ttޢ*c=]US-J |iC]&vӹe$:Uo/6 kD?_uxYu0&\V˳ѡ,e]Z]Lu%vSЧ_- I<Ӎ\Ws $a],6GF8JG H+V;OnY+vrS|QTqtqqIAlJB.rޞٜIp~:Cvʑ%\M$T΂T(5<3*[xVtB ^6H"X0YUeŸ~3 tQ>ur7 *Ze$}z  ) ZreH- YAn*  J ջhd&% Pg1OS/h)7&W=Mﭸ , deMorRyz+P?6[d뙝1 EiXd3MÇ:|N]-+T} O70}A6KR3HAjnWU@ 0.o5=RȡQ>[bo| K-D Ѷ p;GYUy -5.}h&}H~]k,VChY+츈q|~˕TKט%I5q3O^224_ 7MM0E2މj)O!lctЇd#]s%2Oψѹ֢]rM8RNԚ2Ml/r#.~<}IT_>U3{S]`ܠ L݌ dP4C~p7"eme$D`&HYքi}A~0Xr o*Rh?B0֐$!rxݦ$YMɐsa~a%6%7r!gse\cSz "]O"8cWpOwe X8ۼ~?}m|rg|RYyK(pMj>f;Ạ0- ۯif Ph_/!!{ƿC^Lq0JV @vC aV;BL%ZãZn3&y,eQpC ྐor` /*I[2HH<;T3yVEޓ͔ĈV#ovDl$ Fh,I?!`Wy{Th(7j4nim`e#ZeHGKKe5 P_Z6\_UOU[;i[djwYʝ 4a\bV˞D^fGbRCrNev_6qpO#Y/*l%%Ũ 
)`۪-vLHGYaдNC42fKgP#W+`l^:l(Pu KiǢ~dVjbʧbXb2 rR`r LX̸(@1Fֿ{+[FgslK$ Q(m@Q AAWh@c" Jw$^L<:-Y}\.Ozoy%r8okUQx#L1]bs ͺb%0#pk&OeUUꡪ:Twuϟcr<&6b Ԡce+O̔=lޟF[ab*tQqI|lc0@,@INn<\.1Ћ ߱פJEv\?^llT-`H~C}t9O7+Jv"dtTZsXAGs}er:Fn,2v,+*=}m'75/^W`gzn[li _1 MTپ$=fpr4BJ dBp͖otl&iF߭5 <5 1#Aֻ5ෛFvξ-ƥ-xf}Xqi3$ o yUJH/qXCb-2O֐X^oӫwIޜ90|Fd۪.w% l}\LܞBf$ȲEG,\`GqK)sKmLARDZkX؁vqz EydG5˅Me~K]'\VʠMVn1 cNڗ#MŘ:mwqtVŸzFtLͣ^H Fi#W&C.`6*땂3bhy͟Ǐ&Rl䇛Q'xP\3]] mWFnMM\;V]Lkt"MmAL@bKan U6k@uofcl Bp2kMMf#yJ;80KFb=Ձ+J0mEj/A$\Dz`i]z q:k wIa/D.V{$ &{1rN6`Ln5,"0~0ɺ͸z|FP)tO"C &ŗбޒ﭅!P䙈ʡ)ND> r$'(wե2K D0[4,ZqIbr&|<̟i kD@5\Xu~AC\yԋ>J[KgmT79/1wZ<C G pV| `1Isn85iY kL+a% JVB(%rE/(/s/VFmĢL3VC:X>d i*Gʨ` f!4}M(&ds_a(ҍ/>ieO܉NuOF&POY0sϔ_sЏOoߟ ~Ue p~Ȣd}:QP,%k-7cO.u]Wק-W4UĔzx9!kv1> stream x]O10 0 0~ 8@0%!tpw'0^Gk"GpXhu[@fcA剋7OH.ϋy%:EHAڙXWU}uȪ?&]mgM*)J747I3 BSvendstream endobj 211 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6722 >> stream xYxW!4P3`Sf I{`lJ,Yr-Y"6`ji@I$B6%w}J2H%o]>ft?={</dM7o>G$7Ǎg#1+/Y?/q+¶p㥉5bQb|BFΝ3)|ƴis_J%FGHM$oF'fOH7ujvv)BQ³37ƦNJbc— S2F%dž?o,&ffĊcbE)Ay)e"ԝK^-M_&C8gbW{c^6f=sC$t׫O] {PocA}l}MSߞ~obK~68uO>Bc!C^F6clk{ 8rļ7G=2dϘg7ֆ ?7Mpjឪqӹ/hm (%) M'i࠰Ȳp)ٓpidTp(zp1$h׋.;:U@ REaMzz}'JKɤ?" 
k=ʰ/۶/K ̔f`gJ3W3- y9)8ki8e[cHv ,I~g~NziSڽrt1 5l8!Ng/jѫt%"S/ `M۸'-'e1cfW#ǣ$=NxS(V6`(ZV;om1}젝ŝvjA.b*^,T#Z+Fg18,G N ɮ]r&(ҫyͶ2PL春ɚ$Z\})dlLCH]F ([]h.ňj` a6M$l!pjHMfa P5֘S@|%؇::r\)ZQ,@t 6CaYrG׏FGo?&P |2k(37Y*D r6QU)O@Lȃh{[//6x2e=AgF |ږ-ʕY ZwS=kn45WCq>"l/cL.@] >a` Ex@ ~ j=g^E.Y0@5$> ?Df=yݵ$TVz0 }Ϲȶ?oݪoNP ?s2a \*UVo>qI^7XD}PAM-0ö@Y74*00?G);:N+љ2`ʪIbρ&ɛzy\ȣ˔eL*Wd y:t^!h;z@I@IWKɉS.Y4ic޼f i[\J])I@n>dB{ d9h~h JGjVejCV^蟼짜P'IŻ/XMiƬF ꀳ #h^ d (PtJlRP|`Ewy Y9εf>l)>sεh S &%~?EYqM@A^(dWϸ["()VdPzk˲EXܗ=tT=-G78C:r,.<"5K>nq)韻"8`>ܭ#:7W` ~nvIKP(t(|= wQPS$gʉԚ~bK; #x.]G^AA#4vS:0o&@9G~ H+XlS P`]`ɉnuj09xZ л׷}z(mlLDb$5^na$;ڧ ;ou= '@E>Lx*0PLp]Zi0.E~{je6"wPnwyCucFTQn6͝h;1ΘJ2ٜ AlXU ݙ1k|Ȥ^ ÅN{~&w'ts=k6[˦ ڏV8Oձ.PwW><)b?^p3hWG$>缣{\9tߢ?=zE}0hzGoŪ"UJGJnb)JAb+fJYhjpLuJRP:wBVzoū䗭ۣl\>o/bW|n,.]S-LYMuuM eѠ 4:|^c蠁 }pŸvv7rT" t@+hl#BINV+bSU漗zN^bF}NXԬiV <{'*x%s*k)_AJԆSD]9ɪ2$91n@ٔ\ZgWcmo"ԗMAK `nb^6)|Dw Y'SO cuCSOa@VSm>w_çzCg#A6g lǁ^ ޓ C=- @WO9z:)fW͵uaQ<_Գh(@fa_  CGwUa+M Zz8yZB܂ťSm: n(Q!x%J:YO pQu@BO*HOP7dz &k@#m$YE??>{XMCQ>M~k*lU]ʘmBi1J/&Aq>gnRevQH|&/7yi\9Z;c>6 ͐ [rT22p}NCrh0n.1(ĭKk yk3x ok^(x }C0SyQ vje-Hm3\&f./pt'섣x<>9Mt<#o^H"[~7X V` g2j&=- sIyHHhB-µZ(ˬ+K  K/%"}2 8 z ɱթG[[3}9Svx0üuuUs%V,RJ\ᵊ"팹X3oIHԌVi.Oze/~NDN >kx3 FCY >ӕlxo-%S$ĒEEʦgLU (k#|9eԺ2z- R BMa ߸e̫ u*0?Կ/L( +ͩhULVsl01qQy nn_kri*jqDU-cPQ*, Kf5یbHN-4(+mcnl;`2b1٬r2Q)z2ϼ8iPub6wx\_V}*G`ͦ¥(omOpZPs]$D*TsnpyPcU'pan3pl@F֛5*RM}#t4ψ* &Ogh>_Uq5|?fɊ2וS(/d6Pʠ),v-#{#e>=MCzA#FB?3 pkzE`W͚&LAk"YoaR:hS,d TI{>!R4Eʦy,4Z钄:g}nl&U%(7cO}0 zeut;8"iH-CY+I^Dҙ%)TԌT*.K )ERY5[0)ȤxGB!\1d1DHj I*w;Bwv|GC[/\3ZQ]SgW9f8̖`WRE0]%Ac2n| O}c7Z FE!s1: Ӭh5ihLR6K+ȴR*R\EUyP*^F5dBqFTcljLK" ^a:C|p |=K_Z8V25X곝⾇u^/S\F!h%Z Kjj+EVgvꝀ8?6KM&j׫e:ff8N"Zti^8H]d2@PY 칕Dpĉ23@LJvhbJζbk)+3*Mj˛qBm5B0&t_D^Z10!ݙLvyE2-x#B"h]XbȕzrYD x,oa-4A;nR:U~TOd5.fcνz Ζ-'N^4-j댌[.+d5)ﭼOSc\,j{(ϻprUnOTSP]_zc%h ncPc?`/8p'!jݖX+t[VZ Lo˙fˀfI\~n PpHl}ӻX7 endstream endobj 212 0 obj << /Filter /FlateDecode /Length 3816 >> stream x\kd>Ő/̭ޏЖP%6Lx,HWhZ!mXBX{K1ȥʿg7/v Z^sv)H!(S. 
qLpӛZ JՉt0QM!6_~F Ԡ~1qIC/D\=E᮹-ϟVt%(FzjCGUjbm4px5($dY5SD k Z֍BedAQF,nO0iS6‹0b#h"pWe8jWZQL:YȞd6#1mC6I 9#Zm2\'dUTv[܋0؜W (g\NZEpըC у'Cؙ4; `bfz& z݅6d ^B ^-HZe~K}sw @­=D ,#fvJYH wvdGl~Դk#NAFBfݳ2$&LI ! S,5$1mGΓY2lk:K )`S{TPcJuxQl푔*zYFR&'Zydr'vdOY0oS0R`$-$V]hlˑ!>i#S12sdi1i`=uiE iÇ:ec!|b=n ?mMh r,IN݌UNqI&M * Yqye8nJ^)Pl$Pn綴lF~՗@l ro&7f[ՠ'`\%jIє (aVoѻ7#ph:Zg!p\b A3ip)rӊ{ 0Tm O!%1QɔplJ#fT oHˡ+쐄MN H~cT,=8a-޲b,9S|ȁ%ə1-90aB>crw*mimf~[b,O?_PR%>NTٕϐǕ4xŒ!:9DaG `z #["7{ryBDQo`QKQ)[-'S\ya(;F(o:rBHAa?`g;e_^,zL3 #w]_,xQtPbHI{=1Lc xapvg /D1}?VBxJ QAQa׆0ҽ ~ryNɥ:v0%hHZ-z*8=x{fGӚ=HPuɝ`w2ǒ(%E=(FDIHz `Tz!Gu=LoH]=!^FBeTq u/8.Q-E먅ȆJql'S,ӥL=B v #@MT=azXw >BAATtAj@1%$vB.,4!ÈQΠ~0=.W5b.7yFt(4]^*I6R{ؽ1xQ-2ҷ>")4GDQF +jbЬ:)t)i4 OԟhK jU'd{{@ {.iDH7yZ= I'BMn_O_u'uAD%u%4:x酟6SjD YQ̟6aa-)@ttY7^0 N+SGozqmz7SόԞNϥƣ?,aspu9? n Ի:rZzjrz1{5!ɯAt]NٮR,=|c8Y[x~{TxLJ91)j^fkc;fGJ{SG۱Z{p|) 5G/~BqxWMsCK[r%7:č/Â4|[G^Nh]AI-@k[Or5[\(/Ji~\L^!싄dJ7[7fkŸ[H[#RܧٰZĶ{<&}aL)&îډwǸԢ-5̶:;!eԡڟM|XN iξ7u YfJ6d>oGĺbkiUSQi;x[gUeH1YS7MF_7x\{e@5|U7e͋}iWLlb_6}${achLוe?&{? }#gL-IO*S@XtY_;cu8'ƧƜ:{H[ ێE4J" #5c ɲ_siYfbN9~?d2<2gch (aP2? O ~4/0?endstream endobj 213 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2646 >> stream x]UiXWb:%*dSQAP*H WDhAD - 6MP$&.$jK&s|$3?zw'btuHd>6=!:rGjR6d-XKa8SfJ6[dS0X$^6s]Btm{!v'N+2m eT(=cSe}lRؔ䨌4e`dJrR " (S_p&}C U Ueg9r}u:;*pRҴw"Gn ߧWOD0_λTo^4@t]g..M^3vafXrBMH*k^\uUrPW}^~A. W/q3@bvX c)‘|ZC礠3,sPp\!K|רDmt$('hvhzup7K*/4|yD`4L2ǻΙ(ѭ%}|`#ܳR6G&G8Zv뉝a7*79dkC[X pP1U B&Ip%Ok 'ﮞ:ru OñVn$ɷ%|N˖ͩ-W%,YY.'Kڒ/ Cԇ:t-q-DxgK: ZinպՄ]4{p7Bg:C=*ڭ{qS'Sxv|a'E)JVo9BUD!ZH'ZK3C1jHMny־wj혇4_` X:=E1#9^=e~ ZryA~w6w^N@R.| KY0ۿаk(,5414fe'endstream endobj 214 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 @J ]2D! 
}Il[6|/`,@[ 4ZebY8)/d+x@fw5|k^[+x$QESb' 67h*q\B in X3 o:Sendstream endobj 215 0 obj << /Filter /FlateDecode /Length 4740 >> stream x]mo]9_q?ޢ=~Ex b]BӖM]<{hśVX3g^lyv-_|wu^; OBhxawd2rqˮR+˓/xzIy4Gq>y6_ͻ׵Cgeg:I;l4˵vi/>]xCqY;x67?6=Yiv|Ii`7{ɓ~Ah _˭kjmޭ[%u_ofNw${8O,MN?:a>ukszf:umƇ*Ef=|ڼY_][cק0>!SsbŦ@V>ەWӻ7&31GgӇקq&џorro69f_JMݦUn6y~S.6IvɶUĠ~^Hȉ7izɠmO̗S>ukszP\l꛵l}Hɠmzfڏ:P]>XZZ)&u?[_zO|R<92U|İ6},T|l|y Ux"pp=Hh DHKJ%vo4\[V9NGWJs,@Vח^=;]Wh]7^]]JK⽴bӗ-|9yvZW@Ǡcʠv8 \*;x{/5qчHI%-I "q` n>$ZO>FT|RRxuq$Ъ?m#!*bRiεc߻|OŠ|i9l!fLFZL_J}y|ܟ.JN^rKwW2$dJAj4+`_ߖᤨͅ|~ǟ]LSS>t< 4~JJ.% Á DZ]ښ!x}e9TWݎ%0y?8R57Ǒ^=m9ôO.6eKZi ƅemrKH*j"aS[{̾.ʧ9me:8 X &?BNjh>#C{HA_x &tL~dcri͟o7}dfghO:Ne@V[)ea:- ݓ4S'wiWJvˢDO.Uǐ`ez=E(C3J']5ӛIfR)ԖmrN9Hلkm~oo@{`4w5wv4èyēI N=$PG4!A8p<'BN~~vۓw'NBKI ɉbn^hZɖ9w sЁU0y w钂 |3j;'> 9tm禟SO0H%3qgחo^\9ǧcoy :}aȕ#k8ewy5ɛzdՇO_pGg[w LCMCgݜ`fz`jNXth0'׺E58 JA .ŠC?> AVPZAPbƐ5| :sV y˰U @c,+lxc^-:.B>)h303鯸5Skt]\&!06q3!aNla*3uOXL ..,VaW\c3eg!v_$p)VEHNdנ"Ø4CEGZ;nd09jN.jdJw+f0~&b`AD`M "ZА8 sg 10?ѲCb {tX `t=ѰЫVG#׺fBHŒ鮫'QM OL`5o NX&22]Mlgt `]P[p_Ō+3hi[_~CΛHQN5b!v9ٕ,\3:VaI*q#՟h*DGM)OVX*L" LWjMt@Vѿ&R\o&)§]5/`6rD%1Q^]~&1q(,EM& VTMP!ٖUL AjHEo GQ$+Qݚ@Ru 3y: V iXh*dSIJRL+A J2q*v/@K&XVPD{e >B%DAOFm+ K:h`;#$äTR t'XL2h@n F ;<PQ4vMBS}M~DT0,Qb!$$`4zנ5,2l@Aa A +-L+&1}SfF1UWDӫU1T9OUBe3O֜W>1ᾙdb cc[tj|=hv $&1@ ,mH`!Sͅ\0qbfBT``#CTO1Uߵ%ZIJf1@@,t 1ԟ߾CzRecdh{P;ԬY:1!Pk1r#EtZ9,L#bӐ-UuV)iDĠ4\l`=Wo^=d 9躌IZ( -3mABNEMx3!8mh{J# ͢CАo\UY݊>fGkV((%o",,k&dM&vGEY!x"X E-͜&tAd1epFZ=N-+XNJT6#Hnl˕ p6A m&. 
LcRa0e}G)I.}Ȭ<,tCIZpW4k5jLp]X52hd?0@d1ږou& 3&C(&=(qu&QZ,=b,&zAa,&%eQLZ&TFe7qfPj E!c2B83,e&`ʚUɢpkd D#0M,ӨjT#[g eM GV4,T!'3 əTMRJXy}&򡺟X  nbh#mZ,K<&HU&[}2]̲+ 6!XPΰN&$|%vݏ*`IQawV C?QKH&9-Ee&(4MxV;iQ't剔$ "SJ7IZr˃5}{͇OL)0\^3@ /b8HwuOL ?J,L_HKxEU5:";ZL"bMaD<5+Mh(h4|)Hh e&m#@%Qd2B^`L9g'b`E:xih7U\<)yg;ZDnBIw> I,BˆY.|Ā%'40h(Zxc[oXTғED5308fc=CS\}sd-HXY5)'Ct5ZT%VG*IP8*oRHFNS֢uW9Ie-#6(5#" $OHs0"*,*YweQN5)&&dHTm`x>cp"vAM ۜ2@mC(x> stream xUV XMY޻{-di$GH!]tӍR]!2nc⤐([ED I3ia0u-*5nyD0q"牑hӹF@0NhznyғbbS MN"M&%.Z:Mڈ-K_`k7cISmM7(jKͤPTHM"ʉS<*r\(Wʍr<(/ʛMRbjuҢQ#(=JBxjOє$mj1EcF5>&E9;5-; L`; {2|1zȌ!!ClbjmmጮP_sBűtK5 YI_M/tңD*t@ aN\o. Z0xc}q213SiC6,Ʒ2sj oKPqǦFt$0}ǴX.r:QxG svK_pG$C'"pX~PVU0k%֨ԗ4DN0W+S㺀{pX&X:#^\+;2Xrw&gZZ0vL^()v(Gx^mY[ Ѳ8o/ k וU)<]><qXwٌH k]w'0 E Y$б4(嬙,I Ȗ™f~!$i0wvIe$6;X~kszl-~E *tGM஀<~ӟ`Uh)^FBr:<՗F v\5wqx_*v*S֦ĹU|9 L(Oa d @,#Q`>pX%/*!YX0 ''0?U⁠\ :DJ~h7>!zǭ k"c)I[v/:?^8ՊgVְ1ܶb;Wn^8?TIHAGP^v~Vm[lC${}0?/uzDx&\uh0Zp*cG% b5U(~Xr\^@0q@(Vp$5 YIfvqG,(7RX0| >' 'I00 OPX.•vhL:;=oy D=}_eByc󹵧b*=O9YbM F=2[&vi3,pЯi2\癞PMg6.bb]qVqɆ2F',^/Dݷga=9nraڷWl,{esi2\<LAgkh|73orٍ{Y>RRb֠"MGy?zlyT]9cX[*CUh7In -j(inlضQ!-JMFܠ|wyޚ0wIW!R8B~{iG3NUH'hu24mط@v|66_}& V٭{K'$](| 9%"7D_Q8 ]=;ߕ̫m vWl37L} ⸁'ҪW G_GjyM$#Ƚ0yT~fr_xTT ^*Cmfqx <&>taJ< aC5&H3}D76 ~k+}JEPL|C¼Ri')R5(-8r/7뽽~pKl^ڍxzsJČnGɫ09[f57ե܊((]t ME#6u1{*:XXxډjĵbgb!6]:#jEOu?1Y⾪VAC+o_nt>r=0yݱUO$/3gx5u^VkB0 (d:zsؿgO::r#{ t]^endstream endobj 217 0 obj << /Filter /FlateDecode /Length 4458 >> stream x[[od9~Dte" hZ!L6t2.I&ryX6VrUL|\| h"֫k>kʾL*&n"h(9 VzFGh2r[3)rqB0>#4#e!&Wt{QNT*k%@'=ogr1pC: 'D[d\$| ?)ۀu=hFߑiPIW.&&o}Th8(P߷Uv*8 +x M:Hr1ϴwZYL,lz:n=db\(GN9y+eww4lАH!{ +' [}rI z϶ Kgkzt,o6.쀶2~СY184b`*'߱ޫ.Gʠ\lF6Y9.p!/{w:8A k8tJv )}YZNԅ@llIJ:/c3!"4[h^}wMև$wLՐUS.EdYaz 0홥D2 ^4"N.v:/@Wݚq+ڻ[㱥dç,HMX6C>9h)_ְ5gnr$N<ߎ`r9hW~X֢4Xt`#5JBs`XZ[,JXj;%:m6Ej, H-H>=! 
3!N{K|dpnTrcMXm}@Xu,ok y|EERP @a"}OnpUm_=}a]ns!%AǛ yHr]!ox_3?x.e`#@ V^M[mj1PV)eԙxj+ "N5/wlD8VOx&Lf3uB!g)SIB%\($C`;-,zet֋0A en]zy`AtoOO2X^u=eQE@.ΖgvZei=7wB0/'uv[q(l/ln2suqhdxlQȌht^16ϭ`xSXE>F IYj>rg\d1Z(h(]0F/Fg憓#]BwD<ݧ|"1%4qH>0"{G\]}-LF B2QRTMA(D\4y]]M(&J%` 7YsOEl)^DB!32qaT{vE})J2W,PLoTTQ7F&t 1%\Ppn "6򢑷|UۧHK t$RRW(g H|Py 5#E-c/4#s@d&.5 w!8YҫL X&X쫹Z|}׶5O]l\=o^]y֦l*QzpZt-W0IKj=8"AJ~,e9稪uy,b/^ xd1i+]B C x7!^3 󜒗0VzЏ+̋Ʀ“{gVefIvt~@ӵʽOE }sYoXY$l.*f2OQuD8  >۱m9,2?Mgm6NǖlR  S-HuDG+Pk9('Fy*)scL]7ͻOek\H'j]]H.6dဘ;Z^U sjb..YB?4 :\‡_zr1J?˾h_ՃOը`2ŋ2(#@̾+9 gs F@Nj*ٷA{J/yrx!tA8 ةǕT"}îiۺhYžo t]kǥQQ>,Byʼ;+&;_ P!OԷS#ojT=@}y%n^HcbQe'躧 x̵?yʘN4law c%^A̻#۴vnG6ϕTX8{m?erA;PPzxu7u{X`]M_ic{>{/oyXk&%uŝdRhLM斧[hX|U_"N p-.֭>>7AXs|w1EZyá>OC~S*TR\Y} 6h=`359e8e"^Jk @LCt0&b 9mY!=J+r)^Չ:l"ؕ.6؄|0Ƣ@~g\A+lbFw%7 QBWc<wCc16ǹ<"/;jrk-bi^o]6prcXH eg!q{o:K;~ɪ|ښX[;>-y RP27ե>U^ I8ꦶ_8rQQ%8M||> jG?^6f7wꆍ?4FF~HӉzTuF5R ?}#Oypv,%Mp9U]谐9xݛ>y0zf~ym7i*v1[jvc,“c`?qiB߼ىIJs 5 Gy~Ox Hi{|?M y!3ԅvrܽF<1ņ\M]/_q5Ma1LO7Umsh2ʖ #V-kj侑+n֥) 3lD7򾑇!05R8_47|542wMP_Rtwp*vR ߿\a[WE26򢑷|3F0lkv?w΄*"bGcp/'+Ƹ.gW ?O߰YR*끓:|{[G(?"k9tYb0'J @Yo)=~dmH?~>q?6ܛF>&F y> stream x]O10 @UXҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|s^[+x$E b'Φ2?M%K17MR{;RB|DSzendstream endobj 219 0 obj << /Filter /FlateDecode /Length 4350 >> stream x\Ko$y&t~A`$N8qҒVZ9F֬֏_",G(a nXW5X X߳_wGx99^ QDy|rq .c`;>9zދΘw|]J^tDz!ꦒ+?I9FO΁7VAZoXcAӰܴkF֔u!F]U1uЭL}.G)ׯj]p,#Ϻ `,5↨m<(;XеE'*XCPZDX-`$]uI'ɜxOml j0X1IG"s¹xC6@v{w9_'RfZ"=;B2FC>r1MG7^.cxBIeVD4¬&6=5|#HG }1{ > }2"Eu@ aG1px2c%Yٱ|lT1HA(}FrWy E?(ˁ7SSf"irϮR_}ttۺa_@ΦW9Ny*bb2;(/BcUEl.]Uɡ⅚`ildhs{ha\1&&5Iyʍ^<|)G{*ZR9,2-3W\1D3x=Ol58Y]RpP| qeݐun憈.3cIlThRNa"1Wx?$ Ⱥ"" *]kk(x V ,&.@ }CmAЍmR5e xeIhT16 fMZFg̠mu>J2|XB'\\đ T}<Ϙ籃x+|mDnh& 羜 Z]]#M :+Zh%!zKgs5+ DŽ3ĀMX3phczhͲ4D^l${fI_E]#EH0䶸YXEtut1eϡ% ꥍSdԚ3=!/d b:Xҿx.{2+/aRVD e|zO M/wg9m*m,TCG#qpWg//Y 4f%q`wV{ 3R:- /.̫ʘ&-DLЭAXѶ@S8Sp!8.|LBn*ؐ#ME)God8AZLL3IDs UpMoVٸO𩓖*.ƏelW*ZcE,Ȫ{I^(dmېbC,ch?vly2Bc@&܊ ^8쾅v%=y9M8SY#msR+-s,$4=YA<{3QRHS^ūZi࢑'"~}[ZrhP 8lIV4yT(ȀKC٤fd7w:6&'!vB׳$h>{ )gy~1 Y\c< 5n,3W-Acz#A!&ӂ\Jifشs'(F?p>4tg 
'a筩JW MC${NBjFHqud`Zq4N$vnJøkr="zt\ `0Yh܋AkCwQ$^xf{fQܳ4)=uA>Ռ.螵"t{˺L?uf/$3Jn;kLm\=w{=7`4Oyр#.(Nlj^@a+ҕ(O=- z PPVJ e iW/_N)U/endstream endobj 220 0 obj << /Filter /FlateDecode /Length 1434 >> stream xXKo$5ϯhFj.,se0L2!yf&_O=vO-9jP)U_mř8 Wgc-7s\ 5s/jzUYpLeVro˺=j7| lV;ae풸M!wŵPhjle9c䌝3n*P>y`J^eI9#ҩiqq1-DSkjF*ƹIGoWEWExġxğcmyY@ ds~Avg)J3^4 L %)1G y(oMq]hwY7f 3g(nj[D=2cf<7f<1s)3Me6詄b3='7ye|J ת_.yr&9ޅj9W:|I*x g}q]Շ0;@ ;JVpA#98 \֋}k+3W`^\YPp JWKhcyA`lȰ q+-d(cť:̳ }kzj <O(8wOMPN' 1aΌW@ WVlы b **R杓XD eS {; +b=$+nUt9m6҇"ٖyeUf4)Z#ȿGC~߇ B`XwL\fu[VXǑCk˴oX]P)V=rBDN j~ꈾ9/' A4|Bt7P'~0hZIΘg^Ǥp#>1&Ɩ)Q`k,fQ`WGc-Qe+=\6e]"gI]?(vzOGU-('L^v뷧"h>C_>I&qCo˷d>c~z p11_-="/gtMb67J ^&qW ? p436jOt|3 Ö#endstream endobj 221 0 obj << /Filter /FlateDecode /Length 1853 >> stream xXKo7 9ݩ$=( k4~v^qv>RERGλFt.g嚳hff2%*H+]A6,픍 X],/g-Q/XC{0?Z:Tm]񪈫"|(脰6.;!BuB]YE(eudГ(QxUDS1v~6Y(Y`*mǨ{h>DeA;ᲕeN;վe}Fd&am',t}:_r?tI0RI֣'WcE!-W9T&&h\ amܦ}N,{cсwڶ_-nN;3u:3a$)ZZ"kUrF:/(mu}#?&;?,^%ӺLJa\{Mrˬ`Pv4dthcQ_׷Ҳ ^i~e/z<䫐X~&,%z>3*?0 4VU6^%mUBk͂$Lj6|훍b}+kK@`4`fTJ1 &)6*ё>T )|R.td!A\` GH}g+-%s&M`&Vn1mmON ?u<s&²ObxceRO+r4D^yWCYD1Tr }OբaIL:c3< 30EfFi;BNjÆ{\zoZ}֓Ptlng4M? k. "( t X[v_QPģjv'9^ȡ9f7\ě"j_fO?~ll^FC2{0T57y]]݋{|2#X?2}ch澪~c^վa;)Qh|5 Ʋ*1Dm vO,K (~;(Q!'R?b"z8.Ѽ8Ecǀi~|Ť6=ErRlىH!giig ļ9G |81!߲C dx?9!(b;m>1_="ÃyƵ_Gj9"mJdY7ZH>h$Bs@h`jy po0:'>q4g-6J%7pF76-k ;Lel'p,I_2k_2 Nr8iNqߢIe%v$"~[DYģ8O-܄m!rVu#[5ME9{l<}1GaGAFhPS.*O5>endstream endobj 222 0 obj << /Filter /FlateDecode /Length 3979 >> stream x[ko_J7@[/Eh %(%Yv *\'gp/";#X/çtV+C:sU%`!jRGx=O(^qoݳ){<=-=rMO& O[=6hu[ۘey&(=[e3< .I&Fi<Τh°!\AMGǻwՓ}]f#Ʌn0jfc]r11]YO/y7`ŷ96t4)ʈ@Tj)j.r10eu@Ǯe@mHoVdT6Nߗ#M+N;UEb-̔D)@"ͱ1w m,}&$ ! 7S ܱӜ3)&'rEE÷ݢqWϪxZU|Qϫү$_Kqsvt=?GMIY4U 4Xs hhP߳g:(,v-Ƿq34])fn)4)0RbYd>J_WsGX;v'zWגɭIpv* yY'nLCXDpl.3*=SJ$|qt85ښ92 8l+%X4v4k% 3#`"!AF並cG C%icI6UmӘˋc`,#6hͫ#)OW27B']tJDF0; ǘLp;7'a_BZ:G+o %c9$XҎʊSni}U[{FtTge\1˓׈Kx, <xC7iÖ~i8` ߦȾ8k1p8hÌKCPH&HG엺n-$U. 
@!V`/X}lBKC=# ܕw SUkgʼpVl 4ފ>WJiLڞ5t:XN9aCQ0k[ ȝ6AuߏpQ*+8Ӷ`9˰8]Dž(:xY'O8Թ K2[վSƢ*CUUܤB_t,)BcnE3^= NzBt22UX D+IW@aJ\M1݄uC\wd  }9[gu(_=zHmkqqW}UEqd$ۏBrЪڋ5cdꈔ.[6e2&(R^.KO."ڐm.&KFJM#/JdCZU-V.gzv rE{6끋w[amx5 3&**#c+qA 3 OQ[-p]3CSIp,3`C[""E-/~yqꄅ-3&mtΑL隚0Z]B')$j14_NsjE_"d>-]{%ZKErIh~f=K-Ծ*m)aL.;   QZD*KRd@GQ]R{]p;;0q_b1IP/L$v1j4}F؅ pqmtZ>AZѐcHV3ޭ^hݥ#0c.x=½t>iܸjR{_Ol"/r2'&B}3'Lwywd'^r|gc(7b-ǘ\ss̋/x^^8r`/%DBf' 8R?\x$ؼG3i` 4^\e&l#=I -.![g΋LbA_WQKG_XbGP`jY b3^8*.>(㈞0FO' R˥ 4Tͤn@ZYTB*DT'2LU;+lD4ǿn/oM'FSt{L 6|5Ȥa9"ԪQzi!kL[/o1 K>uic%kV^ |85G?*dG@G}G]66AL~ XQC- LR%䀷N#HKhtW0Fܭm5kiT~8e}-vTQz[*NlY ꃅ?5s jPPa9='^07V.}n,b A97:C~e|Z}P(dȬC_}1XMw{M=Jps(M`.738@"hoMcx#96f>tbSiXsKy~tuHiP9r9:?EΥ#?lh8yZ6 G CZ6~*unbP]0YsŎ'P&ӊ>6Po qd"_K$KUt3`p]_r pi zN& qzE^j?XvaH/'@Res홅'2HtG"\B'4XdÊ6ws;{cP sb ĉYk&c)X1|84&c}`ZB35T|<0NI5\CHL"\&q(u 5e|^-W Z:XڀJv\< NrH+@`|%v'*gdGZ2SC @iSufeWHL7߶PUxC4l~ӵ5.#Dv2z3= nR|A-j   c]Lњe3OjEHa5WJjŕ"s|2vKB_@RSv9Lkg:g4 > J$/BO&endstream endobj 223 0 obj << /Filter /FlateDecode /Length 12467 >> stream x}ێ$u{[Q0Pmq ?c p@5rnCB{3+k4z올ȸ%V!!_o>݄77}??^H(Yk:zwcNmCTF;x߆S-~>huQ\c5˽xr4GM?4SH[֖C3gmay|6gISKHcKq׸]X0Zf$88Wሧ&zbPIhtR|:$.r8QCCa`94e?SC4Bm-8#:ٿN||*?3pn菻i`uƿ.C?m+=S+՛L|V<1CWHa 't h($a:8|!呰c[f %qJFͿ#xïϦ9Z[.s/?X;[3/#7*m qbږ%tGcg m }pPR!q||[,wMr,O)hhb}3ۜ|cΈqݽl3ݞ4.~YbVF+U;{ll8vUh[MC-u?c k_1#u )JHͺ&rYi>JЌ9[])9ŌZowBŝ=?zasYJ50Ovo* iI8EUak=2v{oe `;Ҿѻضmsݎx\ZҠwq,GuyU5Z.e\㟬A}] a{reԕq,'Њur|{}8ԳƑFǢ1E_ҼY$XCէXAOcG-kZrkD@=૧~+[ƴ x$6)CEZRViqzNaͥz=~{ϯn/@&)|#s”߿=ßE-bTuHSMPCVd"Oao Sy7(X'`C kr?=a"ϭiR@O}C `>Bw,t.`٢`l.>f5O},oz OC (W!n0Ё@ S;TPa0DNR*,(BlzLѷäQʑ:DJp._Xʐ]% 4"]$Ôu1%0NLfEGfO0EM\肎 xITa!K*MFy?FR˄M1'ab< .TUe3:!2v' # q/c?TLa&Vi= 3&RSHƹMcHLƬUkp-dEF1V t9Ssl`8zGT.vLz/Ffv5\mKoŋ8(7E3&' V~\4g y=$CaRӒ+TcY̾O?1S1;Ɇ졛yR6x$NMn !6tmӣUgwt⒙te0G XJkqY58򜰓B<鱑Fm4hXGz[fνDŘ͐9 ɖ/G-#h h4X X53=#K0F r {(;Ƹ!F"eHE7KXHU'CHv]r4L!pP).w:]TQ$W]2&HPz(OƸ܃(_x:>ܜyo,eS޻9%gWK\ܜ5#Q>d]DЀ6]rrbfqt ]q'3 B drdVPˉ\V- gqKRW˹B)Y|Z8N-l~.&ydزJ}mJY#ZIVȣ(Qs<=Ik'@Wif%a:XORs4"ZEȨ30@'Gr迡;0B*Ņ ːY-F&d ѿ4G0R.K"wIɃ,Z %b%#Y(]~0K048:IB 'Ms ~ ;xڞ<FB64^Qƕ 
mF8]drfzpJFqi*%8@d.\Th>iB5Syd-duxPWU 4!O#gTk"fir; "gcY ŻW}mJ/\x, Tq3)ӡyLG 9M=B2PCHk *H/yPzĹhȃO'{le,ig'4ԅM4#h\f0C^IVPR'\<tiEڽp rPG#.N!̼Ov|H^j$)aQCAͅ'a=xD14G?*)(j_=4H"ECaUS2d$'e$P.t"L $h\n6-9]:B&c1K]B C``6GΈ\I~ jхX.XӮx`}rq5`:Cf Dr.*/.Ĭ91\&Ô*jP2J!MbuBryxcAfWD1]rG;jY>hYaNLe}8Gx!ǂ.I.wte'9Dqp;e O;M6^4#C-TLe2e)/q +EęՅ܈g`T.B'Cұt vTsk.]ڑ < D8HB.>Zt s @2벐U\ +gu*yae2y$eNÇJRG-ePau*tÅ()!@6䪣JVpLB>5d1%B !4mT.,[閷bqJFHGx?1>6ӳKHHG ]R$0 G]a[ QK@9v}Ƞ"nveJ֬Ɣ#əj2$`ZE&xS >{m%%㾭,-2weD0әSU5sS1ypP'hY% lL 0].ñ 鶷UQaf0q$+#&)\\Dm˙.-rDr;o.1}|r)it$:œ-Q<>fc GR]v\%N1Ro$u\hPa3C yX]]RI31m3d7yLqv$օ`B"%ۋv U*Wg֕K) 畷y=lϛ\r4LSX0Y*J64+ֽ˝et6X2X-2q~uf{ϵdbڳ|^)95O>#%OQgx eEU="SKIuT0,(X 3;*'$NzQ9V:o߃8ށu) @ϝ+:4SGE8iN0ýu{ {Bd⾋.?hQ ޅ~u߷KRA P!p+~BۑXC(=]`ۈ4lYEk0Lsw$ d0?IQ<:dgy/dɌfI$7/ 8u*INmv9r3:M~{!TDtp/'|9GW LT_el^zH2]m4#GXm}|u{ǡPoo齁7o5@#;뜽'4B;=pH{C(_x7:RW@q|g-<ҌTb}.? 6ug߳5|-cV(@ִ2ւoa-!tt]&YTwdBA2׻rq`1ڹxϚq4fwm <1/A^N^[9i#Л틻fٷVÓòCYZ̎w$jM#Θ,:mªM׫[n:0Ikam|DZ QzIP߯Օ(-WQMN.T1}f:_oZd.tp[?uop5ƅEBϏyX yU ` Va\*/ݥtxqx$sL"0:j{[Jǻ[*v{~{|w~{x o=Ǽ=~:Z?lOҞo~=~=_Wy{.^ݼ nNϿFowSc_^pZsRqRS?% +'d4䵇t"+h`W[>r=Qg2hIH~@=IB0t%w<|t>&tR6 d,h"7`T++%<~;W΂]=Ied(, D/Naգ$&%̖Ne0PP3 }Z6~gي/N(!13Y+-WrC+a C7)j̡vE( X~<4cx%䄂(ݲ`3@QBK$KOzCW8RyCPb  d_HuwxTI$Dॲ<&48c O(28bT3 /z GGx/UnbɴC"g&}]1zp4pOQa?rV RD%?Jk{XRxc~1 0J_t)*@K񶶐{24rU_ba:4 DNdnIdM,%Wؘ:Ŵx;FZGxa)U`??2W-JƛFI&g?kQJQ QNiHNᐖԶ@n꘡w < epRI(g6'`jOUmm AH8NkMH=vO>uk2Bt V̓[Ac-q5 XQdiX)#B4h aJa3+fXB~6Ugk#y#qoqYbm N 'fKJ7lCFJL4i܅FʁQn&ø^@jrYtՉDL<<7SK zfma^BRT@{*T#lPĵ$NV i v-뒇Viu qyF+.\DI-% FP& }ϥHhIf|1g ʑ_:hbXfv(4ʐ?;!=3p ł|*HZE54LiTsFBBZ`vn5:I \bY2JF>K0 lEƕF.F> 1Nu=z1979U%u+?Yq=2xZr^{k)ٿJ{k)xkYj.;t05uEL!ʯ%іox:"ZK.޲:o-%o-ؽkD<"'rlĩ+w7ƶג )D8Cdz998cg3o1$P~&ٗ<dMUztkpRnGrat:O{c>v{.F?3ٹ:Vp8le>1r'UZz;s_YsRWܘP[\3ؾx5?eq*{w4b15&6:Ưjܗsaח_L-w7|H+˒eƵQefxJrT`2Rz|Oa{|}W?pukJ^T-$.{sL}{czu6gW[/^m<9ɞ%nKkdԦTbᓥgwZ粣TŒo[Zj8`_LcBiz0%)w<7`ڃ=endstream endobj 224 0 obj << /Filter /FlateDecode /Length 24079 >> stream xͮfWr%FO xd ncKԲ uUbUwX"Y*u#s?xy+VܼI&X<>oEĊC~>Ux߼}XyaƇߌ=Oe?Wu| aMx*!\7AS )DXƓ|#\Z==~!1B폟ڙZ鏿r9K||ǟs w){/ϑ 
7Zˏ_34M򏣌Ws_}o9gsgg??GRb*qG|L16kiJ )?z>^&;/=1?Z͏pSr칏_~eφ~6}G}yOs(msr$b%q}嶵?M-I{>-/~yt7x@i/kΗpru_?=sM.㊷79ǵ Jh&oφ7ck24k +XnXd|LCm]#fRi[ވ讯;7'|oTL|_jǦS&62ϱ~ʩ_U[m衦l/9e3>P?|,ӘœN:Ż58ւ(3#F)O !ͮwC^x߫9`Kcs|)eꝧO}(]gwYWOq~5nх v%o vMF^'cX3UOYM/͛id^6k^Ժ #+!^LMtɏ$}WN$Xy췫|fm2?9[^\kLMĜGBJ*l%o'5bŰM֟|m4DHފuc/_^) _l\vճ%V]`<׺m))oaKBg!r?]du1؋c=6LŽS(ȿkyvF8M?W\w?-Cɹ? S˟?_ 2,4dIL!cQ>_Ȧ;I0/"[|*K04Qa39b~h]ƞr)unj>r*wpL_Isw,W? X>+o"΀顴&v_}G?W y#⫏~0^}O?o_}_~O!Vo__|C?|wo?G??=?篢p_ y3]z $0y=)SLt Ef޲Fr%Qz"t<;[>5Ĩ8#ePY`d V'dba!xj 1^׾t?1Ye6}I&1Ê޺&U`~5L4LϿ=ӡa_y#q`?TN YkrBlZ+U[i'[$7R$-n4ρI(žmD *P7ح OӨEvG(?"DJjUȱ1nBȲrRzw值 ︡.wQ=*Hwޱq@MJ^aV1o-;T5iܡ(Lкh%# m\6y.lbù!pפ!@t];&~ӮtwyY;n8 4TX5+MwOx;׽q@eb3-aP| 'nAH Ɨˍy:Qzedwh6L;2b%I3oaGM+.*Mt ⦶ .v0[ !E6LlQ/͏;X0j"Rl44`M*koH $q[\,#;@c}sLɍLD=` Dbeܲ FsqLX*)*݄gg.;vZfSTG49S^hQ]6;أ`,)-W$gSQPT= 9-Scㆈ BuWV|Bup>-(2TᴙX"Z;⃘˚z$f:C:Y)6'/6->ze:ˋn)xւJ/ɑ^ˑ7ɑ;ɑJ#G*&H' 3ɿ.r.i=1U u%.i]uUK:tI.ɯ:uI몥KZWuE[tiq%ԞI!sY6B=ራ#;|*E}6>ʎ)]FA*jwx7*ِt ;نfoh2:ȹE tf;йAy:7p;i"~C6rpV鎨/dw;D BHeAޠ,B]s¾T$wPՐDNwBT!qG̠$oZ]:!LGuW;NegT#>Q-׫wX.pTEx(\~P{v?}DkX%wh`"F -]M7~Gκ w J֥Yi*.dޑU*Vu>ਆv#&Qj꼻E_V@ 2!e}uԧ\Y0ޡ*HD_ ܢ`CkE;ՈJwd3HXdHȬpyQ^RY\}է1I wC^MH虞{S7rq Ԥ+(nu, B- m-!_#D,GVTw~P0ƍpO*FIRCyC'vHvG@^hV)&PtwKkH1KɆ'(07wsvM/TNhsZ#.TJnq rkǀZ[\ 8nْṘE}x6bށ#u5SnqjQ}N!]{Й²G8, -$5&395~r ۂ Z>^zϡpTs,-I1&YRw}0\"oYPfTTvZ:x rІP펴 Zav@uzdp[nO]>b' {E oyh'^qdu[_Zg5%eN#j&aJib&_FOz3tZ%>LYǴC.yd&.+H`v gIYW:Hp&t.;C8H΢_u~,MC)V;^_Lz=+fBG{~ 0s8y/M |~\Ϯ~x G(`>~9/Ic8g8(Yc7SlHY/۸Rea1kWߺϑH&p~j;9^_>b}t4 Mvs6ޛ/^k)߯~Wʀy1H]=bO6M׸XR>h k@Oq!C>htIQ-b{N@ `"zQXUA (R8p@g[>L[:>Ήe`> ^,+ v67 K8\ 1k~KCWDoɜ4̂H/گCp=N\#ppωjw1@OԿ ,'(c_^ȟ%ew/;GU*aSJ)ڊxEAP#t$}&\ q-THz2j"zR"^ }E',U(ÔRkyA9t*"WvC;@[OYjC:ŜMړ'Rt%d^# 5p9E ~?d>L,π9*SUgH ZWhf1rz;S)O#џ> H捑Fd{#A /"EՉcD5Kو= l: ti5wuר*TOE-D6kH)Q dM"i_:xRt`;Ե8K8 hJNIIvx(So#A 2G>dW'RSUOS&AiA;uh:ctmچ_= ehU )\S,?3 3P YKϕ?'8AO=Á?n,G!P*wc7=b^@bN m^ā.@l$B < @蛫O#8Oq ,8_ā@ 0@u҇Q(2wӇ1T}҇1vi0xӇ1+ MOpF0p?l @D(A K8 ĉ@`1q N 0&ǟF b0āX4o1E 68 8#8 H Pqv\F@lĉB`^kDH!Ps(HzI!PAHQ1 v\(āX(FBxb >L82\Hg'Br D  N Bdb O2lgZ$0X(8!ؚF?nMZa#" 
A^F?$\?$0p"PXq(988VپՍPnydO[8{/;SXt "oa "3ThkQ /HcfaAAN( 1f@S2WдA[XBNj[Ofs d6MB )Xfcٜ2e*^<#'Bf!̉ !+@5HmsӮ\'a85c4F;(A׹I:WIc64Vmt[8,qvq"IMʼn]e;~[?ʼnRG.؅<'o$$1! 8r /@[Dsz,j1m-fqi" ?S$$qz (n#a% }ou 6"wAyk8ivwDCS=I]6c!V4L-H]"3o!j)*{쪾I:}~$- DR {(8%~lc߹jaQSYwz / ;cְ%4vF;7VyM1$(- H@=2e!¤>![SSeƩ`  # @8>5$1V$cLH3"b}mǐJ]htⴋy R1 lX/`)4Zܬ|mVT,dU~BۀOsLsPVk' c#"5jÁu Jf+0֡'&`APUkЃ8nm24!)Ƒ$%aG*@&Bj 8^zW91d:T#ciI:ǴXtwU"%nj ]@dZ#` H3$s<H[Y@ P8Nk 1.NF ]9%lç# ҊMԅUE[1onLM͊X?hJ, :S[Vhbcs BWYT9BQ >~:5ѧ|)@6Pm eɺYf_2F~'}^>[']z"]@ 'WJI9ݑO9=GEoQn8 L9y$&{8lRPL2~ !~6RiĂ1\d` ccܦ#$>jؔ k-It¶CTDzCd(ݞ󐔦AFtniO% &/Q=^|u~ckZB/Wn i@`=$ Ёr Xka=k:v"S5ݝ3)K`dd>lv*;_NE V0RLHYB| ЫhkI"MгA<yajèlXh4p?9 lCt*VEp;FhMC!:]R ~4"ŅwL"mV ~@FIKMu!D?;BS E0M8]9[,}\+!o:QMٺIHz db9"sL+J[PqK5i&hioGrԍ:L7#AO1?sc3`2d *CR[3(jOi>X+7`q67.c͐JNb8A' a '&Ȗk2V4-je|6(p:Y[ ll?W@u-? 8;kD;Z(7Ad-+fZd?٣! В.X?詽d8A"Xz٠-Z̵ sy- ;Q_N*$OX?!o\2.Oh!>-ZS@ @ӈ/8 Lr8 [Q`ԦKY(qNVo{恍ذu& Eh+e[Ŋ.NYTYs-6bsx"X΢ VFo2+5˰MOF(l|a}ɇ@Nc=}82 KEAUʰ}FLgTXj *SA0+V(h8W+2j~ HT`XYbE0b Y5ܢm WӪL JA|T㌳&Xr̶0WȦ4zdPwے(w}4<Bkl7Ų *48b5\΃-05OuBi`Pm@Q; ژ%գ. b~ X@v< 'tٝ5A2=5cؖLd1[%e̶`S%TؗN w→-YJcr6Od߃ؖ.+lK\\wUct$Njw`d,jz,\LAGF7Kݭj72ilBvuCK./띁Vlm# mˆV|Q'[.TO,A-!A0%giG74ó~.*F<OW {`?`E@8̇zo7wzd?`0 q;o5VW%$ȰA74㢬D)䫃ADr cW͍ Un]Wi$Ҧ+k7]62^ W$4xfSMĽjPB7 {:ل꾊ެp fنlDZx@'nT)`hP "lC+{Evע ݕ%..^ Ր m8|>.bjEmeWJyF (uyFۢ-]|# a:ttnh6']ڹY hv Q, FEZr"XɛI+3ۀPvCX@A 4"CӜ/ĺ=-ã"eڞ4X%U7C\΂E7K2QZ_Mblt V`{[xiR|o*2sг.τXpMgu+t7k82 3 6q VΓjIhUo\20웺)ɈM}!a9+~=w1C~)b}71ʇpbqАoˤҥG*+fE{2k=t5eO[ioh2,] o*N@K7i+cdtU& Z1ē%li$oOύ*&_5ہR k~7p2cƇT[5Z.oTA}4'QfqA/DۘB"6H^_tQ'ɛ~O{cil. R7MO!EkRt}pQ#\R9޽ʑEu-O}\dyQ͚y\dy<:8r^4c;n0k\+zA7{ݼo@:[Q2#e7 )dWcL8hkdP;+tDAȁDlD#BkDr ENƫ#5b iC/jZ 6CZ'Bkeuǥi`R9q|XGXM^s ZK5&D) aUޮHcrjcX$gg3=_hVc4)9AgT'CM\$C4`++ @g2D1];a)oUGTBHD:CЄ4"A2rP Cc 9[:CX/F̎Du-1J#ZRtW5%EG:3L5_ĝ𧊀_ўH';1qbY4グJ![eg.@Q %{12}AT$f\3D}̪=`@"O)ކ 釪"Pv6M:XqΕg2D i2a g#*Mk~FX4q{}Z"I"$* ~E]3,u.F66 U91JR2 Zi1( Q 2< "؉W?U2WB4vrdMl! 
j])eUJeu"x'xJmN9.ha ֓Lv3"'&"VHg,AYnG>]$w9B˓bctenG|@wͧ:Vo.ԓ~O\H3)"FENHQw*#F`~  ߈u|N5>V2}ʸʦ06?ȳ6Lק[t@wLl Fœ6?D~|EOtBF#86q?o 6?#.0݄@{`4p1JM Haě`#Nk1M7bQrbB$I0f ᡎ 'X_IGb#NZĨЛoĨ0Oq?n9wd~Ȧu_t"d9ΏEO?C6"ƞCr KWvp mml-d1,E8 ؈[sr#`fBOXNvdnPӥp`jNv@A1; 8mlЉp fxUm[q+`Z!R1RԬFd 8ZPpVfԶq1l;@hT׮,;C ȇOv"n_䃝gv%nZL8W]@;1k򭲄d kQkWDs*vEpZp @DZÁHAy)x{j]^Le]Ȋ&0&,+!.,s0“mqL@`mzabFW0@ܶFGm&X#L`Bmq L\Ȏ`Fv0b\v LX2. LGaXlNja11,հ#%}0~Yq+ry+c#T b;B*uMNف%R.FYjY" YHx X"VD,fml,-jж!6;ijc#fcdb!ˤI3)џ&FhR$?c'b&g:m@EݨDè6*57*,/ګme[XVBʰXȲ2؈zկ߮QGew؈)6Fܦ(;lMpbnS6FܦM+nSlm +{F`.m/l^X )v^<ţW^8h>)!PqW$W=L,r5`!fT[9hF6[Ig7 ohi+b`#N="Cv"fL (% Т !p"DBn4Oن@L'qs o+&q?ғxc'?.r=(:cF@j7%R˼"\`*:0XT0Wcj6*(xz: {b< Dd?`flEFң"Fe{> 2/`Rɾk ?;{^kPֺE]WD=EӾ50VAycM- +`5RL@[yjME]ePJ$af' VIBi$a҃` <ƣ"a@ ^I/31}؆CGuTЈ'U$dE:&_+ ]q(LXTЈU0!fG0 4b(LƲk&=*gIV$L  %oOc8uCB+5W„cmD.OKP.5&"mC:d6H'2ݯlESNre ȝ%z" $h$ ^0\ճnxܘf(E 1,@XaE YMBu|hb7H ʂ@xU;Q0!/MPgHcnj)/{5ҫ{ֿQrR6zX w3P X)+t]@.V<[j&8(>^ ZĶYebw2>v޲N{2vyަhBhS 6).W㼵bt*(kֿE-@e/ꈂH ^&j§)ҧVWjQѽ/pj1Z029{AaА.(*Yb{!)C `d2 j|'K'M"0Zcj0$Q@iUuP\~'$cU_]Q`h5JRUmH֨.i>%C]E.5amTV`ҝL֔jl͇գ~u062cZe1R@k: wnh%Wy5(hoX$NHX֚tɥEP4cuN/ F_D%<>Z +( ax)ƄnGOS"&57gta[-FPkEd-6J,Bu4*e%9,%1Ң-F(V.bAO{}G,P,zJ7 4E]`Fr4Tze!ZjMF#ƨ֦6%, _!-xgyCkDF0Iwcъ6E閶)3 mC~~vDX !=.b>!1!i++;1+vL,&vG9PaYZ8,A0>I+a3dE_4{G_֣ R1ҁY?Bf\t`H/Qk1(41Gqb߫Xgv_fx?NIjH*wLVxgex':Ʃ 2n &ASc: gMx\rG(xjŏ?ZNF$+&!R IRW:%!mpuC:i*6Ƿ :iD/'a)D#F3w}Q1/';b-'Cm!F᎝ok}'kjknHe\t`3{Z:6|\$;j$]6WzLRҬ5e5jF&SV{8Vh77+IQ.Conq)l;yZ؆M|,k&xmtյ2$B5I TEW*{sC~!s3<+0JԵte;ek\> k2=X.pn(Ic{5ΜS8IVl['k &t (%|׌T\pf\&+.n ₪1-(k ZjR[fL+-Ȗ@%ʂ1PYUY,xSo,߇Z~{a^ eG[?U3Jȧ^A.U޽ʑDJE_~ϐ"YPΑ"Jr^\dyѳwpcޫ:ZڂBȀ5&?.Z*Ϋ \eOz\u<[v=#j2:Y3pkz;GvՐw53t+BanIykJGʨTq4X6 f'fJ"z>8 v TT=\kR1U7t #_?r'դš*DIP,bR4J-}]'BGRyd $PwUQeP4tSiUS3j?n$}W_@1{"ۦtA7 ȴl!0/ȤebrFuAonhj'qK_j1t(phDe|xyPz[׸jL@8\c[K*6./HRMXzEVp[W1X854z P4ۈ] EseB ~ݰ؈呃R TFԮ,vAfªS_Gcn}I5R ,Zk\b%Q^mg_GDx# y u9o_p tCvNi  \QүH]@G|\\'ü"Uz({ Aua]S| pAT_/yݲcыnINW=];=Qz*gr"*U"_w@cPrP@@CO5R$-q @ ي<'MTrqr5h+86gG0/"ZeыNWY%- у!DycOuPAO\E$1@Cq\@ߕD1S$Q 1::#QNIԁ 65)T|M(Sq ey 
Tͥ6dmd@LYhl)ԁrM"Sdʒ2#Sْ7:STfN)?yzө8jV/pө1: N)xN6'S~N&S ddj#N&ݦM16}16Lmy<ΥE7:R.ͥ(_7@^Ť6bL*y{1!BLDȤf^LDI!zvRb\#ЋKq)?qs)Ul.u ƥ3ťe*J-"IԴTŤXLDI1{21&-ZIImęԁn"Qͪl"Q+rao"u $R Hlz:d\D$[:R58s*u F)7ZȢR)ET@JԁE2u.2u F5zedj#FCԉL!^و)jOH>uT`5\)FRytlS1J nJu Ff})&i) R!ᅮdZȷþ\VvΏrEM@IׁpgVQ!߰ف1٬WD+22ǀ3!5[T͙HT!J6Wu ONM5t+w;r7$]F#r.ܧ)zf7wۀs7tm#,z|plnw31}\'sIr9K8F̍^7'sbdNE"`ӝQ>26wF(I9\MÜѹ2te,kvQӹqw ͪl7(]om،@ɔM(߁8ᷜQ> nw F87;|*nw F\(kڔˏnw F8]ߣA6b/{Zb|'2]ęs燊ow "MOm_pCpڑ;BEg9\"+.YD-30gGn·{5F,fN| `{'B$"N\NGvȸNdh/[:;=/![G.&w V_.&w dr-2oc O6y ǡi4!!CA-B #9ۉ+38gE}*F/7z7v,(ҚQ7*Z$˝渞@R.je!K&#^(cqE4tW18"Fgx1(RQ(CxVQ(>l+G#A:e2|O(cqpiG"A E2RhP$}&E2nG"4r,8]aE2|(Zqpcl_ɋ13#klHgp8_,IĢَ jGy)8528jeLK3YHVU]5鬚52fh^ Hv AOudZ`P 0 k',!'U`@8|6hv*6q;)hDݘmLOEqE~b$ʸCZ,g3G *㇣.F{` yW AEϸų𱲟 MV/.k+ISpQȶz^%;i .  %~yyY362]V20P֠EQtV92FuԏU.AL\^N4gubIeSA EI!x&:qx'+R1HfT* ƦΆPoy#J@,(6녩9K6d<˽gd9<D6";>U L]u ^T7O]Hm!ի0E'hHb&.ot}&(rZje/c>T\ǿyns@Wjj<?.-`Ee l֚TZ7K)*2GSΘVgL_~͜j 6/h]=Ż{UhV kA.|9`86ẋuCPE[^Jp9sYp"ռ͔@{Y2/,3H]:d>bl{roWz{1rIw\ҽ?tǿ]&!Eam0?rZ{imVuAa2.2X0|"6aŀc vƁaQD0FS׸\K].sw¸LVCAQKUPp0XQZ'[M,扷W Ncplgʵ{(ch]G`+RxIlZd7`؈ŀIuX]s1!ʏ`0)e H7T;z0)Xu\ܸ4:+"UwvHA;$ǚM(ݕ$Y cШ_@-١/s/+ cVhy;^EnX(TrfĎ]4^fghe}6xiVilTۈZ Ǘ7TۨT!3b承=P*s'|*cyT)QXD'_DM( L1)&OI̊eŲ"q ^hCδ ۂFsVQkkZ]+Kb5Daao 6ʢ9dXmcExDV{,aG(]I;JW+c(0޺aIp1mnX5-(zw?hM O^eDȴ sCQ(LcУz\16fVgͼ,N[Ynrw%i"!K3xaHͺaZ=,.N\19F #̝qθyZ+S R1YtUjXfAVrʊmX:ƥ\I)K'iUN2eEyEЌrSPBې]rLq[fڻ,HP,`CV{dJx|gcl 6PؙZm@":ڰچZ| lox2Olhb|d*ӞxZ?#~Vz9\4 (`1lӵNZ_xIZ! 
·&jǒj1_ſMHuѹVuɁ;ՖxaɁPҙ^w`0w.003e/1j:'<ӂ&H!J—٥a En>D{ʦζ-UKtђD:DaQa r빽.`ރoaUb/V:KJa4 |>˔WMS\ϊQ5M牚XjM̡zt ^ i+qKw#ܯ/ue2e2* ۬2G6^hd9Z7~e WuVR9YM/u>΃W=O379jɈ!v/Jgendstream endobj 225 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 329 >> stream xcd`ab`dddw 641H3a!O/nn߷}O=J19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU8OB9)槤1000103012)ٽgf|;?̿ܥ;~3>,}} |g|Waq6;7<_s/wt\7Xp9|zaendstream endobj 226 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1768 >> stream xUTkPYNݡXljԖe{DG8HI  /+  ̌ 3<"oYg*ƭm";n}߭{sذEYD%}'IԒֱX_c ll50O0k5FQ/(Sq*߂C7nڴceǶ$Il`\PJbu HHJb2QS%ICAqFyOIb2y$!HC#͈r@' a"6H(҅n@X~6nSւYp!. T70RAqF*mSN 4{=3YW-q_^;Fo4e+זr޳=.ʐP>g[y?<6OӳplMjn{Ea4 &h ד?}QO}+Iiƙ;~^t3#8';^V;<%f"Zʝ-nMGh'db}ay+ɘ#^p윶T? [:18%Pс 7nB>UqW49Lp2;9X#e}Y23)g*bPN {otM>%ĩB {GOYӘ]q8e^ R4Q(@; r~I҉}Cpf~{g : +7cٍJSm}I+ ~lTG_99=lPR7/o1'\FBl߿eg,&շtW+<> /Filter /FlateDecode /Height 53 /Subtype /Image /Width 39 /Length 1223 >> stream xV0@7ԥ|+5ҖߓA0>G!y{{{yyL&F5Hԓ- J[ >==e[k/x ]ZWpwwwyy^ROOOM%0.\]]M:Vm0K;!@L)G^\\HEx*W; SCY[[LNX%h fʃl%8Ys DP>!؃ A6'S.Xq p񯠢pN]7aR9wJhVPZs}`Lձd]x^*O oREҵ`Pbw'E,=Sc0Ү`BE *O)gjPP/::0UgSAPL!_THWk S;onDlT07`*A}M %3V 9:0ʬc3X=bP=d=T {jih jTo±)[a,F&w\ Uܤ7<wA,7lUgLGK4zuS(K SU›0;o+y^47nd޸=;ʕZMV6/:n?FD< [ ->;U70Jk|bGRNTk:DlSdg0_B{%YhM$T6u[^O<TmeWy'aendstream endobj 228 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 53 /Predictor 15 >> /Filter /FlateDecode /Height 39 /Subtype /Image /Width 53 /Length 1350 >> stream xŘV#AE%<۔ #$"&!က/--TAWuUooov.?<<ӾNLLЎ 5 k4vh? 
{+ 8C{{{K A`vjb-L AZP+,M0;>>M%!S>"eKo,L/F 3|lc5DUD [XX ) L?\ZVT%*|www?䛚N("d|t S?J0)XA_da8/Y~d28 bkCv_+%cnnV,n~ /kd@iQ>S1eusYS?ĺ|Z0\oA.+A<<<kqqZ֍ .QAࣃ7!@r|Vamav{{{u@nB6;&*/e $Ƌ];'qсST/M:~p۬!@D<8I_&5|H+/nGo;WWWGkgc~r (*CM^vFߊGVu`> /Filter /FlateDecode /Height 39 /Subtype /Image /Width 53 /Length 1332 >> stream x͘9R+IEU3,ER܇:Y% ;T)S Rv;U?booo|sssKKK?S'ߣ+d%\m?nggg|+++?d=klxbemEfZ\__nыeBmbbSSS̽2_!>v:==mG,466l ӈwqq!@+NNN%>AFL@DoC>~uUnϏ0G\^^Ex=ܯbMtq'%oDsgaL;[7i]@M}jёB<2V,f;y!y"< |]v>j^!@H3e)6 fdђ=ٛ(,#MdCN$Ѷ-C/hyrD3ry`YA-SctA[_" q4_j"eNJP52>f,gv_y722Lmk1t e#6 2V:q+p.#OP77ޞ /#ycWRjS?qAB9a~øJŵEL^&ɷA{:BƷ-|lLpޔee>Ҿ t꣙&j Ԯd^@r}{?HHr!vehq̛N|o˨4-N~qCO>Vg{4pNSB6`B6'O2LRLR[[`2l?? c㡡~gJV^>,^Uy4=*䣥dʇ9 >kSy&؃ogg̑ژ? v0hA4"ʞOq{.[僦~V2CT? ~ VG5?sag_CиD >,xm(?I>666ϴ|nu10ʙa_٪/C67B_ɔ֪I_?>/Gg-TQV_2F2g>ƴeBe Yc9Ҷ330k.?2G~~endstream endobj 230 0 obj << /Filter /FlateDecode /Length 3722 >> stream xnN#fqr`$ÈH:.%%S3U=ӻ"%*]U?϶~<,\8 FLSJyh]>t&㫃W]XILoDb:vJ"nq #M,SvXBZmR(][}A(>ZPEUJ(]02-݄3&t݅06ORuF`]zL 6L:Jf&EX-UD Lm n4@y8i%a<: y-pSFC̵Du3Imn ox&_6W+\ [w d]!un}!#*iUJ B)pI#;A RI Lb@r%U2$JD9 `3@mePձBIttJⅭYv eGv_G7tO8 #aގzh:dR* hE,5s YQ} 1?5ҍ,O[ {->sb"tt toˈvTH`f90C$bpr iBuadM'}XhvA-竦P}pnD1SpQ1}d@(bsXOa725-bF>UaAbg \I&%ŷen⵬o`{6ZޤGCDőGڠԳ+W))zFbx-m! !*n~z Çx? yA-84 eiν"r/ B2Y=mDF /"z[9t6XQ(eyvaS #b{'dbxfkX DmD.iȦGțOO͑zAnF[?^R}4 w<N`?M"@5qUx>Y[mw><3y`~#ǬIQGMfT21oKj!*ߎX2< S|RxY%BlFE_V,"ųPiݎqhXB((|ekԜdv;&K]\ޘUg 6ly'.ד=$w{r YFXȸ)hte+Q_.x2w\w`m_7@)ΘbX~\Lȃ$&zaU{S|%x) )! Lw¯`fQpS.ޮiDUd^l/Wc/ 7(J),@0żkPi7%mdkG9١W=HM2uZO!C9Rb /l4X3n˔՟ d8:vqǼaVi+*^u H,Q][U3RXO{I; `5[N&B@(D zDxi|Y)ƒ/v*K. IU)XT ^~œ{~ *xF@V<ųo !pی}>a3VIj^sl< 9yTTnQ ŘӒʹjH QcizetǶ(C_Kӛ)&64ʺ͋7m%c8kMgqD˝/*[yC%p |g S6xTS \M{ƜxلZ}g,5- /2xMIL_w(Ti⤩Cng~͞ih}:Ct3xM'M` x ^TI7TwWlkn. 
7V]+6U)G,TqEyVsGز(5G>t?R@dd4bA4dVsd1D(6vcFT+魓m`|'ߝU2x O7;El$wDKYȪ5nlrҫgc& ,XCqQ%PeY/ۙ IgLl,+K"7gf !e慱5E"f8"PFa7o S#3nrgyR?1RzG!%a9ܕ)w4eEY@SuT;\hs_ yFlԩƧ!'OɟB<+aDM}0 |h2\nN-LD=@ѱ&l$TE',,[^8 ,;߃N8ɷu5ts;rM{߮Tg69"ܬ[YHݛRŎ_1gUVWm@u)^Re*(lɻ!nnOk`zZ6lgcś_ѡU>*Ͷ#b[ UܳW?qO\LxI/ u)=Ui3Ż@EX6ڬW:WnCM9*wnmOaSi>n;NFOU wX@֟yA65'&@/E2oez[&=ҧ1gSuSw^Zҧ; F}K$yWY( :7> stream x}k%Gw:## R3Hն{ڍ_ci߳w"v{ʷ!ɌX3)SϾM> O?|?>}IxO}3>vk3?oeOWMRϷ7o1>7ocLV_+^C)=f|#{=h!?y{0#e|Qn&\9/1֚ᖂ|~SݧV86 3'9r8u3sK V\wz};+)gzDXi1~c'o<sJ\W;m<}x>|=Dx>~w>~q>~>1oG;>xhǼTyn !e9~ Aķ6,r2{߭M& wڞ%>{LV䩮Iamd3$LulԵQbBVU%(s\*yn.8*cn ebX1dmlY~:?VL61v+$KJ)c?O*7ė r9`kNAo69dڊ(FW1cWb^߿V-Cp=vgoS=~`ʳU~BwΫWW5Qef觖=$+o%E[ͧ{eʣHY:r/2eSm'Skz6-y9&e߈Lg#r\=<ݱ??|~)k輒BTm.,G5v<[2wL7~kyʰzJXc`DCn zeLs!Z͂;:ɳ-9KU1uJaOX/m5X;oOoxi hr5J|?ˇ[h~~֐ qʒ>KSV˕MdފΛ囷5囌H+7>x>|BXx(`>}>;̷ܰu].m}v|~<c??U ؊ERo5>󱜏|lc?89lC~b_ZXWv_jϓaxq~AAc~BbcMO @"gZSX`ӷPEP׾,"d+yᲄF41}cp#KQn!/--[-wds;θ8[?7oWjQϿR>{nPKRPq/,vg[^.d),Rlp#ߙ3r~wopnlD~o?y|P)U|]U|wzt_*Gyrb pOhtYLn9eQnd_lWo7Q'X~x/7|eʿ8W?{)j kOc\Z~:O2_bɥڼ5zQv<Sƪ7E}v1@6#zUIpmK19/7`4?b3TPx }C|^8ێuw6_mXrBo쎫I:~黳:=)~1էOt=қ}*90>"O雟 -@jrImOg?~8 }$>AN,l;#"+;Zsk1|mlOQD-LQDQ"XnD("^"jyGGݷ w+O^'$t:A Dǽn&ʧdv}*oE,t"o^ojrOu"+W:QD#\\jb9p9WxracVxT&E-xw*,!ym*:ǭ*! p˃5&sؠV Sryo!_Ynʊosw!,c?VMOvuȓr栿<ߣFf("K=e#TʴΪ\&cL]ͅ#HU\~XهZG1n)sa)8IAC֍PH!d"Y,flWD֓"E52_oU'JhY ]; #C 4n^ԛ}IyJoO$Th[э?dJ'yWDFO,A&%4ڙ5-9sE/]d]>+VF5 YwA/="!g5MU9]|Wdήu*)L u=nQ~W@'GLxAa0K;WƼ=K &di`=RE S,;k=r^~ C6sQ~ے!W 6e ݝ?esbu &[ea=([T$А$Cct׼fB4uS.Rt2)"K+b,R\Gus`*+bA؝gSi䑴K* Oe^٘o6UVHWDL.>~EZMֆKT" Xɣѕ[R-vŅZZn!hiGd*YEClyAn{DXE;^H-vIy͡ MӚZHMS 7VC{-)eE>۫'mW[()"xؙuH!Jِ}4|t,'zR2e'"shpU@d ;E@b`o;  T( paUE>8A.!{9A AxSG?l!3C?x!4H g)!P+="!̓/"2sURC8Bf! 
Qֿx qjlL5Djy - hċpi"(f/#ֈɶw#J \%N@2E|KQWB@V4R ǀJw8F 11567Kn6VpWXuR1FrU؄rBBQ}sFU,ĶnE p&a|Q L1Ur]f Uu6Z%`RQ2A੊}&vjTSwI1_d-Tӌu0Z9R$)AڶZ HjǤ#Ij#IJlJ59t\4aBx4_M:iԢΎd1 GzUC&'TRZ6c"KiZاbE hc Ln϶hLPԀYM$E2ʝ딲1NHɄ䱃YJ50 c% f>$504|ػW[NI jЃ)S%.'TX Ik!|Rҁ:dAܞ jpM$Ġ([+Բ"Lb5 5:'( `HPcTM-jIb+artߜSD1ur6S3s:'&$yVN-mV&T\s{PɸlD1&@SsQg$XH?J.&RdeLpT j:NK"6q28JK{*ƛ xǘ(r Q%CSPGZq(àVܑBu+B3*,Fߜ"J$oeTQ LR'–c3xXYIAߊ1_I(u]<~E>FVގs4MmXx 0k03O%hJ2sw.1*[K1urN. hl.cE(Fls72\JʤS}y%ɠ);$b4U,*[e4utWS%%D (J᚛&r2R%k=2DcD{kDD03n9+4%Cf& +@JDկUGA5.dR2nzG:Wݎcg 2֜ w7vPsP*It7tv+d8|-d)ug\C;?qV};D8|o,5b J󋁤JFT@ˍ/r'U 7Mϻ0Nc4y+M-dNXqPXC\WOX4U#ws4D.˼IY9ee!"Qvyx UbEM !tQ,Oȏ0_n{Е,&Eu%yOjeУIͅ`w]52 {`X$ &ai`T}1DXJ,1`) A 5423%SkOSY v Y23"8a sY;" bH Ҕ+'[b-\'b{ a HPJp%:TY %.$bFTkYRvUչY%pSVJDb~?M@2YJ@md,_ q4d+(JWR2 Ne`}A u:Y`(aZdd4zfRy@-,ViAӄ][7sCdH@GZhm嬳nDER4xisR4-DvXci!tEWДf` Q42;F4TŨgNѝ-#ESqjc1Wf e<Xes(Z :3;,k7 xOh02AO}-S!:IiDHJ%lVB-Sa^¶M_C72{3caRfhF^"&OMwf j/M]|,y,iB?dMi4ò'No6i 7_U~!Ό묘:T6vB2 FS],;:6Q0jۍ',@USZY{([ض7TLeE XJӶ1N{J;͂=Ss,R=ai =<  BvL[Nk+4~} g^'HRIRA $5nOHGZGg72v9(#WƂ(Jslh(MD:viuk,6ꚙk%@m^] Ra9-cjbzffb{No}, *He6E.N}]`$յ'2.,>gf!<FR}XVF|*>ASC=}Kmt}#Dj]EW;F1aJ& NOcBԐiӶgsiYeG\Zt$&1Msi Jual_vE'DcZи!C(dE'F Ȝc˨ ~Cv[؂㤫"sT&+c®21b!mF,NY93&p5"&OK WQܸEvjSh4rFA82uI|tN.4 VWsK`VVScBfY:'UIHoSN6J{<{¯m*>4e#VHѽ‹"_{i$b~wne9<[ѝ"~%zbDּקDQqFrTvifHe#Ye?re#v#z$M#*ZS gbTjvR`mvRI+Jϟ" UUUYRXrY{J< (wй츒4qu\I':̨sW ɠ"e_ J GoYp%(WMmͦ W4\9hZx-q%e0O73s:$!',T6bftd"'2#"!pTgDORO E+"#b-KR[m+,!#ae8X3%5~V^C B֡Ct8sm Еo@2c!%H >tHOHj#!6^l3;:u+,zfޔ9VznAӃfwPBw RV6pfhRMP ._&;HrIPIjs4dl,#=+7Fj[ 0/HEvHEgЦ `&\׌˯rHTFl!5Ni`Rx$#LK8&,D-e!6) LryސU0n HISA"gJLŤAM"4UM %Z; X+m3tjltA4۰C&$کMg1L] ȈrQ,F[w&G­6e5P.6-A Y[#^˓/=\`Ue ؂vAE I=kId=2-(F=9GݳzLKxPɦ+Y+xVC6<+-0E҂%"z0 WGVI VA >.@ [3#+||ZbYa,ZZbƒMhdw52-E~]=1ԟ$^]D[iW9b.y"־PdśbsuDL涂 T:zrw +,Z!5[!0jECahd-(񂴀2sDH (k5 8dZH%# k5D^0 i+w~W Ѝ#dg+.]f&9Yj<>LS;Ą?{6,@3~@}N!P;¿TK=!?yG;df~@74&8!'>!=նG;SB?LR;dHb~@O$(֥G;Ą f&530H;][;=0wd~MSkRCj9lG_o&E e`)u0>H5Ȯ_MR;Ĥ^A(g#?QSpC א֌u(;#n;Ko BRH<_ P3-"{C +:H,IH@ ɴA(/nWHHH]&wL c 
u2LKo6MB&f1n[Q{01Iw&ACFv PWg;Bɴ/eC ;\Gw+>(`2 =D],0ɯv $Gl-=bw$H-=bzt#_$G^o4H m="0ؤÙ h:̄Lh/A֨DCM'M 9[ 0&“$9K $X , В6%qZmIDz[ٖD^gI‹ChS] $Ƽ!8] C lS"QoS 0%y)mJ$7%cK1礹}H3/BLyq(uO\ 4/;D{`t 4AC7tB>6 _ _ |29Lv46AR%FA`0XA=gPkCEZb#ۢ-`(v؈v-/ 6 3 LHN;58LY)ICt&јXկI'ŜI]&;NwǤRd /BǤ9jygoV8>O96|LռcҼr$:J _ԚqҘD3<S&ytq IS(y҄cȤְ;9vTQB:cDE&DC3%*bu i.)dzeej ]W(H#rF&׍8k\%2(hCӄ ,dYi@ly %2U3NTibFT%QM%Si'0UF󽘊}url,ߝS%WrUG,Ur;G&M*@pUAΫ۩vl n*R%W%ٯ>$S%W5 dB@:+6ٶqF1< ĎCTPx\O ?'~hNоd h9yZ672dr2Ov3Yl7ḯfIK M)3Zdj(A:PTEHQ@Q՝mVU;SPq>)'w FufL@^(J W D!pm <.HQ%rmЎ2CJjehc,3iKi4FPI%1VA?]SG17]k9R<161(AfKs<4Z V .5RT咢ش258wHQ[:zAuYIR2\\S4$NIN:YG1)K{N9ci;iPt@INK ?dfǬTd1:nmulGNGOf I=-/|9u3ZRϋdB Y$iL6.u<3-}r[~Nbm{epj`YjV[S;hjr)UKɼ,j4%:;E>VpCA.xv%P~Ci>f-" 2" K1B ~ %'IyIL# fa܍0whAy -uÆzc32F&x0GC&K3v5w}Q)_ZFA2U@_i(JڐF"2ҰE[fҭW'[^cb CH/BhJXHBʃ8"~"X <5 ^v`~&O 6+@NL%ALY$tDi%.̓ *5f2HfuN7 cy֌k]ӀGXP$Z/S ЉHb@'qZA{@~{ :i\ek;)c#,IN\(@ 0dL 2UK|=]gHF~(S-ٜL bL〈xOcŚ00\79wWDHT@Ka>N0Hl-hjpNY 7ܾgbvE=lyzt¢L5F̤V]E%v$f{Ky7$RvQ>M6mbݫ3TBvN)j!}'(ju6G{,\v9ᚿ+&KYN: i :+4AQlct,1B":u VLH, w84>]WULR%DYySsսMdΩېXfUW0+;`z"u⍏$׫1h`l!aN;^[W ĀmI| \B0nAqQj,@,P3x 0/TlǙ(=ʘ S#2SWu8gى,~rBmZw)ZbKѰU`$pWA8 )-0k@RI#zR#w˶vGfK [nZr v2W>d4jӢDa_nN}fh0IJ& 2w]z;t u+mlf{d {<\Y*2Ow`=4._WxbupDG^bUe)' |yIݴ FSh.du(F![W!R&R@SDhJ'9D1Ds@BNumbHAS.%97.]Ȣ65렄]WAցmv}bfIfs] OdT H9 `щ,Au묍EoHuxIuS_uSm–O}ʋΎ6MASM ]$B~2JOVsg}ש`6ERgA͈.d9﮺,̝`ե b6vu"0D/2ԫtz5' N4̳3bxR *DI1㞪9XWJsW%gͮu=Qkц5Hvcku0N^IXQ}jO91OC$s $d1{4Y}Oa VjzK@i\ ֞DHtsˮ=eS{Z&+\zQCTO1~H [vqWݧ~1sgׯ6Dwjc%qRzZMP&vgSA,8t-zUEw}mӘŵKIJ&H5iL&4I>͇7i'MI&I>kҨJ&b6i&}MypvFMˇviVt?}..} ?Kc_ܺS44IcXt fr/ڈk`X!5򵊆\I+p GwhlE9'C) #W)WY"S40[/@ OYT*:bS兘/Լ)r81u#⽘wbjuI!XR9BRNHB*R|uA(Pv{%*)yΟPR>J!TR8%uSRj !(&($~9uQN7exTCB%`w(@Ei(%? Qt௜rDTG:Q58"!Q ?" GD"o/&T Cv<`J-~J(@B1yH(@B{8PyG@iE1C ^N ճyr4T7,PbQPڟŎrye>⫣I_pLB=yĞVS=yd'-GAH V+v? 
a)[Gб) P` 5"GEzlP;x Fgj#PLvPKXGB NB +4IV,}Mll+6 VmnG9m˫D׍Cl>!wL͒9 ntQ leۑ[mlCvݑm#A{f>`Yn(Bi=xvUSBew-i6LMGŽ s]TvcZߑvz-ؤa;&a7"L-'9Hwoi`ŽqG9ŽG9ŽIe+WnH=Tz-cma(;ݐ4Tv9ʮwmxd7CFvq 9^dNOY حS& )kmՌWdN@U%k!UW )k!UO T8[􌽟%;=mNONOY  X vzZPD);= ы.Z|SЭ(I[SrMkj@)FSrQV?!6 SIv`씵*kS2 TE AE-AN[bmv5{#kAUi>NY "g&| TE H㔵*k\hYj VaPUe$1P'xMPZSq2KK SՌ4@Ded6jwqlBh4BEUyiG ۷1`T,L2 };yj *rK U b@YAX*х=+f=z%Z*V@"U5"}-^S'ZZ./AU|a:tڹ bCeVH JK$Siy2P1m*@Lj~ ۻ0WN^ꈤhjӡ1" H7g .:wV4_6LJRcP"Yjf-wFbV ̀+Yj~,%QARIJDZz42x42I[jLxOb+&n?茜ΈJ}3䎢uUKxOgSͶ=L db[c oj^Xuش/FQ+V-Rs hqR"c}Y<f֨"|Z b4=i{w;~Hּ>rIKl>aE&ֆH6DB"xD{nƾO( Ro4۸vkab,i,[mGsұ[ ,ȼd+m\>pt7`=0N7rfzG;'71-?6}tt|䗤27sv!_8 M-,S[HVQ$&Д"Ȣ]* MTYd'j{& (-h$kK#"O '1~$T<6Sa7`+`#Aa#A%U"`IQHQmD@Q;y#)E=fExld( @̲yc&2:/j;YwNJ;ԝhjw" ~ى(Z2ћLu-Έ\!))X^ĂDc0ݙc;좕הc6*,4aoҖa% jrdb!A9'|c08;B{=FDxHe+AOQɠ'mE\Г"BΎqm@Q L"*6,jmEԀ џL“QzjzQ2~ٞd^Xb"pJ\\=* La&d& kw]sșWJff;;J7Cxb(IYgfl3TTV:%p@Z %(JՖ5&/ZA FRTOi ˉYC-WEU(*sPKG z,/+PEƥp)}=Ҩ@SV &#eLy_y_Zr.Ht"x%Ulޥ1|RŬq\)UiHJ[vb:',1R23m\v^|DĝŰF2-ߨvzM5(]f;|n>kjts^v2mݜUjZt5Kэxu)k,T@PD`7ۮ26lX&RGi㟁/Ybw)TpvaqFJ=,@FcCTԝHRB#β+W- +8E_E7k[6UID&ib}~ym*ZkAƹA"]}0u&XhC`18*&žԄ xRw}j*$_Y{ "wm`bC%$"$M 6>&Kq g@h rVg|!OTjQAP JCiWq$si)zXUPUR`(-8v;ꦂزTT, ۩^TfAV͚eͥ:[VKcm ~|ԑ573w}a'j{ï5SyGe,SyNնAR^˩)t]:&o%(X-Vf-Ïh O[B|xK}jOk O[BPj6?wKZj Ok m %2C[B = 3ȶB$;zsNI7o A&oNYf_y^_.er#zoD B_SȜo4.ݟh琭HʚRu="߭ϣk5t\4fgy3 g~֞`C_ެ ]/Q\Eg&+ʾ$<\Ŀ?u.smT]+qr3rS7+U! 
BQB0S򙝭ȼoN-4h:*t-ϓjNffoWCl7o_`]] 딧?~w>~q>~>!|dQy>|_xs?"s|#N&[Rӿry8ó>չٚLˍ_>#' B<7ӭ\9wgAoǮc8Ct|ϐGw 7y׌c?|l<_=Z=yzc}ۣ5VqEó_䈈]q cx#wz;ïcDZ-/&惡<3c=ԭ7I*릂{fN~wg?o~x x0//7upS7 &'cp:p- 7 &ྖ;{!:^#W7nsnnχ'Kɯ\9kfC7܅^t舽׋y!P{xOڑvW__yÏl~Y`V7|u7mݧ?Z[:pdxk{5s{5%vd".Tp3<be>n^OfNF+bso/~}$^fc}|yޟ`Gȯ= > UXHiePmj3x='ޫI\8՝6~lNW&ӟ{y>v8<ƹ#b2=_;z8I݆6X٦_x>Irk{1fڼ}p\oy/iMR-"΅_|?~o+~C/U~usM17_q{}kWO?6c͕<^_!|8)rd?6{lh9pWctͪQapV?gxzȈ ;'zܣgW z7Ew7?bן6<lV^r,R^$o5n X]c<#%endstream endobj 232 0 obj << /Filter /FlateDecode /Length 2761 >> stream xZKso#R9̦c.+W˕Rd\R\%CߧMI僺@LXZ^.W=._^byv LF(\\,ri]^:dx̋Θ}p^:k{kZl`:MVj-񽐝}2)iѸZ[{!\Y-,髩 Ҩ 75}5e5U54%S5կc*3css ԾdB`G]5MZl}X-5#@ύ8S*֙eGɑs!E';Jo^PtbG1sٔ1^sjėGq #UӸx $#T+;;/߰%{I ?]Ls~ Cß@~>Y8+J u94Lw^1of^Zk~iV˜|̜}]g)L WlF;Fma'a { QBxwt0.wiTG eBL__3CK(K7!4V đ8Hpg+T5dw ܸePAC) PL/ +0dHvՔPpFn9v.C k!\ 1ZBI/RnKBߥ:yaM>w2^ùk+TwC FUhAǮ&554MI* MnmZ6W/,i¤4Lr^u " 6mM !MH181} u; q)pq^pT#Ϙ"X8glg6ڠ8jZQӰqC^+,^ՋT:;%Uo35,1˓'zRCjU?wu,.ܰ.ռ}_j^.~^f8Khg&0OFF_8iw#j̰֬rټ%>߯bGyqhJVXJGkUdéD'&:Qh D N{ 㠀ˀJnl`1O4иsXXR^HT=]uxKTwMnpc` i9`fy!R3]_MKIamCBA3DlMj.&RΚĔ)*Tt-L$I* nK% KC??-Tmt4jʽBOsXp"氃@0&fG,Dw&R|IG؍؜iΊXm$U#Bp%×1q҉_nضIn2$V,<&TԐPD;lJM[ZHtaE/oq5J75 IcʜǦb =kRye8۴2&J%{3dN[ ,*|wXѴ0hTA^pF+*?Vx_{u5<^^ 4pSi>PC]3D2rtk ẢjKebxdm$9#/Byvۘ PYj5dh^2vں 1|I$Q_$ m,G07(ӐK:`; Ұn6~V)QEB&=dd~kZ̴B8E8QnWOdҖZ3.}%U}Z_~EqmoCMiY {V& xc x|?f޳YO*M7͜ ݻ'0] +  ooْds5K߄.igFd3c/q%S͡&- _bssC ;quҭN_>tendstream endobj 233 0 obj << /Filter /FlateDecode /Length 7330 >> stream x]kuΏ2hUu`@lH)\ܥdszv27m7{q>;7Wo~{t׫ds)S ;"䝸 z>)iWbTNND}n7Yrc/o_v|Xzr 𿿠z 949{{SK˝NNW﬿+7%Rʳ-; &4 _t{ l/jχ6w))|q{MR=ƚCkoXrq{-N)C/c1O-.Q둡+ dy BSH҄n{5)5ԣ XS7l/ng{q*b0\XK sk[(b`hBuS,e]\\" x7?`^LӚqa0}hn< fXPlyjK bUqrh/23>㛓BLۖi4 Nu/AqJ] '"S+/Gb9`y#A 큼v986h` < "큕 죉8ȾB^ lozThGHj#SV4L[ij̀kc9R@ 0jj@=x0كQFc 1}Dh9DvP,n tdT{S`tK_XFb$Kjo5 qhnab8ל ht*.ŪDiUcZ* 7,w@c2teNpy+0?I& fZc-h/%uYSi ҌX-+hyZ*\ %.eFLȂfB#*VB/=!.j$}``lAK,"7d``qբ\E[,t%Ԫ -6%B/ɭhA{eEȳēu! 
,/xf‚)([n5vA@M$ t#Z_RXfN,j4x5a蚅u+X^/ Ԥ+0b=pz+biC CVpe2H fQ,Ԓ.o4AKuXl>\7GU Kr{xz/ XMw+P֨iyjs{2+`?j3XLL"ph sR,d*K ;'JԿp๹Y`%|1:on ,ޥ3-)G%٥`W ǐxb]a9XGHQ(Osu  Z4bw6B1`nآ8VSKL2XN-"WNkTR 8c9 X0 è2adu5h-'7p7]+cf j7łag`QeV2ZհqEE(8MFɢ k42R` 9@m=Ik+`V.dEfd''{2`,i&rrb ,1.J]+[uZXWZYi{=Ykcz~ǢZVq^ŤH(Ox ! 71\-@L.6Q@͵%{̣H8h%Y4,AVrӲ.m1%AqH+V ؈pbpHߐjlQJ;c'^wYv*Gb]kW?&*`Iwfy,:DOЇqw5gD#Cb ҮX]hǠ8+ K؜xܔh{q_ wK鰘Y:ݎv0$ ŀ Yak,̸ӱlhAc,lm.AanԨ`:L5^Ϭ48ZBxJ_Ik7XʪQj@NBH-Kg"hEx|HNxU )AAA3@IQf;C|h7?r;>هVwۨjTHpNT=e8n{ pOT=*"0"(JZBxqIw-G@Z32"+#Zbwp`MA hkF'1hcnm@y,P %NÜE֧0Kņq)A#̀ɩro4_L@Xz9D 2%;@"Qg[3߂=9xe:rA d=:At􏾝ŹLDm/ZgZ!XEꝢi䡚˔ 0 bk?Ԭ3pV9`#O>.oh@LS6^‘rDSٿ\a<@x >Csj̳AѡNQy7:6 /)i\Zf8'],/,!Mlryӗ)cS ibIx8-|,W<w)>]jԸ~@/[|/NnxŬJ^m>MEW6qA:6,AO 5-ek|Bn]^:-΍!4^s(ճVCgOp>ToџC]ޢ~솁g FM,fRe#PR::,7؄ʨ:D@"]B&JșG ba)ҽs1ԶSL>z`t\#s JI00G ?ca{ѓY.6z,Dk~ȞzL<#?lfp\álWX#q6[\ g#1R&Y(\,-oX4grXd*'WɼP}I zlFdۢ 8dƦ%Vl^8.o>n7m2iD3 Ѕfgs RbPڛ {Ãz- B+fYΞ /Qe8fJå8i9*4J[iB#yJ8i8/6fϚ39 EY4vs2-EzBzEslZcA-K['[@U3 &_b[YۂM5t Q7 ܃0f;V*a5Ӈ飡*qo"R߯qcf LdQ\?]߀1۩jN:L"n i-೙d?w\jq $Zh5S.無-޻Rm#>@ wfУ7J7G|;kڮ Hay{[{}?ᤵR`#'7OYPv؀LJ܋kxql&אf = y d8K QYW<@bƲ~,0?˛o,qdn̯{'̆o[b8VXtY㡕7uhKvn;rhp-}V熩G2>/N ??f˭+Ͱ߮^I<ɕW%<[Ag 呔5̼hSkPz4]91RB2fq~9c KI9ڥf`~NIm{ g@j]oL'{y/[P׻o}E>SlYf?R[Mܼΐoi,(0 j/ll›a}hY 2%LWy;lx8 ,JkGd+>L}xy 'As2a)dܯlyެjv(EoRreg0Ӽ΢)ugp4{df7WKba_qj/Պσt?pw#Om۴cZ.&"*%$tۗ`LGKZ _p/_f[[[QmFt>ې>wjCRA?:]}7^֯cPŪp.u] {ƅ ߰˹n/뗪V~8A U>18F4]'Afyh-/IC7賯5@Pب'*tB7.cc~ut\2Nٯ4b"u4=^ dSGՖ!}Ƽ+zқr1GAiLz^Y/??z\.7?3Ťdˁ/zE1A}qlAo{mljaz?vޝf뽎Y;.€{{) }ֵ~|\/?s^^\/oˡˡ@`(vK O51T&N6n,Y&wo?o_<*F9KriNcDؼf~(ݍu/C}H=endstream endobj 234 0 obj << /Filter /FlateDecode /Length 3630 >> stream x[;o\v&$Uny7SF &` v Z($W"ׯ&?! 痤H&33w/@h73g|5K8?Gn8Z\kYP1/b[f.OON+"C|xx Y5tPa8|H;Arlpu9)xwŸZ3 Z8ͬ0 euq23`,O oamü4"QuNZԟ'a;>.+ə⡄v, J~-.>H8x݃wo?~'%&Ͼ:SjPoxϿ~?{W=?aΣÅ1YY"8[L4>s%mf x曙rES vL k,PLtW$2,@pB8% Tʒm% 4ҏeR`V|Bzm,$l~r (8|\1i(,_Bj~)2LC]^JfqH%:d֐xD:k[r.Y56sH! 
FEҡDSEAIa<[)K:T.Gʒ4> ca"9S_q|e:V"up`{,R;`&R4l|VgmKY&PhKu ƀe.3(;VIҡ)*K:EeIZkG1ij.G//9Y0{BfJ|9"N 0a!~ѰQxY-K7M*PB,2<ԏI( HAL {HүB{RN?h)=(9|a~P@9.ιsxm99?) yֶNjblM\A _$A EeI߃x E,P\tKe E4H=>n<"^"7=Ƀ=irW{ hژّ4@ۀ6n"~TrڂA}(Itgo'7D1m$.FZcIqP*" P   iN\ ͌"wqUV{/{wq{?=̄EFέў%&"fJMnt( w%EeI7h $J(CQYҡE[IysHoS–WPシ4o7irҸw)Mssa%TjH]&@  hno-Wu.A_/rziʎGK#G md ! uf9@lRNǚsVX1^Uw1BL8H>AVڲWsR"{+* ZXi,j\/m }8vHFlvaY%P9ٽй4s"NnS((UDc1PE3Q[rN{--xwЯN)e>ڮttl`ܟ/WC,CXJkEUSORAoՅio4ޒX.2u\H9gr9|6Cb2^qq8j"RFsrt|7BSWÃB]PJ~Y X`{YHpkwq *j =V^-WN*p5N@3g1#uJDžP/wB0"/c\hA-b.n%B1Rk={>s%n;)Ot|>k/ i#qf.c$Y 9E*0~aXhUZ#饗y>K´R^D-Rp-VnM[ ,,&Hbs*7 kS/o:r5-\={jpl;FyBMu} <:V8*R}Iu,oHxQEPU8]+w nK'.Ytt^jԗ@Z|5J$.Lhgg`I\rȥ=ӿOE>;r׽=S S3\Z贋+NC/imL2` HNŻƘ6b FA^kuN "Wh"6GG#َh%#^$;Fc}HC ymRØRsEk7 55y`gQʝyw]*$u2^ [&|0#@C.yZ3;HպE&Yk r}ea)_aZSHzIDژ'zK6yV ɉCw zZuKcI|.^kM}+u)>VK,l~c}ca&Ƙpֶ},ѻO. FNFQ2оNlv6>/?I,!Z h:FcXjexf<]Fi)8\bWp;Urk2䩮%N3R,P,Soc܎b 29T*te5 1}K@Tc5[j86aqO7+hcDHϲj6j*R i>oM>k`y|܆'mxކ%k+7I:}hsfbv6|V6m>“?endstream endobj 235 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3361 >> stream x\ y'((D"hTJ4*DP)h$QH("H4*?w;ws{ߕSwߝ{s>s_~]__h۵,//徭N=$۞d4 êqLmij[nݲe֩q'YZۦ멵Im59akg۶mK-:h5OHd?$(1DtLc?E0CQ&L8ӱB;ܶYhig1x18]ӦPbG=/N|"Ca/'O_|am E𫝁vO ┰hy|<h9scΙd6Ȟ#$9ہaOB&1C&O%#|;J=38aqʘ]'! r?yg'é1C gtBn'Ʌ] $!>|`]5?<} C2rwޅ ȞlX$<% vW޾}Um$QᔙBNMrɧJ?@o޼)0/;L;C9ͺ既ygpA E'ׯ9Qx xysv(Jt3ՔĿ@a;tA$ X긶s 7z * ߿?-ask^L;T7"19&rwMX4be?Jn{޿se\qw|-G$-`z3>L{#v-p/s)7˴R#_{S͛7A ZwƂw0L\[E1]~6k8us=G]HV <9|k׮]*`|-n4K'Y?vn漆V5< xGիiܩ-xIgWZ8LSw9g p\-$ 'W85{; .]4Lvl||Ӟ?UGJ9J/\0^(Q >w>08v~sU0XPAC9X^˜'.px9Uny&gV) qOxS?\ Z\Iɟ6~49- ճ%9z/jMr|'OÀ߽ .oWk`׹b!kC/H+W8qB3cQe0Y3?i? ۳*+9 S@ye&=z͓ U0r%y4|/{3?gH;tÇ{)K:rMg pAe ǟfV|$?6[DUWkρ |ݭU[mM ,<#/@ڱ՛,e ݻWԒ/. 
n,9l"0WY 8 ~:ݻwvLżh~$Wx\';y9;w,/.6o ^gڛL'#1 &Gر9aTg=U4&3|m߾r.Z,΁,5C1s\5V}/f92*.2r௭-D gx\3c|<=V=|d[IsI_AΡ^YUSTs s9]v9r^p(payޢI^]W=a;g\_E+d5ϴ\sBGL֭h'D!~)iBs({ Wҟ\⇅%3_G)w,y-;9伕;lmes.x}A:F5)_}dU3h\g9^sBS#Gƶ'C y^o,G;Ns\;ve:<'j)ZsI>sɺ|gڅŬ.{r {Ҽ[y_:uI]'~g>0 ?v0#|g^a4/^t]}-3S}Aacezw䛂yrF=}Utav]|:̏9/L1l \Acޜ;sF4bީ^<Օ76}˹fQq=ۅjp{&{o߾n;?{[y>-E6W۔+Ed=\ʳtnCUd{Eӷ\s]cÇ=)kP-CB~Y]"̻GdaϘ)E >W s^nmf ˙3/^X?QϏʳԫc3bHo h26T~h57f;ڄB͐6dpΓy^ 7 0_^[;-es7;ss.hݢg؛SF0im&' o߾qKˠv/|`=ss~&߿KK+'َ& E{jӟcp[F㟃Z%)Ԗ BgjE-Άyރ3Ä7ϟ?=yk?+g{01/ơ|M_y!Wb~ 0s [y1׿$yO%Yeø= "k*sFayo]Z9\C$D BC-gl7ߎU.'|N嶢>>!vyendstream endobj 236 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3742 >> stream x=ݽKkkV,DH1U* #*v$UHcB,4B`Bݏ잝3?0w|[;<<<:::wN:^p?0Zk.띬Vry ''' HaTN\?ƚZJb`Nblv_r%a]E*TA`WqWݲ_dM $ ́%3e>-d-73B;^1/m/01 =7x#LVDs}J[|3 y_*Bh57|S8QR` yb7x#Ykhcy뭷ȳb]HJJu?24b[؋'DG_~mR'` $N,6,B xFG7\^ d;CAAՑ0k)8OuNY >e> F~yP s>oF{,ٴQE`n汐uEHeUիl8#;ɽ 6^#4M1~G3*qʽIR '/ȋ|"q.'ׯ_9@2܌NEi'IƘ'OTm=t3B<4ܳ=^NaJ67O>EKy"4xz{ S* /' C;s~pϬP~F5mM䢽?ZJd^-ɭ!*Sώ5 V4W,X$69U@oWLwkj |嗹DW3OTNanSru#zܗGI[W_}͘gdD8gݫې3ETPkuvuHrۭ#f u D;ʊ@';UЃa^!ϴ5\ ;9o0)f5@b,g+nơE?tX1[/ *lG-C 'ڕĶj WޔR_4go<\ΒNy.b+ga: oݺ% eF-%xҒ[[fNDNYܼyŸ|{xi>OaLoqM֭\_Mth >ZXZ7F {FAA|TFSܸqc UkgakڐD~КSe?Spyd~:Gb1p {$xyY.%sN]qܶklrסORg3\-o7lHbI,2bC3ize >'^Otp2뾬)Cr||Ϡ=K3&~ֹ?k0a_V2&xa>މU{LL*KSL,.3OW7^`t;j~_84x:<2Ϝ˺} |,f߷"d:/h'a>ތK]YG}DÖ+bmg~&4a~sҾƘgyLu^nQOM,)ݔ~quj!N˄0rř꛽^ey\۷oi1u![/^b9(u^(Y v0.{Zg?jIV9EgSOh4Ø].GVBYܽ{WUa@^3L~ƿlyeoWb e ,twRU+S 72cTk"b-?uC%&KȳQOvj6'),R~G6^a0agԬ_>vumb]ѭDvW~7Y\&ȟO{Ɯ ݊jGIy6@&D1O}ci/ M-ѣG bA|AU*sN#&j'P7-XNPY`cvcgs5QK[f%c x!lꂝ/6+f 6H|sXfN)\i H09{ϓ'OYN0kv~5Թ͘L {:\9jo[BsW] Md[ʊuV?~8)MRK iټg|\d-|4umZi"к z(Λߥʷ  YVh&<2"9[^5S c!٠lcM6z{ճٯQYX-`}b ޼>-hZ93΅͏m sl+YSa>3-pfKm}TAU_J{Y8G34ŷE ɖiZN5*M97[zӧObxHҚz_꼬u-1΍5[ r|.FF0WVmsB2cvnVy"Y/?gW9Cxnm<|P+86|4ur]ʌ|JOs3Yޥ[~װr3{|nL+vN3 g&~>>3e71531ߡ d~s~"6n&& Kr3 ?= } 2uc!dF 
;"h1Ug"7|c#Y003F8w{rkдym2!6o'֭߫[uW8"inn8n(orf^w<'8711/{ΰ__d`3~dn&xc |ɚ%.1 NA 8O7oZ0)/:gE&0k=$!/꧿#ca Ȑ7Ti<gڵk:/l!v"ϻOo_F`b>GYĊl,=̰W;dK\^2SmMA2vB%r??YGMq~= ~4~'ώ9Y^ʕ+Co `? ɗGE$ĩ8\:_xfJM9{ŋ7L%<{L޼hƌBHz7ߪwo D&Gxu m[A>#i3MTDI}-endstream endobj 237 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3643 >> stream x\=]ޫ+ "hh(hbddfbbb` Q~swʚ>'ٙS]}X?~؈l6W{oE7eys'8ƭF2Z$~Gy4sԍ~ ks~xG>aLj1Ř_vr+[ׯL{{c fy{}F qd+@ϓ `>wy*E|'fcf.|/.FG1/#Z 0ķcBGhgEFCrG]lu?2l a1!Dr;@u #Bܞe->|`org;A&y*@␃| 6A cʼ\o3s. 5 Ϯt: ¾I ~972@ pI# pႂE7o߾<<'>[ 1 [`xe!ŀۙv:`3>i_߾}{n/)5acRGvEp;KHqln/ [hd<:J7Q-` 6T€\vmn!Ƽs>Ch7{簏:N)\ti﷉B$/EdxbFDn;vK3+k.ˊȅv5GGŸSwW. OO3v5_rހ ׯ/V1V9yŜy;a˭[*Kj֐ picł$RzԹRוOܹs$ 86^ e^>k^ܖ9}{,we"OPԯoWu'b3˹-|R{Yr?K8y0#rKhfyN-bf!%=ȾHV<#?Ӕ1/ky+oMK@y?{ԓG>PnDc}`E^4?/^|^.㇛Ǐ$ ,fms,iΟ/οI۝ڋI ͡>6OX#P+8+b'Gf=/-0^Ei#˞›ybMȚiۜm'q_ǜ2.{ ?=jA˩쵩mǶacH= 9Y}<3Ϛdw;%3๼Z>icٛwp#}r&_~w}Jta^y5vᐄ3nb16U%A>'KXsۏ=cU,-j6CŹ sSqEzNj[)o9{T˹Ӿ59_is*˪o?Zh 'y|s|>|)3_@XGK}/>wcMvȑ#Xs!)ЎGᄌu.ޖ׶\L +ѣ9a?IX?wo}^g<)x?: D9vX_Ҟ0T@)%QquNڴ;i66cZգM5V*cL./'N}o@~n? /oUc^ ;++;ydܗDEacvlVˑ e7O8@R˩S?+P/I^~ȅ [Ϝ9[M9Q4pf$Ϟ=;u$ko۳CoSKz]Xvp,ܹsxs!~Þ?!™ϧ+N^ܑ.s d+ϟ׿Oe"BXS$> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3473 >> stream x\IoK眸pĉ ']@Xل؎Ǟzr:U3V{hn40oM䝦|żKzlĖw25gS}r71߿ |p7ڹ!Odv؊U?<]pAh춌y#proA>ϖaS-eȡȽ yx!gNm᪹;;Xּ/y_g>߫ Z#k.]2VL<l/!KK̯F˗ $Mvv70߽67gB\lvcΘ7Yy{c]qPrGwիW '@-[#法=8Bja(vqrhuH`x웡l9ro.?{.x~ Tǚ7X`پ^Gv}肒_D#v[v`~֛;z*L͛7&ol6̩^{lz\۷o1h 0#g)x|=W{̒#C/I߿g xC&]`f>ka*%%""wvvd;QӧG^vh9GowЛol>O}%i`>|A  ; 꼀gpWknbK ",y-`?}bk]MJ=?s^nEJ^|Lm+7 AkHx0&-P8H߾}!#Y94%Csh/I})u/?~4}/8ɛ82pە+s߁gzf(L;H7 MA^;ɹ g!xyGԎa7]^3KFjPg!~}V GAd"GsVs߇`!{>RuGKȧ4Wy⎊"B ,.ȹ C#l_GK^ SM߿gH%jv:v2QwR]"||*hȹCU 30@>ZhSI?}NNhǀ$wiB/owЕ ,0ӧOa˹]`sGyjJSt  4xx ~ww͜]Zm@^JhyG&!&:۸!ʸ x] G - ~VX3;a$."Rm#*ǫJ|𶶶ÀKKHmYl.HX/ׯQPx _*& gfd?=\ &1?'ۢOΕV&Zf4l<6 g({ɓ'Ͱȯvdxh 9FI3O>h|{VT*<̇3J͒y‡=>f=o]j'#laòj6DW M rЎ^缪sJ̗4/?Nu떏y2':**!~/˪RDP766%dN ? 
:7eg4sxfuEaStT]#cE 8zUۚ4KoEcP,֭ϯD73oD!z%KvKg{0A'iߏs&'!p9竄ݖ9#qCp Gr8Z)Eh2μk^<3/#7)38"GK.! (~]%n2~Ff旂~6<H۬>g4.6Μ9-QmcI9<9a~F-=nT1$p\r!togϞ=~8y^iyãCC"ӎml"J.?3s;/uL`/g ~3g 9̘3έċ3YFbIضc>5Qc8 xĐ/endstream endobj 239 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3344 >> stream x\G3e " BaBXFB6)cbĤdDD$Hgoꪫ^fٙzU^j8ǽq0wwwׯ8ڗ/_主+6ul6K)qX,bcs,GyA7gG\+-/ǣwX#M32r,,_ 6 |>֠U_,"6L)|FW6%Mk+dd\g5glLk6g8#aq8Nxipz<~qgy|N8xJd1 vp. x:uL>[/Hd<<4i|I?#ϴC2Nwl| Ѿy:1iXiyIX= G D#c;<"ʙ9|^ܒ Vzᨄ{W%D!x!?cSڍc{g ūr]fY]yC"C"5ˈע }I\JݲRckecA_a;C|y_mZ%5{ch^!%ud9aaJa̗ Jg^"|e~YXQ8{al6E8KCsɜs׼#߷wyXEuiݞ=jkq[Z|* ~񬀏U[nEN~O~r:ċ'T>KOl|`7oxK8"\y|ƿ@PoLגr /Q ўtg9;l S]W^~@_EG>Y pAa™yS2;yfΆ3lwp/_.UtwX\C_pSs:~I}'>]oG^%]u@.uXM??x;tA |.הTك$ȷj}ϟ;xoMAy|Yҟ)e+;:|.c>~XC%mrK'? :2fw{幃[(kzofcpj̷wvVҭ[z0Aδfd~Wt )|<$ľ~uȺ4WoHW^K@R@Ø>͖:/6 bHCd5~9m3&{h &?tÖ_~z1@ ϫrPٮh:Ki i#2e?wQ(_Z#ϭ}Ű2h/bS3{..\M[Z 1xg~(x1va_lXjDmxau.m|gco!7% Q?=/ys78͛7Lþ]qUȼT{a/^ 5ж.,vk3_v݊2 / ̎Gto bٳgRyy?3:w$"Z{&3 ^hg0XhXjHɩv_~^0a/ǭ-yǥΕ/Bv~Lx!rʽPnҮɉm k9O01xGR䱘hH̏a4B$5k9O>,uНݡ0EX㗈[B5C=f5h} Oq(YxB r>NLN(^b  gZQXpMoBq^6޽{ 䂟C>:~N~X'jgvK?+Q6Iwa!xwveCV9gCN_͛!Զ f∙9zh *U<ۀ]0+7tVAx?TUGuyn\|8$XOJc_򝎚Sj8 x~5UIfwu)Ο??ށgQ 馺 >DՕ/k̵vĹs:ˢ!BʷE|5 gKbnpTSٳgC3Dm8+S].ѵJt̙pJ$4͜c%brAChdd|6lIl%_sW%i"5Hvkۦμ"w򻲖!6>wlyA2vcƷ¼Dؕ)q]DB|j*{ԩSXĹO[@Y o3_~-}wa3B(H{C?> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3360 >> stream x[t""` (_ox=fZio9ׯJ)]FfϟXFÇ6qCnű6?C1;wƟtoi埍 ϟ]q086;a@!{hӡqǤK+ /<# %6A/c瀇 B$Y+//7$y( :!;~>3’ݯ R}KgKz`Nݲ # x'rXyYJl6V~X|_|-_5:T YY}˗T*K5; 2A gT;#,>Wi|0|+c,47K2w;SϋaQwfM4j>ה|r#kjYse_s :[rP:>K.m|7yv$测΂DryE?J׍˯hކ9t}nϻFO?z4xr*ϨtfFŒvQwj{K*Y&:'[[m]&QmSmS)=+f&<5A)"wn> 7NTTqW&oGdNm 76s d_ƣVN1W.3`ݧbEeI &?ʇFE0əة;)]w\6;KÄ,n-[< 5;.1ݑ\3K5YZP}e\/=H%Fqt:JxΥᵁ5zSf]|ZT>ܸGX2 TNG_a6[g˄i,R)>|(y^!DBNX@, Ʌ Ci$Skqx46߯sFpw۳tC^n˳tٟn Lox$wügڭF?^j۷ʽtL#jkca`&o<#0ƍ x'dw~yCـ?`aq3իWkC%g;ؼKp>r ]< kV/z.,q.]9Uq5zu;b 2Hzw!O9jL-G?^sçf~^O^|Foۋ2HÛ/Ksu{͆lOmfGn>soE T/XΞ=~p 
M =U~mVR@V6Ϝ9̋aT\8Ǟ/tJ.𬹼$^/,ӵ sx9{lr12hrq [3^yWʩS:k<1RdnÚ`x.j3ԂeɓL9ɹOx:_*? yg'N&Ǻ6v4?WSrq$yzIYg1}ǎ geTAg/Nk׮4>5<om}wׇWI9z(ʹꔮy3R᥼ |}rH`޽"; VOGuni^y#Z9tT{3ZOu Wʹ||hQy> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3247 >> stream xKū{-  >wuϝ{[5wuνRZuòaeXUkV]b1,ڰόf-V`{vk?ahG5?߿هmk{V_5;ǏWj!<T{O9 ~hj{j!bWG߿9N*?,ЇWÊQ|#q mS޾hP훱pf䮼 ?Y4+S4xc3yV*#1Sg\9m#?v2{Oaį>3s/_B qI|]Mm|M{asZܚu~޵`asS6=G=O㬏ӧOj\Pw~+mxl=ٞL?R|so<|sr vįi1a;C> MC);)捜 >[}M~.hyw{M]ԼSy%Oh˰=yLvYj~)[mRR Tv*̓T yS=6gV˨1z)OcN5<ؖC5"/~ `b[W|bC]@qw5.RWhV{LK+9~q}10ٯst:nj:w0GS=/).5į%ۇSS[o(GvET|`2}u}ޣh$Wve=W\@;vv8Òॱm%M) 8 <.t[ʵ>:ѣrJSW^=_R{8 | J5x_=y7ooʆ;_5AHH޻lӧzO_H03eWR`#IC= ~8 M!'9oS'V~j%F͓ϟsaKܮahHKj4Ύe_D;$1nnw;c#0/_g3=Ti6 /%gOr}Duxld4L^zSᲉ*p)%xRI=붝gQ Z ~NGۗTTH6#:BBK'ځȝ_O]JS><[wiV HB|I${13yU"ܖyVxDhBC )7^peY*LFXC7ӌ UPU$9\?0CyC/5û8K˜|ۗz hZ)gbB􆓓8K;BkOy~Iz$;.Y.¨KcP %}>!fk4VE ;6ʎ-T^3:mҦ5N]^7|+jKrǫ1d{R'0H˱ݹsGe~"7cmvy_%P0۷oC*J~<\!l<%< ߺu[t<hWr0LK7oެx]Gٓ,k>3|/ylyWnܸQ1<2Ga+uAxu6?{&讋WTm63zvi>Vڵkz }B F KOQUj<1I^3^\QORSf6uʕ+=Qz;Tz Sr_\|ݮ'V&a ZIJV1XTʥKt5[8SQ턽>ڋ/hJ7IS }TfƼpLQmDL*|Kjo::u*uhC&BrWN$'O>]1pu p{xቼ"FǏBLI.M iy9|&aOlNcǎcPy]oGJNOEO%m5{G1=9r|eS:10{@|m$>|5N`q7'cṢcPȏ>E +kCAJ6ImO͟o}|_u$zN9㗩 `X1gHs K![Vwg_UsUmlcc@S>&މ>+\)$1ź&6֠ԅUIxIW~/[Qyo+1OzsQ=='+/@Tn7>C( 9%WJ4(8;ֆ}qE yd{7{8? MW(tx''Iһ^5ciBEb# "w.ᔧrb~<44Q7X+׹^;{grhDJBœ `7H|Ч&@ dT9{Zih/DŽg{h+ =1`꡶4;… )4?I@7_b1C~QՄGaq0endstream endobj 242 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3240 >> stream xIUM/A  $H,X `bf3pm_|8y"2XXYDDӮN߿OLOZ۶vٱ`q'7 ~p!擭۷gN?%!h۷oCH;5S c` h]C#gGxFZ &7/r_'&g+oG6(o'<ʛHJ.H_~ļ/1 3/x~/gAyB(9fPՀ C{?&tA勑sjFłK3 rns8p#W(ϟy\g|mۓ)QgNgSv]|B2B4Fz-R2s>;\zq 1*9޼yDsRJuM-Hׯ_YYZ|boy ۬^x R/MPfϟ j/.aO5'UC ~Źtޟ6-H[[[ /3-J{Wy%71vȬqNXzI+_ ,ϸ |Sv ˤ?~JϻT\^ Bx>#_4ɘqE)EEeyZU-s;<V_} $Ƿgnc! ի`x2vGw,h( soˢ]Е x)Ъ yaT<]xz>_r%C-&6, nO# ǡ^zDK/<s(I%x3_ގFo0*$O >|WsI>1> #84ZgLy$7ۇ.X%+oY5ϾcE|_:w7ؤG){,8"056yE~]gr ׀:EvWcXY~\" ! 
Up-O 7nats' $-΅:^G|clgHÞ9'NN8ϗg 0U><_i!W]5E[nl Q, Gp3U{ gv.u\2\G-7qzKhF^HNI(jG*-{Or`ϟ>}*cLDR*l =?J?*N9LPKy#V%&2P߻`ͥgi2f{F*?{2CK݌.Eä7I %K=z6ySo/}P?>_fE|IP|Mȥ=Yzԡ$ W<׿ɫiXPSڿJ^Uhd5QYЄҸMaYhfk;? LWgWf($&K+9 <ʯ G{^'OT7+JKV\+VR#Rt,NWh_ȥ%j)~[GgϞ5y d؅"|xa[,EJZ5[d<@^ҘSxEp"[?Bޗ뜅\эG{)usQE Wu^yn*z]uro9!C/p x9nՍVDGE^)K__I aGl)޲$.-fm G_Wo߮)rXzK|mf8n׀7{~ԺǤ¢6޴`l&Q_+v<)`xw~wi=y𬹿m.l#__._D˗/d˒FvJ[9-M.]Ol^",i<^p`Tw%wɒŋKKҔ*VOuN @.\RDs.EeqV"]vk6d Žs|o}~ Ν;Ts.9dDv%$KkGg}N[4䡅1/坋9P}n3g=>e|\]Piϼ6gө.'0 Hx<:g'3 GjŬyFO:UՄr ܎Y~ ; y:w'O5Rpd 7$kQoEJV ȧ'N݌ص}xs2?kxiGñKrN/oaǏq ACvjc_LT.Ztر|fl<:$*GaG#,~OBwK?=z49ln}9#4dp*+ff6xȑ#Xq&;kpDsǏm ;ig7 |?υ#>|د܀x!#mIkgY?%s{x&Q;e o0eendstream endobj 243 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3486 >> stream x9oM5ؼ;xٱdM,XEDDDDHDFDFDFFD>|g6o}L?uqGu)aA߲bϟo߿+?hdfvزel>/--lyyy޽+++]޲.m| vd[,_]Zw;N ޾of&9Uf[ff4f!ln2' 9>}fCskpЂ+>_d|&D x>6Lz {W(3YY.GɞUD|Ӗo ~ɽ m/_H!QW HL1LRGsļDg{ϡO~vF| Wg}sv>`"1/"qZ̈'V 2߻ELyzqC ds'GVq.168x/! xgO䐝5(}Έsc/_<.`rPHف=yZE,'G;N֕ۇ ߗgQ/7|g{_"+SyϨ8k:rwdK.ۙobO#_2_5rBm(ukLQS4 qˍ KP'wQCqarBr <{ײ>l9UT1D<| &]YxpR@.s2N·rʛi : |Nl Үf}FC [**^r+,x1~湐߿?r`qKET0$EΓvʟ>}: •٧FDFe QP#k,xކ?w' q^s j7x^8r- s{m5-·װ|~̲&鍀lg%`gZ'~b>< Ѣ_;rB>A>_t흫:~q[>yޗi. u)\saʕ|W/8xV/Ca^8Cc}E>OxVg7ol7Jn"4:>ɞI.Ft`VNoBQYy~#;w<9|F!{mC7}_ ^{%Ù?{$!<:O8eV|f3g2ƜL˛ m^ ʋq!LEyRzuð3?=>oڡTy9ۑxa?ncmll⹝yoц]>T'y8Y/`1kC.6Bօs؛CA}$s%K0Kk1~iz W)>yU+K3-I{}ʻ/3t2,~ПC f\~Vy%Ɗ!%;ŋ%Ȣ+ga{ym#ډ~zMU\?C};EJv>1Lʇ qx0faH?YKqg>5Z]*gx<<랍7!/졵2ͽ桹Վj>.bz+i/c|翄9M@ 1)x? 
13y_xt˾E ~Ag>`x\VzZʘ'%P|gx~oW'x_*C]EyK|ɏ.C8K~^Brfiy8I\sGǡvH\#=/ʝX<|?MI.8bWƿ7v/Pak.(ܱ]-5s+= )/ƭk]7|E[Iv^P A!={6$n²μ/B(#|Qm،ouuUf ^v_= OXۅ{]a| Kl"wd1 彧tAOk8d; en/#> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3183 >> stream xoKħ׆YyX2!@!DDBBBDBDDDDBFDBDBDBd.oƽ[:ݻ|WsNﻮkVl~i446fٰyv~}~cv?yygoog1~;l0xkСCixizxf_29@XS4+EthO1b˗/yKϚ56?wU@,Xv<>ʏ?Ǚ/j6Q{g#ӧOK AI68.Ked ~TǏKəg xbyZGEk!$Ç+‹IRLC\ ([߿gR;h~o{  J>T޽{N{[?5O)CS6 ?O -y̡ע&rt0}zPU9\]Â*ύ/9{ٗjgt<Yo뤐>v!Wė_j/sSJ ?;Evg ɓ'}Խ~-ySyVXj/#T~Ǐ{:EWBIsn<4Ks~<=FoN.NGs4ԗlL;Nzagå^Ņ(yƦL}^|WdiH~|{@(iSy<3$\U9P-Pٗ+LC~W&.%v>\+h_ɠ~8{k^B}ܹswҩP*L+ 9]Z},b0~a%Ç,ڏY6yu2sKCW`!7!L3-]۝zu1K5V6?Y=捴8M y*AGQ1쌮ϕyfX*CÇn5inuqEG oMѕY'nܸѕBEp_s طE]D(pŰsׯKhyȿTyn"%SCƷnSȹvZIj!~*|Iylx|">PWʕ`\ <旪=>`%x/a%tRX<Æc!vIv)%$N|Qq 򱺧 ޙq?lr6H)e{hx򕜇~\ k)템766:Zɯ/!HVK<%xo9|Iq~'Od3|VxZ‹杭ֶaspN'NceJ[QvC[]k'vBǏ+^|[> αc03=\Nae<%.$o+WQ~ѣG_|  %.:PkES`R'oJStȑg{S8iɖsI{ ~w{owxPjFZg$[: _[=džuwTK6p[GgYei?,]o1v<{qPӡ)y|bE'B<)ftpl >cB͙os-7T\_+<ReũG,!y8X.qg{EyȄew*ӄ&9N~8ROatQUHyB{Z19n!?wx\~ ]MT|EcxAҞgf/ ;·R.^\w t;+ AOr~kkr޷\Md'| =7zl+\ytV/@4*ӧBJDy]5t4t-8MuTə3g[L/TOx^F(ϾSgP\;wN$ }QeY8ã yJy/Qm#䬹 !w󒻙 }E9O,+F˿uu~^L|735'aLϟgal?{k8GfQxCn_&|br~EUO_&Cm#9.wȆ ?l.ó쮼 2n\|_Ç'.rγ\K˦+O/nz*cuC1ca];/uޏPx%*endstream endobj 245 0 obj << /Filter /FlateDecode /Length 31589 >> stream xM6IR%E.oEB#A Aw,P,߿vȪi=Bߴ#?1._~7~[9V(n߾ۿ~2䍼緿-Տ -W}{?{}~x馜~x>f-?zx++Tg5~K-s}Jjv4S)ϧTʹwlJ%]|\J|u~(oƬpc1[G2G^xnCc}?ӏ3@b~q-_Ѫ#-x(?:$x(v}v(Ip*kVLRMcCIh㪇R|V: w E)W.ImlŦφX). SwڨOwYm|#xOf{A0A>/XSBK-O]3c+d|Ls&+2 &/.]㣭i=E2ۏj=61ZOlLuKM7 _&3ke.xy-)Ь۠Yo'W=SHu*ʲ]2S. 
1\b^-k' MEMh gv=:^x3D{EmJLK ZMڸ콆KlL%P2?Zw}kmlu7B.Hlu%l:%9_e%岗7I5W6;t* $6=a#{5xQb:ֻlDykI&o,4Պ=CmCٳ 5ٸkQ1!NoV_KX?w괏6*$6)َmr>hb5A^EV٫m9C`6 Gi`{;.4lڪ9if6GapI3 nͲٓc~Y;.Inp|n Vv %>cL[uN3ζF`r{R;H]mEoXۭ{Mbkyˣ~.ׁJZӞۥK٘l&_V[I&4[aKCc "jɞ5Ϲdԏ:Mn Nױh ؎WjǦ_ޫ/<’Ab^M]e:.&c_vR0[6l{.O&{ e=g_`DI%|2\as+?$.iݦI:va']֠^:E$6hб(1`$3-v>oMVK~ .V]}2Ĭ)tlG ͘ q`̠&6cGfy[!X\b I4&i3DL2*lrgaKEgn؎^dluO=0\4&iVuڜ6 tlA[c۱Pr3It,Hl"}WTf[NѲI$f>g lu&l;w5&QbI($_f1IcƆ$>IڼL2&1 \(1;3)1G.Y~6o$W̱S%T`5$qd`ab\g c:LLNKt2{;^(lJV$خ(Xg&\S$9:):;\7.IlǦڒ$f)P!nxCĉϚI Oov龄Kd kVRI IrXK%N3M:ձ]"kjFkDLvPb`S5p-b'7%3)1>>YyKϲLA+,\Nj*m@Ra, N$ :&8%M+dO*n/}r%Iͥ89LR${ws}q $:"~3q5 Lb#"يZi l6';\vK.A-][b콼.Ԇ;N$' F{n},B=`pMP]⛨I 8Yi}39 5^Vc毺\LK*fm)όm_ oг3mhΚvRwic öLỾ JԠ0mj>zff񠇀]J2jM$֓9M̤twEm2-sflfPtbc!nC]~i/It9&{Л܃`CdMxA>3`LbHc[/6i$rvJ@gRRv;sP#pN}Z޹YtͲgKTmЈA.)$g(aӑ$ͷuaS:tm=p,L`V9tތY zsrorD=^8$ OAl.m;>V2reDbnمaYܧ5ll-F#\L{% }@KOqT;&IW,iF/hvpmkeZ\tQuc3IW_T;3$1r4<.:\نq%J$2vxft~΀q #/2*ؼ38|nr1$we=Ӎ]zD: c[Qf7lЏ*6j8=A̓~掚GL0Jfd$ *&V#nKIGb+Τ#1dB|uH6fg5nvnlf38[v<B?Ze"Zq^NW;K[y^˼!Ph쵬u#s "ln3&Lr_)I?1q˝ZXR9}[A1ccŽ^Kri%Dzvn&TnG:xʋ28PEC"%ˁػK K~ݻ,l۾RQ{W^eKpa`n~ŖGjpQ8 KpPQp检ǃb%IKq]"pn;R5,CTj8EWEƹCPv.٤g*|SvG cF"ye9,ysM$S-uNB$5NA`aAPcFg쯲Gop"__ZT6Z^9C~=8v ǢEO2ʣuo㘤Xf;Cg2Qh$Mcs}GV:;ʻy.yŸ,yfDy.KQMRqX2IǺt/\%yLu5N8 3ƃF8,MT'VJWg<L0drV \g,䛕=e-!řQE!t9 Itb*C&:+] %$yI*I-[GvרZi455ңT+CeŰ2["EU|i< ]H:kX2LV"mX)Q#dB4K5f6ZAB*\H}{ˌoWrz8@K5>fm|V̌޸]Cr990j) Aelg3 {?N;f~*Y0fN>XXuțA1QPڞfDbR3,pqo[IHE *8XA |Q"+G7 H !Ov\vIFxK7>#&̴`l-v+fs+"( x_ "`\`#=Lv{}ýض?vMmq^kO>C;$ᮨL"J]e87mvqJv4 g$71tGbBB½4l5!)}< :%\f!iB2Ko _2Got5+dp2 VKC6ϾM+&X۶L+ /fVLtIi%Cs3/:(Ǟ}[քm[W_q-mm>ѱUšn ̛vqgiS K̈́ku8h6wNvNwti ir@5:R$aS XrZ_> 29sIJҹDB(TLm98 p1)uڑSqݘ<3bM^@C֔:(.Vkᦫ&/'. 
,R<Ar=I'E\j'ʼn\-8)1 6:unvt<#~tKQ$:Ωe!7q:HG~ IڹpwaI=yЇD[z1.DFzm QC uVpl;V(pNQ;U *.3$[qC?PphU 2.5@$ VA!v":Ao@$Z aLRxg_ A"`'Fڇ{[<%r_gh)Og=Ǡ1= @OQa}*8q5eƒ;< nM8H6D{vo-Y ~yو#zoIvVt$^n^(kLH Z5#u Cͤ"*qd)7IOd2"\GGA?tH@2&D;*ލ5zɑSvs-~^)42AVglQ'N(ud jQb Hy1ysNVE K .yi=G#nGRK=&Wg)nA|ܞ1H5#_)ey7le>SvԿAѢf+â"Wҹcɔd*+' G,2ǝ;2RBS!)PI{!|8r<ꝨK0rJcrk)I~Az7\x ( '>BPA  MSAЅB nvB@)0B qqϔ@et/"t莎.t[)H8lH֤'£+YB үY !C :D( muaLal: N[ Ryy |> acBH#bivpE=j#POj Ɔ @ևq)PzkC4 n OZ_ [5BXk׆F֐dPFyZFZSw F+ JJ k%VkF)!Tk?w ZXSZ#[[n_@ Q:t ZWNB5ҁh)Pbl):6@~8-pɖtL|`.|2[{(f n4ADzHg+\ʀ+YmWHjE(U(G_!Wj69@$ ?*zH.¯ ApYx`f,%a"U#vVW39hL}UnI rG*J[~KY\BjHj='%$`V"N84gEHM\N'A%䕊EfVd|1uO 3ћU$y%dr;Et ICG<>\|KM+NK:mdNvw=[%Rl|}Y=F֣}5ΌuC[ѻȘ%϶$:obTDHyߐ[Q#Sl$ W*CbqE+GLqE )#N?+;tdnȿmt<"IA/9Sfe]LZy֮fdtWĉI&n^xE8Ttrǫ A'楘MpkF^[]|lQ+5^ =Wp¢|UZ_qd׊~_#~ф*M uTVpY1«@žуj ,$'sXm]@*@;(TmsɊXmOYͧDm7&xnnYJ ;A|*צmb3Vt hM۽%ͦ,޶@va@v݉oV_ :wQN`ozޱh#G@s @k w? ]舰l(躰j08WqG:0X% ?S4qdUp-pU2ܿl!Gn,E7@f!S4~qAI P^x#`#qT#tqGB/x0vxPV?%&( '^9(g=X:)U/ȭ:NmWNHTǩyI NBGuS F1D@QEh@>1+РtEWi [dulVND% cߡ+4Fd|"_!;%a;e%d04WBW SP,42P UHD =]a;6HTrC8H.)fw:M$ AGZpJ<nGD!aS|B B5GHYtBB(0EZP2Wt!QH!iL[]%BېLI<;=c#ȎY-8 +R\^:w64wJ0r;i,èP lBd^(a%"?,KaA7@ȖuȨ2krxRI5y`-KSncDRE  Rom9` RQt+J44bmXJ䥿;~ >HQEBr}LbPR(hDd"#>JK/@_1ʼn]K@YPʧ)Q:]`vy)푔% \;D&o'#J4ԉBJF5JFHjSQLD)4ƽt#!]:t*( !Qz  Pu>:o=|Cu Ql`"RpDޅqH%5 0R<0^IbG1)M _pasAh/!6qhgE R>q$(wݳ6&)N*V%^bbYGϻ]Q;Ijab-<[YTSg=[HY``>pT.,0$ / Pl!XE Nv%@380ϐ4!/j!菪$¿_`GwG1 ZXr ]CItH-­Em`@#Ec[A/@gB" .NcID *i q 1[D8hM@ѱz@xx h (x1NS%#Π6A񲠨8!eZg`!`Ǡ♬4[jze3xAAU4щDAS|zƌb~F(Ġ1)!U0E^\M y (2ZуD!*.jkW+DŢ9 uk $**de>`&*b(*JevIBټZp0ELC[=ApeHC\DtPE.8yT!/8y&[;BJ nJEH]*(H!D=hje؊H,h=X,2ɌD(QŞD'夳y0OIHȜ/'$ 5 '_R|X"B\WTXAdfs@>]+8b@N *3ËҤ2D(. 
K*0&8ŐH3ob2& ( RgTX4iH4Rݤ[C2U A&m{5RMK*bc@U4Gm`avTSQTl>4wAGQ*T7YL}35EG^T3YTR, [TmTyKD¹^Ɓ_6ޣZxo?}?_/.h"Rmo:oWCz?/ Y9a3=?ͼSn!}?W۴u]f^/(_=]vj"7oυ6ݎ'~GRw0||1fV?,QYK&Sǯ?~:X|A̶n_/J{ŧ6vvT@W?~[?|kcg\߾ۿX׭d86K!犭t J|9$8JC)Rs SiyC)%QJJX-8<;BP YR=}-8Dxv% J[Pr?TSIR}*Ip*:uCOORn؂S[S )SG?T> CɫEJW4~[$t(SIW9$8_8<?+B~?݁3ä4p0u=c66D!@e@l :Wb`<_i[G۸ݷ?狗t@=15ND~E4 KG/"v _U@}sLp'^=HIDpij}[Rq7? 78$9|6jȸU֐nVr =6. _^,@P_ug6ܤLPKig>7k4/T˾莵ky R YF$'4SKnw|jPbp}xFCk EߐijծȠr8G4*d#*Z|r,Ł3VU{tUjEt^ʔs9q9 ItCQe 2ɤIgp9䢋-J7HcHESwHI-sp:lsE530nC.<^ &Y*ŮBh 7uE2^ֹ1]Uo^GYcôt*low! 1~C(půzD #ʒa1Ah* /$Ŗ^ ojUMi ޘ1{h? xǤ }&_]Q'_= 輂s5Y23;1~%XWë_53ũ3;*1 "*1|HT!**}KXኃ5u ܯXaJWc;9"3^}ۧz~*Q55HVjP0cx`ϑb3)gb$nP2{JꀨLA`k|͗=ë_5.'82H웘`e Mc5g'^*uiaxWL+v.h^E׎/S5asǓxjCC{[M 'U(RYqk^HO,76ή 8 8ZEOn%zb[fU"fMIo$iJ8HV'VW ^i_-*bk$4bt(i[F-*;V+\Q1psɱF)>vS/$})r85)"Z8F6.^v  P4~{D[r]1aeݧM?rԊ~,؀V43|Ws.n7&Jp%7l{ǵIի; *yn8㎍G]8l&A>7d$ЌЦ zOW) x݂v:49r90EP@]4wr'm/$|2N:$ᥝzrǯA##ۨ5'-U GOEo  $`ßDF>7R3V~9[wH4r1ouҢ#fml\G4!uAGVB'Lq:2Iѹ,/8ANk:^n1:AB,y%`: :–đԏAfx.q[>0#t8_eDP)?[<]3)n$M?bB;/, DS\AVWQT`@ #8Qg a*MDY'H4zĭҌTۂdaƿ QR12G պ[qu>H1U^x9.V5( &KEwXI*y- ) Y!ҽFWbCku+h0b">;US Cr=bP|Xt A{#~`eʆDFv);BN%;;B^wuM?G*\1~.%l765ޑN@Ai< ᓔ2e0L?(/$ $g)e\\ a>糕9%@Q0?t*sZd:DUAߩw{Ty1.eʘ;;Tו󁡊te*i< RA2lO x' fJ~vDj&1UwJ_BR=%Lb:l9e9eBJ\ݣ#EύGrػ[0 J23Dh]S.PjLe%/T*oJh+/<Թ]tf121eY+8UQ9D`G&8\Wk:A|þojc 4+]BS R`Rq"Ue Q61`t] 9y#H0'sR/!Q Lh5ŋ>R" .3$?3¸` 08Bj IV+D@ykBF!vY$H5yAL}'CtJ" =!@V?Â?ZtA!_N Ð 4$%x 1SÄ˒džI SHK&{GGpLyMz%%m](?lM>"`=` D{:5Cd"̇zLEp%Jm,%n@F#M_RO:G<î6AA Ok@>o)$z^S./6h AX@b!(9 H3Y;\l:Ot! 
{;*o, ÷{W 1vN!Ny#}Sa 'y!4?zʆ@tq֏ ?Ap.:8q@ 6]{Sdĵ ~|''n!!(} 0 (qNIƱGcMkrcT!E&T*v6x@q 8fx \C)NXĕF>/:|n[94n) qkԪ22#E|7 Mqݑsȝ|x¡$RD&nĹHJǴ#@.o[ZNIփCNoY:V;HA-j>qoWH-[+1]B2ٱѧ87!)1G.cj#%'2NᛈEȝKj^x~K*%c)yH''90Xq:َ7K'#zu1+'{`VzFNC/U=bqzO^2=йhRrlXdz$z*TErj'fL$׊ӂH$ּ"Vܪp 络ۚKJsɻ.> 0n$*\!dB\*'sӍ]?skj 4'I~ #~t11;H)+]5QkW;u)9ՇH31*n}?rӾ/mb/mb/mb/mb/m6_lo/mb/6_l6?Lȋmb/mb/6_l6_l6_l6_l6_l;o,ot7ᙚ|BÜL[4Lގ7ue'6T,Ǜ>?ѐE,ǏE/ b9^=#Ԯ<6+hc*+M'{3$zJEJѮXkU~]+zF,t/| ӠT~]l Ru]ֺ]sm7͜LմNVŒL͚9Z6M4.2Y G|~MD0„.i4Ax釭Q6|@fA禰̫ЇkX9𷱹H؀@ v8wō1 I ̛s8Į:{fb +7p7"jyUYbWT p?I*wfPȓȖ% eƣfj~18K@F(tdM$6+LnQ7J;@x#9刂t8E a8 S>︮`srx5'Ϻqv>>Ӈnָ{}lgQT^nN:Ptk*CGo{J`QUA,yzp:tx50qN:K:C,Lz:9r 0X<9R:<rұ d:Y?Z  7N@\&q{Ñ\ 4=t{>* oi>H^M@Թ gHFDi# яы$׸ O7|@ pi:(h(. ? U"`h"Dgx_\`Ţޯzi[1-S VM|\Fτol·}A8`V5}m* ,[ ')ZǫQ%TSA6S;V@aPЂ P)fCSNFxGPh1=G[`UpH0qKi * VPl@KDic$V+Z}&MiAoiGeo43Ijcj0o$If?3@BEIY & RJR'g|* P$?sgL>9?aE;9Sċ8@hܔ]$V y4&.|$:9<"+QUB Le flZ1HgҬ0X;$JchXI<<\r% QJ@|Cԙ0wj*UGUr]*&^e'Ќ3 SLTL傕nD),hJa&2UW~dˏpD3]vetOIzecP'Ys }Wf) ^(C ăD*WL?p'@[1@0 4'^@#P %pUFW$%f.RalM-!xN(ѿNyT0(JAY`հ.J5,D 7h[yk0G N ϩ_@x*"֭@@D(A0A2"4 /A"";\p(DhNbv:Eh:BA b~(2H^V[| own}KyC87Y/z"J??o~#'_Кo6.>SVC=x/(_Hjľ<'Yo8Վ~(CrOc8#|=JWZ@W.0U@C.=?C_t`~ՏZE2Q<oH- 8 CPo7r(PBzVڂS){P T<6z(Ծ*_*MJ!8oчR|y%[i (y*SIS)yP Caߧ_ZԾ~]v١S8P q!x(9TTjJ!x(u*Ip* UJ!8$J?e NKJϛRNtC)\R[p*5J!x(lCɏ'R;RJ1>$x( O% J~<$8<:NCR(=[ Ci[p*(SI/Ja TY\*8p*>!*[E_4&z JmlCi='J9?muRsق]N% N% J!x(-8Pͧ>UgX<SI/Je~Q*J=}Q_q!x(JقRATԾ\NC)$C S?Tl[p(5W-Tl؂/Jc|Q_)܂PN% J㹱nC0Ee/:k/IJ!8;BPrЩ$Y_BG!x(uJ}-8x|~JN6<$*}}N%<ӽނ/Jϯ DܡN% J0lW*MR㳺[{挋O,9`Y![לexGI*(Ip"ǖ}³_ʮ"u OG&xES;|ҪND8W."0`7O w%d%Q @="IpvRya۞*%d+] uSיʋAׯKf7WgPsڤJksd{sLOqkv Ͼ6_K$16ï[iCCnOP,:޷,pr=Z2fy-쬦v̞w(_Da-Q bOZY)淩WKm%TuLZ`hN{wwZ Nzb[﷎pl&∵Oi oS65aaH`kv?;›f͛,A02vc:ݬhtm[%q>t#laO+[[!nXΪ/ͫ (yw8LG?; }P*fU GaAEKJMXIkumlqr˹$qboGf`d4NǷŒ,8BU}s_QDo}(A Q|M]2UpKF;S<-$xGt?tĆ\(F xdJt(jS vV=:QP`Q?n)DwT`̊#JjdQĝz0<'5VqpRPM5L>dRLݿ/~S!. 
Aay2ѵo9<@hd6.;#3:G^0yMlvϯ.|ut^oԊ{~(Z|x}fЅU G~/|a*xT ΢.>^Wz< \1vvQgIg:U|s B A}U" ]'OTz @vl%Al,rA ܞ*@3[ǩX&x5EԲ&:iaCt3Zξ2l L{ׁcS.(X`7 {TwO7x+tp{m|RMm&>nAž5W~7- j,QiYQr&IOQN>2?`xCM*eu+{3O07uL} |io9$C]W4h9a‰.IM4][pmxvtJoz9du"ҧ"|򺸥xen)IrOsJgQkhBЬX`'kƕ1DvTz #+8^O8a-B\1 h\ `샃MGihV9fãk^*p`;%<aJC`n&3ۊj&:'lrR͢`/҃r9 8ïڗvl] >\no %q lMt0|vۼ]9u m7$rkko)ݿ1tl Pq?Z/bi+w3 riDN=#hI:Xp8O ᧛R\1{J$]B$[sZ6˧0:HeW҇mH܆J\ 7wq]n(U/'z E3ox{Ϥ){:\ݚD4%%$ Gծ[+-6&"Oñ4Yt Q ǎ0:D?5u%| OAeq`*`3KT;Z%e .4x&>ffy+F>>0U3 Rd#>\ *4D+tdK* 7M 7np& .Ҝ)۪NiQXQXuFwwץ2/Xx0pjV4d8}G<s}I1b 5"x$1\\bJ0JÊPpl^ N1{$ 3Gxq أ S0:iNBfrf5' c޳ zm.NB+PaYb܂oyrs"$60lH0lnP,0kwPg;@1o#5¸xTBb켂b|pcđTqz{&̦{E[ 3iB**M _#4QKyhm:3R&:)kUi2oiY`2d. kApSN )xpASSzl%bs0S1}_mT}c~N/\ uZi]L*>Wp+=53$xį"a{y!c\KS>_F h$=Z{LIX]F>2)~U4-*rWdEg+&`r``rxd 1ׅ(_B庋Y%I?R96(P Y \ g^: 5T-cؼ0u9an^bZx/B|jyihcgc g~yU%f>7z. x&~SPwe@ VJDV8>DV$,+{\p򳰌:MYE(I9$"w,-OB3 O`6E1+K=K265:Q&u Q59瘴DxڹtAr7VN}U oG{x^^A"oRqYsvIS=)BBW,}}50/E=‡\&DG +>.:[WǮ)3vvE ]mH6=@v{q Gٷ㚛HNZ1:@q#Q] ;?mT$5lglG)AX ;` 04b  L $$ -phd7FXdA6)  Md}DzOD x vXet9NΔS&H횂"+" c9d7`Eb@V AEGs )YIHRj:DYqQ DRߢ(mjzq}c"$mNE-'О6ȚjEc%z!ʷ6 J{U\FwKn-ogX`* )[ED.GS΁^q?#8kF.ӱmd(%C/!ٴ$HO%v#]>4 %w_`ޅrC/:e(<Y6 1BFHP(4@<+UBQ `\+sSmyic@^|~Iac(meJMP W4k WlJWR΃Ćjs 7/T Ms–OqB8 [%5w/)N}Z6׀3 qԅ0iJH6i-W1G vAIPb].sK_=;[ P.:Ut4]t<<邾]x-y|mܾbcnA e`Qk $w+o !Cc.fÂT`Acj yj l^P@6U, *u-f: =} R%x hC`ԂT`ATˀ[l;8ѿ2 Zg?]~tqЂT@vX ~7(1ӀXucP ;Pڏwo0TaiK Ru % RT`8 }hwl6t. RwI@AS <^QtI+ R2i *,l@M@n6]uM@|aA*wx; ,HTY (ϻueu "٭.؁J0]oNQ-dQi~t=>Z;&p/) RI5m|+`@MG@5@Qu]`AT9vd@M@7ϺT #9 Z6A;2`ה]@i_R:(is۲ ZXP05et.p.؁Ҳ%?bd! 
6b#'/S"%M⹖"W)~d|rQhуG"|ǧnŀǦb!|aZl\Uoܱ͹h 9z)*1$`6-wZOu,]irjLʞh'30n©Ju9/|B'v?\ bxhd}i-3RR:sTڿ):򺗸LK!%('7ZےL'&B*(UI5/'9JSr?v/'6xhĞd-3.Ÿ2O䙶4 Zz|u-!n͠c^LcMߍy$כ1X昞5l׏+$ewcvi$-9m0g͟|Dm &h`ց53xv4VwXqs֚S*ՠڃ;A܃h?5,95f;w%n;KLjL㵫р7Fnmǎ ^ۣpr{PGdӝ#ĔGۉ KX)e-%PW~V68̖q|t9N7p)2-d;ПAzz7-f:?Jlc؛M'-ԇao<>L>31n?jw ff1_ٔqꒃ/vl1 mn$nͅ B 3ICs.l0Ĝg^[R*_r2{|i cb_ҮJ(+:fDa}YIhĪ[n8,׾zLGS^5>ekf0U)va#:?or GVٓӚ̱> |4FG2<kDYddQ'&[36vD1 nsL1̉CrOFYj_Z)ܝ/nG=mc`*oendstream endobj 246 0 obj << /Filter /FlateDecode /Length 3418 >> stream x[Kse6~C*)ovH6H$(E=3;ӳ%HY)[.j-fY`=_0^_[ޟ3~\׋?Ox˳&_h/^98>K+o[Pǚq$Xq.4ҝ/ לftd{l+iU.aJ9q B7KA{ؑʞ1m?O<[:;w'-voqIp+i$+(wW81ضo8X>( FֽNv?ԊnY:Wak҃7lT ẽJxW">qWaRg8DʅWW4![ LN~CL$9ܒ q%i]Ǔ8q1O1=a# %XDH,?$W)b ʨDh_!:jĻ"^񡈻"_L7zQ?,sh튚u&Y rċ:%Bx'Xtp]g?DG :pd!< LJ!`YH/Sh 5YWA9N7졏_Tո -Y<4JDMQOJ'iքT@u?TQ gf,=Acўsq:b46FQQ4mC-%,S-wS 9OWe7g=B-sDB}ЩJUδ<):! bn#m:}[\cTpPJӶ9֝*T\F=ٍSR I;8>Zc4֞whQxR&N <1) 3z`,z@r'ՐiƓJt>D5@}|Zl6 q&-x4$kHC*LÔP*$X]-]{S,s pFgp ڢ* B%VgqjqnZ7z>4 EoVteXvIQ l5N1A]KS*f [ X\AMxe35A&"xMkؾG9U)87N_xH,B8]6,p=6]VT5eÚl,Je0򹀧p>v%a>!vJ(oc-dlBi,StiZTM *8K&L5 g#x#Cُ Tx&& 3o0/TVW@ p$EDzwքV[D?Ӧv`͝.oSY%P}L9@q-3Q̠xڅά 7ߩ 5:K/1 ӦLKC{oR"ӣ"wWoP̖)DnKjrUcRheŧ;Y:L\̦8jr[`maŤGm',1R)~WL{;lrt}HqGi"F4kx Į35+lS0 ƍ2;!E3#Dx Bd!5鮆ck,:mU` %J$E(}ɵXc,a+w|}@>nLn%:?$vE4c_]#8HaBOEo-&pYIdV҇ohϔNTpBRgrBTV蕀Jm|O˕2%\]Fo"Pt_Nn.)"ݏ+?Ee9/m{SD d("^񦈇.Eni9æ9îvˡ)^y . HM7x<4E=^4mENnvͧ+'!]]NfTK!逩ӓ$wPRU&ȋD7~q8}(ni<\W`< `iyK,t_lRi7VAv6\ RhDzBgXc ߔDQKb9h!kT:e#<Āx-vT}vȚ"v[܇8c6C{s}]FoѪ,ӈsQrI"O jzSW_|mI׺ǔ,~TƞS81 rS>h2mhD[hύ 42rzi /L>}UCďS,1qmK"$*-qRL*w 즈dFXUyʊt.tZ,,@h*~Ǘoa[ѤnH-]},c?WWDU }k$-q>4*>S"^I>]s*ЋP:;KJuD6k6KZ\?I?dҺ? 
ӏn!}ͤ/kQGY^HV'}<ϙ)=E>sssc!u n?5>_endstream endobj 247 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10  ]ZUm?eB:gׁm_Xց$i, x> stream x\I\r[c E$ (0%\bH{6)MGuoOG_%Y Tfs+w 1ȅʿg/O~wڝw'2(\:-CQ.N/OHnpQ/\ nq+1q,y*=K/D\57na,gl+0䮑OV>ˉԃ5!,N:9oOyKy6Vej6^ZzFWk"__mkʗ_kjNfc:W3̃n^y#4nYBōt^1a*|E\TPet0Ux-@C q !0is{/i+` cr{E$z$Lj\;gFՔ:)|2ځl2ezKMf•!󚎁k;rVXL:b~-K>c9[5XupgFSᔋ|[vSs]Cճm#IKêX۱﫮ٔŔS~VͦI|ER{v؆vH^x]*X3fn'5K>9`tSNDQ.j=<7lt(-;+Į)!H/Aq<BEU['*Ke*j W}+@Z0qI _b#C#͔C4m]K|S3B:C/sX_8tuEd]9 .(B HX+&&Im Q.Di-׉kޏix)(4mViO^A^rIr1`DJ`4o~M!v2+d]-ClS@H%;ص56鋼sE𥫛R=.G"+ v+*&0耄 r\h>t ּMlޏښY8  n:YӪSWxjsA0{B&F ?2PLy?)ࠅ5jT! Xthܸ'{ߩʦ :|"3WȞDjЋņS(o+ 旓]鮦Z:_ uuA . ||% ԣt#7Js]1{+HK @dz.iT.l W1WMKf/7\ˉLӹ 9j˥y^)( t}9j<^SVT*9ܧ4hm Σ|􎛿hJ5g5p3 >|u&`3nreߜI Q _XBp0.M dhݖSJcr/j3Q]'?֠~4=&P"`e< %(QYwV4 J46?VV?+ ciKd!$GYDbwnHaӚ+S)x``@1_ˈ :PX# #Lah\:1ǔqa_z6FC^ߓ[ v^z{xn(mO?ְ?Uvɍ_?XN`P@RmvVٶ/L` TK*/,Y_ގar-]ҤdO>Jcl(2ɷ9l0|{p6)+ gI@OojRJ{KO vg{Fd[Cn5YM" Et~U}{U@n6@[O/S1,{J2 APR mf}=|f$m}=|?w+Avm> usD\#Ys%Khq$l 4Aku /1["ؑ?2f{ r_J}E ´uk-LC@NVag/P;*ߙFoa`Fr {&5&F-4p`d$>QA xJ=#wcz!AjO2\K>豤K_k`U*W3W+xVN zV}URǚB =O:B Q1hᅗQ'v 檙'ZKϜN{3ElTV*1Es .S*?TSHe_ӯlA= /aE_0{Q2尵R~Wҍ}d-Q|(pj壱N8eg:85MPXH3h }483G[M`<ߨ*;X/@sxmLyn!A|]I5^ S/JGt)cfHgy!t2Y{2IUh|0=k#p ߹ y5׋ˠӓw\-ahhm9m UŜ{X[cm/)&05˒m JrMƀ"*3&_ )/_q ]!YNo?T0JͿžVE7y9{%ǜӗ2W=ZN^>/<֫7q|<>%2pxիTwO~S"FSZ)㦑h]#79>.3s%> stream xE]HSaͱp6s-4 &b¥-̜&g9T!JѲ`bf<^` o8szPtr>-UHE2tQ'+NL#Qל =CUf\X8xí^Ϝl./kPIH p6?Gأf,+wM@tuvu`$L}@R\% Xv.Oo~5^.̄{`EB DlUmYJ(&ם ag霗w`GfO=,M22CϾv6&5Sendstream endobj 250 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 426 >> stream xcd`ab`dddu 21T~H3a!]cO]nn?G ~(Ș[_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*\"s JKR|SR @bI?V3+_}ߝݿ:9w^{g倂'D߫>d3.yz}ƺgK qV 7qZѽM3~_+ۻsO/{Vw\B ۡ݇f<ʛUg+[yNƾ|Փxxe`Vendstream endobj 251 0 obj << /Filter /FlateDecode /Length 4302 >> stream x\Ys~g#X~lJ;}TI|\V$9ʈ\I.%m%=fb\bT.=:d=?d/|qK{x9`?G'Zã7K~Mo<4ʙã;`FH %f\*9>^ yvEz]2ޭKiU/c͘\H{dwx|1ew;;s%oHoSogP +74<.놼s/g^Ҩr?84>d h^NãG H6 n(==Cp+ R(\T L u @etWCfSΤqi+hmpصq-aF{U| _L a\rkETU4= h#"N}˚fK"80&]CAH9qF4´= 
msdhVB1Q };ᘒC0["0"~2~&^X*a12X&.69N6Q4QPq'^U֡ތ={ t̷0`"D(΍~QT>j‡ 7yЕcX9hu킨Qyz4lj<$/.&"1NNJ0-\nD2* 2!LNc`\ ,~ރA(A- '4i#E;HH祖^D 5U$$)؁ g+Kp,Yyq =GnjH֔@\_eIHD~ZyCJgN M!u3ސr?]E biJ@.t~R$R<6;(L/L^=#YbEf[_5vh!M TV pO2V||2 v fq#'bfh[sBݘuniYD ݥ܎aXAbYEy2B;#3N&e|YԞU "vXbBG  z,mM8m6ss_6&7۱u-|3Je0$wAz9$ /ѽ-ldgќ~H 'z_xQ~15\'e- ڞiĦ wqǺ `XLETPFE_Fy4qMdbՅPY#S!#RXؐҖhQ+4&o$|?*Q/ry%e P|I^Fϛ6lnmBC$7e0癩Di`4S"s+AR7 ^:WS(ȥ|B=iЄ}V$:X$[my0}0<MyȲҥ%Zy:"$OVr==1MG_ՆL릣ӶYj:P]`̋4֡@ue71>pv{y˗M=X(: {$q^&m,R>6w?R09UY3 Ú] m{㴯\,HdSCE:mxX)r4PbmI%w80kǘGrJ{Z v$vrw +#y}}Zb|VC^ΩyHm1͢d$xN4 #idHX.H} 3e8kZY3i}[d3u<ŦiD}ENuRŗN9NZ^jǞGq;~YgmU]:V^ݵݟz8NUmu*"|4wb71.?s5uEwY+ d*d=J?#qiY  i'G{b4.]í6|ѓ 6h S^ p<{,33Z*So < +ڱ)-Œ n.DoN70QbEɹZE/6@gQ%'&u1ф[J}^C!p!FꋋLO2infj:\+2_#ܻ?I/{.7r@c-:3t0^u9(A]bӱr`*fD+iDύ%;_i-il/ E!% M^h0SE=\6S*Sfy7~X!N-;^cTq|lB{O(rٜ&ɛȱiK)VS'Ǧg ?v()Y|ˢ";nᦩf'gQ}-t 7=)Og;/µB`ΛOq{N(rݳ "}e9 Yn>=+鎧Yq _KK/|=t ?ڜvVE:!G?3tendstream endobj 252 0 obj << /Filter /FlateDecode /Length 3141 >> stream x[Ko#h|?(Ā {sj̴;kGlލdA; Tb_U=,X,Iovg_|gWwglqu\6aw0{, mz8+g绳0|wSC|[ȋJ 3f˘n Be۔ѡIrmoL+ο9ʹg+Tr[t)O(bBi8¨/,-(d4꾝A}e2cRXVB2/$5ԅU&\!yS2!aOlk0֮I$aƞP︩Mнfƛ/BRp& nCkWn0XF9pn_(&8\d齲)ƴW~0 ǔDcKfLpC:rA ~ 8cĿq#:IEݦA&2i*j3i(tQja8'K1-~qp/tRQHS3G!`x>2 ,iA1j:󎮺Mft(ÝNJ>+Y%zn7 \Jp.< v眇dxhpgeٛi:ݵ v(._5nfvd>* |g\|I/DT F:ރ4ӌۥHA>(~KWJwڶ` G"T~4lX#_EVJ;?+{O&PFV贒.ch@}"Aq½+xC Af#? 
A"1At|꾄2|Ma'mXB`yٺx7a ȭQ(6ҀcZ,rבobs[G=9%)|N1 tGm8 :xA􎃍 @pn hd)]z$_`Rb_v ܩJ(Q4{'9irɈ5^x_A'h1Џ4U֚.A¿_ռ @+\X7=)ҹiCJy 7 9&hr(X:k P#YUc_%` {& 0aZOhN4RYKL8WB@OSFʑU֙ Ԟe!ɫ>iHCڶ-Z<3֗l>jq9)P%@EmX *Mōb"P~TmR)"lFgK+cc$)]pN{:J25Ng|8cIuޑR`?̪L:3a`oL4piL y_m!gM)g`b*dL4 10E40'sAsР%k(Ƈ|Yl߁n1s;D\ pFis}}Q_ZÆ_tаPp\ z֓S-KWWܟFU'_y(u4?MdhvY^ЮqJUrQ@04oX@c!!J䠅^#I(c*#q&QeulҢng]HhAkM +)5A[(FHI(KW]DӌIAn7w\gaY5l42= $n^Ċ Z|Te@hRM*%+M/+$K7})''-w))x `tweUa^ɦnSW04 C#/LR5IzfD AirS煗VQoWDׅkWtB$ ?!w%wt&`|Uߚ܎~`2ݙ<ʷMB Ǫ¿̿e ^zCSӯn:*ˊJg&,Key7[!ub%Y!uB.>ٿk endstream endobj 253 0 obj << /Filter /FlateDecode /Length 3549 >> stream x[Io\3F~o ޻ZHrH|pb92.bLjhroOuudّ:쥺~ͱۛ?3_ˣo49)NЀ#SRI^؇)${ Nѳ'8/!y;)넨TZdLeapŐbr;,cɗ?vӣ'V_0H^2]Nn|qRГdOP;k2914KL\CMTrOfxvRG".M0k&7Rk^HC`Vq7Cr3WIäA~2KI2Sht*'Mj:Y)CͣQAt0!Iv F)"'}}r6kxi9*6%=&%-) wm} l7]Fkċ)A=!N^RVbF)Αb4Oޤ{ uU/3x\~*DʝJfYxM|B$2L@1eĶKBk,2mtxSULĪ@A09m@]SEE"ɗLn]1cRb+&oJ',PEO1d+8q(rиE+3"˾ҭ?!"ɂ!FFdxw1ŀd {^bJ$XwCF\Str3'A B$9ְ%v] 9Pu]3AG9cWFU{D}JUT`BX@'Ih=gә*+yaHքDPZ@Vc0̀.9#woP|EӅQr4i}~:_X9G, 4N,ftЖ~*ڄcuFک rO~b+^k/`aE+K@M.㲈" v&EpC ޢ-/GȞb%aO-$\ W|P_ǖ4co  `8a)l̅@AeTz *8eG_U{vbU8A|ՔkkVICV tYY/"6خհ֚b]6U1.#cҲIY>Px&ӥ&-cxm9)kCif`A&#{qJ2<:+7ؓEkn)d7hJ=JT0z d FE416jU`]|kWF}~7FxKDF8}OO3=>z!FQ) Yb^*o \,D R fjĐr+oGriw> stream x31ӳP0P0T06P0P05WH1230!U`aS027i`9'O.}O_T.p .}*.}gC.}h\nn@n.P9?47XΎEAmGTa?ߠ pS! B)endstream endobj 255 0 obj << /Filter /FlateDecode /Length 160 >> stream x]O10 @+uA,tahU@p' aKB,N>~l#GpƲ5 HeQ7-Yy!' dv~W3:UiZB 'mUu1 R 8KW`JM5☛&e=O) =Seendstream endobj 256 0 obj << /Filter /FlateDecode /Length 6795 >> stream x]KGr+ݷ/|?X XK|X#gg9CR̪ꞡ$ ɪ|GfBMB7xquL]\=3MA'x2eŋʛ‡)d{/nq\ܶCkioT޼kCknwI7ߵGK[Sʛy?lܴyش}kajM4f?a`ך5ͰrItߞ_WR4e? -ciS4+Y`V0̨;ȫ|5۝R&[l8ص '쐔aĿ)#:y$¹e}x]ܔj bDz12ʆ9G`k WR{h:ϼw)G6tiGs*ZxL9Ys:m7ȶ&[[NPO[,4&9 z3C2s0 L؝uw=;l[<4a ȕ6NA'0[(:#5o&YyݾQr,!;w B6j77ژh¿40!D;e]9 V2c5 97(UhK3\ J*O%oIU9:!R Ѹ0Bځw{*xΙ;fd Z2Zv)O6)X&PYd&+a`\^ܽfrZAi:\bԋ ,l 0,U|:L'P蕀ƨH=li9bS:Xԙ{{6`iĻ;`*qޱ@? 
Ðfd&bY"r9\I1uyr++DFNu:q:$4B -\Ei!g yY|0֜76f[ 9{q+n:ܔM>eʖ|5{bOڄz An >,Ḡ:}@Ph?|r Vmo]$tT97w>GԼv[.l0mk>Grڻ9Li[3 Z:ګ!  kaߛa_]kKyk^ `1n8|þ7þb B9m3!W8qoM7|Mڡ;?rab@}s]D.Wֻ^;aAݗ)u\,XWb zRgЁHcI&N W6q#\Wmjt'&B8uyOh݈ΰw)xߑ>nH!Bwa 7aC?>x .18H/S;v^4^BǺzo,+9\I-|zWcp ۛs2e|t%Eq}Aղ7l\@+aE 4T&yFk'l{цi z(5ڸJn%Fx){N(4DԂ#j/e|CBr\_|t$w}cӟVP,g`32-6E{Fgf͓=unYxPHuq2nq>=ZPs VrGs)5Խ/߃0LMjFFɋzz\7Ȑv̹3>0{'J|et\?[H0'|kk"Onwg@e^aR޿b_a-nvZ Ѣ.|N$N ؈Yh5^EA!? ˀ4&dnx6.(kkŷ Pgm^|ZAP,q$%kPb m&cegkC@7oXvMѤ{2G-Aq[vJnͻQq4b8ʖR~ؗ=] <N>׏!1rFaO6=B);C PaDη?x`e!|iPf\}rS QM@sny" #e[=PR6؆b*P ʑ35F=c.cXUKa|II$m25l뽖Db:b% vSqwAy`9u,hۇM*uȄ\j`{H)kB]3@IE@ ݾ5/[5~ m\ʩCz…aJ ;VpPI3ĴGԾ:KA'ӯ{I;~"Ü4/wE/(H _-?g'%YɻxM_iiu QG3@Ah!.d# sS"v+T==ZŶ~ý dd;M<<(3$Geqv]cGd,k r/ > Skxk^+yRQSp䲝eW1 VE!Ls ߊK ^L n&Vnޔջ'"y+FCk {ձ7þhg#8s|=R|žqFމX[&MupRORij|6AT1hSf j2UUy 㜗aDŀy5X*ey[mn)0T`+Fx"*}ùDp:ҿ6/_r {^i|\ijPW8vPE|߶`\W8i 4S_:}xdҡS%fE7c5t`lE~Ni S-(TQYNGZ*BR>zEQSpRcC͖t)a5RP^İ|QVU꛾gRĹĻ_ iY;1YRRR|bҜ$Dn) d*}!ޖ']/B&C1GAh?qtcr"KD'9Ŗ)_0QjC+fG)YR.ˆNfc*Uo缅)_cbٍadl~pO2HGoGQ䭧TuU+ 8a +EL,zX#Y!_HEf 0C{fIUϷ,m.=`|.%#w3:U)pALɹ?Nykt*-LRLe=; eM<*HkR_lHdl(ıHLpy<a;x}2,V}$ͼֹ G򓧳S`N,a"!(aljnn&(fUq>Xl,Y TyP}9fb~!+:\]3=Д4QG sQdF =UJtt~d_UZTi% ,BۈOkX;Խ~,Q'optObr:raY`(7Gm6rPTFtUg9DMJ:]ޔ#,p2nJ:Z[[oEsA2oS6ZĚL$}p;:"Nɲ ܋UYΪw: ֫(qٙ/FyC0vugasNT(Xbݨ2.[蚉nN V{a7u~Z WpP*FjMeyɹ{2v\jW}oNaZgd $"'YwѰYX&^wEyGxԢM^5c[5dN?q<$.* Rv8( 2 K"yhbJt;<x[,gqS# n9tT߷Pë܏ vSǷꖔ~<[љZ- u2%4|YA.A/FRҩ^$4Re"NnS)Xמq鯗wב}w -[2TUNM%(1&uG /Wƨ/Ts@fsqK*hwl:1d1bN鉐vY(T]{.Un RMR|~TYPq](o2/J!EqRPDW`.mhɼ/Xp]b/^DrO B 48S\݊?k d?/צ+jgcHf ]_esb)FutV[ 7Chͳ5oϭ7_vl|7Yra 8AmIKݹ_Pk mns/h턪IF$=b%Eoe{5LQJFg3rw:c[02 #͐hל;S#ö8'JnwY!"C q7k bOL}kxo%[NM#u?F^qmC~}q2캋z2ڦM7cv_^<şendstream endobj 257 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 437 >> stream xcd`ab`dddwt1H3a!SG$kc7s7B``fd/mq/,L(QHT04Q020TpM-LNSM,HM,rr3SK*4l2JJ s4u3K2RSRSJsS ӃP%E )Ey  L,~tk:s2n.wIȋ&Uevp]i> stream xcd`ab`ddpt24H3a!Ïnn?ԅ' ~O+XP_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*@ݧs JKR|SR+"Y޼zƍ_cU"9kmX@::9~ Ϫ=e+]^cڐt߲5~鹙ݒm%O{ ªYiyQa2Nٰm 
%+ts|:̚v<ܤnʹ80e^N=U't/_W5G~Mf[2s}?Oendstream endobj 259 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 955 >> stream xmlSemK{9_b "ve!!3è D$jwb_ioۭ![iݮ/m/#n5[]ֱcP4Ѡ&Hߟ[xYW?<9y?I(dISg^wNK8Y3BA k<~PI Nאb2s^C3>Ukc:b;n=gfzNJl)rCCfszGt^l-yn;t9s\og::vcLsu;.9Ȟmz<bn T6JAIH d$LTlIi(?H^ۃi @+CO(Fq {Ѝ5,-zcCKh;R@!nUvo$ENuJjmm8>UFڂ8x_-Ld&X5r @ BQqOn$ )Y |:(IL>@|)ٶ)~VόcȠ$sG^? ǠF3/WLg'}pU_FӍ+%![ʏi(G6mh,Dendstream endobj 260 0 obj << /Filter /FlateDecode /Length 3073 >> stream xZYo~gB/%  oVxreߞZHɠz`]]]W5ˡ?Ձ8\r ?'ÿ`"|RZ׻] x}S* 381;䦒\W&^ح*J~JWevnxUzYJn*I|ÊIBU򤒛J^T򺒗A3=F6\l٥fWtT6'>![r'Hp_nlw9G)EC˨]ɖIx( `̑]L$*"y;P[t"]zubF;ukL& Y)mRNA(comwD"hWcWu^t./f) YսE-KȬX;J 3틔W"qK"4 *E ;=?P=FO Hfket>9n_r*kMn) *s8sBoĘk+d-RU>?Mޞ`(JE?XXqӕ{j0dyf8HD9mVQgT.c'|4U&k )kG 8G(B7y+[ 9 eJ)}Y'URyy6XFpG%b[G!м Jz¹3pE\PU8&kߠmnERcRMq-^ |n!(a9rtPCxvtElJQy^F]LV[+K 9\ISR% )˅ u^vh$f9^cADMkq {z|;:JGZ;֠i$Tֱ֍ӎoG3(rQpy,e#DV؝r5چ\ʯI4jY>i4%6Sl]ڃME*IŨ#*y2QNjYW{qM%Ȏa~ʒE& )Zx>5x mJ^Vr0h>X}ocj`ܳiҊNiBcQ;'>6yt׾Jem#RIT)BC^OBf~C=HR^&v%̅eySā,@GO˛Z] L;xDB~'$e)PʦBNhevF40s'Ke$-TF,F]ea眳Jg-x)\/_ VuIR1 Nu9kwR];Asы{bt}Ti!Sqv*喳k/:Ґnd ;"xǪ>#o`lܝ`mI:w-)S{5p}ÕtʼI*`XWjwzk,k!inHPA@H a_`hgtֻ/|wL':$p8ߒ6TM%NɻD^+ ;.o`+Qn4 2ĢI}F%E5d\#Ayag^b2#enOI@q} H9'pvP8!j9Ԑ-u:F55oL'i9t{ITL3$͡m3 0)#fQw}db-*dxVkW5W4dD9Jz5ˁ6Hj{n Ӫ)y7$m]>.J%l=w$=axPu%UbGWq5^;%nM~ZpXU$kmy%OՏ '78|{:{TrUGBx06KϦ \yq%kV+2Ŀc,Ib]/n('izdI>[SI#IG9%bx_`pq"rqaЏCi*0+++KR *Y/c*w9% mG5Dӵ12o*퍸yngnw{ɊJZ+ZԱEjq7ju$p.9ajˏN572&3]%U% 1'ﶌt;6rI!!&C\j>rrS(t'?>7WyxuI].>פw/Ad%})1.K Xډ> stream x=M7vwy2݉]&wl qd@ZOC5-}U==,tPE>>io&SyGG?2U?];L\]?{_ܾtuџ6q%z_6?>o?ތi*?`~&)fM%fzyՐsټwnyټ}}oS^|<<`aK1mG6GYzQ?KN;=V8Ͽ yhQ;|,q>fĶ/>۝16|MH %@;oO|.6Υ; m>L>E#LaJvi%}rh u 璍!Ą8`?p<[0!@+NDɄ nT,A3&^fD'xy.޺RUZ8;]Ƒ6ƓL(l'2RT@VafMP o -`,J8'D@gqwL*<% +a?!A3Oz`;(a8NrBq gp"e)6굄qhRZto>kA,O9n d )Vq 9~F@>[ w"   [}$], 憘&A%2fʈQk ę ̦q `=⮼3yffS?cGfrNٸp;,I>Xmr *\HUx Mw뛑LJˏïqLxxF |U6_u3fd6jh\#XӬ86dE/8P<w|/f08Mu(Z!M\pVqxܧM8xeWV z<ϲr|fh=c@Cɕi[=wD0cDVw^ZKa Gl$zqhO{A{Nt1TĠYdXQ K t֡ЈPu W@acb쑧*#k¬0 i@0\^u-fTɂԼ=G`NQ-D`X&tSjx3K}Fl%T0M 
ą4u1YF+?G4^]z:=.g]%ĩX&3&n@KaHC2>C o$m ` ~"Vn@ J ~ėOς1D](U^ 9K/_㢯`wF_֮3)0I`0y'>+"]fy5lkX6q$h>Tj UNR1Y${ 櫼`\B[ݕ)+n& 7_3hhPټAAسhJjg>z_u;<IRLFW] UƶA 1L'Ԉ^BM&+W,mFY[e٪ WB]K6(gZfhSqgm`3ɱQ7F1X?)S#~&d;N}Lueo(B)s0!|'9 iՐ;:R:ǻٿO՘*]ZPnsdl*hIz>x!34O+SvP+shcwVdһ- *E(ߛ¿Rt c:QFOAdXX aWLB_"҇``VeB@yB5 iDxVKUە3:߃`aH*RPSnoI( '|D.*bg.58iS "~Wagh}Ǡ7crOyKm\l)9dɓuI[Hʂ8f\\ ;܁Xe0LHynć 8KQCZ=̩c!jE.aSNR\QH2p#^j^4_ՊL2O;Cg`e`\cv, 8yC !'ӒwG86bOX3e-z6TϤү#<c^mDɗ?cO OvrKmˢ 0`"/hוmLgSGLӔvppelX,'{1ٟ:&~V@2ӟ( i#nE1'u.r"OaCۧfdu(r'32nEf!ZGY*'6/4V 7Xp-fHKFZN.XR8E^``%J55Bg Ny2?&QP'QRz)A$ɦ( z|na&qCs$|L ,Yd9ksTNgoqUL dJOIk1,W)ca6I?j%x[(p%mH.^H~9Gn~D" KY&sX;R%:҃ cQ۾㞙~Z(vYb`I63E Hv= >4%FP `}$ʧ AFX0 ]B>8Ji3(JN ^SƘ+# 6,Sl oBvi~( (fd%l̃sB⃾>'S QG0BǏ"v(~)mOX _Td.3,Ih=X?B'8 *@":#_| u*u-ry S^L,j$j%%0 NetゲFR4/Oej#_Rn 2QDӨU^0:q: DvR" ܊MI|d:UO} X?b Lh[-%HZIe2-k\!+k@Q53 3xF*PIC'eS>J)3oOj}kɫ¨o%ɹyP+Yf~*΢?Nd7)TFi*l}\k)y{_nUdT%T4ضKu\m0ыҝW<=D} DVd)t'bz`]7 ȰG% /$8|J:Ǚ.4mqv.c}ЕNc0GQL}'7>r`*|Є~^}.;$&CJ0F!vZFCX'E#)yg.qAV74Vղx,`8ZP(7BFvȍ1J ̗6Y)Sb7=.vSeZ9f}Ŏ*γ{&^`~n4ZCYr! ,&qtdH5p?!&[ ةo֌'ʏpiJ1^w[U\^JG3-t. L*`@XN.{sVyye{OQ.L%)r!f NjRq>DWn53VC*۞Y_ |t[)3 6smc'1S|Ј妊5-|ەJdn6.ǖ㏼6:1:^Ҿb1YbqJSD.Q8&vA"w^Ms <]gdn$éؖXnx(g\XԂt eE xτmʕƈTNS J4rs!G[huhn3N˼ t OFڅ}>6:Ҟuk"dh  FPf՚b/Spz^SbLNY.Ĕj ]׀  (8--4{%g;1ٙ|y7 4/KZ^!ތ_PCjiBIkn\$lz;+|co՟O˺}ɬkf5'WAc!OpjoZD> stream x\[dq~_ys/X}E l wGLgf 跻!xxgM"cXׯ< 1 W/>~x!._L?^n/H=SQ^\zg & ~2]\޾VLJ aL:>7cZxiv'&#f~Pf~cnZ\O#oi>Et!1t: } npl +2UnAƙ64OIv^pGHi‹JHаǺ;cC&zp=Epv-$Qǔep y6󧍨?n_hR9 iy2-KEèoidZ)8 vy2 ~^QR^4d i|ـk]t+dx_)}#x#$z =z6Iv1OyedPcZ-)Aq LP ԛlh`4^&Xa&+/h`:1H50T;_qgN;jAT`q&|+ ɺ^ln79V7dȨLI&RK9h2IZ_(*S@ܤy A .uDQ2j (p e4N?o>nQ7Dѥ#~Wt ?k%nT $zoI$\=LC  nSO1y C'[Q!O/v Vb* +!Tv<`mY+iN{`5XxTHMB8T9YW~s`OR@"]EpnH8xci:*pQ9c/8a!9xFCDNбxF\#&QDXPл ? JqcTȄcRJi8jphX\.9} L&/ bƢ\!H2))nC;tfG$MZN#Ki֥)Ւ4Ȕtثc_!G\+M@4,%]~W֫^ΠC3uQ{y;aEH8:&%vS6ѶZxSU'V@_TS )ϨLؕSP1DGsNV24 VT%H}ށwlLh/\zB_/cF5_[.RNʚr9$gC:!>% &v.Ay~1D1gCr8*N]7]Q9ޢ;˪ # A"' $,@y~Ūg],`o, _e vNϲ˓dZw@']`2\wШ3 /!$*%VslNwtCk lT,'F*cfpⅩY&Sytv=S MHJs#BW8rd,lr!Pw@Z4Ϋ`gZ@|G! 
";G!J̴pq2療>=,Ūbު|L>^t`]z]|5P=>NC>GV̄+HC+'D=,fmVW3c$WojY#V Yg>7P[1"W8IEޘPS;?_//L;4:k65P6$f֪/U BARJ *+b8.|J.Qf/ ^?6NޏWϕ+4VO2#=OYu!`γB;~dW%gӱhC :_sZDx xܴd,j`FnqnZ#X+>t,Z@q͵PHUJ*MHh!k 8{b"ׯ 2۴ EGy ieJS?:4X . ;IVL+Z@A|ͬ<^Zy LTBK[n O ,;QƸ$MicF&:ęs23:Z!Hf~nޕ-P|ԺLP*vH7dM_k&'pp%jv.l\w܉QJ5ILϞX)_!U'BⅪYwyhck RL m,pCYS[sscPNݥ46d^iˈ|V)CZEOY%n-U E 'uukD6wC*=&˖ ԼS9fɇ2s0K mc$rP bY ZT$x3R޴dI%}һ)gJJv1a7?"q/487B]by 0)|&fnNmkƒZ﷭y=ԮLlGG<٭6?j*:kg,y1ӱW⫱z1rӿn -Ec?̦1UmBP[Z+od,`TZ]4m}ZJ,bqMt豍BĘ|? 3+N崀=)/tIƒƶno-VZʷ$׸ |B.M˯\ s\o(J΋ѱ7Y+ߓ͟`YޝPk׆+)"]akT$ F"ph\8^/ņ/-@OXhnW$6pv|!cU09r"L؏8BX) fUsgghA;k%cLRyV^7Ht__g嘳E鄕Qُq^`YJکWHP哃e>MڵjL#)ڕZM&Y(VjAgM@: &6xO̤X{[ê^@>VzQbb"LvMzai %Swsr+w,v~$2~~R_ϕ,wv6*7߱t f_n+AgJmI\ \`i ;}1Gg&Q'&v`5ÄrAjҤsh@?-S##7+Z}騘#^a%GEugIEXTZV dZQρ .R}_=߰׫/!/RTyH&:F$ 5fm'딆5²N{W](E?"_{Ɗ[%oH X(DԯځfXl:_a'- k 埙 >m*®~oZXl|5ݷpJ5xY[1Cjd1@킽oM2=?kZ>l{\kCkb!L5I^9, Sԏ5[s?KV8ң2Jp+Y?_nV%'Phv;\0\M#zhcs8ܒ|CL.Wt9-Ҵ} 7'pyͰ$wml߶I3?CnL#FL&;H9NyQSCrhH/!4{&f@iW+䌇Jh劘X.ܵLkN)[Siy՚6doyG%0R^.0\d:(~vcbHZ6n(*?lFjR#DUbAtH ZsL!מ#Li3aiL 5=i-Ӛ~HɗI0:JbĿ&@Qaqf_NJAL x  ]endstream endobj 263 0 obj << /Filter /FlateDecode /Length 2672 >> stream xZ[o\ ~W#q7b4A\A Фkie)-Zɮ $CΊhQT# 9?.`ߣ_ovfK\?G-02TSʹ`"<-?__M!7L^0y9"1ubƤ+&9;w>n[RTi>Bvtq&fUqC1`n-g"h!;P 4%ܢyx炩4Wk) )"5ctXK~+>"3K9vm _r9N[l&\1sr -$wнCK 8.t#FOF5FQi4gaw#lMat+߉ \ α<ֆQ+Ɓ{K-ºcrPu$rML b3ѶBcGu7ap rTg;RCj .|ZA!oW1ٰܐj 9aSh'<-ڹb- MޔaL:8;'('G)'НO{ 1>@$!@>ռ$ *<xy ibV*ƘQxV?F: `x~)4i1l#y) (px>֜"tGI, G%@k cL*襅^$gAC}]Q_0$JaTj]0)?3xVs;ubB2Hm4ga!9JY@F3_XAC%$4#К򤃳\ei@G0<9 ]oK2/hdLvi*#wVX(܌z eZ̤811y$Ô 50X .߷\ U ,LHM!zLUnBk)Jmr6*!t(Jh !sR7q>yǘf0! 
v(Ɩx9mm" pT+'M~Ӱ9@RbI>haU;)T6FW4"691bR39PpLcښA~0¨%a'ԿkA?5eZuE eSP](bd*Lq߽0DjAPL7:u iFt!۾ڵR* =@^\td]K*Wb]]xUlҚ4+$:K\,8,;ޝ YfCm` `uU]p/=wn ë*2Iqli$pqDe\?ֲ'#%LK0gb8[^f- qaNTDzFX%t۪V s@6+:;e<@d0ܿTI!vycvGw"Mic'Opa*ND,^A]P)o(-(IQ.Z \,P?p36?jb\7ϪP6#:8xk#:/ Qq^-uT_+Wowj|;`{-nE37b_]V+jON땦݋0qXCXF2ɇN]Hq?Xw ܙDFgi^۩"s&xWn cNUέx Q2vTdbـkuڠk,ꨑ ^ݢ2uZ춘|Ht"\zb1YLXݺWx*OP%OP_;'(y (ɹm꟰DŒ~'Xy'X`exmr aӥ^ meendstream endobj 264 0 obj << /Filter /FlateDecode /Length 4773 >> stream xrdݤ sBHT*!ST<c|a:Y/ U^YӒޭPPW?wI<;:LF(<<:;H^NxMpGW_ր[ipZFp*/4k A.5)"1ob0us*p)A[7\" ) KDD6DX~gc^M3 w(szw0褳 א!JCiiy2ld[kglq؞9&Ry1)a3/s.Qd98H? ɆD0jڟ AW 77B;'͜46 GazRiN/FJʒ`<'miaS|8aZ.j!MN2+~HEۄF\ΐ&!amamsV 14VV^5pܚZ"jaBb60,& dPZAĶ"1L ާQL:֞a3ϊ:a y2`X.Ύ9RXÁnK!pε/yCk(:*Ҙ(33rggi¶QqBrM֦"(IEFnmgRkbdhCg5D,qA /$gh v'}9@̃fDƹ&iAڝm*i|,Ut cQzU0w:em(!jBFezhs/jFCރE~E*xQ A{e1@81{ *Mk4R:"U"H !0C!rV;*c 0}~ ;cG8)jw%)_ %O!>XqUJ vLhVpF4'%۳,dMbnM1 aYx |4M`oXPW 60[23e]NE) ,iC~L%Asw+eOZ[ DվìV.Gr|$_T Ð<2xZW#)tHU"A0bAH b ~ze K*_Arj"^ !?VCUK@bFT.~5WT]5*"V&:`̨5 TOE 9̦\j D,ԒLr+*PwSi7c?MRʔ/kB0GO?uMXT3Bnxu-׳s{7>_FĖ` BNL3?r &(Bݻ) z_o'ѓX%~|іIx%g- L]HyauRr@ɻӚ(&Dn X nO.Y Re$USVK?̴(y wFȥRSEQB30oK 2ˑ'yPX:>A]Hׅs ,Sݹţ0 %n]˾ i' 6*9}tuaϜ;wZQK&n7q1Nt!skYQhA0M ౄ EG2Q9vus:Nf*ʬ&Z9CSoâ̇tWW-VR#9z7ZgbCj^5b嬧F^=J8|ڂOt߀ =oDR7l86bo"I#7H/f>+m{;` ,u*y%dECt!-WW@]{Z"i{G}54H`1ZӵtYܴv:LƶxtvׯH rs 47M2+&6F{ fN˙QA8n cԹNMjÿr7aM'-(y[,?hY3SމPo55ly͙am)lH?&oTi Qg{qH3ZJ*m׋1u]S/*`Sنr, =Ѿ,l_R  iXȐ_zQy |Ț74 S┲Sd'C4 ⳅO~4-kҵh`dz"80/r2Yv @Z[_-zzWyެ\Z|.{9bjͶ:|5OpFzd-W(7\pB>+;&|0r-߽:è5tbY/Yة'4V7;UY?Otw%]}֌T;wy7H+_uz;OiIu麔`3@XC*!%c;h:ೣ7 endstream endobj 265 0 obj << /Filter /FlateDecode /Length 3387 >> stream x\;^ؑE{wCR@LE #HQnISJ?ع'N W=3=˹2,)P_Uӵ_uםn'VgW輻s.a*ԝ}ȶ ).|݃7W*`KilZ4fgRyYzߛ@|; j|fixCR7a3{< ϧ4X4|gXlZsnqm4lB+?k{a&nc| `zɑe&ME68zVEzXlsvYTUlZ'heC" )EFʬSEmtALRΒ["iy/;m4&g܊ ʯڃNє+Z\ JևŁЊ``6;+ j`n{Y#䤅w&n}\Җq/NSh]8.@W9b }\A1Vne!pJet qˎS1k}!SS9dXx} ֹuqN>`:&Wn2"P-{IoX*/V'7,% \AGӦms^",#-v vn%JF&S=3!j&_F$Ωؕ ᒖBwMeRJh6J\OYCO7R~\dHN*dYRˊ@QhTwB5% 8p$\ 2`=R.3 j5!@sV+N]/+U#lQ ". 
M Q_I|ҤMA(,T67PEu{/(;d`*[4dv]b* Br 9a,<7c܊ovZ@筞dNFx άY RVnzϸ5ՓϣV<[A[5[I@_VCA+cxH}͌ȐvM:wT+4#UvOLĬca߳)d,ɜdnM:xH_ uzGCE/Ǧ#MwӰ_.h2nS򹈐"I83H8ĆMTq8m1 $1)l񯧝*Ll-{i9gp; M/>FɝC9 a0<>.0t064A\1Rrps4"R+&@gf^jFtg 讣}nk.h#N_u.[p1̜bwNebɻq[ޣͰtCۼ't?:P[ӫ;V{{'O=^vcŮn_]g}wuo͟OGwꃻ+ȸw/@$ G+8W'Ɖrq-gM_z*Cu#ci@! SjqB0 # ǙvՀqkՙvbqoՙvQŠYt@CNĔDtCJJ YKVCt 97o?i 822}\pe8Je>$XVY'"@^`F|=E@"E@ȁid"VxOHeFpc2"=,I 54J$!f,R1AC)X D x0<`i.3GK/y2[@$-@a#W/CV$y,5=f b  LF̘%"AWҀ^9d%b\$qe%e\DChD"A =iOFRd'Pā!ɷA-(還q"I."R8AY | P06&_:}5 ܏RR&г"|g* l Ba)]6jAMWǙ!=74!WCvUYX5mǙ*1.*5&aʶlUšY` ^l},AF[0ҵX-v}"Lgf^(WEn.Fju lRߌ)nċ@i"gO"S-.,D(HYVoFA!4\Q!k3Pg +@.se!K4h nЫÿ@*gV$*]VBp }&D ^[p&Kd#@ve `֎u9JD&PyKRR$Ot$!fcJ^7D((N[$zv'h%X` TLMҜ 4J0eUʁK(:.>RbRxŮL<GŌ]~V "`*ϩՀeh{]ZdǓ;O׿?H>?Xؿtzk{=HC$!zA=rIH.i#[, m&-pXPZ1~XvΜlm~ش#&󞹦 dx1OOlZ^gYM 68mkӴͿi`mm:!xpc#;eb7vqR W9y">Njwvq[+u]hb3Ln_bjzfnŏ$$-YR\le) endstream endobj 266 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 2019 >> stream xۖ* D'7AR_b=¶@P;~۶ͼ di v@%_~k?"IN)Cz&@~rW,tdwx@cK踣4?_WaR|Jj.d:=Ht@#`,I` Gzk?M VG aV+;i[sK93?aa}Oͳ'_߿5,@c8V}I 5}f 0<uiey?կ\Uwh-5(twgѳПշ "mH3P4oJb)6nun{ [@>`@Bg`x}ҟ62i%\@;L@n1rK3l `?ns_@ӰSh_G#@6]lW6q  Rb˺<ezT.w\ MBO/ [Y.;w:J Y/uWP{^o'4On}gTjkY[_WjRs 3Q[j`R>!s<2nK^^HE7͹3 @/AIդϮqs kA͆Ph;;4֏bpy %ȵ m*m@2C37ͤw`嵍؁&48'~3f茞 4PmGn*?n}):G940(d^/(M6X)L>R3Hfv@YWpRfz w!ɝt{{sj|3 jM` qW@R'C%`yzr2u+m и@V~s~2n@..HƝR }[A̔FH,Ǵ^S-\ UҡiM$љYf3(cw3 y2 [ (ލǍ7>k "  Lqu,zP\?M#y,x~/3fA}!Cd@MY?^׸7&q&/oz&`=`} <퀦'Pj3@J֎ŀ7smҨohZ \f7 (p8p{Y; ^@Mf;0f4W0aܼt}-|endstream endobj 267 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 1752 >> stream x흍r k߹in萴,ؙv|f%>i??8D+{V8)sɀ9j:ϯ @;_tdR lأ@cvH2t껑(rg6x4Swj hП B*IAQAe,?@L-ul6=c@?5)ىn' :ЩAcd4h FR;]9t+ fu vpK![}QV?Y3mYwǓRO@<my\`.h?{z7s1NjX"% S٢{܉>x)F4 \Z@dTo`n ߾1Myf=sy h@1{$"l34rl3Q, Ȟg$00 v)EI' 
"`M[`\mm.69wAQ6 f'8 fX[6&ǁn@n*/bv;0OC葓d MzQ/{cmժ7>@vH?`@(]?J+7Ab/ qASи^$3 xd"Ot~=`e:!Eǭ\Rۺj@ދ;(BSww^=8[[ugn~k EzT @<S#7C] yP Ɨ/7"`4zM=:Q xڵu @FEs 6g2uʮ$ #7dưDI6nl]6j`rmٞnvFc`@ ` *Z J2 J@l;Fp`.Jc I1.j7EG@t>(fpu n ^Wx{3f(5E׳&P}=\ 3@;A ѻ A4G8mdN>gIٟ#@2 )ig t/=F8d(2 i+(1nUhbր||endstream endobj 268 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 1809 >> stream xb* kvnZ ha1^#Cvtf~>zUbZ_ZGCsQxw;8|b PfpnG Ed'P&Y`#5h9vg]*}v_<2~]h#pk\ӣٱp ]H<bh)n94v+M/s+3$FRX e@:#W@vq }3vV:UږK4 <VA.+?7tyUT&nȅ `Ӹ'^uv#q*KQh=':+p{:'Ԓ>@L'Z@ + jn (Mp/b`gٿUvM`] f0 IOz{ <P{g@u6؆F-XF CT@;:x GPY r{^GliB?Wzmh H8/cp$% L x/TXt D;Y d*?8<. ; AHbR2U*@J7_N@ǖk>ޛ 3@&#c˺dj/ǫPw j TMLdMg*7]BlWO@ubߣIy> stream xZn\v&Y$UAA lEF Z"Bvђ+\KZRG $/:}$E4 ;?Y$sf73ua+ff(VwZ}3p.PRR0vegōE̝L!%)NQSJ4_y,D(SѻS#jGPgHGG$MgD*y>1^D8܄8VoE &/8CW6\ ѡCj/$<3CVmV 閺d]DHʄnD S!8 B eQBn4l ,K%sZ92+n7U4=Go~$$vm IQJsNeNE+H4 eoE9-۰h~iY b%i4 /mע*A@I(>7WX kJ.9խ<׭sqn&|oYk_)Tq)YA}4Q*!{tEJ_҅3> <9B R \%'~ci(ZA!:lH" ꔐr*)Zմ6. Ш͍\I=)\P1SuJQgLچWLL sAv!η$QemÕDD3(I!Wcr[mgJBޯ#]puŞ10GgZ}l.QHնO  rXr|!MSwIѯ yT]TN YUo=E's"c/W8E%vO^'X/&k[UɨuIID=9&UU,B雧$)'U.9FyU )(2iLj˪5k|m stՐ^M;~l~y-"-ׅϢ\9$P?d$^B$Pdu ݍ. ==!8 >endstream endobj 270 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 6306 >> stream x]$[?(((>uED\POEe5 @܇;]ޚ9f~7hjz{^߭GWavիۍ-knܸmnqaofl8G݈+NnlO>==N-^08>p8lۙ8l_@fj[X&|O r4FዧAah =RCۧ6} ÆLšDL@=O:}]+WpD@P(FE٢C}pMχɐy^ɟEw_t\< ?!CS(W蕃ZpID@`/QA_hۧA ' l C,? QVYk*B& 'SuPāE?>ʁ% !|@ q^"p`^@)}DaL'dЗGP 8^ N\o p{PiAV]bO3ͪ:AѬ "pEJ\a'jҮgo eh=YO~nBb)&} $3Xbeh?BQ<(5YS`%zOZxEA@~HAS Φȸ]Ed@gcZh""c:@C6Ъ.)iOK &܈xJ][i;Aш1&@ZKTv`w}>fQsHg>;0B)\p x(: ed#"N44x#p؋? 
~<Ts`qй_~Q 2y)1)kwڜйE"kqb\ 8Rn'7M$7ld( 9XfݱA+Mо+:I P&2b(sp,,t RL1A" Ϝ9C6!<+%Am " ztGP#;@B'Zl5A>k=]|>& ާzA@+:-Y+\ N E1"+-JVA|#|dOy,t6| @Pr;K??q`in@3m@B36|/h% / p)'Me%Ѡ[/"TߤX(nCBS|UX_+0GBG}`9*uX/K)Y'tBL&VkQB='>"W_]/%!lC=DOPҴ7xctѷL,%V wf$W(A{뭷 O-"R,FeÄTбaXA{-8w}WOSt%^(W3-BZ/?}֐z.?Y!d:n?>z`bn C_ k);>#M^Q;T9Pе+- ;VED9;>{^!PP:v4YgM`ߗǗzY&t/h%Ѕ (JALp#=У,a?kċ? ׷>,@+%N:8UGgwcrע_P@@_|_lIr E+fHBz]ГveD@`/,|,?ݦ_&UY 3JngLLm5cI p #a d'kk#3W 6=.v1P: ~9y`LV; KH~!1sJ')LSMdkq^4Y/_ZA0EM3W7* ǯ(ۯodw? 3&ZdnKE#BnoFyX,뺚 "b? ?$)T@?`5F$$:U6p$ p].]H+w_Faٟo~+ ] ݱn_~& hŏ-@Ŏ9O?E]2,Z(.ucr""}4éu` , 6{gM0px%98Le@MP)TlXƤ/ixǢ OmJ[?1aM@)dKbKOM@q{wY \,F{ |[n! D+b!4+վ2ov 0pD4J0}v!EB86o~fPn^ v1Qؗ@ր!d$"HHc#Nn/^/V6wD,dKҠaFhH(qg`AB$c$VDK/4 zF@՝D jɑ ay#NRMPFАE(p?/e_AL{#?#ΰůS$'-F1NAM N&-,i@3'og |!m P jGZΞl>v CSkԂ=&cDH@;<,lհXjULJ_g|쾦ڹ% @`gb7j(c$Ad4菒 ܹs1r#`-DNP#JWW#⊾rBet3g$q!9-^h Nd1 2n$ G~}. A0:P??7yj~\@pTeW>?;u"X~#*jCa6(lf/K@GSrg* z82'qjBD5A3^s.& bMY!!#hߢ z?N=ZwmY*.8Pz"Izm~(Bjb:7wuW=%`@im*[NtϏ?gfn^sj.yf;k! ,G?@)QrDlbymDAy*$:싺S$ j1o]'bQdXi!:vk]C%F?mF&\aE'fk::LkAwKd4L.h@* UBӉThG-^p-}뭷 f]YYѧvS=%[Ѭ1 _;pͧs& FCX_$gHkzUzޕIi(x"?/|GWAأ/EHt0:UPqM9of-vٞv̦}lqf`Ş0.d9g?LH (uF"%aG@`OxY-EP'ANGpM@5^^a#XMi Pu(IVdeW寢[$ #&%s=kZbo>$w&=8| @?RF璡Og}gśA祸~E6E܋`k/qv@<@S&*J2sS{M.=CQeE\ qpa'u:BO"m%b`wYoPpu[ɿh>Ҁ-ր3'mPG61,>^o޶]h]&`z 9SPǥeB(ƝVВ%LC$`."w0  rR~0:`MSkяУ&endstream endobj 271 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /DCTDecode /Height 128 /Subtype /Image /Width 128 /Length 1754 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?SJ@<ѵT6dwIceq[M8P!]]W`H4łk?j1Y~j[ +Wba]N6:&RWvjheJ7P>> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr 
$4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?WpKW%~rMojW۶MgV>aTeN?P[mOi+W[bQ@sRUr m'Xځ4jqrk]vsXPx Q5fO'<1b⣖.*^* d̘r Ѹj9bk:E,kVhMV0sҀ*EMkX8A=+Z1@Zt{q[EdڨP*h Z͹}٢Yޡ@.bߚ˸'tk9̎hALMwm`ƛn_ "$sQ2֋[J>7TmW\Ք"*hdGQDԪ jP[ȭ1YEZ6.(hik):MqQ<)͸ҾXwsP|~8w {PA%;WW.:Uy~o&q\״nB7UU;WVl1@ 0 izT ֜G,LOMy@]6sY2jwT-Z@H{X-[v8 K#E-#^RMjZbԎn(h\`u;E2I z=&[;v9@=hs޹ =j@K?iL٪qEK,5YK&&f}uyn9@$pj ҙޠLκk5օg5!&<ܱn,i&G/QO7h]{kTnhD7-Y!&[PpVl- un_-endstream endobj 273 0 obj << /Filter /FlateDecode /Length 2130 >> stream xXMoGsY6{"qL`D6HQ6ع(@U^WC#z_VꛛK[ak0QN^_sR!J6WJa0UA9(WMYDݼW+bO+clֺUtN)nU4yǼyzO%`T]Fc9'4*zԬ AE|q62 %TO3yzMɦV޲Ȅϰy3 :FD|J9+k@|=ypUQ#U$q{Lk!T4ɱ[YaAZʻ+ͭ *QT+hnp]7 4ĦƇfs K٬b zmm9^kЏHKKG/\|OeDehwo6?~ؠT9O;kՊ#P>Z3JMEt̠:$>PvZz5BmZ;hEk)=dct]{BF!VvM^\{ZHnoIN DhlPX,+m12ȉlmޫ 4CNgi/iMۧ=3Eb=SőpewǫeJ>C_cl>s;J B{`9uD&Kn'GJ zO$kFrbgܱ^ [œFn<`s&}9~ LToY]tig ;'2K2F.4JI]D +@`E9AV񦊷U' .G[lob(8 g_ҏ̝}h˲z~, zS=N=;b ~"i{f^ <{tpguy~?͑fendstream endobj 274 0 obj << /Filter /FlateDecode /Length 43292 >> stream xK&q%_˺vzЀv=@/-8$%P!Rluqs<Q-4Xyndd=yOo_?_xOUz{{oL+o_?=oLz7/?۷-ܟx>+jm f{f{}_Os?o99߾~Jyr{[Oy/Ow+0~<%c=~ gsl#/ֽWx ے-ul^'|k/^K} {?/Xso:x+ڵui/|Oi/x-ނ|A}8z EJw~Y"2+ְGc{_p뾡(2ߟG)m1R^ ғQF|Ŷ> Mz{u&={,Q)}>/(YQzlaZ4@|lpƞ_9-~mPg-]+e6%~@s#R[exq+J{:[o;x*Os6$+2s eIDdϮ훤iK~ Lި* 7!҉wpW7y#~E.O>[6E;ȔO=o.O7#[oo"Rߛ"U7_EaʶG,la٧yE*D_z62lp.lCV8<[Hn@狎"C=$:FL"ؑc[ZCndpGRlmdz#[Ŧ"s@T@7 4ޛ+ i϶K))O?"הE7"Y6'm@>RHDB~` ۾0|]eGo:(2ZET|ֱ- Y"Q$mQďUd 2,CDyT TA="C`YoE}>j/ulp&Ⱦ1^j-9cN{);Bp0HlAE$a+{3`u"+b Ȟpb?'R{cТRI7&q |shKQ_rj7d)_ל.EHN s[4SD'hBbX{_̡G{+oo$VcQۮԠ77J!T9̂RWC{35"[E=er 'it{Z_ 5f^ɔ`An0 ;Y_ϭmV`ܒ{U`lNG}2\e;LEٴje?1ǯK/9X緪Xq--AdA"OXlU| 6ƒ$FV&p,nmr—H,ZޓVִE.~B׭h-uLmEoZ$&H{/ k} |~1˿¡U Zu[n5ƀVP Zmo5t!jʜ#+X׽ݗdf}b H&҉ Q~l2SnD,Fĵ2D̫DkD| cC̊ݴZ)eϨ4da]n{^wx[t ~nLm 72D$)RS6R I@R¾$PP£$RPǁzP~ږ7`ŽwO,;;W[[qI}+| j [D #eR<Vi3j']VGyaK³%G2gJ_ԾX1]gt͹M/1v Dh=R@Uwky\a'@ME=1& FDnOLfFRϸc<3JW;xt|Ay/EW]Aܬ,kj6=a10ƖeR7_=Ֆx+VFlG+^SH$`(bQnsnč${)lTsC@ft:6wH>c3H-T2Af'q#&եȤ1j8I)dMQAvlľȗH3+2h#ɲ'x.*їf{IB ̑RLbt#R {0PA^o҆LȐz4A6z(Qp!?6hzR4\sQnjBPl ^הi-A/*B yJsVYJnn5%{C7}"% 
\FS"3#iM9;"^j?5a iޒB?.R`kE2>F=!%@ܮ'8tm}L7$el *z:4"(~3= d$HA%=!oDOgH3pظA,ȇ< E:1̂ o ̓YʢZۖ?Ob! $ *=x5) )VfI|tF^Ź8Nx+p%q!11OrDi, ]KaI#E\Աݧ~Eq H]hAt\uAF [z`xa7'9b㼹Ly czbmDO\Ⱥe^f7e*ʼ yeMQI(s%DWPϕ%\vjk| ?-Bm\‘^<F/ce~o=J'V7J2zdRwlEu[@ePI K&e.crM88$F,tLhlii a|o]'~l+!^U⇙y[/BWI?l?DF 'yBÅH QKVfrl/^uQIJTh!,X.2wd#[F$b O(+6fN@*K !{c%81r8 k3 Kk:ˠtYpXBT?YbZ.֐,<νDSt-8|բ/3=̞"锂o UIHVCtNH/zm$ZʃV1b X pd BDJ G:<)eC"1ݜipIp{>_G=@ᾐwD Yx4 ~ 4–[i:Lg+zT\NiQD՛= ,H ,%\Ӿ]ϤR={C:W4g嗪j%L=twΣiaI[ ( ʝ֭ХGӾP95Q$ٺ}\,,BYfnqmcu+tvG&Q'hKqa" )(v!UwGKM )e4XRSn; Ro@XXtn͹D# I8qڵOp΄ۄ0rF6ϙl|k189m9"ȺIfΝjN56vadЈ=F,u;7=6٬6Ԋy6NY{n:Ʊz6V<\glU9q$ G$ ^ OyHQ6ȃd)Ӽόm%I]+SdKn6Y !Sd[N,N7d;.8ВSxk,,^Z\G".(JD. ˭%>h$AU)/ָ.D:?'HRua) M6 #")i4%Cd ܢV>$S< zR5<;HJ81<5*b+I$5fF%3']P5𖈚)||r+#!"+*?)i! ܒˡVpZ"b\hIcC^vK.AFLH\PKO-q[1sv\NA&+HO$YOHly=O6N^0gGl['>|-Z2 5+!Ħ!ۄ^S5Cl

P{O?"ǷWAS_N|_+^o~oo:S)dQ2#0#^V -DghVP@-"S*H ƧD=7YSS,[#CZ}->$Ť 5Ƞq{H3]T:-Y?z4)T;zЅ-1LG8KWEDW*~CdYRrTʁ b倨Jƴ 2hf HEYj]HAVԍ~ ʒtB[F "qD?V{CjD""Aj[*"PHP4FP' ʹ.+uW+bjҭv# QW)HJ 䇫Xf!H&Q ?V|RfL+Qp[NK/9t9\уp Z HOM[+Hψ@zj:tdIgDCɚFJ ;#>Gː ee).^1T +#bM(+)= QYNX]V^?ݰv<5ʈ@VL~TY=ʈr AtBcY"EgDX :VaP3"J0T;ى:Kwv'*gc!&{&ӖD:\Xi9ki-.%ܹ$TҬz/7hHZ{?ԥ}Xj\gW?дgS۩*K2 먟y[G!>/ =×_oⳅ__~OoAoC~>~Wŗߟr43C/D\kؠ!B؃RT%&r-44UhLrEA  LވDYtCxfƲ8(>ݖ#TY?Dr0 qbi 4r!z ֲC$d 7c+[w E*HYSVWZ-?wGL#xo .d pwĿgH;Z3 wAtAt wkqq{3P3 r &%E>_|/sAL "N~#.e(.@\un|qAn G;+ڋ&JA~[^^2AdRp.u: wďIopE];2F\ Up.tT߁X?_rp܂Hw#%UW܂( p݇Al;r ?,!BGl>ŶE-EYQLr %#gf-9k,/y$941 EC ĥ6Evwķ'KQD]#g`kW$`^*)Eu]EJ ^DFUmJ "r]7 Ww+I'[јiMwgC_e) R߁TenƸsUf^WM^ WWU֋ W}pl{]JH caE5:C?OHzxeR1H7 hSJzcaī!~l?Po<_ Xg зv *$65sH*G|o}a=v%-[y"梁mg`fG}xА^RJ~bP }ΓmT.@[QE]A^@s")]]N/ ttF=Sp#HS+etB|9ẏ(cM{ɾBV*jMO|5-xE:K`ӳT>9.,"R, o7Ȣh/]\F4Su]Ng>"s\=Ȳ>i0]"%y7" gmT$(/]&(K]Hl|$m v*򑮆ÊhSʡ"Eڷz IG< I@uPJ׎AmjI)G?@*Ѐ 8AR7݁qI巁@:v.v 7Gb)VL%m 3Od,P %҅ ӅV)ZȍSab92ɤ嵦.޼)F [@*VOk ;C#;*Ƀձ,+tsT XC;'Ήa(ݜT$к)RFMGH>ҥf3u6+dQg&#稳Ɏ,GyԴYvق W^[C%ǵ0߱}".ĨLڏŮ7Y"lt3iH՗U;Ϧ K̄©(lbMfAwEzH4h3M.u "Rbh3ijԣ6Q2* YhQIPZreL̤sD"_m]ʬ`e#G1Yd*+ HuAA? !]Vtj2eq]&^fj.GAUe~ 1F[I]暋&ĴYj0<.k\4M "&eC\4!in1Kub":[~ ?m19oޗ߭)^wjOւEenE[ ~h]F엩:t"pW<L NJNT %vmkz/jf+ '\lY[@XSly\6٦ֵkurG8:Q␯TiLU[7e }Nl56V< ?X J?W ~?&۱0A.Z_h/h\`AJdL9Kۭ/i\/kbO36V౵$]cmjYV6YZ E/^u\BMUѸ 5O\YȊM= rsaIٞ\b0hop- z&9ͬC,r"/>.&kKj1ɬ-ʀRdcjQɴљL5QףȠ2X[RL9Cy"[9;V0c_!1rCcpkYZvӱs˘AGgQkpm8L54$vsKڈ\3wd3sk+=CeIWFʑaWHͭT9̫D}w-TDj^.() ރuezLgZi\֕٢kZ 7v&v{+shyKu[}uct&_)B*g֨nmh\a_0h @[ Ώ5EtR:UcMu*@i򱯪:њ2N/,ҋVv=5ei8ЊyYSR:TӢh;ڙ[mMIߥv*f4[0lƻt;vLqv Wǁ~q= v~?ʫm_W[TfkKG۴˚iġjj!6fnPyi" &|1Zx̬ܺ0VslZ_ XJW5J7cUZE1 8WTCS:ss4L]G噤c3IHE4!i2LӶ JO?c/z ׀+K`ރk ].B,[CG`֟S'=x|'{PwN4x8bL-+p .PwpD]>̧?Z}{Al?vSD=#7'`)*2D9/}Yfc_tnf 1+:x1W;ƾ:֢[?Y87ĵj*w8}q/ih}\t)fUȓwާ[Me?ʯڜ[g2XvW;ފs}?Mwk;\:JѾtG$ 4ftt&-?Q4~ԣw֫iZ) M*PPtT pZDң{eЙ}Jeݏ/oڥC'JvbbߏڵS!':}\JPǩ~h}"G}/^G3aYfFi,1,B D[DۧP\L3jUO-aXVaY1+\K,ͥv?W?ɼ wO˚QŴVּSU(ІwfxoɂN.JH)Ra2J|YVN|9qa񩮰 |%Xj3+'{abeZNWnEWlgυ)ſkʧ'ƽeE>rXX#¨GuB XDa>x͏c|8$1 `6 H8F. 
O t+L@,|ƨO XtŀӃh=F(L#p7iʡ%{iqrR^_e^/I% 1aGQsxBC1]6'mN%O\ƴq$e 6۬Mb7!Zu2mJ j\٬CclȲN,vBlVf+點X,i%CF+ !_EɊpsUdKry;SϕVQ ʚCl蹲*-p,ٸ]M9sgрnyY=i&yV5l?P1+UȃJvBxrMwba^(s-H%,[0Jl&Ec5?UmIc^PfXU+|E?}9j}Z0r,Z%mh,fQdE;']xK8V!FP\Eo:)ƤH"F≅ !x'V"5'f/u"qAƈDq s4%5yVNxA)| vZ߳\ӢZCHe 1 ᛔL c'[ 2.לбLK'!LQʍ9V2#+Z1h(2HfA*ro뀓N:@ ī%> _r&T=X@>RdCm *٦}kU 3]j)WwR jDCBG"U,|H%%_ zU@ Dj:D*"0H%e8m˩U HD*2%H5Z)|Zw: TD"j9VMTI>ZCYN5Ʃ2CbFIS_ASE"*Hpͣhëi_QL*XYE*2]k%Y9ά~ LAbu x8:*A1Xea0ΪEʖlPgց: s-%㯠];$A5tp0DܑJC'CȽ'd3"3Vn$>'ߑpB;g@;ɴti|>A < |Dx 3^^IKej+C7m7s;jG<-1- y=I};IQrr=Q"?J7/b$2##CfqI1ET )]Q@4THu2}QdZVXsuf/"$q"86"<-M+,8Td&%L@A#)&I@l׀B߁ӛ}R(<$L%X^yQ#ɜ \,;@$!q֔ˡ$HX-ia մ*=#x.ŭuYz j#* p-Zk xQ2 _5KIh1!Jf b1qXJDk.]kG;:u5Z[ӟ뇎jCAUn*MRf,T23_$S$o<IhI*٫^7{9VIg\iy!9*VIeև ɒ|а^C둶bBe%tmp[ ꕷj#bZe!taFB1 ?,;x\ s5** f1>9;~`Dj0W⟭e5HYぶϡfEBTIC3m%$ƁтTF ҉, N[-xA8NS'U4HX8srDVX=q5QVCə+AU _*?CgWή5"LсSIO /U*S=,=V zdN1/VIa$"$\apPEz3pPz#Amdԓ?rP*B /'TEA}s`#T\I;GѢ9IH0}]bXqJǺ;$/|]n 8uU0N2\VAH~$4>%:]dX%rˆx+bqa@+/f FeFz))D@\tX.{b򢗔RnWgr:HH jezr$0)~BP$$QT& FJ(jb@6q)V=6f4`{c^ ð,4餆CjƗ09a%Lthrs$B]!3l!5D$)Co򪙵" -:#EyK@ C.ִC." 
D+®;TboM$;#nM%\'!U8 :!Nw$ 9`gv!$%.RОPV؂T'Et)(I p]9@xbY 嬇mq8@t(%y7/V1YAtL+(I"<"ݾR)(5"%( i#0<BZrG:Ң&K> cFUԟZEl,SAHĖpC""6T <ƌbhŦ3i"Hv*GAUsNbc̷H4RU[pբբ4NaJri^j‰ 6tyZCj:G:⻇\]gim+oxYQ >mDx/:fwp?D ΎbSp3Vdղ7ΝiW 2F.D}'f-jw2\AnR]Jԥ_;Љ7ȑT`CE1" O<^HZH.(xfgQ[0"렖F$&Z|.D ]lM[hÐ|ZꁜHC.B$!m,%WdN MQ"71;G"DBe졳=$9 'C4qȴonAX3G_=}#"ɩtJ#8Hׯ8uBvqWH4b'؇,0|6".asdѣo֙;)d!d OX=*XSP[#\an ,ag tPQwBf.k^g|d@z".䕥kaYh!SGawz`VV\߂p5%L=:2#rPwip; lU, uMU|lֈ6Ҕ)sZhNg5@V> Lj( 9 Nb^CK0 sT_4":dZXFN/K1.SC߂', wNKX^`Cfjd[6& I3ic*!%Up݂~b!![ɘ\s*iyK 0Dҝ0;t2:fG-J)!c@twl3Uź[JBP~bs3g%-&תȿ{)n`_ȱIGmxIf]kLs7[5i,:U `/NJ '.z&f갶w &FB;~':"ׄ?Y2K ԒvԎs*G5 Myj+MTi&HhZwd%jhYMH1Y$yd!}i};,x_b9e[E;8Y${:kzJzW"yx+!m9H4d߁ѼYT,.<H*$; "-$]T]$Fw`Fsq#L >&Z5"o?BWH !^ h`BddhRIX%UҖ[!!!䐐O$b6[{*5)XӨִ;LO͈<󱦥T$`+=Eۙ֠*rkp]4`8Uه ڇ6 ov6p+hb2HԂc;@VaV6 sUQmڠvF δ9*20,M 4\>3 T#?S~ b"|,|Fujk:e|rGagH522G9li[[1jO-mhf O>HJ:T^2ac# RxE՗@`~XW UҎ_ec-aٰOs*"P/J$GS(/Hb,-h PRE-#2QN{z1#tnp[aj,@2mI_qD%=P6)Xl1,p 603و|JS7sl5e ZX4 ;$u[!ھϭw[nѾY Ǿn߂S$F ܾ]s}ˬ`߂Ӯl:ӎ}!^w,5:vĬen.]lGzlZ1^R=#8i#rܥe:]tV~ќ]֬n peDih ܒrum[fnۻcQرhep%= W$cBT,-ۖ]м]5xzyIw YnM}.GT=TxE{v]D^E1h+XAf=g^AGuG^^Wz] e@OkȀ22azQe2.N%.-XKX!u OL]ZnBq'݄}OFDJ:JLa&un|RsQV.͉9R'u'u񦐺t.q\dH]ȼZMPuR+O]2.}H`Xca%\,sX̟;2Nd} Ayƙ~ i )a&}hJxLFxF3l†h <}ÚxFњ֨kѻЄv&l4r } N\3sY1M$Ѣr8$XLdi'@s:f#ѐlZ|M6@t ;xs P_"YDJǪN  Po&V  + vA@[m{_Y r0J3FΌ7 9h#cE;+!wLzVBfJLHk![V)p$ZJ(ϕ BF7?5`|^BeƷo-%sY(5'gd"~by 0$.0vVGjVҜՎ֢b! ;/To2pB6`U-V8!jW 5Zr56WmAs{\++* c:͝ńZ c9!*#!a-w}"ܓ:}bL}%GΎLo<('} &U;UdAx^rGW 6W)hBnB.ƃK'!v$AWcG4MϰpMsW;Z "E4hu|%M+Q.%Ǝv 3%ނ[Xcz A6ќY\q[9 9Zz"}YN7Be؏R5!:Ig\\ԴܱE3 N('8lsce4yl $BOWD㕠KpAj{̍R$!n ,3jI ORKIO:'^?Wy^JZDK1^ if$eQrGəxejZrfoQ9>!GH9> ࣰDQ؆&(tG!\Gb پ0G36x$S#Ilq<~:/<> ukH܌u$U'q=q$^ ;;]'yjY ;+9>HJt'D>@mJ?@t4rny jZǃShs S>,ِr;%5} B>|B9:%bpJж"ɁxM6ȽI۔IΏ,Іn2VZY!@X7E{ E/vq20D~ ՍxrT4-oWb+"3r6iHlkd ]7#s Y0i zM՜"q0gy-]OL{EDt8=9`1+YV i:Iz`f0,&\ګI ǯŽD+'79E.hd"a8tLj-M. eAmC]7% r;HgFKSfQ,Q,}`5 8DΘ7ʾm3o|,#LiyeӦV虄`y8gF_ F}ApJ%ђEHS׸4܈)lq{9Lِ AsD"p8i.cE OY3dXA0h"ص<&}5`D'Z,BF? 
h*a:f÷ GF|#ȚJ9W雹?O17]j}<~ːdr -4+ ؀ MU6\X!?ԣ7Қ=6tk/{AVzwЄPRѣVt#tE 1u]#ύc[<o]@˨gk:"ŏ #A GZc4vz FlutGX >NpVk[ܫ!ˤEmLZ8rcvҡ&-< Ģ!-Rpjґ&=Wz˃]Hsē=:vj*Q%v9@s]~#rg21-b:@l|XMH,a+*;Hw۱ux Téω)Z?b"R&OV]ը6Vbo~o_U럙kRǩ+t~dzk4KQs$KD6@=B$S)tx@jk j]UgKD~J %Y<&Ai8Ոh]@EOp[ B[cSm'jOӋH5W 廕5.edDi/XTRNnf~ aY%y>VJg@%QFz2BꉡȈp )#,di\c3LrU[j6 YȔ0m5kee<Ĩ,Q%-ۈb4"ž%4GRLju[XNfB0\0Mf7[Xcb4 :_9m9wR)F#1,Gj( ( b4̂e)‰6h. 1Q-=Aj=ԮX՜܂f| %ӿ}o*$z){CUFݯۿ~]ǧ˧)l<`_^KEj$Tȁpl4EzA_@~rTJ-`.پ$sJB՟~~19˿5~@|,VVݸ*[<Cg/7о|pw7>}wShJ3^Dj0xxǟUMT-U\W%Wҗ*Pߏ!s{JR~Lm֪1F*<Ӗ>C"yC~>~Wŗߟ#L=mFg}"=D̑m1]~ 齭ݽ[p`24't#nmdР9k/Nm`8J } u%q >NSl#2! u#˻*{8R`JJF 2|ʣ ]r& ((ޅh.FұSH#ҒeK R.R^cbH抈8z Dz7V׆vR/`e/9.e6҈ͤ," _SiP%y/ RII=}DJ>.0`j:r"MԖ$Vmb@cBZ/L4ٝćXڍx![ӦZ@n _mZ@܀Fw6odt}ъfnd9 7"'b$Y1&ӗcMB&P[m,+xP;:gԍ6 ^9fI8d}$\]iay"K~ Y=?f$٪m(r{,]ȰG *[{ŃBjT뷿 ]+Usd |͊YROoVȲ=_zڇGF׿j<ƥgS3YcCw_}E :?78HDVVVJ|*+/_*7.EDGWF|!H--Hޡ4${ *5_@# K/`)[r+iW YfHVA%,k$/YuYuɰADT' a#6),A߁eMh_Аi4p#7񈊈DU7dZF,Ǻ> AmGP+2jZޔmto jEV Z(BAmUGPdXs]qvb݉>i햷>W>rZ3, SEvrj]N+nYvG|5LqĴ.?bZmU5'R #΅Lhт> m;,.m4zsH#m6JHzjA|JW21!h$f,E19 J r &ft ӊȊ@s֫fzg 9 &)AL;`RZS|ڿb{w BZ>x"=\Jb${>i",!5w`~ ǻV6|P^!~U EU& WWI^DCl׽\WU`f) F!rk#$_f/9ZkG v7W926} Hk.2vKҖ yC"U[uWTMJ F%x#gWџ?h!m/bBw+~߿5E#ldK8J+ o}o|r˗xo>d\?z>,5~}ŅST n'oR%^qC&޷7@^påmZ./$bhw/#<^xCv+n(.&%@JZts{ƶJjaPdAyܩpWAZY{ MPOl7yZ|dW %'QΝ8K06)'J8&B:>/݆ruj%7a}1_JGsCO"TxHҶF| 7BU 1[YJ^žVI@.%U043nO$TLS5KڧoL^fr%ڠ }[bm&&Eظ5E <B`RAhqki0AT5l6R9.ib@%)Q%Cl^DBC(0gTzhÀ8m GF+Τ*9und  D&=T)[qP"OýFsk (x> Lz>7=z`[B;e[YIW`ki]5nV܊<F'M4Ԍ*ٱtkG͍ :*+HEt8z#6ٚլI:OG)ʯQ>*ř3N=;hO)ft*L*5H{7lp ;':^h9܎f[uLnL>ramsq6QudcJ\Ufh 4;͊ʧxRp`F?V;.UTzc6(Б94kP)A5q`!K32+ZVGbVSRމWL-h6R(ՠGp!W8d5Uzhe '!=;Q*$NUwTᲭ%Άurd[Eʬ' R{iWU' t""A|d^}GZXCg* 9bU8l#lf]pthqkIkt$EfÆhӃJ"x XtX5W0e}Mt\q5DY m}$`9UwZT)-68F ndG55{B2h MJM~=^t? 
74&]pW{`ԒU"k4=G\>JMM0z Q%%[irFdl҉ak2RP ?L fwGK2|tDZ) "gP^y!rStbF[ /sJ '2 lw7_6-mHlave25 }DɵICպu8J܍"W@r4E1ukq9tTԑ, 6~Ӽ,^aIDw}u &X@Ե:Q?X@myG Z@mƱ}e>M^ʮ[H/|{!HFѐOAţWʩz +aLL螣ZzG?,?9-Lcox34"+TGcT1 (aB(;SBJužt;tud^UzHjkӗAӰڎ$:8,VsDVv^85w&?td܍a721yQ`MppL#lB0Y-6 L:ݬ8RJ٬ e $^>䰫 j0E,szl]'pJBWwٍtfK*]wKy.nd޸+}{B;~ͣ)ѯ8{1ӡ݋>d;xgd.:.LzyqqjS&H:ŤF/ƺӸӒY+cTKUXm()]DiQR"׼(=%u$%]gq[lU7๻)lueU9lu]lޚy^tU7PMa"7Bs3g(:guR`!h!`Hb4d1yfz7|W EfWMC7Rtlđ'-Z>9(Osy7t r"4 X-,jmYQ;^C@mMm 9"R,E6Yt,z RFBl|X^QoƯK'~5ҩ=625 Ҫm5'$6|2$ RĶOEiDȈ^ אָ"ҴO؋jC t[ bjژޛXj*=M7~6ƨ޺hs01~y Z7.A@>y&q*  A]YfMu@a#2^N@ 'H ɱ=%!-$NledfG3ʊ0Vp7-~ +*kyG)l[RDkHy*e i'@]V:̥AD2yשT=E!58§I։TdÄePl d<#IhUIZs"bE8.x!'%C )*18>W lEon#O>{^n>H[X:~@4 1/}KLcXN_sŎ3} b͊uMSt%hqB'$1q]$l2iY1,SX؟ #NDR+R ]V!ȿ@gw|zO-W 1Wzht2.iq)LCskYdl, n9BP35r2\0]a > ʮʆSɢNNmԋT |$vG<$@{0vQ*o\rn0v!Q."Uz{eY"{P;Q!߄ZrFبd,3ukEYXU@uёE(* *J+Ԧ K*HN8"v|pS'W9nVNJYgTJWS)3\ڹ"];%RM  P\)&Xy -fa1M9L'Tyg!DFAVmH<ET Y `I\ e P'XyH)iKbo} XHM5+E iy딋ru"rQ."~E(bTDr"tQ2+a1Ly[T`@ܯ[ϭjC֫:; ^~=XpM6aڞ .ZnxB~`}v*WʛveNj]6L(ކ>0Z |pC/'vn6Tkk/թo$ze}Q |Q`HoӦ3_*'\UDC` Ց #*?}" $nHȔhl_A9@P²p yYY_5Apʲ DYE\Nf ȤĮv∆xY*8d∄ER7i"A&d/A&dA&+YpSG4,H1:cJXj)S-4& .J j/TA*B9>t15>HA$cf>4QÉ)m_d1^HEB[6XS!FQWGRђH 5$ 2(2:o[EILn\c"Pe=DCWri5Š* 2b[ΘV2ݣTXA/dS:XAm*kl!io)z+@ 'lUze;h(]"J:*ufnתVԲ]t ~fSUd+Q 'a^f旻vSWy /< -ZbQ7ƫa%ߝnjP2C{î޼YU7|,p[4JCP0f5 m1B ֒ĀpBzdFl}oV.Fצ)}_[$KOȼAЯ]4 #f(}GD IBW)i S5hU'K(ůP7cE }Ga8"%ҩ&շXxB"K|oWڋMQ[hL8_j~b#/.[A6!@s.Z!e6wC( y*]܂hȜ$=ƬF;tJ>sM0t:̚d Č'IT%.F,e4i2̕"4JIG|-iMq$4(&N  n0q CQ"sz)sOܔN"D9/Xep-y&0lGO#R;~74ZͧFD<ur NYweV@p̀_p .w(*K"P&g[#p@+ϥs; r2[A'#pݟC"WR<,v^9`*ʉ[&Ui" %SeɝB0gGE? t_Hs H<_HsDV6#Z4GSA8=z4^?!]g 4͘ѹ!,:7!x u2lhtH+Aڋ#RYtIUۈtĥHpͳ&O)P7ڜ;FBy0$Jap·3@iҍ$ܝBW3(A::$K[%98zaq%-[1ج~mr`ɔ4tG ׃g30$¡vW@?LT[%s^g`[z Z"7{P5gҊŌEha 89#4$3QP{$a('H_ղɤz5:No ћ`kV4)9EFnQs_=.=. 
#!f&#N9'7B^d@U@6I)mA@62%3ANR$Μwr t+j MGBV(]ŒLSĝz9D2(psSr͉ )~P 2(I\J ]-I]BNtT(b6fv1z蝶TcG_lSJۘ%o?e?: "gIBB- d tXhd;#j,8i1|]]/dՋb:5 B̤8gp8Xa#@(B*x 6*ʈl6qRMX28ƴ.JH8"[)sE* F=+h?ԧ:hl\b i:QŇRsꝅgu@XH \J ezR_]iAW_njEn>EBrR"2:wB G3Y2׼ N?s!e(;sM%RA_QB-` 9NȬKrqى,Kn+O9d)p9vZR)jhE} ye[IS,DZi3?$wu_VB4+oY Z6#8+u l>:1A dNe::[3tY-uu x]9i1ͮ ]Y-۳%/?ُyt+y5uH*UY1#D<@e|dw|!Ԓa.u&U׭:ٗۢMvc`_$)ܒhЫnEf FȒ60U(mLi6S{Yu'R5tJX@}(@j@(C"Xaɗq,61JFG2v:^~S7 iuD*AnF9d_Ls:rV7 L̎e? h!5ZL} >z $$b@+sЙ0 T> 9&$B{a:*GdEg]6'׋t\La].0( vJŇ٤Qp]Xp-,H}">2RYKau&Iـgr9֔Os6RJ#}X\=xP އRA(H; oOAɭԚ=bSt-yKFbSt- _KtTKUS4y[ʀ&O.vP2-ی@2ی@@یˢIfaf\^mFG!Qs2RN_;ԩc5)GFuꤥlKUB̩gxpy9#B#*)IR)M_\ǂ_J}62[ToCSU LIW9ͩܮEC)a4ub"- &EY.8#$8 *Lmlvp_?TvR0x8 $zRb=Zz.ZCZ_:lD!DPWE>g6mS n2$a&h7ϔwPvCJخ\ 㖔#%O[ k@֌-1Cӳ"gÍuLY âazx =+:9^d,n'tYb6>67CU). &fJFb-) HLRI1rN&p5*Y⌜f%g%X$댭Jck-p[[-QE3V;G۪Y n$1UR|GomZi׊QT)nnOqQ4l k 5;h=$4Vq:^C82(G|&wqe@Φ[@EVXI UsdS8'(EEúvr0UV4蝪FF>5.)?e͆7p%I^lsEܑ(ȫji[ &VFArb "JJ /{AM)CZT2QXKqFD##"U4 }pŞ's IQs1(Y4'f`wVIIʴZWi˲sRE,lکʌX1t4ch@Nn %.!.]-a'e)ryƶ3qVRɉ97zX')!wDFRJZZ"#8jYAX~ž@!hg+qRs/UCĸsJK;$q$ƨ1!SٴBUVI:e~.0?uSN ab})BŢ`f֣^@4ӄcief d\Q/QUC d)I-W\RL`R%K ')a`jR -SȔ:'Ei\izҶY.ݶ%MG$pgKPy.z@볒8:GUmƁPb[hv5;T>=3xb.d9,Z\)S0#h%nhf 姁pjPً:,H:N &'gl="V* 6i'X*g0[YCWS4jqKL"q4]7˺sed=D8ڔRΫVEE]uq̕ocʂY+Ve) V`*,#ʂUʂO?`RV(ʂY³`2T̪ Vj{y㔋beU_IĘMc,H@(]f`S5Jl3$ʩ%ێ1`~QJmX(H+}$ܭH$-tX"FL،2Xba{Q!NznE=bbU^ 9bQ@1-ݭJ`n>jύjņ=ע9$8f̅}p|RH3`}Dn WcmU^&؅kikz(܆=(S67HXe& &f>A&U^(" L7QQ{ a#Q5 M#HeC 5['7Hd0HTh-c]=6~75HE2.6} vIF;=Bo4GlRo=( 6FK5:'zbs5mi\d*X#j.eZ`2b}5y'bGn&h/tXnp1hX1[[a 6 Ȝ`PY-`}Kә[h4Q_`)-pSmQɘQpㅢa+O $u⢆CdH:H uTc&UNrSޛ j>fQT,OePPF5 v15cTo 7n! R]TgEVcEKuʶexe]a(8r[誔T D l+\=~m)&D[Q*,*8`.?Wj[_Cuq22󧺒ř_E"RʸrCJzN6sY:4Nssf GY9]--?"+ο)#O&ȮA w7-A% |%7c_F{P t|rG` A }$XHp+_]zmp<](Mkh86yAd<;ZƑ&3[* T$QQҕz#BƁMޑhf© K 8VQA)\NœGdtA vzQPq:pH_en^hI[:2wJ-š£33芐q ots:0B֊s y4,>Z-+"&!8e sd.q<@iM"ͺ/J 5_ŧH%OSnt+"9 pIR BRSKSf)ri1hƞ" d) ̆66ĺ!e0$e1Tqs!+  Ҏk} Mj%a lb}%a MiU 4_4 Ϧ$܃,GS4Ao5()[C!aE=$M2B\v91N4>h>I.ҮKԎJRt _I> >!k īpE*]IZ=?wSKmU%E۱]_}=[E:Ȥ}d5+h3%% m(%%$+!YF>حtb28. 
I#]up強5iZ'[3#,(r73SJH}ҤW3{<z{8%.}8!w})TZO2 q9B2 D '3 tI|5?=ȷD(iRħ^O <3Ӛz/ 1>SB@^ Mӯy"=Х2q|ݻ_YJ\Q{s6'!3$y*FZ`كJLQ&f|8+aKǓl~7{ƇDx)n&j5,!Si384%`Lm]Y"ra-V%="aT[8$H/d!sP+$AWq rV76)͈pAKMM*"BQνى BX43fxj x{Db - Gr*" 4-B=% DȈoYZ?8 Z4HPĊSPG N!͍zj<&m@D.ZW8~qfR6&( Wdj')RL$KB萊`"g!\]#ā`qR“%" _WLXEX9 N& R?ݜbń{@e\/JR֓?jI]lD *PO &Ddp0.jJY42 di(dA_^b%oIǤ ( +NQP#qPrCk4!v\ **) &WI5@lp<>%7rnҞ &I,FG4\$JmG|Ds ?lo]6 :Mqb9q-OM32^񒙄v!&NN H1;4 FTr\{+ɞ"vk - pd?N Sd XHM/["N" ^'(~b E`k)>^/ːei#"YDE V?.dY:Rf{AAEq=x13 ^PmN.{]#]^d3U=JheEtx4#9| 6v@XvROlvv"oIh~"^=Fِd%Ӟ"Rj6̆d?m>̊0#(n?(0odv!l s02:5Qk>ȝ%(i,r'yv Gl;os ! 'i?չP4c> jTH')"*o2i +R6h!nFYJ65@vMY. QS%Ŵ!&vŴ!k/?Hi= i#%6Dethц2 6X)QFb2~Ʉrje2Mꦸ21Jjh% j.q9ݤiL5Kn^ >\nR{VHZvD';ymISQCt_kbZʅ'\Jbӭ5p)m&w]&b2Id El TbGPdClH;2!E}FbC zoEb\tۅrC~KdGs$$M;b1'Kı".q Qqdtq ~_EL8բ86ćS>\K(L8]+QF;gMLX8 {g rCx Y2Y Y8YK:>Y! lI[7{V?*PW4{9t-Wfc:A],E%)q ,6R$MM<Ե!"H< "H<t L`OC>;Xq ϔ+G:o7?<['D쵅|xbB]'TeRk(oQt_==CYKpI}:?7 qW+Nkܧ<Ǖr;:e)b5=~'|ǚNTE7ǯڋkqk$OӼu~k^[* )J<_VFߣ}6^yIo;V3-g#zE jCg&~UB&fFٷ{ŗ% ?I?fxZeZ+y??xe+ =F32m /o oID{|3{r-mݟApmQw4 H9b,3}YNQE^`0>$SI|þEj@H-q90)2거@XfyUWƅL- GOiaz)[[lJRTK峕$]1Mm-*o^)M !Of4斎3W%#L꼊Xt$EN=^Y陊oӂ^-Nl+|S`kmk (鼿$TE`IKHͪnpy+z}jdɵ+:~_ 4_Lj_=Fe L&erp,Fwk}||O٨"$HSnWU_&^'R,h':d#QچVniOk (LVN5`ѥpu %Oxx^'C'Ћso>O/mk6FT>=o1>o}R|?wїq߻rOowڜt{w~~f[`I.H/u<7gGԨ?L~_;7/^~㇯_K'^?N΍׽yw E?ן?|-+oK'?#qoq;K>4 W'w~Soʩw~;)?ay>~~;ï_,Tк1ezU\m~b?A[?|}f7./Μ0d_ ÍL?oJذ?+ uȁJ̺?}*C{<巟MI|OD-׍ʗ1aE$zf\K/0SͧgW踋λͧtssÀֻhrH_*}~t|L  Ow' 4;n/N>duӣ=7LsDXwgUù#^vO?ܗ>kYn:Zv-0σUUBaYKwoqG>V2QBW&G"]񺿖g5RD=,$0/y ,^aOP|pLDϔhI-L9^i 4|H@[r%*ecH/pϚY _U.S0JάQ8n#ݦ&v"Zjĥo$#7O*N LtrN\xVpMzWx<<}xR\!`؇GZ{|u/QJ'{I'Ç8 kmW|zO "9%vfWK}b~>#B!C%} ڣpu:{|8~lO怅7F } ^oaV EWq_pq.2bApvImowD6XF}?# t0p2T'"h?cO]+:(`ϙ[#|Vp3(`5N+:*. 
(ؿ18bdϹ5j?i~d^<=+h $MC8ʟ_Ey~_"zCp7} (V eM;endstream endobj 275 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 282 >> stream x10ѭ ŹYendstream endobj 276 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 2961 >> stream x+-9wTuQ8p8ḣp87Cp! tUwHSSKvRITD'zߥzG@‘pzt{.B٫'B?A?`@]/ .a& B\+$MQ`xDJY8##X+]T &S,?ujD$Ncx"M[Lvwʞ5VMG',K+BxIdda͑jwZ9`}gXR=xju5]>4_%Yg}O^C:wf>qc?[>ڮd(<9nYsֳ{j/ߏ">$RUOiql45))QG }R4QadR?$YדX.Tbz5`:ݹ$JcsؙXR"D{n?qwӏ t%Τ?cӰso_yi3>;W8zϗ`of!V~HrqD_ nZLI1.Կ v>2)P'2\o>-S6ɳot%Oxm*xM6^$VpM;R>YaOiV0(gwگJ60/Nk>RM +dr% b* {,|Ap@O4T=՝bR}F1$+o-b`j6KrfK(ܿ)>뿃9Rxߪ )x7O998u3O 'wo |[ R&) DX&NN0'&-ŲLLSə`j$iy "|@䝃D. Ff2I:u}[9T1¬0 ^ģ: Vq\!@RĊ%EY'iv1y JFZc'vy~ҋ#ڃM3 Cπm|+OL7rrK'[ߚ"l?0g0\- B GV -GM]1&:4* gn#| MPb@Ӱ#yb@ u]oRU{k( r |e!Rh '1;ײeyU%I듿"noQVNP0d߮zGm.&xj֞A\HodH##GM$H,r*W=)w{pRO:7I gG/'fl.o37p3noY\ @t3Pas&o@B@Lתkr zqÍG ѿ.0Vf XN(A 6v2f<J:Z?-BO}%}Z8Ok&;X na/-STY+ En> q7u}jweO C+$[x3_zTV# JmNtZ7cYP T]z'I-PkCP!{:w'tue(8J}T>4Lǖx %_Y0_ĹO Jpmh;\1OܧcXJ t?9jIqߏ evM`OUʾ>< xEZeދ9gAPg }@0!0d]}mh-Q|n42xkiE&H@D0ڳ3_|srߐ `jᘔH'>% aqLwG "ε1H ĢIyw&u7O Cߊ2Go202A&:d0P!NV3 %fNendstream endobj 277 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /DCTDecode /Height 128 /Subtype /Image /Width 128 /Length 2488 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?ZUx-ӥ~RGVEhյTkҼnYFj82Z4zdTҥ~Al]J]#){l,%饈53'>PՒhGB֍(헥~VG畤h'J۰̑TwE[Zcsc:<6Hm=^qpo_E'q2sRݿ&pZ4iڿ)UKgV%yںpNa\$ⷵL &_*%~ ZEڢT&))nծDCv&=j#T!VYۿ1Ya :MF/jUQAy+g.y7cʼ7^i{֝TaZҶ+J4GJ߰;m?aZbl`|OP_jzyg9WNn5i('q:x'W>)݌}}pcTpF85Y{,\1UX+\Е/=T҄ .bĕ~#LUWvԑՕݫwNQ/kչE_!/~f3K0"k*ck^}X'_ GT~0zOσJ]NGctnEqW"ްa5~ 
3LE1˞f&fpԍ:R(j'*ΕNqT%jC#iWXQVKkJNVSNS_C*XSq)rP3?ֱZc09KiQSo2c" y$?x95w [jZ0't/l+Zvҹj洁pppk.պVj^UUk1T5nj1EE(,5weiZjĭTj= hԠuuNrFl*6N\8*i5)U#5Ѧj_^^&[ *2}zf~ǂf j]mM{0zx[Ҷ~`gYՎUZ?J؈<~ٿJ޲=xՏ h\5pk+s>6H3U)ZNBgVONN Py*=j-[yI$f3Djc_[ X7͌Wp=IMumepw]:CWf^%TVfVtgе*-Uhmu|I=s۔-yR֍(&|k]Q:fMuWdֿ_H[Օ'zϙy߭QԧإWZRAڙJx=ExM?< SiE %_||ɦAn8U֎-s}M|{O AFErljʵUhŕe\kR^{'Z|40ۧ{5=j׵N6Gҍ4\kZs˷",#V5ݑψŒ*֩&ݱ?SNӣ Ͽۇ꽠~iSTWc:Nrjԝk&`|iީJ,U$j֧endstream endobj 278 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /DCTDecode /Height 128 /Subtype /Image /Width 128 /Length 2479 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?5&̠c?2kCQ?X .n :Ԟ+J|{^jKq!U(/zW(?ù1X56?FhִVZث͂+z鬮Nu8"A{]5 ,\LT[l?O2p[WETe-ndzV\bSb!Z pwL.T5fVE9cRz0Nw3ZAW%VA]gu6zwxdU0=+ieMҘD۹b:dY|F y_݌c{Kh9fU+Wn+65FR> F4y=ɬ׭zYXwG椉cfkb|blk|ѵj78+árU,nsZq <>eWnI]P$]8)RS^9yk:u˸Nm)\0 Йj^S[қA_6>r=]~RQ[Celֵ^:WJGFFD˃TngL9FRw$V}K VhpM-k<\m+Ӥ^>*ci+15<OnqZ5ĒaE_߭a!6Z?U{DZ>.GXš:,^շ,9U$CZuOG+R(B;Jp1<+3pXxseWtRUYj`z): ׭h*]I7 Y&fNBh^9h!L"sucV+"YLPEhr 9DGj]dxJw]OŽhnV{l# d֘_0Uo^O~Tsn ݇-؏S-'] )[NJWn u:Sڲoz9_C^~+q45 syl= 7ŻW֤oLj3Gp08osE_5'(>Y+3ibcV%ͿK/8ԑu~j& (kЧkV:6{ބxkcҲ-W8|*<ɯΫFe̘,ǩ> stream xZKoG znQMZBI%,#i.̮DGvMb.pP}g'罛*o @y8I&N{͗:AYe5Rh)u E-6X]4EU=0^ 4{jQJE=2 B*URg+I>6)^y|f,%[ r 8iݑAy_[E(L `G)Qpl}hX*RڥZI aLɻb2=3#Bp 8MfeJHLv4:a.#iD+m' idJxAqEJA5D*kzi6n#+q)1LgFYjpV'jhsX#{3PIcQJ}鬀#jgQH juj@,A]loӸvU\t]H@lM`6E~0!6m璃]5m2=h_\RVc6kj(kdة VΡĤz D~9n]`t %z rσڢAhlQ!ڦ=EӤaNI(rH""JpM2TMq[;j5*R/0.v`W6;,u1sEjD٨VRvTRSjUԻޡ:iR7e+Ԇ6Z1M9#J^# \"#3ë '߱YY-pᄥ%^uvxڤ@xpo -3OpinaI[S/ٜ%X^'ߊmEG0+ ;k<x}cb=eFxt%UqBBpK|@2${s` A8B8F''q^4vg=o2Iخ|#SBZ\пzT ܰd3FΔ, #S6  X_i3V A{@"F $)+&ͱGz9:ɓ` !#\]( ~c~o@=eӐOWibOMf=~؅u]fEEivkI=Pg]S dߞ H Jc pL[hwzP@ Gd"&?>A(@ ,4Bv%b8a/'Vbtt^&3|j?aowl/%{lŧ:B؎(`N<=v0j=GNF`AW?5?VOnq.fO734I9j+ִ\>} k+b׍]ft%5]-E Wcs:m gRl΄n> '2<k XO B 7BPV?-XEendstream endobj 280 0 obj 
<< /Filter /FlateDecode /Length 160 >> stream x]O1 y@REX%C1C2 tpw'0^GgG:i[DfX+@[L 7ޟ@Oue!֠r3idodTNqTr48on1Kii XGg9;ESrendstream endobj 281 0 obj << /Filter /FlateDecode /Length 4097 >> stream x[Ks|L[>-SCbb)WaEbrWEI/hΐg1x4J r%_DO~>UvstO(\_7ʺEr&ɏk}~_'Rք:7?Uz¯)p:x!Mkjc_޵WnZ5rW Q۸:SvB {f>`3P[k_lY !MX:rT :FmZ6D~M`ӎQwo1j'˂a0|Bno ᜌY&ai DXo[sh׳YG;h专͖&TЊ]@.{5'%efoMkh77Þ#ۏiuOW讃% I) g]jie0ESA[~ Ŝv褖1oK`zCFQGE !Fþ)7u LiS 8i][4xpC4fp*8$feW[oxvZ1(~в~}FÅ Lp0c?f 8-ލ✑\ElY2- '[uӢPѧsD:78M=NP;ڃǺ18uY7UER{hZ-3(MQ6F62) `ߖδī܆wgiI Δf(lp;w{]Wxa38-bY,*iB_HFbfG =/BnQQ*P!`>L|b)m=s5h֣L|ht;>d0~EiH1k~a{vϻЦʮ/n9qR-%v kݓ2?M~5[GWZgsM!aj=X 휼YI\"AO3 K jud+n# '~4 5ibwyz$zX#a 8?Xf0, Oc(; NӉ|„KSқ,MS :jB!TMyZ'`q#:L/p4*ƈw=RdB'7.CdӏY^ bͰ;]i{k @t3xJ{n5kce(sa4P p qAhf Nn6E1y)6Rd;^I̧mhaJ*:^ќLOA-tT{ ?W99'mƃж?Ka?oZ(̚8tVDy-_ExtTWSlN>l %+#xv6Zꑽ-d@_߱%,핝 qImsY\کf %/x4q)XH.(h5H>.ޠN_O @c$ޤ.্a#\()\oyφNDc0Á;Bؠf-!>D2nVR2 uf ߛ NBӍQN Ƅ$C78MBMTm2aP{}5Ld8JH/Ym mF9߽nY lW>3m%03 )=Pr/4{!P"pZCJ26ثж.`֌?sхX.(qꂄ"fܖX&OCG.71zn[,PtV߱M0nwr3 ֲ͓) )wyJ),"z3sb )Q imr%jkd t7a<1{ ՁF S%L 4j}=Bl"@;.ʅ}\ IZ4L!b lYM7$€V@v=d058޴SEUs \*+z ^-|kWNjs۱0~4[ w2N&h2O$O>z '2YNf€XxF8A9zY==nY@ {bM1)VțޓŒ)˔Umo)%-5lnh_<, dO@tGR?FőY-;r|׿XKG3es\!lͯW8zq`6 tg HLSكcP0O A'gܵ,'C0U3GLKS2u1*q[Dc;]_idҽm=ْpP[x}O' @_Yqu{ U S@UOiD#M&;ԔTGiҧsɅyp@L4˗JگLzK*RؿJЊ.c ۳c 驀Hkm7GZ/PXĉCM˗.\KS$ԭtL u)|n%V_x[:0NR7-{֨8P_L'2"5]R%4)6j9cxVϴ͗9>1WKvVbd:Ele|2N:}cN\}sݒXt]pIdɠԈ>љ"Vؕ<.n~tz/O-ΕwV<.J36L5T&tәR؊DC6/SNYt]~}Kĉㄥ+\ЙԌؙ@=!1}I`O-ԱVJ/WiOO>Ckw.5Χν z<~=$vWմ6:ߗLd:)r}5g|ۚt޶&.IV *YV[}1;»dEĬUkW2^~2'.ģȚSs5,?NX@`v}h2꩔aU]g`}ek2>VG /yx9I䲵G bFs~kyGn;IU.SL=r&ݵY=Ngq̂ b~eؗkG:VNJ K~RsY Irc9v]Y]z.5}L:#O.wO+VY&v$kkNOC ts /T+kUo]Qʽyj7\פUDRX1vX`s] 4hmN\Xח''PN|`hgj>k͇֬9KCDh?XVI8Vx&̓ ɻ Tw ˕byMw:4u+P-\<_額g R h!v1Q#mO:M/W0+#H9Ji;{")@Q=&q̮Ͽ*q/sXT"؅"P ;:n2UMqq5>zN:@,AW&OhR^r!%( e>12"V{uŒty G}Bބ?~IAe$sk+iJj6 dCm <]56<̆}w}zѸzݞ<(iʡI199~3Gav*ޯ"x?P+r7&'qll+ > stream x]O10 @€XҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|s^[+x$E b'Φ2?M%K17MR{;RB|DSzendstream endobj 283 0 obj << /Filter /FlateDecode /Length 4332 >> stream 
x[Ks!R3)q=xIGI*GÊ(F"&Eٴh`wDʕTf1F/JO_ޗ޴˞ oX_jOfǰjJ)4QE׾6h|~{k!~>Q\))TGf&'{y9DASSMịGYR~X݃H2*Ψ&&%p;F&;<̤aj5|4hAkKjΧZ-MVn^_n ).O`o;Y[~5aXGcE<!r8ä, XF9H`LɻB+02+̓op`҈{`+;HԤSވ!'um*`]XMshV@SNupjLhHvNB 6J@j zx,zlIa /Î"y9FiuS0:15ψRu8M]@R EmmPW ǥy^(u\ހ-:Wq?ddMN[%1=~g w ^|.!p{ _D07:{䊳K"6d SL %I4-z-ƍGʡѩ#_U%%Ɩ r7Ri%2Aul$e x eH!hzFuDn T z^6p7pNk/uū2/"wXA?3 vlFHڂ8})F8 DAdgԄggLD͢EUF4tfNh,Dp L4y g "qh| q_p2 $&r6eh ,ˈOĦA泪JrsqKsG9F2Sdbڛ-e!HEg/tRY /*Eeo%䞞j}ٹ }pZ("W` P3[B)fWXn4eYa1ioF"Gs`<'ѐɼUrE@!gT`0N* &waj#7~/e޹lA7ܺ΂H6wv:?['ݜtl}~])XNr3y(˻%FtD}TH_$Xq 6(h{WE8hT;2ID7/Vu@W }۔,ρp$6u8N%ş2jd|5AQK4wg9jZz[7rA"#T͝)Tditk72%tb}4 $j;-;n)+.>IZ/c1!a( @? ޯe0/Ң#.QspذT:]p&r+;sFUŭk|ngϤ aF GJچ8kPT*OI il7cVí(yLg5wbv6.X*<[ ӠM\d3w MƒPdP@W!G2qCWfau<9l%„PO 045Lcb }.163|;>ړR#+$:k dɭ:8 >_oq [ԘcjLWx9dVq fH1Gk#D2Sۍ*W*:1wiX,{;\M=P`wa+0jG3J@tٲ-<0 [@\ =*JOxIBzr}m"+]ɰ]1 g/{M`A  YKMĐa$xN\Gm6Q RV]s׵^k5]Vr|X,6E͆ڈ+@  ͟I\l % U}ݮ~{F'ͫ)\IRE'XDUnpWvӅ0W??\iOX0uM=Մ'Y2s)+P^(S(Aqtأ2wX)3+w]_SѥZL~v_%Mm/BRwжۋ8qWu:d7RwtϹnZ+bXH{u)>;%endstream endobj 284 0 obj << /Type /XRef /Length 332 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 285 /ID [<7dff024d5fa19d02be09d8b679148d47>] >> stream xԱ/CQwz^haX$!I7E (6JH$aa'!3:w>9NNjq _'ܯ mQ;ܢ+Vam-gِW3$32.mD6!߼TT6_]r#g@6Tj͖qzfAwGg,r;Fn/&d'WP&X$3JdS9"vJԙҹ[f"{|a|1%#!70yOI62X">xWީ?G~̪T*?T?t endstream endobj startxref 397601 %%EOF spatstat/inst/doc/updates.pdf0000644000176200001440000055015313166361223016013 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 3212 /Filter /FlateDecode /N 89 /First 740 >> stream x[[S~?bޒT*쩔`0l!aI8|=+EA$S,sgv,a4>aieekUyӄL0_m L8-L&)RŤH5;& SZz&,Si̴64>{ fFˤ`Ɓ̊ s[5a.Q([ؑ9T`2 &SA(JF7dH52e%I92P~R' :`EF (γi1dӜ}o@ ecK~( ŃQGQc>ۻM޸g10N uџ?a-*(|CK:;JL\Lbc. -S]>RL~'~%Od j6eZݔ6rRAli>N6bȶG1`y~ctyOF7Hɔt"ňK$G| h8o YGl&5o|jH6Älpw7 íᤘW(ƓlL6#Be4LOL8?  
[ QYԏR1q0І,O9u P-k#͸D]̋)< `M'<=9?W|GO dBdra B:!6(9܈WsfWy[`Eokx1ё i~o~V.Lõv;Lvz ȮyN{U{\U:ŨP49~5$ K%)Ô4O Kz`nk+hBj"Eދ0^X ,h"FڏCeS}76=[a7aףȓQBǖ&lhoc53mm_c~ }UVz ^ 1b7sۋ]8%L_tOA FÚy&7Ȯ`~[=| */ Jưf1 #!gj??݌y t o(c%I~UD'g0)ndM.S_-vERNC7|O:Ӡ۰mѴU݆Yt<\n&5oеG9F\@][V4,%5jJ#N<'g'ohW pg`껾vi*`S+!=!(쭼#sisKլwDiΟƺ4"}kurWKm`S:Zm*6  bT62L¡o㰅Q;;;<~|9lEUkS^r|*muj;{'م^sĢ|ފKYWcSF-` u.[a5^]Hف](AmXҕ7#G !e~]$Q͎O^l;<8OۣAsfѹkصpuN_Y ErgK`FgpI\Kt*yAiYݳ~/ljK.<&wi0v{|\paTVtf6 ORzGeFG+XdԷqYGe>d8۱ݠі*^VE?lD^  VeA:wޝWyz]-}P=}pYVcj,`,MSveS RzIr& W7L+`t)Lai?'ն?}jOGo&O)υrn&wj{StDgvM:VyCΜBuV?.AJa}bQ"^vX8cZ>!iSӌIL]k ^%[]fu Z=BqrߝSK]ozkKV!8Rfb]zz%X =󮰡,RQ+n>v)~HQ/QԚ]ˡ)7h] ׯ(KWKc֣ћT~B (֤҇zti6]F=rSߋ\{@za7@tf 'yk|4HSxA؍O sGz/gڒ0A+p},endstream endobj 91 0 obj << /Subtype /XML /Type /Metadata /Length 1329 >> stream 2017-10-08T16:32:19+08:00 2017-10-08T16:32:19+08:00 TeX Untitled endstream endobj 92 0 obj << /Type /ObjStm /Length 1596 /Filter /FlateDecode /N 89 /First 783 >> stream xZnF}Wc"Y} Nvz (BTnRIv$_GҐ;;3p)hH%% EG*J9)FrNL:W(CG:n$dxFn$Ix(hp[Ju(^jR2350԰V S&OIO*D4)I:1,(ʢ1&Kxi*O:arX"q0^"T-aцd wZ,o4721jXϱ1lsvj*a| q,x9y|UXඋI^zAA *,0awV> B£yH0jp% $)A(g!d!x9hvZ =xABiul9LބocW+zJD?m?<{Fs:?FUʀq|:=/|:f(%Giyp0wdqZsnI&0;ῥ>Ӷˌ95~fo9f8!ϫvVMcrc*h۫9לQOTr)s.d$jdP۶C̗Mvĺq1OkEqѕBWV̮棬Y/3u,ٓP(e%tGp6.+Oqi]\6l[Eؔ6 n  8+k# -A-?d-RuxlOhɤ;E-WU/L9S€mS,jYONCKoS|p.ׯ>V}USqV^ 6G<(J%7DYRh~@֯8n~ܐ:l+ SypKR:dYR>Wٌ}mSI-Cq-]N6C-YZnˆ-͖|&f=z-זlt-cAZd/Ώ^9nBd"ZM=r6S+Cn6ECz[ŬNvy߽w;G#u~1brC;BP,F#*Ł8/[(.HfD*3c.13q).PSYKs.\㇉B?' $|q4I*i~D2t4'I𫱻opSd#lSVOAL_WS^>ۜ_݇p_}{@@v93}\j۹Wl;i|WثپeȹdȅWEUP׃o6ǜ6nf^zd] ͔K=|Դf1KP4Uuې}S|endstream endobj 182 0 obj << /Filter /FlateDecode /Length 5233 >> stream x[n$Ǖ+j7U c["ERV={#3HAȈ}~8o_\p+ h_s:\}w/yq9JK͇=~sΕ֏KCuukk J.A?.Ku C|Q~6"xW?9tj<"~8]tvC9CՆzjbQsM~(·^~{*|Zb P-mô椞 9 R%jCؖG[;_zo} */o?r]Ƿxt7Ź yV_TwnT t]jlbkw(gpT,[_l+}=OAIoxuju Qmދc&ZLc%) oĔ'owN4` KJpҒ2FS x97ӈqzfr.B0 )^C66qBfO_-XNLx.F ~jʩE(mƾZ׵&2ɛ \l? 
y9{P``/4]F-$5M ^So ր(M9F嵚eϲpə{RɘUbɒ N=?iFi%d Phd$K8`$Nc_l*˨-f0XF !ݪ1:W&LqIUi 3gj]RshU4"`0iP-BB&@Ů7ƣd :4bU7pjabo%FfqٜI-a?t 2rTQm醚RZ%yU%AR[-#"4J2 gxoQ]vR{C>mFiᒾz:f Om3]Ttb&ĂC(}ɶD Ak.`,afZg4|;_Z`׿x+V˼yGϳl. ?hRAHK`e8.~mA\xt 3cx sI[AB~eK%İ;͇jyGǧEt7…M.>w~*%is#5`E*0d 0jep \0mlc,۸e{Lc/GcBlc_f@c‹I"[)|F*ZGmyC:Qs_GK)LF04(~1f+_uA{1oq{N8B<ߏ%UC{'Ukwr1uCj.A=P4aΉ?p߄z|:f._,RŶJKE@ۀpM8c[S::m83e@b!X0sQFߚ,<2\os ׳uB؇d W,I,nA>%m|ԗc;>H~ψgt\N2Ӽ-l>k_g5jA:~T^.wG̃w':/8Q+yգ_1x=IC4{#J;PmSY:iϫgSgw:ܛMdVpb+&w[;< 'Tl"@!qwZs-]Fv-?^kSf~M4=LESN6뙇W o6/`߾y%7yؾ>Ow%=(ʡqHq2. X Y6sMjC<6ޙaX#xA"}EDRy 8!U=67H0G^( t q<"MN+c=)$F@:3I'P(p`Z Zmd!P-^zx:# fh0;9v"0Giq$Hvlh2IAɄ,PgH#NّQɴػo!^9ǿvtPhkedsoX$Vdu66s;0kSrPJW>] jgvn|rwZ}sQlQ rW"<*ґPKQٍQ52d5FrTZ2$;I,@JƆ!}သi I}~z|~:x do ^Qg;ςOꊌ)x{xxpp{w_ιNWP|A+!91 8l=ِq"|p> ak+z!vAD88,UqW N6Zrƨ5L5|l~B7$g4|KAIh(R#e`Nے^lH,c2f?Z[0<@6R MWRěψRRf=#ʸPK،_ψlA6q0l|k !°Dc;D ͸"|({4FagD)l`TcHdY)( 1z:#JOdSAtS })i3z*SDrQz d @!S M[AOwS!Y |P(`er<ו!*hNWlH0d4 @AFwQɭd]5gDɦsdH0Dɦc_&]}ў¼z"EkX(\sBkHJt!,'ĘC>Y̒E1fIh1 1rKۦKObHVq\SY nN?Cٺr%Cwpnѽ91Oh&b{.KDw[^, Q hDߞ$rf(~zw Jz6_bm\X<Z2RhnKՋCcKVGzG6cvO [:\i)l@+xX{sw>EK4{O{KW^̣*oqޞSi_wnw߷º󴝴gendstream endobj 183 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1799 >> stream xUkPT>]e:ш $J E,²Fm=.E] "R`%6ı՘I&qj819jM;93ߜ3sw/W`8 "cBߞc~3V0M](^H_6;ע5(s:xn^ERdeʵbVqPhhv񋁁] &K*JrB>rĉJiL˵ZU؎E^Ru0K+'dYx2W+(d%pKgR4XeLaت\J͗ `D, Ka7=^lcqj0n2 ~7Wyj? 
nFxP]NzI).R*^BfDe3 gDvXg~[ G(VcR.&؉wΡ9NS߱8lab݆p߽Rl &< .fzFe=?7=ihȁ SXZ H{؍~d]}r}Y5\Py )RCcŏЈb᡺ځ ?|/̌3V'- |yyT&ΗYt|:&[/9"qa:,QbN%q1aBm4tq)Um(6fI /_jlK[!%ιvGGU;ROӅ"v3ufsCUY5'U%6Wn4$:*vL?xT%B OA#jʍBo=r6_nVbu&yЯL?D|D\E]%T$(K3CStyEDWݻB grQ f)&Hs+%po6ir*E{b褳'NU1  J5f}Took(:E-_r 91"J%AFF|JYR#@xft DJ_S#ԡvapljCc=vϕ>js5odq&MxF٩_RXO^hG<ՅK4+"Z6}c?9ǚ:  ZУ(i4^뉇D2`@Nlx[-u\MD8SE\"\PcDGCkt*{`~ͮij2'S`/f1 /qf 'dE J?;JpH+?Nf )Nvp{{|(oNg:?endstream endobj 184 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1204 >> stream xELgZlOu٢lé(Y!)0s Ttq*ZB[ROkR_V:bt[9mn[4lf8θ]S3z'y|?8'pWfd%D󅹸0O&&OTqCF>mq)o`,PTщ T":eTzM5t+֗8(71=-8Kڒ%v=IWjMbآU vWLo[M^˘9#]%Ŏ RY:٭gMX9;6cX<`/a/c%XfeNllJ6)x]dy£o?ȅtR\۰Sy0{̹ [+ACM'^{`0FXf6WSlŕD3fD1@FH ;a8\uvIQcdFzh v)rtiB&5 c)z) N0W:jd.-PFTO|~{|F+$I "-Fs7PGw X|]%iu]POCs |}8k,[y;%ZMֹ\PjTD D264QZͻZ$Vt^lu5ox=^<D^Slr=Goߝ<\֌ߏqꈈuշ{NJ+V@qu s&c$x?"-m+؋&v(L%ثvv:E8}ڏA|--M@eqVGqӑ(O_2Z{~hQ"iqF5W"U}endstream endobj 185 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2693 >> stream xVyTSW1T8hmQnZ]lUAM(HB PE`R[=-ZXӪ]}s8ۙ䝼sw~]0D5f7IxF$&<+,K1$CF'1qJ,)Rr"*&1>AЙϛ:kƌK2Mn"tT K%9trwL }aaVZ0}zzz4ZjѸ{Xmd3mY`͠z,iO;tbS7q$X0%BeXx X-E0d/3?I@}V1ٰ$&)|pVe1Eҿ\9{BHgϵ[@ML! ׇnw_c.87Dp !H_ݻYu\17Osx)EV(FK~hbX̿fwȇz-Z$0%jCM}baJbfpx4~)HFpa Zѣ:V.v8|hu+t]Cls48H5m2 `}pzѨM)W$CyʒT*<%ChJм Od<5J R$C?$4iCogw޹jBA!kڗ*Fͮv*T;_RL X_zh=M?P:n.kc^B#>7gQZdV=syoK gHzJ}B&oc%O O\KcHwL r3RssuE{WlȆEyh3ůTP2;5fC*grb.4A>0`xcbqpbVUo? 5kZrJfUъryQx%4FiVΒ[ )L#3~)'Nq/4?ud@|SR+$k L>zYnq:X0g|" Sl ! 2!JHVG69dChzj,o5M$C!gT&࿇Vc9C8&.Y{.яѮ}O!Kq玎_R{kkjj!9nG61 9ΓNp{s|70s_gSRr28Uŭa“vnvEmVn.I}D,.7{eAƼoخ&(`-m;,aAϧ'NQкOn(x'dDBm ZB!K\ &sŨ.سel}+jVP 9vcE2 c 9o}:q47.tg#| 7ޫ*s9C~8]%^d:C"K1tnPY A}tgUY`>]%3Ƶ ÉSyu{M-2uÆ*rXQP.,4 ISl_Q-ӉЂ l}WNMSk3js8RI}?~_Ѓ t!z?޿/r5DJ,b `7-Ye}t"øx3 OA!^Pg-.[}AI|%58*I J*1:,X.ۯwt䠆͠2Ǥ 8<G(&9 \~/lЏidinBR.ff`NjK3_2`FI"\b^HUdUԦb[w f7R͛ `b6SDsin! )Kn ѾGr#湂J.W;> stream xytWä́bȔ{ 1 &l&ْ$^llcc6aCBۄ@B od'&{?>Gǒf{wOkJ X;a|? 
k[@!wV>hO/7%CF :vFN?~{WgΡ^~Ρ䍯gz7.,,l_؀`ϏG e==xPە~͍5 ۮps(j܀y/ YXdgp+vt_sliY쇏ctcƎs?aSMnIQU`j55ZC}@Razj8AmFR\j4GR6j5ZHQDj 5ZJMQSTj5ZI}HYS](՟@h8vPPݨՓ>,^ToʎCYQb]ޣ¨nZR6+ ~)) ~][H'zJ9wBYu;]C÷^aһO>Ȫ^qz;$pg4pA8 uVn.mp@ZTkjv!DNDoh4$@/HH?B!{.x!=yۢӒ[9U77A1QGҳ`<͉Oʪs%<"1~m-L숬L!|nu90LV3h!?=`3wf t_ !]Km|&]pW&'>ָ+jlON Әn9)\_4#hMR.BJfJQ?#!ָ,+pЄS"{9(bL:9GT(!6+C;WeVLS:036=wE?YFkԤ})2G65nOxzXgtcS4"~Ѓlks30gHNeɇR9dOC4xֻ5l)KJvW$lWK*jdWsD->plbbߜ<YoP BZŘ`zy4u(bz7djgzZȫhfGB\iyF"œ>2XP 삆f! *s/wѮPw, -޲ \Zsf(( >&s>{轐 Wcy*ƓGo*YWt/'-?ߢ.l>U.bTʤءXeHL!]X^Ŝ@~XvuRL`2HB;E 0a?&ϣcbwÑ^}5JbgE)AdpyȾļkT*59N)313U+(m"R>C*_[VA8^'(SԪ9?ufZLK#t4"RmUz7lxA %i3J\/@ih}^`^enE"J_lxN7{=zCHXIWא [uOh'pHEi_ S9Qd[1x2v!k,h8}Ph|I,)٢3tӇB4"Oj!1M`& *ޣfォr{ 3 yLWGK,%B=uwÿKߍ"- TȢ!M[4$č]vF۪5{T_O2氓 uu^0,4P,I p%@bXH݅% N{Yn?ܬn\m\+ _:A'eH ء [WP+9-z” _Uz/02W ]+`7WbP.90g̊'|fK`nZ)A>dW49ɖgQc_wbvhqK iUWqPlZ [JSI\`gD&>Mu;v3 ʲ-T#$ fnDiZ쎹m7 *Ul"֯eD2 Mᱹ7%-_b*$Vzejeo7vꑍܚ :UV(:j{Tdsjkv\|Dߡ.xSĚ|͇ҋbz2ژFeK;OY A\~JIT0%aANvbv ) / *+"ڃd}z)Q#mζ#Q8yCSk(("R& B=s+o„+=e`,(%4{)+FAM7o6J !*Fo鍁M &|Wr8R}ZSJm|HE*MR1)deoiҔM"xBmVbU b ,:5ϝO4 FA#?mZDOD &@gHTiK tCF!TmS/ئM@DxQ-e :уXŸҶʶm l^}E$*]MMyVUiy Rx<4Vq$mԹόfElȅBФ=ϧ䦃#˃kF?vx{~s1'INj0~J_aYg*p+HGX|*EF=ܔE&t8i\R-ƒv;<Ģi %)ٷYkq@ꓞhF{C)^ZvCpBS!ȿa3g NaKP&3"oB[[y>7'=&q"gb{%á,OfOٝ9SSGX=xh"AlooQX5oSGr& Bg}v+NBUЃ`Qi4~;}O^(d~tʅMֺm[/돴EēͰqhc_nx?,~,S{^̢^*Û`ݜMӧ;wT;{z7?lҧ1W]f}h3x]4{U|i_._ גвӟO\5squ9 ԕt7>|GQ8ǡZ 5; r|Rv#'qD7oh=o 71I +? 
og-KL ӈILMN6bW7xE֥j@ٚtc!'Ρz4*lkg25ZHc3eqIrnˢŁsaЯo /ܙ;3]9u̓杭<&uh1U~Cd,+>+Klؒ\4}YnA4o_> Y"zךƒhB{[63=#r(LxLaqX1 GLm9*f.v gcpp=Tr:jÃIJT 6yx|~@p+tNW;?a{P l|wN.: L;VX]{+̘9}(Tl`> }~޺FlF1¶y6_ %%-iVaaŐ/<7Q5D PH.L!Nؔe7Qh :'qߐv:f*f j +"_z\5:|WgXt\Ω* %Ty$mZ&=)%IlmUÁڢrm|͒ҟו^8%VBAV>h 1CsQ/'!Zy܄EXیb!QYٸ `i0i)toFMHMYB|<9Yeq'$g{71'Ms$>͵.9Ht?nnBlȟG?oEw|m=GuƟFyㇰc\$x6V*cmTJ%DݐŃs{BK8<"ݻ ^d7wߌdK*%znj)G5{!dk' r28BVE/ Xh/ɮMpʹu \g(DfדDhlB|'TdO ^f,Q OIj]?|5+8B՝TkR[[x \8^_C \(frɖE^`ZA ^o8Hz+5˖&[lKȣZ˙|b'p>cHv^`u5r|b>`߼Bw8A %7qQ/(JPJx)`1-3ׯ[f< iQ%k,JKtJLXBi4d79?Pש?~W)tfVd^5[^e(U(pbn0?NU rmۡV%BJTA#nҡ;([hiNi/ImAhϖ |, w\jN0gIo d㡀8 u ge_H"Uh#=)w,whgu6 d4^vuSۜ"<9.$V'iDa$IXְ KJib\"% ymZ ]P~{FD 0*9#h;uZK8k ee^ۧ\J?!oWcf/ՏH)J~{O=]J k;Fw2u-eM:.|ˢt{28~!~," f3$>o/a_cy~D3܆"fa+Ͱ̅[vyڞjm3P<.>{( D*ѣOluݓ8u&LUEzj_M 5w)ÌgrupE"!41ƚiMztfA1[`[>WsZר.ֹd 0c;D>r<8-ЦN&TpniNdQc) vN]rhCKݹn]xG٣ǥ=)vfIendstream endobj 187 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6785 >> stream xYXT>+pXs[EŖbEHoҖ^KobŲ7DSDKvf7W#{Q"Hnc"aD/=,ccLq݈Q փ~lHD>K||n.2q>:g-M2YI$#O>ܜeds?H.Ov#uYDKr4yro*'/gKuuxȜ>E޴g-KI*,:F"h]!W9޳e:v<7z<{䜹qn0qM:mz(j$CPsQtʆAfR+Y-1MS5NP4Q ՛CͧL~?5H ( 5rXj5ZC|FSkQU{ 03zdq ogNqe{5mt7fs̆%KKaC `C-L,2- Kn9gxptD/޳xo{|>7s|P˨{_ ʐw5hw#fE'F(P+ %FEpƫ s/ TW/G.|&0.e"G^IWt>.5!/d֘`ssShS۩͞jN3T% pP 9G0. KQ(hɱwO-YanqjI$_~܂νZ,8Qх0~&E^娜_,1&rT_'=#P!_N)$'{WO-dc o4H @58~@f+A ߕٺ@j36p P-ykQ &i^[[<K 8[vZtA#<Ɏacl*6KL@RM$Xra}Qd y'߉)3_x-SCRBӈTJT zWdf܇P^xʝgLXfG'9<ӑĀ ~}C/yZXVw_qLyY{#lN!@9ᇴ{L;Xk B? 
Cq(­ rޗ?27W(q4P1,eG!_֡{FfŒ0[Xq9{Nw:SN,Y!ΜoΪu#ƣ'b xuxS[ |=O-rvݖ9slnXs, Pc;jiTz-xG_bIPNڸrOPpčtq9n\Go}~V)JV?]qJĔ)goZҎ|_m~Bvq^j`9,F}C;lGd_@"ԞꡒpXY'ѫ ^%qm}=)_GP$.JnZ\&֐ęI^YG:ә0] GIq@ Vho'X} #m3w!P0&}۽+2L)!U]&is`|[ZЉl߱p\ i<տ)EZO#ΐ 4 H3CG< '\w}ruO lE \ I4XO{C!Տ.˪.촑;AgB%MUv[oQ!ީs#<} >#IC9M5[G@>P``l0ZpEXb5ykF`u4<7%R!RTrPثl7up=LϚUysii騈RJea+ys?j)AV0RH#adYJw RތO$ U#_ٛ늢HE݅9DiPR"(5D]( IpﴘPjQZj`Kb PyLjFLM];E)[h*&)CYʔ|\KNe{-b6L$) bcs-74ho5Z`gr@ /ZuR(-vXCRzF9`A?`SMz]s ĴfA'&-my1 _Tu[oAqx iK)Ea 俇xO.$zՕ*5h0Y{udrHJ8"d %a[C'7M3S8Iȷy>c:{#tIR($8H-WakpuM-Kf/Xf/7ݸڲNOZ!`NF KȨĞk|iXٱ7ߵ( Y6BFgԥF\jy T zVU^+NKpc?TjC+8foP,iCZ Iܾt"4i@5Y.p8b@_|V-,މH8q=DQ{ޠp<5k]2#ZQ S'ݢ]u)<4!p/bwZru, 'dJ=jB#J?Q$^I|Lt]ETܞܺ /'=6aFd n >GҒBqM@O@OY@MMYY ]\k״@5[`w%zTp8$8ģUYFAC6wKzW̭GW]G;f~RH^-qsnD',N } 'yfpq~\Ňxp;792Z4gǭ'=^ZSY^ 'ah'(=etLQrsu$:EJJAL8PWtFӫ;q@,4`J;տ5źٗ^Z44 g&JvZ:[ :/{};<|)턦~yOH-u[TSmimoT~0>싷}ՙCH1*+tu uu۪P#_ 0vV5&G7,6 "T,Q)nI0;Vlb#Cu&v *'tH">KZ8T^ba8}kO-Ӽv#Fry)N,Y+Ԣ\!H!pv+vǬO!][2e(xrEZmx[-(=4`i'ӷ:&,GyV/NN̔VL7v Ф\ ՏXJ7p/FskthIX}IY + vkn( wʥҮ:)jخ6;joB!tmi=ս [:.d>ւ.0f};+?Nۢk d: Ǐ޵dvuF}ʏ/\zl--}8!~F'ܲI꘏͓RkP%J/I.f`3tM$4~E(-E3vP5 6dcɫY鹧ί×l%o6HWW~vgաTpjq@&m+"U0Cβi9Y/G$l}q0-ס"Y KOngxt5#[/ᔢhSTL| gH!VU5}iw :IC9g1b!B u9Ƀm9Z'nm F[Z_F’7شtB%arؠxW,!*1ZVdv{XY59unR6"꼼R_G ovnv'?|nfC%u%^;|"!תVaEG(JI ?cP ϊ̉ﱳyAtjR6bJKɧ<>&Y3l956!ӊ=u4'X'N4Ҥ]* *OB垦~JY]fz2JPt\%1Q( .VTր6T+E`ش<<`=oXFx7ށq6{|jfI\ .nۋZ#DŽƦa6Dq݃y<|Jd+kaɥ(VEJJL[gk=6ڃk3#j?4(-7W'7`1F~T0k$Pj <\#8< G?]ģH&q:xʌ$m Иʆn HLXaHqsKs]2/Z龌L(,߼E`_DFEC&cPt׈zF]& oEeuQ@lXx)- MB("0DYYѣAE49񁷶?]5^xl U/Pat%='6ͳ;o|q6']w{/G W^E4ve|iI|a題\)b~O{yJ׻2"܅Z蘸x`z<Xuah_9xU5c9Dʋk3'#ۧ/۲ڎGWf#xȶ1wt v\}MS.^a4%q]BBތ7ӟ]~] rOHs> stream xWkTSg>1JiO5sKq[mkQQkIKBH  k`,U[ԶjvUgͭS7xvՙ5묬=ϳl6sbfE~kɯKXgg~:QxR!xf"<$y% l?[,Vb[`[X [B0ll&eűZgqLWڠY/̺oo.' OC~y乀>d\~RϺ0FADkILhכ\dmV8\*P()B Q!E*S$)PpnK'vv7V@w T85ZdNQY48B9! 
JoE/ eQ6(7ؼgTs ̉1T/|`Koe KP#U"Ѳ=UM>g#U?܃f~gwc KH 5 2@,.L-l&F^R/ɨaƨ=W,w8dH3"UvăWVO^bS@ $m뉴C1z ;$K`3{h΁_ UJf˘,mZ@mp\2^awT;Z6 \o` R$sF1wsZL S6+<̙e#!L~D|6hzyR-^R^Wu mu~3#A"#iuCZGfJnȱ3Q| @/ڰDvVFA/|P}$U' bcmJ9Php|avsGiT"(ŕvic}r -zvʯɰGeyA&p({lTXBԸX@FmtkmJsKѿ z#U2SmNmd &KǔVHE.ETjG 21VQdBj12khQg@~?)2L\e%#\Rї'i;"zƣ/*vk\r2_X?bkH0 \eQ&# j1S0+ksxpzTݓF-r瑶A~oټ)ǨIH}!bgN\|CFp?;D':={ۦh=i9%9/~j@GzV qNU1KvR(AS$=gBJ7W5]`NB$oJ/ ─TbX4&d|Ë^{h'ę?ע/~Ym#щ[ IzQpgmىV1d k (p+Jm-(h-k"v5hfm63lmoeLMKAWt3%6kzd:Je !)f8ʳ@ exQ.grd,䜙kg{˃1&endstream endobj 189 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1506 >> stream xUTyPSg<-L9GC0aCAD %:تmSP,jTbeUv!@MĦHce>^;vҔizrPZ4I.,|gs2UzI ߆ЩCr6l*./UzV̟Mp`Pr^цtzBtѹ4 SĜd`AS&-b*2|?bܥ|l~:D;/o>ۑ?xȦ1W. Gzcf$g~ޯ>c6gd[8>,O'Bˍ<OMX)Ho;bgnӗG2XpAa +kI6"XƟTn>Ca1! qxNp@}}Yq` '? |dcx /U>7qwI:jTX*nmm(C ɷu6NV)=I0 q0c3KVqEMPGWgsϽ3rtLyhA4XuX: Qcpw)c_sau􀃁eNl+# _-L򺴧" xU H>*[ɎF\@C ,OzϭUf̒b-aBy[M1$g0q(yiP=tytDx3-% i+X9b7DF !4_ dh`-Bs1e#*ס` yq( n,:W޼ GV3}{q?uWGۇ8Hbe:z UUTo߳qYK1C|MDp Vc {2)*5hCF6;O׊3ag[0x?~etRհQt5h(?^vh=ZÅd󖋡 /$w~:`\ 2~4H0:x-h3PM;IBLo9ul|aWdn}f<~mo.!.ݞX ׭,6l٪O54t~s*ս=m"`^&n 7l4Jl MiӅٮ_@hF5'1~~DF@B%u2^㿃/(<ŞmƵ&HEP? vh㭎5UU>5N?0> stream xU}LSWmU:h0*t pcX| hAPP([:r(0p8$M`@Tt#0LL馓^rpeY{sy0Kb EٖLp!kaHX!8gJLe.v.a6k3 7iW<Т(`*6Zyh #r54[u\-Pډ(%bV$Z ځ:#E Nh ` < <؁ÈhC"I3 BfA8l}EDZIܴ1ei5a,;%̻IHwA qz`X M׈h6 ne*bZ9ZVRR>TqM[[nOo2R`Seiz}iƳ_8FT'k> stream xyTSo ɽ"^ Nu*OJR+E0"Hy0Qd -vN}>}jO[׮:YV콿1#L/ |uLzf4Yd8ɡ홱cpXF.it=ڈXי;fzxu}uiT6b:_?"]5;"Bc]gywꨘmK&DĆn ƇtDǺSGn&jO\l_3T0̔4{~1q }!;v !"0^YHWL3Y221י fe^a3dV2jƋg&1̓ L`&2Sis[^z4\>F^1@1p`i6n&lRk.{  O%!eeo0?S'u'OQ_DQj#i0{q'<\#t! 
Vtl:b Ջ\I^[&*F:-i8 P:,oP9rS)ͨ 7Eϳ/;Z,}5lĩJ\JhS,a\Z?ȥ 2 $d2]aE@&LqMbC5#ڵ^!h\XCڥip]r)GŇ ј)YqV 'ļ>Ch4E>9 zKkI5n$~)[Y,z6!'>v}ߵG*UmoLw*kE?4-D6xW JNRUlЗ[ Еh9.막-et6O-2E{).)!hܛhH0&R֚B׏[,J?&;:hNROE)&c An(}Bt$\mH+b1hzbQ6Y&#q Yg0~vK.^6n՝%1=N'=szPQ qUb 6:;Lm56xɴsC:(߰>$ql4 Ii!ќx#"%0} `}{Y+C_{sGBChOI<ϊUME"b/e}ڌݺڌeM0A59N._~ݒR(6TtJK9RZeKlVBㅐA4Nk/-, K S \G+q->_ߨ-:mL*/&TVT\ >=u >~n?/ ,ySxzf']h(`v1lD/?1k OZ6n(6q|tRs8ݝ=JqB$; RCsJct0]IWHOk~+YZϨK6#5toq2zuWcHl7Bl׌^?zz~iv51f@wٴ3$2"4)!W*? ґ(|Hx;SllMPXo!-Bv<g `NoLMToh-fӑnO"[ٲxvьw 5%ɍT/D<3c%d'<GP(**(wdIdK=F3}HԗbJѱZv[|A!%kRd6Pd(7 (:* rMUJ _K7=__Z'Ӯ8>JGm뢻D |v KdJyzsK"N?9)̸W+񺀤tbHUlmeE(͡eZ3knKmjꌴDzs,zw!*D4JDҢ8N|OvȾoP)~ϣL n_}|3ڟ{/UH|vH㉪SǷ @.p7/**k+{#B )c!C:B0NJW#~i/fKKmR?B'Tlp9v7IÓO<믨K9$R9<>\h:oCqo \Bg֛@~tQ.4Sť-]?(ü}a*km;boI{Mކ]Qo&nG8]'wVCCb=G[dc2Ͼx\MgOG-:mh bX+YR8闯]g_!{О?ho1g ΄ )R@n.,VyeGj PV40 b endstream endobj 192 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3737 >> stream xW TgTlNQS/Ѹ+ Wm5}WT\웊, uXQg<s3Gswޜ3OwUw߿n߽R06CBZg܁SgyB?D~E 76؂lZǏѸw$b EtU1qa NS9[NΝiEdH\xP@ӺȀ]A! 
NS%$,3'99yv@dХf:%'9m K vrJpZ4xw#cBEE1 3qE8䀔!l n{:uk4yl`62f b|/Y͸1swf 3yy`\uzf 3%g^fI,ƆIc~PR\fHrCglA5SU*x.z۰ò}64.7#捨Q.tMY %٣BwR)G'nCS1iCˀKҪZgL =YIwRcv(hV]!ζ3i`#6,y(G"$EyZQE_rt'Wݵl񊩅湬5 ,ww{5 Ᶎ϶+h Òlj>4e${tA .<bMQGOWgdEJ0Z9Xro :O< T Acltn?dA-h5ERn(yB^';8Q\ \) U_$aSf-s@q$^NȊޠ6\|A9<G˫ĞLpث ;R0B9Ħ3vp_s˽MM5{X6!0D73YIq-I[zi~9*і%NSqV9jr˞cɘwQ/fX"%|β7Х8]H gT%P]P UFWLDYw6o0wJ)әڼc p7,YZG)cgMg Wvpq?1g_OPid#g)ASKG dQ~U*9MBs%Vb:NuRQ=\(N%ry%^[ItQ]QAsqwov+űۘj6P Vhp OBa(BX]mZj$UnFaCḾNg4@"?z5F\4(Z)@YEp>Yqwzh2_|^v<2w3dI7ExЈ t|_l1;̍'J885cQОgH%7˵Z?*Sd%2 C>)}~(R8]K&RS2'ʫv7iua;Ԓ -K+@1aBZζ}I.G-}zS$l}f@,Aqg~z yUma]hV2}lp,/4B*wƠvxj\O)a CAd/{7:X/<)oC:;,MlQ$wK4egUL*8v^'d|k~`8.عۣ2'jDF?sس0*9̓{+Pl8`8/ê&S~m2^E'6rs9+ }6z d+lQ|+,hu Jr/*4s_%woNRP_iUa$VR}RZc*,QmAlVq@:]cRndɗ o/7S7M,-7ۑ͐NW(p?WO}Jbptd|P^rGFz%.Iq '; JZ.ڽ3:.*\gW#r]GĈYm8VVy\5Cָ طZdTy@NJYhz _ii?[B4g|,J _ 8(4מ.쵻ä 1P7d(hSpZE4-LsjXXS\WD΀yFM Qw7:`H/߲b;Q-ےWW>"*fk8'[Qp.H)#奋 8,76qmku)2axn/IU}|B}5?b{)@O>O_Q'tkT[Zep=v~G|9 2w.X>ۤJLH.*DՀkxp4?7ݥeD"~E'dO˸g 1ipGV8 byC!0_>+ CpavCPUk7hfendstream endobj 193 0 obj << /Filter /FlateDecode /Length 4178 >> stream x\Kse6eo7;q9I9bb*CRZ2&9n4hbe,*Q hG?~~<zq<ыovw4o~<0Ac{<>}}Docc{Ա}׉|83 +W \7{xG6ͦs[:!ݯ`3X,}靧]ut0%UL;vK AmPF~TM8Uÿdw7!:l"nax}o:>J+B!4uBXek>榍5XګHAYk ͹D2+*:]NoS^x'Xfq'BF{Owq 4*{`Il6@8aH.ML2qBϸ+$͉Y&xkTKTNe|DPSn"+ՃNjA\`9+Љ8I,-u95M6 :UGxO b}>BP]1W0m`5֝XS ><2Owex\9oP3vZgZD5e/ B @eWQ)m .YNg 6>xWn Z> `h /Ryqm;Dj\>q]9 }=u9焲,ѢR5pV/|W26Qq0`gDFdPp/XWނ$R ¾S,g :`al X)\ 2)JbiK9̫vjV$f6Ef0,╂w dH2|Ӝ5bR/}:!ASo7UQY0 P3"U(asx(<8XBo::ѱpՁIWTպ +Bb 8\ gH꫄۫nD/aUq׹ xѸtsϷt>S Mv;8UK|DQw+0k{o% =(<2F ~ňs2,,yES1zSE8 b9qAh"\!fSB[0t{ J4 r%oQ~HUO xQ~b`Fͳl;-C> IDoLVFCYq2"1hdkca #U^2*sZ,R#SWw4 'WϑZK@Hr5#K pxhlѩ ӵBդrPu`&_!Kܥ(M5rn 3ѯn|QuBٴl@[lrs eeYޮRdh)7eUxE"E$-}2BbNT\LDqd&I6IJVIx,'4g':DMMdB,FbdB 9/0$PPj5E pt Sf)Vp&+5m\Oxe檢YL*rbG#`щz-sJ[ʏ,j~ !C4M\XX\Qt qsOۃcTY5 sQ6"Rsh9*ӱA],}J_ vu_0u@!*,8PxH΢<{V>Yx8|b-SmS"Rp^30f %a-kHVUjzT]7wޜN vPN+^`b6POΨ SnJc:bׂq~5rI㪏Ykc!U*jfP!"`~maǑ8Ly|@1xls}޶.9rX$k2}^YW`T!԰Z)Z}+$ꌎ_}ͧSsmSk{m5m~|D*c@̧en5s~ٹ=SU\% ID 
4+ΔdC6yJf%5l9l߸^0zDmw IpY7+ >u݅آQ BS^YsVjA{U%kqD4kd,e^kMzV )jj@1gÀ1D`>%]tr>rR2yZIo9EP'f$>kS<$w>,8*"^!E㎍媧buVd<KbRm?acǨ6rCJ_cjlQdv!=n-_Bʘkߑ]u À,fF?sF.3䠪{dzDZEQJ={\J*YS+%ZּpYIɍ7FƋQJlx :-KV` 5^f_!u6oO÷Ϧt'5Y9E9W'Vr:q"_ẺIVY ˕vYAZ'5'~fQ.KX㿯8 h|K5Ȫ]eJcipK5|:e)D^9Qwa86tf[qhn-P' o=UrtTdYD|A ۡ]5Sh" }jΊ[a$;:&R[Ζ}wu3FgDRwuk[ȋe# F+<1J\[6(4 vhl#I +iCˢm^et զ& <>&g&F/죆e;O1Xg4Ы0eLL:UUrÃou;XBd#9zԸ.+4Y8 a":?Ӯ<+~(>Z,S s|· ª7ݚSߖּG^>A{ujRҮ} oef-[DQ ^~S7(4UJb)C*޽}@_raӘe#`(*q"y=NVV59}^-.?9jg9eBھW)âJSކhoi['_1s|.[$bG;x>q[ ͔~6؉v)k*,i˙} 0T:NPzZ*7rJi4vg.ie-!k_7JU 1߽"*dH7m#N"U<=yendstream endobj 194 0 obj << /Filter /FlateDecode /Length 159 >> stream x313R0P0U0S01C.=Cɹ\ &`A RN\ %E\@i.}0`ȥ 43KM V8qy(-> stream x]O10 TXЪj8(Nߗ,N>~l#GpƲ5 HeQV-ƃ剳B7OfrɫrӴxO$ڢZc:A#04]FUטyp 8榹I*`~xS 6/FS}endstream endobj 196 0 obj << /Filter /FlateDecode /Length 2984 >> stream x[r =¸(Q0 ۇ&!H`z5͙1 Y+Y|Y|sʖ?=v:ys?.N> *$?rɜ٥r=U[:c'83ƕZxl)t)1f!;w a ]Grq&XjiY{onyp#ukPe2CpnnJy},>ɽo~>-[{hNkY &mcp,gR>iKc+QI{mz , <7$Yh/c,_.m;=!D LfKŔ.9 (vEu建ZLSK2Sog˙֍Io*cgh1H%X&Px&^K9[r=[ p#妵E*@2jS 喧d-mR 8KIJ,Fh)7  n.P|hL%m<`B5S .T9l VYIy.-c}r襘-WwhGL4)&ӗ"rvxzcWUL) !XYǫL$ XzɠBOmꓗ:֪ A ˊ I4B:zpihny,v,<ܞA5d ,E94Aj=ok&?,Jl($Sრ w6^VG  Rp(/p YRE-*fG qNDu~x&:9P!m!ZPw92a :E Bi K6h4Qv2rL\+u/-!CN`$ET"*Rohsp|>Es_}]EC]¯=:U(Fx k}XblfőRGQFS2ZG8+gJ.nn\mUtý^"~*2~~ Vwy%уxݗ~n>-! 
@tZ%T I33FM]j":(WEO_n4enrC&𙋺ٵ9ϢvE MRM]m0\ːTwVAUϾuaē"Rm2mؔv% NHufݼXi:ݼ{)JMc=G(hn>xA6y%c)Rd[%VE`moVкx&A :]8 =)Z RaLKb-RZSW9T)a{o[i<ߏ;Tb[TͺnB83&b/\eLG8@ͰRߗ5xjR 2=ߧʛfcegeN[sU}fَOa#E\әL!Nf_SxA`Kg{k`PZxg;l&FDKDoB[>cޯs0 &Cڠ_6wJbDoMf Gxid\'P@e0ɏSuNG WV)y2O1z+RR9t{v  ~m5_xө@d R%؉`f4 qlr4^4|ċ&HIn{Uv((CJ00N=/RIƷ2vnM\ABr]iZ%6dvByo|@0scF> stream x]O10 @ ]ZUm?eB:gׁm_Xց$i, x`CSwendstream endobj 198 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1954 >> stream xUip_!7qJg U!)WBJ\Bhpd8 p--˖¶Z]zkuؖ,)#Lq&Rkh3CIBh3 _z:]A/~w`ar _ۨTٝ D %-E݋P;V{*,-vkC]=_ZLuk_Xz~hmnWVSFs5>4ߴ4)~zjU6memZar0fJjQʇ 2Zڌaأ-W7ޱ޸E ێ=-ŖcU l/#ǰuNl>v^y~_ss 6-Wѭ%ōy4ʮ,Z.WȚ3JyGG 8WL( "%N!2(FKbj3T]νʏ": c8x=VQJ@+O<ZNh $Ux`/^:;ҞEY~uE6ibg'ʕ6]iU!RLHL@:#Ykc'!@`* 3pCq? I 4KbS T̏7H!\e]pк'Ka"]q~YMV? "xUdp\1xvmPţbH9`7S^ܓŲOҤʇ$HnKZ@3Tl~CU@}L @?Nɹ\Cou@Qrz@@Gґ,=2!\-=CU SpJR+\bfi9.g.z]օބgBۃ| zX3PtB0 dC93ۛVUu89`iv٦oL Dvp Wn(p~G7tNVS?.&=ǣПG`k3"ի椀and(N 5 6`5aAHϼgy(8HU"J|Ωa״#E`Z'.W!8ozZ-]6m|=}LWj >xjyys1roj\6FA ` o[Ϧ'&x!*#JW@@|9<&wXA4 5.uo \Fs;kQ.  w͝G=eOfB~N=rZb*_(Eo4hEy IfdZ%A)F|{c&=!(-Q.+$2EJu8GfJ `ؿ +5endstream endobj 199 0 obj << /Filter /FlateDecode /Length 3020 >> stream x[r)oSi*Ey,ZD1شW G= sf"J҂P.= ū~43:_nҏWߟSeIgM1gָ`M9ug{/z[ckgeF]ow;W[azD?#˭tj>Ϸ;)% û7Eﬢ!X?1M 0s߁K^3tO!4s:9K~9=Vw Xd8 :IĴ:Ȃ(:QlY;gEz9ٺI!EWpb+t!(YFQOR`c1: s3\T"1J33:vG3#=$"d*όϝ:>ߑ 4*\EUU19f:Ñy.A62p}q&c&t<#Vn$1Jr)*BFŠ6*Bknle2êzxL3U 61@>0Ki ڤ8A-sC p藳zPU=f dC&Um7kR4Ip7FT"zDSCi1ҠȾQn802KyaãL9*W^6-lޱժ,@뚁&p4+<mPHE&@QTEGT A2W] !Uy=|: NU}>U (~2ԙ 3򡑶C*6w/J{,XFժ3I>V@bڦ ~У ϼkc{þ@tn}S/i l֣NԲ!/ 6BL dW0Etk9NeQ}jD6LEM~K2'Q<҄㋅s¥B!oV!C`U4˽T|u%o y1 |`J#eO\qb餇SeE$!=W¹ȕK^`;{>RJ^>l ^0_0H_PҜ U$dݣ,ڳ$eJ n=r==D#\dxhGV'BOceY巏-*g8,d}&g}a28YY*I!iW\w[PX4< 8y?Ӌksk12P <27 ] ېك=1D#TH\":y*y2.gǃjM橫TA+KyB='|TwSEQj&{+cCw )$x_N*#!|`F\x0lep ѫe*4K(cAD*:ߟ1Tp%m>qߪnZRǐ$.G@> stream x]O10 @UXҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|VS^[+x$E b'Φ2?M%K17MR{;RB|ES{endstream endobj 201 0 obj << /Filter /FlateDecode /Length 3322 >> stream x[r}WGqZNp򒋝J*q%\y" 4]vTR`ЧOn?,X,]^v;] 
HoZ3W&_X3g8+gݿxuŇK3όqtehJdWVw/pn/UJJx(zm7fpn]8ޅAwB or=50LU;FN ·Urc{'2 ^(۰v֊n: \%LGɦo–ݏadޗiÚ U?.th=< (Yic{aa[9, nU(۸(ɽ8we5@.VS38 [Jz)#XVC6s-"kDeL`r]͹s0~-GgdRԲ_VksX`oR/3^EXX J/*FWFwM6 ҕgAJHv"{b%<y@Hz?ܚt8 k# 0ȭIGq"}u@@4c`"08-i LV?Zٰ;av \7tN`Oq(KU;Ђ-؇?y0c໏y*mi~]MJP'Ew]2(+\.ޖB@#{)_7y@ƅ4ZS;ƚ2%ZM|O @Gl}f崇({] cTQ 1\7OPzL0<%xmP4X>&~5" !F;Q&)ǩ)ya7]-$"1α R{.{CuQ0 (` ao}acS+IE aqMaE<3yGp>NʼnVk%— f-SvGAΌpl$n .ga謢2%Isޢie)_G|!ۗ(NEqIJS}@w\&g.3`\0P}%ſK+t[d~mp e!6XڶY㵒5i"r6 fH : LJ5\]Wnϻw#c[?:|~M<Ա$eHJ‚"ЀMBu$0VH`G[D&i ZFj9{%cDmS6fh/ɋhFa+Ha#AgFU1eK@%X3ZбX\B,?_VouxE`R5KQ/fGCo+'9,MKH[O:g,ԗͥ6郕nW˥k,&*,$V^$ a';'ff\1}T( Is#9$i } SS!PHW3LnQ$3$WӐ̹~:;;z{TLC]"RlPctf@%vOXCRIy`qexy?Vf#?_\":RuiK#[K,'aa 1#M+RL KUBUUJ`>Zsମg6ѻ ez &caZ!I'0UQym<5cT_n=&y-SXt/ C9 =r>; J}xjx0n;X}nJE%+nF9Pkx}Q7Y ٰ}GD#kY+p+o"3PmrgtNlfΆή9en+"\pH<'yiwD*pXo䎜HHLAhs8 ʍ颀Q,iJsFf=HK$d0Fz1b+-Mi~-ql67)I,J /S 9Xjྨ /\Iŝg̝z5c*6=36DqS_KlnU&;)cA}&WI/qME{A/rXmK.)tT"yp9)DQKgi>JqOB'i^57۞j}QC]oGqK[@ǴV "(͓-$} ry (f}tEf9Q\#'3ڔd3)!|l]¬먱A8 we89<~t2]r5:849'$u!O鰝*+ݲUZAc&>sN07)kGzGh Ġ בWVn:J"S8>$]gª`C:=Y]irL"}HAD8-Xk/go]RuS"#u@ju7?m`RrvbʗVSPryP<ץ*;:m$Oo^y^~[SPXv~*lx 3]3ԎI[xnZɏњ6!W~Rjg}kaêsŒO*)[Zzv|c>qK+8Գ5^ҊYSp6B͇[$)nKfx FM(S ; bثXC8*~ {OduT_Jkޅw+LJQ,Mo-~`QBְfk.6?{ GE).F!3[,I?$/_/;`1X34-+\KP| ?onEr.}|*Ot?2Sy(tT1 2}.W| ;?ytglKz; $ q]bs7RjZitu&/&eF(&*U\ 7nS']16CnӋ~f〷|.N9 \pC#u@ˋmLendstream endobj 202 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 0 0~ 8ʀ0%!tpw'0^GGpȍ%pu[ΖX- nʿ?n@sZP> stream x\ro̓NkM!KX> t׫l̴H Ԛ2erw⨏7/;]H<*}~Bl }G'/7őS]]8uۣD_ByN^kpL V:;64}8_L,v֘޷Kwxy Y~X^jv6 5֪2-MMKV+cӄ}R59[Q-4qRSެ.]:^:Ce{iٜ/Z=_Pw坰6ͮptJk:MS*uޏb;4jKaru|ƃbBr&q$\K}Av_]mUڰau6ؾ7v짷e!IM@Y |X{+ՓlNc_&ke4MQJjDj6gE@IEx+}O3俓6e8Pn2l%IQO-5,McトZRAiT'V?Ϋ` Awޢ&͜uՁtMWs=nkzDyr5 Q^ztƭT1Z R݊Lt$8yo4-6m}<u|1H$Ȳ`PViRZE<"(ݥ GRASI4BT0:4 91M2G]!*W u>;sixUahޞ RSwQId\!R磖7|d=KptĦO{ `ڇܧ,(޲MR`Xj=]= A9n% ~ڂ[Iu }/jsSy@FÏH#:mmm޲?"ՂfΥ]h9`,%AT0 &dƏPS3V@L"|?&(3se;Y2[?>Z3Xihjb%p()dBAk7RI(upʍK):^{ gM)Zm/ӤtuqsNWiH;*ɹb)IKeK69fd&Ke42kKD7hIeY&}(ODm-aPrqAA4 y}ugJ>N^F O 
I)Q,p!OίZeS݂"AEpFRaOi\ٷ3J׀6gD0tVMvI6ʞ)v΅i*1aL%K8lIA^+/ ʄZ3!UЗ1r5im6쫦$5߄g9fgF!d*݂? X<^W!'xd! 'uG  ;_Mc&"j/D ']-'B3(@GL1(F)Wtc_χF{lX  s̞b^/^7yV<OvŊIv oH`SQO cɦ$r )"eD- ^%nj jAt1D-w A{ley wzD &8rczBN>dgC(LD8Cs"Gʽ_<>x$cC 23mT R7YGŋCxQ|rBHjs_=ˋrjp5|fM*,3X4>2cVp&Tj$! '_ |5%TٖŽz10(*K0"IV ,aDh@m6ML: |Cj}D J\yt?zbK~'c֩$+m*E/-Ay֩o_oろ@'=iNh]S43I.%f 1Gwyr۔R77e K0cȂ#!8zD^(D[gr,V4 ZST S})vxY5dRtyP/+SZRSi,Dʲ7-+E"M$7Q"M^*^| \>5|l<x|ZVaƃ$IU׋RLP`SX^+i =lƓ\WlL' [n1<0wNbVbñRc(SDȃŹj}^}[w]^J۸HMYs/$Yнt zwo򄕟I@݃ 4@4as,܈L` jP4Q+XeThƈB.)K-X?'熰7  |)wqϣ )]24 O<Ґ}E:$2ɊeL1ȑ=DGnymc-AL(j[Uźq.j;p+QbP]X?O-0IaȞ&hK?ϙ'z հ@bV>yl(3o(Lbwl4+jy5i4OgVL"} )8N¬&śH.'Y ?1l6RG$5=yO/5&\a1hA?QoksY81A*\ vΥIeIn-8)1i<ǨeRAA ^]o@JX_f ̈́\rH `%л?}%X~Ćunx4k!|Gh s@cM(E)Mwts. Gby{eiCI$^i0 u^$%5If(5hҷio 0e*=O(/@k;O5JWYf^|-3F{E;$P$s8 Fۦ"UHF`Ko6D^wiNK~L=J{ç7kOlnb·RA^3Rk=MI ZSTer UK쒠8@ uq{"U6ݜ3?h9E6f yBs #Ǐ@cF)FL3ky:co^PCSLToa1O._3+&} %YPT@^#d .'e]n04oCȱj4\NۓCV>ӘIL&5R6!Na1ÇŔgB,4mRv_LaA#)_@{η#<6ow>xvN=`?d JAK{5:] 0b^؇W 1<ٿm=]a&zy`;lk`۱@i ,6:Q64*R+a)찓/:{0\L0ncDdκ#4ʏ$]e8Ru7P\cc^Yk28a ,E?u}?< ;;v0 ^gGW~)X%=f$ͻoۺ9xBal/0zlB]'emo!.|HAl)[]J_"0Ȉ<{{s)eY!DL`N>R/^~y>e/'g#ti 9h:Tez?<ӟrdZ |Q,{Q||J #eoH̏Ǵe_`EF;ѧI#Mhlnp|rVkwimiTE\\**~hw<]65:̽ˆff}iFMrqHɝ\hrw8{bzn,{g2}a)]N.;P<\ن3 fFwoXR w**"BSUђ @ʶM4uD9Q4 S>FLDX5;%NfA9k[F}^Z0E+VNv5kӆ6a7Xj-y} /ڨ_'/Bendstream endobj 204 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 *XЪj8(Nߗ,N>~l#GpƲ5 HeQV-ƃ剳B7OfrɫrӴxO$ڢZc:A#04]FUטyp 8榹I*`~xS 6/ES{endstream endobj 205 0 obj << /Filter /FlateDecode /Length 4247 >> stream x\r}W#T~b'TJlyݺvj @ 8J+@h>öar{A{xq?v~uLhM4u hZkܡV>^` mcz?9-[ݤ=6}wߋ^8o#)EcW?ڦuNYQ/)&Wg1|] !8u~ua=͜m[ݭWɄZ]=x#YmbUMY&Y+]]fBs̋!J6o⋆xgf9GsGw^ 2/wѣQ jzUZ :3κ(=k|uGS Pm+29O^4ŝI}V4JH4m/h^m_t[U~oWZ/(i}]r3ce6\jIyנkޒ`֚Y5/W#aT 'ISmj6iڶ? yQLQ.:kIGqmWt,{Q.z]Y>a~ބi[-K>YHӄ'~LͷLZ޿Y ɐ';;\PQ[ ۖw%&j'g\f;N{6t c;%{eMj>Tu|Hͳ+HHsY[Ag#,IOPA%=O-G&"!D0ߦm<0֝}\ӦSA-F]njRɮ-ZCwm[\ճ^ٔ^A rxL~r>‹8Y+쬖j\uJZz_Uͫ|3҇*'fI. 
v/[>NI[dM:LUҭ<9'lS tރ%-I1Z:"|М SQE[z1tg J..S3|Laӂ!U6LAARqY#ޛR5YgIpy J3OCkq_|1,c 4ZCKI͋25ᵳjK;dTgeMJY*4R|L͛|WԄ!ޤY 7Ȃ <^';}q`Ҵ:B zpxH=#Gl4G'8WȊt;jxÞiphYn_U 8IoRzjX UcD;֯v-Q}m3FXvV*zOA 7X[;2LKHѵ[.̄˹ Vdb)`:~*E bgMZ")ԻŮ0ԝ>0Ӑ'=ۿJlCt$9R/AF=Br~\ɔiq)5>6 $4^2.W5Ʉib>:"_xtb$RW65/S4+^aj w;A,|':gۋr$9B1(x!B0. v"[DC|0x8p$i&xQ21>{㱔~mjnf.?4vW^"Z}\Cx dk%Md˙(/;!qzŚAPybk9XHO2D>+fdPD[nCbGH0L0wQ"*;wMQz/H8x/l\#X~ 0@\ƃJ]ϑ}aIaW'U G}=L/iA1xTᬮ4p QYL!6UdkuR<{ܟԙ2p2Q$8D 4GBtiDQ&?ġ穻2CzL-3(y"שy_mަՕbX NakB@ڊP7/,s6N*PT{;&:檜nVDS1тTk~!KR)i^ [ [| M5 l ?x%1\D_x_tԈuq .d887džg/fF Q( 1w QmviuU#4iT'@h.NzRAVDg\p*d.8OR9dEwaIeIH ^w7AAJP&o=[O$M%Hx@:yuMU x^Y-)\o LaJ 3M=.%!C.zp-<µ!lgkΓ2,_!uCDim, Jl*2"/w14G,s*tјX B7h8~. 1 ҭB]Գ,AԩҬP^uzP.Z\2 &+_S2^WO*p"6dyD(C )[G@]*6;EƈL Ri5f||S7 TfɅ{Aڗ6B4!CFwO1iwU[O#7_5+8olw4jJI;S=PWXuX-+#ߧuKƃ6ZASn (߅Mx0͹͊rn,Ke3|I*W#;,ZKؼU;ktO'*Rh 5>]"&wwRc&RHsJH>*@9{F쑴оw(N\x8TĘ.;;Xy(NNۻ"Z?Kͧ4ibB;~!(F3fH9~b:!) W__dJ!J]i20T!v1$p+crE[-[vqe5bwҳ&8_7Znp'<2[A66ZrV6T| $G~zmYZL+Cr˩!t9$odW]v2`{ʿke]Rs.*4ƚ3Y^ $PzڐY38\2_Ƃ"#!Z'Jp.\B LģB;%S3>͛9Ko^hi6IܳX@e˾.km` "mmeƈ!&@0hUMOC 8ό|JVp=6Jp "W7$AFSYZ!™c𕋒w6lN>sTz=|aQ|Rhh;-ӳ2.+ tQ˿='2#:2?Ȁ[H댺})zxv@*>cXQp21i a*3^V">b( I1 "*aNF´ m,>KwЧ4?-B@Hoi :^rV^FM._3hj=S\^%A=#ȯE endstream endobj 206 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 TXЪj8(Nߗ,N>~l#GpƲ5 HeQV-ƃ剳B7OfRUiZB 'mQt1 l.kSIT @s$Lg)D]Syendstream endobj 207 0 obj << /Filter /FlateDecode /Length 5112 >> stream x\[s$u~J7\LY3n܁I*aD.Ph vRz,sb؊zwu_^W.~ن!˛Kqv.\Xۋw/V>`z?>韂:^u!o#!^}!_B;u!w[<}fhZYo >ܸZNFx?5/ y]m!)B yu!hn+eAlpf q&84MttpNqzӎqrPhZAK,Ӽ0pcO j32$*=haa1ԠtdƮ@8ؽM ָ kG ;^Ui*fmWr_o$4jS Zj90A>Ӎ]lȠCG[{`A_=L91J8kf oF)AMPj,)I?$zu4} >LL߽myO KXŁ$MwyD:]"Q`<JE?G\vWflt$qry YUdn3dn{׵JB_9G +b+\='عsҚRg.N_~e#ֳoѮI:Af%ٵ UЂXE v_8s"d a4NQ +'蟊~CWk4,|mmz_= f}!?t?[`an yUW}-p (vI>h%Xʘ1D-h `5ơbyΓ|OE #"%bRV#wE>#J"C.Dy-  /X'R=?A7J1᳐%՗2^PE;^r>8-Ztc RP#2u#mZjR<% ѷ6dlmVŨ&ǎT!r%O ip2j8P3%I8k8Jn<- YUd5G> 9Df4L*60b t`Yq EhNX{,D ٗBӚwQvSڈ%FtK¡xmPڎ?md^w B>|b-YFYPȯ3SO0`p)dú?e򥜲䷈YhզB Z6bVIqަO]e m0a*ƶ[k5. 
F%Ry 9AzԇMC)yUhaD!cf=MH"ª*L }jYj!WɁW`n(U6 5.""!Zʣ \ԕf=Oed ؊ v ΐw&7; i5Ld>`T犥Ɯ4ye`TqHM6ԧ!)Be\ZL*CjwTc t:Y~M|hn9W_ &ο*~[s'މ,ܨ`;'n(2컩$Tgص{Н'.oJ`=Dv@^1ȓT>k6a;ah& (pEr/k&:&zLT(@,-g{p ~ $ "F>)Cmft'<@}%vMuLxy)w1fQI` ҴYڠI]tQT$;,ޓǺ=Th]$sؕs"!b'@ckZ_"^S 8b\lmR #܏=$6VN|p"?a~^ɠ9hq2PH'U|d0#hk!pn5nV`}xcs7IG'?zOK*~^Y] =ǂPUTUY=xıBKwX [a (Ȇ0,O =OS RN4/2q)nB<[zEĠ|b{OO]~e򅚃s"&pl5B.̟NuRt2FI6L*~dɾ۹+HpaRށ)Hhq dG O}YOAw n ΛN38k qlƮ1 do5 'e 碎x2C4#0HzʓG[ƮB{m'@x(Xu#m atڟn3;!.s48ddz82qG9 ^0uj|UUX֨\5j-h!fo gQA)/q"k)S=.R_{e\h! 9%W^A]P ɲ7]ԕ+h 뇮 }];P1ҿׅCEI{APrٻ30Q4Ż^j2v6F-BtRY a:Ui898^O}xh[-bo^QmV,.KF${lRˮI\ 5{!]fHM!TC-tC!?]!~DB~Sz)9_A!Uv?wMNX~?J~J`F4ֆ[*;p?~{Lb`$⸶*qa2Cв BmTzGK57L0Jcb%4aT~0{3.%b) MnZ9d=*J3m [AJ)C+'& 7K h#m y:m _r.4$>y㥍 mj&Gv6jԚ(ija>aߣ-}a 'OտP_ ٝG|CwsaXJ_[w#(thfkP PStSHv?C1^Oݎd@8L*XJA^ i] FwZY>cǃ+ј),8LM '5r ƾ: ЩMC, uг~V82=!{*#41S Ԗ6"T[ENFX3(ϟSa-BdL1C~n PM8CcGJgf-=Ji?.PH%2H/$i=(+KyQ OuY@) ~O'T.U`D(,"L[jݐgْݿu}m *K~00Ig5O\UAqIsz VM]Of ]r@ ?A*xgAM$[TyI"q y_eK*bJyx-aN[ >_qt~Uˀ܀G)*?qIg?3t_ POwmut]dK6ǐ2/@e"XCERQaUXW>M?z-;ڭkK@Ys@c~~(zAXqLɚ g%[x's9#3$d~мrY} s/cxEBe:Zke>]eT%CTy*·iz=n` );#Ϻgh# Y? <{a_py32 BNl+Y6~!aD &w# /)-&N`PQ_sj > U&W2ǘG̡a ߦ8VAQ<-iM7䏩G"5& ,W|":1Q_{rbTNS4V'X1D0vX昫ܔQRALiMEU{_\$}Y9d6cAC/woSyf!{=4mւΛ.7x>_O`endstream endobj 208 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 P ]ZUm?eB:gׁm_Xց$i, x`DSzendstream endobj 209 0 obj << /Filter /FlateDecode /Length 4641 >> stream x\Ko$ :vM:'gq\zڒF+Tf|XzfQUW~{w'_NO듷'"4s~{3.A]'ũS]]8uӳۓgVw;8b  VzS刺ol^fW7Icdw'DDYoV_Co-RzV gs)Xm)MsvqV(jtB[K 1>i=`_X_^}W :`[i}\&. 
^:_t 1 2-ja2nq5t4<.Sh[ ^vl] @7^.x^j8g:.KU/oͽ^ğs;oiάB9LBC:n#n ^*怤:P+mvY1 T5M5,`r_xl͸qxFnF5 ¡(]&hլl,oN,wE^ Mo \JsNJޯ8Y*X8HĉaNvk`@  .!0ljO`3A&d^AZWHߓpWΏU|2b6TizF+Ce,:ˬb<܀`.w<9+@PaаX@(G4?6 Sڎ25g4^碭JaؼcHf𧻓Ͼ򁾝9civ+X9 VpbSFӻ:].Euu0A%98amjbG;SN^.o^{AJ줒Uq_ I8=0BL$)ؙ<'%F󲝴IwU pv9j hppc\'`1=\ 8VCޒ*]fC*˜,l49-MrJ+<@j "$"v1Fů?D1b3$u]?hSp<>1ӣ|YIsڸs5i7G^uQb"kp> H `[ϢD~+5?#%EF8OV6cYs4a^@ߥN,t8Ik耽q b4Jv6~%:abi"dkd ]3kۦ}χ4$[DQRx!#;!lFϳo+KĎ7#e&&ʘlOB)fU1 UZ@e7fLU5s| tРx7r=YaӪ5=M6%b|j(# o5"6U+BN O=fOrZGF?UyZJ6&V6{iEޫ*߯9rl)(&:TQ[ D6 CO`6qYCr=?(pTɪ;;iY0itsJ$n&I=֣zW da1Bsd:t5J)b%gʻ J0Ț=zdb1@'3od㦀&,\4 恈U&S:1A*-8N \]|XԶ{xUU b&=A#9,=q$=߱M>~.Ѡc G >C+t9EÓzbZǜwmv`9 R:T~ȹ_ _xrR"69ҟsf<]rRKDwKh}>1פN\rjc㽨لD mjv@ĺ,8c_*Uh)@[.LcMyl\ԥzhי Sd>E:qP6kb/#GV_Qy'Fi8K>~J6>ul.>޺3gvڷ3* 1I+&0C\k\N6ՙ\9ihwZr3 BisE+8`0 Uih:G\rZB\ aoI>b+Y;UC |*Q', Ӹ+x:6$$t>G+2?' >-A)O/:c Wv%CKR;yT9|?*г"c<ʙNq׌$Sv4ˣSI^Y'6%<ߒgmiLU$ZZN2[eskG?GToF`" fu| efa yI-5>20 ,<[foJvڜ*G[_ň1=t~ bGUh" Wi%or[OMU?#^{7y~(moeon$ˆᶪ?왷:/Co&!kƯ?W8oB;W %4f|@!;LGW(87 o#`ƪO@fv$M[su09Ճ)0|F,Ka黳:6dNendstream endobj 210 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 TXЪj8(Nߗ,N>~l#GpƲ5 HeQV-ƃ剳B7OfϪUiZB 'mQt1 l.kSIT @s$Lg)HSendstream endobj 211 0 obj << /Filter /FlateDecode /Length 4580 >> stream x\Ks#xnW.J8*䰤HJK撊Ui3=;;ܕi_?N8iOow'WՋ^ϓɗ/d N^^o`WR{b[ioO^޾XwGzA/NK/_%+ Ҽ/WyQ7bec$I#IV#irv+kL0yi^45/J4Wl߷l+%|yZ dJH==V+cw˾jyt i6v!xoij릉}ѫ.h&__ƶb4G-4!~<+?D)dFY[I6vMV:X>!4TV&ev؇aV4W_B&-b$UcXZ ZŸI3>Hz![_ 0fe:9ne4>Ljjiݔ߰B 4Y&'씟.XťMNM~Q U-1mܧV`ţ,Z4Zj!H>t-JO,;_2mz(cS_nfT,@\PSMvA~;>L 0m[ 7tdNHSi-cҬGacb 1$¶R5 jǍk[{}d$%XHDl:/Ɠʅ JK'X[3p-f/H'>NODY!Һ iOM4[߲0C,ه Km_FsA ii!ܞIsHnQgh*ܛw 0rHc;`p7Ʒ*A1;bӋCc]>&i$1ɾVVYiȲkGnek]v(ۆDe9PhdrGr@88 ٦}ոleS3ײ '+ha9 f,xO%?"xjX E7~*С,gf+FjvOj[PnN;8LxKf)%/)A4=ӓG":{ae`Ć]N~a0@G>*l,ګ%|{> .Go)5 KYzKeC{Me?R/d^߲M0&s͊<|5<*Yl;Smҟsv?(+0!Ců3}>L*>~ NeQ&`>^蔖ilJBRB/iugY)<#Yn5k@4v\ƛ*9 B79"z~ r@Oд#ay:5:Xh`5"R9+كٲD\z 7?uQ>FiJdQT֎`$Ѻϖ؝܃*9f#6:Z۫,Hjק= U 6I4Vsi{ n_JSS{9ٯBFjXY\z L216i+8~o7a% gV#Zue -@ ~NՐ{ކcQؒP=OO<5.YzlYblopƌ{F i9ĩ{f+'a 8kƔ#;+~ǜ)S>^6_R98?qOW!9_ 
ʕZD+;!ΩϯMF;Թt_ bwb8lxX '> stream x]O10 *,tahU@p$ aKtpw'y?\gG:ikD&X-@[L+go*?`3]ğB\ʪC5-A!E&b]U?p,M*9K7ɥҴ43 GpS~endstream endobj 213 0 obj << /Filter /FlateDecode /Length 4144 >> stream x\n$ :Pe>g{`ӌJ-7َ d0YYxƀч&RdbccG=Z{y>u:R~qWOex0|,Û2|*Oeؕ}sq?#jU'Ȅvyd1 {euXy͹k|av+0a3剔9,62С\:.;ΤK;7F>e2i~yZSY>Rr] 8;5΅u,sau2nq/ /84e/y 9~lb88-ƾ_WQpLG:FOQ\mиʔ ->Ƶ8$W4(!~d[z8CĕNMk.;@\7]-&E&;Qc;͞ݜ;TUcN_B \Z4(Ip( ٟh:҄!+1H]g RÍKcfab/& 0V9\<4xζb®u8*|x,0y&F+!lM ,Bx ئ6.-`7KۉEyD)S0BpiPFͣTRr.U>$;$i_l]ߖ# <tjA}4D2C~7)cd0ͨlM~b}!ae*iT:ff@ OT) [){a6f$ È$ g%2$u0%"*wMD>#ٶUċ0b&8Ly\zfC=7x2GOs+/pJTvNbvYh6h18^dPIj6N{V1Uz&jsrpO9!)aLO-$F\x^UPL.6j(3_yE&C*IV3̚D,C*TFDlmuC=v:O{ Z< t4Lvp{?֭cS̶-$,nװ3Z5ᔕS6+NGHbfCe2IGְ{}VS !y9(+;OHc4 2"ǩNI Rc(p,O8lӋ/h}9MFix"a>1$FT4K_>FI6p!9'pZ@K $98븆U[#/7i48&i!XqHA0ɂ:%RFԠM E^\*("'}jCcĂIaGR2?92Ay~<;M@XT:[f44rI܁b'Fe74ZLkJ!dl$vЌKB{YơW:e3myIHRZGHQJ͸(N?QN=a#m-TOLdi,b5T!+u wN4HRhJ`QTqn Y4dHvֶ&Kf"IYZ g$Y8txHl-9zM]Evqݎwܕlһ2$MJ 2Xg:T/*Uab9,ȣsP'l.mh}x6cVo(&鼛& ̧jCa>hy!n7< ̋,0O(^.rmjDCa|]ٝbړ7tEG pvL 2s҃zNmBԟw{w ,?}o׺Evjc: AL+*Ǖ3\k΁5]22w4DHNFXĔ F}b@r*qQ#fEi?eCZLv5Ri6O6Xvx=ҳim*1ZY.ӵ'{S bc-u:;/mmn0nôjTtVH;%o~+\@dA:u!O{nr[[TYKm t PJ壆]=Uϡk"jPR VN\&2]J R)0ծCIҕ86LU;Nn`Z ڞ=F%"cT3{r1^urT9QL|Q;+!xVw8H/ИOxhp2[/ [f1vx)-UmCCկXZ`4cnu1IeZ&)3,xٺDߤ{mU-"?Q>vTŁڇk }d"r |a:.U>v(L \ȱ ?hxdiAfD$˸o5dh'8- Zr%n^B(_5mN,GuV()m;aΩ:d8^>F&.XDGY *z h9uJK}j.ܔK gj^d)QdzV`R%bP P[hvIpO?WZl MmEJ>C9ߘϽNĕp%=㣷 ĉh[m6ﭲ`!N3D_޷=輶f,qHRPuJ#i] ӱ{ +Gֽw\i^(^kmm:[LiKD U[:ՍB=`[ԍPlÉuN疾 0~We} yΝ`vj~pKƦLjL0|ZNr|1usH|N*粙m7UZ>#;x̷Ik~8=|szwҪendstream endobj 214 0 obj << /Filter /FlateDecode /Length 163 >> stream x]O10 PXҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|Vs^[+x$E b'Φ2?M%K17MR{;RB|ISendstream endobj 215 0 obj << /Filter /FlateDecode /Length 4750 >> stream x\KoDN9#ANH9>Hj)ʤGS3]S,3W_Ugbg+^޽ݿ8ۿL<+\ޝ^ QDyvERy= ~0]ܽo{#| |.^|mO79Oqs|]ݷ.:1.~{> :77s1Xūߍ4QQnO6 ÷U!8| ~`ykLٍht6vnRz%vcmv8= [`߼ӗ0sxda!BՎlo ;.lvSk=@^n~OlGN;Nn} r#ƃŃ}z̢nj7}Y׵y6IooYɋn΅q-d+ [&R$%EiDO ]9S ;7;:!J}v.`MyBDy[Kj65ޥem^J{w Zd д8}A[9qkn>-x`eXV6o}ΉHm-"fy(!N&b_VUVK.?bi r4tV @!:ܵ `\B 
l2dx.G@C5ezYxGRf)N@ cK0 ͪ5EMIan%z >=yLpx턚پ_Sz2D)|Z}Hy~ fO!MKaf7acyity!xy圬0JSt_g|tQ#0քN7V',r]Gi_,$&$GwW,N#:S1|X~yxJ3խ|R1ЦVF`[DDt"r m( H#ԴU6!;ѷל`3y6F@Yh*!I^~A !Yjx,s`Lyn'm#21fc'q?tF RJ9Y XB/yejeՠL|>1YjA[+`4vZP^A[`˲B~7vl?O[~I ?%jĶ ڌ+5aIЊB /+$xH318~˖$Q&D[/U֋hl@ ;q$,N9)s_Y,j":êNiUXE'LED%`.%tAd)E48ƺYZ x1PN 1,u#7k|yڎ/=|ρDwR(siDRˆn054%~j7wW'mW5F)!ľ7>.j%^N4(K}d)kp|7IN(-tƚ hˣ@_?s]md/Q]cŭ~A$*&KerFC&СFL7(7^<9+;sl!$aqb¹#~K٭kӍȌa_0){AV)1-ض H&ɜypDcm6?Ч!=TX m2ǵ:ƹʈYgsR()ax{k)fԔ ~_QW%+ud-'c1/H$&ZAni!> .4ђlt`a;C\&d* f>I׺92 2/ Z޽&]X0̙Ӝ2t|!wXI 5| <c\ha C,3cr!ۯм|ǖF+9b>#jBR bዷ>XAH{ f}gx%9Hâޞ -5?;3-:UJsߡV",Xh 򌐫d!X "L9MZ6(% co6SơLkn&/DkkfmWQggDqHY!8q =&U<NLPv2Q|vz~j? @ +9 GEBf& #8\JLjyEus1|{N@VX9ZdkV([A-^7C";cAFZ) gZvTðx9rMI2-d+ID2)!j'A"K ݅"1C4ZD3ePpO1O%n|G%׬S>~5<ǣO+B=OTj@5,UaH6:LY;ޘ^6 Ў(e5]Ȅ~SUoIx,Y6=f&@$,D7Ӆ6|ʮsD4]蚔bþ`mۚ[ =uGVĩIWp3NOKkjbm ?usA3ylUF H_tAG(Mːr7aq%ƞ"5"˅ fr:PȁA(IбSb})at01@bS&hXg79\r]܍/Iɳ@S=]E]a /EJ⅟jOpxL Y]&hV'+r|[pS0eQ嵳m)j8ʱa#>XՇןz,[lS䤴G|Z{%I BT1SNPHk%J%tP}ԉ*O uPJR LE(t"O~ׄڃ9 8H>+ KQ"83ձˁ=G*}|%Yq/MWw#cÖW'ܲ-nq-~g9pM[a-#jR삅Z*—i^! 
ͫA FrSȖ,8HtBI:e(|csoim3F.=d/ *4Y3ZFpV–f6}[G6zZ\џrѺ/~Z!ʹ(ԗ AXCLlP$?=ҧ'ސBfiKWZRb*)*Ӗ)Cn%""BV,d7^yxX ";Ӊ$ӯ_'j78WEf#w/s L㨲L#?%o,Tp+87%FMYGHkr5Ѿ\d@Qeфkvk?;=Cү1IbO,[Ѧo-, uk#bˎLj7?{7 5C,Mh^)~%@0۩]bq53'%䗮S鄻$LsS5ɏ 8g'J/5NIYkc TR x!J4)endstream endobj 216 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 @ ]ZUm?eB:gׁm_Xց$i, x`GqS~endstream endobj 217 0 obj << /Filter /FlateDecode /Length 4415 >> stream x\n}AfUK}I ) %eeVunbAn^U>> v~?^LУ3_Osg᛽և _68g'{[鿼QʯK4_Ǧe̯|[y}4=fVoMi|!RgڹyAyYKs(Ҽ)ͱ4Ϻ/mgǧ<#z~= ^s\ib47y\E'PyQONX0鎪tG,()ܹJ`G\w7v?J2@Ѿ[VmYi wQg]51*rOFgA:zuzYTR=l:>|dV4tW^ޯmjsdLMVz+%cABt& Y{_]V~c֚IgK b uWN4_E@)nr=vR!E8d U-,`hE w$%}Z#\h:/+0,š9Ƀ]V`r*igRUǛ82͎iKoZ MznhǸD&\#Hi'p1,p`؝KbU=Z+s;Mjki\~6.`$lQNW poDKڍk:-N0MNI688WeF ;hh n᫾ g>Ո1wZ$:7)^Fv\(2e.̭SQbӌ&ncq7<Gݮ|BíN '/vt[:h58 .Q$w& UzZE-hl_LPȣrV׳ C} jIN{{]_'KB)_NKcLXOsFrH1,uG@G!m62ÈĠK}Als E9UQ 2>]Cw1ha=ioX5[xb;JGr6j7xǕc,@K P%o?Oq OiZ- i1'ӤD=o< 9kcgQNJ LV-a&9閌57-2 Ћ` d%-`Fq005>?xw[^3@!-ͅ [_d u*C5c[:0DWИ&?8W0֪xфOkʑfғ&KYh0LGz׏v`WXG1#Gnc/a"qSGtPrU:HRr$sr ]yajRoE53$ii.MrhQr]k܅0$D:!( h@ pZ?vU*͓>uUJnKT',ݘq.MQm0 JǤE֥g''H#vL&Ig< P̓#'jL^f/}Bptė e.lyK)vq+Ϋ*;(KSK_2b$b\&wbNٺ1=W K\%Lަ^>Sk zlY-qx\t2zu\DIަp0(-MIS&=M>BMSAei~U_&d wWLE+ob(;4S=Cʃnv9}<Ui7m9~]#J ,k#,!r( A?ƻcheDs9һMK=-Ibp4SGfu?QAk/0߳ e, 0?QnduA19,wf*Kb4BpGǚ\PxmE}/Я@\|m?D>i;@q D{ MlAMF J/&Z&7q4c2 0}uOWA?aCג5D &FMT[y-Gb.92gigB}A4 Zڭ+.)9eQYpfs}bvOd# \g?/|zƉW}Rch^WE">MyFDN` " ԡ|qD5L..3B g.ѐ}P^իuh|lYw +:O.?Ae#vOE3"B@ãMQ2jk?n&呆|ǰe/@4>7s0+ba{ \,9Zu2C<~g$QŸTRk\zTT3im(W$Q AdJ.t7&v_ߨAUNќHPRJ҄#8"8硳Hj/H* m; Yo+>8*oYu˰{\z`PvJO7]Z[._:4%4ՊI+4WE)f^M"&]T_i~Q}vu!U 7+,'Ĕk$Z7WOn!0q]+7h߳m=+}XߔddQ44Bff6In9*M(OGpܪbNj[oR[C7] ϻH䃯r ƿVs`( V.P0'8&|B+=1P,+5ުlSISG*tur'JH/BZT.**(6XPq)+P!nK{ pDpEKDRI5M414Yt7 2i(%^%errS2!PxU%ؗA(/e:J#pel(_ɝxdW 3q1D,܏1~uHϷꮌ .d3ֻl]"O/Ie"&>mwOd'ÚLҜČwxʵdxUMQnk-,eӂ̂4v%ŰH$CƮݤKTTEv-|sQ nk %K5\D0lǓU,"_ }*\!GvsN-[\#T&/‚'##dZ;ywX%πj]]D5n!*b TWCjtNFu٪[\ exM.ΛA]_ـ.T>n۹($ v]cǫ: ՟U1JXXUy.}8 p%5_y90XfjM:1 cD~- 1@J~'Pn•'x;Zܔ&a ;^"ms!^x]HI)} $z'fp ӮdW-Jzy_"S-.i݋/7-tsF~"fRL-W_jm?1N[P0{] 
58Z/ɜyce2dBM?QVdSj+xMd爡N&@XaDw𑤎hV7> stream x]O10 @€XҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|VS^[+x$E b'Φ2?M%K17MR{;RB|F8S|endstream endobj 219 0 obj << /Filter /FlateDecode /Length 4327 >> stream x\Yo$~!O#>b; $PfuY^I#VSE"==Zہyjt~aOG<8s~w< y~|vu_VYl3gwGo68𵳋V'+o2|,0O? N 1?NS6X.jSn`_Y'mr+Yٜ;0&TJ o7J>a]齲zucŘjpCY!^;}Ȏ+N|=Z·Tz#QZbA'XU-Gz7ܣ7ʨ"Pp05ZݣGa lc|i9J}Y3D6kS){xM4u Z>u L>$L~1^ZDH0VO+ݓL ʘZ ۤ{aX pq`hQN_TNkFernxp&i@!cidká/&+4zt&(ŀpB)C{ONt5zf1/~";W,\_cSmG_Bh|7$GT,;D ɰ+Em" cc/LiPX*&¦&c7r8 D;  _&G| 1ʷ C1*6ʏYbC,>rJn ғ:UEc0)^ē,Kc!6yi׈4g[Eiz.S0W5"FTIy(H[h< ʾ豋֭-)FKxmBD[y%dՁ.AXИt br䵫4{%٧}l>*m758܏EXY=;qrEC+e*&48|,7i*6Hf#Q@r! _ O9E׀45<" o\r54C0| bY2E~YGm( AV j2ZNk_^pS%z4!h g'əBIĠd{[7⎯o !qzyct?53SJpP7ttSrYӝ@LAqRa sQ_L! uM0kyCx) E7ES DKb%rT|]v׋ 4:^wlFRPRˉ DK\z9T[v 񡻽G d( hWaXguBr&Ӗ-Ρ%cI;|X}["cW }Mb9FCHWCrW1&z]- X^(x][SAH5VCMx`ݡ]Rw`<5rXOM+gT>T^^zm,JavR8IΟjKPjBw0l-i5Uq_嫛\)z3YۅC4Nr}vdzIV/r䣻ؑk!-x/+$#^@ng@2΄⭖ܢ}0!R"pMC4{rĩ}W',C~mR-<n( i=1EurM3eX#u #;S֣WN>'!},CPӇ|KaJ#DY{p6jrWMj߄tE"y5kԃWa):0H6R\TQ}=s'3Ll@ $w$Vbk^r K6cvڇUKF*!&$T}`ٕYoE4n Ci h\!??0%r@ňt5\S)<΀B6UMbMe 85um./ӞOzLSl⊬Bϔ?dJf*T!m2u?4U%NeMb j;4vQן8R6}~Ao⎐Km[ 6:9tMtW*bc)mo_$T;rbX 4Bendstream endobj 220 0 obj << /Filter /FlateDecode /Length 163 >> stream x]O10 @€XҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|Vs^[+x$E b'Φ2?M%K17MR{;RB|F5S|endstream endobj 221 0 obj << /Filter /FlateDecode /Length 4517 >> stream x\Icw#DK 6bF!@&M/ꎻ[tLSEEO,VSdXW ˇ_hW3v:{u?翻 +h󋛳U~nA9s~p6 S} :fws7Jc)0٪ _ p~zWexSOt/E:=X k$7Rrp 6(ƴWrϖH[1[ ' 3Uv@&->um!S#WnȰ0/7eLAp-/ M@6̅_sf[<8 ט)b& 9 Τq 7a[fU6n뛸aMaI;pFi7[υb?d\q"EF̖H`G12y,,HxtЪArfig gIAIBe07\)C%ĎHQ,`k8EeLAɩdHg=mQz !vj1&k6pFJUu(NN+^}qG}ЇhU]XQ@IԃRY@+A(h# u_j&n;jy`sqo}~'^Y 2 er0# Hq+-+DS5!r`xhF'׷9%I6t};1y´i%j x\K MCΉ\ޖDւF %>Eo۠ʋptR=;Ȍlġ%1hr lˑaM312eJS wb 7ti7,]vM/DtZxXe #k۱fRH}P=[F5IG$ ?M;ZM`JEgxFtv=e}ocØC`V!xjQ|+1-*+Wz%yY'w@(.(ޅ Hd$0)A-Fm+ _ ɐ@~v[d_0)Se{(<4g>JX囓1պ DkF :-Oe؏p~X?uQZC\01٢E4IYcf4V4Xm tPx4^y@oA3$߰3L>)#+5w@H'< &-jS;#j|m'M+ah+yN̘dR> Oist}WzI80)k-xI]ϲj1p g&GзF i %+EDF 
Xj&k~p˺AB:]Vj\D2?){}_'GUk`:(.<((կMP@(E[U3S$f\v'ɢ}7VH$>&M*hS>3_$GU'R{K$I4&2k*c{F$M(w3K$DWHhM  <w9X@%q!D+݄SP 1fl BO?e#Y${UJ$mV8#kT_cCpUs_ 9s4I!5]kj^*ۉp1_w Yߗ7e2Z=R*U yMZo{,(,gJɐMni2iHOuV.\U$L RyqhzfN)ǘc{?o"mX @RZD\1i Jʩ}I:dTCy]\rX.LkOc'ȯl@jR9qRr)*d}2?RUc]K a%^N6 Wd[kxr%/8;4)PRU Lon֞ 6Kk,6"i"-}HC\ ),5( vPI>'g5dTӝ]J).gvZ$jW3_>4< MpV0,ܒ-J fH0#{ŚS=w&Nx=G#׺P3R6$^HSL|U20[fi?>L3>n@v_s~1) gT2`G[2=N>NhVˊ}^JN([ hMs)8XwJdsVp%NJ ѫ9ӠMIyhRrvjQ?Ѵߪfd"5ǏyZ)"V'Hr2o-a"NC'zV&=ZLfvN/.TRcƾt'lc_XwU'~TNݫ45ɫ- ^ ^!LfOC|sDD_2"-w`1[ Nؾl:C{*Fz_| Ux}=a]=-TUu"׃o ?LFKNVRR t*C 76" ,Hwan '2trp;zoru1?Zr5jB p;"ځStu_LV)4. Dh Ýmb1, uK.\ҿuT~=GfJWtvRW#Inҳz՞Íjm==Xٗ~.S$A<ɏl}?zq@(J-B^/yeBx\Sz6ЧaŻ 1a:DՕѷw۲dX5 ؤ,~߷ΦWء:~񹘴Ŷq tg6;B+ߘ)Kp<~ IIoHiU~M4 D沸c%<0?b^Jt꼙UtV4Ũ454*YF^chAtK">JDI5ݽ_=<謢|P%8~?ēݞV_Ӵ'wSJFBU0z#m*%P}Nʺ5R/h/"YIMlAyxo_RsrY9|ة&ÿ^'9_GC9387Qȯk4]'=ijzd=w=ڐ ss52gHendstream endobj 222 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 @ ]ZUm?eB:gׁm_Xց$i, x`FS}endstream endobj 223 0 obj << /Filter /FlateDecode /Length 4277 >> stream x\n}'|, N~ br`#qxY"l٩%92 ?1鮮:u2XXgGovGxs<;=&(O(<>unWy`*68,} J'RYy|To YbE|7g̰'ޕE(j Ymݸ߬N}~I-c,k[QNm]Y)lǵys ц0o sߕ_%+{cb-Y /D ^LO]Ł899x2<+sYeȴy "5&HZE׺ $PBEOD57 jΓ5٨W^V@zEP <3[4<5$%a-73$NR.N,N(![-鍟7z4WexE>4$@|,13S6a c߁fWfK) ׆(RcVg'DQ]޻OG}C/„,uJO%޲Nb$(a&z2<6|jtHgS\;ӤrvН]4msÁC_ X>!P^.qp?'%{}P{:Zz;m;FJvK|^F[ c;:Ds_VM[ΘXmXL1(@|,aV D*Rp&xC2_)jZJND8;4R㖹txA0:П|:^2d+C:2aLQն0%Z ~lis TH#+4{wd (3Ed[ Xj`=%vPG'6,^%٤x_p Udd\XxmWQ%!<4VD$wXى|7y ]da.mObP' [)B5 5DNvh&ĊǢ'pj ,o0e`1'Ty}#BhT B Pu[90az5m|g,xjUK=G  xZ= #Ty8WE3L6˔^v`Wni:G}5%Ui"`7H廓t]ʰ/Û2.û ʐHdWM`O&4?2B$? 
|  p8"#@,Qce6M6k{Pнft@55͌ji,ɼEp4Q5ɶ* Fo{L~ 6M-y^Xdµz%9F[ea쎹y& 4vVn"eȩ`6-~km:+b̝60bIHJp>ȍ~ \B7ZTjJLpvS-Ĕ67Sޜu ,Vl $qT;K-lUd$pC oҎu:%kbM-U@&%e' \z 0t2^@?LD}e:8>iiIk^ so޺+)VS`  h /8re<|"v殝v!%O1dvvmF|ശ9TR@d]Vr<{$8Suq, .12*L;.'08)$dˋU _>1x˰EuFI8Ƶ3w%2ڔD H!y1~=Xx%F7|Prf:mʍT)>@oSӹԆaw#_m}b5hGYm-ż 헒{k!Umne1{}ٰ]<cpeO _51uY@fM=#ΈH<3>QUt XK ۤ%0KN a_]?6cQfuNaoeq]-`>iRűؠ :˫cmO~@Vj4Y*]03 \j8'`t*p)54ն &9OoacoϷp?։DUޅjcWxU!lÙF&6PQCD K64@*+MٯzcjQ'M];hz8?/+OE3:.Ku`s1o ^g9 l px }uNWN2rƆi6l(C}j&wEֹl DۨPQi1u 6@=)cz*بI=:r`)1zRDe% m=-Iepm?vBV"Yȣ,x = }5^ 5ʰQ6l$wxVb[s#`6hy[|<,KS!f "m655Mנ7%C(YGx+ o֪=s.c4MfBfq|43vgw1"tc yԞĂiդ'ޒ|U{˔zHp8$FK:U"l[E(wI(䇭cu7l6s*ZaO ,qM{*_۽)Clw R\`$hƭT|VZy  (cR5%|,k}NzIEkcZm8.YO%WM~ 2? ʻjPeL#>F_1mz/^\itL30˒'7^]t/ ObTA4Qla~cB%"^O͒Ot|?&q /kKW"Bf޷nGñBnk y϶w}ӓ/uiNJ»9I?Px,$9)-fu.NqRt,<ɜDA3w1 )@?ܣ0"-Ifҕ@*Im|r(2ϸPT#l@a$TIE08R5SPsږnVYbMvƴnX.U]8AzتչMg$08wo\O0Y{^1`qj=c?"r6s{m3Ay_f3b]LYyenMߐ<4PpWug"^hm&:\&1 Cs uJ2ϞhRʥ1Epw\?aL`ʍJ+ȡПX:G?oelY>O؆o܋s\"]I1*xdʀ-O'iH/\endstream endobj 224 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 b C+qP(/ СY:ߝ|pFcpƲ- DeQV-Ɠ剋BwOϪ.> stream x\r$SN2>XAa+ACskaS,g;@ jBI$ry9g?g_;ߜr?Woο0h۳)?r`s제9xs' :ś%b]ȷ91*䶐o)83*=XZ,3cjrŘj~RF /8Jx<+&?xfvfP\`ڦT{wfBq%ʵs>ׅOm9.iLE{nu0``b wR6\2Z<.tn F(Θen&\^Y&ۇ88MS˻2M| <.n7dI8`y-H: ƼSq0CWc<`8׷N( 8ƵKBs2x"1NI#Fb^*<8 Tr.UO_l 3Fúȩt )ap8Fh%D#GXC$Xz|N5]MWQ(}imF'-rU-O"rWع,QDz;Dl܇13g A@fj jpI0 Q@0. Bخ}̊D_Cyb͖n1l0a2$Fܼ@?. ~a<­Sl2)ZT>hP$z$%!Le |M`hPYSav+L|K܋(>Iun 4qV]UWr'Ku`Iȉ [yȝn 0"Z g%kYmhP`Jl SWx``HlIE.ُQqDKB2>ƥ8ѶƫR,S -zOg7InC-7z+y2tײ[`ɸms({Eo-A}I( [BwǀZƑ!8h|KP2#vk2 1qhdk9!s|wvqh=qcX6VT IkxJ(T>[Qɭq^P`tr$Эq TS@ O5Ԍ f4w=5=]ȷddrQ^V \cR2(` >/w/& hѨ͠wqDht'F6$+t6Ʊk,:aQJ@ )ZZq*3x+wQ'"Z}>ׄQq| fδ +ُ+}{(&MG뮇C! 
%!<=8!d WNy "EB^.I}wHuw\o ]kfF]pP%؟'K "iƼuHklᡨ~`9DF W 01'U2RaKV2¢g+1!^wMf1 r<|h=Fuhf@9nIQ Qx").Es ZjRٽ&< OX[1vLτx9 ɾn1or+HeI _ЂCWd1Ĵ2APaHcn wk:_}9e>|gFW1 ^4QR~ïpTXT-,ʠGEÒu{Ұ/K4.)9R.kUoD3?nKc,4nr]U8w16UB ,1Dţ,MdE^g1_ 4j3h$:KY:$έX4J%b`9"0(CFU![ d滒X$[ \J?G.E!/ mc8Jq?.p1 "dm"ѳ$9n|V8ݤb櫢l &1~D-%3mj$vEO6Ft~*tO*W0%&*!-_޴ z U)4SS2 }iV qtO9pkDD\>LRPw]9}TG.m=N< 5x&0N'f+.)/R,Ĭ`s!}:i/Rh`Ql\MjYh6hBm SNoî4]U']ld-V}ݙU)Lq=Y 3ހѠp&YG-GF7q\MnB%j:x]aCH*@­U h~1 _`ǬѝQeG *W^mx, )UƇ1lmN&tWmaIYzhU! mʊJg$khskJ8Dͷ4u$"k Ah=dXqyXB Tq0%B zKy}a[ȇC/<}6ڋdۻ }j̺$(f$Zt;zX9O_C!o|$d?CM>{a)ƂiTv$6=75) FNԧN2$g&#/gU5U><u!om#m_O3kdiӼq(>5z60.&?| o EiGT-2*"]XO+oz2ccM]l6 8^h,Tm{dxnnثIŘ;[-Nmg֏q';@ Pfd2߸4>dMA.r5_aTP/%\W=8T uti`fgll({-d@RjZSV78lJCz[f)/;рױ|ʰR I_ӮS`DNmq4)%ᓔ.H?'_J >%ᆪ/ުf:js;G=LgV]"8QӀ4ݶΨqx|eS^"B?v\wxWVcA6ZB[?v}Ɠhi!bލ. [䮕6!ƌ)l+/ ~蒄Z.L.57#ūȮ:GCN:yYW]FGjzƲ1m!"sGX˼q,D?MaJO ^٤m|9Y[կ٬crۉ#6Gn}3:0e81R15tAog?|5;q+ϠSgUP*əFUq.=W3}e_PxZ]."FFgX =%9n~{qOCFendstream endobj 226 0 obj << /Filter /FlateDecode /Length 163 >> stream x]O10 TA%B2D! }I쯗+(n H0hYhqgy⤼M2:B4^!#(֘N?i fw6MQ5fh*q\B in X3 IMSendstream endobj 227 0 obj << /Filter /FlateDecode /Length 3782 >> stream x[Yo~W#?V`  d# }]#ҴVgdL˻kgDb7;}ÏOa0ӫ&?ʹcl9>=:'U-^:8ruzŘ{:Rܹn}-cu륷B 3%<֞{J^qGxȼweNT[B=un2 oʰ/ó2$s7e ex\aNR^QƓֱMo>tm(c_6_a섣Xn{)E7Ȋ$HaĈJF•7q F'>ޙLoI{ UJE;~Lvm7]I,=Cx)x 8N Vƀ.tAwR*Ìb}<9  t#4i$)5y2ӛ QϔhpNppWṭvOxsGy"Spo}bx2‘ql+0&w=rIC WqKj gH:Ąz1h$4.)*}C3̘<>)yP$,dĜFkC>ڗ2m8*U71 +_d.\Dg$c*]a5 uұϽ: `%Qぺ hUvJ yyuޭob>9K\D:411+tr8׆:bb{b˼#Ǐ'TX&*;cMKTqE2/wPO3%Ȇq~!2;\2O0YCq&xET)KԦY~ggRH^ /t}Na9[th s&@MNjӍBG4G tW#tj>> Λ52u IXi{x'Hi:^9ay@TR  ^2'MV 玂:CY[mztݣT:xC% Z&7$;r5م^xq hHnBϒӕsk37C_dr_s.1{)/Rx[|hiF*[!t40`kZ,kAF4RD2,a;G4d 7 Dqx*-̀  >Qm<{gҸub #G[@m3/ALUPaE9#5PjTiF)%Ϛ$^'ߪ1urd[ݧm8 8r8_`Q"M|4ۻ؏GoCى(Hj)AtV֡r#,%~Ht¼WM4RD`Ip(Hrh(؎HBUhB20 DkͺNRkzTSId_ة*rI4y{|5)"Ba<&j/0Еr/>/! 
S4~"9/-JK^ $}j @J‘^fg꺼<_i@U3[L37L e WrQI :{ϷE-,p_eE_ۨDBD'kJSצ[B''iu7FFynf* EŬL&lJ!*ۑbo#d#,a\:yac@CМyc҆e9-C NB٘ Qbvvϰ'D0 m6W9+\]D F_G^z^ybi(&}| N]Mp>dwӘ )-^H 桚F7r2b>6i lːt) LT/ AK;9؆(7{P|b5ړIBZF9mI̠S`@Y/N.d9w[)BW(5@gDX U"D+{ m?irXU*/2Yen3}Q3?q}?•s$XFvJ``'"`kkvNHɯU?j-^N59&5a\Y˂ /Qkli!%ACe1{'d@&,i%T/i"\c۩kByqG6xTc$-eGJط8W;C {NأrW@Η٪rdUVŽ92~f5H&|~=_LM7֓yviF5H+a(F1c*`|G>2 X0*=Ņ`npE0fl8jXcqMbx)Tݛ7񌟣n6\]"U؀yå-R.KhwR:1|$0bIq'p X sB oʰ8cHT5o޹%*oMWR,\Da1BUJ%lAJա#.{\|'|w+aw[`y$א>!8?|% ]5)`Y ,/z,ç4 ip 0&Nvl+z(R|SRܗQ۳_'R= m`FSӆ؞RHYαRܹ6 H̡ыȷS?/$X+>HNoAډĕrЊi]N ,>endstream endobj 228 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 P ]ZUm?eB:gׁm_Xց$i, x`HSendstream endobj 229 0 obj << /Filter /FlateDecode /Length 4142 >> stream x\Ko  ,'~-$Hrs)eQWNUwtG6qOOuu=z4?N S^ݞGޝه3=L:.(/N] lſ ׼ vqy+:clS:|LC/Duux[vϋ? 1.~0Fwa|!:#fiZw2VT(8 /q wsv`}!A b!DXZZ~~-YHk: |Tg 7:]>[1ZHrVTCe'.me#cyŃB00>̖wa z5: څ|:F-l Q/ UsQS]n3l#H =13>g#B@S"Ke!LH0 L{ei BX[HEy2 : $%+=ܘ:8`Yg;NghPDpSDVouxk1KJM%;:S'sWb%pWNh'h9BOUVvux[KY6Ը Mͪvv۸Gl7Qձ[$Zc^;Wt}E}[Z1,ı` ̞5{ib77>؆RpP\e}rz oE6,e5rA M> u f6eR*E XBu^Ou66bق`x]hc*H 4c=UXtjڧxjBDP!!ˊ/)NPXu8\׶/p`0ܖ^J*Fcq>bDF2}Rؑ-j",A4W >Y/t- d`${'@M=Yv9L"GM5f<`X3h{VtCׅO1'KC{ʿ$lUF+bj\ʹ[#JM\@*6:a A=g['$t/zw$=?!FE:cpo:YCX&6Gc@w8 Jd/^#jw+e9{MtYĵL1\* 4oG}kKcje ^ѸM2zr%nTqU[8@oZV T.S*\Ock *vQ)BE ȂT4uoূs 2jPNutb#/ ZhM'bA6huW08FLJ0,NSk\Nf|Wދ㠐pf lB9M"XBk.I?, <[R6d}[1I\8DC.Kۥ6vdDjC]5a#O>*a38}\ Y%` rtB5 Hoi&@<]Kq|SS"3rՆ"lMt|+r|ǒE jsAk@X˾6~757ka!{,Zw$>lIޞ`G襘%Ɇdd HĬRzk2j [M Mg~,Cn=rWbRхÉW݁c=uB) Y];ԲH$ɺGSl+9]ĸ$>ZJy Ȑ/MlB?wuП=JK{$S S)#t ʚnaya cMÚO!NvI"=QMsMS1\-lJY85 W&A䪧X#Zf+j(Â\(pŢ;Hױ&*5YoU%!}buy޴U2MIW4bj{扻=P&D2wJ @V-bDR%c,yG}žvy'.)Ѿ891 Q JMdjR_NchogU"۔=Vk bIIߴ0Yhm:z2.Ð`e]\3:H.%F!6Noyĩ\LP?Cuv )[Q dZc~#8Ʒ8E]G[+; D>X+ |!'C_$%e`+/OtI#%!9zd*{ / MP4$BĢ}-xtv\m)&}81&Ń'4(n=- 4t1=l^qGy,g9}6Ҙ2nj\f$G[fS5JHxj!/tÄAmjIF9$>rhPhW(?>nK˔4C ])M6'mWt#f& ni Ýzr$f YMz44Gw劒}5wqG,dNŝN / IdիE +U/ր*ǯi4`im> yDLm)87L p㷹[Q["-e\tz0&Ĝ~žFI aP{*;pcG`PO2/ II}T铲5X).6˴RIKK!L]IL}iӦo*6}DŽS\Y]kO 
;R"_t#g[Kˎ[-E?"M-;$v=}MLi/_Ϯ)>ͭ9GJ4;zlGU !c|05CЩ@nnOL4CMogDs˹áV>Rn֢ķYj*N(Ճa !:joT jԶKkLA ӧ/q3ero}r2]0> stream x]O10 *A%B2D! }I쯗+(n H0hYhqgy⤼M2:B4^!#(֘N?i fw6MQ5fh*q\B in X3 HSendstream endobj 231 0 obj << /Filter /FlateDecode /Length 4432 >> stream x\nƫla p8L6ÇH$IKD~ [:SH Az{}6}{|Ovv='gK B9o3W{-p[A?:<{xq<`bL{x߬Arχ)x u,~+"zImi=n/.zRAp&ht OxfiHV[aKchM:<:랁bP^n5@=HT7KUhac~q[ *}m'%ܫq 'iAXVV%xPr :C ҕvN\V$6#>y\(ggo(aoM:OuavrQ5g]i*E6׏[ƿMB%MQc^1ә|U/.+hF%G9E{E߾X%&z4MZF>ףDB`ݨPNىw7**d$8MFiE>Q\I0BZu6LMꀀSlFq7 $rИ*G& E?oGOWa;*NBV mU u;Þ!s,X;e=I7-(**i3'oT6͡XR(vZ)~ 1/#|sSt,rAǴ1rg 3;>;.,nD\ oܼK[%L5!<>Ǚx'ԋL,ª)% F %0ZCI1I 8J=sI.F"hR|dRNy

IzGi6N:B9'F&VޅS&MXƏV^KnlOC~~Lq 3RFi^Du:DEF-%B[d!piI7 ׀0DɬnkYOfsTTVo͆}F&?'O^03ZbaY0eD´oྮ[҃l(y_k#yQ_#%IeSP| "Li2 S,u}~1$`.Jʔn%_&AT8C\T baAiӳ. q%gB4lH+6$u/zejp:}EpmUf}g%rhVQ\tŇ7^NѤ՟ ENz쇰@CF+BE|M%jkC¤$cB&mGѶ8j'[H$5!/]^tv YWF2P:NdAZ|J~B\^(v}7\u&^&%`kEN)7"FIٚt'WϦܓ-gӰJ4\|\ S0qVnIM)$凮*@:kámX[k!0:hČ/g୒p&mP 4|ç"55YqLOG=LQ yڬM,PM}~yMU4Q=ryO;¨VsjAҁlJ0!XWtQa{$,iuc𼚆ĵ06<ԕFƳbDB>]d=cvt$y@:O kh0WWutd\bL m#X_?m:I'i`< #?O E6(~_A4X*~娉 L3Ʈ5l2s*erM&ƅ`&) .eiaj&a ysQ1!nV5{~ b0̠_rfZi0,5%0=-gdg5Œ@<< Nj>I4;zC6[٪ZB9Nȿb?C=9x%u}([#6 ݧ3L5nc')tt+Kـ{9hE0e}]/=dvɼIXIdۉ uدb[d|G)'kEHq$ B9rPuxed"w!"]wΤO ©'J RtbF[C7I9O&j(c{qJΔ5>O `5}>ħ[$e'}0HmUOMPrJ7]m;;?AQKNGә6MN.\Q<@CwR=-|z:3ԯ+L!}ObF[]j?Oz>(6hS݀Zn K1acЪD)lk3 *v=#t>ݧo0Y;-S&:c4%ss朐KS?"1̈́ 9[HKb"z/ڄ& !%)"DwtI]:UV_!Cljҿ@F[iS+MRITs 7iCt%8`*xE@gO;~)A,!*qrCꀔs^)tBeB9htbJ٦smJw/ۤhU3M%8i. dG!23g`3g݋DK=*"ƁÝ""Pe`o:{~I zErCTw58 BK0HŴ.*Ru c6SZ϶MϵM#fZ02v;]$RRu:Bܥ?پM{v |'oAháϺ3vWEIm^3ڬIS[|Я(Sv?dD)5Zt1]bJ+i0t:x%j] h 7Рۄ^MC@ u ]BR y66!#/g}w ?-k:k NRLcL;b^%5*.ʹfR18i܀pÍ#Y-$x};n ęI\Yk?Ti wvi}(!-NѠO&?Mlf7`RPgnJ5ކ燱V/v :m?$TU 0vO*<쐎(]4rC:_l_9[U{Pd R1RH~vTz_s-C(Y@T;Y%. 
%ު1tfKzuERBBJx3t֣ظ{b]x⫂poωAZendstream endobj 232 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 *XЪj8(Nߗ,N>~l#GpƲ5 HeQV-ƃ剳B7OfRUiZB 'mQt1 l.kSIT @s$Lg)GrS~endstream endobj 233 0 obj << /Filter /FlateDecode /Length 4761 >> stream x\Y#~_GCH} I8c5ό֫]oSEEZxKklU_}u<<_ٯ6glg2syws -4DgQy g.wv~l < >v~WKStŷ͇Bum6?Y9 G~[z?L0iyY 8_`c>,n`¯:mb:靷Vr0.^:xNYg Z_WJ !ZS6q5-}'D¤0LEX++Qu2(A͸ap?7!9!$NN:OE-a*bwQ K X_/ЀA{;͛6v4YhAOMYkqQ[-ltv7[(MyQ͟jSw ۪+A&rA*_^Y+A$\PnJ[58VqVcO5@wyc$[wc]hĄ?gQRl+4OVD,>z*>U3vGVF%It9Ďc~'d1qQIjn+dF5~!_ & Y'1Q5Eaa|j@DzEUǣNqE0Gumk^uX4ux38Κ35ʕ^ͻc|tankgM'S6k6׵P׵9Ky7cD@ Jr(.02ZIbX9`׿=u%=- tApF:I_pՇIm`6#po50FpZ7ĴEB<֗w۲1ebM1@#R!w@DDX06,1٨a Rb4E &qY nGCW,~d*+uV4oŏdף64FV$+tbm, 2* U9Gvx[cM518q.Y3C ů8ě|Ɋ=kYόNͯ8(KE};u=8Ն_Ǚ6/B9O)tzvu|X 'HfqW̤x͛Ǥ_^i?짥kKݍaiC[e vWA>JF0[VGC5+ɼ]ܲ3ZĆy2{޼Guj^&91eؾ<ש=J2 ~^rUMz)Aq`a{zr5ӶN6D<{ܤ򶉳QगgmF@`CϷzd'pG:H;J([J=9ޔJ ʳdCPhÿY'H2Ãqϫ}C+5I ryY\T@Ⱥ)g&=V!U):X,`-?YlhRZ7T~ ,]hOGzM5H~Yj'aC$z3N+ ''qwnioBy neoNXljIa8k,C<&=)6aɴՁ ΅8 +޹#,(iN hI6a/gi mzZ:G{Ѷ1.LQ@lN)G2Q Ow^|{LP¨ʕ({qaΰO$5l^%_$T;;'kb#HoK]mtS8; IdM_'Mb8 *$A'LK@No nH4|i"9S +A+Q}z8JG.;'פpk]%ɬ'Ajqy5->_glN?i5mB'Aȡb*mLjl9%abv_8ɩڠgᰜ%eCus~Z7Ba|bT߭EpƟE`C}WI .ޯbGB pA/Ys}$V JY .Կ~/L[N-Ɲ7%pQ=6➓`h!6yٞ,5L u {Qx7ų} mʵlqU"WO7IIAC-Y?qY&i#b֠2Ң x.jsGoa_ɕ- 6S&/~QI~yþ6?oj|^$J7&)Ok6j~{[c׶qUbROR*sQ#&)x{jvk\KQpi)[`%&-ec#G1C"ݵ'LxjA!1/j\2 g [ЃyuOױ1ซؕthwI?9={gA"zab<}3̗~dNz9SȳBiREoɱ{Fۑ;.~V`9@\&I[\T'U'_.`(~g$J~~J'endstream endobj 234 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 BVUAD! }I.GOJ`H_#L4;0L| p oMTSVBoh )jItUwIG`m4Xyp8I.~r 6/F4S|endstream endobj 235 0 obj << /Filter /FlateDecode /Length 151 >> stream x31ӳP0P0T06P0P05WH1230!U`aS027i`9'O.}O_T.p .}*.}gC.}h\nn@n.P9?47XΎEAmGTa?ߠ pS! 
B)endstream endobj 236 0 obj << /Filter /FlateDecode /Length 159 >> stream x]O1 y@%ʒ.ZUm?@b!C@Hg|wYud@>%Mo &Vq*+n:?`7=]/$Je!֠D4}gm/͟TNu+h.qb$Nii 83 <[Scendstream endobj 237 0 obj << /Filter /FlateDecode /Length 4758 >> stream x\n$}o# +ŀfwg3gՒZnIig;xI2ȌRIc,Dkĉ :a?a_o'׏oǓɿC4x՛)?r`제99{_ κ7p F)]_ͧZ|_C->Oa Cہq ;Se L9O΄4W.|<]˭LJC{VZ݇4 Њn9z8ƔlWbj~Vil0L rSާn7 \<#k865\Ǧ $Tϩty{zkCG\~n*hɦЮ0|4_%ܘ:ק: ̽PF5C˜(q,7*(ن^Y 'Y;׎>,bȮYÃzQ:>D)u\Xh?=Yᖔh`dnlza;GJzLq0 t\gae5 %K+y^n 4f _M-^h0\%%LyQ/R$i$]+ZҷY2QI՜ʂC2j`K KR9lP_H:ݧ_è:Mܕ-ղdh} c̷1 @"-S˴cm&bEC`"VKg"w?dAe. czIvۑ^&7‚dJƃU'EFC  `=3Xk"Ϥ{$mGϫZ.S*P0cV;%͇U-~[?tFTi@ kP ʌ̆XY{$tQVn>!NkkEzK϶ LE򰯔MNC^X͐l<i `xj00 _BeP(Hzԓ91Ժre1 &tk_@:H'L]`ee!fVI_"=(~rHYH^,Jbw`9; X@"2'6hF.)h(~$̂+q(lav_ #sLD0q3^)df5|RE4LM?EA¼drV")AjÚ-&5'jx =WɾyoWpO^xLÈڸ(Ak%풋:bw$? +a]n u/74D%_1,JY9=q}ش]6`Nnk%c6j^I21M󀭖WEExVS[?LF3)Ɲ+ݞJ!'(kLr\YeG:S[#8l?%aйS/b, 'A_!&Y)ҪƸ{3$"d qC pz2 aAeŻ(,"C!!CxA? yߒřkТu((*%}:AɒJM/>^Op,HBgp1:H(͟^6M$zDy5#c+t0ī!LWd{9ۨNPt.{r4cGS^@w}qBRcdh~1AlYx|a),W9 q8Bx5_-u}6BOK „- `BKv*\Nft g$_elɁ_!&J)i^XDs+ⶈ}-Oixqzxc%=& J0M<Y^GJ(eNE||1(Kl-,r68"[\JpBS#F5MQ8)y1ݟAD)(xipNEG3. mo+㈗Juw`H'wՊK7&N(L fJȦ]w[2t!,;3?:.1;r* 5])gɐu#I*gh+U@:qhZm2 mتd$j9 bUk=erQpoӔ8Z믲&IM_ҏlHg֛PNW+chn~{SM6j2:l?6+ղԭQPrzK; T-dxq 'r{P8kULjru) 3WIZT|5ƨ)XIPFp&~A}_KJ ^i/JMD/O1䟢K4qd:H8W?*c ()ri 5>5 ^Kp{ GXO ~LƴME񆰷p'i+5M >8C\/UqnAX>_/ex`66'n#~YC.} 3^ O s=GkmBnL~Ox;H?6-}غu/1 c $"&!2xe3baHb 5/ee6,\MW+r /9xY@ZZ3O #j'2@𬐄\gN'#u\uAE&ha٭?7}eWn;  藫LR|7DU)@L_ѓ<;%{W/E~s+ Aw6уMp􍽃d2>,Qug2廮3}c)!(/?"@_B'DЦa~}(QOG_9l g{S% >iw,Nr1n&;8d' .2k>Jikf[FU/C(Bd|eJn}|P8Kߺ "( D}satßE8; PBÿg endstream endobj 238 0 obj << /Filter /FlateDecode /Length 163 >> stream x]O10 €XҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|VS^[+x$E b'Φ2?M%K17MR{;RB|GtS~endstream endobj 239 0 obj << /Filter /FlateDecode /Length 4490 >> stream x\[o~#<)}Hۤhmi֒Q,iUKrgCpvgeP̊C;οOYO=9?nO}O?7? 
(ӳW'Q~jeϜ^9szvswuSxz:|aF)ʰ/mZ| /gmR7ao/Wk+ƴWϫu7;\}EB;g^s/}wW\jx5nƬ5i@ڹa)2/û\es&Ecpp޿,Z|jG nj2ːL cjs#2˿XH'eH*Aԕ^yI*150;8a3Zv(9Ov/Wk`]-Ԧ&+έSFp a#>g`^ xd65j =ndM,8e8w  ģe)&QQ%á~f%#3Fh80ۉ ;01mǾ%yX AwX@Xt:,0m[b]^H5A-! &2H G   GDsx\ 0YM_EaԈuA4jGT<>Eޝ^gCm\ieyM7Mvל 2cgό'- Û%&YEk,>Qk{ +qdoː ׶V*[Ќ%5L$3&E$-SPZ |31~NtdzeuVNEIbX ]ݡUkeu\q^y-DSj\! ƣ6I*MklZyT;ց7]/VSr+Xi&U"SU?'qx'8s@Dov78j2NP7Bb,v.8/≉oLv/"W3|,,u`< Q^Q0BXÄʪ:JSuT\}l9f袜 E`waaL"igR`|QY RJLHoH0H\C ^kk)eؗUޖC7͹2.ߕee +ï3^ 0`BYN0A0uc RBҳ6 O̯(G§͐PRD2`) #c@>PnL$"yC_ Pmē*/%\|']~>/ˑ7(6j-Bqm#g(iK-;!>/CRx۔ܛ' 1gRY#VQzo?},l}_d! Zݜn6,y82 -GchVrH9i( ru)D!D,TG|R"\$'CqGKM9ғ;a\!jJZBy */WCñX83mxop]Dޮ 'B9IDw!XmvbxP7ݤ `ybʳJHF3mNCؾ=ƼH /&V@|WDq;@8m9 H'NHhȟe9`n5 bdc={wKMbb9{YbVg%AY~/*~YzJDXcj٬ѩ%RbE8{9uugOv\{+qBJFhV\v]004Gd&T&?~- 4af2Y%rwŖhrr&;yG~(/}Tl XvSDۊm}bxQ$~ H)h!ڟT3~ eiiƠ N9\Ω> D`=q!fs)ez1Ch,X}B M}2WT l3=qĥaJ0uGHO˔U`(W[kޫ.aY'ߗrt,نNr,}|'|}8b&dKx M븀v@7p!& UϬ7*@#V $0e׆4'DO*07J 6GM6f< ˯rl!]`Tz3h_ȎzAh=6sYԙdThoVih$c inMBU5QUj|k7VuḶFcҤP#΂:UR$כ χd˻bfԥn^AXQ}"8_V`M2y"B6;H;VAVцsP] rdQ ~~W!én䇴=oPE ֦ I5 aa1k RG:pQ쉿ƞp=aa lԙ3ri?A_gf_h&<\?i#/|s\Ah'K?㮷 '^!9X22k5aOnֻ0w mku{*v+vuXy΂xq^ WjȒ\1)EG`CBz?ِ'VVI?6N$_Frۤ6#ZuBMOh{֢r\v#[@=c^Los+{C϶޵a^u$ 8Z_wRxۏe?" 
wFZ6ÕYp8t<=IZ@{jtn S2Hyxc|]ͬkq1.Rr hɅ%eD!5z'Lj'C6=H /򄘊,ZL?ahCz_ŸkM5mbg" ̺78 Q3~p>7׻;J?1&{_e{H7D)ܼc+JA F%eTǵdൻiwlv~( .<+C`(:e֤*Umi `3^¬/&/Z9:1|\$>oY2t}2vr \?jsx,CwVU?R1$% Ȑ\ $Rn,?sY m!Ww5prkl7DjgKq̀{4;5ؙ-^JJӋ/H`%f!UQ?Tcě&#(6}G/.,ѩCxߒi-0Uza"TYछ]pu3i Y M?-֕&_\(Y?;ѤdOf[ĈϘ/f~BJH҄O%lUz7N#^7o}fKVi׷">':2dY5vdOhp5f;-pvMFVwE-HeDb_Rv~z~vlO>ZR%Qz}jξ d~A63v`ࡏ\ʬ:X"O=&\U1R$H]:6$ 8$u} =Ecɵb͖?[x_[ċ-mUȱ,69lաsB_TxRɲj_s׫З.$HNUendstream endobj 240 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10  ]ZUm?eB:gׁm_Xց$i, x`FS}endstream endobj 241 0 obj << /Filter /FlateDecode /Length 5671 >> stream x][\q3@sK ,m w%]ΚK"9?Uշ3}fwV$tӗ|U]ۉϯ78|G$3h̉46N=}~)O]'.wrvɟnTfӷON7bV1FWH[!S M;F{3h/AY?dmXmL= )~<1fJgHXǓ?=:߿uK5F?;)ja$'MB{9힦B7cߛe^rzƚlO~JVRbG3YF0%vR˦^!凴sFcNq2+hu6i-T3_!q3dʛ>pU*7K@.@nᇰ!馑Fnkv$^XFO79$VՖsw:51kE{pUӻě QPy !HXh QvVEMbtPOgmEY`u+=eR$%OAn@굑z&+RQJkMr}_E 獼eJF"v:,:Q0!5--fxfCG}j K^Z а=LF)eETD vllb_} `H3@0ri4S\3HJ6T [|U氥^Xp%NR$PG"Y0 3下.I^uHn"#=A)I9:{#AÃ'b"J|WsK1gL2|FejeW|țF^52wA_ EY~۾nz8fw|wipa 7ŰI#_ M0!`qC%EYq1%#j` X'`8hn/$Gכ~!0!+`_bWf L-޻4[nX_; 6ja(nS7"#(<]n^w  krYZ3I6&)W`5h!Yy']-l_1067a)* [Ѧ}ʆI}{,6ueU+`nɱ ^f^Ki9(*>^4wC࠰5Q  + i[~4h-UU%Q2^%PP0HBѕ 5wEϵY'[ X#ZUѹAVhJ˱ ,JgMQYVufX:E*@d$sBΖϼeﬨrڗEX(;Q);xc*>BZ C ^!?}&- 49UP^SdQiI^A(%^S!DM{z>| 0 y;$!ю;Sb~Zлd}0|;EXAG?Mq-r,II`+p/]эrǑw8qNs`!B݅PuJPs>x:;KtRi&.۠R gZTL:%^<,XFckRjE ˹0ĐDN"UIsB>PJ[jzi9qTM`| ),V Tj3.rv_ն F_lN%sڦg߽7#Ș&.RK|{7퀛b6qS$BoV$ ៯Q=αn%Amg%,@qǎ019puПG%l_CR49Xvaa4sed˦~6i&XZaLU'I?8eOs98#&ocX&`=(/oz  =3|Hfia;m<;xlm2S:֊Xm0* \-~"lBU6S@'0~ZC*'k }|[8Qg4O߁.h@;ۍSHe .:O|3{Ld] g?,CE#Ŏu$,>ގ=t|akJFēgC6?lQB(T<xBhw#l @Nn x$m׍-ɥ}փqUkd>%?@g̛r360l,&jM1ggC|!߯ eIHߩAr:AlLJ#X#{[Csev"RxெвIx|ecS2p]_`Ik%~@A+,ϬZa|<:`5F"iK_1_*iZFZ8mð&[PWTrF\h7c|k-ZC'۩ˏQ5?Wi4; Xy_1+0i*MQ'cj'"f隸Vښf ܖOnԥH\+XߋM(¶rk[ydReyIh֑k;KlNhRŷ :N_P,%L8 k5ؖe9"wY!o7ff8|0 n%VV@CDߎëܯÀV *pYN(a jnr+(6 ipf4⨫8NVK-;~% ы<Ʌ?k%@ $_r)\atu| RmTkiNriwD?N!cQ_&:^*ٸfV C}~YۑFK>qYL]_W4B#dzNڏ_ܺ+f,<;*N@g|m-V5ye%R̠3NZ3}'}Fs.WUbLm-NGAݗ'"kv"bf(дmگj_ =P]>!+'mfm6_MFt*4 6+-ǷDcbyZ]Dh>G 
g[hn26t{w)@4Z/icZ.?K!m?%9Tȉ{bH2~&!檎1\l.,Z T^Ȝ}|OK?UfŮxz'B+c)> `sU)o:v^6~\DGt 9 =&=2ou%@)" DiJ\neAY<hXLC{;' 8M XXM.pBT4Z~4,y`S_~heBoI-%7m`[Ԑ!3YC-r܍u<0CQKK?\7У *#]XT tx*f 4#Aqyt)#IrP3]F_lqA:3c8jk-XS GiBof6eUmUC xf#EYH(M˕?:rssyKX >*YFѬWMY yj9+;ԟVlvuڐnOd*IjKjӑѨ.nsГ|&'-u2@T`$Kd&"6!-M:A(K뵻b,JG(K%:8~W1Q/r|#svݪl!_ϨIى@-{ GYjiǽ5.Gv xWg vqoxB-'݋v>Gwp(%|J^$h-qMeyl֮9@.9\9 }E`Sۃ^9~JE b$(ˆ#5zZ*_ (s:L;3œLQk a} E!zh=D wȺN]<ýuxoB캸G?}}m*P~ LIzlh0$P-8T5$ai^aJ#q͐5߃Ynn[)wisUgd=E}b>@kfa܌ xk 2ȥ,v顊rɠƹ"|VWUt5|-yI[9 Ʉ*B?S~endstream endobj 243 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 TXЪj8(Nߗ,N>~l#GpƲ5 HeQV-ƃ剳B7OfRUiZB 'mQt1 l.kSIT @s$Lg)HSendstream endobj 244 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 280 >> stream xcd`ab`dddw 441H3a!"nn'~%Ș__PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*@s JKR|SRx##sWmf;OUsvϟ_]%g#[Uewy9|ŋ/d-=}3fn9.zͳyxv^endstream endobj 245 0 obj << /Filter /FlateDecode /Length 5336 >> stream x]Ks$9~S5] `A@eN۳LdJ*)Uꮶ۞ F[VT*ˇ=e=?e_g_ӫ ;:?ίO}'g>oSoz;5ʙӳoxo?WJ뺷%w^w a{˸Zѽyl6sV8&E ΤqfL5{| x;H߇3<Nqf8ĮIm> }T{SB익a 1f/JfL^,a ޝy9D ֽI'q & J>BG^9;M9k5q,sAyssBa7CΫxEn:ÉWJ \Dv_CH|d] ܷ;\ƿ+qX`80.ء3r9,sɁxծ p*K1Ha+K ߇7 &p;ٕsBԇlxgKZ)B]af@&AX pd?3R[XXyaq$Z({%6@nٻxvR4oiJ$ ;0?,9@q@Y*&T}`2H,m9H9)4$7GH1.,#&j$iB䱡ݣMPE}KP'WNi@B<: : Lx(lX G6!WiÂؑ@t(<Ưn"8<)d/sRguY1PNCkZ(4~K8a[ͭenEc *J^4W;rsU)5ihQV~% n_'Zs(QZ8j q3QA_5AYN x- 3@P@PQ#"3EC߫K5n Ԙx"d x:+j)ҫ+ j0$B|iN䲪7!BJ8 N"`_jqb{WxT4 9,ERZo ۪wi!&$t /;4]yQTw꽐4@&-?$4%"5Eѯ@JyG i0dsz>}X]zPb^!} cnK}/Qs긖u9gץyYWP㘟5`yQϭ~Myq&y*ߠ]xC(0$&p$V$0Af4a8e-Gu% Nr"^ҼoNv> 4o  4_5햻\7pR5*Y**RE-ڮ}Y(>(6 fXGUͅ9䪱Yt,+RڞhM\6۔c5βʎ7#4gF~ XS|$'D)<60` ˒Ie:N\Ə V IL?E!YK\qʌ !GE &$x7h :yio'IÒ$؛9d&oC/dڮ[п EJaa0ܔGkp*bF n-iմP6P<-?ָ)MD7jcύ3 d?]4i r.$K{Ea),&^M4yH YjsX*LdWWW E+u),["{GzlGO `M3`rp5c&@U-oY]xKؒu)v\q)}1B3 ZFJe*3;r0ᒤ,llן[gSKeBԊwGu9}x:X7UWK m'j iJ&0V6Om~KPrpk%$4L= /A/fUK*Cԃ!`NL.Et2ުE4~U]Kؼw)ez*.N^Y*弛IEõ*շF馃ߜnU!vR+F;.HuMnT껡XVUxHfpQ$E~}*/1yZ#: ]1~MBQAa@DPI#4͵}62 Y-w" yFp9D@K[2,rdftCATG dgd"~@T54oK}iKCi^f_w9.y헥4KsU|VK-RICHN8L͸Z]TmJ׃V[E!t߬SCH_pKfoPMhJI}]iKm> stream x]O10 
*XЪj8(Nߗ,N>~l#GpƲ5 HeQV-ƃ剳B7Of겯=NRP> stream x][od7r~y Ox$ؗuF؋CFQ(`VyiV5ciy)<<_Wry%N^JOߞ :!(O._|_#5V8*hIGElRuups<5]_]mk6omOEB AHORdXRc,oF;u%,!8} 2hmNRքfK! ;%ࢄ4p} ^7|chCP.v~]`S Rsڧ>Pщah\W""-}m{$˿/rjFCļMc;IP{@KrqF`5Kʾe _0D*`'|rj.Vj$_7:bM Ɣ!Q#/i 3r} ($]0;S0DQ,rJ ‚59oH{\vOac}F[`u^>RBX[PEuAH;ހhP3_5n|Y q䃲8D\.׫SR 7X8aJ")A{8[>itQo{UKgiX5RA=zlNf j:HQuS Rۢc770]iJ뾴6R?#V!$ Y>JK+ei}K=.Wip; 9oJ_+ҿ$_Dѕ={jF^uKM]iq*M糽ivRSR-j_g8Rn @`D@C}+ w 'K؂b{Aއ njU^svqsm([*(y',W?Ts:$XhWEZ3RtiK2`7/-uLU3*H |V/Df_\qgGD IÞeݻ#Sɑ oju@-M2|bnKLMMPmuפ!q8&?=Ϋ91sFrw> a4 F`Y+̡Sڱ]`o;ITWk'dOQ&دT'[@Gj}QǨM]ZxPnU1B4gBYqVJ֜pUs&t9WY0"ܞi ujIF_eU}wѦ \Y ]@1G%KDe_hG ޜsNp#7E Ni,\#٫((XdJB]dbƞjuey;|Mؚ}p&P IUnpWdk9]i\(ғ-u]˦Rkɋr ӱ½?aM<:o.n򻩾 av:<'550 "X$3p^v<﫜x͛Ӟא@667uLLD5מ+I`wbn@8sT)upz.t ~yBB?@J\=ҫÀOȈa#&pv y0],H"c]^8qĀ\@jg:,m$TAƒ)j],s06롷؂\mynZ-8cVu Jُ{@{yZcfA##g3U( ZҘcAׇ$NH - "XGbCY*גG1c< ~ӪA3TH-V,*{aR͔.JmUU}mO15x]w^{{>vfyYԄ~M[ӢVHnqvw: 5'C:eu]4ͻ كdf6`vV^=hH"8#x qݖS=h&S> 1. zwo3e@%~Ìj\sGjͪڑ3wuit(jOb;0pvAo>POҜMB 4'/H['y7rԳG ’˞@>4+`|+`sKD7 ]s:}KIN̘釂xHٶZ/x@R"7Qy @;;KejޥJJ~X{o)W5^Y2J,qryںW?gW9-gܔ[8DQؿVum2s혦d޶ؘ|Oj6|rHu4:)f"r䕋+?5 AFV_rs|m!d8˔3}I7Za3BKӴǩ"9i.33WoeXԐ@_h/@z%X"(NB&džn_Q\|b[sCY}m2w%v?iAwd51d>ЀGΥ7i)bGoW>aw&,Ȑ`r6d+2ZP1C b3.&aBdboV+dpG慌d8x&A*\`^18@ȖbtTr[&-XAfRg :9 }s3_r'ղɐʇkI4:w45eճ(V V'^xqw4Ȗf2s:BH[ce*gv!J3a%>sR^d1ylh?&iF;@52w\A*ؾR37Aލ0Ђjͫdi.&sO y1LB1S_MsJPi\~ Np .un949F=ubYr^&|Ms/յP &W7 ]2t8,Mvѣ;;־2\K+NG_cqm2cz=<[숤wX}' /~2ǀE k@ ~!67f?c)Ծ9}G̊lB!?)zҷ; ~|t[R}zU1{7`##Q94hdVry+L|&N{GK^|`)O(^xSdD>HJLLԏhXRca&- Yߞ6,IG9^b0DsWSJ Neh|{>7if|T4ޯ;o%1&zYkgZ) ٭CTf_c].Xfu]Ws ;csp{|fLmn2'?[f$O9Tr;DGI;y2b<<^z_V0> stream x]O10 ЅUqP(/ Ct;,:\<Fn,逫 pjɵXX(pSwՂ)%#N`P4#몪J`2ٶ}lSITa )榹I*` xS`_DSzendstream endobj 249 0 obj << /Filter /FlateDecode /Length 5541 >> stream x]Ks$q#i!Be2rCcŮŀhr?ۙYUY=ՃKJGVV族H H߿W8x I<{'  QDytETy= ~0㓿@ _Vs Θz]M% ^*y%ɿch(=x-Cfxn! 
7|[o${#<8QV'8R%~DVXǠ>[|04(tFmS6; cx1U EX+YmCNeX$Kߛ L̴CDtJ+h/vcn6j[x|5J Ӥk!yc 3iD7vn68|F"ެ޴[ȋ.ko3>wA+.ji@.e[RR{ó x / yWqib$yMoHV C~ K:7Ϣ+6lEEGkkBHM0| t] lT†'3GqAq7P-'_8翬"6m`.!0o G -&HqbztQkck|\H \i7']%A j#EI7HmUU&V% +8Qc uy˕,BBB %;a0_ Iʦ ~!97_ [G_ -P+]BX>10Z2q](Θ<{a [ \޲`MM[hsJ xP~&7"#y^kS2Gk@gҾXɋj*隽GjP>$3:?DIm,YŽ9RJj5^3|MPR2 ̖px6U5/iD]l\}L;Qlu~Q ':A\62Axb„mT4r+Ζ.uEVˑ̢0**|ݠJ;#HLUw켒_T_+e%*RପJ ):õ g i7:GR fjw ED 4yQ4:^m#Y)}0$hЂ0M2u^n?;uE1 hGw+s\DBPwěZª,qe!q=䭖_x4FoŶa3rk%NAP|EB m\"<뺨 opWe+I4)J"ִ2.8Մ Pn"b~:f] 'f (F-7r<Ӹ\h|c#g (gO} ;Fҭa'6o'Z &v˫%Z-HpȟwuZ#pl|>e*1ՄeL+̿~=젖4F*ټ̼Yj9cZ?D7C\iqLHBvtb@cr|,m| `)nztk2DMbąLg5O@VJy35^ 9M6uմ)Ea(M U<0>t?; tq$2eA,7"U 8ٮn6b3[G%8%S,·ERn&mZ{.{A2xLSl$g[tSRN ojyݡgP̎m{FH,zYIÙ*yYI}QɓJ7\+0IX03 N%^NpR6K)$DI}g lz41fXz`K>:uVRi iU@G{|w eѯea;E㓆CY e`7!-::'aoH?a.g8U{T5Jn\߽!~0S]wXhZ>)Ek2uڎF ">YUAg'R2467`r`5)Ԅ0Iiiǜ3Yi($ETՋ7PW,f};lR"16QB+Pay2F&0_ 6eݦ0#7£ιI~Qu=qn.6!c_ͲnS %sXKAq$'$Tǧx`k0.MŸ7G_d.Cp[l}'\xoxY qrPvrLS>OL R[,h察۷zs,ZPNZ^N1\ʭ8GyG}Őn.+10/]0}xUNIH(Ŀڊ:dsK}=}䓹9 ʦRɴ<'oYaٻ+»Yp_ovRW<^ˋ4&&@Y$4Qzq'Z=a |!Y;HZ^nkكsH6,xX9bME覮ձ-9PάMm^WXr1ì+3?뾫!jP#,mGP-A9I$mG]˥ȣS;>##m1<A"t=:qs=y`".<.uL4<p?Xbwrz}﹊N>bfMi}+d󉃄9tkQ}]PA.7rj:˙=\?8''H>όbl1&x#?kX5Eq̩Ԥ]m[3$,x3,@ -l<$4ܭvCjhJxcv:_xChUaH|̒H-Nfi6MadK6ɁFY^@;jm8!{6d?iCvX,k>jxS])%/5f@u*ve2#SA) f$W!>MDGbiî@axX}|vY} Jv Lv5w \y$L${St6[my:x.%ɲfy_Pd#^ qsWTĚʁX7]DTe'bwY]@a*n?R_ez'sa*2uaq{.C1bG~_V}u_xͻVU+]T;˷ >U]ď|?m]wf=:SLPN2vl@6v1E΍JNzX8<n eGӇx@?ԐiM#+a3 }8*'ZiȈu e 1FU ',8)|7"􂀟5]YjCk|jWNTD3`g2p?)WѫÓܱ_`yͥkۄ@X\{+6d(o0{y.`Ubߚ88mlZi=;GYX׌rCn{=IۻSw -,)28|xh3Of<)8j` S J`Z?0̂$,SYВ,zXZvBy'1GaOO#މIbItָ;BX#ɋg}endstream endobj 250 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 *XЪj8(Nߗ,N>~l#GpƲ5 HeQV-ƃ剳B7OfRUiZB 'mQt1 l.kSIT @s$Lg)C Swendstream endobj 251 0 obj << /Filter /FlateDecode /Length 5871 >> stream x]rGr}}ӮEz ] ÎF CR ․ͬkVÒhׁJꬬ̓'j~vz/%Cmд<js[6/zG3nޔ}\i& { |^->.K/[HnF 5a`<)IZ)i'<ͷyV׵76u9H3>;ʱ;A\2A qިanS+"8ǹ*G.YP[`0eNj^ o`ԡg 5^pżSv}M{g (xt絿 SRx.Zޞ?+d/swa8v'P(w'p!qux|F 8:L3>-aTtXQVZ/5,(d2PMoA#l΄Zߙ s&υ04؆aTdĹ" 
)X*buzv".${/J+sҕN$CVpKd+;ѡ[Q Phތq$|6?||( EyKVUZci]ֶs-zP n\JKV몴ز3ց-A`eҀk?[%egJɨ) x2ꡃ E?Y4B"ys[G03S;~UiQyֶs ;rr~Gr]%|38dEm:1Q{lb,nbd.3]-ie Rt(:9a%%Ry >PO ?Fͥj8`pTF4X3[:3=0RTGe @6-l (߆;1)H]4~iKr΁/zIҤ1vaRwChm7xYrUh:QμEk‰(lxp2 +p)|3Mٍ0U,11 V Jjt})Z2kz P"!1p6 ">ZA$Sm|a&-%MpW&])>x wg d!p骷D AѰxMsF`BĤrԥa(B 8GxbQ9́v.|xl }Tt, [޲3˷d+aH 3V!>OC&Rt:2_/Wl BK(CTƢdCXt8vw]j΃}`; A3nPXU uCj9 J 1$lc,rշ (V&VØB_ebE.Io1sɖկhB,(8 ⿖ !w,kߴT GiU^S?` JdsBb3]adG 0rFhŚ6AgAcACbΪRG؆aRMPWc؛a k Um|J *cYG3?/Qz`| Tr (ƥc/F%sԊHQ(?_P A>A=rekF)Z&Ru ձq,4$%DǪ[rG" gHr+*>w9[PZ,Z 9s /U.1ξ@Sxc{$*a @{Xa)# I]F#xnmt#^=5 2vf8L圚H{S|*UieGXHc5lFNiiO;y)?[@ ~)5IC_uNOM #]0M9R5P"lE13CW܄ -358hf#rR6 Goj/)^w_q}{)v;#W͏[n@bݸwZ7hCE¼TsccmAyR0Hb2?. (PT-z(OHZG{ixda7M65 _ˆlo#WLbHR+n4)׽LS%WS=(3}`z kBmm+uYcGS.ARCF?dR< i&{DrgUE8I 嚻+S)VNS V'/oqET.i&e~io,픢'.kG"il5 U%:Rn[ +.Se}v:{8P/,w&b|sLS,1Z`9|U&~7T 8s8AxWdIB\lX:ʾB|cfn"Bqg?f2]Ld-QZUiuMu1i|)awv[Z#UcVJa[[#jzu/Rz($ɝ8QY$ ~S!=)ĂJP2>jhv:y,'22|dsfchu+^bM|/.I"@8B#נt$?%&Is%ȭE1þy*ukJBσ(qܑ-0Q1W|6/1.W/0IԿ!ﻏ$TIJVεux0uі oNΥ$/s櫏J+Nʑ\R}Kы l~așIn?5 ^I4x& $ɿpd@wQ,9@D X:,VF{'t7 XO&!%?6͠?t,Dڨ}Jyu\ mLL1%c=(A2zo2UmH_s 4q\*'> stream x]O10 b C+qP(/ СY>~ l#18|QcYZ`ٲ(+劋BwO C-$U]QyiZB gmQt1 i̩l.c_Y1ɚB\7cNw>`IOSendstream endobj 253 0 obj << /Filter /FlateDecode /Length 1127 >> stream xuVn[7+-k9]Rhm# U~%r{Lʾϼˠ'3h74zY|Yq6vX]/M38EvC4y~qǥ43'J~XpG {"rx‡H1FRq+1pGOو;bㅺCI2ǥgDfdVKDQ/{E\c)εWLiVoj-7 @b%jS67k PkЏrQ[1cXlu+#μK>X[hh\M΋ S>ȅk5VKbZb"tE㎝A>|qvr%I$fƫڶaQb1JFK{L0\p{^]_ 5M~ݢ lK-_Z<.1)ѯ/ MHbNp$ջhkɣ5!RO8t.NyX7 T ⯜txR:=dz0њcؒoSk)xh&͊MEN׳6M윜f .y8]&_ϚxEncv>\͚`YϳN^Ϛ>{ȭ:Ku܌ Kk`-]q}6L8z)S1c3 |(a]̚Ġg[& 2)[4m3˙0!~"؇tU'o2,~‘¡"e ]k}S7J>,X<^$S wg){X_ 8nH/O$eA{%tJ{7'^))SQud1i2F9 zP?11FQ+ uzLA:u04|嗥חeC;~U H6wy M5,蘒3, [-G^fendstream endobj 254 0 obj << /Type /XRef /Length 231 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 255 /ID [] >> stream xcb&F~0 $8JP?/ B]M!$z < J!"Ai# DH R9"΁Hv[rD_@L "y"oe T >DEll[X!f @2!.h` GR~f! 
Tf!1B/#8~~dzBp D*!CW ɝ5@A endstream endobj startxref 183921 %%EOF spatstat/inst/doc/replicated.Rnw0000644000176200001440000014151513115225157016454 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Analysing Replicated Point Patterns in Spatstat} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\indicate}[1]{\boldmaths{1}\{ {#1} \}} \newcommand{\dee}[1]{\, {\rm d}{#1}} \newcommand{\boldmaths}[1]{{\ensuremath\boldsymbol{#1}}} \newcommand{\xx}{\boldmaths{x}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Analysing replicated point patterns in \texttt{spatstat}} \author{Adrian Baddeley} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \begin{abstract} This document describes \spst's capabilities for fitting models to replicated point patterns. More generally it applies to data from a designed experiment in which the response from each unit is a spatial point pattern. \end{abstract} \tableofcontents \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Introduction} `Replicated point patterns' are datasets consisting of several point patterns which can be regarded as independent repetitions of the same experiment. 
For example, three point patterns taken from micrographs of three pipette samples of the same jug of milk, could be assumed to be replicated observations. More generally we could have several experimental groups, with replicated point pattern data in each group. For example there may be two jugs of milk that were treated differently, and we take three pipette samples from each jug. Even more generally our point patterns could be the result of a designed experiment involving control and treatment groups, covariates such as temperature, and even spatial covariates (such as image data). This document describes some capabilities available in the \spst\ package for analysing such data. \textbf{For further detail, see Chapter 16 of the spatstat book \cite{TheBook}.} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Overview of software} The main components needed are: \begin{itemize} \item the model-fitting function \texttt{mppm}, an extension of the \texttt{spatstat} function \texttt{ppm}, that will fit Gibbs point process models to multiple point pattern datasets; \item support for the class \texttt{"mppm"} of point process models fitted by \texttt{mppm} (e.g. functions to print and plot the fitted model, analysis of deviance for Poisson models) \item some tools for exploratory data analysis; \item basic support for the data from such experiments by storing the data in a \emph{``hyperframe''}. A hyperframe is like a data frame, except that each entry in a column can be a point pattern or a pixel image, as well as a single number or categorical value. \item four example datasets. \end{itemize} \section{Formulating the problem} We view the experiment as involving a series of {\em `units'\/}. Each unit is subjected to a known set of experimental conditions (described by the values of the {\em covariates\/}), and each unit yields a {\em response\/} which is a spatial point pattern. 
The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a pixel image. Three important cases are: \begin{description} \item[independent replicates:] We observe $n$ different point patterns that can be regarded as independent replicates, i.e.\ independent realisations of the same point process. The `responses' are the point patterns; there are no covariates. \item[replication in groups:] there are $K$ different experimental groups (e.g. control, aspirin, nurofen). In group $k$ ($k=1,\ldots,K$) we observe $n_k$ point patterns which can be regarded as independent replicates within this group. We regard this as an experiment with $n = \sum_k n_k$ units. The responses are the point patterns; there is one covariate which is a factor (categorical variable) identifying which group each point pattern belongs to. \item[general case:] there are covariates other than factors that influence the response. The point patterns are assumed to be independent, but no two patterns have the same distribution. \end{description} Examples of these three cases are given in the datasets \texttt{waterstriders}, \texttt{pyramidal} and \texttt{demohyper} respectively, which are installed in \spst. \section{Installed datasets} The following datasets are currently installed in \spst. \begin{itemize} \item \texttt{waterstriders}: Penttinen's \cite{pent84} waterstriders data recording the locations of insect larvae on a pond in 3 independent experiments. \item \texttt{pyramidal}: data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain, 31 human subjects grouped into 3 groups (controls, schizoaffective and schizophrenic). \item \texttt{flu}: data from Chen et al \cite{chenetal08} giving the locations of two different virus proteins on the membranes of cells infected with influenza virus; 41 multitype point patterns divided into two virus types (wild and mutant) and two stain types. 
\item \texttt{simba}: simulated data from an experiment with two groups and 5 replicate point patterns per group. \item \texttt{demohyper}: simulated data from an experiment with two groups in which each experimental unit has a point pattern response and a pixel image covariate. \end{itemize} \section{Lists of point patterns} First we need a convenient way to store the \emph{responses} from all the units in an experiment. An individual point pattern is stored as an object of class \verb!"ppp"!. The easiest way to store all the responses is to form a list of \verb!"ppp"! objects. \subsection{Waterstriders data} The \texttt{waterstriders} data are an example of this type. The data consist of 3 independent point patterns representing the locations of insect larvae on a pond. See \texttt{help(waterstriders)}. <<>>= waterstriders @ The \texttt{waterstriders} dataset is a list of point patterns. It is a list, each of whose entries is a point pattern (object of class \verb!"ppp"!). Note that the observation windows of the three point patterns are {\tt not\/} identical. \subsection{The class \texttt{listof}} For convenience, the \texttt{waterstriders} dataset also belongs to the class \verb!"listof"!. This is a simple mechanism to allow us to handle the list neatly --- for example, we can provide special methods for printing, plotting and summarising the list. \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(waterstriders, main="") @ Notice that the plot method displays each entry of the list in a separate panel. There's also the summary method: <<>>= summary(waterstriders) @ \subsection{Creating a \texttt{listof} object} For example, here is a simulated dataset containing three independent realisations of the Poisson process with intensity 100. <<>>= X <- listof(rpoispp(100), rpoispp(100), rpoispp(100)) @ Then it can be printed and plotted. <>= plot(X) X @ To convert an existing list to the class \code{listof}, use \code{as.listof}. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Hyperframes} A \emph{hyperframe} is like a data frame, except that its entries can be objects of any kind. A hyperframe is effectively a two-dimensional array in which each column consists of values of one type (as in a data frame) or consists of objects of one class. The entries in a hyperframe can be point patterns, pixel images, windows, or any other objects. To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). \subsection{Creating hyperframes} The function \texttt{hyperframe} will create a hyperframe. <>= hyperframe(...) @ The arguments \verb!...! are any number of arguments of the form \texttt{tag=value}. Each \texttt{value} will become a column of the array. The \texttt{tag} determines the name of the column. Each \texttt{value} can be either \begin{itemize} \item an atomic vector or factor (i.e. numeric vector, integer vector, character vector, logical vector, complex vector or factor) \item a list of objects which are all of the same class \item one atomic value, which will be replicated to make an atomic vector or factor \item one object, which will be replicated to make a list of identical objects. \end{itemize} All columns (vectors, factors and lists) must be of the same length, if their length is greater than 1. For example, here is a hyperframe containing a column of numbers and a column of \emph{functions}: <<>>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) H @ Note that a column of character strings will be converted to a factor, unless you set \texttt{stringsAsFactors=FALSE} in the call to \code{hyperframe}. This is the same behaviour as for the function \code{data.frame}. 
<<>>= G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]), W=list(rpoispp(100),rpoispp(100), rpoispp(100)), U=42, V=rpoispp(100), stringsAsFactors=FALSE) G @ This hyperframe has 3 rows. The columns named \texttt{U} and \texttt{V} are constant (all entries in a column are the same). The column named \texttt{Y} is a character vector. \subsection{Hyperframes of data} To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). Several examples of hyperframes are provided with the package, including \texttt{demohyper}, \texttt{flu}, \texttt{simba} and \texttt{pyramidal}, described above. The \texttt{simba} dataset contains simulated data from an experiment with a `control' group and a `treatment' group, each group containing 5 experimental units. The responses in the control group are independent Poisson point patterns with intensity 80. The responses in the treatment group are independent realisations of a Strauss process (see \texttt{help(simba)} for details). The \texttt{simba} dataset is a hyperframe with 10 rows and 2 columns: \texttt{Points} (the point patterns) and \texttt{group} (a factor with levels \texttt{control} and \texttt{treatment}). <<>>= simba @ The \texttt{pyramidal} dataset contains data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain. One point pattern was observed in each of 31 human subjects. The subjects were classified into 3 groups (controls, schizoaffective and schizophrenic). The \texttt{pyramidal} dataset is a hyperframe with 31 rows and 2 columns: \code{Neurons} (the point patterns) and \code{group} (a factor with levels \texttt{control}, \texttt{schizoaffective} and \texttt{schizophrenic}). 
<<>>= pyramidal @ The \texttt{waterstriders} dataset is not a hyperframe; it's just a list of point patterns. It can easily be converted into a hyperframe: <<>>= ws <- hyperframe(Striders=waterstriders) @ \subsection{Columns of a hyperframe} Individual columns of a hyperframe can be extracted using \verb!$!: <<>>= H$X H$Y @ The result of \verb!$! is a vector or factor if the column contains atomic values; otherwise it is a list of objects (with class \texttt{"listof"} to make it easier to print and plot). Individual columns can also be assigned (overwritten or created) using \verb!$<-!: <<>>= H$U <- letters[1:3] H @ This can be used to build up a hyperframe column-by-column: <<>>= G <- hyperframe() G$X <- waterstriders G$Y <- 1:3 G @ \subsection{Subsets of a hyperframe} Other subsets of a hyperframe can be extracted with \verb![!: <<>>= H[,1] H[2,] H[2:3, ] H[1,1] @ The result of \verb![! is a hyperframe, unless you set \verb!drop=TRUE! and the subset consists of only one element or one column: <<>>= H[,1,drop=TRUE] H[1,1,drop=TRUE] H[1,2,drop=TRUE] @ Currently there is no method for \verb![<-! that would allow you to assign values to a subset of a hyperframe. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Plotting} \subsection{Plotting a \code{listof} object} The plot method for \code{listof} objects has formal arguments <>= plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL) @ where \code{main} is a title for the entire page. If \code{arrange=TRUE} then the entries of the list are displayed in separate panels on the same page (with \code{nrows} rows and \code{ncols} columns of panels), while if \code{arrange=FALSE} then the entries are just plotted as a series of plot frames. The extra arguments \verb!...! control the individual plot panels. These arguments will be passed to the plot method that displays each entry of the list. Suitable arguments depend on the type of entries. 
<>= plot(waterstriders, pch=16, nrows=1) @ \subsection{Plotting a hyperframe} \subsubsection{Plotting one column} If \code{h} is a hyperframe, then the default action of \code{plot(h)} is to extract the first column of \code{h} and plot each of the entries in a separate panel on one page (actually using the plot method for class \verb!"listof"!). \SweaveOpts{width=7,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(simba) @ This only works if the entries in the first column are objects for which a plot method is defined (for example, point patterns, images, windows). To select a different column, use \verb!$! or \verb![!: \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) plot(H$Y) @ The plot can be controlled using the arguments for \code{plot.listof} (and, in this case, \code{plot.function}, since \verb!H$Y! consists of functions). \subsubsection{Complex plots} More generally, we can display any kind of higher-order plot involving one or more columns of a hyperframe: <>= plot(h, e) @ where \code{h} is a hyperframe and \code{e} is an \R\ language call or expression that must be evaluated in each row to generate each plot panel. \SweaveOpts{width=9,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) })) @ Note the use of \code{quote}, which prevents the code inside the braces from being evaluated immediately. To plot the $K$-functions of each of the patterns in the \code{waterstriders} dataset, \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(Bugs=waterstriders) plot(H, quote(plot(Kest(Bugs))), marsize=1) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Data analysis} \subsection{Computing with hyperframes} Often we want to perform some computation on each row of a hyperframe. 
In a data frame, this can be done using the command \code{with}: <<>>= df <- data.frame(A=1:10, B=10:1) with(df, A-B) @ In this example, the expression \code{A-B} is evaluated in each row of the data frame, and the result is a vector containing the computed values for each row. The function \code{with} is generic, and has a method for data frames, \code{with.data.frame}. The computation above was executed by \code{with.data.frame}. The same syntax is available for hyperframes using the method \code{with.hyperframe}: <>= with(h,e) @ Here \code{h} is a hyperframe, and \code{e} is an {\sf R} language construct involving the names of columns in \code{h}. For each row of \code{h}, the expression \code{e} will be evaluated in such a way that each entry in the row is identified by its column name. <<>>= H <- hyperframe(Bugs=waterstriders) with(H, npoints(Bugs)) with(H, distmap(Bugs)) @ The result of \code{with.hyperframe} is a list of objects (of class \verb!"listof"!), or a vector or factor if appropriate. Notice that (unlike the situation for data frames) the operations in the expression \code{e} do not have to be vectorised. For example, \code{distmap} expects a single point pattern, and is not vectorised to deal with a list of point patterns. Instead, the expression \code{distmap(Bugs)} is evaluated separately in each row of the hyperframe. \subsection{Summary statistics} One application of \code{with.hyperframe} is to calculate summary statistics for each row of a hyperframe. For example, the number of points in a point pattern \code{X} is returned by \code{npoints(X)}. To calculate this for each of the responses in the \code{simba} dataset, <<>>= with(simba, npoints(Points)) @ The summary statistic can be any kind of object. 
For example, to compute the empirical $K$-functions for each of the patterns in the \code{waterstriders} dataset, <<>>= H <- hyperframe(Bugs=waterstriders) K <- with(H, Kest(Bugs)) @ To plot these $K$-functions you can then just type \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(K) @ The summary statistic for each row could be a numeric vector: <<>>= H <- hyperframe(Bugs=waterstriders) with(H, nndist(Bugs)) @ The result is a list, each entry being a vector of nearest neighbour distances. To find the minimum interpoint distance in each pattern: <<>>= with(H, min(nndist(Bugs))) @ \subsection{Generating new columns} New columns of a hyperframe can be created by computation from the existing columns. For example, I can add a new column to the \code{simba} dataset that contains pixel images of the distance maps for each of the point pattern responses. <>= simba$Dist <- with(simba, distmap(Points)) @ \subsection{Simulation} This can be useful for simulation. For example, to generate Poisson point patterns with different intensities, where the intensities are given by a numeric vector \code{lambda}: \SweaveOpts{width=6,height=6} \setkeys{Gin}{width=0.7\textwidth} <>= lambda <- rexp(6, rate=1/50) H <- hyperframe(lambda=lambda) H$Points <- with(H, rpoispp(lambda)) plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4))))) @ It's even simpler to generate 10 independent Poisson point patterns with the \emph{same} intensity 50, say: <>= H$X <- with(H, rpoispp(50)) @ (the expression \code{rpoispp(50)} is evaluated once in each row, yielding a different point pattern in each row because of the randomness). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Exploratory data analysis} Before fitting models to the data, it is prudent to explore the data to detect unusual features and to suggest appropriate models. 
\subsection{Exploring spatial trend and covariate effects} Points may be distributed non-uniformly either because they are intrinsically non-uniform (``spatial trend'') or because their abundance depends on a spatial covariate (``covariate effects''). Non-uniformity of a point pattern can be investigated using the kernel smoothed intensity. This is the convolution of the point pattern with a smooth density called the kernel. Effectively each point in the pattern is replaced by a copy of the kernel, and the sum of all copies of the kernel is the kernel-smoothed intensity function. It is computed by \texttt{density.ppp} separately for each point pattern. <>= plot(simba, quote(plot(density(Points), main="")), nrows=2) @ Covariate effects due to a real-valued spatial covariate (a real-valued pixel image) can be investigated using the command \code{rhohat}. This uses a kernel smoothing technique to fit a model of the form \[ \lambda(u) = \rho(Z(u)) \] where $\lambda(u)$ is the point process intensity at a location $u$, and $Z(u)$ is the value of the spatial covariate at that location. Here $\rho$ is an unknown, smooth function which is to be estimated. The function $\rho$ expresses the effect of the spatial covariate on the point process intensity. If $\rho$ turns out to be constant, then the covariate has no effect on point process intensity (and the constant value of $\rho$ is the constant intensity of the point process). <>= rhos <- with(demohyper, rhohat(Points, Image)) plot(rhos) @ \SweaveOpts{width=6,height=4} \setkeys{Gin}{width=0.9\textwidth} \subsection{Exploring interpoint interaction} Still to be written. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Fitting models of spatial trend} The command \code{mppm} fits models to multiple point patterns. Its syntax is very similar to that of \code{lm} and \code{glm}: <>= mppm(formula, data, interaction, ...) 
@ where \code{formula} is a formula describing the systematic trend part of the model, \code{data} is a hyperframe containing all the data (responses and covariates), and \code{interaction} determines the stochastic interpoint interaction part of the model. For example: <>= mppm(Points ~ group, simba, Poisson()) @ Note that the formula has a left hand side, which identifies the response. This should be the name of a column of \code{data}. \subsection{Trend formula} The right side of \code{formula} is an expression for the linear predictor (effectively the {\bf logarithm} of the spatial trend). The variables appearing in the right hand side of \code{formula} should be either \begin{itemize} \item names of columns in \code{data} \item objects in the {\sf R} global environment (such as \code{pi} and \code{log}) \item the reserved names \code{x}, \code{y} (representing Cartesian coordinates), \code{marks} (representing mark values attached to points) or \code{id} (a factor representing the row number in the hyperframe). \end{itemize} \subsubsection{Design covariates} The variables in the trend could be `design covariates'. For example, to fit a model to the \code{simba} dataset in which all patterns are independent replicates of the same uniform Poisson process, with the same constant intensity: <<>>= mppm(Points ~ 1, simba) @ To fit a model in which the two groups of patterns (control and treatment groups) each consist of independent replicates of a uniform Poisson process, but with possibly different intensity in each group: <<>>= mppm(Points ~ group, simba) @ To fit a uniform Poisson process to each pattern, with different intensity for each pattern: <<>>= mppm(Points ~ id, simba) @ \subsubsection{Spatial covariates} The variables in the trend could be `spatial covariates'. For example, the \code{demohyper} dataset has a column \code{Image} containing pixel images. 
<<>>= mppm(Points ~ Image, data=demohyper) @ This model postulates that each pattern is a Poisson process with intensity of the form \[ \lambda(u) = \exp(\beta_0 + \beta_1 Z(u)) \] at location $u$, where $\beta_0, \beta_1$ are coefficients to be estimated, and $Z(u)$ is the value of the pixel image \code{Image} at location $u$. It may or may not be appropriate to assume that the intensity of the points is an exponential function of the image pixel value $Z$. If instead we wanted the intensity $\lambda(u)$ to be \emph{proportional} to $Z(u)$, the appropriate model is <>= mppm(Points ~ offset(log(Image)), data=demohyper) @ which corresponds to an intensity proportional to \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \log Z(u)) = e^{\beta_0} \; Z(u). \] The \code{offset} indicates that there is no coefficient in front of $\log Z(u)$. Alternatively we could allow a coefficient: <>= mppm(Points ~ log(Image), data=demohyper) @ which corresponds to a gamma transformation of \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \beta_1 \log Z(u)) = e^{\beta_0} \; Z(u)^{\beta_1}. \] %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Interpoint interaction} The stochastic interpoint interaction in a point process model is specified by the arguments \code{interaction} and (optionally) \code{iformula} in <>= mppm(formula, data, interaction, ..., iformula=NULL) @ \subsection{Same interaction for all patterns} In the simplest case, the argument \texttt{interaction} is one of the familiar objects that describe the point process interaction structure. 
It is an object of class \texttt{"interact"} created by calling one of the functions \begin{center} \begin{tabular}{rl} \texttt{Poisson()} & the Poisson point process\\ \texttt{Hardcore()} & the hard core process \\ \texttt{Strauss()} & the Strauss process \\ \texttt{StraussHard()} & the Strauss/hard core point process\\ \texttt{Softcore()} & pairwise interaction, soft core potential\\ \texttt{PairPiece()} & pairwise interaction, piecewise constant \\ \texttt{DiggleGatesStibbard() } & Diggle-Gates-Stibbard pair potential \\ \texttt{DiggleGratton() } & Diggle-Gratton pair potential \\ \texttt{Fiksel() } & Fiksel pair potential \\ \texttt{LennardJones() } & Lennard-Jones pair potential \\ \texttt{Pairwise()} & pairwise interaction, user-supplied potential\\ \texttt{AreaInter()} & area-interaction potential\\ \texttt{Geyer()} & Geyer's saturation process\\ \texttt{BadGey()} & multiscale Geyer saturation process\\ \texttt{Saturated()} & Saturated pair model, user-supplied potential\\ \texttt{OrdThresh()} & Ord process, threshold potential\\ \texttt{Ord()} & Ord model, user-supplied potential \\ \texttt{MultiStrauss()} & multitype Strauss process \\ \texttt{MultiStraussHard()} & multitype Strauss/hard core process \\ \texttt{Concom()} & connected component interaction \\ \texttt{Hybrid()} & hybrid of several interactions \\ \end{tabular} \end{center} In this `simple' usage of \texttt{mppm}, the point process model assumes that all point patterns have exactly the same interpoint interaction, (with the same interaction parameters), and only differ in their spatial trend. \subsection{Hyperframe of interactions} More generally the argument \code{interaction} can be a hyperframe containing objects of class \texttt{"interact"}. For example, we might want to fit a Strauss process to each point pattern, but with a different Strauss interaction radius for each pattern. 
<>= radii <- with(simba, mean(nndist(Points))) @ Then \code{radii} is a vector of numbers which we could use as the values of the interaction radius for each case. First we need to make the interaction objects: <<>>= Rad <- hyperframe(R=radii) Str <- with(Rad, Strauss(R)) @ Then we put them into a hyperframe and fit the model: <<>>= Int <- hyperframe(str=Str) mppm(Points ~ 1, simba, interaction=Int) @ An important constraint is that all of the interaction objects in one column must be \emph{instances of the same process} (e.g. Strauss) albeit possibly having different parameter values. For example, you cannot put Poisson and Strauss processes in the same column. \subsection{Interaction formula} If \code{interaction} is a hyperframe, then the additional argument \code{iformula} may be used to fully specify the interaction. (An \code{iformula} is also required if \code{interaction} has more than one column.) The \code{iformula} should be a formula without a left hand side. Variables on the right hand side are typically the names of columns in \code{interaction}. \subsubsection{Selecting one column} If the right hand side of \code{iformula} is a single name, then this identifies the column in \code{interaction} to be used as the interpoint interaction structure. <<>>= h <- hyperframe(Y=waterstriders) g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7)) mppm(Y ~ 1, data=h, interaction=g, iformula=~str4) @ \subsubsection{Interaction depending on design} The \code{iformula} can also involve columns of \code{data}, but only those columns that are vectors or factors. This allows us to specify an interaction that depends on the experimental design. [This feature is {\bf experimental}.] For example <<>>= fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group) @ Since \code{Strauss(0.07)} is not a hyperframe, it is first converted to a hyperframe with a single column named \code{Interaction}. 
The \code{iformula = ~Interaction*group} specifies (since \code{group} is a factor) that the interpoint interaction shall have a different coefficient in each experimental group. That is, we fit a model which has two different values for the Strauss interaction parameter $\gamma$, one for the control group and one for the treatment group. When you print the result of such a fit, the package tries to do `automatic interpretation' of the fitted model (translating the fitted interaction coefficients into meaningful numbers like $\gamma$). This will be successful in \emph{most} cases: <<>>= fit @ <>= co <- coef(fit) si <- function(x) { signif(x, 4) } @ Thus we see that the estimate of the Strauss parameter $\gamma$ for the control group is \Sexpr{si(exp(co[2]))}, and for the treatment group \Sexpr{si(exp(sum(co[c(2,4)])))} (the correct values in this simulated dataset were $1$ and $0.5$). The fitted model can also be interpreted directly from the fitted canonical coefficients: <<>>= coef(fit) @ The last output shows all the coefficients $\beta_j$ in the linear predictor for the (log) conditional intensity. The interpretation of the model coefficients, for any fitted model in \R, depends on the \emph{contrasts} which were applicable when the model was fitted. This is part of the core {\sf R} system: see \code{help(contrasts)} or \code{options(contrasts)}. If you did not specify otherwise, the default is to use \emph{treatment contrasts}. This means that, for an explanatory variable which is a \texttt{factor} with $N$ levels, the first level of the factor is used as a baseline, and the fitted model coefficients represent the factor levels $2, 3, \ldots, N$ relative to this baseline. In the output above, there is a coefficient for \code{(Intercept)} and one for \code{grouptreatment}. These are coefficients related to the \code{group} factor. 
According to the ``treatment contrasts'' rule, the \code{(Intercept)} coefficient is the estimated effect for the control group, and the \code{grouptreatment} coefficient is the estimated difference between the treatment and control groups. Thus the fitted first order trend is $\exp(\Sexpr{si(co[1])}) = \Sexpr{si(exp(co[1]))}$ for the control group and $\exp(\Sexpr{si(co[1])} + \Sexpr{si(co[3])}) = \Sexpr{si(exp(sum(co[c(1,3)])))}$ for the treatment group. The correct values in this simulated dataset were $80$ and $100$. The remaining coefficients in the output are \code{Interaction} and \code{Interaction:grouptreatment}. Recall that the Strauss process interaction term is $\gamma^{t(u,\xx)} = \exp(t(u,\xx) \log\gamma)$ at a spatial location $u$, for a point pattern $\xx$. Since we're using treatment contrasts, the coefficient \code{Interaction} is the estimate of $\log\gamma$ for the control group. The coefficient \code{Interaction:grouptreatment} is the estimate of the difference in $\log\gamma$ between the treatment and control groups. Thus the estimated Strauss interaction parameter $\gamma$ is $\exp(\Sexpr{si(co[2])}) = \Sexpr{si(exp(co[2]))}$ for the control group and $\exp(\Sexpr{si(co[2])} + (\Sexpr{si(co[4])})) = \Sexpr{si(exp(co[2]+co[4]))}$ for the treatment group. The correct values were $1$ and $0.5$. \subsubsection{Completely different interactions for different cases} In the previous example, when we fitted a Strauss model to all point patterns in the \code{simba} dataset, the fitted model for the patterns in the control group was close to Poisson ($\gamma \approx 1$). Suppose we now want to fit a model which {\it is} Poisson in the control group, and Strauss in the treatment group. The Poisson and Strauss interactions must be given as separate columns in a hyperframe of interactions: <>= interaction=hyperframe(po=Poisson(), str=Strauss(0.07)) @ What do we write for the \code{iformula}? 
The following \emph{will not} work: <>= iformula=~ifelse(group=="control", po, str) @ This does not work because the Poisson and Strauss models are `incompatible' inside such expressions. The canonical sufficient statistics for the Poisson and Strauss processes do not have the same dimension. Internally in \code{mppm} we translate the symbols \code{po} and \code{str} into matrices; the dimensions of these matrices are different, so the \code{ifelse} expression cannot be evaluated. Instead we need something like the following: <>= iformula=~I((group=="control")*po) + I((group=="treatment") * str) @ The letter \code{I} here is a standard R function that prevents its argument from being interpreted as a formula (thus the \code{*} is interpreted as multiplication instead of a model interaction). The expression \code{(group=="control")} is logical, and when multiplied by the matrix \code{po}, yields a matrix. So the following does work: <<>>= g <- hyperframe(po=Poisson(), str=Strauss(0.07)) fit2 <- mppm(Points ~ 1, simba, g, iformula=~I((group=="control")*po) + I((group=="treatment") * str)) fit2 @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %#%^!ifdef RANDOMEFFECTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Random effects} \subsection{Mixed effects models} It is also possible to fit models that include `random effects'. Effectively, some of the coefficients in the model are assumed to be Normally-distributed random variables instead of constants. \subsubsection{Mixed Poisson model} Consider the simplest model of a uniform Poisson process which we fitted to the 3 point patterns of waterstriders. It might be sensible to assume that each pattern is a realisation of a Poisson process, but with {\em random intensity\/}. In each realisation the intensity $\lambda$ is constant across different locations, but it is a different, random value in different realisations. 
This example is called a `mixed Poisson process' and belongs to the class of `Cox processes' (Poisson processes with random intensity functions). Let's assume further that the log-intensity is a Normal random variable. Then the model is a (very degenerate) special case of a `log-Gaussian Cox process'. To fit such a model we use the standard techniques of mixed effects models \cite{lairware82,davigilt95,pinhbate00}. The mixed Poisson process which we discussed above would be written in standard form \begin{equation} \label{mixPois} \lambda_i(u) = \exp(\mu + Z_i) \end{equation} for the $i$th point pattern, where $\mu$ is a parameter to be estimated (the `fixed effect') and $Z_i \sim N(0, \sigma^2)$ is a zero-mean Normal random variable (the `random effect' for point pattern $i$). In the simplest case we would assume that $Z_1, \ldots, Z_n$ are independent. The variance $\sigma^2$ of the random effects would be estimated. One can also estimate the individual realised values $z_i$ of the random effects for each point pattern, although these are usually not of such great interest. Since the model includes both fixed and random effects, it is called a ``mixed-effects'' model. \subsubsection{Dependence structure} When we formulate a random-effects or mixed-effects model, we must specify the dependence structure of the random effects. In the model above we assumed that the $Z_i$ are independent for all point patterns $i$. If the experiment consists of two groups, we could alternatively assume that $Z_i = Z_j$ whenever $i$ and $j$ belong to the same group. In other words all the patterns in one group have the same value of the random effect. So the random effect is associated with the group rather than with individual patterns. This could be appropriate if, for example, the groups represent different batches of a chemical. 
Each batch is prepared under slightly different conditions so we believe that there are random variations between batches, but within a batch we believe that the chemical is well-mixed. \subsubsection{Random effects are coefficients} In the mixed Poisson model (\ref{mixPois}), the random effect is an additive constant (with a random value) in the log-intensity. In general, a random effect is a \emph{coefficient} of one of the covariates. For example if $v$ is a real-valued design covariate (e.g. `temperature'), with value $v_i$ for the $i$th point pattern, then we could assume \begin{equation} \label{ranef2} \lambda_i(u) = \exp(\mu + Z_i v_i) \end{equation} where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$. This model has a random effect in the dependence on $v$. We could also have a random effect for a spatial covariate $V$. Suppose $V_i$ is a real-valued image for the $i$th pattern (so that $V_i(u)$ is the value of some covariate at the location $u$ for the $i$th case). Then we could assume \begin{equation} \label{ranef3} \lambda_i(u) = \exp(\mu + Z_i V_i(u)) \end{equation} where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$. This kind of random effect would be appropriate if, for example, the images $V_i$ are not `normalised' or `standardised' relative to each other (e.g.\ they are images taken under different illumination). Then the coefficients $Z_i$ effectively include the rescaling necessary to standardise the images. \subsection{Fitting a mixed-effects model} The call to \texttt{mppm} can also include the argument \texttt{random}. This should be a formula (with no left-hand side) describing the structure of random effects. The formula for random effects must be recognisable to \texttt{lme}. It is typically of the form \begin{verbatim} ~x1 + ... + xn | g \end{verbatim} or \begin{verbatim} ~x1 + ... + xn | g1/.../gm \end{verbatim} where \verb!x1 + ... + xn! 
specifies the covariates for the random effects and \texttt{g} or \verb!g1/.../gm! determines the grouping (dependence) structure. Here \code{g} or \code{g1, \ldots, gm} should be factors. To fit the mixed Poisson model (\ref{mixPois}) to the waterstriders, we want to have a random intercept coefficient (so \texttt{x} is \texttt{1}) that varies for different point patterns (so \texttt{g} is \texttt{id}). The reserved name \code{id} is a factor referring to the individual point pattern. Thus <<>>= H <- hyperframe(P=waterstriders) mppm(P ~ 1, H, random=~1|id) @ To fit the mixed effects model (\ref{ranef2}) to the coculture data with the \code{AstroIm} covariate, with a random effect associated with each well, <>= mppm(Neurons ~ AstroIm, random=~AstroIm|WellNumber) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %#%^!endif %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Studying the fitted model} Fitted models produced by \code{mppm} can be examined and validated in many ways. \subsection{Fits for each pattern} \subsubsection{Subfits} The command \code{subfits} takes an \code{mppm} object and extracts, for each individual point pattern, the fitted point process model for that pattern \emph{that is implied by the overall fit}. It returns a list of objects of class \code{ppm}. <<>>= H <- hyperframe(W=waterstriders) fit <- mppm(W ~ 1, H) subfits(fit) @ In this example the result is a list of three \code{ppm} objects representing the implied fits for each of the three point patterns in the \code{waterstriders} dataset. Notice that {\bf the fitted coefficients are the same} in all three models. Note that there are some unresolved difficulties with the implementation of \code{subfits}. Two completely different implementations are supplied in the package; they are called \code{subfits.old} %(used in versions 0.1--1 and earlier) and \code{subfits.new}.% (introduced in 0.1--2). The old version would occasionally crash. 
Unfortunately the newer version \code{subfits.new} is quite memory-hungry and sometimes causes R to hang. We're still working on this problem. So for the time being, \code{subfits} is the same as \code{subfits.old}. You can change this simply by reassigning, e.g. <>= subfits <- subfits.new @ \subsubsection{Fitting separately to each pattern} For comparison, we could fit a point process model separately to each point pattern dataset using \code{ppm}. The easy way to do this is with \code{with.hyperframe}. To fit a \emph{separate} uniform Poisson point process to each of the three waterstriders patterns, <<>>= H <- hyperframe(W=waterstriders) with(H, ppm(W)) @ The result is again a list of three fitted point process models (objects of class \code{ppm}), but now the fitted coefficients are different. \subsection{Residuals} One standard way to check a fitted model is to examine the residuals. \subsubsection{Point process residuals} Some recent papers \cite{baddetal05,baddmollpake08} have defined residuals for a fitted point process model (fitted to a \emph{single} point pattern). These residuals are implemented in \code{spatstat} as \code{residuals.ppm} and apply to an object of class \code{ppm}, that is, a model fitted to a \emph{single} point pattern. The command \code{residuals.mppm} computes the point process residuals for an \code{mppm} object. <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) @ The result is a list, with one entry for each of the point pattern datasets. Each list entry contains the point process residuals for the corresponding point pattern dataset. Each entry in the list is a signed measure (object of class \code{"msr"}) as explained in the help for \code{residuals.ppm}). 
It can be plotted: <>= plot(res) @ You probably want the smoothed residual field: <>= smor <- with(hyperframe(res=res), Smooth(res, sigma=4)) plot(smor) @ \subsubsection{Sums of residuals} It would be useful to have a residual that is a single value for each point pattern (representing how much that point pattern departs from the model fitted to all the point patterns). That can be computed by \emph{integrating} the residual measures using the function \code{integral.msr}: <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) totres <- sapply(res, integral.msr) @ In designed experiments we can plot these total residuals against the design covariates: <>= fit <- mppm(Points~Image, data=demohyper) resids <- residuals(fit, type="Pearson") totres <- sapply(resids, integral.msr) areas <- with(demohyper, area.owin(as.owin(Points))) df <- as.data.frame(demohyper[, "Group"]) df$resids <- totres/areas plot(resids~Group, df) @ \subsubsection{Four-panel diagnostic plots} Sometimes a more useful tool is the function \code{diagnose.ppm} which produces a four-panel diagnostic plot based on the point process residuals. However, it is only available for \code{ppm} objects. To obtain a four-panel diagnostic plot for each of the point patterns, do the following: \begin{enumerate} \item fit a model to multiple point patterns using \code{mppm}. \item extract the individual fits using \code{subfits}. \item plot the residuals of the individual fits. \end{enumerate} For example: <>= fit <- mppm(P ~ 1, hyperframe(P=waterstriders)) sub <- hyperframe(Model=subfits(fit)) plot(sub, quote(diagnose.ppm(Model))) @ (One could also do this for models fitted separately to the individual point patterns.) \subsubsection{Residuals of the parameter estimates} We can also compare the parameter estimates obtained by fitting the model simultaneously to all patterns (using \code{mppm}) with those obtained by fitting the model separately to each pattern (using \code{ppm}). 
<<>>= H <- hyperframe(P = waterstriders) fitall <- mppm(P ~ 1, H) together <- subfits(fitall) separate <- with(H, ppm(P)) Fits <- hyperframe(Together=together, Separate=separate) dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together))) dr exp(dr) @ One could also try deletion residuals, etc. \subsection{Goodness-of-fit tests} \subsubsection{Quadrat count test} The $\chi^2$ goodness-of-fit test based on quadrat counts is implemented for objects of class \code{ppm} (in \code{quadrat.test.ppm}) and also for objects of class \code{mppm} (in \code{quadrat.test.mppm}). This is a goodness-of-fit test for a fitted {\bf Poisson} point process model only. The model could be uniform or non-uniform and the intensity might depend on covariates. <<>>= H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) @ See the help for \code{quadrat.test.ppm} and \code{quadrat.test.mppm} for further details. \subsubsection{Kolmogorov-Smirnov test} The Kolmogorov-Smirnov test of goodness-of-fit of a Poisson point process model compares the observed and predicted distributions of the values of a spatial covariate. We want to test the null hypothesis $H_0$ that the observed point pattern ${\mathbf x}$ is a realisation from the Poisson process with intensity function $\lambda(u)$ (for locations $u$ in the window $W$). Let $Z(u)$ be a given, real-valued covariate defined at each spatial location $u$. Under $H_0$, the \emph{observed} values of $Z$ at the data points, $Z(x_i)$ for each $x_i \in {\mathbf x}$, are independent random variables with common probability distribution function \[ F_0(z) = \frac{\int_W \lambda(u) \indicate{Z(u) \le z} \dee u} {\int_W \lambda(u) \dee u}. \] We can therefore apply the Kolmogorov-Smirnov test of goodness-of-fit. 
This compares the empirical cumulative distribution of the observed values $Z(x_i)$ to the predicted c.d.f. $F_0$. The test is implemented as \code{kstest.ppm}. The syntax is <>= kstest.mppm(model, covariate) @ where \code{model} is a fitted model (of class \texttt{"mppm"}) and \code{covariate} is either \begin{itemize} \item a \code{function(x,y)} making it possible to compute the value of the covariate at any location \code{(x,y)} \item a pixel image containing the covariate values \item a list of functions, one for each row of the hyperframe of original data \item a list of pixel images, one for each row of the hyperframe of original data \item a hyperframe with one column containing either functions or pixel images. \end{itemize} \newpage \addcontentsline{toc}{section}{Bibliography} %\bibliography{% %extra,% %extra2,% %biblio/badd,% %biblio/bioscience,% %biblio/censoring,% %biblio/mcmc,% %biblio/spatstat,% %biblio/stat,% %biblio/stochgeom% %} \begin{thebibliography}{1} \bibitem{baddmollpake08} A. Baddeley, J. M{\o}ller, and A.G. Pakes. \newblock Properties of residuals for spatial point processes. \newblock {\em Annals of the Institute of Statistical Mathematics}, 60:627--649, 2008. \bibitem{TheBook} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with R}. \newblock Chapman \& Hall/CRC Press, 2015. \bibitem{statpaper} A. Baddeley, I. Sintorn, L. Bischof, R. Turner, and S. Heggarty. \newblock Analysing designed experiments where the response is a spatial point pattern. \newblock In preparation. \bibitem{baddetal05} A. Baddeley, R. Turner, J. M{\o}ller, and M. Hazelton. \newblock Residual analysis for spatial point processes (with discussion). \newblock {\em Journal of the Royal Statistical Society, series B}, 67(5):617--666, 2005. \bibitem{chenetal08} B.J. Chen, G.P. Leser, D. Jackson, and R.A. Lamb. 
\newblock The influenza virus {M2} protein cytoplasmic tail interacts with the {M1} protein and influences virus assembly at the site of virus budding. \newblock {\em Journal of Virology}, 82:10059--10070, 2008. %#%^!ifdef RANDOMEFFECTS \bibitem{davigilt95} M. Davidian and D.M. Giltinan. \newblock {\em Nonlinear Mixed Effects Models for Repeated Measurement Data}. \newblock Chapman and Hall, 1995. %#%^!endif \bibitem{digglangbene91} P.J. Diggle, N. Lange, and F. M. Benes. \newblock Analysis of variance for replicated spatial point patterns in clinical neuroanatomy. \newblock {\em Journal of the {A}merican {S}tatistical {A}ssociation}, 86:618--625, 1991. %#%^!ifdef RANDOMEFFECTS \bibitem{lairware82} N.M. Laird and J.H. Ware. \newblock Random-effects models for longitudinal data. \newblock {\em Biometrics}, 38:963--974, 1982. %#%^!endif \bibitem{pent84} A. Penttinen. \newblock {\em Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method}. \newblock Number 7 in {Jyv\"askyl\"a} Studies in Computer Science, Economics and Statistics. University of {Jyv\"askyl\"a}, 1984. %#%^!ifdef RANDOMEFFECTS \bibitem{pinhbate00} J.C. Pinheiro and D.M. Bates. \newblock {\em Mixed-Effects Models in {S} and {S-PLUS}}. \newblock Springer, 2000. %#%^!endif \end{thebibliography} %\addcontentsline{toc}{section}{Index} %\printindex \end{document} spatstat/inst/doc/shapefiles.pdf0000644000176200001440000023265413166361223016474 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 3472 /Filter /FlateDecode /N 74 /First 597 >> stream x[[s۶~?omčNfGvj>0eEW-@JIfl\Ʀ b-v9 `:fqbBq;,fJKfV1,F(d E(ŚEE ,_nE%MƄx(nqą)ɆBf" R3nЮ&$#"&AgΤA? 
" "fQ$ދ -ȐF1&32pW Ya+* ,,H~!I %ȁHDZX)*ĤYxRP0T![Hze.1_ͩ4(s @7EA(YҬP6TQ Z\cR@;'ekI]XGy2NĆ,xwϦtɊIro7) ^,dϟ;;U`?'4ɧ|/S?yŐ47?0GI u_+^, IF1(N}㥣}#{ƷtOްël/GM 2pzt;zg|RƋvÚS;l ߬ ?X)<ˉWerc!"Q!XNjN20ϥ\4b(!o:Cqr V,_ܦeRWe?|8ąrq,HNKup |dkidv>WKk >t 5rYmT._@]_HZ*YRg򿛣tzyEk!ƏQ`d\W4̂yp,L2GG̒KJ-wKu4y!ek|Ko%hg~ LL:!!Bi^ʬ*࢘-ĝ:;cV8;tݰTAD`)EFel\U6kazTAk J/ip*=AdB7 Zij5T=Sê1xX5TUrWGeQ^m|8!7dd 2x~ q׵_?7k4O,jZoT~ƛW-PW(_LrL`(.]HU@_]p' ap(~ Fu49Yn s~A;`_oEZ!=VbN :Jz{y$oCݗª-0VqQ(owBF*} *.Q5X6U>,@mmyh7\ Y2Su0.MܳhD!9 cMlsѸE40l d&r &s xfټjBdyweaJ E n̂ q6Oga+yNr_Z8,__Y?\ں7zz>.wU,kpG\3eIWL)$k\11h1w{O>EL5e^5a-e׫v˶m]]4G.j+zlvVfx$&9l"}Ns?_Ɍ̑ gr 8-&vn J$YY5`1#֭Z uxG'1E-Hs[v*٩igu~ێr5_]0Ѻ2d#=) /HTDMuϓ[,S)e]X~dۇoWVhq{>;SuStGv{Ns%*WeH\6Ԗ֗n{MZfy?*pOHc/, d\dG 7q)jo˪GЛ֪11mRцqCR'k*K)u~FҜRb2>qLdJoOofʎVc*߸-9}#8kPnڦԖfOFf|{~sFvnVݯ'*zCJ3kl}^)ps?Z :7v!fępbU*"H.BvQ=9L]y.SfŶj66۟ >sQMu}#xl0ICM6;{_ͣmZq\*n"G"6Q7ܷӿ*X(QJ`W`e{jZ 8ۆSNrV:h@/y.6ʚ>Jg_|:Jfq߀gk%ZrV^tn[Z_W7-0B bbu>'(qp3 \u\s/nm7Va̙5 O]6lIT_dVA ˳jGUy~a`}:_$b;ψwӜendstream endobj 76 0 obj << /Subtype /XML /Type /Metadata /Length 1557 >> stream GPL Ghostscript 9.18 2017-10-08T16:32:18+08:00 2017-10-08T16:32:18+08:00 LaTeX with hyperref package endstream endobj 77 0 obj << /Filter /FlateDecode /Length 4830 >> stream x[Mq#C]$@d nr3ܝarEDVedu]{`nNv~DFxLvgۇӕ^teۇ_ӈyr6wWSN.]~ ^?\3Z㵙|)41MoXke|{}Nɔz ]pTS<^\Jy=FDݿCԍڊy_mFc5SvLM.̫c-z~7|N橯}K>'ݽPG'c]ͲxO9N5:F5D+dut9?/QBi`mj;8̦PEw󑖭@ HؔJ2ƗĒ6pC/ z]be_s .xΐt_lIu۾wjkjIbe|e:ưl*Q8kߴպ\qUvo!@CoIj?ph#]`Ŵ>b0yr}Q+XQE>ݕ5fUvքTft):wk"xT˱2[lziz>àO&vz2n9.MkgYPuM[uJ,vj~'߸ȯ@zy`|Zψ _B|as" R^b8;.C[#6Q t Fג馁>6\lm{6exwK~>o 2s Iiƶ#F~\e=reZ*8/\XMyڒ6sXDLy|n)JD3טE_U] _Yv\Um)Ӄp@W}:uZFDX[ȭzUB^Q1 HXwM=hi[pmPl7H {g04^h]VMEke A P4e ;zhֱsp`գ}4%mgs4v@1n`D&f`:a 9;z2n dqT ,耧wԎLwthfMߛt'j(x Xu\N]f14 B R'-]C(;< 8m4QB' kg͆IP 7a<eby|nQ zQgz԰VI3@Gr~(UfCom_Í;{7"f".]0ܱ"0 &?Stc[=η~O2M"S\gx;&odE%j>QkP:Ҋ xp Ş7e26p-@#ԞI"eⱀ^vYc8orBf hxGvU +'I^yB4i`RbrRx5?n>!;ڋ _8Hc|ؤt!! 
OT ||^ȭ{2mRm&Y`^󉚹6ЃEϠGIopO t쨸p).'?Ja,*6$*rY()c$?7}Kҷ,^ 0A/q *$xKq-“`SmAk,B] ;4O=ę#7dF2̶hٔҩVߧkn?xw͡q-Av&U ?nE"G(;rlc`c@t-zo:o⮸bjCL:8*MPMУ!"}@DeMSU@XNyl bkj'? hal5B̝mL"[ hVyOARs%_Fȫ 3=0YL!Ty5zMn})9Gs~Bد"{^W_LMo~Yfsw@8`x^_ͩk:_3W8vp/1dYD 0x\%:Y(ijb! Vå/yӛ .wM rRN )| _* 'xbb(ireD>2KYJQ'5U=g-hjܹa|M)yMąw\gv|gDsyctɯcML`BŹ]o>nB͓%~Fq+#]$%HW\i#63ė0!tb\*%%t]onK$B"*gY$dclY79G9.u$a=8-"I3`׭Z"W=,_&Kw!d?/ׅbtZ kSCcw)S̉MB$:3ںJfz8"YM%^_U|\.O7v٩DwBI djAZ&{pO=z>o2!:WNVa:eo FΫjlzձ[Y)$ ˯kJ6NY+rߊў6I_x>-G/Q}02y캀H 'ylLԵ|Yjθn:WOn#Ϟ/ -3@r#l oi>ʆˌQDͩIttYuH>U[x*L?,gu*D|K?Ie2ٳLQQ`}V :n -E~|o%*#`bk odKFHO>HgE" GXx)0,˙+MQe |y3mcz1+evzHiMquϫ8cU32u#d@+i? . QK45 *Փ)J-u:񨐵籏J72S2cZ]@;ň^l@ldíaxSyF-$%dmk}R3>/ DpU.iZ2vsS*%CMO7_>?[g2h0U\aџ l3=QHG:}2w)b{X'ak%P ,Ir*H!l\ Xk؆ܾN!(6'@Jc)i-EРJG{ذи "dX'8P aB 5*Aޞ ~%ɯ)yLXU]ƞ[~8ѧ#m*L"7YIpWsg:"h4Ymթ^Q/cl-o8 _6 :(mEB]DžgGUf  *v%sʻFv~V;so>mc!CWfoW1QkUs+-=WvoNE&U沫#zd-W胎_ ژXϱ9m*_a.G*bw1ȗ ?>ɜl14~g")؜|RCRWRy!`PJZf_Oco>f0vC]+8yz2VӾ/΄Eޅ :JWXh%+9,S V! n[>/>5apmo|ylK+ TJzu7j\d RAK*gǭO6U?OdpHi>݋ۥՀS~ q\? <{D."GUHA-E%ŕA|;$@v~w$lf-O?Y>ҥDԠ|֟ UetAPW ^8endstream endobj 78 0 obj << /Filter /FlateDecode /Length 3402 >> stream xrGL钞}I%98KJ"4g(t=C%z)Ao_ N-~>Ez!̐nyó)_h/VVZ8>x%k7Bڦ]/W ;1S%c˦[4'˕Yà`ù1m}s ,ZˤyqS4cRq thÂK\F6yyYq^ȼ3K i`A.4o6dGp­pRj@dÿQT:h'8pT#a7D7ZwCWGRu`^]HW>1{eu"R« &Y9 @\mf 6sT\@Yɉ0 dmJTFDeWVz6: <Ŕqd[r%PD;K 9NCET 2xM*x&tʷ(>`lZsIh$GJNq;H7@vnBˡj[:B)>Ec0"ad8z ˜ fKt;4@)Pd1! Q /á[4U. cIh'GfpAAS`/htx^X@F]vFA𝓮hJ2 Td0qyATRdMJ`84(> Q wn88EECzlY|8JLv[34(0m(w쪣u n2W eUpVxRI8r {:5oyA<,d􀺂)lTQGAAOx,g1+M0" }QؘTjqeq / Tb$gb:~,m n2xYpU2@CnAl:LS'K4ѩI%Sw5jDN$gbe9~v[Xi[_f1U"m- cjv[EHS"c͌fg۩x4 qD(W9\GU  dH` .m!PC.aVZ2tNaVZI4 &/y!3y.cH)JMLFa5>dx&;Bs yqge-=MI-RJ-j8eƠI[pHz&fN.|~Ts$',V d9Cp&bPmӥt˅5]9*"Q1*HO{Cۃ?\ᩘ.]ydX "'s)[ӣY2J $ %GqBP淗LH՜AH;HqĴeu1f~vS]0ZŜ*$$w"ь+Ht7 gX 1>Z53uP0A>zWS=udB|[{ p /-{YNR1` 3EYKQ)c Yz" uq! 
#%Hp.~b4|y",Sq$ɼ3y8pP|c5S.ՕgT]2TչDbN=B'I Z}^p[(<EA$W,@iͿq8M03HkG*\t>|GVP J`$Tw`]A4mg7$bS}za=NiЭ4+7B.Wѽ|%fIVzb2&w [Z선Qj%MUpXm\)}k?TVR.m59V6TVrXu!a {bd}ZK>Ch0$e@u$"_#F ۓrqx$i(f!si 0RuGC i`' cŹai<-:1 Co-R] N B)%B& 2(ÔbC꘶kUXIҽ!:VLB~m,x2=2avdc~.N!"Drd.IbOnx4MUUk *NUG>#Ugvܶ!.kmYn;jX}DhICY&,we;` !FlR>{ <rt\?5sSmnaIsU4߸JpӜ MEB<=A:m1Y2FF-[+>9p+dw{!∓vxܫSVr1]c C!z7;oF߼ E =u `<&~Z4F246 3~McD7ҕƸv54Om;Se1cmKvDG&C>co1EX4Jtret}uZ?CcvDPoPvHe, s{Ri[m[\qy`MP)2\ي,ycAj],<1^-Y\>{whs!-vT0^ Fe Xx]S1˱7)dH># 3><ր>"F/f%6H Aà}i:V{r/yFS:^ԸDv!X'ߚyުn oC""Pßqx_"Dʳendstream endobj 79 0 obj << /Filter /FlateDecode /Length 6291 >> stream x][\7r~W/'wXbc q16e -htM#[~~bǠjXoAJүYu(G/Q^=[o|{cƇslqt_Q;j(/apE?lfWApq0!8'8OظƥS Lܖ5_4Ku?)M6~B7J7r:.z#3r1Y' a6t"5vsp \Vg+Bv/JD`&C"G D"KlDDPUui~[ Bɏ6 ^KKXZgO{聂aPgY8 gw>$dYyۃyG]Cx .v@>7u q#V#y'= g$&sFr7E Qaz߹ݕT3CDzվ4JCi~ !(zP}_g ^P6Rp؍Vj0ƜmHaO5CYU%G W",4> MtvN#'OM< ㍔936񑑑KO!2(g"\ g9pojb'Llo W9lg='NJ~h h1L343ωdDܔ8?EKҗdLt˒-(#G?I'2SG>Χ|E܅_SƎ~rm"{cBEau_L 87LC&u]%2R LT߯3o=g>*+UEE t0(iuod̻u\-/˖(W 7k4`AIВ]-!8~V*=}2xণ9  OE?;}YF`&_@E )&qݏ$vigz;;8oAN6%#Y> Sx[n6gR\}{N.^'i \z%+/p# C\Ǹ/)blЈ+yGKl, [{qp4X mjuCi$ yx`z6ӐppU0B7*E9#;9_G %14₱%5PGWNFr$/p#$MnRX  ,z~v`e Їr' wW"LI Xy5kgfw_)w,߫A a1 PCmr'\)Hv fӕK%D*>THVK%֊%k0UOL`a0~0ܷP*!TIn4Yg?ru[“'?Uќ ԑ^V">T1t)5opkb* x3?" ::f>= tM)nC}W:(ec0k ֻ|el+&# Ҏgt*,oޣSXSd<%$VTN} >3`lηQ[Bq n7V"Up@hAqK=l#..ƒ`wpYM˶]2`EaOM]l-‡7I0×NCvy_~e%PbIu=pKG5c%u;=7eOCi x*Ym-ڛtk:穏WrBF~2FG4yKV$7{s^ydp\M~Xq" %jQa!Tx^|ul}}W'jVD!ץy*M߇g]ef1'g ELɾ͇/uR'C\U8*vۗy܅T:}z=%Z zSB,PVWTE\$xBX#w h|A{sFr2̘U`7w*K8 ]V0G 7%6}n.}3BmP:옰Vv)t /7>Q@Lgm}}g…E h!qXgA}b%K:Mɿ^q`۹!cǔI%αLF@fi~fZWK]ql( ` ӘW)u=)H|:, m+'Vi]UnJ'.ˆ1|u6jVm4ħ{UämYVwU5b =m0j۶i L-T+Yq1Ү~UzTy|il-Q$f&O.ĭy 0-c5]IEyw-]Y,縮B1,lu*QQе]E'Ui\jGZ,Up^3fnWienŢS*6АȒهO^-ɯS$j G+Sa dGߓ^D89h;@+ޯOuN}n<3uؼ 2)5*.Y o_Fa䫽,?_0߱Ci NC}r$Y_< $O۹J[iR6,hXt oGTr=\`iZ(6/Fs=: hZ>sQTBu(EYQ;qa&$\Jqh4Zg;HK]Qũl%s`^-T6妑X^fwR)pY}PZErbV j!8q}OǶ%A kT(xADHڥ@4F! 
UC!7bznB`r#/L=+]"uƈ];_Ǒ}ܷ6}x/< e3Б⳽`g3# YdjP/~Qj; P5Ԥu|sMZ,V8Yn[+E< : ᓶ6R+^þfosbez71+rq}ΟִLܥ-܇I͋4h,Ia1hBo^U<:Z;_N,0w[KBXiA/Zy]@MGCn񊬰$O>XUt'Ű8TXmxA'xJ ]e- Bzi;|Jނvvn~ebc+p6ƫk^E5]< gLQzd6L)Yh7lHoTv>oyȉ$ӋC\f?Hw$<)ǥjX9ifeiJM6}vgDUTKJ4yRaә^"QGdZ[Kxͳ{ q:|7r 6LS^qIz&i9)i}.vs3cH3F+ChSEMՕ<;sP毁 ,E\F^|Bƈ_c[XUeA \WǠ 73nY橌Jb%TsI~ㅄz.y/e @xI 019_'!ۛRf?;Uqf7`rC!]xSԩqXlBoJ1߭K/JSt}wq  AZ(caSM*np@隬/b:q^Vr/(iiIKn0^RS{6_iOQDqĚvbi3K7{huQsnSZ!nИmy*_B)塣ā+y mOYOT4XpҤIOV/UGGW 2Rcyr`XEzZ/#ȁ\.?okYiendstream endobj 80 0 obj << /Filter /FlateDecode /Length 5566 >> stream x\I\7r#:tqzƾL8$h"Pxc$Pbj8ߝ{HɑDFaIt!y!_o/6gbgzz}/C2DS ڻϾ]\.0z[Trq/*wJBc#f~]-71o7\XֹAbBDdj~(ퟩiC8Uz",(_Js~\EU0hTj\ f7i2A;RkvF?B g'ؑU:[K6Pz<#A %AN"?|&\ܳaq`O aOeJ6{[pY^6.߉F =G⮔@xz&DՇy^*QYHK&sr" 3jHAK#D z ]:4"thQJVsjZ:Xk j`tx:`FƠ!`Ihe|+&D(us<)Dw" 9GHʙ ac\~Dŏ8V9+ZUjvDt0,(NZ=穞+k?JݜHUA0)縦ū<ޔ<0vBa9eby ʝ2;BEHN Jqaca #D>[60.Ngl_j}Aw9'zE}hI- YldIuUBbHKq$B :g\Au&`rN|eUh=b=R)lHAwy6CT˲Icꊛ2p,Y~[yw  :q4j;"ߗ1(J'՛G8abgL$͕-Pnp PJs[7]eİm4yUyW8nK[4KM,ogK>I4 N6w֕}->jF$B(~/I\-+v2l΂V(TDexcs]2qkX-9Q>j޳8 xI!4뽔@9PΓ91h'@BJ=z;5P mڢT1}"hi>S#!z?~i`"ⲯIq-Xu}"Ml$3܈%+ '8sэ=Q8 >>pˏ(p@LC [$x2m9|@n i ڃ;H Rl5< J<]V:b%8>GHBIsdV \l^sɌW(]7Pr伾cgxD8c-dd bIG34YRN+9vsU'dF jE$m\}t3sL36t\Xޖ9!3HŌfNp-/:!$]ѭi|qGC7m:"S !]]3!B՟۴ +A9vuKBM3K 0<3E@;HߐK'FkH Q ҋ']tOhs:Ց5A=άk[$8 p@T9 _+tL3.8<4J* ֙ב;?L.3h?TH1MFd+{Ȣզ.!Hr#FXn pR9jLJ9M 91{SJ4N0t 2uN1:kr+Fsc6KHly n]H~Q_NR wwt>oN=Vv ;2e'ޕ&soJ|^翔@}WWU6Sd,;'xk” 05ҀIP4 X-Qp x`Ң9)8̩?cDV,0-}T  A4HGO'aD\TXRL{OV x6>a069:K.~d<t<˨=?H7&y|[oJ]WKKn{E(Ъ2c.q_}FP06~LHAC=$CGRTHlVЖ*0iMZnfb@a2$s{"RQ ą8at7fq a֧YM5H4c.V.Ճsވ2چZ%YXMZLlGfS$\@Q)ٛn]w0 B꫾#yQUep,9b 弢mܚ((ULDM:ЭoO<&D tO}+Y%8yMuq`MWӨ]kx4A2P'|RܜOW@}50J.k,t % RE:.r0_D;/b3S9**qKU `A!6Tz^N_ƅr\_WsP— H( I@$0ɻ)}P$);$*LjHjC{e+qw9X ˃5LME{Dat]*AU> 㹪.ښ3ӛ, TŒb\Ձ0>Rk9SU9:<BL\.!B FS}e}>u )d` *IݪZ*3wö4}Wt&KE❣V_0?FUുEBS,yݸ* C&Rn|vmi,"oK,%d!`L0#Wkd_SEʋ<*:/OJSg5ջtE %9socc3Hhn6Vaul{qSyb \ކWC_!oǙj0? 
:Z  81T$N/A֥ɢ$6KOi˩'3-"RVOŽHOFӵhB-Y!2FbaLI@FUEy{6:=8̟peȒyf~9 05tJ.7l]|JE+1P̤v'̓n8y<24-RMV5;:xmu`oھぃG?d#<Rr0N5l̥h>AGO#fsZbt(HCNBݜ! &Bbu$p[XL GEOMxi*+; ЏggPtZM׾N]0>]02pk2nT4npYOWiOnjS{ OCҊ)deuS",'dHU!uH Sу*ޖJPV!9|$dCc_f?5Lj`T@K@Pp7hrJ=]5_7V:d{鰰 K:.$aކg5z=`S2ZӝO)ԝq=N`##{XFǍi%Tacg#pA4硅}kwRT|5]xZRW ?h*p=i2lB GKDvoJBPlrǁ: H`0U >Asf/Ispm#lLx;VO9'3 %s|b: ֟J& >ڼL9:i y-π7IwH'(ҧȹg5"Be5=}Qn,[͗AƤwtl.e''$Z15W)&vY!qȺ1w7 !It)1mS7]QMu]0}߮_1,)rSye<+Rg }c)nTJOƮeWң=9ɾr͚sZu^ºnրjZ@]VگK9(=>A-ИA-].~Oi܃ZtU\;A-|ՓZk6Հ2{:DϿU?OY.wS %3'J@uK\LYK61A~ב$v7dp%6W{N_gnWLkByMS{HYBY$z$'DAA}O28rG[C7\1h040#s]%6'wBqƴ̏{` GUl)͍ŋKQ =?Xգկ>Vy[S^N'F]VrZ9ހanZvv3]YLŲWumU`-ֽH R\I%o3Z~m2Z|ϛvL6 33`!W]CiJĻm2ciӁ俞 S S}sn S,?WMi(ͯK}* /CŝqՖcj2D𘆈6ELQRLϪ{V=uƵ<xMz.0QP#9X O:n`ǿ`wy7/ Y>1} Q>(tX7BU᫔fWy:4 SCb&m},>J\DT[߽œh ]NӬBW+`(]%<2kN3p@uvQqY* wҗ0HEj?g^';5&foq;;n{=肬<^ }ev"}L*Q|vo`W0 kW)s)5j05٫+endstream endobj 81 0 obj << /Filter /FlateDecode /Length 4811 >> stream x[ێq}7w;{  X0 A rHF_s"zK Ί̌Ɉ?F/wnՏWN_@2݋7Wە0d_>J޽>.{71_90~L-{J=C%{bIcZX1=q }Y9=l{b?P\cX㏇0Րnc[j4~䱥wйjC! 2Z|祗/>g}j+!THj\kk9I[Lt]>je/(EWjb5v^u+8 []r<$r4F0k"Ti5cvk~%z}a!-N=?wLȇf}bU^GU|Kd=.zW- Cqy҂Z&s%)m]Zm9V/bx\?.<^Mw-u^G_h5bk8LBߝFcVH>xC.Ibt;Ⓧ;vCc8DfvLlf8<7,<F)) b#hRsrFA%Am (!]BYFT/BYf:0݂ JbNvR H7r0;ڠHro﷣rCT{߆13XbK)?=3<^ۇeV<7~3f<]Ρ}*9ĸ~ :u*6C#쮃bǦ6`BKk-ܥa}:\;l0?$CnV{Uj OhKq'F?^qgdbCt<*JX0u qyT0L#.nyxLld52ѕ  =5ǽ4QKO\RY?-3Qod@Wz"h8,ifS\Rv/x!WLx*z^!tR^h&G ]8 ̰,;IŢelTkc:Vqpiyɀ+=(tU );qo[[ 1@bw.$<1o|` zv /ssGV zT6']-Pv_q:b(yMb41n87K\\˔S:Akt;'kYp ;nTJBys ]<0e߼jBs6n}o“ކG b Kd!VhS_*i4E)Eؓ{>2 R-3sq(߃ԇn]%Qb J'Hav*KI hؗDSGsTITikclr,- foJt0!wm 1D eK,c=JFQVzx7RIli#˶ pzP9j'&CЖi-쬢yf.< ZȤNwcC[gK&/B.5OV}7^UK]`Aьgg`$A.cˢO\%< `Ahe#p (QWD7:>!84-( ܋DXEcW" [XvWon>Lق]Kbax/󯰅82Z)YJ^v(CW2bZoMG2wmx$Tܠ1qyD'`zȣżoxBTbp&n",)I Qȶxq8Niq Qc@0șhOLv(I1@Ģ{зDr;8}¦88Qg/A0ձrD I MgcWUs#Uu>ݢ—'rA\{Ll 0a!½FmDez0Ɗfl؍{>7鄱r O@'yXid9Hl2[0w,4wXh(QԩmVQF߫_T[q(KˆXh`-b &JRYBT]A5p8 #Qi&;B iOrCܺ7=ti6)I @KNԢ\KeN.,Q HJ$I$T >|YF:ˇ$/9[9*HYA;r$V+Z R٨h.H2B IvwIJ~Cdi5dɰeFVL(CTS㨏ML7R #ij<#kي`Zi}K&|+5^FERƲdLx3't*! 
6Y"1uW]9huR$#gy=e6sAفo_#S ?vIìG\e?e%yQډ)"n0FTQs^lW9 !9- g_ qavb+*9 9EiE Fx@om{WhrɌ19=L =x ?A HsD~Nl/7Yqy\osX󃏽+7NO<=LXQ6y=;Ѻ4/b[ T8tk3ԑgBO_ΟqAJ">Ows{HuYX_ Ru7$+_{W"켯TpL>)}9 +IVmAjx2JtK&6 J.HIA^'ޙ]HZJaZ􍬓ӨZ {l@&V.V:TΈը)oĬ -H)<Vw(ċȞldg^o\x @G^k8!!GY Dbm¤0>fݩr^ R /jxKrY+XF #ɽe~{䢓&!1["$5- ĖГgmRePyKK_l&@m\yl-.]Dh/փ"u0ݯ0&&oS!5$0$}?d-a`y YL#=$w]J;+O eZ50b-cCeچ{y <.=E^ӥ8TOO*f*7yiN|8uټ Mgal&Ajkz&X77W&4~ϴ_qf,|0GoV|?KK/p Eۖoxs/3ݸ38E+cItê t%ulBZtsȭW&o2zxx77xxP_ˑWg Xr4cHl$LDOzlcbLH4XN{+ǥ */`} ֕BB\Qxغr{=,(<Dy@i?^r~o 7&Wc?daU3AK(7"e1U9'4"uz wlgUȅ,?Nn!J6ذb:̤ўlS.~lĔ&Gq|Xfu@}^|ǿ,@dfE^HV.oQ $¬֭yՕW#lt!?ھq$G2F9> stream x\Is$u#9e9J-GC3rx5ܚ3l^ w < 3I -;}h0 -[:wd~NƓWzדۓ:“]:9|ůw>`r֌qG?|v3!bwvGcw،F[3\CS)q8&%ܰA.%7|7@Z1 wy 4e4D=I8Ө(߻3O˶÷u8yOt&}䵚8eJ1am|1w):=<!E~vkBmČ _.?x Tv֯1y3WG{|q᦮lqxzVIQ2]Ix`.ΪiEn@:+N{s`;xLihENPyuSi[!N<A(人reцa+{m` m#o;-p٤yaLAT,=̈K~x3.T״n=_z!(zu8OcF|>LyA#,cL#P/u2TR f.B)|.Ǥ;%"NDꪺȋdFR u (ԍrH=[&.%uۥ߅y/ܛEeIyi`1$8C n1X~-*orUvՓQN%>FlAl ܛ]sQD {f!DV"U22Ds[e(fuQBZn:,~ }X.;p @G:0ߝ !i]9޹V8&i΢~䬁s "`zը@zE+3kKT_-R+"Y<]]H툶HDzฑ81NT|}Ք$,?Qӡ; Pk6okSʒfFIVC3Gn746w?me$>V ci9ubvϬ#?c |ELf bIJfe bC"qlC|_8E]IەE t:0fJ3Fi~$tx*8);o/4JQPdB]m I_K۲0"nTFZþ.zh{iX|m0R&7Fpz_+F:Y/zFLX5FSGH&SN2n@`i=&ICa3Ye8<, 'BlmDD 1^=Rpd{!Ns8՘A0=h 5G(,+7π!q;)SO>-ɦJj[p2tZb97*[8obd @-F_*46,Y]'ϑ-%!ҎPBlNRYR)iF98YO+Hܗ"X|W$遂`Bq+Up?Fh#oVC9Ƞ@%c A'.^*Ӕϩ>Lo3Z|kt^dgcvFr)pp.SBA,5|]^KH('jd ~sTۺd4z+εj'Ĵê( L+)FL8=b[=;)QasD%f~Y)'k)%"M">._CcR"w~R~ 3!fp]]tvkmu1qV`1R<:6K,`r7o(S [j}3.cor[Aܦ(-} +qc[].Bs(5@xS ȉM_NZxªZgoO?id,cjkzQ$k^TJC]5f+򖲌eǶv24QJ Xªͮ, dU*RyˏQW \82uBg-. PǩJʘ}1_|A |$/I[!5V(;Uչ i{B?]5Kd^[WJ * ٟ[c]NhoR1hmfE":/',\xtZ[PMek`Я: &+`RvڛJrT݃I.Xld5q||}nhBތdZK.OIc)0/_Pɪ>n`+#G m:. O1J&7 Y0nA(jz_vnLzgxëvCYD陥XD ӳ(-_py$*hP=q T 8g-B~( gO-Zf>moxy[RhxeP|SzOa,@cIx\(.,OY]usMa8u$\kZJ3k%wK=YY afVx)Y+eo'i 0k}ʀǓJ3хлibBDZ)D(WH~<ʍ7X/{,ͥP3ϞUIUfS.RJUsgj"O 멽t|>b,*4]#&4 ȅ0*KBvLwv}ޕruy|V*=ʤ$)3.}V=c9V\ه(r6Z bfpEH\Gg|`S 'Y+'qT[ftt;vbQU#\&L3rVyd`e-,DmJeMPH,9~\+[c(v֨fai2cs'y=&{uEkHG‚eD-](^~T}]mPSG?-Fb! 
\`a>[$!zqUn[yy0T[,Ѷ`:6>V=XjtM"<Fcְ;-~l#xT]`ty֣#زzu[زeŇʢ[6(]?۫%bx$,{AѭǿgCh#j"mB{8DWBzZV DA(ƶ@{0nt-^^o-=d.U_Kw$G|P/ ?s@Pu"F.ݨ3 ]}mY>7.\D}rZbkJm*C{6/k)k_ZYlV.1J[vZ L)O [i NloF.HT;r wpĸY9&kPkd.n1n{.iJ.O fk%nS^~-vx[,HդS?2>;4LIͱ.oE-Mb/1K ?STfV&\ p< jя5Jvo7 .|&:aUtS{PΖffŧ:|Op +[p9gǘe>9}G^\72颮xM)kWi -U6[bS͕ŧH[icl(žC{˸>F7eO~P+ Ok3nz>+RZQ_ZcI=TE%&;݂9FQ/q4^8ƂjFH_uÄu51X\9$؝]Gٳb4qH!c9H{%hssJmAt "GFk~'r]=XFIR ǻ5D_XgKpYҿQBG%Ӓ˼C%tݍ|1MNg57@0\Zx' U[="ڕ/|AXʱ-b}BӟD{?|S/V> stream x\Ys~gG4NpSR7S]KFS0@c{E2U҃C F__w:f=?f/~w {|:bG?NaTӫ*?֦7^[i{iwݰ^1yw1K){e<LnOEoa(^;߽֊.ݢLqrXu@JfLa^ywKWa.t7*wYC{#`:az{*a0h+KavRÆg0=-=P˘r8lY xH# K <6!SBԻ{ɇ ,g?/ J; k{/PBOiJ陋v\ oYwJt,Ӣ8+pCK*/✧8H0&ބW4T5)LbsG{4H]{XvAF)<MvM&V hSt(+>ǙS(ryNېQ# 3~Y]*Tq9P %\qMcMU_fi!{nyy:/Oߗ/."gE/ r5r];.lٵN*rYB~}!OlӋBS&?Wr2+ "\O+YsYlUo07{!υn]4;olnrtMPB^򦐄 ʾ̳u7D&{y[h:'e%<};PB^|U>5_;ߥE1MQ^߅ "#(iͮmላ)$a0ugǝ֡Z[5{Qȋ9oN)sT7UvW8*2)Q4,W-\ "hsfa۲$f^ X1mF9I:#F׋ߓ!ƐQPVl}(8GVk@.9\V Q_3~,a_]c<ǰ "&'A2 {iM |)ױ<-SpJ˻lvF*U!s`d`M Z|"씭 3K؞nb;[px8`i@>MDDN7kZFb:Ij>nY(+dNԁ$٠qңTgL ō`v[P_e!nqn(z\ a0sz:qɃu lj)thrk8x+9帪^]٪ɺ2"]O\rxjQ@ׁa.#%5@Dp po?F<@1Ku,+LB Aq5͍y kD*1hT& I_v% ř#LmR).ePa`Aϸ$\ װRlע絼 n|F ٤W`U e|@f`I̱>ǧ..`H҄Q ӽcJ,.W$` :!Y1ۊMqa#F12N K#JdfE ^\-q@ӆOdiu0%4Ut/ 4 ]{; {>(a/lڠ4êf\Ysi'n5JV@tKT*, T.Kƫ`!.1k崄57۳'oyyJs&S3PCpM!IᢐmxEY";a ŶE$SxM n0M! ӬgKMQyRq[pR0^te}( 5c~ζQ;Uԡ/H.B[+-P#'rGw[]^mrQ Xs 1zoTo3H:xUa䁨ke:[Ō4 .J`JNw1()jM& ɇ ;<ª^SӐQY.Bl匃y*IEx 5Vggs0ߐSH05۪'`G=.`U|}N%s{߀X$(o` ZB׈)9/@<嬍vr4VbL}Q+&Z _?Nƾi-aB>4i@@?m_5(`-7B-eKnCYi3krӷk;r =b0;T3hHxқU `=+ էѕI-xlHGk Osc=R(K.Ɋ/ 9` UR2QZZf0DK+HGT_vC5{oSlV4yָI AH K$tXglSgе'P$&IZ]%V'mm P5 FhWP5\U'jbޚʛsřfWS $E&AB<i[!i{$OkBz⎝jM$7C:@\DT[S:+ł-vBXCҍuce`{ҳ a>0$йKFw,}*~V%ŵe;=.s?40u^zPVlҥy@ASv{K"c1hCS`*Y2f2ic5sHK8=-pX$iWX$&+(YE·[\swaWNb,O0Sd{'מtuj >GQCGhBϸrHciKnMD}*[C@~rO')'[vy6]d?dF3=q2ԌƥgD};lWO}@E N\1zڑIe\G it?L#1ʑ6ǓZ$k՚{+Y7P `?|`QgUTO3kXVS(q.[d8^ ŴHK"Cu5K\[Nv}GsloKճ~B_<CL$iz:1ܔ1-~5<)D;q "tO+ii3fI`L~@e+4l>kEbo 8_q́kV2ġK;sHW,Iup lJX)(M! 
i BMPb;u8²>ԙ:yo4Fqz'"tm܆k#6.c2}oIg ek">s̪.t@TV$Ibzt/X.d:HhS+QǍPԻO>ׅ$gB@A\AggJi vc:m7mu@5LJUL~KywNG"4RJoaΏKPBK1U/ r`Hx*TIf]OvDH/~{&?>h=~CY xz_9endstream endobj 84 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1757 >> stream xT{PTW%ltBvb^ɴ΄93goyH"4 IR=%5vy*DX-5̥ACWѺ'y%]W,,ڮrU:Yb,6.nFً11qWq3Z{AG/7uKJ,cЀJijy-{ٿ̢}:]ל:hk> q6aH-컸j/zfp!gD.ͽ|}Y.VO%?&mzV9p¿)FN3.\ӂ!y(k*+ã=)WJgK<^Klx[-ݙw K(qx0h p9َCE Պ|ڊ47NtdʰI.a~p4Μ} Y-| UM-.wS[y{Be(Ұ\w8)/u*3e+p~?IQȃߠU8t`7ueg`?/g6"E7= E2}>#)D%Å=q*Y! SC_6VLΊ-3>c`ԕz 6o'}ΌS ]6t>J R*B0n @L،ZavB 8~~BN)qg4ȵj&8N0&TPJ[&oPhJ/ ؟Éu~H|?{YR`1VO yDDX#$3%+fARDz.||Px*۽^w#C)8$pxODhuU(Gֈ4fg5PeP&|l   endstream endobj 85 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1392 >> stream xLSWߣ>TŽeš'`CTtq Z-A)Lj8 SŠ-mΪSfwhtF[u5ޓ|{0E2yjjT83՜m[)$CmRMAb7MV:GEKòHcJ##KJ:KaJW|7fUMϷX, }^ќd\EMyJsrt@K(J.¿ɌzS>4KmJð)V"c>ðl-m19cSPl:.&b2l] \Z7pr`cmvfpƋ6H4- qXd7 }G(Ҹ8R&>Kέ'$gfP42#/ Pg&]=ח %LM  ^<@є'n޲\2YJaI. =|`g<&1*|Ŋc jel;lncܛt %־[Gv(T"[XpU6@E4h+uB9^9 nzMJĴ&7Qw`ω\'崴W1?UN{Q o^N"BاmJLjWN?} -Pv@,1kE xQ!;0y} nSXn]7<ꠠr8@ a22 )Ukw-V "䇃gkS n?aYW"< 4dҡ?F!> `X|19`oRSWįl? U)"|KnBP`J]m#i:z@r`@]i_K+Ӡբ;N?РJ8^>á/=ogt> R-h5| f$@?aZy.pHjw7@#UܑW`)-X(1#O#?8q-iK5wlG~qXN0]`({)?2^T߄#l: )*s[MmSx8}hد6*vvk+ޖ> |#4[촂B"qY +kD_9k}h5Ob!endstream endobj 86 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3146 >> stream xV TSg~1Tu`JikZ׺V+XwR"EBBX5% 7@X’PT@*Z:کX:Sgtlt93?:Μ3''9yw{EQH$ 5{dY8JxN 8_1>7f:R?6=IE"yZ^BJJHTM piAg\4EJQ'JSbA"6I(QV6cFff UiAIĠpiT! ZbRA7TLWKUAqR[􌘝qɲ 2 ~Eަ&SPMT$BRQ[rjCRku?@ D9 52ƋEwGm'hI9>g%K%ڇǴ2g6~hh}CǘT.\f#e#R6n r>ȥՐ .;:1 G'tGtu ^ P! 
Y&ZSA ?)A>r#(ZXM4H ٚ\8ʣ0+ϟyv "D\q#w%IO5]()|%s͐q6+p[^L7phۃs#VE.b~ #XG?kaKy?Af;E-eP,lGX;me$Ac|&pxf!Fާ+} x<,>zE~iO\³]PilY]mEF 63NN1ZR@ yF>E\Gv}U%092Fvr +ey7ƬR(12uGgh: r:w$EWmfڈr]_[[ߕ{xN`z?ciͰd>,fIuG;;⭭ T?jnukG΃Ǒ?Z\y̍;É:ؐ}ͻ# }UG[Kҋ E_7nU$`?\q.&-ϔʶ@$4bm;AA9`tm|䀟N5!߅QCA1LS@c;8 ߩ(P\ rz > 9)Ji40,;^^eɥO?NC _R ώ0 Ki<`4jQ/ѤGZB%4i;[IT9Ieg~=|we8BWϋh[gw_X2$~*qG*+1LnFKymrV4f+I$ >2ƃJ\]KMa2| >;9PC!!C&)qR{vT9> gۮ(@W˕ɰ tOtj+R6V3TĊ%JWqDO~խk3'Z`:Zamp+R3)Y:s8bY}ÿ~$_gOHF~Hx"b$kI6NTelb `5: \P3C: 4\\f 3=|g UxCCj l2/o66lUbY0 ]Z1=;Ә ) A=bEy.xA?pqqh~l }}UgjAg >`J2']C>#^X-i JҮnWĺqǃV`%2p^vVa-Uď%4edZ:6\c'MFzQW/ΘF̤$=SQSfwG{}7s8^Yl#1>аQX4~QcN ,|}^[}pR G.&\S" qT#Rg"%d̠ѯI]}_h(2B!-e"0 @61Gl19t}oX0Vh]leu)ZrpYǒ+i'69x5a) YH= ?Ok>~k7 Ϩsm-WJX £f%0DR)1L\rK @V}7FfN'rp2,>4=c|pKʢ/.Rh- 9u53x}bc=Noẙ)s7>Wcu[sCg"h0?lƼkx3|GZVTОqcGwq^OQӣendstream endobj 87 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6642 >> stream xyXTW9'FQO5g$Dc%{GiR#003kfEPBT4jh5hl71:{0~ͽyx`8zߵwcHg/Y1~A6ObxjOs[š/Hll“fDmp9yԩS8=nT癁^a[=݃GzG_W{nv97""ݷފ>68Qc#F: y^pPG^Í~ s^+,a3f:7l^"ݣ<=lk2[W_vʐw2rT1}6> ' 2 e2Øe̻r5f:bF2Qf4Yb3 f,˼c1f!3YLd>d&1#Ƒ b3y2} Ǝd #c0< 8bb0=@B?z$$K=zb.'ml 쥔^z~9m>l>nϮ_{De=F 8_Y!#Ǐ^sR:]2A~ qj/<8/vI`fqaM]L8}1ZɊXPRgǏ@Õ,s@c|_r.nK8٥', `e.dl۵Y9gf=ʯ9D {eY>h SD~7Pz,\nu"t^+}쉏F ӝqk7D/_BEU/*\>l`کvZimjx0)C&6?{_>2 \CϠXꃭ{`?STUrXpUlv Z6fqS".>483Ԇ@,ׇhؽEQJUBJNr?N=d8fG= ;lҹ- J/ [YGG_hF,f9fi31N-a&躃~ ϱd8_f\岫UޥkWA54il۲[~Ferك pȡ#}bK̝2ӫlo<tPFޕ({$ihTE.i)NCG2\Ax2CNәVU݉Gp^qdξe9`:IR !+N VvE>Yf:Q_}Ɠ} 5V&{7t4y:r4O`>c-g a?:Xܬn#/_~񻖉k(ܧQ2c.|7 $8/J[06LFF"G[v}?Yn’0$ehav-7]:_S|N|% ہKh9qdc %/C+-zЗw?-pf㫣;~u[ݳiZ7GKʡ T [ YT $&IijճZҷ(Z!ݮ Q}OcR0,f?X|۷A'9(xمr6g|Ny] O > ;CVJzYR"`?dDP.Ab~c#|D&;qljd⾆v}J42 6;e6ǁ3<3a8L<*S|OjY~b5_Nb^^wΙwڌVݖ|>Pig^5.ғ҆ AW,ϳ5sQz dsyčgMIkm\r2p :kjOsvϮ6 yW*Kdh\[`ZWdUCWdj4!r+p̡4̕rq٦T&SQM6a>s4i" 4iiH#u \SU3PpjWj" ;#M=\ Ï`޺yGOZLVҤ6N{0߹{*sVKC5P5ktݞZ2q/I9eڅX.t]uC;I^޸|f8Uz jES9(cmg_};1[YK˘b;uF: ;*] ZJ#aFnqTVBR㾉wTrC-/)A%6"=X(]^QO+4VzUyVxkm6Oex(ywc QAqC"N8:sDW .zEgڢ*Yk5#1-WNaWBgc^H;|qtcjB*A*xއ g%l#qUQE->=qVxmL^"t_ \k9sF 
xUDTbF[!2Hp`jJiF(8f>${68׈W%j'p|pnPph:U쑑Aa<Զqu|)M/*bG+{7. ЦDS4`mZLxꁁk|N\A2D kL5I3-mt KO4EˁץŸʣ$k+hP-Ɋ.a'_?!HؠFՖw Ex"y$Zx2qXޓ,eL 7 ΪwUjI@yw1h8zqj(B(j+:-DqSb6#UMWc:8$Y퀬Rx ? zNP?y s8hONҹ0+zbnv8 "'Ix Ppj{rOFSkw%$*Y#a=xh A#DQ>ʆǒv/ǣy'Ed>yL$Tg%p.7pn 2܁o9$W%Ó֞odm{$8[# R($E'@QY][XV]f5CN B.fS3r[K08]?W fodyt%8lpJ I<*p5mo\O:~Wb~OGպB<;fp`DD?.LIm!*˚Kئ yƌiVD. (48nK]pw7(;oF/6yB\rv ̑Kcpaߎ\n?FXS> Rt@i2_wM O7'LZ=g(ye6J=3Lץ1^>~v #Q#D-xSFSY|!W_ eiyG ˟e'"AFow+LdY?|hغۇ:~U); xUYWё'K N}v3w6g :n61E]Xmj($S& y!#b8ꔝ)5塹CqsϞ4#a1Ym67nƲt,tCQֳ>x黟\5gmxePPxxPPexmmee-.4SqfM ocI>}*߁(SjN*72WSJ(&BP3@+_aPر2)vQ@}A2b;5 <='y(wNأݣ(ٳ\ҲbҢs5OTjk;x%2էfRy3GZN`%QUBD'&!QW&%Q8tv3˓Bzp [և%C#޹bP9ïz8`g>ڍ >/3+mh'E}zmzص_kO;yսoC%x(oB=سpɳ!5ľf ho-ON֨G2QLЦk2 yUr{t9cH3$Q5Pa/Sѥѐd^;@^S G|#CO-.LQ.~~{1D|X<^9GҤ4i\ԏxPrge^CO2vQ'^@pO dKqsik|z\+tNyfŶ>$44$Ohب%S.2t\3.`y?TDwe%=AA_ݏ?LգKH;j5WF4fAty,Kx| o %@&*[cu:pzOOx<1M P!Yfc }Aĕ|ULOdvJjZ-pؑ^ՙ|K \VjzJ\ʿNt$g+l v)& 8k7¾ؿ%ilH$ʠy'{sQ.p,X4pY}VR;TjДFyLd7CΤ2s|O##%po9`-ep`VTI>)VkIVAJszg2C Xί*h秫Zf%/#6- J j)j^D?׳4Eٹ^oi? ERSY BwĘVC} hw Ge+Z]PbryGSsۃs!FHJWܸKOakq),g,ѿs|Չ}>Ny?ZE +A!,pVuV8^[ԗ[]Ÿk>ggU_'O~1~w;1C1x;b;WCqCު*d hBǚNww߻S[?m9YE${"Euj`T@Xv͗H~ɾ{N߻7㱃0תO^+ܵ x}~ƨZs C b|fy(Wސj6n1,(?uhvw*Э9kqYtioDӎSq>SfbmT.!en=27҂d&-TV:hD1Z|:u4q5n婏-Gl>Nd3Rk#6D.OؔRlL+'hHޑ7d yEMǮB6eGyzN*[C7Q9ḩ:/6UVƘ}S6M:?{e#O(á2.vN?sBvoE8=0wp˽EwdWJ~G*[T[B'l3i>;dDx*=#e͹.r#/oxJU A wѝ,x:j6oeFsoIUhfl9[Bﮕ\R!!ZO{z%P[DNв1H6WªJO.(.ƠLx^_zR%cNza endstream endobj 88 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4815 >> stream xX TS־!psBw/mZ֊38 U frA9L2):#8jj}jZ} tn>ND%$Vm0m_'u0Lx7V ڗJG;ж(H$S&/~ o;N3罉ӧN# q\ R 2oqӐ*#9!ˊgL]sO22cXKJQmpnur:D}W"5/}؞uۨ;nY_x[Υ.܅CEw$HXPZĵM'I^ Ym`uCAFak =֛,KSq|7(v LQqu}[w1_wٳ[uW‘J^"6K/"Cѧb٦w͙aC#i/}n9]~(Nj]1љZ7-hf31]timQռܥ{''6mn\(rZ_يC*((Ϊb2V<$;(VU]gy~[7"̚٪2ҶWBNVA᷶E 5%Ȩ,؋ 3Ayi^?0j9ix+kkb %'!JC偒jڿm|?!@xCt~C;;}̢o{# Cximzo[[u m ZX B QW{\ \:^D>H!xi'Cͻ$5RH-؎AgeBVkzIgs4cS$Q t$/;STA:"Q\1k_ &/P:8tXy G{ZU<4Jw`_ HtºN!(%:!="K⌘Y}$3gUkS-k<5xDZ` L0i^{j+3Ps@"9HJ J2%*nФ(*T tU.D%E*IS|[O)qG}L KE&b)~;t#4_,Y1{5׻]zJ 
&<1nqcBTNrAAʞ~:Xurk%30GїaNt^5$`=@@Ėk1`P8URZd2ܴ1Qe2.]_CgP-<`C R;1da T oo;Z*K/ߟu6Τpd@ 8Y:HӉH t=xX1ȣ `MiAmR7mK'DHu/ [/6\4(gްjXnj;{ر3lq[Oc*C׍Ćz^[T7#7:ybʪ#6oo *"fDRLG'*G!byAZ♙/V󟭪Sqy^UNB()3bM%Dٙ{ğ=V^\ז.t`۸酶WX‡p5oW0\!x-<]J!t X{sP-p'6P'Mz>U2R)U)뫪^3$l\ҫ>W a[Mohls>ok#rM;S񏴣9QB¼j[7~A"Işًۑ=j|w!|9&(ƲH@Z);%}Ho#&+բ2> aXHf :(:1M]의"Pl~d'fi-( X~5AaXXO٢e'q o?*?2J@Ѿ #*g<,*IVmAayĿV ۰=.'|̝]xk=Pu9}JZ\bXn}wʭkAGb)I19i<Ƣ<q;1_b܈P`ttuH~u|K:[M36f#|A4^G7ne0$ܻP/C Qi(! 'ICoϞR yyiu5o b1xgM7cs-j!3:GJB@a!وv'\# ٸ`mSӀډN3`Ͱ8< ϱScp>+ rx~`tpsTr+m d)qq,hMx:1Ԓùq=sJ8M)3A/"LP-*c"vÉӏn=ylj+knx9vǗiR&q%'ښ#W[]yHe^ zo"1|v|Zcn.KՒOԪ+ SeqMM W/Oz*]tuc aƝ^vSsru+1%*jrK.Ưe:ĎtAC;4 Lޮ4Qɕ*#bqD9{}͇//^lEy@~@4<&8q{Kl<1'P#8|߳Əw4>' VQW[Y{Qx/p/|vc3N*#( 8ì+K2i(@M^z'O3]- B$zKnJf5DgeFQ\nendstream endobj 89 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3014 >> stream xVyTT "vU*RIP\1 bU`f 6Ȣ 0,"6IKi^fv*󽲬|0ywf~Y~2e#XBMwI ȁ7?<>kg2Y|/~sZbTDV9aDٳgMVNQ Pk#cZz MSNx%Rԩ)))SԱIS#^8YT Si+Ա29Y  OcfL\$d]zcX7cb=L>a^gqJ&YŬf&2o2LY3f,gL,3<ƅgN˖Ȏ 9{$iI.\Px+~3T9~I#cMrzɥ=9FD΋L8DA乪%&Ul"('&HRo]z&kA@Ԅ<>=2̚9$np֗?pJtvޭu>7`^^?Z\jECv.yNLy~AG7/GdD2,z̜jsdO0:Ld[I@teG 0^[ ՃA 78&P-twLq1`߁1w?G|M I(g{NU $xe*UݑS08;w~FNaJp [!m:B')W~;+_3| cIzMRnjri4|#f C|&Kpy #Ǔ[ Ħ[6H5/BW݊kME ˦ UCA5x'*h-ZԴFYV-+r+- BQv'V([_By:pn' .9FHy 0H0M0iai/^>sY8fż Cvpƫ8Zޟ?s(JK qf TX`w-ol vBxCtLrݏ} sׯ3߱Ѭr>p>sE/+p]\nX%!$Or394Muz rDF' q!-0Pv Kk9a0dWq)g8cC{C1Ȣ sgbE16f-{'k[nq~YѠ7撕 5zqP}P/$m)a>RUbKI5)9mi™6Y+Xt3ovxY}iM{m5rV5 -ndt2c"rc]kMQVXC5ˋ 9"_vdZG Tm\D7b+dL>uoɇzp*5L&I42!S7ZvFXj⿐ZKv*>Z+ Ww8sY&lh}Ad/#5v1EGX"Wɢ2&l]kN8f@-˷cP3:hڹ7R&hܫ ڱ>d8Q싓pҡ]a2d KBix>#6;ڭ}Tģh Q9s`!;_; -֩h Yq*[d96K]g<ѵ 'Lup+0Ur ,8Gzzp򓏺ebsUö]fC8ݻwXU]N4^]SS"TIZx <߻ !àDE6JE;8jg…Ç—_u?S!##WRv T$s>o-uZ_ʩ;ft<`Cc8؆ޣFdJ4w$@:脊fC'<8C/.ao$b4Q#]yY&}I,Xe0&7"32alT5zxn~Hg;dor, a'mu˵dU(ցAa D+Ji/ΝFdkwnɿ\ajN{ ^LPK.bQQiP^^ԺkYC}} Ad4n $@(+/^6 @;h BQ$KbO 'Mp!5b**wU0;$zI-m0@Ka't [o C=8}e `zq'^Ɔou5kqq꩷Wc8onKLmjiwevk3v79\ӺD<u1!*n6 C!nZ3B , e ul2íeÊ<<Z"endstream endobj 90 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 324 >> stream xcd`ab`dddu 
1H3a!#.kc7s7넾G ~"Ș[_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*"s JKR|SR YtX޽53~OAe=rIa:Y˺v]g*{ZG\Fۖ;Q> stream xcd`ab`dddw 641H3a!O/nn߷}O=J19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU8OB9)槤1000103012)ٽgf|;?̿ܥ;~3>,}} |g|Waq6;7<_s/wt\7Xp9|zaendstream endobj 92 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 324 >> stream xcd`ab`dddu 21H3a!.kc7s7넾G ~"Ș[_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*"s JKR|SR Yt޽M3~_+ۻsO/{Vw\B ۡ݇f<ʛUg+[yNƾ|Փxxe`qendstream endobj 93 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2650 >> stream xVyPSw1D v$ H(ں**ޭ"B !N ȑA@!xE֮e5Zkc^wf_y30Iڼ-e$u6Љc㱾? BHb^,\ReS$R T&x+2rq`r:E$T6 5R\a~1JQX-}GѤ-}MV;_(OTKE)`8]' *\,6ehjfeX0lBRk2gj$qD=U&aac[mX v`X [b:l=- یm^W^`>X=Kz4)W]lGeN3Uĥs'SMY2E=PWMAg %sO-]n{<:z ?in6SPg5@ F79NqN+ #{!NŻtt^Cʪ,]wMe|B9z=I( DǏQ qN:4TQ#p5r?%Mh!$V^e|VS-Í"+Πc~7G_=?2 o͉^4Aћ?fh&}:T=/IC(([/vgF=|C=u^$:=KbV'.Y)ֶj)͚GBA>sj<>OB6奩 Swr=䋈k߸9d;u@?(Asg H'ZsZl]'8=p5יF8/z5GPa:AF!lR EE%2̒U\j-#;Z6eZA5n IiZ yBL7 yv|+{|*_ Đ4BD_&]29KʧwLDcE7kuu"ЃTH:#$Օؘ<-MR@ʄ3ND?T5:LAJxDʡE믽ec;ud%qvNNN5PROBݶ@,^#(kp6t 4]@/ǹXaE(i ""]11>g;O{| /ڣD}_¨2m>(U5G 4$ !~7zVzrJx18 6F~Bт'/ԝ1==AW(y q=LMGeD Eՠc膓ю~( L6EBZFef?PFo):1وy3Y}6Lvܻ!W=M+oLe:( ?m*ˌ y 0@j4~C' B=@,{Wl4U)zĠj( Tlw#ߟˣwd~EV:щ6]DNʻshґWo +{Z(,lBWV`mmw :fOgZDT;q]^U7UsSGɮvLV(.#ZoGV7{|x$3|;9(ŅtTf*dHp %8}hW>o$|zSuK&`\(H3ӅȚ Ŷ~hvϵ1~C:!PO/ȼe1YvkngؐG_ Zd;!6cm1#YF<#F^FMޛy٦5/bόeG2E2Af> stream xu}LwZNdtgn#30P6'h plRk+RZƋOE2(ZD fэBt&11L碋3%s_r|?2 qen%"\X,^X# ,HCŊ;^a6*P?^Cߞ*(ɉ3&:kN޾+g)c~K< *e J; jslIKO}:t1<]FBj5w@g`x=GBGot'y e;lDW7yt\tYd>X!{/5j. _HZK 8Up8( Hg5US$pP~@l;rchqIɞ]/@4t 6F |3I &y1* e']ֺ`uQZ& l6~^si_զ"~JDU(CL梔_޸wfe]Dœ8Cb{ꠡ! [j^h3 q++T;Lx{ܝU  5\XFnU9e8XUdnQ䃓ȸ__G)`vrgX4:66>>VcGiN-摿#j6.I95695<% gYcY? 
!J,Gh4KO||_3ft+F4I u> )AI t\-EJ3O͡v6i6Kzbendstream endobj 95 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4444 >> stream xW tSպ>!PH+s*}2IdB :Җi)Ҧi cC:Ђ \0(}pksz; z=MӮ&眽-|Q"wٚ a7Κ0Q'"?[%?1[9 G#ЎX$J.2&<4kyӂfϜ 8YbLTjК̄L!9h4&1.37h̴3fDdHeK>.#N453GBL' Z#R5ҴW32sa1yq;߈OHܐ>EL~n֩"+(jZ@AS $j#& 6SP[et-j9ZAJfQk*y*ZCNͧx/G/5zZDQè%pjN&|\DT&:?hݠ ]>>J%kh48c}F|wϐC?O;l0OͨirFatcfcsǞ ) k /X]΃ (܅w>hȠ avwe?ͥ+%tA$v\Þ H`ֺ~1$P: {St)B.'õCRj;YMmdtPol۽2|Gt'Fc%+=ӛY[V=p64b؉#N`WSKz5"(R(/<_ްቛDI[M |0/{I#q$/eǣhGx$]/y@)ABSt2(LE>KN@P"p^W_ݍ˧/ ˜h'/+fYO`0|3g鵙#|Xc9:IGܡ`ƃQY_\`Tim= Sf0N5Ra,kʙV IQ-ñ/ O|{W5 8^ɛv4^L@yz2UZ~cc(0Je9BY/B#sh:u&gc c 6&%~E;$HKkJcAd4ٺu-n"wכ6z$K5\}Xg EmOM8 n|H<'€^6t:ęR̵Lh> %uB}*Bynﳞ<oO; M(`w-x(H{Qz~OCukHMq-X@1A-_0SIЇq7f2a—6;ح\MiV1<!M`f2zs}B NHGa1,Ns(Uˏs^7 4_ŀ0Ol^א0nl YYu6?>'0$ֽ>o C2t?jxTI勵ÓT4T谝rG>^_ׂq1 -jvcבoָ2[SRW,X3 hSt!"z?^p<Ν)(_c.Op?|~/>p iΈQ-!(`hCp뻭fUik62Gw}fTY6>ݑX̳ϞoO6jxybjȰe΍(z7W YhR추#gO;ٽ+bͺ;e Ns{6&tpKRW8WrcՒ&7gS8X$c[L,ݑ,5`OG?hQs<1]6'6*9V@A QlкTSyۍܡgo'i69RmLd>̅7|j>dU4? J@X"%xs7⩧`7UGܥC}\uwgL4ě4{|o! ?=P7HHC"N2kvk!jop>^MɓVwrT!tCtRH 25Y._KKZrϷ'KB/~GX]AO_` 1V@^.qβ~}m<@%ʅ8p3 Un?+qdU M"7zM8]>N ѺXvG;9U73W j1\M Ɲ&fC^腊&Y` 5tz$ѡTAp;G1 8YXMB)(ore?%e4אDXa۝ّҜmx0͏S)7t#*r$;.FcoQεII$t lMܺdC!zωwN%dE2]K2lW>:w_-G|x,<cvU"! WiAYDmfJWƢ`4B]_`v_gu\39tQ+߂I+>0ZP/K̿VS'm3BhK_FKDbߨEh xM02+SRu OIt}`x:O L I=UF08ot3~ b 3T  i,W4`2wLL"uŋݥ?v -B BqB?[RLmʟ2QRl> stream xW TgDD-n}; &j4l"ݷYEl6E:PN$d&"?7,9[w[b#Hؕ락g9/'c$A{RwM`-kƱ#lL[3R$"ʈ萠`.\0~Ba!pjMp`ZCw $OYD~0sf|| uX̌蠥SLJhc]#5a1pZ _0CxDdt&6.^qWPpn=a f̚=gCf#̸3Ɠbgf%YŸ0Yìe1 Hfcșw.c'bQ &%UKFK;"KQ']ǞR v\g%oh]C>o}u蘡l6Z^[eOeE^ rUŞ6|pEwZz,Jw]3-I?59ʾ8yw/ KZXy<PڈwҚDIR`#1\g$ &Qdq.~Y r@I;/S~bhWl 9O!:DIFc'HCqqS2n^@lך*ޡL)rK,Z6+T(dr=.Q* ,tF xxy6f8wW d?=О pLZsq%+m}WW.n.U1 -x~QPP[Sw~ F* \'3tGFf?yr)!~NԔygBC|zq:.4J"aЉL8f Pv,+#gNj>]RDwք-ಓW4V X'__TyCc"6Írq@&O6e biZLA)Kq]r-vahTEʴ@.t^og'Tx@w*($Uy%Z+rn'Ѻp}4DÎ8͑-֦CJ‘,"*nA[7Kjovŕ}GF@AV=ॲ(퍥^r\1{1 /]6MU#Dg-/ P. 
Ԡou5n!lxfC?̐ ѱ*ooJU y%o7o8\J3 :lkqRspljV{{Sv(T'CrFWa(tտn&cS5Ɩ3 %Ltn(p׫)CXJKxyܟ@h.k,tvho6dpv,?aF=np8QTc[d)J=ҵzJy3l rIS=m uQGT2'%\ 13$~"Hʐ`C^qbh~Æv88+ K 4c^ΐ]^ ٘o\|P$i#'NS$!- ^Ard8A.x?Lu#JEE}V[US$oUlcq%5ċw5n=L78L)߫YQSu &'y,nmFeuPP),[AaY`~[)wt\y1РK4.$|oOJO<6NXwү^&"8mwhHuuwrT˃+Τ^w?A@ag^JcGkdjl!m^be1@D[mLPP?f /~M_?vkʍR3#NFG} ox1ZDǐz߷Z[6S|"i0TAګ]y8f)Ѳ!N~⨒ugQv\=uS88 '!R9d]P:3[:tuo'~31>vmj|u,~J(ʻyV(>̬@ n;Bqtyz;e\!!<*I%4@&ώ 8,Q eg>Z1Kdkd )\.Aq!| YyBMf ":I>P>q4)+UAAT◺M8U'eǡp+&JۖgϚW2:48]Pɴ?ҝ,/E7x!;?^u\?kSgREmSٹ I|uEC vY$>D|f)舖wFNt$Cik=zvS׊όw/ckϐ%kD6W~ɖ _H pZ;YOƧ)t:}^w/%Nm2ToL}fl~e%4s5;޳݈ \}yz5\?(D ]s7[}q.'"NO:}df"!*!k&x!IA@-㾊cq+n{G"qTj)> d?#S=*G uu1A<]-Rni %ig!(3H]@_8œEJ0XǬ˹M4bm`>?ܿ~6nWN^eN p=:|(eeּcB*:m&(=1¸Ɗkd,Y8aqfWaٽ[4_ht Ub˸ "S鉶pB X>yD5jM.d}P?Ȱ2d>q$[f׺mI0wXA+Z﵈]w~\Yr+)K(䵺${NXBqS>0ʈO+Xloe> stream xV{XS>1ʘ3ZVm:-uXN"ޡ $@@B\H   pPغ].cnk;>ϳs{{X<`̏ߵe3+~”PwC(B:U-Fǐ(id0DX"ΑFDfFE^*矏ؘϗ2ˆx4ϕGe ȟH◟{h 7`HjԪ"4'b/yDBi6n>?bݚQPʗDċx|ðpH,)r33ylP KcNlۃ`/Xl%c۰|l },c2{u(g̩EgYOnΏ?<]P01` \P{ =ǙŰ-8M -׮_*O*3IX2<={aeiASi >[tGA0M~3A"tÂtȆ""_&\ck!P0+ԨIo6j]DXz񼫨*3P;ۯH-ԓo!6z=MP5v1:Cu|=m{zኼRk_j2?Z5{r ]Gv3e Y dtT[p臑5pk@jl5nėq8Hτl,K|AlK7>`ۑS [2Iz>5S*+V{ЕNs-&#X1 No8z:Ξ);PAGy9k=UjIfbĻZ|>_!@I~̀vy + -) *ܥrȤmcgzQ^iΨ#¿d[~♘lD^V \]SZ7qt ^"+x])Z ?ql_d"G`{dR rEl*p<%D#Eʣ~UyY4꘹BJNRW鿟/20k73_|XNSu_~ύ̀-]USijܮuZ5`(S# O-7AZ=p#59T~vg px@#V6^1Ý-D> zs{hBgͅzBS+,s6^_=Gv[2|pD@<1oŤPctC0~ղSDm^˻9G!'Hq:K|{&ϒN0H.^˞7N_5jf#-3 m0T*0HQ|GA(=_ {alIZݑ"k[8{|(Ao`[]ٔpynihͧ7%RYYY[I-mЈ{2~コ_|g>QA TLؾmA8,\ŕAZaWjkl}-oN֮05Zb*J-m:Dv_92v093q;;Y1gÈ)U_Σb #5-:m$eCfMa >5b/@sú LE v'[=\Z|0'+صC|3/qj/y cĂ'h%uM~/Cr ]΢&4;PBj]u>M|fM>җq(#JBH#dԼ-,h2%B5zG+P ȫNKC qoM!z8gd:O0/[≥wǣl$L$ddB)`?nkpG@~%/뿹"#wIˤZa~C)nO_FQHu'7{ Q;m{nuP' $}g{nv9Yq-9w,Bt*+Z]֝sZ- -Sz{2iZ/r臣WT⼖ӟtg1l_U)$nmA!ZO.~GP=YC6h:fcЃH$%r856=~mG{ CUco T8Vt 8 k}v+ѝOhuB$ ~psyv{/u틣o:hߠG =I]uM*]%e5vxN}ryyۛ{Q @Bybxupbp$˒9=`P5RG>G+SDX7cGU^ŵ/XH,Z օ _+C/@endstream endobj 98 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1003 >> stream xURkPU>^?e PѰRqG\ R˶lX ˶@%3(B0f)LxPY2{yA2 b&$h2V 
%۴vKizLd1J][X%*Ѓ0ЇBf<IfNYa6U m޺5? ?!0lB -{h1[uC+A%NlEѳH !$Cȅ~a!kD)/XC6B8фS/s^O]ql|K?ʚ"%~ g)Tv H@ ٱK$YD 1ywܿ?23DX?**fRլ4R_q>}O sGL.Ĕq>+2̹,7c SS-^꣠yh5ks@58xTcM X,;5y@ QTթvw,\SduCbv#ur d2c^M]sk5j4vԇ=7ϭ YĈ]Pn`vL ,y"H&sgl5 K}+؀_|qao۾O zS, ؾfJt_v֋D%D9NjtdǒcgƯZeV-݃5<٤h*iƫhwP eTYH#A/j\ZX4; >vwBEV(ojeʟ{o ܙa#GAУՠ ruݮVjvIEI_ >endstream endobj 99 0 obj << /Type /XRef /Length 116 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 100 /ID [<95e5dd25f65e9da3b627e04492f010c4>] >> stream xcb&F~0 $8J1?v - o2"" R<D xHvsv D.R>` GD2@$#Ƚf" "9" |& endstream endobj startxref 78887 %%EOF spatstat/inst/doc/getstart.R0000644000176200001440000001154513166361177015640 0ustar liggesusers### R code from vignette source 'getstart.Rnw' ################################################### ### code chunk number 1: getstart.Rnw:5-6 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: getstart.Rnw:25-32 ################################################### library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) ################################################### ### code chunk number 3: getstart.Rnw:56-58 ################################################### getOption("SweaveHooks")[["fig"]]() data(redwood) plot(redwood, pch=16, main="") ################################################### ### code chunk number 4: getstart.Rnw:79-81 ################################################### getOption("SweaveHooks")[["fig"]]() data(longleaf) plot(longleaf, main="") ################################################### ### code chunk number 5: getstart.Rnw:138-141 
################################################### data(finpines) mypattern <- unmark(finpines) mydata <- round(as.data.frame(finpines), 2) ################################################### ### code chunk number 6: getstart.Rnw:156-157 (eval = FALSE) ################################################### ## mydata <- read.csv("myfile.csv") ################################################### ### code chunk number 7: getstart.Rnw:167-168 ################################################### head(mydata) ################################################### ### code chunk number 8: getstart.Rnw:183-184 (eval = FALSE) ################################################### ## mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90)) ################################################### ### code chunk number 9: getstart.Rnw:187-188 (eval = FALSE) ################################################### ## ppp(x.coordinates, y.coordinates, x.range, y.range) ################################################### ### code chunk number 10: getstart.Rnw:197-198 ################################################### getOption("SweaveHooks")[["fig"]]() plot(mypattern) ################################################### ### code chunk number 11: getstart.Rnw:205-206 (eval = FALSE) ################################################### ## summary(mypattern) ################################################### ### code chunk number 12: getstart.Rnw:210-211 ################################################### options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1))) ################################################### ### code chunk number 13: getstart.Rnw:213-214 ################################################### getOption("SweaveHooks")[["fig"]]() plot(Kest(mypattern)) ################################################### ### code chunk number 14: getstart.Rnw:220-221 (eval = FALSE) ################################################### ## plot(envelope(mypattern,Kest)) 
################################################### ### code chunk number 15: getstart.Rnw:223-224 ################################################### env <- envelope(mypattern,Kest, nsim=39) ################################################### ### code chunk number 16: getstart.Rnw:226-227 ################################################### getOption("SweaveHooks")[["fig"]]() plot(env, main="envelope(mypattern, Kest)") ################################################### ### code chunk number 17: getstart.Rnw:229-230 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 18: getstart.Rnw:236-237 ################################################### getOption("SweaveHooks")[["fig"]]() plot(density(mypattern)) ################################################### ### code chunk number 19: getstart.Rnw:247-248 (eval = FALSE) ################################################### ## marks(mypattern) <- mydata[, c(5,9)] ################################################### ### code chunk number 20: getstart.Rnw:250-251 ################################################### mypattern <-finpines ################################################### ### code chunk number 21: getstart.Rnw:254-255 (eval = FALSE) ################################################### ## plot(Smooth(mypattern)) ################################################### ### code chunk number 22: getstart.Rnw:258-259 ################################################### getOption("SweaveHooks")[["fig"]]() plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)") spatstat/inst/doc/BEGINNER.txt0000644000176200001440000000202313115225157015570 0ustar liggesusers -== Welcome to the 'spatstat' package! ==- For a friendly introduction to spatstat, type the command vignette('getstart') which displays the document "Getting Started with Spatstat". 
For an overview of all capabilities, type help(spatstat) View the documentation for any command/function 'foo' by typing help(foo) Activate the graphical help interface by typing help.start() To handle spatial data in the 'shapefile' format, see the document "Handling shapefiles in the spatstat package", by typing vignette('shapefiles') For a complete course on spatstat, see the book "Spatial Point Patterns: Methodology and Applications with R" by Baddeley, Rubak and Turner, Chapman and Hall/CRC Press, December 2015. For a summary of changes to spatstat since the book was finished, type vignette('updates') Visit the website www.spatstat.org for updates and free chapters. For news about the very latest version of spatstat, type latest.news [[[Press 'Q' to exit, on some computers]]] spatstat/inst/doc/shapefiles.R0000644000176200001440000001306513166361207016117 0ustar liggesusers### R code from vignette source 'shapefiles.Rnw' ################################################### ### code chunk number 1: shapefiles.Rnw:7-8 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: shapefiles.Rnw:25-31 ################################################### library(spatstat) options(useFancyQuotes=FALSE) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") ################################################### ### code chunk number 3: shapefiles.Rnw:106-107 (eval = FALSE) ################################################### ## library(maptools) ################################################### ### code chunk number 4: shapefiles.Rnw:111-112 (eval = FALSE) ################################################### ## x <- readShapeSpatial("mydata.shp") ################################################### ### code chunk 
number 5: shapefiles.Rnw:117-118 (eval = FALSE) ################################################### ## class(x) ################################################### ### code chunk number 6: shapefiles.Rnw:135-139 ################################################### baltim <- columbus <- fylk <- list() class(baltim) <- "SpatialPointsDataFrame" class(columbus) <- "SpatialPolygonsDataFrame" class(fylk) <- "SpatialLinesDataFrame" ################################################### ### code chunk number 7: shapefiles.Rnw:141-145 (eval = FALSE) ################################################### ## setwd(system.file("shapes", package="maptools")) ## baltim <- readShapeSpatial("baltim.shp") ## columbus <- readShapeSpatial("columbus.shp") ## fylk <- readShapeSpatial("fylk-val.shp") ################################################### ### code chunk number 8: shapefiles.Rnw:147-150 ################################################### class(baltim) class(columbus) class(fylk) ################################################### ### code chunk number 9: shapefiles.Rnw:178-179 (eval = FALSE) ################################################### ## X <- X[W] ################################################### ### code chunk number 10: shapefiles.Rnw:196-197 (eval = FALSE) ################################################### ## y <- as(x, "ppp") ################################################### ### code chunk number 11: shapefiles.Rnw:211-213 (eval = FALSE) ################################################### ## balt <- as(baltim, "ppp") ## bdata <- slot(baltim, "data") ################################################### ### code chunk number 12: shapefiles.Rnw:261-262 (eval = FALSE) ################################################### ## out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) ################################################### ### code chunk number 13: shapefiles.Rnw:271-272 (eval = FALSE) ################################################### ## curvegroup <- 
lapply(out, function(z) { do.call("superimpose", z)}) ################################################### ### code chunk number 14: shapefiles.Rnw:315-319 (eval = FALSE) ################################################### ## out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) ## dat <- x@data ## for(i in seq(nrow(dat))) ## out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE]) ################################################### ### code chunk number 15: shapefiles.Rnw:340-342 ################################################### getOption("SweaveHooks")[["fig"]]() data(chorley) plot(as.owin(chorley), lwd=3, main="polygon") ################################################### ### code chunk number 16: shapefiles.Rnw:355-357 ################################################### getOption("SweaveHooks")[["fig"]]() data(demopat) plot(as.owin(demopat), col="blue", main="polygonal region") ################################################### ### code chunk number 17: shapefiles.Rnw:393-396 (eval = FALSE) ################################################### ## regions <- slot(x, "polygons") ## regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) }) ## windows <- lapply(regions, as.owin) ################################################### ### code chunk number 18: shapefiles.Rnw:401-402 (eval = FALSE) ################################################### ## te <- tess(tiles=windows) ################################################### ### code chunk number 19: shapefiles.Rnw:438-439 (eval = FALSE) ################################################### ## y <- as(x, "SpatialPolygons") ################################################### ### code chunk number 20: shapefiles.Rnw:449-453 (eval = FALSE) ################################################### ## cp <- as(columbus, "SpatialPolygons") ## cregions <- slot(cp, "polygons") ## cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) }) ## cwindows <- lapply(cregions, as.owin) 
################################################### ### code chunk number 21: shapefiles.Rnw:463-465 (eval = FALSE) ################################################### ## ch <- hyperframe(window=cwindows) ## ch <- cbind.hyperframe(ch, columbus@data) ################################################### ### code chunk number 22: shapefiles.Rnw:485-487 (eval = FALSE) ################################################### ## y <- as(x, "im") ## ylist <- lapply(slot(x, "data"), function(z, y) { y[,] <- z; y }, y=y) spatstat/inst/doc/getstart.Rnw0000644000176200001440000003144413115225157016174 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Getting Started with Spatstat} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Getting started with \texttt{spatstat}} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle Welcome to \spst, a package in the \R\ language for analysing spatial point patterns. This document will help you to get started with \spst. It gives you a quick overview of \spst, and some cookbook recipes for doing basic calculations. \section*{What kind of data does \spst\ handle?} \Spst\ is mainly designed for analysing \emph{spatial point patterns}. 
For example, suppose you are an ecologist studying plant seedlings. You have pegged out a $10 \times 10$ metre rectangle for your survey. Inside the rectangle you identify all the seedlings of the species you want, and record their $(x,y)$ locations. You can plot the $(x,y)$ locations: <>= data(redwood) plot(redwood, pch=16, main="") @ This is a \emph{spatial point pattern} dataset. Methods for analysing this kind of data are summarised in the highly recommended book by Diggle \cite{digg03} and other references in the bibliography. \nocite{handbook10,bivapebegome08} Alternatively the points could be locations in one dimension (such as road accidents recorded on a road network) or in three dimensions (such as cells observed in 3D microscopy). You might also have recorded additional information about each seedling, such as its height, or the number of fronds. Such information, attached to each point in the point pattern, is called a \emph{mark} variable. For example, here is a stand of pine trees, with each tree marked by its diameter at breast height (dbh). The circle radii represent the dbh values (not to scale). <>= data(longleaf) plot(longleaf, main="") @ You might also have recorded supplementary data, such as the terrain elevation, which might serve as explanatory variables. These data can be in any format. \Spst\ does not usually provide capabilities for analysing such data in their own right, but \spst\ does allow such explanatory data to be taken into account in the analysis of a spatial point pattern. \Spst\ is \underline{\bf not} designed to handle point data where the $(x,y)$ locations are fixed (e.g.\ temperature records from the state capital cities in Australia) or where the different $(x,y)$ points represent the same object at different times (e.g.\ hourly locations of a tiger shark with a GPS tag). These are different statistical problems, for which you need different methodology. 
\section*{What can \spst\ do?} \Spst\ supports a very wide range of popular techniques for statistical analysis for spatial point patterns, for example \begin{itemize} \item kernel estimation of density/intensity \item quadrat counting and clustering indices \item detection of clustering using Ripley's $K$-function \item spatial logistic regression \item model-fitting \item Monte Carlo tests \end{itemize} as well as some advanced statistical techniques. \Spst\ is one of the largest packages available for \R, containing over 1000 commands. It is the product of 15 years of software development by leading researchers in spatial statistics. \section*{How do I start using \spst?} \begin{enumerate} \item Install \R\ on your computer \begin{quote} Go to \texttt{r-project.org} and follow the installation instructions. \end{quote} \item Install the \spst\ package in your \R\ system \begin{quote} Start \R\ and type \verb!install.packages("spatstat")!. If that doesn't work, go to \texttt{r-project.org} to learn how to install Contributed Packages. \end{quote} \item Start \R\ \item Type \texttt{library(spatstat)} to load the package. \item Type \texttt{help(spatstat)} for information. \end{enumerate} \section*{How do I get my data into \spst?} <>= data(finpines) mypattern <- unmark(finpines) mydata <- round(as.data.frame(finpines), 2) @ Here is a cookbook example. Suppose you've recorded the $(x,y)$ locations of seedlings, in an Excel spreadsheet. You should also have recorded the dimensions of the survey area in which the seedlings were mapped. \begin{enumerate} \item In Excel, save the spreadsheet into a comma-separated values (CSV) file. \item Start \R\ \item Read your data into \R\ using \texttt{read.csv}. \begin{quote} If your CSV file is called \texttt{myfile.csv} then you could type something like <>= mydata <- read.csv("myfile.csv") @ to read the data from the file and save them in an object called \texttt{mydata} (or whatever you want to call it). 
You may need to set various options to get this to work for your file format: type \texttt{help(read.csv)} for information. \end{quote} \item Check that \texttt{mydata} contains the data you expect. \begin{quote} For example, to see the first few rows of data from the spreadsheet, type <<>>= head(mydata) @ To select a particular column of data, you can type \texttt{mydata[,3]} to extract the third column, or \verb!mydata$x! to extract the column labelled \texttt{x}. \end{quote} \item Type \texttt{library(spatstat)} to load the \spst\ package \item Now convert the data to a point pattern object using the \spst\ command \texttt{ppp}. \begin{quote} Suppose that the \texttt{x} and \texttt{y} coordinates were stored in columns 3 and 7 of the spreadsheet. Suppose that the sampling plot was a rectangle, with the $x$ coordinates ranging from 100 to 200, and the $y$ coordinates ranging from 10 to 90. Then you would type <>= mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90)) @ The general form is <>= ppp(x.coordinates, y.coordinates, x.range, y.range) @ Note that this only stores the seedling locations. If you have additional columns of data (such as seedling height, seedling sex, etc) these can be added as \emph{marks}, later. \end{quote} \item Check that the point pattern looks right by plotting it: <>= plot(mypattern) @ \item Now you are ready to do some statistical analysis. 
Try the following: \begin{itemize} \item Basic summary of data: type <>= summary(mypattern) @ \item Ripley's $K$-function: <>= options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1))) @ <>= plot(Kest(mypattern)) @ For more information, type \texttt{help(Kest)} \item Envelopes of $K$-function: <>= plot(envelope(mypattern,Kest)) @ <>= env <- envelope(mypattern,Kest, nsim=39) @ <>= plot(env, main="envelope(mypattern, Kest)") @ <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ For more information, type \texttt{help(envelope)} \item kernel smoother of point density: <>= plot(density(mypattern)) @ For more information, type \texttt{help(density.ppp)} \end{itemize} \item Next if you have additional columns of data recording (for example) the seedling height and seedling sex, you can add these data as \emph{marks}. Suppose that columns 5 and 9 of the spreadsheet contained such values. Then do something like <>= marks(mypattern) <- mydata[, c(5,9)] @ <>= mypattern <-finpines @ Now you can try things like the kernel smoother of mark values: <>= plot(Smooth(mypattern)) @ \setkeys{Gin}{width=0.8\textwidth} <>= plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)") @ \setkeys{Gin}{width=0.4\textwidth} \item You are airborne! Now look at the book \cite{baddrubaturn15} for more hints. \end{enumerate} \section*{How do I find out which command to use?} Information sources for \spst\ include: \begin{itemize} \item the Quick Reference guide: a list of the most useful commands. \begin{quote} To view the quick reference guide, start \R, then type \texttt{library(spatstat)} and then \texttt{help(spatstat)}. Alternatively you can download a pdf of the Quick Reference guide from the website \texttt{www.spatstat.org} \end{quote} \item online help: \begin{quote} The online help files are useful --- they give detailed information and advice about each command. They are available when you are running \spst. 
To get help about a particular command \texttt{blah}, type \texttt{help(blah)}. There is a graphical help interface, which you can start by typing \texttt{help.start()}. Alternatively you can download a pdf of the entire manual (1000 pages!) from the website \texttt{www.spatstat.org}. \end{quote} \item vignettes: \begin{quote} \Spst\ comes installed with several `vignettes' (introductory documents with examples) which can be accessed using the graphical help interface. They include a document about \texttt{Handling shapefiles}. \end{quote} \item workshop notes: \begin{quote} The notes from a two-day workshop on \spst\ are available online \cite{badd10wshop}. These are now rather out-of-date, but still useful. \end{quote} \item book: \begin{quote} The forthcoming book \cite{baddrubaturn15} contains a complete course on \texttt{spatstat}. \end{quote} \item website: \begin{quote} Visit the \spst\ package website \texttt{www.spatstat.org} \end{quote} \item forums: \begin{quote} Join the forum \texttt{R-sig-geo} by visiting \texttt{r-project.org}. Then email your questions to the forum. Alternatively you can ask the authors of the \spst\ package (their email addresses are given in the package documentation). \end{quote} \end{itemize} \begin{thebibliography}{10} \bibitem{badd10wshop} A. Baddeley. \newblock Analysing spatial point patterns in {{R}}. \newblock Technical report, CSIRO, 2010. \newblock Version 4. \newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/} \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}. \newblock Chapman \& Hall/CRC Press, 2015. \bibitem{bivapebegome08} R. Bivand, E.J. Pebesma, and V. G{\'{o}}mez-Rubio. \newblock {\em Applied spatial data analysis with {R}}. \newblock Springer, 2008. \bibitem{cres93} N.A.C. Cressie. \newblock {\em Statistics for Spatial Data}. \newblock {John Wiley and Sons}, {New York}, second edition, 1993. 
\bibitem{digg03} P.J. Diggle. \newblock {\em Statistical Analysis of Spatial Point Patterns}. \newblock Hodder Arnold, London, second edition, 2003. \bibitem{fortdale05} M.J. Fortin and M.R.T. Dale. \newblock {\em Spatial analysis: a guide for ecologists}. \newblock Cambridge University Press, Cambridge, UK, 2005. \bibitem{fothroge09handbook} A.S. Fotheringham and P.A. Rogers, editors. \newblock {\em The {SAGE} {H}andbook on {S}patial {A}nalysis}. \newblock SAGE Publications, London, 2009. \bibitem{gaetguyo09} C. Gaetan and X. Guyon. \newblock {\em Spatial statistics and modeling}. \newblock Springer, 2009. \newblock Translated by Kevin Bleakley. \bibitem{handbook10} A.E. Gelfand, P.J. Diggle, M. Fuentes, and P. Guttorp, editors. \newblock {\em Handbook of Spatial Statistics}. \newblock CRC Press, 2010. \bibitem{illietal08} J. Illian, A. Penttinen, H. Stoyan, and D. Stoyan. \newblock {\em Statistical Analysis and Modelling of Spatial Point Patterns}. \newblock John Wiley and Sons, Chichester, 2008. \bibitem{mollwaag04} J. M{\o}ller and R.P. Waagepetersen. \newblock {\em Statistical Inference and Simulation for Spatial Point Processes}. \newblock Chapman and Hall/CRC, Boca Raton, 2004. \bibitem{pfeietal08} D.U. Pfeiffer, T. Robinson, M. Stevenson, K. Stevens, D. Rogers, and A. Clements. \newblock {\em Spatial analysis in epidemiology}. \newblock Oxford University Press, Oxford, UK, 2008. \bibitem{wallgotw04} L.A. Waller and C.A. Gotway. \newblock {\em Applied spatial statistics for public health data}. \newblock Wiley, 2004. 
\end{thebibliography} \end{document} spatstat/inst/doc/datasets.R0000644000176200001440000004066013166361176015612 0ustar liggesusers### R code from vignette source 'datasets.Rnw' ################################################### ### code chunk number 1: datasets.Rnw:5-6 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: datasets.Rnw:27-34 ################################################### library(spatstat) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") spatstat.options(transparent=FALSE) options(useFancyQuotes=FALSE) ################################################### ### code chunk number 3: datasets.Rnw:212-230 ################################################### opa <- par() ## How to set all margins to zero and eliminate all outer spaces zeromargins <- function() { par( mar=rep(0,4), omd=c(0,1,0,1), xaxs="i", yaxs="i" ) invisible(NULL) } ## Set 'mar' setmargins <- function(...) { x <- c(...) 
x <- rep(x, 4)[1:4] par(mar=x) invisible(NULL) } ################################################### ### code chunk number 4: datasets.Rnw:239-240 (eval = FALSE) ################################################### ## plot(amacrine) ################################################### ### code chunk number 5: datasets.Rnw:242-244 ################################################### getOption("SweaveHooks")[["fig"]]() setmargins(0,1,2,0) plot(amacrine) ################################################### ### code chunk number 6: datasets.Rnw:253-254 (eval = FALSE) ################################################### ## plot(anemones, markscale=1) ################################################### ### code chunk number 7: datasets.Rnw:256-258 ################################################### getOption("SweaveHooks")[["fig"]]() setmargins(0,0,2,0) plot(anemones, markscale=1) ################################################### ### code chunk number 8: datasets.Rnw:271-272 (eval = FALSE) ################################################### ## ants.extra$plotit() ################################################### ### code chunk number 9: datasets.Rnw:274-276 ################################################### getOption("SweaveHooks")[["fig"]]() setmargins(0,0,1,0) ants.extra$plotit() ################################################### ### code chunk number 10: datasets.Rnw:284-285 ################################################### getOption("SweaveHooks")[["fig"]]() plot(austates) ################################################### ### code chunk number 11: datasets.Rnw:295-297 (eval = FALSE) ################################################### ## plot(bdspots, equal.scales=TRUE, pch="+", ## panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) ################################################### ### code chunk number 12: datasets.Rnw:299-303 ################################################### getOption("SweaveHooks")[["fig"]]() zeromargins() plot(bdspots, 
equal.scales=TRUE, pch="+", main="", mar.panel=0, hsep=1, panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) ################################################### ### code chunk number 13: datasets.Rnw:313-315 (eval = FALSE) ################################################### ## plot(bei.extra$elev, main="Beilschmiedia") ## plot(bei, add=TRUE, pch=16, cex=0.3) ################################################### ### code chunk number 14: datasets.Rnw:317-320 ################################################### getOption("SweaveHooks")[["fig"]]() setmargins(0,0,2,0) plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) ################################################### ### code chunk number 15: datasets.Rnw:323-329 ################################################### getOption("SweaveHooks")[["fig"]]() M <- persp(bei.extra$elev, theta=-45, phi=18, expand=7, border=NA, apron=TRUE, shade=0.3, box=FALSE, visible=TRUE, main="") perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3) ################################################### ### code chunk number 16: datasets.Rnw:338-339 ################################################### getOption("SweaveHooks")[["fig"]]() plot(betacells) ################################################### ### code chunk number 17: datasets.Rnw:344-345 ################################################### getOption("SweaveHooks")[["fig"]]() plot(bramblecanes, cols=1:3) ################################################### ### code chunk number 18: datasets.Rnw:348-349 ################################################### getOption("SweaveHooks")[["fig"]]() plot(split(bramblecanes)) ################################################### ### code chunk number 19: datasets.Rnw:359-360 ################################################### getOption("SweaveHooks")[["fig"]]() plot(bronzefilter,markscale=2) ################################################### ### code chunk number 20: datasets.Rnw:369-370 
################################################### getOption("SweaveHooks")[["fig"]]() plot(cells) ################################################### ### code chunk number 21: datasets.Rnw:379-382 ################################################### getOption("SweaveHooks")[["fig"]]() plot(chicago, main="Chicago Crimes", col="grey", cols=c("red", "blue", "black", "blue", "red", "blue", "blue"), chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE) ################################################### ### code chunk number 22: datasets.Rnw:392-393 ################################################### getOption("SweaveHooks")[["fig"]]() chorley.extra$plotit() ################################################### ### code chunk number 23: datasets.Rnw:409-411 ################################################### getOption("SweaveHooks")[["fig"]]() plot(clmfires, which.marks="cause", cols=2:5, cex=0.25, main="Castilla-La Mancha forest fires") ################################################### ### code chunk number 24: datasets.Rnw:421-422 ################################################### getOption("SweaveHooks")[["fig"]]() plot(clmfires.extra$clmcov200, main="Covariates for forest fires") ################################################### ### code chunk number 25: datasets.Rnw:433-435 ################################################### getOption("SweaveHooks")[["fig"]]() plot(copper$Points, main="Copper") plot(copper$Lines, add=TRUE) ################################################### ### code chunk number 26: datasets.Rnw:442-444 ################################################### getOption("SweaveHooks")[["fig"]]() plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }), parargs=list(mar=rep(1,4))) ################################################### ### code chunk number 27: datasets.Rnw:451-452 ################################################### getOption("SweaveHooks")[["fig"]]() plot(demopat) 
################################################### ### code chunk number 28: datasets.Rnw:466-467 ################################################### getOption("SweaveHooks")[["fig"]]() plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4) ################################################### ### code chunk number 29: datasets.Rnw:475-476 ################################################### getOption("SweaveHooks")[["fig"]]() plot(finpines, main="Finnish pines") ################################################### ### code chunk number 30: datasets.Rnw:489-493 ################################################### getOption("SweaveHooks")[["fig"]]() wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1") plot(flu[wildM1, 1, drop=TRUE], main=c("flu data", "wild type virus, M2-M1 stain"), chars=c(16,3), cex=0.4, cols=2:3) ################################################### ### code chunk number 31: datasets.Rnw:501-502 ################################################### getOption("SweaveHooks")[["fig"]]() plot(gordon, main="People in Gordon Square", pch=16) ################################################### ### code chunk number 32: datasets.Rnw:517-518 ################################################### getOption("SweaveHooks")[["fig"]]() plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites") ################################################### ### code chunk number 33: datasets.Rnw:522-523 (eval = FALSE) ################################################### ## system.file("rawdata/gorillas/vegetation.asc", package="spatstat") ################################################### ### code chunk number 34: datasets.Rnw:532-533 ################################################### getOption("SweaveHooks")[["fig"]]() plot(hamster, cols=c(2,4)) ################################################### ### code chunk number 35: datasets.Rnw:543-544 ################################################### getOption("SweaveHooks")[["fig"]]() plot(heather) 
################################################### ### code chunk number 36: datasets.Rnw:554-555 ################################################### getOption("SweaveHooks")[["fig"]]() plot(humberside) ################################################### ### code chunk number 37: datasets.Rnw:567-568 ################################################### getOption("SweaveHooks")[["fig"]]() plot(hyytiala, cols=2:5) ################################################### ### code chunk number 38: datasets.Rnw:577-578 ################################################### getOption("SweaveHooks")[["fig"]]() plot(japanesepines) ################################################### ### code chunk number 39: datasets.Rnw:587-588 ################################################### getOption("SweaveHooks")[["fig"]]() plot(lansing) ################################################### ### code chunk number 40: datasets.Rnw:591-592 ################################################### getOption("SweaveHooks")[["fig"]]() plot(split(lansing)) ################################################### ### code chunk number 41: datasets.Rnw:599-600 ################################################### getOption("SweaveHooks")[["fig"]]() plot(longleaf) ################################################### ### code chunk number 42: datasets.Rnw:609-611 ################################################### getOption("SweaveHooks")[["fig"]]() plot(mucosa, chars=c(1,3), cols=c("red", "green")) plot(mucosa.subwin, add=TRUE, lty=3) ################################################### ### code chunk number 43: datasets.Rnw:625-628 ################################################### getOption("SweaveHooks")[["fig"]]() plot(murchison$greenstone, main="Murchison data", col="lightgreen") plot(murchison$gold, add=TRUE, pch=3, col="blue") plot(murchison$faults, add=TRUE, col="red") ################################################### ### code chunk number 44: datasets.Rnw:636-637 
################################################### getOption("SweaveHooks")[["fig"]]() plot(nbfires, use.marks=FALSE, pch=".") ################################################### ### code chunk number 45: datasets.Rnw:640-641 ################################################### getOption("SweaveHooks")[["fig"]]() plot(split(nbfires), use.marks=FALSE, chars=".") ################################################### ### code chunk number 46: datasets.Rnw:644-649 ################################################### getOption("SweaveHooks")[["fig"]]() par(mar=c(0,0,2,0)) plot(split(nbfires)$"2000", which.marks="fire.type", main=c("New Brunswick fires 2000", "by fire type"), cols=c("blue", "green", "red", "cyan"), leg.side="left") ################################################### ### code chunk number 47: datasets.Rnw:657-659 ################################################### getOption("SweaveHooks")[["fig"]]() plot(nztrees) plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3) ################################################### ### code chunk number 48: datasets.Rnw:672-673 ################################################### getOption("SweaveHooks")[["fig"]]() plot(osteo[1:10,], main.panel="", pch=21, bg='white') ################################################### ### code chunk number 49: datasets.Rnw:679-680 (eval = FALSE) ################################################### ## system.file("rawdata/osteo/osteo36.txt", package="spatstat") ################################################### ### code chunk number 50: datasets.Rnw:689-690 ################################################### getOption("SweaveHooks")[["fig"]]() plot(paracou, cols=2:3, chars=c(16,3)) ################################################### ### code chunk number 51: datasets.Rnw:698-699 ################################################### getOption("SweaveHooks")[["fig"]]() ponderosa.extra$plotit() ################################################### ### code chunk number 52: 
datasets.Rnw:710-713 ################################################### getOption("SweaveHooks")[["fig"]]() pyr <- pyramidal pyr$grp <- abbreviate(pyramidal$group, minlength=7) plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons") ################################################### ### code chunk number 53: datasets.Rnw:733-735 ################################################### getOption("SweaveHooks")[["fig"]]() plot(redwood) plot(redwood3, add=TRUE, pch=20) ################################################### ### code chunk number 54: datasets.Rnw:738-739 ################################################### getOption("SweaveHooks")[["fig"]]() redwoodfull.extra$plotit() ################################################### ### code chunk number 55: datasets.Rnw:753-755 ################################################### getOption("SweaveHooks")[["fig"]]() plot(as.listof(residualspaper[c("Fig1", "Fig4a", "Fig4b", "Fig4c")]), main="") ################################################### ### code chunk number 56: datasets.Rnw:763-764 ################################################### getOption("SweaveHooks")[["fig"]]() shapley.extra$plotit(main="Shapley") ################################################### ### code chunk number 57: datasets.Rnw:771-772 ################################################### getOption("SweaveHooks")[["fig"]]() plot(simdat) ################################################### ### code chunk number 58: datasets.Rnw:780-781 ################################################### getOption("SweaveHooks")[["fig"]]() plot(spiders, pch=16, show.window=FALSE) ################################################### ### code chunk number 59: datasets.Rnw:788-791 ################################################### getOption("SweaveHooks")[["fig"]]() plot(sporophores, chars=c(16,1,2), cex=0.6) points(0,0,pch=16, cex=2) text(15,8,"Tree", cex=0.75) ################################################### ### code chunk number 60: 
datasets.Rnw:800-801 ################################################### getOption("SweaveHooks")[["fig"]]() plot(spruces, maxsize=min(nndist(spruces))) ################################################### ### code chunk number 61: datasets.Rnw:810-811 ################################################### getOption("SweaveHooks")[["fig"]]() plot(swedishpines) ################################################### ### code chunk number 62: datasets.Rnw:820-821 ################################################### getOption("SweaveHooks")[["fig"]]() plot(urkiola, cex=0.5, cols=2:3) ################################################### ### code chunk number 63: datasets.Rnw:828-830 ################################################### getOption("SweaveHooks")[["fig"]]() par(mar=c(0,0,2,0)) plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters")) ################################################### ### code chunk number 64: datasets.Rnw:837-841 ################################################### getOption("SweaveHooks")[["fig"]]() v <- rotate(vesicles, pi/2) ve <- lapply(vesicles.extra, rotate, pi/2) plot(v, main="Vesicles") plot(ve$activezone, add=TRUE, lwd=3) ################################################### ### code chunk number 65: datasets.Rnw:866-867 (eval = FALSE) ################################################### ## system.file("rawdata/vesicles/mitochondria.txt", package="spatstat") ################################################### ### code chunk number 66: datasets.Rnw:875-876 ################################################### getOption("SweaveHooks")[["fig"]]() plot(waterstriders) spatstat/inst/ratfor/0000755000176200001440000000000013065443316014374 5ustar liggesusersspatstat/inst/ratfor/Makefile0000755000176200001440000000244413166361223016041 0ustar liggesusers RATFOR = /home/adrian/bin/ratfor77 #RATFOR = /usr/local/bin/ratfor CPP = /usr/bin/cpp ########################################################## # Sources actually written by humans: 
RAT_SRC = dppll.r inxypOld.r C_DOMINIC = dinfty.c dwpure.c C_MISC = raster.h areadiff.c closepair.c connect.c corrections.c \ discarea.c distances.c distmapbin.c distseg.c \ exactdist.c exactPdist.c \ massdisthack.c poly2im.c trigraf.c utils.c xyseg.c C_MH = methas.h dist2.h areaint.c badgey.c dgs.c \ diggra.c dist2.c fexitc.c getcif.c geyer.c \ lookup.c methas.c stfcr.c \ straush.c straushm.c strauss.c straussm.c C_KEST = Kloop.h Kborder.c C_SRC = $(C_DOMINIC) $(C_MISC) $(C_MH) $(C_KEST) CC_SRC = PerfectStrauss.cc HUMAN = $(RAT_SRC) $(C_SRC) $(CC_SRC) Makefile ########################################################## # Source to be generated automatically: RAT_FOR = dppll.f inxypOld.f GENERATED = $(RAT_FOR) ###################################################### ########### TARGETS ################################ target: $(GENERATED) @echo -- Done ------- tar: tar cvf src.tar $(HUMAN) clean: rm $(GENERATED) -rm src.tar ####################################################### ######### RULES ################################## .r.f: $(RATFOR) -o $@ $? 
spatstat/inst/ratfor/inxypOld.r0000755000176200001440000000216313115273007016364 0ustar liggesuserssubroutine inxyp(x,y,xp,yp,npts,nedges,score,onbndry) implicit double precision(a-h,o-z) dimension x(npts), y(npts), xp(nedges), yp(nedges), score(npts) logical first, onbndry(npts) zero = 0.0d0 half = 0.5d0 one = 1.0d0 do i = 1,nedges { x0 = xp(i) y0 = yp(i) if(i == nedges) { x1 = xp(1) y1 = yp(1) } else { x1 = xp(i+1) y1 = yp(i+1) } dx = x1 - x0 dy = y1 - y0 do j = 1,npts { xcrit = (x(j) - x0)*(x(j) - x1) if(xcrit <= zero) { if(xcrit == zero) { contrib = half } else { contrib = one } ycrit = y(j)*dx - x(j)*dy + x0*dy - y0*dx if(dx < 0) { if(ycrit >= zero) { score(j) = score(j) + contrib } onbndry(j) = onbndry(j) | (ycrit == zero) } else if(dx > zero) { if(ycrit < zero) { score(j) = score(j) - contrib } onbndry(j) = onbndry(j) | (ycrit == zero) } else { if(x(j) == x0) { ycrit = (y(j) - y0)*(y(j) - y1) } onbndry(j) = onbndry(j) | (ycrit <= zero) } } } } return end spatstat/inst/ratfor/dppll.r0000755000176200001440000000203313115273007015665 0ustar liggesuserssubroutine dppll(x,y,l1,l2,l3,l4,np,nl,eps,mint,rslt,xmin,jmin) implicit double precision(a-h,o-z) dimension x(np), y(np), rslt(np,nl), xmin(np), jmin(np) double precision l1(nl), l2(nl), l3(nl), l4(nl) one = 1.d0 zero = 0.d0 do j = 1,nl { dx = l3(j) - l1(j) dy = l4(j) - l2(j) alen = sqrt(dx**2 + dy**2) if(alen .gt. eps) { co = dx/alen si = dy/alen } else { co = 0.5 si = 0.5 } do i = 1, np { xpx1 = x(i) - l1(j) ypy1 = y(i) - l2(j) xpx2 = x(i) - l3(j) ypy2 = y(i) - l4(j) d1 = xpx1**2 + ypy1**2 d2 = xpx2**2 + ypy2**2 dd = min(d1,d2) if(alen .gt. eps) { xpr = xpx1*co + ypy1*si if(xpr .lt. zero .or. xpr .gt. alen) { d3 = -one } else { ypr = - xpx1*si + ypy1*co d3 = ypr**2 } } else { d3 = -one } if(d3 .ge. zero) { dd = min(dd,d3) } sd =sqrt(dd) rslt(i,j) = sd if(mint.gt.0) { if(sd .lt. 
xmin(i)) { xmin(i) = sd if(mint.gt.1) { jmin(i) = j } } } } } return end spatstat/tests/0000755000176200001440000000000013115271042013253 5ustar liggesusersspatstat/tests/badwindow.txt0000644000176200001440000005451113115225157016006 0ustar liggesusers x y i 486959 6497047 1 487223 6497012 1 487293 6497170 1 487434 6497187 1 487504 6497047 1 487539 6496959 1 487557 6496889 1 488875 6496924 1 488945 6496643 1 490808 6496643 1 490737 6496854 1 490298 6497644 1 490140 6498541 1 490298 6498857 1 490491 6497855 1 490948 6496854 1 491036 6496555 1 491950 6496537 1 491282 6500298 1 491282 6501546 1 491124 6501792 1 491124 6501985 1 491563 6502319 1 491493 6502740 1 491475 6503355 1 491686 6504375 1 491616 6505324 1 490772 6505675 1 490526 6506237 1 489683 6506237 1 489490 6505605 1 489578 6505359 1 489191 6505078 1 488892 6504023 1 488910 6503795 1 488716 6503812 1 488611 6504568 1 488031 6505201 1 487522 6505042 1 487522 6504919 1 487486 6504849 1 487416 6504884 1 487399 6504814 1 487346 6504832 1 487240 6504638 1 487117 6504515 1 487117 6503935 1 487276 6504006 1 487346 6503971 1 487399 6503865 1 487486 6503812 1 487574 6503777 1 487557 6503689 1 487082 6503303 1 486994 6502266 1 487205 6501159 1 487117 6500526 1 487188 6499437 1 487012 6498259 1 486924 6497029 1 487186 6499396 2 487182 6499396 2 487186 6499426 2 487186 6499396 2 487126 6500589 2 487126 6500476 2 487156 6500476 2 487156 6500176 2 487186 6500176 2 487186 6499462 2 487117 6500526 2 487126 6500589 2 487156 6500686 2 487140 6500686 2 487156 6500805 2 487156 6500686 2 487186 6500986 2 487181 6500986 2 487186 6501021 2 487186 6500986 2 487216 6501076 2 487194 6501076 2 487205 6501159 2 487187 6501256 2 487216 6501256 2 487216 6501076 2 487186 6501406 2 487186 6501260 2 487158 6501406 2 487186 6501406 2 487156 6501466 2 487156 6501417 2 487147 6501466 2 487156 6501466 2 487096 6501766 2 487096 6501732 2 487090 6501766 2 487096 6501766 2 487066 6502936 2 487066 6502636 2 487096 6502636 2 487096 6502606 2 487156 
6502606 2 487156 6502486 2 487066 6502486 2 487066 6502456 2 487036 6502456 2 487036 6502156 2 487015 6502156 2 486994 6502266 2 487064 6503086 2 487066 6503086 2 487066 6503112 2 487066 6503116 2 487156 6503116 2 487156 6503026 2 487126 6503026 2 487126 6502996 2 487096 6502996 2 487096 6502936 2 487066 6502936 2 488956 6501496 3 488956 6501256 3 488926 6501256 3 488926 6501046 3 488896 6501046 3 488896 6500806 3 488866 6500806 3 488866 6500506 3 488836 6500506 3 488836 6500236 3 488806 6500236 3 488806 6499996 3 488776 6499996 3 488776 6499486 3 488686 6499486 3 488686 6499126 3 488716 6499126 3 488716 6499006 3 488626 6499006 3 488626 6499036 3 488596 6499036 3 488596 6499066 3 488566 6499066 3 488566 6499126 3 488536 6499126 3 488536 6499216 3 488416 6499216 3 488416 6499456 3 488446 6499456 3 488446 6499696 3 488416 6499696 3 488416 6499936 3 488446 6499936 3 488446 6500056 3 488476 6500056 3 488476 6500146 3 488506 6500146 3 488506 6500266 3 488536 6500266 3 488536 6500386 3 488566 6500386 3 488566 6500656 3 488536 6500656 3 488536 6500986 3 488566 6500986 3 488566 6501136 3 488536 6501136 3 488536 6501376 3 488566 6501376 3 488566 6501406 3 488596 6501406 3 488596 6501496 3 488566 6501496 3 488566 6501616 3 488596 6501616 3 488596 6501796 3 488626 6501796 3 488626 6502036 3 488656 6502036 3 488656 6502096 3 488686 6502096 3 488686 6502246 3 488716 6502246 3 488716 6502276 3 488776 6502276 3 488776 6502336 3 488806 6502336 3 488806 6502426 3 488836 6502426 3 488836 6502636 3 488866 6502636 3 488866 6502666 3 488926 6502666 3 488926 6502696 3 488986 6502696 3 488986 6502726 3 489046 6502726 3 489046 6502756 3 489136 6502756 3 489136 6502096 3 489106 6502096 3 489106 6501976 3 489076 6501976 3 489076 6501826 3 489046 6501826 3 489046 6501736 3 489016 6501736 3 489016 6501586 3 488986 6501586 3 488986 6501496 3 490216 6502426 4 490216 6502246 4 490246 6502246 4 490246 6502186 4 490306 6502186 4 490306 6501946 4 490216 6501946 4 490216 6502096 4 490186 6502096 4 
490186 6502156 4 490036 6502156 4 490036 6502126 4 489946 6502126 4 489946 6502096 4 489916 6502096 4 489916 6502066 4 489826 6502066 4 489826 6502036 4 489796 6502036 4 489796 6501946 4 489706 6501946 4 489706 6502036 4 489736 6502036 4 489736 6502186 4 489766 6502186 4 489766 6502216 4 489796 6502216 4 489796 6502276 4 489946 6502276 4 489946 6502306 4 489976 6502306 4 489976 6502336 4 490006 6502336 4 490006 6502366 4 490036 6502366 4 490036 6502426 4 488642 6504346 5 488716 6503812 5 488910 6503795 5 488892 6504023 5 488926 6504143 5 488926 6503806 5 488956 6503806 5 488956 6503686 5 488986 6503686 5 488986 6503566 5 489016 6503566 5 489016 6503476 5 489046 6503476 5 489046 6503386 5 489076 6503386 5 489076 6503296 5 489106 6503296 5 489106 6503206 5 489136 6503206 5 489136 6503086 5 489166 6503086 5 489166 6502846 5 489046 6502846 5 489046 6503086 5 488926 6503086 5 488926 6503236 5 488746 6503236 5 488746 6503266 5 488536 6503266 5 488536 6503296 5 488506 6503296 5 488506 6503326 5 488416 6503326 5 488416 6503386 5 488326 6503386 5 488326 6503506 5 488356 6503506 5 488356 6503536 5 488416 6503536 5 488416 6503566 5 488446 6503566 5 488446 6503656 5 488626 6503656 5 488626 6503746 5 488656 6503746 5 488656 6503776 5 488686 6503776 5 488686 6503956 5 488656 6503956 5 488656 6503986 5 488626 6503986 5 488626 6504046 5 488596 6504046 5 488596 6504076 5 488566 6504076 5 488566 6504106 5 488536 6504106 5 488536 6504166 5 488506 6504166 5 488506 6504226 5 488476 6504226 5 488476 6504346 5 489886 6503386 6 489886 6503146 6 489916 6503146 6 489916 6503056 6 489736 6503056 6 489736 6503206 6 489706 6503206 6 489706 6503266 6 489676 6503266 6 489676 6503356 6 489796 6503356 6 489796 6503596 6 489916 6503596 6 489916 6503386 6 490006 6505666 7 489916 6505666 7 489916 6505756 7 490006 6505756 7 487426 6504856 8 487396 6504796 8 487276 6504676 8 490786 6505366 9 490786 6505336 9 491176 6505336 9 491176 6505276 9 491236 6505276 9 491236 6505126 9 491266 6505126 9 491266 
6504976 9 491236 6504976 9 491236 6504916 9 491206 6504916 9 491206 6504886 9 491176 6504886 9 491176 6504856 9 491086 6504856 9 491086 6504886 9 490996 6504886 9 490996 6504916 9 490966 6504916 9 490966 6504946 9 490936 6504946 9 490936 6505006 9 490876 6505006 9 490876 6505186 9 490846 6505186 9 490846 6505246 9 490726 6505246 9 490726 6505276 9 490696 6505276 9 490696 6505366 9 487906 6505066 10 487906 6505036 10 487936 6505036 10 487936 6505006 10 487966 6505006 10 487966 6504616 10 487906 6504616 10 487906 6504586 10 487846 6504586 10 487846 6504556 10 487756 6504556 10 487756 6504526 10 487606 6504526 10 487606 6504646 10 487636 6504646 10 487636 6504766 10 487666 6504766 10 487666 6504886 10 487726 6504886 10 487726 6504976 10 487756 6504976 10 487756 6505006 10 487786 6505006 10 487786 6505036 10 487816 6505036 10 487816 6505066 10 491416 6504856 11 491326 6504856 11 491326 6505006 11 491416 6505006 11 491386 6504736 12 491266 6504736 12 491266 6504826 12 491386 6504826 12 487456 6504586 13 487456 6504436 13 487366 6504436 13 487366 6504466 13 487306 6504466 13 487306 6504556 13 487336 6504556 13 487336 6504586 13 487396 6504586 13 487396 6504676 13 487486 6504676 13 487486 6504586 13 489226 6504646 14 489226 6504616 14 489256 6504616 14 489256 6504556 14 489286 6504556 14 489286 6504466 14 489106 6504466 14 489106 6504586 14 489136 6504586 14 489136 6504646 14 488296 6504406 15 488296 6504316 15 488206 6504316 15 488206 6504376 15 488176 6504376 15 488176 6504466 15 488206 6504466 15 488206 6504496 15 488236 6504496 15 488236 6504526 15 488326 6504526 15 488326 6504406 15 490666 6504466 16 490666 6504376 16 490696 6504376 16 490696 6504316 16 490756 6504316 16 490756 6504256 16 490786 6504256 16 490786 6504166 16 490696 6504166 16 490696 6504226 16 490576 6504226 16 490576 6504286 16 490546 6504286 16 490546 6504466 16 489346 6503986 17 489346 6504076 17 489406 6504076 17 489406 6504166 17 489526 6504166 17 489526 6504256 17 489496 6504256 17 489496 
6504346 17 489586 6504346 17 489586 6504256 17 489646 6504256 17 489646 6504196 17 489706 6504196 17 489706 6504016 17 489676 6504016 17 489676 6503896 17 489586 6503896 17 489586 6503956 17 489496 6503956 17 489496 6503986 17 489346 6503986 17 489346 6503986 17 489346 6503836 17 489376 6503836 17 489376 6503746 17 489346 6503746 17 489346 6503566 17 489256 6503566 17 489256 6503506 17 489226 6503506 17 489226 6503416 17 489196 6503416 17 489196 6503386 17 489076 6503386 17 489076 6503446 17 489046 6503446 17 489046 6503566 17 489016 6503566 17 489016 6503626 17 488986 6503626 17 488986 6503836 17 489106 6503836 17 489106 6504106 17 489076 6504106 17 489076 6504226 17 489196 6504226 17 489196 6504196 17 489226 6504196 17 489226 6504076 17 489256 6504076 17 489256 6503986 17 487936 6504166 18 487936 6504136 18 487966 6504136 18 487966 6504016 18 487846 6504016 18 487846 6504046 18 487816 6504046 18 487816 6504136 18 487846 6504136 18 487846 6504166 18 488596 6504046 19 488596 6503986 19 488626 6503986 19 488626 6503896 19 488596 6503896 19 488596 6503716 19 488506 6503716 19 488506 6503806 19 488476 6503806 19 488476 6503986 19 488506 6503986 19 488506 6504046 19 487396 6503896 20 487486 6503836 20 487516 6503806 20 487126 6503956 20 487216 6503986 20 488296 6503806 21 488296 6503776 21 488326 6503776 21 488326 6503746 21 488356 6503746 21 488356 6503626 21 488236 6503626 21 488236 6503656 21 488206 6503656 21 488206 6503686 21 488146 6503686 21 488146 6503776 21 488176 6503776 21 488176 6503806 21 491146 6503686 22 491146 6503626 22 491176 6503626 22 491176 6503536 22 491146 6503536 22 491146 6503476 22 491026 6503476 22 491026 6503656 22 491056 6503656 22 491056 6503686 22 487816 6503506 23 487816 6503476 23 487846 6503476 23 487846 6503386 23 487936 6503386 23 487936 6503356 23 487966 6503356 23 487966 6503296 23 488026 6503296 23 488026 6503236 23 488086 6503236 23 488086 6503116 23 487936 6503116 23 487936 6503146 23 487846 6503146 23 487846 6503176 23 487816 
6503176 23 487816 6503206 23 487786 6503206 23 487786 6503386 23 487696 6503386 23 487696 6503356 23 487606 6503356 23 487606 6503506 23 490036 6503506 24 490036 6503386 24 490096 6503386 24 490096 6503266 24 490066 6503266 24 490066 6503176 24 490096 6503176 24 490096 6503026 24 489976 6503026 24 489976 6503086 24 489946 6503086 24 489946 6503146 24 489916 6503146 24 489916 6503386 24 489946 6503386 24 489946 6503506 24 489496 6503356 25 489406 6503356 25 489406 6503446 25 489496 6503446 25 488386 6503356 26 488386 6503326 26 488416 6503326 26 488416 6503236 26 488326 6503236 26 488326 6503266 26 488296 6503266 26 488296 6503356 26 490726 6503206 27 490636 6503206 27 490636 6503326 27 490726 6503326 27 489496 6503056 28 489406 6503056 28 489406 6503176 28 489526 6503176 28 489526 6503086 28 489496 6503086 28 490726 6503086 29 490726 6502996 29 490756 6502996 29 490756 6502876 29 490666 6502876 29 490666 6502936 29 490636 6502936 29 490636 6503086 29 491176 6502996 30 491086 6502996 30 491086 6503086 30 491176 6503086 30 487786 6503056 31 487786 6503026 31 488116 6503026 31 488116 6502996 31 488266 6502996 31 488266 6502936 31 488626 6502936 31 488626 6502906 31 488806 6502906 31 488806 6502876 31 488836 6502876 31 488836 6502786 31 488806 6502786 31 488806 6502636 31 488776 6502636 31 488776 6502606 31 488686 6502606 31 488686 6502576 31 488656 6502576 31 488656 6502546 31 488506 6502546 31 488506 6502516 31 488476 6502516 31 488476 6502486 31 488416 6502486 31 488416 6502456 31 488356 6502456 31 488356 6502396 31 488296 6502396 31 488296 6502306 31 488326 6502306 31 488326 6502216 31 488416 6502216 31 488416 6502246 31 488446 6502246 31 488446 6502276 31 488476 6502276 31 488476 6502306 31 488506 6502306 31 488506 6502336 31 488536 6502336 31 488536 6502366 31 488566 6502366 31 488566 6502426 31 488596 6502426 31 488596 6502456 31 488656 6502456 31 488656 6502486 31 488806 6502486 31 488806 6502396 31 488776 6502396 31 488776 6502366 31 488746 6502366 31 488746 
6502306 31 488686 6502306 31 488686 6502246 31 488626 6502246 31 488626 6502186 31 488536 6502186 31 488536 6502156 31 488506 6502156 31 488506 6502126 31 488476 6502126 31 488476 6502006 31 488416 6502006 31 488416 6501976 31 488386 6501976 31 488386 6501946 31 488326 6501946 31 488326 6501886 31 488296 6501886 31 488296 6501856 31 488266 6501856 31 488266 6501706 31 488206 6501706 31 488206 6501676 31 488176 6501676 31 488176 6501646 31 488086 6501646 31 488086 6501616 31 487996 6501616 31 487996 6501586 31 487876 6501586 31 487876 6501556 31 487786 6501556 31 487786 6501646 31 487756 6501646 31 487756 6501766 31 487726 6501766 31 487726 6501856 31 487756 6501856 31 487756 6501946 31 487816 6501946 31 487816 6502066 31 487786 6502066 31 487786 6502096 31 487666 6502096 31 487666 6502186 31 487606 6502186 31 487606 6502246 31 487576 6502246 31 487576 6502276 31 487546 6502276 31 487546 6502306 31 487516 6502306 31 487516 6502426 31 487456 6502426 31 487456 6502636 31 487486 6502636 31 487486 6502696 31 487546 6502696 31 487546 6502786 31 487516 6502786 31 487516 6502906 31 487546 6502906 31 487546 6502966 31 487606 6502966 31 487606 6502996 31 487636 6502996 31 487636 6503026 31 487666 6503026 31 487666 6503056 31 489466 6502816 32 489466 6502786 32 489496 6502786 32 489496 6502756 32 489526 6502756 32 489526 6502726 32 489586 6502726 32 489586 6502696 32 489616 6502696 32 489616 6502486 32 489586 6502486 32 489586 6502366 32 489616 6502366 32 489616 6502156 32 489586 6502156 32 489586 6502096 32 489556 6502096 32 489556 6501976 32 489586 6501976 32 489586 6501796 32 489556 6501796 32 489556 6501766 32 489436 6501766 32 489436 6501646 32 489406 6501646 32 489406 6501616 32 489316 6501616 32 489316 6501526 32 489196 6501526 32 489196 6501586 32 489106 6501586 32 489106 6501856 32 489166 6501856 32 489166 6502096 32 489226 6502096 32 489226 6502246 32 489166 6502246 32 489166 6502426 32 489196 6502426 32 489196 6502486 32 489226 6502486 32 489226 6502576 32 489256 
6502576 32 489256 6502606 32 489286 6502606 32 489286 6502726 32 489316 6502726 32 489316 6502786 32 489376 6502786 32 489376 6502816 32 487276 6502336 33 487276 6502306 33 487306 6502306 33 487306 6502216 33 487216 6502216 33 487216 6502096 33 487126 6502096 33 487126 6502246 33 487156 6502246 33 487156 6502306 33 487186 6502306 33 487186 6502336 33 490126 6501856 34 490036 6501856 34 490036 6501976 34 490186 6501976 34 490186 6501886 34 490126 6501886 34 490756 6501406 35 490666 6501406 35 490666 6501496 35 490756 6501496 35 488116 6501346 36 488116 6501316 36 488146 6501316 36 488146 6501076 36 488116 6501076 36 488116 6501016 36 488056 6501016 36 488056 6500866 36 488086 6500866 36 488086 6500836 36 488116 6500836 36 488116 6500746 36 488146 6500746 36 488146 6500716 36 488236 6500716 36 488236 6500776 36 488296 6500776 36 488296 6500926 36 488386 6500926 36 488386 6500776 36 488356 6500776 36 488356 6500656 36 488326 6500656 36 488326 6500566 36 488356 6500566 36 488356 6500476 36 488236 6500476 36 488236 6500506 36 488146 6500506 36 488146 6500416 36 488206 6500416 36 488206 6500326 36 488116 6500326 36 488116 6500296 36 488086 6500296 36 488086 6500206 36 487996 6500206 36 487996 6500116 36 488026 6500116 36 488026 6500026 36 488056 6500026 36 488056 6499846 36 488116 6499846 36 488116 6499786 36 488146 6499786 36 488146 6499696 36 488176 6499696 36 488176 6499606 36 488056 6499606 36 488056 6499636 36 487966 6499636 36 487966 6499606 36 487876 6499606 36 487876 6499636 36 487846 6499636 36 487846 6499726 36 487816 6499726 36 487816 6499786 36 487786 6499786 36 487786 6499936 36 487846 6499936 36 487846 6500026 36 487726 6500026 36 487726 6499996 36 487636 6499996 36 487636 6500086 36 487666 6500086 36 487666 6500356 36 487636 6500356 36 487636 6500446 36 487756 6500446 36 487756 6500566 36 487786 6500566 36 487786 6500656 36 487816 6500656 36 487816 6500746 36 487846 6500746 36 487846 6500896 36 487816 6500896 36 487816 6501076 36 487846 6501076 36 487846 
6501166 36 487906 6501166 36 487906 6501286 36 487996 6501286 36 487996 6501316 36 488026 6501316 36 488026 6501346 36 489226 6501046 37 489136 6501046 37 489136 6501196 37 489226 6501196 37 490666 6500896 38 490576 6500896 38 490576 6501106 38 490636 6501106 38 490636 6501196 38 490726 6501196 38 490726 6501046 38 490696 6501046 38 490696 6501016 38 490666 6501016 38 489646 6500926 39 489646 6500836 39 489676 6500836 39 489676 6500716 39 489556 6500716 39 489556 6500926 39 488986 6500836 40 488986 6500776 40 489046 6500776 40 489046 6500626 40 489106 6500626 40 489106 6500446 40 489016 6500446 40 489016 6500416 40 488986 6500416 40 488986 6500356 40 488896 6500356 40 488896 6500836 40 488356 6500296 41 488356 6500176 41 488386 6500176 41 488386 6500026 41 488266 6500026 41 488266 6500056 41 488206 6500056 41 488206 6500116 41 488176 6500116 41 488176 6500236 41 488206 6500236 41 488206 6500296 41 489226 6500146 42 489136 6500146 42 489136 6500236 42 489226 6500236 42 489226 6499756 43 489046 6499756 43 489046 6499846 43 489106 6499846 43 489106 6499876 43 489136 6499876 43 489136 6499936 43 489226 6499936 43 487486 6499666 44 487396 6499666 44 487396 6499756 44 487486 6499756 44 488386 6499666 45 488386 6499636 45 488416 6499636 45 488416 6499546 45 488386 6499546 45 488386 6499486 45 488296 6499486 45 488296 6499576 45 488266 6499576 45 488266 6499666 45 487936 6499546 46 487936 6499186 46 487906 6499186 46 487906 6499156 46 487876 6499156 46 487876 6499126 46 487816 6499126 46 487816 6499066 46 487786 6499066 46 487786 6498886 46 487636 6498886 46 487636 6499066 46 487606 6499066 46 487606 6499186 46 487576 6499186 46 487576 6499306 46 487696 6499306 46 487696 6499396 46 487606 6499396 46 487606 6499486 46 487786 6499486 46 487786 6499516 46 487846 6499516 46 487846 6499546 46 489286 6499396 47 489166 6499396 47 489166 6499486 47 489286 6499486 47 488296 6499036 48 488296 6498886 48 488446 6498886 48 488446 6498796 48 488506 6498796 48 488506 6498706 48 488446 
6498706 48 488446 6498676 48 488386 6498676 48 488386 6498646 48 488356 6498646 48 488356 6498616 48 488116 6498616 48 488116 6498586 48 488056 6498586 48 488056 6498556 48 488026 6498556 48 488026 6498526 48 487876 6498526 48 487876 6498646 48 487996 6498646 48 487996 6498676 48 488026 6498676 48 488026 6498706 48 488116 6498706 48 488116 6498976 48 488146 6498976 48 488146 6499006 48 488176 6499006 48 488176 6499096 48 488236 6499096 48 488236 6499306 48 488266 6499306 48 488266 6499396 48 488386 6499396 48 488386 6499306 48 488356 6499306 48 488356 6499096 48 488326 6499096 48 488326 6499036 48 489886 6499276 49 489766 6499276 49 489766 6499396 49 489886 6499396 49 490156 6499066 50 490156 6499006 50 490186 6499006 50 490186 6498766 50 490156 6498766 50 490096 6498556 50 490096 6498526 50 489976 6498526 50 489976 6498706 50 490066 6498706 50 490066 6498826 50 489766 6498826 50 489766 6498916 50 489736 6498916 50 489736 6499006 50 489766 6499006 50 489766 6499066 50 489976 6499066 50 489976 6499036 50 490066 6499036 50 490066 6499066 50 487756 6498466 51 487756 6498256 51 487666 6498256 51 487666 6498226 51 487636 6498226 51 487636 6498196 51 487516 6498196 51 487516 6498226 51 487486 6498226 51 487486 6498376 51 487396 6498376 51 487396 6498406 51 487336 6498406 51 487336 6498526 51 487576 6498526 51 487576 6498556 51 487816 6498556 51 487816 6498466 51 489316 6498106 52 489226 6498106 52 489226 6498226 52 489316 6498226 52 490066 6497836 53 489976 6497836 53 489976 6497956 53 490066 6497956 53 489436 6497536 54 489346 6497536 54 489346 6497926 54 489466 6497926 54 489466 6497596 54 489436 6497596 54 490726 6497926 55 490726 6497656 55 490756 6497656 55 490756 6497596 55 490816 6497596 55 490816 6497506 55 490786 6497506 55 490786 6497476 55 490696 6497476 55 490696 6497536 55 490666 6497536 55 490666 6497656 55 490636 6497656 55 490636 6497746 55 490606 6497746 55 490606 6497776 55 490576 6497776 55 490576 6497926 55 490156 6497746 56 490156 6497716 56 490186 
6497716 56 490186 6497656 56 490216 6497656 56 490216 6497566 56 490336 6497566 56 490336 6497476 56 490306 6497476 56 490306 6497326 56 490246 6497326 56 490246 6497296 56 490096 6497296 56 490096 6497356 56 490066 6497356 56 490066 6497596 56 490036 6497596 56 490036 6497716 56 490066 6497716 56 490066 6497746 56 488026 6497536 57 487936 6497536 57 487936 6497626 57 488026 6497626 57 489466 6497206 58 489346 6497206 58 489346 6497446 58 489376 6497446 58 489376 6497506 58 489526 6497506 58 489526 6497296 58 489466 6497296 58 490876 6497266 59 490786 6497266 59 490786 6497356 59 490876 6497356 59 490936 6497236 60 490936 6497206 60 490996 6497206 60 490996 6497176 60 491026 6497176 60 491026 6496996 60 491086 6496996 60 491086 6496936 60 491206 6496936 60 491206 6496696 60 491116 6496696 60 491116 6496726 60 491086 6496726 60 491086 6496846 60 491056 6496846 60 491056 6496906 60 490996 6496906 60 490996 6496966 60 490936 6496966 60 490936 6497026 60 490906 6497026 60 490906 6497116 60 490876 6497116 60 490876 6497146 60 490846 6497146 60 490846 6497236 60 490366 6496906 61 490276 6496906 61 490276 6497026 61 490306 6497026 61 490306 6497176 61 490396 6497176 61 490396 6497206 61 490516 6497206 61 490516 6497116 61 490456 6497116 61 490456 6497056 61 490396 6497056 61 490396 6497026 61 490366 6497026 61 487456 6497146 62 487486 6497116 62 487486 6497086 62 487546 6497086 62 487546 6496936 62 487216 6497026 62 487216 6497086 62 487126 6497086 62 487126 6497176 62 489586 6496936 63 489376 6496936 63 489376 6497026 63 489586 6497026 63 spatstat/tests/testsNtoP.R0000644000176200001440000004110313131104243015334 0ustar liggesusers# # tests/NAinCov.R # # Testing the response to the presence of NA's in covariates # # $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ X <- runifpoint(42) Y <- as.im(function(x,y) { x+y }, owin()) Y[owin(c(0.2,0.4),c(0.2,0.4))] <- NA # fit model: should produce a warning but no failure misfit <- ppm(X ~Y, 
covariates=list(Y=Y)) # prediction Z <- predict(misfit, type="trend", se=TRUE) # covariance matrix: all should be silent v <- vcov(misfit) ss <- vcov(misfit, what="internals") NULL }) # # tests/nndist.R # # Check that nndist and nnwhich give # results consistent with direct calculation from pairdist # # Similarly for nncross and distfun # # Also test whether minnndist(X) == min(nndist(X)) # # $Revision: 1.17 $ $Date: 2016/11/29 06:25:15 $ # require(spatstat) local({ eps <- sqrt(.Machine$double.eps) f <- function(mat,k) { apply(mat, 1, function(z,n) { sort(z)[n] }, n=k+1) } g <- function(mat,k) { apply(mat, 1, function(z,n) { order(z)[n] }, n=k+1) } # Two dimensions X <- runifpoint(42) nn <- nndist(X) nnP <- f(pairdist(X), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.ppp does not agree with pairdist") nn5 <- nndist(X, k=5) nn5P <- f(pairdist(X), 5) if(any(abs(nn5 - nn5P) > eps)) stop("nndist.ppp(k=5) does not agree with pairdist") nw <- nnwhich(X) nwP <- g(pairdist(X), 1) if(any(nw != nwP)) stop("nnwhich.ppp does not agree with pairdist") nw5 <- nnwhich(X, k=5) nw5P <- g(pairdist(X), 5) if(any(nw5 != nw5P)) stop("nnwhich.ppp(k=5) does not agree with pairdist") # Three dimensions X <- runifpoint3(42) nn <- nndist(X) nnP <- f(pairdist(X), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.pp3 does not agree with pairdist") nn5 <- nndist(X, k=5) nn5P <- f(pairdist(X), 5) if(any(abs(nn5 - nn5P) > eps)) stop("nndist.pp3(k=5) does not agree with pairdist") nw <- nnwhich(X) nwP <- g(pairdist(X), 1) if(any(nw != nwP)) stop("nnwhich.pp3 does not agree with pairdist") nw5 <- nnwhich(X, k=5) nw5P <- g(pairdist(X), 5) if(any(nw5 != nw5P)) stop("nnwhich.pp3(k=5) does not agree with pairdist") # m dimensions X <- runifpointx(42, boxx(c(0,1),c(0,1),c(0,1),c(0,1))) nn <- nndist(X) nnP <- f(pairdist(X), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.ppx does not agree with pairdist") nn5 <- nndist(X, k=5) nn5P <- f(pairdist(X), 5) if(any(abs(nn5 - nn5P) > eps)) stop("nndist.ppx(k=5) does 
not agree with pairdist") nw <- nnwhich(X) nwP <- g(pairdist(X), 1) if(any(nw != nwP)) stop("nnwhich.ppx does not agree with pairdist") nw5 <- nnwhich(X, k=5) nw5P <- g(pairdist(X), 5) if(any(nw5 != nw5P)) stop("nnwhich.ppx(k=5) does not agree with pairdist") #### nncross in two dimensions X <- runifpoint(42) Y <- runifpoint(42, win=owin(c(1,2),c(1,2))) # default nncross nc <- nncross(X,Y) ncd <- nc$dist ncw <- nc$which cd <- crossdist(X,Y) cdd <- apply(cd, 1, min) cdw <- apply(cd, 1, which.min) if(any(abs(ncd - cdd) > eps)) stop("nncross()$dist does not agree with apply(crossdist(), 1, min)") if(any(ncw != cdw)) stop("nncross()$which does not agree with apply(crossdist(), 1, which.min)") # sort on x nc <- nncross(X,Y, sortby="x") ncd <- nc$dist ncw <- nc$which if(any(abs(ncd - cdd) > eps)) stop("nncross(sortby=x)$dist does not agree with apply(crossdist(), 1, min)") if(any(ncw != cdw)) stop("nncross(sortby=x)$which does not agree with apply(crossdist(), 1, which.min)") # pre-sorted on x Y <- Y[order(Y$x)] nc <- nncross(X,Y, is.sorted.Y=TRUE, sortby="x") ncd <- nc$dist ncw <- nc$which cd <- crossdist(X,Y) cdd <- apply(cd, 1, min) cdw <- apply(cd, 1, which.min) if(any(abs(ncd - cdd) > eps)) stop("For sorted data, nncross()$dist does not agree with apply(crossdist(), 1, min)") if(any(ncw != cdw)) stop("For sorted data, nncross()$which does not agree with apply(crossdist(), 1, which.min)") # sanity check for nncross with k > 1 ndw <- nncross(X, Y, k=1:4, what="which") if(any(is.na(ndw))) stop("NA's returned by nncross.ppp(k > 1, what='which')") nnc4 <- nncross(X, Y, k=1:4) iswhich <- (substr(colnames(nnc4), 1, nchar("which")) == "which") ndw <- nnc4[,iswhich] if(any(is.na(ndw))) stop("NA's returned by nncross.ppp(k > 1)$which") # test of correctness for nncross with k > 1 flipcells <- flipxy(cells) calcwhich <- nncross(cells, flipcells, k=1:4, what="which") truewhich <- t(apply(crossdist(cells,flipcells), 1, order))[,1:4] if(any(calcwhich != truewhich)) 
stop("nncross(k > 1) gives wrong answer") # test of agreement between nngrid.h and knngrid.h # dimyx=23 (found by trial-and-error) ensures that there are no ties a <- as.matrix(nnmap(cells, what="which", dimyx=23)) b <- as.matrix(nnmap(cells, what="which", dimyx=23, k=1:2)[[1]]) if(any(a != b)) stop("algorithms in nngrid.h and knngrid.h disagree") ## minnndist mfast <- minnndist(X) mslow <- min(nndist(X)) if(abs(mfast-mslow) > eps) stop("minnndist(X) disagrees with min(nndist(X))") mfast <- maxnndist(X) mslow <- max(nndist(X)) if(abs(mfast-mslow) > eps) stop("maxnndist(X) disagrees with max(nndist(X))") }) local({ # tests for has.close() # (the default method uses nndist or pairdist, and can be trusted!) a <- has.close(redwood, 0.05) b <- has.close.default(redwood, 0.05) if(any(a != b)) stop("Incorrect result for has.close(X, r)") a <- has.close(redwood, 0.05, periodic=TRUE) a <- has.close.default(redwood, 0.05, periodic=TRUE) if(any(a != b)) stop("Incorrect result for has.close(X, r, periodic=TRUE)") Y <- split(amacrine) a <- with(Y, has.close(on, 0.05, off)) b <- with(Y, has.close.default(on, 0.05, off)) if(any(a != b)) stop("Incorrect result for has.close(X, r, Y)") a <- with(Y, has.close(on, 0.05, off, periodic=TRUE)) b <- with(Y, has.close.default(on, 0.05, off, periodic=TRUE)) if(any(a != b)) stop("Incorrect result for has.close(X, r, Y, periodic=TRUE)") }) ## ## tests/percy.R ## ## Tests of Percus-Yevick approximations ## ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ fit <- ppm(swedishpines ~1, DiggleGatesStibbard(6)) K <- Kmodel(fit) }) #' tests/perspim.R #' #' Check persp.im handling of NA, etc #' #' $Revision: 1.1 $ $Date: 2016/08/27 02:53:35 $ require(spatstat) local({ set.seed(42) Z <- distmap(letterR, invert=TRUE)[letterR, drop=FALSE] X <- runifpoint(100, Frame(Z)) M <- persp(Z, colin=Z, visible=TRUE) perspPoints(X, Z=Z, M=M) }) ## ## tests/pixelgripes.R ## Problems related to pixellation of windows ## ## $Revision: 1.3 $ 
$Date: 2015/12/29 08:54:49 $ require(spatstat) local({ ## From Philipp Hunziker: bug in rNeymanScott (etc) ## Create an irregular window PM <- matrix(c(1,0,0.5,1,0,0), 3, 2, byrow=TRUE) P <- owin(poly=PM) ## Generate Matern points X <- rMatClust(50, 0.05, 5, win=P) ## Some distance function as a covariate distorigin <- function(x, y) { sqrt(x^2 + y^2) } ## No covariates: works fine fit0 <- kppm(X ~ 1, clusters="MatClust") Y0 <- simulate(fit0, retry=0) ## Covariates: Simulation fails fit1 <- kppm(X ~ distorigin, clusters="MatClust") Y1 <- simulate(fit1, retry=0) }) ## ## tests/polygons.R ## ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ ## require(spatstat) local({ co <- as.ppp(corners(letterR), letterR, check=FALSE) co[letterR] }) # # tests/ppmBadData.R # # $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $ # Testing robustness of ppm and support functions # when data are rubbish require(spatstat) local({ # --------------------------------------------------- # from Rolf: very large proportion of data is NA SEED <- 42 K <- 101 A <- 500 X <- seq(0, A, length=K) G <- expand.grid(x=X, y=X) FOO <- function(x,y) { sin(x)^2 + cos(y)^2 } M1 <- im(matrix(FOO(G$x, G$y), K, K), xcol=X, yrow=X) M <- im(matrix(FOO(G$x, G$y), K, K)) BAR <- function(x) { exp(-6.618913 + 5.855337 * x - 8.432483 * x^2) } V <- im(BAR(M$v), xcol=X, yrow=X) # V <- eval.im(exp(-6.618913 + 5.855337 * M - 8.432483 * M^2)) set.seed(SEED) Y <- rpoispp(V) fY <- ppm(Y ~cv + I(cv^2), data=list(cv=M), correction="translate") diagnose.ppm(fY) lurking(fY, covariate=as.im(function(x,y){x}, square(A)), type="raw") }) # -------------------------------------------------------- # from Andrew Bevan: numerical overflow, ill-conditioned Fisher information local({ SEED <- 42 nongranite<- owin(poly = list(x = c(0, 8500, 7000, 6400, 6400, 6700, 7000, 7200, 7300, 8000, 8100, 8800, 9500, 10000, 10000, 0), y = c(0, 0, 2000, 3800, 4000, 5000, 6500, 7400, 7500, 8000, 8100, 9000, 9500, 9600, 10000, 10000))) #Trend on raster grid 
rain <- as.im(X=function(x,y) { x^2 + y^2 }, W=nongranite, dimyx=100) #Generate a point pattern via a Lennard-Jones process set.seed(SEED) mod4<- rmhmodel(cif="lennard", par=list(beta=1, sigma=250, epsilon=2.2), trend=rain, w=nongranite) ljtr<- rmh(mod4, start=list(n.start=80), control=list(p=1, nrep=1e5)) #Fit a point process model to the pattern with rain as a covariate # NOTE INCORRECT TREND FORMULA ljtrmod <- ppm(ljtr, trend= ~ Z, interaction=NULL, data=list(Z=rain)) ss <- summary(ljtrmod) }) local({ # From Ege # Degenerate but non-null argument 'covariates' xx <- list() names(xx) <- character(0) fit <- ppm(cells ~x, covariates = xx) st <- summary(fit) }) # # tests/ppmgam.R # # Test ppm with use.gam=TRUE # # $Revision: 1.3 $ $Date: 2015/09/01 02:01:33 $ # require(spatstat) local({ fit <- ppm(nztrees ~s(x,y), use.gam=TRUE) mm <- model.matrix(fit) mf <- model.frame(fit) v <- vcov(fit) prd <- predict(fit) }) #' #' tests/ppmlogi.R #' #' Tests of ppm(method='logi') #' and related code (predict, leverage etc) #' #' $Revision: 1.7 $ $Date: 2017/07/11 08:13:18 $ #' require(spatstat) local({ fit <- ppm(cells ~x, method="logi") f <- fitted(fit) p <- predict(fit) u <- summary(fit) fitS <- ppm(cells ~x, Strauss(0.12), method="logi") fS <- fitted(fitS) pS <- predict(fitS) uS <- summary(fitS) plot(leverage(fit)) plot(influence(fit)) plot(dfbetas(fit)) plot(leverage(fitS)) plot(influence(fitS)) plot(dfbetas(fitS)) }) local({ #' same with hard core - A1 is singular fitH <- ppm(cells ~x, Strauss(0.08), method="logi") fH <- fitted(fitH) pH <- predict(fitH) uH <- summary(fitH) plot(leverage(fitH)) plot(influence(fitH)) plot(dfbetas(fitH)) }) local({ #' logistic fit to data frame of covariates z <- c(rep(TRUE, 5), rep(FALSE, 5)) df <- data.frame(A=z + 2* runif(10), B=runif(10)) Y <- quadscheme.logi(runifpoint(5), runifpoint(5)) fut <- ppm(Y ~ A+B, data=df, method="logi") sf <- summary(fut) }) # # tests/ppmmarkorder.R # # $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ # # Test that 
predict.ppm, plot.ppm and plot.fitin # tolerate marks with levels that are not in alpha order # require(spatstat) local({ X <- amacrine levels(marks(X)) <- c("ZZZ", "AAA") fit <- ppm(X ~marks, MultiStrauss(c("ZZZ","AAA"), matrix(0.06, 2, 2))) aa <- predict(fit, type="trend") bb <- predict(fit, type="cif") plot(fit) plot(fitin(fit)) }) # # tests/ppmscope.R # # Test things that might corrupt the internal format of ppm objects # # $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ ## (1) Scoping problem that can arise when ppm splits the data fit <- ppm(bei ~elev, data=bei.extra) mm <- model.matrix(fit) ## (2) Fast update mechanism fit1 <- ppm(cells ~x+y, Strauss(0.07)) fit2 <- update(fit1, ~y) fit3 <- update(fit2, ~x) ## (3) New formula-based syntax attach(bei.extra) slfit <- ppm(bei ~ grad) sl2fit <- update(slfit, ~grad + I(grad^2)) slfitup <- update(slfit, use.internal=TRUE) sl2fitup <- update(sl2fit, use.internal=TRUE) }) # # tests/ppmtricks.R # # Test backdoor exits and hidden options in ppm # and summary.ppm, print.summary.ppm # # $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ ## (1) skip.border fit <- ppm(cells, ~1, Strauss(0.1), skip.border=TRUE) ## (2) subset arguments of different kinds fut <- ppm(cells ~ x, subset=(x > 0.5)) fot <- ppm(cells ~ x, subset=(x > 0.5), method="logi") W <- owin(c(0.4, 0.8), c(0.2, 0.7)) fut <- ppm(cells ~ x, subset=W) fot <- ppm(cells ~ x, subset=W, method="logi") ## (3) profilepl -> ppm ## uses 'skip.border' and 'precomputed' ## also tests scoping for covariates splants <- split(ants) mess <- splants[["Messor"]] cats <- splants[["Cataglyphis"]] ss <- data.frame(r=seq(60,120,by=20),hc=29/6) dM <- distmap(mess,dimyx=256) mungf <- profilepl(ss, StraussHard, cats ~ dM) mungp <- profilepl(ss, StraussHard, trend=~dM, Q=cats) ## (4) splitting large quadschemes into blocks op <- spatstat.options(maxmatrix=5000) pr <- predict(ppm(cells ~ x, AreaInter(0.05))) ## (5) shortcuts in 
summary.ppm ## and corresponding behaviour of print.summary.ppm print(summary(fit, quick=TRUE)) print(summary(fit, quick="entries")) print(summary(fit, quick="no prediction")) print(summary(fit, quick="no variances")) spatstat.options(op) }) # # tests/ppx.R # # Test operations for ppx objects # # $Revision: 1.3 $ $Date: 2017/03/02 01:19:13 $ # require(spatstat) local({ df <- data.frame(x=c(1,2,2,1), y=c(1,2,3,1), z=c(2,3,4,2)) X <- ppx(data=df, coord.type=rep("s", 3), domain=box3()) unique(X) duplicated(X) multiplicity(X) stopifnot(identical(unmark(chicago[1]), unmark(chicago)[1])) #' ppx with zero points U <- chicago[integer(0)] V <- U %mark% 1 V <- U %mark% factor("a") }) # # tests/prediction.R # # Things that might go wrong with predict() # # $Revision: 1.4 $ $Date: 2016/03/04 03:14:40 $ # require(spatstat) local({ # test of 'covfunargs' f <- function(x,y,a){ y - a } fit <- ppm(cells ~x + f, covariates=list(f=f), covfunargs=list(a=1/2)) p <- predict(fit) # prediction involving 0 * NA qc <- quadscheme(cells, nd=10) r <- minnndist(as.ppp(qc))/10 fit <- ppm(qc ~ 1, Strauss(r)) # model has NA for interaction coefficient p1 <- predict(fit) p2 <- predict(fit, type="cif", ngrid=10) stopifnot(all(is.finite(as.matrix(p1)))) stopifnot(all(is.finite(as.matrix(p2)))) # test of 'new.coef' mechanism fut <- ppm(cells ~ x, Strauss(0.15), rbord=0) p0 <- predict(fut, type="cif") pe <- predict(fut, type="cif", new.coef=coef(fut)) pn <- predict(fut, type="cif", new.coef=unname(coef(fut))) if(max(abs(pe-p0)) > 0.01) stop("new.coef mechanism is broken!") if(max(abs(pn-p0)) > 0.01) stop("new.coef mechanism gives wrong answer, for unnamed vectors") # tests of relrisk.ppm fut <- ppm(amacrine ~ x * marks) a <- relrisk(fut, control=2, relative=TRUE) a <- relrisk(fut, se=TRUE) a <- relrisk(fut, relative=TRUE, se=TRUE) fut <- ppm(sporophores ~ marks + x) a <- relrisk(fut, control=2, relative=TRUE) a <- relrisk(fut, se=TRUE) a <- relrisk(fut, relative=TRUE, se=TRUE) }) # # 
tests/project.ppm.R # # $Revision: 1.6 $ $Date: 2015/08/27 08:19:03 $ # # Tests of projection mechanism # require(spatstat) local({ chk <- function(m) { if(!valid.ppm(m)) stop("Projected model was still not valid") return(invisible(NULL)) } # a very unidentifiable model fit <- ppm(cells ~Z, Strauss(1e-06), covariates=list(Z=0)) chk(emend(fit)) # multitype r <- matrix(1e-06, 2, 2) fit2 <- ppm(amacrine ~1, MultiStrauss(types=c("off", "on"), radii=r)) chk(emend(fit2)) # complicated multitype fit3 <- ppm(amacrine ~1, MultiStraussHard(types=c("off", "on"), iradii=r, hradii=r/5)) chk(emend(fit3)) # hierarchical ra <- r r[2,1] <- NA fit4 <- ppm(amacrine ~1, HierStrauss(types=c("off", "on"), radii=r)) chk(emend(fit4)) # complicated hierarchical fit5 <- ppm(amacrine ~1, HierStraussHard(types=c("off", "on"), iradii=r, hradii=r/5)) chk(emend(fit5)) # hybrids r0 <- min(nndist(redwood)) ra <- 1.25 * r0 rb <- 0.8 * r0 f1 <- ppm(redwood ~1, Hybrid(A=Strauss(ra), B=Geyer(0.1, 2)), project=TRUE) chk(f1) f2 <- ppm(redwood ~1, Hybrid(A=Strauss(rb), B=Geyer(0.1, 2)), project=TRUE) chk(f2) f3 <- ppm(redwood ~1, Hybrid(A=Strauss(ra), B=Strauss(0.1)), project=TRUE) chk(f3) f4 <- ppm(redwood ~1, Hybrid(A=Strauss(rb), B=Strauss(0.1)), project=TRUE) chk(f4) f5 <- ppm(redwood ~1, Hybrid(A=Hardcore(rb), B=Strauss(0.1)), project=TRUE) chk(f5) f6 <- ppm(redwood ~1, Hybrid(A=Hardcore(rb), B=Geyer(0.1, 2)), project=TRUE) chk(f6) f7 <- ppm(redwood ~1, Hybrid(A=Geyer(rb, 1), B=Strauss(0.1)), project=TRUE) chk(f7) }) spatstat/tests/testsGtoK.R0000644000176200001440000001077213115446267015351 0ustar liggesusers## ## tests/gcc323.R ## ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ ## require(spatstat) local({ # critical R values that provoke GCC bug #323 a <- marktable(lansing, R=0.25) a <- marktable(lansing, R=0.21) a <- marktable(lansing, R=0.20) a <- marktable(lansing, R=0.10) }) # # tests/hobjects.R # # Validity of methods for ppm(... 
method="ho") # require(spatstat) local({ set.seed(42) fit <- ppm(cells ~1, Strauss(0.1), method="ho", nsim=10) fitx <- ppm(cells ~offset(x), Strauss(0.1), method="ho", nsim=10) a <- AIC(fit) ax <- AIC(fitx) f <- fitted(fit) fx <- fitted(fitx) p <- predict(fit) px <- predict(fitx) }) # # tests/hyperframe.R # # test "[.hyperframe" etc # # $Revision: 1.3 $ $Date: 2014/08/25 04:43:07 $ # lambda <- runif(4, min=50, max=100) X <- lapply(as.list(lambda), function(x) { rpoispp(x) }) h <- hyperframe(lambda=lambda, X=X) h$lambda2 <- lambda^2 h[, "lambda3"] <- lambda^3 h[, "Y"] <- X h[, "X"] <- lapply(X, flipxy) h[, c("X", "Y")] <- hyperframe(X=X, Y=X) names(h) <- LETTERS[1:5] print(h) # # tests/imageops.R # # $Revision: 1.7 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ A <- as.im(owin()) B <- as.im(owin(c(1.1, 1.9), c(0,1))) Z <- imcov(A, B) stopifnot(abs(max(Z) - 0.8) < 0.1) ## handling images with 1 row or column ycov <- function(x, y) y E <- as.im(ycov, owin(), dimyx = c(2,1)) G <- cut(E, 2) H <- as.tess(G) E12 <- as.im(ycov, owin(), dimyx = c(1,2)) G12 <- cut(E12, 2) H12 <- as.tess(G12) ## d <- distmap(cells, dimyx=32) Z <- connected(d <= 0.06, method="interpreted") }) #' tests/ippm.R #' Tests of 'ippm' class #' $Revision: 1.1 $ $Date: 2017/06/06 06:32:00 $ require(spatstat) local({ # .......... set up example from help file ................. 
nd <- 10 gamma0 <- 3 delta0 <- 5 POW <- 3 # Terms in intensity Z <- function(x,y) { -2*y } f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) } # True intensity lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) } # Simulate realisation lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0)) set.seed(42) X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0) # Partial derivatives of log f DlogfDgamma <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) topbit/(1 + topbit) } DlogfDdelta <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) - (x^POW) * topbit/(1 + topbit) } # irregular score Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta) # fit model fit <- ippm(X ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), nd=nd) # ............. test ............................. Ar <- model.matrix(fit) Ai <- model.matrix(fit, irregular=TRUE) Zr <- model.images(fit) Zi <- model.images(fit, irregular=TRUE) })# # tests/kppm.R # # $Revision: 1.15 $ $Date: 2016/09/13 02:30:05 $ # # Test functionality of kppm that depends on RandomFields # Test update.kppm for old style kppm objects require(spatstat) local({ fit <- kppm(redwood, ~1, "Thomas") fitx <- update(fit, ~ . 
+ x) fitM <- update(fit, clusters="MatClust") fitC <- update(fit, cells) fitCx <- update(fit, cells ~ x) # improve.kppm fitI <- update(fit, improve.type="quasi") fitxI <- update(fitx, improve.type="quasi") # vcov.kppm vc <- vcov(fitxI) # plot.kppm including predict.kppm fitMC <- kppm(redwood ~ x, "Thomas") fitCL <- kppm(redwood ~ x, "Thomas", method="c") fitPA <- kppm(redwood ~ x, "Thomas", method="p") plot(fitMC) plot(fitCL) plot(fitPA) # fit with composite likelihood method [thanks to Abdollah Jalilian] fut <- kppm(redwood ~ x, "VarGamma", method="clik2", nu.ker=-3/8) if(require(RandomFields)) { fit0 <- kppm(redwood ~1, "LGCP") Y0 <- simulate(fit0)[[1]] stopifnot(is.ppp(Y0)) # fit LGCP using K function: slow fit1 <- kppm(redwood ~x, "LGCP", covmodel=list(model="matern", nu=0.3), control=list(maxit=3)) Y1 <- simulate(fit1)[[1]] stopifnot(is.ppp(Y1)) # fit LGCP using pcf fit1p <- kppm(redwood ~x, "LGCP", covmodel=list(model="matern", nu=0.3), statistic="pcf") Y1p <- simulate(fit1p)[[1]] stopifnot(is.ppp(Y1p)) # ... and Abdollah's code fit2 <- kppm(redwood ~x, cluster="Cauchy", statistic="K") Y2 <- simulate(fit2)[[1]] stopifnot(is.ppp(Y2)) } }) spatstat/tests/testsLtoM.R0000644000176200001440000003251313115225157015346 0ustar liggesusers## ## tests/legacy.R ## ## Test that current version of spatstat is compatible with outmoded usage ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ local({ require(spatstat) ## (1) Old syntax of ppm ppm(cells, ~x) ## (2) Old syntax of MultiStrauss etc. r <- matrix(3, 2, 2) a <- MultiStrauss( , r) a <- MultiStrauss(NULL, r) a <- MultiHard(, r) h <- r/2 a <- MultiStraussHard( , r, h) NULL }) #' #' tests/leverinf.R #' #' leverage and influence for Gibbs models #' #' $Revision: 1.8 $ $Date: 2017/02/23 05:30:18 $ #' require(spatstat) local({ # original non-sparse algorithm Leverage <- function(...) leverage(..., sparseOK=FALSE) Influence <- function(...) influence(..., sparseOK=FALSE) Dfbetas <- function(...) 
dfbetas(..., sparseOK=FALSE) # Strauss()$delta2 fitS <- ppm(cells ~ x, Strauss(0.12), rbord=0) levS <- Leverage(fitS) infS <- Influence(fitS) dfbS <- Dfbetas(fitS) # Geyer()$delta2 fitG <- ppm(redwood ~ 1, Geyer(0.1, 2), rbord=0) levG <- Leverage(fitG) infG <- Influence(fitG) # pairwise.family$delta2 fitD <- ppm(cells ~ 1, DiggleGatesStibbard(0.12), rbord=0) levD <- Leverage(fitD) infD <- Influence(fitD) # ppmInfluence; offset is present; coefficient vector has length 0 fitH <- ppm(cells ~ 1, Hardcore(0.07)) levH <- Leverage(fitH) infH <- Influence(fitH) # ppmInfluence; offset is present; coefficient vector has length 1 fitHx <- ppm(cells ~ x, Hardcore(0.07), rbord=0) levHx <- Leverage(fitHx) infHx <- Influence(fitHx) ## divide and recombine algorithm op <- spatstat.options(maxmatrix=50000) ## non-sparse levSB <- Leverage(fitS) infSB <- Influence(fitS) dfbSB <- Dfbetas(fitS) chk <- function(x, y, what, from="single-block and multi-block", thresh=1e-12) { if(max(abs(x-y)) > thresh) stop(paste("Different results for", what, "obtained from", from, "algorithms"), call.=FALSE) invisible(NULL) } chk(marks(as.ppp(infS)), marks(as.ppp(infSB)), "influence") chk(as.im(levS), as.im(levSB), "leverage") chk(dfbS$val, dfbSB$val, "dfbetas$value") chk(dfbS$density, dfbSB$density, "dfbetas$density") # also check case of zero cif levHB <- Leverage(fitH) infHB <- Influence(fitH) dfbHB <- Dfbetas(fitH) levHxB <- Leverage(fitHx) infHxB <- Influence(fitHx) dfbHxB <- Dfbetas(fitHx) ## sparse algorithm, with blocks pmiSSB <- ppmInfluence(fitS, sparseOK=TRUE) # also check case of zero cif pmiHSB <- ppmInfluence(fitH, sparseOK=TRUE) pmiHxSB <- ppmInfluence(fitHx, sparseOK=TRUE) spatstat.options(op) ## sparse algorithm, no blocks pmi <- ppmInfluence(fitS, sparseOK=TRUE) levSp <- pmi$leverage infSp <- pmi$influence dfbSp <- pmi$dfbetas chks <- function(...) 
chk(..., from="sparse and non-sparse") chks(marks(as.ppp(infS)), marks(as.ppp(infSp)), "influence") chks(as.im(levS), as.im(levSp), "leverage") chks(dfbS$val, dfbSp$val, "dfbetas$value") chks(dfbS$density, dfbSp$density, "dfbetas$density") # case of zero cif pmiH <- ppmInfluence(fitH, sparseOK=TRUE) pmiHx <- ppmInfluence(fitHx, sparseOK=TRUE) }) ## ## tests/linalgeb.R ## ## checks validity of linear algebra code ## ## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ ## require(spatstat) local({ p <- 3 n <- 4 x <- matrix(1:(n*p), n, p) w <- rep(2, n) z <- matrix(0, p, p) for(i in 1:n) z <- z + w[i] * outer(x[i,],x[i,]) zC <- sumouter(x, w) if(!identical(zC, z)) stop("sumouter gives incorrect result in symmetric case") y <- matrix(1:(2*n), n, 2) z <- matrix(0, p, 2) for(i in 1:n) z <- z + w[i] * outer(x[i,],y[i,]) zC <- sumouter(x, w, y) if(!identical(zC, z)) stop("sumouter gives incorrect result in ASYMMETRIC case") x <- array(as.numeric(1:(p * n * n)), dim=c(p, n, n)) w <- matrix(1:(n*n), n, n) y <- matrix(numeric(p * p), p, p) for(i in 1:n) for(j in (1:n)[-i]) y <- y + w[i,j] * outer(x[,i,j], x[,j,i]) z <- sumsymouter(x, w) if(!identical(y,z)) stop("sumsymouter gives incorrect result") }) ## ## tests/localpcf.R ## ## temporary test file for localpcfmatrix ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ a <- localpcfmatrix(redwood) a plot(a) a[, 3:5] }) # # tests/lppstuff.R # # Tests for lpp code # # $Revision: 1.9 $ $Date: 2016/09/28 04:28:05 $ require(spatstat) local({ # check 'normalise' option in linearKinhom X <- rpoislpp(5, simplenet) fit <- lppm(X ~x) K <- linearKinhom(X, lambda=fit, normalise=FALSE) plot(K) g <- linearpcfinhom(X, lambda=fit, normalise=FALSE) plot(g) K <- linearKinhom(X, lambda=fit, normalise=TRUE) plot(K) g <- linearpcfinhom(X, lambda=fit, normalise=TRUE) plot(g) # check empty patterns OK X <- runiflpp(0, simplenet) print(X) ## nearest neighbour distances eps <- sqrt(.Machine$double.eps) f <- function(mat,k) { 
apply(mat, 1, function(z,n) { sort(z)[n] }, n=k+1) } g <- function(mat,k) { apply(mat, 1, function(z,n) { order(z)[n] }, n=k+1) } XX <- spiders nn <- nndist(XX) nnP <- f(pairdist(XX), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.lpp does not agree with pairdist.lpp") nw <- nnwhich(XX) nwP <- g(pairdist(XX), 1) if(any(nw != nwP)) stop("nnwhich.lpp does not agree with pairdist") ZZ <- split(chicago) XX <- ZZ$damage YY <- ZZ$assault op <- spatstat.options(Cnncrosslpp=FALSE) a <- nncross(XX, YY) spatstat.options(Cnncrosslpp=TRUE) b <- nncross(XX, YY) if(any(a$which != b$which)) stop("Inconsistent values of nncross.lpp()$which from different C code") if(max(abs(a$dist - b$dist)) > eps) stop("Inconsistent values of nncross.lpp()$dist from different C code") spatstat.options(Cnncrosslpp=TRUE) b2 <- nncross(XX, YY, k=1:2, what="which") if(any(b2$which.1 != b$which)) stop("inconsistent values of nncross.lpp()$which from k=1:2 and k=1") a2 <- nncross(XX, YY, k=1:2, what="dist") if(max(abs(a2$dist.1 - a$dist)) > eps) stop("Inconsistent values of nncross.lpp()$dist from k=1:2 and k=1") spatstat.options(Cnncrosslpp=TRUE) ii <- seq_len(npoints(XX)) w1 <- nnwhich(XX) w2 <- nncross(XX, XX, iX=ii, iY=ii, what="which") w3 <- nncross(XX, XX, iX=ii, iY=ii, what="which", method="interpreted") if(any(w1 != w2)) stop("nnwhich.lpp disagrees with nncross.lpp(iX, iY)") if(any(w2 != w3)) stop("Different results for nncross.lpp(iX, iY, 'which') using R and C") d1 <- nndist(XX) d2 <- nncross(XX, XX, iX=ii, iY=ii, what="dist") d3 <- nncross(XX, XX, iX=ii, iY=ii, what="dist", method="interpreted") if(max(abs(d1-d2)) > eps) stop("nndist.lpp disagrees with nncross.lpp(iX, iY)") if(max(abs(d2-d3)) > eps) stop("Different results for nncross.lpp(iX, iY, 'dist') using R and C") spatstat.options(op) # test handling marginal cases xyd <- nncross(XX, YY[1]) ## as.linnet.psp (Suman's example) Lines <- as.data.frame(as.psp(simplenet)) newseg <- c(Lines[1,1:2], Lines[10,3:4]) Lines <- rbind(Lines, newseg) Y 
<- as.psp(Lines, window=Window(simplenet)) marks(Y) <- c(3, 4, 5, 5, 3, 4, 5, 5,5, 5,1) Z <- as.linnet(Y) # can crash if marks don't match segments ## Test linnet surgery code set.seed(42) X <- runiflpp(30, simplenet) V <- runiflpp(30, simplenet) XV <- insertVertices(X, V) validate.lpp.coords(XV, context="calculated by insertVertices") ## Test [.lpp internal data B <- owin(c(0.1,0.7),c(0.19,0.6)) XB <- X[B] validate.lpp.coords(XB, context="returned by [.lpp") ## Tests related to linearK, etc testcountends <- function(X, r=100, s=1) { if(s != 1) { X <- rescale(X, s) r <- r/s } L <- as.linnet(X) n1 <- countends(L, X[1], r) n2 <- npoints(lineardisc(L, X[1], r, plotit=FALSE)$endpoints) if(n1 != n2) stop(paste("Incorrect result from countends:", n1, "!=", n2, paren(paste("scale=", 1/s))), call.=FALSE) } # original scale X <- unmark(chicago) testcountends(X) # finer scale testcountends(X, s=1000) ## Test algorithms for boundingradius.linnet L <- as.linnet(chicago, sparse=TRUE) opa <- spatstat.options(Clinearradius=FALSE) bR <- as.linnet(L, sparse=FALSE)$boundingradius spatstat.options(Clinearradius=TRUE) bC <- as.linnet(L, sparse=FALSE)$boundingradius spatstat.options(opa) if(abs(bR-bC) > 0.001 * (bR+bC)/2) stop("Disagreement between R and C algorithms for boundingradius.linnet", call.=FALSE) }) ## ## tests/marcelino.R ## ## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ ## require(spatstat) local({ Y <- split(urkiola) B <- Y$birch O <- Y$oak B.lam <- predict (ppm(B ~polynom(x,y,2)), type="trend") O.lam <- predict (ppm(O ~polynom(x,y,2)), type="trend") Kinhom(B, lambda=B.lam, correction="iso") Kinhom(B, lambda=B.lam, correction="border") Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam) Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "iso") Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "border") }) ## ## tests/markcor.R ## ## Tests of mark correlation code (etc) ## ## $Revision: 1.4 $ $Date: 2015/12/29 08:54:49 $ 
require(spatstat) local({ ## check.testfun checks equality of functions ## and is liable to break if the behaviour of all.equal is changed fe <- function(m1, m2) {m1 == m2} fm <- function(m1, m2) {m1 * m2} fs <- function(m1, m2) {sqrt(m1)} if(check.testfun(fe, X=amacrine)$ftype != "equ") warning("check.testfun fails to recognise mark equality function") if(check.testfun(fm, X=longleaf)$ftype != "mul") warning("check.testfun fails to recognise mark product function") check.testfun(fs, X=longleaf) ## test all is well in Kmark -> Kinhom MA <- Kmark(amacrine,function(m1,m2){m1==m2}) set.seed(42) AR <- rlabel(amacrine) MR <- Kmark(AR,function(m1,m2){m1==m2}) if(isTRUE(all.equal(MA,MR))) stop("Kmark unexpectedly ignores marks") }) # # tests/mppm.R # # Basic tests of mppm # # $Revision: 1.8 $ $Date: 2016/06/28 04:19:08 $ # require(spatstat) local({ ## test interaction formulae and subfits fit1 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~ifelse(group=="control", po, str)) fit2 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~str/id) fit3 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), pie=PairPiece(c(0.05,0.1))), iformula=~I((group=="control") * po) + I((group=="treatment") * pie)) fit1 fit2 fit3 ## run summary.mppm which currently sits in spatstat-internal.Rd summary(fit1) summary(fit2) summary(fit3) ## test vcov algorithm vcov(fit1) vcov(fit2) vcov(fit3) ## test subfits algorithm s1 <- subfits(fit1) s2 <- subfits(fit2) s3 <- subfits(fit3) ## validity of results of subfits() p1 <- solapply(s1, predict) p2 <- solapply(s2, predict) p3 <- solapply(s3, predict) }) local({ ## [thanks to Sven Wagner] ## factor covariate, with some levels unused in some rows set.seed(14921788) H <- hyperframe(X=replicate(3, runifpoint(20), simplify=FALSE), Z=solist(as.im(function(x,y){x}, owin()), as.im(function(x,y){y}, owin()), as.im(function(x,y){x+y}, owin()))) H$Z <- solapply(H$Z, cut, breaks=(0:4)/2) fit6 
<- mppm(X ~ Z, H) v6 <- vcov(fit6) s6 <- subfits(fit6) p6 <- solapply(s6, predict) # random effects fit7 <- mppm(X ~ Z, H, random=~1|id) v7 <- vcov(fit7) s7 <- subfits(fit7) p7 <- solapply(s7, predict) fit7a <- mppm(X ~ Z, H, random=~x|id) v7a <- vcov(fit7a) s7a <- subfits(fit7a) p7a <- solapply(s7a, predict) # multitype: collisions in vcov.ppm, predict.ppm H$X <- lapply(H$X, rlabel, labels=factor(c("a","b")), permute=FALSE) M <- MultiStrauss(matrix(0.1, 2, 2), c("a","b")) fit8 <- mppm(X ~ Z, H, M) v8 <- vcov(fit8, fine=TRUE) s8 <- subfits(fit8) p8 <- lapply(s8, predict) c8 <- lapply(s8, predict, type="cif") fit9 <- mppm(X ~ Z, H, M, iformula=~Interaction * id) v9 <- vcov(fit9, fine=TRUE) s9 <- subfits(fit9) p9 <- lapply(s9, predict) c9 <- lapply(s9, predict, type="cif") # and a simple error in recognising 'marks' fit10 <- mppm(X ~ marks, H) }) local({ ## test handling of offsets and zero cif values in mppm H <- hyperframe(Y = waterstriders) mppm(Y ~ 1, data=H, Hardcore(1.5)) mppm(Y ~ 1, data=H, StraussHard(7, 1.5)) ## prediction, in training/testing context ## (example from Markus Herrmann and Ege Rubak) X <- waterstriders dist <- solapply(waterstriders, function(z) distfun(runifpoint(1, Window(z)))) i <- 3 train <- hyperframe(pattern = X[-i], dist = dist[-i]) test <- hyperframe(pattern = X[i], dist = dist[i]) fit <- mppm(pattern ~ dist, data = train) pred <- predict(fit, type="cif", newdata=test, verbose=TRUE) }) local({ ## test handling of interaction coefficients in multitype case set.seed(42) XX <- as.solist(replicate(3, rthin(amacrine, 0.8), simplify=FALSE)) H <- hyperframe(X=XX) M <- MultiStrauss(matrix(0.1, 2, 2), levels(marks(amacrine))) fit <- mppm(X ~ 1, H, M) co <- coef(fit) subco <- sapply(subfits(fit), coef) if(max(abs(subco - co)) > 0.001) stop("Wrong coefficient values in subfits, for multitype interaction") }) spatstat/tests/testsAtoF.R0000644000176200001440000004772313142737416015343 0ustar liggesusers## badwindowcheck.R ## $Revision: 1.2 $ $Date: 
2014/01/27 07:18:41 $ ## require(spatstat) local({ ## Simple example of self-crossing polygon x <- read.table("selfcross.txt", header=TRUE) ## Auto-repair w <- owin(poly=x) ## Real data involving various quirks b <- read.table("badwindow.txt", header=TRUE) b <- split(b, factor(b$i)) b <- lapply(b, function(z) { as.list(z[,-3]) }) ## make owin without checking W <- owin(poly=b, check=FALSE) ## Apply stringent checks owinpolycheck(W,verbose=FALSE) ## Auto-repair W2 <- owin(poly=b) }) ## tests/cdf.test.R ## check cdf.test with strange data require(spatstat) local({ # Marked point patterns with some marks not represented AC <- split(ants, un=FALSE)$Cataglyphis AM <- split(ants, un=FALSE)$Messor DM <- distmap(AM) # should produce a warning, rather than a crash: cdf.test(AC, DM) # should be OK: cdf.test(unmark(AC), DM) cdf.test(unmark(AC), DM, "cvm") cdf.test(unmark(AC), DM, "ad") # linear networks set.seed(42) X <- runiflpp(20, simplenet) fit <- lppm(X ~1) cdf.test(fit, "y") cdf.test(fit, "y", "cvm") cdf.test(fit, "y", "ad") }) ## tests/closeshave.R ## check 'closepairs/crosspairs' code ## validity and memory allocation ## $Revision: 1.5 $ $Date: 2016/03/28 04:21:07 $ local({ r <- 0.12 close.all <- closepairs(redwood, r) close.ij <- closepairs(redwood, r, what="indices") close.ijd <- closepairs(redwood, r, what="ijd") stopifnot(identical(close.ij, close.all[c("i","j")])) stopifnot(identical(close.ijd, close.all[c("i","j","d")])) Y <- split(amacrine) on <- Y$on off <- Y$off cross.all <- crosspairs(on, off, r) cross.ij <- crosspairs(on, off, r, what="indices") cross.ijd <- crosspairs(on, off, r, what="ijd") stopifnot(identical(cross.ij, cross.all[c("i","j")])) stopifnot(identical(cross.ijd, cross.all[c("i","j","d")])) # closethresh vs closepairs: EXACT agreement thresh <- 0.08 clt <- closethresh(redwood, r, thresh) cl <- with(closepairs(redwood, r), list(i=i, j=j, th = (d <= thresh))) if(!identical(cl, clt)) stop("closepairs and closethresh disagree") # compare with 
older, slower code reordered <- function(a) { o <- with(a, order(i,j)) as.list(as.data.frame(a)[o,,drop=FALSE]) } samesame <- function(a, b) { identical(reordered(a), reordered(b)) } spatstat.options(closepairs.newcode=FALSE) old.close.ij <- closepairs(redwood, r, what="indices") old.cross.ij <- crosspairs(on, off, r, what="indices") stopifnot(samesame(close.ij, old.close.ij)) stopifnot(samesame(cross.ij, old.cross.ij)) spatstat.options(closepairs.newcode=TRUE) # Rasmus' example R <- 0.04 U <- as.ppp(gridcenters(owin(), 50, 50), W=owin()) cp <- crosspairs(U, U, R) G <- matrix(0, npoints(U), npoints(U)) G[cbind(cp$i, cp$j)] <- 1 if(!isSymmetric(G)) stop("crosspairs is not symmetric in Rasmus example") }) ## tests/colour.R ## ## $Revision: 1.1 $ $Date: 2015/12/29 08:54:49 $ ## require(spatstat) local({ f <- function(n) grey(seq(0,1,length=n)) z <- to.grey(f) }) # tests/correctC.R # check for agreement between C and interpreted code # for interpoint distances etc. # $Revision: 1.4 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ eps <- .Machine$double.eps * 4 # pairdist.ppp X <- rpoispp(42) dC <- pairdist(X, method="C") dR <- pairdist(X, method="interpreted") if(any(abs(dC - dR) > eps)) stop("Algorithms for pairdist() do not agree") dC <- pairdist(X, periodic=TRUE, method="C") dR <- pairdist(X, periodic=TRUE, method="interpreted") if(any(abs(dC - dR) > eps)) stop("Algorithms for pairdist(periodic=TRUE) do not agree") # crossdist.ppp Y <- rpoispp(42) dC <- crossdist(X, Y, method="C") dR <- crossdist(X, Y, method="interpreted") if(any(abs(dC - dR) > eps)) stop("Algorithms for crossdist() do not agree") dC <- crossdist(X, Y, periodic=TRUE, method="C") dR <- crossdist(X, Y, periodic=TRUE, method="interpreted") if(any(abs(dC - dR) > eps)) stop("Algorithms for crossdist(periodic=TRUE) do not agree") # nndist.ppp nnC <- nndist(X, method="C") nnI <- nndist(X, method="interpreted") if(any(abs(nnC - nnI) > eps)) stop("Algorithms for nndist() do not agree") nn3C <- 
nndist(X, k=3, method="C") nn3I <- nndist(X, k=3, method="interpreted") if(any(abs(nn3C - nn3I) > eps)) stop("Algorithms for nndist(k=3) do not agree") # nnwhich.ppp nwC <- nnwhich(X, method="C") nwI <- nnwhich(X, method="interpreted") if(any(nwC != nwI)) stop("Algorithms for nnwhich() do not agree") nw3C <- nnwhich(X, k=3, method="C") nw3I <- nnwhich(X, k=3, method="interpreted") if(any(nw3C != nw3I)) stop("Algorithms for nnwhich(k=3) do not agree") # whist set.seed(98123) x <- runif(1000) w <- sample(1:5, 1000, replace=TRUE) b <- seq(0,1,length=101) op <- spatstat.options(Cwhist=TRUE) aT <- whist(x,b,w) spatstat.options(Cwhist=FALSE) aF <- whist(x,b,w) if(!all(aT == aF)) stop("Algorithms for whist disagree") spatstat.options(op) }) # # tests/density.R # # Test behaviour of density methods and inhomogeneous summary functions # # $Revision: 1.9 $ $Date: 2017/08/10 02:01:54 $ # require(spatstat) local({ # test all cases of density.ppp tryit <- function(...) { Z <- density(cells, ..., at="pixels") Z <- density(cells, ..., at="points") return(invisible(NULL)) } tryit(0.05) tryit(0.05, diggle=TRUE) tryit(0.05, se=TRUE) tryit(varcov=diag(c(0.05^2, 0.07^2))) tryit(0.05, weights=data.frame(a=1:42,b=42:1)) tryit(0.05, weights=expression(x)) # apply different discretisation rules Z <- density(cells, 0.05, fractional=TRUE) Z <- density(cells, 0.05, preserve=TRUE) Z <- density(cells, 0.05, fractional=TRUE, preserve=TRUE) ## compare density.ppp(at="points") results with different algorithms crosscheque <- function(expr) { e <- as.expression(substitute(expr)) ename <- sQuote(deparse(substitute(expr))) ## interpreted R opa <- spatstat.options(densityC=FALSE, densityTransform=FALSE) val.interpreted <- eval(e) ## established C algorithm 'denspt' spatstat.options(densityC=TRUE, densityTransform=FALSE) val.C <- eval(e) ## new C algorithm 'Gdenspt' using transformed coordinates spatstat.options(densityC=TRUE, densityTransform=TRUE) val.Transform <- eval(e) spatstat.options(opa) 
if(max(abs(val.interpreted - val.C)) > 0.001) stop(paste("Numerical discrepancy between R and C algorithms in", ename)) if(max(abs(val.C - val.Transform)) > 0.001) stop(paste("Numerical discrepancy between C algorithms", "using transformed and untransformed coordinates in", ename)) invisible(NULL) } crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE)) lam <- density(redwood) K <- Kinhom(redwood, lam) lamX <- density(redwood, at="points") KX <- Kinhom(redwood, lamX) ## test all code cases of new 'relrisk.ppp' algorithm pants <- function(..., X=ants) { a <- relrisk(X, sigma=100, se=TRUE, ...) return(TRUE) } pants() pants(casecontrol=FALSE) pants(relative=TRUE) pants(casecontrol=FALSE, relative=TRUE) pants(at="points") pants(casecontrol=FALSE,at="points") pants(relative=TRUE,at="points") pants(casecontrol=FALSE, relative=TRUE,at="points") ## more than 2 types pants(X=sporophores) pants(X=sporophores, at="points") pants(X=sporophores, relative=TRUE, at="points") ## Smooth.ppp Z <- Smooth(longleaf, 5, diggle=TRUE) Z <- Smooth(longleaf, 1e-6) # generates warning about small bandwidth ## Smooth.ppp(at='points') Y <- longleaf %mark% runif(npoints(longleaf), min=41, max=43) Z <- Smooth(Y, 5, at="points", leaveoneout=TRUE) rZ <- range(Z) if(rZ[1] < 40 || rZ[2] > 44) stop("Implausible results from Smooth.ppp(at=points, leaveoneout=TRUE)") Z <- Smooth(Y, 5, at="points", leaveoneout=FALSE) rZ <- range(Z) if(rZ[1] < 40 || rZ[2] > 44) stop("Implausible results from Smooth.ppp(at=points, leaveoneout=FALSE)") ## compare Smooth.ppp results with different algorithms crosscheque(Smooth(longleaf, at="points", sigma=6)) ## drop-dimension coding errors X <- longleaf marks(X) <- cbind(marks(X), 1) Z <- Smooth(X, 5) ZZ <- bw.smoothppp(finpines, hmin=0.01, hmax=0.012, nh=2) # reshaping problem ## geometric-mean smoothing U <- Smooth(longleaf, 5, geometric=TRUE) UU <- Smooth(X, 5, geometric=TRUE) }) #' #' tests/discarea.R #' #' $Revision: 1.1 $ $Date: 2016/03/28 09:16:03 $ #' 
require(spatstat) local({ u <- c(0.5,0.5) B <- owin(poly=list(x=c(0.3, 0.5, 0.7, 0.4), y=c(0.3, 0.3, 0.6, 0.8))) areaGain(u, cells, 0.1, exact=TRUE) areaGain(u, cells, 0.1, W=NULL) areaGain(u, cells, 0.1, W=B) areaLoss(cells[square(0.4)], 0.1, exact=TRUE) }) #' #' tests/disconnected.R #' #' disconnected linear networks #' #' $Revision: 1.2 $ $Date: 2017/06/05 14:58:36 $ require(spatstat) local({ #' disconnected network m <- simplenet$m m[4,5] <- m[5,4] <- m[6,10] <- m[10,6] <- m[4,6] <- m[6,4] <- FALSE L <- linnet(vertices(simplenet), m) L summary(L) Z <- connected(L, what="components") #' point pattern with no points in one connected component set.seed(42) X <- rpoislpp(lambda=function(x,y) { 10 * (x < 0.5)}, L) B <- lineardirichlet(X) plot(B) summary(B) D <- pairdist(X) A <- nndist(X) H <- nnwhich(X) Y <- rpoislpp(lambda=function(x,y) { 10 * (x < 0.5)}, L) G <- nncross(X, Y) J <- crossdist(X, Y) plot(distfun(X)) # includes evaluation of nncross(what="dist") #' K functions in disconnected network K <- linearK(X) lamX <- intensity(X) nX <- npoints(X) KI <- linearKinhom(X, lambda=rep(lamX, nX)) P <- linearpcf(X) PJ <- linearpcfinhom(X, lambda=rep(lamX, nX)) Y <- X %mark% factor(rep(1:2, nX)[1:nX]) Y1 <- split(Y)[[1]] Y2 <- split(Y)[[2]] KY <- linearKcross(Y) PY <- linearpcfcross(Y) KYI <- linearKcross.inhom(Y, lambdaI=rep(intensity(Y1), npoints(Y1)), lambdaJ=rep(intensity(Y2), npoints(Y2))) PYI <- linearpcfcross.inhom(Y, lambdaI=rep(intensity(Y1), npoints(Y1)), lambdaJ=rep(intensity(Y2), npoints(Y2))) }) # tests/emptymarks.R # # test cases where there are no (rows or columns of) marks # # $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ n <- npoints(cells) df <- data.frame(x=1:n, y=factor(sample(letters, n, replace=TRUE))) nocolumns <- c(FALSE, FALSE) norows <- rep(FALSE, n) X <- cells marks(X) <- df marks(X) <- df[,1] marks(X) <- df[,nocolumns] Z <- Y <- X[integer(0)] marks(Y) <- df[norows,] stopifnot(is.marked(Y)) marks(Z) <- 
df[norows,nocolumns] stopifnot(!is.marked(Z)) }) # # tests/envelopes.R # # Test validity of envelope data # # $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ checktheo <- function(fit) { fitname <- deparse(substitute(fit)) en <- envelope(fit, nsim=4, verbose=FALSE, nrep=1e3) nama <- names(en) expecttheo <- is.poisson(fit) && is.stationary(fit) context <- paste("Envelope of", fitname) if(expecttheo) { if(!("theo" %in% nama)) stop(paste(context, "did not contain", sQuote("theo"))) if("mmean" %in% nama) stop(paste(context, "unexpectedly contained", sQuote("mmean"))) } else { if("theo" %in% nama) stop(paste(context, "unexpectedly contained", sQuote("theo"))) if(!("mmean" %in% nama)) stop(paste(context, "did not contain", sQuote("mmean"))) } cat(paste(context, "has correct format\n")) } checktheo(ppm(cells)) checktheo(ppm(cells ~x)) checktheo(ppm(cells ~1, Strauss(0.1))) # check envelope calls from 'alltypes' a <- alltypes(demopat, Kcross, nsim=4, envelope=TRUE) b <- alltypes(demopat, Kcross, nsim=4, envelope=TRUE, global=TRUE) # check 'transform' idioms A <- envelope(cells, Kest, nsim=4, transform=expression(. 
- .x)) B <- envelope(cells, Kest, nsim=4, transform=expression(sqrt(./pi) - .x)) #' check savefuns/savepatterns with global fit <- ppm(cells~x) Ef <- envelope(fit, Kest, nsim=4, savefuns=TRUE, global=TRUE) Ep <- envelope(fit, Kest, nsim=4, savepatterns=TRUE, global=TRUE) # check conditional simulation e1 <- envelope(cells, Kest, nsim=4, fix.n=TRUE) e2 <- envelope(amacrine, Kest, nsim=4, fix.n=TRUE) e3 <- envelope(amacrine, Kcross, nsim=4, fix.marks=TRUE) fit <- ppm(japanesepines ~ 1, Strauss(0.04)) e4 <- envelope(fit, Kest, nsim=4, fix.n=TRUE) fit2 <- ppm(amacrine ~ 1, Strauss(0.03)) e5 <- envelope(fit2, Gcross, nsim=4, fix.marks=TRUE) # check pooling of envelopes in global case E1 <- envelope(cells, Kest, nsim=5, savefuns=TRUE, global=TRUE) E2 <- envelope(cells, Kest, nsim=12, savefuns=TRUE, global=TRUE) p12 <- pool(E1, E2) E1r <- envelope(cells, Kest, nsim=5, savefuns=TRUE, global=TRUE, ginterval=c(0.05, 0.15)) E2r <- envelope(cells, Kest, nsim=12, savefuns=TRUE, global=TRUE, ginterval=c(0.05, 0.15)) p12r <- pool(E1r, E2r) }) local({ #' as.data.frame.envelope Nsim <- 5 E <- envelope(cells, nsim=Nsim, savefuns=TRUE) A <- as.data.frame(E) B <- as.data.frame(E, simfuns=TRUE) stopifnot(ncol(B) - ncol(A) == Nsim) }) # # tests/factorbugs.R # # check for various bugs related to factor conversions # # $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ # make a factor image m <- factor(rep(letters[1:4], 4)) Z <- im(m, xcol=1:4, yrow=1:4) # make a point pattern set.seed(42) X <- runifpoint(20, win=as.owin(Z)) # look up the image at the points of X # (a) internal ans1 <- lookup.im(Z, X$x, X$y) stopifnot(is.factor(ans1)) # (b) user level ans2 <- Z[X] stopifnot(is.factor(ans2)) # (c) turn the image into a tessellation # and apply quadratcount V <- tess(image = Z) quadratcount(X, tess=V) # (d) pad image Y <- padimage(Z, factor("b", levels=levels(Z))) stopifnot(Y$type == "factor") U <- padimage(Z, "b") stopifnot(U$type == "factor") }) # # 
tests/fastgeyer.R # # checks validity of fast C implementation of Geyer interaction # # $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ X <- redwood Q <- quadscheme(X) U <- union.quad(Q) EP <- equalpairs.quad(Q) G <- Geyer(0.11, 2) # The value r=0.11 is chosen to avoid hardware numerical effects (gcc bug 323). # It avoids being close any value of pairdist(redwood). # The nearest such values are 0.1077.. and 0.1131.. # By contrast if r = 0.1 there are values differing from 0.1 by 3e-17 a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match") # ... # and again for a non-integer value of 'sat' # (spotted by Thordis Linda Thorarinsdottir) G <- Geyer(0.11, 2.5) a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat is not an integer") # and again for sat < 1 # (spotted by Rolf) G <- Geyer(0.11, 0.5) a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat < 1") }) # # tests/fastK.R # # check fast and slow code for Kest # # $Revision: 1.3 $ $Date: 2017/07/02 08:41:46 $ # require(spatstat) local({ ## fast code Kb <- Kest(cells, nlarge=0) Ku <- Kest(cells, correction="none") Kbu <- Kest(cells, correction=c("none", "border")) ## slow code, full set of corrections, sqrt transformation Ldd <- Lest(unmark(demopat), correction="all", var.approx=TRUE) ## Kinhom lam <- density(cells, at="points", leaveoneout=TRUE) ## fast code Kib <- Kinhom(cells, lam, nlarge=0) Kiu <- Kest(cells, lam, correction="none") Kibu <- Kest(cells, lam, correction=c("none", "border")) ## slow code Lidd <- Linhom(unmark(demopat), sigma=bw.scott) }) #' tests/formuli.R #' #' Test 
machinery for manipulating formulae #' #' $Revision: 1.4 $ $Date: 2017/02/20 07:35:47 $ require(spatstat) local({ ff <- function(A, deletevar, B) { D <- reduceformula(A, deletevar) if(!spatstat.utils::identical.formulae(D, B)) { AD <- as.expression(substitute(reduceformula(A,d), list(A=A, d=deletevar))) stop(paste(AD, "\n\tyields ", spatstat.utils::pasteFormula(D), " instead of ", spatstat.utils::pasteFormula(B)), call.=FALSE) } invisible(NULL) } ff(~ x + z, "x", ~z) ff(y ~ x + z, "x", y~z) ff(~ I(x^2) + z, "x", ~z) ff(y ~ poly(x,2) + poly(z,3), "x", y ~poly(z,3)) }) # # tests/func.R # # $Revision: 1.3 $ $Date: 2016/06/10 15:04:08 $ # # Tests of 'funxy' infrastructure etc require(spatstat) local({ W <- square(1) f1a <- function(x, y) sqrt(x^2 + y^2) f1b <- function(x, y) { sqrt(x^2 + y^2) } f2a <- function(x, y) sin(x) f2b <- function(x, y) { sin(x) } f3a <- function(x, y) sin(x) + cos(x) f3b <- function(x, y) { sin(x) + cos(x) } f4a <- function(x, y) { z <- x + y ; z } f4b <- function(x, y) { x + y } F1a <- funxy(f1a, W) F1b <- funxy(f1b, W) F2a <- funxy(f2a, W) F2b <- funxy(f2b, W) F3a <- funxy(f3a, W) F3b <- funxy(f3b, W) F4a <- funxy(f4a, W) F4b <- funxy(f4b, W) stopifnot(identical(F1a(cells), F1b(cells))) stopifnot(identical(F2a(cells), F2b(cells))) stopifnot(identical(F3a(cells), F3b(cells))) stopifnot(identical(F4a(cells), F4b(cells))) }) ## ## tests/funnymarks.R ## ## tests involving strange mark values ## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ ## ppm() where mark levels contain illegal characters hyphenated <- c("a", "not-a") spaced <- c("U", "non U") suffixed <- c("a+", "a*") charred <- c("+", "*") irad <- matrix(0.1, 2,2) hrad <- matrix(0.005, 2, 2) tryit <- function(types, X, irad, hrad) { levels(marks(X)) <- types fit <- ppm(X ~marks + polynom(x,y,2), MultiStraussHard(types=types,iradii=irad,hradii=hrad)) print(fit) print(coef(fit)) val <- fitted(fit) pred <- predict(fit) return(invisible(NULL)) } tryit(hyphenated, 
amacrine, irad, hrad) tryit(spaced, amacrine, irad, hrad) tryit(suffixed, amacrine, irad, hrad) tryit(charred, amacrine, irad, hrad) ## marks which are dates X <- cells n <- npoints(X) endoftime <- rep(ISOdate(2001,1,1), n) eotDate <- rep(as.Date("2001-01-01"), n) markformat(endoftime) markformat(eotDate) marks(X) <- endoftime print(X) Y <- X %mark% data.frame(id=1:42, date=endoftime, dd=eotDate) print(Y) }) # # tests/fvproblems.R # # $Revision: 1.7 $ $Date: 2016/03/08 00:26:23 $ # require(spatstat) # This appears in the workshop notes # Problem detected by Martin Bratschi local({ Jdif <- function(X, ..., i) { Jidot <- Jdot(X, ..., i=i) J <- Jest(X, ...) dif <- eval.fv(Jidot - J) return(dif) } Z <- Jdif(amacrine, i="on") }) # # Test mathlegend code # local({ K <- Kest(cells) plot(K) plot(K, . ~ r) plot(K, . - theo ~ r) plot(K, sqrt(./pi) ~ r) plot(K, cbind(iso, theo) ~ r) plot(K, cbind(iso, theo) - theo ~ r) plot(K, sqrt(cbind(iso, theo)/pi) ~ r) plot(K, cbind(iso/2, -theo) ~ r) plot(K, cbind(iso/2, trans/2) - theo ~ r) # test expansion of .x and .y plot(K, . ~ .x) plot(K, . 
- theo ~ .x) plot(K, .y - theo ~ .x) plot(K, sqrt(.y) - sqrt(theo) ~ .x) # problems with parsing weird strings in levels(marks(X)) # noted by Ulf Mehlig levels(marks(amacrine)) <- c("Nasticreechia krorluppia", "Homo habilis") plot(Kcross(amacrine)) plot(alltypes(amacrine, "K")) plot(alltypes(amacrine, "J")) plot(alltypes(amacrine, pcfcross)) }) # # Test quirks related to 'alim' attribute local({ K <- Kest(cells) attr(K, "alim") <- NULL plot(K) attr(K, "alim") <- c(0, 0.1) plot(tail(K)) }) # # Check that default 'r' vector passes the test for fine spacing local({ a <- Fest(cells) A <- Fest(cells, r=a$r) b <- Hest(heather$coarse) B <- Hest(heather$coarse, r=b$r) # from Cenk Icos X <- runifpoint(100, owin(c(0,3), c(0,10))) FX <- Fest(X) FXr <- Fest(X, r=FX$r) JX <- Jest(X) }) spatstat/tests/testsStoZ.R0000644000176200001440000004051313115225157015371 0ustar liggesusers# # tests/segments.R # # $Revision: 1.11 $ $Date: 2017/02/20 10:15:30 $ require(spatstat) local({ # pointed out by Jeff Laake W <- owin() X <- psp(x0=.25,x1=.25,y0=0,y1=1,window=W) X[W] # migrated from 'lpp' X <- psp(runif(10),runif(10),runif(10),runif(10), window=owin()) Z <- as.mask.psp(X) Z <- pixellate(X) # more tests of lppm code fit <- lppm(unmark(chicago) ~ polynom(x,y,2)) Z <- predict(fit) # tests of pixellate.psp -> seg2pixL ns <- 50 out <- numeric(ns) for(i in 1:ns) { X <- psp(runif(1), runif(1), runif(1), runif(1), window=owin()) len <- lengths.psp(X) dlen <- sum(pixellate(X)$v) out[i] <- if(len > 1e-7) dlen/len else 1 } if(diff(range(out)) > 0.01) stop(paste( "pixellate.psp test 1: relative error [", paste(diff(range(out)), collapse=", "), "]")) # Michael Sumner's test examples set.seed(33) n <- 2001 co <- cbind(runif(n), runif(n)) ow <- owin() X <- psp(co[-n,1], co[-n,2], co[-1,1], co[-1,2], window=ow) s1 <- sum(pixellate(X)) s2 <- sum(lengths.psp(X)) if(abs(s1 - s2)/s2 > 0.01) { stop(paste("pixellate.psp test 2:", "sum(pixellate(X)) = ", s1, "!=", s2, "= sum(lengths.psp(X))")) } wts <- 
1/(lengths.psp(X) * X$n) s1 <- sum(pixellate(X, weights=wts)) if(abs(s1-1) > 0.01) { stop(paste("pixellate.psp test 3:", "sum(pixellate(X, weights))=", s1, " (should be 1)")) } X <- psp(0, 0, 0.01, 0.001, window=owin()) s1 <- sum(pixellate(X)) s2 <- sum(lengths.psp(X)) if(abs(s1 - s2)/s2 > 0.01) { stop(paste("pixellate.psp test 4:", "sum(pixellate(X)) = ", s1, "!=", s2, "= sum(lengths.psp(X))")) } X <- psp(0, 0, 0.001, 0.001, window=owin()) s1 <- sum(pixellate(X)) s2 <- sum(lengths.psp(X)) if(abs(s1 - s2)/s2 > 0.01) { stop(paste("pixellate.psp test 5:", "sum(pixellate(X)) = ", s1, "!=", s2, "= sum(lengths.psp(X))")) } #' tests of density.psp Y <- as.psp(simplenet) YC <- density(Y, 0.2, method="C", edge=FALSE, dimyx=64) YI <- density(Y, 0.2, method="interpreted", edge=FALSE, dimyx=64) YF <- density(Y, 0.2, method="FFT", edge=FALSE, dimyx=64) xCI <- max(abs(YC/YI - 1)) xFI <- max(abs(YF/YI - 1)) if(xCI > 0.01) stop(paste("density.psp C algorithm relative error =", xCI)) if(xFI > 0.01) stop(paste("density.psp FFT algorithm relative error =", xFI)) }) # ## tests/sigtraceprogress.R # ## Tests of *.sigtrace and *.progress # ## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ plot(dclf.sigtrace(redwood, nsim=19, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dclf.progress(redwood, nsim=19, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dg.sigtrace(redwood, nsim=5, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dg.progress(redwood, nsim=5, alternative="greater", rmin=0.02, verbose=FALSE)) ## test 'leave-two-out' algorithm a <- dclf.sigtrace(redwood, Lest, nsim=9, use.theory=FALSE, leaveout=2, verbose=FALSE) aa <- dclf.progress(redwood, Lest, nsim=9, use.theory=FALSE, leaveout=2, verbose=FALSE) b <- dg.sigtrace(redwood, Lest, nsim=5, use.theory=FALSE, leaveout=2) bb <- dg.progress(redwood, Lest, nsim=5, use.theory=FALSE, leaveout=2, verbose=FALSE) }) # # tests/slrm.R # # $Revision: 1.1 $ $Date: 2013/04/19 10:14:52 $ # # Test 
slrm fitting and prediction when there are NA's # require(spatstat) local({ X <- copper$SouthPoints W <- owin(poly=list(x=c(0,35,35,1),y=c(1,1,150,150))) Y <- X[W] fit <- slrm(Y ~ x+y) pred <- predict(fit) }) #' tests/sparse3Darrays.R #' Basic tests of sparse3array.R code #' $Revision: 1.8 $ $Date: 2017/02/22 09:00:27 $ require(spatstat) local({ if(require(Matrix)) { M <- sparse3Darray(i=1:4, j=sample(1:4, replace=TRUE), k=c(1,2,1,2), x=1:4, dims=c(5,5,2)) M dimnames(M) <- list(letters[1:5], LETTERS[1:5], c("yes", "no")) M U <- aperm(M, c(1,3,2)) U M[ 3:4, , ] M[ 3:4, 2:4, ] M[, 3, ] M[, 3, , drop=FALSE] MA <- as.array(M) UA <- as.array(U) ## tests of "[<-.sparse3Darray" Mflip <- Mzero <- MandM <- M Mflip[ , , 2:1] <- M stopifnot(Mflip[3,1,1] == M[3,1,2]) Mzero[1:3,1:3,] <- 0 stopifnot(all(Mzero[1,1,] == 0)) M2a <- M[,,2,drop=FALSE] M2d <- M[,,2,drop=TRUE] MandM[,,1] <- M2a MandM[,,1] <- M2d # matrix index M[cbind(3:5, 2, 2)] ## tests of arithmetic (Math, Ops, Summary) negM <- -M oneM <- 1 * M twoM <- M + M range(M) cosM <- cos(M) # non-sparse sinM <- sin(M) # sparse stopifnot(all((M+M) == 2*M)) # non-sparse stopifnot(!any((M+M) != 2*M)) # sparse ztimesM <- (1:5) * M # sparse zplusM <- (1:5) + M # non-sparse ## tensor operator tenseur(c(1,-1), M, 1, 3) tenseur(M, M, 1:2, 1:2) tenseur(M, M, 1:2, 2:1) V <- sparseVector(i=c(1,3,6),x=1:3, length=7) tenseur(V,V) tenseur(V,V,1,1) ## test of anyNA method anyNA(M) ## a possible application in spatstat cl10 <- as.data.frame(closepairs(cells, 0.1)) cl12 <- as.data.frame(closepairs(cells, 0.12)) cl10$k <- 1 cl12$k <- 2 cl <- rbind(cl10, cl12) n <- npoints(cells) Z <- with(cl, sparse3Darray(i=i, j=j, k=k, x=1, dims=c(n,n,2))) dimnames(Z) <- list(NULL, NULL, c("r=0.1", "r=0.12")) Z <- aperm(Z, c(3,1,2)) stopifnot(all(sumsymouterSparse(Z) == sumsymouter(as.array(Z)))) # no entries indexed Z[integer(0), integer(0), integer(0)] <- 42 Z[matrix(, 0, 3)] <- 42 } }) # # tests/splitpea.R # # Check behaviour of split.ppp etc # # Thanks 
to Marcelino de la Cruz # # $Revision: 1.11 $ $Date: 2016/03/05 01:33:47 $ # require(spatstat) local({ W <- square(8) X <- ppp(c(2.98, 4.58, 7.27, 1.61, 7.19), c(7.56, 5.29, 5.03, 0.49, 1.65), window=W) Z <- quadrats(W, 4, 4) Yall <- split(X, Z, drop=FALSE) Ydrop <- split(X, Z, drop=TRUE) P <- Yall[[1]] if(!all(inside.owin(P$x, P$y, P$window))) stop("Black hole detected when drop=FALSE") P <- Ydrop[[1]] if(!all(inside.owin(P$x, P$y, P$window))) stop("Black hole detected when drop=TRUE") Ydrop[[1]] <- P[1] split(X, Z, drop=TRUE) <- Ydrop # test NA handling Zbad <- quadrats(square(4), 2, 2) Ybdrop <- split(X, Zbad, drop=TRUE) Yball <- split(X, Zbad, drop=FALSE) # From Marcelino set.seed(1) W<- square(10) # the big window puntos<- rpoispp(0.5, win=W) data(letterR) r00 <- letterR r05 <- shift(letterR,c(0,5)) r50 <- shift(letterR,c(5,0)) r55 <- shift(letterR,c(5,5)) tessr4 <- tess(tiles=list(r00, r05,r50,r55)) puntosr4 <- split(puntos, tessr4, drop=TRUE) split(puntos, tessr4, drop=TRUE) <- puntosr4 ## More headaches with mark format A <- runifpoint(10) B <- runifpoint(10) AB <- split(superimpose(A=A, B=B)) #' check that split<- respects ordering where possible X <- amacrine Y <- split(X) split(X) <- Y stopifnot(identical(X, amacrine)) #' split.ppx df <- data.frame(x=runif(4),y=runif(4),t=runif(4), age=rep(c("old", "new"), 2), mineral=factor(rep(c("Au","Cu"), each=2), levels=c("Au", "Cu", "Pb")), size=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t","m", "m","m")) Y <- split(X, "age") Y <- split(X, "mineral", drop=TRUE) }) # # tests/step.R # # $Revision: 1.4 $ $Date: 2015/12/29 08:54:49 $ # # test for step() operation # require(spatstat) local({ Z <- as.im(function(x,y){ x^3 - y^2 }, nztrees$window) fitP <- ppm(nztrees ~x+y+Z, covariates=list(Z=Z)) step(fitP) fitS <- update(fitP, Strauss(7)) step(fitS) fitM <- ppm(amacrine ~ marks*(x+y), MultiStrauss(types=levels(marks(amacrine)), radii=matrix(0.04, 2, 2))) step(fitM) }) ## ## tests/symbolmaps.R ## ## Quirks 
associated with symbolmaps, etc. ## ## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ local({ require(spatstat) set.seed(100) ## spacing too large for tiles - upsets various pieces of code V <- as.im(dirichlet(runifpoint(8))) textureplot(V, spacing=2) g1 <- symbolmap(range=c(0,100), size=function(x) x/50) invoke.symbolmap(g1, 50, x=numeric(0), y=numeric(0), add=TRUE) }) # # tests/testaddvar.R # # test addvar options # # $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ X <- rpoispp(function(x,y){exp(3+3*x)}) model <- ppm(X ~y) addvar(model, "x", crosscheck=TRUE) addvar(model, "x", bw.input="quad") w <- square(0.5) addvar(model, "x", subregion=w) addvar(model, "x", subregion=w, bw.input="points") # # tests/testparres.R # # additional test of parres # # $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ X <- rpoispp(function(x,y){exp(3+x+2*x^2)}) model <- ppm(X ~x+y) # options in parres parres(model, "x") parres(model, "x", bw.input="quad") w <- square(0.5) parres(model, "x", subregion=w) parres(model, "x", subregion=w, bw.input="quad") # check whether 'update.ppm' has messed up internals mod2 <- update(model, ~x) parres(mod2, "x") }) # # tests/triplets.R # # test code for triplet interaction # # $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ fit <- ppm(redwood ~1, Triplets(0.1)) fit suffstat(fit) # hard core (zero triangles, coefficient is NA) fit0 <- ppm(cells ~1, Triplets(0.05)) fit0 suffstat(fit0) # bug case (1 triangle in data) fit1 <- ppm(cells ~1, Triplets(0.15)) fit1 suffstat(fit1) }) # # tests/undoc.R # # $Revision: 1.3 $ $Date: 2017/02/20 10:51:56 $ # # Test undocumented hacks, etc require(spatstat) local({ # pixellate.ppp accepts a data frame of weights pixellate(cells, weights=data.frame(a=1:42, b=42:1)) }) ## ## tests/updateppm.R ## ## Check validity of update.ppm ## ## $Revision: 1.4 $ $Date: 2016/03/08 06:30:46 $ local({ require(spatstat) h <- function(m1, m2) { mc <- deparse(sys.call()) cat(paste(mc, "\t... 
")) m1name <- deparse(substitute(m1)) m2name <- deparse(substitute(m2)) if(!identical(names(coef(m1)), names(coef(m2)))) stop(paste("Differing results for", m1name, "and", m2name, "in updateppm.R"), call.=FALSE) cat("OK\n") } X <- redwood[c(TRUE,FALSE)] Y <- redwood[c(FALSE,TRUE)] fit0f <- ppm(X ~ 1, nd=8) fit0p <- ppm(X, ~1, nd=8) fitxf <- ppm(X ~ x, nd=8) fitxp <- ppm(X, ~x, nd=8) cat("Basic consistency ...\n") h(fit0f, fit0p) h(fitxf, fitxp) cat("\nTest correct handling of model formulas ...\n") h(update(fitxf, Y), fitxf) h(update(fitxf, Q=Y), fitxf) h(update(fitxf, Y~x), fitxf) h(update(fitxf, Q=Y~x), fitxf) h(update(fitxf, ~x), fitxf) h(update(fitxf, Y~1), fit0f) h(update(fitxf, ~1), fit0f) h(update(fit0f, Y~x), fitxf) h(update(fit0f, ~x), fitxf) h(update(fitxp, Y), fitxp) h(update(fitxp, Q=Y), fitxp) h(update(fitxp, Y~x), fitxp) h(update(fitxp, Q=Y~x), fitxp) h(update(fitxp, ~x), fitxp) h(update(fitxp, Y~1), fit0p) h(update(fitxp, ~1), fit0p) h(update(fit0p, Y~x), fitxp) h(update(fit0p, ~x), fitxp) cat("\nTest scope handling for left hand side ...\n") X <- Y h(update(fitxf), fitxf) cat("\nTest scope handling for right hand side ...\n") Z <- distmap(X) fitZf <- ppm(X ~ Z) fitZp <- ppm(X, ~ Z) h(update(fitxf, X ~ Z), fitZf) h(update(fitxp, X ~ Z), fitZp) h(update(fitxf, . ~ Z), fitZf) h(update(fitZf, . ~ x), fitxf) h(update(fitZf, . ~ . - Z), fit0f) h(update(fitxp, . ~ Z), fitZp) h(update(fitZp, . ~ . - Z), fit0p) h(update(fit0p, . ~ . + Z), fitZp) h(update(fitZf, . ~ . ), fitZf) h(update(fitZp, . ~ . ), fitZp) cat("\nTest use of internal data ...\n") h(update(fitZf, ~ x, use.internal=TRUE), fitxf) fitsin <- update(fitZf, X~sin(Z)) h(update(fitZf, ~ sin(Z), use.internal=TRUE), fitsin) cat("\nTest step() ... 
") fut <- ppm(X ~ Z + x + y, nd=8) fut0 <- step(fut, trace=0) cat("OK\n") }) # test update.lppm local({ X <- runiflpp(20, simplenet) fit0 <- lppm(X ~ 1) fit1 <- update(fit0, ~ x) anova(fit0, fit1, test="LR") cat("update.lppm(fit, ~trend) is OK\n") fit2 <- update(fit0, . ~ x) anova(fit0, fit2, test="LR") cat("update.lppm(fit, . ~ trend) is OK\n") }) # # tests/vcovppm.R # # Check validity of vcov.ppm algorithms # # Thanks to Ege Rubak # # $Revision: 1.6 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ set.seed(42) X <- rStrauss(200, .5, .05) model <- ppm(X, inter = Strauss(.05)) b <- vcov(model, generic = TRUE, algorithm = "basic") v <- vcov(model, generic = TRUE, algorithm = "vector") vc <- vcov(model, generic = TRUE, algorithm = "vectorclip") vn <- vcov(model, generic = FALSE) disagree <- function(x, y, tol=1e-7) { max(abs(x-y)) > tol } asymmetric <- function(x) { disagree(x, t(x)) } if(asymmetric(b)) stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm") if(asymmetric(v)) stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm") if(asymmetric(vc)) stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm") if(asymmetric(vn)) stop("Non-symmetric matrix produced by vcov.ppm Strauss algorithm") if(disagree(v, b)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' ") if(disagree(v, vc)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' ") if(disagree(vn, vc)) stop("Disagreement between vcov.ppm generic and Strauss algorithms") # Geyer code xx <- c(0.7375956, 0.6851697, 0.6399788, 0.6188382) yy <- c(0.5816040, 0.6456319, 0.5150633, 0.6191592) Y <- ppp(xx, yy, window=square(1)) modelY <- ppm(Y ~1, Geyer(0.1, 1)) b <- vcov(modelY, generic = TRUE, algorithm = "basic") v <- vcov(modelY, generic = TRUE, algorithm = "vector") vc <- vcov(modelY, generic = TRUE, algorithm = "vectorclip") if(asymmetric(b)) stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm for Geyer model") 
if(asymmetric(v)) stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm for Geyer model") if(asymmetric(vc)) stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm for Geyer model") if(disagree(v, b)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' for Geyer model") if(disagree(v, vc)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' for Geyer model") ## tests of 'deltasuffstat' code ## Handling of offset terms modelH <- ppm(cells ~x, Hardcore(0.05)) a <- vcov(modelH, generic=TRUE) ## may fall over b <- vcov(modelH, generic=FALSE) if(disagree(a, b)) stop("Disagreement between vcov.ppm algorithms for Hardcore model") ## Correctness of pairwise.family$delta2 modelZ <- ppm(amacrine ~1, MultiStrauss(radii=matrix(0.1, 2, 2))) b <- vcov(modelZ, generic=FALSE) g <- vcov(modelZ, generic=TRUE) if(disagree(b, g)) stop("Disagreement between vcov.ppm algorithms for MultiStrauss model") ## Test that 'deltasuffstat' works for Hybrids modHyb <- ppm(japanesepines ~ 1, Hybrid(Strauss(0.05), Strauss(0.1))) }) # # tests/windows.R # # Tests of owin geometry code # # $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ # Ege Rubak spotted this problem in 1.28-1 A <- as.owin(ants) B <- dilation(A, 140) if(!is.subset.owin(A, B)) stop("is.subset.owin fails in polygonal case") # thanks to Tom Rosenbaum A <- shift(square(3), origin="midpoint") B <- shift(square(1), origin="midpoint") AB <- setminus.owin(A, B) D <- shift(square(2), origin="midpoint") if(is.subset.owin(D,AB)) stop("is.subset.owin fails for polygons with holes") ## thanks to Brian Ripley / SpatialVx M <- as.mask(letterR) stopifnot(area(bdry.mask(M)) > 0) stopifnot(area(convexhull(M)) > 0) R <- as.mask(square(1)) stopifnot(area(bdry.mask(R)) > 0) stopifnot(area(convexhull(R)) > 0) }) ## ## tests/xysegment.R ## ## Test weird problems and boundary cases for line segment code ## ## $Version$ $Date: 2016/02/12 08:18:08 $ ## require(spatstat) 
local({ # segment of length zero B <- psp(1/2, 1/2, 1/2, 1/2, window=square(1)) BB <- angles.psp(B) A <- runifpoint(3) AB <- project2segment(A,B) # mark inheritance X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) marks(X) <- 1:10 Y <- selfcut.psp(X) marks(X) <- data.frame(A=1:10, B=factor(letters[1:10])) Z <- selfcut.psp(X) }) spatstat/tests/selfcross.txt0000644000176200001440000000071513115225157016030 0ustar liggesusers x y 0.3057897 0.1518920 0.6038506 0.3132859 0.6343093 0.2740279 0.5364061 0.2936569 0.8170620 0.4681368 0.8083595 0.6535217 0.6125531 0.6796937 0.6103774 0.6360737 0.4363273 0.6338927 0.4689617 0.6927797 0.6538900 0.7560286 0.6169043 0.7756576 0.5994993 0.7276756 0.3514779 0.7363996 0.3123166 0.6622457 0.1447933 0.4877658 0.2274671 0.4332408 0.1578471 0.3721728 0.2753309 0.4027068 0.1817790 0.4136118 0.2100621 0.3067429 spatstat/tests/testsQtoR.R0000644000176200001440000006271713115225157015371 0ustar liggesusers#' tests/randoms.R #' Further tests of random generation code #' $Revision: 1.1 $ $Date: 2016/04/02 04:03:46 $ require(spatstat) local({ A <- runifrect(6, nsim=2) A <- runifdisc(6, nsim=2) A <- runifpoispp(5, nsim=2) A <- runifpoispp(0, nsim=2) Z <- as.im(function(x,y) 10*x, square(1)) A <- rpoint(n=6, f=Z, fmax=10, nsim=2) A <- rSSI(0.05, 6, nsim=2) A <- rstrat(nx=4, nsim=2) A <- rsyst(nx=4, nsim=2) A <- rthin(cells, P=0.5, nsim=2) A <- rjitter(cells, nsim=2, retry=FALSE) }) #' tests/resid.R #' #' Stuff related to residuals and residual diagnostics #' #' $Revision: 1.1 $ $Date: 2016/09/02 10:56:59 $ #' require(spatstat) local({ fit <- ppm(cells ~x, Strauss(r=0.15)) diagnose.ppm(fit, cumulative=FALSE) diagnose.ppm(fit, cumulative=FALSE, type="pearson") }) ## ## tests/rhohat.R ## ## Test all combinations of options for rhohatCalc ## ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ local({ require(spatstat) X <- rpoispp(function(x,y){exp(3+3*x)}) ## done in example(rhohat): ## rhoA <- rhohat(X, "x") ## rhoB <- rhohat(X, 
"x", method="reweight") ## rhoC <- rhohat(X, "x", method="transform") fit <- ppm(X, ~x) rhofitA <- rhohat(fit, "x") rhofitB <- rhohat(fit, "x", method="reweight") rhofitC <- rhohat(fit, "x", method="transform") ## Baseline lam <- predict(fit) rhoAb <- rhohat(X, "x", baseline=lam) rhoBb <- rhohat(X, "x", method="reweight", baseline=lam) rhoCb <- rhohat(X, "x", method="transform", baseline=lam) ## Horvitz-Thompson rhoAH <- rhohat(X, "x", horvitz=TRUE) rhoBH <- rhohat(X, "x", method="reweight", horvitz=TRUE) rhoCH <- rhohat(X, "x", method="transform", horvitz=TRUE) rhofitAH <- rhohat(fit, "x", horvitz=TRUE) rhofitBH <- rhohat(fit, "x", method="reweight", horvitz=TRUE) rhofitCH <- rhohat(fit, "x", method="transform", horvitz=TRUE) }) # # tests/rmhAux.R # # $Revision: 1.1 $ $Date: 2013/02/18 10:41:27 $ # # For interactions which maintain 'auxiliary data', # verify that the auxiliary data are correctly updated. # # To do this we run rmh with nsave=1 so that the point pattern state # is saved after every iteration, then the algorithm is restarted, # and the auxiliary data are re-initialised. The final state must agree with # the result of simulation without saving. # ---------------------------------------------------- require(spatstat) local({ # Geyer: mod <- list(cif="geyer", par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=square(10)) set.seed(42) X.nosave <- rmh(model=mod, start=list(n.start=50), control=list(nrep=1e3, periodic=FALSE, expand=1)) set.seed(42) X.save <- rmh(model=mod, start=list(n.start=50), control=list(nrep=1e3, periodic=FALSE, expand=1, nburn=0, nsave=1, pstage="start")) #' Need to set pstage='start' so that proposals are generated #' at the start of the procedure in both cases. 
stopifnot(npoints(X.save) == npoints(X.nosave)) stopifnot(max(nncross(X.save, X.nosave)$dist) == 0) stopifnot(max(nncross(X.nosave, X.save)$dist) == 0) }) ## ## tests/rmhBasic.R ## ## $Revision: 1.11 $ $Date: 2015/12/29 08:54:49 $ # # Test examples for rmh.default # run to reasonable length # and with tests for validity added # ---------------------------------------------------- require(spatstat) local({ if(!exists("nr")) nr <- 5e3 spatstat.options(expand=1.1) # Strauss process. mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X1.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr)) # Strauss process, conditioning on n = 80: X2.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(p=1,nrep=nr)) stopifnot(X2.strauss$n == 80) # test tracking mechanism X1.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr), track=TRUE) X2.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(p=1,nrep=nr), track=TRUE) # Hard core process: mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10)) X3.hardcore <- rmh(model=mod02,start=list(n.start=60), control=list(nrep=nr)) # Strauss process equal to pure hardcore: mod02 <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10)) X3.strauss <- rmh(model=mod02,start=list(n.start=60), control=list(nrep=nr)) # Strauss process in a polygonal window. x <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42) y <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33) mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07), w=owin(poly=list(x=x,y=y))) X4.strauss <- rmh(model=mod03,start=list(n.start=90), control=list(nrep=nr)) # Strauss process in a polygonal window, conditioning on n = 42. X5.strauss <- rmh(model=mod03,start=list(n.start=42), control=list(p=1,nrep=nr)) stopifnot(X5.strauss$n == 42) # Strauss process, starting off from X4.strauss, but with the # polygonal window replace by a rectangular one. 
At the end, # the generated pattern is clipped to the original polygonal window. xxx <- X4.strauss xxx$window <- as.owin(c(0,1,0,1)) X6.strauss <- rmh(model=mod03,start=list(x.start=xxx), control=list(nrep=nr)) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=c(0,10,0,10)) X1.straush <- rmh(model=mod04,start=list(n.start=70), control=list(nrep=nr)) # Another Strauss with hardcore (with a perhaps surprising result): mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5), w=c(0,250,0,250)) X2.straush <- rmh(model=mod05,start=list(n.start=250), control=list(nrep=nr)) # Pure hardcore (identical to X3.strauss). mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7), w=c(0,10,0,10)) X3.straush <- rmh(model=mod06,start=list(n.start=60), control=list(nrep=nr)) # Area-interaction, inhibitory mod.area <- list(cif="areaint",par=list(beta=2,eta=0.5,r=0.5), w=square(10)) X.area <- rmh(model=mod.area,start=list(n.start=60), control=list(nrep=nr)) # Area-interaction, clustered mod.area2 <- list(cif="areaint",par=list(beta=2,eta=1.5,r=0.5), w=square(10)) X.area2 <- rmh(model=mod.area2,start=list(n.start=60), control=list(nrep=nr)) # Area-interaction close to hard core set.seed(42) mod.area0 <- list(cif="areaint",par=list(beta=2,eta=1e-300,r=0.35), w=square(10)) X.area0 <- rmh(model=mod.area0,start=list(x.start=X3.hardcore), control=list(nrep=nr)) stopifnot(nndist(X.area0) > 0.6) # Soft core: w <- c(0,10,0,10) mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5), w=c(0,10,0,10)) X.sftcr <- rmh(model=mod07,start=list(n.start=70), control=list(nrep=nr)) # Diggle, Gates, and Stibbard: mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1)) X.dgs <- rmh(model=mod12,start=list(n.start=300), control=list(nrep=nr)) # Diggle-Gratton: mod13 <- list(cif="diggra", par=list(beta=1800,kappa=3,delta=0.02,rho=0.04), w=square(1)) X.diggra <- rmh(model=mod13,start=list(n.start=300), control=list(nrep=nr)) # 
Geyer: mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=c(0,10,0,10)) X1.geyer <- rmh(model=mod14,start=list(n.start=200), control=list(nrep=nr)) # Geyer; same as a Strauss process with parameters # (beta=2.25,gamma=0.16,r=0.7): mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000), w=c(0,10,0,10)) X2.geyer <- rmh(model=mod15,start=list(n.start=200), control=list(nrep=nr)) mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3)) data(redwood) X3.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(periodic=TRUE,nrep=nr)) # Geyer, starting from the redwood data set, simulating # on a torus, and conditioning on n: X4.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(p=1,periodic=TRUE,nrep=nr)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) X.lookup <- rmh(model=mod17,start=list(n.start=100), control=list(nrep=nr)) # Strauss with trend tr <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } beta <- 0.3 gmma <- 0.5 r <- 45 tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend mod17 <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r),w=c(0,250,0,250), trend=tr3) X1.strauss.trend <- rmh(model=mod17,start=list(n.start=90), control=list(nrep=nr)) }) ## ## tests/rmhErrors.R ## ## $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $ ## # Things which should cause an error require(spatstat) local({ if(!exists("nv")) nv <- 0 if(!exists("nr")) nr <- 1e3 # Strauss with zero intensity and p = 1 mod0S <- list(cif="strauss",par=list(beta=0,gamma=0.6,r=0.7), w = square(3)) out <- try(X0S <- rmh(model=mod0S,start=list(n.start=80), control=list(p=1,nrep=nr,nverb=nv),verbose=FALSE)) if(!inherits(out, "try-error")) stop("Error not 
trapped (Strauss with zero intensity and p = 1) in tests/rmhErrors.R") }) # # tests/rmhExpand.R # # test decisions about expansion of simulation window # # $Revision: 1.2 $ $Date: 2011/12/05 07:29:16 $ # require(spatstat) local({ fit <- ppm(cells, ~x) # check rmhmodel.ppm mod <- rmhmodel(fit) wsim <- as.rectangle(mod$trend) if(!identical(wsim, as.owin(cells))) stop("Expansion occurred improperly in rmhmodel.ppm") }) # # tests/rmhMulti.R # # tests of rmh, running multitype point processes # # $Revision: 1.6 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ if(!exists("nr")) nr <- 5e3 if(!exists("nv")) nv <- 0 spatstat.options(expand=1.1) # Multitype Poisson modp2 <- list(cif="poisson", par=list(beta=2), types=letters[1:3], w = square(10)) Xp2 <- rmh(modp2, start=list(n.start=0), control=list(p=1)) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r), w=c(0,250,0,250)) X1.straussm <- rmh(model=mod08,start=list(n.start=80), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) # Multitype Strauss conditioning upon the total number # of points being 80: X2.straussm <- rmh(model=mod08,start=list(n.start=80), control=list(p=1,ptypes=c(0.75,0.25),nrep=nr, nverb=nv)) stopifnot(X2.straussm$n == 80) # Conditioning upon the number of points of type 1 being 60 # and the number of points of type 2 being 20: X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)), control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) stopifnot(all(table(X3.straussm$marks) == c(60,20))) # Multitype Strauss hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod09 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250)) X.straushm <- rmh(model=mod09,start=list(n.start=80), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) tr3 <- 
function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) X1.straushm.trend <- rmh(model=mod10,start=list(n.start=350), control=list(ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) # Multitype Strauss hardcore with trends for each type, given as images: bigwin <- square(250) i1 <- as.im(tr3, bigwin) i2 <- as.im(tr4, bigwin) mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=bigwin, trend=list(i1,i2)) X2.straushm.trend <- rmh(model=mod11,start=list(n.start=350), control=list(ptypes=c(0.75,0.25),expand=1, nrep=nr,nverb=nv)) ####################################################################### ############ checks on distribution of output ####################### ####################################################################### checkp <- function(p, context, testname, failmessage, pcrit=0.01) { if(missing(failmessage)) failmessage <- paste("output failed", testname) if(p < pcrit) warning(paste(context, ",", failmessage), call.=FALSE) cat(paste("\n", context, ",", testname, "has p-value", signif(p,4), "\n")) } # Multitype Strauss code; output is multitype Poisson beta <- 100 * c(1,1) ri <- matrix(0.07, 2, 2) gmma <- matrix(1, 2, 2) # no interaction tr1 <- function(x,y){ rep(1, length(x)) } tr2 <- function(x,y){ rep(2, length(x)) } mod <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=ri), w=owin(), trend=list(tr1,tr2)) X <- rmh(mod, start=list(n.start=0), control=list(nrep=1e6)) # The model is Poisson with intensity 100 for type 1 and 200 for type 2. # Total number of points is Poisson (300) # Marks are i.i.d. with P(type 1) = 1/3, P(type 2) = 2/3. 
# Test whether the total intensity looks right # p <- ppois(X$n, 300) p.val <- 2 * min(p, 1-p) checkp(p.val, "In multitype Poisson simulation", "test whether total number of points has required mean value") # Test whether the mark distribution looks right ta <- table(X$marks) cat("Frequencies of marks:") print(ta) checkp(chisq.test(ta, p = c(1,2)/3)$p.value, "In multitype Poisson simulation", "chi-squared goodness-of-fit test for mark distribution (1/3, 2/3)") ##### #### multitype Strauss code; fixall=TRUE; #### output is multinomial process with nonuniform locations #### the.context <- "In nonuniform multinomial simulation" beta <- 100 * c(1,1) ri <- matrix(0.07, 2, 2) gmma <- matrix(1, 2, 2) # no interaction tr1 <- function(x,y){ ifelse(x < 0.5, 0, 2) } tr2 <- function(x,y){ ifelse(y < 0.5, 1, 3) } # cdf of these distributions Fx1 <- function(x) { ifelse(x < 0.5, 0, ifelse(x < 1, 2 * x - 1, 1)) } Fy2 <- function(y) { ifelse(y < 0, 0, ifelse(y < 0.5, y/2, ifelse(y < 1, (1/2 + 3 * (y-1/2))/2, 1))) } mod <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=ri), w=owin(), trend=list(tr1,tr2)) X <- rmh(mod, start=list(n.start=c(50,50)), control=list(nrep=1e6, expand=1, p=1, fixall=TRUE)) # The model is Poisson # Mean number of type 1 points = 100 # Mean number of type 2 points = 200 # Total intensity = 300 # Marks are i.i.d. 
with P(type 1) = 1/3, P(type 2) = 2/3 # Test whether the coordinates look OK Y <- split(X) X1 <- Y[[names(Y)[1]]] X2 <- Y[[names(Y)[2]]] checkp(ks.test(X1$y, "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of y coordinates of type 1 points") if(any(X1$x < 0.5)) { stop(paste(the.context, ",", "x-coordinates of type 1 points are IMPOSSIBLE"), call.=FALSE) } else { checkp(ks.test(Fx1(X1$x), "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of transformed x coordinates of type 1 points") } checkp(ks.test(X2$x, "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of x coordinates of type 2 points") checkp(ks.test(Fy2(X2$y), "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of transformed y coordinates of type 2 points") }) # # tests/rmhTrend.R # # Problems with trend images (rmhmodel.ppm or rmhEngine) # require(spatstat) local({ set.seed(42) # Bug folder 37 of 8 feb 2011 # rmhmodel.ppm -> predict.ppm # + rmhResolveTypes -> is.subset.owin data(demopat) Z <- rescale(demopat, 7000) X <- unmark(Z) X1 <- split(Z)[[1]] Int <- density(X,dimyx=200) Lint <- eval.im(log(npoints(X1)*Int/npoints(X))) M <- as.owin(Int) MR <- intersect.owin(M,scalardilate(M,0.5,origin="midpoint")) X1 <- X1[MR] Fut <- ppm(X1~offset(Lint),covariates=list(Lint=Lint), inter=BadGey(r=c(0.03,0.05),sat=3)) Y <- rmh(Fut,control=list(expand=M,nrep=1e3), verbose=FALSE) }) # # tests/rmhWeird.R # # $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ # # strange boundary cases require(spatstat) local({ if(!exists("nv")) nv <- 0 if(!exists("nr")) nr <- 5e3 # Poisson process cat("Poisson\n") modP <- list(cif="poisson",par=list(beta=10), w = square(3)) XP <- rmh(model = modP, start = list(n.start=25), control=list(nrep=nr,nverb=nv)) # Poisson process case of Strauss cat("\nPoisson case of Strauss\n") modPS <- list(cif="strauss",par=list(beta=10,gamma=1,r=0.7), w = square(3)) XPS <- rmh(model=modPS, start=list(n.start=25), 
control=list(nrep=nr,nverb=nv)) # Strauss with zero intensity cat("\nStrauss with zero intensity\n") mod0S <- list(cif="strauss",par=list(beta=0,gamma=0.6,r=0.7), w = square(3)) X0S <- rmh(model=mod0S,start=list(n.start=80), control=list(nrep=nr,nverb=nv)) stopifnot(X0S$n == 0) # Poisson with zero intensity cat("\nPoisson with zero intensity\n") mod0P <- list(cif="poisson",par=list(beta=0), w = square(3)) X0P <- rmh(model = mod0P, start = list(n.start=25), control=list(nrep=nr,nverb=nv)) # Poisson conditioned on zero points cat("\nPoisson conditioned on zero points\n") modp <- list(cif="poisson", par=list(beta=2), w = square(10)) Xp <- rmh(modp, start=list(n.start=0), control=list(p=1, nrep=nr)) stopifnot(Xp$n == 0) # Multitype Poisson conditioned on zero points cat("\nMultitype Poisson conditioned on zero points\n") modp2 <- list(cif="poisson", par=list(beta=2), types=letters[1:3], w = square(10)) Xp2 <- rmh(modp2, start=list(n.start=0), control=list(p=1, nrep=nr)) stopifnot(is.marked(Xp2)) stopifnot(Xp2$n == 0) # Multitype Poisson conditioned on zero points of each type cat("\nMultitype Poisson conditioned on zero points of each type\n") Xp2fix <- rmh(modp2, start=list(n.start=c(0,0,0)), control=list(p=1, fixall=TRUE, nrep=nr)) stopifnot(is.marked(Xp2fix)) stopifnot(Xp2fix$n == 0) }) # # tests/rmhmodel.ppm.R # # $Revision: 1.8 $ $Date: 2015/12/29 08:54:49 $ # # Case-by-case tests of rmhmodel.ppm # require(spatstat) local({ f <- ppm(cells) m <- rmhmodel(f) f <- ppm(cells ~x) m <- rmhmodel(f) f <- ppm(cells ~1, Strauss(0.1)) m <- rmhmodel(f) f <- ppm(cells ~1, StraussHard(r=0.1,hc=0.05)) m <- rmhmodel(f) f <- ppm(cells ~1, Hardcore(0.07)) m <- rmhmodel(f) f <- ppm(cells ~1, DiggleGratton(0.05,0.1)) m <- rmhmodel(f) f <- ppm(cells ~1, Softcore(0.5), correction="isotropic") m <- rmhmodel(f) f <- ppm(cells ~1, Geyer(0.07,2)) m <- rmhmodel(f) f <- ppm(cells ~1, BadGey(c(0.07,0.1,0.13),2)) m <- rmhmodel(f) f <- ppm(cells ~1, PairPiece(r = c(0.05, 0.1, 0.2))) m <- 
rmhmodel(f) f <- ppm(cells ~1, AreaInter(r=0.06)) m <- rmhmodel(f) # multitype r <- matrix(0.07, 2, 2) f <- ppm(amacrine ~1, MultiStrauss(c("off","on"),r)) m <- rmhmodel(f) h <- matrix(min(nndist(amacrine))/2, 2, 2) f <- ppm(amacrine ~1, MultiStraussHard(c("off","on"),r, h)) m <- rmhmodel(f) diag(r) <- NA diag(h) <- NA f <- ppm(amacrine ~1, MultiStrauss(c("off","on"),r)) m <- rmhmodel(f) f <- ppm(amacrine ~1, MultiStraussHard(c("off","on"),r, h)) m <- rmhmodel(f) # multitype data, interaction not dependent on type f <- ppm(amacrine ~marks, Strauss(0.05)) m <- rmhmodel(f) # trends f <- ppm(cells ~x, Strauss(0.1)) m <- rmhmodel(f) f <- ppm(cells ~y, StraussHard(r=0.1,hc=0.05)) m <- rmhmodel(f) f <- ppm(cells ~x+y, Hardcore(0.07)) m <- rmhmodel(f) f <- ppm(cells ~polynom(x,y,2), Softcore(0.5), correction="isotropic") m <- rmhmodel(f) # covariates Z <- as.im(function(x,y){ x^2+y^2 }, as.owin(cells)) f <- ppm(cells ~z, covariates=list(z=Z)) m <- rmhmodel(f) m <- rmhmodel(f, control=list(p=1)) Zim <- as.im(Z, as.owin(cells)) f <- ppm(cells ~z, covariates=list(z=Zim)) m <- rmhmodel(f) Z <- as.im(function(x,y){ x^2+y }, as.owin(amacrine)) f <- ppm(amacrine ~z + marks, covariates=list(z=Z)) m <- rmhmodel(f) m <- rmhmodel(f, control=list(p=1)) m <- rmhmodel(f, control=list(p=1,fixall=TRUE)) Zim <- as.im(Z, as.owin(amacrine)) f <- ppm(amacrine ~z + marks, covariates=list(z=Zim)) m <- rmhmodel(f) }) # # tests/rmhmodelHybrids.R # # Test that rmhmodel.ppm and rmhmodel.default # work on Hybrid interaction models # # $Revision: 1.4 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ # ......... rmhmodel.ppm ....................... 
fit1 <- ppm(redwood ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2), C=Geyer(0.15, 1))) m1 <- rmhmodel(fit1) m1 reach(m1) # Test of handling 'IsOffset' fit2 <- ppm(cells ~1, Hybrid(H=Hardcore(0.05), G=Geyer(0.15, 2))) rmhmodel(fit2) # Test of handling Poisson components fit3 <- ppm(cells ~1, Hybrid(P=Poisson(), S=Strauss(0.05))) X3 <- rmh(fit3, control=list(nrep=1e3,expand=1), verbose=FALSE) # ............ rmhmodel.default ............................ modH <- list(cif=c("strauss","geyer"), par=list(list(beta=50,gamma=0.5, r=0.1), list(beta=1, gamma=0.7, r=0.2, sat=2)), w = square(1)) rmodH <- rmhmodel(modH) rmodH reach(rmodH) # test handling of Poisson components modHP <- list(cif=c("poisson","strauss"), par=list(list(beta=5), list(beta=10,gamma=0.5, r=0.1)), w = square(1)) rmodHP <- rmhmodel(modHP) rmodHP reach(rmodHP) modPP <- list(cif=c("poisson","poisson"), par=list(list(beta=5), list(beta=10)), w = square(1)) rmodPP <- rmhmodel(modPP) rmodPP reach(rmodPP) }) # # tests/rmh.ppm.R # # $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ # # Examples removed from rmh.ppm.Rd # stripped down to minimal tests of validity # require(spatstat) local({ op <- spatstat.options() spatstat.options(rmh.nrep=10, npixel=10, ndummy.min=10) spatstat.options(project.fast=TRUE) Nrep <- 10 X <- swedishpines # Poisson process fit <- ppm(X ~1, Poisson()) Xsim <- rmh(fit) # Strauss process fit <- ppm(X ~1, Strauss(r=7)) Xsim <- rmh(fit) # Strauss process simulated on a larger window # then clipped to original window Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE)) # Extension of model to another window (thanks to Tuomas Rajala) Xsim <- rmh(fit, w=square(2)) Xsim <- simulate(fit, w=square(2)) # Strauss - hard core process # fit <- ppm(X ~1, StraussHard(r=7,hc=2)) # Xsim <- rmh(fit, start=list(n.start=X$n)) # Geyer saturation process # fit <- ppm(X ~1, Geyer(r=7,sat=2)) # Xsim <- rmh(fit, start=list(n.start=X$n)) # Area-interaction process fit <- ppm(X ~1, AreaInter(r=7)) Xsim 
<- rmh(fit, start=list(n.start=X$n)) # soft core interaction process # X <- quadscheme(X, nd=50) # fit <- ppm(X ~1, Softcore(kappa=0.1), correction="isotropic") # Xsim <- rmh(fit, start=list(n.start=X$n)) # Diggle-Gratton pairwise interaction model # fit <- ppm(cells ~1, DiggleGratton(0.05, 0.1)) # Xsim <- rmh(fit, start=list(n.start=cells$n)) # plot(Xsim, main="simulation from fitted Diggle-Gratton model") X <- rSSI(0.05, 100) # piecewise-constant pairwise interaction function fit <- ppm(X ~1, PairPiece(seq(0.02, 0.1, by=0.01))) Xsim <- rmh(fit) # marked point pattern Y <- amacrine # marked Poisson models fit <- ppm(Y) Ysim <- rmh(fit) fit <- ppm(Y~marks) Ysim <- rmh(fit) fit <- ppm(Y~x) Ysim <- rmh(fit) # fit <- ppm(Y~polynom(x,2)) # Ysim <- rmh(fit) fit <- ppm(Y~marks+x) Ysim <- rmh(fit) # fit <- ppm(Y~marks+polynom(x,2)) # Ysim <- rmh(fit) # multitype Strauss models MS <- MultiStrauss(types = levels(Y$marks), radii=matrix(0.07, ncol=2, nrow=2)) # fit <- ppm(Y~marks*polynom(x,2), MS) fit <- ppm(Y~marks*x, MS) Ysim <- rmh(fit) spatstat.options(op) }) spatstat/src/0000755000176200001440000000000013166361223012707 5ustar liggesusersspatstat/src/fardist.c0000644000176200001440000000070413166361223014510 0ustar liggesusers/* fardist.c Furthest data point from each grid point Uses code template 'fardist.h' Copyright (C) Adrian Baddeley, Rolf Turner and Ege Rubak 2014 Licence: GPL >= 2 $Revision: 1.2 $ $Date: 2014/08/31 06:43:42 $ */ #include #include #include double sqrt(); #define FNAME fardistgrid #undef SQUARED #include "fardist.h" #undef FNAME #define FNAME fardist2grid #define SQUARED #include "fardist.h" spatstat/src/PerfectPenttinen.h0000644000176200001440000001301013166361223016330 0ustar liggesusers // ........................... Penttinen process ................ 
// $Revision: 1.2 $ $Date: 2016/02/02 01:30:01 $ class PenttProcess : public PointProcess { public: double beta, gamma, radius, reachsquared, loggamma2pi; int ishard; PenttProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double r); ~PenttProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; PenttProcess::PenttProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double r) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; gamma = g; radius = r; ishard = (gamma <= DOUBLE_EPS); loggamma2pi = M_2PI * (ishard? 0.0 : log(gamma)); reachsquared = 4.0 * radius * radius; InteractionRange = 2.0 * radius; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double PenttProcess::Interaction(double dsquared) { double rtn, z, z2; rtn = 1.0; if(dsquared < reachsquared) { if(ishard) return(0.0); z2 = dsquared/reachsquared; z = sqrt(z2); if(z < 1.0) { rtn = exp(loggamma2pi * (acos(z) - z * sqrt(1.0 - z2))); } } return(rtn); } void PenttProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void PenttProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating PenttProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating PenttProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); 
//Rprintf("Generating PenttProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectPenttinen(SEXP beta, SEXP gamma, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, Gamma, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(gamma = AS_NUMERIC(gamma)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 5 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Gamma = *(NUMERIC_POINTER(gamma)); R = *(NUMERIC_POINTER(r)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Penttinen point process PenttProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Gamma,R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS 
Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(9); // 5 arguments plus xout, yout, nout, out return(out); } } spatstat/src/linnncross.h0000644000176200001440000000625613166361223015261 0ustar liggesusers/* linnncross.h Function body definitions with macros $Revision: 1.2 $ $Date: 2015/11/28 02:02:50 $ Macros used: FNAME name of function EXCLU whether serial numbers are provided WHICH whether 'nnwhich' is required */ void FNAME(np, xp, yp, /* data points 'from' */ nq, xq, yq, /* data points 'to' */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ psegmap, /* map from data points to segments */ qsegmap, /* map from data points to segments */ #ifdef EXCLU idP, idQ, /* serial numbers for patterns p and q */ #endif huge, /* value taken as infinity */ /* OUTPUT */ #ifdef WHICH nndist, /* nearest neighbour distance for each point */ nnwhich /* identifies nearest neighbour */ #else nndist /* nearest neighbour distance for each point */ #endif ) int *np, *nq, *nv, *ns; int *from, *to, *psegmap, *qsegmap; /* integer vectors (mappings) */ #ifdef EXCLU int *idP, *idQ; #endif double *xp, *yp, *xq, *yq, *xv, *yv; 
/* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *nndist; /* nearest neighbour distance for each point */ #ifdef WHICH int *nnwhich; /* identifies nearest neighbour */ #endif { int Np, Nq, Nv, i, j; int segPi, segQj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xqj, yqj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; #ifdef EXCLU int idPi; #endif #ifdef WHICH int whichmin; #endif Np = *np; Nq = *nq; Nv = *nv; hugevalue = *huge; /* initialise nn distances */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; #ifdef WHICH nnwhich[i] = -1; #endif } /* main loop */ for(i = 0; i < Np; i++) { xpi = xp[i]; ypi = yp[i]; #ifdef EXCLU idPi = idP[i]; #endif segPi = psegmap[i]; nbi1 = from[segPi]; nbi2 = to[segPi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = nndist[i]; #ifdef WHICH whichmin = nnwhich[i]; #endif for(j = 0; j < Nq; j++) { #ifdef EXCLU if(idQ[j] != idPi) { #endif xqj = xq[j]; yqj = yq[j]; segQj = qsegmap[j]; /* compute path distance between i and j */ if(segPi == segQj) { /* points i and j lie on the same segment; use Euclidean distance */ d = EUCLID(xpi, ypi, xqj, yqj); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segQj]; nbj2 = to[segQj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xqj, yqj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xqj, yqj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn for point i */ if(d < dmin) { dmin = d; #ifdef WHICH whichmin = j; #endif } #ifdef EXCLU } #endif } /* commit nn distance for point i */ nndist[i] = dmin; #ifdef WHICH nnwhich[i] = whichmin; #endif } } spatstat/src/close3pair.c0000644000176200001440000000263713166361223015127 
0ustar liggesusers/* close3pair.c $Revision: 1.1 $ $Date: 2015/02/21 03:28:53 $ closepairs and crosspairs for 3D Assumes point pattern is sorted in increasing order of x coordinate */ #include #include #include #define OK 0 #define ERR_OVERFLOW 1 #define ERR_ALLOC 2 #define intRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (int *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(int)) #define dblRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (double *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(double)) double sqrt(); /* ....... define functions, using closefuns.h ........*/ /* return only one of the pairs (i,j) and (j,i) */ #define SINGLE /* enable 3D code */ #define ZCOORD /* return i, j only */ #define CLOSEFUN close3IJpairs #define CROSSFUN cross3IJpairs #undef THRESH #undef COORDS #undef DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, xi, yi, zi, xj, yj, zj, dx, dy, dz, d */ #define CLOSEFUN close3pairs #define CROSSFUN cross3pairs #undef THRESH #define COORDS #define DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, t where t = 1{d < s} */ #define CLOSEFUN close3thresh #define CROSSFUN cross3thresh #define THRESH #undef COORDS #undef DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST spatstat/src/linnndist.c0000755000176200001440000001167413166361223015071 0ustar liggesusers#include /* linnndist.c Shortest-path distances between nearest neighbours in linear network $Revision: 1.1 $ $Date: 2013/10/21 02:01:14 $ linnndist linnnwhich */ #define DPATH(I,J) dpath[(J) + Nv * (I)] #define ANSWER(I,J) answer[(J) + Np * (I)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void linnndist(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from data points to segments */ 
huge, /* value taken as infinity */ /* OUTPUT */ answer /* nearest neighbour distance for each point */ ) int *np, *nv, *ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *answer; /* vector of output values */ { int Np, Nv, i, j, Np1; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; Np = *np; Nv = *nv; Np1 = Np - 1; hugevalue = *huge; /* initialise nn distances */ for(i = 0; i < Np; i++) answer[i] = hugevalue; /* main loop */ for(i = 0; i < Np1; i++) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = answer[i]; for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; /* compute path distance between i and j */ if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn distance for point i */ if(d < dmin) dmin = d; /* update nn distance for point j */ if(d < answer[j]) answer[j] = d; } /* commit nn distance for point i */ answer[i] = dmin; } } void linnnwhich(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from 
data points to segments */ huge, /* value taken as infinity */ /* OUTPUT */ nndist, /* nearest neighbour distance for each point */ nnwhich /* identifies nearest neighbour */ ) int *np, *nv, *ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *nndist; /* vector of output values */ int *nnwhich; /* vector of output values */ { int Np, Nv, i, j, Np1; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; int whichmin; Np = *np; Nv = *nv; Np1 = Np - 1; hugevalue = *huge; /* initialise nn distances and identifiers */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; nnwhich[i] = -1; } /* main loop */ for(i = 0; i < Np1; i++) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = nndist[i]; whichmin = nnwhich[i]; for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn for point i */ if(d < dmin) { dmin = d; whichmin = j; } /* update nn for point j */ if(d < nndist[j]) { nndist[j] = d; nnwhich[j] = i; } } /* commit nn for point i */ nndist[i] = dmin; nnwhich[i] = whichmin; } } 
spatstat/src/linpairdist.c0000755000176200001440000000430113166361223015376 0ustar liggesusers#include #include #include "chunkloop.h" /* linpairdist.c Shortest-path distances between each pair of points in linear network $Revision: 1.5 $ $Date: 2012/10/12 10:21:46 $ linpairdist */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void linpairdist(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from data points to segments */ /* OUTPUT */ answer /* shortest path distances between points */ ) int *np, *nv, *ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *dpath, *answer; /* matrices */ { int Np, Nv, i, j, Np1, maxchunk; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; Np = *np; Nv = *nv; Np1 = Np - 1; OUTERCHUNKLOOP(i, Np1, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Np1, maxchunk, 1024) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; 
if(d22 < d) d = d22; } /* write */ ANSWER(i,j) = ANSWER(j,i) = d; } ANSWER(i,i) = 0; } } } spatstat/src/hasclose.c0000644000176200001440000000135613166361223014661 0ustar liggesusers/* hasclose.c $Revision: 1.4 $ $Date: 2016/11/29 05:09:25 $ Determine whether a point has a neighbour closer than 'r' Data must be ordered by increasing x coordinate */ #include #undef BUG #undef TORUS #undef ZCOORD #define CLOSEFUN hasXclose #define CROSSFUN hasXYclose #include "hasclose.h" #undef CLOSEFUN #undef CROSSFUN #define ZCOORD #define CLOSEFUN hasX3close #define CROSSFUN hasXY3close #include "hasclose.h" #undef CLOSEFUN #undef CROSSFUN #define TORUS #undef ZCOORD #define CLOSEFUN hasXpclose #define CROSSFUN hasXYpclose #include "hasclose.h" #undef CLOSEFUN #undef CROSSFUN #define ZCOORD #define CLOSEFUN hasX3pclose #define CROSSFUN hasXY3pclose #include "hasclose.h" #undef CLOSEFUN #undef CROSSFUN spatstat/src/closepair.c0000755000176200001440000002366213166361223015050 0ustar liggesusers/* closepair.c $Revision: 1.33 $ $Date: 2016/03/28 03:46:26 $ Assumes point pattern is sorted in increasing order of x coordinate paircount() count the total number of pairs (i, j) with distance < rmax Cclosepaircounts count for each i the number of j with distance < rmax crosscount() count number of close pairs in two patterns (note: Ccrosspaircounts is defined in Estrauss.c) duplicatedxy() find duplicated (x,y) pairs Fclosepairs() extract close pairs of coordinates .C interface - output vectors have Fixed length Fcrosspairs() extract close pairs in two patterns .C interface - output vectors have Fixed length Vclosepairs() extract close pairs of coordinates .Call interface - output vectors have Variable length Vcrosspairs() extract close pairs in two patterns .Call interface - output vectors have Variable length */ #include #include #include #define OK 0 #define ERR_OVERFLOW 1 #define ERR_ALLOC 2 #define FAILED(X) ((void *)(X) == (void *)NULL) #define intRealloc(PTR, OLDLENGTH, NEWLENGTH) \ 
(int *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(int)) #define dblRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (double *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(double)) double sqrt(); /* count TOTAL number of close pairs */ void paircount(nxy, x, y, rmaxi, count) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ /* output */ int *count; { int n, maxchunk, i, j, counted; double xi, yi, rmax, r2max, dx, dy, a; n = *nxy; rmax = *rmaxi; r2max = rmax * rmax; *count = counted = 0; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backwards from i */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) ++counted; } } if(i + 1 < n) { /* scan forwards from i */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) ++counted; } } /* end loop over i */ } } *count = counted; } /* count for each i the number of j closer than distance r */ void Cclosepaircounts(nxy, x, y, rmaxi, counts) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ /* output VECTOR, assumed initialised to 0 */ int *counts; { int n, maxchunk, i, j; double xi, yi, rmax, r2max, dx, dy, a; n = *nxy; rmax = *rmaxi; r2max = rmax * rmax; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backwards from i */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) (counts[i])++; } } if(i + 
1 < n) { /* scan forwards from i */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) (counts[i])++; } } /* end loop over i */ } } } /* analogue for two different point patterns */ void crosscount(nn1, x1, y1, nn2, x2, y2, rmaxi, count) /* inputs */ int *nn1, *nn2; double *x1, *y1, *x2, *y2, *rmaxi; /* output */ int *count; { int n1, n2, maxchunk, i, j, jleft, counted; double x1i, y1i, rmax, r2max, xleft, dx, dy, a; n1 = *nn1; n2 = *nn2; rmax = *rmaxi; r2max = rmax * rmax; *count = counted = 0; if(n1 == 0 || n2 == 0) return; jleft = 0; i = 0; maxchunk = 0; while(i < n1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n1) maxchunk = n1; for(; i < maxchunk; i++) { x1i = x1[i]; y1i = y1[i]; /* adjust starting index */ xleft = x1i - rmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from j=jleft until dx > rmax */ for(j=jleft; j < n2; j++) { dx = x2[j] - x1i; a = r2max - dx * dx; if(a < 0) break; dy = y2[j] - y1i; a -= dy * dy; if(a > 0) ++counted; } } } *count = counted; } /* Find duplicated locations xx, yy are not sorted */ void duplicatedxy(n, x, y, out) /* inputs */ int *n; double *x, *y; /* output */ int *out; /* logical vector */ { int m, i, j; double xi, yi; m = *n; for(i = 1; i < m; i++) { R_CheckUserInterrupt(); xi = x[i]; yi = y[i]; for(j = 0; j < i; j++) if((x[j] == xi) && (y[j] == yi)) break; if(j == i) out[i] = 0; else out[i] = 1; } } /* ............... fixed output length .............. 
*/ void Fclosepairs(nxy, x, y, r, noutmax, nout, iout, jout, xiout, yiout, xjout, yjout, dxout, dyout, dout, status) /* inputs */ int *nxy, *noutmax; double *x, *y, *r; /* outputs */ int *nout, *iout, *jout; double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout, *dout; int *status; { int n, k, kmax, maxchunk, i, j; double xi, yi, rmax, r2max, dx, dy, dx2, d2; n = *nxy; rmax = *r; r2max = rmax * rmax; *status = OK; *nout = 0; k = 0; /* k is the next available storage location and also the current length of the list */ kmax = *noutmax; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backwards */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > r2max) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { *nout = k; *status = ERR_OVERFLOW; return; } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; xiout[k] = xi; yiout[k] = yi; xjout[k] = x[j]; yjout[k] = y[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); ++k; } } } if(i + 1 < n) { /* scan forwards */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > r2max) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { *nout = k; *status = ERR_OVERFLOW; return; } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; xiout[k] = xi; yiout[k] = yi; xjout[k] = x[j]; yjout[k] = y[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); ++k; } } } } } *nout = k; } void Fcrosspairs(nn1, x1, y1, nn2, x2, y2, rmaxi, noutmax, nout, iout, jout, xiout, yiout, xjout, yjout, dxout, dyout, dout, status) /* inputs */ int *nn1, *nn2, *noutmax; double *x1, *y1, *x2, *y2, *rmaxi; /* outputs */ int *nout, *iout, *jout; double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout, *dout; int *status; { int n1, n2, 
maxchunk, k, kmax, i, j, jleft; double x1i, y1i, rmax, r2max, xleft, dx, dy, dx2, d2; n1 = *nn1; n2 = *nn2; rmax = *rmaxi; r2max = rmax * rmax; *status = OK; *nout = 0; k = 0; /* k is the next available storage location and also the current length of the list */ kmax = *noutmax; if(n1 == 0 || n2 == 0) return; jleft = 0; i = 0; maxchunk = 0; while(i < n1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n1) maxchunk = n1; for(; i < maxchunk; i++) { x1i = x1[i]; y1i = y1[i]; /* adjust starting position jleft */ xleft = x1i - rmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from j=jleft until dx > rmax */ for(j=jleft; j < n2; j++) { dx = x2[j] - x1i; dx2 = dx * dx; if(dx2 > r2max) break; dy = y2[j] - y1i; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { *nout = k; *status = ERR_OVERFLOW; return; } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; xiout[k] = x1i; yiout[k] = y1i; xjout[k] = x2[j]; yjout[k] = y2[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); ++k; } } } } *nout = k; } /* ........ versions that return variable-length vectors ......... 
*/ #define SINGLE /* return i, j only */ #define CLOSEFUN VcloseIJpairs #define CROSSFUN VcrossIJpairs #undef THRESH #undef COORDS #undef DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, d */ #define CLOSEFUN VcloseIJDpairs #define CROSSFUN VcrossIJDpairs #undef THRESH #undef COORDS #define DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, xi, yi, xj, yj, dx, dy, d */ #define CLOSEFUN Vclosepairs #define CROSSFUN Vcrosspairs #undef THRESH #define COORDS #define DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, t where t = 1{d < s} */ #define CLOSEFUN Vclosethresh #define CROSSFUN Vcrossthresh #define THRESH #undef COORDS #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS spatstat/src/mhloop.h0000755000176200001440000003021513166361223014362 0ustar liggesusers /* mhloop.h This file contains the iteration loop for the Metropolis-Hastings algorithm methas.c It is #included several times in methas.c with different #defines for the following variables MH_MARKED whether the simulation is marked (= the variable 'marked' is TRUE) MH_SINGLE whether there is a single interaction (as opposed to a hybrid of several interactions) MH_TEMPER whether tempering is applied MH_TRACKING whether to save transition history MH_DEBUG whether to print debug information MH_SNOOP whether to run visual debugger $Revision: 1.22 $ $Date: 2015/09/06 05:21:55 $ */ #ifndef MH_DEBUG #define MH_DEBUG NO #endif /* ..... Pre-processing: recursively delete illegal/improbable points ..... 
*/ nfree = state.npts - algo.ncond; /* number of 'free' points */ if(thinstart && nfree > 0) { nsuspect = nfree; while(nsuspect > 0) { /* scan for illegal points */ ix = state.npts - nsuspect; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("check legality of point %d = (%lf, %lf) with mark %d\n", ix, deathprop.u, deathprop.v, deathprop.mrk); #else Rprintf("check legality of point %d = (%lf, %lf)\n", ix, deathprop.u, deathprop.v); #endif #endif /* evaluate conditional intensity without trend terms */ #if MH_SINGLE adenom = (*(thecif.eval))(deathprop, state, thecdata); #else adenom = 1.0; for(k = 0; k < Ncif; k++) adenom *= (*(cif[k].eval))(deathprop, state, cdata[k]); #endif #if MH_TEMPER adenom = pow(adenom, invtemp); #endif #if MH_DEBUG Rprintf("cif = %lf\n", adenom); #endif /* accept/reject */ if(unif_rand() >= adenom) { #if MH_DEBUG Rprintf("deleting illegal/improbable point\n"); #endif /* delete point x[ix], y[ix] */ if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, deathprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, deathprop, cdata[k]); } #endif } state.npts--; nfree--; #if MH_DEBUG Rprintf("deleting point %d\n", ix); Rprintf("\tnpts=%d\n", state.npts); #endif if(ix < state.npts) { for(j = ix; j < state.npts; j++) { state.x[j] = state.x[j+1]; state.y[j] = state.y[j+1]; #if MH_MARKED state.marks[j] = state.marks[j+1]; #endif } } } nsuspect--; } } /* ............... MAIN ITERATION LOOP ............................. 
*/ OUTERCHUNKLOOP(irep, algo.nrep, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(irep, algo.nrep, maxchunk, 1024) { #if MH_DEBUG Rprintf("iteration %d\n", irep); #endif if(verb) { /* print progress message every nverb iterations */ iverb = irep + 1 + algo.nrep0; if((iverb % algo.nverb) == 0) Rprintf("iteration %d\n", iverb); } itype = REJECT; nfree = state.npts - algo.ncond; /* number of 'free' points */ /* ................ generate proposal ..................... */ /* Shift or birth/death: */ if(unif_rand() > algo.p) { #if MH_DEBUG Rprintf("propose birth or death\n"); #endif /* Birth/death: */ if(unif_rand() > algo.q) { /* Propose birth: */ birthprop.u = xpropose[irep]; birthprop.v = ypropose[irep]; #if MH_MARKED birthprop.mrk = mpropose[irep]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose birth at (%lf, %lf) with mark %d\n", birthprop.u, birthprop.v, birthprop.mrk); #else Rprintf("propose birth at (%lf, %lf)\n", birthprop.u, birthprop.v); #endif #endif /* evaluate conditional intensity */ #if MH_MARKED betavalue = model.beta[birthprop.mrk]; #endif #if MH_SINGLE anumer = betavalue * (*(thecif.eval))(birthprop, state, thecdata); #else anumer = betavalue; for(k = 0; k < Ncif; k++) anumer *= (*(cif[k].eval))(birthprop, state, cdata[k]); #endif #if MH_TEMPER anumer = pow(anumer, invtemp); #endif adenom = qnodds*(nfree+1); #if MH_DEBUG Rprintf("cif = %lf, Hastings ratio = %lf\n", anumer, anumer/adenom); #endif /* accept/reject */ if(unif_rand() * adenom < anumer) { #if MH_DEBUG Rprintf("accepted birth\n"); #endif itype = BIRTH; /* Birth proposal accepted. */ } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &birthprop, anumer, adenom, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = BIRTH; history.accepted[irep] = (itype == REJECT) ? 
0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = anumer; history.denominator[irep] = adenom; #endif } #endif } else if(nfree > 0) { /* Propose death: */ ix = floor(nfree * unif_rand()); if(ix < 0) ix = 0; ix = algo.ncond + ix; if(ix >= state.npts) ix = state.npts - 1; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose death of point %d = (%lf, %lf) with mark %d\n", ix, deathprop.u, deathprop.v, deathprop.mrk); #else Rprintf("propose death of point %d = (%lf, %lf)\n", ix, deathprop.u, deathprop.v); #endif #endif /* evaluate conditional intensity */ #if MH_MARKED betavalue = model.beta[deathprop.mrk]; #endif #if MH_SINGLE adenom = betavalue * (*(thecif.eval))(deathprop, state, thecdata); #else adenom = betavalue; for(k = 0; k < Ncif; k++) adenom *= (*(cif[k].eval))(deathprop, state, cdata[k]); #endif #if MH_TEMPER adenom = pow(adenom, invtemp); #endif anumer = qnodds * nfree; #if MH_DEBUG Rprintf("cif = %lf, Hastings ratio = %lf\n", adenom, anumer/adenom); #endif /* accept/reject */ if(unif_rand() * adenom < anumer) { #if MH_DEBUG Rprintf("accepted death\n"); #endif itype = DEATH; /* Death proposal accepted. */ } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &deathprop, anumer, adenom, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = DEATH; history.accepted[irep] = (itype == REJECT) ? 
0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = anumer; history.denominator[irep] = adenom; #endif } #endif } } else if(nfree > 0) { /* Propose shift: */ /* point to be shifted */ ix = floor(nfree * unif_rand()); if(ix < 0) ix = 0; ix = algo.ncond + ix; if(ix >= state.npts) ix = state.npts - 1; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif /* where to shift */ permitted = YES; shiftprop.ix = ix; shiftprop.u = xpropose[irep]; shiftprop.v = ypropose[irep]; #if MH_MARKED shiftprop.mrk = mpropose[irep]; if(algo.fixall) permitted = (shiftprop.mrk == deathprop.mrk); #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose shift of point %d = (%lf, %lf)[mark %d] to (%lf, %lf)[mark %d]\n", ix, deathprop.u, deathprop.v, deathprop.mrk, shiftprop.u, shiftprop.v, shiftprop.mrk); #else Rprintf("propose shift of point %d = (%lf, %lf) to (%lf, %lf)\n", ix, deathprop.u, deathprop.v, shiftprop.u, shiftprop.v); #endif #endif /* evaluate cif in two stages */ cvn = cvd = 1.0; if(permitted) { #if MH_SINGLE cvn = (*(thecif.eval))(shiftprop, state, thecdata); if(cvn > 0.0) { cvd = (*(thecif.eval))(deathprop, state, thecdata); } else { permitted = NO; } #else for(k = 0; k < Ncif; k++) { cvn *= (*(cif[k].eval))(shiftprop, state, cdata[k]); if(cvn > 0.0) { cvd *= (*(cif[k].eval))(deathprop, state, cdata[k]); } else { permitted = NO; break; } } #endif } if(permitted) { #if MH_MARKED cvn *= model.beta[shiftprop.mrk]; cvd *= model.beta[deathprop.mrk]; #endif #if MH_TEMPER cvn = pow(cvn, invtemp); cvd = pow(cvd, invtemp); #endif #if MH_DEBUG Rprintf("cif[old] = %lf, cif[new] = %lf, Hastings ratio = %lf\n", cvd, cvn, cvn/cvd); #endif /* accept/reject */ if(unif_rand() * cvd < cvn) { #if MH_DEBUG Rprintf("accepted shift\n"); #endif itype = SHIFT; /* Shift proposal accepted . 
*/ } } else { cvn = 0.0; cvd = 1.0; #if MH_DEBUG Rprintf("Forbidden shift"); #endif } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &shiftprop, cvn, cvd, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = SHIFT; history.accepted[irep] = (itype == REJECT) ? 0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = cvn; history.denominator[irep] = cvd; #endif } #endif } if(itype != REJECT) { /* ....... implement the transition ............ */ if(itype == BIRTH) { /* Birth transition */ /* add point at (u,v) */ #if MH_DEBUG #if MH_MARKED Rprintf("implementing birth at (%lf, %lf) with mark %d\n", birthprop.u, birthprop.v, birthprop.mrk); #else Rprintf("implementing birth at (%lf, %lf)\n", birthprop.u, birthprop.v); #endif #endif if(state.npts + 1 > state.npmax) { #if MH_DEBUG Rprintf("!!!!!!!!!!! storage overflow !!!!!!!!!!!!!!!!!\n"); #endif /* storage overflow; allocate more storage */ Nmore = 2 * state.npmax; state.x = (double *) S_realloc((char *) state.x, Nmore, state.npmax, sizeof(double)); state.y = (double *) S_realloc((char *) state.y, Nmore, state.npmax, sizeof(double)); #if MH_MARKED state.marks = (int *) S_realloc((char *) state.marks, Nmore, state.npmax, sizeof(int)); #endif state.npmax = Nmore; /* call the initialiser again, to allocate additional space */ #if MH_SINGLE thecdata = (*(thecif.init))(state, model, algo); #else model.ipar = iparvector; for(k = 0; k < Ncif; k++) { if(k > 0) model.ipar += plength[k-1]; cdata[k] = (*(cif[k].init))(state, model, algo); } #endif #if MH_DEBUG Rprintf("........... 
storage extended .................\n"); #endif } if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, birthprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, birthprop, cdata[k]); } #endif } /* Now add point */ state.x[state.npts] = birthprop.u; state.y[state.npts] = birthprop.v; #if MH_MARKED state.marks[state.npts] = birthprop.mrk; #endif state.npts = state.npts + 1; #if MH_DEBUG Rprintf("\tnpts=%d\n", state.npts); #endif } else if(itype==DEATH) { /* Death transition */ /* delete point x[ix], y[ix] */ if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, deathprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, deathprop, cdata[k]); } #endif } ix = deathprop.ix; state.npts = state.npts - 1; #if MH_DEBUG Rprintf("implementing death of point %d\n", ix); Rprintf("\tnpts=%d\n", state.npts); #endif if(ix < state.npts) { for(j = ix; j < state.npts; j++) { state.x[j] = state.x[j+1]; state.y[j] = state.y[j+1]; #if MH_MARKED state.marks[j] = state.marks[j+1]; #endif } } } else { /* Shift transition */ /* Shift (x[ix], y[ix]) to (u,v) */ #if MH_DEBUG #if MH_MARKED Rprintf("implementing shift from %d = (%lf, %lf)[%d] to (%lf, %lf)[%d]\n", deathprop.ix, deathprop.u, deathprop.v, deathprop.mrk, shiftprop.u, shiftprop.v, shiftprop.mrk); #else Rprintf("implementing shift from %d = (%lf, %lf) to (%lf, %lf)\n", deathprop.ix, deathprop.u, deathprop.v, shiftprop.u, shiftprop.v); Rprintf("\tnpts=%d\n", state.npts); #endif #endif if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, shiftprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, shiftprop, cdata[k]); } #endif } ix = shiftprop.ix; state.x[ix] = shiftprop.u; state.y[ix] = shiftprop.v; #if MH_MARKED state.marks[ix] = shiftprop.mrk; #endif } #if MH_DEBUG } else { Rprintf("rejected\n"); 
#endif } } } spatstat/src/lixel.c0000644000176200001440000000754613166361223014204 0ustar liggesusers#include #include /* lixel.c divide a linear network into shorter segments */ void Clixellate(ns, fromcoarse, tocoarse, fromfine, tofine, nv, xv, yv, svcoarse, tvcoarse, nsplit, np, spcoarse, tpcoarse, spfine, tpfine) /* A linear network with *ns segments and *nv vertices is specified by the vectors from, to, xv, yv. The i-th segment will be subdivided into nsplit[i] subsegments. New data will be added at the end of the vectors 'xv' and 'yv' representing additional vertices in the new network. The point pattern data (*np points with local coordinates sp, tp in the coarse network) will be mapped to the new 'fine' network. Points are sorted by 'spcoarse' value. 'xv', 'yv', 'svcoarse', 'tvcoarse' must each have space for (nv + sum(nsplit-1)) entries. 'fromfine', 'tofine' must have length = sum(nsplit). */ int *ns; /* number of segments (input & output) */ int *fromcoarse, *tocoarse; /* endpoints of each segment (input) */ int *fromfine, *tofine; /* endpoints of each segment (output) */ int *nv; /* number of vertices (input & output) */ double *xv, *yv; /* cartesian coords of vertices (input & output) */ int *svcoarse; /* segment id of new vertex in COARSE network */ double *tvcoarse; /* location coordinate of new vertex on COARSE network */ int *nsplit; /* number of pieces into which each segment should be split */ int *np; /* number of data points */ double *tpcoarse, *tpfine; /* location coordinate */ int *spcoarse, *spfine; /* segment id coordinate */ { int Np, oldNs, oldNv, i, j, k, m, ll; int oldfromi, oldtoi, newlines, newNv, newNs, SegmentForData; double xstart, xend, ystart, yend, xincr, yincr, tn; Np = *np; newNv = oldNv = *nv; oldNs = *ns; newNs = 0; /* initialise pointer at start of point pattern Determine which segment contains first point */ k = 0; SegmentForData = (Np > 0) ? 
spcoarse[0] : -1; /* loop over line segments in original network */ for(i = 0; i < oldNs; i++) { newlines = nsplit[i]; oldfromi = fromcoarse[i]; oldtoi = tocoarse[i]; /* local coordinates of endpoints of segment, in ***coarse*** network */ svcoarse[oldfromi] = svcoarse[oldtoi] = i; tvcoarse[oldfromi] = 0.0; tvcoarse[oldtoi] = 1.0; if(newlines == 1) { /* copy existing segment to new segment list */ fromfine[newNs] = oldfromi; tofine[newNs] = oldtoi; /* advance pointer */ ++newNs; } else if(newlines > 1) { /* split segment into 'newlines' pieces */ xstart = xv[oldfromi]; ystart = yv[oldfromi]; xend = xv[oldtoi]; yend = yv[oldtoi]; xincr = (xend-xstart)/newlines; yincr = (yend-ystart)/newlines; m = newlines - 1; for(j = 1; j <= m; j++) { /* create new vertex, number 'newNv' */ xv[newNv] = xstart + j * xincr; yv[newNv] = ystart + j * yincr; /* local coordinates of new vertex relative to ***coarse*** network */ svcoarse[newNv] = i; tvcoarse[newNv] = ((double) j)/((double) newlines); /* create new segment, number 'newNs', ending at new vertex */ fromfine[newNs] = (j == 1) ? oldfromi : (newNv-1); tofine[newNs] = newNv; /* advance */ ++newNv; ++newNs; } /* create segment from last added vertex to end of old segment */ fromfine[newNs] = newNv-1; tofine[newNs] = oldtoi; ++newNs; } /* handle data points lying on current segment i */ while(SegmentForData == i) { if(newlines == 1) { spfine[k] = spcoarse[k]; tpfine[k] = tpcoarse[k]; } else { tn = tpcoarse[k] * newlines; ll = (int) floor(tn); ll = (ll < 0) ? 0 : (ll > newlines) ? newlines: ll; tpfine[k] = tn - ll; spfine[k] = newNs - newlines + ll; } ++k; SegmentForData = (k < Np) ? 
spcoarse[k] : -1; } } *nv = newNv; *ns = newNs; } spatstat/src/distances.c0000755000176200001440000002166613166361223015046 0ustar liggesusers/* distances.c Distances between pairs of points $Revision: 1.31 $ $Date: 2017/01/08 00:32:52 $ Cpairdist Pairwise distances Cpair2dist Pairwise distances squared CpairPdist Pairwise distances with periodic correction CpairP2dist Pairwise distances squared, with periodic correction Ccrossdist Pairwise distances for two sets of points Ccross2dist Pairwise distances squared, for two sets of points CcrossPdist Pairwise distances for two sets of points, periodic correction */ #include #include #include "chunkloop.h" double sqrt(); void Cpairdist(n, x, y, squared, d) /* inputs */ int *n; double *x, *y; int *squared; /* output */ double *d; { void Cpair1dist(), Cpair2dist(); if(*squared == 0) { Cpair1dist(n, x, y, d); } else { Cpair2dist(n, x, y, d); } } void Cpair1dist(n, x, y, d) /* inputs */ int *n; double *x, *y; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dist = sqrt( dx * dx + dy * dy ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } /* squared distances */ void Cpair2dist(n, x, y, d) /* inputs */ int *n; double *x, *y; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for 
(j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dist = dx * dx + dy * dy; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } void Ccrossdist(nfrom, xfrom, yfrom, nto, xto, yto, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto; int *squared; /* output */ double *d; { void Ccross1dist(), Ccross2dist(); if(*squared == 0) { Ccross1dist(nfrom, xfrom, yfrom, nto, xto, yto, d); } else { Ccross2dist(nfrom, xfrom, yfrom, nto, xto, yto, d); } } void Ccross1dist(nfrom, xfrom, yfrom, nto, xto, yto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy; nf = *nfrom; nt = *nto; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; *dptr = sqrt( dx * dx + dy * dy ); } } } } /* squared distances */ void Ccross2dist(nfrom, xfrom, yfrom, nto, xto, yto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy; nf = *nfrom; nt = *nto; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; *dptr = dx * dx + dy * dy; } } } } /* distances with periodic correction */ void CpairPdist(n, x, y, xwidth, yheight, squared, d) /* inputs */ int *n; double *x, *y, *xwidth, *yheight; int *squared; /* output */ double *d; { void CpairP1dist(), CpairP2dist(); if(*squared == 0) { CpairP1dist(n, x, y, xwidth, yheight, d); } else { CpairP2dist(n, x, y, xwidth, yheight, d); } } void CpairP1dist(n, x, y, xwidth, yheight, d) /* inputs */ int *n; double *x, *y, *xwidth, *yheight; /* output */ double *d; { int i, 
j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dx2, dy2, dx2p, dy2p, dist, wide, high; npoints = *n; wide = *xwidth; high = *yheight; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dist = sqrt( dx2p + dy2p ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } /* same function without the sqrt */ void CpairP2dist(n, x, y, xwidth, yheight, d) /* inputs */ int *n; double *x, *y, *xwidth, *yheight; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dx2, dy2, dx2p, dy2p, dist, wide, high; npoints = *n; wide = *xwidth; high = *yheight; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dist = dx2p + dy2p; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } void CcrossPdist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, squared, d) /* inputs */ int *nto, 
*nfrom; double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight; int *squared; /* output */ double *d; { void CcrossP1dist(), CcrossP2dist(); if(*squared == 0) { CcrossP1dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d); } else { CcrossP2dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d); } } void CcrossP1dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy, dx2, dy2, dx2p, dy2p, wide, high; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; *dptr = sqrt( dx2p + dy2p ); } } } } void CcrossP2dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy, dx2, dy2, dx2p, dy2p, wide, high; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p 
= dy2; *dptr = dx2p + dy2p; } } } } spatstat/src/functable.h0000755000176200001440000000310213166361223015022 0ustar liggesusers/* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Definitions of C structures for spatial statistics function estimates. Usually the estimates are of the form f^(x) = a^(x)/b^(x); we store f^ and also a^ and b^ to cater for applications with replicated data. # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ typedef struct Ftable { /* double precision function table */ double t0; double t1; int n; /* number of entries */ double *f; double *num; /* f[i] = num[i]/denom[i] */ double *denom; } Ftable; typedef struct Itable { /* integer count table e.g for histograms */ double t0; double t1; int n; int *num; int *denom; /* usually p[i] = num[i]/denom[i] */ } Itable; typedef struct H4table { /* Four histograms, for censored data */ double t0; double t1; int n; int *obs; /* observed lifetimes: o_i = min(t_i, c_i) */ int *nco; /* uncensored lifetimes: o_i for which t_i <= c_i */ int *cen; /* censoring times: c_i */ int *ncc; /* censor times of uncensored data: c_i for which t_i <= c_i */ int upperobs; /* number of o_i that exceed t1 */ int uppercen; /* number of c_i that exceed t1 */ } H4table; spatstat/src/proto.h0000644000176200001440000005176613166361223014242 0ustar liggesusers#include #include /* Prototype declarations for all native routines in spatstat package Automatically generated - do not edit! 
*/ /* Functions invoked by .C */ void areadifs(double *, int *, double *, double *, int *, int *, double *, void *); void areaBdif(double *, int *, double *, double *, int *, int *, double *, double *, double *, double *, double *, void *); void delta2area(double *, double *, double *, double *, int *, double *, double *, double *, double *, int *, void *); void delta2area(double *, double *, double *, double *, int *, double *, double *, double *, double *, int *, void *); void digberJ(double *, double *, int *, int *, int *, double *, void *); void xysegint(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, void *); void Fclosepairs(int *, double *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, double *, double *, int *, void *); void paircount(int *, double *, double *, double *, int *, void *); void Fclosepairs(int *, double *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, double *, double *, int *, void *); void crosscount(int *, double *, double *, int *, double *, double *, double *, int *, void *); void Fcrosspairs(int *, double *, double *, int *, double *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, double *, double *, int *, void *); void cocoImage(int *, int *, int *, void *); void cocoGraph(int *, int *, int *, int *, int *, int *, void *); void lincrossdist(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, int *, int *, double *, int *, int *, double *, void *); void trigrafS(int *, int *, int *, int *, int *, int *, int *, int *, int *, int *, void *); void trigraf(int *, int *, int *, int *, int *, int *, int *, int *, int *, int *, void *); void Idist2dpath(int *, int *, int *, int *, int *, int *, int *, void *); void Gdenspt(int *, double *, double *, 
double *, double *, void *); void Gwtdenspt(int *, double *, double *, double *, double *, double *, void *); void Gwtdenspt(int *, double *, double *, double *, double *, double *, void *); void denspt(int *, double *, double *, double *, double *, double *, void *); void wtdenspt(int *, double *, double *, double *, double *, double *, double *, void *); void wtdenspt(int *, double *, double *, double *, double *, double *, double *, void *); void adenspt(int *, double *, double *, double *, double *, double *, double *, void *); void awtdenspt(int *, double *, double *, double *, double *, double *, double *, double *, void *); void awtdenspt(int *, double *, double *, double *, double *, double *, double *, double *, void *); void crdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, void *); void wtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); void wtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); void acrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); void awtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, void *); void awtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, void *); void segdens(double *, int *, double *, double *, double *, double *, int *, double *, double *, double *, void *); void Ediggra(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *, void *); void Ediggatsti(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, void *); void discareapoly(int *, double *, double *, int *, double *, int *, double *, double *, double *, double *, double *, double *, void *); void Ddist2dpath(int *, double *, int *, double 
*, double *, int *, int *, void *); void D3pairdist(int *, double *, double *, double *, int *, double *, void *); void D3pairPdist(int *, double *, double *, double *, double *, double *, double *, int *, double *, void *); void nnd3D(int *, double *, double *, double *, double *, int *, double *, void *); void knnd3D(int *, int *, double *, double *, double *, double *, int *, double *, void *); void nnw3D(int *, double *, double *, double *, double *, int *, double *, void *); void knnw3D(int *, int *, double *, double *, double *, double *, int *, double *, void *); void D3crossdist(int *, double *, double *, double *, int *, double *, double *, double *, int *, double *, void *); void D3crossPdist(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, int *, double *, void *); void Cpairdist(int *, double *, double *, int *, double *, void *); void CpairPdist(int *, double *, double *, double *, double *, int *, double *, void *); void Ccrossdist(int *, double *, double *, int *, double *, double *, int *, double *, void *); void CcrossPdist(int *, double *, double *, int *, double *, double *, double *, double *, int *, double *, void *); void nndMD(int *, int *, double *, double *, double *, void *); void knndMD(int *, int *, int *, double *, double *, double *, void *); void nnwMD(int *, int *, double *, double *, int *, double *, void *); void knnwMD(int *, int *, int *, double *, double *, int *, double *, void *); void distmapbin(double *, double *, double *, double *, int *, int *, int *, double *, double *, void *); void ripleybox(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); void ripleypoly(int *, double *, double *, int *, double *, int *, double *, double *, double *, double *, double *, void *); void exact_dt_R(double *, double *, int *, double *, double *, double *, double *, int *, int *, int *, int *, double *, int *, double *, 
void *); void ps_exact_dt_R(double *, double *, double *, double *, int *, int *, int *, int *, int *, double *, int *, int *, double *, void *); void fardist2grid(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, void *); void fardistgrid(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, void *); void RcallK3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *, void *); void RcallG3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *, void *); void RcallF3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, void *); void RcallF3cen(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, int *, int *, int *, void *); void RcallG3cen(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, int *, int *, int *, void *); void Rcallpcf3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *, double *, void *); void RcallF3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, void *); void locxprod(int *, double *, double *, int *, double *, double *, double *, int *, double *, double *, void *); void Efiksel(int *, double *, double *, int *, double *, double *, double *, double *, double *, void *); void Egeyer(int *, double *, double *, int *, int *, double *, double *, int *, double *, 
double *, double *, void *); void hasXclose(int *, double *, double *, double *, int *, void *); void hasXpclose(int *, double *, double *, double *, double *, int *, void *); void hasXYclose(int *, double *, double *, int *, double *, double *, double *, int *, void *); void hasXYpclose(int *, double *, double *, int *, double *, double *, double *, double *, int *, void *); void hasX3close(int *, double *, double *, double *, double *, int *, void *); void hasX3pclose(int *, double *, double *, double *, double *, double *, int *, void *); void hasXY3close(int *, double *, double *, double *, int *, double *, double *, double *, double *, int *, void *); void hasXY3pclose(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, int *, void *); void Cidw(double *, double *, double *, int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, double *, void *); void idwloo(double *, double *, double *, int *, double *, double *, double *, double *, void *); void locprod(int *, double *, double *, double *, int *, double *, double *, void *); void locxprod(int *, double *, double *, int *, double *, double *, double *, int *, double *, double *, void *); void KborderI(int *, double *, double *, double *, int *, double *, int *, int *, void *); void KborderD(int *, double *, double *, double *, int *, double *, double *, double *, void *); void Kwborder(int *, double *, double *, double *, double *, int *, double *, double *, double *, void *); void KnoneI(int *, double *, double *, int *, double *, int *, void *); void KnoneD(int *, double *, double *, int *, double *, double *, void *); void Kwnone(int *, double *, double *, double *, int *, double *, double *, void *); void KrectWtd(double *, double *, int *, double *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, void *); void KrectInt(double *, double *, int *, 
double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, int *, int *, int *, void *); void KrectDbl(double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, void *); void Csumouter(double *, int *, int *, double *, void *); void Cwsumouter(double *, int *, int *, double *, double *, void *); void Csum2outer(double *, double *, int *, int *, int *, double *, void *); void Cwsum2outer(double *, double *, int *, int *, int *, double *, double *); void Cquadform(double *, int *, int *, double *, double *, void *); void Cbiform(double *, double *, int *, int *, double *, double *, void *); void Csumsymouter(double *, int *, int *, double *, void *); void Cwsumsymouter(double *, double *, int *, int *, double *, void *); void Ccountends(int *, double *, int *, double *, int *, double *, double *, int *, int *, int *, double *, double *, double *, int *, void *); void Clinequad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, void *); void ClineRquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, void *); void ClineMquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, int *, double *, double *, int *, void *); void ClineRMquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, int *, double *, double *, int *, void *); void linearradius(int *, int *, int *, double *, int *, double *, double *, double *, void *); void cocoGraph(int *, int *, int *, int *, int *, int *, void *); void cocoGraph(int *, int *, int *, int 
*, int *, int *, void *); void Clixellate(int *, int *, int *, int *, int *, int *, double *, double *, int *, double *, int *, int *, int *, double *, int *, double *, void *); void locpcfx(int *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, double *, void *); void locWpcfx(int *, double *, double *, int *, int *, double *, double *, int *, double *, int *, double *, double *, double *, void *); void cocoGraph(int *, int *, int *, int *, int *, int *, void *); void minPnnd2(int *, double *, double *, double *, double *, void *); void minnnd2(int *, double *, double *, double *, double *, void *); void maxPnnd2(int *, double *, double *, double *, double *, void *); void maxnnd2(int *, double *, double *, double *, double *, void *); void nnX3Dinterface(int *, double *, double *, double *, int *, int *, double *, double *, double *, int *, int *, int *, int *, double *, int *, double *, void *); void knnX3Dinterface(int *, double *, double *, double *, int *, int *, double *, double *, double *, int *, int *, int *, int *, int *, double *, int *, double *, void *); void nnXinterface(int *, double *, double *, int *, int *, double *, double *, int *, int *, int *, int *, double *, int *, double *, void *); void knnXinterface(int *, double *, double *, int *, int *, double *, double *, int *, int *, int *, int *, int *, double *, int *, double *, void *); void linnndist(int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, double *, double *, void *); void linknnd(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *); void linnnwhich(int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, double *, double *, int *, void *); void linknnd(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *); void linknncross(int *, int *, int *, double 
*, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *); void linSnndwhich(int *, int *, double *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *); void linndcross(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, int *, double *, double *, int *, void *); void linndxcross(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, int *, void *); void nndistsort(int *, double *, double *, double *, double *, void *); void knndsort(int *, int *, double *, double *, double *, double *, void *); void nnwhichsort(int *, double *, double *, int *, double *, void *); void knnsort(int *, int *, double *, double *, double *, int *, double *, void *); void Clinvwhichdist(int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *); void linvknndist(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *); void nnGinterface(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, double *, int *, double *, void *); void knnGinterface(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, double *, void *); void linpairdist(int *, double *, double *, int *, double *, double *, double *, int *, int *, double *, int *, double *, void *); void poly2imA(int *, int *, double *, double *, int *, double *, int *, void *); void xypsi(int *, double *, double *, double *, double *, double *, double *, double *, int *, int *, void *); void Cxypolyselfint(int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, void *); void auctionbf(int *, int *, int *, double 
*, double *, int *, double *, void *); void dwpure(int *, int *, int *, int *, int *, int *, void *); void auctionbf(int *, int *, int *, double *, double *, int *, double *, void *); void dwpure(int *, int *, int *, int *, int *, int *, void *); void dinfty_R(int *, int *, int *, void *); void dwpure(int *, int *, int *, int *, int *, int *, void *); void dwpure(int *, int *, int *, int *, int *, int *, void *); void seg2pixI(int *, double *, double *, double *, double *, int *, int *, int *, void *); void seg2pixL(int *, double *, double *, double *, double *, double *, double *, double *, int *, int *, double *, void *); void seg2pixN(int *, double *, double *, double *, double *, double *, int *, int *, double *, void *); void xysegint(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, void *); void xysi(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, int *, void *); void xysiANY(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, int *, void *); void xysegXint(int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, void *); void xysxi(int *, double *, double *, double *, double *, double *, int *, void *); void Corput(int *, int *, double *, void *); void knownCif(char *, int *, void *); void scantrans(double *, double *, int *, double *, double *, double *, double *, int *, int *, double *, int *, void *); void Gsmoopt(int *, double *, double *, double *, int *, double *, double *, void *); void Gwtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, void *); void smoopt(int *, double *, double *, double *, int *, double *, double *, double *, void *); void wtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, double *, void *); void asmoopt(int *, 
double *, double *, double *, int *, double *, double *, double *, void *); void awtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, double *, void *); void crsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); void wtcrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, void *); void acrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); void awtcrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, void *); void CspaSumSymOut(int *, int *, int *, int *, int *, int *, double *, int *, double *, void *); void CspaWtSumSymOut(int *, int *, int *, int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, void *); void Ccrosspaircounts(int *, double *, double *, int *, double *, double *, double *, int *, void *); void Cclosepaircounts(int *, double *, double *, double *, int *, void *); void poly2imI(double *, double *, int *, int *, int *, int *, void *); void bdrymask(int *, int *, int *, int *, void *); void discs2grid(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, int *, void *); /* Functions invoked by .Call */ SEXP close3pairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP close3IJpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP cross3pairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP cross3IJpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP Vclosepairs(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP VcloseIJpairs(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP VcloseIJDpairs(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP Vcrosspairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP VcrossIJpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP VcrossIJDpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP Vclosethresh(SEXP, SEXP, SEXP, SEXP, SEXP, 
SEXP); SEXP triograph(SEXP, SEXP, SEXP, SEXP); SEXP trioxgraph(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP triDgraph(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP triDRgraph(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP graphVees(SEXP, SEXP, SEXP, SEXP); SEXP Cxysegint(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP CxysegXint(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP CxysegXint(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP thinjumpequal(SEXP, SEXP, SEXP, SEXP); SEXP xmethas(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP xmethas(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectStrauss(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectHardcore(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectStraussHard(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectDiggleGratton(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectDGS(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectPenttinen(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP Cwhist(SEXP, SEXP, SEXP, SEXP); spatstat/src/triplets.c0000644000176200001440000000615513166361223014730 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Triplets process */ /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct Triplets { double gamma; double r; double loggamma; double r2; double *period; int hard; int per; int *neighbour; /* scratch list of neighbours of current point */ int Nmax; /* length of scratch space allocated */ } Triplets; /* initialiser function */ Cdata *tripletsinit(state, model, algo) State state; Model model; Algor algo; { /* create storage for model parameters */ Triplets *triplets; triplets = (Triplets *) R_alloc(1, sizeof(Triplets)); /* create scratch space */ triplets->Nmax = 1024; triplets->neighbour 
= (int *) R_alloc(1024, sizeof(int)); /* Interpret model parameters*/ triplets->gamma = model.ipar[0]; triplets->r = model.ipar[1]; /* No longer passed as r^2 */ triplets->r2 = triplets->r * triplets->r; triplets->period = model.period; #ifdef MHDEBUG Rprintf("Initialising Triplets gamma=%lf, r=%lf\n", triplets->gamma, triplets->r); #endif /* is the model numerically equivalent to hard core ? */ triplets->hard = (triplets->gamma < DOUBLE_EPS); triplets->loggamma = (triplets->hard) ? 0 : log(triplets->gamma); /* periodic boundary conditions? */ triplets->per = (model.period[0] > 0.0); return((Cdata *) triplets); } /* conditional intensity evaluator */ double tripletscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, kount, ix, j, k, nj, nk, N, Nmax, Nmore, N1; int *neighbour; double *x, *y; double u, v; double r2, d2, cifval; Triplets *triplets; triplets = (Triplets *) cdata; r2 = triplets->r2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); neighbour = triplets->neighbour; Nmax = triplets->Nmax; N = 0; /* compile list of neighbours */ for(j=0; j < npts; j++) { if(j != ix) { d2 = dist2either(u,v,x[j],y[j],triplets->period); if(d2 < r2) { /* add j to list of neighbours of current point */ if(N >= Nmax) { /* storage space overflow: reallocate */ Nmore = 2 * Nmax; triplets->neighbour = neighbour = (int *) S_realloc((char *) triplets->neighbour, Nmore, Nmax, sizeof(int)); triplets->Nmax = Nmax = Nmore; } neighbour[N] = j; N++; } } } /* count r-close (ordered) pairs of neighbours */ kount = 0; if(N > 1) { N1 = N - 1; for(j = 0; j < N1; j++) { nj = neighbour[j]; for(k = j+1; k < N; k++) { nk = neighbour[k]; if(nj != nk) { d2 = dist2either(x[nj],y[nj],x[nk],y[nk],triplets->period); if(d2 < r2) kount++; } } } } if(triplets->hard) { if(kount > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp((triplets->loggamma) * kount); #ifdef MHDEBUG Rprintf("triplet count=%d cif=%lf\n", 
kount, cifval); #endif return cifval; } Cifns TripletsCifns = { &tripletsinit, &tripletscif, (updafunptr) NULL, NO}; spatstat/src/linvknndist.c0000644000176200001440000001356413166361223015427 0ustar liggesusers#include #include "yesno.h" /* linvknndist.c k-th nearest neighbour function at vertices (distance from each vertex to the nearest, second nearest, ... k-th nearest target data point) Needs only the sparse representation of the network $Revision: 1.5 $ $Date: 2017/09/12 17:47:37 $ ! Data points must be ordered by segment index ! */ #undef HUH #define DIST(VERTEX, ORDER) dist[(ORDER) + (VERTEX) * Kmax] #define WHICH(VERTEX, ORDER) which[(ORDER) + (VERTEX) * Kmax] #define UPDATE(VERTEX, D, J, EPS) \ UpdateKnnList(D, J, \ dist + (VERTEX) * Kmax, \ which + (VERTEX) * Kmax, \ Kmax, \ EPS) void linvknndist(kmax, /* number of neighbours required */ nq, sq, tq, /* target data points (ordered by sq) */ nv, /* number of network vertices */ ns, from, to, /* segments (pairs of vertices) */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ dist, /* distance from each vertex to the nearest, ..., kth nearest data points */ which /* identifies which data points */ ) int *kmax; int *nq, *nv, *ns; /* number of points, vertices, segments */ int *sq, *from, *to; /* integer vectors (mappings) */ double *tq; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *dist; int *which; { int Nq, Nv, Ns, Kmax, Nout, i, j, k, m; int segQj, ivleft, ivright, changed; double hugevalue, eps, slen, d, tqj; char converged; int UpdateKnnList(); Kmax = *kmax; Nq = *nq; Nv = *nv; Ns = *ns; hugevalue = *huge; eps = *tol; /* number of values in 'dist' and in 'which' */ Nout = Nv * Kmax; #ifdef HUH Rprintf("Initialise dist\n"); #endif /* initialise to huge value */ for(i = 0; i < Nout; i++) { dist[i] = hugevalue; which[i] = -1; } #ifdef HUH Rprintf("Run through target points\n"); #endif /* assign 
value to endpoints of segments containing target points */ for(j = 0; j < Nq; j++) { segQj = sq[j]; tqj = tq[j]; slen = seglen[segQj]; ivleft = from[segQj]; d = slen * tqj; UPDATE(ivleft, d, j, (double) 0.0); ivright = to[segQj]; d = slen * (1.0 - tqj); UPDATE(ivright, d, j, (double) 0.0); } #ifdef HUH Rprintf("Initialised values at vertices:\n"); Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) { Rprintf("\t%d", i); for(k = 0; k < Kmax; k++) Rprintf(" %d ", WHICH(i, k)); for(k = 0; k < Kmax; k++) Rprintf(" %lf ", DIST(i, k)); Rprintf("\n"); } #endif /* recursively update */ #ifdef HUH Rprintf("Recursive update\n"); #endif converged = NO; while(!converged) { converged = YES; #ifdef HUH Rprintf("........... starting new pass ...................... \n"); Rprintf("Current state:\n"); Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) { Rprintf("\t%d", i); for(k = 0; k < Kmax; k++) Rprintf(" %d ", WHICH(i, k)); for(k = 0; k < Kmax; k++) Rprintf(" %lf ", DIST(i, k)); Rprintf("\n"); } #endif for(m = 0; m < Ns; m++) { ivleft = from[m]; ivright = to[m]; slen = seglen[m]; #ifdef HUH Rprintf("updating right=%d from left=%d\n", ivright, ivleft); #endif for(k = 0; k < Kmax; k++) { changed = UPDATE(ivright, DIST(ivleft, k)+slen, WHICH(ivleft, k), eps); converged = converged && !changed; } #ifdef HUH Rprintf("updating left=%d from right=%d\n", ivleft, ivright); #endif for(k = 0; k < Kmax; k++) { changed = UPDATE(ivleft, DIST(ivright, k)+slen, WHICH(ivright, k), eps); converged = converged && !changed; } } } #ifdef HUH Rprintf("Done\nVertex values:\n"); Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) { Rprintf("\t%d", i); for(k = 0; k < Kmax; k++) Rprintf(" %d ", WHICH(i, k)); for(k = 0; k < Kmax; k++) Rprintf(" %lf ", DIST(i, k)); Rprintf("\n"); } #endif } /* update a list of nearest, second nearest, ..., k-th nearest neighbours */ int UpdateKnnList(d, j, dist, which, Kmax, eps) double d; /* candidate distance */ int j; /* corresponding candidate target point */ 
int Kmax; double *dist; /* pointer to start of vector of length Kmax */ int *which; /* pointer to start of vector of length Kmax */ double eps; /* numerical tolerance, to prevent infinite loops */ { char matched, unsorted, changed; int k, Klast, itmp; double dtmp, dPlusEps; Klast = Kmax - 1; dPlusEps = d + eps; if(dPlusEps > dist[Klast]) return(NO); changed = NO; /* Check whether this data point is already listed as a neighbour */ matched = NO; for(k = 0; k < Kmax; k++) { if(which[k] == j) { matched = YES; #ifdef HUH Rprintf("\tMatch: which[%d] = %d\n", k, j); #endif if(dPlusEps <= dist[k]) { changed = YES; #ifdef HUH Rprintf("\t\tUpdated distance from %lf to %lf\n", dist[k], d); #endif dist[k] = d; } break; } } if(!matched) { #ifdef HUH Rprintf("\tNo match with current list\n"); Rprintf("\t\tUpdated distance from %lf to %lf\n", dist[Klast], d); #endif /* replace furthest point */ changed = YES; dist[Klast] = d; which[Klast] = j; } /* Bubble sort entries */ if(changed) { #ifdef HUH Rprintf("Bubble sort.\nCurrent state:\n\tk\twhich\tdist\n"); for(k = 0; k <= Klast; k++) Rprintf("\t%d\t%d\t%lf\n", k, which[k], dist[k]); #endif do { unsorted = NO; for(k = 0; k < Klast; k++) { if(dist[k] > dist[k+1]) { unsorted = YES; dtmp = dist[k]; dist[k] = dist[k+1]; dist[k+1] = dtmp; itmp = which[k]; which[k] = which[k+1]; which[k+1] = itmp; } } } while(unsorted); } #ifdef HUH Rprintf("Return state:\n\tk\twhich\tdist\n"); for(k = 0; k <= Klast; k++) Rprintf("\t%d\t%d\t%lf\n", k, which[k], dist[k]); #endif return( (int) changed); } spatstat/src/KrectV3.h0000644000176200001440000000025313166361223014341 0ustar liggesusers/* KrectV4.h with or without border correction */ if((*doBord) == 1) { #define BORDER #include "KrectV4.h" } else { #undef BORDER #include "KrectV4.h" } spatstat/src/loccumx.h0000644000176200001440000000373313166361223014540 0ustar liggesusers/* loccumx.h C template for loccum.c grid-to-data or data-cross-data functions $Revision: 1.5 $ $Date: 2012/11/10 06:13:52 $ 
macros: FNAME function name NULVAL initial value (empty sum = 0, empty product = 1) INC(A,B) increment operation A += B or A *= B */ void FNAME(ntest, xtest, ytest, ndata, xdata, ydata, vdata, nr, rmax, ans) /* inputs */ int *ntest, *ndata, *nr; double *xtest, *ytest, *xdata, *ydata, *vdata; double *rmax; /* output */ double *ans; /* matrix of column vectors of functions for each point of first pattern */ { int Ntest, Ndata, Nr, Nans; double Rmax; int i, j, k, jleft, kmin, maxchunk, columnstart; double Rmax2, rstep, xtesti, ytesti, xleft; double dx, dy, dx2, d2, d, contrib; Ntest = *ntest; Ndata = *ndata; Nr = *nr; Rmax = *rmax; if(Ntest == 0) return; Nans = Nr * Ntest; /* initialise products to 1 */ OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Nans, maxchunk, 8196) { ans[k] = NULVAL; } } if(Ndata == 0) return; rstep = Rmax/(Nr-1); Rmax2 = Rmax * Rmax; jleft = 0; OUTERCHUNKLOOP(i, Ntest, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ntest, maxchunk, 8196) { xtesti = xtest[i]; ytesti = ytest[i]; columnstart = Nr * i; /* start position for f_i(.) 
in 'ans' */ /* adjust starting point */ xleft = xtesti - Rmax; while((xdata[jleft] < xleft) && (jleft+1 < Ndata)) ++jleft; /* process from jleft until |dx| > Rmax */ for(j=jleft; j < Ndata; j++) { dx = xdata[j] - xtesti; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = ydata[j] - ytesti; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = vdata[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } } } spatstat/src/constants.h0000644000176200001440000000057613166361223015104 0ustar liggesusers/* constants.h Ensure that required constants are defined (Insurance against flaky installations) $Revision: 1.1 $ $Date: 2013/08/09 08:14:15 $ */ #ifndef M_PI #define M_PI 3.141592653589793 #endif #ifndef M_PI_2 #define M_PI_2 1.570796326794897 #endif #ifndef M_2_PI #define M_2_PI (2.0/M_PI) #endif #ifndef M_2PI #define M_2PI 6.283185307179586 #endif spatstat/src/Kborder.c0000755000176200001440000000155113166361223014450 0ustar liggesusers#include #include #include /* Kborder.c Efficient computation of border-corrected estimates of K for large datasets KborderI() Estimates K function, returns integer numerator & denominator KborderD() Estimates K function, returns double precision numerator & denominator Kwborder() Estimates Kinhom. 
Functions require (x,y) data to be sorted in ascending order of x and expect r values to be equally spaced and starting at zero $Revision: 1.4 $ $Date: 2013/05/27 02:09:10 $ */ #undef WEIGHTED #define FNAME KborderI #define OUTTYPE int #include "Kborder.h" #undef FNAME #undef OUTTYPE #define FNAME KborderD #define OUTTYPE double #include "Kborder.h" #undef FNAME #undef OUTTYPE #define FNAME Kwborder #define WEIGHTED #define OUTTYPE double #include "Kborder.h" spatstat/src/knnXdist.h0000644000176200001440000001501313166361223014662 0ustar liggesusers #if (1 == 0) /* knnXdist.h Code template for C functions supporting nncross for k-nearest neighbours (k > 1) THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED IN ASCENDING ORDER OF y COORDINATE This code is #included multiple times in knndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if exclusion mechanism is used Either or both DIST and WHICH may be defined. When EXCLUDE is defined, code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.10 $ $Date: 2013/12/10 03:29:55 $ */ #endif void FNAME(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; int *kmax; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int npoints1, npoints2, nk, nk1; int maxchunk, i, jleft, jright, jwhich, lastjwhich, unsorted, k, k1; double d2, d2minK, x1i, y1i, dx, dy, dy2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif #ifdef EXCLUDE int id1i; #endif #ifdef TRACER int kk; #endif npoints1 = *n1; npoints2 = *n2; nk = *kmax; nk1 = nk - 1; hu = *huge; hu2 = hu * hu; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints1) maxchunk = npoints1; for(; i < maxchunk; i++) { /* initialise nn distances and indices */ d2minK = hu2; jwhich = -1; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } x1i = x1[i]; y1i = y1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif #ifdef TRACER Rprintf("i=%d : (%lf, %lf) ..................... 
\n", i, x1i, y1i); #endif if(lastjwhich < npoints2) { #ifdef TRACER Rprintf("\tForward search from lastjwhich=%d:\n", lastjwhich); #endif /* search forward from previous nearest neighbour */ for(jright = lastjwhich; jright < npoints2; ++jright) { #ifdef TRACER Rprintf("\tjright=%d \t (%lf, %lf)\n", jright, x2[jright], y2[jright]); #endif dy = y2[jright] - y1i; dy2 = dy * dy; #ifdef TRACER Rprintf("\t\t dy2=%lf,\t d2minK=%lf\n", dy2, d2minK); #endif if(dy2 > d2minK) /* note that dy2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jright] != id1i) { #ifdef TRACER Rprintf("\t\t %d and %d are not identical\n", i, jright); #endif #endif dx = x2[jright] - x1i; d2 = dx * dx + dy2; #ifdef TRACER Rprintf("\t\t d2=%lf\n", d2); #endif if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ #ifdef TRACER Rprintf("\t\t overwrite d2min[nk1]=%lf by d2=%lf\n", d2min[nk1], d2); #endif d2min[nk1] = d2; jwhich = jright; #ifdef WHICH which[nk1] = jright; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } #ifdef TRACER Rprintf("\t\t sorted nn distances:\n"); for(kk = 0; kk < nk; kk++) Rprintf("\t\t d2min[%d] = %lf\n", kk, d2min[kk]); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; #ifdef TRACER Rprintf("\t\t d2minK=%lf\n", d2minK); #endif } #ifdef EXCLUDE } #endif } /* end forward search */ #ifdef TRACER Rprintf("\tEnd forward search\n"); #endif } if(lastjwhich > 0) { #ifdef TRACER Rprintf("\tBackward search from lastjwhich=%d:\n", lastjwhich); #endif /* search backward from previous nearest neighbour */ for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { #ifdef TRACER Rprintf("\tjleft=%d \t (%lf, %lf)\n", jleft, x2[jleft], y2[jleft]); #endif dy = y1i - y2[jleft]; dy2 = 
dy * dy; #ifdef TRACER Rprintf("\t\t dy2=%lf,\t d2minK=%lf\n", dy2, d2minK); #endif if(dy2 > d2minK) /* note that dy2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jleft] != id1i) { #ifdef TRACER Rprintf("\t\t %d and %d are not identical\n", i, jleft); #endif #endif dx = x2[jleft] - x1i; d2 = dx * dx + dy2; #ifdef TRACER Rprintf("\t\t d2=%lf\n", d2); #endif if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ #ifdef TRACER Rprintf("\t\t overwrite d2min[nk1]=%lf by d2=%lf\n", d2min[nk1], d2); #endif d2min[nk1] = d2; jwhich = jleft; #ifdef WHICH which[nk1] = jleft; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } #ifdef TRACER Rprintf("\t\t sorted nn distances:\n"); for(kk = 0; kk < nk; kk++) Rprintf("\t\t d2min[%d] = %lf\n", kk, d2min[kk]); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; #ifdef TRACER Rprintf("\t\t d2minK=%lf\n", d2minK); #endif } #ifdef EXCLUDE } #endif } /* end backward search */ #ifdef TRACER Rprintf("\tEnd backward search\n"); #endif } /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[nk * i + k] = which[k] + 1; /* R indexing */ #endif } /* save index of last neighbour encountered */ lastjwhich = jwhich; /* end of loop over points i */ } } } spatstat/src/nn3Ddist.h0000644000176200001440000000364413166361223014555 0ustar liggesusers/* nn3Ddist.h Code template for nearest-neighbour algorithms for 3D point patterns Input is a single point pattern - supports 'nndist' and 'nnwhich' This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns 
   distance to nearest neighbour

   WHICH    #defined if function returns id of nearest neighbour

   Either or both DIST and WHICH may be defined.

   THE FOLLOWING CODE ASSUMES THAT THE POINT PATTERN IS SORTED
   IN ASCENDING ORDER OF THE z COORDINATE

   $Revision: 1.5 $ $Date: 2013/06/28 10:38:46 $

*/

void FNAME(n, x, y, z, nnd, nnwhich, huge)
     /* inputs */
     int *n;
     double *x, *y, *z, *huge;
     /* outputs */
     double *nnd;
     int *nnwhich;
{
  int npoints, i, j, maxchunk;
  double d2, d2min, xi, yi, zi, dx, dy, dz, dz2, hu, hu2;
#ifdef WHICH
  int which;
#endif

  hu = *huge;
  hu2 = hu * hu;   /* squared 'infinity': initial nearest-neighbour distance */

  npoints = *n;

  /* chunked loop over points, polling for user interrupt between chunks */
  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
      d2min = hu2;
#ifdef WHICH
      which = -1;
#endif
      xi = x[i];
      yi = y[i];
      zi = z[i];
      /* search backward (points sorted by z: stop when the z-gap alone
         exceeds the current best squared distance) */
      if(i > 0){
        for(j = i - 1; j >= 0; --j) {
          dz = z[j] - zi;
          dz2 = dz * dz;
          if(dz2 > d2min)
            break;
          dx = x[j] - xi;
          dy = y[j] - yi;
          d2 = dx * dx + dy * dy + dz2;
          if (d2 < d2min) {
            d2min = d2;
#ifdef WHICH
            which = j;
#endif
          }
        }
      }

      /* search forward */
      if(i < npoints - 1) {
        for(j = i + 1; j < npoints; ++j) {
          dz = z[j] - zi;
          dz2 = dz * dz;
          if(dz2 > d2min)
            break;
          dx = x[j] - xi;
          dy = y[j] - yi;
          d2 = dx * dx + dy * dy + dz2;
          if (d2 < d2min) {
            d2min = d2;
#ifdef WHICH
            which = j;
#endif
          }
        }
      }
#ifdef DIST
      nnd[i] = sqrt(d2min);
#endif
#ifdef WHICH
      /* convert to R indexing */
      nnwhich[i] = which + 1;
#endif
    }
  }
}
spatstat/src/Kborder.h0000755000176200001440000001066313166361223014461 0ustar liggesusers/*

  Kborder.h

  Code template for K function estimators in Kborder.c
  (border-corrected estimator).

  Variables:

     FNAME     function name

     OUTTYPE   storage type of the output vectors ('int' or 'double')

     WEIGHTED  #defined for weighted (inhom) K function

  Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013
  Licence: GPL >= 2

  $Revision: 1.11 $ $Date: 2013/09/18 04:06:59 $

*/

void FNAME(
           nxy, x, y,
#ifdef WEIGHTED
           w,
#endif
           b, nr, rmax, numer, denom)
     /* inputs */
     int *nxy, *nr;                 /* number of points; number of r values */
     double *x, *y, *b, *rmax;      /* coordinates; border distances; max r */
#ifdef WEIGHTED
     double *w;                     /* weights */
#endif
     /* outputs */
     OUTTYPE *numer, *denom;
{
  int i, j, l, n, nt, n1, nt1, lmin, lmax, maxchunk;
  double dt, tmax, xi, yi, bi, maxsearch, max2search;
  double bratio, dratio, dij, dij2, dx, dy, dx2;
  OUTTYPE *numerLowAccum, *numerHighAccum, *denomAccum;
  OUTTYPE naccum, daccum;
#ifdef WEIGHTED
  double wi, wj, wij;
#endif

#ifdef WEIGHTED
#define ZERO 0.0
#define WI wi
#define WJ wj
#define WIJ wij
#else
#define ZERO 0
#define WI 1
#define WJ 1
#define WIJ 1
#endif

  n = *nxy;
  nt = *nr;
  n1 = n - 1;
  nt1 = nt - 1;
  dt = (*rmax)/(nt-1);     /* width of one r interval */
  tmax = *rmax;
  /* initialise accumulators (filled in per-interval, cumulated at the end) */
  numerLowAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE));
  numerHighAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE));
  denomAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE));
  for(l = 0; l < nt; l++)
    numer[l] = denom[l] = numerLowAccum[l] = numerHighAccum[l] = denomAccum[l] = ZERO;

  if(n == 0)
    return;

  /* loop in chunks of 2^16 */

  i = 0; maxchunk = 0;
  while(i < n) {

    R_CheckUserInterrupt();

    maxchunk += 65536;
    if(maxchunk > n) maxchunk = n;

    for(; i < maxchunk; i++) {

      /* -------- DENOMINATOR -------------*/
      bi = b[i];
#ifdef WEIGHTED
      wi = w[i];
#endif
      /* increment denominator for all r < b[i] */
      bratio = bi/dt;
      /* lmax is the largest integer STRICTLY less than bratio */
      lmax = (int) ceil(bratio) - 1;
      lmax = (lmax <= nt1) ? lmax : nt1;
      /* effectively increment entries 0 to lmax */
      if(lmax >= 0)
        denomAccum[lmax] += WI;

      /* ---------- NUMERATOR -----------*/
      /* scan through points (x[j],y[j]) */
      xi = x[i];
      yi = y[i];
      maxsearch = (bi < tmax) ? bi : tmax;
      max2search = maxsearch * maxsearch;

      /* scan backward from i-1
         until |x[j]-x[i]| > maxsearch or until we run out
         (points assumed sorted by x) */
      if(i > 0) {
        for(j=i-1; j >= 0; j--) {
          /* squared interpoint distance */
          dx = x[j] - xi;
          dx2 = dx * dx;
          if(dx2 >= max2search)
            break;
          dy = y[j] - yi;
          dij2 = dx2 + dy * dy;
          if(dij2 < max2search) {
#ifdef WEIGHTED
            wj = w[j];
#endif
            /* increment numerator for all r such that dij <= r < bi */
            dij = (double) sqrt(dij2);
            dratio = dij/dt;
            /* smallest integer greater than or equal to dratio */
            lmin = (int) ceil(dratio);
            /* increment entries lmin to lmax inclusive */
            if(lmax >= lmin) {
#ifdef WEIGHTED
              wij = wi * wj;
#endif
              numerLowAccum[lmin] += WIJ;
              numerHighAccum[lmax] += WIJ;
            }
          }
        }
      }

      /* scan forward from i+1
         until x[j]-x[i] > maxsearch or until we run out */
      if(i < n1) {
        for(j=i+1; j < n; j++) {
          /* squared interpoint distance */
          dx = x[j] - xi;
          dx2 = dx * dx;
          if(dx2 >= max2search)
            break;
          dy = y[j] - yi;
          dij2 = dx2 + dy * dy;
          if(dij2 < max2search) {
#ifdef WEIGHTED
            wj = w[j];
#endif
            /* increment numerator for all r such that dij <= r < bi */
            dij = (double) sqrt(dij2);
            dratio = dij/dt;
            /* smallest integer greater than or equal to dratio */
            lmin = (int) ceil(dratio);
            /* increment entries lmin to lmax inclusive */
            if(lmax >= lmin) {
#ifdef WEIGHTED
              wij = wi * wj;
#endif
              numerLowAccum[lmin] += WIJ;
              numerHighAccum[lmax] += WIJ;
            }
          }
        }
      }
    }
  }

  /*
     Now use the accumulated values to compute the numerator and denominator.
     The value of denomAccum[l] should be added to denom[k] for all k <= l.
     numerHighAccum[l] should be added to numer[k] for all k <= l.
     numerLowAccum[l] should then be subtracted from numer[k] for k <= l.
  */
  for(l=nt1, naccum=daccum=ZERO; l>=0; l--) {
    daccum += denomAccum[l];
    denom[l] = daccum;

    naccum += numerHighAccum[l];
    numer[l] = naccum;
    naccum -= numerLowAccum[l];
  }
}

#undef ZERO
#undef WI
#undef WJ
#undef WIJ
spatstat/src/linearradius.c0000644000176200001440000000351713166361223015543 0ustar liggesusers#include #include #include "chunkloop.h"

/* NOTE(review): the two bare '#include' directives above lost their
   angle-bracketed header names (presumably <R.h> etc.) during text
   extraction of the archive -- confirm against the original file. */

/*
   linearradius.c

   Bounding radius in linear network

   $Revision: 1.1 $ $Date: 2016/07/19 06:52:57 $

*/

#define DPATH(I,J) dpath[(J) + Nv * (I)]

#include "yesno.h"

#undef DEBUG

void linearradius(ns, from, to,   /* network segments */
                  lengths,        /* segment lengths */
                  nv, dpath,      /* shortest path distances between vertices */
                  huge, result)
     int *nv, *ns;
     int *from, *to;     /* integer vectors (mappings) */
     double *dpath;      /* matrix of shortest path distances between vertices */
     double *lengths;    /* vector of segment lengths */
     double *huge;       /* very large value */
     double *result;     /* output: bounding radius of the network */
{
  int Nv, Ns;
  int i, j, A, B, C, D;
  double AB, AC, AD, BC, BD, CD;
  double sAij, sBij, sAiMax, sBiMax, smin;
  int maxchunk;

  Nv = *nv;
  Ns = *ns;

  smin = *huge;

  OUTERCHUNKLOOP(i, Ns, maxchunk, 16384) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(i, Ns, maxchunk, 16384) {
      /* indices of endpoints of segment i */
      A = from[i];
      B = to[i];
      AB = lengths[i];
      sAiMax = sBiMax = AB/2.0;
      for(j = 0; j < Ns; j++) {
        if(j != i) {
          /* indices of endpoints of segment j */
          C = from[j];
          D = to[j];
          CD = lengths[j];
          AC = DPATH(A,C);
          AD = DPATH(A,D);
          BC = DPATH(B,C);
          BD = DPATH(B,D);
          /* max dist from A to any point in segment j */
          sAij = (AD > AC + CD) ? AC + CD :
            (AC > AD + CD) ? AD + CD : (AC + AD + CD)/2.0;
          /* max dist from B to any point in segment j */
          sBij = (BD > BC + CD) ? BC + CD : (BC > BD + CD) ?
            BD + CD : (BC + BD + CD)/2.0;
          /* row-wise maximum */
          if(sAij > sAiMax) sAiMax = sAij;
          if(sBij > sBiMax) sBiMax = sBij;
        }
      }
      /* bounding radius = minimum over endpoints of the maximum distance */
      if(sAiMax < smin) smin = sAiMax;
      if(sBiMax < smin) smin = sBiMax;
    }
  }
  *result = smin;
}
spatstat/src/Knone.h0000644000176200001440000000567213166361223014140 0ustar liggesusers/*

  Knone.h

  Code template for K function estimators in Knone.c
  (estimator with no edge correction).

  Variables:

     FNAME     function name

     OUTTYPE   storage type of the output 'numer' ('int' or 'double')

     WEIGHTED  #defined for weighted (inhom) K function

  Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013
  Licence: GPL >= 2

  $Revision: 1.6 $ $Date: 2013/09/18 04:08:26 $

*/

void FNAME(
           nxy, x, y,
#ifdef WEIGHTED
           w,
#endif
           nr, rmax, numer)
     /* inputs */
     int *nxy, *nr;            /* number of points; number of r values */
     double *x, *y, *rmax;     /* coordinates (sorted by x); maximum r */
#ifdef WEIGHTED
     double *w;                /* weights */
#endif
     /* output */
     OUTTYPE *numer;
{
  int i, j, l, n, nt, n1, lmin, lmax, maxchunk;
  double dt, tmax, tmax2, xi, yi;
  double dratio, dij, dij2, dx, dy, dx2;
#ifdef WEIGHTED
  double wi, wj, wij;
#endif

#ifdef WEIGHTED
#define ZERO 0.0
#define WI wi
#define WJ wj
#define WIJ wij
#else
#define ZERO 0
#define WI 1
#define WJ 1
#define WIJ 1
#endif

  n = *nxy;
  nt = *nr;
  n1 = n - 1;
  lmax = nt - 1;

  dt = (*rmax)/(nt-1);      /* width of one r interval */
  tmax = *rmax;
  tmax2 = tmax * tmax;

  /* initialise */
  for(l = 0; l < nt; l++)
    numer[l] = ZERO;

  if(n == 0)
    return;

  /* loop in chunks of 2^16 */

  i = 0; maxchunk = 0;
  while(i < n) {

    R_CheckUserInterrupt();

    maxchunk += 65536;
    if(maxchunk > n) maxchunk = n;

    for(; i < maxchunk; i++) {

#ifdef WEIGHTED
      wi = w[i];
#endif
      xi = x[i];
      yi = y[i];
      /* scan backward from i-1
         until x[j] < x[i] -tmax or until we run out */
      if(i > 0) {
        for(j=i-1; j >= 0; j--) {
          dx = x[j] - xi;
          dx2 = dx * dx;
          if(dx2 >= tmax2)
            break;
          dy = y[j] - yi;
          dij2 = dx2 + dy * dy;
          if(dij2 < tmax2) {
#ifdef WEIGHTED
            wj = w[j];
#endif
            /* increment numerator for all r >= dij */
            dij = (double) sqrt(dij2);
            dratio = dij/dt;
            /* smallest integer greater than or equal to dratio */
            lmin = (int) ceil(dratio);
            /* effectively increment entries lmin to lmax inclusive
               (only numer[lmin] is touched here; cumulated at the end) */
            if(lmin <= lmax) {
#ifdef WEIGHTED
              wij = wi * wj;
#endif
              numer[lmin] += WIJ;
            }
          }
        }
      }

      /* scan forward from i+1
         until x[j] > x[i] + tmax or until we run out */
      if(i < n1) {
        for(j=i+1; j < n; j++) {
          /* squared interpoint distance */
          dx = x[j] - xi;
          dx2 = dx * dx;
          if(dx2 >= tmax2)
            break;
          dy = y[j] - yi;
          dij2 = dx2 + dy * dy;
          if(dij2 < tmax2) {
#ifdef WEIGHTED
            wj = w[j];
#endif
            /* increment numerator for all r >= dij */
            dij = (double) sqrt(dij2);
            dratio = dij/dt;
            /* smallest integer greater than or equal to dratio */
            lmin = (int) ceil(dratio);
            /* increment entries lmin to lmax inclusive */
            if(lmin <= lmax) {
#ifdef WEIGHTED
              wij = wi * wj;
#endif
              numer[lmin] += WIJ;
            }
          }
        }
      }
    }
  }

  /* Now accumulate the numerator (convert per-interval counts to
     cumulative counts over r). */
  if(nt > 1)
    for(l=1; l < nt; l++)
      numer[l] += numer[l-1];
}

#undef ZERO
#undef WI
#undef WJ
#undef WIJ
spatstat/src/KrectV1.h0000644000176200001440000000026313166361223014340 0ustar liggesusers/*
   KrectV1.h
   (the original comment mislabelled this file as 'KrectV2.h')

   Instantiates KrectV2.h with or without isotropic correction.
*/
if((*doIso) == 1) {
#define ISOTROPIC
#include "KrectV2.h"
} else {
#undef ISOTROPIC
#include "KrectV2.h"
}
spatstat/src/linalg.c0000755000176200001440000001257113166361223014332 0ustar liggesusers/*

   linalg.c

   Home made linear algebra
   Yes, really

   $Revision: 1.11 $  $Date: 2016/09/30 10:57:20 $

   Csumouter
   Cwsumouter
   Csum2outer
   Cwsum2outer
   Cquadform
   Csumsymouter
   Cwsumsymouter

*/

#include #include #include "chunkloop.h"

/* NOTE(review): the two bare '#include' directives above lost their
   angle-bracketed header names during text extraction -- confirm against
   the original file. */

/* ...............
matrices ..............................*/ /* ........................sums of outer products ........*/ /* Csumouter computes the sum of outer products of columns of x y = sum[j] (x[,j] %o% x[,j]) */ void Csumouter(x, n, p, y) double *x; /* p by n matrix */ int *n, *p; double *y; /* output matrix p by p, initialised to zero */ { int N, P; register int i, j, k, maxchunk; register double xij, xkj; register double *xcolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { xcolj = x + j * P; for(i = 0; i < P; i++) { xij = xcolj[i]; for(k = 0; k < P; k++) { xkj = xcolj[k]; y[k * P + i] += xij * xkj; } } } } } /* Cwsumouter computes the weighted sum of outer products of columns of x y = sum[j] (w[j] * x[,j] %o% x[,j]) */ void Cwsumouter(x, n, p, w, y) double *x; /* p by n matrix */ int *n, *p; double *w; /* weight vector, length n */ double *y; /* output matrix p by p, initialised to zero */ { int N, P; register int i, j, k, maxchunk; register double wj, xij, wjxij, xkj; register double *xcolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { wj = w[j]; xcolj = x + j * P; for(i = 0; i < P; i++) { xij = xcolj[i]; wjxij = wj * xij; for(k = 0; k < P; k++) { xkj = xcolj[k]; y[k * P + i] += wjxij * xkj; } } } } } /* Csum2outer computes the sum of outer products of columns of x and y z = sum[j] (x[,j] %o% y[,j]) */ void Csum2outer(x, y, n, px, py, z) double *x, *y; /* matrices (px by n) and (py by n) */ int *n, *px, *py; double *z; /* output matrix px by py, initialised to zero */ { int N, Px, Py; register int i, j, k, maxchunk; register double xij, ykj; register double *xcolj, *ycolj; N = *n; Px = *px; Py = *py; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { xcolj = x + j * Px; ycolj = y + j * Py; for(i = 0; i < Px; i++) { xij = xcolj[i]; for(k = 0; k < Py; k++) { ykj = ycolj[k]; y[k * Px 
+ i] += xij * ykj; } } } } } /* Cwsum2outer computes the weighted sum of outer products of columns of x and y z = sum[j] (w[j] * x[,j] %o% y[,j]) */ void Cwsum2outer(x, y, n, px, py, w, z) double *x, *y; /* matrices (px by n) and (py by n) */ int *n, *px, *py; double *w; /* weight vector, length n */ double *z; /* output matrix px by py, initialised to zero */ { int N, Px, Py; register int i, j, k, maxchunk; register double wj, xij, wjxij, ykj; register double *xcolj, *ycolj; N = *n; Px = *px; Py = *py; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { wj = w[j]; xcolj = x + j * Px; ycolj = y + j * Py; for(i = 0; i < Px; i++) { xij = xcolj[i]; wjxij = wj * xij; for(k = 0; k < Py; k++) { ykj = ycolj[k]; z[k * Px + i] += wjxij * ykj; } } } } } /* ........................quadratic/bilinear forms ......*/ /* computes the quadratic form values y[j] = x[,j] %*% v %*% t(x[,j]) */ void Cquadform(x, n, p, v, y) double *x; /* p by n matrix */ int *n, *p; double *v; /* p by p matrix */ double *y; /* output vector, length n */ { int N, P; register int i, j, k, maxchunk; register double xij, xkj, vik, yj; register double *xcolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { xcolj = x + j * P; yj = 0; for(i = 0; i < P; i++) { xij = xcolj[i]; for(k = 0; k < P; k++) { xkj = xcolj[k]; vik = v[k * P + i]; yj += xij * vik * xkj; } } y[j] = yj; } } } /* computes the bilinear form values z[j] = x[,j] %*% v %*% t(y[,j]) */ void Cbiform(x, y, n, p, v, z) double *x, *y; /* p by n matrices */ int *n, *p; double *v; /* p by p matrix */ double *z; /* output vector, length n */ { int N, P; register int i, j, k, maxchunk; register double xij, vik, ykj, zj; register double *xcolj, *ycolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { xcolj = x + j * P; ycolj = y + j * P; zj = 0; for(i = 0; i < P; i++) 
{ xij = xcolj[i]; for(k = 0; k < P; k++) { ykj = ycolj[k]; vik = v[k * P + i]; zj += xij * vik * ykj; } } z[j] = zj; } } } /* ............... 3D arrays ...................... */ #undef FNAME #undef WEIGHTED /* sumsymouter computes the sum of outer products x[,i,j] %o% x[,j,i] over all pairs i, j */ #define FNAME Csumsymouter #include "sumsymouter.h" #undef FNAME /* wsumsymouter computes the weighted sum of outer products w[i,j] * (x[,i,j] %o% x[,j,i]) over all pairs i, j */ #define FNAME Cwsumsymouter #define WEIGHTED #include "sumsymouter.h" #undef FNAME #undef WEIGHTED spatstat/src/nn3DdistX.h0000644000176200001440000000520413166361223014677 0ustar liggesusers/* nn3DdistX.h Code template for nearest-neighbour algorithms for 3D point patterns Input is two point patterns - supports 'nncross' This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if the two patterns may include common points (which are not to be counted as neighbours) Either or both DIST and WHICH may be defined. THE FOLLOWING CODE ASSUMES THAT BOTH POINT PATTERNS ARE SORTED IN ASCENDING ORDER OF THE z COORDINATE If EXCLUDE is #defined, Code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. 
$Revision: 1.5 $ $Date: 2013/09/20 10:01:25 $ */ void FNAME(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge) /* inputs */ int *n1, *n2, *id1, *id2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; /* outputs */ double *nnd; int *nnwhich; { int npoints1, npoints2, i, j, jwhich, lastjwhich; double d2, d2min, x1i, y1i, z1i, dx, dy, dz, dz2, hu, hu2; #ifdef EXCLUDE int id1i; #endif hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; for(i = 0; i < npoints1; i++) { R_CheckUserInterrupt(); d2min = hu2; jwhich = -1; x1i = x1[i]; y1i = y1[i]; z1i = z1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif /* search backward from previous nearest neighbour */ if(lastjwhich > 0) { for(j = lastjwhich - 1; j >= 0; --j) { dz = z2[j] - z1i; dz2 = dz * dz; if(dz2 > d2min) break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[j] != id1i) { #endif dx = x2[j] - x1i; dy = y2[j] - y1i; d2 = dx * dx + dy * dy + dz2; if (d2 < d2min) { d2min = d2; jwhich = j; } #ifdef EXCLUDE } #endif } } /* search forward from previous nearest neighbour */ if(lastjwhich < npoints2) { for(j = lastjwhich; j < npoints2; ++j) { dz = z2[j] - z1i; dz2 = dz * dz; if(dz2 > d2min) break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[j] != id1i) { #endif dx = x2[j] - x1i; dy = y2[j] - y1i; d2 = dx * dx + dy * dy + dz2; if (d2 < d2min) { d2min = d2; jwhich = j; } #ifdef EXCLUDE } #endif } } #ifdef DIST nnd[i] = sqrt(d2min); #endif #ifdef WHICH /* convert to R indexing */ nnwhich[i] = jwhich + 1; #endif lastjwhich = jwhich; } } spatstat/src/maxnnd.h0000644000176200001440000000356013166361223014351 0ustar liggesusers/* maxnnd.h Code template for maxnnd to be #included in minnnd.c Macros: FNAME Function name IGNOREZERO #defined if zero distances should be ignored $Revision: 1.2 $ $Date: 2014/09/18 01:00:30 $ */ /* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */ void FNAME(n, x, y, huge, result) /* inputs */ int 
     *n;
     double *x, *y, *huge;
     /* outputs */
     double *result;
{
  int npoints, i, maxchunk, left, right;
  double d2, d2mini, d2max, xi, yi, dx, dy, dy2, hu, hu2;

  hu = *huge;
  hu2 = hu * hu;     /* squared 'infinity' */

  npoints = *n;

  /* maximum (over all i) nearest-neighbour distance, squared */
  d2max = 0.0;

  if(npoints == 0) return;

  /* loop in chunks of 2^16 */

  i = 0; maxchunk = 0;
  while(i < npoints) {

    R_CheckUserInterrupt();

    maxchunk += 65536;
    if(maxchunk > npoints) maxchunk = npoints;

    for(; i < maxchunk; i++) {

      xi = x[i];
      yi = y[i];

      /* nearest-neighbour distance for point i, squared */
      d2mini = hu2;

      if(i < npoints - 1) {
        /* search forward (y sorted ascending: stop when the y-gap alone
           exceeds the current best squared distance) */
        for(right = i + 1; right < npoints; ++right) {
          dy = y[right] - yi;
          dy2 = dy * dy;
          if(dy2 > d2mini)
            break;
          dx = x[right] - xi;
          d2 = dx * dx + dy2;
          if (d2 < d2mini) {
#ifdef IGNOREZERO
            if(d2 > 0) {
#endif
              d2mini = d2;
              /* once d2mini drops to d2max or below, point i cannot
                 increase the overall maximum, so stop searching */
              if(d2mini <= d2max)
                break;
#ifdef IGNOREZERO
            }
#endif
          }
        }
      }
      if(i > 0 && d2mini > d2max){
        /* search backward */
        for(left = i - 1; left >= 0; --left) {
          dy = yi - y[left];
          dy2 = dy * dy;
          if(dy2 > d2mini)
            break;
          dx = x[left] - xi;
          d2 = dx * dx + dy2;
          if (d2 < d2mini) {
#ifdef IGNOREZERO
            if(d2 > 0) {
#endif
              d2mini = d2;
              if(d2mini <= d2max)
                break;
#ifdef IGNOREZERO
            }
#endif
          }
        }
      }
      if(d2mini > d2max)
        d2max = d2mini;
    }
  }

  *result = d2max;
}
spatstat/src/linSnncross.c0000644000176200001440000000127313166361223015371 0ustar liggesusers#include #include "yesno.h"

/* NOTE(review): the bare '#include' above lost its angle-bracketed header
   name during text extraction -- confirm against the original file. */

/*
   linSnncross.c

   Shortest-path distances between nearest neighbours in linear network
   One pattern to another pattern

   $Revision: 1.3 $  $Date: 2015/11/28 10:08:55 $

   'Sparse version'

   Works with sparse representation
   Does not allow 'exclusion'
   Requires point data to be ordered by segment index.
   linSnndcross
   linSnndwhich
*/

void Clinvdist(), Clinvwhichdist(); /* functions from linvdist.c */

#undef HUH

/* definition of linSnndcross */
#define FNAME linSnndcross
#undef WHICH
#include "linSnncross.h"

/* definition of linSnndwhich */
#undef FNAME
#define FNAME linSnndwhich
#define WHICH
#include "linSnncross.h"
spatstat/src/closefuns.h0000644000176200001440000006276613166361223015075 0ustar liggesusers/*
  closefuns.h

  Function definitions to be #included in closepair.c
  several times with different values of macros.

  Macros used:

  CLOSEFUN   name of function for 'closepairs'
  CROSSFUN   name of function for 'crosspairs'
  DIST       if defined, also return d
  COORDS     if defined, also return xi, yi, xj, yj, dx, dy
  THRESH     if defined, also return 1(d < s)
  ZCOORD     if defined, coordinates are 3-dimensional
  SINGLE     if defined, capture only i < j

  $Revision: 1.9 $ $Date: 2015/12/30 04:01:51 $

*/

#ifdef ZCOORD
#define SPACEDIM 3
#else
#define SPACEDIM 2
#endif

/* Find all close pairs (distance <= rmax) within one point pattern.
   Points are assumed sorted by x coordinate; output arrays grow by
   doubling when the initial guess 'nguess' is exceeded. */
SEXP CLOSEFUN(SEXP xx,
              SEXP yy,
#ifdef ZCOORD
              SEXP zz,
#endif
              SEXP rr,
#ifdef THRESH
              SEXP ss,
#endif
              SEXP nguess)
{
  double *x, *y;
  double xi, yi, rmax, r2max, rmaxplus, dx, dy, d2;
#ifdef ZCOORD
  double *z;
  double zi, dz;
#endif
  int n, k, kmax, kmaxold, maxchunk, i, j, m;
  /* local storage */
  int *iout, *jout;
  /* R objects in return value */
  SEXP Out, iOut, jOut;
  /* external storage pointers */
  int *iOutP, *jOutP;
#ifdef COORDS
  double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout;
  SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut;
  double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, *dyOutP;
#ifdef ZCOORD
  double *ziout, *zjout, *dzout;
  SEXP ziOut, zjOut, dzOut;
  double *ziOutP, *zjOutP, *dzOutP;
#endif
#endif
#ifdef DIST
  double *dout;
  SEXP dOut;
  double *dOutP;
#endif
#ifdef THRESH
  double s, s2;
  int *tout;
  SEXP tOut;
  int *tOutP;
#endif

  /* protect R objects from garbage collector */
  PROTECT(xx = AS_NUMERIC(xx));
  PROTECT(yy = AS_NUMERIC(yy));
#ifdef ZCOORD
  PROTECT(zz = AS_NUMERIC(zz));
#endif
  PROTECT(rr = AS_NUMERIC(rr));
  PROTECT(nguess = AS_INTEGER(nguess));
#ifdef THRESH
  PROTECT(ss = AS_NUMERIC(ss));
#define NINPUTS (3+SPACEDIM)
#else
#define NINPUTS (2+SPACEDIM)
#endif

  /* Translate arguments from R to C */
  x = NUMERIC_POINTER(xx);
  y = NUMERIC_POINTER(yy);
#ifdef ZCOORD
  z = NUMERIC_POINTER(zz);
#endif
  n = LENGTH(xx);
  rmax = *(NUMERIC_POINTER(rr));
  kmax = *(INTEGER_POINTER(nguess));
  r2max = rmax * rmax;
  rmaxplus = rmax + rmax/16.0;   /* slack to avoid missing pairs by rounding */
#ifdef THRESH
  s = *(NUMERIC_POINTER(ss));
  s2 = s * s;
#endif

  k = 0;   /* k is the next available storage location
              and also the current length of the list */

  if(n > 0 && kmax > 0) {
    /* allocate space */
    iout = (int *) R_alloc(kmax, sizeof(int));
    jout = (int *) R_alloc(kmax, sizeof(int));
#ifdef COORDS
    xiout = (double *) R_alloc(kmax, sizeof(double));
    yiout = (double *) R_alloc(kmax, sizeof(double));
    xjout = (double *) R_alloc(kmax, sizeof(double));
    yjout = (double *) R_alloc(kmax, sizeof(double));
    dxout = (double *) R_alloc(kmax, sizeof(double));
    dyout = (double *) R_alloc(kmax, sizeof(double));
#ifdef ZCOORD
    ziout = (double *) R_alloc(kmax, sizeof(double));
    zjout = (double *) R_alloc(kmax, sizeof(double));
    dzout = (double *) R_alloc(kmax, sizeof(double));
#endif
#endif
#ifdef DIST
    dout = (double *) R_alloc(kmax, sizeof(double));
#endif
#ifdef THRESH
    tout = (int *) R_alloc(kmax, sizeof(int));
#endif

    /* loop in chunks of 2^16 */
    i = 0; maxchunk = 0;
    while(i < n) {

      R_CheckUserInterrupt();

      maxchunk += 65536;
      if(maxchunk > n) maxchunk = n;

      for(; i < maxchunk; i++) {

        xi = x[i];
        yi = y[i];
#ifdef ZCOORD
        zi = z[i];
#endif

#ifndef SINGLE
        if(i > 0) {
          /* scan backward (points sorted by x) */
          for(j = i - 1; j >= 0; j--) {
            dx = x[j] - xi;
            if(dx < -rmaxplus)
              break;
            dy = y[j] - yi;
            d2 = dx * dx + dy * dy;
#ifdef ZCOORD
            if(d2 <= r2max) {
              dz = z[j] - zi;
              d2 = d2 + dz * dz;
#endif
            if(d2 <= r2max) {
              /* add this (i, j) pair to output */
              if(k >= kmax) {
                /* overflow; allocate more space (doubling) */
                kmaxold = kmax;
                kmax = 2 * kmax;
                iout = intRealloc(iout, kmaxold, kmax);
                jout = intRealloc(jout, kmaxold, kmax);
#ifdef COORDS
                xiout = dblRealloc(xiout, kmaxold, kmax);
                yiout = dblRealloc(yiout, kmaxold, kmax);
                xjout = dblRealloc(xjout, kmaxold, kmax);
                yjout = dblRealloc(yjout, kmaxold, kmax);
                dxout = dblRealloc(dxout, kmaxold, kmax);
                dyout = dblRealloc(dyout, kmaxold, kmax);
#ifdef ZCOORD
                ziout = dblRealloc(ziout, kmaxold, kmax);
                zjout = dblRealloc(zjout, kmaxold, kmax);
                dzout = dblRealloc(dzout, kmaxold, kmax);
#endif
#endif
#ifdef DIST
                dout = dblRealloc(dout, kmaxold, kmax);
#endif
#ifdef THRESH
                tout = intRealloc(tout, kmaxold, kmax);
#endif
              }
              jout[k] = j + 1;  /* R indexing */
              iout[k] = i + 1;
#ifdef COORDS
              xiout[k] = xi;
              yiout[k] = yi;
              xjout[k] = x[j];
              yjout[k] = y[j];
              dxout[k] = dx;
              dyout[k] = dy;
#ifdef ZCOORD
              ziout[k] = zi;
              zjout[k] = z[j];
              dzout[k] = dz;
#endif
#endif
#ifdef DIST
              dout[k] = sqrt(d2);
#endif
#ifdef THRESH
              tout[k] = (d2 <= s2) ? 1 : 0;
#endif
              ++k;
            }
#ifdef ZCOORD
            }
#endif
          }
        }
#endif

        if(i + 1 < n) {
          /* scan forward */
          for(j = i + 1; j < n; j++) {
            dx = x[j] - xi;
            if(dx > rmaxplus)
              break;
            dy = y[j] - yi;
            d2 = dx * dx + dy * dy;
#ifdef ZCOORD
            if(d2 <= r2max) {
              dz = z[j] - zi;
              d2 = d2 + dz * dz;
#endif
            if(d2 <= r2max) {
              /* add this (i, j) pair to output */
              if(k >= kmax) {
                /* overflow; allocate more space */
                kmaxold = kmax;
                kmax = 2 * kmax;
                iout = intRealloc(iout, kmaxold, kmax);
                jout = intRealloc(jout, kmaxold, kmax);
#ifdef COORDS
                xiout = dblRealloc(xiout, kmaxold, kmax);
                yiout = dblRealloc(yiout, kmaxold, kmax);
                xjout = dblRealloc(xjout, kmaxold, kmax);
                yjout = dblRealloc(yjout, kmaxold, kmax);
                dxout = dblRealloc(dxout, kmaxold, kmax);
                dyout = dblRealloc(dyout, kmaxold, kmax);
#ifdef ZCOORD
                ziout = dblRealloc(ziout, kmaxold, kmax);
                zjout = dblRealloc(zjout, kmaxold, kmax);
                dzout = dblRealloc(dzout, kmaxold, kmax);
#endif
#endif
#ifdef DIST
                dout = dblRealloc(dout, kmaxold, kmax);
#endif
#ifdef THRESH
                tout = intRealloc(tout, kmaxold, kmax);
#endif
              }
              jout[k] = j + 1;  /* R indexing */
              iout[k] = i + 1;
#ifdef COORDS
              xiout[k] = xi;
              yiout[k] = yi;
              xjout[k] = x[j];
              yjout[k] = y[j];
              dxout[k] = dx;
              dyout[k] = dy;
#ifdef ZCOORD
              ziout[k] = zi;
              zjout[k] = z[j];
              dzout[k] = dz;
#endif
#endif
#ifdef DIST
              dout[k] = sqrt(d2);
#endif
#ifdef THRESH
              tout[k] = (d2 <= s2) ? 1 : 0;
#endif
              ++k;
            }
#ifdef ZCOORD
            }
#endif
          }
        }
        /* end of i loop */
      }
    }
  }

  /* return a list of vectors */
  PROTECT(iOut = NEW_INTEGER(k));
  PROTECT(jOut = NEW_INTEGER(k));
#ifdef COORDS
  PROTECT(xiOut = NEW_NUMERIC(k));
  PROTECT(yiOut = NEW_NUMERIC(k));
  PROTECT(xjOut = NEW_NUMERIC(k));
  PROTECT(yjOut = NEW_NUMERIC(k));
  PROTECT(dxOut = NEW_NUMERIC(k));
  PROTECT(dyOut = NEW_NUMERIC(k));
#ifdef ZCOORD
  PROTECT(ziOut = NEW_NUMERIC(k));
  PROTECT(zjOut = NEW_NUMERIC(k));
  PROTECT(dzOut = NEW_NUMERIC(k));
#endif
#endif
#ifdef DIST
  PROTECT(dOut = NEW_NUMERIC(k));
#endif
#ifdef THRESH
  PROTECT(tOut = NEW_INTEGER(k));
#endif

  /* copy results from transient storage into R vectors */
  if(k > 0) {
    iOutP = INTEGER_POINTER(iOut);
    jOutP = INTEGER_POINTER(jOut);
#ifdef COORDS
    xiOutP = NUMERIC_POINTER(xiOut);
    yiOutP = NUMERIC_POINTER(yiOut);
    xjOutP = NUMERIC_POINTER(xjOut);
    yjOutP = NUMERIC_POINTER(yjOut);
    dxOutP = NUMERIC_POINTER(dxOut);
    dyOutP = NUMERIC_POINTER(dyOut);
#ifdef ZCOORD
    ziOutP = NUMERIC_POINTER(ziOut);
    zjOutP = NUMERIC_POINTER(zjOut);
    dzOutP = NUMERIC_POINTER(dzOut);
#endif
#endif
#ifdef DIST
    dOutP = NUMERIC_POINTER(dOut);
#endif
#ifdef THRESH
    tOutP = INTEGER_POINTER(tOut);
#endif
    for(m = 0; m < k; m++) {
      iOutP[m] = iout[m];
      jOutP[m] = jout[m];
#ifdef COORDS
      xiOutP[m] = xiout[m];
      yiOutP[m] = yiout[m];
      xjOutP[m] = xjout[m];
      yjOutP[m] = yjout[m];
      dxOutP[m] = dxout[m];
      dyOutP[m] = dyout[m];
#ifdef ZCOORD
      ziOutP[m] = ziout[m];
      zjOutP[m] = zjout[m];
      dzOutP[m] = dzout[m];
#endif
#endif
#ifdef DIST
      dOutP[m] = dout[m];
#endif
#ifdef THRESH
      tOutP[m] = tout[m];
#endif
    }
  }

/* layout of the output list: [i, j | thresh | coords | dist] */
#define HEAD 2
#ifdef THRESH
#define NECK 1
#else
#define NECK 0
#endif
#ifdef COORDS
#define MIDDLE (3*SPACEDIM)
#else
#define MIDDLE 0
#endif
#ifdef DIST
#define TAIL 1
#else
#define TAIL 0
#endif

  PROTECT(Out = NEW_LIST(HEAD+NECK+MIDDLE+TAIL));

  SET_VECTOR_ELT(Out, 0, iOut);
  SET_VECTOR_ELT(Out,
                 1, jOut);
#ifdef THRESH
  SET_VECTOR_ELT(Out, HEAD, tOut);
#endif
#ifdef COORDS
#ifdef ZCOORD
  SET_VECTOR_ELT(Out, HEAD+NECK, xiOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+2, ziOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+3, xjOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+4, yjOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+5, zjOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+6, dxOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+7, dyOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+8, dzOut);
#else
  SET_VECTOR_ELT(Out, HEAD+NECK, xiOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+2, xjOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+3, yjOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+4, dxOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+5, dyOut);
#endif
#endif
#ifdef DIST
  SET_VECTOR_ELT(Out, HEAD+NECK+MIDDLE, dOut);
#endif

  UNPROTECT(NINPUTS+1+HEAD+NECK+MIDDLE+TAIL); /* 1 is for 'Out' itself */

  return(Out);
}

#undef NINPUTS
#undef HEAD
#undef NECK
#undef MIDDLE
#undef TAIL

/* ........................................................ */

/* Find all close pairs (distance <= rmax) between two point patterns.
   Both patterns are assumed sorted by x coordinate; a sliding window
   start 'jleft' avoids rescanning from the beginning for each i. */
SEXP CROSSFUN(SEXP xx1,
              SEXP yy1,
#ifdef ZCOORD
              SEXP zz1,
#endif
              SEXP xx2,
              SEXP yy2,
#ifdef ZCOORD
              SEXP zz2,
#endif
              SEXP rr,
#ifdef THRESH
              SEXP ss,
#endif
              SEXP nguess)
{
  /* input vectors */
  double *x1, *y1, *x2, *y2;
#ifdef ZCOORD
  double *z1, *z2;
#endif
  /* lengths */
  int n1, n2, nout, noutmax, noutmaxold, maxchunk;
  /* distance parameter */
  double rmax, r2max, rmaxplus;
  /* indices */
  int i, j, jleft, m;
  /* temporary values */
  double x1i, y1i, xleft, dx, dy, dx2, d2;
#ifdef ZCOORD
  double z1i, dz;
#endif
  /* local storage */
  int *iout, *jout;
  /* R objects in return value */
  SEXP Out, iOut, jOut;
  /* external storage pointers */
  int *iOutP, *jOutP;
#ifdef COORDS
  SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut;
  double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, *dyOutP;
  double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout;
#ifdef ZCOORD
  SEXP ziOut, zjOut, dzOut;
  double *ziOutP, *zjOutP, *dzOutP;
  double *ziout, *zjout, *dzout;
#endif
#endif
#ifdef DIST
  SEXP dOut;
  double *dOutP;
  double *dout;
#endif
#ifdef THRESH
  double s, s2;
  int *tout;
  SEXP tOut;
  int *tOutP;
#endif

  /* protect R objects from garbage collector */
  PROTECT(xx1 = AS_NUMERIC(xx1));
  PROTECT(yy1 = AS_NUMERIC(yy1));
  PROTECT(xx2 = AS_NUMERIC(xx2));
  PROTECT(yy2 = AS_NUMERIC(yy2));
#ifdef ZCOORD
  PROTECT(zz1 = AS_NUMERIC(zz1));
  PROTECT(zz2 = AS_NUMERIC(zz2));
#endif
  PROTECT(rr = AS_NUMERIC(rr));
  PROTECT(nguess = AS_INTEGER(nguess));
#ifdef THRESH
  PROTECT(ss = AS_NUMERIC(ss));
#define NINPUTS (2*SPACEDIM + 3)
#else
#define NINPUTS (2*SPACEDIM + 2)
#endif

  /* Translate arguments from R to C */
  x1 = NUMERIC_POINTER(xx1);
  y1 = NUMERIC_POINTER(yy1);
  x2 = NUMERIC_POINTER(xx2);
  y2 = NUMERIC_POINTER(yy2);
#ifdef ZCOORD
  z1 = NUMERIC_POINTER(zz1);
  z2 = NUMERIC_POINTER(zz2);
#endif
  n1 = LENGTH(xx1);
  n2 = LENGTH(xx2);
  rmax = *(NUMERIC_POINTER(rr));
  noutmax = *(INTEGER_POINTER(nguess));
  r2max = rmax * rmax;
  rmaxplus = rmax + rmax/16.0;   /* slack to avoid missing pairs by rounding */
#ifdef THRESH
  s = *(NUMERIC_POINTER(ss));
  s2 = s * s;
#endif

  nout = 0;   /* nout is the next available storage location
                 and also the current length of the list */

  if(n1 > 0 && n2 > 0 && noutmax > 0) {
    /* allocate space */
    iout = (int *) R_alloc(noutmax, sizeof(int));
    jout = (int *) R_alloc(noutmax, sizeof(int));
#ifdef COORDS
    xiout = (double *) R_alloc(noutmax, sizeof(double));
    yiout = (double *) R_alloc(noutmax, sizeof(double));
    xjout = (double *) R_alloc(noutmax, sizeof(double));
    yjout = (double *) R_alloc(noutmax, sizeof(double));
    dxout = (double *) R_alloc(noutmax, sizeof(double));
    dyout = (double *) R_alloc(noutmax, sizeof(double));
#ifdef ZCOORD
    ziout = (double *) R_alloc(noutmax, sizeof(double));
    zjout = (double *) R_alloc(noutmax, sizeof(double));
    dzout = (double *) R_alloc(noutmax, sizeof(double));
#endif
#endif
#ifdef DIST
    dout = (double *) R_alloc(noutmax, sizeof(double));
#endif
#ifdef THRESH
    tout = (int *) R_alloc(noutmax, sizeof(int));
#endif

    jleft = 0;

    i = 0; maxchunk = 0;

    while(i < n1) {

      R_CheckUserInterrupt();

      maxchunk += 65536;
      if(maxchunk > n1) maxchunk = n1;

      for( ; i < maxchunk; i++) {

        x1i = x1[i];
        y1i = y1[i];
#ifdef ZCOORD
        z1i = z1[i];
#endif

        /* adjust starting point jleft */
        xleft = x1i - rmaxplus;
        while((x2[jleft] < xleft) && (jleft+1 < n2))
          ++jleft;

        /* process from j = jleft until dx > rmax + epsilon */
        for(j=jleft; j < n2; j++) {

          /* squared interpoint distance */
          dx = x2[j] - x1i;
          if(dx > rmaxplus)
            break;
          dx2 = dx * dx;
          dy = y2[j] - y1i;
          d2 = dx2 + dy * dy;
#ifdef ZCOORD
          if(d2 <= r2max) {
            dz = z2[j] - z1i;
            d2 = d2 + dz * dz;
#endif
          if(d2 <= r2max) {
            /* add this (i, j) pair to output */
            if(nout >= noutmax) {
              /* overflow; allocate more space (doubling) */
              noutmaxold = noutmax;
              noutmax = 2 * noutmax;
              iout = intRealloc(iout, noutmaxold, noutmax);
              jout = intRealloc(jout, noutmaxold, noutmax);
#ifdef COORDS
              xiout = dblRealloc(xiout, noutmaxold, noutmax);
              yiout = dblRealloc(yiout, noutmaxold, noutmax);
              xjout = dblRealloc(xjout, noutmaxold, noutmax);
              yjout = dblRealloc(yjout, noutmaxold, noutmax);
              dxout = dblRealloc(dxout, noutmaxold, noutmax);
              dyout = dblRealloc(dyout, noutmaxold, noutmax);
#ifdef ZCOORD
              ziout = dblRealloc(ziout, noutmaxold, noutmax);
              zjout = dblRealloc(zjout, noutmaxold, noutmax);
              dzout = dblRealloc(dzout, noutmaxold, noutmax);
#endif
#endif
#ifdef DIST
              dout = dblRealloc(dout, noutmaxold, noutmax);
#endif
#ifdef THRESH
              tout = intRealloc(tout, noutmaxold, noutmax);
#endif
            }
            iout[nout] = i + 1;  /* R indexing */
            jout[nout] = j + 1;
#ifdef COORDS
            xiout[nout] = x1i;
            yiout[nout] = y1i;
            xjout[nout] = x2[j];
            yjout[nout] = y2[j];
            dxout[nout] = dx;
            dyout[nout] = dy;
#ifdef ZCOORD
            ziout[nout] = z1i;
            zjout[nout] = z2[j];
            dzout[nout] = dz;
#endif
#endif
#ifdef DIST
            dout[nout] = sqrt(d2);
#endif
#ifdef THRESH
            tout[nout] = (d2 <= s2) ? 1 : 0;
#endif
            ++nout;
          }
#ifdef ZCOORD
          }
#endif
        }
      }
    }
  }

  /* return a list of vectors */
  PROTECT(iOut = NEW_INTEGER(nout));
  PROTECT(jOut = NEW_INTEGER(nout));
#ifdef COORDS
  PROTECT(xiOut = NEW_NUMERIC(nout));
  PROTECT(yiOut = NEW_NUMERIC(nout));
  PROTECT(xjOut = NEW_NUMERIC(nout));
  PROTECT(yjOut = NEW_NUMERIC(nout));
  PROTECT(dxOut = NEW_NUMERIC(nout));
  PROTECT(dyOut = NEW_NUMERIC(nout));
#ifdef ZCOORD
  PROTECT(ziOut = NEW_NUMERIC(nout));
  PROTECT(zjOut = NEW_NUMERIC(nout));
  PROTECT(dzOut = NEW_NUMERIC(nout));
#endif
#endif
#ifdef DIST
  PROTECT(dOut = NEW_NUMERIC(nout));
#endif
#ifdef THRESH
  PROTECT(tOut = NEW_INTEGER(nout));
#endif

  /* copy results from transient storage into R vectors */
  if(nout > 0) {
    iOutP = INTEGER_POINTER(iOut);
    jOutP = INTEGER_POINTER(jOut);
#ifdef COORDS
    xiOutP = NUMERIC_POINTER(xiOut);
    yiOutP = NUMERIC_POINTER(yiOut);
    xjOutP = NUMERIC_POINTER(xjOut);
    yjOutP = NUMERIC_POINTER(yjOut);
    dxOutP = NUMERIC_POINTER(dxOut);
    dyOutP = NUMERIC_POINTER(dyOut);
#ifdef ZCOORD
    ziOutP = NUMERIC_POINTER(ziOut);
    zjOutP = NUMERIC_POINTER(zjOut);
    dzOutP = NUMERIC_POINTER(dzOut);
#endif
#endif
#ifdef DIST
    dOutP = NUMERIC_POINTER(dOut);
#endif
#ifdef THRESH
    tOutP = INTEGER_POINTER(tOut);
#endif
    for(m = 0; m < nout; m++) {
      iOutP[m] = iout[m];
      jOutP[m] = jout[m];
#ifdef COORDS
      xiOutP[m] = xiout[m];
      yiOutP[m] = yiout[m];
      xjOutP[m] = xjout[m];
      yjOutP[m] = yjout[m];
      dxOutP[m] = dxout[m];
      dyOutP[m] = dyout[m];
#ifdef ZCOORD
      ziOutP[m] = ziout[m];
      zjOutP[m] = zjout[m];
      dzOutP[m] = dzout[m];
#endif
#endif
#ifdef DIST
      dOutP[m] = dout[m];
#endif
#ifdef THRESH
      tOutP[m] = tout[m];
#endif
    }
  }

/* layout of the output list: [i, j | thresh | coords | dist] */
#define HEAD 2
#ifdef THRESH
#define NECK 1
#else
#define NECK 0
#endif
#ifdef COORDS
#define MIDDLE (3*SPACEDIM)
#else
#define MIDDLE 0
#endif
#ifdef DIST
#define TAIL 1
#else
#define TAIL 0
#endif

  PROTECT(Out = NEW_LIST(HEAD+NECK+MIDDLE+TAIL));

  SET_VECTOR_ELT(Out, 0, iOut);
  SET_VECTOR_ELT(Out, 1, jOut);
#ifdef THRESH
  SET_VECTOR_ELT(Out, HEAD, tOut);
#endif
#ifdef COORDS
#ifdef ZCOORD
  SET_VECTOR_ELT(Out, HEAD+NECK, xiOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+2, ziOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+3, xjOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+4, yjOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+5, zjOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+6, dxOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+7, dyOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+8, dzOut);
#else
  SET_VECTOR_ELT(Out, HEAD+NECK, xiOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+2, xjOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+3, yjOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+4, dxOut);
  SET_VECTOR_ELT(Out, HEAD+NECK+5, dyOut);
#endif
#endif
#ifdef DIST
  SET_VECTOR_ELT(Out, HEAD+NECK+MIDDLE, dOut);
#endif

  UNPROTECT(NINPUTS+1+HEAD+NECK+MIDDLE+TAIL); /* 1 is for 'Out' itself */

  return(Out);
}

#undef NINPUTS
#undef HEAD
#undef NECK
#undef MIDDLE
#undef TAIL

/* ........................................................ */

/* Alternative code for CLOSEFUN, based on algorithm in CROSSFUN */

#define ALT_ALGO(NAME) ALT_PREFIX(NAME)
#define ALT_PREFIX(NAME) alt ## NAME

SEXP ALT_ALGO(CLOSEFUN)(SEXP xx,
                        SEXP yy,
#ifdef ZCOORD
                        SEXP zz,
#endif
                        SEXP rr,
#ifdef THRESH
                        SEXP ss,
#endif
                        SEXP nguess)
{
  /* input vectors */
  double *x, *y;
#ifdef ZCOORD
  double *z;
#endif
  /* lengths */
  int n, nout, noutmax, noutmaxold, maxchunk;
  /* distance parameter */
  double rmax, r2max, rmaxplus;
  /* indices */
  int i, j, jleft, m;
  /* temporary values */
  double xi, yi, xleft, dx, dy, dx2, d2;
#ifdef ZCOORD
  double zi, dz;
#endif
  /* local storage */
  int *iout, *jout;
  /* R objects in return value */
  SEXP Out, iOut, jOut;
  /* external storage pointers */
  int *iOutP, *jOutP;
#ifdef COORDS
  SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut;
  double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, *dyOutP;
  double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout;
#ifdef ZCOORD
  SEXP ziOut, zjOut, dzOut;
  double *ziOutP, *zjOutP, *dzOutP;
  double *ziout, *zjout, *dzout;
#endif
#endif
#ifdef DIST
  SEXP dOut;
  double *dOutP;
  double *dout;
#endif
#ifdef THRESH
  double s, s2;
  int *tout;
  SEXP tOut;
  int *tOutP;
#endif /* protect R objects from garbage collector */ PROTECT(xx = AS_NUMERIC(xx)); PROTECT(yy = AS_NUMERIC(yy)); #ifdef ZCOORD PROTECT(zz = AS_NUMERIC(zz)); #endif PROTECT(rr = AS_NUMERIC(rr)); PROTECT(nguess = AS_INTEGER(nguess)); #ifdef THRESH PROTECT(ss = AS_NUMERIC(ss)); #define NINPUTS (SPACEDIM + 3) #else #define NINPUTS (SPACEDIM + 2) #endif /* Translate arguments from R to C */ x = NUMERIC_POINTER(xx); y = NUMERIC_POINTER(yy); #ifdef ZCOORD z = NUMERIC_POINTER(zz); #endif n = LENGTH(xx); rmax = *(NUMERIC_POINTER(rr)); noutmax = *(INTEGER_POINTER(nguess)); r2max = rmax * rmax; rmaxplus = rmax + rmax/16.0; #ifdef THRESH s = *(NUMERIC_POINTER(ss)); s2 = s * s; #endif nout = 0; /* nout is the next available storage location and also the current length of the list */ if(n > 0 && noutmax > 0) { /* allocate space */ iout = (int *) R_alloc(noutmax, sizeof(int)); jout = (int *) R_alloc(noutmax, sizeof(int)); #ifdef COORDS xiout = (double *) R_alloc(noutmax, sizeof(double)); yiout = (double *) R_alloc(noutmax, sizeof(double)); xjout = (double *) R_alloc(noutmax, sizeof(double)); yjout = (double *) R_alloc(noutmax, sizeof(double)); dxout = (double *) R_alloc(noutmax, sizeof(double)); dyout = (double *) R_alloc(noutmax, sizeof(double)); #ifdef ZCOORD ziout = (double *) R_alloc(noutmax, sizeof(double)); zjout = (double *) R_alloc(noutmax, sizeof(double)); dzout = (double *) R_alloc(noutmax, sizeof(double)); #endif #endif #ifdef DIST dout = (double *) R_alloc(noutmax, sizeof(double)); #endif #ifdef THRESH tout = (int *) R_alloc(noutmax, sizeof(int)); #endif jleft = 0; i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for( ; i < maxchunk; i++) { xi = x[i]; yi = y[i]; #ifdef ZCOORD zi = z[i]; #endif /* adjust starting point jleft */ xleft = xi - rmaxplus; while((x[jleft] < xleft) && (jleft+1 < n)) ++jleft; /* process from j = jleft until dx > rmax + epsilon */ for(j=jleft; j < n; j++) { /* squared interpoint 
distance */ dx = x[j] - xi; if(dx > rmaxplus) break; dx2 = dx * dx; dy = y[j] - yi; d2 = dx2 + dy * dy; #ifdef ZCOORD if(d2 <= r2max) { dz = z[j] - zi; d2 = d2 + dz * dz; #endif if(d2 <= r2max) { /* add this (i, j) pair to output */ if(nout >= noutmax) { /* overflow; allocate more space */ noutmaxold = noutmax; noutmax = 2 * noutmax; iout = intRealloc(iout, noutmaxold, noutmax); jout = intRealloc(jout, noutmaxold, noutmax); #ifdef COORDS xiout = dblRealloc(xiout, noutmaxold, noutmax); yiout = dblRealloc(yiout, noutmaxold, noutmax); xjout = dblRealloc(xjout, noutmaxold, noutmax); yjout = dblRealloc(yjout, noutmaxold, noutmax); dxout = dblRealloc(dxout, noutmaxold, noutmax); dyout = dblRealloc(dyout, noutmaxold, noutmax); #ifdef ZCOORD ziout = dblRealloc(ziout, noutmaxold, noutmax); zjout = dblRealloc(zjout, noutmaxold, noutmax); dzout = dblRealloc(dzout, noutmaxold, noutmax); #endif #endif #ifdef DIST dout = dblRealloc(dout, noutmaxold, noutmax); #endif #ifdef THRESH tout = intRealloc(tout, noutmaxold, noutmax); #endif } iout[nout] = i + 1; /* R indexing */ jout[nout] = j + 1; #ifdef COORDS xiout[nout] = xi; yiout[nout] = yi; xjout[nout] = x[j]; yjout[nout] = y[j]; dxout[nout] = dx; dyout[nout] = dy; #ifdef ZCOORD ziout[nout] = zi; zjout[nout] = z[j]; dzout[nout] = dz; #endif #endif #ifdef DIST dout[nout] = sqrt(d2); #endif #ifdef THRESH tout[nout] = (d2 <= s2) ? 
1 : 0; #endif ++nout; } #ifdef ZCOORD } #endif } } } } /* return a list of vectors */ PROTECT(iOut = NEW_INTEGER(nout)); PROTECT(jOut = NEW_INTEGER(nout)); #ifdef COORDS PROTECT(xiOut = NEW_NUMERIC(nout)); PROTECT(yiOut = NEW_NUMERIC(nout)); PROTECT(xjOut = NEW_NUMERIC(nout)); PROTECT(yjOut = NEW_NUMERIC(nout)); PROTECT(dxOut = NEW_NUMERIC(nout)); PROTECT(dyOut = NEW_NUMERIC(nout)); #ifdef ZCOORD PROTECT(ziOut = NEW_NUMERIC(nout)); PROTECT(zjOut = NEW_NUMERIC(nout)); PROTECT(dzOut = NEW_NUMERIC(nout)); #endif #endif #ifdef DIST PROTECT(dOut = NEW_NUMERIC(nout)); #endif #ifdef THRESH PROTECT(tOut = NEW_INTEGER(nout)); #endif if(nout > 0) { iOutP = INTEGER_POINTER(iOut); jOutP = INTEGER_POINTER(jOut); #ifdef COORDS xiOutP = NUMERIC_POINTER(xiOut); yiOutP = NUMERIC_POINTER(yiOut); xjOutP = NUMERIC_POINTER(xjOut); yjOutP = NUMERIC_POINTER(yjOut); dxOutP = NUMERIC_POINTER(dxOut); dyOutP = NUMERIC_POINTER(dyOut); #ifdef ZCOORD ziOutP = NUMERIC_POINTER(ziOut); zjOutP = NUMERIC_POINTER(zjOut); dzOutP = NUMERIC_POINTER(dzOut); #endif #endif #ifdef DIST dOutP = NUMERIC_POINTER(dOut); #endif #ifdef THRESH tOutP = INTEGER_POINTER(tOut); #endif for(m = 0; m < nout; m++) { iOutP[m] = iout[m]; jOutP[m] = jout[m]; #ifdef COORDS xiOutP[m] = xiout[m]; yiOutP[m] = yiout[m]; xjOutP[m] = xjout[m]; yjOutP[m] = yjout[m]; dxOutP[m] = dxout[m]; dyOutP[m] = dyout[m]; #ifdef ZCOORD ziOutP[m] = ziout[m]; zjOutP[m] = zjout[m]; dzOutP[m] = dzout[m]; #endif #endif #ifdef DIST dOutP[m] = dout[m]; #endif #ifdef THRESH tOutP[m] = tout[m]; #endif } } #define HEAD 2 #ifdef THRESH #define NECK 1 #else #define NECK 0 #endif #ifdef COORDS #define MIDDLE (3*SPACEDIM) #else #define MIDDLE 0 #endif #ifdef DIST #define TAIL 1 #else #define TAIL 0 #endif PROTECT(Out = NEW_LIST(HEAD+NECK+MIDDLE+TAIL)); SET_VECTOR_ELT(Out, 0, iOut); SET_VECTOR_ELT(Out, 1, jOut); #ifdef THRESH SET_VECTOR_ELT(Out, HEAD, tOut); #endif #ifdef COORDS #ifdef ZCOORD SET_VECTOR_ELT(Out, HEAD+NECK, xiOut); SET_VECTOR_ELT(Out, 
HEAD+NECK+1, yiOut); SET_VECTOR_ELT(Out, HEAD+NECK+2, ziOut); SET_VECTOR_ELT(Out, HEAD+NECK+3, xjOut); SET_VECTOR_ELT(Out, HEAD+NECK+4, yjOut); SET_VECTOR_ELT(Out, HEAD+NECK+5, zjOut); SET_VECTOR_ELT(Out, HEAD+NECK+6, dxOut); SET_VECTOR_ELT(Out, HEAD+NECK+7, dyOut); SET_VECTOR_ELT(Out, HEAD+NECK+8, dzOut); #else SET_VECTOR_ELT(Out, HEAD+NECK, xiOut); SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut); SET_VECTOR_ELT(Out, HEAD+NECK+2, xjOut); SET_VECTOR_ELT(Out, HEAD+NECK+3, yjOut); SET_VECTOR_ELT(Out, HEAD+NECK+4, dxOut); SET_VECTOR_ELT(Out, HEAD+NECK+5, dyOut); #endif #endif #ifdef DIST SET_VECTOR_ELT(Out, HEAD+NECK+MIDDLE, dOut); #endif UNPROTECT(NINPUTS+1+HEAD+NECK+MIDDLE+TAIL); /* 1 is for 'Out' itself */ return(Out); } #undef NINPUTS #undef HEAD #undef NECK #undef MIDDLE #undef TAIL #undef ALT_ALGO #undef ALT_PREFIX spatstat/src/dist2.c0000755000176200001440000000377713166361223014121 0ustar liggesusers# include #include #include "yesno.h" /* dist2: squared distance in torus dist2thresh: faster code for testing whether dist2 < r2 dist2Mthresh: same as dist2thresh, but does not assume the points are within one period of each other. */ double dist2(u,v,x,y,period) double u, v, x, y; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, d2; /* points are assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; dxp = wide - dx; a = (dx < dxp)? dx : dxp; dy = v - y; if(dy < 0.0) dy = -dy; dyp = high - dy; b = (dy < dyp)? 
dy : dyp; d2 = a * a + b * b; return d2; } double dist2either(u,v,x,y,period) double u, v, x, y; double *period; { if(period[0] < 0.0) return pow(u-x,2) + pow(v-y,2); return(dist2(u,v,x,y,period)); } int dist2thresh(u,v,x,y,period,r2) double u, v, x, y, r2; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, residue; /* points are assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; dxp = wide - dx; a = (dx < dxp) ? dx : dxp; residue = r2 - a * a; if(residue <= 0.0) return NO; dy = v - y; if(dy < 0.0) dy = -dy; dyp = high - dy; b = (dy < dyp) ? dy : dyp; if (residue > b * b) return YES; return NO; } int dist2Mthresh(u,v,x,y,period,r2) double u, v, x, y, r2; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, residue; /* points are NOT assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; while(dx > wide) dx -= wide; dxp = wide - dx; a = (dx < dxp) ? dx : dxp; residue = r2 - a * a; if(residue < 0.0) return NO; dy = v - y; if(dy < 0.0) dy = -dy; while(dy > high) dy -= high; dyp = high - dy; b = (dy < dyp) ? dy : dyp; if (residue >= b * b) return YES; return NO; } spatstat/src/nndistX.h0000644000176200001440000000606513166361223014516 0ustar liggesusers #if (1 == 0) /* nndistX.h Code template for C functions supporting nncross THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED IN ASCENDING ORDER OF y COORDINATE This code is #included multiple times in nndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if exclusion mechanism is used Either or both DIST and WHICH may be defined. When EXCLUDE is defined, code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012 Licence: GPL >= 2 $Revision: 1.5 $ $Date: 2013/09/18 04:49:18 $ */ #endif void FNAME(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int npoints1, npoints2, maxchunk, i, jleft, jright, jwhich, lastjwhich; double d2, d2min, x1i, y1i, dx, dy, dy2, hu, hu2; #ifdef EXCLUDE int id1i; #endif hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints1) maxchunk = npoints1; for(; i < maxchunk; i++) { d2min = hu2; jwhich = -1; x1i = x1[i]; y1i = y1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif if(lastjwhich < npoints2) { /* search forward from previous nearest neighbour */ for(jright = lastjwhich; jright < npoints2; ++jright) { dy = y2[jright] - y1i; dy2 = dy * dy; if(dy2 > d2min) /* note that dy2 >= d2min could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jright] != id1i) { #endif dx = x2[jright] - x1i; d2 = dx * dx + dy2; if (d2 < d2min) { d2min = d2; jwhich = jright; } #ifdef EXCLUDE } #endif } /* end forward search */ } if(lastjwhich > 0) { /* search backward from previous nearest neighbour */ for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dy = y1i - y2[jleft]; dy2 = dy * dy; if(dy2 > d2min) /* note that dy2 >= d2min could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jleft] != id1i) { #endif dx = x2[jleft] - x1i; d2 = dx * dx + dy2; if (d2 < d2min) { d2min = d2; jwhich = jleft; } #ifdef EXCLUDE } #endif } /* end backward search */ } /* commit values */ #ifdef DIST nnd[i] = sqrt(d2min); #endif #ifdef WHICH nnwhich[i] = jwhich + 1; /* R indexing 
*/ #endif lastjwhich = jwhich; } } } spatstat/src/exactdist.c0000755000176200001440000001460513166361223015054 0ustar liggesusers/* exactdist.c Exact distance transform of a point pattern (used to estimate the empty space function F) $Revision: 1.12 $ $Date: 2011/09/20 07:36:17 $ Author: Adrian Baddeley Sketch of functionality: the 'data' are a finite list of points in R^2 (x,y coordinates) and the 'output' is a real valued image whose entries are distances, with the value for each pixel equalling the distance from that pixel to the nearest point of the data pattern. Routines: exact_dt_R() interface to R exact_dt() implementation of distance transform dist_to_bdry() compute distance to edge of image frame shape_raster() initialise a Raster structure The appropriate calling sequence for exact_dt_R() is exemplified in 'exactdt.R' */ #undef DEBUG #include #include "raster.h" #ifdef DEBUG #include #endif void shape_raster(ras,data,xmin,ymin,xmax,ymax,nrow,ncol,mrow,mcol) Raster *ras; /* the raster structure to be initialised */ void *data; int nrow, ncol; /* absolute dimensions of storage array */ int mrow, mcol; /* margins clipped off */ /* e.g. 
valid width is ncol - 2*mcol columns */ double xmin, ymin, /* image dimensions in R^2 after clipping */ xmax, ymax; { ras->data = data; ras->nrow = nrow; ras->ncol = ncol; ras->length = nrow * ncol; ras->rmin = mrow; ras->rmax = nrow - mrow - 1; ras->cmin = mcol; ras->cmax = ncol - mcol - 1; ras->x0 = ras->xmin = xmin; ras->x1 = ras->xmax = xmax; ras->y0 = ras->ymin = ymin; ras->y1 = ras->ymax = ymax; ras->xstep = (xmax-xmin)/(ncol - 2 * mcol - 1); ras->ystep = (ymax-ymin)/(nrow - 2 * mrow - 1); /* Rprintf("xstep,ystep = %lf,%lf\n", ras->xstep,ras->ystep); */ } void exact_dt(x, y, npt, dist, index) double *x, *y; /* data points */ int npt; Raster *dist; /* exact distance to nearest point */ Raster *index; /* which point x[i],y[i] is closest */ { int i,j,k,l,m; double d; int ii; double dd; /* double bdiag; */ /* initialise rasters */ #define UNDEFINED -1 #define Is_Defined(I) (I >= 0) #define Is_Undefined(I) (I < 0) Clear(*index,int,UNDEFINED) d = 2.0 * DistanceSquared(dist->xmin,dist->ymin,dist->xmax,dist->ymax); Clear(*dist,double,d) /* If the list of data points is empty, ... 
exit now */ if(npt == 0) return; for(i = 0; i < npt; i++) { /* Rprintf("%ld -> (%lf,%lf)\n", i, x[i], y[i]); */ j = RowIndex(*dist,y[i]); k = ColIndex(*dist,x[i]); /* if(!Inside(*dist,j,k)) Rprintf("(%ld,%ld) out of bounds\n",j,k); else if (!Inside(*dist,j+1,k+1)) Rprintf("(%ld+1,%ld+1) out of bounds\n",j,k); */ for(l = j; l <= j+1; l++) for(m = k; m <= k+1; m++) { d = DistanceToSquared(x[i],y[i],*index,l,m); if( Is_Undefined(Entry(*index,l,m,int)) || Entry(*dist,l,m,double) > d) { /* Rprintf("writing (%ld,%ld) -> %ld\t%lf\n", l,m,i,d); */ Entry(*index,l,m,int) = i; Entry(*dist,l,m,double) = d; /* Rprintf("checking: %ld, %lf\n", Entry(*index,l,m,int), Entry(*dist,l,m,double)); */ } } } /* for(j = 0; j <= index->nrow; j++) for(k = 0; k <= index->ncol; k++) Rprintf("[%ld,%ld] %ld\t%lf\n", j,k,Entry(*index,j,k,int),Entry(*dist,j,k,double)); */ /* how to update the distance values */ #define COMPARE(ROW,COL,RR,CC) \ d = Entry(*dist,ROW,COL,double); \ ii = Entry(*index,RR,CC,int); \ /* Rprintf(" %lf\t (%ld,%ld) |-> %ld\n", d, RR, CC, ii); */ \ if(Is_Defined(ii) /* && ii < npt */ \ && Entry(*dist,RR,CC,double) < d) { \ dd = DistanceSquared(x[ii],y[ii],Xpos(*index,COL),Ypos(*index,ROW)); \ if(dd < d) { \ /* Rprintf("(%ld,%ld) <- %ld\n", ROW, COL, ii); */ \ Entry(*index,ROW,COL,int) = ii; \ Entry(*dist,ROW,COL,double) = dd; \ /* Rprintf("checking: %ld, %lf\n", Entry(*index,ROW,COL,int), Entry(*dist,ROW,COL,double)); */\ } \ } /* bound on diagonal step distance */ /* bdiag = sqrt(index->xstep * index->xstep + index->ystep * index->ystep); */ /* forward pass */ for(j = index->rmin; j <= index->rmax; j++) for(k = index->cmin; k <= index->cmax; k++) { /* Rprintf("Neighbourhood of (%ld,%ld):\n", j,k); */ COMPARE(j,k, j-1,k-1) COMPARE(j,k, j-1, k) COMPARE(j,k, j-1,k+1) COMPARE(j,k, j, k-1) } /* backward pass */ for(j = index->rmax; j >= index->rmin; j--) for(k = index->cmax; k >= index->cmin; k--) { COMPARE(j,k, j+1,k+1) COMPARE(j,k, j+1, k) COMPARE(j,k, j+1,k-1) COMPARE(j,k, j, 
k+1) } /* take square roots of the distances^2 */ for(j = index->rmin; j <= index->rmax; j++) for(k = index->cmin; k <= index->cmax; k++) Entry(*dist,j,k,double) = sqrt(Entry(*dist,j,k,double)); } #define MIN(A,B) (((A) < (B)) ? (A) : (B)) void dist_to_bdry(d) /* compute distance to boundary from each raster point */ Raster *d; /* of course this is easy for a rectangular grid but we implement it in C for ease of future modification */ { int j, k; double x, y, xd, yd; for(j = d->rmin; j <= d->rmax;j++) { y = Ypos(*d,j); yd = MIN(y - d->ymin, d->ymax - y); for(k = d->cmin; k <= d->cmax;k++) { x = Xpos(*d,k); xd = MIN(x - d->xmin, d->xmax - x); Entry(*d,j,k,double) = MIN(xd,yd); } } } /* R interface */ void exact_dt_R(x, y, npt, xmin, ymin, xmax, ymax, nr, nc, mr, mc, distances, indices, boundary) double *x, *y; /* input data points */ int *npt; double *xmin, *ymin, *xmax, *ymax; /* guaranteed bounding box */ int *nr, *nc; /* desired raster dimensions EXCLUDING margins */ int *mr, *mc; /* margins */ /* output arrays */ double *distances; /* distance to nearest point */ int *indices; /* index to nearest point */ double *boundary; /* distance to boundary */ { Raster dist, index, bdist; int mrow, mcol, nrow, ncol; mrow = *mr; mcol = *mc; /* full dimensions */ nrow = *nr + 2 * mrow; ncol = *nc + 2 * mcol; shape_raster( &dist, (void *) distances,*xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &index, (void *) indices, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); exact_dt(x, y, (int) *npt, &dist, &index); dist_to_bdry(&bdist); } spatstat/src/corrections.c0000755000176200001440000002203613166361223015413 0ustar liggesusers/* corrections.c Edge corrections $Revision: 1.12 $ $Date: 2013/05/27 02:09:10 $ */ #include #include #include #include #include "chunkloop.h" #include "yesno.h" #include "constants.h" #undef DEBUG /* This constant is defined in Rmath.h */ #define 
TWOPI M_2PI #define MIN(A,B) (((A) < (B)) ? (A) : (B)) #define BETWEEN(X,X0,X1) (((X) - (X0)) * ((X) - (X1)) <= 0) #define UNDER(X,Y,X0,Y0,X1,Y1) \ (((Y1) - (Y0)) * ((X) - (X0)) >= ((Y) - (Y0)) * ((X1)- (X0))) #define UNDERNEATH(X,Y,X0,Y0,X1,Y1) \ (((X0) < (X1)) ? UNDER(X,Y,X0,Y0,X1,Y1) : UNDER(X,Y,X1,Y1,X0,Y0)) #define TESTINSIDE(X,Y,X0,Y0,X1,Y1) \ (BETWEEN(X,X0,X1) && UNDERNEATH(X, Y, X0, Y0, X1, Y1)) void ripleybox(nx, x, y, rmat, nr, xmin, ymin, xmax, ymax, epsilon, out) /* inputs */ int *nx, *nr; /* dimensions */ double *x, *y; /* coordinate vectors of length nx */ double *rmat; /* matrix nx by nr */ double *xmin, *ymin, *xmax, *ymax; /* box dimensions */ double *epsilon; /* threshold for proximity to corner */ /* output */ double *out; /* output matrix nx by nr */ { int i, j, n, m, ijpos, ncor, maxchunk; double xx, yy, x0, y0, x1, y1, dL, dR, dU, dD, aL, aU, aD, aR, rij; double cL, cU, cD, cR, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR; double corner, extang; double eps; n = *nx; m = *nr; x0 = *xmin; y0 = *ymin; x1 = *xmax; y1 = *ymax; eps = *epsilon; OUTERCHUNKLOOP(i, n, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 16384) { xx = x[i]; yy = y[i]; /* perpendicular distance from point to each edge of rectangle L = left, R = right, D = down, U = up */ dL = xx - x0; dR = x1 - xx; dD = yy - y0; dU = y1 - yy; /* test for corner of the rectangle */ #define ABS(X) (((X) >= 0) ? (X) : (-X)) #define SMALL(X) ((ABS(X) < eps) ? 1 : 0) ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU); corner = (ncor >= 2) ? 
YES : NO; /* angle between - perpendicular to edge of rectangle and - line from point to corner of rectangle */ bLU = atan2(dU, dL); bLD = atan2(dD, dL); bRU = atan2(dU, dR); bRD = atan2(dD, dR); bUL = atan2(dL, dU); bUR = atan2(dR, dU); bDL = atan2(dL, dD); bDR = atan2(dR, dD); for(j = 0; j < m; j++) { ijpos = j * n + i; rij = rmat[ijpos]; #ifdef DEBUG Rprintf("rij = %lf\n", rij); #endif /* half the angle subtended by the intersection between the circle of radius r[i,j] centred on point i and each edge of the rectangle (prolonged to an infinite line) */ aL = (dL < rij) ? acos(dL/rij) : 0.0; aR = (dR < rij) ? acos(dR/rij) : 0.0; aD = (dD < rij) ? acos(dD/rij) : 0.0; aU = (dU < rij) ? acos(dU/rij) : 0.0; #ifdef DEBUG Rprintf("aL = %lf\n", aL); Rprintf("aR = %lf\n", aR); Rprintf("aD = %lf\n", aD); Rprintf("aU = %lf\n", aU); #endif /* apply maxima */ cL = MIN(aL, bLU) + MIN(aL, bLD); cR = MIN(aR, bRU) + MIN(aR, bRD); cU = MIN(aU, bUL) + MIN(aU, bUR); cD = MIN(aD, bDL) + MIN(aD, bDR); #ifdef DEBUG Rprintf("cL = %lf\n", cL); Rprintf("cR = %lf\n", cR); Rprintf("cD = %lf\n", cD); Rprintf("cU = %lf\n", cU); #endif /* total exterior angle over 2 pi */ extang = (cL + cR + cU + cD)/TWOPI; /* add pi/2 for corners */ if(corner) extang += 1/4; #ifdef DEBUG Rprintf("extang = %lf\n", extang); #endif /* OK, now compute weight */ out[ijpos] = 1 / (1 - extang); } } } } void ripleypoly(nc, xc, yc, nr, rmat, nseg, x0, y0, x1, y1, out) /* inputs */ int *nc, *nr, *nseg; double *xc, *yc, *rmat; double *x0, *y0, *x1, *y1; /* output */ double *out; { int n, m, i, j, k, l, nradperpt, ncut, nchanges, maxchunk; double xcentre, ycentre, xx0, yy0, xx1, yy1, xx01, yy01; double x, y, radius, radius2, dx0, dx1, dy0; double a, b, c, t, det, sqrtdet, tmp; double theta[6], delta[7], tmid[7]; double xtest, ytest, contrib, total; n = *nc; nradperpt = *nr; m = *nseg; OUTERCHUNKLOOP(i, n, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 16384) { xcentre = xc[i]; ycentre = yc[i]; 
#ifdef DEBUG Rprintf("centre = (%lf, %lf)\n", xcentre, ycentre); #endif for(j = 0; j < nradperpt; j++) { radius = rmat[ j * n + i]; radius2 = radius * radius; #ifdef DEBUG Rprintf("radius = %lf\n", radius); #endif total = 0.0; for(k=0; k < m; k++) { #ifdef DEBUG Rprintf("k = %d\n", k); #endif ncut = 0; xx0 = x0[k]; yy0 = y0[k]; xx1 = x1[k]; yy1 = y1[k]; #ifdef DEBUG Rprintf("(%lf,%lf) to (%lf,%lf)\n", xx0, yy0, xx1, yy1); #endif /* intersection with left edge */ dx0 = xx0 - xcentre; det = radius2 - dx0 * dx0; if(det > 0) { sqrtdet = sqrt(det); y = ycentre + sqrtdet; if(y < yy0) { theta[ncut] = atan2(y - ycentre, dx0); #ifdef DEBUG Rprintf("cut left at theta= %lf\n", theta[ncut]); #endif ncut++; } y = ycentre - sqrtdet; if(y < yy0) { theta[ncut] = atan2(y-ycentre, dx0); #ifdef DEBUG Rprintf("cut left at theta= %lf\n", theta[ncut]); #endif ncut++; } } else if(det == 0) { if(ycentre < yy0) { theta[ncut] = atan2(0.0, dx0); #ifdef DEBUG Rprintf("tangent left at theta= %lf\n", theta[ncut]); #endif ncut++; } } /* intersection with right edge */ dx1 = xx1 - xcentre; det = radius2 - dx1 * dx1; if(det > 0) { sqrtdet = sqrt(det); y = ycentre + sqrtdet; if(y < yy1) { theta[ncut] = atan2(y - ycentre, dx1); #ifdef DEBUG Rprintf("cut right at theta= %lf\n", theta[ncut]); #endif ncut++; } y = ycentre - sqrtdet; if(y < yy1) { theta[ncut] = atan2(y - ycentre, dx1); #ifdef DEBUG Rprintf("cut right at theta= %lf\n", theta[ncut]); #endif ncut++; } } else if(det == 0) { if(ycentre < yy1) { theta[ncut] = atan2(0.0, dx1); #ifdef DEBUG Rprintf("tangent right at theta= %lf\n", theta[ncut]); #endif ncut++; } } /* intersection with top segment */ xx01 = xx1 - xx0; yy01 = yy1 - yy0; dy0 = yy0 - ycentre; a = xx01 * xx01 + yy01 * yy01; b = 2 * (xx01 * dx0 + yy01 * dy0); c = dx0 * dx0 + dy0 * dy0 - radius2; det = b * b - 4 * a * c; if(det > 0) { sqrtdet = sqrt(det); t = (sqrtdet - b)/(2 * a); if(t >= 0 && t <= 1) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = atan2(y - ycentre, x - 
xcentre); #ifdef DEBUG Rprintf("hits segment: t = %lf, theta = %lf\n", t, theta[ncut]); #endif ++ncut; } t = (-sqrtdet - b)/(2 * a); if(t >= 0 && t <= 1) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = atan2(y - ycentre, x - xcentre); #ifdef DEBUG Rprintf("hits segment: t = %lf, theta = %lf\n", t, theta[ncut]); #endif ++ncut; } } else if(det == 0) { t = - b/(2 * a); if(t >= 0 && t <= 1) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = atan2(y - ycentre, x - xcentre); #ifdef DEBUG Rprintf("tangent to segment: t = %lf, theta = %lf\n", t, theta[ncut]); #endif ++ncut; } } /* for safety, force all angles to be in range [0, 2 * pi] */ if(ncut > 0) for(l = 0; l < ncut; l++) if(theta[l] < 0) theta[l] += TWOPI; /* sort angles */ if(ncut > 1) { do { nchanges = 0; for(l = 0; l < ncut - 1; l++) { if(theta[l] > theta[l+1]) { /* swap */ ++nchanges; tmp = theta[l]; theta[l] = theta[l+1]; theta[l+1] = tmp; } } } while(nchanges > 0); } #ifdef DEBUG if(ncut > 0) { for(l = 0; l < ncut; l++) Rprintf("theta[%d] = %lf\n", l, theta[l]); } #endif /* compute length of circumference inside polygon */ if(ncut == 0) { /* entire circle is either in or out */ xtest = xcentre + radius; ytest = ycentre; if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) contrib = TWOPI; else contrib = 0.0; } else { /* find midpoints and lengths of pieces (adding theta = ) */ delta[0] = theta[0]; tmid[0] = theta[0]/2; if(ncut > 1) { for(l = 1; l < ncut; l++) { delta[l] = theta[l] - theta[l-1]; tmid[l] = (theta[l] + theta[l-1])/2; } } delta[ncut] = TWOPI - theta[ncut - 1]; tmid[ncut] = (TWOPI + theta[ncut-1])/2; contrib = 0.0; for(l = 0; l <= ncut; l++) { #ifdef DEBUG Rprintf("delta[%d] = %lf\n", l, delta[l]); #endif xtest = xcentre + radius * cos(tmid[l]); ytest = ycentre + radius * sin(tmid[l]); if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) { contrib += delta[l]; #ifdef DEBUG Rprintf("... inside\n"); } else { Rprintf("... 
outside\n"); #endif } } } /* multiply by sign of trapezium */ if(xx0 < xx1) contrib *= -1; #ifdef DEBUG Rprintf("contrib = %lf\n", contrib); #endif total += contrib; } out[ j * n + i] = total; #ifdef DEBUG Rprintf("total = %lf\n", total); #endif } } } } spatstat/src/PerfectDiggleGratton.h0000644000176200001440000001342113166361223017124 0ustar liggesusers // ........................... Diggle-Gratton process .......................... // $Revision: 1.5 $ $Date: 2012/03/10 11:22:56 $ class DiggleGrattonProcess : public PointProcess { public: double beta, delta, rho, kappa, rhominusdelta, deltasquared, rhosquared; DiggleGrattonProcess(double xmin, double xmax, double ymin, double ymax, double b, double d, double r, double k); ~DiggleGrattonProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; DiggleGrattonProcess::DiggleGrattonProcess(double xmin, double xmax, double ymin, double ymax, double b, double d, double r, double k) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; delta = d; rho = r; kappa = k; deltasquared = delta * delta; rhosquared = rho * rho; rhominusdelta = rho - delta; InteractionRange = rho; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double DiggleGrattonProcess::Interaction(double dsquared) { double rtn, dist, t; rtn = 1; if(dsquared < rhosquared) { if(dsquared < deltasquared) { rtn = 0; } else { dist = sqrt(dsquared); t = (dist - delta)/rhominusdelta; rtn = pow(t, kappa); } } return(rtn); } void DiggleGrattonProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void DiggleGrattonProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = 
Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating DiggleGrattonProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating DiggleGrattonProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating DiggleGrattonProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectDiggleGratton(SEXP beta, SEXP delta, SEXP rho, SEXP kappa, SEXP xrange, SEXP yrange) { // input parameters double Beta, Delta, Rho, Kappa, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(delta = AS_NUMERIC(delta)); PROTECT(rho = AS_NUMERIC(rho)); PROTECT(kappa = AS_NUMERIC(kappa)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 6 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Delta = *(NUMERIC_POINTER(delta)); Rho = *(NUMERIC_POINTER(rho)); Kappa = *(NUMERIC_POINTER(kappa)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ Rho); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ Rho); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ 
Rho); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ Rho); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise DiggleGratton point process DiggleGrattonProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Delta,Rho,Kappa); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(10); // 6 arguments plus xout, yout, nout, out return(out); } } spatstat/src/Ediggra.c0000755000176200001440000000366413166361223014431 0ustar liggesusers#include #include #include "chunkloop.h" #include "looptest.h" /* Ediggra.c $Revision: 1.5 $ $Date: 2014/09/19 00:53:38 $ C implementation of 'eval' for DiggleGratton interaction (exponentiated) Assumes point patterns are sorted in increasing order of x coordinate */ double sqrt(); void Ediggra(nnsource, xsource, ysource, idsource, nntarget, xtarget, ytarget, idtarget, ddelta, 
rrho, values) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget; int *idsource, *idtarget; double *ddelta, *rrho; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft, idsourcej; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double delta, rho, delta2, rho2, rho2pluseps, rhominusdelta; double product; nsource = *nnsource; ntarget = *nntarget; delta = *ddelta; rho = *rrho; if(nsource == 0 || ntarget == 0) return; rho2 = rho * rho; delta2 = delta * delta; rhominusdelta = rho - delta; rho2pluseps = rho2 + EPSILON(rho2); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { product = 1; xsourcej = xsource[j]; ysourcej = ysource[j]; idsourcej = idsource[j]; /* adjust starting point */ xleft = xsourcej - rho; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process until dx > rho (or until product is zero) */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > rho2pluseps) break; if(idtarget[i] != idsourcej) { dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= rho2) { if(d2 <= delta2) { product = 0; break; } else product *= (sqrt(d2) - delta)/rhominusdelta; } } } values[j] = product; } } } spatstat/src/nndist.h0000644000176200001440000000405113166361223014357 0ustar liggesusers/* nndist.h Code template for C functions supporting nndist and nnwhich (k=1) THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER This code is #included multiple times in nndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. 
  Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012
  Licence: GPL >= 2

  $Revision: 1.2 $  $Date: 2012/03/14 02:37:27 $

*/

/* Nearest neighbour (k=1) of each point in a single pattern whose
   y coordinates are sorted in ascending order.  The search sweeps
   forward and backward from each point and prunes as soon as the
   y-displacement alone exceeds the best squared distance so far. */

void FNAME(n, x, y,
#ifdef DIST
	   nnd,
#endif
#ifdef WHICH
	   nnwhich,
#endif
	   huge)
     /* inputs */
     int *n;
     double *x, *y, *huge;
     /* outputs */
#ifdef DIST
     double *nnd;
#endif
#ifdef WHICH
     int *nnwhich;
#endif
{
  int npoints, i, maxchunk, left, right;
  double d2, d2min, xi, yi, dx, dy, dy2, hu, hu2;
#ifdef WHICH
  int which;
#endif

  /* 'huge' is an upper bound on any distance; its square
     initialises the running minimum */
  hu = *huge;
  hu2 = hu * hu;

  npoints = *n;

  /* loop in chunks of 2^16 */

  i = 0; maxchunk = 0;
  while(i < npoints) {

    R_CheckUserInterrupt();

    maxchunk += 65536;
    if(maxchunk > npoints) maxchunk = npoints;

    for(; i < maxchunk; i++) {

      d2min = hu2;
#ifdef WHICH
      which = -1;
#endif
      xi = x[i];
      yi = y[i];

      if(i < npoints - 1) {
	/* search forward */
	for(right = i + 1; right < npoints; ++right)
	{
	  dy = y[right] - yi;
	  dy2 = dy * dy;
	  /* y is sorted, so no later point can be closer */
	  if(dy2 > d2min)
	    break;
	  dx = x[right] - xi;
	  d2 =  dx * dx + dy2;
	  if (d2 < d2min) {
	    d2min = d2;
#ifdef WHICH
	    which = right;
#endif
	  }
	}
      }
      if(i > 0){
	/* search backward */
	for(left = i - 1; left >= 0; --left)
	{
	  dy = yi - y[left];
	  dy2 = dy * dy;
	  if(dy2 > d2min)
	    break;

	  dx = x[left] - xi;
	  d2 =  dx * dx + dy2;
	  if (d2 < d2min) {
	    d2min = d2;
#ifdef WHICH
	    which = left;
#endif
	  }
	}
      }

#ifdef DIST
      nnd[i] = sqrt(d2min);
#endif
#ifdef WHICH
      /* convert from 0-based to 1-based index */
      nnwhich[i] = which + 1;  /* R indexing */
#endif
    }
  }
}
spatstat/src/localpcf.h0000755000176200001440000000453513166361223014655 0ustar  liggesusers/*

  localpcf.h

  Source template for versions of local pair correlation

  Requires variable: WEIGHTED

  Assumes point patterns are sorted in increasing order of x coordinate

  $Revision: 1.5 $  $Date: 2012/03/27 04:50:04 $

*/

#ifdef WEIGHTED
#define FNAME locWpcfx
#else
#define FNAME locpcfx
#endif

void FNAME(nn1, x1, y1, id1,
	   nn2, x2, y2, id2,
#ifdef WEIGHTED
	   w2,
#endif
	   nnr, rmaxi, del, pcf)
     /* inputs */
     int *nn1, *nn2, *nnr;
     double *x1, *y1, *x2, *y2;
     int *id1, *id2;
     double *rmaxi, *del;
#ifdef WEIGHTED
     double *w2;
#endif
     /* output */
     double *pcf;
     /* matrix of column vectors of pcf's for each point of first pattern */
{
  int n1, n2, nr, i, j, k, jleft, kmin, kmax, id1i, maxchunk;
  double x1i, y1i, rmax, delta, xleft, dx, dy, dx2;
  double d2, d2max, dmax, d;
  double rstep, rvalue, frac, contrib, weight, coef;

  n1 = *nn1;
  n2 = *nn2;
  nr = *nnr;
  rmax = *rmaxi;
  delta = *del;

  dmax = rmax + delta; /* maximum relevant value of interpoint distance */
  d2max = dmax * dmax;
  rstep = rmax/(nr-1);
  /* normalising constant of the Epanechnikov kernel, 3/(4*delta) */
  coef = 3.0 /(4.0 * delta);

  if(n1 == 0 || n2 == 0)
    return;

  /* moving lower bound into the x-sorted second pattern */
  jleft = 0;

  OUTERCHUNKLOOP(i, n1, maxchunk, 8196) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(i, n1, maxchunk, 8196) {
      x1i = x1[i];
      y1i = y1[i];
      id1i = id1[i];
      /* adjust starting point */
      xleft = x1i - dmax;
      while((x2[jleft] < xleft) && (jleft+1 < n2))
	++jleft;

      /* process from jleft until |dx| > dmax */
      for(j=jleft; j < n2; j++) {
	dx = x2[j] - x1i;
	dx2 = dx * dx;
	if(dx2 > d2max)
	  break;
	dy = y2[j] - y1i;
	d2 = dx2 + dy * dy;
	/* points with the same id are the same point: skip them */
	if(d2 <= d2max && id2[j] != id1i) {
	  d = sqrt(d2);
	  /* range of r-grid indices whose kernel window contains d */
	  kmin = (int) floor((d-delta)/rstep);
	  kmax = (int) ceil((d+delta)/rstep);
	  if(kmin <= nr-1 && kmax >= 0) {
	    /* nonempty intersection with range of r values */
	    /* compute intersection */
	    if(kmin < 0) kmin = 0;
	    if(kmax >= nr) kmax = nr-1;
	    /* */
	    weight = coef/d;
#ifdef WEIGHTED
	    weight = weight * w2[j];
#endif
	    for(k = kmin; k <= kmax; k++) {
	      rvalue = k * rstep;
	      frac = (d - rvalue)/delta;
	      /* Epanechnikov kernel with halfwidth delta */
	      contrib = (1 - frac * frac);
	      if(contrib > 0)
		pcf[k + nr * i] += contrib * weight;
	    }
	  }
	}
      }
    }
  }
}

#undef FNAME
spatstat/src/PerfectStraussHard.h0000644000176200001440000001272613166361223016644 0ustar  liggesusers
// ..................... Strauss-Hardcore process ..........................
// $Revision: 1.3 $ $Date: 2014/02/18 10:42:53 $ class StraussHardProcess : public PointProcess { public: double beta, gamma, H, R, Hsquared, Rsquared; StraussHardProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri, double Hc); ~StraussHardProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); // void CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // void CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p); // void Beta(struct Point2 *TempCell); // void CalcBeta(Point2Pattern *p2p); }; StraussHardProcess::StraussHardProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri, double Hc) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; gamma = g; R = Ri; H = Hc; Rsquared = R * R; Hsquared = H * H; InteractionRange = R; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double StraussHardProcess::Interaction(double dsquared) { if(dsquared >= Rsquared) return(1.0); if(dsquared >= Hsquared) return(gamma); return(0.0); } void StraussHardProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void StraussHardProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating StraussHardProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating StraussHardProcess Poisson 
3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating StraussHardProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectStraussHard(SEXP beta, SEXP gamma, SEXP r, SEXP hc, SEXP xrange, SEXP yrange) { // input parameters double Beta, Gamma, R, H, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(gamma = AS_NUMERIC(gamma)); PROTECT(r = AS_NUMERIC(r)); PROTECT(hc = AS_NUMERIC(hc)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 6 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Gamma = *(NUMERIC_POINTER(gamma)); R = *(NUMERIC_POINTER(r)); H = *(NUMERIC_POINTER(hc)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise StraussHard point process StraussHardProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, Gamma, R, H); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator 
GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(10); // 6 arguments plus xout, yout, nout, out return(out); } } spatstat/src/massdisthack.c0000755000176200001440000000355613166361223015545 0ustar liggesusers/* HACKED from R-2.0.1/src/appl/massdist.c by Adrian Baddeley Changes indicated by 'AB' */ /* * R : A Computer Language for Statistical Data Analysis * Copyright (C) 1996-2004 Robert Gentleman and Ross Ihaka and the * R Development Core Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifdef HAVE_CONFIG_H
#include
#endif

#include
#include

/* Linear-binning mass distribution: spreads the mass xmass[i] of each
   finite data value x[i] onto the two nearest of *ny equally spaced
   grid points spanning [*xlow, *xhigh], writing the totals into y.
   This is R's massdist() with a per-point mass vector added (AB). */

void massdisthack(double *x, int *nx, double *xmass, /* AB: new variable */
		  double *xlow, double *xhigh,
		  double *y, int *ny)
{
    double fx, xdelta, xmi, xpos;   /* AB */
    int i, ix, ixmax, ixmin;

    ixmin = 0;
    ixmax = *ny - 2;
    /* AB: line deleted */
    /* grid spacing */
    xdelta = (*xhigh - *xlow) / (*ny - 1);

    /* zero the output bins */
    for(i=0; i < *ny ; i++)
	y[i] = 0;

    for(i=0; i < *nx ; i++) {
	if(R_FINITE(x[i])) {
	    /* position of x[i] in grid units; fx is the fractional part */
	    xpos = (x[i] - *xlow) / xdelta;
	    ix = floor(xpos);
	    fx = xpos - ix;
	    xmi = xmass[i];   /* AB: new line */
	    if(ixmin <= ix && ix <= ixmax) {
		/* interior: split mass between bins ix and ix+1 */
		y[ix] += (1 - fx) * xmi;  /* AB */
		y[ix + 1] += fx * xmi;    /* AB */
	    }
	    else if(ix == -1) {
		/* just below the grid: only the upper bin receives mass */
		y[0] += fx * xmi;  /* AB */
	    }
	    else if(ix == ixmax + 1) {
		/* just above the grid: only the lower bin receives mass */
		y[ix] += (1 - fx) * xmi;  /* AB */
	    }
	}
    }
    /* AB: lines deleted */
}
spatstat/src/minnnd.c0000644000176200001440000000105313166361223014335 0ustar  liggesusers/*

  minnnd.c

  Minimum/Maximum Nearest Neighbour Distance

  Uses code templates in minnnd.h, maxnnd.h

  $Revision: 1.4 $     $Date: 2014/09/18 01:28:48 $

*/

#include
#include
#include

#undef IGNOREZERO

/* instantiate templates for min/max nearest neighbour distance */
#define FNAME minnnd2
#include "minnnd.h"
#undef FNAME

#define FNAME maxnnd2
#include "maxnnd.h"
#undef FNAME

/* min/max nearest neighbour distance ignoring zero distances */
#define IGNOREZERO

#define FNAME minPnnd2
#include "minnnd.h"
#undef FNAME

#define FNAME maxPnnd2
#include "maxnnd.h"
#undef FNAME
spatstat/src/mhsnoop.c0000644000176200001440000001064713166361223014546 0ustar  liggesusers#include
#include
#include

#include "methas.h"
#include "mhsnoopdef.h"

/*
  mhsnoop.c

  $Revision: 1.8 $  $Date: 2013/05/27 02:09:10 $

  support for visual debugger in RMH

*/

/* To switch on debugging code, 
   insert the line: #define MH_DEBUG YES
*/
#ifndef MH_DEBUG
#define MH_DEBUG NO
#endif

/* Initialise the snooper state: it is active iff 'env' is an
   R environment containing the callback expression. */
void initmhsnoop(Snoop *s, SEXP env) {
  s->active =
isEnvironment(env); s->nextstop = 0; /* stop at iteration 0 */ s->nexttype = NO_TYPE; /* deactivated */ if(s->active) { s->env = env; s->expr = findVar(install("callbackexpr"), env); } else { s->env = s->expr = R_NilValue; } } void mhsnoop(Snoop *s, int irep, Algor *algo, State *state, Propo *prop, double numer, double denom, int *itype) { SEXP e; int npts, j; /* passed from C to R before debugger */ SEXP Sirep, Sx, Sy, Sm, Sproptype, Sproplocn, Spropmark, Spropindx; SEXP Snumer, Sdenom, Sitype; double *Px, *Py, *Pproplocn; int *Pm; /* passed from R to C after debugger */ SEXP Sinxt, Stnxt, SitypeUser; #if MH_DEBUG Rprintf("mhsnoop called at iteration %d\n", irep); #endif if(!(s->active)) return; #if MH_DEBUG Rprintf("mhsnoop is active\n"); #endif /* execute when the simulation reaches the next stopping time: a specified iteration number 'nextstop' or a specified proposal type 'nexttype' */ if(irep != s->nextstop && prop->itype != s->nexttype) return; #if MH_DEBUG Rprintf("debug triggered\n"); #endif /* environment for communication with R */ e = s->env; /* copy data to R */ /* copy iteration number */ PROTECT(Sirep = NEW_INTEGER(1)); *(INTEGER_POINTER(Sirep)) = irep; setVar(install("irep"), Sirep, e); UNPROTECT(1); /* copy (x,y) coordinates */ npts = state->npts; PROTECT(Sx = NEW_NUMERIC(npts)); PROTECT(Sy = NEW_NUMERIC(npts)); Px = NUMERIC_POINTER(Sx); Py = NUMERIC_POINTER(Sy); for(j = 0; j < npts; j++) { Px[j] = state->x[j]; Py[j] = state->y[j]; } setVar(install("xcoords"), Sx, e); setVar(install("ycoords"), Sy, e); UNPROTECT(2); /* copy marks */ if(state->ismarked) { PROTECT(Sm = NEW_INTEGER(npts)); Pm = INTEGER_POINTER(Sm); for(j = 0; j < npts; j++) { Pm[j] = state->marks[j]; } setVar(install("mcodes"), Sm, e); UNPROTECT(1); } /* proposal type */ PROTECT(Sproptype = NEW_INTEGER(1)); *(INTEGER_POINTER(Sproptype)) = prop->itype; setVar(install("proptype"), Sproptype, e); UNPROTECT(1); /* proposal coordinates */ PROTECT(Sproplocn = NEW_NUMERIC(2)); Pproplocn = 
NUMERIC_POINTER(Sproplocn); Pproplocn[0] = prop->u; Pproplocn[1] = prop->v; setVar(install("proplocn"), Sproplocn, e); UNPROTECT(1); /* proposal mark value */ if(state->ismarked) { PROTECT(Spropmark = NEW_INTEGER(1)); *(INTEGER_POINTER(Spropmark)) = prop->mrk; setVar(install("propmark"), Spropmark, e); UNPROTECT(1); } /* proposal point index */ PROTECT(Spropindx = NEW_INTEGER(1)); *(INTEGER_POINTER(Spropindx)) = prop->ix; setVar(install("propindx"), Spropindx, e); UNPROTECT(1); /* Metropolis-Hastings numerator and denominator */ PROTECT(Snumer = NEW_NUMERIC(1)); PROTECT(Sdenom = NEW_NUMERIC(1)); *(NUMERIC_POINTER(Snumer)) = numer; *(NUMERIC_POINTER(Sdenom)) = denom; setVar(install("numerator"), Snumer, e); setVar(install("denominator"), Sdenom, e); UNPROTECT(2); /* tentative outcome of proposal */ PROTECT(Sitype = NEW_INTEGER(1)); *(INTEGER_POINTER(Sitype)) = *itype; setVar(install("itype"), Sitype, e); UNPROTECT(1); /* ..... call visual debugger */ #if MH_DEBUG Rprintf("executing callback\n"); #endif eval(s->expr, s->env); /* update outcome of proposal */ SitypeUser = findVar(install("itype"), e); *itype = *(INTEGER_POINTER(SitypeUser)); #if MH_DEBUG Rprintf("Assigning itype = %d\n", *itype); #endif /* update stopping time */ Sinxt = findVar(install("inxt"), e); s->nextstop = *(INTEGER_POINTER(Sinxt)); Stnxt = findVar(install("tnxt"), e); s->nexttype = *(INTEGER_POINTER(Stnxt)); #if MH_DEBUG if(s->nextstop >= 0) Rprintf("Next stop: iteration %d\n", s->nextstop); if(s->nexttype >= 0) { if(s->nexttype == BIRTH) Rprintf("Next stop: first birth proposal\n"); if(s->nexttype == DEATH) Rprintf("Next stop: first death proposal\n"); if(s->nexttype == SHIFT) Rprintf("Next stop: first shift proposal\n"); } #endif return; } spatstat/src/xyseg.c0000755000176200001440000005263613166361223014231 0ustar liggesusers/* xyseg.c Computation with line segments xysegint compute intersections between line segments $Revision: 1.19 $ $Date: 2013/09/18 04:59:17 $ */ #include #include 
#include #include #include #include "chunkloop.h" #define NIETS -1.0 #undef DEBUG #define INSIDE01(X,E) (X * (1.0 - X) >= -E) /* --------------- PAIRS OF PSP OBJECTS ---------------------- */ /* xysegint Determines intersections between each pair of line segments drawn from two lists of line segments. Line segments are given as x0, y0, dx, dy where (x0,y0) is the first endpoint and (dx, dy) is the vector from the first to the second endpoint. Points along a line segment are represented in parametric coordinates, (x,y) = (x0, y0) + t * (dx, dy). Output from xysegint() consists of five matrices xx, yy, ta, tb, ok. The (i,j)-th entries in these matrices give information about the intersection between the i-th segment in list 'a' and the j-th segment in list 'b'. The information is ok[i,j] = 1 if there is an intersection = 0 if not xx[i,j] = x coordinate of intersection yy[i,j] = y coordinate of intersection ta[i,j] = parameter of intersection point relative to i-th segment in list 'a' tb[i,j] = parameter of intersection point relative to j-th segment in list 'b' */ void xysegint(na, x0a, y0a, dxa, dya, nb, x0b, y0b, dxb, dyb, eps, xx, yy, ta, tb, ok) /* inputs (vectors of coordinates) */ int *na, *nb; double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *xx, *yy, *ta, *tb; int *ok; { int i, j, ma, mb, ijpos, maxchunk; double determinant, absdet, diffx, diffy, tta, ttb, epsilon; ma = *na; mb = *nb; epsilon = *eps; OUTERCHUNKLOOP(j, mb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mb, maxchunk, 8196) { for(i = 0; i < ma; i++) { ijpos = j * ma + i; ok[ijpos] = 0; xx[ijpos] = yy[ijpos] = ta[ijpos] = tb[ijpos] = NIETS; determinant = dxb[j] * dya[i] - dyb[j] * dxa[i]; absdet = (determinant > 0) ? 
determinant : -determinant; #ifdef DEBUG Rprintf("i = %d, j = %d\n", i, j); Rprintf("segment A[i]: (%lf, %lf) to (%lf, %lf)\n", x0a[i], y0a[i], x0a[i] + dxa[i], y0a[i] + dya[i]); Rprintf("segment B[j]: (%lf, %lf) to (%lf, %lf)\n", x0b[j], y0b[j], x0b[j] + dxb[j], y0b[j] + dyb[j]); Rprintf("determinant=%lf\n", determinant); #endif if(absdet > epsilon) { diffx = (x0b[j] - x0a[i])/determinant; diffy = (y0b[j] - y0a[i])/determinant; ta[ijpos] = tta = - dyb[j] * diffx + dxb[j] * diffy; tb[ijpos] = ttb = - dya[i] * diffx + dxa[i] * diffy; #ifdef DEBUG Rprintf("ta = %lf, tb = %lf\n", tta, ttb); #endif if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) { /* intersection */ ok[ijpos] = 1; xx[ijpos] = x0a[i] + tta * dxa[i]; yy[ijpos] = y0a[i] + tta * dya[i]; #ifdef DEBUG Rprintf("segments intersect at (%lf, %lf)\n", xx[ijpos], yy[ijpos]); #endif } } } } } } /* Stripped-down version of xysegint that just returns logical matrix */ void xysi(na, x0a, y0a, dxa, dya, nb, x0b, y0b, dxb, dyb, eps, ok) /* inputs (vectors of coordinates) */ int *na, *nb; double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ int *ok; { int i, j, ma, mb, ijpos, maxchunk; double determinant, absdet, diffx, diffy, tta, ttb, epsilon; ma = *na; mb = *nb; epsilon = *eps; OUTERCHUNKLOOP(j, mb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mb, maxchunk, 8196) { for(i = 0; i < ma; i++) { ijpos = j * ma + i; ok[ijpos] = 0; determinant = dxb[j] * dya[i] - dyb[j] * dxa[i]; absdet = (determinant > 0) ? 
determinant : -determinant;
	if(absdet > epsilon) {
	  diffx = (x0b[j] - x0a[i])/determinant;
	  diffy = (y0b[j] - y0a[i])/determinant;
	  tta = - dyb[j] * diffx + dxb[j] * diffy;
	  ttb = - dya[i] * diffx + dxa[i] * diffy;
	  if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) {
	    /* intersection */
	    ok[ijpos] = 1;
	  }
	}
      }
    }
  }
}

/* 
   Test whether there is at least one intersection.
   Same pairwise test as xysi, but exits as soon as
   any crossing pair is found. 
*/

void xysiANY(na, x0a, y0a, dxa, dya,
	     nb, x0b, y0b, dxb, dyb,
	     eps, ok)
     /* inputs (vectors of coordinates) */
     int *na, *nb;
     double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb;
     /* input (tolerance for determinant) */
     double *eps;
     /* output (single logical value) */
     int *ok;
{
  int i, j, ma, mb, maxchunk;
  double determinant, absdet, diffx, diffy, tta, ttb, epsilon;

  *ok = 0;
  ma = *na;
  mb = *nb;
  epsilon = *eps;

  OUTERCHUNKLOOP(j, mb, maxchunk, 8196) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(j, mb, maxchunk, 8196) {
      for(i = 0; i < ma; i++) {
	/* cross product of direction vectors; (near-)zero means parallel */
	determinant = dxb[j] * dya[i] - dyb[j] * dxa[i];
	absdet = (determinant > 0) ? determinant : -determinant;
	if(absdet > epsilon) {
	  diffx = (x0b[j] - x0a[i])/determinant;
	  diffy = (y0b[j] - y0a[i])/determinant;
	  /* parametric positions of the crossing on each segment */
	  tta = - dyb[j] * diffx + dxb[j] * diffy;
	  ttb = - dya[i] * diffx + dxa[i] * diffy;
	  if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) {
	    /* intersection found: stop searching immediately */
	    *ok = 1;
	    return;
	  }
	}
      }
    }
  }
}

/* 
   Analogue of xysegint when segments in list 'a'
   are infinite vertical lines 
*/

void xysegVslice(na, xa,
		 nb, x0b, y0b, dxb, dyb,
		 eps,
		 yy, ok)
     /* inputs (vectors of coordinates) */
     int *na, *nb;
     double *xa, *x0b, *y0b, *dxb, *dyb;
     /* input (tolerance for determinant) */
     double *eps;
     /* outputs (matrices) */
     double *yy;
     int *ok;
{
  int i, j, ma, mb, ijpos, maxchunk;
  double diffx0, diffx1, width, abswidth, epsilon;
  int notvertical;

  ma = *na;
  mb = *nb;
  epsilon = *eps;

  OUTERCHUNKLOOP(j, mb, maxchunk, 8196) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(j, mb, maxchunk, 8196) {
      /* determine whether segment j is nearly vertical */
      width = dxb[j];
      abswidth = (width > 0) ?
width : -width; notvertical = (abswidth <= epsilon); for(i = 0; i < ma; i++) { ijpos = j * ma + i; ok[ijpos] = 0; yy[ijpos] = NIETS; /* test whether vertical line i separates endpoints of segment j */ diffx0 = xa[i] - x0b[j]; diffx1 = diffx0 - width; if(diffx0 * diffx1 <= 0) { /* intersection */ ok[ijpos] = 1; /* compute y-coordinate of intersection point */ if(notvertical) { yy[ijpos] = y0b[j] + diffx0 * dyb[j]/width; } else { /* vertical or nearly-vertical segment: pick midpoint */ yy[ijpos] = y0b[j] + dyb[j]/2.0; } } } } } } /* -------------- ONE PSP OBJECT ---------------------------- */ /* Similar to xysegint, but computes intersections between all pairs of segments in a single list, excluding the diagonal comparisons of course */ void xysegXint(n, x0, y0, dx, dy, eps, xx, yy, ti, tj, ok) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *xx, *yy, *ti, *tj; int *ok; { int i, j, m, mm1, ijpos, jipos, iipos, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; m = *n; epsilon = *eps; mm1 = m - 1; OUTERCHUNKLOOP(j, mm1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm1, maxchunk, 8196) { for(i = j+1; i < m; i++) { ijpos = j * m + i; jipos = i * m + j; ok[ijpos] = ok[jipos] = 0; xx[ijpos] = yy[ijpos] = ti[ijpos] = ti[jipos] = NIETS; xx[jipos] = yy[jipos] = tj[ijpos] = tj[jipos] = NIETS; determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0[j] - x0[i])/determinant; diffy = (y0[j] - y0[i])/determinant; ti[ijpos] = tti = - dy[j] * diffx + dx[j] * diffy; tj[ijpos] = ttj = - dy[i] * diffx + dx[i] * diffy; tj[jipos] = ti[ijpos]; ti[jipos] = tj[ijpos]; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { ok[ijpos] = ok[jipos] = 1; xx[ijpos] = xx[jipos] = x0[i] + tti * dx[i]; yy[ijpos] = yy[jipos] = y0[i] + tti * dy[i]; } } } } } /* assign diagonal */ for(i = 0; i < m; i++) { iipos = i * m + i; ok[iipos] = 0; xx[iipos] = yy[iipos] = ti[iipos] = tj[iipos] = NIETS; } } /* Reduced version of xysegXint that returns logical matrix 'ok' only */ void xysxi(n, x0, y0, dx, dy, eps, ok) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ int *ok; { int i, j, m, mm1, ijpos, jipos, iipos, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; m = *n; epsilon = *eps; mm1 = m - 1; OUTERCHUNKLOOP(j, mm1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm1, maxchunk, 8196) { for(i = j+1; i < m; i++) { ijpos = j * m + i; jipos = i * m + j; ok[ijpos] = ok[jipos] = 0; determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0[j] - x0[i])/determinant; diffy = (y0[j] - y0[i])/determinant; tti = - dy[j] * diffx + dx[j] * diffy; ttj = - dy[i] * diffx + dx[i] * diffy; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { ok[ijpos] = ok[jipos] = 1; } } } } } /* assign diagonal */ for(i = 0; i < m; i++) { iipos = i * m + i; ok[iipos] = 0; } } /* ---------------------- ONE CLOSED POLYGON ------------------------ */ /* Identify self-intersections in a closed polygon (Similar to xysegXint, but does not compare segments which are cyclically adjacent in the list) */ void Cxypolyselfint(n, x0, y0, dx, dy, eps, xx, yy, ti, tj, ok) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *xx, *yy, *ti, *tj; int *ok; { int i, j, k, m, m2, mm1, mm2, mstop, ijpos, jipos, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; m = *n; epsilon = *eps; m2 = m * m; /* initialise matrices */ for(k = 0; k < m2; k++) { ok[k] = 0; xx[k] = yy[k] = ti[k] = tj[k] = NIETS; } if(m <= 2) return; /* Compare j with j+2, j+3, ...., m-1 Don't compare 0 with m-1 */ mm1 = m - 1; mm2 = m - 2; OUTERCHUNKLOOP(j, mm2, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm2, maxchunk, 8196) { mstop = (j > 0) ? m : mm1; for(i = j+2; i < mstop; i++) { ijpos = j * m + i; jipos = i * m + j; determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0[j] - x0[i])/determinant; diffy = (y0[j] - y0[i])/determinant; ti[ijpos] = tti = - dy[j] * diffx + dx[j] * diffy; tj[ijpos] = ttj = - dy[i] * diffx + dx[i] * diffy; tj[jipos] = ti[ijpos]; ti[jipos] = tj[ijpos]; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { ok[ijpos] = ok[jipos] = 1; xx[ijpos] = xx[jipos] = x0[i] + tti * dx[i]; yy[ijpos] = yy[jipos] = y0[i] + tti * dy[i]; } } } } } } /* Just determines whether there is self-intersection (exits quicker & uses less space) */ void xypsi(n, x0, y0, dx, dy, xsep, ysep, eps, proper, answer) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* inputs (distances beyond which intersection is impossible) */ double *xsep, *ysep; /* input (tolerance for determinant) */ double *eps; /* input (flag) */ int *proper; /* output */ int *answer; { int i, j, m, mm1, mm2, mstop, prop, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; double Xsep, Ysep; m = *n; prop = *proper; Xsep = *xsep; Ysep = *ysep; epsilon = *eps; *answer = 0; if(m <= 2) return; /* Compare j with j+2, j+3, ...., m-1 Don't compare 0 with m-1 */ mm1 = m - 1; mm2 = m - 2; OUTERCHUNKLOOP(j, mm2, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm2, maxchunk, 8196) { mstop = (j > 0) ? m : mm1; for(i = j+2; i < mstop; i++) { diffx = x0[j] - x0[i]; diffy = y0[j] - y0[i]; if(diffx < Xsep && diffx > -Xsep && diffy < Ysep && diffy > -Ysep) { determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = diffx/determinant; diffy = diffy/determinant; tti = - dy[j] * diffx + dx[j] * diffy; ttj = - dy[i] * diffx + dx[i] * diffy; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { /* intersection occurs */ if(prop == 0 || (tti != 0.0 && tti != 1.0) || (ttj != 0.0 && ttj != 1.0)) { /* proper intersection */ *answer = 1; return; } } } } } } } } /* ---------------- .Call INTERFACE --------------------------- Analogues of functions above, but using the .Call interface and dynamic storage allocation, to save space. */ SEXP Cxysegint(SEXP x0a, SEXP y0a, SEXP dxa, SEXP dya, SEXP x0b, SEXP y0b, SEXP dxb, SEXP dyb, SEXP eps) { int i, j, k, na, nb; double determinant, absdet, diffx, diffy, tta, ttb; int nout, noutmax, newmax, maxchunk; double epsilon; double *x0A, *y0A, *dxA, *dyA, *x0B, *y0B, *dxB, *dyB; double *ta, *tb, *x, *y; int *ia, *jb; SEXP out, iAout, jBout, tAout, tBout, xout, yout; double *tAoutP, *tBoutP, *xoutP, *youtP; int *iAoutP, *jBoutP; PROTECT(x0a = AS_NUMERIC(x0a)); PROTECT(y0a = AS_NUMERIC(y0a)); PROTECT(dxa = AS_NUMERIC(dxa)); PROTECT(dya = AS_NUMERIC(dya)); PROTECT(x0b = AS_NUMERIC(x0b)); PROTECT(y0b = AS_NUMERIC(y0b)); PROTECT(dxb = AS_NUMERIC(dxb)); PROTECT(dyb = AS_NUMERIC(dyb)); PROTECT(eps = AS_NUMERIC(eps)); /* that's 9 protected */ /* get pointers */ x0A = NUMERIC_POINTER(x0a); y0A = NUMERIC_POINTER(y0a); dxA = NUMERIC_POINTER(dxa); dyA = NUMERIC_POINTER(dya); x0B = NUMERIC_POINTER(x0b); y0B = NUMERIC_POINTER(y0b); dxB = NUMERIC_POINTER(dxb); dyB = NUMERIC_POINTER(dyb); /* determine length of vectors */ na = LENGTH(x0a); nb = LENGTH(x0b); epsilon = *(NUMERIC_POINTER(eps)); /* guess amount of storage required for output */ noutmax = (na > nb) ? 
na : nb; nout = 0; ia = (int *) R_alloc(noutmax, sizeof(int)); jb = (int *) R_alloc(noutmax, sizeof(int)); ta = (double *) R_alloc(noutmax, sizeof(double)); tb = (double *) R_alloc(noutmax, sizeof(double)); x = (double *) R_alloc(noutmax, sizeof(double)); y = (double *) R_alloc(noutmax, sizeof(double)); /* scan data and collect intersections */ OUTERCHUNKLOOP(j, nb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nb, maxchunk, 8196) { for(i = 0; i < na; i++) { determinant = dxB[j] * dyA[i] - dyB[j] * dxA[i]; absdet = (determinant > 0) ? determinant : -determinant; #ifdef DEBUG Rprintf("i = %d, j = %d\n", i, j); Rprintf("segment A[i]: (%lf, %lf) to (%lf, %lf)\n", x0A[i], y0A[i], x0A[i] + dxA[i], y0A[i] + dyA[i]); Rprintf("segment B[j]: (%lf, %lf) to (%lf, %lf)\n", x0B[j], y0B[j], x0B[j] + dxB[j], y0B[j] + dyB[j]); Rprintf("determinant=%lf\n", determinant); #endif if(absdet > epsilon) { diffx = (x0B[j] - x0A[i])/determinant; diffy = (y0B[j] - y0A[i])/determinant; tta = - dyB[j] * diffx + dxB[j] * diffy; ttb = - dyA[i] * diffx + dxA[i] * diffy; #ifdef DEBUG Rprintf("ta = %lf, tb = %lf\n", tta, ttb); #endif if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) { /* intersection */ if(nout >= noutmax) { /* storage overflow - increase space */ newmax = 4 * noutmax; ia = (int *) S_realloc((char *) ia, newmax, noutmax, sizeof(int)); jb = (int *) S_realloc((char *) jb, newmax, noutmax, sizeof(int)); ta = (double *) S_realloc((char *) ta, newmax, noutmax, sizeof(double)); tb = (double *) S_realloc((char *) tb, newmax, noutmax, sizeof(double)); x = (double *) S_realloc((char *) x, newmax, noutmax, sizeof(double)); y = (double *) S_realloc((char *) y, newmax, noutmax, sizeof(double)); noutmax = newmax; } ta[nout] = tta; tb[nout] = ttb; ia[nout] = i; jb[nout] = j; x[nout] = x0A[i] + tta * dxA[i]; y[nout] = y0A[i] + tta * dyA[i]; #ifdef DEBUG Rprintf("segments intersect at (%lf, %lf)\n", x[nout], y[nout]); #endif ++nout; } } } } } /* pack up */ PROTECT(iAout = 
NEW_INTEGER(nout)); PROTECT(jBout = NEW_INTEGER(nout)); PROTECT(tAout = NEW_NUMERIC(nout)); PROTECT(tBout = NEW_NUMERIC(nout)); PROTECT(xout = NEW_NUMERIC(nout)); PROTECT(yout = NEW_NUMERIC(nout)); /* 9 + 6 = 15 protected */ iAoutP = INTEGER_POINTER(iAout); jBoutP = INTEGER_POINTER(jBout); tAoutP = NUMERIC_POINTER(tAout); tBoutP = NUMERIC_POINTER(tBout); xoutP = NUMERIC_POINTER(xout); youtP = NUMERIC_POINTER(yout); for(k = 0; k < nout; k++) { iAoutP[k] = ia[k]; jBoutP[k] = jb[k]; tAoutP[k] = ta[k]; tBoutP[k] = tb[k]; xoutP[k] = x[k]; youtP[k] = y[k]; } PROTECT(out = NEW_LIST(6)); /* 15 + 1 = 16 protected */ SET_VECTOR_ELT(out, 0, iAout); SET_VECTOR_ELT(out, 1, jBout); SET_VECTOR_ELT(out, 2, tAout); SET_VECTOR_ELT(out, 3, tBout); SET_VECTOR_ELT(out, 4, xout); SET_VECTOR_ELT(out, 5, yout); UNPROTECT(16); return(out); } /* Similar to Cxysegint, but computes intersections between all pairs of segments in a single list, excluding the diagonal comparisons of course */ SEXP CxysegXint(SEXP x0, SEXP y0, SEXP dx, SEXP dy, SEXP eps) { int i, j, k, n, n1; double determinant, absdet, diffx, diffy, tti, ttj; int nout, noutmax, newmax, maxchunk; double epsilon; double *X0, *Y0, *Dx, *Dy; double *ti, *tj, *x, *y; int *ii, *jj; SEXP out, iout, jout, tiout, tjout, xout, yout; double *tioutP, *tjoutP, *xoutP, *youtP; int *ioutP, *joutP; PROTECT(x0 = AS_NUMERIC(x0)); PROTECT(y0 = AS_NUMERIC(y0)); PROTECT(dx = AS_NUMERIC(dx)); PROTECT(dy = AS_NUMERIC(dy)); PROTECT(eps = AS_NUMERIC(eps)); /* that's 5 protected */ /* get pointers */ X0 = NUMERIC_POINTER(x0); Y0 = NUMERIC_POINTER(y0); Dx = NUMERIC_POINTER(dx); Dy = NUMERIC_POINTER(dy); /* determine length of vectors */ n = LENGTH(x0); epsilon = *(NUMERIC_POINTER(eps)); /* guess amount of storage required for output */ noutmax = n; nout = 0; ii = (int *) R_alloc(noutmax, sizeof(int)); jj = (int *) R_alloc(noutmax, sizeof(int)); ti = (double *) R_alloc(noutmax, sizeof(double)); tj = (double *) R_alloc(noutmax, sizeof(double)); x = (double 
*) R_alloc(noutmax, sizeof(double)); y = (double *) R_alloc(noutmax, sizeof(double)); /* scan data */ n1 = n - 1; OUTERCHUNKLOOP(j, n1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, n1, maxchunk, 8196) { for(i = j+1; i < n; i++) { determinant = Dx[j] * Dy[i] - Dy[j] * Dx[i]; absdet = (determinant > 0) ? determinant : -determinant; if(absdet > epsilon) { diffx = (X0[j] - X0[i])/determinant; diffy = (Y0[j] - Y0[i])/determinant; tti = - Dy[j] * diffx + Dx[j] * diffy; ttj = - Dy[i] * diffx + Dx[i] * diffy; if(INSIDE01(tti,epsilon) && INSIDE01(ttj,epsilon)) { /* intersection */ if(nout >= noutmax) { /* storage overflow - increase space */ newmax = 4 * noutmax; ii = (int *) S_realloc((char *) ii, newmax, noutmax, sizeof(int)); jj = (int *) S_realloc((char *) jj, newmax, noutmax, sizeof(int)); ti = (double *) S_realloc((char *) ti, newmax, noutmax, sizeof(double)); tj = (double *) S_realloc((char *) tj, newmax, noutmax, sizeof(double)); x = (double *) S_realloc((char *) x, newmax, noutmax, sizeof(double)); y = (double *) S_realloc((char *) y, newmax, noutmax, sizeof(double)); noutmax = newmax; } ti[nout] = tti; tj[nout] = ttj; ii[nout] = i; jj[nout] = j; x[nout] = X0[i] + tti * Dx[i]; y[nout] = Y0[i] + tti * Dy[i]; ++nout; } } } } } /* pack up */ PROTECT(iout = NEW_INTEGER(nout)); PROTECT(jout = NEW_INTEGER(nout)); PROTECT(tiout = NEW_NUMERIC(nout)); PROTECT(tjout = NEW_NUMERIC(nout)); PROTECT(xout = NEW_NUMERIC(nout)); PROTECT(yout = NEW_NUMERIC(nout)); /* 5 + 6 = 11 protected */ ioutP = INTEGER_POINTER(iout); joutP = INTEGER_POINTER(jout); tioutP = NUMERIC_POINTER(tiout); tjoutP = NUMERIC_POINTER(tjout); xoutP = NUMERIC_POINTER(xout); youtP = NUMERIC_POINTER(yout); for(k = 0; k < nout; k++) { ioutP[k] = ii[k]; joutP[k] = jj[k]; tioutP[k] = ti[k]; tjoutP[k] = tj[k]; xoutP[k] = x[k]; youtP[k] = y[k]; } PROTECT(out = NEW_LIST(6)); /* 11 + 1 = 12 protected */ SET_VECTOR_ELT(out, 0, iout); SET_VECTOR_ELT(out, 1, jout); SET_VECTOR_ELT(out, 2, tiout); 
SET_VECTOR_ELT(out, 3, tjout); SET_VECTOR_ELT(out, 4, xout); SET_VECTOR_ELT(out, 5, yout); UNPROTECT(12); return(out); } spatstat/src/knndistance.c0000644000176200001440000001043313166361223015355 0ustar liggesusers/* knndistance.c K-th Nearest Neighbour Distances between points Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 Licence: GNU Public Licence >= 2 $Revision: 1.8 $ $Date: 2013/12/10 03:29:45 $ Function definitions are #included from knndist.h and knnXdist.h THE FOLLOWING FUNCTIONS ASSUME THAT y IS SORTED IN ASCENDING ORDER SINGLE LIST: knndsort k-th nearest neighbour distances knnwhich k-th nearest neighbours knnsort k-th nearest neighbours and their distances ONE LIST TO ANOTHER LIST: knnXdist Nearest neighbour distance from one list to another knnXwhich Nearest neighbour ID from one list to another knnX Nearest neighbour ID & distance from one list to another ONE LIST TO ANOTHER OVERLAPPING LIST: knnXEdist Nearest neighbour distance from one list to another, overlapping knnXEwhich Nearest neighbour ID from one list to another, overlapping knnXE Nearest neighbour ID & distance */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */ /* ------------------- one point pattern X --------------------- */ /* knndsort nearest neighbours 1:kmax returns distances only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knndsort #define DIST #include "knndist.h" /* knnwhich nearest neighbours 1:kmax returns identifiers only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnwhich #define WHICH #include "knndist.h" /* knnsort nearest neighbours 1:kmax returns distances and indices */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnsort #define DIST #define WHICH #include "knndist.h" /* --------------- two distinct point patterns X and Y --------------- */ /* general interface */ 
void knnXinterface(n1, x1, y1, id1, n2, x2, y2, id2, kmax, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; int *kmax; /* options */ int *exclude, *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { void knnX(), knnXdist(), knnXwhich(); void knnXE(), knnXEdist(), knnXEwhich(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { knnX(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } else if(di) { knnXdist(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } else if(wh) { knnXwhich(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } } else { if(di && wh) { knnXE(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } else if(di) { knnXEdist(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } else if(wh) { knnXEwhich(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } } } /* Turn off the debugging tracer in knnXdist.h */ #undef TRACER /* knnXdist returns distances only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXdist #define DIST #include "knnXdist.h" /* knnXwhich returns identifiers only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXwhich #define WHICH #include "knnXdist.h" /* knnX returns distances and indices */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnX #define DIST #define WHICH #include "knnXdist.h" /* --------------- overlapping point patterns X and Y --------------- */ /* knnXEdist returns distances only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXEdist #define DIST #define EXCLUDE #include "knnXdist.h" /* knnXEwhich returns identifiers only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXEwhich #define WHICH #define EXCLUDE #include "knnXdist.h" /* 
knnXE returns distances and indices */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXE #define DIST #define WHICH #define EXCLUDE #include "knnXdist.h" spatstat/src/geom3.h0000755000176200001440000000041013166361223014070 0ustar liggesusers/* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Definitions for 3D geometrical structures */ typedef struct Point { double x; double y; double z; } Point; typedef struct Box { double x0; double x1; double y0; double y1; double z0; double z1; } Box; spatstat/src/lineardisc.c0000755000176200001440000002030613166361223015174 0ustar liggesusers#include #include #include "chunkloop.h" /* lineardisc.c Disc of radius r in linear network $Revision: 1.12 $ $Date: 2016/07/15 13:56:07 $ */ #define DPATH(I,J) dpath[(J) + Nv * (I)] #include "yesno.h" #undef DEBUG void lineardisc(f, seg, /* centre of disc (local coords) */ r, /* radius of disc */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ lengths, /* segment lengths */ allinside, boundary, dxv, nendpoints) int *nv, *ns; int *from, *to; /* integer vectors (mappings) */ double *f, *r; int *seg; double *xv, *yv; /* vectors of coordinates of vertices */ double *dpath; /* matrix of shortest path distances between vertices */ double *lengths; /* vector of segment lengths */ /* OUTPUTS */ int *allinside, *boundary; /* vectors of status for each segment */ double *dxv; /* vector of distances for each vertex */ int *nendpoints; { int Nv, Ns; double f0, rad; int seg0; int i, A, B, fromi, toi, allin, bdry, reachable, nends, maxchunk; double length0, dxA, dxB, dxAvi, dxBvi, residue; double *resid; int *covered; Nv = *nv; Ns = *ns; f0 = *f; seg0 = *seg; rad = *r; /* endpoints of segment containing centre */ A = from[seg0]; B = to[seg0]; /* distances from x to A and B */ length0 = lengths[seg0]; dxA = f0 * length0; dxB = (1-f0) * length0; /* visit vertices */ covered = (int *) R_alloc((size_t) Nv, 
sizeof(int)); resid = (double *) R_alloc((size_t) Nv, sizeof(double)); OUTERCHUNKLOOP(i, Nv, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Nv, maxchunk, 16384) { /* distance going through A */ dxAvi = dxA + DPATH(A,i); /* distance going through B */ dxBvi = dxB + DPATH(B,i); /* shortest path distance to this vertex */ dxv[i] = (dxAvi < dxBvi) ? dxAvi : dxBvi; /* distance left to 'spend' from this vertex */ residue = rad - dxv[i]; resid[i] = (residue > 0)? residue : 0; /* determine whether vertex i is inside the disc of radius r */ covered[i] = (residue >= 0); } } /* Now visit line segments. */ nends = 0; OUTERCHUNKLOOP(i, Ns, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 16384) { /* Determine which line segments are completely inside the disc, and which cross the boundary. */ if(i == seg0) { /* initial segment: disc starts from centre (x, y) */ allin = covered[A] && covered[B]; bdry = !allin; if(bdry) { if(!covered[A]) nends++; if(!covered[B]) nends++; } } else { /* another segment: disc extends in from either endpoint */ fromi = from[i]; toi = to[i]; reachable = (covered[fromi] || covered[toi]); if(reachable) { allin = covered[fromi] && covered[toi] && (resid[fromi] + resid[toi] >= lengths[i]); bdry = !allin; } else allin = bdry = NO; if(bdry) { if(covered[fromi]) nends++; if(covered[toi]) nends++; } } allinside[i] = allin; boundary[i] = bdry; } } *nendpoints = nends; } /* ------------------------------------------------- */ /* count endpoints of several discs in a network */ /* ------------------------------------------------- */ void Ccountends(np, f, seg, /* centres of discs (local coords) */ r, /* radii of discs */ nv, xv, yv, /* network vertices */ ns, from, to, /* network segments */ dpath, /* shortest path distances between vertices */ lengths, /* segment lengths */ toler, /* tolerance */ nendpoints /* output counts of endpoints */ ) int *np, *nv, *ns; int *from, *to; /* integer vectors (mappings) */ double *f, 
*r; int *seg; double *xv, *yv; /* vectors of coordinates of vertices */ double *dpath; /* matrix of shortest path distances between vertices */ double *lengths; /* vector of segment lengths */ double *toler; /* tolerance for merging endpoints and vertices */ /* OUTPUT */ int *nendpoints; { int Np, Nv, Ns; double f0, rad; int seg0; int i, m, A, B, fromi, toi, reachable, nends, maxchunk, covfrom, covto, allin; double length0, dxA, dxB, dxAvi, dxBvi, dxvi, residue, resfrom, resto, tol; double *resid; int *covered, *terminal; Np = *np; Nv = *nv; Ns = *ns; tol = *toler; #ifdef DEBUG Rprintf("\nTolerance = %lf\n", tol); #endif covered = (int *) R_alloc((size_t) Nv, sizeof(int)); terminal = (int *) R_alloc((size_t) Nv, sizeof(int)); resid = (double *) R_alloc((size_t) Nv, sizeof(double)); /* loop over centre points */ OUTERCHUNKLOOP(m, Np, maxchunk, 256) { R_CheckUserInterrupt(); INNERCHUNKLOOP(m, Np, maxchunk, 256) { f0 = f[m]; seg0 = seg[m]; rad = r[m]; #ifdef DEBUG Rprintf("\nCentre point %d lies in segment %d\n", m, seg0); #endif /* endpoints of segment containing centre */ A = from[seg0]; B = to[seg0]; /* distances from centre to A and B */ length0 = lengths[seg0]; dxA = f0 * length0; dxB = (1-f0) * length0; #ifdef DEBUG Rprintf("Distances to endpoints: dxA=%lf, dxB=%lf\n", dxA, dxB); #endif nends = 0; /* visit vertices */ for(i = 0; i < Nv; i++) { #ifdef DEBUG Rprintf("\nConsidering vertex %d\n", i); #endif /* distance going through A */ dxAvi = dxA + DPATH(A,i); /* distance going through B */ dxBvi = dxB + DPATH(B,i); /* shortest path distance to this vertex */ dxvi = (dxAvi < dxBvi) ? 
dxAvi : dxBvi; /* distance left to 'spend' from this vertex */ residue = rad - dxvi; #ifdef DEBUG Rprintf("dxAvi = %lf; dxBvi = %lf; residue = %lf\n", dxAvi, dxBvi, residue); #endif if(residue > tol) { resid[i] = residue; covered[i] = YES; terminal[i] = NO; #ifdef DEBUG Rprintf("Vertex is covered\n"); #endif } else if(residue < -tol) { resid[i] = 0; covered[i] = terminal[i] = NO; #ifdef DEBUG Rprintf("Vertex is not covered\n"); #endif } else { /* vertex is within 'tol' of an endpoint - deem it to be one */ resid[i] = 0; covered[i] = terminal[i] = YES; /* vertex is an endpoint of disc */ ++nends; #ifdef DEBUG Rprintf("Vertex is a terminal endpoint\n"); #endif } } #ifdef DEBUG Rprintf("%d terminal endpoints\n", nends); #endif /* Now visit line segments to count any endpoints that are interior to the segments. */ for(i = 0; i < Ns; i++) { /* Determine which line segments are completely inside the disc, and which cross the boundary. */ if(i == seg0) { /* initial segment: disc starts from (x0, y0) */ if(!covered[A]) nends++; if(!covered[B]) nends++; #ifdef DEBUG if(!covered[A]) Rprintf("A not covered\n"); if(!covered[B]) Rprintf("B not covered\n"); #endif } else { /* another segment: disc extends in from either endpoint */ fromi = from[i]; toi = to[i]; covfrom = covered[fromi]; covto = covered[toi]; resfrom = resid[fromi]; resto = resid[toi]; reachable = covfrom || covto; #ifdef DEBUG residue = resfrom + resto - lengths[i]; Rprintf("%d: %s %s: %lf + %lf - %lf = %lf sign %s\n", i, (terminal[fromi]) ? "T" : ((covfrom) ? "Y" : "N"), (terminal[toi]) ? "T" : ((covto) ? "Y" : "N"), resfrom, resto, lengths[i], residue, (residue < 0) ? "-" : ((residue > 0) ? 
"+" : "0")); #endif if(reachable) { residue = resfrom + resto - lengths[i]; allin = covfrom && covto && (residue >= 0); #ifdef DEBUG if(allin) { Rprintf("Covered\n"); } else if((terminal[fromi] || terminal[toi]) && (residue >= - tol * lengths[i])) { Rprintf("Deemed to be covered\n"); } else Rprintf("Reachable\n"); #endif allin = allin || ((terminal[fromi] || terminal[toi]) && (residue >= - tol)); if(!allin) { /* segment is not entirely covered by disc - infer endpoint(s) in interior of segment */ if(covfrom && !terminal[fromi]) nends++; if(covto && !terminal[toi]) nends++; #ifdef DEBUG if(covfrom && !terminal[fromi]) Rprintf("fromi => end\n"); if(covto && !terminal[toi]) Rprintf("toi => end\n"); #endif } } } } nendpoints[m] = nends; } } } spatstat/src/nndistance.c0000755000176200001440000001054513166361223015211 0ustar liggesusers/* nndistance.c Nearest Neighbour Distances between points Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2012 Licence: GNU Public Licence >= 2 $Revision: 1.21 $ $Date: 2013/11/03 03:36:27 $ THE FOLLOWING FUNCTIONS ASSUME THAT y IS SORTED IN ASCENDING ORDER SINGLE LIST: nndistsort Nearest neighbour distances nnwhichsort Nearest neighbours nnsort Nearest neighbours & distances ONE LIST TO ANOTHER LIST: nnXdist Nearest neighbour distance from one list to another nnXwhich Nearest neighbour ID from one list to another nnX Nearest neighbour ID & distance from one list to another ONE LIST TO ANOTHER OVERLAPPING LIST: nnXEdist Nearest neighbour distance from one list to another, overlapping nnXEwhich Nearest neighbour ID from one list to another, overlapping nnXE Nearest neighbour ID & distance */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */ /* ------------------- one point pattern X --------------------- */ /* nndistsort: nearest neighbour distances */ #undef FNAME #undef DIST #undef WHICH #define FNAME nndistsort 
#define DIST #include "nndist.h" /* nnwhichsort: id of nearest neighbour */ #undef FNAME #undef DIST #undef WHICH #define FNAME nnwhichsort #define WHICH #include "nndist.h" /* nnsort: distance & id of nearest neighbour */ #undef FNAME #undef DIST #undef WHICH #define FNAME nnsort #define DIST #define WHICH #include "nndist.h" /* --------------- two distinct point patterns X and Y ----------------- */ /* general interface */ void nnXinterface(n1, x1, y1, id1, n2, x2, y2, id2, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; /* options */ int *exclude, *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; { void nnX(), nnXdist(), nnXwhich(); void nnXE(), nnXEdist(), nnXEwhich(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { nnX(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(di) { nnXdist(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(wh) { nnXwhich(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } } else { if(di && wh) { nnXE(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(di) { nnXEdist(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(wh) { nnXEwhich(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } } } /* nnXdist: nearest neighbour distance (from each point of X to the nearest point of Y) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXdist #define DIST #include "nndistX.h" /* nnXwhich: nearest neighbour id */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXwhich #define WHICH #include "nndistX.h" /* nnX: nearest neighbour distance and id */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnX #define DIST #define WHICH #include "nndistX.h" /* --------------- two point patterns X and Y with common points --------- */ /* Code numbers id1, id2 are attached to the patterns 
X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. */ /* nnXEdist: similar to nnXdist but allows X and Y to include common points (which are not to be counted as neighbours) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXEdist #define DIST #define EXCLUDE #include "nndistX.h" /* nnXEwhich: similar to nnXwhich but allows X and Y to include common points (which are not to be counted as neighbours) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXEwhich #define WHICH #define EXCLUDE #include "nndistX.h" /* nnXE: similar to nnX but allows X and Y to include common points (which are not to be counted as neighbours) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXE #define DIST #define WHICH #define EXCLUDE #include "nndistX.h" spatstat/src/linknnd.h0000644000176200001440000001013613166361223014516 0ustar liggesusers/* linknnd.h k-th nearest neighbours in a linear network Using sparse representation of network ! Data points must be ordered by segment index ! This code is #included several times in linknnd.c Macros required: FNAME Function name CROSS #defined for X-to-Y, undefined for X-to-X HUH debugging flag $Revision: 1.2 $ $Date: 2016/12/04 12:34:19 $ */ #define MAT(MATRIXNAME, INDEX, ORDER) MATRIXNAME[(ORDER) + (INDEX) * Kmax] #define NNDIST(INDEX, ORDER) MAT(nndist, (INDEX), (ORDER)) #define NNWHICH(INDEX, ORDER) MAT(nnwhich, (INDEX), (ORDER)) #define VDIST(INDEX, ORDER) MAT(dminvert, (INDEX), (ORDER)) #define VWHICH(INDEX, ORDER) MAT(whichvert, (INDEX), (ORDER)) #define UPDATENN(INDEX, D, J) \ UpdateKnnList(D, J, \ nndist + (INDEX) * Kmax, \ nnwhich + (INDEX) * Kmax, \ Kmax, \ (double) 0.0) /* ................. 
*/ void FNAME(kmax, /* number of neighbours required */ np, sp, tp, /* source data points (ordered by sp) */ #ifdef CROSS nq, sq, tq, /* target data points (ordered by sq) */ #endif nv, /* number of network vertices */ ns, from, to, /* segments (pairs of vertices) */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ nndist, /* distance from each source point to the nearest, ..., kth nearest target points */ nnwhich /* identifies which target points */ ) int *kmax; int *np, *nv, *ns; /* number of points, vertices, segments */ int *sp, *from, *to; /* integer vectors (mappings) */ double *tp; /* fractional location coordinates */ #ifdef CROSS int *nq, *sq; double *tq; #endif double *huge, *tol; double *seglen; double *nndist; int *nnwhich; { int Np, Nv, Kmax, Nout, i, j, ivleft, ivright, jfirst, jlast, k, m; double d, hugevalue, slen, tpi, deltad; double *dminvert; /* min dist from each vertex */ int *whichvert; /* which min from each vertex */ int linvknndist(), UpdateKnnList(); #ifdef CROSS int Nq; #else #define Nq Np #define nq np #define sq sp #define tq tp #endif Kmax = *kmax; Np = *np; Nv = *nv; hugevalue = *huge; #ifdef CROSS Nq = *nq; #endif /* First compute min distances to target set from each vertex */ #ifdef HUH Rprintf("Computing distances from each vertex\n"); #endif dminvert = (double *) R_alloc(Nv * Kmax, sizeof(double)); whichvert = (int *) R_alloc(Nv * Kmax, sizeof(int)); linvknndist(kmax, nq, sq, tq, nv, ns, from, to, seglen, huge, tol, dminvert, whichvert); #ifdef HUH Rprintf("Initialise answer\n"); #endif /* initialise nn distances from source points */ Nout = Np * Kmax; for(i = 0; i < Nout; i++) { nndist[i] = hugevalue; nnwhich[i] = -1; } /* run through all source points */ #ifdef HUH Rprintf("Run through source points\n"); #endif jfirst = 0; for(i = 0; i < Np; i++) { tpi = tp[i]; m = sp[i]; /* segment containing this point */ slen = seglen[m]; ivleft = from[m]; ivright = 
to[m]; #ifdef HUH Rprintf("Source point %d lies on segment %d = [%d,%d]\n", i, m, ivleft, ivright); #endif deltad = slen * tpi; #ifdef HUH Rprintf("\tComparing to left endpoint %d, distance %lf\n", ivleft, deltad); #endif for(k = 0; k < Kmax; k++) UPDATENN(i, deltad + VDIST(ivleft, k), VWHICH(ivleft, k)); deltad = slen * (1.0 - tpi); #ifdef HUH Rprintf("\tComparing to right endpoint %d, distance %lf\n", ivright, deltad); #endif for(k = 0; k < Kmax; k++) UPDATENN(i, deltad + VDIST(ivright, k), VWHICH(ivright, k)); /* find any target points in this segment */ while(jfirst < Nq && sq[jfirst] < m) jfirst++; jlast = jfirst; while(jlast < Nq && sq[jlast] == m) jlast++; --jlast; /* if there are no such points, then jlast < jfirst */ if(jfirst <= jlast) { for(j = jfirst; j <= jlast; j++) { d = slen * fabs(tq[j] - tpi); UPDATENN(i, d, j); } } } } #undef MAT #undef NNDIST #undef NNWHICH #undef VDIST #undef VWHICH #undef UPDATENN #ifndef CROSS #undef nq #undef Nq #undef sq #undef tq #endif spatstat/src/dinfty.c0000755000176200001440000000677613166361223014373 0ustar liggesusers/* dinfty.c $Revision: 1.6 $ $Date: 2011/09/20 07:42:18 $ Code by Dominic Schuhmacher Modified by Adrian Baddeley */ #include #include #define COST(I,J) (d)[n * (J) + (I)] int arraymax(int *a, int n); void swap(int i, int j, int *a); int largestmobpos(int *mobile, int *current, int *collectvals, int n); /* ------------ The main function ----------------------------- */ void dinfty_R(int *d, int *num, int *assignment) { int i,j; /* indices */ int lmp, lmq; /* largest mobile position and its neighbor */ int newmax; int n, currmin; int *current, *travel, *mobile, *assig, *distrelev, *collectvals; n = *num; /* scratch space */ assig = (int *) R_alloc((long) n, sizeof(int)); travel = (int *) R_alloc((long) n, sizeof(int)); mobile = (int *) R_alloc((long) n, sizeof(int)); current = (int *) R_alloc((long) n, sizeof(int)); distrelev = (int *) R_alloc((long) n, sizeof(int)); collectvals = (int *) R_alloc((long) 
(n * n), sizeof(int)); /* */ /* We use the Johnson-Trotter Algorithm for listing permutations */ /* */ /* Initialize the algorithm */ for (i = 0; i < n; i++) { travel[i] = -1; /* all numbers traveling to the left */ mobile[i] = 1; /* all numbers mobile */ current[i] = i; /* current permutation is the identity */ assig[i] = i; /* best permutation up to now is the identity */ distrelev[i] = COST(i, i); /* pick relevant entries in the cost matrix */ } currmin = arraymax(distrelev, n); /* minimal max up to now */ /* The main loop */ while(arraymax(mobile, n) == 1) { lmp = largestmobpos(mobile, current, collectvals, n); lmq = lmp + travel[lmp]; swap(lmp, lmq, current); swap(lmp, lmq, travel); for (i = 0; i < n; i++) { if (current[i] > current[lmq]) travel[i] = -travel[i]; j = i + travel[i]; if (j < 0 || j > n-1 || current[i] < current[j]) mobile[i] = 0; else mobile[i] = 1; distrelev[i] = COST(i, current[i]); } /* Calculation of new maximal value */ newmax = arraymax(distrelev, n); if (newmax < currmin) { currmin = newmax; for (i = 0; i < n; i++) { assig[i] = current[i]; } } } /* For testing: print distance from within C program Rprintf("Prohorov distance is %d\n", currmin); */ /* "Return" the final assignment */ for (i = 0; i < n; i++) { assignment[i] = assig[i] + 1; } } /* ------------------------------------------------------------*/ /* Maximal element of an integer array */ int arraymax(int *a, int n) { int i, amax; if(n < 1) return(-1); amax = a[0]; if(n > 1) for(i = 0; i < n; i++) if(a[i] > amax) amax = a[i]; return(amax); } /* Swap elements i and j in array a */ void swap(int i, int j, int *a) { int v; v = a[i]; a[i] = a[j]; a[j] = v; } /* Return index of largest mobile number in current */ int largestmobpos(int *mobile, int *current, int *collectvals, int n) { int i,j, maxval; j = 0; for (i = 0; i < n; i++) { if (mobile[i] == 1) { collectvals[j] = current[i]; j++; } } maxval = arraymax(collectvals, j); for (i = 0; i < n; i++) { if (current[i] == maxval) { 
return(i); } } error("Internal error: largestmobpos failed"); return(0); } spatstat/src/PerfectHardcore.h0000644000176200001440000001137713166361223016131 0ustar liggesusers // ........................... Hardcore process .......................... // $Revision: 1.4 $ $Date: 2012/03/10 11:23:09 $ class HardcoreProcess : public PointProcess { public: double beta, R, Rsquared; HardcoreProcess(double xmin, double xmax, double ymin, double ymax, double b, double Ri); ~HardcoreProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; HardcoreProcess::HardcoreProcess(double xmin, double xmax, double ymin, double ymax, double b, double Ri) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; R = Ri; Rsquared = R * R; InteractionRange = R; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double HardcoreProcess::Interaction(double dsquared) { double rtn; rtn = 1; if(dsquared < Rsquared) rtn = 0; return(rtn); } void HardcoreProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void HardcoreProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating HardcoreProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating HardcoreProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating HardcoreProcess Poisson 3.6\n"); 
TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectHardcore(SEXP beta, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 4 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); R = *(NUMERIC_POINTER(r)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Hardcore point process HardcoreProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); 
PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(8); // 4 arguments plus xout, yout, nout, out return(out); } } spatstat/src/geyer.c0000755000176200001440000002363213166361223014177 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" void fexitc(const char *msg); #undef MH_DEBUG /* Conditional intensity function for a Geyer saturation process. */ typedef struct Geyer { /* model parameters */ double gamma; double r; double s; /* transformations of the parameters */ double r2; double loggamma; int hard; /* periodic distance */ double *period; int per; /* auxiliary counts */ int *aux; #ifdef MH_DEBUG int *freshaux; int prevtype; #endif } Geyer; Cdata *geyerinit(state, model, algo) State state; Model model; Algor algo; { int i, j, n1; Geyer *geyer; double r2; double *period; DECLARE_CLOSE_VARS; geyer = (Geyer *) R_alloc(1, sizeof(Geyer)); /* Interpret model parameters*/ geyer->gamma = model.ipar[0]; geyer->r = model.ipar[1]; /* not squared any more */ geyer->s = model.ipar[2]; geyer->r2 = geyer->r * geyer->r; #ifdef MHDEBUG Rprintf("Initialising Geyer gamma=%lf, r=%lf, sat=%lf\n", geyer->gamma, geyer->r, geyer->s); #endif /* is the model numerically equivalent to hard core ? */ geyer->hard = (geyer->gamma < DOUBLE_EPS); geyer->loggamma = (geyer->hard) ? 0 : log(geyer->gamma); /* periodic boundary conditions? 
*/ geyer->period = model.period; geyer->per = (model.period[0] > 0.0); /* allocate storage for auxiliary counts */ geyer->aux = (int *) R_alloc((size_t) state.npmax, sizeof(int)); #ifdef MH_DEBUG geyer->freshaux = (int *) R_alloc((size_t) state.npmax, sizeof(int)); geyer->prevtype = -42; #endif r2 = geyer->r2; /* Initialise auxiliary counts */ for(i = 0; i < state.npmax; i++) geyer->aux[i] = 0; if(geyer->per) { /* periodic */ period = geyer->period; if(state.npts > 1) { n1 = state.npts - 1; for(i = 0; i < n1; i++) { for(j = i+1; j < state.npts; j++) { if(CLOSE_PERIODIC(state.x[i], state.y[i], state.x[j], state.y[j], period, r2)) { geyer->aux[i] += 1; geyer->aux[j] += 1; } } } } } else { /* Euclidean distance */ if(state.npts > 1) { n1 = state.npts - 1; for(i = 0; i < n1; i++) { for(j = i+1; j < state.npts; j++) { if(CLOSE(state.x[i], state.y[i], state.x[j], state.y[j], r2)) { geyer->aux[i] += 1; geyer->aux[j] += 1; } } } } } return((Cdata *) geyer); } double geyercif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int ix, j, npts, tee; double u, v, r2, s; double w, a, b, f, cifval; double *x, *y; int *aux; double *period; Geyer *geyer; DECLARE_CLOSE_VARS; geyer = (Geyer *) cdata; npts = state.npts; if(npts==0) return ((double) 1.0); x = state.x; y = state.y; u = prop.u; v = prop.v; ix = prop.ix; r2 = geyer->r2; s = geyer->s; period = geyer->period; aux = geyer->aux; /* tee = neighbour count at the point in question; w = sum of changes in (saturated) neighbour counts at other points */ tee = w = 0.0; if(prop.itype == BIRTH) { if(geyer->per) { /* periodic distance */ for(j=0; j 1) /* j is not saturated after addition of (u,v) */ w = w + 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w = w + f; } } } else { /* Euclidean distance */ for(j=0; j 1) /* j is not saturated after addition of (u,v) */ w = w + 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by 
addition of (u,v) */ w = w + f; } } } } else if(prop.itype == DEATH) { tee = aux[ix]; if(geyer->per) { /* Periodic distance */ for(j=0; j 0) /* j is not saturated */ w = w + 1; /* deletion of 'ix' decreases count by 1 */ else { f = f+1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w = w + f; } } } } } else { /* Euclidean distance */ for(j=0; j 0) /* j was not saturated */ w = w + 1; /* deletion of 'ix' decreases count by 1 */ else { f = f+1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w = w + f; } } } } } } else if(prop.itype == SHIFT) { /* Compute the cif at the new point, not the ratio of new/old */ if(geyer->per) { /* Periodic distance */ for(j=0; j= b) w = w + 1; } } } else { /* Euclidean distance */ for(j=0; j= b) w = w + 1; } } } } w = w + ((tee < s) ? tee : s); if(geyer->hard) { if(tee > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp(geyer->loggamma*w); return cifval; } void geyerupd(state, prop, cdata) State state; Propo prop; Cdata *cdata; { /* Declare other variables */ int ix, npts, j; int oldclose, newclose; double u, v, xix, yix, r2; double *x, *y; int *aux; double *period; Geyer *geyer; #ifdef MH_DEBUG int *freshaux; int i; int oc, nc; #endif DECLARE_CLOSE_VARS; geyer = (Geyer *) cdata; period = geyer->period; aux = geyer->aux; r2 = geyer->r2; x = state.x; y = state.y; npts = state.npts; #ifdef MH_DEBUG /* ........................ debugging cross-check ................ 
*/ /* recompute 'aux' values afresh */ freshaux = geyer->freshaux; for(i = 0; i < state.npts; i++) freshaux[i] = 0; if(geyer->per) { /* periodic */ for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(i == j) continue; if(CLOSE_PERIODIC(state.x[i], state.y[i], state.x[j], state.y[j], period, r2)) freshaux[i] += 1; } } } else { /* Euclidean distance */ for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(i == j) continue; if(CLOSE(state.x[i], state.y[i], state.x[j], state.y[j], r2)) freshaux[i] += 1; } } } /* Check agreement with 'aux' */ for(j = 0; j < state.npts; j++) { if(aux[j] != freshaux[j]) { Rprintf("\n\taux[%d] = %d, freshaux[%d] = %d\n", j, aux[j], j, freshaux[j]); Rprintf("\tnpts = %d\n", state.npts); Rprintf("\tperiod = (%lf, %lf)\n", period[0], period[1]); if(geyer->prevtype == BIRTH) error("updaux failed after BIRTH"); if(geyer->prevtype == DEATH) error("updaux failed after DEATH"); if(geyer->prevtype == SHIFT) error("updaux failed after SHIFT"); error("updaux failed at start"); } } /* OK. Record type of this transition */ geyer->prevtype = prop.itype; /* ................ end debug cross-check ................ 
*/ #endif if(prop.itype == BIRTH) { /* Birth */ u = prop.u; v = prop.v; /* initialise auxiliary counter for new point */ aux[npts] = 0; /* update all auxiliary counters */ if(geyer->per) { /* periodic distance */ for(j=0; j < npts; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { aux[j] += 1; aux[npts] += 1; } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { if(CLOSE(u,v,x[j],y[j],r2)) { aux[j] += 1; aux[npts] += 1; } } } } else if(prop.itype == DEATH) { /* Death */ ix = prop.ix; u = x[ix]; v = y[ix]; /* decrement auxiliary counter for each point */ if(geyer->per) { /* periodic distance */ for(j=0; j= ix) aux[j-1] = aux[j]; } } else { /* Euclidean distance */ for(j=0; j= ix) aux[j-1] = aux[j]; } } } else if(prop.itype == SHIFT) { /* Shift */ u = prop.u; v = prop.v; ix = prop.ix; xix = x[ix]; yix = y[ix]; /* recompute auxiliary counter for point 'ix' */ aux[ix] = 0; /* update auxiliary counters for other points */ if(geyer->per) { for(j=0; j #include #include #include "chunkloop.h" /* localpcf.c $Revision: 1.3 $ $Date: 2013/05/27 02:09:10 $ Assumes point patterns are sorted in increasing order of x coordinate */ #undef WEIGHTED #include "localpcf.h" #define WEIGHTED 1 #include "localpcf.h" spatstat/src/seg2pix.c0000755000176200001440000001435413166361223014446 0ustar liggesusers#include #include #include #include #include "chunkloop.h" #undef DEBUG /* seg2pix.c Discretise line segment on pixel grid seg2pixI pixel value is indicator = 1 if any line crosses pixel seg2pixN pixel value is (weighted) number of lines crossing pixel seg2pixL pixel value is total (weighted) length of lines inside pixel (rescale R data so that pixels are integer) pixels numbered 0, ..., nx-1 and 0, ..., ny-1 with boundaries at x=0, x=nx, y=0, y=ny. 
*/ #define V(I,J) out[(I) + (J) * (Ny)] int clamp(k, n0, n1) int k, n0, n1; { int m; m = k; if(m < n0) m = n0; if(m > n1) m = n1; return(m); } /* function 'seg2pixI' returns indicator = 1 if pixel is hit by any segment */ #define FNAME seg2pixI #undef SUMUP #include "seg2pix.h" #undef FNAME /* function 'seg2pixN' returns (weighted) number of segments hitting pixel */ #define FNAME seg2pixN #define SUMUP #include "seg2pix.h" #undef FNAME #undef SUMUP /* the other one is anomalous... */ void seg2pixL(ns,x0,y0,x1,y1,weights,pixwidth,pixheight,nx,ny,out) int *ns; double *x0,*y0,*x1,*y1,*weights; /* segment coordinates and weights */ double *pixwidth, *pixheight; /* original pixel dimensions */ int *nx, *ny; double *out; /* output matrix */ { int Ns, Nx, Ny, i, j, k, m, mmin, mmax, maxchunk; double x0i, x1i, y0i, y1i; double leni; double xleft, yleft, xright, yright, slope, scalesecant; double xlow, xhigh, ylow, yhigh, invslope, scalecosecant; double xstart, ystart, xfinish, yfinish; double xxx0, xxx1, yyy0, yyy1; int mleft, mright, kstart, kfinish, kmin, kmax; double pwidth, pheight, pwidth2, pheight2; double wti; Ns = *ns; Nx = *nx; Ny = *ny; /* one scaled x unit = 'pwidth' original x units one scaled y unit = 'pheight' original y units */ pwidth = *pixwidth; pheight = *pixheight; pwidth2 = pwidth * pwidth; pheight2 = pheight * pheight; /* zero the matrix */ for(k = 0; k < Ny - 1; k++) for(j = 0; j < Nx - 1; j++) V(k, j) = 0; OUTERCHUNKLOOP(i, Ns, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 8196) { x0i = x0[i]; y0i = y0[i]; x1i = x1[i]; y1i = y1[i]; wti = weights[i]; leni = sqrt(pwidth2 * pow(x1i - x0i, 2) + pheight2 * pow(y1i-y0i, 2)); #ifdef DEBUG Rprintf("(%lf, %lf) to (%lf, %lf), length %lf\n", x0i, y0i, x1i, y1i, leni); #endif if(leni < 0.001) { /* tiny segment */ #ifdef DEBUG Rprintf("tiny\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); V(j,k) += wti * leni; } else if(floor(x1i) == floor(x0i) 
&& floor(y1i) == floor(y0i)) { /* contained in one cell */ #ifdef DEBUG Rprintf("contained in one cell\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); V(j,k) += wti * leni; } else if(floor(y1i) == floor(y0i)) { /* horizontal */ #ifdef DEBUG Rprintf("horizontal\n"); #endif j = clamp((int) floor(y1i), 0, Ny-1); if(x1i > x0i) { xleft = x0i; yleft = y0i; xright = x1i; yright = y1i; } else { xleft = x1i; yleft = y1i; xright = x0i; yright = y0i; } mmin = clamp((int) floor(xleft), 0, Nx-1); mmax = clamp((int) floor(xright), 0, Nx-1); slope = (yright - yleft)/(xright - xleft); scalesecant = wti * sqrt(pwidth2 + slope * slope * pheight2); /* For this slope, one scaled x unit means 'pwidth' original x units and slope * pheight original y units i.e. line length sqrt(pwidth^2 + slope^2 * pheight^2) */ for(k = mmin; k <= mmax; k++) { xstart = (k == mmin) ? xleft : k; xfinish = (k == mmax) ? xright : (k+1); V(j,k) += (xfinish - xstart) * scalesecant; } } else if(floor(x1i) == floor(x0i)) { /* vertical */ #ifdef DEBUG Rprintf("vertical\n"); #endif k = clamp((int) floor(x1i), 0, Nx-1); if(y1i > y0i) { xlow = x0i; ylow = y0i; xhigh = x1i; yhigh = y1i; } else { xlow = x1i; ylow = y1i; xhigh = x0i; yhigh = y0i; } mmin = clamp((int) floor(ylow), 0, Ny-1); mmax = clamp((int) floor(yhigh), 0, Ny-1); invslope = (xhigh - xlow)/(yhigh - ylow); scalecosecant = wti * sqrt(pheight2 + invslope * invslope * pwidth2); #ifdef DEBUG Rprintf("i = %d\n", i); Rprintf("inverse slope = %lf\n", invslope); Rprintf("scaled cosecant = %lf\n", scalecosecant); #endif /* For this slope, one scaled y unit means 'pheight' original y units and invslope * pwidth original x units i.e. line length sqrt(pheight^2 + invslope^2 * pwidth^2) */ for(j = mmin; j <= mmax; j++) { ystart = (j == mmin)? ylow : j; yfinish = (j == mmax)? 
yhigh : (j+1); V(j,k) += (yfinish - ystart) * scalecosecant; } } else { /* general case */ #ifdef DEBUG Rprintf("general\n"); #endif if(x1i > x0i) { xleft = x0i; yleft = y0i; xright = x1i; yright = y1i; } else { xleft = x1i; yleft = y1i; xright = x0i; yright = y0i; } slope = (yright - yleft)/(xright - xleft); mleft = clamp((int) floor(xleft), 0, Nx-1); mright = clamp((int) floor(xright), 0, Nx-1); #ifdef DEBUG Rprintf("column range [%d, %d]\n", mleft, mright); #endif /* treat each vertical slice */ for(m = mleft; m <= mright; m++) { if(m == mleft) { xstart = xleft; ystart = yleft; } else { xstart = m; ystart = yleft + slope * (xstart - xleft); } if(m == mright) { xfinish = xright; yfinish = yright; } else { xfinish = m+1; yfinish = yleft + slope * (xfinish - xleft); } kstart = clamp((int) floor(ystart), 0, Ny-1); kfinish = clamp((int) floor(yfinish), 0, Ny-1); if(ystart < yfinish) { kmin = kstart; kmax = kfinish; ylow = ystart; yhigh = yfinish; } else { kmin = kfinish; kmax = kstart; ylow = yfinish; yhigh = ystart; } #ifdef DEBUG Rprintf("column %d: rows [%d, %d]\n", m, kmin, kmax); #endif for(k = kmin; k <= kmax; k++) { yyy0 = (k == kmin) ? ylow : k; yyy1 = (k == kmax) ? 
yhigh : (k+1); xxx0 = xstart + (yyy0 - ystart)/slope; xxx1 = xstart + (yyy1 - ystart)/slope; V(k, m) += wti * sqrt(pow(yyy1 - yyy0, 2) * pheight2 + pow(xxx1 - xxx0, 2) * pwidth2); } } } } } #ifdef DEBUG Rprintf("done.\n"); #endif } spatstat/src/lincrossdist.c0000644000176200001440000000456613166361223015606 0ustar liggesusers#include #include #include "chunkloop.h" /* lincrossdist.c Shortest-path distances between pairs of points in linear network $Revision: 1.3 $ $Date: 2012/10/13 03:45:41 $ lincrossdist */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void lincrossdist(np, xp, yp, /* data points from which distances are measured */ nq, xq, yq, /* data points to which distances are measured */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ psegmap, /* map from data points to segments */ qsegmap, /* map from data points to segments */ /* OUTPUT */ answer /* shortest path distances between points */ ) int *np, *nq, *nv, *ns; int *from, *to, *psegmap, *qsegmap; /* integer vectors (mappings) */ double *xp, *yp, *xq, *yq, *xv, *yv; /* vectors of coordinates */ double *dpath, *answer; /* matrices */ { int Np, Nq, Nv, i, j, maxchunk; int Psegi, Qsegj, nbi1, nbi2, nbj1, nbj2; double xpi, ypi, xqj, yqj; double d, dPiV1, dPiV2, dV1Qj, dV2Qj, d11, d12, d21, d22; Np = *np; Nq = *nq; Nv = *nv; OUTERCHUNKLOOP(i, Np, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Np, maxchunk, 1024) { xpi = xp[i]; ypi = yp[i]; Psegi = psegmap[i]; nbi1 = from[Psegi]; nbi2 = to[Psegi]; dPiV1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dPiV2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); for(j = 0; j < Nq; j++) { xqj = xq[j]; yqj = yq[j]; Qsegj = qsegmap[j]; if(Psegi == Qsegj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xqj, 2) + pow(ypi - yqj, 2)); } else { /* Shortest path from i to 
j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[Qsegj]; nbj2 = to[Qsegj]; dV1Qj = EUCLID(xv[nbj1], yv[nbj1], xqj, yqj); dV2Qj = EUCLID(xv[nbj2], yv[nbj2], xqj, yqj); d11 = dPiV1 + DPATH(nbi1,nbj1) + dV1Qj; d12 = dPiV1 + DPATH(nbi1,nbj2) + dV2Qj; d21 = dPiV2 + DPATH(nbi2,nbj1) + dV1Qj; d22 = dPiV2 + DPATH(nbi2,nbj2) + dV2Qj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* write */ ANSWER(i,j) = d; } } } } spatstat/src/Estrauss.c0000755000176200001440000000320713166361223014671 0ustar liggesusers#include #include #include "chunkloop.h" #include "looptest.h" /* Estrauss.c $Revision: 1.4 $ $Date: 2014/09/19 00:54:07 $ C implementation of 'eval' for Strauss interaction Calculates number of data points within distance r of each quadrature point (when 'source' = quadrature points, 'target' = data points) Assumes point patterns are sorted in increasing order of x coordinate */ double sqrt(); void Ccrosspaircounts(nnsource, xsource, ysource, nntarget, xtarget, ytarget, rrmax, counts) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget, *rrmax; /* output */ int *counts; { int nsource, ntarget, maxchunk, j, i, ileft, counted; double xsourcej, ysourcej, rmax, r2max, r2maxpluseps, xleft, dx, dy, dx2, d2; nsource = *nnsource; ntarget = *nntarget; rmax = *rrmax; r2max = rmax * rmax; r2maxpluseps = r2max + EPSILON(r2max); if(nsource == 0 || ntarget == 0) return; ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { counted = 0; xsourcej = xsource[j]; ysourcej = ysource[j]; /* adjust starting point */ xleft = xsourcej - rmax; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft to iright */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > r2maxpluseps) break; dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= r2max) 
++counted; } counts[j] = counted; } } } spatstat/src/methas.c0000755000176200001440000002750513166361223014350 0ustar liggesusers#include #include #include #include "methas.h" #include "chunkloop.h" #include "mhsnoop.h" void fexitc(const char *msg); /* To switch on debugging code, insert the line: #define MH_DEBUG YES */ #ifndef MH_DEBUG #define MH_DEBUG NO #endif /* This is the value of 'ix' when we are proposing a birth. It must be equal to -1 so that NONE+1 = 0. */ #define NONE -1 extern Cifns getcif(char *); SEXP xmethas( SEXP ncif, SEXP cifname, SEXP beta, SEXP ipar, SEXP iparlen, SEXP period, SEXP xprop, SEXP yprop, SEXP mprop, SEXP ntypes, SEXP nrep, SEXP p, SEXP q, SEXP nverb, SEXP nrep0, SEXP x, SEXP y, SEXP marks, SEXP ncond, SEXP fixall, SEXP track, SEXP thin, SEXP snoopenv, SEXP temper, SEXP invertemp) { char *cifstring; double cvd, cvn, qnodds, anumer, adenom, betavalue; double *iparvector; int verb, marked, tempered, mustupdate, itype; int nfree, nsuspect; int irep, ix, j, maxchunk, iverb; int Ncif; int *plength; long Nmore; int permitted; double invtemp; double *xx, *yy, *xpropose, *ypropose; int *mm, *mpropose, *pp, *aa; SEXP out, xout, yout, mout, pout, aout; int tracking, thinstart; #ifdef HISTORY_INCLUDES_RATIO SEXP numout, denout; double *nn, *dd; #endif State state; Model model; Algor algo; Propo birthprop, deathprop, shiftprop; History history; Snoop snooper; /* The following variables are used only for a non-hybrid interaction */ Cifns thecif; /* cif structure */ Cdata *thecdata; /* pointer to initialised cif data block */ /* The following variables are used only for a hybrid interaction */ Cifns *cif; /* vector of cif structures */ Cdata **cdata; /* vector of pointers to initialised cif data blocks */ int *needupd; /* vector of logical values */ int k; /* loop index for cif's */ /* =================== Protect R objects from garbage collector ======= */ PROTECT(ncif = AS_INTEGER(ncif)); PROTECT(cifname = AS_CHARACTER(cifname)); PROTECT(beta = 
AS_NUMERIC(beta)); PROTECT(ipar = AS_NUMERIC(ipar)); PROTECT(iparlen = AS_INTEGER(iparlen)); PROTECT(period = AS_NUMERIC(period)); PROTECT(xprop = AS_NUMERIC(xprop)); PROTECT(yprop = AS_NUMERIC(yprop)); PROTECT(mprop = AS_INTEGER(mprop)); PROTECT(ntypes = AS_INTEGER(ntypes)); PROTECT(nrep = AS_INTEGER(nrep)); PROTECT( p = AS_NUMERIC(p)); PROTECT( q = AS_NUMERIC(q)); PROTECT(nverb = AS_INTEGER(nverb)); PROTECT(nrep0 = AS_INTEGER(nrep0)); PROTECT( x = AS_NUMERIC(x)); PROTECT( y = AS_NUMERIC(y)); PROTECT( marks = AS_INTEGER(marks)); PROTECT(fixall = AS_INTEGER(fixall)); PROTECT(ncond = AS_INTEGER(ncond)); PROTECT(track = AS_INTEGER(track)); PROTECT(thin = AS_INTEGER(thin)); PROTECT(temper = AS_INTEGER(temper)); PROTECT(invertemp = AS_NUMERIC(invertemp)); /* that's 24 protected objects */ /* =================== Translate arguments from R to C ================ */ /* Ncif is the number of cif's plength[i] is the number of interaction parameters in the i-th cif */ Ncif = *(INTEGER_POINTER(ncif)); plength = INTEGER_POINTER(iparlen); /* copy RMH algorithm parameters */ algo.nrep = *(INTEGER_POINTER(nrep)); algo.nverb = *(INTEGER_POINTER(nverb)); algo.nrep0 = *(INTEGER_POINTER(nrep0)); algo.p = *(NUMERIC_POINTER(p)); algo.q = *(NUMERIC_POINTER(q)); algo.fixall = ((*(INTEGER_POINTER(fixall))) == 1); algo.ncond = *(INTEGER_POINTER(ncond)); algo.tempered = tempered = (*(INTEGER_POINTER(temper)) != 0); algo.invtemp = invtemp = *(NUMERIC_POINTER(invertemp)); /* copy model parameters without interpreting them */ model.beta = NUMERIC_POINTER(beta); model.ipar = iparvector = NUMERIC_POINTER(ipar); model.period = NUMERIC_POINTER(period); model.ntypes = *(INTEGER_POINTER(ntypes)); state.ismarked = marked = (model.ntypes > 1); /* copy initial state */ state.npts = LENGTH(x); state.npmax = 4 * ((state.npts > 256) ? 
state.npts : 256); state.x = (double *) R_alloc(state.npmax, sizeof(double)); state.y = (double *) R_alloc(state.npmax, sizeof(double)); xx = NUMERIC_POINTER(x); yy = NUMERIC_POINTER(y); if(marked) { state.marks =(int *) R_alloc(state.npmax, sizeof(int)); mm = INTEGER_POINTER(marks); } if(!marked) { for(j = 0; j < state.npts; j++) { state.x[j] = xx[j]; state.y[j] = yy[j]; } } else { for(j = 0; j < state.npts; j++) { state.x[j] = xx[j]; state.y[j] = yy[j]; state.marks[j] = mm[j]; } } #if MH_DEBUG Rprintf("\tnpts=%d\n", state.npts); #endif /* access proposal data */ xpropose = NUMERIC_POINTER(xprop); ypropose = NUMERIC_POINTER(yprop); mpropose = INTEGER_POINTER(mprop); /* we need to initialise 'mpropose' to keep compilers happy. mpropose is only used for marked patterns. Note 'mprop' is always a valid pointer */ /* ================= Allocate space for cifs etc ========== */ if(Ncif > 1) { cif = (Cifns *) R_alloc(Ncif, sizeof(Cifns)); cdata = (Cdata **) R_alloc(Ncif, sizeof(Cdata *)); needupd = (int *) R_alloc(Ncif, sizeof(int)); } else { /* Keep the compiler happy */ cif = (Cifns *) R_alloc(1, sizeof(Cifns)); cdata = (Cdata **) R_alloc(1, sizeof(Cdata *)); needupd = (int *) R_alloc(1, sizeof(int)); } /* ================= Determine process to be simulated ========== */ /* Get the cif's */ if(Ncif == 1) { cifstring = (char *) STRING_VALUE(cifname); thecif = getcif(cifstring); mustupdate = NEED_UPDATE(thecif); if(thecif.marked && !marked) fexitc("cif is for a marked point process, but proposal data are not marked points; bailing out."); /* Keep compiler happy*/ cif[0] = thecif; needupd[0] = mustupdate; } else { mustupdate = NO; for(k = 0; k < Ncif; k++) { cifstring = (char *) CHAR(STRING_ELT(cifname, k)); cif[k] = getcif(cifstring); needupd[k] = NEED_UPDATE(cif[k]); if(needupd[k]) mustupdate = YES; if(cif[k].marked && !marked) fexitc("component cif is for a marked point process, but proposal data are not marked points; bailing out."); } } /* ============= Initialise 
transition history ========== */ tracking = (*(INTEGER_POINTER(track)) != 0); /* Initialise even if not needed, to placate the compiler */ if(tracking) { history.nmax = algo.nrep; } else { history.nmax = 1; } history.n = 0; history.proptype = (int *) R_alloc(history.nmax, sizeof(int)); history.accepted = (int *) R_alloc(history.nmax, sizeof(int)); #ifdef HISTORY_INCLUDES_RATIO history.numerator = (double *) R_alloc(history.nmax, sizeof(double)); history.denominator = (double *) R_alloc(history.nmax, sizeof(double)); #endif /* ============= Visual debugging ========== */ /* Active if 'snoopenv' is an environment */ #if MH_DEBUG Rprintf("Initialising mhsnoop\n"); #endif initmhsnoop(&snooper, snoopenv); #if MH_DEBUG Rprintf("Initialised\n"); if(snooper.active) Rprintf("Debugger is active.\n"); #endif /* ================= Thinning of initial state ==================== */ thinstart = (*(INTEGER_POINTER(thin)) != 0); /* ================= Initialise algorithm ==================== */ /* Interpret the model parameters and initialise auxiliary data */ if(Ncif == 1) { thecdata = (*(thecif.init))(state, model, algo); /* keep compiler happy */ cdata[0] = thecdata; } else { for(k = 0; k < Ncif; k++) { if(k > 0) model.ipar += plength[k-1]; cdata[k] = (*(cif[k].init))(state, model, algo); } /* keep compiler happy */ thecdata = cdata[0]; } /* Set the fixed elements of the proposal objects */ birthprop.itype = BIRTH; deathprop.itype = DEATH; shiftprop.itype = SHIFT; birthprop.ix = NONE; if(!marked) birthprop.mrk = deathprop.mrk = shiftprop.mrk = NONE; /* Set up some constants */ verb = (algo.nverb !=0); qnodds = (1.0 - algo.q)/algo.q; /* Set value of beta for unmarked process */ /* (Overwritten for marked process, but keeps compiler happy) */ betavalue = model.beta[0]; /* ============= Run Metropolis-Hastings ================== */ /* Initialise random number generator */ GetRNGstate(); /* Here comes the code for the M-H loop. 
The basic code (in mhloop.h) is #included many times using different options The C preprocessor descends through a chain of files mhv1.h, mhv2.h, ... to enumerate all possible combinations of flags. */ #include "mhv1.h" /* relinquish random number generator */ PutRNGstate(); /* ============= Done ================== */ /* Create space for output, and copy final state */ /* Point coordinates */ PROTECT(xout = NEW_NUMERIC(state.npts)); PROTECT(yout = NEW_NUMERIC(state.npts)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); for(j = 0; j < state.npts; j++) { xx[j] = state.x[j]; yy[j] = state.y[j]; } /* Marks */ if(marked) { PROTECT(mout = NEW_INTEGER(state.npts)); mm = INTEGER_POINTER(mout); for(j = 0; j < state.npts; j++) mm[j] = state.marks[j]; } else { /* Keep the compiler happy */ PROTECT(mout = NEW_INTEGER(1)); mm = INTEGER_POINTER(mout); mm[0] = 0; } /* Transition history */ if(tracking) { PROTECT(pout = NEW_INTEGER(algo.nrep)); PROTECT(aout = NEW_INTEGER(algo.nrep)); pp = INTEGER_POINTER(pout); aa = INTEGER_POINTER(aout); for(j = 0; j < algo.nrep; j++) { pp[j] = history.proptype[j]; aa[j] = history.accepted[j]; } #ifdef HISTORY_INCLUDES_RATIO PROTECT(numout = NEW_NUMERIC(algo.nrep)); PROTECT(denout = NEW_NUMERIC(algo.nrep)); nn = NUMERIC_POINTER(numout); dd = NUMERIC_POINTER(denout); for(j = 0; j < algo.nrep; j++) { nn[j] = history.numerator[j]; dd[j] = history.denominator[j]; } #endif } else { /* Keep the compiler happy */ PROTECT(pout = NEW_INTEGER(1)); PROTECT(aout = NEW_INTEGER(1)); pp = INTEGER_POINTER(pout); aa = INTEGER_POINTER(aout); pp[0] = aa[0] = 0; #ifdef HISTORY_INCLUDES_RATIO PROTECT(numout = NEW_NUMERIC(1)); PROTECT(denout = NEW_NUMERIC(1)); nn = NUMERIC_POINTER(numout); dd = NUMERIC_POINTER(denout); nn[0] = dd[0] = 0; #endif } /* Pack up into list object for return */ if(!tracking) { /* no transition history */ if(!marked) { PROTECT(out = NEW_LIST(2)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); } else { PROTECT(out = 
NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, mout); } } else { /* transition history */ if(!marked) { #ifdef HISTORY_INCLUDES_RATIO PROTECT(out = NEW_LIST(6)); #else PROTECT(out = NEW_LIST(4)); #endif SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, pout); SET_VECTOR_ELT(out, 3, aout); #ifdef HISTORY_INCLUDES_RATIO SET_VECTOR_ELT(out, 4, numout); SET_VECTOR_ELT(out, 5, denout); #endif } else { #ifdef HISTORY_INCLUDES_RATIO PROTECT(out = NEW_LIST(7)); #else PROTECT(out = NEW_LIST(5)); #endif SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, mout); SET_VECTOR_ELT(out, 3, pout); SET_VECTOR_ELT(out, 4, aout); #ifdef HISTORY_INCLUDES_RATIO SET_VECTOR_ELT(out, 5, numout); SET_VECTOR_ELT(out, 6, denout); #endif } } #ifdef HISTORY_INCLUDES_RATIO UNPROTECT(32); /* 24 arguments plus xout, yout, mout, pout, aout, out, numout, denout */ #else UNPROTECT(30); /* 24 arguments plus xout, yout, mout, pout, aout, out */ #endif return(out); } spatstat/src/dwpure.c0000755000176200001440000002320213166361223014363 0ustar liggesusers/* dwpure.c $Revision: 1.5 $ $Date: 2011/09/20 07:54:53 $ Code by Dominic Schuhmacher */ #include #include #include typedef struct State { int n1, n2; /* vectors of length n1 (rows) and n2 (cols) */ int *rowmass, *colmass; /* mass to be moved from row / to col */ int *rowlab, *collab; /* row and col labels (specify previous node (row for collab, col for rowlab)) */ int *rowflow, *colflow; /* second component of labels (specify flow through current node) */ int *rowsurplus, *colsurplus; /* the surplus in each row/col under the current flow */ int *dualu, *dualv; /* vectors of dual variables (u for rows, v for cols) */ int *rowhelper, *colhelper; /* helping vector to store intermediate results */ /* could be local in initcost at the moment */ /* n by n matrices */ int *d; /* matrix of costs */ int *flowmatrix; /* matrix of flows */ int 
*arcmatrix; /* matrix of arcs for restriced primal problem (1 if arc, 0 if no arc) should be unsigned char to save memory however need to workout problem with R_alloc first (see below) */ /* n*n vector */ int *collectvals; } State; #define COST(I,J,STATE,NVALUE) ((STATE)->d)[(NVALUE) * (J) + (I)] #define FLOW(I,J,STATE,NVALUE) ((STATE)->flowmatrix)[(NVALUE) * (J) + (I)] #define ARC(I,J,STATE,NVALUE) ((STATE)->arcmatrix)[(NVALUE) * (J) + (I)] #define MIN(A,B) ((A)<(B) ? (A) : (B)) int arraysum(int *a, int n); int arraymin(int *a, int n); void initvalues(State *state); void maxflow(State *state); void updateduals(State *state); void augmentflow(int startcol, State *state); /* ------------ The main function ----------------------------- */ void dwpure(int *d, int *rmass, int *cmass, int *numr, int *numc, int *flowmatrix) { int i,j; /* indices */ int n1,n2; unsigned char feasible = 0; /* boolean for main loop */ State state; /* inputs */ state.n1 = n1 = *numr; state.n2 = n2 = *numc; state.d = d; state.rowmass = rmass; state.colmass = cmass; /* scratch space */ state.rowlab = (int *) R_alloc((long) n1, sizeof(int)); state.collab = (int *) R_alloc((long) n2, sizeof(int)); state.rowflow = (int *) R_alloc((long) n1, sizeof(int)); state.colflow = (int *) R_alloc((long) n2, sizeof(int)); state.rowsurplus = (int *) R_alloc((long) n1, sizeof(int)); state.colsurplus = (int *) R_alloc((long) n2, sizeof(int)); state.dualu = (int *) R_alloc((long) n1, sizeof(int)); state.dualv = (int *) R_alloc((long) n2, sizeof(int)); state.rowhelper = (int *) R_alloc((long) n1, sizeof(int)); state.colhelper = (int *) R_alloc((long) n2, sizeof(int)); state.flowmatrix = (int *) R_alloc((long) (n1 * n2), sizeof(int)); state.arcmatrix = (int *) R_alloc((long) (n1 * n2), sizeof(int)); state.collectvals = (int *) R_alloc((long) (n1 * n2), sizeof(int)); for (i = 0; i < n1; ++i) { for (j = 0; j < n2; ++j) { state.flowmatrix[(n1)*(j) + i] = 0; state.arcmatrix[(n1)*(j) + i] = 0; state.collectvals[(n1)*(j) 
+ i] = 0; } } for (i = 0; i < n1; ++i) { state.rowlab[i] = 0; state.rowflow[i] = 0; state.rowsurplus[i] = 0; state.dualu[i] = 0; state.rowhelper[i] = 0; } for (j = 0; j < n2; ++j) { state.collab[j] = 0; state.colflow[j] = 0; state.colsurplus[j] = 0; state.dualv[j] = 0; state.colhelper[j] = 0; } /* Initialize dual variables, arcmatrix, and surpluses */ initvalues(&state); /* For testing: print out cost matrix for (i = 0; i < n1; ++i) { for (j = 0; j < n2; ++j) { Rprintf("%d ", COST(i, j, &state, n1)); } Rprintf("\n"); } */ /* The main loop */ while(feasible == 0) { maxflow(&state); if (arraysum(state.rowsurplus, n1) > 0) { updateduals(&state); /* also updates arcmatrix */ } else { feasible = 1; } } /* "Return" the final flowmatrix */ for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { flowmatrix[n1*j+i] = state.flowmatrix[n1*j+i]; } } } /* ------------ Functions called by dwpure_R ------------------------- */ /* Sum of integer array */ int arraysum(int *a, int n) { int i; int asum = 0; for (i = 0; i < n; i++) asum += a[i]; return(asum); } /* Minimal element of an integer array */ int arraymin(int *a, int n) { int i, amin; if (n < 1) return(-1); amin = a[0]; if (n > 1) for (i = 0; i < n; i++) if (a[i] < amin) amin = a[i]; return(amin); } /* Initialize cost matrix: subtract in each row its minimal entry (from all the entries in the row), then subtract in each column its minimal entry (from all the entries in the column) */ void initvalues(State *state) { int i,j,n1,n2; n1 = state->n1; n2 = state->n2; /* Initial surpluses; can I do this shorter? 
later on surpluses are updated in flow augmentation step */ for (i = 0; i < n1; i++) state->rowsurplus[i] = state->rowmass[i]; for (j = 0; j < n2; j++) state->colsurplus[j] = state->colmass[j]; for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) state->colhelper[j] = COST(i, j, state, n1); state->dualu[i] = arraymin(state->colhelper, n2); } for (j = 0; j < n2; j++) { for (i = 0; i < n1; i++) state->rowhelper[i] = COST(i, j, state, n1) - state->dualu[i]; state->dualv[j] = arraymin(state->rowhelper, n1); } for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { if (COST(i, j, state, n1) == state->dualu[i] + state->dualv[j]) ARC(i, j, state, n1) = 1; else ARC(i, j, state, n1) = 0; } } } /* Maximize the flow on the (zeros of the) current cost matrix */ void maxflow(State *state) { int breakthrough; /* col. no. in which breakthrough occurs */ unsigned char labelfound = 1; /* 0 if no more labels can be found */ int i,j,n1,n2; n1 = state->n1; n2 = state->n2; while (labelfound == 1) { breakthrough = -1; /* initialize labels */ for (i = 0; i < n1; i++) { if (state->rowsurplus[i] > 0) { state->rowlab[i] = -5; state->rowflow[i] = state->rowsurplus[i]; } else { state->rowlab[i] = -1; /* setting rowflow to zero isn't necessary! */ } } for (j = 0; j < n2; j++) state->collab[j] = -1; /* setting colflow to zero isn't necessary! */ /* -1 means "no index", -5 means "source label" (rows only) */ while (labelfound == 1 && breakthrough == -1) { labelfound = 0; /* label unlabeled column j that permits flow from some labeled row i */ /* ("permits flow" means arcmatrix[i][j] = 1). 
Do so for every j */ for (i = 0; i < n1; i++) { if (state->rowlab[i] != -1) { for (j = 0; j < n2; j++) { if (ARC(i, j, state, n1) == 1 && state->collab[j] == -1) { state->collab[j] = i; state->colflow[j] = state->rowflow[i]; labelfound = 1; if (state->colsurplus[j] > 0 && breakthrough == -1) breakthrough = j; } } } } /* label unlabeled row i that already sends flow to some labeled col j */ /* ("already sends" means flowmatrix[i][j] > 0). Do so for every i */ for (j = 0; j < n2; j++) { if (state->collab[j] != -1) { for (i = 0; i < n1; i++) { if (FLOW(i, j, state, n1) > 0 && state->rowlab[i] == -1) { state->rowlab[i] = j; state->rowflow[i] = MIN(state->colflow[j],FLOW(i, j, state, n1)); labelfound = 1; } } } } } if (breakthrough != -1) augmentflow(breakthrough, state); } } /* Update the dual variables (called if solution of restricted primal is not feasible for the original problem): determine the minimum over the submatrix given by all labeled rows and unlabeled columns, and subtract it from all labeled rows and add it to all labeled columns. 
*/ void updateduals(State *state) { int i,j,n1,n2,mini; int count = 0; n1 = state->n1; n2 = state->n2; for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { if (state->rowlab[i] != -1 && state->collab[j] == -1) { state->collectvals[count] = COST(i, j, state, n1) - state->dualu[i] - state->dualv[j]; count++; } } } mini = arraymin(state->collectvals, count); for (i = 0; i < n1; i++) { if (state->rowlab[i] != -1) state->dualu[i] += mini; } for (j = 0; j < n2; j++){ if (state->collab[j] != -1) state->dualv[j] -= mini; } for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { if (COST(i, j, state, n1) == state->dualu[i] + state->dualv[j]) ARC(i, j, state, n1) = 1; else ARC(i, j, state, n1) = 0; } } } /* Augment the flow on the graph given by arcmatrix (by aug) according to the row and column labels starting in column startcol */ /* Adjust the surpluses while we're at it (first row and last col have -aug) */ void augmentflow(int startcol, State *state) { int k,l,aug,n1; /* int i,j,k,l,aug,n1,n2; */ n1 = state->n1; l = startcol; aug = MIN(state->colflow[l], state->colsurplus[l]); state->colsurplus[l] -= aug; k = state->collab[l]; FLOW(k, l, state, n1) += aug; l = state->rowlab[k]; while (l != -5) { FLOW(k, l, state, n1) -= aug; k = state->collab[l]; FLOW(k, l, state, n1) += aug; l = state->rowlab[k]; } state->rowsurplus[k] -= aug; } spatstat/src/distmapbin.c0000755000176200001440000000644513166361223015221 0ustar liggesusers/* distmapbin.c Distance transform of a discrete binary image (8-connected path metric) $Revision: 1.6 $ $Date: 2011/11/20 03:34:16 $ */ #include #include "raster.h" #include void dist_to_bdry(); void shape_raster(); void distmap_bin(in, dist) Raster *in; /* input: binary image */ Raster *dist; /* output: distance to nearest point */ /* rasters must have been dimensioned by shape_raster() and must all have identical dimensions and margins */ { int j,k; double d, dnew; double xstep, ystep, diagstep, huge; int rmin, rmax, cmin, cmax; /* distances between 
neighbouring pixels */ xstep = in->xstep; ystep = in->ystep; diagstep = sqrt(xstep * xstep + ystep * ystep); if(xstep < 0) xstep = -xstep; if(ystep < 0) ystep = -ystep; /* effectively infinite distance */ huge = 2.0 * Distance(dist->xmin,dist->ymin,dist->xmax,dist->ymax); /* image boundaries */ rmin = in->rmin; rmax = in->rmax; cmin = in->cmin; cmax = in->cmax; #define DISTANCE(ROW, COL) Entry(*dist, ROW, COL, double) #define MASKTRUE(ROW, COL) (Entry(*in, ROW, COL, int) != 0) #define MASKFALSE(ROW, COL) (Entry(*in, ROW, COL, int) == 0) #define UPDATE(D, ROW, COL, STEP) \ dnew = STEP + DISTANCE(ROW, COL); \ if(D > dnew) D = dnew /* initialise edges to boundary condition */ for(j = rmin-1; j <= rmax+1; j++) { DISTANCE(j, cmin-1) = (MASKTRUE(j, cmin-1)) ? 0.0 : huge; DISTANCE(j, cmax+1) = (MASKTRUE(j, cmax+1)) ? 0.0 : huge; } for(k = cmin-1; k <= cmax+1; k++) { DISTANCE(rmin-1, k) = (MASKTRUE(rmin-1, k)) ? 0.0 : huge; DISTANCE(rmax+1, k) = (MASKTRUE(rmax+1, k)) ? 0.0 : huge; } /* forward pass */ for(j = rmin; j <= rmax; j++) { R_CheckUserInterrupt(); for(k = cmin; k <= cmax; k++) { if(MASKTRUE(j, k)) d = DISTANCE(j, k) = 0.0; else { d = huge; UPDATE(d, j-1, k-1, diagstep); UPDATE(d, j-1, k, ystep); UPDATE(d, j-1, k+1, diagstep); UPDATE(d, j, k-1, xstep); DISTANCE(j,k) = d; } } } /* backward pass */ for(j = rmax; j >= rmin; j--) { R_CheckUserInterrupt(); for(k = cmax; k >= cmin; k--) { if(MASKFALSE(j,k)) { d = DISTANCE(j,k); UPDATE(d, j+1, k+1, diagstep); UPDATE(d, j+1, k, ystep); UPDATE(d, j+1, k-1, diagstep); UPDATE(d, j, k+1, xstep); DISTANCE(j,k) = d; } } } } /* R interface */ void distmapbin(xmin, ymin, xmax, ymax, nr, nc, inp, distances, boundary) double *xmin, *ymin, *xmax, *ymax; /* x, y dimensions */ int *nr, *nc; /* raster dimensions EXCLUDING margin of 1 on each side */ int *inp; /* input: binary image */ double *distances; /* output: distance to nearest point */ double *boundary; /* output: distance to boundary of rectangle */ /* all images must have 
identical dimensions including a margin of 1 on each side */ { Raster data, dist, bdist; shape_raster( &data, (void *) inp, *xmin,*ymin,*xmax,*ymax, *nr+2, *nc+2, 1, 1); shape_raster( &dist, (void *) distances,*xmin,*ymin,*xmax,*ymax, *nr+2,*nc+2,1,1); shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax, *nr+2,*nc+2,1,1); distmap_bin(&data, &dist); dist_to_bdry(&bdist); } spatstat/src/dist2.h0000755000176200001440000000434213166361223014113 0ustar liggesusers/* dist2.h External declarations for the functions defined in dist2.c and In-line cpp macros for similar purposes $Revision: 1.19 $ $Date: 2014/05/08 02:13:22 $ */ double dist2(double u, double v, double x, double y, double *period); double dist2either(double u, double v, double x, double y, double *period); int dist2thresh(double u, double v, double x, double y, double *period, double r2); int dist2Mthresh(double u, double v, double x, double y, double *period, double r2); /* Efficient macros to test closeness of points */ /* These variables must be declared (note: some files e.g. straush.c use 'RESIDUE' explicitly) */ #define DECLARE_CLOSE_VARS \ register double DX, DY, DXP, DYP, RESIDUE #define DECLARE_CLOSE_D2_VARS \ register double DX, DY, DXP, DYP, DX2 #define CLOSE(U,V,X,Y,R2) \ ((DX = X - U), \ (RESIDUE = R2 - DX * DX), \ ((RESIDUE > 0.0) && \ ((DY = Y - V), \ (RESIDUE = RESIDUE - DY * DY), \ (RESIDUE > 0.0)))) #define CLOSE_D2(U,V,X,Y,R2,D2) \ ((DX = X - U), \ (DX2 = DX * DX), \ (DX2 < R2) && (((DY = Y - V), \ (D2 = DX2 + DY * DY), \ (D2 < R2)))) /* The following calculates X mod P, but it works only if X \in [-P, P] so that X is the difference between two values that lie in an interval of length P */ #define CLOSE_PERIODIC(U,V,X,Y,PERIOD,R2) \ ((DX = X - U), \ (DX = (DX < 0.0) ? -DX : DX), \ (DXP = (PERIOD)[0] - DX), \ (DX = (DX < DXP) ? DX : DXP), \ (RESIDUE = R2 - DX * DX), \ ((RESIDUE > 0.0) && ((DY = Y - V), \ (DY = (DY < 0.0) ? 
-DY : DY), \ (DYP = (PERIOD)[1] - DY), \ (DY = (DY < DYP) ? DY : DYP), \ (RESIDUE = RESIDUE - DY * DY), \ (RESIDUE > 0.0) ))) #define CLOSE_PERIODIC_D2(U,V,X,Y,PERIOD,R2,D2) \ ((DX = X - U), \ (DX = (DX < 0.0) ? -DX : DX), \ (DXP = (PERIOD)[0] - DX), \ (DX = (DX < DXP) ? DX : DXP), \ (D2 = DX * DX), \ ((D2 < R2) && ((DY = Y - V), \ (DY = (DY < 0.0) ? -DY : DY), \ (DYP = (PERIOD)[1] - DY), \ (DY = (DY < DYP) ? DY : DYP), \ (D2 += DY * DY), \ (D2 < R2) ))) spatstat/src/dgs.c0000755000176200001440000000505613166361223013641 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" #include "constants.h" /* Conditional intensity computation for Diggle-Gates-Stibbard process */ /* Conditional intensity function for a pairwise interaction point process with interaction function as given by e(t) = sin^2(pi*t/2*rho) for t < rho = 1 for t >= rho (See page 767 of Diggle, Gates, and Stibbard, Biometrika vol. 74, 1987, pages 763 -- 770.) */ #define PION2 M_PI_2 /* pi/2 defined in Rmath.h */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Dgs { double rho; double rho2; double pion2rho; double *period; int per; } Dgs; /* initialiser function */ Cdata *dgsinit(state, model, algo) State state; Model model; Algor algo; { Dgs *dgs; /* allocate storage */ dgs = (Dgs *) R_alloc(1, sizeof(Dgs)); /* Interpret model parameters*/ dgs->rho = model.ipar[0]; dgs->period = model.period; /* constants */ dgs->rho2 = pow(dgs->rho, 2); dgs->pion2rho = PION2/dgs->rho; /* periodic boundary conditions? 
*/ dgs->per = (model.period[0] > 0.0); return((Cdata *) dgs); } /* conditional intensity evaluator */ double dgscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, r2, pairprod, cifval; Dgs *dgs; DECLARE_CLOSE_D2_VARS; dgs = (Dgs *) cdata; r2 = dgs->rho2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = pairprod = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(dgs->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],dgs->period,r2,d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } } if(ixp1 < npts) { for(j=ixp1; jperiod,r2,d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], r2, d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } } if(ixp1 < npts) { for(j=ixp1; jpion2rho * sqrt(d2)); } } } /* sin to sin^2 */ cifval = pairprod * pairprod; return cifval; } Cifns DgsCifns = { &dgsinit, &dgscif, (updafunptr) NULL, NO}; spatstat/src/distan3.c0000755000176200001440000002436313166361223014433 0ustar liggesusers/* distan3.c Distances between pairs of 3D points $Revision: 1.3 $ $Date: 2013/11/03 03:34:15 $ D3pairdist Pairwise distances D3pair2dist Pairwise distances squared D3pairPdist Pairwise distances with periodic correction D3pairP2dist Pairwise distances squared, with periodic correction D3crossdist Pairwise distances for two sets of points D3cross2dist Pairwise distances squared, for two sets of points D3crossPdist Pairwise distances for two sets of points, periodic correction matchxyz Find matches between two sets of points */ #include /* #include */ double sqrt(); void D3pairdist(n, x, y, z, squared, d) /* inputs */ int *n; double *x, *y, *z; int *squared; /* output */ double *d; { void D3pair1dist(), D3pair2dist(); if(*squared == 0) { D3pair1dist(n, 
x, y, z, d); } else { D3pair2dist(n, x, y, z, d); } } void D3pair1dist(n, x, y, z, d) /* inputs */ int *n; double *x, *y, *z; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dist = sqrt( dx * dx + dy * dy + dz * dz ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } /* squared distances */ void D3pair2dist(n, x, y, z, d) /* inputs */ int *n; double *x, *y, *z; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dist = dx * dx + dy * dy + dz * dz; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } void D3crossdist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto; int *squared; /* output */ double *d; { void D3cross1dist(), D3cross2dist(); if(*squared == 0) { D3cross1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d); } else { D3cross2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d); } } void D3cross1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz; nf = *nfrom; nt = *nto; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, 
dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; *dptr = sqrt( dx * dx + dy * dy + dz * dz ); } } } /* squared distances */ void D3cross2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz; nf = *nfrom; nt = *nto; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; *dptr = dx * dx + dy * dy + dz * dz; } } } /* distances with periodic correction */ void D3pairPdist(n, x, y, z, xwidth, yheight, zdepth, squared, d) /* inputs */ int *n; double *x, *y, *z, *xwidth, *yheight, *zdepth; int *squared; /* output */ double *d; { void D3pairP1dist(), D3pairP2dist(); if(*squared == 0) { D3pairP1dist(n, x, y, z, xwidth, yheight, zdepth, d); } else { D3pairP2dist(n, x, y, z, xwidth, yheight, zdepth, d); } } void D3pairP1dist(n, x, y, z, xwidth, yheight, zdepth, d) /* inputs */ int *n; double *x, *y, *z, *xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, dist, wide, high, deep; npoints = *n; wide = *xwidth; high = *yheight; deep = *zdepth; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dz + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < 
dz2p) dz2p = dz2; dist = sqrt( dx2p + dy2p + dz2p ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } /* same function without the sqrt */ void D3pairP2dist(n, x, y, z, xwidth, yheight, zdepth, d) /* inputs */ int *n; double *x, *y, *z, *xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, dist, wide, high, deep; npoints = *n; wide = *xwidth; high = *yheight; deep = *zdepth; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dz + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dist = dx2p + dy2p + dz2p; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } void D3crossPdist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, *xwidth, *yheight, *zdepth; int *squared; /* output */ double *d; { void D3crossP1dist(), D3crossP2dist(); if(*squared == 0) { D3crossP1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d); } else { D3crossP2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d); } } void D3crossP1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, 
*xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, wide, high, deep; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; deep = *zdepth; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dy + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; *dptr = sqrt( dx2p + dy2p + dz2p ); } } } void D3crossP2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, *xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, wide, high, deep; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; deep = *zdepth; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dy + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; *dptr = dx2p + dy2p + dz2p; } } } /* matchxyz Find matches between two lists of points */ void matchxyz(na, xa, ya, za, nb, xb, yb, zb, match) /* 
inputs */ int *na, *nb; double *xa, *ya, *za, *xb, *yb, *zb; /* output */ int *match; { int i, j, Na, Nb; double xai, yai, zai; Na = *na; Nb = *nb; for (i=1; i < Na; i++) { xai = xa[i]; yai = ya[i]; zai = za[i]; match[i] = 0; for (j=0; j < Nb; j++) if(xai == xb[j] && yai == yb[j] && zai == zb[i]) { match[i] = j; break; } } } spatstat/src/Knone.c0000644000176200001440000000154713166361223014134 0ustar liggesusers#include #include #include /* Knone.c Efficient computation of uncorrected estimates of K for large datasets KnoneI() Estimates K function, returns integer numerator KnoneD() Estimates K function, returns double precision numerator Kwnone() Estimates Kinhom, returns double precision numerator Functions require (x,y) data to be sorted in ascending order of x and expect r values to be equally spaced and starting at zero $Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $ */ #undef WEIGHTED #define FNAME KnoneI #define OUTTYPE int #include "Knone.h" #undef FNAME #undef OUTTYPE #define FNAME KnoneD #define OUTTYPE double #include "Knone.h" #undef FNAME #undef OUTTYPE #define FNAME Kwnone #define WEIGHTED #define OUTTYPE double #include "Knone.h" spatstat/src/PerfectDGS.h0000644000176200001440000001231513166361223015010 0ustar liggesusers // ........................... Diggle-Gates-Stibbard process ................ 
// $Revision: 1.3 $ $Date: 2012/03/10 11:22:50 $ #ifndef PI #define PI 3.14159265358979 #endif class DgsProcess : public PointProcess { public: double beta, rho, rhosquared; DgsProcess(double xmin, double xmax, double ymin, double ymax, double b, double r); ~DgsProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; DgsProcess::DgsProcess(double xmin, double xmax, double ymin, double ymax, double b, double r) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; rho = r; rhosquared = rho * rho; InteractionRange = rho; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double DgsProcess::Interaction(double dsquared) { double rtn, dist, t; rtn = 1; if(dsquared < rhosquared) { dist = sqrt(dsquared); t = sin((PI/2) * dist/rho); rtn = t * t; } return(rtn); } void DgsProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void DgsProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating DgsProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating DgsProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating DgsProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... 
extern "C" { SEXP PerfectDGS(SEXP beta, SEXP rho, SEXP xrange, SEXP yrange) { // input parameters double Beta, Rho, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(rho = AS_NUMERIC(rho)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 4 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Rho = *(NUMERIC_POINTER(rho)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ Rho); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ Rho); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ Rho); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ Rho); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Diggle-Gates-Stibbard point process DgsProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Rho); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator 
PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(8); // 4 arguments plus xout, yout, nout, out return(out); } } spatstat/src/sphefrac.c0000755000176200001440000000622613166361223014657 0ustar liggesusers#include #include #include "geom3.h" /* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Routine for calculating surface area of sphere intersected with box # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2013 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
# ////////////////////////////////////////////// */ #ifdef DEBUG #define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y)); #else #define DBG(X,Y) #endif static double pi = 3.141592653589793; /* Factor of 4 * pi * r * r IS ALREADY TAKEN OUT */ double sphesfrac(point, box, r) Point *point; Box *box; double r; { double sum, p[4], q[4]; double a1(), a2(), a3(); int i, j; p[1] = point->x - box->x0; p[2] = point->y - box->y0; p[3] = point->z - box->z0; q[1] = box->x1 - point->x; q[2] = box->y1 - point->y; q[3] = box->z1 - point->z; sum = 0; for(i = 1; i <= 3; i++) { sum += a1(p[i],r) + a1(q[i],r); #ifdef DEBUG Rprintf("i = %d, a1 = %f, a1 = %f\n", i, a1(p[i],r), a1(q[i],r)); #endif } DBG("Past a1", sum) for(i = 1; i < 3; i++) for(j = i+1; j <= 3; j++) { sum -= a2(p[i], p[j], r) + a2(p[i], q[j], r) + a2(q[i], p[j], r) + a2(q[i], q[j], r); #ifdef DEBUG Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum); #endif } DBG("Past a2", sum) sum += a3(p[1], p[2], p[3], r) + a3(p[1], p[2], q[3], r); DBG("sum", sum) sum += a3(p[1], q[2], p[3], r) + a3(p[1], q[2], q[3], r); DBG("sum", sum) sum += a3(q[1], p[2], p[3], r) + a3(q[1], p[2], q[3], r); DBG("sum", sum) sum += a3(q[1], q[2], p[3], r) + a3(q[1], q[2], q[3], r); DBG("Past a3", sum) return(1 - sum); } double a1(t, r) double t, r; { /* This is the function A1 divided by 4 pi r^2 */ if(t >= r) return(0.0); return((1 - t/r) * 0.5); } double a2(t1, t2, r) double t1, t2, r; { double c2(); /* This is A2 divided by 4 pi r^2 because c2 is C divided by pi */ return(c2( t1 / r, t2 / r) / 2.0); } double a3(t1, t2, t3, r) double t1, t2, t3, r; { double c3(); /* This is A3 divided by 4 pi r^2 because c3 is C divided by pi */ return(c3(t1 / r, t2 / r, t3 / r) / 4.0); } double c2(a, b) double a, b; { double z, z2; double c2(); /* This is the function C(a, b, 0) divided by pi - assumes a, b > 0 */ if( ( z2 = 1.0 - a * a - b * b) < 0.0 ) return(0.0); z = sqrt(z2); return((atan2(z, a * b) - a * atan2(z, b) - b * atan2(z, a)) / pi); } double c3(a, b, c) double 
a, b, c; { double za, zb, zc, sum; /* This is C(a,b,c) divided by pi. Arguments assumed > 0 */ if(a * a + b * b + c * c >= 1.0) return(0.0); za = sqrt(1 - b * b - c * c); zb = sqrt(1 - a * a - c * c); zc = sqrt(1 - a * a - b * b); sum = atan2(zb, a * c) + atan2(za, b * c) + atan2(zc, a * b) - a * atan2(zb, c) + a * atan2(b, zc) - b * atan2(za, c) + b * atan2(a, zc) - c * atan2(zb, a) + c * atan2(b, za); return(sum / pi - 1); } spatstat/src/bdrymask.c0000644000176200001440000000163213166361223014671 0ustar liggesusers/* bdrymask.c Boundary pixels of binary mask Copyright (C) Adrian Baddeley, Rolf Turner and Ege Rubak 2014 Licence: GPL >= 2 $Revision: 1.3 $ $Date: 2016/02/02 01:29:50 $ */ #include #include #include void bdrymask(nx, ny, m, b) /* inputs */ int *nx, *ny, *m; /* outputs */ int *b; { int Nxcol, Nyrow, Nx1, Ny1; int i, j, mij; Nxcol = *nx; Nyrow = *ny; Nx1 = Nxcol - 1; Ny1 = Nyrow - 1; #define MAT(A,I,J) A[(I) + (J) * Nyrow] /* loop over pixels */ for(j = 0; j < Nxcol; j++) { R_CheckUserInterrupt(); for(i = 0; i < Nyrow; i++) { mij = MAT(m, i, j); if(i == 0 || i == Ny1 || j == 0 || j == Nx1) { MAT(b, i, j) = mij; } else if((mij != MAT(m, (i-1), j)) || (mij != MAT(m, (i+1), j)) || (mij != MAT(m, i, (j-1))) || (mij != MAT(m, i, (j+1)))) { MAT(b, i, j) = 1; } } } } spatstat/src/connect.c0000755000176200001440000000627113166361223014515 0ustar liggesusers/* connect.c Connected component transforms cocoImage: connected component transform of a discrete binary image (8-connected topology) cocoGraph: connected component labels for a discrete graph specified by a list of edges $Revision: 1.8 $ $Date: 2013/05/27 02:09:10 $ */ #include #include #include #include #include "raster.h" void shape_raster(); #include "yesno.h" /* workhorse function for cocoImage */ void comcommer(im) Raster *im; /* raster must have been dimensioned by shape_raster() */ /* Pixel values assumed to be 0 in background, and distinct nonzero integers in foreground */ { int j,k; int rmin, rmax, 
cmin, cmax; int label, curlabel, minlabel; int nchanged; /* image boundaries */ rmin = im->rmin; rmax = im->rmax; cmin = im->cmin; cmax = im->cmax; #define ENTRY(ROW, COL) Entry(*im, ROW, COL, int) #define UPDATE(ROW,COL,BEST,NEW) \ NEW = ENTRY(ROW, COL); \ if(NEW != 0 && NEW < BEST) \ BEST = NEW nchanged = 1; while(nchanged >0) { nchanged = 0; R_CheckUserInterrupt(); for(j = rmin; j <= rmax; j++) { for(k = cmin; k <= cmax; k++) { curlabel = ENTRY(j, k); if(curlabel != 0) { minlabel = curlabel; UPDATE(j-1, k-1, minlabel, label); UPDATE(j-1, k, minlabel, label); UPDATE(j-1, k+1, minlabel, label); UPDATE(j, k-1, minlabel, label); UPDATE(j, k, minlabel, label); UPDATE(j, k+1, minlabel, label); UPDATE(j+1, k-1, minlabel, label); UPDATE(j+1, k, minlabel, label); UPDATE(j+1, k+1, minlabel, label); if(minlabel < curlabel) { ENTRY(j, k) = minlabel; nchanged++; } } } } } } void cocoImage(mat, nr, nc) int *mat; /* input: binary image */ int *nr, *nc; /* raster dimensions EXCLUDING margin of 1 on each side */ { Raster im; shape_raster( &im, (void *) mat, (double) 1, (double) 1, (double) *nc, (double) *nr, *nr+2, *nc+2, 1, 1); comcommer(&im); } void cocoGraph(nv, ne, ie, je, label, status) /* inputs */ int *nv; /* number of graph vertices */ int *ne; /* number of edges */ int *ie, *je; /* vectors of indices of ends of each edge */ /* output */ int *label; /* vector of component labels for each vertex */ /* Component label is lowest serial number of any vertex in the connected component */ int *status; /* 0 if OK, 1 if overflow */ { int Nv, Ne, i, j, k, niter, labi, labj, changed; Nv = *nv; Ne = *ne; /* initialise labels */ for(k = 0; k < Nv; k++) label[k] = k; for(niter = 0; niter < Nv; niter++) { R_CheckUserInterrupt(); changed = NO; for(k = 0; k < Ne; k++) { i = ie[k]; j = je[k]; labi = label[i]; labj = label[j]; if(labi < labj) { label[j] = labi; changed = YES; } else if(labj < labi) { label[i] = labj; changed = YES; } } if(!changed) { /* algorithm has converged */ *status 
= 0; return; } } /* error exit */ *status = 1; return; } spatstat/src/mhv1.h0000644000176200001440000000040113166361223013726 0ustar liggesusers/* mhv1.h marked or unmarked simulation */ #undef MH_MARKED if(marked) { /* marked process */ #define MH_MARKED YES #include "mhv2.h" #undef MH_MARKED } else { /* unmarked process */ #define MH_MARKED NO #include "mhv2.h" #undef MH_MARKED } spatstat/src/sparselinalg.c0000644000176200001440000000054213166361223015540 0ustar liggesusers#include #include /* sparselinalg.c Counterpart of 'linalg.c' for sparse matrices/arrays $Revision: 1.6 $ $Date: 2016/02/20 11:14:12 $ */ #undef DBG #define FNAME CspaSumSymOut #undef WEIGHTS #include "spasumsymout.h" #undef FNAME #define FNAME CspaWtSumSymOut #define WEIGHTS #include "spasumsymout.h" #undef FNAME spatstat/src/knndist.h0000644000176200001440000000744313166361223014542 0ustar liggesusers/* knndist.h Code template for C functions supporting knndist and knnwhich THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER This code is #included multiple times in knndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012 Licence: GPL >= 2 $Revision: 1.3 $ $Date: 2013/05/27 02:09:10 $ */ void FNAME(n, kmax, x, y, #ifdef DIST nnd, #endif #ifdef WHICH nnwhich, #endif huge) /* inputs */ int *n, *kmax; double *x, *y, *huge; /* output matrices (npoints * kmax) in ROW MAJOR order */ #ifdef DIST double *nnd; #endif #ifdef WHICH int *nnwhich; #endif { int npoints, maxchunk, nk, nk1, i, k, k1, left, right, unsorted; double d2, d2minK, xi, yi, dx, dy, dy2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif hu = *huge; hu2 = hu * hu; npoints = *n; nk = *kmax; nk1 = nk - 1; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints) maxchunk = npoints; for(; i < maxchunk; i++) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif /* initialise nn distances and indices */ d2minK = hu2; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } xi = x[i]; yi = y[i]; /* search backward */ for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L"); #endif dy = yi - y[left]; dy2 = dy * dy; if(dy2 > d2minK) break; dx = x[left] - xi; d2 = dx * dx + dy2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = left; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } /* search forward */ for(right = i + 1; right < npoints; ++right) { #ifdef 
SPATSTAT_DEBUG Rprintf("R"); #endif dy = y[right] - yi; dy2 = dy * dy; if(dy2 > d2minK) break; dx = x[right] - xi; d2 = dx * dx + dy2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = right; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } /* search finished for point i */ #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[nk * i + k] = which[k] + 1; /* R indexing */ #endif } /* end of i loop */ } } } spatstat/src/mhsnoopdef.h0000644000176200001440000000104113166361223015216 0ustar liggesusers/* mhsnoopdef.h Define structure 'Snoop' containing visual debugger parameters and state $Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $ */ #ifndef R_INTERNALS_H_ #include #endif typedef struct Snoop { int active; /* true or false */ int nextstop; /* jump to iteration number 'nextstop' */ int nexttype; /* jump to the next proposal of type 'nexttype' */ SEXP env; /* environment for exchanging data with R */ SEXP expr; /* callback expression for visual debugger */ } Snoop; #define NO_TYPE -1 spatstat/src/Ediggatsti.c0000755000176200001440000000337213166361223015147 0ustar liggesusers#include #include #include #include "chunkloop.h" #include "looptest.h" #include "constants.h" /* Ediggatsti.c $Revision: 1.3 $ $Date: 2014/09/19 00:53:30 $ C implementation of 'eval' for DiggleGatesStibbard interaction Assumes point patterns are sorted in increasing order of x coordinate */ void Ediggatsti(nnsource, xsource, ysource, idsource, nntarget, xtarget, ytarget, idtarget, rrho, values) /* 
inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget; int *idsource, *idtarget; double *rrho; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft, idsourcej; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double rho, rho2, rho2pluseps, coef, product; nsource = *nnsource; ntarget = *nntarget; rho = *rrho; if(nsource == 0 || ntarget == 0) return; rho2 = rho * rho; coef = M_PI_2/rho; rho2pluseps = rho2 + EPSILON(rho2); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { product = 1; xsourcej = xsource[j]; ysourcej = ysource[j]; idsourcej = idsource[j]; /* adjust starting position */ xleft = xsourcej - rho; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft until dx > rho */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > rho2pluseps) break; if(idtarget[i] != idsourcej) { dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= rho2) product *= sin(sqrt(d2) * coef); } } values[j] = log(product * product); } } } spatstat/src/lookup.c0000755000176200001440000001170613166361223014374 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity function for a general pairwise interaction process with the pairwise interaction function given by a ``lookup table'', passed through the par argument. 
*/ /* For debugging code, insert the line: #define DEBUG 1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Lookup { int nlook; int equisp; double delta; double rmax; double r2max; double *h; /* values of pair interaction */ double *r; /* r values if not equally spaced */ double *r2; /* r^2 values if not equally spaced */ double *period; int per; } Lookup; /* initialiser function */ Cdata *lookupinit(state, model, algo) State state; Model model; Algor algo; { int i, nlook; double ri; Lookup *lookup; lookup = (Lookup *) R_alloc(1, sizeof(Lookup)); /* Interpret model parameters*/ lookup->nlook = nlook = model.ipar[0]; lookup->equisp = (model.ipar[1] > 0); lookup->delta = model.ipar[2]; lookup->rmax = model.ipar[3]; lookup->r2max = pow(lookup->rmax, 2); /* periodic boundary conditions? */ lookup->period = model.period; lookup->per = (model.period[0] > 0.0); /* If the r-values are equispaced only the h vector is included in ``par'' after ``rmax''; the entries of h then consist of h[0] = par[5], h[1] = par[6], ..., h[k-1] = par[4+k], ..., h[nlook-1] = par[4+nlook]. If the r-values are NOT equispaced then the individual r values are needed and these are included as r[0] = par[5+nlook], r[1] = par[6+nlook], ..., r[k-1] = par[4+nlook+k], ..., r[nlook-1] = par[4+2*nlook]. 
*/ lookup->h = (double *) R_alloc((size_t) nlook, sizeof(double)); for(i = 0; i < nlook; i++) lookup->h[i] = model.ipar[4+i]; if(!(lookup->equisp)) { lookup->r = (double *) R_alloc((size_t) nlook, sizeof(double)); lookup->r2 = (double *) R_alloc((size_t) nlook, sizeof(double)); for(i = 0; i < nlook; i++) { ri = lookup->r[i] = model.ipar[4+nlook+i]; lookup->r2[i] = ri * ri; } } #ifdef DEBUG Rprintf("Exiting lookupinit: nlook=%d, equisp=%d\n", nlook, lookup->equisp); #endif return((Cdata *) lookup); } /* conditional intensity evaluator */ double lookupcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, nlook, k, kk, ix, ixp1, j; double *x, *y; double u, v; double r2max, d2, d, delta, cifval, ux, vy; Lookup *lookup; lookup = (Lookup *) cdata; r2max = lookup->r2max; delta = lookup->delta; nlook = lookup->nlook; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(lookup->equisp) { /* equispaced r values */ if(lookup->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { d = sqrt(dist2(u,v,x[j],y[j],lookup->period)); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jperiod)); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { d = hypot(u - x[j], v-y[j]); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jh[k]; } } } } } else { /* non-equispaced r values */ if(lookup->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = dist2(u,v,x[j],y[j],lookup->period); if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 
0 : kk-1; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jperiod); if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { ux = u - x[j]; vy = v - y[j]; d2 = ux * ux + vy * vy; if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jr2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } } } return cifval; } Cifns LookupCifns = { &lookupinit, &lookupcif, (updafunptr) NULL, NO}; spatstat/src/straushm.c0000755000176200001440000001550113166361223014726 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Strauss hardcore process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct MultiStraussHard { int ntypes; double *gamma; /* gamma[i,j] = gamma[i+ntypes*j] for i,j = 0... ntypes-1 */ double *rad; /* rad[i,j] = rad[j+ntypes*i] for i,j = 0... ntypes-1 */ double *hc; /* hc[i,j] = hc[j+ntypes*i] for i,j = 0... 
ntypes-1 */ double *rad2; /* squared radii */ double *hc2; /* squared radii */ double *rad2hc2; /* r^2 - h^2 */ double range2; /* square of interaction range */ double *loggamma; /* logs of gamma[i,j] */ double *period; int *hard; /* hard[i,j] = 1 if gamma[i,j] ~~ 0 */ int *kount; /* space for kounting pairs of each type */ int per; } MultiStraussHard; /* initialiser function */ Cdata *straushminit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2, hard; double g, r, h, r2, h2, logg, range2; MultiStraussHard *multistrausshard; multistrausshard = (MultiStraussHard *) R_alloc(1, sizeof(MultiStraussHard)); multistrausshard->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multistrausshard->gamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->rad = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hc = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multistrausshard->rad2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->rad2hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->loggamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hard = (int *) R_alloc((size_t) n2, sizeof(int)); /* Allocate scratch space for counts of each pair of types */ multistrausshard->kount = (int *) R_alloc((size_t) n2, sizeof(int)); /* Copy and process model parameters*/ /* ipar will contain n^2 values of gamma, then n^2 values of r, then n^2 values of h */ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { g = model.ipar[ i + j*ntypes]; r = model.ipar[ n2 + i + j*ntypes]; h = model.ipar[2*n2 + i + j*ntypes]; r2 = r * r; h2 = h * h; hard = (g < DOUBLE_EPS); logg = (hard) ? 
0 : log(g); MAT(multistrausshard->gamma, i, j, ntypes) = g; MAT(multistrausshard->rad, i, j, ntypes) = r; MAT(multistrausshard->hc, i, j, ntypes) = h; MAT(multistrausshard->rad2, i, j, ntypes) = r2; MAT(multistrausshard->hc2, i, j, ntypes) = h2; MAT(multistrausshard->rad2hc2, i, j, ntypes) = r2-h2; MAT(multistrausshard->hard, i, j, ntypes) = hard; MAT(multistrausshard->loggamma, i, j, ntypes) = logg; if(r2 > range2) range2 = r2; } } multistrausshard->range2 = range2; /* periodic boundary conditions? */ multistrausshard->period = model.period; multistrausshard->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multistrausshard); } /* conditional intensity evaluator */ double straushmcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, kount, ix, ixp1, j, mrk, mrkj, m1, m2; int *marks; double *x, *y; double u, v, lg; double d2, cifval; double range2; double *period; MultiStraussHard *multistrausshard; DECLARE_CLOSE_D2_VARS; multistrausshard = (MultiStraussHard *) cdata; range2 = multistrausshard->range2; period = multistrausshard->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multistrausshard->ntypes; #ifdef DEBUG Rprintf("initialising pair counts\n"); #endif /* initialise pair counts */ for(m1 = 0; m1 < ntypes; m1++) for(m2 = 0; m2 < ntypes; m2++) MAT(multistrausshard->kount, m1, m2, ntypes) = 0; /* compile pair counts */ #ifdef DEBUG Rprintf("compiling pair counts\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multistrausshard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) { if(d2 < 
MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } } #ifdef DEBUG Rprintf("multiplying cif factors\n"); #endif /* multiply cif value by pair potential */ for(m1 = 0; m1 < ntypes; m1++) { for(m2 = 0; m2 < ntypes; m2++) { kount = MAT(multistrausshard->kount, m1, m2, ntypes); if(MAT(multistrausshard->hard, m1, m2, ntypes)) { if(kount > 0) { cifval = 0.0; return(cifval); } } else { lg = MAT(multistrausshard->loggamma, m1, m2, ntypes); cifval *= exp(lg * kount); } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiStraussHardCifns = { &straushminit, &straushmcif, (updafunptr) NULL, YES}; spatstat/src/idw.c0000755000176200001440000000703413166361223013645 0ustar liggesusers/* idw.c Inverse-distance weighted smoothing $Revision: 1.8 $ $Date: 2013/05/27 02:09:10 $ */ #include #include #include "chunkloop.h" #define MAT(X,I,J,NROW) (X)[(J) + (NROW) * (I)] /* inverse-distance smoothing from data points onto pixel grid */ void Cidw(x, y, v, n, xstart, xstep, nx, ystart, ystep, ny, power, num, den, rat) double *x, *y, *v; /* data points and values */ int *n; double *xstart, *xstep, *ystart, *ystep; /* pixel grid 
*/ int *nx, *ny; double *power; /* exponent for IDW */ double *num, *den, *rat; /* output arrays - assumed initialised 0 */ { int N, i, Nx, Ny, ix, iy; double xg, yg, x0, dx, y0, dy, pon2, d2, w; N = *n; Nx = *nx; Ny = *ny; x0 = *xstart; y0 = *ystart; dx = *xstep; dy = *ystep; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/d2; MAT(num, ix, iy, Ny) += w * v[i]; MAT(den, ix, iy, Ny) += w; } /* compute ratio */ MAT(rat, ix, iy, Ny) = MAT(num, ix, iy, Ny)/MAT(den, ix, iy, Ny); } } } else { /* general case */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/pow(d2, pon2); MAT(num, ix, iy, Ny) += w * v[i]; MAT(den, ix, iy, Ny) += w; } /* compute ratio */ MAT(rat, ix, iy, Ny) = MAT(num, ix, iy, Ny)/MAT(den, ix, iy, Ny); } } } } /* Leave-one-out IDW at data points only */ void idwloo(x, y, v, n, power, num, den, rat) double *x, *y, *v; /* data points and values */ int *n; double *power; /* exponent for IDW */ double *num, *den, *rat; /* output vectors - assumed initialised 0 */ { int N, i, j, maxchunk; double xi, yi, d2, w, pon2; N = *n; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; if(i > 0) { for(j = 0; j < i; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; num[i] += w * v[j]; den[i] += w; } } if(i < N-1) { for(j = i+1; j < N; 
j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; num[i] += w * v[j]; den[i] += w; } } /* compute ratio */ rat[i] = num[i]/den[i]; } } } else { /* general case */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; if(i > 0) { for(j = 0; j < i; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); num[i] += w * v[j]; den[i] += w; } } if(i < N-1) { for(j = i+1; j < N; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); num[i] += w * v[j]; den[i] += w; } } /* compute ratio */ rat[i] = num[i]/den[i]; } } } } spatstat/src/knngrid.h0000644000176200001440000001273513166361223014524 0ustar liggesusers #if (1 == 0) /* knngrid.h Code template for C functions k-nearest neighbours (k > 1) of each grid point THE FOLLOWING CODE ASSUMES THAT POINT PATTERN (xp, yp) IS SORTED IN ASCENDING ORDER OF x COORDINATE This code is #included multiple times in knngrid.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.6 $ $Date: 2016/02/02 01:31:50 $ */ #endif #undef PRINTALOT void FNAME(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ kmax, nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; int *kmax; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int Nxcol, Nyrow; int i, j, ijpos; int Npoints, Nk, Nk1; int mleft, mright, mwhich, lastmwhich, unsorted, k, k1; double X0, Y0, Xstep, Ystep; double d2, d2minK, xj, yi, dx, dy, dx2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif Nxcol = *nx; Nyrow = *ny; Npoints = *np; Nk = *kmax; hu = *huge; X0 = *x0; Y0 = *y0; Xstep = *xstep; Ystep = *ystep; Nk1 = Nk - 1; hu2 = hu * hu; if(Npoints == 0) return; lastmwhich = mwhich = 0; /* create space to store the nearest neighbour distances and indices for the current grid point */ d2min = (double *) R_alloc((size_t) Nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) Nk, sizeof(int)); #endif /* loop over pixels */ for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) { R_CheckUserInterrupt(); #ifdef PRINTALOT Rprintf("j=%d, xj=%lf\n", j, xj); #endif for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) { #ifdef PRINTALOT Rprintf("\ti=%d, yi = %lf\n", i, yi); #endif /* initialise nn distances and indices */ d2minK = hu2; for(k = 0; k < Nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } if(lastmwhich < Npoints) { /* search forward from previous nearest neighbour */ for(mright = lastmwhich; mright < Npoints; ++mright) { dx = xp[mright] - xj; dx2 = dx * dx; #ifdef PRINTALOT Rprintf("\t\t%d\n", mright); #endif if(dx2 > d2minK) /* note that dx2 >= d2minK could break too early */ break; dy = yp[mright] - yi; d2 = dy * dy + dx2; if (d2 < d2minK) { #ifdef PRINTALOT Rprintf("\t\t\tNeighbour: d2=%lf\n", d2); #endif 
/* overwrite last entry in list of neighbours */ d2min[Nk1] = d2; mwhich = mright; #ifdef WHICH which[Nk1] = mright; #endif /* bubble sort */ unsorted = YES; for(k = Nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[Nk1]; #ifdef PRINTALOT Rprintf("\t\t\tUpdated d2minK=%lf\n", d2minK); for(k = 0; k < Nk; k++) Rprintf("\t\t\t\td2min[%d]=%lf\n", k, d2min[k]); #ifdef WHICH for(k = 0; k < Nk; k++) Rprintf("\t\t\t\twhich[%d]=%d\n", k, which[k]); #endif #endif } } /* end forward search */ } if(lastmwhich > 0) { /* search backward from previous nearest neighbour */ for(mleft = lastmwhich - 1; mleft >= 0; --mleft) { dx = xj - xp[mleft]; dx2 = dx * dx; #ifdef PRINTALOT Rprintf("\t\t%d\n", mleft); #endif if(dx2 > d2minK) /* note that dx2 >= d2minK could break too early */ break; dy = yp[mleft] - yi; d2 = dy * dy + dx2; if (d2 < d2minK) { #ifdef PRINTALOT Rprintf("\t\t\tNeighbour: d2=%lf\n", d2); #endif /* overwrite last entry in list of neighbours */ mwhich = mleft; d2min[Nk1] = d2; #ifdef WHICH which[Nk1] = mleft; #endif /* bubble sort */ unsorted = YES; for(k = Nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[Nk1]; #ifdef PRINTALOT Rprintf("\t\t\tUpdated d2minK=%lf\n", d2minK); for(k = 0; k < Nk; k++) Rprintf("\t\t\t\td2min[%d]=%lf\n", k, d2min[k]); #ifdef WHICH for(k = 0; k < Nk; k++) Rprintf("\t\t\t\twhich[%d]=%d\n", k, which[k]); #endif #endif } } /* end backward search */ } /* remember index of most recently-encountered neighbour */ lastmwhich = mwhich; #ifdef PRINTALOT 
Rprintf("\t\tlastmwhich=%d\n", lastmwhich); #endif /* copy nn distances for grid point (i, j) to output array nnd[ , i, j] */ ijpos = Nk * (i + j * Nyrow); for(k = 0; k < Nk; k++) { #ifdef DIST nnd[ijpos + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[ijpos + k] = which[k] + 1; /* R indexing */ #endif } /* end of loop over points i */ } } } spatstat/src/strauss.c0000755000176200001440000000474313166361223014572 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Strauss process */ /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct Strauss { double gamma; double r; double loggamma; double r2; double *period; int hard; int per; } Strauss; /* initialiser function */ Cdata *straussinit(state, model, algo) State state; Model model; Algor algo; { /* create storage for model parameters */ Strauss *strauss; strauss = (Strauss *) R_alloc(1, sizeof(Strauss)); /* Interpret model parameters*/ strauss->gamma = model.ipar[0]; strauss->r = model.ipar[1]; /* No longer passed as r^2 */ strauss->r2 = strauss->r * strauss->r; strauss->period = model.period; #ifdef MHDEBUG Rprintf("Initialising Strauss gamma=%lf, r=%lf\n", strauss->gamma, strauss->r); #endif /* is the model numerically equivalent to hard core ? */ strauss->hard = (strauss->gamma < DOUBLE_EPS); strauss->loggamma = (strauss->hard) ? 0 : log(strauss->gamma); /* periodic boundary conditions? 
*/ strauss->per = (model.period[0] > 0.0); return((Cdata *) strauss); } /* conditional intensity evaluator */ double strausscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, kount, ix, ixp1, j; double *x, *y; double u, v; double r2, cifval; Strauss *strauss; DECLARE_CLOSE_VARS; strauss = (Strauss *) cdata; r2 = strauss->r2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); kount = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(strauss->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],strauss->period, r2)) ++kount; } } if(ixp1 < npts) { for(j=ixp1; jperiod, r2)) ++kount; } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE(u,v,x[j],y[j], r2)) ++kount; } } if(ixp1 < npts) { for(j=ixp1; jhard) { if(kount > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp((strauss->loggamma) * kount); return cifval; } Cifns StraussCifns = { &straussinit, &strausscif, (updafunptr) NULL, NO}; spatstat/src/call3d.c0000755000176200001440000002474713166361223014236 0ustar liggesusers/* $Revision: 1.5 $ $Date: 2010/10/24 10:57:02 $ R interface Pass data between R and internally-defined data structures # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
# ////////////////////////////////////////////// */ #include #include "geom3.h" #include "functable.h" #undef DEBUG #ifdef DEBUG #define DEBUGMESSAGE(S) Rprintf(S); #else #define DEBUGMESSAGE(S) #endif void g3one(Point *p, int n, Box *b, Ftable *g); void g3three(Point *p, int n, Box *b, Ftable *g); void g3cen(Point *p, int n, Box *b, H4table *count); void k3trans(Point *p, int n, Box *b, Ftable *k); void k3isot(Point *p, int n, Box *b, Ftable *k); void pcf3trans(Point *p, int n, Box *b, Ftable *pcf, double delta); void pcf3isot(Point *p, int n, Box *b, Ftable *pcf, double delta); void phatminus(Point *p, int n, Box *b, double vside, Itable *count); void phatnaive(Point *p, int n, Box *b, double vside, Itable *count); void p3hat4(Point *p, int n, Box *b, double vside, H4table *count); /* ALLOCATION OF SPACE FOR STRUCTURES/ARRAYS We have defined an alloc() and free() function for each type. However, the free() functions currently do nothing, because we use R_alloc to allocate transient space, which is freed automatically by R. */ Ftable * allocFtable(n) /* allocate function table of size n */ int n; { Ftable *x; x = (Ftable *) R_alloc(1, sizeof(Ftable)); x->n = n; x->f = (double *) R_alloc(n, sizeof(double)); x->num = (double *) R_alloc(n, sizeof(double)); x->denom = (double *) R_alloc(n, sizeof(double)); return(x); } void freeFtable(x) Ftable *x; { } Itable * allocItable(n) int n; { Itable *x; x = (Itable *) R_alloc(1, sizeof(Itable)); x->n = n; x->num = (int *) R_alloc(n, sizeof(int)); x->denom = (int *) R_alloc(n, sizeof(int)); return(x); } void freeItable(x) Itable *x; { } H4table * allocH4table(n) int n; { H4table *x; x = (H4table *) R_alloc(1, sizeof(H4table)); x->n = n; x->obs = (int *) R_alloc(n, sizeof(int)); x->nco = (int *) R_alloc(n, sizeof(int)); x->cen = (int *) R_alloc(n, sizeof(int)); x->ncc = (int *) R_alloc(n, sizeof(int)); return(x); } void freeH4table(x) H4table *x; { } Box * allocBox() /* I know this is ridiculous but it's consistent. 
*/ { Box *b; b = (Box *) R_alloc(1, sizeof(Box)); return(b); } void freeBox(x) Box *x; { } Point * allocParray(n) /* allocate array of n Points */ int n; { Point *p; p = (Point *) R_alloc(n, sizeof(Point)); return(p); } void freeParray(x) Point *x; { } /* CREATE AND INITIALISE DATA STORAGE */ Ftable * MakeFtable(t0, t1, n) double *t0, *t1; int *n; { Ftable *tab; int i, nn; nn = *n; tab = allocFtable(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->f[i] = 0.0; tab->num[i] = 0; tab->denom[i] = 0; } return(tab); } Itable * MakeItable(t0, t1, n) double *t0, *t1; int *n; { Itable *tab; int i, nn; nn = *n; tab = allocItable(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->num[i] = 0; tab->denom[i] = 0; } return(tab); } H4table * MakeH4table(t0, t1, n) double *t0, *t1; int *n; { H4table *tab; int i, nn; nn = *n; tab = allocH4table(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->obs[i] = 0; tab->nco[i] = 0; tab->cen[i] = 0; tab->ncc[i] = 0; } tab->upperobs = 0; tab->uppercen = 0; return(tab); } /* CONVERSION OF DATA TYPES R -> internal including allocation of internal data types as needed */ Point * RtoPointarray(x,y,z,n) double *x, *y, *z; int *n; { int i, nn; Point *p; nn = *n; p = allocParray(nn); for(i = 0; i < nn; i++) { p[i].x = x[i]; p[i].y = y[i]; p[i].z = z[i]; } return(p); } Box * RtoBox(x0, x1, y0, y1, z0, z1) double *x0, *x1, *y0, *y1, *z0, *z1; { Box *b; b = allocBox(); b->x0 = *x0; b->x1 = *x1; b->y0 = *y0; b->y1 = *y1; b->z0 = *z0; b->z1 = *z1; return(b); } /* CONVERSION OF DATA TYPES internal -> R Note: it can generally be assumed that the R arguments are already allocated vectors of correct length, so we do not allocate them. 
*/ void FtabletoR(tab, t0, t1, n, f, num, denom) /* internal */ Ftable *tab; /* R representation */ double *t0, *t1; int *n; double *f, *num, *denom; { int i; *t0 = tab->t0; *t1 = tab->t1; *n = tab->n; for(i = 0; i < tab->n; i++) { f[i] = tab->f[i]; num[i] = tab->num[i]; denom[i] = tab->denom[i]; } freeFtable(tab); } void ItabletoR(tab, t0, t1, m, num, denom) /* internal */ Itable *tab; /* R representation */ double *t0, *t1; int *m; int *num, *denom; { int i; *t0 = tab->t0; *t1 = tab->t1; *m = tab->n; for(i = 0; i < tab->n; i++) { num[i] = tab->num[i]; denom[i] = tab->denom[i]; } freeItable(tab); } void H4tabletoR(tab, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) /* internal */ H4table *tab; /* R representation */ double *t0, *t1; int *m; int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { int i; *t0 = tab->t0; *t1 = tab->t1; *m = tab->n; *upperobs = tab->upperobs; *uppercen = tab->uppercen; for(i = 0; i < tab->n; i++) { obs[i] = tab->obs[i]; nco[i] = tab->nco[i]; cen[i] = tab->cen[i]; ncc[i] = tab->ncc[i]; } freeH4table(tab); } /* R CALLING INTERFACE These routines are called from R by > .C("routine-name", ....) 
*/ void RcallK3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch((int) *method) { case 0: k3trans(p, (int) *n, b, tab); break; case 1: k3isot(p, (int) *n, b, tab); break; default: Rprintf("Method %d not implemented: defaults to 0\n", *method); k3trans(p, (int) *n, b, tab); break; } FtabletoR(tab, t0, t1, m, f, num, denom); } void RcallG3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch(*method) { case 1: g3one(p, (int) *n, b, tab); break; case 3: g3three(p, (int) *n, b, tab); break; default: Rprintf("Method %d not implemented: defaults to 3\n", *method); g3three(p, (int) *n, b, tab); } FtabletoR(tab, t0, t1, m, f, num, denom); } void RcallG3cen(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; int *m; /* H4table */ int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { Point *p; Box *b; H4table *count; DEBUGMESSAGE("Inside RcallG3cen\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeH4table(t0, t1, m); g3cen(p, (int) *n, b, count); H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen); DEBUGMESSAGE("Leaving RcallG3cen\n") } void RcallF3(x,y,z, n, x0, x1, y0, y1, z0, z1, vside, t0, t1, m, num, denom, method) double *x, *y, *z; /* points */ int *n; double 
*x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *vside; double *t0, *t1; int *m; /* Itable */ int *num, *denom; int *method; { Point *p; Box *b; Itable *count; DEBUGMESSAGE("Inside Rcall_f3\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeItable(t0, t1, m); switch((int) *method) { case 0: phatnaive(p, (int) *n, b, *vside, count); break; case 1: phatminus(p, (int) *n, b, *vside, count); break; default: Rprintf("Method %d not recognised: defaults to 1\n", *method); phatminus(p, (int) *n, b, *vside, count); } ItabletoR(count, t0, t1, m, num, denom); DEBUGMESSAGE("Leaving Rcall_f3\n") } void RcallF3cen(x,y,z, n, x0, x1, y0, y1, z0, z1, vside, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *vside; double *t0, *t1; int *m; /* H4table */ int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { Point *p; Box *b; H4table *count; DEBUGMESSAGE("Inside Rcallf3cen\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeH4table(t0, t1, m); p3hat4(p, (int) *n, b, *vside, count); H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen); DEBUGMESSAGE("Leaving Rcallf3cen\n") } void Rcallpcf3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method, delta) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; double *delta; /* Epanechnikov kernel halfwidth */ { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch((int) *method) { case 0: pcf3trans(p, (int) *n, b, tab, (double) *delta); break; case 1: pcf3isot(p, (int) *n, b, tab, (double) *delta); break; default: Rprintf("Method %d not implemented: defaults to 0\n", *method); pcf3trans(p, (int) *n, b, tab, (double) *delta); break; } FtabletoR(tab, t0, t1, m, f, num, 
denom); } spatstat/src/veegraf.c0000644000176200001440000000677113166361223014505 0ustar liggesusers/* veegraf.c $Revision: 1.2 $ $Date: 2013/05/21 08:11:27 $ Given the edges of a graph, determine all "Vees" i.e. triples (i, j, k) where i ~ j and i ~ k. */ #include #include #include #include "chunkloop.h" #undef DEBUGVEE SEXP graphVees(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge) /* all arguments are integer */ /* Edges should NOT be repeated symmetrically. Indices need not be sorted. */ { int Nv, Ne; int *ie, *je; /* edges */ int *it, *jt, *kt; /* vectors of indices of triples */ int Nt, Ntmax; /* number of triples */ int Nj; int *jj; /* scratch storage */ int i, j, k, m, mj, mk, Nmore, maxchunk; /* output */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); /* That's 3 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGVEE Rprintf("i=%d ---------- \n", i); #endif /* Find Vee triples with apex 'i' */ /* First, find all vertices j connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { jj[Nj] = je[m]; Nj++; } else if(je[m] == i) { jj[Nj] = ie[m]; Nj++; } } /* save triples (i,j,k) */ #ifdef DEBUGVEE Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGVEE Rprintf("i=%d\njj=\n", i); for(mj = 0; 
mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGVEE Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 3+4=7 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triplet indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); UNPROTECT(7); return(out); } spatstat/src/raster.h0000755000176200001440000000474713166361223014377 0ustar liggesusers/* raster.h Definition of raster structures & operations requires (for floor()) $Revision: 1.3 $ $Date: 2004/11/15 19:25:11 $ */ typedef struct Raster{ /* array of data */ char *data; /* coerced to appropriate type */ int nrow; /* dimensions of entire array */ int ncol; int length; int rmin; /* position of valid subrectangle */ int rmax; int cmin; int cmax; /* definition of mapping into continuous space */ double x0; /* position of entry (rmin,cmin) */ double y0; double x1; /* position of entry (rmax,cmax) */ double y1; double xstep; /* x increment for each column step */ double ystep; /* y increment for each row step */ /* xstep = (x1 - x0)/(cmax - cmin) = (x1 - x0)/(number of valid columns - 1) CAN BE POSITIVE OR 
NEGATIVE */ /* image of valid subrectangle */ double xmin; /* = min{x0,x1} */ double xmax; double ymin; double ymax; } Raster; /* how to clear the data */ #define Clear(ARRAY,TYPE,VALUE) \ { unsigned int i; TYPE *p; \ for(i = 0, p = (TYPE *) (ARRAY).data; i < (ARRAY).length; i++, p++) \ *p = VALUE; } /* how to index a rectangular array stored sequentially in row-major order */ #define Entry(ARRAY,ROW,COL,TYPE) \ ((TYPE *)((ARRAY).data))[COL + (ROW) * ((ARRAY).ncol)] /* test for indices inside subrectangle */ #define Inside(ARRAY,ROW,COL) \ ( (ROW >= (ARRAY).rmin) && (ROW <= (ARRAY).rmax) && \ (COL >= (ARRAY).cmin) && (COL <= (ARRAY).cmax)) /* how to compute the position in R^2 corresponding to a raster entry */ #define Xpos(ARRAY,COL) \ ((ARRAY).x0 + (ARRAY).xstep * (COL - (ARRAY).cmin)) #define Ypos(ARRAY,ROW) \ ((ARRAY).y0 + (ARRAY).ystep * (ROW - (ARRAY).rmin)) #define Distance(X,Y,XX,YY) sqrt((X - XX)* (X - XX) + (Y - YY) * (Y - YY)) #define DistanceTo(X,Y,ARRAY,ROW,COL)\ Distance(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW)) #define DistanceSquared(X,Y,XX,YY) ((X - XX)* (X - XX) + (Y - YY) * (Y - YY)) #define DistanceToSquared(X,Y,ARRAY,ROW,COL)\ DistanceSquared(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW)) /* how to map a point (x,y) in R^2 to a raster entry */ /* (x,y) is guaranteed to lie in the rectangle bounded by the images of the entries (r,c), (r+1,c), (r,c+1), (r+1,c+1) where r = RowIndex(..) and c = ColIndex(..). 
*/ #define RowIndex(ARRAY,Y) \ ((ARRAY).rmin + (int) floor(((Y) - (ARRAY).y0)/(ARRAY).ystep)) #define ColIndex(ARRAY,X) \ ((ARRAY).cmin + (int) floor(((X) - (ARRAY).x0)/(ARRAY).xstep)) spatstat/src/hardcore.c0000755000176200001440000000410713166361223014647 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Hard core process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Hardcore { double h; /* hard core distance */ double h2; double *period; int per; } Hardcore; /* initialiser function */ Cdata *hardcoreinit(state, model, algo) State state; Model model; Algor algo; { Hardcore *hardcore; double h; hardcore = (Hardcore *) R_alloc(1, sizeof(Hardcore)); /* Interpret model parameters*/ hardcore->h = h = model.ipar[0]; hardcore->h2 = h * h; hardcore->period = model.period; /* periodic boundary conditions? */ hardcore->per = (model.period[0] > 0.0); return((Cdata *) hardcore); } /* conditional intensity evaluator */ double hardcorecif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double h2, a; Hardcore *hardcore; hardcore = (Hardcore *) cdata; h2 = hardcore->h2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(hardcore->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(dist2thresh(u,v,x[j],y[j],hardcore->period, h2)) return((double) 0.0); } } if(ixp1 < npts) { for(j=ixp1; jperiod, h2)) return((double) 0.0); } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { a = h2 - pow(u - x[j], 2); if(a > 0) { a -= pow(v - y[j], 2); if(a > 0) return((double) 0.0); } } } if(ixp1 < npts) { for(j=ixp1; j 0) { a -= pow(v - y[j], 2); if(a > 0) return((double) 0.0); } } } } return ((double) 1.0); } Cifns HardcoreCifns = { &hardcoreinit, &hardcorecif, 
(updafunptr) NULL, NO}; spatstat/src/spasumsymout.h0000644000176200001440000001024013166361223015646 0ustar liggesusers/* spasumsymout.h Function definitions for 'sumsymouter' for sparse matrices/arrays This file is #included in sparselinalg.c several times. Macros used FNAME function name DBG (#ifdef) debug WEIGHTS (#ifdef) use weights $Revision: 1.4 $ $Date: 2016/02/24 09:57:16 $ */ void FNAME(m, n, lenx, ix, jx, kx, x, flip, #ifdef WEIGHTS lenw, jw, kw, w, #endif y) int *m, *n; /* dimensions of array m * n * n */ int *lenx; /* number of nonzero entries in sparse array x */ int *ix, *jx, *kx; /* indices of entries in sparse array x */ double *x; /* values in sparse array x */ /* NB: ix, jx, kx are assumed to be sorted by order(j,k,i) i.e. in increasing order of j, then k within j, then i within (j,k) */ int *flip; /* reordering of ix, jx, kx, x that would achieve increasing order(k,j,i) */ #ifdef WEIGHTS int *lenw; /* length of jw, kw */ int *jw, *kw; /* indices of entries in sparse matrix w of weights */ /* Assumed sorted by order (j,k) */ double *w; /* values of weights w */ #endif double *y; /* output: full m * m matrix */ { /* Compute the sum of outer(x[,j,k], x[,k,j]) for all j != k */ int M,N,L, i,j,k,ii, l, ll, lstart, lend, t, tstart, tend, r; double xijk, xx; int *it, *jt, *kt; double *xt; #ifdef WEIGHTS int R; double wjk; #endif M = *m; N = *n; L = *lenx; #ifdef WEIGHTS R = *lenw; #endif if(L <= 1 || N <= 1 || M <= 0) return; /* Create space to store array in k-major order*/ it = (int *) R_alloc(L, sizeof(int)); jt = (int *) R_alloc(L, sizeof(int)); kt = (int *) R_alloc(L, sizeof(int)); xt = (double *) R_alloc(L, sizeof(double)); /* copy reordered array */ #ifdef DBG Rprintf("---------- Reordered: -------------------\n"); #endif for(l = 0; l < L; l++) { ll = flip[l]; it[l] = ix[ll]; jt[l] = jx[ll]; kt[l] = kx[ll]; xt[l] = x[ll]; #ifdef DBG Rprintf("%d \t [%d, %d, %d] = %lf\n", l, it[l], jt[l], kt[l], xt[l]); #endif } /* Now process array */ lstart = 
tstart = r = 0; lend = tend = -1; /* to keep compiler happy */ while(lstart < L && tstart < L) { /* Consider a new entry x[,j,k] */ j = jx[lstart]; k = kx[lstart]; #ifdef DBG Rprintf("Entry %d: [, %d, %d]\n", lstart, j, k); #endif #ifdef WEIGHTS /* Find weight w[j,k] */ while(r < R && ((jw[r] < j) || ((jw[r] == j) && (kw[r] < k)))) ++r; if(jw[r] == j && kw[r] == k) { /* weight w[j,k] is present */ wjk = w[r]; #endif /* Find all entries in x with the same j,k */ for(lend = lstart+1; lend < L && jx[lend] == j && kx[lend] == k; ++lend) ; --lend; #ifdef DBG Rprintf("\t lstart=%d, lend=%d\n", lstart, lend); #endif /* Find corresponding entries in transpose (k'=j, j'=k) */ /* search forward to find start of run */ while(tstart < L && ((kt[tstart] < j) || (kt[tstart] == j && jt[tstart] < k))) ++tstart; #ifdef DBG Rprintf("\t tstart=%d\n", tstart); Rprintf("\t kt[tstart]=%d, jt[tstart]=%d\n", kt[tstart], jt[tstart]); #endif if(kt[tstart] == j && jt[tstart] == k) { /* Both x[,j,k] and x[,k,j] are present so a contribution will occur */ /* seek end of run */ for(tend = tstart+1; tend < L && kt[tend] == j && jt[tend] == k; ++tend) ; --tend; #ifdef DBG Rprintf("\t tend=%d\n", tend); #endif /* Form products */ for(l = lstart; l <= lend; l++) { i = ix[l]; xijk = x[l]; #ifdef DBG Rprintf("Entry %d: [%d, %d, %d] = %lf\n", l, i, j, k, xijk); #endif for(t = tstart; t <= tend; t++) { ii = it[t]; xx = xijk * xt[t]; #ifdef WEIGHTS xx *= wjk; #endif /* increment result at [i, ii] and [ii, i]*/ y[i + M * ii] += xx; /* y[ii + M * i] += xx; */ #ifdef DBG Rprintf("-- matches entry %d: [%d, %d, %d] = %lf\n", t, ii, k, j, xt[t]); Rprintf("++ %lf\n", xx); #endif } } } #ifdef WEIGHTS } #endif lstart = ((lend > lstart) ? lend : lstart) + 1; tstart = ((tend > tstart) ? 
tend : tstart) + 1; } } spatstat/src/denspt.c0000755000176200001440000003125013166361223014354 0ustar liggesusers#include #include #include "chunkloop.h" #include "pairloop.h" #include "constants.h" /* denspt.c Calculation of density estimate at data points $Revision: 1.18 $ $Date: 2017/06/05 10:53:59 $ Assumes point pattern is sorted in increasing order of x coordinate *denspt* Density estimate at points *smoopt* Smoothed mark values at points */ #define TWOPI M_2PI double sqrt(), exp(); #define STD_DECLARATIONS \ int n, i, j, maxchunk; \ double xi, yi, rmax, r2max, dx, dy, dx2, d2 #define STD_INITIALISE \ n = *nxy; \ rmax = *rmaxi; \ r2max = rmax * rmax /* ----------------- density estimation -------------------- */ void denspt(nxy, x, y, rmaxi, sig, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-d2/twosig2); } , { result[i] = coef * resulti; }) } void wtdenspt(nxy, x, y, rmaxi, sig, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ double *sig; /* Gaussian sd */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * exp(-d2/twosig2); }, { result[i] = coef * resulti; } ) } /* ------------- anisotropic versions -------------------- */ void 
adenspt(nxy, x, y, rmaxi, detsigma, sinv, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } void awtdenspt(nxy, x, y, rmaxi, detsigma, sinv, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * \ exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } /* --------------- smoothing --------------------------- */ void smoopt(nxy, x, y, v, self, rmaxi, sig, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; 
/* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; countself = *self; twosig2 = 2.0 * sigma * sigma; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void wtsmoopt(nxy, x, y, v, self, rmaxi, sig, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance */ double *sig; /* Gaussian sd */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; countself = *self; twosig2 = 2.0 * sigma * sigma; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } /* ------------- anisotropic versions -------------------- */ void asmoopt(nxy, x, y, v, self, rmaxi, sinv, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output 
*/ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; int countself; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; countself = *self; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void awtsmoopt(nxy, x, y, v, self, rmaxi, sinv, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; int countself; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; countself = *self; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } /* ----------------- transformed coordinates -------------------- */ /* The following functions assume that x, y have been 
transformed by the inverse of the variance matrix, and subsequently scaled by 1/sqrt(2) so that the Gaussian density is proportional to exp(-(x^2+y^2)). Constant factor in density is omitted. */ void Gdenspt(nxy, x, y, rmaxi, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti; STD_INITIALISE; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-d2); } , { result[i] = resulti; }) } void Gwtdenspt(nxy, x, y, rmaxi, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti; STD_INITIALISE; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * exp(-d2); }, { result[i] = resulti; } ) } void Gsmoopt(nxy, x, y, v, self, rmaxi, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double numer, denom, wij; STD_INITIALISE; countself = *self; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void Gwtsmoopt(nxy, x, y, v, self, rmaxi, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double 
*x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double numer, denom, wij; STD_INITIALISE; countself = *self; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } spatstat/src/discs.c0000644000176200001440000000352013166361223014160 0ustar liggesusers#include #include /* discs.c Fill binary mask with discs with given centres and radii $Revision: 1.4 $ $Date: 2014/10/05 03:04:08 $ */ void discs2grid(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ nd, xd, yd, rd, /* disc parameters */ out) /* inputs */ int *nx, *ny, *nd; double *x0, *xstep, *y0, *ystep; double *xd, *yd, *rd; /* output */ int *out; { int Nxcol, Nyrow, Ndiscs; double X0, Y0, Xstep, Ystep; int i, j, k; double xk, yk, rk, rk2, dx, dymax; int imin, imax, jmin, jmax, iminj, imaxj, Nxcol1, Nyrow1; Nxcol = *nx; Nyrow = *ny; Ndiscs = *nd; X0 = *x0; Y0 = *y0; Xstep = *xstep; Ystep = *ystep; if(Ndiscs == 0) return; Nxcol1 = Nxcol - 1; Nyrow1 = Nyrow - 1; /* loop over discs */ for(k = 0; k < Ndiscs; k++) { R_CheckUserInterrupt(); xk = xd[k]; yk = yd[k]; rk = rd[k]; /* find valid range of i and j */ imax = floor( (yk + rk - Y0)/Ystep); imin = ceil((yk - rk - Y0)/Ystep); jmax = floor( (xk + rk - X0)/Xstep); jmin = ceil((xk - rk - X0)/Xstep); if(imax >= 0 && imin < Nyrow && jmax >= 0 && jmin < Nxcol && imax >= imin && jmax >= jmin) { if(imin < 0) imin = 0; if(imax > Nyrow1) imax = Nyrow1; if(jmin < 0) jmin = 0; if(jmax 
> Nxcol1) jmax = Nxcol1; rk2 = rk * rk; /* loop over relevant pixels */ for(j = jmin, dx=X0 + jmin * Xstep - xk; j <= jmax; j++, dx += Xstep) { dymax = sqrt(rk2 - dx * dx); imaxj = floor( (yk + dymax - Y0)/Ystep); iminj = ceil((yk - dymax - Y0)/Ystep); if(imaxj >= 0 && iminj < Nyrow) { if(iminj < 0) iminj = 0; if(imaxj > Nyrow1) imaxj = Nyrow1; for(i = iminj; i <= imaxj; i++) out[i + j * Nyrow] = 1; } } } } } spatstat/src/exactPdist.c0000755000176200001440000001007613166361223015172 0ustar liggesusers/* exactPdist.c `Pseudoexact' distance transform of a discrete binary image (the closest counterpart to `exactdist.c') $Revision: 1.12 $ $Date: 2011/05/17 12:27:20 $ */ #include #include "raster.h" void dist_to_bdry(); void shape_raster(); void ps_exact_dt(in, dist, row, col) Raster *in; /* input: binary image */ Raster *dist; /* output: exact distance to nearest point */ Raster *row; /* output: row index of closest point */ Raster *col; /* output: column index of closest point */ /* rasters must have been dimensioned by shape_raster() and must all have identical dimensions and margins */ { int j,k; double d, x, y; int r, c; double dnew; double huge; /* double bdiag; */ /* initialise */ #define UNDEFINED -1 #define Is_Defined(I) (I >= 0) #define Is_Undefined(I) (I < 0) Clear(*row,int,UNDEFINED) Clear(*col,int,UNDEFINED) huge = 2.0 * DistanceSquared(dist->xmin,dist->ymin,dist->xmax,dist->ymax); Clear(*dist,double,huge) /* if input pixel is TRUE, set distance to 0 and make pixel point to itself */ for(j = in->rmin; j <= in->rmax; j++) for(k = in->cmin; k <= in->cmax; k++) if(Entry(*in, j, k, int) != 0) { Entry(*dist, j, k, double) = 0.0; Entry(*row, j, k, int) = j; Entry(*col, j, k, int) = k; } /* how to update the distance values */ #define GETVALUES(ROW,COL) \ x = Xpos(*in, COL); \ y = Ypos(*in, ROW); \ d = Entry(*dist,ROW,COL,double); #define COMPARE(ROW,COL,RR,CC) \ r = Entry(*row,RR,CC,int); \ c = Entry(*col,RR,CC,int); \ if(Is_Defined(r) && Is_Defined(c) \ && 
Entry(*dist,RR,CC,double) < d) { \ dnew = DistanceSquared(x, y, Xpos(*in,c), Ypos(*in,r)); \ if(dnew < d) { \ Entry(*row,ROW,COL,int) = r; \ Entry(*col,ROW,COL,int) = c; \ Entry(*dist,ROW,COL,double) = dnew; \ d = dnew; \ } \ } /* bound on diagonal step distance squared */ /* bdiag = (in->xstep * in->xstep + in->ystep * in->ystep); */ /* forward pass */ for(j = in->rmin; j <= in->rmax; j++) for(k = in->cmin; k <= in->cmax; k++) { GETVALUES(j, k) COMPARE(j,k, j-1,k-1) COMPARE(j,k, j-1, k) COMPARE(j,k, j-1,k+1) COMPARE(j,k, j, k-1) } /* backward pass */ for(j = in->rmax; j >= in->rmin; j--) for(k = in->cmax; k >= in->cmin; k--) { GETVALUES(j, k) COMPARE(j,k, j+1,k+1) COMPARE(j,k, j+1, k) COMPARE(j,k, j+1,k-1) COMPARE(j,k, j, k+1) } /* take square roots of distances^2 */ for(j = in->rmax; j >= in->rmin; j--) for(k = in->cmax; k >= in->cmin; k--) Entry(*dist,j,k,double) = sqrt(Entry(*dist,j,k,double)); } /* R interface */ void ps_exact_dt_R(xmin, ymin, xmax, ymax, nr, nc, mr, mc, inp, distances, rows, cols, boundary) double *xmin, *ymin, *xmax, *ymax; /* x, y dimensions */ int *nr, *nc; /* raster dimensions EXCLUDING margins */ int *mr, *mc; /* margins */ int *inp; /* input: binary image */ double *distances; /* output: distance to nearest point */ int *rows; /* output: row of nearest point (start= 0) */ int *cols; /* output: column of nearest point (start = 0) */ double *boundary; /* output: distance to boundary of rectangle */ /* all images must have identical dimensions including a margin of 1 on each side */ { Raster data, dist, row, col, bdist; int mrow, mcol, nrow, ncol; mrow = *mr; mcol = *mc; /* full dimensions */ nrow = *nr + 2 * mrow; ncol = *nc + 2 * mcol; shape_raster( &data, (void *) inp, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &dist, (void *) distances, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &row, (void *) rows, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &col, (void *) cols, 
*xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); ps_exact_dt(&data, &dist, &row, &col); dist_to_bdry(&bdist); } spatstat/src/areaint.c0000755000176200001440000001624413166361223014510 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity function for an area-interaction process: cif = eta^(1-B) where B = (uncovered area)/(pi r^2) */ #define NGRID 16 /* To explore serious bug, #define BADBUG */ #undef BADBUG /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct AreaInt { /* model parameters */ double eta; double r; /* transformations of the parameters */ double r2; double range2; double logeta; int hard; /* periodic distance */ double *period; int per; /* grid counting */ double dx; double xgrid0; int *my; int kdisc; /* scratch space for saving list of neighbours */ int *neighbour; } AreaInt; /* initialiser function */ Cdata *areaintInit(state, model, algo) State state; Model model; Algor algo; { double r, dx, dy, x0; int i, my, kdisc; AreaInt *areaint; /* create storage */ areaint = (AreaInt *) R_alloc(1, sizeof(AreaInt)); /* Interpret model parameters*/ areaint->eta = model.ipar[0]; areaint->r = r = model.ipar[1]; #ifdef BADBUG Rprintf("r = %lf\n", r); #endif areaint->r2 = r * r; areaint->range2 = 4 * r * r; /* square of interaction distance */ /* is the model numerically equivalent to hard core ? */ areaint->hard = (areaint->eta == 0.0); areaint->logeta = (areaint->hard) ? log(DOUBLE_XMIN) : log(areaint->eta); #ifdef BADBUG if(areaint->hard) Rprintf("Hard core recognised\n"); #endif /* periodic boundary conditions? 
*/ areaint->period = model.period; areaint->per = (model.period[0] > 0.0); #ifdef BADBUG if(areaint->per) { Rprintf("*** periodic boundary conditions ***\n"); Rprintf("period = %lf, %lf\n", model.period[0], model.period[1]); } #endif /* grid counting */ dx = dy = areaint->dx = (2 * r)/NGRID; #ifdef BADBUG Rprintf("areaint->dx = %lf\n", areaint->dx); #endif areaint->xgrid0 = -r + dx/2; areaint->my = (int *) R_alloc((long) NGRID, sizeof(int)); kdisc = 0; for(i = 0; i < NGRID; i++) { x0 = areaint->xgrid0 + i * dx; my = floor(sqrt(r * r - x0 * x0)/dy); my = (my < 0) ? 0 : my; areaint->my[i] = my; #ifdef BADBUG Rprintf("\tmy[%ld] = %ld\n", i, my); #endif kdisc += 2 * my + 1; } areaint->kdisc = kdisc; #ifdef BADBUG Rprintf("areaint->kdisc = %ld\n", areaint->kdisc); #endif /* allocate space for neighbour indices */ areaint->neighbour = (int *) R_alloc((long) state.npmax, sizeof(int)); return((Cdata *) areaint); } #ifdef BADBUG void fexitc(); #endif /* conditional intensity evaluator */ double areaintCif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *period, *x, *y; double u, v; double r2, dx, dy, a, range2; double xgrid, ygrid, xgrid0, covfrac, cifval; int kount, kdisc, kx, my, ky; int *neighbour; int nn, k; AreaInt *areaint; areaint = (AreaInt *) cdata; r2 = areaint->r2; range2 = areaint->range2; /* square of interaction distance */ dy = dx = areaint->dx; kdisc = areaint->kdisc; /* pointers */ period = areaint->period; neighbour = areaint->neighbour; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return ((double) 1.0); if(!areaint->per) { /* .......... Euclidean distance .................... First identify which data points are neighbours of (u,v) */ nn = 0; ixp1 = ix + 1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(ix > 0) { for(j=0; j < ix; j++) { a = range2 - pow(u - x[j], 2); if(a > 0.) { a -= pow(v - y[j], 2); if(a > 0.) 
{ /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } } if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { a = range2 - pow(u - x[j], 2); if(a > 0.) { a -= pow(v - y[j], 2); if(a > 0.) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } } if(nn == 0) { /* no neighbours; no interaction */ cifval = 1.0; return cifval; } else if(areaint->hard) { /* neighbours forbidden if it's a hard core process */ cifval = 0.0; return cifval; } else { /* scan a grid of points centred at (u,v) */ kount = 0; xgrid0 = u + areaint->xgrid0; for(kx=0; kxmy[kx]; for(ky=(-my); ky<=my; ky++) { ygrid = v + ky * dy; /* Grid point (xgrid,ygrid) is inside disc of radius r centred at (u,v) Loop through all neighbouring data points to determine whether the grid point is covered by another disc */ if(nn > 0) { for(k=0; k < nn; k++) { j = neighbour[k]; a = r2 - pow(xgrid - x[j], 2); if(a > 0) { a -= pow(ygrid - y[j], 2); if(a > 0) { /* point j covers grid point */ ++kount; break; } } } } /* finished consideration of grid point (xgrid, ygrid) */ } } } } else { /* ............. periodic distance ...................... 
First identify which data points are neighbours of (u,v) */ nn = 0; ixp1 = ix + 1; if(ix > 0) { for(j=0; j < ix; j++) { if(dist2thresh(u,v,x[j],y[j],period,range2)) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } if(ixp1 < npts) { for(j=ixp1; jhard) { /* neighbours forbidden if it's a hard core process */ cifval = 0.0; return cifval; } else { /* scan a grid of points centred at (u,v) */ kount = 0; xgrid0 = u + areaint->xgrid0; for(kx=0; kxmy[kx]; for(ky=(-my); ky<=my; ky++) { ygrid = v + ky * dy; /* Grid point (xgrid,ygrid) is inside disc of radius r centred at (u,v) Loop through all neighbouring data points to determine whether the grid point is covered by another disc */ for(k=0; k < nn; k++) { j = neighbour[k]; if(dist2Mthresh(xgrid,ygrid,x[j],y[j],period,r2)) { /* point j covers grid point */ ++kount; break; } } /* finished considering grid point (xgrid,ygrid) */ } } } } /* `kdisc' is the number of grid points in the disc `kount' is the number of COVERED grid points in the disc */ /* Hard core case has been handled. 
*/ /* Usual calculation: covered area fraction */ covfrac = ((double) kount)/((double) kdisc); cifval = exp(areaint->logeta * covfrac); #ifdef BADBUG if(!R_FINITE(cifval)) { Rprintf("Non-finite CIF value\n"); Rprintf("kount=%ld, kdisc=%ld, covfrac=%lf, areaint->logeta=%lf\n", kount, kdisc, covfrac, areaint->logeta); Rprintf("u=%lf, v=%lf\n", u, v); fexitc("Non-finite CIF"); } #endif return cifval; } Cifns AreaIntCifns = { &areaintInit, &areaintCif, (updafunptr) NULL, NO}; spatstat/src/Perfect.cc0000755000176200001440000005751413166361223014625 0ustar liggesusers// Debug switch // #define DBGS #include #include #include #include #include #include #include #include #include // #include // FILE *out; // File i/o is deprecated in R implementation #ifdef DBGS #define CHECK(PTR,MESSAGE) if(((void *) PTR) == ((void *) NULL)) error(MESSAGE) #define CLAMP(X, LOW, HIGH, XNAME) \ if((X) > (HIGH)) { \ Rprintf("Value of %s exceeds upper limit %d\n", XNAME, HIGH); \ X = HIGH; \ } else if((X) < (LOW)) { \ Rprintf("Value of %s is below %d\n", XNAME, LOW); \ X = LOW; \ } #else #define CHECK(PTR,MESSAGE) #define CLAMP(X, LOW, HIGH, XNAME) \ if((X) > (HIGH)) X = HIGH; else if((X) < (LOW)) X = LOW; #endif // ......................................... // memory allocation // using R_alloc #define ALLOCATE(TYPE) (TYPE *) R_alloc(1, sizeof(TYPE)) #define FREE(PTR) // Alternative using Calloc and Free // #define ALLOCATE(TYPE) (TYPE *) Calloc(1, sizeof(TYPE)) // #define FREE(PTR) Free(PTR) void R_CheckUserInterrupt(void); struct Point{ long int No; float X; float Y; float R; struct Point *next; }; struct Point2{ long int No; float X; float Y; char InLower[2]; double Beta; double TempBeta; struct Point2 *next; }; struct Point3{ char Case; char XCell; char YCell; struct Point3 *next; }; // const float Pi=3.141593; double slumptal(void){ return(runif((double) 0.0, (double) 1.0)); } long int poisson(double lambda){ return((long int)rpois(lambda)); } // ........................... 
Point patterns .......................... class Point2Pattern { public: long int UpperLiving[2]; long int MaxXCell, MaxYCell, NoP; double XCellDim, YCellDim, Xmin, Xmax, Ymin, Ymax; struct Point2 *headCell[10][10],*dummyCell; char DirX[10], DirY[10]; Point2Pattern(double xmin, double xmax, double ymin, double ymax, long int mxc, long int myc){ long int i,j; UpperLiving[0] = 0; UpperLiving[1] = 0; Xmin = xmin; Xmax = xmax; Ymin = ymin; Ymax = ymax; DirX[1] = 1; DirY[1] = 0; DirX[2] = 1; DirY[2] = -1; DirX[3] = 0; DirY[3] = -1; DirX[4] = -1; DirY[4] = -1; DirX[5] = -1; DirY[5] = 0; DirX[6] = -1; DirY[6] = 1; DirX[7] = 0; DirY[7] = 1; DirX[8] = 1; DirY[8] = 1; NoP = 0; // dummyCell = ALLOCATE(struct Point2); // dummyCell->next = dummyCell; dummyCell->No = 0; MaxXCell = mxc; MaxYCell = myc; if(MaxXCell>9) MaxXCell = 9; if(MaxYCell>9) MaxYCell = 9; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ // headCell[i][j] = ALLOCATE(struct Point2); // headCell[i][j]->next=dummyCell; } } XCellDim = (Xmax-Xmin)/((double)(MaxXCell+1)); YCellDim = (Ymax-Ymin)/((double)(MaxYCell+1)); }; ~Point2Pattern(){} void Print(); void Return(double *X, double *Y, int *num, int maxnum); long int Count(); long int UpperCount(); void Empty(); void Clean(); // void DumpToFile(char FileName[100]); // void ReadFromFile(char FileName[100]); }; void Point2Pattern::Print(){ long int i,j,k; k = 0; struct Point2 *TempCell; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ //Rprintf("%d %d:\n",i,j); TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Print()"); while(TempCell->next != TempCell){ k++; Rprintf("%f %f %ld %ld %ld=%d %ld=%d UL0 %d UL1 %d %f\n", TempCell->X,TempCell->Y,k, TempCell->No, i,int(TempCell->X/XCellDim), j,int(TempCell->Y/YCellDim), TempCell->InLower[0],TempCell->InLower[1], TempCell->Beta); TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Print() loop"); } } } Rprintf("Printed %ld points.\n",k); } void 
Point2Pattern::Return(double *X, double *Y, int *num, int maxnum){ long int i,j,k; k =0; *num = 0; #ifdef DBGS Rprintf("executing Return()\n"); #endif if(UpperLiving[0]<=maxnum){ struct Point2 *TempCell; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ #ifdef DBGS // Rprintf("%d %d:\n",i,j); #endif TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Return()"); while(TempCell->next != TempCell){ X[k] = TempCell->X; Y[k] = TempCell->Y; k++; TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Return() loop"); } } } *num = k; } else { *num = -1; } } long int Point2Pattern::Count(){ long int i,j,k; k = 0; struct Point2 *TempCell; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ // Rprintf("%d %d:\n",i,j); TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Count()"); while(TempCell->next != TempCell){ k++; TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Count() loop"); } } } //Rprintf("Printed %d points.\n",k); return(k); } // a quick (over)estimate of the number of points in the pattern, // for storage allocation long int Point2Pattern::UpperCount(){ return(UpperLiving[0]); } void Point2Pattern::Empty(){ struct Point2 *TempCell, *TempCell2; long int i,j; #ifdef DBGS long int k; k=0; Rprintf("executing Empty()\n"); #endif for(i=0; i<=this->MaxXCell; i++){ for(j=0; j<=this->MaxYCell; j++){ TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Empty()"); while(TempCell!=TempCell->next){ #ifdef DBGS // k++; Rprintf("%d %d %d\n",i,j,k); #endif TempCell2 = TempCell->next; FREE(TempCell); TempCell = TempCell2; CHECK(TempCell, "internal error: TempCell is null in Empty() loop"); } headCell[i][j]->next = dummyCell; } } } void Point2Pattern::Clean(){ struct Point2 *TempCell, *TempCell2; long int i,j; #ifdef DBGS Rprintf("executing Clean()\n"); #endif for(i=0; i<=MaxXCell; i++){ for(j=0; j<=MaxYCell; j++){ TempCell 
= headCell[i][j]; CHECK(TempCell, "internal error: TempCell is null in Clean()"); TempCell2 = headCell[i][j]->next; CHECK(TempCell2, "internal error: TempCell2 is null in Clean()"); while(TempCell2!=TempCell2->next){ TempCell2->No = 0; if(TempCell2->InLower[0]==0){ TempCell->next = TempCell2->next; FREE(TempCell2); TempCell2 = TempCell->next; CHECK(TempCell2, "internal error: TempCell2 is null in Clean() loop A"); } else{ TempCell2 = TempCell2->next; TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Clean() loop B"); CHECK(TempCell2, "internal error: TempCell2 is null in Clean() loop B"); } } } } } //void Point2Pattern::DumpToFile(char FileName[100]){ // FILE *out; // long int i,j; // out = fopen(FileName,"w"); // struct Point2 *TempCell; // for(i=0;i<=MaxXCell;i++){ // for(j=0;j<=MaxYCell;j++){ // //Rprintf("%d %d:\n",i,j); // TempCell = headCell[i][j]->next; // while(TempCell->next != TempCell){ // fprintf(out,"%f\t%f\t%ld\n", // TempCell->X,TempCell->Y,TempCell->No); // TempCell = TempCell->next; // } // } //} //fclose(out); //} //void Point2Pattern::ReadFromFile(char FileName[100]){ // FILE *out; //long int k,XCell,YCell; //float f1,xs,ys; //out = fopen(FileName,"r"); //struct Point2 *TempCell; //k=0; //while(feof(out)==0){ // k++; // fscanf(out,"%f%f\n",&xs,&ys); // //Rprintf("%f %f\n",xs,ys); // // // TempCell = ALLOCATE(struct Point2); // // // TempCell->No = k; // TempCell->X = xs; // TempCell->Y = ys; // TempCell->InLower[0] = 1; // TempCell->InLower[1] = 1; // // f1 = (xs-Xmin)/XCellDim; XCell = int(f1); // if(XCell>MaxXCell) XCell = MaxXCell; // f1 = (ys-Ymin)/YCellDim; YCell = int(f1); // if(YCell>MaxYCell) YCell = MaxYCell; // // TempCell->next = headCell[XCell][YCell]->next; // headCell[XCell][YCell]->next = TempCell; // //} //fclose(out); //Rprintf("%ld points loaded.\n",k); // //} // ........................... Point processes .......................... // ...................... 
(stationary, pairwise interaction) ............ class PointProcess { public: double Xmin, Xmax, Ymin, Ymax, TotalBirthRate, InteractionRange; PointProcess(double xmin, double xmax, double ymin, double ymax){ Xmin = xmin; Xmax = xmax; Ymin = ymin; Ymax = ymax; } ~PointProcess(){} virtual void NewEvent(double *x, double *y, char *InWindow)=0; virtual void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP)=0; virtual double Interaction(double dsquared)=0; // virtual void CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ // Rprintf("Define CalcBeta...\n"); // } // virtual void CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ //Rprintf("Define CheckBeta...\n"); //} // virtual double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p) //{ return(0.0);}; // virtual double lnDens(Point2Pattern *p2p); // virtual void Beta(struct Point2 *TempCell){ // TempCell->Beta = 0; // Rprintf("Define Beta...\n");}; }; //double PointProcess::lnDens(Point2Pattern *p2p){ //// double f1; //long int xco,yco,xc,yc,fx,tx,fy,ty,ry,rx; //double dy,dx, lnDens,dst2; //struct Point2 *TempCell, *TempCell2; // //dx = (Xmax-Xmin)/(double(p2p->MaxXCell+1)); //dy = (Ymax-Ymin)/(double(p2p->MaxYCell+1)); //rx = int(InteractionRange/dx+1.0); //ry = int(InteractionRange/dy+1.0); // // //Rprintf("1:%f 2:%f 3:%d 4:%d 5:%f 6:%f\n",dx,dy,rx,ry, // // this->InteractionRange,InteractionRange); // //Rprintf("mx:%d my:%d\n",p2p->MaxXCell,p2p->MaxYCell); // // lnDens = 0; // // //Rprintf("lnDens: %f (0)\n",lnDens); // // for(xc = 0; xc <= p2p->MaxXCell; xc++){ // for(yc = 0; yc <= p2p->MaxYCell; yc++){ // //if(xc==1) Rprintf("%d %d\n",xc,yc); // CHECK(p2p->headCell[xc][yc], // "internal error: p2p->headCell[xc][yc] is null in lnDens()"); // TempCell = p2p->headCell[xc][yc]->next; // CHECK(TempCell, "internal error: TempCell is null in lnDens()"); // while(TempCell != TempCell->next){ // lnDens += log(TempCell->Beta); // 
//Rprintf("lnDens: %f (1) %d %d %d %d Beta %f\n",lnDens,xc,yc, // // p2p->MaxXCell,p2p->MaxYCell,TempCell->Beta); // //if(lnDens<(-100000)){Rprintf("%f",lnDens); scanf("%f",&f1);} // if(InteractionRange>0){ // if((xc+rx)<=p2p->MaxXCell) tx=xc+rx; else tx = p2p->MaxXCell; // if((yc+ry)<=p2p->MaxYCell) ty=yc+ry; else ty = p2p->MaxYCell; // if((xc-rx)>=0) fx=xc-rx; else fx = 0; // if((yc-ry)>=0) fy=yc-ry; else fy = 0; // for(xco = fx; xco <= tx; xco++){ // for(yco = fy; yco <= ty; yco++){ // //if(xc==1) Rprintf("%d %d %d %d %d %d\n",xco,yco,fx,tx,fy,ty); // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in lnDens() loop"); // TempCell2 = p2p->headCell[xco][yco]->next; // CHECK(TempCell2, // "internal error: TempCell2 is null in lnDens() loop A"); // while(TempCell2!=TempCell2->next){ // if(TempCell2 != TempCell){ // dst2 = pow(TempCell->X-TempCell2->X,2)+ // pow(TempCell->Y-TempCell2->Y,2); // lnDens += log(Interaction(dst2)); // } // TempCell2 = TempCell2->next; // CHECK(TempCell2, // "internal error: TempCell2 is null in lnDens() loop B"); // } // } // } // //Rprintf("lnDens: %f\n",lnDens); // } // TempCell = TempCell->next; // CHECK(TempCell, // "internal error: TempCell is null in lnDens() at end"); // } // } // } // return(lnDens); // //} // ........................... Sampler .......................... 
class Sampler{ public: PointProcess *PP; Point2Pattern *P2P; long int GeneratedPoints, LivingPoints, NoP; //long int UpperLiving[2]; Sampler(PointProcess *p){ PP = p;} ~Sampler(){} void Sim(Point2Pattern *p2p, long int *ST, long int *ET); long int BirthDeath(long int TimeStep, struct Point *headLiving, struct Point *headDeleted, struct Point3 *headTransition); // WAS: Sampler::Forward void Forward(long int TS, long int TT, char TX, char TY, struct Point *Proposal, long int *DDD); }; void Sampler::Forward(long int TS, long int TT, char TX, char TY, struct Point *Proposal, long int *DDD){ long int XCell, YCell, DirectionN; double dtmp2,dtmpx,dtmpy, tmpR, TempGamma[2], TempI; struct Point2 *TempCell, *TempCell2; float f1; /* Birth */ if(TT==1){ f1 = (Proposal->X-P2P->Xmin)/P2P->XCellDim; XCell = int(f1); CLAMP(XCell, 0, P2P->MaxXCell, "XCell"); f1 = (Proposal->Y-P2P->Ymin)/P2P->YCellDim; YCell = int(f1); CLAMP(YCell, 0, P2P->MaxYCell, "YCell"); // TempCell = ALLOCATE(struct Point2); // TempCell->No = Proposal->No; TempCell->X = Proposal->X; TempCell->Y = Proposal->Y; tmpR = Proposal->R; TempCell->next = P2P->headCell[XCell][YCell]->next; P2P->headCell[XCell][YCell]->next = TempCell; TempCell->InLower[0]=0; TempCell->InLower[1]=0; TempGamma[0] = 1.0; TempGamma[1] = 1.0; /*same cell*/ TempCell2 = TempCell->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case"); while(TempCell2 != TempCell2->next){ dtmpx = TempCell->X - TempCell2->X; dtmpy = TempCell->Y - TempCell2->Y; dtmp2 = dtmpx*dtmpx+dtmpy*dtmpy; TempI = PP->Interaction(dtmp2); if(TempCell2->InLower[0]==1) TempGamma[0] = TempGamma[0]*TempI; if(TempCell2->InLower[1]==1) TempGamma[1] = TempGamma[1]*TempI; TempCell2=TempCell2->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop"); } /*eight other cells*/ for(DirectionN=1;DirectionN<=8;DirectionN++){ if(((XCell+P2P->DirX[DirectionN])>=0) && ((XCell+P2P->DirX[DirectionN])<=P2P->MaxXCell) && 
((YCell+P2P->DirY[DirectionN])>=0) && ((YCell+P2P->DirY[DirectionN])<=P2P->MaxYCell)){ CHECK(P2P->headCell[XCell+P2P->DirX[DirectionN]][YCell+P2P->DirY[DirectionN]], "internal error: HUGE P2P EXPRESSION is null in Forward() birth case loop A"); TempCell2 = P2P->headCell[XCell+P2P->DirX[DirectionN]] [YCell+P2P->DirY[DirectionN]]->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop B"); while(TempCell2!=TempCell2->next){ dtmpx = TempCell->X - TempCell2->X; dtmpy = TempCell->Y - TempCell2->Y; dtmp2 = dtmpx*dtmpx+dtmpy*dtmpy; TempI = PP->Interaction(dtmp2); if(TempCell2->InLower[0]==1) TempGamma[0] = TempGamma[0]*TempI; if(TempCell2->InLower[1]==1) TempGamma[1] = TempGamma[1]*TempI; TempCell2=TempCell2->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop C"); } } } if(tmpR <= TempGamma[1] ){ TempCell->InLower[0]=1; P2P->UpperLiving[0] = P2P->UpperLiving[0] +1; } if(tmpR <= TempGamma[0] ){ TempCell->InLower[1]=1; P2P->UpperLiving[1] = P2P->UpperLiving[1] +1; } } /* Death */ if(TT==0){ TempCell=P2P->headCell[(int)TX][(int)TY]; CHECK(TempCell, "internal error: TempCell is null in Forward() death case"); while(TempCell->next->No != *DDD){ TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Forward() death case loop"); if(TempCell->next == TempCell) { Rprintf("internal error: unexpected self-reference. 
Dumping...\n"); P2P->Print(); error("internal error: unexpected self-reference"); break; } }; CHECK(TempCell->next, "internal error: TempCell->next is null in Forward() death case"); if(*DDD!=TempCell->next->No) Rprintf("diagnostic message: multi cell: !!DDD:%ld TempUpper->No:%ld ", *DDD,TempCell->No); if(TempCell->next->InLower[0]==1) P2P->UpperLiving[0] = P2P->UpperLiving[0] -1; if(TempCell->next->InLower[1]==1) P2P->UpperLiving[1] = P2P->UpperLiving[1] -1; TempCell2 = TempCell->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() death case B"); TempCell->next = TempCell2->next; FREE(TempCell2); /* Common stuff */ //KillCounter ++; *DDD = *DDD - 1; } } long int Sampler::BirthDeath(long int TimeStep, struct Point *headLiving, struct Point *headDeleted, struct Point3 *headTransition){ long int i,n; float f1,f2,f3,f4; double xtemp,ytemp; char InWindow, Success; struct Point *TempPoint, *TempPoint2; struct Point3 *TempTransition; R_CheckUserInterrupt(); f1 = LivingPoints; f2 = PP->TotalBirthRate; f3 = f2/(f1+f2); f4 = slumptal(); n = 0; Success = 0; //Rprintf("LivingPoints: %d TotalBirthRate %f GeneratedPoints %d\n", // LivingPoints,PP->TotalBirthRate,GeneratedPoints); /* Birth */ while(Success==0){ if(f4NewEvent(&xtemp, &ytemp, &InWindow); //Rprintf("Ping 2 (BD)\n"); if(InWindow==1){ Success = 1; // TempTransition = ALLOCATE(struct Point3); // //Rprintf("Ping 3 (BD)\n"); TempTransition->Case = 0; LivingPoints ++; GeneratedPoints ++; // TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = GeneratedPoints; TempPoint->R = slumptal(); TempPoint->next = headLiving->next; headLiving->next = TempPoint; NoP ++; f1 = (TempPoint->X-P2P->Xmin)/P2P->XCellDim; TempTransition->XCell = int(f1); f1 = (TempPoint->Y-P2P->Ymin)/P2P->YCellDim; TempTransition->YCell = int(f1); //Rprintf("X %f XCell %d\n",TempPoint->X,TempTransition->XCell); // CLAMP(TempTransition->XCell, 0, P2P->MaxXCell, "TempTransition->XCell"); 
CLAMP(TempTransition->YCell, 0, P2P->MaxYCell, "TempTransition->YCell"); TempTransition->next = headTransition->next; headTransition->next = TempTransition; } } /* Death */ else{ Success = 1; // TempTransition = ALLOCATE(struct Point3); // TempTransition->Case = 1; f1 = LivingPoints; f2 = f1*slumptal()+1.0; n = int(f2); if(n < 1) n = 1; if(n>LivingPoints){ // Rprintf("diagnostic message: random integer n=%ld > %ld = number of living points\n", n,LivingPoints); n=LivingPoints; } TempPoint2 = TempPoint = headLiving; for(i=1; i<=n; i++){ TempPoint2 = TempPoint; TempPoint = TempPoint->next; } TempPoint2->next = TempPoint->next; TempPoint->next = headDeleted->next; headDeleted->next = TempPoint; LivingPoints --; NoP --; TempTransition->next = headTransition->next; headTransition->next = TempTransition; } } return(n); } void Sampler::Sim(Point2Pattern *p2p, long int *ST, long int *ET) { P2P = p2p; long int StartTime, EndTime, TimeStep, D0Time, D0Living; long int XCell, YCell, DDD, i; float f1; /* Initialising linked listed for backward simulation */ struct Point *headDeleted, *headLiving, *dummyDeleted, *dummyLiving; struct Point *TempPoint; // headLiving = ALLOCATE(struct Point); dummyLiving = ALLOCATE(struct Point); // headLiving->next = dummyLiving; dummyLiving->next = dummyLiving; // headDeleted = ALLOCATE(struct Point); dummyDeleted = ALLOCATE(struct Point); // headDeleted->next = dummyDeleted; dummyDeleted->next = dummyDeleted; struct Point2 *TempCell2; struct Point3 *headTransition, *dummyTransition; // headTransition = ALLOCATE(struct Point3); dummyTransition = ALLOCATE(struct Point3); // headTransition->next = dummyTransition; dummyTransition->next = dummyTransition; PP->GeneratePoisson(headLiving, &GeneratedPoints, &LivingPoints, &NoP); StartTime=1; EndTime=1; TimeStep = 0; D0Time = 0; D0Living = GeneratedPoints; long int tmp, D0; do{ tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition); if(tmp>0){ if(tmp>(LivingPoints+1-D0Living)){ D0Living --; } 
} D0Time++; }while(D0Living>0); tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition); StartTime=1; EndTime=D0Time+1; D0 = 0; do{ if(D0==1){ for(TimeStep=StartTime;TimeStep<=EndTime;TimeStep ++){ tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition); } } D0 = 1; P2P->Empty(); /* headUpper->next = dummyUpper; dummyUpper->next = dummyUpper; for(XCell=0;XCell<=P2P->MaxXCell;XCell++){ for(YCell=0;YCell<=P2P->MaxYCell;YCell++){ headUpperCell[XCell][YCell]->next=dummyUpper; } } */ P2P->UpperLiving[0] = LivingPoints; P2P->UpperLiving[1] = 0; P2P->NoP = 0; i=0; TempPoint = headLiving->next; CHECK(TempPoint, "internal error: TempPoint is null in Sim()"); while(TempPoint!=TempPoint->next){ i++; // TempCell2 = ALLOCATE(struct Point2); // TempCell2->No = TempPoint->No; TempCell2->X = TempPoint->X; TempCell2->Y = TempPoint->Y; TempCell2->InLower[0] = 1; TempCell2->InLower[1] = 0; f1 = (TempPoint->X-P2P->Xmin)/P2P->XCellDim; XCell = int(floor(f1)); CLAMP(XCell, 0, P2P->MaxXCell, "XCell"); f1 = (TempPoint->Y-P2P->Ymin)/P2P->YCellDim; YCell = int(floor(f1)); CLAMP(YCell, 0, P2P->MaxYCell, "YCell"); TempCell2->next = P2P->headCell[XCell][YCell]->next; P2P->headCell[XCell][YCell]->next = TempCell2; TempPoint = TempPoint->next; CHECK(TempPoint, "internal error: TempPoint is null in Sim() loop"); } //P2P->DumpToFile("temp0.dat"); struct Point3 *TempTransition; struct Point *Proposal; TempTransition = headTransition->next; CHECK(TempTransition, "internal error: TempTransition is null in Sim()"); Proposal = headDeleted->next; DDD = GeneratedPoints; for(TimeStep=EndTime;TimeStep>=1;TimeStep--){ R_CheckUserInterrupt(); Forward(TimeStep,TempTransition->Case, TempTransition->XCell,TempTransition->YCell, Proposal,&DDD); if(TempTransition->Case == 1) Proposal = Proposal ->next; TempTransition = TempTransition->next; CHECK(TempTransition, "internal error: TempTransition is null in Sim() loop"); } /* Doubling strategy used!*/ StartTime = EndTime+1; EndTime=EndTime*2; 
//P2P->DumpToFile("temp.dat"); }while(P2P->UpperLiving[0]!=P2P->UpperLiving[1]); P2P->Clean(); i=0; struct Point *TempPoint2; TempPoint = headLiving; TempPoint2 = headLiving->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() position B"); while(TempPoint!=TempPoint->next){ i++; FREE(TempPoint); TempPoint = TempPoint2; TempPoint2 = TempPoint2->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() loop C"); } FREE(TempPoint); i = 0; TempPoint = headDeleted; TempPoint2 = headDeleted->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() position D"); while(TempPoint!=TempPoint->next){ i++; FREE(TempPoint); TempPoint = TempPoint2; TempPoint2 = TempPoint2->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() loop D"); } FREE(TempPoint); //Rprintf("%d ",i); struct Point3 *TempTransition,*TempTransition2; i = 0; TempTransition = headTransition; TempTransition2 = headTransition->next; CHECK(TempTransition2, "internal error: TempTransition2 is null in Sim() position E"); while(TempTransition!=TempTransition->next){ i++; FREE(TempTransition); TempTransition = TempTransition2; TempTransition2 = TempTransition2->next; CHECK(TempTransition2, "internal error: TempTransition2 is null in Sim() loop F"); } FREE(TempTransition); //Rprintf("%d ST: %d ET: %d\n",i,StartTime,EndTime); //scanf("%f",&f1); *ST = StartTime; *ET = EndTime; } #include "PerfectStrauss.h" #include "PerfectStraussHard.h" #include "PerfectHardcore.h" #include "PerfectDiggleGratton.h" #include "PerfectDGS.h" #include "PerfectPenttinen.h" spatstat/src/KrectFunDec.h0000644000176200001440000000563013166361223015221 0ustar liggesusers/* KrectFunDec.h $Revision: 1.3 $ $Date: 2014/02/09 02:51:15 $ Function declarations for Krect Macros: FNAME function name WEIGHTED #defined for weighted version (Kinhom etc) +++ Copyright (C) Adrian Baddeley 2014 ++++ */ void FNAME(width, height, nxy, x, y, #ifdef WEIGHTED w, #endif nr, rmax, trimedge, doIso, doTrans, 
doBord, doUnco, iso, trans, bnumer, bdenom, unco) /* input data */ double *width, *height; /* window is (0, width) x (0, height) */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ #ifdef WEIGHTED double *w; /* weights (e.g. reciprocal intensities) */ #endif /* algorithm parameters */ int *nr; /* number of r values */ double *rmax; /* maximum r value */ double *trimedge; /* maximum edge correction weight */ int *doIso; /* logical: whether to do isotropic correction */ int *doTrans; /* logical: whether to do translation correction */ int *doBord; /* logical: whether to do border correction */ int *doUnco; /* logical: whether to do uncorrected estimator */ /* outputs */ /* These are vectors of length nr if required, otherwise ignored */ double *iso; /* isotropic-corrected estimator */ double *trans; /* translation-corrected estimator */ COUNTTYPE *bnumer; /* numerator of border-corrected estimator */ COUNTTYPE *bdenom; /* denominator of border-corrected estimator */ COUNTTYPE *unco; /* uncorrected estimator */ { int i, j, l, ldist, lbord, M, maxchunk, N, Nr, N1, Nr1; double rstep, Rmax, R2max, wide, high, trim; double xi, yi, bdisti, bx, by, bratio; double dx, dy, dx2, dij, dij2, dratio, edgetrans, edgeiso; double dL, dR, dD, dU, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR; double aL, aR, aD, aU, cL, cR, cU, cD, extang; int ncor, corner; COUNTTYPE *numerLowAccum, *numerHighAccum, *denomAccum; COUNTTYPE naccum, daccum; double accum; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WIJ wij #else #define ZERO 0 #define WIJ 1 #endif N = *nxy; if(N == 0) return; Nr = *nr; Rmax = *rmax; trim = *trimedge; N1 = N - 1; Nr1 = Nr - 1; R2max = Rmax * Rmax; rstep = Rmax/Nr1; wide = *width; high = *height; /* Allocate and initialise scratch space - for border correction, but do it in all cases to keep the compiler happy */ M = (*doBord == 1) ? 
Nr : 1; numerLowAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); numerHighAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); denomAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); for(l = 0; l < M; l++) numerLowAccum[l] = numerHighAccum[l] = denomAccum[l] = ZERO; #include "KrectV1.h" } #undef ZERO #undef WIJ spatstat/src/rthin.c0000644000176200001440000000345413166361223014205 0ustar liggesusers#include #include #include /* rthin.c Select from the integers 1:n with probability p by simulating geometric(p) jumps between selected integers $Revision: 1.1 $ $Date: 2015/07/25 03:19:22 $ */ SEXP thinjumpequal(SEXP n, SEXP p, SEXP guess) { int N; double P; int *w; /* temporary storage for selected integers */ int nw, nwmax; int i, j, k; double log1u, log1p; /* R object return value */ SEXP Out; /* external storage pointer */ int *OutP; /* protect R objects from garbage collector */ PROTECT(p = AS_NUMERIC(p)); PROTECT(n = AS_INTEGER(n)); PROTECT(guess = AS_INTEGER(guess)); /* Translate arguments from R to C */ N = *(INTEGER_POINTER(n)); P = *(NUMERIC_POINTER(p)); nwmax = *(INTEGER_POINTER(guess)); /* Allocate space for result */ w = (int *) R_alloc(nwmax, sizeof(int)); /* set up */ GetRNGstate(); log1p = -log(1.0 - P); /* main loop */ i = 0; /* last selected element of 1...N */ nw = 0; /* number of selected elements */ while(i <= N) { log1u = exp_rand(); /* an exponential rv is equivalent to -log(1-U) */ j = (int) ceil(log1u/log1p); /* j is geometric(p) */ i += j; if(nw >= nwmax) { /* overflow; allocate more space */ w = (int *) S_realloc((char *) w, 2 * nwmax, nwmax, sizeof(int)); nwmax = 2 * nwmax; } /* add 'i' to output vector */ w[nw] = i; ++nw; } /* The last saved 'i' could have exceeded 'N' */ /* For efficiency we don't check this in the loop */ if(nw > 0 && w[nw-1] > N) --nw; PutRNGstate(); /* create result vector */ PROTECT(Out = NEW_INTEGER(nw)); /* copy results into output */ OutP = INTEGER_POINTER(Out); for(k = 0; k < nw; k++) OutP[k] = w[k]; 
UNPROTECT(4); return(Out); } spatstat/src/Krect.c0000644000176200001440000000373713166361223014135 0ustar liggesusers/* Krect.c $Revision: 1.3 $ $Date: 2014/02/09 03:02:42 $ +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ Fast code for K function in rectangular case. **Assumes point pattern is sorted in increasing order of x coordinate** **Assumes window is (0,wide) x (0, high) ** **Assumes output vectors were initialised to zero** Krect.c defines three interface functions, for weighted, unweighted double, and unweighted integer cases KrectFunDec.h (#included thrice) Function declaration, arguments, storage allocation KrectV1.h split according to whether Isotropic Correction is wanted Macro ISOTROPIC is #defined KrectV2.h split according to whether Translation Correction is wanted Macro TRANSLATION is #defined KrectV3.h split according to whether Border Correction is wanted Macro BORDER is #defined KrectV4.h split according to whether Uncorrected estimate is wanted Macro UNCORRECTED is #defined KrectBody.h Function body, including loops over i and j KrectIncrem.h (#included twice) Code performed when a close pair of points has been found: calculate edge corrections, increment results. */ #include #include #include /* This constant is defined in Rmath.h */ #define TWOPI M_2PI #define ABS(X) (((X) >= 0) ? (X) : (-X)) #define SMALL(X) ((ABS(X) < 1.0e-12) ? 1 : 0) #define MIN(X,Y) (((X) < (Y)) ? 
(X) : (Y)) #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectInt #define COUNTTYPE int #include "KrectFunDec.h" #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectDbl #define COUNTTYPE double #include "KrectFunDec.h" #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectWtd #define COUNTTYPE double #define WEIGHTED #include "KrectFunDec.h" spatstat/src/KrectIncrem.h0000644000176200001440000000462613166361223015276 0ustar liggesusers/* KrectIncrem.h Code to increment numerators of K-function $Revision: 1.5 $ $Date: 2014/02/09 03:00:51 $ +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ */ #ifdef WEIGHTED wj = w[j]; wij = wi * wj; #endif /* determine index of entry to be incremented */ dij = (double) sqrt(dij2); dratio = dij/rstep; /* smallest integer greater than or equal to dratio */ ldist = (int) ceil(dratio); #ifdef UNCORRECTED /* ............ uncorrected estimate ................. */ #ifdef WEIGHTED unco[ldist] += wij; #else (unco[ldist])++; #endif #endif #ifdef BORDER /* ............ border correction ................. */ /* increment numerator for all r such that dij <= r < bi */ /* increment entries ldist to lbord inclusive */ #ifdef WEIGHTED if(lbord >= ldist) { numerLowAccum[ldist] += wij; numerHighAccum[lbord] += wij; } #else if(lbord >= ldist) { (numerLowAccum[ldist])++; (numerHighAccum[lbord])++; } #endif #endif #ifdef TRANSLATION /* ............ translation correction ................. */ edgetrans = 1.0/((1.0 - ABS(dx)/wide) * (1.0 - ABS(dy)/high)); edgetrans = MIN(edgetrans, trim); #ifdef WEIGHTED trans[ldist] += wij * edgetrans; #else trans[ldist] += edgetrans; #endif #endif #ifdef ISOTROPIC /* ............ isotropic correction ................. */ /* half the angle subtended by the intersection between the circle of radius d[i,j] centred on point i and each edge of the rectangle (prolonged to an infinite line) */ aL = (dL < dij) ? acos(dL/dij) : 0.0; aR = (dR < dij) ? 
acos(dR/dij) : 0.0; aD = (dD < dij) ? acos(dD/dij) : 0.0; aU = (dU < dij) ? acos(dU/dij) : 0.0; /* apply maxima */ cL = MIN(aL, bLU) + MIN(aL, bLD); cR = MIN(aR, bRU) + MIN(aR, bRD); cU = MIN(aU, bUL) + MIN(aU, bUR); cD = MIN(aD, bDL) + MIN(aD, bDR); /* total exterior angle over 2 pi */ extang = (cL + cR + cU + cD)/TWOPI; /* add pi/2 for corners */ if(corner) extang += 1/4; /* edge correction factor */ edgeiso = 1 / (1 - extang); edgeiso = MIN(edgeiso, trim); #ifdef WEIGHTED iso[ldist] += wij * edgeiso; #else iso[ldist] += edgeiso; #endif #endif spatstat/src/areapair.c0000644000176200001440000000360313166361223014641 0ustar liggesusers/* areapair.c $Revision: 1.6 $ $Date: 2013/09/18 04:11:42 $ Specialised code for the second order conditional intensity of the area-interaction process */ #include #include #include "yesno.h" /* computes area of b(A, r) \int b(B, r) \setminus \bigcup_i b(X[i], r) */ void delta2area(xa, ya, xb, yb, nother, xother, yother, radius, epsilon, pixcount) double *xa, *ya, *xb, *yb; int *nother; double *xother, *yother; double *radius, *epsilon; int *pixcount; { int Ni, Nj, Nk, i, j, k, count, covered; double xA, yA, xB, yB, r, eps, r2; double xmin, xmax, ymin, ymax, xi, yj; double dxA, dyA; double dxB, dyB; double dx, dy; Nk = *nother; xA = *xa; yA = *ya; xB = *xb; yB = *yb; r = *radius; eps = *epsilon; r2 = r * r; /* find intersection of squares centred on A and B */ if(xA < xB) { xmin = xB - r; xmax = xA + r; } else { xmin = xA - r; xmax = xB + r; } if(xmin > xmax) return; if(yA < yB) { ymin = yB - r; ymax = yA + r; } else { ymin = yA - r; ymax = yB + r; } if(ymin > ymax) return; /* set up grid */ Ni = (int) ceil((xmax - xmin)/eps) + 1; Nj = (int) ceil((ymax - ymin)/eps) + 1; count = 0; for(i = 0, xi = xmin; i < Ni; i++, xi += eps) { dxA = xi - xA; for(j = 0, yj = ymin; j < Nj; j++, yj += eps) { dyA = yj - yA; if(dxA * dxA + dyA * dyA <= r2) { /* grid point belongs to b(A, r) */ dxB = xi - xB; dyB = yj - yB; if(dxB * dxB + dyB * dyB <= r2) 
{ /* grid point belongs to b(A,r) \cap b(B,r) */ covered = NO; /* test whether it is covered by another b(X[k], r) */ for(k = 0; k < Nk; k++) { dx = xi - xother[k]; dy = yj - yother[k]; if(dx * dx + dy * dy <= r2) { covered = YES; break; } } if(!covered) { ++count; } } } } } *pixcount = count; } spatstat/src/yesno.h0000644000176200001440000000011613166361223014213 0ustar liggesusers/* yesno.h */ #ifndef YES #define YES (0 == 0) #define NO (!YES) #endif spatstat/src/dist2dpath.c0000755000176200001440000000052713166361223015130 0ustar liggesusers#include #include /* given matrix of edge lengths compute matrix of shortest-path distances Uses dist2dpath.h */ #define FNAME Ddist2dpath #define DTYPE double #define FLOATY #include "dist2dpath.h" #undef FNAME #undef DTYPE #undef FLOATY #define FNAME Idist2dpath #define DTYPE int #include "dist2dpath.h" spatstat/src/diggra.c0000755000176200001440000000637013166361223014321 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Diggle-Gratton process */ /* Conditional intensity function for a pairwise interaction point process with interaction function as given by e(t) = 0 for t < delta = (t-delta)/(rho-delta)^kappa for delta <= t < rho = 1 for t >= rho (See page 767 of Diggle, Gates, and Stibbard, Biometrika vol. 74, 1987, pages 763 -- 770.) 
*/ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Diggra { double kappa; double delta; double rho; double delta2; /* delta^2 */ double rho2; /* rho^2 */ double fac; /* 1/(rho-delta) */ double *period; int per; } Diggra; /* initialiser function */ Cdata *diggrainit(state, model, algo) State state; Model model; Algor algo; { Diggra *diggra; diggra = (Diggra *) R_alloc(1, sizeof(Diggra)); /* Interpret model parameters*/ diggra->kappa = model.ipar[0]; diggra->delta = model.ipar[1]; diggra->rho = model.ipar[2]; diggra->period = model.period; /* constants */ diggra->delta2 = pow(diggra->delta, 2); diggra->rho2 = pow(diggra->rho, 2); diggra->fac = 1/(diggra->rho - diggra->delta); /* periodic boundary conditions? */ diggra->per = (model.period[0] > 0.0); return((Cdata *) diggra); } /* conditional intensity evaluator */ double diggracif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairprod, cifval; double rho2, delta, delta2, fac; double *period; DECLARE_CLOSE_D2_VARS; Diggra *diggra; diggra = (Diggra *) cdata; period = diggra->period; rho2 = diggra->rho2; delta = diggra->delta; delta2 = diggra->delta2; fac = diggra->fac; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = pairprod = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(diggra->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,rho2,d2)) { if(d2 < delta2) { cifval = 0.0; return(cifval); } else { pairprod *= fac * (sqrt(d2)-delta); } } } } if(ixp1 < npts) { for(j=ixp1; j 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], rho2, d2)) { if(d2 <= delta2) { cifval = 0.0; return(cifval); } else { pairprod *= fac * (sqrt(d2)-delta); } } } } if(ixp1 < npts) { for(j=ixp1; jkappa); return cifval; } Cifns DiggraCifns = { &diggrainit, &diggracif, (updafunptr) NULL, 
NO}; spatstat/src/linSnncross.h0000644000176200001440000000644213166361223015401 0ustar liggesusers/* linSnncross.h Function body definitions with macros Sparse representation of network $Revision: 1.4 $ $Date: 2015/12/28 02:44:25 $ Macros used: FNAME name of function WHICH whether 'nnwhich' is required HUH debugging ! Data points must be ordered by segment index ! */ void FNAME(np, sp, tp, /* data points 'from' (ordered by sp) */ nq, sq, tq, /* data points 'to' (ordered by sq) */ nv, /* number of network vertices */ ns, from, to, /* segments */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ #ifdef WHICH nndist, /* nearest neighbour distance for each point */ nnwhich /* identifies nearest neighbour */ #else nndist /* nearest neighbour distance for each point */ #endif ) int *np, *nq, *nv, *ns; int *from, *to, *sp, *sq; /* integer vectors (mappings) */ double *tp, *tq; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *nndist; /* nearest neighbour distance for each point */ #ifdef WHICH int *nnwhich; /* identifies nearest neighbour */ #endif { int Np, Nq, Nv, i, j, ivleft, ivright, jfirst, jlast, k; double d, hugevalue, slen, tpi; double *dminvert; /* min dist from each vertex */ #ifdef WHICH int *whichvert; /* which min from each vertex */ #endif Np = *np; Nq = *nq; Nv = *nv; hugevalue = *huge; /* First compute min distance to target set from each vertex */ dminvert = (double *) R_alloc(Nv, sizeof(double)); #ifdef WHICH whichvert = (int *) R_alloc(Nv, sizeof(int)); Clinvwhichdist(nq, sq, tq, nv, ns, from, to, seglen, huge, tol, dminvert, whichvert); #else Clinvdist(nq, sq, tq, nv, ns, from, to, seglen, huge, tol, dminvert); #endif #ifdef HUH Rprintf("Initialise answer\n"); #endif /* initialise nn distances from source points */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; #ifdef WHICH nnwhich[i] = -1; #endif } /* run through all source points */ #ifdef HUH 
Rprintf("Run through source points\n"); #endif jfirst = 0; for(i = 0; i < Np; i++) { tpi = tp[i]; k = sp[i]; /* segment containing this point */ slen = seglen[k]; ivleft = from[k]; ivright = to[k]; #ifdef HUH Rprintf("Source point %d lies on segment %d = [%d,%d]\n", i, k, ivleft, ivright); #endif d = slen * tpi + dminvert[ivleft]; if(nndist[i] > d) { #ifdef HUH Rprintf("\tMapping to left endpoint %d, distance %lf\n", ivleft, d); #endif nndist[i] = d; #ifdef WHICH nnwhich[i] = whichvert[ivleft]; #endif } d = slen * (1.0 - tpi) + dminvert[ivright]; if(nndist[i] > d) { #ifdef HUH Rprintf("\tMapping to right endpoint %d, distance %lf\n", ivright, d); #endif nndist[i] = d; #ifdef WHICH nnwhich[i] = whichvert[ivright]; #endif } /* find any target points in this segment */ while(jfirst < Nq && sq[jfirst] < k) jfirst++; jlast = jfirst; while(jlast < Nq && sq[jlast] == k) jlast++; --jlast; /* if there are no such points, then jlast < jfirst */ if(jfirst <= jlast) { for(j = jfirst; j <= jlast; j++) { d = slen * fabs(tq[j] - tpi); if(nndist[i] > d) { nndist[i] = d; #ifdef WHICH nnwhich[i] = j; #endif } } } } } spatstat/src/mhv3.h0000644000176200001440000000042713166361223013740 0ustar liggesusers/* mhv3.h tracking or not */ #undef MH_TRACKING if(tracking) { /* saving transition history */ #define MH_TRACKING YES #include "mhv4.h" #undef MH_TRACKING } else { /* not saving transition history */ #define MH_TRACKING NO #include "mhv4.h" #undef MH_TRACKING } spatstat/src/linvdist.h0000644000176200001440000000655013166361223014722 0ustar liggesusers/* linvdist.h Distance function at vertices (shortest distance from each vertex to a data point) Function body definitions with macros Sparse representation of network $Revision: 1.3 $ $Date: 2015/12/05 07:26:56 $ Macros used: FNAME name of function WHICH whether 'nnwhich' is required HUH debugging flag ! Data points must be ordered by segment index ! 
*/ void FNAME(np, sp, tp, /* target data points (ordered by sp) */ nv, /* number of network vertices */ ns, from, to, /* segments */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ #ifdef WHICH dist, /* distance from each vertex to nearest data point */ which /* identifies nearest data point */ #else dist /* distance from each vertex to nearest data point */ #endif ) int *np, *nv, *ns; /* number of points, vertices, segments */ int *sp, *from, *to; /* integer vectors (mappings) */ double *tp; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *dist; #ifdef WHICH int *which; #endif { int Np, Nv, Ns, i, j, k, segPj, ivleft, ivright; double hugevalue, eps, dleft, dright, slen, d, tpj; char converged; Np = *np; Nv = *nv; Ns = *ns; hugevalue = *huge; eps = *tol; #ifdef HUH Rprintf("Initialise dist\n"); #endif /* initialise to huge value */ for(i = 0; i < Nv; i++) { dist[i] = hugevalue; #ifdef WHICH which[i] = -1; #endif } #ifdef HUH Rprintf("Run through target points\n"); #endif /* assign correct value to endpoints of segments containing target points */ for(j = 0; j < Np; j++) { segPj = sp[j]; tpj = tp[j]; slen = seglen[segPj]; ivleft = from[segPj]; d = slen * tpj; if(d < dist[ivleft]) { dist[ivleft] = d; #ifdef WHICH which[ivleft] = j; #endif } ivright = to[segPj]; d = slen * (1.0 - tpj); if(d < dist[ivright]) { dist[ivright] = d; #ifdef WHICH which[ivright] = j; #endif } } /* recursively update */ #ifdef HUH Rprintf("Recursive update\n"); #endif converged = NO; while(!converged) { converged = YES; #ifdef HUH Rprintf("........... starting new pass ...................... 
\n"); #endif for(k = 0; k < Ns; k++) { ivleft = from[k]; ivright = to[k]; slen = seglen[k]; dleft = (double) dist[ivleft]; dright = (double) dist[ivright]; d = (double) (dleft + slen); if(d < dright - eps) { #ifdef HUH Rprintf("Updating ivright=%d using ivleft=%d, from %lf to %lf+%lf=%lf\n", ivright, ivleft, dright, dleft, slen, d); #endif converged = NO; dist[ivright] = d; #ifdef WHICH which[ivright] = which[ivleft]; #endif } else { d = (double) (dright + slen); if(d < dleft - eps) { #ifdef HUH Rprintf("Updating ivleft=%d using ivright=%d, from %lf to %lf+%lf=%lf\n", ivleft, ivright, dleft, dright, slen, d); #endif converged = NO; dist[ivleft] = d; #ifdef WHICH which[ivleft] = which[ivright]; #endif } } } } #ifdef HUH Rprintf("Done\nVertex values:\n"); #ifdef WHICH Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) Rprintf("\t%d\t%d\t%lf\n", i, which[i], dist[i]); #else Rprintf("\ti\tdist\n"); for(i = 0; i < Nv; i++) Rprintf("\t%d\t%lf\n", i, dist[i]); #endif #endif } spatstat/src/nngrid.c0000644000176200001440000000356213166361223014342 0ustar liggesusers/* nngrid.c Nearest Neighbour Distances from a pixel grid to a point pattern Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 Licence: GNU Public Licence >= 2 $Revision: 1.4 $ $Date: 2013/11/03 03:41:23 $ Function body definition is #included from nngrid.h THE FOLLOWING FUNCTIONS ASSUME THAT x IS SORTED IN ASCENDING ORDER */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT x IS SORTED IN ASCENDING ORDER */ /* general interface */ void nnGinterface(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ wantdist, wantwhich, /* options */ nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; /* options */ int *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { 
void nnGdw(), nnGd(), nnGw(); int di, wh; di = (*wantdist != 0); wh = (*wantwhich != 0); if(di && wh) { nnGdw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge); } else if(di) { nnGd(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge); } else if(wh) { nnGw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge); } } #undef FNAME #undef DIST #undef WHICH /* nnGdw returns distances and indices */ #define FNAME nnGdw #define DIST #define WHICH #include "nngrid.h" #undef FNAME #undef DIST #undef WHICH /* nnGd returns distances only */ #define FNAME nnGd #define DIST #include "nngrid.h" #undef FNAME #undef DIST #undef WHICH /* nnGw returns indices only */ #define FNAME nnGw #define WHICH #include "nngrid.h" #undef FNAME #undef DIST #undef WHICH spatstat/src/discarea.c0000755000176200001440000001502413166361223014633 0ustar liggesusers/* disc.c Area of intersection between disc and polygonal window $Revision: 1.6 $ $Date: 2011/12/03 00:15:52 $ */ #undef DEBUG #include #include #define MIN(A,B) (((A) < (B)) ? (A) : (B)) #define MAX(A,B) (((A) > (B)) ? 
(A) : (B)) #ifndef PI #define PI 3.1415926535898 #endif void discareapoly(nc, xc, yc, nr, rmat, nseg, x0, y0, x1, y1, eps, out) /* inputs */ int *nc, *nr, *nseg; double *xc, *yc, *rmat; double *x0, *y0, *x1, *y1; double *eps; /* output */ double *out; { int n, m, i, j, k, nradperpt; double radius, radius2, total, contrib; double xx0, xx1, yy0, yy1, xleft, xright, yleft, yright, xcentre, ycentre; double epsilon; double DiscContrib(); n = *nc; nradperpt = *nr; m = *nseg; epsilon = *eps; for(i = 0; i < n; i++) { xcentre = xc[i]; ycentre = yc[i]; #ifdef DEBUG Rprintf("\ni = %d:\n centre = (%lf, %lf)\n", i, xcentre, ycentre); #endif for(j = 0; j < nradperpt; j++) { radius = rmat[ j * n + i]; radius2 = radius * radius; #ifdef DEBUG Rprintf("radius = %lf\n", radius); #endif total = 0.0; for(k=0; k < m; k++) { #ifdef DEBUG Rprintf("k = %d\n", k); #endif xx0 = x0[k]; yy0 = y0[k]; xx1 = x1[k]; yy1 = y1[k]; #ifdef DEBUG Rprintf("(%lf,%lf) to (%lf,%lf)\n", xx0, yy0, xx1, yy1); #endif /* refer to unit disc at origin */ /* arrange so that xleft < xright */ if(radius <= epsilon) contrib = 0.0; else if(xx0 < xx1) { xleft = (xx0 - xcentre)/radius; xright = (xx1 - xcentre)/radius; yleft = (yy0 - ycentre)/radius; yright = (yy1 - ycentre)/radius; contrib = - radius2 * DiscContrib(xleft,yleft,xright,yright,epsilon); } else { xleft = (xx1 - xcentre)/radius; xright = (xx0 - xcentre)/radius; yleft = (yy1 - ycentre)/radius; yright = (yy0 - ycentre)/radius; contrib = radius2 * DiscContrib(xleft,yleft,xright,yright,epsilon); } #ifdef DEBUG Rprintf("contrib = %lf\n contrib/(pi * r^2)=%lf\n", contrib, contrib/(PI * radius2)); #endif total += contrib; } out[ j * n + i] = total; #ifdef DEBUG Rprintf("total = %lf\ntotal/(pi * r^2) = %lf\n", total, total/(PI * radius2)); #endif } } } /* area of intersection of unit disc with halfplane x <= v */ #ifdef DEBUG #define TRIGBIT(V) trigbit(V) double trigbit(v) double v; { double zero, result; zero = 0.0; if(v < -1.0) return(zero); if(v > 1.0) 
return(PI); result = PI/2 + asin(v) + v * sqrt(1 - v * v); Rprintf("trigbit: v = %lf, asin(v)=%lf, result=%lf\n", v, asin(v), result); return(result); } #else #define TRIGBIT(V) (((V) <= -1.0) ? 0.0 : (((V) >= 1.0) ? PI : \ (PI/2 + asin(V) + (V) * sqrt(1 - (V) * (V))))) #endif /* Find the area of intersection between a disc centre = (0,0), radius = 1 and the trapezium with upper segment (xleft, yleft) to (xright, yright) ASSUMES xleft < xright */ double DiscContrib(xleft, yleft, xright, yright, eps) double xleft, yleft, xright, yright, eps; /* NOTE: unit disc centred at origin */ { double xlo, xhi, zero, slope, intercept, A, B, C, det; double xcut1, xcut2, ycut1, ycut2, xunder1, xunder2, dx, dx2, result; #ifdef DEBUG double increm; Rprintf( "DiscContrib: xleft=%lf, yleft=%lf, xright=%lf, yright=%lf\n", xleft, yleft, xright, yright); #endif zero = 0.0; /* determine relevant range of x coordinates */ xlo = MAX(xleft, (-1.0)); xhi = MIN(xright, 1.0); if(xlo >= xhi - eps) { /* intersection is empty or negligible */ #ifdef DEBUG Rprintf("intersection is empty or negligible\n"); #endif return(zero); } /* find intersection points between the circle and the line containing upper segment */ slope = (yright - yleft)/(xright - xleft); intercept = yleft - slope * xleft; A = 1 + slope * slope; B = 2 * slope * intercept; C = intercept * intercept - 1.0; det = B * B - 4 * A * C; #ifdef DEBUG Rprintf("slope=%lf, intercept=%lf\nA = %lf, B=%lf, C=%lf, det=%lf\n", slope, intercept, A, B, C, det); #endif if(det <= 0.0) { /* no crossing between disc and infinite line */ if(intercept < 0.0) /* segment is below disc; intersection is empty */ return(zero); /* segment is above disc */ result = TRIGBIT(xhi) - TRIGBIT(xlo); return(result); } xcut1 = (- B - sqrt(det))/(2 * A); xcut2 = (- B + sqrt(det))/(2 * A); /* partition [xlo, xhi] into pieces delimited by {xcut1, xcut2} */ if(xcut1 >= xhi || xcut2 <= xlo) { /* segment is outside disc */ if(yleft < 0.0) { #ifdef DEBUG Rprintf("segment is 
beneath disc\n"); #endif result = zero; } else { #ifdef DEBUG Rprintf("segment is above disc\n"); #endif result = TRIGBIT(xhi) - TRIGBIT(xlo); } return(result); } /* possibly three parts */ #ifdef DEBUG Rprintf("up to three pieces\n"); #endif result = zero; ycut1 = intercept + slope * xcut1; ycut2 = intercept + slope * xcut2; if(xcut1 > xlo) { /* part to left of cut */ #ifdef DEBUG Rprintf("left of cut: [%lf, %lf]\n", xlo, xcut1); if(ycut1 < 0.0) Rprintf("below disc - no intersection\n"); else { increm = TRIGBIT(xcut1) - TRIGBIT(xlo); Rprintf("increment = %lf\n", increm); result += increm; } #else if(ycut1 >= 0.0) result += TRIGBIT(xcut1) - TRIGBIT(xlo); #endif } if(xcut2 < xhi) { /* part to right of cut */ #ifdef DEBUG Rprintf("right of cut: [%lf, %lf]\n", xcut2, xhi); if(ycut2 < 0.0) Rprintf("below disc - no intersection\n"); else { increm = TRIGBIT(xhi) - TRIGBIT(xcut2); Rprintf("increment = %lf\n", increm); result += increm; } #else if(ycut2 >= 0.0) result += TRIGBIT(xhi) - TRIGBIT(xcut2); #endif } /* part underneath cut */ xunder1 = MAX(xlo, xcut1); xunder2 = MIN(xhi, xcut2); dx = xunder2 - xunder1; dx2 = xunder2 * xunder2 - xunder1 * xunder1; #ifdef DEBUG Rprintf("underneath cut: [%lf, %lf]\n", xunder1, xunder2); increm = intercept * dx + slope * dx2/2 + (TRIGBIT(xunder2) - TRIGBIT(xunder1))/2; Rprintf("increment = %lf\n", increm); result += increm; #else result += intercept * dx + slope * dx2/2 + (TRIGBIT(xunder2) - TRIGBIT(xunder1))/2; #endif return(result); } #ifdef DEBUG /* interface to low level function, for debugging only */ void RDCtest(xleft, yleft, xright, yright, eps, value) double *xleft, *yleft, *xright, *yright, *eps, *value; { double DiscContrib(); *value = DiscContrib(*xleft, *yleft, *xright, *yright, *eps); } #endif spatstat/src/linvdist.c0000644000176200001440000000076313166361223014715 0ustar liggesusers#include #include "yesno.h" /* linvdist.c Distance function at vertices (shortest distance from each vertex to a data point) Sparse 
representation of network $Revision: 1.1 $ $Date: 2015/12/05 06:07:16 $ ! Data points must be ordered by segment index ! */ #undef HUH /* definition of Clinvdist */ #define FNAME Clinvdist #undef WHICH #include "linvdist.h" /* definition of Clinvwhichdist */ #undef FNAME #define FNAME Clinvwhichdist #define WHICH #include "linvdist.h" spatstat/src/chunkloop.h0000644000176200001440000000144413166361223015065 0ustar liggesusers/* chunkloop.h Divide a loop into chunks Convenient for divide-and-recombine, and reducing calls to R_CheckUserInterrupt, etc. $Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $ */ #define OUTERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \ IVAR = 0; \ ICHUNK = 0; \ while(IVAR < LOOPLENGTH) #define INNERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \ ICHUNK += CHUNKSIZE; \ if(ICHUNK > LOOPLENGTH) ICHUNK = LOOPLENGTH; \ for(; IVAR < ICHUNK; IVAR++) #define XOUTERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \ IVAR = ISTART; \ ICHUNK = 0; \ while(IVAR <= IEND) #define XINNERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \ ICHUNK += CHUNKSIZE; \ if(ICHUNK > IEND) ICHUNK = IEND; \ for(; IVAR <= IEND; IVAR++) #define CHUNKLOOP_H spatstat/src/scan.c0000644000176200001440000000365413166361223014007 0ustar liggesusers/* scan.c Scan transform $Revision: 1.2 $ $Date: 2012/04/16 12:00:07 $ */ #include #include #include "raster.h" void shape_raster(); void Cscantrans(x, y, npt, R, out) double *x, *y; /* data points */ int npt; double R; /* radius */ Raster *out; /* scan image */ { int i,j,k,l,m; double d2, R2; int rmin, rmax, cmin, cmax, Rrow, Rcol, lmin, lmax, mmin, mmax; /* initialise raster */ Clear(*out,int,0); /* If the list of data points is empty, ... 
exit now */ if(npt == 0) return; R2 = R * R; cmin = out->cmin; cmax = out->cmax; rmin = out->rmin; rmax = out->rmax; /* disc size in rows/columns */ Rrow = (int) ceil(R/(out->ystep)); Rcol = (int) ceil(R/(out->xstep)); if(Rrow < 1) Rrow = 1; if(Rcol < 1) Rcol = 1; /* run through points */ for(i = 0; i < npt; i++) { j = RowIndex(*out,y[i]); k = ColIndex(*out,x[i]); lmin = j - Rrow; if(lmin < rmin) lmin = rmin; lmax = j + Rrow; if(lmax > rmax) lmax = rmax; mmin = k - Rcol; if(mmin < cmin) mmin = cmin; mmax = k + Rcol; if(mmax > cmax) mmax = cmax; for(l = lmin; l <= lmax; l++) { for(m = mmin; m <= mmax; m++) { d2 = DistanceToSquared(x[i],y[i],*out,l,m); if(d2 <= R2) Entry(*out,l,m,int) += 1; } } } } /* R interface */ void scantrans(x, y, n, xmin, ymin, xmax, ymax, nr, nc, R, counts) double *x, *y; /* input data points */ int *n; double *xmin, *ymin, *xmax, *ymax; /* guaranteed bounding box */ int *nr, *nc; /* desired raster dimensions */ double *R; /* radius */ /* output array */ int *counts; /* number of R-close points */ { Raster out; int nrow, ncol, npoints; double r; nrow = *nr; ncol = *nc; npoints = *n; r = *R; shape_raster( &out, (void *) counts, *xmin,*ymin,*xmax,*ymax, nrow, ncol, 0, 0); Cscantrans(x, y, npoints, r, &out); } spatstat/src/minnnd.h0000644000176200001440000000305713166361223014350 0ustar liggesusers/* minnnd.h Code template for minnnd to be #included in minnnd.c Macros: FNAME Function name IGNOREZERO #defined if zero distances should be ignored $Revision: 1.1 $ $Date: 2014/09/18 00:52:15 $ */ /* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */ void FNAME(n, x, y, huge, result) /* inputs */ int *n; double *x, *y, *huge; /* outputs */ double *result; { int npoints, i, maxchunk, left, right; double d2, d2min, xi, yi, dx, dy, dy2, hu, hu2; hu = *huge; hu2 = hu * hu; npoints = *n; d2min = hu2; if(npoints == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints) { R_CheckUserInterrupt(); maxchunk += 65536; 
if(maxchunk > npoints) maxchunk = npoints; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i < npoints - 1) { /* search forward */ for(right = i + 1; right < npoints; ++right) { dy = y[right] - yi; dy2 = dy * dy; if(dy2 > d2min) break; dx = x[right] - xi; d2 = dx * dx + dy2; if (d2 < d2min) { #ifdef IGNOREZERO if(d2 > 0) d2min = d2; #else d2min = d2; #endif } } } if(i > 0){ /* search backward */ for(left = i - 1; left >= 0; --left) { dy = yi - y[left]; dy2 = dy * dy; if(dy2 > d2min) break; dx = x[left] - xi; d2 = dx * dx + dy2; if (d2 < d2min) { #ifdef IGNOREZERO if(d2 > 0) d2min = d2; #else d2min = d2; #endif } } } } } *result = d2min; } spatstat/src/PerfectStrauss.h0000644000176200001440000002146013166361223016040 0ustar liggesusers // ........................... Strauss process .......................... // $Revision: 1.4 $ $Date: 2014/02/18 10:43:00 $ class StraussProcess : public PointProcess { public: double beta, gamma, R, Rsquared; StraussProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri); ~StraussProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); // void CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // void CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p); // void Beta(struct Point2 *TempCell); // void CalcBeta(Point2Pattern *p2p); }; StraussProcess::StraussProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; gamma = g; R = Ri; Rsquared = R * R; InteractionRange = R; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double StraussProcess::Interaction(double dsquared) { double rtn; rtn = 1; if(dsquared < Rsquared) rtn = gamma; return(rtn); } void 
StraussProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void StraussProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating StraussProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating StraussProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating StraussProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } //void StraussProcess::CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ // long int i,j,k; // k=0; // // Rprintf("\ndiagnostic message: Strauss CalcBeta... %ld %ld\n",xsidepomm,ysidepomm); // for(i=0; ibeta; // k++; // } // } //} //void StraussProcess::CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ // long int i,j,k; // // double d1; // k=0; // // Rprintf("\ndiagnostic message: Strauss CalcBeta... 
%ld %ld\n",xsidepomm,ysidepomm); // for(i=0; i0.001) && (k==0)){ // Rprintf("%f %f %f %ld %ld\n",fabs(*(betapomm + i*ysidepomm + j)- beta), // *(betapomm + i*ysidepomm + j),beta,i,j); // k++; // // scanf("%lf",&d1); // } // } // } //} //double StraussProcess::lnCondInt(struct Point2 *TempCell, // Point2Pattern *p2p){ // double f1; // long int xco,yco,xc,yc,fx,tx,fy,ty,ry,rx,k; // double dy,dx, lnCI,dst2; // struct Point2 *TempCell2; // // f1 = (TempCell->X-p2p->Xmin)/p2p->XCellDim; xc = int(f1); // CLAMP(xc, 0, p2p->MaxXCell, "xc"); // f1 = (TempCell->Y-p2p->Ymin)/p2p->YCellDim; yc = int(f1); // CLAMP(yc, 0, p2p->MaxYCell, "yc"); // // dx = (Xmax-Xmin)/(double(p2p->MaxXCell+1)); // dy = (Ymax-Ymin)/(double(p2p->MaxYCell+1)); // rx = int(this->InteractionRange/dx+1.0); // ry = int(this->InteractionRange/dy+1.0); // // lnCI = log(TempCell->Beta); // // k = 0; // // if((xc+rx)<=p2p->MaxXCell) tx=xc+rx; else tx = p2p->MaxXCell; // if((yc+ry)<=p2p->MaxYCell) ty=yc+ry; else ty = p2p->MaxYCell; // if((xc-rx)>=0) fx=xc-rx; else fx = 0; // if((yc-ry)>=0) fy=yc-ry; else fy = 0; // // //Rprintf("MCI! 
%d %d %d %d\n",fx,tx,fy,ty); // // for(xco = fx; xco <= tx; xco++){ // for(yco = fy; yco <= ty; yco++){ // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in lnCondInt()"); // TempCell2 = p2p->headCell[xco][yco]->next; // CHECK(TempCell2, "internal error: TempCell2 is null in lnCondInt()"); // while(TempCell2!=TempCell2->next){ // if(TempCell2 != TempCell){ // k++; // dst2 = pow(TempCell->X-TempCell2->X,2)+ // pow(TempCell->Y-TempCell2->Y,2); // lnCI += log(Interaction(dst2)); // } // TempCell2 = TempCell2->next; // CHECK(TempCell2, // "internal error: TempCell2 is null in lnCondInt() loop"); // } // } // } // return(lnCI); //} //void StraussProcess::Beta(struct Point2 *TempCell){ // TempCell->Beta = beta; //} //void StraussProcess::CalcBeta(Point2Pattern *p2p){ // long int xco,yco; // // double dy,dx; // struct Point2 *TempMother; // // for(xco = 0; xco <= p2p->MaxXCell; xco++){ // for(yco = 0; yco <= p2p->MaxYCell; yco++){ // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in CalcBeta()"); // TempMother = p2p->headCell[xco][yco]->next; // CHECK(TempMother, "internal error: TempMother is null in CalcBeta()"); // while(TempMother!=TempMother->next){ // TempMother->Beta = this->beta; // TempMother = TempMother->next; // CHECK(TempMother, // "internal error: TempMother is null in CalcBeta() loop"); // } // } // } //} // ........................... Interface to R .......................... 
extern "C" { SEXP PerfectStrauss(SEXP beta, SEXP gamma, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, Gamma, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int EndTime, StartTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; SEXP stout, etout; int *ss, *ee; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(gamma = AS_NUMERIC(gamma)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 5 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Gamma = *(NUMERIC_POINTER(gamma)); R = *(NUMERIC_POINTER(r)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Strauss point process StraussProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, Gamma, R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = 
NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); PROTECT(stout = NEW_INTEGER(1)); PROTECT(etout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); ss = INTEGER_POINTER(stout); ee = INTEGER_POINTER(etout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); *ss = StartTime; *ee = EndTime; // pack up into output list PROTECT(out = NEW_LIST(5)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); SET_VECTOR_ELT(out, 3, stout); SET_VECTOR_ELT(out, 4, etout); // return UNPROTECT(11); // 5 arguments plus xout, yout, nout, stout, etout, out return(out); } } spatstat/src/pcf3.c0000755000176200001440000001201113166361223013704 0ustar liggesusers#include #include #include #include #include "geom3.h" #include "functable.h" #include "chunkloop.h" #include "constants.h" /* $Revision: 1.7 $ $Date: 2012/03/27 05:01:41 $ pair correlation function of 3D point pattern (Epanechnikov kernel) pcf3trans translation correction pcf3isot isotropic correction */ #define FOURPI (2.0 * M_2PI) void pcf3trans(p, n, b, pcf, delta) Point *p; int n; Box *b; Ftable *pcf; double delta; { register int i, j, l, lmin, lmax, maxchunk; register double dx, dy, dz, dist; register double vx, vy, vz, tval; Point *ip, *jp; double dt, vol, lambda, denom; double coef, twocoef, frac, invweight, kernel; double sphesfrac(), sphevol(); /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); lambda = ((double) n )/ vol; denom = lambda * lambda; for(l = 0; l < pcf->n; l++) { (pcf->denom)[l] = denom; (pcf->num)[l] = 0.0; } /* spacing of argument in result vector */ dt = (pcf->t1 - pcf->t0)/(pcf->n - 1); /* compute numerator */ OUTERCHUNKLOOP(i, n, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 8196) { ip = p + i; for(j = i + 1; j < n; j++) { /* compute pairwise distance */ jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; 
dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( ((dist - delta) - pcf->t0) / dt ); lmax = floor( ((dist + delta) - pcf->t0) / dt ); if(lmax >= 0 && lmin < pcf->n) { /* kernel centred at 'dist' has nonempty intersection with specified range of t values */ /* compute intersection */ if(lmin < 0) lmin = 0; if(lmax >= pcf->n) lmax = pcf->n - 1; /* compute (inverse) edge correction weight */ vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx); vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy); vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz); invweight = vx * vy * vz * FOURPI * dist * dist; if(invweight > 0.0) { for(l = lmin; l < pcf->n; l++) { tval = pcf->t0 + l * dt; /* unnormalised Epanechnikov kernel with halfwidth delta */ frac = (dist - tval)/delta; kernel = (1 - frac * frac); if(kernel > 0) (pcf->num)[l] += kernel / invweight; } } } } } } /* constant factor in kernel */ coef = 3.0/(4.0 * delta); /* multiplied by 2 because we only visited i < j pairs */ twocoef = 2.0 * coef; /* normalise kernel and compute ratio estimate */ for(l = 0; l < pcf->n; l++) { (pcf->num)[l] *= twocoef; (pcf->f)[l] = ((pcf->denom)[l] > 0.0) ? 
(pcf->num)[l] / (pcf->denom)[l] : 0.0; } } void pcf3isot(p, n, b, pcf, delta) Point *p; int n; Box *b; Ftable *pcf; double delta; { register int i, j, l, lmin, lmax, maxchunk; register double dx, dy, dz, dist; Point *ip, *jp; double dt, vol, denom, mass, tval; double coef, frac, kernel; double sphesfrac(), sphevol(); Point vertex; Box half; /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); denom = ((double) (n * n))/vol; for(l = 0; l < pcf->n; l++) { (pcf->denom)[l] = denom; (pcf->num)[l] = 0.0; } /* spacing of argument in result vector */ dt = (pcf->t1 - pcf->t0)/(pcf->n - 1); /* set up for volume correction */ vertex.x = b->x0; vertex.y = b->y0; vertex.z = b->z0; half.x1 = b->x1; half.y1 = b->y1; half.z1 = b->z1; half.x0 = (b->x0 + b->x1)/2.0; half.y0 = (b->y0 + b->y1)/2.0; half.z0 = (b->z0 + b->z1)/2.0; /* compute numerator */ OUTERCHUNKLOOP(i, n, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 8196) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( ((dist - delta) - pcf->t0) / dt ); lmax = floor( ((dist + delta) - pcf->t0) / dt ); if(lmax >= 0 && lmin < pcf->n) { /* kernel centred at 'dist' has nonempty intersection with specified range of t values */ /* compute intersection */ if(lmin < 0) lmin = 0; if(lmax >= pcf->n) lmax = pcf->n - 1; /* compute edge correction weight */ mass = (1.0 / sphesfrac(ip, b, dist)) + (1.0 / sphesfrac(jp, b, dist)); mass *= 1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol; if(mass > 0.0) { mass /= FOURPI * dist * dist; for(l = lmin; l < pcf->n; l++) { tval = pcf->t0 + l * dt; /* unnormalised Epanechnikov kernel with halfwidth delta */ frac = (dist - tval)/delta; kernel = (1 - frac * frac); if(kernel > 0) (pcf->num)[l] += kernel * mass; } } } } } } /* constant factor in kernel */ coef = 3.0/(4.0 * delta); /* normalise kernel and compute 
ratio estimate */ for(l = 0; l < pcf->n; l++) { (pcf->num)[l] *= coef; (pcf->f)[l] = ((pcf->denom)[l] > 0.0)? (pcf->num)[l] / (pcf->denom)[l] : 0.0; } } spatstat/src/straush.c0000755000176200001440000000602513166361223014552 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Hard core Strauss process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct StraussHard { double gamma; double r; /* interaction distance */ double h; /* hard core distance */ double loggamma; double r2; double h2; double r2h2; /* r^2 - h^2 */ double *period; int hard; int per; } StraussHard; /* initialiser function */ Cdata *straushinit(state, model, algo) State state; Model model; Algor algo; { StraussHard *strausshard; strausshard = (StraussHard *) R_alloc(1, sizeof(StraussHard)); /* Interpret model parameters*/ strausshard->gamma = model.ipar[0]; strausshard->r = model.ipar[1]; /* No longer passed as r^2 */ strausshard->h = model.ipar[2]; /* No longer passed as h^2 */ strausshard->r2 = pow(strausshard->r, 2); strausshard->h2 = pow(strausshard->h, 2); strausshard->r2h2 = strausshard->r2 - strausshard->h2; strausshard->period = model.period; /* is the interaction numerically equivalent to hard core ? */ strausshard->hard = (strausshard->gamma < DOUBLE_EPS); strausshard->loggamma = (strausshard->hard) ? 0.0 : log(strausshard->gamma); /* periodic boundary conditions? 
*/ strausshard->per = (model.period[0] > 0.0); return((Cdata *) strausshard); } /* conditional intensity evaluator */ double straushcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, kount, ix, ixp1, j; double *x, *y; double u, v; double r2, r2h2, cifval; StraussHard *strausshard; double *period; DECLARE_CLOSE_VARS; strausshard = (StraussHard *) cdata; r2 = strausshard->r2; r2h2 = strausshard->r2h2; period = strausshard->period; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); kount = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(strausshard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { /* RESIDUE = r2 - distance^2 */ if(RESIDUE > r2h2) return((double) 0.0); ++kount; } } } if(ixp1 < npts) { for(j=ixp1; j r2h2) return((double) 0.0); ++kount; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE(u,v,x[j],y[j],r2)) { if(RESIDUE > r2h2) return((double) 0.0); ++kount; } } } if(ixp1 < npts) { for(j=ixp1; j r2h2) return((double) 0.0); ++kount; } } } } if(strausshard->hard) { if(kount > 0) cifval = (double) 0.0; else cifval = (double) 1.0; } else cifval = exp(strausshard->loggamma*kount); return cifval; } Cifns StraussHardCifns = { &straushinit, &straushcif, (updafunptr) NULL, NO}; spatstat/src/linequad.h0000644000176200001440000003433013166361223014665 0ustar liggesusers/* linequad.h Template code, #included several times in linequad.c Macros used: FUNNAME function name (unmarked version) FMKNAME function name (marked version) ALEA #defined if grid location should be randomised HUH #defined if debugging is on SWAP swap macro $Revision: 1.2 $ $Date: 2016/10/04 06:24:22 $ */ void FUNNAME(ns, from, to, nv, xv, yv, eps, ndat, sdat, tdat, wdat, ndum, xdum, ydum, sdum, tdum, wdum, maxscratch) /* A linear network with *ns segments and *nv vertices is specified by 
the vectors from, to, xv, yv. Data points on the network are specified by *ndat, sdat, tdat. *** Assumed to be sorted in order of 'sdat' ** Dummy points will be placed every 'eps' units along each segment. Output vectors: wdat quadrature weights for the data points wdum quadrature weights for the dummy points xdum, | ydum, | coordinates of dummy points sdum, | tdum | Space must be allocated for sum(ceiling(lengths/eps)) dummy points. */ int *ns; /* number of segments */ int *from, *to; /* endpoints of each segment */ int *nv; /* number of vertices */ double *xv, *yv; /* cartesian coords of vertices */ double *eps; /* desired spacing of dummy points */ int *ndat, *ndum; /* number of data & dummy points */ int *sdat, *sdum; /* segment id (local coordinate) */ double *tdat, *tdum; /* location (local coordinate) */ double *wdat, *wdum; /* quadrature weights */ double *xdum, *ydum; /* spatial coordinates of dummy points */ int *maxscratch; { int Nseg, Ndat, Ndum, Lmax, i, j, k, ll, m, fromi, toi; #ifdef HUH int Nvert; #endif int SegmentForData, nwhole, nentries, npieces, npieces1; double x0, y0, x1, y1, dx, dy; double seglength, ratio, epsilon, rump, epsfrac, rumpfrac, gridstart; double tfirst, tlast, tcurrent, plen, w; int *serial, *count, *pieceid; char *isdata; double *tvalue, *pieceweight; Nseg = *ns; Ndat = *ndat; Ndum = 0; Lmax = *maxscratch; epsilon = *eps; #ifdef HUH Nvert = *nv; Rprintf("Nseg=%d, Nvert=%d, Ndat=d, Lmax = %d\n\n", Nseg, Nvert, Ndat, Lmax); #endif /* allocate scratch space, one for each data/dummy point in current segment */ serial = (int *) R_alloc(Lmax, sizeof(int)); isdata = (char *) R_alloc(Lmax, sizeof(char)); tvalue = (double *) R_alloc(Lmax, sizeof(double)); pieceid = (int *) R_alloc(Lmax, sizeof(int)); /* allocate scratch space, one for each piece of current segment */ count = (int *) R_alloc(Lmax, sizeof(int)); pieceweight = (double *) R_alloc(Lmax, sizeof(double)); /* initialise pointer at start of point pattern Determine which segment 
contains first point */ k = 0; SegmentForData = (Ndat > 0) ? sdat[0] : -1; #ifdef ALEA GetRNGstate(); #endif /* loop over line segments */ for(i = 0; i < Nseg; i++) { #ifdef HUH Rprintf("Segment %d\n", i); #endif /* endpoints of segment */ fromi = from[i]; toi = to[i]; x0 = xv[fromi]; y0 = yv[fromi]; x1 = xv[toi]; y1 = yv[toi]; dx = x1 - x0; dy = y1 - y0; seglength = sqrt(dx * dx + dy * dy); /* divide segment into pieces of length eps with shorter bits at each end */ ratio = seglength/epsilon; nwhole = (int) floor(ratio); if(nwhole > 2 && ratio - nwhole < 0.5) --nwhole; rump = (seglength - nwhole * epsilon)/2.0; epsfrac = epsilon/seglength; rumpfrac = rump/seglength; /* There are nwhole+2 pieces, with endpoints 0, rumpfrac, rumpfrac+epsfrac, rumpfrac+2*epsfrac, ..., 1-rumpfrac, 1 */ /* Now place dummy points in these pieces */ #ifdef ALEA tfirst = rumpfrac * unif_rand(); #else tfirst = rumpfrac/2.0; #endif #ifdef HUH Rprintf("\tnwhole=%d, epsfrac=%lf, rumpfrac=%lf, tfirst=%lf\n", nwhole, epsfrac, rumpfrac, tfirst); Rprintf("\tsegment length %lf divided into %d pieces\n", seglength, nwhole+2); #endif /* create a new dummy point in each piece */ #ifdef HUH Rprintf("\tMaking left dummy point %d\n", Ndum); #endif tvalue[0] = tfirst; serial[0] = Ndum; isdata[0] = NO; count[0] = 1; pieceid[0] = 0; xdum[Ndum] = x0 + dx * tfirst; ydum[Ndum] = y0 + dy * tfirst; sdum[Ndum] = i; tdum[Ndum] = tfirst; ++Ndum; if(nwhole > 0) { #ifdef HUH Rprintf("\tMaking %d middle dummy points\n", nwhole); #endif #ifdef ALEA gridstart = rumpfrac - unif_rand() * epsfrac; #else gridstart = rumpfrac - epsfrac/2.0; #endif for(j = 1; j <= nwhole; j++) { serial[j] = Ndum; tvalue[j] = tcurrent = gridstart + ((double) j) * epsfrac; isdata[j] = NO; count[j] = 1; pieceid[j] = j; xdum[Ndum] = x0 + dx * tcurrent; ydum[Ndum] = y0 + dy * tcurrent; sdum[Ndum] = i; tdum[Ndum] = tcurrent; ++Ndum; } } j = nwhole + 1; #ifdef HUH Rprintf("\tMaking right dummy point %d\n", Ndum); #endif serial[j] = Ndum; isdata[j] 
= NO; tvalue[j] = tlast = 1.0 - tfirst; count[j] = 1; pieceid[j] = j; xdum[Ndum] = x0 + dx * tlast; ydum[Ndum] = y0 + dy * tlast; sdum[Ndum] = i; tdum[Ndum] = tlast; ++Ndum; nentries = npieces = nwhole + 2; npieces1 = npieces-1; /* add any data points lying on current segment i */ while(SegmentForData == i) { #ifdef HUH Rprintf("\tData point %d lies on segment %d\n", k, i); #endif serial[nentries] = k; tvalue[nentries] = tcurrent = tdat[k]; isdata[nentries] = YES; /* determine which piece contains the data point */ ll = (int) ceil((tcurrent - rumpfrac)/epsfrac); if(ll < 0) ll = 0; else if(ll >= npieces) ll = npieces1; #ifdef HUH Rprintf("\tData point %d mapped to piece %d\n", k, ll); #endif count[ll]++; pieceid[nentries] = ll; ++nentries; ++k; SegmentForData = (k < Ndat) ? sdat[k] : -1; } /* compute counting weights for each piece of segment */ #ifdef HUH Rprintf("\tcounting weights..\n"); #endif for(ll = 0; ll < npieces; ll++) { plen = (ll == 0 || ll == npieces1)? rump : epsilon; pieceweight[ll] = plen/count[ll]; } /* apply weights to data/dummy points */ #ifdef HUH Rprintf("\tdistributing weights..\n"); #endif for(j = 0; j < nentries; j++) { m = serial[j]; ll = pieceid[j]; if(ll >= 0 && ll < npieces) { w = pieceweight[ll]; if(isdata[j]) { #ifdef HUH Rprintf("\t\tEntry %d: data point %d, piece %d\n", j, m, ll); #endif wdat[m] = w; } else { #ifdef HUH Rprintf("\t\tEntry %d: dummy point %d, piece %d\n", j, m, ll); #endif wdum[m] = w; } } } } *ndum = Ndum; #ifdef ALEA PutRNGstate(); #endif } void FMKNAME(ns, from, to, nv, xv, yv, eps, ntypes, ndat, xdat, ydat, mdat, sdat, tdat, wdat, ndum, xdum, ydum, mdum, sdum, tdum, wdum, maxscratch) /* A linear network with *ns segments and *nv vertices is specified by the vectors from, to, xv, yv. Data points on the network are specified by *ndat, xdat, ydat, mdat, sdat, tdat. *** Assumed to be sorted in order of 'sdat' ** Dummy points will be placed every 'eps' units along each segment and replicated for each possible mark. 
Each data point location is also replicated by dummy points with each possible mark except the mark of the data point. Output vectors: wdat quadrature weights for the data points wdum quadrature weights for the dummy points xdum, | ydum, | coordinates of dummy points sdum, | tdum | mdum marks for dummy points Space must be allocated for ntypes * sum(ceiling(lengths/eps)) dummy points. */ int *ns; /* number of segments */ int *from, *to; /* endpoints of each segment */ int *nv; /* number of vertices */ double *xv, *yv; /* cartesian coords of vertices */ double *eps; /* desired spacing of dummy points */ int *ndat, *ndum; /* number of data & dummy points */ int *ntypes; /* number of types */ double *xdat, *ydat; /* spatial coordinates of data points */ double *xdum, *ydum; /* spatial coordinates of dummy points */ int *mdat, *mdum; /* mark values */ int *sdat, *sdum; /* segment id (local coordinate) */ double *tdat, *tdum; /* location (local coordinate) */ double *wdat, *wdum; /* quadrature weights */ int *maxscratch; { int Nseg, Ndat, Ndum, Ntypes, Lmax, i, k, ll, m, fromi, toi; #ifdef HUH int Nvert; #endif int SegmentForData, nwhole, nentries, npieces, npieces1, nMpieces; int jpiece, jentry, jpdata, type, mcurrent; double x0, y0, x1, y1, dx, dy, xcurrent, ycurrent; double seglength, ratio, epsilon, rump, epsfrac, rumpfrac, gridstart; double tfirst, tlast, tcurrent, plen, w; int *serial, *count, *mkpieceid; char *isdata; double *tvalue, *countingweight; Nseg = *ns; Ndat = *ndat; Ntypes = *ntypes; Ndum = 0; Lmax = *maxscratch; epsilon = *eps; #ifdef HUH Nvert = *nv; Rprintf("Nseg=%d, Nvert=%d, Ndat=d, Lmax = %d\n\n", Nseg, Nvert, Ndat, Lmax); #endif /* allocate scratch space, one for each data/dummy point in current segment */ serial = (int *) R_alloc(Lmax, sizeof(int)); isdata = (char *) R_alloc(Lmax, sizeof(char)); tvalue = (double *) R_alloc(Lmax, sizeof(double)); mkpieceid = (int *) R_alloc(Lmax, sizeof(int)); /* allocate scratch space, one for each piece of 
current segment */ count = (int *) R_alloc(Lmax, sizeof(int)); countingweight = (double *) R_alloc(Lmax, sizeof(double)); /* initialise pointer at start of point pattern Determine which segment contains first point */ k = 0; SegmentForData = (Ndat > 0) ? sdat[0] : -1; #ifdef ALEA GetRNGstate(); #endif /* loop over line segments */ for(i = 0; i < Nseg; i++) { #ifdef HUH Rprintf("Segment %d\n", i); #endif /* endpoints of segment */ fromi = from[i]; toi = to[i]; x0 = xv[fromi]; y0 = yv[fromi]; x1 = xv[toi]; y1 = yv[toi]; dx = x1 - x0; dy = y1 - y0; seglength = sqrt(dx * dx + dy * dy); /* divide segment into pieces of length eps with shorter bits at each end */ ratio = seglength/epsilon; nwhole = (int) floor(ratio); if(nwhole > 2 && ratio - nwhole < 0.5) --nwhole; npieces = nwhole + 2; rump = (seglength - nwhole * epsilon)/2.0; epsfrac = epsilon/seglength; rumpfrac = rump/seglength; /* There are nwhole+2 pieces, with endpoints 0, rumpfrac, rumpfrac+epsfrac, rumpfrac+2*epsfrac, ..., 1-rumpfrac, 1 */ /* Now place dummy points in these pieces */ #ifdef ALEA tfirst = rumpfrac * unif_rand(); gridstart = rumpfrac - epsfrac * unif_rand(); #else tfirst = rumpfrac/2.0; gridstart = rumpfrac - epsfrac/2.0; #endif tlast = 1.0 - tfirst; #ifdef HUH Rprintf("\tnwhole=%d, epsfrac=%lf, rumpfrac=%lf, tfirst=%lf\n", nwhole, epsfrac, rumpfrac, tfirst); Rprintf("\tsegment length %lf divided into %d pieces\n", seglength, npieces); #endif /* 'Marked pieces' of segment are numbered in order (piece 0, mark 0), (piece 0, mark 1), ..., (piece 0, mark Ntypes-1), (piece 1, mark 0), ..... mpieceid = type + pieceid * Ntypes */ #ifdef HUH Rprintf("\tMaking %d x %d = %d dummy points\n", npieces, Ntypes, npieces * Ntypes); #endif /* create a new dummy point in each piece */ npieces1 = npieces-1; for(jpiece = 0; jpiece < npieces; jpiece++) { tcurrent = (jpiece == 0) ? tfirst : (jpiece == npieces1) ? 
tlast : (gridstart + ((double) jpiece) * epsfrac); xcurrent = x0 + dx * tcurrent; ycurrent = y0 + dy * tcurrent; for(type = 0; type < Ntypes; type++) { /* position in list of relevant data/dummy points */ jentry = type + jpiece * Ntypes; /* serial number of marked piece */ ll = jentry; tvalue[jentry] = tcurrent; serial[jentry] = Ndum; isdata[jentry] = NO; mkpieceid[jentry] = ll; count[ll] = 1; xdum[Ndum] = xcurrent; ydum[Ndum] = ycurrent; mdum[Ndum] = type; sdum[Ndum] = i; tdum[Ndum] = tcurrent; ++Ndum; } } nentries = npieces * Ntypes; /* handle any data points lying on current segment i */ while(SegmentForData == i) { #ifdef HUH Rprintf("\tData point %d lies on segment %d\n", k, i); #endif xcurrent = xdat[k]; ycurrent = ydat[k]; tcurrent = tdat[k]; mcurrent = mdat[k]; /* determine which piece contains the data point */ jpdata = (int) ceil((tcurrent - rumpfrac)/epsfrac); if(jpdata < 0) jpdata = 0; else if(jpdata >= npieces) jpdata = npieces1; #ifdef HUH Rprintf("\tData point %d falls in piece %d\n", k, jpdata); #endif /* copy data point, and create dummy points at same location with different marks */ for(type = 0; type < Ntypes; type++) { tvalue[nentries] = tcurrent; ll = type + jpdata * Ntypes; mkpieceid[nentries] = ll; count[ll]++; if(type == mcurrent) { /* data point */ isdata[nentries] = YES; serial[nentries] = k; } else { /* create dummy point */ isdata[nentries] = NO; serial[nentries] = Ndum; xdum[Ndum] = xcurrent; ydum[Ndum] = ycurrent; mdum[Ndum] = type; sdum[Ndum] = i; tdum[Ndum] = tcurrent; ++Ndum; } ++nentries; } ++k; SegmentForData = (k < Ndat) ? sdat[k] : -1; } /* compute counting weights for each piece of segment */ #ifdef HUH Rprintf("\tcounting weights..\n"); #endif for(jpiece = 0; jpiece < npieces; jpiece++) { plen = (jpiece == 0 || jpiece == npieces1)? 
rump : epsilon; for(type = 0; type < Ntypes; type++) { ll = type + jpiece * Ntypes; countingweight[ll] = plen/count[ll]; } } /* apply weights to data/dummy points */ #ifdef HUH Rprintf("\tdistributing weights..\n"); #endif nMpieces = npieces * Ntypes; for(jentry = 0; jentry < nentries; jentry++) { m = serial[jentry]; ll = mkpieceid[jentry]; if(ll >= 0 && ll < nMpieces) { w = countingweight[ll]; if(isdata[jentry]) { #ifdef HUH Rprintf("\t\tEntry %d: data point %d, piece %d\n", jentry, m, ll); #endif wdat[m] = w; } else { #ifdef HUH Rprintf("\t\tEntry %d: dummy point %d, piece %d\n", jentry, m, ll); #endif wdum[m] = w; } } } } *ndum = Ndum; #ifdef ALEA PutRNGstate(); #endif } spatstat/src/crossloop.h0000644000176200001440000000341113166361223015102 0ustar liggesusers/* crossloop.h Generic code template for loop for cross-close-pairs operations collecting contributions to point x_i from all points y_j such that ||x_i - y_j|| <= r cpp variables used: INITIAL_I code executed at start of 'i' loop CONTRIBUTE_IJ code executed to compute contribution from j to i COMMIT_I code executed to save total contribution to i C variables used: int i, j, n1, n2, maxchunk, jleft; double x1i, y1i, xleft, dx, dy, d2, rmax, r2max; double *x1, *y1, *x2, *y2; $Revision: 1.2 $ $Date: 2014/04/02 07:59:10 $ */ #ifndef CHUNKLOOP_H #include "chunkloop.h" #endif #define CROSSLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I) \ OUTERCHUNKLOOP(i, n1, maxchunk, 65536) { \ R_CheckUserInterrupt(); \ INNERCHUNKLOOP(i, n1, maxchunk, 65536) { \ \ x1i = x1[i]; \ y1i = y1[i]; \ \ INITIAL_I; \ \ jleft = 0; \ \ /* \ adjust starting point jleft \ */ \ xleft = x1i - rmax; \ while((x2[jleft] < xleft) && (jleft+1 < n2)) \ ++jleft; \ \ /* \ process from j = jleft until dx > rmax \ */ \ for(j=jleft; j < n2; j++) { \ dx = x2[j] - x1i; \ if(dx > rmax) \ break; \ dy = y2[j] - y1i; \ d2 = dx * dx + dy * dy; \ if(d2 <= r2max) { \ /* add this (i, j) pair to output */ \ CONTRIBUTE_IJ; \ } \ } \ COMMIT_I; \ } \ } 
spatstat/src/badgey.c0000755000176200001440000003136513166361223014321 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* To get debug output, insert the line: #define DEBUG 1 */ void fexitc(const char *msg); /* Conditional intensity function for a multiscale saturation process. parameter vector: ipar[0] = ndisc ipar[1] = gamma[0] ipar[2] = r[0] ipar[3] = s[0] ... */ typedef struct BadGey { /* model parameters */ int ndisc; double *gamma; double *r; double *s; /* transformations of the parameters */ double *r2; double *loggamma; int *hard; /* periodic distance */ double *period; int per; /* auxiliary counts */ int *aux; /* matrix[ndisc, npmax]: neighbour counts in current state */ int *tee; /* vector[ndisc] : neighbour count at point in question */ double *w; /* vector[ndisc] : sum of changes in counts at other points */ } BadGey; Cdata *badgeyinit(state, model, algo) State state; Model model; Algor algo; { int i, j, k, i0, ndisc, nmatrix; double r, g, d2; BadGey *badgey; /* create storage */ badgey = (BadGey *) R_alloc(1, sizeof(BadGey)); badgey->ndisc = ndisc = model.ipar[0]; /* Allocate space for parameter vectors */ badgey->gamma = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->r = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->s = (double *) R_alloc((size_t) ndisc, sizeof(double)); /* Derived values */ badgey->r2 = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->loggamma = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->hard = (int *) R_alloc((size_t) ndisc, sizeof(int)); /* copy and transform parameters */ for(i=0; i < ndisc; i++) { i0 = 3*i + 1; g = badgey->gamma[i] = model.ipar[i0]; r = badgey->r[i] = model.ipar[i0 + 1]; badgey->s[i] = model.ipar[i0 + 2]; badgey->r2[i] = r * r; badgey->hard[i] = (g < DOUBLE_EPS); badgey->loggamma[i] = (g < DOUBLE_EPS) ? 0 : log(g); } /* periodic boundary conditions? 
*/ badgey->period = model.period; badgey->per = (model.period[0] > 0.0); /* Allocate scratch space */ badgey->tee = (int *) R_alloc((size_t) ndisc, sizeof(int)); badgey->w = (double *) R_alloc((size_t) ndisc, sizeof(double)); /* Allocate space for auxiliary counts */ nmatrix = ndisc * state.npmax; badgey->aux = (int *) R_alloc((size_t) nmatrix, sizeof(int)); /* Initialise auxiliary counts */ for(i = 0; i < nmatrix; i++) badgey->aux[i] = 0; for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(j == i) continue; d2 = dist2either(state.x[i], state.y[i], state.x[j], state.y[j], badgey->period); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) MAT(badgey->aux, k, i, ndisc) += 1; } } } #ifdef DEBUG Rprintf("Finished initialiser; ndisc=%d\n", ndisc); #endif return((Cdata *) badgey); } #define AUX(I,J) MAT(aux, I, J, ndisc) double badgeycif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int ix, j, k, npts, ndisc, tk; double u, v, d2; double a, dd2, b, f, r2, s, cifval; double *x, *y; int *tee, *aux; double *w; BadGey *badgey; badgey = (BadGey *) cdata; #ifdef DEBUG Rprintf("Entering badgeycif\n"); #endif npts = state.npts; cifval = 1.0; if(npts==0) return cifval; x = state.x; y = state.y; u = prop.u; v = prop.v; ix = prop.ix; ndisc = badgey->ndisc; tee = badgey->tee; aux = badgey->aux; w = badgey->w; /* For disc k, tee[k] = neighbour count at the point in question; w[k] = sum of changes in (saturated) neighbour counts at other points */ if(prop.itype == BIRTH) { /* compute tee[k] and w[k] from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = 0; w[k] = 0.0; } if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { tee[k]++; f = badgey->s[k] - AUX(k,j); if(f > 1) /* j is not saturated after addition of (u,v) */ w[k] += 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w[k] += f; } } } } else { /* Euclidean distance */ 
for(j=0; jr2[k]) { tee[k]++; f = badgey->s[k] - AUX(k,j); if(f > 1) /* j is not saturated after addition of (u,v) */ w[k] += 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w[k] += f; } } } } } else if(prop.itype == DEATH) { /* extract current auxiliary counts for point ix */ /* compute w[k] from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = AUX(k,ix); w[k] = 0.0; } /* compute change in counts for other points */ if(badgey->per) { /* Periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { f = badgey->s[k] - AUX(k,j); if(f > 0) /* j is not saturated */ w[k] += 1; /* deletion of 'ix' decreases count by 1 */ else { f += 1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w[k] += f; } } } } } } else { /* Euclidean distance */ for(j=0; jr2[k]) { f = badgey->s[k] - AUX(k,j); if(f > 0) /* j is not saturated */ w[k] += 1; /* deletion of 'ix' decreases count by 1 */ else { f += 1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w[k] += f; } } } } } } } else if(prop.itype == SHIFT) { /* compute auxiliary counts from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = 0; w[k] = 0.0; } /* Compute the cif at the new point, not the ratio of new/old */ if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { r2 = badgey->r2[k]; if(d2 < r2) { /* shifted point is a neighbour of point j */ tee[k]++; a = AUX(k,j); s = badgey->s[k]; /* Adjust */ dd2 = dist2(x[ix],y[ix], x[j],y[j],badgey->period); if(dd2 < r2) a -= 1; b = a + 1; /* b is the number of neighbours of point j in new state */ if(a < s && s < b) { w[k] += s - a; /* s is fractional and j is saturated */ } else if(s >= b) w[k] += 1; } } } } else { /* Euclidean distance */ for(j=0; jr2[k]; if(d2 < r2) { /* shifted point is a neighbour of point j */ tee[k]++; a = AUX(k,j); s = badgey->s[k]; /* Adjust */ dd2 = pow(x[ix] - x[j], 2) 
+ pow(y[ix] - y[j], 2); if(dd2 < r2) a -= 1; b = a + 1; /* b is the number of neighbours of point j in new state */ if(a < s && s < b) { w[k] += s - a; /* s is fractional and j is saturated */ } else if(s >= b) w[k] += 1; } } } } } #ifdef DEBUG Rprintf("ndisc=%d\n", ndisc); #endif /* compute total change in saturated count */ for(k = 0; k < ndisc; k++) { s = badgey->s[k]; tk = tee[k]; w[k] += ((tk < s) ? tk : s); #ifdef DEBUG Rprintf("s[%d]=%lf, t[%d]=%d, w[%d]=%lf\n", k, s, k, tk, k, w[k]); #endif } /* evaluate cif */ for(k = 0; k < ndisc; k++) { if(badgey->hard[k]) { if(tee[k] > 0) return(0.0); /* else cifval multiplied by 0^0 = 1 */ } else cifval *= exp(badgey->loggamma[k] * w[k]); } return cifval; } void badgeyupd(state, prop, cdata) State state; Propo prop; Cdata *cdata; { /* Declare other variables */ int ix, npts, ndisc, j, k; double u, v, xix, yix, r2, d2, d2old, d2new; double *x, *y; int *aux; BadGey *badgey; badgey = (BadGey *) cdata; aux = badgey->aux; /* 'state' is current state before transition */ x = state.x; y = state.y; npts = state.npts; ndisc = badgey->ndisc; #ifdef DEBUG Rprintf("start update ---- \n"); for(j=0; j < npts; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif if(prop.itype == BIRTH) { #ifdef DEBUG Rprintf("Update for birth ---- \n"); #endif /* Birth */ u = prop.u; v = prop.v; /* initialise auxiliary counters for new point x[npts], y[npts] */ for(k = 0; k < ndisc; k++) AUX(k, npts) = 0; /* update all auxiliary counters */ if(badgey->per) { /* periodic distance */ for(j=0; j < npts; j++) { d2 = dist2(u,v,x[j],y[j],badgey->period); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { AUX(k, j) += 1; AUX(k, npts) += 1; } } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { d2 = pow(u - x[j], 2) + pow(v - y[j], 2); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { AUX( k, j) += 1; AUX( k, npts) += 1; } } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j <= npts; 
j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } if(prop.itype == DEATH) { /* Death */ ix = prop.ix; u = x[ix]; v = y[ix]; #ifdef DEBUG Rprintf("--- Update for death of point %d = (%lf,%lf) ---- \n", ix, u, v); #endif /* Decrement auxiliary counter for each neighbour of deleted point, and remove entry corresponding to deleted point */ if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { if(j < ix) AUX(k,j) -= 1; else AUX(k,j-1) = AUX(k,j) - 1; } else if(j >= ix) AUX(k,j-1) = AUX(k,j); } } } else { /* Euclidean distance */ for(j=0; jr2[k]) { #ifdef DEBUG Rprintf("hit for point %d with radius r[%d]\n", j, k); #endif if(j < ix) AUX(k,j) -= 1; else AUX(k,j-1) = AUX(k,j) - 1; } else if(j >= ix) AUX(k,j-1) = AUX(k,j); } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j < npts-1; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } if(prop.itype == SHIFT) { #ifdef DEBUG Rprintf("Update for shift ---- \n"); #endif /* Shift */ u = prop.u; v = prop.v; ix = prop.ix; xix = x[ix]; yix = y[ix]; /* recompute all auxiliary counters for point ix */ for(k = 0; k < ndisc; k++) AUX(k,ix) = 0; if(badgey->per) { for(j=0; jperiod); d2old = dist2(xix,yix,x[j],y[j],badgey->period); for(k = 0; k < ndisc; k++) { r2 = badgey->r2[k]; if(d2old >= r2 && d2new >= r2) continue; if(d2new < r2) { /* increment neighbour count for new point */ AUX(k,ix) += 1; if(d2old >= r2) AUX(k,j) += 1; /* point j gains a new neighbour */ } else if(d2old < r2) AUX(k,j) -= 1; /* point j loses a neighbour */ } } } else { /* Euclidean distance */ for(j=0; jr2[k]; if(d2old >= r2 && d2new >= r2) continue; if(d2new < r2) { #ifdef DEBUG Rprintf("shifted point is close to j=%d\n", j); #endif /* increment neighbour count for new point */ AUX(k,ix) += 1; if(d2old >= r2) { #ifdef DEBUG Rprintf("\t(previous position was not)\n"); #endif 
AUX(k,j) += 1; /* point j gains a new neighbour */ } } else if(d2old < r2) { #ifdef DEBUG Rprintf("previous position was close to j=%d, shifted point is not\n", j); #endif AUX(k,j) -= 1; /* point j loses a neighbour */ } } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j < npts; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } fexitc("Unrecognised transition type; bailing out.\n"); } Cifns BadGeyCifns = { &badgeyinit, &badgeycif, &badgeyupd, NO}; spatstat/src/linknnd.c0000644000176200001440000000063513166361223014514 0ustar liggesusers#include #include "yesno.h" /* linknnd.c k-th nearest neighbours in a linear network Sparse representation of network ! Data points must be ordered by segment index ! $Revision: 1.3 $ $Date: 2016/12/04 11:08:58 $ */ #undef HUH #undef CROSS #define FNAME linknnd #include "linknnd.h" #undef FNAME #define CROSS #define FNAME linknncross #include "linknnd.h" #undef CROSS #undef FNAME spatstat/src/mhsnoop.h0000644000176200001440000000050013166361223014536 0ustar liggesusers/* Function declarations from mhsnoop.c $Revision: 1.4 $ $Date: 2013/05/27 02:09:10 $ */ #include "mhsnoopdef.h" void initmhsnoop(Snoop *s, SEXP env); void mhsnoop(Snoop *s, int irep, Algor *algo, State *state, Propo *prop, double numer, double denom, int *itype); spatstat/src/sumsymouter.h0000644000176200001440000000343413166361223015500 0ustar liggesusers/* sumsymouter.h Code template for some functions in linalg.c $Revision: 1.3 $ $Date: 2013/04/18 11:55:24 $ Macros used: FNAME = function name, WEIGHTED = #defined for weighted version */ void FNAME( x, #ifdef WEIGHTED w, #endif p, n, y ) double *x; /* p by n by n array */ #ifdef WEIGHTED double *w; /* n by n matrix (symmetric) */ #endif int *p, *n; double *y; /* output matrix p by p, initialised to zero */ { int N, P; register int i, j, k, m, ijpos, jipos, maxchunk; register double *xij, *xji; #ifdef WEIGHTED register double wij; #endif N = *n; P = 
*p; OUTERCHUNKLOOP(i, N, maxchunk, 256) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 256) { /* loop over j != i */ if(i > 0) { for(j = 0; j < i; j++) { /* pointers to [i,j] and [j,i] in N*N matrices */ ijpos = i + N * j; jipos = j + N * i; /* pointers to x[, i, j] and x[ , j, i] */ xij = x + ijpos * P; xji = x + jipos * P; /* outer product */ #ifdef WEIGHTED wij = w[ijpos]; #endif for(k = 0; k < P; k++) { for(m = 0; m < P; m++) { #ifdef WEIGHTED y[m + k * P] += wij * xij[m] * xji[k]; #else y[m + k * P] += xij[m] * xji[k]; #endif } } } } if(i + 1 < N) { for(j = i+1; j < N; j++) { /* pointers to [i,j] and [j,i] in N*N matrices */ ijpos = i + N * j; jipos = j + N * i; /* pointers to x[, i, j] and x[ , j, i] */ xij = x + ijpos * P; xji = x + jipos * P; /* outer product */ #ifdef WEIGHTED wij = w[ijpos]; #endif for(k = 0; k < P; k++) { for(m = 0; m < P; m++) { #ifdef WEIGHTED y[m + k * P] += wij * xij[m] * xji[k]; #else y[m + k * P] += xij[m] * xji[k]; #endif } } } } /* end of loop over j */ } } } spatstat/src/sphevol.c0000755000176200001440000000746413166361223014551 0ustar liggesusers#include #include #include "geom3.h" /* $Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $ Routine for calculating ABSOLUTE volume of intersection between sphere and box Arbitrary positions: point is allowed to be inside or outside box. # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #ifdef DEBUG #define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y)); #else #define DBG(X,Y) #endif #include "yesno.h" #define ABS(X) ((X >= 0.0) ? 
(X) : -(X)) static double rcubed, spherevol; double sphevol(point, box, r) Point *point; Box *box; double r; { double sum, p[4], q[4]; double v1(), v2(), v3(); int i, j; rcubed = r * r * r; spherevol = (4.0/3.0) * PI * rcubed; p[1] = box->x0 - point->x; p[2] = box->y0 - point->y; p[3] = box->z0 - point->z; q[1] = box->x1 - point->x; q[2] = box->y1 - point->y; q[3] = box->z1 - point->z; sum = 0; for(i = 1; i <= 3; i++) { sum += v1(p[i], -1, r) + v1(q[i], 1, r); #ifdef DEBUG Rprintf("i = %d, v1 = %f, v1 = %f\n", i, v1(p[i], -1, r), v1(q[i], 1, r)); #endif } DBG("Past v1", sum) for(i = 1; i < 3; i++) for(j = i+1; j <= 3; j++) { sum -= v2(p[i], -1, p[j], -1, r) + v2(p[i], -1, q[j], 1, r) + v2(q[i], 1, p[j], -1, r) + v2(q[i], 1, q[j], 1, r); #ifdef DEBUG Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum); #endif } DBG("Past v2", sum) sum += v3(p[1], -1, p[2], -1, p[3], -1, r) + v3(p[1], -1, p[2], -1, q[3], 1, r); DBG("sum", sum) sum += v3(p[1], -1, q[2], 1, p[3], -1, r) + v3(p[1], -1, q[2], 1, q[3], 1, r); DBG("sum", sum) sum += v3(q[1], 1, p[2], -1, p[3], -1, r) + v3(q[1], 1, p[2], -1, q[3], 1, r); DBG("sum", sum) sum += v3(q[1], 1, q[2], 1, p[3], -1, r) + v3(q[1], 1, q[2], 1, q[3], 1, r); DBG("Past v3", sum) DBG("sphere volume", spherevol) return(spherevol - sum); } double v1(a,s,r) double a, r; int s; { double value; double u(); short sign; value = 4.0 * rcubed * u(ABS(a)/r, 0.0, 0.0); sign = (a >= 0.0) ? 1 : -1; if(sign == s) return(value); else return(spherevol - value); } double v2(a, sa, b, sb, r) double a, b, r; int sa, sb; { short sign; double u(); sign = (b >= 0.0) ? 1 : -1; if(sign != sb ) return(v1(a, sa, r) - v2(a, sa, ABS(b), 1, r)); b = ABS(b); sb = 1; sign = (a >= 0.0) ? 1 : -1; if(sign != sa) return(v1(b, sb, r) - v2(ABS(a), 1, b, sb, r)); a = ABS(a); return(2.0 * rcubed * u(a/r, b/r, 0.0)); } double v3(a, sa, b, sb, c, sc, r) double a, b, c, r; int sa, sb, sc; { short sign; double u(); sign = (c >= 0.0) ? 
1 : -1; if(sign != sc) return(v2(a,sa,b,sb,r) - v3(a,sa,b,sb, ABS(c), 1, r)); c = ABS(c); sc = 1; sign = (b >= 0.0) ? 1 : -1; if(sign != sb) return(v2(a,sa,c,sc,r) - v3(a,sa,ABS(b),1,c,sc,r)); b = ABS(b); sb = 1; sign = (a >= 0.0) ? 1 : -1; if(sign != sa) return(v2(b,sb, c, sc, r) - v3(ABS(a),1, b, sb, c, sc, r)); a = ABS(a); return(rcubed * u(a/r, b/r, c/r)); } double u(a, b, c) double a, b, c; { double w(); if(a * a + b * b + c * c >= 1.0) return(0.0); return( (PI/12.0) * (2.0 - 3.0 * (a + b + c) + (a * a * a + b * b * b + c * c * c)) + w(a,b) + w(b,c) + w(a,c) - a * b * c ); } double w(x,y) double x,y; /* Arguments assumed >= 0 */ { double z; z = sqrt(1 - x * x - y * y); return( (x / 2.0 - x * x * x / 6.0) * atan2(y, z) + (y / 2.0 - y * y * y / 6.0) * atan2(x, z) - ( atan2(x * y , z) - x * y * z )/3.0 ); } spatstat/src/looptest.h0000644000176200001440000000030213166361223014724 0ustar liggesusers/* looptest.h Utilities for looping $Revision: 1.1 $ $Date: 2014/09/19 00:47:34 $ */ /* a small value relative to threshold X, for loop exit test */ #define EPSILON(X) ((X)/64) spatstat/src/densptcross.c0000644000176200001440000002226313166361223015427 0ustar liggesusers#include #include #include "chunkloop.h" #include "crossloop.h" #include "constants.h" /* densptcross.c $Revision: 1.2 $ $Date: 2014/04/02 10:27:43 $ Assumes point patterns are sorted in increasing order of x coordinate *crdenspt Density estimate at points *crsmoopt Smoothed mark values at points */ #define TWOPI M_2PI double sqrt(), exp(); #define STD_DECLARATIONS \ int i, j, n1, n2, maxchunk, jleft; \ double x1i, y1i, xleft, dx, dy, d2, rmax, r2max; \ double *x1, *y1, *x2, *y2; #define STD_INITIALISE \ n1 = *nquery; \ x1 = xq; y1 = yq; \ n2 = *ndata; \ x2 = xd; y2 = yd; \ rmax = *rmaxi; \ r2max = rmax * rmax /* ----------------- density estimation -------------------- */ void crdenspt(nquery, xq, yq, ndata, xd, yd, rmaxi, sig, result) /* inputs */ int *nquery; /* number of locations to be interrogated 
*/ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n1 == 0 || n2 == 0) return; CROSSLOOP( { resulti = 0.0; }, { resulti += exp(-d2/twosig2); } , { result[i] = coef * resulti; }) } void wtcrdenspt(nquery, xq, yq, ndata, xd, yd, wd, rmaxi, sig, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *wd; /* weights of data points */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n1 == 0 || n2 == 0) return; CROSSLOOP( { resulti = 0.0; }, { resulti += wd[j] * exp(-d2/twosig2); }, { result[i] = coef * resulti; } ) } /* ------------- anisotropic versions -------------------- */ void acrdenspt(nquery, xq, yq, ndata, xd, yd, rmaxi, detsigma, sinv, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of 
computed density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n1 == 0 || n2 == 0) return; CROSSLOOP( { resulti = 0.0; }, { resulti += exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } void awtcrdenspt(nquery, xq, yq, ndata, xd, yd, wd, rmaxi, detsigma, sinv, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *wd; /* weights of data points */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n1 == 0 || n2 == 0) return; CROSSLOOP( { resulti = 0.0; }, { resulti += wd[j] * \ exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } /* --------------- smoothing --------------------------- */ void crsmoopt(nquery, xq, yq, ndata, xd, yd, vd, rmaxi, sig, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *vd; /* mark values at data points */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; 
double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } void wtcrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, wd, rmaxi, sig, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *vd; /* mark values at data points */ double *wd; /* weights of data points */ double *rmaxi; /* maximum distance */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = wd[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } /* ------------- anisotropic versions -------------------- */ void acrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, rmaxi, sinv, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *vd; /* mark values at data points */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * 
s22))/2.0); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } void awtcrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, wd, rmaxi, sinv, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *vd; /* mark values at data points */ double *wd; /* weights of data points */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = wd[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } spatstat/src/loccums.h0000644000176200001440000000400113166361223014520 0ustar liggesusers/* loccums.h C template for loccum.c data-to-data functions $Revision: 1.5 $ $Date: 2013/09/18 04:28:45 $ macros: FNAME function name NULVAL initial value (empty sum = 0, empty product = 1) INC(A,B) increment operation A += B or A *= B */ void FNAME(n, x, y, v, nr, rmax, ans) /* inputs */ int *n, *nr; double *x, *y, *v; double *rmax; /* output */ double *ans; /* matrix of column vectors of functions for each point */ { int N, Nr, Nans; double Rmax; int i, j, k, kmin, maxchunk, columnstart; double Rmax2, rstep, xi, yi; double dx, dy, dx2, d2, d, contrib; N = *n; Nr = *nr; Rmax = *rmax; if(N == 0) return; rstep = Rmax/(Nr-1); Rmax2 = Rmax * Rmax; Nans = Nr * N; /* initialise products to 1 */ OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Nans, maxchunk, 8196) { ans[k] = NULVAL; } } OUTERCHUNKLOOP(i, N, maxchunk, 
8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 8196) { xi = x[i]; yi = y[i]; columnstart = Nr * i; /* start position for f_i(.) in 'ans' */ /* process backward until |dx| > Rmax */ if(i > 0) { for(j=i-1; j >= 0; j--) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = v[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } /* process forward until |dx| > Rmax */ if(i < N - 1) { for(j=i+1; j < N; j++) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = v[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } } } } spatstat/src/methas.h0000755000176200001440000000712313166361223014347 0ustar liggesusers/* Definitions of types and data structures for Metropolis-Hastings State Current state of point pattern Model Model parameters passed from R Cdata (pointer to) model parameters and precomputed data in C Algor Algorithm parameters (p, q, nrep etc) Propo Proposal in Metropolis-Hastings algorithm History Transition history of MH algorithm Cifns Set of functions for computing the conditional intensity for a point process model. This consists of three functions init(State, Model, Algor) .... initialises auxiliary data eval(State, Propo) ........... evaluates cif update(State,Propo) .......... 
updates auxiliary data */ /* Current state of point pattern */ typedef struct State { double *x; /* vectors of Cartesian coordinates */ double *y; int *marks; /* vector of mark values */ int npts; /* current number of points */ int npmax; /* storage limit */ int ismarked; /* whether the pattern is marked */ } State; /* Parameters of model passed from R */ typedef struct Model { double *beta; /* vector of activity parameters */ double *ipar; /* vector of interaction parameters */ double *period; /* width & height of rectangle, if torus */ int ntypes; /* number of possible marks */ } Model; /* A pointer to Cdata is a pointer to C storage for parameters of model */ typedef void Cdata; /* RMH Algorithm parameters */ typedef struct Algor { double p; /* probability of proposing shift */ double q; /* conditional probability of proposing death */ int fixall; /* if TRUE, only shifts of location are feasible */ int ncond; /* For conditional simulation, the first 'ncond' points are fixed */ int nrep; /* number of iterations */ int nverb; /* print report every 'nverb' iterations */ int nrep0; /* number of iterations already performed in previous blocks - for reporting purposes */ int tempered; /* TRUE if tempering is applied */ double invtemp; /* inverse temperature if tempering is applied */ } Algor; /* Metropolis-Hastings proposal */ typedef struct Propo { double u; /* location of point of interest */ double v; int mrk; /* mark of point of interest */ int ix; /* index of point of interest, if already in pattern */ int itype; /* transition type */ } Propo; /* transition codes 'itype' */ #define REJECT 0 #define BIRTH 1 #define DEATH 2 #define SHIFT 3 #define HISTORY_INCLUDES_RATIO /* Record of transition history */ typedef struct History { int nmax; /* length of vectors */ int n; /* number of events recorded */ int *proptype; /* vector: proposal type */ int *accepted; /* vector: 0 for reject, 1 for accept */ #ifdef HISTORY_INCLUDES_RATIO double *numerator; /* vectors: 
Hastings ratio numerator & denominator */ double *denominator; #endif } History; /* conditional intensity functions */ typedef Cdata * (*initfunptr)(State state, Model model, Algor algo); typedef double (*evalfunptr)(Propo prop, State state, Cdata *cdata); typedef void (*updafunptr)(State state, Propo prop, Cdata *cdata); typedef struct Cifns { initfunptr init; evalfunptr eval; updafunptr update; int marked; } Cifns; #define NEED_UPDATE(X) ((X).update != (updafunptr) NULL) #define NULL_CIFNS { (initfunptr) NULL, (evalfunptr) NULL, (updafunptr) NULL, NO} /* miscellaneous macros */ #include "yesno.h" # define MAT(X,I,J,M) (X[(I)+(J)*(M)]) spatstat/src/fardist.h0000644000176200001440000000254113166361223014516 0ustar liggesusers/* fardist.h Code template for fardist.c Macros used: FNAME function name SQUARED #defined if squared distances should be returned. Copyright (C) Adrian Baddeley, Rolf Turner and Ege Rubak 2014 Licence: GPL >= 2 $Revision: 1.3 $ $Date: 2014/08/31 06:42:50 $ */ void FNAME(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ dfar) /* output grid */ /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep; double *xp, *yp; /* outputs */ double *dfar; { int Nxcol, Nyrow, Npoints; int i, j, k, ijpos; double X0, Y0, Xstep, Ystep, yi, xj; double d2, d2max, dx, dy; Nxcol = *nx; Nyrow = *ny; Npoints = *np; X0 = *x0; Y0 = *y0; Xstep = *xstep; Ystep = *ystep; if(Npoints == 0) return; /* loop over pixels */ for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) { R_CheckUserInterrupt(); for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) { d2max = 0.0; for(k = 0; k < Npoints; k++) { dx = xj - xp[k]; dy = yi - yp[k]; d2 = dx * dx + dy * dy; if(d2 > d2max) d2max = d2; } ijpos = i + j * Nyrow; #ifdef SQUARED dfar[ijpos] = d2max; #else dfar[ijpos] = sqrt(d2max); #endif /* end of loop over grid points (i, j) */ } } } spatstat/src/KrectBody.h0000644000176200001440000001042513166361223014750 0ustar liggesusers /* KrectBody.h +++ 
Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ Main function body for 'Krect' Included multiple times with different values of the macros: (#define or #undef) WEIGHTED ISOTROPIC TRANSLATION BORDER UNCORRECTED **Assumes point pattern is sorted in increasing order of x coordinate** **Assumes window is (0,wide) x (0, high) ** **Assumes output vectors were initialised to zero** Variables are declared in 'KrectFunDec.c' This algorithm is optimal (amongst the choices in spatstat) when the window is a rectangle *and* at least one of the ISOTROPIC, TRANSLATION corrections is needed. There are faster algorithms for the border correction on its own. $Revision: 1.3 $ $Date: 2014/02/09 03:01:27 $ */ /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < N) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > N) maxchunk = N; /* ............. LOOP OVER i ................. */ for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; #ifdef WEIGHTED wi = w[i]; #endif #ifdef BORDER /* For border correction */ /* compute distance to border */ bx = MIN(xi, (wide - xi)); by = MIN(yi, (high - yi)); bdisti = MIN(bx, by); /* denominator will ultimately be incremented for all r < b[i] */ bratio = bdisti/rstep; /* lbord is the largest integer STRICTLY less than bratio */ lbord = (int) ceil(bratio) - 1; lbord = (lbord <= Nr1) ? 
lbord : Nr1; /* increment entry corresponding to r = b[i] */ #ifdef WEIGHTED if(lbord >= 0) denomAccum[lbord] += wi; #else if(lbord >= 0) (denomAccum[lbord])++; #endif #endif #ifdef ISOTROPIC /* For isotropic correction */ /* perpendicular distance from point i to each edge of rectangle L = left, R = right, D = down, U = up */ dL = xi; dR = wide - xi; dD = yi; dU = high - yi; /* test for corner of the rectangle */ ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU); corner = (ncor >= 2); /* angle between - perpendicular to edge of rectangle and - line from point to corner of rectangle */ bLU = atan2(dU, dL); bLD = atan2(dD, dL); bRU = atan2(dU, dR); bRD = atan2(dD, dR); bUL = atan2(dL, dU); bUR = atan2(dR, dU); bDL = atan2(dL, dD); bDR = atan2(dR, dD); #endif /* ............. LOOP OVER j ................. */ /* scan through points (x[j],y[j]) */ /* scan backward from i-1 until |x[j]-x[i]| > Rmax */ if(i > 0) { for(j=i-1; j >= 0; j--) { /* squared interpoint distance */ dx = xi - x[j]; dx2 = dx * dx; if(dx2 >= R2max) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < R2max) { #include "KrectIncrem.h" } } } /* scan forward from i+1 until x[j]-x[i] > Rmax */ if(i < N1) { for(j=i+1; j < N; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= R2max) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < R2max) { #include "KrectIncrem.h" } } } } } /* .................. END OF LOOPS ................................ */ /* ............. compute cumulative functions ..................... */ #ifdef UNCORRECTED naccum = ZERO; for(l = 0; l < Nr; l++) { unco[l] += naccum; naccum = unco[l]; } #endif #ifdef ISOTROPIC accum = 0.0; for(l = 0; l < Nr; l++) { iso[l] += accum; accum = iso[l]; } #endif #ifdef TRANSLATION accum = 0.0; for(l = 0; l < Nr; l++) { trans[l] += accum; accum = trans[l]; } #endif #ifdef BORDER /* Now use the accumulated values to compute the numerator and denominator. 
The value of denomAccum[l] should be added to denom[k] for all k <= l. numerHighAccum[l] should be added to numer[k] for all k <=l numerLowAccum[l] should then be subtracted from numer[k] for k <= l. */ for(l=Nr1, naccum=daccum=ZERO; l>=0; l--) { daccum += denomAccum[l]; bdenom[l] = daccum; naccum += numerHighAccum[l]; bnumer[l] = naccum; naccum -= numerLowAccum[l]; } #endif spatstat/src/dist2dpath.h0000644000176200001440000000765513166361223015143 0ustar liggesusers/* Function body for dist2dpath.c Macros used: FNAME function name DTYPE declaration for distance values ('double' or 'int') FLOATY (DTYPE == 'double') $Revision: 1.3 $ $Date: 2013/05/27 02:09:10 $ */ #undef DEBUG #define MATRIX(X,I,J) (X)[(J) + n * (I)] #define D(I,J) MATRIX(d, I, J) #define DPATH(I,J) MATRIX(dpath, I, J) #define ADJ(I,J) (MATRIX(adj, I, J) != 0) #define INFIN -1 #define FINITE(X) ((X) >= 0) void FNAME(nv, d, adj, dpath, tol, niter, status) int *nv; /* number of vertices */ DTYPE *d; /* matrix of edge lengths */ int *adj; /* 0/1 edge matrix of graph */ DTYPE *tol; /* tolerance threshold (ignored in integer case) */ DTYPE *dpath; /* output - shortest path distance matrix */ int *niter, *status; /* status = 0 for convergence */ { int i, j, k, n, iter, maxiter, changed; DTYPE dij, dik, dkj, dikj; #ifdef FLOATY DTYPE eps, diff, maxdiff; #endif int totaledges, starti, nneighi, increm, pos; int *start, *nneigh, *indx; n = *nv; #ifdef FLOATY eps = *tol; #endif /* initialise and count edges */ *status = -1; totaledges = 0; for(i = 0; i < n; i++) { for(j = 0; j < n; j++) { DPATH(i, j) = (i == j) ? 0 : ((ADJ(i,j)) ? D(i, j) : INFIN); if((i != j) && ADJ(i,j)) ++totaledges; } } maxiter = 2 + ((totaledges > n) ? 
totaledges : n); /* store indices j for each edge (i,j) */ indx = (int *) R_alloc(totaledges, sizeof(int)); nneigh = (int *) R_alloc(n, sizeof(int)); start = (int *) R_alloc(n, sizeof(int)); pos = 0; for(i = 0; i < n; i++) { nneigh[i] = 0; start[i] = pos; #ifdef DEBUG Rprintf("Neighbours of %d:\n", i); #endif for(j = 0; j < n; j++) { if((i != j) && ADJ(i,j) && FINITE(D(i,j))) { #ifdef DEBUG Rprintf("\t%d\n", j); #endif ++(nneigh[i]); if(pos > totaledges) error("internal error: pos exceeded storage"); indx[pos] = j; ++pos; } } } /* run */ for(iter = 0; iter < maxiter; iter++) { changed = 0; #ifdef FLOATY maxdiff = 0; #endif #ifdef DEBUG Rprintf("--------- iteration %d ---------------\n", iter); #endif for(i = 0; i < n; i++) { R_CheckUserInterrupt(); nneighi = nneigh[i]; if(nneighi > 0) { /* run through neighbours k of i */ starti = start[i]; for(increm = 0, pos=starti; increm < nneighi; ++increm, ++pos) { k = indx[pos]; dik = DPATH(i,k); #ifdef DEBUG #ifdef FLOATY Rprintf("i=%d k=%d dik=%lf\n", i, k, dik); #else Rprintf("i=%d k=%d dik=%d\n", i, k, dik); #endif #endif /* now run through all other vertices j */ for(j = 0; j < n; j++) { if(j != i && j != k) { dij = DPATH(i,j); dkj = DPATH(k,j); if(FINITE(dkj)) { dikj = dik + dkj; #ifdef DEBUG #ifdef FLOATY Rprintf("considering %d -> (%d) -> %d,\t dij=%lf, dikj=%lf\n", i, k, j, dij, dikj); #else Rprintf("considering %d -> (%d) -> %d,\t dij=%d, dikj=%d\n", i, k, j, dij, dikj); #endif #endif if(!FINITE(dij) || dikj < dij) { #ifdef DEBUG #ifdef FLOATY Rprintf("updating i=%d j=%d via k=%d from %lf to %lf\n", i, j, k, dij, dikj); #else Rprintf("updating i=%d j=%d via k=%d from %d to %d\n", i, j, k, dij, dikj); #endif #endif DPATH(i,j) = DPATH(j,i) = dikj; changed = 1; #ifdef FLOATY diff = (FINITE(dij)) ? 
dij - dikj : dikj; if(diff > maxdiff) maxdiff = diff; #endif } } } } } } } if(changed == 0) { /* algorithm converged */ #ifdef DEBUG Rprintf("Algorithm converged\n"); #endif *status = 0; break; #ifdef FLOATY } else if(FINITE(maxdiff) && maxdiff < eps) { /* tolerance reached */ #ifdef DEBUG Rprintf("Algorithm terminated with maxdiff=%lf\n", maxdiff); #endif *status = 1; break; #endif } } #ifdef DEBUG Rprintf("Returning after %d iterations on %d vertices\n", iter, n); #endif *niter = iter; } #undef DEBUG #undef MATRIX #undef D #undef DPATH #undef ADJ #undef INFIN #undef FINITE spatstat/src/hasclose.h0000644000176200001440000001604613166361223014670 0ustar liggesusers/* hasclose.h Function definitions to be #included in hasclose.c several times with different values of macros. Macros used: CLOSEFUN name of function for pairs in a single pattern CROSSFUN name of function for pairs between two patterns ZCOORD if defined, coordinates are 3-dimensional TORUS if defined, distances are periodic BUG debugger flag $Revision: 1.10 $ $Date: 2017/06/05 10:53:59 $ */ void CLOSEFUN(n, x, y, #ifdef ZCOORD z, #endif r, /* distance deemed 'close' */ #ifdef TORUS b, /* box dimensions */ #endif t) /* result: true/false */ int *n, *t; double *x, *y, *r; #ifdef ZCOORD double *z; #endif #ifdef TORUS double *b; #endif { double xi, yi, rmax, r2max, rmaxplus, dx, dy, d2minr2; #ifdef ZCOORD double zi, dz; #endif int N, maxchunk, i, j; #ifdef TORUS double Bx, By, Hy; #ifdef ZCOORD double Bz, Hz; #endif #endif N = *n; rmax = *r; r2max = rmax * rmax; rmaxplus = rmax + rmax/16.0; #ifdef TORUS Bx = b[0]; By = b[1]; Hy = By/2.0; #ifdef ZCOORD Bz = b[2]; Hz = Bz/2.0; #endif #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < N) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > N) maxchunk = N; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; #ifdef ZCOORD zi = z[i]; #endif if(i > 0) { /* scan backward from i */ for(j = i - 1; j >= 0; j--) { dx = xi - x[j]; if(dx > rmaxplus) 
break; dy = y[j] - yi; #ifdef TORUS if(dy < 0.0) dy = -dy; if(dy > Hy) dy = By - dy; #endif d2minr2 = dx * dx + dy * dy - r2max; #ifdef ZCOORD if(d2minr2 <= 0.0) { dz = z[j] - zi; #ifdef TORUS if(dz < 0.0) dz = -dz; if(dz > Hz) dz = Bz - dz; #endif d2minr2 = d2minr2 + dz * dz; #endif if(d2minr2 <= 0.0) { /* pair (i, j) is close */ t[i] = t[j] = 1; } #ifdef ZCOORD } #endif } #ifdef TORUS /* wrap-around */ /* scan forward from 0 */ for(j = 0; j < i; j++) { dx = Bx + x[j] - xi; if(dx > rmaxplus) break; dy = y[j] - yi; #ifdef TORUS if(dy < 0.0) dy = -dy; if(dy > Hy) dy = By - dy; #endif d2minr2 = dx * dx + dy * dy - r2max; #ifdef ZCOORD if(d2minr2 <= 0.0) { dz = z[j] - zi; #ifdef TORUS if(dz < 0.0) dz = -dz; if(dz > Hz) dz = Bz - dz; #endif d2minr2 = d2minr2 + dz * dz; #endif if(d2minr2 <= 0.0) { /* pair (i, j) is close */ t[i] = t[j] = 1; } #ifdef ZCOORD } #endif } #endif } } } } /* ........................................................ */ void CROSSFUN(n1, x1, y1, #ifdef ZCOORD z1, #endif n2, x2, y2, #ifdef ZCOORD z2, #endif r, #ifdef TORUS b, /* box dimensions (same for both patterns!!) 
*/ #endif t) int *n1, *n2, *t; double *x1, *y1, *x2, *y2, *r; #ifdef ZCOORD double *z1, *z2; #endif #ifdef TORUS double *b; #endif { /* lengths */ int N1, N2, maxchunk; /* distance parameter */ double rmax, r2max, rmaxplus; /* indices */ int i, j, jleft; /* temporary values */ double x1i, y1i, xleft, dx, dy, dx2, d2minr2; #ifdef ZCOORD double z1i, dz; #endif #ifdef TORUS double Bx, By, Hx, Hy; int jright; #ifdef ZCOORD double Bz, Hz; #endif #endif N1 = *n1; N2 = *n2; rmax = *r; r2max = rmax * rmax; rmaxplus = rmax + rmax/16.0; #ifdef TORUS Bx = b[0]; By = b[1]; Hx = Bx/2.0; Hy = By/2.0; #ifdef BUG Rprintf("=> PERIODIC: Bx = %lf, By = %lf <= \n", Bx, By); #endif #ifdef ZCOORD Bz = b[2]; Hz = Bz/2.0; #endif #endif if(N1 > 0 && N2 > 0) { i = 0; maxchunk = 0; jleft = 0; while(i < N1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > N1) maxchunk = N1; for( ; i < maxchunk; i++) { x1i = x1[i]; y1i = y1[i]; #ifdef ZCOORD z1i = z1[i]; #endif #ifdef BUG Rprintf("------ i = %d --------\n", i); Rprintf(" [%d] = (%lf, %lf)\n", i, x1i, y1i); #endif /* adjust starting point jleft */ xleft = x1i - rmaxplus; while((x2[jleft] < xleft) && (jleft+1 < N2)) ++jleft; #ifdef BUG Rprintf("\t jleft = %d\n", jleft); #endif /* process from j = jleft until dx > rmax + epsilon */ for(j=jleft; j < N2; j++) { dx = x2[j] - x1i; #ifdef BUG Rprintf("\t Central loop, j = %d, dx = %lf\n", j, dx); #endif if(dx > rmaxplus) break; dx2 = dx * dx; dy = y2[j] - y1i; #ifdef BUG Rprintf("\t\t Did not break\n\t\t dy = %lf\n", dy); #endif #ifdef TORUS if(dy < 0.0) dy = -dy; if(dy > Hy) dy = By - dy; #ifdef BUG Rprintf("\t\t periodic dy = %lf\n", dy); #endif #endif d2minr2 = dx2 + dy * dy - r2max; #ifdef ZCOORD if(d2minr2 <= 0.0) { dz = z2[j] - z1i; #ifdef TORUS if(dz < 0.0) dz = -dz; if(dz > Hz) dz = Bz - dz; #endif d2minr2 = d2minr2 + dz * dz; #endif if(d2minr2 <= 0.0) { #ifdef BUG Rprintf("\t\t Point %d has close neighbour\n", i); #endif /* point i has a close neighbour */ t[i] = 1; break; } #ifdef 
ZCOORD } #endif } #ifdef TORUS jright = j; /* wrap-around at start */ #ifdef BUG Rprintf("\t Wrap around at start for j = 0 to %d\n", jleft); #endif for(j=0; j < jleft; j++) { dx = x1i - x2[j]; #ifdef BUG Rprintf("\t\t j = %d, dx = %lf\n", j, dx); #endif if(dx < 0.0) dx = -dx; if(dx > Hx) dx = Bx - dx; #ifdef BUG Rprintf("\t\t periodic dx = %lf\n", dx); #endif if(dx > rmaxplus) break; dx2 = dx * dx; dy = y2[j] - y1i; #ifdef BUG Rprintf("\t\t Did not break\n\t\t dy = %lf\n", dy); #endif if(dy < 0.0) dy = -dy; if(dy > Hy) dy = By - dy; #ifdef BUG Rprintf("\t\t periodic dy = %lf\n", dy); #endif d2minr2 = dx2 + dy * dy - r2max; #ifdef ZCOORD if(d2minr2 <= 0.0) { dz = z2[j] - z1i; if(dz < 0.0) dz = -dz; if(dz > Hz) dz = Bz - dz; d2minr2 = d2minr2 + dz * dz; #endif if(d2minr2 <= 0.0) { /* point i has a close neighbour */ #ifdef BUG Rprintf("\t\t Point %d has close neighbour\n", i); #endif t[i] = 1; break; } #ifdef ZCOORD } #endif } /* wrap around at end */ #ifdef BUG Rprintf("\t Wrap around at end for j = %d to %d\n", N2-1, jright); #endif for(j=N2-1; j >= jright; j--) { dx = x1i - x2[j]; #ifdef BUG Rprintf("\t\t j = %d, dx = %lf\n", j, dx); #endif if(dx < 0.0) dx = -dx; if(dx > Hx) dx = Bx - dx; #ifdef BUG Rprintf("\t\t periodic dx = %lf\n", dx); #endif if(dx > rmaxplus) break; dx2 = dx * dx; dy = y2[j] - y1i; #ifdef BUG Rprintf("\t\t Did not break\n\t\t dy = %lf\n", dy); #endif if(dy < 0.0) dy = -dy; if(dy > Hy) dy = By - dy; #ifdef BUG Rprintf("\t\t periodic dy = %lf\n", dy); #endif d2minr2 = dx2 + dy * dy - r2max; #ifdef ZCOORD if(d2minr2 <= 0.0) { dz = z2[j] - z1i; if(dz < 0.0) dz = -dz; if(dz > Hz) dz = Bz - dz; d2minr2 = d2minr2 + dz * dz; #endif if(d2minr2 <= 0.0) { #ifdef BUG Rprintf("\t\t Point %d has close neighbour\n", i); #endif /* point i has a close neighbour */ t[i] = 1; break; } #ifdef ZCOORD } #endif } #endif } } } } spatstat/src/pairloop.h0000644000176200001440000000327613166361223014715 0ustar liggesusers/* pairloop.h Generic code template for loop 
collecting contributions to point x_i from all points x_j such that ||x_i - x_j|| <= r cpp variables used: INITIAL_I code executed at start of 'i' loop CONTRIBUTE_IJ code executed to compute contribution from j to i COMMIT_I code executed to save total contribution to i C variables used: int i, j, n, maxchunk; double xi, yi, dx, dy, dx2, d2, r2max; double *x, *y; $Revision: 1.4 $ $Date: 2016/07/08 03:37:11 $ */ #ifndef CHUNKLOOP_H #include "chunkloop.h" #endif #define PAIRLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I) \ OUTERCHUNKLOOP(i, n, maxchunk, 65536) { \ R_CheckUserInterrupt(); \ INNERCHUNKLOOP(i, n, maxchunk, 65536) { \ \ xi = x[i]; \ yi = y[i]; \ \ INITIAL_I; \ \ if(i > 0) { \ for(j=i-1; j >= 0; j--) { \ dx = x[j] - xi; \ dx2 = dx * dx; \ if(dx2 > r2max) \ break; \ dy = y[j] - yi; \ d2 = dx2 + dy * dy; \ if(d2 <= r2max) { \ CONTRIBUTE_IJ; \ } \ } \ } \ \ if(i+1 < n) { \ for(j=i+1; j < n; j++) { \ dx = x[j] - xi; \ dx2 = dx * dx; \ if(dx2 > r2max) \ break; \ dy = y[j] - yi; \ d2 = dx2 + dy * dy; \ if(d2 <= r2max) { \ CONTRIBUTE_IJ; \ } \ } \ } \ COMMIT_I; \ } \ } spatstat/src/linequad.c0000644000176200001440000000105413166361223014655 0ustar liggesusers#include #include #include "yesno.h" /* linequad.c make a quadrature scheme on a linear network Clinequad unmarked pattern ClineMquad multitype pattern $Revision: 1.5 $ $Date: 2016/10/03 08:43:57 $ */ #define SWAP(X,Y,TMP) TMP = Y; Y = X; X = TMP #undef HUH #define FUNNAME Clinequad #define FMKNAME ClineMquad #undef ALEA #include "linequad.h" #undef FUNNAME #undef FMKNAME #define FUNNAME ClineRquad #define FMKNAME ClineRMquad #define ALEA #include "linequad.h" #undef FUNNAME #undef FMKNAME #undef ALEA spatstat/src/seg2pix.h0000644000176200001440000001005313166361223014440 0ustar liggesusers/* seg2pix.h Code template for seg2pix.c $Revision: 1.2 $ $Date: 2015/01/08 10:57:20 $ Macros: FNAME name of function SUMUP #defined if crossings should be counted (weights summed) V matrix index macro (in seg2pix.c) DEBUG debug 
if #defined */ #undef INCREMENT #undef ZERO #ifdef SUMUP #define ZERO (double) 0.0 #define INCREMENT(I,J) V(I,J) += wi #else #define ZERO 0 #define INCREMENT(I,J) V(I,J) = 1 #endif void FNAME(ns,x0,y0,x1,y1, #ifdef SUMUP w, #endif nx,ny,out) int *ns; /* number of segments */ double *x0,*y0,*x1,*y1; /* coordinates of segment endpoints */ int *nx, *ny; /* dimensions of pixel array (columns, rows) */ #ifdef SUMUP double *w; /* weights attached to segments */ double *out; /* output totals */ #else int *out; /* output indicators */ #endif { int Ns, Nx, Ny, i, j, k, m, m0, m1, mmin, mmax, maxchunk; double x0i, x1i, y0i, y1i, dx, dy; double leni; double xleft, yleft, xright, yright, slope; double xstart, ystart, xfinish, yfinish; int mleft, mright, kstart, kfinish, kmin, kmax; #ifdef SUMUP double wi; #endif Ns = *ns; Nx = *nx; Ny = *ny; for(k = 0; k < Ny - 1; k++) for(j = 0; j < Nx - 1; j++) V(k, j) = ZERO; OUTERCHUNKLOOP(i, Ns, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 8196) { x0i = x0[i]; y0i = y0[i]; x1i = x1[i]; y1i = y1[i]; #ifdef SUMUP wi = w[i]; #endif dx = x1i - x0i; dy = y1i - y0i; leni = hypot(dx, dy); #ifdef DEBUG Rprintf("(%lf, %lf) to (%lf, %lf)\n", x0i, y0i, x1i, y1i); #endif if(leni < 0.001) { /* tiny segment */ #ifdef DEBUG Rprintf("tiny\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); INCREMENT(j, k); } else if(floor(x1i) == floor(x0i) && floor(y1i) == floor(y0i)) { /* contained in one cell */ #ifdef DEBUG Rprintf("contained in one cell\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); INCREMENT(j, k); } else if(floor(y1i) == floor(y0i)) { /* horizontal */ #ifdef DEBUG Rprintf("horizontal\n"); #endif j = clamp((int) floor(y1i), 0, Ny-1); m0 = clamp((int) floor(x0i), 0, Nx-1); m1 = clamp((int) floor(x1i), 0, Nx-1); mmin = (m0 < m1) ? m0: m1; mmax = (m0 < m1) ? 
m1: m0; #ifdef DEBUG Rprintf("row %d: columns [%d, %d]\n", j, mmin, mmax); #endif for(k = mmin; k <= mmax; k++) INCREMENT(j,k); } else if(floor(x1i) == floor(x0i)) { /* vertical */ #ifdef DEBUG Rprintf("vertical\n"); #endif k = clamp((int) floor(x1i), 0, Nx-1); m0 = clamp((int) floor(y0i), 0, Ny-1); m1 = clamp((int) floor(y1i), 0, Ny-1); mmin = (m0 < m1) ? m0: m1; mmax = (m0 < m1) ? m1: m0; #ifdef DEBUG Rprintf("column %d: rows [%d, %d]\n", k, mmin, mmax); #endif for(j = mmin; j <= mmax; j++) INCREMENT(j,k); } else { /* general case */ #ifdef DEBUG Rprintf("general\n"); #endif if(x1i > x0i) { xleft = x0i; yleft = y0i; xright = x1i; yright = y1i; } else { xleft = x1i; yleft = y1i; xright = x0i; yright = y0i; } slope = (yright - yleft)/(xright - xleft); mleft = clamp((int) floor(xleft), 0, Nx-1); mright = clamp((int) floor(xright), 0, Nx-1); #ifdef DEBUG Rprintf("column range [%d, %d]\n", mleft, mright); #endif /* treat each vertical slice */ for(m = mleft; m <= mright; m++) { if(m == mleft) { xstart = xleft; ystart = yleft; } else { xstart = m; ystart = yleft + slope * (xstart - xleft); } if(m == mright) { xfinish = xright; yfinish = yright; } else { xfinish = m+1; yfinish = yleft + slope * (xfinish - xleft); } kstart = clamp((int) floor(ystart), 0, Ny-1); kfinish = clamp((int) floor(yfinish), 0, Ny-1); kmin = (kstart < kfinish) ? kstart : kfinish; kmax = (kstart < kfinish) ? 
kfinish : kstart; #ifdef DEBUG Rprintf("column %d: rows [%d, %d]\n", m, kmin, kmax); #endif for(k = kmin; k <= kmax; k++) INCREMENT(k, m); } } /* end of if-else */ } } #ifdef DEBUG Rprintf("done\n"); #endif } spatstat/src/quasirandom.c0000644000176200001440000000105613166361223015400 0ustar liggesusers/* quasirandom.c Quasi-random sequence generators Copyright (C) Adrian Baddeley 2014 GNU Public Licence version 2 | 3 $Revision: 1.1 $ $Date: 2014/03/17 03:31:59 $ */ #include void Corput(base, n, result) int *base, *n; double *result; { int b, N, i, j; register double f, f0, z; N = *n; b = *base; f0 = 1.0/((double) b); for(i = 0; i < N; i++) { j = i+1; z = 0; f = f0; while(j > 0) { z = z + f * (j % b); j = j/b; f = f / ((double) b); } result[i] = z; } } spatstat/src/nnMDdist.c0000755000176200001440000004360113166361223014602 0ustar liggesusers/* nnMDdist.c Nearest Neighbour Distances in m dimensions $Revision: 1.8 $ $Date: 2013/05/27 02:09:10 $ Argument x is an m * n matrix with columns corresponding to points and rows corresponding to coordinates. 
Spatial dimension m must be > 1 THE FOLLOWING FUNCTIONS ASSUME THAT THE ROWS OF x ARE SORTED IN ASCENDING ORDER OF THE FIRST COLUMN nndMD Nearest neighbour distances nnwMD Nearest neighbours and their distances nnXwMD Nearest neighbour from one list to another nnXxMD Nearest neighbour from one list to another, with overlaps knndMD k-th nearest neighbour distances knnwMD k-th nearest neighbours and their distances */ #undef SPATSTAT_DEBUG #include #include #include #include "chunkloop.h" #include "yesno.h" double sqrt(); void nndMD(n, m, x, nnd, huge) /* inputs */ int *n, *m; double *x, *huge; /* output */ double *nnd; { int npoints, mdimen, i, j, left, right, leftpos, rightpos, maxchunk; double d2, d2min, hu, hu2, xi0, dx0, dxj; double *xi; npoints = *n; mdimen = *m; xi = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ hu = *huge; hu2 = hu * hu; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif d2min = hu2; for(j = 0; j < mdimen; j++) xi[j] = x[i * mdimen + j]; xi0 = xi[0]; #ifdef SPATSTAT_DEBUG Rprintf("\n ("); for(j = 0; j < mdimen; j++) Rprintf("%lf, ", x[i * mdimen + j]); Rprintf(")\n"); #endif /* search backward */ if(i > 0) { for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L=%d, d2min=%lf\n", left, d2min); #endif dx0 = xi0 - x[left * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; leftpos = left * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[leftpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; #ifdef SPATSTAT_DEBUG Rprintf("\tupdating d2min=%lf\n", d2min); #endif } } } /* search forward */ if(i < npoints - 1) { for(right = i + 1; right < npoints; ++right) { #ifdef SPATSTAT_DEBUG Rprintf("R=%d, d2min=%lf\n", right, d2min); #endif dx0 = x[right * mdimen] - xi0; d2 = dx0 * dx0; if(d2 > d2min) break; rightpos = right * 
mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[rightpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; #ifdef SPATSTAT_DEBUG Rprintf("\tupdating d2min=%lf\n", d2min); #endif } } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif nnd[i] = sqrt(d2min); } } } /* nnwMD: same as nndMD, but also returns id of nearest neighbour */ void nnwMD(n, m, x, nnd, nnwhich, huge) /* inputs */ int *n, *m; double *x, *huge; /* output */ double *nnd; int *nnwhich; { int npoints, mdimen, i, j, left, right, leftpos, rightpos, which, maxchunk; double d2, d2min, hu, hu2, xi0, dx0, dxj; double *xi; npoints = *n; mdimen = *m; xi = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ hu = *huge; hu2 = hu * hu; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif d2min = hu2; which = -1; for(j = 0; j < mdimen; j++) xi[j] = x[i * mdimen + j]; xi0 = xi[0]; /* search backward */ if(i > 0) { for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L"); #endif dx0 = xi0 - x[left * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; leftpos = left * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[leftpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; which = left; } } } /* search forward */ if(i < npoints - 1) { for(right = i + 1; right < npoints; ++right) { #ifdef SPATSTAT_DEBUG Rprintf("R"); #endif dx0 = x[right * mdimen] - xi0; d2 = dx0 * dx0; if(d2 > d2min) break; rightpos = right * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[rightpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; which = right; } } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif nnd[i] = sqrt(d2min); /* convert index to R convention */ nnwhich[i] = which + 1; } } } /* nnXwMD: for TWO point patterns X and Y, find the nearest neighbour (from each point of X to the 
nearest point of Y) returning both the distance and the identifier Requires both patterns to be sorted in order of increasing z coord */ void nnXwMD(m, n1, x1, n2, x2, nnd, nnwhich, huge) /* inputs */ int *m, *n1, *n2; double *x1, *x2, *huge; /* outputs */ double *nnd; int *nnwhich; { int mdimen, npoints1, npoints2, i, ell, jleft, jright, jwhich, lastjwhich; double d2, d2min, x1i0, dx0, dxell, hu, hu2; double *x1i; int maxchunk; hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; mdimen = *m; if(npoints1 == 0 || npoints2 == 0) return; x1i = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ lastjwhich = 0; OUTERCHUNKLOOP(i, npoints1, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints1, maxchunk, 16384) { d2min = hu2; jwhich = -1; for(ell = 0; ell < mdimen; ell++) x1i[ell] = x1[i * mdimen + ell]; x1i0 = x1i[0]; /* search backward from previous nearest neighbour */ if(lastjwhich > 0) { for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dx0 = x1i0 - x2[jleft * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; for(ell = 1; ell < mdimen && d2 < d2min; ell++) { dxell = x1i[ell] - x2[jleft * mdimen + ell]; d2 += dxell * dxell; } if (d2 < d2min) { d2min = d2; jwhich = jleft; } } } /* search forward from previous nearest neighbour */ if(lastjwhich < npoints2) { for(jright = lastjwhich; jright < npoints2; ++jright) { dx0 = x2[jright * mdimen] - x1i0; d2 = dx0 * dx0; if(d2 > d2min) break; for(ell = 1; ell < mdimen && d2 < d2min; ell++) { dxell = x1i[ell] - x2[jright * mdimen + ell]; d2 += dxell * dxell; } if (d2 < d2min) { d2min = d2; jwhich = jright; } } } nnd[i] = sqrt(d2min); nnwhich[i] = jwhich; lastjwhich = jwhich; } } } /* nnXxMD: similar to nnXwMD but allows X and Y to include common points (which are not to be counted as neighbours) Code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = 
id2[j]. Requires both patterns to be sorted in order of increasing y coord */ void nnXxMD(m, n1, x1, id1, n2, x2, id2, nnd, nnwhich, huge) /* inputs */ int *m, *n1, *n2; double *x1, *x2, *huge; int *id1, *id2; /* outputs */ double *nnd; int *nnwhich; { int mdimen, npoints1, npoints2, i, ell, jleft, jright, jwhich, lastjwhich, id1i; double d2, d2min, x1i0, dx0, dxell, hu, hu2; double *x1i; int maxchunk; hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; mdimen = *m; if(npoints1 == 0 || npoints2 == 0) return; x1i = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ lastjwhich = 0; OUTERCHUNKLOOP(i, npoints1, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints1, maxchunk, 16384) { d2min = hu2; jwhich = -1; id1i = id1[i]; for(ell = 0; ell < mdimen; ell++) x1i[ell] = x1[i * mdimen + ell]; x1i0 = x1i[0]; /* search backward from previous nearest neighbour */ if(lastjwhich > 0) { for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dx0 = x1i0 - x2[jleft * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; /* do not compare identical points */ if(id2[jleft] != id1i) { for(ell = 1; ell < mdimen && d2 < d2min; ell++) { dxell = x1i[ell] - x2[jleft * mdimen + ell]; d2 += dxell * dxell; } if (d2 < d2min) { d2min = d2; jwhich = jleft; } } } } /* search forward from previous nearest neighbour */ if(lastjwhich < npoints2) { for(jright = lastjwhich; jright < npoints2; ++jright) { dx0 = x2[jright * mdimen] - x1i0; d2 = dx0 * dx0; if(d2 > d2min) break; /* do not compare identical points */ if(id2[jright] != id1i) { for(ell = 1; ell < mdimen && d2 < d2min; ell++) { dxell = x1i[ell] - x2[jright * mdimen + ell]; d2 += dxell * dxell; } if (d2 < d2min) { d2min = d2; jwhich = jright; } } } } nnd[i] = sqrt(d2min); nnwhich[i] = jwhich; lastjwhich = jwhich; } } } /* knndMD nearest neighbours 1:kmax */ void knndMD(n, m, kmax, x, nnd, huge) /* inputs */ int *n, *m, *kmax; double *x, *huge; /* output matrix (kmax 
* npoints) */ double *nnd; { int npoints, mdimen, nk, nk1, i, j, k, k1, left, right, unsorted, maxchunk; double d2, d2minK, xi0, dx0, dxj, hu, hu2, tmp; double *d2min, *xi; hu = *huge; hu2 = hu * hu; npoints = *n; mdimen = *m; nk = *kmax; nk1 = nk - 1; /* create space to store the squared k-th nearest neighbour distances for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); /* scratch space */ xi = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* loop over points */ OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif /* initialise nn distances */ d2minK = hu2; for(k = 0; k < nk; k++) d2min[k] = hu2; for(j = 0; j < mdimen; j++) xi[j] = x[i* mdimen + j]; xi0 = xi[0]; #ifdef SPATSTAT_DEBUG Rprintf("\n ("); for(j = 0; j < mdimen; j++) Rprintf("%lf, ", xi[j]); Rprintf(")\n"); #endif /* search backward */ for(left = i - 1; left >= 0; --left) { dx0 = xi0 - x[left * mdimen]; d2 = dx0 * dx0; if(d2 > d2minK) break; #ifdef SPATSTAT_DEBUG Rprintf("L=%d\n", left); Rprintf("\t 0 "); #endif for(j = 1; j < mdimen && d2 < d2minK; j++) { #ifdef SPATSTAT_DEBUG Rprintf("%d ", j); #endif dxj = xi[j] - x[left * mdimen + j]; d2 += dxj * dxj; } #ifdef SPATSTAT_DEBUG Rprintf("\n\t d2=%lf\n", d2); #endif if (d2 < d2minK) { /* overwrite last entry */ #ifdef SPATSTAT_DEBUG Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]); #endif d2min[nk1] = d2; /* bubble sort */ #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); #endif unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; } else { unsorted = NO; } } #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); 
#endif /* adjust maximum distance */ d2minK = d2min[nk1]; } } /* search forward */ for(right = i + 1; right < npoints; ++right) { #ifdef SPATSTAT_DEBUG Rprintf("R=%d\n", right); Rprintf("\t 0 "); #endif dx0 = x[right * mdimen] - xi0; d2 = dx0 * dx0; if(d2 > d2minK) break; for(j = 1; j < mdimen && d2 < d2minK; j++) { #ifdef SPATSTAT_DEBUG Rprintf("%d ", j); #endif dxj = xi[j] - x[right * mdimen + j]; d2 += dxj * dxj; } #ifdef SPATSTAT_DEBUG Rprintf("\n\t d2=%lf\n", d2); #endif if (d2 < d2minK) { #ifdef SPATSTAT_DEBUG Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]); #endif /* overwrite last entry */ d2min[nk1] = d2; /* bubble sort */ #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); #endif unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; } else { unsorted = NO; } } #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { nnd[nk * i + k] = sqrt(d2min[k]); } } } } /* knnwMD nearest neighbours 1:kmax returns distances and indices */ void knnwMD(n, m, kmax, x, nnd, nnwhich, huge) /* inputs */ int *n, *m, *kmax; double *x, *huge; /* output matrix (kmax * npoints) */ double *nnd; int *nnwhich; { int npoints, mdimen, nk, nk1, i, j, k, k1, left, right, unsorted, itmp; double d2, d2minK, xi0, dx0, dxj, hu, hu2, tmp; double *d2min, *xi; int *which; int maxchunk; hu = *huge; hu2 = hu * hu; npoints = *n; mdimen = *m; nk = *kmax; nk1 = nk - 1; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, 
sizeof(double)); which = (int *) R_alloc((size_t) nk, sizeof(int)); /* scratch space */ xi = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* loop over points */ OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif /* initialise nn distances */ d2minK = hu2; for(k = 0; k < nk; k++) { d2min[k] = hu2; which[k] = -1; } for(j = 0; j < mdimen; j++) xi[j] = x[i* mdimen + j]; xi0 = xi[0]; #ifdef SPATSTAT_DEBUG Rprintf("\n ("); for(j = 0; j < mdimen; j++) Rprintf("%lf, ", x[i * mdimen + j]); Rprintf(")\n"); #endif /* search backward */ for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L=%d, d2minK=%lf\n", left, d2minK); Rprintf("\t 0 "); #endif dx0 = xi0 - x[left * mdimen]; d2 = dx0 * dx0; if(d2 > d2minK) break; for(j = 1; j < mdimen && d2 < d2minK; j++) { #ifdef SPATSTAT_DEBUG Rprintf("%d ", j); #endif dxj = xi[j] - x[left * mdimen + j]; d2 += dxj * dxj; } if (d2 < d2minK) { #ifdef SPATSTAT_DEBUG Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]); #endif /* overwrite last entry */ d2min[nk1] = d2; which[nk1] = left; /* bubble sort */ #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); Rprintf("\twhich[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]); Rprintf("\n"); #endif unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; } else { unsorted = NO; } } #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); Rprintf("\twhich[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]); Rprintf("\n"); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; 
} } /* search forward */ for(right = i + 1; right < npoints; ++right) { #ifdef SPATSTAT_DEBUG Rprintf("R=%d, d2minK=%lf\n", right, d2minK); Rprintf("\t 0 "); #endif dx0 = x[right * mdimen] - xi0; d2 = dx0 * dx0; if(d2 > d2minK) break; for(j = 1; j < mdimen && d2 < d2minK; j++) { #ifdef SPATSTAT_DEBUG Rprintf("%d ", j); #endif dxj = xi[j] - x[right * mdimen + j]; d2 += dxj * dxj; } if (d2 < d2minK) { #ifdef SPATSTAT_DEBUG Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]); #endif /* overwrite last entry */ d2min[nk1] = d2; which[nk1] = right; /* bubble sort */ #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); Rprintf("\twhich[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]); Rprintf("\n"); #endif unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; } else { unsorted = NO; } } #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); Rprintf("\twhich[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]); Rprintf("\n"); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { nnd[nk * i + k] = sqrt(d2min[k]); /* convert index back to R convention */ nnwhich[nk * i + k] = which[k] + 1; } } } } spatstat/src/knn3Ddist.h0000644000176200001440000000730113166361223014722 0ustar liggesusers/* knn3Ddist.h Code template for k-nearest-neighbour algorithms for 3D point patterns Input is a single point pattern - supports 'nndist' and 'nnwhich' This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if 
function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. THE FOLLOWING CODE ASSUMES THAT THE POINT PATTERN IS SORTED IN ASCENDING ORDER OF THE z COORDINATE $Revision: 1.3 $ $Date: 2013/06/29 02:38:19 $ */ void FNAME(n, kmax, x, y, z, nnd, nnwhich, huge) /* inputs */ int *n, *kmax; double *x, *y, *z, *huge; /* output matrices (npoints * kmax) in ROW MAJOR order */ double *nnd; int *nnwhich; { int npoints, nk, nk1, i, j, k, k1, unsorted, maxchunk; double d2, d2minK, xi, yi, zi, dx, dy, dz, dz2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif hu = *huge; hu2 = hu * hu; npoints = *n; nk = *kmax; nk1 = nk - 1; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop over points */ OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif /* initialise nn distances and indices */ d2minK = hu2; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } xi = x[i]; yi = y[i]; zi = z[i]; /* search backward */ if(i > 0) { for(j = i - 1; j >= 0; --j) { #ifdef SPATSTAT_DEBUG Rprintf("L"); #endif dz = z[j] - zi; dz2 = dz * dz; if(dz2 > d2minK) break; dx = x[j] - xi; dy = y[j] - yi; d2 = dx * dx + dy * dy + dz2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = j; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } } /* search 
forward */ if(i + 1 < npoints) { for(j = i + 1; j < npoints; ++j) { #ifdef SPATSTAT_DEBUG Rprintf("R"); #endif dz = z[j] - zi; dz2 = dz * dz; if(dz2 > d2minK) break; dx = x[j] - xi; dy = y[j] - yi; d2 = dx * dx + dy * dy + dz2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = j; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif /* calculate nn distances for point i and copy to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH /* convert from C to R indexing */ nnwhich[nk * i + k] = which[k] + 1; #endif } } } } spatstat/src/mhv5.h0000644000176200001440000000037013166361223013737 0ustar liggesusers/* mhv5.h tempered or not */ #undef MH_TEMPER if(tempered) { /* tempering */ #define MH_TEMPER YES #include "mhloop.h" #undef MH_TEMPER } else { /* usual, no tempering */ #define MH_TEMPER NO #include "mhloop.h" #undef MH_TEMPER } spatstat/src/fexitc.c0000755000176200001440000000045513166361223014344 0ustar liggesusers# include # include # include void fexitc(const char *msg) { size_t nc = strlen(msg); char buf[256]; if(nc > 255) { warning("invalid character length in fexitc"); nc = 255; } strncpy(buf, msg, nc); buf[nc] = '\0'; error(buf); } spatstat/src/nngrid.h0000644000176200001440000000572613166361223014353 0ustar liggesusers #if (1 == 0) /* nngrid.h Code template for C functions nearest neighbour of each grid point THE FOLLOWING CODE ASSUMES THAT POINT PATTERN (xp, yp) IS SORTED IN ASCENDING ORDER OF x COORDINATE This code is #included multiple times in nngrid.c Variables used: FNAME function name DIST #defined 
if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.4 $ $Date: 2014/02/18 08:43:29 $ */ #endif void FNAME(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int Nxcol, Nyrow, Npoints; int i, j, ijpos; int mleft, mright, mwhich, lastmwhich; double X0, Y0, Xstep, Ystep; double d2, d2min, xj, yi, dx, dy, dx2, hu, hu2; Nxcol = *nx; Nyrow = *ny; Npoints = *np; hu = *huge; X0 = *x0; Y0 = *y0; Xstep = *xstep; Ystep = *ystep; hu2 = hu * hu; if(Npoints == 0) return; lastmwhich = 0; /* loop over pixels */ for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) { R_CheckUserInterrupt(); for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) { /* reset nn distance and index */ d2min = hu2; mwhich = -1; if(lastmwhich < Npoints) { /* search forward from previous nearest neighbour */ for(mright = lastmwhich; mright < Npoints; ++mright) { dx = xp[mright] - xj; dx2 = dx * dx; if(dx2 > d2min) /* note that dx2 >= d2min could break too early */ break; dy = yp[mright] - yi; d2 = dy * dy + dx2; if (d2 < d2min) { /* save as nearest neighbour */ d2min = d2; mwhich = mright; } } /* end forward search */ } if(lastmwhich > 0) { /* search backward from previous nearest neighbour */ for(mleft = lastmwhich - 1; mleft >= 0; --mleft) { dx = xj - xp[mleft]; dx2 = dx * dx; if(dx2 > d2min) /* note that dx2 >= d2min could break too early */ break; dy = yp[mleft] - yi; d2 = dy * dy + dx2; if (d2 < d2min) { /* save as nearest neighbour */ d2min = d2; mwhich = mleft; } } /* end backward search */ } /* remember index of most recently-encountered neighbour */ lastmwhich 
= mwhich; /* copy nn distance for grid point (i, j) to output array nnd[i, j] */ ijpos = i + j * Nyrow; #ifdef DIST nnd[ijpos] = sqrt(d2min); #endif #ifdef WHICH nnwhich[ijpos] = mwhich + 1; /* R indexing */ #endif /* end of loop over grid points (i, j) */ } } } spatstat/src/KrectV4.h0000644000176200001440000000027513166361223014346 0ustar liggesusers/* KrectV5.h with or without uncorrected estimator */ if((*doUnco) == 1) { #define UNCORRECTED #include "KrectBody.h" } else { #undef UNCORRECTED #include "KrectBody.h" } spatstat/src/KrectV2.h0000644000176200001440000000027313166361223014342 0ustar liggesusers/* KrectV3.h with or without translation correction */ if((*doTrans) == 1) { #define TRANSLATION #include "KrectV3.h" } else { #undef TRANSLATION #include "KrectV3.h" } spatstat/src/Efiksel.c0000755000176200001440000000317513166361223014446 0ustar liggesusers#include #include #include "chunkloop.h" #include "looptest.h" /* Efiksel.c $Revision: 1.3 $ $Date: 2012/03/28 05:55:29 $ C implementation of 'eval' for Fiksel interaction (non-hardcore part) Assumes point patterns are sorted in increasing order of x coordinate */ double sqrt(), exp(); void Efiksel(nnsource, xsource, ysource, nntarget, xtarget, ytarget, rrmax, kkappa, values) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget, *rrmax, *kkappa; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double rmax, r2max, r2maxpluseps, kappa, total; nsource = *nnsource; ntarget = *nntarget; rmax = *rrmax; kappa = *kkappa; if(nsource == 0 || ntarget == 0) return; r2max = rmax * rmax; r2maxpluseps = r2max + EPSILON(r2max); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 16384) { total = 0; xsourcej = xsource[j]; ysourcej = ysource[j]; /* adjust starting point */ xleft = xsourcej - rmax; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) 
++ileft; /* process from ileft until dx > rmax */ for(i=ileft; i < ntarget; i++) { /* squared interpoint distance */ dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > r2maxpluseps) break; dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= r2max) total += exp(- kappa * sqrt(d2)); } values[j] = total; } } } spatstat/src/loccum.c0000644000176200001440000000267113166361223014343 0ustar liggesusers#include #include #include #include "chunkloop.h" /* loccum.c $Revision: 1.1 $ $Date: 2013/05/27 02:09:10 $ Compute local cumulative sums or products of weights locsum: f_i(t) = \sum_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j) for a data point pattern {x_i} locxsum: f_u(t) = \sum_{||x_i - u|| \le t} v(x_i) for a grid of points {u} and a data point pattern {x_i} (also works if {u} is another point pattern) locprod: f_i(t) = \prod_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j) for a data point pattern {x_i} locxprod: f_u(t) = \prod_{||x_i - u|| \le t} v(x_i) for a grid of points {u} and a data point pattern {x_i} (also works if {u} is another point pattern) Assumes point patterns are sorted in increasing order of x coordinate Uses C code template files : loccums.h, loccumx.h */ /* data-to-data */ #undef FNAME #undef NULVAL #undef INC #define FNAME locsum #define NULVAL 0.0 #define INC(A,B) A += B #include "loccums.h" #undef FNAME #undef NULVAL #undef INC #define FNAME locprod #define NULVAL 1.0 #define INC(A,B) A *= B #include "loccums.h" /* test-grid-to-data */ #undef FNAME #undef NULVAL #undef INC #define FNAME locxsum #define NULVAL 0.0 #define INC(A,B) A += B #include "loccumx.h" #undef FNAME #undef NULVAL #undef INC #define FNAME locxprod #define NULVAL 1.0 #define INC(A,B) A *= B #include "loccumx.h" spatstat/src/k3.c0000755000176200001440000000716313166361223013402 0ustar liggesusers#include #include #include "geom3.h" #include "functable.h" /* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ K function of 3D point pattern k3trans translation correction k3isot isotropic 
correction # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ void k3trans(p, n, b, k) Point *p; int n; Box *b; Ftable *k; { register int i, j, l, lmin; register double dx, dy, dz, dist; register double vx, vy, vz; Point *ip, *jp; double dt, vol, lambda, denom, term; double sphesfrac(), sphevol(); /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); lambda = ((double) n )/ vol; denom = lambda * lambda; for(l = 0; l < k->n; l++) { (k->denom)[l] = denom; (k->num)[l] = 0.0; } /* spacing of argument in result vector k */ dt = (k->t1 - k->t0)/(k->n - 1); /* compute numerator */ for( i = 0; i < n; i++) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( (dist - k->t0) / dt ); if(lmin < 0) lmin = 0; vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx); vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy); vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz); if(vx >= 0.0 && vy >= 0.0 && vz >= 0.0) { term = 2.0 /(vx * vy * vz); /* 2 because they're ordered pairs */ for(l = lmin; l < k->n; l++) (k->num)[l] += term; } } } /* compute ratio */ for(l = 0; l < k->n; l++) (k->f)[l] = ((k->denom)[l] > 0.0)? 
(k->num)[l] / (k->denom)[l] : 0.0; } void k3isot(p, n, b, k) Point *p; int n; Box *b; Ftable *k; { register int i, j, l, lmin; register double dx, dy, dz, dist; Point *ip, *jp; double dt, vol, denom, term; double sphesfrac(), sphevol(); Point vertex; Box half; /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); denom = ((double) (n * n))/vol; for(l = 0; l < k->n; l++) { (k->denom)[l] = denom; (k->num)[l] = 0.0; } /* spacing of argument in result vector k */ dt = (k->t1 - k->t0)/(k->n - 1); /* set up for volume correction */ vertex.x = b->x0; vertex.y = b->y0; vertex.z = b->z0; half.x1 = b->x1; half.y1 = b->y1; half.z1 = b->z1; half.x0 = (b->x0 + b->x1)/2.0; half.y0 = (b->y0 + b->y1)/2.0; half.z0 = (b->z0 + b->z1)/2.0; /* compute numerator */ for( i = 0; i < n; i++) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( (dist - k->t0) / dt ); if(lmin < 0) lmin = 0; term = (1.0 / sphesfrac(ip, b, dist)) + (1.0 / sphesfrac(jp, b, dist)); term *= 1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol; for(l = lmin; l < k->n; l++) (k->num)[l] += term; } } /* compute ratio */ for(l = 0; l < k->n; l++) (k->f)[l] = ((k->denom)[l] > 0.0)? 
(k->num)[l] / (k->denom)[l] : 0.0; } spatstat/src/g3.c0000755000176200001440000001266613166361223013402 0ustar liggesusers#include #include #include "geom3.h" #include "functable.h" /* $Revision: 1.3 $ $Date: 2012/05/22 07:17:31 $ G function (nearest neighbour distribution) of 3D point pattern Let b = distance from point p[i] to boundary of box d = distance from p[i] to nearest p[j] method = 1 naive ratio estimator (Ripley 1981) numerator(r) = count(i: b >= r, d <= r) denominator(r) = count(i: b >= r) method = 2 minus sampling estimator numerator(r) = count(i: b >= r, d <= r) denominator(r) = lambda * volume(x: b >= r) where lambda = (no of points)/volume(box) method = 3 Hanisch's G3 numerator(r) = count(i: b >= d, d <= r) denominator(r) = count(i: b >= d) method = 4 Hanisch's G4 numerator(r) = count(i: b >= d, d <= r) denominator(r) = fudge * volume(x: b >= r) fudge = numerator(R)/denominator(R) R = sup{r : denominator(r) > 0 } # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2012. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #define MIN(X,Y) (((X) > (Y)) ? 
(Y) : (X)) double * nndist3(p, n, b) /* compute nearest neighbour distance for each p[i] */ Point *p; int n; Box *b; { register int i, j; register double dx, dy, dz, dist2, nearest2, huge2; Point *ip, *jp; double *nnd; nnd = (double *) R_alloc(n, sizeof(double)); dx = b->x1 - b->x0; dy = b->y1 - b->y0; dz = b->z1 - b->z0; huge2 = 2.0 * (dx * dx + dy * dy + dz * dz); /* scan each point and find closest */ for( i = 0; i < n; i++) { ip = p + i; nearest2 = huge2; for(j = 0; j < n; j++) if(j != i) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist2 = dx * dx + dy * dy + dz * dz; if(dist2 < nearest2) nearest2 = dist2; } nnd[i] = sqrt(nearest2); } return(nnd); } double * border3(p, n, b) /* compute distances to border */ Point *p; int n; Box *b; { register int i; register double bord; register Point *ip; double *bored; bored = (double *) R_alloc(n, sizeof(double)); for( i = 0; i < n; i++) { ip = p + i; bord = MIN(ip->x - b->x0, b->x1 - ip->x); bord = MIN(bord, ip->y - b->y0); bord = MIN(bord, b->y1 - ip->y); bord = MIN(bord, ip->z - b->z0); bord = MIN(bord, b->z1 - ip->z); bored[i] = bord; } return(bored); } void g3one(p, n, b, g) Point *p; int n; Box *b; Ftable *g; { register int i, l, lbord, lnnd; double dt; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* initialise */ for(l = 0; l < g->n; l++) (g->num)[l] = (g->denom)[l] = 0.0; /* spacing of argument in result vector g */ dt = (g->t1 - g->t0)/(g->n - 1); for(i = 0; i < n; i++) { lbord = floor( (bord[i] - g->t0) / dt ); if(lbord >= g->n) lbord = g->n - 1; for(l = 0; l <= lbord; l++) (g->denom)[l] += 1.0; lnnd = ceil( (nnd[i] - g->t0) / dt ); if(lnnd < 0) lnnd = 0; for(l = lnnd; l <= lbord; l++) (g->num)[l] += 1.0; } /* compute ratio */ for(l = 0; l < g->n; l++) (g->f)[l] = ((g->denom)[l] > 0)? 
(g->num)[l] / (g->denom)[l] : 1.0; } void g3three(p, n, b, g) Point *p; int n; Box *b; Ftable *g; { register int i, l, lmin; double dt; int denom; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* initialise */ denom = 0; for(l = 0; l < g->n; l++) (g->num)[l] = 0.0; /* spacing of argument in result vector g */ dt = (g->t1 - g->t0)/(g->n - 1); for(i = 0; i < n; i++) { if(nnd[i] <= bord[i]) { ++denom; lmin = ceil( (nnd[i] - g->t0) / dt ); if(lmin < 0) lmin = 0; for(l = lmin; l < g->n; l++) (g->num)[l] += 1.0; } } /* compute ratio */ for(l = 0; l < g->n; l++) { (g->denom)[l] = denom; (g->f)[l] = (denom > 0)? (g->num)[l] / (double) denom : 1.0; } } void g3cen(p, n, b, count) Point *p; int n; Box *b; H4table *count; { register int i, lcen, lobs; register double dt, cens, obsv; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* spacing of histogram cells */ dt = (count->t1 - count->t0)/(count->n - 1); /* 'count' is assumed to have been initialised */ for(i = 0; i < n; i++) { obsv = nnd[i]; cens = bord[i]; lobs = ceil( (obsv - count->t0) / dt ); lcen = floor( (cens - count->t0) / dt ); if(obsv <= cens) { /* observation is uncensored; increment all four histograms */ if(lobs >= count->n) ++(count->upperobs); else if(lobs >= 0) { (count->obs)[lobs]++; (count->nco)[lobs]++; } if(lcen >= count->n) ++(count->uppercen); else if(lcen >= 0) { (count->cen)[lcen]++; (count->ncc)[lcen]++; } } else { /* observation is censored; increment only two histograms */ lobs = MIN(lobs, lcen); if(lobs >= count->n) ++(count->upperobs); else if(lobs >= 0) (count->obs)[lobs]++; if(lcen >= count->n) ++(count->uppercen); else if(lcen >= 0) (count->cen)[lcen]++; } } } spatstat/src/trigraf.c0000755000176200001440000006777013166361223014535 0ustar liggesusers/* trigraf.c Form list of all triangles in a planar graph, given list of edges $Revision: 1.14 $ $Date: 2016/11/22 09:19:42 $ Form list of all triangles in a planar graph, given list of edges Note: vertex 
indices ie, je are indices in R. They are handled without converting to C convention, because we only need to test equality and ordering. (*except in 'trioxgraph'*) Called by .C: ------------- trigraf() Generic C implementation with fixed storage limit usable with Delaunay triangulation trigrafS() Faster version when input data are sorted (again with predetermined storage limit) suited for handling Delaunay triangulation Called by .Call: --------------- trigraph() Version with dynamic storage allocation triograph() Faster version assuming 'iedge' is sorted in increasing order trioxgraph() Even faster version for use with quadrature schemes Diameters: ----------- triDgraph() Also computes diameters of triangles */ #include #include #include #include "chunkloop.h" #undef DEBUGTRI void trigraf(nv, ne, ie, je, ntmax, nt, it, jt, kt, status) /* inputs */ int *nv; /* number of graph vertices */ int *ne; /* number of edges */ int *ie, *je; /* vectors of indices of ends of each edge */ int *ntmax; /* length of storage space for triangles */ /* output */ int *nt; /* number of triangles (<= *ntmax) */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int *status; /* 0 if OK, 1 if overflow */ { int Nv, Ne, Ntmax; int Nt, Nj, m, i, j, k, mj, mk, maxchunk; int *jj; Nv = *nv; Ne = *ne; Ntmax = *ntmax; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); Nt = 0; /* vertex index i ranges from 1 to Nv */ XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ if(Nj > 1) { /* Sort jj in ascending order */ 
for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - exit */ *status = 1; return; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } } } } *nt = Nt; *status = 0; } /* faster version of trigraf() assuming that ie[m] < je[m] ie[] is in ascending order je[] is in ascending order within ie[], that is, je[ie[]=i] is in ascending order for each fixed i */ void trigrafS(nv, ne, ie, je, ntmax, nt, it, jt, kt, status) /* inputs */ int *nv; /* number of graph vertices */ int *ne; /* number of edges */ int *ie, *je; /* vectors of indices of ends of each edge */ int *ntmax; /* length of storage space for triangles */ /* output */ int *nt; /* number of triangles */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int *status; /* 0 if OK, 1 if overflow */ { int Ne, Nt, Ntmax; int m, i, j, k, mj, mk; int firstedge, lastedge; Ne = *ne; Ntmax = *ntmax; /* nv is not used, but retained for harmony with trigraf */ /* Avoid compiler warnings */ Nt = *nv; /* initialise output */ Nt = 0; lastedge = -1; while(lastedge + 1 < Ne) { if(lastedge % 256 == 0) R_CheckUserInterrupt(); /* Consider next vertex i. The edges (i,j) with i < j appear contiguously in the edge list. */ firstedge = lastedge + 1; i = ie[firstedge]; for(m= firstedge+1; m < Ne && ie[m] == i; m++) ; lastedge = m-1; /* Consider each pair j, k of neighbours of i, where i < j < k. Scan entire edge list to determine whether j, k are joined by an edge. 
If so, save triangle (i,j,k) */ if(lastedge > firstedge) { for(mj = firstedge; mj < lastedge; mj++) { j = je[mj]; for(mk = firstedge+1; mk <= lastedge; mk++) { k = je[mk]; /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne && ie[m] < j; m++) ; while(m < Ne && ie[m] == j) { if(je[m] == k) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - exit */ *status = 1; return; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } m++; } } } } } *nt = Nt; *status = 0; } /* ------------------- callable by .Call ------------------------- */ SEXP trigraph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge) /* all arguments are integer */ { int Nv, Ne; int *ie, *je; /* edges */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int Nt, Ntmax; /* number of triangles */ int Nj; int *jj; /* scratch storage */ int i, j, k, m, mj, mk, Nmore, maxchunk; /* output */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); /* That's 3 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < 
Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } /* output indices in R convention */ it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 3+4=7 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 
2, kTout); UNPROTECT(7); return(out); } /* faster version assuming iedge is in increasing order */ SEXP triograph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge) /* all arguments are integer */ { int Nv, Ne; int *ie, *je; /* edges */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int Nt, Ntmax; /* number of triangles */ int Nj; int *jj; /* scratch storage */ int i, j, k, m, mj, mk, maxjk, Nmore, maxchunk; /* output */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); /* That's 3 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) 
{ j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ maxjk = (j > k) ? j : k; for(m = 0; m < Ne; m++) { if(ie[m] > maxjk) break; /* since iedge is in increasing order, the test below will always be FALSE when ie[m] > max(j,k) */ if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 3+4=7 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); UNPROTECT(7); return(out); } /* Even faster version using information about dummy vertices. Dummy-to-dummy edges are forbidden. For generic purposes use 'friendly' for 'isdata' Edge between j and k is possible iff friendly[j] || friendly[k]. Edges with friendly = FALSE cannot be connected to one another. 
*/ SEXP trioxgraph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge, SEXP friendly) /* indicator vector, length nv */ { /* input */ int Nv, Ne; int *ie, *je; /* edges */ int *friend; /* indicator */ /* output */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int Nt, Ntmax; /* number of triangles */ /* scratch storage */ int Nj; int *jj; int i, j, k, m, mj, mk, maxjk, Nmore, maxchunk; /* output to R */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); PROTECT(friendly = AS_INTEGER(friendly)); /* That's 4 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); friend = INTEGER_POINTER(friendly); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); /* convert to C indexing convention */ for(m = 0; m < Ne; m++) { ie[m] -= 1; je[m] -= 1; } OUTERCHUNKLOOP(i, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", 
i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k && (friend[j] || friend[k])) { /* Run through edges to determine whether j, k are neighbours */ maxjk = (j > k) ? j : k; for(m = 0; m < Ne; m++) { if(ie[m] > maxjk) break; /* since iedge is in increasing order, the test below will always be FALSE when ie[m] > max(j,k) */ if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } /* convert back to R indexing */ it[Nt] = i + 1; jt[Nt] = j + 1; kt[Nt] = k + 1; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 4+4=8 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); UNPROTECT(8); return(out); } /* also calculates diameter (max edge length) of triangle */ SEXP triDgraph(SEXP nv, /* number of vertices */ 
SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge, SEXP edgelength) /* edge lengths */ { int Nv, Ne; int *ie, *je; /* edges */ double *edgelen; int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ double *dt; /* diameters (max edge lengths) of triangles */ int Nt, Ntmax; /* number of triangles */ /* scratch storage */ int Nj; int *jj; double *dd; int i, j, k, m, mj, mk, Nmore, maxchunk; double dij, dik, djk, diam; /* output */ SEXP iTout, jTout, kTout, dTout, out; int *ito, *jto, *kto; double *dto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); PROTECT(edgelength = AS_NUMERIC(edgelength)); /* That's 4 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); edgelen = NUMERIC_POINTER(edgelength); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); dt = (double *) R_alloc(Ntmax, sizeof(double)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); dd = (double *) R_alloc(Ne, sizeof(double)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; dd[Nj] = edgelen[m]; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; dd[Nj] = edgelen[m]; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", 
Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; dik = dd[mj]; dd[mj] = dd[mk]; dd[mk] = dik; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; dij = dd[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; dik = dd[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* triangle (i, j, k) */ /* determine triangle diameter */ diam = (dij > dik) ? dij : dik; djk = edgelen[m]; if(djk > diam) diam = djk; /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); dt = (double *) S_realloc((char *) dt, Nmore, Ntmax, sizeof(double)); Ntmax = Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; dt[Nt] = diam; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(dTout = NEW_NUMERIC(Nt)); PROTECT(out = NEW_LIST(4)); /* that's 4+5=9 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); dto = NUMERIC_POINTER(dTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; dto[m] = dt[m]; } /* insert output vectors in output list */ 
SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); SET_VECTOR_ELT(out, 3, dTout); UNPROTECT(9); return(out); } /* same as triDgraph but returns only triangles with diameter <= dmax */ SEXP triDRgraph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge, SEXP edgelength, /* edge lengths */ SEXP dmax) { int Nv, Ne; int *ie, *je; /* edges */ double *edgelen; int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ double *dt; /* diameters (max edge lengths) of triangles */ int Nt, Ntmax; /* number of triangles */ /* scratch storage */ int Nj; int *jj; double *dd; int i, j, k, m, mj, mk, Nmore, maxchunk; double dij, dik, djk, diam, Dmax; /* output */ SEXP iTout, jTout, kTout, dTout, out; int *ito, *jto, *kto; double *dto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); PROTECT(edgelength = AS_NUMERIC(edgelength)); PROTECT(dmax = AS_NUMERIC(dmax)); /* That's 5 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); edgelen = NUMERIC_POINTER(edgelength); /* maximum diameter */ Dmax = *(NUMERIC_POINTER(dmax)); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); dt = (double *) R_alloc(Ntmax, sizeof(double)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); dd = (double *) R_alloc(Ne, sizeof(double)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ 
/* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; dd[Nj] = edgelen[m]; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; dd[Nj] = edgelen[m]; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; dik = dd[mj]; dd[mj] = dd[mk]; dd[mk] = dik; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; dij = dd[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; dik = dd[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* triangle (i, j, k) */ /* determine triangle diameter */ diam = (dij > dik) ? 
dij : dik; djk = edgelen[m]; if(djk > diam) diam = djk; if(diam <= Dmax) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); dt = (double *) S_realloc((char *) dt, Nmore, Ntmax, sizeof(double)); Ntmax = Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; dt[Nt] = diam; Nt++; } } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(dTout = NEW_NUMERIC(Nt)); PROTECT(out = NEW_LIST(4)); /* that's 5+5=10 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); dto = NUMERIC_POINTER(dTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; dto[m] = dt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); SET_VECTOR_ELT(out, 3, dTout); UNPROTECT(10); return(out); } spatstat/src/poly2im.c0000755000176200001440000002017013166361223014451 0ustar liggesusers/* poly2im.c Conversion from (x,y) polygon to pixel image poly2imI pixel value = 1{pixel centre is inside polygon} poly2imA pixel value = area of intersection between pixel and polygon $Revision: 1.9 $ $Date: 2014/06/27 06:14:49 $ */ #undef DEBUG #include #include #include #include "chunkloop.h" void poly2imI(xp, yp, np, nx, ny, out) double *xp, *yp; /* polygon vertices, anticlockwise, CLOSED */ int *np; int *nx, *ny; /* INTEGER raster points from (0,0) to (nx-1, ny-1) */ int *out; /* output matrix [ny, nx], byrow=FALSE, initialised to 0 */ { int Np, Nx, Ny, Np1, maxchunk, mstart, mend; 
int j, k, m; double x0, y0, x1, y1, xleft, xright, yleft, yright; double dx, dy, y, slope, intercept; int jleft, jright, imax; int sign; Np = *np; Nx = *nx; Ny = *ny; /* Nxy = Nx * Ny; */ Np1 = Np - 1; /* run through polygon edges */ OUTERCHUNKLOOP(k, Np1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Np1, maxchunk, 8196) { x0 = xp[k]; y0 = yp[k]; x1 = xp[k+1]; y1 = yp[k+1]; if(x0 < x1) { xleft = x0; xright = x1; yleft = y0; yright = y1; sign = -1; } else { xleft = x1; xright = x0; yleft = y1; yright = y0; sign = +1; } /* determine relevant columns of pixels */ jleft = (int) ceil(xleft); jright = (int) floor(xright); if(jleft < Nx && jright >= 0 && jleft <= jright) { if(jleft < 0) { jleft = 0; } if(jright >= Nx) {jright = Nx - 1; } /* equation of edge */ dx = xright - xleft; dy = yright - yleft; slope = dy/dx; intercept = yleft - slope * xleft; /* visit relevant columns */ for(j = jleft; j <= jright; j++) { y = slope * ((double) j) + intercept; imax = (int) floor(y); if(imax >= Ny) imax = Ny-1; if(imax >= 0) { /* increment entries below edge in this column: out[i + j * Ny] += sign for 0 <= i <= imax */ mstart = j * Ny; mend = mstart + imax; for(m = mstart; m <= mend; m++) { out[m] += sign; } } } } } } } #define BELOW -1 #define INSIDE 0 #define ABOVE 1 void poly2imA(ncol, nrow, xpoly, ypoly, npoly, out, status) int *ncol, *nrow; /* pixels are unit squares from (0,0) to (ncol,nrow) */ double *xpoly, *ypoly; /* vectors of coordinates of polygon vertices */ int *npoly; double *out; /* double array [nrow, ncol] of pixel areas, byrow=TRUE, initialised to 0 */ int *status; { double *xp, *yp; int nx, ny, nxy, np, np1, maxchunk; int i, j, k; double xcur, ycur, xnext, ynext, xleft, yleft, xright, yright; int sgn, jmin, jmax, imin, imax; double x0, y0, x1, y1, slope, yhi, ylo, area, xcut, xcutA, xcutB; int klo, khi; nx = *ncol; ny = *nrow; xp = xpoly; yp = ypoly; np = *npoly; *status = 0; /* initialise output array */ nxy = nx * ny; for(k = 0; k < nxy; k++) 
out[k] = 0; /* ............ loop over polygon edges ...................*/ np1 = np - 1; OUTERCHUNKLOOP(k, np1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, np1, maxchunk, 8196) { xcur = xp[k]; ycur = yp[k]; xnext = xp[k+1]; ynext = yp[k+1]; #ifdef DEBUG Rprintf("\nEdge %d from (%lf, %lf) to (%lf, %lf) .........\n", k, xcur, ycur, xnext, ynext); #endif if(xcur != xnext) { /* vertical edges are ignored */ if(xcur < xnext) { #ifdef DEBUG Rprintf("negative sign\n"); #endif sgn = -1; xleft = xcur; yleft = ycur; xright = xnext; yright = ynext; } else { #ifdef DEBUG Rprintf("positive sign\n"); #endif sgn = 1; xleft = xnext; yleft = ynext; xright = xcur; yright = ycur; } /* we have now ensured xleft < xright */ slope = (yright - yleft)/(xright - xleft); /* Find relevant columns of pixels */ jmin = floor(xleft); jmin = (jmin < 0) ? 0 : jmin; jmax = ceil(xright); jmax = (jmax > nx - 1) ? nx - 1 : jmax; /* Find relevant rows of pixels */ imin = floor((yleft < yright) ? yleft : yright); imin = (imin < 0) ? 0 : imin; imax = ceil((yleft < yright) ? yright : yleft); imax = (imax > ny - 1) ? ny - 1 : imax; #ifdef DEBUG Rprintf( "imin=%d, imax=%d, jmin=%d, jmax=%d\n", imin, imax, jmin, jmax); #endif /* ........... 
loop over columns of pixels ..............*/ for(j = jmin; j <= jmax; j++) { #ifdef DEBUG Rprintf( "\t j=%d:\n", j); #endif /* Intersect trapezium with column of pixels */ if(xleft <= j+1 && xright >= j) { if(xleft >= j) { /* retain left corner */ #ifdef DEBUG Rprintf( "\tretain left corner\n"); #endif x0 = xleft; y0 = yleft; } else { /* trim left corner */ #ifdef DEBUG Rprintf( "\ttrim left corner\n"); #endif x0 = (double) j; y0 = yleft + slope * (x0 - xleft); } if(xright <= j+1) { /* retain right corner */ #ifdef DEBUG Rprintf( "\tretain right corner\n"); #endif x1 = xright; y1 = yright; } else { /* trim right corner */ #ifdef DEBUG Rprintf( "\ttrim right corner\n"); #endif x1 = (double) (j+1); y1 = yright + slope * (x1 - xright); } /* save min and max y */ if(y0 < y1) { #ifdef DEBUG Rprintf( "slope %lf > 0\n", slope); #endif ylo = y0; yhi = y1; } else { #ifdef DEBUG Rprintf( "slope %lf <= 0\n", slope); #endif ylo = y1; yhi = y0; } /* ............ loop over pixels within column ......... */ /* first part */ if(imin > 0) { for(i = 0; i < imin; i++) { #ifdef DEBUG Rprintf( "\ti=%d:\n", i); #endif /* The trimmed pixel [x0, x1] * [i, i+1] lies below the polygon edge. */ area = (x1 - x0); #ifdef DEBUG Rprintf( "\tIncrementing area by %lf\n", sgn * area); #endif out[i + ny * j] += sgn * area; } } /* second part */ for(i = imin; i <= imax; i++) { #ifdef DEBUG Rprintf( "\ti=%d:\n", i); #endif /* Compute area of intersection between trapezium and trimmed pixel [x0, x1] x [i, i+1] */ klo = (ylo <= i) ? BELOW : (ylo >= (i+1))? ABOVE: INSIDE; khi = (yhi <= i) ? BELOW : (yhi >= (i+1))? 
ABOVE: INSIDE; if(klo == ABOVE) { /* trapezium covers pixel */ #ifdef DEBUG Rprintf( "\t\ttrapezium covers pixel\n"); #endif area = (x1-x0); } else if(khi == BELOW) { #ifdef DEBUG Rprintf( "\t\tpixel avoids trapezium\n"); #endif /* pixel avoids trapezium */ area = 0.0; } else if(klo == INSIDE && khi == INSIDE) { /* polygon edge is inside pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge is inside pixel\n"); #endif area = (x1-x0) * ((ylo + yhi)/2.0 - i); } else if(klo == INSIDE && khi == ABOVE) { /* polygon edge crosses upper edge of pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge crosses upper edge of pixel\n"); #endif xcut = x0 + ((i+1) - y0)/slope; if(slope > 0) area = (xcut - x0) * ((y0 + (i+1))/2 - i) + (x1 - xcut); else area = (x1 - xcut) * ((y1 + (i+1))/2 - i) + (xcut - x0); } else if(klo == BELOW && khi == INSIDE) { /* polygon edge crosses lower edge of pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge crosses lower edge of pixel\n"); #endif xcut = x0 + (i - y0)/slope; if(slope > 0) area = (x1 - xcut) * ((y1 + i)/2 - i); else area = (xcut - x0) * ((y0 + i)/2 - i); } else if(klo == BELOW && khi == ABOVE) { /* polygon edge crosses upper and lower edges of pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge crosses upper and lower edges of pixel\n"); #endif xcutA = x0 + (i - y0)/slope; xcutB = x0 + ((i+1) - y0)/slope; if(slope > 0) area = (xcutB - xcutA)/2 + (x1 - xcutB); else area = (xcutB - x0) + (xcutA - xcutB)/2; } else { /* control should not pass to here */ *status = 1; return; } /* add contribution to area of pixel */ #ifdef DEBUG Rprintf( "\tIncrementing area by %lf\n", sgn * area); #endif out[i + ny * j] += sgn * area; } /* ............ end of loop over pixels within column ......... */ } } /* ........ end of loop over columns of pixels ...............*/ } } } /* ......... 
end of loop over polygon edges ...................*/ } spatstat/src/init.c0000644000176200001440000002653313166361223014027 0ustar liggesusers /* Native symbol registration table for spatstat package Automatically generated - do not edit this file! */ #include "proto.h" #include #include #include // for NULL #include /* See proto.h for declarations for the native routines registered below. */ static const R_CMethodDef CEntries[] = { {"acrdenspt", (DL_FUNC) &acrdenspt, 10}, {"acrsmoopt", (DL_FUNC) &acrsmoopt, 10}, {"adenspt", (DL_FUNC) &adenspt, 7}, {"areaBdif", (DL_FUNC) &areaBdif, 11}, {"areadifs", (DL_FUNC) &areadifs, 7}, {"asmoopt", (DL_FUNC) &asmoopt, 8}, {"auctionbf", (DL_FUNC) &auctionbf, 7}, {"awtcrdenspt", (DL_FUNC) &awtcrdenspt, 11}, {"awtcrsmoopt", (DL_FUNC) &awtcrsmoopt, 11}, {"awtdenspt", (DL_FUNC) &awtdenspt, 8}, {"awtsmoopt", (DL_FUNC) &awtsmoopt, 9}, {"bdrymask", (DL_FUNC) &bdrymask, 4}, {"Cbiform", (DL_FUNC) &Cbiform, 6}, {"Cclosepaircounts", (DL_FUNC) &Cclosepaircounts, 5}, {"Ccountends", (DL_FUNC) &Ccountends, 14}, {"Ccrossdist", (DL_FUNC) &Ccrossdist, 8}, {"Ccrosspaircounts", (DL_FUNC) &Ccrosspaircounts, 8}, {"CcrossPdist", (DL_FUNC) &CcrossPdist, 10}, {"Cidw", (DL_FUNC) &Cidw, 14}, {"ClineMquad", (DL_FUNC) &ClineMquad, 23}, {"Clinequad", (DL_FUNC) &Clinequad, 18}, {"ClineRMquad", (DL_FUNC) &ClineRMquad, 23}, {"ClineRquad", (DL_FUNC) &ClineRquad, 18}, {"Clinvwhichdist", (DL_FUNC) &Clinvwhichdist, 12}, {"Clixellate", (DL_FUNC) &Clixellate, 16}, {"cocoGraph", (DL_FUNC) &cocoGraph, 6}, {"cocoImage", (DL_FUNC) &cocoImage, 3}, {"Corput", (DL_FUNC) &Corput, 3}, {"Cpairdist", (DL_FUNC) &Cpairdist, 5}, {"CpairPdist", (DL_FUNC) &CpairPdist, 7}, {"Cquadform", (DL_FUNC) &Cquadform, 5}, {"crdenspt", (DL_FUNC) &crdenspt, 9}, {"crosscount", (DL_FUNC) &crosscount, 8}, {"crsmoopt", (DL_FUNC) &crsmoopt, 10}, {"CspaSumSymOut", (DL_FUNC) &CspaSumSymOut, 9}, {"CspaWtSumSymOut", (DL_FUNC) &CspaWtSumSymOut, 13}, {"Csum2outer", (DL_FUNC) &Csum2outer, 6}, {"Csumouter", 
(DL_FUNC) &Csumouter, 4}, {"Csumsymouter", (DL_FUNC) &Csumsymouter, 4}, {"Cwsum2outer", (DL_FUNC) &Cwsum2outer, 7}, {"Cwsumouter", (DL_FUNC) &Cwsumouter, 5}, {"Cwsumsymouter", (DL_FUNC) &Cwsumsymouter, 5}, {"Cxypolyselfint", (DL_FUNC) &Cxypolyselfint, 11}, {"D3crossdist", (DL_FUNC) &D3crossdist, 10}, {"D3crossPdist", (DL_FUNC) &D3crossPdist, 13}, {"D3pairdist", (DL_FUNC) &D3pairdist, 6}, {"D3pairPdist", (DL_FUNC) &D3pairPdist, 9}, {"Ddist2dpath", (DL_FUNC) &Ddist2dpath, 7}, {"delta2area", (DL_FUNC) &delta2area, 10}, {"denspt", (DL_FUNC) &denspt, 6}, {"digberJ", (DL_FUNC) &digberJ, 6}, {"dinfty_R", (DL_FUNC) &dinfty_R, 3}, {"discareapoly", (DL_FUNC) &discareapoly, 12}, {"discs2grid", (DL_FUNC) &discs2grid, 11}, {"distmapbin", (DL_FUNC) &distmapbin, 9}, {"dwpure", (DL_FUNC) &dwpure, 6}, {"Ediggatsti", (DL_FUNC) &Ediggatsti, 10}, {"Ediggra", (DL_FUNC) &Ediggra, 11}, {"Efiksel", (DL_FUNC) &Efiksel, 9}, {"Egeyer", (DL_FUNC) &Egeyer, 11}, {"exact_dt_R", (DL_FUNC) &exact_dt_R, 14}, {"fardist2grid", (DL_FUNC) &fardist2grid, 10}, {"fardistgrid", (DL_FUNC) &fardistgrid, 10}, {"Fclosepairs", (DL_FUNC) &Fclosepairs, 16}, {"Fcrosspairs", (DL_FUNC) &Fcrosspairs, 19}, {"Gdenspt", (DL_FUNC) &Gdenspt, 5}, {"Gsmoopt", (DL_FUNC) &Gsmoopt, 7}, {"Gwtdenspt", (DL_FUNC) &Gwtdenspt, 6}, {"Gwtsmoopt", (DL_FUNC) &Gwtsmoopt, 8}, {"hasX3close", (DL_FUNC) &hasX3close, 6}, {"hasX3pclose", (DL_FUNC) &hasX3pclose, 7}, {"hasXclose", (DL_FUNC) &hasXclose, 5}, {"hasXpclose", (DL_FUNC) &hasXpclose, 6}, {"hasXY3close", (DL_FUNC) &hasXY3close, 10}, {"hasXY3pclose", (DL_FUNC) &hasXY3pclose, 11}, {"hasXYclose", (DL_FUNC) &hasXYclose, 8}, {"hasXYpclose", (DL_FUNC) &hasXYpclose, 9}, {"Idist2dpath", (DL_FUNC) &Idist2dpath, 7}, {"idwloo", (DL_FUNC) &idwloo, 8}, {"KborderD", (DL_FUNC) &KborderD, 8}, {"KborderI", (DL_FUNC) &KborderI, 8}, {"knnd3D", (DL_FUNC) &knnd3D, 8}, {"knndMD", (DL_FUNC) &knndMD, 6}, {"knndsort", (DL_FUNC) &knndsort, 6}, {"knnGinterface", (DL_FUNC) &knnGinterface, 15}, {"knnsort", 
(DL_FUNC) &knnsort, 7}, {"knnw3D", (DL_FUNC) &knnw3D, 8}, {"knnwMD", (DL_FUNC) &knnwMD, 7}, {"knnX3Dinterface", (DL_FUNC) &knnX3Dinterface, 17}, {"knnXinterface", (DL_FUNC) &knnXinterface, 15}, {"KnoneD", (DL_FUNC) &KnoneD, 6}, {"KnoneI", (DL_FUNC) &KnoneI, 6}, {"knownCif", (DL_FUNC) &knownCif, 2}, {"KrectDbl", (DL_FUNC) &KrectDbl, 17}, {"KrectInt", (DL_FUNC) &KrectInt, 17}, {"KrectWtd", (DL_FUNC) &KrectWtd, 18}, {"Kwborder", (DL_FUNC) &Kwborder, 9}, {"Kwnone", (DL_FUNC) &Kwnone, 7}, {"lincrossdist", (DL_FUNC) &lincrossdist, 16}, {"linearradius", (DL_FUNC) &linearradius, 8}, {"linknncross", (DL_FUNC) &linknncross, 16}, {"linknnd", (DL_FUNC) &linknnd, 13}, {"linndcross", (DL_FUNC) &linndcross, 18}, {"linndxcross", (DL_FUNC) &linndxcross, 20}, {"linnndist", (DL_FUNC) &linnndist, 13}, {"linnnwhich", (DL_FUNC) &linnnwhich, 14}, {"linpairdist", (DL_FUNC) &linpairdist, 12}, {"linSnndwhich", (DL_FUNC) &linSnndwhich, 15}, {"linvknndist", (DL_FUNC) &linvknndist, 13}, {"locpcfx", (DL_FUNC) &locpcfx, 12}, {"locprod", (DL_FUNC) &locprod, 7}, {"locWpcfx", (DL_FUNC) &locWpcfx, 13}, {"locxprod", (DL_FUNC) &locxprod, 10}, {"maxnnd2", (DL_FUNC) &maxnnd2, 5}, {"maxPnnd2", (DL_FUNC) &maxPnnd2, 5}, {"minnnd2", (DL_FUNC) &minnnd2, 5}, {"minPnnd2", (DL_FUNC) &minPnnd2, 5}, {"nnd3D", (DL_FUNC) &nnd3D, 7}, {"nndistsort", (DL_FUNC) &nndistsort, 5}, {"nndMD", (DL_FUNC) &nndMD, 5}, {"nnGinterface", (DL_FUNC) &nnGinterface, 14}, {"nnw3D", (DL_FUNC) &nnw3D, 7}, {"nnwhichsort", (DL_FUNC) &nnwhichsort, 5}, {"nnwMD", (DL_FUNC) &nnwMD, 6}, {"nnX3Dinterface", (DL_FUNC) &nnX3Dinterface, 16}, {"nnXinterface", (DL_FUNC) &nnXinterface, 14}, {"paircount", (DL_FUNC) &paircount, 5}, {"poly2imA", (DL_FUNC) &poly2imA, 7}, {"poly2imI", (DL_FUNC) &poly2imI, 6}, {"ps_exact_dt_R", (DL_FUNC) &ps_exact_dt_R, 13}, {"RcallF3", (DL_FUNC) &RcallF3, 17}, {"RcallF3cen", (DL_FUNC) &RcallF3cen, 20}, {"RcallG3", (DL_FUNC) &RcallG3, 17}, {"RcallG3cen", (DL_FUNC) &RcallG3cen, 19}, {"RcallK3", (DL_FUNC) &RcallK3, 17}, 
{"Rcallpcf3", (DL_FUNC) &Rcallpcf3, 18}, {"ripleybox", (DL_FUNC) &ripleybox, 11}, {"ripleypoly", (DL_FUNC) &ripleypoly, 11}, {"scantrans", (DL_FUNC) &scantrans, 11}, {"seg2pixI", (DL_FUNC) &seg2pixI, 8}, {"seg2pixL", (DL_FUNC) &seg2pixL, 11}, {"seg2pixN", (DL_FUNC) &seg2pixN, 9}, {"segdens", (DL_FUNC) &segdens, 10}, {"smoopt", (DL_FUNC) &smoopt, 8}, {"trigraf", (DL_FUNC) &trigraf, 10}, {"trigrafS", (DL_FUNC) &trigrafS, 10}, {"wtcrdenspt", (DL_FUNC) &wtcrdenspt, 10}, {"wtcrsmoopt", (DL_FUNC) &wtcrsmoopt, 11}, {"wtdenspt", (DL_FUNC) &wtdenspt, 7}, {"wtsmoopt", (DL_FUNC) &wtsmoopt, 9}, {"xypsi", (DL_FUNC) &xypsi, 10}, {"xysegint", (DL_FUNC) &xysegint, 16}, {"xysegXint", (DL_FUNC) &xysegXint, 11}, {"xysi", (DL_FUNC) &xysi, 12}, {"xysiANY", (DL_FUNC) &xysiANY, 12}, {"xysxi", (DL_FUNC) &xysxi, 7}, {NULL, NULL, 0} }; static const R_CallMethodDef CallEntries[] = { {"close3IJpairs", (DL_FUNC) &close3IJpairs, 5}, {"close3pairs", (DL_FUNC) &close3pairs, 5}, {"cross3IJpairs", (DL_FUNC) &cross3IJpairs, 8}, {"cross3pairs", (DL_FUNC) &cross3pairs, 8}, {"Cwhist", (DL_FUNC) &Cwhist, 3}, {"Cxysegint", (DL_FUNC) &Cxysegint, 9}, {"CxysegXint", (DL_FUNC) &CxysegXint, 5}, {"graphVees", (DL_FUNC) &graphVees, 3}, {"PerfectDGS", (DL_FUNC) &PerfectDGS, 4}, {"PerfectDiggleGratton", (DL_FUNC) &PerfectDiggleGratton, 6}, {"PerfectHardcore", (DL_FUNC) &PerfectHardcore, 4}, {"PerfectPenttinen", (DL_FUNC) &PerfectPenttinen, 5}, {"PerfectStrauss", (DL_FUNC) &PerfectStrauss, 5}, {"PerfectStraussHard", (DL_FUNC) &PerfectStraussHard, 6}, {"thinjumpequal", (DL_FUNC) &thinjumpequal, 3}, {"triDgraph", (DL_FUNC) &triDgraph, 4}, {"triDRgraph", (DL_FUNC) &triDRgraph, 5}, {"triograph", (DL_FUNC) &triograph, 3}, {"trioxgraph", (DL_FUNC) &trioxgraph, 4}, {"VcloseIJDpairs", (DL_FUNC) &VcloseIJDpairs, 4}, {"VcloseIJpairs", (DL_FUNC) &VcloseIJpairs, 4}, {"Vclosepairs", (DL_FUNC) &Vclosepairs, 4}, {"Vclosethresh", (DL_FUNC) &Vclosethresh, 5}, {"VcrossIJDpairs", (DL_FUNC) &VcrossIJDpairs, 6}, {"VcrossIJpairs", 
(DL_FUNC) &VcrossIJpairs, 6}, {"Vcrosspairs", (DL_FUNC) &Vcrosspairs, 6}, {"xmethas", (DL_FUNC) &xmethas, 25}, {NULL, NULL, 0} }; void R_init_spatstat(DllInfo *dll) { R_registerRoutines(dll, CEntries, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } spatstat/src/penttinen.c0000644000176200001440000000602613166361223015063 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" #include "constants.h" /* Conditional intensity computation for Penttinen process */ /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct Penttinen { double gamma; double r; double loggamma; double reach2; double *period; int hard; int per; } Penttinen; /* initialiser function */ Cdata *penttineninit(state, model, algo) State state; Model model; Algor algo; { /* create storage for model parameters */ Penttinen *penttinen; penttinen = (Penttinen *) R_alloc(1, sizeof(Penttinen)); /* Interpret model parameters*/ penttinen->gamma = model.ipar[0]; penttinen->r = model.ipar[1]; penttinen->reach2 = 4.0 * penttinen->r * penttinen->r; penttinen->period = model.period; #ifdef MHDEBUG Rprintf("Initialising Penttinen gamma=%lf, r=%lf\n", penttinen->gamma, penttinen->r); #endif /* is the model numerically equivalent to hard core ? */ penttinen->hard = (penttinen->gamma < DOUBLE_EPS); penttinen->loggamma = (penttinen->hard) ? 0 : log(penttinen->gamma); /* periodic boundary conditions? 
*/ penttinen->per = (model.period[0] > 0.0); return((Cdata *) penttinen); } /* conditional intensity evaluator */ double penttinencif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, reach2, z, z2, logpot, cifval; Penttinen *penttinen; DECLARE_CLOSE_D2_VARS; penttinen = (Penttinen *) cdata; reach2 = penttinen->reach2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); logpot = 0.0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(penttinen->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],penttinen->period,reach2,d2)) { z2 = d2/reach2; z = sqrt(z2); if(z < 1.0) { logpot += acos(z) - z * sqrt(1 - z2); } } } } if(ixp1 < npts) { for(j=ixp1; jperiod,reach2,d2)) { z2 = d2/reach2; z = sqrt(z2); if(z < 1.0) { logpot += acos(z) - z * sqrt(1 - z2); } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], reach2, d2)) { z2 = d2/reach2; z = sqrt(z2); if(z < 1.0) { logpot += acos(z) - z * sqrt(1 - z2); } } } } if(ixp1 < npts) { for(j=ixp1; jhard) { if(logpot > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp((penttinen->loggamma) * M_2_PI * logpot); return cifval; } Cifns PenttinenCifns = { &penttineninit, &penttinencif, (updafunptr) NULL, NO}; spatstat/src/fiksel.c0000755000176200001440000000574413166361223014345 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Fiksel process */ /* Conditional intensity function for a pairwise interaction point process with interaction function e(t) = 0 for t < h = exp(a * exp(- kappa * t)) for h <= t < r = 1 for t >= r */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Fiksel { double r; double h; double kappa; double a; double h2; /* h^2 */ double r2; /* r^2 */ double 
*period; int per; } Fiksel; /* initialiser function */ Cdata *fikselinit(state, model, algo) State state; Model model; Algor algo; { Fiksel *fiksel; fiksel = (Fiksel *) R_alloc(1, sizeof(Fiksel)); /* Interpret model parameters*/ fiksel->r = model.ipar[0]; fiksel->h = model.ipar[1]; fiksel->kappa = model.ipar[2]; fiksel->a = model.ipar[3]; fiksel->period = model.period; /* constants */ fiksel->h2 = pow(fiksel->h, 2); fiksel->r2 = pow(fiksel->r, 2); /* periodic boundary conditions? */ fiksel->per = (model.period[0] > 0.0); return((Cdata *) fiksel); } /* conditional intensity evaluator */ double fikselcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairpotsum, cifval; double kappa, r2, h2; double *period; Fiksel *fiksel; DECLARE_CLOSE_D2_VARS; fiksel = (Fiksel *) cdata; period = fiksel->period; kappa = fiksel->kappa; r2 = fiksel->r2; h2 = fiksel->h2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairpotsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(fiksel->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,r2,d2)) { if(d2 < h2) { cifval = 0.0; return(cifval); } else { pairpotsum += exp(-kappa * sqrt(d2)); } } } } if(ixp1 < npts) { for(j=ixp1; j 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u,v,x[j],y[j],r2,d2)) { if(d2 < h2) { cifval = 0.0; return(cifval); } else { pairpotsum += exp(-kappa * sqrt(d2)); } } } } if(ixp1 < npts) { for(j=ixp1; ja * pairpotsum); return cifval; } Cifns FikselCifns = { &fikselinit, &fikselcif, (updafunptr) NULL, NO}; spatstat/src/mhv4.h0000644000176200001440000000040113166361223013731 0ustar liggesusers/* mhv4.h visual debugger or not */ #undef MH_SNOOP if(snooper.active) { /* visual debugger */ #define MH_SNOOP YES #include "mhv5.h" #undef MH_SNOOP } else { /* no visual debugger */ #define MH_SNOOP NO 
#include "mhv5.h" #undef MH_SNOOP } spatstat/src/knn3DdistX.h0000644000176200001440000001226413166361223015056 0ustar liggesusers #if (1 == 0) /* knn3DdistX.h Code template for C functions supporting nncross for k-nearest neighbours (k > 1) for 3D point patterns THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED IN ASCENDING ORDER OF z COORDINATE This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if exclusion mechanism is used Either or both DIST and WHICH may be defined. When EXCLUDE is defined, code numbers id1, id2 are attached to the patterns X and Y respectively, such that X[i] and Y[j] are the same point iff id1[i] = id2[j]. Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.1 $ $Date: 2013/06/29 03:04:47 $ */ #endif void FNAME(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, kmax, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; int *id1, *id2; int *kmax; /* output matrices (npoints * kmax) in ROW MAJOR order */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int npoints1, npoints2, nk, nk1; int maxchunk, i, jleft, jright, jwhich, lastjwhich, unsorted, k, k1; double d2, d2minK, x1i, y1i, z1i, dx, dy, dz, dz2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif #ifdef EXCLUDE int id1i; #endif npoints1 = *n1; npoints2 = *n2; nk = *kmax; nk1 = nk - 1; hu = *huge; hu2 = hu * hu; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints1) { 
R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints1) maxchunk = npoints1; for(; i < maxchunk; i++) { /* initialise nn distances and indices */ d2minK = hu2; jwhich = -1; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } x1i = x1[i]; y1i = y1[i]; z1i = z1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif if(lastjwhich < npoints2) { /* search forward from previous nearest neighbour */ for(jright = lastjwhich; jright < npoints2; ++jright) { dz = z2[jright] - z1i; dz2 = dz * dz; if(dz2 > d2minK) /* note that dz2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jright] != id1i) { #endif dy = y2[jright] - y1i; d2 = dy * dy + dz2; if(d2 < d2minK) { dx = x2[jright] - x1i; d2 = dx * dx + d2; if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ d2min[nk1] = d2; jwhich = jright; #ifdef WHICH which[nk1] = jright; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } #ifdef EXCLUDE } #endif } /* end forward search */ } if(lastjwhich > 0) { /* search backward from previous nearest neighbour */ for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dz = z1i - z2[jleft]; dz2 = dz * dz; if(dz2 > d2minK) /* note that dz2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jleft] != id1i) { #endif dy = y2[jleft] - y1i; d2 = dy * dy + dz2; if(d2 < d2minK) { dx = x2[jleft] - x1i; d2 = dx * dx + d2; if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ d2min[nk1] = d2; jwhich = jleft; #ifdef WHICH which[nk1] = jleft; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { 
/* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } #ifdef EXCLUDE } #endif } /* end backward search */ } /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[nk * i + k] = which[k] + 1; /* R indexing */ #endif } /* save index of last neighbour encountered */ lastjwhich = jwhich; /* end of loop over points i */ } } } spatstat/src/whist.c0000644000176200001440000000172613166361223014217 0ustar liggesusers/* whist.c Weighted histogram Designed for very fine bins Cwhist(indices, weights, nbins) indices point to bins (range: 0 to nbins-1) $Revision: 1.5 $ $Date: 2016/02/02 01:52:19 $ */ #include #include #include SEXP Cwhist(SEXP indices, SEXP weights, SEXP nbins) { int i, j, N, M; int *x; double *w, *y; SEXP result; /* =================== Protect R objects from garbage collector ======= */ PROTECT(indices = AS_INTEGER(indices)); PROTECT(weights = AS_NUMERIC(weights)); PROTECT(nbins = AS_INTEGER(nbins)); N = LENGTH(indices); M = *(INTEGER_POINTER(nbins)); x = INTEGER_POINTER(indices); w = NUMERIC_POINTER(weights); PROTECT(result = NEW_NUMERIC(M)); y = NUMERIC_POINTER(result); for(j = 0; j < M; j++) y[j] = 0.0; for(i = 0; i < N; i++) { j = x[i]; if(j != NA_INTEGER && R_FINITE(w[i]) && j >= 0 && j < M) y[j] += w[i]; } UNPROTECT(4); return(result); } spatstat/src/knngrid.c0000644000176200001440000000400613166361223014507 0ustar liggesusers/* knngrid.c K-th Nearest Neighbour Distances from a pixel grid to a point pattern Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 Licence: GNU Public Licence >= 2 $Revision: 1.6 $ $Date: 2013/11/03 05:06:28 $ Function body definition is #included from knngrid.h THE FOLLOWING FUNCTIONS ASSUME THAT x IS SORTED IN 
ASCENDING ORDER */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT x IS SORTED IN ASCENDING ORDER */ /* general interface */ void knnGinterface(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ kmax, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; int *kmax; /* options */ int *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { void knnGdw(), knnGd(), knnGw(); int di, wh; di = (*wantdist != 0); wh = (*wantwhich != 0); if(di && wh) { knnGdw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge); } else if(di) { knnGd(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge); } else if(wh) { knnGw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge); } } #undef FNAME #undef DIST #undef WHICH /* knnGdw nearest neighbours 1:kmax returns distances and indices */ #define FNAME knnGdw #define DIST #define WHICH #include "knngrid.h" #undef FNAME #undef DIST #undef WHICH /* knnGd nearest neighbours 1:kmax returns distances only */ #define FNAME knnGd #define DIST #include "knngrid.h" #undef FNAME #undef DIST #undef WHICH /* knnGw nearest neighbours 1:kmax returns indices only */ #define FNAME knnGw #define WHICH #include "knngrid.h" #undef FNAME #undef DIST #undef WHICH spatstat/src/linnncross.c0000644000176200001440000000121213166361223015237 0ustar liggesusers#include /* linnncross.c Shortest-path distances between nearest neighbours in linear network One pattern to another pattern $Revision: 1.1 $ $Date: 2013/10/21 02:01:29 $ linndcross linndxcross */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) /* definition of linndcross */ #define FNAME linndcross #undef EXCLU #define WHICH 
#include "linnncross.h" #undef FNAME #undef EXCLU #undef WHICH /* definition of linndxcross */ #define FNAME linndxcross #define EXCLU #define WHICH #include "linnncross.h" spatstat/src/mhv2.h0000644000176200001440000000041213166361223013731 0ustar liggesusers/* mhv2.h single interaction or hybrid */ #undef MH_SINGLE if(Ncif == 1) { /* single interaction */ #define MH_SINGLE YES #include "mhv3.h" #undef MH_SINGLE } else { /* hybrid interaction */ #define MH_SINGLE NO #include "mhv3.h" #undef MH_SINGLE } spatstat/src/sftcr.c0000755000176200001440000000436613166361223014210 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Soft Core process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Softcore { double sigma; double kappa; double nook; /* -1/kappa */ double stok; /* sigma^(2/kappa) */ double *period; int per; } Softcore; /* initialiser function */ Cdata *sftcrinit(state, model, algo) State state; Model model; Algor algo; { Softcore *softcore; softcore = (Softcore *) R_alloc(1, sizeof(Softcore)); /* Interpret model parameters*/ softcore->sigma = model.ipar[0]; softcore->kappa = model.ipar[1]; softcore->period = model.period; /* constants */ softcore->nook = -1/softcore->kappa; softcore->stok = pow(softcore->sigma, 2/softcore->kappa); /* periodic boundary conditions? 
*/ softcore->per = (model.period[0] > 0.0); return((Cdata *) softcore); } /* conditional intensity evaluator */ double sftcrcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairsum, cifval, nook, stok; Softcore *softcore; softcore = (Softcore *) cdata; nook = softcore->nook; stok = softcore->stok; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(softcore->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = dist2(u,v,x[j],y[j],softcore->period); pairsum += pow(d2, nook); } } if(ixp1 < npts) { for(j=ixp1; jperiod); pairsum += pow(d2, nook); } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = pow(u - x[j],2) + pow(v-y[j],2); pairsum += pow(d2, nook); } } if(ixp1 < npts) { for(j=ixp1; j up to local adaptations for spatstat this code is identical to Revision 0.4 for the R package transport */ /* n >= 2 is assumed throughout !!!!!!!!! */ #include #include #include typedef struct State { int n; double epsbid; /* the current eps */ int backwards; /* 0 if we should do forward auction, 1 if we should do backward auction */ int nofassigned; /* number of assigned persons */ int *pers_to_obj; /* -1 means unassigned */ int *obj_to_pers; /* -1 means unassigned */ double *price; double *profit; int *desiremat; /* matrix of desires */ double *persvalue; /* desire minus price of current person in forward phase */ double *objvalue; /* desire minus profit of current object in reverse phase */ /* last three only used in bid, but maybe better to reserve memory once and for all */ } State; #define DESIRE(I,J,STATE,NVALUE) ((STATE)->desiremat)[(NVALUE) * (J) + (I)] #define DESIREMAIN(I,J,STATE,NVALUE) ((STATE).desiremat)[(NVALUE) * (J) + (I)] #define MIN(A,B) ((A)<(B) ? 
(A) : (B)) void bidbf(State *state, int person); void lurebf(State *state, int obj); int arrayargmax(double *a, int n); double arraysec(double *a, int n, int arg); /* void printit(State *state); */ /* ------------ The main function ----------------------------- */ void auctionbf(int *desirem, int *nn, int *pers_to_obj, double *price, double *profit, int *kk, double *eps) { int i,j,r; /* indices */ int k,n; State state; /* inputs */ state.n = n = *nn; k = *kk; /* length of eps, only needed in outside loop */ state.pers_to_obj = pers_to_obj; /* n vector: person i gets which object */ state.price = price; /* n vector: price of object j */ state.profit = profit; /* n vector: profit of person i */ state.desiremat = desirem; /* n x n vector: desire of person i for object j */ /* scratch space */ state.obj_to_pers = (int *) R_alloc((long) n, sizeof(int)); state.persvalue = (double *) R_alloc((long) n, sizeof(double)); state.objvalue = (double *) R_alloc((long) n, sizeof(double)); /* Prices start at what the R-function supplied (usually 0) */ /* Profits are set to the rowwise max that satisfies eps-CS */ for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { state.persvalue[j] = DESIREMAIN(i,j,state,n); } state.profit[i] = arrayargmax(state.persvalue, n); } for (r = 0; r < k; r++) { state.backwards = 0; state.epsbid = eps[r]; /* At start everything is unassigned */ state.nofassigned = 0; for (j = 0; j < n; j++) { state.pers_to_obj[j] = -1; state.obj_to_pers[j] = -1; } while (state.nofassigned < n) { /* printit(&state); */ R_CheckUserInterrupt(); if (state.backwards == 0) { /* printit(&state); */ for (i = 0; i < n; i++) { if (state.pers_to_obj[i] == -1) { /* Rprintf("Bid \n"); */ bidbf(&state, i); /* bid does assigning and unassigning and changes nofassigned */ } } } else { /* printit(&state); */ for (j = 0; j < n; j++) { if (state.obj_to_pers[j] == -1) { /* Rprintf("Lure \n"); */ lurebf(&state, j); /* lure does assigning and unassigning and changes nofassigned */ } } } } /* 
eof while */ } /* eof eps-scaling for-loop */ } /* ------------ Functions called by auction ------------------------- */ void bidbf(State *state, int person) { int j; int n; int bidfor, oldpers; double bidamount; n = state->n; for (j = 0; j < n; j++) { state->persvalue[j] = DESIRE(person,j,state,n) - state->price[j]; } bidfor = arrayargmax(state->persvalue, n); bidamount = state->persvalue[bidfor] - arraysec(state->persvalue,n,bidfor) + state->epsbid; /* here we get a float result, the rest are int results */ oldpers = state->obj_to_pers[bidfor]; if (oldpers == -1) { state->nofassigned++; state->backwards = 1; } else { state->pers_to_obj[oldpers] = -1; } state->pers_to_obj[person] = bidfor; state->obj_to_pers[bidfor] = person; state->price[bidfor] = state->price[bidfor] + bidamount; /* new forward/reverse auction algo */ state->profit[person] = DESIRE(person,bidfor,state,n) - state->price[bidfor]; } /* like bidbf, but for reverse auction */ void lurebf(State *state, int obj) { int i; int n; int lurepno, oldobj; double lureamount; n = state->n; for (i = 0; i < n; i++) { state->objvalue[i] = DESIRE(i,obj,state,n) - state->profit[i]; } lurepno = arrayargmax(state->objvalue, n); lureamount = state->objvalue[lurepno] - arraysec(state->objvalue,n,lurepno) + state->epsbid; /* here we get a float result, the rest are int results */ oldobj = state->pers_to_obj[lurepno]; if (oldobj == -1) { state->nofassigned++; state->backwards = 0; } else { state->obj_to_pers[oldobj] = -1; } state->obj_to_pers[obj] = lurepno; state->pers_to_obj[lurepno] = obj; state->profit[lurepno] = state->profit[lurepno] + lureamount; /* new forward/reverse auction algo */ state->price[obj] = DESIRE(lurepno,obj,state,n) - state->profit[lurepno]; } /* ------------ Little helpers ------------------------- */ /* Gives first index that maximizes array */ int arrayargmax(double *a, int n) { int i, arg; double amax; arg = 0; amax = a[0]; for (i = 1; i < n; i++) if (a[i] > amax) { arg = i; amax = a[i]; } 
return(arg); } /* Second largest element of a non-negative integer array knowing the largest is at index arg */ double arraysec(double *a, int n, int arg) { int i; double amax; if (arg > 0) amax = a[0]; else amax = a[1]; for (i = 0; i < arg; i++) if (a[i] > amax) amax = a[i]; for (i = arg+1; i < n; i++) if (a[i] > amax) amax = a[i]; return(amax); } /* void printit(State *state) { int i=0,n=0; n = state->n; Rprintf("Current state: \n"); Rprintf("backwards: %d \n", state->backwards); Rprintf("nofassigned: %d \n", state->nofassigned); Rprintf("pers_to_obj: "); for (i = 0; i < n; i++) { Rprintf("%d ", state->pers_to_obj[i]); } Rprintf("\n"); Rprintf("obj_to_pers: "); for (i = 0; i < n; i++) { Rprintf("%d ", state->obj_to_pers[i]); } Rprintf("\n"); Rprintf("price: "); for (i = 0; i < n; i++) { Rprintf("%2.9lf ", state->price[i]); } Rprintf("\n"); Rprintf("profit: "); for (i = 0; i < n; i++) { Rprintf("%2.9lf ", state->profit[i]); } Rprintf("\n"); Rprintf("persvalue: "); for (i = 0; i < n; i++) { Rprintf("%2.9lf ", state->persvalue[i]); } Rprintf("\n"); Rprintf("objvalue: "); for (i = 0; i < n; i++) { Rprintf("%2.9lf ", state->objvalue[i]); } Rprintf("\n"); Rprintf("\n\n\n"); } */ spatstat/src/f3.c0000755000176200001440000002477313166361223013403 0ustar liggesusers#include #include #include #include "geom3.h" #include "functable.h" #ifdef DEBUG #define DEBUGMESSAGE(S) Rprintf(S); #else #define DEBUGMESSAGE(S) #endif /* $Revision: 1.4 $ $Date: 2016/10/23 04:24:03 $ 3D distance transform # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
# ////////////////////////////////////////////// */ /* step lengths in distance transform */ #define STEP1 41 #define STEP2 58 #define STEP3 71 /* (41,58,71)/41 is a good rational approximation to (1, sqrt(2), sqrt(3)) */ #define MIN(X,Y) (((X) < (Y)) ? (X) : (Y)) #define MAX(X,Y) (((X) > (Y)) ? (X) : (Y)) typedef struct IntImage { int *data; int Mx, My, Mz; /* dimensions */ int length; } IntImage; typedef struct BinaryImage { unsigned char *data; int Mx, My, Mz; /* dimensions */ int length; } BinaryImage; #define VALUE(I,X,Y,Z) \ ((I).data)[ (Z) * ((I).Mx) * ((I).My) + (Y) * ((I).Mx) + (X) ] void allocBinImage(b, ok) BinaryImage *b; int *ok; { b->length = b->Mx * b->My * b->Mz; b->data = (unsigned char *) R_alloc(b->length, sizeof(unsigned char)); if(b->data == 0) { Rprintf("Can't allocate memory for %d binary voxels\n", b->length); *ok = 0; } *ok = 1; } void allocIntImage(v, ok) IntImage *v; int *ok; { v->length = v->Mx * v->My * v->Mz; v->data = (int *) R_alloc(v->length, sizeof(int)); if(v->data == 0) { Rprintf("Can't allocate memory for %d integer voxels\n", v->length); *ok = 0; } *ok = 1; } void freeBinImage(b) BinaryImage *b; { } void freeIntImage(v) IntImage *v; { } void cts2bin(p, n, box, vside, b, ok) /* convert a list of points inside a box into a 3D binary image */ Point *p; int n; Box *box; double vside; /* side of a (cubic) voxel */ BinaryImage *b; int *ok; { int i, lx, ly, lz; unsigned char *cp; b->Mx = (int) ceil((box->x1 - box->x0)/vside) + 1; b->My = (int) ceil((box->y1 - box->y0)/vside) + 1; b->Mz = (int) ceil((box->z1 - box->z0)/vside) + 1; allocBinImage(b, ok); if(! 
(*ok)) return; for(i = b->length, cp = b->data; i ; i--, cp++) *cp = 1; for(i=0;ix0)/vside)-1; ly = (int) ceil((p[i].y - box->y0)/vside)-1; lz = (int) ceil((p[i].z - box->z0)/vside)-1; if( lx >= 0 && lx < b->Mx && ly >= 0 && ly < b->My && lz >= 0 && lz < b->Mz ) VALUE((*b),lx,ly,lz) = 0; } } void distrans3(b, v, ok) /* Distance transform in 3D */ BinaryImage *b; /* input */ IntImage *v; /* output */ int *ok; { register int x, y, z; int infinity, q; /* allocate v same size as b */ v->Mx = b->Mx; v->My = b->My; v->Mz = b->Mz; allocIntImage(v, ok); if(! (*ok)) return; /* compute largest possible distance */ infinity = (int) ceil( ((double) STEP3) * sqrt( ((double) b->Mx) * b->Mx + ((double) b->My) * b->My + ((double) b->Mz) * b->Mz)); /* Forward pass: Top to Bottom; Back to Front; Left to Right. */ for(z=0;zMz;z++) { R_CheckUserInterrupt(); for(y=0;yMy;y++) { for(x=0;xMx;x++) { if(VALUE((*b),x,y,z) == 0) VALUE((*v),x,y,z) = 0; else { q = infinity; #define INTERVAL(W, DW, MW) \ ((DW == 0) || (DW == -1 && W > 0) || (DW == 1 && W < MW - 1)) #define BOX(X,Y,Z,DX,DY,DZ) \ (INTERVAL(X,DX,v->Mx) && INTERVAL(Y,DY,v->My) && INTERVAL(Z,DZ,v->Mz)) #define TEST(DX,DY,DZ,DV) \ if(BOX(x,y,z,DX,DY,DZ) && q > VALUE((*v),x+DX,y+DY,z+DZ) + DV) \ q = VALUE((*v),x+DX,y+DY,z+DZ) + DV /* same row */ TEST(-1, 0, 0, STEP1); /* same plane */ TEST(-1,-1, 0, STEP2); TEST( 0,-1, 0, STEP1); TEST( 1,-1, 0, STEP2); /* previous plane */ TEST( 1, 1,-1, STEP3); TEST( 0, 1,-1, STEP2); TEST(-1, 1,-1, STEP3); TEST( 1, 0,-1, STEP2); TEST( 0, 0,-1, STEP1); TEST(-1, 0,-1, STEP2); TEST( 1,-1,-1, STEP3); TEST( 0,-1,-1, STEP2); TEST(-1,-1,-1, STEP3); VALUE((*v),x,y,z) = q; } } } } /* Backward pass: Bottom to Top; Front to Back; Right to Left. 
*/ for(z = b->Mz - 1; z >= 0; z--) { R_CheckUserInterrupt(); for(y = b->My - 1; y >= 0; y--) { for(x = b->Mx - 1; x >= 0; x--) { if((q = VALUE((*v),x,y,z)) != 0) { /* same row */ TEST(1, 0, 0, STEP1); /* same plane */ TEST(-1, 1, 0, STEP2); TEST( 0, 1, 0, STEP1); TEST( 1, 1, 0, STEP2); /* plane below */ TEST( 1, 1, 1, STEP3); TEST( 0, 1, 1, STEP2); TEST(-1, 1, 1, STEP3); TEST( 1, 0, 1, STEP2); TEST( 0, 0, 1, STEP1); TEST(-1, 0, 1, STEP2); TEST( 1,-1, 1, STEP3); TEST( 0,-1, 1, STEP2); TEST(-1,-1, 1, STEP3); VALUE((*v),x,y,z) = q; } } } } } void hist3d(v, vside, count) /* compute histogram of all values in *v using count->n histogram cells ranging from count->t0 to count->t1 and put results in count->num */ IntImage *v; double vside; Itable *count; { register int i, j, k; register int *ip; register double scale, width; /* relationship between distance transform units and physical units */ scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); for(i = 0; i < count->n ; i++) { (count->num)[i] = 0; (count->denom)[i] = v->length; } for(i = v->length, ip = v->data; i; i--, ip++) { k = (int) ceil((*ip * scale - count->t0)/width); k = MAX(k, 0); for(j = k; j < count->n; j++) (count->num)[j]++; } } void hist3dminus(v, vside, count) /* minus sampling */ IntImage *v; double vside; Itable *count; { register int x, y, z, val, border, bx, by, bz, byz, j, kbord, kval; register double scale, width; DEBUGMESSAGE("inside hist3dminus\n") scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); /* table is assumed to have been initialised in MakeItable */ for(z = 0; z < v->Mz; z++) { bz = MIN(z + 1, v->Mz - z); for(y = 0; y < v->My; y++) { by = MIN(y + 1, v->My - y); byz = MIN(by, bz); for(x = 0; x < v->Mx; x++) { bx = MIN(x + 1, v->My - x); border = MIN(bx, byz); kbord = (int) floor((vside * border - count->t0)/width); kbord = MIN(kbord, count->n - 1); /* denominator counts all voxels with distance to boundary >= r */ if(kbord >= 0) for(j = 0; j <= kbord; 
j++) (count->denom)[j]++; val = VALUE((*v), x, y, z); kval = (int) ceil((val * scale - count->t0)/width); kval = MAX(kval, 0); #ifdef DEBUG /* Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", vside * border, kbord, scale * val, kval); */ #endif /* numerator counts all voxels with distance to boundary >= r and distance to nearest point <= r */ if(kval <= kbord) for(j = kval; j <= kbord; j++) (count->num)[j]++; } } } DEBUGMESSAGE("leaving hist3dminus\n") } void hist3dCen(v, vside, count) /* four censoring-related histograms */ IntImage *v; double vside; H4table *count; { register int x, y, z, val, border, bx, by, bz, byz, kbord, kval; register double scale, width, realborder, realval; DEBUGMESSAGE("inside hist3dCen\n") scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); /* table is assumed to have been initialised in MakeH4table */ for(z = 0; z < v->Mz; z++) { bz = MIN(z + 1, v->Mz - z); for(y = 0; y < v->My; y++) { by = MIN(y + 1, v->My - y); byz = MIN(by, bz); for(x = 0; x < v->Mx; x++) { bx = MIN(x + 1, v->My - x); border = MIN(bx, byz); realborder = vside * border; kbord = (int) floor((realborder - count->t0)/width); val = VALUE((*v), x, y, z); realval = scale * val; kval = (int) ceil((realval - count->t0)/width); /* this could exceed array limits; that will be detected below */ #ifdef DEBUG Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", realborder, kbord, realval, kval); #endif if(realval <= realborder) { /* observation is uncensored; increment all four histograms */ if(kval >= count->n) ++(count->upperobs); else if(kval >= 0) { (count->obs)[kval]++; (count->nco)[kval]++; } if(kbord >= count->n) ++(count->uppercen); else if(kbord >= 0) { (count->cen)[kbord]++; (count->ncc)[kbord]++; } } else { /* observation is censored; increment only two histograms */ kval = MIN(kval, kbord); if(kval >= count->n) ++(count->upperobs); else if(kval >= 0) (count->obs)[kval]++; if(kbord >= count->n) ++(count->uppercen); else if(kbord >= 0) 
(count->cen)[kbord]++; } } } } DEBUGMESSAGE("leaving hist3dCen\n") } /* CALLING ROUTINES */ void phatminus(p, n, box, vside, count) Point *p; int n; Box *box; double vside; Itable *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatminus\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\ninto distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3dminus...") hist3dminus(&v, vside, count); DEBUGMESSAGE("out of hist3dminus\n") freeIntImage(&v); } } void phatnaive(p, n, box, vside, count) Point *p; int n; Box *box; double vside; Itable *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatnaive\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\n into distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3d..."); hist3d(&v, vside, count); DEBUGMESSAGE("out of hist3d\n") freeIntImage(&v); } } void p3hat4(p, n, box, vside, count) Point *p; int n; Box *box; double vside; H4table *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatminus\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\ninto distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3dminus...") hist3dCen(&v, vside, count); DEBUGMESSAGE("out of hist3dminus\n") freeIntImage(&v); } } spatstat/src/digber.c0000644000176200001440000000222313166361223014306 0ustar liggesusers/* digber.c Diggle-Berman function J used in bandwidth selection J(r) = \int_0^(2r) phi(t, r) dK(t) where K is the K-function and phi(t, r) = 2 r^2 * (acos(y) - y sqrt(1 - y^2)) where y = t/(2r). $Revision: 1.7 $ $Date: 2013/08/24 11:13:43 $ */ #include double sqrt(), acos(); /* r is the vector of distance values, starting from 0, with length nr, equally spaced. 
dK = diff(K) is the vector of increments of the K-function, with length ndK = nr-1. values of J are computed only up to max(r)/2 nrmax = floor(nr/2). */ void digberJ(r, dK, nr, nrmax, ndK, J) /* inputs */ int *nr, *nrmax, *ndK; double *r, *dK; /* output */ double *J; { int i, j, Ni, NdK; double ri, twori, tj, y, phiy, integral; Ni = *nrmax; NdK = *ndK; J[0] = 0.0; for(i = 1; i < Ni; i++) { ri = r[i]; twori = 2 * ri; integral = 0.0; for(j = 0; j < NdK; j++) { tj = r[j]; y = tj/twori; if(y >= 1.0) break; phiy = acos(y) - y * sqrt(1 - y * y); integral += phiy * dK[j]; } J[i] = 2 * ri * ri * integral; } } spatstat/src/lennard.c0000755000176200001440000000712113166361223014502 0ustar liggesusers#include #include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Lennard-Jones process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Lennard { double sigma; double epsilon; double sigma2; /* sigma^2 */ double foureps; /* 4 * epsilon */ double d2min; /* minimum value of d^2 which yields nonzero intensity */ double d2max; /* maximum value of d^2 which has nontrivial contribution */ double *period; int per; } Lennard; /* MAXEXP is intended to be the largest x such that exp(-x) != 0 although the exact value is not needed */ #define MAXEXP (-log(DOUBLE_XMIN)) #define MINEXP (log(1.001)) /* initialiser function */ Cdata *lennardinit(state, model, algo) State state; Model model; Algor algo; { Lennard *lennard; double sigma2, foureps, minfrac, maxfrac; lennard = (Lennard *) R_alloc(1, sizeof(Lennard)); /* Interpret model parameters*/ lennard->sigma = model.ipar[0]; lennard->epsilon = model.ipar[1]; lennard->period = model.period; /* constants */ lennard->sigma2 = sigma2 = pow(lennard->sigma, 2); lennard->foureps = foureps = 4 * lennard->epsilon; /* thresholds where the interaction becomes trivial */ minfrac = pow(foureps/MAXEXP, (double) 1.0/6.0); if(minfrac > 0.5) minfrac = 0.5; maxfrac = 
pow(foureps/MINEXP, (double) 1.0/3.0); if(maxfrac < 2.0) maxfrac = 2.0; lennard->d2min = sigma2 * minfrac; lennard->d2max = sigma2 * maxfrac; /* periodic boundary conditions? */ lennard->per = (model.period[0] > 0.0); return((Cdata *) lennard); } /* conditional intensity evaluator */ double lennardcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, ratio6, pairsum, cifval; double sigma2, d2max, d2min; double *period; Lennard *lennard; DECLARE_CLOSE_D2_VARS; lennard = (Lennard *) cdata; sigma2 = lennard->sigma2; d2max = lennard->d2max; d2min = lennard->d2min; period = lennard->period; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(lennard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,d2max,d2)) { if(d2 < d2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } if(ixp1 < npts) { for(j=ixp1; j 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], d2max, d2)) { if(d2 < lennard->d2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } if(ixp1 < npts) { for(j=ixp1; jd2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } } cifval *= exp(lennard->foureps * pairsum); return cifval; } Cifns LennardCifns = { &lennardinit, &lennardcif, (updafunptr) NULL, NO}; spatstat/src/Egeyer.c0000755000176200001440000000447113166361223014304 0ustar liggesusers#include #include #include "chunkloop.h" #include "looptest.h" /* Egeyer.c $Revision: 1.6 $ $Date: 2014/09/19 00:53:20 $ Part of C implementation of 'eval' for Geyer interaction Calculates change in saturated count (xquad, yquad): quadscheme (xdata, ydata): data tdata: 
unsaturated pair counts for data pattern quadtodata[j] = i if quad[j] == data[i] (indices start from ZERO) Assumes point patterns are sorted in increasing order of x coordinate */ double sqrt(); void Egeyer(nnquad, xquad, yquad, quadtodata, nndata, xdata, ydata, tdata, rrmax, ssat, result) /* inputs */ int *nnquad, *nndata, *quadtodata, *tdata; double *xquad, *yquad, *xdata, *ydata, *rrmax, *ssat; /* output */ double *result; { int nquad, ndata, maxchunk, j, i, ileft, dataindex, isdata; double xquadj, yquadj, rmax, sat, r2max, r2maxpluseps, xleft, dx, dy, dx2, d2; double tbefore, tafter, satbefore, satafter, delta, totalchange; nquad = *nnquad; ndata = *nndata; rmax = *rrmax; sat = *ssat; if(nquad == 0 || ndata == 0) return; r2max = rmax * rmax; r2maxpluseps = r2max + EPSILON(r2max); ileft = 0; OUTERCHUNKLOOP(j, nquad, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nquad, maxchunk, 65536) { totalchange = 0.0; xquadj = xquad[j]; yquadj = yquad[j]; dataindex = quadtodata[j]; isdata = (dataindex >= 0); /* adjust starting point */ xleft = xquadj - rmax; while((xdata[ileft] < xleft) && (ileft+1 < ndata)) ++ileft; /* process until dx > rmax */ for(i=ileft; i < ndata; i++) { dx = xdata[i] - xquadj; dx2 = dx * dx; if(dx2 > r2maxpluseps) break; if(i != dataindex) { dy = ydata[i] - yquadj; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* effect of adding dummy point j or negative effect of removing data point */ tbefore = tdata[i]; tafter = tbefore + ((isdata) ? -1 : 1); /* effect on saturated values */ satbefore = (double) ((tbefore < sat)? tbefore : sat); satafter = (double) ((tafter < sat)? tafter : sat); /* sum changes over all i */ delta = satafter - satbefore; totalchange += ((isdata) ? 
-delta : delta); } } } result[j] = totalchange; } } } spatstat/src/multihard.c0000755000176200001440000000735713166361223015063 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Hardcore process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct MultiHard { int ntypes; double *hc; /* hc[i,j] = hc[j+ntypes*i] for i,j = 0... ntypes-1 */ double *hc2; /* squared radii */ double range2; /* square of interaction range */ double *period; int per; } MultiHard; /* initialiser function */ Cdata *multihardinit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2; double h, h2, range2; MultiHard *multihard; multihard = (MultiHard *) R_alloc(1, sizeof(MultiHard)); multihard->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multihard->hc = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multihard->hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); /* Copy and process model parameters*/ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { h = model.ipar[i + j*ntypes]; h2 = h * h; MAT(multihard->hc, i, j, ntypes) = h; MAT(multihard->hc2, i, j, ntypes) = h2; if(range2 > h2) range2 = h2; } } multihard->range2 = range2; /* periodic boundary conditions? 
*/ multihard->period = model.period; multihard->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multihard); } /* conditional intensity evaluator */ double multihardcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, ix, ixp1, j, mrk, mrkj; int *marks; double *x, *y; double u, v; double d2, range2, cifval; double *period; MultiHard *multihard; DECLARE_CLOSE_D2_VARS; multihard = (MultiHard *) cdata; range2 = multihard->range2; period = multihard->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multihard->ntypes; #ifdef DEBUG Rprintf("scanning data\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multihard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } if(ixp1 < npts) { for(j=ixp1; jhc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } if(ixp1 < npts) { for(j=ixp1; jhc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiHardCifns = { &multihardinit, &multihardcif, (updafunptr) NULL, YES}; spatstat/src/nn3Ddist.c0000755000176200001440000001733313166361223014553 0ustar liggesusers/* nn3Ddist.c Nearest Neighbour Distances in 3D $Revision: 1.11 $ $Date: 2013/11/03 03:42:48 $ THE FOLLOWING FUNCTIONS ASSUME THAT z IS SORTED IN ASCENDING ORDER nnd3D 
Nearest neighbour distances nnw3D Nearest neighbours (id) nndw3D Nearest neighbours (id) and distances nnXdw3D Nearest neighbour from one list to another nnXEdw3D Nearest neighbour from one list to another, with overlaps knnd3D k-th nearest neighbour distances knnw3D k-th nearest neighbours (id) knndw3D k-th nearest neighbours (id) and distances */ #undef SPATSTAT_DEBUG #include #include #include #include "chunkloop.h" #include "yesno.h" double sqrt(); /* .......... Single point pattern ...............................*/ #undef FNAME #undef DIST #undef WHICH /* nnd3D: returns nn distance */ #define FNAME nnd3D #define DIST #include "nn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* nnw3D: returns id of nearest neighbour */ #define FNAME nnw3D #define WHICH #include "nn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* nndw3D: returns nn distance .and. id of nearest neighbour */ #define FNAME nndw3D #define DIST #define WHICH #include "nn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* .......... 
Two point patterns ...............................*/ /* common interface */ void nnX3Dinterface(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *n1, *n2, *id1, *id2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; /* options */ int *exclude, *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; { void nnXdw3D(), nnXd3D(), nnXw3D(); void nnXEdw3D(), nnXEd3D(), nnXEw3D(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { nnXdw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(di) { nnXd3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(wh) { nnXw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } } else { if(di && wh) { nnXEdw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(di) { nnXEd3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(wh) { nnXEw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } } } /* nnXdw3D: for TWO point patterns X and Y, find the nearest neighbour (from each point of X to the nearest point of Y) returning both the distance and the identifier Requires both patterns to be sorted in order of increasing z coord */ #define FNAME nnXdw3D #define DIST #define WHICH #undef EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXd3D: returns distance only */ #define FNAME nnXd3D #define DIST #undef EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXw3D: returns identifier only */ #define FNAME nnXw3D #define WHICH #undef EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* .......... 
Two point patterns with exclusion ........................*/ /* nnXEdw3D: similar to nnXdw3D but allows X and Y to include common points (which are not to be counted as neighbours) Code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. Requires both patterns to be sorted in order of increasing z coord */ #define FNAME nnXEdw3D #define DIST #define WHICH #define EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXEd3D: returns distances only */ #define FNAME nnXEd3D #define DIST #define EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXEw3D: returns identifiers only */ #define FNAME nnXEw3D #define WHICH #define EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* .......... Single point pattern ...............................*/ /* .......... k-th nearest neighbours ...............................*/ /* knnd3D nearest neighbour distances 1:kmax */ #define FNAME knnd3D #define DIST #include "knn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* knnw3D nearest neighbour indices 1:kmax */ #define FNAME knnw3D #define WHICH #include "knn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* knndw3D nearest neighbours 1:kmax returns distances and indices */ #define FNAME knndw3D #define DIST #define WHICH #include "knn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* .......... Two point patterns ...............................*/ /* .......... 
k-th nearest neighbours ...............................*/ /* general interface */ void knnX3Dinterface(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, kmax, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; int *id1, *id2; int *kmax; /* options */ int *exclude, *wantdist, *wantwhich; /* output matrices (npoints * kmax) in ROW MAJOR order */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { void knnXdw3D(), knnXd3D(), knnXw3D(); void knnXEdw3D(), knnXEd3D(), knnXEw3D(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { knnXdw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(di) { knnXd3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(wh) { knnXw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } } else { if(di && wh) { knnXEdw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(di) { knnXEd3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(wh) { knnXEw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } } } #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXdw3D nearest neighbours 1:kmax between two point patterns returns distances and indices */ #define FNAME knnXdw3D #define DIST #define WHICH #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXd3D nearest neighbours 1:kmax between two point patterns returns distances */ #define FNAME knnXd3D #define DIST #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXw3D nearest neighbours 1:kmax between two point patterns returns indices */ #define FNAME knnXw3D #define WHICH #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* .......... Two point patterns with exclusion ..........................*/ /* .......... 
k-th nearest neighbours ...............................*/ /* knnXEdw3D nearest neighbours 1:kmax between two point patterns with exclusion returns distances and indices */ #define FNAME knnXEdw3D #define DIST #define WHICH #define EXCLUDE #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXEd3D nearest neighbours 1:kmax between two point patterns with exclusion returns distances */ #define FNAME knnXEd3D #define DIST #define EXCLUDE #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXEw3D nearest neighbours 1:kmax between two point patterns with exclusion returns indices */ #define FNAME knnXEw3D #define WHICH #define EXCLUDE #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE spatstat/src/areadiff.c0000755000176200001440000001533313166361223014624 0ustar liggesusers/* areadiff.c Area difference function $Revision: 1.14 $ $Date: 2013/09/18 04:09:24 $ A(x,r) = area of disc b(0,r) not covered by discs b(x_i,r) for x_i in x Area estimated by point-counting on a fine grid For use in area-interaction model and related calculations */ #undef DEBUG #include #include #include #include "chunkloop.h" #include "constants.h" /* Original version areadiff() 1 point u No trimming of discs */ void areadiff(rad,x,y,nn,ngrid,answer) /* inputs */ double *rad; /* radius */ double *x, *y; /* coordinate vectors for point pattern */ int *nn; /* length of vectors x and y */ int *ngrid; /* dimensions of point-counting grid */ /* output */ double *answer; /* computed area */ { double dx, dy, xg, yg, r, r2, a2, b2, xdif, ydif; int i, j, k, m, n, count, covered; r = *rad; r2 = r * r; n = *nn; m = *ngrid; dx = dy = 2 * r / (m-1); count = 0; /* run through grid points */ for(i = 0, xg = -r; i < m; i++, xg += dx) { a2 = r2 - xg *xg; for(j = 0, yg = -r; j < m; j++, yg += dy) { /* test for inside disc */ if(yg * yg < a2) { #ifdef DEBUG Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg); #endif /* run through data 
points seeking one close to (xy, yg) */ covered = 0; if(n > 0) { for(k = 0; k < n; k++) { #ifdef DEBUG Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]); #endif xdif = x[k] - xg; b2 = r2 - xdif * xdif; if(b2 > 0) { ydif = y[k] - yg; if(b2 - ydif * ydif > 0) { #ifdef DEBUG Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", k, k, x[k], y[k]); #endif covered = 1; break; } } } } if(covered == 0) { ++count; #ifdef DEBUG Rprintf("Not covered; incrementing count\n"); #endif } } } } #ifdef DEBUG Rprintf("Count = %d\n", count); #endif /* calculate area */ *answer = ((double) count) * dx * dy; } /* similar function, handles multiple values of 'r' */ void areadifs(rad,nrads,x,y,nxy,ngrid,answer) /* inputs */ double *rad; /* vector of radii */ int *nrads; /* length of 'rads' */ double *x, *y; /* coordinate vectors for point pattern */ int *nxy; /* length of vectors x and y */ int *ngrid; /* dimensions of point-counting grid */ /* output */ double *answer; /* computed areas (vector of length 'nrads') */ { double dx, dy, xg, yg, r, r2, a2, b2, xdif, ydif; int i, j, k, l, m, n, nr, m0, count, covered, maxchunk; n = *nxy; nr = *nrads; m = *ngrid; /* run through radii in chunks of 2^14 */ OUTERCHUNKLOOP(l, nr, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(l, nr, maxchunk, 16384) { r = rad[l]; if(r == 0.0) { answer[l] = 0.0; } else if(n == 0) { answer[l] = M_PI * r * r; } else { r2 = r * r; dx = dy = 2 * r / (m-1); count = 0; /* run through grid points in disc of radius r */ for(i = 0, xg = -r; i < m; i++, xg += dx) { a2 = r2 - xg * xg; m0 = (a2 > 0.0) ? 
floor(sqrt(a2)/dy) : 0; for(j = -m0, yg = -m0 * dy; j <= m0; j++, yg += dy) { #ifdef DEBUG Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg); #endif /* run through data points seeking one close to (xy, yg) */ covered = 0; for(k = 0; k < n; k++) { #ifdef DEBUG Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]); #endif xdif = x[k] - xg; b2 = r2 - xdif * xdif; if(b2 > 0) { ydif = y[k] - yg; if(b2 - ydif * ydif > 0) { #ifdef DEBUG Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", k, k, x[k], y[k]); #endif covered = 1; break; } } } /* end of loop through data points */ if(covered == 0) { ++count; #ifdef DEBUG Rprintf("Not covered; incrementing count\n"); #endif } } } /* end of loop over grid points */ #ifdef DEBUG Rprintf("Count = %d\n", count); #endif /* calculate area for this value of r*/ answer[l] = ((double) count) * dx * dy; } /* end of if(r==0).. else {...} */ } } } /* Modified version multiple test points u discs constrained inside a rectangle */ void areaBdif(rad,nrads,x,y,nxy,ngrid,x0,y0,x1,y1,answer) /* inputs */ double *rad; /* vector of radii */ int *nrads; /* length of 'rads' */ double *x, *y; /* coordinate vectors for point pattern */ int *nxy; /* length of vectors x and y */ int *ngrid; /* dimensions of point-counting grid */ double *x0,*y0,*x1,*y1; /* constraint rectangle */ /* output */ double *answer; /* computed areas (vector of length 'nrads') */ { double dx, dy, xg, yg, r, r2, a, a2, b2, xdif, ydif; double xleft, xright, ylow, yhigh; double xmin, ymin, xmax, ymax; int i, j, k, l, m, n, nr, ileft, iright, mlow, mhigh, count, covered; n = *nxy; nr = *nrads; m = *ngrid; xmin = *x0; ymin = *y0; xmax = *x1; ymax = *y1; /* run through radii */ for(l = 0; l < nr; l++) { r = rad[l]; if(r == 0.0) { answer[l] = 0.0; } else if (n == 0) { answer[l]= M_PI * r * r; } else { r2 = r * r; dx = dy = 2 * r / (m-1); count = 0; /* run through grid points in disc intersected with box */ xleft = (xmin > -r) ? xmin : -r; xright = (xmax < r) ? 
xmax : r; ileft = ceil(xleft/dx); iright = floor(xright/dx); if(ileft <= iright) { for(i = ileft, xg = ileft * dx; i <= iright; i++, xg += dx) { a2 = r2 - xg * xg; a = (a2 > 0) ? sqrt(a2): 0.0; yhigh = (ymax < a) ? ymax: a; ylow = (ymin > -a) ? ymin: -a; mhigh = floor(yhigh/dy); mlow = ceil(ylow/dy); if(mlow <= mhigh) { for(j = mlow, yg = mlow * dy; j <= mhigh; j++, yg += dy) { #ifdef DEBUG Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg); #endif /* run through data points seeking one close to (xy, yg) */ covered = 0; for(k = 0; k < n; k++) { #ifdef DEBUG Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]); #endif xdif = x[k] - xg; b2 = r2 - xdif * xdif; if(b2 > 0) { ydif = y[k] - yg; if(b2 - ydif * ydif > 0) { #ifdef DEBUG Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", k, k, x[k], y[k]); #endif covered = 1; break; } } } /* end of loop over data points */ if(covered == 0) { ++count; #ifdef DEBUG Rprintf("Not covered; incrementing count\n"); #endif } } } } } /* end of loop over grid points */ #ifdef DEBUG Rprintf("Count = %d\n", count); #endif /* calculate area for this value of r*/ answer[l] = ((double) count) * dx * dy; } /* end of if(r==0).. 
else {...} */ } /* end of loop over r values */ } spatstat/src/segdens.c0000644000176200001440000000234713166361223014511 0ustar liggesusers#include #include #include #include /* segdens.c Convolution of segments with Gaussian kernel Adrian Baddeley, 02 dec 2016 Licence: GPL >= 2.0 */ #define DNORM(X, SIG) dnorm((X), (double) 0.0, (SIG), FALSE) #define PNORM(X, SIG) pnorm((X), (double) 0.0, (SIG), TRUE, FALSE) void segdens(sigma, ns, xs, ys, alps, lens, np, xp, yp, z) double *sigma; /* bandwidth */ int *ns; /* number of line segments */ double *xs, *ys, *alps, *lens; /* first endpoint, angle, length */ int *np; /* number of pixels or test locations */ double *xp, *yp; /* pixel coordinates */ double *z; /* result, assumed initially 0 */ { int i, j, Ns, Np; double Sigma; double xsi, ysi, angi, leni, cosi, sini; double dx, dy, u1, u2; Ns = *ns; Np = *np; Sigma = *sigma; for(i = 0; i < Ns; i++) { R_CheckUserInterrupt(); xsi = xs[i]; ysi = ys[i]; angi = alps[i]; leni = lens[i]; cosi = cos(angi); sini = sin(angi); for(j = 0; j < Np; j++) { dx = xp[j] - xsi; dy = yp[j] - ysi; u1 = dx * cosi + dy * sini; u2 = -dx * sini + dy * cosi; z[j] += DNORM(u2, Sigma) * (PNORM(u1, Sigma) - PNORM(u1-leni, Sigma)); } } } spatstat/src/getcif.c0000755000176200001440000000331413166361223014320 0ustar liggesusers#include #include "methas.h" void fexitc(const char *msg); extern Cifns AreaIntCifns, BadGeyCifns, DgsCifns, DiggraCifns, FikselCifns, GeyerCifns, HardcoreCifns, LennardCifns, LookupCifns, SoftcoreCifns, StraussCifns, StraussHardCifns, MultiStraussCifns, MultiStraussHardCifns, MultiHardCifns, TripletsCifns, PenttinenCifns; Cifns NullCifns = NULL_CIFNS; typedef struct CifPair { char *name; Cifns *p; } CifPair; CifPair CifTable[] = { {"areaint", &AreaIntCifns}, {"badgey", &BadGeyCifns}, {"dgs", &DgsCifns}, {"diggra", &DiggraCifns}, {"geyer", &GeyerCifns}, {"fiksel", &FikselCifns}, {"hardcore", &HardcoreCifns}, {"lookup", &LookupCifns}, {"lennard", &LennardCifns}, {"multihard", 
&MultiHardCifns}, {"penttinen", &PenttinenCifns}, {"sftcr", &SoftcoreCifns}, {"strauss", &StraussCifns}, {"straush", &StraussHardCifns}, {"straussm", &MultiStraussCifns}, {"straushm", &MultiStraussHardCifns}, {"triplets", &TripletsCifns}, {(char *) NULL, (Cifns *) NULL} }; Cifns getcif(cifname) char *cifname; { int i; CifPair cp; for(i = 0; CifTable[i].name; i++) { cp = CifTable[i]; if(strcmp(cifname, cp.name) == 0) return(*(cp.p)); } fexitc("Unrecognised cif name; bailing out.\n"); /* control never passes to here, but compilers don't know that */ return(NullCifns); } /* R interface function, to check directly whether cif is recognised */ void knownCif(cifname, answer) char** cifname; int* answer; { int i; CifPair cp; for(i = 0; CifTable[i].name; i++) { cp = CifTable[i]; if(strcmp(*cifname, cp.name) == 0) { *answer = 1; return; } } *answer = 0; return; } spatstat/src/straussm.c0000755000176200001440000001300313166361223014734 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Strauss process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct MultiStrauss { int ntypes; double *gamma; /* gamma[i,j] = gamma[i+ntypes*j] for i,j = 0... ntypes-1 */ double *rad; /* rad[i,j] = rad[j+ntypes*i] for i,j = 0... 
ntypes-1 */ double *rad2; /* squared radii */ double range2; /* square of interaction range */ double *loggamma; /* logs of gamma[i,j] */ double *period; int *hard; /* hard[i,j] = 1 if gamma[i,j] ~~ 0 */ int *kount; /* space for kounting pairs of each type */ int per; } MultiStrauss; /* initialiser function */ Cdata *straussminit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2, hard; double g, r, r2, logg, range2; MultiStrauss *multistrauss; multistrauss = (MultiStrauss *) R_alloc(1, sizeof(MultiStrauss)); multistrauss->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multistrauss->gamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->rad = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multistrauss->rad2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->loggamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->hard = (int *) R_alloc((size_t) n2, sizeof(int)); /* Allocate scratch space for counts of each pair of types */ multistrauss->kount = (int *) R_alloc((size_t) n2, sizeof(int)); /* Copy and process model parameters*/ /* ipar will contain n^2 gamma values followed by n^2 values of r */ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { g = model.ipar[i + j*ntypes]; r = model.ipar[n2 + i + j*ntypes]; r2 = r * r; hard = (g < DOUBLE_EPS); logg = (hard) ? 0 : log(g); MAT(multistrauss->gamma, i, j, ntypes) = g; MAT(multistrauss->rad, i, j, ntypes) = r; MAT(multistrauss->hard, i, j, ntypes) = hard; MAT(multistrauss->loggamma, i, j, ntypes) = logg; MAT(multistrauss->rad2, i, j, ntypes) = r2; if(r2 > range2) range2 = r2; } } multistrauss->range2 = range2; /* periodic boundary conditions? 
*/ multistrauss->period = model.period; multistrauss->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multistrauss); } /* conditional intensity evaluator */ double straussmcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, kount, ix, ixp1, j, mrk, mrkj, m1, m2; int *marks; double *x, *y; double u, v, lg; double d2, cifval; double range2; double *period; MultiStrauss *multistrauss; DECLARE_CLOSE_D2_VARS; multistrauss = (MultiStrauss *) cdata; range2 = multistrauss->range2; period = multistrauss->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multistrauss->ntypes; #ifdef DEBUG Rprintf("initialising pair counts\n"); #endif /* initialise pair counts */ for(m1 = 0; m1 < ntypes; m1++) for(m2 = 0; m2 < ntypes; m2++) MAT(multistrauss->kount, m1, m2, ntypes) = 0; /* compile pair counts */ #ifdef DEBUG Rprintf("compiling pair counts\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multistrauss->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } } #ifdef DEBUG Rprintf("multiplying cif factors\n"); #endif /* multiply 
cif value by pair potential */ for(m1 = 0; m1 < ntypes; m1++) { for(m2 = 0; m2 < ntypes; m2++) { kount = MAT(multistrauss->kount, m1, m2, ntypes); if(MAT(multistrauss->hard, m1, m2, ntypes)) { if(kount > 0) { cifval = 0.0; return(cifval); } } else { lg = MAT(multistrauss->loggamma, m1, m2, ntypes); cifval *= exp(lg * kount); } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiStraussCifns = { &straussminit, &straussmcif, (updafunptr) NULL, YES}; spatstat/NAMESPACE0000644000176200001440000027234113165362536013357 0ustar liggesusers# spatstat NAMESPACE file import(stats,graphics,grDevices,utils,methods) import(spatstat.utils,spatstat.data) import(polyclip,goftest) import(Matrix,nlme,rpart) importFrom(deldir, deldir,duplicatedxy,tile.list) importFrom(abind,abind) importFrom(tensor,tensor) importFrom(mgcv, gam,gam.control,anova.gam,formula.gam,predict.gam, print.gam,summary.gam,vcov.gam,s) # .... load dynamic library ..... # (native routines are now registered in init.c) useDynLib(spatstat, .registration=TRUE) # Do not edit the following. # It is generated automatically. # .................................................. # load dynamic library # (native routines are now registered in init.c) # .................................................. useDynLib(spatstat, .registration=TRUE) # .................................................. # Automatically-generated list of documented objects # .................................................. 
export("acedist.noshow") export("acedist.show") export("active.interactions") export("adaptcoef") export("adaptive.density") export("add.texture") export("addvar") export("adjust.ratfv") export("affine") export("affine.im") export("affine.layered") export("affine.linim") export("affine.linnet") export("affine.lpp") export("affine.owin") export("affine.ppp") export("affine.psp") export("affine.tess") export("affinexy") export("affinexypolygon") export("AIC.dppm") export("AIC.kppm") export("AIC.mppm") export("AIC.ppm") export("allElementsIdentical") export("allstats") export("alltypes") export("ang2rad") export("angles.psp") export("anova.lppm") export("anova.mppm") export("anova.ppm") export("anova.slrm") export("anycrossing.psp") export("anyDuplicated.ppp") export("anyDuplicated.ppx") export("anylapply") export("[<-.anylist") export("[.anylist") export("anylist") export("anyNA.im") export("anyNA.sparse3Darray") export("aperm.sparse3Darray") export("append.psp") export("ApplyConnected") export("applynbd") export("applyPolyclipArgs") export("applySparseEntries") export("apply.ssf") export("applytolayers") export("area") export("area.default") export("areadelta2") export("areaGain") export("areaGain.diri") export("areaGain.grid") export("AreaInter") export("areaLoss") export("areaLoss.diri") export("areaLoss.grid") export("area.owin") export("as.anylist") export("as.array.im") export("as.array.sparse3Darray") export("as.box3") export("as.boxx") export("as.breakpts") export("as.character.units") export("as.data.frame.bw.optim") export("as.data.frame.envelope") export("as.data.frame.fv") export("as.data.frame.hyperframe") export("as.data.frame.im") export("as.data.frame.linfun") export("as.data.frame.linim") export("as.data.frame.owin") export("as.data.frame.ppp") export("as.data.frame.ppx") export("as.data.frame.psp") export("as.data.frame.tess") export("as.double.im") export("as.function.fv") export("as.function.im") export("as.function.leverage.ppm") 
export("as.function.linfun") export("as.function.owin") export("as.function.rhohat") export("as.function.ssf") export("as.function.tess") export("as.fv") export("as.fv.bw.optim") export("as.fv.data.frame") export("as.fv.dppm") export("as.fv.fasp") export("as.fv.fv") export("as.fv.kppm") export("as.fv.matrix") export("as.fv.minconfit") export("as.hyperframe") export("as.hyperframe.anylist") export("as.hyperframe.data.frame") export("as.hyperframe.default") export("as.hyperframe.hyperframe") export("as.hyperframe.listof") export("as.hyperframe.ppx") export("as.im") export("as.im.data.frame") export("as.im.default") export("as.im.distfun") export("as.im.function") export("as.im.funxy") export("as.im.im") export("as.im.leverage.ppm") export("as.im.linim") export("as.imlist") export("as.im.matrix") export("as.im.nnfun") export("as.im.owin") export("as.im.ppp") export("as.im.scan.test") export("as.im.Smoothfun") export("as.im.ssf") export("as.im.tess") export("as.interact") export("as.interact.fii") export("as.interact.interact") export("as.interact.ppm") export("as.layered") export("as.layered.default") export("as.layered.listof") export("as.layered.msr") export("as.layered.ppp") export("as.layered.solist") export("as.layered.splitppp") export("as.linfun") export("as.linfun.linfun") export("as.linfun.linim") export("as.linfun.lintess") export("as.linim") export("as.linim.default") export("as.linim.linfun") export("as.linim.linim") export("as.linnet") export("as.linnet.linfun") export("as.linnet.linim") export("as.linnet.linnet") export("as.linnet.lintess") export("as.linnet.lpp") export("as.linnet.lppm") export("as.linnet.psp") export("as.list.hyperframe") export("as.listof") export("as.lpp") export("as.mask") export("as.mask.psp") export("as.matrix.im") export("as.matrix.owin") export("as.matrix.ppx") export("as.owin") export("as.owin.boxx") export("as.owin.data.frame") export("as.owin.default") export("as.owin.distfun") export("as.owin.dppm") export("as.owin.funxy") 
export("as.owin.im") export("as.owin.influence.ppm") export("as.owin.kppm") export("as.owin.layered") export("as.owin.leverage.ppm") export("as.owin.linfun") export("as.owin.linnet") export("as.owin.lintess") export("as.owin.lpp") export("as.owin.lppm") export("as.owin.msr") export("as.owin.nnfun") export("as.owin.owin") export("as.owin.ppm") export("as.owin.ppp") export("as.owin.psp") export("as.owin.quad") export("as.owin.quadratcount") export("as.owin.quadrattest") export("as.owin.rmhmodel") export("as.owin.tess") export("as.polygonal") export("as.ppm") export("as.ppm.dppm") export("as.ppm.kppm") export("as.ppm.lppm") export("as.ppm.ppm") export("as.ppm.profilepl") export("as.ppm.rppm") export("as.ppp") export("as.ppp.data.frame") export("as.ppp.default") export("as.ppp.influence.ppm") export("as.ppplist") export("as.ppp.lpp") export("as.ppp.matrix") export("as.ppp.ppp") export("as.ppp.psp") export("as.ppp.quad") export("as.ppp.ssf") export("as.psp") export("as.psp.data.frame") export("as.psp.default") export("as.psp.linnet") export("as.psp.lpp") export("as.psp.matrix") export("as.psp.owin") export("as.psp.psp") export("as.rectangle") export("assemble.plot.objects") export("as.solist") export("as.sparse3Darray") export("as.tess") export("as.tess.im") export("as.tess.list") export("as.tess.owin") export("as.tess.quadratcount") export("as.tess.quadrattest") export("as.tess.tess") export("as.units") export("AsymmDistance.psp") export("auc") export("auc.kppm") export("auc.lpp") export("auc.lppm") export("auc.ppm") export("auc.ppp") export("augment.msr") export("BadGey") export("BartCalc") export("bbEngine") export("bc") export("bc.ppm") export("bdist.pixels") export("bdist.points") export("bdist.tiles") export("bdry.mask") export("beachcolourmap") export("beachcolours") export("beginner") export("begins") export("berman.test") export("bermantest") export("bermantestEngine") export("berman.test.lpp") export("bermantest.lpp") export("berman.test.lppm") 
export("bermantest.lppm") export("berman.test.ppm") export("bermantest.ppm") export("berman.test.ppp") export("bermantest.ppp") export("bilinearform") export("bind.fv") export("bind.ratfv") export("bind.sparse3Darray") export("bits.test") export("blankcoefnames") export("blur") export("border") export("bounding.box") export("boundingbox") export("bounding.box3") export("boundingbox.default") export("boundingbox.im") export("boundingbox.owin") export("boundingbox.ppp") export("boundingbox.solist") export("bounding.box.xy") export("boundingcentre") export("boundingcentre.owin") export("boundingcentre.ppp") export("boundingcircle") export("boundingcircle.owin") export("boundingcircle.ppp") export("boundingradius") export("boundingradius.linnet") export("boundingradius.owin") export("boundingradius.ppp") export("box3") export("boxx") export("branchlabelfun") export("break.holes") export("breakpts") export("breakpts.from.r") export("bt.frame") export("bugfixes") export("bw.diggle") export("bw.frac") export("bw.optim") export("bw.pcf") export("bw.ppl") export("bw.relrisk") export("bw.scott") export("bw.smoothppp") export("bw.stoyan") export("by.im") export("by.ppp") export("calc.DR") export("calc.NNIR") export("calc.SAVE") export("calc.SIR") export("calc.TSE") export("cannot.update") export("cartesian") export("cauchy.estK") export("cauchy.estpcf") export("cbind.fv") export("cbind.hyperframe") export("CDF") export("CDF.density") export("cdf.test") export("cdf.test.lpp") export("cdf.test.lppm") export("cdf.test.mppm") export("cdf.test.ppm") export("cdf.test.ppp") export("cdf.test.slrm") export("cellmiddles") export("censtimeCDFest") export("centroid.owin") export("change.default.expand") export("checkbigmatrix") export("checkfields") export("check.finespacing") export("check.hist.lengths") export("check.mat.mul") export("checksolve") export("check.testfun") export("chop.tess") export("circdensity") export("circticks") export("circumradius") export("circumradius.linnet") 
export("circumradius.owin") export("circumradius.ppp") export("clarkevans") export("clarkevansCalc") export("clarkevans.test") export("clear.simplepanel") export("clf.test") export("clickbox") export("clickdist") export("clickjoin") export("clicklpp") export("clickpoly") export("clickppp") export("clip.infline") export("clippoly.psp") export("clip.psp") export("cliprect.psp") export("closepaircounts") export("closepairs") export("closepairs.pp3") export("closepairs.ppp") export("closethresh") export("closetriples") export("closing") export("closing.owin") export("closing.ppp") export("closing.psp") export("clusterfield") export("clusterfield.character") export("clusterfield.function") export("clusterfield.kppm") export("clusterfit") export("clusterkernel") export("clusterkernel.character") export("clusterkernel.kppm") export("clusterradius") export("clusterradius.character") export("clusterradius.kppm") export("clusterset") export("cobble.xy") export("codetime") export("coef.dppm") export("coef.fii") export("coef.kppm") export("coef.lppm") export("coef.mppm") export("coef.ppm") export("coef.slrm") export("coef.summary.fii") export("coef.summary.kppm") export("coef.summary.ppm") export("coef.vblogit") export("coerce.marks.numeric") export("col2hex") export("col.args.to.grey") export("collapse.anylist") export("collapse.fv") export("colourmap") export("colouroutputs<-") export("colouroutputs") export("commonGrid") export("commonPolyclipArgs") export("compareFit") export("compatible") export("compatible.fasp") export("compatible.fv") export("compatible.im") export("compatible.rat") export("compatible.units") export("compileCDF") export("compileK") export("compilepcf") export("complementarycolour") export("complement.owin") export("Complex.im") export("Complex.imlist") export("Complex.linim") export("concatxy") export("Concom") export("conform.imagelist") export("conform.ratfv") export("connected") export("connected.im") export("connected.linnet") 
export("connected.lpp") export("connected.owin") export("connected.ppp") export("conspire") export("contour.funxy") export("contour.im") export("contour.imlist") export("contour.listof") export("contour.objsurf") export("contour.ssf") export("convexhull") export("convexhull.xy") export("convexify") export("convolve.im") export("coords<-") export("coords") export("coords<-.ppp") export("coords.ppp") export("coords<-.ppx") export("coords.ppx") export("corners") export("countends") export("countingweights") export("covering") export("CressieReadName") export("CressieReadStatistic") export("CressieReadSymbol") export("crossdist") export("crossdist.default") export("crossdist.lpp") export("crossdist.pp3") export("crossdist.ppp") export("crossdist.ppx") export("crossdist.psp") export("crossing.linnet") export("crossing.psp") export("crosspaircounts") export("crosspairquad") export("crosspairs") export("crosspairs.pp3") export("crosspairs.ppp") export("cut.im") export("cut.lpp") export("cut.ppp") export("CVforPCF") export("damaged.ppm") export("datagen.rpoisppOnLines") export("datagen.runifpointOnLines") export("datagen.runifpoisppOnLines") export("data.mppm") export("data.ppm") export("dclf.progress") export("dclf.sigtrace") export("dclf.test") export("default.clipwindow") export("default.dummy") export("default.expand") export("default.linnet.tolerance") export("default.ntile") export("default.n.tiling") export("default.rmhcontrol") export("delaunay") export("delaunay.distance") export("delaunayDistance") export("delaunay.network") export("delaunayNetwork") export("deletebranch") export("deletebranch.linnet") export("deletebranch.lpp") export("deltametric") export("deltasuffstat") export("densitycrossEngine") export("density.lpp") export("densitypointsEngine") export("density.ppp") export("density.ppplist") export("density.psp") export("density.splitppp") export("density.splitppx") export("deriv.fv") export("detpointprocfamilyfun") export("deviance.lppm") 
export("deviance.ppm") export("Deviation") export("dfbetas.ppm") export("dflt.redraw") export("dg.envelope") export("dg.progress") export("dg.sigtrace") export("dg.test") export("diagnose.ppm") export("diagnose.ppm.engine") export("[.diagramobj") export("diagramobj") export("diameter") export("diameter.box3") export("diameter.boxx") export("diameter.linnet") export("diameter.owin") export("digestCovariates") export("DiggleGatesStibbard") export("DiggleGratton") export("digital.volume") export("dilated.areas") export("dilate.owin") export("dilation") export("dilationAny") export("dilation.owin") export("dilation.ppp") export("dilation.psp") export("dim.detpointprocfamily") export("dim.fasp") export("dimhat") export("dim.hyperframe") export("dim.im") export("dim.msr") export("dimnames<-.fasp") export("dimnames.fasp") export("dimnames.msr") export("dimnames<-.sparse3Darray") export("dimnames.sparse3Darray") export("dim.owin") export("dim<-.sparse3Darray") export("dim.sparse3Darray") export("dirichlet") export("dirichletAreas") export("dirichlet.edges") export("dirichletEdges") export("dirichlet.network") export("dirichletNetwork") export("dirichlet.vertices") export("dirichletVertices") export("dirichlet.weights") export("dirichletWeights") export("disc") export("discpartarea") export("discretise") export("discs") export("dist2dpath") export("distcdf") export("distfun") export("distfun.lpp") export("distfun.owin") export("distfun.ppp") export("distfun.psp") export("distmap") export("distmap.owin") export("distmap.ppp") export("distmap.psp") export("distributecbind") export("divide.linnet") export("dkernel") export("dknn") export("dmixpois") export("do.as.im") export("do.call.plotfun") export("do.istat") export("domain") export("domain.distfun") export("domain.dppm") export("domain.funxy") export("domain.im") export("domain.im") export("domain.influence.ppm") export("domain.kppm") export("domain.layered") export("domain.leverage.ppm") export("domain.linfun") 
export("domain.lintess") export("domain.lpp") export("domain.lpp") export("domain.lppm") export("domain.msr") export("domain.nnfun") export("domain.pp3") export("domain.ppm") export("domain.ppp") export("domain.ppx") export("domain.psp") export("domain.quad") export("domain.quadratcount") export("domain.quadrattest") export("domain.rmhmodel") export("domain.tess") export("doMultiStraussHard") export("dppapproxkernel") export("dppapproxpcf") export("dppBessel") export("dppCauchy") export("dppeigen") export("dppGauss") export("dppkernel") export("dppm") export("dppMatern") export("dppmFixAlgorithm") export("dppmFixIntensity") export("dppparbounds") export("dppPowerExp") export("dppspecden") export("dppspecdenrange") export("dummify") export("dummy.ppm") export("duplicated.ppp") export("duplicated.ppx") export("edge.Ripley") export("edges") export("edges2triangles") export("edges2vees") export("edge.Trans") export("edit.hyperframe") export("edit.im") export("edit.ppp") export("edit.psp") export("eem") export("effectfun") export("ellipse") export("Emark") export("emend") export("emend.lppm") export("emend.ppm") export("emptywindow") export("endpoints.psp") export("EntriesToSparse") export("envelope") export("envelopeArray") export("envelopeEngine") export("envelope.envelope") export("envelope.hasenvelope") export("envelope.kppm") export("envelope.lpp") export("envelope.lppm") export("envelope.matrix") export("envelope.pp3") export("envelope.ppm") export("envelope.ppp") export("envelopeProgressData") export("envelopeTest") export("equalpairs") export("equalpairs.quad") export("equalsfun.quad") export("equals.quad") export("eroded.areas") export("eroded.volumes") export("eroded.volumes.box3") export("eroded.volumes.boxx") export("erodemask") export("erode.owin") export("erosion") export("erosionAny") export("erosion.owin") export("erosion.ppp") export("erosion.psp") export("evalCovar") export("evalCovariate") export("evalCovar.lppm") export("evalCovar.ppm") 
export("eval.fasp") export("eval.fv") export("eval.hyper") export("eval.im") export("evalInteraction") export("evalInterEngine") export("eval.linim") export("evalPairPotential") export("evalSparse3Dentrywise") export("evaluate2Dkernel") export("even.breaks.owin") export("ewcdf") export("exactdt") export("exactMPLEstrauss") export("exactPdt") export("existsSpatstatVariable") export("expand.owin") export("expandSparse") export("expandSpecialLists") export("expandwinPerfect") export("ExpSmoothLog") export("extractAIC.dppm") export("extractAIC.kppm") export("extractAIC.lppm") export("extractAIC.mppm") export("extractAIC.ppm") export("extractAIC.slrm") export("extractAtomicQtests") export("extractbranch") export("extractbranch.linnet") export("extractbranch.lpp") export("f3Cengine") export("f3engine") export("F3est") export("fakemaintitle") export("family.vblogit") export("fardist") export("fardist.owin") export("fardist.ppp") export("[.fasp") export("fasp") export("FDMKERNEL") export("Fest") export("fft2D") export("fftwAvailable") export("Fhazard") export("fii") export("Fiksel") export("fill.coefs") export("fillNA") export("findbestlegendpos") export("findcbind") export("findCovariate") export("Finhom") export("fitin") export("fitin.ppm") export("fitin.profilepl") export("fitted.dppm") export("fitted.kppm") export("fitted.lppm") export("fitted.mppm") export("fitted.ppm") export("fitted.rppm") export("fitted.slrm") export("fixef.mppm") export("flatfname") export("flipxy") export("flipxy.im") export("flipxy.infline") export("flipxy.layered") export("flipxy.owin") export("flipxypolygon") export("flipxy.ppp") export("flipxy.psp") export("FmultiInhom") export("foo") export("forbid.logi") export("FormatFaspFormulae") export("format.numberwithunit") export("formula<-") export("formula.dppm") export("formula<-.fv") export("formula.fv") export("formula.kppm") export("formula.lppm") export("formula.ppm") export("formula.slrm") export("fourierbasis") export("Frame<-") 
export("Frame") export("framebottomleft") export("Frame.default") export("Frame<-.im") export("Frame<-.owin") export("Frame<-.ppp") export("fryplot") export("frypoints") export("funxy") export("[<-.fv") export("[.fv") export("$<-.fv") export("fv") export("fvexprmap") export("fvlabelmap") export("fvlabels<-") export("fvlabels") export("fvlegend") export("fvnames<-") export("fvnames") export("g3Cengine") export("g3engine") export("G3est") export("gauss.hermite") export("Gcom") export("Gcross") export("Gdot") export("Gest") export("getCall.mppm") export("getdataname") export("getfields") export("getglmdata") export("getglmfit") export("getglmsubset") export("getlambda.lpp") export("getlastshift") export("getppmdatasubset") export("getppmOriginalCovariates") export("getRandomFieldsModelGen") export("getSpatstatVariable") export("getSumFun") export("Geyer") export("geyercounts") export("geyerdelta2") export("Gfox") export("Ginhom") export("GLMpredict") export("Gmulti") export("GmultiInhom") export("good.correction.K") export("Gres") export("grid1index") export("gridcenters") export("gridcentres") export("gridindex") export("gridweights") export("grokIndexVector") export("grow.box3") export("grow.boxx") export("grow.mask") export("grow.rectangle") export("grow.simplepanel") export("hackglmmPQL") export("Halton") export("Hammersley") export("handle.r.b.args") export("handle.rshift.args") export("Hardcore") export("harmonic") export("harmonise") export("harmonise.fv") export("harmonise.im") export("harmonise.msr") export("harmonise.owin") export("harmonize") export("harmonize.fv") export("harmonize.im") export("harmonize.owin") export("has.close") export("has.close.default") export("has.close.pp3") export("has.close.ppp") export("hasenvelope") export("has.offset") export("has.offset.term") export("head.hyperframe") export("head.ppp") export("head.ppx") export("head.psp") export("head.tess") export("HermiteCoefs") export("Hest") export("hexagon") export("hexgrid") 
export("hextess") export("hierarchicalordering") export("HierHard") export("hiermat") export("hierpair.family") export("HierStrauss") export("HierStraussHard") export("hist.funxy") export("hist.im") export("ho.engine") export("hopskel") export("hopskel.test") export("hsvim") export("hsvNA") export("Hybrid") export("hybrid.family") export("[<-.hyperframe") export("[.hyperframe") export("$<-.hyperframe") export("$.hyperframe") export("hyperframe") export("IdenticalRows") export("identify.lpp") export("identify.ppp") export("identify.psp") export("idorempty") export("idw") export("Iest") export("illegal.iformula") export("[<-.im") export("[.im") export("im") export("image.im") export("image.imlist") export("image.listof") export("image.objsurf") export("image.ssf") export("im.apply") export("imcov") export("implemented.for.K") export("impliedcoefficients") export("impliedpresence") export("improve.kppm") export("incircle") export("increment.fv") export("infline") export("[.influence.ppm") export("influence.ppm") export("inforder.family") export("inpoint") export("inradius") export("insertVertices") export("inside3Darray") export("inside.boxx") export("inside.owin") export("instantiate.interact") export("integral") export("integral.im") export("integral.linfun") export("integral.linim") export("integral.msr") export("integral.ssf") export("intensity") export("intensity.detpointprocfamily") export("intensity.dppm") export("intensity.lpp") export("intensity.ppm") export("intensity.ppp") export("intensity.ppx") export("intensity.quadratcount") export("intensity.splitppp") export("interactionfamilyname") export("intermaker") export("interp.colourmap") export("interp.colours") export("interp.im") export("intersect.owin") export("intersect.tess") export("intX.owin") export("intX.xypolygon") export("intY.owin") export("intY.xypolygon") export("invokeColourmapRule") export("invoke.symbolmap") export("iplot") export("iplot.default") export("iplot.layered") 
export("iplot.linnet") export("iplot.lpp") export("iplot.ppp") export("ippm") export("is.atomicQtest") export("is.cadlag") export("is.col.argname") export("is.colour") export("is.connected") export("is.connected.default") export("is.connected.linnet") export("is.connected.ppp") export("is.convex") export("is.data") export("is.dppm") export("is.empty") export("is.empty.default") export("is.empty.owin") export("is.empty.ppp") export("is.empty.psp") export("is.expandable") export("is.expandable.ppm") export("is.expandable.rmhmodel") export("is.fv") export("is.grey") export("is.hybrid") export("is.hybrid.interact") export("is.hybrid.ppm") export("is.hyperframe") export("is.im") export("is.infline") export("is.interact") export("is.kppm") export("is.lpp") export("is.lppm") export("is.marked") export("is.marked.default") export("is.marked.lppm") export("is.marked.msr") export("is.marked.ppm") export("is.marked.ppp") export("is.marked.psp") export("is.marked.quad") export("is.mask") export("is.mppm") export("is.multitype") export("is.multitype.default") export("is.multitype.lpp") export("is.multitype.lppm") export("is.multitype.msr") export("is.multitype.ppm") export("is.multitype.ppp") export("is.multitype.quad") export("is.owin") export("is.poisson") export("is.poisson.interact") export("is.poisson.kppm") export("is.poisson.lppm") export("is.poisson.mppm") export("is.poisson.ppm") export("is.poisson.rmhmodel") export("is.poisson.slrm") export("is.polygonal") export("is.pp3") export("is.ppm") export("is.ppp") export("is.ppx") export("is.psp") export("is.rectangle") export("isRelevantZero") export("is.scov") export("is.slrm") export("is.sob") export("is.stationary") export("is.stationary.detpointprocfamily") export("is.stationary.dppm") export("is.stationary.kppm") export("is.stationary.lppm") export("is.stationary.ppm") export("is.stationary.rmhmodel") export("is.stationary.slrm") export("is.subset.owin") export("istat") export("is.tess") export("Jcross") export("Jdot") 
export("Jest") export("Jfox") export("Jinhom") export("Jmulti") export("k3engine") export("K3est") export("kaplan.meier") export("Kborder.engine") export("Kcom") export("Kcross") export("Kcross.inhom") export("Kdot") export("Kdot.inhom") export("kernel.factor") export("kernel.moment") export("kernel.squint") export("Kest") export("Kest.fft") export("killinteraction") export("Kinhom") export("Kmark") export("Kmeasure") export("Kmodel") export("Kmodel.detpointprocfamily") export("Kmodel.dppm") export("Kmodel.kppm") export("Kmodel.ppm") export("km.rs") export("km.rs.opt") export("Kmulti") export("Kmulti.inhom") export("Knone.engine") export("Kount") export("Kpcf.kppm") export("kppm") export("kppmComLik") export("kppm.formula") export("kppmMinCon") export("kppmPalmLik") export("kppm.ppp") export("kppm.quad") export("kraever") export("kraeverRandomFields") export("Krect.engine") export("Kres") export("Kscaled") export("Ksector") export("ksmooth.ppp") export("kstest") export("kstest.lpp") export("kstest.lppm") export("ks.test.ppm") export("kstest.ppm") export("kstest.ppp") export("kstest.slrm") export("Kwtsum") export("labels.dppm") export("labels.kppm") export("labels.ppm") export("labels.slrm") export("LambertW") export("laslett") export("latest.news") export("[<-.layered") export("[.layered") export("[[<-.layered") export("layered") export("layerplotargs<-") export("layerplotargs") export("layout.boxes") export("Lcross") export("Lcross.inhom") export("Ldot") export("Ldot.inhom") export("ldtEngine") export("lengths.psp") export("LennardJones") export("Lest") export("levelsAsFactor") export("levelset") export("levels<-.im") export("levels.im") export("leverage") export("[.leverage.ppm") export("leverage.ppm") export("lgcp.estK") export("lgcp.estpcf") export("lineardirichlet") export("lineardisc") export("linearK") export("linearKcross") export("linearKcross.inhom") export("linearKdot") export("linearKdot.inhom") export("linearKengine") export("linearKinhom") 
export("linearKmulti") export("linearKmultiEngine") export("linearKmulti.inhom") export("linearmarkconnect") export("linearmarkequal") export("linearpcf") export("linearpcfcross") export("linearpcfcross.inhom") export("linearpcfdot") export("linearpcfdot.inhom") export("linearpcfengine") export("linearpcfinhom") export("linearpcfmulti") export("linearPCFmultiEngine") export("linearpcfmulti.inhom") export("linequad") export("linfun") export("Linhom") export("[<-.linim") export("[.linim") export("linim") export("[.linnet") export("linnet") export("lintess") export("[<-.listof") export("listof") export("lixellate") export("local2lpp") export("localK") export("localKengine") export("localKinhom") export("localL") export("localLinhom") export("localpcf") export("localpcfengine") export("localpcfinhom") export("[.localpcfmatrix") export("localpcfmatrix") export("logicalIndex") export("logi.dummy") export("logi.engine") export("logLik.dppm") export("logLik.kppm") export("logLik.lppm") export("logLik.mppm") export("logLik.ppm") export("logLik.slrm") export("logLik.vblogit") export("lohboot") export("lookup2DkernelInfo") export("lookup.im") export("[.lpp") export("lpp") export("lppm") export("lppm.formula") export("lppm.lpp") export("Lscaled") export("lurking") export("lut") export("mad.progress") export("mad.sigtrace") export("mad.test") export("majorminorversion") export("make.even.breaks") export("makefvlabel") export("makeLinnetTolerance") export("makeunits") export("mapSparseEntries") export("marginSums") export("markappend") export("markappendop") export("markcbind") export("markconnect") export("markcorr") export("markcorrint") export("markcrosscorr") export("markformat") export("markformat.default") export("markformat.ppp") export("markformat.ppx") export("markformat.psp") export("markmean") export("markreplicateop") export("marks<-") export("marks") export("mark.scale.default") export("marks.default") export("marks<-.lpp") export("markspace.integral") 
export("marks<-.ppp") export("marks.ppp") export("marks<-.ppx") export("marks.ppx") export("marks<-.psp") export("marks.psp") export("marks.quad") export("marks<-.ssf") export("marks.ssf") export("markstat") export("marks<-.tess") export("marks.tess") export("marksubset") export("marksubsetop") export("marktable") export("markvar") export("markvario") export("mask2df") export("maskLaslett") export("match2DkernelName") export("matchingdist") export("match.kernel") export("matclust.estK") export("matclust.estpcf") export("Math.im") export("Math.imlist") export("Math.linim") export("Math.sparse3Darray") export("matrixinvsqrt") export("matrixpower") export("matrixsqrt") export("maxflow") export("max.fv") export("maxnndist") export("max.ssf") export("mctest.progress") export("mctest.sigtrace") export("mctestSigtraceEngine") export("mean.im") export("mean.linim") export("meanlistfv") export("meanX.owin") export("meanY.owin") export("measureNegative") export("measurePositive") export("measureVariation") export("median.im") export("median.linim") export("mergeLevels") export("midpoints.psp") export("mincontrast") export("min.fv") export("MinkowskiSum") export("minnndist") export("min.ssf") export("miplot") export("model.covariates") export("model.depends") export("model.frame.dppm") export("modelFrameGam") export("model.frame.kppm") export("model.frame.lppm") export("model.frame.ppm") export("model.images") export("model.images.dppm") export("model.images.kppm") export("model.images.lppm") export("model.images.ppm") export("model.images.slrm") export("model.is.additive") export("model.matrix.dppm") export("model.matrix.ippm") export("model.matrix.kppm") export("model.matrix.lppm") export("model.matrix.ppm") export("model.matrix.slrm") export("model.se.image") export("mpl") export("mpl.engine") export("mpl.get.covariates") export("mpl.prepare") export("mpl.usable") export("mppm") export("[.msr") export("msr") export("MultiHard") export("MultiPair.checkmatrix") 
export("multiplicity") export("multiplicity.data.frame") export("multiplicity.default") export("multiplicityNumeric") export("multiplicity.ppp") export("multiplicity.ppx") export("multiply.only.finite.entries") export("MultiStrauss") export("MultiStraussHard") export("na.handle.im") export("names<-.fv") export("names<-.hyperframe") export("names.hyperframe") export("nearest.neighbour") export("nearest.pixel") export("nearest.raster.point") export("nearestsegment") export("nearest.valid.pixel") export("nestsplit") export("newformula") export("newstyle.coeff.handling") export("nnclean") export("nncleanEngine") export("nnclean.pp3") export("nnclean.ppp") export("nncorr") export("nncross") export("nncross.default") export("nncross.lpp") export("nncross.pp3") export("nncross.ppp") export("nndcumfun") export("nndensity") export("nndensity.ppp") export("nndist") export("nndist.default") export("nndist.lpp") export("nndist.pp3") export("nndist.ppp") export("nndist.ppx") export("nndist.psp") export("nnfromvertex") export("nnfun") export("nnfun.lpp") export("nnfun.ppp") export("nnfun.psp") export("nnmap") export("nnmark") export("nnmean") export("nnorient") export("nnvario") export("nnwhich") export("nnwhich.default") export("nnwhich.lpp") export("nnwhich.pp3") export("nnwhich.ppp") export("nnwhich.ppx") export("nobjects") export("nobjects.ppp") export("nobjects.ppx") export("nobjects.psp") export("nobjects.tess") export("nobs.dppm") export("nobs.kppm") export("nobs.lppm") export("nobs.mppm") export("nobs.ppm") export("no.trend.ppm") export("npfun") export("npoints") export("npoints.pp3") export("npoints.ppp") export("npoints.ppx") export("n.quad") export("nsegments") export("nsegments.linnet") export("nsegments.lpp") export("nsegments.psp") export("numberwithunit") export("numeric.columns") export("nvertices") export("nvertices.default") export("nvertices.linnet") export("nvertices.owin") export("objsurf") export("objsurf.dppm") export("objsurfEngine") 
export("objsurf.kppm") export("objsurf.minconfit") export("onearrow") export("onecolumn") export("opening") export("opening.owin") export("opening.ppp") export("opening.psp") export("Ops.im") export("Ops.imlist") export("Ops.linim") export("Ops.msr") export("Ops.sparse3Darray") export("optimStatus") export("Ord") export("ord.family") export("OrdThresh") export("outdated.interact") export("overlap.owin") export("oversize.quad") export("[.owin") export("owin") export("owin2polypath") export("owinpoly2mask") export("owinpolycheck") export("padimage") export("pairdist") export("pairdist.default") export("pairdist.lpp") export("pairdist.pp3") export("pairdist.ppp") export("pairdist.ppx") export("pairdist.psp") export("pairorient") export("PairPiece") export("pairsat.family") export("pairs.im") export("pairs.linim") export("pairs.listof") export("pairs.solist") export("Pairwise") export("pairwise.family") export("paletteindex") export("paletteindex") export("panel.contour") export("panel.histogram") export("panel.image") export("parameters") export("parameters.dppm") export("parameters.fii") export("parameters.interact") export("parameters.kppm") export("parameters.ppm") export("parameters.profilepl") export("param.quad") export("parbreak") export("parres") export("partialModelMatrix") export("pcf") export("pcf3engine") export("pcf3est") export("pcfcross") export("pcfcross.inhom") export("pcfdot") export("pcfdot.inhom") export("pcf.fasp") export("pcf.fv") export("pcfinhom") export("pcfmodel") export("pcfmodel.detpointprocfamily") export("pcfmodel.dppm") export("pcfmodel.kppm") export("pcfmodel.ppm") export("pcfmodel.zclustermodel") export("pcfmulti") export("pcfmulti.inhom") export("pcf.ppp") export("PDEdensityLPP") export("Penttinen") export("perimeter") export("periodify") export("periodify.owin") export("periodify.ppp") export("periodify.psp") export("perspContour") export("persp.funxy") export("persp.im") export("persp.leverage.ppm") export("perspLines") 
export("persp.objsurf") export("perspPoints") export("perspSegments") export("pickoption") export("pixelcentres") export("pixellate") export("pixellate.linnet") export("pixellate.owin") export("pixellate.ppp") export("pixellate.psp") export("pixelquad") export("pkernel") export("pknn") export("plan.legend.layout") export("plot3Dpoints") export("plot.addvar") export("plot.anylist") export("plot.barplotdata") export("plot.bermantest") export("plot.bw.frac") export("plot.bw.optim") export("plot.cdftest") export("plot.colourmap") export("plot.diagppm") export("plot.dppm") export("plotEachLayer") export("plot.envelope") export("ploterodeimage") export("ploterodewin") export("plot.fasp") export("plot.fii") export("plot.foo") export("plot.funxy") export("plot.fv") export("plot.hyperframe") export("plot.im") export("plot.imlist") export("plot.infline") export("plot.influence.ppm") export("plot.kppm") export("plot.kstest") export("plot.laslett") export("plot.layered") export("plot.leverage.ppm") export("plot.linfun") export("plot.linim") export("plot.linnet") export("plot.lintess") export("plot.listof") export("plot.localpcfmatrix") export("plot.lpp") export("plot.lppm") export("plot.lurk") export("plot.minconfit") export("plot.mppm") export("plot.msr") export("plot.objsurf") export("plot.onearrow") export("plot.owin") export("plot.parres") export("plot.plotpairsim") export("plot.plotppm") export("plotPolygonBdry") export("plot.pp3") export("plot.ppm") export("plot.ppp") export("plot.pppmatching") export("plot.ppx") export("plot.profilepl") export("plot.psp") export("plot.qqppm") export("plot.quad") export("plot.quadratcount") export("plot.quadrattest") export("plot.rho2hat") export("plot.rhohat") export("plot.rppm") export("plot.scan.test") export("plot.slrm") export("plot.solist") export("plot.spatialcdf") export("plot.splitppp") export("plot.ssf") export("plot.studpermutest") export("plot.symbolmap") export("plot.tess") export("plot.textstring") export("plot.texturemap") 
export("plot.yardstick") export("pmixpois") export("pointgrid") export("pointsAlongNetwork") export("points.lpp") export("pointsOnLines") export("PoisSaddle") export("PoisSaddleArea") export("PoisSaddleGeyer") export("PoisSaddlePairwise") export("Poisson") export("polyLaslett") export("polynom") export("polytileareaEngine") export("pool") export("pool.anylist") export("pool.envelope") export("pool.fasp") export("pool.fv") export("pool.quadrattest") export("pool.rat") export("positiveIndex") export("[.pp3") export("pp3") export("ppllengine") export("ppm") export("ppmCovariates") export("ppm.default") export("ppmDerivatives") export("ppm.formula") export("ppmInfluence") export("ppmInfluenceEngine") export("PPMmodelmatrix") export("ppm.ppp") export("ppm.quad") export("[<-.ppp") export("[.ppp") export("ppp") export("pppdist") export("pppdist.mat") export("pppdist.prohorov") export("pppmatching") export("ppsubset") export("PPversion") export("[.ppx") export("ppx") export("predict.dppm") export("predict.kppm") export("predict.lppm") export("predict.mppm") export("predict.ppm") export("predict.profilepl") export("predict.rho2hat") export("predict.rhohat") export("predict.rppm") export("predict.slrm") export("predict.vblogit") export("predict.zclustermodel") export("prefixfv") export("prepareTitle") export("print.addvar") export("print.anylist") export("print.autoexec") export("print.box3") export("print.boxx") export("print.bt.frame") export("print.bugtable") export("print.bw.frac") export("print.bw.optim") export("print.colourmap") export("print.detpointprocfamily") export("print.detpointprocfamilyfun") export("print.diagppm") export("print.distfun") export("print.dppm") export("print.envelope") export("print.ewcdf") export("print.fasp") export("print.fii") export("print.funxy") export("print.fv") export("print.fvfun") export("print.hasenvelope") export("print.hierarchicalordering") export("print.hyperframe") export("print.im") export("print.indicfun") 
export("print.infline") export("print.influence.ppm") export("print.interact") export("print.intermaker") export("print.isf") export("print.kppm") export("print.laslett") export("print.layered") export("print.leverage.ppm") export("print.linfun") export("print.linim") export("print.linnet") export("print.lintess") export("print.localpcfmatrix") export("print.lpp") export("print.lppm") export("print.lut") export("print.minconfit") export("print.mppm") export("print.msr") export("print.nnfun") export("print.numberwithunit") export("print.objsurf") export("print.onearrow") export("print.owin") export("print.parres") export("print.plotpairsim") export("print.plotppm") export("print.pp3") export("print.ppm") export("print.ppp") export("print.pppmatching") export("print.ppx") export("print.profilepl") export("print.psp") export("print.qqppm") export("print.quad") export("print.quadrattest") export("print.rat") export("print.rho2hat") export("print.rhohat") export("print.rmhcontrol") export("print.rmhexpand") export("print.rmhInfoList") export("print.rmhmodel") export("print.rmhstart") export("print.rppm") export("print.simplepanel") export("print.slrm") export("print.Smoothfun") export("print.solist") export("print.sparse3Darray") export("print.splitppp") export("print.splitppx") export("print.ssf") export("printStatus") export("print.summary.fii") export("print.summary.hyperframe") export("print.summary.im") export("print.summary.kppm") export("print.summary.linim") export("print.summary.linnet") export("print.summary.lintess") export("print.summary.listof") export("print.summary.logiquad") export("print.summary.lpp") export("print.summary.lut") export("print.summary.mppm") export("print.summary.owin") export("print.summary.pp3") export("print.summary.ppm") export("print.summary.ppp") export("print.summary.psp") export("print.summary.quad") export("print.summary.rmhexpand") export("print.summary.solist") export("print.summary.splitppp") export("print.summary.splitppx") 
export("print.summary.units") export("print.symbolmap") export("print.tess") export("print.textstring") export("print.texturemap") export("print.timed") export("print.units") export("print.vblogit") export("print.yardstick") export("print.zclustermodel") export("profilepl") export("progressreport") export("project2segment") export("project2set") export("project3Dhom") export("project.ppm") export("prune.rppm") export("pseudoR2") export("pseudoR2.lppm") export("pseudoR2.ppm") export("psib") export("psib.kppm") export("[.psp") export("psp") export("psst") export("psstA") export("psstG") export("putlastshift") export("putSpatstatVariable") export("qkernel") export("qknn") export("qmixpois") export("qqplot.ppm") export("QQversion") export("[.quad") export("quad") export("quadBlockSizes") export("quadform") export("quad.mppm") export("quad.ppm") export("quadratcount") export("quadratcount.ppp") export("quadratcount.splitppp") export("quadratresample") export("quadrats") export("quadrat.test") export("quadrat.testEngine") export("quadrat.test.mppm") export("quadrat.test.ppm") export("quadrat.test.ppp") export("quadrat.test.quadratcount") export("quadrat.test.splitppp") export("quadscheme") export("quadscheme.logi") export("quadscheme.replicated") export("quadscheme.spatial") export("quantess") export("quantess.im") export("quantess.owin") export("quantess.ppp") export("quantile.density") export("quantile.ewcdf") export("quantile.im") export("quantile.linim") export("rags") export("ragsAreaInter") export("ragsMultiHard") export("RandomFieldsSafe") export("ranef.mppm") export("range.fv") export("range.ssf") export("rastersample") export("raster.x") export("rasterx.im") export("rasterx.mask") export("raster.xy") export("rasterxy.im") export("rasterxy.mask") export("raster.y") export("rastery.im") export("rastery.mask") export("[.rat") export("rat") export("ratfv") export("rbindCompatibleDataFrames") export("rbind.hyperframe") export("rCauchy") export("rcell") 
export("rcellnumber") export("rDGS") export("rDiggleGratton") export("rdpp") export("reach") export("reach.detpointprocfamily") export("reach.dppm") export("reach.fii") export("reach.interact") export("reach.ppm") export("reach.rmhmodel") export("rebadge.as.crossfun") export("rebadge.as.dotfun") export("rebadge.fv") export("rebound") export("rebound.im") export("rebound.owin") export("rebound.ppp") export("rebound.psp") export("recognise.spatstat.type") export("reconcile.fv") export("rectquadrat.breaks") export("rectquadrat.countEngine") export("redraw.simplepanel") export("reduced.sample") export("reduceformula") export("reflect") export("reflect.default") export("reflect.im") export("reflect.infline") export("reflect.layered") export("reflect.tess") export("regularpolygon") export("reheat") export("reincarnate.interact") export("RelevantDeviation") export("RelevantEmpty") export("RelevantZero") export("relevel.im") export("relevel.ppp") export("relevel.ppx") export("reload.or.compute") export("relrisk") export("relrisk.ppm") export("relrisk.ppp") export("rename.fv") export("repair.image.xycoords") export("repair.old.factor.image") export("replacementIndex") export("representativeRows") export("requireversion") export("resampleNetworkDataFrame") export("rescale") export("rescale.im") export("rescale.layered") export("rescale.linnet") export("rescale.lpp") export("rescale.owin") export("rescale.ppp") export("rescale.psp") export("rescale.units") export("rescue.rectangle") export("reset.spatstat.options") export("resid1panel") export("resid1plot") export("resid4plot") export("residuals.dppm") export("residuals.kppm") export("residuals.mppm") export("residuals.ppm") export("resolve.2D.kernel") export("resolveEinfo") export("resolve.vargamma.shape") export("restrict.mask") export("reversePolyclipArgs") export("rex") export("rGaussPoisson") export("rgb2hex") export("rgb2hsva") export("rgbim") export("rgbNA") export("rHardcore") export("rho2hat") export("rhohat") 
export("rhohatCalc") export("rhohatEngine") export("rhohat.lpp") export("rhohat.lppm") export("rhohat.ppm") export("rhohat.ppp") export("rhohat.quad") export("ripras") export("rjitter") export("rkernel") export("rknn") export("rlabel") export("rLGCP") export("rlinegrid") export("rlpp") export("rMatClust") export("rMaternI") export("rMaternII") export("rMaternInhibition") export("rmax.Rigid") export("rmax.Ripley") export("rmax.rule") export("rmax.Trans") export("rmh") export("rmhcontrol") export("rmhcontrol.default") export("rmhcontrol.list") export("rmhcontrol.rmhcontrol") export("rmh.default") export("rmhEngine") export("rmhexpand") export("RmhExpandRule") export("rmhmodel") export("rmhmodel.default") export("rmhmodel.list") export("rmhmodel.ppm") export("rmhmodel.rmhmodel") export("rmh.ppm") export("rmhResolveControl") export("rmhResolveExpansion") export("rmhResolveTypes") export("rmhsnoop") export("rmhSnoopEnv") export("rmhstart") export("rmhstart.default") export("rmhstart.list") export("rmhstart.rmhstart") export("rmixpois") export("rMosaicField") export("rMosaicSet") export("rmpoint") export("rmpoint.I.allim") export("rmpoispp") export("rNeymanScott") export("rnoise") export("roc") export("rocData") export("roc.kppm") export("roc.lpp") export("roc.lppm") export("rocModel") export("roc.ppm") export("roc.ppp") export("rose") export("roseContinuous") export("rose.default") export("rose.density") export("rose.fv") export("rose.histogram") export("rotate") export("rotate.im") export("rotate.infline") export("rotate.layered") export("rotate.linnet") export("rotate.lpp") export("rotate.owin") export("rotate.ppp") export("rotate.psp") export("rotate.tess") export("rotmean") export("rotxy") export("rotxypolygon") export("rounding") export("rounding.default") export("rounding.pp3") export("rounding.ppp") export("rounding.ppx") export("round.pp3") export("round.ppp") export("round.ppx") export("row.names<-.hyperframe") export("row.names.hyperframe") 
export("rPenttinen") export("rpoint") export("rpoint.multi") export("rpoisline") export("rpoislinetess") export("rpoislpp") export("rpoispp") export("rpoispp3") export("rpoisppOnLines") export("rpoisppx") export("rPoissonCluster") export("rppm") export("rQuasi") export("rshift") export("rshift.ppp") export("rshift.psp") export("rshift.splitppp") export("rSSI") export("rstrat") export("rStrauss") export("rStraussHard") export("rsyst") export("rtemper") export("rthin") export("rThomas") export("rtoro") export("ruletextline") export("runifdisc") export("runiflpp") export("runifpoint") export("runifpoint3") export("runifpointOnLines") export("runifpointx") export("runifpoispp") export("runifpoisppOnLines") export("runifrect") export("run.simplepanel") export("rVarGamma") export("safedeldir") export("safelookup") export("samecolour") export("SatPiece") export("Saturated") export("scalardilate") export("scalardilate.breakpts") export("scalardilate.default") export("scalardilate.diagramobj") export("scalardilate.im") export("scalardilate.layered") export("scalardilate.linim") export("scalardilate.linnet") export("scalardilate.lpp") export("scalardilate.msr") export("scalardilate.owin") export("scalardilate.ppp") export("scalardilate.psp") export("scalardilate.tess") export("scaletointerval") export("scaletointerval.default") export("scaletointerval.im") export("scanBinomLRTS") export("scanLRTS") export("scanmeasure") export("scanmeasure.im") export("scanmeasure.ppp") export("scanPoisLRTS") export("scanpp") export("scan.test") export("sdr") export("sdrPredict") export("second.moment.calc") export("second.moment.engine") export("segregation.test") export("segregation.test.ppp") export("selfcrossing.psp") export("selfcut.psp") export("sessionLibs") export("setcov") export("setmarks") export("setminus.owin") export("sewpcf") export("sewsmod") export("sharpen") export("sharpen.ppp") export("shift") export("shift.diagramobj") export("shift.im") export("shift.infline") 
export("shift.influence.ppm") export("shift.layered") export("shift.leverage.ppm") export("shift.linim") export("shift.linnet") export("shift.lpp") export("shift.msr") export("shift.owin") export("shift.ppp") export("shift.psp") export("shift.quadratcount") export("shift.quadrattest") export("shift.tess") export("shiftxy") export("shiftxypolygon") export("shortside") export("shortside.box3") export("shortside.boxx") export("shortside.owin") export("sidelengths") export("sidelengths.box3") export("sidelengths.boxx") export("sidelengths.owin") export("signalStatus") export("simplepanel") export("simplify.owin") export("simulate.detpointprocfamily") export("simulate.dppm") export("simulate.kppm") export("simulate.lppm") export("simulate.mppm") export("simulate.ppm") export("simulate.profilepl") export("simulate.rhohat") export("simulate.slrm") export("simulrecipe") export("slrAssemblePixelData") export("slrm") export("slr.prepare") export("Smooth") export("smoothcrossEngine") export("Smoothfun") export("Smoothfun.ppp") export("smooth.fv") export("Smooth.fv") export("Smooth.im") export("smooth.msr") export("Smooth.msr") export("smoothpointsEngine") export("smooth.ppp") export("Smooth.ppp") export("Smooth.solist") export("Smooth.ssf") export("Softcore") export("solapply") export("[<-.solist") export("[.solist") export("solist") export("solutionset") export("sortalongsegment") export("sort.im") export("[<-.sparse3Darray") export("[.sparse3Darray") export("sparse3Darray") export("SparseEntries") export("SparseIndices") export("sparseVectorCumul") export("spatdim") export("spatialcdf") export("spatialCDFframe") export("spatialCDFtest") export("spatstatClusterModelInfo") export("spatstatDiagnostic") export("spatstatDPPModelInfo") export("spatstat.options") export("spatstatRmhInfo") export("spatstat.xy.coords") export("sp.foundclass") export("sp.foundclasses") export("sphere.volume") export("splitHybridInteraction") export("split<-.hyperframe") export("split.hyperframe") 
export("split.im") export("split.msr") export("[<-.splitppp") export("[.splitppp") export("split<-.ppp") export("split.ppp") export("[<-.splitppx") export("[.splitppx") export("split.ppx") export("spokes") export("square") export("[.ssf") export("ssf") export("stieltjes") export("stienen") export("stienenSet") export("store.versionstring.spatstat") export("stratrand") export("Strauss") export("strausscounts") export("StraussHard") export("str.hyperframe") export("studpermu.test") export("subfits") export("subfits.new") export("subfits.old") export("subset.hyperframe") export("subset.lpp") export("subset.pp3") export("subset.ppp") export("subset.ppx") export("subspaceDistance") export("suffloc") export("suffstat") export("suffstat.generic") export("suffstat.poisson") export("summarise.trend") export("summary.anylist") export("summary.envelope") export("summary.fii") export("summary.funxy") export("summary.hyperframe") export("summary.im") export("Summary.im") export("Summary.imlist") export("summary.kppm") export("summary.linfun") export("summary.linim") export("Summary.linim") export("summary.linnet") export("summary.lintess") export("summary.listof") export("summary.logiquad") export("summary.lpp") export("summary.lppm") export("summary.lut") export("summary.mppm") export("summary.owin") export("summary.pp3") export("summary.ppm") export("summary.ppp") export("summary.pppmatching") export("summary.ppx") export("summary.profilepl") export("summary.psp") export("summary.quad") export("summary.rmhexpand") export("summary.solist") export("Summary.sparse3Darray") export("summary.splitppp") export("summary.splitppx") export("summary.units") export("summary.vblogit") export("sumouter") export("sumsymouter") export("sumsymouterSparse") export("superimpose") export("superimpose.default") export("superimpose.lpp") export("superimposeMarks") export("superimpose.ppp") export("superimpose.ppplist") export("superimpose.psp") export("superimposePSP") 
export("superimpose.splitppp") export("symbolmap") export("symbolmaptype") export("tail.hyperframe") export("tail.ppp") export("tail.ppx") export("tail.psp") export("tail.tess") export("tenseur") export("tensor1x1") export("terms.dppm") export("terms.kppm") export("terms.lppm") export("terms.mppm") export("terms.ppm") export("terms.slrm") export("[<-.tess") export("[.tess") export("tess") export("test.crossing.psp") export("test.selfcrossing.psp") export("text.lpp") export("text.ppp") export("text.psp") export("textstring") export("texturemap") export("textureplot") export("thinjump") export("thinNetwork") export("thomas.estK") export("thomas.estpcf") export("tile.areas") export("tilecentroids") export("tileindex") export("tile.lengths") export("tilenames<-") export("tilenames") export("tiles") export("tiles.empty") export("timed") export("timeTaken") export("to.grey") export("to.opaque") export("totalVariation") export("to.transparent") export("transect.im") export("transmat") export("treebranchlabels") export("treeprune") export("trianglediameters") export("triangulate.owin") export("trim.mask") export("trim.rectangle") export("triplet.family") export("Triplets") export("Tstat") export("tweak.coefs") export("tweak.colourmap") export("tweak.fv.entry") export("tweak.ratfv.entry") export("twostage.test") export("unionOfSparseIndices") export("union.owin") export("union.quad") export("unique.ppp") export("unique.ppx") export("unitname<-") export("unitname") export("unitname<-.box3") export("unitname.box3") export("unitname<-.boxx") export("unitname.boxx") export("unitname<-.default") export("unitname.default") export("unitname<-.dppm") export("unitname.dppm") export("unitname<-.im") export("unitname.im") export("unitname<-.kppm") export("unitname.kppm") export("unitname<-.linnet") export("unitname.linnet") export("unitname<-.lpp") export("unitname.lpp") export("unitname<-.minconfit") export("unitname.minconfit") export("unitname<-.owin") export("unitname.owin") 
export("unitname<-.pp3") export("unitname.pp3") export("unitname<-.ppm") export("unitname.ppm") export("unitname<-.ppp") export("unitname.ppp") export("unitname<-.ppx") export("unitname.ppx") export("unitname<-.psp") export("unitname.psp") export("unitname<-.quad") export("unitname.quad") export("unitname<-.slrm") export("unitname.slrm") export("unitname<-.tess") export("unitname.tess") export("unit.square") export("unmark") export("unmark.lpp") export("unmark.ppp") export("unmark.ppx") export("unmark.psp") export("unmark.splitppp") export("unmark.ssf") export("unmark.tess") export("unnormdensity") export("unstackFilter") export("unstack.layered") export("unstack.lpp") export("unstack.msr") export("unstack.ppp") export("unstack.psp") export("unstack.solist") export("update.detpointprocfamily") export("update.im") export("update.interact") export("update.ippm") export("update.kppm") export("update.lppm") export("update.ppm") export("update.rmhcontrol") export("update.rmhstart") export("update.slrm") export("update.symbolmap") export("valid") export("validate2Dkernel") export("validate.angles") export("validate.lpp.coords") export("validate.mask") export("validate.quad") export("validate.weights") export("valid.detpointprocfamily") export("valid.lppm") export("valid.ppm") export("validradius") export("vanilla.fv") export("varblock") export("varcount") export("varcountEngine") export("vargamma.estK") export("vargamma.estpcf") export("vcov.kppm") export("vcov.lppm") export("vcov.mppm") export("vcov.ppm") export("vcov.slrm") export("vdCorput") export("verifyclass") export("versionstring.interact") export("versionstring.ppm") export("versionstring.spatstat") export("vertexdegree") export("vertices") export("vertices.linnet") export("vertices.owin") export("Vmark") export("vnnFind") export("volume") export("volume.box3") export("volume.boxx") export("volume.linnet") export("volume.owin") export("warn.once") export("waxlyrical") export("weighted.median") 
export("weighted.quantile") export("weighted.var") export("where.max") export("where.min") export("whichhalfplane") export("which.max.im") export("whist") export("will.expand") export("Window<-") export("Window") export("Window.distfun") export("Window.dppm") export("Window.funxy") export("Window<-.im") export("Window.im") export("Window.influence.ppm") export("Window.kppm") export("Window.layered") export("Window.leverage.ppm") export("Window<-.linnet") export("Window.linnet") export("Window.lintess") export("Window<-.lpp") export("Window.lpp") export("Window.lppm") export("Window.msr") export("Window.nnfun") export("Window.ppm") export("Window<-.ppp") export("Window.ppp") export("Window<-.psp") export("Window.psp") export("Window.quad") export("Window.quadratcount") export("Window.quadrattest") export("Window.rmhmodel") export("windows.mppm") export("Window.tess") export("with.fv") export("with.hyperframe") export("with.msr") export("with.ssf") export("w.quad") export("X2testEngine") export("x.quad") export("xtfrm.im") export("xy.grid") export("xypolygon2psp") export("xypolyselfint") export("yardstick") export("y.quad") export("zapsmall.im") export("zclustermodel") # ....... Special cases ........... export("%(-)%") export("%(+)%") export("%mapp%") export("%mark%") export("%mrep%") export("%msub%") export("%unit%") S3method("Complex", "im") S3method("Complex", "imlist") S3method("Complex", "linim") S3method("Math", "im") S3method("Math", "imlist") S3method("Math", "linim") S3method("Math", "sparse3Darray") S3method("mean", "im") S3method("median", "im") S3method("Ops", "im") S3method("Ops", "imlist") S3method("Ops", "linim") S3method("Ops", "msr") S3method("Ops", "sparse3Darray") S3method("Summary", "im") S3method("Summary", "imlist") S3method("Summary", "linim") S3method("Summary", "sparse3Darray") # ....... End of special cases ... # ......................................... 
# Automatically generated list of S3 methods # ......................................... S3method("affine", "im") S3method("affine", "layered") S3method("affine", "linim") S3method("affine", "linnet") S3method("affine", "lpp") S3method("affine", "owin") S3method("affine", "ppp") S3method("affine", "psp") S3method("affine", "tess") S3method("AIC", "dppm") S3method("AIC", "kppm") S3method("AIC", "mppm") S3method("AIC", "ppm") S3method("anova", "lppm") S3method("anova", "mppm") S3method("anova", "ppm") S3method("anova", "slrm") S3method("anyDuplicated", "ppp") S3method("anyDuplicated", "ppx") S3method("[", "anylist") S3method("anyNA", "im") S3method("anyNA", "sparse3Darray") S3method("aperm", "sparse3Darray") S3method("area", "default") S3method("area", "owin") S3method("as.array", "im") S3method("as.array", "sparse3Darray") S3method("as.character", "units") S3method("as.data.frame", "bw.optim") S3method("as.data.frame", "envelope") S3method("as.data.frame", "fv") S3method("as.data.frame", "hyperframe") S3method("as.data.frame", "im") S3method("as.data.frame", "linfun") S3method("as.data.frame", "linim") S3method("as.data.frame", "owin") S3method("as.data.frame", "ppp") S3method("as.data.frame", "ppx") S3method("as.data.frame", "psp") S3method("as.data.frame", "tess") S3method("as.double", "im") S3method("as.function", "fv") S3method("as.function", "im") S3method("as.function", "leverage.ppm") S3method("as.function", "linfun") S3method("as.function", "owin") S3method("as.function", "rhohat") S3method("as.function", "ssf") S3method("as.function", "tess") S3method("as.fv", "bw.optim") S3method("as.fv", "data.frame") S3method("as.fv", "dppm") S3method("as.fv", "fasp") S3method("as.fv", "fv") S3method("as.fv", "kppm") S3method("as.fv", "matrix") S3method("as.fv", "minconfit") S3method("as.hyperframe", "anylist") S3method("as.hyperframe", "data.frame") S3method("as.hyperframe", "default") S3method("as.hyperframe", "hyperframe") S3method("as.hyperframe", "listof") 
S3method("as.hyperframe", "ppx") S3method("as.im", "data.frame") S3method("as.im", "default") S3method("as.im", "distfun") S3method("as.im", "function") S3method("as.im", "funxy") S3method("as.im", "im") S3method("as.im", "leverage.ppm") S3method("as.im", "linim") S3method("as.im", "matrix") S3method("as.im", "nnfun") S3method("as.im", "owin") S3method("as.im", "ppp") S3method("as.im", "scan.test") S3method("as.im", "Smoothfun") S3method("as.im", "ssf") S3method("as.im", "tess") S3method("as.interact", "fii") S3method("as.interact", "interact") S3method("as.interact", "ppm") S3method("as.layered", "default") S3method("as.layered", "listof") S3method("as.layered", "msr") S3method("as.layered", "ppp") S3method("as.layered", "solist") S3method("as.layered", "splitppp") S3method("as.linfun", "linfun") S3method("as.linfun", "linim") S3method("as.linfun", "lintess") S3method("as.linim", "default") S3method("as.linim", "linfun") S3method("as.linim", "linim") S3method("as.linnet", "linfun") S3method("as.linnet", "linim") S3method("as.linnet", "linnet") S3method("as.linnet", "lintess") S3method("as.linnet", "lpp") S3method("as.linnet", "lppm") S3method("as.linnet", "psp") S3method("as.list", "hyperframe") S3method("as.matrix", "im") S3method("as.matrix", "owin") S3method("as.matrix", "ppx") S3method("as.owin", "boxx") S3method("as.owin", "data.frame") S3method("as.owin", "default") S3method("as.owin", "distfun") S3method("as.owin", "dppm") S3method("as.owin", "funxy") S3method("as.owin", "im") S3method("as.owin", "influence.ppm") S3method("as.owin", "kppm") S3method("as.owin", "layered") S3method("as.owin", "leverage.ppm") S3method("as.owin", "linfun") S3method("as.owin", "linnet") S3method("as.owin", "lintess") S3method("as.owin", "lpp") S3method("as.owin", "lppm") S3method("as.owin", "msr") S3method("as.owin", "nnfun") S3method("as.owin", "owin") S3method("as.owin", "ppm") S3method("as.owin", "ppp") S3method("as.owin", "psp") S3method("as.owin", "quad") 
S3method("as.owin", "quadratcount") S3method("as.owin", "quadrattest") S3method("as.owin", "rmhmodel") S3method("as.owin", "tess") S3method("as.ppm", "dppm") S3method("as.ppm", "kppm") S3method("as.ppm", "lppm") S3method("as.ppm", "ppm") S3method("as.ppm", "profilepl") S3method("as.ppm", "rppm") S3method("as.ppp", "data.frame") S3method("as.ppp", "default") S3method("as.ppp", "influence.ppm") S3method("as.ppp", "lpp") S3method("as.ppp", "matrix") S3method("as.ppp", "ppp") S3method("as.ppp", "psp") S3method("as.ppp", "quad") S3method("as.ppp", "ssf") S3method("as.psp", "data.frame") S3method("as.psp", "default") S3method("as.psp", "linnet") S3method("as.psp", "lpp") S3method("as.psp", "matrix") S3method("as.psp", "owin") S3method("as.psp", "psp") S3method("as.tess", "im") S3method("as.tess", "list") S3method("as.tess", "owin") S3method("as.tess", "quadratcount") S3method("as.tess", "quadrattest") S3method("as.tess", "tess") S3method("auc", "kppm") S3method("auc", "lpp") S3method("auc", "lppm") S3method("auc", "ppm") S3method("auc", "ppp") S3method("bc", "ppm") S3method("berman.test", "lpp") S3method("berman.test", "lppm") S3method("berman.test", "ppm") S3method("berman.test", "ppp") S3method("boundingbox", "default") S3method("boundingbox", "im") S3method("boundingbox", "owin") S3method("boundingbox", "ppp") S3method("boundingbox", "solist") S3method("boundingcentre", "owin") S3method("boundingcentre", "ppp") S3method("boundingcircle", "owin") S3method("boundingcircle", "ppp") S3method("boundingradius", "linnet") S3method("boundingradius", "owin") S3method("boundingradius", "ppp") S3method("by", "im") S3method("by", "ppp") S3method("cbind", "fv") S3method("cbind", "hyperframe") S3method("CDF", "density") S3method("cdf.test", "lpp") S3method("cdf.test", "lppm") S3method("cdf.test", "mppm") S3method("cdf.test", "ppm") S3method("cdf.test", "ppp") S3method("cdf.test", "slrm") S3method("circumradius", "linnet") S3method("circumradius", "owin") S3method("circumradius", 
"ppp") S3method("closepairs", "pp3") S3method("closepairs", "ppp") S3method("closing", "owin") S3method("closing", "ppp") S3method("closing", "psp") S3method("clusterfield", "character") S3method("clusterfield", "function") S3method("clusterfield", "kppm") S3method("clusterkernel", "character") S3method("clusterkernel", "kppm") S3method("clusterradius", "character") S3method("clusterradius", "kppm") S3method("coef", "dppm") S3method("coef", "fii") S3method("coef", "kppm") S3method("coef", "lppm") S3method("coef", "mppm") S3method("coef", "ppm") S3method("coef", "slrm") S3method("coef", "summary.fii") S3method("coef", "summary.kppm") S3method("coef", "summary.ppm") S3method("coef", "vblogit") S3method("collapse", "anylist") S3method("collapse", "fv") S3method("compatible", "fasp") S3method("compatible", "fv") S3method("compatible", "im") S3method("compatible", "rat") S3method("compatible", "units") S3method("connected", "im") S3method("connected", "linnet") S3method("connected", "lpp") S3method("connected", "owin") S3method("connected", "ppp") S3method("contour", "funxy") S3method("contour", "im") S3method("contour", "imlist") S3method("contour", "listof") S3method("contour", "objsurf") S3method("contour", "ssf") S3method("coords", "ppp") S3method("coords", "ppx") S3method("crossdist", "default") S3method("crossdist", "lpp") S3method("crossdist", "pp3") S3method("crossdist", "ppp") S3method("crossdist", "ppx") S3method("crossdist", "psp") S3method("crosspairs", "pp3") S3method("crosspairs", "ppp") S3method("cut", "im") S3method("cut", "lpp") S3method("cut", "ppp") S3method("deletebranch", "linnet") S3method("deletebranch", "lpp") S3method("density", "lpp") S3method("density", "ppp") S3method("density", "ppplist") S3method("density", "psp") S3method("density", "splitppp") S3method("density", "splitppx") S3method("deriv", "fv") S3method("deviance", "lppm") S3method("deviance", "ppm") S3method("dfbetas", "ppm") S3method("[", "diagramobj") S3method("diameter", "box3") 
S3method("diameter", "boxx") S3method("diameter", "linnet") S3method("diameter", "owin") S3method("dilation", "owin") S3method("dilation", "ppp") S3method("dilation", "psp") S3method("dim", "detpointprocfamily") S3method("dim", "fasp") S3method("dim", "hyperframe") S3method("dim", "im") S3method("dim", "msr") S3method("dimnames", "fasp") S3method("dimnames", "msr") S3method("dimnames", "sparse3Darray") S3method("dim", "owin") S3method("dim", "sparse3Darray") S3method("distfun", "lpp") S3method("distfun", "owin") S3method("distfun", "ppp") S3method("distfun", "psp") S3method("distmap", "owin") S3method("distmap", "ppp") S3method("distmap", "psp") S3method("domain", "distfun") S3method("domain", "dppm") S3method("domain", "funxy") S3method("domain", "im") S3method("domain", "im") S3method("domain", "influence.ppm") S3method("domain", "kppm") S3method("domain", "layered") S3method("domain", "leverage.ppm") S3method("domain", "linfun") S3method("domain", "lintess") S3method("domain", "lpp") S3method("domain", "lpp") S3method("domain", "lppm") S3method("domain", "msr") S3method("domain", "nnfun") S3method("domain", "pp3") S3method("domain", "ppm") S3method("domain", "ppp") S3method("domain", "ppx") S3method("domain", "psp") S3method("domain", "quad") S3method("domain", "quadratcount") S3method("domain", "quadrattest") S3method("domain", "rmhmodel") S3method("domain", "tess") S3method("duplicated", "ppp") S3method("duplicated", "ppx") S3method("edit", "hyperframe") S3method("edit", "im") S3method("edit", "ppp") S3method("edit", "psp") S3method("emend", "lppm") S3method("emend", "ppm") S3method("envelope", "envelope") S3method("envelope", "hasenvelope") S3method("envelope", "kppm") S3method("envelope", "lpp") S3method("envelope", "lppm") S3method("envelope", "matrix") S3method("envelope", "pp3") S3method("envelope", "ppm") S3method("envelope", "ppp") S3method("eroded.volumes", "box3") S3method("eroded.volumes", "boxx") S3method("erosion", "owin") S3method("erosion", 
"ppp") S3method("erosion", "psp") S3method("evalCovar", "lppm") S3method("evalCovar", "ppm") S3method("extractAIC", "dppm") S3method("extractAIC", "kppm") S3method("extractAIC", "lppm") S3method("extractAIC", "mppm") S3method("extractAIC", "ppm") S3method("extractAIC", "slrm") S3method("extractbranch", "linnet") S3method("extractbranch", "lpp") S3method("family", "vblogit") S3method("fardist", "owin") S3method("fardist", "ppp") S3method("[", "fasp") S3method("fitin", "ppm") S3method("fitin", "profilepl") S3method("fitted", "dppm") S3method("fitted", "kppm") S3method("fitted", "lppm") S3method("fitted", "mppm") S3method("fitted", "ppm") S3method("fitted", "rppm") S3method("fitted", "slrm") S3method("fixef", "mppm") S3method("flipxy", "im") S3method("flipxy", "infline") S3method("flipxy", "layered") S3method("flipxy", "owin") S3method("flipxy", "ppp") S3method("flipxy", "psp") S3method("format", "numberwithunit") S3method("formula", "dppm") S3method("formula", "fv") S3method("formula", "kppm") S3method("formula", "lppm") S3method("formula", "ppm") S3method("formula", "slrm") S3method("Frame", "default") S3method("[", "fv") S3method("getCall", "mppm") S3method("harmonise", "fv") S3method("harmonise", "im") S3method("harmonise", "msr") S3method("harmonise", "owin") S3method("harmonize", "fv") S3method("harmonize", "im") S3method("harmonize", "owin") S3method("has.close", "default") S3method("has.close", "pp3") S3method("has.close", "ppp") S3method("head", "hyperframe") S3method("head", "ppp") S3method("head", "ppx") S3method("head", "psp") S3method("head", "tess") S3method("hist", "funxy") S3method("hist", "im") S3method("[", "hyperframe") S3method("$", "hyperframe") S3method("identify", "lpp") S3method("identify", "ppp") S3method("identify", "psp") S3method("[", "im") S3method("image", "im") S3method("image", "imlist") S3method("image", "listof") S3method("image", "objsurf") S3method("image", "ssf") S3method("[", "influence.ppm") S3method("influence", "ppm") 
S3method("integral", "im") S3method("integral", "linfun") S3method("integral", "linim") S3method("integral", "msr") S3method("integral", "ssf") S3method("intensity", "detpointprocfamily") S3method("intensity", "dppm") S3method("intensity", "lpp") S3method("intensity", "ppm") S3method("intensity", "ppp") S3method("intensity", "ppx") S3method("intensity", "quadratcount") S3method("intensity", "splitppp") S3method("iplot", "default") S3method("iplot", "layered") S3method("iplot", "linnet") S3method("iplot", "lpp") S3method("iplot", "ppp") S3method("is.connected", "default") S3method("is.connected", "linnet") S3method("is.connected", "ppp") S3method("is.empty", "default") S3method("is.empty", "owin") S3method("is.empty", "ppp") S3method("is.empty", "psp") S3method("is.expandable", "ppm") S3method("is.expandable", "rmhmodel") S3method("is.hybrid", "interact") S3method("is.hybrid", "ppm") S3method("is.marked", "default") S3method("is.marked", "lppm") S3method("is.marked", "msr") S3method("is.marked", "ppm") S3method("is.marked", "ppp") S3method("is.marked", "psp") S3method("is.marked", "quad") S3method("is.multitype", "default") S3method("is.multitype", "lpp") S3method("is.multitype", "lppm") S3method("is.multitype", "msr") S3method("is.multitype", "ppm") S3method("is.multitype", "ppp") S3method("is.multitype", "quad") S3method("is.poisson", "interact") S3method("is.poisson", "kppm") S3method("is.poisson", "lppm") S3method("is.poisson", "mppm") S3method("is.poisson", "ppm") S3method("is.poisson", "rmhmodel") S3method("is.poisson", "slrm") S3method("is.stationary", "detpointprocfamily") S3method("is.stationary", "dppm") S3method("is.stationary", "kppm") S3method("is.stationary", "lppm") S3method("is.stationary", "ppm") S3method("is.stationary", "rmhmodel") S3method("is.stationary", "slrm") S3method("Kmodel", "detpointprocfamily") S3method("Kmodel", "dppm") S3method("Kmodel", "kppm") S3method("Kmodel", "ppm") S3method("kppm", "formula") S3method("kppm", "ppp") 
S3method("kppm", "quad") S3method("labels", "dppm") S3method("labels", "kppm") S3method("labels", "ppm") S3method("labels", "slrm") S3method("[", "layered") S3method("levels", "im") S3method("[", "leverage.ppm") S3method("leverage", "ppm") S3method("[", "linim") S3method("[", "linnet") S3method("[", "localpcfmatrix") S3method("logLik", "dppm") S3method("logLik", "kppm") S3method("logLik", "lppm") S3method("logLik", "mppm") S3method("logLik", "ppm") S3method("logLik", "slrm") S3method("logLik", "vblogit") S3method("[", "lpp") S3method("lppm", "formula") S3method("lppm", "lpp") S3method("markformat", "default") S3method("markformat", "ppp") S3method("markformat", "ppx") S3method("markformat", "psp") S3method("marks", "default") S3method("marks", "ppp") S3method("marks", "ppx") S3method("marks", "psp") S3method("marks", "quad") S3method("marks", "ssf") S3method("marks", "tess") S3method("max", "fv") S3method("max", "ssf") S3method("mean", "linim") S3method("median", "linim") S3method("min", "fv") S3method("min", "ssf") S3method("model.frame", "dppm") S3method("model.frame", "kppm") S3method("model.frame", "lppm") S3method("model.frame", "ppm") S3method("model.images", "dppm") S3method("model.images", "kppm") S3method("model.images", "lppm") S3method("model.images", "ppm") S3method("model.images", "slrm") S3method("model.matrix", "dppm") S3method("model.matrix", "ippm") S3method("model.matrix", "kppm") S3method("model.matrix", "lppm") S3method("model.matrix", "ppm") S3method("model.matrix", "slrm") S3method("[", "msr") S3method("multiplicity", "data.frame") S3method("multiplicity", "default") S3method("multiplicity", "ppp") S3method("multiplicity", "ppx") S3method("names", "hyperframe") S3method("nnclean", "pp3") S3method("nnclean", "ppp") S3method("nncross", "default") S3method("nncross", "lpp") S3method("nncross", "pp3") S3method("nncross", "ppp") S3method("nndensity", "ppp") S3method("nndist", "default") S3method("nndist", "lpp") S3method("nndist", "pp3") 
S3method("nndist", "ppp") S3method("nndist", "ppx") S3method("nndist", "psp") S3method("nnfun", "lpp") S3method("nnfun", "ppp") S3method("nnfun", "psp") S3method("nnwhich", "default") S3method("nnwhich", "lpp") S3method("nnwhich", "pp3") S3method("nnwhich", "ppp") S3method("nnwhich", "ppx") S3method("nobjects", "ppp") S3method("nobjects", "ppx") S3method("nobjects", "psp") S3method("nobjects", "tess") S3method("nobs", "dppm") S3method("nobs", "kppm") S3method("nobs", "lppm") S3method("nobs", "mppm") S3method("nobs", "ppm") S3method("npoints", "pp3") S3method("npoints", "ppp") S3method("npoints", "ppx") S3method("nsegments", "linnet") S3method("nsegments", "lpp") S3method("nsegments", "psp") S3method("nvertices", "default") S3method("nvertices", "linnet") S3method("nvertices", "owin") S3method("objsurf", "dppm") S3method("objsurf", "kppm") S3method("objsurf", "minconfit") S3method("opening", "owin") S3method("opening", "ppp") S3method("opening", "psp") S3method("[", "owin") S3method("pairdist", "default") S3method("pairdist", "lpp") S3method("pairdist", "pp3") S3method("pairdist", "ppp") S3method("pairdist", "ppx") S3method("pairdist", "psp") S3method("pairs", "im") S3method("pairs", "linim") S3method("pairs", "listof") S3method("pairs", "solist") S3method("parameters", "dppm") S3method("parameters", "fii") S3method("parameters", "interact") S3method("parameters", "kppm") S3method("parameters", "ppm") S3method("parameters", "profilepl") S3method("pcf", "fasp") S3method("pcf", "fv") S3method("pcfmodel", "detpointprocfamily") S3method("pcfmodel", "dppm") S3method("pcfmodel", "kppm") S3method("pcfmodel", "ppm") S3method("pcfmodel", "zclustermodel") S3method("pcf", "ppp") S3method("periodify", "owin") S3method("periodify", "ppp") S3method("periodify", "psp") S3method("persp", "funxy") S3method("persp", "im") S3method("persp", "leverage.ppm") S3method("persp", "objsurf") S3method("pixellate", "linnet") S3method("pixellate", "owin") S3method("pixellate", "ppp") 
S3method("pixellate", "psp") S3method("plot", "addvar") S3method("plot", "anylist") S3method("plot", "barplotdata") S3method("plot", "bermantest") S3method("plot", "bw.frac") S3method("plot", "bw.optim") S3method("plot", "cdftest") S3method("plot", "colourmap") S3method("plot", "diagppm") S3method("plot", "dppm") S3method("plot", "envelope") S3method("plot", "fasp") S3method("plot", "fii") S3method("plot", "foo") S3method("plot", "funxy") S3method("plot", "fv") S3method("plot", "hyperframe") S3method("plot", "im") S3method("plot", "imlist") S3method("plot", "infline") S3method("plot", "influence.ppm") S3method("plot", "kppm") S3method("plot", "kstest") S3method("plot", "laslett") S3method("plot", "layered") S3method("plot", "leverage.ppm") S3method("plot", "linfun") S3method("plot", "linim") S3method("plot", "linnet") S3method("plot", "lintess") S3method("plot", "listof") S3method("plot", "localpcfmatrix") S3method("plot", "lpp") S3method("plot", "lppm") S3method("plot", "lurk") S3method("plot", "minconfit") S3method("plot", "mppm") S3method("plot", "msr") S3method("plot", "objsurf") S3method("plot", "onearrow") S3method("plot", "owin") S3method("plot", "parres") S3method("plot", "plotpairsim") S3method("plot", "plotppm") S3method("plot", "pp3") S3method("plot", "ppm") S3method("plot", "ppp") S3method("plot", "pppmatching") S3method("plot", "ppx") S3method("plot", "profilepl") S3method("plot", "psp") S3method("plot", "qqppm") S3method("plot", "quad") S3method("plot", "quadratcount") S3method("plot", "quadrattest") S3method("plot", "rho2hat") S3method("plot", "rhohat") S3method("plot", "rppm") S3method("plot", "scan.test") S3method("plot", "slrm") S3method("plot", "solist") S3method("plot", "spatialcdf") S3method("plot", "splitppp") S3method("plot", "ssf") S3method("plot", "studpermutest") S3method("plot", "symbolmap") S3method("plot", "tess") S3method("plot", "textstring") S3method("plot", "texturemap") S3method("plot", "yardstick") S3method("points", "lpp") 
S3method("pool", "anylist") S3method("pool", "envelope") S3method("pool", "fasp") S3method("pool", "fv") S3method("pool", "quadrattest") S3method("pool", "rat") S3method("[", "pp3") S3method("ppm", "default") S3method("ppm", "formula") S3method("ppm", "ppp") S3method("ppm", "quad") S3method("[", "ppp") S3method("[", "ppx") S3method("predict", "dppm") S3method("predict", "kppm") S3method("predict", "lppm") S3method("predict", "mppm") S3method("predict", "ppm") S3method("predict", "profilepl") S3method("predict", "rho2hat") S3method("predict", "rhohat") S3method("predict", "rppm") S3method("predict", "slrm") S3method("predict", "vblogit") S3method("predict", "zclustermodel") S3method("print", "addvar") S3method("print", "anylist") S3method("print", "autoexec") S3method("print", "box3") S3method("print", "boxx") S3method("print", "bt.frame") S3method("print", "bugtable") S3method("print", "bw.frac") S3method("print", "bw.optim") S3method("print", "colourmap") S3method("print", "detpointprocfamily") S3method("print", "detpointprocfamilyfun") S3method("print", "diagppm") S3method("print", "distfun") S3method("print", "dppm") S3method("print", "envelope") S3method("print", "ewcdf") S3method("print", "fasp") S3method("print", "fii") S3method("print", "funxy") S3method("print", "fv") S3method("print", "fvfun") S3method("print", "hasenvelope") S3method("print", "hierarchicalordering") S3method("print", "hyperframe") S3method("print", "im") S3method("print", "indicfun") S3method("print", "infline") S3method("print", "influence.ppm") S3method("print", "interact") S3method("print", "intermaker") S3method("print", "isf") S3method("print", "kppm") S3method("print", "laslett") S3method("print", "layered") S3method("print", "leverage.ppm") S3method("print", "linfun") S3method("print", "linim") S3method("print", "linnet") S3method("print", "lintess") S3method("print", "localpcfmatrix") S3method("print", "lpp") S3method("print", "lppm") S3method("print", "lut") S3method("print", 
"minconfit") S3method("print", "mppm") S3method("print", "msr") S3method("print", "nnfun") S3method("print", "numberwithunit") S3method("print", "objsurf") S3method("print", "onearrow") S3method("print", "owin") S3method("print", "parres") S3method("print", "plotpairsim") S3method("print", "plotppm") S3method("print", "pp3") S3method("print", "ppm") S3method("print", "ppp") S3method("print", "pppmatching") S3method("print", "ppx") S3method("print", "profilepl") S3method("print", "psp") S3method("print", "qqppm") S3method("print", "quad") S3method("print", "quadrattest") S3method("print", "rat") S3method("print", "rho2hat") S3method("print", "rhohat") S3method("print", "rmhcontrol") S3method("print", "rmhexpand") S3method("print", "rmhInfoList") S3method("print", "rmhmodel") S3method("print", "rmhstart") S3method("print", "rppm") S3method("print", "simplepanel") S3method("print", "slrm") S3method("print", "Smoothfun") S3method("print", "solist") S3method("print", "sparse3Darray") S3method("print", "splitppp") S3method("print", "splitppx") S3method("print", "ssf") S3method("print", "summary.fii") S3method("print", "summary.hyperframe") S3method("print", "summary.im") S3method("print", "summary.kppm") S3method("print", "summary.linim") S3method("print", "summary.linnet") S3method("print", "summary.lintess") S3method("print", "summary.listof") S3method("print", "summary.logiquad") S3method("print", "summary.lpp") S3method("print", "summary.lut") S3method("print", "summary.mppm") S3method("print", "summary.owin") S3method("print", "summary.pp3") S3method("print", "summary.ppm") S3method("print", "summary.ppp") S3method("print", "summary.psp") S3method("print", "summary.quad") S3method("print", "summary.rmhexpand") S3method("print", "summary.solist") S3method("print", "summary.splitppp") S3method("print", "summary.splitppx") S3method("print", "summary.units") S3method("print", "symbolmap") S3method("print", "tess") S3method("print", "textstring") S3method("print", 
"texturemap") S3method("print", "timed") S3method("print", "units") S3method("print", "vblogit") S3method("print", "yardstick") S3method("print", "zclustermodel") S3method("prune", "rppm") S3method("pseudoR2", "lppm") S3method("pseudoR2", "ppm") S3method("psib", "kppm") S3method("[", "psp") S3method("[", "quad") S3method("quadratcount", "ppp") S3method("quadratcount", "splitppp") S3method("quadrat.test", "mppm") S3method("quadrat.test", "ppm") S3method("quadrat.test", "ppp") S3method("quadrat.test", "quadratcount") S3method("quadrat.test", "splitppp") S3method("quantess", "im") S3method("quantess", "owin") S3method("quantess", "ppp") S3method("quantile", "density") S3method("quantile", "ewcdf") S3method("quantile", "im") S3method("quantile", "linim") S3method("ranef", "mppm") S3method("range", "fv") S3method("range", "ssf") S3method("[", "rat") S3method("rbind", "hyperframe") S3method("reach", "detpointprocfamily") S3method("reach", "dppm") S3method("reach", "fii") S3method("reach", "interact") S3method("reach", "ppm") S3method("reach", "rmhmodel") S3method("rebound", "im") S3method("rebound", "owin") S3method("rebound", "ppp") S3method("rebound", "psp") S3method("reflect", "default") S3method("reflect", "im") S3method("reflect", "infline") S3method("reflect", "layered") S3method("reflect", "tess") S3method("relevel", "im") S3method("relevel", "ppp") S3method("relevel", "ppx") S3method("relrisk", "ppm") S3method("relrisk", "ppp") S3method("rescale", "im") S3method("rescale", "layered") S3method("rescale", "linnet") S3method("rescale", "lpp") S3method("rescale", "owin") S3method("rescale", "ppp") S3method("rescale", "psp") S3method("rescale", "units") S3method("residuals", "dppm") S3method("residuals", "kppm") S3method("residuals", "mppm") S3method("residuals", "ppm") S3method("rhohat", "lpp") S3method("rhohat", "lppm") S3method("rhohat", "ppm") S3method("rhohat", "ppp") S3method("rhohat", "quad") S3method("rmhcontrol", "default") S3method("rmhcontrol", "list") 
S3method("rmhcontrol", "rmhcontrol") S3method("rmh", "default") S3method("rmhmodel", "default") S3method("rmhmodel", "list") S3method("rmhmodel", "ppm") S3method("rmhmodel", "rmhmodel") S3method("rmh", "ppm") S3method("rmhstart", "default") S3method("rmhstart", "list") S3method("rmhstart", "rmhstart") S3method("roc", "kppm") S3method("roc", "lpp") S3method("roc", "lppm") S3method("roc", "ppm") S3method("roc", "ppp") S3method("rose", "default") S3method("rose", "density") S3method("rose", "fv") S3method("rose", "histogram") S3method("rotate", "im") S3method("rotate", "infline") S3method("rotate", "layered") S3method("rotate", "linnet") S3method("rotate", "lpp") S3method("rotate", "owin") S3method("rotate", "ppp") S3method("rotate", "psp") S3method("rotate", "tess") S3method("rounding", "default") S3method("rounding", "pp3") S3method("rounding", "ppp") S3method("rounding", "ppx") S3method("round", "pp3") S3method("round", "ppp") S3method("round", "ppx") S3method("row.names", "hyperframe") S3method("rshift", "ppp") S3method("rshift", "psp") S3method("rshift", "splitppp") S3method("scalardilate", "breakpts") S3method("scalardilate", "default") S3method("scalardilate", "diagramobj") S3method("scalardilate", "im") S3method("scalardilate", "layered") S3method("scalardilate", "linim") S3method("scalardilate", "linnet") S3method("scalardilate", "lpp") S3method("scalardilate", "msr") S3method("scalardilate", "owin") S3method("scalardilate", "ppp") S3method("scalardilate", "psp") S3method("scalardilate", "tess") S3method("scaletointerval", "default") S3method("scaletointerval", "im") S3method("scanmeasure", "im") S3method("scanmeasure", "ppp") S3method("segregation.test", "ppp") S3method("sharpen", "ppp") S3method("shift", "diagramobj") S3method("shift", "im") S3method("shift", "infline") S3method("shift", "influence.ppm") S3method("shift", "layered") S3method("shift", "leverage.ppm") S3method("shift", "linim") S3method("shift", "linnet") S3method("shift", "lpp") 
S3method("shift", "msr") S3method("shift", "owin") S3method("shift", "ppp") S3method("shift", "psp") S3method("shift", "quadratcount") S3method("shift", "quadrattest") S3method("shift", "tess") S3method("shortside", "box3") S3method("shortside", "boxx") S3method("shortside", "owin") S3method("sidelengths", "box3") S3method("sidelengths", "boxx") S3method("sidelengths", "owin") S3method("simulate", "detpointprocfamily") S3method("simulate", "dppm") S3method("simulate", "kppm") S3method("simulate", "lppm") S3method("simulate", "mppm") S3method("simulate", "ppm") S3method("simulate", "profilepl") S3method("simulate", "rhohat") S3method("simulate", "slrm") S3method("Smoothfun", "ppp") S3method("Smooth", "fv") S3method("Smooth", "im") S3method("Smooth", "msr") S3method("Smooth", "ppp") S3method("Smooth", "solist") S3method("Smooth", "ssf") S3method("[", "solist") S3method("sort", "im") S3method("[", "sparse3Darray") S3method("split", "hyperframe") S3method("split", "im") S3method("split", "msr") S3method("[", "splitppp") S3method("split", "ppp") S3method("split", "ppx") S3method("[", "splitppx") S3method("[", "ssf") S3method("str", "hyperframe") S3method("subset", "hyperframe") S3method("subset", "lpp") S3method("subset", "pp3") S3method("subset", "ppp") S3method("subset", "ppx") S3method("summary", "anylist") S3method("summary", "envelope") S3method("summary", "fii") S3method("summary", "funxy") S3method("summary", "hyperframe") S3method("summary", "im") S3method("summary", "kppm") S3method("summary", "linfun") S3method("summary", "linim") S3method("summary", "linnet") S3method("summary", "lintess") S3method("summary", "listof") S3method("summary", "logiquad") S3method("summary", "lpp") S3method("summary", "lppm") S3method("summary", "lut") S3method("summary", "mppm") S3method("summary", "owin") S3method("summary", "pp3") S3method("summary", "ppm") S3method("summary", "ppp") S3method("summary", "pppmatching") S3method("summary", "ppx") S3method("summary", "profilepl") 
S3method("summary", "psp") S3method("summary", "quad") S3method("summary", "rmhexpand") S3method("summary", "solist") S3method("summary", "splitppp") S3method("summary", "splitppx") S3method("summary", "units") S3method("summary", "vblogit") S3method("superimpose", "default") S3method("superimpose", "lpp") S3method("superimpose", "ppp") S3method("superimpose", "ppplist") S3method("superimpose", "psp") S3method("superimpose", "splitppp") S3method("tail", "hyperframe") S3method("tail", "ppp") S3method("tail", "ppx") S3method("tail", "psp") S3method("tail", "tess") S3method("terms", "dppm") S3method("terms", "kppm") S3method("terms", "lppm") S3method("terms", "mppm") S3method("terms", "ppm") S3method("terms", "slrm") S3method("[", "tess") S3method("text", "lpp") S3method("text", "ppp") S3method("text", "psp") S3method("unique", "ppp") S3method("unique", "ppx") S3method("unitname", "box3") S3method("unitname", "boxx") S3method("unitname", "default") S3method("unitname", "dppm") S3method("unitname", "im") S3method("unitname", "kppm") S3method("unitname", "linnet") S3method("unitname", "lpp") S3method("unitname", "minconfit") S3method("unitname", "owin") S3method("unitname", "pp3") S3method("unitname", "ppm") S3method("unitname", "ppp") S3method("unitname", "ppx") S3method("unitname", "psp") S3method("unitname", "quad") S3method("unitname", "slrm") S3method("unitname", "tess") S3method("unmark", "lpp") S3method("unmark", "ppp") S3method("unmark", "ppx") S3method("unmark", "psp") S3method("unmark", "splitppp") S3method("unmark", "ssf") S3method("unmark", "tess") S3method("unstack", "layered") S3method("unstack", "lpp") S3method("unstack", "msr") S3method("unstack", "ppp") S3method("unstack", "psp") S3method("unstack", "solist") S3method("update", "detpointprocfamily") S3method("update", "im") S3method("update", "interact") S3method("update", "ippm") S3method("update", "kppm") S3method("update", "lppm") S3method("update", "ppm") S3method("update", "rmhcontrol") 
S3method("update", "rmhstart") S3method("update", "slrm") S3method("update", "symbolmap") S3method("valid", "detpointprocfamily") S3method("valid", "lppm") S3method("valid", "ppm") S3method("vcov", "kppm") S3method("vcov", "lppm") S3method("vcov", "mppm") S3method("vcov", "ppm") S3method("vcov", "slrm") S3method("vertices", "linnet") S3method("vertices", "owin") S3method("volume", "box3") S3method("volume", "boxx") S3method("volume", "linnet") S3method("volume", "owin") S3method("Window", "distfun") S3method("Window", "dppm") S3method("Window", "funxy") S3method("Window", "im") S3method("Window", "influence.ppm") S3method("Window", "kppm") S3method("Window", "layered") S3method("Window", "leverage.ppm") S3method("Window", "linnet") S3method("Window", "lintess") S3method("Window", "lpp") S3method("Window", "lppm") S3method("Window", "msr") S3method("Window", "nnfun") S3method("Window", "ppm") S3method("Window", "ppp") S3method("Window", "psp") S3method("Window", "quad") S3method("Window", "quadratcount") S3method("Window", "quadrattest") S3method("Window", "rmhmodel") S3method("Window", "tess") S3method("with", "fv") S3method("with", "hyperframe") S3method("with", "msr") S3method("with", "ssf") S3method("xtfrm", "im") # ......................................... # Assignment methods # ......................................... 
S3method("[<-", "anylist") S3method("coords<-", "ppp") S3method("coords<-", "ppx") S3method("dimnames<-", "fasp") S3method("dimnames<-", "sparse3Darray") S3method("dim<-", "sparse3Darray") S3method("formula<-", "fv") S3method("Frame<-", "im") S3method("Frame<-", "owin") S3method("Frame<-", "ppp") S3method("[<-", "fv") S3method("$<-", "fv") S3method("[<-", "hyperframe") S3method("$<-", "hyperframe") S3method("[<-", "im") S3method("[<-", "layered") S3method("[[<-", "layered") S3method("levels<-", "im") S3method("[<-", "linim") S3method("[<-", "listof") S3method("marks<-", "lpp") S3method("marks<-", "ppp") S3method("marks<-", "ppx") S3method("marks<-", "psp") S3method("marks<-", "ssf") S3method("marks<-", "tess") S3method("names<-", "fv") S3method("names<-", "hyperframe") S3method("[<-", "ppp") S3method("row.names<-", "hyperframe") S3method("[<-", "solist") S3method("[<-", "sparse3Darray") S3method("split<-", "hyperframe") S3method("[<-", "splitppp") S3method("split<-", "ppp") S3method("[<-", "splitppx") S3method("[<-", "tess") S3method("unitname<-", "box3") S3method("unitname<-", "boxx") S3method("unitname<-", "default") S3method("unitname<-", "dppm") S3method("unitname<-", "im") S3method("unitname<-", "kppm") S3method("unitname<-", "linnet") S3method("unitname<-", "lpp") S3method("unitname<-", "minconfit") S3method("unitname<-", "owin") S3method("unitname<-", "pp3") S3method("unitname<-", "ppm") S3method("unitname<-", "ppp") S3method("unitname<-", "ppx") S3method("unitname<-", "psp") S3method("unitname<-", "quad") S3method("unitname<-", "slrm") S3method("unitname<-", "tess") S3method("Window<-", "im") S3method("Window<-", "linnet") S3method("Window<-", "lpp") S3method("Window<-", "ppp") S3method("Window<-", "psp") # ......................................... # End of methods # ......................................... 
spatstat/demo/0000755000176200001440000000000013115225157013043 5ustar liggesusersspatstat/demo/sumfun.R0000644000176200001440000000713613115225157014512 0ustar liggesusers## demonstration of all summary functions opa <- par(mfrow=c(1,1)) ## Ripley's K-function plot(swedishpines) plot(Kest(swedishpines)) ## Besag's transformation plot(Lest(swedishpines)) ## pair correlation function plot(pcf(swedishpines)) par(mfrow=c(2,3)) ## Showing the utility of the K-function plot(cells) plot(nztrees) plot(redwood) plot(Kest(cells)) plot(Kest(nztrees)) plot(Kest(redwood)) ## Showing the utility of the pair correlation function plot(cells) plot(nztrees) plot(redwood) plot(pcf(cells)) plot(pcf(nztrees)) plot(pcf(redwood)) ## par(mfrow=c(1,1)) ## Analogues for inhomogeneous patterns ## Reweighted K-function plot(japanesepines) fit <- ppm(japanesepines, ~polynom(x,y,2)) plot(predict(fit)) plot(Kinhom(japanesepines, fit)) plot(pcfinhom(japanesepines, fit)) plot(Linhom(japanesepines)) ## Rescaled K-function plot(unmark(bronzefilter)) plot(Kscaled(bronzefilter)) fit <- ppm(unmark(bronzefilter), ~x) plot(predict(fit)) plot(unmark(bronzefilter), add=TRUE) plot(Kscaled(bronzefilter, fit)) plot(Lscaled(bronzefilter, fit)) ## Local indicators of spatial association plot(localL(swedishpines)) plot(localK(swedishpines)) ## anisotropic plot(Ksector(redwood, 0, 90)) plot(Rf <- pairorient(redwood, 0.05, 0.15)) plot(Df <- deriv(Rf, spar=0.6, Dperiodic=TRUE)) ## par(mfrow=c(2,3)) ## Empty space function F plot(cells) plot(nztrees) plot(redwood) plot(Fest(cells)) plot(Fest(nztrees)) plot(Fest(redwood)) ## Nearest neighbour distance function G plot(cells) plot(nztrees) plot(redwood) plot(Gest(cells)) plot(Gest(nztrees)) plot(Gest(redwood)) ## J-function plot(cells) plot(nztrees) plot(redwood) plot(Jest(cells)) plot(Jest(nztrees)) plot(Jest(redwood)) par(mfrow=c(1,1)) ## versions for inhomogeneous patterns plot(Finhom(japanesepines)) plot(Ginhom(japanesepines)) plot(Jinhom(japanesepines)) ## 
Display F,G,J,K plot(allstats(swedishpines)) ## Multitype patterns plot(amacrine) plot(Kcross(amacrine)) plot(Kdot(amacrine)) I <- (marks(amacrine) == "on") J <- (marks(amacrine) == "off") plot(Kmulti(amacrine, I, J)) plot(alltypes(amacrine, "K")) plot(Lcross(amacrine)) plot(Ldot(amacrine)) plot(pcfcross(amacrine)) plot(pcfdot(amacrine)) plot(pcfmulti(amacrine, I, J)) plot(Gcross(amacrine)) plot(Gdot(amacrine)) plot(Gmulti(amacrine, I, J)) plot(alltypes(amacrine, "G")) plot(Jcross(amacrine)) plot(Jdot(amacrine)) plot(Jmulti(amacrine,I,J)) plot(alltypes(amacrine, "J")) plot(alltypes(amacrine, "F")) plot(Iest(amacrine)) plot(markconnect(amacrine)) ## Multitype, inhomogeneous plot(Kcross.inhom(amacrine)) plot(Kdot.inhom(amacrine)) plot(Kmulti.inhom(amacrine, I, J)) plot(Lcross.inhom(amacrine)) plot(Ldot.inhom(amacrine)) plot(pcfcross.inhom(amacrine)) plot(pcfdot.inhom(amacrine)) plot(pcfmulti.inhom(amacrine, I, J)) ## Numerical marks plot(markcorr(longleaf)) plot(markvario(longleaf)) plot(Emark(longleaf)) plot(Vmark(longleaf)) ## Linear networks plot(chicago) plot(linearK(chicago)) plot(linearKcross(chicago)) plot(linearKdot(chicago)) plot(linearpcf(chicago)) plot(linearpcfcross(chicago)) plot(linearpcfdot(chicago)) lam <- rep(intensity(unmark(chicago)), npoints(chicago)) A <- split(chicago)$assault B <- split(chicago)$burglary lamA <- rep(intensity(A), npoints(A)) lamB <- rep(intensity(B), npoints(B)) plot(linearKinhom(chicago, lam)) plot(linearKcross.inhom(chicago, "assault", "burglary", lamA, lamB)) plot(linearKdot.inhom(chicago, "assault", lamA, lam)) plot(linearpcfinhom(chicago, lam)) plot(linearpcfcross.inhom(chicago, "assault", "burglary", lamA, lamB)) plot(linearpcfdot.inhom(chicago, "assault", lamA, lam)) plot(linearmarkconnect(chicago)) plot(linearmarkequal(chicago)) rm(I,J,fit) par(opa) spatstat/demo/diagnose.R0000755000176200001440000001230013115273007014753 0ustar liggesusersif(dev.cur() <= 1) { dd <- getOption("device") if(is.character(dd)) dd <- get(dd) 
dd() } oldpar <- par(ask = interactive() && (.Device %in% c("X11", "GTK", "windows", "Macintosh"))) par(mfrow=c(1,1)) oldoptions <- options(warn = -1) # ####################################################### # X <- rpoispp(function(x,y) { 1000 * exp(- 4 * x)}, 1000) plot(X, main="Inhomogeneous Poisson pattern") fit.hom <- ppm(X ~1, Poisson()) fit.inhom <- ppm(X ~x, Poisson()) diagnose.ppm(fit.inhom, which="marks", type="Pearson", main=c("Mark plot", "Circles for positive residual mass", "Colour for negative residual density")) par(mfrow=c(1,2)) diagnose.ppm(fit.hom, which="marks", main=c("Wrong model", "(homogeneous Poisson)", "raw residuals")) diagnose.ppm(fit.inhom, which="marks", main=c("Right model", "(inhomogeneous Poisson)", "raw residuals")) par(mfrow=c(1,1)) diagnose.ppm(fit.inhom, which="smooth", main="Smoothed residual field") par(mfrow=c(1,2)) diagnose.ppm(fit.hom, which="smooth", main=c("Wrong model", "(homogeneous Poisson)", "Smoothed residual field")) diagnose.ppm(fit.inhom, which="smooth", main=c("Right model", "(inhomogeneous Poisson)", "Smoothed residual field")) par(mfrow=c(1,1)) diagnose.ppm(fit.inhom, which="x") par(mfrow=c(1,2)) diagnose.ppm(fit.hom, which="x", main=c("Wrong model", "(homogeneous Poisson)", "lurking variable plot for x")) diagnose.ppm(fit.inhom, which="x", main=c("Right model", "(inhomogeneous Poisson)", "lurking variable plot for x")) par(mfrow=c(1,1)) diagnose.ppm(fit.hom, type="Pearson",main="standard diagnostic plots") par(mfrow=c(1,2)) diagnose.ppm(fit.hom, main=c("Wrong model", "(homogeneous Poisson)")) diagnose.ppm(fit.inhom, main=c("Right model", "(inhomogeneous Poisson)")) par(mfrow=c(1,1)) # ####################################################### # LEVERAGE/INFLUENCE plot(leverage(fit.inhom)) plot(influence(fit.inhom)) plot(dfbetas(fit.inhom)) # ####################################################### # COMPENSATORS ## Takes a long time... 
CF <- compareFit(listof(hom=fit.hom, inhom=fit.inhom), Kcom, same="iso", different="icom") plot(CF, main="model compensators", legend=FALSE) legend("topleft", legend=c("empirical K function", "compensator of CSR", "compensator of inhomogeneous Poisson"), lty=1:3, col=1:3) # ####################################################### # Q - Q PLOTS # qqplot.ppm(fit.hom, 40) #conclusion: homogeneous Poisson model is not correct title(main="Q-Q plot of smoothed residuals") qqplot.ppm(fit.inhom, 40) # TAKES A WHILE... title(main=c("Right model", "(inhomogeneous Poisson)", "Q-Q plot of smoothed residuals")) # conclusion: fitted inhomogeneous Poisson model looks OK # ####################################################### # plot(cells) fitPoisson <- ppm(cells ~1, Poisson()) diagnose.ppm(fitPoisson, main=c("CSR fitted to cells data", "Raw residuals", "No suggestion of departure from CSR")) diagnose.ppm(fitPoisson, type="pearson", main=c("CSR fitted to cells data", "Pearson residuals", "No suggestion of departure from CSR")) # These diagnostic plots do NOT show evidence of departure from uniform Poisson plot(Kcom(fitPoisson), cbind(iso, icom) ~ r) plot(Gcom(fitPoisson), cbind(han, hcom) ~ r) # K compensator DOES show strong evidence of departure from uniform Poisson qqplot.ppm(fitPoisson, 40) title(main=c("CSR fitted to cells data", "Q-Q plot of smoothed raw residuals", "Strong suggestion of departure from CSR")) # Q-Q plot DOES show strong evidence of departure from uniform Poisson. # fitStrauss <- ppm(cells ~1, Strauss(r=0.1)) diagnose.ppm(fitStrauss, main=c("Strauss model fitted to cells data", "Raw residuals")) diagnose.ppm(fitStrauss, type="pearson", main=c("Strauss model fitted to cells data", "Pearson residuals")) plot(Kcom(fitStrauss), cbind(iso, icom) ~ r) plot(Gcom(fitStrauss), cbind(han, hcom) ~ r) # next line takes a LOOONG time ... 
qqplot.ppm(fitStrauss, 40, type="pearson") title(main=c("Strauss model fitted to cells data", "Q-Q plot of smoothed Pearson residuals", "Suggests adequate fit")) # Conclusion: Strauss model seems OK # ####################################################### # plot(nztrees) fit <- ppm(nztrees ~1, Poisson()) diagnose.ppm(fit, type="pearson") title(main=c("CSR fitted to NZ trees", "Pearson residuals")) diagnose.ppm(fit, type="pearson", cumulative=FALSE) title(main=c("CSR fitted to NZ trees", "Pearson residuals (non-cumulative)")) lurking(fit, expression(x), type="pearson", cumulative=FALSE, splineargs=list(spar=0.3)) # Sharp peak at right is suspicious qqplot.ppm(fit, 40, type="pearson") title(main=c("CSR fitted to NZ trees", "Q-Q plot of smoothed Pearson residuals")) # Slight suggestion of departure from Poisson at top right of pattern. par(oldpar) options(oldoptions) spatstat/demo/data.R0000755000176200001440000000614013115273007014100 0ustar liggesusersif(dev.cur() <= 1) { dd <- getOption("device") if(is.character(dd)) dd <- get(dd) dd() } oldpar <- par(ask = interactive() && dev.interactive(orNone=TRUE)) oldoptions <- options(warn=-1) plot(amacrine) plot(anemones, markscale=1) ants.extra$plotit() plot(austates) plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) plot(betacells) plot(bramblecanes, cols=1:3) plot(split(bramblecanes)) plot(bronzefilter,markscale=2) plot(cells) plot(chicago, main="Chicago Street Crimes", col="grey", cols=c("red", "blue", "black", "blue", "red", "blue", "blue"), chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE) chorley.extra$plotit() plot(clmfires, which.marks="cause", cols=2:5, cex=0.25, main="Castilla-La Mancha forest fires") plot(clmfires.extra$clmcov200, main="Covariates for forest fires") plot(copper$Points, main="Copper") plot(copper$Lines, add=TRUE) plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }), parargs=list(mar=rep(1,4))) plot(dendrite, leg.side="bottom", 
main="", cex=0.75, cols=2:4) plot(demopat) plot(finpines, main="Finnish pines") wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1") plot(flu[wildM1, 1, drop=TRUE], main=c("flu data", "wild type virus, M2-M1 stain"), chars=c(16,3), cex=0.4, cols=2:3) plot(gordon, main="People in Gordon Square", pch=16) plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites") plot(hamster, cols=c(2,4)) plot(heather) plot(humberside) plot(hyytiala, cols=2:5) plot(japanesepines) plot(lansing) plot(split(lansing)) plot(longleaf) plot(mucosa, chars=c(1,3), cols=c("red", "green")) plot(mucosa.subwin, add=TRUE, lty=3) plot(murchison, main="Murchison data") plot(murchison$greenstone, main="Murchison data", col="lightgreen") plot(murchison$gold, add=TRUE, pch=3, col="blue") plot(murchison$faults, add=TRUE, col="red") plot(nbfires, use.marks=FALSE, pch=".") plot(split(nbfires), use.marks=FALSE, chars=".") plot(split(nbfires)$"2000", which.marks="fire.type", main=c("New Brunswick fires 2000", "by fire type"), cols=c("blue", "green", "red", "cyan"), leg.side="left") plot(nztrees) plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3) plot(osteo[1:10,], tick.marks=FALSE, xlab="", ylab="", zlab="") plot(paracou, cols=2:3, chars=c(16,3)) ponderosa.extra$plotit() pyr <- pyramidal pyr$grp <- abbreviate(pyramidal$group, minlength=7) plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons") rm(pyr) plot(redwood) plot(redwood3, add=TRUE, pch=20) redwoodfull.extra$plotit() plot(residualspaper$Fig1) plot(residualspaper$Fig4a) plot(residualspaper$Fig4b) plot(residualspaper$Fig4c) shapley.extra$plotit(main="Shapley") plot(simdat) plot(spiders, pch=16, show.window=FALSE) plot(sporophores, chars=c(16,1,2), cex=0.6) points(0,0,pch=16, cex=2) text(15,8,"Tree", cex=0.75) plot(spruces, maxsize=min(nndist(spruces))) plot(swedishpines) plot(urkiola, cex=0.5, cols=2:3) plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters")) 
plot(waterstriders) par(oldpar) options(oldoptions) spatstat/demo/00Index0000755000176200001440000000032413115273007014174 0ustar liggesusersspatstat Demonstration of spatstat library diagnose Demonstration of diagnostic capabilities for models in spatstat data Datasets in spatstat sumfun Demonstration of nonparametric summary functions in spatstat spatstat/demo/spatstat.R0000755000176200001440000005225513115273007015042 0ustar liggesusersif(dev.cur() <= 1) { dd <- getOption("device") if(is.character(dd)) dd <- get(dd) dd() } oldpar <- par(ask = interactive() && dev.interactive(orNone=TRUE)) oldoptions <- options(warn=-1) fanfare <- function(stuff) { plot(c(0,1),c(0,1),type="n",axes=FALSE, xlab="", ylab="") text(0.5,0.5, stuff, cex=2.5) } par(mar=c(1,1,2,1)+0.1) fanfare("Spatstat demonstration") fanfare("I. Types of data") plot(swedishpines, main="Point pattern") plot(demopat, cols=c("green", "blue"), main="Multitype point pattern") plot(longleaf, fg="blue", main="Marked point pattern") plot(finpines, main="Point pattern with multivariate marks") a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) plot(a, main="Line segment pattern") marks(a) <- sample(letters[1:4], 20, replace=TRUE) plot(a, main="Multitype line segment pattern") marks(a) <- runif(20) plot(a, main="Marked line segment pattern") plot(owin(), main="Rectangular window") plot(letterR, main="Polygonal window") plot(as.mask(letterR), main="Binary mask window") Z <- as.im(function(x,y){ sqrt((x - 1)^2 + (y-1)^2)}, square(2)) plot(Z, main="Pixel image") X <- runifpoint(42) plot(dirichlet(X), main="Tessellation") plot(rpoispp3(100), main="Three-dimensional point pattern") plot(simplenet, main="Linear network (linnet)") X <- rpoislpp(20, simplenet) plot(X, main="Point pattern on linear network (lpp)", show.window=FALSE) fanfare("II. 
Graphics") plot(letterR, col="green", border="red", lwd=2, main="Polygonal window with colour fill") plot(letterR, hatch=TRUE, spacing=0.15, angle=30, main="Polygonal window with line shading") plot(letterR, hatch=TRUE, hatchargs=list(texture=8, spacing=0.12), main="Polygonal window with texture fill") plot(amacrine, chars=c(1,16), main="plot(X, chars = c(1,16))") plot(amacrine, cols=c("red","blue"), chars=16, main="plot(X, cols=c(\"red\", \"blue\"))") opa <- par(mfrow=c(1,2)) plot(longleaf, markscale=0.03, main="markscale=0.03") plot(longleaf, markscale=0.09, main="markscale=0.09") par(opa) plot(longleaf, pch=21, cex=1, bg=colourmap(terrain.colors(128), range=c(0,80)), main="colourmap for numeric mark values") Z <- as.im(function(x,y) { r <- sqrt(x^2+y^2); r * exp(-r) }, owin(c(-5,5),c(-5,5))) plot(Z, main="pixel image: image plot") plot(Z, main="pixel image: image plot (heat colours)", col=heat.colors(256)) plot(Z, main="pixel image: logarithmic colour map", log=TRUE, col=rainbow(128, end=5/6)) contour(Z, main="pixel image: contour plot", axes=FALSE) plot(Z, main="pixel image: image + contour plot") contour(Z, add=TRUE) persp(Z, colmap=terrain.colors(128), shade=0.3, phi=30,theta=100, main="pixel image: perspective plot") ct <- colourmap(rainbow(20), breaks=seq(-1,1,length=21)) plot(ct, main="Colour map for real numbers") ca <- colourmap(rainbow(8), inputs=letters[1:8]) plot(ca, main="Colour map for discrete values") Z <- as.im(nnfun(runifpoint(8))) plot(Z, main="colour image for discrete values") textureplot(Z, main="texture plot for discrete values") W <- owin(c(1,5),c(0,4.5)) Lout <- scaletointerval(distmap(rebound.owin(letterR, W))) Lin <- scaletointerval(distmap(complement.owin(letterR, W))) L <- scaletointerval(eval.im(Lin-Lout)) D <- scaletointerval(density(runifpoint(30, W), adjust=0.3)) X <- scaletointerval(as.im(function(x,y){ x }, W=W)) plot(listof(L=L, D=D, X=X), main="Multiple images") pairs(L, D, X, main="Multiple images: pairs plot") persp(L, 
colin=D, theta=-24, phi=35, box=FALSE, apron=TRUE, main="Two images:\nperspective + colours", shade=0.4, ltheta=225, lphi=10) plot(rgbim(D,X,L,maxColorValue=1), valuesAreColours=TRUE, main="Three images: RGB display") plot(hsvim(D,L,X), valuesAreColours=TRUE, main="Three images: HSV display") fanfare("III. Conversion between types") W <- as.owin(chorley) plot(W, "window W") plot(as.mask(W)) plot(as.mask(W, dimyx=1000)) plot(as.im(W, value=3)) plot(as.im(W, value=3, na.replace=0), ribbon=TRUE) plot(as.im(function(x,y) {x^2 + y}, W=square(1)), main="as.im(function(x,y){x^2+y})") V <- delaunay(runifpoint(12)) plot(V, main="Tessellation V") plot(as.im(V, dimyx=256), main="as.im(V)") plot(as.owin(V)) X <- swedishpines plot(X, "point pattern X") plot(as.im(X), col=c("white","red"), ribbon=FALSE, xlab="", ylab="") plot(as.owin(X), add=TRUE) fanfare("IV. Subsetting and splitting data") plot(X, "point pattern X") subset <- 1:20 plot(X[subset], main="subset operation: X[subset]") subwindow <- owin(poly=list(x=c(0,96,96,40,40),y=c(0,0,100,100,50))) plot(X[subwindow], main="subset operation: X[subwindow]") plot(lansing, "Lansing Woods data") plot(split(lansing), main="split operation: split(X)", mar.panel=c(0,0,2,0), hsep=1, pch=3) plot(longleaf, main="Longleaf Pines data") plot(cut(longleaf, breaks=3), main=c("cut operation", "cut(longleaf, breaks=3)")) Z <- dirichlet(runifpoint(16)) X <- runifpoint(100) plot(cut(X,Z), main="points cut by tessellation", leg.side="left") plot(Z, add=TRUE) plot(split(X, Z), main="points split by tessellation", mar.panel=c(0,0,2,2), hsep=1) W <- square(1) X <- as.im(function(x,y){sqrt(x^2+y^2)}, W) Y <- dirichlet(runifpoint(12, W)) plot(split(X,Y), main="image split by tessellation") fanfare("V. 
Exploratory data analysis") par(mar=c(3,3,3,2)+0.1) plot(swedishpines, main="Quadrat counts", pch="+") tab <- quadratcount(swedishpines, 4) plot(tab, add=TRUE, lty=2, cex=2, col="blue") par(mar=c(5,3,3,2)+0.1) plot(swedishpines, main="", pch="+") title(main=expression(chi^2 * " test"), cex.main=2) tes <- quadrat.test(swedishpines, 3) tes plot(tes, add=TRUE, col="red", cex=1.5, lty=2, lwd=3) title(sub=paste("p-value =", signif(tes$p.value,3)), cex.sub=1.4) par(mar=c(4,4,3,2)+0.1) tesk <- cdf.test(nztrees, "x") tesk plot(tesk) mur <- lapply(murchison, rescale, s=1000) mur <- lapply(mur, "unitname<-", value="km") X <- mur$gold D <- distfun(mur$faults) plot(X, main="Murchison gold deposits", cols="blue") plot(mur$faults, add=TRUE, col="red") rh <- rhohat(X,D) plot(rh, main="Smoothed rate estimate", xlab="Distance to nearest fault (km)", legend=FALSE) plot(predict(rh), main="predict(rhohat(X,D))") Z <- density(cells, 0.07) plot(Z, main="Kernel smoothed intensity of point pattern") plot(cells, add=TRUE) plot(redwood, main="Redwood data") te <- scan.test(redwood, 0.1, method="poisson") plot(te, main=c("Scan Statistic for redwood data", paste("p-value =", signif(te$p.value,3)))) plot(redwood, add=TRUE) te X <- unique(unmark(shapley)) plot(X, "Shapley galaxy concentration", pch=".") coco <-colourmap(rev(rainbow(128, end=2/3)), range=c(0,1)) pa <- function(i, ...) 
{ if(i == 1) list(chars=c(".", "+"), cols=1:2) else list(size=0.5, pch=16, col=coco) } plot(nnclean(X, k=17), panel.args=pa, mar.panel=c(0,1,1,0), nrows=2, main="Byers-Raftery nearest neighbour cleaning", cex.title=1.2) Y <- sharpen(X, sigma=0.5, edgecorrect=TRUE) plot(Y, main="Choi-Hall data sharpening", pch=".") owpa <- par(mfrow=c(1,2)) W <- grow.rectangle(as.rectangle(letterR), 1) X <- superimpose(runifpoint(300, letterR), runifpoint(50, W), W=W) plot(W, main="clusterset(X, 'm')") plot(clusterset(X, 'marks', fast=TRUE), add=TRUE, chars=c("o", "+"), cols=1:2) plot(letterR, add=TRUE) plot(W, main="clusterset(X, 'd')") plot(clusterset(X, 'domain', exact=FALSE), add=TRUE) plot(letterR, add=TRUE) par(owpa) D <- density(a, sigma=0.05) plot(D, main="Kernel smoothed intensity of line segment pattern") plot(a, add=TRUE) X <- runifpoint(42) plot(dirichlet(X)) plot(X, add=TRUE) plot(delaunay(X)) plot(X, add=TRUE) parsave <- par(mfrow=c(1,1), mar=0.2+c(0,1,3,1)) plot(listof("Longleaf Pines data"=longleaf, "Nearest mark"=nnmark(longleaf), "Kernel smoothing of marks"=Smooth(longleaf,10), "Inverse distance weighted\nsmoothing of marks"=idw(longleaf)), equal.scales=TRUE, halign=TRUE, valign=TRUE, main="", mar.panel=0.2+c(0,0,2,2)) par(parsave) fryplot(cells, main=c("Fry plot","cells data"), pch="+") miplot(longleaf, main="Morishita Index plot", pch=16, col="blue") plot(swedishpines, main="Swedish Pines data") K <- Kest(swedishpines) plot(K, main="K function for Swedish Pines", legendmath=TRUE) en <- envelope(swedishpines, fun=Kest, nsim=10, correction="translate") plot(en, main="Envelopes of K function based on CSR", shade=c("hi", "lo")) pc <- pcf(swedishpines) plot(pc, main="Pair correlation function") plot(swedishpines, main="nearest neighbours") m <- nnwhich(swedishpines) b <- swedishpines[m] arrows(swedishpines$x, swedishpines$y, b$x, b$y, angle=12, length=0.1, col="red") plot(swedishpines %mark% nndist(swedishpines), markscale=1, main="Stienen diagram", legend=FALSE, 
fg="blue") plot(Gest(swedishpines), main=c("Nearest neighbour distance function G", "Gest(swedishpines)"), legendmath=TRUE) Z <- distmap(swedishpines, dimyx=512) plot(swedishpines$window, main="Distance map") plot(Z, add=TRUE) points(swedishpines) plot(Fest(swedishpines), main=c("Empty space function F", "Fest(swedishpines)"), legendmath=TRUE) W <- rebound.owin(letterR, square(5)) plot(distmap(W), main="Distance map") plot(W, add=TRUE) a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) contour(distmap(a), main="Distance map") plot(a, add=TRUE,col="red") plot(Jest(swedishpines), main=c("J-function", "J(r)=(1-G(r))/(1-F(r))")) X <- swedishpines X <- X[sample(1:npoints(X))] Z <- nnfun(X) plot(as.owin(X), main="Nearest neighbour map") plot(Z, add=TRUE) points(X) plot(allstats(swedishpines)) Fig4b <- residualspaper$Fig4b plot(Fig4b, main="Inhomogeneous point pattern") plot(Kinhom(Fig4b), main="Inhomogeneous K-function") plot(pcfinhom(Fig4b, stoyan=0.1), main="Inhomogeneous pair correlation") plot(Ginhom(Fig4b, sigma=0.06), main="Inhomogeneous G-function") plot(Jinhom(Fig4b, sigma=0.06), main="Inhomogeneous J-function") X <- unmark(bronzefilter) plot(X, "Bronze filter data") lam <- predict(ppm(X ~x)) plot(Kscaled(X, lam), xlim=c(0, 1.5), main="Locally-scaled K function") plot(urkiola) plot(split(urkiola), cex=0.5) plot(density(split(urkiola))) contour(density(split(urkiola)), panel.begin=as.owin(urkiola)) plot(relrisk(urkiola), main="Relative risk (cross-validated)") plot(bramblecanes) br <- rescale(bramblecanes) plot(alltypes(br, "K"), mar.panel=c(4,5,2,2)+0.1) ama <- rescale(amacrine) plot(alltypes(ama, Lcross, envelope=TRUE, nsim=9), . 
- r ~ r, ylim=c(-25, 5)) ponderosa.extra$plotit(main="Ponderosa Pines") L <- localL(ponderosa) pL <- plot(L, lty=1, col=1, legend=FALSE, main=c("neighbourhood density functions", "for Ponderosa Pines"), cex.main=0.8) parsave <- par(mfrow=c(1,2)) ponderosa.extra$plotit() par(pty="s") plot(L, iso007 ~ r, main="point B") par(mar=0.2+c(1,1,3,1)) ponderosa.extra$plotit() L12 <- localL(ponderosa, rvalue=12) P12 <- ponderosa %mark% L12 Z12 <- Smooth(P12, sigma=5, dimyx=128) plot(Z12, col=topo.colors(128), main=c("smoothed", "neighbourhood density"), cex.main=0.8) contour(Z12, add=TRUE) points(ponderosa, pch=16, cex=0.5) plot(amacrine, main="Amacrine cells data", cex.main=0.8) par(pty="s") mkc <- markcorr(amacrine, correction="translate", method="density", kernel="epanechnikov") plot(mkc, main="Mark correlation function", legend=FALSE, cex.main=0.8) par(parsave) par(mar=0.2+c(4,4,3,1)) plot(alltypes(amacrine, markconnect), title="Mark connection functions for amacrine cells") parsave <- par(mfrow=c(1,2)) parspruce2 <- par(mar=0.2+c(0,2,2,0)) plot(spruces, cex.main=0.8, markscale=10) par(pty="s", mar=0.2+c(2,3,2,0)) plot(markcorr(spruces), main="Mark correlation", legendpos="bottomright") par(parspruce2) plot(spruces, cex.main=0.8, markscale=10) par(pty="s", mar=0.2+c(2,3,2,0)) plot(markvario(spruces), main="Mark variogram", legendpos="topright") par(parsave) plot(listof("Emark(spruces)"=Emark(spruces), "Vmark(spruces)"=Vmark(spruces)), main="Independence diagnostics", ylim.covers=0, legendpos="bottom") par3 <- par(mfrow=c(1,2)) X <- rpoispp3(100) plot(X, main="3D point pattern X") plot(K3est(X), main="K-function in 3D") plot(X, main="3D point pattern X") plot(G3est(X), main="G-function in 3D", legendpos="bottomright") par(par3) par(mfrow=c(1,3)) X <- unmark(chicago) plot(X, col="green", cols="red", pch=16, main="Chicago Street Crimes", cex.main=0.75, show.window=FALSE) plot(linearK(X, correction="none"), main="Network K-function", cex.main=0.75) plot(linearK(X, 
correction="Ang"), main="Corrected K-function", cex.main=0.75) par(mfrow=c(1,1)) fanfare("VI. Model-fitting") parsave <- par(mar=0.2+c(1,1,3,2)) plot(japanesepines) fit <- ppm(japanesepines ~1) print(fit) fit <- ppm(japanesepines ~polynom(x,y,2)) print(fit) plot(fit, how="image", se=FALSE, main=c("Inhomogeneous Poisson model", "fit by maximum likelihood", "Fitted intensity")) plot(fit, how="image", trend=FALSE, main=c("Standard error", "of fitted intensity")) plot(leverage(fit)) plot(influence(fit)) plot(mur$gold, main="Murchison gold deposits", cols="blue") plot(mur$faults, add=TRUE, col="red") fit <- ppm(mur$gold ~D, covariates=list(D=distfun(mur$faults))) par(mar=0.2+c(4,4,4,2)) plot(parres(fit, "D"), main="Partial residuals from loglinear Poisson model", xlab="Distance to nearest fault (km)", ylab="log intensity of gold", legend=FALSE) legend("bottomleft", legend=c("partial residual", "loglinear fit"), col=c(1,4), lty=c(1,4)) par(mar=rep(0.2, 4), mfrow=c(1,1)) fitT <- kppm(redwood ~1, clusters="Thomas") simT <- simulate(fitT)[[1]] plot(listof(redwood, simT), main.panel=c("Redwood", "simulation from\nfitted Thomas model"), main="", mar.panel=0.2, equal.scales=TRUE) mop <- par(mfrow=c(1,2), pty="s", mar=rep(4.4, 4)) plot(fitT, xname=c("Thomas model", "minimum contrast fit"), pause=FALSE) par(mop) oop <- par(pty="s", mar=0.2+c(4,4,4,2)) os <- objsurf(fitT) plot(os, main="Minimum contrast objective function", col=terrain.colors(128)) contour(os, add=TRUE) par(oop) parra <- par(mfrow=c(1,2), mar=0.2+c(3,3,4,2)) plot(swedishpines) fit <- ppm(swedishpines ~1, Strauss(r=7)) print(fit) plot(fit, how="image", main=c("Strauss model", "fit by maximum pseudolikelihood", "Conditional intensity plot")) # fitted interaction plot(swedishpines) fit <- ppm(swedishpines ~1, PairPiece(c(3,5,7,9,11,13))) plot(fitin(fit), legend=FALSE, main=c("Pairwise interaction model", "fit by maximum pseudolikelihood")) # simulation par(mfrow=c(1,1), mar=0.5+c(0,0,2,0)) Xsim <- rmh(model=fit, 
start=list(n.start=80), control=list(nrep=100)) plot(listof(swedishpines, Xsim), main="", main.panel=c("Swedish Pines", "Simulation from\nfitted Strauss model"), mar.panel=c(0,0,3,0),hsep=1,equal.scales=TRUE) # model compensator par(parra) par(mar=0.2+c(4,4,3,1)) plot(swedishpines) fit <- ppm(swedishpines ~1, Strauss(r=7)) plot(Kcom(fit), cbind(iso, icom, pois) ~ r, legend=FALSE, main="model compensators") legend("topleft", legend=c("empirical K function", "Strauss model compensator of K", "Poisson theoretical K"), lty=1:3, col=1:3, inset=0.05) par(parsave) # Multitype data dpat <- rescale(demopat, 8) unitname(dpat) <- c("mile", "miles") dpat plot(dpat, cols=c("red", "blue")) fit <- ppm(dpat ~marks + polynom(x,y,2), Poisson()) plot(fit, trend=TRUE, se=TRUE) fanfare("VII. Simulation") plot(letterR, main="Poisson random points") lambda <- 10/area.owin(letterR) points(rpoispp(lambda, win=letterR)) points(rpoispp(9 * lambda, win=letterR)) points(rpoispp(90 * lambda, win=letterR)) plot(rpoispp(100)) plot(rpoispp(function(x,y){1000 * exp(-3*x)}, 1000), main="rpoispp(function)") plot(rMaternII(200, 0.05)) plot(rSSI(0.05, 200)) plot(rThomas(10, 0.2, 5)) plot(rMatClust(10, 0.05, 4)) plot(rCauchy(30, 0.01, 5)) plot(rVarGamma(30, 2, 0.02, 5)) plot(rGaussPoisson(30, 0.05, 0.5)) if(require(RandomFields) && RandomFieldsSafe()) { X <- rLGCP("exp", 4, var=0.2, scale=0.1) plot(attr(X, "Lambda"), main="log-Gaussian Cox process") plot(X, add=TRUE, pch=16) } plot(rStrauss(200, 0.3, 0.07)) plot(rDiggleGratton(200,0.03,0.08)) plot(rDGS(300, 0.05)) plot(redwood, main="random thinning - rthin()") points(rthin(redwood, 0.5), col="green", cex=1.4) plot(rcell(nx=15)) plot(rsyst(nx=5)) abline(h=(1:4)/5, lty=2) abline(v=(1:4)/5, lty=2) plot(rstrat(nx=5)) abline(h=(1:4)/5, lty=2) abline(v=(1:4)/5, lty=2) X <- rsyst(nx=10) plot(rjitter(X, 0.02)) Xg <- rmh(list(cif="geyer", par=list(beta=1.25, gamma=1.6, r=0.2, sat=4.5), w=c(0,10,0,10)), control=list(nrep=1e4), start=list(n.start=200)) plot(Xg, 
main=paste("Geyer saturation process\n", "rmh() with cif=\"geyer\"")) L <- as.psp(matrix(runif(20), 5, 4), window=square(1)) plot(L, main="runifpointOnLines(30, L)") plot(runifpointOnLines(30, L), add=TRUE, pch="+") plot(L, main="rpoisppOnLines(3, L)") plot(rpoisppOnLines(3, L), add=TRUE, pch="+") plot(runiflpp(20, simplenet)) plot(rpoislpp(5, simplenet)) plot(rpoisline(10)) plot(rlinegrid(30, 0.1)) spatstat.options(npixel=256) X <- dirichlet(runifpoint(30)) plot(rMosaicSet(X, 0.4), col="green", border=NA) plot(X, add=TRUE) plot(rMosaicField(X, runif)) plot(rMosaicSet(rpoislinetess(3), 0.5), col="green", border=NA, main="Switzer's random set") spatstat.options(npixel=100) plot(Halton(512, c(2,3)), main="quasirandom pattern") plot(Halton(16384, c(2,3)), main="quasirandom pattern", pch=".") fanfare("VIII. Geometry") A <- letterR B <- shift(letterR, c(0.2,0.1)) plot(bounding.box(A,B), main="shift", type="n") plot(A, add=TRUE) plot(B, add=TRUE, border="red") B <- rotate(letterR, 0.2) plot(bounding.box(A,B), main="rotate", type="n") plot(A, add=TRUE) plot(B, add=TRUE, border="red") mat <- matrix(c(1.1, 0, 0.3, 1), 2, 2) B <- affine(letterR, mat=mat, vec=c(0.2,-0.1)) plot(bounding.box(A,B), main="affine", type="n") plot(A, add=TRUE) plot(B, add=TRUE, border="red") par1x2 <- par(mfrow=c(1,2)) L <- rpoisline(10, owin(c(1.5,4.5),c(0.2,3.6))) plot(L, main="Line segment pattern") plot(L$window, main="L[window]", type="n") plot(L[letterR], add=TRUE) plot(letterR, add=TRUE, border="red") par(par1x2) a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) plot(a, main="Self-crossing points") plot(selfcrossing.psp(a), add=TRUE, col="red") a <- as.psp(matrix(runif(20), 5, 4), window=square(1)) b <- rstrat(square(1), 5) plot(a, lwd=3, col="green", main="project points to segments") plot(b, add=TRUE, col="red", pch=16) v <- project2segment(b, a) Xproj <- v$Xproj plot(Xproj, add=TRUE, pch=16) arrows(b$x, b$y, Xproj$x, Xproj$y, angle=10, length=0.15, col="red") plot(a, 
main="pointsOnLines(L)") plot(pointsOnLines(a, np=100), add=TRUE, pch="+") parry <- par(mfrow=c(1,3), mar=0.3+c(1,1,3,1)) X <- tess(xgrid=seq(2, 4, length=10), ygrid=seq(0, 3.5, length=8)) plot(X, cex.main=0.75) plot(letterR, cex.main=0.75) plot(intersect.tess(X, letterR), cex.main=0.75) X <- dirichlet(runifpoint(10)) plot(X) L <- infline(0.3,0.5) plot(owin(), main="L", cex.main=0.75) plot(L, col="red", lwd=2, cex.main=0.75) plot(chop.tess(X,L), cex.main=0.75) par(parry) W <- chorley$window plot(W, main="simplify.owin") WS <- simplify.owin(W, 2) plot(WS, add=TRUE, border="green") nopa <- par(mfrow=c(2,2)) Rbox <- grow.rectangle(as.rectangle(letterR), 0.3) v <- erode.owin(letterR, 0.25) plot(Rbox, type="n", main="erode.owin", cex.main=0.75) plot(letterR, add=TRUE, col="red", cex.main=0.75) plot(v, add=TRUE, col="blue") v <- dilate.owin(letterR, 0.25) plot(Rbox, type="n", main="dilate.owin", cex.main=0.75) plot(v, add=TRUE, col="blue") plot(letterR, add=TRUE, col="red") v <- closing.owin(letterR, 0.3) plot(Rbox, type="n", main="closing.owin", cex.main=0.75) plot(v, add=TRUE, col="blue") plot(letterR, add=TRUE, col="red") v <- opening.owin(letterR, 0.3) plot(Rbox, type="n", main="opening.owin", cex.main=0.75) plot(letterR, add=TRUE, col="red") plot(v, add=TRUE, col="blue") par(nopa) laslett(heather$fine, main="Laslett's Transform") fanfare("IX. 
Operations on pixel images") Z <- distmap(swedishpines, dimyx=512) plot(Z, main="An image Z") plot(levelset(Z, 4)) plot(cut(Z, 5)) plot(eval.im(sqrt(Z) - 3)) plot(solutionset(abs(Z - 6) <= 1)) nopa <- par(mfrow=c(1,2)) plot(Z) segments(0,0,96,100,lwd=2) plot(transect.im(Z)) par(nopa) d <- distmap(cells, dimyx=256) W <- levelset(d, 0.06) nopa <- par(mfrow=c(1,2)) plot(W) plot(connected(W)) par(nopa) Z <- as.im(function(x,y) { 4 * x^2 + 3 * y }, letterR) plot(Z) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=TRUE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) fanfare("X. Programming tools") showoffK <- function(Y, current, ..., fullpicture,rad) { plot(fullpicture, main=c("Animation using `applynbd'", "explaining the K function")) points(Y, cex=2) u <- current points(u[1],u[2],pch="+",cex=3) theta <- seq(0,2*pi,length=100) polygon(u[1]+ rad * cos(theta),u[2]+rad*sin(theta)) text(u[1]+rad/3,u[2]+rad/2,Y$n,cex=3) if(runif(1) < 0.2) Sys.sleep(runif(1, max=0.4)) return(npoints(Y)) } par(ask=FALSE) applynbd(redwood, R=0.2, showoffK, fullpicture=redwood, rad=0.2, exclude=TRUE) par(oldpar) options(oldoptions) spatstat/NEWS0000644000176200001440000113014013166356056012627 0ustar liggesusers CHANGES IN spatstat VERSION 1.53-2 OVERVIEW o We thank Christophe Biscio and Rasmus Waagepetersen for contributions. o Correction to 'lohboot' o Improvements to ppm and update.ppm o Bug fixes and minor improvements. o Nickname: "Quantum Entanglement" NEW FUNCTIONS o fitin.profilepl Extract the fitted interaction from a model fitted by profile likelihood. SIGNIFICANT USER-VISIBLE CHANGES o lohboot Algorithm has been corrected and extended thanks to Christophe Biscio and Rasmus Waagepetersen. New arguments 'block', 'basicboot', 'Vcorrection'. 
o ppm.ppp, ppm.quad New argument 'clipwin' o update.ppm For the case 'update(model, X)' where X is a point pattern, if the window of X is different from the original window, then the model is re-fitted from scratch (i.e. use.internal=FALSE). o plot.leverage.ppm A contour line showing the average value of leverage is now drawn on the colour ribbon, as well as on the main image. New argument 'args.contour'. BUG FIXES o lohboot Implementation was completely incorrect. [Spotted and fixed by Christophe Biscio and Rasmus Waagepetersen.] Fixed. o update.ppm Did not always work correctly with formulae that included 'polynom()' terms. Fixed. CHANGES IN spatstat VERSION 1.53-1 OVERVIEW o We thank Suman Rakshit for contributions. o Bug fix in plot.linim o Nickname: "Drongo" BUG FIXES o plot.linim Colour map was mangled if log=TRUE. Fixed. CHANGES IN spatstat VERSION 1.53-0 OVERVIEW o We thank Tilman Davies and Mehdi Moradi for contributions. o Numerous bug fixes for linear networks code. o spatstat now requires the sub-package 'spatstat.data' which contains the datasets. o Minor enhancements and bug fixes. o Nickname: "Tinfoil Hat" NEW FUNCTIONS o "[<-.linim" Subset assignment method for pixel images on a linear network. o nnfromvertex Given a point pattern on a linear network, find the nearest data point from each vertex of the network. o tile.lengths Calculate the length of each tile in a tessellation on a network. o text.ppp, text.lpp, text.psp Methods for 'text' for spatial patterns. SIGNIFICANT USER-VISIBLE CHANGES o datasets All datasets installed in 'spatstat' have now been moved into the sub-package 'spatstat.data'. This should not have any effect on normal use. The 'spatstat.data' package is automatically loaded when spatstat is loaded, and the datasets are lazy-loaded so that they are available in the usual way. 
To list all datasets you now need to type 'data(package="spatstat.data")' o nbfires This dataset now includes information about the different land and sea borders of New Brunswick. o rhohat New argument 'subset' allows computation for a subset of the data. o predict.lppm Argument 'locations' can now be an 'lpp' object. o ewcdf Argument 'weights' can now be NULL. o plot.msr New arguments 'equal.markscale' and 'equal.ribbon'. o plot.im The number of tick marks in the colour ribbon can now be controlled using the argument 'nint' in 'ribargs'. o plot.symbolmap New argument 'nsymbols' controls the number of symbols plotted. o square Handles a common error in the format of the arguments. o [.linim More robust against artefacts. o [.linnet More robust against artefacts when the subset index is a pixel mask. o linim The image Z is now automatically restricted to the network. New argument 'restrict'. o plot.linim When style="width", negative values are plotted in red (by default). New argument 'negative.args' controls this. o plot.linim New argument 'zlim' specifies the range of values to be mapped. o Summary.linim Recognises the argument 'finite' so that range(x, finite=TRUE) works for a linim object x. o identify.psp Improved placement of labels. Arguments can be passed to text.default to control the plotting of labels. o as.polygonal Accelerated when w is a pixel mask. o density.lpp Accelerated in the default case. o Kinhom Stops gracefully if 'lambda' contains any zero values. o print.linim Prints more information. BUG FIXES o with.msr The value of 'atommass' was incorrect, due to a coding error. Fixed. o [.linim Internal data was sometimes corrupted. Fixed. o as.linim The result had incorrect internal format when Window(X) was a mask and one of the arguments 'eps', 'dimyx', 'xy' was present. Fixed. 
o as.im.im If W was a rectangle or polygonal window, the pixel resolution of the result was determined by the spatstat defaults, rather than being determined by the image argument X. This was contrary to the rule advertised in help(as.im). Fixed. o density.lpp In the 'slow' case (kernel not Gaussian, or continuous=FALSE), occasionally a pixel could incorrectly be assigned the value 1. [Spotted by Mehdi Moradi.] Fixed. o "[.solist" Ignored the "..." arguments in some cases. Fixed. o density.lpp Ignored the resolution arguments 'eps', 'dimyx' in the default case. Fixed. o plot.msr Plotted the panel titles on top of each other, if how="contour". Fixed. o contour.im Plotted the title text at the wrong place when add=TRUE and show.all=TRUE. Fixed. o predict.lppm Crashed if 'locations' was an 'lpp' object. Fixed. o plot.ppp Crashed if the window had height 0 and width 0 and the pattern had several columns of marks. Fixed. o plot.solist Crashed if all panels had windows of height 0 and width 0. Fixed. o linearK, linearKinhom, linearpcf, linearpcfinhom Crashed if the linear network was disconnected and one component of the network contained fewer than 2 points. Fixed. o integral.linim Crashed in some cases. Fixed. o "[.linim" Crashed in some cases. Fixed. CHANGES IN spatstat VERSION 1.52-1 OVERVIEW o Bug fix to satisfy the development version of R. o Nickname: "Apophenia" SIGNIFICANT USER-VISIBLE CHANGES o Ops.imlist Improved the 'names' of the result. BUG FIXES o bw.smoothppp Crashes in R-devel. Fixed. CHANGES IN spatstat VERSION 1.52-0 OVERVIEW o We thank Nicholas Read, Abdollah Jalilian, Suman Rakshit, Dominic Schuhmacher and Rasmus Waagepetersen for contributions. o Important bug fixes. o Now handles disconnected linear networks. o Effect function is now available for all types of fitted model. o A model can be fitted or re-fitted to a sub-region of data. o More support for measures. o 'Pool' operations improved. o Geometric-mean smoothing. 
o Changed algorithm defaults in ippm. o Version nickname: "Rudimentary Lathe" NEW FUNCTIONS o as.data.frame.envelope Extract function data from an envelope object, including the functions for the simulated data ('simfuns') if they were saved. o is.connected, is.connected.default, is.connected.linnet Determines whether a spatial object consists of one topologically connected piece, or several pieces. o is.connected.ppp Determines whether a point pattern is connected after all pairs of points closer than distance R are joined. o hist.funxy Histogram of values of a spatial function. o model.matrix.ippm Method for 'model.matrix' which allows computation of regular and irregular score components. o harmonise.msr Convert several measures (objects of class 'msr') to a common quadrature scheme. SIGNIFICANT USER-VISIBLE CHANGES o Smooth.ppp New argument 'geometric' supports geometric-mean smoothing. o Kinhom New argument 'ratio'. o linearKinhom, linearpcfinhom Changed default behaviour when 'lambda' is a fitted model. New arguments 'update' and 'leaveoneout'. o linearK, linearKinhom, linearpcf, linearpcfinhom, compilepcf Ratio calculations are now supported. New argument 'ratio'. o effectfun Now works for 'ppm', 'kppm', 'lppm', 'dppm', 'rppm' and 'profilepl' objects. o ppm, kppm The argument 'subset' can now be a window (class 'owin') specifying the subset of data to which the model should be fitted. o fitted.lppm New argument 'leaveoneout' allows leave-one-out computation of fitted value. o pool.rat New arguments 'relabel' and 'variance'. o density.lpp The return value is a pixel image of class 'linim' in all cases. o plot.linim, plot.linfun A scale bar is now plotted when style="width". New argument 'legend'. o ippm Default values for the parameters of the optimisation algorithm (nlm.args) have changed. o ippm The internal format of the result has been extended slightly. o bind.fv New argument 'clip'. 
o as.im.distfun New argument 'approx' specifies the choice of algorithm. o "[.psp" New argument 'fragments' specifies whether to keep fragments of line segments that are cut by the new window, or only to retain segments that lie entirely inside the window. o predict.rhohat New argument 'what' determines which value should be calculated: the function estimate, the upper/lower confidence limits, or the standard error. o pool.fv New arguments 'relabel' and 'variance' o pool.rat New argument 'weights'. o plot.msr New argument 'massthresh'. o Ops.msr Calculations like A+B can now be performed even when the measures A and B are not defined on the same quadrature scheme. o density.ppp New argument 'verbose'. o bw.pcf New argument 'verbose'. o hist.im New argument 'xname'. o [.leverage.ppm New argument 'update'. o [.layered Additional arguments '...' are now passed to other methods. o logLik.ppm The warning about pseudolikelihood ('log likelihood not available') is given only once, and is not repeated in subsequent calls, within a spatstat session. o kppm Refuses to fit a log-Gaussian Cox model with anisotropic covariance. o plot.linim, plot.linfun The return value has a different format. Arguments have been renamed and reorganised. o density.lpp New argument 'old'. o ippm Accelerated. o Smooth.ppp Now exits gracefully if any mark values are NA, NaN or Inf. o timeTaken Now exits gracefully if there is no timing information. o nbfires The unit of length for the coordinates is now specified in this dataset. BUG FIXES o bw.pcf Results were totally incorrect due to a typo. [Spotted by Abdollah Jalilian and Rasmus Waagepetersen.] Fixed. o predict.rho2hat Results were incorrect for a rho2hat object computed from a point pattern. Fixed. o density.ppp If the smoothing bandwidth was very small (e.g.\ smaller than pixel width), results were inaccurate if the default resolution was used, and completely wrong if another resolution was specified. [Spotted by Dominic Schuhmacher.] 
Fixed. o linearK, linearKinhom, linearpcf, linearpcfinhom, linearKcross, linearKdot, linearpcfcross, linearpcfdot, linearKcross.inhom, linearKdot.inhom, linearpcfcross.inhom, linearpcfdot.inhom Crashed if the network was disconnected. Fixed. o crossdist.lpp Crashed if the network was disconnected. Fixed. o countends Crashed if the network was disconnected. Fixed. o model.images.ppm Crashed for models fitted using 'covfunargs'. Fixed. o model.matrix.ppm Crashed for models fitted using 'covfunargs', if argument 'Q' was given. Fixed. o polynom Expansion of some polynomials caused an error message about 'invalid model formula'. Fixed. o plot.ppp The argument 'type="n"' did not suppress plotting of the legend, for marked point patterns. Fixed. o plot.psp Ignored 'show.all' when 'add=TRUE'. Fixed. o intensity.ppm Result had incorrect 'names' attribute in some cases. Fixed. o marks<-.ppx The assignment marks(X) <- a, where 'a' is a single atomic value, caused an error if 'X' contained zero points. Fixed o model.depends Crashed when applied to regression models fitted by 'gam', or point process models fitted by 'ppm' with 'use.gam=TRUE'. Fixed. o pool.fv Crashed sometimes, if the arguments did not have the same set of column names. Fixed. o pool.rat Crashed with an error message from 'fmt' if there were more than 20 objects to be pooled. Fixed. o linearK The 'theo' column was missing if npoints(X) < 2 and correction="Ang". Fixed. o model.matrix.ppm Result was malformed if the model was fitted with 'use.gam=TRUE'. Fixed. o effectfun Crashed if 'covname' was omitted, if the model was fitted with 'use.gam=TRUE'. Fixed. o nncross.lpp Result had incorrect format if Y was empty, in some cases. Fixed. o linearKinhom Plot label for y axis was incorrect. [Spotted by Suman Rakshit.] Fixed. o plot.solist If the entries were 'linim' objects, they were plotted using image() so arguments like 'style="w"' were ignored. Fixed. 
o as.ppp.data.frame Crashed if X was an object of class 'tbl_df' from the dplyr package. Fixed. o plot.lpp Crashed if there were multiple columns of marks. Fixed. CHANGES IN spatstat VERSION 1.51-0 OVERVIEW o We thank Greg McSwiggan, Mehdi Moradi and Tammy L Silva for contributions. o New fast algorithm for kernel smoothing on a linear network. o Leverage and influence diagnostics extended to Poisson/Gibbs models fitted by logistic composite likelihood. o Two-stage Monte Carlo test. o Dirichlet/Voronoi tessellation on a linear network. o Thinning of point patterns on a linear network. o More support for functions and tessellations on a linear network. o Improvements and bug fixes. o Version nickname: 'Poetic Licence' NEW FUNCTIONS o bits.test: Balanced Independent Two-Stage Monte Carlo test, an improvement on the Dao-Genton test. o lineardirichlet Computes the Dirichlet-Voronoi tessellation associated with a point pattern on a linear network. o domain.lintess, domain.linfun Extract the linear network from a 'lintess' or 'linfun' object. o summary.lintess Summary of a tessellation on a linear network. o clicklpp Interactively add points on a linear network. o envelopeArray Generate an array of envelopes using a function that returns 'fasp' objects. SIGNIFICANT USER-VISIBLE CHANGES o density.lpp New fast algorithm (up to 1000 times faster) for the default case where kernel="gaussian" and continuous=TRUE. Generously contributed by Greg McSwiggan. o leverage.ppm, influence.ppm, dfbetas.ppm These methods now work for models that were fitted by logistic composite likelihood (method='logi'). o rthin Argument X can now be a point pattern on a linear network (class 'lpp'). o fitted.ppm New option: type = "link" o update.kppm New argument 'evaluate'. o integral.linfun New argument 'delta' controls step length of approximation to integral. o as.linim.default New argument 'delta' controls spacing of sample points in internal data. 
o as.linfun.lintess New argument 'values' specifies the function value for each tile. New argument 'navalue'. BUG FIXES o leverage.ppm, influence.ppm, dfbetas.ppm Results for Gibbs models were incorrect due to a mathematical error. (Results for Poisson models were correct). Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm, ppmInfluence Calculations were incorrect for a Geyer model fitted using an edge correction other than "border" or "none". Fixed. o step, kppm, update.kppm 'step' did not work for kppm objects in some cases due to a scoping problem in update.kppm. Fixed. o improve.kppm Crashed if the window was not a rectangle. Fixed. o pcf.ppp, pcfinhom Crashed if kernel="epa" rather than "epanechnikov". Fixed. o alltypes Crashed if envelope=TRUE and reuse=FALSE. Fixed. o pairdist.lpp, nndist.lpp, nnwhich.lpp, nncross.lpp Crashed if the network was disconnected. Fixed. o as.im.linim, as.linim.linim Additional arguments such as 'eps' and 'dimyx' were ignored. Fixed. o as.im.default Arguments 'eps and 'xy' were ignored if X was a single numeric value. Fixed. o 'timed' class Printing of these objects did not work in some locales. Fixed. o runifpoint Ignored 'drop' argument if the window was a rectangle. Fixed. CHANGES IN spatstat VERSION 1.50-0 OVERVIEW o We thank Richard Cotton, Adrian Heyner, Abdollah Jalilian, Dominic Schuhmacher and Rasmus Waagepetersen for contributions. o spatstat now 'Imports' the package 'spatstat.utils'. o Bandwidth selection for pair correlation function. o Improvements and bug fixes. o Version nickname: 'Bunyip Aristocracy' NEW PACKAGE STRUCTURE o spatstat is being split into several sub-packages, to satisfy the requirements of CRAN. This should not affect the user: existing code will continue to work in the same way. Currently there are two sub-packages, called 'spatstat.utils' and 'spatstat'. 
Typing 'library(spatstat)' will load the familiar 'spatstat' package which can be used as before, and will silently import the 'spatstat.utils' package. The 'spatstat.utils' package contains utility functions that were originally written for 'spatstat': they were undocumented internal functions in 'spatstat', but are now documented and accessible in a separate package because they may be useful for other purposes. To access these functions, you need to type 'library(spatstat.utils)'. NEW FUNCTIONS o bw.pcf Bandwidth selection for pair correlation function. Original code contributed by Abdollah Jalilian and Rasmus Waagepetersen. o grow.box3 Expand a three-dimensional box. SIGNIFICANT USER-VISIBLE CHANGES o as.owin Now refuses to convert a 'box3' to a two-dimensional window. o pixellate.ppp If the pattern is empty, the result is an integer-valued image (by default) for consistency with the results for non-empty patterns. o ppp If the coordinate vectors x and y contain NA, NaN or infinite values, these points are deleted with a warning, instead of causing a fatal error. o ppm Argument 'interaction' can now be a function that makes an interaction, such as Poisson, Hardcore, MultiHard. o pcf, pcfinhom New argument 'close' for advanced use. o runifpointx, rpoisppx New argument 'drop'. o shapley, ponderosa In these installed datasets, the functions shapley.extra$plotit and ponderosa.extra$plotit have changed slightly (to accommodate the dependence on the package spatstat.utils). o kppm Improved printed output. BUG FIXES o rMaternI, rMaternII If 'win' was a three-dimensional box of class 'box3', the result was a two-dimensional point pattern. [Spotted by Adrian Heyner.] Fixed. o rmhmodel.ppm, simulate.ppm Crashed when applied to a fitted Lennard-Jones model. [Spotted by Dominic Schuhmacher.] Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm Crashed when applied to some hard-core models. Fixed. 
o "[.ppx" The format of the result was slightly malformed if exactly one point was selected. Fixed. o unmark.lpp, marks<-.lpp The result had class c("lpp", "lpp", "ppx") instead of c("lpp", "ppx"). Fixed. CHANGES IN spatstat VERSION 1.49-0 OVERVIEW o We thank Tilman Davies, Kassel Hingee, Abdollah Jalilian, Brian Ripley and Dominic Schuhmacher for contributions. o spatstat now 'Suggests' the package 'fftwtools'. o Operations on signed measures. o Operations on lists of pixel images. o Improved pixellation of point patterns. o Stieltjes integral extended. o Subset operators extended. o Greatly accelerated 'rmh' when using 'nsave' o Some computations accelerated. o Size of namespace reduced, for efficiency. o Bug fixes. o Version nickname: 'So-Called Software' NEW DEPENDENCIES o fftwtools spatstat now 'Suggests' the package 'fftwtools'. This package provides a very fast implementation of the Fast Fourier Transform, leading to much faster computation in the spatstat functions 'density.ppp', 'relrisk.ppp', 'convolve.im', 'blur', 'scan.test' and many other functions. The 'fftwtools' package requires the external software library 'fftw'. We strongly recommend installing this library if possible. NEW FUNCTIONS o hexagon, regularpolygon Create regular polygons. o Ops.msr Arithmetic operations for measures. o Math.imlist, Ops.imlist, Summary.imlist, Complex.imlist Arithmetic operations for lists of pixel images. o measurePositive, measureNegative, measureVariation, totalVariation Positive and negative parts of a measure, and variation of a measure. o as.function.owin Convert a spatial window to a function (x,y), the indicator function. o as.function.ssf Convert an object of class 'ssf' to a function(x,y) o as.function.leverage.ppm Convert an object of class 'leverage.ppm' to a function(x,y) SIGNIFICANT USER-VISIBLE CHANGES o stieltjes Argument 'M' can be a stepfun object (such as an empirical CDF). 
o quantile.ewcdf The function is now normalised to the range [0,1] before the quantiles are computed. This can be suppressed by setting normalise=FALSE. o pixellate.ppp New arguments 'fractional' and 'preserve' for more accurate discretisation. o "[.layered" Subset index i can now be an 'owin' object. o "[.solist" Subset index i can now be an 'owin' object. o plot.solist, plot.imlist, plot.anylist Result is now an (invisible) list containing the result from executing the plot of each panel. o ppp New argument 'checkdup'. o Summary.im Argument 'na.rm' is no longer ignored. o cdf.test The methods for classes ppp, ppm, lpp, lppm, slrm have a new argument 'interpolate'. o as.solist The argument x can now be a spatial object; as.solist(cells) is the same as solist(cells). o bw.diggle, bw.ppl, bw.relrisk, bw.smoothppp These functions now extract and store the name of the unit of length from the point pattern dataset. When the bandwidth selection criterion is plotted, the name of the unit of length is shown on the x-axis. o polynom This function now has a help file. o rmhcontrol New parameter 'pstage' determines when to generate random proposal points. o rmh Accelerated, in the case where multiple patterns are saved using 'nsave'. o bdist.pixels Accelerated for polygonal windows. New argument 'method'. o spatstat namespace The namespace of the spatstat package has been shortened (by internally registering the native routines) which should make the package run faster. o sum.im, range.im, max.im, min.im These functions have been removed, as they are now subsumed in Summary.im. BUG FIXES o plot.msr If one of 'nrows' or 'ncols' was specified, but not both, an obscure error occurred. Fixed. o plot.solist, plot.imlist, plot.anylist Crashed if 'nrows' and 'ncols' were given values implying that some rows or columns would not contain any plots. Fixed. o as.ppp.lpp Crashed if there was more than one column of marks. Fixed. 
o has.close.pp3 Results were incorrect, or a crash occurred, when argument 'Y' was given. Fixed. o rmpoispp If 'lambda' was a list of images, 'names(lambda)' was ignored, rather than serving as the default value of 'types'. Fixed. o bugfixes Output was garbled, in rare cases. Fixed. o kppm Result was malformed when clusters="VarGamma" and method="clik2". Spotted by Abdollah Jalilian. Fixed. o QQversion Plotting labels were malformed. Fixed. CHANGES IN spatstat VERSION 1.48-0 OVERVIEW o We thank Kim Colyvas, Yongtao Guan, Gopalan Nair, Nader Najari, Suman Rakshit, Ian Renner and Hangsheng Wang for contributions. o Sufficient Dimension Reduction for point processes. o Alternating Gibbs Sampler for point process simulation. o Intensity approximation for area-interaction and Geyer models. o New class of spatially sampled functions. o ROC and AUC extended to other types of point patterns and models. o More support for linear networks. o More support for infinite straight lines. o Simulation of 'rhohat' objects. o Kernel smoothing accelerated. o Methods for 'head' and 'tail' for spatial patterns. o More low-level functionality. o Improvements and bug fixes. o spatstat now has more than 1000 help files. o Nickname: 'Model Prisoner' NEW CLASSES o ssf Class of spatially sampled functions. NEW FUNCTIONS o sdr, dimhat Sufficient Dimension Reduction for point processes. Matlab code contributed by Yongtao Guan, translated by Suman Rakshit. o rags, ragsAreaInter, ragsMultiHard Alternating Gibbs Sampler for point processes. o psib Sibling probability (index of clustering strength in a cluster process). o bugfixes List all bug fixes in recent versions of a package. 
o roc.kppm, roc.lppm, roc.lpp Methods for 'roc' (receiver operating characteristic curve) for fitted models of class 'kppm' and 'lppm' and point patterns of class 'lpp' o auc.kppm, auc.lppm, auc.lpp Methods for 'auc' (area under the ROC curve) for fitted models of class 'kppm' and 'lppm' and point patterns of class 'lpp' o rlpp Random points on a linear network with a specified probability density. o cut.lpp Method for 'cut' for point patterns on a linear network. o crossing.linnet Find crossing points between a linear network and another set of lines. o ssf Create a spatially sampled function o print.ssf, plot.ssf, contour.ssf, image.ssf Display a spatially sampled function o as.im.ssf, as.ppp.ssf, marks.ssf, marks<-.ssf, unmark.ssf, [.ssf, with.ssf Manipulate data in a spatially sampled function o Smooth.ssf Smooth a spatially sampled function o integral.ssf Approximate integral of spatially sampled function o simulate.rhohat Generate a Poisson random point pattern with intensity that is a function of a covariate, given by a 'rhohat' object. o head.ppp, head.ppx, head.psp, head.tess, tail.ppp, tail.ppx, tail.psp, tail.tess Methods for 'head' and 'tail' for spatial patterns. o as.data.frame.tess Convert a tessellation to a data frame. o timeTaken Extract the timing data from a 'timed' object or objects. o rotate.infline, shift.infline, reflect.infline, flipxy.infline Geometrical transformations for infinite straight lines. o whichhalfplane Determine which side of an infinite line a point lies on. o points.lpp Method for 'points' for point patterns on a linear network. o pairs.linim Pairs plot for images on a linear network. o has.close Faster way to check whether a point has a close neighbour. o closetriples Low-level function to find all close triples of points. o matrixpower, matrixsqrt, matrixinvsqrt Raise a matrix to any power. 
SIGNIFICANT USER-VISIBLE CHANGES o intensity.ppm Intensity approximation is now available for the Geyer saturation process and the area-interaction process (results of research with Gopalan Nair). o envelope.lpp, envelope.lppm New arguments 'fix.n' and 'fix.marks' allow envelopes to be computed using simulations conditional on the observed number of points. o "[.im" The subset index "i" can now be a linear network (object of class 'linnet'). The result of "x[i, drop=FALSE]" is then a pixel image of class 'linim'. o cut.ppp Argument z can be "x" or "y" indicating one of the spatial coordinates. o rThomas, rMatClust, rCauchy, rVarGamma, rPoissonCluster, rNeymanScott New argument 'saveparents'. o lintess Argument 'df' can be missing or NULL, resulting in a tessellation with only one tile. o lpp X can be missing or NULL, resulting in an empty point pattern. o plot.lintess Improved plot method, with more options. o rpoisline Also returns information about the original infinite random lines. o density.ppp, Smooth.ppp Accelerated. o density.psp New argument 'method' controls the method of computation. New faster option 'method="FFT"'. o nndist.lpp Accelerated. BUG FIXES o F3est Estimates of F(r) for the largest value of r were wildly incorrect. Fixed. o clip.infline Results were incorrect unless the midpoint of the window was the coordinate origin. Fixed. o integral.linim Results were inaccurate if many of the segment lengths were shorter than the width of a pixel. Fixed. o predict.lppm Bizarre error messages about 'class too long' or 'names too long' occurred if the model was multitype. Fixed. o superimpose Point patterns containing 0 points were ignored when determining the list of possible marks. Fixed. o chop.tess Vertical lines were not handled correctly with pixellated tessellations. Fixed. o timed Argument 'timetaken' was ignored. Fixed. o ppm Crashed if method="logi" and the 'covariates' were a data frame. [Spotted by Kim Colyvas and Ian Renner.] Fixed. 
o rpoislpp, runiflpp Crashed if nsim > 1. Fixed. o rpoisline Crashed if zero lines were generated. Fixed. o model.frame.ppm Crashed if the original model was fitted to a data frame of covariates and there were NA's amongst the covariate values. [Spotted by Kim Colyvas.] Fixed. o any, all When applied to pixel images (objects of class 'im') the result was sometimes NA when a finite value should have been returned. Fixed. o predict.rhohat When the original data were on a linear network, the result of predict.rhohat did not belong to the correct class 'linim'. Fixed. CHANGES IN spatstat VERSION 1.47-0 OVERVIEW o We thank Marcel Austenfeld, Guy Bayegnak, Tilman Davies, Cenk Icos, Jorge Mateu, Frederico Mestre, Mehdi Moradi, Virginia Morera Pujol, Suman Rakshit and Sven Wagner for contributions. o Non-Gaussian smoothing kernels. o Important bug fix in linearK, linearpcf o Changed internal format of linnet and lpp objects. o Faster computation in linear networks. o Bias correction techniques. o Bounding circle of a spatial object. o Minkowski sum also applicable to point patterns and line segment patterns. o Option to plot marked points as arrows. o Kernel smoothing accelerated. o Workaround for bug in some graphics drivers affecting image orientation. o Bug fixes and improvements. o Version nickname: 'Responsible Gambler' NEW FUNCTIONS o anyNA.im Method for 'anyNA' for pixel images. o bc Bias correction (Newton-Raphson) for fitted model parameters. See also 'rex'. o boundingcircle, boundingcentre Find the smallest circle enclosing a window or point pattern. o "[.linim" Subset operator for pixel images on a linear network. o mean.linim, median.linim, quantile.linim The mean, median, or quantiles of pixel values in a pixel image on a linear network. o rex Richardson extrapolation for numerical integrals and statistical model parameter estimates. o weighted.median, weighted.quantile Median or quantile of numerical data with associated weights. 
SIGNIFICANT USER-VISIBLE CHANGES o linear networks The internal format of a 'linnet' (linear network) object has been changed. Existing datasets of class 'linnet' and 'lpp' are still supported. However, computation will be faster if they are converted to the new format. To convert a linnet object L to the new format, use L <- as.linnet(L). To convert an lpp object X to the new format, use X <- as.lpp(X). o density.ppp, Smooth.ppp New argument 'kernel' allows the user to specify the smoothing kernel. o density.ppp, Smooth.ppp Argument 'weights' can now be a pixel image. o MinkowskiSum, %(+)% Now accepts arguments which are point patterns or line segment patterns as well as windows. o plot.im New argument 'workaround' to avoid a bug in some device drivers that causes the image to be displayed in the wrong spatial orientation. [Thanks to Marcel Austenfeld for drawing attention to this.] o sumouter New argument 'y' allows computation of asymmetric outer products. o linearKinhom, linearpcfinhom New argument 'normpower'. o rmh.default, rmh.ppm New arguments 'nsim', 'saveinfo'. o symbolmap, plot.ppp, plot.lpp New option: shape="arrows" o rcellnumber New argument 'mu'. o lengths.psp New argument 'squared'. o plot.linfun Now passes arguments to the function being plotted. o as.linnet.psp If the line segment pattern has marks, then the resulting linear network also carries these marks in the $lines component. o summary.owin, summary.im The fraction of frame area that is occupied by the window/image is now reported. o density.ppp, Smooth.ppp Computation accelerated by about 15% in the case where at='points' and kernel='gaussian'. o linearK, linearpcf Accelerated by about 40%. o pixellate.ppp Accelerated in the case where weights are given o density.ppp Accelerated in the cases where weights are given or 'diggle=TRUE' o dilation.ppp Improved geometrical accuracy. Now accepts arguments to control resolution of polygonal approximation. o discs New argument 'npoly'. 
Accelerated in some cases. o plot.pp3 New arguments 'box.front', 'box.back' control plotting of the box. o grow.rectangle New argument 'fraction'. o nnfun.lpp New argument 'k'. o bw.ppl New argument 'sigma'. o lppm New argument 'random' controls placement of dummy points. o rhohat.lpp New argument 'random' controls placement of dummy points. o quadrat.test.ppm Accelerated in the case where the original window is a rectangle. o kppm, mincontrast, cauchy.estpcf, lgcp.estpcf, matclust.estpcf, thomas.estpcf, vargamma.estpcf A warning about infinite values of the summary function no longer occurs when the default settings are used. o circumradius This function is now deprecated, in favour of 'boundingradius'. o print.quad More information is printed. BUG FIXES o linearK, linearpcf, and relatives: The values of these functions were sometimes greatly underestimated when the network had segments shorter than 10 coordinate units. [Bug introduced in spatstat 1.44-0, December 2015.] Fixed. o integral.linim, integral.linfun Results were slightly inaccurate because of a bias in the distribution of sample points. [Bug introduced in spatstat 1.41-0, February 2015.] Fixed. o intensity.ppm Result was incorrect for Gibbs models if the model was *exactly* equivalent to a Poisson process (i.e. if all interaction coefficients were exactly zero). [Bug introduced in spatstat 1.28-1, June 2012.] Fixed. o rSSI Sometimes terminated prematurely. [Spotted by Frederico Mestre.] Fixed. o perspPoints Crashed if the image Z contained NA (i.e. if Z was only defined on a subset of the bounding frame). Spotted by Guy Bayegnak. Fixed. o plot.ppp, plot.lpp Crashed if the argument 'shape' was given. Fixed. o plot.kppm Crashed if the model was not fitted by minimum contrast. Fixed. o superimpose Crashed if the argument was a 'solist' containing line segment patterns. Fixed. o Jest Crashed sometimes, depending on the shape of the observation window. [Spotted by Cenk Icos.] Fixed. 
o plot.studpermutest Crashed when the summary statistic was a multitype pair correlation function or multitype K function. [Spotted by Sven Wagner.] Fixed. o pool.anylist Crashed with a message about buffer size, if the list was longer than about 100 items. Fixed. o diagnose.ppm, plot.diagppm Crashed in some cases when cumulative=FALSE. Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm Crashed sometimes with a message about wrong replacement length. [Spotted by Virginia Morera Pujol.] Fixed. o as.linnet.psp Crashed with marked segment patterns, if any segments were very short. [Spotted by Suman Rakshit.] Fixed. o stieltjes Returned NA if some values of f were not finite. Fixed. o plot.symbolmap If a new plot window was initialised, it was sometimes too small to contain the geometric figures (circles, squares etc) in the symbol map. Fixed. o plot.ppp, plot.im Ignored xlim, ylim. Fixed. o rhohat.lpp Ignored nd, eps. Fixed. o nnfun.lpp Print method gave incorrect information about the point pattern. Fixed. o "[.fv" The default plot formula was not updated. Fixed. o fitted.ppm The result was sometimes a 1-dimensional array rather than a numeric vector. Fixed. CHANGES IN spatstat VERSION 1.46-1 OVERVIEW o Important bug fix. o Version nickname: 'Spoiler Alert' BUG FIXES o density.ppp, Smooth.ppp The results of density(X, at="points") and Smooth(X, at="points") were incorrect in some cases. The contribution from the left-most data point (the point with the smallest x coordinate) was omitted. [Bug introduced in spatstat 1.26-0, April 2012.] Fixed. CHANGES IN spatstat VERSION 1.46-0 OVERVIEW o We thank Corey Anderson and Sebastian Meyer for contributions. o spatstat now depends on R 3.3.0 or later. o Improvements to inhomogeneous multitype K and L functions. o Variance approximation for pair correlation function. o Leverage and influence for multitype point process models. o Functions for extracting components of vector-valued objects. 
o Important bug fix in Smooth.ppp o Minor improvements and bug fixes. o Version nickname: 'Multidimensional Toothbrush' NEW FUNCTIONS o split.msr Decompose a measure into parts. o unstack.msr Decompose a vector-valued measure into its component measures. o unstack.ppp, unstack.psp, unstack.lpp Given a spatial pattern with several columns of marks, separate the columns and return a list of spatial patterns, each having only one column of marks. o kernel.squint Integral of squared kernel, for the kernels used in density estimation. SIGNIFICANT USER-VISIBLE CHANGES o Kcross.inhom, Kdot.inhom, Kmulti.inhom, Ldot.inhom, Lcross.inhom These functions now allow intensity values to be given by a fitted point process model. New arguments 'update', 'leaveoneout', 'lambdaX'. o diagnose.ppm Infinite values of 'rbord' are now ignored and treated as zero. This ensures that diagnose.ppm has a sensible default when the fitted model has infinite reach. o pcf.ppp Now calculates an analytic approximation to the variance of the estimate of the pair correlation function (when var.approx=TRUE). Now returns the smoothing bandwidth used, as an attribute of the result. o plot.ppp When 'clipwin' is given, any parts of the boundary of the window of x that lie inside 'clipwin' will also be plotted. o plot.msr Now handles multitype measures. New argument 'multiplot'. o plot.anylist If a list entry x[[i]] belongs to class 'anylist', it will be expanded so that each entry x[[i]][[j]] will be plotted as a separate panel. o influence.ppm, leverage.ppm These can now be applied to multitype point process models and the results can be plotted. o plot.influence.ppm, plot.leverage.ppm New argument 'multiplot'. o plot.anylist, plot.solist, plot.listof New arguments panel.begin.args, panel.end.args o influence.ppm, leverage.ppm, dfbetas.ppm For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. 
BUG FIXES o Smooth.ppp Results were incorrect when at='points' and leaveoneout=FALSE. [Bug introduced in spatstat 1.20-5, October 2010.] Fixed. o funxy Did not correctly handle one-line functions: the resulting objects evaluated the wrong function in some cases. [Spotted by Sebastian Meyer. Bug introduced in spatstat 1.45-0] Fixed. o mppm Did not recognise the variable 'marks' in a formula. Fixed. o Smooth.ppp, bw.smoothppp Crashed if X had two columns of marks and one column was constant. [Bug introduced in spatstat 1.38-0, October 2014] Fixed. o Smooth.ppp Results for 'at="points"' were garbled, for some values of 'sigma', if X had more than one column of marks. [Bug introduced in spatstat 1.38-0, October 2014] Fixed. o plot.layered Crashed if one layer was a point pattern with several columns of marks. Fixed. o plot.ppm Sometimes gave a spurious warning about a singular matrix. Fixed. o setminus.owin Gave wrong or strange answer if the correct answer was empty. Fixed. o parameters.dppm Crashed, due to a typo. Fixed. o progressreport Crashed if n = 1. Fixed. CHANGES IN spatstat VERSION 1.45-2 OVERVIEW o We thank Ottmar Cronie, Virginia Morera Pujol, Sven Wagner and Marie-Colette van Lieshout for contributions. o Recursive-partition point process models. o Minkowski sum, morphological dilation and erosion with any shape. o Important bug fix in spatial CDF tests. o More bug fixes for replicated patterns. o Simulate a model fitted to replicated point patterns. o Inhomogeneous multitype F and G functions. o Summary functions recognise correction="all" o Leverage and influence code handles bigger datasets. o More support for pixel images. o Improved progress reports. o New dataset 'redwood3' o spatstat now Depends on the package 'rpart' o Version nickname: 'Caretaker Mode' NEW DATASETS o redwood3 A more accurate version of the 'redwood' data. NEW FUNCTIONS o as.im.data.frame Build a pixel image from a data frame of coordinates and pixel values. 
o covering Cover a window using discs of a given radius. o dilationAny, erosionAny, %(-)% Morphological dilation and erosion by any shape. o FmultiInhom, GmultiInhom Inhomogeneous multitype/marked versions of the summary functions Fest, Gest. o kernel.moment Moment or incomplete moment of smoothing kernel. o MinkowskiSum, %(+)% Minkowski sum of two windows: A %(+)% B, or MinkowskiSum(A,B) o nobjects New generic function for counting the number of 'things' in a dataset. There are methods for ppp, ppx, psp, tess. o parameters.interact, parameters.fii Extract parameters from interpoint interactions. [These existing functions are now documented.] o ppmInfluence Calculate leverage.ppm, influence.ppm and dfbetas.ppm efficiently. o rppm, plot.rppm, predict.rppm, prune.rppm Recursive-partition point process models o simulate.mppm Simulate a point process model fitted to replicated point patterns. o update.interact Update the parameters of an interpoint interaction. [This existing function is now documented.] o where.max, where.min Find the spatial location(s) where a pixel image achieves its maximum or minimum value. SIGNIFICANT USER-VISIBLE CHANGES o cdf.test.mppm Now handles Gibbs models. Now recognises covariate="x" or "y". o leverage.ppm, influence.ppm, dfbetas.ppm For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. o plot.im Now handles complex-valued images. o connected.im Now handles a logical-valued image properly. o qqplot.ppm Argument 'expr' can now be a list of point patterns, or an envelope object containing a list of point patterns. o as.layered Default method now handles a (vanilla) list of spatial objects. o summary functions The argument 'correction="all"' is now recognised: it selects all the available options. 
This applies to Fest, F3est, Gest, Gcross, Gdot, Gmulti, G3est, Gfox, Gcom, Gres, Hest, Jest, Jmulti, Jcross, Jdot, Jfox, Kest, Kinhom, Kmulti, Kcross, Kdot, Kcom, Kres, Kmulti.inhom, Kcross.inhom, Kdot.inhom, Kscaled, Ksector, Kmark, K3est, Lscaled, markcorr, markcrosscorr, nnorient, pairorient, pcfinhom, pcfcross.inhom, pcfcross, pcf, Tstat. o clarkevans The argument 'correction="all"' is now recognised: it selects all the available options. [This is also the default.] o predict.mppm The argument 'type="all"' is now recognised: it selects all the available options. [This is also the default.] o plot.kppm The argument 'what="all"' is now recognised: it selects all the available options. [This is also the default.] o connected.im, connected.owin Arguments '...' now determine pixel resolution. o anova.mppm New argument 'fine' o as.owin.data.frame New argument 'step' o discs Now accepts a single numeric value for 'radii'. o plot.ppp, plot.profilepl, plot.quadratcount, plot.quadrattest, plot.tess Now recognise graphics parameters for text, such as 'family' and 'srt' o as.function.tess New argument 'values' specifies the function values. o cdf.test Calculations are more robust against numerical rounding effects. o progressreport Behaviour improved. New arguments 'tick', 'showtime'. o simulate.ppm New argument 'verbose' o compileK, compilepcf These internal functions are now documented. BUG FIXES o cdf.test.ppm Calculation of p-values was incorrect for Gibbs models: 1-p was computed instead of p. [Spotted by Sven Wagner.] Fixed. o subfits The interaction coefficients of the submodels were incorrect for Gibbs models with a multitype interaction (MultiStrauss, etc). [Spotted by Sven Wagner.] Fixed. o subfits Crashed when a Gibbs model included factor-valued spatial covariates and not all levels of the factor were present in each row of the data. [Spotted by Sven Wagner.] Fixed. 
o subfits For Gibbs models with a multitype interaction (MultiStrauss, etc), computation of the conditional intensity caused an error. [Spotted by Sven Wagner.] Fixed. o diagnose.ppm Crashed if what="smooth", when the original window was a rectangle. [Spotted by Virginia Morera Pujol.] Fixed. o mppm The x and y coordinates were not permitted in the random-effects formula 'random'. [Spotted by Sven Wagner.] Fixed. o vcov.ppm The result had no 'dimnames', if the model was fitted using method="ho". Fixed. CHANGES IN spatstat VERSION 1.45-1 OVERVIEW o This version was never released. CHANGES IN spatstat VERSION 1.45-0 OVERVIEW o We thank Monsuru Adepeju, Mario D'Antuono, Markus Herrmann, Paul Hewson, Kassel Hingee, Greg McSwiggan, Suman Rakshit and Sven Wagner for contributions. o Important bug fix in leverage/influence diagnostics for Gibbs models. o Numerous bug fixes in code for replicated point patterns. o Surgery on linear networks. o Tessellations on a linear network. o Laslett's Transform. o Colour maps for point patterns with continuous marks are easier to define. o Pair correlation function estimates can be pooled. o Stipulate a particular version of a package. o Fixed namespace problems arising when spatstat is not loaded. o Bug fixes and performance improvements. o spatstat now contains 100,000 lines of R code. o Version nickname: 'One Lakh' NEW FUNCTIONS o laslett Laslett's Transform. [Thanks to Kassel Hingee] o lintess Tessellation on a linear network. o divide.linnet Divide a linear network into pieces demarcated by a point pattern. o insertVertices Insert new vertices in a linear network. o thinNetwork Remove vertices and/or segments from a linear network etc o connected.linnet Find connected components of a linear network. o nvertices, nvertices.linnet, nvertices.owin Count the number of vertices in a linear network or vertices of the boundary of a window. 
o as.data.frame.linim, as.data.frame.linfun Extract a data frame of spatial locations and function values from an object of class 'linim' or 'linfun'. o as.linfun, as.linfun.linim, as.linfun.lintess Convert other kinds of data to a 'linfun' object. o requireversion Require a particular version of a package (for use in stand-alone R scripts). SIGNIFICANT USER-VISIBLE CHANGES o [.linnet, [.lpp New argument 'snip' determines what to do with segments of the network that cross the boundary of the window. Default behaviour has changed. o pcfinhom Default behaviour is changed when 'lambda' is a fitted model. The default is now to re-fit the model to the data before computing pcf. New arguments 'update' and 'leaveoneout' control this. o envelope methods New argument 'funYargs' contains arguments to the summary function when applied to the data pattern only. o plot.ppp, plot.lpp For a point pattern with continuous marks ('real numbers') the colour arguments 'cols', 'fg', 'bg' can now be vectors of colour values, and will be used to determine the default colour map for the marks. o symbolmap Now accepts a vector of colour values for the arguments 'col', 'cols', 'fg', 'bg' if argument 'range' is given. o closepairs.ppp, closepairs.pp3 New arguments 'distinct' and 'neat' allow more options. o closepairs.ppp, closepairs.pp3 Argument 'ordered' has been replaced by 'twice' (but 'ordered' is still accepted, with a warning). o closepairs.ppp, closepairs.pp3 Performance improved (computation time and memory requirements reduced.) This should improve the performance of many functions in spatstat. o Geyer The saturation parameter 'sat' can now be less than 1. o lpp, as.lpp These functions now handle the case where 'seg' and 'tp' are given but 'x' and 'y' are missing. o linnet If the argument 'edges' is given, then this argument now determines the ordering of the sequence of line segments. For example, the i-th row of 'edges' specifies the i-th line segment in as.psp(L). 
o funxy, distfun The functions created by funxy and distfun have arguments (x,y). The user may now give a ppp or lpp object for the argument 'x', instead of giving two coordinate vectors 'x' and 'y'. o crossing.psp New argument 'details' gives more information about the intersections between the segments. o subset.ppp, subset.lpp, subset.pp3, subset.ppx The argument 'subset' can now be any argument acceptable to the "[" method. o density.lpp New argument 'weights'. o pcf.ppp New argument 'ratio' allows several estimates of pcf to be pooled. o summary.ppm New argument 'fine' selects the algorithm for variance estimation. o texturemap Argument 'textures' can be missing or NULL. o plot.lpp New argument 'show.network' o linnet New argument 'warn' o mppm Performs more checks for consistency of the input data. o mppm New arguments 'gcontrol' and 'reltol.pql' control the fitting algorithm. o edge.Trans New argument 'gW' for efficiency. o pool.fv The default plot of the pooled function no longer includes the variance curves. o clickpoly The polygon is now drawn progressively as the user clicks new vertices. o Kest Accelerated computation (for translation and rigid corrections) when window is an irregular shape. o vcov.ppm, leverage.ppm, influence.ppm, dfbetas.ppm Performance slightly improved, for Gibbs models. o Internal code Performance slightly improved. o Fest, Hest Additional checks for errors in input data. BUGS o leverage.ppm, influence.ppm, parres.ppm, addvar.ppm Calculations were completely incorrect for Gibbs models, due to a coding error. Fixed. o update.kppm If the call to 'update' did not include a formula argument or a point pattern argument, then all arguments were ignored. Example: update(fit, improve.type="quasi") was identical to 'fit'. Fixed. o diagnose.ppm When applied to a model obtained from subfits(), in the default case (oldstyle=FALSE) the variance calculations were incorrect. 
Consequently the dotted lines representing significance bands were incorrect. An error or warning about negative variances occurred sometimes. However, calculations with oldstyle=TRUE were correct. The default has now been changed to oldstyle=TRUE for such models. o [.lpp The local coordinate 'seg' was completely incorrect, when 'i' was a window. Fixed. o leverage.ppm, influence.ppm, parres.ppm, addvar.ppm Crashed for Gibbs models in which the coefficient vector had length 1, such as the stationary Hardcore model. Fixed. o subfits Crashed if the model included factor-valued spatial covariates. [Spotted by Sven Wagner] Fixed. o subfits If the model included factor-valued spatial covariates, and if not all levels of the factor were present in each row of the data, the resulting objects were malformed and caused errors in other code. [Spotted by Sven Wagner] Fixed. o subfits Crashed with some random-effects formulas. [Spotted by Sven Wagner] Fixed. o improve.kppm An error message about a missing object 'gminus1' occurred when vcov=TRUE, fast.vcov=FALSE and type="clik1" or "wclik1". Fixed. o plot.profilepl Failed with a message about a missing object 'finite'. Fixed. o selfcut.psp Gave an error if marks(A) was a vector rather than a data frame. [Spotted by Paul Hewson.] Fixed. o suffstat Gave an error for point process models with Geyer interaction. Fixed. o nncross.lpp, distfun.lpp Crashed with obscure errors if Y consisted of a single point. Fixed. o scan.test, scanmeasure Crashed sometimes with an error message from 'grow.mask'. Fixed. o dppm Crashed sometimes with a message that the point pattern could not be found. [Scoping bug.] Fixed. o mppm, profilepl Crashed, with a message about 'SpatstatVersion', if the 'spatstat' package was neither loaded nor attached. [Spotted by Markus Herrmann.] Fixed. o qqplot.ppm Crashed sometimes when applied to a model obtained from subfits(). Fixed. o anova.mppm Crashed sometimes with a message about mismatched coefficients. 
[Spotted by Sven Wagner.] Fixed. o anova.mppm Crashed sometimes with a message about unrecognised option 'type="score"'. [Spotted by Sven Wagner.] Fixed. o split.ppx Crashed if 'f' was not a factor. Fixed. o idw The result was a pixel image defined in the rectangle Frame(X) instead of Window(X). Fixed. o ppm Parameter estimates were slightly inaccurate when the model included the Geyer() interaction and the "isotropic" edge correction was used. Fixed. o [.ppx Crashed if the number of points selected was less than 2. Fixed. o linnet Crashed if there were no line segments at all. Fixed. o kppm, improve.kppm Crashed if the model was stationary and improve.type != "none". Fixed. o as.linim.default Did not correctly handle factor-valued data. Fixed. o texturemap Crashed if no graphical arguments were specified. Fixed. o vcov.mppm Ignored "..." arguments. Fixed. o Kest If ratio=TRUE and correction=c('border', 'none') the result did not contain ratio information. Fixed. o plot.ppp, plot.lpp Arguments 'chars' and 'cols' were ignored in some cases. Fixed. o ppm Ignored argument 'emend'. Fixed. o plot.dppm Gave warnings about unrecognised argument 'objectname'. Fixed. o overlap.owin Sometimes returned a very small negative value, when the correct answer was 0. Fixed. CHANGES IN spatstat VERSION 1.44-1 OVERVIEW o We thank Brian Ripley for contributions. o Urgent bug fix. o More support for replicated point patterns. o More support for tessellations. o Version nickname: 'Gift Horse' NEW FUNCTIONS o as.function.tess Convert a tessellation to a function(x,y). The function value indicates which tile of the tessellation contains the point (x,y). o tileindex Determine which tile of a tessellation contains a given point (x,y). o persp.leverage.ppm Method for persp plots for objects of class leverage.ppm o AIC.mppm, extractAIC.mppm AIC for point process models fitted to replicated point patterns. 
o nobs.mppm, terms.mppm, getCall.mppm Methods for point process models fitted to replicated point patterns. SIGNIFICANT USER-VISIBLE CHANGES o anova.mppm Now handles Gibbs models, and performs the adjusted composite likelihood ratio test. o update, step These functions now work for models of class 'mppm'. o textureplot Argument x can now be something acceptable to as.im o logLik.mppm New argument 'warn'. BUGS o nncross.lpp, nnwhich.lpp, distfun.lpp Caused a segmentation fault. [Spotted by Brian Ripley.] Fixed. o anova.ppm If a single 'object' was given, and the object was a Gibbs model, then 'adjust' was effectively set to FALSE. Fixed. CHANGES IN spatstat VERSION 1.44-0 OVERVIEW o We thank Jonas Geldmann, Andrew Hardegen, Kassel Hingee, Tom Lawrence, Robin Milne, Gopalan Nair, Suman Rakshit, Peijian Shi and Rasmus Waagepetersen for contributions. o More support for multidimensional point patterns and point processes. o More options for envelopes and related Monte Carlo tests. o More support for model comparison. o k-th nearest neighbours on a linear network. o Penttinen process can be simulated (by Metropolis-Hastings or CFTP). o Calculate the predicted variance of number of points. o Convexifying operation for sets. o Subdivide a linear network. o Accelerated algorithms for linear networks. o Quadrat counting accelerated, in some cases. o Version nickname: 'The Sound of One Hand Typing' NEW FUNCTIONS o rPenttinen Simulate the Penttinen process using perfect simulation. o varcount Given a point process model, compute the predicted variance of the number of points falling in a window. o inside.boxx Test whether multidimensional points lie inside a specified multidimensional box. o lixellate Divide each segment of a linear network into smaller segments. o nsegments.linnet, nsegments.lpp Count the number of line segments in a linear network. o grow.boxx Expand a multidimensional box. o deviance.ppm, deviance.lppm Deviance for a fitted point process model. 
o pseudoR2 Pseudo-R-squared for a fitted point process model. o tiles.empty Checks whether each tile of a tessellation is empty or nonempty. o summary.linim Summary for a pixel image on a linear network. SIGNIFICANT USER-VISIBLE CHANGES o rMaternI, rMaternII These functions can now generate random patterns in three dimensions and higher dimensions, when the argument 'win' is of class 'box3' or 'boxx'. o "[.ppx" The subset index 'i' may now be a spatial domain of class 'boxx' or 'box3'. o rmh.ppm, rmhmodel.ppm, simulate.ppm A model fitted using the 'Penttinen' interaction can now be simulated. o rmh.default, rmhmodel.default These functions now recognise cif='penttinen' for the Penttinen interaction. o envelope New argument 'clamp' gives greater control over one-sided envelopes. o dclf.test, mad.test, dclf.progress, mad.progress, dclf.sigtrace, mad.sigtrace New argument 'clamp' determines the test statistic for one-sided tests. o dclf.progress, mad.progress, dclf.sigtrace, mad.sigtrace, mctest.progress, mctest.sigtrace, dg.progress, dg.sigtrace New argument 'rmin' determines the left endpoint of the test interval. o dclf.test, mad.test, dg.test, dg.progress, dg.sigtrace, dg.envelope (also accepted by dclf.progress, mad.progress, dclf.sigtrace, mad.sigtrace) New argument 'leaveout' specifies how to calculate the deviation between the observed summary function and nominal reference value. o envelope New argument 'funargs' o Hest Argument X can now be a pixel image with logical values. New argument 'W'. [Based on code by Kassel Hingee.] o nncross.lpp, distfun.lpp New argument 'k' allows calculation of k-th nearest neighbour. Computation accelerated. o logLik.ppm New argument 'absolute'. o plot.kppm New arguments 'pause' and 'xname'. o tess Argument 'window' is ignored when xgrid, ygrid are given. o as.polygonal Can now repair errors in polygon data, if repair=TRUE. o rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS, rPenttinen New argument 'drop'. 
o Kest.fft Now has '...' arguments allowing control of spatial resolution. o lppm Computation accelerated. o quadratcount.ppp Computation accelerated in some cases. o dg.test Computation accelerated. BUGS o runifpointx, rpoisppx Crashed if nsim > 1. Fixed. o triangulate.owin Results were incorrect in some special cases. Fixed. o quadrat.test, clarkevans.test In rare cases, the computed Monte Carlo p-value could have been greater than 1. This could have occurred only when nsim was an even number and when the correct p-value was equal to 1. Fixed. o linearmarkequal Result was a data frame instead of an 'fv' object. Fixed. o point-in-polygon test The function inside.owin could take a very long time to check whether points are inside a polygonal window, if the coordinates were very large numbers. This was due to numerical overflow. (Fixed??) o as.fv.kppm Crashed if the model was not fitted by minimum contrast. Fixed. o plot.fv Crashed in some obscure cases. Fixed. o collapse.fv Did not allow 'same=NULL'. Fixed. o dclf.progress, mad.progress, dg.progress, dclf.sigtrace, mad.sigtrace, dg.sigtrace The results could not be re-plotted using a plot formula, because the internal data were slightly corrupted. Fixed. o Kest.fft Result was incorrectly normalised. Fixed. o crosspairs If X and Y were identical point patterns, the result was not necessarily symmetric (on some machines) due to numerical artifacts. Fixed. o plot.fv Lines were not correctly clipped to the plot region when 'ylim' was given. Fixed. o pool.envelope The 'scale' argument was not handled correctly. Fixed. CHANGES IN spatstat VERSION 1.43-0 OVERVIEW o We thank Leanne Bischof, Christophe Biscio, Belarmain Fandohan, Andrew Hardegen, Frederic Lavancier, Tom Lawrence, Martin Maechler, Greg McSwiggan, Robin Milne, Gopalan Nair, Tuomas Rajala, Suman Rakshit, Ben Ramage, Francois Semecurbe and Ida-Maria Sintorn for contributions. o spatstat now depends on the package 'nlme'. 
o spatstat now depends on R 3.2.2 or later. o Simulation algorithms have been accelerated; simulation outcomes are *not* identical to those obtained from previous versions of spatstat. o Determinantal point process models. o Random-effects and mixed-effects models for replicated patterns. o Dao-Genton test, and corresponding simulation envelopes. o Simulated annealing and simulated tempering. o spatstat colour tools now handle transparent colours. o Improvements to "[" and subset() methods o Extensions to kernel smoothing on a linear network. o Support for one-dimensional smoothing kernels. o Bug fix in Metropolis-Hastings simulation. o Mark correlation function may include weights. o Cross-correlation version of the mark correlation function. o Variance calculations for replicated patterns. o Penttinen pairwise interaction model. o Contour plots with colours determined by a colour map. o New dataset: Australian states and territories. o More support for multi-dimensional point patterns. o Minor improvements and bug fixes. o Version nickname: "Mixed Effects" NEW DATASET o austates The states and large mainland territories of Australia represented as polygonal regions forming a tessellation. NEW FUNCTIONS o dppm Fit a determinantal point process model to point pattern data. o fitted.dppm, predict.dppm, intensity.dppm Predict a fitted dppm object. o logLik.dppm, AIC.dppm, extractAIC.dppm, nobs.dppm Likelihood and AIC for determinantal point process models (enabling the use of 'step') o coef.dppm, formula.dppm, print.dppm, terms.dppm, labels.dppm, model.frame.dppm, model.matrix.dppm, model.images.dppm, is.stationary.dppm, reach.dppm, unitname.dppm, unitname<-.dppm, Window.dppm Various methods for dppm objects. o parameters.dppm Extract meaningful list of model parameters o objsurf.dppm Objective function surface of a dppm object o residuals.dppm Residual measure for a dppm object. 
o dppBessel, dppCauchy, dppGauss, dppMatern, dppPowerExp Determinantal Point Process models. o update.dppmodel Set parameter values in a dpp model. o is.stationary.dppmodel, print.dppmodel, reach.dppmodel, valid.dppmodel Basic information about a dpp model o rdpp, simulate.dppmodel Simulation of a dpp model. o intensity.dppmodel, Kmodel.dppmodel, pcfmodel.dppmodel Moments of a dpp model o dim.dppmodel, dppapproxkernel, dppapproxpcf, dppeigen, dppfamily, dppkernel, dppparbounds, dppspecdenrange, dppspecden Helper functions for dpp models. o dclf.sigtrace, mad.sigtrace, mctest.sigtrace Significance trace of Monte Carlo test o dg.test Dao-Genton adjusted Monte Carlo goodness-of-fit test. o dg.envelope Simulation envelopes corresponding to Dao-Genton test. o dg.sigtrace Significance trace for Dao-Genton test o dg.progress Progress plot for Dao-Genton test o markcrosscorr Mark cross-correlation function for point patterns with several columns of marks o fixef.mppm, ranef.mppm Extract fixed effects and random effects from a point process model fitted to replicated point patterns. o rtemper Simulated annealing or simulated tempering. o to.opaque, to.transparent Change transparency value in colours o rgb2hsva Convert RGB to HSV data, like rgb2hsv, but preserving transparency. o superimpose.ppplist, superimpose.splitppp New methods for 'superimpose' for lists of point patterns. o dkernel, pkernel, qkernel, rkernel Probability density, cumulative probability, quantiles and random generation from distributions used in basic one-dimensional kernel smoothing. o kernel.factor Auxiliary calculations for one-dimensional kernel smoothing. o PPversion, QQversion Transformation of a summary function to its P-P or Q-Q counterpart. o spatdim Spatial dimension of any object in the spatstat package. o as.boxx Convert data to a multi-dimensional box. o intensity.ppx Method for 'intensity' for multi-dimensional space-time point patterns. 
o fourierbasis Evaluate Fourier basis functions in any number of dimensions. o valid New generic function, with methods valid.ppm, valid.lppm, valid.dppmodel o emend, emend.ppm, emend.lppm New generic function with methods for ppm and lppm. emend.ppm is equivalent to project.ppm o Penttinen New pairwise interaction model. o quantile.density Calculates quantiles from kernel density estimates. o CDF.density Calculates cumulative distribution function from kernel density estimates. SIGNIFICANT USER-VISIBLE CHANGES o simulation Several basic simulation algorithms have been accelerated. Consequently, simulation outcomes are not identical to those obtained with previous versions of spatstat, even when the same random seed is used. To ensure compatibility with previous versions of spatstat, revert to the slower code by setting spatstat.options(fastthin=FALSE, fastpois=FALSE). o mppm Now handles models with a random effect component. New argument 'random' is a formula specifying the random effect. o vcov.mppm Now handles models with Gibbs interactions. o [.ppp New argument 'clip' determines whether the window is clipped. o [.ppp The previously-unused argument 'drop' now determines whether to remove unused levels of a factor. o [.pp3, [.lpp, [.ppx, subset.ppp, subset.pp3, subset.lpp, subset.ppx These methods now have an argument 'drop' which determines whether to remove unused levels of a factor. o density.lpp Now supports both the 'equal-split continuous' and 'equal-split discontinuous' smoothers. New argument 'continuous' determines the choice of smoother. o envelope New argument 'scale' allows global envelopes to have width proportional to a specified function of r, rather than constant width. o dclf.test, mad.test, dclf.progress, mad.progress, mctest.progress New argument 'scale' allows summary function values to be rescaled before the comparison is performed. o dclf.test, mad.test New argument 'interpolate' supports interpolation of p-value. 
o dclf.progress, mad.progress, mctest.progress New argument 'interpolate' supports interpolation of critical value of test. o simulate.ppm New argument 'w' controls the window of the simulated patterns. o default.rmhcontrol, default.rmhexpand New argument 'w'. o markcorr New argument 'weights' allows computation of the weighted version of the mark correlation function. o density.lpp New argument 'kernel' specifies the smoothing kernel. Any of the standard one-dimensional smoothing kernels can be used. o contour.im New argument 'col' specifies the colour of the contour lines. If 'col' is a colour map, then the contours are drawn in different colours. o plot.ppp The default colour for the points is now a transparent grey, if this is supported by the plot device. o rgbim, hsvim New argument 'A' controls the alpha (transparency) channel. o rgb2hex, col2hex, paletteindex, is.colour, samecolour, complementarycolour, is.grey, to.grey These colour tools now handle transparent colours. o rgb2hex New argument 'maxColorValue' o to.grey New argument 'transparent'. o progressreport New argument 'state' New option: style="tk" o rLGCP This function no longer requires the package 'RandomFields' to be loaded explicitly. o kppm Fitting a model with clusters="LGCP" no longer requires the package 'RandomFields' to be loaded explicitly. o rpoispp Accelerated, when 'lambda' is a pixel image. o rthin Accelerated, when 'P' is a single number. o spatstat.options New options 'fastthin' and 'fastpois' enable fast simulation algorithms. Set these options to FALSE to reproduce results obtained with previous versions of spatstat. o split.ppp The splitting variable 'f' can now be a logical vector. o collapse.fv This is now treated as a method for the 'nlme' generic 'collapse'. Its syntax has been adjusted slightly. o diagnose.ppm, plot.diagppm New arguments col.neg, col.smooth control the colour maps. o valid.ppm This is now a method for the generic function 'valid'. 
o ppm.ppp, ppm.quad New argument 'emend', equivalent to 'project'. o "[<-.im" Accepts an array for 'value'. o as.im.function New argument 'strict'. o bw.ppl New argument 'weights'. o plot.mppm New argument 'se'. o dclf.test, mad.test Formal arguments 'use.theo' and 'internal' have been removed. o predict.kppm, residuals.kppm Now issues a warning when the calculation ignores the cluster/Cox component and treats the model as if it were Poisson. (This currently happens in predict.kppm when se=TRUE or interval != "none", and in residuals.kppm when type != "raw"). BUG FIXES o lpp Crashed if X was a 4-column matrix. Fixed. o plot.fv Crashed with some graphics devices, if legend=TRUE. Fixed. o effectfun Crashed if 'covname' was missing. Fixed. o rVarGamma, rMatClust, rThomas, rCauchy, rNeymanScott Crashed if 'kappa' was a function or image instead of a single number. [Spotted by Ben Ramage.] Fixed. o plot.mppm Crashed with a message about "figure margins too large" unless the argument se=FALSE was given explicitly. Fixed. o opening.owin, closing.owin Crashed sometimes, with a message about a rectangle not containing a window. Fixed. o persp.im Crashed if all pixel values were equal to zero (unless zlim was given). Fixed. o predict.ppm Crashed sometimes if the model was fitted with use.gam=TRUE. Fixed. o as.linim.linfun Generated an error ('L must be a linear network') if extra arguments were given. Fixed. o as.function.fv Generated an error when executed in the 'covr' package. Fixed. o rmh, simulate.ppm Results were incorrect for inhomogeneous multitype models simulated with fixall=TRUE (i.e. prescribing a fixed number of points of each type) if the model was segregated (i.e. if different types of points had different first order trend). Fixed. o dclf.progress, mad.progress Ignored the argument 'alternative'. Fixed. o $<-.hyperframe, [<-.hyperframe Result was garbled if 'value' was a hyperframe with one column. Fixed. o rmh.ppm Argument 'w' was ignored in some cases. Fixed. 
o Hest There was an artefact at r=0 when conditional=TRUE. Fixed. o [.msr The result of M[W] where W is a window was a measure with window W, instead of intersect.owin(W, Window(M)). Fixed. o pool.envelope Did not always respect the value of 'use.theory'. Fixed. o envelope, pool.envelope If 'ginterval' was given, the results were in a slightly incorrect format. Fixed. o pool.envelope Did not check for compatible values of 'ginterval'. Fixed. CHANGES IN spatstat VERSION 1.42-2 OVERVIEW o We thank Bob Klaver and Harold-Jeffrey Ship for contributions. o Improvements to simulation of Neyman-Scott processes. o Improvements to fitting of Neyman-Scott models. o Extended functionality for pixel images. o Fitted intensity on linear network o Triangulation of windows. o Corrected an edge correction. o Bug fixes and performance improvements. o Nickname: 'Barking at Balloons' NEW FUNCTIONS o triangulate.owin Decompose a spatial window into triangles. o fitted.lppm Fitted intensity values for a point process on a linear network. SIGNIFICANT USER-VISIBLE CHANGES o rThomas, rMatClust, rCauchy, rVarGamma When the model is approximately Poisson, it is simulated using rpoispp. This avoids computations which would require huge amounts of memory. New argument 'poisthresh' controls this behaviour. o update.kppm Now handles additional arguments in any order, with or without names. Changed arguments. Improved behaviour. o kppm, clusterfit New argument 'algorithm' specifies the choice of optimisation algorithm. o kppm Left hand side of formula can now involve entries in the list 'data'. o rotmean New argument 'padzero'. Default behaviour has changed. o rose.default New argument 'weights'. o rose New arguments 'start' and 'clockwise' specify the convention for measuring and plotting angles. o padimage New argument 'W' allows an image to be padded out to fill any window. o union.owin Improved behaviour when there are more than 2 windows. o clusterset Improved behaviour. 
o affine.owin Allows transformation matrix to be singular, if the window is polygonal. BUG FIXES o spatstat spatstat could not be installed on some 64-bit VM systems because of an apparent bug in R. Fixed. o rThomas, rMatClust, rCauchy, rVarGamma Large values of the scale parameter could cause the algorithm to freeze or require huge amounts of memory. Fixed. o pcf, pcfinhom Crashed if the point pattern was empty. Fixed. o plot.fv Gave an error message if all 'y' values were equal, when legend=TRUE. Fixed. o rose.default Display was incorrect when unit="radian". Fixed. o Kest Ohser-Stoyan rigid motion correction (correction='rigid') was calculated incorrectly at large distances. Fixed. o summary.im Issued a warning about numerical overflow in some cases. [Spotted by Bob Klaver.] Fixed. o plot.im Sometimes warned that 'box' is not a graphical parameter. Fixed. CHANGES IN spatstat VERSION 1.42-1 OVERVIEW o We thank Andrew Hardegen, Tom Lawrence, Robin Milne, Suman Rakshit, and Brian Ripley for contributions. o Urgent bug fix. o More robust simulation of cluster processes. o Slightly accelerated. o Version nickname: 'Vogon Poetry' NEW FUNCTIONS o boundingbox.solist Method for boundingbox for lists of spatial objects. SIGNIFICANT USER-VISIBLE CHANGES o rThomas, rMatClust, rCauchy, rVarGamma, rNeymanScott New faster algorithm which is more robust against extreme values of the parameters. o rNeymanScott New argument 'nonempty' controls choice of algorithm. o solist, as.solist Accelerated. o as.list.hyperframe Accelerated. BUG FIXES o residuals.mppm Brought some computers to a grinding halt, due to the bug in solist(). Fixed. o solist, as.solist In rare cases, the format was corrupted, or the algorithm never terminated. Fixed. CHANGES IN spatstat VERSION 1.42-0 OVERVIEW o We thank Anders Bilgrau, Ute Hahn, Jack Hywood, Tuomas Rajala, Cody Schank, Olivia Semboli and Ben Taylor for contributions. 
o Version nickname: 'Life, The Universe and Everything' o Permutation test for difference between groups of point patterns. o Variational Bayes estimation for point process models. o Score test in anova.ppm o ROC curve, and discrimination index AUC, for fitted models. o Interactive text editor for spatial datasets. o Tools for analysing data on a tree. o Kernel density/intensity estimation on a linear network. o Random pixel noise. o Improved behaviour of polygon geometry operations. o Improved support for cluster and Cox models. o Improved basic support for owin objects. o Improved support for tessellations. o More hierarchical Gibbs interactions. o Modifications to Kest. o summary method for Cox and cluster models. o class 'listof' is almost completely replaced by 'anylist' and 'solist'. o Improvements and bug fixes. o spatstat now depends on R version 3.2.0 or later. NEW FUNCTIONS o studpermu.test Studentised permutation test for difference between groups of point patterns. Generously contributed by Ute Hahn. o AIC.kppm, extractAIC.kppm, logLik.kppm, nobs.kppm Methods for computing AIC for fitted Cox and cluster models. o transmat Convert pixel arrays between different display conventions. o roc Receiver Operating Characteristic curve. o auc Discrimination index AUC (area under the ROC curve) o edit.ppp, edit.psp, edit.im Interactive text editor works for spatial datasets. o edit.hyperframe Interactive text editor works for hyperframes. o parameters Extract all parameters from a fitted model. o density.lpp Kernel estimation of point process intensity on a linear network. o extractbranch, deletebranch, treeprune, treebranchlabels, begins Tools for analysing data on a tree. o rnoise Random pixel noise. o as.data.frame.owin Convert a window to a data frame. o harmonise.owin Convert several binary mask windows to a common pixel grid. o copyExampleFiles Copy the raw data files from an installed dataset to a chosen folder, for use in a practice exercise. 
o density.ppplist Method for 'density' for lists of point patterns. o inradius Radius of largest circle inside a window. o mergeLevels Merge different levels of a factor. o relevel.im, relevel.ppp, relevel.ppx Change the reference level of a factor. o simulate.profilepl simulation method for models fitted by profile maximum pseudolikelihood. o predict.rho2hat Prediction method for class rho2hat o with.msr Evaluate (an expression involving) components of a measure. o summary.kppm, print.summary.kppm, coef.summary.kppm Methods for 'summary' and 'coef(summary(..))' for Cox and cluster models. o as.im.funxy Method for as.im for class funxy. o shift.linim, scalardilate.linim, affine.linim Geometrical transformations for 'linim' objects. o Smooth.solist Smooth method for a list of spatial objects. o unitname.tess, unitname<-.tess Tessellations now keep track of the name of the unit of length. o dirichletAreas Faster algorithm for tile.areas(dirichlet(X)). o identify.lpp Method for 'identify' for point patterns on a linear network. o HierStraussHard, HierHard Hierarchical interactions for Gibbs models. o delaunayDistance, delaunayNetwork, dirichletEdges, dirichletNetwork, dirichletVertices, dirichletWeights These functions will replace delaunay.distance, delaunay.network, dirichlet.edges, dirichlet.network, dirichlet.vertices and dirichlet.weights respectively. The latter are now 'deprecated'. SIGNIFICANT USER-VISIBLE CHANGES o ppm Now supports Variational Bayes fitting method. o kppm 'AIC' and 'step' now work for kppm objects fitted using maximum Palm likelihood. o kppm The default for the weight function 'weightfun' has been changed, for better performance. o envelope envelope methods now have argument 'use.theory' specifying whether to use the 'theoretical' value of the summary function when constructing simultaneous envelopes. o anova.ppm Now performs the Score Test, for Poisson models only, if argument test="Rao" or test="score". 
o Kest New argument 'rmax' controls maximum value of argument 'r' o diagnose.ppm Now computes and prints the null standard deviation of the smoothed Pearson residual field, when appropriate. o nncorr, nnmean, nnvario New argument 'k' specifies k-th nearest neighbour. o quadrat.test.ppp, quadrat.test.quadratcount New argument 'lambda' supports a test of the Poisson process with given intensity 'lambda'. o clickpoly, clickbox These functions now handle graphical arguments to polygon() when drawing the resulting polygon or rectangle. o owin, as.owin, as.mask owin(mask=D) or as.owin(D) or as.mask(D) will produce a binary mask window if D is a data frame with two columns of (x,y) coordinates or a data frame with three columns containing (x,y,logical). o as.owin.data.frame W can now be a data frame with only two columns, giving the spatial coordinates of the pixels that are inside the window. o rose Tick marks now have labels showing the angle (in degrees or radians). o distcdf New argument 'regularise' determines whether values at short distances will be smoothed to avoid discretisation artefacts. o rpoislinetess Return value now has an attribute 'lines' giving the realisation of the Poisson line process. o intersect.owin, union.owin, setminus.owin New argument 'p' controls resolution of polygon clipping algorithm. o intersect.owin, union.owin Arguments may be lists of windows, of class 'solist'. Formal arguments A and B have been removed. o superimpose Now handles lists of point patterns (objects of class 'ppplist' or 'splitppp') o density.ppp New argument 'positive' allows the user to stipulate that density values must be positive (avoiding numerical errors which occasionally produce small negative values). o adaptive.density Now accepts f = 0 (uniform intensity estimate) and f = 1 (Voronoi intensity estimate) as well as 0 < f < 1. Algorithm accelerated. o rSSI Can now generate inhomogeneous patterns. o effectfun Now works for 'kppm' and 'lppm' objects as well. 
o integral.im, integral.msr Argument 'domain' can now be a tessellation; the integral over each tile of the tessellation is returned. o allstats, compareFit, markcorr, split.ppx, by.ppp Result is now of class 'anylist'. o by.im, density.splitppp, idw, model.images, nnmark, pixellate.ppp, predict.lppm, predict.ppm, quadratcount.splitppp, quadratresample, relrisk, Smooth.msr, split.im, tiles Result is now of class 'solist'. o split.ppp New argument 'reduce'. Result now inherits class 'ppplist' and 'solist', as well as 'splitppp' o rLGCP New argument 'nsim' allows multiple patterns to be generated. o alltypes New argument 'reuse' determines whether all simulation envelopes are based on the same set of simulated patterns, or on independent sets. o rpoispp, runifpoint New argument 'ex' makes it possible to generate a random pattern similar to an example point pattern. o effectfun Argument 'covname' is not needed if the model has only one covariate. o quadratcount Argument 'tess' can now be anything acceptable to as.tess. o tess New argument 'unitname' specifies the name of the unit of length. If it is missing, unitname information will be extracted from the other data. o intersect.tess, chop.tess, quadrats Results of these functions now have the same 'unitname' as their input. o persp.im, nnclean, plot.qqppm, plot.bw.optim These plotting functions now obey spatstat.options('monochrome') o lurking Now returns an object of class 'lurk' which has a plot method. Two-standard-deviation limits are now plotted using grey shading. o marktable New argument 'N' for studying the N nearest neighbours. New argument 'collapse' for manipulating the contingency table. o harmonise.fv Now discards columns with names which do not match. o eval.fv New argument 'equiv' can be used to declare that two columns with different names in different objects are equivalent. o quantile.ewcdf New argument 'type' controls the type of quantile. 
o plot.imlist New argument 'plotcommand' specifies how to plot each image. o persp.im The lower extent of the apron can now be controlled by 'zlim'. o quadscheme Argument 'method' is partially matched. o Kdot, Ldot New argument 'from' is an alternative to 'i'. o Kcross, Lcross New arguments 'from' and 'to' are alternatives to 'i' and 'j' respectively. o varblock Changed the ordering (and therefore default colours/styles) of curves in the plot, to match other functions like lohboot. o bw.diggle New argument 'nr' controls accuracy. o textureplot Now accepts a pixel image, a tessellation, or anything acceptable to as.tess. o textureplot Line spacing in legend now matches line spacing in main display. o [.tess Subset index can now be a window. o plot.tess Can now plot a text label in each tile. o plot.tess New argument 'do.plot'. o MultiHard, MultiStrauss, MultiStraussHard, HierStrauss Printed output of fitted model now respects spatstat.options('terse'). o print.ppm Reduced redundancy in output in some cases. o print.msr Responds better to spatstat.options('terse'). o print.ppm, print.fii, print.interact Irregular parameters are now printed to the number of significant figures specified by options("digits"). o square New argument 'unitname'. o plot.fv Return value is now invisible. o delaunay.distance, delaunay.network, dirichlet.edges, dirichlet.network, dirichlet.vertices These functions are now 'deprecated', and will be replaced by delaunayDistance, delaunayNetwork, dirichletEdges, dirichletNetwork and dirichletVertices respectively. o data(residualspaper) In the real datasets (Fig1 and Fig11), the name of the unit of length has now been recorded. o rLGCP This function now requires the package 'RandomFields' to be loaded explicitly by library(RandomFields) or require(RandomFields), unless model="exp". o iplot, istat These functions now require the package 'rpanel' to be loaded explicitly by library(rpanel) or require(rpanel). 
o ppm, quadscheme Improved calculation of Dirichlet weights. o countends New argument 'toler' controls numerical errors o diagnose.ppm Improved handling of additional graphics arguments. o pcf3est Mathematical labels changed. o plot.hyperframe Default margin spacing has been increased. BUG FIXES o Kinhom, Linhom The value of 'theo' was erroneously rescaled by a small amount, when renormalise=TRUE (the default). Fixed. o Kmark Values were erroneously rescaled. Fixed. o union.owin Strange results were sometimes obtained when taking the union of more than two windows. Fixed. o rpoispp3 Implementation was incorrect for nsim > 1. (Results may have been incorrect.) Spotted by Jack Hywood. Fixed. o as.owin.data.frame Crashed if the window was not connected. Fixed. o Frame<- Crashed when applied to a binary mask. Fixed. o rho2hat Crashed if cov1="x" and cov2="y". Fixed. o as.mask Crashed sometimes when only the argument 'xy' was given. Fixed. o ppm Crashed (rarely) when method='ho' if the simulated pattern was empty. Fixed. o istat, iplot Crashed in recent versions of rpanel. Fixed. o convexhull Crashed if applied to a 'psp' object. Fixed. o plot.ppm Crashed with message about 'variable lengths differ'. Fixed. o plot.solist Crashed when applied to a list of point patterns if some patterns had more than one column of marks. Fixed. o Smooth.ppp Crashed if applied to a point pattern with several columns of marks if some of the columns were factors. Fixed. o runifpoint3, rpoispp3 Crashed if nsim > 1. Spotted by Jack Hywood. Fixed. o hist.im Crashed if argument 'freq' was given. Fixed. o MultiStraussHard Generated misleading error messages (e.g. 'model is invalid') when arguments 'iradii' and 'hradii' did not have the same pattern of NA's. Fixed. o plot.solist Figures were sometimes aligned incorrectly when the argument 'panel.args' was given. Fixed. o scaletointerval Results sometimes fell slightly outside the desired interval due to numerical error. Fixed. 
o plot.solist Behaved incorrectly when plotcommand='persp'. Fixed. o "[.hyperframe" Sometimes returned an 'anylist' when it should have returned a 'solist'. Fixed. o plot.im Did not plot surrounding frame box when ribbon=FALSE. Fixed. o envelope The functions stored when savefuns=TRUE did not inherit the correct name for the unit of length. Fixed. o print.ppm, print.fii, print.interact Layout was misaligned. Fixed. o plot.plotppm Paused for input when it was not appropriate. Fixed. o plot.fv On png devices, the legend box was drawn with a white background, obscuring the main plot. Fixed. o plot.owin, plot.ppp, plot.im There was unnecessary extra space above the main title. Fixed. o plot.rho2hat Colour map ribbon was drawn but not annotated. Fixed. o density.splitppp, density.ppplist Format was out of order if se=TRUE. Fixed. o MultiStraussHard project.ppm sometimes yielded a model that was still invalid. Fixed. CHANGES IN spatstat VERSION 1.41-1 OVERVIEW o This is identical to the major release 1.41-0 except for minor bug fixes. The change log for 1.41-0 is repeated here with minor modifications. o Version nickname: 'Ides of March' o We thank Ahmed El-Gabbas, Ute Hahn, Aruna Jammalamadaka, Ian Renner, Brian Ripley, Torben Tvedebrink and Sasha Voss for contributions. o Fixed a bug causing a segmentation fault. o Standard errors for kernel estimates of intensity. o Test for segregation. o Tessellations may now have marks. o Nested splitting. o More support for cluster models. Reorganised parametrisation. o Sparse data representation of linear networks. o More support for data on a linear network. o New datasets: 'spiders' and 'dendrite'. o Improvements and bug fixes. o spatstat no longer uses Fortran. o spatstat no longer depends on the package 'scatterplot3d'. o spatstat now imports (rather than 'suggests') the Matrix package. NEW DATASETS o dendrite Dendritic spines on the dendrite network of a neuron. A point pattern on a linear network. 
Generously contributed by Aruna Jammalamadaka. o spiders Spider webs on the mortar lines of a brick wall. A point pattern on a linear network. Generously contributed by Sasha Voss. NEW FUNCTIONS o segregation.test Test of spatial segregation of types in a multitype point pattern. o clusterfield, clusterkernel Compute the cluster kernel (offspring density) of a cluster process model, or compute the cluster field generated by superimposing copies of the cluster kernel at specified locations. o clusterradius Compute the radius of the support of the offspring density of a cluster process model. o as.linnet.psp Convert a line segment pattern to a linear network by guessing the connectivity using a distance threshold. o iplot.linnet, iplot.lpp Methods for interactive plotting 'iplot' for objects of class lpp and linnet. o Mathematical operations are now supported for pixel images on a linear network. See help(Math.linim) o dirichlet.network, delaunay.network The linear networks formed by the Dirichlet tessellation and Delaunay triangulation. o dirichlet.edges The edges of the Dirichlet tessellation. o selfcut.psp Cut line segments where they cross each other. o vertices.linnet Extract the vertices (nodes) of the linear network. o vertexdegree Compute the degree of each vertex in a linear network. o pixellate.linnet Pixellate a linear network. o subset.hyperframe 'subset' method for class 'hyperframe'. o head.hyperframe, tail.hyperframe 'head' and 'tail' methods for hyperframes. o clickdist Measures the distance between two spatial locations clicked by the user. o solapply, anylapply wrappers for 'lapply' which return a list of class 'solist' or 'anylist'. o Kmark Weighted K-function. Identical to 'markcorrint' and will eventually replace it. o marks.tess, marks<-.tess, unmark.tess: Extract or change the marks associated with the tiles of a tessellation. o quantess Quantile tessellation: divide space into pieces which contain equal amounts of 'stuff'. 
o nestsplit Nested split o integral New generic function for integrating functions, with methods for 'im', 'msr', 'linim' and 'linfun'. o selfcut.psp Cut line segments where they cross each other o as.function.im Convert a pixel image to a function(x,y). o as.linnet.linim Extract the linear network from a 'linim' object. o pool.fv, pool.anylist New methods for 'pool' o Window.linnet Extract the two-dimensional window containing a linear network. SIGNIFICANT USER-VISIBLE CHANGES o linnet, lpp A linear network can now be built in 'sparse matrix' form which requires much less memory. o chicago The Chicago street crimes data are now stored in 'sparse matrix' form. To convert them to non-sparse form, use as.lpp(chicago, sparse=FALSE) o kppm The parametrisation of cluster models has been reorganised. The scale parameter is now always called 'scale'. Results should be backward-compatible. o cauchy.estK, cauchy.estpcf, matclust.estK, matclust.estpcf, thomas.estK, thomas.estpcf, vargamma.estK, vargamma.estpcf The parametrisation of cluster models has been reorganised. The scale parameter is now always called 'scale'. o plot.kppm Also plots the cluster kernel. o density.ppp New argument 'se' allows calculation of standard errors as well. o plot.pp3 Now produces a genuine perspective view. New arguments control the eye position for the perspective view. o Emark, Vmark These functions can now work with multiple columns of marks. o pixellate.psp Can now count the number of segments that intersect each pixel, instead of the total length of intersection. o linfun If g = linfun(f, L), the function f will always be called as f(x,y,seg,tp, ...) It is no longer expected to handle the case where 'seg' and 'tp' are absent. The resulting function g can now be called as g(X) where X is an lpp object, or as g(x,y) or g(x,y,seg,tp) where x,y,seg,tp are coordinates. o tess New argument 'marks' allows marks to be associated with tiles. 
o anova.lppm Outdated argument 'override' has been removed. o split<-.ppp Preserves the original ordering of the data, if possible. o MultiHard, MultiStrauss, MultiStraussHard, HierStrauss Zero values in the interaction radii are now treated as NA. Improved handling of missing arguments. Printed output now respects options('width') o linearKinhom, linearKcross.inhom, linearKdot.inhom, linearpcfinhom, linearpcfcross.inhom, linearpcfdot.inhom If the intensity argument lambda, lambdaI, lambdaJ, lambdadot is a fitted point process model, the model is first updated by re-fitting it to the data, before computing the fitted intensity. o solutionset The expression will be evaluated using pixel arithmetic (Math.im) if it cannot be evaluated using eval.im. o to.grey Now uses better weights for the R, G, B channels. o rVarGamma Accelerated. o summary.mppm, print.mppm These functions now respect options('width') and spatstat.options('terse'). o print.quadrattest Now respects options('width') and spatstat.options('terse'). o print.pp3 Now respects options('width') o print.lpp Now respects options('width') and options('digits'). o print.owin, print.im, print.summary.owin, print.summary.im Now respect options('width'). o nnmean Now yields a vector, instead of a 1-column matrix, when there is only a single column of marks. o pairdist.psp, crossdist.psp, nndist.psp The option 'method="Fortran"' is no longer supported. The default is 'method="C"'. o [.hyperframe: When a row of data is extracted with drop=TRUE, the result belongs to class 'anylist'. o installation of spatstat A Fortran compiler is no longer needed to compile spatstat from source. o hyperframe class The internal structure of hyperframes has changed slightly: columns of objects are now stored and returned as lists of class 'anylist' or 'solist'. There should be no change in behaviour. 
o datasets Internal format of the datasets bdspots, bei, clmfires, demohyper, flu, gorillas, heather, Kovesi, murchison, osteo, pyramidal, waterstriders has changed slightly to use the classes 'anylist' and 'solist'. There should be no change in behaviour. o K3est New argument 'ratio'. o spatstat.options New option 'par.points3d' sets default arguments for plot.pp3. o diagnose.ppm New arguments 'xlab', 'ylab', 'rlab' determine the labels in the 4-panel plot, and new argument 'outer' controls their position. The confusing default value for 'compute.sd' has been changed. o iplot.layered New argument 'visible' controls which layers are initially visible. o plot.lpp New argument 'show.window' controls whether to plot the containing window. o textstring Any number of spatial locations (x,y) can be specified, with a corresponding vector of text strings. o plot.hyperframe New argument 'mar' o plot.linnet New argument 'do.plot' o summary.hyperframe Improved output. o eval.linim Improved scoping rules. o pixellate.owin Accelerated. o summary.linnet Now prints more information, and respects options('digits'). o rmpoispp, rmpoint The vector of possible types of points will default to the 'names' vector of the argument 'lambda', 'n', or 'f' where appropriate. o rpoislpp Argument 'L' can be omitted when lambda is a 'linim' or 'linfun' o simulate.ppm, simulate.kppm, simulate.lppm, simulate.slrm New argument 'drop': if nsim = 1 and drop=TRUE, the result is a point pattern rather than a list containing one point pattern. o runifdisc, runifpoint, rpoint, rpoispp, rmpoint, rmpoispp, rMaternI, rMaternII, rSSI, rPoissonCluster, rGaussPoisson, rstrat, rsyst, rcell, rthin, rNeymanScott, rMatClust, rThomas, rCauchy, rVarGamma, rpoispp3, runifpoint3 New argument 'drop': if nsim = 1 and drop=TRUE, the result is a point pattern rather than a list containing one point pattern. 
o spatstat.options New option 'units.paren' controls the type of parenthesis enclosing the explanatory text about the unit of length, in print.ppm, plot.fv, etc. o closepairs, crosspairs New option: what="ijd" returns only the indices i, j and the distance d o rCauchy, rMatClust, rNeymanScott, rPoissonCluster, rThomas, rVarGamma Argument names have changed. BUG FIXES o sumouter A segmentation fault could occur if any data were NA. Fixed. o simulate.kppm Simulation failed for log-Gaussian Cox processes (in simulate.kppm only) with an error message from the RandomFields package. Fixed. o ppm, predict.ppm, profilepl Crashed sometimes with message "interaction evaluator did not return a matrix". Fixed. o lppm step() did not work correctly on 'lppm' objects. Fixed. o quadscheme If quadscheme() was called explicitly, with the stipulated number of tiles exceeding the number of dummy points given, then the quadrature weights were sometimes vastly inflated - total quadrature weight was much larger than window area. Spotted by Ian Renner. Fixed. o predict.rhohat Result was incorrect for data on a non-rectangular window (and a warning was issued about incorrect vector length). Fixed. o Math.im Unary operators did not work (e.g."-x") Fixed. o density.ppp Crashed when at="points" if the dataset had exactly 1 point. Fixed. o rSSI Crashed if nsim > 1. Fixed. o influence.ppm, leverage.ppm, dfbetas.ppm Crashed or issued a warning if any quadrature points had conditional intensity zero under the model (negative infinite values of the sufficient statistic). Fixed. o clickppp, clickpoly Did not work correctly in the RStudio display device. Fixed. o Iest Ignored the arguments 'r' and 'eps'. Fixed. o markvario Result was garbled, when X had more than one column of marks. Fixed. o rMatClust, rVarGamma, rCauchy, rNeymanScott Result was a list, but not a 'solist', when nsim > 1. Fixed. 
o print.mppm, summary.mppm, subfits Crashed if a Poisson interaction was implied but not given explicitly. Fixed. o Kest Crashed if ratio=TRUE and the window was a rectangle. Fixed. o anova.ppm Crashed sometimes with message 'models were not all fitted to the same size of dataset'. (This occurred if there were quadrature points with conditional intensity equal to zero in some models but not in all models.) Fixed. o vcov.kppm Occasionally ran out of memory. Fixed. o as.linim.linfun Erroneously converted the pixel values to numeric values. Fixed. o as.owin.layered Ignored layers with zero area. Fixed. o plot.ppm Paused the plot between frames even when there was only one frame. Fixed. o plot.layered Did not allocate space for legends of 'lpp' objects. Fixed. o plot.lpp Ignored symbolmap arguments like 'cex' and confused the arguments 'col' and 'cols'. Fixed. o plot.diagppm Ignored add=TRUE in some cases. Fixed. o iplot.layered Did not handle 'diagramobj' objects correctly. Fixed. o plot.yardstick Changed arguments. CHANGES IN spatstat VERSION 1.41-0 OVERVIEW o We thank Ahmed El-Gabbas, Ute Hahn, Aruna Jammalamadaka, Ian Renner, Brian Ripley, Torben Tvedebrink and Sasha Voss for contributions. o Fixed a bug causing a segmentation fault. o Standard errors for kernel estimates of intensity. o Test for segregation. o Tessellations may now have marks. o Nested splitting. o More support for cluster models. Reorganised parametrisation. o Sparse data representation of linear networks. o More support for data on a linear network. o New datasets: 'spiders' and 'dendrite'. o Improvements and bug fixes. o spatstat no longer uses Fortran. o spatstat no longer depends on the package 'scatterplot3d'. o spatstat now imports (rather than 'suggests') the Matrix package. o Nickname: 'Team Australia' NEW DATASETS o dendrite Dendritic spines on the dendrite network of a neuron. A point pattern on a linear network. Generously contributed by Aruna Jammalamadaka. 
o spiders Spider webs on the mortar lines of a brick wall. A point pattern on a linear network. Generously contributed by Sasha Voss. NEW FUNCTIONS o segregation.test Test of spatial segregation of types in a multitype point pattern. o clusterfield, clusterkernel Compute the cluster kernel (offspring density) of a cluster process model, or compute the cluster field generated by superimposing copies of the cluster kernel at specified locations. o clusterradius Compute the radius of the support of the offspring density of a cluster process model. o as.linnet.psp Convert a line segment pattern to a linear network by guessing the connectivity using a distance threshold. o iplot.linnet, iplot.lpp Methods for interactive plotting 'iplot' for objects of class lpp and linnet. o Mathematical operations are now supported for pixel images on a linear network. See help(Math.linim) o dirichlet.network, delaunay.network The linear networks formed by the Dirichlet tessellation and Delaunay triangulation. o dirichlet.edges The edges of the Dirichlet tessellation. o selfcut.psp Cut line segments where they cross each other. o vertices.linnet Extract the vertices (nodes) of the linear network. o vertexdegree Compute the degree of each vertex in a linear network. o pixellate.linnet Pixellate a linear network. o subset.hyperframe 'subset' method for class 'hyperframe'. o head.hyperframe, tail.hyperframe 'head' and 'tail' methods for hyperframes. o clickdist Measures the distance between two spatial locations clicked by the user. o solapply, anylapply wrappers for 'lapply' which return a list of class 'solist' or 'anylist'. o Kmark Weighted K-function. Identical to 'markcorrint' and will eventually replace it. o marks.tess, marks<-.tess, unmark.tess: Extract or change the marks associated with the tiles of a tessellation. o quantess Quantile tessellation: divide space into pieces which contain equal amounts of 'stuff'. 
o nestsplit Nested split o integral New generic function for integrating functions, with methods for 'im', 'msr', 'linim' and 'linfun'. o selfcut.psp Cut line segments where they cross each other o as.function.im Convert a pixel image to a function(x,y). o as.linnet.linim Extract the linear network from a 'linim' object. o pool.fv, pool.anylist New methods for 'pool' o Window.linnet Extract the two-dimensional window containing a linear network. SIGNIFICANT USER-VISIBLE CHANGES o linnet, lpp A linear network can now be built in 'sparse matrix' form which requires much less memory. o chicago The Chicago street crimes data are now stored in 'sparse matrix' form. To convert them to non-sparse form, use as.lpp(chicago, sparse=FALSE) o kppm The parametrisation of cluster models has been reorganised. The scale parameter is now always called 'scale'. Results should be backward-compatible. o cauchy.estK, cauchy.estpcf, matclust.estK, matclust.estpcf, thomas.estK, thomas.estpcf, vargamma.estK, vargamma.estpcf The parametrisation of cluster models has been reorganised. The scale parameter is now always called 'scale'. o plot.kppm Also plots the cluster kernel. o density.ppp New argument 'se' allows calculation of standard errors as well. o plot.pp3 Now produces a genuine perspective view. New arguments control the eye position for the perspective view. o Emark, Vmark These functions can now work with multiple columns of marks. o pixellate.psp Can now count the number of segments that intersect each pixel, instead of the total length of intersection. o linfun If g = linfun(f, L), the function f will always be called as f(x,y,seg,tp, ...) It is no longer expected to handle the case where 'seg' and 'tp' are absent. The resulting function g can now be called as g(X) where X is an lpp object, or as g(x,y) or g(x,y,seg,tp) where x,y,seg,tp are coordinates. o tess New argument 'marks' allows marks to be associated with tiles. 
o anova.lppm Outdated argument 'override' has been removed. o split<-.ppp Preserves the original ordering of the data, if possible. o MultiHard, MultiStrauss, MultiStraussHard, HierStrauss Zero values in the interaction radii are now treated as NA. Improved handling of missing arguments. Printed output now respects options('width') o linearKinhom, linearKcross.inhom, linearKdot.inhom, linearpcfinhom, linearpcfcross.inhom, linearpcfdot.inhom If the intensity argument lambda, lambdaI, lambdaJ, lambdadot is a fitted point process model, the model is first updated by re-fitting it to the data, before computing the fitted intensity. o solutionset The expression will be evaluated using pixel arithmetic (Math.im) if it cannot be evaluated using eval.im. o to.grey Now uses better weights for the R, G, B channels. o rVarGamma Accelerated. o summary.mppm, print.mppm These functions now respect options('width') and spatstat.options('terse'). o print.quadrattest Now respects options('width') and spatstat.options('terse'). o print.pp3 Now respects options('width') o print.lpp Now respects options('width') and options('digits'). o print.owin, print.im, print.summary.owin, print.summary.im Now respect options('width'). o nnmean Now yields a vector, instead of a 1-column matrix, when there is only a single column of marks. o pairdist.psp, crossdist.psp, nndist.psp The option 'method="Fortran"' is no longer supported. The default is 'method="C"'. o [.hyperframe: When a row of data is extracted with drop=TRUE, the result belongs to class 'anylist'. o installation of spatstat A Fortran compiler is no longer needed to compile spatstat from source. o hyperframe class The internal structure of hyperframes has changed slightly: columns of objects are now stored and returned as lists of class 'anylist' or 'solist'. There should be no change in behaviour. 
o datasets Internal format of the datasets bdspots, bei, clmfires, demohyper, flu, gorillas, heather, Kovesi, murchison, osteo, pyramidal, waterstriders has changed slightly to use the classes 'anylist' and 'solist'. There should be no change in behaviour. o K3est New argument 'ratio'. o spatstat.options New option 'par.points3d' sets default arguments for plot.pp3. o diagnose.ppm New arguments 'xlab', 'ylab', 'rlab' determine the labels in the 4-panel plot, and new argument 'outer' controls their position. The confusing default value for 'compute.sd' has been changed. o iplot.layered New argument 'visible' controls which layers are initially visible. o plot.lpp New argument 'show.window' controls whether to plot the containing window. o textstring Any number of spatial locations (x,y) can be specified, with a corresponding vector of text strings. o plot.hyperframe New argument 'mar' o plot.linnet New argument 'do.plot' o summary.hyperframe Improved output. o eval.linim Improved scoping rules. o pixellate.owin Accelerated. o summary.linnet Now prints more information, and respects options('digits'). o rmpoispp, rmpoint The vector of possible types of points will default to the 'names' vector of the argument 'lambda', 'n', or 'f' where appropriate. o rpoislpp Argument 'L' can be omitted when lambda is a 'linim' or 'linfun' o simulate.ppm, simulate.kppm, simulate.lppm, simulate.slrm New argument 'drop': if nsim = 1 and drop=TRUE, the result is a point pattern rather than a list containing one point pattern. o runifdisc, runifpoint, rpoint, rpoispp, rmpoint, rmpoispp, rMaternI, rMaternII, rSSI, rPoissonCluster, rGaussPoisson, rstrat, rsyst, rcell, rthin, rNeymanScott, rMatClust, rThomas, rCauchy, rVarGamma New argument 'drop' o spatstat.options New option 'units.paren' controls the type of parenthesis enclosing the explanatory text about the unit of length, in print.ppm, plot.fv, etc. 
o closepairs, crosspairs New option: what="ijd" returns only the indices i, j and the distance d o rCauchy, rMatClust, rNeymanScott, rPoissonCluster, rThomas, rVarGamma Argument names have changed. BUG FIXES o sumouter A segmentation fault could occur if any data were NA. Fixed. o simulate.kppm Simulation failed for log-Gaussian Cox processes (in simulate.kppm only) with an error message from the RandomFields package. Fixed. o ppm, predict.ppm, profilepl Crashed sometimes with message "interaction evaluator did not return a matrix". Fixed. o lppm step() did not work correctly on 'lppm' objects. Fixed. o quadscheme If quadscheme() was called explicitly, with the stipulated number of tiles exceeding the number of dummy points given, then the quadrature weights were sometimes vastly inflated - total quadrature weight was much larger than window area. Spotted by Ian Renner. Fixed. o predict.rhohat Result was incorrect for data on a non-rectangular window (and a warning was issued about incorrect vector length). Fixed. o Math.im Unary operators did not work (e.g. "-x"). Fixed. o density.ppp Crashed when at="points" if the dataset had exactly 1 point. Fixed. o rSSI Crashed if nsim > 1. Fixed. o influence.ppm, leverage.ppm, dfbetas.ppm Crashed or issued a warning if any quadrature points had conditional intensity zero under the model (negative infinite values of the sufficient statistic). Fixed. o clickppp, clickpoly Did not work correctly in the RStudio display device. Fixed. o Iest Ignored the arguments 'r' and 'eps'. Fixed. o markvario Result was garbled, when X had more than one column of marks. Fixed. o rMatClust, rVarGamma, rCauchy, rNeymanScott Result was a list, but not a 'solist', when nsim > 1. Fixed. o print.mppm, summary.mppm, subfits Crashed if a Poisson interaction was implied but not given explicitly. Fixed. o Kest Crashed if ratio=TRUE and the window was a rectangle. Fixed. 
o anova.ppm Crashed sometimes with message 'models were not all fitted to the same size of dataset'. (This occurred if there were quadrature points with conditional intensity equal to zero in some models but not in all models.) Fixed. o vcov.kppm Occasionally ran out of memory. Fixed. o as.linim.linfun Erroneously converted the pixel values to numeric values. Fixed. o as.owin.layered Ignored layers with zero area. Fixed. o plot.ppm Paused the plot between frames even when there was only one frame. Fixed. o plot.layered Did not allocate space for legends of 'lpp' objects. Fixed. o plot.lpp Ignored symbolmap arguments like 'cex' and confused the arguments 'col' and 'cols'. Fixed. o plot.diagppm Ignored add=TRUE in some cases. Fixed. o iplot.layered Did not handle 'diagramobj' objects correctly. Fixed. o plot.yardstick Changed arguments. CHANGES IN spatstat VERSION 1.40-0 OVERVIEW o We thank Markus Herrmann, Peter Kovesi, Andrew Lister, Enrique Miranda, Tuomas Rajala, Brian Ripley, Dominic Schuhmacher and Maxime Woringer for contributions. o Important bug fixes. o Mathematical operators now apply to images. o Parametric estimates of relative risk from fitted point process models. o Standard errors for relative risk (parametric and non-parametric). o Kernel smoothing and rose diagrams for angular data. o Perceptually uniform colour maps. o Hierarchical interactions for multitype patterns. o Hard core parameters in all interactions no longer need to be specified and will be estimated from data. o Improvements to analysis of deviance and model selection. o New datasets. o New vignette, summarising all datasets installed with spatstat. o Tests and diagnostics now include a Monte Carlo option. o Faster checking of large datasets. o Faster simulations. o Code for drawing diagrams (arrows, scale bars). o Version nickname: 'Do The Maths' NEW DATASETS o bdspots Breakdown spots on microelectronic capacitor electrodes. Generously contributed by Prof Enrique Miranda. 
o Kovesi Colour maps with perceptually uniform contrast. Generously contributed by Peter Kovesi. NEW FUNCTIONS o Mathematical operations are now supported for images. For example: alpha <- atan(bei.extra$grad) * 180/pi See help(Math.im) o relrisk.ppm Spatially-varying probabilities of different types of points predicted by a fitted point process model. o circdensity Kernel density estimate for angular data o rose Rose diagram (rose of directions) for angular data o nnorient Nearest neighbour orientation distribution. o AIC.ppm Calculate AIC of a Gibbs model using Takeuchi's rule. o interp.colours Interpolate a sequence of colour values. o anyDuplicated.ppp, anyDuplicated.ppx Fast replacements for any(duplicated(x)) for point patterns. o textstring, onearrow, yardstick Objects representing a text string, an arrow, or a scale bar, for use in drawing spatial diagrams. o plot.imlist, image.imlist, contour.imlist Methods for the new class 'imlist' o [<-.layered, [[<-.layered More support for class 'layered' SIGNIFICANT USER-VISIBLE CHANGES o (vignettes) New vignette 'datasets' summarises all the datasets installed with the spatstat package. o relrisk The function relrisk is now generic, with methods for ppp and ppm. New argument 'relative' specifies whether to calculate the relative risk or the absolute probability of each type of point. New argument 'se' specifies whether to calculate standard errors. o plot.im The default colour map for plotting images, specified by spatstat.options('image.colfun'), has been changed to a perceptually uniform map. o DiggleGratton, Fiksel, MultiHard, MultiStraussHard The hard core distance parameters in these models can now be omitted by the user, and will be estimated automatically from data (by the 'self-starting' feature of interactions). This was already true of Hardcore and StraussHard. o Hybrid Hybrid models now apply the 'self-starting' feature to each component model. 
o anova.ppm Can now reconcile models fitted using different dummy points, different values of 'rbord', different values of 'use.gam', etc. o profilepl New argument 'aic' makes it possible to optimise the parameters by minimising AIC. o profilepl No longer requires values for parameters which are 'optional' (such as the hard core distance). o rmh, simulate.ppm, rmh.ppm, rmh.default The Metropolis-Hastings algorithm now starts by deleting any points in the initial state that are 'illegal' (i.e. whose conditional intensity is equal to zero). This ensures that the result of rmh never contains illegal points. o runifpoint, rpoispp, rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS, runifdisc, rpoint, rMaternI, rMaternII, rSSI, rPoissonCluster, rGaussPoisson, rstrat, rsyst, rcell, rthin, rjitter, rNeymanScott, rMatClust, rThomas, rCauchy, rVarGamma, rmpoint, rmpoispp, runifpointOnLines, rpoisppOnLines, runiflpp, rpoislpp, runifpointx, rpoisppx, runifpoint3, rpoispp3 These random point pattern generators now have an argument 'nsim' specifying the number of simulated realisations to be generated. o pairorient New argument 'cumulative'. New algorithm to compute kernel estimate of probability density. Default behaviour changed. Argument 'units' has been renamed 'unit' for consistency. Labels and descriptions of columns have been corrected. o predict.ppm New syntax (backward-compatible). New argument 'se' replaces option 'type="se"'. Old argument 'total' is deprecated: use 'window' and set 'type="count"'. o cdf.test The methods for class 'ppm' and 'lppm' now handle Gibbs models and perform a Monte Carlo test in this case. o lurking, diagnose.ppm Lurking variable plot can now include simulation envelopes. o rmh.ppm New argument 'w' determines the window in which the simulated pattern is generated. o ppp Accelerated. 
o Gcom, Gres When conditional=TRUE and restrict=TRUE, the Hanisch estimate was not calculated exactly as described in Appendix E.1 of Baddeley, Rubak and Moller (2011). The intensity was estimated on the full window rather than the eroded window. Fixed. o step, drop1, add1, extractAIC The AIC of a Gibbs model is now calculated using Takeuchi's rule for the degrees of freedom. o model.matrix.ppm, model.matrix.kppm New argument 'Q' allows prediction at any desired locations. o vcov.ppm New argument 'fine' gives more control over computation. o predict.ppm For multitype models, when the result is a list of images, the names of list entries are now identical to the mark levels (e.g. "hickory" instead of "markhickory") o print.slrm Output now respects options('width') o image.listof New argument 'ribmar' controls margin space around the ribbon when equal.ribbon=TRUE. o integral.im New argument 'domain' specifies the domain of integration. o plot.fasp New argument 'transpose' allows rows and columns to be exchanged. o plot.im The list 'ribargs' can now include the parameter 'labels'. o rmh, rpoint, rpoispp, rmpoint, rmpoispp Accelerated, for inhomogeneous processes. o stienen Now recognises the parameter 'lwd'. o suffstat Accelerated (also affects ppm with method='ho'). o Poisson, AreaInter, BadGey, Concom, DiggleGatesStibbard, DiggleGratton, Fiksel, Geyer, Hardcore, Hybrid, LennardJones, MultiHard, MultiStrauss, MultiStraussHard, OrdThresh, Ord, PairPiece, Pairwise, SatPiece, Saturated, Softcore, Strauss, StraussHard, Triplets These functions can now be printed (by typing the function name) to give a sensible description of the required syntax. o fitin A plot of the fitted interpoint interaction of a point process model e.g. plot(fitin(ppm(swedishpines ~ 1, Strauss(9)))) now shows the unit of length on the x-axis. o fitin Plots of the fitted interpoint interaction are now possible for some higher-order interactions such as Geyer and AreaInter. 
o anova.ppm New argument 'warn' to suppress warnings. o rmhmodel.ppm Argument 'win' renamed 'w' for consistency with other functions. o print.ppm Printed output for the fitted regular parameters now respects options('digits'). o print.ppm, print.summary.ppm Output now respects options('width') and spatstat.options('terse') o print.ppm By default, standard errors are not printed for a model fitted with method="logi" (due to computational load) o plot.profilepl Now recognises 'lty', 'lwd', 'col' etc o vesicles, gorillas Some of the raw data files for these datasets are also installed in spatstat for demonstration and training purposes. BUG FIXES o rmh, rmh.ppm, rmh.default, simulate.ppm The result of simulating a model with a hard core did not necessarily respect the hard core constraint, and simulation of a model with strong inhibition did not necessarily converge. This only happened if the first order trend was large, the starting state (n.start or x.start) was not given, and the number of iterations (nrep) was not very large. It occurred because of a poor choice for the default starting state. Bug was present since about 2010. Fixed. o markcorrint Results were completely incorrect. Bug introduced in spatstat 1.39-0, October 2014. Fixed. o Kinhom Ignored argument 'reciplambda2' in some cases. Bug introduced in spatstat 1.39-0, October 2014. Fixed. o relrisk When at="pixels", a small fraction of pixel values were sometimes wildly inaccurate, due to numerical errors. This affected the range of values in the result, and therefore the appearance of plots. Fixed. o model.images Crashed if the model was multitype. Fixed. o profilepl Crashed in some cases when the interaction was multitype. [Spotted by Andrew Lister.] Fixed. o profilepl Crashed if the model involved covariates that were not given in a 'data' argument. Fixed. o envelope.ppm Crashed if global=TRUE and savefuns=TRUE. Fixed. o setminus.owin Crashed if the result was empty and the input was polygonal. Fixed. 
o predict.ppm Crashed sometimes when type="cif" and ngrid was very large. Fixed. o pixelquad If X was a multitype point pattern, the result was mangled. Fixed. o relrisk Did not accept a character string value for the argument 'case'. Fixed. o intensity.ppm Format of result was incorrect for ppm(Y ~ 1) where Y is multitype. Fixed. o $<-.hyperframe Columns containing character values were converted to factors. Fixed. o clickppp Sometimes filled the window in solid black colour. Fixed. o plot.psp Ignored 'show.all' in some cases. Fixed. o plot.ppp Warned about NA values amongst the marks, even if there were no NA's in the column(s) of marks selected by the argument 'which.marks'. Fixed. o stienen Did not suppress the border circles when border=FALSE. Fixed. CHANGES IN spatstat VERSION 1.39-1 OVERVIEW o Urgent bug fix. o We thank Suman Rakshit and Brian Ripley for contributions. BUG FIXES o bdry.mask, convexhull In R-devel only, these functions could return an empty window, causing errors in other packages. [Spotted by Brian Ripley.] Fixed. o project2segment An error occurred if any line segments had length zero. [Spotted by Suman Rakshit.] Fixed. CHANGES IN spatstat VERSION 1.39-0 OVERVIEW o We thank Shane Frank, Shaaban Ghalandarayeshi, Ute Hahn, Mahdieh Khanmohammadi, Nicoletta Nava, Jens Randel Nyengaard, Sebastian Schutte, Rasmus Waagepetersen and Carl G. Witthoft for contributions. o ANOVA extended to Gibbs models. o Improved algorithm for locally-scaled K-function. o Leave-one-out calculation of fitted values in ppm objects. o New dataset: presynaptic vesicles. o Geometrical operations with windows and images. o More edge corrections for K-function. o Improved handling and plotting of 'fv' objects. o Utilities for perspective views of surfaces. o New classes 'anylist', 'solist' will ultimately replace 'listof'. o Bug fixes. 
o Version nickname: 'Smoke and Mirrors' NEW DATASETS o vesicles Synaptic vesicles (includes raw data files for training purposes) NEW CLASSES o anylist List of objects. (A replacement for 'listof') o solist List of two-dimensional spatial objects. (A replacement for some uses of 'listof') NEW FUNCTIONS o perspPoints, perspLines, perspSegments, perspContour Draw points and lines on a surface, as seen in perspective view. o hopskel.test Hopkins-Skellam test of CSR o project2set For each data point in a point pattern X, find the nearest spatial location in a given window W. o stienen, stienenset Stienen diagrams o dirichlet.vertices Vertices of the Dirichlet tessellation o discs Union of discs. Given a point pattern dataset recording the locations and diameters of objects, find the region covered by the objects. o increment.fv Increments of a summary function: g(x) = f(x+h)-f(x-h). o rotmean Rotational average of pixel values in an image o fardist Distance from each pixel/data point to farthest part of window boundary o circumradius.owin Circumradius of a window o rmax.Trans, rmax.Ripley Compute the maximum distance 'r' for which the translation edge correction and isotropic edge correction are valid. o is.grey Determines whether a colour value is a shade of grey. o harmonise Convert several objects of the same class to a common format. (New generic function with existing methods harmonise.im and harmonise.fv) o area New generic function, with methods for 'owin' and 'default'. 
o Fhazard Hazard rate of empty space function o anylist, as.anylist, [.anylist, [<-.anylist, print.anylist, summary.anylist Support for new class 'anylist' o solist, as.solist, [.solist, [<-.solist, print.solist, summary.solist Support for new class 'solist' o plot.anylist, plot.solist Plot methods for the classes 'anylist' and 'solist' (Currently identical to 'plot.listof') SIGNIFICANT USER-VISIBLE CHANGES o anova.ppm Now applies to Gibbs models as well as Poisson models, using adjusted composite likelihood ratio test statistic. o persp.im If visible=TRUE, the algorithm will also calculate which pixels of x are visible in the perspective view. This is useful for drawing points or lines on a perspective surface. o Kscaled Improved algorithm [thanks to Ute Hahn.] New arguments 'renormalise' and 'normpower' allow renormalisation of intensity, similar to Kinhom. o Kest New option: correction="rigid" computes the rigid motion correction. o pairwise interactions Fitted parameters and other calculations for pairwise interaction models DiggleGatesStibbard, DiggleGratton, Fiksel, Geyer, Strauss may change slightly due to a change in handling numerical rounding effects. o eval.fv Functions no longer need to have exactly the same sequence of 'r' values. They will now be made compatible using 'harmonise.fv'. o fitted.ppm New argument 'leaveoneout' allows leave-one-out calculation of fitted intensity at original data points. o Kinhom, Linhom New argument 'leaveoneout' specifies whether the leave-one-out rule should be applied when calculating the fitted intensities. o crosspaircounts Results may change slightly due to a change in handling numerical rounding effects. o Fest, Gest New argument 'domain' supports bootstrap methods. o plot.fv New argument 'mathfont' determines the font (e.g. plain, italic, bold) for mathematical expressions on the axes and in the legend. Defaults to italic. o scanpp Upgraded to handle multiple columns of mark data. 
o circumradius The function 'circumradius' is now generic, with methods for the classes 'owin' and 'linnet'. o edge.Trans New argument 'give.rmax' o fvnames, plot.fv The symbol '.a' is now recognised. It stands for 'all function values'. o as.function.fv Argument 'extrapolate' can have length 1 or 2. o varblock New argument 'confidence' determines the confidence level. o $<-.fv This can now be used to add an extra column to an 'fv' object (previously it refused). o minnndist, maxnndist New argument 'positive'. If TRUE, coincident points are ignored: the nearest-neighbour distance of a point is the distance to the nearest point that does not coincide with the current point. o plot.fv Improved handling of 'shade' argument. o Kmeasure Now passes '...' arguments to as.mask() o Ksector Now allows start < 0. New arguments 'units' and 'domain'. o pairorient New arguments 'units' and 'domain'. o eroded.areas New argument 'subset' o disc New argument 'delta' o plot.plotppm New argument 'pppargs' o harmonise.fv, harmonise.im These are now methods for the new generic 'harmonise' o Fest, Gest These functions now also compute the theoretical value of hazard for a Poisson process, if correction = "km". o with.fv Improved mathematical labels. o Gfox, Jfox Improved mathematical labels. o area.owin This function is now a method for the new generic 'area' o edges Default for argument 'check' changed to FALSE. BUG FIXES o varblock Calculations were incorrect if more than one column of edge corrections was computed. [Bug introduced in spatstat 1.21-1, november 2010.] Fixed. o varblock Crashed if one of the quadrats contained no data points. Fixed. o lohboot Interval was calculated wrongly when global=TRUE and fun="Lest" or "Linhom". Fixed. o nnmark Crashed when at="points" if there was only a single column of marks. [Spotted by Shane Frank.] Fixed. o plot.msr Some elements of the plot were omitted or cut off. Fixed. o plot.msr Did not work with 'equal.scales=TRUE'. Fixed. 
o plot.msr, augment.msr Crashed if every data point was duplicated. Fixed. o as.im.owin Crashed if X was a 1 x 1 pixel array. Fixed. o owin Coordinates of polygon data were altered slightly when fix=TRUE. [Spotted by Carl Witthoft.] Fixed. o objects of class 'fv' Assigning a new value to names(x) or colnames(x) or dimnames(x) would cause the internal data format to become corrupted. Fixed. o to.grey, complementarycolour Did not work properly on 'colourmap' objects. Fixed. o Kest Ignored argument 'var.approx' if the window was a rectangle. Fixed. o rmh.ppm, rmhmodel.ppm Ignored the argument 'new.coef'. [Spotted by Sebastian Schutte] Fixed. o as.function.fv The meanings of 'extrapolate=TRUE' and 'extrapolate=FALSE' were swapped. Fixed. o varblock Handled the case 'fun=Lest' incorrectly. Fixed. o [.fv Sometimes garbled the internal data format, causing plot.fv to crash. Fixed. o range.fv Sometimes returned NA even when na.rm=TRUE. Fixed. o Fest Argument 'eps' was not interpreted correctly. Fixed. o plot.fv Argument 'lwd' was not passed to legend() o flipxy.owin Sometimes deleted the name of the unit of length. Fixed. CHANGES IN spatstat VERSION 1.38-1 OVERVIEW o We thank Ute Hahn and Xavier Raynaud for contributions. o Urgent Bug Fixes. o Nickname: 'Le Hardi' NEW FUNCTIONS o "[<-.fv", "$<-.fv" Subset replacement methods for 'fv' objects. SIGNIFICANT USER-VISIBLE CHANGES o clarkevans.test Simulations are now performed with a fixed number of points. o plot.owin, plot.ppp, plot.psp, plot.im The default size of the outer margin of white space has been reduced. o dclf.test Improved information in printed output. BUG FIXES o update.ppm Results were incorrect in several cases. [Spotted by Xavier Raynaud.] Bug introduced in spatstat 1.38-0. Fixed. o Kinhom, Linhom Calculations were incorrect if 'lambda' was a fitted point process model. [Spotted by Xavier Raynaud.] Bug introduced in spatstat 1.38-0. Fixed. o envelope.envelope Ignored the arguments 'global' and 'VARIANCE'. 
Fixed. o fv objects If 'f' was an object of class 'fv', then an assignment like f$name <- NULL mangled the internal format of the object 'f', leading to errors in print.fv and plot.fv. [Spotted by Ute Hahn.] Fixed. o split.ppp split(X, A) where A is a rectangular tessellation, produced errors if the window of 'A' did not include the window of 'X'. [Spotted by Ute Hahn.] Fixed. o names<-.hyperframe Mangled the internal format. [Spotted by Ute Hahn.] Fixed. o plot.fv y axis label was incorrect in some cases when the 'fv' object had only a single column of function values. [Spotted by Ute Hahn.] Fixed. CHANGES IN spatstat VERSION 1.38-0 OVERVIEW o We thank Malissa Baddeley, Colin Beale, Oscar Garcia, Daniel Esser, David Ford, Eric Gilleland, Andrew Hardegen, Philipp Hunziker, Abdollah Jalilian, Tom Lawrence, Lore De Middeleer, Robin Milne, Mike Porter, Suman Rakshit, Pablo Ramon, Jason Rudokas, Christopher Ryan, Dominic Schuhmacher, Medha Uppala and Rasmus Waagepetersen for contributions. o spatstat now Requires the package 'goftest' and Suggests the package 'Matrix'. o New dataset: 'sporophores' o Palm likelihood method for fitting cluster processes and Cox processes. o Quasi-likelihood and weighted composite likelihood methods for estimating trend in cluster processes and Cox processes. o Further extensions to model formulas in ppm and kppm. o Faster variance calculations for ppm objects. o One-sided tests and one-sided envelopes of summary functions. o Cramer-Von Mises and Anderson-Darling tests of spatial distribution. o Cressie-Read test statistic in quadrat counting tests. o Spatial cumulative distribution functions. o Faster algorithm for point pattern matching. o Improvements to plots. o Increased support for envelopes. o New generic functions 'Window', 'Frame' and 'domain'. o Directional K-function and directional distribution. o Raster calculations accelerated. o Summary functions accelerated. o Many improvements and bug fixes. 
o Version nickname: 'Wicked Plot' NEW DATASETS o sporophores Spatial pattern of three species of mycorrhizal fungi around a tree. [Contributed by E. David Ford.] NEW FUNCTIONS o improve.kppm Re-estimate the trend in a kppm (cluster or Cox) model using quasi-likelihood or weighted first-order composite likelihood. [Contributed by Abdollah Jalilian and Rasmus Waagepetersen.] o Window, Window<- Generic functions to extract and change the window of a spatial object in two dimensions. Methods for ppp, psp, im, and many others. o Frame, Frame<- Generic functions to extract and change the containing rectangle ('frame') of a spatial object in two dimensions. o domain Generic function to extract the spatial domain of a spatial object in any number of dimensions. o Ksector Directional version of the K-function. o pairorient Point pair orientation distribution. o spatialcdf Compute the spatial cumulative distribution of a spatial covariate, optionally using spatially-varying weights. o cdf.test [Supersedes 'kstest'.] Test of goodness-of-fit of a Poisson point process model. The observed and predicted distributions of the values of a spatial covariate are compared using either the Kolmogorov-Smirnov, Cramer-Von Mises or Anderson-Darling test. o berman.test Replaces 'bermantest'. o harmonise.fv Make several functions compatible. o simulate.lppm Simulate a fitted point process model on a linear network. o subset.ppp, subset.lpp, subset.pp3, subset.ppx Methods for 'subset', for point patterns. o closepairs.pp3, crosspairs.pp3 Low-level functions to find all close pairs of points in three dimensions o volume.linnet Method for the generic 'volume'. Returns the length of the linear network. o padimage Pad the border of a pixel image. o as.layered Convert spatial data to a layered object. o panel.contour, panel.image, panel.histogram Panel functions for 'pairs' plots. o range.fv, min.fv, max.fv Range, minimum and maximum of function values. 
SIGNIFICANT USER-VISIBLE CHANGES o ppm.formula The left hand side of the formula can now be the name of an object in the list 'data', or an expression involving such objects. o ppm The right hand side of the formula can now include the symbol '.' representing all covariates in the list 'data'. o ppm New argument 'subset' makes it possible to fit the model in a subset of the spatial domain defined by an expression. o kppm New option: method="palm", will fit the model by maximising Palm likelihood. o pppdist Substantially accelerated. New argument 'auction' controls choice of algorithm. o rhohat New arguments 'weights' and 'horvitz' for weighted calculations. o persp.im Surface heights and colours can now be controlled by different images. Option to draw a grey apron around the sides of the perspective plot. Return value has a new attribute 'expand'. o plot.listof New arguments 'halign' and 'valign' give improved control over the alignment of panels. o plot.listof If all components of the list are objects of class 'fv' representing functions, then if equal.scales=TRUE, these functions will all be plotted with the same axes (i.e. the same xlim and the same ylim). o envelope The argument 'transform' is now processed by 'with.fv' giving more options, such as 'transform=expression(. - r)' o envelope, dclf.test, mad.test One-sided tests and one-sided envelopes can now be produced, by specifying the argument 'alternative'. o dclf.test, mad.test A pointwise test at fixed distance 'r' can now be performed by setting rinterval = c(r,r). o envelope New arguments 'fix.n' and 'fix.marks' for envelope.ppp and envelope.ppm make it easy to generate simulated patterns conditional on the total number of points, or on the number of points of each type. o quadrat.test Can now calculate the Cressie-Read test statistic instead of the Pearson X2 statistic. 
o Kres, Gres, Kcom, Gcom, psst, psstA, psstG New argument 'model' makes it easier to generate simulation envelopes of the residual summary functions. o layered, plot.layered The layer plot arguments can include the argument '.plot' specifying a function to perform the plotting instead of the generic 'plot'. o deriv.fv New arguments make it possible to differentiate a periodic function. o ppm Argument 'data' or 'covariates' can now include entries which are not spatial covariates, provided they do not appear in the model formula. o closepairs, crosspairs These functions are now generic, with methods for 'ppp' and 'pp3' o rLGCP Updated to conform to new usage of RandomFields package. Argument syntax has changed. Now allows control over pixel resolution. o bw.diggle New arguments 'correction' and 'hmax' for controlling the calculation. o predict.lppm New argument 'new.coef' for computing predictions with a different vector of model coefficients. o predict.ppm If 'locations' is a pixel image, its pixels determine the spatial locations for prediction. o cut.ppp Argument 'z' can now be a window. o split.ppp Argument 'f' can now be a window. o print.ppm, summary.ppm, coef.summary.ppm The table of parameter estimates, standard errors and confidence intervals now also includes the value of the (signed square root) Wald test statistic. o plot.im Now automatically detects problems in some Windows graphics displays and tries to avoid them. o plot.im The position of axis tick marks alongside the colour ribbon can now be controlled using the parameter 'at' in the argument 'ribargs'. o plot.ppp Can now plot numeric marks using characters chosen by 'pch' or 'chars' with size determined by mark value. o plot.ppp New argument 'meansize' for controlling mark scale. o hsvim, rgbim New argument 'autoscale' causes automatic scaling of colour channel values. o plot.ppp If type='n', a legend is now displayed when x is a marked point pattern. o whist Accelerated by a factor of 5. 
o Fest, Jest Accelerated by a factor of 2 to 3. o fryplot Accelerated. Now displays a legend if the point pattern is marked. Now handles numerical marks nicely. New argument 'axes'. o frypoints Accelerated. New arguments 'to', 'from' and 'dmax'. o duplicated.ppp New option: rule = 'unmark' o rjitter Argument 'radius' now has a default. o Smooth.msr New argument 'drop' o LambertW Now handles NA and infinite values. o update.ppm Now handles formulae with a left-hand side. o raster.x, raster.y, raster.xy These functions can now handle images, as well as masks. o Smooth.ppp If the mark values are exactly constant, the resulting smoothed values are now exactly constant. o eval.im, eval.fv, eval.fasp Argument 'envir' can now be a list, instead of an environment. o plot.ppp The printout (of the resulting symbol map object) now displays the numerical value of the mark scale. o with.fv Improved mathematical labels. o plot.fv Improved mathematical labels on x axis. o ppm Improved error messages. o vcov.ppm Computations greatly accelerated for Hybrid interactions and for Area-interaction models. o vcov.kppm Computations greatly accelerated (when fast=TRUE) o interp.im Argument 'x' can now be a point pattern. o pool.envelope Improved handling of text information. o miplot Improved layout. o print.summary.ppp Improved layout. Now respects spatstat.options('terse') o print.profilepl Improved layout. Now respects spatstat.options('terse') o anova.ppm Now respects spatstat.options('terse') o print.fv, print.envelope Now respect spatstat.options('terse') and options('width') o summary.envelope Now respects options('width') o kstest, bermantest These functions will soon be Deprecated. They are retained only for backward compatibility. BUG FIXES o vcov.ppm Sometimes gave wrong answers for Poisson models fitted by method='logi'. Fixed. o unnormdensity If weights were missing, the density was normalised, contrary to the documentation. Fixed. 
o logLik.ppm, anova.ppm, AIC For models fitted by 'ippm', the number of degrees of freedom was incorrect. Fixed. o im.apply Pixels outside the window were not assigned the value NA as they should. Fixed. o pixellate.owin Crashed, unpredictably, if the pixel raster had unequal numbers of rows and columns. [Spotted by Rasmus Waagepetersen.] Fixed. o vcov.ppm Crashed for pairwise interaction models fitted by method="logi". Fixed. o predict.ppm Crashed for models fitted by method="logi" if the model included external covariates. Fixed. o predict.ppm Crashed if the argument 'covariates' or 'data' in the original call to 'ppm' included entries that were not spatial covariates. [These entries were ignored by ppm but caused predict.ppm to crash.] Fixed. o simulate.kppm, rNeymanScott, rThomas, rMatClust Crashed randomly when simulating an inhomogeneous model. [Spotted by Philipp Hunziker.] Fixed. o bw.diggle In some extreme cases, generated an error message about `NaN values in Foreign function call.' [Spotted by Colin Beale.] Fixed. o textureplot Crashed if 'spacing' was too large. Fixed. o superimpose.psp Crashed if the result was empty. Fixed. o istat Crashed with an error message about 'vars'. Fixed. o dirichlet, delaunay, delaunay.distance Crashed in rare cases due to a problem in package 'deldir'. [Spotted by Pierre Legendre.] Fixed. o rgbim, hsvim Crashed if any argument was constant. Fixed. o scaletointerval Crashed if x was constant. Fixed. o linnet, [.linnet Crashed if the result contained only a single vertex. [Spotted by Daniel Esser.] Fixed. o plot.fv If some of the function values were NA, they were replaced by fictitious values (by linearly interpolating). Fixed. o crossdist.ppp Ignored argument 'squared' if periodic=FALSE. [Spotted by Mike Porter.] Fixed. o marks<-.ppp Ignored argument 'drop'. [Spotted by Oscar Garcia.] Fixed. o update.ppm Sometimes did not respect the argument 'use.internal'. Fixed. o plot.rhohat Did not respect the argument 'limitsonly'. 
Fixed. o contour.im Argument 'axes' defaulted to TRUE, but FALSE was intended. Fixed. o print.hyperframe, as.data.frame.hyperframe Column names were mangled if the hyperframe had a single row. Fixed. o as.psp.data.frame Generated a warning about partially-matched names in a data frame. [Spotted by Eric Gilleland.] Fixed. o plot.leverage.ppm Generated a warning from 'contour.default' if the leverage function was constant. Fixed. o plot.diagppm Issued warnings about unrecognised graphics parameters. Fixed. o update.symbolmap Discarded information about the range of input values. Fixed. o plot.fv Label for y axis was garbled, if argument 'shade' was given. Fixed. o plot.ppp The legend was sometimes plotted when it should not have been (e.g. when add=TRUE). Fixed. o plot.listof, plot.im In an array of plots, containing both images and other spatial objects, the titles of the panels were not correctly aligned. Fixed. o plot.tess, plot.quadratcount Ignored arguments like 'cex.main'. Fixed. o iplot Navigation buttons (Left, Right, Up, Down, Zoom In, Zoom Out) did not immediately refresh the plot. Fixed. o iplot.layered Reported an error 'invalid argument type' if all layers were deselected. Fixed. CHANGES IN spatstat VERSION 1.37-0 OVERVIEW o Ege Rubak is now a joint author of spatstat. o We thank Peter Forbes, Tom Lawrence and Mikko Vihtakari for contributions. o Spatstat now exceeds 100,000 lines of code. o New syntax for point process models (ppm, kppm, lppm) equivalent to syntax of lm, glm, ... o Covariates in ppm and kppm can now be tessellations. o Confidence intervals and prediction intervals for fitted models. o Quasirandom point patterns and sequences. o Plots using texture fill. o Support for mappings from data to graphical symbols and textures. o Automatic re-fitting of model in Ginhom, Kinhom, Finhom, Jinhom. o Support for Mixed Poisson distribution. 
o Interpretation of mark scale parameters has changed in plot.ppp o Syntax of multitype interactions (eg MultiStrauss) has changed. o Bug fix in Metropolis-Hastings simulation of 'StraussHard' models o Changed default behaviour of perfect simulation algorithms. o Improvements to layout of text output. o Version nickname: 'Model Prisoner' NEW CLASSES o symbolmap An object of class 'symbolmap' represents a mapping from data to graphical symbols o texturemap An object of class 'texturemap' represents a mapping from data to graphical textures. NEW FUNCTIONS o split.hyperframe, split<-.hyperframe methods for split and split<- for hyperframes. o dmixpois, pmixpois, qmixpois, rmixpois (log-)normal mixture of Poisson distributions. o vdCorput, Halton, Hammersley, rQuasi quasirandom sequences and quasirandom point patterns. o Smoothfun create a function(x,y) equivalent to the result of Smooth.ppp o minnndist, maxnndist Faster ways to compute min(nndist(X)), max(nndist(X)) o add.texture Draw a simple texture inside a specified region. o textureplot Display a factor-valued pixel image using texture fill. o texturemap Create a texture map o plot.texturemap Plot a texture map in the style of a legend o symbolmap Create a symbol map o update.symbolmap Modify a symbol map o invoke.symbolmap Apply symbol map to data values, and plot them o plot.symbolmap Plot the symbol map in the style of a legend o as.owin.boxx Converts a 'boxx' to an 'owin' if possible. o ellipse Create an elliptical window. o clickbox Interactively specify a rectangle, by point-and-click on a graphics device. o complementarycolour Compute the complementary colour value of a given colour value, or the complementary colour map of a given colour map. o gauss.hermite Gauss-Hermite quadrature approximation to the expectation of any function of a normally-distributed random variable. o boundingbox Generic function, replaces bounding.box o edges Extract boundary edges of a window. 
Replaces and extends 'as.psp.owin' o pixelcentres Extract centres of pixels as a point pattern. SIGNIFICANT USER-VISIBLE CHANGES o ppm, kppm, lppm NEW SYNTAX FOR POINT PROCESS MODELS The model-fitting functions 'ppm', 'kppm' and 'lppm' now accept a syntax similar to 'lm' or 'glm', for example ppm(X ~ Z), but still accept the older syntax ppm(X, ~Z). To support both kinds of syntax, the functions 'ppm' and 'kppm' are now generic, with methods for the classes 'formula', 'ppp' and 'quad'. The 'formula' method handles a syntax like ppm(X ~ Z) while the 'ppp' method handles the old syntax ppm(X, ~Z). Similarly 'lppm' is generic with methods for 'formula' and 'lpp'. o ppm, kppm, lppm Covariates appearing in the model formula can be objects which exist in the R session, instead of always having to be elements of the list `covariates'. o ppm.formula, kppm.formula, lppm.formula Formulae involving polynom() are now expanded, symbolically, so that polynom(x, 3) becomes x + I(x^2) + I(x^3) and polynom(x,y,2) becomes x + y + I(x^2) + I(x*y) + I(y^2). This neatens the model output, and also makes it possible for anova() and step() to add or delete single terms in the polynomial. o predict.ppm New argument 'interval' allows confidence intervals or prediction intervals to be calculated. o predict.ppm New argument 'total' allows for prediction of the total number of points in a specified region. o plot.ppp, plot.lpp For marked point patterns, a legend is automatically added to the plot, by default. Arguments have changed: new arguments include parameters of the legend, and an optional symbol map. Result has changed: it is now an object of class 'symbolmap'. o plot.ppp, plot.lpp Interpretation of the parameters 'markscale' and 'maxsize' has changed. The size of a circle in the plot is now defined as the circle's diameter instead of its radius. (Size of a square is measured, as before, by its side length). 
o parres Now handles the case where the fitted model is not separable but its restriction to the given 'subregion' is separable. o envelope Now issues a warning if the usage of envelope() appears to be `invalid' in the sense that the simulated patterns and the data pattern have not been treated equally. o Kinhom, Finhom, Ginhom, Jinhom New argument 'update'. If 'lambda' is a fitted model (class ppm or kppm) and update=TRUE, the model is re-fitted to the data pattern, before the intensities are computed. o rDiggleGratton, rDGS, rHardcore, rStrauss, rStraussHard By default the point pattern is now generated on a larger window, and trimmed to the original window. New argument expand=TRUE. o MultiStrauss, MultiHard, MultiStraussHard The syntax of these functions has changed. The new code should still accept the old syntax. o rhohat rhohat.ppp and rhohat.quad have new argument 'baseline' o ippm Algorithm improved. Argument syntax changed. o default.dummy, quadscheme Dummy points can now be generated by a quasirandom sequence. o plot.owin The window can now be filled with one of 8 different textures. Arguments changed. o ppm, kppm Covariates in the model can now be tessellations. o [.im New argument 'tight' allows the resulting image to be trimmed to the smallest possible rectangle. o [.psp, rlinegrid, rpoisline These functions now handle binary mask windows. o rotate The user can specify the centre of rotation. o rescale rescale() and all its methods now have argument 'unitname' which can be used to change the name of the unit of length. o anova.ppm Output format has been improved. Number of columns of result has changed. o print.ppp, print.summary.ppp, print.owin, print.summary.owin, print.im, print.summary.im, print.fv, print.msr, print.profilepl These functions now avoid over-running the text margin (i.e. they respect options('width') where possible). o layerplotargs<- Now handles any spatial object, converting it to a 'layered' object. 
o effectfun Improved display in case se.fit=TRUE. o scaletointerval New argument 'xrange' o contour.im New argument 'show.all'. Default value of 'axes' changed to FALSE. o identify.ppp Now handles multivariate marks. o plot.listof Improved layout. New arguments 'hsep', 'vsep'. Argument 'mar.panel' may have length 1, 2 or 4. o plot.splitppp This function is no longer identical to plot.listof. Instead it is a much simpler function which just calls plot.listof with equal.scales=TRUE. o anova.ppm Output is neater. o plot.layered New argument 'do.plot' o plot.psp New argument 'do.plot' o as.psp.owin New argument 'window' o plot.im, contour.im, textureplot New argument 'clipwin' o plot.ppp New argument 'clipwin' o plot.msr New argument 'how' allows density to be plotted as image and/or contour o diagnose.ppm, plot.diagppm More options for 'plot.neg' o plot.leverage.ppm, plot.influence.ppm, plot.msr Argument 'clipwin' can now be used to restrict the display to a subset of the full data. o [.hyperframe, [<-.hyperframe, $.hyperframe, $<-.hyperframe These functions are now documented. o leverage.ppm, influence.ppm, dfbetas.ppm Resulting objects are now smaller (in memory size). o print.ppm Now indicates whether the irregular parameters 'covfunargs' were optimised (by profilepl or ippm) or whether they were simply provided by the user. o plot.ppp A point pattern with numerical marks can now be plotted as filled dots with colours determined by the marks, by setting pch=21 and bg= o colourmap Now handles dates and date-time values (of class 'Date' or 'POSIXt'). o plot.ppp, print.ppp, summary.ppp Improved handling of dates and date-time values (of class 'Date' or 'POSIXt') in the marks of a point pattern. o cut.im Now refuses to handle images whose pixel values are factor, logical or character. o centroid.owin New argument 'as.ppp' o superimpose Improved default names for columns of marks. o Softcore() Improved printout. 
o kppm, lgcp.estpcf, lgcp.estK Adjusted to new structure of RandomFields package. No change in syntax. o data(murchison) This dataset now belongs to class 'listof' so that it can be plotted directly. o data(clmfires) The format of the covariate data has changed. The objects 'clmcov100' and 'clmcov200' are now elements of a list 'clmfires.extra'. o bounding.box This function is now Deprecated; it has been replaced by the generic boundingbox(). o as.psp.owin This function is now Deprecated; it has been replaced and extended by the function edges(). o plot.kstest Changed defaults so that the two curves are distinguishable. o with.fv Improved mathematical labels. BUG FIXES o intensity.quadratcount Values were incorrect for a rectangular tessellation (the matrix of intensities was transposed). Fixed. o rmh, simulate.ppm Simulation of the Strauss-hard core model (StraussHard) was incorrect (intensity of the simulated process was about 15% too low). Bug introduced in spatstat 1.31-0 (January 2013). Fixed. o intensity.quadratcount Crashed for a rectangular tessellation with only a single row or column. Fixed. o model.images.ppm Crashed sometimes if the argument W was given. Fixed. o eval.im Crashed when applied to images with only a single row or column. Fixed. o ppp, marks<-.ppp If the marks were a vector of dates, they were erroneously converted to numbers. Fixed. o ippm Crashed if the model formula included an offset term that was not a function. Fixed. o leverage.ppm Crashed sometimes when the model had irregular parameters ('covfunargs'). Fixed. o residuals.ppm Crashed sometimes when type='score'. Fixed. o scaletointerval Did not handle dates and date-time values correctly. Fixed. o rbind.hyperframe, as.list.hyperframe Gave incorrect results for hyperframes with 1 row. Fixed. o Kinhom Did not renormalise the result (even when renormalise=TRUE), in some cases. Spotted by Peter Forbes. Fixed. o disc If mask=TRUE the disc was erroneously clipped to the square [-1,1] x [-1,1]. 
Fixed. o plot.fv Sometimes shaded the wrong half of the graph when the 'shade' coordinates were infinite. Fixed. o print.ppm Gave an error message if the coefficient vector had length zero. Fixed. o vcov.ppm Gave an error message if the coefficient vector had length zero. Fixed. o plot.distfun, as.im.distfun These functions effectively ignored the argument 'invert' in the original call to distfun. Fixed. o plot.msr Ignored certain additional arguments such as 'pch'. Fixed. o cut.im Crashed if the image had 1 row or 1 column of pixels. Fixed. o iplot.ppp Crashed with message about missing object 'vals'. Fixed. o effectfun Demanded a value for every covariate supplied in the original call to ppm, even for covariates which were not used in the model. Fixed. o plot.listof, plot.hyperframe When plotting 3D point patterns (class pp3), these functions issued warnings about 'add' being an unrecognised graphics argument. Fixed. CHANGES IN spatstat VERSION 1.36-0 OVERVIEW o We thank Sebastian Meyer, Kevin Ummer, Jean-Francois Coeurjolly, Ege Rubak, Rasmus Waagepetersen, Oscar Garcia and Sourav Das for contributions. o Important change to package dependencies. o Geometrical inconsistencies in polygons are now repaired automatically. o Improved quadrature schemes and reduced bias in ppm. o New vignette 'Summary of Recent Changes to Spatstat'. o Approximation to K function and pcf for Gibbs models. o Counterpart of 'apply' for lists of images. o Hexagonal grids and tessellations. o Extensions to scan test and Allard-Fraley cluster set estimator. o Change the parameters of a fitted model before simulating it. o Accelerated Kest, Kinhom for rectangular windows. o Extensions and improvements to plotting functions. o Improvements to labelling of 'fv' objects. o New demo of summary functions. o More methods for 'intensity'. 
o Version nickname: 'Intense Scrutiny' NEW FUNCTIONS o Kmodel.ppm, pcfmodel.ppm Compute approximation to K-function or pair correlation function of a Gibbs point process model. o im.apply Apply a function to corresponding pixel values in several images. o hexgrid, hextess Create a hexagonal grid of points, or a tessellation of hexagonal tiles o shift.tess, rotate.tess, reflect.tess, scalardilate.tess, affine.tess Apply a geometrical transformation to a tessellation. o quantile.ewcdf Extract quantiles from a weighted cumulative distribution function. o scanLRTS Evaluate the spatially-varying test statistic for the scan test. o pcfmulti General multitype pair correlation function o intensity.splitppp Estimate intensity in each component of a split point pattern. o intensity.quadratcount Use quadrat counts to estimate intensity in each quadrat. o as.owin.quadratcount, as.owin.quadrattest Extract the spatial window in which quadrat counts were performed. o reload.or.compute Utility function for R scripts: either reload results from file, or compute them. o to.grey Convert colour to greyscale. o Smooth.im Method for Smooth() for pixel images. Currently identical to blur(). o demo(sumfun) Demonstration of nonparametric summary functions in spatstat. SIGNIFICANT USER-VISIBLE CHANGES o Package Dependencies spatstat now "Imports" (rather than "Depends" on) the libraries mgcv, deldir, abind, tensor, polyclip. This means that these libraries are not accessible to the user unless the user explicitly loads them by typing 'library(mgcv)' and so on. o owin, as.owin Polygon data are no longer subjected to strict checks on geometrical validity (self-crossing points, overlaps etc.) Instead, polygon geometry is automatically repaired. o ppm The default quadrature scheme for a point pattern has been improved (in the case of a non-rectangular window) to remove a possible source of bias. o Performance various parts of spatstat now run slightly faster. 
o scan.test Now handles multiple values of circle radius 'r'. o plot.scan.test, as.im.scan.test These functions can now give the optimal value of circle radius 'r'. o pcfcross, pcfdot Algorithms have been reimplemented using a single-pass kernel smoother and now run much faster. Bandwidth selection rule improved. o plot.listof, plot.splitppp Default behaviour has changed: panels are now plotted on different scales. o plot.listof, plot.splitppp When 'equal.scales=TRUE' the panels are plotted on exactly equal scales and are exactly aligned (under certain conditions). o ppp, marks.ppp, marks<-.ppp New argument 'drop' determines whether a data frame with a single column will be converted to a vector. o simulate.ppm, rmh.ppm, rmhmodel.ppm New argument 'new.coef' allows the user to change the parameters of a fitted model, before it is simulated. o logLik.ppm New argument 'new.coef' allows the user to evaluate the loglikelihood for a different value of the parameter. o clusterset The argument 'result' has been renamed 'what'. It is now possible to give multiple values to 'what' so that both types of result can be computed together. o residuals.ppm Argument 'coefs' has been renamed 'new.coef' for consistency with fitted.ppm etc. o residuals.ppm If drop=TRUE the window associated with the residuals is now taken to be the domain of integration of the composite likelihood. o intensity.ppp Now has argument 'weights' o density.ppp, Smooth.ppp, markmean, markvar, intensity.ppp Argument 'weights' can now be an 'expression'. o pcf New argument 'domain' causes the computation to be restricted to a subset of the window. o nnclean The result now has attributes which give the fitted parameter values, information about the fitting procedure, and the histogram bar heights. o nnclean Extra arguments are now passed to hist.default. o plot.tess For a tessellation represented by a pixel image, plot.tess no longer treats the pixel labels as palette colours. 
o relrisk New argument 'case' allows the user to specify which mark value corresponds to the cases in a case-control dataset. o Kinhom Now accepts correction="good" o spatstat.options New option ('monochrome') controls whether plots generated by spatstat will be displayed in colour or in greyscale. This will eventually be applied to all plot commands in spatstat. o plot.im, persp.im, contour.im, plot.owin, plot.psp, plot.fv, plot.fasp These functions now obey spatstat.options('monochrome') o plot.ppp, plot.owin, plot.im, plot.psp, plot.tess, plot.layered New universal argument 'show.all' determines what happens when a plot is added to an existing plot. If show.all = TRUE then everything is plotted, including the main title and colour ribbon. o plot.ppp New argument 'show.window' o plot.im New arguments 'add' and 'do.plot'. More arguments recognised by 'ribargs' o plot.layered New arguments 'add', 'main' Better argument handling. o plot.fv Improved handling of argument 'shade' o layered, layerplotargs, plot.layered The plotting argument can now be a list of length 1, which will be replicated to the correct length. o varblock Ugly legends have been repaired. o quad.ppm New argument 'clip' o edge.Trans New arguments 'dx', 'dy' o disc Argument 'centre' can be in various formats. o affine, shift Argument 'vec' can be in various formats. o Geyer, BadGey A warning is no longer issued when the parameter 'sat' is fractional. o adaptive.density Now has argument 'verbose' o Smooth.ppp 'sigma' is now a formal argument of Smooth.ppp o plot.quadratcount, plot.quadrattest These functions have now been documented. o Summary functions and envelopes Improved mathematical labels in plots. o Kest Accelerated, in the case of a rectangular window. o Kscaled Argument 'lambda' can now be a fitted model (class ppm) o print.fv Improved layout. o plot.bermantest Improved graphics. o which.max.im This function is now deprecated. 
which.max.im(x) is superseded by im.apply(x, which.max) o smooth.ppp, smooth.fv, smooth.msr These functions are now deprecated, in favour of 'Smooth' with a capital 'S' BUG FIXES o bw.ppl Crashed if the point pattern had multiple points at the same location. Fixed. o quantile Crashed when applied to the result of 'ewcdf'. Fixed. o marks<-.ppp Crashed with a message about 'unrecognised format' if the current or replacement values of marks were date/time values (belonging to class 'Date' or 'POSIXt'). Fixed. o plot.im Crashed in case log=TRUE if the window was not a rectangle. Fixed. o vcov.ppm Crashed sometimes for models with a hard core term (Hardcore, StraussHard, MultiHard or MultiStrauss interactions). Spotted by Rasmus Waagepetersen. Fixed. o multiplicity.data.frame Results were incorrect and included NA's (spotted by Sebastian Meyer). Fixed. o markvar Values were incorrect. Fixed. o Smooth.ppp Ignored argument 'diggle'. Fixed. o rotate.im, affine.im Factor-valued images were not handled correctly. Fixed. o shift.layered If argument 'origin' was used, different layers were shifted by different amounts. Fixed. o tile.areas Sometimes returned a list instead of a numeric vector. Fixed. o print.ppp If the marks were date/time values (belonging to class 'Date' or 'POSIXt'), print.ppp reported that they were double precision numbers. Fixed. o plot.layered Graphics were mangled if the argument 'add=FALSE' was given explicitly. Fixed. o Smooth.ppp The argument 'sigma' was only recognised if it was explicitly named. For example in 'Smooth(X, 5)' the '5' was ignored. Fixed. o clusterset The bounding frame of the result was smaller than the original bounding frame of the point pattern dataset, when result="domain" and exact=TRUE. Fixed. o plot.im Ignored argument 'col' if it was a 'function(n)'. Fixed. o Kinhom Ignored argument 'correction' if there were more than 1000 points. Fixed. o [.fv Mangled the plot label for the y axis. Fixed. 
o cbind.fv Mangled the plot label for the y axis. Fixed. o plot.envelope Main title was always 'x'. Fixed. o print.ppp Ran over the right margin. Fixed. o union.owin, intersect.owin, setminus.owin Sometimes deleted the name of the unit of length. Fixed. CHANGES IN spatstat VERSION 1.35-0 OVERVIEW o We thank Melanie Bell, Leanne Bischof, Ida-Maria Sintorn, Ege Rubak, Martin Hazelton, Oscar Garcia, Rasmus Waagepetersen, Abdollah Jalilian and Jens Oehlschlaegel for contributions. o Support for analysing replicated spatial point patterns. o New vignette on analysing replicated spatial point patterns. o Objective function surface plots. o Estimator of point process intensity using nearest neighbour distances. o Improved estimator of pair correlation function. o Four new datasets. o Simple point-and-click interface functions for general use. o More support for fv objects. o More support for ppx objects. o Extensions to nearest neighbour functions. o Morphological operations accelerated. o Bug fix to pair correlation functions. o Bug fix to k-th nearest neighbour distances o Version nickname: 'Multiple Personality' NEW CLASSES o mppm An object of class 'mppm' represents a Gibbs point process model fitted to several point pattern datasets. The point patterns may be treated as independent replicates of the same point process, or as the responses in an experimental design, so that the model may depend on covariates associated with the design. Methods for this class include print, plot, predict, anova and so on. o objsurf An object of class 'objsurf' contains values of the likelihood or objective function in a neighbourhood of the maximum. o simplepanel An object of class 'simplepanel' represents a spatial arrangement of buttons that respond to mouse clicks, supporting a simple, robust graphical interface. NEW FUNCTIONS o mppm Fit a Gibbs model to several point patterns. 
The point pattern data may be organised as a designed experiment and the model may depend on covariates associated with the design. o anova.mppm Analysis of Deviance for models of class mppm o coef.mppm Extract fitted coefficients from a model of class mppm o fitted.mppm Fitted intensity or conditional intensity for a model of class mppm o kstest.mppm Kolmogorov-Smirnov test of goodness-of-fit for a model of class mppm o logLik.mppm log likelihood or log pseudolikelihood for a model of class mppm o plot.mppm Plot the fitted intensity or conditional intensity of a model of class mppm o predict.mppm Compute the fitted intensity or conditional intensity of a model of class mppm o quadrat.test Quadrat counting test of goodness-of-fit for a model of class mppm o residuals.mppm Point process residuals for a model of class mppm o subfits Extract point process models for each individual point pattern dataset, from a model of class mppm o vcov.mppm Variance-covariance matrix for a model of class mppm o integral.msr Integral of a measure. o objsurf For a model fitted by optimising an objective function, this command computes the objective function in a neighbourhood of the optimal value. o contour.objsurf, image.objsurf, persp.objsurf, plot.objsurf Plot an 'objsurf' object. o fvnames Define groups of columns in a function value table, for use in plot.fv, etc o multiplicity New generic function for which multiplicity.ppp is a method. o unique.ppx, duplicated.ppx, multiplicity.ppx Methods for unique(), duplicated() and multiplicity() for 'ppx' objects. These also work for 'pp3' and 'lpp' objects. o closepairs, crosspairs, closepaircounts, crosspaircounts Low-level functions for finding all close pairs of points o nndensity Estimate point process intensity using k-th nearest neighbour distances o simplepanel, run.simplepanel Support for a simple point-and-click interface for general use. NEW DATASETS o pyramidal Diggle-Lange-Benes data on pyramidal neurons in cingulate cortex. 
31 point patterns divided into 3 groups. o waterstriders Nummelin-Penttinen waterstriders data. Three independent replicates of a point pattern formed by insects. o simba Simulated data example for mppm. Two groups of point patterns with different interpoint interactions. o demohyper Simulated data example for mppm. Point patterns and pixel image covariates, in two groups with different regression coefficients. SIGNIFICANT USER-VISIBLE CHANGES o plot.hyperframe The argument 'e' now has a different format. Instead of plot(h, plot(XYZ)) one must now type plot(h, quote(plot(XYZ))) This is necessary in order to avoid problems with 'S4 method dispatch'. o pcf.ppp, pcfinhom New argument 'divisor' enables better performance of the estimator of pair correlation function for distances close to zero. o applynbd The arguments N, R and criterion may now be specified together. o markstat The arguments N and R may now be specified together. o ppx New argument 'simplify' allows the result to be converted to an object of class 'ppp' or 'pp3' if appropriate. o as.function.fv Now allows multiple columns to be interpolated o multiplicity.ppp This function is now a method for the generic 'multiplicity'. It has also been accelerated. o nnfun.ppp, distfun.ppp New argument 'k' allows these functions to compute k-th nearest neighbours. o rVarGamma, kppm, vargamma.estK, vargamma.estpcf New argument 'nu.pcf' provides an alternative way to specify the kernel shape in the VarGamma model, instead of the existing argument 'nu.ker'. Function calls that use the ambiguous argument name 'nu' will no longer be accepted. o nnmap Image is now clipped to the original window. o dilation, erosion, opening, closing Polygonal computations greatly accelerated. o plot.colourmap Improved appearance and increased options, for discrete colourmaps. 
o plot.msr Improved appearance o plot.ppp, plot.owin An `empty' plot can now be generated by setting type="n" o nndist.ppp, nnwhich.ppp, nncross.ppp Column names of the result are now more informative. BUG FIXES o nncross.ppp Results were completely incorrect when k > 1. Spotted by Jens Oehlschlaegel. Bug was introduced in spatstat 1.34-1. Fixed. o rVarGamma Simulations were incorrect; they were generated using the wrong value of the parameter 'nu.ker'. Spotted by Rasmus Waagepetersen and Abdollah Jalilian. Bug was always present. Fixed. o pair correlation functions (pcf.ppp, pcfdot, pcfcross, pcfinhom, ...) The result had a negative bias at the maximum 'r' value, because contributions to the pcf estimate from interpoint distances greater than max(r) were mistakenly omitted. Spotted by Rasmus Waagepetersen and Abdollah Jalilian. Bug was always present. Fixed. o demo(spatstat) This demonstration script had some unwanted side-effects, such as rescaling the coordinates of standard datasets 'bramblecanes', 'amacrine' and 'demopat', which caused the demonstration to crash when it was repeated several times, and caused errors in demo(data). Fixed. o rmh Visual debugger crashed sometimes with message 'XI not found'. Fixed. o predict.ppm Crashed if the model was fitted using 'covfunargs'. Fixed. o bounding.box Crashed if one of the arguments was NULL. Fixed. o multiplicity.ppp Did not handle data frames of marks. Fixed. CHANGES IN spatstat VERSION 1.34-1 OVERVIEW o We thank Kurt Hornik, Ted Rosenbaum, Ege Rubak and Achim Zeileis for contributions. o Important bug fix. SIGNIFICANT USER-VISIBLE CHANGES o as.box3 Now accepts objects of class 'ppx' or 'boxx'. o crossdist.ppp, crossdist.pp3, crossdist.default New argument 'squared' allows the squared distances to be computed (saving computation time in some applications) BUG FIXES o union.owin, is.subset.owin, dilation.owin Results were sometimes completely wrong for polygons with holes. Spotted by Ted Rosenbaum. Fixed. 
o psstA, areaLoss Crashed in some cases, with error message 'Number of items to replace is not a multiple of replacement length'. Spotted by Achim Zeileis. Fixed. CHANGES IN spatstat VERSION 1.34-0 OVERVIEW o We thank Andrew Bevan, Ege Rubak, Aruna Jammalamadaka, Greg McSwiggan, Jeff Marcus, Jose M Blanco Moreno, and Brian Ripley for contributions. o spatstat and all its dependencies are now Free Open Source. o spatstat does not require the package 'gpclib' any more. o spatstat now depends on the packages 'tensor', 'abind' and 'polyclip' o polygon clipping is now enabled always. o Substantially more support for point patterns on linear networks. o Faster computations for pairwise interaction models. o Bug fixes in nearest neighbour calculations. o Bug fix in leverage and influence diagnostics. o Version nickname: "Window Cleaner" o spatstat now requires R version 3.0.2 or later NEW FUNCTIONS o as.lpp Convert data to a point pattern on a linear network. o distfun.lpp Distance function for point pattern on a linear network. o eval.linim Evaluate expression involving pixel images on a linear network. o linearKcross, linearKdot, linearKcross.inhom, linearKdot.inhom Multitype K functions for point patterns on a linear network o linearmarkconnect, linearmarkequal Mark connection function and mark equality function for multitype point patterns on a linear network o linearpcfcross, linearpcfdot, linearpcfcross.inhom, linearpcfdot.inhom Multitype pair correlation functions for point patterns on a linear network o linfun New class of functions defined on a linear network o nndist.lpp, nnwhich.lpp, nncross.lpp Methods for nndist, nnwhich, nncross for point patterns on a linear network o nnfun.lpp Method for nnfun for point patterns on a linear network o vcov.lppm Variance-covariance matrix for parameter estimates of a fitted point process model on a linear network. 
o bilinearform Computes a bilinear form o tilenames, tilenames<- Extract or change the names of tiles in a tessellation. SIGNIFICANT USER-VISIBLE CHANGES o package dependencies Previous versions of spatstat used the package 'gpclib' to perform geometrical calculations on polygons. Spatstat now uses the package 'polyclip' for polygon calculations instead. o free open-source licence The restrictive licence conditions of 'gpclib' no longer apply to users of spatstat. Spatstat and all its dependencies are now covered by a free open-source licence. o polygon clipping In previous versions of spatstat, geometrical calculations on polygons could be performed 'exactly' using gpclib or 'approximately' using pixel discretisation. Polygon calculations are now always performed 'exactly'. o intersect.owin, union.owin, setminus.owin If A and B are polygons, the result is a polygon. o erosion, dilation, opening, closing If the original set is a polygon, the result is a polygon. o intersect.tess, dirichlet The tiles of the resulting tessellation are polygons if the input was polygonal. o plot.owin Polygons with holes can now be plotted with filled colours on any device. o lppm New arguments 'eps' and 'nd' control the quadrature scheme. o pairwise interaction Gibbs models Many calculations for these models have been accelerated. BUG FIXES o nncross.pp3 Values were completely incorrect in some cases. Usually accompanied by a warning about NA values. (Spotted by Andrew Bevan.) Fixed. o nnmap, nnmark A small proportion of pixels had incorrect values. [These were the pixels lying on the boundary of a Dirichlet cell.] Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm Results were incorrect for non-Poisson processes. Fixed. o distcdf Results were incorrect in some cases when W was a window and V was a point pattern. Fixed. o Kcross, Kdot, pcfcross, pcfdot Results were incorrect in some rare cases. Fixed. o as.fv.kppm Erroneously returned a NULL value. Fixed. 
o vcov.ppm For point process models fitted with method = 'logi', sometimes crashed with error "object 'fit' not found". (Spotted by Ege Rubak). Fixed. o vcov.ppm For multitype point process models, sometimes crashed with error "argument 'par' is missing". Fixed. o plot.im Crashed if some of the pixel values were infinite. Fixed. o owin owin(poly=..) crashed if there were NA's in the polygon coordinates. Spotted by Jeff Marcus. Fixed. o plot.fv Crashed, giving an incomprehensible error, if the plot formula contained a number with a decimal point. Fixed. o alltypes Crashed if envelopes=TRUE and global=TRUE, with error message 'csr.theo not found'. Spotted by Jose M Blanco Moreno. Fixed. o chop.tess, rMosaicField Format of result was garbled in some cases. Fixed. o vcov.ppm Sometimes gave an irrelevant warning "parallel option not available". Fixed. CHANGES IN spatstat VERSION 1.33-0 OVERVIEW o We thank Kurt Hornik and Brian Ripley for advice. o The package namespace has been modified. o Numerous internal changes. o Likelihood cross-validation for smoothing bandwidth. o More flexible models of intensity in cluster/Cox processes. o New generic function for smoothing. o Version nickname: 'Titanic Deckchair' NEW FUNCTIONS o bw.ppl Likelihood cross-validation technique for bandwidth selection in kernel smoothing. o is.lppm, is.kppm, is.slrm Tests whether an object is of class 'lppm', 'kppm' or 'slrm' o Smooth New generic function for spatial smoothing. o Smooth.ppp, Smooth.fv, Smooth.msr Methods for Smooth (identical to smooth.ppp, smooth.fv, smooth.msr respectively) o fitted.kppm Method for 'fitted' for cluster/Cox models SIGNIFICANT USER-VISIBLE CHANGES o namespace The namespace of the spatstat package has been changed. o internal functions Some undocumented internal functions are no longer visible, as they are no longer exported in the namespace. These functions can still be accessed using the form spatstat:::functionname. 
Functions that are not visible are not guaranteed to exist or to remain the same in future. o methods For some generic functions defined in the spatstat package, it is possible that R may fail to find one of the methods for the generic. This is a temporary problem due to a restriction on the size of the namespace in R 3.0.1. It will be fixed in future versions of R and spatstat. It only applies to methods for a generic which is a spatstat function (such as nndist) and does not apply to methods for generics defined elsewhere (such as density). In the meantime, if this problem should occur, it can be avoided by calling the method explicitly, in the form spatstat:::genericname.classname. o speed The package should run slightly faster overall, due to the improvement of the namespace, and changes to internal code. o envelope New argument 'envir.simul' determines the environment in which to evaluate the expression 'simulate'. o kppm More flexible models of the intensity, and greater control over the intensity fitting procedure, are now possible using the arguments 'covfunargs', 'use.gam', 'nd', 'eps' passed to ppm. Also the argument 'X' may now be a quadrature scheme. o distcdf Arguments W and V can now be point patterns. o Kest New option: correction = "good" selects the best edge correction that can be computed in reasonable time. o bw.diggle Accelerated. o predict.ppm Calculation of standard error has been accelerated. o smooth.ppp, smooth.fv, smooth.msr These functions will soon be 'Deprecated' in favour of the methods Smooth.ppp, Smooth.fv, Smooth.msr respectively. o stratrand, overlap.owin, update.slrm, edge.Trans, edge.Ripley These already-existing functions are now documented. BUG FIXES o kppm, matclust.estpcf, pcfmodel The pair correlation function of the Matern Cluster Process was evaluated incorrectly at distances close to 0. This could have affected the fitted parameters in matclust.estpcf() or kppm(clusters="MatClust"). Fixed. 
o anova.ppm Would cause an error in future versions of R when 'anova.glm' is removed from the namespace. Fixed. CHANGES IN spatstat VERSION 1.32-0 OVERVIEW o We thank Ege Rubak for major contributions. o Thanks also to Patrick Donnelly, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Sean O'Riordan. o New 'logistic likelihood' method for fitting Gibbs models. o Substantial acceleration of several functions including profile maximum pseudolikelihood and variance calculations for Gibbs models. o Nearest neighbours for point patterns in 3D o Nearest-neighbour interpolation in 2D o New 'progress plots' o Hard core thresholds can be estimated automatically. o More support for colour maps o More support for 'fv' objects o Spatstat now has version nicknames. The current version is "Logistical Nightmare". o Minor improvements and bug fixes. NEW FUNCTIONS o nncross.pp3 Method for 'nncross' for point patterns in 3D o nnmark Mark of nearest neighbour - can be used for interpolation o dclf.progress, mad.progress Progress plots (envelope representations) for the DCLF and MAD tests. o deriv.fv Numerical differentiation for 'fv' objects. o interp.colourmap Smooth interpolation of colour map objects - makes it easy to build colour maps with gradual changes in colour o tweak.colourmap Change individual colour values in a colour map object o beachcolourmap Colour scheme appropriate for `altitudes' (signed numerical values) o as.fv Convert various kinds of data to an 'fv' object o quadscheme.logi Generates quadrature schemes for the logistic method of ppm. o beginner Introduction for beginners. SIGNIFICANT USER-VISIBLE CHANGES o ppm New option: method = "logi" Fits a Gibbs model by the newly developed 'logistic likelihood' method which is often faster and more accurate than maximum pseudolikelihood. Code contributed by Ege Rubak. o profilepl Greatly accelerated, especially for area-interaction models. o vcov.ppm Greatly accelerated for higher-order interaction models. 
o smooth.ppp Now handles bandwidths equal to zero (by invoking 'nnmark') o Hardcore, StraussHard The hard core distance 'hc' can now be omitted; it will be estimated from data. o plot.ppp Now behaves differently if there are multiple columns of marks. Each column of marks is plotted, in a series of separate plots arranged side-by-side. o plot.im Argument 'col' can now be a function o lohboot Now computes confidence intervals for L-functions as well (fun="Lest" or fun="Linhom") o dclf.test, mad.test The argument X can now be an object produced by a previous call to dclf.test or mad.test. o plot.fv Labelling of plots has been improved in some cases. o smooth.fv Further options added. o density.ppp The argument 'weights' can now be a matrix. o smooth.ppp Accelerated, when there are several columns of marks. o density.ppp Accelerated slightly. o simulate.ppm, simulate.kppm The total computation time is also returned. o simulate.kppm Now catches errors (such as 'insufficient memory'). o latest.news, licence.polygons Can now be executed by typing the name of the function without parentheses. o latest.news The text is now displayed one page at a time. BUG FIXES o Hest, Gfox, Jfox The 'raw' estimate was not computed correctly (or at least it was not the raw estimate described in the help files). Spotted by Tom Lawrence. Fixed. o edges2vees Format of result was incorrect if there were fewer than 3 edges. Fixed. o Jfox The theoretical value (corresponding to independence between X and Y) was erroneously given as 0 instead of 1. Spotted by Patrick Donnelly. Fixed. o ppm, quadscheme, default.dummy If the grid spacing parameter 'eps' was specified, the quadrature scheme was sometimes slightly incorrect (missing a few dummy points near the window boundary). Fixed. o print.timed Matrices were printed incorrectly. Fixed. CHANGES IN spatstat VERSION 1.31-3 OVERVIEW o spatstat now 'Suggests' the package 'tensor' o Code slightly accelerated. o More support for pooling of envelopes. 
o Bug fixes. NEW FUNCTIONS o nnmap Given a point pattern, finds the k-th nearest point in the pattern from each pixel in a raster. o coef.fii, coef.summary.fii Extract the interaction coefficients of a fitted interpoint interaction o edges2vees Low-level function for finding triples in a graph. SIGNIFICANT USER-VISIBLE CHANGES o predict.ppm New argument 'correction' allows choice of edge correction when calculating the conditional intensity. o pool.envelope New arguments 'savefuns' and 'savepatterns'. o pool.envelope Envelopes generated with VARIANCE=TRUE can now be pooled. o pool.envelope The plot settings of the input data are now respected. o Numerous functions have been slightly accelerated. BUG FIXES o predict.ppm Calculation of the conditional intensity omitted the edge correction if correction='translate' or correction='periodic'. Fixed. o shift.lpp, rotate.lpp, scalardilate.lpp, affine.lpp, shift.linnet, rotate.linnet, scalardilate.linnet, affine.linnet The enclosing window was not correctly transformed. Fixed. o rHardcore, rStraussHard, rDiggleGratton, rDGS The return value was invisible. Fixed. o ppm In rare cases the results obtained with forcefit=FALSE and forcefit=TRUE were different, due to numerical rounding effects. Fixed. CHANGES IN spatstat VERSION 1.31-2 OVERVIEW o We thank Robin Corria Anslie, Julian Gilbey, Kiran Marchikanti, Ege Rubak and Thordis Linda Thorarinsdottir for contributions. 
o spatstat now depends on R 3.0.0 o More support for linear networks o More functionality for nearest neighbours o Bug fix in fitting Geyer model o Performance improvements and bug fixes NEW FUNCTIONS o affine.lpp, shift.lpp, rotate.lpp, rescale.lpp, scalardilate.lpp Geometrical transformations for point patterns on a linear network o affine.linnet, shift.linnet, rotate.linnet, rescale.linnet, scalardilate.linnet Geometrical transformations for linear networks o [.linnet Subset operator for linear networks o timed Records the computation time taken SIGNIFICANT USER-VISIBLE CHANGES o nncross nncross.ppp can now find the k-th nearest neighbours, for any k. o nndist, nnwhich New argument 'by' makes it possible to find nearest neighbours belonging to specified subsets in a point pattern, for example, the nearest neighbour of each type in a multitype point pattern. o [.fv Now handles the argument 'drop'. o with.fv Argument 'drop' replaced by new argument 'fun' (with different interpretation). o [.lpp Subset index may now be a window (class 'owin') o Kest Options correction='border' and correction='none' now run about 4 times faster, thanks to Julian Gilbey. o density.ppp Numerical underflow no longer occurs when sigma is very small and 'at="points"'. A warning is no longer issued. Thanks to Robin Corria Anslie. o crossing.psp New argument 'fatal' allows the user to handle empty intersections o union.owin It is now guaranteed that if A is a subset of B, then union.owin(A,B)=B. o plot.colourmap Now passes arguments to axis() to control the plot. Appearance of plot improved. o image.listof Now passes arguments to plot.colourmap() if equal.ribbon=TRUE. o kppm Accelerated (especially for large datasets). o plot.envelope plot.envelope is now equivalent to plot.fv and is essentially redundant. o rThomas, rMatClust, rNeymanScott Improved explanations in help files. o All functions Many functions have been slightly accelerated. 
BUG FIXES o ppm Results were incorrect for the Geyer saturation model with a non-integer value of the saturation parameter 'sat'. Spotted by Thordis Linda Thorarinsdottir. Bug introduced in spatstat 1.20-0, July 2010. Fixed. o ppm Fitting a stationary Poisson process using a nonzero value of 'rbord', as in "ppm(X, rbord=R)" with R > 0, gave incorrect results. Fixed. o predict.slrm Crashed with message 'longer object length is not a multiple of shorter object length' if the original data window was not a rectangle. Fixed. o iplot Main title was sometimes incorrect. Fixed. o plot.layered Ignored argument 'main' in some cases. Fixed. o plot.listof, image.listof Crashed sometimes with a message 'figure margins too large' when equal.ribbon=TRUE. Fixed. o print.ppx Crashed if the object contained local coordinates. Fixed. o transect.im Crashed if the transect lay partially outside the image domain. Fixed. o rthin Crashed if X was empty. Fixed. o max.im, min.im, range.im Ignored additional arguments after the first argument. Fixed. o update.lppm Updated object did not remember the name of the original dataset. Fixed. o envelope Grey shading disappeared from plots of envelope objects when the envelopes were transformed using eval.fv or eval.fasp. Fixed. CHANGES IN spatstat VERSION 1.31-1 OVERVIEW o We thank Marcelino de la Cruz, Daniel Esser, Jason Goldstick, Abdollah Jalilian, Ege Rubak and Fabrice Vinatier for contributions. o Nonparametric estimation and tests for point patterns in a linear network. o More support for 'layered' objects. o Find clumps in a point pattern. o Connected component interaction model. o Improvements to interactive plots. o Visual debugger for Metropolis-Hastings algorithm. o Bug fix in Metropolis-Hastings simulation of Geyer process. o Faster Metropolis-Hastings simulation. o Faster computation of 'envelope', 'fv' and 'fasp' objects. o Improvements and bug fixes. NEW FUNCTIONS o connected.ppp Find clumps in a point pattern. 
o kstest.lpp, kstest.lppm The spatial Kolmogorov-Smirnov test can now be applied to point patterns on a linear network (class 'lpp') and point processes on a linear network (class 'lppm'). o bermantest.lpp, bermantest.lppm Berman's Z1 and Z2 tests can now be applied to point patterns on a linear network (class 'lpp') and point processes on a linear network (class 'lppm'). o rhohat.lpp, rhohat.lppm Nonparametric estimation of the dependence of a point pattern on a spatial covariate: 'rhohat' now applies to objects of class 'lpp' and 'lppm'. o intensity.lpp Empirical intensity of a point pattern on a linear network. o as.function.rhohat Converts a 'rhohat' object to a function, with extrapolation beyond the endpoints. o [.layered Subset operator for layered objects. o shift, rotate, affine, rescale, reflect, flipxy, scalardilate These geometrical transformations now work for 'layered' objects. o iplot.layered Interactive plotting for 'layered' objects. o as.owin.layered Method for as.owin for layered objects. o [.owin Subset operator for windows, equivalent to intersect.owin. o rcellnumber Generates random integers for the Baddeley-Silverman counterexample. o is.lpp Tests whether an object is a point pattern on a linear network. o is.stationary.lppm, is.poisson.lppm New methods for is.stationary and is.poisson for class 'lppm' o sessionLibs Print library names and version numbers (for use in Sweave scripts) SIGNIFICANT USER-VISIBLE CHANGES o iplot iplot is now generic, with methods for 'ppp', 'layered' and 'default'. iplot methods now support zoom and pan navigation. o rmh.default New argument 'snoop' allows the user to activate a visual debugger for the Metropolis-Hastings algorithm. o connected connected() is now generic, with methods for 'im', 'owin' and 'ppp'. o alltypes Now works for lpp objects o rlabel Now works for lpp, pp3, ppx objects o plot.kstest Can now perform P-P and Q-Q plots as well. 
o plot.fasp New argument 'samey' controls whether all panels have the same y limits. o plot.fasp Changed default value of 'samex'. o Objects of class 'envelope', 'fv' and 'fasp' Reduced computation time and storage required for these objects. o pcfmodel.kppm Improved calculation. o plot.fv Improved collision-avoidance algorithm (for avoiding overlaps between curves and legend) o ppm Improved error handling o envelope All methods for 'envelope' now handle fun=NULL o setminus.owin Better handling of the case where both arguments are rectangles. o rmh Simulation has been further accelerated. o lppm Accelerated. o vcov.ppm Accelerated. o marktable Accelerated. o Triplets() interaction Accelerated. o alltypes Accelerated when envelope=TRUE. BUG FIXES o rmh Simulation of the Geyer saturation process was incorrect. [Bug introduced in previous version, spatstat 1.31-0.] Fixed. o rmh Simulation of the Geyer saturation process was incorrectly initialised, so that the results of a short run (i.e. small value of 'nrep') were incorrect, while long runs were correct. [Bug introduced in spatstat 1.17-0, October 2009.] Fixed. o ppm Objects fitted with use.gam=TRUE caused fatal errors in various functions including print, summary, vcov and model.frame. Spotted by Jason Goldstick. Fixed. o lpp, runiflpp, rpoislpp Empty point patterns caused an error. Fixed. o rmh.default Crashed for hybrid models, with message 'Attempt to apply non-function'. Spotted by Ege Rubak. Fixed. o relrisk Crashed when 'at="points"' for a multitype pattern with more than 2 types. Spotted by Marcelino de la Cruz. Fixed. o erosion.owin, dilation.psp, border Ignored the arguments "..." in some cases (namely when the window was polygonal and 'gpclib' was disabled). Fixed. o rsyst, rcell Did not correctly handle the argument 'dx'. Spotted by Fabrice Vinatier. Fixed. o correction="trans" Various functions such as Kest no longer recognised 'correction = "trans"'. Fixed. 
o istat Crashed with an error message about envelopes. Fixed. o summary.ppm, print.ppm p-values which were exactly equal to zero were reported as NA. Fixed. o [.im Crashed if the intersection consisted of a single row or column of pixels. Fixed. o plot.im Sometimes incorrectly displayed an image consisting of a single row or column of pixels. Fixed. o plot.layered The plot region was determined by the first layer, so that objects in subsequent layers could sometimes fall outside the plot region. Fixed. o transect.im If the arguments 'from' and 'to' were numeric vectors of length 2, the result was garbled. Fixed. o Inhomogeneous K functions and pair correlation functions [Kinhom, pcfinhom, Kcross.inhom, Kdot.inhom, pcfcross.inhom, etc.] These functions reported an error 'lambda is not a vector' if the intensity argument lambda was computed using density(, at="points"). Fixed. o rlabel Did not accept a point pattern with a hyperframe of marks. Fixed. o alltypes Crashed when envelope=TRUE if the summary function 'fun' did not have default values for the marks i and j. Fixed. o Kres, Gres, psst, psstA Ignored the unit of length. Fixed. CHANGES IN spatstat VERSION 1.31-0 OVERVIEW o We thank Frederic Lavancier and Ege Rubak for contributions. o Major bug fix in simulation of area-interaction process. o Metropolis-Hastings simulations accelerated. o Rounding of spatial coordinates o clmfires dataset corrected. o Bug fixes and minor improvements. NEW FUNCTIONS o round.ppp Round the spatial coordinates of a point pattern to a specified number of decimal places. o rounding Determine whether a dataset has been rounded. SIGNIFICANT USER-VISIBLE CHANGES o rmh Simulation of the following models has been accelerated: areaint, dgs, diggra, fiksel, geyer, hardcore, lennard, multihard, strauss, straush, straussm, strausshm. o rmh The transition history of the simulation (which is saved if 'track=TRUE') now also contains the value of the Hastings ratio for each proposal. 
o clmfires The clmfires dataset has been modified to remove errors and inconsistencies. o plot.linim Appearance of the plot has been improved, when style='width'. o summary.ppm Now reports whether the spatial coordinates have been rounded. o dclf.test, mad.test The range of distance values ('rinterval') used in the test is now printed in the test output, and is saved as an attribute. BUG FIXES o rmh Simulation of the Area-Interaction model was completely incorrect. Spotted by Frederic Lavancier. The bug was introduced in spatstat version 1.23-6 or later. Fixed. o dclf.test The test statistic was incorrectly scaled (by a few percent). This did not affect the p-value of the test. Fixed. o ppx If argument 'coord.type' was missing, various errors occurred: a crash may have occurred, or the results may have depended on the storage type of the data. Spotted by Ege Rubak. Fixed. o plot.ppx Crashed for 1-dimensional point patterns. Spotted by Ege Rubak. Fixed. CHANGES IN spatstat VERSION 1.30-0 OVERVIEW o We thank Jorge Mateu, Andrew Bevan, Olivier Flores, Marie-Colette van Lieshout, Nicolas Picard and Ege Rubak for contributions. o The spatstat manual now exceeds 1000 pages. o Hybrids of point process models. o Five new datasets o Second order composite likelihood method for kppm. o Inhomogeneous F, G and J functions. o Delaunay graph distance o Fixed serious bug in 'lppm' for marked patterns. o bug fix in some calculations for Geyer model o Improvements to linear networks code o Pixel images can now be displayed with a logarithmic colour map. 
o spatstat now formally 'Depends' on the R core package 'grDevices' o miscellaneous improvements and bug fixes NEW DATASETS o clmfires Forest fires in Castilla-La Mancha o gordon People sitting on the grass in Gordon Square, London o hyytiala Mixed forest in Hyytiala, Finland (marked by species) o paracou Kimboto trees in Paracou, French Guiana (marked as adult/juvenile) o waka Trees in Waka national park (marked with diameters) NEW FUNCTIONS o Hybrid The hybrid of several point process interactions [Joint research with Jorge Mateu and Andrew Bevan] o is.hybrid Recognise a hybrid interaction or hybrid point process model. o Finhom, Ginhom, Jinhom Inhomogeneous versions of the F, G and J functions [Thanks to Marie-Colette van Lieshout] o delaunay.distance Graph distance in the Delaunay triangulation. o distcdf Cumulative distribution function of the distance between two independent random points in a given window. o bw.frac Bandwidth selection based on window geometry o shortside.owin, sidelengths.owin Side lengths of (enclosing rectangle of) a window SIGNIFICANT USER-VISIBLE CHANGES o ppm Can now fit models with 'hybrid' interactions [Joint research with Jorge Mateu and Andrew Bevan] o kppm Now has the option of fitting models using Guan's (2006) second order composite likelihood. o envelope.lpp Now handles multitype point patterns. o envelope.envelope New argument 'transform' allows the user to apply a transformation to previously-computed summary functions. o runifpointOnLines, rpoisppOnLines, runiflpp, rpoislpp Can now generate multitype point patterns. o rmhmodel, rmh, simulate.ppm Now handle point process models with 'hybrid' interactions. o kppm Accelerated, and more reliable, due to better choice of starting values in the optimisation procedure. o kppm The internal format of kppm objects has changed. o minimum contrast estimation Error messages from the optimising function 'optim' are now trapped and handled. 
o rhohat This command is now generic, with methods for ppp, quad, and ppm. o raster.x, raster.y, raster.xy These functions have a new argument 'drop' o summary.ppm Improved behaviour when the model covariates are a data frame. o progressreport Output improved. o second order summary functions (Kest, Lest, Kinhom, pcf.ppp, Kdot, Kcross, Ldot etc etc) These functions now accept correction="translation" as an alternative to correction = "translate", for consistency. o plot.im New argument 'log' allows colour map to be equally spaced on a log scale. o as.owin.ppm, as.owin.kppm New argument 'from' allows the user to extract the spatial window of the point data (from="points") or the covariate images (from="covariates") o dclf.test, mad.test The rule for handling tied values of the test statistic has been changed. The tied values are now randomly ordered to obtain a randomised integer rank. o with.fv New argument 'enclos' allows evaluation in other environments BUG FIXES o lppm For multitype patterns, the fitted model was completely incorrect due to an error in constructing the quadrature scheme. Fixed. o Geyer For point process models with the 'Geyer' interaction, vcov.ppm() and suffstat() sometimes gave incorrect answers. [Spotted by Ege Rubak.] Fixed. o as.im.im Did not correctly handle factor-valued images if one of the arguments 'dimyx', 'eps', 'xy' was given. Fixed. o envelope.lppm Crashed if the model was multitype. Fixed. o lpp Did not handle empty patterns. Fixed. o density.ppp If 'sigma' was a bandwidth selection function such as bw.scott() which returned a numeric vector of length 2, a warning message was issued, and the smoothing bandwidth was erroneously taken to be the first element of the vector. Fixed. o Fest, Jcross, Jdot, Jmulti If these functions were computed using correction = 'rs', plotting them would sometimes give an error, with the message "no finite x/y limits". Fixed. 
o pcfmodel.kppm For models with clusters="VarGamma" the value of the pcf at distance r=0 was given as NaN. Fixed. o vcov.ppm Result was incorrect in rare cases, due to numerical rounding effects. Fixed. o rLGCP, simulate.kppm For models fitted to point patterns in an irregular window, simulation sometimes failed, with a message that the image 'mu' did not cover the simulation window. (Spotted by George Limitsios.) Fixed. o rLGCP, simulate.kppm Crashed sometimes with an error about unequal x and y steps (from 'GaussRF'). Fixed. CHANGES IN spatstat VERSION 1.29-0 OVERVIEW o We thank Colin Beale, Li Haitao, Frederic Lavancier, Erika Mudrak and Ege Rubak for contributions. o random sequential packing o Allard-Fraley estimator o method for pooling several quadrat tests o better control over dummy points in ppm o more support for data on a linear network o nearest neighbour map o changes to subsetting of images o improvements and bug fixes NEW FUNCTIONS o clusterset Allard-Fraley estimator of high-density features in a point pattern o pool.quadrattest Pool several quadrat tests o nnfun Nearest-neighbour map of a point pattern or a line segment pattern o as.ppm Converts various kinds of objects to ppm o crossdist.lpp Shortest-path distances between pairs of points in a linear network o nobs.lppm Method for 'nobs' for lppm objects. o as.linim Converts various kinds of objects to 'linim' o model.images.slrm Method for model.images for slrm objects o rotate.im Rotate a pixel image SIGNIFICANT USER-VISIBLE CHANGES o "[.im" and "[<-.im" New argument 'j' allows any type of matrix indexing to be used. o "[.im" Default behaviour changed in the case of a rectangular subset. New argument 'rescue' can be set to TRUE to reinstate previous behaviour. o rSSI Performs 'Random Sequential Packing' if n=Inf. o ppm New argument 'eps' determines the spacing between dummy points. (also works for related functions quadscheme, default.dummy, ...) 
o fitted.ppm, predict.ppm Argument 'new.coef' specifies a vector of parameter values to replace the fitted coefficients of the model. o lppm Stepwise model selection using step() now works for lppm objects. o vcov.slrm Can now calculate correlation matrix or Fisher information matrix as well as variance-covariance matrix. o eval.fv Improved behaviour when plotted. o "[.fv" Improved behaviour when plotted. o lohboot When the result is plotted, the confidence limits are now shaded. o lohboot New argument 'global' allows global (simultaneous) confidence bands instead of pointwise confidence intervals. o vcov.ppm Accelerated by 30% in some cases. o quadrat.test.splitppp The result is now a single object of class 'quadrattest' o progressreport Improved output (also affects many functions which print progress reports) o Full redwood data (redwoodfull) Plot function redwoodfull.extra$plotit has been slightly improved. o nncross This function is now generic, with methods for 'ppp' and 'default'. o distfun The internal format of objects of class 'distfun' has been changed. o duplicated.ppp, unique.ppp New argument 'rule' allows behaviour to be consistent with package 'deldir' BUG FIXES o bdist.tiles Values were incorrect in some cases due to numerical error. (Spotted by Erika Mudrak.) Fixed. o vcov.ppm, suffstat These functions sometimes gave incorrect values for marked point process models. Fixed. o simulate.ppm, predict.ppm Did not correctly handle the 'window' argument. (Spotted by Li Haitao). Fixed. o smooth.ppp, markmean If sigma was very small, strange values were produced, due to numerical underflow. (Spotted by Colin Beale). Fixed. o MultiHard, MultiStrauss, MultiStraussHard Crashed if the data point pattern was empty. (Spotted by Ege Rubak). Fixed. o vcov.ppm Crashed sporadically, with multitype interactions. (Spotted by Ege Rubak). Fixed. 
o rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS If the simulated pattern was empty, these functions would either crash, or return a pattern containing 1 point. (Spotted by Frederic Lavancier). Fixed. o model.matrix.slrm Crashed if the model was fitted using split pixels. Fixed. o residuals.ppm, diagnose.ppm Did not always correctly handle models that included offset terms. Fixed. o project.ppm When a model was projected by project.ppm or by ppm(project=TRUE), the edge corrections used in the projected models were sometimes different from the edge corrections in the original model, so that the projected and unprojected models were not comparable. Fixed. o plot.listof, plot.splitppp Crashed sometimes due to a scoping problem. Fixed. o dclf.test, mad.test Crashed if any of the function values were infinite or NaN. Fixed. o psstA Default plot did not show the horizontal line at y=0 corresponding to a perfect fit. Fixed. o vcov.ppm names attribute was spelt incorrectly in some cases. Fixed. CHANGES IN spatstat VERSION 1.28-2 OVERVIEW o We thank Thomas Bendtsen, Ya-Mei Chang, Daniel Esser, Robert John-Chandran, Ege Rubak and Yong Song for contributions. o New code for Partial Residual Plots and Added Variable Plots. o maximum profile pseudolikelihood computations vastly accelerated. o New dataset: cells in gastric mucosa o now possible to capture every k-th state of Metropolis-Hastings algorithm. o size of 'ppm' objects reduced. o scope of 'intensity.ppm' extended. o quadrat.test can now perform Monte Carlo tests and one/two-sided tests o improvements to 'plot.fv' o improvement to 'rescale' o some datasets reorganised. o numerous bug fixes NEW DATASET o mucosa Cells in gastric mucosa Kindly contributed by Dr Thomas Bendtsen NEW FUNCTIONS o parres Partial residual plots for spatial point process models. A diagnostic for the form of a covariate effect. o addvar Added variable plots for spatial point process models. 
A diagnostic for the existence of a covariate effect. SIGNIFICANT USER-VISIBLE CHANGES o profilepl Accelerated (typically by a factor of 5). o rmh, rmhcontrol It is now possible to save every k-th iteration of the Metropolis-Hastings algorithm. The arguments 'nsave' and 'nburn' may be given to rmh or to rmhcontrol. They specify that the point pattern will be saved every 'nsave' iterations, after an initial burn-in of 'nburn' iterations. o simulate.ppm New argument 'singlerun' determines whether the simulated patterns are generated using independent runs of the Metropolis-Hastings algorithm or are obtained by performing one long run of the algorithm and saving every k-th iteration. o exactMPLEstrauss New argument 'project' determines whether the parameter gamma is constrained to lie in [0,1]. o intensity.ppm Now works for stationary point process models with the interactions DiggleGratton, DiggleGatesStibbard, Fiksel, PairPiece and Softcore. o plot.fv Improved algorithm for avoiding collisions between graphics and legend. o plot.fv New argument 'log' allows plotting on logarithmic axes. o envelope Can now calculate an estimate of the true significance level of the "wrong" test (which declares the observed summary function to be significant if it lies outside the pointwise critical boundary anywhere). Controlled by new argument 'do.pwrong'. o quadrat.test New argument 'alternative' allows choice of alternative hypothesis and returns one-sided or two-sided p-values as appropriate. o quadrat.test Can now perform Monte Carlo test as well (for use in small samples where the chi^2 approximation is inaccurate) o Softcore Improved numerical stability. New argument 'sigma0' for manual control over rescaling. o rescale If scale argument 's' is missing, then the data are rescaled to native units. For example if the current unit is 0.1 metres, coordinates will be re-expressed in metres. 
o psst Extra argument 'verbose=TRUE' o is.subset.owin Accelerated for polygonal windows o rmh.default 'track' is no longer a formal argument of rmh.default; it is now a parameter of rmhcontrol. However there is no change in usage: the argument 'track' can still be given to rmh.default. o clf.test Has been renamed 'dclf.test' to give proper attribution to Peter Diggle. o betacells This dataset has been restructured. The vector of cell profile areas, formerly given by betacells.extra$area, has now been included as a column of marks in the point pattern 'betacells'. o ants The function ants.extra$plot() has been renamed plotit() for conformity with other datasets. o redwoodfull The function redwoodfull.extra$plot() has been renamed plotit() for conformity with other datasets. o nbfires For conformity with other datasets, there is now an object nbfires.extra BUG FIXES o ripras Expansion factor was incorrect in the rectangular case. Fixed. o Triplets Crashed sometimes with error "dim(X) must have positive length". Fixed. o affine.im Crashed in the case of a diagonal transformation matrix! Spotted by Ege Rubak. Fixed. o envelope.envelope Ignored the argument 'global'. Fixed. o MultiStraussHard The printed output showed the hardcore radii as NULL. Spotted by Ege Rubak. Fixed. o "[.psp" Crashed if the data were generated by rpoisline(). Spotted by Marcelino de la Cruz. Fixed. o plot.linim If style="colour", the main title was always "x". Fixed. o plot.ppx Setting add=TRUE did not prevent the domain being plotted. Fixed. o rmh Crashed if x.start was an empty point pattern. Spotted by Ege Rubak. Fixed. o as.ppp.data.frame Crashed if any points lay outside the window. Spotted by Ege Rubak. Fixed. o Ripley isotropic edge correction Divide-by-zero error in rare cases. Spotted by Daniel Esser. Fixed. o summary functions For many of the summary functions (e.g. Kest, pcf), the result of saving the object to disc was an enormous file. Spotted by Robert John-Chandran. Fixed. 
o pcf.fv Default plot was wrongly coloured. Fixed. CHANGES IN spatstat VERSION 1.28-1 OVERVIEW o We thank Ege Rubak, Gopal Nair, Jens Oehlschlaegel and Mike Zamboni for contributions. o New approximation to the intensity of a fitted Gibbs model. o Minor improvements and bug fixes o spatstat now 'Suggests' the package 'gsl' NEW FUNCTIONS o intensity, intensity.ppp, intensity.ppm Calculate the intensity of a dataset or fitted model. Includes new approximation to the intensity of a fitted Gibbs model o LambertW Lambert's W-function SIGNIFICANT USER-VISIBLE CHANGES o envelope Improved plot labels for envelopes that were generated using the 'transform' argument. o plot.fv Improved algorithm for collision detection. o plot.im Now returns the colour map used. o plot.listof, plot.splitppp Slight change to handling of plot.begin and plot.end o square Now accepts vectors of length 2 o plot.fii Increased resolution of the plot obtained from plot(fitin(ppm(...))) o image.listof If equal.ribbon=TRUE, the colour ribbon will no longer be displayed repeatedly for each panel, but will now be plotted only once, at the right hand side of the plot array. BUG FIXES o vcov.ppm Results were sometimes incorrect for a Gibbs model with non-trivial trend. Spotted by Ege Rubak. Fixed. o nncross In rare cases the results could be slightly incorrect. Spotted by Jens Oehlschlaegel. Fixed. o plot.fv When add=TRUE, the x limits were sometimes truncated. Spotted by Mike Zamboni. Fixed. o plot.im Labels for the tick marks on the colour ribbon were sometimes ridiculous, e.g. "2.00000001". Fixed. CHANGES IN spatstat VERSION 1.28-0 OVERVIEW o We thank Farzaneh Safavimanesh, Andrew Hardegen and Tom Lawrence for contributions. o Improvements to 3D summary functions. o A multidimensional point pattern (ppx) can now have 'local' coordinates as well as spatial and temporal coordinates and marks. o Changed format for point patterns on a linear network (lpp). Changes are backward compatible. 
Many computations run faster. o More support for fitted cluster models (kppm). o split method for multidimensional point patterns (ppx) and point patterns on a linear network (lpp). o Fixed bug causing errors in plot.im o Miscellaneous improvements and bug fixes NEW FUNCTIONS o exactMPLEstrauss Fits the stationary Strauss point process model using an exact maximum pseudolikelihood technique. This is mainly intended for technical investigation of algorithms. o split.ppx Method for 'split' for multidimensional point patterns (class 'ppx'). This also works for point patterns on a linear network (class 'lpp'). o model.images This function is now generic, with methods for classes ppm, kppm, lppm o model.frame, model.matrix These generic functions now have methods for classes kppm, lppm o as.owin.kppm, as.owin.lppm New methods for 'as.owin' for objects of class kppm, lppm o as.linnet.lppm Extracts the linear network in which a point process model was fitted. SIGNIFICANT USER-VISIBLE CHANGES o class 'ppx' An object of class 'ppx' may now include 'local' coordinates as well as 'spatial' and 'temporal' coordinates, and marks. o ppx Arguments have changed. o class 'lpp' The internal format of lpp objects has been extended (but is backward-compatible). Many computations run faster. To convert an object to the new format: X <- lpp(as.ppp(X), as.linnet(X)). o F3est Calculation of theoretical Poisson curve ('theo') has changed, and is now controlled by the argument 'sphere'. o rmh, rmhstart The initial state ('start') can now be missing or null. o im, as.im The pixel coordinates in an image object are now generated more accurately. This avoids a numerical error in plot.im. o eval.fv, eval.fasp Evaluation is now applied only to columns that contain values of the function itself (rather than values of the derivative, hazard rate, etc). This is controlled by the new argument 'dotonly'. o spatstat.options New option 'nvoxel' o quad.ppm Now accepts kppm objects. 
o str This generic function (for inspecting the internal structure of an object) now produces sensible output for objects of class 'hyperframe', 'ppx', 'lpp' o ppx, coords.ppx, coords<-.ppx The arguments to these functions have changed. o lgcp.estK, Kmodel Computation can be greatly accelerated by setting spatstat.options(fastK.lgcp=TRUE). o G3est Computation accelerated. o envelope Computation slightly accelerated. o spatstat.options New option 'fastK.lgcp' BUG FIXES o nndist.psp Caused an error if length(k) > 1. Fixed. o plot.im Sometimes reported an error "useRaster=TRUE can only be used with a regular grid." This was due to numerical rounding effects on the coordinates of a pixel image. Fixed. o plot.fv If a formula was used to specify the plot, the names of variables in the formula were sometimes incorrectly matched to *functions*. Spotted by Farzaneh Safavimanesh. Fixed. o F3est Took a very long time if the containing box was very flat, due to the default value of 'vside'. Fixed. o rmh, rmhmodel An erroneous warning about 'outdated format of rmhmodel object' sometimes occurred. Fixed. o marks<-.ppx Names of result were incorrect. Fixed. o hyperframe class Various minor bug fixes. CHANGES IN spatstat VERSION 1.27-0 OVERVIEW o Variance estimates are now available for all Gibbs point process models. o Cressie-Loosmore-Ford test implemented o plot.fv now avoids collisions between the legend and the graphics. o Extension to predict.ppm o Improvements to envelopes and multitype summary functions. o Line transects of a pixel image. o Changes to defaults in Metropolis-Hastings simulations. o More geometrical operations o Bug fixes. o We thank Aruna Jammalamadaka for contributions. NEW FUNCTIONS o clf.test Perform the Cressie (1991)/ Loosmore and Ford (2006) test of CSR (or another model) o mad.test Perform the Maximum Absolute Deviation test of CSR (or another model). o convolve.im Compute convolution of pixel images. 
o Kmulti.inhom Counterpart of 'Kmulti' for spatially-varying intensity. o rmhexpand Specify a simulation window, or a rule for expanding the simulation window, in Metropolis-Hastings simulation (rmh) o transect.im Extract pixel values along a line transect. o affine.im Apply an affine transformation to a pixel image. o scalardilate Perform scalar dilation of a geometrical object relative to a specified origin. o reflect Reflect a geometrical object through the origin. o "[.lpp", "[.ppx" Subset operators for the classes "lpp" (point pattern on linear network) and "ppx" (multidimensional space-time point pattern). o is.rectangle, is.polygonal, is.mask Determine whether a window w is a rectangle, a domain with polygonal boundaries, or a binary pixel mask. o has.offset Determines whether a fitted model object (of any kind) has an offset. SIGNIFICANT USER-VISIBLE CHANGES o predict.ppm This function can now calculate the conditional intensity of a model relative to any point pattern X (not just the original data pattern). o vcov.ppm This function now handles all Gibbs point process models. o plot.fv Collisions between the legend box and the graphics are now detected and avoided. o rmh.ppm, rmh.default, simulate.ppm, qqplot.ppm, envelope.ppm These functions now have slightly different default behaviour because of changes to the handling of arguments to 'rmhcontrol'. o rmhcontrol The default value of the parameters 'periodic' and 'expand' has changed. o rmhcontrol The parameter 'expand' can now be in any format acceptable to rmhexpand(). o rmh.ppm, rmh.default, simulate.ppm Any 'rmhcontrol' parameter can now be given directly as an argument to rmh.ppm, rmh.default or simulate.ppm. o Kmulti, Gmulti, Jmulti The arguments I, J can now be any kind of subset index or can be functions that yield a subset index. 
o envelope.envelope In envelope(E, fun=NULL) if E does not contain simulated summary functions, but does contain simulated point patterns, then 'fun' now defaults to Kest, instead of flagging an error. o print.ppp, summary.ppp If the point pattern x was generated by Metropolis-Hastings simulation using 'rmh', then print(x) and summary(x) show information about the simulation parameters. o print.ppm Standard errors for the parameter estimates, and confidence intervals for the parameters, can now be printed for all Gibbs models (but are printed only for Poisson models by default). o eval.im Images with incompatible dimensions are now resampled to make them compatible (if harmonize=TRUE). o spatstat.options New option 'print.ppm.SE' controls whether standard errors and confidence intervals are printed for all Gibbs models, for Poisson models only, or are never printed. o inside.owin Now accepts the form list(x,y) for the first argument. o image.listof New argument 'equal.ribbon' allows several images to be plotted with the same colour map. o is.subset.owin Improved accuracy in marginal cases. o expand.owin Functionality extended to handle all types of expansion rule. o default.rmhcontrol, default.expand These functions now work with models of class 'rmhmodel' as well as 'ppm' o print.rmhcontrol Output improved. BUG FIXES o linearK, linearKinhom If any data points were located exactly at a vertex of the linear network, the weights for Ang's correction were incorrect, due to numerical error. This sometimes produced infinite or NA values of the linear K function. Fixed. o predict.ppm In some cases, predict.ppm(type="cif") generated a spurious warning that "number of rows of result is not a multiple of vector length." Fixed. o crossing.psp Results were sometimes incorrect due to numerical rounding error associated with GCC bug #323. Fixed. 
o MultiHard, MultiStrauss, MultiStraussHard If the mark values contained non-alphanumeric characters, the names of the interaction coefficients in coef(ppm(...)) were sometimes garbled. Fixed. o profilepl For edge corrections other than the border correction, an error message about 'rbord' would sometimes occur. Fixed. o is.marked, is.multitype These functions gave the wrong answer for 'lpp' objects. Fixed. o marks<-.lpp, marks<-.ppx Format of result was garbled if new columns of marks were added. Fixed. o reach.rmhmodel Gave the wrong answer for Geyer and BadGey models. Fixed. o envelope.envelope Ignored the argument 'savefuns'. Fixed. o BadGey Sometimes wrongly asserted that the parameter 'sat' was invalid. Occurred only in ppm(project=TRUE). Fixed. CHANGES IN spatstat VERSION 1.26-1 OVERVIEW o Variance-covariance matrix for Gibbs point process models. o Bootstrap confidence bands for pair correlation function and K function. o Bug fix in scan test. o Area-interaction model accelerated. o we thank Jean-Francois Coeurjolly and Ege Rubak for contributions. NEW FUNCTIONS o lohboot Computes bootstrap confidence bands for pair correlation function and K function using Loh's (2008) mark bootstrap. SIGNIFICANT USER-VISIBLE CHANGES o vcov.ppm Now works for all Gibbs point process models, thanks to new code (and theory) from Jean-Francois Coeurjolly and Ege Rubak o AreaInter Computations related to the area-interaction point process (ppm, predict.ppm, residuals.ppm, diagnose.ppm, qqplot.ppm) have been accelerated. BUG FIXES o scan.test Results were sometimes incorrect due to numerical instability (a 'Gibbs phenomenon'). Fixed. CHANGES IN spatstat VERSION 1.26-0 OVERVIEW o We thank Jens Oehlschlaegel for contributions. o Further substantial acceleration of spatstat functions. o Workaround for bug in RandomFields package. o Numerous modifications to internal code. 
NEW FUNCTIONS o RandomFieldsSafe There is a bug in the package 'RandomFields' (version <= 2.0.54) which causes a crash to occur, in the development version of R but not in R 2.15.0. To avoid crashing spatstat, we have written the temporary, undocumented function RandomFieldsSafe() which returns TRUE if it is safe to use the RandomFields package. Examples in the spatstat help files for kppm, lgcp.estK, lgcp.estpcf and rLGCP are only executed if RandomFieldsSafe() returns TRUE. SIGNIFICANT USER-VISIBLE CHANGES o Many functions Many spatstat functions now run faster, and will handle larger datasets, thanks to improvements in the internal code, following suggestions from Jens Oehlschlaegel. o Many functions The response to an 'Interrupt' signal is slightly slower. CHANGES IN spatstat VERSION 1.25-5 OVERVIEW o We thank Ya-Mei Chang, Jens Oehlschlaegel and Yong Song for contributions. o Extended functionality of 'rhohat' to local likelihood smoothing and bivariate smoothing. o Nearest neighbour distance computations accelerated. o spatstat now 'Suggests:' the package 'locfit' NEW FUNCTIONS o rho2hat Bivariate extension of 'rhohat' for estimating spatial residual risk, or intensity as a function of two covariates. SIGNIFICANT USER-VISIBLE CHANGES o rhohat Estimation can now be performed using local likelihood fitting with the 'locfit' package, or using kernel smoothing. o nncross Substantially accelerated. New arguments added to control the return value and the sorting of data. BUG FIXES o plot.msr Crashed if the argument 'box' was given. Fixed. CHANGES IN spatstat VERSION 1.25-4 OVERVIEW o We thank Jonathan Lee and Sergiy Protsiv for contributions. 
o Improvements and bug fixes to K function for very large datasets NEW FUNCTIONS o rStraussHard Perfect simulation for Strauss-hardcore process (with gamma <= 1) SIGNIFICANT USER-VISIBLE CHANGES o plot.im The colour ribbon can now be placed left, right, top or bottom using new argument 'ribside' o profilepl Does not generate warnings when some of the candidate models have zero likelihood - for example when fitting model with a hard core. o Kest Now includes fast algorithm for 'correction="none"' which will handle patterns containing millions of points. BUG FIXES o Kest, Lest Gave incorrect values in very large datasets, due to numerical overflow. `Very large' typically means about 1 million points in a random pattern, or 100,000 points in a tightly clustered pattern. [Overflow cannot occur unless there are at least 46,341 points.] [Spotted by Sergiy Protsiv.] Fixed. o Kest, Lest Ignored 'ratio=TRUE' if the argument 'domain' was given. [Spotted by Jonathan Lee.] Fixed. o rjitter Output was sometimes incorrect. [Spotted by Sergiy Protsiv.] Fixed. CHANGES IN spatstat VERSION 1.25-3 OVERVIEW o We thank Daniel Esser for contributions. o Improved support for fitted point process models. o Bug fixes. NEW FUNCTIONS o simulate.slrm Method for 'simulate' for spatial logistic regression models. o labels.ppm, labels.kppm, labels.slrm Methods for 'labels' for fitted point process models. o commonGrid Determine a common spatial domain and pixel resolution for several pixel images and/or binary masks SIGNIFICANT USER-VISIBLE CHANGES o effectfun Now has argument 'se.fit' allowing calculation of standard errors and confidence intervals. o [.msr Now handles character-valued indices. o print.summary.ppm Output gives a more precise description of the fitting method. o ppm, kppm, slrm Confidence intervals for the fitted trend parameters can now be obtained using 'confint' o predict.slrm New argument 'window' o union.owin Now handles a single argument: union.owin(A) returns A. 
BUG FIXES o selfcrossing.psp y coordinate values were incorrect. [Spotted by Daniel Esser.] Fixed. o as.im.owin Did not handle a binary mask with a 1 x 1 pixel array. Fixed. o predict.slrm Results of predict(object, newdata) were incorrect if the spatial domain of 'newdata' was larger than the original domain. Fixed. o ppm If the model was the uniform Poisson process, the argument 'rbord' was ignored. Fixed. o image subset assignment "[<-.im" Generated an error if the indexing argument 'i' was a point pattern containing zero points. Fixed. o hyperframe subset assignment "[<-.hyperframe" Did not correctly handle the case where a single column of the hyperframe was to be changed. Fixed. o help(bw.relrisk), help(rmh.ppm), help(plot.plotppm) These help files had the side-effect of changing some options in spatstat.options. Fixed. CHANGES IN spatstat VERSION 1.25-2 OVERVIEW o We thank Abdollah Jalilian and Thierry Onkelinx for contributions. o Very Important Bug fixes. o Improved mechanism for handling 'invalid' point processes NEW FUNCTIONS o as.matrix.owin Converts a window to a logical matrix. SIGNIFICANT USER-VISIBLE CHANGES o project.ppm Improved algorithm. Now handles terms in the trend formula as well as the interaction. The projected point process is now obtained by re-fitting the model, and is guaranteed to be the maximum pseudolikelihood fit. o plot.im Now handles many arguments recognised by plot.default such as 'cex.main'. Also handles argument 'box'. New argument 'ribargs' contains parameters controlling the ribbon plot only. o spatstat.options New option 'project.fast' allows a faster shortcut for project.ppm o spatstat.options New options 'rmh.p', 'rmh.q', 'rmh.nrep' determine the default values of the parameters p, q and nrep of the Metropolis-Hastings algorithm. See rmhcontrol o ppm Slightly accelerated. BUG FIXES o nncross, distfun, AreaInter Results of nncross were possibly incorrect when X and Y did not have the same window. 
This bug affected values of 'distfun' and may also have affected ppm objects with interaction 'AreaInter'. [Spotted by Thierry Onkelinx] Bug introduced in spatstat 1.9-4 (June 2006). Fixed. o rCauchy Simulations were incorrect in the sense that the value of 'omega' was inadvertently doubled (i.e. omega was incorrectly replaced by 2 * omega). Bug introduced in spatstat 1.25-0. Fixed. o plot.im White lines were present in the image display, on some graphics devices, due to changes in R 2.14. Fixed. o update.ppm The result of 'update(object, formula)' sometimes contained errors in the internal format. Bug introduced in spatstat 1.25-0. Fixed. o example(AreaInter), example(bw.smoothppp), example(Kest.fft), example(plot.owin), example(predict.ppm), example(simulate.ppm) Executing these examples had the side-effect of changing some of the parameters in spatstat.options. Fixed. CHANGES IN spatstat VERSION 1.25-1 OVERVIEW o We thank Neba Funwi-Gabga and Jorge Mateu for contributions. o New dataset of gorilla nest sites o New functions for perfect simulation o Bug fix for rare crashes in rStrauss o Code for ensuring a fitted point process model is a valid point process NEW DATASET o gorillas Gorilla nest sites in a National Park in Cameroon. Generously contributed by Neba Funwi-Gabga NEW FUNCTIONS o rDiggleGratton, rDGS, rHardcore Perfect simulation for the Diggle-Gratton process, Diggle-Gates-Stibbard process, and Hardcore process. o bw.scott Scott's rule of thumb for bandwidth selection in multidimensional smoothing o valid.ppm Checks whether a fitted point process model is a valid point process o project.ppm Forces a fitted point process model to be a valid point process SIGNIFICANT USER-VISIBLE CHANGES o ppm New argument 'project' determines whether the fitted model is forced to be a valid point process o linnet Substantially accelerated. o rStrauss Slightly accelerated. o summary.lpp Now prints the units of length. 
BUG FIXES o rStrauss Crashed rarely (once every 10 000 realisations) with a memory segmentation fault. Fixed. CHANGES IN spatstat VERSION 1.25-0 OVERVIEW o Leverage and influence for point process models o New cluster models (support for model-fitting and simulation). o Fit irregular parameters in trend of point process model o Third order summary statistic. o Improvements to speed and robustness of code. o spatstat now depends on R 2.14 o We thank Abdollah Jalilian and Rasmus Waagepetersen for contributions. NEW FUNCTIONS o leverage.ppm, influence.ppm, dfbetas.ppm Leverage and influence for point process models o ippm Experimental extension to 'ppm' which fits irregular parameters in trend by Fisher scoring algorithm. o Tstat Third order summary statistic for point patterns based on counting triangles. o rCauchy, rVarGamma simulation of a Neyman-Scott process with Cauchy clusters or Variance Gamma (Bessel) clusters. Contributed by Abdollah Jalilian. o rPoissonCluster simulation of a general Poisson cluster process o model.covariates Identify the covariates involved in a model (lm, glm, ppm etc) o as.im.distfun Converts a 'distfun' to a pixel image. o cauchy.estK, cauchy.estpcf, vargamma.estK, vargamma.estpcf Low-level model-fitting functions for the Neyman-Scott process with Cauchy or Variance-Gamma cluster kernel. Contributed by Abdollah Jalilian. SIGNIFICANT USER-VISIBLE CHANGES o kppm Now accepts clusters="Cauchy" or clusters="VarGamma" for the Neyman-Scott process with Cauchy or Variance-Gamma cluster kernel. Code contributed by Abdollah Jalilian. o rNeymanScott Argument 'rcluster' may now take a different format. o psst Argument 'funcorrection' changed to 'funargs' allowing greater flexibility. o plot.fv, plot.envelope New argument 'limitsonly' allows calculation of a common x,y scale for several plots. o overall speed spatstat is now byte-compiled and runs slightly faster. 
o user interrupt Long calculations in spatstat now respond to the Interrupt/Stop signal. o update.ppm Now runs faster and uses much less memory, when the update only affects the model formula (trend formula). o rNeymanScott, rThomas, rMatClust Accelerated thanks to Rasmus Waagepetersen. o multitype data and models Second order multitype statistics (such as Kcross, pcfcross) and multitype interaction models (such as MultiStrauss) now run faster, by a further 5%. BUG FIXES o distfun Some manipulations involving 'distfun' objects failed if the original data X in distfun(X) did not have a rectangular window. Fixed. CHANGES IN spatstat VERSION 1.24-2 OVERVIEW o Geyer's triplet interaction o more functionality for replicated point patterns o changed default for simulation window in point process simulation o changed default for edge correction in Kcom, Gcom o data in spatstat is now lazy-loaded o bug fixes NEW FUNCTIONS o Triplets Geyer's triplet interaction, for point process models o coef.summary.ppm New method coef.summary.ppm You can now type 'coef(summary(fit))' to extract a table of the fitted coefficients of the point process model 'fit' SIGNIFICANT USER-VISIBLE CHANGES o data in spatstat are now lazy-loaded so you don't have to type data(amacrine), etc. o rmh.default, rmh.ppm, simulate.ppm These now handle the 'triplets' interaction o fryplot Now has arguments 'to' and 'from', allowing selection of a subset of points. o fryplot, frypoints These functions now handle marked point patterns properly. o Kcross, Kdot, Kmulti New argument 'ratio' determines whether the numerator and denominator of the estimate of the multitype K-function will be stored. This enables analysis of replicated point patterns, using 'pool.rat()' to pool the K function estimates. o rmh.ppm, simulate.ppm, default.expand For point process models which have a trend depending only on x and y, the simulation window is now taken to be the same as the original window containing the data (by default). 
That is, `expansion' does not take place, by default. (In previous versions of spatstat the simulation window was larger than the original data window.) o rmh.ppm, simulate.ppm The argument sequence for these functions has changed. New argument 'expand' allows more explicit control over simulation domain. o Kcom, Gcom New argument 'conditional' gives more explicit control over choice of edge correction in compensator. Simplified defaults for edge correction. o Kinhom Improved plot labels. o profilepl Printed output improved. BUG FIXES o Lest The variance approximations (Lotwick-Silverman and Ripley) obtained with var.approx=TRUE, were incorrect for Lest (although they were correct for Kest) due to a coding error. Fixed. o simulate.ppm Ignored the argument 'control' in some cases. Fixed. o pcf and its relatives (pcfinhom, pcfcross.inhom, pcfdot.inhom) Sometimes gave a warning about 'extra arguments ignored'. Fixed. CHANGES IN spatstat VERSION 1.24-1 OVERVIEW o Spatial Scan Test o Functionality for replicated point patterns o Bug fixes NEW FUNCTIONS o scan.test Spatial scan test of clustering o rat New class of 'ratio objects' o pool.rat New method for 'pool'. Combines K function estimates for replicated point patterns (etc) by computing ratio-of-sums o unnormdensity Weighted kernel density with weights that do not sum to 1 and may be negative. o compatible New generic function with methods for 'fv', 'im', 'fasp' and 'units' SIGNIFICANT USER-VISIBLE CHANGES o Kest New argument 'ratio' determines whether the numerator and denominator of the estimate of the K-function will be stored. This enables analysis of replicated point patterns, using 'pool.rat()' to pool the K function estimates. o Lest Now handles theoretical variance estimates (using delta method) if var.approx=TRUE o as.mask Argument 'eps' can now be a 2-vector, specifying x and y resolutions. o default.expand Behaviour changed slightly. 
o plot.listof, plot.splitppp, contour.listof, image.listof The arguments 'panel.begin' and 'panel.end' can now be objects such as windows. BUG FIXES o rgbim, hsvim Did not work on images with non-rectangular domains. Fixed. o scaletointerval Did not handle NA's. Fixed. CHANGES IN spatstat VERSION 1.24-0 OVERVIEW o This version was not released publicly. CHANGES IN spatstat VERSION 1.23-6 OVERVIEW o Spatial covariance functions of windows and pixel images. o Area-interaction models can now be fitted in non-rectangular windows o Bug fix for envelope of inhomogeneous Poisson process o Bug fix for raster conversion o New vignette on 'Getting Started with Spatstat' o Code accelerated. NEW FUNCTIONS o imcov Spatial covariance function of pixel image or spatial cross-covariance function of two pixel images o harmonise.im Make several pixel images compatible by converting them to the same pixel grid o contour.listof, image.listof Methods for contour() and image() for lists of objects o dummify Convert data to numeric values by constructing dummy variables. SIGNIFICANT USER-VISIBLE CHANGES o setcov Can now compute the `cross-covariance' between two regions o AreaInter Point process models with the AreaInter() interaction can now be fitted to point pattern data X in any window. o areaGain, areaLoss These now handle arbitrary windows W. They are now more accurate when r is very small. o Kcom Computation vastly accelerated, for non-rectangular windows. o vignettes New vignette 'Getting Started with the Spatstat Package' o nncorr, nnmean, nnvario These functions now handle data frames of marks. BUG FIXES o envelope.ppm If the model was an inhomogeneous Poisson process, the resulting envelope object was incorrect (the simulations were correct, but the envelopes were calculated assuming the model was CSR). Bug was introduced in spatstat 1.23-5. Fixed. 
o envelope.ppm If the model was an inhomogeneous Poisson process with intensity a function of x and y only, overflow errors sometimes occurred ('insufficient storage' or 'attempting to generate a large number of random points'). Fixed. o as.im.im The result of as.im(X, W) was incorrect if 'W' did not cover 'X'. Fixed. o as.mask The result of as.mask(w, xy) was incorrect if 'xy' did not cover 'w'. Fixed. o plot.fv Legend was incorrectly labelled if 'shade' variables were not included in the plot formula. Fixed. o areaGain, areaLoss Crashed if the radius r was close to zero. Fixed. CHANGES IN spatstat VERSION 1.23-5 OVERVIEW o Bug fix to bandwidth selection. o Functions to pool data from several objects of the same class. o Improvements and bug fixes. o We thank Michael Sumner for contributions. NEW FUNCTIONS o pool Pool data from several objects of the same class o pool.envelope Pool simulated data from several envelope objects and create a new envelope o pool.fasp Pool simulated data from several function arrays and create a new array o envelope.envelope Recalculate an envelope from simulated data using different parameters SIGNIFICANT USER-VISIBLE CHANGES o bw.diggle, bw.relrisk, bw.smoothppp, bw.optim Plot method modified. o model.depends Now also recognises 'offset' terms. BUG FIXES o bw.diggle Bandwidth was too large by a factor of 2. Fixed. o plot.psp Crashed if any marks were NA. Fixed. o pointsOnLines Crashed if any segments had zero length. Ignored argument 'np' in some cases. Fixed. o stieltjes Crashed if M had only a single column of function values. Fixed. CHANGES IN spatstat VERSION 1.23-4 OVERVIEW o Bandwidth selection for density.ppp and smooth.ppp o Layered plots. o Model-handling facilities. o Improvements and bug fixes. NEW FUNCTIONS o bw.diggle Bandwidth selection for density.ppp by mean square error cross-validation. o bw.smoothppp Bandwidth selection for smooth.ppp by least-squares cross-validation. 
o layered, plot.layered A simple mechanism for controlling plots that consist of several successive layers of data. o model.depends Given a fitted model (of any kind), identify which of the covariates is involved in each term of the model. o model.is.additive Determine whether a fitted model (of any kind) is additive, in the sense that each term in the model involves at most one covariate. SIGNIFICANT USER-VISIBLE CHANGES o smooth.ppp Bandwidth 'sigma' is now selected by least-squares cross-validation o bw.relrisk Computation in large datasets accelerated. New arguments 'hmin', 'hmax' control the range of trial values of bandwidth. o Hest, Gfox, Jfox Improved algebraic labels for plot o spatstat.options New parameter 'n.bandwidth' o density.ppp, smooth.ppp Slightly accelerated. o point-in-polygon test Accelerated. BUG FIXES o with.fv Mathematical labels were incorrect in some cases. Fixed. o bw.relrisk Implementation of method="weightedleastsquares" was incorrect and was equivalent to method="leastsquares". Fixed. o smooth.ppp NaN values occurred if the bandwidth was very small. Fixed. CHANGES IN spatstat VERSION 1.23-3 OVERVIEW o Urgent bug fix. BUG FIXES o crossing.psp Crashed occasionally with a message about NA or NaN values. Fixed. o affine.ppp Crashed if the point pattern was empty. Fixed. CHANGES IN spatstat VERSION 1.23-2 OVERVIEW o Bug fixes. o Several functions have been accelerated. o We thank Marcelino de la Cruz and Ben Madin for contributions. NEW FUNCTIONS o sumouter, quadform Evaluate certain quadratic forms. o flipxy Exchange x and y coordinates. SIGNIFICANT USER-VISIBLE CHANGES o vcov.ppm Accelerated. o owin, as.owin Checking the validity of polygons has been accelerated. o crossing.psp, selfcrossing.psp Accelerated. BUG FIXES o split.ppp If drop=TRUE then some of the point patterns had the wrong windows. Spotted by Marcelino de la Cruz. Fixed. o split.ppp Crashed if the tessellation did not cover the point pattern. Fixed. 
o predict.ppm Crashed when type="se" if NA's were present. Spotted by Ben Madin. Fixed. o plot.ppp Incorrectly handled the case where both 'col' and 'cols' were present. Fixed. o polygon geometry The point-in-polygon test gave the wrong answer in some boundary cases. Fixed. CHANGES IN spatstat VERSION 1.23-1 OVERVIEW o Important bug fix to 'localpcf'. o Inverse-distance weighted smoothing. o Inhomogeneous versions of neighbourhood density functions. o Internal repairs and bug fixes. o We thank Mike Kuhn and Ben Madin for contributions. NEW FUNCTIONS o idw Inverse-distance weighted smoothing. o localKinhom, localLinhom, localpcfinhom Inhomogeneous versions of localK, localL, localpcf BUG FIXES o localpcf The columns of the result were in the wrong order. [i.e. pair correlation functions were associated with the wrong points.] Fixed. o delaunay If the union of several Delaunay triangles formed a triangle, this was erroneously included in the result of delaunay(). Fixed. o predict.ppm, plot.ppm Sometimes crashed with a warning about 'subscript out of bounds'. Fixed. o point-in-polygon test Vertices of a polygon were sometimes incorrectly classified as lying outside the polygon. Fixed. o Internal code Numerous tweaks and repairs to satisfy the package checker for the future R version 2.14. CHANGES IN spatstat VERSION 1.23-0 OVERVIEW o point patterns on a linear network: new tools including geometrically-corrected linear K function, pair correlation function, point process models, envelopes o changes to renormalisation of estimates in Kinhom and pcfinhom o new dataset: Chicago street crime o spatstat now 'Suggests:' the package RandomFields o spatstat now has a Namespace o we thank Mike Kuhn, Monia Mahling, Brian Ripley for contributions. NEW DATASET o chicago Street crimes in the University district of Chicago. A point pattern on a linear network. 
NEW FUNCTIONS o envelope.lpp Simulation envelopes for point patterns on a linear network o lineardisc Compute the 'disc' of radius r in a linear network o linearpcf Pair correlation for point pattern on a linear network o linearKinhom, linearpcfinhom Inhomogeneous versions of the K function and pair correlation function for point patterns on a linear network o lppm Fit point process models on a linear network. o anova.lppm Analysis of deviance for point process models on a linear network. o predict.lppm Prediction for point process models on a linear network. o envelope.lppm Simulation envelopes for point process models on a linear network. o linim Pixel image on a linear network o plot.linim Plot a pixel image on a linear network SIGNIFICANT USER-VISIBLE CHANGES o linearK New argument 'correction'. Geometrically-corrected estimation is performed by default (based on forthcoming paper by Ang, Baddeley and Nair) o Kinhom New argument 'normpower' allows different types of renormalisation. o pcfinhom Now performs renormalisation of estimate. Default behaviour changed - estimates are now renormalised by default. BUG FIXES o density.ppp Crashed if argument 'varcov' was given. Fixed. CHANGES IN spatstat VERSION 1.22-4 OVERVIEW o new diagnostics based on score residuals o new dataset o improvements to plotting summary functions o We thank Ege Rubak, Jesper Moller, George Leser, Robert Lamb and Ulf Mehlig for contributions. NEW FUNCTIONS o Gcom, Gres, Kcom, Kres New diagnostics for fitted Gibbs or Poisson point process models based on score residuals. Gcom is the compensator of the G function Gres is the residual of the G function Kcom is the compensator of the K function Kres is the residual of the K function o psst, psstA, psstG New diagnostics for fitted Gibbs or Poisson point process models based on pseudoscore residuals. 
psst is the pseudoscore diagnostic for a general alternative psstA is the pseudoscore diagnostic for an Area-interaction alternative psstG is the pseudoscore diagnostic for a Geyer saturation alternative o compareFit Computes and compares several point process models fitted to the same dataset, using a chosen diagnostic. o as.interact Extracts the interpoint interaction structure (without parameters) from a fitted point process model or similar object. NEW DATASET o flu Spatial point patterns giving the locations of influenza virus proteins on cell membranes. Kindly released by Dr George Leser and Dr Robert Lamb. SIGNIFICANT USER-VISIBLE CHANGES o pixel images and grids The default size of a pixel grid, given by spatstat.options("npixel"), has been changed from 100 to 128. A power of 2 gives faster and more accurate results in many cases. o residuals.ppm New arguments 'coefs' and 'quad' for advanced use (make it possible to compute residuals from a modified version of the fitted model.) o relrisk New argument 'casecontrol' determines whether a bivariate point pattern should be treated as case-control data. o plot.fv Further improvements in mathematical labels. o plot.fv The formula can now include the symbols .x and .y as abbreviation for the function argument and the recommended function value, respectively. o plot.fv New argument 'add' BUG FIXES o multitype summary functions (Kcross, Kdot, Gcross, Gdot, .....) Plotting these functions generated an error if the name of one of the types of points contained spaces, e.g. "Escherichia coli". Fixed. CHANGES IN spatstat VERSION 1.22-3 OVERVIEW o Important bug fix to simulation code o Miscellaneous improvements o spatstat now depends on R 2.13.0 or later o We thank Ege Rubak, Kaspar Stucki, Vadim Shcherbakov, Jesper Moller and Ben Taylor for contributions. NEW FUNCTIONS o is.stationary, is.poisson New generic functions for testing whether a point process model is stationary and/or Poisson. 
Methods for ppm, kppm, slrm etc o raster.xy raster coordinates of a pixel mask o zapsmall.im 'zapsmall' for pixel images SIGNIFICANT USER-VISIBLE CHANGES o density.ppp New argument 'diggle' allows choice of edge correction o rotate.owin, affine.owin These functions now handle binary pixel masks. New argument 'rescue' determines whether rectangles will be preserved BUG FIXES o rmh, simulate.ppm Serious bug - simulation was completely incorrect in the case of a multitype point process with an interaction that does not depend on the marks, such as ppm(betacells, ~marks, Strauss(60)) The calling parameters were garbled. Fixed. o effectfun Crashed if the covariate was a function(x,y). Fixed. o lurking Gave erroneous error messages about 'damaged' models. Fixed. o envelope.ppm Did not recognise when the fitted model was equivalent to CSR. Fixed. o plot.ppx Crashed in some cases. Fixed. CHANGES IN spatstat VERSION 1.22-2 OVERVIEW o Fitting and simulation of log-Gaussian Cox processes with any covariance function o More support for 'kppm' and 'rhohat' objects o K-function for point patterns on a linear network o Metropolis-Hastings algorithm now saves its transition history o Easier control of dummy points in ppm o Convert an 'fv' object to an R function o spatstat now depends on the package 'RandomFields' o We thank Abdollah Jalilian, Shen Guochun, Rasmus Waagepetersen, Ege Rubak and Ang Qi Wei for contributions. NEW FUNCTIONS o linearK Computes the Okabe-Yamada network K-function for a point pattern on a linear network. o pairdist.lpp Shortest-path distances between each pair of points on a linear network. o vcov.kppm Asymptotic variance-covariance matrix for regression parameters in kppm object. 
[Contributed by Abdollah Jalilian and Rasmus Waagepetersen] o rLGCP Simulation of log-Gaussian Cox processes [Contributed by Abdollah Jalilian and Rasmus Waagepetersen] o predict.rhohat Method for 'predict' for objects of class 'rhohat' Computes a pixel image of the predicted intensity. o Kmodel, pcfmodel Generic functions that compute the K-function or pair correlation function of a point process *model*. So far the only methods are for the class 'kppm'. o as.function.fv Converts a function value table (class 'fv') to a function in R o coef.kppm Method for 'coef' for objects of class 'kppm' o unitname, unitname<- These generic functions now have methods for fitted model objects (classes ppm, slrm, kppm, minconfit) and quadrature schemes (quad). o nobs.ppm Method for 'nobs' for class 'ppm'. Returns the number of points in the original data. SIGNIFICANT USER-VISIBLE CHANGES o kppm Can now fit a log-Gaussian Cox process o simulate.kppm Can now simulate a fitted log-Gaussian Cox process o lgcp.estK, lgcp.estpcf These functions previously fitted a log-Gaussian Cox process with exponential covariance. They can now fit a log-Gaussian Cox process with any covariance function implemented by the RandomFields package. o rmh If track=TRUE, the history of transitions of the Metropolis-Hastings algorithm is saved and returned. o ppm New argument 'nd' controls the number of dummy points. o as.fv Now handles objects of class kppm or minconfit. o rhohat If covariate = "x" or "y", the resulting object has the same 'unitname' as the original point pattern data. o rhohat Now has arguments 'eps, 'dimyx' to control pixel resolution. o MultiStrauss, MultiHard, MultiStraussHard Default value of 'types' has been changed to NULL. o data(ants) The auxiliary data 'ants.extra' now includes a function called 'side' determining whether a given location is in the scrub or field region. Can be used as a covariate in ppm, kppm, slrm. 
o print.ppm Now has argument 'what' to allow only selected information to be printed. BUG FIXES o profilepl Crashed in some cases involving multitype interactions. Fixed. o plot.splitppp Behaved incorrectly if 'main' was an expression. Fixed. o effectfun Crashed in trivial cases. Fixed. o kppm, thomas.estpcf, matclust.estpcf, lgcp.estpcf Gave a spurious warning message. Fixed. o step When applied to ppm objects this gave a spurious warning. Fixed. CHANGES IN spatstat VERSION 1.22-1 OVERVIEW o marked line segment patterns can now be plotted o multitype point process models are now 'self-starting' o new functions to manipulate colour images NEW FUNCTIONS o rgbim, hsvim Specify three colour channels. These functions convert three pixel images with numeric values into a single image whose pixel values are strings representing colours. o scaletointerval Generic utility function to rescale data (including spatial data) to a specified interval SIGNIFICANT USER-VISIBLE CHANGES o plot.im Can now plot images whose pixel values are strings representing colours. New argument 'valuesAreColours' o plot.psp Now handles marked line segment patterns and plots the marks as colours. o MultiHard, MultiStrauss, MultiStraussHard The argument 'types' can now be omitted; it will be inferred from the point pattern data. o rhohat Improved mathematical labels (when the result of rhohat is plotted) o plot.fv Minor improvements in graphics BUG FIXES o several minor bug fixes and improvements to satisfy R-devel CHANGES IN spatstat VERSION 1.22-0 OVERVIEW o support for point patterns on a linear network o 'superimpose' is now generic o improved mathematical labels when plotting functions NEW CLASSES o linnet An object of class 'linnet' represents a linear network, i.e. a connected network of line segments, such as a road network. Methods for this class include plot, print, summary etc. 
o lpp An object of class 'lpp' represents a point pattern on a linear network, such as a record of the locations of road accidents on a road network. Methods for this class include plot, print, summary etc. NEW FUNCTIONS o runiflpp Uniformly distributed random points on a linear network o rpoislpp Poisson point process on a linear network o clickjoin Interactive graphics to create a linear network o superimpose The function 'superimpose' is now generic, with methods for ppp, psp and a default method. o as.ppp.psp New method for as.ppp extracts the endpoints and marks from a line segment pattern NEW DATASETS o simplenet Simple example of a linear network SIGNIFICANT USER-VISIBLE CHANGES o superimposePSP This function is now deprecated in favour of 'superimpose' o superimpose Now handles data frames of marks. o plot.fv Argument 'legendmath' now defaults to TRUE. New argument 'legendargs' gives more control over appearance of legend. Increased default spacing between lines in legend. o eval.fv, with.fv Functions computed using eval.fv or with.fv now have better labels when plotted. o summary functions (Kest, Kest.fft, Kcross, Kdot, Kmulti, Kinhom, Kcross.inhom, Kdot.inhom, Kmulti.inhom, Lest, Lcross, Ldot, pcf, pcfcross, pcfdot, pcfinhom, pcfcross.inhom, pcfdot.inhom, Fest, Gest, Gcross, Gdot, Gmulti, Jest, Jcross, Jdot, Jmulti, Iest, localL, localK, markcorr, markvario, markconnect, Emark, Vmark, allstats, alltypes) Improved plot labels. BUG FIXES o superimpose If the marks components of patterns consisted of character vectors (rather than factors or non-factor numeric vectors) an error was triggered. Fixed. o plot.fv The y axis limits did not always cover the range of values if the argument 'shade' was used. Fixed. o plot.rhohat The y axis label was sometimes incorrect. Fixed. o plot.rhohat If argument 'xlim' was used, a warning was generated from 'rug'. Fixed. CHANGES IN spatstat VERSION 1.21-6 OVERVIEW o A line segment pattern can now have a data frame of marks. 
o Various minor extensions and alterations in behaviour NEW FUNCTIONS o nsegments Number of segments in a line segment pattern SIGNIFICANT USER-VISIBLE CHANGES o psp class A line segment pattern (object of class 'psp') can now have a data frame of marks. o density.ppp New argument 'adjust' makes it easy to adjust the smoothing bandwidth o plot.envelope If the upper envelope is NA but the lower envelope is finite, the upper limit is now treated as +Infinity o msr Argument 'continuous' renamed 'density' BUG FIXES o [.psp In X[W] if X is a line segment pattern and W is a polygonal window, marks were sometimes discarded, leading to an error. Fixed. o [.psp In X[W] if X is a line segment pattern and W is a rectangular window, if the marks of X were factor values, they were converted to integers. Fixed. o superimposePSP If the marks were a factor, they were mistakenly converted to integers. Fixed. o is.marked.ppp Did not generate a fatal error when na.action="fatal" as described in the help file. Fixed. CHANGES IN spatstat VERSION 1.21-5 OVERVIEW o Increased numerical stability. o New 'self-starting' feature of interpoint interactions. SIGNIFICANT USER-VISIBLE CHANGES o ppm Interaction objects may now be 'self-starting' i.e. initial parameter estimates can be computed from the point pattern dataset. So far, only the LennardJones() interaction has a self-starting feature. o LennardJones Increased numerical stability. New (optional) scaling argument 'sigma0'. Interpoint distances are automatically rescaled using 'self-starting' feature. o vcov.ppm New argument 'matrix.action' controls what happens when the matrix is ill-conditioned. Changed name of argument 'gamaction' to 'gam.action' o rmhmodel.ppm Default resolution of trend image has been increased. o is.poisson.ppm Accelerated. 
o ppm, kppm, qqplot.ppm Improved robustness to numerical error CHANGES IN spatstat VERSION 1.21-4 OVERVIEW o Urgent bug fix BUG FIXES o print.summary.ppm exited with an error message, if the model had external covariates. Fixed. CHANGES IN spatstat VERSION 1.21-3 OVERVIEW o Point process model covariates may now depend on additional parameters. o New class of signed measures, for residual analysis. o Miscellaneous improvements and bug fixes. NEW FUNCTIONS o clarkevans.test Classical Clark-Evans test of randomness o msr New class 'msr' of signed measures and vector-valued measures supporting residual analysis. o quadrat.test.quadratcount Method for 'quadrat.test' for objects of class 'quadratcount' (allows a chi-squared test to be performed on quadrat counts rather than recomputing from the original data) o tile.areas Computes areas of tiles in a tessellation (efficiently) SIGNIFICANT USER-VISIBLE CHANGES o ppm The spatial trend can now depend on additional parameters. This is done by allowing spatial covariate functions to have additional parameters: function(x, y, ...) where ... is controlled by the new argument 'covfunargs' to ppm o profilepl Can now maximise over trend parameters as well as interaction parameters o residuals.ppm The value returned by residuals.ppm is now an object of class 'msr'. It can be plotted directly. o eval.im When the argument 'envir' is used, eval.im() now recognises functions as well as variables in 'envir' o colourmap The argument 'col' can now be any kind of colour data o persp.im The 'colmap' argument can now be a 'colourmap' object o ppm The print and summary methods for 'ppm' objects now show standard errors for parameter estimates if the model is Poisson. o quadrat.test The print method for 'quadrattest' objects now displays information about the quadrats o lurking Improved format of x axis label o distmap.ppp Internal code is more robust. BUG FIXES o im Did not correctly handle 1 x 1 arrays. Fixed. 
o as.mask, pixellate.ppp Weird things happened if the argument 'eps' was set to a value greater than the size of the window. Fixed. CHANGES IN spatstat VERSION 1.21-2 OVERVIEW o New multitype hardcore interaction. o Nonparametric estimation of covariate effects on point patterns. o Output of 'Kmeasure' has been rescaled. o Numerous improvements and bug fixes. NEW FUNCTIONS o MultiHard multitype hard core interaction for use in ppm() o coords<- Assign new coordinates to the points in a point pattern o rhohat Kernel estimate for the effect of a spatial covariate on point process intensity SIGNIFICANT USER-VISIBLE CHANGES o as.ppp.matrix, as.ppp.data.frame These methods for 'as.ppp' now accept a matrix or data frame with any number of columns (>= 2) and interpret the additional columns as marks. o Kmeasure The interpretation of the output has changed: the pixel values are now density estimates. o rmh.ppm, rmhmodel.ppm These functions now accept a point process model fitted with the 'MultiHard' interaction o rmh.default, rmhmodel.default These functions now accept the option: cif='multihard' defining a multitype hard core interaction. o markcorr Now handles a data frame of marks o varblock Improved estimate in the case of the K function o colourmap, lut New argument 'range' makes it easier to specify a colour map or lookup table o [<-.hyperframe Now handles multiple columns o plot.fv Improved y axis labels o spatstat.options New option 'par.fv' controls default parameters for line plotting o rmhmodel More safety checks on parameter values. o quadratresample New argument 'verbose' o smooth.fv Default value of 'which' has been changed. BUG FIXES o Kest If the argument 'domain' was used, the resulting estimate was not correctly normalised. Fixed. o Kest The Lotwick-Silverman variance approximation was incorrectly calculated. (Spotted by Ian Dryden and Igor Chernayavsky). Fixed. 
o plot.owin, plot.ppp Display of binary masks was garbled if the window was empty or if it was equivalent to a rectangle. Fixed. o plot.bermantest One of the vertical lines for the Z1 test was in the wrong place. Fixed. o marks<-.ppx Crashed in some cases. Fixed. o is.convex An irrelevant warning was issued (for non-convex polygons). Fixed. CHANGES IN spatstat VERSION 1.21-1 OVERVIEW o Confidence intervals for K-function and other statistics o Bug fixes for smoothing and relative risk estimation NEW FUNCTIONS o varblock Variance estimation (and confidence intervals) for summary statistics such as Kest, using subdivision technique o bw.stoyan Bandwidth selection by Stoyan's rule of thumb. o which.max.im Applied to a list of images, this determines which image has the largest value at each pixel. o as.array.im Convert image to array SIGNIFICANT USER-VISIBLE CHANGES o smooth.ppp, markmean, sharpen.ppp, relrisk, bw.relrisk Further acceleration achieved. o Kest Argument 'correction' now explicitly overrides automatic defaults o plot.fv More robust handling of 'shade' BUG FIXES o relrisk Format of relrisk(at="points") was incorrect. Fixed. o bw.relrisk Result was incorrect in the default case method="likelihood" because of previous bug. Fixed. o Jdot, Jcross, Jmulti Return value did not include the hazard function, when correction="km" Fixed. o Jdot, Jcross, Jmulti Format of output was incompatible with format of Jest. Fixed. CHANGES IN spatstat VERSION 1.21-0 OVERVIEW o Implemented Spatial Logistic Regression o Implemented nonparametric estimation of relative risk with bandwidth selection by cross-validation. o Smoothing functions can handle a data frame of marks. o New options in Kinhom; default behaviour has changed. 
NEW FUNCTIONS o slrm Fit a spatial logistic regression model o anova.slrm, coef.slrm, fitted.slrm, logLik.slrm, plot.slrm, predict.slrm Methods for spatial logistic regression models o relrisk Nonparametric estimation of relative risk o bw.relrisk Automatic bandwidth selection by cross-validation o default.rmhcontrol Sets default values of Metropolis-Hastings parameters SIGNIFICANT USER-VISIBLE CHANGES o smooth.ppp, markmean These functions now accept a data frame of marks. o Kinhom Default behaviour has changed. New argument 'renormalise=TRUE' determines scaling of estimator and affects bias and variance in small samples. o residuals.ppm Now also computes the score residuals. o plot.im New argument 'ribscale' o plot.listof, plot.splitppp New arguments panel.begin, panel.end and panel.args o ppp Now checks for NA/NaN/Inf values in the coordinates o envelope.ppm Changed default value of 'control' New argument 'nrep' o qqplot.ppm Changed default value of 'control' BUG FIXES o marks<-.ppp, setmarks, %mark% A matrix of marks was accepted by ppp() but not by these assignment functions. Fixed. o density.ppp, smooth.ppp, sharpen.ppp, markmean Crashed if the bandwidth was extremely small. Fixed. CHANGES IN spatstat VERSION 1.20-5 OVERVIEW o Accelerated computations of kernel smoothing. o Implemented Choi-Hall data sharpening. NEW FUNCTIONS o sharpen.ppp Performs Choi-Hall data sharpening of a point pattern SIGNIFICANT USER-VISIBLE CHANGES o density.ppp, smooth.ppp Computation has been vastly accelerated for density(X, at="points") and smooth.ppp(X, at="points") o Kinhom Accelerated in case where lambda=NULL o Vignette 'shapefiles' updated CHANGES IN spatstat VERSION 1.20-4 OVERVIEW o New functions for inhomogeneous point patterns and local analysis. o Pair correlation function for 3D point patterns o Minor improvements and bug fixes to simulation code and image functions NEW FUNCTIONS o pcf3est Pair correlation function for 3D point patterns. 
o Kscaled, Lscaled Estimator of the template K function (and L-function) for a locally-scaled point process. o localpcf Local version of pair correlation function o identify.psp Method for 'identify' for line segment patterns. o as.im.matrix Converts a matrix to a pixel image SIGNIFICANT USER-VISIBLE CHANGES o rMaternI, rMaternII New argument 'stationary=TRUE' controls whether the simulated process is stationary (inside the simulation window). Default simulation behaviour has changed. o im New arguments 'xrange', 'yrange' o envelope Improvements to robustness of code. BUG FIXES o quadratcount If V was a tessellation created using a factor-valued image, quadratcount(X, tess=V) crashed with the error "Tessellation does not contain all the points of X". Fixed. o [.im If Z was a factor valued image and X was a point pattern then Z[X] was not a factor. Fixed. CHANGES IN spatstat VERSION 1.20-3 OVERVIEW o minor improvements (mostly internal). NEW FUNCTIONS o unmark.ppx Method for 'unmark' for general space-time point patterns SIGNIFICANT USER-VISIBLE CHANGES o plot.ppx Now handles marked patterns, in two-dimensional case o as.psp.psp Default value of argument 'check' set to FALSE CHANGES IN spatstat VERSION 1.20-2 OVERVIEW o Extensions to minimum contrast estimation. o Bug fix in simulation of Lennard-Jones model. o More support for distance functions. o Changes to point process simulations. NEW FUNCTIONS o thomas.estpcf Fit Thomas process model by minimum contrast using the pair correlation function (instead of the K-function). o matclust.estpcf Fit Matern Cluster model by minimum contrast using the pair correlation function (instead of the K-function). o lgcp.estpcf Fit log-Gaussian Cox process model by minimum contrast using the pair correlation function (instead of the K-function). o contour.distfun, persp.distfun Methods for 'contour' and 'persp' for distance functions o default.expand Computes default window for simulation of a fitted point process model. 
SIGNIFICANT USER-VISIBLE CHANGES o kppm Models can now be fitted using either the K-function or the pair correlation function. o ppm The list of covariates can now include windows (objects of class 'owin'). A window will be treated as a logical covariate that equals TRUE inside the window and FALSE outside it. o plot.distfun Pixel resolution can now be controlled. o envelope.ppm, qqplot.ppm The default value of 'control' has changed; simulation results may be slightly different. o rmh Slightly accelerated. BUG FIXES o rmh Simulation of the Lennard-Jones model (cif = 'lennard') was incorrect due to an obscure bug, introduced in spatstat 1.20-1. Fixed. o thomas.estK, matclust.estK, lgcp.estK The value of 'lambda' (if given) was ignored if X was a point pattern. Fixed. CHANGES IN spatstat VERSION 1.20-1 OVERVIEW o Further increases in speed and efficiency of ppm and rmh o New pairwise interaction model NEW FUNCTIONS o DiggleGatesStibbard Diggle-Gates-Stibbard pairwise interaction for use in ppm() SIGNIFICANT USER-VISIBLE CHANGES o ppm has been accelerated by a factor of 10 for the BadGey interaction. o rmh simulation of the Lennard-Jones model (cif='lennard') has been greatly accelerated. o rmh, rmhmodel.ppm Point process models fitted by ppm() using the DiggleGatesStibbard interaction can be simulated automatically using rmh. BUG FIXES o fitin The plot of a fitted Hardcore interaction was incorrect. Fixed. CHANGES IN spatstat VERSION 1.20-0 OVERVIEW o spatstat now contains over 1000 functions. o Substantial increase in speed and efficiency of model-fitting code. o Changes to factor-valued images. 
SIGNIFICANT USER-VISIBLE CHANGES o ppm has been accelerated by a factor of 10, and can handle datasets with 20,000 points, for the following interactions: DiggleGratton, Fiksel, Geyer, Hardcore, Strauss, StraussHard o predict.ppm accelerated by a factor of 3 (when type = "cif") with vastly reduced memory requirements for the following interactions: DiggleGratton, Fiksel, Geyer, Hardcore, Strauss, StraussHard o pixel images (class "im") The internal representation of factor-valued images has changed. Existing objects in the old format should still work. o im The syntax for creating a factor-valued image has changed. Argument 'lev' has been deleted. o ppm Some warnings have been reworded for greater clarity. BUG FIXES o [.im Mishandled some factor-valued images. Fixed. o hist.im Produced slightly erroneous output for some factor-valued images. Fixed. o plot.owin Filled polygons appeared to contain criss-cross lines on some graphics drivers. Fixed. o deltametric Did not handle windows with different enclosing frames (error message: 'dA and dB are incompatible') Fixed. o quadratcount Crashed if the pattern was empty and the window was a rectangle. (Noticed by Sandro Azaele) Fixed. o rNeymanScott Crashed if the parent process realisation was empty. (Noticed by Sandro Azaele) Fixed. CHANGES IN spatstat VERSION 1.19-3 ACKNOWLEDGEMENTS o We thank David Dereudre for contributions. OVERVIEW o Urgent bug fix to Metropolis-Hastings for Lennard-Jones model. o Miscellaneous additions to plotting and colour management. NEW FUNCTIONS o col2hex, rgb2hex, paletteindex, samecolour Functions for converting and comparing colours. o plot.envelope New method for plotting envelopes. By default the area between the upper and lower envelopes is shaded in grey. SIGNIFICANT USER-VISIBLE CHANGES o plot.fasp If the entries in the array are envelopes, they are plotted using plot.envelope (hence the envelope region is shaded grey). 
o plot.fv Now displays mathematical notation for each curve, if legendmath=TRUE. o print.fv Now prints the available range of 'r' values as well as the recommended range of 'r' values. BUG FIXES o rmh Simulation of Lennard-Jones model was incorrect; the simulations were effectively Poisson patterns. (Spotted by David Dereudre.) Fixed. o plot.fv Did not correctly handle formulas that included I( ) Fixed. CHANGES IN spatstat VERSION 1.19-2 ACKNOWLEDGEMENTS o We thank Jorge Mateu, Michael Sumner and Sebastian Luque for contributions. OVERVIEW o More support for fitted point process models and pixel images. o Improved plotting of pixel images and envelopes. o Simulation algorithm for Lennard-Jones process. o Improvements and bug fixes to envelopes. o Bug fixes to Metropolis-Hastings simulation. NEW FUNCTIONS o pairs.im Creates a scatterplot matrix for several pixel images. o model.frame.ppm Method for 'model.frame' for point process models. o sort.im Method for 'sort' for pixel images. SIGNIFICANT USER-VISIBLE CHANGES o plot.fv, plot.fasp New argument 'shade' enables confidence intervals or significance bands to be displayed as filled grey shading. o LennardJones The parametrisation of this interaction function has been changed. o rmh, rmhmodel These functions will now simulate a point process model that was fitted using the LennardJones() interaction. o rmh.default, rmhmodel.default These functions will now simulate a point process model with the Lennard-Jones interaction (cif='lennard'). o ecdf This function now works for pixel images. o dim, row, col These functions now work for pixel images. o order This function now works for pixel images. o [.im and [<-.im The subset index can now be any valid subset index for a matrix. o density.ppp, smooth.ppp The return value now has attributes 'sigma' and 'varcov' reporting the smoothing bandwidth. o plot.im The argument 'col' can now be a 'colourmap' object. 
This makes it possible to specify a fixed mapping between numbers and colours (e.g. so that it is consistent between plots of several different images). o rmh, spatstat.options spatstat.options now recognises the parameter 'expand' which determines the default window expansion factor in rmh. o rmh Improved handling of ppm objects with covariates. o kstest The 'covariate' can now be one of the characters "x" or "y" indicating the Cartesian coordinates. BUG FIXES o model.matrix.ppm For a fitted model that used a large number of quadrature points, model.matrix.ppm sometimes reported an internal error about mismatch between the model matrix and the quadrature scheme. Fixed. o plot.ppx Minor bugs fixed. o rmh In rare cases, the simulated point pattern included multiple points at the origin (0,0). (Bug introduced in spatstat 1.17-0.) Fixed. o rmh, rmhmodel.ppm Crashed when applied to a fitted multitype point process model if the model involved more than one covariate image. (Spotted by Jorge Mateu) Fixed. o density.psp If any segment had zero length, the result contained NaN values. (Spotted by Michael Sumner and Sebastian Luque.) Fixed. o envelope Crashed with fun=Lest or fun=Linhom if the number of points in a simulated pattern exceeded 3000. Fixed. o plot.kstest Main title was corrupted if the covariate was a function. Fixed. CHANGES IN spatstat VERSION 1.19-1 OVERVIEW o New dataset: replicated 3D point patterns. o Improvements to Metropolis-Hastings simulation code. o More support for hyperframes. o Bug fixes. NEW DATASETS o osteo: Osteocyte Lacunae data: replicated 3D point patterns NEW FUNCTIONS o rbind.hyperframe: Method for rbind for hyperframes. o as.data.frame.hyperframe: Converts a hyperframe to a data frame. SIGNIFICANT USER-VISIBLE CHANGES o Fiksel: Fitted point process models (class ppm) with the Fiksel() double exponential interaction can now be simulated by rmh. 
o rmh.default: Point processes with the Fiksel interaction can now be simulated by specifying parameters in rmh.default. o logLik.ppm: New argument 'warn' controls warnings. o profilepl: No longer issues spurious warnings. BUG FIXES o Hardcore, rmh: Simulation of the 'Hardcore' process was incorrect. The hard core radius was erroneously set to zero so that the simulated patterns were Poisson. Fixed. o fitin: A plot of the pairwise interaction function of a fitted model, generated by plot(fitin(model)) where model <- ppm(...), was sometimes incorrect when the model included a hard core. Fixed. CHANGES IN spatstat VERSION 1.19-0 OVERVIEW o Numerous bugs fixed in the implementation of the Huang-Ogata approximate maximum likelihood method. o New interpoint interaction model. NEW FUNCTIONS o Fiksel: new interpoint interaction: Fiksel's double exponential model. SIGNIFICANT USER-VISIBLE CHANGES o runifpoint, rpoispp, envelope These functions now issue a warning if the number of random points to be generated is very large. This traps a common error in simulation experiments. BUG FIXES o predict.ppm, fitted.ppm: Predictions and fitted values were incorrect for objects fitted using ppm(..., method="ho"). Fixed. o logLik, AIC: Values of logLik() and AIC() were incorrect for objects fitted using ppm(..., method="ho"). Fixed. o profilepl: Results were incorrect if the argument 'method="ho"' was used. Fixed. o fitin The result of fitin() was incorrect for objects fitted using ppm(..., method="ho"). Fixed. o rmhcontrol: rmhcontrol(NULL) generated an error. Fixed. CHANGES IN spatstat VERSION 1.18-4 ACKNOWLEDGEMENTS o We thank Michael Sumner for contributions. BUG FIXES o pixellate.psp: segments shorter than one pixel width were measured incorrectly if the 'weights' argument was present. Fixed. 
NEW FUNCTIONS o pairdist.ppx, crossdist.ppx, nndist.ppx, nnwhich.ppx: Methods for pairdist, crossdist, nndist, nnwhich for multidimensional point patterns (class 'ppx') o runifpointx, rpoisppx: Random point patterns in any number of dimensions o boxx: Multidimensional box in any number of dimensions o diameter.boxx, volume.boxx, shortside.boxx, eroded.volumes.boxx: Geometrical computations for multidimensional boxes o sum.im, max.im, min.im: Methods for sum(), min(), max() for pixel images. o as.matrix.ppx: Convert a multidimensional point pattern to a matrix SIGNIFICANT USER-VISIBLE CHANGES o plot.ppp: New argument 'zap' o diameter: This function is now generic, with methods for "owin", "box3" and "boxx" o eroded.volumes: This function is now generic, with methods for "box3" and "boxx" CHANGES IN spatstat VERSION 1.18-3 ACKNOWLEDGEMENTS o We thank Michael Sumner for contributions. BUG FIXES o pixellate.psp: segments shorter than one pixel width were measured incorrectly. Fixed. o fv: 'alim' not handled correctly. Fixed. NEW FUNCTIONS o smooth.fv: Applies spline smoothing to the columns of an fv object. CHANGES IN spatstat VERSION 1.18-2 ACKNOWLEDGEMENTS o We thank Michael Sumner for contributions. NEW FUNCTIONS o Gfox, Jfox: Foxall's G and J functions o as.owin.distfun: New method for as.owin extracts the domain of a distfun object. SIGNIFICANT USER-VISIBLE CHANGES o distfun: objects of class 'distfun', when called as functions, will now accept either two vectors (x,y) or a point pattern x. o Hest: this function can now compute the Hanisch estimator. It now has arguments 'r', 'breaks' and 'correction', like other summary functions. o Hest: new argument 'conditional'. BUG FIXES o pixellate.psp: Values were sometimes incorrect due to coding error. (Spotted by Michael Sumner) Fixed. o kstest: Crashed if the covariate contained NA's. Fixed. o kstest: Crashed if X was a multitype point pattern in which some mark values were unrepresented. Fixed. 
o lurking: Minor bug in handling of NA values. Fixed. o Hest: labels of columns were incorrect. Fixed. CHANGES IN spatstat VERSION 1.18-1 ACKNOWLEDGEMENTS o we thank Andrew Bevan and Ege Rubak for suggestions. NEW FUNCTIONS o Hardcore: Hard core interaction (for use in ppm) o envelope.pp3: simulation envelopes for 3D point patterns o npoints: number of points in a point pattern of any kind SIGNIFICANT USER-VISIBLE CHANGES o rmh.ppm, rmhmodel.ppm: It is now possible to simulate Gibbs point process models that are fitted to multitype point patterns using a non-multitype interaction, e.g. data(amacrine) fit <- ppm(amacrine, ~marks, Strauss(0.1)) rmh(fit, ...) o rmh.ppm, rmhmodel.ppm, rmh.default, rmhmodel.default: Hard core models can be simulated. o rmh.default, rmhmodel.default: The argument 'par' is now required to be a list, in all cases (previously it was sometimes a list and sometimes a vector). o Fest: Calculation has been accelerated in some cases. o summary.pp3 now returns an object of class 'summary.pp3' containing useful summary information. It is plotted by 'plot.summary.pp3'. o F3est, G3est, K3est: these functions now accept 'correction="best"' o union.owin, intersect.owin: these functions now handle any number of windows. o envelope.ppp, envelope.ppm, envelope.kppm: argument lists have changed slightly BUG FIXES o Fest: The result of Fest(X, correction="rs") had a slightly corrupted format, so that envelope(X, Fest, correction="rs") in fact computed the envelopes based on the "km" correction. (Spotted by Ege Rubak). Fixed. o rmh (rmh.ppm, rmhmodel.ppm): rmh sometimes failed for non-stationary point process models, with a message about "missing value where TRUE/FALSE needed". (Spotted by Andrew Bevan). Fixed. o diagnose.ppm, lurking: Calculations were not always correct if the model had conditional intensity equal to zero at some locations. Fixed. o ppm, profilepl: If data points are illegal under the model (i.e. 
if any data points have conditional intensity equal to zero) the log pseudolikelihood should be -Inf but was sometimes returned as a finite value. Thus profilepl did not always work correctly for models with a hard core. Fixed. o F3est, G3est: Debug messages were printed unnecessarily. Fixed. CHANGES IN spatstat VERSION 1.18-0 ACKNOWLEDGEMENTS o we thank Ege Rubak and Tyler Dean Rudolph for suggestions. HEADLINES o A point pattern is now allowed to have a data frame of marks (previously the marks had to be a vector). o Extended capabilities for 'envelope' and 'kstest'. NEW FUNCTIONS o pixellate.psp, as.mask.psp Convert a line segment pattern to a pixel image or binary mask o as.data.frame.im Convert a pixel image to a data frame SIGNIFICANT USER-VISIBLE CHANGES o A point pattern is now allowed to have a data frame of marks (previously the marks had to be a vector). o Many functions in spatstat now handle point patterns with a data frame of marks. These include print.ppp, summary.ppp, plot.ppp, split.ppp. o finpines, nbfires, shapley: The format of these datasets has changed. They are now point patterns with a data frame of marks. o envelope() is now generic, with methods for "ppp", "ppm" and "kppm". o kstest() now handles multitype point patterns and multitype point process models. o nnclean() now returns a point pattern with a data frame of marks. o plot.ppp() has new argument 'which.marks' to select one column from a data frame of marks to be plotted. o plot.ppp() now handles marks that are POSIX times. o complement.owin now handles any object acceptable to as.owin. BUG FIXES o erosion(w) and opening(w) crashed if w was not a window. Fixed. o diameter() and eroded.areas() refused to work if w was not a window. Fixed. CHANGES IN spatstat VERSION 1.17-6 ACKNOWLEDGEMENTS o We thank Simon Byers and Adrian Raftery for generous contributions. OVERVIEW o Nearest neighbour clutter removal algorithm o New documentation for the 'fv' class. 
o Minor improvements and bug fixes. NEW FUNCTIONS o nnclean: Nearest neighbour clutter removal for recognising features in spatial point patterns. Technique of Byers and Raftery (1998) [From original code by Simon Byers and Adrian Raftery, adapted for spatstat.] o marks.ppx, marks<-.ppx: Methods for extracting and changing marks in a multidimensional point pattern o latest.news: print news about the current version of the package SIGNIFICANT USER-VISIBLE CHANGES o news: spatstat now has a NEWS file which can be printed by typing news(package="spatstat"). o areaGain, areaLoss: New algorithms in case exact=TRUE. Syntax slightly modified. o with.hyperframe: - The result now inherits 'names' from the row names of the hyperframe. - New argument 'enclos' controls the environment in which the expression is evaluated. - The algorithm is now smarter at simplifying the result when simplify=TRUE. o update.ppm: Tweaked to improve the ability of ppm objects to be re-fitted in different contexts. ADVANCED USERS ONLY o Documentation for the class 'fv' of function value tables - fv: Creates an object of class 'fv' - cbind.fv, collapse.fv: Combine objects of class 'fv' - bind.fv: Add additional columns of data to an 'fv' object BUG FIXES o "$<-.hyperframe" destroyed the row names of the hyperframe. Fixed. o model.matrix.ppm had minor inconsistencies. Fixed. o ppm: The fitted coefficient vector had incorrect format in the default case of a uniform Poisson process. Fixed. o plot.ppx: Crashed if the argument 'main' was given. Fixed. o envelope.ppp: Crashed if the object returned by 'fun' did not include a column called "theo". Fixed. 
spatstat/R/0000755000176200001440000000000013164364074012326 5ustar liggesusersspatstat/R/window.R0000755000176200001440000011126413155546330013765 0ustar liggesusers# # window.S # # A class 'owin' to define the "observation window" # # $Revision: 4.176 $ $Date: 2017/09/11 16:35:38 $ # # # A window may be either # # - rectangular: # a rectangle in R^2 # (with sides parallel to the coordinate axes) # # - polygonal: # delineated by 0, 1 or more non-self-intersecting # polygons, possibly including polygonal holes. # # - digital mask: # defined by a binary image # whose pixel values are TRUE wherever the pixel # is inside the window # # Any window is an object of class 'owin', # containing at least the following entries: # # $type: a string ("rectangle", "polygonal" or "mask") # # $xrange # $yrange # vectors of length 2 giving the real dimensions # of the enclosing box # $units # name of the unit of length # # The 'rectangle' type has only these entries. # # The 'polygonal' type has an additional entry # # $bdry # a list of polygons. # Each entry bdry[[i]] determines a closed polygon. # # bdry[[i]] has components $x and $y which are # the cartesian coordinates of the vertices of # the i-th boundary polygon (without repetition of # the first vertex, i.e. same convention as in the # plotting function polygon().) # # # The 'mask' type has entries # # $m logical matrix # $dim its dimension array # $xstep,ystep x and y dimensions of a pixel # $xcol vector of x values for each column # $yrow vector of y values for each row # # (the row index corresponds to increasing y coordinate; # the column index " " " " " " x " " ".) 
#
#
#-----------------------------------------------------------------------------
#

## Standard warning text stored in the 'warnings' entry of mask-type windows.
## Reminds users of the orientation convention stated at the top of this file:
## row index = increasing y coordinate, column index = increasing x coordinate.
.Spatstat.Image.Warning <-
  c("Row index corresponds to increasing y coordinate; column to increasing x",
    "Transpose matrices to get the standard presentation in R",
    "Example: image(result$xcol,result$yrow,t(result$d))")

owin <- local({

  ## Is 'x' a two-column matrix or data frame of (x,y) coordinates?
  isxy <- function(x) {
    (is.matrix(x) || is.data.frame(x)) && ncol(x) == 2
  }

  ## Convert a two-column matrix/data frame to a list(x, y) of coordinates.
  asxy <- function(xy) {
    list(x=xy[,1], y=xy[,2])
  }

  ## Constructor for observation windows (objects of class "owin").
  ##
  ## Creates one of the three window types described in the header comments:
  ##   - rectangle: neither 'poly' nor 'mask' supplied;
  ##   - polygonal: 'poly' supplied (a single list(x,y) polygon, a
  ##     two-column matrix/data frame, or a list of such polygons);
  ##   - mask: 'mask' supplied (a logical matrix, or a data frame of
  ##     coordinates which is delegated to as.owin).
  ##
  ## xrange, yrange: numeric vectors of length 2 giving the enclosing frame.
  ##   If only one of them is supplied, an error is raised; for polygons and
  ##   masks they default to ranges derived from the data.
  ## ...: hidden options 'check', 'calculate', 'strict', 'fix' controlling
  ##   how much validation/repair is performed (see below).
  ## unitname: name of the unit of length, passed through as.units().
  ## xy: pixel coordinates (list with entries x and y); only meaningful
  ##   when 'mask' is given, otherwise ignored with a warning.
  ##
  ## Returns an object of class "owin".
  owin <- function(xrange=c(0,1), yrange=c(0,1), ..., poly=NULL, mask=NULL,
                   unitname=NULL, xy=NULL) {
    # trap a common abuse of syntax
    # (owin(W) where W is already a window: return it unchanged)
    if(nargs() == 1 && !missing(xrange) && is.owin(xrange))
      return(xrange)

    unitname <- as.units(unitname)

    ## Exterminate ambiguities
    poly.given <- !is.null(poly)
    mask.given <- !is.null(mask)
    if(poly.given && mask.given)
      stop("Ambiguous -- both polygonal boundary and digital mask supplied")

    if(!is.null(xy) && !mask.given)
      warning("Argument xy ignored: it is only applicable when a mask is given")

    ## xrange and yrange must be given together or not at all
    if(missing(xrange) != missing(yrange))
      stop("If one of xrange, yrange is specified then both must be.")

    # convert data frames to vanilla lists
    if(poly.given) {
      if(is.data.frame(poly))
        poly <- as.list(poly)
      else if(is.list(poly) && any(unlist(lapply(poly, is.data.frame))))
        poly <- lapply(poly, as.list)
    }

    ## Hidden options controlling how much checking is performed
    ## (extracted from '...'; defaults chain: calculate defaults to check,
    ##  strict/fix default to the corresponding spatstat.options settings)
    check <- resolve.1.default(list(check=TRUE), list(...))
    calculate <- resolve.1.default(list(calculate=check), list(...))
    strict <- resolve.1.default(list(strict=spatstat.options("checkpolygons")),
                                list(...))
    fix <- resolve.1.default(list(fix=spatstat.options("fixpolygons")),
                             list(...))

    if(!poly.given && !mask.given) {
      ######### rectangle #################
      if(check) {
        if(!is.vector(xrange) || length(xrange) != 2 ||
           xrange[2L] < xrange[1L])
          stop("xrange should be a vector of length 2 giving (xmin, xmax)")
        if(!is.vector(yrange) || length(yrange) != 2 ||
           yrange[2L] < yrange[1L])
          stop("yrange should be a vector of length 2 giving (ymin, ymax)")
      }
      w <- list(type="rectangle",
                xrange=xrange,
                yrange=yrange,
                units=unitname)
      class(w) <- "owin"
      return(w)
    } else if(poly.given) {
      ######### polygonal boundary ########
      #
      if(length(poly) == 0) {
        # empty polygon
        if(check) {
          if(!is.vector(xrange) || length(xrange) != 2 ||
             xrange[2L] < xrange[1L])
            stop("xrange should be a vector of length 2 giving (xmin, xmax)")
          if(!is.vector(yrange) || length(yrange) != 2 ||
             yrange[2L] < yrange[1L])
            stop("yrange should be a vector of length 2 giving (ymin, ymax)")
        }
        w <- list(type="polygonal",
                  xrange=xrange,
                  yrange=yrange,
                  bdry=list(),
                  units=unitname)
        class(w) <- "owin"
        return(w)
      }
      # convert matrix or data frame to list(x,y)
      if(isxy(poly)) {
        poly <- asxy(poly)
      } else if(is.list(poly) && all(unlist(lapply(poly, isxy)))) {
        poly <- lapply(poly, asxy)
      }
      # nonempty polygon
      # test whether it's a single polygon or multiple polygons
      if(verify.xypolygon(poly, fatal=FALSE))
        psingle <- TRUE
      else if(all(unlist(lapply(poly, verify.xypolygon, fatal=FALSE))))
        psingle <- FALSE
      else
        stop("poly must be either a list(x,y) or a list of list(x,y)")

      w.area <- NULL
      if(psingle) {
        # single boundary polygon
        bdry <- list(poly)
        if(check || calculate) {
          ## negative area means the vertices run clockwise (wrong direction
          ## for an outer boundary in this convention)
          w.area <- Area.xypolygon(poly)
          if(w.area < 0)
            stop(paste("Area of polygon is negative -",
                       "maybe traversed in wrong direction?"))
        }
      } else {
        # multiple boundary polygons
        # (individual negative areas are allowed -- they denote holes --
        #  but the total area must be nonnegative)
        bdry <- poly
        if(check || calculate) {
          w.area <- unlist(lapply(poly, Area.xypolygon))
          if(sum(w.area) < 0)
            stop(paste("Area of window is negative;\n",
                       "check that all polygons were traversed",
                       "in the right direction"))
        }
      }

      ## derive or validate the enclosing frame from the polygon vertices
      actual.xrange <- range(unlist(lapply(bdry, getElement, name="x")))
      if(missing(xrange))
        xrange <- actual.xrange
      else if(check) {
        if(!is.vector(xrange) || length(xrange) != 2 ||
           xrange[2L] <= xrange[1L])
          stop("xrange should be a vector of length 2 giving (xmin, xmax)")
        if(!all(xrange == range(c(xrange, actual.xrange))))
          stop("polygon's x coordinates outside xrange")
      }

      actual.yrange <- range(unlist(lapply(bdry, getElement, name="y")))
      if(missing(yrange))
        yrange <- actual.yrange
      else if(check) {
        if(!is.vector(yrange) || length(yrange) != 2 ||
           yrange[2L] <= yrange[1L])
          stop("yrange should be a vector of length 2 giving (ymin, ymax)")
        if(!all(yrange == range(c(yrange, actual.yrange))))
          stop("polygon's y coordinates outside yrange")
      }

      if(!is.null(w.area)) {
        # tack on area and hole data
        holes <- (w.area < 0)
        for(i in seq_along(bdry))
          bdry[[i]] <- append(bdry[[i]],
                              list(area=w.area[i], hole=holes[i]))
      }

      w <- list(type="polygonal",
                xrange=xrange,
                yrange=yrange,
                bdry=bdry,
                units=unitname)
      class(w) <- "owin"

      if(check && strict) {
        ## strict checks on geometry (self-intersection etc)
        ok <- owinpolycheck(w)
        if(!ok) {
          errors <- attr(ok, "err")
          stop(paste("Polygon data contain", commasep(errors)))
        }
      }

      if(check && fix) {
        if(length(bdry) == 1 &&
           length(bx <- bdry[[1L]]$x) == 4 &&
           length(unique(bx)) == 2 &&
           length(unique(bdry[[1L]]$y)) == 2) {
          ## it's really a rectangle
          ## (just fix the orientation if it was traversed clockwise)
          if(Area.xypolygon(bdry[[1L]]) < 0)
            w$bdry <- lapply(bdry, reverse.xypolygon)
        } else {
          ## repair polygon data by invoking polyclip
          ## to intersect polygon with larger-than-bounding rectangle
          ## (Streamlined version of intersect.owin)
          ww <- lapply(bdry, reverse.xypolygon)
          xrplus <- mean(xrange) + c(-1,1) * diff(xrange)
          yrplus <- mean(yrange) + c(-1,1) * diff(yrange)
          bignum <- (.Machine$integer.max^2)/2
          epsclip <- max(diff(xrange), diff(yrange))/bignum
          rr <- list(list(x=xrplus[c(1,2,2,1)], y=yrplus[c(2,2,1,1)]))
          bb <- polyclip::polyclip(ww, rr, "intersection",
                                   fillA="nonzero", fillB="nonzero",
                                   eps=epsclip)
          ## ensure correct polarity
          totarea <- sum(unlist(lapply(bb, Area.xypolygon)))
          if(totarea < 0)
            bb <- lapply(bb, reverse.xypolygon)
          w$bdry <- bb
        }
      }
      return(w)
    } else if(mask.given) {
      ######### digital mask #####################

      if(is.data.frame(mask) &&
         ncol(mask) %in% c(2,3) &&
         sum(sapply(mask, is.numeric)) == 2) {
        # data frame with 2 columns of coordinates
        # (delegate to the data frame method of as.owin)
        return(as.owin(W=mask, xy=xy))
      }

      if(!is.matrix(mask))
        stop(paste(sQuote("mask"), "must be a matrix"))
      if(!is.logical(mask))
        stop(paste("The entries of", sQuote("mask"), "must be logical"))
      nc <- ncol(mask)
      nr <- nrow(mask)

      if(!is.null(xy)) {
        # pixel coordinates given explicitly
        # validate dimensions
        if(!is.list(xy) || !checkfields(xy, c("x","y")))
          stop("xy should be a list with entries x and y")
        xcol <- xy$x
        yrow <- xy$y
        if(length(xcol) != nc)
          stop(paste("length of xy$x =", length(xcol),
                     "!=", nc, "= number of columns of mask"))
        if(length(yrow) != nr)
          stop(paste("length of xy$y =", length(yrow),
                     "!=", nr, "= number of rows of mask"))
        # x and y should be evenly spaced
        if(!evenly.spaced(xcol))
          stop("xy$x is not evenly spaced")
        if(!evenly.spaced(yrow))
          stop("xy$y is not evenly spaced")
        # determine other parameters
        xstep <- diff(xcol)[1L]
        ystep <- diff(yrow)[1L]
        if(missing(xrange) && missing(yrange)) {
          ## frame extends half a pixel beyond the extreme pixel centres
          xrange <- range(xcol) + c(-1,1) * xstep/2
          yrange <- range(yrow) + c(-1,1) * ystep/2
        }
      } else {
        # determine pixel coordinates from xrange, yrange
        if(missing(xrange) && missing(yrange)) {
          # take pixels to be 1 x 1 unit
          xrange <- c(0,nc)
          yrange <- c(0,nr)
        } else if(check) {
          if(!is.vector(xrange) || length(xrange) != 2 ||
             xrange[2L] <= xrange[1L])
            stop("xrange should be a vector of length 2 giving (xmin, xmax)")
          if(!is.vector(yrange) || length(yrange) != 2 ||
             yrange[2L] <= yrange[1L])
            stop("yrange should be a vector of length 2 giving (ymin, ymax)")
        }
        xstep <- diff(xrange)/nc
        ystep <- diff(yrange)/nr
        ## pixel centres, inset half a step from the frame edges
        xcol <- seq(from=xrange[1L]+xstep/2, to=xrange[2L]-xstep/2,
                    length.out=nc)
        yrow <- seq(from=yrange[1L]+ystep/2, to=yrange[2L]-ystep/2,
                    length.out=nr)
      }

      out <- list(type     = "mask",
                  xrange   = xrange,
                  yrange   = yrange,
                  dim      = c(nr, nc),
                  xstep    = xstep,
                  ystep    = ystep,
                  warnings = .Spatstat.Image.Warning,
                  xcol     = xcol,
                  yrow     = yrow,
                  m        = mask,
                  units    = unitname)
      class(out) <- "owin"
      return(out)
    }
    # never reached
    NULL
  }

  owin
})

#
#-----------------------------------------------------------------------------
#

## Test whether x is an observation window (object of class "owin").
is.owin <- function(x) {
  inherits(x, "owin")
}

#
#-----------------------------------------------------------------------------
#

## Generic: convert various kinds of data to an observation window ("owin").
## 'fatal' controls whether failure raises an error (TRUE) or returns NULL.
as.owin <- function(W, ..., fatal=TRUE) {
  UseMethod("as.owin")
}

## Method for "owin": rebuild the window through the owin() constructor
## (check=FALSE because the data are already validated).
as.owin.owin <- function(W, ..., fatal=TRUE) {
  if(verifyclass(W, "owin", fatal=fatal))
    return(owin(W$xrange, W$yrange, poly=W$bdry, mask=W$m,
                unitname=unitname(W), check=FALSE))
  else
    return(NULL)
}

## Method for point patterns ("ppp"): extract the observation window.
as.owin.ppp <- function(W, ..., fatal=TRUE) {
  if(verifyclass(W, "ppp", fatal=fatal))
    return(W$window)
  else
    return(NULL)
}

## Method for quadrature schemes ("quad"): window of the data pattern.
as.owin.quad <- function(W, ..., fatal=TRUE) {
  if(verifyclass(W, "quad", fatal=fatal))
    return(W$data$window)
  else
    return(NULL)
}

## Method for pixel images ("im"): produce a mask-type window whose
## pixel is TRUE wherever the image value is not NA.
as.owin.im <- function(W, ..., fatal=TRUE) {
  if(!verifyclass(W, "im", fatal=fatal))
    return(NULL)
  out <- list(type     = "mask",
              xrange   = W$xrange,
              yrange   = W$yrange,
              dim      = W$dim,
              xstep    = W$xstep,
              ystep    = W$ystep,
              warnings = .Spatstat.Image.Warning,
              xcol     = W$xcol,
              yrow     = W$yrow,
              m        = !is.na(W$v),
              units    = unitname(W))
  class(out) <- "owin"
  return(out)
}

## Method for line segment patterns ("psp"): extract the window.
as.owin.psp <- function(W, ..., fatal=TRUE) {
  if(!verifyclass(W, "psp", fatal=fatal))
    return(NULL)
  return(W$window)
}

## Method for tessellations ("tess"): extract the containing window.
as.owin.tess <- function(W, ..., fatal=TRUE) {
  if(!verifyclass(W, "tess", fatal=fatal))
    return(NULL)
  return(W$window)
}

## Method for data frames: build a binary mask window.
## W must have 2 columns (coordinates of the TRUE pixels) or
## 3 columns (x, y, and a logical value z for each pixel).
## 'step' (optional) gives the pixel dimensions; if missing they are
## inferred from the coordinate spacing.
as.owin.data.frame <- function(W, ..., step, fatal=TRUE) {
  if(!verifyclass(W, "data.frame", fatal=fatal))
    return(NULL)
  if(missing(step)) {
    xstep <- ystep <- NULL
  } else {
    step <- ensure2vector(step)
    xstep <- step[1L]
    ystep <- step[2L]
  }
  if(!(ncol(W) %in% c(2,3))) {
    whinge <- "need exactly 2 or 3 columns of data"
    if(fatal) stop(whinge)
    warning(whinge)
    return(NULL)
  }
  if(twocol <- (ncol(W) == 2)) {
    # assume data is a list of TRUE pixels
    W <- cbind(W, TRUE)
  }
  ## identify the x, y, z columns by name, or by position if unnamed
  mch <- matchNameOrPosition(c("x", "y", "z"), names(W))
  ix <- mch[1L]
  iy <- mch[2L]
  iz <- mch[3L]
  df <- data.frame(x=W[,ix], y=W[,iy], z=as.logical(W[,iz]))
  with(df, {
    ## map each coordinate to its position in the sorted unique values
    xx <- sort(unique(x))
    yy <- sort(unique(y))
    jj <- match(x, xx)
    ii <- match(y, yy)
    ## make logical matrix (for incomplete x, y sequence)
    ok <- checkbigmatrix(length(xx), length(yy), fatal=fatal)
    if(!ok) return(NULL)
    mm <- matrix(FALSE, length(yy), length(xx))
    mm[cbind(ii,jj)] <- z
    ## ensure xx and yy are complete equally-spaced sequences
    fx <- fillseq(xx, step=xstep)
    fy <- fillseq(yy, step=ystep)
    xcol <- fx[[1L]]
    yrow <- fy[[1L]]
    ## trap very large matrices
    ok <- checkbigmatrix(length(xcol), length(yrow), fatal=fatal)
    if(!ok) return(NULL)
    ## mapping from xx to xcol, yy to yrow
    jjj <- fx[[2L]]
    iii <- fy[[2L]]
    ## make logical matrix for full sequence
    m <- matrix(FALSE, length(yrow), length(xcol))
    m[iii,jjj] <- mm
    ## make binary mask
    out <- owin(mask=m, xy=list(x=xcol, y=yrow))
    ## warn if area fraction is small: may be a misuse of as.owin
    ## (the user may have supplied polygon vertices rather than pixels)
    if(twocol) {
      pcarea <- 100 * nrow(df)/prod(dim(m))
      if(pcarea < 1)
        warning(paste("Window occupies only",
                      paste0(signif(pcarea, 2), "%"),
                      "of frame area. Did you mean owin(poly=df) ?"),
                call.=FALSE)
    }
    return(out)
  })
}

as.owin.default <- function(W, ..., fatal=TRUE) {
  ## Tries to interpret data as an object of class 'owin'
  ## W may be
  ##        a structure with entries xrange, yrange
  ##        a four-element vector (interpreted xmin, xmax, ymin, ymax)
  ##        a structure with entries xl, xu, yl, yu
  ##        an object with attribute "bbox"
  if(inherits(W, "box3")) {
    #' cannot be flattened
    if(fatal) stop("3D box cannot be converted to a 2D window")
    return(NULL)
  }
  if(checkfields(W, c("xrange", "yrange"))) {
    Z <- owin(W$xrange, W$yrange)
    return(Z)
  } else if(is.vector(W) && is.numeric(W) && length(W) == 4) {
    Z <- owin(W[1:2], W[3:4])
    return(Z)
  } else if(checkfields(W, c("xl", "xu", "yl", "yu"))) {
    W <- as.list(W)
    Z <- owin(c(W$xl, W$xu),c(W$yl, W$yu))
    return(Z)
  } else if(checkfields(W, c("x", "y", "area")) &&
            checkfields(W$area, c("xl", "xu", "yl", "yu"))) {
    ## e.g. output of spatial analysis objects carrying an 'area' entry
    ## with the frame limits -- TODO confirm the originating class
    V <- as.list(W$area)
    Z <- owin(c(V$xl, V$xu),c(V$yl, V$yu))
    return(Z)
  } else if(!is.null(Z <- attr(W, "bbox"))) {
    ## recurse on the bounding-box attribute
    return(as.owin(Z, ..., fatal=fatal))
  } else if(fatal)
    stop("Can't interpret W as a window")
  else return(NULL)
}

#
#----------------------------------------------------------------------------- # # Frame <- function(X) { UseMethod("Frame") } "Frame<-" <- function(X, value) { UseMethod("Frame<-") } Frame.default <- function(X) { as.rectangle(X) } ## ......................................................... as.rectangle <- function(w, ...) { if(inherits(w, "owin")) return(owin(w$xrange, w$yrange, unitname=unitname(w))) else if(inherits(w, "im")) return(owin(w$xrange, w$yrange, unitname=unitname(w))) else if(inherits(w, "layered")) return(do.call(boundingbox, unname(lapply(w, as.rectangle)))) else { w <- as.owin(w, ...) return(owin(w$xrange, w$yrange, unitname=unitname(w))) } } # #----------------------------------------------------------------------------- # as.mask <- function(w, eps=NULL, dimyx=NULL, xy=NULL) { # eps: grid mesh (pixel) size # dimyx: dimensions of pixel raster # xy: coordinates of pixel raster nonamedargs <- is.null(eps) && is.null(dimyx) && is.null(xy) uname <- as.units(NULL) if(!missing(w) && !is.null(w)) { if(is.data.frame(w)) return(owin(mask=w, xy=xy)) if(is.matrix(w)) { w <- as.data.frame(w) colnames(w) <- c("x", "y") return(owin(mask=w, xy=xy)) } w <- as.owin(w) uname <- unitname(w) } else { if(is.null(xy)) stop("If w is missing, xy is required") } # If it's already a mask, and no other arguments specified, # just return it. 
if(!missing(w) && w$type == "mask" && nonamedargs) return(w) ########################## # First determine pixel coordinates ########################## if(is.null(xy)) { # Pixel coordinates to be computed from other dimensions # First determine row & column dimensions if(!is.null(dimyx)) { dimyx <- ensure2vector(dimyx) nr <- dimyx[1L] nc <- dimyx[2L] } else { # use pixel size 'eps' if(!is.null(eps)) { eps <- ensure2vector(eps) nc <- diff(w$xrange)/eps[1L] nr <- diff(w$yrange)/eps[2L] if(nr < 1 || nc < 1) warning("pixel size parameter eps > size of window") nr <- ceiling(nr) nc <- ceiling(nc) } else { # use spatstat defaults np <- spatstat.options("npixel") if(length(np) == 1) nr <- nc <- np[1L] else { nr <- np[2L] nc <- np[1L] } } } if((mpix <- (nr * nc)/1048576) >= 10) { whinge <- paste("Creating", articlebeforenumber(mpix), paste0(round(mpix, 1), "-megapixel"), "window mask") message(whinge) warning(whinge, call.=FALSE) } # Initialise mask with all entries TRUE rasta <- owin(w$xrange, w$yrange, mask=matrix(TRUE, nr, nc)) } else { # # Pixel coordinates given explicitly: # xy is an image, a mask, or a list(x,y) # if(is.im(xy)) { rasta <- as.owin(xy) rasta$m[] <- TRUE } else if(is.owin(xy)) { if(xy$type != "mask") stop("argument xy does not contain raster coordinates.") rasta <- xy rasta$m[] <- TRUE } else { if(!checkfields(xy, c("x", "y"))) stop(paste(sQuote("xy"), "should be a list containing two vectors x and y")) x <- sort(unique(xy$x)) y <- sort(unique(xy$y)) # derive other parameters nr <- length(y) nc <- length(x) # check size if((mpix <- (nr * nc)/1048576) >= 10) { whinge <- paste("Creating", articlebeforenumber(mpix), paste0(round(mpix, 1), "-megapixel"), "window mask") message(whinge) warning(whinge, call.=FALSE) } # x and y pixel sizes dx <- diff(x) if(diff(range(dx)) > 0.01 * mean(dx)) stop("x coordinates must be evenly spaced") xstep <- mean(dx) dy <- diff(y) if(diff(range(dy)) > 0.01 * mean(dy)) stop("y coordinates must be evenly spaced") ystep <- 
mean(dy) xr <- range(x) yr <- range(y) xrange <- xr + xstep * c(-1,1)/2 yrange <- yr + ystep * c(-1,1)/2 # initialise mask with all entries TRUE rasta <- list(type = "mask", xrange = xrange, yrange = yrange, dim = c(nr, nc), xstep = xstep, ystep = ystep, warnings = .Spatstat.Image.Warning, xcol = seq(from=xr[1L], to=xr[2L], length.out=nc), yrow = seq(from=yr[1L], to=yr[2L], length.out=nr), m = matrix(TRUE, nr, nc), units = uname) class(rasta) <- "owin" } if(missing(w)) { # No more window information out <- rasta if(!(identical(x, xy$x) && identical(y, xy$y))) { ## xy is an enumeration of the TRUE pixels out$m[] <- FALSE ij <- cbind(i=match(xy$y, y), j=match(xy$x, x)) out$m[ij] <- TRUE } return(out) } } ################################ # Second, mask pixel raster with existing window ################################ switch(w$type, rectangle = { out <- rasta if(!all(w$xrange == rasta$xrange) || !all(w$yrange == rasta$yrange)) { xcol <- rasta$xcol yrow <- rasta$yrow wx <- w$xrange wy <- w$yrange badrow <- which(yrow > wy[2L] | yrow < wy[1L]) badcol <- which(xcol > wx[2L] | xcol < wx[1L]) out$m[badrow , ] <- FALSE out$m[ , badcol] <- FALSE } }, mask = { # resample existing mask on new raster out <- rastersample(w, rasta) }, polygonal = { # use C code out <- owinpoly2mask(w, rasta, FALSE) }) unitname(out) <- uname return(out) } as.matrix.owin <- function(x, ...) { m <- as.mask(x, ...) 
return(m$m) } # # #----------------------------------------------------------------------------- # as.polygonal <- function(W, repair=FALSE) { verifyclass(W, "owin") switch(W$type, rectangle = { xr <- W$xrange yr <- W$yrange return(owin(xr, yr, poly=list(x=xr[c(1,2,2,1)],y=yr[c(1,1,2,2)]), unitname=unitname(W), check=FALSE)) }, polygonal = { if(repair) W <- owin(poly=W$bdry, unitname=unitname(W)) return(W) }, mask = { # This could take a while M <- W$m nr <- nrow(M) notM <- !M xcol <- W$xcol yrow <- W$yrow xbracket <- 1.1 * c(-1,1) * W$xstep/2 ybracket <- 1.1 * c(-1,1) * W$ystep/2 ## determine resolution for polyclip operations p <- list(x0 = xcol[1], y0 = yrow[1], eps = max(W$xstep, W$ystep)/(2^31)) # identify runs of TRUE entries in each column start <- M & rbind(TRUE, notM[-nr, ]) finish <- M & rbind(notM[-1, ], TRUE) #' build result out <- NULL for(j in 1:ncol(M)) { xj <- xcol[j] # identify start and end positions in column j starts <- which(start[,j]) finishes <- which(finish[,j]) ns <- length(starts) nf <- length(finishes) if(ns != nf) stop(paste("Internal error: length(starts)=", ns, ", length(finishes)=", nf)) if(ns > 0) { for(k in 1:ns) { yfrom <- yrow[starts[k]] yto <- yrow[finishes[k]] yk <- sort(c(yfrom,yto)) #' make rectangle boundary in reversed orientation xrect <- xj + xbracket yrect <- yk + ybracket recto <- list(list(x = xrect[c(1,2,2,1)], y = yrect[c(2,2,1,1)])) #' add to result if(is.null(out)) { out <- recto } else { out <- polyclip::polyclip(out, recto, "union", fillA="nonzero", fillB="nonzero", eps = p$eps, x0 = p$x0, y0 = p$y0) } } } } if(is.null(out)) return(emptywindow(Frame(W))) totarea <- sum(sapply(out, Area.xypolygon)) if(totarea < 0) out <- lapply(out, reverse.xypolygon) out <- owin(poly=out, check=FALSE, unitname=unitname(W)) return(out) } ) } # # ---------------------------------------------------------------------- is.polygonal <- function(w) { return(inherits(w, "owin") && (w$type == "polygonal")) } is.rectangle <- function(w) { 
return(inherits(w, "owin") && (w$type == "rectangle")) }

# Test whether a window object is a binary mask
# (an "owin" whose type field equals "mask").
is.mask <- function(w) {
  return(inherits(w, "owin") && (w$type == "mask"))
}

# Check that 'w' is a binary mask window.
# If it is not, either stop (fatal=TRUE) or warn and return FALSE.
validate.mask <- function(w, fatal=TRUE) {
  verifyclass(w, "owin", fatal=fatal)
  if(w$type == "mask")
    return(TRUE)
  if(fatal)
    stop(paste(short.deparse(substitute(w)), "is not a binary mask"))
  else {
    warning(paste(short.deparse(substitute(w)), "is not a binary mask"))
    return(FALSE)
  }
}

# Pixel array dimensions of a window.
dim.owin <- function(x) { return(x$dim) } ## NULL unless it's a mask

## internal use only:

# Raster of x-coordinates over the pixel grid of a mask.
# If drop=TRUE, return only the values at TRUE (inside) pixels as a vector;
# otherwise return the full array with the mask's dimensions.
rasterx.mask <- function(w, drop=FALSE) {
  validate.mask(w)
  x <- w$xcol[col(w)]
  x <- if(drop) x[w$m, drop=TRUE] else array(x, dim=w$dim)
  return(x)
}

# Raster of y-coordinates over the pixel grid of a mask
# (same convention as rasterx.mask).
rastery.mask <- function(w, drop=FALSE) {
  validate.mask(w)
  y <- w$yrow[row(w)]
  y <- if(drop) y[w$m, drop=TRUE] else array(y, dim=w$dim)
  return(y)
}

# Both coordinate rasters of a mask at once, returned as list(x, y)
# of numeric vectors. If drop=TRUE only pixels inside the mask are kept.
rasterxy.mask <- function(w, drop=FALSE) {
  validate.mask(w)
  x <- w$xcol[col(w)]
  y <- w$yrow[row(w)]
  if(drop) {
    m <- w$m
    x <- x[m, drop=TRUE]
    y <- y[m, drop=TRUE]
  }
  return(list(x=as.numeric(x), y=as.numeric(y)))
}

# Map spatial locations (x, y) to the nearest pixel of the mask or
# pixel image 'w'.  Returns either (row, col) indices (indices=TRUE)
# or the (x, y) coordinates of the matched pixel centres.
# Locations outside the grid are clamped to the nearest boundary pixel
# via pmax.int/pmin.int.  Handles zero-length input.
nearest.raster.point <- function(x,y,w, indices=TRUE) {
  stopifnot(is.mask(w) || is.im(w))
  nr <- w$dim[1L]
  nc <- w$dim[2L]
  if(length(x) == 0) {
    cc <- rr <- integer(0)
  } else {
    cc <- 1 + round((x - w$xcol[1L])/w$xstep)
    rr <- 1 + round((y - w$yrow[1L])/w$ystep)
    cc <- pmax.int(1,pmin.int(cc, nc))
    rr <- pmax.int(1,pmin.int(rr, nr))
  }
  if(indices)
    return(list(row=rr, col=cc))
  else
    return(list(x=w$xcol[cc], y=w$yrow[rr]))
}

# Convert a binary mask to a data frame with one row per TRUE pixel,
# giving the (x, y) coordinates of the pixel centres inside the window.
mask2df <- function(w) {
  stopifnot(is.owin(w) && w$type == "mask")
  xx <- raster.x(w)
  yy <- raster.y(w)
  ok <- w$m
  xx <- as.vector(xx[ok])
  yy <- as.vector(yy[ok])
  return(data.frame(x=xx, y=yy))
}

#------------------------------------------------------------------

# Set complement of the window 'w' within its enclosing rectangle
# (or within the optional 'frame' if supplied).
complement.owin <- function(w, frame=as.rectangle(w)) {
  w <- as.owin(w)
  if(reframe <- !missing(frame)) {
    verifyclass(frame, "owin")
    w <- rebound.owin(w, frame)
    # if w was a rectangle, it's now polygonal
  }
  switch(w$type,
         mask = {
           # complement of a mask: just negate the logical matrix
           w$m <- !(w$m)
         },
         rectangle = {
           # return empty window
return(emptywindow(w)) }, polygonal = { bdry <- w$bdry if(length(bdry) == 0) { # w is empty return(frame) } # bounding box, in anticlockwise order box <- list(x=w$xrange[c(1,2,2,1)], y=w$yrange[c(1,1,2,2)]) boxarea <- Area.xypolygon(box) # first check whether one of the current boundary polygons # is the bounding box itself (with + sign) if(reframe) is.box <- rep.int(FALSE, length(bdry)) else { nvert <- lengths(lapply(bdry, getElement, name="x")) areas <- sapply(bdry, Area.xypolygon) boxarea.mineps <- boxarea * (0.99999) is.box <- (nvert == 4 & areas >= boxarea.mineps) if(sum(is.box) > 1) stop("Internal error: multiple copies of bounding box") if(all(is.box)) { return(emptywindow(box)) } } # if box is present (with + sign), remove it if(any(is.box)) bdry <- bdry[!is.box] # now reverse the direction of each polygon bdry <- lapply(bdry, reverse.xypolygon, adjust=TRUE) # if box was absent, add it if(!any(is.box)) bdry <- c(bdry, list(box)) # sic # put back into w w$bdry <- bdry }, stop("unrecognised window type", w$type) ) return(w) } #----------------------------------------------------------- inside.owin <- function(x, y, w) { # test whether (x,y) is inside window w # x, y may be vectors if(missing(y) && all(c("x", "y") %in% names(x))) return(inside.owin(x$x, x$y, w)) w <- as.owin(w) if(length(x)==0) return(logical(0)) # test whether inside bounding rectangle xr <- w$xrange yr <- w$yrange eps <- sqrt(.Machine$double.eps) frameok <- (x >= xr[1L] - eps) & (x <= xr[2L] + eps) & (y >= yr[1L] - eps) & (y <= yr[2L] + eps) if(!any(frameok)) # all points OUTSIDE window - no further work needed return(frameok) ok <- frameok switch(w$type, rectangle = { return(ok) }, polygonal = { ## check scale framesize <- max(diff(xr), diff(yr)) if(framesize > 1e6 || framesize < 1e-6) { ## rescale to avoid numerical overflow scalefac <- framesize/100 w <- rescale(w, scalefac) x <- x/scalefac y <- y/scalefac } xy <- list(x=x,y=y) bdry <- w$bdry total <- numeric(length(x)) on.bdry <- 
rep.int(FALSE, length(x)) for(i in seq_along(bdry)) { score <- inside.xypolygon(xy, bdry[[i]], test01=FALSE) total <- total + score on.bdry <- on.bdry | attr(score, "on.boundary") } # any points identified as belonging to the boundary get score 1 total[on.bdry] <- 1 # check for sanity now.. uhoh <- (total * (1-total) != 0) if(any(uhoh)) { nuh <- sum(uhoh) warning(paste("point-in-polygon test had difficulty with", nuh, ngettext(nuh, "point", "points"), "(total score not 0 or 1)"), call.=FALSE) total[uhoh] <- 0 } return(ok & (total != 0)) }, mask = { # consider only those points which are inside the frame xf <- x[frameok] yf <- y[frameok] # map locations to raster (row,col) coordinates loc <- nearest.raster.point(xf,yf,w) # look up mask values okf <- (w$m)[cbind(loc$row, loc$col)] # insert into 'ok' vector ok[frameok] <- okf return(ok) }, stop("unrecognised window type", w$type) ) } #------------------------------------------------------------------------- print.owin <- function(x, ..., prefix="window: ") { verifyclass(x, "owin") unitinfo <- summary(unitname(x)) switch(x$type, rectangle={ rectname <- paste0(prefix, "rectangle =") }, polygonal={ splat(paste0(prefix, "polygonal boundary")) if(length(x$bdry) == 0) splat("window is empty") rectname <- "enclosing rectangle:" }, mask={ splat(paste0(prefix, "binary image mask")) di <- x$dim splat(di[1L], "x", di[2L], "pixel array (ny, nx)") rectname <- "enclosing rectangle:" } ) splat(rectname, prange(zapsmall(x$xrange)), "x", prange(zapsmall(x$yrange)), unitinfo$plural, unitinfo$explain) invisible(NULL) } summary.owin <- function(object, ...) 
{
  ## Method for 'summary' for windows.
  ## Collects geometric facts about the window into a list of
  ## class "summary.owin", printed by print.summary.owin below.
  verifyclass(object, "owin")
  result <- list(xrange=object$xrange,
                 yrange=object$yrange,
                 type=object$type,
                 area=area(object),
                 units=unitname(object))
  ## fraction of the bounding rectangle occupied by the window
  result$areafraction <- with(result, area/(diff(xrange) * diff(yrange)))
  switch(object$type,
         rectangle={
         },
         polygonal={
           ## per-polygon areas and vertex counts;
           ## holes are boundary polygons with negative signed area
           poly <- object$bdry
           result$npoly <- npoly <- length(poly)
           if(npoly == 0) {
             result$areas <- result$nvertices <- numeric(0)
           } else if(npoly == 1) {
             result$areas <- Area.xypolygon(poly[[1L]])
             result$nvertices <- length(poly[[1L]]$x)
           } else {
             result$areas <- unlist(lapply(poly, Area.xypolygon))
             result$nvertices <- lengths(lapply(poly, getElement, name="x"))
           }
           result$nhole <- sum(result$areas < 0)
         },
         mask={
           ## record pixel grid geometry
           result$npixels <- object$dim
           result$xstep <- object$xstep
           result$ystep <- object$ystep
         }
         )
  class(result) <- "summary.owin"
  result
}

# Print method for the summary object produced by summary.owin.
# Output layout depends on the window type (rectangle/polygonal/mask).
print.summary.owin <- function(x, ...) {
  verifyclass(x, "summary.owin")
  unitinfo <- summary(x$units)
  pluralunits <- unitinfo$plural
  singularunits <- unitinfo$singular
  switch(x$type,
         rectangle={
           rectname <- "Window: rectangle ="
         },
         polygonal={
           np <- x$npoly
           splat("Window: polygonal boundary")
           if(np == 0) {
             splat("window is empty")
           } else if(np == 1) {
             splat("single connected closed polygon with",
                   x$nvertices, "vertices")
           } else {
             ## several polygons: tabulate each one, flagging holes
             nh <- x$nhole
             holy <- if(nh == 0) "(no holes)" else
                     if(nh == 1) "(1 hole)" else
                     paren(paste(nh, "holes"))
             splat(np, "separate polygons", holy)
             if(np > 0)
               print(data.frame(vertices=x$nvertices,
                                area=signif(x$areas, 6),
                                relative.area=signif(x$areas/x$area,3),
                                row.names=paste("polygon", 1:np,
                                                ifelse(x$areas < 0,
                                                       "(hole)", "")
                                                )))
           }
           rectname <- "enclosing rectangle:"
         },
         mask={
           splat("binary image mask")
           di <- x$npixels
           splat(di[1L], "x", di[2L], "pixel array (ny, nx)")
           splat("pixel size:",
                 signif(x$xstep,3), "by", signif(x$ystep,3),
                 pluralunits)
           rectname <- "enclosing rectangle:"
         }
         )
  splat(rectname,
        prange(zapsmall(x$xrange)),
        "x",
        prange(zapsmall(x$yrange)),
        pluralunits)
  Area <- signif(x$area, 6)
  splat("Window area =", Area, "square",
        if(Area == 1)
        singularunits else pluralunits)
  if(!is.null(ledge <- unitinfo$legend))
    splat(ledge)
  if(x$type != "rectangle")
    splat("Fraction of frame area:", signif(x$areafraction, 3))
  return(invisible(x))
}

# Convert a window to a data frame.
# For a mask: one row per pixel, columns x, y and (if drop=FALSE) a
# logical 'inside' column taken from the mask matrix.
# For a rectangle/polygon: vertex coordinates of each boundary polygon;
# when there are several polygons, extra columns 'id' and 'sign' record
# the polygon index and whether it is a hole (sign = -1).
as.data.frame.owin <- function(x, ..., drop=TRUE) {
  stopifnot(is.owin(x))
  switch(x$type,
         rectangle = {
           x <- as.polygonal(x)
         },
         polygonal = { },
         mask = {
           xy <- rasterxy.mask(x, drop=drop)
           if(!drop)
             xy <- append(xy, list(inside=as.vector(x$m)))
           return(as.data.frame(xy, ...))
         })
  b <- x$bdry
  ishole <- sapply(b, is.hole.xypolygon)
  sign <- (-1)^ishole
  b <- lapply(b, as.data.frame, ...)
  nb <- length(b)
  if(nb == 1)
    return(b[[1L]])
  dfs <- mapply(cbind, b,
                id=as.list(seq_len(nb)),
                sign=as.list(sign),
                SIMPLIFY=FALSE)
  df <- do.call(rbind, dfs)
  return(df)
}

# Discretise a point pattern: replace its window by a binary mask
# (resolution controlled by eps/dimyx/xy), then nudge the mask so that
# every data point falls inside it.
discretise <- function(X,eps=NULL,dimyx=NULL,xy=NULL) {
  verifyclass(X,"ppp")
  W <- X$window
  ok <- inside.owin(X$x,X$y,W)
  if(!all(ok))
    stop("There are points of X outside the window of X")
  all.null <- is.null(eps) & is.null(dimyx) & is.null(xy)
  if(W$type=="mask" & all.null) return(X)
  WM <- as.mask(W,eps=eps,dimyx=dimyx,xy=xy)
  nok <- !inside.owin(X$x,X$y,WM)
  if(any(nok)) {
    # force the pixels nearest to any outlying data points to be TRUE
    ifix <- nearest.raster.point(X$x[nok],X$y[nok], WM)
    ifix <- cbind(ifix$row,ifix$col)
    WM$m[ifix] <- TRUE
  }
  X$window <- WM
  X
}

# Point pattern consisting of the centres of the pixels inside the
# window (or other spatial object) X, in the window W (default: the
# enclosing rectangle of the mask).
pixelcentres <- function (X, W=NULL,...) {
  X <- as.mask(as.owin(X), ...)
  if(is.null(W)) W <- as.rectangle(X)
  Y <- as.ppp(raster.xy(X,drop=TRUE),W=W)
  return(Y)
}

# Convert a window to the list(x, y) format used by graphics::polypath,
# with NA entries separating successive boundary polygons.
owin2polypath <- function(w) {
  w <- as.polygonal(w)
  b <- w$bdry
  xvectors <- lapply(b, getElement, name="x")
  yvectors <- lapply(b, getElement, name="y")
  # prepend NA to every polygon's coordinates, then drop the first NA
  xx <- unlist(lapply(xvectors, append, values=NA, after=FALSE))[-1]
  yy <- unlist(lapply(yvectors, append, values=NA, after=FALSE))[-1]
  return(list(x=xx, y=yy))
}

## generics which extract and assign the window of some object

Window <- function(X, ...)
{ UseMethod("Window") } "Window<-" <- function(X, ..., value) { UseMethod("Window<-") } spatstat/R/Math.im.R0000644000176200001440000000214513115225157013742 0ustar liggesusers## ## Math.im.R ## ## $Revision: 1.7 $ $Date: 2017/01/12 03:50:22 $ ## Ops.im <- function(e1,e2=NULL){ unary <- nargs() == 1L if(unary){ if(!is.element(.Generic, c("!", "-", "+"))) stop("Unary usage is undefined for this operation for images.") callstring <- paste(.Generic, "e1") } else { callstring <- paste("e1", .Generic, "e2") } expr <- parse(text = callstring) return(do.call(eval.im, list(expr = expr))) } Math.im <- function(x, ...){ m <- do.call(.Generic, list(x$v, ...)) rslt <- im(m, xcol = x$xcol, yrow = x$yrow, xrange = x$xrange, yrange = x$yrange, unitname = unitname(x)) return(rslt) } Summary.im <- function(..., na.rm=FALSE, drop=TRUE){ argh <- list(...) ims <- sapply(argh, is.im) argh[ims] <- lapply(argh[ims], getElement, name="v") do.call(.Generic, c(argh, list(na.rm = na.rm || drop))) } Complex.im <- function(z){ m <- do.call(.Generic, list(z=z$v)) rslt <- im(m, xcol = z$xcol, yrow = z$yrow, xrange = z$xrange, yrange = z$yrange, unitname = unitname(z)) return(rslt) } spatstat/R/penttinen.R0000644000176200001440000000411013115225157014443 0ustar liggesusers# # # penttinen.R # # $Revision: 1.2 $ $Date: 2016/02/16 01:39:12 $ # # Penttinen pairwise interaction # # # ------------------------------------------------------------------- # Penttinen <- local({ # create blank template object without family and pars BlankAntti <- list( name = "Penttinen process", creator = "Penttinen", family = "pairwise.family", # evaluated later pot = function(d, par) { ans <- numeric(length(d)) dim(ans) <- dim(d) zz <- d/(2 * par$r) ok <- (zz < 1) z <- zz[ok] ans[ok] <- (2/pi) * (acos(z) - z * sqrt(1-z^2)) return(ans) }, par = list(r = NULL), # to be filled in parnames = "circle radius", init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r 
must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta <- as.numeric(coeffs[1]) gamma <- exp(theta) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { theta <- as.numeric(coeffs[1]) return(is.finite(theta) && (theta <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(anyNA(coeffs)) return(2 * r) theta <- coeffs[1] if(abs(theta) <= epsilon) return(0) else return(2 * r) }, version=NULL # to be filled in ) class(BlankAntti) <- "interact" # Finally define main function Penttinen <- function(r) { instantiate.interact(BlankAntti, list(r=r)) } Penttinen <- intermaker(Penttinen, BlankAntti) Penttinen }) spatstat/R/classes.R0000755000176200001440000000232313115271075014103 0ustar liggesusers# # # classes.S # # $Revision: 1.7 $ $Date: 2006/10/09 03:38:14 $ # # Generic utilities for classes # # #-------------------------------------------------------------------------- verifyclass <- function(X, C, N=deparse(substitute(X)), fatal=TRUE) { if(!inherits(X, C)) { if(fatal) { gripe <- paste("argument", sQuote(N), "is not of class", sQuote(C)) stop(gripe) } else return(FALSE) } return(TRUE) } #-------------------------------------------------------------------------- checkfields <- function(X, L) { # X is a list, L is a vector of strings # Checks for presence of field named L[i] for all i return(all(!is.na(match(L,names(X))))) } getfields <- function(X, L, fatal=TRUE) { # X is a list, L is a vector of strings # Extracts all fields with names L[i] from list X # Checks for presence of all desired fields # Returns the sublist of X with fields named L[i] absent <- is.na(match(L, names(X))) if(any(absent)) { gripe <- paste("Needed the following components:", paste(L, collapse=", "), "\nThese ones 
were missing: ", paste(L[absent], collapse=", ")) if(fatal) stop(gripe) else warning(gripe) } return(X[L[!absent]]) } spatstat/R/affine.R0000755000176200001440000002477213115271075013712 0ustar liggesusers# # affine.R # # $Revision: 1.49 $ $Date: 2016/10/23 10:36:58 $ # affinexy <- function(X, mat=diag(c(1,1)), vec=c(0,0), invert=FALSE) { if(length(X$x) == 0 && length(X$y) == 0) return(list(x=numeric(0),y=numeric(0))) if(invert) { mat <- invmat <- solve(mat) vec <- - as.numeric(invmat %*% vec) } # Y = M X + V ans <- mat %*% rbind(X$x, X$y) + matrix(vec, nrow=2L, ncol=length(X$x)) return(list(x = ans[1L,], y = ans[2L,])) } affinexypolygon <- function(p, mat=diag(c(1,1)), vec=c(0,0), detmat=det(mat)) { # transform (x,y) p[c("x","y")] <- affinexy(p, mat=mat, vec=vec) # transform area if(!is.null(p$area)) p$area <- p$area * detmat # if map has negative sign, cyclic order was reversed; correct it if(detmat < 0) p <- reverse.xypolygon(p, adjust=TRUE) return(p) } "affine" <- function(X, ...) { UseMethod("affine") } "affine.owin" <- function(X, mat=diag(c(1,1)), vec=c(0,0), ..., rescue=TRUE) { verifyclass(X, "owin") vec <- as2vector(vec) if(!is.matrix(mat) || any(dim(mat) != c(2,2))) stop(paste(sQuote("mat"), "should be a 2 x 2 matrix")) diagonalmatrix <- all(mat == diag(diag(mat))) scaletransform <- diagonalmatrix && (length(unique(diag(mat))) == 1) newunits <- if(scaletransform) unitname(X) else as.units(NULL) # switch(X$type, rectangle={ if(diagonalmatrix) { # result is a rectangle Y <- owin(range(mat[1L,1L] * X$xrange + vec[1L]), range(mat[2L,2L] * X$yrange + vec[2L])) unitname(Y) <- newunits return(Y) } else { # convert rectangle to polygon P <- as.polygonal(X) # call polygonal case return(affine.owin(P, mat, vec, rescue=rescue)) } }, polygonal={ # Transform the polygonal boundaries bdry <- lapply(X$bdry, affinexypolygon, mat=mat, vec=vec, detmat=det(mat)) # Compile result W <- owin(poly=bdry, check=FALSE, unitname=newunits) # Result might be a rectangle: if so, 
convert to rectangle type if(rescue) W <- rescue.rectangle(W) return(W) }, mask={ # binary mask if(sqrt(abs(det(mat))) < .Machine$double.eps) stop("Matrix of linear transformation is singular") newframe <- boundingbox(affinexy(corners(X), mat, vec)) W <- if(length(list(...)) > 0) as.mask(newframe, ...) else as.mask(newframe, eps=with(X, min(xstep, ystep))) pixelxy <- rasterxy.mask(W) xybefore <- affinexy(pixelxy, mat, vec, invert=TRUE) W$m[] <- with(xybefore, inside.owin(x, y, X)) W <- intersect.owin(W, boundingbox(W)) if(rescue) W <- rescue.rectangle(W) return(W) }, stop("Unrecognised window type") ) } "affine.ppp" <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "ppp") vec <- as2vector(vec) r <- affinexy(X, mat, vec) w <- affine.owin(X$window, mat, vec, ...) return(ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE)) } "affine.im" <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "im") vec <- as2vector(vec) if(!is.matrix(mat) || any(dim(mat) != c(2,2))) stop(paste(sQuote("mat"), "should be a 2 x 2 matrix")) # Inspect the determinant detmat <- det(mat) if(sqrt(abs(detmat)) < .Machine$double.eps) stop("Matrix of linear transformation is singular") # diagonalmatrix <- all(mat == diag(diag(mat))) scaletransform <- diagonalmatrix && (length(unique(diag(mat))) == 1L) newunits <- if(scaletransform) unitname(X) else as.units(NULL) newpixels <- (length(list(...)) > 0) # if(diagonalmatrix && !newpixels) { # diagonal matrix: apply map to row and column locations v <- X$v d <- X$dim newbox <- affine(as.rectangle(X), mat=mat, vec=vec) xscale <- diag(mat)[1L] yscale <- diag(mat)[2L] xcol <- xscale * X$xcol + vec[1L] yrow <- yscale * X$yrow + vec[2L] if(xscale < 0) { # x scale is negative xcol <- rev(xcol) v <- v[, (d[2L]:1)] } if(yscale < 0) { # y scale is negative yrow <- rev(yrow) v <- v[(d[1L]:1), ] } Y <- im(v, xcol=xcol, yrow=yrow, xrange=newbox$xrange, yrange=newbox$yrange, unitname=newunits) } else { # general case # create 
box containing transformed image newframe <- boundingbox(affinexy(corners(X), mat, vec)) W <- if(length(list(...)) > 0) as.mask(newframe, ...) else as.mask(newframe, eps=with(X, min(xstep, ystep))) unitname(W) <- newunits # raster for transformed image naval <- switch(X$type, factor= , integer = NA_integer_, logical = as.logical(NA_integer_), real = NA_real_, complex = NA_complex_, character = NA_character_, NA) Y <- as.im(W, value=naval) # preimages of pixels of transformed image xx <- as.vector(rasterx.im(Y)) yy <- as.vector(rastery.im(Y)) pre <- affinexy(list(x=xx, y=yy), mat, vec, invert=TRUE) # sample original image if(X$type != "factor") { Y$v[] <- lookup.im(X, pre$x, pre$y, naok=TRUE) } else { lab <- levels(X) lev <- seq_along(lab) Y$v[] <- lookup.im(eval.im(as.integer(X)), pre$x, pre$y, naok=TRUE) Y <- eval.im(factor(Y, levels=lev, labels=lab)) } } return(Y) } ### ---------------------- reflect ---------------------------------- reflect <- function(X) { UseMethod("reflect") } reflect.default <- function(X) { affine(X, mat=diag(c(-1,-1))) } reflect.im <- function(X) { stopifnot(is.im(X)) out <- with(X, list(v = v[dim[1L]:1, dim[2L]:1], dim = dim, xrange = rev(-xrange), yrange = rev(-yrange), xstep = xstep, ystep = ystep, xcol = rev(-xcol), yrow = rev(-yrow), type = type, units = units)) class(out) <- "im" return(out) } ### ---------------------- shift ---------------------------------- "shift" <- function(X, ...) 
{ UseMethod("shift") } shiftxy <- function(X, vec=c(0,0)) { if(is.null(vec)) { warning("Null displacement vector; treated as zero") return(X) } list(x = X$x + vec[1L], y = X$y + vec[2L]) } shiftxypolygon <- function(p, vec=c(0,0)) { # transform (x,y), retaining other data p[c("x","y")] <- shiftxy(p, vec=vec) return(p) } "shift.owin" <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "owin") if(!is.null(origin)) { if(!missing(vec)) warning("argument vec ignored; overruled by argument origin") if(is.numeric(origin)) { locn <- origin } else if(is.character(origin)) { origin <- pickoption("origin", origin, c(centroid="centroid", midpoint="midpoint", bottomleft="bottomleft")) locn <- switch(origin, centroid={ unlist(centroid.owin(X)) }, midpoint={ c(mean(X$xrange), mean(X$yrange)) }, bottomleft={ c(X$xrange[1L], X$yrange[1L]) }) } else stop("origin must be a character string or a numeric vector") return(shift(X, -locn)) } vec <- as2vector(vec) # Shift the bounding box X$xrange <- X$xrange + vec[1L] X$yrange <- X$yrange + vec[2L] switch(X$type, rectangle={ }, polygonal={ # Shift the polygonal boundaries X$bdry <- lapply(X$bdry, shiftxypolygon, vec=vec) }, mask={ # Shift the pixel coordinates X$xcol <- X$xcol + vec[1L] X$yrow <- X$yrow + vec[2L] # That's all --- the mask entries are unchanged }, stop("Unrecognised window type") ) # tack on shift vector attr(X, "lastshift") <- vec # units are unchanged return(X) } "shift.ppp" <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "ppp") if(!is.null(origin)) { if(!missing(vec)) warning("argument vec ignored; overruled by argument origin") if(is.numeric(origin)) { locn <- origin } else if(is.character(origin)) { origin <- pickoption("origin", origin, c(centroid="centroid", midpoint="midpoint", bottomleft="bottomleft")) W <- X$window locn <- switch(origin, centroid={ unlist(centroid.owin(W)) }, midpoint={ c(mean(W$xrange), mean(W$yrange)) }, bottomleft={ c(W$xrange[1L], W$yrange[1L]) }) } else stop("origin 
must be a character string or a numeric vector") vec <- -locn } vec <- as2vector(vec) # perform shift r <- shiftxy(X, vec) w <- shift.owin(X$window, vec) Y <- ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE) # tack on shift vector attr(Y, "lastshift") <- vec return(Y) } getlastshift <- function(X) { v <- attr(X, "lastshift") if(is.null(v)) stop(paste("Internal error: shifted object of class", sQuote(as.character(class(X))[1L]), "does not have \"lastshift\" attribute"), call.=FALSE) if(!(is.numeric(v) && length(v) == 2L)) stop("Internal error: \"lastshift\" attribute is not a vector", call.=FALSE) return(v) } putlastshift <- function(X, vec) { attr(X, "lastshift") <- vec return(X) } ### ---------------------- scalar dilation --------------------------------- scalardilate <- function(X, f, ...) { UseMethod("scalardilate") } scalardilate.default <- function(X, f, ...) { trap.extra.arguments(..., .Context="In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) Y <- affine(X, mat=diag(c(f,f))) return(Y) } scalardilate.im <- scalardilate.owin <- scalardilate.psp <- scalardilate.ppp <- function(X, f, ..., origin=NULL) { trap.extra.arguments(..., .Context="In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) if(!is.null(origin)) { X <- shift(X, origin=origin) negorig <- getlastshift(X) } else negorig <- c(0,0) Y <- affine(X, mat=diag(c(f, f)), vec = -negorig) return(Y) } spatstat/R/density.lpp.R0000644000176200001440000002345113161101170014706 0ustar liggesusers#' #' density.lpp.R #' #' Method for 'density' for lpp objects #' #' Copyright (C) 2017 Greg McSwiggan and Adrian Baddeley #' density.lpp <- function(x, sigma, ..., weights=NULL, kernel="gaussian", continuous=TRUE, epsilon=1e-6, verbose=TRUE, debug=FALSE, savehistory=TRUE, old=FALSE) { stopifnot(inherits(x, "lpp")) kernel <- match.kernel(kernel) if(continuous && (kernel == "gaussian") && !old) return(PDEdensityLPP(x, 
sigma, ..., weights=weights)) L <- as.linnet(x) # weights np <- npoints(x) if(is.null(weights)) { weights <- rep(1, np) } else { stopifnot(is.numeric(weights)) check.nvector(weights, np, oneok=TRUE) if(length(weights) == 1L) weights <- rep(weights, np) } # pixellate linear network Llines <- as.psp(L) linemask <- as.mask.psp(Llines, ...) lineimage <- as.im(linemask, value=0) # extract pixel centres xx <- raster.x(linemask) yy <- raster.y(linemask) mm <- linemask$m xx <- as.vector(xx[mm]) yy <- as.vector(yy[mm]) pixelcentres <- ppp(xx, yy, window=as.rectangle(linemask), check=FALSE) pixdf <- data.frame(xc=xx, yc=yy) # project pixel centres onto lines p2s <- project2segment(pixelcentres, Llines) projloc <- as.data.frame(p2s$Xproj) projmap <- as.data.frame(p2s[c("mapXY", "tp")]) projdata <- cbind(pixdf, projloc, projmap) # initialise pixel values values <- rep(0, nrow(pixdf)) # Extract local coordinates of data n <- npoints(x) coo <- coords(x) seg <- coo$seg tp <- coo$tp # lengths of network segments Llengths <- lengths.psp(Llines) # initialise stack stack <- data.frame(seg=integer(0), from=logical(0), distance=numeric(0), weight=numeric(0)) # process each data point for(i in seq_len(n)) { segi <- seg[i] tpi <- tp[i] len <- Llengths[segi] # evaluate kernel on segment containing x[i] relevant <- (projmap$mapXY == segi) values[relevant] <- values[relevant] + dkernel(len * (projmap$tp[relevant] - tpi), kernel=kernel, sd=sigma) # push the two tails onto the stack stack <- rbind(data.frame(seg = c(segi, segi), from = c(TRUE, FALSE), distance = len * c(tpi, 1-tpi), weight = rep(weights[i], 2L)), stack) } Lfrom <- L$from Lto <- L$to if(verbose) niter <- 0 if(savehistory) history <- data.frame(iter=integer(0), qlen=integer(0), totmass=numeric(0), maxmass=numeric(0)) # process the stack while(nrow(stack) > 0) { if(debug) print(stack) masses <- with(stack, abs(weight) * pkernel(distance, kernel=kernel, sd=sigma, lower.tail=FALSE)) totmass <- sum(masses) maxmass <- max(masses) 
if(savehistory) history <- rbind(history, data.frame(iter=nrow(history)+1L, qlen=nrow(stack), totmass=totmass, maxmass=maxmass)) if(verbose) { niter <- niter + 1L cat(paste("Iteration", niter, "\tStack length", nrow(stack), "\n")) cat(paste("Total stack mass", totmass, "\tMaximum", maxmass, "\n")) } # trim tiny <- (masses < epsilon) if(any(tiny)) { if(verbose) { ntiny <- sum(tiny) cat(paste("Removing", ntiny, "tiny", ngettext(ntiny, "tail", "tails"), "\n")) } stack <- stack[!tiny, ] } if(nrow(stack) == 0) break; # pop the top of the stack H <- stack[1L, , drop=FALSE] stack <- stack[-1L, , drop=FALSE] # segment and vertex Hseg <- H$seg Hvert <- if(H$from) Lfrom[Hseg] else Lto[Hseg] Hdist <- H$distance # find all segments incident to this vertex incident <- which((Lfrom == Hvert) | (Lto == Hvert)) degree <- length(incident) # exclude reflecting paths? if(!continuous) incident <- setdiff(incident, Hseg) for(J in incident) { lenJ <- Llengths[J] # determine whether Hvert is the 'to' or 'from' endpoint of segment J H.is.from <- (Lfrom[J] == Hvert) # update weight if(continuous) { Jweight <- H$weight * (2/degree - (J == Hseg)) } else { Jweight <- H$weight/(degree-1) } # increment density on segment relevant <- (projmap$mapXY == J) tp.rel <- projmap$tp[relevant] d.rel <- lenJ * (if(H.is.from) tp.rel else (1 - tp.rel)) values[relevant] <- values[relevant] + Jweight * dkernel(d.rel + Hdist, kernel=kernel, sd=sigma) # push other end of segment onto stack stack <- rbind(data.frame(seg = J, from = !(H.is.from), distance = lenJ + Hdist, weight = Jweight), stack) } } # attach values to nearest pixels Z <- lineimage Z[pixelcentres] <- values # attach exact line position data df <- cbind(projdata, values) out <- linim(L, Z, df=df) if(savehistory) attr(out, "history") <- history return(out) } density.splitppx <- function(x, sigma, ...) { if(!all(sapply(x, is.lpp))) stop("Only implemented for patterns on a linear network") solapply(x, density.lpp, sigma=sigma, ...) 
}

#' Kernel density estimation on a linear network by numerical solution
#' of the time-dependent heat equation (McSwiggan-Baddeley-Nair method).
#' x: point pattern on a network (lpp); sigma: smoothing bandwidth.
#' dx: spacing of sample points along the network (chosen automatically
#'     when NULL); dt: time step, validated against a stability bound.
#' fun=TRUE returns the estimate as a function on the network (linfun);
#' otherwise a pixel representation (linim) is constructed, at fine
#' resolution when finespacing=TRUE.  Extra arguments ... control the
#' pixel raster (eps, dimyx, xy) and are forwarded to as.linim.
PDEdensityLPP <- function(x, sigma, ..., weights=NULL,
                          dx=NULL, dt=NULL, fun=FALSE,
                          finespacing=FALSE, finedata=finespacing) {
  stopifnot(is.lpp(x))
  L <- as.linnet(x)
  check.1.real(sigma)
  check.finite(sigma)
  if(!is.null(weights))
    check.nvector(weights, npoints(x))
  if(is.null(dx)) {
    #' default rule for spacing of sample points
    lenths <- lengths.psp(as.psp(L))
    lbar <- mean(lenths)
    nseg <- length(lenths)
    ltot <- lbar * nseg
    if(finespacing) {
      #' specify 30 steps per segment, on average
      dx <- lbar/30
    } else {
      #' use pixel size
      argh <- list(...)
      W <- Frame(x)
      eps <- if(!is.null(argh$eps)) {
               min(argh$eps)
             } else if(!is.null(argh$dimyx)) {
               min(sidelengths(W)/argh$dimyx)
             } else if(!is.null(argh$xy)) {
               ## bug fix: this branch previously read 'xy=xy' where no
               ## local 'xy' exists; the user-supplied value captured
               ## from ... is argh$xy (cf. the argh$eps/argh$dimyx
               ## branches above)
               with(as.mask(W, xy=argh$xy), min(xstep, ystep))
             } else min(sidelengths(W)/spatstat.options("npixel"))
      dx <- max(eps/1.4, lbar/30)
    }
    ## round so that dx divides the total network length exactly
    D <- ceiling(ltot/dx)
    D <- min(D, .Machine$integer.max)
    dx <- ltot/D
  }
  ## stability bound for the explicit finite-difference scheme:
  ## dt must not exceed dtmax, which depends on dx, sigma and the
  ## maximum combined degree of segment endpoints
  verdeg <- vertexdegree(L)
  amb <- max(verdeg[L$from] + verdeg[L$to])
  dtmax <- min(0.95 * (dx^2)/amb, sigma^2/(2 * 10), sigma * dx/6)
  if(is.null(dt)) {
    dt <- dtmax
  } else if(dt > dtmax) {
    stop(paste("dt is too large: maximum value", dtmax), call.=FALSE)
  }
  a <- FDMKERNEL(lppobj=x, sigma=sigma, dtx=dx, dtt=dt,
                 weights=weights, iterMax=1e6, sparse=TRUE)
  f <- a$kernel_fun
  if(fun) {
    ## return the interpolating function itself
    result <- f
  } else if(!finespacing) {
    result <- as.linim(f, ...)
  } else {
    ## fine spacing: build a linim whose sample-point data frame
    ## contains the exact grid values computed by FDMKERNEL
    Z <- as.im(as.linim(f, ...))
    df <- a$df
    colnames(df)[colnames(df) == "seg"] <- "mapXY"
    ij <- nearest.valid.pixel(df$x, df$y, Z)
    xy <- data.frame(xc = Z$xcol[ij$col],
                     yc = Z$yrow[ij$row])
    df <- cbind(xy, df)
    result <- linim(domain(f), Z, restrict=FALSE, df=df)
  }
  ## record the smoothing parameters actually used
  attr(result, "sigma") <- sigma
  attr(result, "dx") <- a$deltax
  attr(result, "dt") <- a$deltat
  return(result)
}

# Greg's code
# Finite-difference solution of the heat equation on a linear network.
# Each segment is split into pieces of length ~dtx (lixellate); the
# initial mass of each data point is shared between the two endpoints
# of its fine segment in proportion to proximity (tp); then the
# explicit scheme v <- A2 %*% v is iterated for M = sigma^2/(2*dtt)
# time steps.  Returns the smoothed values as a linfun plus the raw
# grid data frame and the step sizes actually used.
FDMKERNEL <- function(lppobj, sigma, dtt, weights=NULL, iterMax=5000,
                      sparse=FALSE, dtx) {
  net2 <- as.linnet(lppobj)
#  ends1 <- net2$lines$ends
  lenfs <- lengths.psp(as.psp(net2))
  seg_in_lengths <- pmax(1, round(lenfs/dtx))
  new_lpp <- lixellate(lppobj, nsplit=seg_in_lengths)
  net_nodes <- as.linnet(new_lpp)
  vvv <- as.data.frame(vertices(net_nodes))
  vertco_new <- vvv[, c("x", "y")]
  vertseg_new <- vvv$segcoarse # marks
  verttp_new <- vvv$tpcoarse # marks
  if(npoints(lppobj) == 0) {
    U0 <- numeric(npoints(net_nodes$vertices))
  } else {
    ## initial condition: each data point's weight is divided between
    ## the 'from' and 'to' vertices of its fine segment, with shares
    ## (1 - tp) and tp, then normalised by the grid spacing
    tp1 <- as.numeric(new_lpp$data$tp)
    tp2 <- as.vector(rbind(1 - tp1, tp1))
    newseg <- as.integer(new_lpp$data$seg)
    vert_init_events1 <- as.vector(rbind(net_nodes$from[newseg],
                                         net_nodes$to[newseg]))
    highest_vert <- npoints(net_nodes$vertices)
    vert_numbers <- seq_len(highest_vert)
    ff <- factor(vert_init_events1, levels=vert_numbers)
    ww <- if(is.null(weights)) tp2 else (rep(weights, each=2) * tp2)
    ww <- ww/dtx
    U0 <- tapply(ww, ff, sum)
    U0[is.na(U0)] <- 0
  }
  M <- round((sigma^2)/(2*dtt))
  if(M < 10)
    stop("No of time iterations must be > 10, decrease dtt")
  if(M > iterMax)
    stop("No of time iterations exceeds iterMax; increase dtt or increase iterMax")
  ## iteration matrix of the explicit scheme:
  ## off-diagonal entries alpha on adjacent vertices,
  ## diagonal 1 - alpha * degree
  alpha <- dtt/(dtx^2)
  A1 <- net_nodes$m *1
#  ml <- nrow(net_nodes$m)
  degree <- colSums(A1)
  dmax <- max(degree)
  A2 <- A1 * alpha
  diag(A2) <- 1 - alpha * degree
  if(1 - dmax*alpha < 0)
    stop("alpha must satisfy (1 - HIGHEST VERTEX DEGREE * ALPHA) > 0; decrease dtt or decrease D")
  if(npoints(lppobj) > 0) {
    v <- as.numeric(U0)
    for(j in 1:M) v <- A2 %*% v
    finalU <- as.numeric(v)
  } else finalU <- U0
  vert_new <-
cbind(vertco_new, vertseg_new, verttp_new) colnames(vert_new) <- c("x", "y", "seg", "tp") Nodes <- lpp(vert_new, net2, check=FALSE) nodemap <- nnfun(Nodes) interpUxyst <- function(x, y, seg, tp) { finalU[nodemap(x,y,seg,tp)] } interpU <- linfun(interpUxyst, net2) df <- cbind(vert_new, data.frame(values=finalU)) out <- list(kernel_fun = interpU, df = df, deltax = dtx, deltat = dtt) return(out) } spatstat/R/nndistlpp.R0000755000176200001440000005002113115271120014446 0ustar liggesusers# # nndistlpp.R # # $Revision: 1.20 $ $Date: 2017/06/05 10:31:58 $ # # Methods for nndist, nnwhich, nncross for linear networks # # nndist.lpp # Calculates the nearest neighbour distances in the shortest-path metric # for a point pattern on a linear network. nndist.lpp <- function(X, ..., k=1, method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) n <- npoints(X) k <- as.integer(k) stopifnot(all(k > 0)) kmax <- max(k) L <- as.linnet(X) if(is.null(br <- L$boundingradius) || is.infinite(br)) { # network may be disconnected lab <- connected(L, what="labels") if(length(levels(lab)) > 1L) { # network is disconnected result <- matrix(Inf, n, length(k)) # handle each connected component separately subsets <- split(seq_len(nvertices(L)), lab) for(i in seq_along(subsets)) { Xi <- thinNetwork(X, retainvertices=subsets[[i]]) relevant <- attr(Xi, "retainpoints") result[relevant, ] <- nndist.lpp(Xi, k=k, method=method) } return(result) } } toomany <- (kmax >= n-1) if(toomany) { ## not enough points to define kmax nearest neighbours result <- matrix(Inf, nrow=n, ncol=kmax) if(n <= 1) return(result[,k,drop=TRUE]) ## reduce kmax to feasible value kmax <- n-1 kuse <- k[k <= kmax] } else { kuse <- k } Y <- as.ppp(X) sparse <- identical(L$sparse, TRUE) ## find nearest segment for each point ## This is given by local coordinates, if available (spatstat >= 1.28-0) loco <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE) pro <- if(!is.null(seg <- loco$seg)) seg else 
nearestsegment(X, Lseg) if(method == "interpreted") { ## interpreted code D <- pairdist(X, method="interpreted") diag(D) <- Inf ans <- if(kmax == 1) apply(D, 1, min) else t(apply(D, 1, orderstats, k=kuse))[,,drop=TRUE] } else if(!sparse && kmax == 1) { # C code for non-sparse network Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath # convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L nseg <- length(from0) # upper bound on interpoint distance huge <- max(dpath) + 2 * max(lengths.psp(Lseg)) # space for result ans <- double(n) # call C zz <- .C("linnndist", np = as.integer(n), xp = as.double(Y$x), yp = as.double(Y$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), segmap = as.integer(segmap), huge = as.double(huge), answer = as.double(ans), PACKAGE = "spatstat") ans <- zz$answer } else if(spatstat.options('Cnndistlpp')) { ## use new C routine Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to ## nseg <- length(from) seglen <- lengths.psp(Lseg) ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L tp <- loco$tp ## sort by segment index oo <- order(segmap, tp) segmap <- segmap[oo] tp <- tp[oo] # upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance tol <- max(.Machine$double.eps, diameter(Frame(L))/2^20) #' kmax1 <- kmax + 1L zz <- .C("linknnd", kmax = as.integer(kmax1), np = as.integer(n), sp = as.integer(segmap), tp = as.double(tp), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), nndist = as.double(numeric(n * kmax1)), nnwhich = as.integer(integer(n * kmax1)), PACKAGE = "spatstat") ans <- matrix(, n, kmax1) ans[oo, ] <- matrix(zz$nndist, n, kmax1, byrow=TRUE) # drop first column which is zero 
corresponding to j = i ans <- ans[, -1, drop=FALSE] colnames(ans) <- paste0("dist.", 1:ncol(ans)) ans <- ans[,kuse] } else { ## use fast code for nncross ans <- nncross(X, X, what="dist", k=kuse+1) if(is.matrix(ans) || is.data.frame(ans)) colnames(ans) <- paste0("dist.", kuse) } if(!is.null(dim(ans))) { ans <- as.matrix(ans) rownames(ans) <- NULL } if(!toomany) return(ans) result[, kuse] <- as.matrix(ans) colnames(result) <- paste0("dist.", 1:ncol(result)) return(result[,k]) } # nnwhich.lpp # Identifies the nearest neighbours in the shortest-path metric # for a point pattern on a linear network. # nnwhich.lpp <- function(X, ..., k=1, method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) k <- as.integer(k) stopifnot(all(k > 0)) kmax <- max(k) n <- npoints(X) L <- as.linnet(X) if(is.null(br <- L$boundingradius) || is.infinite(br)) { # network may be disconnected lab <- connected(L, what="labels") if(length(levels(lab)) > 1L) { # network is disconnected result <- matrix(NA_integer_, n, length(k)) # handle each connected component separately subsets <- split(seq_len(nvertices(L)), lab) for(i in seq_along(subsets)) { Xi <- thinNetwork(X, retainvertices=subsets[[i]]) relevant <- attr(Xi, "retainpoints") result[relevant, ] <- nnwhich.lpp(Xi, k=k, method=method) } return(result) } } toomany <- (kmax >= n-1) if(toomany) { ## not enough points to define kmax nearest neighbours result <- matrix(NA_integer_, nrow=n, ncol=kmax) if(n <= 1) return(result[,k,drop=TRUE]) ## reduce kmax to feasible value kmax <- n-1 kuse <- k[k <= kmax] } else { kuse <- k } # Y <- as.ppp(X) sparse <- identical(L$sparse, TRUE) ## find nearest segment for each point ## This is given by local coordinates, if available (spatstat >= 1.28-0) loco <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE) pro <- if(!is.null(seg <- loco$seg)) seg else nearestsegment(X, Lseg) if(method == "interpreted") { D <- pairdist(X, method="interpreted") diag(D) <- Inf nnw <- if(kmax == 
1) apply(D, 1, which.min) else t(apply(D, 1, orderwhich, k=kuse))[,,drop=TRUE] } else if(!sparse && kmax == 1) { # C code for non-sparse network ## Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L nseg <- length(from0) # upper bound on interpoint distance huge <- max(dpath) + 2 * max(lengths.psp(Lseg)) # space for result nnd <- double(n) nnw <- integer(n) # call C zz <- .C("linnnwhich", np = as.integer(n), xp = as.double(Y$x), yp = as.double(Y$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), segmap = as.integer(segmap), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE = "spatstat") # convert C indexing to R indexing nnw <- zz$nnwhich + 1L # any zeroes occur if points have no neighbours. nnw[nnw == 0] <- NA } else if(spatstat.options('Cnndistlpp')) { ## use new C routine Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to ## nseg <- length(from) seglen <- lengths.psp(Lseg) ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L tp <- loco$tp ## sort by segment index oo <- order(segmap, tp) segmap <- segmap[oo] tp <- tp[oo] # upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance tol <- max(.Machine$double.eps, diameter(Frame(L))/2^20) #' kmax1 <- kmax + 1L zz <- .C("linknnd", kmax = as.integer(kmax1), np = as.integer(n), sp = as.integer(segmap), tp = as.double(tp), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), nndist = as.double(numeric(n * kmax1)), nnwhich = as.integer(integer(n * kmax1)), PACKAGE = "spatstat") nnw <- matrix(, n, kmax1) nnw[oo, ] <- matrix(oo[zz$nnwhich + 1L], n, kmax1, byrow=TRUE) # drop 
first column which is j = i nnw <- nnw[, -1, drop=FALSE] colnames(nnw) <- paste0("which.", 1:ncol(nnw)) nnw <- nnw[,kuse] } else { ## use fast code for nncross nnw <- nncross(X, X, what="which", k=kuse+1) if(is.matrix(nnw) || is.data.frame(nnw)) colnames(nnw) <- paste0("which.", kuse) } if(!is.null(dim(nnw))) { nnw <- as.matrix(nnw) rownames(nnw) <- NULL } if(!toomany) return(nnw) result[, kuse] <- as.matrix(nnw) colnames(result) <- paste0("which.", 1:ncol(result)) return(result[,k]) } # nncross.lpp # Identifies the nearest neighbours in the shortest-path metric # from one point pattern on a linear network to ANOTHER pattern # on the SAME network. # nncross.lpp <- local({ nncross.lpp <- function(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), ..., k=1, method="C") { stopifnot(inherits(X, "lpp")) stopifnot(inherits(Y, "lpp")) what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE) stopifnot(method %in% c("C", "interpreted")) if(is.null(iX) != is.null(iY)) stop("If one of iX, iY is given, then both must be given") exclude <- (!is.null(iX) || !is.null(iY)) check <- resolve.defaults(list(...), list(check=TRUE))$check if(check && !identical(as.linnet(X, sparse=TRUE), as.linnet(Y, sparse=TRUE))) stop("X and Y are on different linear networks") # internal use only format <- resolve.defaults(list(...), list(format="data.frame"))$format nX <- npoints(X) nY <- npoints(Y) L <- domain(X) if(is.null(br <- L$boundingradius) || is.infinite(br)) { # network may be disconnected lab <- connected(L, what="labels") if(length(levels(lab)) > 1L) { # network is disconnected # handle each connected component separately subsets <- split(seq_len(nvertices(L)), lab) nndistmat <- if("dist" %in% what) matrix(Inf, nX, length(k)) else NULL nnwhichmat <- if("which" %in% what) matrix(NA_integer_, nX, length(k)) else NULL for(i in seq_along(subsets)) { subi <- subsets[[i]] Xi <- thinNetwork(X, retainvertices=subi) useX <- attr(Xi, "retainpoints") Yi <- thinNetwork(Y, 
retainvertices=subi) useY <- attr(Yi, "retainpoints") z <- nncross.lpp(Xi, Yi, iX = iX[useX], iY=iY[useY], what=what, k=k, method=method, format="list") if("dist" %in% what) nndistmat[useX, ] <- z$dist if("which" %in% what) nnwhichmat[useX, ] <- which(useY)[z$which] } result <- list(dist=nndistmat, which=nnwhichmat)[what] if(format == "data.frame") result <- as.data.frame(result)[,,drop=TRUE] return(result) } } koriginal <- k <- as.integer(k) stopifnot(all(k > 0)) kmax <- max(k) if(exclude) { kmax <- kmax+1 k <- 1:kmax } toomany <- (kmax > nY) if(toomany) { paddist <- matrix(Inf, nX, kmax) padwhich <- matrix(NA_integer_, nX, kmax) kmax <- nY kuse <- k[k <= kmax] } else { kuse <- k } if(length(kuse) == 0) { # None of the required values are defined nnd <- paddist nnw <- padwhich maxk <- max(k) colnames(nnd) <- paste0("dist.", seq_len(maxk)) colnames(nnd) <- paste0("dist.", seq_len(maxk)) nnd <- nnd[,k,drop=TRUE] nnw <- nnw[,k,drop=TRUE] result <- list(dist=nnd, which=nnw)[what] if(format == "data.frame") result <- as.data.frame(result)[,,drop=TRUE] return(result) } need.dist <- ("dist" %in% what) || exclude need.which <- ("which" %in% what) || exclude fast <- (method == "C") && spatstat.options("Cnncrosslpp") if(!fast) { ## require dpath matrix Xsparse <- identical(domain(X)$sparse, TRUE) Ysparse <- identical(domain(Y)$sparse, TRUE) L <- if(!Xsparse && Ysparse) as.linnet(X) else if(Xsparse && !Ysparse) as.linnet(Y) else as.linnet(X, sparse=FALSE) } else L <- as.linnet(X) # nX <- npoints(X) nY <- npoints(Y) P <- as.ppp(X) Q <- as.ppp(Y) # Lvert <- L$vertices from <- L$from to <- L$to if(fast) { seglengths <- lengths.psp(as.psp(L)) } else { dpath <- L$dpath } # deal with null cases if(nX == 0) return(data.frame(dist=numeric(0), which=integer(0))[, what]) if(nY == 0) return(data.frame(dist=rep(Inf, nX), which=rep(NA_integer_, nX))[, what]) # find nearest segment for each point Xcoords <- coords(X) Ycoords <- coords(Y) Xpro <- Xcoords$seg Ypro <- Ycoords$seg # handle 
serial numbers if(exclude) { stopifnot(is.integer(iX) && is.integer(iY)) if(length(iX) != nX) stop("length of iX does not match the number of points in X") if(length(iY) != nY) stop("length of iY does not match the number of points in Y") } if(method == "interpreted") { ## interpreted code D <- crossdist(X, Y, method="interpreted") if(exclude) D[outer(iX, iY, "==")] <- Inf nnd <- nnw <- NULL if(need.dist) { nnd <- if(kmax == 1) apply(D, 1, min) else t(apply(D, 1, orderstats, k=kuse))[,,drop=TRUE] } if(need.which) { nnw <- if(kmax == 1) apply(D, 1, which.min) else t(apply(D, 1, orderwhich, k=kuse))[,,drop=TRUE] } } else { ## C code ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L nseg <- length(from0) Xsegmap <- Xpro - 1L Ysegmap <- Ypro - 1L ## upper bound on interpoint distance huge <- if(!fast) { max(dpath) + 2 * diameter(Frame(L)) } else { sum(seglengths) } ## space for result nnd <- double(nX * kmax) nnw <- integer(nX * kmax) ## call C if(fast) { ## experimental faster code ooX <- order(Xsegmap) ooY <- order(Ysegmap) tol <- max(.Machine$double.eps, diameter(Frame(L))/2^20) if(kmax > 1) { zz <- .C("linknncross", kmax = as.integer(kmax), np = as.integer(nX), sp = as.integer(Xsegmap[ooX]), tp = as.double(Xcoords$tp[ooX]), nq = as.integer(nY), sq = as.integer(Ysegmap[ooY]), tq = as.double(Ycoords$tp[ooY]), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglengths), huge = as.double(huge), tol = as.double(tol), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE = "spatstat") zznd <- matrix(zz$nndist, ncol=kmax, byrow=TRUE) zznw <- matrix(zz$nnwhich + 1L, ncol=kmax, byrow=TRUE) if(any(notfound <- (zznw == 0))) { zznd[notfound] <- NA zznw[notfound] <- NA } nnd <- matrix(nnd, nX, kmax) nnw <- matrix(nnw, nX, kmax) nnd[ooX, ] <- zznd nnw[ooX, ] <- ooY[zznw] colnames(nnd) <- colnames(nnw) <- seq_len(kmax) if(!identical(kuse, seq_len(kmax))) { nnd <- nnd[,kuse,drop=FALSE] nnw 
<- nnw[,kuse,drop=FALSE] if(length(kuse) == 1) { colnames(nnd) <- paste0("dist.", kuse) colnames(nnw) <- paste0("which.", kuse) } } } else { zz <- .C("linSnndwhich", np = as.integer(nX), sp = as.integer(Xsegmap[ooX]), tp = as.double(Xcoords$tp[ooX]), nq = as.integer(nY), sq = as.integer(Ysegmap[ooY]), tq = as.double(Ycoords$tp[ooY]), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglengths), huge = as.double(huge), tol = as.double(tol), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE = "spatstat") zznd <- zz$nndist zznw <- zz$nnwhich + 1L if(any(notfound <- (zznw == 0))) { zznd[notfound] <- NA zznw[notfound] <- NA } nnd[ooX] <- zznd nnw[ooX] <- ooY[zznw] } } else if(!exclude) { zz <- .C("linndcross", np = as.integer(nX), xp = as.double(P$x), yp = as.double(P$y), nq = as.integer(nY), xq = as.double(Q$x), yq = as.double(Q$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), psegmap = as.integer(Xsegmap), qsegmap = as.integer(Ysegmap), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE = "spatstat") nnd <- zz$nndist nnw <- zz$nnwhich + 1L } else { ## excluding certain pairs zz <- .C("linndxcross", np = as.integer(nX), xp = as.double(P$x), yp = as.double(P$y), nq = as.integer(nY), xq = as.double(Q$x), yq = as.double(Q$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), psegmap = as.integer(Xsegmap), qsegmap = as.integer(Ysegmap), idP = as.integer(iX), idQ = as.integer(iY), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE = "spatstat") nnd <- zz$nndist nnw <- zz$nnwhich + 1L } # any zeroes occur if points have no neighbours. 
nnw[nnw == 0] <- NA } if(toomany) { ## Nearest neighbours were undefined for some large values of k. ## Insert results obtained for valid 'k' back into matrix of NA/Inf if(need.dist) { paddist[,kuse] <- as.matrix(nnd) nnd <- paddist } if(need.which) { padwhich[,kuse] <- as.matrix(nnw) nnw <- padwhich } } if(exclude) { ## now find neighbours that don't have the same id number avoid <- matrix(iX[as.vector(row(nnw))] != iY[as.vector(nnw)], nrow=nrow(nnw), ncol=ncol(nnw)) colind <- apply(avoid, 1, whichcoltrue, m=seq_len(ncol(avoid)-1)) colind <- if(is.matrix(colind)) t(colind) else matrix(colind, ncol=1) rowcol <- cbind(as.vector(row(colind)), as.vector(colind)) nnd <- matrix(nnd[rowcol], nrow=nX) nnw <- matrix(nnw[rowcol], nrow=nX) nnd <- nnd[,koriginal] nnw <- nnw[,koriginal] } result <- list(dist=nnd, which=nnw)[what] if(format == "data.frame") result <- as.data.frame(result)[,,drop=TRUE] return(result) } whichcoltrue <- function(x, m) which(x)[m] nncross.lpp }) spatstat/R/vcov.mppm.R0000755000176200001440000001373413115271120014372 0ustar liggesusers# Variance-covariance matrix for mppm objects # # $Revision: 1.16 $ $Date: 2016/04/25 02:34:40 $ # # vcov.mppm <- local({ errhandler <- function(whinge, err) { switch(err, fatal=stop(whinge), warn={ warning(whinge) return(NA) }, null= return(NULL), stop(paste("Unrecognised option: err=", dQuote(err)))) } vcov.mppm <- function(object, ..., what="vcov", err="fatal") { what <- match.arg(what, c("vcov", "corr", "fisher", "Fisher", "internals", "all")) if(what == "Fisher") what <- "fisher" if(is.poisson.mppm(object) && object$Fit$fitter == "glm") return(vcmPois(object, ..., what=what, err=err)) return(vcmGibbs(object, ..., what=what, err=err)) } vcmPois <- function(object, ..., what, err) { # legacy algorithm for Poisson case gf <- object$Fit$FIT gd <- object$Fit$moadf wt <- gd$.mpl.W fi <- fitted(gf) fo <- object$trend if(is.null(fo)) fo <- (~1) mof <- model.frame(fo, gd) mom <- model.matrix(fo, mof) momnames <- 
dimnames(mom)[[2]] fisher <- sumouter(mom, fi * wt) dimnames(fisher) <- list(momnames, momnames) switch(what, fisher = { return(fisher) }, vcov = { vc <- try(solve(fisher), silent=(err == "null")) if(inherits(vc, "try-error")) return(errhandler("Fisher information is singular", err)) else return(vc) }, corr={ co <- try(solve(fisher), silent=(err == "null")) if(inherits(co, "try-error")) return(errhandler("Fisher information is singular", err)) sd <- sqrt(diag(co)) return(co / outer(sd, sd, "*")) }) } vcmGibbs <- function(object, ..., what, err, matrix.action=c("warn", "fatal", "silent"), gam.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent") ) { if(!missing(err)) { if(err == "null") err <- "silent" matrix.action <- if(missing(matrix.action)) err else match.arg(matrix.action) gam.action <- if(missing(gam.action)) err else match.arg(gam.action) logi.action <- if(missing(logi.action)) err else match.arg(logi.action) } else { matrix.action <- match.arg(matrix.action) gam.action <- match.arg(gam.action) logi.action <- match.arg(logi.action) } collectmom <- (what %in% c("internals", "all")) subs <- subfits(object, what="basicmodels") n <- length(subs) guts <- lapply(subs, vcov, what="internals", matrix.action=matrix.action, gam.action=gam.action, logi.action=logi.action, dropcoef=TRUE, ...) 
fish <- lapply(guts, getElement, name="fisher") a1 <- lapply(guts, getElement, name="A1") a2 <- lapply(guts, getElement, name="A2") a3 <- lapply(guts, getElement, name="A3") a1 <- mergeAlternatives(fish, a1) cnames <- unique(unlist(lapply(c(a1, a2, a3), colnames))) if(collectmom) { sufs <- lapply(guts, getElement, name="suff") moms <- lapply(guts, getElement, name="mom") sufs <- mergeAlternatives(sufs, moms) cnames <- unique(c(cnames, unlist(lapply(sufs, colnames)))) } nc <- length(cnames) A1 <- A2 <- A3 <- matrix(0, nc, nc, dimnames=list(cnames, cnames)) if(collectmom) Mom <- matrix(, 0, nc, dimnames=list(character(0), cnames)) for(i in seq_len(n)) { coefnames.i <- names(coef(subs[[i]])) A1 <- addsubmatrix(A1, a1[[i]], coefnames.i) A2 <- addsubmatrix(A2, a2[[i]], coefnames.i) A3 <- addsubmatrix(A3, a3[[i]], coefnames.i) if(collectmom) Mom <- bindsubmatrix(Mom, sufs[[i]]) } internals <- list(A1=A1, A2=A2, A3=A3) if(collectmom) internals <- c(internals, list(suff=Mom)) if(what %in% c("vcov", "corr", "all")) { #' variance-covariance matrix required U <- checksolve(A1, matrix.action, , "variance") vc <- if(is.null(U)) NULL else (U %*% (A1 + A2 + A3) %*% U) } out <- switch(what, fisher = A1 + A2 + A3, vcov = vc, corr = { if(is.null(vc)) return(NULL) sd <- sqrt(diag(vc)) vc / outer(sd, sd, "*") }, internals = internals, all = list(internals=internals, fisher=A1+A2+A3, varcov=vc, invgrad=A1) ) return(out) } addsubmatrix <- function(A, B, guessnames) { if(is.null(B)) return(A) if(is.null(colnames(B)) && !missing(guessnames)) { if(is.character(guessnames)) guessnames <- list(guessnames, guessnames) if(all(lengths(guessnames) == dim(B))) colnames(B) <- guessnames } if(is.null(colnames(B))) { if(!all(dim(A) == dim(B))) stop("Internal error: no column names, and matrices non-conformable") A <- A + B return(A) } j <- match(colnames(B), colnames(A)) if(anyNA(j)) stop("Internal error: unmatched column name(s)") A[j,j] <- A[j,j] + B return(A) } bindsubmatrix <- function(A, B) { 
if(is.null(B)) return(A) if(is.null(colnames(B))) { if(ncol(A) != ncol(B)) stop("Internal error: no column names, and matrices non-conformable") A <- rbind(A, B) return(A) } j <- match(colnames(B), colnames(A)) if(anyNA(j)) stop("Internal error: unmatched column name(s)") BB <- matrix(0, nrow(B), ncol(A)) BB[,j] <- B A <- rbind(A, BB) return(A) } mergeAlternatives <- function(A, B) { okA <- !sapply(A, is.null) okB <- !sapply(B, is.null) if(any(override <- !okA & okB)) A[override] <- B[override] return(A) } vcov.mppm }) spatstat/R/ord.family.R0000755000176200001440000001020213115271120014474 0ustar liggesusers# # # ord.family.S # # $Revision: 1.17 $ $Date: 2015/10/21 09:06:57 $ # # The Ord model (family of point process models) # # ord.family: object of class 'isf' defining Ord model structure # # # ------------------------------------------------------------------- # ord.family <- list( name = "ord", print = function(self) { cat("Ord model family\n") }, eval = function(X, U, EqualPairs, pot, pars, ...) { # # This auxiliary function is not meant to be called by the user. # It computes the distances between points, # evaluates the pair potential and applies edge corrections. # # Arguments: # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function function(d, p) # pars auxiliary parameters for pot list(......) # ... IGNORED # # Value: # matrix of values of the potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # # Note: # The potential function 'pot' will be called as # pot(M, pars) where M is a vector of tile areas. 
# It must return a vector of the same length as M # or a matrix with number of rows equal to the length of M ########################################################################## nX <- npoints(X) nU <- length(U$x) # number of data + dummy points seqX <- seq_len(nX) seqU <- seq_len(nU) # determine which points in the combined list are data points if(length(EqualPairs) > 0) is.data <- seqU %in% EqualPairs[,2] else is.data <- rep.int(FALSE, nU) ############################################################################# # First compute Dirichlet tessellation of data # and its total potential (which could be vector-valued) ############################################################################# marks(X) <- NULL Wdata <- dirichletWeights(X) # sic - these are the tile areas. Pdata <- pot(Wdata, pars) summa <- function(P) { if(is.matrix(P)) matrowsum(P) else if(is.vector(P) || length(dim(P))==1 ) sum(P) else stop("Don't know how to take row sums of this object") } total.data.potential <- summa(Pdata) # Initialise V dimpot <- dim(Pdata)[-1] # dimension of each value of the potential function # (= numeric(0) if potential is a scalar) dimV <- c(nU, dimpot) if(length(dimV) == 1) dimV <- c(dimV, 1) V <- array(0, dim=dimV) rowV <- array(seqU, dim=dimV) #################### Next, evaluate V for the data points. ############### # For each data point, compute Dirichlet tessellation # of the data with this point removed. # Compute difference of total potential. ############################################################################# for(j in seq_len(nX)) { # Dirichlet tessellation of data without point j Wminus <- dirichletWeights(X[-j]) # regressor is the difference in total potential V[rowV == j] <- total.data.potential - summa(pot(Wminus, pars)) } #################### Next, evaluate V for the dummy points ################ # For each dummy point, compute Dirichlet tessellation # of (data points together with this dummy point) only. 
# Take difference of total potential. ############################################################################# for(j in seqU[!is.data]) { Xplus <- superimpose(X, list(x=U$x[j], y=U$y[j]), W=X$window) # compute Dirichlet tessellation (of these points only!) Wplus <- dirichletWeights(Xplus) # regressor is difference in total potential V[rowV == j] <- summa(pot(Wplus, pars)) - total.data.potential } cat("dim(V) = \n") print(dim(V)) return(V) } ######### end of function $eval ) ######### end of list class(ord.family) <- "isf" spatstat/R/exactPdt.R0000755000176200001440000000430713115271075014226 0ustar liggesusers# # exactPdt.R # R function exactPdt() for exact distance transform of pixel image # # $Revision: 4.17 $ $Date: 2017/06/05 10:31:58 $ # "exactPdt"<- function(w) { verifyclass(w, "owin") if(w$type != "mask") stop(paste("Input must be a window of type", sQuote("mask"))) # nr <- w$dim[1L] nc <- w$dim[2L] # input image will be padded out with a margin of width 2 on all sides mr <- mc <- 2L # full dimensions of padded image Nnr <- nr + 2 * mr Nnc <- nc + 2 * mc N <- Nnr * Nnc # output image (subset): rows & columns (R indexing) rmin <- mr + 1L rmax <- Nnr - mr cmin <- mc + 1L cmax <- Nnc - mc # do padding x <- matrix(FALSE, nrow=Nnr, ncol=Nnc) x[rmin:rmax, cmin:cmax] <- w$m # res <- .C("ps_exact_dt_R", as.double(w$xrange[1L]), as.double(w$yrange[1L]), as.double(w$xrange[2L]), as.double(w$yrange[2L]), nr = as.integer(nr), nc = as.integer(nc), mr = as.integer(mr), mc = as.integer(mc), inp = as.integer(t(x)), distances = as.double (double(N)), rows = as.integer(integer(N)), cols = as.integer(integer(N)), boundary = as.double (double(N)), PACKAGE = "spatstat") dist <- matrix(res$distances, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] rows <- matrix(res$rows, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] cols <- matrix(res$cols, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] bdist<- matrix(res$boundary, ncol=Nnc, nrow=Nnr, byrow = 
TRUE)[rmin:rmax, cmin:cmax] # convert from C to R indexing rows <- rows + 1L - as.integer(mr) cols <- cols + 1L - as.integer(mc) return(list(d=dist,row=rows,col=cols,b=bdist, w=w)) } project2set <- function(X, W, ...) { stopifnot(is.ppp(X)) W <- as.mask(W, ...) eW <- exactPdt(W) ## grid location of X XX <- nearest.raster.point(X$x, X$y, W) ijX <- cbind(XX$row, XX$col) ## look up values of 'eW' at this location iY <- eW$row[ijX] jY <- eW$col[ijX] ## convert to spatial coordinates Y <- ppp(W$xcol[jY], W$yrow[iY], window=W) return(Y) } spatstat/R/colourtools.R0000755000176200001440000001151213115271075015032 0ustar liggesusers# # colourtools.R # # $Revision: 1.18 $ $Date: 2017/01/02 04:47:50 $ # rgb2hex <- function(v, maxColorValue=255) { stopifnot(is.numeric(v)) if(!is.matrix(v)) v <- matrix(v, nrow=1L) if(ncol(v) %in% c(3, 4)) { out <- rgb(v, maxColorValue=maxColorValue) return(out) } stop("v should be a vector of length 3 or 4, or a matrix with 3 or 4 columns") } rgb2hsva <- function(red, green=NULL, blue=NULL, alpha=NULL, maxColorValue=255) { if(is.null(green) && is.null(blue) && is.null(alpha)) { ## red should be a 3-row matrix of RGB values ## or a 4-row matrix of RGBA values if(!is.matrix(red)) red <- matrix(red, ncol=1L) ## check for an alpha channel if(nrow(red) == 4) { alpha <- red[4L,] red <- red[-4L, , drop=FALSE] } } y <- rgb2hsv(red, green, blue, maxColorValue=maxColorValue) if(!is.null(alpha)) y <- rbind(y, alpha=alpha/maxColorValue) return(y) } col2hex <- function(x) { # convert to RGBA y <- col2rgb(x, alpha=TRUE) # remove alpha channel if all colours are opaque if(all(y["alpha", ] == 255)) y <- y[1:3, , drop=FALSE] # convert to hex z <- rgb2hex(t(y)) return(z) } paletteindex <- function(x) { x <- col2hex(x) p <- col2hex(palette()) m <- match(x, p) return(m) } is.colour <- function(x) { if(length(x) == 0) return(FALSE) cx <- try(col2rgb(x), silent=TRUE) bad <- inherits(cx, "try-error") return(!bad) } samecolour <- function(x, y) { col2hex(x) == 
col2hex(y) } complementarycolour <- function(x) { if(is.null(x)) return(NULL) if(inherits(x, "colourmap")) { colouroutputs(x) <- complementarycolour(colouroutputs(x)) return(x) } # convert to RGBA y <- col2rgb(x, alpha=TRUE) # complement of R, G, B y[1:3, ] <- 255 - y[1:3, ] # convert to colours z <- rgb2hex(t(y)) return(z) } is.grey <- function(x) { if(inherits(x, "colourmap")) x <- colouroutputs(x) if(is.function(x)) return(NA) y <- rgb2hsva(col2rgb(x, alpha=TRUE)) sat <- y["s", ] alp <- y["alpha", ] return(sat == 0 & alp == 1) } to.opaque <- function(x) { if(all(!is.na(paletteindex(x)))) return(x) # preserve palette colours rgb(t(col2rgb(x)), maxColorValue=255) } to.transparent <- function(x, fraction) { if(all(fraction == 1)) return(to.opaque(x)) rgb(t(col2rgb(x))/255, alpha=fraction, maxColorValue=1) } to.grey <- function(x, weights=c(0.299, 0.587, 0.114), transparent=FALSE) { if(is.null(x)) return(NULL) if(inherits(x, "colourmap")) { colouroutputs(x) <- to.grey(colouroutputs(x), weights=weights, transparent=transparent) return(x) } if(is.function(x)) { f <- x g <- function(...) to.grey(f(...), weights=weights, transparent=transparent) return(g) } ## preserve palette indices, if only using black/grey if(all(!is.na(paletteindex(x))) && all(is.grey(x))) return(x) if(!transparent) { y <- col2rgb(x) g <- (weights %*% y)/(255 * sum(weights)) z <- grey(g) } else { yy <- col2rgb(x, alpha=TRUE) y <- yy[1:3, , drop=FALSE] g <- (weights %*% y)/(255 * sum(weights)) z <- grey(g, alpha=y[4L,]) } return(z) } is.col.argname <- function(x) { return(nzchar(x) & ((x == "col") | (substr(x, 1L, 4L) == "col."))) } col.args.to.grey <- function(x, ...) { if(any(hit <- is.col.argname(names(x)))) x[hit] <- lapply(x[hit], to.grey, ...) 
return(x) } # versions of rgb() and hsv() that work with NA values rgbNA <- function(red, green, blue, alpha=NULL, maxColorValue=1) { df <- if(is.null(alpha)) data.frame(red=red, green=green, blue=blue) else data.frame(red=red, green=green, blue=blue, alpha=alpha) result <- rep(NA_character_, nrow(df)) ok <- complete.cases(df) result[ok] <- if(is.null(alpha)) { with(df, rgb(red[ok], green[ok], blue[ok], maxColorValue=maxColorValue)) } else { with(df, rgb(red[ok], green[ok], blue[ok], alpha[ok], maxColorValue=maxColorValue)) } return(result) } hsvNA <- function(h, s, v, alpha=NULL) { df <- if(is.null(alpha)) data.frame(h=h, s=s, v=v) else data.frame(h=h, s=s, v=v, alpha=alpha) result <- rep(NA_character_, nrow(df)) ok <- complete.cases(df) result[ok] <- if(is.null(alpha)) { with(df, hsv(h[ok], s[ok], v[ok])) } else { with(df, hsv(h[ok], s[ok], v[ok], alpha[ok])) } return(result) } ## This function traps the colour arguments ## and converts to greyscale if required. do.call.plotfun <- function(fun, arglist, ...) { if(spatstat.options("monochrome")) { keys <- names(arglist) if(!is.null(keys)) { cols <- nzchar(keys) & ((keys %in% c("border", "col", "fg", "bg")) | (substr(keys, 1, 4) == "col.")) if(any(cols)) arglist[cols] <- lapply(arglist[cols], to.grey) } } do.call.matched(fun, arglist, ...) } spatstat/R/segtest.R0000644000176200001440000000337213115225157014126 0ustar liggesusers#' #' segtest.R #' #' Monte Carlo test of segregation for multitype patterns #' #' $Revision: 1.3 $ $Date: 2015/07/11 08:19:26 $ #' segregation.test <- function(X, ...) 
{
  UseMethod("segregation.test")
}

#' Monte Carlo test of spatial segregation for a multitype point pattern.
#' The statistic is the mean squared difference between the estimated type
#' probabilities at the data points (via relrisk) and the overall type
#' frequencies; its null distribution is obtained by relabelling.
segregation.test.ppp <- function(X, ..., nsim=19, permute=TRUE,
                                 verbose=TRUE, Xname) {
  if(missing(Xname))
    Xname <- short.deparse(substitute(X))
  stopifnot(is.ppp(X))
  stopifnot(is.multitype(X))
  ## arguments of relrisk() that would change the meaning of the statistic
  verboten <- c("at", "relative", "se",
                "leaveoneout", "casecontrol", "case", "control")
  if(any(nyet <- (verboten %in% names(list(...)))))
    stop(paste(ngettext(sum(nyet), "Argument", "Arguments"),
               commasep(sQuote(verboten[nyet])),
               "cannot be used"))
  ## overall type probabilities, replicated to one row per data point
  lam <- intensity(X)
  pbar <- lam/sum(lam)
  np <- npoints(X)
  nt <- length(pbar)
  pbar <- matrix(pbar, byrow=TRUE, nrow=np, ncol=nt)
  if(verbose) cat("Computing observed value... ")
  phat <- relrisk(X, at="points", ...)
  obs <- mean((phat-pbar)^2)
  if(verbose) {
    cat("Done.\nComputing simulated values... ")
    pstate <- list()
  }
  ## simulate under random labelling (permute=TRUE keeps the type counts,
  ## so pbar is unchanged; otherwise recompute the frequencies each time)
  sim <- numeric(nsim)
  for(i in seq_len(nsim)) {
    Xsim <- rlabel(X, permute=permute)
    phatsim <- relrisk(Xsim, at="points", ...)
    if(permute) pbarsim <- pbar else {
      lamsim <- intensity(Xsim)
      pbarsim <- lamsim/sum(lamsim)
      pbarsim <- matrix(pbarsim, byrow=TRUE, nrow=np, ncol=nt)
    }
    sim[i] <- mean((phatsim - pbarsim)^2)
    if(verbose) pstate <- progressreport(i, nsim, state=pstate)
  }
  ## Monte Carlo p-value with the usual (1 + ...) correction
  p.value <- (1+sum(sim >= obs))/(1+nsim)
  names(obs) <- "T"
  out <- list(statistic=obs,
              p.value=p.value,
              method="Monte Carlo test of spatial segregation of types",
              data.name=Xname)
  class(out) <- "htest"
  return(out)
}
spatstat/R/sparsecommon.R0000644000176200001440000001623613115271120015150 0ustar liggesusers#'
#' sparsecommon.R
#'
#' Utilities for sparse arrays
#'
#' $Revision: 1.6 $ $Date: 2017/06/05 10:31:58 $
#'

#' .............. completely generic ....................

#' Are the indices (i,j,k) inside a 3D array with dimensions 'd'?
#' Alternatively 'i' may be a 3-column matrix of (i,j,k) index triples.
inside3Darray <- function(d, i, j, k) {
  stopifnot(length(d) == 3)
  if(length(dim(i)) == 2 && missing(j) && missing(k)) {
    stopifnot(ncol(i) == 3)
    j <- i[,2]
    k <- i[,3]
    i <- i[,1]
  }
  ans <- inside.range(i, c(1, d[1])) &
         inside.range(j, c(1, d[2])) &
         inside.range(k, c(1, d[3]))
  return(ans)
}

#' .............. depends on Matrix package ................
sparseVectorCumul <- function(x, i, length) { #' extension of 'sparseVector' to allow repeated indices #' (the corresponding entries are added) z <- tapply(x, list(factor(i, levels=1:length)), sum) z <- z[!is.na(z)] sparseVector(i=as.integer(names(z)), x=as.numeric(z), length=length) } #' .............. code that mentions sparse3Darray ................ expandSparse <- function(x, n, across) { #' x is a sparse vector/matrix; replicate it 'n' times #' and form a sparse matrix/array #' in which each slice along the 'across' dimension is identical to 'x' #' Default is across = length(dim(x)) + 1 check.1.integer(n) stopifnot(n >= 1) dimx <- dim(x) if(is.null(dimx)) { if(inherits(x, "sparseVector")) dimx <- x@length else if(is.vector(x)) dimx <- length(x) else stop("Format of x is not understood", call.=FALSE) } nd <- length(dimx) if(missing(across)) across <- nd + 1L else { check.1.integer(across) if(!(across %in% (1:(nd+1L)))) stop(paste("Argument 'across' must be an integer from 1 to", nd+1L), call.=FALSE) } if(nd == 1) { if(inherits(x, "sparseVector")) { m <- length(x@x) y <- switch(across, sparseMatrix(i=rep(1:n, times=m), j=rep(x@i, each=n), x=rep(x@x, each=n), dims=c(n, dimx)), sparseMatrix(i=rep(x@i, each=n), j=rep(1:n, times=m), x=rep(x@x, each=n), dims=c(dimx, n))) } else { y <- switch(across, outer(1:n, x, function(a,b) b), outer(x, 1:n, function(a,b) a)) } } else if(nd == 2) { if(inherits(x, "sparseMatrix")) { z <- as(x, "TsparseMatrix") m <- length(z@x) y <- switch(across, sparse3Darray(i=rep(1:n, times=m), j=rep(z@i + 1L, each=n), k=rep(z@j + 1L, each=n), x=rep(z@x, each=n), dims=c(n, dimx)), sparse3Darray(i=rep(z@i + 1L, each=n), j=rep(1:n, times=m), k=rep(z@j + 1L, each=n), x=rep(z@x, each=n), dims=c(dimx[1], n, dimx[2])), sparse3Darray(i=rep(z@i + 1L, each=n), j=rep(z@j + 1L, each=n), k=rep(1:n, times=m), x=rep(z@x, each=n), dims=c(dimx, n))) } else stop("Not yet implemented for full arrays") } else stop("Not implemented for arrays of more than 2 
dimensions", call.=FALSE) return(y) } mapSparseEntries <- function(x, margin, values, conform=TRUE, across) { # replace the NONZERO entries of sparse matrix or array # by values[l] where l is one of the slice indices dimx <- dim(x) if(is.null(dimx)) { if(inherits(x, "sparseVector")) dimx <- x@length else if(is.vector(x)) dimx <- length(x) else stop("Format of x is not understood", call.=FALSE) } if(inherits(x, "sparseMatrix")) { x <- as(x, Class="TsparseMatrix") if(length(x$i) == 0) { # no entries return(x) } check.1.integer(margin) stopifnot(margin %in% 1:2) check.nvector(values, dimx[margin], things=c("rows","columns")[margin], oneok=TRUE) if(length(values) == 1) values <- rep(values, dimx[margin]) i <- x@i + 1L j <- x@j + 1L yindex <- switch(margin, i, j) y <- sparseMatrix(i=i, j=j, x=values[yindex], dims=dimx, dimnames=dimnames(x)) return(y) } if(inherits(x, "sparse3Darray")) { if(length(x$i) == 0) { # no entries return(x) } ijk <- cbind(i=x$i, j=x$j, k=x$k) if(conform) { #' ensure common pattern of sparse values #' in each slice on 'across' margin nslice <- dimx[across] #' pick one representative of each equivalence class ## ---- old code --------- ## dup <- duplicated(ijk[,-across,drop=FALSE]) ## ijk <- ijk[!dup, , drop=FALSE] ## --------------------- use <- representativeRows(ijk[,-across,drop=FALSE]) ijk <- ijk[use, , drop=FALSE] ## npattern <- nrow(ijk) #' repeat this pattern in each 'across' slice ijk <- apply(ijk, 2, rep, times=nslice) ijk[, across] <- rep(seq_len(nslice), each=npattern) } if(is.vector(values)) { # vector of values matching margin extent check.nvector(values, dimx[margin], things=c("rows","columns","planes")[margin], oneok=TRUE) if(length(values) == 1) values <- rep(values, dimx[margin]) yindex <- ijk[,margin] y <- sparse3Darray(i=ijk[,1], j=ijk[,2], k=ijk[,3], x=values[yindex], dims=dimx, dimnames=dimnames(x)) return(y) } else if(is.matrix(values)) { #' matrix of values. 
force(across) stopifnot(across != margin) #' rows of matrix must match 'margin' if(nrow(values) != dimx[margin]) stop(paste("Number of rows of values", paren(nrow(values)), "does not match array size in margin", paren(dimx[margin])), call.=FALSE) #' columns of matrix must match 'across' if(ncol(values) != dimx[across]) stop(paste("Number of columns of values", paren(ncol(values)), "does not match array size in 'across'", paren(dimx[across])), call.=FALSE) # map yindex <- ijk[,margin] zindex <- ijk[,across] y <- sparse3Darray(i=ijk[,1], j=ijk[,2], k=ijk[,3], x=values[cbind(yindex,zindex)], dims=dimx, dimnames=dimnames(x)) return(y) } else stop("Format of values not understood", call.=FALSE) } stop("Format of x not understood", call.=FALSE) } applySparseEntries <- local({ applySparseEntries <- function(x, f, ...) { ## apply vectorised function 'f' only to the nonzero entries of 'x' if(inherits(x, "sparseMatrix")) { x <- applytoxslot(x, f, ...) } else if(inherits(x, "sparse3Dmatrix")) { x <- applytoxentry(x, f, ...) } else { x <- f(x, ...) } return(x) } applytoxslot <- function(x, f, ...) { xx <- x@x n <- length(xx) xx <- f(xx, ...) if(length(xx) != n) stop(paste("Function f returned the wrong number of values:", length(xx), "instead of", n), call.=FALSE) x@x <- xx return(x) } applytoxentry <- function(x, f, ...) { xx <- x$x n <- length(xx) xx <- f(xx, ...) if(length(xx) != n) stop(paste("Function f returned the wrong number of values:", length(xx), "instead of", n), call.=FALSE) x$x <- xx return(x) } applySparseEntries }) spatstat/R/cdftest.R0000755000176200001440000003616213115271075014112 0ustar liggesusers# # cdftest.R # # $Revision: 2.15 $ $Date: 2017/01/18 07:58:44 $ # # # --------- old ------------- ks.test.ppm <- function(...) { .Deprecated("cdf.test.ppm", package="spatstat") cdf.test.ppm(...) } kstest <- kstest.ppp <- kstest.ppm <- kstest.lpp <- kstest.lppm <- kstest.slrm <- function(...) 
{ message("kstest is out of date; use cdf.test") # .Deprecated("cdf.test", package="spatstat") cdf.test(..., test="ks") } # --------------------------- cdf.test <- function(...) { UseMethod("cdf.test") } cdf.test.ppp <- function(X, covariate, test=c("ks", "cvm", "ad"), ..., interpolate=TRUE, jitter=TRUE) { Xname <- short.deparse(substitute(X)) covname <- singlestring(short.deparse(substitute(covariate))) test <- match.arg(test) if(is.character(covariate)) covname <- covariate if(!is.marked(X, dfok=TRUE)) { # unmarked model <- ppm(X) modelname <- "CSR" } else if(is.multitype(X)) { # multitype mf <- summary(X)$marks$frequency if(all(mf > 0)) { model <- ppm(X ~marks) modelname <- "CSRI" } else { warning("Ignoring marks, because some mark values have zero frequency") X <- unmark(X) model <- ppm(X) modelname <- "CSR" } } else { # marked - general case X <- unmark(X) warning("marks ignored") model <- ppm(X) modelname <- "CSR" } do.call(spatialCDFtest, resolve.defaults(list(model, covariate, test=test), list(interpolate=interpolate, jitter=jitter), list(...), list(modelname=modelname, covname=covname, dataname=Xname))) } cdf.test.ppm <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) { modelname <- short.deparse(substitute(model)) covname <- singlestring(short.deparse(substitute(covariate))) test <- match.arg(test) verifyclass(model, "ppm") if(is.character(covariate)) covname <- covariate if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(spatialCDFtest, resolve.defaults(list(model, covariate, test=test), list(interpolate=interpolate, jitter=jitter, nsim=nsim, verbose=verbose), list(...), list(modelname=modelname, covname=covname))) } cdf.test.lpp <- function(X, covariate, test=c("ks", "cvm", "ad"), ..., interpolate=TRUE, jitter=TRUE) { Xname <- short.deparse(substitute(X)) covname <- singlestring(short.deparse(substitute(covariate))) test <- match.arg(test) if(is.character(covariate)) 
covname <- covariate if(!is.marked(X, dfok=TRUE)) { # unmarked model <- lppm(X) modelname <- "CSR" } else if(is.multitype(X)) { # multitype mf <- table(marks(X)) if(all(mf > 0)) { model <- lppm(X ~ marks) modelname <- "CSRI" } else { warning("Ignoring marks, because some mark values have zero frequency") X <- unmark(X) model <- ppm(X) modelname <- "CSR" } } else { # marked - general case X <- unmark(X) warning("marks ignored") model <- ppm(X) modelname <- "CSR" } do.call(spatialCDFtest, resolve.defaults(list(model, covariate, test=test), list(interpolate=interpolate, jitter=jitter), list(...), list(modelname=modelname, covname=covname, dataname=Xname))) } cdf.test.lppm <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) { modelname <- short.deparse(substitute(model)) covname <- singlestring(short.deparse(substitute(covariate))) test <- match.arg(test) verifyclass(model, "lppm") if(is.character(covariate)) covname <- covariate if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(spatialCDFtest, resolve.defaults(list(model, covariate, test=test), list(interpolate=interpolate, jitter=jitter, nsim=nsim, verbose=verbose), list(...), list(modelname=modelname, covname=covname))) } cdf.test.slrm <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., modelname=NULL, covname=NULL) { # get names if(is.null(modelname)) modelname <- short.deparse(substitute(model)) if(is.null(covname)) covname <- short.deparse(substitute(covariate)) dataname <- model$CallInfo$responsename test <- match.arg(test) # stopifnot(is.slrm(model)) stopifnot(is.im(covariate)) # extract data prob <- fitted(model) covim <- as.im(covariate, W=as.owin(prob)) probvalu <- as.matrix(prob) covvalu <- as.matrix(covim) ok <- !is.na(probvalu) & !is.na(covvalu) probvalu <- as.vector(probvalu[ok]) covvalu <- as.vector(covvalu[ok]) # compile weighted cdf's FZ <- ewcdf(covvalu, probvalu/sum(probvalu)) X <- model$Data$response ZX 
<- safelookup(covim, X) # Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) if(min(xxx) > min(ZX)) { xxx <- c(min(ZX), xxx) yyy <- c(0, yyy) } if(max(xxx) < max(ZX)) { xxx <- c(xxx, max(ZX)) yyy <- c(yyy, 1) } # make piecewise linear approximation of cdf FZ <- approxfun(xxx, yyy, rule=2) # now apply cdf U <- FZ(ZX) # Test uniformity of transformed values result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") # modify the 'htest' entries result$method <- paste("Spatial", testname, "test of", "inhomogeneous Poisson process", "in two dimensions") result$data.name <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname), "\n and transformed to uniform distribution under", sQuote(modelname)) # additional class 'cdftest' class(result) <- c("cdftest", class(result)) attr(result, "prep") <- list(Zvalues=covvalu, ZX=ZX, FZ=FZ, FZX=ecdf(ZX), U=U) attr(result, "info") <- list(modelname=modelname, covname=covname, dataname=dataname, csr=FALSE) return(result) } #............. 
helper functions ........................# spatialCDFtest <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., dimyx=NULL, eps=NULL, interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE, modelname=NULL, covname=NULL, dataname=NULL) { # conduct test based on comparison of CDF's of covariate values test <- match.arg(test) ispois <- is.poisson(model) # compute the essential data fra <- spatialCDFframe(model, covariate, dimyx=dimyx, eps=eps, interpolate=interpolate, jitter=jitter, modelname=modelname, covname=covname, dataname=dataname) values <- fra$values info <- fra$info ## Test uniformity of transformed values U <- values$U result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") ## if(!ispois) { ## Gibbs model: perform Monte Carlo test result$poisson.p.value <- pobs <- result$p.value result$poisson.statistic <- tobs <- result$statistic Xsim <- simulate(model, nsim=nsim, progress=verbose) sim.pvals <- sim.stats <- numeric(nsim) if(verbose) { cat("Processing.. 
") state <- list() } for(i in seq_len(nsim)) { model.i <- update(model, Xsim[[i]]) fra.i <- spatialCDFframe(model.i, covariate, dimyx=dimyx, eps=eps, interpolate=interpolate, jitter=jitter, modelname=modelname, covname=covname, dataname=dataname) U.i <- fra.i$values$U res.i <- switch(test, ks = ks.test(U.i, "punif", ...), cvm = cvm.test(U.i, "punif", ...), ad = ad.test(U.i, "punif", ...)) sim.pvals[i] <- res.i$p.value sim.stats[i] <- res.i$statistic if(verbose) state <- progressreport(i, nsim, state=state) } if(verbose) cat("Done.\n") result$sim.pvals <- sim.pvals result$sim.stats <- sim.stats ## Monte Carlo p-value ## For tied p-values, first compare values of test statistics ## (because p = 0 may occur due to rounding) ## otherwise resolve ties by randomisation nless <- sum(sim.pvals < pobs) nplus <- sum(sim.pvals == pobs & sim.stats > tobs) nties <- sum(sim.pvals == pobs & sim.stats == tobs) result$p.value <- (nless + nplus + sample(0:nties, 1L))/(nsim+1L) } ## # modify the 'htest' entries csr <- info$csr modelname <- if(csr) "CSR" else if(ispois) "inhomogeneous Poisson process" else "Gibbs process" result$method <- paste(if(ispois) "Spatial" else "Monte Carlo spatial", testname, "test of", modelname, "in", info$spacename) result$data.name <- paste("covariate", sQuote(singlestring(info$covname)), "evaluated at points of", sQuote(info$dataname), "\n and transformed to uniform distribution under", if(csr) info$modelname else sQuote(info$modelname)) # additional class 'cdftest' class(result) <- c("cdftest", class(result)) attr(result, "frame") <- fra return(result) } spatialCDFframe <- function(model, covariate, ...) { # evaluate CDF of covariate values at data points and at pixels stuff <- evalCovar(model, covariate, ...) 
# extract values <- stuff$values # info <- stuff$info Zvalues <- values$Zvalues lambda <- values$lambda weights <- values$weights ZX <- values$ZX # compute empirical cdf of Z values at points of X FZX <- ecdf(ZX) # form weighted cdf of Z values in window wts <- lambda * weights sumwts <- sum(wts) FZ <- ewcdf(Zvalues, wts/sumwts) # Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) minZX <- min(ZX, na.rm=TRUE) minxxx <- min(xxx, na.rm=TRUE) if(minxxx > minZX) { xxx <- c(minZX, xxx) yyy <- c(0, yyy) } maxZX <- max(ZX, na.rm=TRUE) maxxxx <- max(xxx, na.rm=TRUE) if(maxxxx < maxZX) { xxx <- c(xxx, maxZX) yyy <- c(yyy, 1) } # make piecewise linear approximation of cdf FZ <- approxfun(xxx, yyy, rule=2) # now apply cdf U <- FZ(ZX) # pack up stuff$values$FZ <- FZ stuff$values$FZX <- FZX stuff$values$U <- U stuff$values$EN <- sumwts ## integral of intensity = expected number of pts class(stuff) <- "spatialCDFframe" return(stuff) } plot.kstest <- function(x, ...) { message("kstest is out of date; use cdf.test") # .Deprecated("plot.cdftest", package="spatstat") plot.cdftest(x, ...) 
} plot.cdftest <- function(x, ..., style=c("cdf", "PP", "QQ"), lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2, do.legend=TRUE) { style <- match.arg(style) fram <- attr(x, "frame") if(!is.null(fram)) { values <- fram$values info <- fram$info } else { # old style values <- attr(x, "prep") info <- attr(x, "info") } # cdf of covariate Z over window FZ <- values$FZ # cdf of covariate values at data points FZX <- values$FZX # blurb covname <- info$covname covdescrip <- switch(covname, x="x coordinate", y="y coordinate", paste("covariate", dQuote(covname))) # plot it switch(style, cdf={ # plot both cdf's superimposed qZ <- get("x", environment(FZ)) pZ <- get("y", environment(FZ)) main <- c(x$method, paste("based on distribution of", covdescrip), paste("p-value=", signif(x$p.value, 4))) do.call(plot.default, resolve.defaults( list(x=qZ, y=pZ, type="l"), list(...), list(lwd=lwd0, col=col0, lty=lty0), list(xlab=info$covname, ylab="probability", main=main))) plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty) if(do.legend) legend("topleft", c("observed", "expected"), lwd=c(lwd,lwd0), col=c(col2hex(col), col2hex(col0)), lty=c(lty2char(lty),lty2char(lty0))) }, PP={ # plot FZX o (FZ)^{-1} pX <- get("y", environment(FZX)) qX <- get("x", environment(FZX)) p0 <- FZ(qX) do.call(plot.default, resolve.defaults( list(x=p0, y=pX), list(...), list(col=col), list(xlim=c(0,1), ylim=c(0,1), xlab="Theoretical probability", ylab="Observed probability", main=""))) abline(0,1, lwd=lwd0, col=col0, lty=lty0) }, QQ={ # plot (FZX)^{-1} o FZ pZ <- get("y", environment(FZ)) qZ <- get("x", environment(FZ)) FZinverse <- approxfun(pZ, qZ, rule=2) pX <- get("y", environment(FZX)) qX <- get("x", environment(FZX)) qZX <- FZinverse(pX) Zrange <- range(qZ, qX, qZX) xlab <- paste("Theoretical quantile of", covname) ylab <- paste("Observed quantile of", covname) do.call(plot.default, resolve.defaults( list(x=qZX, y=qX), list(...), list(col=col), list(xlim=Zrange, 
ylim=Zrange, xlab=xlab, ylab=ylab, main=""))) abline(0,1, lwd=lwd0, col=col0, lty=lty0) }) return(invisible(NULL)) } spatstat/R/dummy.R0000755000176200001440000002765513115271075013620 0ustar liggesusers# # dummy.S # # Utilities for generating patterns of dummy points # # $Revision: 5.31 $ $Date: 2015/10/21 09:06:57 $ # # corners() corners of window # gridcenters() points of a rectangular grid # stratrand() random points in each tile of a rectangular grid # spokes() Rolf's 'spokes' arrangement # # concatxy() concatenate any lists of x, y coordinates # # default.dummy() Default action to create a dummy pattern # corners <- function(window) { window <- as.owin(window) x <- window$xrange[c(1L,2L,1L,2L)] y <- window$yrange[c(1L,1L,2L,2L)] return(list(x=x, y=y)) } gridcenters <- gridcentres <- function(window, nx, ny) { window <- as.owin(window) xr <- window$xrange yr <- window$yrange x <- seq(from=xr[1L], to=xr[2L], length.out = 2L * nx + 1L)[2L * (1:nx)] y <- seq(from=yr[1L], to=yr[2L], length.out = 2L * ny + 1L)[2L * (1:ny)] x <- rep.int(x, ny) y <- rep.int(y, rep.int(nx, ny)) return(list(x=x, y=y)) } stratrand <- function(window,nx,ny, k=1) { # divide window into an nx * ny grid of tiles # and place k points at random in each tile window <- as.owin(window) wide <- diff(window$xrange)/nx high <- diff(window$yrange)/ny cent <- gridcentres(window, nx, ny) cx <- rep.int(cent$x, k) cy <- rep.int(cent$y, k) n <- nx * ny * k x <- cx + runif(n, min = -wide/2, max = wide/2) y <- cy + runif(n, min = -high/2, max = high/2) return(list(x=x,y=y)) } tilecentroids <- function (W, nx, ny) { W <- as.owin(W) if(W$type == "rectangle") return(gridcentres(W, nx, ny)) else { # approximate W <- as.mask(W) rxy <- rasterxy.mask(W, drop=TRUE) xx <- rxy$x yy <- rxy$y pid <- gridindex(xx,yy,W$xrange,W$yrange,nx,nx)$index x <- tapply(xx,pid,mean) y <- tapply(yy,pid,mean) return(list(x=x,y=y)) } } cellmiddles <- local({ # auxiliary middle <- function(v) { n <- length(v); mid <- ceiling(n/2); 
v[mid]} dcut <- function(x, nx, xrange) { dx <- diff(xrange)/nx fx <- ((x - xrange[1L])/dx) %% 1 bx <- dx * pmin(fx, 1-fx) bx } # main cellmiddles <- function (W, nx, ny, npix=NULL, distances=FALSE) { if(W$type == "rectangle") return(gridcentres(W, nx, ny)) # pixel approximation to window # This matches the pixel approximation used to compute tile areas # and ensures that dummy points are generated only inside those tiles # that have nonzero digital area M <- as.mask(W, dimyx=rev(npix)) xx <- as.vector(rasterx.mask(M, drop=TRUE)) yy <- as.vector(rastery.mask(M, drop=TRUE)) pid <- gridindex(xx,yy,W$xrange,W$yrange,nx,ny)$index # compute tile centroids xmid <- tapply(xx, pid, mean) ymid <- tapply(yy, pid, mean) # check whether they are inside window ok <- inside.owin(xmid, ymid, W) if(all(ok)) return(list(x=xmid, y=ymid)) # some problem tiles bad <- rep.int(TRUE, nx * ny) bad[as.integer(names(xmid))] <- !ok badpid <- bad[pid] if(!distances) { midpix <- tapply(seq_along(pid)[badpid], pid[badpid], middle) } else { # find 'middle' points using boundary distances Dlines <- im(outer(dcut(M$yrow,ny,M$yrange), dcut(M$xcol,nx,M$xrange), "pmin"), M$xcol, M$yrow, M$xrange, M$yrange) Dbdry <- bdist.pixels(M) Dtile <- eval.im(pmin(Dlines, Dbdry)) dtile <- as.vector(Dtile[M]) df <- data.frame(dtile=dtile, id=seq_along(dtile))[badpid, , drop=FALSE] midpix <- by(df, pid[badpid], midpixid) } xmid[!ok] <- xx[midpix] ymid[!ok] <- yy[midpix] return(list(x=xmid,y=ymid)) } midpixid <- function(z) { z$id[which.max(z$dtile)] } cellmiddles }) spokes <- function(x, y, nrad = 3, nper = 3, fctr = 1.5, Mdefault=1) { # # Rolf Turner's "spokes" arrangement # # Places dummy points on radii of circles # emanating from each data point x[i], y[i] # # nrad: number of radii from each data point # nper: number of dummy points per radius # fctr: length of largest radius = fctr * M # where M is mean nearest neighbour distance in data # pat <- inherits(x,"ppp") if(pat) w <- x$w 
if(checkfields(x,c("x","y"))) { y <- x$y x <- x$x } M <- if(length(x) > 1) mean(nndist(x,y)) else Mdefault lrad <- fctr * M / nper theta <- 2 * pi * (1:nrad)/nrad cs <- cos(theta) sn <- sin(theta) xt <- lrad * as.vector((1:nper) %o% cs) yt <- lrad * as.vector((1:nper) %o% sn) xd <- as.vector(outer(x, xt, "+")) yd <- as.vector(outer(y, yt, "+")) tmp <- list(x = xd, y = yd) if(pat) return(as.ppp(tmp,W=w)[w]) else return(tmp) } # concatenate any number of list(x,y) into a list(x,y) concatxy <- function(...) { x <- unlist(lapply(list(...), getElement, name="x")) y <- unlist(lapply(list(...), getElement, name="y")) if(length(x) != length(y)) stop("Internal error: lengths of x and y unequal") return(list(x=x,y=y)) } #------------------------------------------------------------ default.dummy <- function(X, nd=NULL, random=FALSE, ntile=NULL, npix = NULL, quasi=FALSE, ..., eps=NULL, verbose=FALSE) { # default action to create dummy points. # regular grid of nd[1] * nd[2] points # plus corner points of window frame, # all clipped to window. 
orig <- list(nd=nd, eps=eps, ntile=ntile, npix=npix) orig <- orig[!sapply(orig, is.null)] # X <- as.ppp(X) win <- X$window # # # default dimensions a <- default.n.tiling(X, nd=nd, ntile=ntile, npix=npix, eps=eps, random=random, quasi=quasi, verbose=verbose) nd <- a$nd ntile <- a$ntile npix <- a$npix periodsample <- !quasi && !random && is.mask(win) && all(nd %% win$dim == 0) # make dummy points dummy <- if(quasi) rQuasi(prod(nd), as.rectangle(win)) else if(random) stratrand(win, nd[1L], nd[2L], 1) else cellmiddles(win, nd[1L], nd[2L], npix) dummy <- as.ppp(dummy, win, check=FALSE) # restrict to window if(!is.rectangle(win) && !periodsample) dummy <- dummy[win] # corner points corn <- as.ppp(corners(win), win, check=FALSE) corn <- corn[win] dummy <- superimpose(dummy, corn, W=win, check=FALSE) if(dummy$n == 0) stop("None of the dummy points lies inside the window") # pass parameters for computing weights attr(dummy, "weight.parameters") <- append(list(...), list(ntile=ntile, verbose=verbose, npix=npix)) # record parameters used to create dummy locations attr(dummy, "dummy.parameters") <- list(nd=nd, random=random, quasi=quasi, verbose=verbose, orig=orig) return(dummy) } # Criteria: # for rectangular windows, # R1. nd >= ntile # for non-rectangular windows, # R2. nd should be a multiple of ntile # R3. each dummy point is also a pixel of the npix grid # R4. npix should ideally be a multiple of nd, for speed # R5. npix should be large, for accuracy # R6. npix should not be too large, for speed # R7. if the window is a mask, npix should ideally be # a multiple of the mask array dimensions, for speed. 
# default.n.tiling <- local({ # auxiliary ensure2print <- function(x, verbose=TRUE, blah="user specified") { xname <- short.deparse(substitute(x)) x <- ensure2vector(x) if(verbose) cat(paste(blah, xname, "=", x[1L], "*", x[2L], "\n")) x } minmultiple <- function(n, lo, hi) { if(lo > hi) { temp <- hi hi <- lo lo <- temp } if(n > hi) return(hi) m <- n * (floor(lo/n):ceiling(hi/n)) m <- m[m >= n & m >= lo & m <= hi] if(length(m) > 0) min(m) else hi } mindivisor <- function(N, lo, Nbig) { d <- divisors(N) ok <- (d >= lo) if(any(ok)) return(min(d[ok])) m <- floor(Nbig/N) d <- unlist(lapply(as.list(seq_len(m) * N), divisors)) d <- sort(unique(d)) ok <- (d >= lo) if(any(ok)) return(min(d[ok])) return(Nbig) } min2mul <- function(n, lo, hi) c(minmultiple(n[1L], lo[1L], hi[1L]), minmultiple(n[2L], lo[2L], hi[2L])) min2div <- function(N, lo, Nbig) c(mindivisor(N[1L], lo[1L], Nbig[1L]), mindivisor(N[2L], lo[2L], Nbig[2L])) maxdiv <- function(n, k=1) { if(length(n) > 1L) return(c(maxdiv(n[1L], k), maxdiv(n[2L], k))) ## k-th largest divisor other than n d <- divisors(n) m <- length(d) ans <- if(m == 2L) n else if(m < 2+k) d[2L] else d[m-k] return(ans) } # main default.n.tiling <- function(X, nd=NULL, ntile=NULL, npix=NULL, eps=NULL, random=FALSE, quasi=FALSE, verbose=TRUE) { # computes dimensions of rectangular grids of # - dummy points (nd) (eps) # - tiles for grid weights (ntile) # - pixels for approximating area (npix) # for data pattern X. 
# verifyclass(X, "ppp") win <- X$window pixels <- (win$type != "rectangle") if(nd.given <- !is.null(nd)) nd <- ensure2print(nd, verbose) if(ntile.given <- !is.null(ntile)) ntile <- ensure2print(ntile, verbose) if(npix.given <- !is.null(npix)) npix <- ensure2print(npix, verbose) if(pixels) sonpixel <- rev(ensure2print(spatstat.options("npixel"), verbose, "")) ndummy.min <- ensure2print(spatstat.options("ndummy.min"), verbose, "") ndminX <- pmax(ndummy.min, 10 * ceiling(2 * sqrt(X$n)/10)) ndminX <- ensure2vector(ndminX) if(!is.null(eps)) { eps <- ensure2print(eps, verbose) Xbox <- as.rectangle(as.owin(X)) sides <- with(Xbox, c(diff(xrange), diff(yrange))) ndminX <- pmax(ndminX, ceiling(sides/eps)) } # range of acceptable values for npix if(npix.given) Nmin <- Nmax <- npix else switch(win$type, rectangle = { Nmin <- ensure2vector(X$n) Nmax <- Inf }, polygonal = { Nmin <- sonpixel Nmax <- 4 * sonpixel }, mask={ nmask <- rev(win$dim) Nmin <- nmask Nmax <- pmax(2 * nmask, 4 * sonpixel) }) # determine values of nd and ntile if(nd.given && !ntile.given) { # ntile must be a divisor of nd if(any(nd > Nmax)) warning("number of dummy points nd exceeds maximum pixel dimensions") ntile <- min2div(nd, ndminX, nd) } else if(!nd.given && ntile.given) { # nd must be a multiple of ntile nd <- min2mul(ntile, ndminX, Nmin) if(any(nd >= Nmin)) nd <- ntile } else if(!nd.given && !ntile.given) { if(!pixels) { nd <- ntile <- ensure2vector(ndminX) if(verbose) cat(paste("nd and ntile default to", nd[1L], "*", nd[2L], "\n")) } else { # find suitable divisors of the number of pixels nd <- ntile <- min2div(Nmin, ndminX, Nmax) if(any(nd >= Nmin)) { # none suitable if(verbose) cat("No suitable divisor of pixel dimensions\n") nd <- ntile <- ndminX } } } else { # both nd, ntile were given if(any(ntile > nd)) warning("the number of tiles (ntile) exceeds the number of dummy points (nd)") } if(!ntile.given && quasi) { if(verbose) cat("Adjusting ntile because quasi=TRUE\n") ntile <- maxdiv(ntile, 
if(pixels) 2L else 1L) } if(!npix.given && pixels) npix <- min2mul(nd, Nmin, Nmax) if(verbose) { if(!quasi) cat(paste("dummy points:", paste0(if(random) "stratified random in" else NULL, "grid"), nd[1L], "x", nd[2L], "\n")) else cat(paste("dummy points:", nd[1L], "x", nd[2L], "=", prod(nd), "quasirandom points\n")) cat(paste("weighting tiles", ntile[1L], "x", ntile[2L], "\n")) if(pixels) cat(paste("pixel grid", npix[1L], "x", npix[2L], "\n")) } if(pixels) return(list(nd=nd, ntile=ntile, npix=npix)) else return(list(nd=nd, ntile=ntile, npix=npix)) } default.n.tiling }) spatstat/R/quantess.R0000644000176200001440000001561313115225157014314 0ustar liggesusers#' quantess.R #' #' Quantile Tessellation #' #' $Revision: 1.12 $ $Date: 2016/02/18 07:26:34 $ quantess <- function(M, Z, n, ...) { UseMethod("quantess") } quantess.owin <- function(M, Z, n, ..., type=2) { W <- as.owin(M) tcross <- MinimalTess(W, ...) force(n) if(!is.character(Z)) { Zim <- as.im(Z, W) Zrange <- range(Zim) } else { if(!(Z %in% c("x", "y"))) stop(paste("Unrecognised covariate", dQuote(Z))) if(is.rectangle(W)) { out <- switch(Z, x={ quadrats(W, nx=n, ny=1) }, y={ quadrats(W, nx=1, ny=n) }) if(!is.null(tcross)) out <- intersect.tess(out, tcross) return(out) } switch(Z, x={ Zfun <- function(x,y){x} Zrange <- boundingbox(W)$xrange }, y={ Zfun <- function(x,y){y} Zrange <- boundingbox(W)$yrange }) Zim <- as.im(Zfun, W) } qZ <- quantile(Zim, probs=(1:(n-1))/n, type=type) qZ <- c(Zrange[1], qZ, Zrange[2]) if(is.polygonal(W) && is.character(Z)) { R <- Frame(W) strips <- switch(Z, x = tess(xgrid=qZ, ygrid=R$yrange), y = tess(xgrid=R$xrange, ygrid=qZ)) out <- intersect.tess(strips, tess(tiles=list(W))) qzz <- signif(qZ, 3) tilenames(out) <- paste0("[", qzz[1:n], ",", qzz[-1], c(rep(")", n-1), "]")) } else { ZC <- cut(Z, breaks=qZ, include.lowest=TRUE, right=FALSE) out <- tess(image=ZC) } if(!is.null(tcross)) out <- intersect.tess(out, tcross) return(out) } quantess.ppp <- function(M, Z, n, ..., type=2) { W <- 
as.owin(M) tcross <- MinimalTess(W, ...) force(n) if(!is.character(Z)) { Zim <- as.im(Z, W) ZM <- if(is.function(Z)) Z(M$x, M$y) else Zim[M] Zrange <- range(range(Zim), ZM) } else { if(!(Z %in% c("x", "y"))) stop(paste("Unrecognised covariate", dQuote(Z))) if(is.rectangle(W)) { switch(Z, x={ qx <- quantile(M$x, probs=(1:(n-1))/n, type=type) qx <- c(W$xrange[1], qx, W$xrange[2]) out <- tess(xgrid=qx, ygrid=W$yrange) }, y={ qy <- quantile(M$y, probs=(1:(n-1))/n, type=type) qy <- c(W$yrange[1], qy, W$yrange[2]) out <- tess(xgrid=W$xrange, ygrid=qy) }) if(!is.null(tcross)) out <- intersect.tess(out, tcross) return(out) } switch(Z, x={ Zfun <- function(x,y){x} ZM <- M$x Zrange <- boundingbox(W)$xrange }, y={ Zfun <- function(x,y){y} ZM <- M$y Zrange <- boundingbox(W)$yrange }) Zim <- as.im(Zfun, W) } qZ <- quantile(ZM, probs=(1:(n-1))/n, type=type) qZ <- c(Zrange[1], qZ, Zrange[2]) if(is.polygonal(W) && is.character(Z)) { R <- Frame(W) strips <- switch(Z, x = tess(xgrid=qZ, ygrid=R$yrange), y = tess(xgrid=R$xrange, ygrid=qZ)) out <- intersect.tess(strips, tess(tiles=list(W))) qzz <- signif(qZ, 3) tilenames(out) <- paste0("[", qzz[1:n], ",", qzz[-1], c(rep(")", n-1), "]")) } else { ZC <- cut(Zim, breaks=qZ, include.lowest=TRUE) out <- tess(image=ZC) } if(!is.null(tcross)) out <- intersect.tess(out, tcross) return(out) } quantess.im <- function(M, Z, n, ..., type=2) { W <- Window(M) tcross <- MinimalTess(W, ...) 
force(n) if(!(type %in% c(1,2))) stop("Only quantiles of type 1 and 2 are implemented for quantess.im") if(is.character(Z)) Z <- switch(Z, x=function(x,y){x}, y=function(x,y){y}, stop(paste("Unrecognised covariate", dQuote(Z)))) MZ <- harmonise(M=M, Z=Z) M <- MZ$M[W, drop=FALSE] Z <- MZ$Z[W, drop=FALSE] Zrange <- range(Z) Fun <- ewcdf(Z[], weights=M[]/sum(M[])) qZ <- quantile(Fun, probs=(1:(n-1))/n, type=type) qZ <- c(Zrange[1], qZ, Zrange[2]) ZC <- cut(Z, breaks=qZ, include.lowest=TRUE) out <- tess(image=ZC) qzz <- signif(qZ, 3) tilenames(out) <- paste0("[", qzz[1:(n-1)], ",", qzz[-1], c(rep(")", n-1), "]")) if(!is.null(tcross)) out <- intersect.tess(out, tcross) return(out) } MinimalTess <- function(W, ...) { # find the minimal tessellation of W consistent with the arguments argh <- list(...) if(length(argh) == 0) return(NULL) nama <- names(argh) if(any(c("nx", "ny") %in% nama)) { fun <- quadrats dflt <- list(nx=1, ny=1) } else if(any(c("xbreaks", "ybreaks") %in% nama)) { fun <- quadrats dflt <- list(xbreaks=W$xrange, ybreaks=W$yrange) } else { fun <- tess dflt <- list(window=W, keepempty=TRUE) } v <- do.call(fun, resolve.defaults(list(W), argh, dflt)) return(v) } nestsplit <- function(X, ...) { stopifnot(is.ppp(X)) flist <- list(...) 
cansplit <- sapply(flist, inherits, what=c("factor", "tess", "owin", "im", "character")) splitted <- lapply(flist[cansplit], split, x=X) splitters <- lapply(splitted, attr, which="fsplit") if(any(!cansplit)) { extra <- do.call(MinimalTess, append(list(W=Window(X)), flist[!cansplit])) pos <- min(which(!cansplit)) ns <- length(splitters) if(pos > ns) { splitters <- append(splitters, list(extra)) } else { before <- splitters[seq_len(pos-1)] after <- splitters[pos:ns] splitters <- c(before, list(extra), after) } } ns <- length(splitters) if(ns == 0) return(X) if(ns == 1) return(split(X, splitters[[1]])) if(ns > 2) stop("Nesting depths greater than 2 are not yet implemented") names(splitters) <- good.names(names(splitters), paste0("f", 1:ns)) fax1 <- is.factor(sp1 <- splitters[[1]]) fax2 <- is.factor(sp2 <- splitters[[2]]) lev1 <- if(fax1) levels(sp1) else seq_len(sp1$n) lev2 <- if(fax2) levels(sp2) else seq_len(sp2$n) if(!fax1 && !fax2) { ## two tessellations marks(sp1) <- factor(lev1, levels=lev1) marks(sp2) <- factor(lev2, levels=lev2) sp12 <- intersect.tess(sp1, sp2, keepmarks=TRUE) pats <- split(X, sp12) f1 <- marks(sp12)[,1] f2 <- marks(sp12)[,2] } else { if(fax1 && fax2) { ## two grouping factors Xsp1 <- split(X, sp1) sp2.1 <- split(sp2, sp1) ll <- mapply(split, Xsp1, sp2.1, SIMPLIFY=FALSE) } else if(fax1 && !fax2) { ## grouping factor and tessellation Xsp1 <- split(X, sp1) ll <- lapply(Xsp1, split, f=sp2) } else if(!fax1 && fax2) { ## tessellation and grouping factor Xsp1 <- split(X, sp1) sp2.1 <- split(sp2, attr(Xsp1, "fgroup")) ll <- mapply(split, Xsp1, sp2.1, SIMPLIFY=FALSE) } neach <- lengths(ll) f1 <- rep(factor(lev1, levels=lev1), neach) f2 <- rep(factor(lev2, levels=lev2), length(Xsp1)) pats <- do.call(c, unname(ll)) } h <- hyperframe(pts=pats, f1=f1, f2=f2) names(h)[2:3] <- names(splitters) return(h) } spatstat/R/lintess.R0000644000176200001440000002035613155775143014143 0ustar liggesusers#' #' lintess.R #' #' Tessellations on a Linear Network #' #' 
$Revision: 1.15 $ $Date: 2017/09/12 15:16:26 $ #' lintess <- function(L, df) { verifyclass(L, "linnet") if(missing(df) || is.null(df)) { # tessellation consisting of a single tile ns <- nsegments(L) df <- data.frame(seg=seq_len(ns), t0=0, t1=1, tile=factor(1)) out <- list(L=L, df=df) class(out) <- c("lintess", class(out)) return(out) } # validate 'df' stopifnot(is.data.frame(df)) needed <- c("seg", "t0", "t1", "tile") if(any(bad <- is.na(match(needed, colnames(df))))) stop(paste(ngettext(sum(bad), "Column", "Columns"), commasep(sQuote(needed[bad])), "missing from data frame"), call.=FALSE) df$seg <- as.integer(df$seg) df$tile <- as.factor(df$tile) if(any(reversed <- with(df, t1 < t0))) df[reversed, c("t0", "t1")] <- df[reversed, c("t1", "t0")] with(df, { segU <- sort(unique(seg)) segN <- seq_len(nsegments(L)) if(length(omitted <- setdiff(segN, segU)) > 0) stop(paste(ngettext(length(omitted), "Segment", "Segments"), commasep(omitted), "omitted from data"), call.=FALSE) if(length(unknown <- setdiff(segU, segN)) > 0) stop(paste(ngettext(length(unknown), "Segment", "Segments"), commasep(unknown), ngettext(length(unknown), "do not", "does not"), "exist in the network"), call.=FALSE) pieces <- split(df, seg) for(piece in pieces) { t0 <- piece$t0 t1 <- piece$t1 thedata <- paste("Data for segment", piece$seg[[1L]]) if(!any(t0 == 0)) stop(paste(thedata, "do not contain an entry with t0 = 0"), call.=FALSE) if(!any(t1 == 1)) stop(paste(thedata, "do not contain an entry with t1 = 1"), call.=FALSE) if(any(t1 < 1 & is.na(match(t1, t0))) | any(t0 > 0 & is.na(match(t0, t1)))) stop(paste(thedata, "are inconsistent"), call.=FALSE) } }) out <- list(L=L, df=df) class(out) <- c("lintess", class(out)) return(out) } print.lintess <- function(x, ...) 
{
  ## Print method for 'lintess' objects (tessellation on a linear network).
  splat("Tessellation on a linear network")
  ntile <- length(levels(x$df$tile))
  splat(ntile, "tiles")
  if(anyNA(x$df$tile))
    splat("[An additional tile is labelled NA]")
  invisible(NULL)
}

tile.lengths <- function(x) {
  ## Total length of network covered by each tile of the tessellation.
  if(!inherits(x, "lintess"))
    stop("x should be a tessellation on a linear network (class 'lintess')",
         call.=FALSE)
  ## lengths of the network segments
  seglen <- lengths.psp(as.psp(x$L))
  dat <- x$df
  ## each row of the data frame covers the fraction [t0, t1] of one segment
  fraglen <- seglen[dat$seg] * (dat$t1 - dat$t0)
  ## total fragment length in each tile
  tapplysum(fraglen, list(dat$tile))
}

summary.lintess <- function(object, ...) {
  ## Gather summary information: tile labels, tile lengths,
  ## and the length of network (if any) not assigned to any tile.
  dat <- object$df
  lev <- levels(dat$tile)
  seglen <- lengths.psp(as.psp(object$L))
  fraglen <- seglen[dat$seg] * (dat$t1 - dat$t0)
  tilelen <- tapplysum(fraglen, list(dat$tile))
  hasna <- anyNA(dat$tile)
  ## unlabelled length = total network length minus labelled length
  nalen <- if(hasna) (sum(seglen) - sum(tilelen)) else 0
  y <- list(nt      = length(lev),
            nr      = nrow(dat),
            lev     = lev,
            seglen  = seglen,
            tilelen = tilelen,
            hasna   = hasna,
            nalen   = nalen)
  class(y) <- c("summary.lintess", class(y))
  y
}

print.summary.lintess <- function(x, ...)
{
  ## Print method for the result of summary.lintess().
  splat("Tessellation on a linear network")
  with(x, {
    splat(nt, "tiles")
    if(hasna) splat("[An additional tile is labelled NA]")
    if(nt <= 30) {
      ## few tiles: list them all
      splat("Tile labels:", paste(lev, collapse=" "))
      splat("Tile lengths:")
      print(signif(tilelen, 4))
    } else {
      ## many tiles: show a five-number summary instead
      splat("Tile lengths (summary):")
      print(summary(tilelen))
    }
    if(hasna) splat("Tile labelled NA has length", nalen)
  })
  return(invisible(NULL))
}

plot.lintess <- function(x, ..., main, add=FALSE,
                         style=c("segments", "image"), col=NULL) {
  ## Plot a tessellation on a linear network, either by drawing each
  ## network segment fragment in its tile's colour (style="segments")
  ## or as a pixel image of tile membership (style="image").
  ## Returns (invisibly) the colour map used, or the image plot result.
  if(missing(main)) main <- short.deparse(substitute(x))
  style <- match.arg(style)
  if(style == "image") {
    z <- plot(as.linfun(x), main=main, ..., add=add)
    return(invisible(z))
  }
  #' determine colour map: 'col' may be NULL (default rainbow),
  #' a colourmap object, or a vector of colours (length 1 or one per tile)
  df <- x$df
  lev <- levels(df$tile)
  if(is.null(col)) {
    col <- rainbow(length(lev))
    cmap <- colourmap(col, inputs=lev)
  } else if(inherits(col, "colourmap")) {
    cmap <- col
    col <- cmap(lev)
  } else if(is.colour(col)) {
    if(length(col) == 1) col <- rep(col, length(lev))
    if(length(col) != length(lev))
      stop(paste(length(col), "colours provided but",
                 length(lev), "colours needed"))
    cmap <- colourmap(col, inputs=lev)
  } else stop("col should be a vector of colours, or a colourmap object")
  #' determine segment coordinates
  L <- as.linnet(x)
  from <- L$from[df$seg]
  to <- L$to[df$seg]
  V <- vertices(L)
  vx <- V$x
  vy <- V$y
  #' plot
  if(!add) plot(Frame(x), main=main, type="n")
  ## each row (seg, t0, t1) maps to a sub-segment by linear
  ## interpolation between the endpoints of network segment 'seg'
  with(df,
       segments(
         vx[from] * (1-t0) + vx[to] * t0,
         vy[from] * (1-t0) + vy[to] * t0,
         vx[from] * (1-t1) + vx[to] * t1,
         vy[from] * (1-t1) + vy[to] * t1,
         col=col[as.integer(tile)],
         ...)
       )
  return(invisible(cmap))
}

as.owin.lintess <- function(W, ...) {
  ## Extract the containing window of the network.
  ## (Argument is named W to match the as.owin generic.)
  as.owin(as.linnet(W), ...)
}

Window.lintess <- function(X, ...) {
  ## Window of the underlying linear network.
  as.owin(as.linnet(X))
}

domain.lintess <- as.linnet.lintess <- function(X, ...)
{ X$L } as.linfun.lintess <- function(X, ..., values, navalue=NA) { L <- X$L df <- X$df if(missing(values) || is.null(values)) { rowvalues <- df$tile } else { if(length(values) != length(levels(df$tile))) stop("Length of 'values' should equal the number of tiles", call.=FALSE) rowvalues <- values[as.integer(df$tile)] } f <- function(x, y, seg, tp) { result <- rowvalues[integer(0)] for(i in seq_along(seg)) { tpi <- tp[i] segi <- seg[i] j <- which(df$seg == segi) kk <- which(df[j, "t0"] <= tpi & df[j, "t1"] >= tpi) result[i] <- if(length(kk) == 0) navalue else rowvalues[j[min(kk)]] } return(result) } g <- linfun(f, L) return(g) } #' Divide a linear network into tiles demarcated by #' the points of a point pattern divide.linnet <- local({ divide.linnet <- function(X) { stopifnot(is.lpp(X)) L <- as.linnet(X) coo <- coords(X) #' add identifiers of endpoints coo$from <- L$from[coo$seg] coo$to <- L$to[coo$seg] #' group data by segment, sort by increasing 'tp' coo <- coo[with(coo, order(seg, tp)), , drop=FALSE] bits <- split(coo, coo$seg) #' expand as a sequence of intervals bits <- lapply(bits, expanddata) #' reassemble as data frame df <- Reduce(rbind, bits) #' find all undivided segments other <- setdiff(seq_len(nsegments(L)), unique(coo$seg)) #' add a single line for each undivided segment if(length(other) > 0) df <- rbind(df, data.frame(seg=other, t0=0, t1=1, from=L$from[other], to=L$to[other])) #' We now have a tessellation #' Sort again df <- df[with(df, order(seg, t0)), , drop=FALSE] #' Now identify connected components #' Two intervals are connected if they share an endpoint #' that is a vertex of the network. 
nvert <- nvertices(L) nbits <- nrow(df) iedge <- jedge <- integer(0) for(iv in seq_len(nvert)) { joined <- with(df, which(from == iv | to == iv)) njoin <- length(joined) if(njoin > 1) iedge <- c(iedge, joined[-njoin]) jedge <- c(jedge, joined[-1L]) } nedge <- length(iedge) zz <- .C("cocoGraph", nv = as.integer(nbits), ne = as.integer(nedge), ie = as.integer(iedge - 1L), je = as.integer(jedge - 1L), label = as.integer(integer(nbits)), status = as.integer(integer(1L)), PACKAGE = "spatstat") if (zz$status != 0) stop("Internal error: connectedness algorithm did not converge") lab <- zz$label + 1L lab <- as.integer(factor(lab)) df <- df[,c("seg", "t0", "t1")] df$tile <- lab return(lintess(L, df)) } expanddata <- function(z) { df <- with(z, data.frame(seg=c(seg[1L], seg), t0 = c(0, tp), t1 = c(tp, 1), from=NA_integer_, to=NA_integer_)) df$from[1L] <- z$from[1L] df$to[nrow(df)] <- z$to[1L] return(df) } divide.linnet }) spatstat/R/rmhexpand.R0000644000176200001440000001454313115225157014440 0ustar liggesusers# # rmhexpand.R # # Rules/data for expanding the simulation window in rmh # # $Revision: 1.8 $ $Date: 2016/02/11 10:17:12 $ # # Establish names and rules for each type of expansion RmhExpandRule <- local({ .RmhExpandTable <- list(area=list(descrip ="Area expansion factor", minval = 1, expands = function(x) { unname(x) > 1 }), length=list(descrip ="Length expansion factor", minval = 1, expands = function(x) { unname(x) > 1 }), distance=list(descrip="Expansion buffer distance", minval = 0, expands = function(x) { unname(x) > 0 })) RmhExpandRule <- function(nama) { if(length(nama) == 0) nama <- "area" if(length(nama) > 1) stop("Internal error: too many names in RmhExpandRule", call.=FALSE) if(!(nama %in% names(.RmhExpandTable))) stop(paste("Internal error: unrecognised expansion type", sQuote(nama)), call.=FALSE) return(.RmhExpandTable[[nama]]) } RmhExpandRule }) rmhexpand <- function(x=NULL, ..., area=NULL, length=NULL, distance=NULL) { trap.extra.arguments(..., 
.Context="In rmhexpand") # check for incompatibility n <- (!is.null(x)) + (!is.null(area)) + (!is.null(length)) + (!is.null(distance)) if(n > 1) stop("Only one argument should be given") # absorb other arguments into 'x' if(is.null(x) && n > 0) { if(!is.null(area)) x <- c(area=area) if(!is.null(length)) x <- c(length=length) if(!is.null(distance)) x <- c(distance=distance) } if(is.null(x)) { # No expansion rule supplied. # Use spatstat default, indicating that the user did not choose it. force.exp <- force.noexp <- FALSE x <- spatstat.options("expand") x <- rmhexpand(x)$expand } else { # process x if(inherits(x, "rmhexpand")) return(x) if(is.owin(x)) { force.exp <- TRUE force.noexp <- FALSE } else { # expecting c(name=value) or list(name=value) if(is.list(x)) x <- unlist(x) if(!is.numeric(x)) stop(paste("Expansion argument must be either", "a number, a window, or NULL.\n")) # x is numeric check.1.real(x, "In rmhexpand(x)") explain.ifnot(is.finite(x), "In rmhexpand(x)") # an unlabelled numeric value is interpreted as an area expansion factor if(!any(nzchar(names(x)))) names(x) <- "area" # validate rule <- RmhExpandRule(names(x)) if(x < rule$minval) { warning(paste(rule$descrip, "<", rule$minval, "has been reset to", rule$minval), call.=FALSE) x[] <- rule$minval } force.exp <- rule$expands(x) force.noexp <- !force.exp } } result <- list(expand=x, force.exp=force.exp, force.noexp=force.noexp) class(result) <- "rmhexpand" return(result) } .no.expansion <- list(expand=c(area=1), force.exp=FALSE, force.noexp=TRUE) class(.no.expansion) <- "rmhexpand" print.rmhexpand <- function(x, ..., prefix=TRUE) { if(prefix) cat("Expand the simulation window? ") if(x$force.noexp) { cat("No.\n") } else { if(x$force.exp) cat("Yes:\n") else cat("Not determined. 
Default is:\n") y <- x$expand if(is.null(y)) { print(rmhexpand(spatstat.options("expand")), prefix=FALSE) } else if(is.numeric(y)) { descrip <- RmhExpandRule(names(y))$descrip cat(paste("\t", descrip, unname(y), "\n")) } else { print(y) } } return(invisible(NULL)) } summary.rmhexpand <- function(object, ...) { decided <- with(object, force.exp || force.noexp) ex <- object$expand if(is.null(ex)) ex <- rmhexpand(spatstat.options("expand"))$expand if(is.owin(ex)) { willexpand <- TRUE descrip <- "Window" } else if(is.numeric(ex)) { rule <- RmhExpandRule(names(ex)) descrip <- rule$descrip willexpand <- if(object$force.exp) TRUE else if(object$force.noexp) FALSE else (unname(ex) > rule$minval) } else stop("Internal error: unrecognised format in summary.rmhexpand", call.=FALSE) out <- list(rule.decided=decided, window.decided=decided && is.owin(ex), expand=ex, descrip=descrip, willexpand=willexpand) class(out) <- "summary.rmhexpand" return(out) } print.summary.rmhexpand <- function(x, ...) { cat("Expansion rule\n") ex <- x$expand if(x$window.decided) { cat("Window is decided.\n") print(ex) } else { if(x$rule.decided) { cat("Rule is decided.\n") } else { cat("Rule is not decided.\nDefault is:\n") } if(!x$willexpand) { cat("No expansion\n") } else { if(is.numeric(ex)) cat(paste(x$descrip, ex, "\n")) else print(ex) } } return(invisible(NULL)) } expand.owin <- function(W, ...) { ex <- list(...) 
if(length(ex) > 1) stop("Too many arguments") # get an rmhexpand object if(inherits(ex[[1]], "rmhexpand")) { ex <- ex[[1]] } else ex <- do.call(rmhexpand, ex) f <- ex$expand if(is.null(f)) return(W) if(is.owin(f)) return(f) if(!is.numeric(f)) stop("Format not understood") switch(names(f), area = { if(f == 1) return(W) bb <- boundingbox(W) xr <- bb$xrange yr <- bb$yrange fff <- (sqrt(f) - 1)/2 Wexp <- grow.rectangle(bb, fff * diff(xr), fff * diff(yr)) }, length = { if(f == 1) return(W) bb <- boundingbox(W) xr <- bb$xrange yr <- bb$yrange fff <- (f - 1)/2 Wexp <- grow.rectangle(bb, fff * diff(xr), fff * diff(yr)) }, distance = { if(f == 0) return(W) Wexp <- if(is.rectangle(W)) grow.rectangle(W, f) else dilation(W, f) }, stop("Internal error: unrecognised type") ) return(Wexp) } will.expand <- function(x) { stopifnot(inherits(x, "rmhexpand")) if(x$force.exp) return(TRUE) if(x$force.noexp) return(FALSE) return(summary(x)$willexpand) } is.expandable <- function(x) { UseMethod("is.expandable") } change.default.expand <- function(x, newdefault) { stopifnot(inherits(x, "rmhexpand")) decided <- with(x, force.exp || force.noexp) if(!decided) x$expand <- rmhexpand(newdefault)$expand return(x) } spatstat/R/sdr.R0000644000176200001440000002142313115225157013235 0ustar liggesusers#' #' sdr.R #' #' Sufficient Dimension Reduction #' #' Matlab original: Yongtao Guan #' Translated to R by: Suman Rakshit #' Adapted for spatstat: Adrian Baddeley #' #' GNU Public Licence 2.0 || 3.0 #' #' $Revision: 1.11 $ $Date: 2016/11/26 07:41:34 $ #' sdr <- local({ sdr <- function(X, covariates, method=c("DR", "NNIR", "SAVE", "SIR", "TSE"), Dim1=1, Dim2=1, predict=FALSE) { stopifnot(is.ppp(X)) method <- match.arg(method) #' ensure 'covariates' is a list of compatible images if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im))) stop("Argument 'covariates' must be a list of images") nc <- length(covariates) if(nc == 0) stop("Need at least one covariate!") if(nc < Dim1 + (method == 
"TSE") * Dim2) stop(paste(if(method == "TSE") "Dim1 + Dim2" else "Dim1", "must not exceed the number of covariates"), call.=FALSE) if(nc > 1 && !do.call(compatible, unname(covariates))) covariates <- do.call(harmonise, covariates) #' extract corresponding pixel values including NA's Ypixval <- sapply(lapply(covariates, as.matrix), as.vector) #' compute sample mean and covariance matrix m <- colMeans(Ypixval, na.rm=TRUE) V <- cov(Ypixval, use="complete") #' evaluate each image at point data locations YX <- sapply(covariates, safelook, Y=X) #' apply precomputed standardisation Zx <- t(t(YX) - m) %*% matrixinvsqrt(V) #' ready coordsX <- coords(X) result <- switch(method, DR = calc.DR(COV=V, z=Zx, Dim=Dim1), NNIR = calc.NNIR(COV=V, z=Zx, pos=coordsX, Dim=Dim1), SAVE = calc.SAVE(COV=V, z=Zx, Dim=Dim1), SIR = calc.SIR(COV=V, z=Zx ), TSE = calc.TSE(COV=V, z=Zx, pos=coordsX, Dim1=Dim1, Dim2=Dim2) ) #' covnames <- names(covariates) %orifnull% paste0("Y", 1:nc) dimnames(result$B) <- list(covnames, paste0("B", 1:ncol(result$B))) if(method == "TSE") { result$M1 <- namez(result$M1) result$M2 <- namez(result$M2) } else { result$M <- namez(result$M) } if(predict) result$Y <- sdrPredict(covariates, result$B) return(result) } safelook <- function(Z, Y, ...) { safelookup(Z, Y, ...) 
}

namez <- function(M, prefix="Z") {
  ## Attach default labels Z1, Z2, ... to the rows and columns of M.
  dimnames(M) <- list(paste0(prefix, 1:nrow(M)),
                      paste0(prefix, 1:ncol(M)))
  return(M)
}

sdr
})

sdrPredict <- function(covariates, B) {
  ## Form the predictor images: one linear combination of the
  ## covariate images for each column of the coefficient matrix B.
  if(!is.matrix(B)) {
    ## allow the result of sdr() to be supplied directly
    if(is.list(B) && is.matrix(BB <- B$B)) B <- BB else
    stop("B should be a matrix, or the result of a call to sdr()", call.=FALSE)
  }
  if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im)))
    stop("Argument 'covariates' must be a list of images")
  stopifnot(nrow(B) == length(covariates))
  ## one output image per column of B
  result <- lapply(seq_len(ncol(B)), function(j) {
    coefs <- as.list(B[, j])
    Reduce("+", mapply("*", coefs, covariates, SIMPLIFY=FALSE))
  })
  names(result) <- colnames(B)
  return(as.solist(result))
}

##............ DR (Directional Regression) ..........................

calc.DR <- function(COV, z, Dim){
  ## Description: Naive Directional Regression Method
  ## Input:
  ##   COV - cov{X(s)}
  ##   z   - standardized X(s) on SPP locations
  ##   Dim - the CS dimension
  ## Output:
  ##   B - the estimated CS basis
  ##   M - the kernel matrix
  ss <- nrow(z)
  ncov <- ncol(z)
  ## SAVE kernel
  KS <- (t(z) %*% z)/ss - diag(1, ncov)
  KS <- KS %*% KS
  covMean <- matrix(colMeans(z), ncol=1)
  outerMean <- covMean %*% t(covMean)
  ## SIR kernel
  KM <- outerMean * (base::norm(covMean, type="2"))^2
  ## SIR-2 kernel
  K2 <- outerMean %*% outerMean
  ## DR kernel: average of the three
  M <- (KS + K2 + KM)/3
  ## leading singular directions of the kernel
  B <- svd(M)$u[,1:Dim]
  ## transform back to the original scale
  B <- matrixinvsqrt(COV) %*% B
  return(list(B=B, M=M))
}

## ............ NNIR (Nearest Neighbor Inverse Regression) ...........
calc.NNIR <- function(COV, z, pos, Dim) {
  ## Description: Nearest Neighbor Inverse Regression
  ## Input:
  ##   COV - cov{X(s)}
  ##   z   - standardized X(s) on SPP locations
  ##   pos - the position of SPP events
  ##   Dim - the CS dimension
  ## Output:
  ##   B - the estimated CS basis
  ##   M - the kernel matrix
  ss <- nrow(z)                   # sample size
  jj <- nnwhich(pos)              # identify nearest neighbour of each point
  dir <- z - z[jj, , drop=FALSE]  # empirical direction
  IM <- sumouter(dir)   # inverse of kernel matrix: sum of outer(dir[i,], dir[i,])
  M <- solve(IM/ss)     # invert kernel matrix
  SVD <- svd(M)
  B <- matrixinvsqrt(COV) %*% SVD$u[, 1:Dim, drop=FALSE]
  return(list(B=B, M=M))
}

## ...........  SAVE (Sliced Average Variance Estimation) ...........

calc.SAVE <- function(COV, z, Dim){
  ## Description: Sliced Average Variance Estimation
  ## Input
  ##   COV - cov{X(s)}
  ##   z   - standardized X(s) on SPP locations
  ##   Dim - the central space dimension
  ## Value
  ##   B - the estimated CS basis
  ##   M - the kernel matrix
  ncov <- ncol(z)
  M <- diag(1, ncov) - cov(z)
  M <- M %*% M
  SVD <- svd(M)
  ## 'drop=FALSE' keeps B a matrix when Dim = 1, consistent with calc.NNIR
  B <- SVD$u[, 1:Dim, drop=FALSE]
  B <- matrixinvsqrt(COV) %*% B
  return(list(B=B, M=M))
}

##..........  SIR (Sliced Inverse Regression) ......................

calc.SIR <- function(COV, z){
  ## Description: Sliced Inverse Regression
  ## Input:
  ##   COV - cov{X(s)}
  ##   z   - standardized X(s) on SPP locations
  ## Output:
  ##   B - the estimated CS basis
  ##   M - the kernel matrix
  covMean <- colMeans(z)
  B <- matrixinvsqrt(COV) %*% covMean   # do SIR estimation
  B <- B/sqrt(sum(B^2))                 # normalise to unit length
  M <- covMean %*% t(covMean)           # create kernel matrix
  return(list(B=B, M=M))
}

## .............  TSE (Two-Step Estimation) ....................

calc.TSE <- function(COV, z, pos, Dim1, Dim2) {
  ## Description: A Two-Step Method
  ## Input:
  ##   COV  - cov{X(s)}
  ##   z    - standardized X(s) on SPP locations
  ##   Dim1 - the S1 dimension
  ##   Dim2 - the S2 dimension
  ## Output:
  ##   B - the estimated CS basis. Its first Dim1 columns
  ##       are estimating S1 and the remaining Dim2 columns are
  ##       estimating S2. In case of null space, a zero vector is reported.
  ##   M1 - the kernel matrix of DR
  ##   M2 - the kernel matrix of NNIR, which might be subject
  ##        to some change, depending on the results of M1.
  ncov <- ncol(z)                      # predictor dimension
  est1 <- calc.DR(COV, z, ncov)        # do DR estimation
  est2 <- calc.NNIR(COV, z, pos, ncov) # do NNIR estimation
  M1 <- est1$M
  M2 <- est2$M
  if(Dim1 > 0) {
    U <- svd(M1)$u
    B1 <- U[, 1:Dim1, drop=FALSE]      # get S1 estimate
    ## projector onto the orthogonal complement of S(B1)
    Q <- diag(1, ncov) - B1 %*% solve(t(B1) %*% B1) %*% t(B1)
    M2 <- Q %*% M2 %*% Q               # do constrained NNIR
  } else {
    B1 <- matrix(0, ncov, 1)
  }
  if(Dim2 > 0) {
    U <- svd(M2)$u                     # do SVD for possibly updated M2
    B2 <- U[, 1:Dim2, drop=FALSE]      # get basis estimator
  } else {
    B2 <- matrix(0, ncov, 1)
  }
  B <- matrixinvsqrt(COV) %*% cbind(B1, B2)
  return(list(B=B, M1=M1, M2=M2))
}

## //////////////////  ADDITIONAL FUNCTIONS  /////////////////////

subspaceDistance <- function(B0,B1) {
  ## ======================================================== #
  ## Evaluate the distance between the two linear spaces S(B0) and S(B1).
  ## The measure used is the one proposed by Li et al. (2004).
## ======================================================== # stopifnot(is.matrix(B0)) stopifnot(is.matrix(B1)) Proj0 <- B0 %*% solve((t(B0) %*% B0)) %*% t(B0) # Proj matrix on S(B0) lam <- svd(B1) # check whether B1 is singular U <- lam$u D <- lam$d # V <- lam$v B2 <- U[, D > 1e-09] # keep non-singular directions Proj1 <- B2 %*% solve((t(B2) %*% B2)) %*% t(B2) # Proj matrix on S(B.hat) Svd <- svd(Proj0 - Proj1) # Do svd for P0-P1 dist <- max(abs(Svd$d)) # Get the maximum absolute svd value return(dist) } dimhat <- function(M){ #' Description: Maximum Descent Estimator for CS Dim #' Input: #' M - the estimated kernel matrix #' Output: #' dimhat - the estimated CS dim (assume dim>0) stopifnot(is.matrix(M)) ncov <- ncol(M) # predictor dimension maxdim <- max((ncov-1), 5) # maximum structure dimension SVD <- svd(M) # svd of kernel matrix lam <- SVD$d eps <- 1e-06 lam <- lam + rep(eps,ncov) # add ridge effect lam1 <- lam[-ncov] lam2 <- lam[-1] dif <- lam1/lam2 dif <- dif[1 : maxdim] # the magnitude of drop retval <- which.max(dif) # find Maximum Descent estimator return(retval) } spatstat/R/round.R0000644000176200001440000000170313115225157013573 0ustar liggesusers# # round.R # # discretisation of coordinates # # $Revision: 1.5 $ $Date: 2013/01/09 03:13:10 $ round.ppp <- round.pp3 <- round.ppx <- function(x, digits=0) { coords(x) <- round(as.matrix(coords(x)), digits=digits) return(x) } rounding <- function(x) { UseMethod("rounding") } rounding.ppp <- rounding.pp3 <- rounding.ppx <- function(x) { rounding(as.matrix(coords(x))) } rounding.default <- function(x) { # works for numeric, complex, matrix etc if(all(x == 0)) return(NULL) if(identical(all.equal(x, round(x)), TRUE)) { # integers: go up k <- 0 smallk <- -log10(.Machine$double.xmax) repeat { if(k < smallk || !identical(all.equal(x, round(x, k-1)), TRUE)) return(k) k <- k-1 } } else { # not integers: go down k <- 1 bigk <- -log10(.Machine$double.eps) repeat { if(k > bigk || identical(all.equal(x, round(x, k)), 
TRUE)) return(k) k <- k+1 } } } spatstat/R/plot3d.R0000644000176200001440000001477413115225157013665 0ustar liggesusers#' perspective plot of 3D #' #' $Revision: 1.5 $ $Date: 2016/09/23 04:57:43 $ #' project3Dhom <- local({ check3dvector <- function(x) { xname <- deparse(substitute(x)) if(!(is.numeric(x) && length(x) == 3)) stop(paste(xname, "should be a numeric vector of length 3"), call.=FALSE) return(NULL) } normalise <- function(x) { len <- sqrt(sum(x^2)) if(len == 0) stop("Attempted to normalise a vector of length 0") return(x/len) } innerprod <- function(a, b) sum(a*b) crossprod <- function(u, v) { c(u[2] * v[3] - u[3] * v[2], -(u[1] * v[3] - u[3] * v[1]), u[1] * v[2] - u[2] * v[1]) } project3Dhom <- function(xyz, eye=c(0,-3,1), org=c(0,0,0), vert=c(0,0,1)) { ## xyz: data to be projected (matrix n * 3) stopifnot(is.matrix(xyz) && ncol(xyz) == 3) ## eye: eye position (x,y,z) check3dvector(eye) ## org: origin (x,y,z) becomes middle of projection plane check3dvector(org) ## vert: unit vector in direction to become the 'vertical' if(!missing(vert)) { check3dvector(vert) vert <- normalise(vert) } ## vector pointing into screen vin <- normalise(org - eye) ## projection of vertical onto screen vup <- normalise(vert - innerprod(vert, vin) * vin) ## horizontal axis in screen vhoriz <- crossprod(vin, vup) ## dbg <- FALSE if(dbg) { cat("vin=") print(vin) cat("vup=") print(vup) cat("vhoriz=") print(vhoriz) } ## homogeneous coordinates hom <- t(t(xyz) - eye) %*% cbind(vhoriz, vup, vin) colnames(hom) <- c("x", "y", "d") return(hom) } project3Dhom }) plot3Dpoints <- local({ plot3Dpoints <- function(xyz, eye=c(2,-3,2), org=c(0,0,0), ..., type=c("p", "n", "h"), xlim=c(0,1), ylim=c(0,1), zlim=c(0,1), add=FALSE, box=TRUE, main, cex=par('cex'), box.back=list(col="pink"), box.front=list(col="blue", lwd=2) ) { if(missing(main)) main <- short.deparse(substitute(xyz)) type <- match.arg(type) #' if(is.null(box.back) || (is.logical(box.back) && box.back)) box.back <- list(col="pink") 
if(is.null(box.front) || (is.logical(box.front) && box.front)) box.front <- list(col="blue", lwd=2) stopifnot(is.list(box.back) || is.logical(box.back)) stopifnot(is.list(box.front) || is.logical(box.front)) #' stopifnot(is.matrix(xyz) && ncol(xyz) == 3) if(nrow(xyz) > 0) { if(missing(xlim)) xlim <- range(pretty(xyz[,1])) if(missing(ylim)) ylim <- range(pretty(xyz[,2])) if(missing(zlim)) zlim <- range(pretty(xyz[,3])) if(missing(org)) org <- c(mean(xlim), mean(ylim), mean(zlim)) } if(!add) { #' initialise plot bb <- plot3Dbox(xlim, ylim, zlim, eye=eye, org=org, do.plot=FALSE) plot(bb$xlim, bb$ylim, axes=FALSE, asp=1, type="n", xlab="", ylab="", main=main) } if(is.list(box.back)) { #' plot rear of box do.call(plot3DboxPart, resolve.defaults(list(xlim=xlim, ylim=ylim, zlim=zlim, eye=eye, org=org, part="back"), box.back, list(...))) } if(type != "n") { #' plot points uv <- project3Dhom(xyz, eye=eye, org=org) uv <- as.data.frame(uv) dord <- order(uv$d, decreasing=TRUE) uv <- uv[dord, , drop=FALSE] if(type == "h") { xy0 <- cbind(xyz[,1:2,drop=FALSE], zlim[1]) uv0 <- as.data.frame(project3Dhom(xy0, eye=eye, org=org)) uv0 <- uv0[dord, , drop=FALSE] do.call.matched(segments, list(x0=with(uv0, x/d), y0=with(uv0, y/d), x1=with(uv, x/d), y1=with(uv, y/d), ...)) } with(uv, points(x/d, y/d, cex=cex * min(d)/d, ...)) } if(is.list(box.front)) do.call(plot3DboxPart, resolve.defaults(list(xlim=xlim, ylim=ylim, zlim=zlim, eye=eye, org=org, part="front"), box.front, list(...))) return(invisible(NULL)) } vertexind <- data.frame(i=rep(1:2,4), j=rep(rep(1:2,each=2),2), k=rep(1:2, each=4)) edgepairs <- data.frame(from=c(1, 1, 2, 3, 1, 2, 5, 3, 5, 4, 6, 7), to = c(2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8)) vertexfrom <- vertexind[edgepairs$from,] vertexto <- vertexind[edgepairs$to,] hamming <- function(a, b) sum(abs(a-b)) ## determine projected positions of box vertices ## and optionally plot the box plot3Dbox <- function(xlim=c(0,1), ylim=xlim, zlim=ylim, eye=c(0,-3,1), org=c(0,0,0), 
do.plot=TRUE) { fromxyz <- with(vertexfrom, cbind(xlim[i], ylim[j], zlim[k])) toxyz <- with(vertexto, cbind(xlim[i], ylim[j], zlim[k])) fromuv <- project3Dhom(fromxyz, eye=eye, org=org) touv <- project3Dhom(toxyz, eye=eye, org=org) xfrom <- fromuv[,1]/fromuv[,3] xto <- touv[,1]/touv[,3] yfrom <- fromuv[,2]/fromuv[,3] yto <- touv[,2]/touv[,3] if(do.plot) segments(xfrom, yfrom, xto, yto) return(invisible(list(xlim=range(xfrom, xto), ylim=range(yfrom, yto)))) } ## plot either back or front of box plot3DboxPart <- function(xlim=c(0,1), ylim=xlim, zlim=ylim, eye=c(0,-3,1), org=c(0,0,0), part=c("front", "back"), ...) { part <- match.arg(part) boxvert <- with(vertexind, cbind(xlim[i], ylim[j], zlim[k])) pvert <- project3Dhom(boxvert, eye=eye, org=org) xyvert <- pvert[,c("x","y")]/pvert[,"d"] ## find vertex which is furthest away nback <- which.max(pvert[,"d"]) nearback <- with(edgepairs, (from==nback) | (to==nback)) ind <- if(part == "back") nearback else !nearback ## draw lines with(edgepairs[ind,], segments(xyvert[from, 1], xyvert[from, 2], xyvert[to, 1], xyvert[to, 2], ...)) } plot3Dpoints }) spatstat/R/quadrattest.R0000755000176200001440000004162013115271120015001 0ustar liggesusers# # quadrattest.R # # $Revision: 1.54 $ $Date: 2016/04/25 02:34:40 $ # quadrat.test <- function(X, ...) 
{ UseMethod("quadrat.test") } quadrat.test.ppp <- function(X, nx=5, ny=nx, alternative = c("two.sided", "regular", "clustered"), method = c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) { Xname <- short.deparse(substitute(X)) method <- match.arg(method) alternative <- match.arg(alternative) do.call(quadrat.testEngine, resolve.defaults(list(X, nx=nx, ny=ny, alternative=alternative, method=method, conditional=conditional, CR=CR, fit=lambda, xbreaks=xbreaks, ybreaks=ybreaks, tess=tess, nsim=nsim), list(...), list(Xname=Xname, fitname="CSR"))) } quadrat.test.splitppp <- function(X, ..., df=NULL, df.est=NULL, Xname=NULL) { if(is.null(Xname)) Xname <- short.deparse(substitute(X)) pool.quadrattest(lapply(X, quadrat.test.ppp, ...), df=df, df.est=df.est, Xname=Xname) } quadrat.test.ppm <- function(X, nx=5, ny=nx, alternative = c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) { fitname <- short.deparse(substitute(X)) dataname <- paste("data from", fitname) method <- match.arg(method) alternative <- match.arg(alternative) if(!is.poisson.ppm(X)) stop("Test is only defined for Poisson point process models") if(is.marked(X)) stop("Sorry, not yet implemented for marked point process models") do.call(quadrat.testEngine, resolve.defaults(list(data.ppm(X), nx=nx, ny=ny, alternative=alternative, method=method, conditional=conditional, CR=CR, xbreaks=xbreaks, ybreaks=ybreaks, tess=tess, nsim=nsim, fit=X), list(...), list(Xname=dataname, fitname=fitname))) } quadrat.test.quadratcount <- function(X, alternative = c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, ..., nsim=1999) { trap.extra.arguments(...) 
method <- match.arg(method) alternative <- match.arg(alternative) quadrat.testEngine(Xcount=X, alternative=alternative, fit=lambda, method=method, conditional=conditional, CR=CR, nsim=nsim) } quadrat.testEngine <- function(X, nx, ny, alternative = c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, ..., nsim=1999, Xcount=NULL, xbreaks=NULL, ybreaks=NULL, tess=NULL, fit=NULL, Xname=NULL, fitname=NULL) { trap.extra.arguments(...) method <- match.arg(method) alternative <- match.arg(alternative) if(method == "MonteCarlo") { check.1.real(nsim) explain.ifnot(nsim > 0) } if(is.null(Xcount)) Xcount <- quadratcount(X, nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks, tess=tess) tess <- attr(Xcount, "tess") testname <- switch(method, Chisq = "Chi-squared test", MonteCarlo = paste( if(conditional) "Conditional" else "Unconditional", "Monte Carlo test") ) # determine expected values under model if(is.null(fit)) { nullname <- "CSR" if(tess$type == "rect") areas <- outer(diff(tess$xgrid), diff(tess$ygrid), "*") else areas <- unlist(lapply(tiles(tess), area)) fitmeans <- sum(Xcount) * areas/sum(areas) df <- switch(method, Chisq = length(fitmeans) - 1, MonteCarlo = NULL) } else if(is.im(fit) || inherits(fit, "funxy")) { nullname <- "Poisson process with given intensity" fit <- as.im(fit, W=Window(tess)) areas <- integral(fit, tess) fitmeans <- sum(Xcount) * areas/sum(areas) df <- switch(method, Chisq = length(fitmeans) - 1, MonteCarlo = NULL) } else { if(!is.ppm(fit)) stop("fit should be a ppm object") if(!is.poisson.ppm(fit)) stop("Quadrat test only supported for Poisson point process models") if(is.marked(fit)) stop("Sorry, not yet implemented for marked point process models") nullname <- paste("fitted Poisson model", sQuote(fitname)) Q <- quad.ppm(fit, drop=TRUE) ww <- w.quad(Q) lambda <- fitted(fit, drop=TRUE) masses <- lambda * ww # sum weights of quadrature points in each tile if(tess$type == "rect") { xx <- x.quad(Q) yy <- 
y.quad(Q) xbreaks <- tess$xgrid ybreaks <- tess$ygrid fitmeans <- rectquadrat.countEngine(xx, yy, xbreaks, ybreaks, weights=masses) fitmeans <- as.vector(t(fitmeans)) } else { U <- as.ppp(Q) V <- marks(cut(U, tess), dfok=FALSE) fitmeans <- tapply(masses, list(tile=V), sum) fitmeans[is.na(fitmeans)] <- 0 } switch(method, Chisq = { df <- length(fitmeans) - length(coef(fit)) if(df < 1) stop(paste("Not enough quadrats: degrees of freedom df =", df)) }, MonteCarlo = { df <- NA }) } OBS <- as.vector(t(as.table(Xcount))) EXP <- as.vector(fitmeans) testname <- paste(testname, "of", nullname, "using quadrat counts") testname <- c(testname, CressieReadName(CR)) result <- X2testEngine(OBS, EXP, method=method, df=df, nsim=nsim, conditional=conditional, CR=CR, alternative=alternative, testname=testname, dataname=Xname) class(result) <- c("quadrattest", class(result)) attr(result, "quadratcount") <- Xcount return(result) } CressieReadStatistic <- function(OBS, EXP, lambda=1) { y <- if(lambda == 1) sum((OBS - EXP)^2/EXP) else if(lambda == 0) 2 * sum(OBS * log(OBS/EXP)) else if(lambda == -1) 2 * sum(EXP * log(EXP/OBS)) else (2/(lambda * (lambda + 1))) * sum(OBS * ((OBS/EXP)^lambda - 1)) names(y) <- CressieReadSymbol(lambda) return(y) } CressieReadSymbol <- function(lambda) { if(lambda == 1) "X2" else if(lambda == 0) "G2" else if(lambda == -1/2) "T2" else if(lambda == -1) "GM2" else if(lambda == -2) "NM2" else "CR" } CressieReadName <- function(lambda) { if(lambda == 1) "Pearson X2 statistic" else if(lambda == 0) "likelihood ratio test statistic G2" else if(lambda == -1/2) "Freeman-Tukey statistic T2" else if(lambda == -1) "modified likelihood ratio test statistic GM2" else if(lambda == -2) "Neyman modified X2 statistic NM2" else paste("Cressie-Read statistic", paren(paste("lambda =", if(abs(lambda - 2/3) < 1e-7) "2/3" else lambda) ) ) } X2testEngine <- function(OBS, EXP, ..., method=c("Chisq", "MonteCarlo"), CR=1, df=NULL, nsim=NULL, conditional, alternative, testname, dataname) { 
method <- match.arg(method) if(method == "Chisq" & any(EXP < 5)) warning(paste("Some expected counts are small;", "chi^2 approximation may be inaccurate"), call.=FALSE) X2 <- CressieReadStatistic(OBS, EXP, CR) # conduct test switch(method, Chisq = { if(!is.null(df)) names(df) <- "df" pup <- pchisq(X2, df, lower.tail=FALSE) plo <- pchisq(X2, df, lower.tail=TRUE) PVAL <- switch(alternative, regular = plo, clustered = pup, two.sided = 2 * min(pup, plo)) }, MonteCarlo = { nsim <- as.integer(nsim) if(conditional) { npts <- sum(OBS) p <- EXP/sum(EXP) SIM <- rmultinom(n=nsim,size=npts,prob=p) } else { ne <- length(EXP) SIM <- matrix(rpois(nsim*ne,EXP),nrow=ne) } simstats <- apply(SIM, 2, CressieReadStatistic, EXP=EXP) if(anyDuplicated(simstats)) simstats <- jitter(simstats) phi <- (1 + sum(simstats >= X2))/(1+nsim) plo <- (1 + sum(simstats <= X2))/(1+nsim) PVAL <- switch(alternative, clustered = phi, regular = plo, two.sided = min(1, 2 * min(phi,plo))) }) result <- structure(list(statistic = X2, parameter = df, p.value = PVAL, method = testname, data.name = dataname, alternative = alternative, observed = OBS, expected = EXP, residuals = (OBS - EXP)/sqrt(EXP), CR = CR, method.key = method), class = "htest") return(result) } print.quadrattest <- function(x, ...) 
{ NextMethod("print") single <- is.atomicQtest(x) if(!single) splat("Pooled test") if(waxlyrical('gory')) { if(single) { cat("Quadrats: ") } else { splat("Quadrats of component tests:") } do.call(print, resolve.defaults(list(x=as.tess(x)), list(...), list(brief=TRUE))) } return(invisible(NULL)) } plot.quadrattest <- local({ plot.quadrattest <- function(x, ..., textargs=list()) { xname <- short.deparse(substitute(x)) if(!is.atomicQtest(x)) { # pooled test - plot the original tests tests <- extractAtomicQtests(x) do.call(plot, resolve.defaults(list(x=tests), list(...), list(main=xname))) return(invisible(NULL)) } Xcount <- attr(x, "quadratcount") # plot tessellation tess <- as.tess(Xcount) do.call(plot.tess, resolve.defaults(list(tess), list(...), list(main=xname))) # compute locations for text til <- tiles(tess) ok <- sapply(til, haspositivearea) incircles <- lapply(til[ok], incircle) x0 <- sapply(incircles, getElement, name="x") y0 <- sapply(incircles, getElement, name="y") ra <- sapply(incircles, getElement, name="r") # plot observed counts cos30 <- sqrt(2)/2 sin30 <- 1/2 f <- 0.4 dotext(-f * cos30, f * sin30, as.vector(t(as.table(Xcount)))[ok], x0, y0, ra, textargs, adj=c(1,0), ...) # plot expected counts dotext(f * cos30, f * sin30, round(x$expected,1)[ok], x0, y0, ra, textargs, adj=c(0,0), ...) # plot Pearson residuals dotext(0, -f, signif(x$residuals,2)[ok], x0, y0, ra, textargs, ...) return(invisible(NULL)) } dotext <- function(dx, dy, values, x0, y0, ra, textargs, ...) { do.call.matched(text.default, resolve.defaults(list(x=x0 + dx * ra, y = y0 + dy * ra), list(labels=paste(as.vector(values))), textargs, list(...)), funargs=graphicsPars("text")) } haspositivearea <- function(x) { !is.null(x) && area(x) > 0 } plot.quadrattest }) ######## pooling multiple quadrat tests into a quadrat test pool.quadrattest <- function(..., df=NULL, df.est=NULL, nsim=1999, Xname=NULL, CR=NULL) { argh <- list(...) 
if(!is.null(df) + !is.null(df.est)) stop("Arguments df and df.est are incompatible") if(all(unlist(lapply(argh, inherits, what="quadrattest")))) { # Each argument is a quadrattest object tests <- argh } else if(length(argh) == 1 && is.list(arg1 <- argh[[1]]) && all(unlist(lapply(arg1, inherits, "quadrattest")))) { # There is just one argument, which is a list of quadrattests tests <- arg1 } else stop("Each entry in the list must be a quadrat test") # data from all cells in all tests OBS <- unlist(lapply(tests, getElement, name="observed")) EXP <- unlist(lapply(tests, getElement, name="expected")) # RES <- unlist(lapply(tests, getElement, name="residuals")) # STA <- unlist(lapply(tests, getElement, name="statistic")) # information about each test Mkey <- unlist(lapply(tests, getElement, name="method.key")) Testname <- lapply(tests, getElement, name="method") Alternative <- unlist(lapply(tests, getElement, name="alternative")) Conditional <- unlist(lapply(tests, getElement, name="conditional")) # name of data if(is.null(Xname)) { Nam <- unlist(lapply(tests, getElement, name="data.name")) Xname <- commasep(sQuote(Nam)) } # name of test testname <- unique(Testname) method.key <- unique(Mkey) if(length(testname) > 1) stop(paste("Cannot combine different types of tests:", commasep(sQuote(method.key)))) testname <- testname[[1]] # alternative hypothesis alternative <- unique(Alternative) if(length(alternative) > 1) stop(paste("Cannot combine tests with different alternatives:", commasep(sQuote(alternative)))) # conditional tests conditional <- any(Conditional) if(conditional) stop("Sorry, not implemented for conditional tests") # Cressie-Read exponent if(is.null(CR)) { CR <- unlist(lapply(tests, getElement, name="CR")) CR <- unique(CR) if(length(CR) > 1) { warning("Tests used different values of CR; assuming CR=1") CR <- 1 } } if(method.key == "Chisq") { # determine degrees of freedom if(is.null(df)) { if(!is.null(df.est)) { # total number of observations minus number of 
fitted parameters df <- length(OBS) - df.est } else { # total degrees of freedom of tests # implicitly assumes independence of tests PAR <- unlist(lapply(tests, getElement, name="parameter")) df <- sum(PAR) } } # validate df if(df < 1) stop(paste("Degrees of freedom = ", df)) names(df) <- "df" } # perform test result <- X2testEngine(OBS, EXP, method=method.key, df=df, nsim=nsim, conditional=conditional, CR=CR, alternative=alternative, testname=testname, dataname=Xname) # add info class(result) <- c("quadrattest", class(result)) attr(result, "tests") <- as.solist(tests) # there is no quadratcount attribute return(result) } is.atomicQtest <- function(x) { inherits(x, "quadrattest") && is.null(attr(x, "tests")) } extractAtomicQtests <- function(x) { if(is.atomicQtest(x)) return(list(x)) stopifnot(inherits(x, "quadrattest")) tests <- attr(x, "tests") y <- lapply(tests, extractAtomicQtests) z <- do.call(c, y) return(as.solist(z)) } as.tess.quadrattest <- function(X) { if(is.atomicQtest(X)) { Y <- attr(X, "quadratcount") return(as.tess(Y)) } tests <- extractAtomicQtests(X) return(as.solist(lapply(tests, as.tess.quadrattest))) } as.owin.quadrattest <- function(W, ..., fatal=TRUE) { if(is.atomicQtest(W)) return(as.owin(as.tess(W), ..., fatal=fatal)) gezeur <- paste("Cannot convert quadrat test result to a window;", "it contains data for several windows") if(fatal) stop(gezeur) else warning(gezeur) return(NULL) } domain.quadrattest <- Window.quadrattest <- function(X, ...) { as.owin(X) } ## The shift method is undocumented. ## It is only needed in plot.listof etc shift.quadrattest <- function(X, ...) { if(is.atomicQtest(X)) { attr(X, "quadratcount") <- qc <- shift(attr(X, "quadratcount"), ...) attr(X, "lastshift") <- getlastshift(qc) } else { tests <- extractAtomicQtests(X) attr(X, "tests") <- te <- lapply(tests, shift, ...) 
attr(X, "lastshift") <- getlastshift(te[[1]]) } return(X) } spatstat/R/Jest.R0000755000176200001440000000462413115271075013361 0ustar liggesusers# Jest.S # # Usual invocation to compute J function # if F and G are not required # # $Revision: 4.21 $ $Date: 2016/10/04 02:33:50 $ # # # Jest <- function(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) { X <- as.ppp(X) W <- Window(X) rmaxdefault <- rmax.rule("J", W, intensity(X)) brks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) # compute F and G FF <- Fest(X, eps, breaks=brks, correction=correction) G <- Gest(X, breaks=brks, correction=correction) # initialise fv object rvals <- FF$r rmax <- max(rvals) # Fvals <- FF[[attr(FF, "valu")]] Z <- fv(data.frame(r=rvals, theo=1), "r", substitute(J(r), NULL), "theo", . ~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="J") # compute J function estimates # this has to be done manually because of the mismatch between names Fnames <- names(FF) Gnames <- names(G) if("raw" %in% Gnames && "raw" %in% Fnames) { Jun <- ratiotweak(1-G$raw, 1-FF$raw) Z <- bind.fv(Z, data.frame(un=Jun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") attr(Z, "alim") <- range(rvals[FF$raw <= 0.9]) } if("rs" %in% Gnames && "rs" %in% Fnames) { Jrs <- ratiotweak(1-G$rs, 1-FF$rs) Z <- bind.fv(Z, data.frame(rs=Jrs), "hat(%s)[rs](r)", "border corrected estimate of %s", "rs") attr(Z, "alim") <- range(rvals[FF$rs <= 0.9]) } if("han" %in% Gnames && "cs" %in% Fnames) { Jhan <- ratiotweak(1-G$han, 1-FF$cs) Z <- bind.fv(Z, data.frame(han=Jhan), "hat(%s)[han](r)", "Hanisch-style estimate of %s", "han") attr(Z, "alim") <- range(rvals[FF$cs <= 0.9]) } if("km" %in% Gnames && "km" %in% Fnames) { Jkm <- ratiotweak(1-G$km, 1-FF$km) Z <- bind.fv(Z, data.frame(km=Jkm), "hat(%s)[km](r)", "Kaplan-Meier estimate of %s", "km") attr(Z, "alim") <- range(rvals[FF$km <= 0.9]) } if("hazard" %in% Gnames && "hazard" %in% Fnames) { Jhaz <- G$hazard - FF$hazard Z <- 
bind.fv(Z, data.frame(hazard=Jhaz), "hazard(r)", "Kaplan-Meier estimate of derivative of log(%s)") } # set default plotting values and order nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) # add more info attr(Z, "F") <- FF attr(Z, "G") <- G unitname(Z) <- unitname(X) return(Z) } spatstat/R/anova.mppm.R0000755000176200001440000002253513142263030014521 0ustar liggesusers# # anova.mppm.R # # $Revision: 1.13 $ $Date: 2017/08/08 07:18:43 $ # anova.mppm <- local({ do.gripe <- function(...) warning(paste(...), call.=FALSE) dont.gripe <- function(...) NULL tests.choices <- c("Chisq", "LRT", "Rao", "score", "F", "Cp") tests.avail <- c("Chisq", "LRT", "Rao", "score") tests.random <- c("Chisq", "LRT") tests.Gibbs <- c("Chisq", "LRT") totalnquad <- function(fit) sum(sapply(quad.mppm(fit), n.quad)) totalusedquad <- function(fit) with(fit$Fit$moadf, sum(.mpl.SUBSET)) fmlaString <- function(z) { paste(as.expression(formula(z))) } ## interString <- function(z) { as.interact(z)$creator } anova.mppm <- function(object, ..., test=NULL, adjust=TRUE, fine=FALSE, warn=TRUE) { gripe <- if(warn) do.gripe else dont.gripe argh <- list(...) ## trap outmoded usage if("override" %in% names(argh)) { gripe("Argument 'override' is superseded and was ignored") argh <- argh[-which(names(argh) == "override")] } ## list of models objex <- append(list(object), argh) ## Check each model is an mppm object if(!all(sapply(objex, is.mppm))) stop(paste("Arguments must all be", sQuote("mppm"), "objects")) ## are all models Poisson? pois <- all(sapply(objex, is.poisson.mppm)) gibbs <- !pois ## handle anova for a single object expandedfrom1 <- FALSE if(length(objex) == 1 && gibbs) { ## we can't rely on anova.glm in this case ## so we have to re-fit explicitly Terms <- drop.scope(object) if((nT <- length(Terms)) > 0) { ## generate models by adding terms sequentially objex <- vector(mode="list", length=nT+1) for(n in 1L:nT) { ## model containing terms 1, ..., n-1 fmla <- paste(". 
~ . - ", paste(Terms[n:nT], collapse=" - ")) fmla <- as.formula(fmla) objex[[n]] <- update(object, fmla) } ## full model objex[[nT+1L]] <- object expandedfrom1 <- TRUE } } ## All models fitted using same method? Fits <- lapply(objex, getElement, name="Fit") fitter <- unique(unlist(lapply(Fits, getElement, name="fitter"))) if(length(fitter) > 1) stop(paste("Models are incompatible;", "they were fitted by different methods (", paste(fitter, collapse=", "), ")" )) ## Choice of test if(fitter == "glmmPQL") { ## anova.lme requires different format of `test' argument ## and does not recognise 'dispersion' if(is.null(test)) test <- FALSE else { test <- match.arg(test, tests.choices) if(!(test %in% tests.random)) stop(paste("Test", dQuote(test), "is not implemented for random effects models")) test <- TRUE } } else if(!is.null(test)) { test <- match.arg(test, tests.choices) if(!(test %in% tests.avail)) stop(paste("test=", dQuote(test), "is not yet implemented"), call.=FALSE) if(!pois && !(test %in% tests.Gibbs)) stop(paste("test=", dQuote(test), "is only implemented for Poisson models"), call.=FALSE) } ## Extract glm fit objects fitz <- lapply(Fits, getElement, name="FIT") ## Ensure all models were fitted using GLM, or all were fitted using GAM isgam <- sapply(fitz, inherits, what="gam") isglm <- sapply(fitz, inherits, what="glm") usegam <- any(isgam) if(usegam && any(isglm)) { gripe("Models were re-fitted with use.gam=TRUE") objex <- lapply(objex, update, use.gam=TRUE) } ## Finally do the appropriate ANOVA opt <- list(test=test) if(fitter != "glmmPQL") opt <- append(opt, list(dispersion=1)) result <- try(do.call(anova, append(fitz, opt))) if(inherits(result, "try-error")) stop("anova failed") ## Remove approximation-dependent columns if present result[, "Resid. Dev"] <- NULL ## replace 'residual df' by number of parameters in model if("Resid. Df" %in% names(result)) { ## count number of quadrature points used in each model nq <- totalusedquad(objex[[1L]]) result[, "Resid. 
Df"] <- nq - result[, "Resid. Df"] names(result)[match("Resid. Df", names(result))] <- "Npar" } ## edit header if(!is.null(h <- attr(result, "heading"))) { ## remove .mpl.Y and .logi.Y from formulae if present h <- gsub(".mpl.Y", "", h) h <- gsub(".logi.Y", "", h) ## delete GLM information if present h <- gsub("Model: quasi, link: log", "", h) h <- gsub("Model: binomial, link: logit", "", h) h <- gsub("Response: ", "", h) ## remove blank lines (up to 4 consecutive blanks can occur) for(i in 1L:5L) h <- gsub("\n\n", "\n", h) if(length(objex) > 1 && length(h) > 1) { ## anova(mod1, mod2, ...) ## change names of models fmlae <- unlist(lapply(objex, fmlaString)) # intrx <- unlist(lapply(objex, interString)) h[2L] <- paste("Model", paste0(1L:length(objex), ":"), fmlae, # "\t", # intrx, collapse="\n") } ## Add explanation if we did the stepwise thing ourselves if(expandedfrom1) h <- c(h[1L], "Terms added sequentially (first to last)\n", h[-1]) ## Contract spaces in output if spatstat.options('terse') >= 2 if(!waxlyrical('space')) h <- gsub("\n$", "", h) ## Put back attr(result, "heading") <- h } if(adjust && !pois) { ## issue warning, if not already given if(warn) warn.once("anovaMppmAdjust", "anova.mppm now computes the *adjusted* deviances", "when the models are not Poisson processes.") ## Corrected pseudolikelihood ratio nmodels <- length(objex) if(nmodels > 1) { cfac <- rep(1, nmodels) for(i in 2:nmodels) { a <- objex[[i-1]] b <- objex[[i]] df <- length(coef(a)) - length(coef(b)) if(df > 0) { ibig <- i-1 ismal <- i } else { ibig <- i ismal <- i-1 df <- -df } bigger <- objex[[ibig]] smaller <- objex[[ismal]] if(df == 0) { gripe("Models", i-1, "and", i, "have the same dimension") } else { bignames <- names(coef(bigger)) smallnames <- names(coef(smaller)) injection <- match(smallnames, bignames) if(any(uhoh <- is.na(injection))) { gripe("Unable to match", ngettext(sum(uhoh), "coefficient", "coefficients"), commasep(sQuote(smallnames[uhoh])), "of model", ismal, "to 
coefficients in model", ibig) } else { thetaDot <- 0 * coef(bigger) thetaDot[injection] <- coef(smaller) JH <- vcov(bigger, what="all", new.coef=thetaDot, fine=fine) # J <- if(!logi) JH$Sigma else (JH$Sigma1log+JH$Sigma2log) # H <- if(!logi) JH$A1 else JH$Slog J <- JH$fisher H <- JH$internals$A1 G <- H%*%solve(J)%*%H if(df == 1) { cfac[i] <- H[-injection,-injection]/G[-injection,-injection] } else { Res <- lapply(subfits(bigger), residuals, type="score", drop=TRUE, new.coef=thetaDot, dropcoef=TRUE) U <- sumcompatible(lapply(Res, integral.msr), names(thetaDot)) Uo <- U[-injection] Uo <- matrix(Uo, ncol=1) Hinv <- solve(H) Ginv <- solve(G) Hoo <- Hinv[-injection,-injection, drop=FALSE] Goo <- Ginv[-injection,-injection, drop=FALSE] ScoreStat <- t(Uo) %*% Hoo %*% solve(Goo) %*% Hoo %*% Uo cfac[i] <- ScoreStat/(t(Uo) %*% Hoo %*% Uo) } } } } ## apply Pace et al (2011) adjustment to pseudo-deviances ## (save attributes of 'result' for later reinstatement) oldresult <- result result$Deviance <- AdjDev <- result$Deviance * cfac cn <- colnames(result) colnames(result)[cn == "Deviance"] <- "AdjDeviance" if("Pr(>Chi)" %in% colnames(result)) result[["Pr(>Chi)"]] <- c(NA, pchisq(abs(AdjDev[-1L]), df=abs(result$Df[-1L]), lower.tail=FALSE)) class(result) <- class(oldresult) attr(result, "heading") <- attr(oldresult, "heading") } } return(result) } sumcompatible <- function(xlist, required) { result <- numeric(length(required)) names(result) <- required for(x in xlist) { namx <- names(x) if(!all(ok <- (namx %in% required))) stop(paste("Internal error in sumcompatible:", "list entry", i, "contains unrecognised", ngettext(sum(!ok), "value", "values"), commasep(sQuote(namx[!ok]))), call.=FALSE) inject <- match(namx, required) result[inject] <- result[inject] + x } return(result) } anova.mppm }) spatstat/R/alltypes.R0000755000176200001440000001504513115271075014310 0ustar liggesusers# # alltypes.R # # $Revision: 1.35 $ $Date: 2017/06/05 10:31:58 $ # # alltypes <- function(X, fun="K", 
..., dataname=NULL,verb=FALSE,envelope=FALSE,reuse=TRUE) { # # Function 'alltypes' --- calculates a summary function for # each type, or each pair of types, in a multitype point pattern # if(is.ppp(X)) classname <- "ppp" else if(is.lpp(X)) classname <- "lpp" else stop("X should be a ppp or lpp object") if(is.null(dataname)) dataname <- short.deparse(substitute(X)) # -------------------------------------------------------------------- # First inspect marks if(!is.marked(X)) { nmarks <- 0 marklabels <- "" } else { if(!is.multitype(X)) stop("the marks must be a factor") # ensure type names are parseable (for mathematical labels) levels(marks(X)) <- make.parseable(levels(marks(X))) mks <- marks(X) ma <- levels(mks) nmarks <- length(ma) marklabels <- paste(ma) } # --------------------------------------------------------------------- # determine function name f.is.name <- is.name(substitute(fun)) fname <- if(f.is.name) paste(as.name(substitute(fun))) else if(is.character(fun)) fun else sQuote("fun") # --------------------------------------------------------------------- # determine function to be called if(is.function(fun)) { estimator <- fun } else if(is.character(fun)) { # First try matching one of the standard abbreviations K, G etc estimator <- getSumFun(fun, classname, (nmarks > 0), fatal=FALSE) if(is.null(estimator)) estimator <- get(fun, mode="function") } else stop(paste(sQuote("fun"), "should be a function or a character string")) # ------------------------------------------------------------------ # determine how the function shall be called. 
# indices.expected <- sum(c("i", "j") %in% names(formals(estimator))) apply.to.split <- (indices.expected == 0 && nmarks > 1) if(apply.to.split) ppsplit <- split(X) # -------------------------------------------------------------------- # determine array dimensions and margin labels witch <- if(nmarks == 0) matrix(1L, nrow=1L, ncol=1L, dimnames=list("","")) else if (nmarks == 1) matrix(1L, nrow=1L, ncol=1L, dimnames=list(marklabels, marklabels)) else if(indices.expected != 2) matrix(1L:nmarks, nrow=nmarks, ncol=1L, dimnames=list(marklabels, "")) else matrix(1L:(nmarks^2),ncol=nmarks,nrow=nmarks, byrow=TRUE, dimnames=list(marklabels, marklabels)) # ------------ start computing ------------------------------- # if computing envelopes, first generate simulated patterns # using undocumented feature of envelope() if(envelope && reuse) { L <- do.call(spatstat::envelope, resolve.defaults( list(X, fun=estimator), list(internal=list(eject="patterns")), list(...), switch(1L+indices.expected, NULL, list(i=ma[1L]), list(i=ma[1L], j=ma[2L]), NULL), list(verbose=verb))) intern <- attr(L, "internal") } else intern <- L <- NULL # compute function array and build up 'fasp' object fns <- list() k <- 0 for(i in 1L:nrow(witch)) { Y <- if(apply.to.split) ppsplit[[i]] else X for(j in 1L:ncol(witch)) { if(verb) cat("i =",i,"j =",j,"\n") currentfv <- if(!envelope) switch(1L+indices.expected, estimator(Y, ...), estimator(Y, i=ma[i], ...), estimator(Y, i=ma[i], j=ma[j], ...)) else do.call(spatstat::envelope, resolve.defaults( list(Y, estimator), list(simulate=L, internal=intern), list(verbose=FALSE), list(...), list(Yname=dataname), switch(1L+indices.expected, NULL, list(i=ma[i]), list(i=ma[i], j=ma[j]), NULL))) k <- k+1 fns[[k]] <- as.fv(currentfv) } } # wrap up into 'fasp' object title <- paste(if(nmarks > 1) "array of " else NULL, if(envelope) "envelopes of " else NULL, fname, if(nmarks <= 1) " function " else " functions ", "for ", dataname, ".", sep="") rslt <- fasp(fns, which=witch, 
formulae=NULL, dataname=dataname, title=title, checkfv=FALSE) return(rslt) } # Lookup table for standard abbreviations of functions getSumFun <- local({ ftable <- rbind( data.frame(class="ppp", marked=FALSE, abbrev=c("F", "G", "J", "K", "L", "pcf"), full=c("Fest", "Gest", "Jest", "Kest", "Lest", "pcf"), stringsAsFactors=FALSE), data.frame(class="ppp", marked=TRUE, abbrev=c("F", "G", "J", "K", "L", "pcf"), full= c("Fest", "Gcross", "Jcross", "Kcross", "Lcross", "pcfcross"), stringsAsFactors=FALSE), data.frame(class="lpp", marked=FALSE, abbrev=c("K", "pcf"), full=c("linearK", "linearpcf"), stringsAsFactors=FALSE), data.frame(class="lpp", marked=TRUE, abbrev=c("K", "pcf"), full=c("linearKcross", "linearpcfcross"), stringsAsFactors=FALSE) ) getfun <- function(abbreviation, classname, ismarked, fatal=TRUE) { matches <- with(ftable, which(abbrev == abbreviation & class == classname & marked == ismarked)) if(length(matches) == 0) { if(!fatal) return(NULL) stop(paste("No match to function abbreviation", sQuote(abbreviation), "for class", sQuote(classname))) } if(length(matches) > 1) stop("Ambiguous function name") fullname <- ftable$full[matches] get(fullname, mode="function") } getfun }) spatstat/R/Gcom.R0000755000176200001440000001507413115271075013342 0ustar liggesusers# # Gcom.R # # Model compensator of G # # $Revision: 1.8 $ $Date: 2014/11/10 13:20:25 $ # ################################################################################ # Gcom <- function(object, r=NULL, breaks=NULL, ..., correction=c("border", "Hanisch"), conditional=!is.poisson(object), restrict=FALSE, model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), ppmcorrection="border", truecoef=NULL, hi.res=NULL) { if(inherits(object, "ppm")) { fit <- object } else if(is.ppp(object) || inherits(object, "quad")) { if(is.ppp(object)) object <- quadscheme(object, ...) 
if(!is.null(model)) { fit <- update(model, Q=object, forcefit=TRUE) } else { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") if(missing(conditional) || is.null(conditional)) conditional <- !is.poisson(fit) # rfixed <- !is.null(r) || !is.null(breaks) # selection of edge corrections # correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", Hanisch="Hanisch", hanisch="Hanisch", best="Hanisch"), multi=TRUE) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) Win <- X$window # edge correction algorithm algo <- if(!conditional) "classical" else if(restrict) "restricted" else "reweighted" # conditioning on border region? if(!conditional) { Wfree <- Win } else { rbord <- fit$rbord Wfree <- erosion(Win, rbord) if(restrict) { retain <- inside.owin(union.quad(Q), , Wfree) Q <- Q[Wfree] X <- X[Wfree] Win <- Wfree } } # Extract quadrature info U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy # E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # basic statistics npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # quadrature points used USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n) # adjustments to account for restricted domain if(conditional && spatstat.options("eroded.intensity")) { npts.used <- sum(Z & USED) area.used <- sum(WQ[USED]) lambda.used <- npts.used/area.used } else { npts.used <- npts area.used <- areaW lambda.used <- lambda } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", if(restrict) Wfree else Win, lambda) breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals resid <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) rescts <- with(resid, "continuous") 
if(restrict) { # keep only data inside Wfree rescts <- rescts[retain] } # absolute weight for continuous integrals # wc <- -rescts # nearest neighbours (quadrature point to data point) nn <- nncross(U, X, seq(U$n), seq(X$n)) dIJ <- nn$dist I <- seq(U$n) # J <- nn$which DD <- Z <- (I <= X$n) # TRUE for data points wcIJ <- -rescts # determine whether a quadrature point will be used in integral okI <- USED[I] # initialise fv object r <- breaks$r df <- data.frame(r=r, pois=1 - exp(-pi * lambda.used * r^2)) G <- fv(df, "r", substitute(G(r), NULL), "pois", . ~ r, alim=c(0, rmax), labl=c("r","%s[pois](r)"), desc=c("distance argument r", "theoretical Poisson %s"), fname="G") # distance to boundary b <- bI <- bdist.points(U) dotnames <- character(0) # Border method if("border" %in% correction) { # reduced sample for G(r) of data only ZUSED <- Z & USED RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[ZUSED], breaks) Gb <- RSX$numerator/RSX$denom.count G <- bind.fv(G, data.frame(border=Gb), "hat(%s)[bord](r)", "border-corrected nonparametric estimate of %s", "border") # reduced sample for adjustment integral RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[ZUSED], rep.int(1, sum(ZUSED)), breaks) Gbcom <- RSD$numerator/(1 + RSD$denominator) G <- bind.fv(G, data.frame(bcom=Gbcom), "bold(C)~hat(%s)[bord](r)", "model compensator of border-corrected %s", "bcom") dotnames <- c("border", "bcom", "pois") } # Hanisch correction for data if("Hanisch" %in% correction) { nnd <- dIJ[DD & okI] bdry <- bI[DD & okI] # weights ea <- eroded.areas(Win, rvals) if(algo == "reweighted") { # replace weight(r) by weight(max(rbord,r)) ea[rvals < rbord] <- eroded.areas(Win, rbord) } # compute x <- nnd[nnd <= bdry] h <- whist(x[x <= rmax], breaks=breaks$val) H <- (1/lambda.used) * cumsum(h/ea) # glue on G <- bind.fv(G, data.frame(han=H), "hat(%s)[han](r)", "Hanisch correction estimate of %s", "han") # Hanisch correction for adjustment integral nnd <- dIJ[okI] bdry <- bI[okI] wt <- wcIJ[okI] x <- nnd[nnd <= 
bdry] wt <- wt[nnd <= bdry] h <- whist(x[x <= rmax], breaks=breaks$val, weights=wt[x <= rmax]) lambdaplus <- (npts.used + 1)/area.used Hint <- (1/lambdaplus) * cumsum(h/ea) # glue on G <- bind.fv(G, data.frame(hcom=Hint), "bold(C)~hat(%s)[han](r)", "model compensator of Hanisch-corrected %s", "hcom") # pseudovariance for Hanisch residual Hvar <- (1/lambdaplus^2) * cumsum(h/ea^2) G <- bind.fv(G, data.frame(hvar=Hvar), "bold(C)^2~hat(%s)[han](r)", "Poincare variance for Hanisch corrected %s", "hcom") # default plot does not show all components dotnames <- c("han", "hcom", dotnames) } # compute sensible 'alim' endpoint <- function(y, r, f) { min(r[y >= f * max(y)]) } amax <- endpoint(G$pois, G$r, 0.99) if(length(dotnames) > 0) amax <- max(amax, unlist(lapply(as.data.frame(G)[,dotnames,drop=FALSE], endpoint, r=r, f=0.9))) attr(G, "alim") <- c(0, amax) # fvnames(G, ".") <- dotnames unitname(G) <- unitname(X) # secret tag used by 'Gres' attr(G, "maker") <- "Gcom" return(G) } spatstat/R/clicklpp.R0000644000176200001440000000341513115271075014247 0ustar liggesusers#' #' $Revision: 1.1 $ $Date: 2017/06/05 10:31:58 $ #' clicklpp <- local({ clicklpp <- function(L, n=NULL, types=NULL, ..., add=FALSE, main=NULL, hook=NULL) { if(!inherits(L, "linnet")) stop("L should be a linear network", call.=FALSE) instructions <- if(!is.null(n)) paste("click", n, "times in window") else paste("add points: click left mouse button in window\n", "exit: press ESC or another mouse button") if(is.null(main)) main <- instructions W <- Window(L) #### single type ######################### if(is.null(types)) { plot(L, add=add, main=main) if(!is.null(hook)) plot(hook, add=TRUE) xy <- if(!is.null(n)) spatstatLocator(n=n, ...) else spatstatLocator(...) 
ok <- inside.owin(xy, w=W) if((nbad <- sum(!ok)) > 0) warning(paste("Ignored", nbad, ngettext(nbad, "point", "points"), "outside window"), call.=FALSE) X <- as.lpp(xy$x[ok], xy$y[ok], L=L) return(X) } ##### multitype ####################### ftypes <- factor(types, levels=types) #' input points of type 1 X <- getem(ftypes[1L], instructions, n=n, L=L, add=add, ..., pch=1) X <- X %mark% ftypes[1L] #' input points of types 2, 3, ... in turn for(i in 2:length(types)) { Xi <- getem(ftypes[i], instructions, n=n, L=L, add=add, ..., hook=X, pch=i) Xi <- Xi %mark% ftypes[i] X <- superimpose(X, Xi, L=L) } if(!add) plot(X, main="Final pattern") return(X) } getem <- function(i, instr, ...) { main <- paste("Points of type", sQuote(i), "\n", instr) do.call(clicklpp, resolve.defaults(list(...), list(main=main))) } clicklpp }) spatstat/R/pairpiece.R0000755000176200001440000001043313115271120014377 0ustar liggesusers# # # pairpiece.S # # $Revision: 1.22 $ $Date: 2015/10/21 09:06:57 $ # # A pairwise interaction process with piecewise constant potential # # PairPiece() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # PairPiece <- local({ # .... auxiliary functions ........ delP <- function(i, r) { r <- r[-i] nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Strauss(r)) return(PairPiece(r)) } # ..... template .......... 
BlankPairPiece <- list( name = "Piecewise constant pairwise interaction process", creator = "PairPiece", family = "pairwise.family", # evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) out[,,1] <- (d < r[1]) if(nr > 1) { for(i in 2:nr) out[,,i] <- (d >= r[i-1]) & (d < r[i]) } out }, par = list(r = NULL), # filled in later parnames = "interaction thresholds", init = function(self) { r <- self$par$r if(!is.numeric(r) || !all(r > 0)) stop("interaction thresholds r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction thresholds r must be strictly increasing") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma gamma <- (self$interpret)(coeffs, self)$param$gammas if(!all(is.finite(gamma))) return(FALSE) return(all(gamma <= 1) || gamma[1] == 0) }, project = function(coeffs, self){ # interaction parameters gamma gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction thresholds r[i] r <- self$par$r # check for NA or Inf bad <- !is.finite(gamma) # gamma > 1 forbidden unless hard core ishard <- is.finite(gamma[1]) && (gamma[1] == 0) if(!ishard) bad <- bad | (gamma > 1) if(!any(bad)) return(NULL) if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delP(firstbad, r)) } else { # consider all candidate submodels subs <- lapply(which(bad), delP, r=r) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r if(all(is.na(coeffs))) return(max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) if(!any(active)) return(0) else return(max(r[active])) }, Mayer=function(coeffs, self) { # second Mayer cluster integral r <- self$par$r gamma <- (self$interpret)(coeffs, self)$param$gammas # areas of annuli between r[i-1], r[i] areas <- pi * diff(c(0,r)^2) return(sum(areas * (1-gamma))) }, version=NULL # filled in later ) class(BlankPairPiece) <- "interact" PairPiece <- function(r) { instantiate.interact(BlankPairPiece, list(r=r)) } PairPiece <- intermaker(PairPiece, BlankPairPiece) PairPiece }) spatstat/R/varblock.R0000755000176200001440000001307613115271120014247 0ustar liggesusers# # varblock.R # # Variance estimation using block subdivision # # $Revision: 1.20 $ $Date: 2016/12/30 01:44:50 $ # varblock <- local({ getrvalues <- function(z) { with(z, .x) } stepsize <- function(z) { mean(diff(z)) } dofun <- function(domain, fun, Xpp, ...) 
{ fun(Xpp, ..., domain=domain) } varblock <- function(X, fun=Kest, blocks=quadrats(X, nx=nx, ny=ny), ..., nx=3, ny=nx, confidence=0.95) { stopifnot(is.ppp(X)) stopifnot(is.tess(blocks)) stopifnot(is.function(fun) || is.character(fun)) if(is.character(fun)) fun <- get(fun, mode="function") ## validate confidence level stopifnot(confidence > 0.5 && confidence < 1) alpha <- 1 - confidence probs <- c(alpha/2, 1-alpha/2) ## determine whether 'fun' has an argument called 'domain' canrestrict <- ("domain" %in% names(formals(fun))) || samefunction(fun, pcf) || samefunction(fun, Lest) ## check there's at least one point in each block Y <- split(X, blocks) nums <- sapply(Y, npoints) blockok <- (nums > 0) if(some.zeroes <- any(!blockok)) warning("Some tiles contain no data: they are discarded") if(!canrestrict) { ## divide data into disjoint blocks if(some.zeroes) Y <- Y[blockok] n <- length(Y) if(n <= 1) stop("Need at least 2 blocks") ## apply 'fun' to each block if(any(c("r", "breaks") %in% names(list(...)))) { ## r vector specified fX <- fun(X, ...) z <- lapply(Y, fun, ...) } else { ## need to ensure compatible fv objects z <- lapply(Y, fun, ...) rlist <- lapply(z, getrvalues) rmax <- min(sapply(rlist, max)) rstep <- min(sapply(rlist, stepsize)) r <- seq(0, rmax, by=rstep) z <- lapply(Y, fun, ..., r=r) fX <- fun(X, ..., r=r) } } else { ## use 'domain' argument of 'fun' to compute contributions from each tile B <- tiles(blocks) if(some.zeroes) B <- B[blockok] n <- length(B) if(any(c("r", "breaks") %in% names(list(...)))) { ## r vector specified fX <- fun(X, ...) 
z <- lapply(B, dofun, ..., fun=fun, Xpp=X) } else { ## need to ensure compatible fv objects z <- lapply(B, dofun, ..., fun=fun, Xpp=X) rlist <- lapply(z, getrvalues) rmax <- min(sapply(rlist, max)) rstep <- min(sapply(rlist, stepsize)) r <- seq(0, rmax, by=rstep) z <- lapply(B, dofun, ..., fun=fun, Xpp=X, r=r) fX <- fun(X, ..., r=r) } } ## find columns that are common to all estimates zzz <- reconcile.fv(append(list(fX), z)) fX <- zzz[[1]] z <- zzz[-1] ## sample mean m <- meanlistfv(z) ## sample variance sqdev <- lapply(z, sqdev.fv, m=m) v <- meanlistfv(sqdev) v <- eval.fv(v * n/(n-1), dotonly=FALSE) ## sample standard deviation sd <- eval.fv(sqrt(v), dotonly=FALSE) ## upper and lower limits sem <- eval.fv(sd/sqrt(n), dotonly=FALSE) zcrit <- qnorm(probs) lower <- eval.fv(m + zcrit[1] * sem, dotonly=FALSE) upper <- eval.fv(m + zcrit[2] * sem, dotonly=FALSE) ## rebadge fva <- .Spatstat.FvAttrib fva <- fva[fva %in% names(attributes(fX))] attributes(m)[fva] <- attributes(v)[fva] <- attributes(sd)[fva] <- attributes(upper)[fva] <- attributes(lower)[fva] <- attributes(fX)[fva] m <- prefixfv(m, "mean", "sample mean of", "bold(mean)~") v <- prefixfv(v, "var", "estimated variance of", "bold(var)~") sd <- prefixfv(sd, "sd", "estimated standard deviation of", "bold(sd)~") CItext <- paste(c("lower", "upper"), paste0(100 * confidence, "%%"), "CI limit for") lower <- prefixfv(lower, "lo", CItext[1], "bold(lo)~") upper <- prefixfv(upper, "hi", CItext[2], "bold(hi)~") ## tack together out <- cbind(fX,m,v,sd,upper,lower) ## restrict r domain bad <- matrowall(!is.finite(as.matrix(as.data.frame(out)))) rmax <- max(getrvalues(out)[!bad]) alim <- c(0, rmax) if(!canrestrict) alim <- intersect.ranges(attr(out, "alim"), alim) attr(out, "alim") <- alim ## sensible default plot formula ybase <- fvnames(fX, ".y") xname <- fvnames(fX, ".x") tname <- intersect("theo", fvnames(fX, ".")) fvnames(out, ".y") <- yname <- paste0("mean", ybase) fvnames(out, ".s") <- snames <- paste0(c("lo", "hi"), 
ybase) fvnames(out, ".") <- c(yname, tname, snames) attr(out, "fmla") <- paste(". ~ ", xname) return(out) } sqdev.fv <- function(x,m){ eval.fv((x-m)^2, dotonly=FALSE) } varblock }) meanlistfv <- local({ getYmatrix <- function(x, yn=ynames) { as.matrix(as.data.frame(x)[,yn]) } meanlistfv <- function(z, ...) { ## compute sample mean of a list of fv objects if(!is.list(z) || !all(unlist(lapply(z, is.fv)))) stop("z should be a list of fv objects") if(!do.call(compatible, unname(z))) stop("Objects are not compatible") result <- template <- z[[1]] ## extract each object's function values as a matrix ynames <- fvnames(template, "*") matlist <- unname(lapply(z, getYmatrix, yn=ynames)) ## stack matrices into an array y <- do.call(abind, append(matlist, list(along=3))) ## take mean ymean <- apply(y, 1:2, mean, ...) result[,ynames] <- ymean return(result) } meanlistfv }) spatstat/R/nnmap.R0000644000176200001440000001445213115271120013551 0ustar liggesusers# # nnmap.R # # nearest or k-th nearest neighbour of each pixel # # $Revision: 1.9 $ $Date: 2017/06/05 10:31:58 $ # nnmap <- function(X, k=1, what = c("dist", "which"), ..., W=as.owin(X), is.sorted.X=FALSE, sortby=c("range", "var", "x", "y")) { stopifnot(is.ppp(X)) sortby <- match.arg(sortby) outputarray <- resolve.1.default("outputarray", ..., outputarray=FALSE) W <- as.owin(W) huge <- 1.1 * diameter(boundingbox(as.rectangle(X), as.rectangle(W))) what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE) want.dist <- "dist" %in% what want.which <- "which" %in% what want.both <- want.dist && want.which if(!missing(k)) { # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } } k <- as.integer(k) kmax <- max(k) nk <- length(k) # note whether W is `really' a 
rectangle isrect <- is.rectangle(rescue.rectangle(W)) # set up pixel array M <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=W))) Mdim <- M$dim nxcol <- Mdim[2] nyrow <- Mdim[1] npixel <- nxcol * nyrow nX <- npoints(X) if(nX == 0) { # trivial - avoid potential problems in C code NND <- if(want.dist) array(Inf, dim=c(nk, Mdim)) else 0 NNW <- if(want.which) array(NA_integer_, dim=c(nk, Mdim)) else 0 } else { # usual case if(is.sorted.X && !(sortby %in% c("x", "y"))) stop(paste("If data are already sorted,", "the sorting coordinate must be specified explicitly", "using sortby = \"x\" or \"y\"")) # decide whether to sort on x or y coordinate switch(sortby, range = { s <- sidelengths(as.rectangle(X)) sortby.y <- (s[1] < s[2]) }, var = { sortby.y <- (var(X$x) < var(X$y)) }, x={ sortby.y <- FALSE}, y={ sortby.y <- TRUE} ) # The C code expects points to be sorted by x coordinate. if(sortby.y) { oldM <- M X <- flipxy(X) W <- flipxy(W) M <- flipxy(M) Mdim <- M$dim } xx <- X$x yy <- X$y # sort only if needed if(!is.sorted.X){ oX <- fave.order(xx) xx <- xx[oX] yy <- yy[oX] } # number of neighbours that are well-defined kmaxcalc <- min(nX, kmax) # prepare to call C code nndv <- if(want.dist) numeric(npixel * kmaxcalc) else numeric(1) nnwh <- if(want.which) integer(npixel * kmaxcalc) else integer(1) # ............. call C code ............................ 
if(kmaxcalc == 1) { zz <- .C("nnGinterface", nx = as.integer(nxcol), x0 = as.double(M$xcol[1]), xstep = as.double(M$xstep), ny = as.integer(nyrow), y0 = as.double(M$yrow[1]), ystep = as.double(M$ystep), np = as.integer(nX), xp = as.double(xx), yp = as.double(yy), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd = as.double(nndv), nnwhich = as.integer(nnwh), huge = as.double(huge), PACKAGE = "spatstat") } else { zz <- .C("knnGinterface", nx = as.integer(nxcol), x0 = as.double(M$xcol[1]), xstep = as.double(M$xstep), ny = as.integer(nyrow), y0 = as.double(M$yrow[1]), ystep = as.double(M$ystep), np = as.integer(nX), xp = as.double(xx), yp = as.double(yy), kmax = as.integer(kmaxcalc), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd = as.double(nndv), nnwhich = as.integer(nnwh), huge = as.double(huge), PACKAGE = "spatstat") } # extract results nnW <- zz$nnwhich nnD <- zz$nnd # map index 0 to NA if(want.which && any(uhoh <- (nnW == 0))) { nnW[uhoh] <- NA if(want.dist) nnD[uhoh] <- Inf } # reinterpret indices in original ordering if(!is.sorted.X) nnW <- oX[nnW] # reform as arrays NND <- if(want.dist) array(nnD, dim=c(kmaxcalc, Mdim)) else 0 NNW <- if(want.which) array(nnW, dim=c(kmaxcalc, Mdim)) else 0 if(sortby.y) { # flip x and y back again if(want.dist) NND <- aperm(NND, c(1, 3, 2)) if(want.which) NNW <- aperm(NNW, c(1, 3, 2)) M <- oldM Mdim <- dim(M) } # the return value should correspond to the original vector k if(kmax > kmaxcalc) { # pad with NA / Inf if(want.dist) { NNDcalc <- NND NND <- array(Inf, dim=c(kmax, Mdim)) NND[1:kmaxcalc, , ] <- NNDcalc } if(want.which) { NNWcalc <- NNW NNW <- array(NA_integer_, dim=c(kmax, Mdim)) NNW[1:kmaxcalc, , ] <- NNWcalc } } if(length(k) < kmax) { # select only the specified planes if(want.dist) NND <- NND[k, , , drop=FALSE] if(want.which) NNW <- NNW[k, , , drop=FALSE] } } # secret backdoor if(outputarray) { # return result as an array or pair of arrays result <- if(want.both) { 
list(dist=NND, which=NNW) } else if(want.dist) NND else NNW attr(result, "pixarea") <- with(M, xstep * ystep) return(result) } # format result as a list of images result <- list() if(want.dist) { dlist <- list() for(i in 1:nk) { DI <- as.im(NND[i,,], M) if(!isrect) DI <- DI[M, drop=FALSE] dlist[[i]] <- DI } names(dlist) <- k result[["dist"]] <- if(nk > 1) dlist else dlist[[1]] } if(want.which) { wlist <- list() for(i in 1:nk) { WI <- as.im(NNW[i,,], M) if(!isrect) WI <- WI[M, drop=FALSE] wlist[[i]] <- WI } names(wlist) <- k result[["which"]] <- if(nk > 1) wlist else wlist[[1]] } if(!want.both) result <- result[[1]] return(result) } spatstat/R/hybrid.family.R0000755000176200001440000001534713115271075015221 0ustar liggesusers# # hybrid.family.R # # $Revision: 1.12 $ $Date: 2017/02/07 07:35:32 $ # # Hybrid interactions # # hybrid.family: object of class 'isf' defining pairwise interaction # # ------------------------------------------------------------------- # hybrid.family <- list( name = "hybrid", print = function(self) { cat("Hybrid interaction family\n") }, plot = function(fint, ..., d=NULL, plotit=TRUE, separate=FALSE) { # plot hybrid interaction if possible verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || is.null(inter$family) || inter$family$name != "hybrid") stop("Tried to plot the wrong kind of interaction") if(is.null(d)) { # compute reach and determine max distance for plots dmax <- 1.25 * reach(inter) if(!is.finite(dmax)) { # interaction has infinite reach # Are plot limits specified? 
xlim <- resolve.defaults(list(...), list(xlim=c(0, Inf))) if(all(is.finite(xlim))) dmax <- max(xlim) else stop("Interaction has infinite reach; need to specify xlim or d") } d <- seq(0, dmax, length=256) } # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # extract the component interactions interlist <- inter$par # check that they are all pairwise interactions families <- unlist(lapply(interlist, interactionfamilyname)) if(!separate && !all(families == "pairwise")) { warning(paste("Cannot compute the resultant function;", "not all components are pairwise interactions;", "plotting each component separately")) separate <- TRUE } # deal with each interaction ninter <- length(interlist) results <- list() for(i in 1:ninter) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. prefixlength <- nchar(nameI.) Vprefix <- substr(Vnames, 1, prefixlength) relevant <- (Vprefix == nameI.) 
# construct fii object for this component fitinI <- fii(interI, coeff[relevant], Vnames[relevant], IsOffset[relevant]) # convert to fv object a <- plot(fitinI, ..., d=d, plotit=FALSE) aa <- list(a) names(aa) <- nameI results <- append(results, aa) } # computation of resultant is only implemented for fv objects if(!separate && !all(unlist(lapply(results, is.fv)))) { warning(paste("Cannot compute the resultant function;", "not all interaction components yielded an fv object;", "plotting separate results for each component")) separate <- TRUE } # return separate 'fv' or 'fasp' objects if required results <- as.anylist(results) if(separate) { if(plotit) { main0 <- "Pairwise interaction components" do.call(plot, resolve.defaults(list(results), list(...), list(main=main0))) } return(invisible(results)) } # multiply together to obtain resultant pairwise interaction ans <- results[[1L]] if(ninter >= 2) { for(i in 2:ninter) { Fi <- results[[i]] ans <- eval.fv(ans * Fi) } copyover <- c("ylab", "yexp", "labl", "desc", "fname") attributes(ans)[copyover] <- attributes(results[[1L]])[copyover] } main0 <- "Resultant pairwise interaction" if(plotit) do.call(plot, resolve.defaults(list(ans), list(...), list(main=main0))) return(invisible(ans)) }, eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # `pot' is ignored; `pars' is a list of interactions nU <- length(U$x) V <- matrix(, nU, 0) IsOffset <- logical(0) for(i in 1:length(pars)) { # extract i-th component interaction interI <- pars[[i]] nameI <- names(pars)[[i]] # compute potential for i-th component VI <- evalInteraction(X, U, EqualPairs, interI, correction, ...) 
if(ncol(VI) > 0) { if(ncol(VI) > 1 && is.null(colnames(VI))) # make up names colnames(VI) <- paste("Interaction", seq(ncol(VI)), sep=".") # prefix label with name of i-th component colnames(VI) <- paste(nameI, dimnames(VI)[[2L]], sep=".") # handle IsOffset offI <- attr(VI, "IsOffset") if(is.null(offI)) offI <- rep.int(FALSE, ncol(VI)) # tack on IsOffset <- c(IsOffset, offI) # append to matrix V V <- cbind(V, VI) } } if(any(IsOffset)) attr(V, "IsOffset") <- IsOffset return(V) }, delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { ## Sufficient statistic for second order conditional intensity result <- NULL interlist <- inte$par for(ii in interlist) { v <- NULL ## look for 'delta2' in component interaction 'ii' if(!is.null(delta2 <- ii$delta2) && is.function(delta2)) v <- delta2(X, ii, correction, sparseOK=sparseOK) ## look for 'delta2' in family of component 'ii' if(is.null(v) && !is.null(delta2 <- ii$family$delta2) && is.function(delta2)) v <- delta2(X, ii, correction, sparseOK=sparseOK) if(is.null(v)) { ## no special algorithm available: generic algorithm needed return(NULL) } if(is.null(result)) { result <- v } else if(inherits(v, c("sparse3Darray", "sparseMatrix"))) { result <- bind.sparse3Darray(result, v, along=3) } else { result <- abind::abind(as.array(result), v, along=3) } } return(result) }, suffstat = NULL ) class(hybrid.family) <- "isf" spatstat/R/plot.ppm.R0000755000176200001440000000542413115271120014213 0ustar liggesusers# # plot.ppm.S # # $Revision: 2.12 $ $Date: 2016/06/11 08:02:17 $ # # plot.ppm() # Plot a point process model fitted by ppm(). # # # plot.ppm <- function(x, ngrid = c(40,40), superimpose = TRUE, trend=TRUE, cif=TRUE, se=TRUE, pause = interactive(), how=c("persp","image", "contour"), plot.it=TRUE, locations=NULL, covariates=NULL, ...) { model <- x # Plot a point process model fitted by ppm(). 
# verifyclass(model, "ppm") # # find out what kind of model it is # mod <- summary(model, quick="entries") stationary <- mod$stationary poisson <- mod$poisson marked <- mod$marked multitype <- mod$multitype data <- mod$entries$data if(marked) { if(!multitype) stop("Not implemented for general marked point processes") else mrkvals <- levels(marks(data)) } else mrkvals <- 1 # ntypes <- length(mrkvals) # # Interpret options # ----------------- # # Whether to plot trend, cif, se if(!trend && !cif && !se) { cat(paste("Nothing plotted;", sQuote("trend"), ",", sQuote("cif"), "and", sQuote("se"), "are all FALSE\n")) return(invisible(NULL)) } # Suppress uninteresting plots # unless explicitly instructed otherwise if(missing(trend)) trend <- !stationary if(missing(cif)) cif <- !poisson if(missing(se)) se <- poisson && !stationary else if(se && !poisson) { warning(paste("standard error calculation", "is only implemented for Poisson models")) se <- FALSE } if(!trend && !cif && !se) { cat("Nothing plotted -- all plots selected are flat surfaces.\n") return(invisible(NULL)) } # # style of plot: suppress pseudo-default # if(missing(how)) how <- "image" # # # Do the prediction # ------------------ out <- list() surftypes <- c("trend","cif","se")[c(trend,cif,se)] ng <- if(missing(ngrid) && !missing(locations)) NULL else ngrid for (ttt in surftypes) { p <- predict(model, ngrid=ng, locations=locations, covariates=covariates, type = ttt, getoutofjail=TRUE) # permit outdated usage type="se" if(is.im(p)) p <- list(p) out[[ttt]] <- p } # Make it a plotppm object # ------------------------ class(out) <- "plotppm" attr(out, "mrkvals") <- mrkvals # Actually plot it if required # ---------------------------- if(plot.it) { if(!superimpose) data <- NULL if(missing(pause)) pause <- NULL plot(out,data=data,trend=trend,cif=cif,se=se,how=how,pause=pause, ...) 
} return(invisible(out)) } spatstat/R/sigtrace.R0000644000176200001440000001430413115225157014246 0ustar liggesusers# # sigtrace.R # # $Revision: 1.10 $ $Date: 2016/02/11 09:36:11 $ # # Significance traces # dclf.sigtrace <- function(X, ...) mctest.sigtrace(X, ..., exponent=2) mad.sigtrace <- function(X, ...) mctest.sigtrace(X, ..., exponent=Inf) mctest.sigtrace <- function(X, fun=Lest, ..., exponent=1, interpolate=FALSE, alpha=0.05, confint=TRUE, rmin=0) { check.1.real(exponent) explain.ifnot(exponent >= 0) if(missing(fun) && inherits(X, c("envelope", "hasenvelope"))) fun <- NULL Z <- envelopeProgressData(X, fun=fun, ..., rmin=rmin, exponent=exponent) R <- Z$R devdata <- Z$devdata devsim <- Z$devsim result <- mctestSigtraceEngine(R, devdata, devsim, interpolate=interpolate, confint=confint, alpha=alpha, exponent=exponent, unitname=unitname(X)) result <- hasenvelope(result, Z$envelope) # envelope may be NULL return(result) } mctestSigtraceEngine <- local({ mctestSigtraceEngine <- function(R, devdata, devsim, ..., interpolate=FALSE, confint=TRUE, alpha=0.05, exponent=2, unitname=NULL) { nsim <- ncol(devsim) if(!interpolate) { #' Monte Carlo p-value datarank <- apply(devdata < devsim, 1, sum) + apply(devdata == devsim, 1, sum)/2 + 1 pvalue <- datarank/(nsim+1) } else { #' interpolated p-value devs <- cbind(devdata, devsim) pvalue <- apply(devs, 1, rowwise.interp.tailprob) } if(!confint) { #' create fv object without confidence interval p <- fv(data.frame(R=R, pest=pvalue, alpha=alpha), argu="R", ylab = quote(p(R)), valu="pest", fmla = . 
~ R, desc = c("Interval endpoint R", "calculated p-value %s", "threshold for significance"), labl=c("R", "%s(R)", paste(alpha)), unitname = unitname, fname = "p") fvnames(p, ".") <- c("pest", "alpha") } else { # confidence interval if(!interpolate) { #' Agresti-Coull confidence interval successes <- datarank - 1 trials <- nsim z <- qnorm(1 - (1-0.95)/2) nplus <- trials + z^2 pplus <- (successes + z^2/2)/nplus sigmaplus <- sqrt(pplus * (1-pplus)/nplus) lo <- pplus - z * sigmaplus hi <- pplus + z * sigmaplus } else { #' confidence interval by delta method pSE <- apply(devs, 1, rowwise.se) z <- qnorm(1 - (1-0.95)/2) lo <- pmax(0, pvalue - z * pSE) hi <- pmin(1, pvalue + z * pSE) } #' create fv object with confidence interval p <- fv(data.frame(R=R, pest=pvalue, alpha=alpha, lo=lo, hi=hi), argu="R", ylab = quote(p(R)), valu="pest", fmla = . ~ R, desc = c("Interval endpoint R", "calculated p-value %s", "threshold for significance", "lower 95%% limit for p-value", "upper 95%% limit for p-value"), labl=c("R", "%s(R)", paste(alpha), "lo(R)", "hi(R)"), unitname = unitname, fname = "p") fvnames(p, ".") <- c("pest", "alpha", "lo", "hi") fvnames(p, ".s") <- c("lo", "hi") } return(p) } ## interpolated p-value interpol.tailprob <- function(x, q) { sigma <- bw.nrd0(x) mean(pnorm(q, mean=x, sd=sigma, lower.tail=FALSE)) } rowwise.interp.tailprob <- function(x) { interpol.tailprob(x[-1], x[1]) } ## estimated SE of p-value interpol.se <- function(x, q) { sigma <- bw.nrd0(x) z <- density(x, sigma) v <- mean(z$y * pnorm(q, mean=z$x, sd=sigma, lower.tail=FALSE)^2) * diff(range(z$x)) sqrt(v)/length(x) } rowwise.se <- function(x) { interpol.se(x[-1], x[1]) } mctestSigtraceEngine }) dg.sigtrace <- function(X, fun=Lest, ..., exponent=2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), rmin=0, leaveout=1, interpolate=FALSE, confint=TRUE, alpha=0.05, savefuns=FALSE, savepatterns=FALSE, verbose=FALSE) { alternative <- match.arg(alternative) env.here <- 
sys.frame(sys.nframe()) if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub)) stop("nsim and nsimsub must be relatively prime") ## generate or extract simulated patterns and functions if(verbose) cat("Generating first-level data...") E <- envelope(X, fun=fun, ..., nsim=nsim, savepatterns=TRUE, savefuns=TRUE, verbose=verbose, envir.simul=env.here) ## get first level MC test significance trace if(verbose) cat("Computing significance trace...") T1 <- mctest.sigtrace(E, fun=fun, nsim=nsim, exponent=exponent, rmin=rmin, alternative=alternative, leaveout=leaveout, interpolate=interpolate, confint=FALSE, verbose=verbose, ...) R <- T1$R phat <- T1$pest ## second level traces if(verbose) cat(" Done.\nGenerating second-level data... [silently] ..") Pat <- attr(E, "simpatterns") T2list <- lapply(Pat, mctest.sigtrace, fun=fun, nsim=nsimsub, exponent=exponent, rmin=rmin, alternative=alternative, leaveout=leaveout, interpolate=interpolate, confint=FALSE, verbose=FALSE, ...) phati <- sapply(T2list, getElement, name="pest") ## Dao-Genton p-value if(verbose) cat(" Computing significance trace...") result <- mctestSigtraceEngine(R, -phat, -phati, interpolate=FALSE, confint=confint, exponent=exponent, alpha=alpha, unitname=unitname(X)) if(verbose) cat(" Done.\n") if(savefuns || savepatterns) result <- hasenvelope(result, E) return(result) } spatstat/R/wingeom.R0000755000176200001440000007172413153160242014121 0ustar liggesusers# # wingeom.R Various geometrical computations in windows # # $Revision: 4.123 $ $Date: 2017/09/04 05:10:45 $ # volume.owin <- function(x) { area.owin(x) } area <- function(w) UseMethod("area") area.default <- function(w) area.owin(as.owin(w)) area.owin <- function(w) { stopifnot(is.owin(w)) switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) area <- width * height }, polygonal = { area <- sum(unlist(lapply(w$bdry, Area.xypolygon))) }, mask = { pixelarea <- abs(w$xstep * w$ystep) npixels <- sum(w$m) area <- pixelarea * 
npixels }, stop("Unrecognised window type") ) return(area) } perimeter <- function(w) { w <- as.owin(w) switch(w$type, rectangle = { return(2*(diff(w$xrange)+diff(w$yrange))) }, polygonal={ return(sum(lengths.psp(edges(w)))) }, mask={ p <- as.polygonal(w) if(is.null(p)) return(NA) delta <- sqrt(w$xstep^2 + w$ystep^2) p <- simplify.owin(p, delta * 1.15) return(sum(lengths.psp(edges(p)))) }) return(NA) } framebottomleft <- function(w) { f <- Frame(w) c(f$xrange[1L], f$yrange[1L]) } sidelengths.owin <- function(x) { if(x$type != "rectangle") warning("Computing the side lengths of a non-rectangular window") with(x, c(diff(xrange), diff(yrange))) } shortside.owin <- function(x) { min(sidelengths(x)) } eroded.areas <- function(w, r, subset=NULL) { w <- as.owin(w) if(!is.null(subset) && !is.mask(w)) w <- as.mask(w) switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) areas <- pmax(width - 2 * r, 0) * pmax(height - 2 * r, 0) }, polygonal = { ## warning("Approximating polygonal window by digital image") w <- as.mask(w) areas <- eroded.areas(w, r) }, mask = { ## distances from each pixel to window boundary b <- if(is.null(subset)) bdist.pixels(w, style="matrix") else bdist.pixels(w)[subset, drop=TRUE, rescue=FALSE] ## histogram breaks to satisfy hist() Bmax <- max(b, r) breaks <- c(-1,r,Bmax+1) ## histogram of boundary distances h <- hist(b, breaks=breaks, plot=FALSE)$counts ## reverse cumulative histogram H <- revcumsum(h) ## drop first entry corresponding to r=-1 H <- H[-1] ## convert count to area pixarea <- w$xstep * w$ystep areas <- pixarea * H }, stop("unrecognised window type") ) areas } even.breaks.owin <- function(w) { verifyclass(w, "owin") Rmax <- diameter(w) make.even.breaks(Rmax, Rmax/(100 * sqrt(2))) } unit.square <- function() { owin(c(0,1),c(0,1)) } square <- function(r=1, unitname=NULL) { stopifnot(is.numeric(r)) if(is.numeric(unitname) && length(unitname) == 1 && length(r) == 1) { #' common error warning("Interpreting 
square(a, b) as square(c(a,b))", call.=FALSE) r <- c(r, unitname) unitname <- NULL } if(!all(is.finite(r))) stop("argument r is NA or infinite") if(length(r) == 1) { stopifnot(r > 0) r <- c(0,r) } else if(length(r) == 2) { stopifnot(r[1L] < r[2L]) } else stop("argument r must be a single number, or a vector of length 2") owin(r,r, unitname=unitname) } # convert polygonal window to mask window owinpoly2mask <- function(w, rasta, check=TRUE) { if(check) { verifyclass(w, "owin") stopifnot(w$type == "polygonal") verifyclass(rasta, "owin") stopifnot(rasta$type == "mask") } bdry <- w$bdry x0 <- rasta$xcol[1L] y0 <- rasta$yrow[1L] xstep <- rasta$xstep ystep <- rasta$ystep dimyx <- rasta$dim nx <- dimyx[2L] ny <- dimyx[1L] epsilon <- with(.Machine, double.base^floor(double.ulp.digits/2)) score <- numeric(nx*ny) for(i in seq_along(bdry)) { p <- bdry[[i]] xp <- p$x yp <- p$y np <- length(p$x) # repeat last vertex xp <- c(xp, xp[1L]) yp <- c(yp, yp[1L]) np <- np + 1 # rescale coordinates so that pixels are at integer locations xp <- (xp - x0)/xstep yp <- (yp - y0)/ystep # avoid exact integer locations for vertices whole <- (ceiling(xp) == floor(xp)) xp[whole] <- xp[whole] + epsilon whole <- (ceiling(yp) == floor(yp)) yp[whole] <- yp[whole] + epsilon # call C z <- .C("poly2imI", xp=as.double(xp), yp=as.double(yp), np=as.integer(np), nx=as.integer(nx), ny=as.integer(ny), out=as.integer(integer(nx * ny)), PACKAGE="spatstat") if(i == 1) score <- z$out else score <- score + z$out } status <- (score != 0) out <- owin(rasta$xrange, rasta$yrange, mask=matrix(status, ny, nx)) return(out) } overlap.owin <- function(A, B) { # compute the area of overlap between two windows # check units if(!compatible.units(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") At <- A$type Bt <- B$type if(At=="rectangle" && Bt=="rectangle") { xmin <- max(A$xrange[1L],B$xrange[1L]) xmax <- min(A$xrange[2L],B$xrange[2L]) if(xmax <= xmin) return(0) ymin <- 
max(A$yrange[1L],B$yrange[1L]) ymax <- min(A$yrange[2L],B$yrange[2L]) if(ymax <= ymin) return(0) return((xmax-xmin) * (ymax-ymin)) } if((At=="rectangle" && Bt=="polygonal") || (At=="polygonal" && Bt=="rectangle") || (At=="polygonal" && Bt=="polygonal")) { AA <- as.polygonal(A)$bdry BB <- as.polygonal(B)$bdry area <- 0 for(i in seq_along(AA)) for(j in seq_along(BB)) area <- area + overlap.xypolygon(AA[[i]], BB[[j]]) # small negative numbers can occur due to numerical error return(max(0, area)) } if(At=="mask") { # count pixels in A that belong to B pixelarea <- abs(A$xstep * A$ystep) rxy <- rasterxy.mask(A, drop=TRUE) x <- rxy$x y <- rxy$y ok <- inside.owin(x, y, B) return(pixelarea * sum(ok)) } if(Bt== "mask") { # count pixels in B that belong to A pixelarea <- abs(B$xstep * B$ystep) rxy <- rasterxy.mask(B, drop=TRUE) x <- rxy$x y <- rxy$y ok <- inside.owin(x, y, A) return(pixelarea * sum(ok)) } stop("Internal error") } # # subset operator for window # "[.owin" <- function(x, i, ...) { if(!missing(i) && !is.null(i)) { if(is.im(i) && i$type == "logical") { # convert to window i <- as.owin(eval.im(ifelse1NA(i))) } else stopifnot(is.owin(i)) x <- intersect.owin(x, i, fatal=FALSE) } return(x) } # # # Intersection and union of windows # # intersect.owin <- function(..., fatal=TRUE, p) { argh <- list(...) 
## p is a list of arguments to polyclip::polyclip if(missing(p) || is.null(p)) p <- list() ## handle 'solist' objects argh <- expandSpecialLists(argh, "solist") rasterinfo <- list() if(length(argh) > 0) { # explicit arguments controlling raster info israster <- names(argh) %in% names(formals(as.mask)) if(any(israster)) { rasterinfo <- argh[israster] ## remaining arguments argh <- argh[!israster] } } ## look for window arguments isowin <- sapply(argh, is.owin) if(any(!isowin)) warning("Some arguments were not windows") argh <- argh[isowin] nwin <- length(argh) if(nwin == 0) { warning("No windows were given") return(NULL) } ## at least one window A <- argh[[1L]] if(nwin == 1) return(A) ## at least two windows B <- argh[[2L]] if(nwin > 2) { ## handle union of more than two windows windows <- argh[-c(1,2)] ## determine a common set of parameters for polyclip p <- commonPolyclipArgs(A, B, do.call(boundingbox, windows), p=p) ## absorb all windows into B for(i in seq_along(windows)) B <- do.call(intersect.owin, append(list(B, windows[[i]], p=p), rasterinfo)) } ## There are now only two windows if(is.empty(A)) return(A) if(is.empty(B)) return(B) if(identical(A, B)) return(A) # check units if(!compatible(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") # determine intersection of x and y ranges xr <- intersect.ranges(A$xrange, B$xrange, fatal=fatal) yr <- intersect.ranges(A$yrange, B$yrange, fatal=fatal) if(!fatal && (is.null(xr) || is.null(yr))) return(NULL) C <- owin(xr, yr, unitname=unitname(A)) if(is.empty(A) || is.empty(B)) return(emptywindow(C)) # Determine type of intersection Arect <- is.rectangle(A) Brect <- is.rectangle(B) # Apoly <- is.polygonal(A) # Bpoly <- is.polygonal(B) Amask <- is.mask(A) Bmask <- is.mask(B) # Rectangular case if(Arect && Brect) return(C) if(!Amask && !Bmask) { ####### Result is polygonal ############ a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon) b <- lapply(as.polygonal(B)$bdry, 
reverse.xypolygon) ab <- do.call(polyclip::polyclip, append(list(a, b, "intersection", fillA="nonzero", fillB="nonzero"), p)) if(length(ab)==0) return(emptywindow(C)) # ensure correct polarity totarea <- sum(unlist(lapply(ab, Area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(poly=ab, check=FALSE, unitname=unitname(A)) AB <- rescue.rectangle(AB) return(AB) } ######### Result is a mask ############## # Restrict domain where possible if(Arect) A <- C if(Brect) B <- C if(Amask) A <- trim.mask(A, C) if(Bmask) B <- trim.mask(B, C) # Did the user specify the pixel raster? if(length(rasterinfo) > 0) { # convert to masks with specified parameters, and intersect if(Amask) { A <- do.call(as.mask, append(list(A), rasterinfo)) return(restrict.mask(A, B)) } else { B <- do.call(as.mask, append(list(B), rasterinfo)) return(restrict.mask(B, A)) } } # One mask and one rectangle? if(Arect && Bmask) return(B) if(Amask && Brect) return(A) # One mask and one polygon? if(Amask && !Bmask) return(restrict.mask(A, B)) if(!Amask && Bmask) return(restrict.mask(B, A)) # Two existing masks? if(Amask && Bmask) { # choose the finer one if(A$xstep <= B$xstep) return(restrict.mask(A, B)) else return(restrict.mask(B, A)) } # No existing masks. No clipping applied so far. # Convert one window to a mask with default pixel raster, and intersect. if(Arect) { A <- as.mask(A) return(restrict.mask(A, B)) } else { B <- as.mask(B) return(restrict.mask(B, A)) } } union.owin <- function(..., p) { argh <- list(...) 
## weed out NULL arguments argh <- argh[!sapply(argh, is.null)] ## p is a list of arguments to polyclip::polyclip if(missing(p) || is.null(p)) p <- list() ## handle 'solist' objects argh <- expandSpecialLists(argh, "solist") rasterinfo <- list() if(length(argh) > 0) { ## arguments controlling raster info israster <- names(argh) %in% names(formals(as.mask)) if(any(israster)) { rasterinfo <- argh[israster] ## remaining arguments argh <- argh[!israster] } } ## look for window arguments isowin <- sapply(argh, is.owin) if(any(!isowin)) warning("Some arguments were not windows") argh <- argh[isowin] ## nwin <- length(argh) if(nwin == 0) { warning("No windows were given") return(NULL) } ## find non-empty ones if(any(isemp <- sapply(argh, is.empty))) argh <- argh[!isemp] nwin <- length(argh) if(nwin == 0) { warning("All windows were empty") return(NULL) } ## at least one window A <- argh[[1L]] if(nwin == 1) return(A) ## more than two windows if(nwin > 2) { ## check if we need polyclip somepoly <- !all(sapply(argh, is.mask)) if(somepoly) { ## determine a common set of parameters for polyclip p <- commonPolyclipArgs(do.call(boundingbox, argh), p=p) ## apply these parameters now to avoid numerical errors argh <- applyPolyclipArgs(argh, p=p) A <- argh[[1L]] } ## absorb all windows into A without rescaling nullp <- list(eps=1, x0=0, y0=0) for(i in 2:nwin) A <- do.call(union.owin, append(list(A, argh[[i]], p=nullp), rasterinfo)) if(somepoly) { ## undo rescaling A <- reversePolyclipArgs(A, p=p) } return(A) } ## Exactly two windows B <- argh[[2L]] if(identical(A, B)) return(A) ## check units if(!compatible(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") ## Determine type of intersection ## Arect <- is.rectangle(A) ## Brect <- is.rectangle(B) ## Apoly <- is.polygonal(A) ## Bpoly <- is.polygonal(B) Amask <- is.mask(A) Bmask <- is.mask(B) ## Create a rectangle to contain the result C <- owin(range(A$xrange, B$xrange), range(A$yrange, B$yrange), 
unitname=unitname(A)) if(!Amask && !Bmask) { ####### Result is polygonal (or rectangular) ############ a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon) b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon) ab <- do.call(polyclip::polyclip, append(list(a, b, "union", fillA="nonzero", fillB="nonzero"), p)) if(length(ab) == 0) return(emptywindow(C)) ## ensure correct polarity totarea <- sum(unlist(lapply(ab, Area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(poly=ab, check=FALSE, unitname=unitname(A)) AB <- rescue.rectangle(AB) return(AB) } ####### Result is a mask ############ ## Determine pixel raster parameters if(length(rasterinfo) == 0) { rasterinfo <- if(Amask) list(xy=list(x=as.numeric(prolongseq(A$xcol, C$xrange)), y=as.numeric(prolongseq(A$yrow, C$yrange)))) else if(Bmask) list(xy=list(x=as.numeric(prolongseq(B$xcol, C$xrange)), y=as.numeric(prolongseq(B$yrow, C$yrange)))) else list() } ## Convert C to mask C <- do.call(as.mask, append(list(w=C), rasterinfo)) rxy <- rasterxy.mask(C) x <- rxy$x y <- rxy$y ok <- inside.owin(x, y, A) | inside.owin(x, y, B) if(all(ok)) { ## result is a rectangle C <- as.rectangle(C) } else { ## result is a mask C$m[] <- ok } return(C) } setminus.owin <- function(A, B, ..., p) { if(is.null(B)) return(A) verifyclass(B, "owin") if(is.null(A)) return(emptywindow(Frame(B))) verifyclass(A, "owin") if(is.empty(A) || is.empty(B)) return(A) if(identical(A, B)) return(emptywindow(Frame(A))) ## p is a list of arguments to polyclip::polyclip if(missing(p) || is.null(p)) p <- list() ## check units if(!compatible(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") ## Determine type of arguments Arect <- is.rectangle(A) Brect <- is.rectangle(B) ## Apoly <- is.polygonal(A) ## Bpoly <- is.polygonal(B) Amask <- is.mask(A) Bmask <- is.mask(B) ## Case where A and B are both rectangular if(Arect && Brect) { if(is.subset.owin(A, B)) return(emptywindow(B)) C <- intersect.owin(A, B, 
fatal=FALSE) if(is.null(C) || is.empty(C)) return(A) return(complement.owin(C, A)) } ## Polygonal case if(!Amask && !Bmask) { ####### Result is polygonal ############ a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon) b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon) ab <- do.call(polyclip::polyclip, append(list(a, b, "minus", fillA="nonzero", fillB="nonzero"), p)) if(length(ab) == 0) return(emptywindow(B)) ## ensure correct polarity totarea <- sum(unlist(lapply(ab, Area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(poly=ab, check=FALSE, unitname=unitname(A)) AB <- rescue.rectangle(AB) return(AB) } ####### Result is a mask ############ ## Determine pixel raster parameters rasterinfo <- if((length(list(...)) > 0)) list(...) else if(Amask) list(xy=list(x=A$xcol, y=A$yrow)) else if(Bmask) list(xy=list(x=B$xcol, y=B$yrow)) else list() ## Convert A to mask AB <- do.call(as.mask, append(list(w=A), rasterinfo)) rxy <- rasterxy.mask(AB) x <- rxy$x y <- rxy$y ok <- inside.owin(x, y, A) & !inside.owin(x, y, B) if(!all(ok)) AB$m[] <- ok else AB <- rescue.rectangle(AB) return(AB) } ## auxiliary functions commonPolyclipArgs <- function(..., p=NULL) { # compute a common resolution for polyclip operations # on several windows if(!is.null(p) && !is.null(p$eps) && !is.null(p$x0) && !is.null(p$y0)) return(p) bb <- boundingbox(...) 
xr <- bb$xrange
yr <- bb$yrange
## %orifnull% is a spatstat infix helper: use the left operand unless it is NULL.
## Default eps: bounding-box extent scaled down to the polyclip integer grid.
eps <- p$eps %orifnull% max(diff(xr), diff(yr))/(2^31)
x0 <- p$x0 %orifnull% mean(xr)
y0 <- p$y0 %orifnull% mean(yr)
return(list(eps=eps, x0=x0, y0=y0))
}

## Shift and dilate a list of windows onto the polyclip integer grid
## described by p = list(eps, x0, y0).  Inverse of reversePolyclipArgs.
applyPolyclipArgs <- function(x, p=NULL) {
  if(is.null(p)) return(x)
  y <- lapply(x, shift, vec=-c(p$x0, p$y0))
  z <- lapply(y, scalardilate, f=1/p$eps)
  return(z)
}

## Map a single window back from the polyclip integer grid to the
## original coordinates (undoes applyPolyclipArgs).
reversePolyclipArgs <- function(x, p=NULL) {
  if(is.null(p)) return(x)
  y <- scalardilate(x, f=p$eps)
  z <- shift(y, vec=c(p$x0, p$y0))
  return(z)
}

## Restrict a binary mask M to a rectangle R by extracting the
## sub-grid of pixels whose centres fall inside R.
trim.mask <- function(M, R, tolerant=TRUE) {
  ## M is a mask,
  ## R is a rectangle
  ## Ensure R is a subset of bounding rectangle of M
  R <- owin(intersect.ranges(M$xrange, R$xrange),
            intersect.ranges(M$yrange, R$yrange))
  ## Deal with very thin rectangles
  ## (widen ranges thinner than one pixel so at least one row/column survives)
  if(tolerant) {
    R$xrange <- adjustthinrange(R$xrange, M$xstep, M$xrange)
    R$yrange <- adjustthinrange(R$yrange, M$ystep, M$yrange)
  }
  ## Extract subset of image grid
  yrowok <- inside.range(M$yrow, R$yrange)
  xcolok <- inside.range(M$xcol, R$xrange)
  if((ny <- sum(yrowok)) == 0 || (nx <- sum(xcolok)) == 0)
    return(emptywindow(R))
  Z <- M
  Z$xrange <- R$xrange
  Z$yrange <- R$yrange
  Z$yrow <- M$yrow[yrowok]
  Z$xcol <- M$xcol[xcolok]
  Z$m <- M$m[yrowok, xcolok]
  ## single-row/column subsetting drops dimensions; restore the matrix
  if(ny < 2 || nx < 2)
    Z$m <- matrix(Z$m, nrow=ny, ncol=nx)
  Z$dim <- dim(Z$m)
  return(Z)
}

## Restrict a mask M to an arbitrary window W: first trim to the frame of W,
## then switch off pixels whose centres lie outside W.
restrict.mask <- function(M, W) {
  ## M is a mask, W is any window
  stopifnot(is.mask(M))
  stopifnot(inherits(W, "owin"))
  if(is.rectangle(W)) return(trim.mask(M, W))
  M <- trim.mask(M, as.rectangle(W))
  ## Determine which pixels of M are inside W
  rxy <- rasterxy.mask(M, drop=TRUE)
  x <- rxy$x
  y <- rxy$y
  ok <- inside.owin(x, y, W)
  Mm <- M$m
  ## only the pixels currently TRUE were returned by rasterxy.mask(drop=TRUE)
  Mm[Mm] <- ok
  M$m <- Mm
  return(M)
}

# SUBSUMED IN rmhexpand.R
# expand.owin <- function(W, f=1) {
#   # expand bounding box of 'win'
#   # by factor 'f' in **area**
#   if(f <= 0)
#     stop("f must be > 0")
#   if(f == 1)
#     return(W)
#   bb <- boundingbox(W)
#   xr <- bb$xrange
#   yr <- bb$yrange
#   fff <- (sqrt(f) - 1)/2
#   Wexp <- owin(xr + fff * c(-1,1) * diff(xr),
#                yr + fff * c(-1,1) * diff(yr),
#                unitname=unitname(W))
#   return(Wexp)
#}

## Shrink a rectangle by cutting margins of the given widths off each side.
## xmargin/ymargin may be scalars (both sides) or length-2 vectors (left/right,
## bottom/top); ensure2vector normalises them.
trim.rectangle <- function(W, xmargin=0, ymargin=xmargin) {
  if(!is.rectangle(W))
    stop("Internal error: tried to trim margin off non-rectangular window")
  xmargin <- ensure2vector(xmargin)
  ymargin <- ensure2vector(ymargin)
  if(any(xmargin < 0) || any(ymargin < 0))
    stop("values of xmargin, ymargin must be nonnegative")
  if(sum(xmargin) > diff(W$xrange))
    stop("window is too small to cut off margins of the width specified")
  if(sum(ymargin) > diff(W$yrange))
    stop("window is too small to cut off margins of the height specified")
  owin(W$xrange + c(1,-1) * xmargin,
       W$yrange + c(1,-1) * ymargin,
       unitname=unitname(W))
}

## Enlarge a rectangle by adding margins; 'fraction' (scalar or length 2)
## optionally expresses the margins as fractions of the side lengths,
## and is only used for a margin the caller did not supply explicitly.
grow.rectangle <- function(W, xmargin=0, ymargin=xmargin, fraction=NULL) {
  if(!is.null(fraction)) {
    fraction <- ensure2vector(fraction)
    if(any(fraction < 0)) stop("fraction must be non-negative")
    if(missing(xmargin)) xmargin <- fraction[1L] * diff(W$xrange)
    if(missing(ymargin)) ymargin <- fraction[2L] * diff(W$yrange)
  }
  xmargin <- ensure2vector(xmargin)
  ymargin <- ensure2vector(ymargin)
  if(any(xmargin < 0) || any(ymargin < 0))
    stop("values of xmargin, ymargin must be nonnegative")
  owin(W$xrange + c(-1,1) * xmargin,
       W$yrange + c(-1,1) * ymargin,
       unitname=unitname(W))
}

## Enlarge a mask by margins: extend the pixel raster outward and pad the
## new border pixels with FALSE, keeping the original entries in place.
grow.mask <- function(M, xmargin=0, ymargin=xmargin) {
  stopifnot(is.mask(M))
  m <- as.matrix(M)
  Rplus <- grow.rectangle(as.rectangle(M), xmargin, ymargin)
  ## extend the raster
  ## (prolongseq records how many entries were added at each end
  ##  in the "nleft"/"nright" attributes)
  xcolplus <- prolongseq(M$xcol, Rplus$xrange)
  yrowplus <- prolongseq(M$yrow, Rplus$yrange)
  mplus <- matrix(FALSE, length(yrowplus), length(xcolplus))
  ## pad out the mask entries
  nleft <- attr(xcolplus, "nleft")
  nright <- attr(xcolplus, "nright")
  nbot <- attr(yrowplus, "nleft")
  ntop <- attr(yrowplus, "nright")
  mplus[ (nbot+1):(length(yrowplus)-ntop),
         (nleft+1):(length(xcolplus)-nright) ] <- m
  ## pack up
  result <- owin(xrange=Rplus$xrange,
                 yrange=Rplus$yrange,
                 xcol=as.numeric(xcolplus),
                 yrow=as.numeric(yrowplus),
                 mask=mplus,
                 unitname=unitname(M))
  return(result)
}

bdry.mask <-
function(W) {
  ## Identify the boundary pixels of a mask window.
  ## Returns a window of the same raster whose matrix entries are TRUE
  ## exactly where a TRUE pixel meets a FALSE (or off-grid) neighbour
  ## in the 4-neighbourhood.
  verifyclass(W, "owin")
  W <- as.mask(W)
  m <- W$m
  nr <- nrow(m)
  nc <- ncol(m)
  if(!spatstat.options('Cbdrymask')) {
    ## old interpreted code: compare each pixel with its shifted copies,
    ## padding with FALSE outside the grid
    b <- (m != rbind(FALSE, m[-nr, ]))
    b <- b | (m != rbind(m[-1, ], FALSE))
    b <- b | (m != cbind(FALSE, m[, -nc]))
    b <- b | (m != cbind(m[, -1], FALSE))
  } else {
    ## fast path: C implementation
    b <- integer(nr * nc)
    z <- .C("bdrymask",
            nx = as.integer(nc),
            ny = as.integer(nr),
            m = as.integer(m),
            b = as.integer(b),
            PACKAGE = "spatstat")
    b <- matrix(as.logical(z$b), nr, nc)
  }
  W$m <- b
  return(W)
}

## Generic: number of vertices of a spatial object.
nvertices <- function(x, ...) {
  UseMethod("nvertices")
}

nvertices.default <- function(x, ...) {
  ## Fall back on the 'vertices' generic; NA if it yields nothing usable.
  v <- vertices(x)
  vx <- v$x
  n <- if(is.null(vx)) NA else length(vx)
  return(n)
}

nvertices.owin <- function(x, ...) {
  ## Count vertices without materialising their coordinates:
  ## 4 for a rectangle, total polygon vertex count for polygons,
  ## number of boundary pixels for a mask.
  if(is.empty(x)) return(0)
  n <- switch(x$type,
              rectangle=4,
              polygonal=sum(lengths(lapply(x$bdry, getElement, name="x"))),
              mask=sum(bdry.mask(x)$m))
  return(n)
}

## Generic: extract vertex coordinates of a spatial object.
vertices <- function(w) {
  UseMethod("vertices")
}

vertices.owin <- function(w) {
  ## Vertices of a window: rectangle corners, polygon vertices,
  ## or centres of boundary pixels of a mask.
  ## Returns list(x, y), or NULL for an empty window.
  verifyclass(w, "owin")
  if(is.empty(w)) return(NULL)
  switch(w$type,
         rectangle={
           xr <- w$xrange
           yr <- w$yrange
           vert <- list(x=xr[c(1,2,2,1)], y=yr[c(1,1,2,2)])
         },
         polygonal={
           vert <- do.call(concatxy, w$bdry)
         },
         mask={
           bm <- bdry.mask(w)$m
           rxy <- rasterxy.mask(w)
           xx <- rxy$x
           yy <- rxy$y
           vert <- list(x=as.vector(xx[bm]),
                        y=as.vector(yy[bm]))
         })
  return(vert)
}

## Generic: diameter (largest interpoint distance) of an object.
diameter <- function(x) {
  UseMethod("diameter")
}

diameter.owin <- function(x) {
  ## Diameter of a window = maximum pairwise distance between vertices.
  ## The maximum is attained on the convex hull, so reduce to hull
  ## vertices first to keep the pairwise computation small.
  w <- as.owin(x)
  if(is.empty(w)) return(NULL)
  vert <- vertices(w)
  if(length(vert$x) > 3) {
    ## extract convex hull
    h <- with(vert, chull(x, y))
    vert <- with(vert, list(x=x[h], y=y[h]))
  }
  d <- pairdist(vert, squared=TRUE)
  return(sqrt(max(d)))
}

## Radius of the largest circle inscribed in the window W.
## For a rectangle this is half the shorter side
## (consistent with the rectangle case of incircle() below).
## BUG FIX: previously returned diameter(W)/2 for rectangles, which is
## half the *diagonal* (the circumradius), not the inradius.
inradius <- function(W) {
  stopifnot(is.owin(W))
  if(W$type == "rectangle")
    min(diff(W$xrange), diff(W$yrange))/2
  else
    max(distmap(W, invert=TRUE))
}

incircle <- function(W) {
  # computes the largest circle contained in W
  # Returns list(x, y, r): centre and radius, or NULL for an empty window.
  verifyclass(W, "owin")
  if(is.empty(W)) return(NULL)
  if(is.rectangle(W)) {
    ## exact answer for a rectangle: centre of the box,
    ## radius = half the shorter side
    xr <- W$xrange
    yr <- W$yrange
    x0 <- mean(xr)
    y0 <- mean(yr)
    radius <- min(diff(xr), diff(yr))/2
    return(list(x=x0, y=y0, r=radius))
  }
  # compute distance to boundary
  D <- distmap(W, invert=TRUE)
  D <- D[W, drop=FALSE]
  # find maximum distance
  v <- D$v
  ok <- !is.na(v)
  Dvalues <- as.vector(v[ok])
  Dmax <- max(Dvalues)
  # find location of maximum
  locn <- which.max(Dvalues)
  locrow <- as.vector(row(v)[ok])[locn]
  loccol <- as.vector(col(v)[ok])[locn]
  x0 <- D$xcol[loccol]
  y0 <- D$yrow[locrow]
  if(is.mask(W)) {
    # radius could be one pixel diameter shorter than Dmax,
    # so shrink conservatively (never below zero)
    Dpixel <- sqrt(D$xstep^2 + D$ystep^2)
    radius <- max(0, Dmax - Dpixel)
  } else radius <- Dmax
  return(list(x=x0, y=y0, r=radius))
}

inpoint <- function(W) {
  # selects a point that is always inside the window.
  # Tries, in order: rectangle centre; polygon centroid (if interior);
  # otherwise the "middle" TRUE pixel of the mask representation.
  verifyclass(W, "owin")
  if(is.empty(W)) return(NULL)
  if(is.rectangle(W)) return(c(mean(W$xrange), mean(W$yrange)))
  if(is.polygonal(W)) {
    xy <- centroid.owin(W)
    if(inside.owin(xy$x, xy$y, W)) return(xy)
  }
  W <- as.mask(W)
  Mm <- W$m
  Mrow <- as.vector(row(Mm)[Mm])
  Mcol <- as.vector(col(Mm)[Mm])
  selectmiddle <- function(x) { x[ceiling(length(x)/2)] }
  midcol <- selectmiddle(Mcol)
  midrow <- selectmiddle(Mrow[Mcol==midcol])
  x <- W$xcol[midcol]
  y <- W$yrow[midrow]
  return(c(x,y))
}

## Simplify the boundary polygon(s) of W, removing vertices closer
## together than dmin (delegates to simplify.xypolygon).
simplify.owin <- function(W, dmin) {
  verifyclass(W, "owin")
  if(is.empty(W)) return(W)
  W <- as.polygonal(W)
  W$bdry <- lapply(W$bdry, simplify.xypolygon, dmin=dmin)
  return(W)
}

is.convex <- function(x) {
  ## Is the window convex?
  ## rectangle: always; polygon: convex iff single boundary whose vertices
  ## all lie on its convex hull; mask: approximate test comparing boundary
  ## pixels with the hull, within one pixel diagonal of tolerance.
  verifyclass(x, "owin")
  if(is.empty(x)) return(TRUE)
  switch(x$type,
         rectangle={return(TRUE)},
         polygonal={
           b <- x$bdry
           if(length(b) > 1) return(FALSE)
           b <- b[[1L]]
           xx <- b$x
           yy <- b$y
           ch <- chull(xx,yy)
           return(length(ch) == length(xx))
         },
         mask={
           v <- vertices(x)
           v <- as.ppp(v, W=as.rectangle(x))
           ch <- convexhull.xy(v)
           edg <- edges(ch)
           edgedist <- nncross(v, edg, what="dist")
           pixdiam <- sqrt(x$xstep^2 + x$ystep^2)
           return(all(edgedist <= pixdiam))
         })
  return(as.logical(NA))
}

## Convex hull of a window, line segment pattern, or point pattern.
convexhull <- function(x) {
  if(inherits(x, "owin"))
    v <- vertices(x)
  else if(inherits(x, "psp"))
    v <- endpoints.psp(x)
  else
if(inherits(x, "ppp")) v <- x else { x <- as.owin(x) v <- vertices(x) } b <- as.rectangle(x) if(is.empty(x)) return(emptywindow(b)) ch <- convexhull.xy(v) out <- rebound.owin(ch, b) return(out) } is.empty <- function(x) { UseMethod("is.empty") } is.empty.default <- function(x) { length(x) == 0 } is.empty.owin <- function(x) { switch(x$type, rectangle=return(FALSE), polygonal=return(length(x$bdry) == 0), mask=return(!any(x$m))) return(NA) } emptywindow <- function(w) { w <- as.owin(w) out <- owin(w$xrange, w$yrange, poly=list(), unitname=unitname(w)) return(out) } discs <- function(centres, radii=marks(centres)/2, ..., separate=FALSE, mask=FALSE, trim=TRUE, delta=NULL, npoly=NULL) { stopifnot(is.ppp(centres)) n <- npoints(centres) if(n == 0) return(emptywindow(Frame(centres))) check.nvector(radii, npoints(centres), oneok=TRUE) stopifnot(all(radii > 0)) if(sameradius <- (length(radii) == 1)) radii <- rep(radii, npoints(centres)) if(!separate && mask) { #' compute pixel approximation M <- as.mask(Window(centres), ...) z <- .C("discs2grid", nx = as.integer(M$dim[2L]), x0 = as.double(M$xcol[1L]), xstep = as.double(M$xstep), ny = as.integer(M$dim[1L]), y0 = as.double(M$yrow[1L]), ystep = as.double(M$ystep), nd = as.integer(n), xd = as.double(centres$x), yd = as.double(centres$y), rd = as.double(radii), out = as.integer(integer(prod(M$dim))), PACKAGE = "spatstat") M$m[] <- as.logical(z$out) return(M) } #' construct a list of discs D <- list() if(!sameradius && length(unique(radii)) > 1) { if(is.null(delta) && is.null(npoly)) { ra <- range(radii) rr <- ra[2L]/ra[1L] mm <- ceiling(128/rr) mm <- max(16, mm) ## equals 16 unless ra[2]/ra[1] < 8 delta <- 2 * pi * ra[1L]/mm } for(i in 1:n) D[[i]] <- disc(centre=centres[i], radius=radii[i], delta=delta, npoly=npoly) } else { #' congruent discs -- use 'shift' W0 <- disc(centre=c(0,0), radius=radii[1L], delta=delta, npoly=npoly) for(i in 1:n) D[[i]] <- shift(W0, vec=centres[i]) } D <- as.solist(D) #' return list of discs? 
if(separate) return(D) #' return union of discs W <- union.owin(D) if(trim) W <- intersect.owin(W, Window(centres)) return(W) } harmonise.owin <- harmonize.owin <- function(...) { argz <- list(...) wins <- solapply(argz, as.owin) if(length(wins) < 2L) return(wins) ismask <- sapply(wins, is.mask) if(!any(ismask)) return(wins) comgrid <- do.call(commonGrid, lapply(argz, as.owin)) result <- solapply(argz, "[", i=comgrid, drop=FALSE) return(result) } spatstat/R/funxy.R0000644000176200001440000000605113115271075013616 0ustar liggesusers# # funxy.R # # Class of functions of x,y location with a spatial domain # # $Revision: 1.14 $ $Date: 2017/06/05 10:31:58 $ # spatstat.xy.coords <- function(x,y) { if(missing(y) || is.null(y)) { xy <- if(is.ppp(x) || is.lpp(x)) coords(x) else if(checkfields(x, c("x", "y"))) x else stop("Argument y is missing", call.=FALSE) x <- xy$x y <- xy$y } xy.coords(x,y)[c("x","y")] } funxy <- function(f, W=NULL) { stopifnot(is.function(f)) stopifnot(is.owin(W)) if(!identical(names(formals(f))[1:2], c("x", "y"))) stop("The first two arguments of f should be named x and y", call.=FALSE) if(is.primitive(f)) stop("Not implemented for primitive functions", call.=FALSE) ## copy 'f' including formals, environment, attributes h <- f ## make new function body: ## paste body of 'f' into last line of 'spatstat.xy.coords' b <- body(spatstat.xy.coords) b[[length(b)]] <- body(f) ## transplant the body body(h) <- b ## reinstate attributes attributes(h) <- attributes(f) ## stamp it class(h) <- c("funxy", class(h)) attr(h, "W") <- W attr(h, "f") <- f return(h) } print.funxy <- function(x, ...) { nama <- names(formals(x)) splat(paste0("function", paren(paste(nama,collapse=","))), "of class", sQuote("funxy")) print(as.owin(x)) splat("\nOriginal function definition:") print(attr(x, "f")) } summary.funxy <- function(object, ...) { print(object, ...) 
} as.owin.funxy <- function(W, ..., fatal=TRUE) { W <- attr(W, "W") as.owin(W, ..., fatal=fatal) } domain.funxy <- Window.funxy <- function(X, ...) { as.owin(X) } # Note that 'distfun' (and other classes inheriting from funxy) # has a method for as.owin that takes precedence over as.owin.funxy # and this will affect the behaviour of the following plot methods # because 'distfun' does not have its own plot method. plot.funxy <- function(x, ...) { xname <- short.deparse(substitute(x)) W <- as.owin(x) do.call(do.as.im, resolve.defaults(list(x, action="plot"), list(...), list(main=xname, W=W))) invisible(NULL) } contour.funxy <- function(x, ...) { xname <- deparse(substitute(x)) W <- as.owin(x) do.call(do.as.im, resolve.defaults(list(x, action="contour"), list(...), list(main=xname, W=W))) invisible(NULL) } persp.funxy <- function(x, ...) { xname <- deparse(substitute(x)) W <- as.rectangle(as.owin(x)) do.call(do.as.im, resolve.defaults(list(x, action="persp"), list(...), list(main=xname, W=W))) invisible(NULL) } hist.funxy <- function(x, ..., xname) { if(missing(xname) || is.null(xname)) xname <- short.deparse(substitute(x)) a <- do.call.matched(as.im, list(X=x, ...), c("X", "W", "dimyx", "eps", "xy", "na.replace", "strict"), sieve=TRUE) Z <- a$result do.call(hist.im, append(list(x=Z, xname=xname), a$otherargs)) } spatstat/R/subfits.R0000755000176200001440000004275713115271120014133 0ustar liggesusers# # # $Revision: 1.48 $ $Date: 2016/04/25 02:34:40 $ # # subfits.new <- local({ subfits.new <- function(object, what="models", verbose=FALSE) { stopifnot(inherits(object, "mppm")) what <- match.arg(what, c("models", "interactions", "basicmodels")) if(what == "interactions") return(subfits.old(object, what, verbose)) ## extract stuff announce <- if(verbose) Announce else Ignore announce("Extracting stuff...") fitter <- object$Fit$fitter FIT <- object$Fit$FIT trend <- object$trend #%^!ifdef RANDOMEFFECTS random <- object$random #%^!endif info <- object$Info npat <- 
object$npat Inter <- object$Inter interaction <- Inter$interaction itags <- Inter$itags Vnamelist <- object$Fit$Vnamelist has.design <- info$has.design # has.random <- info$has.random announce("done.\n") ## fitted parameters coefs.full <- coef(object) if(is.null(dim(coefs.full))) { ## fixed effects model: replicate vector to matrix coefs.names <- names(coefs.full) coefs.full <- matrix(coefs.full, byrow=TRUE, nrow=npat, ncol=length(coefs.full), dimnames=list(NULL, coefs.names)) } else { ## random/mixed effects model: coerce to matrix coefs.names <- colnames(coefs.full) coefs.full <- as.matrix(coefs.full) } ## determine which interaction(s) are active on each row announce("Determining active interactions...") active <- active.interactions(object) announce("done.\n") ## exceptions if(any(rowSums(active) > 1)) stop(paste("subfits() is not implemented for models", "in which several interpoint interactions", "are active on the same point pattern")) #%^!ifdef RANDOMEFFECTS if(!is.null(random) && any(variablesinformula(random) %in% itags)) stop(paste("subfits() is not yet implemented for models", "with random effects that involve", "the interpoint interactions")) #%^!endif ## implied coefficients for each active interaction announce("Computing implied coefficients...") implcoef <- list() for(tag in itags) { announce(tag) implcoef[[tag]] <- impliedcoefficients(object, tag) announce(", ") } announce("done.\n") ## Fisher information and vcov fisher <- varcov <- NULL if(what == "models") { announce("Fisher information...") fisher <- vcov(object, what="fisher", err="null") varcov <- try(solve(fisher), silent=TRUE) if(inherits(varcov, "try-error")) varcov <- NULL announce("done.\n") } ## Extract data frame announce("Extracting data...") datadf <- object$datadf rownames <- object$Info$rownames announce("done.\n") ## set up lists for results models <- rep(list(NULL), npat) interactions <- rep(list(NULL), npat) ## interactions announce("Determining interactions...") pstate <- 
list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) ## Find relevant interaction acti <- active[i,] nactive <- sum(acti) interi <- if(nactive == 0) Poisson() else interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] ## Find relevant coefficients coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. coefs.avail[names(coefs.implied)] <- coefs.implied } ## create fitted interaction with these coefficients vni <- if(nactive > 0) Vnamelist[[tagi]] else character(0) interactions[[i]] <- fii(interi, coefs.avail, vni) } announce("Done!\n") names(interactions) <- rownames ## if(what=="interactions") return(interactions) ## Extract data required to reconstruct complete model fits announce("Extracting more data...") data <- object$data Y <- object$Y Yname <- info$Yname moadf <- object$Fit$moadf fmla <- object$Fit$fmla ## deal with older formats of mppm if(is.null(Yname)) Yname <- info$Xname if(is.null(Y)) Y <- data[ , Yname, drop=TRUE] ## used.cov.names <- info$used.cov.names has.covar <- info$has.covar if(has.covar) { covariates.hf <- data[, used.cov.names, drop=FALSE] dfvar <- used.cov.names %in% names(datadf) } announce("done.\n") ## Construct template for fake ppm object spv <- package_version(versionstring.spatstat()) fake.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2016/04/25 02:34:40 $") fake.call <- call("cannot.update", Q=NULL, trend=trend, interaction=NULL, covariates=NULL, correction=object$Info$correction, rbord = object$Info$rbord) fakemodel <- list( method = "mpl", fitter = fitter, coef = coef(object), trend = object$trend, interaction = NULL, fitin = NULL, Q = NULL, maxlogpl = NA, internal = list(glmfit = FIT, glmdata = NULL, Vnames = NULL, fmla = fmla, computed = list()), covariates = 
NULL, correction = object$Info$correction, rbord = object$Info$rbord, version = fake.version, problems = list(), fisher = fisher, varcov = varcov, call = fake.call, callstring = "cannot.update()", fake = TRUE) class(fakemodel) <- "ppm" ## Loop through point patterns announce("Generating models for each row...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) Yi <- Y[[i]] Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window ## assemble relevant covariate images covariates <- if(has.covar) covariates.hf[i, , drop=TRUE, strip=FALSE] else NULL if(has.covar && has.design) ## Convert each data frame covariate value to an image covariates[dfvar] <- lapply(covariates[dfvar], as.im, W=Wi) ## Extract relevant interaction finte <- interactions[[i]] inte <- finte$interaction if(is.poisson.interact(inte)) inte <- NULL Vnames <- finte$Vnames if(length(Vnames) == 0) Vnames <- NULL ## Construct fake ppm object fakemodel$interaction <- inte fakemodel$fitin <- finte fakemodel$Q <- Yi fakemodel$covariates <- covariates fakemodel$internal$glmdata <- moadf[moadf$id == i, ] fakemodel$internal$Vnames <- Vnames fake.call$Q <- Yi fake.call$covariates <- covariates fakemodel$call <- fake.call fakemodel$callstring <- short.deparse(fake.call) ## store in list models[[i]] <- fakemodel } announce("done.\n") names(models) <- rownames models <- as.anylist(models) return(models) } Announce <- function(...) cat(...) Ignore <- function(...) 
{ NULL } subfits.new }) ## ///////////////////////////////////////////////////// subfits <- subfits.old <- local({ subfits.old <- function(object, what="models", verbose=FALSE) { stopifnot(inherits(object, "mppm")) what <- match.arg(what, c("models","interactions", "basicmodels")) ## extract stuff announce <- if(verbose) Announce else Ignore announce("Extracting stuff...") trend <- object$trend random <- object$random use.gam <- object$Fit$use.gam info <- object$Info npat <- object$npat Inter <- object$Inter interaction <- Inter$interaction itags <- Inter$itags Vnamelist <- object$Fit$Vnamelist has.design <- info$has.design has.random <- info$has.random moadf <- object$Fit$moadf announce("done.\n") ## levels of any factors levelslist <- lapply(as.list(moadf), levelsAsFactor) isfactor <- !sapply(levelslist, is.null) ## fitted parameters coefs.full <- coef(object) if(is.null(dim(coefs.full))) { ## fixed effects model: replicate vector to matrix coefs.names <- names(coefs.full) coefs.full <- matrix(coefs.full, byrow=TRUE, nrow=npat, ncol=length(coefs.full), dimnames=list(NULL, coefs.names)) } else { ## random/mixed effects model: coerce to matrix coefs.names <- colnames(coefs.full) coefs.full <- as.matrix(coefs.full) } ## determine which interaction(s) are active on each row announce("Determining active interactions...") active <- active.interactions(object) announce("done.\n") ## exceptions if(any(rowSums(active) > 1)) stop(paste("subfits() is not implemented for models", "in which several interpoint interactions", "are active on the same point pattern")) #%^!ifdef RANDOMEFFECTS if(!is.null(random) && any(variablesinformula(random) %in% itags)) stop(paste("subfits() is not yet implemented for models", "with random effects that involve", "the interpoint interactions")) #%^!endif ## implied coefficients for each active interaction announce("Computing implied coefficients...") implcoef <- list() for(tag in itags) { announce(tag) implcoef[[tag]] <- 
impliedcoefficients(object, tag) announce(", ") } announce("done.\n") ## Fisher information and vcov fisher <- varcov <- NULL if(what == "models") { announce("Fisher information...") fisher <- vcov(object, what="fisher", err="null") varcov <- try(solve(fisher), silent=TRUE) if(inherits(varcov, "try-error")) varcov <- NULL announce("done.\n") } ## Extract data frame announce("Extracting data...") datadf <- object$datadf rownames <- object$Info$rownames announce("done.\n") ## set up list for results results <- rep(list(NULL), npat) if(what == "interactions") { announce("Determining interactions...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) ## Find relevant interaction acti <- active[i,] nactive <- sum(acti) interi <- if(nactive == 0) Poisson() else interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] ## Find relevant coefficients coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. 
coefs.avail[names(coefs.implied)] <- coefs.implied } ## create fitted interaction with these coefficients vni <- if(nactive > 0) Vnamelist[[tagi]] else character(0) results[[i]] <- fii(interi, coefs.avail, vni) } announce("Done!\n") names(results) <- rownames return(results) } ## Extract data required to reconstruct complete model fits announce("Extracting more data...") data <- object$data Y <- object$Y Yname <- info$Yname ## deal with older formats of mppm if(is.null(Yname)) Yname <- info$Xname if(is.null(Y)) Y <- data[ , Yname, drop=TRUE] ## used.cov.names <- info$used.cov.names has.covar <- info$has.covar if(has.covar) { covariates.hf <- data[, used.cov.names, drop=FALSE] dfvar <- used.cov.names %in% names(datadf) } announce("done.\n") ## Loop through point patterns announce("Looping through rows...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) Yi <- Y[[i]] Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window ## assemble relevant covariate images if(!has.covar) { covariates <- NULL } else { covariates <- covariates.hf[i, , drop=TRUE, strip=FALSE] if(has.design) { ## Convert each data frame covariate value to an image imrowi <- lapply(covariates[dfvar], as.im, W=Wi) ## Problem: constant covariate leads to singular fit ## --------------- Hack: --------------------------- ## Construct fake data by resampling from possible values covar.vals <- lapply(as.list(covariates[dfvar, drop=FALSE]), possible) fake.imrowi <- lapply(covar.vals, scramble, W=Wi, Y=Yi$data) ## insert fake data into covariates covariates[dfvar] <- fake.imrowi ## ------------------ end hack ---------------------------- } ## identify factor-valued spatial covariates spatialfactors <- !dfvar & isfactor[names(covariates)] if(any(spatialfactors)) { ## problem: factor levels may be dropped ## more fakery... 
spfnames <- names(spatialfactors)[spatialfactors] covariates[spatialfactors] <- lapply(levelslist[spfnames], scramble, W=Wi, Y=Yi$data) } } ## Fit ppm to data for case i only ## using relevant interaction acti <- active[i,] nactive <- sum(acti) if(nactive == 1){ interi <- interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] fiti <- PiPiM(Yi, trend, interi, covariates=covariates, allcovar=has.random, use.gam=use.gam, vnamebase=tagi, vnameprefix=tagi) } else { fiti <- PiPiM(Yi, trend, Poisson(), covariates=covariates, allcovar=has.random, use.gam=use.gam) } ## fiti determines which coefficients are required coefi.fitted <- fiti$coef coefnames.wanted <- names(coefi.fitted) ## take the required coefficients from the full mppm fit coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. coefs.avail[names(coefs.implied)] <- coefs.implied } if(!all(coefnames.wanted %in% names(coefs.avail))) stop("Internal error: some fitted coefficients not accessible") coefi.new <- coefs.avail[coefnames.wanted] ## reset coefficients fiti$coef.orig <- coefi.fitted ## (detected by summary.ppm, predict.ppm) fiti$theta <- fiti$coef <- coefi.new fiti$method <- "mppm" ## ... 
and replace fake data by true data if(has.design) { for(nam in names(imrowi)) { fiti$covariates[[nam]] <- imrowi[[nam]] fiti$internal$glmdata[[nam]] <- data[i, nam, drop=TRUE] } } ## Adjust rank of glm fit object # fiti$internal$glmfit$rank <- FIT$rank fiti$internal$glmfit$rank <- sum(is.finite(fiti$coef)) ## Fisher information and variance-covariance if known ## Extract submatrices for relevant parameters if(!is.null(fisher)) fiti$fisher <- fisher[coefnames.wanted, coefnames.wanted, drop=FALSE] if(!is.null(varcov)) fiti$varcov <- varcov[coefnames.wanted, coefnames.wanted, drop=FALSE] ## store in list results[[i]] <- fiti } announce("done.\n") names(results) <- rownames results <- as.anylist(results) return(results) } PiPiM <- function(Y, trend, inter, covariates, ..., allcovar=FALSE, use.gam=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL) { # This ensures that the model is fitted in a unique environment # so that it can be updated later. force(Y) force(trend) force(inter) force(covariates) force(allcovar) force(use.gam) force(vnamebase) force(vnameprefix) feet <- ppm(Y, trend, inter, covariates=covariates, allcovar=allcovar, use.gam=use.gam, forcefit=TRUE, vnamebase=vnamebase, vnameprefix=vnameprefix) return(feet) } possible <- function(z) { if(!is.factor(z)) unique(z) else factor(levels(z), levels=levels(z)) } scramble <- function(vals, W, Y) { W <- as.mask(W) npixels <- prod(W$dim) nvalues <- length(vals) npts <- npoints(Y) ## sample the possible values randomly at the non-data pixels sampled <- sample(vals, npixels, replace=TRUE) Z <- im(sampled, xcol=W$xcol, yrow=W$yrow) ## repeat the possible values cyclically at the data points if(npts >= 1) Z[Y] <- vals[1 + ((1:npts) %% nvalues)] return(Z) } Announce <- function(...) cat(...) Ignore <- function(...) { NULL } subfits.old }) cannot.update <- function(...) 
{ stop("This model cannot be updated") } spatstat/R/clarkevans.R0000755000176200001440000001531413115271075014603 0ustar liggesusers## clarkevans.R ## Clark-Evans statistic and test ## $Revision: 1.17 $ $Date: 2015/10/19 05:03:37 $ clarkevans <- function(X, correction=c("none", "Donnelly", "cdf"), clipregion=NULL) { verifyclass(X, "ppp") W <- X$window # validate correction argument gavecorrection <- !missing(correction) correction <- pickoption("correction", correction, c(none="none", Donnelly="Donnelly", donnelly="Donnelly", guard="guard", cdf="cdf"), multi=TRUE) if(("Donnelly" %in% correction) && (W$type != "rectangle")) { if(gavecorrection) warning("Donnelly correction only available for rectangular windows") correction <- correction[correction != "Donnelly"] } # guard correction applied iff `clipregion' is present isguard <- "guard" %in% correction askguard <- any(isguard) gaveguard <- !is.null(clipregion) if(gaveguard) clipregion <- as.owin(clipregion) if(askguard && !gaveguard) { warning("guard correction not performed; clipregion not specified") correction <- correction[!isguard] } else if(gaveguard && !askguard) correction <- c(correction, "guard") result <- clarkevansCalc(X, correction, clipregion) if(length(result) == 1L) result <- unname(result) return(result) } clarkevans.test <- function(X, ..., correction="none", clipregion=NULL, alternative=c("two.sided", "less", "greater", "clustered", "regular"), nsim=999 ) { Xname <- short.deparse(substitute(X)) miss.nsim <- missing(nsim) verifyclass(X, "ppp") W <- Window(X) nX <- npoints(X) # validate SINGLE correction correction <- pickoption("correction", correction, c(none="none", Donnelly="Donnelly", donnelly="Donnelly", guard="guard", cdf="cdf")) switch(correction, none={ corrblurb <- "No edge correction" }, Donnelly={ if(W$type != "rectangle") stop("Donnelly correction only available for rectangular windows") corrblurb <- "Donnelly correction" }, guard={ if(is.null(clipregion)) stop("clipregion not 
specified") clipregion <- as.owin(clipregion) corrblurb <- "Guard correction" }, cdf={ corrblurb <- "CDF correction" }) # alternative hypothesis if(missing(alternative) || is.null(alternative)) alternative <- "two.sided" alternative <- pickoption("alternative", alternative, c(two.sided="two.sided", less="less", clustered="less", greater="greater", regular="greater")) altblurb <- switch(alternative, two.sided="two-sided", less="clustered (R < 1)", greater="regular (R > 1)") # compute observed value statistic <- clarkevansCalc(X, correction=correction, clipregion=clipregion, working=TRUE) working <- attr(statistic, "working") # if(correction == "none" && miss.nsim) { # standard Normal p-value SE <- with(working, sqrt(((4-pi)*areaW)/(4 * pi))/npts) Z <- with(working, (Dobs - Dpois)/SE) p.value <- switch(alternative, less=pnorm(Z), greater=1 - pnorm(Z), two.sided= 2*(1-pnorm(abs(Z)))) pvblurb <- "Z-test" } else { # Monte Carlo p-value sims <- numeric(nsim) for(i in 1:nsim) { Xsim <- runifpoint(nX, win=W) sims[i] <- clarkevansCalc(Xsim, correction=correction, clipregion=clipregion) } p.upper <- (1 + sum(sims >= statistic))/(1.0 + nsim) p.lower <- (1 + sum(sims <= statistic))/(1.0 + nsim) p.value <- switch(alternative, less=p.lower, greater=p.upper, two.sided=min(1, 2*min(p.lower, p.upper))) pvblurb <- paste("Monte Carlo test based on", nsim, "simulations of CSR with fixed n") } statistic <- as.numeric(statistic) names(statistic) <- "R" out <- list(statistic=statistic, p.value=p.value, alternative=altblurb, method=c("Clark-Evans test", corrblurb, pvblurb), data.name=Xname) class(out) <- "htest" return(out) } clarkevansCalc <- function(X, correction="none", clipregion=NULL, working=FALSE) { # calculations for Clark-Evans index or test W <- Window(X) areaW <- area(W) npts <- npoints(X) intensity <- npts/areaW # R undefined for empty point pattern if(npts == 0) return(NA) # Dobs = observed mean nearest neighbour distance nndistX <- nndist(X) Dobs <- mean(nndistX) # Dpois = 
Expected mean nearest neighbour distance for Poisson process Dpois <- 1/(2*sqrt(intensity)) statistic <- NULL if(working) work <- list(areaW=areaW, npts=npts, intensity=intensity, Dobs=Dobs, Dpois=Dpois) # Naive uncorrected value if("none" %in% correction) { Rnaive <- Dobs/Dpois statistic <- c(statistic, naive=Rnaive) } # Donnelly edge correction if("Donnelly" %in% correction) { # Dedge = Edge corrected mean nearest neighbour distance, Donnelly 1978 if(W$type == "rectangle") { perim <- perimeter(W) Dkevin <- Dpois + (0.0514+0.0412/sqrt(npts))*perim/npts Rkevin <- Dobs/Dkevin if(working) work <- append(work, list(perim=perim, Dkevin=Dkevin)) } else Rkevin <- NA statistic <- c(statistic, Donnelly=Rkevin) } # guard area method if("guard" %in% correction && !is.null(clipregion)) { # use nn distances from points inside `clipregion' ok <- inside.owin(X, , clipregion) Dguard <- mean(nndistX[ok]) Rguard <- Dguard/Dpois if(working) work <- append(work, list(Dguard=Dguard)) statistic <- c(statistic, guard=Rguard) } if("cdf" %in% correction) { # compute mean of estimated nearest-neighbour distance distribution G G <- Gest(X) numer <- stieltjes(function(x){x}, G)$km denom <- stieltjes(function(x){rep.int(1, length(x))}, G)$km Dcdf <- numer/denom Rcdf <- Dcdf/Dpois if(working) work <- append(work, list(Dcdf=Dcdf)) statistic <- c(statistic, cdf=Rcdf) } if(working) attr(statistic, "working") <- work return(statistic) } spatstat/R/hypersub.R0000755000176200001440000001422513115271075014313 0ustar liggesusers## ## hypersub.R ## ## ## subset operations for hyperframes ## ## $Revision: 1.25 $ $Date: 2017/02/07 07:35:32 $ ## "[.hyperframe" <- function(x, i, j, drop=FALSE, strip=drop, ...) 
{ x <- unclass(x) if(!missing(i)) { y <- x y$df <- x$df[i, , drop=FALSE] y$ncases <- nrow(y$df) y$hypercolumns <- lapply(x$hypercolumns, "[", i=i) x <- y } if(!missing(j)) { y <- x patsy <- seq_len(y$nvars) names(patsy) <- y$vname jj <- patsy[j] names(jj) <- NULL y$nvars <- length(jj) y$vname <- vname <- x$vname[jj] y$vtype <- vtype <- x$vtype[jj] y$vclass <- x$vclass[jj] if(ncol(x$df) != 0) y$df <- x$df[ , vname[vtype == "dfcolumn"], drop=FALSE] y$hyperatoms <- x$hyperatoms[ vname[ vtype == "hyperatom" ]] y$hypercolumns <- x$hypercolumns[ vname [ vtype == "hypercolumn" ] ] x <- y } if(drop) { nrows <- x$ncases ncols <- x$nvars if(nrows == 1 && ncols == 1 && strip) { ## return a single object y <- switch(as.character(x$vtype), dfcolumn = x$df[, , drop=TRUE], hypercolumn = (x$hypercolumns[[1L]])[[1L]], hyperatom = x$hyperatoms[[1L]]) return(y) } else if(nrows == 1) { ## return the row as a vector or a list if(strip && all(x$vtype == "dfcolumn")) return(x$df[ , , drop=TRUE]) n <- x$nvars y <- vector(mode="list", length=n) names(y) <- nama <- x$vname for(i in seq_len(n)) { nami <- nama[i] y[[i]] <- switch(as.character(x$vtype[i]), dfcolumn = x$df[ , nami, drop=TRUE], hyperatom = x$hyperatoms[[nami]], hypercolumn = (x$hypercolumns[[nami]])[[1L]] ) } return(as.solist(y, demote=TRUE)) } else if(ncols == 1) { ## return a column as an 'anylist'/'solist' or a vector switch(as.character(x$vtype), dfcolumn = { return(x$df[, , drop=TRUE]) }, hypercolumn = { y <- as.solist(x$hypercolumns[[1L]], demote=TRUE) names(y) <- row.names(x$df) return(y) }, hyperatom = { ## replicate it to make a hypercolumn ha <- x$hyperatoms[1L] names(ha) <- NULL hc <- rep.int(ha, x$ncases) hc <- as.solist(hc, demote=TRUE) names(hc) <- row.names(x$df) return(hc) } ) } } class(x) <- c("hyperframe", class(x)) return(x) } "$.hyperframe" <- function(x,name) { m <- match(name, unclass(x)$vname) if(is.na(m)) return(NULL) return(x[, name, drop=TRUE, strip=FALSE]) } "$<-.hyperframe" <- function(x, name, value) 
{ y <- as.list(x) if(is.hyperframe(value)) { if(ncol(value) == 1) { y[name] <- as.list(value) } else { y <- insertinlist(y, name, as.list(value)) } } else { dfcol <- is.atomic(value) && (is.vector(value) || is.factor(value)) if(!dfcol && !is.null(value)) value <- as.list(value) y[[name]] <- value } z <- do.call(hyperframe, append(y, list(row.names=row.names(x), stringsAsFactors=FALSE))) return(z) } "[<-.hyperframe" <- function (x, i, j, value) { sumry <- summary(x) colnam <- sumry$col.names dimx <- sumry$dim igiven <- !missing(i) jgiven <- !missing(j) if(!igiven) i <- seq_len(dimx[1L]) if(!jgiven) j <- seq_len(dimx[2L]) # singlerow <- ((is.integer(i) && length(i) == 1 && i > 0) # || (is.character(i) && length(i) == 1) # || (is.logical(i) && sum(i) == 1)) singlecolumn <- ((is.integer(j) && length(j) == 1 && j > 0) || (is.character(j) && length(j) == 1) || (is.logical(j) && sum(j) == 1)) if(!igiven && jgiven) { # x[, j] <- value if(singlecolumn) { # expecting single hypercolumn if(is.logical(j)) j <- names(x)[j] y <- get("$<-.hyperframe")(x, j, value) } else { # expecting hyperframe xlist <- as.list(x) xlist[j] <- as.list(as.hyperframe(value)) # the above construction accepts all indices including extra entries y <- do.call(hyperframe, append(xlist, list(row.names=row.names(x)))) } } else { ## x[, ] <- value or x[i, ] <- value or x[i,j] <- value ## convert indices to positive integers rowseq <- seq_len(dimx[1L]) colseq <- seq_len(dimx[2L]) names(rowseq) <- row.names(x) names(colseq) <- colnam I <- rowseq[i] J <- colseq[j] ## convert to lists xlist <- as.list(x) hv <- if(is.hyperframe(value)) value else as.hyperframe(as.solist(value, demote=TRUE)) vlist <- as.list(hv) nrowV <- dim(hv)[1L] ncolV <- dim(hv)[2L] if(nrowV != length(I)) { if(nrowV == 1) { ## replicate vlist <- lapply(vlist, rep, times=nrowV) } else stop(paste("Replacement value has wrong number of rows:", nrowV, "should be", length(I)), call.=FALSE) } if(ncolV != length(J)) { if(ncolV == 1) { ## replicate 
vlist <- rep(vlist, times=ncolV) } else stop(paste("Replacement value has wrong number of columns:", ncolV, "should be", length(J)), call.=FALSE) } ## replace entries for(jj in J) xlist[[jj]][I] <- vlist[[jj]][I] ## put back together y <- do.call(hyperframe, append(xlist, list(row.names=row.names(x)))) } return(y) } split.hyperframe <- local({ split.hyperframe <- function(x, f, drop=FALSE, ...) { y <- data.frame(id=seq_len(nrow(x))) z <- split(y, f, drop=drop) z <- lapply(z, getElement, name="id") out <- lapply(z, indexi, x=x) return(out) } indexi <- function(i, x) x[i,] split.hyperframe }) "split<-.hyperframe" <- function(x, f, drop=FALSE, ..., value) { ix <- split(seq_len(nrow(x)), f, drop = drop, ...) n <- length(value) j <- 0 for (i in ix) { j <- j%%n + 1L x[i, ] <- value[[j]] } x } spatstat/R/compileK.R0000755000176200001440000000761313115271075014220 0ustar liggesusers# compileK # # Function to take a matrix of pairwise distances # and compile a 'K' function in the format required by spatstat. # # $Revision: 1.9 $ $Date: 2017/06/05 10:31:58 $ # ------------------------------------------------------------------- compileK <- function(D, r, weights=NULL, denom=1, check=TRUE, ratio=FALSE, fname="K") { # process r values breaks <- breakpts.from.r(r) rmax <- breaks$max r <- breaks$r # check that D is a symmetric matrix with nonnegative entries if(check) stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0)) # ignore the diagonal; throw away any D values greater than rmax ok <- (D <= rmax & D > 0) Dvalues <- D[ok] # # weights? 
if(!is.null(weights)) { stopifnot(is.matrix(weights) && all(dim(weights)==dim(D))) wvalues <- weights[ok] } else wvalues <- NULL # count the number of D values in each interval (r[k], r[k+1L]] counts <- whist(Dvalues, breaks=breaks$val, weights=wvalues) # cumulative counts: number of D values in [0, r[k]) Kcount <- cumsum(counts) # divide by appropriate denominator Kratio <- Kcount/denom # wrap it up as an 'fv' object for use in spatstat df <- data.frame(r=r, est=Kratio) if(!ratio) { K <- fv(df, "r", quote(K(r)), "est", . ~ r , c(0,rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname=fname) } else { num <- data.frame(r=r, est=Kcount) den <- data.frame(r=r, est=denom) K <- ratfv(num, den, "r", quote(K(r)), "est", . ~ r , c(0,rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname=fname) } return(K) } compilepcf <- function(D, r, weights=NULL, denom=1, check=TRUE, endcorrect=TRUE, ratio=FALSE, ..., fname="g") { # process r values breaks <- breakpts.from.r(r) if(!breaks$even) stop("compilepcf: r values must be evenly spaced", call.=FALSE) r <- breaks$r rmax <- breaks$max # check that D is a symmetric matrix with nonnegative entries if(check) stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0)) # ignore the diagonal; throw away any D values greater than rmax ok <- (D <= rmax & D > 0) Dvalues <- D[ok] # # weights? if(!is.null(weights)) { stopifnot(is.matrix(weights) && all(dim(weights)==dim(D))) wvalues <- weights[ok] totwt <- sum(wvalues) normwvalues <- wvalues/totwt } else { nv <- length(Dvalues) normwvalues <- rep.int(1/nv, nv) totwt <- nv } # form kernel estimate rmin <- min(r) rmax <- max(r) nr <- length(r) den <- density(Dvalues, weights=normwvalues, from=rmin, to=rmax, n=nr, ...) 
gval <- den$y * totwt # normalise gval <- gval/denom # edge effect correction at r = 0 if(endcorrect) { one <- do.call(density, resolve.defaults( list(seq(rmin,rmax,length=512)), list(bw=den$bw, adjust=1), list(from=rmin, to=rmax, n=nr), list(...))) onefun <- approxfun(one$x, one$y, rule=2) gval <- gval /((rmax-rmin) * onefun(den$x)) } # wrap it up as an 'fv' object for use in spatstat df <- data.frame(r=r, est=gval) if(!ratio) { g <- fv(df, "r", quote(g(r)), "est", . ~ r , c(0,rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname=fname) } else { num <- data.frame(r=r, est=gval * denom) den <- data.frame(r=r, est=denom) g <- ratfv(num, den, "r", quote(g(r)), "est", . ~ r , c(0,rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname=fname) } attr(g, "bw") <- den$bw return(g) } spatstat/R/vcov.kppm.R0000755000176200001440000001201113115271120014353 0ustar liggesusers# # vcov.kppm # # vcov method for kppm objects # # Original code: Abdollah Jalilian # # $Revision: 1.10 $ $Date: 2015/07/11 08:19:26 $ # vcov.kppm <- function(object, ..., what=c("vcov", "corr", "fisher", "internals"), fast = NULL, rmax = NULL, eps.rmax = 0.01, verbose = TRUE) { what <- match.arg(what) verifyclass(object, "kppm") fast.given <- !is.null(fast) if(is.null(object$improve)) { ## Normal composite likelihood (poisson) case ## extract composite likelihood results po <- object$po ## ensure it was fitted with quadscheme if(is.null(getglmfit(po))) { warning("Re-fitting model with forcefit=TRUE") po <- update(po, forcefit=TRUE) } ## extract quadrature scheme information Q <- quad.ppm(po) U <- union.quad(Q) nU <- npoints(U) wt <- w.quad(Q) ## compute fitted intensity values lambda <- fitted(po, type="lambda") ## extract covariate values Z <- model.matrix(po) ## evaluate integrand ff <- Z * lambda * wt ## extract pcf g <- pcfmodel(object) ## resolve options for algorithm maxmat <- spatstat.options("maxmatrix") 
if(!fast.given) { fast <- (nU^2 > maxmat) } else stopifnot(is.logical(fast)) ## attempt to compute large matrix: pair correlation function minus 1 if(!fast) { gminus1 <- there.is.no.try( matrix(g(c(pairdist(U))) - 1, nU, nU) ) } else { if(is.null(rmax)){ diamwin <- diameter(as.owin(U)) fnc <- get("fnc", envir = environment(improve.kppm)) rmax <- if(fnc(diamwin, eps.rmax, g) >= 0) diamwin else uniroot(fnc, lower = 0, upper = diamwin, eps=eps.rmax, g=g)$root } cp <- there.is.no.try( crosspairs(U,U,rmax,what="ijd") ) gminus1 <- if(is.null(cp)) NULL else sparseMatrix(i=cp$i, j=cp$j, x=g(cp$d) - 1, dims=c(nU, nU)) } ## compute quadratic form if(!is.null(gminus1)) { E <- t(ff) %*% gminus1 %*% ff } else { ## split calculation of (gminus1 %*% ff) into blocks nrowperblock <- max(1, floor(maxmat/nU)) nblocks <- ceiling(nU/nrowperblock) g1ff <- NULL if(verbose) { splat("Splitting large matrix calculation into", nblocks, "blocks") pstate <- list() } if(!fast) { for(k in seq_len(nblocks)) { if(verbose) pstate <- progressreport(k, nblocks, state=pstate) istart <- nrowperblock * (k-1) + 1 iend <- min(nrowperblock * k, nU) ii <- istart:iend gm1 <- matrix(g(c(crossdist(U[ii], U))) - 1, iend-istart+1, nU) g1ff <- rbind(g1ff, gm1 %*% ff) } } else { for(k in seq_len(nblocks)) { if(verbose) pstate <- progressreport(k, nblocks, state=pstate) istart <- nrowperblock * (k-1) + 1 iend <- min(nrowperblock * k, nU) ii <- istart:iend cp <- crosspairs(U[ii], U, rmax, what="ijd") gm1 <- sparseMatrix(i=cp$i, j=cp$j, x=g(cp$d) - 1, dims=c(iend-istart+1, nU)) g1ff <- rbind(g1ff, as.matrix(gm1 %*% ff)) } } E <- t(ff) %*% g1ff } ## asymptotic covariance matrix in the Poisson case J <- t(Z) %*% ff J.inv <- try(solve(J)) ## could be singular if(inherits(J.inv, "try-error")) { if(what == "internals") { return(list(ff=ff, J=J, E=E, J.inv=NULL)) } else { return(NULL) } } ## asymptotic covariance matrix in the clustered case vc <- J.inv + J.inv %*% E %*% J.inv } else { ## Case of quasi-likelihood (or other 
things from improve.kppm) run <- is.null(object$vcov) || (!is.null(fast) && (fast != object$improve$fast.vcov)) if(run){ ## Calculate vcov if it hasn't already been so ## or if option fast differs from fast.vcov args <- object$improve internal <- what=="internals" if(!is.null(fast)) args$fast.vcov <- fast object <- with(args, improve.kppm(object, type = type, rmax = rmax, dimyx = dimyx, fast = fast, vcov = TRUE, fast.vcov = fast.vcov, maxIter = 0, save.internals = internal)) } vc <- object$vcov } ## Convert from Matrix to ordinary matrix: vc <- as.matrix(vc) switch(what, vcov={ return(vc) }, corr={ sd <- sqrt(diag(vc)) co <- vc/outer(sd, sd, "*") return(co) }, fisher={ fish <- try(solve(vc)) if(inherits(fish, "try-error")) fish <- NULL return(fish) }, internals={ return(list(ff=ff, J=J, E=E, J.inv=J.inv, vc=vc)) }) stop(paste("Unrecognised option: what=", what)) } spatstat/R/residuals.mppm.R0000755000176200001440000000566413115271120015413 0ustar liggesusers# # residuals.mppm.R # # computes residuals for fitted multiple point process model # # # $Revision: 1.5 $ $Date: 2015/01/29 06:44:26 $ # residuals.mppm <- function(object, type="raw", ..., fittedvalues = fitted.mppm(object)) { verifyclass(object, "mppm") userfitted <- !missing(fittedvalues) type <- pickoption("type", type, c(inverse="inverse", raw="raw", pearson="pearson", Pearson="pearson")) typenames <- c(inverse="inverse-lambda residuals", raw="raw residuals", pearson="Pearson residuals") typename <- typenames[[type]] # Extract quadrature points and weights Q <- quad.mppm(object) # U <- lapply(Q, union.quad) # quadrature point patterns Z <- unlist(lapply(Q, is.data)) # indicator data/dummy W <- unlist(lapply(Q, w.quad)) # quadrature weights # total number of quadrature points nquadrature <- length(W) # number of quadrature points in each pattern nU <- unlist(lapply(Q, n.quad)) # number of rows of hyperframe npat <- object$npat # attribution of each quadrature point id <- factor(rep(1:npat, nU), 
levels=1:npat) # Compute fitted conditional intensity at quadrature points if(!is.list(fittedvalues) || length(fittedvalues) != npat) stop(paste(sQuote("fittedvalues"), "should be a list of length", npat, "containing vectors of fitted values")) lambda <- unlist(fittedvalues) # consistency check if(length(lambda) != nquadrature) stop(paste(if(!userfitted) "internal error:" else NULL, "number of fitted values", paren(length(lambda)), "does not match number of quadrature points", paren(nquadrature))) # indicator is 1 if lambda > 0 # (adjusted for numerical behaviour of predict.glm) indicator <- (lambda > .Machine$double.eps) # Evaluate residual measure components discrete <- ifelse(Z, switch(type, raw = 1, inverse = 1/lambda, pearson = 1/sqrt(lambda) ), 0) density <- switch(type, raw = -lambda, inverse = -indicator, pearson = -indicator * sqrt(lambda)) atoms <- as.logical(Z) # All components resdf <- data.frame(discrete=discrete, density=density, atoms=atoms) # Split residual data according to point pattern affiliation splitres <- split(resdf, id) # Associate with quadrature scheme reshf <- hyperframe(R=splitres, Q=Q) # Convert to signed measures answer <- with(reshf, msr(Q, R$discrete[R$atoms], R$density)) # tag answer <- lapply(answer, "attr<-", which="type", value=type) answer <- lapply(answer, "attr<-", which="typename", value=typename) return(as.solist(answer)) } spatstat/R/ewcdf.R0000755000176200001440000000744513151421247013546 0ustar liggesusers# # ewcdf.R # # $Revision: 1.12 $ $Date: 2017/08/30 02:04:46 $ # # With contributions from Kevin Ummel # ewcdf <- function(x, weights=rep(1/length(x), length(x))) { nw <- length(weights) nx <- length(x) if(nw == 0) { weights <- rep(1/nx, nx) } else if(nw == 1) { weights <- rep(weights, nx) } else if(nw != nx) stopifnot(length(x) == length(weights)) # remove NA's together nbg <- is.na(x) x <- x[!nbg] weights <- weights[!nbg] n <- length(x) if (n < 1) stop("'x' must have 1 or more non-missing values") 
stopifnot(all(weights >= 0)) # sort in increasing order of x value ox <- fave.order(x) x <- x[ox] w <- weights[ox] # find jump locations and match vals <- sort(unique(x)) xmatch <- factor(match(x, vals), levels=seq_along(vals)) # sum weight in each interval wmatch <- tapply(w, xmatch, sum) wmatch[is.na(wmatch)] <- 0 cumwt <- cumsum(wmatch) # make function rval <- approxfun(vals, cumwt, method = "constant", yleft = 0, yright = sum(wmatch), f = 0, ties = "ordered") class(rval) <- c("ewcdf", "ecdf", "stepfun", class(rval)) assign("w", w, envir=environment(rval)) attr(rval, "call") <- sys.call() return(rval) } # Hacked from stats:::print.ecdf print.ewcdf <- function (x, digits = getOption("digits") - 2L, ...) { cat("Weighted empirical CDF \nCall: ") print(attr(x, "call"), ...) env <- environment(x) xx <- get("x", envir=env) ww <- get("w", envir=env) n <- length(xx) i1 <- 1L:min(3L, n) i2 <- if (n >= 4L) max(4L, n - 1L):n else integer() numform <- function(x) paste(formatC(x, digits = digits), collapse = ", ") cat(" x[1:", n, "] = ", numform(xx[i1]), if (n > 3L) ", ", if (n > 5L) " ..., ", numform(xx[i2]), "\n", sep = "") cat(" weights[1:", n, "] = ", numform(ww[i1]), if (n > 3L) ", ", if (n > 5L) " ..., ", numform(ww[i2]), "\n", sep = "") invisible(x) } quantile.ewcdf <- function(x, probs=seq(0,1,0.25), names=TRUE, ..., normalise=TRUE, type=1) { trap.extra.arguments(..., .Context="quantile.ewcdf") if(!(type %in% c(1,2))) stop("Only quantiles of type 1 and 2 are implemented", call.=FALSE) env <- environment(x) xx <- get("x", envir=env) n <- length(xx) Fxx <- get("y", envir=env) maxFxx <- max(Fxx) eps <- 100 * .Machine$double.eps if(normalise) { Fxx <- Fxx/maxFxx maxp <- 1 } else { maxp <- maxFxx } if(any((p.ok <- !is.na(probs)) & (probs/maxp < -eps | probs/maxp > 1 + eps))) { allowed <- if(normalise) "[0,1]" else paste("permitted range", prange(c(0, maxp))) stop(paste("'probs' outside", allowed), call.=FALSE) } if (na.p <- any(!p.ok)) { o.pr <- probs probs <- 
probs[p.ok] probs <- pmax(0, pmin(maxp, probs)) } np <- length(probs) if (n > 0 && np > 0) { qs <- numeric(np) if(type == 1) { ## right-continuous inverse for(k in 1:np) qs[k] <- xx[min(which(Fxx >= probs[k]))] } else { ## average of left and right continuous for(k in 1:np) { pk <- probs[k] ik <- min(which(Fxx >= probs[k])) qs[k] <- if(Fxx[ik] > pk) (xx[ik] + xx[ik-1L])/2 else xx[ik] } } } else { qs <- rep(NA_real_, np) } if (names && np > 0L) { dig <- max(2L, getOption("digits")) if(normalise) { probnames <- if(np < 100) formatC(100 * probs, format="fg", width=1, digits=dig) else format(100 * probs, trim = TRUE, digits = dig) names(qs) <- paste0(probnames, "%") } else { names(qs) <- if(np < 100) formatC(probs, format="fg", width=1, digits=dig) else format(probs, trim=TRUE, digits=dig) } } if (na.p) { o.pr[p.ok] <- qs names(o.pr) <- rep("", length(o.pr)) names(o.pr)[p.ok] <- names(qs) o.pr } else qs } spatstat/R/pairwise.R0000755000176200001440000000505113115271120014261 0ustar liggesusers# # # pairwise.S # # $Revision: 1.10 $ $Date: 2015/10/21 09:06:57 $ # # Pairwise() create a user-defined pairwise interaction process # [an object of class 'interact'] # # ------------------------------------------------------------------- # Pairwise <- function(pot, name = "user-defined pairwise interaction process", par = NULL, parnames=NULL, printfun) { fop <- names(formals(pot)) if(!identical(all.equal(fop, c("d", "par")), TRUE) && !identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE)) stop(paste("Formal arguments of pair potential function", sQuote("pot"), "must be either (d, par) or (d, tx, tu, par)")) if(!is.null(parnames)) { stopifnot(is.character(parnames)) if(is.null(par) || length(par) != length(parnames)) stop("par does not match parnames") } if(missing(printfun)) printfun <- function(self) { cat("Potential function:\n") print(self$pot) if(!is.null(parnames <- self$parnames)) { for(i in 1:length(parnames)) { cat(paste(parnames[i], ":\t")) pari <- self$par[[i]] 
if(is.numeric(pari) && length(pari) == 1) cat(pari, "\n") else print(pari) } } } out <- list( name = name, creator = "Pairwise", family = pairwise.family, pot = pot, par = par, parnames = parnames, init = NULL, update = function(self, ...){ do.call(Pairwise, resolve.defaults(list(...), list(pot=self$pot, name=self$name, par=self$par, parnames=self$parnames, printfun=self$print))) } , print = printfun, version = versionstring.spatstat() ) class(out) <- "interact" return(out) } Pairwise <- intermaker(Pairwise, list(creator="Pairwise", name="user-defined pairwise interaction process", par=formals(Pairwise), parnames=list("the potential", "the name of the interaction", "the list of parameters", "a description of each parameter", "an optional print function"))) spatstat/R/kmrs.R0000755000176200001440000001721613115271075013431 0ustar liggesusers# # kmrs.S # # S code for Kaplan-Meier, Reduced Sample and Hanisch # estimates of a distribution function # from _histograms_ of censored data. # # kaplan.meier() # reduced.sample() # km.rs() # # $Revision: 3.26 $ $Date: 2013/06/27 08:59:16 $ # # The functions in this file produce vectors `km' and `rs' # where km[k] and rs[k] are estimates of F(breaks[k+1]), # i.e. an estimate of the c.d.f. at the RIGHT endpoint of the interval. 
# "kaplan.meier" <- function(obs, nco, breaks, upperobs=0) { # obs: histogram of all observations : min(T_i,C_i) # nco: histogram of noncensored observations : T_i such that T_i <= C_i # breaks: breakpoints (vector or 'breakpts' object, see breaks.S) # upperobs: number of observations beyond rightmost breakpoint # breaks <- as.breakpts(breaks) n <- length(obs) if(n != length(nco)) stop("lengths of histograms do not match") check.hist.lengths(nco, breaks) # # # reverse cumulative histogram of observations d <- revcumsum(obs) + upperobs # # product integrand s <- ifelseXB(d > 0, 1 - nco/d, 1) # km <- 1 - cumprod(s) # km has length n; km[i] is an estimate of F(r) for r=breaks[i+1] # widths <- diff(breaks$val) lambda <- numeric(n) pos <- (s > 0) lambda[pos] <- -log(s[pos])/widths[pos] # lambda has length n; lambda[i] is an estimate of # the average of \lambda(r) over the interval (breaks[i],breaks[i+1]). # return(list(km=km, lambda=lambda)) } "reduced.sample" <- function(nco, cen, ncc, show=FALSE, uppercen=0) # nco: histogram of noncensored observations: T_i such that T_i <= C_i # cen: histogram of all censoring times: C_i # ncc: histogram of censoring times for noncensored obs: # C_i such that T_i <= C_i # # Then nco[k] = #{i: T_i <= C_i, T_i \in I_k} # cen[k] = #{i: C_i \in I_k} # ncc[k] = #{i: T_i <= C_i, C_i \in I_k}. # # The intervals I_k must span an interval [0,R] beginning at 0. # If this interval did not include all censoring times, # then `uppercen' must be the number of censoring times # that were not counted in 'cen'. { n <- length(nco) if(n != length(cen) || n != length(ncc)) stop("histogram lengths do not match") # # denominator: reverse cumulative histogram of censoring times # denom(r) = #{i : C_i >= r} # We compute # cc[k] = #{i: C_i > breaks[k]} # except that > becomes >= for k=0. 
# cc <- revcumsum(cen) + uppercen # # # numerator # #{i: T_i <= r <= C_i } # = #{i: T_i <= r, T_i <= C_i} - #{i: C_i < r, T_i <= C_i} # We compute # u[k] = #{i: T_i <= C_i, T_i <= breaks[k+1]} # - #{i: T_i <= C_i, C_i <= breaks[k]} # = #{i: T_i <= C_i, C_i > breaks[k], T_i <= breaks[k+1]} # this ensures that numerator and denominator are # comparable, u[k] <= cc[k] always. # u <- cumsum(nco) - c(0,cumsum(ncc)[1:(n-1)]) rs <- u/cc # # Hence rs[k] = u[k]/cc[k] is an estimator of F(r) # for r = breaks[k+1], i.e. for the right hand end of the interval. # if(!show) return(rs) else return(list(rs=rs, numerator=u, denominator=cc)) } "km.rs" <- function(o, cc, d, breaks) { # o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # breaks <- as.breakpts(breaks) bval <- breaks$val # compile histograms (breakpoints may not span data) obs <- whist( o, breaks=bval) nco <- whist( o[d], breaks=bval) cen <- whist( cc, breaks=bval) ncc <- whist( cc[d], breaks=bval) # number of observations exceeding largest breakpoint upperobs <- attr(obs, "high") uppercen <- attr(cen, "high") # go km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # return(list(rs=rs, km=km$km, hazard=km$lambda, r=breaks$r, breaks=bval)) } "km.rs.opt" <- function(o, cc, d, breaks, KM=TRUE, RS=TRUE) { # o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # breaks <- as.breakpts(breaks) bval <- breaks$val out <- list(r=breaks$r, breaks=bval) if(KM || RS) nco <- whist( o[d], breaks=bval) if(KM) { obs <- whist( o, breaks=bval) upperobs <- attr(obs, "high") km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) out <- append(list(km=km$km, hazard=km$lambda), out) } if(RS) { cen <- whist( cc, breaks=bval) ncc <- whist( cc[d], breaks=bval) 
uppercen <- attr(cen, "high") rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) out <- append(list(rs=rs), out) } return(out) } censtimeCDFest <- function(o, cc, d, breaks, ..., KM=TRUE, RS=TRUE, HAN=TRUE, RAW=TRUE, han.denom=NULL, tt=NULL, pmax=0.9) { # Histogram-based estimation of cumulative distribution function # of lifetimes subject to censoring. # o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # han.denom: denominator (eroded area) for each value of r # tt: uncensored lifetimes T_i, if known breaks <- as.breakpts(breaks) bval <- breaks$val rval <- breaks$r rmax <- breaks$max # Kaplan-Meier and/or Reduced Sample out <- km.rs.opt(o, cc, d, breaks, KM=KM, RS=RS) # convert to data frame out$breaks <- NULL df <- as.data.frame(out) # Raw ecdf of observed lifetimes if available if(RAW && !is.null(tt)) { h <- whist(tt[tt <= rmax], breaks=bval) df <- cbind(df, data.frame(raw=cumsum(h)/length(tt))) } # Hanisch if(HAN) { if(is.null(han.denom)) stop("Internal error: missing denominator for Hanisch estimator") if(length(han.denom) != length(rval)) stop(paste("Internal error:", "length(han.denom) =", length(han.denom), "!=", length(rval), "= length(rvals)")) # uncensored distances x <- o[d] # calculate Hanisch estimator h <- whist(x[x <= rmax], breaks=bval) H <- cumsum(h/han.denom) df <- cbind(df, data.frame(han=H/max(H[is.finite(H)]))) } # determine appropriate plotting range bestest <- if(KM) "km" else if(HAN) "han" else if(RS) "rs" else "raw" alim <- range(df$r[df[[bestest]] <= pmax]) # convert to fv object nama <- c("r", "km", "hazard", "han", "rs", "raw") avail <- c(TRUE, KM, KM, HAN, RS, RAW) iscdf <- c(FALSE, TRUE, FALSE, TRUE, TRUE, TRUE) labl <- c("r", "hat(%s)[km](r)", "lambda(r)", "hat(%s)[han](r)", "hat(%s)[bord](r)", "hat(%s)[raw](r)")[avail] desc <- c("distance argument r", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard 
function lambda(r)", "Hanisch estimate of %s", "border corrected estimate of %s", "uncorrected estimate of %s")[avail] df <- df[, nama[avail]] Z <- fv(df, "r", substitute(CDF(r), NULL), bestest, . ~ r, alim, labl, desc, fname="CDF") fvnames(Z, ".") <- nama[iscdf & avail] return(Z) } # simple interface for students and code development compileCDF <- function(D, B, r, ..., han.denom=NULL, check=TRUE) { han <- !is.null(han.denom) breaks <- breakpts.from.r(r) if(check) { stopifnot(length(D) == length(B) && all(D >= 0) && all(B >= 0)) if(han) stopifnot(length(han.denom) == length(r)) } D <- as.vector(D) B <- as.vector(B) # observed (censored) lifetimes o <- pmin.int(D, B) # censoring indicators d <- (D <= B) # go result <- censtimeCDFest(o, B, d, breaks, HAN=han, han.denom=han.denom, RAW=TRUE, tt=D) result <- rebadge.fv(result, new.fname="compileCDF") } spatstat/R/edgeRipley.R0000755000176200001440000001404313115271075014541 0ustar liggesusers# # edgeRipley.R # # $Revision: 1.16 $ $Date: 2017/06/05 10:31:58 $ # # Ripley isotropic edge correction weights # # edge.Ripley(X, r, W) compute isotropic correction weights # for centres X[i], radii r[i,j], window W # # To estimate the K-function see the idiom in "Kest.S" # ####################################################################### edge.Ripley <- local({ small <- function(x) { abs(x) < .Machine$double.eps } hang <- function(d, r) { nr <- nrow(r) nc <- ncol(r) answer <- matrix(0, nrow=nr, ncol=nc) # replicate d[i] over j index d <- matrix(d, nrow=nr, ncol=nc) hit <- (d < r) answer[hit] <- acos(d[hit]/r[hit]) answer } edge.Ripley <- function(X, r, W=Window(X), method="C", maxweight=100) { # X is a point pattern, or equivalent X <- as.ppp(X, W) W <- X$window switch(W$type, rectangle={}, polygonal={ if(method != "C") stop(paste("Ripley isotropic correction for polygonal windows", "requires method = ", dQuote("C"))) }, mask={ stop(paste("sorry, Ripley isotropic correction", "is not implemented for binary masks")) } ) n <- 
npoints(X) if(is.matrix(r) && nrow(r) != n) stop("the number of rows of r should match the number of points in X") if(!is.matrix(r)) { if(length(r) != n) stop("length of r is incompatible with the number of points in X") r <- matrix(r, nrow=n) } # Nr <- nrow(r) Nc <- ncol(r) if(Nr * Nc == 0) return(r) ########## x <- X$x y <- X$y stopifnot(method %in% c("interpreted", "C")) switch(method, interpreted = { ######## interpreted R code for rectangular case ######### # perpendicular distance from point to each edge of rectangle # L = left, R = right, D = down, U = up dL <- x - W$xrange[1L] dR <- W$xrange[2L] - x dD <- y - W$yrange[1L] dU <- W$yrange[2L] - y # detect whether any points are corners of the rectangle corner <- (small(dL) + small(dR) + small(dD) + small(dU) >= 2) # angle between (a) perpendicular to edge of rectangle # and (b) line from point to corner of rectangle bLU <- atan2(dU, dL) bLD <- atan2(dD, dL) bRU <- atan2(dU, dR) bRD <- atan2(dD, dR) bUL <- atan2(dL, dU) bUR <- atan2(dR, dU) bDL <- atan2(dL, dD) bDR <- atan2(dR, dD) # The above are all vectors [i] # Now we compute matrices [i,j] # half the angle subtended by the intersection between # the circle of radius r[i,j] centred on point i # and each edge of the rectangle (prolonged to an infinite line) aL <- hang(dL, r) aR <- hang(dR, r) aD <- hang(dD, r) aU <- hang(dU, r) # apply maxima # note: a* are matrices; b** are vectors; # b** are implicitly replicated over j index cL <- pmin.int(aL, bLU) + pmin.int(aL, bLD) cR <- pmin.int(aR, bRU) + pmin.int(aR, bRD) cU <- pmin.int(aU, bUL) + pmin.int(aU, bUR) cD <- pmin.int(aD, bDL) + pmin.int(aD, bDR) # total exterior angle ext <- cL + cR + cU + cD # add pi/2 for corners if(any(corner)) ext[corner,] <- ext[corner,] + pi/2 # OK, now compute weight weight <- 1 / (1 - ext/(2 * pi)) }, C = { ############ C code ############################# switch(W$type, rectangle={ z <- .C("ripleybox", nx=as.integer(n), x=as.double(x), y=as.double(y), rmat=as.double(r), 
nr=as.integer(Nc), #sic xmin=as.double(W$xrange[1L]), ymin=as.double(W$yrange[1L]), xmax=as.double(W$xrange[2L]), ymax=as.double(W$yrange[2L]), epsilon=as.double(.Machine$double.eps), out=as.double(numeric(Nr * Nc)), PACKAGE = "spatstat") weight <- matrix(z$out, nrow=Nr, ncol=Nc) }, polygonal={ Y <- edges(W) z <- .C("ripleypoly", nc=as.integer(n), xc=as.double(x), yc=as.double(y), nr=as.integer(Nc), rmat=as.double(r), nseg=as.integer(Y$n), x0=as.double(Y$ends$x0), y0=as.double(Y$ends$y0), x1=as.double(Y$ends$x1), y1=as.double(Y$ends$y1), out=as.double(numeric(Nr * Nc)), PACKAGE = "spatstat") angles <- matrix(z$out, nrow = Nr, ncol = Nc) weight <- 2 * pi/angles } ) } ) # eliminate wild values weight <- matrix(pmax.int(0, pmin.int(maxweight, weight)), nrow=Nr, ncol=Nc) return(weight) } edge.Ripley }) rmax.Ripley <- function(W) { W <- as.owin(W) if(is.rectangle(W)) return(boundingradius(W)) if(is.polygonal(W) && length(W$bdry) == 1L) return(boundingradius(W)) ## could have multiple connected components pieces <- tiles(tess(image=connected(W))) answer <- sapply(pieces, boundingradius) return(as.numeric(answer)) } spatstat/R/bermantest.R0000755000176200001440000002603313115271075014616 0ustar liggesusers# # bermantest.R # # Test statistics from Berman (1986) # # $Revision: 1.18 $ $Date: 2016/02/11 10:17:12 $ # # # --------- outdated -------- bermantest <- function(...) { message("bermantest is out of date; use berman.test") # .Deprecated("berman.test", package="spatstat") berman.test(...) } bermantest.ppp <- function(...) { message("bermantest.ppp is out of date; use berman.test.ppp") # .Deprecated("berman.test.ppp", package="spatstat") berman.test.ppp(...) } bermantest.ppm <- function(...) { message("bermantest.ppm is out of date; use berman.test.ppm") # .Deprecated("berman.test.ppm", package="spatstat") berman.test.ppm(...) } bermantest.lpp <- function(...) 
{ message("bermantest.lpp is out of date; use berman.test.lpp") # .Deprecated("berman.test.lpp", package="spatstat") berman.test.lpp(...) } bermantest.lppm <- function(...) { message("bermantest.lppm is out of date; use berman.test.lppm") # .Deprecated("berman.test.lppm", package="spatstat") berman.test.lppm(...) } # --------------------------- berman.test <- function(...) { UseMethod("berman.test") } berman.test.ppp <- function(X, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { Xname <- short.deparse(substitute(X)) covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate which <- match.arg(which) alternative <- match.arg(alternative) do.call(bermantestEngine, resolve.defaults(list(ppm(X), covariate, which, alternative), list(...), list(modelname="CSR", covname=covname, dataname=Xname))) } berman.test.ppm <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate verifyclass(model, "ppm") which <- match.arg(which) alternative <- match.arg(alternative) if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(bermantestEngine, resolve.defaults(list(model, covariate, which, alternative), list(...), list(modelname=modelname, covname=covname, dataname=model$Qname))) } berman.test.lpp <- function(X, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) 
{ Xname <- short.deparse(substitute(X)) covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate which <- match.arg(which) alternative <- match.arg(alternative) do.call(bermantestEngine, resolve.defaults(list(lppm(X), covariate, which, alternative), list(...), list(modelname="CSR", covname=covname, dataname=Xname))) } berman.test.lppm <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate verifyclass(model, "lppm") which <- match.arg(which) alternative <- match.arg(alternative) if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(bermantestEngine, resolve.defaults(list(model, covariate, which, alternative), list(...), list(modelname=modelname, covname=covname, dataname=model$Xname))) } bermantestEngine <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ..., modelname, covname, dataname="") { csr <- is.poisson(model) && is.stationary(model) if(missing(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(missing(covname)) { covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate } which <- match.arg(which) alternative <- match.arg(alternative) if(!is.poisson(model)) stop("Only implemented for Poisson point process models") # ........... first assemble data ............... 
fram <- spatialCDFframe(model, covariate, ..., modelname=modelname, covname=covname, dataname=dataname) fvalues <- fram$values info <- fram$info # values of covariate at data points ZX <- fvalues$ZX # transformed to Unif[0,1] under H0 U <- fvalues$U # values of covariate at pixels Zvalues <- fvalues$Zvalues # corresponding pixel areas/weights weights <- fvalues$weights # intensity of model lambda <- fvalues$lambda switch(which, Z1={ #......... Berman Z1 statistic ..................... method <- paste("Berman Z1 test of", if(info$csr) "CSR" else "inhomogeneous Poisson process", "in", info$spacename) # sum of covariate values at data points Sn <- sum(ZX) # predicted mean and variance lamwt <- lambda * weights En <- sum(lamwt) ESn <- sum(lamwt * Zvalues) varSn <- sum(lamwt * Zvalues^2) # working, for plot method working <- list(meanZX=mean(ZX), meanZ=ESn/En) # standardise statistic <- (Sn - ESn)/sqrt(varSn) names(statistic) <- "Z1" p.value <- switch(alternative, two.sided=2 * pnorm(-abs(statistic)), less=pnorm(statistic), greater=pnorm(statistic, lower.tail=FALSE)) altblurb <- switch(alternative, two.sided="two-sided", less="mean value of covariate at random points is less than predicted under model", greater="mean value of covariate at random points is greater than predicted under model") valuename <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname)) }, Z2={ #......... Berman Z2 statistic ..................... 
method <- paste("Berman Z2 test of", if(info$csr) "CSR" else "inhomogeneous Poisson process", "in", info$spacename) npts <- length(ZX) statistic <- sqrt(12/npts) * (sum(U) - npts/2) working <- list(meanU=mean(U)) names(statistic) <- "Z2" p.value <- switch(alternative, two.sided=2 * pnorm(-abs(statistic)), less=pnorm(statistic), greater=pnorm(statistic, lower.tail=FALSE)) altblurb <- switch(alternative, two.sided="two-sided", less="covariate values at random points have lower quantiles than predicted under model", greater="covariate values at random points have higher quantiles than predicted under model") valuename <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname), "\n\t", "and transformed to uniform distribution under", if(info$csr) modelname else sQuote(modelname)) }) out <- list(statistic=statistic, p.value=p.value, alternative=altblurb, method=method, which=which, working=working, data.name=valuename, fram=fram) class(out) <- c("htest", "bermantest") return(out) } plot.bermantest <- function(x, ..., lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2) { fram <- x$fram if(!is.null(fram)) { values <- fram$values info <- fram$info } else { # old style ks <- x$ks values <- attr(ks, "prep") info <- attr(ks, "info") } work <- x$working op <- options(useFancyQuotes=FALSE) switch(x$which, Z1={ # plot cdf's of Z FZ <- values$FZ xxx <- get("x", environment(FZ)) yyy <- get("y", environment(FZ)) main <- c(x$method, paste("based on distribution of covariate", sQuote(info$covname)), paste("Z1 statistic =", signif(x$statistic, 4)), paste("p-value=", signif(x$p.value, 4))) do.call(plot.default, resolve.defaults( list(x=xxx, y=yyy, type="l"), list(...), list(lwd=lwd0, col=col0, lty=lty0), list(xlab=info$covname, ylab="probability", main=main))) FZX <- values$FZX if(is.null(FZX)) FZX <- ecdf(values$ZX) plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty) abline(v=work$meanZ, lwd=lwd0,col=col0, 
lty=lty0, xpd=FALSE) abline(v=work$meanZX, lwd=lwd,col=col, lty=lty, xpd=FALSE) }, Z2={ # plot cdf of U U <- values$U cdfU <- ecdf(U) main <- c(x$method, paste("based on distribution of covariate", sQuote(info$covname)), paste("Z2 statistic =", signif(x$statistic, 4)), paste("p-value=", signif(x$p.value, 4))) do.call(plot.ecdf, resolve.defaults( list(cdfU), list(...), list(do.points=FALSE, asp=1), list(xlim=c(0,1), ylim=c(0,1)), list(lwd=lwd, col=col, lty=lty), list(xlab="U", ylab="relative frequency"), list(main=main))) abline(0,1,lwd=lwd0,col=col0,lty=lty0, xpd=FALSE) abline(v=0.5, lwd=lwd0,col=col0,lty=lty0, xpd=FALSE) abline(v=work$meanU, lwd=lwd,col=col,lty=lty, xpd=FALSE) }) options(op) return(invisible(NULL)) } spatstat/R/subset.R0000644000176200001440000000440313115225157013751 0ustar liggesusers## ## subset.R ## ## Methods for 'subset' ## ## $Revision: 1.5 $ $Date: 2016/03/01 02:07:34 $ subset.ppp <- function(x, subset, select, drop=FALSE, ...) { stopifnot(is.ppp(x)) w <- as.owin(x) y <- as.data.frame(x) r <- if (missing(subset)) { rep_len(TRUE, nrow(y)) } else { e <- substitute(subset) r <- eval(e, y, parent.frame()) if(!is.logical(r)) r <- ppsubset(x, r, "subset", fatal=TRUE) r & !is.na(r) } vars <- if (missing(select)) { TRUE } else { ## create an environment in which column names are mapped to their positions nl <- as.list(seq_along(y)) names(nl) <- names(y) if(length(nl) > 3) { ## multiple columns of marks: add the name 'marks' nl <- append(nl, list(marks=3:length(nl))) } eval(substitute(select), nl, parent.frame()) } ## ensure columns include coordinates nama <- names(y) names(nama) <- nama vars <- union(c("x", "y"), nama[vars]) ## take subset z <- y[r, vars, drop = FALSE] ## reinstate as point pattern out <- as.ppp(z, W=w, check=FALSE) if(drop) out <- out[drop=TRUE] return(out) } subset.pp3 <- subset.lpp <- subset.ppx <- function(x, subset, select, drop=FALSE, ...) 
{ y <- as.data.frame(x) r <- if (missing(subset)) rep_len(TRUE, nrow(y)) else { e <- substitute(subset) r <- eval(e, y, parent.frame()) if(!is.logical(r)) r <- ppsubset(x, r, "subset", fatal=TRUE) r & !is.na(r) } vars <- if (missing(select)) TRUE else { ## create an environment in which column names are mapped to their positions nl <- as.list(seq_along(y)) names(nl) <- names(y) if(!("marks" %in% names(y)) && any(ismark <- (x$ctype == "mark"))) { ## add the symbol 'marks' nl <- append(nl, list(marks=which(ismark))) } eval(substitute(select), nl, parent.frame()) } ## ensure columns include coordinates nama <- names(y) names(nama) <- nama vars <- union(names(coords(x)), nama[vars]) ## take subset z <- y[r, vars, drop = FALSE] ## reinstate as point pattern ctype <- as.character(x$ctype)[match(vars, nama)] out <- ppx(z, domain=x$domain, coord.type=ctype) ## drop unused factor levels if(drop) out <- out[drop=TRUE] ## reinstate class class(out) <- class(x) return(out) } spatstat/R/nnclean.R0000755000176200001440000001732213115271120014060 0ustar liggesusers# # nnclean.R # # Nearest-neighbour clutter removal # # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # # $Revision: 1.16 $ $Date: 2016/02/11 10:17:12 $ # nnclean <- function(X, k, ...) 
{ UseMethod("nnclean") } nnclean.pp3 <- function(X, k, ..., convergence = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by Adrian Baddeley Xname <- short.deparse(substitute(X)) stopifnot(inherits(X, "pp3")) validposint(k, "nnclean.pp3") kthNND <- nndist(X, k=k) # apply classification algorithm em <- do.call(nncleanEngine, resolve.defaults(list(kthNND, k=k), list(...), list(d=3, tol=convergence, plothist=plothist, verbose=verbose, maxit=maxit, Xname=Xname))) # tack results onto point pattern as marks pp <- em$probs zz <- factor(em$z, levels=c(0,1)) levels(zz) <- c("noise", "feature") mm <- hyperframe(prob=pp, label=zz) marks(X) <- cbind(marks(X), mm) attr(X, "theta") <- em[c("lambda1", "lambda2", "p")] attr(X, "info") <- em[c("d", "niter", "maxit", "converged")] attr(X, "hist") <- em$hist return(X) } nnclean.ppp <- function(X, k, ..., edge.correct = FALSE, wrap = 0.1, convergence = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by Adrian Baddeley Xname <- short.deparse(substitute(X)) validposint(k, "nnclean.ppp") if(!edge.correct) { # compute vector of k-th nearest neighbour distances kthNND <- nndist(X, k=k) } else { # replicate data periodically # (ensuring original points are listed first) Xbox <- X[as.rectangle(X)] Xpand <- periodify(Xbox, ix=c(0,-1,1), iy=c(0,-1,1), check=FALSE) # trim to margin W <- expand.owin(X$window, (1+2*wrap)^2) Xpand <- Xpand[W] kthNND <- nndist(Xpand, k=k) } # apply classification algorithm em <- do.call(nncleanEngine, resolve.defaults(list(kthNND, k=k), list(...), list(d=2, tol=convergence, plothist=plothist, verbose=verbose, maxit=maxit, Xname=Xname))) # extract results pp <- em$probs zz <- em$z zz <- factor(zz, levels=c(0,1)) levels(zz) <- c("noise", "feature") df <- data.frame(class=zz,prob=pp) if(edge.correct) { # trim 
back to original point pattern df <- df[seq_len(X$n), ] } # tack on marx <- marks(X, dfok=TRUE) if(is.null(marx)) marks(X, dfok=TRUE) <- df else marks(X, dfok=TRUE) <- cbind(df, marx) attr(X, "theta") <- em[c("lambda1", "lambda2", "p")] attr(X, "info") <- em[c("d", "niter", "maxit", "converged")] attr(X, "hist") <- em$hist return(X) } nncleanEngine <- function(kthNND, k, d, ..., tol = 0.001, maxit = 50, plothist = FALSE, lineargs = list(), verbose=TRUE, Xname="X") { ## Adapted from statlib file NNclean.q ## Authors: Simon Byers and Adrian Raftery ## Adapted for spatstat by Adrian Baddeley n <- length(kthNND) ## Undocumented extension by Adrian Baddeley 2014 ## Allow different dimensions in feature and noise. ## d[1] is cluster dimension. d <- ensure2vector(d) alpha.d <- (2. * pi^(d/2.))/(d * gamma(d/2.)) # raise to power d for efficiency kNNDpowd1 <- kthNND^(d[1]) kNNDpowd2 <- kthNND^(d[2]) # # Now use kthNND in E-M algorithm # First set up starting guesses. # # probs <- numeric(n) thresh <- (min(kthNND) + diff(range(kthNND))/3.) high <- (kthNND > thresh) delta <- as.integer(high) p <- 0.5 lambda1 <- k/(alpha.d[1] * mean(kNNDpowd1[!high])) lambda2 <- k/(alpha.d[2] * mean(kNNDpowd2[ high])) loglik.old <- 0. loglik.new <- 1. # # Iterator starts here, # Z <- !kthNND niter <- 0 while(abs(loglik.new - loglik.old)/(1 + abs(loglik.new)) > tol) { if(niter >= maxit) { warning(paste("E-M algorithm failed to converge in", maxit, ngettext(maxit, "iteration", "iterations")), call.=FALSE) break } niter <- niter + 1 # E - step f1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d[1]) f2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d[2]) delta[!Z] <- (p * f1)/(p * f1 + (1 - p) * f2) delta[Z] <- 0 # M - step sumdelta <- sum(delta) negdelta <- 1. 
- delta p <- sumdelta/n lambda1 <- (k * sumdelta)/(alpha.d[1] * sum(kNNDpowd1 * delta)) lambda2 <- (k * (n - sumdelta))/(alpha.d[2] * sum(kNNDpowd2 * negdelta)) # evaluate marginal loglikelihood loglik.old <- loglik.new loglik.new <- sum( - p * lambda1 * alpha.d[1] * (kNNDpowd1 * delta) - (1. - p) * lambda2 * alpha.d[2] * (kNNDpowd2 * negdelta) + delta * k * log(lambda1 * alpha.d[1]) + negdelta * k * log(lambda2 * alpha.d[2])) if(verbose) cat(paste("Iteration", niter, "\tlogLik =", loglik.new, "\tp =", signif(p,4), "\n")) } if(plothist) { dotargs <- list(...) if(spatstat.options('monochrome')) dotargs <- col.args.to.grey(dotargs) ## compute plot limits to include both histogram and density xlim <- c(0, max(kthNND)) H <- do.call(hist, resolve.defaults(list(kthNND, plot=FALSE, warn.unused=FALSE), dotargs, list(nclass=40))) barheights <- H$density support <- seq(from=xlim[1], to=xlim[2], length.out = 200) fittedy <- p * dknn(support, lambda=lambda1, k = k, d = d[1]) + (1 - p) * dknn(support, lambda=lambda2, k = k, d = d[2]) ylim <- range(c(0, barheights, fittedy)) xlab <- paste("Distance to", ordinal(k), "nearest neighbour") ## now plot it (unless overridden by plot=FALSE) reallyplot <- resolve.1.default("plot", list(...), list(plot=TRUE)) H <- do.call(hist, resolve.defaults(list(kthNND, probability=TRUE), dotargs, list(plot=TRUE, warn.unused=reallyplot, nclass=40, xlim = xlim, ylim=ylim, xlab = xlab, ylab = "Probability density", axes = TRUE, main=""))) H$xname <- xlab if(reallyplot) { box() lineargs <- resolve.defaults(lineargs, list(col="green", lwd=2)) if(spatstat.options("monochrome")) lineargs <- col.args.to.grey(lineargs) do.call(lines, append(list(x=support, y=fittedy), lineargs)) } } # delta1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d[1]) delta2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d[2]) probs[!Z] <- delta1/(delta1 + delta2) probs[Z] <- 1 # if(verbose) { cat("Estimated parameters:\n") cat(paste("p [cluster] =", signif(p, 5), "\n")) 
cat(paste("lambda [cluster] =", signif(lambda1, 5), "\n")) cat(paste("lambda [noise] =", signif(lambda2, 5), "\n")) } # # z will be the classifications. 1= in cluster. 0= in noise. # return(list(z = round(probs), probs = probs, lambda1 = lambda1, lambda2 = lambda2, p = p, kthNND = kthNND, d=d, n=n, k=k, niter = niter, maxit = maxit, converged = (niter >= maxit), hist=if(plothist) H else NULL)) } spatstat/R/clickppp.R0000755000176200001440000000472413115271075014262 0ustar liggesusers#' Dominic Schuhmacher's idea #' #' $Revision: 1.16 $ $Date: 2017/01/07 09:24:04 $ #' clickppp <- local({ clickppp <- function(n=NULL, win=square(1), types=NULL, ..., add=FALSE, main=NULL, hook=NULL) { win <- as.owin(win) instructions <- if(!is.null(n)) paste("click", n, "times in window") else paste("add points: click left mouse button in window\n", "exit: press ESC or another mouse button") if(is.null(main)) main <- instructions #### single type ######################### if(is.null(types)) { plot(win, add=add, main=main, invert=TRUE) if(!is.null(hook)) plot(hook, add=TRUE) if(!is.null(n)) xy <- spatstatLocator(n=n, ...) else xy <- spatstatLocator(...) #' check whether all points lie inside window if((nout <- sum(!inside.owin(xy$x, xy$y, win))) > 0) { warning(paste(nout, ngettext(nout, "point", "points"), "lying outside specified window; window was expanded")) win <- boundingbox(win, xy) } X <- ppp(xy$x, xy$y, window=win) return(X) } ##### multitype ####################### ftypes <- factor(types, levels=types) #' input points of type 1 X <- getem(ftypes[1L], instructions, n=n, win=win, add=add, ..., pch=1) X <- X %mark% ftypes[1L] #' input points of types 2, 3, ... in turn naughty <- FALSE for(i in 2:length(types)) { Xi <- getem(ftypes[i], instructions, n=n, win=win, add=add, ..., hook=X, pch=i) Xi <- Xi %mark% ftypes[i] if(!naughty && identical(Xi$window, win)) { #' normal case X <- superimpose(X, Xi, W=win) } else { #' User has clicked outside original window. 
naughty <- TRUE #' Use bounding box for simplicity bb <- boundingbox(Xi$window, X$window) X <- superimpose(X, Xi, W=bb) } } if(!add) { if(!naughty) plot(X, main="Final pattern") else { plot(X$window, main="Final pattern (in expanded window)", invert=TRUE) plot(win, add=TRUE, invert=TRUE) plot(X, add=TRUE) } } return(X) } getem <- function(i, instr, ...) { main <- paste("Points of type", sQuote(i), "\n", instr) do.call(clickppp, resolve.defaults(list(...), list(main=main))) } clickppp }) clickdist <- function() { a <- spatstatLocator(2) return(pairdist(a)[1L,2L]) } spatstat/R/relrisk.R0000755000176200001440000004341413115271120014116 0ustar liggesusers# # relrisk.R # # Estimation of relative risk # # $Revision: 1.33 $ $Date: 2017/01/28 06:29:07 $ # relrisk <- function(X, ...) UseMethod("relrisk") relrisk.ppp <- local({ relrisk.ppp <- function(X, sigma=NULL, ..., varcov=NULL, at="pixels", relative=FALSE, se=FALSE, casecontrol=TRUE, control=1, case) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) control.given <- !missing(control) case.given <- !missing(case) if(!relative && (control.given || case.given)) { aa <- c("control", "case")[c(control.given, case.given)] nn <- length(aa) warning(paste(ngettext(nn, "Argument", "Arguments"), paste(sQuote(aa), collapse=" and "), ngettext(nn, "was", "were"), "ignored, because relative=FALSE")) } npts <- npoints(X) Y <- split(X) uX <- unmark(X) types <- names(Y) ntypes <- length(Y) if(ntypes == 1) stop("Data contains only one type of points") marx <- marks(X) imarks <- as.integer(marx) lev <- levels(marx) ## trap arguments dotargs <- list(...) isbwarg <- names(dotargs) %in% c("method", "nh", "hmin", "hmax", "warn") bwargs <- dotargs[isbwarg] dargs <- dotargs[!isbwarg] ## using edge corrections? 
edge <- resolve.1.default(list(edge=TRUE), list(...)) diggle <- resolve.1.default(list(diggle=FALSE), list(...)) ## bandwidth if(is.null(sigma) && is.null(varcov)) sigma <- do.call(bw.relrisk, append(list(X), bwargs)) SmoothPars <- append(list(sigma=sigma, varcov=varcov, at=at), dargs) if(se) { ## determine other bandwidth for variance estimation if(is.null(varcov)) { varconst <- 1/(4 * pi * prod(sigma)) VarPars <- append(list(sigma=sigma/sqrt(2), at=at), dargs) } else { varconst <- 1/(4 * pi * sqrt(det(varcov))) VarPars <- append(list(varcov=varcov/2, at=at), dargs) } if(edge) { ## evaluate edge correction weights edgeim <- second.moment.calc(uX, sigma, what="edge", ..., varcov=varcov) if(diggle || at == "points") { edgeX <- safelookup(edgeim, uX, warn=FALSE) diggleX <- 1/edgeX diggleX[!is.finite(diggleX)] <- 0 } edgeim <- edgeim[Window(X), drop=FALSE] } } ## ......................................... ## compute intensity estimates for each type ## ......................................... 
switch(at, pixels = { ## intensity estimates of each type Deach <- do.call(density.splitppp, append(list(x=Y), SmoothPars)) ## compute intensity estimate for unmarked pattern Dall <- Reduce("+", Deach) ## variance terms if(se) { if(!edge) { ## no edge correction Veach <- do.call(density.splitppp, append(list(x=Y), VarPars)) } else if(!diggle) { ## edge correction e(u) Veach <- do.call(density.splitppp, append(list(x=Y), VarPars)) Veach <- lapply(Veach, "/", e2=edgeim) } else { ## Diggle edge correction e(x_i) Veach <- mapply(density.ppp, x=Y, weights=split(diggleX, marx), MoreArgs=VarPars, SIMPLIFY=FALSE) } Veach <- lapply(Veach, "*", varconst) Vall <- Reduce("+", Veach) } }, points = { ## intensity estimates of each type **at each data point** ## dummy variable matrix dumm <- matrix(0, npts, ntypes) dumm[cbind(seq_len(npts), imarks)] <- 1 colnames(dumm) <- lev Deach <- do.call(density.ppp, append(list(x=uX, weights=dumm), SmoothPars)) ## compute intensity estimate for unmarked pattern Dall <- rowSums(Deach) ## variance terms if(se) { if(!edge) { ## no edge correction Veach <- do.call(density.ppp, append(list(x=uX, weights=dumm), VarPars)) } else if(!diggle) { ## edge correction e(u) Veach <- do.call(density.ppp, append(list(x=uX, weights=dumm), VarPars)) Veach <- Veach * diggleX } else { ## Diggle edge correction e(x_i) Veach <- do.call(density.ppp, append(list(x=uX, weights=dumm * diggleX), VarPars)) } Veach <- Veach * varconst Vall <- rowSums(Veach) } }) ## ......................................... ## compute probabilities/risks ## ......................................... 
if(ntypes == 2 && casecontrol) { if(control.given || !case.given) { stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:2) } else if(is.character(control)) { icontrol <- match(control, levels(marks(X))) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) if(!case.given) icase <- 3 - icontrol } if(case.given) { stopifnot(length(case) == 1) if(is.numeric(case)) { icase <- case <- as.integer(case) stopifnot(case %in% 1:2) } else if(is.character(case)) { icase <- match(case, levels(marks(X))) if(is.na(icase)) stop(paste("No points have mark =", case)) } else stop(paste("Unrecognised format for argument", sQuote("case"))) if(!control.given) icontrol <- 3 - icase } ## compute ...... switch(at, pixels = { ## compute probability of case pcase <- Deach[[icase]]/Dall ## correct small numerical errors pcase <- clamp01(pcase) ## trap NaN values nbg <- badvalues(pcase) if(any(nbg)) { ## apply l'Hopital's rule: ## p(case) = 1{nearest neighbour is case} distcase <- distmap(Y[[icase]], xy=pcase) distcontrol <- distmap(Y[[icontrol]], xy=pcase) closecase <- eval.im(as.integer(distcase < distcontrol)) pcase[nbg] <- closecase[nbg] } if(!relative) { if(!se) { result <- pcase } else { Vcase <- Veach[[icase]] NUM <- eval.im(Vcase * (1-2*pcase) + Vall * pcase^2) SE <- eval.im(sqrt(pmax(NUM, 0))/Dall) result <- list(estimate=pcase, SE=SE) } } else { rcase <- eval.im(ifelse(pcase < 1, pcase/(1-pcase), NA)) if(!se) { result <- rcase } else { Vcase <- Veach[[icase]] Vctrl <- Veach[[icontrol]] Dctrl <- Deach[[icontrol]] NUM <- eval.im(Vcase + Vctrl * rcase^2) SE <- eval.im(sqrt(pmax(NUM, 0))/Dctrl) result <- list(estimate=rcase, SE=SE) } } }, points={ ## compute probability of case pcase <- Deach[,icase]/Dall ## correct small numerical errors pcase <- clamp01(pcase) ## trap NaN values if(any(nbg <- badvalues(pcase))) { ## apply 
l'Hopital's rule nntype <- imarks[nnwhich(X)] pcase[nbg] <- as.integer(nntype[nbg] == icase) } if(!relative) { if(!se) { result <- pcase } else { NUM <- Veach[,icase] * (1-2*pcase) + Vall * pcase^2 SE <- sqrt(pmax(NUM, 0))/Dall result <- list(estimate=pcase, SE=SE) } } else { rcase <- ifelse(pcase < 1, pcase/(1-pcase), NA) if(!se) { result <- rcase } else { NUM <- Veach[,icase] + Veach[,icontrol] * rcase^2 SE <- sqrt(pmax(NUM, 0))/Deach[,icontrol] result <- list(estimate=rcase, SE=SE) } } }) } else { ## several types if(relative) { ## need 'control' type stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:ntypes) } else if(is.character(control)) { icontrol <- match(control, levels(marks(X))) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) } switch(at, pixels={ probs <- as.solist(lapply(Deach, "/", e2=Dall)) ## correct small numerical errors probs <- as.solist(lapply(probs, clamp01)) ## trap NaN values nbg <- lapply(probs, badvalues) nbg <- Reduce("|", nbg) if(any(nbg)) { ## apply l'Hopital's rule distX <- distmap(X, xy=Dall) whichnn <- attr(distX, "index") typenn <- eval.im(imarks[whichnn]) typennsub <- as.matrix(typenn)[nbg] for(k in seq_along(result)) probs[[k]][nbg] <- (typennsub == k) } if(!relative) { if(!se) { result <- probs } else { SE <- list() for(i in 1:ntypes) { NUM <- (Veach[[i]] * (1 - 2 * probs[[i]]) + Vall * probs[[i]]^2) SE[[i]] <- eval.im(sqrt(pmax(NUM, 0))/Dall) } SE <- as.solist(SE) names(SE) <- types result <- list(estimate=probs, SE=SE) } } else { risks <- as.solist(lapply(probs, function(z, d) { eval.im(ifelse(d > 0, z/d, NA)) }, d = probs[[icontrol]])) if(!se) { result <- risks } else { Vctrl <- Veach[[icontrol]] Dctrl <- Deach[[icontrol]] SE <- list() for(i in 1:ntypes) { NUM <- Veach[[i]] + Vctrl * risks[[i]]^2 SE[[i]] <- eval.im(sqrt(pmax(NUM, 0))/Dctrl) } SE <- as.solist(SE) 
names(SE) <- types result <- list(estimate=risks, SE=SE) } } }, points = { probs <- Deach/Dall ## correct small numerical errors probs <- clamp01(probs) ## trap NaN values bad <- badvalues(probs) badrow <- matrowany(bad) if(any(badrow)) { ## apply l'Hopital's rule typenn <- imarks[nnwhich(X)] probs[badrow, ] <- (typenn == col(result))[badrow, ] } if(!relative) { if(!se) { result <- probs } else { NUM <- Veach * (1-2*probs) + Vall * probs^2 SE <- sqrt(pmax(NUM, 0))/Dall result <- list(estimate=probs, SE=SE) } } else { risks <- probs/probs[,icontrol] if(!se) { result <- risks } else { NUM <- Veach + Veach[,icontrol] * risks^2 NUM[,icontrol] <- 0 SE <- sqrt(pmax(NUM, 0))/Deach[,icontrol] result <- list(estimate=risks, SE=SE) } } }) } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } clamp01 <- function(x) { if(is.im(x)) return(eval.im(pmin(pmax(x, 0), 1))) return(pmin(pmax(x, 0), 1)) } badvalues <- function(x) { if(is.im(x)) x <- as.matrix(x) return(!(is.finite(x) | is.na(x))) } reciprocal <- function(x) 1/x relrisk.ppp }) bw.stoyan <- function(X, co=0.15) { ## Stoyan's rule of thumb stopifnot(is.ppp(X)) n <- npoints(X) W <- Window(X) a <- area(W) stoyan <- co/sqrt(5 * n/a) return(stoyan) } bw.relrisk <- function(X, method="likelihood", nh=spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) ## rearrange in ascending order of x-coordinate (for C code) X <- X[fave.order(X$x)] ## Y <- split(X) ntypes <- length(Y) if(ntypes == 1) stop("Data contains only one type of points") marx <- marks(X) method <- pickoption("method", method, c(likelihood="likelihood", leastsquares="leastsquares", ls="leastsquares", LS="leastsquares", weightedleastsquares="weightedleastsquares", wls="weightedleastsquares", WLS="weightedleastsquares")) ## if(method != "likelihood") { ## dummy variables for each type imarks <- as.integer(marx) if(ntypes == 2) { ## 1 = control, 2 = case indic <- (imarks == 2) 
y01 <- as.integer(indic) } else { indic <- matrix(FALSE, n, ntypes) indic[cbind(seq_len(n), imarks)] <- TRUE y01 <- indic * 1 } X01 <- X %mark% y01 } ## cross-validated bandwidth selection ## determine a range of bandwidth values n <- npoints(X) if(is.null(hmin) || is.null(hmax)) { W <- Window(X) a <- area(W) d <- diameter(as.rectangle(W)) ## Stoyan's rule of thumb applied to the least and most common types mcount <- table(marx) nmin <- max(1, min(mcount)) nmax <- max(1, max(mcount)) stoyan.low <- 0.15/sqrt(nmax/a) stoyan.high <- 0.15/sqrt(nmin/a) if(is.null(hmin)) hmin <- max(minnndist(unique(X)), stoyan.low/5) if(is.null(hmax)) { hmax <- min(d/4, stoyan.high * 20) hmax <- max(hmax, hmin * 2) } } else stopifnot(hmin < hmax) ## h <- geomseq(from=hmin, to=hmax, length.out=nh) cv <- numeric(nh) ## ## compute cross-validation criterion switch(method, likelihood={ methodname <- "Likelihood" ## for efficiency, only compute the estimate of p_j(x_i) ## when j = m_i = mark of x_i. Dthis <- numeric(n) for(i in seq_len(nh)) { Dall <- density.ppp(X, sigma=h[i], at="points", edge=FALSE, sorted=TRUE) Deach <- density.splitppp(Y, sigma=h[i], at="points", edge=FALSE, sorted=TRUE) split(Dthis, marx) <- Deach pthis <- Dthis/Dall cv[i] <- -mean(log(pthis)) } }, leastsquares={ methodname <- "Least Squares" for(i in seq_len(nh)) { phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE) cv[i] <- mean((y01 - phat)^2) } }, weightedleastsquares={ methodname <- "Weighted Least Squares" ## need initial value of h from least squares h0 <- bw.relrisk(X, "leastsquares", nh=ceiling(nh/4)) phat0 <- Smooth(X01, sigma=h0, at="points", leaveoneout=TRUE, sorted=TRUE) var0 <- phat0 * (1-phat0) var0 <- pmax.int(var0, 1e-6) for(i in seq_len(nh)) { phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE) cv[i] <- mean((y01 - phat)^2/var0) } }) ## optimize iopt <- which.min(cv) ## if(warn && (iopt == nh || iopt == 1)) warning(paste("Cross-validation criterion was 
minimised at", if(iopt == 1) "left-hand" else "right-hand", "end of interval", "[", signif(hmin, 3), ",", signif(hmax, 3), "];", "use arguments hmin, hmax to specify a wider interval")) ## result <- bw.optim(cv, h, iopt, hname="sigma", creator="bw.relrisk", criterion=paste(methodname, "Cross-Validation"), unitname=unitname(X)) return(result) } which.max.im <- function(x) { .Deprecated("im.apply", "spatstat", "which.max.im(x) is deprecated: use im.apply(x, which.max)") ans <- im.apply(x, which.max) return(ans) } spatstat/R/relrisk.ppm.R0000644000176200001440000003264313115225157014721 0ustar liggesusers## ## relrisk.ppm.R ## ## $Revision: 1.7 $ $Date: 2016/07/15 10:21:26 $ ## relrisk.ppm <- local({ relrisk.ppm <- function(X, ..., at=c("pixels", "points"), relative=FALSE, se=FALSE, casecontrol=TRUE, control=1, case, ngrid=NULL, window=NULL) { stopifnot(is.ppm(X)) stopifnot(is.multitype(X)) control.given <- !missing(control) case.given <- !missing(case) at <- match.arg(at) if(!relative && (control.given || case.given)) { aa <- c("control", "case")[c(control.given, case.given)] nn <- length(aa) warning(paste(ngettext(nn, "Argument", "Arguments"), paste(sQuote(aa), collapse=" and "), ngettext(nn, "was", "were"), "ignored, because relative=FALSE")) } model <- X Y <- data.ppm(model) types <- levels(marks(Y)) ntypes <- length(types) # np <- length(coef(model)) ## compute probabilities or risks if(ntypes == 2 && casecontrol) { if(control.given || !case.given) { stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:2) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) if(!case.given) icase <- 3 - icontrol } if(case.given) { stopifnot(length(case) == 1) if(is.numeric(case)) { icase <- case <- as.integer(case) stopifnot(case %in% 1:2) } else 
if(is.character(case)) { icase <- match(case, types) if(is.na(icase)) stop(paste("No points have mark =", case)) } else stop(paste("Unrecognised format for argument", sQuote("case"))) if(!control.given) icontrol <- 3 - icase } switch(at, pixels= { ## estimate is a single image ## compute images of intensities of each mark lambda.each <- predict(model, ngrid=ngrid, window=window) if(!relative) { ## compute probabilities.. ## total intensity (image) lambda.all <- Reduce("+", lambda.each) if(!se) { result <- lambda.each[[icase]]/lambda.all result <- killglitches(result) } else { probs <- lapply(lambda.each, "/", e2=lambda.all) probs <- as.solist(lapply(probs, killglitches)) estimate <- probs[[icase]] SE <- SEprobPixels(model, probs)[[icase]] SE <- killglitches(SE) result <- list(estimate=estimate, SE=SE) } } else { ## relative risks lambda.ctrl <- lambda.each[[icontrol]] if(!se) { result <- lambda.each[[icase]]/lambda.ctrl result <- killglitches(result) } else { risks <- lapply(lambda.each, "/", e2=lambda.ctrl) risks <- as.solist(lapply(risks, killglitches)) estimate <- risks[[icase]] SE <- SErelriskPixels(model, risks, icontrol)[[icase]] SE <- killglitches(SE) result <- list(estimate=estimate, SE=SE) } } }, points={ ## compute intensities of each type Ycase <- unmark(Y) %mark% factor(types[icase], levels=types) Yctrl <- unmark(Y) %mark% factor(types[icontrol], levels=types) lambda.case <- predict(model, locations=Ycase) lambda.ctrl <- predict(model, locations=Yctrl) if(!relative) { ## compute probabilities ## total intensity lambda.all <- lambda.case + lambda.ctrl prob.case <- lambda.case/lambda.all if(!se) { result <- prob.case } else { probs <- matrix(, length(prob.case), 2) probs[,icase] <- prob.case probs[,icontrol] <- 1 - prob.case SE <- SEprobPoints(model, probs)[,icase] result <- list(estimate=prob.case, SE=SE) } } else { ## compute relative risks risk.case <- lambda.case/lambda.ctrl if(!se) { result <- risk.case } else { risks <- matrix(, length(risk.case), 
2) risks[,icase] <- risk.case risks[,icontrol] <- 1 SE <- SErelriskPoints(model, risks, icontrol)[,icase] result <- list(estimate=risk.case, SE=SE) } } }) } else { ## several types if(relative) { ## need 'control' type stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:ntypes) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) } switch(at, pixels={ ## estimate is a list of images ## Compute images of intensities of each type lambda.each <- predict(model, ngrid=ngrid, window=window) if(!relative) { ## compute probabilities... ## image of total intensity lambda.all <- Reduce("+", lambda.each) probs <- lapply(lambda.each, "/", e2=lambda.all) probs <- as.solist(lapply(probs, killglitches)) if(!se) { result <- probs } else { SE <- SEprobPixels(model, probs) SE <- as.solist(lapply(SE, killglitches)) result <- list(estimate=probs, SE=SE) } } else { ## compute relative risks risks <- lapply(lambda.each, "/", e2=lambda.each[[icontrol]]) risks <- as.solist(lapply(risks, killglitches)) if(!se) { result <- risks } else { SE <- SErelriskPixels(model, risks, icontrol) SE <- as.solist(lapply(SE, killglitches)) result <- list(estimate=risks, SE=SE) } } }, points = { ## matrix of intensities of each type at each point ## rows=locations, cols=types lambda.each <- sapply(types, predictfortype, loc=unmark(Y), model=model, types=types) if(!relative) { ## compute probabilities lambda.all <- rowSums(lambda.each) probs <- lambda.each/lambda.all if(!se) { result <- probs } else { SE <- SEprobPoints(model, probs) result <- list(estimate=probs, SE=SE) } } else { ## compute relative risks risks <- lambda.each/lambda.each[,icontrol] if(!se) { result <- risks } else { SE <- SErelriskPoints(model, risks, icontrol) result <- list(estimate=risks, SE=SE) } } }) } 
return(result) } modmats <- function(model) { # model matrices for data locations for each possible mark QM <- quad.ppm(model) Y <- QM$data QR <- quadscheme.replicated(Y, unmark(Y[FALSE])) sourceid <- QR$param$sourceid ## canonical covariates mm <- model.matrix(model, Q=QR) ## mm is a matrix with one column for canonical covariate ## and one row for each marked point in QR. mm <- cbind(data.frame(".s"=sourceid, ".m"=marks(QR)), mm) ## Split by marks ss <- split(mm, mm$.m) ## Reorganise into compatible matrices zz <- lapply(ss, reorg) return(zz) } reorg <- function(x) { z <- x rownames(z) <- NULL z[x$.s, ] <- z return(z[,-(1:2), drop=FALSE]) } SErelriskPoints <- function(model, riskvalues, icontrol) { ## riskvalues is a matrix with rows=data locations, cols=types types <- colnames(riskvalues) ntypes <- length(types) ## S.um <- modmats(model) S.um <- lapply(S.um, as.matrix) ## S.um is a list of matrices, one for each possible type, ## each matrix having one row per data location dS.um <- lapply(S.um, "-", e2=S.um[[icontrol]]) R.um <- mapply("*", dS.um, as.list(as.data.frame(riskvalues)), SIMPLIFY=FALSE) ## likewise R.um is a list of matrices ## vc <- vcov(model) VAR <- lapply(R.um, quadform, v=vc) VAR <- do.call(cbind, VAR) SE <- sqrt(VAR) colnames(SE) <- types return(SE) } msubtract <- function(z1, z2) mapply("-", e1=z1, e2=z2, SIMPLIFY=FALSE) mmultiply <- function(z1, z2) solapply(z1, "*", e2=z2) SErelriskPixels <- function(model, riskvalues, icontrol) { ## riskvalues is an imlist types <- names(riskvalues) ntypes <- length(types) ## canonical covariates S.um <- model.images(model) ## S.um is a hyperframe with one column for each mark value ## and one row for each canonical covariate dS.um <- lapply(S.um, msubtract, z2=S.um[,icontrol,drop=TRUE]) R.um <- mapply(mmultiply, z1=dS.um, z2=riskvalues, SIMPLIFY=FALSE) VAR <- vector(mode="list", length=ntypes) ntypes <- length(types) vc <- vcov(model) ncoef <- nrow(vc) for(type in 1:ntypes) { v <- 0 Rum <- R.um[[type]] 
for(i in 1:ncoef) { for(j in 1:ncoef) { v <- v + Rum[[i]] * vc[i,j] * Rum[[j]] } } VAR[[type]] <- v } names(VAR) <- types VAR <- as.solist(VAR) SE <- as.solist(lapply(VAR, sqrt)) return(SE) } SEprobPixels <- function(model, probvalues) { ## probvalues is an imlist types <- names(probvalues) ntypes <- length(types) ## canonical covariates S.um <- model.images(model) ## S.um is a hyperframe with one column for each mark value ## and one row for each canonical covariate ncoef <- length(coef(model)) Sbar.u <- vector(mode="list", length=ncoef) for(k in 1:ncoef) Sbar.u[[k]] <- Reduce("+", mapply("*", e1=S.um[k,,drop=TRUE], e2=probvalues, SIMPLIFY=FALSE)) ## Sbar.u is a list of images, one for each canonical covariate Sdif.um <- lapply(as.list(S.um), msubtract, z2=Sbar.u) ## Sdif.um is a list of lists of images. ## List of length ntypes, ## each entry being an imlist of length ncoef P.um <- mapply(mmultiply, Sdif.um, probvalues, SIMPLIFY=FALSE) ## P.um is same format as Sdif.um vc <- vcov(model) ncoef <- nrow(vc) VAR <- vector(mode="list", length=ntypes) for(m in 1:ntypes) { v <- 0 Pum <- P.um[[m]] for(i in 1:ncoef) { for(j in 1:ncoef) { v <- v + Pum[[i]] * vc[i,j] * Pum[[j]] } } VAR[[m]] <- v } names(VAR) <- types VAR <- as.solist(VAR) SE <- as.solist(lapply(VAR, sqrt)) } SEprobPoints <- function(model, probvalues) { ## probvalues is a matrix with row=location and column=type types <- colnames(probvalues) ntypes <- length(types) ## canonical covariates S.um <- modmats(model) S.um <- lapply(S.um, as.matrix) ## S.um is a list of matrices, one for each possible type, ## each matrix having rows=locations and cols=covariates ## Weight each matrix by its mark probabilities SW <- mapply("*", e1=S.um, e2=as.list(as.data.frame(probvalues)), SIMPLIFY=FALSE) ## average them Sbar.u <- Reduce("+", SW) ## Sbar.u is a matrix with rows=locations and cols=covariates Sdif.um <- lapply(S.um, "-", e2=Sbar.u) ## Sdif.um is a list of matrices like S.um P.um <- mapply("*", e1=Sdif.um, 
e2=as.list(as.data.frame(probvalues)),
                 SIMPLIFY=FALSE)
    ## P.um likewise
    vc <- vcov(model)
    VAR <- lapply(P.um, quadform, v=vc)
    VAR <- do.call(cbind, VAR)
    SE <- sqrt(VAR)
    colnames(SE) <- types
    return(SE)
  }

  predictfortype <- function(type, model, types, loc) {
    ## Fitted intensity, at locations 'loc', for points carrying mark 'type'.
    Xtyped <- loc %mark% factor(type, levels=types)
    predict(model, locations=Xtyped)
  }

  killglitches <- function(z, eps=.Machine$double.eps) {
    ## Flatten pixel values that differ only by numerical noise.
    rng <- range(z, finite=TRUE)
    if(max(abs(rng)) < eps) {
      ## all finite values are numerically zero
      z[] <- 0
    } else if(diff(rng) < eps) {
      ## values are numerically constant: replace by their mean
      z[] <- mean(z, na.rm=TRUE)
    }
    return(z)
  }

  relrisk.ppm
})
spatstat/R/defaultwin.R0000755000176200001440000000251513115271075014613 0ustar liggesusers#
#
# defaultwin.R
#
# $Revision: 1.10 $ $Date: 2015/10/21 09:06:57 $
#

default.expand <- function(object, m=2, epsilon=1e-6, w=Window(object)) {
  ## Default window-expansion rule for simulating 'object'
  ## (a fitted point process model or an rmh model specification).
  stopifnot(is.ppm(object) || inherits(object, "rmhmodel"))
  ## a Poisson model needs no expansion
  if(is.poisson(object))
    return(.no.expansion)
  ## default is no expansion when the model is nonstationary
  if(!is.stationary(object))
    return(.no.expansion)
  ## otherwise, expand the data window by distance m * reach(model)
  rr <- reach(object, epsilon=epsilon)
  if(!is.finite(rr))
    return(rmhexpand())
  if(!is.numeric(m) || length(m) != 1 || m < 1)
    stop("m should be a single number >= 1")
  rule <- rmhexpand(distance = m * rr)
  if(is.owin(w)) {
    ## apply the rule to the given window now,
    ## and freeze the result as the new expansion rule
    rule <- rmhexpand(expand.owin(w, rule))
  }
  return(rule)
}

default.clipwindow <- function(object, epsilon=1e-6) {
  ## Default window to which simulated realisations should be clipped:
  ## the data window eroded by the interaction range of the model.
  stopifnot(is.ppm(object) || inherits(object, "rmhmodel"))
  w <- as.owin(object)   # data window
  if(is.null(w))
    return(NULL)
  rr <- reach(object, epsilon=epsilon)   # interaction range
  if(!is.finite(rr))
    return(NULL)
  if(rr == 0) w else erosion(w, rr)
}
spatstat/R/percy.R0000644000176200001440000000542513115225157013573 0ustar liggesusers## percus.R
##
## Percus-Yevick style approximations
to pcf and K ## ## $Revision: 1.4 $ $Date: 2014/01/31 10:10:19 $ pcfmodel.ppm <- local({ pcfmodel.ppm <- function(model, ...) { if(is.multitype(model)) stop("Not yet implemented for multitype models") if(!is.stationary(model)) stop("Model must be stationary") if(is.poisson(model)) return(function(r) rep(1, length(r))) inte <- as.interact(model) if(inte$family$name != "pairwise") stop("Only implemented for pairwise-interaction models") lambda <- intensity(model) beta <- exp(coef(model)[1]) par <- inte$par pot <- inte$pot f <- fitin(model) Vcoefs <- f$coefs[f$Vnames] Mayer <- inte$Mayer G <- Mayer(Vcoefs, inte) irange <- reach(inte, epsilon=1e-6) G2fun <- inte$Percy testit <- resolve.1.default(list(testit=FALSE), list(...)) if(testit || is.null(G2fun)) G2fun <- pairwisePercy fun <- function(r) { pcfapprox(r, beta, lambda, pot, par, Vcoefs, G, G2fun, irange) } return(fun) } pcfapprox <- function(r, beta, lambda, pot, par, Vcoefs, G, G2fun, irange) { as.numeric((beta/lambda)^2 * exp(logpairpot(r, pot, par, Vcoefs) - lambda * G2fun(r, Vcoefs, par, pot=pot, irange=irange, G=G))) } logpairpot <- function(r, pot, par, Vcoefs) { as.numeric(pot(matrix(r, ncol=1), par) %*% Vcoefs) } negpair <- function(x,y, pot, par, Vcoefs) { ## evaluate 1 - g(x,y) ## where g(x,y) is pair interaction between (0,0) and (x,y) 1 - exp(logpairpot(sqrt(x^2+y^2), pot, par, Vcoefs)) } pairwisePercy <- function(r, Vcoefs, par, ..., G, pot, irange, dimyx=256) { S <- max(max(r), irange) ng <- as.im(negpair, square(c(-S,S)), pot=pot, par=par, Vcoefs=Vcoefs, dimyx=dimyx) ng2 <- convolve.im(ng) rr <- seq(min(r), max(r), length=dimyx[1]) yy <- ng2[list(x=rr, y=rep.int(0, dimyx[1]))] zz <- 2 * G - yy z <- approx(rr, zz, r)$y return(z) } pcfmodel.ppm }) Kmodel.ppm <- local({ Kmodel.ppm <- function(model, ...) { if(is.poisson(model)) return(function(r) { pi * r^2 }) pc <- pcfmodel(model, ...) 
K <- function(r) pcf2K(r, pc) return(K) } pcf2K <- function(r, pc) { ## integrate the pair correlation function to obtain the K-function if(length(r) == 1) { ## definite integral spcfs <- function(s) { s * pc(s) } y <- 2 * pi * integrate(spcfs, lower=0, upper=r)$value } else { ## indefinite integral rr <- seq(0, max(r), length=1025) dr <- max(r)/(length(rr) - 1) ff <- 2 * pi * rr * pc(rr) yy <- dr * cumsum(ff) y <- approx(rr, yy, r)$y } return(y) } Kmodel.ppm }) spatstat/R/pcfmulti.inhom.R0000755000176200001440000002216513115271120015377 0ustar liggesusers# # pcfmulti.inhom.R # # $Revision: 1.15 $ $Date: 2016/09/21 07:28:42 $ # # inhomogeneous multitype pair correlation functions # # pcfcross.inhom <- function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) g <- pcfmulti.inhom(X, I, J, lambdaI, lambdaJ, ..., r=r,breaks=breaks, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, sigma=sigma, varcov=varcov, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(g, substitute(g[inhom,i,j](r), list(i=iname,j=jname)), c("g", paste0("list", paren(paste("inhom", i, j, sep=",")))), new.yexp=substitute(g[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(result, "dangerous") <- attr(g, "dangerous") return(result) } pcfdot.inhom <- function(X, i, lambdaI=NULL, lambdadot=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) 
if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. all points Iname <- paste("points with mark i =", i) Jname <- paste("points") g <- pcfmulti.inhom(X, I, J, lambdaI, lambdadot, ..., r=r,breaks=breaks, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, sigma=sigma, varcov=varcov, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) result <- rebadge.fv(g, substitute(g[inhom, i ~ dot](r), list(i=iname)), c("g", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), new.yexp=substitute(g[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) if(!is.null(dang <- attr(g, "dangerous"))) { dang[dang == "lambdaJ"] <- "lambdadot" dang[dang == "lambdaIJ"] <- "lambdaIdot" attr(result, "dangerous") <- dang } return(result) } pcfmulti.inhom <- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), sigma=NULL, varcov=NULL, Iname="points satisfying condition I", Jname="points satisfying condition J") { verifyclass(X, "ppp") # r.override <- !is.null(r) win <- X$window areaW <- area(win) npts <- npoints(X) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("translate", "Ripley") correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) # bandwidth if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(npts/areaW) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * 
stoyan /sqrt(npts/areaW) } ########## indices I and J ######################## if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != npts || length(J) != npts) stop(paste("The length of I and J must equal", "the number of points in the pattern")) nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) XI <- X[I] XJ <- X[J] ########## intensity values ######################### dangerous <- c("lambdaI", "lambdaJ") dangerI <- dangerJ <- TRUE if(is.null(lambdaI)) { # Estimate density by leave-one-out kernel smoothing dangerI <- FALSE lambdaI <- density(XI, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) } else { # lambda values provided if(is.vector(lambdaI)) check.nvector(lambdaI, nI) else if(is.im(lambdaI)) lambdaI <- safelookup(lambdaI, XI) else if(is.function(lambdaI)) lambdaI <- lambdaI(XI$x, XI$y) else stop(paste(sQuote("lambdaI"), "should be a vector, a pixel image, or a function")) } if(is.null(lambdaJ)) { # Estimate density by leave-one-out kernel smoothing dangerJ <- FALSE lambdaJ <- density(XJ, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) } else { # lambda values provided if(is.vector(lambdaJ)) check.nvector(lambdaJ, nJ) else if(is.im(lambdaJ)) lambdaJ <- safelookup(lambdaJ, XJ) else if(is.function(lambdaJ)) lambdaJ <- lambdaJ(XJ$x, XJ$y) else stop(paste(sQuote("lambdaJ"), "should be a vector, a pixel image, or a function")) } danger <- dangerI || dangerJ ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, npts/areaW) breaks <- handle.r.b.args(r, breaks, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) fname <- c("g", 
"list(inhom,I,J)") out <- fv(df, "r", quote(g[inhom,I,J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=quote(g[list(inhom,I,J)](r))) ########## smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# # compute pairwise distances # identify close pairs of points close <- crosspairs(XI, XJ, rmax+hmax, what="ijd") # map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i jcloseJ <- close$j # Form weight for each pair weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ]) ###### compute ####### if(any(correction=="translate")) { # translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) gT <- sewpcf(dclose, edgewt * weight, denargs, areaW)$g out <- bind.fv(out, data.frame(trans=gT), makefvlabel(NULL, "hat", fname, "Trans"), "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) gR <- sewpcf(dclose, edgewt * weight, denargs, areaW)$g out <- bind.fv(out, data.frame(iso=gR), makefvlabel(NULL, "hat", fname, "Ripley"), "isotropic-corrected estimate of %s", "iso") } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . 
~ r fvnames(out, ".") <- corrxns # unitname(out) <- unitname(X) if(danger) attr(out, "dangerous") <- dangerous return(out) } spatstat/R/randomseg.R0000755000176200001440000000443513115271120014422 0ustar liggesusers# # randomseg.R # # $Revision: 1.12 $ $Date: 2016/12/01 09:32:41 $ # rpoisline <- function(lambda, win=owin()) { win <- as.owin(win) # determine circumcircle xr <- win$xrange yr <- win$yrange xmid <- mean(xr) ymid <- mean(yr) width <- diff(xr) height <- diff(yr) rmax <- sqrt(width^2 + height^2)/2 boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax) # generate poisson lines through circumcircle n <- rpois(1, lambda * 2 * pi * rmax) if(n == 0) { X <- psp(numeric(0), numeric(0), numeric(0), numeric(0), marks=integer(0), window=win) attr(X, "lines") <- infline(p=numeric(0), theta=numeric(0)) attr(X, "linemap") <- integer(0) return(X) } theta <- runif(n, max= 2 * pi) p <- runif(n, max=rmax) # compute intersection points with circle q <- sqrt(rmax^2 - p^2) co <- cos(theta) si <- sin(theta) X <- psp(x0= xmid + p * co + q * si, y0= ymid + p * si - q * co, x1= xmid + p * co - q * si, y1= ymid + p * si + q * co, marks = seq_len(n), window=boundbox, check=FALSE) # infinite lines L <- infline(p = p + xmid * co + ymid * si, theta = theta) # clip to window X <- X[win] # append info linemap <- as.integer(marks(X)) X <- unmark(X) attr(X, "lines") <- L attr(X, "linemap") <- linemap return(X) } rlinegrid <- function(angle=45, spacing=0.1, win=owin()) { win <- as.owin(win) # determine circumcircle width <- diff(win$xrange) height <- diff(win$yrange) rmax <- sqrt(width^2 + height^2)/2 xmid <- mean(win$xrange) ymid <- mean(win$yrange) # generate randomly-displaced grid of lines through circumcircle u <- runif(1, min=0, max=spacing) - rmax if(u >= rmax) return(psp(numeric(0), numeric(0), numeric(0), numeric(0), window=win, check=FALSE)) p <- seq(from=u, to=rmax, by=spacing) # compute intersection points with circle q <- sqrt(rmax^2 - p^2) theta <- pi * ((angle - 
90)/180)
  co <- cos(theta)
  si <- sin(theta)
  X <- psp(x0= xmid + p * co + q * si,
           y0= ymid + p * si - q * co,
           x1= xmid + p * co - q * si,
           y1= ymid + p * si + q * co,
           window=owin(xmid+c(-1,1)*rmax, ymid+c(-1,1)*rmax),
           check=FALSE)
  # clip to window
  X <- X[win]
  return(X)
}
spatstat/R/unstack.R0000644000176200001440000000376713115225157014125 0ustar liggesusers#'
#' unstack.R
#'
#' Methods for generic 'unstack'
#'
#' $Revision: 1.3 $ $Date: 2016/06/28 04:01:40 $

unstack.ppp <-
unstack.psp <-
unstack.lpp <- function(x, ...) {
  #' Split a pattern with data-frame marks into a list of patterns,
  #' one pattern for each column of marks.
  trap.extra.arguments(...)
  marx <- marks(x)
  d <- dim(marx)
  if(is.null(d))
    return(solist(x))   # vector marks (or none): nothing to unstack
  bare <- unmark(x)
  result <- lapply(seq_len(d[2]),
                   function(k) {
                     xk <- bare
                     marks(xk) <- marx[, k, drop=FALSE]
                     xk
                   })
  names(result) <- colnames(marx)
  return(as.solist(result))
}

unstack.msr <- function(x, ...) {
  #' Split a vector-valued measure into scalar measures, one per column,
  #' carrying along the corresponding smoothed density images if present.
  trap.extra.arguments(...)
  d <- dim(x)
  if(is.null(d))
    return(solist(x))   # already scalar-valued
  smo <- attr(x, "smoothdensity")
  if(!inherits(smo, "imlist")) smo <- NULL
  extractcol <- function(k) {
    xk <- x[, k, drop=FALSE]
    if(!is.null(smo))
      attr(xk, "smoothdensity") <- smo[[k]]
    xk
  }
  result <- lapply(seq_len(d[2]), extractcol)
  names(result) <- colnames(x)
  return(as.solist(result))
}

unstackFilter <- function(x) {
  #' Classes that represent a single spatial object
  #' and should never be unstacked into components.
  nonvectorclasses <- c("im", "owin", "quad", "tess",
                        "quadratcount", "quadrattest",
                        "funxy", "distfun", "nnfun",
                        "linnet", "linfun",
                        "influence.ppm", "leverage.ppm")
  if(inherits(x, nonvectorclasses)) solist(x) else unstack(x)
}

unstack.solist <- function(x, ...) {
  #' Unstack each element of the list, with the filter applied.
  trap.extra.arguments(...)
  pieces <- lapply(x, unstackFilter)
  as.solist(pieces)
}

unstack.layered <- function(x, ...) {
  trap.extra.arguments(...)
y <- lapply(x, unstackFilter)
  ny <- lengths(y)
  nx <- length(ny)
  if(all(ny == 1) || nx == 0) return(solist(x))
  pa <- layerplotargs(x)
  mm <- indexCartesian(ny)
  nz <- nrow(mm)
  z <- vector(mode="list", length=nz)
  nama <- lapply(y, names)
  for(i in seq_len(nz)) {
    ll <- mapply("[[", x=y, i=mm[i,], SIMPLIFY=FALSE)
    nam <- mapply("[", x=nama, i=mm[i,])
    nam <- nam[!sapply(nam, is.null)]
    names(z)[i] <- paste(nam, collapse=".")
    z[[i]] <- layered(LayerList=ll, plotargs=pa)
  }
  z <- as.solist(z)
  return(z)
}
spatstat/R/pairs.im.R0000755000176200001440000000771513115271120014171 0ustar liggesusers#
#
# pairs.im.R
#
# $Revision: 1.11 $ $Date: 2016/11/15 03:47:29 $
#

pairs.listof <- pairs.solist <- function(..., plot=TRUE) {
  ## Pairs plot for a list of pixel images.
  ## Dispatches to pairs.linim if any argument is an image on a
  ## linear network ('linim'), otherwise to pairs.im.
  argh <- expandSpecialLists(list(...), special=c("solist", "listof"))
  ## use vapply, not sapply: sapply on an empty argument list returns
  ## list() and any(list()) is an error, whereas vapply yields logical(0)
  haslines <- any(vapply(argh, inherits, logical(1), what="linim"))
  if(haslines) {
    do.call(pairs.linim, append(argh, list(plot=plot)))
  } else {
    do.call(pairs.im, append(argh, list(plot=plot)))
  }
}

pairs.im <- function(..., plot=TRUE) {
  argh <- list(...)
cl <- match.call() ## unpack single argument which is a list of images if(length(argh) == 1) { arg1 <- argh[[1]] if(is.list(arg1) && all(unlist(lapply(arg1, is.im)))) argh <- arg1 } ## identify which arguments are images isim <- unlist(lapply(argh, is.im)) nim <- sum(isim) if(nim == 0) stop("No images provided") ## separate image arguments from others imlist <- argh[isim] rest <- argh[!isim] ## determine image names for plotting imnames <- names(imlist) backupnames <- paste(cl)[c(FALSE, isim, FALSE)] if(length(backupnames) != nim) backupnames <- paste("V", seq_len(nim), sep="") if(length(imnames) != nim) imnames <- backupnames else if(any(needname <- !nzchar(imnames))) imnames[needname] <- backupnames[needname] ## if(nim == 1) { ## one image: plot histogram hist(..., plot=plot) ## save pixel values Z <- imlist[[1]] pixvals <- list(Z[]) names(pixvals) <- imnames } else { ## extract pixel rasters and reconcile them imwins <- lapply(imlist, as.owin) names(imwins) <- NULL rasta <- do.call(intersect.owin, imwins) ## extract image pixel values on common raster pixvals <- lapply(imlist, "[.im", i=rasta, raster=rasta, drop=TRUE) } ## combine into data frame pixdf <- do.call(data.frame, pixvals) ## pairs plot if(plot && nim > 1) do.call(pairs, resolve.defaults(list(x=pixdf), rest, list(labels=imnames, pch="."))) labels <- resolve.defaults(rest, list(labels=imnames))$labels colnames(pixdf) <- labels class(pixdf) <- c("plotpairsim", class(pixdf)) return(invisible(pixdf)) } plot.plotpairsim <- function(x, ...) { xname <- short.deparse(substitute(x)) x <- as.data.frame(x) if(ncol(x) == 1) { do.call(hist.default, resolve.defaults(list(x=x[,1]), list(...), list(main=xname))) } else { do.call(pairs.default, resolve.defaults(list(x=x), list(...), list(pch="."))) } return(invisible(NULL)) } print.plotpairsim <- function(x, ...) 
{
  cat("Object of class plotpairsim\n")
  cat(paste("contains pixel data for", commasep(sQuote(colnames(x))), "\n"))
  return(invisible(NULL))
}

panel.image <- function(x, y, ..., sigma=NULL) {
  ## Off-diagonal panel function for pairs():
  ## kernel density image of the scatter of (x, y).
  ## BUG FIX: par(usr) with an unnamed numeric vector does not restore
  ## the 'usr' parameter; it must be set by name, par(usr = usr).
  usr <- par("usr"); on.exit(par(usr = usr))
  par(usr = c(0, 1, 0, 1))
  xx <- scaletointerval(x)
  yy <- scaletointerval(y)
  p <- ppp(xx, yy, window=square(1), check=FALSE)
  plot(density(p, sigma=sigma), add=TRUE, ...)
}

panel.contour <- function(x, y, ..., sigma=NULL) {
  ## Off-diagonal panel function for pairs():
  ## contour plot of the kernel density of the scatter of (x, y).
  usr <- par("usr"); on.exit(par(usr = usr))   # restore by name (see above)
  par(usr = c(0, 1, 0, 1))
  xx <- scaletointerval(x)
  yy <- scaletointerval(y)
  p <- ppp(xx, yy, window=square(1), check=FALSE)
  Z <- density(p, sigma=sigma)
  do.call(contour,
          resolve.defaults(list(x=Z, add=TRUE),
                           list(...),
                           list(drawlabels=FALSE)))
}

panel.histogram <- function(x, ...) {
  ## Diagonal panel function for pairs():
  ## histogram of x, rescaled so the tallest bar has height 1.
  usr <- par("usr"); on.exit(par(usr = usr))   # restore by name (see above)
  par(usr = c(usr[1:2], 0, 1.5))
  h <- hist(x, plot = FALSE)
  breaks <- h$breaks
  nB <- length(breaks)
  y <- h$counts
  y <- y/max(y)
  do.call(rect,
          resolve.defaults(list(xleft = breaks[-nB],
                                ybottom = 0,
                                xright = breaks[-1],
                                ytop = y),
                           list(...),
                           list(col="grey")))
}
spatstat/R/model.depends.R0000755000176200001440000000441313115271120015160 0ustar liggesusers#
#
# Determine which 'canonical variables' depend on a supplied covariate
#
# $Revision: 1.8 $ $Date: 2013/04/25 06:37:43 $
#
model.depends <- function(object) {
  # supplied covariates
  fo <- formula(object)
  if(length(as.list(fo)) == 3) {
    # formula has a response: strip it
    fo <- fo[-2]
  }
  covars <- variablesinformula(fo)
  # canonical covariates
  mm <- model.matrix(object)
  ass <- attr(mm, "assign")
  # model terms
  tt <- terms(object)
  lab <- attr(tt, "term.labels")
  # 'ass' maps canonical covariates to 'lab'
  # determine which canonical covariate depends on which supplied covariate
  depends <- matrix(FALSE, length(ass), length(covars))
  for(i in seq(along=ass)) {
    if(ass[i] == 0) # 0 is the intercept term
      depends[i,] <- FALSE
    else {
      turm <- lab[ass[i]]
      depends[i, ] <- covars %in% all.vars(parse(text=turm))
    }
  }
  rownames(depends) <-
colnames(mm) colnames(depends) <- covars # detect offsets if(!is.null(oo <- attr(tt, "offset")) && ((noo <- length(oo)) > 0)) { # entries of 'oo' index the list of variables in terms object vv <- attr(tt, "variables") offdep <- matrix(FALSE, noo, length(covars)) offnms <- character(noo) for(i in seq_len(noo)) { offseti <- languageEl(vv, oo[i] + 1) offdep[i, ] <- covars %in% all.vars(offseti) offnms[i] <- deparse(offseti) } rownames(offdep) <- offnms colnames(offdep) <- covars attr(depends, "offset") <- offdep } return(depends) } model.is.additive <- function(object) { dep <- model.depends(object) hit <- t(dep) %*% dep diag(hit) <- 0 ok <- all(hit == 0) return(ok) } model.covariates <- function(object, fitted=TRUE, offset=TRUE) { md <- model.depends(object) nm <- colnames(md) keep <- rep.int(FALSE, length(nm)) # variables used in formula with coefficients if(fitted) keep <- apply(md, 2, any) # variables used in offset if(offset) { oo <- attr(md, "offset") if(!is.null(oo)) keep <- keep | apply(oo, 2, any) } return(nm[keep]) } has.offset.term <- function(object) { # model terms tt <- terms(object) oo <- attr(tt, "offset") return(!is.null(oo) && (length(oo) > 0)) } has.offset <- function(object) { has.offset.term(object) || !is.null(model.offset(model.frame(object))) } spatstat/R/hasenvelope.R0000644000176200001440000000120513115225157014752 0ustar liggesusers#' #' hasenvelope.R #' #' A simple class of objects which contain additional envelope data #' #' $Revision: 1.1 $ $Date: 2015/10/05 06:20:31 $ hasenvelope <- function(X, E=NULL) { if(inherits(E, "envelope")) { attr(X, "envelope") <- E class(X) <- c("hasenvelope", class(X)) } return(X) } print.hasenvelope <- function(x, ...) 
{ NextMethod("print") splat("[Object contains simulation envelope data]") return(invisible(NULL)) } envelope.hasenvelope <- function(Y, ..., Yname=NULL) { if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) E <- attr(Y, "envelope") return(envelope(E, ..., Yname=Yname)) } spatstat/R/pairdistlpp.R0000755000176200001440000000560713115271120015000 0ustar liggesusers# # pairdistlpp.R # # $Revision: 1.12 $ $Date: 2017/06/05 10:31:58 $ # # # pairdist.lpp # Calculates the shortest-path distance between each pair of points # in a point pattern on a linear network. # pairdist.lpp <- function(X, ..., method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) # n <- npoints(X) pairdistmat <- matrix(Inf,n,n) diag(pairdistmat) <- 0 # L <- as.linnet(X, sparse=FALSE) # if(any(is.infinite(L$dpath))) { #' disconnected network lab <- connected(L, what="labels") subsets <- split(seq_len(nvertices(L)), lab) for(i in seq_along(subsets)) { Xi <- thinNetwork(X, retainvertices=subsets[[i]]) witch <- attr(Xi, "retainpoints") pairdistmat[witch, witch] <- pairdist.lpp(Xi, method=method) } return(pairdistmat) } # Y <- as.ppp(X) Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath # nearest segment for each point pro <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)$seg if(method == "interpreted") { # loop through all pairs of data points for (i in 1:(n-1)) { proi <- pro[i] Xi <- Y[i] nbi1 <- from[proi] nbi2 <- to[proi] vi1 <- Lvert[nbi1] vi2 <- Lvert[nbi2] dXi1 <- crossdist(Xi, vi1) dXi2 <- crossdist(Xi, vi2) for (j in (i+1):n) { Xj <- Y[j] proj <- pro[j] if(proi == proj) { # points i and j lie on the same segment # use Euclidean distance d <- crossdist(Xi, Xj) } else { # shortest path from i to j passes through ends of segments nbj1 <- from[proj] nbj2 <- to[proj] vj1 <- Lvert[nbj1] vj2 <- Lvert[nbj2] # Calculate shortest of 4 possible paths from i to j d1Xj <- crossdist(vj1,Xj) d2Xj <- crossdist(vj2,Xj) d11 <- dXi1 + dpath[nbi1,nbj1] + d1Xj 
d12 <- dXi1 + dpath[nbi1,nbj2] + d2Xj d21 <- dXi2 + dpath[nbi2,nbj1] + d1Xj d22 <- dXi2 + dpath[nbi2,nbj2] + d2Xj d <- min(d11,d12,d21,d22) } # store result pairdistmat[i,j] <- pairdistmat[j,i] <- d } } } else { # C code # convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L zz <- .C("linpairdist", np = as.integer(n), xp = as.double(Y$x), yp = as.double(Y$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.double(L$n), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), segmap = as.integer(segmap), answer = as.double(numeric(n*n)), PACKAGE = "spatstat") pairdistmat <- matrix(zz$answer, n, n) } return(pairdistmat) } spatstat/R/rotmean.R0000644000176200001440000000274713115225157014122 0ustar liggesusers## ## rotmean.R ## ## rotational average of pixel values ## ## $Revision: 1.9 $ $Date: 2015/06/18 02:45:42 $ rotmean <- function(X, ..., origin, padzero=TRUE, Xname, result=c("fv", "im")) { if(missing(Xname)) Xname <- sensiblevarname(short.deparse(substitute(X)), "X") trap.extra.arguments(..., .Context="rotmean") stopifnot(is.im(X)) if(!missing(origin)) X <- shift(X, origin=origin) result <- match.arg(result) rmax <- with(vertices(Frame(X)), sqrt(max(x^2+y^2))) if(padzero) X <- padimage(na.handle.im(X, 0), 0, W=square(c(-1,1)*rmax)) values <- X[drop=TRUE] radii <- with(as.data.frame(rasterxy.im(X, drop=TRUE)), sqrt(x^2+y^2)) ra <- pmin(range(radii), rmax) eps <- sqrt(X$xstep^2 + X$ystep^2) a <- unnormdensity(radii, from=ra[1], to=ra[2], bw=eps) b <- unnormdensity(radii, weights=values, from=ra[1], to=ra[2], bw=eps) df <- data.frame(r=a$x, f=b$y/a$y) FUN <- fv(df, argu="r", ylab=substitute(bar(X)(r), list(X=as.name(Xname))), valu="f", fmla=(. 
~ r), alim=ra, labl=c("r", "%s(r)"), desc=c("distance argument r", "rotational average"), unitname=unitname(X), fname=paste0("bar", paren(Xname))) attr(FUN, "dotnames") <- "f" if(result == "fv") return(FUN) ## compute image FUN <- as.function(FUN) XX <- as.im(X, na.replace=1) IM <- as.im(function(x,y,FUN){ FUN(sqrt(x^2+y^2)) }, XX, FUN=FUN) return(IM) } spatstat/R/hardcore.R0000755000176200001440000000740013115271075014236 0ustar liggesusers# # # hardcore.S # # $Revision: 1.11 $ $Date: 2015/10/21 09:06:57 $ # # The Hard core process # # Hardcore() create an instance of the Hard Core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Hardcore <- local({ BlankHardcore <- list( name = "Hard core process", creator = "Hardcore", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- 0 * d v[ d <= par$hc ] <- (-Inf) attr(v, "IsOffset") <- TRUE v }, par = list(hc = NULL), # filled in later parnames = "hard core distance", selfstart = function(X, self) { # self starter for Hardcore nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(md == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Hardcore model")) return(self) } if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) Hardcore(hc = hcX) }, init = function(self) { hc <- self$par$hc if(length(hc) != 1) stop("hard core distance must be a single value") if(!is.na(hc) && !(is.numeric(hc) && hc > 0)) stop("hard core distance hc must be a positive number, or NA") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = 
function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { hc <- self$par$hc return(hc) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) { # fast evaluator for Hardcore interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Hardcore") hc <- potpars$hc # call evaluator for Strauss process counts <- strausscounts(U, X, hc, EqualPairs) # all counts should be zero v <- matrix(ifelseAB(counts > 0, -Inf, 0), ncol=1) attr(v, "IsOffset") <- TRUE return(v) }, Mayer=function(coeffs, self) { # second Mayer cluster integral hc <- self$par$hc return(pi * hc^2) }, Percy=function(d, coeffs, par, ...) { ## term used in Percus-Yevick type approximation H <- par$hc t <- abs(d/(2*H)) t <- pmin.int(t, 1) y <- 2 * H^2 * (pi - acos(t) + t * sqrt(1 - t^2)) return(y) } ) class(BlankHardcore) <- "interact" Hardcore <- function(hc=NA) { instantiate.interact(BlankHardcore, list(hc=hc)) } Hardcore <- intermaker(Hardcore, BlankHardcore) Hardcore }) spatstat/R/plot.anylist.R0000644000176200001440000005156613151223044015111 0ustar liggesusers## ## plot.anylist.R ## ## Plotting functions for 'solist', 'anylist', 'imlist' ## and legacy class 'listof' ## ## $Revision: 1.25 $ $Date: 2017/08/24 01:58:09 $ ## plot.anylist <- plot.solist <- plot.listof <- local({ ## auxiliary functions has.multiplot <- function(x) { is.ppp(x) } extraplot <- function(nnn, x, ..., add=FALSE, extrargs=list(), panel.args=NULL, plotcommand="plot") { argh <- list(...) 
if(has.multiplot(x) && identical(plotcommand,"plot")) argh <- c(argh, list(multiplot=FALSE)) if(!is.null(panel.args)) { xtra <- if(is.function(panel.args)) panel.args(nnn) else panel.args if(!is.list(xtra)) stop(paste0("panel.args", if(is.function(panel.args)) "(i)" else "", " should be a list")) argh <- resolve.defaults(xtra, argh) } if(length(extrargs) > 0) argh <- resolve.defaults(argh, extrargs) ## some plot commands don't recognise 'add' if(add) argh <- append(argh, list(add=TRUE)) do.call(plotcommand, append(list(x=x), argh)) } exec.or.plot <- function(cmd, i, xi, ..., extrargs=list(), add=FALSE) { if(is.null(cmd)) return(NULL) argh <- resolve.defaults(list(...), extrargs, ## some plot commands don't recognise 'add' if(add) list(add=TRUE) else NULL, if(has.multiplot(cmd)) list(multiplot=FALSE) else NULL) if(is.function(cmd)) { do.call(cmd, resolve.defaults(list(i, xi), argh)) } else { do.call(plot, resolve.defaults(list(cmd), argh)) } } exec.or.plotshift <- function(cmd, i, xi, ..., vec=vec, extrargs=list(), add=FALSE) { if(is.null(cmd)) return(NULL) argh <- resolve.defaults(list(...), extrargs, ## some plot commands don't recognise 'add' if(add) list(add=TRUE) else NULL, if(has.multiplot(cmd)) list(multiplot=FALSE) else NULL) if(is.function(cmd)) { do.call(cmd, resolve.defaults(list(i, xi), argh)) } else { cmd <- shift(cmd, vec) do.call(plot, resolve.defaults(list(cmd), argh)) } } classes.with.do.plot <- c("im", "ppp", "psp", "msr", "layered", "tess") ## bounding box, including ribbon for images, legend for point patterns getplotbox <- function(x, ..., do.plot, plotcommand="plot", multiplot) { if(inherits(x, classes.with.do.plot)) { if(identical(plotcommand, "plot")) { y <- if(has.multiplot(x)) plot(x, ..., multiplot=FALSE, do.plot=FALSE) else plot(x, ..., do.plot=FALSE) return(as.owin(y)) } else if(identical(plotcommand, "contour")) { y <- contour(x, ..., do.plot=FALSE) return(as.owin(y)) } else { plc <- plotcommand if(is.character(plc)) plc <- get(plc) 
if(!is.function(plc)) stop("Unrecognised plot function") if("do.plot" %in% names(args(plc))) return(as.owin(do.call(plc, list(x=x, ..., do.plot=FALSE)))) return(as.rectangle(x)) } } return(try(as.rectangle(x), silent=TRUE)) } # calculate bounding boxes for each panel using intended arguments! getPlotBoxes <- function(xlist, ..., panel.args=NULL, extrargs=list()) { userargs <- list(...) n <- length(xlist) result <- vector(length=n, mode="list") for(i in seq_len(n)) { pai <- if(is.function(panel.args)) panel.args(i) else list() argh <- resolve.defaults(pai, userargs, extrargs) result[[i]] <- do.call(getplotbox, append(list(x=xlist[[i]]), argh)) } return(result) } is.shiftable <- function(x) { if(is.null(x)) return(TRUE) if(is.function(x)) return(FALSE) y <- try(as.rectangle(x), silent=TRUE) return(!inherits(y, "try-error")) } maxassigned <- function(i, values) max(-1, values[i[i > 0]]) plot.anylist <- function(x, ..., main, arrange=TRUE, nrows=NULL, ncols=NULL, main.panel=NULL, mar.panel=c(2,1,1,2), hsep = 0, vsep = 0, panel.begin=NULL, panel.end=NULL, panel.args=NULL, panel.begin.args=NULL, panel.end.args=NULL, plotcommand="plot", adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL, adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE ) { xname <- short.deparse(substitute(x)) ## recursively expand entries which are 'anylist' etc while(any(sapply(x, inherits, what="anylist"))) x <- as.solist(expandSpecialLists(x, "anylist"), demote=TRUE) isSo <- inherits(x, "solist") isIm <- inherits(x, "imlist") || (isSo && all(unlist(lapply(x, is.im)))) ## `boomerang despatch' cl <- match.call() if(missing(plotcommand) && isIm) { cl[[1]] <- as.name("image.imlist") parenv <- sys.parent() return(invisible(eval(cl, envir=parenv))) } if(isSo) { allfv <- somefv <- FALSE } else { isfv <- unlist(lapply(x, is.fv)) allfv <- all(isfv) somefv <- any(isfv) } ## panel margins if(!missing(mar.panel)) { nm <- length(mar.panel) if(nm == 1) mar.panel <- rep(mar.panel, 4) 
else if(nm == 2) mar.panel <- rep(mar.panel, 2) else if(nm != 4) stop("mar.panel should have length 1, 2 or 4") } else if(somefv) { ## change default mar.panel <- 0.25+c(4,4,2,2) } n <- length(x) names(x) <- good.names(names(x), "Component_", 1:n) if(is.null(main.panel)) main.panel <- names(x) else { if(!is.expression(main.panel)) main.panel <- as.character(main.panel) nmp <- length(main.panel) if(nmp == 1) main.panel <- rep.int(main.panel, n) else if(nmp != n) stop("Incorrect length for main.panel") } if(allfv && equal.scales) { ## all entries are 'fv' objects: determine their plot limits fvlims <- lapply(x, plot, ..., limitsonly=TRUE) ## establish common x,y limits for all panels xlim <- range(unlist(lapply(fvlims, getElement, name="xlim"))) ylim <- range(unlist(lapply(fvlims, getElement, name="ylim"))) extrargs <- list(xlim=xlim, ylim=ylim) } else extrargs <- list() extrargs.begin <- resolve.defaults(panel.begin.args, extrargs) extrargs.end <- resolve.defaults(panel.end.args, extrargs) if(!arrange) { ## sequence of plots result <- vector(mode="list", length=n) for(i in 1:n) { xi <- x[[i]] exec.or.plot(panel.begin, i, xi, main=main.panel[i], extrargs=extrargs.begin) result[[i]] <- extraplot(i, xi, ..., add=!is.null(panel.begin), main=main.panel[i], panel.args=panel.args, extrargs=extrargs, plotcommand=plotcommand) %orifnull% list() exec.or.plot(panel.end, i, xi, add=TRUE, extrargs=extrargs.end) } if(!is.null(adorn.left)) warning("adorn.left was ignored because arrange=FALSE") if(!is.null(adorn.right)) warning("adorn.right was ignored because arrange=FALSE") if(!is.null(adorn.top)) warning("adorn.top was ignored because arrange=FALSE") if(!is.null(adorn.bottom)) warning("adorn.bottom was ignored because arrange=FALSE") return(invisible(result)) } ## ARRAY of plots ## decide whether to plot a main header main <- if(!missing(main) && !is.null(main)) main else xname if(!is.character(main)) { ## main title could be an expression nlines <- 1 banner <- TRUE } else { ## 
main title is character string/vector, possibly "" banner <- any(nzchar(main)) if(length(main) > 1) main <- paste(main, collapse="\n") nlines <- length(unlist(strsplit(main, "\n"))) } ## determine arrangement of plots ## arrange like mfrow(nrows, ncols) plus a banner at the top if(is.null(nrows) && is.null(ncols)) { nrows <- as.integer(floor(sqrt(n))) ncols <- as.integer(ceiling(n/nrows)) } else if(!is.null(nrows) && is.null(ncols)) ncols <- as.integer(ceiling(n/nrows)) else if(is.null(nrows) && !is.null(ncols)) nrows <- as.integer(ceiling(n/ncols)) else stopifnot(nrows * ncols >= length(x)) nblank <- ncols * nrows - n if(allfv || list(plotcommand) %in% list("persp", persp)) { ## Function plots do not have physical 'size' sizes.known <- FALSE } else { ## Determine dimensions of objects ## (including space for colour ribbons, if they are images) boxes <- getPlotBoxes(x, ..., plotcommand=plotcommand, panel.args=panel.args, extrargs=extrargs) sizes.known <- !any(sapply(boxes, inherits, what="try-error")) if(sizes.known) { extrargs <- resolve.defaults(extrargs, list(claim.title.space=TRUE)) boxes <- getPlotBoxes(x, ..., plotcommand=plotcommand, panel.args=panel.args, extrargs=extrargs) } if(equal.scales && !sizes.known) { warning("Ignored equal.scales=TRUE; scales could not be determined") equal.scales <- FALSE } } if(sizes.known) { ## determine size of each panel if(equal.scales) { ## do not rescale panels scaledboxes <- boxes } else { ## rescale panels sides <- lapply(boxes, sidelengths) bwidths <- unlist(lapply(sides, "[", 1)) bheights <- unlist(lapply(sides, "[", 2)) ## Force equal heights, unless there is only one column scales <- if(ncols > 1) 1/bheights else 1/bwidths if(all(is.finite(scales))) { scaledboxes <- vector(mode="list", length=n) for(i in 1:n) scaledboxes[[i]] <- scalardilate(boxes[[i]], scales[i]) } else { #' uh-oh equal.scales <- sizes.known <- FALSE scaledboxes <- boxes } } } ## determine whether to display all objects in one enormous plot ## 
Precondition is that everything has a spatial bounding box single.plot <- equal.scales && sizes.known if(equal.scales && !single.plot && !allfv) warning("equal.scales=TRUE ignored ", "because bounding boxes ", "could not be determined", call.=FALSE) ## enforce alignment by expanding boxes if(halign) { if(!equal.scales) warning("halign=TRUE ignored because equal.scales=FALSE") ## x coordinates align in each column xr <- range(sapply(scaledboxes, getElement, name="xrange")) scaledboxes <- lapply(scaledboxes, "[[<-", i="xrange", value=xr) } if(valign) { if(!equal.scales) warning("valign=TRUE ignored because equal.scales=FALSE") ## y coordinates align in each column yr <- range(sapply(scaledboxes, getElement, name="yrange")) scaledboxes <- lapply(scaledboxes, "[[<-", i="yrange", value=yr) } ## set up layout mat <- matrix(c(seq_len(n), integer(nblank)), byrow=TRUE, ncol=ncols, nrow=nrows) if(sizes.known) { boxsides <- lapply(scaledboxes, sidelengths) xwidths <- sapply(boxsides, "[", i=1) xheights <- sapply(boxsides, "[", i=2) heights <- apply(mat, 1, maxassigned, values=xheights) widths <- apply(mat, 2, maxassigned, values=xwidths) } else { heights <- rep.int(1, nrows) widths <- rep.int(1, ncols) } #' negative heights/widths arise if a row/column is not used. meanheight <- mean(heights[heights > 0]) meanwidth <- mean(widths[heights > 0]) heights[heights <= 0] <- meanheight widths[widths <= 0] <- meanwidth nall <- n ## if(single.plot) { ## ......... create a single plot .................. 
## determine sizes ht <- max(heights) wd <- max(widths) marpar <- mar.panel * c(ht, wd, ht, wd)/6 vsep <- vsep * ht/6 hsep <- hsep * wd/6 mainheight <- any(nzchar(main.panel)) * ht/5 ewidths <- marpar[2] + widths + marpar[4] eheights <- marpar[1] + heights + marpar[3] + mainheight Width <- sum(ewidths) + hsep * (length(ewidths) - 1) Height <- sum(eheights) + vsep * (length(eheights) - 1) bigbox <- owin(c(0, Width), c(0, Height)) ox <- marpar[2] + cumsum(c(0, ewidths + hsep))[1:ncols] oy <- marpar[1] + cumsum(c(0, rev(eheights) + vsep))[nrows:1] panelorigin <- as.matrix(expand.grid(x=ox, y=oy)) ## initialise, with banner cex <- resolve.1.default(list(cex.title=1.5), list(...))/par('cex.main') plot(bigbox, type="n", main=main, cex.main=cex) ## plot individual objects result <- vector(mode="list", length=n) for(i in 1:n) { ## determine shift vector that moves bottom left corner of spatial box ## to bottom left corner of target area on plot device vec <- panelorigin[i,] - with(scaledboxes[[i]], c(xrange[1], yrange[1])) ## shift panel contents xi <- x[[i]] xishift <- shift(xi, vec) ## let rip if(!is.null(panel.begin)) exec.or.plotshift(panel.begin, i, xishift, add=TRUE, main=main.panel[i], show.all=TRUE, extrargs=extrargs.begin, vec=vec) result[[i]] <- extraplot(i, xishift, ..., add=TRUE, show.all=is.null(panel.begin), main=main.panel[i], extrargs=extrargs, panel.args=panel.args, plotcommand=plotcommand) %orifnull% list() exec.or.plotshift(panel.end, i, xishift, add=TRUE, extrargs=extrargs.end, vec=vec) } return(invisible(result)) } ## ................. multiple logical plots using 'layout' .............. 
## adjust panel margins to accommodate desired extra separation mar.panel <- pmax(0, mar.panel + c(vsep, hsep, vsep, hsep)/2) ## check for adornment if(!is.null(adorn.left)) { ## add margin at left, of width adorn.size * meanwidth nall <- i.left <- n+1 mat <- cbind(i.left, mat) widths <- c(adorn.size * meanwidth, widths) } if(!is.null(adorn.right)) { ## add margin at right, of width adorn.size * meanwidth nall <- i.right <- nall+1 mat <- cbind(mat, i.right) widths <- c(widths, adorn.size * meanwidth) } if(!is.null(adorn.bottom)) { ## add margin at bottom, of height adorn.size * meanheight nall <- i.bottom <- nall+1 mat <- rbind(mat, i.bottom) heights <- c(heights, adorn.size * meanheight) } if(!is.null(adorn.top)) { ## add margin at top, of height adorn.size * meanheight nall <- i.top <- nall + 1 mat <- rbind(i.top, mat) heights <- c(adorn.size * meanheight, heights) } if(banner) { ## Increment existing panel numbers ## New panel 1 is the banner panels <- (mat > 0) mat[panels] <- mat[panels] + 1 mat <- rbind(1, mat) heights <- c(0.1 * meanheight * (1 + nlines), heights) } ## declare layout layout(mat, heights=heights, widths=widths, respect=sizes.known) ## start output ..... ## .... 
plot banner if(banner) { opa <- par(mar=rep.int(0,4), xpd=TRUE) plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) cex <- resolve.1.default(list(cex.title=1.5), list(...))/par('cex') text(0,0,main, cex=cex) } ## plot panels npa <- par(mar=mar.panel) if(!banner) opa <- npa result <- vector(mode="list", length=n) for(i in 1:n) { xi <- x[[i]] exec.or.plot(panel.begin, i, xi, main=main.panel[i], extrargs=extrargs.begin) result <- extraplot(i, xi, ..., add = !is.null(panel.begin), main = main.panel[i], extrargs=extrargs, panel.args=panel.args, plotcommand=plotcommand) %orifnull% list() exec.or.plot(panel.end, i, xi, add=TRUE, extrargs=extrargs.end) } ## adornments if(nall > n) { par(mar=rep.int(0,4), xpd=TRUE) if(!is.null(adorn.left)) adorn.left() if(!is.null(adorn.right)) adorn.right() if(!is.null(adorn.bottom)) adorn.bottom() if(!is.null(adorn.top)) adorn.top() } ## revert layout(1) par(opa) return(invisible(result)) } plot.anylist }) contour.imlist <- contour.listof <- function(x, ...) 
{ xname <- short.deparse(substitute(x)) do.call(plot.solist, resolve.defaults(list(x=x, plotcommand="contour"), list(...), list(main=xname))) } plot.imlist <- local({ plot.imlist <- function(x, ..., plotcommand="image", equal.ribbon = FALSE, ribmar=NULL) { xname <- short.deparse(substitute(x)) if(missing(plotcommand) && any(sapply(x, inherits, what=c("linim", "linfun")))) plotcommand <- "plot" if(equal.ribbon && (list(plotcommand) %in% list("image", "plot", image, plot))) { out <- imagecommon(x, ..., xname=xname, ribmar=ribmar) } else { out <- do.call(plot.solist, resolve.defaults(list(x=x, plotcommand=plotcommand), list(...), list(main=xname))) } return(invisible(out)) } imagecommon <- function(x, ..., xname, zlim=NULL, ribbon=TRUE, ribside=c("right", "left", "bottom", "top"), ribsep=NULL, ribwid=0.5, ribn=1024, ribscale=NULL, ribargs=list(), ribmar = NULL, mar.panel = c(2,1,1,2)) { if(missing(xname)) xname <- short.deparse(substitute(x)) ribside <- match.arg(ribside) stopifnot(is.list(ribargs)) if(!is.null(ribsep)) warning("Argument ribsep is not yet implemented for image arrays") ## determine range of values if(is.null(zlim)) zlim <- range(unlist(lapply(x, range))) ## determine common colour map imcolmap <- plot.im(x[[1]], do.plot=FALSE, zlim=zlim, ..., ribn=ribn) ## plot ribbon? 
if(!ribbon) { ribadorn <- list() } else { ## determine plot arguments for colour ribbon vertical <- (ribside %in% c("right", "left")) scaleinfo <- if(!is.null(ribscale)) list(labelmap=ribscale) else list() sidecode <- match(ribside, c("bottom", "left", "top", "right")) ribstuff <- c(list(x=imcolmap, main="", vertical=vertical), ribargs, scaleinfo, list(side=sidecode)) if (is.null(mar.panel)) mar.panel <- c(2, 1, 1, 2) if (length(mar.panel) != 4) mar.panel <- rep(mar.panel, 4)[1:4] if (is.null(ribmar)) { ribmar <- mar.panel/2 newmar <- c(2, 0) switch(ribside, left = { ribmar[c(2, 4)] <- newmar }, right = { ribmar[c(4, 2)] <- newmar }, bottom = { ribmar[c(1, 3)] <- newmar }, top = { ribmar[c(3, 1)] <- newmar } ) } ## bespoke function executed to plot colour ribbon do.ribbon <- function() { opa <- par(mar=ribmar) do.call(plot, ribstuff) par(opa) } ## ribbon plot function encoded as 'adorn' argument ribadorn <- list(adorn=do.ribbon, adorn.size=ribwid) names(ribadorn)[1] <- paste("adorn", ribside, sep=".") } ## result <- do.call(plot.solist, resolve.defaults(list(x=x, plotcommand="image"), list(...), list(mar.panel=mar.panel, main=xname, col=imcolmap, zlim=zlim, ribbon=FALSE), ribadorn)) return(invisible(result)) } plot.imlist }) image.imlist <- image.listof <- function(x, ..., equal.ribbon = FALSE, ribmar=NULL) { plc <- resolve.1.default(list(plotcommand="image"), list(...)) if(list(plc) %in% list("image", "plot", image, plot)) { out <- plot.imlist(x, ..., plotcommand="image", equal.ribbon=equal.ribbon, ribmar=ribmar) } else { out <- plot.solist(x, ..., ribmar=ribmar) } return(invisible(out)) } spatstat/R/kernel2d.R0000644000176200001440000000656013115225157014160 0ustar liggesusers#' #' kernel2d.R #' #' Two-dimensional smoothing kernels #' #' $Revision: 1.12 $ $Date: 2017/02/07 07:50:52 $ #' .Spatstat.2D.KernelTable <- list( #' table entries: #' d = density of standardised kernel #' sd = standard deviation of x coordinate, for standardised kernel #' hw = halfwidth of 
support of standardised kernel gaussian=list( d = function(x,y, ...) { dnorm(x) * dnorm(y) }, sd = 1, hw = 8, symmetric = TRUE), epanechnikov=list( d = function(x,y, ...) { (2/pi) * pmax(1 - (x^2+y^2), 0) }, sd = 1/sqrt(6), hw = 1, symmetric = TRUE), quartic=list( d = function(x,y, ...) { (3/pi) * pmax(1 - (x^2+y^2), 0)^2 }, sd = 1/sqrt(8), hw = 1, symmetric = TRUE), disc=list( d = function(x,y,...) { (1/pi) * as.numeric(x^2 + y^2 <= 1) }, sd = 1/2, hw = 1, symmetric = TRUE) ) validate2Dkernel <- function(kernel, fatal=TRUE) { if(is.character(match2DkernelName(kernel))) return(TRUE) if(is.im(kernel) || is.function(kernel)) return(TRUE) if(!fatal) return(FALSE) if(is.character(kernel)) stop(paste("Unrecognised choice of kernel", sQuote(kernel), paren(paste("options are", commasep(sQuote(names(.Spatstat.2D.KernelTable)))))), call.=FALSE) stop(paste("kernel should be a character string,", "a pixel image, or a function (x,y)"), call.=FALSE) } match2DkernelName <- function(kernel) { if(!is.character(kernel) || length(kernel) != 1) return(NULL) nama <- names(.Spatstat.2D.KernelTable) m <- pmatch(kernel, nama) if(is.na(m)) return(NULL) return(nama[m]) } lookup2DkernelInfo <- function(kernel) { validate2Dkernel(kernel) kernel <- match2DkernelName(kernel) if(is.null(kernel)) return(NULL) return(.Spatstat.2D.KernelTable[[kernel]]) } evaluate2Dkernel <- function(kernel, x, y, sigma=NULL, varcov=NULL, ..., scalekernel=is.character(kernel)) { info <- lookup2DkernelInfo(kernel) if(scalekernel) { ## kernel adjustment factor sdK <- if(is.character(kernel)) info$sd else 1 ## transform coordinates to x',y' such that kerfun(x', y') ## yields density k(x,y) at desired bandwidth if(is.null(varcov)) { rr <- sdK/sigma x <- x * rr y <- y * rr const <- rr^2 } else { SinvH <- matrixinvsqrt(varcov) rSinvH <- sdK * SinvH XY <- cbind(x, y) %*% rSinvH x <- XY[,1L] y <- XY[,2L] const <- det(rSinvH) } } ## now evaluate kernel if(is.character(kernel)) { kerfun <- info$d result <- kerfun(x, y) 
if(scalekernel) result <- const * result return(result) } if(is.function(kernel)) { argh <- list(...) if(length(argh) > 0) argh <- argh[names(argh) %in% names(formals(kernel))] result <- do.call(kernel, append(list(x, y), argh)) if(anyNA(result)) stop("NA values returned from kernel function") if(length(result) != length(x)) stop("Kernel function returned the wrong number of values") if(scalekernel) result <- const * result return(result) } if(is.im(kernel)) { result <- kernel[list(x=x, y=y)] if(anyNA(result)) stop("Domain of kernel image is not large enough") return(result) if(scalekernel) result <- const * result } # never reached stop("Unrecognised format for kernel") } spatstat/R/random.R0000755000176200001440000007731413115271120013731 0ustar liggesusers## ## random.R ## ## Functions for generating random point patterns ## ## $Revision: 4.92 $ $Date: 2017/06/05 10:31:58 $ ## ## ## runifpoint() n i.i.d. uniform random points ("binomial process") ## ## runifpoispp() uniform Poisson point process ## ## rpoispp() general Poisson point process (thinning method) ## ## rpoint() n independent random points (rejection/pixel list) ## ## rMaternI() Mat'ern model I ## rMaternII() Mat'ern model II ## rSSI() Simple Sequential Inhibition process ## ## rthin() independent random thinning ## rjitter() random perturbation ## ## Examples: ## u01 <- owin(0:1,0:1) ## plot(runifpoispp(100, u01)) ## X <- rpoispp(function(x,y) {100 * (1-x/2)}, 100, u01) ## X <- rpoispp(function(x,y) {ifelse(x < 0.5, 100, 20)}, 100) ## plot(X) ## plot(rMaternI(100, 0.02)) ## plot(rMaternII(100, 0.05)) ## runifrect <- function(n, win=owin(c(0,1),c(0,1)), nsim=1, drop=TRUE) { ## no checking xr <- win$xrange yr <- win$yrange result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { x <- runif(n, min=xr[1], max=xr[2]) y <- runif(n, min=yr[1], max=yr[2]) result[[isim]] <- ppp(x, y, window=win, check=FALSE) } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) 
return(as.ppplist(result)) } runifdisc <- function(n, radius=1, centre=c(0,0), ..., nsim=1, drop=TRUE) { ## i.i.d. uniform points in the disc of radius r and centre (x,y) check.1.real(radius) stopifnot(radius > 0) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } disque <- disc(centre=centre, radius=radius, ...) twopi <- 2 * pi rad2 <- radius^2 result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { theta <- runif(n, min=0, max=twopi) s <- sqrt(runif(n, min=0, max=rad2)) result[[isim]] <- ppp(centre[1] + s * cos(theta), centre[2] + s * sin(theta), window=disque, check=FALSE) } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } runifpoint <- function(n, win=owin(c(0,1),c(0,1)), giveup=1000, warn=TRUE, ..., nsim=1, drop=TRUE, ex=NULL) { if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(missing(n) && missing(win) && !is.null(ex)) { stopifnot(is.ppp(ex)) n <- npoints(ex) win <- Window(ex) } else { win <- as.owin(win) check.1.integer(n) stopifnot(n >= 0) } if(n == 0) { emp <- ppp(numeric(0), numeric(0), window=win) if(nsim == 1) return(emp) result <- rep(list(emp), nsim) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } if(warn) { nhuge <- spatstat.options("huge.npoints") if(n > nhuge) { whinge <- paste("Attempting to generate", n, "random points") message(whinge) warning(whinge, call.=FALSE) } } switch(win$type, rectangle = { return(runifrect(n, win, nsim=nsim, drop=drop)) }, mask = { dx <- win$xstep dy <- win$ystep ## extract pixel coordinates and probabilities rxy <- rasterxy.mask(win, drop=TRUE) xpix <- rxy$x ypix <- rxy$y ## make a list of nsim point patterns result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## select pixels with equal probability id <- sample(seq_along(xpix), n, replace=TRUE) ## extract pixel centres and randomise within pixels x <- xpix[id] + runif(n, min= -dx/2, max=dx/2) y <- ypix[id] + runif(n, 
min= -dy/2, max=dy/2) result[[isim]] <- ppp(x, y, window=win, check=FALSE) } }, polygonal={ ## make a list of nsim point patterns result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## rejection method ## initialise empty pattern x <- numeric(0) y <- numeric(0) X <- ppp(x, y, window=win) ## ## rectangle in which trial points will be generated box <- boundingbox(win) ## ntries <- 0 repeat { ntries <- ntries + 1 ## generate trial points in batches of n qq <- runifrect(n, box) ## retain those which are inside 'win' qq <- qq[win] ## add them to result X <- superimpose(X, qq, W=win, check=FALSE) ## if we have enough points, exit if(X$n > n) { result[[isim]] <- X[1:n] break } else if(X$n == n) { result[[isim]] <- X break } else if(ntries >= giveup) { ## otherwise get bored eventually stop(paste("Gave up after", giveup * n, "trials,", X$n, "points accepted")) } } } }, stop("Unrecognised window type") ) ## list of point patterns produced. if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } runifpoispp <- function(lambda, win = owin(c(0,1),c(0,1)), ..., nsim=1, drop=TRUE) { win <- as.owin(win) if(!is.numeric(lambda) || length(lambda) > 1 || !is.finite(lambda) || lambda < 0) stop("Intensity lambda must be a single finite number >= 0") if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(lambda == 0) { ## return empty pattern emp <- ppp(numeric(0), numeric(0), window=win) if(nsim == 1 && drop) return(emp) result <- rep(list(emp), nsim) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } ## will generate Poisson process in enclosing rectangle and trim it box <- boundingbox(win) meanN <- lambda * area(box) result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { n <- rpois(1, meanN) if(!is.finite(n)) stop(paste("Unable to generate Poisson process with a mean of", meanN, "points")) X <- runifpoint(n, box) ## trim to window if(win$type != "rectangle") X <- 
X[win] result[[isim]] <- X } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } rpoint <- function(n, f, fmax=NULL, win=unit.square(), ..., giveup=1000,verbose=FALSE, nsim=1, drop=TRUE) { if(missing(f) || (is.numeric(f) && length(f) == 1)) ## uniform distribution return(runifpoint(n, win, giveup, nsim=nsim, drop=drop)) ## non-uniform distribution.... if(!is.function(f) && !is.im(f)) stop(paste(sQuote("f"), "must be either a function or an", sQuote("im"), "object")) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(is.im(f)) { ## ------------ PIXEL IMAGE --------------------- wf <- as.owin(f) if(n == 0) { ## return empty pattern(s) emp <- ppp(numeric(0), numeric(0), window=wf) if(nsim == 1 && drop) return(emp) result <- rep(list(emp), nsim) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } w <- as.mask(wf) M <- w$m dx <- w$xstep dy <- w$ystep ## extract pixel coordinates and probabilities rxy <- rasterxy.mask(w, drop=TRUE) xpix <- rxy$x ypix <- rxy$y ppix <- as.vector(f$v[M]) ## not normalised - OK ## result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## select pixels id <- sample(length(xpix), n, replace=TRUE, prob=ppix) ## extract pixel centres and randomise within pixels x <- xpix[id] + runif(n, min= -dx/2, max=dx/2) y <- ypix[id] + runif(n, min= -dy/2, max=dy/2) result[[isim]] <- ppp(x, y, window=wf, check=FALSE) } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } ## ------------ FUNCTION --------------------- ## Establish parameters for rejection method verifyclass(win, "owin") if(n == 0) { ## return empty pattern(s) emp <- ppp(numeric(0), numeric(0), window=win) if(nsim == 1 && drop) return(emp) result <- rep(list(emp), nsim) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } if(is.null(fmax)) { ## compute approx maximum value of f imag <- as.im(f, 
win, ...) summ <- summary(imag) fmax <- summ$max + 0.05 * diff(summ$range) } irregular <- (win$type != "rectangle") box <- boundingbox(win) result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## initialise empty pattern X <- ppp(numeric(0), numeric(0), window=win) pbar <- 1 nremaining <- n totngen <- 0 ## generate uniform random points in batches ## and apply the rejection method. ## Collect any points that are retained in X ntries <- 0 repeat{ ntries <- ntries + 1 ## proposal points ngen <- nremaining/pbar + 10 totngen <- totngen + ngen prop <- runifrect(ngen, box) if(irregular) prop <- prop[win] if(prop$n > 0) { fvalues <- f(prop$x, prop$y, ...) paccept <- fvalues/fmax u <- runif(prop$n) ## accepted points Y <- prop[u < paccept] if(Y$n > 0) { ## add to X X <- superimpose(X, Y, W=win, check=FALSE) nX <- X$n pbar <- nX/totngen nremaining <- n - nX if(nremaining <= 0) { ## we have enough! if(verbose) splat("acceptance rate = ", round(100 * pbar, 2), "%") result[[isim]] <- if(nX == n) X else X[1:n] break } } } if(ntries > giveup) stop(paste("Gave up after",giveup * n,"trials with", X$n, "points accepted")) } } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } rpoispp <- function(lambda, lmax=NULL, win = owin(), ..., nsim=1, drop=TRUE, ex=NULL, warnwin=TRUE) { ## arguments: ## lambda intensity: constant, function(x,y,...) or image ## lmax maximum possible value of lambda(x,y,...) ## win default observation window (of class 'owin') ## ... arguments passed to lambda(x, y, ...) 
## nsim number of replicate simulations if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(missing(lambda) && is.null(lmax) && missing(win) && !is.null(ex)) { lambda <- intensity(unmark(ex)) win <- Window(ex) } else { if(!(is.numeric(lambda) || is.function(lambda) || is.im(lambda))) stop(paste(sQuote("lambda"), "must be a constant, a function or an image")) if(is.numeric(lambda) && !(length(lambda) == 1 && lambda >= 0)) stop(paste(sQuote("lambda"), "must be a single, nonnegative number")) if(!is.null(lmax)) { if(!is.numeric(lmax)) stop("lmax should be a number") if(length(lmax) > 1) stop("lmax should be a single number") } if(is.im(lambda)) { if(warnwin && !missing(win)) warning("Argument win ignored", call.=FALSE) win <- rescue.rectangle(as.owin(lambda)) } else { win <- as.owin(win) } } if(is.numeric(lambda)) ## uniform Poisson return(runifpoispp(lambda, win, nsim=nsim, drop=drop)) ## inhomogeneous Poisson ## perform thinning of uniform Poisson ## determine upper bound if(is.null(lmax)) { imag <- as.im(lambda, win, ...) 
summ <- summary(imag) lmax <- summ$max + 0.05 * diff(summ$range) } if(is.function(lambda)) { ## function lambda #' runifpoispp checks 'lmax' result <- runifpoispp(lmax, win, nsim=nsim, drop=FALSE) #' result is a 'ppplist' with appropriate names for(isim in 1:nsim) { X <- result[[isim]] if(X$n > 0) { prob <- lambda(X$x, X$y, ...)/lmax u <- runif(X$n) retain <- (u <= prob) result[[isim]] <- X[retain] } } if(nsim == 1 && drop) result <- result[[1L]] return(result) } if(is.im(lambda)) { ## image lambda if(spatstat.options("fastpois")) { ## new code: sample pixels directly mu <- integral(lambda) dx <- lambda$xstep/2 dy <- lambda$ystep/2 df <- as.data.frame(lambda) npix <- nrow(df) lpix <- df$value result <- vector(mode="list", length=nsim) nn <- rpois(nsim, mu) if(!all(is.finite(nn))) stop(paste("Unable to generate Poisson process with a mean of", mu, "points")) for(isim in seq_len(nsim)) { ni <- nn[isim] ii <- sample.int(npix, size=ni, replace=TRUE, prob=lpix) xx <- df$x[ii] + runif(ni, -dx, dx) yy <- df$y[ii] + runif(ni, -dy, dy) result[[isim]] <- ppp(xx, yy, window=win, check=FALSE) } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } else { ## old code: thinning result <- runifpoispp(lmax, win, nsim=nsim, drop=FALSE) for(isim in 1:nsim) { X <- result[[isim]] if(X$n > 0) { prob <- lambda[X]/lmax u <- runif(X$n) retain <- (u <= prob) result[[isim]] <- X[retain] } } if(nsim == 1 && drop) return(result[[1L]]) return(result) } } stop(paste(sQuote("lambda"), "must be a constant, a function or an image")) } rMaternI <- function(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, ..., nsim=1, drop=TRUE) { rMaternInhibition(type=1, kappa=kappa, r=r, win=win, stationary=stationary, ..., nsim=nsim, drop=drop) } rMaternII <- function(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, ..., nsim=1, drop=TRUE) { rMaternInhibition(type=2, kappa=kappa, r=r, win=win, stationary=stationary, ..., nsim=nsim, 
drop=drop) } rMaternInhibition <- function(type, kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, ..., nsim=1, drop=TRUE) { stopifnot(is.numeric(r) && length(r) == 1) stopifnot(type %in% c(1,2)) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } ## Resolve window class if(!inherits(win, c("owin", "box3", "boxx"))) { givenwin <- win win <- try(as.owin(givenwin), silent = TRUE) if(inherits(win, "try-error")) win <- try(as.boxx(givenwin), silent = TRUE) if(inherits(win, "try-error")) stop("Could not coerce argument win to a window (owin, box3 or boxx).") } dimen <- spatdim(win) if(dimen == 2) { bigbox <- if(stationary) grow.rectangle(win, r) else win result <- rpoispp(kappa, win = bigbox, nsim = nsim, drop=FALSE) } else if(dimen == 3) { bigbox <- if(stationary) grow.box3(win, r) else win result <- rpoispp3(kappa, domain = bigbox, nsim = nsim, drop=FALSE) } else { bigbox <- if(stationary) grow.boxx(win, r) else win result <- rpoisppx(kappa, domain = bigbox, nsim = nsim, drop=FALSE) } for(isim in 1:nsim) { Y <- result[[isim]] nY <- npoints(Y) if(type == 1) { ## Matern Model I if(nY > 1) { d <- nndist(Y) Y <- Y[d > r] } } else { ## Matern Model II if(nY > 1) { ## matrix of squared pairwise distances d2 <- pairdist(Y, squared=TRUE) close <- (d2 <= r^2) ## random order 1:n age <- sample(seq_len(nY), nY, replace=FALSE) earlier <- outer(age, age, ">") conflict <- close & earlier ## delete <- apply(conflict, 1, any) delete <- matrowany(conflict) Y <- Y[!delete] } } if(stationary) Y <- Y[win] result[[isim]] <- Y } if(nsim == 1 && drop) return(result[[1L]]) if(is.owin(win)) result <- as.ppplist(result) return(result) } rSSI <- function(r, n=Inf, win = square(1), giveup = 1000, x.init=NULL, ..., f=NULL, fmax=NULL, nsim=1, drop=TRUE) { win.given <- !missing(win) && !is.null(win) stopifnot(is.numeric(r) && length(r) == 1 && r >= 0) stopifnot(is.numeric(n) && length(n) == 1 && n >= 0) must.reach.n <- is.finite(n) if(!missing(nsim)) { check.1.integer(nsim) 
stopifnot(nsim >= 1) } ## if(!is.null(f)) { stopifnot(is.numeric(f) || is.im(f) || is.function(f)) if(is.null(fmax) && !is.numeric(f)) fmax <- if(is.im(f)) max(f) else max(as.im(f, win)) } ## result <- vector(mode="list", length=nsim) if(!win.given) win <- square(1) ## validate initial state if(is.null(x.init)) { ## start with empty pattern in specified window win <- as.owin(win) x.init <- ppp(numeric(0),numeric(0), window=win) } else { ## start with specified pattern stopifnot(is.ppp(x.init)) if(!win.given) { win <- as.owin(x.init) } else { ## check compatibility of windows if(!identical(win, as.owin(x.init))) warning(paste("Argument", sQuote("win"), "is not the same as the window of", sQuote("x.init"))) x.init.new <- x.init[win] if(npoints(x.init.new) == 0) stop(paste("No points of x.init lie inside the specified window", sQuote("win"))) nlost <- npoints(x.init) - npoints(x.init.new) if(nlost > 0) warning(paste(nlost, "out of", npoints(x.init), "points of the pattern x.init", "lay outside the specified window", sQuote("win"))) x.init <- x.init.new } if(n < npoints(x.init)) stop(paste("x.init contains", npoints(x.init), "points", "but a pattern containing only n =", n, "points", "is required")) if(n == npoints(x.init)) { warning(paste("Initial state x.init already contains", n, "points;", "no further points were added")) if(nsim == 1 && drop) return(x.init) result <- rep(list(x.init), nsim) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } } #' validate radius r2 <- r^2 if(!is.infinite(n) && (n * pi * r2/4 > area(win))) warning(paste("Window is too small to fit", n, "points", "at minimum separation", r)) #' start simulation pstate <- list() for(isim in 1:nsim) { if(nsim > 1) pstate <- progressreport(isim, nsim, state=pstate) ## Simple Sequential Inhibition process ## fixed number of points ## Naive implementation, proposals are uniform X <- x.init ntries <- 0 while(ntries < giveup) { ntries <- ntries + 1 qq <- if(is.null(f)) runifpoint(1, 
win) else rpoint(1, f, fmax, win) dx <- qq$x[1] - X$x dy <- qq$y[1] - X$y if(all(dx^2 + dy^2 > r2)) { X <- superimpose(X, qq, W=win, check=FALSE) ntries <- 0 } if(X$n >= n) break } if(must.reach.n && X$n < n) warning(paste("Gave up after", giveup, "attempts with only", X$n, "points placed out of", n)) result[[isim]] <- X } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } rPoissonCluster <- function(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), ..., lmax=NULL, nsim=1, drop=TRUE, saveparents=TRUE) { ## Generic Poisson cluster process ## Implementation for bounded cluster radius ## ## 'rcluster' is a function(x,y) that takes the coordinates ## (x,y) of the parent point and generates a list(x,y) of offspring ## ## "..." are arguments to be passed to 'rcluster()' ## ## Catch old argument name rmax for expand, and allow rmax to be ## passed to rcluster (and then be ignored) if(missing(expand) && !is.null(rmax <- list(...)$rmax)){ expand <- rmax f <- rcluster rcluster <- function(..., rmax) f(...) } win <- as.owin(win) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } ## Generate parents in dilated window frame <- boundingbox(win) dilated <- owin(frame$xrange + c(-expand, expand), frame$yrange + c(-expand, expand)) if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa))) stop(paste("The window in which the image", sQuote("kappa"), "is defined\n", "is not large enough to contain the dilation of the window", sQuote("win"))) parentlist <- rpoispp(kappa, lmax=lmax, win=dilated, nsim=nsim) if(nsim == 1) parentlist <- list(parentlist) resultlist <- vector(mode="list", length=nsim) for(isim in 1:nsim) { parents <- parentlist[[isim]] result <- NULL ## generate clusters np <- parents$n if(np > 0) { xparent <- parents$x yparent <- parents$y for(i in seq_len(np)) { ## generate random offspring of i-th parent point cluster <- rcluster(xparent[i], yparent[i], ...) 
if(!inherits(cluster, "ppp")) cluster <- ppp(cluster$x, cluster$y, window=frame, check=FALSE) ## skip if cluster is empty if(cluster$n > 0) { ## trim to window cluster <- cluster[win] if(is.null(result)) { ## initialise offspring pattern and offspring-to-parent map result <- cluster parentid <- rep.int(1, cluster$n) } else { ## add to pattern result <- superimpose(result, cluster, W=win, check=FALSE) ## update offspring-to-parent map parentid <- c(parentid, rep.int(i, cluster$n)) } } } } else { ## no parents - empty pattern result <- ppp(numeric(0), numeric(0), window=win) parentid <- integer(0) } if(saveparents) { attr(result, "parents") <- parents attr(result, "parentid") <- parentid attr(result, "expand") <- expand } resultlist[[isim]] <- result } if(nsim == 1 && drop) return(resultlist[[1]]) names(resultlist) <- paste("Simulation", 1:nsim) return(as.ppplist(resultlist)) } rGaussPoisson <- local({ rGaussPoisson <- function(kappa, r, p2, win=owin(c(0,1), c(0,1)), ..., nsim=1, drop=TRUE) { ## Gauss-Poisson process result <- rPoissonCluster(kappa, 1.05 * r, oneortwo, win, radius=r/2, p2=p2, nsim=nsim, drop=drop) return(result) } oneortwo <- function(x0, y0, radius, p2) { if(runif(1) > p2) ## one point return(list(x=x0, y=y0)) ## two points theta <- runif(1, min=0, max=2*pi) return(list(x=x0+c(-1,1)*radius*cos(theta), y=y0+c(-1,1)*radius*sin(theta))) } rGaussPoisson }) rstrat <- function(win=square(1), nx, ny=nx, k=1, nsim=1, drop=TRUE) { win <- as.owin(win) stopifnot(nx >= 1 && ny >= 1) stopifnot(k >= 1) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { xy <- stratrand(win, nx, ny, k) Xbox <- ppp(xy$x, xy$y, win$xrange, win$yrange, check=FALSE) result[[isim]] <- Xbox[win] } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } xy.grid <- function(xr, yr, nx, ny, dx, dy) { nx.given <- !is.null(nx) ny.given <- !is.null(ny) 
dx.given <- !is.null(dx) dy.given <- !is.null(dy) if(nx.given && dx.given) stop("Do not give both nx and dx") if(nx.given) { stopifnot(nx >= 1) x0 <- seq(from=xr[1], to=xr[2], length.out=nx+1) dx <- diff(xr)/nx } else if(dx.given) { stopifnot(dx > 0) x0 <- seq(from=xr[1], to=xr[2], by=dx) nx <- length(x0) - 1 } else stop("Need either nx or dx") ## determine y grid if(ny.given && dy.given) stop("Do not give both ny and dy") if(ny.given) { stopifnot(ny >= 1) y0 <- seq(from=yr[1], to=yr[2], length.out=ny+1) dy <- diff(yr)/ny } else { if(is.null(dy)) dy <- dx stopifnot(dy > 0) y0 <- seq(from=yr[1], to=yr[2], by=dy) ny <- length(y0) - 1 } return(list(x0=x0, y0=y0, nx=nx, ny=ny, dx=dx, dy=dy)) } rsyst <- function(win=square(1), nx=NULL, ny=nx, ..., dx=NULL, dy=dx, nsim=1, drop=TRUE) { if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } win <- as.owin(win) xr <- win$xrange yr <- win$yrange ## determine grid coordinates if(missing(ny)) ny <- NULL if(missing(dy)) dy <- NULL g <- xy.grid(xr, yr, nx, ny, dx, dy) x0 <- g$x0 y0 <- g$y0 dx <- g$dx dy <- g$dy ## assemble grid and randomise location xy0 <- expand.grid(x=x0, y=y0) result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { x <- xy0$x + runif(1, min = 0, max = dx) y <- xy0$y + runif(1, min = 0, max = dy) Xbox <- ppp(x, y, xr, yr, check=FALSE) ## trim to window result[[isim]] <- Xbox[win] } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } rcellnumber <- local({ rcellnumber <- function(n, N=10, mu=1) { if(missing(mu) || mu == 1) { z <- rCellUnit(n=n, N=N) } else { z <- replicate(n, rCellCumul(x=mu, N=N)) } return(z) } rCellUnit <- function(n, N=10) { if(!missing(N)) { if(round(N) != N) stop("N must be an integer") stopifnot(is.finite(N)) stopifnot(N > 1) } u <- runif(n, min=0, max=1) p0 <- 1/N pN <- 1/(N * (N-1)) k <- ifelse(u < p0, 0, ifelse(u < (1 - pN), 1, N)) return(k) } rCellCumul <- function(x, N=10) { check.1.real(x) n <- 
ceiling(x) if(n <= 0) return(0) y <- rCellUnit(n=n, N=N) if(n == x) return(sum(y)) p <- x - (n-1) z <- sum(y[-1]) + rbinom(1, size=y[1], prob=p) return(z) } rcellnumber }) rcell <- function(win=square(1), nx=NULL, ny=nx, ..., dx=NULL, dy=dx, N=10, nsim=1, drop=TRUE) { if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } win <- as.owin(win) xr <- win$xrange yr <- win$yrange ## determine grid coordinates if(missing(ny)) ny <- NULL if(missing(dy)) dy <- NULL g <- xy.grid(xr, yr, nx, ny, dx, dy) nx <- g$nx ny <- g$ny x0 <- g$x0 y0 <- g$y0 dx <- g$dx dy <- g$dy ## generate pattern(s) result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { x <- numeric(0) y <- numeric(0) for(ix in seq_len(nx)) for(iy in seq_len(ny)) { nij <- rcellnumber(1, N) x <- c(x, x0[ix] + runif(nij, min=0, max=dx)) y <- c(y, y0[iy] + runif(nij, min=0, max=dy)) } Xbox <- ppp(x, y, xr, yr, check=FALSE) result[[isim]] <- Xbox[win] } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } thinjump <- function(n, p) { # equivalent to which(runif(n) < p) for constant p stopifnot(length(p) == 1) if(p <= 0) return(integer(0)) if(p >= 1) return(seq_len(n)) if(p > 0.5) return(-thinjump(n, 1-p)) guessmaxlength <- ceiling(n * p + 2 * sqrt(n * p * (1-p))) i <- .Call("thinjumpequal", n, p, guessmaxlength, PACKAGE = "spatstat") return(i) } rthin <- function(X, P, ..., nsim=1, drop=TRUE) { stopifnot(is.ppp(X) || is.lpp(X)) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } nX <- npoints(X) if(nX == 0) { if(nsim == 1 && drop) return(X) result <- rep(list(X), nsim) names(result) <- paste("Simulation", 1:nsim) result <- if(is.ppp(X)) as.ppplist(result) else as.solist(result) return(result) } if(is.numeric(P) && length(P) == 1 && spatstat.options("fastthin")) { # special algorithm for constant probability result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { retain <- thinjump(nX, P) Y <- X[retain] ## also 
handle offspring-to-parent map if present if(!is.null(parentid <- attr(X, "parentid"))) attr(Y, "parentid") <- parentid[retain] result[[isim]] <- Y } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) result <- if(is.ppp(X)) as.ppplist(result) else as.solist(result) return(result) } if(is.numeric(P)) { ## vector of retention probabilities pX <- P if(length(pX) != nX) { if(length(pX) == 1) pX <- rep.int(pX, nX) else stop("Length of vector P does not match number of points of X") } if(anyNA(pX)) stop("P contains NA's") } else if(is.function(P)) { ## function - evaluate it at points of X pX <- if(inherits(P, c("linfun", "funxy"))) P(X, ...) else P(X$x, X$y, ...) if(length(pX) != nX) stop("Function P returned a vector of incorrect length") if(!is.numeric(pX)) stop("Function P returned non-numeric values") if(anyNA(pX)) stop("Function P returned some NA values") } else if(is.im(P)) { ## image - look it up if(!(P$type %in% c("integer", "real"))) stop("Values of image P should be numeric") pX <- P[X, drop=FALSE] if(anyNA(pX)) stop("some points of X lie outside the domain of image P") } else stop("Unrecognised format for P") if(min(pX) < 0) stop("some probabilities are negative") if(max(pX) > 1) stop("some probabilities are greater than 1") result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { retain <- (runif(length(pX)) < pX) Y <- X[retain] ## also handle offspring-to-parent map if present if(!is.null(parentid <- attr(X, "parentid"))) attr(Y, "parentid") <- parentid[retain] result[[isim]] <- Y } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) result <- if(is.ppp(X)) as.ppplist(result) else as.solist(result) return(result) } ## rjitter rjitter <- function(X, radius, retry=TRUE, giveup=10000, ..., nsim=1, drop=TRUE) { verifyclass(X, "ppp") if(missing(radius) || is.null(radius)) radius <- bw.stoyan(X) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } nX <- npoints(X) W <- 
X$window if(nX == 0) { if(nsim == 1 && drop) return(X) result <- rep(list(X), nsim) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { if(!retry) { ## points outside window are lost D <- runifdisc(nX, radius=radius) xnew <- X$x + D$x ynew <- X$y + D$y ok <- inside.owin(xnew, ynew, W) result[[isim]] <- ppp(xnew[ok], ynew[ok], window=W, check=FALSE) } else { ## retry = TRUE: condition on points being inside window undone <- rep.int(TRUE, nX) triesleft <- giveup Xshift <- X while(any(undone)) { triesleft <- triesleft - 1 if(triesleft <= 0) break Y <- Xshift[undone] D <- runifdisc(Y$n, radius=radius) xnew <- Y$x + D$x ynew <- Y$y + D$y ok <- inside.owin(xnew, ynew, W) if(any(ok)) { changed <- which(undone)[ok] Xshift$x[changed] <- xnew[ok] Xshift$y[changed] <- ynew[ok] undone[changed] <- FALSE } } result[[isim]] <- Xshift } } if(nsim == 1 && drop) return(result[[1L]]) names(result) <- paste("Simulation", 1:nsim) return(as.ppplist(result)) } spatstat/R/ippm.R0000755000176200001440000002262313142253753013423 0ustar liggesusers# # ippm.R # # $Revision: 2.24 $ $Date: 2017/07/18 00:38:31 $ # # Fisher scoring algorithm for irregular parameters in ppm trend # ippm <- local({ chucknames <- c("iScore", "start", "nlm.args", "silent", "warn.unused") hasarg <- function(f,a) { a %in% names(formals(f)) } ippm <- function(Q, ..., iScore=NULL, start=list(), covfunargs=start, nlm.args=list(stepmax=1/2), silent=FALSE, warn.unused=TRUE) { ## remember call cl <- match.call() callframe <- parent.frame() callstring <- short.deparse(sys.call()) ## ppmcall <- cl[!(names(cl) %in% chucknames)] ppmcall[[1L]] <- as.name('ppm') ## validate if(!is.list(start)) stop("start should be a list of initial values for irregular parameters") if(length(start) == 0) { ppmcall <- ppmcall[names(ppmcall) != "covfunargs"] return(eval(ppmcall, callframe)) } if(!is.null(iScore)) { if(!is.list(iScore) || length(iScore) != 
length(start)) stop("iScore should be a list of the same length as start") stopifnot(identical(names(iScore), names(start))) if(!all(sapply(iScore, is.function))) stop("iScore should be a list of functions") } ## smap <- match(names(start), names(covfunargs)) if(anyNA(smap)) stop("variables in start should be a subset of variables in covfunargs") covfunargs[smap] <- start ## fit the initial model and extract information ppmcall$covfunargs <- covfunargs fit0 <- eval(ppmcall, callframe) # lpl0 <- fit0$maxlogpl # p <- length(coef(fit0)) ## examine covariates and trend covariates <- fit0$covariates isfun <- sapply(covariates, is.function) covfuns <- covariates[isfun] ## determine which covariates depend on which irregular parameters pnames <- names(start) depmat <- matrix(FALSE, nrow=length(covfuns), ncol=length(pnames)) rownames(depmat) <- names(covfuns) colnames(depmat) <- pnames for(j in 1:length(pnames)) depmat[,j] <- sapply(covfuns, hasarg, pnames[j]) ## find covariates that depend on ANY irregular parameter depvar <- rownames(depmat)[apply(depmat, 1L, any)] ## check that these covariates appear only in offset terms covnames.fitted <- model.covariates(fit0, fitted=TRUE, offset=FALSE) if(any(uhoh <- depvar %in% covnames.fitted)) stop(paste(ngettext(sum(uhoh), "The covariate", "The covariates"), commasep(sQuote(depvar[uhoh])), "should appear only in offset terms")) ## check that every irregular parameter to be updated appears somewhere cov.names.offset <- model.covariates(fit0, fitted=FALSE, offset=TRUE) covfun.names.offset <- intersect(cov.names.offset, names(covfuns)) usearg <- apply(depmat[covfun.names.offset, , drop=FALSE], 2L, any) if(!all(usearg)) { if(warn.unused) { nbad <- sum(!usearg) warning(paste("Cannot maximise over the irregular", ngettext(nbad, "parameter", "parameters"), commasep(sQuote(names(usearg)[!usearg])), ngettext(nbad, "because it is", "because they are"), "not used in any term of the model")) } ## restrict start <- start[usearg] 
if(!is.null(iScore)) iScore <- iScore[usearg] pnames <- names(start) } if(length(start) == 0) { ppmcall <- ppmcall[names(ppmcall) != "covfunargs"] return(eval(ppmcall, callframe)) } ## parameters for objective function fdata <- list(fit0=fit0, nreg=length(coef(fit0)), covfunargs=covfunargs, smap=smap, pnames=pnames, iScore=iScore) ## minimise objective startvec <- unlist(start) typsize <- abs(startvec) typsize <- pmax(typsize, min(typsize[typsize > 0])) g <- do.call(nlm, resolve.defaults(list(f=objectivefun, p=startvec, thedata=fdata), nlm.args, list(typsize=typsize))) popt <- g$estimate ## detect error states icode <- g$code if(!silent && icode > 2) { errmess <- nlmcodes[[icode]] if(!is.null(errmess)) warning(errmess) else warning("Unrecognised error code ", paste(icode), " returned from nlm", call.=FALSE) } ## return optimised model covfunargs[smap] <- popt attr(covfunargs, "fitter") <- "ippm" attr(covfunargs, "free") <- names(start) fit <- update(fit0, covfunargs=covfunargs, use.internal=TRUE) fit$dispatched <- fit[c("call", "callstring", "callframe")] fit$call <- cl fit$callstring <- callstring fit$callframe <- callframe fit$iScore <- iScore class(fit) <- c("ippm", class(fit)) return(fit) } ## define objective function objectivefun <- function(param, thedata) { with(thedata, { ## fit model with current irregular parameters param <- as.list(param) names(param) <- pnames covfunargs[smap] <- param fit <- update(fit0, covfunargs=covfunargs, use.internal=TRUE) lpl <- logLik(fit, warn=FALSE) ## return negative logL because nlm performs *minimisation* value <- -as.numeric(lpl) ## compute derivatives stuff <- ppmInfluence(fit, what="score", iScore=iScore, iArgs=param) score <- stuff$score if(length(score) == length(coef(fit)) + length(param)) attr(value, "gradient") <- -score[-(1:nreg), drop=FALSE] ## attr(value, "hessian") <- -hess[-(1:nreg), -(1:nreg), drop=FALSE] return(value) }) } ## from help(nlm) nlmcodes <- list(c("Relative gradient is close to zero; ", "current 
iterate is probably solution"), c("Successive iterates are within tolerance; ", "current iterate is probably solution"), c("Last global step failed to locate a point ", "lower than current estimate. ", "Either current estimate is an approximate ", "local minimum of the function ", "or 'steptol' is too small"), "Iteration limit exceeded", c("Maximum step size 'stepmax' ", "exceeded five consecutive times. ", "Either the function is unbounded below, ", "becomes asymptotic to a finite value ", "from above in some direction, ", "or 'stepmax' is too small")) ippm }) update.ippm <- local({ newformula <- function(old, change, eold, enew) { old <- eval(old, eold) change <- eval(change, enew) old <- as.formula(old, env=eold) change <- as.formula(change, env=enew) update.formula(old, change) } update.ippm <- function(object, ..., envir=environment(terms(object))) { # call <- match.call() new.call <- old.call <- object$call old.callframe <- object$callframe Qold <- eval(old.call$Q, as.list(envir), enclos=old.callframe) argh <- list(...) if(any(isfmla <- sapply(argh, inherits, what="formula"))) { if(sum(isfmla) > 1) stop("Syntax not understood: several arguments are formulas") i <- min(which(isfmla)) new.fmla <- argh[[i]] argh <- argh[-i] if(inherits(Qold, "formula")) { ## formula will replace 'Q' if(is.null(lhs.of.formula(new.fmla))) { f <- (. 
~ x) f[[3L]] <- new.fmla[[2L]] new.fmla <- f } new.call$Q <- newformula(Qold, new.fmla, old.callframe, envir) } else if(inherits(Qold, c("ppp", "quad"))) { ## formula will replace 'trend' and may replace 'Q' new.fmla <- newformula(formula(object), new.fmla, old.callframe, envir) if(!is.null(lhs <- lhs.of.formula(new.fmla))) { newQ <- eval(eval(substitute(substitute(l, list("."=Q)), list(l=lhs, Q=Qold))), envir=as.list(envir), enclos=old.callframe) new.call$Q <- newQ } new.fmla <- rhs.of.formula(new.fmla) if("trend" %in% names(old.call)) { new.call$trend <- new.fmla } else { ## find which argument in the original call was a formula wasfmla <- sapply(old.call, formulaic, envir=as.list(envir), enclos=old.callframe) if(any(wasfmla)) { new.call[[min(which(wasfmla))]] <- new.fmla } else { new.call$trend <- new.fmla } } } } ## silence the warnings about unused covfunargs (unless overruled) new.call$warn.unused <- FALSE ## other arguments if(length(argh) > 0) { nama <- names(argh) named <- if(is.null(nama)) rep(FALSE, length(argh)) else nzchar(nama) if(any(named)) new.call[nama[named]] <- argh[named] if(any(!named)) new.call[length(new.call) + 1:sum(!named)] <- argh[!named] } result <- eval(new.call, as.list(envir), enclos=old.callframe) return(result) } formulaic <- function(z, envir, enclos) { u <- try(eval(z, envir, enclos)) return(inherits(u, "formula")) } update.ippm }) spatstat/R/versions.R0000755000176200001440000000251313115271120014306 0ustar liggesusers# # versions.R # # version numbers # # $Revision: 1.11 $ $Date: 2016/02/09 04:41:31 $ # ##################### # Extract version string from ppm object versionstring.ppm <- function(object) { verifyclass(object, "ppm") v <- object$version if(is.null(v) || !is.list(v)) v <- list(major=1, minor=3, release=4) vs <- paste(v$major, ".", v$minor, "-", v$release, sep="") return(vs) } # Extract version string from interact object versionstring.interact <- function(object) { verifyclass(object, "interact") v <- 
object$version return(v) # NULL before 1.11-0 } # Get version number of current spatstat installation # This is now saved in the spatstat cache environment # rather than read from file every time versionstring.spatstat <- function() { if(!existsSpatstatVariable("SpatstatVersion")) store.versionstring.spatstat() getSpatstatVariable("SpatstatVersion") } store.versionstring.spatstat <- function() { vs <- read.dcf(file=system.file("DESCRIPTION", package="spatstat"), fields="Version") vs <- as.character(vs) putSpatstatVariable("SpatstatVersion", vs) } # Extract major and minor versions only. majorminorversion <- function(v) { vp <- package_version(v) return(package_version(paste(vp$major, vp$minor, sep="."))) } # legacy function RandomFieldsSafe <- function() { TRUE } spatstat/R/envelopelpp.R0000755000176200001440000001707713115271075015013 0ustar liggesusers# # envelopelpp.R # # $Revision: 1.23 $ $Date: 2016/11/23 08:10:44 $ # # Envelopes for 'lpp' objects # # envelope.lpp <- function(Y, fun=linearK, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- linearK if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.lpp")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(!is.null(simulate)) { # ................................................... 
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } else if(!fix.n && !fix.marks) { # ................................................... # Realisations of complete spatial randomness # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y nY <- npoints(Y) Yintens <- intensity(unmark(Y)) Ymarx <- marks(Y) NETWORK <- Y$domain dont.complain.about(nY, Yintens, NETWORK) ## expression that will be evaluated simexpr <- if(is.null(Ymarx)) { #' unmarked point pattern expression(rpoislpp(Yintens, NETWORK)) } else if(is.null(dim(Ymarx))) { #' single column of marks expression({ A <- rpoislpp(Yintens, NETWORK); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { #' multiple columns of marks expression({ A <- rpoislpp(Yintens, NETWORK); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, , drop=FALSE] }) } # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } else if(!fix.marks) { # Fixed number of points, but random locations and marks # Data pattern X is argument Y X <- Y nY <- npoints(Y) Ymarx <- marks(Y) NETWORK <- Y$domain dont.complain.about(nY, NETWORK) # expression that will be evaluated simexpr <- if(is.null(Ymarx)) { ## unmarked expression(runiflpp(nY, NETWORK)) } else if(is.null(dim(Ymarx))) { ## single column of marks expression({ A <- runiflpp(nY, NETWORK); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { ## multiple columns of marks expression({ A <- runiflpp(nY, NETWORK); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, ,drop=FALSE] }) } # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } else { # ................................................... 
# Randomised locations only; # fixed number of points and fixed marks # Data pattern X is argument Y X <- Y nY <- npoints(Y) Ymarx <- marks(Y) NETWORK <- Y$domain # expression that will be evaluated simexpr <- expression(rpoislpp(nY, NETWORK) %mark% Ymarx) dont.complain.about(nY, Ymarx, NETWORK) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=FALSE, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.lppm <- function(Y, fun=linearK, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- linearK if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.pp3")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(!is.null(simulate)) { # ................................................... # Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } else { ## ................................................... 
## Simulation of the fitted model Y if(!is.poisson(Y)) stop("Simulation of non-Poisson models is not yet implemented") MODEL <- Y X <- Y$X NETWORK <- domain(X) lambdaFit <- predict(MODEL) Xmarx <- marks(X) nX <- if(!is.marked(X)) npoints(X) else table(marks(X)) dont.complain.about(NETWORK, Xmarx, nX) #' if(!fix.n && !fix.marks) { #' Unconstrained simulations LMAX <- if(is.im(lambdaFit)) max(lambdaFit) else sapply(lambdaFit, max) dont.complain.about(LMAX) simexpr <- expression(rpoislpp(lambdaFit, NETWORK, lmax=LMAX)) } else if(!fix.marks && is.marked(X)) { #' Fixed total number of points EN <- sapply(lambdaFit, integral) PROB <- EN/sum(EN) dont.complain.about(PROB) simexpr <- expression( rlpp(as.integer(rmultinom(1L, nX, PROB)), lambdaFit) ) } else { #' Fixed number of points of each type simexpr <- expression(rlpp(nX, lambdaFit)) } #' evaluate in THIS environment simrecipe <- simulrecipe(type = "lppm", expr = simexpr, envir = envir.here, csr = FALSE) } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=FALSE, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } spatstat/R/ssf.R0000644000176200001440000001501213115225157013235 0ustar liggesusers# # ssf.R # # spatially sampled functions # # $Revision: 1.17 $ $Date: 2017/01/26 00:55:22 $ # ssf <- function(loc, val) { stopifnot(is.ppp(loc)) if(is.function(val)) val <- val(loc$x, loc$y) if(is.data.frame(val)) val <- as.matrix(val) if(!is.matrix(val)) val <- matrix(val, ncol=1, dimnames=list(NULL, "value")) if(nrow(val) != npoints(loc)) stop("Incompatible lengths") result <- loc %mark% val class(result) <- c("ssf", class(result)) attr(result, "ok") <- complete.cases(val) return(result) } print.ssf <- 
function(x, ..., brief=FALSE) { if(brief) { cat(paste("Spatial function sampled at", npoints(x), "locations\n")) } else { cat("Spatially sampled function\n") cat("Locations:\n\t") print(unmark(x)) } val <- marks(x) if(!is.matrix(val)) { d <- 1 warning("Internal format error: val is not a matrix") } else d <- ncol(val) if(!brief) { type <- if(d == 1) "Scalar" else paste(d, "-vector", sep="") cat(paste(type, "valued function\n")) } if(d > 1 && !is.null(nama <- colnames(val))) cat(paste("Component names:", commasep(sQuote(nama)), "\n")) return(invisible(NULL)) } image.ssf <- function(x, ...) { do.call("plot", resolve.defaults(list(x, how="smoothed"), list(...))) } as.im.ssf <- function(X, ...) nnmark(X, ...) as.function.ssf <- function(x, ...) { X <- x mX <- marks(X) switch(markformat(X), vector = { g <- function(x, y=NULL) { Y <- xy.coords(x,y)[c("x","y")] J <- nncross(Y, X, what="which") result <- mX[J] return(unname(result)) } }, dataframe = { g <- function(x, y=NULL) { Y <- xy.coords(x,y)[c("x","y")] J <- nncross(Y, X, what="which") result <- mX[J,,drop=FALSE] row.names(result) <- NULL return(result) } }, stop("Marks must be a vector or data.frame")) h <- funxy(g, Frame(X)) return(h) } plot.ssf <- function(x, ..., how=c("smoothed", "nearest", "points"), style = c("image", "contour", "imagecontour"), sigma=NULL, contourargs=list()) { xname <- short.deparse(substitute(x)) how <- match.arg(how) style <- match.arg(style) otherargs <- list(...) 
# convert to images y <- switch(how, points = as.ppp(x), nearest = nnmark(x), smoothed = Smooth(x, sigma=sigma) ) # points plot if(how == "points") { out <- do.call("plot", resolve.defaults(list(y), otherargs, list(main=xname))) if(is.null(out)) return(invisible(NULL)) return(out) } # image plot switch(style, image = { out <- do.call("plot", resolve.defaults(list(y), otherargs, list(main=xname))) }, contour = { do.call("plot", resolve.defaults(list(as.owin(x)), otherargs, list(main=xname))) do.call("contour", resolve.defaults(list(y, add=TRUE), contourargs)) out <- NULL }, imagecontour = { out <- do.call("plot", resolve.defaults(list(y), otherargs, list(main=xname))) do.call("contour", resolve.defaults(list(y, add=TRUE), contourargs)) }) return(invisible(out)) } contour.ssf <- function(x, ..., main, sigma=NULL) { if(missing(main)) main <- short.deparse(substitute(x)) y <- Smooth(x, sigma=sigma) contour(y, ..., main=main) return(invisible(NULL)) } Smooth.ssf <- function(X, ...) { stopifnot(inherits(X, "ssf")) ok <- attr(X, "ok") Y <- as.ppp(X)[ok] argh <- list(...) isnul <- as.logical(unlist(lapply(argh, is.null))) nonnularg <- argh[!isnul] sigma0 <- if(any(c("sigma", "varcov") %in% names(nonnularg))) NULL else 1.4 * max(nndist(X)) Z <- do.call("Smooth.ppp", resolve.defaults(list(X = Y), list(...), list(sigma=sigma0), .MatchNull=FALSE)) # don't take NULL for an answer! return(Z) } "[.ssf" <- function(x, i, j, ..., drop) { loc <- unmark(x) val <- marks(x) ok <- attr(x, "ok") # if(!missing(j)) val <- val[, j, drop=FALSE] if(!missing(i)) { # use [.ppp to identify which points are retained locn <- loc %mark% seq_len(npoints(loc)) loci <- locn[i] loc <- unmark(loci) id <- marks(loci) # extract val <- val[id, , drop=FALSE] ok <- ok[id] } out <- loc %mark% val class(out) <- c("ssf", class(out)) attr(out, "ok") <- ok return(out) } as.ppp.ssf <- function(X, ...) { class(X) <- "ppp" attr(X, "ok") <- NULL return(X) } marks.ssf <- function(x, ...) 
{ val <- x$marks if(is.null(dim(val))) val <- matrix(val, ncol=1) if(is.data.frame(val)) val <- as.matrix(val) return(val) } "marks<-.ssf" <- function(x, ..., value) { ssf(unmark(x), value) } unmark.ssf <- function(X) { unmark(as.ppp(X)) } with.ssf <- function(data, ...) { loc <- as.ppp(data) val <- marks(data) newval <- with(as.data.frame(val), ...) if(length(newval) == npoints(loc) || (is.matrix(newval) && nrow(newval) == npoints(loc))) return(ssf(loc, newval)) return(newval) } apply.ssf <- function(X, ...) { loc <- as.ppp(X) val <- marks(X) newval <- apply(val, ...) if(length(newval) == npoints(loc) || (is.matrix(newval) && nrow(newval) == npoints(loc))) return(ssf(loc, newval)) return(newval) } range.ssf <- function(x, ...) range(marks(x), ...) min.ssf <- function(x, ...) min(marks(x), ...) max.ssf <- function(x, ...) max(marks(x), ...) integral.ssf <- function(f, domain=NULL, ..., weights=attr(f, "weights")) { if(!is.null(weights)) { check.nvector(weights, npoints(f), oneok=TRUE) if(length(weights) == 1) weights <- rep(weights, npoints(f)) } if(!is.null(domain)) { ok <- inside.owin(f, w=domain) f <- f[ok,] if(!is.null(weights)) weights <- weights[ok] } y <- marks(f) if(is.null(weights)) { z <- if(!is.matrix(y)) mean(y, na.rm=TRUE) else colMeans(y, na.rm=TRUE) a <- area(Window(f)) } else { z <- if(!is.matrix(y)) weighted.mean(y, w=weights, na.rm=TRUE) else apply(y, 2, weighted.mean, w=weights, na.rm=TRUE) a <- sum(weights) } return(z * a) } spatstat/R/linim.R0000755000176200001440000005727013162673014013572 0ustar liggesusers# # linim.R # # $Revision: 1.45 $ $Date: 2017/09/27 09:31:17 $ # # Image/function on a linear network # linim <- function(L, Z, ..., restrict=TRUE, df=NULL) { L <- as.linnet(L) stopifnot(is.im(Z)) class(Z) <- "im" # prevent unintended dispatch dfgiven <- !is.null(df) if(dfgiven) { stopifnot(is.data.frame(df)) neednames <- c("xc", "yc", "x", "y", "mapXY", "tp", "values") ok <- neednames %in% names(df) if(any(!ok)) { nn <- sum(!ok) 
stop(paste(ngettext(nn, "A column", "Columns"), "named", commasep(sQuote(neednames[!ok])), ngettext(nn, "is", "are"), "missing from argument", sQuote("df"))) } } if(restrict) { #' restrict image to pixels actually lying on the network M <- as.mask.psp(as.psp(L), Z) if(dfgiven) { #' ensure all mapped pixels are untouched pos <- nearest.pixel(df$xc, df$yc, Z) pos <- cbind(pos$row, pos$col) M[pos] <- TRUE } Z <- Z[M, drop=FALSE] } if(!dfgiven) { # compute the data frame of mapping information xx <- rasterx.im(Z) yy <- rastery.im(Z) mm <- !is.na(Z$v) xx <- as.vector(xx[mm]) yy <- as.vector(yy[mm]) pixelcentres <- ppp(xx, yy, window=as.rectangle(Z), check=FALSE) pixdf <- data.frame(xc=xx, yc=yy) # project pixel centres onto lines p2s <- project2segment(pixelcentres, as.psp(L)) projloc <- as.data.frame(p2s$Xproj) projmap <- as.data.frame(p2s[c("mapXY", "tp")]) # extract values values <- Z[pixelcentres] # bundle df <- cbind(pixdf, projloc, projmap, data.frame(values=values)) } out <- Z attr(out, "L") <- L attr(out, "df") <- df class(out) <- c("linim", class(out)) return(out) } print.linim <- function(x, ...) { splat("Image on linear network") L <- attr(x, "L") Lu <- summary(unitname(L)) nsample <- nrow(attr(x, "df")) print(L) NextMethod("print") if(!is.null(nsample)) splat(" Data frame:", nsample, "sample points along network", "\n", "Average density: one sample point per", signif(volume(L)/nsample, 3), Lu$plural, Lu$explain) return(invisible(NULL)) } summary.linim <- function(object, ...) { y <- NextMethod("summary") if("integral" %in% names(y)) y$integral <- integral(object) y$network <- summary(as.linnet(object)) class(y) <- c("summary.linim", class(y)) return(y) } print.summary.linim <- function(x, ...) 
{ splat(paste0(x$type, "-valued"), "pixel image on a linear network") unitinfo <- summary(x$units) pluralunits <- unitinfo$plural sigdig <- getOption('digits') di <- x$dim win <- x$window splat(di[1L], "x", di[2L], "pixel array (ny, nx)") splat("enclosing rectangle:", prange(signif(win$xrange, sigdig)), "x", prange(signif(win$yrange, sigdig)), unitinfo$plural, unitinfo$explain) splat("dimensions of each pixel:", signif(x$xstep, 3), "x", signif(x$ystep, sigdig), pluralunits) if(!is.null(explain <- unitinfo$explain)) splat(explain) splat("Pixel values (on network):") switch(x$type, integer=, real={ splat("\trange =", prange(signif(x$range, sigdig))) splat("\tintegral =", signif(x$integral, sigdig)) splat("\tmean =", signif(x$mean, sigdig)) }, factor={ print(x$table) }, complex={ splat("\trange: Real", prange(signif(x$Re$range, sigdig)), "Imaginary", prange(signif(x$Im$range, sigdig))) splat("\tintegral =", signif(x$integral, sigdig)) splat("\tmean =", signif(x$mean, sigdig)) }, { print(x$summary) }) splat("Underlying network:") print(x$network) return(invisible(NULL)) } plot.linim <- local({ plot.linim <- function(x, ..., style=c("colour", "width"), scale, adjust=1, negative.args=list(col=2), legend=TRUE, leg.side=c("right", "left", "bottom", "top"), leg.sep=0.1, leg.wid=0.1, leg.args=list(), leg.scale=1, zlim, do.plot=TRUE) { xname <- short.deparse(substitute(x)) style <- match.arg(style) leg.side <- match.arg(leg.side) if(missing(zlim) || is.null(zlim)) { zlim <- NULL zliminfo <- list() } else { check.range(zlim) stopifnot(all(is.finite(zlim))) zliminfo <- list(zlim=zlim) } ribstuff <- list(ribside = leg.side, ribsep = leg.sep, ribwid = leg.wid, ribargs = leg.args, ribscale = leg.scale) #' colour style: plot as pixel image if(style == "colour" || !do.plot) return(do.call(plot.im, resolve.defaults(list(x), list(...), ribstuff, zliminfo, list(main=xname, legend=legend, do.plot=do.plot)))) #' width style L <- attr(x, "L") df <- attr(x, "df") Llines <- as.psp(L) W <- 
as.owin(L) #' plan layout if(legend) { #' use layout procedure in plot.im z <- do.call(plot.im, resolve.defaults(list(x, do.plot=FALSE, legend=TRUE), list(...), ribstuff, list(main=xname))) bb.all <- attr(z, "bbox") bb.leg <- attr(z, "bbox.legend") } else { bb.all <- Frame(W) bb.leg <- NULL } legend <- !is.null(bb.leg) if(legend) { #' expand plot region to accommodate text annotation in legend if(leg.side %in% c("left", "right")) { delta <- 2 * sidelengths(bb.leg)[1] xmargin <- if(leg.side == "right") c(0, delta) else c(delta, 0) bb.all <- grow.rectangle(bb.all, xmargin=xmargin) } } #' initialise plot bb <- do.call.matched(plot.owin, resolve.defaults(list(x=bb.all, type="n"), list(...), list(main=xname)), extrargs="type") #' resolve graphics parameters for polygons names(negative.args) <- paste0(names(negative.args), ".neg") grafpar <- resolve.defaults(negative.args, list(...), list(col=1), .MatchNull=FALSE) #' rescale values to a plottable range if(is.null(zlim)) zlim <- range(x, finite=TRUE) vr <- range(0, zlim) if(missing(scale)) { maxsize <- mean(distmap(Llines))/2 scale <- maxsize/max(abs(vr)) } df$values <- adjust * scale * df$values/2 #' examine sign of values signtype <- if(vr[1] >= 0) "positive" else if(vr[2] <= 0) "negative" else "mixed" #' split data by segment mapXY <- factor(df$mapXY, levels=seq_len(Llines$n)) dfmap <- split(df, mapXY, drop=TRUE) #' sort each segment's data by position along segment dfmap <- lapply(dfmap, sortalongsegment) #' plot each segment's data Lperp <- angles.psp(Llines) + pi/2 Lfrom <- L$from Lto <- L$to Lvert <- L$vertices Ljoined <- (vertexdegree(L) > 1) #' precompute coordinates of dodecagon dodo <- disc(npoly=12)$bdry[[1L]] #' for(i in seq(length(dfmap))) { z <- dfmap[[i]] segid <- unique(z$mapXY)[1L] xx <- z$x yy <- z$y vv <- z$values #' add endpoints of segment ileft <- Lfrom[segid] iright <- Lto[segid] leftend <- Lvert[ileft] rightend <- Lvert[iright] xx <- c(leftend$x, xx, rightend$x) yy <- c(leftend$y, yy, rightend$y) 
vv <- c(vv[1L], vv, vv[length(vv)]) rleft <- vv[1L] rright <- vv[length(vv)] ## first add dodecagonal 'joints' if(Ljoined[ileft] && rleft != 0) pltpoly(x=rleft * dodo$x + leftend$x, y=rleft * dodo$y + leftend$y, grafpar, sign(rleft)) if(Ljoined[iright] && rright != 0) pltpoly(x=rright * dodo$x + rightend$x, y=rright * dodo$y + rightend$y, grafpar, sign(rright)) ## Now render main polygon ang <- Lperp[segid] switch(signtype, positive = pltseg(xx, yy, vv, ang, grafpar), negative = pltseg(xx, yy, vv, ang, grafpar), mixed = { ## find zero-crossings xing <- (diff(sign(vv)) != 0) ## excursions excu <- factor(c(0, cumsum(xing))) elist <- split(data.frame(xx=xx, yy=yy, vv=vv), excu) ## plot each excursion for(e in elist) with(e, pltseg(xx, yy, vv, ang, grafpar)) }) } result <- adjust * scale attr(result, "bbox") <- bb if(legend) { attr(result, "bbox.legend") <- bb.leg ## get graphical arguments grafpar <- resolve.defaults(leg.args, grafpar) ## set up scale of typical pixel values gvals <- leg.args$at %orifnull% prettyinside(zlim) ## corresponding widths wvals <- adjust * scale * gvals ## glyph positions ng <- length(gvals) xr <- bb.leg$xrange yr <- bb.leg$yrange switch(leg.side, right = , left = { y <- seq(yr[1], yr[2], length.out=ng+1L) y <- (y[-1L] + y[-(ng+1L)])/2 for(j in 1:ng) { xx <- xr[c(1L,2L,2L,1L)] yy <- (y[j] + c(-1,1) * wvals[j]/2)[c(1L,1L,2L,2L)] pltpoly(x = xx, y = yy, grafpar, sign(wvals[j])) } }, bottom = , top = { x <- seq(xr[1], xr[2], length.out=ng+1L) x <- (x[-1L] + x[-(ng+1L)])/2 for(j in 1:ng) { xx <- (x[j] + c(-1,1) * wvals[j]/2)[c(1L,1L,2L,2L)] yy <- yr[c(1L,2L,2L,1L)] pltpoly(x = xx, y = yy, grafpar, sign(wvals[j])) } }) # add text labels check.1.real(leg.scale) glabs <- leg.args$labels %orifnull% signif(leg.scale * gvals, 2) switch(leg.side, right = text(xr[2], y, pos=4, labels=glabs), left = text(xr[1], y, pos=2, labels=glabs), bottom = text(x, yr[1], pos=1, labels=glabs), top = text(x, yr[2], pos=3, labels=glabs)) } return(invisible(result)) } 
pltseg <- function(xx, yy, vv, ang, pars) { ## draw polygon around segment sgn <- sign(mean(vv)) xx <- c(xx, rev(xx)) yy <- c(yy, rev(yy)) vv <- c(vv, -rev(vv)) xx <- xx + cos(ang) * vv yy <- yy + sin(ang) * vv pltpoly(xx, yy, pars, sgn) invisible(NULL) } pNames <- c("density", "angle", "border", "col", "lty") posnames <- paste(pNames, ".pos", sep="") negnames <- paste(pNames, ".neg", sep="") pltpoly <- function(x, y, pars, sgn) { #' plot polygon using parameters appropriate to "sign" if(sgn >= 0) { pars <- redub(posnames, pNames, pars) } else { pars <- redub(negnames, pNames, pars) } pars <- pars[names(pars) %in% pNames] if(is.null(pars$border)) pars$border <- pars$col do.call(polygon, append(list(x=x, y=y), pars)) invisible(NULL) } redub <- function(from, to, x) { #' rename entry x$from to x$to m <- match(from, names(x)) if(any(ok <- !is.na(m))) names(x)[m[ok]] <- to[ok] return(resolve.defaults(x)) } plot.linim }) sortalongsegment <- function(df) { df[fave.order(df$tp), , drop=FALSE] } as.im.linim <- function(X, ...) { attr(X, "L") <- attr(X, "df") <- NULL class(X) <- "im" if(length(list(...)) > 0) X <- as.im(X, ...) return(X) } as.linim <- function(X, ...) 
{ UseMethod("as.linim") } as.linim.default <- function(X, L, ..., eps = NULL, dimyx = NULL, xy = NULL, delta = NULL) { stopifnot(inherits(L, "linnet")) Y <- as.im(X, W=Frame(L), ..., eps=eps, dimyx=dimyx, xy=xy) M <- as.mask.psp(as.psp(L), as.owin(Y)) Y[complement.owin(M)] <- NA df <- NULL if(!is.null(delta)) { df <- pointsAlongNetwork(L, delta) pix <- nearest.valid.pixel(df$x, df$y, Y) df$xc <- Y$xcol[pix$col] df$yc <- Y$yrow[pix$row] df$values <- Y$v[cbind(pix$row, pix$col)] df <- df[,c("xc", "yc", "x", "y", "seg", "tp", "values")] names(df)[names(df) == "seg"] <- "mapXY" } if(is.mask(WL <- Window(L)) && !all(sapply(list(eps, dimyx, xy), is.null))) Window(L, check=FALSE) <- as.mask(WL, eps=eps, dimyx=dimyx, xy=xy) out <- linim(L, Y, df=df, restrict=FALSE) return(out) } pointsAlongNetwork <- local({ pointsAlongNetwork <- function(L, delta) { #' sample points evenly spaced along each segment stopifnot(inherits(L, "linnet")) S <- as.psp(L) ns <- nsegments(S) seglen <- lengths.psp(S) ends <- as.data.frame(S) nsample <- pmax(1, ceiling(seglen/delta)) df <- NULL x0 <- ends$x0 y0 <- ends$y0 x1 <- ends$x1 y1 <- ends$y1 for(i in seq_len(ns)) { nn <- nsample[i] + 1L tcut <- seq(0, 1, length.out=nn) tp <- (tcut[-1] + tcut[-nn])/2 x <- x0[i] * (1-tp) + x1[i] * tp y <- y0[i] * (1-tp) + y1[i] * tp df <- rbind(df, data.frame(x=x, y=y, seg=i, tp=tp)) } return(df) } pointsAlongNetwork }) as.linim.linim <- function(X, ...) { if(length(list(...)) == 0) return(X) Y <- as.linim.default(X, as.linnet(X), ...) 
return(Y) } # analogue of eval.im eval.linim <- function(expr, envir, harmonize=TRUE) { sc <- sys.call() # Get names of all variables in the expression e <- as.expression(substitute(expr)) varnames <- all.vars(e) allnames <- all.names(e, unique=TRUE) funnames <- allnames[!(allnames %in% varnames)] if(length(varnames) == 0) stop("No variables in this expression") # get the values of the variables if(missing(envir)) { envir <- parent.frame() # WAS: sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- mget(varnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) funs <- mget(funnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) # Find out which variables are (linear) images islinim <- unlist(lapply(vars, inherits, what="linim")) if(!any(islinim)) stop("There are no linear images (class linim) in this expression") # .................................... # Evaluate the pixel values using eval.im # .................................... sc[[1L]] <- as.name('eval.im') sc$envir <- envir Y <- eval(sc) # ......................................... # Then evaluate data frame entries if feasible # ......................................... 
dfY <- NULL linims <- vars[islinim] nlinims <- length(linims) dframes <- lapply(linims, attr, which="df") nets <- lapply(linims, attr, which="L") isim <- unlist(lapply(vars, is.im)) if(!any(isim & !islinim)) { # all images are 'linim' objects # Check that the images refer to the same linear network if(nlinims > 1) { agree <- unlist(lapply(nets[-1L], identical, y=nets[[1L]])) if(!all(agree)) stop(paste("Images do not refer to the same linear network")) } dfempty <- unlist(lapply(dframes, is.null)) if(!any(dfempty)) { # ensure data frames are compatible if(length(dframes) > 1 && ( length(unique(nr <- sapply(dframes, nrow))) > 1 || !allElementsIdentical(dframes, "seg") || !allElementsIdentical(dframes, "tp") )) { # find the one with finest spacing imax <- which.max(nr) # resample the others dframes[-imax] <- lapply(dframes[-imax], resampleNetworkDataFrame, template=dframes[[imax]]) } # replace each image variable by its data frame column of values vars[islinim] <- lapply(dframes, getElement, "values") # now evaluate expression Yvalues <- eval(e, append(vars, funs)) # pack up dfY <- dframes[[1L]] dfY$values <- Yvalues } } result <- linim(nets[[1L]], Y, df=dfY, restrict=FALSE) return(result) } resampleNetworkDataFrame <- function(df, template) { # resample 'df' at the points of 'template' invalues <- df$values insegment <- df$mapXY inteepee <- df$tp out <- template n <- nrow(out) outvalues <- vector(mode = typeof(invalues), length=n) outsegment <- out$mapXY outteepee <- out$tp for(i in seq_len(n)) { relevant <- which(insegment == outsegment[i]) if(length(relevant) > 0) { j <- which.min(abs(inteepee[relevant] - outteepee[i])) outvalues[i] <- invalues[relevant[j]] } } out$values <- outvalues return(out) } as.linnet.linim <- function(X, ...) 
{ attr(X, "L") } "[.linim" <- function(x, i, ..., drop=TRUE) { if(!missing(i) && is.lpp(i)) { n <- npoints(i) result <- vector(mode=typeof(x$v), length=n) if(n == 0) return(result) if(!is.null(df <- attr(x, "df"))) { #' use data frame of sample points along network knownseg <- df$mapXY knowntp <- df$tp knownval <- df$values #' extract local coordinates of query points coo <- coords(i) queryseg <- coo$seg querytp <- coo$tp #' match to nearest sample point for(j in 1:n) { relevant <- (knownseg == queryseg[j]) if(!any(relevant)) { result[j] <- NA } else { k <- which.min(abs(knowntp[relevant] - querytp[j])) result[j] <- knownval[relevant][k] } } if(drop && anyNA(result)) result <- result[!is.na(result)] return(result) } #' give up and use pixel image } #' apply subset method for 'im' y <- NextMethod("[") if(!is.im(y)) return(y) # vector of pixel values #' handle linear network info L <- attr(x, "L") df <- attr(x, "df") #' clip to new window W <- Window(y) LW <- L[W] df <- df[inside.owin(df$xc, df$yc, W), , drop=FALSE] #' update local coordinates in data frame samplepoints <- ppp(df$x, df$y, window=Frame(W), check=FALSE) a <- project2segment(samplepoints, as.psp(LW)) df$mapXY <- a$mapXY df$tp <- a$tp #' wrap up attr(y, "L") <- LW attr(y, "df") <- df return(y) } "[<-.linim" <- function(x, i, j, value) { y <- NextMethod("[<-") #' extract linear network info L <- attr(x, "L") df <- attr(x, "df") #' propagate *changed* pixel values to sample points pos <- nearest.pixel(df$xc, df$yc, y) pos <- cbind(pos$row, pos$col) yvalue <- y$v[pos] xvalue <- x$v[pos] changed <- (yvalue != xvalue) df$values[changed] <- yvalue[changed] #' restrict main pixel image to network m <- as.mask.psp(L, as.mask(y))$m m[pos] <- TRUE y$v[!m] <- NA #' package up attr(y, "L") <- L attr(y, "df") <- df class(y) <- unique(c("linim", class(y))) return(y) } integral.linim <- function(f, domain=NULL, ...){ verifyclass(f, "linim") if(!is.null(domain)) f <- f[domain] #' extract data L <- as.linnet(f) ns <- 
nsegments(L) df <- attr(f, "df") vals <- df$values seg <- factor(df$mapXY, levels=1:ns) #' ensure each segment has at least one sample point nper <- table(seg) if(any(missed <- (nper == 0))) { missed <- unname(which(missed)) mp <- midpoints.psp(as.psp(L)[missed]) mp <- lpp(data.frame(x=mp$x, y=mp$y, seg=missed, tp=0.5), L=L) valmid <- f[mp, drop=FALSE] seg <- c(seg, factor(missed, levels=1:ns)) vals <- c(vals, valmid) } #' take average of data on each segment mu <- as.numeric(by(vals, seg, mean, ..., na.rm=TRUE)) mu[is.na(mu)] <- 0 #' weighted sum len <- lengths.psp(as.psp(L)) if(anyNA(vals)) { p <- as.numeric(by(!is.na(vals), seg, mean, ..., na.rm=TRUE)) p[is.na(p)] <- 0 len <- len * p } return(sum(mu * len)) } mean.linim <- function(x, ...) { trap.extra.arguments(...) integral(x)/sum(lengths.psp(as.psp(as.linnet(x)))) } quantile.linim <- function(x, probs = seq(0,1,0.25), ...) { verifyclass(x, "linim") #' extract data df <- attr(x, "df") L <- as.linnet(x) vals <- df$values #' count sample points on each segment seg <- factor(df$mapXY, levels=1:nsegments(L)) nvals <- table(seg) #' calculate weights len <- lengths.psp(as.psp(L)) iseg <- as.integer(seg) wts <- len[iseg]/nvals[iseg] return(weighted.quantile(vals, wts, probs)) } median.linim <- function(x, ...) { trap.extra.arguments(...) return(unname(quantile(x, 0.5))) } shift.linim <- function (X, ...) { verifyclass(X, "linim") Z <- shift(as.im(X), ...) L <- shift(as.linnet(X), ...) v <- getlastshift(L) df <- attr(X, "df") df[,c("xc","yc")] <- shiftxy(df[,c("xc", "yc")], v) df[,c("x","y")] <- shiftxy(df[,c("x", "y")], v) Y <- linim(L, Z, df=df, restrict=FALSE) return(putlastshift(Y, v)) } affine.linim <- function(X, mat = diag(c(1, 1)), vec = c(0, 0), ...) { Z <- affine(as.im(X), mat=mat, vec=vec, ...) L <- affine(as.linnet(X), mat=mat, vec=vec, ...) 
df <- attr(X, "df") df[,c("xc","yc")] <- affinexy(df[,c("xc", "yc")], mat=mat, vec=vec) df[,c("x","y")] <- affinexy(df[,c("x", "y")], mat=mat, vec=vec) Y <- linim(L, Z, df=df, restrict=FALSE) return(Y) } scalardilate.linim <- function(X, f, ..., origin=NULL) { trap.extra.arguments(..., .Context = "In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) if (!is.null(origin)) { X <- shift(X, origin = origin) negorig <- getlastshift(X) } else negorig <- c(0, 0) Y <- affine(X, mat = diag(c(f, f)), vec = -negorig) return(Y) } as.data.frame.linim <- function(x, ...) { df <- attr(x, "df") if(!is.na(m <- match("mapXY", colnames(df)))) colnames(df)[m] <- "seg" return(df) } pairs.linim <- function(..., plot=TRUE, eps=NULL) { argh <- list(...) ## unpack single argument which is a list of images if(length(argh) == 1) { arg1 <- argh[[1L]] if(is.list(arg1) && all(sapply(arg1, is.im))) argh <- arg1 } ## identify which arguments are images isim <- sapply(argh, is.im) nim <- sum(isim) if(nim == 0) stop("No images provided") ## separate image arguments from others imlist <- argh[isim] rest <- argh[!isim] ## identify which arguments are images on a network islinim <- sapply(imlist, inherits, what="linim") if(!any(islinim)) # shouldn't be here return(pairs.im(argh, plot=plot)) ## adjust names imnames <- names(imlist) %orifnull% rep("", length(imlist)) if(any(needsname <- !nzchar(imnames))) imnames[needsname] <- paste0("V", seq_len(nim)[needsname]) names(imlist) <- imnames ## choose resolution if(is.null(eps)) { xstep <- min(sapply(imlist, getElement, name="xstep")) ystep <- min(sapply(imlist, getElement, name="ystep")) eps <- min(xstep, ystep) } ## extract linear network Z1 <- imlist[[min(which(islinim))]] L <- as.linnet(Z1) ## construct equally-spaced sample points X <- pointsOnLines(as.psp(L), eps=eps) ## sample each image pixvals <- lapply(imlist, "[", i=X, drop=FALSE) pixdf <- as.data.frame(pixvals) ## pairs plot if(plot) { if(nim > 1) { 
do.call(pairs.default, resolve.defaults(list(x=pixdf), rest, list(labels=imnames, pch="."))) labels <- resolve.defaults(rest, list(labels=imnames))$labels colnames(pixdf) <- labels } else { do.call(hist.default, resolve.defaults(list(x=pixdf[,1L]), rest, list(main=paste("Histogram of", imnames[1L]), xlab=imnames[1L]))) } } class(pixdf) <- unique(c("plotpairsim", class(pixdf))) attr(pixdf, "eps") <- eps return(invisible(pixdf)) } spatstat/R/randommk.R0000755000176200001440000003677613115271120014270 0ustar liggesusers# # # randommk.R # # Random generators for MULTITYPE point processes # # $Revision: 1.37 $ $Date: 2017/01/08 06:24:51 $ # # rmpoispp() random marked Poisson pp # rmpoint() n independent random marked points # rmpoint.I.allim() ... internal # rpoint.multi() temporary wrapper # rmpoispp <- local({ ## Argument checking is.numvector <- function(x) {is.numeric(x) && is.vector(x)} is.constant <- function(x) {is.numvector(x) && length(x) == 1} checkone <- function(x) { if(is.constant(x)) { if(x >= 0) return(TRUE) else stop("Intensity is negative!") } return(is.function(x) || is.im(x)) } ## Ensure that m can be passed as a single value to function(x,y,m,...) slice.fun <- function(x,y,fun,mvalue, ...) { m <- if(length(mvalue) == 1) rep.int(mvalue, length(x)) else mvalue result <- fun(x,y,m, ...) return(result) } ## Main function rmpoispp <- function(lambda, lmax=NULL, win = owin(c(0,1),c(0,1)), types, ..., nsim=1, drop=TRUE, warnwin=!missing(win)) { ## arguments: ## lambda intensity: ## constant, function(x,y,m,...), image, ## vector, list of function(x,y,...) or list of images ## ## lmax maximum possible value of lambda ## constant, vector, or list ## ## win default observation window (of class 'owin') ## ## types possible types for multitype pattern ## ## ... 
extra arguments passed to lambda() ## if(missing(types)) types <- NULL force(warnwin) if(nsim > 1) { result <- vector(mode="list", length=nsim) for(i in 1:nsim) result[[i]] <- rmpoispp(lambda, lmax, win, types, ..., warnwin=warnwin) names(result) <- paste("Simulation", 1:nsim) return(as.solist(result)) } ## Validate arguments single.arg <- checkone(lambda) vector.arg <- !single.arg && is.numvector(lambda) list.arg <- !single.arg && is.list(lambda) if(! (single.arg || vector.arg || list.arg)) stop(paste("argument", sQuote("lambda"), "not understood")) if(list.arg && !all(unlist(lapply(lambda, checkone)))) stop(paste("Each entry in the list", sQuote("lambda"), "must be either a constant, a function or an image")) if(vector.arg && any(lambda < 0)) stop(paste("Some entries in the vector", sQuote("lambda"), "are negative")) ## Determine & validate the set of possible types if(is.null(types)) { if(single.arg) { stop(paste(sQuote("types"), "must be given explicitly when", sQuote("lambda"), "is a constant, a function or an image")) } else if(!is.null(nama <- names(lambda)) && sum(nzchar(nama)) == length(lambda)) { types <- nama } else { types <- seq_along(lambda) } } ntypes <- length(types) if(!single.arg && (length(lambda) != ntypes)) stop(paste("The lengths of", sQuote("lambda"), "and", sQuote("types"), "do not match")) factortype <- factor(types, levels=types) ## Validate `lmax' if(! 
(is.null(lmax) || is.numvector(lmax) || is.list(lmax) )) stop(paste(sQuote("lmax"), "should be a constant, a vector, a list or NULL")) ## coerce lmax to a vector, to save confusion if(is.null(lmax)) maxes <- rep(NULL, ntypes) else if(is.numvector(lmax) && length(lmax) == 1) maxes <- rep.int(lmax, ntypes) else if(length(lmax) != ntypes) stop(paste("The length of", sQuote("lmax"), "does not match the number of possible types")) else if(is.list(lmax)) maxes <- unlist(lmax) else maxes <- lmax ## coerce lambda to a list, to save confusion lam <- if(single.arg) rep(list(lambda), ntypes) else if(vector.arg) as.list(lambda) else lambda ## Simulate for(i in 1:ntypes) { if(single.arg && is.function(lambda)) { ## call f(x,y,m, ...) Y <- rpoispp(slice.fun, lmax=maxes[i], win=win, fun=lambda, mvalue=types[i], ..., warnwin=warnwin) } else { ## call f(x,y, ...) or use other formats Y <- rpoispp(lam[[i]], lmax=maxes[i], win=win, ..., warnwin=warnwin) } Y <- Y %mark% factortype[i] X <- if(i == 1) Y else superimpose(X, Y, W=X$window, check=FALSE) } ## Randomly permute, just in case the order is important permu <- sample(X$n) X <- X[permu] return(if(drop) X else solist(X)) } rmpoispp }) ## ------------------------------------------------------------------------ rmpoint <- local({ ## argument validation is.numvector <- function(x) {is.numeric(x) && is.vector(x)} is.constant <- function(x) {is.numvector(x) && length(x) == 1} checkone <- function(x) { if(is.constant(x)) { if(x >= 0) return(TRUE) else stop("Intensity is negative!") } return(is.function(x) || is.im(x)) } # integration.. integratexy <- function(f, win, ...) { imag <- as.im(f, W=win, ...) integral.im(imag) } ## create a counterpart of f(x,y,m) that works when m is a single value funwithfixedmark <- function(xx, yy, ..., m, fun) { mm <- rep.int(m, length(xx)) fun(xx, yy, mm, ...) } integratewithfixedmark <- function(m, fun, win, ...) { integratexy(funwithfixedmark, win=win, m=m, fun=fun, ...) 
} # Main function rmpoint <- function(n, f=1, fmax=NULL, win = unit.square(), types, ptypes, ..., giveup = 1000, verbose = FALSE, nsim = 1, drop=TRUE) { if(!is.numeric(n)) stop("n must be a scalar or vector") if(any(ceiling(n) != floor(n))) stop("n must be an integer or integers") if(any(n < 0)) stop("n must be non-negative") if(missing(types)) types <- NULL if(missing(ptypes)) ptypes <- NULL if(nsim > 1) { result <- vector(mode="list", length=nsim) for(i in 1:nsim) result[[i]] <- rmpoint(n, f, fmax, win, types, ptypes, ..., giveup=giveup, verbose=verbose) names(result) <- paste("Simulation", 1:nsim) return(as.solist(result)) } if(sum(n) == 0) { nopoints <- ppp(x=numeric(0), y=numeric(0), window=win, check=FALSE) if(!is.null(types)) { nomarks <- factor(types[numeric(0)], levels=types) nopoints <- nopoints %mark% nomarks } return(if(drop) nopoints else solist(nopoints)) } ############# Model <- if(length(n) == 1) { if(is.null(ptypes)) "I" else "II" } else "III" ############## Validate f argument single.arg <- checkone(f) vector.arg <- !single.arg && is.numvector(f) list.arg <- !single.arg && is.list(f) if(! 
(single.arg || vector.arg || list.arg)) stop(paste("argument", sQuote("f"), "not understood")) if(list.arg && !all(unlist(lapply(f, checkone)))) stop(paste("Each entry in the list", sQuote("f"), "must be either a constant, a function or an image")) if(vector.arg && any(f < 0)) stop(paste("Some entries in the vector", sQuote("f"), "are negative")) ## cases where it's known that all types of points ## have the same conditional density of location (x,y) const.density <- vector.arg || (list.arg && all(unlist(lapply(f, is.constant)))) same.density <- const.density || (single.arg && !is.function(f)) ################ Determine & validate the set of possible types if(is.null(types)) { if(single.arg && length(n) == 1) stop(paste(sQuote("types"), "must be given explicitly when", sQuote("f"), "is a single number, a function or an image and", sQuote("n"), "is a single number")) else { basis <- if(single.arg) n else f if(!is.null(nama <- names(basis)) && sum(nzchar(nama)) == length(basis)) { types <- nama } else { types <- seq_along(basis) } } } ntypes <- length(types) if(!single.arg && (length(f) != ntypes)) stop(paste("The lengths of", sQuote("f"), "and", sQuote("types"), "do not match")) if(length(n) > 1 && ntypes != length(n)) stop(paste("The lengths of", sQuote("n"), "and", sQuote("types"), "do not match")) factortype <- factor(types, levels=types) ####################### Validate `fmax' if(! 
(is.null(fmax) || is.numvector(fmax) || is.list(fmax) )) stop(paste(sQuote("fmax"), "should be a constant, a vector, a list or NULL")) ## coerce fmax to a vector, to save confusion if(is.null(fmax)) maxes <- rep(NULL, ntypes) else if(is.constant(fmax)) maxes <- rep.int(fmax, ntypes) else if(length(fmax) != ntypes) stop(paste("The length of", sQuote("fmax"), "does not match the number of possible types")) else if(is.list(fmax)) maxes <- unlist(fmax) else maxes <- fmax ## coerce f to a list, to save confusion flist <- if(single.arg) rep(list(f), ntypes) else if(vector.arg) as.list(f) else f #################### START ################################## ## special algorithm for Model I when all f[[i]] are images if(Model == "I" && !same.density && all(unlist(lapply(flist, is.im)))) { X <- rmpoint.I.allim(n, flist, types) return(if(drop) X else solist(X)) } ## otherwise, first select types, then locations given types if(Model == "I") { ## Compute approximate marginal distribution of type if(vector.arg) ptypes <- f/sum(f) else if(list.arg) { fintegrals <- unlist(lapply(flist, integratexy, win=win, ...)) ptypes <- fintegrals/sum(fintegrals) } else { ## single argument if(is.constant(f)) { ptypes <- rep.int(1/ntypes, ntypes) } else { ## f is a function (x,y,m) ## convert to images and integrate fintegrals <- unlist(lapply(types, integratewithfixedmark, win=win, fun=f, ...)) ## normalise ptypes <- fintegrals/sum(fintegrals) } } } ## Generate marks if(Model == "I" || Model == "II") { ## i.i.d.: n marks with distribution 'ptypes' marques <- sample(factortype, n, prob=ptypes, replace=TRUE) nn <- table(marques) } else { ## multinomial: fixed number n[i] of types[i] repmarks <- factor(rep.int(types, n), levels=types) marques <- sample(repmarks) nn <- n } ntot <- sum(nn) ############## SIMULATE !!! ######################### ## If all types have the same conditional density of location, ## generate the locations using rpoint, and return. 
if(same.density) { X <- rpoint(ntot, flist[[1]], maxes[[1]], win=win, ..., giveup=giveup, verbose=verbose) X <- X %mark% marques return(if(drop) X else solist(X)) } ## Otherwise invoke rpoint() for each type separately X <- ppp(numeric(ntot), numeric(ntot), window=win, marks=marques, check=FALSE) for(i in 1:ntypes) { if(verbose) cat(paste("Type", i, "\n")) if(single.arg && is.function(f)) { ## want to call f(x,y,m, ...) Y <- rpoint(nn[i], funwithfixedmark, fmax=maxes[i], win=win, ..., m=factortype[i], fun=f, giveup=giveup, verbose=verbose) } else { ## call f(x,y, ...) or use other formats Y <- rpoint(nn[i], flist[[i]], fmax=maxes[i], win=win, ..., giveup=giveup, verbose=verbose) } Y <- Y %mark% factortype[i] X[marques == factortype[i]] <- Y } return(if(drop) X else solist(X)) } rmpoint }) rmpoint.I.allim <- local({ ## Extract pixel coordinates and probabilities get.stuff <- function(imag) { w <- as.mask(as.owin(imag)) dx <- w$xstep dy <- w$ystep rxy <- rasterxy.mask(w, drop=TRUE) xpix <- rxy$x ypix <- rxy$y ppix <- as.vector(imag$v[w$m]) ## not normalised - OK npix <- length(xpix) return(list(xpix=xpix, ypix=ypix, ppix=ppix, dx=rep.int(dx,npix), dy=rep.int(dy, npix), npix=npix)) } rmpoint.I.allim <- function(n, f, types) { ## Internal use only! ## Generates random marked points (Model I *only*) ## when all f[[i]] are pixel images. 
## stuff <- lapply(f, get.stuff) ## Concatenate into loooong vectors xpix <- unlist(lapply(stuff, getElement, name="xpix")) ypix <- unlist(lapply(stuff, getElement, name="ypix")) ppix <- unlist(lapply(stuff, getElement, name="ppix")) dx <- unlist(lapply(stuff, getElement, name="dx")) dy <- unlist(lapply(stuff, getElement, name="dy")) ## replicate types numpix <- unlist(lapply(stuff, getElement, name="npix")) tpix <- rep.int(seq_along(types), numpix) ## ## sample pixels from union of all images ## npix <- sum(numpix) id <- sample(npix, n, replace=TRUE, prob=ppix) ## get pixel centre coordinates and randomise within pixel x <- xpix[id] + (runif(n) - 1/2) * dx[id] y <- ypix[id] + (runif(n) - 1/2) * dy[id] ## compute types marx <- factor(types[tpix[id]],levels=types) ## et voila! return(ppp(x, y, window=as.owin(f[[1]]), marks=marx, check=FALSE)) } rmpoint.I.allim }) ## ## wrapper for Rolf's function ## rpoint.multi <- function (n, f, fmax=NULL, marks = NULL, win = unit.square(), giveup = 1000, verbose = FALSE, warn=TRUE, nsim=1, drop=TRUE) { if(nsim > 1) { result <- vector(mode="list", length=nsim) for(i in 1:nsim) result[[i]] <- rpoint.multi(n, f, fmax, marks, win, giveup, verbose) names(result) <- paste("Simulation", 1:nsim) return(as.solist(result)) } no.marks <- is.null(marks) || (is.factor(marks) && length(levels(marks)) == 1) if(warn) { nhuge <- spatstat.options("huge.npoints") if(n > nhuge) warning(paste("Attempting to generate", n, "random points")) } ## unmarked case if (no.marks) { X <- if(is.function(f)) { rpoint(n, f, fmax, win, giveup=giveup, verbose=verbose) } else { rpoint(n, f, fmax, giveup=giveup, verbose=verbose) } return(if(drop) X else solist(X)) } ## multitype case if(length(marks) != n) stop("length of marks vector != n") if(!is.factor(marks)) stop("marks should be a factor") types <- levels(marks) types <- factor(types, levels=types) ## generate required number of points of each type nums <- table(marks) X <- rmpoint(nums, f, fmax, win=win, 
types=types, giveup=giveup, verbose=verbose) if(any(table(marks(X)) != nums)) stop("Internal error: output of rmpoint illegal") ## reorder them to correspond to the desired 'marks' vector Y <- X Xmarks <- marks(X) for(ty in types) { to <- (marks == ty) from <- (Xmarks == ty) if(sum(to) != sum(from)) stop(paste("Internal error: mismatch for mark =", ty)) if(any(to)) { Y$x[to] <- X$x[from] Y$y[to] <- X$y[from] Y$marks[to] <- ty } } return(if(drop) Y else solist(Y)) } spatstat/R/triplet.family.R0000644000176200001440000000640113115225157015407 0ustar liggesusers# # # triplet.family.R # # $Revision: 1.1 $ $Date: 2011/11/05 07:18:51 $ # # Family of `third-order' point process models # # triplet.family: object of class 'isf' # # # ------------------------------------------------------------------- # triplet.family <- list( name = "triplet", print = function(self) { cat("Family of third-order interactions\n") }, plot = NULL, # ---------------------------------------------------- eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # # This is the eval function for the `triplet' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # This function is currently modelled on 'inforder.family'. # It simply invokes the potential 'pot' directly # and expects 'pot' to return the values of the sufficient statistic S(u,X). # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pairpot list(......) 
# correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # In this case the potential function 'pot' should have arguments # pot(X, U, EqualPairs, pars, correction, ...) # # It must return a vector with length equal to the number of points in U, # or a matrix with as many rows as there are points in U. if(!is.ppp(U)) U <- ppp(U$x, U$y, window=X$window) POT <- pot(X, U, EqualPairs, pars, correction, ...) if(is.matrix(POT)) { if(nrow(POT) != U$n) stop("Internal error: the potential returned a matrix with the wrong number of rows") } else if(is.array(POT) && length(dim(POT)) > 2) stop("Internal error: the potential returned an array with more than 2 dimensions") else if(is.vector(POT)) { if(length(POT) != U$n) stop("Internal error: the potential returned a vector with the wrong length") POT <- matrix(POT, ncol=1) } else stop("Internal error: the return value from the potential is not understood") return(POT) }, ######### end of function $eval suffstat = NULL ######### end of function $suffstat ) ######### end of list class(triplet.family) <- "isf" spatstat/R/Tstat.R0000644000176200001440000002107213115225157013544 0ustar liggesusers# # tstat.R Estimation of T function # # $Revision: 1.11 $ $Date: 2016/02/11 09:36:11 $ # Tstat <- local({ # helper functions diffrange <- function(z) diff(range(z, na.rm=TRUE)) edgetri.Trans <- function(X, triid, trim=spatstat.options("maxedgewt")) { triid <- as.matrix(triid) ntri <- nrow(triid) if(ntri == 0) return(numeric(0)) W <- rescue.rectangle(as.owin(X)) if(W$type != "rectangle") stop("Translation correction is only implemented for rectangular windows") x <- 
matrix(X$x[triid], nrow=ntri) y <- matrix(X$y[triid], nrow=ntri) dx <- apply(x, 1, diffrange) dy <- apply(y, 1, diffrange) wide <- diff(W$xrange) high <- diff(W$yrange) weight <- wide * high/((wide - dx) * (high - dy)) weight <- pmin.int(trim, weight) return(weight) } # helper function implemented.for.T <- function(correction, windowtype, explicit) { rect <- (windowtype == "rectangle") if(any(correction == "best")) { # select best available correction correction <- if(rect) "translate" else "border" } else { # available selection of edge corrections depends on window if(!rect) { tra <- (correction == "translate") if(any(tra)) { whinge <- "Translation correction is only implemented for rectangular windows" if(explicit) { if(all(tra)) stop(whinge) else warning(whinge) } correction <- correction[!tra] } } } return(correction) } # .......... main function .................... Tstat <- function(X, ..., r=NULL, rmax=NULL, correction=c("border", "translate"), ratio=FALSE, verbose=TRUE) { verifyclass(X, "ppp") # rfixed <- !is.null(r) npts <- npoints(X) W <- Window(X) areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) lambda3 <- (npts * (npts - 1) * (npts - 2))/(areaW^3) rmaxdefault <- if(!is.null(rmax)) rmax else rmax.rule("K", W, lambda) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(!correction.given) correction <- c("border", "bord.modif", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.T(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame TT <- data.frame(r=r, theo= (pi/2) * (pi - 3 * sqrt(3)/4) * r^4) desc <- 
c("distance argument r", "theoretical Poisson %s") TT <- fv(TT, "r", quote(T(r)), "theo", , alim, c("r","%s[pois](r)"), desc, fname="T") # save numerator and denominator? if(ratio) { denom <- lambda2 * areaW numT <- eval.fv(denom * TT) denT <- eval.fv(denom + TT * 0) attributes(numT) <- attributes(denT) <- attributes(T) attr(numT, "desc")[2] <- "numerator for theoretical Poisson %s" attr(denT, "desc")[2] <- "denominator for theoretical Poisson %s" } # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax, what="ijd", twice=FALSE, neat=FALSE) I <- close$i J <- close$j DIJ <- close$d nI <- length(I) # estimate computation time if(verbose) { nTmax <- nI * (nI-1) /2 esttime <- exp(1.25 * log(nTmax) - 21.5) message(paste("Searching", nTmax, "potential triangles;", "estimated time", codetime(esttime))) } # find triangles with their diameters tri <- trianglediameters(I, J, DIJ, nvert=npts) stopifnot(identical(colnames(tri), c("i", "j", "k", "diam"))) # reassemble so each triangle appears 3 times, once for each vertex II <- with(tri, c(i, j, k)) DD <- with(tri, rep.int(diam, 3)) if(any(correction == "none")) { # uncorrected! For demonstration purposes only! 
wh <- whist(DD, breaks$val) # no weights numTun <- cumsum(wh) denTun <- lambda3 * areaW # uncorrected estimate of T Tun <- numTun/denTun TT <- bind.fv(TT, data.frame(un=Tun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") if(ratio) { # save numerator and denominator numT <- bind.fv(numT, data.frame(un=numTun), "hat(%s)[un](r)", "numerator of uncorrected estimate of %s", "un") denT <- bind.fv(denT, data.frame(un=denTun), "hat(%s)[un](r)", "denominator of uncorrected estimate of %s", "un") } } if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) bI <- b[II] # apply reduced sample algorithm RS <- Kount(DD, bI, b, breaks) if(any(correction == "bord.modif")) { # modified border correction denom.area <- eroded.areas(W, r) numTbm <- RS$numerator denTbm <- lambda3 * denom.area Tbm <- numTbm/denTbm TT <- bind.fv(TT, data.frame(bord.modif=Tbm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numT <- bind.fv(numT, data.frame(bord.modif=numTbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denT <- bind.fv(denT, data.frame(bord.modif=denTbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "border")) { numTb <- RS$numerator denTb <- lambda2 * RS$denom.count Tb <- numTb/denTb TT <- bind.fv(TT, data.frame(border=Tb), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numT <- bind.fv(numT, data.frame(border=numTb), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denT <- bind.fv(denT, data.frame(border=denTb), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } } if(any(correction == "translate")) { # translation correction # apply to triangle list edgewt <- edgetri.Trans(X, tri[, 1:3]) wh <- whist(tri$diam, breaks$val, 
edgewt) numTtrans <- 3 * cumsum(wh) denTtrans <- lambda3 * areaW Ttrans <- numTtrans/denTtrans h <- diameter(W)/2 Ttrans[r >= h] <- NA TT <- bind.fv(TT, data.frame(trans=Ttrans), "hat(%s)[trans](r)", "translation-corrected estimate of %s", "trans") if(ratio) { numT <- bind.fv(numT, data.frame(trans=numTtrans), "hat(%s)[trans](r)", "numerator of translation-corrected estimate of %s", "trans") denT <- bind.fv(denT, data.frame(trans=denTtrans), "hat(%s)[trans](r)", "denominator of translation-corrected estimate of %s", "trans") } } # default plot will display all edge corrections formula(TT) <- . ~ r unitname(TT) <- unitname(X) # if(ratio) { # finish up numerator & denominator formula(numT) <- formula(denT) <- . ~ r unitname(numT) <- unitname(denT) <- unitname(TT) # tack on to result TT <- rat(TT, numT, denT, check=FALSE) } return(TT) } Tstat }) spatstat/R/linearpcf.R0000755000176200001440000001256013142453243014414 0ustar liggesusers# # linearpcf.R # # $Revision: 1.26 $ $Date: 2017/08/09 00:21:46 $ # # pair correlation function for point pattern on linear network # # linearpcf <- function(X, r=NULL, ..., correction="Ang", ratio=FALSE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern np <- npoints(X) lengthL <- volume(domain(X)) # compute denom <- np * (np - 1)/lengthL g <- linearpcfengine(X, r=r, ..., denom=denom, correction=correction, ratio=ratio) # extract bandwidth bw <- attr(g, "bw") # set appropriate y axis label switch(correction, Ang = { ylab <- quote(g[L](r)) fname <- c("g", "L") }, none = { ylab <- quote(g[net](r)) fname <- c("g", "net") }) g <- rebadge.fv(g, new.ylab=ylab, new.fname=fname) # reattach bandwidth attr(g, "bw") <- bw return(g) } linearpcfinhom <- function(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, ratio=FALSE) { stopifnot(inherits(X, "lpp")) loo.given <- 
!missing(leaveoneout) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) if(is.null(lambda)) linearpcf(X, r=r, ..., correction=correction, ratio=ratio) if(normalise) { check.1.real(normpower) stopifnot(normpower >= 1) } # extract info about pattern lengthL <- volume(domain(X)) # lambdaX <- getlambda.lpp(lambda, X, ..., update=update, leaveoneout=leaveoneout, loo.given=loo.given, lambdaname="lambda") # invlam <- 1/lambdaX invlam2 <- outer(invlam, invlam, "*") denom <- if(!normalise) lengthL else if(normpower == 1) sum(invlam) else lengthL * (sum(invlam)/lengthL)^normpower g <- linearpcfengine(X, ..., r=r, reweight=invlam2, denom=denom, correction=correction, ratio=ratio) # extract bandwidth bw <- attr(g, "bw") # set appropriate y axis label switch(correction, Ang = { ylab <- quote(g[L, inhom](r)) fname <- c("g", "list(L, inhom)") }, none = { ylab <- quote(g[net, inhom](r)) fname <- c("g", "list(net, inhom)") }) g <- rebadge.fv(g, new.fname=fname, new.ylab=ylab) # reattach bandwidth attr(g, "bw") <- bw attr(g, "dangerous") <- attr(lambdaX, "dangerous") return(g) } linearpcfengine <- function(X, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", ratio=FALSE) { # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # extract info about pattern np <- npoints(X) # extract linear network L <- domain(X) W <- Window(L) # determine r values rmaxdefault <- 0.98 * boundingradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # type <- if(correction == "Ang") "L" else "net" fname <- c("g", type) ylab <- substitute(g[type](r), list(type=type)) # if(np < 2) { # no pairs to count: return zero function zeroes <- numeric(length(r)) df <- data.frame(r = r, est = zeroes) g <- ratfv(df, NULL, 0, "r", ylab, "est", . 
~ r, c(0, rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname = fname, ratio=ratio) if(correction == "Ang") { # tack on theoretical value g <- bind.ratfv(g, quotient = data.frame(theo=r), denominator = 0, labl = makefvlabel(NULL, NULL, fname, "theo"), desc = "theoretical Poisson %s", ratio=ratio) } return(g) } # compute pairwise distances D <- pairdist(X) #--- compile into pcf --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) g <- compilepcf(D, r, denom=denom, fname=fname, ratio=ratio) unitname(g) <- unitname(X) attr(g, "correction") <- correction return(g) } if(correction == "none") edgewt <- 1 else { # inverse m weights (Wei's correction) # determine tolerance toler <- default.linnet.tolerance(L) # compute m[i,j] m <- matrix(1, np, np) for(j in 1:np) m[ -j, j] <- countends(L, X[-j], D[-j,j], toler=toler) edgewt <- 1/m } # compute pcf wt <- if(!is.null(reweight)) edgewt * reweight else edgewt g <- compilepcf(D, r, weights=wt, denom=denom, ..., fname=fname, ratio=ratio) # extract bandwidth bw <- attr(g, "bw") # tack on theoretical value g <- bind.ratfv(g, quotient = data.frame(theo=rep.int(1,length(r))), denominator = denom, labl = makefvlabel(NULL, NULL, fname, "pois"), desc = "theoretical Poisson %s", ratio = ratio) # tweak unitname(g) <- unitname(X) fvnames(g, ".") <- rev(fvnames(g, ".")) # tack on bandwidth again attr(g, "bw") <- bw attr(g, "correction") <- correction return(g) } spatstat/R/hybrid.R0000755000176200001440000002652713115271075013743 0ustar liggesusers# # # hybrid.R # # $Revision: 1.8 $ $Date: 2017/02/07 07:35:32 $ # # Hybrid of several interactions # # Hybrid() create a hybrid of several interactions # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Hybrid <- local({ Hybrid <- function(...) { interlist <- list(...) 
n <- length(interlist) if(n == 0) stop("No arguments given") #' arguments may be interaction objects or ppm objects isinter <- unlist(lapply(interlist, is.interact)) isppm <- unlist(lapply(interlist, is.ppm)) if(any(nbg <- !(isinter | isppm))) stop(paste(ngettext(sum(nbg), "Argument", "Arguments"), paste(which(nbg), collapse=", "), ngettext(sum(nbg), "is not an interaction", "are not interactions"))) #' ensure the list contains only interaction objects if(any(isppm)) interlist[isppm] <- lapply(interlist[isppm], as.interact) #' recursively expand any components that are themselves hybrids while(any(ishybrid <- unlist(lapply(interlist, is.hybrid)))) { i <- min(which(ishybrid)) n <- length(interlist) expandi <- interlist[[i]]$par interlist <- c(if(i > 1) interlist[1:(i-1L)] else NULL, expandi, if(i < n) interlist[(i+1L):n] else NULL) } #' ncomponents <- length(interlist) if(ncomponents == 1) { #' single interaction - return it return(interlist[[1L]]) } #' ensure all components have names names(interlist) <- good.names(names(interlist), "HybridComponent", 1:ncomponents) out <- list( name = "Hybrid interaction", creator = "Hybrid", family = hybrid.family, pot = NULL, par = interlist, parnames = names(interlist), selfstart = function(X, self) { ilist <- self$par sslist <- lapply(ilist, getElement, name="selfstart") has.ss <- sapply(sslist, is.function) if(any(has.ss)) { ilist[has.ss] <- lapply(ilist[has.ss], invokeSelfStart, Y=X) self$par <- ilist } return(self) }, init = NULL, update = NULL, # default OK print = function(self, ..., family=FALSE, brief=FALSE) { if(family) print.isf(self$family) ncomponents <- length(self$par) clabs <- self$parnames splat("Hybrid of", ncomponents, "components:", commasep(sQuote(clabs))) for(i in 1:ncomponents) { splat(paste0(clabs[i], ":")) print(self$par[[i]], ..., family=family, brief=brief) } parbreak() return(invisible(NULL)) }, interpret = function(coeffs, self) { interlist <- self$par result <- list(param=list(), 
inames=character(0), printable=list()) for(i in 1:length(interlist)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") #' find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) #' extract them if(any(relevant)) { Crelevant <- coeffs[relevant] names(Crelevant) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) #' invoke the self-interpretation of interI interpretI <- interI$interpret if(is.function(interpretI)) { resultI <- interpretI(Crelevant, interI) paramI <- resultI$param prinI <- resultI$printable inamesI <- resultI$inames inamesI <- paste(nameI, inamesI) if(length(prinI) > 0) { result$param <- append(result$param, paramI) result$printable <- append(result$printable, list(prinI)) result$inames <- c(result$inames, inamesI) } } } } return(result) }, valid = function(coeffs, self) { #' check validity via mechanism used for 'rmhmodel' siminfo <- .Spatstat.Rmhinfo[["Hybrid interaction"]] Z <- siminfo(coeffs, self) cifs <- Z$cif pars <- Z$par ntypes <- Z$ntypes if((Ncif <- length(cifs)) == 1) { #' single cif pars <- append(pars, list(beta=rep.int(1, ntypes))) } else { for(i in 1:Ncif) pars[[i]] <- append(pars[[i]], list(beta=rep.int(1, ntypes[i]))) } RM <- rmhmodel(cif=cifs, par=pars, types=1:max(ntypes), stopinvalid=FALSE) return(RM$integrable) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) #' separate into components spl <- splitHybridInteraction(coeffs, self) interlist <- spl$interlist coeflist <- spl$coeflist #' compute projection for each component interaction Ncif <- length(interlist) projlist <- vector(mode="list", length=Ncif) nproj <- integer(Ncif) for(i in 1:Ncif) { coefsI <- coeflist[[i]] interI <- interlist[[i]] if(!is.interact(interI)) stop("Internal error: interlist entry is not an interaction") projI <- interI$project 
if(is.null(projI)) stop(paste("Projection is not yet implemented for a", interI$name)) p <- projI(coefsI, interI) #' p can be NULL (indicating no projection required for interI) #' or a single interaction or a list of interactions. if(is.null(p)) { if(Ncif == 1) return(NULL) # no projection required p <- list(NULL) nproj[i] <- 0 } else if(is.interact(p)) { p <- list(p) nproj[i] <- 1L } else if(is.list(p) && all(unlist(lapply(p, is.interact)))) { nproj[i] <- length(p) } else stop("Internal error: result of projection had wrong format") projlist[[i]] <- p } #' for interaction i there are nproj[i] **new** interactions to try. if(all(nproj == 0)) return(NULL) if(spatstat.options("project.fast")) { #' Single interaction required. #' Extract first entry from each list #' (there should be only one entry, but...) qlist <- lapply(projlist, "[[", i=1L) #' replace NULL entries by corresponding original interactions isnul <- unlist(lapply(qlist, is.null)) if(all(isnul)) return(NULL) if(any(isnul)) qlist[isnul] <- interlist[isnul] names(qlist) <- names(interlist) #' build hybrid and return result <- do.call(Hybrid, qlist) return(result) } #' Full case result <- list() for(i in which(nproj > 0)) { ntry <- nproj[i] tries <- projlist[[i]] for(j in 1:ntry) { #' assemble list of component interactions for hybrid qlist <- interlist qlist[[i]] <- tries[[j]] #' eliminate Poisson ispois <- unlist(lapply(qlist, is.poisson)) if(all(ispois)) { #' collapse to single Poisson h <- Poisson() } else { if(any(ispois)) qlist <- qlist[!ispois] h <- do.call(Hybrid, qlist) } result <- append(result, list(h)) } } #' 'result' is a list of interactions, each a hybrid if(length(result) == 1) result <- result[[1L]] return(result) }, irange = function(self, coeffs=NA, epsilon=0, ...) { interlist <- self$par answer <- 0 for(i in 1:length(interlist)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") #' find coefficients with prefix that exactly matches nameI. 
if(all(is.na(coeffs))) Crelevant <- NA else { Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) #' extract them Crelevant <- coeffs[relevant] names(Crelevant) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) } #' compute reach reachI <- interI$irange if(is.function(reachI)) { resultI <- reachI(interI, coeffs=Crelevant, epsilon=epsilon, ...) answer <- max(answer, resultI) } } return(answer) }, version=versionstring.spatstat() ) class(out) <- "interact" return(out) } invokeSelfStart <- function(inte, Y) { ss <- inte$selfstart if(!is.function(ss)) return(inte) return(ss(Y, inte)) } Hybrid }) is.hybrid <- function(x) { UseMethod("is.hybrid") } is.hybrid.interact <- function(x) { return(is.interact(x) && (x$name == "Hybrid interaction")) } is.hybrid.ppm <- function(x) { return(is.hybrid(as.interact(x))) } splitHybridInteraction <- function(coeffs, inte) { # For hybrids, $par is a list of the component interactions, # but coeffs is a numeric vector. # Split the coefficient vector into the relevant coeffs for each interaction interlist <- inte$par N <- length(interlist) coeflist <- vector(mode="list", length=N) for(i in 1:N) { interI <- interlist[[i]] # forbid hybrids-of-hybrids - these should not occur anyway if(interI$name == "Hybrid interaction") stop("A hybrid-of-hybrid interactions is not implemented") # nameI is the tag that identifies I-th component in hybrid nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) 
# extract coefficients # (there may be none, if this interaction is Poisson or an 'offset') coeffsI <- coeffs[relevant] # remove the prefix so the coefficients are recognisable to interaction if(any(relevant)) names(coeffsI) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # store coeflist[[i]] <- coeffsI } names(coeflist) <- names(interlist) return(list(coeflist=coeflist, interlist=interlist)) } Hybrid <- intermaker(Hybrid, list(creator="Hybrid", name="general hybrid Gibbs process", par=list("..."=42), parnames=list("any list of interactions"))) spatstat/R/Kmodel.R0000755000176200001440000000032413115271075013660 0ustar liggesusers# # Kmodel.R # # Kmodel and pcfmodel # # $Revision: 1.1 $ $Date: 2011/05/30 14:02:21 $ # Kmodel <- function(model, ...) { UseMethod("Kmodel") } pcfmodel <- function(model, ...) { UseMethod("pcfmodel") } spatstat/R/ho.R0000755000176200001440000000431213115271075013054 0ustar liggesusers# # ho.R # # Huang-Ogata method # # $Revision: 1.17 $ $Date: 2016/03/15 07:42:26 $ # ho.engine <- function(model, ..., nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE) { verifyclass(model, "ppm") if(is.null(start)) start <- list(n.start=data.ppm(model)$n) # check that the model can be simulated if(!valid.ppm(model)) { warning("Fitted model is invalid - cannot be simulated") return(NULL) } # compute the observed value of the sufficient statistic X <- data.ppm(model) sobs <- suffstat(model, X) # generate 'nsim' realisations of the fitted model # and compute the sufficient statistics of the model rmhinfolist <- rmh(model, start, control, preponly=TRUE, verbose=FALSE) if(verb) { cat("Simulating... 
") state <- list() } ndone <- 0 while(ndone < nsim) { Xi <- rmhEngine(rmhinfolist, verbose=FALSE) v <- try(suffstat(model,Xi)) if(!inherits(v, "try-error")) { if(ndone == 0) svalues <- matrix(, nrow=nsim, ncol=length(v)) ndone <- ndone + 1 svalues[ndone, ] <- v } if(verb) state <- progressreport(ndone, nsim, state=state) } if(verb) cat("Done.\n\n") # calculate the sample mean and variance of the # sufficient statistic for the simulations smean <- apply(svalues, 2, mean, na.rm=TRUE) svar <- var(svalues, na.rm=TRUE) # value of canonical parameter from MPL fit theta0 <- coef(model) # Newton-Raphson update Vinverse <- solve(svar) theta <- theta0 + as.vector(Vinverse %*% (sobs - smean)) ## appropriate names nama <- names(theta0) if(!is.null(nama)) { names(theta) <- nama dimnames(svar) <- dimnames(Vinverse) <- list(nama, nama) } ## update model newmodel <- model newmodel$coef <- theta newmodel$coef.orig <- theta0 newmodel$method <- "ho" newmodel$fitter <- "ho" newmodel$fisher <- svar newmodel$varcov <- Vinverse # recompute fitted interaction newmodel$fitin <- NULL newmodel$fitin <- fitin(newmodel) ## update pseudolikelihood value using code in logLik.ppm newmodel$maxlogpl.orig <- model$maxlogpl newmodel$maxlogpl <- logLik(newmodel, new.coef=theta, warn=FALSE) ## return(newmodel) } spatstat/R/hierhard.R0000644000176200001440000001475413115225157014244 0ustar liggesusers## ## hierhard.R ## ## $Revision: 1.3 $ $Date: 2017/02/07 07:35:32 $ ## ## The hierarchical hard core process ## ## ------------------------------------------------------------------- ## HierHard <- local({ # ......... 
define interaction potential HHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrices of interaction radii h <- par$hradii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(h) mark1 <- (lx[row(h)])[uptri] mark2 <- (lx[col(h)])[uptri] ## corresponding names mark1name <- (lxname[row(h)])[uptri] mark2name <- (lxname[col(h)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
if(length(z) > 0) { # apply relevant hard core distance to each pair of points hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- array(0, dim=dim(d)) value[forbid] <- -Inf ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- value[Xsub, Qsub] } } attr(z, "IsOffset") <- TRUE return(z) } #### end of 'pot' function #### # Set up basic object except for family and parameters BlankHHobject <- list( name = "Hierarchical hard core process", creator = "HierHard", family = "hierpair.family", # evaluated later pot = HHpotential, par = list(types=NULL, hradii=NULL, archy=NULL), parnames = c("possible types", "hardcore distances", "hierarchical order"), pardesc = c("vector of possible types", "matrix of hardcore distances", "hierarchical order"), selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii archy <- self$par$archy if(!is.null(types) && !is.null(hradii) && !is.null(archy)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(archy)) archy <- seq_len(length(types)) if(!inherits(archy, "hierarchicalordering")) archy <- hierarchicalordering(archy, types) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1L, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) h[!(archy$relation)] <- NA } HierHard(types=types,hradii=hradii,archy=archy) }, init = function(self) { types <- self$par$types hradii <- self$par$hradii ## hradii could be NULL if(!is.null(types)) { if(!is.null(dim(types))) stop(paste("The", sQuote("types"), "argument should be a vector")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") 
if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) if(!is.null(hradii)) MultiPair.checkmatrix(hradii, nt, sQuote("hradii"), asymmok=TRUE) } }, update = NULL, # default OK print = function(self) { hradii <- self$par$hradii types <- self$par$types archy <- self$par$archy if(waxlyrical('gory')) splat(nrow(hradii), "types of points") if(!is.null(types) && !is.null(archy)) { if(waxlyrical('space')) { splat("Possible types and ordering:") } else cat("Hierarchy: ") print(archy) } else if(!is.null(types)) { (if(waxlyrical('space')) splat else cat)("Possible types: ") print(types) } else if(waxlyrical('gory')) splat("Possible types:\t not yet determined") if(!is.null(hradii)) { splat("Hardcore radii:") print(hiermat(dround(hradii), archy)) } else splat("Hardcore radii: not yet determined") invisible(NULL) }, interpret = function(coeffs, self) { # there are no regular parameters (woo-hoo!) return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ h <- self$par$hradii return(max(0, h, na.rm=TRUE)) }, version=NULL # to be added ) class(BlankHHobject) <- "interact" # finally create main function HierHard <- function(hradii=NULL, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } out <- instantiate.interact(BlankHHobject, list(types=types, hradii=hradii, archy=archy)) if(!is.null(types) && !is.null(out$par$hradii)) dimnames(out$par$hradii) <- list(types,types) return(out) } HierHard <- intermaker(HierHard, BlankHHobject) HierHard }) spatstat/R/tess.R0000755000176200001440000006461613115271120013430 0ustar liggesusers# # tess.R # # support for tessellations # # $Revision: 1.75 $ $Date: 2016/12/20 04:06:47 $ # tess <- function(..., xgrid=NULL, ygrid=NULL, tiles=NULL, image=NULL, window=NULL, marks=NULL, keepempty=FALSE, unitname=NULL, check=TRUE) { uname <- unitname if(!is.null(window)) { window <- as.owin(window) if(is.null(uname)) uname <- unitname(window) } isrect <- !is.null(xgrid) && !is.null(ygrid) istiled <- !is.null(tiles) isimage <- !is.null(image) if(isrect + istiled + isimage != 1) stop("Must specify either (xgrid, ygrid) or tiles or img") if(isrect) { stopifnot(is.numeric(xgrid) && all(diff(xgrid) > 0)) stopifnot(is.numeric(ygrid) && all(diff(ygrid) > 0)) if(!is.null(window)) warning("Argument 'window' ignored, because xgrid, grid are given") window <- owin(range(xgrid), range(ygrid), unitname=uname) ntiles <- (length(xgrid)-1) * (length(ygrid)-1) out <- list(type="rect", window=window, xgrid=xgrid, ygrid=ygrid, n=ntiles) } else if(istiled) { stopifnot(is.list(tiles)) if(check) { if(!all(sapply(tiles, is.owin))) stop("Tiles must be a list of owin objects") if(!is.null(uname)) { ## attach new unit name to each tile tiles <- lapply(tiles, "unitname<-", value=uname) } else { ## extract unit names from tiles, check agreement, use as unitname uu <- unique(lapply(tiles, unitname)) uu <- uu[!sapply(uu, is.null)] 
nun <- length(uu) if(nun > 1) stop("Tiles have inconsistent names for the unit of length") if(nun == 1) { ## use this unit name uname <- uu[[1]] if(!is.null(window)) unitname(window) <- uname } } } if(!keepempty && check) { # remove empty tiles isempty <- sapply(tiles, is.empty) if(all(isempty)) stop("All tiles are empty") if(any(isempty)) tiles <- tiles[!isempty] } ntiles <- length(tiles) nam <- names(tiles) lev <- if(!is.null(nam) && all(nzchar(nam))) nam else 1:ntiles if(is.null(window)) window <- do.call(union.owin, unname(tiles)) if(is.mask(window) || any(unlist(lapply(tiles, is.mask)))) { # convert to pixel image tessellation window <- as.mask(window) ima <- as.im(window) ima$v[] <- NA for(i in 1:ntiles) ima[tiles[[i]]] <- i ima <- ima[window, drop=FALSE] ima <- eval.im(factor(ima, levels=1:ntiles)) levels(ima) <- lev out <- list(type="image", window=window, image=ima, n=length(lev)) } else { # tile list window <- rescue.rectangle(window) out <- list(type="tiled", window=window, tiles=tiles, n=length(tiles)) } } else if(isimage) { # convert to factor valued image image <- as.im(image) if(!is.null(uname)) unitname(image) <- uname switch(image$type, logical={ # convert to factor if(keepempty) image <- eval.im(factor(image, levels=c(FALSE,TRUE))) else image <- eval.im(factor(image)) }, factor={ # eradicate unused levels if(!keepempty) image <- eval.im(factor(image)) }, { # convert to factor image <- eval.im(factor(image)) }) if(is.null(window)) window <- as.owin(image) out <- list(type="image", window=window, image=image, n=length(levels(image))) } else stop("Internal error: unrecognised format") ## add marks! 
if(!is.null(marks)) { marks <- as.data.frame(marks) if(nrow(marks) != out$n) stop(paste("wrong number of marks:", nrow(marks), "should be", out$n), call.=FALSE) out$marks <- marks } class(out) <- c("tess", class(out)) return(out) } is.tess <- function(x) { inherits(x, "tess") } print.tess <- function(x, ..., brief=FALSE) { full <- !brief if(full) cat("Tessellation\n") win <- x$window switch(x$type, rect={ if(full) { unitinfo <- summary(unitname(win)) if(equispaced(x$xgrid) && equispaced(x$ygrid)) splat("Tiles are equal rectangles, of dimension", signif(mean(diff(x$xgrid)), 5), "x", signif(mean(diff(x$ygrid)), 5), unitinfo$plural, " ", unitinfo$explain) else splat("Tiles are unequal rectangles") } splat(length(x$xgrid)-1, "by", length(x$ygrid)-1, "grid of tiles") }, tiled={ if(full) { if(win$type == "polygonal") splat("Tiles are irregular polygons") else splat("Tiles are windows of general type") } splat(length(x$tiles), "tiles (irregular windows)") }, image={ nlev <- length(levels(x$image)) if(full) { splat("Tessellation is determined by a factor-valued image with", nlev, "levels") } else splat(nlev, "tiles (levels of a pixel image)") }) if(!is.null(marx <- x$marks)) { m <- dim(marx)[2] %orifnull% 1 if(m == 1) splat("Tessellation is marked") else splat("Tessellation has", m, "columns of marks:", commasep(sQuote(colnames(marx)))) } if(full) print(win) invisible(NULL) } unitname.tess <- function(x) unitname(x$window) "unitname<-.tess" <- function(x, value) { unitname(x$window) <- value switch(x$type, rect={}, tiled={ x$tiles <- lapply(x$tiles, "unitname<-", value) }, image={ unitname(x$image) <- value }) return(x) } plot.tess <- local({ plotem <- function(z, ..., col=NULL) { if(is.null(col)) plot(z, ..., add=TRUE) else if(z$type != "mask") plot(z, ..., border=col, add=TRUE) else plot(z, ..., col=col, add=TRUE) } plotpars <- c("sub", "lty", "lwd", "cex.main", "col.main", "font.main", "cex.sub", "col.sub", "font.sub", "border") plot.tess <- function(x, ..., main, 
add=FALSE, show.all=!add, col=NULL, do.plot=TRUE, do.labels=FALSE, labels=tilenames(x), labelargs=list()) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) switch(x$type, rect={ win <- x$window result <- do.call.matched(plot.owin, resolve.defaults(list(x=win, main=main, add=add, show.all=show.all, do.plot=do.plot), list(...)), extrargs=plotpars) if(do.plot) { xg <- x$xgrid yg <- x$ygrid do.call.matched(segments, resolve.defaults(list(x0=xg, y0=win$yrange[1], x1=xg, y1=win$yrange[2]), list(col=col), list(...), .StripNull=TRUE)) do.call.matched(segments, resolve.defaults(list(x0=win$xrange[1], y0=yg, x1=win$xrange[2], y1=yg), list(col=col), list(...), .StripNull=TRUE)) } }, tiled={ result <- do.call.matched(plot.owin, resolve.defaults(list(x=x$window, main=main, add=add, show.all=show.all, do.plot=do.plot), list(...)), extrargs=plotpars) if(do.plot) { til <- tiles(x) lapply(til, plotem, ..., col=col) } }, image={ result <- do.call(plot, resolve.defaults(list(x$image, add=add, main=main, show.all=show.all, do.plot=do.plot), list(...), list(valuesAreColours=FALSE))) }) if(do.plot && do.labels) { labels <- paste(as.vector(labels)) til <- tiles(x) incircles <- lapply(til, incircle) x0 <- sapply(incircles, getElement, name="x") y0 <- sapply(incircles, getElement, name="y") do.call.matched(text.default, resolve.defaults(list(x=x0, y = y0), list(labels=labels), labelargs), funargs=graphicsPars("text")) } return(invisible(result)) } plot.tess }) "[<-.tess" <- function(x, i, ..., value) { switch(x$type, rect=, tiled={ til <- tiles(x) til[i] <- value ok <- !unlist(lapply(til, is.null)) x <- tess(tiles=til[ok]) }, image={ stop("Cannot assign new values to subsets of a pixel image") }) return(x) } "[.tess" <- function(x, i, ...) 
{ trap.extra.arguments(..., .Context="in [.tess") if(missing(i)) return(x) if(is.owin(i)) return(intersect.tess(x, i)) switch(x$type, rect=, tiled={ til <- tiles(x)[i] return(tess(tiles=til)) }, image={ img <- x$image oldlev <- levels(img) newlev <- unique(oldlev[i]) img <- eval.im(factor(img, levels=newlev)) return(tess(image=img)) }) } tiles <- function(x) { switch(x$type, rect={ out <- list() xg <- x$xgrid yg <- x$ygrid nx <- length(xg) - 1 ny <- length(yg) - 1 for(j in rev(seq_len(ny))) for(i in seq_len(nx)) { winij <- owin(xg[c(i,i+1)], yg[c(j,j+1)]) dout <- list(winij) names(dout) <- paste("Tile row ", ny-j+1, ", col ", i, sep="") out <- append(out, dout) } }, tiled={ out <- x$tiles if(is.null(names(out))) names(out) <- paste("Tile", seq_along(out)) }, image={ out <- list() ima <- x$image lev <- levels(ima) for(i in seq_along(lev)) out[[i]] <- solutionset(ima == lev[i]) names(out) <- paste(lev) }) out <- as.solist(out) return(out) } tiles.empty <- function(x) { stopifnot(is.tess(x)) switch(x$type, rect = { nx <- length(x$xgrid) - 1 ny <- length(x$ygrid) - 1 ans <- rep(FALSE, nx * ny) }, tiled = { ans <- sapply(x$tiles, is.empty) }, image = { ans <- (table(x$image[]) == 0) }) return(ans) } tilenames <- function(x) { stopifnot(is.tess(x)) switch(x$type, rect={ if(!is.null(x$tilenames)) { out <- x$tilenames } else { nx <- length(x$xgrid) - 1 ny <- length(x$ygrid) - 1 ij <- expand.grid(1:nx, 1:ny) out <- paste0("Tile row ", ij[,2], ", col ", ij[,1]) } }, tiled={ out <- names(x$tiles) if(sum(nzchar(out)) != x$n) out <- paste("Tile", seq_len(x$n)) }, image={ out <- levels(x$image) } ) return(as.character(out)) } "tilenames<-" <- function(x, value) { stopifnot(is.tess(x)) if(!is.null(value)) { ## validate length value <- as.character(value) nv <- length(value) switch(x$type, rect = { nx <- length(x$xgrid) - 1 ny <- length(x$ygrid) - 1 n <- nx * ny }, tiled = { n <- length(x$tiles) }, image = { n <- length(levels(x$image)) }) if(nv != n) stop("Replacement value has 
wrong length", paren(paste(nv, "instead of", n))) } switch(x$type, rect={ x$tilenames <- value }, tiled={ names(x$tiles) <- value }, image={ levels(x$image) <- value %orifnull% (1:n) } ) return(x) } marks.tess <- function(x, ...) { stopifnot(is.tess(x)) return(x$marks) } "marks<-.tess" <- function(x, ..., value) { stopifnot(is.tess(x)) if(!is.null(value)) { value <- as.data.frame(value) if(nrow(value) != x$n) stop(paste("replacement value for marks has wrong length:", nrow(value), "should be", x$n), call.=FALSE) rownames(value) <- NULL if(ncol(value) == 1) colnames(value) <- "marks" } x$marks <- value return(x) } unmark.tess <- function(X) { marks(X) <- NULL; return(X) } tile.areas <- function(x) { stopifnot(is.tess(x)) switch(x$type, rect={ xg <- x$xgrid yg <- x$ygrid # nx <- length(xg) - 1 # ny <- length(yg) - 1 a <- outer(rev(diff(yg)), diff(xg), "*") a <- as.vector(t(a)) names(a) <- as.vector(t(tilenames(x))) }, tiled={ a <- unlist(lapply(x$tiles, area)) }, image={ z <- x$image a <- table(z$v) * z$xstep * z$ystep }) return(a) } as.im.tess <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) { # if W is present, it may have to be converted if(!is.null(W)) { stopifnot(is.owin(W)) if(W$type != "mask") W <- as.mask(W, eps=eps, dimyx=dimyx, xy=xy) } switch(X$type, image={ out <- as.im(X$image, W=W, eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace) }, tiled={ if(is.null(W)) W <- as.mask(as.owin(X), eps=eps, dimyx=dimyx, xy=xy) til <- X$tiles ntil <- length(til) nama <- names(til) if(is.null(nama) || !all(nzchar(nama))) nama <- paste(seq_len(ntil)) xy <- list(x=W$xcol, y=W$yrow) for(i in seq_len(ntil)) { indic <- as.mask(til[[i]], xy=xy) tag <- as.im(indic, value=i) if(i == 1) { out <- tag outv <- out$v } else { outv <- pmin.int(outv, tag$v, na.rm=TRUE) } } out <- im(factor(outv, levels=seq_len(ntil), labels=nama), out$xcol, out$yrow) unitname(out) <- unitname(W) }, rect={ if(is.null(W)) out <- as.im(as.rectangle(X), eps=eps, dimyx=dimyx, 
xy=xy) else out <- as.im(W) xg <- X$xgrid yg <- X$ygrid nrows <- length(yg) - 1 ncols <- length(xg) - 1 jx <- findInterval(out$xcol, xg, rightmost.closed=TRUE) iy <- findInterval(out$yrow, yg, rightmost.closed=TRUE) M <- as.matrix(out) Jcol <- jx[col(M)] Irow <- nrows - iy[row(M)] + 1 Ktile <- Jcol + ncols * (Irow - 1) Ktile <- factor(Ktile, levels=seq_len(nrows * ncols)) out <- im(Ktile, xcol=out$xcol, yrow=out$yrow, unitname=unitname(W)) } ) return(out) } nobjects.tess <- function(x) { switch(x$type, image = length(levels(x$image)), rect = (length(x$xgrid)-1) * (length(x$ygrid)-1), tiled = length(x$tiles)) } as.function.tess <- function(x, ..., values=NULL) { V <- x if(is.null(values)) { f <- function(x,y) { tileindex(x,y,V) } } else { if(length(values) != nobjects(x)) stop("Length of 'values' should equal the number of tiles", call.=FALSE) f <- function(x,y) { values[as.integer(tileindex(x,y,V))] } } g <- funxy(f, Window(V)) return(g) } tileindex <- function(x, y, Z) { stopifnot(is.tess(Z)) stopifnot(length(x) == length(y)) switch(Z$type, rect={ jx <- findInterval(x, Z$xgrid, rightmost.closed=TRUE) iy <- findInterval(y, Z$ygrid, rightmost.closed=TRUE) nrows <- length(Z$ygrid) - 1 ncols <- length(Z$xgrid) - 1 iy[iy < 1 | iy > nrows] <- NA jx[jx < 1 | jx > ncols] <- NA jcol <- jx irow <- nrows - iy + 1 ktile <- jcol + ncols * (irow - 1) m <- factor(ktile, levels=seq_len(nrows*ncols)) ij <- expand.grid(j=seq_len(ncols),i=seq_len(nrows)) levels(m) <- paste("Tile row ", ij$i, ", col ", ij$j, sep="") }, tiled={ n <- length(x) todo <- seq_len(n) nt <- length(Z$tiles) m <- integer(n) for(i in 1:nt) { ti <- Z$tiles[[i]] hit <- inside.owin(x[todo], y[todo], ti) if(any(hit)) { m[todo[hit]] <- i todo <- todo[!hit] } if(length(todo) == 0) break } m[m == 0] <- NA nama <- names(Z$tiles) lev <- seq_len(nt) lab <- if(!is.null(nama) && all(nzchar(nama))) nama else paste("Tile", lev) m <- factor(m, levels=lev, labels=lab) }, image={ Zim <- Z$image m <- factor(Zim[list(x=x, y=y), 
drop=FALSE], levels=levels(Zim)) } ) return(m) } as.tess <- function(X) { UseMethod("as.tess") } as.tess.tess <- function(X) { fields <- switch(X$type, rect={ c("xgrid", "ygrid") }, tiled={ "tiles" }, image={ "image" }, stop(paste("Unrecognised tessellation type", sQuote(X$type)))) fields <- c(c("type", "window", "n", "marks"), fields) X <- unclass(X)[fields] class(X) <- c("tess", class(X)) return(X) } as.tess.im <- function(X) { return(tess(image = X)) } as.tess.list <- function(X) { W <- lapply(X, as.owin) return(tess(tiles=W)) } as.tess.owin <- function(X) { return(tess(tiles=list(X))) } domain.tess <- Window.tess <- function(X, ...) { as.owin(X) } intersect.tess <- function(X, Y, ..., keepmarks=FALSE) { X <- as.tess(X) if(is.owin(Y) && Y$type == "mask") { # special case # convert to pixel image result <- as.im(Y) Xtiles <- tiles(X) for(i in seq_along(Xtiles)) { tilei <- Xtiles[[i]] result[tilei] <- i } result <- result[Y, drop=FALSE] out <- tess(image=result, window=Y) if(keepmarks) marks(out) <- marks(X) return(out) } if(is.owin(Y)) { # efficient code when Y is a window, retaining names of tiles of X Ztiles <- lapply(tiles(X), intersect.owin, B=Y, ..., fatal=FALSE) isempty <- sapply(Ztiles, is.empty) Ztiles <- Ztiles[!isempty] Xwin <- as.owin(X) Ywin <- Y if(keepmarks) { marksX <- marks(X) if(!is.null(marksX)) marx <- as.data.frame(marksX)[!isempty, ] } } else { # general case Y <- as.tess(Y) Xtiles <- tiles(X) Ytiles <- tiles(Y) Ztiles <- list() namesX <- tilenames(X) namesY <- tilenames(Y) if(keepmarks) { Xmarks <- as.data.frame(marks(X)) Ymarks <- as.data.frame(marks(Y)) gotXmarks <- (ncol(Xmarks) > 0) gotYmarks <- (ncol(Ymarks) > 0) if(gotXmarks && gotYmarks) { colnames(Xmarks) <- paste0("X", colnames(Xmarks)) colnames(Ymarks) <- paste0("Y", colnames(Ymarks)) } if(gotXmarks || gotYmarks) { marx <- if(gotXmarks && gotYmarks) { cbind(Xmarks[integer(0), , drop=FALSE], Ymarks[integer(0), , drop=FALSE]) } else if(gotXmarks) { Xmarks[integer(0), , drop=FALSE] } 
else { Ymarks[integer(0), , drop=FALSE] } } else keepmarks <- FALSE } for(i in seq_along(Xtiles)) { Xi <- Xtiles[[i]] Ti <- lapply(Ytiles, intersect.owin, B=Xi, ..., fatal=FALSE) isempty <- sapply(Ti, is.empty) nonempty <- !isempty if(any(nonempty)) { Ti <- Ti[nonempty] names(Ti) <- paste(namesX[i], namesY[nonempty], sep="x") Ztiles <- append(Ztiles, Ti) if(keepmarks) { extra <- if(gotXmarks && gotYmarks) { data.frame(X=Xmarks[i, ,drop=FALSE], Y=Ymarks[nonempty, ,drop=FALSE], row.names=NULL) } else if(gotYmarks) { Ymarks[nonempty, ,drop=FALSE] } else { Xmarks[rep(i, sum(nonempty)), ,drop=FALSE] } marx <- rbind(marx, extra) } } } Xwin <- as.owin(X) Ywin <- as.owin(Y) } Zwin <- intersect.owin(Xwin, Ywin) out <- tess(tiles=Ztiles, window=Zwin) if(keepmarks) marks(out) <- marx return(out) } bdist.tiles <- local({ vdist <- function(x,w) { z <- as.ppp(vertices(x), W=w, check=FALSE) min(bdist.points(z)) } edist <- function(x,b) { xd <- crossdist(edges(x, check=FALSE), b, type="separation") min(xd) } bdist.tiles <- function(X) { if(!is.tess(X)) stop("X must be a tessellation") W <- as.owin(X) switch(X$type, rect=, tiled={ tt <- tiles(X) if(is.convex(W)) { # distance is minimised at a tile vertex d <- sapply(tt, vdist, w=W) } else { # coerce everything to polygons W <- as.polygonal(W) tt <- lapply(tt, as.polygonal) # compute min dist from tile edges to window edges d <- sapply(tt, edist, b=edges(W)) } }, image={ Xim <- X$image # compute boundary distance for each pixel bd <- bdist.pixels(as.owin(Xim), style="image") bd <- bd[W, drop=FALSE] # split over tiles bX <- split(bd, X) # compute minimum distance over each level of factor d <- sapply(bX, function(z) { summary(z)$min }) } ) return(d) } bdist.tiles }) ## ......... geometrical transformations .................. shift.tess <- function(X, ...) { Y <- X Y$window <- wY <- shift(X$window, ...) 
vec <- getlastshift(wY) switch(X$type, rect={ Y$xgrid <- Y$xgrid + vec[1] Y$ygrid <- Y$ygrid + vec[2] }, tiled={ Y$tiles <- lapply(Y$tiles, shift, vec=vec) }, image = { Y$image <- shift(Y$image, vec) }) attr(Y, "lastshift") <- vec return(Y) } affine.tess <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { Y <- X Y$window <- affine(X$window, mat=mat, vec=vec, ...) switch(Y$type, rect = { if(all(mat == diag(diag(mat)))) { ## result is rectangular Y$xgrid <- sort(mat[1,1] * X$xgrid + vec[1]) Y$ygrid <- sort(mat[2,2] * X$ygrid + vec[2]) } else { ## shear transformation; treat rectangles as general tiles Y <- tess(tiles=tiles(X), window=Y$window) Y$tiles <- lapply(Y$tiles, affine, mat=mat, vec=vec, ...) } }, tiled={ Y$tiles <- lapply(Y$tiles, affine, mat=mat, vec=vec, ...) }, image = { Y$image <- affine(Y$image, mat=mat, vec=vec, ...) }) return(Y) } reflect.tess <- function(X) { Y <- X Y$window <- reflect(Y$window) switch(X$type, rect = { Y$xgrid <- rev(- Y$xgrid) Y$ygrid <- rev(- Y$ygrid) }, tiled = { Y$tiles <- lapply(Y$tiles, reflect) }, image = { Y$image <- reflect(Y$image) }) return(Y) } scalardilate.tess <- function(X, f, ...) { Y <- X Y$window <- scalardilate(X$window, f, ...) switch(X$type, rect = { Y$xgrid <- f * Y$xgrid Y$ygrid <- f * Y$ygrid }, tiled = { Y$tiles <- lapply(Y$tiles, scalardilate, f=f, ...) }, image = { Y$image <- scalardilate(Y$image, f=f, ...) }) return(Y) } rotate.tess <- function(X, angle=pi/2, ..., centre=NULL) { if(angle %% (2 * pi) == 0) return(X) if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL Y <- X Y$window <- rotate(X$window, angle=angle, ...) 
switch(X$type, rect = { if(angle %% (pi/2) == 0) { ## result is rectangular co <- round(cos(angle)) si <- round(sin(angle)) Y$xgrid <- sort((if(co == 0) 0 else (co * X$xgrid)) - (if(si == 0) 0 else (si * X$ygrid))) Y$ygrid <- sort((if(si == 0) 0 else (si * X$xgrid)) + (if(co == 0) 0 else (co * X$ygrid))) } else { ## general tessellation Y <- tess(tiles=lapply(tiles(X), rotate, angle=angle, ...), window=Y$window) } }, tiled = { Y$tiles <- lapply(X$tiles, rotate, angle=angle, ...) }, image = { Y$image <- rotate(X$image, angle=angle, ...) }) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } as.data.frame.tess <- function(x, ...) { switch(x$type, rect =, tiled = { y <- lapply(tiles(x), as.data.frame, ...) z <- mapply(assignDFcolumn, x=y, value=tilenames(x), MoreArgs=list(name="Tile", ...), SIMPLIFY=FALSE) z <- do.call(rbind, z) row.names(z) <- NULL }, image = { z <- as.data.frame(x$image, ...) if(!is.na(m <- match("value", colnames(z)))) colnames(z)[m] <- "Tile" }, { z <- NULL warning("Unrecognised type of tessellation") }) return(z) } spatstat/R/Jinhom.R0000644000176200001440000002772513115271075013704 0ustar liggesusers# # Jinhom.R # # $Revision: 1.11 $ $Date: 2017/06/05 10:31:58 $ # Ginhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE, update = TRUE) { stopifnot(is.ppp(X)) npts <- npoints(X) W <- as.owin(X) areaW <- area(W) miss.update <- missing(update) # determine 'r' values rmaxdefault <- rmax.rule("G", W, npts/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) if(!breaks$even) stop("r values must be evenly spaced") r <- breaks$r rmax <- breaks$max nr <- length(r) dangerous <- "lambda" danger <- TRUE # Intensity values at data points if(is.null(lambda)) { # No intensity data provided danger <- FALSE # Estimate density at points by leave-one-out kernel smoothing lamX <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambdaX <- as.numeric(lamX) # 
negative or zero values are due to numerical error lambdaX <- pmax.int(lambdaX, .Machine$double.eps) } else { # lambda values provided if(is.im(lambda)) lambdaX <- safelookup(lambda, X) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## just use intensity of fitted model lambdaX <- predict(lambda, locations=X, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE) danger <- FALSE if(miss.update) warn.once(key="Ginhom.update", "The behaviour of Ginhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Ginhom)") } } else if(is.function(lambda)) lambdaX <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { lambdaX <- lambda check.nvector(lambdaX, npts) } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # negative values are illegal minX <- min(lambdaX) if(minX < 0) stop("Negative values of lambda were encountered at data points") if(minX == 0) stop("Zero values of lambda were encountered at data points") } # Minimum intensity if(!is.null(lmin)) { check.1.real(lmin) stopifnot(lmin >= 0) } else { # Compute minimum value over window if(is.null(lambda)) { # extract previously selected smoothing bandwidth sigma <- attr(lamX, "sigma") varcov <- attr(lamX, "varcov") # estimate density on a pixel grid and minimise lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels") lmin <- min(lam) # negative or zero values may occur due to numerical error lmin <- max(lmin, .Machine$double.eps) } else { if(is.im(lambda)) lmin <- min(lambda) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) lmin <- min(predict(lambda)) else if(is.function(lambda)) lmin <- min(as.im(lambda, W)) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) lmin <- min(lambdaX) } if(lmin < 0) stop("Negative values of intensity encountered") # 
ensure lmin < lambdaX lmin <- min(lmin, lambdaX) } # Compute intensity factor lratio <- lmin/lambdaX vv <- 1 - lratio bad <- (lratio > 1) if((nbad <- sum(bad)) > 0) stop(paste("Value of", sQuote("lmin"), "exceeds", nbad, gettext(nbad, "value", "values"), "of", sQuote("lambda"))) # sort data points in order of increasing x coordinate xx <- X$x yy <- X$y oX <- fave.order(xx) xord <- xx[oX] yord <- yy[oX] vord <- vv[oX] # compute local cumulative products z <- .C("locprod", n = as.integer(npts), x = as.double(xord), y = as.double(yord), v = as.double(vord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(npts * nr)), PACKAGE = "spatstat") ans <- matrix(z$ans, nrow=nr, ncol=npts) # revert to original ordering loccumprod <- matrix(, nrow=nr, ncol=npts) loccumprod[, oX] <- ans # border correction bX <- bdist.points(X) ok <- outer(r, bX, "<=") denom <- .rowSums(ok, nr, npts) loccumprod[!ok] <- 0 numer <- .rowSums(loccumprod, nr, npts) # pack up Gdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(npts, nr) G <- ratfv(Gdf, NULL, theo.denom, "r", quote(G[inhom](r)), "theo", NULL, c(0,rmax), c("r", "{%s[%s]^{pois}}(r)"), desc, fname=c("G", "inhom"), ratio=ratio) G <- bind.ratfv(G, data.frame(bord=denom-numer), denom, "{hat(%s)[%s]^{bord}}(r)", "border estimate of %s", "bord", ratio=ratio) # formula(G) <- . 
~ r fvnames(G, ".") <- c("bord", "theo") unitname(G) <- unitname(X) if(ratio) G <- conform.ratfv(G) if(danger) attr(G, "dangerous") <- dangerous return(G) } Finhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE, update = TRUE) { stopifnot(is.ppp(X)) npts <- npoints(X) W <- as.owin(X) areaW <- area(W) miss.update <- missing(update) # determine 'r' values rmaxdefault <- rmax.rule("F", W, npts/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) if(!breaks$even) stop("r values must be evenly spaced") r <- breaks$r rmax <- breaks$max nr <- length(r) dangerous <- "lambda" danger <- TRUE # Intensity values at data points if(is.null(lambda)) { # No intensity data provided danger <- FALSE # Estimate density at points by leave-one-out kernel smoothing lamX <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambdaX <- as.numeric(lamX) # negative or zero values are due to numerical error lambdaX <- pmax.int(lambdaX, .Machine$double.eps) } else { # lambda values provided if(is.im(lambda)) lambdaX <- safelookup(lambda, X) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## just use intensity of fitted model lambdaX <- predict(lambda, locations=X, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE) danger <- FALSE if(miss.update) warn.once(key="Finhom.update", "The behaviour of Finhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Finhom)") } } else if(is.function(lambda)) lambdaX <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { lambdaX <- lambda check.nvector(lambdaX, npts) } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # negative values are illegal minX <- min(lambdaX) if(minX < 0) stop("Negative values of lambda 
were encountered at data points") if(minX == 0) stop("Zero values of lambda were encountered at data points") } # Minimum intensity if(!is.null(lmin)) { check.1.real(lmin) stopifnot(lmin >= 0) } else { # Compute minimum value over window if(is.null(lambda)) { # extract previously selected smoothing bandwidth sigma <- attr(lamX, "sigma") varcov <- attr(lamX, "varcov") # estimate density on a pixel grid and minimise lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels") lmin <- min(lam) # negative or zero values may occur due to numerical error lmin <- max(lmin, .Machine$double.eps) } else { if(is.im(lambda)) lmin <- min(lambda) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) lmin <- min(predict(lambda)) else if(is.function(lambda)) lmin <- min(as.im(lambda, W)) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) lmin <- min(lambdaX) } if(lmin < 0) stop("Negative values of intensity encountered") # ensure lmin < lambdaX lmin <- min(lmin, lambdaX) } # Compute intensity factor lratio <- lmin/lambdaX vv <- 1 - lratio bad <- (lratio > 1) if((nbad <- sum(bad)) > 0) stop(paste("Value of", sQuote("lmin"), "exceeds", nbad, gettext(nbad, "value", "values"), "of", sQuote("lambda"))) # sort data points in order of increasing x coordinate xx <- X$x yy <- X$y oX <- fave.order(xx) xord <- xx[oX] yord <- yy[oX] vord <- vv[oX] # determine pixel grid and compute distance to boundary M <- do.call.matched(as.mask, append(list(w=W), list(...))) bM <- bdist.pixels(M, style="matrix") bM <- as.vector(bM) # x, y coordinates of pixels are already sorted by increasing x xM <- as.vector(rasterx.mask(M)) yM <- as.vector(rastery.mask(M)) nM <- length(xM) # compute local cumulative products z <- .C("locxprod", ntest = as.integer(nM), xtest = as.double(xM), ytest = as.double(yM), ndata = as.integer(npts), xdata = as.double(xord), ydata = as.double(yord), vdata = as.double(vord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(nM * nr)), PACKAGE 
= "spatstat") loccumprod <- matrix(z$ans, nrow=nr, ncol=nM) # border correction ok <- outer(r, bM, "<=") denom <- .rowSums(ok, nr, nM) loccumprod[!ok] <- 0 numer <- .rowSums(loccumprod, nr, nM) # pack up Fdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(npts, nr) FX <- ratfv(Fdf, NULL, theo.denom, "r", quote(F[inhom](r)), "theo", NULL, c(0,rmax), c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("F", "inhom"), ratio=ratio) FX <- bind.ratfv(FX, data.frame(bord=denom-numer), denom, "{hat(%s)[%s]^{bord}}(r)", "border estimate of %s", "bord", ratio=ratio) # formula(FX) <- . ~ r fvnames(FX, ".") <- c("bord", "theo") unitname(FX) <- unitname(X) if(ratio) FX <- conform.ratfv(FX) if(danger) attr(FX, "dangerous") <- dangerous return(FX) } Jinhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, update = TRUE) { if(missing(update) & (is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda))) warn.once(key="Jinhom.update", "The behaviour of Jinhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Jinhom)") GX <- Ginhom(X, lambda=lambda, lmin=lmin, ..., sigma=sigma, varcov=varcov, r=r, breaks=breaks, ratio=FALSE, update=update) r <- GX$r FX <- Finhom(X, lambda=lambda, lmin=lmin, ..., sigma=sigma, varcov=varcov, r=r, ratio=FALSE, update=update) JX <- eval.fv((1-GX)/(1-FX)) # relabel the fv object JX <- rebadge.fv(JX, quote(J[inhom](r)), c("J","inhom"), names(JX), new.labl=attr(GX, "labl")) # tack on extra info attr(JX, "G") <- GX attr(JX, "F") <- FX attr(JX, "dangerous") <- attr(GX, "dangerous") return(JX) } spatstat/R/nearestsegment.R0000755000176200001440000000506113115271120015463 0ustar liggesusers# # nearestsegment.R # # $Revision: 1.11 $ $Date: 2014/11/10 11:27:12 $ # # Given a point pattern X and a line segment pattern Y, # for each point x of X, determine which segment of Y is closest to x # and find the point on Y 
closest to x. # nearestsegment <- function(X,Y) { return(ppllengine(X,Y,"identify")) } project2segment <- function(X, Y) { return(ppllengine(X,Y,"project")) } ppllengine <- function(X, Y, action="project", check=FALSE) { stopifnot(is.ppp(X)) stopifnot(is.psp(Y)) stopifnot(action %in% c("distance", "identify", "project")) # deal with empty patterns if(Y$n == 0) stop("Segment pattern Y contains 0 segments; projection undefined") if(X$n == 0) { nowt <- numeric(0) none <- integer(0) switch(action, identify = return(none), distance = return(list(dist=nowt, which=none)), project = return(list(Xproj=X, mapXY=none, d=nowt, tp=nowt))) } # XX <- as.matrix(as.data.frame(unmark(X))) YY <- as.matrix(as.data.frame(unmark(Y))) # determine which segment lies closest to each point huge <- max(diameter(as.rectangle(as.owin(X))), diameter(as.rectangle(as.owin(Y)))) d <- distppllmin(XX, YY, huge^2) mapXY <- d$min.which if(action == "identify") return(mapXY) else if(action == "distance") return(data.frame(dist=d$min.d, which=mapXY)) # combine relevant rows of data alldata <- as.data.frame(cbind(XX, YY[mapXY, ,drop=FALSE])) colnames(alldata) <- c("x", "y", "x0", "y0", "x1", "y1") # coordinate geometry dx <- with(alldata, x1-x0) dy <- with(alldata, y1-y0) leng <- sqrt(dx^2 + dy^2) # rotation sines & cosines (may include 0/0) co <- dx/leng si <- dy/leng # vector to point from first endpoint of segment xv <- with(alldata, x - x0) yv <- with(alldata, y - y0) # rotate coordinate system so that x axis is parallel to line segment xpr <- xv * co + yv * si # ypr <- - xv * si + yv * co # determine whether projection is an endpoint or interior point of segment ok <- is.finite(xpr) left <- !ok | (xpr <= 0) right <- ok & (xpr >= leng) # location of projected point in rotated coordinates xr <- with(alldata, ifelseAX(left, 0, ifelseXY(right, leng, xpr))) # back to standard coordinates xproj <- with(alldata, x0 + ifelseXB(ok, xr * co, 0)) yproj <- with(alldata, y0 + ifelseXB(ok, xr * si, 0)) Xproj <- 
ppp(xproj, yproj, window=X$window, marks=X$marks, check=check) # parametric coordinates tp <- xr/leng tp[!is.finite(tp)] <- 0 # return(list(Xproj=Xproj, mapXY=mapXY, d=d$min.d, tp=tp)) } spatstat/R/Kest.R0000755000176200001440000010277313115271075013366 0ustar liggesusers# # Kest.R Estimation of K function # # $Revision: 5.120 $ $Date: 2017/06/05 10:31:58 $ # # # -------- functions ---------------------------------------- # Kest() compute estimate of K # using various edge corrections # # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # # r distance values at which to compute K # # -------- standard output ------------------------------ # A data frame (class "fv") with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # using standard formula (denominator = count of points) # # bord.modif: K function estimated by border method # using modified formula # (denominator = area of eroded window # # ------------------------------------------------------------------------ "Lest" <- function(X, ...) { K <- Kest(X, ...) 
L <- eval.fv(sqrt(K/pi), dotonly=FALSE) # handle variance estimates if(any(varcols <- colnames(K) %in% c("rip", "ls"))) { r <- with(L, .x) L[,varcols] <- as.data.frame(K)[,varcols]/(2 * pi * r)^2 # fix 0/0 n <- npoints(X) A <- area(Window(X)) if(any(colnames(K) == "rip")) L[r == 0, "rip"] <- (2 * A/(n-1)^2)/(4 * pi) if(any(colnames(K) == "ls")) L[r == 0, "ls"] <- (2 * A/(n * (n-1)))/(4 * pi) } # relabel the fv object L <- rebadge.fv(L, quote(L(r)), "L", names(K), new.labl=attr(K, "labl")) # return(L) } "Kest"<- function(X, ..., r=NULL, rmax=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE) { verifyclass(X, "ppp") nlarge.given <- !missing(nlarge) && !is.null(nlarge) rfixed <- !is.null(r) || !is.null(breaks) npts <- npoints(X) W <- X$window areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) if(!is.null(domain)) { # estimate based on contributions from a subdomain domain <- as.owin(domain) if(!is.subset.owin(domain, W)) stop(paste(dQuote("domain"), "is not a subset of the window of X")) # trick Kdot() into doing it indom <- factor(inside.owin(X$x, X$y, domain), levels=c(FALSE,TRUE)) Kd <- Kdot(X %mark% indom, i="TRUE", r=r, breaks=breaks, correction=correction, ratio=ratio) # relabel and exit Kd <- rebadge.fv(Kd, quote(K(r)), "K") return(Kd) } rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda) if(is.infinite(rmaxdefault)) rmaxdefault <- diameter(W) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", rigid="rigid", 
good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) # replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) # retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ########################################### # Efficient code for border correction and no correction # Usable only if r values are evenly spaced from 0 to rmax # Invoked automatically if number of points is large can.do.fast <- breaks$even large.n <- (npts >= nlarge) # demand.best <- correction.given && best.wanted large.n.trigger <- large.n && !correction.given fastcorrections <- c("border", "bord.modif", "none") fastdefault <- "border" correction.fast <- all(correction %in% fastcorrections) will.do.fast <- can.do.fast && (correction.fast || large.n.trigger) asked <- correction.fast || (nlarge.given && large.n.trigger) if(asked && !can.do.fast) warning("r values not evenly spaced - cannot use efficient code") if(will.do.fast) { # determine correction(s) ok <- correction %in% fastcorrections correction <- if(any(ok)) correction[ok] else fastdefault bord <- any(correction %in% c("border", "bord.modif")) none <- any(correction =="none") if(!all(ok)) { # some corrections were overridden; notify user corx <- c(if(bord) "border correction estimate" else NULL, if(none) "uncorrected estimate" else NULL) corx <- paste(corx, collapse=" and ") message(paste("number of data points exceeds", nlarge, "- computing", corx , "only")) } # restrict r values to recommended range, unless specifically requested if(!rfixed) r <- seq(from=0, to=alim[2], length.out=length(r)) if(bord) Kb <- Kborder.engine(X, max(r), length(r), correction, ratio=ratio) if(none) Kn <- Knone.engine(X, max(r), length(r), ratio=ratio) if(bord && none) { Kn <- Kn[ , names(Kn) != "theo"] yn 
<- fvnames(Kb, ".y") Kbn <- if(!ratio) bind.fv(Kb, Kn, preferred=yn) else bind.ratfv(Kb, Kn, preferred=yn) return(Kbn) } if(bord) return(Kb) if(none) return(Kn) } do.fast.rectangle <- can.do.fast && is.rectangle(W) && spatstat.options("use.Krect") && !any(correction == "rigid") if(do.fast.rectangle) { ########################################### ## Fast code for rectangular window ########################################### K <- Krect.engine(X, rmax, length(r), correction, ratio=ratio) attr(K, "alim") <- alim } else { ########################################### ## Slower code ########################################### ## this will be the output data frame Kdf <- data.frame(r=r, theo = pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- lambda2 * areaW K <- ratfv(Kdf, NULL, denom, "r", quote(K(r)), "theo", NULL, alim, c("r","%s[pois](r)"), desc, fname="K", ratio=ratio) ## identify all close pairs rmax <- max(r) what <- if(any(correction %in% c("translate", "isotropic"))) "all" else "ijd" close <- closepairs(X, rmax, what=what) DIJ <- close$d ## precompute set covariance of window gW <- NULL if(any(correction %in% c("translate", "rigid", "isotropic"))) gW <- setcov(W) if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! 
wh <- whist(DIJ, breaks$val) # no weights numKun <- cumsum(wh) denKun <- lambda2 * areaW ## uncorrected estimate of K K <- bind.ratfv(K, data.frame(un=numKun), denKun, "hat(%s)[un](r)", "uncorrected estimate of %s", "un", ratio=ratio) } if(any(correction == "border" | correction == "bord.modif")) { ## border method ## Compute distances to boundary b <- bdist.points(X) I <- close$i bI <- b[I] ## apply reduced sample algorithm RS <- Kount(DIJ, bI, b, breaks) if(any(correction == "bord.modif")) { ## modified border correction denom.area <- eroded.areas(W, r) numKbm <- RS$numerator denKbm <- lambda2 * denom.area K <- bind.ratfv(K, data.frame(bord.modif=numKbm), data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } if(any(correction == "border")) { numKb <- RS$numerator denKb <- lambda * RS$denom.count K <- bind.ratfv(K, data.frame(border=numKb), data.frame(border=denKb), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE, gW = gW, give.rmax=TRUE) wh <- whist(DIJ, breaks$val, edgewt) numKtrans <- cumsum(wh) denKtrans <- lambda2 * areaW h <- attr(edgewt, "rmax") numKtrans[r >= h] <- NA K <- bind.ratfv(K, data.frame(trans=numKtrans), denKtrans, "hat(%s)[trans](r)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction == "rigid")) { ## Ohser-Stoyan rigid motion correction CW <- rotmean(gW) edgewt <- areaW/as.function(CW)(DIJ) wh <- whist(DIJ, breaks$val, edgewt) numKrigid <- cumsum(wh) denKrigid <- lambda2 * areaW h <- rmax.Rigid(X, gW) #sic: X not W numKrigid[r >= h] <- NA K <- bind.ratfv(K, data.frame(rigid=numKrigid), denKrigid, "hat(%s)[rigid](r)", "rigid motion-corrected estimate of %s", "rigid", ratio=ratio) } if(any(correction == "isotropic")) { ## Ripley isotropic correction XI <- ppp(close$xi, 
close$yi, window=W, check=FALSE) edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) numKiso <- cumsum(wh) denKiso <- lambda2 * areaW h <- boundingradius(W) numKiso[r >= h] <- NA K <- bind.ratfv(K, data.frame(iso=numKiso), denKiso, "hat(%s)[iso](r)", "Ripley isotropic correction estimate of %s", "iso", ratio=ratio) } } ############################# ## VARIANCE APPROXIMATION ############################# if(var.approx) { ## Compute variance approximations A <- areaW P <- perimeter(W) n <- npts ## Ripley asymptotic approximation rip <- 2 * ((A/(n-1))^2) * (pi * r^2/A + 0.96 * P * r^3/A^2 + 0.13 * (n/A) * P * r^5/A^2) if(!ratio) { K <- bind.fv(K, data.frame(rip=rip), "vR(r)", "Ripley approximation to var(%s) under CSR", "iso") } else { den <- (n-1)^2 ripnum <- den * rip ripden <- rep.int(den, length(rip)) K <- bind.ratfv(K, data.frame(rip=ripnum), data.frame(rip=ripden), "vR(r)", "Ripley approximation to var(%s) under CSR", "iso") } if(W$type == "rectangle") { # Lotwick-Silverman a1r <- (0.21 * P * r^3 + 1.3 * r^4)/A^2 a2r <- (0.24 * P * r^5 + 2.62 * r^6)/A^3 # contains correction to typo on p52 of Diggle 2003 # cf Lotwick & Silverman 1982 eq (5) br <- (pi * r^2/A) * (1 - pi * r^2/A) + (1.0716 * P * r^3 + 2.2375 * r^4)/A^2 ls <- (A^2) * (2 * br - a1r + (n-2) * a2r)/(n*(n-1)) # add column if(!ratio) { K <- bind.fv(K, data.frame(ls=ls), "vLS(r)", "Lotwick-Silverman approx to var(%s) under CSR", "iso") } else { den <- n*(n-1) lsnum <- ls * den lsden <- rep.int(den, length(ls)) K <- bind.ratfv(K, data.frame(ls=lsnum), data.frame(ls=lsden), "vLS(r)", "Lotwick-Silverman approx to var(%s) under CSR", "iso") } } } ### FINISH OFF ##### ## default plot will display all edge corrections formula(K) <- . 
~ r nama <- rev(colnames(K)) fvnames(K, ".") <- setdiff(nama, c("r", "rip", "ls")) ## unitname(K) <- unitname(X) # copy to other components if(ratio) K <- conform.ratfv(K) return(K) } ################################################################ ############# SUPPORTING ALGORITHMS ########################### ################################################################ Kount <- function(dIJ, bI, b, breaks) { # # "internal" routine to compute border-correction estimate of K or Kij # # dIJ: vector containing pairwise distances for selected I,J pairs # bI: corresponding vector of boundary distances for I # b: vector of ALL distances to window boundary # # breaks : breakpts object # stopifnot(length(dIJ) == length(bI)) # determine which distances d_{ij} were observed without censoring uncen <- (dIJ <= bI) # histogram of noncensored distances nco <- whist(dIJ[uncen], breaks$val) # histogram of censoring times for noncensored distances ncc <- whist(bI[uncen], breaks$val) # histogram of censoring times (yes, this is a different total size) cen <- whist(b, breaks$val) # count censoring times beyond rightmost breakpoint uppercen <- sum(b > max(breaks$val)) # go RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen) # extract results numerator <- RS$numerator denom.count <- RS$denominator # check if(length(numerator) != breaks$ncells) stop("internal error: length(numerator) != breaks$ncells") if(length(denom.count) != breaks$ncells) stop("internal error: length(denom.count) != breaks$ncells") return(list(numerator=numerator, denom.count=denom.count)) } #### interface to C code for border method Kborder.engine <- function(X, rmax, nr=100, correction=c("border", "bord.modif"), weights=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) # this will be the output data frame Kdf 
<- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") Kfv <- fv(Kdf, "r", quote(K(r)), "theo", , c(0,rmax), c("r","%s[pois](r)"), desc, fname="K") if(ratio) { # save numerator and denominator denom <- lambda2 * areaW numK <- eval.fv(denom * Kfv) denK <- eval.fv(denom + Kfv * 0) attributes(numK) <- attributes(denK) <- attributes(Kfv) numK <- rebadge.fv(numK, tags="theo", new.desc="numerator for theoretical Poisson %s") denK <- rebadge.fv(denK, tags="theo", new.desc="denominator for theoretical Poisson %s") } ####### start computing ############ # sort in ascending order of x coordinate orderX <- fave.order(X$x) Xsort <- X[orderX] x <- Xsort$x y <- Xsort$y # boundary distances b <- bdist.points(Xsort) # call the C code if(is.null(weights)) { # determine whether the numerator can be stored as an integer bigint <- .Machine$integer.max if(npts < sqrt(bigint)) { # yes - use faster integer arithmetic res <- .C("KborderI", nxy=as.integer(npts), x=as.double(x), y=as.double(y), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.integer(integer(nr)), denom=as.integer(integer(nr)), PACKAGE = "spatstat") } else { # no - need double precision storage res <- .C("KborderD", nxy=as.integer(npts), x=as.double(x), y=as.double(y), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), denom=as.double(numeric(nr)), PACKAGE = "spatstat") } if("bord.modif" %in% correction) { denom.area <- eroded.areas(W, r) numKbm <- res$numer denKbm <- lambda2 * denom.area bm <- numKbm/denKbm Kfv <- bind.fv(Kfv, data.frame(bord.modif=bm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected 
estimate of %s", "bord.modif") } } if("border" %in% correction) { numKb <- res$numer denKb <- lambda * res$denom bord <- numKb/denKb Kfv <- bind.fv(Kfv, data.frame(border=bord), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numK <- bind.fv(numK, data.frame(border=numKb), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=denKb), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } } else { # weighted version if(is.numeric(weights)) { if(length(weights) != X$n) stop("length of weights argument does not match number of points in X") } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } weights.Xsort <- weights[orderX] res <- .C("Kwborder", nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(weights.Xsort), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), denom=as.double(numeric(nr)), PACKAGE = "spatstat") if("border" %in% correction) { bord <- res$numer/res$denom Kfv <- bind.fv(Kfv, data.frame(border=bord), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numK <- bind.fv(numK, data.frame(border=res$numer), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=res$denom), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } if("bord.modif" %in% correction) { numKbm <- res$numer denKbm <- eroded.areas(W, r) bm <- numKbm/denKbm Kfv <- bind.fv(Kfv, data.frame(bord.modif=bm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, 
data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } } ## # default is to display them all formula(Kfv) <- . ~ r unitname(Kfv) <- unitname(X) if(ratio) { # finish off numerator and denominator formula(numK) <- formula(denK) <- . ~ r unitname(denK) <- unitname(numK) <- unitname(X) # tack on to result Kfv <- rat(Kfv, numK, denK, check=FALSE) } return(Kfv) } Knone.engine <- function(X, rmax, nr=100, weights=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) # lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) denom <- lambda2 * areaW if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") Kfv <- fv(Kdf, "r", quote(K(r)), "theo", , c(0,rmax), c("r","%s[pois](r)"), desc, fname="K") if(ratio) { # save numerator and denominator numK <- eval.fv(denom * Kfv) denK <- eval.fv(denom + Kfv * 0) attributes(numK) <- attributes(denK) <- attributes(Kfv) numK <- rebadge.fv(numK, tags="theo", new.desc="numerator for theoretical Poisson %s") denK <- rebadge.fv(denK, tags="theo", new.desc="denominator for theoretical Poisson %s") } ####### start computing ############ # sort in ascending order of x coordinate orderX <- fave.order(X$x) Xsort <- X[orderX] x <- Xsort$x y <- Xsort$y # call the C code if(is.null(weights)) { # determine whether the numerator can be stored as an integer bigint <- .Machine$integer.max if(npts < sqrt(bigint)) { # yes - use faster integer arithmetic res <- .C("KnoneI", nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), numer=as.integer(integer(nr)), PACKAGE = "spatstat") } else { # no - need double precision storage res <- .C("KnoneD", nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), 
numer=as.double(numeric(nr)), PACKAGE = "spatstat") } numKun <- res$numer denKun <- denom # = lambda2 * areaW Kun <- numKun/denKun } else { # weighted version if(is.numeric(weights)) { if(length(weights) != X$n) stop("length of weights argument does not match number of points in X") } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } weights.Xsort <- weights[orderX] res <- .C("Kwnone", nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(weights.Xsort), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), PACKAGE = "spatstat") numKun <- res$numer denKun <- sum(weights) Kun <- numKun/denKun } # tack on to fv object Kfv <- bind.fv(Kfv, data.frame(un=Kun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") if(ratio) { numK <- bind.fv(numK, data.frame(un=numKun), "hat(%s)[un](r)", "numerator of uncorrected estimate of %s", "un") denK <- bind.fv(denK, data.frame(un=denKun), "hat(%s)[un](r)", "denominator of uncorrected estimate of %s", "un") } ## # default is to display them all formula(Kfv) <- . ~ r unitname(Kfv) <- unitname(X) if(ratio) { # finish off numerator and denominator formula(numK) <- formula(denK) <- . 
~ r unitname(denK) <- unitname(numK) <- unitname(X) # tack on to result Kfv <- rat(Kfv, numK, denK, check=FALSE) } return(Kfv) } rmax.rule <- function(fun="K", W, lambda) { verifyclass(W, "owin") switch(fun, K = { # Ripley's Rule ripley <- min(diff(W$xrange), diff(W$yrange))/4 # Count at most 1000 neighbours per point rlarge <- if(!missing(lambda)) sqrt(1000 /(pi * lambda)) else Inf rmax <- min(rlarge, ripley) }, Kscaled = { ## rule of thumb for Kscaled rdiam <- diameter(as.rectangle(W))/2 * sqrt(lambda) rmax <- min(10, rdiam) }, F = , G = , J = { # rule of thumb rdiam <- diameter(as.rectangle(W))/2 # Poisson process has F(rlarge) = 1 - 10^(-5) rlarge <- if(!missing(lambda)) sqrt(log(1e5)/(pi * lambda)) else Inf rmax <- min(rlarge, rdiam) }, stop(paste("Unrecognised function type", sQuote(fun))) ) return(rmax) } implemented.for.K <- function(correction, windowtype, explicit) { pixels <- (windowtype == "mask") if(any(correction == "best")) { # select best available correction correction[correction == "best"] <- if(!pixels) "isotropic" else "translate" } else { # available selection of edge corrections depends on window if(pixels) { iso <- (correction == "isotropic") if(any(iso)) { whinge <- "Isotropic correction not implemented for binary masks" if(explicit) { if(all(iso)) stop(whinge) else warning(whinge) } correction <- correction[!iso] } } } return(correction) } good.correction.K <- function(X) { nX <- npoints(X) W <- as.owin(X) avail <- c("none", if(nX < 1e5) "border" else NULL, if(nX < 3000)"translate" else NULL, if(nX < 1000 && !is.mask(W)) "isotropic" else NULL) chosen <- rev(avail)[1] return(chosen) } Krect.engine <- function(X, rmax, nr=100, correction, weights=NULL, ratio=FALSE, fname="K") { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) width <- sidelengths(W)[1] height <- sidelengths(W)[2] lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, 
length.out=nr) if(weighted <- !is.null(weights)) { ## coerce weights to a vector if(is.numeric(weights)) { check.nvector(weights, npts) } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } } # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- if(weighted) areaW else (lambda2 * areaW) Kfv <- ratfv(Kdf, NULL, denom, "r", quote(K(r)), "theo", NULL, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, ratio=ratio) ####### prepare data ############ if(!all(correction == "translate")) { ## Ensure rectangle has its bottom left corner at the origin if(W$xrange[1] != 0 || W$yrange[1] != 0) { X <- shift(X, origin="bottomleft") W <- as.owin(X) } } ## sort in ascending order of x coordinate orderX <- fave.order(X$x) x <- X$x[orderX] y <- X$y[orderX] if(weighted) wt <- weights[orderX] ## establish algorithm parameters doIso <- "isotropic" %in% correction doTrans <- "translate" %in% correction doBord <- any(c("border", "bord.modif") %in% correction) doUnco <- "none" %in% correction trimedge <- spatstat.options("maxedgewt") ## allocate space for results ziso <- numeric(if(doIso) nr else 1L) ztrans <- numeric(if(doTrans) nr else 1L) ## call the C code if(weighted) { ## weighted version zbnumer <- numeric(if(doBord) nr else 1L) zbdenom <- numeric(if(doBord) nr else 1L) zunco <- numeric(if(doUnco) nr else 1L) res <- .C("KrectWtd", width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(wt), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.double(zbnumer), bdenom=as.double(zbdenom), unco=as.double(zunco), PACKAGE = "spatstat") } 
else if(npts < sqrt(.Machine$integer.max)) { ## unweighted ## numerator of border correction can be stored as an integer ## use faster integer arithmetic zbnumer <- integer(if(doBord) nr else 1L) zbdenom <- integer(if(doBord) nr else 1L) zunco <- integer(if(doUnco) nr else 1L) res <- .C("KrectInt", width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.integer(zbnumer), bdenom=as.integer(zbdenom), unco=as.integer(zunco), PACKAGE = "spatstat") } else { ## unweighted ## need double precision storage zbnumer <- numeric(if(doBord) nr else 1L) zbdenom <- numeric(if(doBord) nr else 1L) zunco <- numeric(if(doUnco) nr else 1L) res <- .C("KrectDbl", width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.double(zbnumer), bdenom=as.double(zbdenom), unco=as.double(zunco), PACKAGE = "spatstat") } ## Process corrections in reverse order of priority ## Uncorrected estimate if("none" %in% correction) { numKun <- res$unco denKun <- if(weighted) areaW else (lambda2 * areaW) Kfv <- bind.ratfv(Kfv, data.frame(un=numKun), denKun, makefvlabel(NULL, "hat", fname, "un"), "uncorrected estimate of %s", "un", ratio=ratio) } ## Modified border correction if("bord.modif" %in% correction) { denom.area <- eroded.areas(W, r) numKbm <- res$bnumer denKbm <- if(weighted) denom.area else (lambda2 * denom.area) Kfv <- bind.ratfv(Kfv, data.frame(bord.modif=numKbm), denKbm, makefvlabel(NULL, "hat", fname, "bordm"), "modified border-corrected 
estimate of %s", "bord.modif", ratio=ratio) } ## Border correction if("border" %in% correction) { numKb <- res$bnumer denKb <- if(weighted) res$bdenom else lambda * res$bdenom Kfv <- bind.ratfv(Kfv, data.frame(border=numKb), denKb, makefvlabel(NULL, "hat", fname, "bord"), "border-corrected estimate of %s", "border", ratio=ratio) } ## translation correction if("translate" %in% correction) { numKtrans <- res$trans denKtrans <- if(weighted) areaW else (lambda2 * areaW) h <- diameter(as.rectangle(W))/2 numKtrans[r >= h] <- NA Kfv <- bind.ratfv(Kfv, data.frame(trans=numKtrans), denKtrans, makefvlabel(NULL, "hat", fname, "trans"), "translation-corrected estimate of %s", "trans", ratio=ratio) } ## isotropic correction if("isotropic" %in% correction) { numKiso <- res$iso denKiso <- if(weighted) areaW else (lambda2 * areaW) h <- diameter(as.rectangle(W))/2 numKiso[r >= h] <- NA Kfv <- bind.ratfv(Kfv, data.frame(iso=numKiso), denKiso, makefvlabel(NULL, "hat", fname, "iso"), "isotropic-corrected estimate of %s", "iso", ratio=ratio) } ## # default is to display them all formula(Kfv) <- . 
~ r unitname(Kfv) <- unitname(X) if(ratio) Kfv <- conform.ratfv(Kfv) return(Kfv) } spatstat/R/news.R0000755000176200001440000000062313115271120013412 0ustar liggesusers# # news.R # # News and warnings # latest.news <- function(package="spatstat", doBrowse=FALSE) { # get version number v <- read.dcf(file=system.file("DESCRIPTION", package=package), fields="Version") ne <- eval(substitute(news(Version >= v0, package=package), list(v0=v))) page(ne, method="print", doBrowse=doBrowse) return(invisible(ne)) } class(latest.news) <- "autoexec" spatstat/R/distan3D.R0000755000176200001440000002060113115271075014116 0ustar liggesusers# # distan3D.R # # $Revision: 1.13 $ $Date: 2017/06/05 10:31:58 $ # # Interpoint distances for 3D points # # Methods for pairdist, nndist, nnwhich, crossdist # pairdist.pp3 <- function(X, ..., periodic=FALSE, squared=FALSE) { verifyclass(X, "pp3") # extract point coordinates xyz <- coords(X) n <- nrow(xyz) x <- xyz$x y <- xyz$y z <- xyz$z # # special cases if(n == 0) return(matrix(numeric(0), nrow=0, ncol=0)) else if(n == 1L) return(matrix(0,nrow=1L,ncol=1L)) # if(!periodic) { Cout <- .C("D3pairdist", n = as.integer(n), x = as.double(x), y = as.double(y), z = as.double(z), squared = as.integer(squared), d = as.double(numeric(n*n)), PACKAGE = "spatstat") } else { b <- as.box3(X) wide <- diff(b$xrange) high <- diff(b$yrange) deep <- diff(b$zrange) Cout <- .C("D3pairPdist", n = as.integer(n), x = as.double(x), y = as.double(y), z = as.double(z), xwidth=as.double(wide), yheight=as.double(high), zdepth=as.double(deep), squared = as.integer(squared), d= as.double(numeric(n*n)), PACKAGE = "spatstat") } dout <- matrix(Cout$d, nrow=n, ncol=n) return(dout) } nndist.pp3 <- function(X, ..., k=1) { verifyclass(X, "pp3") if((narg <- length(list(...))) > 0) warning(paste(narg, "unrecognised", ngettext(narg, "argument was", "arguments were"), "ignored")) # extract point coordinates xyz <- coords(X) n <- nrow(xyz) x <- xyz$x y <- xyz$y z <- xyz$z # k can be a 
single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1L) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # trivial cases if(n <= 1L) { # empty pattern => return numeric(0) # or pattern with only 1 point => return Inf nnd <- matrix(Inf, nrow=n, ncol=kmax) nnd <- nnd[,k, drop=TRUE] return(nnd) } # number of neighbours that are well-defined kmaxcalc <- min(n-1L, kmax) # calculate k-nn distances for k <= kmaxcalc if(kmaxcalc == 1L) { # calculate nearest neighbour distance only nnd<-numeric(n) o <- fave.order(z) big <- sqrt(.Machine$double.xmax) Cout <- .C("nnd3D", n= as.integer(n), x= as.double(x[o]), y= as.double(y[o]), z= as.double(z[o]), nnd= as.double(nnd), nnwhich = as.integer(integer(1L)), huge=as.double(big), PACKAGE = "spatstat") nnd[o] <- Cout$nnd } else { # case kmaxcalc > 1 nnd<-numeric(n * kmaxcalc) o <- fave.order(z) big <- sqrt(.Machine$double.xmax) Cout <- .C("knnd3D", n = as.integer(n), kmax = as.integer(kmaxcalc), x = as.double(x[o]), y = as.double(y[o]), z = as.double(z[o]), nnd = as.double(nnd), nnwhich = as.integer(integer(1L)), huge = as.double(big), PACKAGE = "spatstat") nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc) nnd[o, ] <- matrix(Cout$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE) } # post-processing if(kmax > kmaxcalc) { # add columns of Inf's infs <- matrix(as.numeric(Inf), nrow=n, ncol=kmax-kmaxcalc) nnd <- cbind(nnd, infs) } if(length(k) < kmax) { # select only the specified columns nnd <- nnd[, k, drop=TRUE] } return(nnd) } nnwhich.pp3 <- function(X, ..., k=1) { verifyclass(X, "pp3") if((narg <- length(list(...))) > 0) warning(paste(narg, "unrecognised", ngettext(narg, "argument was", "arguments were"), "ignored")) # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else 
if(length(k) == 1L) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # extract point coordinates xyz <- coords(X) n <- nrow(xyz) x <- xyz$x y <- xyz$y z <- xyz$z # special cases if(n <= 1L) { # empty pattern => return integer(0) # or pattern with only 1 point => return NA nnw <- matrix(as.integer(NA), nrow=n, ncol=kmax) nnw <- nnw[,k, drop=TRUE] return(nnw) } # number of neighbours that are well-defined kmaxcalc <- min(n-1L, kmax) # identify k-nn for k <= kmaxcalc if(kmaxcalc == 1L) { # identify nearest neighbour only nnw <- integer(n) o <- fave.order(z) big <- sqrt(.Machine$double.xmax) Cout <- .C("nnw3D", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), z = as.double(z[o]), nnd = as.double(numeric(1L)), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") # [sic] Conversion from C to R indexing is done in C code. witch <- Cout$nnwhich if(any(witch <= 0)) stop("Internal error: illegal index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") nnw[o] <- o[witch] } else { # case kmaxcalc > 1 nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc) o <- fave.order(z) big <- sqrt(.Machine$double.xmax) Cout <- .C("knnw3D", n = as.integer(n), kmax = as.integer(kmaxcalc), x = as.double(x[o]), y = as.double(y[o]), z = as.double(z[o]), nnd = as.double(numeric(1L)), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") # [sic] Conversion from C to R indexing is done in C code. 
witch <- Cout$nnwhich witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE) if(any(witch <= 0)) stop("Internal error: illegal index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") # convert back to original ordering nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc) } # post-processing if(kmax > kmaxcalc) { # add columns of NA's nas <- matrix(as.integer(NA), nrow=n, ncol=kmax-kmaxcalc) nnw <- cbind(nnw, nas) } if(length(k) < kmax) { # select only the specified columns nnw <- nnw[, k, drop=TRUE] } return(nnw) } crossdist.pp3 <- function(X, Y, ..., periodic=FALSE, squared=FALSE) { verifyclass(X, "pp3") verifyclass(Y, "pp3") cX <- coords(X) cY <- coords(Y) nX <- nrow(cX) nY <- nrow(cY) if(nX == 0 || nY == 0) return(matrix(numeric(0), nrow=nX, ncol=nY)) if(!periodic) { Cout <- .C("D3crossdist", nfrom = as.integer(nX), xfrom = as.double(cX$x), yfrom = as.double(cX$y), zfrom = as.double(cX$z), nto = as.integer(nY), xto = as.double(cY$x), yto = as.double(cY$y), zto = as.double(cY$z), squared = as.integer(squared), d = as.double(matrix(0, nrow=nX, ncol=nY)), PACKAGE = "spatstat") } else { b <- as.box3(X) wide <- diff(b$xrange) high <- diff(b$yrange) deep <- diff(b$zrange) Cout <- .C("D3crossPdist", nfrom = as.integer(nX), xfrom = as.double(cX$x), yfrom = as.double(cX$y), zfrom = as.double(cX$z), nto = as.integer(nY), xto = as.double(cY$x), yto = as.double(cY$y), zto = as.double(cY$z), xwidth = as.double(wide), yheight = as.double(high), zheight = as.double(deep), squared = as.integer(squared), d = as.double(matrix(0, nrow=nX, ncol=nY)), PACKAGE = "spatstat") } return(matrix(Cout$d, nrow=nX, ncol=nY)) } spatstat/R/fardist.R0000644000176200001440000000330213115271075014075 0ustar liggesusers## ## fardist.R ## ## Farthest distance to boundary ## ## $Revision: 1.11 $ $Date: 2017/06/05 10:31:58 $ fardist <- function(X, ...) 
{ UseMethod("fardist") } fardist.owin <- function(X, ..., squared=FALSE) { verifyclass(X, "owin") M <- as.mask(X, ...) V <- if(is.mask(X)) vertices(M) else vertices(X) nx <- dim(M)[2L] ny <- dim(M)[1L] x0 <- M$xcol[1L] y0 <- M$yrow[1L] xstep <- M$xstep ystep <- M$ystep if(squared) { z <- .C("fardist2grid", nx = as.integer(nx), x0 = as.double(x0), xstep = as.double(xstep), ny = as.integer(ny), y0 = as.double(y0), ystep = as.double(ystep), np = as.integer(length(V$x)), xp = as.double(V$x), yp = as.double(V$y), dfar = as.double(numeric(nx * ny)), PACKAGE = "spatstat") } else { z <- .C("fardistgrid", nx = as.integer(nx), x0 = as.double(x0), xstep = as.double(xstep), ny = as.integer(ny), y0 = as.double(y0), ystep = as.double(ystep), np = as.integer(length(V$x)), xp = as.double(V$x), yp = as.double(V$y), dfar = as.double(numeric(nx * ny)), PACKAGE = "spatstat") } out <- im(z$dfar, xcol=M$xcol, yrow=M$yrow, xrange=M$xrange, yrange=M$yrange, unitname=unitname(M)) if(!is.rectangle(X)) out <- out[X, drop=FALSE] return(out) } fardist.ppp <- function(X, ..., squared=FALSE) { verifyclass(X, "ppp") V <- vertices(Window(X)) D2 <- crossdist(X$x, X$y, V$x, V$y, squared=TRUE) D2max <- apply(D2, 1L, max) if(squared) return(D2max) else return(sqrt(D2max)) } spatstat/R/bugtable.R0000644000176200001440000000533713115225157014240 0ustar liggesusers#' #' bugtable.R #' #' $Revision: 1.3 $ $Date: 2017/01/07 04:20:31 $ bugfixes <- function(sinceversion=NULL, sincedate=NULL, package="spatstat", show=TRUE) { if(!is.null(sincedate) && package != "spatstat") { #' news items after specified date ne <- news(package=package) if(is.null(ne) || is.null(ne$Date) || anyNA(ne$Date)) stop(paste(if(is.null(ne)) "News" else "Date", "information is not available for package", sQuote(package)), call.=FALSE) a <- eval(substitute(news(Date >= SD & grepl("^BUG", Category), package=package), list(SD=sincedate))) } else { #' determine a corresponding version number if(is.null(sinceversion) && is.null(sincedate)) 
{ #' default is latest version dfile <- system.file("DESCRIPTION", package=package) sinceversion <- read.dcf(file=dfile, fields="Version") } else if(!is.null(sincedate) && package == "spatstat") { #' read spatstat release history table fname <- system.file("doc", "packagesizes.txt", package="spatstat") p <- read.table(fname, header=TRUE, stringsAsFactors=FALSE) #' find earliest package version on or after the given date imin <- with(p, min(which(as.Date(date) >= sincedate))) sinceversion <- p[imin, "version"] } a <- eval(substitute(news(Version >= sv & grepl("^BUG", Category), package=package), list(sv=sinceversion))) } if(!is.data.frame(a)) return(NULL) #' split each entry into lines alines <- strsplit(a$Text, "\n") #' extract first line f <- unname(sapply(alines, "[", i=1L)) #' extract body b <- unname(lapply(alines, "[", i=-1L)) b <- unname(sapply(b, paste, collapse="\n")) #' extract header from first line h <- unname(sapply(strsplit(f, ":"), "[", i=1L)) h <- unname(sapply(strsplit(h, ","), "[", i=1L)) h <- unname(sapply(strsplit(h, " "), "[", i=1L)) #' sort by header oo <- order(h, f) #' rebuild z <- data.frame(Header=h[oo], Firstline=f[oo], Body=b[oo], Version=a$Version[oo], stringsAsFactors=FALSE) class(z) <- c("bugtable", class(z)) if(!show) return(z) page(z, method="print") return(invisible(z)) } class(bugfixes) <- "autoexec" print.bugtable <- function(x, ...) { hprev <- "" for(i in seq_len(nrow(x))) { h <- x$Header[i] f <- x$Firstline[i] if(h != hprev) { # new main header cat("\n***", h, "***\n", fill=TRUE) } cat(x$Version[i], ":", f, fill=TRUE) cat(x$Body[i], "\n", fill=TRUE) hprev <- h } return(invisible(NULL)) } spatstat/R/linnetsurgery.R0000644000176200001440000001415013151133721015351 0ustar liggesusers#' #' linnetsurgery.R #' #' Surgery on linear networks and related objects #' #' $Revision: 1.12 $ $Date: 2017/08/29 00:39:31 $ #' insertVertices <- function(L, ...) 
{ if(!inherits(L, c("lpp", "linnet"))) stop("L should be a linear network (linnet) or point pattern (lpp)", call.=FALSE) if(haspoints <- is.lpp(L)) { X <- L L <- as.linnet(L) cooXnew <- cooXold <- coords(X) segXold <- cooXold$seg tpXold <- cooXold$tp } ## validate new vertices V <- as.lpp(..., L=L) if(!identical(as.linnet(L, sparse=TRUE), as.linnet(V, sparse=TRUE))) stop("New vertices must lie on exactly the same network as L") if(npoints(V) == 0) { attr(L, "id") <- integer(0) if(!haspoints) { return(L) } else { X$domain <- L return(X) } } ## extract new vertex coordinates co <- coords(V) seg <- co$seg tp <- co$tp ## determine which segments will be split, ## and compute new serial numbers for the un-split segments splitsegments <- sort(unique(seg)) notsplit <- rep(TRUE, nsegments(L)) notsplit[splitsegments] <- FALSE segmap <- cumsum(notsplit) nunsplit <- sum(notsplit) ## existing vertices v <- L$vertices n <- npoints(v) ## initialise nadd <- 0 vadd <- list(x=numeric(0), y=numeric(0)) fromadd <- toadd <- id <- integer(0) ## split segments containing new vertices for(theseg in splitsegments) { ## find new vertices lying on segment 'theseg' i <- L$from[theseg] j <- L$to[theseg] those <- (seg == theseg) idthose <- which(those) ## order the new vertices along this segment tt <- tp[those] oo <- order(tt) tt <- tt[oo] idadd <- idthose[oo] ## make new vertices nnew <- length(tt) xnew <- with(v, x[i] + tt * diff(x[c(i,j)])) ynew <- with(v, y[i] + tt * diff(y[c(i,j)])) vnew <- list(x=xnew, y=ynew) ## make new edges kk <- n + nadd + (1:nnew) fromnew <- c(i, kk) tonew <- c(kk, j) nnewseg <- nnew + 1 ## add new vertices and edges to running total nadd <- nadd + nnew vadd <- concatxy(vadd, vnew) fromadd <- c(fromadd, fromnew) toadd <- c(toadd, tonew) id <- c(id, idadd) ## handle data points if any if(haspoints && any(relevant <- (segXold == theseg))) { tx <- tpXold[relevant] ttt <- c(0, tt, 1) m <- findInterval(tx, ttt, rightmost.closed=TRUE, all.inside=TRUE) t0 <- ttt[m] t1 <- 
ttt[m+1L] tpXnew <- (tx - t0)/(t1-t0) tpXnew <- pmin(1, pmax(0, tpXnew)) n0 <- nunsplit + length(fromadd) - nnewseg segXnew <- n0 + m cooXnew$seg[relevant] <- segXnew cooXnew$tp[relevant] <- tpXnew } } newfrom <- c(L$from[-splitsegments], fromadd) newto <- c(L$to[-splitsegments], toadd) newv <- superimpose(v, vadd, check=FALSE) Lnew <- linnet(newv, edges=cbind(newfrom, newto), sparse=identical(L$sparse, TRUE)) newid <- integer(nadd) newid[id] <- n + 1:nadd attr(Lnew, "id") <- newid if(!haspoints) return(Lnew) ## adjust segment id for data points on segments that were not split Xnotsplit <- notsplit[segXold] cooXnew$seg[Xnotsplit] <- segmap[segXold[Xnotsplit]] Xnew <- lpp(cooXnew, Lnew) marks(Xnew) <- marks(X) attr(Xnew, "id") <- newid return(Xnew) } thinNetwork <- function(X, retainvertices, retainedges) { ## thin a network by retaining only the specified edges and/or vertices if(!inherits(X, c("linnet", "lpp"))) stop("X should be a linnet or lpp object", call.=FALSE) gotvert <- !missing(retainvertices) gotedge <- !missing(retainedges) if(!gotedge && !gotvert) return(X) L <- as.linnet(X) from <- L$from to <- L$to V <- L$vertices sparse <- identical(L$sparse, TRUE) edgesFALSE <- logical(nsegments(L)) verticesFALSE <- logical(npoints(V)) if(!gotedge) { retainedges <- edgesFALSE } else if(!is.logical(retainedges)) { z <- edgesFALSE z[retainedges] <- TRUE retainedges <- z } if(!gotvert) { retainvertices <- verticesFALSE } else if(!is.logical(retainvertices)) { z <- verticesFALSE z[retainvertices] <- TRUE retainvertices <- z } if(gotvert && !gotedge) { ## retain all edges between retained vertices retainedges <- retainvertices[from] & retainvertices[to] } else if(gotedge) { ## retain vertices required for the retained edges retainvertices[from[retainedges]] <- TRUE retainvertices[to[retainedges]] <- TRUE } ## assign new serial numbers to vertices, and recode Vsub <- V[retainvertices] newserial <- cumsum(retainvertices) newfrom <- newserial[from[retainedges]] newto <- 
newserial[to[retainedges]] ## extract relevant subset of network Lsub <- linnet(Vsub, edges=cbind(newfrom, newto), sparse=sparse) ## tack on information about subset attr(Lsub, "retainvertices") <- retainvertices attr(Lsub, "retainedges") <- retainedges ## done? if(inherits(X, "linnet")) return(Lsub) ## X is an lpp object ## Find data points that lie on accepted segments dat <- X$data ok <- retainedges[unlist(dat$seg)] dsub <- dat[ok, , drop=FALSE] ## compute new serial numbers for retained segments segmap <- cumsum(retainedges) dsub$seg <- segmap[as.integer(unlist(dsub$seg))] # make new lpp object Y <- ppx(data=dsub, domain=Lsub, coord.type=as.character(X$ctype)) class(Y) <- c("lpp", class(Y)) ## tack on information about subset attr(Y, "retainpoints") <- ok return(Y) } validate.lpp.coords <- function(X, fatal=TRUE, context="") { ## check for mangled internal data proj <- project2segment(as.ppp(X), as.psp(as.linnet(X))) seg.claimed <- coords(X)$seg seg.mapped <- proj$mapXY if(any(seg.claimed != seg.mapped)) { whinge <- paste("Incorrect segment id", context) if(fatal) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE) return(FALSE) } tp.claimed <- coords(X)$tp tp.mapped <- proj$tp v <- max(abs(tp.claimed - tp.mapped)) if(v > 0.01) { whinge <- paste("Incorrect 'tp' coordinate", paren(paste("max discrepancy", v)), context) if(fatal) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE) return(FALSE) } return(TRUE) } spatstat/R/quasirandom.R0000644000176200001440000000266313115271120014764 0ustar liggesusers## ## quasirandom.R ## ## Quasi-random sequence generators ## ## $Revision: 1.6 $ $Date: 2017/06/05 10:31:58 $ ## vdCorput <- function(n, base) { stopifnot(is.prime(base)) z <- .C("Corput", base=as.integer(base), n=as.integer(n), result=as.double(numeric(n)), PACKAGE = "spatstat") return(z$result) } Halton <- function(n, bases=c(2,3), raw=FALSE, simplify=TRUE) { d <- length(bases) if(d==2 && !raw && simplify) return(ppp(vdCorput(n, bases[1]), 
vdCorput(n, bases[2]), window=owin(), check=FALSE)) z <- matrix(, nrow=n, ncol=d) for(j in 1:d) z[,j] <- vdCorput(n, bases[j]) if(raw || d < 2) return(z) b <- do.call(boxx, rep(list(c(0,1)), d)) return(ppx(z, b, simplify=simplify)) } Hammersley <- function(n, bases=2, raw=FALSE, simplify=TRUE) { d <- length(bases) + 1 z <- cbind(Halton(n, bases, raw=TRUE), (1:n)/n) dimnames(z) <- NULL if(raw || d < 2) return(z) b <- do.call(boxx, rep(list(c(0,1)), d)) return(ppx(z, b, simplify=simplify)) } rQuasi <- function(n, W, type=c("Halton", "Hammersley"), ...) { R <- as.rectangle(W) type <- match.arg(type) X <- switch(type, Halton=Halton(n, ...), Hammersley=Hammersley(n, ...)) Y <- ppp(R$xrange[1] + diff(R$xrange) * X$x, R$yrange[1] + diff(R$yrange) * X$y, window=R, check=FALSE) if(!is.rectangle(W)) Y <- Y[W] return(Y) } spatstat/R/is.cadlag.R0000755000176200001440000000031713115271075014274 0ustar liggesusersis.cadlag <- function (s) { if(!is.stepfun(s)) stop("s is not a step function.\n") r <- knots(s) h <- s(r) n <- length(r) r1 <- c(r[-1L],r[n]+1) rm <- (r+r1)/2 hm <- s(rm) identical(all.equal(h,hm),TRUE) } spatstat/R/dummify.R0000644000176200001440000000153413115225157014120 0ustar liggesusers# # dummify.R # # Convert a factor to a matrix of dummy variables, etc. 
# # $Revision: 1.5 $ $Date: 2016/02/11 10:17:12 $ # dummify <- function(x) { if(is.matrix(x) || is.data.frame(x)) { x <- as.data.frame(x) y <- do.call(data.frame, lapply(x, dummify)) return(as.matrix(y)) } # x is 1-dimensional if(is.complex(x)) return(as.matrix(data.frame(Re=Re(x), Im=Im(x)))) # convert factors etc if(is.character(x)) x <- factor(x) if(is.logical(x)) x <- factor(x, levels=c(FALSE,TRUE)) if(is.factor(x)) { # convert to dummy variables nx <- length(x) lev <- levels(x) y <- matrix(0L, nrow=nx, ncol=length(lev)) colnames(y) <- lev y[cbind(seq_len(nx), as.integer(x))] <- 1L return(y) } # convert to numeric y <- as.numeric(x) if(!is.matrix(y)) y <- matrix(y, ncol=1) return(y) } spatstat/R/rppm.R0000644000176200001440000000722013115271120013411 0ustar liggesusers#' #' rppm.R #' #' Recursive Partitioning for Point Process Models #' #' $Revision: 1.12 $ $Date: 2017/06/05 10:31:58 $ rppm <- function(..., rpargs=list()) { ## do the equivalent of ppm(...) cl <- match.call() cl[[1]] <- as.name('ppm') if("rpargs" %in% names(cl)) cl$rpargs <- NULL cl$forcefit <- TRUE pfit <- eval(cl, envir=parent.frame()) ## if(!is.poisson(pfit)) warning("Interpoint interaction will be ignored", call.=FALSE) df <- getglmdata(pfit) gf <- getglmfit(pfit) sf <- getglmsubset(pfit) rp <- do.call(rpart, resolve.defaults(list(formula=formula(gf), data=df, subset=sf, weights=df$.mpl.W), rpargs, list(method="poisson"))) result <- list(pfit=pfit, rp=rp) class(result) <- c("rppm", class(result)) return(result) } # undocumented as.ppm.rppm <- function(object) { object$pfit } print.rppm <- function(x, ...) 
{ splat("Point process model with recursive partitioning") splat("Data:", sQuote(x$pfit$Qname)) splat("Covariates:", commasep(sQuote(variablesinformula(formula(x$pfit))))) splat("Regression tree:") print(x$rp) invisible(NULL) } plot.rppm <- local({ argsPlotRpart <- c("x", "uniform", "branch", "compress", "margin", "minbranch") argsTextRpart <- c("splits", "label", "FUN", "all", "pretty", "digits", "use.n", "fancy", "fwidth", "fheight", "bg", "minlength") plot.rppm <- function(x, ..., what=c("tree", "spatial"), treeplot=NULL) { xname <- short.deparse(substitute(x)) what <- match.arg(what) switch(what, tree = { if(is.function(treeplot)) return(treeplot(x$rp, ...)) out <- do.call.matched(plot, list(x=x$rp, ...), funargs=argsPlotRpart, extrargs=graphicsPars("plot")) # note: plot.rpart does not pass arguments to 'lines' do.call.matched(text, list(x=x$rp, ...), funargs=argsTextRpart, extrargs=graphicsPars("text")) }, spatial = { p <- predict(x) out <- do.call("plot", resolve.defaults(list(x=p), list(...), list(main=xname))) }) return(invisible(out)) } plot.rppm }) #' prune method prune.rppm <- function(tree, ...) { tree$rp <- rpart::prune(tree$rp, ...) return(tree) } #' predict method predict.rppm <- function(object, ...) { model <- object$pfit tree <- object$rp #' assemble covariates for prediction, using rules of predict.ppm co <- predict(model, ..., type="covariates", check=FALSE, repair=FALSE) newdata <- co$newdata masque <- co$mask #' perform prediction using the tree pred <- predict(tree, newdata=co$newdata) #' pack up appropriately if(is.null(masque)) return(pred) imago <- as.im(masque, value=1.0) if(!is.marked(model)) { out <- imago out[] <- pred } else { lev <- levels(marks(data.ppm(model))) nlev <- length(lev) out <- rep(list(imago), nlev) names(out) <- lev splitpred <- split(pred, newdata$marks) for(i in seq_len(nlev)) out[[i]][] <- splitpred[[i]] out <- as.solist(out) } return(out) } fitted.rppm <- function(object, ...) 
{ predict(object, locations=data.ppm(object$pfit)) } spatstat/R/triangulate.R0000644000176200001440000000156613115225157014772 0ustar liggesusers#' #' triangulate.R #' #' Decompose a polygon into triangles #' #' $Revision: 1.4 $ $Date: 2015/11/21 11:13:00 $ #' triangulate.owin <- local({ is.triangle <- function(p) { return((length(p$bdry) == 1) && (length(p$bdry[[1]]$x) == 3)) } triangulate.owin <- function(W) { stopifnot(is.owin(W)) W <- as.polygonal(W, repair=TRUE) P <- as.ppp(vertices(W), W=Frame(W), check=FALSE) D <- delaunay(P) V <- intersect.tess(W, D) Candidates <- tiles(V) istri <- sapply(Candidates, is.triangle) Accepted <- Candidates[istri] if(any(!istri)) { # recurse Worries <- unname(Candidates[!istri]) Fixed <- lapply(Worries, triangulate.owin) Fixed <- do.call(c, lapply(Fixed, tiles)) Accepted <- append(Accepted, Fixed) } result <- tess(tiles=Accepted, window=W) return(result) } triangulate.owin }) spatstat/R/pairwise.family.R0000755000176200001440000004073713115271120015553 0ustar liggesusers# # # pairwise.family.S # # $Revision: 1.64 $ $Date: 2016/07/15 10:22:11 $ # # The pairwise interaction family of point process models # # pairwise.family: object of class 'isf' defining pairwise interaction # # # ------------------------------------------------------------------- # pairwise.family <- list( name = "pairwise", print = function(self) { cat("Pairwise interaction family\n") }, plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction unitz <- unitname(fint) if(is.null(inter) || is.null(inter$family) || inter$family$name != "pairwise") stop("Tried to plot the wrong kind of interaction") # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # pairpot <- inter$pot potpars <- inter$par rmax <- reach(fint, epsilon=1e-3) 
xlim <- list(...)$xlim if(is.infinite(rmax)) { if(!is.null(xlim)) rmax <- max(xlim) else { warning("Reach of interaction is infinite; need xlim to plot it") return(invisible(NULL)) } } if(is.null(d)) { dmax <- 1.25 * rmax d <- seq(from=0, to=dmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) dmax <- max(d) } if(is.null(xlim)) xlim <- c(0, dmax) types <- potpars$types if(is.null(types)) { # compute potential function as `fv' object dd <- matrix(d, ncol=1) p <- pairpot(dd, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(fun), list(...), list(ylim=ylim))) return(invisible(fun)) } else{ # compute each potential and store in `fasp' object if(!is.factor(types)) types <- factor(types, levels=types) m <- length(types) nd <- length(d) dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m) tx <- rep.int(types, rep.int(nd, m)) ty <- types p <- pairpot(dd, tx, ty, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fns <- vector(m^2, mode="list") which <- matrix(, m, m) for(i in seq_len(m)) { for(j in seq_len(m)) { # relevant position in matrix ijpos <- i + (j-1) * m which[i,j] <- ijpos # extract values of potential yy <- 
y[tx == types[i], j] # make fv object fns[[ijpos]] <- fv(data.frame(r=d, h=yy, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1"), unitname=unitz) # } } funz <- fasp(fns, which=which, formulae=list(cbind(h, one) ~ r), title="Fitted pairwise interactions", rowNames=paste(types), colNames=paste(types)) if(plotit) do.call(plot.fasp, resolve.defaults(list(funz), list(...), list(ylim=ylim))) return(invisible(funz)) } }, # end of function `plot' # ---------------------------------------------------- eval = function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, pot.only=FALSE) { # # This is the eval function for the `pairwise' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `pairwise' family of pairwise-interaction processes, # this eval function computes the distances between points, # invokes 'pairpot' to evaluate the potential between each pair of points, # applies edge corrections, and then sums the pair potential terms. # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. 
# The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # # The pair potential function 'pairpot' should be either # pairpot(d, par) [for potentials that don't depend on marks] # or # pairpot(d, tx, tu, par) [for potentials that do depend on mark] # where d is a matrix of interpoint distances, # tx is the vector of types for the data points, # tu is the vector of types for all quadrature points # and # par is a list of parameters for the potential. # # It must return a matrix with the same dimensions as d # or an array with its first two dimensions the same as the dimensions of d. fop <- names(formals(pairpot)) if(identical(all.equal(fop, c("d", "par")), TRUE)) marx <- FALSE else if(identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE)) marx <- TRUE else stop("Formal arguments of pair potential function are not understood") ## edge correction argument if(length(correction) > 1) stop("Only one edge correction allowed at a time!") if(!any(correction == c("periodic", "border", "translate", "translation", "isotropic", "Ripley", "none"))) stop(paste("Unrecognised edge correction", sQuote(correction))) no.correction <- #### Compute basic data # Decide whether to apply faster algorithm using 'closepairs' use.closepairs <- (correction %in% c("none", "border", "translate", "translation")) && !is.null(Reach) && is.finite(Reach) && is.null(precomputed) && !savecomputed if(!is.null(precomputed)) { # precomputed X <- precomputed$X U <- precomputed$U EqualPairs <- precomputed$E M <- precomputed$M } else { U <- as.ppp(U, X$window) # i.e. 
X$window is DEFAULT window if(!use.closepairs) # Form the matrix of distances M <- crossdist(X, U, periodic=(correction=="periodic")) } nX <- npoints(X) nU <- npoints(U) dimM <- c(nX, nU) # Evaluate the pairwise potential without edge correction if(use.closepairs) POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach) else if(!marx) POT <- pairpot(M, potpars) else POT <- pairpot(M, marks(X), marks(U), potpars) # Determine whether each column of potential is an offset IsOffset <- attr(POT, "IsOffset") # Check errors and special cases if(!is.matrix(POT) && !is.array(POT)) { if(length(POT) == 0 && X$n == 0) # empty pattern POT <- array(POT, dim=c(dimM,1)) else stop("Pair potential did not return a matrix or array") } if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) { whinge <- paste0( "The pair potential function ",short.deparse(substitute(pairpot)), " must produce a matrix or array with its first two dimensions\n", "the same as the dimensions of its input.\n") stop(whinge) } # make it a 3D array if(length(dim(POT))==2) POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL) if(correction == "translate" || correction == "translation") { edgewt <- edge.Trans(X, U) # sanity check ("everybody knows there ain't no...") if(!is.matrix(edgewt)) stop("internal error: edge.Trans() did not yield a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions") POT <- c(edgewt) * POT } else if(correction == "isotropic" || correction == "Ripley") { # weights are required for contributions from QUADRATURE points edgewt <- t(edge.Ripley(U, t(M), X$window)) if(!is.matrix(edgewt)) stop("internal error: edge.Ripley() did not return a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Ripley() has wrong dimensions") POT <- c(edgewt) * POT } # No pair potential term between a point and itself if(length(EqualPairs) > 
0) { nplanes <- dim(POT)[3] for(k in 1:nplanes) POT[cbind(EqualPairs, k)] <- 0 } # Return just the pair potential? if(pot.only) return(POT) # Sum the pairwise potentials V <- apply(POT, c(2,3), sum) # attach the original pair potentials attr(V, "POT") <- POT # attach the offset identifier attr(V, "IsOffset") <- IsOffset # pass computed information out the back door if(savecomputed) attr(V, "computed") <- list(E=EqualPairs, M=M) return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="pairwise.family$suffstat") { # for pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"pairwise")) stop("Model is not a pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] Xin <- X[contribute] Xout <- X[!contribute] # partial model matrix arising from ordered pairs of data points # which both contribute to the pseudolikelihood Empty <- X[numeric(0)] momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat") # partial model matrix arising from ordered pairs of data points # the second of which does not contribute to the pseudolikelihood mom <- partialModelMatrix(Xout, Xin, model, "suffstat") indx <- Xout$n + seq_len(Xin$n) momINxOUT <- mom[indx, , drop=FALSE] # parameters order2 <- names(coef(model)) %in% model$internal$Vnames order1 <- !order2 result <- 0 * coef(model) if(any(order1)) { # first order contributions can be determined from INxIN o1terms <- momINxIN[ , order1, drop=FALSE] o1sum <- colSums(o1terms) result[order1] <- o1sum } if(any(order2)) { # adjust for double counting of ordered pairs in INxIN but not INxOUT o2termsINxIN <- momINxIN[, order2, drop=FALSE] o2termsINxOUT <- momINxOUT[, order2, 
drop=FALSE] o2sum <- colSums(o2termsINxIN)/2 + colSums(o2termsINxOUT) result[order2] <- o2sum } return(result) }, ######### end of function $suffstat delta2 = function(X, inte, correction, ...) { # Sufficient statistic for second order conditional intensity # for pairwise interaction processes # Equivalent to evaluating pair potential. X <- as.ppp(X) seqX <- seq_len(npoints(X)) E <- cbind(seqX, seqX) R <- reach(inte) result <- pairwise.family$eval(X,X,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R) } ######### end of function $delta2 ) ######### end of list class(pairwise.family) <- "isf" # externally visible evalPairPotential <- function(X, P, E, pairpot, potpars, R) { # Evaluate pair potential without edge correction weights nX <- npoints(X) nP <- npoints(P) stopifnot(is.function(pairpot)) fop <- names(formals(pairpot)) if(identical(all.equal(fop, c("d", "par")), TRUE)) { unmarked <- TRUE } else if(identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE)) { unmarked <- FALSE } else stop("Formal arguments of pair potential function are not understood") # determine dimension of potential, etc fakePOT <- if(unmarked) pairpot(matrix(, 0, 0), potpars) else pairpot(matrix(, 0, 0), marks(X)[integer(0)], marks(P)[integer(0)], potpars) IsOffset <- attr(fakePOT, "IsOffset") fakePOT <- ensure3Darray(fakePOT) Vnames <- dimnames(fakePOT)[[3]] p <- dim(fakePOT)[3] # Identify close pairs X[i], P[j] cl <- crosspairs(X, P, R, what="ijd") I <- cl$i J <- cl$j D <- matrix(cl$d, ncol=1) # deal with empty cases if(nX == 0 || nP == 0 || length(I) == 0) { result <- array(0, dim=c(nX, nP, p), dimnames=list(NULL, NULL, Vnames)) attr(result, "IsOffset") <- IsOffset return(result) } # evaluate potential for close pairs # POT is a 1-column matrix or array, with rows corresponding to close pairs if(unmarked) { # unmarked POT <- pairpot(D, potpars) IsOffset <- attr(POT, "IsOffset") } else { # marked marX <- marks(X) marP <- marks(P) if(!identical(levels(marX), levels(marP))) 
stop("Internal error: marks of X and P have different levels") types <- levels(marX) mI <- marX[I] mJ <- marP[J] POT <- NULL # split data by type of P[j] for(k in types) { relevant <- which(mJ == k) if(length(relevant) > 0) { fk <- factor(k, levels=types) POTk <- pairpot(D[relevant, , drop=FALSE], mI[relevant], fk, potpars) POTk <- ensure3Darray(POTk) if(is.null(POT)) { # use first result of 'pairpot' to determine dimension POT <- array(, dim=c(length(I), 1, dim(POTk)[3])) # capture information about offsets, and names of interaction terms IsOffset <- attr(POTk, "IsOffset") Vnames <- dimnames(POTk)[[3]] } # insert values just computed POT[relevant, , ] <- POTk } } } POT <- ensure3Darray(POT) p <- dim(POT)[3] # create result array result <- array(0, dim=c(npoints(X), npoints(P), p), dimnames=list(NULL, NULL, Vnames)) # insert results II <- rep(I, p) JJ <- rep(J, p) KK <- rep(1:p, each=length(I)) result[cbind(II,JJ,KK)] <- POT # finally identify identical pairs and set value to 0 if(length(E) > 0) { E.rep <- apply(E, 2, rep, times=p) p.rep <- rep(1:p, each=nrow(E)) result[cbind(E.rep, p.rep)] <- 0 } attr(result, "IsOffset") <- IsOffset return(result) } spatstat/R/as.im.R0000755000176200001440000002204413153160241013451 0ustar liggesusers# # as.im.R # # conversion to class "im" # # $Revision: 1.48 $ $Date: 2017/09/04 05:03:30 $ # # as.im() # as.im <- function(X, ...) 
{
  ## S3 generic: dispatch to as.im.<class>(X)
  UseMethod("as.im")
}

## Convert (or resample) an existing pixel image.
## W / eps / dimyx / xy control the target raster; na.replace substitutes NA pixels.
as.im.im <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) {
  X <- repair.old.factor.image(X)
  nopar <- is.null(eps) && is.null(dimyx) && is.null(xy)
  if(is.null(W)) {
    if(nopar) {
      ## no new raster requested: return X unchanged (after NA substitution)
      X <- repair.image.xycoords(X)
      X <- na.handle.im(X, na.replace)
      return(X)
    }
    # pixel raster determined by dimyx etc
    W <- as.mask(as.rectangle(X), eps=eps, dimyx=dimyx, xy=xy)
    # invoke as.im.owin
    Y <- as.im(W)
  } else if(is.mask(W) || is.im(W) || !nopar) {
    #' raster information is present in { W, eps, dimyx, xy }
    Y <- as.im(W, eps=eps, dimyx=dimyx, xy=xy)
  } else {
    #' use existing raster information in X
    return(X[W, drop=FALSE, tight=TRUE])
  }
  # resample X onto raster of Y
  Y <- rastersample(X, Y)
  return(na.handle.im(Y, na.replace))
}

## Convert a spatial window to a pixel image.
## Pixels inside the window get 'value'; pixels outside get NA (or na.replace).
as.im.owin <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL,
                       na.replace=NULL, value=1) {
  if(!(is.null(eps) && is.null(dimyx) && is.null(xy))) {
    # raster dimensions determined by dimyx etc
    # convert X to a mask
    M <- as.mask(X, eps=eps, dimyx=dimyx, xy=xy)
    # convert mask to image
    d <- M$dim
    v <- matrix(value, d[1L], d[2L])
    m <- M$m
    v[!m] <- if(is.null(na.replace)) NA else na.replace
    out <- im(v, M$xcol, M$yrow,
              xrange=M$xrange, yrange=M$yrange, unitname=unitname(X))
    return(out)
  }
  if(!is.null(W) && is.owin(W) && W$type == "mask") {
    # raster dimensions determined by W
    # convert W to zero image
    d <- W$dim
    Z <- im(matrix(0, d[1L], d[2L]), W$xcol, W$yrow, unitname=unitname(X))
    # adjust values to indicator of X
    Z[X] <- 1
    if(missing(value) && is.null(na.replace)) {
      # done
      out <- Z
    } else {
      # map {0, 1} to {na.replace, value}
      v <- matrix(ifelseAB(Z$v == 0, na.replace, value), d[1L], d[2L])
      out <- im(v, W$xcol, W$yrow, unitname=unitname(X))
    }
    return(out)
  }
  if(X$type == "mask") {
    # raster dimensions determined by X
    # convert X to image
    d <- X$dim
    v <- matrix(value, d[1L], d[2L])
    m <- X$m
    v[!m] <- if(is.null(na.replace)) NA else na.replace
    out <- im(v, xcol=X$xcol, yrow=X$yrow,
              xrange=X$xrange, yrange=X$yrange,
              unitname=unitname(X))
    return(out)
  }
  # X is not a mask.
  # W is either missing, or is not a mask.
  # Convert X to an image using default settings
  M <- as.mask(X)
  # convert mask to image
  d <- M$dim
  v <- matrix(value, d[1L], d[2L])
  m <- M$m
  v[!m] <- if(is.null(na.replace)) NA else na.replace
  out <- im(v, M$xcol, M$yrow, unitname=unitname(X))
  return(out)
}

## funxy objects carry their own window; delegate to as.im.function
as.im.funxy <- function(X, W=Window(X), ...) {
  as.im.function(X, W=W, ...)
}

## Convert a function f(x, y, ...) to a pixel image by evaluating it
## at the pixel centres of a mask built from W.
## strict=TRUE restricts '...' to the formal arguments of f.
as.im.function <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL,
                           na.replace=NULL, strict=FALSE) {
  f <- X
  if(is.null(W))
    stop("A window W is required")
  W <- as.owin(W)
  W <- as.mask(W, eps=eps, dimyx=dimyx, xy=xy)
  m <- W$m
  ## 'funny' = window does not fill its bounding rectangle
  funnywindow <- !all(m)
  xx <- as.vector(rasterx.mask(W))
  yy <- as.vector(rastery.mask(W))
  argh <- list(...)
  if(strict) argh <- argh[names(argh) %in% names(formals(f))]
  # evaluate function value at each pixel
  if(!funnywindow)
    values <- do.call(f, append(list(xx, yy), argh))
  else {
    # evaluate only inside window
    inside <- as.vector(m)
    val <- do.call(f, append(list(xx[inside], yy[inside]), argh))
    # create space for full matrix
    msize <- length(m)
    values <-
      if(!is.factor(val))
        vector(mode=typeof(val), length=msize)
      else {
        lev <- levels(val)
        factor(rep.int(lev[1L], msize), levels=lev)
      }
    # copy values, assigning NA outside window
    values[inside] <- val
    values[!inside] <- NA
  }
  nc <- length(W$xcol)
  nr <- length(W$yrow)
  if(nr == 1 || nc == 1) {
    # exception: can't determine pixel width/height from centres
    out <- im(matrix(values, nr, nc),
              xrange=W$xrange, yrange=W$yrange, unitname=unitname(W))
  } else {
    out <- im(values, W$xcol, W$yrow, unitname=unitname(W))
  }
  return(na.handle.im(out, na.replace))
}

## Convert a matrix of pixel values to an image, using W (if any)
## for pixel coordinates or at least the spatial extent.
as.im.matrix <- function(X, W=NULL, ...)
{
  nr <- nrow(X)
  nc <- ncol(X)
  if(is.null(W))
    return(im(X, ...))
  W <- as.owin(W)
  if(W$type == "mask") {
    xcol <- W$xcol
    yrow <- W$yrow
    # pixel coordinate information
    # use W's pixel grid only if its dimensions agree with X
    if(length(xcol) == nc && length(yrow) == nr)
      return(im(X, xcol, yrow, unitname=unitname(W)))
  }
  # range information
  R <- as.rectangle(W)
  xrange <- R$xrange
  yrange <- R$yrange
  return(im(X, xrange=xrange, yrange=yrange, unitname=unitname(W)))
}

## Fallback method: handles a single constant value (treated as a
## constant function) or a list(x, y, z) in the usual S convention.
as.im.default <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL,
                          na.replace=NULL) {
  if((is.vector(X) || is.factor(X)) && length(X) == 1) {
    # numerical value: interpret as constant function
    xvalue <- X
    X <- function(xx, yy, ...) { rep.int(xvalue, length(xx)) }
    return(as.im(X, W, ..., eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace))
  }
  if(is.list(X) && checkfields(X, c("x","y","z"))) {
    stopifnot(is.matrix(X$z))
    z <- X$z
    y <- X$y
    x <- X$x
    # Usual S convention as in contour.default() and image.default()
    # Rows of z correspond to x values.
    nr <- nrow(z)
    nc <- ncol(z)
    lx <- length(x)
    ly <- length(y)
    # length nr+1 means x gives pixel boundaries: convert to centres
    if(lx == nr + 1)
      x <- (x[-1L] + x[-lx])/2
    else if(lx != nr)
      stop("length of x coordinate vector does not match number of rows of z")
    if(ly == nc + 1)
      y <- (y[-1L] + y[-ly])/2
    else if(ly != nc)
      stop("length of y coordinate vector does not match number of columns of z")
    # convert to class "im"
    out <- im(t(z), x, y)
    # now apply W and dimyx if present
    if(is.null(W) && !(is.null(eps) && is.null(dimyx) && is.null(xy)))
      out <- as.im(out, eps=eps, dimyx=dimyx, xy=xy)
    else if(!is.null(W))
      out <- as.im(out, W=W, eps=eps, dimyx=dimyx, xy=xy)
    return(na.handle.im(out, na.replace))
  }
  stop("Can't convert X to a pixel image")
}

## Convert a point pattern to an image of pixel counts via pixellate()
as.im.ppp <- function(X, ...)
{
  pixellate(X, ..., weights=NULL, zeropad=FALSE)
}

## Convert a data frame with columns x, y and one or more value columns
## to a pixel image (or, when drop=FALSE or several value columns, a
## list of images). 'step' optionally fixes the pixel spacing.
as.im.data.frame <- function(X, ..., step, fatal=TRUE, drop=TRUE) {
  if(missing(step)) {
    xstep <- ystep <- NULL
  } else {
    step <- ensure2vector(step)
    xstep <- step[1L]
    ystep <- step[2L]
  }
  if(ncol(X) < 3) {
    whinge <- "Argument 'X' must have at least 3 columns of data"
    if(fatal) stop(whinge)
    warning(whinge)
    return(NULL)
  }
  ## extract (x,y) coordinates
  mch <- matchNameOrPosition(c("x", "y", "z"), names(X))
  x <- X[, mch[1L]]
  y <- X[, mch[2L]]
  z <- X[, -mch[1:2], drop=FALSE]
  ## unique x,y coordinates
  xx <- sort(unique(x))
  yy <- sort(unique(y))
  jj <- match(x, xx)
  ii <- match(y, yy)
  iijj <- cbind(ii, jj)
  ## make matrix (for incomplete x, y sequence)
  ok <- checkbigmatrix(length(xx), length(yy), fatal=fatal)
  if(!ok) return(NULL)
  mm <- matrix(NA, length(yy), length(xx))
  ## ensure xx and yy are complete equally-spaced sequences
  ## (fillseq presumably returns the completed sequence and an index map
  ##  -- TODO confirm against the helper's definition)
  fx <- fillseq(xx, step=xstep)
  fy <- fillseq(yy, step=ystep)
  xcol <- fx[[1L]]
  yrow <- fy[[1L]]
  ## trap very large matrices
  ok <- checkbigmatrix(length(xcol), length(yrow), fatal=fatal)
  if(!ok) return(NULL)
  ## mapping from xx to xcol, yy to yrow
  jjj <- fx[[2L]]
  iii <- fy[[2L]]
  ## make matrix for full sequence
  m <- matrix(NA, length(yrow), length(xcol))
  ## run through columns of pixel values
  nz <- ncol(z)
  result <- vector(mode="list", length=nz)
  names(result) <- colnames(z)
  for(k in seq_len(nz)) {
    zk <- z[,k]
    mm[iijj] <- zk
    m[iii,jjj] <- mm
    lev <- levels(zk)
    ## preserve factor levels for factor-valued columns
    mo <- if(is.null(lev)) m else factor(as.vector(m), levels=lev)
    result[[k]] <- im(mat=mo, xcol=xcol, yrow=yrow)
  }
  if(nz == 1 && drop) result <- result[[1L]]
  return(result)
}

# convert to image from some other format, then do something
do.as.im <- function(x, action, ...,
                     W = NULL, eps = NULL, dimyx = NULL, xy = NULL,
                     na.replace = NULL) {
  Z <- as.im(x, W=W, eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace)
  Y <- do.call(action, list(Z, ...))
  return(Y)
}

## Replace NA pixel values by the single value 'na.replace' (if given)
na.handle.im <- function(X, na.replace) {
  if(is.null(na.replace))
    return(X)
  if(length(na.replace) != 1)
stop("na.replace should be a single value") X$v[is.na(X$v)] <- na.replace return(X) } repair.old.factor.image <- function(x) { # convert from old to new representation of factor images if(x$type != "factor") return(x) v <- x$v isold <- !is.null(lev <- attr(x, "levels")) isnew <- is.factor(v) && is.matrix(v) if(isnew) return(x) if(!isold) stop("Internal error: unrecognised format for factor-valued image") v <- factor(v, levels=lev) dim(v) <- x$dim x$v <- v return(x) } repair.image.xycoords <- function(x) { v <- x$v if(is.null(dim(v))) dim(v) <- c(length(x$yrow), length(x$xcol)) im(v, xrange=x$xrange, yrange=x$yrange, unitname=unitname(x)) } spatstat/R/cut.ppp.R0000755000176200001440000000243213115271075014040 0ustar liggesusers# # cut.ppp.R # # cut method for ppp objects # # $Revision: 1.15 $ $Date: 2016/10/26 09:29:57 $ # cut.ppp <- function(x, z=marks(x), ...) { x <- as.ppp(x) if(missing(z) || is.null(z)) { z <- marks(x, dfok=TRUE) if(is.null(z)) stop("x has no marks to cut") } if(is.character(z)) { if(length(z) == npoints(x)) { # interpret as a factor z <- factor(z) } else if((length(z) == 1L) && (z %in% colnames(df <- as.data.frame(x)))) { # interpret as the name of a column of marks or coordinates z <- df[, z] } else stop("format of argument z not understood") } if(is.factor(z) || is.vector(z)) { stopifnot(length(z) == npoints(x)) g <- if(is.factor(z)) z else if(is.numeric(z)) cut(z, ...) else factor(z) marks(x) <- g return(x) } if(is.data.frame(z) || is.matrix(z)) { stopifnot(nrow(z) == npoints(x)) # take first column z <- z[,1L] g <- if(is.numeric(z)) cut(z, ...) 
         else factor(z)
    marks(x) <- g
    return(x)
  }
  if(is.im(z))
    ## image: classify by the image value at each point
    return(cut(x, z[x, drop=FALSE], ...))
  if(is.owin(z)) {
    ## window: classify by inside/outside
    marks(x) <- factor(inside.owin(x$x, x$y, z), levels=c(FALSE, TRUE))
    return(x)
  }
  if(is.tess(z)) {
    ## tessellation: classify by tile membership
    marks(x) <- tileindex(x$x, x$y, z)
    return(x)
  }
  stop("Format of z not understood")
}
spatstat/R/FGmultiInhom.R0000644000176200001440000001721413115271075015012 0ustar liggesusers#'
#' FGmultiInhom.R
#'
#' inhomogeneous multitype G and F functions
#'
#' Original code by Ottmar Cronie and Marie-Colette van Lieshout
#'
#' Rewritten for spatstat by Adrian Baddeley
#'
#' GmultiInhom
#' FmultiInhom
#'
#' $Revision: 1.6 $ $Date: 2017/06/05 10:31:58 $

#' Inhomogeneous multitype nearest-neighbour distance function G_{I,J}.
#' X: marked point pattern; I, J: subset indices; intensities may be
#' supplied either as 'lambda' (for all of X) or as 'lambdaI'/'lambdaJ'.
#' 'lambdamin' must be a positive lower bound for the J intensities.
GmultiInhom <- function(X, I, J,
                        lambda=NULL, lambdaI=NULL, lambdaJ=NULL,
                        lambdamin=NULL, ..., r=NULL,
                        ReferenceMeasureMarkSetI=NULL,
                        ratio=FALSE){
  if(!is.ppp(X) || !is.marked(X))
    stop("X should be a marked point pattern")
  W <- Window(X)
  nX <- npoints(X)
  #' handle r argument
  rmax <- rmax.rule("G", W, intensity(X))
  bks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmax)
  r <- bks$r
  rmax <- bks$max
  nr <- length(r)
  #' Accept any kind of index for I; convert it to a logical index
  I <- ppsubset(X, I)
  if(is.null(I)) stop("I must be a valid subset index")
  XI <- X[I]
  nI <- sum(I)
  if (nI == 0) stop("No points satisfy condition I")
  if(!is.null(ReferenceMeasureMarkSetI)) {
    check.1.real(ReferenceMeasureMarkSetI)
    stopifnot(ReferenceMeasureMarkSetI >= 0)
  }
  #' likewise for J
  if(missing(J) || is.null(J)) {
    J <- rep(TRUE, nX)
  } else {
    J <- ppsubset(X, J)
  }
  XJ <- X[J]
  nJ <- sum(J)
  if (nJ == 0) stop("No points satisfy condition J")
  #' supply either lambda, or lambdaI and lambdaJ
  lam.given <- !is.null(lambda)
  lamIJ.given <- !is.null(lambdaI) || !is.null(lambdaJ)
  if(lam.given == lamIJ.given || is.null(lambdaI) != is.null(lambdaJ))
    stop(paste("Supply either a vector lambda of length equal to npoints(X),",
               "or two vectors lambdaI, lambdaJ of lengths",
               "equal to npoints(X[I]) and npoints(X[J]) respectively"),
         call.=FALSE)
  if(lamIJ.given) {
    #' lambdaI and lambdaJ given
    check.nvector(lambdaI, nI, things="points of X[I]")
    stopifnot(all(lambdaI > 0))
    check.nvector(lambdaJ, nJ, things="points of X[J]")
    stopifnot(all(lambdaJ > 0))
    if(is.null(lambdamin)){
      stop(paste("Supply lambdamin - a single positive number which is",
                 "smaller than the values in lambdaJ"), call.=FALSE)
    }
    check.1.real(lambdamin)
    stopifnot(lambdamin > 0)
    stopifnot(lambdamin <= min(lambdaJ))
  } else {
    #' lambda given
    check.nvector(lambda, nX, things="points of X")
    stopifnot(all(lambda > 0))
    lambdaI <- lambda[I]
    lambdaJ <- lambda[J]
    if(is.null(lambdamin)){
      stop(paste("Supply lambdamin - a single positive number which is",
                 "smaller than the values in lambda"), call.=FALSE)
    }
    check.1.real(lambdamin)
    stopifnot(lambdamin > 0)
    stopifnot(lambdamin <= min(lambda))
  }
  #' Calculate 1/lambda(x_i,y_i,m_i))
  #' for all (x_i,y_i,m_i) with m_i in I
  invlambdaI <- 1/lambdaI
  #' Calculate (1 - lambda_min/lambda(x_i,y_i,m_i))
  #' for all (x_i,y_i,m_i) with m_i in J
  Coeff <- 1-(lambdamin/lambdaJ)
  ## CoeffMatrix <- matrix(rep(Coeff,times=nI), nrow=nI, byrow=TRUE)
  #' distances
  ## DistanceXItoXJ <- crossdist(XI,XJ)
  #' eroded areas and boundary distances
  areaWr <- eroded.areas(W, r)
  bdistXI <- bdist.points(XI)
  #' for each point x in XI, determine largest r such that x \in W-r
  ibI <- fastFindInterval(bdistXI, r, labels=TRUE)
  #' count of points inside W-r for each r
  ## NumberEroded <- revcumsum(table(ibI))
  #' denominator
  #' sum invlambdaI for all points x \in W-r
  DenominatorN <- c(sum(invlambdaI),
                    revcumsum(natozero(tapply(invlambdaI, ibI, sum))))
  if(!is.null(ReferenceMeasureMarkSetI))
    DenominatorA <- areaWr * ReferenceMeasureMarkSetI
  #' local products of weights
  #' sort data points in order of increasing x coordinate
  xxI <- XI$x
  yyI <- XI$y
  oXI <- fave.order(xxI)
  xIord <- xxI[oXI]
  yIord <- yyI[oXI]
  #'
  xxJ <- XJ$x
  yyJ <- XJ$y
  vvJ <- Coeff
  oXJ <- fave.order(xxJ)
  xJord <- xxJ[oXJ]
  yJord <- yyJ[oXJ]
  vJord <- vvJ[oXJ]
  # compute local cumulative products
  # (C routine 'locxprod' in the spatstat package)
  z <- .C("locxprod",
          ntest = as.integer(nI),
          xtest = as.double(xIord),
          ytest = as.double(yIord),
          ndata = as.integer(nJ),
          xdata = as.double(xJord),
          ydata = as.double(yJord),
          vdata = as.double(vJord),
          nr = as.integer(nr),
          rmax = as.double(rmax),
          ans = as.double(numeric(nI * nr)),
          PACKAGE = "spatstat")
  ans <- matrix(z$ans, nrow=nr, ncol=nI)
  #' revert to original ordering
  loccumprod <- matrix(, nrow=nr, ncol=nI)
  loccumprod[, oXI] <- ans
  #' border correction
  outside <- outer(r, bdistXI, ">")
  loccumprod[outside] <- 0
  #' weight by 1/lambdaI
  wlcp <- loccumprod * matrix(invlambdaI, byrow=TRUE, nr, nI)
  #' sum over I for each fixed r
  numer <- .rowSums(wlcp, nr, nI)
  # pack up as an fv (ratio) object
  Gdf <- data.frame(r=r, theo = 1 - exp(- lambdamin * pi * r^2))
  desc <- c("distance argument r",
            "theoretical Poisson %s")
  theo.denom <- rep.int(nI, nr)
  fname <- c("G", "list(inhom,I,J)")
  G <- ratfv(Gdf, NULL, theo.denom,
             "r", quote(G[inhom, I, J](r)),
             "theo", NULL, c(0,rmax),
             c("r", makefvlabel(NULL, NULL, fname, "pois")),
             desc, fname=fname,
             yexp=quote(G[list(inhom,I,J)](r)),
             ratio=ratio)
  # add border corrected (Hamilton Principle) estimate
  G <- bind.ratfv(G,
                  data.frame(bord=DenominatorN-numer), DenominatorN,
                  makefvlabel(NULL, "hat", fname, "bord"),
                  "border estimate of %s",
                  "bord",
                  ratio=ratio)
  fvnames(G, ".") <- c("bord", "theo")
  # add modified border corrected (non-Hamilton-Principle) estimate
  if(!is.null(ReferenceMeasureMarkSetI)) {
    G <- bind.ratfv(G,
                    data.frame(bordm=DenominatorA-numer), DenominatorA,
                    makefvlabel(NULL, "hat", fname, "bordm"),
                    "modified border estimate of %s",
                    "bordm",
                    ratio=ratio)
    fvnames(G, ".") <- c("bord", "bordm", "theo")
  }
  #
  formula(G) <- . ~ r
  unitname(G) <- unitname(X)
  if(ratio)
    G <- conform.ratfv(G)
  return(G)
}

#' marked inhomogeneous F
#' Computed by applying Finhom() to the sub-pattern X[J] and relabelling.
FmultiInhom <- function(X, J,
                        lambda=NULL,lambdaJ=NULL,
                        lambdamin=NULL,
                        ..., r=NULL) {
  if(!is.ppp(X) || !is.marked(X))
    stop("X should be a marked point pattern")
  nX <- npoints(X)
  #' Accept any kind of index for J; convert it to a logical index
  J <- ppsubset(X, J)
  if(is.null(J)) stop("J must be a valid subset index")
  XJ <- X[J]
  nJ <- sum(J)
  if (nJ == 0) stop("No points satisfy condition J")
  if(is.null(lambda) == is.null(lambdaJ))
    stop(paste("Supply either a vector lambda of length equal to npoints(X),",
               "or a vector lambdaJ of length equal to npoints(X[J])"),
         call.=FALSE)
  if(is.null(lambdamin))
    stop("Supply a value for lambdamin", call.=FALSE)
  check.1.real(lambdamin)
  if(!is.null(lambda)) {
    check.nvector(lambda, nX)
    stopifnot(all(lambda > 0))
    stopifnot(lambdamin <= min(lambda[J]))
    lambdaJ <- lambda[J]
  } else {
    check.nvector(lambdaJ, nJ)
    stopifnot(all(lambdaJ > 0))
    stopifnot(lambdamin <= min(lambdaJ))
  }
  FJ <- Finhom(XJ, lambda=lambdaJ, lmin=lambdamin, r=r)
  FJ <- rebadge.fv(FJ,
                   new.ylab = quote(F[inhom, J](r)),
                   new.fname = c("F", "list(inhom,J)"),
                   new.yexp = quote(F[list(inhom,J)](r)))
  return(FJ)
}
spatstat/R/rmhsnoop.R0000644000176200001440000005122213115225157014312 0ustar liggesusers#
# rmhsnoop.R
#
# visual debug mechanism for rmh
#
# $Revision: 1.26 $ $Date: 2014/10/24 00:22:30 $
#
# When rmh is called in visual debug mode (snooping = TRUE),
# it calls e <- rmhSnoopEnv(...) to create an R environment 'e'
# containing variables that will represent the current state
# of the M-H algorithm with initial state X and model reach R.
#
# The environment 'e' is passed to the C routine xmethas.
# This makes it possible for data to be exchanged between
# the C and R code.
#
# When xmethas reaches the debugger's stopping time,
# the current state of the simulation and the proposal
# are copied from C into the R environment 'e'.
# # Then to execute the visual display, the C code calls # 'eval' to execute the R function rmhsnoop(). # # The function rmhsnoop uses the 'simplepanel' class # to generate a plot showing the state of the simulation # and the proposal, and then wait for point-and-click input using # locator(). # # When rmhsnoop() exits, it returns an integer giving the # (user-specified) next stoppping time. This is read back into # the C code. Then xmethas resumes simulations. # # I said it was simple! %^] rmhSnoopEnv <- function(Xinit, Wclip, R) { stopifnot(is.ppp(Xinit)) # Create an environment that will be accessible to R and C code e <- new.env() # initial state (point pattern) X <- Xinit assign("Wsim", as.owin(X), envir=e) assign("xcoords", coords(X)[,1], envir=e) assign("ycoords", coords(X)[,2], envir=e) if(is.multitype(X)) { mcodes <- as.integer(marks(X)) - 1L mlevels <- levels(marks(X)) assign("mcodes", mcodes, envir=e) assign("mlevels", mlevels, envir=e) } else { assign("mcodes", NULL, envir=e) assign("mlevels", NULL, envir=e) } # clipping window assign("Wclip", Wclip, envir=e) # reach of model (could be infinite) assign("R", R, envir=e) # current iteration number assign("irep", 0L, envir=e) # next iteration to be inspected assign("inxt", 1L, envir=e) # next transition to be inspected assign("tnxt", 1L, envir=e) # proposal type assign("proptype", NULL, envir=e) # outcome of proposal assign("itype", NULL, envir=e) # proposal location assign("proplocn", NULL, envir=e) # proposal mark assign("propmark", NULL, envir=e) # index of proposal point in existing pattern assign("propindx", NULL, envir=e) # Hastings ratio assign("numerator", NULL, envir=e) assign("denominator", NULL, envir=e) # Expression actually evaluated to execute visual debug # Expression is evaluated in the environment 'e' snoopexpr <- expression({ rslt <- rmhsnoop(Wsim=Wsim, Wclip=Wclip, R=R, xcoords=xcoords, ycoords=ycoords, mlevels=mlevels, mcodes=mcodes, irep=irep, itype=itype, proptype=proptype, 
proplocn=proplocn, propmark=propmark, propindx=propindx, numerator=numerator, denominator=denominator) inxt <- rslt$inxt tnxt <- rslt$tnxt itype <- if(rslt$accepted) rslt$itype else 0 storage.mode(tnxt) <- storage.mode(inxt) <- storage.mode(itype) <- "integer" }) assign("snoopexpr", snoopexpr, envir=e) # callback expression assign("callbackexpr", quote(eval(snoopexpr)), envir=e) return(e) } # visual debug display using base graphics rmhsnoop <- local({ rmhsnoop <- function(..., Wsim, Wclip, R, xcoords, ycoords, mlevels, mcodes, irep, itype, proptype, proplocn, propmark, propindx, numerator, denominator) { trap.extra.arguments(..., .Context="In rmhsnoop") X <- ppp(xcoords, ycoords, window=Wsim) if(!missing(mlevels) && length(mlevels) > 0) marks(X) <- factor(mlevels[mcodes+1], levels=mlevels) Wclip.orig <- Wclip # determine plot arguments if(is.mask(Wclip)) { parg.Wclip <- list(invert=TRUE, col="grey") } else { Wclip <- edges(Wclip) parg.Wclip <- list(lty=3, lwd=2, col="grey") } parg.birth <- list(pch=16, cols="green") parg.death <- list(pch=4, cols="red", lwd=2) parg.birthcircle <- list(col="green", lty=3) parg.deathcircle <- list(col="red", lty=3) # assemble a layered object representing the state and the proposal if(is.null(proptype)) { # initial state L <- layered(Wsim, Wclip, X) layerplotargs(L)$Wclip <- parg.Wclip accepted <- TRUE } else { accepted <- (itype == proptype) # add proposal info switch(decode.proptype(proptype), Reject= { propname <- "rejected" L <- layered(Wsim=Wsim, Wclip=Wclip, X=X) layerplotargs(L)$Wclip <- parg.Wclip }, Birth = { propname <- "birth proposal" U <- ppp(proplocn[1], proplocn[2], window=Wsim) D <- if(is.finite(R) && R > 0) { edges(disc(R, proplocn))[Wsim] } else NULL L <- layered(Wsim=Wsim, Wclip=Wclip, PrevState=X, Reach=D, NewPoint=U) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$NewPoint <- parg.birth }, Death = { propname <- "death proposal" # convert from C to R indexing propindx <- propindx + 1 XminI <- X[-propindx] 
XI <- X[propindx] D <- if(is.finite(R) && R > 0) { edges(disc(R, c(XI$x, XI$y)))[Wsim] } else NULL L <- layered(Wsim=Wsim, Wclip=Wclip, RetainedPoints=XminI, Reach=D, Deletion=XI) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$Reach <- parg.deathcircle layerplotargs(L)$Deletion <- parg.death }, Shift = { propname <- "shift proposal" # convert from C to R indexing propindx <- propindx + 1 # make objects XminI <- X[-propindx] XI <- X[propindx] U <- ppp(proplocn[1], proplocn[2], window=Wsim) if(is.finite(R) && R > 0) { DU <- edges(disc(R, proplocn))[Wsim] DXI <- edges(disc(R, c(XI$x, XI$y)))[Wsim] } else { DU <- DXI <- NULL } # make layers L <- layered(Wsim=Wsim, Wclip=Wclip, OtherPoints=XminI, ReachAfter=DU, AfterShift=U, ReachBefore=DXI, BeforeShift=XI) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$ReachAfter <- parg.birthcircle layerplotargs(L)$AfterShift <- parg.birth layerplotargs(L)$ReachBefore <- parg.deathcircle layerplotargs(L)$BeforeShift <- parg.death }, stop("Unrecognised proposal type") ) } header <- c(paste("Iteration", irep), propname, paste("Hastings ratio =", signif(numerator, 4), "/", signif(denominator, 4))) info <- list(irep=irep, Wsim=Wsim, Wclip=Wclip.orig, X=X, proptype=proptype, proplocn=proplocn, propindx=propindx, propmark=propmark, accepted=accepted, numerator=numerator, denominator=denominator) inspectProposal(L, info, title=header) } decode.proptype <- function(n) { if(n < 0 || n > 3) stop(paste("Unrecognised proposal type:", n)) switch(n+1, "Reject", "Birth", "Death", "Shift") } encode.proptype <- function(s) { switch(s, Reject=0, Birth=1, Death=2, Shift=3) } inspectProposal <- function(X, info, ..., title) { if(missing(title)) title <- short.deparse(substitute(X)) if(!inherits(X, "layered")) X <- layered(X) lnames <- names(X) if(sum(nzchar(lnames)) != length(X)) lnames <- paste("Layer", seq_len(length(X))) # Find window and bounding box (validates X) W <- as.owin(X) BX <- as.rectangle(W) # Initialise environment for state 
variables etc # This environment is accessible to the panel button functions en <- new.env() assign("X", X, envir=en) assign("W", W, envir=en) assign("BX", BX, envir=en) assign("zoomfactor", 1L, envir=en) midX <- unlist(centroid.owin(BX)) assign("midX", midX, envir=en) assign("zoomcentre", midX, envir=en) assign("irep", info$irep, envir=en) assign("inxt", info$irep+1, envir=en) assign("tnxt", -1, envir=en) assign("accepted", info$accepted, envir=en) assign("proplocn", info$proplocn, envir=en) assign("info", info, envir=en) # Build interactive panel # Start with data panel P <- simplepanel(title, BX, list(Data=BX), list(Data=dataclickfun), list(Data=dataredrawfun), snoopexit, en) # Add pan buttons margin <- max(sidelengths(BX))/4 panelwidth <- sidelengths(BX)[1]/2 P <- grow.simplepanel(P, "top", margin, navfuns["Up"], aspect=1) P <- grow.simplepanel(P, "bottom", margin, navfuns["Down"], aspect=1) P <- grow.simplepanel(P, "left", margin, navfuns["Left"], aspect=1) P <- grow.simplepanel(P, "right", margin, navfuns["Right"], aspect=1) # Zoom/Pan buttons at right P <- grow.simplepanel(P, "right", panelwidth, zoomfuns) # Accept/reject buttons at top P <- grow.simplepanel(P, "top", margin, accept.clicks, accept.redraws) # Dump/print buttons at bottom P <- grow.simplepanel(P, "bottom", margin, dumpfuns) # Jump controls at left maxchars <- max(4, nchar(names(jump.clicks))) P <- grow.simplepanel(P, "left", panelwidth * maxchars/6, jump.clicks) # go rslt <- run.simplepanel(P, popup=FALSE) clear.simplepanel(P) rm(en) return(rslt) } # button control functions zoomfuns <- rev(list( "Zoom In"=function(env, xy) { z <- get("zoomfactor", envir=env) assign("zoomfactor", z * 2, envir=env) return(TRUE) }, "Zoom Out"=function(env, xy) { z <- get("zoomfactor", envir=env) assign("zoomfactor", z / 2, envir=env) return(TRUE) }, "At Proposal"=function(env, xy) { proplocn <- get("proplocn", envir=env) assign("zoomcentre", proplocn, envir=env) return(TRUE) }, Reset=function(env, xy) { 
assign("zoomfactor", 1L, envir=env) midX <- get("midX", envir=env) assign("zoomcentre", midX, envir=env) return(TRUE) })) navfuns <- list( Left = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) width <- sidelengths(BX)[1] stepsize <- (width/4)/zoom ce <- ce - c(stepsize, 0) assign("zoomcentre", ce, envir=env) return(TRUE) }, Right = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) width <- sidelengths(BX)[1] stepsize <- (width/4)/zoom ce <- ce + c(stepsize, 0) assign("zoomcentre", ce, envir=env) return(TRUE) }, Up = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) height <- sidelengths(BX)[2] stepsize <- (height/4)/zoom ce <- ce + c(0, stepsize) assign("zoomcentre", ce, envir=env) return(TRUE) }, Down = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) height <- sidelengths(BX)[2] stepsize <- (height/4)/zoom ce <- ce - c(0, stepsize) assign("zoomcentre", ce, envir=env) return(TRUE) }) accept.clicks <- rev(list( Accept=function(env, xy) { assign("accepted", TRUE, envir=env) return(TRUE) }, Reject=function(env, xy) { assign("accepted", FALSE, envir=env) return(TRUE) })) accept.redraws <- rev(list( Accept=function(button, name, env) { accepted <- get("accepted", envir=env) if(accepted) { plot(button, add=TRUE, col="green") } else { plot(button, add=TRUE) } text(centroid.owin(button), labels=name) }, Reject=function(button, name, env) { accepted <- get("accepted", envir=env) if(accepted) { plot(button, add=TRUE) } else { plot(button, add=TRUE, col="pink") } text(centroid.owin(button), labels=name) })) jump.clicks <- rev(list( "Next Iteration"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+1, envir=env) return(FALSE) }, "Skip 10"=function(env, xy) { irep <- get("irep", 
envir=env) assign("inxt", irep+10, envir=env) return(FALSE) }, "Skip 100"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+100, envir=env) return(FALSE) }, "Skip 1000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+1000, envir=env) return(FALSE) }, "Skip 10,000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+10000, envir=env) return(FALSE) }, "Skip 100,000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+100000, envir=env) return(FALSE) }, "Next Birth"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Birth"), envir=env) return(FALSE) }, "Next Death"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Death"), envir=env) return(FALSE) }, "Next Shift"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Shift"), envir=env) return(FALSE) }, "Exit Debugger"=function(env, xy) { assign("inxt", -1L, envir=env) return(FALSE) })) dataclickfun <- function(env, xy) { # function for handling clicks in the data window z <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) midX <- get("midX", envir=env) ce <- ce + (unlist(xy) - midX)/z assign("zoomcentre", ce, envir=env) return(TRUE) } dataredrawfun <- function(button, name, env) { # redraw data window X <- get("X", envir=env) BX <- get("BX", envir=env) W <- get("W", envir=env) midX <- get("midX", envir=env) z <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) scaleX <- shift(affine(shift(X, -ce), diag(c(z,z))), unlist(midX)) scaleW <- shift(affine(shift(W, -ce), diag(c(z,z))), unlist(midX)) scaleX <- scaleX[, BX] scaleW <- intersect.owin(scaleW, BX, fatal=FALSE) # redraw data in 'BX' if(!is.null(scaleW)) { if(z == 1 && is.rectangle(scaleW)) { plot(scaleW, add=TRUE, lwd=2) } else { plot(BX, add=TRUE, lty=3, border="red") if(!identical(BX, scaleW)) plot(scaleW, add=TRUE, invert=TRUE) } } if(!is.null(scaleX)) 
plot(scaleX, add=TRUE) invisible(NULL) } # functions to dump the current state, etc dumpfuns <- list( "Dump to file"=function(env, xy) { irep <- get("irep", envir=env) X <- get("X", envir=env) xname <- paste("dump", irep, sep="") assign(xname, X) fname <- paste(xname, ".rda", sep="") eval(substitute(save(x, file=y, compress=TRUE), list(x=xname, y=fname))) cat(paste("Saved to", sQuote(fname), "\n")) return(TRUE) }, "Print Info"=function(env, xy) { info <- get("info", envir=env) will.accept <- get("accepted", envir=env) with(info, { cat(paste("Iteration", irep, "\n")) cat("Simulation window:\n") print(Wsim) cat("Clipping window:\n") print(Wclip) cat("Current state:\n") print(X) propname <- decode.proptype(proptype) cat(paste("Proposal type:", propname, "\n")) prxy <- function(z) paren(paste(z, collapse=", ")) switch(propname, Reject = { }, Birth = { cat(paste("Birth of new point at location", prxy(proplocn), "\n")) }, Death = { Xi <- X[propindx] cat(paste("Death of data point", propindx, "located at", prxy(as.numeric(coords(Xi))), "\n")) }, Shift = { Xi <- X[propindx] cat(paste("Shift data point", propindx, "from current location", prxy(as.numeric(coords(Xi))), "to new location", prxy(proplocn), "\n")) }) cat(paste("Hastings ratio = ", numerator, "/", denominator, "=", numerator/denominator, "\n")) cat(paste("Fate of proposal:", if(will.accept) "Accepted" else "Rejected", "\n")) return(TRUE) }) }) # function to determine return value snoopexit <- function(env) { ans <- eval(quote(list(inxt=inxt, tnxt=tnxt, accepted=accepted)), envir=env) return(ans) } testit <- function() { rmhsnoop(Wsim=owin(), Wclip=square(0.7), R=0.1, xcoords=runif(40), ycoords=runif(40), mlevels=NULL, mcodes=NULL, irep=3, itype=1, proptype=1, proplocn=c(0.5, 0.5), propmark=0, propindx=0, numerator=42, denominator=24) } rmhsnoop }) spatstat/R/fiksel.R0000755000176200001440000001343613115271075013732 0ustar liggesusers# # # fiksel.R # # $Revision: 1.14 $ $Date: 2017/06/05 10:31:58 $ # # Fiksel 
interaction # # ee Stoyan Kendall Mcke 1987 p 161 # # ------------------------------------------------------------------- # Fiksel <- local({ # ......... auxiliary functions ........... fikselterms <- function(U, X, r, kappa, EqualPairs=NULL) { answer <- crossfikselterms(U, X, r, kappa) nU <- npoints(U) # subtract contrinbutions from identical pairs (exp(-0) = 1 for each) if(length(EqualPairs) > 0) { idcount <- as.integer(table(factor(EqualPairs[,2L], levels=1:nU))) answer <- answer - idcount } return(answer) } crossfikselterms <- function(X, Y, r, kappa) { stopifnot(is.numeric(r)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine out <- .C("Efiksel", nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), rrmax = as.double(r), kkappa = as.double(kappa), values = as.double(double(nX)), PACKAGE = "spatstat") answer <- integer(nX) answer[oX] <- out$values return(answer) } # ........ template object .............. 
## Template ("blank") interaction object for the Fiksel pairwise process.
## Components follow the spatstat "interact" object protocol; entries
## marked 'evaluated later' / 'filled in later' are completed when the
## user-level creator Fiksel() calls instantiate.interact().
BlankFiksel <-
  list(
    name     = "Fiksel process",
    creator  = "Fiksel",
    family   = "pairwise.family", # evaluated later
    ## Pairwise interaction potential at interpoint distances d:
    ## exponential decay exp(-kappa*d) within the interaction range r,
    ## and -Inf (forbidden configuration) inside the hard core d <= hc.
    pot    = function(d, par) {
      v <- (d <= par$r) * exp( - d * par$kappa)
      v[ d <= par$hc ] <- (-Inf)
      v
    },
    par    = list(r = NULL, hc = NULL, kappa=NULL), # filled in later
    parnames = c("interaction distance",
                 "hard core distance",
                 "rate parameter"),
    ## Self-starting rule: if the hard core distance 'hc' was left as NA,
    ## estimate it from the data pattern X as a fraction of the minimum
    ## nearest-neighbour distance; otherwise sanity-check the given value.
    selfstart = function(X, self) {
      # self starter for Fiksel
      nX <- npoints(X)
      if(nX < 2) {
        # not enough points to make any decisions
        return(self)
      }
      md <- minnndist(X)
      if(!is.na(hc <- self$par$hc)) {
        # value fixed by user or previous invocation
        # check it
        if(md < hc)
          warning(paste("Hard core distance is too large;",
                        "some data points will have zero probability"))
        return(self)
      }
      if(md == 0)
        warning(paste("Pattern contains duplicated points:",
                      "hard core must be zero"))
      # take hc = minimum interpoint distance * n/(n+1)
      hcX <- md * nX/(nX+1)
      Fiksel(r=self$par$r, hc = hcX, kappa=self$par$kappa)
    },
    ## Validate the model parameters (r, hc, kappa) at initialisation.
    ## check.1.real is a spatstat internal -- presumably it requires a
    ## single finite numeric value; confirm against its definition.
    init = function(self) {
      r <- self$par$r
      hc <- self$par$hc
      kappa <- self$par$kappa
      check.1.real(r)
      check.1.real(kappa)
      if(!is.na(hc)) {
        check.1.real(hc)
        stopifnot(hc > 0)
        stopifnot(r > hc)
      } else stopifnot(r > 0)
    },
    update = NULL,  # default OK
    print = NULL,   # default OK
    ## Map the single fitted canonical coefficient to the interpretable
    ## interaction strength 'a'.
    interpret = function(coeffs, self) {
      a <- as.numeric(coeffs[1L])
      return(list(param=list(a=a),
                  inames="interaction strength a",
                  printable=signif(a)))
    },
    ## A fitted Fiksel model is valid iff the interaction strength is finite.
    valid = function(coeffs, self) {
      a <- (self$interpret)(coeffs, self)$param$a
      return(is.finite(a))
    },
    ## Project an invalid fit to the nearest valid model: keep only the
    ## hard core, or Poisson if there is none.
    ## NOTE(review): self$par$hc may still be NA here if it was never
    ## resolved by selfstart; 'if(hc > 0)' would then fail -- confirm.
    project = function(coeffs, self) {
      if((self$valid)(coeffs, self)) return(NULL)
      hc <- self$par$hc
      if(hc > 0) return(Hardcore(hc)) else return(Poisson())
    },
    ## Effective interaction range: r in general, shrinking to the hard
    ## core distance when the fitted strength is negligible (body follows).
    irange = function(self, coeffs=NA, epsilon=0, ...)
{
      ## irange body: interaction range of the (possibly fitted) model
      r <- self$par$r
      hc <- self$par$hc
      if(anyNA(coeffs)) return(r)
      a <- coeffs[1L]
      ## if the fitted strength is numerically zero, only the hard core acts
      if(abs(a) <= epsilon) return(hc) else return(r)
    },
    version=NULL, # evaluated later
    # fast evaluation is available for the border correction only
    can.do.fast=function(X,correction,par) {
      return(all(correction %in% c("border", "none")))
    },
    ## Fast evaluator: computes the interaction terms directly via the
    ## helpers strausscounts()/fikselterms() instead of the generic
    ## pairwise-potential machinery; returns NULL to decline.
    fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) {
      # fast evaluator for Fiksel interaction
      if(!all(correction %in% c("border", "none")))
        return(NULL)
      if(spatstat.options("fasteval") == "test")
        message("Using fast eval for Fiksel")
      r <- potpars$r
      hc <- potpars$hc
      kappa <- potpars$kappa
      ## hclose counts neighbours within the hard core distance;
      ## any such neighbour forces the log-potential to -Inf
      hclose <- strausscounts(U, X, hc, EqualPairs)
      fikselbit <- fikselterms(U, X, r, kappa, EqualPairs)
      answer <- ifelseXB(hclose == 0, fikselbit, -Inf)
      return(matrix(answer, ncol=1))
    },
    Mayer=function(coeffs, self) {
      # second Mayer cluster integral
      a <- as.numeric(coeffs[1L])
      r <- self$par$r
      hc <- self$par$hc
      kappa <- self$par$kappa
      ## integrand 2*pi*x*(1 - exp(a*exp(-kappa*x))) over the soft-core band
      f <- function(x, kappa, a){
        2 * pi * x * (1 - exp(a * exp(-x * kappa)))
      }
      hardbit <- integrate(f=f, lower=hc, upper=r, a=a, kappa=kappa)
      mess <- hardbit[["message"]]
      if(!identical(mess, "OK")) {
        warning(mess)
        return(NA)
      }
      ## pi*hc^2 is the exact contribution of the hard core disc
      return(pi * hc^2 + hardbit$value)
    }
  )

  class(BlankFiksel) <- "interact"

  ## User-level creator: Fiksel interaction with range r, hard core hc
  ## (NA = estimate from data via selfstart) and rate parameter kappa.
  Fiksel <- function(r, hc=NA, kappa) {
    instantiate.interact(BlankFiksel, list(r = r, hc = hc, kappa=kappa))
  }

  Fiksel <- intermaker(Fiksel, BlankFiksel)

  Fiksel
})
spatstat/R/fitted.ppm.R0000755000176200001440000001072013115271075014520 0ustar liggesusers#
#  fitted.ppm.R
#
# method for 'fitted' for ppm objects
#
#   $Revision: 1.17 $   $Date: 2017/06/05 10:31:58 $
#
## Fitted intensity / conditional intensity values of a fitted point
## process model ('ppm' object), evaluated at the quadrature points.
fitted.ppm <- function(object, ..., type="lambda",
                       dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE,
                       drop=FALSE, check=TRUE, repair=TRUE, dropcoef=FALSE) {
  verifyclass(object, "ppm")
  if(check && damaged.ppm(object)) {
    if(!repair)
      stop("object format corrupted; try update(object, use.internal=TRUE)")
    message("object format corrupted; repairing it.")
    object <- update(object,
use.internal=TRUE)
  }
  if(leaveoneout) {
    ## Leave-one-out calculation for data points only
    if(missing(dataonly)) dataonly <- TRUE
    if(!dataonly)
      stop("Leave-one-out calculation requires dataonly=TRUE")
    if(!is.null(new.coef))
      stop("Leave-one-out calculation requires new.coef=NULL")
  }
  ## resolve the coefficient vector to use (new.coef overrides the fit)
  coeffs <- adaptcoef(new.coef, coef(object), drop=dropcoef)
  ## a stationary Poisson model has constant intensity exp(coef)
  uniform <- is.poisson.ppm(object) && no.trend.ppm(object)
  ## map user-supplied 'type' (partially matched) onto internal type
  typelist <- c("lambda", "cif",    "trend", "link")
  typevalu <- c("lambda", "lambda", "trend", "link")
  if(is.na(m <- pmatch(type, typelist)))
    stop(paste("Unrecognised choice of ", sQuote("type"),
               ": ", sQuote(type), sep=""))
  type <- typevalu[m]
  if(uniform) {
    lambda <- exp(coeffs[[1L]])
    Q <- quad.ppm(object, drop=drop)
    lambda <- rep.int(lambda, n.quad(Q))
  } else {
    glmdata <- getglmdata(object, drop=drop)
    glmfit <- getglmfit(object)
    Vnames <- object$internal$Vnames
    interacting <- (length(Vnames) != 0)
    # Modification of `glmdata' may be required
    if(interacting)
      switch(type,
             trend={
               # zero the interaction statistics
               glmdata[ , Vnames] <- 0
             },
             link=,
             lambda={
               # Find any dummy points with zero conditional intensity
               forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf)
               # exclude from predict.glm
               glmdata <- glmdata[!forbid, ]
             })
    # Compute predicted [conditional] intensity values
    changecoef <- !is.null(new.coef) || (object$method != "mpl")
    lambda <- GLMpredict(glmfit, glmdata, coeffs, changecoef=changecoef,
                         type = ifelse(type == "link", "link", "response"))
    # Note: the `newdata' argument is necessary in order to obtain
    # predictions at all quadrature points. If it is omitted then
    # we would only get predictions at the quadrature points j
    # where glmdata$SUBSET[j]=TRUE. Assuming drop=FALSE.
if(interacting && type=="lambda") {
      # reinsert zeroes for the dummy points that were excluded above
      lam <- numeric(length(forbid))
      lam[forbid] <- 0
      lam[!forbid] <- lambda
      lambda <- lam
    }
  }
  if(dataonly)
    lambda <- lambda[is.data(quad.ppm(object))]
  if(leaveoneout) {
    ## Perform leverage calculation
    dfb <- dfbetas(object, multitypeOK=TRUE)
    delta <- with(dfb, 'discrete')[with(dfb, 'is.atom'),,drop=FALSE]
    ## adjust fitted value at each data point by removing that point's
    ## (first-order) influence on the fitted coefficients
    mom <- model.matrix(object)[is.data(quad.ppm(object)),,drop=FALSE]
    if(type == "trend" && !uniform && interacting)
      mom[, Vnames] <- 0
    lambda <- lambda * exp(- rowSums(delta * mom))
  }
  lambda <- unname(as.vector(lambda))
  return(lambda)
}

## Resolve a replacement coefficient vector.
## A replacement for 'fitcoef' is extracted from 'new.coef':
##   - new.coef = NULL : use fitcoef unchanged;
##   - same length     : use new.coef as given;
##   - otherwise       : match by name; every name of fitcoef must appear
##     in new.coef, and (unless drop=TRUE) no extra names are allowed.
adaptcoef <- function(new.coef, fitcoef, drop=FALSE) {
  ## a replacement for 'fitcoef' will be extracted from 'new.coef'
  if(is.null(new.coef)) {
    coeffs <- fitcoef
  } else if(length(new.coef) == length(fitcoef)) {
    coeffs <- new.coef
  } else {
    fitnames <- names(fitcoef)
    newnames <- names(new.coef)
    if(is.null(newnames) || is.null(fitnames))
      stop(paste("Argument new.coef has wrong length",
                 length(new.coef), ": should be", length(fitcoef)),
           call.=FALSE)
    absentnames <- setdiff(fitnames, newnames)
    excessnames <- setdiff(newnames, fitnames)
    if((nab <- length(absentnames)) > 0)
      stop(paste(ngettext(nab, "Coefficient", "Coefficients"),
                 commasep(sQuote(absentnames)),
                 ngettext(nab, "is", "are"),
                 "missing from new.coef"),
           call.=FALSE)
    if(!drop && ((nex <- length(excessnames)) > 0))
      stop(paste(ngettext(nex, "Coefficient", "Coefficients"),
                 commasep(sQuote(excessnames)),
                 ## bug fix: the verb must agree with the number of
                 ## *excess* names (nex), not the number of absent
                 ## names (nab) from the previous branch
                 ngettext(nex, "is", "are"),
                 "present in new.coef but not in coef(object)"),
           call.=FALSE)
    #' extract only the relevant coefficients
    coeffs <- new.coef[fitnames]
  }
  return(coeffs)
}
spatstat/R/rmhtemper.R0000644000176200001440000000366413115225157014457 0ustar liggesusers#'
#' rmhtemper.R
#'
#'    $Revision: 1.3 $   $Date: 2015/10/21 09:06:57 $
#'

reheat <- local({

  ## raise x to the power alpha, where x may be a number, a pixel image,
  ## a function, or a list of such objects
  expon <- function(x, alpha) {
    if(is.null(x)) return(NULL)
    if(is.numeric(x)) return(x^alpha)
    if(is.im(x)) return(x^alpha)
    if(is.function(x)) {
      f <- x
      g <-
function(...) { f(...)^alpha }
      if(!inherits(f, "funxy")) return(g)
      return(funxy(g, W=as.owin(f)))
    }
    if(is.list(x)) return(lapply(x, expon))
    stop("Unrecognised format for x in x^alpha", call.=FALSE)
  }

  ## Raise a Metropolis-Hastings model to inverse temperature 'invtemp':
  ## the trend is raised to the power invtemp, and each cif's parameters
  ## are transformed by the 'temper' rule registered for that cif in
  ## spatstatRmhInfo.  Stops if any cif has no 'temper' rule.
  reheat <- function(model, invtemp) {
    model <- rmhmodel(model)
    cif <- model$cif
    par <- model$par
    w <- model$w
    trend <- model$trend
    types <- model$types
    newtrend <- expon(trend, invtemp)
    rules <- lapply(cif, spatstatRmhInfo)
    temperfuns <- lapply(rules, getElement, name="temper")
    if(any(bad <- sapply(temperfuns, is.null)))
      stop(paste("reheating the",
                 commasep(sQuote(cif[bad])),
                 ngettext(sum(bad), "cif", "cifs"),
                 "is not supported"))
    Ncif <- length(cif)
    if(Ncif == 1) {
      newpar <- temperfuns[[1]](par, invtemp)
    } else {
      newpar <- par
      for(i in 1:Ncif)
        newpar[[i]] <- temperfuns[[i]](par[[i]], invtemp)
    }
    newmodel <- rmhmodel(cif=cif, par=newpar, trend=newtrend,
                         w=w, types=types)
    return(newmodel)
  }

  reheat
})

## Simulated tempering: run rmh successively at each inverse temperature
## in 'invtemp' for the corresponding number of steps in 'nrep', each run
## continuing from the final state X of the previous one.
rtemper <- function(model, invtemp, nrep, ..., start=NULL, verbose=FALSE){
  ## data.frame() also checks that invtemp and nrep have matching lengths
  df <- data.frame(invtemp, nrep)
  ndf <- nrow(df)
  X <- NULL
  for(i in 1:ndf) {
    if(verbose)
      cat(paste("Running", nrep[i], "steps",
                "at inverse temperature", invtemp[i], "... "))
    model.i <- reheat(model, invtemp[i])
    X <- rmh(model.i, nrep=nrep[i], ...,
             start=start,
             overrideXstart = X,
             overrideclip = (i != ndf),
             verbose=FALSE)
    if(verbose) cat(" Done.\n")
  }
  return(X)
}
spatstat/R/density.psp.R0000755000176200001440000000437413115271075014736 0ustar liggesusers#
#
#  density.psp.R
#
#  $Revision: 1.9 $    $Date: 2017/06/05 10:31:58 $
#
#

## Kernel-smoothed intensity of a line segment pattern ('psp' object),
## returned as a pixel image.  Three computation methods are offered.
density.psp <- function(x, sigma, ..., edge=TRUE,
                        method=c("FFT", "C", "interpreted")) {
  verifyclass(x, "psp")
  method <- match.arg(method)
  w <- x$window
  n <- x$n
  if(missing(sigma))
    sigma <- 0.1 * diameter(w)  # default bandwidth: 10% of window diameter
  w <- as.mask(w, ...)
len <- lengths.psp(x)
  ## empty pattern (or all zero-length segments): intensity is identically 0
  if(n == 0 || all(len == 0))
    return(as.im(0, w))
  #
  ang <- angles.psp(x, directed=TRUE)
  ## pixel centre coordinates of the mask
  xy <- rasterxy.mask(w)
  xx <- xy$x
  yy <- xy$y
  switch(method,
         interpreted = {
           #' compute matrix contribution from each segment
           ## Rotate coordinates so segment i lies along the u1-axis;
           ## the Gaussian kernel then factorises into a dnorm term
           ## across the segment and a pnorm difference along it.
           coz <- cos(ang)
           zin <- sin(ang)
           for(i in seq_len(n)) {
             en <- x$ends[i,]
             dx <- xx - en$x0
             dy <- yy - en$y0
             u1 <- dx * coz[i] + dy * zin[i]
             u2 <- - dx * zin[i] + dy * coz[i]
             value <- dnorm(u2, sd=sigma) *
               (pnorm(u1, sd=sigma) - pnorm(u1-len[i], sd=sigma))
             totvalue <- if(i == 1L) value else (value + totvalue)
           }
           dens <- im(totvalue, w$xcol, w$yrow)
         },
         C = {
           #' C implementation of the above
           xs <- x$ends$x0
           ys <- x$ends$y0
           xp <- as.numeric(as.vector(xx))
           yp <- as.numeric(as.vector(yy))
           np <- length(xp)
           z <- .C("segdens",
                   sigma = as.double(sigma),
                   ns = as.integer(n),
                   xs = as.double(xs),
                   ys = as.double(ys),
                   alps = as.double(ang),
                   lens = as.double(len),
                   np = as.integer(np),
                   xp = as.double(xp),
                   yp = as.double(yp),
                   z = as.double(numeric(np)),
                   PACKAGE = "spatstat")
           dens <- im(z$z, w$xcol, w$yrow)
         },
         FFT = {
           ## pixellate the segments, convert to intensity per unit area,
           ## then blur with an isotropic Gaussian kernel
           L <- pixellate(x, ...)
           L <- L/with(L, xstep * ystep)
           dens <- blur(L, sigma, normalise=edge, bleed=FALSE)
         })
  unitname(dens) <- unitname(x)
  if(edge && method != "FFT") {
    ## edge correction: divide by the kernel mass retained in the window
    edg <- second.moment.calc(midpoints.psp(x), sigma, what="edge", ...)
    dens <- eval.im(dens/edg)
  }
  dens <- dens[x$window, drop=FALSE]
  return(dens)
}
spatstat/R/plot.fv.R0000755000176200001440000006575513115271120014047 0ustar liggesusers#
#  plot.fv.R (was: conspire.S)
#
#  $Revision: 1.128 $    $Date: 2016/12/30 01:44:07 $
#
#

## Deprecated old name for plot.fv; retained for back-compatibility.
conspire <- function(...) {
  .Deprecated("plot.fv", package="spatstat")
  plot.fv(...)
} plot.fv <- local({ hasonlyone <- function(x, amongst) { sum(all.vars(parse(text=x)) %in% amongst) == 1 } extendifvector <- function(a, n, nmore) { if(is.null(a)) return(a) if(length(a) == 1) return(a) return(c(a, rep(a[1], nmore))) } fixit <- function(a, n, a0, a00) { # 'a' is formal argument # 'a0' and 'a00' are default and fallback default # 'n' is number of values required if(is.null(a)) a <- if(!is.null(a0)) a0 else a00 if(length(a) == 1) return(rep.int(a, n)) else if(length(a) != n) stop(paste("Length of", short.deparse(substitute(a)), "does not match number of curves to be plotted")) else return(a) } pow10 <- function(x) { 10^x } clip.to.usr <- function() { usr <- par('usr') clip(usr[1], usr[2], usr[3], usr[4]) } plot.fv <- function(x, fmla, ..., subset=NULL, lty=NULL, col=NULL, lwd=NULL, xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, ylim.covers=NULL, legend=!add, legendpos="topleft", legendavoid=missing(legendpos), legendmath=TRUE, legendargs=list(), shade=fvnames(x, ".s"), shadecol="grey", add=FALSE, log="", mathfont=c("italic", "plain", "bold", "bolditalic"), limitsonly=FALSE) { xname <- if(is.language(substitute(x))) short.deparse(substitute(x)) else "" force(legendavoid) if(is.null(legend)) legend <- !add mathfont <- match.arg(mathfont) verifyclass(x, "fv") env.user <- parent.frame() indata <- as.data.frame(x) xlogscale <- (log %in% c("x", "xy", "yx")) ylogscale <- (log %in% c("y", "xy", "yx")) ## ---------------- determine plot formula ---------------- defaultplot <- missing(fmla) || is.null(fmla) if(defaultplot) fmla <- formula(x) ## This *is* the last possible moment, so... 
fmla <- as.formula(fmla, env=env.user) ## validate the variable names vars <- variablesinformula(fmla) reserved <- c(".", ".x", ".y", ".a", ".s") external <- !(vars %in% c(colnames(x), reserved)) if(any(external)) { sought <- vars[external] found <- unlist(lapply(sought, exists, envir=env.user, mode="numeric")) if(any(!found)) { nnot <- sum(!found) stop(paste(ngettext(nnot, "Variable", "Variables"), commasep(sQuote(sought[!found])), ngettext(nnot, "was", "were"), "not found")) } else { ## validate the found variables externvars <- lapply(sought, get, envir=env.user) isnum <- sapply(externvars, is.numeric) len <- lengths(externvars) ok <- isnum & (len == 1 | len == nrow(x)) if(!all(ok)) { nnot <- sum(!ok) stop(paste(ngettext(nnot, "Variable", "Variables"), commasep(sQuote(sought[!ok])), ngettext(nnot, "is", "are"), "not of the right format")) } } } ## Extract left hand side as given # lhs.original <- fmla[[2]] fmla.original <- fmla ## expand "." dotnames <- fvnames(x, ".") starnames <- fvnames(x, "*") umap <- fvexprmap(x) fmla <- eval(substitute(substitute(fom, um), list(fom=fmla, um=umap))) ## ------------------- extract data for plot --------------------- ## extract LHS and RHS of formula lhs <- fmla[[2]] rhs <- fmla[[3]] ## extract data lhsdata <- eval(lhs, envir=indata) rhsdata <- eval(rhs, envir=indata) ## reformat if(is.vector(lhsdata)) { lhsdata <- matrix(lhsdata, ncol=1) lhsvars <- all.vars(as.expression(lhs)) lhsvars <- lhsvars[lhsvars %in% names(x)] colnames(lhsdata) <- if(length(lhsvars) == 1) lhsvars else if(length(starnames) == 1 && (starnames %in% lhsvars)) starnames else paste(deparse(lhs), collapse="") } ## check lhs names exist lnames <- colnames(lhsdata) nc <- ncol(lhsdata) lnames0 <- paste("V", seq_len(nc), sep="") if(length(lnames) != nc) colnames(lhsdata) <- lnames0 else if(any(uhoh <- !nzchar(lnames))) colnames(lhsdata)[uhoh] <- lnames0[uhoh] lhs.names <- colnames(lhsdata) ## check whether each lhs column is associated with a single column of 
'x' ## that is one of the alternative versions of the function. ## This may be unreliable, as it depends on the ## column names assigned to lhsdata by eval() one.star <- unlist(lapply(lhs.names, hasonlyone, amongst=fvnames(x, "*"))) one.dot <- unlist(lapply(lhs.names, hasonlyone, amongst=dotnames)) explicit.lhs.names <- ifelse(one.star, lhs.names, "") explicit.lhs.dotnames <- ifelse(one.star & one.dot, lhs.names, "") ## check rhs data if(is.matrix(rhsdata)) stop("rhs of formula should yield a vector") rhsdata <- as.numeric(rhsdata) nplots <- ncol(lhsdata) allind <- 1:nplots ## ---------- extra plots may be implied by 'shade' ----------------- extrashadevars <- NULL if(!is.null(shade)) { ## select columns by name or number names(allind) <- explicit.lhs.names shind <- try(allind[shade]) if(inherits(shind, "try-error")) stop(paste("The argument shade should be a valid subset index", "for columns of x"), call.=FALSE) if(any(nbg <- is.na(shind))) { ## columns not included in formula: add them morelhs <- try(as.matrix(indata[ , shade[nbg], drop=FALSE])) if(inherits(morelhs, "try-error")) stop(paste("The argument shade should be a valid subset index", "for columns of x"), call.=FALSE) nmore <- ncol(morelhs) extrashadevars <- colnames(morelhs) if(defaultplot) { success <- TRUE } else if("." %in% variablesinformula(fmla.original)) { ## evaluate lhs of formula, expanding "." 
to shade names u <- if(length(extrashadevars) == 1) as.name(extrashadevars) else { as.call(lapply(c("cbind", extrashadevars), as.name)) } ux <- as.name(fvnames(x, ".x")) uy <- as.name(fvnames(x, ".y")) foo <- eval(substitute(substitute(fom, list(.=u, .x=ux, .y=uy)), list(fom=fmla.original))) dont.complain.about(u, ux, uy) lhsnew <- foo[[2]] morelhs <- eval(lhsnew, envir=indata) success <- identical(colnames(morelhs), extrashadevars) } else if(is.name(lhs) && as.character(lhs) %in% names(indata)) { ## lhs is the name of a single column in x ## expand the LHS explicit.lhs.names <- c(explicit.lhs.names, extrashadevars) ff <- paste("cbind", paren(paste(explicit.lhs.names, collapse=", ")), "~ 1") lhs <- lhs.of.formula(as.formula(ff)) success <- TRUE } else if(length(explicit.lhs.dotnames) > 1) { ## lhs = cbind(...) where ... are dotnames cbound <- paste0("cbind", paren(paste(explicit.lhs.dotnames, collapse=", "))) if(identical(deparse(lhs), cbound)) { success <- TRUE explicit.lhs.names <- union(explicit.lhs.names, extrashadevars) ff <- paste("cbind", paren(paste(explicit.lhs.names, collapse=", ")), "~ 1") lhs <- lhs.of.formula(as.formula(ff)) } else success <- FALSE } else success <- FALSE if(success) { ## add these columns to the plotting data lhsdata <- cbind(lhsdata, morelhs) shind[nbg] <- nplots + seq_len(nmore) lty <- extendifvector(lty, nplots, nmore) col <- extendifvector(col, nplots, nmore) lwd <- extendifvector(lwd, nplots, nmore) nplots <- nplots + nmore ## update the names one.star <- unlist(lapply(explicit.lhs.names, hasonlyone, amongst=fvnames(x, "*"))) one.dot <- unlist(lapply(explicit.lhs.names, hasonlyone, amongst=dotnames)) explicit.lhs.names <- ifelse(one.star, explicit.lhs.names, "") explicit.lhs.dotnames <- ifelse(one.star & one.dot, explicit.lhs.names, "") } else { ## cannot add columns warning(paste("Shade", ngettext(sum(nbg), "column", "columns"), commasep(sQuote(shade[nbg])), "were missing from the plot formula, and were omitted")) shade <- NULL 
extrashadevars <- NULL } } } ## -------------------- determine plotting limits ---------------------- ## restrict data to subset if desired if(!is.null(subset)) { keep <- if(is.character(subset)) { eval(parse(text=subset), envir=indata) } else eval(subset, envir=indata) lhsdata <- lhsdata[keep, , drop=FALSE] rhsdata <- rhsdata[keep] } ## determine x and y limits and clip data to these limits if(is.null(xlim) && add) { ## x limits are determined by existing plot xlim <- par("usr")[1:2] } if(!is.null(xlim)) { ok <- !is.finite(rhsdata) | (xlim[1] <= rhsdata & rhsdata <= xlim[2]) rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] } else { ## if we're using the default argument, use its recommended range if(rhs == fvnames(x, ".x")) { xlim <- attr(x, "alim") %orifnull% range(as.vector(rhsdata), finite=TRUE) if(xlogscale && xlim[1] <= 0) xlim[1] <- min(rhsdata[is.finite(rhsdata) & rhsdata > 0], na.rm=TRUE) ok <- !is.finite(rhsdata) | (rhsdata >= xlim[1] & rhsdata <= xlim[2]) rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] } else { ## actual range of values to be plotted if(xlogscale) { ok <- is.finite(rhsdata) & (rhsdata > 0) & matrowany(lhsdata > 0) xlim <- range(rhsdata[ok]) } else { xlim <- range(rhsdata, na.rm=TRUE) } } } if(is.null(ylim)) { yok <- is.finite(lhsdata) if(ylogscale) yok <- yok & (lhsdata > 0) ylim <- range(lhsdata[yok],na.rm=TRUE) } if(!is.null(ylim.covers)) ylim <- range(ylim, ylim.covers) ## return x, y limits only? if(limitsonly) return(list(xlim=xlim, ylim=ylim)) ## ------------- work out how to label the plot -------------------- ## extract plot labels, substituting function name labl <- fvlabels(x, expand=TRUE) ## create plot label map (key -> algebraic expression) map <- fvlabelmap(x) ## ......... label for x axis .................. if(is.null(xlab)) { argname <- fvnames(x, ".x") if(as.character(fmla)[3] == argname) { ## The x axis variable is the default function argument. 
ArgString <- fvlabels(x, expand=TRUE)[[argname]] xexpr <- parse(text=ArgString) ## use specified font xexpr <- fontify(xexpr, mathfont) ## Add name of unit of length? ax <- summary(unitname(x))$axis if(is.null(ax)) { xlab <- xexpr } else { xlab <- expression(VAR ~ COMMENT) xlab[[1]][[2]] <- xexpr[[1]] xlab[[1]][[3]] <- ax } } else { ## map ident to label xlab <- eval(substitute(substitute(rh, mp), list(rh=rhs, mp=map))) ## use specified font xlab <- fontify(xlab, mathfont) } } if(is.language(xlab) && !is.expression(xlab)) xlab <- as.expression(xlab) ## ......... label for y axis ................... leftside <- lhs if(ncol(lhsdata) > 1 || length(dotnames) == 1) { ## For labelling purposes only, simplify the LHS by ## replacing 'cbind(.....)' by '.' ## even if not all columns are included. leftside <- paste(as.expression(leftside)) eln <- explicit.lhs.dotnames eln <- eln[nzchar(eln)] cb <- if(length(eln) == 1) eln else { paste("cbind(", paste(eln, collapse=", "), ")", sep="") } compactleftside <- gsub(cb, ".", leftside, fixed=TRUE) ## Separately expand "." to cbind(.....) ## and ".x", ".y" to their real names dotdot <- c(dotnames, extrashadevars) cball <- if(length(dotdot) == 1) dotdot else { paste("cbind(", paste(dotdot, collapse=", "), ")", sep="") } expandleftside <- gsub(".x", fvnames(x, ".x"), leftside, fixed=TRUE) expandleftside <- gsub(".y", fvnames(x, ".y"), expandleftside, fixed=TRUE) expandleftside <- gsubdot(cball, expandleftside) ## convert back to language compactleftside <- parse(text=compactleftside)[[1]] expandleftside <- parse(text=expandleftside)[[1]] } else { compactleftside <- expandleftside <- leftside } ## construct label for y axis if(is.null(ylab)) { yl <- attr(x, "yexp") if(defaultplot && !is.null(yl)) { ylab <- yl } else { ## replace "." 
and short identifiers by plot labels ylab <- eval(substitute(substitute(le, mp), list(le=compactleftside, mp=map))) } } if(is.language(ylab)) { ## use specified font ylab <- fontify(ylab, mathfont) ## ensure it's an expression if(!is.expression(ylab)) ylab <- as.expression(ylab) } ## ------------------ start plotting --------------------------- ## create new plot if(!add) do.call(plot.default, resolve.defaults(list(xlim, ylim, type="n", log=log), list(xlab=xlab, ylab=ylab), list(...), list(main=xname))) ## handle 'type' = "n" giventype <- resolve.defaults(list(...), list(type=NA))$type if(identical(giventype, "n")) return(invisible(NULL)) ## process lty, col, lwd arguments opt0 <- spatstat.options("par.fv") lty <- fixit(lty, nplots, opt0$lty, 1:nplots) col <- fixit(col, nplots, opt0$col, 1:nplots) lwd <- fixit(lwd, nplots, opt0$lwd, 1) ## convert to greyscale? if(spatstat.options("monochrome")) col <- to.grey(col) if(!is.null(shade)) { ## shade region between critical boundaries ## extract relevant columns for shaded bands shdata <- lhsdata[, shind] if(!is.matrix(shdata) || ncol(shdata) != 2) stop("The argument shade should select two columns of x") ## truncate infinite values to plot limits if(any(isinf <- is.infinite(shdata))) { if(is.null(ylim)) { warning("Unable to truncate infinite values to the plot area") } else { shdata[isinf & (shdata == Inf)] <- ylim[2] shdata[isinf & (shdata == -Inf)] <- ylim[1] } } ## determine limits of shading shdata1 <- shdata[,1] shdata2 <- shdata[,2] ## plot grey polygon xpoly <- c(rhsdata, rev(rhsdata)) ypoly <- c(shdata1, rev(shdata2)) miss1 <- !is.finite(shdata1) miss2 <- !is.finite(shdata2) if(!any(broken <- (miss1 | miss2))) { ## single polygon clip.to.usr() polygon(xpoly, ypoly, border=shadecol, col=shadecol) } else { ## interrupted dat <- data.frame(rhsdata=rhsdata, shdata1=shdata1, shdata2=shdata2) serial <- cumsum(broken) lapply(split(dat, serial), function(z) { with(z, { xp <- c(rhsdata, rev(rhsdata)) yp <- c(shdata1, 
rev(shdata2)) clip.to.usr() polygon(xp, yp, border=shadecol, col=shadecol) }) }) ## save for use in placing legend okp <- !c(broken, rev(broken)) xpoly <- xpoly[okp] ypoly <- ypoly[okp] } ## overwrite graphical parameters lty[shind] <- 1 ## try to preserve the same type of colour specification if(is.character(col) && is.character(shadecol)) { ## character representations col[shind] <- shadecol } else if(is.numeric(col) && !is.na(sc <- paletteindex(shadecol))) { ## indices in colour palette col[shind] <- sc } else { ## convert colours to hexadecimal and edit relevant values col <- col2hex(col) col[shind] <- col2hex(shadecol) } ## remove these columns from further plotting allind <- allind[-shind] ## } else xpoly <- ypoly <- numeric(0) ## ----------------- plot lines ------------------------------ for(i in allind) { clip.to.usr() lines(rhsdata, lhsdata[,i], lty=lty[i], col=col[i], lwd=lwd[i]) } if(nplots == 1) return(invisible(NULL)) ## ---------------- determine legend ------------------------- key <- colnames(lhsdata) mat <- match(key, names(x)) keyok <- !is.na(mat) matok <- mat[keyok] legdesc <- rep.int("constructed variable", length(key)) legdesc[keyok] <- attr(x, "desc")[matok] leglabl <- lnames0 leglabl[keyok] <- labl[matok] ylab <- attr(x, "ylab") if(!is.null(ylab)) { if(is.language(ylab)) ylab <- flat.deparse(ylab) legdesc <- sprintf(legdesc, ylab) } ## compute legend info legtxt <- key if(legendmath) { legtxt <- leglabl if(defaultplot) { ## try to convert individual labels to expressions fancy <- try(parse(text=leglabl), silent=TRUE) if(!inherits(fancy, "try-error")) legtxt <- fancy } else { ## try to navigate the parse tree fancy <- try(fvlegend(x, expandleftside), silent=TRUE) if(!inherits(fancy, "try-error")) legtxt <- fancy } } if(is.expression(legtxt) || is.language(legtxt) || all(sapply(legtxt, is.language))) legtxt <- fontify(legtxt, mathfont) ## --------------- handle legend plotting ----------------------------- if(identical(legend, TRUE)) { ## 
legend will be plotted ## Basic parameters of legend legendxpref <- if(identical(legendpos, "float")) NULL else legendpos optparfv <- spatstat.options("par.fv")$legendargs %orifnull% list() legendspec <- resolve.defaults(legendargs, list(lty=lty, col=col, lwd=lwd), optparfv, list(x=legendxpref, legend=legtxt, inset=0.05, y.intersp=if(legendmath) 1.3 else 1), .StripNull=TRUE) tB <- dev.capabilities()$transparentBackground if(!any(names(legendspec) == "bg") && !is.na(tB) && !identical(tB, "no")) legendspec$bg <- "transparent" if(legendavoid || identical(legendpos, "float")) { ## Automatic determination of legend position ## Assemble data for all plot objects linedata <- list() xmap <- if(xlogscale) log10 else identity ymap <- if(ylogscale) log10 else identity inv.xmap <- if(xlogscale) pow10 else identity inv.ymap <- if(ylogscale) pow10 else identity for(i in seq_along(allind)) linedata[[i]] <- list(x=xmap(rhsdata), y=ymap(lhsdata[,i])) polydata <- if(length(xpoly) > 0) list(x=xmap(xpoly), y=ymap(ypoly)) else NULL #' ensure xlim, ylim define a box boxXlim <- if(diff(xlim) > 0) xlim else par('usr')[1:2] boxYlim <- if(diff(ylim) > 0) ylim else par('usr')[3:4] #' objects <- assemble.plot.objects(xmap(boxXlim), ymap(boxYlim), lines=linedata, polygon=polydata) ## find best position to avoid them legendbest <- findbestlegendpos(objects, preference=legendpos, legendspec=legendspec) ## handle log scale if((xlogscale || ylogscale) && checkfields(legendbest, c("x", "xjust", "yjust"))) { ## back-transform x, y coordinates legendbest$x$x <- inv.xmap(legendbest$x$x) legendbest$x$y <- inv.ymap(legendbest$x$y) } } else legendbest <- list() ## ********** plot legend ************************* if(!is.null(legend) && legend) do.call(graphics::legend, resolve.defaults(legendargs, legendbest, legendspec, .StripNull=TRUE)) } ## convert labels back to character labl <- paste.expr(legtxt) labl <- gsub(" ", "", labl) ## return legend info df <- data.frame(lty=lty, col=col, key=key, 
label=labl, meaning=legdesc, row.names=key) return(invisible(df)) } plot.fv }) assemble.plot.objects <- function(xlim, ylim, ..., lines=NULL, polygon=NULL) { # Take data that would have been passed to the commands 'lines' and 'polygon' # and form corresponding geometrical objects. objects <- list() if(!is.null(lines)) { if(is.psp(lines)) { objects <- list(lines) } else { if(checkfields(lines, c("x", "y"))) { lines <- list(lines) } else if(!all(unlist(lapply(lines, checkfields, L=c("x", "y"))))) stop("lines should be a psp object, a list(x,y) or a list of list(x,y)") W <- owin(xlim, ylim) for(i in seq_along(lines)) { lines.i <- lines[[i]] x.i <- lines.i$x y.i <- lines.i$y n <- length(x.i) if(length(y.i) != n) stop(paste(paste("In lines[[", i, "]]", sep=""), "the vectors x and y have unequal length")) if(!all(ok <- (is.finite(x.i) & is.finite(y.i)))) { x.i <- x.i[ok] y.i <- y.i[ok] n <- sum(ok) } segs.i <- psp(x.i[-n], y.i[-n], x.i[-1], y.i[-1], W, check=FALSE) objects <- append(objects, list(segs.i)) } } } if(!is.null(polygon)) { # Add filled polygon pol <- polygon[c("x", "y")] ok <- with(pol, is.finite(x) & is.finite(y)) if(!all(ok)) pol <- with(pol, list(x=x[ok], y=y[ok])) if(Area.xypolygon(pol) < 0) pol <- lapply(pol, rev) P <- try(owin(poly=pol, xrange=xlim, yrange=ylim, check=FALSE)) if(!inherits(P, "try-error")) objects <- append(objects, list(P)) } return(objects) } findbestlegendpos <- local({ # Given a list of geometrical objects, find the best position # to avoid them. 
thefunction <- function(objects, show=FALSE, aspect=1, bdryok=TRUE, preference="float", verbose=FALSE, legendspec=NULL) { # find bounding box W <- do.call(boundingbox, lapply(objects, as.rectangle)) # convert to common box objects <- lapply(objects, rebound, rect=W) # comp # rescale x and y axes so that bounding box has aspect ratio 'aspect' aspectW <- with(W, diff(yrange)/diff(xrange)) s <- aspect/aspectW mat <- diag(c(1, s)) invmat <- diag(c(1, 1/s)) scaled.objects <- lapply(objects, affine, mat=mat) scaledW <- affine(W, mat=mat) if(verbose) { cat("Scaled space:\n") print(scaledW) } # pixellate the scaled objects pix.scal.objects <- lapply(scaled.objects, asma) # apply distance transforms in scaled space D1 <- distmap(pix.scal.objects[[1]]) Dlist <- lapply(pix.scal.objects, distmap, xy=list(x=D1$xcol, y=D1$yrow)) # distance transform of superposition D <- im.apply(Dlist, min) if(!bdryok) { # include distance to boundary B <- attr(D1, "bdry") D <- eval.im(pmin.int(D, B)) } if(show) { plot(affine(D, mat=invmat), add=TRUE) lapply(lapply(scaled.objects, affine, mat=invmat), plot, add=TRUE) } if(preference != "float") { # evaluate preferred location (check for collision) if(!is.null(legendspec)) { # pretend to plot the legend as specified legout <- do.call(graphics::legend, append(legendspec, list(plot=FALSE))) # determine bounding box legbox <- with(legout$rect, owin(c(left, left+w), c(top-h, top))) scaledlegbox <- affine(legbox, mat=mat) # check for collision Dmin <- min(D[scaledlegbox]) if(Dmin >= 0.02) { # no collision: stay at preferred location. Exit. return(list(x=preference)) } # collision occurred! } else { # no legend information. 
# Pretend legend is 15% of plot width and height xr <- scaledW$xrange yr <- scaledW$yrange testloc <- switch(preference, topleft = c(xr[1],yr[2]), top = c(mean(xr), yr[2]), topright = c(xr[2], yr[2]), right = c(xr[2], mean(yr)), bottomright = c(xr[2], yr[1]), bottom = c(mean(xr), yr[1]), bottomleft = c(xr[1], yr[1]), left = c(xr[1], mean(yr)), center = c(mean(xr), mean(yr)), NULL) if(!is.null(testloc)) { # look up distance value at preferred location testpat <- ppp(x=testloc[1], y=testloc[2], xr, yr, check=FALSE) val <- safelookup(D, testpat) crit <- 0.15 * min(diff(xr), diff(yr)) if(verbose) cat(paste("val=",val, ", crit=", crit, "\n")) if(val > crit) { # no collision: stay at preferred location. Exit. return(list(x=preference)) } # collision occurred! } } # collision occurred! } # find location of max locmax <- which(D$v == max(D), arr.ind=TRUE) locmax <- unname(locmax[1,]) pos <- list(x=D$xcol[locmax[2]], y=D$yrow[locmax[1]]) pos <- affinexy(pos, mat=invmat) if(show) points(pos) # determine justification of legend relative to this point # to avoid crossing edges of plot xrel <- (pos$x - W$xrange[1])/diff(W$xrange) yrel <- (pos$y - W$yrange[1])/diff(W$yrange) xjust <- if(xrel < 0.1) 0 else if(xrel > 0.9) 1 else 0.5 yjust <- if(yrel < 0.1) 0 else if(yrel > 0.9) 1 else 0.5 # out <- list(x=pos, xjust=xjust, yjust=yjust) return(out) } asma <- function(z) { if(is.owin(z)) as.mask(z) else if(is.psp(z)) as.mask.psp(z) else NULL } callit <- function(...) 
{ rslt <- try(thefunction(...)) if(!inherits(rslt, "try-error")) return(rslt) return(list()) } callit }) spatstat/R/Math.linim.R0000644000176200001440000000331613160160256014444 0ustar liggesusers## ## Math.linim.R ## ## $Revision: 1.5 $ $Date: 2017/09/19 09:16:08 $ ## Ops.linim <- function(e1,e2=NULL){ unary <- nargs() == 1L if(unary){ if(!is.element(.Generic, c("!", "-", "+"))) stop("Unary usage is undefined for this operation for images.") callstring <- paste(.Generic, "e1") } else { callstring <- paste("e1", .Generic, "e2") } expr <- parse(text = callstring) return(do.call(eval.linim, list(expr = expr))) } Math.linim <- function(x, ...){ m <- do.call(.Generic, list(x[,,drop=FALSE], ...)) Z <- im(m, xcol = x$xcol, yrow = x$yrow, xrange = x$xrange, yrange = x$yrange, unitname = unitname(x)) df <- attr(x, "df") df$values <- do.call(.Generic, list(df$values, ...)) L <- attr(x, "L") rslt <- linim(L, Z, df=df, restrict=FALSE) return(rslt) } Summary.linim <- function(..., na.rm, finite){ if(missing(finite)) finite <- FALSE if(missing(na.rm)) na.rm <- FALSE argh <- list(...) 
values <- lapply(argh, "[") dfvalues <- if(is.element(.Generic, c("sum", "prod"))) list() else lapply(lapply(argh, attr, which="df"), getElement, name="values") vals <- as.numeric(unlist(c(values, dfvalues))) if(finite) { vals <- vals[is.finite(vals)] } else if(na.rm) { vals <- vals[!is.na(vals)] } do.call(.Generic, list(vals)) } Complex.linim <- function(z){ L <- attr(z, "L") df <- attr(z, "df") m <- do.call(.Generic, list(z=z[drop=TRUE])) Z <- im(m, xcol = z$xcol, yrow = z$yrow, xrange = z$xrange, yrange = z$yrange, unitname = unitname(z)) df$values <- do.call(.Generic, list(z=df$values)) rslt <- linim(L, Z, df=df, restrict=FALSE) return(rslt) } spatstat/R/distanxD.R0000755000176200001440000001421313115271075014225 0ustar liggesusers# # distanxD.R # # $Revision: 1.7 $ $Date: 2017/06/05 10:31:58 $ # # Interpoint distances for multidimensional points # # Methods for pairdist, nndist, nnwhich, crossdist # pairdist.ppx <- function(X, ...) { verifyclass(X, "ppx") # extract point coordinates coo <- as.matrix(coords(X, ...)) n <- nrow(coo) if(n == 0) return(matrix(numeric(0), nrow=0, ncol=0)) return(as.matrix(dist(coo))) } crossdist.ppx <- function(X, Y, ...) 
{ verifyclass(X, "ppx") verifyclass(Y, "ppx") # extract point coordinates cooX <- as.matrix(coords(X, ...)) cooY <- as.matrix(coords(Y, ...)) nX <- nrow(cooX) nY <- nrow(cooY) if(ncol(cooX) != ncol(cooY)) stop("X and Y have different dimensions (different numbers of coordinates)") if(nX == 0 || nY == 0) return(matrix(numeric(0), nrow=nX, ncol=nY)) coo <- rbind(cooX, cooY) dis <- as.matrix(dist(coo)) ans <- dis[1:nX, nX + (1:nY)] return(ans) } nndist.ppx <- function(X, ..., k=1) { verifyclass(X, "ppx") # extract point coordinates coo <- as.matrix(coords(X, ...)) n <- nrow(coo) m <- ncol(coo) if(m == 0) { warning("nndist.ppx: Zero-dimensional coordinates: returning NA") if(length(k) == 1L) return(rep.int(NA_real_, n)) else return(matrix(NA_real_, n, length(k))) } # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1L) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # trivial cases if(n <= 1L) { # empty pattern => return numeric(0) # or pattern with only 1 point => return Inf nnd <- matrix(Inf, nrow=n, ncol=kmax) nnd <- nnd[,k, drop=TRUE] return(nnd) } # number of neighbours that are well-defined kmaxcalc <- min(n-1L, kmax) # calculate k-nn distances for k <= kmaxcalc if(kmaxcalc == 1L) { # calculate nearest neighbour distance only nnd<-numeric(n) o <- fave.order(coo[,1L]) big <- sqrt(.Machine$double.xmax) Cout <- .C("nndMD", n= as.integer(n), m=as.integer(m), x= as.double(t(coo[o,])), nnd= as.double(nnd), as.double(big), PACKAGE = "spatstat") nnd[o] <- Cout$nnd } else { # case kmaxcalc > 1 nnd<-numeric(n * kmaxcalc) o <- fave.order(coo[,1L]) big <- sqrt(.Machine$double.xmax) Cout <- .C("knndMD", n = as.integer(n), m = as.integer(m), kmax = as.integer(kmaxcalc), x = as.double(t(coo[o,])), nnd = as.double(nnd), huge = 
as.double(big), PACKAGE = "spatstat") nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc) nnd[o, ] <- matrix(Cout$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE) } # post-processing if(kmax > kmaxcalc) { # add columns of Inf's infs <- matrix(as.numeric(Inf), nrow=n, ncol=kmax-kmaxcalc) nnd <- cbind(nnd, infs) } if(length(k) < kmax) { # select only the specified columns nnd <- nnd[, k, drop=TRUE] } return(nnd) } nnwhich.ppx <- function(X, ..., k=1) { verifyclass(X, "ppx") # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1L) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # extract point coordinates coo <- coords(X, ...) n <- nrow(coo) m <- ncol(coo) if(m == 0) { warning("nnwhich.ppx: Zero-dimensional coordinates: returning NA") if(length(k) == 1L) return(rep.int(NA_real_, n)) else return(matrix(NA_real_, n, length(k))) } # special cases if(n <= 1L) { # empty pattern => return integer(0) # or pattern with only 1 point => return NA nnw <- matrix(NA_integer_, nrow=n, ncol=kmax) nnw <- nnw[,k, drop=TRUE] return(nnw) } # number of neighbours that are well-defined kmaxcalc <- min(n-1L, kmax) # identify k-nn for k <= kmaxcalc if(kmaxcalc == 1L) { # identify nearest neighbour only nnw <- integer(n) o <- fave.order(coo[,1L]) big <- sqrt(.Machine$double.xmax) Cout <- .C("nnwMD", n = as.integer(n), m = as.integer(m), x = as.double(t(coo[o,])), nnd = as.double(numeric(n)), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") witch <- Cout$nnwhich if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") nnw[o] <- o[witch] } else { # case kmaxcalc > 1 nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc) o <- 
fave.order(coo[,1L]) big <- sqrt(.Machine$double.xmax) Cout <- .C("knnwMD", n = as.integer(n), m = as.integer(m), kmax = as.integer(kmaxcalc), x = as.double(t(coo[o,])), nnd = as.double(numeric(n * kmaxcalc)), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") witch <- Cout$nnwhich witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE) if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") # convert back to original ordering nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc) } # post-processing if(kmax > kmaxcalc) { # add columns of NA's nas <- matrix(NA_integer_, nrow=n, ncol=kmax-kmaxcalc) nnw <- cbind(nnw, nas) } if(length(k) < kmax) { # select only the specified columns nnw <- nnw[, k, drop=TRUE] } return(nnw) } spatstat/R/clip.psp.R0000755000176200001440000001655413115271075014211 0ustar liggesusers# # clip.psp.R # # $Revision: 1.19 $ $Date: 2017/06/05 10:31:58 $ # # ######################################################## # clipping operation (for subset) ######################################################## clip.psp <- function(x, window, check=TRUE, fragments=TRUE) { verifyclass(x, "psp") verifyclass(window, "owin") if(check && !is.subset.owin(window, x$window)) warning("The clipping window is not a subset of the window containing the line segment pattern x") if(x$n == 0) { emptypattern <- psp(numeric(0), numeric(0), numeric(0), numeric(0), window=window, marks=x$marks) return(emptypattern) } switch(window$type, rectangle={ result <- cliprect.psp(x, window, fragments=fragments) }, polygonal={ result <- clippoly.psp(x, window, fragments=fragments) }, mask={ result <- clippoly.psp(x, as.polygonal(window), fragments=fragments) result$window <- window }) return(result) } ##### # # clipping to a rectangle # cliprect.psp <- function(x, window, fragments=TRUE) { verifyclass(x, "psp") verifyclass(window, "owin") ends <- x$ends marx 
<- marks(x, dfok=TRUE) # find segments which are entirely inside the window # (by convexity) in0 <- inside.owin(ends$x0, ends$y0, window) in1 <- inside.owin(ends$x1, ends$y1, window) ok <- in0 & in1 # if all segments are inside, return them if(all(ok)) return(as.psp(ends, window=window, marks=marx, check=FALSE)) # otherwise, store those segments which are inside the window ends.inside <- ends[ok, , drop=FALSE] marks.inside <- marx %msub% ok x.inside <- as.psp(ends.inside, window=window, marks=marks.inside, check=FALSE) if(!fragments) return(x.inside) # now consider the rest ends <- ends[!ok, , drop=FALSE] in0 <- in0[!ok] in1 <- in1[!ok] marx <- marx %msub% (!ok) # first clip segments to the range x \in [xmin, xmax] # use parametric coordinates small <- function(x) { abs(x) <= .Machine$double.eps } tvalue <- function(z0, z1, zt) { y1 <- z1 - z0 yt <- zt - z0 tval <- ifelseAX(small(y1), 0.5, yt/y1) betwee <- (yt * (zt - z1)) <= 0 result <- ifelseXB(betwee, tval, NA) return(result) } between <- function(x, r) { ((x-r[1L]) * (x-r[2L])) <= 0 } tx <- cbind(ifelse0NA(between(ends$x0, window$xrange)), ifelse1NA(between(ends$x1, window$xrange)), tvalue(ends$x0, ends$x1, window$xrange[1L]), tvalue(ends$x0, ends$x1, window$xrange[2L])) # discard segments which do not lie in the x range nx <- apply(!is.na(tx), 1L, sum) ok <- (nx >= 2) if(!any(ok)) return(x.inside) ends <- ends[ok, , drop=FALSE] tx <- tx[ok, , drop=FALSE] in0 <- in0[ok] in1 <- in1[ok] marx <- marx %msub% ok # Clip the segments to the x range tmin <- apply(tx, 1L, min, na.rm=TRUE) tmax <- apply(tx, 1L, max, na.rm=TRUE) dx <- ends$x1 - ends$x0 dy <- ends$y1 - ends$y0 ends.xclipped <- data.frame(x0=ends$x0 + tmin * dx, y0=ends$y0 + tmin * dy, x1=ends$x0 + tmax * dx, y1=ends$y0 + tmax * dy) # Now clip the segments to the range y \in [ymin, ymax] ends <- ends.xclipped in0 <- inside.owin(ends$x0, ends$y0, window) in1 <- inside.owin(ends$x1, ends$y1, window) ty <- cbind(ifelse0NA(in0), ifelse1NA(in1), tvalue(ends$y0, 
ends$y1, window$yrange[1L]), tvalue(ends$y0, ends$y1, window$yrange[2L])) # discard segments which do not lie in the y range ny <- apply(!is.na(ty), 1L, sum) ok <- (ny >= 2) if(!any(ok)) return(x.inside) ends <- ends[ok, , drop=FALSE] ty <- ty[ok, , drop=FALSE] in0 <- in0[ok] in1 <- in1[ok] marx <- marx %msub% ok # Clip the segments to the y range tmin <- apply(ty, 1L, min, na.rm=TRUE) tmax <- apply(ty, 1L, max, na.rm=TRUE) dx <- ends$x1 - ends$x0 dy <- ends$y1 - ends$y0 ends.clipped <- data.frame(x0=ends$x0 + tmin * dx, y0=ends$y0 + tmin * dy, x1=ends$x0 + tmax * dx, y1=ends$y0 + tmax * dy) marks.clipped <- marx # OK - segments clipped # Put them together with the unclipped ones ends.all <- rbind(ends.inside, ends.clipped) marks.all <- marks.inside %mapp% marks.clipped as.psp(ends.all, window=window, marks=marks.all, check=FALSE) } ############################ # # clipping to a polygonal window # clippoly.psp <- function(s, window, fragments=TRUE) { verifyclass(s, "psp") verifyclass(window, "owin") stopifnot(window$type == "polygonal") marx <- marks(s) has.marks <- !is.null(marx) eps <- .Machine$double.eps # find the intersection points between segments and window edges ns <- s$n es <- s$ends x0s <- es$x0 y0s <- es$y0 dxs <- es$x1 - es$x0 dys <- es$y1 - es$y0 bdry <- edges(window) nw <- bdry$n ew <- bdry$ends x0w <- ew$x0 y0w <- ew$y0 dxw <- ew$x1 - ew$x0 dyw <- ew$y1 - ew$y0 out <- .C("xysegint", na=as.integer(ns), x0a=as.double(x0s), y0a=as.double(y0s), dxa=as.double(dxs), dya=as.double(dys), nb=as.integer(nw), x0b=as.double(x0w), y0b=as.double(y0w), dxb=as.double(dxw), dyb=as.double(dyw), eps=as.double(eps), xx=as.double(numeric(ns * nw)), yy=as.double(numeric(ns * nw)), ta=as.double(numeric(ns * nw)), tb=as.double(numeric(ns * nw)), ok=as.integer(integer(ns * nw)), PACKAGE = "spatstat") hitting <- (matrix(out$ok, ns, nw) != 0) ts <- matrix(out$ta, ns, nw) anyhit <- matrowany(hitting) if(!fragments) { #' retain only segments that avoid the boundary entirely 
leftin <- inside.owin(es$x0, es$y0, window) rightin <- inside.owin(es$x1, es$y1, window) ok <- !anyhit & leftin & rightin return(as.psp(es[ok,,drop=FALSE], window=window, marks=marx %msub% ok, check=FALSE)) } # form all the chopped segments (whether in or out) chopped <- s[numeric(0)] chopped$window <- boundingbox(s$window, window) for(seg in seq_len(ns)) { segment <- s$ends[seg, , drop=FALSE] if(!anyhit[seg]) { # no intersection with boundary - add single segment chopped$ends <- rbind(chopped$ends, segment) if(has.marks) chopped$marks <- (chopped$marks) %mapp% (marx %msub% seg) } else { # crosses boundary - add several pieces tvals <- ts[seg,] tvals <- sort(tvals[hitting[seg,]]) x0 <- segment$x0 dx <- segment$x1 - x0 y0 <- segment$y0 dy <- segment$y1 - y0 newones <- data.frame(x0 = x0 + c(0,tvals) * dx, y0 = y0 + c(0,tvals) * dy, x1 = x0 + c(tvals,1) * dx, y1 = y0 + c(tvals,1) * dy) chopped$ends <- rbind(chopped$ends, newones) if(has.marks) { hitmarks <- marx %msub% seg newmarks <- hitmarks %mrep% nrow(newones) chopped$marks <- (chopped$marks) %mapp% newmarks } } } chopped$n <- nrow(chopped$ends) # select those chopped segments which are inside the window mid <- midpoints.psp(chopped) ins <- inside.owin(mid$x, mid$y, window) retained <- chopped[ins] retained$window <- window return(retained) } spatstat/R/markcorr.R0000755000176200001440000006532713115271120014272 0ustar liggesusers## ## ## markcorr.R ## ## $Revision: 1.77 $ $Date: 2016/02/16 01:39:12 $ ## ## Estimate the mark correlation function ## and related functions ## ## ------------------------------------------------------------------------ markvario <- local({ halfsquarediff <- function(m1, m2) { ((m1-m2)^2)/2 } assigntheo <- function(x, value) { x$theo <- value; return(x) } markvario <- function(X, correction=c("isotropic", "Ripley", "translate"), r=NULL, method="density", ..., normalise=FALSE) { m <- onecolumn(marks(X)) if(!is.numeric(m)) stop("Marks are not numeric") if(missing(correction)) correction 
<- NULL ## Compute estimates v <- markcorr(X, f=halfsquarediff, r=r, correction=correction, method=method, normalise=normalise, ...) if(is.fv(v)) v <- anylist(v) ## adjust theoretical value and fix labels theoval <- if(normalise) 1 else var(m) for(i in seq_len(length(v))) { v[[i]]$theo <- theoval v[[i]] <- rebadge.fv(v[[i]], quote(gamma(r)), "gamma") } if(length(v) == 1) v <- v[[1]] return(v) } markvario }) markconnect <- local({ indicateij <- function(m1, m2, i, j) { (m1 == i) & (m2 == j) } markconnect <- function(X, i, j, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=FALSE) { stopifnot(is.ppp(X) && is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1] if(missing(j)) j <- lev[2] ## compute estimates p <- markcorr(X, f=indicateij, r=r, correction=correction, method=method, ..., fargs=list(i=i, j=j), normalise=normalise) ## alter theoretical value and fix labels if(!normalise) { pipj <- mean(marx==i) * mean(marx==j) p$theo <- pipj } else { p$theo <- 1 } p <- rebadge.fv(p, new.ylab=substitute(p[i,j](r), list(i=paste(i),j=paste(j))), new.fname=c("p", paste0("list(", i, ",", j, ")")), new.yexp=substitute(p[list(i,j)](r), list(i=paste(i),j=paste(j)))) return(p) } markconnect }) Emark <- local({ f1 <- function(m1, m2) { m1 } Emark <- function(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=FALSE) { stopifnot(is.ppp(X) && is.marked(X)) marx <- marks(X) isvec <- is.vector(marx) && is.numeric(marx) isdf <- is.data.frame(marx) && all(sapply(as.list(marx), is.numeric)) if(!(isvec || isdf)) stop("All marks of X should be numeric") if(missing(correction)) correction <- NULL E <- markcorr(X, f1, r=r, correction=correction, method=method, ..., normalise=normalise) if(isvec) { E <- rebadge.fv(E, quote(E(r)), "E") } else { E[] <- lapply(E, rebadge.fv, new.ylab=quote(E(r)), new.fname="E") } return(E) } Emark }) Vmark <- 
local({ f2 <- function(m1, m2) { m1^2 } Vmark <- function(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=FALSE) { if(missing(correction)) correction <- NULL E <- Emark(X, r=r, correction=correction, method=method, ..., normalise=FALSE) E2 <- markcorr(X, f2, r=E$r, correction=correction, method=method, ..., normalise=FALSE) if(normalise) sig2 <- var(marks(X)) if(is.fv(E)) { E <- list(E) E2 <- list(E2) } V <- list() for(i in seq_along(E)) { Ei <- E[[i]] E2i <- E2[[i]] Vi <- eval.fv(E2i - Ei^2) if(normalise) Vi <- eval.fv(Vi/sig2[i,i]) Vi <- rebadge.fv(Vi, quote(V(r)), "V") attr(Vi, "labl") <- attr(Ei, "labl") V[[i]] <- Vi } if(length(V) == 1) return(V[[1]]) V <- as.anylist(V) names(V) <- colnames(marks(X)) return(V) } Vmark }) ############## workhorses 'markcorr' and 'markcorrint' #################### markcorrint <- Kmark <- function(X, f=NULL, r=NULL, correction=c("isotropic", "Ripley", "translate"), ..., f1=NULL, normalise=TRUE, returnL=FALSE, fargs=NULL) { ## Computes the analogue of Kest(X) ## where each pair (x_i,x_j) is weighted by w(m_i,m_j) ## ## If multiplicative=TRUE then w(u,v) = f(u) f(v) ## If multiplicative=FALSE then w(u,v) = f(u, v) ## stopifnot(is.ppp(X) && is.marked(X)) is.marked(X, dfok=FALSE) W <- Window(X) ## if(identical(sys.call()[[1]], as.name('markcorrint'))) warn.once('markcorrint', "markcorrint will be deprecated in future versions of spatstat;", "use the equivalent function Kmark") ## validate test function h <- check.testfun(f, f1, X) f <- h$f f1 <- h$f1 ftype <- h$ftype multiplicative <- ftype %in% c("mul", "product") ## ## check corrections correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", 
translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) isborder <- correction %in% c("border", "bord.modif") if(any(isborder) && !multiplicative) { whinge <- paste("Border correction is not valid unless", "test function is of the form f(u,v) = f1(u)*f1(v)") correction <- correction[!isborder] if(length(correction) == 0) stop(whinge) else warning(whinge) } ## estimated intensity lambda <- intensity(X) mX <- marks(X) switch(ftype, mul={ wt <- mX/lambda K <- Kinhom(X, r=r, reciplambda=wt, correction=correction, ..., renormalise=FALSE) Ef2 <- mean(mX)^2 }, equ={ fXX <- outer(mX, mX, "==") wt <- fXX/lambda^2 K <- Kinhom(X, r=r, reciplambda2=wt, correction=correction, ..., renormalise=FALSE) mtable <- table(mX) Ef2 <- sum(mtable^2)/length(mX)^2 }, product={ f1X <- do.call(f1, append(list(mX), fargs)) wt <- f1X/lambda K <- Kinhom(X, r=r, reciplambda=wt, correction=correction, ..., renormalise=FALSE) Ef2 <- mean(f1X)^2 }, general={ fXX <- do.call(outer, append(list(mX, mX, f), fargs)) wt <- fXX/lambda^2 K <- Kinhom(X, r=r, reciplambda2=wt, correction=correction, ..., renormalise=FALSE) Ef2 <- mean(fXX) }) K$theo <- K$theo * Ef2 labl <- attr(K, "labl") if(normalise) K <- eval.fv(K/Ef2) if(returnL) K <- eval.fv(sqrt(K/pi)) attr(K, "labl") <- labl if(normalise && !returnL) { ylab <- quote(K[f](r)) fnam <- c("K", "f") } else if(normalise && returnL) { ylab <- quote(L[f](r)) fnam <- c("L", "f") } else if(!normalise && !returnL) { ylab <- quote(C[f](r)) fnam <- c("C", "f") } else { ylab <- quote(sqrt(C[f](r)/pi)) fnam <- "sqrt(C[f]/pi)" } K <- rebadge.fv(K, ylab, fnam) return(K) } markcorr <- function(X, f = function(m1, m2) { m1 * m2}, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., weights=NULL, f1=NULL, normalise=TRUE, fargs=NULL) { ## mark correlation function with test function f stopifnot(is.ppp(X) && is.marked(X)) nX <- npoints(X) ## set defaults to NULL if(missing(f)) f <- 
NULL if(missing(correction)) correction <- NULL ## handle data frame of marks marx <- marks(X, dfok=TRUE) if(is.data.frame(marx)) { nc <- ncol(marx) result <- list() for(j in 1:nc) { Xj <- X %mark% marx[,j] result[[j]] <- markcorr(Xj, f=f, r=r, correction=correction, method=method, ..., weights=weights, f1=f1, normalise=normalise, fargs=fargs) } result <- as.anylist(result) names(result) <- colnames(marx) return(result) } ## weights if(unweighted <- is.null(weights)) { weights <- rep(1, nX) } else { stopifnot(is.numeric(weights)) if(length(weights) == 1) { weights <- rep(weights, nX) } else check.nvector(weights, nX) stopifnot(all(weights > 0)) } ## validate test function h <- check.testfun(f, f1, X) f <- h$f f1 <- h$f1 ftype <- h$ftype ## ## npts <- npoints(X) W <- X$window ## determine r values rmaxdefault <- rmax.rule("K", W, npts/area(W)) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max if(length(method) > 1) stop("Select only one method, please") if(method=="density" && !breaks$even) stop(paste("Evenly spaced r values are required if method=", sQuote("density"), sep="")) ## available selection of edge corrections depends on window correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) ## Denominator ## Ef = Ef(M,M') when M, M' are independent ## Apply f to every possible pair of marks, and average Ef <- switch(ftype, mul = { mean(marx * weights)^2 }, equ = { if(unweighted) { mtable <- table(marx) } else { mtable <- tapply(weights, marx, sum) mtable[is.na(mtable)] <- 0 } sum(mtable^2)/nX^2 }, product={ f1m <- do.call(f1, append(list(marx), 
fargs)) mean(f1m * weights)^2 }, general = { mcross <- if(is.null(fargs)) { outer(marx, marx, f) } else { do.call(outer, append(list(marx,marx,f),fargs)) } if(unweighted) { mean(mcross) } else { wcross <- outer(weights, weights, "*") mean(mcross * wcross) } }, stop("Internal error: invalid ftype")) if(normalise) { theory <- 1 Efdenom <- Ef } else { theory <- Ef Efdenom <- 1 } if(normalise) { ## check validity of denominator if(Efdenom == 0) stop("Cannot normalise the mark correlation; the denominator is zero") else if(Efdenom < 0) warning(paste("Problem when normalising the mark correlation:", "the denominator is negative")) } ## this will be the output data frame result <- data.frame(r=r, theo= rep.int(theory,length(r))) desc <- c("distance argument r", "theoretical value (independent marks) for %s") alim <- c(0, min(rmax, rmaxdefault)) ## determine conventional name of function if(ftype %in% c("mul", "equ")) { if(normalise) { ylab <- quote(k[mm](r)) fnam <- c("k", "mm") } else { ylab <- quote(c[mm](r)) fnam <- c("c", "mm") } } else { if(normalise) { ylab <- quote(k[f](r)) fnam <- c("k", "f") } else { ylab <- quote(c[f](r)) fnam <- c("c", "f") } } result <- fv(result, "r", ylab, "theo", , alim, c("r","{%s[%s]^{iid}}(r)"), desc, fname=fnam) ## find close pairs of points close <- closepairs(X, rmax) dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=W, check=FALSE) ## apply f to marks of close pairs of points ## mI <- marx[I] mJ <- marx[J] ff <- switch(ftype, mul = mI * mJ, equ = (mI == mJ), product={ if(is.null(fargs)) { fI <- f1(mI) fJ <- f1(mJ) } else { fI <- do.call(f1, append(list(mI), fargs)) fJ <- do.call(f1, append(list(mJ), fargs)) } fI * fJ }, general={ if(is.null(fargs)) f(marx[I], marx[J]) else do.call(f, append(list(marx[I], marx[J]), fargs)) }) ## check values of f(M1, M2) if(is.logical(ff)) ff <- as.numeric(ff) else if(!is.numeric(ff)) stop("function f did not return numeric values") if(anyNA(ff)) switch(ftype, mul=, 
equ=stop("some marks were NA"), product=, general=stop("function f returned some NA values")) if(any(ff < 0)) switch(ftype, mul=, equ=stop("negative marks are not permitted"), product=, general=stop("negative values of function f are not permitted")) ## weights if(!unweighted) ff <- ff * weights[I] * weights[J] #### Compute estimates ############## if(any(correction == "translate")) { ## translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) ## get smoothed estimate of mark covariance Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) result <- bind.fv(result, data.frame(trans=Mtrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) ## get smoothed estimate of mark covariance Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) result <- bind.fv(result, data.frame(iso=Miso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") } ## which corrections have been computed? nama2 <- names(result) corrxns <- rev(nama2[nama2 != "r"]) ## default is to display them all formula(result) <- (. 
~ r) fvnames(result, ".") <- corrxns ## unitname(result) <- unitname(X) return(result) } ## mark cross-correlation function markcrosscorr <- function(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=TRUE, Xname=NULL) { if(missing(Xname)) Xname <- short.deparse(substitute(X)) stopifnot(is.ppp(X) && is.marked(X)) npts <- npoints(X) W <- Window(X) ## available selection of edge corrections depends on window correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) ## determine r values rmaxdefault <- rmax.rule("K", W, npts/area(W)) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max ## find close pairs of points close <- closepairs(X, rmax) dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=W, check=FALSE) ## determine estimation method if(length(method) > 1) stop("Select only one method, please") if(method=="density" && !breaks$even) stop(paste("Evenly spaced r values are required if method=", sQuote("density"), sep="")) ## ensure marks are a data frame marx <- marks(X, dfok=TRUE) if(!is.data.frame(marx)) marx <- data.frame(marks=marx) ## convert factor marks to dummy variables while(any(isfac <- sapply(marx, is.factor))) { i <- min(which(isfac)) mari <- marx[,i] levi <- levels(mari) nami <- colnames(marx)[i] dumi <- 1 * outer(mari, levi, "==") colnames(dumi) <- paste0(nami, levi) marx <- as.data.frame(append(marx[,-i,drop=FALSE], list(dumi), after=i-1)) } nc <- ncol(marx) nama <- colnames(marx) ## loop over all pairs of columns funs <- list() for(i in 1:nc) { marxi <- marx[,i] 
namei <- nama[i] for(j in 1:nc) { marxj <- marx[,j] namej <- nama[j] ## Denominator ## Ef = E M M' = EM EM' ## when M, M' are independent from the respective columns Ef <- mean(marxi) * mean(marxj) if(normalise) { theory <- 1 Efdenom <- Ef ## check validity of denominator if(Efdenom == 0) stop(paste("Cannot normalise the mark correlation for", namei, "x", namej, "because the denominator is zero"), call.=FALSE) else if(Efdenom < 0) warning(paste("Problem when normalising the mark correlation for", namei, "x", namej, "- the denominator is negative"), call.=FALSE) } else { theory <- Ef Efdenom <- 1 } ## this will be the output data frame df.ij <- data.frame(r=r, theo= rep.int(theory,length(r))) desc <- c("distance argument r", "theoretical value (independent marks) for %s") alim <- c(0, min(rmax, rmaxdefault)) ## determine conventional name of function mimj <- as.name(paste0(namei,".",namej)) if(normalise) { ylab <- substitute(k[mm](r), list(mm=mimj)) fnam <- c("k", as.character(mimj)) } else { ylab <- substitute(c[mm](r), list(mm=mimj)) fnam <- c("c", as.character(mimj)) } fun.ij <- fv(df.ij, "r", ylab, "theo", , alim, c("r","{%s[%s]^{ind}}(r)"), desc, fname=fnam) mI <- marxi[I] mJ <- marxj[J] ff <- mI * mJ ## check values of f(M1, M2) if(anyNA(ff)) stop("some marks were NA", call.=FALSE) if(any(ff < 0)) stop("negative marks are not permitted") ## Compute estimates ############## if(any(correction == "translate")) { ## translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) ## get smoothed estimate of mark covariance Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) 
fun.ij <- bind.fv(fun.ij, data.frame(trans=Mtrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) ## get smoothed estimate of mark covariance Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) fun.ij <- bind.fv(fun.ij, data.frame(iso=Miso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") } ## which corrections have been computed? nama2 <- names(fun.ij) corrxns <- rev(nama2[nama2 != "r"]) ## default is to display them all formula(fun.ij) <- (. ~ r) fvnames(fun.ij, ".") <- corrxns ## unitname(fun.ij) <- unitname(X) funs <- append(funs, list(fun.ij)) } } # matrix mapping array entries to list positions in 'funs' witch <- matrix(1:(nc^2), nc, nc, byrow=TRUE) header <- paste("Mark cross-correlation functions for", Xname) answer <- fasp(funs, witch, rowNames=nama, colNames=nama, title=header, dataname=Xname) return(answer) } sewsmod <- function(d, ff, wt, Ef, rvals, method="smrep", ..., nwtsteps=500) { ## Smooth Estimate of Weighted Second Moment Density ## (engine for computing mark correlations, etc) ## ------ ## Vectors containing one entry for each (close) pair of points ## d = interpoint distance ## ff = f(M1, M2) where M1, M2 are marks at the two points ## wt = edge correction weight ## ----- ## Ef = E[f(M, M')] where M, M' are independent random marks ## d <- as.vector(d) ff <- as.vector(ff) wt <- as.vector(wt) switch(method, density={ fw <- ff * wt sum.fw <- sum(fw) sum.wt <- sum(wt) ## smooth estimate of kappa_f est <- density(d, weights=fw/sum.fw, from=min(rvals), to=max(rvals), n=length(rvals), ...)$y numerator <- est * sum.fw ## smooth estimate of kappa_1 est0 <- density(d, weights=wt/sum.wt, from=min(rvals), to=max(rvals), n=length(rvals), ...)$y denominator <- est0 * Ef * sum.wt result <- numerator/denominator }, sm={ ## This is slow! 
oldopt <- options(warn=-1) smok <- requireNamespace("sm") options(oldopt) if(!smok) stop(paste("Option method=sm requires package sm,", "which is not available")) ## smooth estimate of kappa_f fw <- ff * wt est <- sm::sm.density(d, weights=fw, eval.points=rvals, display="none", nbins=0, ...)$estimate numerator <- est * sum(fw)/sum(est) ## smooth estimate of kappa_1 est0 <- sm::sm.density(d, weights=wt, eval.points=rvals, display="none", nbins=0, ...)$estimate denominator <- est0 * (sum(wt)/ sum(est0)) * Ef result <- numerator/denominator }, smrep={ oldopt <- options(warn=-1) smok <- requireNamespace("sm") options(oldopt) if(!smok) stop(paste("Option method=smrep requires package sm,", "which is not available")) hstuff <- resolve.defaults(list(...), list(hmult=1, h.weights=NA)) if(hstuff$hmult == 1 && all(is.na(hstuff$h.weights))) warning("default smoothing parameter may be inappropriate") ## use replication to effect the weights (it's faster) nw <- round(nwtsteps * wt/max(wt)) drep.w <- rep.int(d, nw) fw <- ff * wt nfw <- round(nwtsteps * fw/max(fw)) drep.fw <- rep.int(d, nfw) ## smooth estimate of kappa_f est <- sm::sm.density(drep.fw, eval.points=rvals, display="none", ...)$estimate numerator <- est * sum(fw)/sum(est) ## smooth estimate of kappa_1 est0 <- sm::sm.density(drep.w, eval.points=rvals, display="none", ...)$estimate denominator <- est0 * (sum(wt)/ sum(est0)) * Ef result <- numerator/denominator }, loess = { ## set up data frame df <- data.frame(d=d, ff=ff, wt=wt) ## fit curve to numerator using loess fitobj <- loess(ff ~ d, data=df, weights=wt, ...) 
## evaluate fitted curve at desired r values Eff <- predict(fitobj, newdata=data.frame(d=rvals)) ## normalise: ## denominator is the sample mean of all ff[i,j], ## an estimate of E(ff(M1,M2)) for M1,M2 independent marks result <- Eff/Ef }, ) return(result) } ############## user interface bits ################################## check.testfun <- local({ fmul <- function(m1, m2) { m1 * m2 } fequ <- function(m1, m2) { m1 == m2 } f1id <- function(m) { m } check.testfun <- function(f=NULL, f1=NULL, X) { ## Validate f or f1 as a test function for point pattern X ## Determine function type 'ftype' ## ("mul", "equ", "product" or "general") if(is.null(f) && is.null(f1)) { ## no functions given ## default depends on kind of marks if(is.multitype(X)) { f <- fequ ftype <- "equ" } else { f1 <- f1id ftype <- "mul" } } else if(!is.null(f1)) { ## f1 given ## specifies test function of the form f(u,v) = f1(u) f1(v) if(!is.null(f)) warning("argument f ignored (overridden by f1)") stopifnot(is.function(f1)) ftype <- "product" } else { ## f given if(is.character(fname <- f)) { switch(fname, "mul" = { f1 <- f1id ftype <- "mul" }, "equ" = { f <- fequ ftype <- "equ" }, { f <- get(fname) ftype <- "general" }) } else if(is.function(f)) { ftype <- if(isTRUE(all.equal(f, fmul))) "mul" else if(isTRUE(all.equal(f, fequ))) "equ" else "general" if(ftype == "mul" && is.multitype(X)) stop(paste("Inappropriate choice of function f;", "point pattern is multitype;", "types cannot be multiplied.")) } else stop("Argument f must be a function or the name of a function") } return(list(f=f, f1=f1, ftype=ftype)) } check.testfun }) spatstat/R/eval.fv.R0000755000176200001440000002244313115271075014014 0ustar liggesusers# # eval.fv.R # # # eval.fv() Evaluate expressions involving fv objects # # compatible.fv() Check whether two fv objects are compatible # # $Revision: 1.35 $ $Date: 2016/07/26 10:30:13 $ # eval.fv <- local({ # main function eval.fv <- function(expr, envir, dotonly=TRUE, equiv=NULL, 
relabel=TRUE) { # convert syntactic expression to 'expression' object e <- as.expression(substitute(expr)) # convert syntactic expression to call elang <- substitute(expr) # find names of all variables in the expression varnames <- all.vars(e) if(length(varnames) == 0) stop("No variables in this expression") # get the actual variables if(missing(envir)) { envir <- parent.frame() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- lapply(as.list(varnames), get, envir=envir) names(vars) <- varnames # find out which ones are fv objects fvs <- unlist(lapply(vars, is.fv)) nfuns <- sum(fvs) if(nfuns == 0) stop("No fv objects in this expression") # extract them funs <- vars[fvs] # restrict to columns identified by 'dotnames' if(dotonly) funs <- lapply(funs, restrict.to.dot) # map names if instructed if(!is.null(equiv)) funs <- lapply(funs, mapnames, map=equiv) # test whether the fv objects are compatible if(nfuns > 1L && !(do.call(compatible, unname(funs)))) { warning(paste(if(nfuns > 2) "some of" else NULL, "the functions", commasep(sQuote(names(funs))), "were not compatible: enforcing compatibility")) funs <- do.call(harmonise, append(funs, list(strict=TRUE))) } # copy first object as template result <- funs[[1L]] labl <- attr(result, "labl") origdotnames <- fvnames(result, ".") origshadenames <- fvnames(result, ".s") # determine which function estimates are supplied argname <- fvnames(result, ".x") nam <- names(result) ynames <- nam[nam != argname] # for each function estimate, evaluate expression for(yn in ynames) { # extract corresponding estimates from each fv object funvalues <- lapply(funs, "[[", i=yn) # insert into list of argument values vars[fvs] <- funvalues # evaluate result[[yn]] <- eval(e, vars, enclos=envir) } if(!relabel) return(result) # determine mathematical labels. 
# 'yexp' determines y axis label # 'ylab' determines y label in printing and description # 'fname' is sprintf-ed into 'labl' for legend yexps <- lapply(funs, attr, which="yexp") ylabs <- lapply(funs, attr, which="ylab") fnames <- lapply(funs, getfname) # Repair 'fname' attributes if blank blank <- unlist(lapply(fnames, isblank)) if(any(blank)) { # Set function names to be object names as used in the expression for(i in which(blank)) attr(funs[[i]], "fname") <- fnames[[i]] <- names(funs)[i] } # Remove duplicated names # Typically occurs when combining several K functions, etc. # Tweak fv objects so their function names are their object names # as used in the expression if(anyDuplicated(fnames)) { newfnames <- names(funs) for(i in 1:nfuns) funs[[i]] <- rebadge.fv(funs[[i]], new.fname=newfnames[i]) fnames <- newfnames } if(anyDuplicated(ylabs)) { flatnames <- lapply(funs, flatfname) for(i in 1:nfuns) { new.ylab <- substitute(f(r), list(f=flatnames[[i]])) funs[[i]] <- rebadge.fv(funs[[i]], new.ylab=new.ylab) } ylabs <- lapply(funs, attr, which="ylab") } if(anyDuplicated(yexps)) { newfnames <- names(funs) for(i in 1:nfuns) { new.yexp <- substitute(f(r), list(f=as.name(newfnames[i]))) funs[[i]] <- rebadge.fv(funs[[i]], new.yexp=new.yexp) } yexps <- lapply(funs, attr, which="yexp") } # now compute y axis labels for the result attr(result, "yexp") <- eval(substitute(substitute(e, yexps), list(e=elang))) attr(result, "ylab") <- eval(substitute(substitute(e, ylabs), list(e=elang))) # compute fname equivalent to expression if(nfuns > 1L) { # take original expression the.fname <- paren(flatten(deparse(elang))) } else if(nzchar(oldname <- flatfname(funs[[1L]]))) { # replace object name in expression by its function name namemap <- list(as.name(oldname)) names(namemap) <- names(funs)[1L] the.fname <- deparse(eval(substitute(substitute(e, namemap), list(e=elang)))) } else the.fname <- names(funs)[1L] attr(result, "fname") <- the.fname # now compute the [modified] y labels 
labelmaps <- lapply(funs, fvlabelmap, dot=FALSE) for(yn in ynames) { # labels for corresponding columns of each argument funlabels <- lapply(labelmaps, "[[", i=yn) # form expression involving these columns labl[match(yn, names(result))] <- flatten(deparse(eval(substitute(substitute(e, f), list(e=elang, f=funlabels))))) } attr(result, "labl") <- labl # copy dotnames and shade names from template fvnames(result, ".") <- origdotnames[origdotnames %in% names(result)] if(!is.null(origshadenames) && all(origshadenames %in% names(result))) fvnames(result, ".s") <- origshadenames return(result) } # helper functions restrict.to.dot <- function(z) { argu <- fvnames(z, ".x") dotn <- fvnames(z, ".") shadn <- fvnames(z, ".s") ok <- colnames(z) %in% unique(c(argu, dotn, shadn)) return(z[, ok]) } getfname <- function(x) { if(!is.null(y <- attr(x, "fname"))) y else "" } flatten <- function(x) { paste(x, collapse=" ") } mapnames <- function(x, map=NULL) { colnames(x) <- mapstrings(colnames(x), map=map) fvnames(x, ".y") <- mapstrings(fvnames(x, ".y"), map=map) return(x) } isblank <- function(z) { !any(nzchar(z)) } eval.fv }) compatible <- function(A, B, ...) { UseMethod("compatible") } compatible.fv <- local({ approx.equal <- function(x, y) { max(abs(x-y)) <= .Machine$double.eps } compatible.fv <- function(A, B, ...) { verifyclass(A, "fv") if(missing(B)) { answer <- if(length(...) == 0) TRUE else compatible(A, ...) return(answer) } verifyclass(B, "fv") ## are columns the same? namesmatch <- identical(all.equal(names(A),names(B)), TRUE) && (fvnames(A, ".x") == fvnames(B, ".x")) && (fvnames(A, ".y") == fvnames(B, ".y")) if(!namesmatch) return(FALSE) ## are 'r' values the same ? 
rA <- with(A, .x) rB <- with(B, .x) rmatch <- (length(rA) == length(rB)) && approx.equal(rA, rB) if(!rmatch) return(FALSE) ## A and B are compatible if(length(list(...)) == 0) return(TRUE) ## recursion return(compatible.fv(B, ...)) } compatible.fv }) # force a list of images to be compatible with regard to 'x' values harmonize <- harmonise <- function(...) { UseMethod("harmonise") } harmonize.fv <- harmonise.fv <- local({ harmonise.fv <- function(..., strict=FALSE) { argh <- list(...) n <- length(argh) if(n == 0) return(argh) if(n == 1) { a1 <- argh[[1L]] if(is.fv(a1)) return(argh) if(is.list(a1) && all(sapply(a1, is.fv))) { argh <- a1 n <- length(argh) } } isfv <- sapply(argh, is.fv) if(!all(isfv)) stop("All arguments must be fv objects") if(n == 1) return(argh[[1L]]) ## determine range of argument ranges <- lapply(argh, argumentrange) xrange <- c(max(unlist(lapply(ranges, min))), min(unlist(lapply(ranges, max)))) if(diff(xrange) < 0) stop("No overlap in ranges of argument") if(strict) { ## find common column names and keep these keepnames <- Reduce(intersect, lapply(argh, colnames)) argh <- lapply(argh, "[", j=keepnames) } ## determine finest resolution xsteps <- sapply(argh, argumentstep) finest <- which.min(xsteps) ## extract argument values xx <- with(argh[[finest]], .x) xx <- xx[xrange[1L] <= xx & xx <= xrange[2L]] xrange <- range(xx) ## convert each fv object to a function funs <- lapply(argh, as.function, value="*") ## evaluate at common argument result <- vector(mode="list", length=n) for(i in 1:n) { ai <- argh[[i]] fi <- funs[[i]] xxval <- list(xx=xx) names(xxval) <- fvnames(ai, ".x") starnames <- fvnames(ai, "*") ## ensure they are given in same order as current columns starnames <- colnames(ai)[colnames(ai) %in% starnames] yyval <- lapply(starnames, function(v,xx,fi) fi(xx, v), xx=xx, fi=fi) names(yyval) <- starnames ri <- do.call(data.frame, append(xxval, yyval)) fva <- .Spatstat.FvAttrib attributes(ri)[fva] <- attributes(ai)[fva] class(ri) <- c("fv", 
class(ri)) attr(ri, "alim") <- intersect.ranges(attr(ai, "alim"), xrange) result[[i]] <- ri } names(result) <- names(argh) return(result) } argumentrange <- function(f) { range(with(f, .x)) } argumentstep <- function(f) { mean(diff(with(f, .x))) } harmonise.fv }) spatstat/R/closepairs.R0000755000176200001440000004523213115271075014620 0ustar liggesusers# # closepairs.R # # $Revision: 1.39 $ $Date: 2017/06/05 10:31:58 $ # # simply extract the r-close pairs from a dataset # # Less memory-hungry for large patterns # closepairs <- function(X, rmax, ...) { UseMethod("closepairs") } closepairs.ppp <- function(X, rmax, twice=TRUE, what=c("all", "indices", "ijd"), distinct=TRUE, neat=TRUE, ...) { verifyclass(X, "ppp") what <- match.arg(what) stopifnot(is.numeric(rmax) && length(rmax) == 1L) stopifnot(is.finite(rmax)) stopifnot(rmax >= 0) ordered <- list(...)$ordered if(missing(twice) && !is.null(ordered)) { warning("Obsolete argument 'ordered' has been replaced by 'twice'") twice <- ordered } npts <- npoints(X) null.answer <- switch(what, all = { list(i=integer(0), j=integer(0), xi=numeric(0), yi=numeric(0), xj=numeric(0), yj=numeric(0), dx=numeric(0), dy=numeric(0), d=numeric(0)) }, indices = { list(i=integer(0), j=integer(0)) }, ijd = { list(i=integer(0), j=integer(0), d=numeric(0)) }) if(npts == 0) return(null.answer) # sort points by increasing x coordinate oo <- fave.order(X$x) Xsort <- X[oo] # First make an OVERESTIMATE of the number of unordered pairs nsize <- ceiling(2 * pi * (npts^2) * (rmax^2)/area(Window(X))) nsize <- max(1024, nsize) if(nsize > .Machine$integer.max) { warning("Estimated number of close pairs exceeds maximum possible integer", call.=FALSE) nsize <- .Machine$integer.max } # Now extract pairs if(spatstat.options("closepairs.newcode")) { # ------------------- use new faster code --------------------- # fast algorithms collect each distinct pair only once got.twice <- FALSE ng <- nsize # x <- Xsort$x y <- Xsort$y r <- rmax storage.mode(x) <- 
"double" storage.mode(y) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" switch(what, all = { z <- .Call("Vclosepairs", xx=x, yy=y, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 9) stop("Internal error: incorrect format returned from Vclosepairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] xi <- z[[3L]] yi <- z[[4L]] xj <- z[[5L]] yj <- z[[6L]] dx <- z[[7L]] dy <- z[[8L]] d <- z[[9L]] }, indices = { z <- .Call("VcloseIJpairs", xx=x, yy=y, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 2) stop("Internal error: incorrect format returned from VcloseIJpairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] }, ijd = { z <- .Call("VcloseIJDpairs", xx=x, yy=y, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 3) stop("Internal error: incorrect format returned from VcloseIJDpairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] d <- z[[3L]] }) } else { # ------------------- use older code -------------------------- if(!distinct) { ii <- seq_len(npts) xx <- X$x yy <- X$y zeroes <- rep(0, npts) null.answer <- switch(what, all = { list(i=ii, j=ii, xi=xx, yi=yy, xj=xx, yj=yy, dx=zeroes, dy=zeroes, d=zeroes) }, indices = { list(i=ii, j=ii) }, ijd = { list(i=ii, j=ii, d=zeroes) }) } got.twice <- TRUE nsize <- nsize * 2 z <- .C("Fclosepairs", nxy=as.integer(npts), x=as.double(Xsort$x), y=as.double(Xsort$y), r=as.double(rmax), noutmax=as.integer(nsize), nout=as.integer(integer(1L)), iout=as.integer(integer(nsize)), jout=as.integer(integer(nsize)), xiout=as.double(numeric(nsize)), yiout=as.double(numeric(nsize)), xjout=as.double(numeric(nsize)), yjout=as.double(numeric(nsize)), dxout=as.double(numeric(nsize)), dyout=as.double(numeric(nsize)), dout=as.double(numeric(nsize)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status != 0) { # Guess was insufficient # Obtain an OVERCOUNT of the number of pairs # (to work around gcc bug #323) rmaxplus <- 1.25 * rmax nsize <- .C("paircount", nxy=as.integer(npts), 
x=as.double(Xsort$x), y=as.double(Xsort$y), rmaxi=as.double(rmaxplus), count=as.integer(integer(1L)), PACKAGE = "spatstat")$count if(nsize <= 0) return(null.answer) # add a bit more for safety nsize <- ceiling(1.1 * nsize) + 2 * npts # now extract points z <- .C("Fclosepairs", nxy=as.integer(npts), x=as.double(Xsort$x), y=as.double(Xsort$y), r=as.double(rmax), noutmax=as.integer(nsize), nout=as.integer(integer(1L)), iout=as.integer(integer(nsize)), jout=as.integer(integer(nsize)), xiout=as.double(numeric(nsize)), yiout=as.double(numeric(nsize)), xjout=as.double(numeric(nsize)), yjout=as.double(numeric(nsize)), dxout=as.double(numeric(nsize)), dyout=as.double(numeric(nsize)), dout=as.double(numeric(nsize)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status != 0) stop(paste("Internal error: C routine complains that insufficient space was allocated:", nsize)) } # trim vectors to the length indicated npairs <- z$nout if(npairs <= 0) return(null.answer) actual <- seq_len(npairs) i <- z$iout[actual] # sic j <- z$jout[actual] switch(what, indices={}, all={ xi <- z$xiout[actual] yi <- z$yiout[actual] xj <- z$xjout[actual] yj <- z$yjout[actual] dx <- z$dxout[actual] dy <- z$dyout[actual] d <- z$dout[actual] }, ijd = { d <- z$dout[actual] }) # ------------------- end code switch ------------------------ } # convert i,j indices to original sequence i <- oo[i] j <- oo[j] if(twice) { ## both (i, j) and (j, i) should be returned if(!got.twice) { ## duplication required iold <- i jold <- j i <- c(iold, jold) j <- c(jold, iold) switch(what, indices = { }, ijd = { d <- rep(d, 2) }, all = { xinew <- c(xi, xj) yinew <- c(yi, yj) xjnew <- c(xj, xi) yjnew <- c(yj, yi) xi <- xinew yi <- yinew xj <- xjnew yj <- yjnew dx <- c(dx, -dx) dy <- c(dy, -dy) d <- rep(d, 2) }) } } else { ## only one of (i, j) and (j, i) should be returned if(got.twice) { ## remove duplication ok <- (i < j) i <- i[ok] j <- j[ok] switch(what, indices = { }, all = { xi <- xi[ok] yi <- yi[ok] xj <- 
xj[ok] yj <- yj[ok] dx <- dx[ok] dy <- dy[ok] d <- d[ok] }, ijd = { d <- d[ok] }) } else if(neat) { ## enforce i < j swap <- (i > j) tmp <- i[swap] i[swap] <- j[swap] j[swap] <- tmp if(what == "all") { xinew <- ifelse(swap, xj, xi) yinew <- ifelse(swap, yj, yi) xjnew <- ifelse(swap, xi, xj) yjnew <- ifelse(swap, yi, yj) xi <- xinew yi <- yinew xj <- xjnew yj <- yjnew dx[swap] <- -dx[swap] dy[swap] <- -dy[swap] } } ## otherwise no action required } ## add pairs of identical points? if(!distinct) { ii <- seq_len(npts) xx <- X$x yy <- X$y zeroes <- rep(0, npts) i <- c(i, ii) j <- c(j, ii) switch(what, ijd={ d <- c(d, zeroes) }, all = { xi <- c(xi, xx) yi <- c(yi, yy) xj <- c(xj, xx) yi <- c(yi, yy) dx <- c(dx, zeroes) dy <- c(dy, zeroes) d <- c(d, zeroes) }) } ## done switch(what, all = { answer <- list(i=i, j=j, xi=xi, yi=yi, xj=xj, yj=yj, dx=dx, dy=dy, d=d) }, indices = { answer <- list(i = i, j = j) }, ijd = { answer <- list(i=i, j=j, d=d) }) return(answer) } ####################### crosspairs <- function(X, Y, rmax, ...) { UseMethod("crosspairs") } crosspairs.ppp <- function(X, Y, rmax, what=c("all", "indices", "ijd"), ...) 
{ verifyclass(X, "ppp") verifyclass(Y, "ppp") what <- match.arg(what) stopifnot(is.numeric(rmax) && length(rmax) == 1L && rmax >= 0) null.answer <- switch(what, all = { list(i=integer(0), j=integer(0), xi=numeric(0), yi=numeric(0), xj=numeric(0), yj=numeric(0), dx=numeric(0), dy=numeric(0), d=numeric(0)) }, indices = { list(i=integer(0), j=integer(0)) }, ijd = { list(i=integer(0), j=integer(0), d=numeric(0)) }) nX <- npoints(X) nY <- npoints(Y) if(nX == 0 || nY == 0) return(null.answer) # order patterns by increasing x coordinate ooX <- fave.order(X$x) Xsort <- X[ooX] ooY <- fave.order(Y$x) Ysort <- Y[ooY] if(spatstat.options("crosspairs.newcode")) { # ------------------- use new faster code --------------------- # First (over)estimate the number of pairs nsize <- ceiling(2 * pi * (rmax^2) * nX * nY/area(Window(Y))) nsize <- max(1024, nsize) if(nsize > .Machine$integer.max) { warning( "Estimated number of close pairs exceeds maximum possible integer", call.=FALSE) nsize <- .Machine$integer.max } # .Call Xx <- Xsort$x Xy <- Xsort$y Yx <- Ysort$x Yy <- Ysort$y r <- rmax ng <- nsize storage.mode(Xx) <- storage.mode(Xy) <- "double" storage.mode(Yx) <- storage.mode(Yy) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" switch(what, all = { z <- .Call("Vcrosspairs", xx1=Xx, yy1=Xy, xx2=Yx, yy2=Yy, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 9) stop("Internal error: incorrect format returned from Vcrosspairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] xi <- z[[3L]] yi <- z[[4L]] xj <- z[[5L]] yj <- z[[6L]] dx <- z[[7L]] dy <- z[[8L]] d <- z[[9L]] }, indices = { z <- .Call("VcrossIJpairs", xx1=Xx, yy1=Xy, xx2=Yx, yy2=Yy, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 2) stop("Internal error: incorrect format returned from VcrossIJpairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] }, ijd = { z <- .Call("VcrossIJDpairs", xx1=Xx, yy1=Xy, xx2=Yx, yy2=Yy, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 3) 
stop("Internal error: incorrect format returned from VcrossIJDpairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] d <- z[[3L]] }) } else { # Older code # obtain upper estimate of number of pairs # (to work around gcc bug 323) rmaxplus <- 1.25 * rmax nsize <- .C("crosscount", nn1=as.integer(X$n), x1=as.double(Xsort$x), y1=as.double(Xsort$y), nn2=as.integer(Ysort$n), x2=as.double(Ysort$x), y2=as.double(Ysort$y), rmaxi=as.double(rmaxplus), count=as.integer(integer(1L)), PACKAGE = "spatstat")$count if(nsize <= 0) return(null.answer) # allow slightly more space to work around gcc bug #323 nsize <- ceiling(1.1 * nsize) + X$n + Y$n # now extract pairs z <- .C("Fcrosspairs", nn1=as.integer(X$n), x1=as.double(Xsort$x), y1=as.double(Xsort$y), nn2=as.integer(Y$n), x2=as.double(Ysort$x), y2=as.double(Ysort$y), r=as.double(rmax), noutmax=as.integer(nsize), nout=as.integer(integer(1L)), iout=as.integer(integer(nsize)), jout=as.integer(integer(nsize)), xiout=as.double(numeric(nsize)), yiout=as.double(numeric(nsize)), xjout=as.double(numeric(nsize)), yjout=as.double(numeric(nsize)), dxout=as.double(numeric(nsize)), dyout=as.double(numeric(nsize)), dout=as.double(numeric(nsize)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status != 0) stop(paste("Internal error: C routine complains that insufficient space was allocated:", nsize)) # trim vectors to the length indicated npairs <- z$nout if(npairs <= 0) return(null.answer) actual <- seq_len(npairs) i <- z$iout[actual] # sic j <- z$jout[actual] xi <- z$xiout[actual] yi <- z$yiout[actual] xj <- z$xjout[actual] yj <- z$yjout[actual] dx <- z$dxout[actual] dy <- z$dyout[actual] d <- z$dout[actual] } # convert i,j indices to original sequences i <- ooX[i] j <- ooY[j] # done switch(what, all = { answer <- list(i=i, j=j, xi=xi, yi=yi, xj=xj, yj=yj, dx=dx, dy=dy, d=d) }, indices = { answer <- list(i=i, j=j) }, ijd = { answer <- list(i=i, j=j, d=d) }) return(answer) } closethresh <- function(X, R, S, twice=TRUE, ...) 
{ # list all R-close pairs # and indicate which of them are S-close (S < R) # so that results are consistent with closepairs(X,S) verifyclass(X, "ppp") stopifnot(is.numeric(R) && length(R) == 1L && R >= 0) stopifnot(is.numeric(S) && length(S) == 1L && S >= 0) stopifnot(S < R) ordered <- list(...)$ordered if(missing(twice) && !is.null(ordered)) { warning("Obsolete argument 'ordered' has been replaced by 'twice'") twice <- ordered } npts <- npoints(X) if(npts == 0) return(list(i=integer(0), j=integer(0), t=logical(0))) # sort points by increasing x coordinate oo <- fave.order(X$x) Xsort <- X[oo] # First make an OVERESTIMATE of the number of pairs nsize <- ceiling(4 * pi * (npts^2) * (R^2)/area(Window(X))) nsize <- max(1024, nsize) if(nsize > .Machine$integer.max) { warning("Estimated number of close pairs exceeds maximum possible integer", call.=FALSE) nsize <- .Machine$integer.max } # Now extract pairs x <- Xsort$x y <- Xsort$y r <- R s <- S ng <- nsize storage.mode(x) <- "double" storage.mode(y) <- "double" storage.mode(r) <- "double" storage.mode(s) <- "double" storage.mode(ng) <- "integer" z <- .Call("Vclosethresh", xx=x, yy=y, rr=r, ss=s, nguess=ng, PACKAGE = "spatstat") if(length(z) != 3) stop("Internal error: incorrect format returned from Vclosethresh") i <- z[[1L]] # NB no increment required j <- z[[2L]] th <- as.logical(z[[3L]]) # convert i,j indices to original sequence i <- oo[i] j <- oo[j] # fast C code only returns i < j if(twice) { iold <- i jold <- j i <- c(iold, jold) j <- c(jold, iold) th <- rep(th, 2) } # done return(list(i=i, j=j, th=th)) } crosspairquad <- function(Q, rmax, what=c("all", "indices")) { # find all close pairs X[i], U[j] stopifnot(inherits(Q, "quad")) what <- match.arg(what) X <- Q$data D <- Q$dummy clX <- closepairs(X=X, rmax=rmax, what=what) clXD <- crosspairs(X=X, Y=D, rmax=rmax, what=what) # convert all indices to serial numbers in union.quad(Q) # assumes data are listed first clXD$j <- npoints(X) + clXD$j result <- 
list(rbind(as.data.frame(clX), as.data.frame(clXD))) return(result) } spatstat/R/rLGCP.R0000755000176200001440000000605013115271120013345 0ustar liggesusers# # rLGCP.R # # simulation of log-Gaussian Cox process # # original code by Abdollah Jalilian # # $Revision: 1.19 $ $Date: 2016/09/12 02:08:18 $ # rLGCP <- local({ rLGCP <- function(model="exp", mu = 0, param = NULL, ..., win=NULL, saveLambda=TRUE, nsim=1, drop=TRUE) { ## validate if (!(is.numeric(mu) || is.function(mu) || is.im(mu))) stop(paste(sQuote("mu"), "must be a constant, a function or an image")) if (is.numeric(mu) && !(length(mu) == 1)) stop(paste(sQuote("mu"), "must be a single number")) ## check for outdated usage if(!all(nzchar(names(param)))) stop("Outdated syntax of argument 'param' to rLGCP", call.=FALSE) ## do.rLGCP(model=model, mu=mu, param=param, ..., win=win, saveLambda=saveLambda, nsim=nsim, drop=drop) } do.rLGCP <- function(model="exp", mu = 0, param = NULL, ..., win=NULL, saveLambda=TRUE, eps = NULL, dimyx = NULL, xy = NULL, modelonly=FALSE, nsim=1, drop=TRUE) { ## make RF model object from RandomFields package ## get the 'model generator' modgen <- getRandomFieldsModelGen(model) ## now create a RandomFields 'model' object rfmodel <- do.call(modgen, append(as.list(param), list(...))) if(!inherits(rfmodel, "RMmodel")) stop("Unable to create RandomFields model object", call.=FALSE) ## secret exit if(modelonly) return(rfmodel) ## simulation window win.given <- !is.null(win) mu.image <- is.im(mu) win <- if(win.given) as.owin(win) else if(mu.image) as.owin(mu) else owin() if(win.given && mu.image && !is.subset.owin(win, as.owin(mu))) stop(paste("The spatial domain of the pixel image", sQuote("mu"), "does not cover the simulation window", sQuote("win"))) ## convert win to a mask w <- as.mask(w=win, eps=eps, dimyx=dimyx, xy=xy) xcol <- w$xcol yrow <- w$yrow dim <- w$dim xy <- expand.grid(x=xcol, y=yrow) xx <- xy$x yy <- xy$y muxy <- if(is.numeric(mu)) mu else if (is.function(mu)) mu(xx,yy) else 
lookup.im(mu, xx, yy, naok=TRUE, strict=TRUE) muxy[is.na(muxy)] <- -Inf stopifnot(nsim >= 1) result <- vector(mode="list", length=nsim) for(i in 1:nsim) { ## generate zero-mean Gaussian random field spc <- RandomFields::RFoptions()$general$spConform if(spc) RandomFields::RFoptions(spConform=FALSE) z <- RandomFields::RFsimulate(rfmodel, xcol, yrow, grid = TRUE) if(spc) RandomFields::RFoptions(spConform=TRUE) ## convert to log-Gaussian image logLambda <- muxy + z Lambda <- matrix(exp(logLambda), nrow=dim[1], ncol=dim[2], byrow=TRUE) Lambda <- as.im(Lambda, W=w) ## generate Poisson points X <- rpoispp(Lambda)[win] ## if(saveLambda) attr(X, "Lambda") <- Lambda result[[i]] <- X } if(drop && nsim == 1) return(result[[1]]) names(result) <- paste("Simulation", 1:nsim) return(as.solist(result)) } rLGCP }) spatstat/R/parres.R0000755000176200001440000005243313115271120013740 0ustar liggesusers# # parres.R # # code to plot transformation diagnostic # # $Revision: 1.9 $ $Date: 2016/12/30 01:44:07 $ # parres <- function(model, covariate, ..., smooth.effect=FALSE, subregion=NULL, bw="nrd0", adjust=1, from=NULL,to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname) { modelname <- deparse(substitute(model)) if(missing(covname)) covname <- sensiblevarname(deparse(substitute(covariate)), "X") callstring <- paste(deparse(sys.call()), collapse = "") if(is.marked(model)) stop("Sorry, this is not yet implemented for marked models") if(!is.null(subregion)) stopifnot(is.owin(subregion)) if(is.null(adjust)) adjust <- 1 bw.input <- match.arg(bw.input) # validate model stopifnot(is.ppm(model)) modelcall <- model$callstring if(is.null(modelcall)) modelcall <- model$call if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) # extract spatial locations Q <- quad.ppm(model) # datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) nQ <- npoints(quadpoints) # fitted intensity lam <- fitted(model, type="trend") # subset of quadrature 
points used to fit model subQset <- getglmsubset(model) if(is.null(subQset)) subQset <- rep.int(TRUE, nQ) # restriction to subregion insubregion <- if(!is.null(subregion)) { inside.owin(quadpoints, w=subregion) } else rep.int(TRUE, nQ) ################################################################ # Inverse lambda residuals rx <- residuals(model, type="inverse") resid <- with(rx, "increment") ################################################################# # identify the covariate # if(length(covariate) == 0) stop("No covariate specified") covtype <- "unknown" if(!is.character(covariate)) { # Covariate is some kind of data, treated as external covariate covtype <- "external" beta <- 0 covvalues <- evalCovariate(covariate, quadpoints) } else { # Argument is name of covariate covname <- covariate if(length(covname) > 1) stop("Must specify only one covariate") # 'original covariates' orig.covars <- variablesinformula(formula(model)) # 'canonical covariates' canon.covars <- names(coef(model)) # offsets offset.covars <- offsetsinformula(formula(model)) # if(covname %in% orig.covars) { # one of the original covariates covtype <- "original" covvalues <- evalCovariate(findCovariate(covname, model), quadpoints) } else if(covname %in% canon.covars) { # one of the canonical covariates covtype <- "canonical" mm <- model.matrix(model) covvalues <- mm[, covname] ## extract the corresponding coefficient beta <- coef(model)[[covname]] } else if(covname %in% offset.covars) { # an offset term only covtype <- "offset" mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q))) if(!(covname %in% colnames(mf))) stop(paste("Internal error: offset term", covname, "not found in model frame")) covvalues <- mf[, covname] ## fixed coefficient (not an estimated parameter) beta <- 1 } else{ # must be an external covariate (i.e. 
not used in fitted model) covtype <- "external" beta <- 0 covvalues <- evalCovariate(findCovariate(covname, model), quadpoints) } } # validate covvalues # if(is.null(covvalues)) stop("Unable to extract covariate values") if(length(covvalues) != npoints(quadpoints)) stop(paste("Internal error: number of covariate values =", length(covvalues), "!=", npoints(quadpoints), "= number of quadrature points")) vtype <- typeof(covvalues) switch(vtype, real=, double = { }, integer = { warning("Covariate is integer-valued") }, stop(paste("Cannot handle covariate of type", sQuote(vtype)))) ################################################################# # Compute covariate effect if(covtype != "original") { effect <- beta * covvalues mediator <- covtype effectfundata <- list(beta=beta) effectFun <- function(x) { (effectfundata$beta) * x } isoffset <- (covtype == "offset") names(isoffset) <- covname } else { ## `original' covariate (passed as argument to ppm) ## may determine one or more canonical covariates and/or offsets origcovdf <- getppmOriginalCovariates(model)[insubregion, , drop=FALSE] isconstant <- lapply(origcovdf, function(z) { length(unique(z)) == 1 }) ## ## Initialise termnames <- character(0) termbetas <- numeric(0) isoffset <- logical(0) mediator <- character(0) effect <- 0 effectFun <- function(x) { effectFun.can(x) + effectFun.off(x) } effectFun.can <- effectFun.off <- function(x) { 0 * x } ## Identify relevant canonical covariates dmat <- model.depends(model) if(!(covname %in% colnames(dmat))) stop("Internal error: cannot match covariate names") othercov <- (colnames(dmat) != covname) relevant <- dmat[, covname] if(any(relevant)) { # original covariate determines one or more canonical covariates mediator <- "canonical" # check whether covariate is separable if(any(conflict <- dmat[relevant, othercov, drop=FALSE])) { ## identify entangled covariates entangled <- colnames(conflict)[matcolany(conflict)] ## not problematic if constant ok <- 
unlist(isconstant[entangled]) conflict[ , ok] <- FALSE ## re-test if(any(conflict)) { conflictterms <- matrowany(conflict) conflictcovs <- matcolany(conflict) stop(paste("The covariate", sQuote(covname), "cannot be separated from the", ngettext(sum(conflictcovs), "covariate", "covariates"), commasep(sQuote(colnames(conflict)[conflictcovs])), "in the model", ngettext(sum(conflictterms), "term", "terms"), commasep(sQuote(rownames(conflict)[conflictterms])) )) } } # termnames <- rownames(dmat)[relevant] isoffset <- rep.int(FALSE, length(termnames)) names(isoffset) <- termnames # Extract relevant canonical covariates mm <- model.matrix(model) termvalues <- mm[, relevant, drop=FALSE] # extract corresponding coefficients termbetas <- coef(model)[relevant] # evaluate model effect effect <- as.numeric(termvalues %*% termbetas) # check length if(length(effect) != npoints(quadpoints)) stop(paste("Internal error: number of values of fitted effect =", length(effect), "!=", npoints(quadpoints), "= number of quadrature points")) # Trap loglinear case if(length(termnames) == 1 && identical(termnames, covname)) { covtype <- "canonical" beta <- termbetas } # construct the corresponding function gd <- getglmdata(model) goodrow <- min(which(complete.cases(gd))) defaultdata <- gd[goodrow, , drop=FALSE] effectfundata.can <- list(covname=covname, fmla = formula(model), termbetas = termbetas, defaultdata = defaultdata, relevant = relevant, termnames = termnames) effectFun.can <- function(x) { d <- effectfundata.can # replicate default data to correct length df <- as.data.frame(lapply(d$defaultdata, rep, length(x))) # overwrite value of covariate with new data df[,covname] <- x # construct model matrix m <- model.matrix(d$fmla, df) # check it conforms to expected structure if(!identical(colnames(m)[d$relevant], d$termnames)) stop("Internal error: mismatch in term names in effectFun") me <- m[, d$relevant, drop=FALSE] y <- me %*% as.matrix(d$termbetas, ncol=1) return(y) } } 
if(!is.null(offmat <- attr(dmat, "offset")) && any(relevant <- offmat[, covname])) { # covariate appears in a model offset term mediator <- c(mediator, "offset") # check whether covariate is separable if(any(conflict<- offmat[relevant, othercov, drop=FALSE])) { ## identify entangled covariates entangled <- colnames(conflict)[matcolany(conflict)] ## not problematic if constant ok <- unlist(isconstant[entangled]) conflict[ , ok] <- FALSE ## re-test if(any(conflict)) { conflictterms <- matrowany(conflict) conflictcovs <- matcolany(conflict) stop(paste("The covariate", sQuote(covname), "cannot be separated from the", ngettext(sum(conflictcovs), "covariate", "covariates"), commasep(sQuote(colnames(conflict)[conflictcovs])), "in the model", ngettext(sum(conflictterms), "term", "terms"), commasep(sQuote(rownames(conflict)[conflictterms])) )) } } # collect information about relevant offset offnames <- rownames(offmat)[relevant] termnames <- c(termnames, offnames) noff <- length(offnames) termbetas <- c(termbetas, rep.int(1, noff)) isoffset <- c(isoffset, rep.int(TRUE, noff)) names(termbetas) <- names(isoffset) <- termnames # extract values of relevant offset mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q))) if(any(nbg <- !(offnames %in% colnames(mf)))) stop(paste("Internal error:", ngettext(sum(nbg), "offset term", "offset terms"), offnames[nbg], "not found in model frame")) effex <- mf[, offnames, drop=FALSE] effect <- effect + rowSums(effex) # # construct the corresponding function gd <- getglmdata(model) goodrow <- min(which(complete.cases(gd))) defaultdata <- gd[goodrow, , drop=FALSE] effectfundata.off <- list(covname=covname, fmla = formula(model), defaultdata = defaultdata, offnames = offnames) effectFun.off <- function(x) { d <- effectfundata.off # replicate default data to correct length df <- as.data.frame(lapply(d$defaultdata, rep, length(x))) # overwrite value of covariate with new data df[,covname] <- x # construct model FRAME mf <- model.frame(d$fmla, 
df) # check it conforms to expected structure if(!all(d$offnames %in% colnames(mf))) stop("Internal error: mismatch in term names in effectFun") moff <- mf[, d$offnames, drop=FALSE] y <- rowSums(moff) return(y) } } if(length(termnames) == 0) { # Sanity clause # (everyone knows there ain't no Sanity Clause...) warning(paste("Internal error: could not find any", "canonical covariates or offset terms", "that depended on the covariate", sQuote(covname))) # Assume it's an external covariate (i.e. not used in fitted model) covtype <- "external" beta <- 0 effect <- beta * covvalues effectFun <- function(x) { 0 * x } isoffset <- FALSE names(isoffset) <- covname } } #### Canonical covariates and coefficients switch(covtype, original={ cancovs <- termnames canbeta <- termbetas }, offset = , canonical={ cancovs <- covname canbeta <- beta }, external={ cancovs <- canbeta <- NA }) ################################################################# # Validate covariate values # locations that must have finite values operative <- if(bw.restrict) insubregion & subQset else subQset nbg.cov <- !is.finite(covvalues) if(any(offending <- nbg.cov & operative)) { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } nbg.eff <- !is.finite(effect) if(any(offending <- nbg.eff & operative)) { warning(paste(sum(offending), "out of", length(offending), "values of fitted effect discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } ################################################################# # Restrict data to 'operative' points # with finite values nbg <- nbg.cov | nbg.eff ok <- !nbg & operative Q <- Q[ok] covvalues <- covvalues[ok] quadpoints <- quadpoints[ok] resid <- resid[ok] lam <- lam[ok] effect <- effect[ok] insubregion <- insubregion[ok] Z <- Z[ok] wts <- wts[ok] #################################################### # assemble data for 
smoothing x <- covvalues y <- resid/wts if(smooth.effect) y <- y + effect w <- wts # if(makefrom <- is.null(from)) from <- min(x) if(maketo <- is.null(to)) to <- max(x) #################################################### # determine smoothing bandwidth # from 'operative' data switch(bw.input, quad = { # bandwidth selection from covariate values at all quadrature points numer <- unnormdensity(x, weights=w*y, bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- numer$bw }, points= { # bandwidth selection from covariate values at data points fake <- unnormdensity(x[Z], weights=1/lam[Z], bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- fake$bw numer <- unnormdensity(x, weights=w*y, bw=sigma, adjust=1, n=n,from=from,to=to, ...) }) #################################################### # Restrict data and recompute numerator if required if(!is.null(subregion) && !bw.restrict) { # Bandwidth was computed on all data # Restrict to subregion and recompute numerator x <- x[insubregion] y <- y[insubregion] w <- w[insubregion] Z <- Z[insubregion] lam <- lam[insubregion] if(makefrom) from <- min(x) if(maketo) to <- max(x) numer <- unnormdensity(x, weights=w*y, bw=sigma, adjust=1, n=n,from=from,to=to, ...) } #################################################### # Compute denominator denom <- unnormdensity(x, weights=w, bw=sigma, adjust=1, n=n,from=from,to=to, ...) 
#################################################### # Determine recommended plot range xr <- range(as.vector(x[Z]), finite=TRUE) alim <- xr + 0.1 * diff(xr) * c(-1,1) alim <- intersect.ranges(alim, c(from, to)) #################################################### # Compute terms interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } numfun <- interpolate(numer) denfun <- interpolate(denom) xxx <- numer$x yyy <- numfun(xxx)/denfun(xxx) # variance estimation # smooth 1/lambda(u) with smaller bandwidth tau <- sigma/sqrt(2) varnumer <- unnormdensity(x, weights=w/lam, bw=tau, adjust=1, n=n,from=from,to=to, ...) varnumfun <- interpolate(varnumer) varestxxx <- varnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2) sd <- sqrt(varestxxx) # alternative estimate of variance using data points only varXnumer <- unnormdensity(x[Z], weights=1/lam[Z]^2, bw=tau, adjust=1, n=n,from=from,to=to, ...) varXnumfun <- interpolate(varXnumer) varXestxxx <- varXnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2) sdX <- sqrt(varXestxxx) # fitted effect effxxx <- effectFun(xxx) # add fitted effect of covariate, if not added before smoothing if(!smooth.effect) yyy <- yyy + effxxx #################################################### # pack into fv object df <- data.frame(xxx=xxx, h =yyy, varh=varestxxx, hi=yyy+2*sd, lo=yyy-2*sd, hiX=yyy+2*sdX, loX=yyy-2*sdX, fit=effxxx) # remove any funny characters in name of covariate (e.g. 
if it is an offset) Covname <- make.names(covname) names(df)[1] <- Covname desc <- c(paste("covariate", sQuote(covname)), "Smoothed partial residual", "Variance", "Upper limit of pointwise 5%% significance band (integral)", "Lower limit of pointwise 5%% significance band (integral)", "Upper limit of pointwise 5%% significance band (sum)", "Lower limit of pointwise 5%% significance band (sum)", paste("Parametric fitted effect of", sQuote(covname))) rslt <- fv(df, argu=Covname, ylab=substitute(h(X), list(X=as.name(covname))), valu="h", fmla= as.formula(paste(". ~ ", Covname)), alim=alim, labl=c(covname, paste("%s", paren(covname), sep=""), paste("var", paren(covname), sep=""), paste("hi", paren(covname), sep=""), paste("lo", paren(covname), sep=""), paste("hiX", paren(covname), sep=""), paste("loX", paren(covname), sep=""), paste("fit", paren(covname), sep="")), desc=desc, fname="h", yexp=as.expression(substitute(hat(h)(X), list(X=covname)))) attr(rslt, "dotnames") <- c("h", "hi", "lo", "fit") fvnames(rslt, ".s") <- c("hi", "lo") # add special class data class(rslt) <- c("parres", class(rslt)) attr(rslt, "stuff") <- list(covname = paste(covname, collapse=""), covtype = covtype, mediator = mediator, cancovs = cancovs, canbeta = canbeta, isoffset = isoffset, modelname = modelname, modelcall = modelcall, callstring = callstring, sigma = sigma, smooth.effect = smooth.effect, restricted = !is.null(subregion), bw.input = bw.input) return(rslt) } print.parres <- function(x, ...) 
{
  ## Print method for class 'parres' (partial-residual diagnostic).
  ## Reports the covariate type, how the fitted effect decomposes into
  ## canonical/offset terms, the original call, the bandwidth actually
  ## used, and which smoothing algorithm was applied.
  cat("Transformation diagnostic (class parres)\n")
  s <- attr(x, "stuff")
  cat(paste("for the", s$covtype, "covariate", sQuote(s$covname),
            if(s$covtype != "external") "in" else "for",
            "the fitted model",
            if(nchar(s$modelcall) < 30) "" else "\n\t",
            s$modelcall, "\n"))
  switch(s$covtype,
         original={
           ## covariate was an original argument to the model fit:
           ## describe the canonical and offset terms it determines
           cancovs <- s$cancovs
           med <- s$mediator
           isoffset <- s$isoffset
           ## older objects lack 'isoffset'; treat all terms as non-offset
           if(is.null(isoffset))
             isoffset <- rep.int(FALSE, length(cancovs))
           ncc <- length(cancovs)
           noff <- sum(isoffset)
           nother <- sum(!isoffset)
           explain <- paste(ngettext(ncc,
                                     "Fitted effect:",
                                     "Fitted effect: sum of"),
                            if(noff == 0) {
                              paste(paste(med, collapse=" and "),
                                    ngettext(ncc, "term", "terms"),
                                    commasep(dQuote(cancovs)))
                            } else {
                              paste(paste(med[med != "offset"],
                                          collapse=" and "),
                                    ngettext(nother, "term", "terms"),
                                    commasep(dQuote(cancovs[!isoffset])),
                                    "and offset",
                                    ngettext(noff, "term", "terms"),
                                    commasep(dQuote(cancovs[isoffset])))
                            })
           cat(paste(explain, "\n"))
         },
         external={
           cat("Note: effect estimate not justified by delta method\n")
         },
         offset={},
         canonical={})
  # earlier versions were equivalent to restricted=FALSE
  if(identical(s$restricted, TRUE))
    cat("\t--Diagnostic computed for a subregion--\n")
  cat(paste("Call:", s$callstring, "\n"))
  cat(paste("Actual smoothing bandwidth sigma =",
            signif(s$sigma,5), "\n"))
  # earlier versions were equivalent to smooth.effect=TRUE
  sme <- !identical(s$smooth.effect, FALSE)
  if(sme) {
    cat("Algorithm: smooth(effect + residual)\n\n")
  } else {
    cat("Algorithm: effect + smooth(residual)\n\n")
  }
  ## dispatch to the 'fv' print method for the function-table part
  NextMethod("print")
}

## Plot method for class 'parres' (body continues on the next line).
plot.parres <- function(x, ...)
{ xname <- deparse(substitute(x)) do.call(plot.fv, resolve.defaults(list(x), list(...), list(main=xname, shade=c("hi", "lo")))) } spatstat/R/linearKmulti.R0000644000176200001440000002230113124102036015067 0ustar liggesusers# # linearKmulti # # $Revision: 1.13 $ $Date: 2017/02/07 08:12:05 $ # # K functions for multitype point pattern on linear network # # linearKdot <- function(X, i, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points result <- linearKmulti(X, I, J, r=r, correction=correction, ...) correction <- attr(result, "correction") type <- if(correction == "Ang") "L" else "net" result <- rebadge.as.dotfun(result, "K", type, i) return(result) } linearKcross <- function(X, i, j, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j)) j <- lev[2L] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { result <- linearK(X[marx == i], r=r, correction=correction, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearKmulti(X, I, J, r=r, correction=correction, ...) 
}
  # rebrand
  correction <- attr(result, "correction")
  type <- if(correction == "Ang") "L" else "net"
  result <- rebadge.as.crossfun(result, "K", type, i, j)
  return(result)
}

## K function on a linear network for two subsets I, J of the points of X.
## I, J are logical index vectors; correction is "none" or "Ang"
## (Ang's geometrically corrected estimator, the default and 'best').
linearKmulti <- function(X, I, J, r=NULL, ..., correction="Ang") {
  stopifnot(inherits(X, "lpp"))
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Ang="Ang",
                             best="Ang"),
                           multi=FALSE)
  ## basic information about the pattern and network
  npts <- npoints(X)
  totlen <- volume(domain(X))
  ## sanity checks on the index vectors
  if(!is.logical(I) || !is.logical(J))
    stop("I and J must be logical vectors")
  if(length(I) != npts || length(J) != npts)
    stop(paste("The length of I and J must equal",
               "the number of points in the pattern"))
  if(!any(I))
    stop("no points satisfy I")
  # if(!any(J)) stop("no points satisfy J")
  countI <- sum(I)
  countJ <- sum(J)
  countBoth <- sum(I & J)
  # lambdaI <- nI/lengthL
  # lambdaJ <- nJ/lengthL
  ## normalising constant: number of ordered (I, J) pairs of distinct
  ## points, per unit length of network
  denom <- (countI * countJ - countBoth)/totlen
  K <- linearKmultiEngine(X, I, J, r=r, denom=denom,
                          correction=correction, ...)
  ## relabel axes according to the correction actually applied
  correction <- attr(K, "correction")
  type <- if(correction == "Ang") "L" else "net"
  K <- rebadge.as.crossfun(K, "K", type, "I", "J")
  K
}

# ................ inhomogeneous ............................

linearKdot.inhom <- function(X, i, lambdaI, lambdadot, r=NULL, ...,
                             correction="Ang", normalise=TRUE) {
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  marx <- marks(X)
  lev <- levels(marx)
  if(missing(i)) i <- lev[1L] else
    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
  I <- (marx == i)
  J <- rep(TRUE, npoints(X))  # i.e. all points
  # compute
  result <- linearKmulti.inhom(X, I, J, lambdaI, lambdadot, r=r,
                               correction=correction,
                               normalise=normalise, ...)
## relabel
correction <- attr(result, "correction")
type <- if(correction == "Ang") "L, inhom" else "net, inhom"
result <- rebadge.as.dotfun(result, "K", type, i)
return(result)
}

## Inhomogeneous cross-type K function on a linear network, for points
## of type i versus points of type j.  lambdaI, lambdaJ give the
## (estimated) intensities of the two types.
linearKcross.inhom <- function(X, i, j, lambdaI, lambdaJ, r=NULL, ...,
                               correction="Ang", normalise=TRUE) {
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  marx <- marks(X)
  lev <- levels(marx)
  ## default types: the first two levels of the mark factor
  if(missing(i)) i <- lev[1L] else
    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
  if(missing(j)) j <- lev[2L] else
    if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark"))
  #
  if(i == j) {
    ## same type: reduces to the univariate inhomogeneous K function
    I <- (marx == i)
    result <- linearKinhom(X[I], lambda=lambdaI, r=r,
                           correction=correction,
                           normalise=normalise, ...)
  } else {
    I <- (marx == i)
    J <- (marx == j)
    result <- linearKmulti.inhom(X, I, J, lambdaI, lambdaJ, r=r,
                                 correction=correction,
                                 normalise=normalise, ...)
  }
  # rebrand
  correction <- attr(result, "correction")
  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
  result <- rebadge.as.crossfun(result, "K", type, i, j)
  return(result)
}

## Inhomogeneous K function for two index subsets I, J of a point
## pattern on a linear network (workhorse for the cross/dot versions).
linearKmulti.inhom <- function(X, I, J, lambdaI, lambdaJ, r=NULL, ...,
                               correction="Ang", normalise=TRUE) {
  stopifnot(inherits(X, "lpp"))
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Ang="Ang",
                             best="Ang"),
                           multi=FALSE)
  # extract info about pattern
  np <- npoints(X)
  lengthL <- volume(domain(X))
  #
  # validate I, J
  if(!is.logical(I) || !is.logical(J))
    stop("I and J must be logical vectors")
  if(length(I) != np || length(J) != np)
    stop(paste("The length of I and J must equal",
               "the number of points in the pattern"))
  if(!any(I)) stop("no points satisfy I")
  ## NOTE(review): unlike I, the subset J is not tested for emptiness
  ## here — apparently deliberate (cf. the commented-out test in
  ## linearKmulti); confirm before changing.
  # validate lambda vectors
  lambdaI <- getlambda.lpp(lambdaI, X, subset=I, ...)
  lambdaJ <- getlambda.lpp(lambdaJ, X, subset=J, ...)
  # compute K
  ## pairwise weights 1/(lambdaI[u] * lambdaJ[v])
  weightsIJ <- outer(1/lambdaI, 1/lambdaJ, "*")
  denom <- if(!normalise) lengthL else sum(1/lambdaI)
  K <- linearKmultiEngine(X, I, J, r=r, reweight=weightsIJ, denom=denom,
                          correction=correction, ...)
# set appropriate y axis label correction <- attr(K, "correction") type <- if(correction == "Ang") "L, inhom" else "net, inhom" K <- rebadge.as.crossfun(K, "K", type, "I", "J") # set markers for 'envelope' attr(K, "dangerous") <- union(attr(lambdaI, "dangerous"), attr(lambdaJ, "dangerous")) return(K) } # .............. internal ............................... linearKmultiEngine <- function(X, I, J, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", showworking=FALSE) { # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # extract info about pattern np <- npoints(X) # extract linear network L <- domain(X) W <- Window(L) # determine r values rmaxdefault <- 0.98 * boundingradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # if(correction == "Ang") { fname <- c("K", "list(L, I, J)") ylab <- quote(K[L,I,J](r)) } else { fname <- c("K", "list(net, I, J)") ylab <- quote(K[net,I,J](r)) } # if(np < 2) { # no pairs to count: return zero function zeroes <- rep(0, length(r)) df <- data.frame(r = r, est = zeroes) K <- fv(df, "r", ylab, "est", . 
~ r, c(0, rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname = fname) return(K) } # nI <- sum(I) nJ <- sum(J) whichI <- which(I) whichJ <- which(J) clash <- I & J has.clash <- any(clash) # compute pairwise distances if(exists("crossdist.lpp")) { DIJ <- crossdist(X[I], X[J], check=FALSE) if(has.clash) { # exclude pairs of identical points from consideration Iclash <- which(clash[I]) Jclash <- which(clash[J]) DIJ[cbind(Iclash,Jclash)] <- Inf } } else { D <- pairdist(X) diag(D) <- Inf DIJ <- D[I, J] } #--- compile into K function --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) K <- compileK(DIJ, r, denom=denom, check=FALSE, fname=fname) K <- rebadge.as.crossfun(K, "K", "net", "I", "J") unitname(K) <- unitname(X) attr(K, "correction") <- correction return(K) } if(correction == "none") edgewt <- 1 else { # inverse m weights (Ang's correction) # determine tolerance toler <- default.linnet.tolerance(L) # compute m[i,j] m <- matrix(1, nI, nJ) XI <- X[I] if(!has.clash) { for(k in seq_len(nJ)) { j <- whichJ[k] m[,k] <- countends(L, XI, DIJ[, k], toler=toler) } } else { # don't count identical pairs for(k in seq_len(nJ)) { j <- whichJ[k] inotj <- (whichI != j) m[inotj, k] <- countends(L, XI[inotj], DIJ[inotj, k], toler=toler) } } edgewt <- 1/m } # compute K wt <- if(!is.null(reweight)) edgewt * reweight else edgewt K <- compileK(DIJ, r, weights=wt, denom=denom, check=FALSE, fname=fname) ## rebadge and tweak K <- rebadge.as.crossfun(K, "K", "L", "I", "J") fname <- attr(K, "fname") # tack on theoretical value K <- bind.fv(K, data.frame(theo=r), makefvlabel(NULL, NULL, fname, "pois"), "theoretical Poisson %s") ## unitname(K) <- unitname(X) fvnames(K, ".") <- rev(fvnames(K, ".")) # show working if(showworking) attr(K, "working") <- list(DIJ=DIJ, wt=wt) attr(K, "correction") <- correction return(K) } spatstat/R/envelope3.R0000755000176200001440000000544313115271075014354 0ustar liggesusers# # envelope3.R 
# # simulation envelopes for pp3 # # $Revision: 1.13 $ $Date: 2016/04/25 02:34:40 $ # envelope.pp3 <- function(Y, fun=K3est, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- K3est if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.pp3")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(is.null(simulate)) { # ................................................... # Realisations of complete spatial randomness # will be generated by rpoispp # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y sY <- summary(Y) Yintens <- sY$intensity Ydomain <- Y$domain # expression that will be evaluated simexpr <- if(!is.marked(Y)) { # unmarked point pattern expression(rpoispp3(Yintens, domain=Ydomain)) } else { stop("Sorry, simulation of marked 3D point patterns is not yet implemented") } # suppress warnings from code checkers dont.complain.about(Yintens, Ydomain) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } else { # ................................................... 
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=FALSE, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, cl=cl, envir.user=envir.user, expected.arg=c("rmax", "nrval"), do.pwrong=do.pwrong) } spatstat/R/interact.R0000755000176200001440000002564413115271075014272 0ustar liggesusers# # interact.S # # # $Revision: 1.28 $ $Date: 2015/10/21 09:06:57 $ # # Class 'interact' representing the interpoint interaction # of a point process model # (e.g. Strauss process with a given threshold r) # # Class 'isf' representing a generic interaction structure # (e.g. pairwise interactions) # # These do NOT specify the "trend" part of the model, # only the "interaction" component. # # The analogy is: # # glm() ppm() # # model formula trend formula # # family interaction # # That is, the 'systematic' trend part of a point process # model is specified by a 'trend' formula argument to ppm(), # and the interpoint interaction is specified as an 'interact' # object. # # You only need to know about these classes if you want to # implement a new point process model. # # THE DISTINCTION: # An object of class 'isf' describes an interaction structure # e.g. pairwise interaction, triple interaction, # pairwise-with-saturation, Dirichlet interaction. # Think of it as determining the "order" of interaction # but not the specific interaction potential function. 
# # An object of class 'interact' completely defines the interpoint # interactions in a specific point process model, except for the # regular parameters of the interaction, which are to be estimated # by ppm() or otherwise. An 'interact' object specifies the values # of all the 'nuisance' or 'irregular' parameters. An example # is the Strauss process with a given, fixed threshold r # but with the parameters beta and gamma undetermined. # # DETAILS: # # An object of class 'isf' contains the following: # # $name Name of the interaction structure # e.g. "pairwise" # # $print How to 'print()' this object # [A function; invoked by the 'print' method # 'print.isf()'] # # $eval A function which evaluates the canonical # sufficient statistic for an interaction # of this general class (e.g. any pairwise # interaction.) # # If lambda(u,X) denotes the conditional intensity at a point u # for the point pattern X, then we assume # log lambda(u, X) = theta . S(u,X) # where theta is the vector of regular parameters, # and we call S(u,X) the sufficient statistic. # # A typical calling sequence for the $eval function is # # (f$eval)(X, U, E, potentials, potargs, correction) # # where X is the data point pattern, U is the list of points u # at which the sufficient statistic S(u,X) is to be evaluated, # E is a logical matrix equivalent to (X[i] == U[j]), # $potentials defines the specific potential function(s) and # $potargs contains any nuisance/irregular parameters of these # potentials [the $potargs are passed to the $potentials without # needing to be understood by $eval.] # $correction is the name of the edge correction method. # # # An object of class 'interact' contains the following: # # # $name Name of the specific potential # e.g. "Strauss" # # $family Object of class "isf" describing # the interaction structure # # $pot The interaction potential function(s) # -- usually a function or list of functions. 
# (passed as an argument to $family$eval)
#
# $par          list of any nuisance/irregular parameters
#               (passed as an argument to $family$eval)
#
# $parnames     vector of long names/descriptions
#               of the parameters in 'par'
#
# $init()       initialisation action
#               or NULL indicating none required
#
# $update()     A function to modify $par
#               [Invoked by 'update.interact()']
#               or NULL indicating a default action
#
# $print        How to 'print()' this object
#               [Invoked by 'print' method 'print.interact()']
#               or NULL indicating a default action
#
# --------------------------------------------------------------------------

## Print method for interaction structure objects (class 'isf').
## Delegates to the object's own $print component when present.
print.isf <- function(x, ...) {
  if(is.null(x)) return(invisible(NULL))
  verifyclass(x, "isf")
  if(!is.null(x$print))
    (x$print)(x)
  invisible(NULL)
}

## Print method for interpoint interactions (class 'interact').
## brief=TRUE suppresses the descriptive banner; banner=FALSE suppresses
## the header entirely; 'family' controls whether the isf is printed.
print.interact <- function(x, ..., family, brief=FALSE, banner=TRUE) {
  verifyclass(x, "interact")
  if(missing(family)) family <- waxlyrical('extras')
  #' Print name of model
  if(banner) {
    if(family && !brief && !is.null(xf <- x$family))
      print.isf(xf)
    splat(if(!brief) "Interaction:" else NULL, x$name, sep="")
  }
  # Now print the parameters
  if(!is.null(x$print)) {
    ## object supplies its own print routine
    (x$print)(x)
  } else {
    # default
    # just print the parameter names and their values
    pwords <- x$parnames
    parval <- x$par
    ## capitalise the first letter of each parameter description
    pwords <- paste(toupper(substring(pwords, 1, 1)),
                    substring(pwords, 2), sep="")
    isnum <- sapply(parval, is.numeric)
    parval[isnum] <- lapply(parval[isnum], signif,
                            digits=getOption("digits"))
    splat(paste(paste0(pwords, ":\t", parval), collapse="\n"))
  }
  invisible(NULL)
}

## Test whether x is an interpoint interaction object.
is.interact <- function(x) { inherits(x, "interact") }

## Update an 'interact' object, replacing selected irregular parameters
## named in '...' (body continues on the next line).
update.interact <- function(object, ...) {
  verifyclass(object, "interact")
  if(!is.null(object$update))
    (object$update)(object, ...)
  else {
    # Default
    # First update the version
    if(outdated.interact(object))
      object <- reincarnate.interact(object)
    # just match the arguments in "..."
    # with those in object$par and update them
    want <- list(...)
    if(length(want) > 0) {
      ## match supplied argument names against the parameter names
      m <- match(names(want),names(object$par))
      nbg <- is.na(m)
      if(any(nbg)) {
        ## NOTE(review): local 'which' shadows base::which in this block
        which <- paste((names(want))[nbg])
        warning(paste("Arguments not matched: ", which))
      }
      m <- m[!nbg]
      object$par[m] <- want
    }
    # call object's own initialisation routine
    if(!is.null(object$init))
      (object$init)(object)
    object
  }
}

## Is the model free of interpoint interaction (i.e. Poisson)?
## An 'interact' object with no $family component represents Poisson.
is.poisson.interact <- function(x) {
  verifyclass(x, "interact")
  is.null(x$family)
}

## Extract the irregular (nuisance) parameters of an interaction.
parameters.interact <- function(model, ...) {
  model$par
}

# Test whether interact object was made by an older version of spatstat
outdated.interact <- function(object) {
  ver <- object$version
  ## objects created before version stamps were introduced have ver NULL
  older <- is.null(ver) ||
    (package_version(ver) < versionstring.spatstat())
  return(older)
}

# Test whether the functions in the interaction object
# expect the coefficient vector to contain ALL coefficients,
# or only the interaction coefficients.
# This change was introduced in 1.11-0, at the same time
# as interact objects were given version numbers.
newstyle.coeff.handling <- function(object) {
  stopifnot(inherits(object, "interact"))
  ver <- object$version
  old <- is.null(ver) || (package_version(ver) < "1.11")
  return(!old)
}

# ######
#
# Re-create an interact object in the current version of spatstat
#
#
reincarnate.interact <- function(object) {
  # re-creates an interact object in the current version of spatstat
  if(!is.null(object$update)) {
    ## the object knows how to update itself
    newobject <- (object$update)(object)
    return(newobject)
  }
  par <- object$par
  # pot <- object$pot
  name <- object$name
  # get creator function
  creator <- object$creator
  if(is.null(creator)) {
    # old version: look up list
    creator <- .Spatstat.Old.InteractionList[[name]]
    if(is.null(creator))
      stop(paste("Don't know how to update", sQuote(name),
                 "to current version of spatstat"))
  }
  ## creator may be stored as a function name
  if(is.character(creator))
    creator <- get(creator)
  if(!is.function(creator) && !is.expression(creator))
    stop("Internal error: creator is not a function or expression")
  # call creator
  if(is.expression(creator))
    newobject <- eval(creator)
  else {
    # creator is a function
    # It's
assumed that the creator function's arguments are # either identical to components of 'par' (the usual case) # or to one of the components of the object itself (Ord, Saturated) # or to printfun=object$print (Pairwise). argnames <- names(formals(creator)) available <- append(par, object) available <- append(available, list(printfun=object$print)) ok <- argnames %in% names(available) if(!all(ok)) stop(paste("Internal error:", ngettext(sum(!ok), "argument", "arguments"), paste(sQuote(argnames[!ok]), collapse=", "), "in creator function were not understood")) newobject <- do.call(creator, available[argnames]) } if(!inherits(newobject, "interact")) stop("Internal error: creator did not return an object of class interact") return(newobject) } # This list is necessary to deal with older formats of 'interact' objects # which did not include the creator name .Spatstat.Old.InteractionList <- list("Diggle-Gratton process" = "DiggleGratton", "Geyer saturation process" = "Geyer", "Lennard-Jones potential" = "LennardJones", "Multitype Strauss process" = "MultiStrauss", "Multitype Strauss Hardcore process" = "MultiStraussHard", "Ord process with threshold potential"="OrdThresh", "Piecewise constant pairwise interaction process"="PairPiece", "Poisson process" = "Poisson", "Strauss process" = "Strauss", "Strauss - hard core process" = "StraussHard", "Soft core process" = "Softcore", # weird ones: "Ord process with user-defined potential" = expression(Ord(object$pot)), "Saturated process with user-defined potential" =expression(Saturated(object$pot)), "user-defined pairwise interaction process"= expression( Pairwise(object$pot, par=object$par, parnames=object$parnames, printfun=object$print)) ) as.interact <- function(object) { UseMethod("as.interact") } as.interact.interact <- function(object) { verifyclass(object, "interact") return(object) } interactionfamilyname <- function(x) { if(inherits(x, "isf")) return(x$name) x <- as.interact(x) return(x$family$name) } #### internal code 
for streamlining initialisation of interactions # # x should be a partially-completed 'interact' object # instantiate.interact <- function(x, par) { if(is.character(x$family)) x$family <- get(x$family) # set parameter values x$par <- par # validate parameters x$init(x) x$version <- versionstring.spatstat() return(x) } spatstat/R/deldir.R0000755000176200001440000002530013115271075013711 0ustar liggesusers# # deldir.R # # Interface to deldir package # # $Revision: 1.28 $ $Date: 2017/06/05 10:31:58 $ # .spst.triEnv <- new.env() assign("use.trigraf", TRUE, envir=.spst.triEnv) assign("use.trigrafS", TRUE, envir=.spst.triEnv) assign("debug.delaunay", FALSE, envir=.spst.triEnv) dirichlet <- local({ dirichlet <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir", warn=TRUE) w <- X$window dd <- safedeldir(X) if(is.null(dd)) return(NULL) pp <- lapply(tile.list(dd), df2poly) if(length(pp) == npoints(X)) names(pp) <- seq_len(npoints(X)) dir <- tess(tiles=pp, window=as.rectangle(w)) if(w$type != "rectangle") dir <- intersect.tess(dir, w) return(dir) } df2poly <- function(z) { owin(poly=z[c("x","y")]) } dirichlet }) delaunay <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir", warn=TRUE) nX <- npoints(X) if(nX < 3) return(NULL) w <- X$window dd <- safedeldir(X) if(is.null(dd)) return(NULL) a <- dd$delsgs[,5L] b <- dd$delsgs[,6L] use.trigraf <- get("use.trigraf", envir=.spst.triEnv) use.trigrafS <- get("use.trigrafS", envir=.spst.triEnv) debug.delaunay <- get("debug.delaunay", envir=.spst.triEnv) if(use.trigrafS) { # first ensure a[] < b[] swap <- (a > b) if(any(swap)) { oldb <- b b[swap] <- a[swap] a[swap] <- oldb[swap] } # next ensure a is sorted o <- order(a, b) a <- a[o] b <- b[o] # nv <- nX ne <- length(a) ntmax <- ne z <- .C("trigrafS", nv = as.integer(nv), ne = as.integer(ne), ie = as.integer(a), je = as.integer(b), ntmax = as.integer(ntmax), nt = as.integer(integer(1L)), it = as.integer(integer(ne)), jt = as.integer(integer(ne)), kt = 
as.integer(integer(ne)), status = as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status != 0) stop("Internal error: overflow in trigrafS") tlist <- with(z, cbind(it, jt, kt)[1:nt, ]) } else if(use.trigraf) { nv <- nX ne <- length(a) ntmax <- ne z <- .C("trigraf", nv = as.integer(nv), ne = as.integer(ne), ie = as.integer(a), je = as.integer(b), ntmax = as.integer(ntmax), nt = as.integer(integer(1L)), it = as.integer(integer(ntmax)), jt = as.integer(integer(ntmax)), kt = as.integer(integer(ntmax)), status = as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status != 0) stop("Internal error: overflow in trigraf") tlist <- with(z, cbind(it, jt, kt)[1:nt, ]) } else { tlist <- matrix(integer(0), 0, 3) for(i in seq_len(nX)) { # find all Delaunay neighbours of i jj <- c(b[a==i], a[b==i]) jj <- sort(unique(jj)) # select those with a higher index than i jj <- jj[jj > i] # find pairs of neighbours which are Delaunay neighbours # (thus, triangles where the first numbered vertex is i) if(length(jj) > 0) for(j in jj) { kk <- c(b[a == j], a[b == j]) kk <- kk[(kk %in% jj) & (kk > j)] if(length(kk) > 0) for(k in kk) # add (i,j,k) to list of triangles (i < j < k) tlist <- rbind(tlist, c(i, j, k)) } } } # At this point, `tlist' contains all triangles formed by the Delaunay edges, # with vertices given in ascending order i < j < k in the 3 columns of tlist. # Some of these triangles may not belong to the Delaunay triangulation. # They will be weeded out later. 
# Assemble coordinates of triangles x <- X$x y <- X$y xtri <- matrix(x[tlist], nrow(tlist), 3L) ytri <- matrix(y[tlist], nrow(tlist), 3L) # ensure triangle vertices are in anticlockwise order ztri <- ytri - min(y) dx <- cbind(xtri[,2L]-xtri[,1L], xtri[,3L]-xtri[,2L], xtri[,1L]-xtri[,3L]) zm <- cbind(ztri[,1L]+ztri[,2L], ztri[,2L]+ztri[,3L], ztri[,3L]+ztri[,1L]) negareas <- apply(dx * zm, 1L, sum) clockwise <- (negareas > 0) # if(any(clockwise)) { xc <- xtri[clockwise, , drop=FALSE] yc <- ytri[clockwise, , drop=FALSE] tc <- tlist[clockwise, , drop=FALSE] xtri[clockwise,] <- xc[,c(1L,3L,2L)] ytri[clockwise,] <- yc[,c(1L,3L,2L)] tlist[clockwise,] <- tc[, c(1L,3L,2L)] } # At this point, triangle vertices are listed in anticlockwise order. # The same directed edge (i, j) cannot appear twice. # To weed out invalid triangles, check for such duplication triedges <- rbind(tlist[, c(1L,2L)], tlist[, c(2L,3L)], tlist[, c(3L,1L)]) if(any(bad <- duplicated(triedges))) { badedges <- unique(triedges[bad, , drop=FALSE]) ntri <- nrow(tlist) triid <- rep.int(seq_len(ntri), 3) illegal <- rep.int(FALSE, ntri) for(j in seq_len(nrow(badedges))) { from <- badedges[j, 1L] to <- badedges[j, 2L] if(debug.delaunay) cat(paste("Suspect edge from vertex", from, "to vertex", to, "\n")) # find all triangles sharing this edge in this orientation sustri <- triid[(triedges[,1L] == from) & (triedges[,2L] == to)] if(debug.delaunay) cat(paste("\tInvestigating triangles", commasep(sustri), "\n")) # list all vertices associated with the suspect triangles susvert <- sort(unique(as.vector(tlist[sustri, ]))) if(debug.delaunay) cat(paste("\tInvestigating vertices", commasep(susvert), "\n")) xsusvert <- x[susvert] ysusvert <- y[susvert] # take each triangle in turn and check whether it contains a data point for(k in sustri) { if(!illegal[k] && any(inside.triangle(xsusvert, ysusvert, xtri[k,], ytri[k,]))) { if(debug.delaunay) cat(paste("Triangle", k, "is illegal\n")) illegal[k] <- TRUE } } } if(!any(illegal)) 
{ if(debug.delaunay) cat("No illegal triangles found\n") } else { if(debug.delaunay) cat(paste("Removing", sum(illegal), "triangles\n")) tlist <- tlist[!illegal, , drop=FALSE] xtri <- xtri[!illegal, , drop=FALSE] ytri <- ytri[!illegal, , drop=FALSE] } } # make tile list tiles <- list() for(m in seq_len(nrow(tlist))) { p <- list(x=xtri[m,], y=ytri[m,]) tiles[[m]] <- owin(poly=p, check=FALSE) } wc <- convexhull.xy(x, y) del <- tess(tiles=tiles, window=wc) if(w$type != "rectangle") del <- intersect.tess(del, w) return(del) } delaunayDistance <- function(X) { stopifnot(is.ppp(X)) nX <- npoints(X) w <- as.owin(X) ok <- !duplicated(X, rule="deldir") Y <- X[ok] nY <- npoints(Y) if(nY < 3) return(matrix(Inf, nX, nX)) dd <- deldir(Y$x, Y$y, rw=c(w$xrange,w$yrange)) if(is.null(dd)) return(NULL) joins <- as.matrix(dd$delsgs[,5:6]) joins <- rbind(joins, joins[,2:1]) d <- matrix(-1L, nY, nY) diag(d) <- 0 d[joins] <- 1 adj <- matrix(FALSE, nY, nY) diag(adj) <- TRUE adj[joins] <- TRUE z <- .C("Idist2dpath", nv = as.integer(nY), d = as.integer(d), adj = as.integer(adj), dpath = as.integer(integer(nY * nY)), tol = as.integer(0), niter = as.integer(integer(1L)), status = as.integer(integer(1L)), PACKAGE = "spatstat") if (z$status == -1L) warning(paste("graph connectivity algorithm did not converge after", z$niter, "iterations", "on", nY, "vertices and", sum(adj) - nY, "edges")) dpathY <- matrix(z$dpath, nY, nY) if(all(ok)) { dpathX <- dpathY } else { dpathX <- matrix(NA_integer_, nX, nX) dpathX[ok, ok] <- dpathY } return(dpathX) } safedeldir <- function(X) { rw <- with(X$window, c(xrange,yrange)) dd <- try(deldir(X$x, X$y, rw=rw)) if(!inherits(dd, "try-error") && inherits(dd, "deldir")) return(dd) warning("deldir failed; re-trying with slight perturbation of coordinates.", call.=FALSE) Y <- rjitter(X, mean(nndist(X))/100) dd <- try(deldir(Y$x, Y$y, rw=rw)) if(!inherits(dd, "try-error") && inherits(dd, "deldir")) return(dd) warning("deldir failed even after perturbation of 
coordinates.", call.=FALSE) return(NULL) } dirichletVertices <- function(X) { DT <- tiles(dirichlet(X)) xy <- do.call(concatxy, lapply(DT, vertices)) Y <- unique(ppp(xy$x, xy$y, window=Window(X), check=FALSE)) b <- bdist.points(Y) thresh <- diameter(Frame(X))/1000 Y <- Y[b > thresh] return(Y) } dirichletAreas <- function(X) { stopifnot(is.ppp(X)) X <- unmark(X) win <- Window(X) dup <- duplicated(X, rule="deldir") if((anydup <- any(dup))) { oldX <- X X <- X[!dup] } switch(win$type, rectangle = { rw <- c(win$xrange, win$yrange) dd <- deldir(X$x, X$y, dpl=NULL, rw=rw) w <- dd$summary[, 'dir.area'] }, polygonal = { w <- tile.areas(dirichlet(X)) }, mask = { #' Nearest data point to each pixel: tileid <- exactdt(X)$i #' Restrict to window (result is a vector - OK) tileid <- tileid[win$m] #' Count pixels in each tile id <- factor(tileid, levels=seq_len(X$n)) counts <- table(id) #' Convert to digital area pixelarea <- win$xstep * win$ystep w <- pixelarea * as.numeric(counts) }) if(!anydup) return(w) oldw <- numeric(npoints(oldX)) oldw[!dup] <- w return(oldw) } delaunayNetwork <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir") nX <- npoints(X) if(nX == 0) return(NULL) if(nX == 1L) return(linnet(X, !diag(TRUE))) if(nX == 2L) return(linnet(X, !diag(c(TRUE,TRUE)))) dd <- safedeldir(X) if(is.null(dd)) return(NULL) joins <- as.matrix(dd$delsgs[, 5:6]) return(linnet(X, edges=joins)) } dirichletEdges <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir") nX <- npoints(X) W <- Window(X) if(nX < 2) return(edges(W)) dd <- safedeldir(X) if(is.null(dd)) return(edges(W)) return(as.psp(dd$dirsgs[,1:4], window=W)) } dirichletNetwork <- function(X, ...) as.linnet(dirichletEdges(X), ...) ## deprecated older names delaunay.distance <- function(...) { .Deprecated("delaunayDistance", package="spatstat") delaunayDistance(...) } delaunay.network <- function(...) { .Deprecated("delaunayNetwork", package="spatstat") delaunayNetwork(...) 
} dirichlet.edges <- function(...) { .Deprecated("dirichletEdges", package="spatstat") dirichletEdges(...) } dirichlet.network <- function(...) { .Deprecated("dirichletNetwork", package="spatstat") dirichletNetwork(...) } dirichlet.vertices <- function(...) { .Deprecated("dirichletVertices", package="spatstat") dirichletVertices(...) } spatstat/R/dppmclass.R0000644000176200001440000000146013115225157014432 0ustar liggesusersis.dppm <- function#Recognise Fitted Determinantal Point Process Models ### Check that an object inherits the class dppm (x ### Any object. ){ inherits(x, "dppm") ### A single logical value. ##keyword<< spatial ##keyword<< manip ##keyword<< models } plot.dppm <- function (x, ..., what = c("intensity", "statistic")){ objectname <- short.deparse(substitute(x)) if(missing(what) && is.stationary(x)) what <- "statistic" plot.kppm(x, ..., xname = objectname, what = what) } Kmodel.dppm <- function (model, ...){ Kmodel(model$fitted, W=model$window) } pcfmodel.dppm <- function (model, ...){ pcfmodel(model$fitted, W=model$window) } intensity.dppm <- function (X, ...){ return(intensity(X$fitted)) } reach.dppm <- function(x, ...){ reach(x$fitted, ...) } spatstat/R/Kres.R0000755000176200001440000000526713115271075013364 0ustar liggesusers# # Kres.R # # Residual K # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # ############################################################################# # Kres <- function(object, ...) { if(!is.fv(object)) { # usual case where 'object' is a ppm, ppp or quad K <- Kcom(object, ...) } else { # case where 'object' is the output of 'Kcom' a <- attr(object, "maker") if(is.null(a) || a != "Kcom") stop("fv object was not created by Kcom") K <- object if(length(list(...)) > 0) warning("Extra arguments ignored") } # initialise fv object df <- data.frame(r=K$r, theo=numeric(length(K$r))) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~hat(K)(r), NULL), "theo", . 
~ r , attr(K, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="K") # add residual functions nam <- names(K) if("border" %in% nam) ans <- bind.fv(ans, data.frame(bres=with(K, border-bcom)), "bold(R)~hat(%s)[bord](r)", "residual function %s based on border correction", "bres") if(all(c("trans","tcom") %in% nam)) ans <- bind.fv(ans, data.frame(tres=with(K, trans-tcom)), "bold(R)~hat(%s)[trans](r)", "residual function %s based on translation correction", "tres") if(all(c("iso","icom") %in% nam)) ans <- bind.fv(ans, data.frame(ires=with(K, iso-icom)), "bold(R)~hat(%s)[iso](r)", "residual function %s based on isotropic correction", "ires") if("ivar" %in% nam) { savedotnames <- fvnames(ans, ".") ans <- bind.fv(ans, as.data.frame(K)[, c("ivar", "isd", "ihi", "ilo")], c("bold(C)^2~hat(%s)[iso](r)", "sqrt(bold(C)^2~hat(%s)[iso](r))", "bold(R)~hat(%s)[Hi](r)", "bold(R)~hat(%s)[Lo](r)"), c("pseudovariance of isotropic-corrected residual %s", "pseudo-SD of isotropic-corrected residual %s", "upper critical band for isotropic-corrected residual %s", "lower critical band for isotropic-corrected residual %s"), "ires") ans <- bind.fv(ans, data.frame(istdres=with(ans, ires/isd)), "bold(T)~hat(%s)[iso](r)", "standardised isotropic-corrected residual %s", "ires") fvnames(ans, ".") <- c(savedotnames, c("ihi", "ilo")) } unitname(ans) <- unitname(K) return(ans) } spatstat/R/multihard.R0000755000176200001440000001443313115271120014433 0ustar liggesusers# # # multihard.R # # $Revision: 1.17 $ $Date: 2016/02/16 01:39:12 $ # # The Hard core process # # Hardcore() create an instance of the Hard Core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # MultiHard <- local({ # .... 
multitype hard core potential MHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[i] type (mark) of point U[j] # # get matrices of interaction radii h <- par$hradii # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be checked # (the interaction must be symmetric in type, and scored as such) uptri <- (row(h) <= col(h)) & (!is.na(h)) mark1 <- (lx[row(h)])[uptri] mark2 <- (lx[col(h)])[uptri] # corresponding names mark1name <- (lxname[row(h)])[uptri] mark2name <- (lxname[col(h)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be checked # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create numeric array for result z <- array(0, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
if(length(z) > 0) { # apply the relevant hard core distance to each pair of points hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- array(0, dim=dim(d)) value[forbid] <- -Inf # assign value[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub] } } attr(z, "IsOffset") <- TRUE return(z) } #### end of 'pot' function #### # ............ template object ................... BlankMH <- list( name = "Multitype Hardcore process", creator = "MultiHard", family = "pairwise.family", # evaluated later pot = MHpotential, par = list(types=NULL, hradii = NULL), # filled in later parnames = c("possible types", "hardcore distances"), pardesc = c("vector of possible types", "matrix of hardcore distances"), selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii if(!is.null(types) && !is.null(hradii)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) } MultiHard(types=types,hradii=hradii) }, init = function(self) { types <- self$par$types if(!is.null(types)) { h <- self$par$hradii nt <- length(types) if(!is.null(h)) MultiPair.checkmatrix(h, nt, sQuote("hradii")) if(length(types) == 0) stop(paste("The", sQuote("types"), "argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { h <- self$par$hradii if(waxlyrical('gory')) { if(!is.null(h)) splat(nrow(h), 
"types of points") types <- self$par$types if(!is.null(types)) { splat("Possible types:") print(noquote(types)) } else splat("Possible types:\t not yet determined") } if(!is.null(h)) { splat("Hardcore radii:") print(signif(h, getOption("digits"))) } else splat("Hardcore radii:\t not yet determined") invisible() }, interpret = function(coeffs, self) { # there are no regular parameters (woo-hoo!) return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { h <- self$par$hradii return(max(0, h, na.rm=TRUE)) }, version=NULL # fix later ) class(BlankMH) <- "interact" MultiHard <- function(hradii=NULL, types=NULL) { if((missing(hradii) || !is.matrix(hradii)) && is.matrix(types)) { ## old syntax: (types=NULL, hradii) hradii <- types types <- NULL } if(!is.null(hradii)) hradii[hradii == 0] <- NA out <- instantiate.interact(BlankMH, list(types=types, hradii = hradii)) if(!is.null(types)) dimnames(out$par$hradii) <- list(types, types) return(out) } MultiHard <- intermaker(MultiHard, BlankMH) MultiHard }) spatstat/R/infline.R0000755000176200001440000001551613115271075014102 0ustar liggesusers# # infline.R # # Infinite lines # # $Revision: 1.28 $ $Date: 2017/02/07 07:47:20 $ # infline <- function(a=NULL, b=NULL, h=NULL, v=NULL, p=NULL, theta=NULL) { if(is.null(a) != is.null(b)) stop("invalid specification of a,b") if(is.null(p) != is.null(theta)) stop("invalid specification of p,theta") if(!is.null(h)) out <- data.frame(a=h, b=0, h=h, v=NA, p=h, theta=pi/2) else if(!is.null(v)) out <- data.frame(a=NA,b=NA,h=NA,v=v,p=v,theta=ifelseAB(v < 0, pi, 0)) else if(!is.null(a)) { # a, b specified z <- data.frame(a=a,b=b) a <- z$a b <- z$b theta <- ifelseAX(b == 0, pi/2, atan(-1/b)) theta <- theta %% pi p <- a * sin(theta) out <- data.frame(a=a, b=b, h=ifelseXB(b==0, a, NA), v=NA, p=p, theta=theta) } else if(!is.null(p)) { # p, theta specified z <- data.frame(p=p,theta=theta) p <- 
z$p theta <- z$theta theta <- theta %% (2*pi) if(any(reverse <- (theta >= pi))) { theta[reverse] <- theta[reverse] - pi p[reverse] <- -p[reverse] } vert <- (theta == 0) horz <- (cos(theta) == 0) gene <- !(vert | horz) v <- ifelseXB(vert, p, NA) h <- ifelseXB(horz, p, NA) a <- ifelseXB(gene, p/sin(theta), NA) b <- ifelseXB(gene, -cos(theta)/sin(theta), NA) out <- data.frame(a=a,b=b,h=h,v=v,p=p,theta=theta) } else stop("No data given!") class(out) <- c("infline", class(out)) return(out) } is.infline <- function(x) { inherits(x, "infline") } plot.infline <- function(x, ...) { for(i in seq_len(nrow(x))) { xi <- as.list(x[i, 1:4]) xi[sapply(xi, is.na)] <- NULL do.call(abline, append(xi, list(...))) } return(invisible(NULL)) } print.infline <- function(x, ...) { n <- nrow(x) splat(n, "infinite", ngettext(n, "line", "lines")) print(as.data.frame(x), ...) return(invisible(NULL)) } clip.infline <- function(L, win) { # clip a set of infinite straight lines to a window win <- as.owin(win) stopifnot(inherits(L, "infline")) nL <- nrow(L) if(nL == 0) return(psp(numeric(0),numeric(0),numeric(0),numeric(0), window=win)) seqL <- seq_len(nL) # determine circumcircle of win xr <- win$xrange yr <- win$yrange xmid <- mean(xr) ymid <- mean(yr) width <- diff(xr) height <- diff(yr) rmax <- sqrt(width^2 + height^2)/2 boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax) # convert line coordinates to origin (xmid, ymid) p <- L$p theta <- L$theta co <- cos(theta) si <- sin(theta) p <- p - xmid * co - ymid * si # compute intersection points with circumcircle hit <- (abs(p) < rmax) if(!any(hit)) return(psp(numeric(0),numeric(0),numeric(0),numeric(0), window=win)) p <- p[hit] theta <- theta[hit] q <- sqrt(rmax^2 - p^2) co <- co[hit] si <- si[hit] id <- seqL[hit] X <- psp(x0= xmid + p * co + q * si, y0= ymid + p * si - q * co, x1= xmid + p * co - q * si, y1= ymid + p * si + q * co, marks = factor(id, levels=seqL), window=boundbox, check=FALSE) # clip to window X <- X[win] return(X) } 
chop.tess <- function(X, L) { stopifnot(is.infline(L)) stopifnot(is.tess(X)||is.owin(X)) X <- as.tess(X) if(X$type == "image") { Xim <- X$image xr <- Xim$xrange yr <- Xim$yrange # extract matrices of pixel values and x, y coordinates Zmat <- as.integer(as.matrix(Xim)) xmat <- rasterx.im(Xim) ymat <- rastery.im(Xim) # process lines for(i in seq_len(nrow(L))) { # line i chops window into two pieces if(!is.na(h <- L[i, "h"])) { # horizontal line if(h > yr[1L] && h < yr[2L]) Zmat <- 2 * Zmat + (ymat > h) } else if(!is.na(v <- L[i, "v"])) { # vertical line if(v > xr[1L] && v < xr[2L]) Zmat <- 2 * Zmat + (xmat < v) } else { # generic line y = a + bx a <- L[i, "a"] b <- L[i, "b"] Zmat <- 2 * Zmat + (ymat > a + b * xmat) } } # Now just put back as factor image Zim <- im(Zmat, xcol=Xim$xcol, yrow=Xim$yrow, unitname=unitname(Xim)) Z <- tess(image=Zim) return(Z) } #---- polygonal computation -------- # get bounding box B <- as.rectangle(as.owin(X)) xr <- B$xrange yr <- B$yrange # get coordinates for(i in seq_len(nrow(L))) { # line i chops box B into two pieces if(!is.na(h <- L[i, "h"])) { # horizontal line if(h < yr[1L] || h > yr[2L]) Z <- NULL else { lower <- owin(xr, c(yr[1L], h)) upper <- owin(xr, c(h, yr[2L])) Z <- tess(tiles=list(lower,upper), window=B) } } else if(!is.na(v <- L[i, "v"])) { # vertical line if(v < xr[1L] || v > xr[2L]) Z <- NULL else { left <- owin(c(xr[1L], v), yr) right <- owin(c(v, xr[2L]), yr) Z <- tess(tiles=list(left,right), window=B) } } else { # generic line a <- L[i, "a"] b <- L[i, "b"] # Intersect with extended left and right sides of B yleft <- a + b * xr[1L] yright <- a + b * xr[2L] ylo <- min(yleft, yright, yr[1L]) - 1 yhi <- max(yleft, yright, yr[2L]) + 1 lower <- owin(poly=list(x=xr[c(1L,1L,2L,2L)], y=c(yleft,ylo,ylo,yright))) upper <- owin(poly=list(x=xr[c(1L,2L,2L,1L)], y=c(yleft,yright,yhi,yhi))) Bplus <- owin(xr, c(ylo, yhi), unitname=unitname(B)) Z <- tess(tiles=list(lower,upper), window=Bplus) } # intersect this simple tessellation 
with X if(!is.null(Z)) { X <- intersect.tess(X, Z) tilenames(X) <- paste("Tile", seq_len(length(tiles(X)))) } } return(X) } whichhalfplane <- function(L, x, y=NULL) { verifyclass(L, "infline") xy <- xy.coords(x, y) x <- xy$x y <- xy$y m <- length(x) n <- nrow(L) Z <- matrix(as.logical(NA_integer_), n, m) for(i in seq_len(n)) { if(!is.na(h <- L[i, "h"])) { #' horizontal line Z[i,] <- (y < h) } else if(!is.na(v <- L[i, "v"])) { #' vertical line Z[i,] <- (x < v) } else { #' generic line y = a + bx a <- L[i, "a"] b <- L[i, "b"] Z[i,] <- (y < a + b * x) } } return(Z) } rotate.infline <- function(X, angle=pi/2, ...) { if(nrow(X) == 0) return(X) Y <- with(X, infline(p = p, theta=theta + angle)) return(Y) } shift.infline <- function(X, vec=c(0,0), ...) { if(nrow(X) == 0) return(X) vec <- as2vector(vec) Y <- with(X, infline(p = p + vec[1L] * cos(theta) + vec[2L] * sin(theta), theta=theta)) return(Y) } reflect.infline <- function(X) { if(nrow(X) == 0) return(X) Y <- with(X, infline(p = p, theta=(theta + pi) %% (2 * pi))) return(Y) } flipxy.infline <- function(X) { if(nrow(X) == 0) return(X) Y <- with(X, infline(p = p, theta=(pi/2 - theta) %% (2 * pi))) return(Y) } spatstat/R/intensity.R0000644000176200001440000002235013115271075014473 0ustar liggesusers# # intensity.R # # Code related to intensity and intensity approximations # # $Revision: 1.20 $ $Date: 2017/06/05 10:31:58 $ # intensity <- function(X, ...) 
{ UseMethod("intensity") } intensity.ppp <- function(X, ..., weights=NULL) { n <- npoints(X) a <- area(Window(X)) if(is.null(weights)) { ## unweighted case - for efficiency if(is.multitype(X)) { mks <- marks(X) answer <- as.vector(table(mks))/a names(answer) <- levels(mks) } else answer <- n/a return(answer) } ## weighted case if(is.numeric(weights)) { check.nvector(weights, n) } else if(is.expression(weights)) { # evaluate expression in data frame of coordinates and marks df <- as.data.frame(X) pf <- parent.frame() eval.weights <- try(eval(weights, envir=df, enclos=pf)) if(inherits(eval.weights, "try-error")) stop("Unable to evaluate expression for weights", call.=FALSE) if(!check.nvector(eval.weights, n, fatal=FALSE, warn=TRUE)) stop("Result of evaluating the expression for weights has wrong format") weights <- eval.weights } else stop("Unrecognised format for argument 'weights'") ## if(is.multitype(X)) { mks <- marks(X) answer <- as.vector(tapply(weights, mks, sum))/a answer[is.na(answer)] <- 0 names(answer) <- levels(mks) } else { answer <- sum(weights)/a } return(answer) } intensity.splitppp <- function(X, ..., weights=NULL) { if(is.null(weights)) return(sapply(X, intensity.ppp)) if(is.expression(weights)) return(sapply(X, intensity.ppp, weights=weights)) if(is.numeric(weights)) { fsplit <- attr(X, "fsplit") n <- length(fsplit) check.nvector(weights, n) result <- mapply(intensity.ppp, X, weights=split(weights, fsplit)) result <- simplify2array(result, higher=FALSE) return(result) } stop("Unrecognised format for weights") } intensity.ppm <- function(X, ...) 
{ if(!identical(valid.ppm(X), TRUE)) { warning("Model is invalid - projecting it") X <- project.ppm(X) } if(is.poisson(X)) { if(is.stationary(X)) { # stationary univariate/multivariate Poisson sX <- summary(X, quick="no variances") lam <- sX$trend$value if(sX$multitype && sX$no.trend) { ## trend is ~1; lam should be replicated for each mark lev <- levels(marks(data.ppm(X))) lam <- rep(lam, length(lev)) names(lam) <- lev } return(lam) } # Nonstationary Poisson return(predict(X, ...)) } # Gibbs process if(is.multitype(X)) stop("Not yet implemented for multitype Gibbs processes") # Compute first order term if(is.stationary(X)) { ## activity parameter sX <- summary(X, quick="no variances") beta <- sX$trend$value } else { ## activity function (or values of it, depending on '...') beta <- predict(X, ...) } ## apply approximation lambda <- PoisSaddle(beta, fitin(X)) return(lambda) } PoisSaddle <- function(beta, fi) { ## apply Poisson-Saddlepoint approximation ## given first order term and fitted interaction stopifnot(inherits(fi, "fii")) inte <- as.interact(fi) if(identical(inte$family$name, "pairwise")) return(PoisSaddlePairwise(beta, fi)) if(identical(inte$name, "Geyer saturation process")) return(PoisSaddleGeyer(beta, fi)) if(identical(inte$name, "Area-interaction process")) return(PoisSaddleArea(beta, fi)) stop(paste("Intensity approximation is not yet available for", inte$name), call.=FALSE) } PoisSaddlePairwise <- function(beta, fi) { inte <- as.interact(fi) Mayer <- inte$Mayer if(is.null(Mayer)) stop(paste("Sorry, not yet implemented for", inte$name)) # interaction coefficients co <- with(fi, coefs[Vnames[!IsOffset]]) # compute second Mayer cluster integral G <- Mayer(co, inte) if(is.null(G) || !is.finite(G)) stop("Internal error in computing Mayer cluster integral") if(G < 0) stop(paste("Unable to apply Poisson-saddlepoint approximation:", "Mayer cluster integral is negative")) ## solve if(is.im(beta)) { lambda <- if(G == 0) beta else eval.im(LambertW(G * beta)/G) 
} else { lambda <- if(G == 0) beta else (LambertW(G * beta)/G) if(length(lambda) == 1) lambda <- unname(lambda) } return(lambda) } # Lambert's W-function LambertW <- local({ yexpyminusx <- function(y,x){y*exp(y)-x} W <- function(x) { result <- rep.int(NA_real_, length(x)) ok <- is.finite(x) & (x >= 0) if(requireNamespace("gsl", quietly=TRUE)) { result[ok] <- gsl::lambert_W0(x[ok]) } else { for(i in which(ok)) result[i] <- uniroot(yexpyminusx, c(0, x[i]), x=x[i])$root } return(result) } W }) PoisSaddleGeyer <- local({ PoisSaddleGeyer <- function(beta, fi) { gamma <- summary(fi)$sensible$param$gamma if(gamma == 1) return(beta) inte <- as.interact(fi) sat <- inte$par$sat R <- inte$par$r #' get probability distribution of Geyer statistic under reference model z <- Spatstat.Geyer.Nulldist # from sysdata if(is.na(m <- match(sat, z$sat))) stop(paste("Sorry, the Poisson-saddlepoint approximation", "is not implemented for Geyer models with sat =", sat), call.=FALSE) probmatrix <- z$prob[[m]] maxachievable <- max(which(colSums(probmatrix) > 0)) - 1 gammarange <- sort(c(1, gamma^maxachievable)) #' apply approximation betavalues <- beta[] nvalues <- length(betavalues) lambdavalues <- numeric(nvalues) for(i in seq_len(nvalues)) { beta.i <- betavalues[i] ra <- beta.i * gammarange lambdavalues[i] <- uniroot(diffapproxGeyer, ra, beta=beta.i, gamma=gamma, R=R, sat=sat, probmatrix=probmatrix)$root } #' return result in same format as 'beta' lambda <- beta lambda[] <- lambdavalues if(length(lambda) == 1) lambda <- unname(lambda) return(lambda) } diffapproxGeyer <- function(lambda, beta, gamma, R, sat, probmatrix) { lambda - approxEpoisGeyerT(lambda, beta, gamma, R, sat, probmatrix) } approxEpoisGeyerT <- function(lambda, beta=1, gamma=1, R=1, sat=1, probmatrix) { #' Compute approximation to E_Pois(lambda) Lambda(0,X) for Geyer #' ('probmatrix' contains distribution of geyerT(0, Z_n) for each n, #' where 'sat' is given, and Z_n is runifdisc(n, radius=2*R). 
possT <- 0:(ncol(probmatrix)-1) possN <- 0:(nrow(probmatrix)-1) pN <- dpois(possN, lambda * pi * (2*R)^2) EgamT <- pN %*% probmatrix %*% (gamma^possT) #' assume that, for n > max(possN), #' distribution of T is concentrated on T=sat EgamT <- EgamT + (gamma^sat) * (1-sum(pN)) return(beta * EgamT) } PoisSaddleGeyer }) PoisSaddleArea <- local({ PoisSaddleArea <- function(beta, fi) { eta <- summary(fi)$sensible$param$eta if(eta == 1) return(beta) etarange <- range(c(eta^2, 1.1, 0.9)) inte <- as.interact(fi) R <- inte$par$r #' reference distribution of canonical sufficient statistic zeroprob <- Spatstat.Area.Zeroprob areaquant <- Spatstat.Area.Quantiles # expectation of eta^A_n for each n = 0, 1, .... EetaAn <- c(1/eta, zeroprob + (1-zeroprob) * colMeans((eta^(-areaquant)))) #' compute approximation betavalues <- beta[] nvalues <- length(betavalues) lambdavalues <- numeric(nvalues) for(i in seq_len(nvalues)) { beta.i <- betavalues[i] ra <- beta.i * etarange lambdavalues[i] <- uniroot(diffapproxArea, ra, beta=beta.i, eta=eta, r=R, EetaAn=EetaAn)$root } #' return result in same format as 'beta' lambda <- beta lambda[] <- lambdavalues if(length(lambda) == 1) lambda <- unname(lambda) return(lambda) } diffapproxArea <- function(lambda, beta, eta, r, EetaAn) { lambda - approxEpoisArea(lambda, beta, eta, r, EetaAn) } approxEpoisArea <- function(lambda, beta=1, eta=1, r=1, EetaAn) { #' Compute approximation to E_Pois(lambda) Lambda(0,X) for AreaInter mu <- lambda * pi * (2*r)^2 zeta <- pi^2/2 - 1 theta <- -log(eta) zetatheta <- zeta * theta #' contribution from tabulated values Nmax <- length(EetaAn) - 1L possN <- 0:Nmax qN <- dpois(possN, mu) # expectation of eta^A when N ~ poisson (truncated) EetaA <- sum(qN * EetaAn) #' asymptotics for quite large n Nbig <- qpois(0.999, mu) qn <- 0 if(Nbig > Nmax) { n <- (Nmax+1):Nbig #' asymptotic mean uncovered area conditional on this being positive mstarn <- (16/((n+3)^2)) * exp(n * (1/4 - log(4/3))) ztm <- zetatheta * mstarn ok <- (ztm 
< 1) if(!any(ok)) { Nbig <- Nmax qn <- 0 } else { if(!all(ok)) { Nbig <- max(which(!ok)) - 1 n <- (Nmax+1):Nbig ztm <- ztm[1:((Nbig-Nmax)+1)] } qn <- dpois(n, mu) #' asymptotic probability of complete coverage pstarn <- 1 - pmin(1, 3 * (1 + n^2/(16*pi)) * exp(-n/4)) Estarn <- (1 - ztm)^(-1/zeta) EetaA <- EetaA + sum(qn * (pstarn + (1-pstarn) * Estarn)) } } #' for very large n, assume complete coverage, so A = 0 EetaA <- EetaA + 1 - sum(qN) - sum(qn) return(beta * eta * EetaA) } PoisSaddleArea }) spatstat/R/iplot.R0000755000176200001440000002660113115271075013602 0ustar liggesusers# # interactive plot for ppp objects using rpanel # # $Revision: 1.23 $ $Date: 2017/02/07 07:47:20 $ # # # Effect: # when the user types # iplot(x) # a pop-up panel displays a standard plot of x and # buttons allowing control of the plot parameters. # Coding: # The panel 'p' contains the following internal variables # x Original point pattern # w Window of point pattern # xname Name of x (for main title) # mtype Type of marks of x # bb frame of x # bbmid midpoint of frame # The following variables in 'p' are controlled by panel buttons etc # split Logical: whether to split multitype pattern # pointmap Plot character, or "marks" indicating that marks are used # zoomfactor Zoom factor # zoomcentre Centre point for zoom # charsize Character expansion factor cex # markscale Mark scale factor markscale # iplot <- function(x, ...) 
{ UseMethod("iplot") } iplot.ppp <- local({ iplot.ppp <- function(x, ..., xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) verifyclass(x, "ppp") if(markformat(x) %in% c("hyperframe", "list")) marks(x) <- as.data.frame(as.hyperframe(marks(x))) if(markformat(x) == "dataframe" && ncol(marks(x)) > 1) { warning("Using only the first column of marks") marks(x) <- marks(x)[,1L] } mtype <- if(is.multitype(x)) "multitype" else if(is.marked(x)) "marked" else "unmarked" bb <- as.rectangle(as.owin(x)) bbmid <- unlist(centroid.owin(bb)) ## kraever("rpanel") ## p <- rpanel::rp.control(paste("iplot(", xname, ")", sep=""), x=x, w=as.owin(x), xname=xname, mtype=mtype, bb=bb, bbmid=bbmid, split=FALSE, pointmap=if(is.marked(x)) "marks" else "o", zoomfactor=1, zoomcentre=bbmid, size=c(700, 400)) # Split panel into three # Left: plot controls # Middle: data # Right: navigation/zoom rpanel::rp.grid(p, "gcontrols", pos=list(row=0,column=0)) rpanel::rp.grid(p, "gdisplay", pos=list(row=0,column=1)) rpanel::rp.grid(p, "gnavigate", pos=list(row=0,column=2)) #----- Data display ------------ # This line is to placate the package checker mytkr <- NULL # Create data display panel rpanel::rp.tkrplot(p, mytkr, plotfun=do.iplot.ppp, action=click.iplot.ppp, pos=list(row=0,column=0,grid="gdisplay")) #----- Plot controls ------------ nextrow <- 0 pozzie <- function(n=nextrow, ...) append(list(row=n,column=0,grid="gcontrols"), list(...)) # main title rpanel::rp.textentry(p, xname, action=redraw.iplot.ppp, title="Plot title", pos=pozzie(0)) nextrow <- 1 # split ? 
if(mtype == "multitype") { rpanel::rp.checkbox(p, split, initval=FALSE, title="Split according to marks", action=redraw.iplot.ppp, pos=pozzie(1)) nextrow <- 2 } # plot character or mark style ptvalues <- c("o", "bullet", "plus") ptlabels <- c("open circles", "filled circles", "crosshairs") if(is.marked(x)) { ptvalues <- c("marks", ptvalues) ptlabels <- if(mtype == "multitype") c("Symbols depending on mark", ptlabels) else c("Circles proportional to mark", ptlabels) } pointmap <- ptvalues[1L] rpanel::rp.radiogroup(p, pointmap, vals=ptvalues, labels=ptlabels, title="how to plot points", action=redraw.iplot.ppp, pos=pozzie(nextrow)) nextrow <- nextrow+1 # plot character size charsize <- 1 rpanel::rp.slider(p, charsize, 0, 5, action=redraw.iplot.ppp, title="symbol expansion factor (cex)", initval=1, showvalue=TRUE, pos=pozzie(nextrow, sticky="")) nextrow <- nextrow+1 # mark scale if(mtype == "marked") { marx <- x$marks marx <- marx[is.finite(marx)] scal <- mark.scale.default(marx, x$window) markscale <- scal rpanel::rp.slider(p, markscale, from=scal/10, to = 10*scal, action=redraw.iplot.ppp, initval=scal, title="mark scale factor (markscale)", showvalue=TRUE, pos=pozzie(nextrow)) nextrow <- nextrow+1 } # button to print a summary at console rpanel::rp.button(p, title="Print summary information", pos=pozzie(nextrow), action=function(panel) { print(summary(panel$x)); panel} ) # #----- Navigation controls ------------ nextrow <- 0 navpos <- function(n=nextrow,cc=0, ...) 
append(list(row=n,column=cc,grid="gnavigate"), list(...)) rpanel::rp.button(p, title="Up", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2L] stepsize <- (height/4)/zo panel$zoomcentre <- ce + c(0, stepsize) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Left", pos=navpos(nextrow,0,sticky="w"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1L] stepsize <- (width/4)/zo panel$zoomcentre <- ce - c(stepsize, 0) CommitAndRedraw(panel) return(panel) }) rpanel::rp.button(p, title="Right", pos=navpos(nextrow,2,sticky="e"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1L] stepsize <- (width/4)/zo panel$zoomcentre <- ce + c(stepsize, 0) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Down", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2L] stepsize <- (height/4)/zo panel$zoomcentre <- ce - c(0, stepsize) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Zoom In", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor * 2 CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Zoom Out", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor / 2 CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Reset", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- 1 panel$zoomcentre <- panel$bbmid CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Redraw", pos=navpos(nextrow,1,sticky=""), action=redraw.iplot.ppp) nextrow 
<- nextrow+1 # quit button rpanel::rp.button(p, title="Quit", quitbutton=TRUE, pos=navpos(nextrow, 1, sticky=""), action= function(panel) { panel }) invisible(NULL) } # Function to redraw the whole shebang redraw.iplot.ppp <- function(panel) { rpanel::rp.tkrreplot(panel, mytkr) panel } # Function executed when data display is clicked click.iplot.ppp <- function(panel, x, y) { if(panel$split) { cat("Mouse interaction is not supported when the point pattern is split\n") } else { panel$zoomcentre <- panel$zoomcentre + (c(x,y) - panel$bbmid)/panel$zoomfactor CommitAndRedraw(panel) } return(panel) } # function that updates the plot when the control panel is operated do.iplot.ppp <- function(panel) { use.marks <- TRUE pch <- 16 switch(panel$pointmap, marks={ use.marks <- TRUE pch <- NULL }, o = { use.marks <- FALSE pch <- 1 }, bullet = { use.marks <- FALSE pch <- 16 }, plus = { use.marks <- FALSE pch <- 3 }) # scale and clip the pattern x <- panel$x w <- panel$w z <- panel$zoomfactor if(is.null(z)) z <- 1 ce <- panel$zoomcentre bb <- panel$bb bbmid <- panel$bbmid scalex <- shift(scalardilate(shift(x, -ce), z), bbmid) scalew <- shift(scalardilate(shift(w, -ce), z), bbmid) scalex <- scalex[, bb] scalew <- intersect.owin(scalew, bb, fatal=FALSE) # determine what is plotted under the clipped pattern blankargs <- list(type="n") dashargs <- list(lty=3, border="red") panel.begin <- if(is.null(scalew)) { # empty intersection; just create the plot space layered(bb, plotargs=list(blankargs)) } else if(identical(bb, scalew)) { if(z == 1) { # original state # window is rectangular # plot the data window as a solid black rectangle layered(bb, scalew, plotargs=list(blankargs, list(lwd=2))) } else { # zoom view is entirely inside window # plot the clipping region as a red dashed rectangle layered(bb, plotargs=list(dashargs)) } } else { # field of view is not a subset of window # plot the clipping region as a red dashed rectangle # Then add the data window layered(bb, scalew, 
plotargs=list(dashargs, list(invert=TRUE))) } # draw it # opa <- par(ask=FALSE) if(panel$mtype == "multitype" && panel$split) { scalex <- split(scalex, un=(panel$pointmap != "marks")) plot(scalex, main=panel$xname, use.marks=use.marks, pch=pch, cex=panel$charsize, panel.begin=panel.begin) } else { # draw scaled & clipped window plot(panel.begin, main=panel$xname) # add points if(panel$mtype == "marked" && panel$pointmap == "marks") { plot(scalex, add=TRUE, use.marks=use.marks, markscale=panel$markscale) } else { plot(scalex, add=TRUE, use.marks=use.marks, pch=pch, cex=panel$charsize) } } # par(opa) panel } CommitAndRedraw <- function(panel) { # hack to ensure that panel is immediately updated in rpanel kraever("rpanel") ## This is really a triple-colon! rpanel:::rp.control.put(panel$panelname, panel) # now redraw it redraw.iplot.ppp(panel) } iplot.ppp }) spatstat/R/mincontrast.R0000755000176200001440000007344413115271120015012 0ustar liggesusers# # mincontrast.R # # Functions for estimation by minimum contrast # ################## base ################################ mincontrast <- local({ ## objective function (in a format that is re-usable by other code) contrast.objective <- function(par, objargs, ...) { with(objargs, { theo <- theoretical(par=par, rvals, ...) 
if(!is.vector(theo) || !is.numeric(theo)) stop("theoretical function did not return a numeric vector") if(length(theo) != nrvals) stop("theoretical function did not return the correct number of values") if(!is.null(adjustment)) { theo <- adjustment$fun(theo=theo, par=par, auxdata=adjustment$auxdata) if(!is.vector(theo) || !is.numeric(theo)) stop("adjustment did not return a numeric vector") if(length(theo) != nrvals) stop("adjustment did not return the correct number of values") } discrep <- (abs(theo^qq - obsq))^pp value <- mean(discrep) value <- min(value, .Machine$double.xmax) return(value) }) } mincontrast <- function(observed, theoretical, startpar, ..., ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL), fvlab=list(label=NULL, desc="minimum contrast fit"), explain=list(dataname=NULL, modelname=NULL, fname=NULL), adjustment=NULL) { verifyclass(observed, "fv") stopifnot(is.function(theoretical)) if(!any("par" %in% names(formals(theoretical)))) stop(paste("Theoretical function does not include an argument called", sQuote("par"))) ## enforce defaults ctrl <- resolve.defaults(ctrl, list(q = 1/4, p = 2, rmin=NULL, rmax=NULL)) fvlab <- resolve.defaults(fvlab, list(label=NULL, desc="minimum contrast fit")) explain <- resolve.defaults(explain, list(dataname=NULL, modelname=NULL, fname=NULL)) ## extract vector of r values argu <- fvnames(observed, ".x") rvals <- observed[[argu]] ## determine range of r values rmin <- ctrl$rmin rmax <- ctrl$rmax if(!is.null(rmin) && !is.null(rmax)) stopifnot(rmin < rmax && rmin >= 0) else { alim <- attr(observed, "alim") %orifnull% range(rvals) if(is.null(rmax)) rmax <- alim[2] if(is.null(rmin)) { rmin <- alim[1] if(rmin == 0 && identical(explain$fname,"g")) rmin <- rmax/1e3 # avoid artefacts at zero in pcf } } ## extract vector of observed values of statistic valu <- fvnames(observed, ".y") obs <- observed[[valu]] ## restrict to [rmin, rmax] if(max(rvals) < rmax) stop(paste("rmax=", signif(rmax,4), "exceeds the range of available data", 
"= [", signif(min(rvals),4), ",", signif(max(rvals),4), "]")) sub <- (rvals >= rmin) & (rvals <= rmax) rvals <- rvals[sub] obs <- obs[sub] ## sanity clause if(!all(ok <- is.finite(obs))) { whinge <- paste("Some values of the empirical function", sQuote(explain$fname), "were infinite or NA.") iMAX <- max(which(ok)) iMIN <- min(which(!ok)) + 1 if(iMAX > iMIN && all(ok[iMIN:iMAX])) { rmin <- rvals[iMIN] rmax <- rvals[iMAX] obs <- obs[iMIN:iMAX] rvals <- rvals[iMIN:iMAX] sub[sub] <- ok warning(paste(whinge, "Range of r values was reset to", prange(c(rmin, rmax))), call.=FALSE) } else stop(paste(whinge, "Please choose a narrower range [rmin, rmax]"), call.=FALSE) } ## pack data into a list objargs <- list(theoretical = theoretical, rvals = rvals, nrvals = length(rvals), obsq = obs^(ctrl$q), ## for efficiency qq = ctrl$q, pp = ctrl$p, rmin = rmin, rmax = rmax, adjustment = adjustment) ## go minimum <- optim(startpar, fn=contrast.objective, objargs=objargs, ...) ## if convergence failed, issue a warning signalStatus(optimStatus(minimum), errors.only=TRUE) ## evaluate the fitted theoretical curve fittheo <- theoretical(minimum$par, rvals, ...) ## pack it up as an `fv' object label <- fvlab$label %orifnull% "%s[fit](r)" desc <- fvlab$desc fitfv <- bind.fv(observed[sub, ], data.frame(fit=fittheo), label, desc) if(!is.null(adjustment)) { adjtheo <- adjustment$fun(theo=fittheo, par=minimum$par, auxdata=adjustment$auxdata) fitfv <- bind.fv(fitfv, data.frame(adjfit=adjtheo), "%s[adjfit](r)", paste("adjusted", desc)) } result <- list(par = minimum$par, fit = fitfv, opt = minimum, ctrl = list(p=ctrl$p,q=ctrl$q,rmin=rmin,rmax=rmax), info = explain, startpar = startpar, objfun = contrast.objective, objargs = objargs, dotargs = list(...)) class(result) <- c("minconfit", class(result)) return(result) } mincontrast }) print.minconfit <- function(x, ...) 
{ terselevel <- spatstat.options('terse') digits <- getOption('digits') ## explanatory cat(paste("Minimum contrast fit ", "(", "object of class ", dQuote("minconfit"), ")", "\n", sep="")) mo <- x$info$modelname fu <- x$info$fname da <- x$info$dataname cm <- x$covmodel if(!is.null(mo)) cat("Model:", mo, fill=TRUE) if(!is.null(cm)) { ## Covariance/kernel model and nuisance parameters cat("\t", cm$type, "model:", cm$model, fill=TRUE) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\t", cm$type, "parameters:", paste(tagvalue, collapse=", ")) } } if(!is.null(fu) && !is.null(da)) splat("Fitted by matching theoretical", fu, "function to", da) else { if(!is.null(fu)) splat(" based on", fu) if(!is.null(da)) splat(" fitted to", da) } if(waxlyrical('space', terselevel)) cat("\n") ## Values splat("Internal parameters fitted by minimum contrast ($par):") print(x$par, ...) if(waxlyrical('space', terselevel)) cat("\n") ## Handling new parameters isPCP <- x$isPCP %orifnull% x$internal$model!="lgcp" cpar <- x$clustpar if (!is.null(cpar)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(cpar, digits=digits) } else{ ## Old modelpar field if necessary mp <- x$modelpar if(!is.null(mp)) { splat("Derived parameters of", if(!is.null(mo)) mo else "model", "($modelpar):") print(mp) } } if(!is.null(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]") } else { splat("Fitted mean of log of random intensity:", if(!is.im(mu)) signif(mu, digits) else "[pixel image]") } } if(waxlyrical('space', terselevel)) cat("\n") ## Diagnostics printStatus(optimStatus(x$opt)) ## Starting values if(waxlyrical('gory', terselevel)){ cat("\n") splat("Starting values of parameters:") print(x$startpar) ## Algorithm parameters ct <- x$ctrl splat("Domain of integration:", "[", signif(ct$rmin,4), ",", 
signif(ct$rmax,4), "]") splat("Exponents:", "p=", paste(signif(ct$p, 3), ",", sep=""), "q=", signif(ct$q,3)) } invisible(NULL) } plot.minconfit <- function(x, ...) { xname <- short.deparse(substitute(x)) do.call(plot.fv, resolve.defaults(list(x$fit), list(...), list(main=xname))) } unitname.minconfit <- function(x) { unitname(x$fit) } "unitname<-.minconfit" <- function(x, value) { unitname(x$fit) <- value return(x) } as.fv.minconfit <- function(x) x$fit ###### convergence status of 'optim' object optimStatus <- function(x, call=NULL) { cgce <- x$convergence neval <- x$counts[["function"]] switch(paste(cgce), "0" = { simpleMessage( paste("Converged successfully after", neval, "function evaluations"), call) }, "1" = simpleWarning( paste("Iteration limit maxit was reached after", neval, "function evaluations"), call), "10" = simpleWarning("Nelder-Mead simplex was degenerate", call), "51"= { simpleWarning( paste("Warning message from L-BGFS-B method:", sQuote(x$message)), call) }, "52"={ simpleError( paste("Error message from L-BGFS-B method:", sQuote(x$message)), call) }, simpleWarning(paste("Unrecognised error code", cgce), call) ) } signalStatus <- function(x, errors.only=FALSE) { stopifnot(inherits(x, "condition")) if(inherits(x, "error")) stop(x) if(inherits(x, "warning")) warning(x) if(inherits(x, "message") && !errors.only) message(x) return(invisible(NULL)) } printStatus <- function(x, errors.only=FALSE) { prefix <- if(inherits(x, "error")) "error: " else if(inherits(x, "warning")) "warning: " else NULL if(!is.null(prefix) || !errors.only) cat(paste(prefix, conditionMessage(x), "\n", sep="")) return(invisible(NULL)) } accumulateStatus <- function(x, stats=NULL) { if(is.null(stats)) stats <- list(values=list(), frequencies=integer(0)) if(!inherits(x, c("error", "warning", "message"))) return(stats) with(stats, { same <- unlist(lapply(values, identical, y=x)) if(any(same)) { i <- min(which(same)) frequencies[i] <- frequencies[i] + 1 } else { values <- 
append(values, list(x)) frequencies <- c(frequencies, 1) } }) stats <- list(values=values, frequencies=frequencies) return(stats) } printStatusList <- function(stats) { with(stats, { for(i in seq_along(values)) { printStatus(values[i]) cat(paste("\t", paren(paste(frequencies[i], "times")), "\n")) } } ) invisible(NULL) } ############### applications (specific models) ################## getdataname <- function(defaultvalue, ..., dataname=NULL) { if(!is.null(dataname)) dataname else defaultvalue } thomas.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Thomas") startpar <- info$checkpar(startpar) theoret <- info$K result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Thomas process"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Thomas process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "sigma2") result$par <- par ## infer meaningful model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Thomas") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } lgcp.estK <- function(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("LGCP") startpar <- info$checkpar(startpar) ## digest parameters of Covariance model and test validity ph <- info$parhandler cmodel <- do.call(ph, covmodel) theoret <- info$K result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of LGCP"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="log-Gaussian Cox process"), ..., model=cmodel$model, margs=cmodel$margs) ## imbue with meaning par <- result$par names(par) <- c("sigma2", "alpha") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="lgcp") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } matclust.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("MatClust") startpar <- info$checkpar(startpar) theoret <- info$K funaux <- info$funaux result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Matern Cluster process"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Matern Cluster process"), ..., funaux=funaux) ## imbue with meaning par <- result$par names(par) <- c("kappa", "R") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="MatClust") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } ## versions using pcf (suggested by Jan Wild) thomas.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()){ dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Thomas") startpar <- info$checkpar(startpar) theoret <- info$pcf ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list( label="%s[fit](r)", desc="minimum contrast fit of Thomas process"), explain=list( dataname=dataname, fname=attr(g, "fname"), modelname="Thomas process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "sigma2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Thomas") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } matclust.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()){ dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("MatClust") startpar <- info$checkpar(startpar) theoret <- info$pcf funaux <- info$funaux ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Matern Cluster process"), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Matern Cluster process"), ..., funaux=funaux) ## imbue with meaning par <- result$par names(par) <- c("kappa", "R") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="MatClust") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } lgcp.estpcf <- function(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("LGCP") startpar <- info$checkpar(startpar) ## digest parameters of Covariance model and test validity ph <- info$parhandler cmodel <- do.call(ph, covmodel) theoret <- info$pcf result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of LGCP"), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="log-Gaussian Cox process"), ..., model=cmodel$model, margs=cmodel$margs) ## imbue with meaning par <- result$par names(par) <- c("sigma2", "alpha") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="lgcp") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } cauchy.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { ## omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## eta = 2 * omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Cauchy") startpar <- info$checkpar(startpar) theoret <- info$K desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel" result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Cauchy process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Cauchy") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } cauchy.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { ## omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## eta = 2 * omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Cauchy") startpar <- info$checkpar(startpar) theoret <- info$pcf ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel" result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Cauchy process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Cauchy") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } ## user-callable resolve.vargamma.shape <- function(..., nu.ker=NULL, nu.pcf=NULL, default = FALSE) { if(is.null(nu.ker) && is.null(nu.pcf)){ if(!default) stop("Must specify either nu.ker or nu.pcf", call.=FALSE) nu.ker <- -1/4 } if(!is.null(nu.ker) && !is.null(nu.pcf)) stop("Only one of nu.ker and nu.pcf should be specified", call.=FALSE) if(!is.null(nu.ker)) { check.1.real(nu.ker) stopifnot(nu.ker > -1/2) nu.pcf <- 2 * nu.ker + 1 } else { check.1.real(nu.pcf) stopifnot(nu.pcf > 0) nu.ker <- (nu.pcf - 1)/2 } return(list(nu.ker=nu.ker, nu.pcf=nu.pcf)) } vargamma.estK <- function(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) 
{ ## nu.ker: smoothness parameter of Variance Gamma kernel function ## omega: scale parameter of kernel function ## nu.pcf: smoothness parameter of Variance Gamma pair correlation function ## eta: scale parameter of Variance Gamma pair correlation function ## nu.pcf = 2 * nu.ker + 1 and eta = omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") ## Catch old nu.ker/nu.pcf syntax and resolve nu-value. dots <- list(...) if(missing(nu)){ nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf, default = TRUE)$nu.ker } check.1.real(nu) stopifnot(nu > -1/2) info <- spatstatClusterModelInfo("VarGamma") startpar <- info$checkpar(startpar) theoret <- info$K ## test validity of parameter nu and digest ph <- info$parhandler cmodel <- ph(nu.ker=nu) margs <- cmodel$margs desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel" result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Variance Gamma process"), margs=margs, ...) 
## imbue with meaning par <- result$par names(par) <- c("kappa", "eta") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="VarGamma") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } vargamma.estpcf <- function(X, startpar=c(kappa=1,scale=1), nu=-1/4, lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { ## nu.ker: smoothness parameter of Variance Gamma kernel function ## omega: scale parameter of kernel function ## nu.pcf: smoothness parameter of Variance Gamma pair correlation function ## eta: scale parameter of Variance Gamma pair correlation function ## nu.pcf = 2 * nu.ker + 1 and eta = omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") ## Catch old nu.ker/nu.pcf syntax and resolve nu-value. dots <- list(...) 
if(missing(nu)){ ## nutmp <- try(resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker, silent=TRUE) ## if(!inherits(nutmp, "try-error")) nu <- nutmp nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf, default = TRUE)$nu.ker } check.1.real(nu) stopifnot(nu > -1/2) info <- spatstatClusterModelInfo("VarGamma") startpar <- info$checkpar(startpar) theoret <- info$pcf ## test validity of parameter nu and digest ph <- info$parhandler cmodel <- ph(nu.ker=nu) margs <- cmodel$margs ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel" result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Variance Gamma process"), margs=margs, ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="VarGamma") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } spatstat/R/digestCovariates.R0000644000176200001440000000401613115225157015744 0ustar liggesusers#' #' digestCovariates.R #' #' $Revision: 1.2 $ $Date: 2017/01/26 00:22:14 $ #' is.scov <- function(x) { #' Determines whether x is a valid candidate for a spatial covariate #' A spatial object is OK if it can be coerced to a function if(inherits(x, c("im", "funxy", "owin", "tess", "ssf", "leverage.ppm"))) return(TRUE) #' A function(x,y,...) 
is OK if(is.function(x) && identical(names(formals(x))[1:2], c("x", "y"))) return(TRUE) #' A single character "x" or "y" is OK if(is.character(x) && length(x) == 1 && (x %in% c("x", "y"))) return(TRUE) #' Can't handle input return(FALSE) } ## Assumes each input (besides W) is a single covariate or a list of covariates ## Returns a `solist` with possibly a unitname attribute digestCovariates <- function(..., W = NULL) { x <- list(...) #' Find individual covariates in list valid <- sapply(x, is.scov) covs <- x[valid] #' The remaining entries are assumed to be lists of covariates #' so we unlist them x <- unlist(x[!valid], recursive = FALSE) valid <- sapply(x, is.scov) if(!all(valid)) stop("Couldn't interpret all input as spatial covariates.") covs <- append(covs, x) if(any(needW <- !sapply(covs, is.sob))) { if(is.null(W)){ boxes <- sapply(covs[!needW], Frame, fatal = FALSE) W <- do.call(boundingbox, boxes) } else stopifnot(is.owin(W)) } covunits <- vector("list", length(covs)) # Now covs is a list of valid covariates we can loop through for(i in seq_along(covs)){ covar <- covs[[i]] if(inherits(covar, "distfun")) covunits[[i]] <- unitname(covar) if(is.character(covar) && length(covar) == 1 && (covar %in% c("x", "y"))) { covar <- if(covar == "x"){ function(x,y) { x } } else{ function(x,y) { y } } covunits[[i]] <- unitname(W) } if(is.function(covar) && !inherits(covar, "funxy")){ covar <- funxy(f = covar, W = W) } covs[[i]] <- covar } covs <- as.solist(covs) attr(covs, "covunits") <- covunits return(covs) } spatstat/R/sysdata.rda0000644000176200001440000027675413166361223014506 0ustar liggesusers7zXZi"6!X4W])TW"nRʟx8Og0񢀜0^bv#<Q:#+uH\N1/ڋpg) ^/\љ{"2ruKfP l0mZ$#F?,>D/ٞWv0rb]i.+x d*0m$o |o2BuJb*8Ǔ2~mvmvuSǣsQPS^lNj`(OhtO ̩H۱SޒѡE5A$)q|ƠH).q(Rfiϐ,Z3:'Ӎ";aY}L϶L]{AgeKF)*D xB:&h-ceP8:h޴ڴ'4F4Ej0Q㮥Ћ,֍k* Q["4Kנ QVWNn푼9fأ)M66gF%t {zQx_z ji-Zhe9#|IpuYu>ɺXv4& BY9.o"zv~:w8e4 ݨDE!w@=Te˝&#c>)m_Z}zuE9=%s/Tㅘm .=l,0Y2_II;<,u *%=Lم/"1= F P`''gJkpȉv9Cc E>wc ϮX jIPī)C@{OaR( 
n_q4gjqk]9S2WLD4Qd4D=_$ku%4xH9ZB;4^N2JSըhmϸ\QXOs%BSf,)OdicYX5A;T!0mX]}Z{ ksX)ST}*BJcAm#P9IFrvdI0"9Q"ūt LȇKevIr8{iqsa\"y\VMÜ{ؑCnUm]m $h K +?ʀ#(S}h7wZC~zf:OVJ;tf0ݡŦCsñk@&ӭ&[^$fQu0Q c4Te|s׺uA#QV*".[{'*-ˊCB(_:Xb7.]oKV@;w.hۨU*Ņxυ)~7 ӿ?,|ke1T$ZLT "ig.K=*=m4d^x;2#%d <6&g)߰hj⑰&;^hd2uia汱ˆ{pQw0 t Oh(!zooǘdۊTGEL a@oF8ZK8DP9<lE.Ks@C<3 {7)_Cwi_mYjCvU~Ip0L嶂 z>ss|'TJ`H ) v-j&AfQ警ƩW5gRw(ǎthgZ.yV cU,Ӳ51Ã4 R*Ѻ0_C[?~ה/j'8rc۝g!{;qmܰ܏8=YIv ę3 ѧJDdmh4o+e6OاD-KJev+4&CƽUsPޑ*a;22 N\ Vx{e2[X#v*$jpQ_6QT"Xg.O^a^GCͪw8RtFVS.TɂLV02a%$M8)0n^v̜9RCpHT )=h aU OLa5]¿ ;Qco`\XB֟(leߴ$&_<)q ib@V9Aܥx! |p|rk2e9b×N-җ9"'uX86Q+wm=g Rj`AeᑣӾ($ Dmޓc>WD#{DWCOPi?$ڮ+Y?sCt4N(74/d؋Y-R>NgvvgOS襐=_ؚ aE89=bv1-<2 )M38* Y.RQ/Ă~F_B|O##-'p0O@O"R!Nv yfԅ91"bAz ^3!{ ](x !,b^u>z2b")Z4..pfIiEm֌p}N[y,ߢ:ƌB|k-mIT85X̄+RFP*]U i#+/h~|qN̍W6j>_zrҽ -j,I/A@g9,>l VlsC$&>^ Yq2)`Gdtz-ńQv{MdP/fGp&T 38@T>o*K#g?OhO$/jKa# v%i\p<w(mB6K?6<}LFYޢڰ=IaJHCW%WM5 v(˗+;#ͺJ_Jڴb7:d=7ػW(VIN48*_]eTF["Whr'%Fd=]rC>p^z<`^)H xwʪCrTI~˃ޑk8tF:z1QDW܃a OZW/4xs56%a=*r;OxR}r"ۈ &-Ia|ՌH-0gqvJ\{3 E5} Z0oGZk b>"ol!SiѴt"1s/8?I;n^TZUh2{^po¤qbKktDRmOI_way;wp0:HB\M_anupi_ms1泹7 F0 ض#.ѝz׋w;@8~N|+;pVhऊKڙ'migdjpNųO-5I4(+ cqe, TPj~GIl8/􂯒l^`;eKo{bg%^5\dюɲ,d|> 6lzhĕgJ ;jQ?~WAKa)~v=434%k\IRu=5R#vL F%Q-)y-FibY=Wy&Ү1_z۽v3dn)j؀ ]׼6 ggg ʹ{4O"ޒӟ:F^}B}_jxQkPn*O#=L4DOj e{Y0MW*%/p&u[}TCcH0D]Ϻ"U0 Q"Kώa3uNNqd(u@ 'eZ"zn!<$}iEbG|wlba{mV"U)$][^j%d/㽕G\ǖ8r-hc^,.U]Zlw v!8cS\bpd+uL\!p9@Qf^gW2ynG5',KM/=rA9 8(f"b68GCo%IU߇;p"GP< T`Xbl6=9a;bJȑT{p (6$3'5-y'`wvH`%_8]H~xgM*8 a0AkjO K2h#۹bLzPiuyZ s5Z`X>j31rC 銿I>[KQ9*kl׵)DګsH{J&ye쨬)Q+lo8Pg!o\pF%# S}w)ң<,K5l̵|$Wš(m9,}ˠQg Ldc8f 0tK78+Nnx1(7-OYqϬ% Zyv ϘRE,)|;㷅vΎ5zCO=‚n5\ݥd|C1MBW4l:EGWj%Wa}6iZd<69-8gNz37k6b07}ƟPT/Bs5&3nWO ;W򘖲r6=F9vsy+uû(:p ~y3`moVЌ7e=GA7[vHA-R5,)[.<$7X .fAQ:@@PE`;vw)Y,hPIg1f{a-`|x^C"Gԕl p)pMt0e!*!׳viW/^)k%5\vn .`gղtiܷA-?) Tyk]?EY^Z\sĝK*#iH2HFp6xq `ziU]. 
"4IS#9(:j[&Ig,ƷgM :P039f dO_c$Kpb~Q[E*gqJR˒fD&+=ND̲X0ҋloD 60UUdW=4rMDr=ޭ}0]YdkMX!DK)Dd-b1l\n܌:9)1ӱGbEGQBu O|ot: VW!e'Ty^ 'yZ;y,6}0TxNoDžH^ 찀?6e; D.%GzUhUxxoU3fc|#s$g52TXu3tm,aɜd@c3KabU moI|Ql_ Te[9`²aC]j+La*&fCS*g"=aۙ:p+mk3zCST~eWh`>d#oLlp]`J;ؿ#+>ɩ0$&(+ֳ͈O;aȔ X}G]efHBTv;pU|4J֒s@D.΢lY-8S闀n$5@ xnW_?%.1a6gw4Tͧ7bcAZBk_ u/\4(M>bH#c#O mm-EO-%`FamyP.T4$iE2'GHalU#C ]~j% MMQ"?*je o&EB*̔%j¡zu \uN5j1u=>6#l]c|vFJI~VHM35lZ9gZ[6#Ze.^󢓞E[(QE[-H- _Ye:B;DR}J-mozH)&;N!e%h&%|b78!" Tt]#jO]:py~GR·t8i#{I[p#AKyʋW;uSZ^Aw- K,Ȇ6 "MÝ%'kQr2DП8E|VG}{]JnlF2qr"O}vFȖ&/_V=w3\t"%(ɑ߆7#UwQ,''g:NϠ7?MЦ@4/-ƒMlO@ͿѠq6:UZ\*q#b9;άI($T \ʜԨ6_5dy9p`mrUդV*>|DgE u |y&iT"U?҉ \Hvo|4Eyr7_!1O3lʠܻ̂ 0gK<% R+rvŊb+Z\e&G(H,ߧsrݛ􇉔iͿQsn\HY%md'qso<К槖(5|^HKT uj~4c=ncDpBky/9_a4m\; 2lCӕę`i$TVbNuG Kh~ҒѶmdrbV3]i~_%+ I8j݃ᑶ(ׄV5)RfILEY+4bT`>ʮSW$5YQm@("WQ d%l~tl"iqu-G{q X|4jVϼz<Yo+w4~ <E*3%GI71eQnpjeDa:eVs Lx& Q@}/LMO ?xV~ E)2ȠE9o4]'Zi¾@^6Z%mӲϛkFV: +V}$A6m'6n|E xR Go<@M%{XKhAz>6} rQɽekj0{S_^έ RO,kH0uD;J7ZDHGTO\ yL—Jڻ30t7XBxI"h}FS`-LXgޅ6( gr_ ~&ynO.u_Йe^$u\5 5iS(υ3p*[2b xT:r-|XCզ!~yoUJ3OO$ە~xhAL*4$pcZ%X`׊Z.6y>'~ 7+rkx?~9Ru%Ses$@JC~6$ PkL ]܁C|a;\tfYsm# Ч=6n? Z7O2fOV/?@K2DC_BwO](!kD<}]rȨPA)(+dw̝4_'Q$δI#ɜa?{67i(C] $-yJ|vX(_ j| cn) -o,oJ9To;ŊZ46ZBk,avY%hU-t_-+ M34Gvo>af" q ?[LҸgv_+ w GOY~muU0/1VCQKOxd,WCE3<5meA4%z5N[DWj3u͎WX9-rx:NO EUag6Ws"Y;Z4<g>hC0?(5(a 40D);S:yYڠ⣥{YSR( 4SoE^w k=?9h`ۭ C3` |jqL 4# =e {6Y I_Ɋ 񫵕ZzĦգJ-/~@h[3c*}V.#s2JjdS08X'D)T "#18̃ $Q&0 ͫ o%VtZ= nBq~rp +('K4tQH/ܭz??=b G(tK/ns [6^[=Оx[gp0qFإr v}hB+e-Z?sfʱ6V+3u*!ɑ4mbPegO6~3;KEbG]i?gm9uI~"f7qa+*}b&SdӷkjzMU?5H 湥%؍x`H_j+^#ܮdFWC/qHg8n}ligc 6S`u&+t}/n%+_ <)tWKVw3$w^G @ VUP|,#jwT 'l`zdxyGwmw-1$pB6b9|m&:9`%'z4ěh!O{ҙ-n(ҖhM6SQX͞6Gɳ9Dִ!n `w2FNU@{NS}XT{Dư"atw;+ٶBH3yy[$Er:w\gtbRxsLϬݹɬ"6'eQ?Lˠtfӻ"x(lP/FV;C5qm2Xm?B6Áoc5d"? 
.Qۃҍ[,N}IƤ%eRzN\gQc1~wn ξ/cr«-p>;q|5]`G}ґ lA&z'4 d7Y5=-aK*%߰ĭPgMw|C֓j hUr, lu,*ӄ~\%GÊ +etDc 欮GE~ۮq4`L_;e}L)6x'!6d yw1a؝^o`Q 9Gt9UE$N7RxRFkB0W?r t݆ 9TA0d`IǕ[>P#9F/t80)T<nΗK:ch޺lbs2G`Pnvx3ݲC;!7*N +YZ ri4Z< H)Y/XܻVcmI]Ϭu^_2h/(?Q+25PN֖uOA*Gi@IEew\"9+l&+m”}^AWZ( wn em{S 6iĪ ,MAP9bɫ!Zyǹ@(Mf)ZbRQw.T%mv񗎷UD]d|/J=cǁ/QRRjDQ 炩VR{[^Ҵڱd4c-/IAĈ0Ğ~z,IPHi'*Kjck7G|x T:rL>$ih]N4Q^62=2kGTlXӱy.UGd`?NjMA:B`Gf'*nItc/wdyG<g@. t?&GU- \bD e<8XYt˴S\ ! pY5"͝ 祗ɏTO8FC- ־5HCĔg>("А- e8e ӏ=AV)cDimC= )SKoHzlcA0kߍCRb(BQ0%3ުAf_f? .Cm.M:h_ܙ1)WvyWZAl3vaD;77,xEFQ~ A5fZ@]eWзr1\ZN25~0UYq`-5]_^`N3r8e.[fj>mQ\lS8ESSI H |_7WÊe ( E+ )4*pE r}F3ҦMfG7iT Aeº,dYs١_j;{}} XcҦNW*{pG/ͫM}bje`ue,4+.2L05k e6!tjRpAHxUJ?pX]b,P$z$* Esġal'c95Ky dH3ƌ&X" TaɃO 2&zfq!2n->6%A";ԑdz&kL C,Vi`(MH1|&c o^Y [?C=1Iuu&cV{k{ubU(Aj5b4*y`QE ?~`3_bqzdxA#[d딺 aج? G¾kS[XPolVjzao0HENoj BŇ6bXL`!B !YG/c0:Nxq@8E;-9ʊRg /4%v 9v©8qɜ=CqB b%UfMיj;| /ՠ*T(: TUL9%;2 "pu 7yi^NR DVLS/[>efD8{\n%`Ba ñ&kO^W{rxOT 2hqnpTհlIDsW+){wЎk˳lFQ4#{R?j9y9*/xԕϥm{*nۥ ~;,iIֵȽ^v~z{z% A@8B."j"rQJ&Ӷvo}=âaIp\f霄MBu耋kwxM/P_H7;DzloS X-|3e 3 ("3\$ ˪)egb CEe]|Y~$*4\[=.kZW> z[纸;4Lɥ_t-=gt#-V ΟqMtwYV5g]a+ .Bn ~ ܖ沑fbچ7hHY#"f~> d⨱(Zec,S7 w> 3|۩ R$\Iߚ5Ы `%L oKHpMdZR-X!~IhT[€G]-*Vsi.aKEyDY6M[q^6QִF 6m~gN^I:ebwlwb'lW}xׂqAM&? 'jo/>(wi PCPpEu|$߉|f3d .r/a*әLmDnE~N7@LiⳊ;O$F4cݝ_ aH25}]!׎kwy E,) Qc:?uFt`/6fgDkZ)3s%s:u[y?$%{9rQw҇w5\o}'+MQ_UΨL.X m_kSNhT\Lg̜P@·Oµx0KWQ^Av`8RZXӰ.U:քq߿M~J3Bx)W? 
dOq(a;xۺ?Ïfk @^s{'ie4s;k`z캳9KbCxd.֥^/f pV}(Ԛ7ԭ[笾~,\@qkpiٹ)](G-~IXpL[Q&gB7B\߄2 I/ARX$gtzDu#B, Ԩ+T1@ Rl\Plq^A1[=3Te,G#,M8tڬBٿa^7e *`B gGdv)wӱ2q d]z uC[[0d6tWlX"H>A)fJXyD!O yf  "^O#Y0zD2hx&fqoÇ *$#eAdŐӇfɫgI!j];߬)`Kd k lϵKVu#n#LQv45C()5]6Z6b7"t44cffE33ЭJ.R?|a8 gDHs]mq^gDsBpbUnoh&\Wc~ǟb0~&/MAj?~^Yy]bU7夝g#پc#Zj;4W:3Lג8xcr&CM}!21S<:)67(R:aQU)3Y0|k)O`UϸMscH'hbgI!@.gs *·h^/i5sH: ֻ} rn$Q|~ZO/%JjRH/U$kpqBS@6@QAWIM{c3'%׋~Y܇i7d >ϛ N p"9y\nTJ{`䍗@2ddaÝIǾ0jz}JԎs2 FSBS0RvHHHjUN/TOC3 %9enDLkM`.+rpO$cRM !0"~lOeQp EX6+c;O,pxLE鐖tpCKBQH(Q4 rBv,uÌWlVPi!fMU6sk\\7F'w}vmFQ6jbCwA-}jOo(*iܪ($v׷`D\[8ewt8Pgp/14f;NG~0eo'b+t:yK-s|@uʠ& [DK|ON.9Jo_lir7PV+?qiԈvٚFqt<*[8 1Pt1ژ|Ӑp4Si sGkŢ`f-!.W浰s5Ecs~  @ÓZC4-# %ָ.Cn9coĢ`\,QeThRM!fY?/t*KtD}\z&$h>`1Kedzo~%hǝ.^=MrD ?/~DM_Xb7fo!f6+cbD"J)FXjPis'bhҗ] XX)T澋 ޒgi &=M\2fyYC6%huhc^ 0K==^+l}1v'ZW3NN OVI.Q݀ضU1%XqߴFL4DagCD lK6 lT; Q k4 :v ‡si};Q"[fன j8k]&,v r'- S@߯X j K9ΙhZ;Yt\x[Ap9NE (=L7#UfFrV4hNIq)ybs`S2J:ͼǿLJ@^}1X:DWChoҗXvw\M[wIR!|ZUUpġb-&c3 }련OrK6H *(9&L\bN& Q@!i]1=mSs%1œrt)WgX }AB=/W0:(e3nC/M /4ȪFc λ(-5Wle nr@9:(h{>%VOHU4x(&z8Ij}qQa sdzOxC}|2Ǵp.Hl2`!uJd'ڴ 3dGed`)i)*w#":{h׾J"+%Y?!_ͤEpҦ%qOr~ap7n7ܿke;FR~5 m7_U|EAh5N5K 48ȳ$%\y(S~C$5M[N VefLt$9mK̃d?8Ԕ%s6X2}^q(63f* PZ,a- 4v.^a #sfZ;&ٺ->'6)dK!C #!['dٻ ҄h5421F;%"LZ9rRr u=$Rܡ.w3]uɛK-1/8-ANw_eS:,g5H8ܢBdb䀺0/DѿPT&_\b<&",qHfpUL94xY ؐ[ --RXkTڧxZg 6dp]JN߼#Gb+[I:i{Sҋwq!buvatc=hHx^'}A$K^E ^(T a|!Z"5U%Sͩjb=0vS vL6Q9آesF{"({Hs)Å rZ$Otsg1ğdt$6 KS=*Jg Dh o_p`"<9J |NT\sB̎<#~?Fh\ڡ 2e AM8+E:Qe_bشmcY}W5uQ;TA&],ñWgt?2MU2rrc3_WqG;9=tZ$C`MxCĂ] 6#|1$0}޿rxyM7·URi {7MJ Fh]bCꟀ @n1DJ@6W:({!'BT~y4tI5k`F )AVP=,`Ň7V{6S,m5YYPʥYu$nC#YTf^.Às!xAۑ籥jPQúƊf wZۧa8yc\s؀ӛHC_g[TUYbl& tF =NS҈TۍD0Ib[m{om"waKO`Òѐ@")Z5{h@C;3ڎ-aOAOWۇ^!qb,AF(r k & HZ\>38|NfJA?.%_*l3K0 䉮:88\/pӘ .Z5_f dv0ӗ!yA;nێwZ>أ=#L4A_[:z̴llf#V[l5 Mbt?j?"SkM4e'BCKH:C%C%]ݛ WY4 o )JN * vs%x=#^#b߽ndve%tr/j,M`C:A3 C*AiT>`6%[.N/.͇̋^TEh7]dX_ƄN?̓-l-(͓ke!B7AVrĪpYi%#X}´a5@ f--K.7")x5:gIt>Ytcv:G'BjDҡc!g ՇVʏ2CHdҟPY 1g ,i7rtiΖ ܠ)Pu]īW~9ّ#HհS`9$O};s%\ȓWv93smƯf : #_)Lw Fiȹr. 
fY08 .9liK/_[oT*LDjsyw)hj$G=ɨ@6L]p\2g}m(u[U-F8tUIB!i084tv  O`fT\DQa~ӍΣ01w!{QajQ%d^:-'g`$s峢˿&EbTpΜR\RtQ׫2U"R .k$8* )|!(\Bj>ˁ1ȄX2&n7AqG "I˪Sijnu~{ƺtcr m} VO4eČcOĖrјbi!P}9 ¸^L]k2!W|Rʿ(S)8&Z }Z `g)%(UN9# kBL 격0!E!@v ţj`MQmM(0(2ez(l F:/xYn3ZGFu}1mt3$М*ֳcGx pxjͧߞលw.g8-Kms  -y5ŠsyM댐=ږ%Uv5Qw3fߧ& &$1&_Rmڢ͟S.n@Vh0ǡ*N!]O;#8ТYreJ9M =pXV#(kEѦt$xsذ X53*lZCg[:(q冸-Ap5{"zTBrRCea֑!۰1O 2Ե\-{9j'>8߭ݺhAUAPu=KшL0VEq,ຢyEg`ogM]'*[z !MXYs1f^Xs˔8Yh4\hV Abf]a[x5*i}l2(1>H?Zu[ <4gMe709V\EwQ3)o)x}8$RLM5}vqT+(NA_MwR5 9Im3^ze뚥v! o&"  {F!8F2,:G7,C܋K_za8P?CCW@ E7t괽DW`z<#r\P(`V~#AvMUmF0?$|n&NZO=쇭њ6?>5{sF%79= Bb'Dwc hH3{1)!aMޛ/솵 _߭1=ogiύAj腛lTf~oV:&qJjc~!m 0HAqQŔMbƖAu l\?(JS&8 %K7%{r=kĊˍP 7SLv:_VS+@": 9 2>AB|VRU:'WQVj~ 'QQd)gy<^+-;' ^*(Q%8 6H^v%ؠJPV /S;Dž|~mZi,DXL8j'Ĺiw)(,ܨ؟^r 0 s}Q4z 3V6]5Igm´{ 5ȩ)UzSj0uKz x,f.Q+3 9{/F32o:M=ۥs?%ET5.MZ>ۤARx %C5ۊvR\W``"R!w3N[-Le{Ry|,.cH9Yg&9GʄpӠԦU }ģ6]4;d{= % %sM,l^ ]Y4M~N44w# j' r׊>x BU07aΠw{EBwFS!Ŝ.ac"؋n*/KWAq'>rUJZH=fZjx7'g(ף~>εa9CF2v' o=IrhO1(L~BryE\N5 o_!X(MM}LmzDNMZOZRW !]BPX :߅^#f~=B5kNMlݑcRO&#5A -g~R(@TJ O2Y R[KEZ܎!>R4,|aǨǾ YP3 i)GEw ̎3$tuR2$M ‰|-A aKvG]ݷrl&o :;J|}98"-E\c_$2]?FM+})u`ntv4}{AY xh~j+S+HyBs.g=7jfnj[큻8-  C}kvh3R# ^xSM04Pgk0"ȋ BT{~c غuEn 74VNj5澼ViH SqlqtבϺN[ED`3ĜURg- 6GCc(׎67ae܌?-$x:SI}$Q&X6jܓ#cTSƈQUiGQ / x 73 JK(_n'zJK*ףGB_73IMMCp*9I)7K ݍ|fѮsMfY.fˋ`W FK߂״<^N\0(BCP"?B|Z'ط@L+jq`h <& =zQ+4K9MS)~6}TL_rߖ)b8][Df30WF{{!..D=)E ⚁.<.]G!jY4Y%ŧͮWq^[{1ky2)KqחJ-ߘB$48wwgP5"u 8o齀ٵDӯO_V6ZwO 0;l*;=JVJi{uQ (|Vo4lsF"lЀ%Ƣ-}>K|[A`Ӷ' TaZTm?fr]Ъ$H@P-Ni!*c o*!Ei.Ӓ_(1 8(5\7G$"4dy"=0-k3Z}t҈|⢼,-5GK4q$&M~`|Q3LO ԙ~bDJzh +:ɿlW=gnHN>d-1rhsxė@Jj} qVY`0oIUX$RpE$圷7a]}4H)m#+ A%fK5Gbĩa?xt(8pKk2^>rm&uTfPβnSyCg3zh`8+[6'v~{HܱR1EOӺ0nlVhؖ\El9LH}`Y^w?Fyq%JQZt᧤Y@>W>UbG3ZL(z8ň[\Fxu!y*vgȄAb%lkdK ]Tjy J椗ƶdቅ@m ޟ49a`?GO&Aq=P%S3GĐΝDQ$TOЭj"|7Vžsh!2ߴ`uĐF(i[Ti(stXFxok8N܈iɘw梙SGNˣ9V:tGK(0Y{;1LCS3B◫(XqUahپ|֍rJ2c!$E9obs(Fˈ ;D axQpX4 .[$dHnl̤*;3p}JrqA+6%fd:8BC .tF`{udKDƣyHd\oS X?\i^rx.6?K'p(T9C!Zt`6irѮ{V[VC 6s{>% F0BSvj-رko`?C"¼x4a1:D -gPH#/&bLw0~P+XQq`ּ 
&>SӐiT-wm4;MXݞB򂹙תa2miހ8|F2U,?$(O+.u*{xӁڵ$á8>ػX-)ϜFVg@ˡk.j jR1wf@,9L˵:%JeMtyz%Ŕ 6~^QC܈dH[#JR~ǥ0^ux ROF} ɦcOSKs b?=~00ѧ12:v-+`X߼Q%l, 毕eVKTի>:ե 4Dï~l(PCQ`&zDDo99uԂV>Kd_| [s H"Q6$ ߁e 0XE',{UCމ7)#D3 ez{ 8pN˰4 [LyC/w^J|;5dRJ!'=]!\f`Ϡ V};ɽbֲa3jQnK->x 񼝵/ey5_Gc}h#OAʞ6\QLsy|[6u b.L1xA7yZwelP-w)_f5Iw r[F 9_BJJ Yĸƙw$f,MJŜ+[|?9J8jb 5r6=qoy5B%a]7"=$Q[|QrA}%H=R?׃ڗ|XB8dz!&35-;w*h.Zj8RM&/3~JR6Qsa~K$ J%CS31ѧ5ؒ6E:#s?.S|9{EǾ,k (t;Taa@Vԁ^/KU/%|G:IG6MhBKyHL H;Arfn^_[b2sOA#NkyVv;qЮR]pGE/ҫM9乽H_M Vcd\_zg>kI*_Am$YW'K֘릀Ĉ x8z%g$)C@}vH SٴjO}-/󾴜0tt.EdGaȰ`R>5,)V.(xSm%cVvȹ$1~p൏@DƆ0#¨v3?qʣx ׽K*# sM n*HG u4#,c :rrRzM)d P5q.kR`KMb$%I;l!xDRg_֗3c\X]'a)|?Qv=>aw7"KmsHNqøfF#+ؐ g] 3Zu`aY=9:/{'(\Cdxߎ6f'.޷'!˛y~;] ͍Ȕnn׾vr׿@YͣPC#C4F>d!$orm[y4YNR8x1n%L*sÐ&ٌ@bA6ށI@nJP+UOx4C9*CMJkG!w6K!%#M<'5m_XHa F`j.6)!x#:>`P+΃3_ 귷UVP%GEz7< ~e-+P?d!//:7z>ݰFU.jܼ񁯌_u-&Tm Hy9xwLV݁ZnX̴Km9eEBO; 7C̟ Oy.'^1Rt"hkƤ鄒^t=xA7XՓ;D#Ku¶؊露>Wa:tAO]~^Xt8lrkڋ% >>`@'UN6}4aKd.3qt*v<#nL1ۮԦW֥KpW5n|Ͽ 1f_CM!`ԱcZv@bq&u4_>uQ}(2MjI0լL ;[L_;,打'3^)ѽ@C:( p%L<&xiG{2֑z/SQ=7)X$ Tp+@H eHPasUHDl1I&Ϣ,d@wqx8~{ @k]sLEV+i6'=ﵻ](Bg)>X k#pа]`NBjŊ(vsRzN2w'}G?Co2{Ul6hH'qle!^fJ9ѻP4d2Hy=k/ o\()JܬԀ3ඇ$k*YrG!&1J %6UQqhsRl>4Uj# YvRCa)ݼAZdzCjxrnDSp^`Y/$ipiA4[.҄9=tDeHOcm}SUM\Cq(n@a :F u۸r,q_0 h#L0ں lE5F1Lz6ѫrߣFFȌ? 
+2b9Q'&U{cU#RdH'1{&ΐ8l!Czg]HM1 ڧ֦~.( Bݏk:eC{?6[{h +.l"4r ;rĨi3'|ĸֻ)aJmWB6qYAQN^ï)zpSw yK_eMepCqhQ_+1Oa68B0ED 3*d]#u mnXI"S)\T?3n8ZvuZ&*\)Q M L Od`A{fmZ-٨m :Wi 4rfS><edm0D*:}}VటL%y`XA'9t*͗^ؓEoT6Yp]Q#4%=m+aP"#U>qϸf5 O(d۠!g"(Wee q**NqPG/uQPAQ5JZKYp@Q.NŸuq;"B]B FܝAG ${h ?R+n2ҘUsG[-G)aë" ^4b]R% ٛ+3Rg ԿC7G+`JS%1DuݐL.(*ȝY st%"]ؑ=z6XɑDCyl#i)[Aa=E 3(IަUzmqEhrE: / !nhM?\5G7[W Kfgf aq2H9yEcCLB D2?q!Sr&zޖAx)ԩlg\3 )2K%kFgm.'Y9@l+MhQ%?xc=Wdm\pc|nzSm f^\Qk5qM9ADfd$5't XT-k漡˫5Cxǭ9"Zt0WE{.Z/ٚtm%)tTƲ CZM:b\W)e}lzV%:6kG9nYOXH;|?@7ԲKBi7䊈utEKmF*;jZ_ Y^AWjՇߴY( 4BK:rcmYa9W#~>=j˶m}'~!3ŸӼK4fW<II^ͦX VRRGS">%-ݢڌ{m|\{(l?}N>oڍ2[V*`bF d;T~^Hh;Bړѩh#a'_tF8(o'@7aZQ>i \CK, :q(1\ :]u4("an^p[OaoR% 5N LPI-&x=wK_!אudHBl`Cw_H>ęnBjb=mR Az~IT7A;%yADE03 J)Se*XH#+\WxPZ1 v0U]k~~^dVr90@c <%@r(:oT$0P &5+.FA<) lyEWjJcErB`uG$N՚: Qa2d=~jq.3^JV[M6Bt @b[se$<$# i:CimVkA%ry]a$i\ʠ.476̔TMA[,j$>l/O5.f?ƌ%+FF"jjK<5#Ė|DžRϋFƕ̤oX$}-ܫS\ qfړm[](f]tZї"1fMVQ!u+GT:z-'ҖH~*SRڣR=X%`ӛ̈́m~Z4?q qz(Q%3xfBK&I$c D 0-wXRdKEr_d1`O߰Z#\,= a_P9ԄEH4||^ aq9ЌOYBƼ㧳Z].]ФuȟHpL(37o[<ߛ"̊pksWsX%&rUN#Feȭm áDMoZRͶFN "rhLwVe] v.QfFe7I,:T? :v[DVs)*\v';mu4(,T0g]DB w9i]9tE#GOly:A heeӺ|bn* 5ΏNn!YBnlgQtdݙϒzii*}Nl'm*# ;]m](&ۈٱyZdd .̢J? W >اڢ+ QIBYu֗&+ؗy _1\1C)z2h7-7^%m{%ܗ23Θj,KFq\ݬPUm5=% }MEA`՜?v=#ih=2vUGnſCspp4oq; e.}W.Q**!M K>7/eH[Ԁ:X?ҟҞgA/'/D-j ѱB|?1Ⱥ̬Ӧ.MMH;j\e-}GW)YބZC"b"ku̱E4RߓPt6i/=oU̿ڒTR&s!ri.b4 .ܳ|i rP[|YyFk ˘`OGdY?_S+WzD96E~RHh9UWsϕ$.T \|@[u?9~ƶqb ,2p^~4*-K4mvY@n.i!~̫:x4?փ*zHA\ja# 7dLЗey{{&MRe*>yeE lk1ڣh{R Et&F7t^~{devOmuޘfiU91$pI|{\ 7|tcOW0u\:԰G]-=/!ٍf le%KJ Xl?`pA#z)o1ݜĉmK$-~K0LlH5CV5i6<{(r2O{ *4!ᶫ?`gK…Uk`6D$ 6:.C5C\f?Be5^Y@`up݈\p! x 6Gs]l7*Z$}+Gɳ"ZTNc7>{,\M'z Plo,?e,źP),Q#OΌbp>/;yOO|]ڨ|v{~sF}΅ ؀1tIǠN\?Ouǂ 2պZ0f]S`gDɞoq,>C5M2b_d;] ɓpoB!Z5Hj{*>7 nɁ4&fՍtq F} 7oI:e3<ճ, TW*oG}/Wi&f&?F||z"d~937XH B1i PsO[-)puhM! 
ĺlczp0.!j)©wV}@Z#W[(B'6ϷʇB.~ϨaapA*k9IZ7[)3 TfMSpqsg4ENQ5#$ESj V?lq:?~3J)#~&g-13 ԜaC~sJu͓覙N} .0r W:8Lvm$E՛hy]bTmVKo){4NCBy<}% zy,0R5_`,b$Y3_5ºԪNVUv0zױǃя제) ղēopْB7AaK?T9^|h"zx m[r+r ofrXFDQ;OΕN6$u)IvJI?gЅJS[ƪhy 7]P+ }ٌ)_cU`$ς6c>[=MXp.#D4(%D^ xtM"\O\џ}0t3͞8$4,f:47 KұmZ-虄f.J@Ξd7IVW^Zl=ܾ!AshsǂXW7@ M _Q{(yc`~M&X:teK-Ѭxz{vu|Ykh">gk).~'e'o摠֑uQ1xDn8_Atfr@`mz@f_lO1رs@,%ά"5}/!mO Lh^vvhTF K+Նs!̈ /ނvr 9lJX( RL[(Vz)`wmW{Yw3X!)bX3{GVハ CNSMD͝u2[XsNu$8ۤ MnKܞx EN$ KHH^AwQqH(ۂWuJ6'EE:.!OOHC7kحG"Oބz@HL}t η6 y!f8ht=CR]T&?uvd\Tp7\zZ !,q 9$5m\0_/>f@W|#00C QE3E ohK1oLB*58CҒF@PA2e*.X$%5It˼";ΩcIB?$Ҵo3sJ~|zMK"sD*}y '[eH'U.j:t#jۖ


zADPT\"̅rQl]r:%qn/;M,ۈ ҌH<.ܛ#f[XwUV2=_ 0MR(H(bJ "o^#Qa((0jHnZ#Uv/r!#M;uj5PAw&S|H \/c3@>X䴔jk}w3W* 5BD0rޟ (0^g@%C37U3 o%Q0pO|ml^Mٸ烹VW/ ]csph[zL а [;3W"kld3;졊; *N%9ٛNrNȹ[?.LP:xjiS`~E;PV*wq]p>訤=YeԪ/6eJӜ\dBz[֫7%T6왩>>au l5u̟T2w+%K^pwД cASGdh7PF xY'k% ^C4(A)țS%S殈1jJ#0_Qfs=.j f h#FEqPc~bp灇2Ԙm/J ;غ0)ѢVx7}W\sB},,a6xL裑 ŒXYβ9:[U$3]>U$:7)m$miEV"eM^K|ηIQTi6|CpDX3sLD}k6䛺/lBttиqv0MڭVdB0=G88?"nXU8sR*-5bJl[^hU[iGI$;L,u$> |Ǜ>RP6|VtBFs{{)~aoCN{IICВ8kCٞmp?]^3Z]VuK+=ZrC䧓Hn 425AȅHi7Śut%Elo0J Ɔ2d}]994=ŀ">'&Pxi !^eUQ\,"t?Ï㧐[N'Bœb X*YuesTUi3 l E,TI&΄iEvWab_лd!l+y"LA4!ۡ8\۩㢥o#1aո"T& Vmfjc09f݅MÀ^q@"{Չ[~ήҎD©'?l9hG]m1R 3!K6%nT310+6HHCo3oIHaA[SD (ch!\:u W: EaL ~8\׌W9dD}:*Ayb`JfDŽ ٵcdLs}!]K@<2׿Jzӽܑf$twVg˳OHh .6:K&x:ǎEs(uL}? u(&)B*#vsUo12oV\Ptg)ܶ;t*'UF"L|74I{qQGvvgIY`eO:N )90ҧEfasݝ^֫~6 bfu-Jmekby P&fNao9 AreGV Aqt4!N)JsQ-\Z/!PV o1:>'" yV^|@s}∑D w¥o.F4ORN96fR&ʧTHv%m(8cDIqìWnDOxq'k v_#Nz_@?|A&d7_n!kcU e~'j8eGdx'I!F%͸&iP2e@\oo#w.a ˃"3ѥNrPRL~PS ' J|0/ eTټ5$3:tC D9$ZGޛF_ݳ6T3 8_ub' ~?% D/@w{YO雋s3"`!4!v0+IVXiy'S*PqzS:Ŀ@j1_4b٤O!bELJԦ|O$Ǵ<Ȣ/EOL[X nl ^y(vLL,/(ReUMQ|<ho:4gk5=tL&!dv\]X,?Z%9m/SxU.8iNj=MڗlhKJ׸`FǺojMij@{Fd)J0)D>-'Ig MQGZn2? 
k&йrYֆ2s$4NM{53| m>RvR5 p\;o)Y<̓=!c0 civ^1mu{3, =͕)ECmށ]v7kPu~+xͰ9h/3mU:=]fk5bVMvjsD>4X>fUVzu.r5.$o"ZonHIx.._۹ZWC]P6CX ?0:7F!3R8WPN9&f|~52YqVsA2?^FPdFWDLӘ35d-ûNS<wzyay `Nj?`QJȂJT|gB9C+hl/M=z@,鲉j:YO38'W2a2iq?V 38|_dycPRVˆzp&vh(=gЇjj{v1QP&b:IŌaKq8_!WP`w tNZ]^pv%`)w ^dSDzzEԔ݊;gw7EnCcڐm"j`oSSDK7j ~,qHL\*5M\pR*nCLg@?bįC&eĹ'%(x K @Ct2.5EǔI*m3Q-ΔgFo {D@:mXԎNp,ܷTN\\V;5;q*VzH1 ӪtG &,@juf'31bV@M4oh:Ef4E(^f nM`ɸ~IO%3 O}D"{|Pu KBipSϦ`xX oi|\eL:c7#}Zb X-me찒$X (Рf ^œNXH7txR/Xo?ӕ 9R4,Sʨߕzh&zg{k3Rb^!HnXE `gWlGofӸ_OrYKrkYg-g fB򔀡#=V8";,P3lT*$66^.(H#nl(!Vq8m+Q3{]`G"$6Q٭ތ3m4J QSL$-=^twE"V./g_ꎀ8BJpG&eN^gR.jX]띺Á 2҃/#;-L0J䞶 8i i 7k]TM;Ft% F[GG"=0i=HԆlԏ{굄[:#9q^x;)PP^nzv~#j7G?L : h$.ꦘeJx~፴n.jsRn[t g?3~P^@w+^c7ag(O.Ĉr}v T+-YVƌddGpŁI6\syXI87zI>(Mf:UQLtԈHRl^u:Q w$"/\l [#d#?r 75 1'ZU{H:~Em>%Dm2*$0tR z A\?DnvͶ^wWh/G_l.$~uKbZ8c^֤@ Y>3jN 讥a.߲ Tɾu %V4~f։*%uB>I*p8!t{wNM!(dJ=m SGzrKú^VO=dT?-baKu@͏5&yUXF Io<* #Do3+=2*|$&*֔-V8ʶ_,8jF1D/)l~?t:vue#tOI*"K|~̲t 񉨩]+ _Z1E`fyv|xxϟ -}dyDQǗG(_nE5Ԗ5zNrRdJYhm`"56`J"S|yeE?6KyXI^׍ E3kJx[[Y6hP[n䏹Īl8BrG C&_4z3qH< q>DrKipɼǯ?bˆ>6ơ8%`W*x@یmsjW5?lGoTK1*Ju^:<A˟GxW ^x>u1,օLGc\]ط*W_GG}UZ'O $Bg^=ߊPPyۂ\؏fpTgL2'Qܻd D $G5'Yf_2Z!T 9LViT9k뼗ZEimAE*6JGV|qƔzp1{嬡C_--C茥{ [={/@wɲ]N/xz'@sEP*KpzSڠuSI".E(CbV^E?MXUS@X Jߏ^0#fUf%)VDٳq oe:SYS7b.Κ;jzlnG_> \۾whEwWt]."u24Ptܥ$OL}ANpp36oKY2ٜ7*wj!w* <A6OV C$ŸA#5U0D%ΣE}1Zdr*\YNԝZa1Ø'w1Eq:cuBS35jjn2Sz'/NTdb Dɴ}ɞ%%DGc) ?woM="#(TVl&GCio xknn"1|&t#3=GkjF]UkQI0tic$yE25+!9[Di 8E?vQ@ɁD^Q=JBezNz_DGC/95hu.ˤ^b^jC[QE 1ߐ_uc<~pG|ă֛E`v3C@Vxa:#;t%j֋JEH%KrrdBy ev7{,➍Ao 5=i.\~|]!omƷ$PXԅcW蜑7YdB._ͦ-tTXokx92ڈ_=~I;U6Q Ϳe}GNg}Fb=]zfayyJ`OW|x6И?QܼQ04\mT s˪q Uu+PҲ{9Y(7ڷ"75bMk m!k8ӄE>l%A C>X ʷ7j<2ԍ8m 6h;=Բa"޻5~(ϒVhۃX) j++m_r擞%z &g4k[0Ja겨"xYSVi|${RY:*J5HqIK5i4w{™01y~y- xhQ=3e"&u%pUT"6v,,97hA:z&%Ʃ@+zesipq[XYl'm-k0liN%U!"b8!t/Q3Kl/)WRmni$b1gprߍCs^X1B0;7:#TcXD|L%~ym9e?E7 չjx[ӧeY>$&€雿ć.|@.wluˡR8b gʔEU#^Tr3{G{V$#vkDoYN < 14,Rh+Bme:&4z+ dϷZd5tA窯7nUDYW X+'ooq~nwd„? 
nƪ.|H^лv'*^'`a7AZ-承evoa]w |z1:e9x!6s%w)7dV;/\QRE@(o>F:&.lO^w yň5AmDl|Z%Jösmm/+BHV^EF{{B æeѿ^<v.bꭱ4mΡFj/)dח3]LuOG%:J00y[yv [/nkdkH7 iXTeo(4eD׳Ih^xɯaIiDk!Us>)%EX@'yݿb>#$ ]tzTp`o-w-?TۉHlڼa cQ_G'62At`zQ0@'~qu}.k2"~Mڝ@PA[ U+c.dWH3L}h0$_s>̫hx^ϻַf "1LCr晋!FUp7dmY\:ft(5rJJbZ۾)2UD/6)Fì"QDXb:{wc_.q\6 m\pbiNgqvAYpՌ:j sfjhπɒ#n"lWu|L:>FP~tOӔC&5; cZyy =k$j|gU;G٢-l_qe&Qn>9/5D=$>?k`=B7[T0w"TILX{|u1}/uy"u1'F R 'PҞ]6<ܠ0ҵY޻|tOw۷9`~CElbA@BSհQi -^fyϓݡma1! MY&"|{&x8^v2]&\#ݔ^&D4 ><^w.N⶧f i᰽N^{<]$z@0<6 b,Ayq!D8,p(} +0hZK >ɶ-V\oz/M*BϤ`p!}d 0$wÔ3˜R떯X<5E [*=wsCXyҊ˕X Q9 ׄ8& B+,%/7#u7E? Hae_Gq}Y:axD5^rbbD́!kw/OnevObDRͥEr\_U5\/>σ,,'N0qhmdk6']`6ҢԔXD6 w9Hfɜs$(#|E ~un(wCS{Ig̺tOHӞ0nUZ3hٔpR|DdE<q.rzʱ~Q{!Waݮȣ J(7oƒMh0&ȲߺZ%!eY|0={bN|]2Y:oNLnuC0kC[CV>Iߟw79~ ){djoԬQ/`xa }p4|^A!T-'VJ{'-*RY># z^[9^Ue/A/ xV ALvhS -w~;/"ζITԳDDTU>`dĿ%r_gj ɢ̅U;U\/?Qr$͍XB,81,jqPLmD 18ZG#ȶeX\ "UW+i> ?FK#EhQ%`|Gc9/x}i7%(R&pV#4sORWj M 4#:_"=IMMq#"_Yv7?Ֆâbr#Nv0gm:zr ":T=hXLg&o}~^dX ɍ{ :{>!u"|xb"4P}D:w9 H^`&mii1-&=ȿxM*@j 2< nS@HIZ<|ٶhӢ[HP qk ?وhv ['N8j9Vގ2A,:#LO Ɛڛ?΅ .ۉ51;SC #"R 6/`{/ggq4&o8H[6?HڄGg_\̭A6hdYyRFd ꒹BbwVDJ߆""ۖ{OZV rus|; (S9 煝HYdag0*>o ak)iA'hy}6Q(hg+GhœR>GI#\P?fFњg= ~^\ixxztqC!STd =U&Tg-醹5i 'n`g0R}މzRy(a cA?C6%Y7o:@7>VZj1Vd>97VÑt1#,}:h]-fp1Rl gHQP|_k@#Lqk_cpOU?Ъ7Kp]ISP\nӤpdِH2/Ql51{gL+: Ik`13[8׫?OrҘj)*w9 }p[Zc74W׼UNcCZ)ُ֡;ؾ_&N6GWs;ڟ_p9D_X Q?;4٪ @[aQq6) P%ݍ#_$ miD7MoH]IuZ _FPES(ۤtYTCW4#w4F7Tur`φɛλn︁S0:3'6CFƶtI8D8CňEuBJ\|8zZN]VRkh6qy=JkrC(yې}n6,U=1ׂˋE'#/8ݖqݟ_Y xj|a BQ?#*_\֛PJ3t]u?2a`ݒh੫H 'E/_5;"LUdr[biCxK4"Sw e5+=E)~NB4D`Bxѩ!I!Xтͫ22VH6(S?OH=O2.<##Keq0bFm" #` 5ֵ@ǤDJVkK<֣v 2U,g✧{)tOGف4OqTeж*fn :.OԻa~m#?vRgsErwst)JEdRffiANH,%#w QI nbr+~[?C& Ml']!^|sۅ ;5(DPpG%os-дO*+#Ħ\ u^p6 i^Vu6jSR{r݇fw⿭=mZ@nZHDjsUޘ)@ x顼jĊԦ~!8xA3`ǹrЭ QT3z^5iXr)|փLQL`nM@]X8>,<[ 3J/@n۱)ۑ+{QÂo݇ܠ&CdWsJ8^m>5"3 8չ\L.¨qoӶgj?eg#Ayr,n+emR>TUK70ևLP,m>3ضnF01Ts\3hS5U7aEhgOr U|I7غʤql3];dl=o=3hz$Ìװvn|]+4Q͠f.FO5UFdWemڴl!ۥ^gr mOO-$Qcj+3㧞ܬ'38fpDaZY`Ds.Aew`k 0l ECkj ^P4w 5N- 0jo 'rkԾ,(䷪K8k$ئh;y6bzN %15xCרtV6rE6x}` 9$F7dn {un(Lpǝ_N!D4&I#q 8n Wm1Kٟj_YpYgqthTY7% 
vh.*G#i5D)fLJEPߧ 73$`Wc`7AnI~ԅ`}**^YF[L5@\{M ⸞ 7̝ }ްQ}gœMb}ֻN-(+|t KY@>&5IϳAxapL7tw0'HR)^E\F'h$b(`S<-V~J|m:l?=B}g$$[R{&Cu v+AP3cw!җ`io􆢁S]K{G?nF; iGPP}y_'X\$ԶCdU"dK,c~v"R=cD\'Gz('ܚ(D~X:Ko^ :ۆ=. nyFk˹S*A]9t1Ik1@ŋBlm  HkC^mb,Ya**%C|1+]~LI _1Ad',~(òQ&eRYkA܍'}5"-jLܽZqqiwo2%Bho}~.yΔ02R::,ybJ~}G&uK tN37믏Upi0쁷䧤߼I޶ن53m+Z}U?2!Tfdہj&YSDu~4ȯI 7˶~ ɕV"a ˜af 7ɆhCF`7ud+LYÊŖq1AawEO8<dSZwzp5t$YiQLEPч&N=NQ­XUU,Ei z^hddhZFCԩ׸B$F*DIGtu)6ki(څN$gqaو$Fl'.1JI'·p&QKǨ?Wl;'Gz-]{87Yp?ȲS<1 Lr7N8u:gn_97}LZ!fྠVb<O<*Y10 q59Rς 3=r~Bs_ggRw{h//mw8p î;t]#]_@g+4퐹 XԨS 6/Q ɋ~D֦5(:y: f2fżٻ(2M&[@aPqbR[>?;tHLa?`tǪeF#xt P3vDcoڝb \;Y*#'Ļ5;PJ7|ϼŒݒHd=it=tRP } 񄈢 BFjA\e?Cސbq< y ij¶3TT?+a+i?jy5\{O4WLwTaI10ǡf'E6uuh!T51=zE#5mn+f+Hb6Mǣ-,x38hk *m/+< r \WJ)T@L w=hx6)]߬quG<: 0Ii8BKɮN?E|{f_.|Y$H,x)bDBC6/Jyxz}-xdˢ T>#s#dbYY|Sbsﵥ|K:Tݑ\2eؗLH"ЃRG(?a_m]4cN;Vm>OsC6z8opʡyν_]0A/|ȏ@ Q=޺4Vg/>F?v5Æ}lqt8Jdߤ|4`(/l"6s-ĜDȻtmmõO#S5q\qAqR )C`q(zaKz5cnMCw:FP"rP>Em;V"z2$0>(EA f+0? tUxzo +l_5=V 5Qv*[gC?L5h+Xz3<>z.X4k34jx:cۍˏlE0Fmٸ-:]n*`о  x&r8.U<.MK(^ûDih<{{=@xGZw\H+a}+tA-0t`b3]Ȇӳbb @' 6uTI) ^EAEh4+VQ}O혛sƞhoŀ 02#0fL.6>[~Cssj̼}٤$ÓJK ݊P;.eȼᴈ?6cȾisK -:yFq>v9u`PQܞ;zCv(URM  Pdl"IW,M`?n*&7n5>{B|KQ.UUH!ڡ@>թ}cWmnޠwie \^ϐRDNBe}S_C?s͑o]hzVREaKmnux)Ǫ!Wc>׳X?P} j,kc mǵ?$Չ|ip~n["HGX+plHPuY;FW\c1[xHtu0?K sX #O!JZ.?)F9:{)&M36gZ5ɨ8[¢_֢T;]7ФxOI.YvU;lz~M?_ct' Xz]OH'=-ǹ]@IZ*Qrl>_h|ӌaط95㘌 nwdR$+Y>AAetݯ]|6+ G*Da=Y5g˷Ђї۟Te0Y9=8li"#܆ =jfVM7z 'My3LjG6T'f>pxͲqΊ"dFk)ΉtSM̮6dt\=$pa{tYM  r N |r: [ETvh?%mS+#c Disg( tAR(W8^SqWo-Hcۨb1υ_Cne+@>WsD0 zr(5ZC*$~!j<`?n A$=册nfk* ;ǟ\j\|6ɼR<"CA Nc5BJROz0`Xk/?!djlɱ8?/63:UM+5Nm)1bm~GЁޠɢ/\= 1O N @sN*#DԕZ?`l]n^xf bĵ%.445(r$ƪ{)Y4l7 /[iiL~L '8mI1{g>^~-9 Ծ|㔯z<'9>] Fn '{awo)*]kZ@:(+81ep.:"5v MR ;5ﰡ摢}`MtaٷV`.iQ|lҚwD48O`\= Qa0ḃlW|NOe~VNl–첹T R1|uYA iHwJտZvH ] )8'8`;Ìac bhdT!'11Ti.[L~/bfQ M^Ș2D^Mk܈BNd>Tf2qu*_yS/:E7ǡ͸BTܾk:nSuIg Ҩ}#cޏr-D`YpL5:)C~tAƊ) )^U~B6Q0_hwyxgMˏ_a^}+6pBw)A E'ʣn~Hy~#HQA-̙LYzGZ0RB o}l Iu|t 3ZQ O95:;^VP s44n_ۡ$ $>Z>n1δ_fڇD-A=@BYʙ=CN=[!#d#}]A|+i0ˈ!.BQ8.VF*ԏW_=dEgCG`xAמD [ء8 M}a ܻ68YFudyyݟe;Fpg3?ߢ&b`$ͪ*u0ߩ)'e:ܜ{ \Ԝ@^E.r&A+cnc 
~AnHP>e) Y/372F{*m|RҜn&"O(eY9>EHk\qbB9uffq \왋}\Zâ+>* R%i_S:੹oȺnli&39S!,K=XBjLKe)K︪6AN/}J#:N|`|cj5V}J{Y`]rQaE5̐q(}fa> 9"ReaMA>.f=#V^))oժXS&=u,F4$WҎ Y$ -_"cՇa9\mͲw$0&ydcvyb jk$_LFgc0e9 lo^~w@@)rZd.Z9ulIBU֎5Ɍ.-2,cghbֽ.CXL]84uPQ5>ؚq]3 prTH.v%Ik0z.9NgBDeȤ&Q'^֨_L[Djs VdID7,-I% ={?՞KPa"_˼\Eqj8L#8 vS Ya{W{np~ņd3;w95"SVLѥ&84u%o%Lb-sZ1^8@_oɑgcr܁* >t֩q rw 0N\*y|c_H247/Ψ)(rIO&~*2 jug9úq'7(KlcxwRI)YPR9#kvEl,%#2S5l(tFլXozq5Xإ0$p"D L!Ի<I(Rbq*p-lix^ɶX/Kp=ns J7Dt,8dh&]X2AHxGG[)L" Kgg>N!D}40pY]umԔ%O*A QC6JKcbKִƍmș@`-i˘9&අpB9r+5y%rK_]@ hŠF02Vذ: ԩAʺ<ɚbZ7 bamR mm+ڸ`jO1-F nW clT{U3CVF׫evt* fk)ڛcX5 wk5/2m(3*&\ԩbL1XX'Q:&ÁE~<c[BvCdy[7p\;OzRoתnI6G(! ^rj3Y<}C]^FB},86Q"9 E9B..ha\ŋI&fRzuH.x}RP#̚s ?N[M$QZWQ372fy:GjxapԅMxVw DJ.$-n*^B_wBj^tC;30̗r5ͻ?gJJV j6dsrp>H4x~X=^%Ds RJ83 zļYQM6i发}@.Qt*3Ʋ6_ĉ:@ eXI_tws:w5:(*sAseouUenoPrw;5C,Žv _3PdSIˀG7PH U?m~L1̬7cpީ{X];91诉AX%fSJ1sZxSHP>؇]>Ӄixᦚx0? 5-}9Yd!@Ԛ.߶+,Ou޴g%Y $wǼպΑR\w~:ȗ]l&!B024; LpaQM I3= Cޙd2Iݱ=皤*WIltI {#/:5EcE,rzυ״=sE= @%Pmk(6OQϢΟRgZhCZ|KRsϨc`/ BOLS ڜlڽ&%ijf\(9K{O/< JlԹ [V/B/QGHiH !+J׬VQMQ<GĆ-/n Q)߲TȆdžDwUłKam؃1,ڷݽz҄]g#mxkWÌa2}a -gXqΆ`Ȁ3SoG"-q qYhT%  :nd#d-aM9T8XBԓŠE}|L.q[T%$Դo ]@F(P%bC `>WxZERV :R7 =B /oЌS*~ۚ7:\dy?>rm=L}CS8}I(k 3׽ 3pʿ|Pc[tK7#}`T47b-^= }$cXO, Fdۤe>%GF=g9:c)vP{?yY,zH V><{x'zkEA1NxQS0ø!+?K(q.[ŢUAs҈'y?0y (b/O9>kB!X0r(#뮘}sKq+LEE Vf/p^b8(wygΤ#P2Jo7xߥ ⃹2n8"wCЕ63)[GU'>vmDR#~ VGmR@-U"XRP`E8 n uN;>v xu_PK< dk  ͤ:[b z&o2qls.d&CVw`w K>5!S 45MMQ 뀪`ΘnC"'A2b 9ۡ`^xK&qx`#TP8-zi&]qH3c"tyJFf(@)*B7(U+(>D@.)bZST% {YzfU]2 |c&o^VэG0F*HA+i?$lT$p|W舏>E_NS Θr?WSurݚljf 5M4d4pA/Dl⽻$2!EyrWζ nfkA# J o%.נm\]FpB*6|+Pm€!M tN{oUZ_uF%os.Fz>Q8 3pu 8߯* %n:9sP#z;lh}Z@1hSMe.Szު35/bevm*[b+ x,no y&(~NK[ pk YZik):4L$OpwA(' Z*]N[6mB$SoJ*>,{γu5 !NTcFn/)?jU媫ڀnIE 1p|wXfiO81 2)49#{ŷc7;.ҋۑ]3Kڿ+?dhpOLkUN?܌AOVKwAL`[Q~0Q" *} z?7t_$=C?)0C:,w#P05es"[ô($Ttݦ-m&c^p،w^ɋ19w)V]UBWs\ޞ$&6&e2WH &ػu+cr(_536iI% 1;v$ 2%&$.q>Ah_Ck3&T,z|:<3P|GNwR=q{pMki*z`A`eKԚ|b%1[2VN/5~8s Y&`C`L??Zir8=Ag=tQt/XUPݪv!F׭&Հ(,KJβ~0]a(wG<1WyׅsW1 e=LJbJ1b3$1H QCnƯ-Ξb":W-E.HsU:S=b/r$$y+TK>gԵ2xƳ@‹lFUK3#sn 
oBU̒XQtƻGV6UCy3Be|Vf]oo/is/dBR(mMwBհpCKVqS> ]rH0%}G" oU جrݘi_(Eo܍/濩F&aاz_ x <*!1|64YevS]H}dtzp03a#yn¬ orj |;ʚdğ)~qq) ľ6OR;)r.N\G-VBS8ʡliG o w,s⮎'OD@3Л }W[?P.:6JePk_3{nѦ#\BHJ? wp9>"ltRsjo(mԙi1NFe 1spb EWf+|!axP~2d{M|4XJxToc24>6^ɑu޻cֽPJt/T>A3F}c_-1`om%~-s%Id}_őPvU㫒/$~bsYѯi2+'Q7+=_HF=es *"84- *x]aе'%v0n+`٩Bv۸i:D,3,g4D$p`Ww'Ha=V4 B4(wüT"feR'BSȚ܋}xuahť,`|fk”$ZC WF@t4d6^hky:O)mlpX 0B =];d踗be[s\7ˤɬLZyLSe_NGFMS](f}vYŐ{E}>8 ,!z]tmb,+>YOB-čn|Zaa3:X3扤qQ*Pu"0*GYtV]BkJrS}˩sptqdPlѷf!ss)#WG17>HV{HCTW5,gɼkKP ^^\) 4kHaAe7_1@ v; 'jZR54Sߑ=%s8t h-&iC R{h\ŗp:3j[[YPw'_1+lPh@[A\uɚ ux_ '@.` Gƈ{O`J #FrdS^v{!ҭ $>B4x}OkTg:#Dz+#wΎ׌xWbyB}V %bP0NjlѓBݻRf"%j9Ŕv+3cn a{mYہ:`WO T5x`r6?9¨>|+myX翲۪6W Xdh-(S1+CճFp<؁,o1Щ_:&LL 9IfpVPvJ+c|>E7*\@?#$޲+i'E7TXD8v`E'N(vX9Ǚ&'\)W`O)x#M<$[tnDr9.Z ^jӿ! _{-PswHaS>&UύyS̭@lknt|Q\098}ۂ'56{ /[fNg0Y^2Ɯ@KI=Cc }#O,2%PMar@Ho [ lB >\*@fF]%n7] *rNu+7JOҵoo!\2]eF&@v UZnɌF-m~[Y9/8IY*wcbb.,|(^A ,̓֘W vEA"p'Nq1M~ԝbJp?!}Dʛ )Ae\K˒v>Aо`Wq^\w=*"xP[fJ/H_BɲbEU%*L5蘽G2&͖Y|1Od=zfg)8x eg|s.|AD8z=N}jsĀRA6%|pd".ކ7z,b0ʍ{׺j{6\$ʵ ڸ?8NJ4폁uknMS/gqٺuYVV8Ӎ:Qᛦ _=D[< TNxvxQ'r=3h0bs_ejm 71h5Dv;^-P[3FN0]-dK3ýaڍfDn<6= (#UL[ 3a`RuMfK+浑`G4l * MS[}X6F#`3K|D>+\O3-W̚,vjRѳp}|[u UP]7i,iZ䱦D]~ CrkE\ic@Q}*~f:yC53\;kmrcQzXINb=*Vlue+qJB-xgtt1576RaͷbewIpHh@9e-И|׾8)dFJ|P=ur%;Yn0Hr;J%/ѽNyX]ppGTgPw͠]1|":'ufSc02~ {)HvX <#/< 12s8QBeBsI2(+ In,E[p0w` kF]3x)Q/N1⊥v.[O HhtIhB$X鼗 DAy2b|,fyfY#z X?F泠Fc=uwj*|"uG{LmtSxŰmzl:^+jv"sآ1_#g$N\,O^- C{!ͲEr!3]pXkXl#\ݤWŚ[s\Ĵ$‚SGj=}.pQ(ev#$r\*N65 ̳nLQ’p9|F =5!ѝB,dgff=7X bJV 8jjA~|8QŜdz _: îc/6tq&ڂ$/&7KMKQ\cjQ^KU*ofL tBK"X1ec+e;& Wpm*#)8עtk@;:l:I&DY&| #[*͉RdwG1cFz9Ykg`ŋOC*A0͝Hδ'Z? B g(gy)h#M-3#?j-۬ܞihWEv4`Za ݗ<)edEJ"CxHe33+J,>YAچzx@v\/WQk DBP)}֤94hn,/C$ܜ9wy1 aD8)}oЏ(֪+6Q]n&-{=u@W|=s3ՌChDo ! 'Wy=6cTb,yQ|{j6"YYV~]_Rxr' _,f]tgBh*}**6gC +: Χ[}iS>[4@ s=r=i?b!*3CŻ.` x$=ʜ=f݈AVOz K@W;`'(J-3!" 
=kڹh{j -|`RCloF"=!*[x H`_~B"> i ,2/'NLbnk^Rp5-7S%nNa6eⴵ$Hzs:FC6$Z'e6j7 jAulَ+?X*'?!鎣׾m0;ZImΉTz# "!iz7B60K.U8|NǢ(kJmO 70J|ݟ%Re c(nSd]y8-ۉ?+ JiD[ -U/U֭ȾH?jEАA˳-Q!<nt~6\IYr!Lֱ˼lnPm"('S^i`u@-+2 ][IEC5݄X]DbX7dR9]"QmXLM6Ϊ-G,1_LAxH. :Yjh_Tbs$]` IddžI>ۧM0-m9 Q[ W#+U][->h6Οp";#ĕZS(s675 hjXra.# Sg@S. l4i_~BEK"7x5z~ p#y퉌vU!YyQdDžc5YЗJLFI L'"iuX;lOI, a˯96F (I&X=RMBVRA[*޾_A)|4cHj}1rD} ;)dDZ?u+hkkV9\ST]i}wyerGt@Э6@XyNSe_ĺ!uweL\n`\B;<: I,l=TdفNf::m="@Z,6BeM'<`))dm;T6P5l~K툢VSmFfDO`ZKW,K:@< ׅypO ^ysV$kȉtƓpF]L .fpɉg_^lfwڲbp#v /yZXR]n(?Z E Xb-myi9g. V@Ȥ6WRơ[OP%/0gK ,*}G'JG+r+*]_IrUT<*~ηsW5c Jp2|:1D+߷vSR'_Y iL7&0P% êO\C 8oF߅GXN[g\NMwIK@^WD?ȺUc0S+( s;e3ZPAj딵A99ź+g,;W6EQfEvV\ fri;|"5 Ӭ>G6 NyLC,ɩ0F|mz45Gb]@% y7⬐wD&Z)ۑ*{W~;zX(k(n5ECRn ɻæ4i\&Щčǝ-tigj^r_N~1-xUHLhag%n2hE!;ԧPaǥAAWTj!Vz6TjA4Ssyoy«!?Lk=Mc'yvu$ClLR\`B侢m>gcKGr>ʉt)] ).uR(:~CTX5.OWxm},n,KQ1w)Jm 5-L[p z䜖c$+Z?:C-i4vo5,§o N›0\~8CjFr]r2)b 6ÛK,%5*s 0|׭j̉,' 0W8 '>$58υi}>E{ZAoUӉuy] b}(!¬Ll0MmiFw']ؙׄh LH `fjzw2x`Yq>ʱ!RQd.L-WvuAC@;uo`vSF Ov5YkfL<4WF̯qDYTSjB,՞$^ceXDgꯄ]km p}F䆭l㭪Wκg> _زl@"$u- &{!KsbliFFi2TX_O%V1uFrzsa,Ϳ҈XHC: \eSyΰFy/:JڧbثXx >n0keB]FQ&{VZ +\5=+/~0F)/NpNyo.iX!OIRCS>{-I\pq8qIr9ϰVӡ"d+OSπH:`ݒ>1{``LZ3akؔc-=p) 8_Lvs/뿣Adtr5!&}5,2X!eՌ%UU}w&oC"4߸f7,hc $tR M>l1~,W%(偲5O/yC#}CMRH*𜫅o>N6>{ fM&ິxi*Gؾ$,Iw8]w~ZE>98Y7C6'wOŃPid` n</Ӑfj(U]sNy < +SDdW:SWÚ WoE2A".D;l cjx}Td"swV1Kď 철OmLqwE\p9Q*(τF[kÕަ D`˲uXf C^r)Xs$ $1/ -&:rޑ( og)gd.0;mݒ0lGK6 ] ւ>NsPa]E߻I,n5NvrϨ,[pl|܋TAϘJpi1Rt"gصjB?e?1, 蛦Sk#1}1{|j-BcCJH˸D-A1ɪŲma4$0z"tPԒVX /_=u#i!ݽTW{% G r${#Z؎ẍ́d%2sfb-<<9~s#^i2|PfFG3,]4۝;d/Ǫy3т*c}C+G# r /l˘YmfKqD_E$-pneR$Ov(Kwueau2jbѥh1EWi6U< Lhoȶ j"Xaڀgpӳx_C`[yp:[5QE>0DT.)!Dq8IrsO[XYcnm,-sDC6a#ks -0SixfdNioOWZV5ۀ>X1I-F<HP!-fYUY-{ΐOt8n}P~Ρ?Esev$OlQpR7|3 /B8= ֛Exc+4c!dE]Q|]a~/h3O'V[=ENJs}Cz.We#\[/7]'tpX Èn腗J/]&afV9 Bk[{3;f9#v4Z6\x]0PIԱ$32 4G^ u;,7)&8wI;gFwQ7/ ,-nw]ˣk5!W 911@9)uϤo_Ȃ`'hr\;԰sT:  Y3K.D;ql )eֳcW1Xk~L5x}%69N#'|e@a0$*6x#p0_6]>{~o^\ ǫE2;‘ /+8S ~q0K(Ol͚)h|;EW׶ȥ}uM5zC] U*9ɩ9HΞMX7_~68䍊ΒlUJ C|ۭˀ:dEmTّqil {&uLDixϞg|D1B\u.B ј$p^8{=xnXhz2@CyO!`#EͰ\@p4H 
Sr3!lS}R\D)EoSX(@W-{mý o!LR}Kć3OtKHHDw9ozfCFԮgA]RngP$܋P}' < J?V`qpW/{iݗY4b;4bMU N¤'$/πlfodWT%*` pOR uqb̖@IMϪ,N~Μ@@wz&zy `.M4VM^A;G94XnRy#/a/&ߩ^Mz&z{&MTq=w}@&St>HXIG M< %:i+C0U&ߐ߼kR׆jL4Ѫ)8ztEj%lNpz.E 9oD'A-lSZ]_+c=Qwۨ2J})HJC ''kۯYqm)=Вe܎oN#S_sD{fG(c>2'5ˍ}h7'`w{Yfx G[4˸Ru<ʄV+|%wkpHXq߄7p4OFZ:wz05XF?o.iru#n*4Aoxh;4v\z6 '߷mRw S`n7.vb;'ZBѧG3xC H׻߄^r}s>gG}j Lf?{;`aoo"4K7Kcr dSҌrAEo2:0y A.PYu  Uϱ n[S$+ۏm˗5SX+%?6ƐPUY'f<t@KX >s1׼c<䖏VFxu.ie& !A}-lh a>fuP6?jy:nw'Iڌ ?שյC^4,oEMG_"Hg-W3ǟWIEOd6{}ߵhh<'w7mVae;a< |E]{>ŦHHۊUФҕ_O3 `4zZ*W+0wslVoi!}խh5 EK_ת/ :8AI#bj-f:Nſ,So2+#)k8/ME:z HVz$sr= %z$z2SM*7Ȼ{M2Ai1JӾ`KJ4΀{\l\ *$IBD4tC]OT`3Gxdd[Ilm'Jƞѿxx rV@|O@ k@= cs 51eGɄоur :>0rtd@C5`=w]y*̖cICiVmTP`})#~ FjBtCW"}:ѯ0'M6/! (9s0l[;Tؼwbsؙ( j杓&d<-y2˶w4jΧ*`ݼ:EyC(moMl=~d1y? '$8 K ?IZӗqA(f'E%eԃK 5^oXX8fmo8o$hӂW^Mye11K4#˵nPGpZQe{qP,5(9/JDoDž>(qfJ|Cq/]&.+r;(b:mW/(|7 uޣ_iJ+xJ\xHD@\&r2gtѡsN˂5X _krG쉈%PB%ܕP#h3?7^At͏!S=BGYeޓȴ#yġb|v{?YDDx%PԌsO6q0nWN>-xLS xs_d*I캣FϽzƮ o9B2@(ũJr4 kCx59W.35zYq2x_V|1mg_Ü@[h+yӪڹRi;0(=Ô YK0 :r5T c #ǨG$K喒ZDž~#s KZ я޻ESd!ͦěa_\Qc|"w 2D u%ֱvײ-m4hM۪NKXYќVjnغ0m-udADItG7~)^H^qF'n5d<"F\H\Y;hEnu`y /p84~ֵktQJ]-35eӫ^ S>mf9_4ݳY֪F8$L*k\6BJlo'xcs L~ ^֟ 3^e!Z"E YHܝJf\vg%,ĩ A (nE0Ww1{^$ئX7-"V/76L;UX TC+[K'(k1ߝ97{x漢CU },%=#9B wtM5ԊHXb!RπLh7|oZ=nqW_ Ѽe*;|83LvRyF3xޡوva D`!0RY-?\~W;$ :yJG1PZS,'^5GޭHihWsE͐?ڊM*-Zws1=Ӱ?.օһVy.a"8oP%..|峺|W,pt|RG0\4Frv΂fWL&$n=B-ERyҗ䋔e. 
y3S Z08_=a&Z:/vȒ8mЏo]lXH!@_#Z'ԁQ{=>1[QviNl L TBx?ɷ%LQҷy|7δ80aD(BNҌu4'fjdv3]>)}TGOU 4~< |x{{鮟 "Zp{fh緕 3ZZ+&gkK+ɏF{ۃQ1jZ2l @.W!q(?>."xrcE^N!L  S߭5&:0x(9TίzѪV^d,W13ү g[xSʀB1c&yЌ}x48~[vKY^m_fY[,;jQ)xqunQ8brOaGeerjTh2]2(@ hrUPBxN e\O]3"?QgIKQ C]vS_N7|$W{hrG>h I/׾)HS]i6| S=3$/2ff.TӇu!U>,B<妁Ȝ{ 5řQJRx֧ c'Soq,L'Ev;^q/=m-\XPɿ`Q͒ua*F֔@_$UOKk>hP&҉?&3uJJg;zYA#w`+Q*ն1;Z-6nJlyl 44EBrinKM/^ 䞫h6@ 1b{M&^!l4 :+r`O_wZFL\ߌAA/L;Wuikb[1ՈR},S&/M1}P@"I{xR)=|ͳT!rcnHfwe}lo%P Q~cPDeTȴX;oRg/ogDݙE۔yb1ʏͦ5j#ļC-:Xd֏&Lr^gmńU Uyldf}cB7'%c,[Vdv5 EgzQl[CBéWn$4$6ڷA%UcFi\~Ӊu5݀Es?b]us#OȺGCB%N}a vHAڽ+3>d~EL ȡ J ÚHY;o2jt}IaDy;ׯA?yWB5C j)6_Qgss Y(O0MTnȋr%YQ36Tf4rWvfQ4̝ܫ[Lo=ąK>U_}*$%Vȹ< SV~;l[Vˮ) =ćL PlaicV"#q'rh~IVRpQꅏpH]@ US9d'l c]mϵKk2Ncnȭ!6fr#ö))CUo}.ݗ$d $* u)ߜB@0!Vi˔Hb!/C3~"4J!C_Sr,bB>-~GVϮWƯf03b7g@!{d@ B˧.55>ъ$X:VȾ`[ob,1a#S#Ei XL25X՛_2Lx;l\4ұd$I<.ܾ{Oy6dg}( yFksڵG(6lrxlKPD-ݨ so@Np/0߲R= x:Fx]JtDZ尞g'vu^SfµJ:;w|+ȫ1F|,_X#88_|{=k^%θ3Qg\%wۄ95DVuθ_Ƈ@|} )AЛ$omV:+tk 6 M#Kvurr]y<-$'"#=լ(or h?c94#YBrSE'" P,hDO΋zy7%SAkKz\"~VRZG&6gT] |Ŧ5KѐM%q w!TAM0JM}]XFsrnoTatd&3]y?SZ&x~[7т<*?+hk%p|5uk\nwybPEwjŽit Qjy:k5'ZT; f.'t3 Uts/1n:]{.! 5s+ZϪu̻k_|qR.9Y>4[)$FNQh5YZcZ>'ٟ%6yiHE?6KەԓAf\_9NpPnG̽Ya.h̄6M$o<m mu,ΎV_jƧp&DL g B.zX]K8) aM߿4<߃sb6=k:?CTJG$ΑNH5/BoR_FQ͍0۵>E2{?.p:oeK/ZlBbDF0&O5qxOv\ԼG=aZФoY<2$Y5vyX`ɔ%67_ 0dck{XRÆX7~jL ia=E\R2\}H\2Oeo-k3=2ԄTա(7jEt^rĮ7bP)|؞ B`PB5²F9 6yz\“8{?:bX6(FI~ppTs8:լ+I]9ւ+u@YHR+8)84Woe6L' Eb)&$SdXCp6;ʻh kҶ3ϕ6,9 gER8c>OWoGin$[{HtrukL׵o(.L*4k)|j Dp=Uza<;@NhĴE ({Ұ?,[]4ߖ%#|PIT )ͺ~Kܾ!RƷ~ŸtH`#B2AfсeF[+p8э=r/ϓNqfH{&"Z`+[`3uJ|y 5 ߸e*~Fl(gtx g3iZѝיvP"@C!ֻXNOxܥ)8DM8 bKWДcTVP]&d,.3e+so>o: 5z+}9NY_ɨ논Og4SQ6Yo۾ONn"%>6 Zg4x Uo+,ˑ͓KP?>T‡H)[7;ܜȨEo'67$c{[K nR?,"-G!"֖IHQ%`' P̣Z)yMQ/NʹQOWm홗9-eGE~6E 5_}c\^*7ux<}[U_k6mbtG¼eh~Ud``XAAFˡ (@u7-Cr :Ϡ4<>:Oo}šS E/noNiŹI.0 'ʂ'IJ0j:y3ŜٸRlp^>%=,0#[' h\UO-4b G}qY,5[n60^(/tXm gr_:ƟQr#̠}YorZ;\kOERڿ5d&x'9v؟10~!ĀQ纗<Pf rߥ?=odM gHݑMZ-sf^:*Stu&*UV*Eڹrk@F>i׆3Ob+2z VI 8_0X TobfڴF@h`fe]bq)tOa媭iq{2H;LzPlTbA.Sw'7rGHnا dǗcUW;`ex"'AlN s^K=iFukP']ǒ[4=? 
Ziޜ3ne[O:Tl[kK,Y[2}bgϻ P[p0_cBlx{u{{x1$b{ QwZYTeU j[=aȣIC2:G1a?f, +BN,g{i*ӭ'no.+%BptdCҵ$g8Aj90Q\~BkuC_fI P@0Hir <@" z]\C[Ǎ~Z#M,4wplTl݉Q=5suvJ+Gg4 II֊јeDN}vH[c<)۫|{: 5R5I{eA]۔ 'vH&h{GXYka=^&Mcw $Rc Qtt,7+r3@2F4(e0DXɲ"fbY:iլ9qC ٠?)mUCRP) IsoBdy@%aXq=I%piutURryb'mp$Ӗb>0 YZspatstat/R/circdensity.R0000644000176200001440000000275513115225157014774 0ustar liggesusers#' #' circdensity.R #' #' Kernel smoothing for circular data #' #' $Revision: 1.3 $ $Date: 2014/12/04 06:49:20 $ circdensity <- function(x, sigma="nrd0", ..., bw=NULL, weights=NULL, unit=c("degree", "radian")) { xname <- short.deparse(substitute(x)) missu <- missing(unit) if(missing(sigma) && !is.null(bw)) sigma <- bw unit <- match.arg(unit) unit <- validate.angles(x, unit, missu) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(is.character(sigma)) { sigma <- switch(sigma, bcv = bw.bcv, nrd = bw.nrd, nrd0 = bw.nrd0, SJ = bw.SJ, ucv = bw.ucv, get(paste0("bw.", sigma), mode="function")) } if(is.function(sigma)) { sigma <- sigma(x) if(!(is.numeric(sigma) && length(sigma) == 1L && sigma > 0)) stop("Bandwidth selector should return a single positive number") } check.1.real(sigma) #' replicate data x <- x %% FullCircle xx <- c(x - FullCircle, x, x + FullCircle) #' replicate weights if(!is.null(weights)) { stopifnot(length(weights) == length(x)) weights <- rep(weights, 3)/3 } #' smooth z <- do.call(density.default, resolve.defaults(list(x=xx, bw=sigma, weights=weights), list(...), list(from=0, to=FullCircle))) z$y <- 3 * z$y z$data.name <- xname return(z) } spatstat/R/update.ppm.R0000755000176200001440000003141113165060304014517 0ustar liggesusers# # update.ppm.R # # # $Revision: 1.61 $ $Date: 2017/10/04 03:51:04 $ # # # update.ppm <- local({ ## update point pattern dataset using either data or formula newpattern <- function(oldpattern, lhs, callframe, envir) { eval(eval(substitute(substitute(l, list("."=Q)), list(l=lhs, Q=oldpattern)), envir=as.list(envir), enclos=callframe), envir=as.list(envir), 
enclos=callframe) } update.ppm <- function(object, ..., fixdummy=TRUE, use.internal=NULL, envir=environment(terms(object))) { verifyclass(object, "ppm") new.callstring <- short.deparse(sys.call()) aargh <- list(...) if(inherits(object, "ippm")) { call <- object$dispatched$call callframe <- object$dispatched$callframe } else { call <- getCall(object) if(!is.call(call)) stop(paste("Internal error - getCall(object) is not of class", sQuote("call"))) callframe <- object$callframe } callfun <- as.character(call[[1]]) newstyle <- (callfun == "ppm.formula") oldstyle <- !newstyle ## Special cases ## (1) no new information given if(length(aargh) == 0 && !identical(use.internal, TRUE)) { result <- eval(call, as.list(envir), enclos=callframe) result$callframe <- callframe return(result) } ## (2) model can be updated using existing covariate data frame if(!identical(use.internal, FALSE) && ## single argument which is a formula (length(aargh) == 1) && inherits(fmla <- aargh[[1]], "formula") && is.null(lhs.of.formula(fmla)) && ## not a ppm.formula call oldstyle && ## fitted by mpl using glm/gam with(object, method == "mpl" && !is.null(fitter) && fitter %in% c("gam", "glm"))) { ## This is a dangerous hack! glmdata <- object$internal$glmdata ## check whether data for new variables are available ## (this doesn't work with things like 'pi') vars.available <- c(colnames(glmdata), names(object$covfunargs)) if(all(variablesinformula(fmla) %in% c(".", vars.available))) { ## we can update using internal data FIT <- object$internal$glmfit orig.env <- environment(FIT$terms) ## update formulae using "." 
rules trend <- newformula(object$trend, fmla, callframe, envir) fmla <- newformula(formula(FIT), fmla, callframe, envir) ## expand polynom() in formula if(spatstat.options("expand.polynom")) { fmla <- expand.polynom(fmla) trend <- expand.polynom(trend) } ## update GLM/GAM fit upd.glm.call <- update(FIT, fmla, evaluate=FALSE) FIT <- eval(upd.glm.call, envir=orig.env) environment(FIT$terms) <- orig.env object$internal$glmfit <- FIT ## update entries of object object$trend <- trend object$terms <- terms(fmla) object$coef <- co <- FIT$coef object$callstring <- new.callstring object$internal$fmla <- fmla ## if(is.finite(object$maxlogpl)) { ## Update maxlogpl provided it is finite ## (If the likelihood is infinite, this is due to the interaction; ## if we update the trend, the likelihood will remain infinite.) W <- glmdata$.mpl.W SUBSET <- glmdata$.mpl.SUBSET Z <- is.data(object$Q) object$maxlogpl <- -(deviance(FIT)/2 + sum(log(W[Z & SUBSET])) + sum(Z & SUBSET)) } ## update the model call upd.call <- call upd.call$trend <- trend object$call <- upd.call ## update fitted interaction (depends on coefficients, if not Poisson) if(!is.null(inter <- object$interaction) && !is.poisson(inter)) object$fitin <- fii(inter, co, object$internal$Vnames, object$internal$IsOffset) ## if(is.stationary(object) && !is.marked(object)) { ## uniform Poisson if(eval(call$rename.intercept) %orifnull% TRUE) { names(object$coef) <- "log(lambda)" } } return(object) } } ## (3) Need to use internal data if(oldstyle) { ## decide whether to use internal data undecided <- is.null(use.internal) || !is.logical(use.internal) force.int <- !undecided && use.internal force.ext <- !undecided && !use.internal if(!force.int) { ## check for validity of format badformat <- damaged.ppm(object) } if(undecided) { use.internal <- badformat if(badformat) message("object format corrupted; repairing it") } else if(force.ext && badformat) warning("object format corrupted; try update(object, use.internal=TRUE)") 
if(use.internal) { ## reset the main arguments in the call using the internal data call$Q <- quad.ppm(object) namobj <- names(call) if("trend" %in% namobj) call$trend <- newformula(call$trend, object$trend, callframe, envir) if("interaction" %in% namobj) call$interaction <- object$interaction if("covariates" %in% namobj) call$covariates <- object$covariates } } ## General case. X.is.new <- FALSE ## First split named and unnamed arguments nama <- names(aargh) named <- if(is.null(nama)) rep.int(FALSE, length(aargh)) else nzchar(nama) namedargs <- aargh[named] unnamedargs <- aargh[!named] nama <- names(namedargs) ## Find the argument 'Q' by name or implicitly by class ## (including detection of conflicts) argQ <- NULL if(n <- sp.foundclasses(c("ppp", "quad"), unnamedargs, "Q", nama)) { argQ <- unnamedargs[[n]] unnamedargs <- unnamedargs[-n] } if("Q" %in% nama) { argQ <- namedargs$Q nama <- setdiff(nama, "Q") namedargs <- namedargs[nama] } ## Deal with argument 'Q' which has several possible forms if(!is.null(argQ)) { X.is.new <- TRUE if(inherits(argQ, "formula")) { ## Q = X ~ trend if(newstyle) { ## update the formula call$Q <- newformula(call$Q, argQ, callframe, envir) } else { ## split into Q = X and trend = ~trend if(!is.null(lhs <- lhs.of.formula(argQ))) call$Q <- newpattern(call$Q, lhs, callframe, envir) call$trend <- newformula(call$trend, rhs.of.formula(eval(argQ)), callframe, envir) } } else { ## Q = X if(newstyle) { ## convert old call to old style fo <- as.formula(call$Q) Yexpr <- lhs.of.formula(fo) trend <- rhs.of.formula(fo) newcall <- call("ppm", Q=Yexpr, trend=trend) if(length(call) > 2) { whichQ <- which(names(call) == "Q") morecall <- call[-c(1, whichQ)] if((mc <- length(morecall)) > 0) { newcall[3 + 1:mc] <- morecall names(newcall)[3 + 1:mc] <- names(call)[-c(1, whichQ)] } } call <- newcall newstyle <- FALSE oldstyle <- TRUE } ## Now update the dataset call$Q <- argQ } } ## Find any formula arguments ## (including detection of conflicts) argfmla <- 
NULL if(n <- sp.foundclass("formula", unnamedargs, "trend", nama)) { argfmla <- unnamedargs[[n]] unnamedargs <- unnamedargs[-n] } else if(n <- sp.foundclass("character", unnamedargs, "trend", nama)) { ## string that might be interpreted as a formula strg <- unnamedargs[[n]] if(!is.na(charmatch("~", strg))) { argfmla <- as.formula(strg) unnamedargs <- unnamedargs[-n] } } if("trend" %in% nama) { argfmla <- namedargs$trend nama <- setdiff(nama, "trend") namedargs <- namedargs[nama] } ## Handle new formula if(!is.null(argfmla)) { lhs <- lhs.of.formula(argfmla) if(newstyle) { ## ppm.formula: update the formula if(is.null(lhs)) { argfmla <- as.formula(paste(".", deparse(argfmla))) } else X.is.new <- TRUE call$Q <- newformula(call$Q, argfmla, callframe, envir) } else { ## ppm.ppp: update the trend and possibly the data if(is.null(lhs)) { ## assign new trend call$trend <- newformula(call$trend, argfmla, callframe, envir) } else { ## split into Q = X and trend = ~trend X.is.new <- TRUE call$Q <- newpattern(call$Q, lhs, callframe, envir) call$trend <- newformula(call$trend, rhs.of.formula(argfmla), callframe, envir) } } } if(length(namedargs) > 0) { ## any other named arguments that were also present in the original call ## override their original values. 
existing <- !is.na(match(nama, names(call))) for (a in nama[existing]) call[[a]] <- aargh[[a]] ## add any named arguments not present in the original call if (any(!existing)) { call <- c(as.list(call), namedargs[!existing]) call <- as.call(call) } } if(length(unnamedargs) > 0) { ## some further objects identified by their class if(n<- sp.foundclass("interact", unnamedargs, "interaction", nama)) { call$interaction <- unnamedargs[[n]] unnamedargs <- unnamedargs[-n] } if(n <- sp.foundclasses(c("data.frame", "im"), unnamedargs, "covariates", nama)) { call$covariates <- unnamedargs[[n]] unnamedargs <- unnamedargs[-n] } } ## ************************************************************* ## ****** Special action when Q is a point pattern ************* ## ************************************************************* if(X.is.new && fixdummy && oldstyle && is.ppp(X <- eval(call$Q, as.list(envir), enclos=callframe)) && identical(Window(X), Window(data.ppm(object)))) { ## Instead of allowing default.dummy(X) to occur, ## explicitly create a quadrature scheme from X, ## using the same dummy points and weight parameters ## as were used in the fitted model Qold <- quad.ppm(object) if(is.marked(Qold)) { dpar <- Qold$param$dummy wpar <- Qold$param$weight Qnew <- do.call(quadscheme, append(list(X), append(dpar, wpar))) } else { Dum <- Qold$dummy wpar <- Qold$param$weight Qnew <- do.call(quadscheme, append(list(X, Dum), wpar)) } ## replace X by new Q call$Q <- Qnew } ## finally call ppm call[[1]] <- as.name('ppm') return(eval(call, as.list(envir), enclos=callframe)) } update.ppm }) sp.foundclass <- function(cname, inlist, formalname, argsgiven) { ok <- unlist(lapply(inlist, inherits, what=cname)) nok <- sum(ok) if(nok > 1) stop(paste("I am confused: there are two unnamed arguments", "of class", sQuote(cname))) if(nok == 0) return(0) absent <- !(formalname %in% argsgiven) if(!absent) stop(paste("I am confused: there is an unnamed argument", "of class", sQuote(cname), "which conflicts 
with the", "named argument", sQuote(formalname)))
  theposition <- seq_along(ok)[ok]
  return(theposition)
}

## Find the position of a single unnamed argument in 'inlist' that inherits
## from any of the candidate classes 'cnames'.
## Returns 0 if no unnamed argument matches any candidate class;
## stops if matches of several different classes are found.
sp.foundclasses <- function(cnames, inlist, formalname, argsgiven) {
  ncn <- length(cnames)
  ## pozzie[i] holds the position (0 if absent) of an unnamed argument
  ## of class cnames[i]; assignment of positions promotes it to numeric
  pozzie <- logical(ncn)
  for(i in seq_len(ncn))
    pozzie[i] <- sp.foundclass(cnames[i], inlist, formalname, argsgiven)
  found <- (pozzie > 0)
  nfound <- sum(found)
  if(nfound == 0)
    return(0)
  else if(nfound == 1)
    return(pozzie[found])
  else
    stop(paste("I am confused: there are", nfound,
               "unnamed arguments of different classes (",
               ## bug fix: 'cnames' is a character vector, not a function.
               ## The original code wrote cnames(pozzie[found]), which would
               ## fail with "could not find function" while building this
               ## message (and pozzie[found] are positions in 'inlist', not
               ## indices into 'cnames' anyway). The matched class names
               ## are cnames[found].
               paste(sQuote(cnames[found]), collapse=", "),
               ") which could be interpreted as",
               sQuote(formalname)))
}

## Guess whether the internal format of a fitted model object has been
## damaged, e.g. by dump/restore.
damaged.ppm <- function(object) {
  gf <- getglmfit(object)
  badfit <- !is.null(gf) && !inherits(gf$terms, "terms")
  if(badfit)
    return(TRUE)
  ## escape clause for fake models
  if(identical(object$fake, TRUE))
    return(FALSE)
  ## otherwise it was made by ppm: check that the quadrature scheme
  ## argument 'Q' in the original call can still be evaluated
  Qcall <- object$call$Q
  cf <- object$callframe
  if(is.null(cf)) {
    ## Old format of ppm objects
    if(is.name(Qcall) && !exists(paste(Qcall)))
      return(TRUE)
    Q <- eval(Qcall)
  } else {
    ## New format of ppm objects
    if(is.name(Qcall) && !exists(paste(Qcall), cf))
      return(TRUE)
    Q <- eval(Qcall, cf)
  }
  badQ <- is.null(Q) || !(inherits(Q, c("ppp", "quad", "formula")))
  return(badQ)
}
spatstat/R/psp.R0000755000176200001440000005423613156200767013257 0ustar liggesusers#
# psp.R
#
# $Revision: 1.89 $ $Date: 2017/09/13 09:52:15 $
#
# Class "psp" of planar line segment patterns
#
#
#################################################
# creator
#################################################

## Create a planar line segment pattern ("psp") from endpoint coordinates
## (x0,y0) -> (x1,y1) inside 'window', with optional marks.
psp <- function(x0, y0, x1, y1, window, marks=NULL,
                check=spatstat.options("checksegments")) {
  stopifnot(is.numeric(x0))
  stopifnot(is.numeric(y0))
  stopifnot(is.numeric(x1))
  stopifnot(is.numeric(y1))
  stopifnot(is.vector(x0))
  stopifnot(is.vector(y0))
  stopifnot(is.vector(x1))
  stopifnot(is.vector(y1))
  stopifnot(length(x0) == length(y0))
  stopifnot(length(x1) ==
length(y1)) stopifnot(length(x0) == length(x1)) ends <- data.frame(x0=x0,y0=y0,x1=x1,y1=y1) if(!missing(window)) verifyclass(window,"owin") if(check) { ok <- inside.owin(x0,y0, window) & inside.owin(x1,y1,window) if((nerr <- sum(!ok)) > 0) stop(paste(nerr, ngettext(nerr, "segment does not", "segments do not"), "lie entirely inside the window.\n"), call.=FALSE) } out <- list(ends=ends, window=window, n = nrow(ends)) # add marks if any if(!is.null(marks)) { if(is.matrix(marks)) marks <- as.data.frame(marks) if(is.data.frame(marks)) { omf <- "dataframe" nmarks <- nrow(marks) rownames(marks) <- seq_len(nmarks) whinge <- "The number of rows of marks" } else { omf <- "vector" names(marks) <- NULL nmarks <- length(marks) whinge <- "The length of the marks vector" } if(nmarks != out$n) stop(paste(whinge, "!= length of x and y.\n")) out$marks <- marks out$markformat <- omf } else { out$markformat <- "none" } class(out) <- c("psp", class(out)) return(out) } ###################################################### # conversion ###################################################### is.psp <- function(x) { inherits(x, "psp") } as.psp <- function(x, ..., from=NULL, to=NULL) { # special case: two point patterns if(is.null(from) != is.null(to)) stop(paste("If one of", sQuote("from"), "and", sQuote("to"), "is specified, then both must be specified.\n")) if(!is.null(from) && !is.null(to)) { verifyclass(from, "ppp") verifyclass(to, "ppp") if(from$n != to$n) stop(paste("The point patterns", sQuote("from"), "and", sQuote("to"), "have different numbers of points.\n")) uni <- union.owin(from$window, to$window) Y <- do.call(psp, resolve.defaults(list(from$x, from$y, to$x, to$y), list(...), list(window=uni))) return(Y) } UseMethod("as.psp") } as.psp.psp <- function(x, ..., check=FALSE, fatal=TRUE) { if(!verifyclass(x, "psp", fatal=fatal)) return(NULL) ends <- x$ends psp(ends$x0, ends$y0, ends$x1, ends$y1, window=x$window, marks=x$marks, check=check) } as.psp.data.frame <- function(x, ..., 
window=NULL, marks=NULL,
                              check=spatstat.options("checksegments"),
                              fatal=TRUE) {
  ## (body of as.psp.data.frame: convert a data frame to a psp)
  window <- suppressWarnings(as.owin(window,fatal=FALSE))
  if(!is.owin(window)) {
    if(fatal) stop("Cannot interpret \"window\" as an object of class owin.\n")
    return(NULL)
  }
  ## a column called "marks" supplies marks, unless the 'marks' argument
  ## was given explicitly (which takes precedence)
  if(checkfields(x,"marks")) {
    if(is.null(marks)) marks <- x$marks else
    warning(paste("Column named \"marks\" ignored;\n",
                  "argument named \"marks\" has precedence.\n",sep=""))
    x$marks <- NULL
  }
  ## three accepted column layouts:
  ## (x0,y0,x1,y1) endpoints; (xmid,ymid,length,angle); or first 4 columns
  if(checkfields(x, c("x0", "y0", "x1", "y1"))) {
    out <- psp(x$x0, x$y0, x$x1, x$y1, window=window, check=check)
    x <- x[-match(c("x0","y0","x1","y1"),names(x))]
  } else if(checkfields(x, c("xmid", "ymid", "length", "angle"))) {
    ## reconstruct endpoints from midpoint/length/angle;
    ## build in an enlarged box first, then clip to 'window',
    ## because the reconstructed endpoints may stick outside the window
    rr <- x$length/2
    dx <- cos(x$angle) * rr
    dy <- sin(x$angle) * rr
    bb <- boundingbox(window)
    rmax <- max(rr)
    bigbox <- owin(bb$xrange + c(-1,1) * rmax, bb$yrange + c(-1,1) * rmax)
    pattern <- psp(x$xmid - dx, x$ymid - dy, x$xmid + dx, x$ymid + dy,
                   window=bigbox,check=FALSE)
    out <- pattern[window]
    x <- x[-match(c("xmid","ymid","length","angle"),names(x))]
  } else if(ncol(x) >= 4) {
    out <- psp(x[,1], x[,2], x[,3], x[,4], window=window, check=check)
    x <- x[-(1:4)]
  } else if(fatal)
    stop("Unable to interpret x as a line segment pattern.", call.=FALSE)
  else out <- NULL
  ## any remaining columns become marks
  if(!is.null(out)) {
    if(is.null(marks) & ncol(x) > 0) marks <- x
    if(is.null(marks)) {
      out$markformat <- "none"
    } else {
      out$marks <- marks
      out$markformat <- if(is.data.frame(marks)) "dataframe" else "vector"
      out <- as.psp(out,check=FALSE)
    }
  }
  return(out)
}

## Convert a matrix to a psp, by way of as.psp.data.frame.
as.psp.matrix <- function(x, ..., window=NULL, marks=NULL,
                          check=spatstat.options("checksegments"),
                          fatal=TRUE) {
  x <- as.data.frame(x)
  as.psp(x,...,window=window,marks=marks,check=check,fatal=fatal)
}

## Convert a list with components (x0,y0,x1,y1) or (xmid,ymid,length,angle)
## to a psp.
as.psp.default <- function(x, ..., window=NULL, marks=NULL,
                           check=spatstat.options("checksegments"),
                           fatal=TRUE) {
  if(checkfields(x,"marks")) {
    if(is.null(marks)) marks <- x$marks else
    warning(paste("Component of \"x\" named \"marks\" ignored;\n",
                  "argument named \"marks\" has precedence.\n",sep=""))
  }
  if(checkfields(x, c("x0", "y0", "x1", "y1")))
    return(psp(x$x0, x$y0, x$x1, x$y1,
               window=window, marks=marks, check=check))
  else if(checkfields(x, c("xmid", "ymid", "length", "angle"))) {
    rr <- x$length/2
    dx <- cos(x$angle) * rr
    dy <- sin(x$angle) * rr
    window <- as.owin(window)
    bb <- boundingbox(window)
    rmax <- max(rr)
    bigbox <- owin(bb$xrange + c(-1,1) * rmax, bb$yrange + c(-1,1) * rmax)
    ## bug fix: refer to x$xmid and x$ymid explicitly.
    ## The original code used x$x and x$y, which only worked through
    ## partial matching of '$' on lists (x -> xmid, y -> ymid), and would
    ## silently return NULL (or pick up an unrelated component) whenever
    ## the list contained any other component whose name begins with
    ## "x" or "y".
    pattern <- psp(x$xmid - dx, x$ymid - dy, x$xmid + dx, x$ymid + dy,
                   window=bigbox, marks=marks, check=FALSE)
    clipped <- pattern[window]
    return(clipped)
  } else if(fatal)
    stop("Unable to interpret x as a line segment pattern")
  return(NULL)
}

## Deprecated: use edges() to extract the window boundary as segments.
as.psp.owin <- function(x, ..., window=NULL,
                        check=spatstat.options("checksegments"),
                        fatal=TRUE) {
  .Deprecated("edges", package="spatstat")
  edges(x, ..., window=window, check=check)
}

## Extract the edges of (the polygonal approximation of) a window as a
## line segment pattern: within each boundary polygon, each vertex is
## joined to the next, and the last back to the first.
edges <- function(x, ..., window=NULL, check=FALSE) {
  x <- as.owin(x)
  if(is.null(window)) window <- as.rectangle(x)
  x <- as.polygonal(x)
  x0 <- y0 <- x1 <- y1 <- numeric(0)
  bdry <- x$bdry
  for(i in seq_along(bdry)) {
    po <- bdry[[i]]
    ni <- length(po$x)
    nxt <- c(2:ni, 1)
    x0 <- c(x0, po$x)
    y0 <- c(y0, po$y)
    x1 <- c(x1, po$x[nxt])
    y1 <- c(y1, po$y[nxt])
  }
  out <- psp(x0, y0, x1, y1, window=window, check=check)
  return(out)
}

## Convert a single polygon (a list with $x, $y) to a closed loop
## of line segments in window 'w'.
xypolygon2psp <- function(p, w, check=spatstat.options("checksegments")) {
  verify.xypolygon(p)
  n <- length(p$x)
  nxt <- c(2:n, 1)
  return(psp(p$x, p$y, p$x[nxt], p$y[nxt], window=w, check=check))
}

#################

as.data.frame.psp <- function(x, row.names=NULL, ...)
{ df <- as.data.frame(x$ends, row.names=row.names) if(is.marked(x)) df <- cbind(df, if(x$markformat=="dataframe") marks(x) else data.frame(marks=marks(x))) return(df) } ####### manipulation ########################## append.psp <- function(A,B) { verifyclass(A, "psp") verifyclass(B, "psp") stopifnot(identical(A$window, B$window)) marks <- marks(A) %mapp% marks(B) ends <- rbind(A$ends, B$ends) out <- as.psp(ends,window=A$window,marks=marks,check=FALSE) return(out) } rebound.psp <- function(x, rect) { verifyclass(x, "psp") x$window <- rebound.owin(x$window, rect) return(x) } ################################################# # marks ################################################# is.marked.psp <- function(X, ...) { marx <- marks(X, ...) return(!is.null(marx)) } marks.psp <- function(x, ..., dfok = TRUE) { # data frames of marks are as of 19/March 2011 implemented for psp ma <- x$marks if ((is.data.frame(ma) || is.matrix(ma)) && !dfok) stop("Sorry, not implemented when the marks are a data frame.\n") return(ma) } "marks<-.psp" <- function(x, ..., value) { stopifnot(is.psp(x)) if(is.null(value)) { return(unmark(x)) } m <- value if(!(is.vector(m) || is.factor(m) || is.data.frame(m) || is.matrix(m))) stop("Incorrect format for marks") if (is.hyperframe(m)) stop("Hyperframes of marks are not supported in psp objects.\n") nseg <- nsegments(x) if (!is.data.frame(m) && !is.matrix(m)) { if (length(m) == 1) m <- rep.int(m, nseg) else if (nseg == 0) m <- rep.int(m, 0) else if (length(m) != nseg) stop("Number of marks != number of line segments.\n") marx <- m } else { m <- as.data.frame(m) if (ncol(m) == 0) { marx <- NULL } else { if (nrow(m) == nseg) { marx <- m } else { if (nrow(m) == 1 || nseg == 0) { marx <- as.data.frame(lapply(as.list(m), rep.int, times=nseg)) } else stop("Number of rows of data frame != number of points.\n") } } } Y <- as.psp(x$ends, window = x$window, marks = marx, check = FALSE) return(Y) } markformat.psp <- function(x) { mf <- x$markformat 
if(is.null(mf)) mf <- markformat(marks(x)) return(mf) } unmark.psp <- function(X) { X$marks <- NULL X$markformat <- "none" return(X) } ################################################# # plot and print methods ################################################# plot.psp <- function(x, ..., main, add=FALSE, show.all=!add, show.window=show.all, which.marks=1, ribbon=show.all, ribsep=0.15, ribwid=0.05, ribn=1024, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) verifyclass(x, "psp") # n <- nsegments(x) marx <- marks(x) # use.colour <- !is.null(marx) && (n != 0) do.ribbon <- identical(ribbon, TRUE) && use.colour ## ## .... initialise plot; draw observation window ...... owinpars <- setdiff(graphicsPars("owin"), "col") if(!do.ribbon) { ## window of x only bb.all <- as.rectangle(as.owin(x)) if(do.plot && (!add || show.window)) do.call.plotfun(plot.owin, resolve.defaults(list(x=x$window, main=if(show.all) main else "", add=add, type = if(show.window) "w" else "n", show.all=show.all), list(...)), extrargs=owinpars) } else { ## enlarged window with room for colour ribbon ## x at left, ribbon at right bb <- as.rectangle(as.owin(x)) xwidth <- diff(bb$xrange) xheight <- diff(bb$yrange) xsize <- max(xwidth, xheight) bb.rib <- owin(bb$xrange[2] + c(ribsep, ribsep+ribwid) * xsize, bb$yrange) bb.all <- boundingbox(bb.rib, bb) if(do.plot) { pt <- prepareTitle(main) ## establish coordinate system if(!add) do.call.plotfun(plot.owin, resolve.defaults(list(x=bb.all, type="n", main=pt$blank), list(...)), extrargs=owinpars) ## now plot window of x ## with title centred on this window if(show.window) { do.call.plotfun(plot.owin, resolve.defaults(list(x=x$window, add=TRUE, main=main, show.all=TRUE), list(...)), extrargs=owinpars) ## title done. 
main <- "" } } } # plot segments if(n == 0) { result <- symbolmap() attr(result, "bbox") <- bb.all return(invisible(result)) } # determine colours if any if(!use.colour) { # black col <- colmap <- NULL } else { # multicoloured marx <- as.data.frame(marx)[, which.marks] if(is.character(marx) || length(unique(marx)) == 1) marx <- factor(marx) if(is.factor(marx)) { lev <- levels(marx) colmap <- colourmap(col=rainbow(length(lev)), inputs=factor(lev)) } else { if(!all(is.finite(marx))) warning("Some mark values are infinite or NaN or NA") colmap <- colourmap(col=rainbow(ribn), range=range(marx, finite=TRUE)) } col <- colmap(marx) } ## convert to greyscale? if(spatstat.options("monochrome")) { col <- to.grey(col) colmap <- to.grey(colmap) } if(do.plot) { ## plot segments do.call.plotfun(segments, resolve.defaults(as.list(x$ends), list(...), list(col=col), .StripNull=TRUE), extrargs=names(par())) ## plot ribbon if(do.ribbon) plot(colmap, vertical=TRUE, add=TRUE, xlim=bb.rib$xrange, ylim=bb.rib$yrange) } # return colour map result <- colmap %orifnull% colourmap() attr(result, "bbox") <- bb.all return(invisible(result)) } print.psp <- function(x, ...) 
{
  ## print method for "psp": report segment count, mark info and window
  verifyclass(x, "psp")
  n <- x$n
  ism <- is.marked(x, dfok = TRUE)
  splat(if(ism) "marked" else NULL,
        "planar line segment pattern:",
        n, ngettext(n, "line segment", "line segments"))
  if(ism) {
    ## describe the marks: data frame, factor, or plain vector
    mks <- marks(x, dfok = TRUE)
    if(is.data.frame(mks)) {
      splat("Mark variables: ", paste(names(mks), collapse = ", "))
    } else {
      if(is.factor(mks)) {
        splat("multitype, with levels =",
              paste(levels(mks), collapse = "\t"))
      } else {
        splat("marks are",
              if(is.numeric(mks)) "numeric," else NULL,
              "of type", sQuote(typeof(mks)))
      }
    }
  }
  print(x$window)
  return(invisible(NULL))
}

## Extract the name of the unit of length (stored in the window).
unitname.psp <- function(x) {
  return(unitname(x$window))
}

## Assign the name of the unit of length (stored in the window).
"unitname<-.psp" <- function(x, value) {
  w <- x$window
  unitname(w) <- value
  x$window <- w
  return(x)
}

####################################################
#  summary information
####################################################

## Extract endpoints of the segments as a point pattern ("ppp").
## 'which' selects both endpoints, the first/second endpoint as stored,
## or the leftmost/rightmost/lowest/highest endpoint of each segment.
## The result carries an attribute "id" giving, for each point,
## the index of the parent segment.
endpoints.psp <- function(x, which="both") {
  verifyclass(x, "psp")
  ends <- x$ends
  n <- x$n
  ## 'first' and 'second' are per-segment logicals selecting whether
  ## endpoint (x0,y0) and/or endpoint (x1,y1) is included
  switch(which,
         both={
           first <- second <- rep.int(TRUE, n)
         },
         first={
           first <- rep.int(TRUE, n)
           second <- rep.int(FALSE, n)
         },
         second={
           first <- rep.int(FALSE, n)
           second <- rep.int(TRUE, n)
         },
         left={
           first <- (ends$x0 < ends$x1)
           second <- !first
         },
         right={
           first <- (ends$x0 > ends$x1)
           second <- !first
         },
         lower={
           first <- (ends$y0 < ends$y1)
           second <- !first
         },
         upper={
           first <- (ends$y0 > ends$y1)
           second <- !first
         },
         stop(paste("Unrecognised option: which=", sQuote(which)))
         )
  ## rows = (first, second) endpoint, columns = segments;
  ## logical matrix indexing extracts the selected coordinates,
  ## and col() recovers the parent segment index of each one
  ok <- rbind(first, second)
  xmat <- rbind(ends$x0, ends$x1)
  ymat <- rbind(ends$y0, ends$y1)
  idmat <- col(ok)
  xx <- as.vector(xmat[ok])
  yy <- as.vector(ymat[ok])
  id <- as.vector(idmat[ok])
  result <- ppp(xx, yy, window=x$window, check=FALSE)
  attr(result, "id") <- id
  return(result)
}

## Midpoints of the segments, as a point pattern.
## In a non-convex window a midpoint may fall outside the window;
## in that case the window is replaced by its bounding box, with a warning.
midpoints.psp <- function(x) {
  verifyclass(x, "psp")
  xm <- eval(expression((x0+x1)/2), envir=x$ends)
  ym <- eval(expression((y0+y1)/2), envir=x$ends)
  win <- x$window
  ok <- inside.owin(xm, ym, win)
  if(any(!ok)) {
    warning(paste("Some segment midpoints lie outside the original window;",
                  "window replaced by bounding box"))
    win <- boundingbox(win)
  }
  ppp(x=xm, y=ym, window=win, check=FALSE)
}

## Lengths of the segments (squared lengths if squared=TRUE).
lengths.psp <- function(x, squared=FALSE) {
  verifyclass(x, "psp")
  lengths2 <- eval(expression((x1-x0)^2 + (y1-y0)^2), envir=x$ends)
  return(if(squared) lengths2 else sqrt(lengths2))
}

## Orientation angles of the segments, in radians.
## By default angles are undirected, reduced modulo pi;
## directed=TRUE returns the atan2 angles of the vectors (x1-x0, y1-y0).
angles.psp <- function(x, directed=FALSE) {
  verifyclass(x, "psp")
  a <- eval(expression(atan2(y1-y0, x1-x0)), envir=x$ends)
  if(!directed)
    a <- a %% pi
  return(a)
}

## Summary method: collect segment count, length and angle summaries,
## window summary, mark summary and unit information.
summary.psp <- function(object, ...) {
  verifyclass(object, "psp")
  len <- lengths.psp(object)
  out <- list(n = object$n,
              len = summary(len),
              totlen = sum(len),
              ang= summary(angles.psp(object)),
              w = summary.owin(object$window),
              marks=if(is.null(object$marks)) NULL else summary(object$marks),
              unitinfo=summary(unitname(object)))
  class(out) <- c("summary.psp", class(out))
  return(out)
}

## Print method for the result of summary.psp.
print.summary.psp <- function(x, ...) {
  cat(paste(x$n, "line segments\n"))
  cat("Lengths:\n")
  print(x$len)
  unitblurb <- paste(x$unitinfo$plural, x$unitinfo$explain)
  cat(paste("Total length:", x$totlen, unitblurb, "\n"))
  cat(paste("Length per unit area:", x$totlen/x$w$area, "\n"))
  cat("Angles (radians):\n")
  print(x$ang)
  print(x$w)
  if(!is.null(x$marks)) {
    cat("Marks:\n")
    print(x$marks)
  }
  return(invisible(NULL))
}

########################################################
#  subsets
########################################################

## Subset operator for psp.
## 'i' may be a window ("owin"), giving spatial clipping via clip.psp,
## or an index vector applied to the segments (marks are subsetted
## in parallel). 'j', if given, is applied as a second subset index.
"[.psp" <- function(x, i, j, drop, ..., fragments=TRUE) {
  verifyclass(x, "psp")
  if(missing(i) && missing(j))
    return(x)
  if(!missing(i)) {
    style <- if(inherits(i, "owin")) "window" else "index"
    switch(style,
           window={
             x <- clip.psp(x, window=i, check=FALSE, fragments=fragments)
           },
           index={
             enz <- x$ends[i, ]
             win <- x$window
             marx <- marksubset(x$marks, i, markformat(x))
             x <- with(enz, psp(x0, y0, x1, y1, window=win,
                                marks=marx, check=FALSE))
           })
  }
  if(!missing(j))
    x <- x[j] # invokes code above
  return(x)
}

####################################################
# affine transformations
####################################################
## Apply the affine transformation x -> mat %*% x + vec to the segment
## endpoints and to the window.
affine.psp <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) {
  verifyclass(X, "psp")
  W <- affine.owin(X$window, mat=mat, vec=vec, ...)
  E <- X$ends
  ends0 <- affinexy(list(x=E$x0,y=E$y0), mat=mat, vec=vec)
  ends1 <- affinexy(list(x=E$x1,y=E$y1), mat=mat, vec=vec)
  psp(ends0$x, ends0$y, ends1$x, ends1$y, window=W,
      marks=marks(X, dfok=TRUE), check=FALSE)
}

## Shift (translate) a line segment pattern by vector 'vec'.
## If 'origin' is given ("centroid", "midpoint" or "bottomleft"),
## the pattern is shifted so that that point of the window moves to
## the coordinate origin, and 'vec' is ignored (with a warning).
shift.psp <- function(X, vec=c(0,0), ..., origin=NULL) {
  verifyclass(X, "psp")
  if(!is.null(origin)) {
    stopifnot(is.character(origin))
    if(!missing(vec))
      warning("Argument vec ignored; argument origin has precedence.\n")
    origin <- pickoption("origin", origin,
                         c(centroid="centroid",
                           midpoint="midpoint",
                           bottomleft="bottomleft"))
    W <- as.owin(X)
    locn <- switch(origin,
                   centroid={ unlist(centroid.owin(W)) },
                   midpoint={ c(mean(W$xrange), mean(W$yrange)) },
                   bottomleft={ c(W$xrange[1], W$yrange[1]) })
    return(shift(X, -locn))
  }
  # perform shift
  W <- shift.owin(X$window, vec=vec, ...)
  E <- X$ends
  ends0 <- shiftxy(list(x=E$x0,y=E$y0), vec=vec, ...)
  ends1 <- shiftxy(list(x=E$x1,y=E$y1), vec=vec, ...)
  Y <- psp(ends0$x, ends0$y, ends1$x, ends1$y, window=W,
           marks=marks(X, dfok=TRUE), check=FALSE)
  # tack on shift vector
  attr(Y, "lastshift") <- vec
  return(Y)
}

## Rotate a line segment pattern by 'angle' radians.
## If 'centre' is given, rotation is about that point: the pattern is
## shifted so 'centre' goes to the origin, rotated, then shifted back
## using the "lastshift" attribute recorded by shift.psp.
rotate.psp <- function(X, angle=pi/2, ..., centre=NULL) {
  verifyclass(X, "psp")
  if(!is.null(centre)) {
    X <- shift(X, origin=centre)
    negorigin <- getlastshift(X)
  } else negorigin <- NULL
  W <- rotate.owin(X$window, angle=angle, ...)
  E <- X$ends
  ends0 <- rotxy(list(x=E$x0,y=E$y0), angle=angle)
  ends1 <- rotxy(list(x=E$x1,y=E$y1), angle=angle)
  Y <- psp(ends0$x, ends0$y, ends1$x, ends1$y, window=W,
           marks=marks(X, dfok=TRUE), check=FALSE)
  if(!is.null(negorigin))
    Y <- shift(Y, -negorigin)
  return(Y)
}

## TRUE if the pattern contains no segments.
is.empty.psp <- function(x) {
  return(x$n == 0)
}

## Interactive identification of segments by mouse click.
## Each click selects the segment nearest to the clicked location; up to
## 'n' distinct segments are collected, and (if plot=TRUE) each selected
## segment is labelled on the current plot. Returns the selected indices.
identify.psp <- function(x, ..., labels=seq_len(nsegments(x)),
                         n=nsegments(x), plot=TRUE) {
  Y <- x
  W <- as.owin(Y)
  mids <- midpoints.psp(Y)
  ## text position code (pos = 1,2,3,4) selected from the orientation
  ## octant of each segment -- presumably so the label is placed clear
  ## of the segment itself (TODO confirm intent)
  poz <- c(1, 2,4, 3)[(floor(angles.psp(Y)/(pi/4)) %% 4) + 1L]
  if(!(is.numeric(n) && (length(n) == 1) && (n %% 1 == 0) && (n >= 0)))
    stop("n should be a single integer")
  out <- integer(0)
  while(length(out) < n) {
    xy <- locator(1)
    # check for interrupt exit
    if(length(xy$x) == 0)
      return(out)
    # find nearest segment
    X <- ppp(xy$x, xy$y, window=W)
    ident <- project2segment(X, Y)$mapXY
    # add to list
    if(ident %in% out) {
      cat(paste("Segment", ident, "already selected\n"))
    } else {
      if(plot) {
        # Display
        mi <- mids[ident]
        li <- labels[ident]
        po <- poz[ident]
        do.call.matched(graphics::text.default,
                        resolve.defaults(list(x=mi$x, y=mi$y, labels=li),
                                         list(...),
                                         list(pos=po)))
      }
      out <- c(out, ident)
    }
  }
  # exit if max n reached
  return(out)
}

## Generic: number of segments in an object.
nsegments <- function(x) {
  UseMethod("nsegments")
}

## psp methods: the segment count is stored in x$n.
nobjects.psp <- nsegments.psp <- function(x) { x$n }

## Convert to a point pattern consisting of both endpoints of every
## segment; marks (if any) are duplicated accordingly.
## ('fatal' is accepted for consistency with other as.ppp methods
## but is not used in this body.)
as.ppp.psp <- function (X, ..., fatal=TRUE) {
  Y <- endpoints.psp(X, which="both")
  m <- marks(X)
  marks(Y) <- markappend(m, m)
  return(Y)
}

## Extract the window containing the pattern.
domain.psp <- Window.psp <- function(X, ...) { as.owin(X) }

## Replace the window: the pattern is clipped to the new window
## via the "[.psp" subset operator.
"Window<-.psp" <- function(X, ..., value) {
  verifyclass(value, "owin")
  X[value]
}

## Edit the segment coordinates interactively via the data-frame editor.
## (The argument is called 'name' to match the generic edit().)
edit.psp <- function(name, ...) {
  x <- name
  y <- edit(as.data.frame(x), ...)
  xnew <- as.psp(y, window=Window(x))
  return(xnew)
}

text.psp <- function(x, ...)
{ mids <- midpoints.psp(x) poz <- c(1, 2,4, 3)[(floor(angles.psp(x)/(pi/4)) %% 4) + 1L] do.call.matched(graphics::text.default, resolve.defaults(list(x=mids$x, y=mids$y), list(...), list(pos=poz), .StripNull=TRUE)) return(invisible(NULL)) } spatstat/R/profilepl.R0000755000176200001440000002671013165362530014452 0ustar liggesusers# # profilepl.R # # $Revision: 1.44 $ $Date: 2017/10/05 07:53:38 $ # # computes profile log pseudolikelihood # profilepl <- local({ ## Determine edge correction ## with partial matching, avoiding collisions with ## other arguments to ppm that have similar names. getppmcorrection <- function(..., correction = "border", covariates = NULL, covfunargs = NULL, control = NULL) { return(correction) } isSingleNA <- function(x) { length(x) == 1 && is.na(x) } profilepl <- function(s, f, ..., aic=FALSE, rbord=NULL, verbose=TRUE) { callenv <- parent.frame() s <- as.data.frame(s) n <- nrow(s) stopifnot(is.function(f)) ## validate 's' parms <- names(s) fargs <- names(formals(f)) if(!all(fargs %in% parms)) { bad <- !(fargs %in% parms) forgiven <- sapply(formals(f)[bad], isSingleNA) if(!all(forgiven)) { slecht <- fargs[bad[!forgiven]] nsl <- length(slecht) stop(paste(ngettext(nsl, "Argument", "Arguments"), commasep(sQuote(slecht)), ngettext(nsl, "is", "are"), "not provided in the data frame s")) } } ## extra columns in 's' are assumed to be parameters of covariate functions is.farg <- parms %in% fargs pass.cfa <- any(!is.farg) got.cfa <- "covfunargs" %in% names(list(...)) if(pass.cfa && got.cfa) stop("Some columns in s are superfluous") ## criterion <- numeric(n) ## make a fake call pseudocall <- match.call() pseudocall[[1]] <- as.symbol("ppm") namcal <- names(pseudocall) ## remove arguments 's' and 'verbose' retain <- !(namcal %in% c("s", "verbose")) pseudocall <- pseudocall[retain] namcal <- namcal[retain] ## place 'f' argument third np <- length(pseudocall) fpos <- (1:np)[namcal == "f"] indices <- (1:np)[-fpos] if(length(indices) < 3) { indices <- 
c(indices, fpos) } else { indices <- c(indices[1:3], fpos, indices[-(1:3)]) } pseudocall <- pseudocall[indices] namcal <- names(pseudocall) namcal[namcal=="f"] <- "interaction" names(pseudocall) <- namcal ## get correction correction <- getppmcorrection(...) if(correction == "border") { ## determine border correction distance if(is.null(rbord)) { ## compute rbord = max reach of interactions if(verbose) message("(computing rbord)") for(i in 1:n) { fi <- do.call(f, as.list(s[i, is.farg, drop=FALSE])) if(!inherits(fi, "interact")) stop(paste("f did not yield an object of class", sQuote("interact"))) re <- reach(fi) if(is.null(rbord)) rbord <- re else if(rbord < re) rbord <- re } } } ## determine whether computations can be saved if(pass.cfa || got.cfa) { savecomp <- FALSE } else { Q <- do.call(ppm, append(list(...), list(rbord=rbord, justQ=TRUE)), envir=callenv) savecomp <- !oversize.quad(Q) } ## go gc() if(verbose) { message(paste("comparing", n, "models...")) pstate <- list() } for(i in 1:n) { if(verbose) pstate <- progressreport(i, n, state=pstate) fi <- do.call(f, as.list(s[i, is.farg, drop=FALSE])) if(!inherits(fi, "interact")) stop(paste("f did not yield an object of class", sQuote("interact"))) if(pass.cfa) cfai <- list(covfunargs=as.list(s[i, !is.farg, drop=FALSE])) ## fit model if(i == 1) { ## fit from scratch arg1 <- list(..., interaction=fi, rbord=rbord, savecomputed=savecomp, warn.illegal=FALSE, callstring="", skip.border=TRUE) if(pass.cfa) arg1 <- append(arg1, cfai) fiti <- do.call(ppm, arg1, envir=callenv) ## save intermediate computations (pairwise distances, etc) precomp <- fiti$internal$computed savedargs <- list(..., rbord=rbord, precomputed=precomp, warn.illegal=FALSE, callstring="", skip.border=TRUE) } else { ## use precomputed data argi <- append(savedargs, list(interaction=fi)) if(pass.cfa) argi <- append(argi, cfai) fiti <- do.call(ppm, argi, envir=callenv) } ## save log pl for each fit criterion[i] <- if(aic) -AIC(fiti) else 
as.numeric(logLik(fiti, warn=FALSE)) ## save fitted coefficients for each fit co <- coef(fiti) if(i == 1) { allcoef <- data.frame(matrix(co, nrow=1)) names(allcoef) <- names(co) } else allcoef <- rbind(allcoef, co) } if(verbose) message("fitting optimal model...") opti <- which.max(criterion) gc() optint <- do.call(f, as.list(s[opti, is.farg, drop=FALSE])) optarg <- list(..., interaction=optint, rbord=rbord) if(pass.cfa) { optcfa <- as.list(s[opti, !is.farg, drop=FALSE]) attr(optcfa, "fitter") <- "profilepl" optarg <- append(optarg, list(covfunargs=optcfa)) } optfit <- do.call(ppm, optarg, envir=callenv) if(verbose) message("done.") critname <- if(aic) "-AIC" else if(is.poisson(optfit)) "log l" else if(optfit$method == "logi") "log CL" else "log PL" result <- list(param=s, prof=criterion, critname=critname, iopt=opti, fit=optfit, rbord=rbord, fname=as.interact(optfit)$name, allcoef=allcoef, otherstuff=list(...), pseudocall=pseudocall) class(result) <- c("profilepl", class(result)) return(result) } profilepl }) ## ## print method ## print.profilepl <- function(x, ...) { head1 <- "profile log pseudolikelihood" head2 <- "for model: " psc <- paste(unlist(strsplitretain(format(x$pseudocall))), collapse=" ") if(nchar(psc) + nchar(head2) + 1 <= getOption('width')) { splat(head1) splat(head2, psc) } else { splat(head1, head2) splat(psc) } nparm <- ncol(x$param) if(waxlyrical('extras')) { corx <- x$fit$correction if(identical(corx, "border") && !is.null(x$rbord)) splat("fitted with rbord =", x$rbord) splat("interaction:", x$fname) splat("irregular", ngettext(nparm, "parameter:", "parameters:\n"), paste(names(x$param), "in", unlist(lapply(lapply(as.list(x$param), range), prange)), collapse="\n")) } popt <- x$param[x$iopt,, drop=FALSE] splat("optimum", ngettext(nparm, "value", "values"), "of irregular", ngettext(nparm, "parameter: ", "parameters:\n"), commasep(paste(names(popt), "=", popt))) invisible(NULL) } ## ## summary method ## summary.profilepl <- function(object, ...) 
{ print(object) cat("\n\noptimal model:\n") print(object$fit) } as.ppm.profilepl <- function(object) { object$fit } fitin.profilepl <- function(object) { fitin(as.ppm(object)) } predict.profilepl <- function(object, ...) { predict(as.ppm(object), ...) } ## ## plot method ## plot.profilepl <- local({ plot.profilepl <- function(x, ..., add=FALSE, main=NULL, tag=TRUE, coeff=NULL, xvariable=NULL, col=1, lty=1, lwd=1, col.opt="green", lty.opt=3, lwd.opt=1) { para <- x$param ## graphics arguments may be expressions involving parameters if(ncol(para) > 1) { col <- eval(substitute(col), para) lwd <- eval(substitute(lwd), para) lty <- eval(substitute(lty), para) px <- cbind(para, col, lwd, lty, stringsAsFactors=FALSE) col <- px$col lwd <- px$lwd lty <- px$lty } ## strip any column that is entirely na nacol <- sapply(para, none.finite) para <- para[, !nacol, drop=FALSE] ## npara <- ncol(para) ## main header if(is.null(main)) main <- short.deparse(x$pseudocall) ## x variable for plot if(is.null(xvariable)) { xvalues <- para[,1] xname <- names(para)[1] } else { stopifnot(is.character(xvariable)) if(!(xvariable %in% names(para))) stop("there is no irregular parameter named", sQuote(xvariable)) xvalues <- para[[xvariable]] xname <- xvariable } ## y variable for plot if(is.null(coeff)) { yvalues <- x$prof ylab <- x$critname %orifnull% "log pl" } else { stopifnot(is.character(coeff)) allcoef <- x$allcoef if(!(coeff %in% names(allcoef))) stop(paste("there is no coefficient named", sQuote(coeff), "in the fitted model")) yvalues <- allcoef[[coeff]] ylab <- paste("coefficient:", coeff) } ## start plot if(!add) do.call.matched(plot.default, resolve.defaults(list(x=range(xvalues), y=range(yvalues)), list(type="n", main=main), list(...), list(ylab=ylab, xlab=xname)), extrargs=graphicsPars("plot")) linepars <- graphicsPars("lines") if(npara == 1) { ## single curve do.call.matched(lines.default, resolve.defaults(list(x=xvalues, y=yvalues, ...), spatstat.options("par.fv")), 
extrargs=linepars) } else { ## multiple curves other <- para[, -1, drop=FALSE] tapply(1:nrow(para), as.list(other), plotslice, xvalues=xvalues, yvalues=yvalues, other=other, tag=tag, ..., col=col, lwd=lwd, lty=lty, lineargs=linepars) } ## show optimal value do.call.matched(abline, resolve.defaults(list(v = xvalues[x$iopt]), list(...), list(lty=lty.opt, lwd=lwd.opt, col=col.opt)), extrargs=linepars) return(invisible(NULL)) } plotslice <- function(z, xvalues, yvalues, other, tag=TRUE, ..., lty=1, col=1, lwd=1, lineargs) { fz <- xvalues[z] pz <- yvalues[z] n <- length(xvalues) if(length(lty) == n) lty <- unique(lty[z])[1] if(length(col) == n) col <- unique(col[z])[1] if(length(lwd) == n) lwd <- unique(lwd[z])[1] do.call.matched(lines.default, resolve.defaults(list(x=fz, y=pz, col=col, lwd=lwd, lty=lty), list(...)), extrargs=lineargs) if(tag) { oz <- other[z, , drop=FALSE] uniques <- apply(oz, 2, unique) labels <- paste(names(uniques), "=", uniques, sep="") label <- paste(labels, sep=",") ii <- which.max(pz) do.call.matched(text.default, list(x=fz[ii], y=pz[ii], labels=label, col=col, ...), funargs=graphicsPars("text")) } return(NULL) } none.finite <- function(x) all(!is.finite(x)) plot.profilepl }) simulate.profilepl <- function(object, ...) { simulate(as.ppm(object), ...) } parameters.profilepl <- function(model, ...) { parameters(as.ppm(model)) } spatstat/R/texture.R0000644000176200001440000003041413115225157014145 0ustar liggesusers## ## texture.R ## ## Texture plots and texture maps ## ## $Revision: 1.15 $ $Date: 2016/02/16 01:39:12 $ ### .................. basic graphics ............................. ## put hatching in a window add.texture <- function(W, texture=4, spacing=NULL, ...) 
{ if(is.data.frame(texture)) { ## texture = f(x) where f is a texturemap out <- do.call(add.texture, resolve.defaults(list(W=W, spacing=spacing), list(...), as.list(texture))) return(out) } ## texture should be an integer stopifnot(is.owin(W)) stopifnot(texture %in% 1:8) if(is.null(spacing)) { spacing <- diameter(as.rectangle(W))/50 } else { check.1.real(spacing) stopifnot(spacing > 0) } P <- L <- NULL switch(texture, { ## texture 1: graveyard P <- rsyst(W, dx=3*spacing) }, { ## texture 2: vertical lines L <- rlinegrid(90, spacing, W)[W] }, { ## texture 3: horizontal lines L <- rlinegrid(0, spacing, W)[W] }, { ## texture 4: forward slashes L <- rlinegrid(45, spacing, W)[W] }, { ## texture 5: back slashes L <- rlinegrid(135, spacing, W)[W] }, { ## texture 6: horiz/vert grid L0 <- rlinegrid(0, spacing, W)[W] L90 <- rlinegrid(90, spacing, W)[W] L <- superimpose(L0, L90, W=W, check=FALSE) }, { ## texture 7: diagonal grid L45 <- rlinegrid(45, spacing, W)[W] L135 <- rlinegrid(135, spacing, W)[W] L <- superimpose(L45, L135, W=W, check=FALSE) }, { ## texture 8: hexagons H <- hextess(W, spacing, offset=runifpoint(1, W)) H <- intersect.tess(H, W) do.call.matched(plot.tess, resolve.defaults(list(x=H, add=TRUE), list(...))) }) if(!is.null(P)) do.call.matched(plot.ppp, resolve.defaults(list(x=P, add=TRUE), list(...), list(chars=3, cex=0.2)), extrargs=c("lwd", "col", "cols", "pch")) if(!is.null(L)) do.call.matched(plot.psp, resolve.defaults(list(x=L, add=TRUE), list(...)), extrargs=c("lwd","lty","col")) return(invisible(NULL)) } ## .................. texture maps ................................ ## create a texture map texturemap <- function(inputs, textures, ...) { argh <- list(...) 
if(length(argh) > 0) { isnul <- unlist(lapply(argh, is.null)) argh <- argh[!isnul] } if(missing(textures) || is.null(textures)) textures <- seq_along(inputs) df <- do.call(data.frame, append(list(input=inputs, texture=textures), argh)) f <- function(x) { df[match(x, df$input), -1, drop=FALSE] } class(f) <- c("texturemap", class(f)) attr(f, "df") <- df return(f) } print.texturemap <- function(x, ...) { cat("Texture map\n") print(attr(x, "df")) return(invisible(NULL)) } ## plot a texture map plot.texturemap <- local({ ## recognised additional arguments to and axis() axisparams <- c("cex", "cex.axis", "cex.lab", "col.axis", "col.lab", "font.axis", "font.lab", "las", "mgp", "xaxp", "yaxp", "tck", "tcl", "xpd") # rules to determine the map dimensions when one dimension is given widthrule <- function(heightrange, separate, n, gap) { if(separate) 1 else diff(heightrange)/10 } heightrule <- function(widthrange, separate, n, gap) { (if(separate) (n + (n-1)*gap) else 10) * diff(widthrange) } plot.texturemap <- function(x, ..., main, xlim=NULL, ylim=NULL, vertical=FALSE, axis=TRUE, labelmap=NULL, gap=0.25, spacing=NULL, add=FALSE) { if(missing(main)) main <- short.deparse(substitute(x)) df <- attr(x, "df") # textures <- df$textures n <- nrow(df) check.1.real(gap, "In plot.texturemap") explain.ifnot(gap >= 0, "In plot.texturemap") separate <- (gap > 0) if(is.null(labelmap)) { labelmap <- function(x) x } else stopifnot(is.function(labelmap)) ## determine rectangular window for display rr <- c(0, n + (n-1)*gap) if(is.null(xlim) && is.null(ylim)) { u <- widthrule(rr, separate, n, gap) if(!vertical) { xlim <- rr ylim <- c(0,u) } else { xlim <- c(0,u) ylim <- rr } } else if(is.null(ylim)) { if(!vertical) ylim <- c(0, widthrule(xlim, separate, n, gap)) else ylim <- c(0, heightrule(xlim, separate, n, gap)) } else if(is.null(xlim)) { if(!vertical) xlim <- c(0, heightrule(ylim, separate, n, gap)) else xlim <- c(0, widthrule(ylim, separate, n, gap)) } width <- diff(xlim) height <- 
diff(ylim) ## determine boxes to be filled with textures, if(vertical) { boxheight <- min(width, height/(n + (n-1) * gap)) vgap <- (height - n * boxheight)/(n-1) boxes <- list() for(i in 1:n) boxes[[i]] <- owin(xlim, ylim[1] + c(i-1, i) * boxheight + (i-1) * vgap) } else { boxwidth <- min(height, width/(n + (n-1) * gap)) hgap <- (width - n * boxwidth)/(n-1) boxes <- list() for(i in 1:n) boxes[[i]] <- owin(xlim[1] + c(i-1, i) * boxwidth + (i-1) * hgap, ylim) } boxsize <- shortside(boxes[[1]]) if(is.null(spacing)) spacing <- 0.1 * boxsize # .......... initialise plot ............................... if(!add) do.call.matched(plot.default, resolve.defaults(list(x=xlim, y=ylim, type="n", main=main, axes=FALSE, xlab="", ylab="", asp=1.0), list(...))) ## ................ plot texture blocks ................. for(i in 1:n) { dfi <- df[i,,drop=FALSE] add.texture(W=boxes[[i]], texture=dfi, ..., spacing=spacing) plot(boxes[[i]], add=TRUE) } if(axis) { # ................. draw annotation .................. 
la <- paste(labelmap(df$input)) if(!vertical) { ## add horizontal axis/annotation at <- lapply(lapply(boxes, centroid.owin), "getElement", name="x") # default axis position is below the ribbon (side=1) sidecode <- resolve.1.default("side", list(...), list(side=1)) if(!(sidecode %in% c(1,3))) warning(paste("side =", sidecode, "is not consistent with horizontal orientation")) pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode] # don't draw axis lines if plotting separate blocks lwd0 <- if(separate) 0 else 1 # draw axis do.call.matched(graphics::axis, resolve.defaults(list(...), list(side = 1, pos = pos, at = at), list(labels=la, lwd=lwd0)), extrargs=axisparams) } else { ## add vertical axis at <- lapply(lapply(boxes, centroid.owin), "getElement", name="y") # default axis position is to the right of ribbon (side=4) sidecode <- resolve.1.default("side", list(...), list(side=4)) if(!(sidecode %in% c(2,4))) warning(paste("side =", sidecode, "is not consistent with vertical orientation")) pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode] # don't draw axis lines if plotting separate blocks lwd0 <- if(separate) 0 else 1 # draw labels horizontally if plotting separate blocks las0 <- if(separate) 1 else 0 # draw axis do.call.matched(graphics::axis, resolve.defaults(list(...), list(side=4, pos=pos, at=at), list(labels=la, lwd=lwd0, las=las0)), extrargs=axisparams) } } invisible(NULL) } plot.texturemap }) ## plot a pixel image using textures textureplot <- local({ textureplot <- function(x, ..., main, add=FALSE, clipwin=NULL, do.plot=TRUE, border=NULL, col=NULL, lwd=NULL, lty=NULL, spacing=NULL, textures=1:8, legend=TRUE, leg.side=c("right", "left", "bottom", "top"), legsep=0.1, legwid=0.2) { if(missing(main)) main <- short.deparse(substitute(x)) if(!(is.im(x) || is.tess(x))) { x <- try(as.tess(x), silent=TRUE) if(inherits(x, "try-error")) x <- try(as.im(x), silent=TRUE) if(inherits(x, "try-error")) stop("x must be a pixel image or a tessellation", call.=FALSE) } 
leg.side <- match.arg(leg.side) if(!is.null(clipwin)) x <- x[clipwin, drop=FALSE] if(is.im(x)) { if(x$type != "factor") x <- eval.im(factor(x)) levX <- levels(x) } else { tilX <- tiles(x) levX <- names(tilX) } n <- length(levX) if(n > 8) stop("Too many factor levels or tiles: maximum is 8") ## determine texture map if(inherits(textures, "texturemap")) { tmap <- textures } else { stopifnot(all(textures %in% 1:8)) stopifnot(length(textures) >= n) mono <- spatstat.options("monochrome") col <- enforcelength(col, n, if(mono) 1 else 1:8) lwd <- if(is.null(lwd)) NULL else enforcelength(lwd, n, 1) lty <- if(is.null(lty)) NULL else enforcelength(lwd, n, 1) tmap <- texturemap(inputs=levX, textures=textures[1:n], col=col, lwd=lwd, lty=lty) } ## determine plot region bb <- as.rectangle(x) if(!legend) { bb.all <- bb } else { Size <- max(sidelengths(bb)) bb.leg <- switch(leg.side, right={ ## legend to right of image owin(bb$xrange[2] + c(legsep, legsep+legwid) * Size, bb$yrange) }, left={ ## legend to left of image owin(bb$xrange[1] - c(legsep+legwid, legsep) * Size, bb$yrange) }, top={ ## legend above image owin(bb$xrange, bb$yrange[2] + c(legsep, legsep+legwid) * Size) }, bottom={ ## legend below image owin(bb$xrange, bb$yrange[1] - c(legsep+legwid, legsep) * Size) }) iside <- match(leg.side, c("bottom", "left", "top", "right")) bb.all <- boundingbox(bb.leg, bb) } ## result <- tmap attr(result, "bbox") <- bb ## if(do.plot) { ## Plot textures if(!add) { plot(bb.all, type="n", main="") fakemaintitle(bb, main, ...) } if(is.null(spacing)) spacing <- diameter(as.rectangle(x))/50 areas <- if(is.im(x)) table(x$v) else tile.areas(x) for(i in which(areas > 0)) { Zi <- if(is.tess(x)) tilX[[i]] else levelset(x, levX[i], "==") Zi <- as.polygonal(Zi) if(is.null(border) || !is.na(border)) plot(Zi, add=TRUE, border=border) add.texture(Zi, texture=tmap(levX[i]), spacing=spacing, ...) 
} vertical <- leg.side %in% c("left", "right") if(legend) do.call(plot.texturemap, resolve.defaults(list(x=tmap, add=TRUE, vertical=vertical, side=iside, xlim=bb.leg$xrange, ylim=bb.leg$yrange, spacing=spacing), list(...))) } return(invisible(result)) } enforcelength <- function(x, n, x0) { if(is.null(x)) x <- x0 if(length(x) < n) x <- rep(x, n) return(x[1:n]) } textureplot }) spatstat/R/hexagons.R0000644000176200001440000000531313115225157014261 0ustar liggesusers## hexagons.R ## $Revision: 1.6 $ $Date: 2017/02/07 07:35:32 $ hexgrid <- function(W, s, offset=c(0,0), origin=NULL, trim=TRUE) { W <- as.owin(W) check.1.real(s) stopifnot(s > 0) hstep <- 3 * s vstep <- sqrt(3) * s R <- grow.rectangle(as.rectangle(W), hstep) xr <- R$xrange yr <- R$yrange ## initial positions for 'odd' and 'even grids p0 <- as2vector(origin %orifnull% centroid.owin(R)) p0 <- p0 + as2vector(offset) q0 <- p0 + c(hstep, vstep)/2 ## 'even' points p0 <- c(startinrange(p0[1L], hstep, xr), startinrange(p0[2L], vstep, yr)) if(!anyNA(p0)) { xeven <- prolongseq(p0[1L], xr, step=hstep) yeven <- prolongseq(p0[2L], yr, step=vstep) xyeven <- expand.grid(x=xeven, y=yeven) } else xyeven <- list(x=numeric(0), y=numeric(0)) ## 'odd' points q0 <- c(startinrange(q0[1L], hstep, xr), startinrange(q0[2L], vstep, yr)) if(!anyNA(q0)) { xodd <- prolongseq(q0[1L], xr, step=hstep) yodd <- prolongseq(q0[2L], yr, step=vstep) xyodd <- expand.grid(x=xodd, y=yodd) } else xyodd <- list(x=numeric(0), y=numeric(0)) ## xy <- concatxy(xyeven, xyodd) XY <- as.ppp(xy, W=R) ## if(trim) return(XY[W]) ok <- inside.owin(XY, w=dilation.owin(W, s)) return(XY[ok]) } hextess <- function(W, s, offset=c(0,0), origin=NULL, trim=TRUE) { W <- as.owin(W) G <- hexgrid(W=W, s=s, offset=offset, origin=origin, trim=FALSE) if(trim && is.mask(W)) { ## Result is a pixel image tessellation ## Determine pixel resolution by extending 'W' to larger domain of 'G' rasta <- harmonise.im(as.im(1, W), as.owin(G))[[1L]] rasta <- as.mask(rasta) ## Tweak G to 
have mask window G$window <- rasta ## img <- nnmap(G, what="which") result <- tess(image=img) return(result) } ## Result is a polygonal tessellation Gxy <- as.matrix(as.data.frame(G)) n <- nrow(Gxy) ## Hexagon centred at origin hex0 <- disc(npoly=6, radius=s) ## Form hexagons hexes <- vector(mode="list", length=n) for(i in 1:n) hexes[[i]] <- shift(hex0, Gxy[i,]) ## Determine whether tiles intersect window wholly or partly suspect <- rep(TRUE, n) GW <- G[W] GinW <- inside.owin(G, w=W) suspect[GinW] <- (bdist.points(GW) <= s) ## Compute intersection of tiles with window trimmed <- hexes trimmed[suspect] <- trimmed.suspect <- lapply(trimmed[suspect], intersect.owin, B=W, fatal=FALSE) nonempty <- rep(TRUE, n) nonempty[suspect] <- !unlist(lapply(trimmed.suspect, is.empty)) if(trim) { ## return the tiles intersected with W result <- tess(tiles=trimmed[nonempty], window=W) } else { ## return the tiles that have nonempty intersection with W result <- tess(tiles=hexes[nonempty]) } return(result) } spatstat/R/rags.R0000644000176200001440000000511313115225157013377 0ustar liggesusers#' #' rags.R #' #' Alternating Gibbs Sampler #' #' $Revision: 1.6 $ $Date: 2016/11/29 05:01:51 $ #' #' Initial implementation for multitype hard core process #' without interaction within types rags <- function(model, ..., ncycles=100) { if(!is.list(model)) stop("Argument 'model' should be a list") if(!all(c("beta", "hradii") %in% names(model))) stop("Argument 'model' should have entries 'beta' and 'hradii'") do.call(ragsMultiHard, append(model, list(..., ncycles=ncycles))) } ragsMultiHard <- function(beta, hradii, ..., types=NULL, bmax=NULL, periodic=FALSE, ncycles=100) { ## validate beta by generating first proposal points Xprop <- rmpoispp(lambda=beta, lmax=bmax, ..., types=types) ntypes <- length(levels(marks(Xprop))) check.nmatrix(hradii, ntypes, things="types of points") if(any(is.finite(dh <- diag(hradii)) & dh > 0)) stop("Interaction between points of the same type is not permitted") ## 
initial state empty X <- Xprop[integer(0)] Y <- split(X) ## for(cycle in 1:ncycles) { if(cycle > 1) Xprop <- rmpoispp(lambda=beta, lmax=bmax, ..., types=types) Xprop <- Xprop[order(coords(Xprop)$x)] Yprop <- split(Xprop) for(i in 1:ntypes) { Xi <- Yprop[[i]] ok <- TRUE for(j in (1:ntypes)[-i]) { if(!any(ok)) break; ok <- ok & !has.close(Xi, hradii[i,j], Y[[j]], sorted=TRUE, periodic=periodic) } Y[[i]] <- Xi[ok] } } Z <- do.call(superimpose, Y) return(Z) } ragsAreaInter <- function(beta, eta, r, ..., win=NULL, bmax=NULL, periodic=FALSE, ncycles=100) { check.1.real(eta) check.1.real(r) if(r == 0 || eta == 1) return(rpoispp(beta, win=win, lmax=bmax, ...)) if(eta < 1) stop("Alternating Gibbs algorithm requires eta >= 1", call.=FALSE) if(is.function(beta)) { beta <- as.im(beta, W=win, ...) } else if(is.numeric(beta)) { check.1.real(beta) stopifnot(beta >= 0) } else if(!is.im(beta)) { stop("beta should be a number, a pixel image, or a function(x,y)", call.=FALSE) } if(is.im(beta) && is.null(win)) win <- as.owin(beta) kappa <- beta * eta loggamma <- log(eta)/(pi * r^2) bmax <- if(is.null(bmax)) NULL else c(max(kappa), loggamma) B <- if(is.numeric(beta)) c(kappa, loggamma) else solist(kappa, as.im(loggamma, W=win)) H <- matrix(c(0,r,r,0), 2, 2) Y <- ragsMultiHard(B, H, types=1:2, bmax=bmax, periodic=periodic, ncycles=ncycles) X <- split(Y)[[1]] return(X) } spatstat/R/nnfromvertex.R0000644000176200001440000000776013161364630015213 0ustar liggesusers#' nnfromvertex.R #' #' Nearest data point to each vertex of a network #' #' $Revision: 1.2 $ $Date: 2017/09/23 04:56:45 $ #' nnfromvertex <- function(X, what=c("dist", "which"), k=1) { stopifnot(is.lpp(X)) what <- match.arg(what, several.ok=TRUE) nX <- npoints(X) nv <- nvertices(domain(X)) #' k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) 
stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) #' Initialise results nnd <- matrix(Inf, nrow=nv, ncol=kmax) nnw <- matrix(NA_integer_, nrow=nv, ncol=kmax) colnames(nnd) <- colnames(nnw) <- 1:kmax #' Trivial cases if(nX > 0) { #' Unique points, remembering original sequence ii <- which(!duplicated(X)) uX <- X[ii] #' local coordinates coUX <- coords(uX)[, c("seg", "tp")] #' add label from original sequence index coUX$lab <- ii #' reorder oo <- with(coUX, order(seg, tp)) coUXord <- coUX[oo, , drop=FALSE] seg <- coUXord$seg tp <- coUXord$tp #' network data L <- domain(X) nv <- nvertices(L) ns <- nsegments(L) seglen <- lengths.psp(as.psp(L)) from <- L$from to <- L$to #' upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance for nnwhich tol <- max(sqrt(.Machine$double.eps), diameter(Frame(L))/2^20) #' .............................................. #' number of neighbours that are well-defined kmaxcalc <- min(nX, kmax) #' calculate k-nn distances and identifiers for 1 <= k <= kmaxcalc z <- vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax=kmaxcalc) vnndist <- z$vnndist vnnwhich <- z$vnnwhich #' map identifiers back to original data pattern vnnwhich <- coUXord$lab[vnnwhich] #' insert results in correct places nnd[, 1:kmaxcalc] <- vnndist nnw[, 1:kmaxcalc] <- vnnwhich } #' extract required values nnd <- nnd[,k, drop=TRUE] nnw <- nnw[,k, drop=TRUE] if(identical(what, "dist")) return(nnd) if(identical(what, "which")) return(nnw) return(cbind(data.frame(dist=nnd), data.frame(which=nnw))) } vnnFind <- function(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax=1) { #' Find data point nearest to each vertex of network #' Assumed 'seg' is sorted in increasing order #' 'tp' is increasing within 'seg' nX <- length(seg) from0 <- from - 1L to0 <- to - 1L seg0 <- seg - 1L #' if(kmax == 1) { z <- .C("Clinvwhichdist", np = as.integer(nX), sp = as.integer(seg0), tp = as.double(tp), nv 
= as.integer(nv), ns = as.integer(ns), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), dist = as.double(numeric(nv)), which = as.integer(integer(nv)), PACKAGE = "spatstat") } else { z <- .C("linvknndist", kmax = as.integer(kmax), nq = as.integer(nX), sq = as.integer(seg0), tq = as.double(tp), nv = as.integer(nv), ns = as.integer(ns), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), dist = as.double(numeric(kmax * nv)), which = as.integer(integer(kmax * nv)), PACKAGE = "spatstat") } vnndist <- z$dist vnnwhich <- z$which + 1L vnnwhich[vnnwhich == 0] <- NA # possible if network is disconnected if(kmax > 1) { vnndist <- matrix(vnndist, ncol=kmax, byrow=TRUE) vnnwhich <- matrix(vnnwhich, ncol=kmax, byrow=TRUE) } return(list(vnndist=vnndist, vnnwhich=vnnwhich)) } spatstat/R/rmhmodel.R0000755000176200001440000013431613115271120014254 0ustar liggesusers# # # rmhmodel.R # # $Revision: 1.74 $ $Date: 2017/06/05 10:31:58 $ # # rmhmodel <- function(...) { UseMethod("rmhmodel") } rmhmodel.rmhmodel <- function(model, ...) { # Check for outdated internal format # C.par was replaced by C.beta and C.ipar in spatstat 1.22-3 if(outdated <- !is.null(model$C.par)) warning("Outdated internal format of rmhmodel object; rebuilding it") if(outdated || (length(list(...)) > 0)) model <- rmhmodel.list(unclass(model), ...) return(model) } rmhmodel.list <- function(model, ...) 
{ argnames <- c("cif","par","w","trend","types") ok <- argnames %in% names(model) do.call(rmhmodel.default, resolve.defaults(list(...), model[argnames[ok]])) } rmhmodel.default <- local({ rmhmodel.default <- function(..., cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL) { rmhmodelDefault(..., cif=cif, par=par, w=w, trend=trend, types=types) } rmhmodelDefault <- function(..., cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL, stopinvalid=TRUE) { if(length(list(...)) > 0) stop(paste("rmhmodel.default: syntax should be", "rmhmodel(cif, par, w, trend, types)", "with arguments given by name if they are present"), call. = FALSE) ## Validate parameters if(is.null(cif)) stop("cif is missing or NULL") if(is.null(par)) stop("par is missing or NULL") if(!is.null(w)) w <- as.owin(w) if(!is.character(cif)) stop("cif should be a character string") betamultiplier <- 1 Ncif <- length(cif) if(Ncif > 1) { ## hybrid ## check for Poisson components ispois <- (cif == 'poisson') if(any(ispois)) { ## validate Poisson components Npois <- sum(ispois) poismodels <- vector(mode="list", length=Npois) parpois <- par[ispois] for(i in 1:Npois) poismodels[[i]] <- rmhmodel(cif='poisson', par=parpois[[i]], w=w, trend=NULL, types=types, stopinvalid=FALSE) ## consolidate Poisson intensity parameters poisbetalist <- lapply(poismodels, getElement, name="C.beta") poisbeta <- Reduce("*", poisbetalist) if(all(ispois)) { ## model collapses to a Poisson process cif <- 'poisson' Ncif <- 1 par <- list(beta=poisbeta) betamultiplier <- 1 } else { ## remove Poisson components cif <- cif[!ispois] Ncif <- sum(!ispois) par <- par[!ispois] if(Ncif == 1) # revert to single-cif format par <- par[[1]] ## absorb beta parameters betamultiplier <- poisbeta } } } if(Ncif > 1) { ## genuine hybrid models <- vector(mode="list", length=Ncif) check <- vector(mode="list", length=Ncif) for(i in 1:Ncif) models[[i]] <- rmhmodel(cif=cif[i], par=par[[i]], w=w, trend=NULL, types=types, stopinvalid=FALSE) C.id <- 
unlist(lapply(models, getElement, name="C.id")) C.betalist <- lapply(models, getElement, name="C.beta") C.iparlist <- lapply(models, getElement, name="C.ipar") ## absorb beta multiplier into beta parameter of first component C.betalist[[1]] <- C.betalist[[1]] * betamultiplier ## concatenate for use in C C.beta <- unlist(C.betalist) C.ipar <- unlist(C.iparlist) check <- lapply(models, getElement, name="check") maxr <- max(unlist(lapply(models, getElement, name="reach"))) ismulti <- unlist(lapply(models, getElement, name="multitype.interact")) multi <- any(ismulti) ## determine whether model exists integ <- unlist(lapply(models, getElement, name="integrable")) stabi <- unlist(lapply(models, getElement, name="stabilising")) integrable <- all(integ) || any(stabi) stabilising <- any(stabi) ## string explanations of conditions for validity expl <- lapply(models, getElement, name="explainvalid") integ.ex <- unlist(lapply(expl, getElement, name="integrable")) stabi.ex <- unlist(lapply(expl, getElement, name="stabilising")) stabi.oper <- !(stabi.ex %in% c("TRUE", "FALSE")) integ.oper <- !(integ.ex %in% c("TRUE", "FALSE")) compnames <- if(!anyDuplicated(C.id)) paste("cif", sQuote(C.id)) else paste("component", 1:Ncif, paren(sQuote(C.id))) if(!integrable && stopinvalid) { ## model is not integrable: explain why ifail <- !integ & integ.oper ireason <- paste(compnames[ifail], "should satisfy", paren(integ.ex[ifail], "{")) ireason <- verbalogic(ireason, "and") if(sum(ifail) <= 1) { ## There's only one offending cif, so stability is redundant sreason <- "FALSE" } else { sfail <- !stabi & stabi.oper sreason <- paste(compnames[sfail], "should satisfy", paren(stabi.ex[sfail], "{")) sreason <- verbalogic(sreason, "or") } reason <- verbalogic(c(ireason, sreason), "or") stop(paste("rmhmodel: hybrid model is not integrable; ", reason), call.=FALSE) } else { ## construct strings summarising conditions for validity if(!any(integ.oper)) ireason <- as.character(integrable) else { ireason <- 
paste(compnames[integ.oper], "should satisfy", paren(integ.ex[integ.oper], "{")) ireason <- verbalogic(ireason, "and") } if(!any(stabi.oper)) sreason <- as.character(stabilising) else { sreason <- paste(compnames[stabi.oper], "should satisfy", paren(stabi.ex[stabi.oper], "{")) sreason <- verbalogic(sreason, "or") } ireason <- verbalogic(c(ireason, sreason), "or") explainvalid <- list(integrable=ireason, stabilising=sreason) } out <- list(cif=cif, par=par, w=w, trend=trend, types=types, C.id=C.id, C.beta=C.beta, C.ipar=C.ipar, C.betalist=C.betalist, C.iparlist=C.iparlist, check=check, multitype.interact=multi, integrable=integrable, stabilising=stabilising, explainvalid=explainvalid, reach=maxr) class(out) <- c("rmhmodel", class(out)) return(out) } ## non-hybrid ## Check that this is a recognised model ## and look up the rules for this model rules <- spatstatRmhInfo(cif) ## Map the name of the cif from R to C ## (the names are normally identical in R and C, ## except "poisson" -> NA) C.id <- rules$C.id ## Check that the C name is recognised in C if(!is.na(C.id)) { z <- .C("knownCif", cifname=as.character(C.id), answer=as.integer(0), PACKAGE = "spatstat") ok <- as.logical(z$answer) if(!ok) stop(paste("Internal error: the cif", sQuote(C.id), "is not recognised in the C code")) } ## Validate the model parameters and reformat them check <- rules$parhandler checkedpar <- if(!rules$multitype) check(par) else if(!is.null(types)) check(par, types) else ## types vector not given - defer checking NULL if(!is.null(checkedpar)) { stopifnot(is.list(checkedpar)) stopifnot(!is.null(names(checkedpar)) && all(nzchar(names(checkedpar)))) stopifnot(names(checkedpar)[[1]] == "beta") C.beta <- unlist(checkedpar[[1]]) C.beta <- C.beta * betamultiplier C.ipar <- as.numeric(unlist(checkedpar[-1])) } else { C.beta <- C.ipar <- NULL } ## Determine whether model is integrable integrable <- rules$validity(par, "integrable") explainvalid <- rules$explainvalid if(!integrable && stopinvalid) 
stop(paste("rmhmodel: the model is not integrable; it should satisfy", explainvalid$integrable), call.=FALSE) ## Determine whether cif is stabilising ## (i.e. any hybrid including this cif will be integrable) stabilising <- rules$validity(par, "stabilising") ## Calculate reach of model mreach <- rules$reach(par) ################################################################### ## return augmented list out <- list(cif=cif, par=par, w=w, trend=trend, types=types, C.id=C.id, C.beta=C.beta, C.ipar=C.ipar, check= if(is.null(C.ipar)) check else NULL, multitype.interact=rules$multitype, integrable=integrable, stabilising=stabilising, explainvalid=explainvalid, reach=mreach ) class(out) <- c("rmhmodel", class(out)) return(out) } rmhmodel.default }) print.rmhmodel <- function(x, ...) { verifyclass(x, "rmhmodel") cat("Metropolis-Hastings algorithm, model parameters\n") Ncif <- length(x$cif) cat(paste("Conditional intensity:", if(Ncif == 1) "cif=" else "hybrid of cifs", commasep(sQuote(x$cif)), "\n")) if(!is.null(x$types)) { if(length(x$types) == 1) cat("Univariate process.\n") else { cat("Multitype process with types =\n") print(x$types) if(!x$multitype.interact) cat("Interaction does not depend on type\n") } } else if(x$multitype.interact) cat("Multitype process, types not yet specified.\n") cat("Numerical parameters: par =\n") print(x$par) if(is.null(x$C.ipar)) cat("Parameters have not yet been checked for compatibility with types.\n") if(is.owin(x$w)) print(x$w) else cat("Window: not specified.\n") cat("Trend: ") if(!is.null(x$trend)) print(x$trend) else cat("none.\n") if(!is.null(x$integrable) && !x$integrable) { cat("\n*Warning: model is not integrable and cannot be simulated*\n") } invisible(NULL) } reach.rmhmodel <- function(x, ...) 
{ if(length(list(...)) == 0) return(x$reach) # reach must be recomputed cif <- x$cif Ncif <- length(cif) pars <- if(Ncif == 1) list(x$par) else x$par maxr <- 0 for(i in seq_len(Ncif)) { cif.i <- cif[i] par.i <- pars[[i]] rules <- spatstatRmhInfo(cif.i) rchfun <- rules$reach if(!is.function(rchfun)) stop(paste("Internal error: reach is unknown for cif=", sQuote(cif.i)), call.=FALSE) r.i <- rchfun(par.i, ...) maxr <- max(maxr, r.i, na.rm=TRUE) } return(maxr) } is.poisson.rmhmodel <- function(x) { verifyclass(x, "rmhmodel") identical(x$cif, 'poisson') } is.stationary.rmhmodel <- function(x) { verifyclass(x, "rmhmodel") tren <- x$trend return(is.null(tren) || is.numeric(tren)) } as.owin.rmhmodel <- function(W, ..., fatal=FALSE) { # W is the rmhmodel object. It contains a window w ans <- W$w if(is.owin(ans)) return(ans) if(fatal) stop("rmhmodel object does not contain a window") return(NULL) } domain.rmhmodel <- Window.rmhmodel <- function(X, ...) { as.owin(X) } is.expandable.rmhmodel <- function(x) { tren <- x$tren ok <- function(z) { is.null(z) || is.numeric(z) || is.function(z) } return(if(!is.list(tren)) ok(tren) else all(unlist(lapply(tren, ok)))) } ##### Table of rules for handling rmh models ################## spatstatRmhInfo <- function(cifname) { rules <- .Spatstat.RmhTable[[cifname]] if(is.null(rules)) stop(paste("Unrecognised cif:", sQuote(cifname)), call.=FALSE) return(rules) } .Spatstat.RmhTable <- list( # # 0. Poisson (special case) # 'poisson'= list( C.id=NA, multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the Poisson process" with(par, forbidNA(beta, ctxt)) par <- check.named.list(par, "beta", ctxt) with(par, explain.ifnot(all(beta >= 0), ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach = function(par, ...) { return(0) }, hardcore = function(par, ...) 
{ return(0) }, temper = function(par, invtemp) { return(par^invtemp) } ), # # 1. Strauss. # 'strauss'= list( C.id="strauss", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the strauss cif" par <- check.named.list(par, c("beta","gamma","r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else r) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) r else 0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 2. Strauss with hardcore. # 'straush' = list( C.id="straush", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the straush cif" par <- check.named.list(par, c("beta","gamma","r","hc"), ctxt) # treat hc=NA as absence of hard core par <- within(par, if(is.na(hc)) { hc <- 0 } ) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- hc; gamma <- 1 } ) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(hc, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(hc, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) with(par, explain.ifnot(hc >= 0, ctxt)) with(par, explain.ifnot(hc <= r, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc gamma <- par$gamma switch(kind, integrable=(hc > 0 || gamma <= 1), stabilising=(hc > 0) ) }, explainvalid=list( integrable="hc > 0 or gamma <= 1", stabilising="hc > 0"), reach = function(par, ...) { h <- par[["hc"]] r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) h else r) }, hardcore = function(par, ..., epsilon=0) { h <- par[["hc"]] r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) r else h) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 3. Softcore. # 'sftcr' = list( C.id="sftcr", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the sftcr cif" par <- check.named.list(par, c("beta","sigma","kappa"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(sigma, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.1.real(sigma, ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(sigma >= 0, ctxt)) with(par, explain.ifnot(kappa >= 0 && kappa <= 1, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach = function(par, ..., epsilon=0) { if(epsilon==0) return(Inf) kappa <- par[["kappa"]] sigma <- par[["sigma"]] return(sigma/(epsilon^(kappa/2))) }, hardcore = function(par, ..., epsilon=0) { if(epsilon==0) return(0) kappa <- par[["kappa"]] sigma <- par[["sigma"]] return(sigma/((-log(epsilon))^(kappa/2))) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp sigma <- sigma * (invtemp^(kappa/2)) }) } ), # # 4. Multitype Strauss. 
# 'straussm' = list( C.id="straussm", multitype=TRUE, parhandler=function(par, types) { ctxt <- "For the straussm cif" par <- check.named.list(par, c("beta","gamma","radii"), ctxt) beta <- par$beta gamma <- par$gamma r <- par$radii ntypes <- length(types) check.finite(beta, ctxt) check.nvector(beta, ntypes, TRUE, "types") MultiPair.checkmatrix(gamma, ntypes, "par$gamma") gamma[is.na(gamma)] <- 1 check.finite(gamma, ctxt) MultiPair.checkmatrix(r, ntypes, "par$radii") if(any(nar <- is.na(r))) { r[nar] <- 0 gamma[nar] <- 1 } check.finite(r, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(gamma >= 0), ctxt) explain.ifnot(all(r >= 0), ctxt) par <- list(beta=beta, gamma=gamma, r=r) return(par) }, validity=function(par, kind) { gamma <- par$gamma radii <- par$radii dg <- diag(gamma) dr <- diag(radii) hard <-!is.na(dg) & (dg == 0) & !is.na(dr) & (dr > 0) operative <- !is.na(gamma) & !is.na(radii) & (radii > 0) switch(kind, stabilising=all(hard), integrable=all(hard) || all(gamma[operative] <= 1)) }, explainvalid=list( integrable=paste( "gamma[i,j] <= 1 for all i and j,", "or gamma[i,i] = 0 for all i"), stabilising="gamma[i,i] = 0 for all i"), reach = function(par, ...) { r <- par$radii g <- par$gamma operative <- ! (is.na(r) | (g == 1)) return(max(0, r[operative])) }, hardcore = function(par, ..., epsilon=0) { r <- par$radii g <- par$gamma return(max(0, r[!is.na(r) & g <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 5. Multitype Strauss with hardcore. 
# 'straushm' = list( C.id="straushm", multitype=TRUE, parhandler=function(par, types) { ctxt="For the straushm cif" par <- check.named.list(par, c("beta","gamma","iradii","hradii"), ctxt) beta <- par$beta gamma <- par$gamma iradii <- par$iradii hradii <- par$hradii ntypes <- length(types) check.nvector(beta, ntypes, TRUE, "types") check.finite(beta, ctxt) MultiPair.checkmatrix(gamma, ntypes, "par$gamma") gamma[is.na(gamma)] <- 1 check.finite(gamma, ctxt) MultiPair.checkmatrix(iradii, ntypes, "par$iradii") if(any(nar <- is.na(iradii))) { iradii[nar] <- 0 gamma[nar] <- 1 } check.finite(iradii, ctxt) MultiPair.checkmatrix(hradii, ntypes, "par$hradii") nah <- is.na(hradii) hradii[nah] <- 0 check.finite(hradii, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(gamma >= 0), ctxt) explain.ifnot(all(iradii >= 0), ctxt) explain.ifnot(all(hradii >= 0), ctxt) comparable <- !nar & !nah explain.ifnot(all((iradii >= hradii)[comparable]), ctxt) par <- list(beta=beta,gamma=gamma,iradii=iradii,hradii=hradii) return(par) }, validity=function(par, kind) { gamma <- par$gamma iradii <- par$iradii hradii <- par$hradii dh <- diag(hradii) dg <- diag(gamma) dr <- diag(iradii) hhard <- !is.na(dh) & (dh > 0) ihard <- !is.na(dr) & (dr > 0) & !is.na(dg) & (dg == 0) hard <- hhard | ihard operative <- !is.na(gamma) & !is.na(iradii) & (iradii > 0) switch(kind, stabilising=all(hard), integrable={ all(hard) || all(gamma[operative] <= 1) }) }, explainvalid=list( integrable=paste( "hradii[i,i] > 0 or gamma[i,i] = 0 for all i, or", "gamma[i,j] <= 1 for all i and j"), stabilising="hradii[i,i] > 0 or gamma[i,i] = 0 for all i"), reach=function(par, ...) { r <- par$iradii h <- par$hradii g <- par$gamma roperative <- ! (is.na(r) | (g == 1)) hoperative <- ! 
is.na(h) return(max(0, r[roperative], h[hoperative])) }, hardcore = function(par, ..., epsilon=0) { r <- par$radii h <- par$hradii g <- par$gamma return(max(h[!is.na(h)], r[!is.na(r) & g <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 6. Diggle-Gates-Stibbard interaction # (function number 1 from Diggle, Gates, and Stibbard) 'dgs' = list( C.id="dgs", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the dgs cif" par <- check.named.list(par, c("beta","rho"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(rho, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(rho, ctxt)) with(par, explain.ifnot(rho >= 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach=function(par, ...) { return(par[["rho"]]) }, hardcore=function(par, ..., epsilon=0) { if(epsilon == 0) return(0) return(par[["rho"]] * (2/pi) * asin(sqrt(epsilon))) }, temper = NULL # not a loglinear model ), # # 7. Diggle-Gratton interaction # (function number 2 from Diggle, Gates, and Stibbard). 'diggra' = list( C.id="diggra", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the diggra cif" par <- check.named.list(par, c("beta","kappa","delta","rho"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.finite(delta, ctxt)) with(par, check.finite(rho, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, check.1.real(delta, ctxt)) with(par, check.1.real(rho, ctxt)) with(par, explain.ifnot(kappa >= 0, ctxt)) with(par, explain.ifnot(delta >= 0, ctxt)) with(par, explain.ifnot(rho >= 0, ctxt)) with(par, explain.ifnot(delta < rho, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach=function(par, ...) { return(par[["rho"]]) }, hardcore=function(par, ..., epsilon=0) { return(par[["delta"]]) }, temper = function(par, invtemp) { within(par, { kappa <- kappa * invtemp }) }), # # 8. Geyer saturation model # 'geyer' = list( C.id="geyer", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the geyer cif" par <- check.named.list(par, c("beta","gamma","r","sat"), ctxt) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(sat, ctxt)) par <- within(par, sat <- min(sat, .Machine$integer.max-100)) par <- within(par, if(is.na(gamma)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(sat, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) 
{ r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else 2 * r) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) r else 0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 9. The ``lookup'' device. This permits simulating, at least # approximately, ANY pairwise interaction function model # with isotropic pair interaction (i.e. depending only on distance). # The pair interaction function is provided as a vector of # distances and corresponding function values which are used # as a ``lookup table'' by the C code. # 'lookup' = list( C.id="lookup", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the lookup cif" par <- check.named.list(par, c("beta","h"), ctxt, "r") with(par, check.finite(beta, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) beta <- par[["beta"]] h.init <- par[["h"]] r <- par[["r"]] if(is.null(r)) { if(!is.stepfun(h.init)) stop(paste("For cif=lookup, if component r of", "par is absent then component h must", "be a stepfun object.")) if(!is.cadlag(h.init)) stop(paste("The lookup pairwise interaction step", "function must be right continuous,\n", "i.e. 
built using the default values of the", sQuote("f"), "and", sQuote("right"), "arguments for stepfun.")) r <- knots(h.init) h0 <- get("yleft",envir=environment(h.init)) h <- h.init(r) nlook <- length(r) if(!identical(all.equal(h[nlook],1),TRUE)) stop(paste("The lookup interaction step function", "must be equal to 1 for", dQuote("large"), "distances.")) if(r[1] <= 0) stop(paste("The first jump point (knot) of the lookup", "interaction step function must be", "strictly positive.")) h <- c(h0,h) } else { h <- h.init nlook <- length(r) if(length(h) != nlook) stop("Mismatch of lengths of h and r lookup vectors.") if(anyNA(r)) stop("Missing values not allowed in r lookup vector.") if(is.unsorted(r)) stop("The r lookup vector must be in increasing order.") if(r[1] <= 0) stop(paste("The first entry of the lookup vector r", "should be strictly positive.")) h <- c(h,1) } if(any(h < 0)) stop(paste("Negative values in the lookup", "pairwise interaction function.")) if(h[1] > 0 & any(h > 1)) stop(paste("Lookup pairwise interaction function does", "not define a valid point process.")) rmax <- r[nlook] r <- c(0,r) nlook <- nlook+1 deltar <- mean(diff(r)) if(identical(all.equal(diff(r),rep.int(deltar,nlook-1)),TRUE)) { par <- list(beta=beta,nlook=nlook, equisp=1, deltar=deltar,rmax=rmax, h=h) } else { par <- list(beta=beta,nlook=nlook, equisp=0, deltar=deltar,rmax=rmax, h=h, r=r) } return(par) }, validity=function(par, kind) { h <- par$h if(is.stepfun(h)) h <- eval(expression(c(yleft,y)),envir=environment(h)) switch(kind, integrable={ (h[1] == 0) || all(h <= 1) }, stabilising={ h[1] == 0 }) }, explainvalid=list( integrable="h[1] == 0 or h[i] <= 1 for all i", stabilising="h[1] == 0"), reach = function(par, ...) 
{ r <- par[["r"]] h <- par[["h"]] if(is.null(r)) r <- knots(h) return(max(r)) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] h <- par[["h"]] if(is.null(r)) r <- knots(h) return(max(0, r[h <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp h <- h^invtemp }) } ), # # 10. Area interaction # 'areaint'= list( C.id="areaint", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the areaint cif" par <- check.named.list(par, c("beta","eta","r"), ctxt) par <- within(par, if(is.na(r)) { r <- 0 }) with(par, check.finite(beta, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(eta, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.finite(eta, ctxt)) with(par, check.finite(r, ctxt)) with(par, explain.ifnot(eta >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) { r <- par[["r"]] eta <- par[["eta"]] return(if(eta == 1) 0 else (2 * r)) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] eta <- par[["eta"]] if(eta > epsilon) return(0) if(eta == 0) return(2 * r) # linear approximation return(2 * r * eta/epsilon) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp eta <- eta^invtemp }) } ), # # 11. The ``badgey'' (Baddeley-Geyer) model. # 'badgey' = list( C.id="badgey", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the badgey cif" par <- check.named.list(par, c("beta","gamma","r","sat"), ctxt) par <- within(par, sat <- pmin(sat, .Machine$integer.max-100)) par <- within(par, gamma[is.na(gamma) | is.na(r)] <- 1) par <- within(par, r[is.na(r)] <- 0) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(sat, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(all(gamma >= 0), ctxt)) with(par, explain.ifnot(all(r >= 0), ctxt)) with(par, explain.ifnot(all(sat >= 0), ctxt)) with(par, explain.ifnot(length(gamma) == length(r), ctxt)) gamma <- par[["gamma"]] r <- par[["r"]] sat <- par[["sat"]] if(length(sat)==1) sat <- rep.int(sat,length(gamma)) else explain.ifnot(length(sat) == length(gamma), ctxt) mmm <- cbind(gamma,r,sat) mmm <- mmm[fave.order(r),] ndisc <- length(r) par <- list(beta=par$beta,ndisc=ndisc,parms=as.vector(t(mmm))) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) { r <- par[["r"]] gamma <- par[["gamma"]] operative <- (gamma != 1) return(if(!any(operative)) 0 else (2 * max(r[operative]))) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] gamma <- par[["gamma"]] return(max(0, r[gamma <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 12. The hard core process 'hardcore' = list( C.id="hardcore", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the hardcore cif" par <- check.named.list(par, c("beta", "hc"), ctxt) par <- within(par, if(is.na(hc)) { hc <- 0 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(hc, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(hc, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc switch(kind, integrable=TRUE, stabilising=(hc > 0)) }, explainvalid=list(integrable="TRUE", stabilising="hc > 0"), reach = function(par, ...) { hc <- par[["hc"]] return(hc) }, hardcore = function(par, ...) { hc <- par[["hc"]] return(hc) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp }) } ), # # Lucky 13. Fiksel process 'fiksel' = list( C.id="fiksel", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the Fiksel cif" par <- check.named.list(par, c("beta", "r", "hc", "kappa", "a"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(hc, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.finite(a, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(hc, ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, check.1.real(a, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(hc >= 0, ctxt)) with(par, explain.ifnot(r > hc, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc a <- par$a switch(kind, integrable=(hc > 0 || a <= 0), stabilising=(hc > 0)) }, explainvalid=list( integrable="hc > 0 or a <= 0", stabilising="hc > 0"), reach = function(par, ...) { r <- par[["r"]] hc <- par[["hc"]] a <- par[["a"]] return(if(a != 0) r else hc) }, hardcore = function(par, ...) { return(par[["hc"]]) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp a <- a * invtemp }) } ), # # 14. Lennard-Jones 'lennard' = list( C.id="lennard", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the Lennard-Jones cif" par <- check.named.list(par, c("beta", "sigma", "epsilon"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(sigma, ctxt)) with(par, check.finite(epsilon, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(sigma, ctxt)) with(par, check.1.real(epsilon, ctxt)) with(par, explain.ifnot(sigma > 0, ctxt)) with(par, explain.ifnot(epsilon > 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=(par$sigma > 0), stabilising=FALSE) }, explainvalid=list( integrable="sigma > 0", stabilising="FALSE"), reach = function(par, ...) { sigma <- par[["sigma"]] return(2.5 * sigma) }, hardcore = function(par, ...) { sigma <- par[["sigma"]] return(sigma/2.5) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp epsilon <- epsilon * invtemp }) } ), # # 15. Multitype hardcore. # 'multihard' = list( C.id="multihard", multitype=TRUE, parhandler=function(par, types) { ctxt="For the multihard cif" par <- check.named.list(par, c("beta","hradii"), ctxt) beta <- par$beta hradii <- par$hradii ntypes <- length(types) check.nvector(beta, ntypes, TRUE, "types") check.finite(beta, ctxt) MultiPair.checkmatrix(hradii, ntypes, "par$hradii") hradii[is.na(hradii)] <- 0 check.finite(hradii, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(hradii >= 0), ctxt) par <- list(beta=beta,hradii=hradii) return(par) }, validity=function(par, kind) { switch(kind, integrable=return(TRUE), stabilising={ hself <- diag(par$hradii) repel <- !is.na(hself) & (hself > 0) return(all(repel)) }) }, explainvalid=list( integrable="TRUE", stabilising="hradii[i,i] > 0 for all i"), reach=function(par, ...) { return(max(0, par$hradii, na.rm=TRUE)) }, hardcore=function(par, ..., epsilon=0) { return(max(0, par$hradii, na.rm=TRUE)) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp }) } ), # # 16. Triplets. 
# 'triplets'= list( C.id="triplets", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the triplets cif" par <- check.named.list(par, c("beta","gamma","r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else r) }, hardcore = function(par, ...) { return(0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 17. Penttinen. # 'penttinen'= list( C.id="penttinen", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the penttinen cif" par <- check.named.list(par, c("beta", "gamma", "r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r > 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) 
{ r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else (2 * r)) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) (2 * r) else 0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ) # end of list '.Spatstat.RmhTable' ) spatstat/R/linfun.R0000644000176200001440000000742413115271120013734 0ustar liggesusers# # linfun.R # # Class of functions of location on a linear network # # $Revision: 1.12 $ $Date: 2017/06/05 10:31:58 $ # linfun <- function(f, L) { stopifnot(is.function(f)) stopifnot(inherits(L, "linnet")) fargs <- names(formals(f)) needargs <- c("x", "y", "seg", "tp") if(!all(needargs %in% fargs)) stop(paste("Function must have formal arguments", commasep(sQuote(needargs))), call.=FALSE) otherfargs <- setdiff(fargs, needargs) g <- function(...) { argh <- list(...) extra <- names(argh) %in% otherfargs if(!any(extra)) { X <- as.lpp(..., L=L) value <- do.call(f, as.list(coords(X))) } else { extrargs <- argh[extra] mainargs <- argh[!extra] X <- do.call(as.lpp, append(mainargs, list(L=L))) value <- do.call(f, append(as.list(coords(X)), extrargs)) } return(value) } class(g) <- c("linfun", class(g)) attr(g, "L") <- L attr(g, "f") <- f return(g) } print.linfun <- function(x, ...) { L <- as.linnet(x) if(!is.null(explain <- attr(x, "explain"))) { explain(x) } else { splat("Function on linear network:") print(attr(x, "f"), ...) splat("Function domain:") print(L) } invisible(NULL) } summary.linfun <- function(object, ...) { print(object, ...) 
} as.linim.linfun <- function(X, L=domain(X), ..., eps = NULL, dimyx = NULL, xy = NULL, delta=NULL) { if(is.null(L)) L <- domain(X) # create template Y <- as.linim(1, L, eps=eps, dimyx=dimyx, xy=xy, delta=delta) # extract coordinates of sample points along network df <- attr(Y, "df") coo <- df[, c("x", "y", "mapXY", "tp")] colnames(coo)[3L] <- "seg" # evaluate function at sample points vals <- do.call(X, append(as.list(coo), list(...))) # write values in data frame df$values <- vals # overwrite values in pixel array storage.mode(Y$v) <- typ <- typeof(vals) Y$type <- if(typ == "double") "real" else typ pix <- nearest.raster.point(df$xc, df$yc, Y) Y$v[] <- NA Y$v[cbind(pix$row, pix$col)] <- vals # attr(Y, "df") <- df return(Y) } as.data.frame.linfun <- function(x, ...) { as.data.frame(as.linim(x, ...)) } as.linfun.linim <- function(X, ...) { trap.extra.arguments(..., .Context="as.linfun.linim") ## extract info L <- as.linnet(X) df <- attr(X, "df") ## function values and corresponding locations values <- df$values locations <- with(df, as.lpp(x=x, y=y, seg=mapXY, tp=tp, L=L)) ## Function that maps any spatial location to the nearest data location nearestloc <- nnfun(locations) ## Function that reads value at nearest data location f <- function(x, y, seg, tp) { values[nearestloc(x,y,seg,tp)] } g <- linfun(f, L) return(g) } plot.linfun <- function(x, ..., L=NULL, main) { if(missing(main)) main <- short.deparse(substitute(x)) if(is.null(L)) L <- as.linnet(x) argh <- list(...) fargnames <- get("otherfargs", envir=environment(x)) resolution <- c("eps", "dimyx", "xy", "delta") convert <- names(argh) %in% c(fargnames, resolution) Z <- do.call(as.linim, append(list(x, L=L), argh[convert])) rslt <- do.call(plot.linim, append(list(Z, main=main), argh[!convert])) return(invisible(rslt)) } as.owin.linfun <- function(W, ...) { as.owin(as.linnet(W)) } domain.linfun <- as.linnet.linfun <- function(X, ...) { attr(X, "L") } as.function.linfun <- function(x, ...) 
{ nax <- names(attributes(x)) if(!is.null(nax)) { retain <- (nax == "srcref") attributes(x)[!retain] <- NULL } return(x) } integral.linfun <- function(f, domain=NULL, ..., delta) { if(missing(delta)) delta <- NULL integral(as.linim(f, delta=delta), domain=domain, ...) } as.linfun <- function(X, ...) { UseMethod("as.linfun") } as.linfun.linfun <- function(X, ...) { return(X) }spatstat/R/effectfun.R0000755000176200001440000001437713115271075014427 0ustar liggesusers# # effectfun.R # # $Revision: 1.20 $ $Date: 2017/06/05 10:31:58 $ # effectfun <- local({ okclasses <- c("ppm", "kppm", "lppm", "dppm", "rppm", "profilepl") effectfun <- function(model, covname, ..., se.fit=FALSE) { if(!inherits(model, okclasses)) stop(paste("First argument 'model' should be a fitted model of class", commasep(sQuote(okclasses), " or ")), call.=FALSE) orig.model <- model model <- as.ppm(model) dotargs <- list(...) # determine names of covariates involved intern.names <- if(is.marked.ppm(model)) c("x", "y", "marks") else c("x", "y") needed.names <- variablesinformula(rhs.of.formula(formula(model))) # check for clashes/quirks if("lambda" %in% needed.names) { if(is.dppm(orig.model) && ( identical.formulae(formula(model), ~offset(log(lambda))-1) || identical.formulae(formula(model), ~log(lambda)-1) )) stop("effectfun is not defined for a DPP model with fixed intensity", call.=FALSE) intensityname <- setdiff(c("Lambda", "intensity"), needed.names)[1] } else intensityname <- "lambda" ## validate the relevant covariate if(missing(covname) || is.null(covname)) { mc <- model.covariates(model) if(length(mc) == 1) covname <- mc else stop("covname must be provided") } if(!(covname %in% c(intern.names, needed.names))) stop(paste("model does not have a covariate called", sQuote(covname)), call.=FALSE) # check that fixed values for all other covariates are provided given.covs <- names(dotargs) if(any(uhoh <- !(needed.names %in% c(given.covs, covname)))) { nuh <- sum(uhoh) stop(paste(ngettext(nuh, "A value 
for the covariate", "Values for the covariates"), commasep(dQuote(needed.names[uhoh])), "must be provided (as", ngettext(nuh, "an argument", "arguments"), "to effectfun)")) } # establish type and range of covariate values N0 <- 256 if(covname == "x") { covtype <- "real" W <- as.owin(data.ppm(model)) Zr <- W$xrange Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) } else if(covname == "y") { covtype <- "real" W <- as.owin(data.ppm(model)) Zr <- W$yrange Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) } else if(covname == "marks") { covtype <- "factor" Zvals <- levels(marks(data.ppm(model))) } else { # covariate is external if(is.data.frame(covdf <- model$covariates)) { Z <- covdf$covname covtype <- typeof(Z) if(covtype == "double") covtype <- "real" switch(covtype, real={ Zr <- range(Z) Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) }, integer={ Zr <- range(Z) Zvals <- seq(from=Zr[1L], to=Zr[2L], by=ceiling((diff(Zr)+1)/N0)) }, factor={ Zvals <- levels(Z) }, logical={ Zvals <- c(FALSE, TRUE) }, stop(paste("Cannot handle covariate of type", dQuote(covtype))) ) } else { Z <- getdataobjects(covname, environment(formula(model)), model$covariates)[[1L]] if(is.null(Z)) stop(paste("Cannot find covariate", sQuote(covname)), call.=FALSE) # convert to image if(!is.im(Z)) Z <- as.im(Z, W=as.owin(model)) covtype <- Z$type switch(covtype, real={ Zr <- summary(Z)$range Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) }, factor={ Zvals <- levels(Z) }, logical={ Zvals <- c(FALSE, TRUE) }, stop(paste("Cannot handle covariate of type", dQuote(covtype))) ) } } # set up data frames of fake data for predict method # First set up default, constant value for each covariate N <- length(Zvals) fakeloc <- resolve.defaults(dotargs, list(x=0, y=0))[c("x","y")] if(is.marked.ppm(model)) { lev <- levels(marks(data.ppm(model))) fakeloc$marks <- lev[1L] } fakeloc <- lapply(fakeloc, padout, N=N) fakecov <- lapply(dotargs, padout, N=N) # Overwrite value for covariate of interest 
if(covname %in% intern.names) fakeloc[[covname]] <- Zvals else fakecov[[covname]] <- Zvals # convert to data frame fakeloc <- do.call(data.frame, fakeloc) fakecov <- if(length(fakecov) > 0) do.call(data.frame, fakecov) else NULL # # Now predict pred <- predict(orig.model, locations=fakeloc, covariates=fakecov, se=se.fit) if(!se.fit) lambda <- pred else { lambda <- pred$estimate se <- pred$se sedf <- data.frame(se =se, hi = lambda + 2 * se, lo = lambda - 2 * se) } # dfin <- if(!is.null(fakecov)) cbind(fakeloc, fakecov) else fakeloc dfin <- dfin[covname] dflam <- data.frame(lambda=lambda) names(dflam) <- intensityname df <- cbind(dfin, dflam) # if(covtype != "real") { result <- df if(se.fit) result <- cbind(result, sedf) } else { bc <- paren(covname) result <- fv(df, argu=covname, ylab=substitute(lambda(X), list(X=as.name(covname), lambda=as.name(intensityname))), labl=c(covname, paste("hat(%s)", bc)), valu=intensityname, alim=Zr, desc=c(paste("value of covariate", covname), "fitted intensity"), fname=intensityname) if(se.fit) { result <- bind.fv(result, sedf, labl=c(paste("se[%s]", bc), paste("%s[hi]", bc), paste("%s[lo]", bc)), desc=c("standard error of fitted trend", "upper limit of pointwise 95%% CI for trend", "lower limit of pointwise 95%% CI for trend")) fvnames(result, ".") <- c(intensityname, "hi", "lo") fvnames(result, ".s") <- c("hi", "lo") formula(result) <- paste(". 
~ ", covname) } } return(result) } padout <- function(x,N) { rep.int(x[1L],N) } effectfun }) spatstat/R/stienen.R0000644000176200001440000000344513115225157014116 0ustar liggesusers## stienen.R ## ## Stienen diagram with border correction ## ## $Revision: 1.8 $ $Date: 2015/10/21 09:06:57 $ stienen <- function(X, ..., bg="grey", border=list(bg=NULL)) { Xname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) if(npoints(X) <= 1) { do.call(plot, resolve.defaults(list(x=Window(X)), list(...), list(main=Xname))) return(invisible(NULL)) } d <- nndist(X) b <- bdist.points(X) Y <- X %mark% d gp <- union(graphicsPars("symbols"), "lwd") do.call.plotfun(plot.ppp, resolve.defaults(list(x=Y[b >= d], markscale=1), list(...), list(bg=bg), list(main=Xname)), extrargs=gp) if(!identical(border, FALSE)) { if(!is.list(border)) border <- list() do.call.plotfun(plot.ppp, resolve.defaults(list(x=Y[b < d], markscale=1, add=TRUE), border, list(...), list(bg=bg), list(cols=grey(0.5), lwd=2)), extrargs=gp) } return(invisible(NULL)) } stienenSet <- function(X, edge=TRUE) { stopifnot(is.ppp(X)) nnd <- nndist(X) if(!edge) { ok <- bdist.points(X) >= nnd X <- X[ok] nnd <- nnd[ok] } n <- npoints(X) if(n == 0) return(emptywindow(Window(X))) if(n == 1) return(Window(X)) d <- nnd/2 delta <- 2 * pi * max(d)/128 Z <- disc(d[1], X[1], delta=delta) for(i in 2:n) Z <- union.owin(Z, disc(d[i], X[i], delta=delta)) return(Z) } spatstat/R/Kcom.R0000755000176200001440000003122113115271075013336 0ustar liggesusers# # Kcom.R # # model compensated K-function # # $Revision: 1.14 $ $Date: 2015/10/21 09:06:57 $ # Kcom <- local({ Kcom <- function(object, r=NULL, breaks=NULL, ..., correction=c("border", "isotropic", "translate"), conditional=!is.poisson(object), restrict=FALSE, model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), compute.var=TRUE, truecoef=NULL, hi.res=NULL) { if(inherits(object, "ppm")) { fit <- object } else if(is.ppp(object) || inherits(object, "quad")) { if(is.ppp(object)) 
object <- quadscheme(object, ...) if(!is.null(model)) { fit <- update(model, Q=object, forcefit=TRUE) } else { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") if(missing(conditional) || is.null(conditional)) conditional <- !is.poisson(fit) # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and window Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) Win <- X$window # selection of edge corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", isotropic="isotropic", Ripley="isotropic", ripley="isotropic", trans="translation", translate="translation", translation="translation", best="best"), multi=TRUE) correction <- implemented.for.K(correction, Win$type, correction.given) opt <- list(bord = any(correction == "border"), tran = any(correction == "translation"), ripl = any(correction == "isotropic")) if(sum(unlist(opt)) == 0) stop("No corrections selected") # edge correction algorithm algo <- if(!conditional) "classical" else if(restrict) "restricted" else "reweighted" # conditioning on border region? 
if(!conditional) { Wfree <- Win } else { rbord <- fit$rbord Wfree <- erosion(Win, rbord) if(restrict) { retain <- inside.owin(union.quad(Q), , Wfree) # Throw away boundary data Q <- Q[Wfree] X <- X[Wfree] Win <- Wfree } } # Extract quadrature info U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # quadrature points used USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n) # basic statistics npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW lambda2 <- npts * (npts - 1)/(areaW^2) # adjustments to account for restricted domain of pseudolikelihood if(algo == "reweighted") { npts.used <- sum(Z & USED) area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # lambda2.used <- npts.used * (npts.used - 1)/(area.used^2) } else { npts.used <- npts area.used <- areaW # lambda.used <- lambda # lambda2.used <- lambda2 } # 'r' values rmaxdefault <- rmax.rule("K", if(restrict) Wfree else Win, npts/areaW) breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault) r <- breaks$r # nr <- length(r) rmax <- breaks$max # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame K <- data.frame(r=r, pois=pi * r^2) desc <- c("distance argument r", "expected %s for CSR") K <- fv(K, "r", substitute(K(r), NULL), "pois", , alim, c("r","%s[pois](r)"), desc, fname="K") ############### start computing ################## # residuals resid <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) resval <- with(resid, "increment") rescts <- with(resid, "continuous") if(restrict) { # keep only data inside Wfree resval <- resval[retain] rescts <- rescts[retain] } # close pairs of points # (quadrature point to data point) clos <- crosspairs(U, X, rmax, what="ijd") dIJ <- clos$d I <- clos$i J <- clos$j UI <- U[I] XJ <- X[J] EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] ZI <- Z[I] # TRUE if U[I[k]] 
is a data point DD <- ZI & !EIJ # TRUE for pairs of distinct data points only # nDD <- sum(DD) # determine whether a quadrature point will be used in integral okI <- USED[I] if(spatstat.options("Kcom.remove.zeroes")) okI <- okI & !EIJ # residual weights # wIJ <- ifelseXY(EIJ, rescts[I], resval[I]) # absolute weight for continuous integrals wc <- -rescts wcIJ <- -rescts[I] #################################################### if(opt$bord) { # border method # Compute distances to boundary # (in restricted case, the window of U has been adjusted) b <- bdist.points(U) bI <- b[I] # reduced sample for K(r) of data only RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[Z & USED], breaks) # Kb <- RSX$numerator/(lambda.used * RSX$denom.count) Kb <- RSX$numerator/(lambda * RSX$denom.count) K <- bind.fv(K, data.frame(border=Kb), "hat(%s)[bord](r)", nzpaste(algo, "border-corrected nonparametric estimate of %s"), "border") # reduced sample for adjustment integral RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[Z & USED], rep.int(1, npts.used), breaks) # lambdaU <- (npts.used + 1)/area.used lambdaU <- (npts + 1)/areaW Kb <- RSD$numerator/((RSD$denominator + 1) * lambdaU) K <- bind.fv(K, data.frame(bcom=Kb), "bold(C)~hat(%s)[bord](r)", nzpaste("model compensator of", algo, "border-corrected %s"), "border") } if(opt$tran) { # translation correction edgewt <- switch(algo, classical = edge.Trans(UI, XJ, paired=TRUE), restricted = edge.Trans(UI, XJ, paired=TRUE), reweighted = edge.Trans.modif(UI, XJ, Win, Wfree, paired=TRUE)) wh <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI]) whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI]) Ktrans <- cumsum(whDD)/(lambda2 * area.used) Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "hat(%s)[trans](r)", nzpaste(algo, "translation-corrected nonparametric estimate of %s"), "trans") # lambda2U <- (npts.used + 1) * npts.used/(area.used^2) lambda2U <- (npts + 1) * npts/(areaW^2) Ktrans <- cumsum(wh)/(lambda2U * area.used) Ktrans[r 
>= rmax] <- NA K <- bind.fv(K, data.frame(tcom=Ktrans), "bold(C)~hat(%s)[trans](r)", nzpaste("model compensator of", algo, "translation-corrected %s"), "trans") } if(opt$ripl) { # Ripley isotropic correction edgewt <- edge.Ripley(UI, matrix(dIJ, ncol=1)) wh <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI]) whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI]) # Kiso <- cumsum(whDD)/(lambda2.used * area.used) Kiso <- cumsum(whDD)/(lambda2 * area.used) Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "hat(%s)[iso](r)", nzpaste(algo, "isotropic-corrected nonparametric estimate of %s"), "iso") # lambda2U <- (npts.used + 1) * npts.used/(area.used^2) lambda2U <- (npts + 1) * npts/(areaW^2) Kiso <- cumsum(wh)/(lambda2U * area.used) Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(icom=Kiso), "bold(C)~hat(%s)[iso](r)", nzpaste("model compensator of", algo, "isotropic-corrected %s"), "iso") # if(compute.var) { savedotnames <- fvnames(K, ".") # compute contribution to compensator from each quadrature point dOK <- dIJ[okI] eOK <- edgewt[okI] iOK <- I[okI] denom <- lambda2U * area.used variso <- varsumiso <- 0 * Kiso for(i in sort(unique(iOK))) { relevant <- (iOK == i) tincrem <- whist(dOK[relevant], breaks$val, eOK[relevant]) localterm <- cumsum(tincrem)/denom variso <- variso + wc[i] * localterm^2 if(Z[i]) varsumiso <- varsumiso + localterm^2 } sdiso <- sqrt(variso) K <- bind.fv(K, data.frame(ivar=variso, isd =sdiso, ihi = 2*sdiso, ilo = -2*sdiso, ivarsum=varsumiso), c("bold(C)^2~hat(%s)[iso](r)", "sqrt(bold(C)^2~hat(%s)[iso](r))", "bold(R)~hat(%s)[hi](r)", "bold(R)~hat(%s)[lo](r)", "hat(C)^2~hat(%s)[iso](r)"), c("Poincare variance of isotropic-corrected %s", "sqrt(Poincare variance) of isotropic-corrected %s", "upper critical band for isotropic-corrected %s", "lower critical band for isotropic-corrected %s", "data estimate of Poincare variance of %s"), "iso") # fvnames(K, ".") <- c(savedotnames, "isd") fvnames(K, ".") <- savedotnames } } # default is to 
display all corrections formula(K) <- . ~ r unitname(K) <- unitname(X) # secret tag used by 'Kres' attr(K, "maker") <- "Kcom" return(K) } # `reweighted' translation edge correction edge.Trans.modif <- function(X, Y=X, WX=X$window, WY=Y$window, exact=FALSE, paired=FALSE, trim=spatstat.options("maxedgewt")) { # computes edge correction factor # f = area(WY)/area(intersect.owin(WY, shift(WX, X[i] - Y[j]))) X <- as.ppp(X, WX) W <- X$window x <- X$x y <- X$y Y <- as.ppp(Y, WY) xx <- Y$x yy <- Y$y nX <- npoints(X) nY <- npoints(Y) if(paired && (nX != nY)) stop("X and Y should have equal length when paired=TRUE") # For irregular polygons, exact evaluation is very slow; # so use pixel approximation, unless exact=TRUE if(!exact) { if(WX$type == "polygonal") WX <- as.mask(WX) if(WY$type == "polygonal") WY <- as.mask(WX) } typeX <- WX$type typeY <- WY$type if(typeX == "rectangle" && typeY == "rectangle") { # Fast code for this case if(!paired) { DX <- abs(outer(x,xx,"-")) DY <- abs(outer(y,yy,"-")) } else { DX <- abs(xx - x) DY <- abs(yy - y) } A <- WX$xrange B <- WX$yrange a <- WY$xrange b <- WY$yrange # compute width and height of intersection wide <- pmin.int(a[2], A[2]+DX) - pmax(a[1], A[1]+DX) high <- pmin.int(b[2], B[2]+DY) - pmax(b[1], B[1]+DY) # edge correction weight weight <- diff(a) * diff(b) / (wide * high) if(!paired) weight <- matrix(weight, nrow=X$n, ncol=Y$n) } else if(typeX %in% c("rectangle", "polygonal") && typeY %in% c("rectangle", "polygonal")) { # This code is SLOW WX <- as.polygonal(WX) WY <- as.polygonal(WY) a <- area(W) if(!paired) { weight <- matrix(, nrow=nX, ncol=nY) if(nX > 0 && nY > 0) { for(i in seq_len(nX)) { X.i <- c(x[i], y[i]) for(j in seq_len(nY)) { shiftvector <- X.i - c(xx[j],yy[j]) WXshift <- shift(WX, shiftvector) b <- overlap.owin(WY, WXshift) weight[i,j] <- a/b } } } } else { nX <- npoints(X) weight <- numeric(nX) if(nX > 0) { for(i in seq_len(nX)) { shiftvector <- c(x[i],y[i]) - c(xx[i],yy[i]) WXshift <- shift(WX, shiftvector) b <- 
overlap.owin(WY, WXshift) weight[i] <- a/b } } } } else { WX <- as.mask(WX) WY <- as.mask(WY) # make difference vectors if(!paired) { DX <- outer(x,xx,"-") DY <- outer(y,yy,"-") } else { DX <- x - xx DY <- y - yy } # compute set cross-covariance g <- setcov(WY,WX) # evaluate set cross-covariance at these vectors gvalues <- lookup.im(g, as.vector(DX), as.vector(DY), naok=TRUE, strict=FALSE) weight <- area(WY)/gvalues } # clip high values if(length(weight) > 0) weight <- pmin.int(weight, trim) if(!paired) weight <- matrix(weight, nrow=X$n, ncol=Y$n) return(weight) } Kcom }) spatstat/R/superimpose.R0000755000176200001440000002065113115271120015014 0ustar liggesusers# superimpose.R # # $Revision: 1.36 $ $Date: 2016/10/17 06:48:25 $ # # ############################# superimpose <- function(...) { # remove any NULL arguments arglist <- list(...) if(any(isnull <- sapply(arglist, is.null))) return(do.call(superimpose, arglist[!isnull])) UseMethod("superimpose") } superimpose.default <- function(...) { argh <- list(...) #' First expand any arguments which are lists of objects argh <- expandSpecialLists(argh, "solist") #' Now dispatch if(any(sapply(argh, is.lpp)) || any(sapply(argh, inherits, what="linnet"))) return(do.call(superimpose.lpp, argh)) if(any(sapply(argh, is.psp))) return(do.call(superimpose.psp, argh)) #' default return(do.call(superimpose.ppp, argh)) } superimpose.ppp <- function(..., W=NULL, check=TRUE) { arglist <- list(...) # Check that all "..." 
arguments have x, y coordinates hasxy <- unlist(lapply(arglist, checkfields, L=c("x", "y"))) if(!all(hasxy)) { nbad <- sum(bad <- !hasxy) stop(paste(ngettext(nbad, "Argument", "Arguments"), commasep(which(bad)), ngettext(nbad, "does not", "do not"), "have components x and y"), call.=FALSE) } # concatenate lists of (x,y) coordinates XY <- do.call(concatxy, arglist) needcheck <- TRUE # determine whether there is any window information if(!is.owin(W)) { # we have to compute the final window WXY <- NULL Wppp <- NULL if(any(isppp <- unlist(lapply(arglist, is.ppp)))) { # extract windows from ppp objects wins <- unname(lapply(arglist[isppp], as.owin)) # take union Wppp <- if(length(wins) == 1) wins[[1]] else do.call(union.owin, wins) } if(is.function(W)) { # W is a function like bounding.box.xy or ripras # Apply function to the x,y coordinates; it should return an owin WXY <- W(XY) if(!is.owin(WXY)) stop("Function W did not return an owin object", call.=FALSE) } if(is.character(W)) { # character string identifies a function pW <- pmatch(W, c("convex", "rectangle", "bbox", "none")) if(is.na(pW)) stop(paste("Unrecognised option W=", sQuote(W)), call.=FALSE) WXY <- switch(pW, convex=ripras(XY), rectangle=ripras(XY, shape="rectangle"), bbox=boundingbox(XY), none=NULL) # in these cases we don't need to verify that the points are inside. needcheck <- !is.null(WXY) } if(is.null(WXY) && is.null(Wppp)) { # no window information return(XY) } W <- union.owin(WXY, Wppp) } # extract the marks if any nobj <- lengths(lapply(arglist, getElement, name="x")) marx <- superimposeMarks(arglist, nobj) # ppp(XY$x, XY$y, window=W, marks=marx, check=check & needcheck) } superimpose.splitppp <- superimpose.ppplist <- function(..., W=NULL, check=TRUE) { arglist <- list(...) 
while(any(h <- sapply(arglist, inherits, what=c("splitppp", "ppplist")))) { i <- min(which(h)) arglist <- insertinlist(arglist, i, arglist[[i]]) } do.call(superimpose, append(arglist, list(W=W, check=check))) } superimpose.psp <- function(..., W=NULL, check=TRUE) { # superimpose any number of line segment patterns arglist <- list(...) misscheck <- missing(check) if(!all(sapply(arglist, is.psp))) stop("Patterns to be superimposed must all be psp objects", call.=FALSE) # extract segment coordinates matlist <- lapply(lapply(arglist, getElement, name="ends"), asNumericMatrix) # tack them together mat <- do.call(rbind, matlist) # determine whether there is any window information needcheck <- FALSE if(!is.owin(W)) { # we have to compute the final window WXY <- NULL # Wpsp <- NULL if(any(ispsp <- unlist(lapply(arglist, is.psp)))) { # extract windows from psp objects wins <- unname(lapply(arglist[ispsp], as.owin)) # take union Wppp <- if(length(wins) == 1) wins[[1]] else do.call(union.owin, wins) } if(is.function(W) || is.character(W)) { # guess window from x, y coordinates XY <- list(x=cbind(mat[,1], mat[,3]), y=cbind(mat[,2], mat[,4])) if(is.function(W)) { # W is a function like bounding.box.xy or ripras # Apply function to the x,y coordinates; it should return an owin WXY <- W(XY) if(!is.owin(WXY)) stop("Function W did not return an owin object", call.=FALSE) } if(is.character(W)) { # character string identifies a function pW <- pmatch(W, c("convex", "rectangle", "bbox", "none")) if(is.na(pW)) stop(paste("Unrecognised option W=", sQuote(W)), call.=FALSE) WXY <- switch(pW, convex=ripras(XY), rectangle=ripras(XY, shape="rectangle"), bbox=boundingbox(XY), none=NULL) # in these cases we don't need to verify that the points are inside. 
needcheck <- !is.null(WXY) } } W <- union.owin(WXY, Wppp) } # extract marks, if any nobj <- sapply(arglist, nsegments) marx <- superimposeMarks(arglist, nobj) if(misscheck && !needcheck) check <- FALSE return(as.psp(mat, window=W, marks=marx, check=check)) } superimposeMarks <- function(arglist, nobj) { # combine marks from the objects in the argument list marxlist <- lapply(arglist, marks) marx <- do.call(markappend, unname(marxlist)) nama <- names(arglist) if(length(nama) == length(arglist) && all(nzchar(nama))) { # arguments are named: use names as (extra) marks newmarx <- factor(rep.int(nama, nobj), levels=nama) marx <- markcbind(marx, newmarx) if(ncol(marx) == 2) { ## component marks were not named: call them 'origMarks' colnames(marx) <- c("origMarks", "pattern") } else colnames(marx)[ncol(marx)] <- "pattern" } return(marx) } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # This function is now deprecated. superimposePSP <- function(..., W=NULL, check=TRUE) { .Deprecated("superimpose","spatstat") # superimpose any number of line segment patterns arglist <- list(...) 
nargue <- length(arglist) if(nargue == 0) stop("No line segment patterns given", call.=FALSE) # catch possible abuses if(is.null(W) && any(suspicious <- (names(arglist) == "window"))) { id <- min(which(suspicious)) Win <- arglist[[id]] if(is.owin(Win) || is.null(Win)) { W <- Win arglist <- arglist[-id] nargue <- length(arglist) } } # unpack a list if(nargue == 1) { X <- arglist[[1]] if(!inherits(X, "psp") && inherits(X, "list")) arglist <- X } isnull <- unlist(lapply(arglist, is.null)) arglist <- arglist[!isnull] if(!all(unlist(lapply(arglist, is.psp)))) stop("Some of the arguments are not psp objects", call.=FALSE) # extract segment coordinates matlist <- lapply(arglist, function(x) { as.matrix(x$ends) }) # tack them together mat <- do.call(rbind, matlist) # extract marks if any marxlist <- lapply(arglist, marks) # check on compatibility of marks mkfmt <- sapply(marxlist,markformat) if(length(unique(mkfmt))>1) stop(paste("The marks of the point patterns have different formats:", commasep(sQuote(mkfmt))), call.=FALSE) mkfmt <- mkfmt[1] if(mkfmt=="dataframe") { mcnms <- lapply(marxlist,names) cdim <- lengths(mcnms) OK <- length(unique(cdim)) == 1 if(OK) { allInOne <- sapply(mcnms,paste,collapse="") OK <- length(unique(allInOne)) == 1 if(!OK) stop("Data frames of marks have different names", call.=FALSE) } else stop("Data frames of marks have different column dimensions", call.=FALSE) } # combine the marks marx <- switch(mkfmt, none = NULL, vector = { marxlist <- lapply(marxlist, as.data.frame.vector, nm="v1") do.call(rbind, marxlist)[,1] }, dataframe = do.call(rbind, marxlist)) # determine window if(!is.null(W)) W <- as.owin(W) else { # extract windows from psp objects Wlist <- lapply(arglist, as.owin) # take the union of all the windows W <- NULL for(i in seq_along(Wlist)) W <- union.owin(W, Wlist[[i]]) } return(as.psp(mat, window=W, marks=marx, check=check)) } spatstat/R/randomlpp.R0000755000176200001440000000617013115271120014435 0ustar liggesusers# # random.R # 
#  Random point pattern generators for a linear network
#
#  $Revision: 1.9 $  $Date: 2016/11/23 07:25:50 $
#

#  rpoislpp: simulate realisations of a Poisson point process on a
#  linear network.
#    lambda : intensity; if a 'linim' or 'linfun', the network L may be
#             omitted and is extracted from lambda.
#    L      : linear network ('linnet').
#    nsim   : number of simulated patterns.
#    drop   : if TRUE and nsim == 1, return a single 'lpp' rather than a list.
#  Returns an 'lpp' or a 'solist' of 'lpp' objects named "Simulation i".
rpoislpp <- function(lambda, L, ..., nsim=1, drop=TRUE) {
  if(missing(L) || is.null(L)) {
    ## network not given: recover it from lambda, if lambda carries one
    if(!inherits(lambda, c("linim", "linfun")))
      stop("L is missing", call.=FALSE)
    L <- as.linnet(lambda)
  } else verifyclass(L, "linnet")
  result <- vector(mode="list", length=nsim)
  S <- as.psp(L)
  ## TRUE when a single, un-listed pattern should be returned immediately
  bugout <- (nsim == 1) && drop
  for(i in seq_len(nsim)) {
    ## generate points on the segment pattern, then attach the network
    X <- datagen.rpoisppOnLines(lambda, S, ...)
    Y <- lpp(X, L)
    if(bugout) return(Y)
    result[[i]] <- Y
  }
  result <- as.solist(result)
  if(nsim > 0) names(result) <- paste("Simulation", 1:nsim)
  return(result)
}

#  runiflpp: simulate n independent uniform random points on a
#  linear network L.  Arguments nsim, drop as for rpoislpp.
runiflpp <- function(n, L, nsim=1, drop=TRUE) {
  verifyclass(L, "linnet")
  result <- vector(mode="list", length=nsim)
  S <- as.psp(L)
  ## TRUE when a single, un-listed pattern should be returned immediately
  bugout <- (nsim == 1) && drop
  for(i in seq_len(nsim)) {
    X <- datagen.runifpointOnLines(n, S)
    Y <- lpp(X, L)
    if(bugout) return(Y)
    result[[i]] <- Y
  }
  result <- as.solist(result)
  if(nsim > 0) names(result) <- paste("Simulation", 1:nsim)
  return(result)
}

#  rlpp: simulate n random points on a linear network with probability
#  density proportional to f (a 'linfun' or 'linim', or a list of them
#  for multitype simulation with one density per type).
rlpp <- function(n, f, ..., nsim=1, drop=TRUE) {
  if(inherits(f, "linfun")) f <- as.linim(f, ...)
  if(!inherits(f, "linim") && is.list(f) &&
     all(sapply(f, inherits, what=c("linim", "linfun")))) {
    #' f is a list of densities for each type of point
    stopifnot(length(n) == length(f))
    Y <- mapply(rlpp, n=as.list(n), f=f,
                MoreArgs=list(nsim=nsim, drop=FALSE, ...),
                SIMPLIFY=FALSE)
    ## superimpose the patterns of each type, simulation by simulation
    Z <- do.call(mapply, c(list(superimpose), Y, list(SIMPLIFY=FALSE)))
    if(nsim == 1 && drop) return(Z[[1]])
    return(as.solist(Z))
  }
  if(!inherits(f, "linim"))
    stop("f should be a linfun or linim object")
  if(length(n) > 1) {
    ## vector n with a single density: replicate the density per type
    flist <- rep(list(f), length(n))
    return(rlpp(n, flist, nsim=nsim, drop=drop, ...))
  }
  check.1.integer(nsim)
  if(nsim <= 0) return(list())
  #' extract data
  L <- as.linnet(f)
  df <- attr(f, "df")
  seglen <- lengths.psp(as.psp(L))
  #' sort into segments, left-to-right within segments
  df <- df[order(df$mapXY, df$tp), , drop=FALSE]
  nr <- nrow(df)
  fvals <- df$values
  if(anyNA(fvals)) stop("f has some NA values")
  if(min(fvals) < 0) stop("f has some negative values")
  #' find interval corresponding to each sample point:
  #' midpoints between consecutive sample locations on the same segment,
  #' clipped to [0,1] at segment ends
  sameseg <- (diff(df$mapXY) == 0)
  sharenext <- c(sameseg, FALSE)
  shareprevious <- c(FALSE, sameseg)
  tcur <- df$tp
  tnext <- c(tcur[-1], NA)
  tprev <- c(NA, tcur[-nr])
  tleft <- ifelse(shareprevious, (tcur + tprev)/2, 0)
  tright <- ifelse(sharenext, (tcur + tnext)/2, 1)
  #' compute probability of each interval
  probs <- fvals * (tright - tleft) * seglen[df$mapXY]
  probs <- probs/sum(probs)
  #' generate the simulations
  result <- list()
  for(isim in 1:nsim) {
    #' sample intervals and place point uniformly in each interval
    ii <- sample.int(nr, size=n, replace=TRUE, prob=probs)
    seg <- df[ii, "mapXY"]
    tp <- runif(n, tleft[ii], tright[ii])
    result[[isim]] <- as.lpp(seg=seg, tp=tp, L=L)
  }
  if(nsim == 1 && drop) return(result[[1]])
  names(result) <- paste("Simulation", 1:nsim)
  return(as.solist(result))
}

# spatstat/R/derivfv.R0000644000176200001440000001040213115225157014105 0ustar liggesusers
#
# derivfv.R
#
# differentiation for fv objects
#
# $Revision: 1.6 $ $Date: 2014/10/24 00:22:30 $
#

#  deriv.fv: numerical differentiation of the selected columns of a
#  function value table ('fv' object), either by smoothing-spline
#  derivative ("spline") or by first differences ("numeric").
#  'kinks' splits the x-axis into intervals differentiated separately;
#  'periodic'/'Dperiodic' handle functions/derivatives that wrap around.
deriv.fv <- local({

  ## fit a smoothing spline to (r, y) and evaluate its first derivative at r
  derivative <- function(y, r, ...) {
    ss <- smooth.spline(r, y, ...)
    predict(ss, r, deriv=1)$y
  }

  deriv.fv <- function(expr, which="*", ...,
                       method=c("spline", "numeric"),
                       kinks=NULL,
                       periodic=FALSE,
                       Dperiodic=periodic) {
    f <- expr
    method <- match.arg(method)
    ## select columns
    ## if(length(which) == 1L && which %in% .Spatstat.FvAbbrev) {
    if(length(which) == 1L) {
      if(which == ".x")
        stop("Cannot smooth the function argument")
      which <- fvnames(f, which)
    }
    if(any(nbg <- !(which %in% names(f))))
      stop(paste("Unrecognised column",
                 ngettext(sum(nbg), "name", "names"),
                 commasep(sQuote(which[nbg])),
                 "in argument", sQuote("which")))
    relevant <- names(f) %in% which
    ## extract the function argument and the columns to differentiate
    rname <- fvnames(f, ".x")
    df <- as.data.frame(f)
    rpos <- which(colnames(df) == rname)
    rvals <- df[,rpos]
    yvals <- df[,relevant,drop=FALSE]
    nr <- length(rvals)
    ##
    if(Dperiodic) {
      ## Derivative should be periodic
      ## Recycle data to imitate periodicity
      DR <- diff(range(rvals))
      rvals <- c(rvals[-nr] - DR, rvals, rvals[-1L] + DR)
      yleft <- yvals[-nr, , drop=FALSE]
      yright <- yvals[-1L, , drop=FALSE]
      if(!periodic) {
        ## original data are not periodic (e.g. cdf of angular variable)
        ## but derivative must be periodic
        jump <- matrix(as.numeric(yvals[nr,] - yvals[1L, ]),
                       nr-1L, ncol(yvals), byrow=TRUE)
        yleft <- yleft - jump
        yright <- yright + jump
      }
      yvals <- rbind(yleft, yvals, yright)
      ## 'actual' indexes the original (un-recycled) rows
      actual <- nr:(2*nr - 1L)
      NR <- length(rvals)
    } else {
      NR <- nr
      actual <- 1:nr
    }
    ## cut x axis into intervals?
    if(is.null(kinks)) {
      cutx <- factor(rep(1, NR))
    } else {
      rr <- range(rvals)
      if(periodic) kinks <- c(kinks-DR, kinks, kinks+DR)
      breaks <- sort(unique(kinks))
      if(breaks[1L] > rr[1L]) breaks <- c(rr[1L], breaks)
      if(max(breaks) < rr[2L]) breaks <- c(breaks, rr[2L])
      cutx <- cut(rvals, breaks=breaks, include.lowest=TRUE)
    }
    ## differentiate each interval separately
    for(segment in levels(cutx)) {
      ii <- (cutx == segment)
      yy <- yvals[ii, , drop=FALSE]
      switch(method,
             numeric = {
               ## first differences; repeat the last row to keep length
               dydx <- apply(yy, 2, diff)/diff(rvals[ii])
               nd <- nrow(dydx)
               dydx <- rbind(dydx, dydx[nd, ])
             },
             spline = {
               dydx <- apply(yy, 2, derivative, r=rvals[ii], ...)
             })
      df[ii[actual], relevant] <- dydx[ actual, ]
    }
    ## pack up
    result <- f
    result[,] <- df
    ## tweak name of function
    if(!is.null(yl <- attr(f, "ylab")))
      attr(result, "ylab") <- substitute(bold(D)~Fx, list(Fx=yl))
    if(!is.null(ye <- attr(f, "yexp")))
      attr(result, "yexp") <- substitute(bold(D)~Fx, list(Fx=ye))
    ## tweak mathematical labels
    attr(result, "labl")[relevant] <-
      paste0("bold(D)~", attr(f, "labl")[relevant])
    return(result)
  }

  deriv.fv
})

#  increment.fv: replace each function column of f by the centred
#  increment  F(x + delta/2) - F(x - delta/2).
increment.fv <- function(f, delta) {
  stopifnot(is.fv(f))
  check.1.real(delta)
  stopifnot(delta > 0)
  half <- delta/2
  xx <- with(f, .x)
  ynames <- fvnames(f, ".")
  ## evaluate each column as a function at x +/- half and difference
  yy <- as.data.frame(lapply(ynames,
                             function(a, xx, f, h) {
                               g <- as.function(f, value=a)
                               g(xx+h)-g(xx-h)
                             },
                             xx=xx, f=f, h=half))
  Y <- f
  Y[,ynames] <- yy
  ## tweak name of function
  if(!is.null(yl <- attr(f, "ylab")))
    attr(Y, "ylab") <- substitute(Delta~Fx, list(Fx=yl))
  if(!is.null(ye <- attr(f, "yexp")))
    attr(Y, "yexp") <- substitute(Delta~Fx, list(Fx=ye))
  ## tweak mathematical labels
  relevant <- colnames(Y) %in% ynames
  attr(Y, "labl")[relevant] <- paste0("Delta~", attr(f, "labl")[relevant])
  ## tweak recommended range (shrink by half at each end)
  attr(Y, "alim") <- intersect.ranges(attr(f, "alim"),
                                      range(xx) + c(1,-1)*half)
  return(Y)
}

# spatstat/R/colourschemes.R0000644000176200001440000000242013115225157015314 0ustar liggesusers
#
#  colourschemes.R
#
#  $Revision: 1.3 $  $Date: 2013/07/17 04:53:48 $
#

#  beachcolourmap: a 'colourmap' on the numeric interval 'range'
#  using the beachcolours palette.
beachcolourmap <- function(range, ...) {
  col <- beachcolours(range, ...)
  z <- colourmap(col, range=range)
  return(z)
}

#  beachcolours: a character vector of 'ncolours' colours representing
#  values in 'range', coloured like a beach: blues below 'sealevel',
#  a band of 'nbeach' greenish colours around it, red/yellow above.
#  monochrome=TRUE gives a grey ramp instead.
beachcolours <- function(range, sealevel = 0, monochrome=FALSE,
                         ncolours=if(monochrome) 16 else 64,
                         nbeach=1) {
  if(monochrome)
    return(grey(seq(from=0,to=1,length.out=ncolours)))
  stopifnot(is.numeric(range) && length(range) == 2)
  stopifnot(all(is.finite(range)))
  depths <- range[1L]
  peaks <- range[2L]
  ## value step per colour
  dv <- diff(range)/(ncolours - 1L)
  epsilon <- nbeach * dv/2
  ## the 'beach' band straddles sealevel, clipped to the data range
  lowtide <- max(sealevel - epsilon, depths)
  hightide <- min(sealevel + epsilon, peaks)
  countbetween <- function(a, b, delta) {
    max(0, round((b-a)/delta))
  }
  nsea <- countbetween(depths, lowtide, dv)
  nbeach <- countbetween(lowtide, hightide, dv)
  nland <- countbetween(hightide, peaks, dv)
  colours <- character(0)
  if(nsea > 0)
    colours <- rev(rainbow(nsea, start=3/6,end=4/6)) # cyan/blue
  if(nbeach > 0)
    colours <- c(colours,
                 rev(rainbow(nbeach, start=3/12,end=5/12))) # green
  if(nland > 0)
    colours <- c(colours,
                 rev(rainbow(nland, start=0, end=1/6))) # red/yellow
  return(colours)
}

# spatstat/R/rlabel.R0000755000176200001440000000162513115271120013702 0ustar liggesusers
#
#   rlabel.R
#
#   random (re)labelling
#
#   $Revision: 1.8 $   $Date: 2015/02/11 09:19:10 $
#
#

#  rlabel: randomly (re)label the points of a point pattern X.
#  'labels' may be a vector/factor (one mark per point) or a data frame/
#  hyperframe of marks.  permute=TRUE permutes the given labels;
#  permute=FALSE resamples them with replacement.
rlabel <- function(X, labels=marks(X), permute=TRUE) {
  stopifnot(is.ppp(X) || is.lpp(X) || is.pp3(X) || is.ppx(X))
  if(is.null(labels))
    stop("labels not given and marks not present")
  npts <- npoints(X)
  if(is.vector(labels) || is.factor(labels)) {
    nlabels <- length(labels)
    if(permute && (nlabels != npts))
      stop("length of labels vector does not match number of points")
    Y <- X %mark% sample(labels, npts, replace=!permute)
  } else if(is.data.frame(labels) || is.hyperframe(labels)) {
    nlabels <- nrow(labels)
    if(permute && (nlabels != npts))
      stop("number of rows of data frame does not match number of points")
    Y <- X %mark% labels[sample(1:nlabels, npts, replace=!permute), ,drop=FALSE]
  } else stop("Format of labels argument is not understood")
  return(Y)
}
spatstat/R/connected.R0000755000176200001440000001273413115271075014417 0ustar liggesusers# # connected.R # # connected component transform # # $Revision: 1.19 $ $Date: 2017/06/05 10:31:58 $ # # Interpreted code for pixel images by Julian Burgos # Rewritten in C by Adrian Baddeley # # Code for point patterns by Adrian Baddeley connected <- function(X, ...) { UseMethod("connected") } connected.im <- function(X, ..., background=NA, method="C") { W <- if(!is.na(background)) solutionset(X != background) else if(X$type == "logical") solutionset(X) else as.owin(X) connected.owin(W, method=method, ...) } connected.owin <- function(X, ..., method="C") { method <- pickoption("algorithm choice", method, c(C="C", interpreted="interpreted")) # convert X to binary mask X <- as.mask(X, ...) # Y <- X$m nr <- X$dim[1L] nc <- X$dim[2L] if(method == "C") { ################ COMPILED CODE ######################### # Pad border with FALSE M <- rbind(FALSE, Y, FALSE) M <- cbind(FALSE, M, FALSE) # assign unique label to each foreground pixel L <- M L[M] <- seq_len(sum(M)) L[!M] <- 0 # resolve labels z <- .C("cocoImage", mat=as.integer(t(L)), nr=as.integer(nr), nc=as.integer(nc), PACKAGE = "spatstat") # unpack Z <- matrix(z$mat, nr+2, nc+2, byrow=TRUE) } else { ################ INTERPRETED CODE ######################### # by Julian Burgos # # Pad border with zeros padY <- rbind(0, Y, 0) padY <- cbind(0, padY, 0) # Initialise Z <- matrix(0, nrow(padY), ncol(padY)) currentlab <- 1L todo <- as.vector(t(Y)) equiv <- NULL # ........ main loop .......................... 
while(any(todo)){ # pick first unresolved pixel one <- which(todo)[1L] onerow <- ceiling(one/nc) onecol <- one -((onerow-1L)*nc) parow=onerow+1L # Equivalent rows & column in padded matrix pacol=onecol+1L #Examine four previously scanned neighbors # (use padded matrix to avoid edge issues) nbrs <- rbind(c(parow-1L,pacol-1L), c(parow-1L,pacol), c(parow, pacol-1L), c(parow-1L,pacol+1L)) px <- sum(padY[nbrs]) if (px==0){ # no neighbours: new component Z[parow,pacol] <- currentlab currentlab <- currentlab+1L todo[one] <- FALSE } else if(px==1L) { # one neighbour: assign existing label labs <- unique(Z[nbrs], na.rm=TRUE) labs <- labs[labs != 0] Z[parow,pacol] <- labs[1L] currentlab <- max(Z)+1L todo[one] <- FALSE } else { # more than one neighbour: possible merger of labels labs <- unique(Z[nbrs], na.rm=TRUE) labs <- labs[labs != 0] labs <- sort(labs) equiv <- rbind(equiv,c(labs,rep.int(0,times=4-length(labs)))) Z[parow,pacol] <- labs[1L] currentlab <- max(Z)+1L todo[one] <- FALSE } } # ........... end of loop ............ # Resolve equivalences ................ 
if(length(equiv)>1L){ merges <- (equiv[,2L] > 1L) nmerge <- sum(merges) if(nmerge==1L) equiv <- equiv[which(merges), , drop=FALSE] else if(nmerge > 1L) { relevant <- (equiv[,2L] > 0) equiv <- equiv[relevant, , drop=FALSE] equiv <- equiv[fave.order(equiv[,1L]),] } for (i in 1:nrow(equiv)){ current <- equiv[i, 1L] for (j in 2:4){ twin <- equiv[i,j] if (twin>0){ # Change labels matrix Z[which(Z==twin)] <- current # Update equivalence table equiv[which(equiv==twin)] <- current } } } } } ########### COMMON CODE ############################ # Renumber labels sequentially mapped <- (Z != 0) usedlabs <- sort(unique(as.vector(Z[mapped]))) nlabs <- length(usedlabs) labtable <- cumsum(seq_len(max(usedlabs)) %in% usedlabs) Z[mapped] <- labtable[Z[mapped]] # banish zeroes Z[!mapped] <- NA # strip borders Z <- Z[2:(nrow(Z)-1L),2:(ncol(Z)-1L)] # dress up Z <- im(factor(Z, levels=1:nlabs), xcol=X$xcol, yrow=X$yrow, unitname=unitname(X)) return(Z) } connected.ppp <- function(X, R, ...) { stopifnot(is.ppp(X)) check.1.real(R, "In connected.ppp") stopifnot(R >= 0) internal <- resolve.1.default("internal", list(...), list(internal=FALSE)) nv <- npoints(X) cl <- closepairs(X, R, what="indices") ie <- cl$i - 1L je <- cl$j - 1L ne <- length(ie) zz <- .C("cocoGraph", nv=as.integer(nv), ne=as.integer(ne), ie=as.integer(ie), je=as.integer(je), label=as.integer(integer(nv)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(zz$status != 0) stop("Internal error: connected.ppp did not converge") if(internal) return(zz$label) lab <- zz$label + 1L # Renumber labels sequentially lab <- as.integer(factor(lab)) # Convert labels to factor lab <- factor(lab) # Apply to points Y <- X %mark% lab return(Y) } # ................................................. is.connected <- function(X, ...) { UseMethod("is.connected") } is.connected.default <- function(X, ...) { y <- connected(X, ...) 
npieces <- length(levels(y)) if(npieces == 0) stop("Unable to determine connectedness") return(npieces == 1) } is.connected.ppp <- function(X, R, ...) { lab <- connected(X, R, internal=TRUE) npieces <- length(unique(lab)) return(npieces == 1) }spatstat/R/evalcovar.R0000644000176200001440000004026213145262274014435 0ustar liggesusers#' #' evalcovar.R #' #' evaluate covariate values at data points and at pixels #' #' $Revision: 1.23 $ $Date: 2017/08/17 09:20:00 $ #' evalCovar <- function(model, covariate, ...) { UseMethod("evalCovar") } evalCovar.ppm <- local({ evalCovar.ppm <- function(model, covariate, ..., lambdatype=c("cif", "trend", "intensity"), dimyx=NULL, eps=NULL, interpolate=TRUE, jitter=TRUE, modelname=NULL, covname=NULL, dataname=NULL, subset=NULL) { lambdatype <- match.arg(lambdatype) #' evaluate covariate values at data points and at pixels csr <- is.poisson.ppm(model) && is.stationary.ppm(model) #' determine names if(is.null(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(is.null(covname)) { covname <- singlestring(short.deparse(substitute(covariate))) if(is.character(covariate)) covname <- covariate } if(is.null(dataname)) dataname <- model$Qname info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, spacename="two dimensions") X <- data.ppm(model) W <- as.owin(model) #' explicit control of pixel resolution if(!is.null(dimyx) || !is.null(eps)) W <- as.mask(W, dimyx=dimyx, eps=eps) if(!is.null(subset)) { #' restrict to subset if required X <- X[subset] W <- W[subset, drop=FALSE] } #' evaluate covariate if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=xcoordfun, y=ycoordfun, stop(paste("Unrecognised covariate", dQuote(covariate)))) } if(!is.marked(model)) { #' ................... 
unmarked ....................... if(is.im(covariate)) { type <- "im" if(!interpolate) { #' look up covariate values ZX <- safelookup(covariate, X) } else { #' evaluate at data points by interpolation ZX <- interp.im(covariate, X$x, X$y) #' fix boundary glitches if(any(uhoh <- is.na(ZX))) ZX[uhoh] <- safelookup(covariate, X[uhoh]) } #' covariate values for pixels inside window Z <- covariate[W, drop=FALSE] #' corresponding mask W <- as.owin(Z) } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at data points ZX <- covariate(X$x, X$y) if(!all(is.finite(ZX))) warning("covariate function returned NA or Inf values") #' window W <- as.mask(W) #' covariate in window Z <- as.im(covariate, W=W) #' collapse function body to single string covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' values of covariate in window Zvalues <- as.vector(Z[W, drop=TRUE]) #' corresponding fitted [conditional] intensity values lambda <- as.vector(predict(model, locations=W, type=lambdatype)[W, drop=TRUE]) #' pixel area (constant) pixelarea <- with(Z, xstep * ystep) } else { #' ................... marked ....................... 
if(!is.multitype(model)) stop("Only implemented for multitype models (factor marks)") marx <- marks(X, dfok=FALSE) possmarks <- levels(marx) npts <- npoints(X) #' single image: replicate if(is.im(covariate)) { covariate <- rep(list(covariate), times=length(possmarks)) names(covariate) <- as.character(possmarks) } #' if(is.list(covariate) && all(unlist(lapply(covariate, is.im)))) { #' list of images type <- "im" if(length(covariate) != length(possmarks)) stop("Number of images does not match number of possible marks") #' evaluate covariate at each data point ZX <- numeric(npts) for(k in seq_along(possmarks)) { ii <- (marx == possmarks[k]) covariate.k <- covariate[[k]] if(!interpolate) { #' look up covariate values values <- safelookup(covariate, X) } else { #' interpolate values <- interp.im(covariate.k, x=X$x[ii], y=X$y[ii]) #' fix boundary glitches if(any(uhoh <- is.na(values))) values[uhoh] <- safelookup(covariate.k, X[ii][uhoh]) } ZX[ii] <- values } #' restrict covariate images to window Z <- lapply(covariate, "[", i=W, drop=FALSE) #' extract pixel locations and pixel values Zframes <- lapply(Z, as.data.frame) #' covariate values at each pixel inside window Zvalues <- unlist(lapply(Zframes, getElement, name="value")) #' pixel locations locn <- lapply(Zframes, getxy) #' tack on mark values for(k in seq_along(possmarks)) locn[[k]] <- cbind(locn[[k]], data.frame(marks=possmarks[k])) loc <- do.call(rbind, locn) #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=loc, type=lambdatype) #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at data points ZX <- covariate(X$x, X$y, marx) #' same window W <- as.mask(W) #' covariate in window Z <- list() for(k in seq_along(possmarks)) Z[[k]] <- as.im(functioncaller, m=possmarks[k], f=covariate, W=W) #' functioncaller: function(x,y,m,f) { f(x,y,m) } Zvalues <- unlist(lapply(Z, pixelvalues)) 
#' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=W, type=lambdatype) lambda <- unlist(lapply(lambda, pixelvalues)) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") #' collapse function body to single string covname <- singlestring(covname) #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("For a multitype point process model,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) } #' .......................................................... #' apply jittering to avoid ties if(jitter) { nX <- length(ZX) dZ <- 0.3 * quantile(diff(sort(unique(c(ZX, Zvalues)))), 1/min(20, nX)) ZX <- ZX + rnorm(nX, sd=dZ) Zvalues <- Zvalues + rnorm(length(Zvalues), sd=dZ) } lambdaname <- if(is.poisson(model)) "intensity" else lambdatype lambdaname <- paste("the fitted", lambdaname) check.finite(lambda, xname=lambdaname, usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) #' lambda values at data points lambdaX <- predict(model, locations=X, type=lambdatype) #' wrap up values <- list(Zimage = Z, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = pixelarea, ZX = ZX, type = type) return(list(values=values, info=info)) } xcoordfun <- function(x,y,m){x} ycoordfun <- function(x,y,m){y} pixarea <- function(z) { z$xstep * z$ystep } npixdefined <- function(z) { sum(!is.na(z$v)) } functioncaller <- function(x,y,m,f) { f(x,y,m) } pixelvalues <- function(z) { as.data.frame(z)[,3L] } getxy <- function(z) { z[,c("x","y")] } evalCovar.ppm }) evalCovar.lppm <- local({ evalCovar.lppm <- function(model, covariate, ..., lambdatype=c("cif", "trend", "intensity"), eps=NULL, nd=1000, interpolate=TRUE, jitter=TRUE, modelname=NULL, covname=NULL, dataname=NULL, 
subset=NULL) { lambdatype <- match.arg(lambdatype) #' evaluate covariate values at data points and at pixels csr <- is.poisson(model) && is.stationary(model) #' determine names if(is.null(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(is.null(covname)) { covname <- singlestring(short.deparse(substitute(covariate))) if(is.character(covariate)) covname <- covariate } if(is.null(dataname)) dataname <- model$Xname info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, spacename="linear network") #' convert character covariate to function if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=xcoordfun, y=ycoordfun, stop(paste("Unrecognised covariate", dQuote(covariate)))) } #' extract model components X <- model$X fit <- model$fit #' L <- as.linnet(X) Q <- quad.ppm(fit) #' restrict to subset if required if(!is.null(subset)) { X <- X[subset] Q <- Q[subset] } isdat <- is.data(Q) U <- union.quad(Q) wt <- w.quad(Q) #' evaluate covariate if(!is.marked(model)) { #' ................... unmarked ....................... 
if(is.im(covariate)) { if(inherits(covariate, "linim")) { type <- "linim" Zimage <- covariate } else { type <- "im" Zimage <- as.linim(covariate, L) } if(!interpolate) { #' look up covariate values at quadrature points Zvalues <- safelookup(covariate, U) } else { #' evaluate at quadrature points by interpolation Zvalues <- interp.im(covariate, U$x, U$y) #' fix boundary glitches if(any(uhoh <- is.na(Zvalues))) Zvalues[uhoh] <- safelookup(covariate, U[uhoh]) } #' extract data values ZX <- Zvalues[isdat] } else if(is.function(covariate)) { type <- "function" Zimage <- as.linim(covariate, L) #' evaluate exactly at quadrature points Zvalues <- covariate(U$x, U$y) if(!all(is.finite(Zvalues))) warning("covariate function returned NA or Inf values") #' extract data values ZX <- Zvalues[isdat] #' collapse function body to single string covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' corresponding fitted [conditional] intensity values lambda <- as.vector(predict(model, locations=U, type=lambdatype)) } else { #' ................... marked ....................... 
if(!is.multitype(model)) stop("Only implemented for multitype models (factor marks)") marx <- marks(U, dfok=FALSE) possmarks <- levels(marx) #' single image: replicate if(is.im(covariate)) { covariate <- rep(list(covariate), length(possmarks)) names(covariate) <- possmarks } #' if(is.list(covariate) && all(unlist(lapply(covariate, is.im)))) { #' list of images if(length(covariate) != length(possmarks)) stop("Number of images does not match number of possible marks") #' determine type of data islinim <- unlist(lapply(covariate, inherits, what="linim")) type <- if(all(islinim)) "linim" else "im" Zimage <- covariate Zimage[!islinim] <- lapply(Zimage[!islinim], as.linim, L=L) #' evaluate covariate at each data point by interpolation Zvalues <- numeric(npoints(U)) for(k in seq_along(possmarks)) { ii <- (marx == possmarks[k]) covariate.k <- covariate[[k]] if(!interpolate) { #' direct lookup values <- safelookup(covariate.k, U[ii]) } else { #' interpolation values <- interp.im(covariate.k, x=U$x[ii], y=U$y[ii]) #' fix boundary glitches if(any(uhoh <- is.na(values))) values[uhoh] <- safelookup(covariate.k, U[ii][uhoh]) } Zvalues[ii] <- values } #' extract data values ZX <- Zvalues[isdat] #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=U, type=lambdatype) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at quadrature points Zvalues <- covariate(U$x, U$y, marx) #' extract data values ZX <- Zvalues[isdat] #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=U, type=lambdatype) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") #' images Zimage <- list() for(k in seq_along(possmarks)) Zimage[[k]] <- as.linim(functioncaller, L=L, m=possmarks[k], f=covariate) #' collapse function body to single string covname <- singlestring(covname) } 
else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("For a multitype point process model,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) } #' .......................................................... #' apply jittering to avoid ties if(jitter) { nX <- length(ZX) dZ <- 0.3 * quantile(diff(sort(unique(c(ZX, Zvalues)))), 1/min(20, nX)) ZX <- ZX + rnorm(nX, sd=dZ) Zvalues <- Zvalues + rnorm(length(Zvalues), sd=dZ) } lambdaname <- if(is.poisson(model)) "intensity" else lambdatype lambdaname <- paste("the fitted", lambdaname) check.finite(lambda, xname=lambdaname, usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) #' restrict image to subset if(!is.null(subset)) Zimage <- Zimage[subset, drop=FALSE] #' lambda values at data points lambdaX <- predict(model, locations=X, type=lambdatype) #' wrap up values <- list(Zimage = Zimage, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = wt, ZX = ZX, type = type) return(list(values=values, info=info)) } xcoordfun <- function(x,y,m){x} ycoordfun <- function(x,y,m){y} functioncaller <- function(x,y,m,f) { f(x,y,m) } evalCovar.lppm }) spatstat/R/smooth.ppp.R0000755000176200001440000007152513144755476014605 0ustar liggesusers# # smooth.ppp.R # # Smooth the marks of a point pattern # # $Revision: 1.44 $ $Date: 2017/08/16 05:40:56 $ # smooth.ppp <- function(X, ..., weights=rep(1, npoints(X)), at="pixels") { .Deprecated("Smooth.ppp", package="spatstat", msg="smooth.ppp is deprecated: use the generic Smooth with a capital S") Smooth(X, ..., weights=weights, at=at) } Smooth <- function(X, ...) { UseMethod("Smooth") } Smooth.solist <- function(X, ...) { solapply(X, Smooth, ...) 
} Smooth.ppp <- function(X, sigma=NULL, ..., weights=rep(1, npoints(X)), at="pixels", edge=TRUE, diggle=FALSE, geometric=FALSE) { verifyclass(X, "ppp") if(!is.marked(X, dfok=TRUE, na.action="fatal")) stop("X should be a marked point pattern", call.=FALSE) X <- coerce.marks.numeric(X) if(!all(is.finite(as.matrix(marks(X))))) stop("Some mark values are Inf, NaN or NA", call.=FALSE) at <- pickoption("output location type", at, c(pixels="pixels", points="points")) ## weights weightsgiven <- !missing(weights) && !is.null(weights) if(weightsgiven) { # convert to numeric if(is.im(weights)) { weights <- safelookup(weights, X) # includes warning if NA } else if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame()) if(length(weights) == 0) weightsgiven <- FALSE } if(weightsgiven) { check.nvector(weights, npoints(X)) } else weights <- NULL ## geometric mean smoothing if(geometric) return(ExpSmoothLog(X, sigma=sigma, ..., at=at, weights=weights, edge=edge, diggle=diggle)) ## determine smoothing parameters ker <- resolve.2D.kernel(sigma=sigma, ..., x=X, bwfun=bw.smoothppp, allow.zero=TRUE) sigma <- ker$sigma varcov <- ker$varcov ## Diggle's edge correction? 
if(diggle && !edge) warning("Option diggle=TRUE overridden by edge=FALSE") diggle <- diggle && edge ## if(ker$cutoff < minnndist(X)) { # very small bandwidth leaveoneout <- resolve.1.default("leaveoneout", list(...), list(leaveoneout=TRUE)) if(!leaveoneout && at=="points") { warning(paste("Bandwidth is close to zero:", "original values returned")) Y <- marks(X) } else { warning(paste("Bandwidth is close to zero:", "nearest-neighbour interpolation performed")) Y <- nnmark(X, ..., k=1, at=at) } return(Y) } if(diggle) { ## absorb Diggle edge correction into weights vector edg <- second.moment.calc(X, sigma, what="edge", ..., varcov=varcov) ei <- safelookup(edg, X, warn=FALSE) weights <- if(weightsgiven) weights/ei else 1/ei weights[!is.finite(weights)] <- 0 weightsgiven <- TRUE } ## rescale weights to avoid numerical gremlins if(weightsgiven && ((mw <- median(abs(weights))) > 0)) weights <- weights/mw ## calculate... marx <- marks(X) if(!is.data.frame(marx)) { # ........ vector of marks ................... 
values <- marx if(is.factor(values)) { warning("Factor valued marks were converted to integers") values <- as.numeric(values) } ## detect constant values ra <- range(values, na.rm=TRUE) if(diff(ra) == 0) { switch(at, points = { result <- values }, pixels = { M <- do.call.matched(as.mask, list(w=as.owin(X), ...)) result <- as.im(ra[1], M) }) } else { switch(at, points={ result <- do.call(smoothpointsEngine, resolve.defaults(list(x=X, values=values, weights=weights, sigma=sigma, varcov=varcov, edge=FALSE), list(...))) }, pixels={ values.weights <- if(weightsgiven) values * weights else values numerator <- do.call(density.ppp, resolve.defaults(list(x=X, at="pixels", weights = values.weights, sigma=sigma, varcov=varcov, edge=FALSE), list(...))) denominator <- do.call(density.ppp, resolve.defaults(list(x=X, at="pixels", weights = weights, sigma=sigma, varcov=varcov, edge=FALSE), list(...))) result <- eval.im(numerator/denominator) ## trap small values of denominator ## trap NaN and +/- Inf values of result, but not NA eps <- .Machine$double.eps nbg <- eval.im(is.infinite(result) | is.nan(result) | (denominator < eps)) if(any(as.matrix(nbg), na.rm=TRUE)) { warning(paste("Numerical underflow detected:", "sigma is probably too small")) ## l'Hopital's rule distX <- distmap(X, xy=numerator) whichnn <- attr(distX, "index") nnvalues <- eval.im(values[whichnn]) result[nbg] <- nnvalues[nbg] } attr(result, "warnings") <- attr(numerator, "warnings") }) } } else { ## ......... data frame of marks .................. 
## detect constant columns ra <- apply(marx, 2, range, na.rm=TRUE) isconst <- (apply(ra, 2, diff) == 0) if(anyisconst <- any(isconst)) { oldmarx <- marx # oldX <- X marx <- marx[, !isconst] X <- X %mark% marx } if(any(!isconst)) { ## compute denominator denominator <- do.call(density.ppp, resolve.defaults(list(x=X, at=at, weights = weights, sigma=sigma, varcov=varcov, edge=FALSE), list(...))) ## compute numerator for each column of marks marx.weights <- if(weightsgiven) marx * weights else marx numerators <- do.call(density.ppp, resolve.defaults(list(x=X, at=at, weights = marx.weights, sigma=sigma, varcov=varcov, edge=FALSE), list(...))) uhoh <- attr(numerators, "warnings") ## calculate ratios switch(at, points={ if(is.null(uhoh)) { ## numerators is a matrix (or may have dropped to vector) if(!is.matrix(numerators)) numerators <- matrix(numerators, ncol=1) ratio <- numerators/denominator if(any(badpoints <- matrowany(!is.finite(ratio)))) { whichnnX <- nnwhich(X) ratio[badpoints,] <- as.matrix(marx[whichnnX[badpoints], , drop=FALSE]) } } else { warning("returning original values") ratio <- marx } result <- as.data.frame(ratio) colnames(result) <- colnames(marx) }, pixels={ ## numerators is a list of images (or may have dropped to 'im') if(is.im(numerators)) numerators <- list(numerators) ratio <- lapply(numerators, "/", e2=denominator) if(!is.null(uhoh)) { ## compute nearest neighbour map on same raster distX <- distmap(X, xy=denominator) whichnnX <- attr(distX, "index") ## fix images for(j in 1:length(ratio)) { ratj <- ratio[[j]] valj <- marx[,j] ratio[[j]] <- eval.im(ifelseXY(is.finite(ratj), ratj, valj[whichnnX])) } attr(ratio, "warnings") <- uhoh } result <- as.solist(ratio) names(result) <- colnames(marx) }) } else result <- NULL if(anyisconst) { partresult <- result switch(at, points = { nX <- npoints(X) result <- matrix(, nX, ncol(oldmarx)) if(length(partresult) > 0) result[,!isconst] <- as.matrix(partresult) result[,isconst] <- rep(ra[1,isconst], each=nX) 
colnames(result) <- colnames(oldmarx) }, pixels = { result <- vector(mode="list", length=ncol(oldmarx)) if(length(partresult) > 0) { result[!isconst] <- partresult M <- as.owin(partresult[[1]]) } else { M <- do.call.matched(as.mask, list(w=as.owin(X), ...)) } result[isconst] <- lapply(ra[1, isconst], as.im, W=M) result <- as.solist(result) names(result) <- colnames(oldmarx) }) } } ## wrap up attr(result, "warnings") <- unlist(lapply(result, attr, which="warnings")) attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } smoothpointsEngine <- function(x, values, sigma, ..., weights=NULL, varcov=NULL, leaveoneout=TRUE, sorted=FALSE, cutoff=NULL) { debugging <- spatstat.options("developer") stopifnot(is.logical(leaveoneout)) #' detect constant values if(diff(range(values, na.rm=TRUE)) == 0) { result <- values attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } #' Contributions from pairs of distinct points #' closer than 8 standard deviations sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov))) if(is.null(cutoff)) cutoff <- 8 * sd if(debugging) cat(paste("cutoff=", cutoff, "\n")) ## Handle weights that are meant to be null if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0)) weights <- NULL # detect very small bandwidth nnd <- nndist(x) nnrange <- range(nnd) if(cutoff < nnrange[1]) { if(leaveoneout && (npoints(x) > 1)) { warning("Very small bandwidth; values of nearest neighbours returned") result <- values[nnwhich(x)] } else { warning("Very small bandwidth; original values returned") result <- values } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "warnings") <- "underflow" return(result) } if(leaveoneout) { # ensure cutoff includes at least one point cutoff <- max(1.1 * nnrange[2], cutoff) } if(spatstat.options("densityTransform") && spatstat.options("densityC")) { ## .................. experimental C code ..................... 
    ## Experimental C implementation (enabled by spatstat.options
    ## "densityTransform" + "densityC"): transform the coordinates so the
    ## kernel becomes the standard isotropic Gaussian, then call the
    ## Gaussian-specific C routines Gsmoopt/Gwtsmoopt.
    if(debugging) cat('Using experimental code!\n')
    npts <- npoints(x)
    result <- numeric(npts)
    ## transform to standard coordinates
    xx <- x$x
    yy <- x$y
    if(is.null(varcov)) {
      ## isotropic kernel: rescale both axes by sigma * sqrt(2)
      xx <- xx/(sqrt(2) * sigma)
      yy <- yy/(sqrt(2) * sigma)
    } else {
      ## anisotropic kernel: map through the square root of inv(varcov)/2
      Sinv <- solve(varcov)
      xy <- cbind(xx, yy) %*% matrixsqrt(Sinv/2)
      xx <- xy[,1]
      yy <- xy[,2]
      ## the transformation changes the x ordering, so force a re-sort below
      sorted <- FALSE
    }
    ## cutoff in standard coordinates
    cutoff <- cutoff/(sqrt(2) * sd)
    ## sort into increasing order of x coordinate (required by C code)
    if(!sorted) {
      oo <- fave.order(xx)
      xx <- xx[oo]
      yy <- yy[oo]
      vv <- values[oo]
    } else {
      vv <- values
    }
    if(is.null(weights)) {
      ## unweighted smoothing
      zz <- .C("Gsmoopt",
               nxy = as.integer(npts),
               x = as.double(xx),
               y = as.double(yy),
               v = as.double(vv),
               self = as.integer(!leaveoneout),
               rmaxi = as.double(cutoff),
               result = as.double(double(npts)),
               PACKAGE = "spatstat")
      if(sorted) result <- zz$result else result[oo] <- zz$result
    } else {
      ## weighted smoothing
      ## NOTE(review): 'oo' is only defined when sorted=FALSE above;
      ## if sorted=TRUE and weights are supplied, this lookup would fail
      ## with an undefined-object error -- confirm callers never hit this.
      wtsort <- weights[oo]
      zz <- .C("Gwtsmoopt",
               nxy = as.integer(npts),
               x = as.double(xx),
               y = as.double(yy),
               v = as.double(vv),
               self = as.integer(!leaveoneout),
               rmaxi = as.double(cutoff),
               weight = as.double(wtsort),
               result = as.double(double(npts)),
               PACKAGE = "spatstat")
      if(sorted) result <- zz$result else result[oo] <- zz$result
    }
    if(any(nbg <- (is.infinite(result) | is.nan(result)))) {
      # NaN or +/-Inf can occur if bandwidth is small
      # Use mark of nearest neighbour (by l'Hopital's rule)
      result[nbg] <- values[nnwhich(x)[nbg]]
    }
  } else if(spatstat.options("densityC")) {
    # .................. C code ...........................
if(debugging) cat('Using standard code.\n') npts <- npoints(x) result <- numeric(npts) # sort into increasing order of x coordinate (required by C code) if(sorted) { xx <- x$x yy <- x$y vv <- values } else { oo <- fave.order(x$x) xx <- x$x[oo] yy <- x$y[oo] vv <- values[oo] } if(is.null(varcov)) { # isotropic kernel if(is.null(weights)) { zz <- .C("smoopt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else { wtsort <- weights[oo] zz <- .C("wtsmoopt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sig = as.double(sd), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } } else { # anisotropic kernel Sinv <- solve(varcov) flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C("asmoopt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else { wtsort <- weights[oo] zz <- .C("awtsmoopt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } } if(any(nbg <- (is.infinite(result) | is.nan(result)))) { # NaN or +/-Inf can occur if bandwidth is small # Use mark of nearest neighbour (by l'Hopital's rule) result[nbg] <- values[nnwhich(x)[nbg]] } } else { # previous, 
partly interpreted code # compute weighted densities if(is.null(weights)) { # weights are implicitly equal to 1 numerator <- do.call(density.ppp, resolve.defaults(list(x=x, at="points"), list(weights = values), list(sigma=sigma, varcov=varcov), list(leaveoneout=leaveoneout), list(sorted=sorted), list(...), list(edge=FALSE))) denominator <- do.call(density.ppp, resolve.defaults(list(x=x, at="points"), list(sigma=sigma, varcov=varcov), list(leaveoneout=leaveoneout), list(sorted=sorted), list(...), list(edge=FALSE))) } else { numerator <- do.call(density.ppp, resolve.defaults(list(x=x, at="points"), list(weights = values * weights), list(sigma=sigma, varcov=varcov), list(leaveoneout=leaveoneout), list(sorted=sorted), list(...), list(edge=FALSE))) denominator <- do.call(density.ppp, resolve.defaults(list(x=x, at="points"), list(weights = weights), list(sigma=sigma, varcov=varcov), list(leaveoneout=leaveoneout), list(sorted=sorted), list(...), list(edge=FALSE))) } if(is.null(uhoh <- attr(numerator, "warnings"))) { result <- numerator/denominator result <- ifelseXB(is.finite(result), result, NA) } else { warning("returning original values") result <- values attr(result, "warnings") <- uhoh } } # pack up and return attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } markmean <- function(X, ...) { stopifnot(is.marked(X)) Y <- Smooth(X, ...) return(Y) } markvar <- function(X, sigma=NULL, ..., weights=NULL, varcov=NULL) { stopifnot(is.marked(X)) if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame()) E1 <- Smooth(X, sigma=sigma, varcov=varcov, weights=weights, ...) X2 <- X %mark% marks(X)^2 ## ensure smoothing bandwidth is the same! sigma <- attr(E1, "sigma") varcov <- attr(E1, "varcov") E2 <- Smooth(X2, sigma=sigma, varcov=varcov, weights=weights, ...) 
V <- eval.im(E2 - E1^2) return(V) } bw.smoothppp <- function(X, nh=spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) { stopifnot(is.ppp(X)) stopifnot(is.marked(X)) X <- coerce.marks.numeric(X) # rearrange in ascending order of x-coordinate (for C code) X <- X[fave.order(X$x)] # marx <- marks(X) dimmarx <- dim(marx) if(!is.null(dimmarx)) marx <- as.matrix(as.data.frame(marx)) # determine a range of bandwidth values # n <- npoints(X) if(is.null(hmin) || is.null(hmax)) { W <- Window(X) # a <- area(W) d <- diameter(as.rectangle(W)) # Stoyan's rule of thumb stoyan <- bw.stoyan(X) # rule of thumb based on nearest-neighbour distances nnd <- nndist(X) nnd <- nnd[nnd > 0] if(is.null(hmin)) { hmin <- max(1.1 * min(nnd), stoyan/5) hmin <- min(d/8, hmin) } if(is.null(hmax)) { hmax <- max(stoyan * 20, 3 * mean(nnd), hmin * 2) hmax <- min(d/2, hmax) } } else stopifnot(hmin < hmax) # h <- geomseq(from=hmin, to=hmax, length.out=nh) cv <- numeric(nh) # # compute cross-validation criterion for(i in seq_len(nh)) { yhat <- Smooth(X, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE) if(!is.null(dimmarx)) yhat <- as.matrix(as.data.frame(yhat)) cv[i] <- mean((marx - yhat)^2) } # optimize iopt <- which.min(cv) # hopt <- h[iopt] # if(warn && (iopt == nh || iopt == 1)) warning(paste("Cross-validation criterion was minimised at", if(iopt == 1) "left-hand" else "right-hand", "end of interval", paste(prange(signif(c(hmin, hmax), 3)), ";", sep=""), "use arguments hmin, hmax to specify a wider interval"), call.=FALSE) # result <- bw.optim(cv, h, iopt, hname="sigma", creator="bw.smoothppp", criterion="Least Squares Cross-Validation", unitname=unitname(X)) return(result) } smoothcrossEngine <- function(Xdata, Xquery, values, sigma, ..., weights=NULL, varcov=NULL, sorted=FALSE) { # if(is.null(varcov)) { # const <- 1/(2 * pi * sigma^2) # } else { # detSigma <- det(varcov) # Sinv <- solve(varcov) # const <- 1/(2 * pi * sqrt(detSigma)) # } if(!is.null(dim(weights))) stop("weights 
must be a vector") if(npoints(Xquery) == 0 || npoints(Xdata) == 0) { if(is.null(dim(values))) return(rep(NA, npoints(Xquery))) nuttin <- matrix(NA, nrow=npoints(Xquery), ncol=ncol(values)) colnames(nuttin) <- colnames(values) return(nuttin) } ## Contributions from pairs of distinct points ## closer than 8 standard deviations sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov))) cutoff <- 8 * sd ## detect very small bandwidth nnc <- nncross(Xquery, Xdata) if(cutoff < min(nnc$dist)) { if(npoints(Xdata) > 1) { warning("Very small bandwidth; values of nearest neighbours returned") nw <- nnc$which result <- if(is.null(dim(values))) values[nw] else values[nw,,drop=FALSE] } else { warning("Very small bandwidth; original values returned") result <- values } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "warnings") <- "underflow" return(result) } ## Handle weights that are meant to be null if(length(weights) == 0) weights <- NULL ## handle multiple columns of values if(is.matrix(values) || is.data.frame(values)) { k <- ncol(values) stopifnot(nrow(values) == npoints(Xdata)) values <- as.data.frame(values) result <- matrix(, npoints(Xdata), k) colnames(result) <- colnames(values) if(!sorted) { ood <- fave.order(Xdata$x) Xdata <- Xdata[ood] values <- values[ood, ] ooq <- fave.order(Xquery$x) Xquery <- Xquery[ooq] } for(j in 1:k) result[,j] <- smoothcrossEngine(Xdata, Xquery, values[,j], sigma=sigma, varcov=varcov, weights=weights, sorted=TRUE, ...) 
if(!sorted) { sortresult <- result result[ooq,] <- sortresult } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } ## values must be a vector stopifnot(length(values) == npoints(Xdata) || length(values) == 1) if(length(values) == 1) values <- rep(values, npoints(Xdata)) ndata <- npoints(Xdata) nquery <- npoints(Xquery) result <- numeric(nquery) ## coordinates and values xq <- Xquery$x yq <- Xquery$y xd <- Xdata$x yd <- Xdata$y vd <- values if(!sorted) { ## sort into increasing order of x coordinate (required by C code) ooq <- fave.order(Xquery$x) xq <- xq[ooq] yq <- yq[ooq] ood <- fave.order(Xdata$x) xd <- xd[ood] yd <- yd[ood] vd <- vd[ood] } if(is.null(varcov)) { ## isotropic kernel if(is.null(weights)) { zz <- .C("crsmoopt", nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(nquery)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { wtsort <- weights[ood] zz <- .C("wtcrsmoopt", nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), wd = as.double(wtsort), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(nquery)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[ooq] <- zz$result } } else { # anisotropic kernel Sinv <- solve(varcov) flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C("acrsmoopt", nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { wtsort <- weights[ood] zz <- 
.C("awtcrsmoopt", nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), wd = as.double(wtsort), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[ooq] <- zz$result } } if(any(nbg <- (is.infinite(result) | is.nan(result)))) { # NaN or +/-Inf can occur if bandwidth is small # Use mark of nearest neighbour (by l'Hopital's rule) result[nbg] <- values[nnc$which[nbg]] } # pack up and return attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } ExpSmoothLog <- function(X, ..., at=c("pixels", "points"), weights=NULL) { verifyclass(X, "ppp") at <- match.arg(at) if(!is.null(weights)) check.nvector(weights, npoints(X)) X <- coerce.marks.numeric(X) marx <- marks(X) d <- dim(marx) if(!is.null(d) && d[2] > 1) { switch(at, points = { Z <- lapply(unstack(X), ExpSmoothLog, ..., at=at, weights=weights) Z <- do.call(data.frame(Z)) }, pixels = { Z <- solapply(unstack(X), ExpSmoothLog, ..., at=at, weights=weights) }) return(Z) } # vector or single column of numeric marks v <- as.numeric(marx) vmin <- min(v) if(vmin < 0) stop("Negative values in geometric mean smoothing", call.=FALSE) Y <- X %mark% log(v) if(vmin > 0) { Z <- Smooth(Y, ..., at=at, weights=weights) } else { yok <- is.finite(marks(Y)) YOK <- Y[yok] weightsOK <- if(is.null(weights)) NULL else weights[yok] switch(at, points = { Z <- rep(-Inf, npoints(X)) Z[yok] <- Smooth(YOK, ..., at=at, weights=weightsOK) }, pixels = { isfinite <- nnmark(Y %mark% yok, ...) support <- solutionset(isfinite) Window(YOK) <- support Z <- as.im(-Inf, W=Window(Y), ...) 
Z[support] <- Smooth(YOK, ..., at=at, weights=weightsOK)[] }) } return(exp(Z)) } spatstat/R/distmap.R0000755000176200001440000000651013115271075014111 0ustar liggesusers# # # distmap.R # # $Revision: 1.23 $ $Date: 2017/06/05 10:31:58 $ # # # Distance transforms # # distmap <- function(X, ...) { UseMethod("distmap") } distmap.ppp <- function(X, ...) { verifyclass(X, "ppp") e <- exactdt(X, ...) W <- e$w uni <- unitname(W) dmat <- e$d imat <- e$i V <- im(dmat, W$xcol, W$yrow, unitname=uni) I <- im(imat, W$xcol, W$yrow, unitname=uni) if(X$window$type == "rectangle") { # distance to frame boundary bmat <- e$b B <- im(bmat, W$xcol, W$yrow, unitname=uni) } else { # distance to window boundary, not frame boundary bmat <- bdist.pixels(W, style="matrix") B <- im(bmat, W$xcol, W$yrow, unitname=uni) # clip all to window V <- V[W, drop=FALSE] I <- I[W, drop=FALSE] B <- B[W, drop=FALSE] } attr(V, "index") <- I attr(V, "bdry") <- B return(V) } distmap.owin <- function(X, ..., discretise=FALSE, invert=FALSE) { verifyclass(X, "owin") uni <- unitname(X) if(X$type == "rectangle") { M <- as.mask(X, ...) Bdry <- im(bdist.pixels(M, style="matrix"), M$xcol, M$yrow, unitname=uni) if(!invert) Dist <- as.im(M, value=0) else Dist <- Bdry } else if(X$type == "polygonal" && !discretise) { Edges <- edges(X) Dist <- distmap(Edges, ...) Bdry <- attr(Dist, "bdry") if(!invert) Dist[X] <- 0 else { bb <- as.rectangle(X) bigbox <- grow.rectangle(bb, diameter(bb)/4) Dist[complement.owin(X, bigbox)] <- 0 } } else { X <- as.mask(X, ...) 
if(invert) X <- complement.owin(X) xc <- X$xcol yr <- X$yrow nr <- X$dim[1L] nc <- X$dim[2L] # pad out the input image with a margin of width 1 on all sides mat <- X$m pad <- invert # boundary condition is opposite of value inside W mat <- cbind(pad, mat, pad) mat <- rbind(pad, mat, pad) # call C routine res <- .C("distmapbin", xmin=as.double(X$xrange[1L]), ymin=as.double(X$yrange[1L]), xmax=as.double(X$xrange[2L]), ymax=as.double(X$yrange[2L]), nr = as.integer(nr), nc = as.integer(nc), inp = as.integer(as.logical(t(mat))), distances = as.double(matrix(0, ncol = nc + 2, nrow = nr + 2)), boundary = as.double(matrix(0, ncol = nc + 2, nrow = nr + 2)), PACKAGE = "spatstat") # strip off margins again dist <- matrix(res$distances, ncol = nc + 2, byrow = TRUE)[2:(nr + 1), 2:(nc +1)] bdist <- matrix(res$boundary, ncol = nc + 2, byrow = TRUE)[2:(nr + 1), 2:(nc +1)] # cast as image objects Dist <- im(dist, xc, yr, unitname=uni) Bdry <- im(bdist, xc, yr, unitname=uni) } attr(Dist, "bdry") <- Bdry return(Dist) } distmap.psp <- function(X, ...) { verifyclass(X, "psp") W <- as.mask(Window(X), ...) uni <- unitname(W) rxy <- rasterxy.mask(W) xp <- rxy$x yp <- rxy$y E <- X$ends big <- 2 * diameter(Frame(W))^2 z <- NNdist2segments(xp, yp, E$x0, E$y0, E$x1, E$y1, big) xc <- W$xcol yr <- W$yrow Dist <- im(array(sqrt(z$dist2), dim=W$dim), xc, yr, unitname=uni) Indx <- im(array(z$index, dim=W$dim), xc, yr, unitname=uni) Bdry <- im(bdist.pixels(W, style="matrix"), xc, yr, unitname=uni) attr(Dist, "index") <- Indx attr(Dist, "bdry") <- Bdry return(Dist) } spatstat/R/psp2pix.R0000755000176200001440000000646513115271120014055 0ustar liggesusers# # psp2pix.R # # $Revision: 1.11 $ $Date: 2017/06/05 10:31:58 $ # # as.mask.psp <- function(x, W=NULL, ...) 
{ L <- as.psp(x) if(is.null(W)) W <- as.owin(L) else W <- as.owin(W) W <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=W))) ends <- L$ends nseg <- nrow(ends) if(nseg == 0) { # empty W$m[] <- FALSE return(W) } x0 <- (ends$x0 - W$xrange[1])/W$xstep x1 <- (ends$x1 - W$xrange[1])/W$xstep y0 <- (ends$y0 - W$yrange[1])/W$ystep y1 <- (ends$y1 - W$yrange[1])/W$ystep nr <- W$dim[1] nc <- W$dim[2] zz <- .C("seg2pixI", ns=as.integer(nseg), x0=as.double(x0), y0=as.double(y0), x1=as.double(x1), y1=as.double(y1), nx=as.integer(nc), ny=as.integer(nr), out=as.integer(integer(nr * nc)), PACKAGE = "spatstat") mm <- matrix(zz$out, nr, nc) # intersect with existing window W$m <- W$m & mm W } pixellate.psp <- function(x, W=NULL, ..., weights=NULL, what=c("length", "number")) { L <- as.psp(x) what <- match.arg(what) if(is.null(W)) W <- as.owin(L) else W <- as.owin(W) W <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=W))) Z <- as.im(W) ends <- L$ends nseg <- nrow(ends) if(nseg == 0) { # empty Z$v[] <- 0 return(Z) } if(is.null(weights)) weights <- rep.int(1, nseg) else { if(!is.numeric(weights)) stop("weights must be numeric") if(anyNA(weights)) stop("weights must not be NA") if(!all(is.finite(weights))) stop("weights must not be infinite") if(length(weights) == 1) weights <- rep.int(weights, nseg) else if(length(weights) != nseg) stop(paste("weights vector has length", length(weights), "but there are", nseg, "line segments")) } x0 <- (ends$x0 - Z$xrange[1])/Z$xstep x1 <- (ends$x1 - Z$xrange[1])/Z$xstep y0 <- (ends$y0 - Z$yrange[1])/Z$ystep y1 <- (ends$y1 - Z$yrange[1])/Z$ystep nr <- Z$dim[1] nc <- Z$dim[2] switch(what, length = { zz <- .C("seg2pixL", ns=as.integer(nseg), x0=as.double(x0), y0=as.double(y0), x1=as.double(x1), y1=as.double(y1), weights=as.double(weights), pixwidth=as.double(Z$xstep), pixheight=as.double(Z$ystep), nx=as.integer(nc), ny=as.integer(nr), out=as.double(numeric(nr * nc)), PACKAGE = "spatstat") }, number = { zz <- .C("seg2pixN", 
ns=as.integer(nseg),
x0=as.double(x0),
y0=as.double(y0),
x1=as.double(x1),
y1=as.double(y1),
w=as.double(weights),
nx=as.integer(nc),
ny=as.integer(nr),
out=as.double(numeric(nr * nc)),
PACKAGE = "spatstat")
})
mm <- matrix(zz$out, nr, nc)
# pixels outside the window of Z are NA in Z$v; propagate that
mm[is.na(Z$v)] <- NA
## intersect with existing window
Z$v <- mm
Z
}
spatstat/R/nnmark.R0000644000176200001440000000225513115225157013735 0ustar liggesusers#
# nnmark.R
#
# $Revision: 1.6 $ $Date: 2015/10/21 09:06:57 $

nnmark <- local({

  # Mark value of the k-th nearest data point.
  # X must be a marked point pattern.
  # at="pixels": returns an image (or list of images for data frame marks)
  #              giving, at each pixel, the mark of the k-th nearest point.
  # at="points": returns, for each data point, the mark of its
  #              k-th nearest neighbour.
  nnmark <- function(X, ..., k=1, at=c("pixels", "points")) {
    stopifnot(is.ppp(X))
    stopifnot(is.marked(X))
    at <- match.arg(at)
    mX <- marks(X)
    switch(at,
           pixels = {
             # image of the index of the k-th nearest data point
             Y <- nnmap(X, k=k, what="which", ...)
             switch(markformat(X),
                    vector={
                      result <- eval.im(mX[Y])
                    },
                    dataframe = {
                      # one image per mark column
                      result <- solapply(mX, lookedup, indeximage=Y)
                    },
                    stop("Marks must be a vector or dataframe"))
           },
           points = {
             Y <- nnwhich(X, k=k)
             switch(markformat(X),
                    vector={
                      result <- mX[Y]
                    },
                    dataframe = {
                      result <- mX[Y,, drop=FALSE]
                      row.names(result) <- NULL
                    },
                    stop("Marks must be a vector or dataframe"))
           })
    return(result)
  }

  # helper: look up vector of values by an index image
  lookedup <- function(xvals, indeximage) eval.im(xvals[indeximage])

  nnmark
})
spatstat/R/convexify.R0000644000176200001440000000076013115225157014460 0ustar liggesusers##
## convexify.R
##
## $Revision: 1.1 $ $Date: 2015/10/23 12:34:17 $

# Convexifying operation: rearrange the edges of (a polygonal
# approximation to) W in order of direction, yielding a convex set.
convexify <- function(W, eps) {
  if(!is.polygonal(W)) {
    # convert to a polygon first; 'eps' controls the approximation
    if(missing(eps)) eps <- diameter(Frame(W))/20
    W <- simplify.owin(W, eps)
  }
  e <- edges(W)
  len <- lengths.psp(e)
  ang <- angles.psp(e, directed=TRUE)
  df <- data.frame(ang=ang, len=len)
  # sort edges by direction, then chain them end-to-end
  df <- df[order(df$ang), ]
  df <- within(df, { dx <- len * cos(ang); dy <- len * sin(ang)})
  owin(poly=with(df, list(x=cumsum(c(0,dx)), y=cumsum(c(0,dy)))))
}
spatstat/R/rescale.R0000755000176200001440000000313113115271120014053 0ustar liggesusers#
#
#   rescale.R
#
#   $Revision: 1.6 $ $Date: 2014/10/24 00:22:30 $
#
#

# Generic: change the unit of length in a spatial object.
rescale <- function(X, s, unitname) {
  UseMethod("rescale")
}

rescale.ppp <- function(X, s, unitname) {
  if(missing(unitname)) unitname <- NULL
if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
Y <- affine.ppp(X, mat=diag(c(1/s,1/s)))
unitname(Y) <- rescale(unitname(X), s, unitname)
return(Y)
}

# Rescale a window: divide all coordinates by the scale factor 's'.
rescale.owin <- function(X, s, unitname) {
  if(missing(unitname)) unitname <- NULL
  # default scale factor converts to native units
  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
  Y <- affine.owin(X, mat=diag(c(1/s,1/s)))
  unitname(Y) <- rescale(unitname(X), s, unitname)
  return(Y)
}

# Rescale a pixel image: divide all spatial coordinates by 's'.
# Pixel values are unchanged.
rescale.im <- function(X, s, unitname) {
  if(missing(unitname)) unitname <- NULL
  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
  Y <- X
  Y$xrange <- X$xrange/s
  Y$yrange <- X$yrange/s
  Y$xstep  <- X$xstep/s
  Y$ystep  <- X$ystep/s
  Y$xcol   <- X$xcol/s
  Y$yrow   <- X$yrow/s
  unitname(Y) <- rescale(unitname(X), s, unitname)
  return(Y)
}

# Rescale a line segment pattern: divide all coordinates by 's'.
rescale.psp <- function(X, s, unitname) {
  if(missing(unitname)) unitname <- NULL
  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
  Y <- affine.psp(X, mat=diag(c(1/s,1/s)))
  unitname(Y) <- rescale(unitname(X), s, unitname)
  return(Y)
}

# Rescale a unitname object: multiply the unit multiplier by 's',
# or reset it to 1 if 's' is missing. If 'unitname' is given, it
# simply replaces the unit name.
rescale.units <- function(X, s, unitname) {
  if(!missing(unitname) && !is.null(unitname)) return(as.units(unitname))
  if(summary(X)$vanilla) return(X)
  if(missing(s)) {
    X$multiplier <- 1
  } else {
    if(!is.numeric(s) || length(s) != 1 || s <= 0)
      stop("s should be a positive number")
    X$multiplier <- s * X$multiplier
  }
  return(X)
}
spatstat/R/nnorient.R0000644000176200001440000001006313115225157014277 0ustar liggesusers##
## nnorient.R
##
## nearest neighbour pair orientation distribution
##
## Function \vartheta(phi) defined in
## Illian et al (2008) equ (4.5.3) page 253
##
## $Revision: 1.3 $ $Date: 2014/12/05 07:31:57 $

nnorient <- function(X, ..., cumulative=FALSE, correction, k = 1,
                     unit=c("degree", "radian"),
                     domain=NULL, ratio=FALSE) {
  stopifnot(is.ppp(X))
  check.1.integer(k)
  stopifnot(k>=1)
  W <- Window(X)
  if(!is.null(domain)) stopifnot(is.subset.owin(domain, W))
  unit <- match.arg(unit)
  switch(unit,
         degree = {
           FullCircle <- 360
           Convert <- 180/pi
         },
         radian = {
           FullCircle <- 2 * pi
           Convert <- 1
         })
  ## choose correction(s)
  correction.given <- !missing(correction) && !is.null(correction)
  if(!correction.given)
    correction <- c("bord.modif", "none")
  correction <- pickoption("correction", correction,
                           c(none="none",
                             bord.modif="bord.modif",
                             good="good",
                             best="best"),
                           multi=TRUE)
  correction[correction %in% c("good", "best")] <- "bord.modif"
  ## process point pattern
  Xcoord <- coords(X)
  Ycoord <- Xcoord[nnwhich(X, k=k), ]
  if(!is.null(domain)) {
    ## restrict to pairs whose first point lies in 'domain'
    inD <- inside.owin(Xcoord$x, Xcoord$y, domain)
    Xcoord <- Xcoord[inD,]
    Ycoord <- Ycoord[inD,]
  }
  ## BUG FIX: the displacement vectors must be computed unconditionally.
  ## Previously this statement was attached as an 'else' clause of the
  ## 'domain' branch above, so 'dYX' was never defined when 'domain'
  ## was supplied, causing "object 'dYX' not found" below.
  dYX <- Ycoord-Xcoord
  ANGLE <- with(dYX, atan2(y, x) * Convert) %% FullCircle
  nangles <- length(ANGLE)
  ## initialise output object
  Nphi <- 512
  breaks <- make.even.breaks(bmax=FullCircle, npos=Nphi-1)
  phi <- breaks$r
  Odf <- data.frame(phi = phi,
                    theo = (if(cumulative) phi else 1)/FullCircle)
  desc <- c("angle argument phi",
            "theoretical isotropic %s")
  NOletter <- if(cumulative) "Theta" else "vartheta"
  NOsymbol <- as.name(NOletter)
  NNO <- ratfv(Odf, NULL, denom=nangles,
               argu="phi",
               ylab=substitute(fn(phi), list(fn=NOsymbol)),
               valu="theo",
               fmla = . ~ phi,
               alim = c(0, FullCircle),
               c("phi", "{%s[%s]^{pois}}(phi)"),
               desc,
               fname=NOletter,
               yexp=substitute(fn(phi), list(fn=NOsymbol)))
  ## ^^^^^^^^^^^^^^^ Compute edge corrected estimates ^^^^^^^^^^^^^^^^
  if(any(correction == "none")) {
    ## uncorrected! For demonstration purposes only!
if(cumulative) { wh <- whist(ANGLE, breaks$val) # no weights num.un <- cumsum(wh) } else { kd <- circdensity(ANGLE, ..., n=Nphi, unit=unit) num.un <- kd$y * nangles } den.un <- nangles ## uncorrected estimate NNO <- bind.ratfv(NNO, data.frame(un=num.un), den.un, "{hat(%s)[%s]^{un}}(phi)", "uncorrected estimate of %s", "un", ratio=ratio) } if("bord.modif" %in% correction) { ## border type correction bX <- bdist.points(X) nndX <- nndist(X, k=k) if(!is.null(domain)) { bX <- bX[inD] nndX <- nndX[inD] } ok <- (nndX < bX) nok <- sum(ok) rr <- seq(0, max(bX), length=256) Ar <- eroded.areas(W, rr) Arf <- approxfun(rr, Ar, rule=2) AI <- Arf(bX) edgewt <- ifelse(ok, pmin(area(W)/AI, 100), 0) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.bm <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.bm <- kd$y * nok } den.bm <- nok NNO <- bind.ratfv(NNO, data.frame(bordm=num.bm), den.bm, "{hat(%s)[%s]^{bordm}}(phi)", "modified border-corrected estimate of %s", "bordm", ratio=ratio) } unitname(NNO) <- switch(unit, degree = c("degree", "degrees"), radian = c("radian", "radians")) return(NNO) } spatstat/R/bw.optim.R0000755000176200001440000000624613115271075014215 0ustar liggesusers# # bw.optim.R # # Class of optimised bandwidths # Plotting the object displays the optimisation criterion # # $Revision: 1.25 $ $Date: 2016/04/25 02:34:40 $ # bw.optim <- function(cv, h, iopt=which.min(cv), ..., cvname, hname, criterion="cross-validation", unitname=NULL) { if(missing(cvname) || is.null(cvname)) cvname <- deparse(substitute(cv)) if(missing(hname) || is.null(hname)) hname <- deparse(substitute(h)) stopifnot(is.numeric(cv)) stopifnot(is.numeric(h)) stopifnot(length(h) == length(cv)) result <- h[iopt] attr(result, "cv") <- cv attr(result, "h") <- h attr(result, "iopt") <- iopt attr(result, "labels") <- list(hname=hname, cvname=cvname) attr(result, "info") <- list(...) 
attr(result, "criterion") <- criterion attr(result, "units") <- unitname class(result) <- "bw.optim" return(result) } print.bw.optim <- function(x, ...) { y <- as.numeric(x) names(y) <- attr(x, "labels")$hname print(y, ...) return(invisible(NULL)) } as.data.frame.bw.optim <- function(x, ...) { h <- attr(x, "h") cv <- attr(x, "cv") df <- data.frame(h, cv) labels <- attr(x, "labels") colnames(df) <- labels[c("hname", "cvname")] info <- attr(x, "info") if(length(info) > 0) { lenfs <- lengths(info) if(any(ok <- (lenfs == nrow(df)))) { df <- cbind(df, as.data.frame(info[ok])) } } return(df) } as.fv.bw.optim <- function(x) { # convert to fv object df <- as.data.frame(x) dfnames <- colnames(df) hname <- dfnames[1L] cvname <- dfnames[2L] descrip <- c("smoothing parameter", paste(attr(x, "criterion"), "criterion")) if(ncol(df) > 2) descrip <- c(descrip, paste("Additional variable", sQuote(dfnames[-(1:2)]))) labl <- c(hname, paste0(dfnames[-1L], paren(hname))) yexp <- substitute(CV(h), list(CV=as.name(cvname), h=as.name(hname))) xfv <- fv(df, argu=hname, ylab=yexp, valu=cvname, labl=labl, desc=descrip, fname=cvname, yexp=yexp) fvnames(xfv, ".") <- cvname unitname(xfv) <- unitname(x) return(xfv) } plot.bw.optim <- function(x, ..., showopt=TRUE, optargs=list(lty=3, col="blue")) { xname <- short.deparse(substitute(x)) # convert to fv object xfv <- as.fv(x) # plot cross-validation criterion out <- do.call(plot.fv, resolve.defaults(list(x=xfv), list(...), list(main=xname))) # Turn off 'showopt' if the x-variable is not the bandwidth if(missing(showopt)) { argh <- list(...) isfmla <- unlist(lapply(argh, inherits, what="formula")) if(any(isfmla)) { fmla <- argh[[min(which(isfmla))]] xvar <- deparse(rhs.of.formula(fmla, tilde=FALSE)) if(!(identical(xvar, fvnames(xfv, ".x")) || identical(xvar, ".x"))) showopt <- FALSE } } # show optimal value? 
if(showopt) { hoptim <- as.numeric(x) if(spatstat.options('monochrome')) optargs <- col.args.to.grey(optargs) do.call(abline, append(list(v=hoptim), optargs)) } if(is.null(out)) return(invisible(NULL)) return(out) } spatstat/R/breakpts.R0000755000176200001440000001561113115271075014265 0ustar liggesusers# # breakpts.S # # A simple class definition for the specification # of histogram breakpoints in the special form we need them. # # even.breaks() # # $Revision: 1.21 $ $Date: 2017/06/05 10:31:58 $ # # # Other functions in this directory use the standard Splus function # hist() to compute histograms of distance values. # One argument of hist() is the vector 'breaks' # of breakpoints for the histogram cells. # # The breakpoints must # (a) span the range of the data # (b) be given in increasing order # (c) satisfy breaks[2] = 0, # # The function make.even.breaks() will create suitable breakpoints. # # Condition (c) means that the first histogram cell has # *right* endpoint equal to 0. # # Since all our distance values are nonnegative, the effect of (c) is # that the first histogram cell counts the distance values which are # exactly equal to 0. Hence F(0), the probability P{X = 0}, # is estimated without a discretisation bias. # # We assume the histograms have followed the default counting rule # in hist(), which is such that the k-th entry of the histogram # counts the number of data values in # I_k = ( breaks[k],breaks[k+1] ] for k > 1 # I_1 = [ breaks[1],breaks[2] ] # # The implementations of estimators of c.d.f's in this directory # produce vectors of length = length(breaks)-1 # with value[k] = estimate of F(breaks[k+1]), # i.e. value[k] is an estimate of the c.d.f. at the RIGHT endpoint # of the kth histogram cell. 
# # An object of class 'breakpts' contains: # # $val the actual breakpoints # $max the maximum value (= last breakpoint) # $ncells total number of histogram cells # $r right endpoints, r = val[-1] # $even logical = TRUE if cells known to be evenly spaced # $npos number of histogram cells on the positive halfline # = length(val) - 2, # or NULL if cells not evenly spaced # $step histogram cell width # or NULL if cells not evenly spaced # # -------------------------------------------------------------------- breakpts <- function(val, maxi, even=FALSE, npos=NULL, step=NULL) { out <- list(val=val, max=maxi, ncells=length(val)-1L, r = val[-1L], even=even, npos=npos, step=step) class(out) <- "breakpts" out } scalardilate.breakpts <- function(X, f, ...) { out <- with(X, list(val = f*val, max = f*max, ncells = ncells, r = f*r, even = even, npos = npos, step = f*step)) class(out) <- "breakpts" out } "make.even.breaks" <- function(bmax, npos, bstep) { if(bmax <= 0) stop("bmax must be positive") if(missing(bstep) && missing(npos)) stop(paste("Must specify either", sQuote("bstep"), "or", sQuote("npos"))) if(!missing(npos)) { bstep <- bmax/npos val <- seq(from=0, to=bmax, length.out=npos+1L) val <- c(-bstep,val) right <- bmax } else { npos <- ceiling(bmax/bstep) right <- bstep * npos val <- seq(from=0, to=right, length.out=npos+1L) val <- c(-bstep,val) } breakpts(val, right, TRUE, npos, bstep) } "as.breakpts" <- function(...) { XL <- list(...) 
if(length(XL) == 1L) { # single argument X <- XL[[1L]] if(!is.null(class(X)) && class(X) == "breakpts") # X already in correct form return(X) if(is.vector(X) && length(X) > 2) { # it's a vector if(X[2L] != 0) stop("breakpoints do not satisfy breaks[2] = 0") # The following test for equal spacing is used in hist.default steps <- diff(X) if(diff(range(steps)) < 1e-07 * mean(steps)) # equally spaced return(breakpts(X, max(X), TRUE, length(X)-2, steps[1L])) else # unknown spacing return(breakpts(X, max(X), FALSE)) } } else { # There are multiple arguments. # exactly two arguments - interpret as even.breaks() if(length(XL) == 2) return(make.even.breaks(XL[[1L]], XL[[2L]])) # two arguments 'max' and 'npos' if(!is.null(XL$max) && !is.null(XL$npos)) return(make.even.breaks(XL$max, XL$npos)) # otherwise stop("Don't know how to convert these data to breakpoints") } # never reached } check.hist.lengths <- function(hist, breaks) { verifyclass(breaks, "breakpts") nh <- length(hist) nb <- breaks$ncells if(nh != nb) stop(paste("Length of histogram =", nh, "not equal to number of histogram cells =", nb)) } breakpts.from.r <- function(r) { if(!is.numeric(r) && !is.vector(r)) stop("r must be a numeric vector") if(length(r) < 2) stop(paste("r has length", length(r), "- must be at least 2")) if(r[1L] != 0) stop("First r value must be 0") if(any(diff(r) <= 0)) stop("successive values of r must be increasing") dr <- r[2L] - r[1L] b <- c(-dr, r) return(as.breakpts(b)) } handle.r.b.args <- function(r=NULL, breaks=NULL, window, pixeps=NULL, rmaxdefault=NULL) { if(!is.null(r) && !is.null(breaks)) stop(paste("Do not specify both", sQuote("r"), "and", sQuote("breaks"))) if(!is.null(breaks)) { breaks <- as.breakpts(breaks) } else if(!is.null(r)) { breaks <- breakpts.from.r(r) } else { #' determine rmax #' ignore infinite or NA values of rmaxdefault if(!is.null(rmaxdefault) && !is.finite(rmaxdefault)) rmaxdefault <- NULL rmax <- rmaxdefault %orifnull% diameter(Frame(window)) if(is.null(pixeps)) 
{ pixeps <- if(is.mask(window)) min(window$xstep, window$ystep) else rmax/128 } rstep <- pixeps/4 breaks <- make.even.breaks(rmax, bstep=rstep) } return(breaks) } check.finespacing <- function(r, eps=NULL, win=NULL, rmaxdefault = max(r), context="", action=c("fatal", "warn", "silent"), rname) { if(missing(rname)) rname <- deparse(substitute(r)) action <- match.arg(action) if(is.null(eps)) { b <- handle.r.b.args(window=win, rmaxdefault=rmaxdefault) eps <- b$step } dr <- max(diff(r)) if(dr > eps * 1.01) { whinge <- paste(context, "the successive", rname, "values must be finely spaced:", "given spacing =", paste0(signif(dr, 5), ";"), "required spacing <= ", signif(eps, 3)) switch(action, fatal = stop(whinge, call.=FALSE), warn = warning(whinge, call.=FALSE), silent = {}) return(FALSE) } return(TRUE) } spatstat/R/nndensity.R0000644000176200001440000000173113115225157014460 0ustar liggesusers# # nndensity.R # # Density estimation based on nn distance # # $Revision: 1.3 $ $Date: 2014/10/24 00:22:30 $ # nndensity <- function(x, ...) { UseMethod("nndensity") } nndensity.ppp <- function(x, k, ..., verbose=TRUE) { if(missing(k) || is.null(k)) { k <- round(sqrt(npoints(x))) if(verbose) cat(paste("k=", k, "\n")) } else if(k == 1) warning("k=1 will produce strange results") # distance to k-th nearest neighbour D <- nnmap(x, k=k, what="dist", ...) 
# area searched
A <- eval.im(pi * D^2)
# distance to boundary
B <- bdist.pixels(as.owin(D))
# handle edge effects
edge <- solutionset(B < D)
# centres of all pixels where edge effect occurs
xy <- rasterxy.mask(edge, drop=TRUE)
# corresponding values of distance
rr <- D[edge, drop=TRUE]
# compute actual search area
X <- as.ppp(xy, W=as.owin(x), check=FALSE)
A[edge] <- discpartarea(X, matrix(rr, ncol=1))
# finally compute intensity estimate
L <- eval.im(k/A)
return(L)
}
spatstat/R/crossdistlpp.R0000644000176200001440000000704313115271075015200 0ustar liggesusers#
# crossdistlpp.R
#
# $Revision: 1.6 $ $Date: 2017/06/05 10:31:58 $
#
#  crossdist.lpp
#       Calculates the shortest-path distance from each point of X
#       to each point of Y, where X and Y are point patterns
#       on the same linear network.
#
crossdist.lpp <- function(X, Y, ..., method="C") {
  stopifnot(inherits(X, "lpp"))
  stopifnot(method %in% c("C", "interpreted"))
  check <- resolve.defaults(list(...), list(check=TRUE))$check
  #
  nX <- npoints(X)
  nY <- npoints(Y)
  #
  L <- as.linnet(X, sparse=FALSE)
  if(check) {
    # verify the two patterns live on the same network
    LY <- as.linnet(Y, sparse=FALSE)
    if(!identical(L, LY))
      stop("X and Y are on different linear networks")
  }
  if(any(is.infinite(L$dpath))) {
    #' disconnected network: handle each connected component separately;
    #' distances between components are Inf
    lab <- connected(L, what="labels")
    subsets <- split(seq_len(nvertices(L)), lab)
    crossdistmat <- matrix(Inf,nX,nY)
    for(subi in subsets) {
      Xi <- thinNetwork(X, retainvertices=subi)
      Yi <- thinNetwork(Y, retainvertices=subi)
      whichX <- attr(Xi, "retainpoints")
      whichY <- attr(Yi, "retainpoints")
      crossdistmat[whichX, whichY] <- crossdist.lpp(Xi, Yi, method=method)
    }
    return(crossdistmat)
  }
  # network is connected
  P <- as.ppp(X)
  Q <- as.ppp(Y)
  #
  Lseg  <- L$lines
  Lvert <- L$vertices
  from  <- L$from
  to    <- L$to
  dpath <- L$dpath
  # nearest segment for each point
  Xpro <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)$seg
  Ypro <- coords(Y, local=TRUE, spatial=FALSE, temporal=FALSE)$seg
  if(method == "interpreted") {
    # loop through all pairs of data points
    crossdistmat <- matrix(,nX,nY)
    for (i in 1:nX) {
      Xproi <- Xpro[i]
      Xi <- P[i]
      # endpoints of the segment carrying point i
      nbi1 <- from[Xproi]
      nbi2 <- to[Xproi]
      vi1 <- Lvert[nbi1]
      vi2 <- Lvert[nbi2]
      dXi1 <- crossdist(Xi, vi1)
      dXi2 <- crossdist(Xi, vi2)
      for (j in 1:nY) {
        Yj <- Q[j]
        Yproj <- Ypro[j]
        if(Xproi == Yproj) {
          # points i and j lie on the same segment
          # use Euclidean distance
          d <- crossdist(Xi, Yj)
        } else {
          # shortest path from i to j passes through ends of segments
          nbj1 <- from[Yproj]
          nbj2 <- to[Yproj]
          vj1 <- Lvert[nbj1]
          vj2 <- Lvert[nbj2]
          # Calculate shortest of 4 possible paths from i to j
          d1Yj <- crossdist(vj1,Yj)
          d2Yj <- crossdist(vj2,Yj)
          d11 <- dXi1 + dpath[nbi1,nbj1] + d1Yj
          d12 <- dXi1 + dpath[nbi1,nbj2] + d2Yj
          d21 <- dXi2 + dpath[nbi2,nbj1] + d1Yj
          d22 <- dXi2 + dpath[nbi2,nbj2] + d2Yj
          d <- min(d11,d12,d21,d22)
        }
        # store result
        crossdistmat[i,j] <- d
      }
    }
  } else {
    # C code
    # convert indices to start at 0
    from0 <- from - 1L
    to0 <- to - 1L
    Xsegmap <- Xpro - 1L
    Ysegmap <- Ypro - 1L
    zz <- .C("lincrossdist",
             np = as.integer(nX),
             xp = as.double(P$x),
             yp = as.double(P$y),
             nq = as.integer(nY),
             xq = as.double(Q$x),
             yq = as.double(Q$y),
             nv = as.integer(Lvert$n),
             xv = as.double(Lvert$x),
             yv = as.double(Lvert$y),
             ## BUG FIX: the segment count must be passed as an integer,
             ## matching 'nv' above; previously this was 'as.double(L$n)'
             ## where 'L$n' is not a component of a linnet object (NULL),
             ## and the double type is wrong for the .C interface.
             ns = as.integer(Lseg$n),
             from = as.integer(from0),
             to = as.integer(to0),
             dpath = as.double(dpath),
             psegmap = as.integer(Xsegmap),
             qsegmap = as.integer(Ysegmap),
             answer = as.double(numeric(nX * nY)),
             PACKAGE = "spatstat")
    crossdistmat <- matrix(zz$answer, nX, nY)
  }
  return(crossdistmat)
}
spatstat/R/multistrhard.R0000755000176200001440000003104513115271120015162 0ustar liggesusers#
#
#    multistrhard.S
#
#    $Revision: 2.38 $ $Date: 2016/02/16 01:39:12 $
#
#    The multitype Strauss/hardcore process
#
#    MultiStraussHard()
#        create an instance of the multitype Strauss/ harcore
#        point process
#        [an object of class 'interact']
#
# -------------------------------------------------------------------
#
doMultiStraussHard <- local({
  # ........  define potential ......................
MSHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[i] type (mark) of point U[j] # # get matrices of interaction radii r <- par$iradii h <- par$hradii # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be counted # (the interaction must be symmetric in type, and scored as such) uptri <- (row(r) <= col(r)) & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] # corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be counted # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create numeric array for result z <- array(0, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
if(length(z) > 0) { # apply the relevant interaction distance to each pair of points rxu <- r[ tx, tu ] str <- (d < rxu) str[is.na(str)] <- FALSE # and the relevant hard core distance hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- str value[forbid] <- -Inf # assign value[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub] } } return(z) } # ............... end of potential function ................... # .......... auxiliary functions ................. delMSH <- function(which, types, iradii, hradii, ihc) { iradii[which] <- NA if(any(!is.na(iradii))) { # some gamma interactions left # return modified MultiStraussHard with fewer gamma parameters return(MultiStraussHard(types, iradii, hradii)) } else if(any(!ihc)) { # no gamma interactions left, but some active hard cores return(MultiHard(types, hradii)) } else return(Poisson()) } # ........................................................... 
# Set up basic object except for family and parameters BlankMSHobject <- list( name = "Multitype Strauss Hardcore process", creator = "MultiStraussHard", family = "pairwise.family", # evaluated later pot = MSHpotential, par = list(types=NULL, iradii=NULL, hradii=NULL), # to be added parnames = c("possible types", "interaction distances", "hardcore distances"), pardesc = c("vector of possible types", "matrix of interaction distances", "matrix of hardcore distances"), selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii if(!is.null(types) && !is.null(hradii)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) } MultiStraussHard(types=types,hradii=hradii,iradii=self$par$iradii) }, init = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- self$par$hradii # hradii could be NULL if(!is.null(types)) { if(!is.null(dim(types))) stop(paste("The", sQuote("types"), "argument should be a vector")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) MultiPair.checkmatrix(iradii, nt, sQuote("iradii")) if(!is.null(hradii)) MultiPair.checkmatrix(hradii, nt, sQuote("hradii")) } ina <- is.na(iradii) if(all(ina)) stop(paste("All entries of", sQuote("iradii"), "are NA")) if(!is.null(hradii)) { hna <- is.na(hradii) both <- !ina & !hna if(any(iradii[both] <= hradii[both])) stop("iradii must be larger than hradii") } }, update = NULL, # default OK print = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- 
self$par$hradii nt <- nrow(iradii) if(waxlyrical('gory')) { splat(nt, "types of points") if(!is.null(types)) { splat("Possible types:") print(noquote(types)) } else splat("Possible types:\t not yet determined") } splat("Interaction radii:") dig <- getOption("digits") print(signif(iradii, dig)) if(!is.null(hradii)) { splat("Hardcore radii:") print(signif(hradii, dig)) } else splat("Hardcore radii: not yet determined") invisible() }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrices of interaction radii r <- self$par$iradii h <- self$par$hradii # list all relevant unordered pairs of types uptri <- (row(r) <= col(r)) & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) expcoef <- exp(coeffs) gammas[ cbind(index1, index2) ] <- expcoef gammas[ cbind(index2, index1) ] <- expcoef # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=dround(gammas))) }, valid = function(coeffs, self) { # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # Check that we managed to estimate all required parameters required <- !is.na(iradii) if(!all(is.finite(gamma[required]))) return(FALSE) # Check that the model is integrable # inactive hard cores ... ihc <- (is.na(hradii) | hradii == 0) # .. 
must have gamma <= 1 return(all(gamma[required & ihc] <= 1)) }, project = function(coeffs, self) { # types types <- self$par$types # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # required gamma parameters required <- !is.na(iradii) # active hard cores activehard <- !is.na(hradii) & (hradii > 0) ihc <- !activehard # problems gammavalid <- is.finite(gamma) & (activehard | gamma <= 1) naughty <- required & !gammavalid if(!any(naughty)) return(NULL) # if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delMSH(naughty, types, iradii, hradii, ihc)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) uptri <- (rn <= cn) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) # matindex <- function(v) { matrix(c(v, rev(v)), # ncol=2, byrow=TRUE) } mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex) inters <- lapply(mats, delMSH, types=types, iradii=iradii, hradii=hradii, ihc=ihc) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$iradii h <- self$par$hradii ractive <- !is.na(r) hactive <- !is.na(h) if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 ractive <- ractive & (abs(log(gamma)) > epsilon) } if(!any(c(ractive,hactive))) return(0) else return(max(c(r[ractive],h[hactive]))) }, version=NULL # to be added ) class(BlankMSHobject) <- "interact" matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) } # Finally define MultiStraussHard function doMultiStraussHard <- function(iradii, hradii=NULL, types=NULL) { iradii[iradii == 0] <- NA if(!is.null(hradii)) hradii[hradii == 0] <- NA out <- instantiate.interact(BlankMSHobject, list(types=types, iradii = iradii, hradii = hradii)) if(!is.null(types)) { dn <- list(types, types) dimnames(out$par$iradii) <- dn if(!is.null(out$par$hradii)) dimnames(out$par$hradii) <- dn } return(out) } doMultiStraussHard }) MultiStraussHard <- local({ MultiStraussHard <- function(iradii, hradii, types=NULL) { ## try new syntax newcall <- match.call() newcall[[1]] <- as.name('doMultiStraussHard') out <- try(eval(newcall, parent.frame()), silent=TRUE) if(is.interact(out)) return(out) ## try old syntax oldcall <- match.call(function(types=NULL, iradii, hradii) {}) oldcall[[1]] <- as.name('doMultiStraussHard') out <- try(eval(oldcall, parent.frame()), silent=TRUE) if(is.interact(out)) return(out) ## Syntax is wrong: generate error using new syntax rules if(missing(hradii)) hradii <- NULL doMultiStraussHard(iradii=iradii, hradii=hradii, types=types) } BlankMSHobject <- get("BlankMSHobject", envir=environment(doMultiStraussHard)) MultiStraussHard <- intermaker(MultiStraussHard, BlankMSHobject) MultiStraussHard }) spatstat/R/rhohat.R0000755000176200001440000005503713145262274013751 0ustar liggesusers# # rhohat.R # # $Revision: 1.72 $ $Date: 2017/08/17 09:35:46 $ # # Non-parametric estimation of a transformation rho(z) determining # the intensity function lambda(u) of a point process in terms of 
a # spatial covariate Z(u) through lambda(u) = rho(Z(u)). # More generally allows offsets etc. rhohat <- function(object, covariate, ...) { UseMethod("rhohat") } rhohat.ppp <- rhohat.quad <- function(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local"), subset=NULL, dimyx=NULL, eps=NULL, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 # validate model if(is.null(baseline)) { model <- ppm(object ~1) reference <- "Lebesgue" } else { model <- ppm(object ~ offset(log(baseline))) reference <- "baseline" } modelcall <- NULL if(is.character(covariate) && length(covariate) == 1) { covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(data.ppm(model)) } else { covunits <- NULL } W <- Window(data.ppm(model)) if(!is.null(subset)) W <- W[subset, drop=FALSE] areaW <- area(W) rhohatEngine(model, covariate, reference, areaW, ..., subset=subset, weights=weights, method=method, horvitz=horvitz, smoother=smoother, resolution=list(dimyx=dimyx, eps=eps), n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, modelcall=modelcall, callstring=callstring) } rhohat.ppm <- function(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local"), subset=NULL, dimyx=NULL, eps=NULL, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) if(missing(covname)) covname <- 
sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 if("baseline" %in% names(list(...))) warning("Argument 'baseline' ignored: not available for rhohat.ppm") ## validate model model <- object reference <- "model" modelcall <- model$call if(is.character(covariate) && length(covariate) == 1) { covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(data.ppm(model)) } else { covunits <- NULL } W <- Window(data.ppm(model)) if(!is.null(subset)) W <- W[subset, drop=FALSE] areaW <- area(W) rhohatEngine(model, covariate, reference, areaW, ..., weights=weights, method=method, horvitz=horvitz, smoother=smoother, resolution=list(dimyx=dimyx, eps=eps), n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, modelcall=modelcall, callstring=callstring) } rhohat.lpp <- rhohat.lppm <- function(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local"), subset=NULL, nd=1000, eps=NULL, random=TRUE, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 # validate model if(is.lpp(object)) { X <- object model <- lppm(object, ~1, eps=eps, nd=nd, random=random) reference <- "Lebesgue" modelcall <- NULL } else if(inherits(object, "lppm")) { model <- object X <- model$X reference <- "model" modelcall <- model$call } else stop("object should be of class lpp or lppm") if("baseline" %in% names(list(...))) warning("Argument 'baseline' ignored: not available for ", if(is.lpp(object)) "rhohat.lpp" else "rhohat.lppm") if(is.character(covariate) && 
length(covariate) == 1) { covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(X) } else { covunits <- NULL } S <- as.psp(as.linnet(X)) if(!is.null(subset)) S <- S[subset] totlen <- sum(lengths.psp(S)) rhohatEngine(model, covariate, reference, totlen, ..., subset=subset, weights=weights, method=method, horvitz=horvitz, smoother=smoother, resolution=list(nd=nd, eps=eps, random=random), n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, modelcall=modelcall, callstring=callstring) } rhohatEngine <- function(model, covariate, reference=c("Lebesgue", "model", "baseline"), volume, ..., subset=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local"), resolution=list(), n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, covunits=NULL, confidence=0.95, modelcall=NULL, callstring="rhohat") { reference <- match.arg(reference) # evaluate the covariate at data points and at pixels stuff <- do.call(evalCovar, append(list(model=model, covariate=covariate, subset=subset), resolution)) # unpack # info <- stuff$info values <- stuff$values # values at each data point ZX <- values$ZX lambdaX <- values$lambdaX # values at each pixel Zimage <- values$Zimage Zvalues <- values$Zvalues lambda <- values$lambda ## weights if(!is.null(weights)) { X <- data.ppm(model) if(is.im(weights)) weights <- safelookup(weights, X) else if(is.function(weights)) weights <- weights(X$x, X$y) else if(is.numeric(weights) && is.vector(as.numeric(weights))) check.nvector(weights, npoints(X)) else stop(paste(sQuote("weights"), "should be a vector, a pixel image, or a function")) } # normalising constants denom <- volume * (if(reference == "Lebesgue" || horvitz) 1 else mean(lambda)) # info savestuff <- list(reference = reference, horvitz = horvitz, Zimage = 
Zimage) # calculate rho-hat result <- rhohatCalc(ZX, Zvalues, lambda, denom, ..., weights=weights, lambdaX=lambdaX, method=method, horvitz=horvitz, smoother=smoother, n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, confidence=confidence, covunits=covunits, modelcall=modelcall, callstring=callstring, savestuff=savestuff) return(result) } # basic calculation of rhohat from covariate values rhohatCalc <- local({ interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } ## note: this function normalises the weights, like density.default LocfitRaw <- function(x, ..., weights=NULL) { if(is.null(weights)) weights <- 1 requireNamespace("locfit", quietly=TRUE) do.call.matched(locfit::locfit.raw, append(list(x=x, weights=weights), list(...))) } varlog <- function(obj,xx) { ## variance of log f-hat stopifnot(inherits(obj, "locfit")) if(!identical(obj$trans, exp)) stop("internal error: locfit object does not have log link") ## the following call should have band="local" but that produces NaN's pred <- predict(obj, newdata=xx, se.fit=TRUE, what="coef") se <- pred$se.fit return(se^2) } rhohatCalc <- function(ZX, Zvalues, lambda, denom, ..., weights=NULL, lambdaX=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local"), n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, covunits = NULL, modelcall=NULL, callstring=NULL, savestuff=list()) { method <- match.arg(method) smoother <- match.arg(smoother) ## check availability of locfit package if(smoother == "local" && !requireNamespace("locfit", quietly=TRUE)) { warning(paste("In", paste(dQuote(callstring), ":", sep=""), "package", sQuote("locfit"), "is not available;", "unable to perform local likelihood smoothing;", "using kernel smoothing instead"), call.=FALSE) smoother <- "kernel" } ## validate stopifnot(is.numeric(ZX)) stopifnot(is.numeric(Zvalues)) 
stopifnot(is.numeric(lambda)) stopifnot(length(lambda) == length(Zvalues)) stopifnot(all(is.finite(lambda))) check.1.real(denom) ## if(horvitz) { ## data points will be weighted by reciprocal of model intensity weights <- (weights %orifnull% 1)/lambdaX } ## normalising constants nX <- if(is.null(weights)) length(ZX) else sum(weights) kappahat <- nX/denom ## limits Zrange <- range(ZX, Zvalues) if(is.null(from)) from <- Zrange[1] if(is.null(to)) to <- Zrange[2] if(from > Zrange[1] || to < Zrange[2]) stop("Interval [from, to] = ", prange(c(from,to)), "does not contain the range of data values =", prange(Zrange)) ## critical constant for CI's crit <- qnorm((1+confidence)/2) percentage <- paste(round(100 * confidence), "%%", sep="") CIblurb <- paste("pointwise", percentage, "confidence interval") ## estimate densities if(smoother == "kernel") { ## ............... kernel smoothing ...................... ## reference density (normalised) for calculation ghat <- density(Zvalues,weights=if(horvitz) NULL else lambda/sum(lambda), bw=bwref,adjust=adjust,n=n,from=from,to=to, ...) xxx <- ghat$x ghatfun <- interpolate(ghat) ## relative density switch(method, ratio={ ## compute ratio of smoothed densities fhat <- unnormdensity(ZX,weights=weights, bw=bw,adjust=adjust, n=n,from=from, to=to, ...) fhatfun <- interpolate(fhat) Ghat.xxx <- denom * ghatfun(xxx) yyy <- fhatfun(xxx)/Ghat.xxx ## compute variance approximation sigma <- fhat$bw weights2 <- if(is.null(weights)) NULL else weights^2 fstar <- unnormdensity(ZX,weights=weights2, bw=bw,adjust=adjust/sqrt(2), n=n,from=from, to=to, ...) fstarfun <- interpolate(fstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * fstarfun(xxx)/Ghat.xxx^2 }, reweight={ ## weight Z values by reciprocal of reference wt <- (weights %orifnull% 1)/(denom * ghatfun(ZX)) rhat <- unnormdensity(ZX, weights=wt, bw=bw,adjust=adjust, n=n,from=from, to=to, ...) 
rhatfun <- interpolate(rhat) yyy <- rhatfun(xxx) ## compute variance approximation sigma <- rhat$bw rongstar <- unnormdensity(ZX, weights=wt^2, bw=bw,adjust=adjust/sqrt(2), n=n,from=from, to=to, ...) rongstarfun <- interpolate(rongstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * rongstarfun(xxx) }, transform={ ## probability integral transform Gfun <- interpolate(ghat$x, cumsum(ghat$y)/sum(ghat$y)) GZX <- Gfun(ZX) ## smooth density on [0,1] qhat <- unnormdensity(GZX,weights=weights, bw=bw,adjust=adjust, n=n, from=0, to=1, ...) qhatfun <- interpolate(qhat) ## edge effect correction one <- density(seq(from=0,to=1,length.out=512), bw=qhat$bw, adjust=1, n=n,from=0, to=1, ...) onefun <- interpolate(one) ## apply to transformed values Gxxx <- Gfun(xxx) Dxxx <- denom * onefun(Gxxx) yyy <- qhatfun(Gxxx)/Dxxx ## compute variance approximation sigma <- qhat$bw weights2 <- if(is.null(weights)) NULL else weights^2 qstar <- unnormdensity(GZX,weights=weights2, bw=bw,adjust=adjust/sqrt(2), n=n,from=0, to=1, ...) qstarfun <- interpolate(qstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * qstarfun(Gxxx)/Dxxx^2 }) vvvname <- "Variance of estimator" vvvlabel <- paste("bold(Var)~hat(%s)", paren(covname), sep="") sd <- sqrt(vvv) hi <- yyy + crit * sd lo <- yyy - crit * sd } else { ## .................. local likelihood smoothing ....................... xlim <- c(from, to) xxx <- seq(from, to, length=n) ## reference density ghat <- LocfitRaw(Zvalues, weights=if(horvitz) NULL else lambda, xlim=xlim, ...) ggg <- predict(ghat, xxx) ## relative density switch(method, ratio={ ## compute ratio of smoothed densities fhat <- LocfitRaw(ZX, weights=weights, xlim=xlim, ...) 
fff <- predict(fhat, xxx) yyy <- kappahat * fff/ggg ## compute approximation to variance of log rho-hat varlogN <- 1/nX vvv <- varlog(fhat, xxx) + varlogN }, reweight={ ## weight Z values by reciprocal of reference wt <- (weights %orifnull% 1)/(denom * predict(ghat,ZX)) sumwt <- sum(wt) rhat <- LocfitRaw(ZX, weights=wt, xlim=xlim, ...) rrr <- predict(rhat, xxx) yyy <- sumwt * rrr ## compute approximation to variance of log rho-hat varsumwt <- mean(yyy /(denom * ggg)) * diff(xlim) varlogsumwt <- varsumwt/sumwt^2 vvv <- varlog(rhat, xxx) + varlogsumwt }, transform={ ## probability integral transform Gfun <- approxfun(xxx, cumsum(ggg)/sum(ggg), rule=2) GZX <- Gfun(ZX) ## smooth density on [0,1], end effect corrected qhat <- LocfitRaw(GZX, weights=weights, xlim=c(0,1), ...) ## apply to transformed values Gxxx <- Gfun(xxx) qqq <- predict(qhat, Gxxx) yyy <- kappahat * qqq ## compute approximation to variance of log rho-hat varlogN <- 1/nX vvv <- varlog(qhat, Gxxx) + varlogN }) vvvname <- "Variance of log of estimator" vvvlabel <- paste("bold(Var)~log(hat(%s)", paren(covname), ")", sep="") sss <- exp(crit * sqrt(vvv)) hi <- yyy * sss lo <- yyy / sss } ## pack into fv object df <- data.frame(xxx=xxx, rho=yyy, var=vvv, hi=hi, lo=lo) names(df)[1] <- covname desc <- c(paste("covariate", covname), "Estimated intensity", vvvname, paste("Upper limit of", CIblurb), paste("Lower limit of", CIblurb)) rslt <- fv(df, argu=covname, ylab=substitute(rho(X), list(X=as.name(covname))), valu="rho", fmla= as.formula(paste(". 
~ ", covname)), alim=range(ZX), labl=c(covname, paste("hat(%s)", paren(covname), sep=""), vvvlabel, paste("%s[hi]", paren(covname), sep=""), paste("%s[lo]", paren(covname), sep="")), desc=desc, unitname=covunits, fname="rho", yexp=substitute(rho(X), list(X=as.name(covname)))) fvnames(rslt, ".") <- c("rho", "hi", "lo") fvnames(rslt, ".s") <- c("hi", "lo") ## pack up class(rslt) <- c("rhohat", class(rslt)) ## add info stuff <- list(modelcall = modelcall, callstring = callstring, sigma = switch(smoother, kernel=sigma, local=NULL), covname = paste(covname, collapse=""), ZX = ZX, lambda = lambda, method = method, smoother = smoother) attr(rslt, "stuff") <- append(stuff, savestuff) return(rslt) } rhohatCalc }) ## ........... end of 'rhohatCalc' ................................. print.rhohat <- function(x, ...) { s <- attr(x, "stuff") splat("Intensity function estimate (class rhohat)", "for the covariate", s$covname) switch(s$reference, Lebesgue=splat("Function values are absolute intensities"), baseline=splat("Function values are relative to baseline"), model={ splat("Function values are relative to fitted model") print(s$modelcall) }) cat("Estimation method: ") switch(s$method, ratio={ splat("ratio of fixed-bandwidth kernel smoothers") }, reweight={ splat("fixed-bandwidth kernel smoother of weighted data") }, transform={ splat("probability integral transform,", "edge-corrected fixed bandwidth kernel smoothing", "on [0,1]") }, cat("UNKNOWN\n")) if(identical(s$horvitz, TRUE)) splat("\twith Horvitz-Thompson weight") cat("Smoother: ") switch(s$smoother, kernel={ splat("Kernel density estimator") splat("Actual smoothing bandwidth sigma = ", signif(s$sigma,5)) }, local ={ splat("Local likelihood density estimator") } ) splat("Call:", s$callstring) NextMethod("print") } plot.rhohat <- function(x, ..., do.rug=TRUE) { xname <- short.deparse(substitute(x)) s <- attr(x, "stuff") covname <- s$covname asked.rug <- !missing(do.rug) && identical(rug, TRUE) out <- do.call(plot.fv, 
resolve.defaults(list(x=x), list(...), list(main=xname, shade=c("hi", "lo")))) if(identical(list(...)$limitsonly, TRUE)) return(out) if(do.rug) { rugx <- ZX <- s$ZX # check whether it's the default plot argh <- list(...) isfo <- unlist(lapply(argh, inherits, what="formula")) if(any(isfo)) { # a plot formula was given; inspect RHS fmla <- argh[[min(which(isfo))]] rhs <- rhs.of.formula(fmla) vars <- variablesinformula(rhs) vars <- vars[vars %in% c(colnames(x), ".x", ".y")] if(length(vars) == 1 && vars %in% c(covname, ".x")) { # expression in terms of covariate rhstr <- as.character(rhs)[2] dat <- list(ZX) names(dat) <- vars[1] rugx <- as.numeric(eval(parse(text=rhstr), dat)) } else { if(asked.rug) warning("Unable to add rug plot") rugx <- NULL } } if(!is.null(rugx)) { # restrict to x limits, if given if(!is.null(xlim <- list(...)$xlim)) rugx <- rugx[rugx >= xlim[1] & rugx <= xlim[2]] # finally plot the rug if(length(rugx) > 0) rug(rugx) } } invisible(NULL) } predict.rhohat <- function(object, ..., relative=FALSE, what=c("rho", "lo", "hi", "se")) { trap.extra.arguments(..., .Context="in predict.rhohat") what <- match.arg(what) # extract info s <- attr(object, "stuff") reference <- s$reference # convert to (linearly interpolated) function x <- with(object, .x) y <- if(what == "se") sqrt(object[["var"]]) else object[[what]] fun <- approxfun(x, y, rule=2) # extract image of covariate Z <- s$Zimage # apply fun to Z Y <- if(inherits(Z, "linim")) eval.linim(fun(Z)) else eval.im(fun(Z)) # adjust to reference baseline if(reference != "Lebesgue" && !relative) { lambda <- s$lambda Y[] <- Y[] * lambda } return(Y) } as.function.rhohat <- function(x, ..., value=".y", extrapolate=TRUE) { NextMethod("as.function") } simulate.rhohat <- function(object, nsim=1, ..., drop=TRUE) { trap.extra.arguments(..., .Context="in simulate.rhohat") lambda <- predict(object) if(inherits(lambda, "linim")) { result <- rpoislpp(lambda, nsim=nsim, drop=drop) } else { result <- rpoispp(lambda, nsim=nsim, 
drop=drop)
  }
  return(result)
}
spatstat/R/nnfun.R0000644000176200001440000000407413115225157013574 0ustar liggesusers#
# nnfun.R
#
# nearest neighbour function (returns a function of x,y)
#
# $Revision: 1.5 $ $Date: 2014/10/24 00:22:30 $
#

## Generic: construct a function g(x,y) returning the index (in X) of
## the nearest neighbour of the query location (x,y).
nnfun <- function(X, ...) {
  UseMethod("nnfun")
}

## Method for point patterns: g(x,y) gives the index of the k-th
## nearest point of X to (x,y).
nnfun.ppp <- function(X, ..., k=1) {
  # this line forces X to be bound
  stopifnot(is.ppp(X))
  if(length(k) != 1)
    stop("k should be a single integer")
  g <- function(x,y=NULL) {
    Y <- xy.coords(x, y)[c("x", "y")]
    nncross(Y, X, what="which", k=k)
  }
  attr(g, "Xclass") <- "ppp"
  g <- funxy(g, as.rectangle(as.owin(X)))
  class(g) <- c("nnfun", class(g))
  return(g)
}

## Method for line segment patterns: g(x,y) gives the index of the
## nearest segment of X to (x,y).
nnfun.psp <- function(X, ...) {
  # this line forces X to be bound
  stopifnot(is.psp(X))
  g <- function(x,y=NULL) {
    Y <- xy.coords(x, y)[c("x", "y")]
    nncross(Y, X, what="which")
  }
  attr(g, "Xclass") <- "psp"
  g <- funxy(g, as.rectangle(as.owin(X)))
  class(g) <- c("nnfun", class(g))
  return(g)
}

## Recover the window of the underlying pattern X, stored in the
## closure environment of the nnfun.
as.owin.nnfun <- function(W, ..., fatal=TRUE) {
  X <- get("X", envir=environment(W))
  as.owin(X, ..., fatal=fatal)
}

domain.nnfun <- Window.nnfun <- function(X, ...) { as.owin(X) }

## Convert an nnfun to a pixel image of nearest-neighbour indices.
## na.replace: optional value substituted for NA pixels of the result.
as.im.nnfun <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL,
                        na.replace=NULL) {
  if(is.null(W)) {
    env <- environment(X)
    Xdata <- get("X", envir=env)
    k <- mget("k", envir=env, inherits=FALSE, ifnotfound=list(1))[[1]]
    Z <- nnmap(Xdata, k=k, what="which", eps=eps, dimyx=dimyx, xy=xy)
    ## BUG FIX: this previously read Z$v[is.null(Z$v)] <- na.replace.
    ## is.null() applied to the pixel matrix returns a single FALSE, so
    ## the assignment was a silent no-op and NA pixels were never
    ## replaced.  is.na() performs the intended elementwise test.
    if(!is.null(na.replace))
      Z$v[is.na(Z$v)] <- na.replace
    return(Z)
  }
  # use as.im.function
  NextMethod("as.im")
}

print.nnfun <- function(x, ...)
{
  ## Body of print.nnfun (the function header is on the previous line).
  ## Recover the underlying pattern X and the neighbour order k from
  ## the closure environment of the nnfun object; k defaults to 1 when
  ## absent (e.g. for the psp method, which has no 'k').
  env <- environment(x)
  X <- get("X", envir=env)
  k <- mget("k", envir=env, inherits=FALSE, ifnotfound=list(1))[[1]]
  xtype <- attr(x, "Xclass")
  typestring <- switch(xtype,
                       ppp="point pattern",
                       psp="line segment pattern",
                       paste("object of class", sQuote(xtype)))
  Kth <- if(k == 1) "Nearest" else paste0(ordinal(k), "-Nearest")
  cat(paste(Kth, "Neighbour Index function for ", typestring, "\n"))
  print(X)
  return(invisible(NULL))
}
spatstat/R/bw.diggle.R0000644000176200001440000000535413115271075014314 0ustar liggesusers##
## bw.diggle.R
##
## bandwidth selection rules bw.diggle and bw.scott (for density.ppp)
##
## $Revision: 1.5 $ $Date: 2017/06/05 10:31:58 $
##

## Scott's rule of thumb for 2D kernel density estimation.
## Returns a vector c(sigma.x, sigma.y): the coordinatewise sample
## standard deviations scaled by n^(-1/6).
bw.scott <- function(X) {
  stopifnot(is.ppp(X))
  n <- npoints(X)
  sdx <- sqrt(var(X$x))
  sdy <- sqrt(var(X$y))
  return(c(sdx, sdy) * n^(-1/6))
}

bw.diggle <- local({

  #' integrand
  phi <- function(x,h) {
    if(h <= 0) return(numeric(length(x)))
    y <- pmax.int(0, pmin.int(1, x/(2 * h)))
    4 * pi * h^2 * (acos(y) - y * sqrt(1 - y^2))
  }

  #' secret option for debugging
  mf <- function(..., method=c("C", "interpreted")) { match.arg(method) }

  ## Berman-Diggle cross-validation bandwidth selector for density.ppp.
  ## (Body continues on the next chunk line.)
  bw.diggle <- function(X, ..., correction="good", hmax=NULL, nr=512) {
    stopifnot(is.ppp(X))
    method <- mf(...)
W <- Window(X) lambda <- npoints(X)/area(W) rmax <- if(!is.null(hmax)) (4 * hmax) else rmax.rule("K", W, lambda) r <- seq(0, rmax, length=nr) K <- Kest(X, r=r, correction=correction) yname <- fvnames(K, ".y") K <- K[, c("r", yname)] ## check that K values can be passed to C code if(any(bad <- !is.finite(K[[yname]]))) { ## throw out bad values lastgood <- min(which(bad)) - 1L if(lastgood < 2L) stop("K function yields too many NA/NaN values") K <- K[1:lastgood, ] } rvals <- K$r ## evaluation of M(r) requires K(2r) rmax2 <- max(rvals)/2 if(!is.null(alim <- attr(K, "alim"))) rmax2 <- min(alim[2L], rmax2) ok <- (rvals <= rmax2) switch(method, interpreted = { rvals <- rvals[ok] nr <- length(rvals) J <- numeric(nr) for(i in 1:nr) J[i] <- stieltjes(phi, K, h=rvals[i])[[yname]]/(2 * pi) }, C = { nr <- length(rvals) nrmax <- sum(ok) dK <- diff(K[[yname]]) ndK <- length(dK) z <- .C("digberJ", r=as.double(rvals), dK=as.double(dK), nr=as.integer(nr), nrmax=as.integer(nrmax), ndK=as.integer(ndK), J=as.double(numeric(nrmax)), PACKAGE = "spatstat") J <- z$J rvals <- rvals[ok] }) pir2 <- pi * rvals^2 M <- (1/lambda - 2 * K[[yname]][ok])/pir2 + J/pir2^2 ## This calculation was for the uniform kernel on B(0,h) ## Convert to standard deviation of (one-dimensional marginal) kernel sigma <- rvals/2 result <- bw.optim(M, sigma, creator="bw.diggle", criterion="Berman-Diggle Cross-Validation", J=J, lambda=lambda, unitname=unitname(X)) return(result) } bw.diggle }) spatstat/R/otherpackages.R0000644000176200001440000000474313115271120015262 0ustar liggesusers#' #' otherpackages.R #' #' Dealing with other packages #' #' $Revision: 1.17 $ $Date: 2017/06/05 10:31:58 $ fft2D <- function(z, inverse=FALSE, west=fftwAvailable()) { if(west) return(fftwtools::fftw2d(data=z, inverse=inverse)) return(stats::fft(z=z, inverse=inverse)) } fftwAvailable <- function() { # including temporary check for recent version ok <- requireNamespace("fftwtools", quietly=TRUE) return(ok) } kraeverRandomFields <- 
function() { kraever("RandomFieldsUtils") kraever("RandomFields") # should no longer be needed: # capture.output(RandomFieldsUtils:::.onLoad()) # capture.output(RandomFields:::.onLoad()) return(invisible(NULL)) } # require a namespace and optionally check whether it is attached kraever <- function(package, fatal=TRUE) { if(!requireNamespace(package, quietly=TRUE)) { if(fatal) stop(paste("The package", sQuote(package), "is required"), call.=FALSE) return(FALSE) } if(spatstat.options(paste("check", package, "loaded", sep=".")) && !isNamespaceLoaded(package)){ if(fatal) stop(paste("The package", sQuote(package), "must be loaded: please type", sQuote(paste0("library", paren(package)))), call.=FALSE) return(FALSE) } return(TRUE) } getRandomFieldsModelGen <- function(model) { kraeverRandomFields() if(inherits(model, "RMmodelgenerator")) return(model) if(!is.character(model)) stop(paste("'model' should be a character string", "or one of the functions in the RandomFields package", "with a name beginning 'RM'"), call.=FALSE) switch(model, cauchy = RandomFields::RMcauchy, exponential = , exp = RandomFields::RMexp, gencauchy = RandomFields::RMgencauchy, gauss = RandomFields::RMgauss, gneiting = RandomFields::RMgneiting, matern = RandomFields::RMmatern, nugget = RandomFields::RMnugget, spheric = RandomFields::RMspheric, stable = RandomFields::RMstable, whittle = RandomFields::RMwhittle, { modgen <- try(getExportedValue("RandomFields", paste0("RM", model)), silent=TRUE) if(inherits(modgen, "try-error") || !inherits(modgen, "RMmodelgenerator")) stop(paste("Model", sQuote(model), "is not recognised")) modgen }) } spatstat/R/pixellate.R0000755000176200001440000001446613115271120014437 0ustar liggesusers# # pixellate.R # # $Revision: 1.24 $ $Date: 2017/06/05 10:31:58 $ # # pixellate convert an object to a pixel image # # pixellate.ppp convert a point pattern to a pixel image # (pixel value = number of points in pixel) # # pixellate.owin convert a window to a pixel image # (pixel 
value = area of intersection with pixel)
#

pixellate <- function(x, ...) {
  UseMethod("pixellate")
}

## Convert a point pattern to a pixel image; each pixel value is the
## (weighted) number of data points falling in that pixel.
##
## weights    : optional vector/matrix/data frame of point weights
##              (a matrix or data frame yields one image per column)
## padzero    : if TRUE, pixels outside the window get 0 instead of NA
## fractional : if TRUE, each point's mass is shared among the 4
##              surrounding pixel centres using bilinear weights
## preserve   : if TRUE (non-rectangular windows only), renormalise the
##              fractional weights so each point contributes total mass
##              1 inside the window
pixellate.ppp <- function(x, W=NULL, ..., weights=NULL,
                          padzero=FALSE, fractional=FALSE, preserve=FALSE) {
  verifyclass(x, "ppp")
  if(is.null(W))
    W <- Window(x)
  isrect <- is.rectangle(W)
  preserve <- preserve && !isrect
  ## the result is a genuine integer count only when unweighted and
  ## without fractional smearing
  iscount <- is.null(weights) && !fractional && !preserve
  W <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=W)))
  nx <- npoints(x)
  insideW <- W$m
  dimW <- W$dim
  nr <- dimW[1L]   # number of rows    (y direction, length of yrow)
  nc <- dimW[2L]   # number of columns (x direction, length of xcol)
  xcolW <- W$xcol
  yrowW <- W$yrow
  xrangeW <- W$xrange
  yrangeW <- W$yrange
  unitsW <- unitname(W)
  # multiple columns of weights?
  if(is.data.frame(weights) || is.matrix(weights)) {
    k <- ncol(weights)
    stopifnot(nrow(weights) == npoints(x))
    weights <- if(k == 1) as.vector(weights) else as.data.frame(weights)
  } else {
    k <- 1
    if(length(weights) == 0) weights <- NULL else
      stopifnot(length(weights) == npoints(x) || length(weights) == 1)
    if(length(weights) == 1)
      weights <- rep(weights, npoints(x))
  }
  # handle empty point pattern
  if(nx == 0) {
    zerovalue <- if(iscount) 0L else as.double(0)
    zeroimage <- as.im(zerovalue, W)
    if(padzero) # map NA to 0
      zeroimage <- na.handle.im(zeroimage, zerovalue)
    result <- zeroimage
    if(k > 1) {
      result <- as.solist(rep(list(zeroimage), k))
      names(result) <- colnames(weights)
    }
    return(result)
  }
  # map points to pixels
  xx <- x$x
  yy <- x$y
  if(!fractional) {
    #' map (x,y) to nearest raster point
    pixels <- if(preserve) nearest.valid.pixel(xx, yy, W) else
              nearest.raster.point(xx, yy, W)
    rowfac <- factor(pixels$row, levels=1:nr)
    colfac <- factor(pixels$col, levels=1:nc)
  } else {
    #' attribute fractional weights to the 4 pixel centres surrounding (x,y)
    #' find surrounding pixel centres
    jj <- findInterval(xx, xcolW, rightmost.closed=TRUE)
    ii <- findInterval(yy, yrowW, rightmost.closed=TRUE)
    ## BUG FIX: the column index must be clamped to the number of
    ## columns 'nc' (the length of xcolW) and the row index to the
    ## number of rows 'nr' (the length of yrowW).  The original code
    ## used 'nr' for the columns and 'nc' for the rows, which indexes
    ## xcolW/yrowW out of bounds whenever the raster is not square.
    jleft <- pmax(jj, 1)
    jright <- pmin(jj + 1, nc)
    ibot <- pmax(ii, 1)
    itop <- pmin(ii+1, nr)
    #' compute fractional weights
    wleft <- pmin(1, abs(xcolW[jright] -
xx)/W$xstep) wright <- 1 - wleft wbot <- pmin(1, abs(yrowW[itop] - yy)/W$ystep) wtop <- 1 - wbot #' pack together ww <- c(wleft * wbot, wleft * wtop, wright * wbot, wright * wtop) rowfac <- factor(c(ibot, itop, ibot, itop), levels=1:nr) colfac <- factor(c(jleft, jleft, jright, jright), levels=1:nc) if(preserve) { #' normalise fractions for each data point to sum to 1 inside window ok <- insideW[cbind(as.integer(rowfac), as.integer(colfac))] wwok <- ww * ok denom <- .colSums(wwok, 4, nx, na.rm=TRUE) recip <- ifelse(denom == 0, 1, 1/denom) ww <- wwok * rep(recip, each=4) } #' data weights must be replicated if(is.null(weights)) { weights <- ww } else if(k == 1) { weights <- ww * rep(weights, 4) } else { weights <- ww * apply(weights, 2, rep, times=4) } } #' sum weights if(is.null(weights)) { ta <- table(row = rowfac, col = colfac) } else if(k == 1) { ta <- tapplysum(weights, list(row = rowfac, col=colfac)) } else { ta <- list() for(j in 1:k) { ta[[j]] <- tapplysum(weights[,j], list(row = rowfac, col=colfac)) } } # pack up as image(s) if(k == 1) { # single image # clip to window of data if(!padzero) ta[!insideW] <- NA out <- im(ta, xcol = xcolW, yrow = yrowW, xrange = xrangeW, yrange = yrangeW, unitname=unitsW) } else { # case k > 1 # create template image to reduce overhead template <- im(ta[[1L]], xcol = xcolW, yrow = yrowW, xrange = xrangeW, yrange = yrangeW, unitname=unitsW) out <- list() for(j in 1:k) { taj <- ta[[j]] # clip to window of data if(!padzero) taj[!insideW] <- NA # copy template and reassign pixel values outj <- template outj$v <- taj # store out[[j]] <- outj } out <- as.solist(out) names(out) <- names(weights) } return(out) } pixellate.owin <- function(x, W=NULL, ...) 
{ stopifnot(is.owin(x)) P <- as.polygonal(x) R <- as.rectangle(x) if(is.null(W)) W <- R else if(!is.subset.owin(R, as.rectangle(W))) stop("W does not cover the domain of x") W <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=W))) ## compute Zmat <- polytileareaEngine(P, W$xrange, W$yrange, nx=W$dim[2L], ny=W$dim[1L]) ## convert to image Z <- im(Zmat, xcol=W$xcol, yrow=W$yrow, xrange=W$xrange, yrange=W$yrange, unitname=unitname(W)) return(Z) } polytileareaEngine <- function(P, xrange, yrange, nx, ny) { x0 <- xrange[1L] y0 <- yrange[1L] dx <- diff(xrange)/nx dy <- diff(yrange)/ny # process each component polygon Z <- matrix(0.0, ny, nx) B <- P$bdry for(i in seq_along(B)) { PP <- B[[i]] # transform so that pixels become unit squares QQ <- affinexypolygon(PP, vec = c(-x0, -y0)) RR <- affinexypolygon(QQ, mat = diag(1/c(dx, dy))) # xx <- RR$x yy <- RR$y nn <- length(xx) # close polygon xx <- c(xx, xx[1L]) yy <- c(yy, yy[1L]) nn <- nn+1 # call C routine zz <- .C("poly2imA", ncol=as.integer(nx), nrow=as.integer(ny), xpoly=as.double(xx), ypoly=as.double(yy), npoly=as.integer(nn), out=as.double(numeric(nx * ny)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(zz$status != 0) stop("Internal error") # increment output Z[] <- Z[] + zz$out } # revert to original scale pixelarea <- dx * dy return(Z * pixelarea) } spatstat/R/allstats.R0000755000176200001440000000220513115271075014274 0ustar liggesusers# # # allstats.R # # $Revision: 1.18 $ $Date: 2016/02/11 10:17:12 $ # # allstats <- function(pp, ..., dataname=NULL,verb=FALSE) { # # Function allstats --- to calculate the F, G, K, and J functions # for an unmarked point pattern. 
#
  verifyclass(pp,"ppp")
  ## only unmarked patterns are supported
  if(is.marked(pp))
    stop("This function is applicable only to unmarked patterns.\n")
  # estimate F, G and J
  if(verb) cat("Calculating F, G, J ...")
  Jout <- do.call.matched(Jest,list(X=pp, ...))
  if(verb) cat("ok.\n")
  # extract F, G and J
  # (Jest computes all three; the F and G estimates are attached to the
  #  J estimate as attributes, which are detached here)
  Fout <- attr(Jout, "F")
  Gout <- attr(Jout, "G")
  attr(Jout, "F") <- NULL
  attr(Jout, "G") <- NULL
  fns <- list("F function"=Fout, "G function"=Gout, "J function"=Jout)
  # compute second moment function K
  if(verb) cat("Calculating K function...")
  Kout <- do.call.matched(Kest, list(X=pp, ...))
  fns <- append(fns, list("K function"=Kout))
  if(verb) cat("done.\n")
  # add title
  if(is.null(dataname))
    dataname <- short.deparse(substitute(pp))
  title <- paste("Four summary functions for ", dataname,".",sep="")
  attr(fns, "title") <- title
  #
  fns <- as.anylist(fns)
  return(fns)
}
spatstat/R/linnet.R0000755000176200001440000004344513155546330013744 0ustar liggesusers#
# linnet.R
#
# Linear networks
#
# $Revision: 1.63 $ $Date: 2017/09/11 16:36:37 $
#
# An object of class 'linnet' defines a linear network.
# It includes the following components
#
# vertices (ppp) vertices of network
#
# m (matrix) adjacency matrix
#
# lines (psp) edges of network
#
# dpath (matrix) matrix of shortest path distances
# between each pair of vertices
#
# from, to (vectors) map from edges to vertices.
# The endpoints of the i-th segment lines[i] # are vertices[from[i]] and vertices[to[i]] # # # FUNCTIONS PROVIDED: # linnet creates an object of class "linnet" from data # print.linnet print an object of class "linnet" # plot.linnet plot an object of class "linnet" # # Make an object of class "linnet" from the minimal data linnet <- function(vertices, m, edges, sparse=FALSE, warn=TRUE) { if(missing(m) && missing(edges)) stop("specify either m or edges") if(!missing(m) && !missing(edges)) stop("do not specify both m and edges") # validate inputs stopifnot(is.ppp(vertices)) nv <- npoints(vertices) if(nv <= 1) { m <- matrix(FALSE, nv, nv) } else if(!missing(m)) { # check logical matrix or logical sparse matrix if(!is.matrix(m) && !inherits(m, c("lgCMatrix", "lgTMatrix"))) stop("m should be a matrix or sparse matrix") stopifnot(is.logical(m) && isSymmetric(m)) if(nrow(m) != vertices$n) stop("dimensions of matrix m do not match number of vertices") if(any(diag(m))) { warning("diagonal entries of the matrix m should not be TRUE; ignored") diag(m) <- FALSE } sparse <- !is.matrix(m) ## determine 'from' and 'to' vectors ij <- which(m, arr.ind=TRUE) ij <- ij[ ij[,1L] < ij[,2L], , drop=FALSE] from <- ij[,1L] to <- ij[,2L] } else { # check (from, to) pairs stopifnot(is.matrix(edges) && ncol(edges) == 2) if(any((edges %% 1) != 0)) stop("Entries of edges list should be integers") if(any(self <- (edges[,1L] == edges[,2L]))) { warning("edge list should not join a vertex to itself; ignored") edges <- edges[!self, , drop=FALSE] } np <- npoints(vertices) if(any(edges > np)) stop("index out-of-bounds in edges list") from <- edges[,1L] to <- edges[,2L] # convert to adjacency matrix if(!sparse) { m <- matrix(FALSE, np, np) m[edges] <- TRUE } else m <- sparseMatrix(i=from, j=to, x=TRUE, dims=c(np, np)) m <- m | t(m) } # create line segments xx <- vertices$x yy <- vertices$y lines <- psp(xx[from], yy[from], xx[to], yy[to], window=vertices$window, check=FALSE) # tolerance toler <- 
default.linnet.tolerance(lines) ## pack up out <- list(vertices=vertices, m=m, lines=lines, from=from, to=to, sparse=sparse, window=vertices$window, toler=toler) class(out) <- c("linnet", class(out)) ## finish ? if(sparse) return(out) # compute matrix of distances between adjacent vertices n <- nrow(m) d <- matrix(Inf, n, n) diag(d) <- 0 d[m] <- pairdist(vertices)[m] ## now compute shortest-path distances between each pair of vertices out$dpath <- dpath <- dist2dpath(d) if(warn && any(is.infinite(dpath))) warning("Network is not connected", call.=FALSE) # pre-compute bounding radius out$boundingradius <- boundingradius(out) return(out) } print.linnet <- function(x, ...) { nv <- x$vertices$n nl <- x$lines$n splat("Linear network with", nv, ngettext(nv, "vertex", "vertices"), "and", nl, ngettext(nl, "line", "lines")) if(!is.null(br <- x$boundingradius) && is.infinite(br)) splat("[Network is not connected]") print(as.owin(x), prefix="Enclosing window: ") return(invisible(NULL)) } summary.linnet <- function(object, ...) { deg <- vertexdegree(object) sparse <- object$sparse %orifnull% is.null(object$dpath) result <- list(nvert = object$vertices$n, nline = object$lines$n, nedge = sum(deg)/2, unitinfo = summary(unitname(object)), totlength = sum(lengths.psp(object$lines)), maxdegree = max(deg), ncomponents = length(levels(connected(object, what="labels"))), win = as.owin(object), sparse = sparse) if(!sparse) { result$diam <- diameter(object) result$boundrad <- boundingradius(object) } result$toler <- object$toler class(result) <- c("summary.linnet", class(result)) result } print.summary.linnet <- function(x, ...) 
{ dig <- getOption('digits') with(x, { splat("Linear network with", nvert, ngettext(nvert, "vertex", "vertices"), "and", nline, ngettext(nline, "line", "lines")) splat("Total length", signif(totlength, dig), unitinfo$plural, unitinfo$explain) splat("Maximum vertex degree:", maxdegree) if(sparse) splat("[Sparse matrix representation]") else splat("[Non-sparse matrix representation]") if(ncomponents > 1) { splat("Network is disconnected: ", ncomponents, "connected components") } else { splat("Network is connected") if(!sparse) { splat("Diameter:", signif(diam, dig), unitinfo$plural) splat("Bounding radius:", signif(boundrad, dig), unitinfo$plural) } } if(!is.null(x$toler)) splat("Numerical tolerance:", signif(x$toler, dig), unitinfo$plural) print(win, prefix="Enclosing window: ") }) return(invisible(NULL)) } plot.linnet <- function(x, ..., main=NULL, add=FALSE, vertices=FALSE, window=FALSE, do.plot=TRUE) { if(is.null(main)) main <- short.deparse(substitute(x)) stopifnot(inherits(x, "linnet")) bb <- Frame(x) if(!do.plot) return(invisible(bb)) lines <- as.psp(x) if(!add) { # initialise new plot w <- as.owin(lines) if(window) plot(w, ..., main=main) else plot(w, ..., main=main, type="n") } # plot segments and (optionally) vertices do.call(plot, resolve.defaults(list(x=lines, show.all=FALSE, add=TRUE, main=main), list(...))) if(vertices) plot(x$vertices, add=TRUE) return(invisible(bb)) } as.psp.linnet <- function(x, ..., fatal=TRUE) { verifyclass(x, "linnet", fatal=fatal) return(x$lines) } vertices.linnet <- function(w) { verifyclass(w, "linnet") return(w$vertices) } nvertices.linnet <- function(x, ...) { verifyclass(x, "linnet") return(x$vertices$n) } nsegments.linnet <- function(x) { return(x$lines$n) } Window.linnet <- function(X, ...) { return(X$window) } "Window<-.linnet" <- function(X, ..., check=TRUE, value) { if(check) { X <- X[value] } else { X$window <- value X$lines$window <- value X$vertices$window <- value } return(X) } as.owin.linnet <- function(W, ...) 
{ return(Window(W)) }

as.linnet <- function(X, ...) {
  UseMethod("as.linnet")
}

## Convert between the sparse and non-sparse internal representations
## of a linear network. With 'sparse' missing, the object is returned
## unchanged.
as.linnet.linnet <- function(X, ..., sparse) {
  if(missing(sparse)) return(X)
  # older objects may lack the 'sparse' flag; infer from absence of dpath
  if(is.null(X$sparse)) X$sparse <- is.null(X$dpath)
  if(sparse && !(X$sparse)) {
    # delete distance matrix
    X$dpath <- NULL
    # convert adjacency matrix to sparse matrix
    X$m <- as(X$m, "sparseMatrix")
    X$sparse <- TRUE
  } else if(!sparse && X$sparse) {
    # convert adjacency to matrix
    X$m <- m <- as.matrix(X$m)
    edges <- which(m, arr.ind=TRUE)
    from <- edges[,1L]
    to <- edges[,2L]
    # compute distances to one-step neighbours
    n <- nrow(m)
    d <- matrix(Inf, n, n)
    diag(d) <- 0
    coo <- coords(vertices(X))
    d[edges] <- sqrt(rowSums((coo[from, 1:2] - coo[to, 1:2])^2))
    # compute shortest path distance matrix
    X$dpath <- dist2dpath(d)
    # compute bounding radius
    X$boundingradius <- boundingradius(X)
    X$sparse <- FALSE
  } else if(!sparse) {
    # possibly update internals
    X$boundingradius <- boundingradius(X)
  }
  # possibly update internals
  # ('circumradius' is the obsolete name; clear it)
  X$circumradius <- NULL
  X$toler <- default.linnet.tolerance(X)
  return(X)
}

## Build a linear network from a segment pattern:
## self-intersections are cut, nearby endpoints (within 'eps') are fused,
## and each remaining segment becomes an edge between its endpoints.
as.linnet.psp <- local({

  as.linnet.psp <- function(X, ..., eps, sparse=FALSE) {
    X <- selfcut.psp(X)
    V <- unique(endpoints.psp(X))
    if(missing(eps) || is.null(eps)) {
      # default fuzz: machine epsilon at the scale of the frame
      eps <- sqrt(.Machine$double.eps) * diameter(Frame(X))
    } else {
      check.1.real(eps)
      stopifnot(eps >= 0)
    }
    if(eps > 0 && minnndist(V) <= eps) {
      # fuse clusters of endpoints closer than eps,
      # replacing each cluster by its centroid
      gV <- marks(connected(V, eps))
      xy <- split(coords(V), gV)
      mxy <- lapply(xy, centro)
      V <- do.call(superimpose, append(unname(mxy), list(W=Window(X))))
    }
    # map each segment endpoint to its nearest (possibly fused) vertex
    first <- endpoints.psp(X, "first")
    second <- endpoints.psp(X, "second")
    from <- nncross(first, V, what="which")
    to <- nncross(second, V, what="which")
    # drop degenerate segments whose two endpoints map to the same vertex
    nontrivial <- (from != to)
    join <- cbind(from, to)[nontrivial, , drop=FALSE]
    result <- linnet(V, edges=join, sparse=sparse)
    if(is.marked(X)) marks(result$lines) <- marks(X[nontrivial])
    return(result)
  }

  # centroid of a matrix of coordinates, as a list(x=..., y=...)
  centro <- function(X) as.list(apply(X, 2, mean))

  as.linnet.psp
})

unitname.linnet <- function(x) {
  unitname(x$window)
}

## (body continues in the next chunk)
"unitname<-.linnet" <-
function(x, value) {
  ## Assign a new unit name to the window, vertices and segments,
  ## keeping the three components consistent.
  w <- x$window
  v <- x$vertices
  l <- x$lines
  unitname(w) <- unitname(v) <- unitname(l) <- value
  x$window <- w
  x$vertices <- v
  x$lines <- l
  return(x)
}

## Diameter of the network in the shortest-path metric.
## Returns NULL for the sparse representation (no distance matrix).
diameter.linnet <- function(x) {
  stopifnot(inherits(x, "linnet"))
  dpath <- x$dpath
  if(is.null(dpath)) return(NULL) else return(max(0, dpath))
}

## Total length of the network.
volume.linnet <- function(x) {
  sum(lengths.psp(x$lines))
}

## Degree (number of incident edges) of each vertex.
vertexdegree <- function(x) {
  verifyclass(x, "linnet")
  return(rowSums(x$m))
}

## Old name for boundingradius.linnet, retained for back-compatibility.
circumradius.linnet <- function(x, ...) {
  .Deprecated("boundingradius.linnet")
  boundingradius.linnet(x, ...)
}

## Bounding radius: min over locations u of max over network points v
## of the shortest-path distance d(u, v). Uses a cached value if present;
## otherwise computes it, in C if spatstat.options("Clinearradius") is set,
## else by the interpreted double loop over segment pairs.
boundingradius.linnet <- function(x, ...) {
  stopifnot(inherits(x, "linnet"))
  # use cached value (possibly under the obsolete name 'circumradius')
  cr <- x$boundingradius %orifnull% x$circumradius
  if(!is.null(cr)) return(cr)
  dpath <- x$dpath
  if(is.null(dpath)) return(NULL)
  # disconnected network: radius is infinite
  if(any(is.infinite(dpath))) return(Inf)
  if(nrow(dpath) <= 1) return(max(0,dpath))
  from <- x$from
  to <- x$to
  lines <- x$lines
  nseg <- lines$n
  leng <- lengths.psp(lines)
  if(spatstat.options("Clinearradius")) {
    # C implementation (0-based vertex indices)
    fromC <- from - 1L
    toC <- to - 1L
    nv <- npoints(vertices(x))
    huge <- sum(leng)
    z <- .C("linearradius",
            ns = as.integer(nseg),
            from = as.integer(fromC),
            to = as.integer(toC),
            lengths = as.double(leng),
            nv = as.integer(nv),
            dpath = as.double(dpath),
            huge = as.double(huge),
            result = as.double(numeric(1)),
            PACKAGE = "spatstat")
    return(z$result)
  }
  # interpreted fallback:
  # sA[i,j] = max distance from endpoint A of segment i
  #           to any point of segment j (similarly sB for endpoint B)
  sA <- sB <- matrix(Inf, nseg, nseg)
  for(i in 1:nseg) {
    # endpoints of segment i
    A <- from[i]
    B <- to[i]
    AB <- leng[i]
    sA[i,i] <- sB[i,i] <- AB/2
    for(j in (1:nseg)[-i]) {
      # endpoints of segment j
      C <- from[j]
      D <- to[j]
      CD <- leng[j]
      AC <- dpath[A,C]
      AD <- dpath[A,D]
      BC <- dpath[B,C]
      BD <- dpath[B,D]
      # max dist from A to any point in segment j
      sA[i,j] <- if(AD > AC + CD) AC + CD else
                 if(AC > AD + CD) AD + CD else (AC + AD + CD)/2
      # max dist from B to any point in segment j
      sB[i,j] <- if(BD > BC + CD) BC + CD else
                 if(BC > BD + CD) BD + CD else (BC + BD + CD)/2
    }
  }
  # max dist from each A to any point in another segment
  mA <- apply(sA, 1, max)
  # max dist from each B to any point in another segment
  mB <- apply(sB, 1, max)
  # min of these
  min(mA, mB)
}

####################################################
# affine transformations
####################################################

## Dilate the network by a scalar factor f > 0.
## Shortest-path distances, bounding radius and tolerance all scale by f.
scalardilate.linnet <- function(X, f, ...) {
  trap.extra.arguments(..., .Context="In scalardilate(X,f)")
  check.1.real(f, "In scalardilate(X,f)")
  stopifnot(is.finite(f) && f > 0)
  Y <- X
  Y$vertices <- scalardilate(X$vertices, f=f)
  Y$lines <- scalardilate(X$lines, f=f)
  Y$window <- scalardilate(X$window, f=f)
  if(!is.null(X$dpath)) {
    Y$dpath <- f * X$dpath
    Y$boundingradius <- f * (X$boundingradius %orifnull% X$circumradius)
    Y$circumradius <- NULL
  }
  ## BUG FIX: previously assigned to X$toler, so the rescaled tolerance
  ## was discarded and Y retained the unscaled value.
  if(!is.null(X$toler)) Y$toler <- makeLinnetTolerance(f * X$toler)
  return(Y)
}

## Apply an affine transformation y = mat %*% x + vec.
## If 'mat' has a single (repeated) eigenvalue, the cached metric data
## can be rescaled by scal = sqrt(|det(mat)|); otherwise the network is
## rebuilt from the transformed vertices and the original edge list.
affine.linnet <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) {
  verifyclass(X, "linnet")
  if(length(unique(eigen(mat)$values)) == 1) {
    # transformation is an isometry
    # (note: this test accepts only maps with one repeated eigenvalue)
    scal <- sqrt(abs(det(mat)))
    Y <- X
    Y$vertices <- affine(X$vertices, mat=mat, vec=vec, ...)
    Y$lines <- affine(X$lines, mat=mat, vec=vec, ...)
    Y$window <- affine(X$window, mat=mat, vec=vec, ...)
    if(!is.null(X$dpath)) {
      Y$dpath <- scal * X$dpath
      Y$boundingradius <- scal * (X$boundingradius %orifnull% X$circumradius)
      ## BUG FIX: previously cleared X$circumradius (a no-op on the result),
      ## leaving a stale obsolete entry in Y; now consistent with
      ## scalardilate.linnet.
      Y$circumradius <- NULL
    }
    if(!is.null(Y$toler)) Y$toler <- makeLinnetTolerance(scal * Y$toler)
  } else {
    # general case: rebuild network from transformed vertices
    vertices <- affine(X$vertices, mat=mat, vec=vec, ...)
    Y <- linnet(vertices, edges=cbind(X$from, X$to))
  }
  return(Y)
}

## Translate the network (body continues in the next chunk).
## The shift vector actually applied to the window is reused for the
## vertices and segments so all components move identically.
shift.linnet <- function(X, vec=c(0,0), ..., origin=NULL) {
  verifyclass(X, "linnet")
  Y <- X
  Y$window <- W <- shift(X$window, vec=vec, ..., origin=origin)
  v <- getlastshift(W)
  Y$vertices <- shift(X$vertices, vec=v, ...)
  Y$lines <- shift(X$lines, vec=v, ...)
  # tack on shift vector
  attr(Y, "lastshift") <- v
  return(Y)
}

## Rotate the network about 'centre' (default: the origin) by 'angle'.
rotate.linnet <- function(X, angle=pi/2, ..., centre=NULL) {
  verifyclass(X, "linnet")
  if(!is.null(centre)) {
    # move centre to the origin, remember the shift so it can be undone
    X <- shift(X, origin=centre)
    negorigin <- getlastshift(X)
  } else negorigin <- NULL
  Y <- X
  Y$vertices <- rotate(X$vertices, angle=angle, ...)
  Y$lines <- rotate(X$lines, angle=angle, ...)
  Y$window <- rotate(X$window, angle=angle, ...)
  if(!is.null(negorigin)) Y <- shift(Y, -negorigin)
  return(Y)
}

## Convert to different units of length: divide coordinates by 's'
## and relabel the unit.
rescale.linnet <- function(X, s, unitname) {
  if(missing(unitname)) unitname <- NULL
  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
  Y <- scalardilate(X, f=1/s)
  unitname(Y) <- rescale(unitname(X), s, unitname)
  return(Y)
}

## Restrict a network to a window 'i'.
## snip=TRUE: segments are cut at the window boundary (new vertices are
## inserted at crossing points); snip=FALSE: only segments with both
## endpoints inside are kept.
"[.linnet" <- function(x, i, ..., snip=TRUE) {
  if(!is.owin(i))
    stop("In [.linnet: the index i should be a window", call.=FALSE)
  w <- i
  wp <- as.polygonal(w)
  if(is.mask(w)) {
    ## protect against pixellation artefacts
    pixel <- owin(w$xstep * c(-1,1)/2, w$ystep * c(-1,1)/2)
    wp <- MinkowskiSum(wp, pixel)
    wp <- intersect.owin(wp, Frame(w))
  }
  ## Find vertices that lie inside window
  vertinside <- inside.owin(x$vertices, w=wp)
  from <- x$from
  to <- x$to
  if(snip) {
    ## For efficiency, first restrict network to relevant segments.
    ## Find segments EITHER OF whose endpoints lie in 'w'
    okedge <- vertinside[from] | vertinside[to]
    ## extract relevant subset of network graph
    x <- thinNetwork(x, retainedges=okedge)
    ## Now add vertices at crossing points with boundary of 'w'
    b <- crossing.psp(as.psp(x), edges(wp))
    x <- insertVertices(x, unique(b))
    boundarypoints <- attr(x, "id")
    ## update data
    from <- x$from
    to <- x$to
    vertinside <- inside.owin(x$vertices, w=wp)
    # boundary vertices count as inside
    vertinside[boundarypoints] <- TRUE
  }
  ## find segments whose endpoints BOTH lie in 'w'
  edgeinside <- vertinside[from] & vertinside[to]
  ## extract relevant subset of network
  xnew <- thinNetwork(x, retainedges=edgeinside)
  ## adjust window efficiently
  Window(xnew, check=FALSE) <- w
  return(xnew)
}

#
# interactive plot for linnet objects
#
iplot.linnet <- function(x, ..., xname) {
  if(missing(xname)) xname <- short.deparse(substitute(x))
  if(!inherits(x, "linnet")) stop("x should be a linnet object")
  ## predigest: layers for segments, vertices, and vertex-degree labels
  v <- vertices(x)
  deg <- vertexdegree(x)
  dv <- textstring(v, txt=paste(deg))
  y <- layered(lines=as.psp(x), vertices=v, degree=dv)
  # initially only the segments layer is visible
  iplot(y, ..., xname=xname, visible=c(TRUE, FALSE, FALSE))
}

## Pixellate the network by pixellating its segment pattern.
pixellate.linnet <- function(x, ...) {
  pixellate(as.psp(x), ...)
}

## Connected components of the network graph.
## what="labels": factor of component labels, one per vertex;
## what="components": list of sub-networks, one per component.
connected.linnet <- function(X, ..., what=c("labels", "components")) {
  verifyclass(X, "linnet")
  what <- match.arg(what)
  nv <- npoints(vertices(X))
  # edge lists, converted to 0-based indices for the C routine
  ie <- X$from - 1L
  je <- X$to - 1L
  ne <- length(ie)
  zz <- .C("cocoGraph",
           nv = as.integer(nv),
           ne = as.integer(ne),
           ie = as.integer(ie),
           je = as.integer(je),
           label = as.integer(integer(nv)),
           status = as.integer(integer(1L)),
           PACKAGE = "spatstat")
  if (zz$status != 0)
    stop("Internal error: connected.linnet did not converge")
  # relabel components with consecutive integer levels
  lab <- zz$label + 1L
  lab <- as.integer(factor(lab))
  lab <- factor(lab)
  if(what == "labels") return(lab)
  nets <- list()
  subsets <- split(seq_len(nv), lab)
  for(i in seq_along(subsets))
    nets[[i]] <- thinNetwork(X, retainvertices=subsets[[i]])
  return(nets)
}

## (body continues in the next chunk)
is.connected.linnet <- function(X, ...)
{
  ## fast path: with the full distance matrix, the network is connected
  ## iff every shortest-path distance is finite
  if(!is.null(dpath <- X$dpath)) return(all(is.finite(dpath)))
  ## otherwise count connected components of the graph
  lab <- connected(X, what="labels")
  npieces <- length(levels(lab))
  return(npieces == 1)
}

## Find the points where the network X is crossed by the segments/lines Y.
## Returns a point pattern on the network (an "lpp" object), marked by the
## identifier of the crossing line/segment of Y.
crossing.linnet <- function(X, Y) {
  X <- as.linnet(X)
  if(!inherits(Y, c("linnet", "infline", "psp")))
    ## BUG FIX: message previously referred to "L" but the argument is 'Y'
    stop("Y should be an object of class psp, linnet or infline",
         call.=FALSE)
  ## convert infinite lines to segments
  if(inherits(Y, "linnet")) Y <- as.psp(Y)
  if(inherits(Y, "infline")) {
    Y <- clip.infline(Y, Frame(X))
    # marks of the clipped segments identify the original infinite lines
    id <- marks(Y)
    lev <- levels(id)
  } else {
    id <- lev <- seq_len(nsegments(Y))
  }
  ## extract segments of network
  S <- as.psp(X)
  ## find crossing points
  SY <- crossing.psp(S, Y, fatal=FALSE, details=TRUE)
  # no crossings: return an empty pattern on the network
  if(is.null(SY) || npoints(SY) == 0) return(lpp(L=X))
  SY <- as.data.frame(SY)
  # iA = index of crossed network segment, tA = position along it,
  # jB = index of crossing segment of Y
  Z <- with(as.data.frame(SY),
            as.lpp(x=x, y=y, seg=iA, tp=tA, L=X,
                   marks=factor(id[as.integer(jB)], levels=lev)))
  return(Z)
}
spatstat/R/layered.R0000755000176200001440000002650713115271075014075 0ustar liggesusers#
# layered.R
#
# Simple mechanism for layered plotting
#
# $Revision: 1.39 $ $Date: 2017/06/05 10:31:58 $
#

## Create a "layered" object: a list of spatial objects to be plotted
## in order, with an optional list of plot arguments for each layer.
## Either supply the layers as '...' or as a single list 'LayerList'.
layered <- function(..., plotargs=NULL, LayerList=NULL) {
  argh <- list(...)
  if(length(argh) > 0 && !is.null(LayerList))
    stop("LayerList is incompatible with other arguments")
  out <- if(!is.null(LayerList)) LayerList else argh
  n <- length(out)
  # ensure every layer has a (nonempty) name
  if(sum(nzchar(names(out))) != n)
    names(out) <- paste("Layer", seq_len(n))
  if(is.null(plotargs)) {
    plotargs <- rep.int(list(list()), n)
  } else {
    if(!is.list(plotargs))
      stop("plotargs should be a list of lists")
    # a single list of arguments is recycled to all layers
    if(!all(unlist(lapply(plotargs, is.list))))
      plotargs <- list(plotargs)
    np <- length(plotargs)
    if(np == 1) plotargs <- rep(plotargs, n) else
    if(np != n)
      stop("plotargs should have one component for each element of the list")
  }
  names(plotargs) <- names(out)
  attr(out, "plotargs") <- plotargs
  class(out) <- c("layered", class(out))
  return(out)
}

## (body continues in the next chunk)
print.layered <- function(x, ...)
{
  ## Print each layer in turn, then note which layers carry plot arguments.
  splat("Layered object")
  if(length(x) == 0) splat("(no entries)")
  for(i in seq_along(x)) {
    cat(paste("\n", names(x)[i], ":\n", sep=""))
    print(x[[i]])
  }
  pl <- layerplotargs(x)
  hasplot <- (lengths(pl) > 0)
  if(any(hasplot))
    splat("Includes plot arguments for", commasep(names(pl)[hasplot]))
  invisible(NULL)
}

## Plot method for "layered": plot the selected layers in order,
## on a common plot region determined from their bounding boxes.
##   which:    optional subset of layers to plot
##   plotargs: per-layer plot arguments, overriding those stored in x
##   do.plot:  if FALSE, just compute and return the layout information
plot.layered <- function(x, ..., which=NULL, plotargs=NULL,
                         add=FALSE, show.all=!add, main=NULL,
                         do.plot=TRUE) {
  if(is.null(main)) main <- short.deparse(substitute(x))
  n <- length(x)
  if(!is.null(plotargs)) {
    np <- length(plotargs)
    if(!(is.list(plotargs) && all(unlist(lapply(plotargs, is.list)))))
      stop("plotargs should be a list of lists")
  }
  ## select layers
  if(!is.null(which)) {
    x <- x[which]
    nw <- length(x)
    if(!is.null(plotargs)) {
      # subset or recycle the plot arguments to match the selection
      if(np == n) plotargs <- plotargs[which] else
      if(np == 1) plotargs <- rep(plotargs, nw) else
      if(np != nw)
        stop("plotargs should have one component for each layer to be plotted")
    }
    n <- nw
  } else if(!is.null(plotargs)) {
    if(np == 1) plotargs <- rep(plotargs, n) else
    if(np != n) stop("plotargs should have one component for each layer")
  }
  ## remove null layers
  if(any(isnul <- unlist(lapply(x, is.null)))) {
    x <- x[!isnul]
    if(!is.null(plotargs)) plotargs <- plotargs[!isnul]
    n <- length(x)
  }
  ## anything to plot?
  if(n == 0) return(invisible(NULL))
  ## Merge plotting arguments
  ## (explicit 'plotargs' take precedence over those stored in x)
  xplotargs <- layerplotargs(x)
  if(is.null(plotargs)) {
    plotargs <- xplotargs
  } else if(length(xplotargs) > 0) {
    for(i in 1:n)
      plotargs[[i]] <- resolve.defaults(plotargs[[i]], xplotargs[[i]])
  }
  ## Determine bounding box (dry run, do.plot=FALSE)
  a <- plotEachLayer(x, ..., plotargs=plotargs, add=add,
                     show.all=show.all, do.plot=FALSE)
  if(!do.plot) return(a)
  bb <- as.rectangle(as.owin(a))
  ## Start plotting
  if(!add && !is.null(bb)) {
    ## initialise new plot using bounding box
    pt <- prepareTitle(main)
    plot(bb, type="n", main=pt$blank)
    add <- TRUE
  }
  # plot the layers
  out <- plotEachLayer(x, ..., main=main, plotargs=plotargs, add=add,
                       show.all=show.all, do.plot=TRUE)
  return(invisible(out))
}

## Workhorse for plot.layered.
## do.plot=TRUE  => plot the layers
## do.plot=FALSE => determine bounding boxes
plotEachLayer <- function(x, ..., main, plotargs, add, show.all,
                          do.plot=TRUE) {
  main.given <- !missing(main)
  ## do.plot=TRUE => plot the layers
  ## do.plot=FALSE => determine bounding boxes
  out <- boxes <- list()
  nama <- names(x)
  firstlayer <- TRUE
  for(i in seq_along(x)) {
    xi <- x[[i]]
    if(length(xi) == 0) {
      # null layer - no plotting
      out[[i]] <- boxes[[i]] <- NULL
    } else {
      ## plot layer i on top of previous layers if any.
      ## By default,
      ## - show all graphic elements of the first component only;
      ## - show title 'firstmain' on first component;
      ## - do not show any component names.
      add.i <- add || !firstlayer
      if(main.given) {
        main.i <- if(firstlayer) main else ""
      } else {
        show.all.i <- resolve.1.default(list(show.all=FALSE),
                                        list(...), plotargs[[i]])
        main.i <- if(show.all.i) nama[i] else ""
      }
      dflt <- list(main=main.i, show.all=show.all && firstlayer)
      pla.i <- plotargs[[i]]
      # a ".plot" entry names a custom plotting function for this layer
      defaultplot <- !(".plot" %in% names(pla.i))
      ## plot layer i, or just determine bounding box
      if(defaultplot &&
         inherits(xi, c("ppp", "psp", "owin", "lpp", "linnet",
                        "im", "msr", "layered"))) {
        ## plot method for 'xi' has argument 'do.plot'.
        mplf <- if(inherits(xi, c("ppp", "lpp"))) list(multiplot=FALSE) else list()
        out[[i]] <- outi <-
          do.call(plot,
                  resolve.defaults(list(x=xi, add=add.i, do.plot=do.plot),
                                   list(...), mplf, pla.i, dflt))
        boxes[[i]] <- as.rectangle(as.owin(outi))
      } else {
        ## plot method for 'xi' does not have argument 'do.plot'
        if(do.plot) {
          if(defaultplot) {
            plotfun <- "plot"
          } else {
            plotfun <- pla.i[[".plot"]]
            pla.i <- pla.i[names(pla.i) != ".plot"]
          }
          out[[i]] <- outi <-
            do.call(plotfun,
                    resolve.defaults(list(x=xi, add=add.i),
                                     list(...), pla.i, dflt))
        }
        ## convert layer i to box
        boxi <- try(as.rectangle(xi), silent=TRUE)
        boxes[[i]] <- if(!inherits(boxi, "try-error")) boxi else NULL
      }
      firstlayer <- FALSE
    }
  }
  ## one box to bound them all
  if(!all(unlist(lapply(boxes, is.null))))
    attr(out, "bbox") <- do.call(boundingbox, boxes)
  return(out)
}

## Subset method for "layered".
## 'i' selects layers (or, if a window, restricts each layer spatially);
## 'j' is passed to the subset operator of each layer.
## (body continues in the next chunk)
"[.layered" <- function(x, i, j, drop=FALSE, ...) {
  i.given <- !missing(i) && !is.null(i)
  j.given <- !missing(j) && !is.null(j)
  if(!i.given && !j.given) return(x)
  p <- attr(x, "plotargs")
  x <- unclass(x)
  nx <- length(x)
  if(i.given) {
    if(is.owin(i)) {
      #' spatial window subset
      nonemp <- (lengths(x) != 0)
      x[nonemp] <- lapply(x[nonemp], "[", i=i, ...)
    } else {
      #' vector subset index
      x <- x[i]
      p <- p[i]
      nx <- length(x)
    }
  }
  if(j.given) {
    nonemp <- (lengths(x) != 0)
    x[nonemp] <- lapply(x[nonemp], "[", i=j, ...)
  }
  # drop=TRUE with a single remaining layer: return the layer itself
  if(drop && nx == 1) return(x[[1L]])
  y <- layered(LayerList=x, plotargs=p)
  return(y)
}

## Replace a single layer; assigning NULL removes it.
"[[<-.layered" <- function(x, i, value) {
  x[i] <- if(!is.null(value)) list(value) else NULL
  return(x)
}

## Replace layers, keeping names and per-layer plot arguments consistent.
"[<-.layered" <- function(x, i, value) {
  p <- layerplotargs(x)
  ## invoke list method
  y <- x
  class(y) <- "list"
  y[i] <- value
  # make it a 'layered' object too
  class(y) <- c("layered", class(y))
  # update names and plotargs
  if(any(blank <- !nzchar(names(y)))) {
    names(y)[blank] <- paste("Layer", which(blank))
    # carry over plot arguments for layers that survived, by name
    pnew <- rep(list(list()), length(y))
    names(pnew) <- names(y)
    m <- match(names(y), names(x))
    mok <- !is.na(m)
    pnew[mok] <- p[m[mok]]
    layerplotargs(y) <- pnew
  } else layerplotargs(y) <- layerplotargs(x)[names(y)]
  return(y)
}

## Accessor for the per-layer plot arguments.
layerplotargs <- function(L) {
  stopifnot(inherits(L, "layered"))
  attr(L, "plotargs")
}

## Assign per-layer plot arguments.
## A plain list of arguments (not a list-of-lists) is recycled to all layers.
"layerplotargs<-" <- function(L, value) {
  if(!inherits(L, "layered")) L <- layered(L)
  if(!is.list(value))
    stop("Replacement value should be a list, or a list-of-lists")
  n <- length(L)
  if(!all(unlist(lapply(value, is.list))))
    value <- unname(rep(list(value), n))
  if(length(value) != n) {
    if(length(value) == 1) value <- unname(rep(value, n)) else
      stop("Replacement value is wrong length")
  }
  if(is.null(names(value)))
    names(value) <- names(L) else
  if(!identical(names(value), names(L)))
    stop("Mismatch in names of list elements")
  attr(L, "plotargs") <- value
  return(L)
}

applytolayers <- function(L, FUN, ...) {
  # Apply FUN to each **non-null** layer,
  # preserving the plot arguments
  pla <- layerplotargs(L)
  if(length(L) > 0) {
    ok <- !unlist(lapply(L, is.null))
    L[ok] <- lapply(L[ok], FUN, ...)
  }
  Z <- layered(LayerList=L, plotargs=pla)
  return(Z)
}

## Geometrical transformations: apply the transformation to every layer.
shift.layered <- function(X, vec=c(0,0), ...) {
  if(length(list(...)) > 0) {
    if(!missing(vec))
      warning("Argument vec ignored; overridden by other arguments")
    ## ensure the same shift is applied to all layers
    s <- shift(X[[1L]], ...)
    vec <- getlastshift(s)
  }
  Y <- applytolayers(X, shift, vec=vec)
  attr(Y, "lastshift") <- vec
  return(Y)
}

affine.layered <- function(X, ...) {
  applytolayers(X, affine, ...)
}

rotate.layered <- function(X, ..., centre=NULL) {
  if(!is.null(centre)) {
    # move centre to origin, remember shift so it can be undone
    X <- shift(X, origin=centre)
    negorigin <- getlastshift(X)
  } else negorigin <- NULL
  Y <- applytolayers(X, rotate, ...)
  if(!is.null(negorigin)) Y <- shift(Y, -negorigin)
  return(Y)
}

reflect.layered <- function(X) {
  applytolayers(X, reflect)
}

flipxy.layered <- function(X) {
  applytolayers(X, flipxy)
}

scalardilate.layered <- function(X, ...) {
  applytolayers(X, scalardilate, ...)
}

rescale.layered <- function(X, s, unitname) {
  if(missing(s)) s <- NULL
  if(missing(unitname)) unitname <- NULL
  applytolayers(X, rescale, s=s, unitname=unitname)
}

## Window of a layered object: the union of the layers' windows.
as.owin.layered <- local({

  as.owin.layered <- function(W, ..., fatal=TRUE) {
    if(length(W) == 0) {
      if(fatal) stop("Layered object is empty: no window data")
      return(NULL)
    }
    ## remove null layers
    isnul <- unlist(lapply(W, is.null))
    W <- W[!isnul]
    if(length(W) == 0) {
      if(fatal) stop("Layered object has no window data")
      return(NULL)
    }
    Wlist <- lapply(unname(W), as.owin, ..., fatal=fatal)
    Wlist <- lapply(Wlist, rescue.rectangle)
    Wlist <- lapply(Wlist, puffbox)
    Z <- Wlist[[1L]]
    # take the union only if the windows actually differ
    if(length(Wlist) > 1) {
      same <- unlist(lapply(Wlist[-1L], identical, y=Z))
      if(!all(same)) Z <- do.call(union.owin, Wlist)
    }
    return(Z)
  }

  puffbox <- function(W) {
    ## union.owin will delete boxes that have width zero or height zero
    ## so 'puff' them out slightly
    ss <- sidelengths(Frame(W))
    if(ss[1L] == 0) W$xrange <- W$xrange + 1e-6 * c(-1,1) * ss[2L]
    if(ss[2L] == 0) W$yrange <- W$yrange + 1e-6 * c(-1,1) * ss[1L]
    return(W)
  }

  as.owin.layered
})

domain.layered <- Window.layered <- function(X, ...)
{ as.owin(X) }

as.layered <- function(X) {
  UseMethod("as.layered")
}

## Default: a list of spatial objects becomes its layers;
## anything else becomes a single layer.
as.layered.default <- function(X) {
  if(is.list(X) && all(sapply(X, is.sob)))
    layered(LayerList=X) else layered(X)
}

## Point patterns: split a multitype pattern into one layer per type;
## split multi-column marks into one layer per mark column.
as.layered.ppp <- function(X) {
  if(!is.marked(X)) return(layered(X))
  if(is.multitype(X)) return(layered(LayerList=split(X)))
  mX <- marks(X)
  if(!is.null(d <- dim(mX)) && d[2L] > 1) {
    mx <- as.data.frame(marks(X))
    Y <- lapply(mx, setmarks, x=X)
    return(layered(LayerList=Y))
  }
  return(layered(X))
}
spatstat/R/vcov.ppm.R0000755000176200001440000017120213115271120014210 0ustar liggesusers##
## Asymptotic covariance & correlation matrices
## and Fisher information matrix
## for ppm objects
##
##  $Revision: 1.129 $  $Date: 2017/06/05 10:31:58 $
##

## (definition continues beyond this chunk)
vcov.ppm <- local({

vcov.ppm <- function(object, ..., what="vcov", verbose=TRUE,
                     fine=FALSE,
                     gam.action=c("warn", "fatal", "silent"),
                     matrix.action=c("warn", "fatal", "silent"),
                     logi.action=c("warn", "fatal", "silent"),
                     hessian=FALSE) {
  verifyclass(object, "ppm")
  argh <- list(...)
gam.action <- match.arg(gam.action) matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) if(missing(fine) && ("A1dummy" %in% names(argh))) { message("Argument 'A1dummy' has been replaced by 'fine'") fine <- as.logical(argh$A1dummy) } else fine <- as.logical(fine) stopifnot(length(what) == 1 && is.character(what)) what.options <- c("vcov", "corr", "fisher", "Fisher", "internals", "all") what.map <- c("vcov", "corr", "fisher", "fisher", "internals", "all") if(is.na(m <- pmatch(what, what.options))) stop(paste("Unrecognised option: what=", sQuote(what))) what <- what.map[m] ## No vcov for Variational Bayes if(!is.null(object$internal$VB)) stop("Variance calculations currently not possible for variational Bayes fit.") ## no parameters, no variance if(length(coef(object)) == 0) { result <- switch(what, vcov=, corr=, fisher= { matrix(, 0, 0) }, internals=, all={ list() }) return(result) } ## nonstandard calculations (hack) generic.triggers <- c("A1", "new.coef", "matwt", "saveterms", "sparseOK") nonstandard <- any(generic.triggers %in% names(argh)) || fine # saveterms <- identical(resolve.1.default("saveterms", argh), TRUE) ## Fisher information *may* be contained in object fisher <- object$fisher varcov <- object$varcov ## Do we need to go into the guts? 
needguts <- nonstandard || (is.null(fisher) && what=="fisher") || (is.null(varcov) && what %in% c("vcov", "corr")) || (what %in% c("internals", "all")) ## In general it is not true that varcov = solve(fisher) ## because we might use different estimators, ## or the parameters might be a subset of the canonical parameter if(needguts) { ## warn if fitted model was obtained using GAM if(identical(object$fitter, "gam")) { switch(gam.action, fatal={ stop(paste("model was fitted by gam();", "execution halted because fatal=TRUE"), call.=FALSE) }, warn={ warning(paste("model was fitted by gam();", "asymptotic variance calculation ignores this"), call.=FALSE) }, silent={}) } ## ++++ perform main calculation ++++ if((is.poisson(object) || (hessian && what!="internals")) && object$method != "logi") { ## Poisson model, or Hessian of Gibbs model without internals results <- vcalcPois(object, ..., what=what, matrix.action=matrix.action, verbose=verbose, fisher=fisher) } else { ## Gibbs model results <- vcalcGibbs(object, ..., what=what, fine=fine, matrix.action=matrix.action, hessian = hessian) } varcov <- results$varcov fisher <- results$fisher internals <- results$internals } if(what %in% c("vcov", "corr") && is.null(varcov)) { ## Need variance-covariance matrix. if(!is.null(fisher) && is.poisson(object)) ## Derive from Fisher information varcov <- checksolve(fisher, matrix.action, "Fisher information matrix", "variance") } out <- switch(what, fisher = fisher, vcov = varcov, corr = { if(is.null(varcov)) return(NULL) sd <- sqrt(diag(varcov)) varcov / outer(sd, sd, "*") }, internals = internals, all = results ) return(out) } ## ................ variance calculation for Poisson models ............. 
vcalcPois <- function(object, ..., what = c("vcov", "corr", "fisher", "internals", "all"), matrix.action=c("warn", "fatal", "silent"), method=c("C", "interpreted"), verbose=TRUE, fisher=NULL, matwt=NULL, new.coef=NULL, dropcoef=FALSE, saveterms=FALSE) { ## variance-covariance matrix of Poisson model, ## or Hessian of Gibbs model what <- match.arg(what) method <- match.arg(method) matrix.action <- match.arg(matrix.action) if(reweighting <- !is.null(matwt)) stopifnot(is.numeric(matwt) && is.vector(matwt)) internals <- NULL nonstandard <- reweighting || !is.null(new.coef) || saveterms ## compute Fisher information if not known if(is.null(fisher) || nonstandard) { gf <- getglmfit(object) ## we need a glm or gam if(is.null(gf)) { if(verbose) warning("Refitting the model using GLM/GAM") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) if(is.null(gf)) stop("Internal error - refitting did not yield a glm object") } ## compute fitted intensity and sufficient statistic ltype <- if(is.poisson(object)) "trend" else "lambda" lambda <- fitted(object, type=ltype, new.coef=new.coef, dropcoef=dropcoef, check=FALSE) mom <- model.matrix(object) nmom <- nrow(mom) Q <- quad.ppm(object) wt <- w.quad(Q) ok <- getglmsubset(object) Z <- is.data(Q) ## save them if(what == "internals") { internals <- if(!saveterms) list(suff=mom) else list(suff=mom, mom=mom, lambda=lambda, Z=Z, ok=ok) } ## Now restrict all terms to the domain of the pseudolikelihood lambda <- lambda[ok] mom <- mom[ok, , drop=FALSE] wt <- wt[ok] Z <- Z[ok] ## apply weights to rows of model matrix - temporary hack if(reweighting) { nwt <- length(matwt) if(nwt == nmom) { ## matwt matches original quadrature scheme - trim it matwt <- matwt[ok] } else if(nwt != sum(ok)) stop("Hack argument matwt has incompatible length") mom.orig <- mom mom <- matwt * mom } ## compute Fisher information switch(method, C = { fisher <- sumouter(mom, lambda * wt) if(reweighting) { gradient <- sumouter(mom.orig, matwt * lambda * wt) } 
}, interpreted = { if(!reweighting) { fisher <- 0 for(i in 1:nrow(mom)) { ro <- mom[i, ] v <- outer(ro, ro, "*") * lambda[i] * wt[i] if(!anyNA(v)) fisher <- fisher + v } momnames <- dimnames(mom)[[2]] dimnames(fisher) <- list(momnames, momnames) } else { fisher <- gradient <- 0 for(i in 1:nrow(mom)) { ro <- mom[i, ] ro0 <- mom.orig[i,] ldu <- lambda[i] * wt[i] v <- outer(ro, ro, "*") * ldu v0 <- outer(ro0, ro0, "*") * matwt[i] * ldu if(!anyNA(v)) fisher <- fisher + v if(!anyNA(v0)) gradient <- gradient + v0 } momnames <- dimnames(mom)[[2]] dn <- list(momnames, momnames) dimnames(fisher) <- dimnames(gradient) <- dn } }) } if(what %in% c("all", "internals")) { ## Internals needed if(is.null(internals)) internals <- list(suff = model.matrix(object)) internals$fisher <- fisher if(reweighting) internals$gradient <- gradient ilist <- list(internals=internals) } if(what %in% c("all", "vcov", "corr")) { ## Variance-covariance matrix needed if(!reweighting) { ## Derive variance-covariance from Fisher info varcov <- checksolve(fisher, matrix.action, "Fisher information matrix", "variance") vcovlist <- list(fisher=fisher, varcov=varcov) } else { invgrad <- checksolve(gradient, matrix.action, "gradient matrix", "variance") varcov <- if(is.null(invgrad)) NULL else invgrad %*% fisher %*% invgrad vcovlist <- list(fisher=fisher, varcov=varcov, invgrad=invgrad) } } result <- switch(what, fisher = list(fisher=fisher), vcov = vcovlist, corr = vcovlist, internals = ilist, all = append(ilist, vcovlist)) return(result) } ## ...................... vcov calculation for Gibbs models .................... 
vcalcGibbs <- function(fit, ..., fine=FALSE, what = c("vcov", "corr", "fisher", "internals", "all"), generic=FALSE) { what <- match.arg(what) if(missing(generic)) { ## Change default to TRUE in certain cases ## For logistic fits, use generic method by default if(fit$method == "logi") generic <- TRUE ## For 'difficult' interactions, use generic method by default fasterbygeneric <- c("Areainter") if(as.interact(fit)$creator %in% fasterbygeneric) generic <- TRUE } ## decide whether to use the generic algorithm generic.triggers <- c("A1", "hessian", "new.coef", "matwt", "saveterms", "sparseOK") use.generic <- generic || fine || !is.stationary(fit) || (fit$method == "logi" && ("marks" %in% variablesinformula(fit$trend))) || (fit$method != "logi" && has.offset(fit)) || (fit$method == "logi" && has.offset.term(fit)) || !(fit$correction == "border" && fit$rbord == reach(fit)) || any(generic.triggers %in% names(list(...))) || !identical(options("contrasts")[[1]], c(unordered="contr.treatment", ordered="contr.poly")) ## compute spill <- (what %in% c("all", "internals", "fisher")) spill.vc <- (what == "all") out <- if(use.generic) vcalcGibbsGeneral(fit, ..., fine=fine, spill=spill, spill.vc=spill.vc) else vcalcGibbsSpecial(fit, ..., spill=spill, spill.vc=spill.vc) switch(what, vcov = , corr = { ## out is the variance-covariance matrix; return it return(list(varcov=out)) }, fisher = { ## out is a list of internal data: extract the Fisher info Fmat <- with(out, if(fit$method != "logi") Sigma else Sigma1log+Sigma2log) return(list(fisher=Fmat)) }, internals = { ## out is a list of internal data: return it ## (ensure model matrix is included) if(is.null(out$mom)) out$mom <- model.matrix(fit) return(list(internals=out)) }, all = { ## out is a list(internals, vc): return it ## (ensure model matrix is included) if(is.null(out$internals$mom)) out$internals$mom <- model.matrix(fit) ## ensure Fisher info is included if(is.null(out$internals$fisher)) { Fmat <- with(out$internals, 
if(fit$method != "logi") Sigma else Sigma1log+Sigma2log) out$internals$fisher <- Fmat } return(out) }, ) return(NULL) } ## ...................... general algorithm ........................... vcalcGibbsGeneral <- function(model, ..., spill = FALSE, spill.vc = FALSE, na.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), algorithm=c("vectorclip", "vector", "basic"), A1 = NULL, fine = FALSE, hessian = FALSE, matwt = NULL, new.coef = NULL, dropcoef=FALSE, saveterms = FALSE, parallel = TRUE, sparseOK = FALSE ) { na.action <- match.arg(na.action) matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) algorithm <- match.arg(algorithm) if(reweighting <- !is.null(matwt)) stopifnot(is.numeric(matwt) && is.vector(matwt)) spill <- spill || spill.vc saveterms <- spill && saveterms logi <- model$method=="logi" asked.parallel <- !missing(parallel) old.coef <- coef(model) use.coef <- adaptcoef(new.coef, old.coef, drop=dropcoef) p <- length(old.coef) if(p == 0) { ## this probably can't happen if(!spill) return(matrix(, 0, 0)) else return(list()) } pnames <- names(old.coef) dnames <- list(pnames, pnames) # (may be revised later) internals <- list() ## sumobj <- summary(model, quick="entries") correction <- model$correction rbord <- model$rbord R <- reach(model, epsilon=1e-2) Q <- quad.ppm(model) D <- dummy.ppm(model) rho <- model$internal$logistic$rho #### If dummy intensity rho is unknown we estimate it if(is.null(rho)) rho <- npoints(D)/(area(D)*markspace.integral(D)) X <- data.ppm(model) Z <- is.data(Q) W <- as.owin(model) areaW <- if(correction == "border") eroded.areas(W, rbord) else area(W) ## ## determine which quadrature points contributed to the ## sum/integral in the pseudolikelihood ## (e.g. 
some points may be excluded by the border correction) okall <- getglmsubset(model) ## conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i]) ## data and dummy: lamall <- fitted(model, check = FALSE, new.coef = new.coef, dropcoef=dropcoef) if(anyNA(lamall)) { whinge <- "Some values of the fitted conditional intensity are NA" switch(na.action, fatal = { stop(whinge, call.=FALSE) }, warn = { warning(whinge, call.=FALSE) okall <- okall & !is.na(lamall) }, silent = { okall <- okall & !is.na(lamall) }) } ## data only: lam <- lamall[Z] ok <- okall[Z] nX <- npoints(X) ## sufficient statistic h(X[i] | X) = h(X[i] | X[-i]) ## data and dummy: mall <- model.matrix(model) ## check dimension of canonical statistic if(ncol(mall) != length(pnames)) { if(!dropcoef) stop(paste("Internal error: dimension of sufficient statistic = ", ncol(mall), "does not match length of coefficient vector =", length(pnames)), call.=FALSE) p <- length(pnames) pnames <- colnames(mall) dnames <- list(pnames, pnames) } ## save if(saveterms) internals <- append(internals, list(mom=mall, lambda=lamall, Z=Z, ok=okall, matwt=matwt)) if(reweighting) { ## each column of the model matrix is multiplied by 'matwt' check.nvector(matwt, nrow(mall), things="quadrature points") mall.orig <- mall mall <- mall * matwt } ## subsets of model matrix mokall <- mall[okall, , drop=FALSE] ## data only: m <- mall[Z, , drop=FALSE] mok <- m[ok, , drop=FALSE] ## if(reweighting) { ## save unweighted versions mokall.orig <- mall.orig[okall, , drop=FALSE] m.orig <- mall.orig[Z, , drop=FALSE] mok.orig <- m.orig[ok, , drop=FALSE] ## matwtX <- matwt[Z] } ## ^^^^^^^^^^^^^^^^ First order (sensitivity) matrices A1, S ## logistic if(logi){ ## Sensitivity matrix S for logistic case Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2) dimnames(Slog) <- dnames ## A1 matrix for logistic case A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3) dimnames(A1log) <- dnames } ## Sensitivity matrix for 
MPLE case (= A1) if(is.null(A1) || reweighting) { if(fine){ A1 <- sumouter(mokall, w = (lamall * w.quad(Q))[okall]) if(reweighting) gradient <- sumouter(mokall.orig, w=(matwt * lamall * w.quad(Q))[okall]) } else{ A1 <- sumouter(mok) if(reweighting) gradient <- sumouter(mok.orig, w=matwtX) } } else { stopifnot(is.matrix(A1)) if(!all(dim(A1) == p)) stop(paste("Matrix A1 has wrong dimensions:", prange(dim(A1)), "!=", prange(c(p, p)))) } dimnames(A1) <- dnames ## ^^^^^^^^^^ Second order interaction effects A2, A3 if(hessian) { ## interaction terms suppressed A2 <- A3 <- matrix(0, p, p, dimnames=dnames) if(logi) A2log <- A3log <- matrix(0, p, p, dimnames=dnames) } else { ## ^^^^^^^^^^^^^^^^^^^^ `parallel' evaluation need.loop <- TRUE if(parallel) { ## compute second order difference ## ddS[i,j,] = h(X[i] | X) - h(X[i] | X[-j]) ddS <- deltasuffstat(model, restrict="pairs", force=FALSE, sparseOK=sparseOK) sparse <- inherits(ddS, "sparse3Darray") if(is.null(ddS)) { if(asked.parallel) warning("parallel option not available - reverting to loop") } else { need.loop <- FALSE ## rearrange so that ## ddS[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) ddS <- aperm(ddS, c(3,2,1)) ## now compute sum_{i,j} for i != j ## outer(ddS[,i,j], ddS[,j,i]) ddSok <- ddS[ , ok, ok, drop=FALSE] A3 <- sumsymouter(ddSok) ## compute pairweight and other arrays if(sparse) { ## Entries are only required for pairs i,j which interact. 
## mom.array[ ,i,j] = h(X[i] | X) mom.array <- mapSparseEntries(ddS, margin=2, values=m, conform=TRUE, across=1) ## momdel[ ,i,j] = h(X[i] | X[-j]) momdel <- mom.array - ddS ## pairweight[i,j] = lambda(X[i] | X[-j] )/lambda( X[i] | X ) - 1 pairweight <- expm1(tensor1x1(-use.coef, ddS)) } else { ## mom.array[ ,i,j] = h(X[i] | X) mom.array <- array(t(m), dim=c(p, nX, nX)) ## momdel[ ,i,j] = h(X[i] | X[-j]) momdel <- mom.array - ddS ## lamdel[i,j] = lambda(X[i] | X[-j]) lamdel <- matrix(lam, nX, nX) * exp(tensor::tensor(-use.coef, ddS, 1, 1)) ## pairweight[i,j] = lamdel[i,j]/lambda[i] - 1 pairweight <- lamdel / lam - 1 } ## now compute sum_{i,j} for i != j ## pairweight[i,j] * outer(momdel[,i,j], momdel[,j,i]) ## for data points that contributed to the pseudolikelihood momdelok <- momdel[ , ok, ok, drop=FALSE] pwok <- pairweight[ok, ok] if(anyNA(momdelok) || anyNA(pwok)) stop("Unable to compute variance: NA values present", call.=FALSE) A2 <- sumsymouter(momdelok, w=pwok) dimnames(A2) <- dimnames(A3) <- dnames if(logi){ if(!sparse) { ## lam.array[ ,i,j] = lambda(X[i] | X) lam.array <- array(lam, c(nX,nX,p)) lam.array <- aperm(lam.array, c(3,1,2)) ## lamdel.array[,i,j] = lambda(X[i] | X[-j]) lamdel.array <- array(lamdel, c(nX,nX,p)) lamdel.array <- aperm(lamdel.array, c(3,1,2)) momdellogi <- rho/(lamdel.array+rho)*momdel ddSlogi <- rho/(lam.array+rho)*mom.array - momdellogi } else { ## lam.array[ ,i,j] = lambda(X[i] | X) lam.array <- mapSparseEntries(ddS, margin=2, lam, conform=TRUE, across=1) ## lamdel.array[,i,j] = lambda(X[i] | X[-j]) pairweight.array <- aperm(as.sparse3Darray(pairweight), c(3,1,2)) lamdel.array <- pairweight.array * lam.array + lam.array lamdel.logi <- applySparseEntries(lamdel.array, function(y,rho) { rho/(rho+y) }, rho=rho) lam.logi <- applySparseEntries(lam.array, function(y,rho) { rho/(rho+y) }, rho=rho) momdellogi <- momdel * lamdel.logi ddSlogi <- mom.array * lam.logi - momdellogi } momdellogiok <- momdellogi[ , ok, ok, drop=FALSE] A2log <- 
sumsymouter(momdellogiok, w=pwok) ddSlogiok <- ddSlogi[ , ok, ok, drop=FALSE] A3log <- sumsymouter(ddSlogiok) dimnames(A2log) <- dimnames(A3log) <- dnames } } } ## ^^^^^^^^^^^^^^^^^^^^ loop evaluation if(need.loop) { A2 <- A3 <- matrix(0, p, p, dimnames=dnames) if(logi) A2log <- A3log <- matrix(0, p, p, dimnames=dnames) if(saveterms) { ## *initialise* matrices ## lamdel[i,j] = lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)]) lamdel <- matrix(lam, nX, nX) ## momdel[ ,i,j] = h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)]) momdel <- array(t(m), dim=c(p, nX, nX)) } ## identify close pairs if(is.finite(R)) { cl <- closepairs(X, R, what="indices") I <- cl$i J <- cl$j if(algorithm == "vectorclip") { cl2 <- closepairs(X, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } } else { ## either infinite reach, or something wrong IJ <- expand.grid(I=1:nX, J=1:nX) IJ <- subset(IJ, I != J) I2 <- I <- IJ$I J2 <- J <- IJ$J } ## filter: I and J must both belong to the nominated subset okIJ <- ok[I] & ok[J] I <- I[okIJ] J <- J[okIJ] ## if(length(I) > 0 && length(J) > 0) { ## .............. loop over pairs ........................ 
## The following ensures that 'empty' and 'X' have compatible marks empty <- X[integer(0)] ## make an empty 'equalpairs' matrix nonE <- matrix(, nrow=0, ncol=2) ## Run through pairs switch(algorithm, basic={ for(i in unique(I)) { Xi <- X[i] Ji <- unique(J[I==i]) if((nJi <- length(Ji)) > 0) { for(k in 1:nJi) { j <- Ji[k] X.ij <- X[-c(i,j)] ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] plamj.i <- predict(model, type="cif", locations=X[j], X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] pmj.i <- partialModelMatrix(X.ij, X[j], model)[nX-1, ] ## conditional intensity and sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] plami.j <- predict(model, type="cif", locations=X[i], X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi.j <- partialModelMatrix(X.ij, Xi, model)[nX-1, ] ## if(reweighting) { pmj.i <- pmj.i * matwtX[j] pmi.j <- pmi.j * matwtX[i] } if(saveterms) { lamdel[i,j] <- plami.j momdel[ , i, j] <- pmi.j lamdel[j,i] <- plamj.i momdel[ , j, i] <- pmj.i } ## increment A2, A3 wt <- plami.j / lam[i] - 1 A2 <- A2 + wt * outer(pmi.j, pmj.i) if(logi) A2log <- A2log + wt * rho/(plami.j+rho) * rho/(plamj.i+rho) * outer(pmi.j, pmj.i) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj.i deltajSi <- m[i, ] - pmi.j A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/ (lam[j]+rho) - pmj.i/(plamj.i+rho)) deltajSilog <- rho*(m[i, ]/ (lam[i]+rho) - pmi.j/(plami.j+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }, vector={ ## --------- faster algorithm using vector functions -------- for(i in unique(I)) { Ji <- 
unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## all points other than X[i] X.i <- X[-i] ## index of XJi in X.i J.i <- Ji - (Ji > i) ## equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] ## for all j plamj <- predict(model, type="cif", locations=XJi, X=X.i, check = FALSE, new.coef = new.coef, sumobj=sumobj, E=E.i) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] ## ## conditional intensity & sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] ## for all j plami <- numeric(nJi) pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] X.ij <- X[-c(i,j)] plami[k] <- predict(model, type="cif", locations=Xi, X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX-1, ] } ## if(reweighting) { pmj <- pmj * matwtX[Ji] pmi <- pmi * matwtX[i] } if(saveterms) { lamdel[Ji, i] <- plamj momdel[ , Ji, i] <- t(pmj) lamdel[i,Ji] <- plami momdel[ , i, Ji] <- t(pmi) } ## increment A2, A3 wt <- plami / lam[i] - 1 for(k in 1:nJi) { j <- Ji[k] A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,]) if(logi) A2log <- A2log + wt[k] * rho/(plami[k]+rho) * rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,]) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj[k,] deltajSi <- m[i, ] - pmi[k,] A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) - pmj[k,]/(plamj[k]+rho)) deltajSilog <- rho*(m[i, ]/(lam[i]+rho) - pmi[k,]/(plami[k]+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }, vectorclip={ ## 
--------- faster version of 'vector' algorithm ## -------- by removing non-interacting points of X for(i in unique(I)) { ## all points within 2R J2i <- unique(J2[I2==i]) ## all points within R Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) ## index of XJi in X.i J.i <- match(Ji, J2i) if(anyNA(J.i)) stop("Internal error: Ji not a subset of J2i") ## equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] ## for all j plamj <- predict(model, type="cif", locations=XJi, X=X.i, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=E.i) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] ## ## conditional intensity & sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] ## for all j plami <- numeric(nJi) pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] ## X.ij <- X[-c(i,j)] X.ij <- X.i[-J.i[k]] plami[k] <- predict(model, type="cif", locations=Xi, X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX.i, ] } ## if(reweighting) { pmj <- pmj * matwtX[Ji] pmi <- pmi * matwtX[i] } if(saveterms) { lamdel[Ji, i] <- plamj momdel[ , Ji, i] <- t(pmj) lamdel[i,Ji] <- plami momdel[ , i, Ji] <- t(pmi) } ## increment A2, A3 wt <- plami / lam[i] - 1 for(k in 1:nJi) { j <- Ji[k] A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,]) if(logi) A2log <- A2log + wt[k] * rho/(plami[k]+rho) * rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,]) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) 
deltaiSj <- m[j, ] - pmj[k,] deltajSi <- m[i, ] - pmi[k,] A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) - pmj[k,]/(plamj[k]+rho)) deltajSilog <- rho*(m[i, ]/(lam[i]+rho) - pmi[k,]/(plami[k]+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }) } } ## ......... end of loop computation ............... } #### Matrix Sigma Sigma <- A1+A2+A3 if(spill) { ## save internal data (with matrices unnormalised) internals <- c(internals, list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW), if(logi) list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog) else NULL, if(reweighting) list(gradient=gradient) else NULL, list(hessian = if(reweighting) gradient else if(logi) Slog else A1, fisher = Sigma), if(saveterms) list(lamdel=lamdel, momdel=momdel) else NULL) ## return internal data if no further calculation needed if(!spill.vc && !logi) return(internals) } ## ........... calculate variance/covariance matrix for MPL ......... if(!reweighting) { ## Normalise A1 <- A1/areaW Sigma <- Sigma/areaW ## Enforce exact symmetry A1 <- (A1 + t(A1))/2 Sigma <- (Sigma + t(Sigma))/2 ## calculate inverse negative Hessian U <- checksolve(A1, matrix.action, , "variance") } else { ## Normalise gradient <- gradient/areaW Sigma <- Sigma/areaW ## Enforce exact symmetry gradient <- (gradient + t(gradient))/2 Sigma <- (Sigma + t(Sigma))/2 ## calculate inverse negative Hessian U <- checksolve(gradient, matrix.action, , "variance") } ## compute variance-covariance vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW dimnames(vc.mpl) <- dnames ## return variance-covariance matrix, if model was fitted by MPL if(!logi) { if(spill.vc) return(list(varcov=vc.mpl, internals=internals)) return(vc.mpl) } ###### Everything below is only computed for logistic fits ####### ## Matrix Sigma1log (A1log+A2log+A3log): Sigma1log <- A1log+A2log+A3log ## Resolving the dummy process type how <- model$internal$logistic$how if(how %in% c("given", "grid", 
"transgrid")){ whinge <- paste("vcov is not implemented for dummy type", sQuote(how)) if(logi.action=="fatal") stop(whinge) how <- if(how=="given") "poisson" else "stratrand" if(logi.action=="warn") warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE) } ## Matrix Sigma2log (depends on dummy process type) switch(how, poisson={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) }, binomial={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2) Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho)) }, stratrand={ ## Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.): ## Changed by ER 2013/06/14 to use the new quadscheme.logi ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args) ## Q2 <- quad(data=X, dummy=D2) ## Q2$dummy$Dinfo <- D2$Dinfo Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = model$internal$logistic$nd) D2 <- Q2$dummy Q2$dummy$Dinfo <- D2$Dinfo Z2 <- is.data(Q2) arglist <- list(Q=Q2, trend=model$trend, interaction = model$interaction, method = model$method, correction = model$correction, rbord = model$rbord, covariates = model$covariates) arglist <- append(arglist, model$internal$logistic$extraargs) model2 <- do.call(ppm, args = arglist) ## New cif lamall2 <- fitted(model2, check = FALSE, new.coef = new.coef, dropcoef=dropcoef) ## New model matrix mall2 <- model.matrix(model2) okall2 <- getglmsubset(model2) ## index vectors of stratrand cell indices of dummy points inD <- model$internal$logistic$inD inD2 <- model2$internal$logistic$inD ## Dummy points inside eroded window (for border correction) if(is.finite(R) && (correction == "border")){ ii <- (bdist.points(D) >= R) ii2 <- (bdist.points(D2) >= R) } else{ ii <- rep.int(TRUE, npoints(D)) ii2 <- rep.int(TRUE, npoints(D2)) } ## OK points of 
dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa) okdum <- okall[!Z] okdum2 <- okall2[!Z2] ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2]) ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii]) ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2]) ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii]) ## ok1 <- ii & is.element(inD, inD2[ii2]) ## ok2 <- ii2 & is.element(inD2, inD[ii]) ## cif and suff. stat. for valid points in dummy patterns 1 and 2 lamdum <- lamall[!Z][ok1] lamdum2 <- lamall2[!Z2][ok2] mdum <- mall[!Z,,drop=FALSE][ok1,] mdum2 <- mall2[!Z2,,drop=FALSE][ok2,] ## finally calculation of Sigma2 wlam <- mdum * rho*lamdum/(lamdum+rho) wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho) Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho) }, stop("sorry - unrecognized dummy process in logistic fit") ) ## Attaching to Sigma2log calculated above dimnames(Sigma2log) <- dnames if(spill) { ## return internal data only (with matrices unnormalised) internals <- c(internals, list(Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl)) if(!spill.vc) return(internals) } ## .. Calculate variance-covariance matrix for logistic fit ........... 
## normalise Slog <- Slog/areaW Sigma1log <- Sigma1log/areaW Sigma2log <- Sigma2log/areaW ## evaluate Ulog <- checksolve(Slog, matrix.action, , "variance") vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW dimnames(vc.logi) <- dnames ## if(spill.vc) return(list(varcov=vc.logi, internals=internals)) return(vc.logi) } ## vcalcGibbs from Ege Rubak and J-F Coeurjolly ## 2013/06/14, modified by Ege to handle logistic case as well vcalcGibbsSpecial <- function(fit, ..., spill=FALSE, spill.vc=FALSE, special.alg = TRUE, matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent")) { matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) spill <- spill || spill.vc ## Interaction name: iname <- fit$interaction$name ## Does the model have marks which are in the trend? marx <- is.marked(fit) && ("marks" %in% variablesinformula(fit$trend)) ## The full data and window: Xplus <- data.ppm(fit) Wplus <- as.owin(Xplus) ## Fitted parameters and the parameter dimension p (later consiting of p1 trend param. 
and p2 interaction param.): theta <- coef(fit) p <- length(theta) ## Number of points: n <- npoints(Xplus) ## Using the faster algorithms for special cases if(special.alg && fit$method != "logi"){ param <- coef(fit) switch(iname, "Strauss process"={ ## Only implemented for non-marked case: if(!marx) return(vcovPairPiece(Xplus, reach(fit$interaction), exp(coef(fit)[2]), matrix.action, spill=spill, spill.vc=spill.vc)) }, "Piecewise constant pairwise interaction process"={ ## Only implemented for non-marked case: if(!marx) return(vcovPairPiece(Xplus, fit$interaction$par$r, exp(coef(fit)[-1]), matrix.action, spill=spill, spill.vc=spill.vc)) }, "Multitype Strauss process"={ matR <- fit$interaction$par$radii R <- c(matR[1,1], matR[1,2], matR[2,2]) ## Only implemented for 2 types with equal interaction range: if(ncol(matR)==2 && marx){ n <- length(theta) res <- vcovMultiStrauss(Xplus, R, exp(theta[c(n-2,n-1,n)]), matrix.action,spill=spill,spill.vc=spill.vc) if(!spill) { res <- contrastmatrix(res, 2) dimnames(res) <- list(names(theta), names(theta)) } return(res) } } ) } ## Matrix specifying equal points in the two patterns in the call to eval below: E <- matrix(rep.int(1:n, 2), ncol = 2) ## Eval. 
the interaction potential difference at all points (internal spatstat function): # V1 <- fit$interaction$family$eval(Xplus, Xplus, E, fit$interaction$pot, fit$interaction$par, fit$correction) oldopt <- NULL if(fit$interaction$family$name=="pairwise"){ oldopt <- spatstat.options(fasteval = "off") } V1 <- evalInteraction(Xplus, Xplus, E, as.interact(fit), fit$correction) spatstat.options(oldopt) ## Calculate parameter dimensions and correct the contrast type parameters: p2 <- ncol(V1) p1 <- p-p2 if(p1>1) theta[2:p1] <- theta[2:p1] + theta[1] ## V1 <- evalInteraction(Q, Xplus, union.quad(Q), fit$interaction, fit$correction) POT <- attr(V1, "POT") attr(V1, "POT") <- NULL ## Adding the constant potential as first column (one column per type for multitype): if(!marx){ V1 <- cbind(1, V1) colnames(V1) <- names(theta) } else{ lev <- levels(marks(Xplus)) ## Indicator matrix for mark type attached to V1: tmp <- matrix(marks(Xplus), nrow(V1), p1)==matrix(lev, nrow(V1), p-ncol(V1), byrow=TRUE) colnames(tmp) <- lev V1 <- cbind(tmp,V1) } ## Matrices for differences of potentials: E <- matrix(rep.int(1:(n-1), 2), ncol = 2) dV <- V2 <- array(0,dim=c(n,n,p)) for(k in 1:p1){ V2[,,k] <- matrix(V1[,k], n, n, byrow = FALSE) } for(k in (p1+1):p){ diag(V2[,,k]) <- V1[,k] } for(j in 1:n){ ## Fast evaluation for pairwise interaction processes: if(fit$interaction$family$name=="pairwise" && !is.null(POT)){ V2[-j,j,-(1:p1)] <- V1[-j,-(1:p1)]-POT[-j,j,] } else{ V2[-j,j,-(1:p1)] <- fit$interaction$family$eval(Xplus[-j], Xplus[-j], E, fit$interaction$pot, fit$interaction$par, fit$correction) ## Q <- quadscheme(Xplus[-j],emptyppp) ## V2[-j,j,-1] <- evalInteraction(Q, Xplus[-j], Xplus[-j], fit$interaction, fit$correction) } for(k in 1:p){ dV[,j,k] <- V1[,k] - V2[,j,k] } } ## Ratio of first and second order Papangelou - 1: frac <- 0*dV[,,1] for(k in (p1+1):p){ frac <- frac + dV[,,k]*theta[k] } frac <- exp(-frac)-1 ## In the rest we restrict attention to points in the interior: ## The interaction 
range: R <- reach(fit$interaction) ## The reduced window, area and point pattern: W<-erosion.owin(Wplus,R) areaW <- area(W) ## Interior points determined by bdist.points: IntPoints <- bdist.points(Xplus)>=R X <- Xplus[IntPoints] ## Making a logical matrix, I, indicating R-close pairs which are in the interior: D <- pairdist(Xplus) diag(D) <- Inf I <- (D<=R) & outer(IntPoints,IntPoints, "&") ## Matrix A1: A1 <- t(V1[IntPoints,])%*%V1[IntPoints,] ## Matrix A2: A2 <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A2[k,l] <- A2[l,k] <- sum(I*V2[,,k]*frac*t(V2[,,l])) } } ## Matrix A3: A3 <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A3[k,l] <- A3[l,k] <- sum(I*dV[,,k]*t(dV[,,l])) } } ## Matrix Sigma (A1+A2+A3): Sigma<-A1+A2+A3 if(spill) { # save internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- list(names(theta), names(theta)) internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW) # return internal data, if model fitted by MPL if(!spill.vc && fit$method != "logi") return(internals) } # ......... Calculate variance-covariance matrix for MPL ........ 
# normalise A1 <- A1/areaW Sigma <- Sigma/areaW # evaluate U <- checksolve(A1, matrix.action, , "variance") vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW ## Convert to treatment contrasts if(marx) vc.mpl <- contrastmatrix(vc.mpl, p1) dimnames(vc.mpl) <- list(names(theta), names(theta)) # Return result for standard ppm method: if(fit$method!="logi") { if(spill.vc) return(list(varcov=vc.mpl, internals=internals)) return(vc.mpl) } ######################################################################## ###### The remainder is only executed when the method is logistic ###### ######################################################################## ### Most of this is copy/pasted from vcalcGibbsGeneral correction <- fit$correction Q <- quad.ppm(fit) D <- dummy.ppm(fit) rho <- fit$internal$logistic$rho ## If dummy intensity rho is unknown we estimate it if(is.null(rho)) rho <- npoints(D)/(area(D)*markspace.integral(D)) X <- data.ppm(fit) Z <- is.data(Q) # determine which data points entered into the sum in the pseudolikelihood # (border correction, nonzero cif) # data and dummy: okall <- getglmsubset(fit) ## # data only: ## ok <- okall[Z] # conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i]) # data and dummy: lamall <- fitted(fit, check = FALSE) ## # data only: ## lam <- lamall[Z] # sufficient statistic h(X[i] | X) = h(X[i] | X[-i]) # data and dummy: mall <- model.matrix(fit) mokall <- mall[okall, , drop=FALSE] ## # data only: ## m <- mall[Z, , drop=FALSE] ## mok <- m[ok, , drop=FALSE] # Sensitivity matrix S and A1 matrix for logistic case Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2) A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3) ## Define W1, W2 and dW for the logistic method based on V1, V2 and dV (frac is unchanged) lambda1 <- exp(.rowSums(matrix(theta,n,p,byrow=TRUE)*V1, n, p)) W1 <- V1*rho/(lambda1+rho) lambda2 <- exp(apply(array(rep(theta,each=n*n),dim=c(n,n,p))*V2, c(1,2), 
sum)) W2 <- V2 dW <- dV for(k in 1:p){ W2[,,k] <- V2[,,k] * rho/(lambda2+rho) for(j in 1:n){ dW[,j,k] <- W1[,k] - W2[,j,k] } } ## Matrices A2log and A3log for the first component Sigma1log of the variance: A2log <- A3log <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A2log[k,l] <- A2log[l,k] <- sum(I*W2[,,k]*frac*t(W2[,,l])) A3log[k,l] <- A3log[l,k] <- sum(I*dW[,,k]*t(dW[,,l])) } } A2log <- A2log A3log <- A3log ## First variance component Sigma1log (A1log+A2log+A3log): Sigma1log <- A1log+A2log+A3log ## Resolving the dummy process type how <- fit$internal$logistic$how if(how %in% c("given", "grid", "transgrid")){ whinge <- paste("vcov is not implemented for dummy type", sQuote(how)) if(logi.action=="fatal") stop(whinge) how <- if(how=="given") "poisson" else "stratrand" if(logi.action=="warn") warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE) } ## Matrix Sigma2log (depends on dummy process type) switch(how, poisson={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) }, binomial={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2) Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho)) }, stratrand={ ### Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.): ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args) ## Q2 <- quad(data=X, dummy=D2) ## Q2$dummy$Dinfo <- D2$Dinfo Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = fit$internal$logistic$nd) D2 <- Q2$dummy Z2 <- is.data(Q2) arglist <- list(Q=Q2, trend=fit$trend, interaction = fit$interaction, method = fit$method, correction = fit$correction, rbord = fit$rbord, covariates = fit$covariates) arglist <- append(arglist, fit$internal$logistic$extraargs) fit2 <- do.call(ppm, args = arglist) ## New cif lamall2 <- fitted(fit2, check=FALSE) ## New model 
matrix mall2 <- model.matrix(fit2) okall2 <- getglmsubset(fit2) # index vectors of stratrand cell indices of dummy points inD <- fit$internal$logistic$inD inD2 <- fit2$internal$logistic$inD # Dummy points inside eroded window (for border correction) if(is.finite(R) && (correction == "border")){ ii <- inside.owin(D, w = W) ii2 <- inside.owin(D2, w = W) } else{ ii <- rep.int(TRUE, npoints(D)) ii2 <- rep.int(TRUE, npoints(D2)) } # OK points of dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa) okdum <- okall[!Z] okdum2 <- okall2[!Z2] ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2]) ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii]) ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2]) ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii]) ## ok1 <- ii & is.element(inD, inD2[ii2]) ## ok2 <- ii2 & is.element(inD2, inD[ii]) # cif and suff. stat. for valid points in dummy patterns 1 and 2 lamdum <- lamall[!Z][ok1] lamdum2 <- lamall2[!Z2][ok2] mdum <- mall[!Z,][ok1,] mdum2 <- mall2[!Z2,][ok2,] # finally calculation of Sigma2 wlam <- mdum * rho*lamdum/(lamdum+rho) wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho) Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho) }, stop("sorry - unrecognized dummy process in logistic fit") ) if(spill) { ## Attach dimnames to all matrices dimnames(Sigma2log) <- dimnames(Slog) <- dimnames(Sigma1log) <- dimnames(A1log) <- dimnames(A2log) <- dimnames(A3log) <- list(names(theta),names(theta)) # return internal data (with matrices unnormalised) internals <- c(internals, list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog, Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl)) if(!spill.vc) return(internals) } # ....... Compute variance-covariance for logistic fit ............. 
# Normalise
## ---- tail of vcalcGibbsSpecial: variance-covariance for the logistic fit ----
## (body continues from earlier lines; Slog, Sigma1log, Sigma2log were
##  accumulated above; areaW is the area of the R-eroded window)
  Slog <- Slog/areaW
  Sigma1log <- Sigma1log/areaW
  Sigma2log <- Sigma2log/areaW
  ## Finally the result is calculated:
  ## sandwich estimate  S^{-1} (Sigma1 + Sigma2) S^{-1} / areaW,
  ## falling back to an all-NA matrix when Slog cannot be inverted
  Ulog <- checksolve(Slog, matrix.action, , "variance")
  vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else
             Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW
  #
  dimnames(vc.logi) <- list(names(theta), names(theta))
  if(spill.vc) return(list(varcov=vc.logi, internals=internals))
  return(vc.logi)
}

## vcovPairPiece:
## Variance-covariance computation for a (piecewise-constant) pairwise
## interaction process fitted by maximum pseudolikelihood.
##   Xplus  - point pattern observed on the dilated window (W expanded by Rmax)
##   R      - vector of breakpoints of the step potential (last entry = range)
##   Gam    - fitted interaction weights, one per distance band
## Returns the normalised variance-covariance matrix, or (with spill=TRUE)
## the unnormalised internal matrices A1, A2, A3, Sigma.
vcovPairPiece <- function(Xplus, R, Gam, matrix.action,
                          spill=FALSE, spill.vc=FALSE){
  ## R is the vector of breaks (R[length(R)]= range of the pp.
  ## Gam is the vector of weights
  Rmax <- R[length(R)]
  ## Xplus : point process observed in W+R
  ## Extracting the window and calculating area:
  Wplus<-as.owin(Xplus)
  W<-erosion.owin(Wplus,Rmax)
  areaW <- area(W)
  ## Interior points determined by bdist.points:
  ## (points farther than Rmax from the boundary, used for edge correction)
  IntPoints <- bdist.points(Xplus)>=Rmax
  X <- Xplus[IntPoints]
  nX <- npoints(X)
  nXplus <- npoints(Xplus)
  ## Matrix D with pairwise distances between points and infinite distance
  ## between a point and itself:
  Dplus<-pairdist(Xplus)
  D <- pairdist(X)
  diag(D) <- diag(Dplus) <- Inf
  ## logical matrix, I, indicating R-close pairs:
  p<-length(R)
  ## NOTE(review): 'T' is used here as a variable name, shadowing the
  ## built-in alias for TRUE within this function scope
  Tplus<-T<-matrix(0,X$n,p)
  I<-Iplus<-list()
  ## one indicator matrix and neighbour-count column per distance band
  for (i in 1:p){
    if (i==1){
      Iplus[[1]]<- Dplus <=R[1]
      I[[1]] <- D<=R[1]
    } else {
      Iplus[[i]]<- ((Dplus>R[i-1]) & (Dplus <=R[i]))
      I[[i]] <- ((D>R[i-1]) & (D <=R[i]))
    }
    ## Vector T with the number of $R$-close neighbours to each point:
    Tplus[,i]<- .colSums(Iplus[[i]], nXplus, nXplus)[IntPoints]
    T[,i] <- .colSums(I[[i]], nX, nX)
  }
  ## Matrices A1, A2 and A3 are initialized to zero:
  ## (p+1 rows/cols: intercept plus one coefficient per band)
  A1 <- A2 <- A3 <- matrix(0,p+1,p+1)
  ## A1 and A3:
  A1[1,1] <- npoints(X)
  for (j in (2:(p+1))){
    A1[1,j]<-A1[j,1]<-sum(Tplus[,j-1])
    A3[j,j]<-sum(T[,j-1])
    for (k in (2:(p+1))){
      A1[j,k]<-sum(Tplus[,j-1] * Tplus[,k-1])
    }
  }
  ## A2:
  ## second-order term; Gam^(-1)-1 plays the role of the pair weight
  for (j in (2:(p+1))){
    A2[1,1]<-A2[1,1]+(Gam[j-1]^(-1)-1)*sum(T[,j-1])
    for (l in (2:(p+1))){
      if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1]
      A2[1,j]<-A2[1,j]+(Gam[l-1]^(-1)-1)*sum(T[,l-1]*(vj) )
    }
    A2[j,1]<-A2[1,j]
    for (k in (2:(p+1))){
      for (l in (2:(p+1))){
        ## subtract 1 from the neighbour count when the band of the pair
        ## coincides with the band being differentiated
        if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1]
        if (l==k) vk<-Tplus[,k-1]-1 else vk<-Tplus[,k-1]
        A2[j,k]<-A2[j,k]+ (Gam[l-1]^(-1)-1)*sum(I[[l-1]]*outer(vj,vk))
      }
    }
  }
  Sigma<-A1+A2+A3
  nam <- c("(Intercept)", names(Gam))
  dnam <- list(nam, nam)
  if(spill) {
    # return internal data (with matrices unnormalised)
    dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam
    internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma)
    if(!spill.vc) return(internals)
  }
  ## Calculate variance-covariance
  # Normalise:
  A1 <- A1/areaW
  Sigma <- Sigma/areaW
  ## sandwich estimate with A1 as the sensitivity matrix
  U <- checksolve(A1, matrix.action, , "variance")
  mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else
         U%*%Sigma%*%U / areaW
  dimnames(mat) <- dnam
  if(spill.vc) return(list(varcov=mat, internals=internals))
  return(mat)
}

## vcovMultiStrauss:
## Variance-covariance computation for a two-type multitype Strauss process
## fitted by maximum pseudolikelihood.
vcovMultiStrauss <- function(Xplus, vecR, vecg, matrix.action,
                             spill=FALSE, spill.vc=FALSE){
  ## Xplus : marked Strauss point process
  ## with two types
  ## observed in W+R (R=max(R11,R12,R22))
  ## vecg = estimated parameters of interaction parameters
  ## ordered as the output of ppm, i.e. vecg=(g11,g12,g22)
  ## vecR = range for the diff. strauss ordered a vecg(R11,R12,R22)
  R <- max(vecR)
  R11<-vecR[1];R12<-vecR[2];R22<-vecR[3]
  ## Extracting the window and calculating area:
  Wplus<-as.owin(Xplus)
  W<-erosion.owin(Wplus,R)
  areaW <- area(W)
  ## split the pattern into the two mark types
  X1plus<-Xplus[Xplus$marks==levels(Xplus$marks)[1]]
  X2plus<-Xplus[Xplus$marks==levels(Xplus$marks)[2]]
  ## Interior points determined by bdist.points:
  IntPoints1 <- bdist.points(X1plus)>=R
  IntPoints2 <- bdist.points(X2plus)>=R
  X1 <- X1plus[IntPoints1]
  X2 <- X2plus[IntPoints2]
  nX1 <- npoints(X1)
  nX2 <- npoints(X2)
  nX1plus <- npoints(X1plus)
  nX2plus <- npoints(X2plus)
  ## Matrix D with pairwise distances between points and infinite distance
  ## between a point and itself:
  D1plus<-pairdist(X1plus)
  D1 <- pairdist(X1)
  diag(D1) <- diag(D1plus) <- Inf
  D2plus<-pairdist(X2plus)
  D2 <- pairdist(X2)
  diag(D2) <- diag(D2plus) <- Inf
  ## cross-type distances and neighbour counts within range R12
  D12plus<-crossdist(X1,X2plus)
  T12plus<- .rowSums(D12plus<=R12, nX1, nX2plus)
  D21plus<-crossdist(X2,X1plus)
  T21plus<- .rowSums(D21plus<=R12, nX2, nX1plus)
  I12<-crossdist(X1,X2)<=R12
  I21<-crossdist(X2,X1)<=R12
  T12<- .rowSums(I12, nX1, nX2)
  T21<- .rowSums(I21, nX2, nX1)
  ## logical matrix, I, indicating R-close pairs:
  I1plus<- D1plus <=R11
  I1 <- D1<=R11
  I2plus<- D2plus <=R22
  I2 <- D2<=R22
  ## Vector T with the number of $R$-close neighbours to each point:
  T1plus<- .colSums(I1plus, nX1plus, nX1plus)[IntPoints1]
  T1 <- .colSums(I1, nX1, nX1)
  T2plus<- .colSums(I2plus, nX2plus, nX2plus)[IntPoints2]
  T2 <- .colSums(I2, nX2, nX2)
  ## Matrices A1, A2 and A3 are initialized to zero:
  ## (5 rows/cols: two trend coefficients + three interaction coefficients)
  A1 <- A2 <- A3 <- matrix(0,5,5)
  ## A1 is filled:
  A1[1,1]<-npoints(X1)
  A1[1,3]<-A1[3,1]<-sum(T1plus)
  A1[1,4]<-A1[4,1]<-sum(T12plus)
  A1[2,2]<-npoints(X2)
  A1[2,5]<-A1[5,2]<-sum(T2plus)
  A1[2,4]<-A1[4,2]<-sum(T21plus)
  A1[3,3]<-sum(T1plus*T1plus)
  A1[3,4]<-A1[4,3]<-sum(T1plus*T12plus)
  A1[5,5]<-sum(T2plus*T2plus)
  A1[4,5]<-A1[5,4]<-sum(T2plus*T21plus)
  A1[4,4]<-sum(T12plus*T12plus)+sum(T21plus*T21plus)
  ## A3 is filled:
  A3[3,3]<-sum(T1)
  A3[5,5]<-sum(T2)
  A3[4,4]<-sum(T12)+sum(T21)
  ## A2 is filled:
gamInv<-vecg^(-1)-1 gi1<-gamInv[1];gi12<-gamInv[2];gi2<-gamInv[3] A2[1,1]<-sum(T1)*gi1 A2[1,2]<-A2[2,1]<-sum(T12)*gi12 A2[1,3]<-A2[3,1]<-sum(T1*(T1plus-1))*gi1 A2[1,5]<-A2[5,1]<-sum(T21*T2plus)*gi12 A2[1,4]<-A2[4,1]<-gi1*sum(T1*(T12plus))+gi12*sum(T21*(T21plus-1)) A2[2,2]<-sum(T2)*gi2 A2[2,3]<-A2[3,2]<-sum(T12*T1plus)*gi12 A2[2,5]<-A2[5,2]<-sum(T2*(T2plus-1))*gi2 A2[2,4]<-A2[4,2]<-gi2*sum(T2*(T21plus))+gi12*sum(T12*(T12plus-1)) A2[3,3]<-gi1*sum(I1*outer(T1plus-1,T1plus-1)) A2[3,5]<-A2[5,3]<- gi12*sum(I12*outer(T1plus,T2plus)) A2[3,4]<-A2[4,3]<-gi1*sum(I1*outer(T1plus-1,T12plus))+gi12*sum(I12*outer(T1plus,T21plus-1)) A2[5,5]<-gi2*sum(I2*outer(T2plus-1,T2plus-1)) A2[4,5]<-A2[5,4]<-gi2*sum(I2*outer(T2plus-1,T21plus))+gi12*sum(I21*outer(T2plus,T12plus-1)) A2[4,4]<-gi1*sum(I1*outer(T12plus,T12plus))+gi2*sum(I2*outer(T21plus,T21plus))+ gi12*sum(I12*outer(T12plus-1,T21plus-1))+gi12*sum(I21*outer(T21plus-1,T12plus-1)) Sigma<-A1+A2+A3 nam <- c(levels(marks(Xplus)), names(vecg)) dnam <- list(nam, nam) if(spill) { # return internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma) if(!spill.vc) return(internals) } ## Calculate variance-covariance # Normalise: A1 <- A1/areaW Sigma <- Sigma/areaW U <- checksolve(A1, matrix.action, , "variance") mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW dimnames(mat) <- dnam if(spill.vc) return(list(varcov=mat, internals=internals)) return(mat) } # Convert the first p rows & columns of variance matrix x # to variances of treatment contrasts contrastmatrix <- function(x,p){ mat <- x ## Correct column and row 1: for(i in 2:p){ mat[1,i] <- mat[i,1] <- x[1,i]-x[1,1] } ## Correct columns and rows 2,...,p: for(i in 2:p){ for(j in 2:p){ mat[i,j] <- x[1,1]-x[1,i]-x[1,j]+x[i,j] } for(j in (p+1):ncol(x)){ mat[i,j] <- mat[j,i] <- x[i,j]-x[1,j] } } mat } vcov.ppm } ) suffloc <- function(object) { 
verifyclass(object, "ppm") if(!is.poisson(object)) stop("Internals not available for Gibbs models") return(vcov(object, what="internals")$suff) } spatstat/R/summary.kppm.R0000644000176200001440000001031413115225157015105 0ustar liggesusers#' #' summary.kppm.R #' #' $Revision: 1.5 $ $Date: 2015/05/08 04:25:23 $ #' summary.kppm <- function(object, ..., quick=FALSE) { nama <- names(object) result <- unclass(object)[!(nama %in% c("X", "po", "call", "callframe"))] ## handle old format if(is.null(result$isPCP)) result$isPCP <- TRUE ## summarise trend component result$trend <- summary(as.ppm(object), ..., quick=quick) if(identical(quick, FALSE)) { theta <- coef(object) if(length(theta) > 0) { vc <- vcov(object, matrix.action="warn") if(!is.null(vc)) { se <- if(is.matrix(vc)) sqrt(diag(vc)) else if(length(vc) == 1) sqrt(vc) else NULL } if(!is.null(se)) { two <- qnorm(0.975) lo <- theta - two * se hi <- theta + two * se zval <- theta/se pval <- 2 * pnorm(abs(zval), lower.tail=FALSE) psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1), labels=c("***", "**", "*", " "), include.lowest=TRUE) ## table of coefficient estimates with SE and 95% CI result$coefs.SE.CI <- data.frame(Estimate=theta, S.E.=se, CI95.lo=lo, CI95.hi=hi, Ztest=psig, Zval=zval) } } } class(result) <- "summary.kppm" return(result) } coef.summary.kppm <- function(object, ...) { return(object$coefs.SE.CI) } print.summary.kppm <- function(x, ...) 
{ terselevel <- spatstat.options('terse') digits <- getOption('digits') isPCP <- x$isPCP splat(if(x$stationary) "Stationary" else "Inhomogeneous", if(isPCP) "cluster" else "Cox", "point process model") if(waxlyrical('extras', terselevel) && nchar(x$Xname) < 20) splat("Fitted to point pattern dataset", sQuote(x$Xname)) if(waxlyrical('gory', terselevel)) { switch(x$Fit$method, mincon = { splat("Fitted by minimum contrast") splat("\tSummary statistic:", x$Fit$StatName) }, clik =, clik2 = { splat("Fitted by maximum second order composite likelihood") splat("\trmax =", x$Fit$rmax) if(!is.null(wtf <- x$Fit$weightfun)) { cat("\tweight function: ") print(wtf) } }, palm = { splat("Fitted by maximum Palm likelihood") splat("\trmax =", x$Fit$rmax) if(!is.null(wtf <- x$Fit$weightfun)) { cat("\tweight function: ") print(wtf) } }, warning(paste("Unrecognised fitting method", sQuote(x$Fit$method))) ) } # ............... trend ......................... parbreak() splat("----------- TREND MODEL -----") print(x$trend, ...) # ..................... clusters ................ 
tableentry <- spatstatClusterModelInfo(x$clusters) parbreak() splat("-----------", if(isPCP) "CLUSTER" else "COX", "MODEL", "-----------") splat("Model:", tableentry$printmodelname(x)) parbreak() cm <- x$covmodel if(!isPCP) { # Covariance model - LGCP only splat("\tCovariance model:", cm$model) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\tCovariance parameters:", paste(tagvalue, collapse=", ")) } } pa <- x$clustpar if (!is.null(pa)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(pa, digits=digits) } if(!is.null(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]") } else { splat("Fitted mean of log of random intensity:", if(!is.im(mu)) signif(mu, digits) else "[pixel image]") } } # table of coefficient estimates with SE and 95% CI if(!is.null(cose <- x$coefs.SE.CI)) { parbreak() splat("Final standard error and CI") splat("(allowing for correlation of", if(isPCP) "cluster" else "Cox", "process):") print(cose) } invisible(NULL) } spatstat/R/badgey.R0000755000176200001440000001613213115271075013704 0ustar liggesusers# # # badgey.S # # $Revision: 1.16 $ $Date: 2016/04/25 02:34:40 $ # # Hybrid Geyer process # # BadGey() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # BadGey <- local({ # ........... auxiliary functions .............. delBG <- function(i, r, sat) { r <- r[-i] if(length(r) == length(sat)) { r <- r[-i] sat <- sat[-i] } else if(length(sat) == 1) { r <- r[-i] } else stop("Mismatch in dimensions of arguments r and sat") nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Geyer(r, sat)) return(BadGey(r, sat)) } # .............. template .................... 
BlankBG <- list( name = "hybrid Geyer process", creator = "BadGey", family = "pairsat.family", # will be evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) for(i in 1:nr) out[,,i] <- (d <= r[i]) out }, par = list(r = NULL, sat=NULL), # to fill in later parnames = c("interaction radii", "saturation parameters"), init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || !all(r > 0)) stop("interaction radii r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction radii r must be strictly increasing") if(!is.numeric(sat) || any(sat < 0)) stop("saturation parameters must be nonnegative numbers") if(length(sat) != length(r) && length(sat) != 1) stop("vectors r and sat must have equal length") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[0,", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma must be # non-NA # finite, if sat > 0 # less than 1, if sat = Inf gamma <- (self$interpret)(coeffs, self)$param$gammas sat <- self$par$sat if(anyNA(gamma)) return(FALSE) return(all((is.finite(gamma) | sat == 0) & (gamma <= 1 | sat != Inf))) }, project = function(coeffs, self){ loggammas <- as.numeric(coeffs) sat <- self$par$sat r <- self$par$r good <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0) if(all(good)) return(NULL) if(!any(good)) return(Poisson()) bad <- !good if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delBG(firstbad, r, sat)) } else { # consider all candidate submodels subs <- lapply(which(bad), delBG, r=r, sat=sat) return(subs) } 
}, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r sat <- self$par$sat if(all(is.na(coeffs))) return(2 * max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) & (sat > 0) if(!any(active)) return(0) else return(2 * max(r[active])) }, version=NULL, # to be added later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ..., halfway=FALSE) { # fast evaluator for BadGey interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for BadGey") r <- potpars$r sat <- potpars$sat # ensure r and sat have equal length if(length(r) != length(sat)) { if(length(r) == 1) r <- rep.int(r, length(sat)) else if(length(sat) == 1) sat <- rep.int(sat, length(r)) else stop("lengths of r and sat do not match") } # first ensure all data points are in U nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1L] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window) # correspondingly augment the list of equal pairs originalrows <- seq_len(nU) newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } nterms <- length(r) answer <- matrix(, nrow=nU, ncol=nterms) for(k in 1:nterms) { # first determine saturated pair counts counts <- strausscounts(U, X, r[k], EqualPairs) satcounts <- pmin.int(sat[k], counts) # trapdoor used by suffstat() if(halfway) answer[,k] <- satcounts else if(sat[k] == Inf) answer[,k] <- 2 * satcounts 
else { # extract counts for data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xcounts <- integer(npoints(X)) Xcounts[Xindex] <- counts[Uindex] # evaluate change in saturated counts of other data points change <- geyercounts(U, X, r[k], sat[k], Xcounts, EqualPairs) answer[,k] <- satcounts + change } } if(somemissing) answer <- answer[originalrows, , drop=FALSE] return(answer) } ) class(BlankBG) <- "interact" BadGey <- function(r, sat) { instantiate.interact(BlankBG, list(r=r, sat=sat)) } BadGey <- intermaker(BadGey, BlankBG) BadGey }) spatstat/R/solist.R0000644000176200001440000001303213145251111013747 0ustar liggesusers## ## solist.R ## ## Methods for class `solist' (spatial object list) ## ## and related classes 'anylist', 'ppplist', 'imlist' ## ## plot.solist is defined in plot.solist.R ## ## $Revision: 1.16 $ $Date: 2017/08/17 07:52:24 $ anylist <- function(...) { x <- list(...) class(x) <- c("anylist", "listof", class(x)) return(x) } print.anylist <- function (x, ...) { ll <- length(x) if(ll == 0) { splat("(Zero length list)") return(invisible(NULL)) } nn <- names(x) if (length(nn) != ll) nn <- paste("Component", seq.int(ll)) spaceok <- waxlyrical('space') for (i in seq_len(ll)) { splat(paste0(nn[i], ":")) print(x[[i]], ...) if(spaceok && i < ll) cat("\n") } return(invisible(NULL)) } as.anylist <- function(x) { if(inherits(x, "anylist")) return(x) if(!is.list(x)) x <- list(x) class(x) <- c("anylist", "listof", class(x)) return(x) } "[.anylist" <- function(x, i, ...) { cl <- oldClass(x) ## invoke list method y <- NextMethod("[") if(length(y) == 0) return(list()) class(y) <- cl return(y) } "[<-.anylist" <- function(x, i, value) { as.anylist(NextMethod("[<-")) } summary.anylist <- function(object, ...) { as.anylist(lapply(object, summary, ...)) } pool.anylist <- function(x, ...) { do.call(pool, append(x, list(...))) } ## .................... solist ............................. 
is.sob <- local({ ## test whether x is a spatial object suitable for solist sobjectclasses <- c("ppp", "psp", "im", "owin", "quad", "tess", "msr", "quadratcount", "quadrattest", "layered", "funxy", "distfun", "nnfun", "lpp", "linnet", "linfun", "influence.ppm", "leverage.ppm") # Note 'linim' inherits 'im' # 'dfbetas.ppm' inherits 'msr' is.sob <- function(x) { inherits(x, what=sobjectclasses) } is.sob }) solist <- function(..., check=TRUE, promote=TRUE, demote=FALSE) { stuff <- list(...) if((check || demote) && !all(sapply(stuff, is.sob))) { if(demote) return(as.anylist(stuff)) stop("Some arguments of solist() are not 2D spatial objects") } class(stuff) <- c("solist", "anylist", "listof", class(stuff)) if(promote) { if(all(unlist(lapply(stuff, is.ppp)))) { class(stuff) <- c("ppplist", class(stuff)) } else if(all(unlist(lapply(stuff, is.im)))) { class(stuff) <- c("imlist", class(stuff)) } } return(stuff) } as.solist <- function(x, ...) { if(inherits(x, "solist") && length(list(...)) == 0) return(x) if(!is.list(x) || is.sob(x)) x <- list(x) return(do.call(solist, append(x, list(...)))) } print.solist <- function (x, ...) { what <- if(inherits(x, "ppplist")) "point patterns" else if(inherits(x, "imlist")) "pixel images" else "spatial objects" splat(paste("List of", what)) parbreak() NextMethod("print") } "[.solist" <- function(x, i, ...) { cl <- oldClass(x) if(!missing(i) && is.owin(i)) { ## spatial subset y <- lapply(unclass(x), "[", i=i, ...) } else { ## invoke list method y <- NextMethod("[") } if(length(y) == 0) return(list()) class(y) <- cl return(y) } "[<-.solist" <- function(x, i, value) { ## invoke list method y <- NextMethod("[<-") ## check again return(do.call(solist, y)) } summary.solist <- function(object, ...) { x <- lapply(object, summary, ...) attr(x, "otype") <- if(inherits(object, "ppplist")) "ppp" else if(inherits(object, "imlist")) "im" else "" class(x) <- c("summary.solist", "anylist") x } print.summary.solist <- function(x, ...) 
{ what <- switch(attr(x, "otype"), ppp="point patterns", im="pixel images", "spatial objects") splat("Summary of", length(x), what) parbreak() NextMethod("print") } as.layered.solist <- function(X) { layered(LayerList=X) } #' ----- ppplist and imlist ---------------------------- #' for efficiency only as.ppplist <- function(x, check=TRUE) { if(check) { x <- as.solist(x) if(inherits(x, "ppplist")) return(x) stop("some entries are not point patterns") } class(x) <- unique(c("ppplist", "solist", "anylist", "listof", class(x))) return(x) } as.imlist <- function(x, check=TRUE) { if(check) { x <- as.solist(x) if(inherits(x, "imlist")) return(x) stop("some entries are not images") } class(x) <- unique(c("imlist", "solist", "anylist", "listof", class(x))) return(x) } # --------------- counterparts of 'lapply' -------------------- anylapply <- function(X, FUN, ...) { v <- lapply(X, FUN, ...) return(as.anylist(v)) } solapply <- function(X, FUN, ..., check=TRUE, promote=TRUE, demote=FALSE) { v <- lapply(X, FUN, ...) 
u <- as.solist(v, check=check, promote=promote, demote=demote) return(u) } density.ppplist <- function(x, ..., se=FALSE) { y <- lapply(x, density, ..., se=se) if(!se) return(as.solist(y, demote=TRUE)) y.est <- lapply(y, getElement, name="estimate") y.se <- lapply(y, getElement, name="SE") z <- list(estimate = as.solist(y.est, demote=TRUE), SE = as.solist(y.se, demote=TRUE)) return(z) } expandSpecialLists <- function(x, special="solist") { ## x is a list which may include entries which are lists, of class 'special' ## unlist these entries only hit <- sapply(x, inherits, what=special) if(!any(hit)) return(x) # wrap each *non*-special entry in list() x[!hit] <- lapply(x[!hit], list) # now strip one layer of list() from all entries return(unlist(x, recursive=FALSE)) } spatstat/R/marktable.R0000755000176200001440000000272113115271120014401 0ustar liggesusers# # marktable.R # # Tabulate mark frequencies in neighbourhood of each point # for multitype point patterns # # $Revision: 1.7 $ $Date: 2015/03/25 03:43:35 $ # # Requested by Ian Robertson "marktable" <- function(X, R, N, exclude=TRUE, collapse=FALSE) { verifyclass(X, "ppp") if(!is.marked(X, dfok=FALSE)) stop("point pattern has no marks") gotR <- !missing(R) && !is.null(R) gotN <- !missing(N) && !is.null(N) if(gotN == gotR) stop("Exactly one of the arguments N and R should be given") stopifnot(is.logical(exclude) && length(exclude) == 1) m <- marks(X) if(!is.factor(m)) stop("marks must be a factor") if(gotR) { stopifnot(is.numeric(R) && length(R) == 1 && R > 0) #' identify close pairs p <- closepairs(X,R,what="indices") pi <- p$i pj <- p$j if(!exclude) { #' add identical pairs n <- X$n pi <- c(pi, 1:n) pj <- c(pj, 1:n) } } else { stopifnot(is.numeric(N) && length(N) == 1) ii <- seq_len(npoints(X)) nn <- nnwhich(X, k=1:N) if(N == 1) nn <- matrix(nn, ncol=1) if(!exclude) nn <- cbind(ii, nn) pi <- as.vector(row(nn)) pj <- as.vector(nn) } #' tabulate if(!collapse) { ## table for each point i <- factor(pi, 
levels=seq_len(npoints(X))) mj <- m[pj] mat <- table(point=i, mark=mj) } else { #' table by type mi <- m[pi] mj <- m[pj] mat <- table(point=mi, neighbour=mj) } return(mat) } spatstat/R/bc.R0000644000176200001440000000405313115225157013031 0ustar liggesusers#' bc.R #' #' Bias correction techniques #' #' $Revision: 1.2 $ $Date: 2016/09/15 02:21:15 $ bc <- function(fit, ...) { UseMethod("bc") } bc.ppm <- function(fit, ..., nfine=256) { stopifnot(is.ppm(fit)) # theta0 <- coef(fit) nc <- length(theta0) # X <- data.ppm(fit) Z <- is.data(quad.ppm(fit)) # evaluate sufficient statistic at data points sufX <- model.matrix(fit)[Z, ] if(ncol(sufX) != nc) stop("Internal error: model.matrix does not match coef(model)") # predict on fine grid finemask <- as.mask(as.owin(fit), dimyx=nfine) lamF <- predict(fit, type="cif", locations=finemask) sufF <- model.images(fit, W=finemask) if(length(sufF) != nc) stop("Internal error: model.images does not match coef(model)") # edge correction if(fit$correction == "border" && ((rbord <- fit$rbord) > 0)) { b <- bdist.pixels(finemask) bX <- bdist.points(X) excludeU <- eval.im(b < rbord) retainX <- (bX >= rbord) sufX <- sufX[retainX, , drop=FALSE] } else { excludeU <- FALSE } # compute fine approximation to score scoreX <- colSums(sufX) scoreW <- numeric(nc) for(k in seq_len(nc)) { S <- sufF[[k]] # infinite values of S may occur and correspond to zero cif Slam <- eval.im(ifelse(is.infinite(S) | excludeU, 0, S * lamF)) scoreW[k] <- integral.im(Slam) } score <- scoreX - scoreW # Newton-Raphson Iinv <- vcov(fit, hessian=TRUE) theta <- theta0 + Iinv %*% score theta <- theta[ , 1L, drop=TRUE] # # return(list(theta0=theta0, theta=theta)) return(theta) } # Richardson extrapolation (generic) rex <- function(x, r=2, k=1, recursive=FALSE) { # x should be a matrix # whose columns are successive estimates of a parameter vector # obtained using "grid step sizes" t, t/r, t/r^2, ... 
# Estimate from step size t is assumed to converge at rate t^k if(!is.matrix(x)) x <- matrix(x, nrow=1) if(ncol(x) <= 1) return(x) rk <- r^k y <- (rk * x[, -1L, drop=FALSE] - x[, -ncol(x), drop=FALSE])/(rk - 1) if(recursive) y <- rex(y, r=r, k=k+1, recursive=TRUE) return(y) } spatstat/R/smoothfv.R0000755000176200001440000000322013115271120014277 0ustar liggesusers# # smoothfv.R # # $Revision: 1.13 $ $Date: 2014/01/15 10:03:35 $ # smooth.fv <- function(x, which="*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) { .Deprecated("Smooth.fv", package="spatstat", msg="smooth.fv is deprecated: use the generic Smooth with a capital S") Smooth(x, which=which, ..., method=method, xinterval=xinterval) } Smooth.fv <- function(X, which="*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) { x <- X stopifnot(is.character(which)) method <- match.arg(method) if(!is.null(xinterval)) check.range(xinterval) if(length(which) == 1 && which %in% .Spatstat.FvAbbrev) { if(which == ".x") stop("Cannot smooth the function argument") which <- fvnames(x, which) } if(any(nbg <- !(which %in% names(x)))) stop(paste("Unrecognised column", ngettext(sum(nbg), "name", "names"), commasep(sQuote(which[nbg])), "in argument", sQuote("which"))) xx <- x[[fvnames(x, ".x")]] # process each column of function values for(ynam in which) { yy <- x[[ynam]] ok <- is.finite(yy) if(!is.null(xinterval)) ok <- ok & inside.range(xx, xinterval) switch(method, smooth.spline = { ss <- smooth.spline(xx[ok], yy[ok], ...) yhat <- predict(ss, xx[ok])$y }, loess = { df <- data.frame(x=xx[ok], y=yy[ok]) lo <- loess(y ~ x, df, ...) 
yhat <- predict(lo, df[,"x", drop=FALSE]) }) yy[ok] <- yhat x[[ynam]] <- yy } return(x) } spatstat/R/plot.im.R0000755000176200001440000007405213164570352014044 0ustar liggesusers# # plot.im.R # # $Revision: 1.120 $ $Date: 2017/10/03 02:01:01 $ # # Plotting code for pixel images # # plot.im # image.im # contour.im # ########################################################################### plot.im <- local({ ## auxiliary functions image.doit <- function(imagedata, ..., extrargs=graphicsPars("image"), W, workaround=FALSE) { aarg <- resolve.defaults(...) add <- resolve.1.default(list(add=FALSE), aarg) show.all <- resolve.1.default(list(show.all=!add), aarg) addcontour <- resolve.1.default(list(addcontour=FALSE), aarg) args.contour <- resolve.1.default(list(args.contour=list()), aarg) if(add && show.all) { ## set up the window space *with* the main title ## using the same code as plot.owin, for consistency do.call.matched(plot.owin, resolve.defaults(list(x=W, type="n"), aarg), extrargs=graphicsPars("owin")) } if(workaround && identical(aarg$useRaster, TRUE)) { #' workaround for bug 16035 #' detect reversed coordinates usr <- par('usr') xrev <- (diff(usr[1:2]) < 0) yrev <- (diff(usr[3:4]) < 0) if(xrev || yrev) { #' flip matrix of pixel values, because the device driver does not z <- imagedata$z d <- dim(z) # z is in the orientation expected for image.default if(xrev) z <- z[d[1]:1, , drop=FALSE] if(yrev) z <- z[ , d[2]:1, drop=FALSE] imagedata$z <- z } } extrargs <- setdiff(extrargs, c("claim.title.space", "box")) z <- do.call.matched(image.default, append(imagedata, aarg), extrargs=extrargs) if(addcontour) do.call(do.contour, resolve.defaults(imagedata, list(add=TRUE), args.contour, list(col=par('fg')), aarg, .StripNull=TRUE)) return(z) } do.contour <- function(x, y, z, ..., drawlabels=TRUE) { nx <- length(x) ny <- length(y) nz <- dim(z) if(nx > nz[1]) { if(nz[1] == 1) { z <- rbind(z, z) nz <- dim(z) drawlabels <- FALSE } else { x <- (x[-1] + x[-nx])/2 nx <- nx-1 } } 
if(ny > nz[2]) { if(nz[2] == 1) { z <- cbind(z, z) nz <- dim(z) drawlabels <- FALSE } else { y <- (y[-1] + y[-ny])/2 ny <- ny-1 } } do.call.matched(contour.default, list(x=x, y=y, z=z, ..., drawlabels=drawlabels)) } do.box.etc <- function(bb, add, argh) do.call(box.etc, append(list(bb=bb, add=add), argh)) box.etc <- function(bb, ..., add=FALSE, axes=FALSE, box=!add) { # axes for image xr <- bb$xrange yr <- bb$yrange if(box) rect(xr[1], yr[1], xr[2], yr[2]) if(axes) { px <- pretty(xr) py <- pretty(yr) do.call.plotfun(graphics::axis, resolve.defaults( list(side=1, at=px), list(...), list(pos=yr[1])), extrargs=graphicsPars("axis")) do.call.plotfun(graphics::axis, resolve.defaults( list(side=2, at=py), list(...), list(pos=xr[1])), extrargs=graphicsPars("axis")) } } clamp <- function(x, v, tol=0.02 * diff(v)) { ok <- (x >= v[1] - tol) & (x <= v[2] + tol) x[ok] } cellbreaks <- function(x, dx) { nx <- length(x) seq(x[1] - dx/2, x[nx] + dx/2, length.out=nx+1) } log10orNA <- function(x) { y <- rep(NA_real_, length(x)) ok <- !is.na(x) & (x > 0) y[ok] <- log10(x[ok]) return(y) } Ticks <- function(usr, log=FALSE, nint=NULL, ...) { #' same as axisTicks but accepts nint=NULL as if it were missing if(!is.null(nint)) return(axisTicks(usr=usr, log=log, nint=nint, ...)) return(axisTicks(usr=usr, log=log, ...)) } # main function PlotIm <- function(x, ..., main, add=FALSE, clipwin=NULL, col=NULL, valuesAreColours=NULL, log=FALSE, ribbon=show.all, show.all=!add, ribside=c("right", "left", "bottom", "top"), ribsep=0.15, ribwid=0.05, ribn=1024, ribscale=1, ribargs=list(), colargs=list(), useRaster=NULL, workaround=FALSE, do.plot=TRUE) { if(missing(main)) main <- short.deparse(substitute(x)) verifyclass(x, "im") if(x$type == "complex") { cl <- match.call() cl$x <- solist(Re=Re(x), Im=Im(x), Mod=Mod(x), Arg=Arg(x)) cl[[1]] <- as.name('plot') cl$main <- main out <- eval(cl, parent.frame()) return(invisible(out)) } ribside <- match.arg(ribside) col.given <- !is.null(col) dotargs <- list(...) 
stopifnot(is.list(ribargs)) user.ticks <- ribargs$at user.nint <- ribargs$nint if(!is.null(clipwin)) { x <- x[as.rectangle(clipwin)] if(!is.rectangle(clipwin)) x <- x[clipwin, drop=FALSE] } zlim <- dotargs$zlim x <- repair.image.xycoords(x) xtype <- x$type xbox <- as.rectangle(x) do.log <- identical(log, TRUE) if(do.log && !(x$type %in% c("real", "integer"))) stop(paste("Log transform is undefined for an image of type", sQuote(xtype))) # determine whether pixel values are to be treated as colours if(!is.null(valuesAreColours)) { # argument given - validate stopifnot(is.logical(valuesAreColours)) if(valuesAreColours) { # pixel values must be factor or character if(!xtype %in% c("factor", "character")) { warning(paste("Pixel values of type", sQuote(xtype), "are not interpretable as colours")) valuesAreColours <- FALSE } else if(col.given) { # colour info provided: contradictory warning(paste("Pixel values are taken to be colour values,", "because valuesAreColours=TRUE;", "the colour map (argument col) is ignored"), call.=FALSE) col <- NULL } if(do.log) warning(paste("Pixel values are taken to be colour values,", "because valuesAreColours=TRUE;", "the argument log=TRUE is ignored"), call.=FALSE) } } else if(col.given) { # argument 'col' controls colours valuesAreColours <- FALSE } else if(spatstat.options("monochrome")) { valuesAreColours <- FALSE } else { ## default : determine whether pixel values are colours strings <- switch(xtype, character = { as.vector(x$v) }, factor = { levels(x) }, { NULL }) valuesAreColours <- is.character(strings) && !inherits(try(col2rgb(strings), silent=TRUE), "try-error") if(valuesAreColours) cat("Interpreting pixel values as colours\n") } # if(valuesAreColours) { # colour-valued images are plotted using the code for factor images # with the colour map equal to the levels of the factor switch(xtype, factor = { col <- levels(x) }, character = { x <- eval.im(factor(x)) xtype <- "factor" col <- levels(x) }, { warning(paste("Pixel values of 
type", sQuote(xtype), "are not interpretable as colours")) }) # colours not suitable for ribbon ribbon <- FALSE } # transform pixel values to log scale? if(do.log) { rx <- range(x, finite=TRUE) if(all(rx > 0)) { x <- eval.im(log10(x)) } else { if(any(rx < 0)) warning(paste("Negative pixel values", "omitted from logarithmic colour map;", "range of values =", prange(rx)), call.=FALSE) if(!all(rx < 0)) warning("Zero pixel values omitted from logarithmic colour map", call.=FALSE) x <- eval.im(log10orNA(x)) } xtype <- x$type Log <- log10 Exp <- function(x) { 10^x } } else { Log <- Exp <- function(x) { x } } imagebreaks <- NULL # ribbonvalues <- ribbonbreaks <- NULL ribbonvalues <- NULL ## NOW DETERMINE THE COLOUR MAP colfun <- colmap <- NULL if(valuesAreColours) { ## pixel values are colours; set of colours was determined earlier colmap <- colourmap(col=col, inputs=col) } else if(!col.given) { ## no colour information given: use default colfun <- spatstat.options("image.colfun") } else if(inherits(col, "colourmap")) { ## Bob's your uncle colmap <- col } else if(is.function(col)) { ## Some kind of function determining a colour map if(names(formals(col))[1] == "n") { ## function(n) -> colour values colfun <- col } else { ## colour map determined by a rule (e.g. 
'beachcolours') colmap <- invokeColourmapRule(col, x, zlim=zlim, colargs=colargs) if(is.null(colmap)) stop("Unrecognised syntax for colour function") } } switch(xtype, real = { vrange <- range(x, finite=TRUE) vrange <- range(zlim, vrange) if(!is.null(colmap)) { # explicit colour map s <- summary(colmap) if(s$discrete) stop("Discrete colour map is not applicable to real values") imagebreaks <- s$breaks vrange <- range(imagebreaks) col <- s$outputs } trivial <- (diff(vrange) <= .Machine$double.eps) if(!trivial) { # ribbonvalues: domain of colour map (pixel values) # ribbonrange: (min, max) of pixel values in image # nominalrange: range of values shown on ribbon # nominalmarks: values shown on ribbon at tick marks # ribbonticks: pixel values of tick marks # ribbonlabels: text displayed at tick marks ribbonvalues <- seq(from=vrange[1], to=vrange[2], length.out=ribn) ribbonrange <- vrange nominalrange <- Log(ribscale * Exp(ribbonrange)) nominalmarks <- user.ticks %orifnull% Ticks(nominalrange, log=do.log, nint=user.nint) ribbonticks <- Log(nominalmarks/ribscale) ribbonlabels <- paste(nominalmarks) } }, integer = { values <- as.vector(x$v) values <- values[!is.na(values)] uv <- unique(values) vrange <- range(uv, finite=TRUE) vrange <- range(zlim, vrange) nvalues <- length(uv) trivial <- (nvalues < 2) if(!trivial){ nominalrange <- Log(ribscale * Exp(vrange)) if(!is.null(user.ticks)) { nominalmarks <- user.ticks } else { nominalmarks <- Ticks(nominalrange, log=do.log, nint = user.nint) nominalmarks <- nominalmarks[nominalmarks %% 1 == 0] } ribbonticks <- Log(nominalmarks/ribscale) ribbonlabels <- paste(nominalmarks) if(!do.log && identical(all.equal(ribbonticks, vrange[1]:vrange[2]), TRUE)) { # each possible pixel value will appear in ribbon ribbonvalues <- vrange[1]:vrange[2] imagebreaks <- c(ribbonvalues - 0.5, vrange[2] + 0.5) ribbonrange <- range(imagebreaks) ribbonticks <- ribbonvalues ribbonlabels <- paste(ribbonticks * ribscale) } else { # not all possible values 
will appear in ribbon ribn <- min(ribn, diff(vrange)+1) ribbonvalues <- seq(from=vrange[1], to=vrange[2], length.out=ribn) ribbonrange <- vrange } } if(!is.null(colmap)) { # explicit colour map s <- summary(colmap) imagebreaks <- if(!s$discrete) s$breaks else c(s$inputs[1] - 0.5, s$inputs + 0.5) col <- s$outputs } }, logical = { values <- as.integer(as.vector(x$v)) values <- values[!is.na(values)] uv <- unique(values) trivial <- (length(uv) < 2) vrange <- c(0,1) imagebreaks <- c(-0.5, 0.5, 1.5) ribbonvalues <- c(0,1) ribbonrange <- range(imagebreaks) # ribbonbreaks <- imagebreaks ribbonticks <- user.ticks %orifnull% ribbonvalues ribbonlabels <- c("FALSE", "TRUE") if(!is.null(colmap)) col <- colmap(c(FALSE,TRUE)) }, factor = { lev <- levels(x) nvalues <- length(lev) trivial <- (nvalues < 2) # ensure all factor levels plotted separately fac <- factor(lev, levels=lev) intlev <- as.integer(fac) imagebreaks <- c(intlev - 0.5, max(intlev) + 0.5) ribbonvalues <- intlev ribbonrange <- range(imagebreaks) # ribbonbreaks <- imagebreaks ribbonticks <- user.ticks %orifnull% ribbonvalues ribbonlabels <- paste(lev) vrange <- range(intlev) if(!is.null(colmap) && !valuesAreColours) col <- colmap(fac) }, character = { x <- eval.im(factor(x)) lev <- levels(x) nvalues <- length(lev) trivial <- (nvalues < 2) # ensure all factor levels plotted separately fac <- factor(lev, levels=lev) intlev <- as.integer(fac) imagebreaks <- c(intlev - 0.5, max(intlev) + 0.5) ribbonvalues <- intlev ribbonrange <- range(imagebreaks) # ribbonbreaks <- imagebreaks ribbonticks <- user.ticks %orifnull% ribbonvalues ribbonlabels <- paste(lev) vrange <- range(intlev) if(!is.null(colmap)) col <- colmap(fac) }, stop(paste("Do not know how to plot image of type", sQuote(xtype))) ) ## Compute colour values to be passed to image.default if(!is.null(colmap)) { ## Explicit colour map object colourinfo <- list(breaks=imagebreaks, col=col) } else if(!is.null(colfun)) { ## Function colfun(n) colourinfo <- 
if(is.null(imagebreaks)) list(col=colfun(256)) else list(breaks=imagebreaks, col=colfun(length(imagebreaks)-1)) } else if(col.given) { ## Colour values if(inherits(try(col2rgb(col), silent=TRUE), "try-error")) stop("Unable to interpret argument col as colour values") if(is.null(imagebreaks)) { colourinfo <- list(col=col) } else { nintervals <- length(imagebreaks) - 1 colourinfo <- list(breaks=imagebreaks, col=col) if(length(col) != nintervals) stop(paste("Length of argument", dQuote("col"), paren(paste(length(col))), "does not match the number of distinct values", paren(paste(nintervals)))) } } else stop("Internal error: unable to determine colour values") if(spatstat.options("monochrome")) { ## transform to grey scale colourinfo$col <- to.grey(colourinfo$col) } # colour map to be returned (invisibly) i.col <- colourinfo$col i.bks <- colourinfo$breaks output.colmap <- if(is.null(i.col)) NULL else if(inherits(i.col, "colourmap")) i.col else if(valuesAreColours) colourmap(col=i.col, inputs=i.col) else switch(xtype, integer=, real= { if(!is.null(i.bks)) { colourmap(col=i.col, breaks=i.bks) } else colourmap(col=i.col, range=vrange) }, logical={ colourmap(col=i.col, inputs=c(FALSE,TRUE)) }, character=, factor={ colourmap(col=i.col, inputs=lev) }, NULL) ## ........ decide whether to use rasterImage ......... ## get device capabilities ## (this will start a graphics device if none is active) rasterable <- dev.capabilities()$rasterImage if(is.null(rasterable)) rasterable <- "no" ## can.use.raster <- switch(rasterable, yes=TRUE, no=FALSE, "non-missing"=!anyNA(x$v), FALSE) if(is.null(useRaster)) { useRaster <- can.use.raster } else if(useRaster && !can.use.raster) { whinge <- "useRaster=TRUE is not supported by the graphics device" if(rasterable == "non-missing") whinge <- paste(whinge, "for images with NA values") warning(whinge, call.=FALSE) } ## ........ start plotting ................. 
if(!identical(ribbon, TRUE) || trivial) { ## no ribbon wanted attr(output.colmap, "bbox") <- as.rectangle(x) if(!do.plot) return(output.colmap) ## plot image without ribbon image.doit(imagedata=list(x=cellbreaks(x$xcol, x$xstep), y=cellbreaks(x$yrow, x$ystep), z=t(x$v)), W=xbox, workaround=workaround, dotargs, list(useRaster=useRaster, add=add, show.all=show.all), colourinfo, list(xlab = "", ylab = ""), list(asp = 1, main = main, axes=FALSE)) ## if(add && show.all) ## fakemaintitle(x, main, dotargs) do.box.etc(Frame(x), add, dotargs) return(invisible(output.colmap)) } # determine plot region bb <- owin(x$xrange, x$yrange) Width <- diff(bb$xrange) Height <- diff(bb$yrange) Size <- max(Width, Height) switch(ribside, right={ # ribbon to right of image bb.rib <- owin(bb$xrange[2] + c(ribsep, ribsep+ribwid) * Size, bb$yrange) rib.iside <- 4 }, left={ # ribbon to left of image bb.rib <- owin(bb$xrange[1] - c(ribsep+ribwid, ribsep) * Size, bb$yrange) rib.iside <- 2 }, top={ # ribbon above image bb.rib <- owin(bb$xrange, bb$yrange[2] + c(ribsep, ribsep+ribwid) * Size) rib.iside <- 3 }, bottom={ # ribbon below image bb.rib <- owin(bb$xrange, bb$yrange[1] - c(ribsep+ribwid, ribsep) * Size) rib.iside <- 1 }) bb.all <- boundingbox(bb.rib, bb) attr(output.colmap, "bbox") <- bb.all attr(output.colmap, "bbox.legend") <- bb.rib if(!do.plot) return(output.colmap) pt <- prepareTitle(main) if(!add) { ## establish coordinate system do.call.plotfun(plot.owin, resolve.defaults(list(x=bb.all, type="n", main=pt$blank), dotargs), extrargs=graphicsPars("owin")) } if(show.all) { ## plot title centred over main image area 'bb' do.call.plotfun(plot.owin, resolve.defaults(list(x=bb, type="n", main=main, add=TRUE, show.all=TRUE), dotargs), extrargs=graphicsPars("owin")) main <- "" } # plot image image.doit(imagedata=list(x=cellbreaks(x$xcol, x$xstep), y=cellbreaks(x$yrow, x$ystep), z=t(x$v)), W=xbox, workaround=workaround, list(add=TRUE, show.all=show.all), dotargs, list(useRaster=useRaster), 
colourinfo, list(xlab = "", ylab = ""), list(asp = 1, main = main)) ## if(add && show.all) ## fakemaintitle(bb.all, main, ...) # box or axes for image do.box.etc(bb, add, dotargs) # plot ribbon image containing the range of image values rib.npixel <- length(ribbonvalues) + 1 switch(ribside, left=, right={ # vertical ribbon rib.xcoords <- bb.rib$xrange rib.ycoords <- seq(from=bb.rib$yrange[1], to=bb.rib$yrange[2], length.out=rib.npixel) rib.z <- matrix(ribbonvalues, ncol=1) rib.useRaster <- useRaster }, top=, bottom={ # horizontal ribbon rib.ycoords <- bb.rib$yrange rib.xcoords <- seq(from=bb.rib$xrange[1], to=bb.rib$xrange[2], length.out=rib.npixel) rib.z <- matrix(ribbonvalues, nrow=1) # bug workaround rib.useRaster <- FALSE }) image.doit(imagedata=list(x=rib.xcoords, y=rib.ycoords, z=t(rib.z)), W=bb.rib, workaround=workaround, list(add=TRUE, show.all=show.all), ribargs, list(useRaster=rib.useRaster), list(main="", sub=""), dotargs, colourinfo) # box around ribbon? resol <- resolve.defaults(ribargs, dotargs) if(!identical(resol$box, FALSE)) plot(as.owin(bb.rib), add=TRUE) # scale axis for ribbon image ribaxis <- !(identical(resol$axes, FALSE) || identical(resol$ann, FALSE)) if(ribaxis) { ribaxis.iside <- rib.iside ## check for user-supplied xlim, ylim with reverse order ll <- resolve.defaults(ribargs, dotargs, list(xlim=NULL, ylim=NULL)) xlimflip <- is.numeric(ll$xlim) && (diff(ll$xlim) < 0) ylimflip <- is.numeric(ll$ylim) && (diff(ll$ylim) < 0) if(xlimflip) ribaxis.iside <- c(1, 4, 3, 2)[ribaxis.iside] if(ylimflip) ribaxis.iside <- c(3, 2, 1, 4)[ribaxis.iside] ## axisargs <- list(side=ribaxis.iside, labels=ribbonlabels) switch(ribside, right={ scal <- diff(bb.rib$yrange)/diff(ribbonrange) at <- bb.rib$yrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$xrange[2], yaxp=c(bb.rib$yrange, length(ribbonticks))) }, left={ scal <- diff(bb.rib$yrange)/diff(ribbonrange) at <- bb.rib$yrange[1] + scal * 
(ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$xrange[1], yaxp=c(bb.rib$yrange, length(ribbonticks))) }, top={ scal <- diff(bb.rib$xrange)/diff(ribbonrange) at <- bb.rib$xrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$yrange[2], xaxp=c(bb.rib$xrange, length(ribbonticks))) }, bottom={ scal <- diff(bb.rib$xrange)/diff(ribbonrange) at <- bb.rib$xrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$yrange[1], xaxp=c(bb.rib$xrange, length(ribbonticks))) }) do.call.plotfun(graphics::axis, resolve.defaults(ribargs, axisargs, dotargs, posargs), extrargs=graphicsPars("axis")) } # return(invisible(output.colmap)) } PlotIm }) invokeColourmapRule <- function(colfun, x, ..., zlim=NULL, colargs=list()) { ## utility for handling special functions that generate colour maps ## either ## function(... range) -> colourmap ## function(... 
inputs) -> colourmap stopifnot(is.im(x)) stopifnot(is.function(colfun)) colargnames <- names(formals(colfun)) ## Convert it to a 'colourmap' colmap <- NULL xtype <- x$type if(xtype %in% c("real", "integer") && "range" %in% colargnames) { ## function(range) -> colourmap vrange <- range(range(x, finite=TRUE), zlim) cvals <- try(do.call.matched(colfun, append(list(range=vrange), colargs)), silent=TRUE) if(!inherits(cvals, "try-error")) { colmap <- if(inherits(cvals, "colourmap")) cvals else if(is.character(cvals)) colourmap(cvals, range=vrange) else NULL } } else if(xtype != "real" && "inputs" %in% colargnames) { ## function(inputs) -> colourmap vpossible <- switch(xtype, logical = c(FALSE, TRUE), factor = levels(x), unique(as.matrix(x))) if(!is.null(vpossible) && length(vpossible) < 256) { cvals <- try(do.call.matched(colfun, append(list(inputs=vpossible), colargs)), silent=TRUE) if(!inherits(cvals, "try-error")) { colmap <- if(inherits(cvals, "colourmap")) cvals else if(is.character(cvals)) colourmap(cvals, inputs=vpossible) else NULL } } } return(colmap) } ######################################################################## image.im <- plot.im ###################################################################### contour.im <- function (x, ..., main, axes=FALSE, add=FALSE, col=par("fg"), clipwin=NULL, show.all=!add, do.plot=TRUE) { defaultmain <- deparse(substitute(x)) dotargs <- list(...) 
bb <- Frame(x) ## return value result <- bb attr(result, "bbox") <- bb if(!do.plot) return(result) ## main title sop <- spatstat.options("par.contour") if(missing(main)) main <- resolve.1.default(list(main=defaultmain), sop) pt <- prepareTitle(main) ## plotting parameters if(missing(add)) { force(add) ## use default in formal arguments, unless overridden add <- resolve.1.default(list(add=add), sop) } if(missing(axes)) { force(axes) axes <- resolve.1.default(list(axes=axes), sop) } axes <- axes && !add col0 <- if(inherits(col, "colourmap")) par("fg") else col ## clip to subset if(!is.null(clipwin)) x <- x[clipwin, drop=FALSE] #' start plotting if(!add) { ## new plot - establish coordinate system if(axes && show.all) { #' standard plot initialisation in base graphics do.call.plotfun(plot.default, resolve.defaults( list(x = range(x$xcol), y = range(x$yrow), type = "n"), list(...), list(asp = 1, xlab = "x", ylab = "y", col = col0, main = main))) } else { #' plot invisible bounding box do.call.plotfun(plot.owin, resolve.defaults(list(x=bb, type="n", main=pt$blank), dotargs), extrargs=graphicsPars("owin")) } } if(show.all && !axes) { ## plot title centred over contour region do.call.plotfun(plot.owin, resolve.defaults(list(x=bb, main=main, add=TRUE, show.all=TRUE), dotargs, list(col.main=col0)), extrargs=graphicsPars("owin")) } #' plot contour lines if(!inherits(col, "colourmap")) { do.call.plotfun(contour.default, resolve.defaults(list(x=x$xcol, y=x$yrow, z=t(x$v)), list(add=TRUE, col=col), list(...))) } else { clin <- do.call.matched(contourLines, append(list(x=x$xcol, y=x$yrow, z=t(x$v)), list(...))) linpar <- graphicsPars("lines") for(i in seq_along(clin)) { lini <- clin[[i]] levi <- lini$level coli <- col(levi) argi <- resolve.defaults(lini[c("x", "y")], list(...), list(col=coli)) do.call.matched(lines.default, argi, extrargs=linpar) } } return(invisible(result)) } spatstat/R/Gest.R0000755000176200001440000001004513115271075013350 0ustar liggesusers# # Gest.S # # 
Compute estimates of nearest neighbour distance distribution function G # # $Revision: 4.31 $ $Date: 2015/10/21 09:06:57 $ # ################################################################################ # "Gest" <- "nearest.neighbour" <- function(X, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han"), domain=NULL) { verifyclass(X, "ppp") if(!is.null(domain)) stopifnot(is.subset.owin(domain, Window(X))) ## W <- X$window npts <- npoints(X) lambda <- npts/area(W) ## determine r values rmaxdefault <- rmax.rule("G", W, lambda) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max zeroes <- numeric(length(rvals)) ## choose correction(s) # correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) { correction <- c("rs", "km", "han") } else correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", cs="han", ChiuStoyan="han", best="km"), multi=TRUE) ## compute nearest neighbour distances nnd <- nndist(X$x, X$y) ## distance to boundary bdry <- bdist.points(X) ## restrict to subset ? if(!is.null(domain)) { ok <- inside.owin(X, w=domain) nnd <- nnd[ok] bdry <- bdry[ok] } ## observations o <- pmin.int(nnd,bdry) ## censoring indicators d <- (nnd <= bdry) ## initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2)) Z <- fv(df, "r", substitute(G(r), NULL), "theo", . ~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="G") if("none" %in% correction) { ## UNCORRECTED e.d.f. 
of nearest neighbour distances: use with care if(npts <= 1) edf <- zeroes else { hh <- hist(nnd[nnd <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(nnd) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)", "uncorrected estimate of %s", "raw") } if("han" %in% correction) { if(npts <= 1) G <- zeroes else { ## uncensored distances x <- nnd[d] ## weights a <- eroded.areas(W, rvals, subset=domain) ## calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts G <- cumsum(h/a) G <- G/max(G[is.finite(G)]) } ## add to fv object Z <- bind.fv(Z, data.frame(han=G), "hat(%s)[han](r)", "Hanisch estimate of %s", "han") ## modify recommended plot range attr(Z, "alim") <- range(rvals[G <= 0.9]) } if(any(correction %in% c("rs", "km"))) { ## calculate Kaplan-Meier and border correction (Reduced Sample) estimators if(npts == 0) result <- data.frame(rs=zeroes, km=zeroes, hazard=zeroes, theohaz=zeroes) else { result <- km.rs(o, bdry, d, breaks) result$theohaz <- 2 * pi * lambda * rvals result <- as.data.frame(result[c("rs", "km", "hazard", "theohaz")]) } ## add to fv object Z <- bind.fv(Z, result, c("hat(%s)[bord](r)", "hat(%s)[km](r)", "hat(h)[km](r)", "h[pois](r)"), c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function h(r)", "theoretical Poisson hazard function h(r)"), "km") ## modify recommended plot range attr(Z, "alim") <- range(rvals[result$km <= 0.9]) } nama <- names(Z) fvnames(Z, ".") <- rev(setdiff(nama, c("r", "hazard", "theohaz"))) unitname(Z) <- unitname(X) return(Z) } spatstat/R/nncorr.R0000755000176200001440000000772713115271120013753 0ustar liggesusers# # nncorr.R # # $Revision: 1.11 $ $Date: 2015/10/21 09:06:57 $ # nnmean <- function(X, k=1) { stopifnot(is.ppp(X) && is.marked(X)) if(k %% 1 != 0 || length(k) != 1 || k <= 0) stop("k should be a single integer greater than 0", call.=FALSE) if(k >= npoints(X)) stop("Not enough points to compute k-th nearest 
neighbours") m <- numeric.columns(marks(X), logical=TRUE, others="na") nnid <- nnwhich(X, k=k) ok <- (nndist(X, k=k) <= bdist.points(X)) if(!any(ok, na.rm=TRUE)) stop("Insufficient data") numer <- unlist(lapply(as.data.frame(m[nnid[ok], ]), mean, na.rm=TRUE)) denom <- unlist(lapply(as.data.frame(m), mean, na.rm=TRUE)) ans <- rbind(unnormalised=numer, normalised =numer/denom) if(ncol(ans) == 1) ans <- ans[,1,drop=TRUE] return(ans) } nnvario <- local({ nnvario <- function(X, k=1) { stopifnot(is.ppp(X) && is.marked(X)) m <- numeric.columns(marks(X), logical=TRUE, others="na") ans <- nncorr(X %mark% m, sqdif, k=k, denominator=diag(var(m))) return(ans) } sqdif <- function(m1,m2) { ((m1-m2)^2)/2 } nnvario }) nncorr <- function(X, f = function(m1,m2) { m1 * m2}, k=1, ..., use = "all.obs", method = c("pearson", "kendall", "spearman"), denominator=NULL) { stopifnot(is.ppp(X) && is.marked(X)) if(k %% 1 != 0 || length(k) != 1 || k <= 0) stop("k should be a single integer greater than 0", call.=FALSE) if(k >= npoints(X)) stop("Not enough points to compute k-th nearest neighbours") m <- as.data.frame(marks(X)) nv <- ncol(m) if(nv == 1) colnames(m) <- "" # if(missing(method) || is.null(method)) method <- "pearson" # if(missing(f)) f <- NULL if(!is.null(f) && !is.function(f)) { if(nv == 1) stop("f should be a function") # could be a list of functions if(!(is.list(f) && all(unlist(lapply(f, is.function))))) stop("f should be a function or a list of functions") if(length(f) != nv) stop("Length of list f does not match number of mark variables") } # optional denominator(s) if(!is.null(denominator) && !(length(denominator) %in% c(1, nv))) stop("Denominator has incorrect length") # multi-dimensional case if(nv > 1) { # replicate things if(is.function(f)) f <- rep.int(list(f), nv) if(length(denominator) <= 1) denominator <- rep.int(list(denominator), nv) # result <- matrix(NA, nrow=3, ncol=nv) outnames <- c("unnormalised", "normalised", "correlation") dimnames(result) <- list(outnames, 
colnames(m)) for(j in 1:nv) { mj <- m[,j, drop=FALSE] denj <- denominator[[j]] nncj <- nncorr(X %mark% mj, f=f[[j]], k=k, use=use, method=method, denominator=denj) kj <- length(nncj) result[1:kj,j] <- nncj } if(all(is.na(result[3, ]))) result <- result[1:2, ] return(result) } # one-dimensional m <- m[,1,drop=TRUE] # select 'f' appropriately for X chk <- check.testfun(f, X=X) f <- chk$f ftype <- chk$ftype # denominator Efmm <- if(!is.null(denominator)) denominator else switch(ftype, mul={ mean(m)^2 }, equ={ sum(table(m)^2)/length(m)^2 }, general={ mean(outer(m, m, f, ...)) }) # border method nn <- nnwhich(X, k=k) ok <- (nndist(X, k=k) <= bdist.points(X)) if(!any(ok)) stop("Insufficient data") mY <- m[nn[ok]] mX <- m[ok] Efmk <- switch(ftype, mul = { mean(mX * mY, ...) }, equ = { mean(mX == mY, ...) }, general = { mean(f(mX, mY, ...)) }) # answer <- c(unnormalised=Efmk, normalised=Efmk/Efmm) if(ftype == "mul") { classic <- cor(mX, mY, use=use, method=method) answer <- c(answer, correlation=classic) } return(answer) } spatstat/R/reach.R0000755000176200001440000000162713115271120013525 0ustar liggesusers# # reach.R # # $Revision: 1.8 $ $Date: 2007/10/24 09:41:15 $ # reach <- function(x, ...) { UseMethod("reach") } reach.interact <- function(x, ...) 
{ verifyclass(x, "interact") irange <- x$irange if(is.null(irange)) return(Inf) if(!is.function(irange)) stop("Internal error - x$irange is not a function") ir <- irange(x) if(is.na(ir)) ir <- Inf return(ir) } reach.ppm <- function(x, ..., epsilon=0) { verifyclass(x, "ppm") # Poisson case if(is.poisson.ppm(x)) return(0) # extract info inte <- x$interaction coeffs <- coef(x) if(newstyle.coeff.handling(inte)) { # extract only interaction coefficients Vnames <- x$internal$Vnames coeffs <- coeffs[Vnames] } # apply 'irange' function irange <- inte$irange if(is.null(irange)) return(Inf) ir <- irange(inte, coeffs, epsilon=epsilon) if(is.na(ir)) ir <- Inf return(ir) } spatstat/R/laslett.R0000644000176200001440000002671713115225157014130 0ustar liggesusers#' Calculating Laslett's transform #' Original by Kassel Hingee #' Adapted by Adrian Baddeley #' Copyright (C) 2016 Kassel Hingee and Adrian Baddeley # $Revision: 1.8 $ $Date: 2017/02/07 08:12:05 $ laslett <- function(X, ..., verbose=FALSE, plotit=TRUE, discretise=FALSE, type = c("lower", "upper", "left", "right")){ #' validate X and convert to a logical matrix type <- match.arg(type) oldX <- X if(is.im(X)) { X <- solutionset(X != 0) } else if(!is.owin(X)) stop("X should be an image or a window", call.=FALSE) if(type != "lower") { nrot <- match(type, c("right", "upper", "left")) theta <- nrot * pi/2 X <- rotate(X, angle=-theta) } if(!discretise && (is.polygonal(X) || is.rectangle(X))) { result <- polyLaslett(X, ..., oldX=oldX, verbose=verbose, plotit=FALSE) } else { result <- maskLaslett(X, ..., oldX=oldX, verbose=verbose, plotit=FALSE) } if(type != "lower") { #' rotate back prods <- c("TanOld", "TanNew", "Rect") result[prods] <- lapply(result[prods], rotate, angle=theta) } if(plotit) plot(result, ...) 
result$type <- type return(result) } maskLaslett <- local({ sumtoright <- function(x) { rev(cumsum(rev(x))) - x } maskLaslett <- function(X, ..., eps=NULL, dimyx=NULL, xy=NULL, oldX=X, verbose=FALSE, plotit=TRUE) { if(is.null(oldX)) oldX <- X X <- as.mask(X, eps=eps, dimyx=dimyx, xy=xy) unitX <- unitname(X) if(is.empty(X)) stop("Empty window!") M <- as.matrix(X) #' ....... Compute transformed set ................... #' Total width of transformed set on each row TotFalse <- rowSums(!M) ## compute transformed set Laz <- (col(M) <= TotFalse[row(M)]) Laz <- owin(mask=Laz, xrange=X$xrange, yrange=X$yrange, unitname=unitX) #' Largest sub-rectangle of transformed set width <- min(TotFalse) * X$xstep Rect <- owin(X$xrange[1L] + c(0, width), X$yrange, unitname=unitX) #' Along each horizontal line (row), #' compute a running count of FALSE pixels. #' This is the mapping for the set transform #' (the value at any pixel gives the new column number #' for the transformed pixel) CumulFalse <- t(apply(!M, 1L, cumsum)) #' discard one column for consistency with other matrices below CumulFalse <- CumulFalse[,-1L,drop=FALSE] #' ....... Find lower tangent points ................. 
#' compute discrete gradient in x direction G <- t(apply(M, 1, diff)) #' detect entries, exits, changes Exit <- (G == -1) Enter <- (G == 1) Change <- Exit | Enter #' form a running total of the number of pixels inside X #' to the **right** of the current pixel FutureInside <- t(apply(M, 1, sumtoright))[,-1L,drop=FALSE] #' find locations of changes loc <- which(Change, arr.ind=TRUE) #' don't consider entries/exits in the bottom row ok <- (loc[,"row"] > 1) loc <- loc[ok, , drop=FALSE] #' corresponding locations on horizontal line below current line below <- cbind(loc[,"row"]-1L, loc[,"col"]) #' look up data at these locations df <- data.frame(row=loc[,"row"], col=loc[,"col"], newcol=CumulFalse[loc], Exit=Exit[loc], Enter=Enter[loc], InsideBelow=M[below], FutureInsideBelow=FutureInside[below]) #' identify candidates for tangents df$IsCandidate <- with(df, Enter & !InsideBelow & (newcol < TotFalse[row])) #' collect data for each horizontal line (row) #' then sort by increasing x (column) within each line. 
oo <- with(df, order(row, col)) df <- df[oo, , drop=FALSE] #' divide data into one piece for each hztal line g <- split(df, df$row) #' Initialise empty list of tangent points tangents <- data.frame(row=integer(0), col=integer(0), newcol=integer(0)) #' process each hztal line for(p in g) { tangents <- with(p, { candidates <- which(IsCandidate) # indices are row numbers in 'p' if(verbose) print(p) exits <- which(Exit) for(i in candidates) { if(verbose) cat(paste("candidate", i, "\n")) if(any(found <- (exits > i))) { j <- exits[min(which(found))] if(verbose) cat(paste("next exit:", j, "\n")) #' check no pixels inside X in row below between i and j if(FutureInsideBelow[i] == FutureInsideBelow[j]) { if(verbose) cat(paste("Tangent (1) at row=", row[i], "col=", col[i], "\n")) tangents <- rbind(tangents, data.frame(row=row[i], col=col[i], newcol=newcol[i])) } } else { #' no exits on this row if(verbose) cat("no subsequent exit\n") if(FutureInsideBelow[i] == 0) { if(verbose) cat(paste("Tangent (2) at row=", row[i], "col=", col[i], "\n")) tangents <- rbind(tangents, data.frame(row=row[i], col=col[i], newcol=newcol[i])) } } } if(verbose) cat("====\n") tangents }) } tangents$oldx <- X$xcol[tangents$col] tangents$newx <- X$xcol[tangents$newcol] tangents$y <- X$yrow[tangents$row] TanOld <- with(tangents, ppp(oldx, y, window=Frame(X), unitname=unitX)) TanNew <- with(tangents, ppp(newx, y, window=Laz), unitname=unitX) result <- list(oldX=oldX, TanOld=TanOld, TanNew=TanNew, Rect=Rect, df=tangents) class(result) <- c("laslett", class(result)) if(plotit) plot(result, ...) return(result) } maskLaslett }) print.laslett <- function(x, ...) 
{ cat("Laslett Transform\n") cat("\nOriginal object:\n") print(x$oldX) cat("\nTransformed set:\n") W <- Window(x$TanNew) print(W) unitinfo <- summary(unitname(W)) cat("\nTransformed area:", area.owin(W), "square", unitinfo$plural, unitinfo$explain, fill=TRUE) cat("\n") type <- x$type %orifnull% "lower" cat(npoints(x$TanNew), type, "tangent points found.", fill=TRUE) return(invisible(NULL)) } plot.laslett <- function(x, ..., Xpars=list(box=TRUE, col="grey"), pointpars=list(pch=3, cols="blue"), rectpars=list(lty=3, border="green")) { Display <- with(x, solist(Original= layered(oldX, TanOld, plotargs=list(Xpars, pointpars)), Transformed= layered(TanNew, Rect, plotargs=list(pointpars, rectpars)))) #' ignore arguments intended for as.mask argh <- list(...) if(any(bad <- names(argh) %in% c("eps", "dimyx", "xy"))) argh <- argh[!bad] do.call(plot, resolve.defaults(list(x=Display), argh, list(main="", mar.panel=0, hsep=1, equal.scales=TRUE))) return(invisible(NULL)) } polyLaslett <- function(X, ..., oldX=X, verbose=FALSE, plotit=TRUE) { X <- as.polygonal(X) if(is.empty(X)) stop("Empty window!") unitX <- unitname(X) # expand frame slightly B <- Frame(X) B <- grow.rectangle(B, max(sidelengths(B))/8) x0 <- B$xrange[1L] x1 <- B$xrange[2L] # extract vertices v <- vertices(X) nv <- length(v$x) # .......... compute transformed set ..................... # make horizontal segments from each vertex to sides of box left <- with(v, psp(rep(x0,nv), y, x, y, window=B, marks=1:nv, check=FALSE)) right <- with(v, psp(x, y, rep(x1,nv), y, window=B, marks=1:nv, check=FALSE)) # intersect each horizontal segment with the window if(verbose) cat("Processing", nv, "polygon vertices... ") clipleft <- clip.psp(left, X) clipright <- clip.psp(right, X) if(verbose) cat("Done.\n") # calculate lengths of clipped segments, and group by vertex. # marks indicate which hztal segment was the parent of each piece. 
lenleft <- tapply(lengths.psp(clipleft), factor(marks(clipleft), levels=1:nv), sum) lenright <- tapply(lengths.psp(clipright), factor(marks(clipright), levels=1:nv), sum) lenleft[is.na(lenleft)] <- 0 lenright[is.na(lenright)] <- 0 emptylenleft <- lengths.psp(left) - lenleft emptylenright <- lengths.psp(right) - lenright # The transformed polygon isrightmost <- (lenright == 0) yright <- v$y[isrightmost] xright <- x0 + (emptylenleft+emptylenright)[isrightmost] minxright <- min(xright) # right margin of largest rectangle ord <- order(yright) Ty <- yright[ord] Tx <- xright[ord] nT <- length(Ty) if(Tx[nT] > x0) { Ty <- c(Ty, Ty[nT]) Tx <- c(Tx, x0) } if(Tx[1L] > x0) { Ty <- c(Ty[1L], Ty) Tx <- c(x0, Tx) } TX <- owin(B$xrange, B$yrange, poly=list(x=Tx, y=Ty), check=FALSE) TX <- TX[Frame(X)] # .......... identify lower tangents ..................... V <- as.ppp(v, W=Frame(X), unitname=unitX) is.candidate <- is.tangent <- logical(nv) # apply simple criteria for ruling in or out Plist <- X$bdry cumnv <- 0 for(i in seq_along(Plist)) { P <- Plist[[i]] xx <- P$x yy <- P$y nn <- length(xx) # xnext <- c(xx[-1L], xx[1L]) ynext <- c(yy[-1L], yy[1L]) # xprev <- c(xx[nn], xx[-nn]) yprev <- c(yy[nn], yy[-nn]) is.candidate[cumnv + seq_len(nn)] <- if(!is.hole.xypolygon(P)) { (yprev > yy & ynext >= yy) } else { (yprev >= yy & ynext > yy) } cumnv <- cumnv + nn } ## was.candidate <- is.candidate #' reject candidates lying too close to boundary tooclose <- (bdist.points(V[is.candidate]) < diameter(Frame(V))/1000) is.candidate[is.candidate][tooclose] <- FALSE #' evaluate candidate points #' make tiny boxes around vertex candidates <- which(is.candidate) nc <- length(candidates) nnd <- nndist(V) if(verbose) { cat(paste("Processing", nc, "tangent candidates ... 
")) pstate <- list() } tiny <- .Machine$double.eps for(j in 1:nc) { i <- candidates[j] eps <- nnd[i]/16 xi <- v$x[i] yi <- v$y[i] Below <- owin(xi + c(-eps,eps), yi + c(-eps, 0)) # Above <- owin(xi + c(-eps, eps), yi + c(0, eps)) UpLeft <- owin(xi + c(-eps, 0), yi + c(0, eps)) is.tangent[i] <- (overlap.owin(X, Below) <= tiny) && (overlap.owin(X, UpLeft) < eps^2) if(verbose) pstate <- progressreport(j, nc, state=pstate) } if(verbose) cat(paste("Found", sum(is.tangent), "tangents\n")) TanOld <- V[is.tangent] ynew <- TanOld$y xnew <- x0 + emptylenleft[is.tangent] TanNew <- ppp(xnew, ynew, window=TX, check=FALSE, unitname=unitX) # maximal rectangle Rect <- owin(c(X$xrange[1L], minxright), X$yrange, unitname=unitX) # df <- data.frame(xold=TanOld$x, xnew=TanNew$x, y=TanNew$y) # result <- list(oldX=oldX, TanOld=TanOld, TanNew=TanNew, Rect=Rect, df=df) class(result) <- c("laslett", class(result)) if(plotit) plot(result, ...) return(result) } spatstat/R/simulate.detPPF.R0000644000176200001440000003311113115225157015406 0ustar liggesusers## This file contains functions to simulate DPP models. ## Two simulation functions are visible: ## - simulate.detpointprocfamily (most useful) ## - rdpp (more generic workhorse function -- actually the real workhorse is the locally defined rdppp) ## ## Furthermore the auxilliary function dppeigen is defined here. 
rdpp <- local({ ## Generates an empty point pattern emptyppx <- function(W, simplify = TRUE){ W <- as.boxx(W) r <- W$ranges d <- ncol(r) if(simplify){ if(d==2) return(ppp(numeric(0), numeric(0), window=as.owin(W))) if(d==3) return(pp3(numeric(0), numeric(0), numeric(0), W)) } rslt <- replicate(d, numeric(0), simplify=FALSE) names(rslt) <- paste("x",1:d,sep="") rslt <- as.data.frame(rslt) return(ppx(rslt, domain = W, coord.type= rep("spatial", d))) } rdppp <- function(index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))), reject_max = 1e4, progress = 0, debug = FALSE, ...){ ## Check arguments: if (!(is.logical(debug))) stop(paste(sQuote("debug"), "must be TRUE or FALSE")) if (!is.numeric(reject_max)||reject_max<=1) stop(paste(sQuote("reject_max"), "must be a numeric greater than 1")) if (!is.numeric(progress)||reject_max<1) stop(paste(sQuote("progress"), "must be a numeric greater than or equal to 1")) index <- as.matrix(index) d <- ncol(index) window <- as.boxx(window) ranges <- window$ranges if(ncol(ranges)!=d) stop("The dimension differs from the number of columns in index") basis <- get(basis) if (!(is.function(basis))) stop(paste(sQuote("basis"), "must be a function")) tmp <- basis(ranges[1,,drop=FALSE], index, window) if (!(is.numeric(tmp) || is.complex(tmp))) stop(paste("Output of", sQuote("basis"), "must be numeric or complex")) ## Number of points to simulate: n <- nrow(index) ## Return empty point pattern if n=0: empty <- emptyppx(window) if (n==0) return(empty) ## Initialize debug info: if(debug){ debugList = replicate(n, list(old=empty, accepted=empty, rejected=empty, index=index), simplify=FALSE) } # Matrix of coordinates: x <- matrix(0,n,d) colnames(x) <- paste("x",1:d,sep="") x[1,] <- runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,])) # Debug info: if(debug){ debugList[[1]]=list(old=empty, accepted=ppx(x[1,,drop=FALSE],window,simplify=TRUE), rejected=empty, index=index, estar=rep(1/n,n)) } if (n==1) return(ppx(x[1,,drop=FALSE], 
window, simplify = TRUE)) # Initialize matrices for Gram-Schmidt vectors and conj. trans.: e <- matrix(as.complex(0),n,n-1) estar <- matrix(as.complex(0),n-1,n) # First vector of basis-functions evaluated at first point: v <- basis(x[1,,drop=FALSE],index,window) ## Record normalized version in the Gram-Schmidt matrices: e[,1] <- v/sqrt(sum(abs(v)^2)) estar[1,] <- Conj(e[,1]) if(progress>0) cat(paste("Simulating", n, "points:\n")) ## Main for loop over number of points: for(i in (n-1):1){ ## Print progress: if(progress>0) progressreport(n-i, n, every=progress) ## Aux. variable to count number of rejection steps: tries <- 0 # Debug info: if(debug){ rejected <- matrix(NA,reject_max,d) } ## Non-zero vectors of estar matrix: estar2 <- estar[1:(n-i),] repeat{ ## Proposed point: newx <- matrix(runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,])),ncol=d) ## Basis functions eval. at proposed point: v <- basis(newx, index, window) ## Vector of projection weights (has length n-i) wei <- estar2%*%v ## Accept probability: tmp <- prod(ranges[2,]-ranges[1,])/n*(sum(abs(v)^2)-sum(abs(wei)^2)) ## If proposal is accepted the loop is broken: if(runif(1)reject_max){ stop(paste("Rejection sampling failed reject_max =",reject_max,"times in a row")) } ## Increase the count of rejection steps: tries <- tries+1 # Debug info: if(debug){ rejected[tries,] <- newx } } ## END OF REJECTION LOOP # Record the accepted point: x[n-i+1,] <- newx # Debug info: if(debug){ if(tries==0){ rej <- empty } else{ rej <- ppx(rejected[1:tries,,drop=FALSE],window, simplify=TRUE) } debugList[[n-i+1]] = list( old=ppx(x[1:(n-i),,drop=FALSE],window, simplify=TRUE), accepted=ppx(newx,window,simplify=TRUE), rejected=rej, index=index, estar = estar2) } ## If it is the last point exit the main loop: if(i==1){break} ## Calculate orthogonal vector for Gram-Schmidt procedure: w <- v - rowSums(matrix(wei,n,n-i,byrow=TRUE)*e[,1:(n-i)]) ## Record normalized version in the Gram-Schmidt matrices: 
e[,n-i+1]=w/sqrt(sum(abs(w)^2)) estar[n-i+1,] <- Conj(e[,n-i+1]) } ## END OF MAIN FOR LOOP # Save points as point pattern: X <- ppx(x, window, simplify = TRUE) # Debug info: if(debug){ attr(X, "dpp") <- list(debug=debugList) } if(progress>0) cat(" Done!\n") return(X) } rdpp <- function(eig, index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))), reject_max = 1e4, progress = 0, debug = FALSE, ...){ window2d <- NULL if (is.owin(window)) window2d <- window sampleindex <- as.matrix(index[rbinom(nrow(index), 1, eig)==1, ]) X <- rdppp(sampleindex, basis=basis, window=window, reject_max=reject_max, progress=progress, debug=debug, ...) if(!is.null(window2d)) X <- X[window2d] return(X) } rdpp } ) simulate.dppm <- simulate.detpointprocfamily <- function(object, nsim = 1, seed = NULL, ..., W = NULL, trunc = .99, correction = "periodic", rbord = reach(object) # parallel = FALSE ){ # .... copied from simulate.lm .... if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } # .................................. if(inherits(object, "dppm")){ if(is.null(W)) W <- Window(object$X) object <- object$fitted } if(!inherits(object, "detpointprocfamily")) stop("The model to simulate must be of class detpointprocfamily") if(length(tmp <- object$freepar)>0) stop(paste("The model to simulate must be completely specified. The following parameters are unspecified:", tmp)) if(!valid(object)) stop("The model is invalid. 
Please change parameter values to get a valid model") if(!is.numeric(nsim)||nsim<1) stop(paste(sQuote("nsim"), "must be a numeric greater than or equal to 1")) nsim <- floor(nsim) dim <- dim(object) basis <- object$basis ####### BACKDOOR TO SPHERICAL CASE ######## if(!is.null(spherefun <- object$sim_engine)){ sphereSimEngine <- get(spherefun) rslt <- sphereSimEngine(object, trunc, nsim, ...) attr(rslt, "seed") <- RNGstate return(rslt) } ########################################### # Check stationarity and window compatibility (if 'W' and 'thin' both are present) statmodel <- is.null(thin <- object$thin) if(is.null(W)){ if(!statmodel) W <- domain(thin) } Wowin <- if(is.owin(W)) W else NULL if(is.null(W)){ W <- boxx(rep(list(0:1), dim)) } else{ W <- as.boxx(W, warn.owin = FALSE) } if(!statmodel){ if(!is.subset.owin(Wowin,thin)) stop("The window of simulation is not contained in the window of the inhomogeneous intensity.") } r <- W$ranges if(dim!=ncol(r)) stop(paste("The dimension of the window:", ncol(r), "is inconsistent with the dimension of the model:", dim)) Wscale <- as.numeric(r[2,]-r[1,]) if(correction=="border"){ if(!is.numeric(rbord)||any(rbord<0)) stop(paste(sQuote("rbord"), "must be a non-negative numeric")) borderscale <- pmin((Wscale+2*rbord)/Wscale, 2) Wscale <- borderscale*Wscale } ## lambda <- intensity(object) tmp <- dppeigen(object, trunc, Wscale) trunc <- tmp$trunc prec <- tmp$prec n <- length(tmp$eig) indexlist <- replicate(nsim, {x <- as.matrix(tmp$index[rbinom(n, 1, tmp$eig)==1, ]); gc(); x}, simplify = FALSE) rm(tmp) gc() onesim <- function(i, win=NULL){ X <- rdpp(1, indexlist[[i]], basis = basis, window = boxx(rep(list(c(-.5,.5)), dim)), ...) 
a <- attr(X, "dpp") a <- c(a, list(prec = prec, trunc = trunc)) if(correction=="border"){ if(dim!=2) stop("Border correction only implemented for dimension 2 at the moment.") X <- X[affine.owin(as.owin(X), mat = diag(1/borderscale))] } if(is.ppp(X)){ X <- affine(X, matrix(c(Wscale[1],0,0,Wscale[2]), 2, 2), c(mean(r[,1]), mean(r[,2]))) if(!is.null(win)) X <- X[win] } else{ X <- ppx(X$data, domain = as.boxx(X$domain), coord.type = rep("spatial", dim)) X$data <- as.hyperframe(as.data.frame(X$data)*matrix(Wscale, nrow(X$data), ncol(X$data), byrow = TRUE)) X$domain$ranges <- X$domain$ranges*matrix(Wscale, 2, dim, byrow = TRUE) X <- ppx(X$data, X$domain, simplify = TRUE) } attr(X, "dpp") <- a return(X) } if(nsim==1){ rslt <- onesim(1,win=Wowin) if(!statmodel) rslt <- rthin(rslt, P=thin) } else{ ######## Old code for parallel simulation ######### # if(is.logical(parallel)){ # cl.cores <- if(parallel) NULL else 1 # } else{ # cl.cores <- parallel # } # rslt <- detlapply(1:nsim, onesim, cl.cores=cl.cores, win=Wowin) ################################################### rslt <- lapply(1:nsim, onesim, win=Wowin) if(!statmodel) rslt <- lapply(rslt, rthin, P=thin) names(rslt) <- paste("Simulation", 1:nsim) rslt <- as.solist(rslt) } attr(rslt, "seed") <- RNGstate return(rslt) } dppeigen <- function(model, trunc, Wscale, stationary = FALSE){ dim <- dim(model) if(stationary&&dim!=2) stop("Stationarity can only be exploited in dimension 2 at the moment.") ## Calculate expected number of points if the intensity is a parameter expnum <- NULL lambdaname <- model$intensity if(!is.null(lambdaname)) expnum <- getElement(model$fixedpar, lambdaname)*prod(Wscale) ## Get the maximal truncation in each dimension maxtrunc <- spatstat.options("dpp.maxmatrix")^(1/dim) ## Extract spectral density specden <- dppspecden(model) truncrange <- dppspecdenrange(model)*max(Wscale) if(trunc>=1){ ## Integer truncation fixed by user. 
if(stationary){ ## Coordinates on axes: index1a <- c(rep(0,trunc),1:trunc) index2a <- c(1:trunc,rep(0,trunc)) ## Coordinates of ordinary points: index1 <- rep(1:trunc,trunc) index2 <- rep(1:trunc,each=trunc) ## Spectral densities: eigo <- specden(0) eiga <- specden(sqrt((index1a/Wscale[1])^2+(index2a/Wscale[2])^2)) eig <- specden(sqrt((index1/Wscale[1])^2+(index2/Wscale[2])^2)) prec <- (eigo+2*sum(eiga)+4*sum(eig))/expnum } else{ trunc <- floor(trunc) index <- do.call(expand.grid, replicate(dim, seq(-trunc,trunc), simplify=FALSE)) indexscaled <- index*matrix(1/Wscale, nrow(index), ncol(index), byrow = TRUE) if(model$isotropic){ eig <- specden(sqrt(rowSums(indexscaled^2))) } else{ eig <- specden(indexscaled) } prec <- sum(eig)/expnum } } else{ ## Integer truncation calculated from user-specified precision. if(is.null(expnum)) stop("Cannot calculate truncation adaptively in a model without intensity parameter. Please specify trunc directly as a positive integer.") prec0 <- trunc trunc <- 1 prec <- 0 ## cat("truncation is being calculated adaptively. 
Current truncation:\n") while(prec<=prec0 & (2*trunc)<=maxtrunc & trunc<=truncrange){ trunc <- 2*trunc if(stationary){ ## Coordinates on axes: index1a <- c(rep(0,trunc),1:trunc) index2a <- c(1:trunc,rep(0,trunc)) ## Coordinates of ordinary points: index1 <- rep(1:trunc,trunc) index2 <- rep(1:trunc,each=trunc) ## Spectral densities: eigo <- specden(0) eiga <- specden(sqrt((index1a/Wscale[1])^2+(index2a/Wscale[2])^2)) eig <- specden(sqrt((index1/Wscale[1])^2+(index2/Wscale[2])^2)) prec <- (eigo+2*sum(eiga)+4*sum(eig))/expnum } else{ index <- do.call(expand.grid, replicate(dim, seq(-trunc,trunc), simplify=FALSE)) indexscaled <- index*matrix(1/Wscale, nrow(index), ncol(index), byrow = TRUE) if(model$isotropic){ eig <- specden(sqrt(rowSums(indexscaled^2))) } else{ eig <- specden(indexscaled) } prec <- sum(eig)/expnum } } ## cat("\n") if(prec 0) lev <- lev[retain] switch(splittype, factor = { # delete levels that don't occur fsplit <- factor(fsplit, levels=lev) }, stop("Internal error: wrong format for fsplit")) } # split the data out <- list() for(l in lev) out[[paste(l)]] <- x[!is.na(f) & (f == l)] if(un) out <- lapply(out, unmark) class(out) <- c("splitppx", "anylist", class(out)) attr(out, "fsplit") <- fsplit return(out) } print.splitppx <- function(x, ...) { f <- attr(x, "fsplit") what <- if(is.factor(f)) "factor" else "unknown data" cat(paste("Multidimensional point pattern split by", what, "\n")) nam <- names(x) for(i in seq_along(x)) { cat(paste("\n", nam[i], ":\n", sep="")) print(x[[i]]) } return(invisible(NULL)) } summary.splitppx <- function(object, ...) { x <- lapply(object, summary, ...) class(x) <- "summary.splitppx" x } print.summary.splitppx <- function(x, ...) { class(x) <- "anylist" print(x) invisible(NULL) } "[.splitppx" <- function(x, ...) { f <- attr(x, "fsplit") # invoke list method on x class(x) <- "list" y <- x[...] # then make it a 'splitppx' object too class(y) <- c("splitppx", class(y)) if(is.factor(f)) { lev <- levels(f) sublev <- lev[...] 
subf <- f[f %in% sublev] fsplit <- factor(subf, levels=lev) } else stop("Unknown splitting type") attr(y, "fsplit") <- fsplit y } "[<-.splitppx" <- function(x, ..., value) { if(!all(unlist(lapply(value, is.ppx)))) stop("replacement value must be a list of point patterns (ppx)") f <- attr(x, "fsplit") # invoke list method class(x) <- "list" x[...] <- value # then make it a 'splitppx' object too class(x) <- c("splitppx", class(x)) if(is.factor(f)) { lev <- levels(f) fsplit <- factor(rep.int(lev, unlist(lapply(x, npoints))), levels=lev) } attr(x, "fsplit") <- fsplit x } spatstat/R/clusterset.R0000644000176200001440000000432313115225157014642 0ustar liggesusers# # clusterset.R # # Allard-Fraley estimator of cluster region # # $Revision: 1.12 $ $Date: 2016/02/16 01:39:12 $ # clusterset <- function(X, what=c("marks", "domain"), ..., verbose=TRUE, fast=FALSE, exact=!fast) { stopifnot(is.ppp(X)) what <- match.arg(what, several.ok=TRUE) if(!missing(exact)) stopifnot(is.logical(exact)) if(fast && exact) stop("fast=TRUE is incompatible with exact=TRUE") # compute duplication exactly as in deldir, or the universe will explode X <- unique(unmark(X), rule="deldir", warn=TRUE) n <- npoints(X) W <- as.owin(X) # discretised Dirichlet tessellation if(verbose) cat("Computing Dirichlet tessellation...") if(fast || !exact) cellid <- as.im(nnfun(X), ...) 
# compute tile areas if(fast) { a <- table(factor(as.vector(as.matrix(cellid)), levels=1:n)) if(verbose) cat("done.\n") a <- a + 0.5 A <- sum(a) } else { d <- dirichlet(X) if(verbose) cat("done.\n") D <- tiles(d) suppressWarnings(id <- as.integer(names(D))) if(anyNA(id) && ("marks" %in% what)) stop("Unable to map Dirichlet tiles to data points") A <- area(W) a <- unlist(lapply(D, area)) } # determine optimal selection of tiles ntile <- length(a) o <- order(a) b <- cumsum(a[o]) m <- seq_len(ntile) logl <- -n * log(n) + m * log(m/b) + (n-m) * log((n-m)/(A-b)) mopt <- which.max(logl) picked <- o[seq_len(mopt)] ## map tiles to points if(!fast) picked <- id[picked] ## logical vector is.picked <- rep.int(FALSE, n) is.picked[picked] <- TRUE # construct result out <- list(marks=NULL, domain=NULL) if("marks" %in% what) { ## label points yesno <- factor(ifelse(is.picked, "yes", "no"), levels=c("no", "yes")) out$marks <- X %mark% yesno } if("domain" %in% what) { if(verbose) cat("Computing cluster set...") if(exact) { domain <- do.call(union.owin, unname(D[is.picked])) domain <- rebound.owin(domain, as.rectangle(W)) } else { domain <- eval.im(is.picked[cellid]) } out$domain <- domain if(verbose) cat("done.\n") } out <- if(length(what) == 1L) out[[what]] else out return(out) } spatstat/R/plot.fasp.R0000755000176200001440000001324113115271120014344 0ustar liggesusers# # plot.fasp.R # # $Revision: 1.29 $ $Date: 2016/02/11 10:17:12 $ # plot.fasp <- function(x, formule=NULL, ..., subset=NULL, title=NULL, banner=TRUE, transpose=FALSE, samex=FALSE, samey=FALSE, mar.panel=NULL, outerlabels=TRUE, cex.outerlabels=1.25, legend=FALSE) { # plot dimensions which <- x$which if(transpose) which <- t(which) nrows <- nrow(which) ncols <- ncol(which) # Determine the overall title of the plot if(banner) { if(!is.null(title)) overall <- title else if(!is.null(x$title)) overall <- x$title else { if(prod(dim(which)) > 1) overall <- "Array of diagnostic functions" else overall <- "Diagnostic 
function" if(is.null(x$dataname)) overall <- paste(overall,".",sep="") else overall <- paste(overall," for ",x$dataname,".",sep="") } if(length(overall) > 1) overall <- paste(overall, collapse="\n") nlines <- if(!is.character(overall)) 1 else length(unlist(strsplit(overall, "\n"))) } # If no formula is given, look for a default formula in x: defaultplot <- is.null(formule) if(defaultplot && !is.null(x$default.formula)) formule <- x$default.formula if(!is.null(formule)) { # ensure formulae are given as character strings. formule <- FormatFaspFormulae(formule, "formule") # Number of formulae should match number of functions. nf <- length(formule) nfun <- length(x$fns) if(nf == 1 && nfun > 1) formule <- rep.int(formule, nfun) else if(nf != nfun) stop(paste("Wrong number of entries in", sQuote("formule"))) } # Check on the length of the subset argument. ns <- length(subset) if(ns > 1) { if(ns != length(x$fns)) stop("Wrong number of entries in subset argument.\n") msub <- TRUE } else msub <- FALSE # compute common x, y axis limits for all plots ? 
xlim <- ylim <- NULL if(samex || samey) { cat("Computing limits\n") # call plot.fv to determine plot limits of each panel for(i in 1:nrows) { for(j in 1:ncols) { k <- which[i,j] if(!is.na(k)) { fun <- as.fv(x$fns[[k]]) fmla <- if(!defaultplot) formule[k] else NULL sub <- if(msub) subset[[k]] else subset lims <- plot(fun, fmla, subset=sub, limitsonly=TRUE) # update the limits if(samex) xlim <- range(xlim, lims$xlim) if(samey) ylim <- range(ylim, lims$ylim) } } } } ############################################################# # Set up the plot layout n <- nrows * ncols # panels 1..n = plot panels codes <- matrix(seq_len(n), byrow=TRUE, ncol=ncols, nrow=nrows) heights <- rep.int(1, nrows) widths <- rep.int(1, ncols) # annotation as chosen if(outerlabels) { # column headings colhead.codes <- max(codes) + (1:ncols) colhead.height <- 0.2 codes <- rbind(colhead.codes, codes) heights <- c(colhead.height, heights) # row headings rowhead.codes <- max(codes) + (1:nrows) rowhead.width <- 0.2 codes <- cbind(c(0,rowhead.codes), codes) widths <- c(rowhead.width, widths) } if(banner) { # overall banner top.code <- max(codes) + 1 top.height <- 0.1 * (1+nlines) codes <- rbind(top.code, codes) heights <- c(top.height, heights) } # declare layout layout(codes, widths=widths, heights=heights) ############################################################ # Plot the function panels # # determine annotation colNames <- colnames(which) rowNames <- rownames(which) nrc <- max(nrows, ncols) ann.def <- par("ann") && (nrc <= 3) # determine margin around each panel if(is.null(mar.panel)) mar.panel <- if(nrc > 3 && outerlabels) rep.int(1/nrc, 4) else par("mar") opa <- par(mar=mar.panel, xpd=TRUE) # # plot each function for(i in 1:nrows) { for(j in 1:ncols) { k <- which[i,j] if(is.na(k)) plot(0,0,type='n',xlim=c(0,1), ylim=c(0,1),axes=FALSE,xlab='',ylab='', ...) 
else { fun <- as.fv(x$fns[[k]]) fmla <- if(!defaultplot) formule[k] else NULL sub <- if(msub) subset[[k]] else subset main <- if(outerlabels) "" else if(nrows == 1) colNames[j] else if(ncols == 1) rowNames[i] else paren(paste(rowNames[i], colNames[j], sep=",")) do.call(plot, resolve.defaults(list(x=fun, fmla=fmla, subset=sub), list(...), list(xlim=xlim, ylim=ylim, main=main, legend=legend), list(ann=ann.def, axes=ann.def, frame.plot=TRUE))) } } } ############################################################ # # Annotation as selected if(outerlabels) { par(mar=rep.int(0,4), xpd=TRUE) # Plot the column headers for(j in 1:ncols) { plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) text(0,0,colNames[j], cex=cex.outerlabels) } # Plot the row labels for(i in 1:nrows) { plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) text(0,0,rowNames[i], srt=90, cex=cex.outerlabels) } } if(banner) { par(mar=rep.int(0,4), xpd=TRUE) # plot the banner plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) cex <- resolve.defaults(list(...), list(cex.title=2))$cex.title text(0,0, overall, cex=cex) } # revert layout(1) par(opa) return(invisible(NULL)) } spatstat/R/hierstrhard.R0000644000176200001440000002661613115225157014775 0ustar liggesusers## ## hierstrhard.R ## ## $Revision: 1.4 $ $Date: 2017/02/07 07:35:32 $ ## ## The hierarchical Strauss-hard core process ## ## ------------------------------------------------------------------- ## HierStraussHard <- local({ # ......... 
define interaction potential HSHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrices of interaction radii r <- par$iradii h <- par$hradii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] ## corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { ## assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] ## apply relevant threshold to each pair of points str <- (d <= rxu) # and the relevant hard core distance hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- str value[forbid] <- -Inf ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- value[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. 
delHSH <- function(which, types, iradii, hradii, archy, ihc) { iradii[which] <- NA if(any(!is.na(iradii))) { # some gamma interactions left # return modified HierStraussHard with fewer gamma parameters return(HierStraussHard(types=types, iradii=iradii, hradii=hradii, archy=archy)) } else if(any(!ihc)) { # ihc = inactive hard cores # no gamma interactions left, but some active hard cores return(HierHard(types=types, hradii=hradii, archy=archy)) } else return(Poisson()) } # Set up basic object except for family and parameters BlankHSHobject <- list( name = "Hierarchical Strauss-hard core process", creator = "HierStraussHard", family = "hierpair.family", # evaluated later pot = HSHpotential, par = list(types=NULL, iradii=NULL, hradii=NULL, archy=NULL), parnames = c("possible types", "interaction distances", "hardcore distances", "hierarchical order"), pardesc = c("vector of possible types", "matrix of interaction distances", "matrix of hardcore distances", "hierarchical order"), selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii archy <- self$par$archy if(!is.null(types) && !is.null(hradii) && !is.null(archy)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(archy)) archy <- seq_len(length(types)) if(!inherits(archy, "hierarchicalordering")) archy <- hierarchicalordering(archy, types) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1L, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) h[!(archy$relation)] <- NA } HierStraussHard(types=types,hradii=hradii, iradii=self$par$iradii, archy=archy) }, init = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- self$par$hradii ## hradii could be NULL if(!is.null(types)) { if(!is.null(dim(types))) stop(paste("The", sQuote("types"), "argument should be a vector")) if(length(types) == 0) stop(paste("The", 
sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) MultiPair.checkmatrix(iradii, nt, sQuote("iradii"), asymmok=TRUE) if(!is.null(hradii)) MultiPair.checkmatrix(hradii, nt, sQuote("hradii"), asymmok=TRUE) } ina <- is.na(iradii) if(all(ina)) stop(paste("All entries of", sQuote("iradii"), "are NA")) if(!is.null(hradii)) { hna <- is.na(hradii) both <- !ina & !hna if(any(iradii[both] <= hradii[both])) stop("iradii must be larger than hradii") } }, update = NULL, # default OK print = function(self) { iradii <- self$par$iradii hradii <- self$par$hradii types <- self$par$types archy <- self$par$archy if(waxlyrical('gory')) splat(nrow(iradii), "types of points") if(!is.null(types) && !is.null(archy)) { if(waxlyrical('space')) { splat("Possible types and ordering:") } else cat("Hierarchy: ") print(archy) } else if(!is.null(types)) { (if(waxlyrical('space')) splat else cat)("Possible types: ") print(types) } else if(waxlyrical('gory')) splat("Possible types:\t not yet determined") splat("Interaction radii:") dig <- getOption("digits") print(hiermat(signif(iradii, dig), archy)) if(!is.null(hradii)) { splat("Hardcore radii:") print(hiermat(signif(hradii, dig), archy)) } else splat("Hardcore radii: not yet determined") invisible(NULL) }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) ## get matrices of interaction radii r <- self$par$iradii h <- self$par$hradii ## list all unordered pairs of types uptri <- self$par$archy$relation & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(NA, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) gammas[ cbind(index1, index2) ] <- exp(coeffs) # 
return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=hiermat(dround(gammas), self$par$archy))) }, valid = function(coeffs, self) { # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # parameters to estimate required <- !is.na(iradii) & self$par$archy$relation # all required parameters must be finite if(!all(is.finite(gamma[required]))) return(FALSE) # DIAGONAL interactions must be non-explosive d <- diag(rep(TRUE, nrow(iradii))) activehard <- !is.na(hradii) & (hradii > 0) return(all(gamma[required & d & !activehard] <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii types <- self$par$types archy <- self$par$archy # active hard cores activehard <- !is.na(hradii) & (hradii > 0) ihc <- !activehard # problems? uptri <- archy$relation required <- !is.na(iradii) & uptri offdiag <- !diag(nrow(iradii)) gammavalid <- is.finite(gamma) & (activehard | offdiag | (gamma <= 1)) naughty <- required & !gammavalid # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delHSH(naughty, types, iradii, hradii, archy, ihc)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) ord <- self$par$archy$ordering uptri <- (ord[rn] <= ord[cn]) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matrix, ncol=2) inters <- lapply(mats, delHSH, types=types, iradii=iradii, hradii=hradii, archy=archy, ihc=ihc) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$iradii h <- self$par$hradii ractive <- !is.na(r) & self$par$archy$relation hactive <- !is.na(h) & self$par$archy$relation if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 ractive <- ractive & (abs(log(gamma)) > epsilon) } if(!any(c(ractive,hactive))) return(0) else return(max(c(r[ractive],h[hactive]))) }, version=NULL # to be added ) class(BlankHSHobject) <- "interact" # finally create main function HierStraussHard <- function(iradii, hradii=NULL, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } iradii[iradii == 0] <- NA out <- instantiate.interact(BlankHSHobject, list(types=types, iradii=iradii, hradii=hradii, archy=archy)) if(!is.null(types)) { dn <- list(types, types) dimnames(out$par$iradii) <- dn if(!is.null(out$par$hradii)) dimnames(out$par$hradii) <- dn } return(out) } HierStraussHard <- intermaker(HierStraussHard, BlankHSHobject) HierStraussHard }) spatstat/R/pppmatch.R0000755000176200001440000007304113115271120014256 0ustar liggesusers# # pppmatch.R # # $Revision: 1.23 $ $Date: 2017/06/05 10:31:58 $ # # Code by Dominic Schuhmacher # # # ----------------------------------------------------------------- # The standard functions for the new class pppmatching # # Objects of class pppmatching consist of two point patterns pp1 and pp2, # and either an adjacency matrix ((i,j)-th entry 1 if i-th point of pp1 and j-th # point of pp2 are matched, 0 otherwise) for "full point matchings" or # a "generalized adjacency matrix" (or flow matrix; positive values are # no longer limited to 1, (i,j)-th entry gives the "flow" between # the i-th point of pp1 and the j-th point of pp2) for "fractional matchings". # Optional elements are the type # of the matching, the cutoff value for distances in R^2, the order # of averages taken, and the resulting distance for the matching. 
# Currently recognized types are "spa" (subpattern assignment,
# where dummy points at maximal dist are introduced if cardinalities differ),
# "ace" (assignment if cardinalities equal, where dist is maximal if cards differ),
# and "mat" (mass transfer, fractional matching that belongs to the
# Wasserstein distance obtained if point patterns are normalized to probability measures).
# -----------------------------------------------------------------

# Constructor for objects of class "pppmatching".
#
#   X, Y   : the two matched point patterns (objects of class "ppp").
#   am     : adjacency/flow matrix of dimension c(X$n, Y$n); entry (i,j)
#            gives the flow between the i-th point of X and the j-th point
#            of Y (0/1 entries for a full point matching, arbitrary
#            nonnegative entries for a fractional matching).
#   type   : optional character string; currently "spa", "ace" or "mat".
#   cutoff : optional cutoff value for distances in R^2.
#   q      : optional order of the averages taken.
#   mdist  : optional distance value achieved by the matching.
#
# Returns an object of class "pppmatching".
pppmatching <- function(X, Y, am, type = NULL, cutoff = NULL, q = NULL,
                        mdist = NULL) {
  verifyclass(X, "ppp")
  verifyclass(Y, "ppp")
  n1 <- X$n
  n2 <- Y$n
  am <- as.matrix(am)
  if (length(am) == 0) {
    ## an empty matrix is acceptable only if one of the patterns is empty;
    ## reshape it so that it has the correct (degenerate) dimensions
    if (min(n1, n2) == 0)
      am <- matrix(am, nrow = n1, ncol = n2)
    else
      stop("Adjacency matrix does not have the right dimensions")
  }
  if (nrow(am) != n1 || ncol(am) != n2)
    stop("Adjacency matrix does not have the right dimensions")
  ## force numeric storage mode (e.g. if a logical matrix was supplied)
  am <- matrix(as.numeric(am), n1, n2)
  res <- list("pp1" = X, "pp2" = Y, "matrix" = am, "type" = type,
              "cutoff" = cutoff, "q" = q, "distance" = mdist)
  class(res) <- "pppmatching"
  res
}

# Plot method for class "pppmatching".
#
# Draws the observation window of pp1, line segments for every pair of
# points with positive flow, and the two point patterns themselves
# (pp1 in red, pp2 in blue).
#
#   addmatch : optional second adjacency matrix; its positive entries are
#              drawn as dashed grey segments (useful for comparing two
#              matchings in one plot).
#
# NOTE: for fractional matchings all flows are currently plotted the same
# way, irrespective of their weights.
plot.pppmatching <- function(x, addmatch = NULL, main = NULL, ...) {
  if (is.null(main))
    main <- short.deparse(substitute(x))
  pp1 <- x$pp1
  pp2 <- x$pp2
  plot.owin(pp1$window, main = main, ...)
  ## (row, col) indices of all pairs with positive flow
  here <- which((x$matrix > 0), arr.ind = TRUE)
  if (!is.null(addmatch)) {
    addhere <- which((addmatch > 0), arr.ind = TRUE)
    seg <- as.psp(from = pp1[addhere[, 1]], to = pp2[addhere[, 2]])
    plot(seg, add = TRUE, lty = 2, col = "gray70")
  }
  if (length(here) > 0) {
    seg <- as.psp(from = pp1[here[, 1]], to = pp2[here[, 2]])
    plot(seg, add = TRUE, ...)
  }
  points(x$pp1, pch = 20, col = 2, ...)
  points(x$pp2, pch = 20, col = 4, ...)
  return(invisible(NULL))
}

# Print method for class "pppmatching".
# Reports the matching type, the cardinalities of the two patterns, the
# window, whether the matching is a point or fractional matching, and the
# associated distance (if recorded).
print.pppmatching <- function(x, ...)
{
  n1 <- x$pp1$n
  n2 <- x$pp2$n
  if (is.null(x$type) || is.null(x$q) || is.null(x$cutoff))
    cat("Generic matching of two planar point patterns \n")
  else
    cat(x$type, "-", x$q, " matching of two planar point patterns (cutoff = ",
        x$cutoff, ") \n", sep = "")
  cat("pp1:", n1, ngettext(n1, "point", "points"), "\n")
  cat("pp2:", n2, ngettext(n2, "point", "points"), "\n")
  print.owin(x$pp1$window)
  npair <- sum(x$matrix > 0)
  if (npair == 0)
    cat("matching is empty \n")
  else {
    ## non-integer flow values indicate a fractional matching
    if (any(x$matrix != trunc(x$matrix)))
      cat("fractional matching,", npair, ngettext(npair, "flow", "flows"), "\n")
    else
      cat("point matching,", npair, ngettext(npair, "line", "lines"), "\n")
  }
  if (!is.null(x$distance))
    cat("distance:", x$distance, "\n")
  return(invisible(NULL))
}

# Summary method for class "pppmatching".
# Like print.pppmatching, but for a point matching it additionally reports
# whether the matching is 1-1, or which of the properties left-total,
# left-unique, right-total, right-unique hold.
summary.pppmatching <- function(object, ...) {
  X <- object$pp1
  Y <- object$pp2
  n1 <- X$n
  n2 <- Y$n
  if (is.null(object$type) || is.null(object$q) || is.null(object$cutoff))
    cat("Generic matching of two planar point patterns \n")
  else
    cat(object$type, "-", object$q,
        " matching of two planar point patterns (cutoff = ",
        object$cutoff, ") \n", sep = "")
  cat("pp1:", n1, ngettext(n1, "point", "points"), "\n")
  cat("pp2:", n2, ngettext(n2, "point", "points"), "\n")
  print.owin(X$window)
  npair <- sum(object$matrix > 0)
  if (npair == 0)
    cat("matching is empty \n")
  else {
    if (any(object$matrix != trunc(object$matrix))) {
      cat("fractional matching,", npair, ngettext(npair, "flow", "flows"), "\n")
    } else {
      cat("point matching,", npair, ngettext(npair, "line", "lines"), "\n")
      rowsum <- rowSums(object$matrix)
      colsum <- colSums(object$matrix)
      ## characterize the matching as a relation between the two patterns
      lt <- min(rowsum) >= 1   # left-total: every point of pp1 is matched
      ru <- max(rowsum) <= 1   # right-unique: no point of pp1 matched twice
      rt <- min(colsum) >= 1   # right-total: every point of pp2 is matched
      lu <- max(colsum) <= 1   # left-unique: no point of pp2 matched twice
      if (lt && ru && rt && lu)
        cat("matching is 1-1 \n")
      else if (any(lt, ru, rt, lu)) {
        cat("matching is",
            if (lt) " left-total" else "",
            if (lu) " left-unique" else "",
            if (rt) " right-total" else "",
            if (ru) " right-unique" else "",
            "\n", sep = "")
      }
    }
  }
  if (!is.null(object$distance))
    cat("distance:", object$distance, "\n")
  return(invisible(NULL))
}

# -----------------------------------------------------------------
# matchingdist computes the distance associated with a certain kind of matching.
# Any of the arguments type, cutoff and order (if supplied) override
# the corresponding arguments in the matching.
# This function is useful for verifying the distance element of an
# object of class pppmatching as well as for comparing different
# (typically non-optimal) matchings.
# -----------------------------------------------------------------

matchingdist <- function(matching, type = NULL, cutoff = NULL, q = NULL) {
  verifyclass(matching, "pppmatching")
  ## fill in unspecified arguments from the matching object; fail if a
  ## needed ingredient is recorded neither here nor in the matching
  if (is.null(type)) {
    if (is.null(matching$type))
      stop("Type of matching unknown. Distance cannot be computed")
    type <- matching$type
  }
  if (is.null(cutoff)) {
    if (is.null(matching$cutoff))
      stop("Cutoff value unknown. Distance cannot be computed")
    cutoff <- matching$cutoff
  }
  if (is.null(q)) {
    if (is.null(matching$q))
      stop("Order unknown. Distance cannot be computed")
    q <- matching$q
  }
  X <- matching$pp1
  Y <- matching$pp2
  n1 <- X$n
  n2 <- Y$n
  ## weighted q-th order average (sum_i x_i^p w_i)^(1/p), computed with the
  ## maximum factored out for numerical stability when p is large
  Lpexpect <- function(x, w, p) {
    f <- max(x)
    if (f == 0) 0 else f * sum((x/f)^p * w)^(1/p)
  }
  if (type == "spa") {
    ## subpattern assignment: dummy points at distance 'cutoff' make up
    ## for a difference in cardinalities
    n <- max(n1, n2)     # divisor for Lpexpect
    if (n == 0) return(0)
    if (min(n1, n2) == 0) return(cutoff)
    shortdim <- which.min(c(n1, n2))
    shortsum <- apply(matching$matrix, shortdim, sum)
    if (any(shortsum != 1))
      warning("matching does not attribute mass 1 to each point of point pattern with smaller cardinality")
    dfix <- pmin(crossdist(X, Y), cutoff)
    if (is.finite(q))
      resdist <- (Lpexpect(dfix, matching$matrix/n, q)^q +
                  abs(n2-n1)/n * cutoff^q)^(1/q)
    else
      resdist <- if (n1 == n2) max(dfix[matching$matrix > 0]) else cutoff
  }
  else if (type == "ace") {
    ## assignment if cardinalities equal; otherwise distance is maximal
    n <- n1              # divisor for Lpexpect
    if (n1 != n2) return(cutoff)
    if (n == 0) return(0)
    rowsum <- rowSums(matching$matrix)
    colsum <- colSums(matching$matrix)
    if (any(c(rowsum, colsum) != 1))
      warning("matching is not 1-1")
    dfix <- pmin(crossdist(X, Y), cutoff)
    if (is.finite(q))
      resdist <- Lpexpect(dfix, matching$matrix/n, q)
    else
      resdist <- max(dfix[matching$matrix > 0])
  }
  else if (type == "mat") {
    ## mass transfer (Wasserstein-type) distance
    n <- min(n1, n2)     # divisor for Lpexpect
    if (min(n1, n2) == 0) return(NaN)
    shortdim <- which.min(c(n1, n2))
    shortsum <- apply(matching$matrix, shortdim, sum)
    if (any(shortsum != 1))
      warning("matching does not attribute mass 1 to each point of point pattern with smaller cardinality")
    dfix <- pmin(crossdist(X, Y), cutoff)
    if (is.finite(q))
      resdist <- Lpexpect(dfix, matching$matrix/n, q)
    else
      resdist <- max(dfix[matching$matrix > 0])
  }
  else stop(paste("Unrecognised type", sQuote(type)))
  return(resdist)
}

# -----------------------------------------------------------------
# The main function for computation of distances
# show.rprimal=TRUE shows at each stage of the algorithm what the current restricted
# primal problem and its solution are (the algorithm jumps between restricted primal
# and dual problem until the solution to the restricted primal (a partial
# matching of the point patterns) is a full matching).
# timelag gives the number of seconds of pause added each time a solution to
# the current restricted primal is found (has only an effect if show.primal=TRUE)
# -----------------------------------------------------------------
#
# pppdist: compute a distance between the two point patterns X and Y
# (both of class "ppp") and, optionally, an optimal point matching that
# realizes this distance.
#
# Arguments (see also the long comment block above):
#   X, Y           point patterns of class "ppp"
#   type           one of "spa" (default), "ace", "mat"
#   cutoff         pairwise distances are truncated at this value
#   q              order of the average used in the distance (may be Inf)
#   matching       TRUE: return a full "pppmatching" object; FALSE: distance only
#   ccode          use the compiled C routines where available
#   auction        use Bertsekas' auction algorithm (C code only)
#   precision      number of decimal digits retained when distances are rounded
#                  to integers for the optimization code; default chosen from
#                  .Machine limits if NULL
#   approximation  order q used to approximate the q = Inf distance
#   show.rprimal   illustrate the restricted-primal steps (forces R code, "ace")
#   timelag        pause (seconds) between illustration steps
#
pppdist <- function(X, Y, type = "spa", cutoff = 1, q = 1, matching = TRUE,
                    ccode = TRUE, auction = TRUE, precision = NULL,
                    approximation = 10, show.rprimal = FALSE, timelag = 0) {
  verifyclass(X, "ppp")
  verifyclass(Y, "ppp")
  ## Resolve incompatible option combinations, falling back with a warning.
  if (!ccode && type == "mat") {
    warning("R code is not available for type = ", dQuote("mat"),
            ". C code is used instead")
    ccode <- TRUE
  }
  if (!ccode && is.infinite(q) && is.infinite(approximation)) {
    warning("R code is not available for q = Inf and approximation = Inf. C code is used instead")
    ccode <- TRUE
  }
  if (ccode && is.infinite(q) && is.infinite(approximation) &&
      type == "spa" && X$n != Y$n) {
    warning("approximation = Inf not available for type = ", dQuote("spa"),
            " and point patterns with differing cardinalities")
    approximation <- 10
  }
  if (is.infinite(q) && is.infinite(approximation) && type == "mat") {
    warning("approximation = Inf not available for type = ", dQuote("mat"))
    approximation <- 10
  }
  if (show.rprimal) {
    ## the illustrated primal-dual algorithm exists only as R code for "ace"
    ccode <- FALSE
    auction <- FALSE
    if (type != "ace"){
      warning("show.rprimal = TRUE not available for type = ", dQuote(type),
              ". Type is changed to ", dQuote("ace"))
      type <- "ace"
    }
  }
  if (is.null(precision)) {
    ## Default precision: as many digits as the integer representation (C code)
    ## or the double representation (R code) can safely carry.
    if (ccode) precision <- trunc(log10(.Machine$integer.max))
    else {
      db <- .Machine$double.base
      minprec <- trunc(log10(.Machine$double.base^.Machine$double.digits))
      if (is.finite(q))
        precision <- min(max(minprec,2*q),
                         (.Machine$double.max.exp-1)*log(db)/log(10))
      else
        precision <- min(max(minprec,2*approximation),
                         (.Machine$double.max.exp-1)*log(db)/log(10))
    }
  }
  if (type == "spa") {
    ## "subpattern assignment": pad the smaller pattern with dummy points
    ## that are at distance 'cutoff' from everything.
    if (X$n == 0 && Y$n == 0) {
      if (!matching) return(0)
      else {
        return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), type, cutoff, q, 0))
      }
    }
    n1 <- X$n
    n2 <- Y$n
    n <- max(n1,n2)
    dfix <- matrix(cutoff,n,n)
    if (min(n1,n2) > 0) dfix[1:n1,1:n2] <- crossdist(X,Y)
    # d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) })
    d <- dfix <- pmin(dfix,cutoff)
    if (is.infinite(q)) {
      if (n1 == n2 || matching)
        return(pppdist.prohorov(X, Y, n, d, type, cutoff, matching, ccode,
                                auction, precision, approximation))
      else return(cutoff)
      # in the case n1 != n2 the distance is clear, and in a sense any
      # matching would be correct. We go here the extra mile and call
      # pppdist.prohorov in order to find (approximate) the matching
      # that is intuitively most interesting, i.e. the one that
      # pairs the points of the smaller cardinality point pattern with the
      # points of the larger cardinality point pattern in such a way that the
      # maximal pairing distance is minimal (for q < Inf the q-th order pairing
      # distance before the introduction of dummy points is automatically
      # minimal if it is minimal after the introduction of dummy points),
      # which would be the case for the obtained pairing if q < Inf
    }
  }
  else if (type == "ace") {
    ## "assignment only if cardinalities are equal": distance is 'cutoff'
    ## whenever the two patterns differ in size.
    if (X$n != Y$n) {
      if (!matching) return(cutoff)
      else {
        return(pppmatching(X, Y, matrix(0, nrow=X$n, ncol=Y$n),
                           type, cutoff, q, cutoff))
      }
    }
    if (X$n == 0) {
      if (!matching) return(0)
      else {
        return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), type, cutoff, q, 0))
      }
    }
    n <- n1 <- n2 <- X$n
    dfix <- crossdist(X,Y)
    # d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) })
    d <- dfix <- pmin(dfix, cutoff)
    if (is.infinite(q))
      return(pppdist.prohorov(X, Y, n, d, type, cutoff, matching, ccode,
                              auction, precision, approximation))
  }
  else if (type == "mat") {
    ## "mass transfer": delegated entirely to pppdist.mat (C code, primal-dual)
    if (!ccode)
      warning("R code is not available for type = ", dQuote("mat"),
              ". C code is used instead")
    if (auction)
      warning("Auction algorithm is not available for type = ", dQuote("mat"),
              ". Primal-dual algorithm is used instead")
    return(pppdist.mat(X, Y, cutoff, q, matching, precision, approximation))
  }
  else stop(paste("Unrecognised type", sQuote(type)))
  ## Finite q, types "spa"/"ace": rescale distances to [0,1], raise to the
  ## q-th power and round to integers for the optimization routines.
  d <- d/max(d)
  d <- round((d^q)*(10^precision))
  nzeroes <- sum(d == 0 & dfix > 0)
  if(nzeroes > 0)
    warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"),
                  "introduced, while rounding the q-th powers of distances"))
  # NOTE(review): scalar condition below uses '&'; '&&' would be idiomatic
  if(ccode & any(d > .Machine$integer.max))
    stop("integer overflow, while rounding the q-th powers of distances")
  if(!ccode) {
    if (any(is.infinite(d)))
      stop("Inf obtained, while taking the q-th powers of distances")
    maxd <- max(d)
    npszeroes <- sum(maxd/d[d>0] >= .Machine$double.base^.Machine$double.digits)
    if (npszeroes > 0)
      warning(paste(npszeroes, ngettext(npszeroes, "pseudo-zero", "pseudo-zeroes"),
                    "introduced, while taking the q-th powers of distances"))
    # a pseudo-zero is a value that is positive but contributes nothing to the
    # q-th order average because it is too small compared to the other values
  }
  ## q-th order mean, computed with the largest element factored out so that
  ## (x/f)^p cannot overflow; returns 0 for an all-zero vector
  Lpmean <- function(x, p) {
    f <- max(x)
    return(ifelse(f==0, 0, f * mean((x/f)^p)^(1/p)))
  }
  ## Solve the assignment problem by one of three engines:
  ## illustrated R primal-dual, C auction, C primal-dual, or plain R primal-dual.
  if (show.rprimal && type == "ace") {
    assig <- acedist.show(X, Y, n, d, timelag)
    am <- matrix(0, n, n)
    am[cbind(1:n, assig[1:n])] <- 1
  }
  else if (ccode) {
    if (auction) {
      dupper <- max(d)/10
      lasteps <- 1/(n+1)
      epsfac <- 10
      epsvec <- lasteps
      # Bertsekas: from dupper/2 to 1/(n+1) divide repeatedly by a constant
      while (lasteps < dupper) {
        lasteps <- lasteps*epsfac
        epsvec <- c(epsvec,lasteps)
      }
      epsvec <- rev(epsvec)[-1]
      neps <- length(epsvec)
      stopifnot(neps >= 1)
      d <- max(d)-d
      # auctionbf uses a "desire matrix"
      res <- .C("auctionbf",
                as.integer(d),
                as.integer(n),
                pers_to_obj = as.integer(rep(-1,n)),
                price = as.double(rep(0,n)),
                profit = as.double(rep(0,n)),
                as.integer(neps),
                as.double(epsvec),
                PACKAGE = "spatstat")
      am <- matrix(0, n, n)
      am[cbind(1:n,res$pers_to_obj+1)] <- 1
    } else {
      res <- .C("dwpure",
                as.integer(d),
                as.integer(rep.int(1,n)),
                as.integer(rep.int(1,n)),
                as.integer(n),
                as.integer(n),
                flowmatrix = as.integer(integer(n^2)),
                PACKAGE = "spatstat")
      am <- matrix(res$flowmatrix, n, n)
    }
  }
  else {
    assig <- acedist.noshow(X, Y, n, d)
    am <- matrix(0, n, n)
    am[cbind(1:n, assig[1:n])] <- 1
  }
  ## evaluate the distance on the ORIGINAL (un-rounded) truncated distances
  resdist <- Lpmean(dfix[am == 1], q)
  if (!matching) return(resdist)
  else {
    amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2))
    # previous line solves various problems associated with min(n1,n2) = 0 or = 1
    return(pppmatching(X, Y, amsmall, type, cutoff, q, resdist))
  }
}
#
#
# ===========================================================
# ===========================================================
# Anything below:
# Internal functions usually not to be called by user
# ===========================================================
# ===========================================================
#
#
# Called if show.rprimal is true.
# Same primal-dual (Hungarian) algorithm as acedist.noshow, but plots the
# current partial matching at each step and pauses 'timelag' seconds.
# Returns the final assignment vector (assig[i] = column matched to row i).
#
acedist.show <- function(X, Y, n, d, timelag = 0) {
  plot(pppmatching(X, Y, matrix(0, n, n)))
  # initialization of dual variables
  u <- apply(d, 1, min)
  d <- d - u
  v <- apply(d, 2, min)
  d <- d - rep(v, each=n)
  # the main loop
  feasible <- FALSE
  while (!feasible) {
    rpsol <- maxflow(d)   # rpsol = restricted primal, solution
    am <- matrix(0, n, n)
    for (i in 1:n) {
      if (rpsol$assignment[i] > -1) am[i, rpsol$assignment[i]] <- TRUE
    }
    Sys.sleep(timelag)
    channelmat <- (d == 0 & !am)
    plot(pppmatching(X, Y, am), addmatch = channelmat)
    # if the solution of the restricted primal is not feasible for
    # the original primal, update dual variables
    if (min(rpsol$assignment) == -1) {
      w1 <- which(rpsol$fi_rowlab > -1)
      w2 <- which(rpsol$fi_collab == -1)
      subtractor <- min(d[w1, w2])
      d[w1,] <- d[w1,] - subtractor
      d[,-w2] <- d[,-w2] + subtractor
    }
    # otherwise break the loop
    else {
      feasible <- TRUE
    }
  }
  return(rpsol$assignment)
}
#
# R-version of hungarian algo without the pictures
# useful if q is large
#
acedist.noshow <- function(X, Y, n, d) {
  # initialization of dual variables
  u <- apply(d, 1, min)
  d <- d - u
  v <- apply(d, 2, min)
  d <- d - rep(v, each=n)
  # the main loop
  feasible <- FALSE
  while (!feasible) {
    rpsol <- maxflow(d)   # rpsol = restricted primal, solution
    # ~~~~~~~~~ deleted by AJB ~~~~~~~~~~~~~~~~~
    # am <- matrix(0, n, n)
    # for (i in 1:n) {
    #   if (rpsol$assignment[i] > -1) am[i, rpsol$assignment[i]] <- TRUE
    # }
    # channelmat <- (d == 0 & !am)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~
    # if the solution of the restricted primal is not feasible for
    # the original primal, update dual variables
    if (min(rpsol$assignment) == -1) {
      w1 <- which(rpsol$fi_rowlab > -1)
      w2 <- which(rpsol$fi_collab == -1)
      subtractor <- min(d[w1, w2])
      d[w1,] <- d[w1,] - subtractor
      d[,-w2] <- d[,-w2] + subtractor
    }
    # otherwise break the loop
    else {
      feasible <- TRUE
    }
  }
  return(rpsol$assignment)
}
#
# Solution of restricted primal.
# Given a square cost matrix (each row must contain a zero), finds a maximal
# partial assignment along zero-cost entries by augmenting-path labeling.
# Returns a list with components:
#   assignment  column matched to each row (-1 if unmatched)
#   fi_rowlab   final row labels (-1 unlabeled, 0 source label)
#   fi_collab   final column labels (-1 unlabeled)
# The final labels are used by the callers to update the dual variables.
#
maxflow <- function(costm) {
  stopifnot(is.matrix(costm))
  stopifnot(nrow(costm) == ncol(costm))
  if(!all(apply(costm == 0, 1, any)))
    stop("Each row of the cost matrix must contain a zero")
  m <- dim(costm)[1]   # cost matrix is square m * m
  assignment <- rep.int(-1, m)
  # -1 means no pp2-point assigned to i-th pp1-point
  # initial assignment or rowlabel <- source label (= 0) where not possible
  for (i in 1:m) {
    j <- match(0, costm[i,])
    if (!(j %in% assignment)) assignment[i] <- j
  }
  newlabelfound <- TRUE
  while (newlabelfound) {
    rowlab <- rep.int(-1, m)
    # -1 means no label given, 0 stands for source label
    collab <- rep.int(-1, m)
    rowlab <- ifelse(assignment == -1, 0, rowlab)
    # column and row labeling procedure until either breakthrough occurs
    # (which means that there is a better point assignment, i.e. one that
    # creates more point pairs than the current one (flow can be increased))
    # or no more labeling is possible
    breakthrough <- -1
    while (newlabelfound && breakthrough == -1) {
      newlabelfound <- FALSE
      for (i in 1:m) {
        if (rowlab[i] != -1) {
          for (j in 1:m) {
            if (costm[i,j] == 0 && collab[j] == -1) {
              collab[j] <- i
              newlabelfound <- TRUE
              if (!(j %in% assignment) && breakthrough == -1)
                breakthrough <- j
            }
          }
        }
      }
      for (j in 1:m) {
        if (collab[j] != -1) {
          for (i in 1:m) {
            if (assignment[i] == j && rowlab[i] == -1) {
              rowlab[i] <- j
              newlabelfound <- TRUE
            }
          }
        }
      }
    }
    # if the while-loop was left due to breakthrough,
    # reassign points (i.e. redirect flow) and restart labeling procedure
    if (breakthrough != -1) {
      l <- breakthrough
      while (l != 0) {
        k <- collab[l]
        assignment[k] <- l
        l <- rowlab[k]
      }
    }
  }
  # the outermost while-loop is left, no more labels can be given; hence
  # the maximal number of points are paired given the current restriction
  # (flow is maximal given the current graph)
  return(list("assignment"=assignment, "fi_rowlab"=rowlab, "fi_collab"=collab))
}
#
# Prohorov distance computation/approximation (called if q = Inf in pppdist
# and type = "spa" or "ace")
# Exact brute force computation of distance if approximation = Inf,
# scales very badly, should not be used for cardinality n larger than 10-12
# Approximation by order q distance gives often (if the warning messages
# are not too extreme) the right matching and therefore the exact Prohorov
# distance, but in very rare cases the result can be very wrong. However,
# it is always an exact upper bound of the Prohorov distance (since it is
# based on *a* pairing, as opposed to an optimal pairing).
# pppdist.prohorov <- function(X, Y, n, dfix, type, cutoff = 1, matching = TRUE, ccode = TRUE, auction = TRUE, precision = 9, approximation = 10) { n1 <- X$n n2 <- Y$n d <- dfix/max(dfix) if (is.finite(approximation)) { warning(paste("distance with parameter q = Inf is approximated by distance with parameter q =", approximation)) d <- round((d^approximation)*(10^precision)) nzeroes <- sum(d == 0 & dfix > 0) if (nzeroes > 0) warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding distances")) if (ccode) { if (any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") if (auction) { dupper <- max(d)/10 lasteps <- 1/(n+1) epsfac <- 10 epsvec <- lasteps # Bertsekas: from dupper/2 to 1/(n+1) divide repeatedly by a constant while (lasteps < dupper) { lasteps <- lasteps*epsfac epsvec <- c(epsvec,lasteps) } epsvec <- rev(epsvec)[-1] neps <- length(epsvec) stopifnot(neps >= 1) d <- max(d)-d # auctionbf uses a "desire matrix" res <- .C("auctionbf", as.integer(d), as.integer(n), pers_to_obj = as.integer(rep(-1,n)), price = as.double(rep(0,n)), profit = as.double(rep(0,n)), as.integer(neps), as.double(epsvec), PACKAGE = "spatstat") am <- matrix(0, n, n) am[cbind(1:n,res$pers_to_obj+1)] <- 1 } else { res <- .C("dwpure", as.integer(d), as.integer(rep.int(1,n)), as.integer(rep.int(1,n)), as.integer(n), as.integer(n), flowmatrix = as.integer(integer(n^2)), PACKAGE = "spatstat") am <- matrix(res$flowmatrix, n, n) } } else { if (any(is.infinite(d))) stop("Inf obtained, while taking the q-th powers of distances") maxd <- max(d) npszeroes <- sum(maxd/d[d>0] >= .Machine$double.base^.Machine$double.digits) if (npszeroes > 0) warning(paste(npszeroes, ngettext(npszeroes, "pseudo-zero", "pseudo-zeroes"), "introduced, while taking the q-th powers of distances")) assig <- acedist.noshow(X, Y, n, d) am <- matrix(0, n, n) am[cbind(1:n, assig[1:n])] <- 1 } } else { d <- round(d*(10^precision)) nzeroes <- sum(d == 0 & dfix 
> 0) if (nzeroes > 0) warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding distances")) if (any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") res <- .C("dinfty_R", as.integer(d), as.integer(n), assignment = as.integer(rep.int(-1,n)), PACKAGE = "spatstat") assig <- res$assignment am <- matrix(0, n, n) am[cbind(1:n, assig[1:n])] <- 1 } if (n1 == n2) resdist <- max(dfix[am == 1]) else resdist <- cutoff if (!matching) return(resdist) else { amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2)) # previous line solves various problems associated with min(n1,n2) = 0 or = 1 return(pppmatching(X, Y, amsmall, type, cutoff, Inf, resdist)) } } # # Computation of "pure Wasserstein distance" for any q (called if type="mat" # in pppdist, no matter if q finite or not). # If q = Inf, approximation using ccode is enforced # (approximation == Inf is not allowed here). # pppdist.mat <- function(X, Y, cutoff = 1, q = 1, matching = TRUE, precision = 9, approximation = 10) { n1 <- X$n n2 <- Y$n n <- min(n1,n2) if (n == 0) { if (!matching) return(NaN) else return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), "mat", cutoff, q, NaN)) } dfix <- crossdist(X,Y) # d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) }) d <- dfix <- pmin(dfix, cutoff) d <- d/max(d) if (is.infinite(q)) { if (is.infinite(approximation)) stop("approximation = Inf") warning(paste("distance with parameter q = Inf is approximated by distance with parameter q =", approximation)) d <- round((d^approximation)*(10^precision)) nzeroes <- sum(d == 0 & dfix > 0) if (nzeroes > 0) warning(paste(nzeroes, "zeroes introduced, while rounding distances")) if (any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") gcd <- greatest.common.divisor(n1,n2) mass1 <- n2/gcd mass2 <- n1/gcd res <- .C("dwpure", as.integer(d), as.integer(rep.int(mass1,n1)), as.integer(rep.int(mass2,n2)), 
as.integer(n1), as.integer(n2), flowmatrix = as.integer(integer(n1*n2)), PACKAGE = "spatstat") am <- matrix(res$flowmatrix/(max(n1,n2)/gcd), n1, n2) resdist <- max(dfix[am > 0]) } else { d <- round((d^q)*(10^precision)) nzeroes <- sum(d == 0 & dfix > 0) if(nzeroes > 0) warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding the q-th powers of distances")) if(any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") gcd <- greatest.common.divisor(n1,n2) mass1 <- n2/gcd mass2 <- n1/gcd Lpexpect <- function(x, w, p) { f <- max(x) return(ifelse(f==0, 0, f * sum((x/f)^p * w)^(1/p))) } res <- .C("dwpure", as.integer(d), as.integer(rep.int(mass1,n1)), as.integer(rep.int(mass2,n2)), as.integer(n1), as.integer(n2), flowmatrix = as.integer(integer(n1*n2)), PACKAGE = "spatstat") am <- matrix(res$flowmatrix/(max(n1,n2)/gcd), n1, n2) # our "adjacency matrix" in this case is standardized to have # rowsum 1 if n1 <= n2 and colsum 1 if n1 >= n2 resdist <- Lpexpect(dfix, am/n, q) } if (!matching) return(resdist) else { amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2)) # previous line solves various problems associated with min(n1,n2) = 0 or = 1 return(pppmatching(X, Y, amsmall, "mat", cutoff, q, resdist)) } } spatstat/R/ppx.R0000755000176200001440000003643413115271120013256 0ustar liggesusers# # ppx.R # # class of general point patterns in any dimension # # $Revision: 1.60 $ $Date: 2017/06/05 10:31:58 $ # ppx <- local({ ctype.table <- c("spatial", "temporal", "local", "mark") ctype.real <- c(TRUE, TRUE, FALSE, FALSE) ppx <- function(data, domain=NULL, coord.type=NULL, simplify=FALSE) { data <- as.hyperframe(data) # columns suitable for spatial coordinates suitable <- with(unclass(data), vtype == "dfcolumn" & (vclass == "numeric" | vclass == "integer")) if(is.null(coord.type)) { # assume all suitable columns of data are spatial coordinates # and all other columns are marks. 
ctype <- ifelse(suitable, "spatial", "mark") } else { stopifnot(is.character(coord.type)) stopifnot(length(coord.type) == ncol(data)) ctypeid <- pmatch(coord.type, ctype.table, duplicates.ok=TRUE) # validate if(any(uhoh <- is.na(ctypeid))) stop(paste("Unrecognised coordinate", ngettext(sum(uhoh), "type", "types"), commasep(sQuote(coord.type[uhoh])))) if(any(uhoh <- (!suitable & ctype.real[ctypeid]))) { nuh <- sum(uhoh) stop(paste(ngettext(nuh, "Coordinate", "Coordinates"), commasep(sQuote(names(data)[uhoh])), ngettext(nuh, "does not", "do not"), "contain real numbers")) } ctype <- ctype.table[ctypeid] } ctype <- factor(ctype, levels=ctype.table) # if(simplify && all(ctype == "spatial")) { # attempt to reduce to ppp or pp3 d <- length(ctype) if(d == 2) { ow <- try(as.owin(domain), silent=TRUE) if(!inherits(ow, "try-error")) { X <- try(as.ppp(as.data.frame(data), W=ow)) if(!inherits(X, "try-error")) return(X) } } else if(d == 3) { bx <- try(as.box3(domain), silent=TRUE) if(!inherits(bx, "try-error")) { m <- as.matrix(as.data.frame(data)) X <- try(pp3(m[,1], m[,2], m[,3], bx)) if(!inherits(X, "try-error")) return(X) } } } out <- list(data=data, ctype=ctype, domain=domain) class(out) <- "ppx" return(out) } ppx }) is.ppx <- function(x) { inherits(x, "ppx") } nobjects.ppx <- npoints.ppx <- function(x) { nrow(x$data) } print.ppx <- function(x, ...) 
{ cat("Multidimensional point pattern\n") sd <- summary(x$data) np <- sd$ncases nama <- sd$col.names cat(paste(np, ngettext(np, "point", "points"), "\n")) if(any(iscoord <- (x$ctype == "spatial"))) cat(paste(sum(iscoord), "-dimensional space coordinates ", paren(paste(nama[iscoord], collapse=",")), "\n", sep="")) if(any(istime <- (x$ctype == "temporal"))) cat(paste(sum(istime), "-dimensional time coordinates ", paren(paste(nama[istime], collapse=",")), "\n", sep="")) if(any(islocal <- (x$ctype == "local"))) cat(paste(sum(islocal), ngettext(sum(islocal), "column", "columns"), "of local coordinates:", commasep(sQuote(nama[islocal])), "\n")) if(any(ismark <- (x$ctype == "mark"))) cat(paste(sum(ismark), ngettext(sum(ismark), "column", "columns"), "of marks:", commasep(sQuote(nama[ismark])), "\n")) if(!is.null(x$domain)) { cat("Domain:\n\t") print(x$domain) } invisible(NULL) } summary.ppx <- function(object, ...) { object } plot.ppx <- function(x, ...) { xname <- short.deparse(substitute(x)) coo <- coords(x, local=FALSE) dom <- x$domain m <- ncol(coo) if(m == 1) { coo <- coo[,1] ran <- diff(range(coo)) ylim <- c(-1,1) * ran/20 do.call(plot.default, resolve.defaults(list(coo, numeric(length(coo))), list(...), list(asp=1, ylim=ylim, axes=FALSE, xlab="", ylab=""))) axis(1, pos=ylim[1]) } else if(m == 2) { if(is.null(dom)) { # plot x, y coordinates only nama <- names(coo) do.call.matched(plot.default, resolve.defaults(list(x=coo[,1], y=coo[,2], asp=1), list(...), list(main=xname), list(xlab=nama[1], ylab=nama[2]))) } else { add <- resolve.defaults(list(...), list(add=FALSE))$add if(!add) { # plot domain, whatever it is do.call(plot, resolve.defaults(list(dom), list(...), list(main=xname))) } # convert to ppp x2 <- ppp(coo[,1], coo[,2], window=as.owin(dom), marks=as.data.frame(marks(x)), check=FALSE) # invoke plot.ppp return(do.call(plot, resolve.defaults(list(x2), list(add=TRUE), list(...)))) } } else if(m == 3) { # convert to pp3 if(is.null(dom)) dom <- 
box3(range(coo[,1]), range(coo[,2]), range(coo[,3])) x3 <- pp3(coo[,1], coo[,2], coo[,3], dom) # invoke plot.pp3 nama <- names(coo) do.call(plot, resolve.defaults(list(x3), list(...), list(main=xname), list(xlab=nama[1], ylab=nama[2], zlab=nama[3]))) } else stop(paste("Don't know how to plot a general point pattern in", ncol(coo), "dimensions")) return(invisible(NULL)) } "[.ppx" <- function (x, i, drop=FALSE, ...) { da <- x$data dom <- x$domain if(!missing(i)) { if(inherits(i, c("boxx", "box3"))) { dom <- i i <- inside.boxx(da, w=i) } da <- da[i, , drop=FALSE] } out <- list(data=da, ctype=x$ctype, domain=dom) class(out) <- "ppx" if(drop) { # remove unused factor levels mo <- marks(out) switch(markformat(mo), none = { }, vector = { if(is.factor(mo)) marks(out) <- factor(mo) }, dataframe = { isfac <- sapply(mo, is.factor) if(any(isfac)) mo[, isfac] <- lapply(mo[, isfac], factor) marks(out) <- mo }, hyperframe = { lmo <- as.list(mo) isfac <- sapply(lmo, is.factor) if(any(isfac)) mo[, isfac] <- as.hyperframe(lapply(lmo[isfac], factor)) marks(out) <- mo }) } return(out) } domain <- function(X, ...) { UseMethod("domain") } domain.ppx <- function(X, ...) { X$domain } coords <- function(x, ...) { UseMethod("coords") } coords.ppx <- function(x, ..., spatial=TRUE, temporal=TRUE, local=TRUE) { ctype <- x$ctype chosen <- (ctype == "spatial" & spatial) | (ctype == "temporal" & temporal) | (ctype == "local" & local) as.data.frame(x$data[, chosen, drop=FALSE]) } coords.ppp <- function(x, ...) 
{ data.frame(x=x$x,y=x$y) } "coords<-" <- function(x, ..., value) { UseMethod("coords<-") } "coords<-.ppp" <- function(x, ..., value) { win <- x$window if(is.null(value)) { # empty pattern return(ppp(window=win)) } value <- as.data.frame(value) if(ncol(value) != 2) stop("Expecting a 2-column matrix or data frame, or two vectors") result <- as.ppp(value, win) marks(result) <- marks(x) return(result) } "coords<-.ppx" <- function(x, ..., spatial=TRUE, temporal=TRUE, local=TRUE, value) { ctype <- x$ctype chosen <- (ctype == "spatial" & spatial) | (ctype == "temporal" & temporal) | (ctype == "local" & local) x$data[, chosen] <- value return(x) } as.hyperframe.ppx <- function(x, ...) { x$data } as.data.frame.ppx <- function(x, ...) { as.data.frame(x$data, ...) } as.matrix.ppx <- function(x, ...) { as.matrix(as.data.frame(x, ...)) } marks.ppx <- function(x, ..., drop=TRUE) { ctype <- x$ctype chosen <- (ctype == "mark") if(!any(chosen)) return(NULL) x$data[, chosen, drop=drop] } "marks<-.ppx" <- function(x, ..., value) { ctype <- x$ctype retain <- (ctype != "mark") coorddata <- x$data[, retain, drop=FALSE] if(is.null(value)) { newdata <- coorddata newctype <- ctype[retain] } else { if(is.matrix(value) && nrow(value) == nrow(x$data)) { # assume matrix is to be treated as data frame value <- as.data.frame(value) } if(!is.data.frame(value) && !is.hyperframe(value)) value <- hyperframe(marks=value) if(is.hyperframe(value) || is.hyperframe(coorddata)) { value <- as.hyperframe(value) coorddata <- as.hyperframe(coorddata) } if(ncol(value) == 0) { newdata <- coorddata newctype <- ctype[retain] } else { if(nrow(coorddata) == 0) value <- value[integer(0), , drop=FALSE] newdata <- cbind(coorddata, value) newctype <- factor(c(as.character(ctype[retain]), rep.int("mark", ncol(value))), levels=levels(ctype)) } } out <- list(data=newdata, ctype=newctype, domain=x$domain) class(out) <- class(x) return(out) } unmark.ppx <- function(X) { marks(X) <- NULL return(X) } markformat.ppx <- 
function(x) { mf <- x$markformat if(is.null(mf)) mf <- markformat(marks(x)) return(mf) } boxx <- function(..., unitname=NULL) { if(length(list(...)) == 0) stop("No data") ranges <- data.frame(...) nama <- names(list(...)) if(is.null(nama) || !all(nzchar(nama))) names(ranges) <- paste("x", 1:ncol(ranges),sep="") if(nrow(ranges) != 2) stop("Data should be vectors of length 2") if(any(unlist(lapply(ranges, diff)) <= 0)) stop("Illegal range: Second element <= first element") out <- list(ranges=ranges, units=as.units(unitname)) class(out) <- "boxx" return(out) } as.boxx <- function(..., warn.owin = TRUE) { a <- list(...) n <- length(a) if (n == 0) stop("No arguments given") if (n == 1) { a <- a[[1]] if (inherits(a, "boxx")) return(a) if (inherits(a, "box3")) return(boxx(a$xrange, a$yrange, a$zrange, unitname = a$units)) if (inherits(a, "owin")) { if (!is.rectangle(a) && warn.owin) warning("The owin object does not appear to be rectangular - the bounding box is used!") return(boxx(a$xrange, a$yrange, unitname = a$units)) } if (is.numeric(a)) { if ((length(a)%%2) == 0) return(boxx(split(a, rep(1:(length(a)/2), each = 2)))) stop(paste("Don't know how to interpret", length(a), "numbers as a box")) } if (!is.list(a)) stop("Don't know how to interpret data as a box") } return(do.call(boxx, a)) } print.boxx <- function(x, ...) 
{ m <- ncol(x$ranges) cat(paste(m, "-dimensional box:\n", sep="")) bracket <- function(z) paste("[", paste(signif(z, 5), collapse=", "), "]", sep="") v <- paste(unlist(lapply(x$ranges, bracket)), collapse=" x ") s <- summary(unitname(x)) cat(paste(v, s$plural, s$explain, "\n")) invisible(NULL) } unitname.boxx <- function(x) { x$units } "unitname<-.boxx" <- function(x, value) { x$units <- as.units(value) return(x) } unitname.ppx <- function(x) { unitname(x$domain) } "unitname<-.ppx" <- function(x, value) { d <- x$domain unitname(d) <- value x$domain <- d return(x) } as.owin.boxx <- function(W, ..., fatal=TRUE) { ra <- W$ranges if(length(ra) == 2) return(owin(ra[[1]], ra[[2]])) if(fatal) stop(paste("Cannot interpret box of dimension", length(ra), "as a window")) return(NULL) } sidelengths.boxx <- function(x) { stopifnot(inherits(x, "boxx")) y <- unlist(lapply(x$ranges, diff)) return(y) } volume.boxx <- function(x) { prod(sidelengths(x)) } diameter.boxx <- function(x) { d <- sqrt(sum(sidelengths(x)^2)) return(d) } shortside.boxx <- function(x) { return(min(sidelengths(x))) } eroded.volumes.boxx <- local({ eroded.volumes.boxx <- function(x, r) { len <- sidelengths(x) ero <- sapply(as.list(len), erode1side, r=r) apply(ero, 1, prod) } erode1side <- function(z, r) { pmax.int(0, z - 2 * r)} eroded.volumes.boxx }) runifpointx <- function(n, domain, nsim=1, drop=TRUE) { check.1.integer(n) check.1.integer(nsim) stopifnot(inherits(domain, "boxx")) ra <- domain$ranges d <- length(ra) result <- vector(mode="list", length=nsim) for(i in 1:nsim) { if(n == 0) { coo <- matrix(, nrow=0, ncol=d) } else { coo <- mapply(runif, n=rep(n, d), min=ra[1,], max=ra[2,]) } colnames(coo) <- colnames(ra) df <- as.data.frame(coo) result[[i]] <- ppx(df, domain) } if(nsim == 1 && drop) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } rpoisppx <- function(lambda, domain, nsim=1, drop=TRUE) { stopifnot(inherits(domain, "boxx")) 
stopifnot(is.numeric(lambda) && length(lambda) == 1 && lambda >= 0) n <- rpois(nsim, lambda * volume.boxx(domain)) result <- vector(mode="list", length=nsim) for(i in 1:nsim) result[[i]] <- runifpointx(n[i], domain) if(nsim == 1 && drop) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } unique.ppx <- function(x, ..., warn=FALSE) { dup <- duplicated(x, ...) if(!any(dup)) return(x) if(warn) warning(paste(sum(dup), "duplicated points were removed"), call.=FALSE) y <- x[!dup] return(y) } duplicated.ppx <- function(x, ...) { dup <- duplicated(as.data.frame(x), ...) return(dup) } anyDuplicated.ppx <- function(x, ...) { anyDuplicated(as.data.frame(x), ...) } multiplicity.ppx <- function(x) { mul <- multiplicity(as.data.frame(x)) return(mul) } intensity.ppx <- function(X, ...) { if(!is.multitype(X)) { n <- npoints(X) } else { mks <- marks(X) n <- as.vector(table(mks)) names(n) <- levels(mks) } v <- volume(domain(X)) return(n/v) } grow.boxx <- function(W, left, right = left){ W <- as.boxx(W) ra <- W$ranges d <- length(ra) if(any(left < 0) || any(right < 0)) stop("values of left and right margin must be nonnegative.") if(length(left)==1) left <- rep(left, d) if(length(right)==1) right <- rep(right, d) if(length(left)!=d || length(right)!=d){ stop("left and right margin must be either of length 1 or the dimension of the boxx.") } W$ranges[1,] <- ra[1,]-left W$ranges[2,] <- ra[2,]+right return(W) } inside.boxx <- function(..., w = NULL){ if(is.null(w)) stop("Please provide a boxx using the named argument w.") w <- as.boxx(w) dat <- list(...) if(length(dat)==1){ dat1 <- dat[[1]] if(inherits(dat1, "ppx")) dat <- coords(dat1) if(inherits(dat1, "hyperframe")) dat <- as.data.frame(dat1) } ra <- w$ranges if(length(ra)!=length(dat)) stop("Mismatch between dimension of boxx and number of coordinate vectors.") ## Check coord. 
vectors have equal length n <- length(dat[[1]]) if(any(lengths(dat)!=n)) stop("Coordinate vectors have unequal length.") index <- rep(TRUE, n) for(i in seq_along(ra)){ index <- index & inside.range(dat[[i]], ra[[i]]) } return(index) } spatdim <- function(X) { if(is.sob(X)) 2L else if(inherits(X, "box3")) 3 else if(inherits(X, "boxx")) length(X$ranges) else if(is.ppx(X)) as.integer(sum(X$ctype == "spatial")) else NA_integer_ } spatstat/R/triplets.R0000644000176200001440000001244613115225157014320 0ustar liggesusers# # # triplets.R # # $Revision: 1.17 $ $Date: 2016/12/30 01:44:07 $ # # The triplets interaction # # Triplets() create an instance of the triplets process # [an object of class 'interact'] # # ------------------------------------------------------------------- # Triplets <- local({ DebugTriplets <- FALSE # define triplet potential TripletPotential <- function(X,U,EqualPairs,pars,correction, ...) { if(!all(ok <- correction %in% c("border", "none"))) { nbad <- sum(bad <- !ok) warning(paste(ngettext(nbad, "Correction", "Corrections"), commasep(sQuote(correction[bad])), ngettext(nbad, "is unavailable and was ignored", "are unavailable and were ignored"))) } # check that all points of X are included in U nX <- npoints(X) nU <- npoints(U) XinU <- if(length(EqualPairs) == 0) integer(0) else EqualPairs[,1] missX <- which(table(factor(XinU, levels=1:nX)) == 0) if((nmiss <- length(missX)) > 0) { # add missing points to (the end of) U U <- superimpose(U, X[missX], W=as.owin(X), check=FALSE) EqualPairs <- rbind(EqualPairs, cbind(missX, nU + 1:nmiss)) nU <- nU + nmiss } iXX <- EqualPairs[,1] iXU <- EqualPairs[,2] # construct map from X index to U index mapXU <- integer(nX) mapXU[iXX] <- iXU # construct map from U index to X index mapUX <- rep.int(NA_integer_, nU) mapUX[iXU] <- iXX # logical vector identifying which quadrature points are in X isdata <- rep.int(FALSE, nU) isdata[iXU] <- TRUE # identify all close pairs u, x r <- pars$r cp <- crosspairs(U, X, r, 
what="indices") if(DebugTriplets) cat(paste("crosspairs at distance", r, "yields", length(cp$i), "pairs\n")) IU <- cp$i J <- cp$j # map X index to U index JU <- mapXU[J] # Each (Xi, Xj) pair will appear twice - eliminate duplicates dupX <- isdata[IU] & isdata[JU] & (IU > JU) retain <- !dupX IU <- IU[retain] JU <- JU[retain] if(DebugTriplets) cat(paste(sum(dupX), "duplicate pairs removed\n")) # find all triangles tri <- edges2triangles(IU, JU, nU, friendly=isdata) if(DebugTriplets) cat(paste(nrow(tri), "triangles identified\n")) if(nrow(tri) == 0) { # there are no triangles; return vector of zeroes return(rep.int(0, nU-nmiss)) } # count triangles containing a given quadrature point tcount <- apply(tri, 2, function(x, n) { table(factor(x, levels=1:n)) }, n=nU) tcount <- .rowSums(tcount, nrow(tcount), ncol(tcount)) # select triangles consisting only of data points triX <- matrix(mapUX[tri], nrow=nrow(tri)) isX <- matrowall(!is.na(triX)) triX <- triX[isX, , drop=FALSE] # if(nrow(triX) > 0) { # count triangles of data points containing each given data point tXcount <- apply(triX, 2, function(x, n) { table(factor(x, levels=1:n)) }, n=nX) tXcount <- .rowSums(tXcount, nrow(tXcount), ncol(tXcount)) } else { # there are no triangles of data points tXcount <- rep.int(0, nX) } # answer <- tcount answer[iXU] <- tXcount[iXX] if(DebugTriplets) cat(paste("Max suff stat: data ", max(tXcount), ", dummy ", max(tcount[isdata]), "\n", sep="")) # truncate to original size if(nmiss > 0) answer <- answer[-((nU-nmiss+1):nU)] return(answer) } # set up basic 'triplets' object except for family and parameters BlankTripletsObject <- list( name = "Triplets process", creator = "Triplets", family = "triplet.family", # evaluated later pot = TripletPotential, par = list(r=NULL), # filled in later parnames = "interaction distance", init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") }, update = NULL, # 
default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { gamma <- ((self$interpret)(coeffs, self))$param$gamma return(is.finite(gamma) && (gamma <= 1)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(anyNA(coeffs)) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(0) else return(r) }, version=NULL # to be added ) class(BlankTripletsObject) <- "interact" # define Triplets function Triplets <- function(r) { instantiate.interact(BlankTripletsObject, list(r=r)) } Triplets <- intermaker(Triplets, BlankTripletsObject) Triplets }) spatstat/R/summary.quad.R0000755000176200001440000001117413115271120015067 0ustar liggesusers# # summary.quad.R # # summary() method for class "quad" # # $Revision: 1.11 $ $Date: 2016/09/23 07:38:07 $ # summary.quad <- local({ sumriz <- function(ww) { if(length(ww) > 0) return(list(range=range(ww), sum=sum(ww))) else return(NULL) } summary.quad <- function(object, ..., checkdup=FALSE) { verifyclass(object, "quad") X <- object$data D <- object$dummy s <- list( data = summary.ppp(X, checkdup=checkdup), dummy = summary.ppp(D, checkdup=checkdup), param = object$param) ## make description of dummy point arrangement dpar <- object$param$dummy eps.given <- dpar$orig$eps # could be NULL eps.actual <- NULL if(is.null(dpar)) { descrip <- "(provided manually)" } else if(is.character(dmethod <- dpar$method)) { descrip <- dmethod } else if(identical(dpar$quasi, TRUE)) { descrip <- paste(npoints(D), "quasirandom dummy points", "plus 4 corner points") eps.actual <- 1/(2 * sqrt(intensity(D))) } else if(!is.null(nd <- dpar$nd)) { nd <- ensure2vector(nd) eps.actual <- unique(sidelengths(Frame(D))/nd) 
if(identical(dpar$random, TRUE)) { descrip <- paste("systematic random dummy points in", nd[1], "x", nd[2], "grid", "plus 4 corner points") } else { descrip <- paste(nd[1], "x", nd[2], "grid of dummy points, plus 4 corner points") } } else descrip <- "(rule for creating dummy points not understood)" if(!is.null(eps.actual)) { uD <- unitname(D) s$resolution <- numberwithunit(eps.actual, uD) if(!is.null(eps.given)) { descrip2 <- paste("dummy spacing:", format(eps.given %unit% uD), "requested,", format(eps.actual %unit% uD), "actual") } else { descrip2 <- paste("dummy spacing:", format(eps.actual %unit% uD)) } descrip <- c(descrip, descrip2) } s$descrip <- descrip w <- object$w Z <- is.data(object) s$w <- list(all = sumriz(w), data = sumriz(w[Z]), dummy = sumriz(w[!Z])) class(s) <- "summary.quad" return(s) } summary.quad }) print.summary.quad <- local({ summariseweights <- function(ww, blah, dp=3) { cat(paste(blah, ":\n\t", sep="")) if(is.null(ww)) { cat("(None)\n") return() } splat(paste0("range: ", "[", paste(signif(ww$range, digits=dp), collapse=", "), "]\t", "total: ", signif(ww$sum, digits=dp))) } print.summary.quad <- function(x, ..., dp=3) { splat("Quadrature scheme = data + dummy + weights") pa <- x$param if(is.null(pa)) splat("created by an unknown function.") parbreak() splat("Data pattern:") print(x$data, dp=dp) parbreak() splat("Dummy quadrature points:") ## How they were computed splat(x$descrip, indent=5) parbreak() ## What arguments were given if(!is.null(orig <- pa$dummy$orig)) splat("Original dummy parameters:", paste0(names(orig), "=", orig, collapse=", ")) ## Description of the dummy points print(x$dummy, dp=dp) splat("Quadrature weights:") ## How they were computed if(!is.null(pa)) { wpar <- pa$weight if(is.null(wpar)) splat("(values provided manually)", indent=5) else if(is.character(wmethod <- wpar$method)) { switch(wmethod, grid = { splat("(counting weights based on", wpar$ntile[1], "x", wpar$ntile[2], "array of rectangular tiles)", indent=5) }, 
dirichlet = { splat("(Dirichlet tile areas, computed", if(wpar$exact) "exactly)" else "by pixel approximation)", indent=5) }, splat(wmethod, indent=5) ) } else splat("(rule for creating dummy points not understood)") } if(waxlyrical('extras')) { summariseweights(x$w$all, "All weights", dp) summariseweights(x$w$data, "Weights on data points", dp) summariseweights(x$w$dummy, "Weights on dummy points", dp) } return(invisible(NULL)) } print.summary.quad }) print.quad <- function(x, ...) { splat("Quadrature scheme") splat(x$data$n, "data points,", x$dummy$n, "dummy points") if(waxlyrical('extras')) { sx <- summary(x) splat(sx$descrip, indent=5) } splat("Total weight", sum(x$w), indent=5) return(invisible(NULL)) } spatstat/R/linearpcfmulti.R0000644000176200001440000002271113124102036015452 0ustar liggesusers# # linearpcfmulti.R # # $Revision: 1.12 $ $Date: 2017/02/07 08:12:05 $ # # pair correlation functions for multitype point pattern on linear network # # linearpcfdot <- function(X, i, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points result <- linearpcfmulti(X, I, J, r=r, correction=correction, ...) 
correction <- attr(result, "correction") type <- if(correction == "Ang") "L" else "net" result <- rebadge.as.dotfun(result, "g", type, i) return(result) } linearpcfcross <- function(X, i, j, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j) || is.null(j)) j <- lev[2L] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { result <- linearpcf(X[marx == i], r=r, correction=correction, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearpcfmulti(X, I, J, r=r, correction=correction, ...) } # rebrand correction <- attr(result, "correction") type <- if(correction == "Ang") "L" else "net" result <- rebadge.as.crossfun(result, "g", type, i, j) return(result) } linearpcfmulti <- function(X, I, J, r=NULL, ..., correction="Ang") { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern np <- npoints(X) lengthL <- volume(domain(X)) # validate I, J if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != np || length(J) != np) stop(paste("The length of I and J must equal", "the number of points in the pattern")) if(!any(I)) stop("no points satisfy I") # if(!any(J)) stop("no points satisfy J") nI <- sum(I) nJ <- sum(J) nIandJ <- sum(I & J) # lambdaI <- nI/lengthL # lambdaJ <- nJ/lengthL # compute pcf denom <- (nI * nJ - nIandJ)/lengthL g <- linearPCFmultiEngine(X, I, J, r=r, denom=denom, correction=correction, ...) # set appropriate y axis label correction <- attr(g, "correction") type <- if(correction == "Ang") "L" else "net" g <- rebadge.as.crossfun(g, "g", type, "I", "J") attr(g, "correction") <- correction return(g) } # ................ inhomogeneous ............................ 
linearpcfdot.inhom <- function(X, i, lambdaI, lambdadot, r=NULL, ..., correction="Ang", normalise=TRUE) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points # compute result <- linearpcfmulti.inhom(X, I, J, lambdaI, lambdadot, r=r, correction=correction, normalise=normalise, ...) correction <- attr(result, "correction") type <- if(correction == "Ang") "L, inhom" else "net, inhom" result <- rebadge.as.dotfun(result, "g", type, i) return(result) } linearpcfcross.inhom <- function(X, i, j, lambdaI, lambdaJ, r=NULL, ..., correction="Ang", normalise=TRUE) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j)) j <- lev[2L] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { I <- (marx == i) result <- linearpcfinhom(X[I], lambda=lambdaI, r=r, correction=correction, normalise=normalise, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearpcfmulti.inhom(X, I, J, lambdaI, lambdaJ, r=r, correction=correction, normalise=normalise, ...) 
} # rebrand correction <- attr(result, "correction") type <- if(correction == "Ang") "L, inhom" else "net, inhom" result <- rebadge.as.crossfun(result, "g", type, i, j) return(result) } linearpcfmulti.inhom <- function(X, I, J, lambdaI, lambdaJ, r=NULL, ..., correction="Ang", normalise=TRUE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern np <- npoints(X) lengthL <- volume(domain(X)) # validate I, J if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != np || length(J) != np) stop(paste("The length of I and J must equal", "the number of points in the pattern")) if(!any(I)) stop("no points satisfy I") # validate lambda vectors lambdaI <- getlambda.lpp(lambdaI, X, subset=I, ...) lambdaJ <- getlambda.lpp(lambdaJ, X, subset=J, ...) # compute pcf weightsIJ <- outer(1/lambdaI, 1/lambdaJ, "*") denom <- if(!normalise) lengthL else sum(1/lambdaI) g <- linearPCFmultiEngine(X, I, J, r=r, reweight=weightsIJ, denom=denom, correction=correction, ...) # set appropriate y axis label correction <- attr(g, "correction") type <- if(correction == "Ang") "L, inhom" else "net, inhom" g <- rebadge.as.crossfun(g, "g", type, "I", "J") attr(g, "correction") <- correction attr(g, "dangerous") <- union(attr(lambdaI, "dangerous"), attr(lambdaJ, "dangerous")) return(g) } # .............. internal ............................... 
linearPCFmultiEngine <- function(X, I, J, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", showworking=FALSE) { # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # extract info about pattern np <- npoints(X) # extract linear network L <- domain(X) W <- Window(L) # determine r values rmaxdefault <- 0.98 * boundingradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # if(correction == "Ang") { fname <- c("g", "list(L, I, J)") ylab <- quote(g[L,I,J](r)) } else { fname <- c("g", "list(net, I, J)") ylab <- quote(g[net,I,J](r)) } # if(np < 2) { # no pairs to count: return zero function zeroes <- rep(0, length(r)) df <- data.frame(r = r, est = zeroes) g <- fv(df, "r", ylab, "est", . ~ r, c(0, rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname = fname) unitname(g) <- unitname(X) attr(g, "correction") <- correction return(g) } # nI <- sum(I) nJ <- sum(J) whichI <- which(I) whichJ <- which(J) clash <- I & J has.clash <- any(clash) # compute pairwise distances if(exists("crossdist.lpp")) { DIJ <- crossdist(X[I], X[J], check=FALSE) if(has.clash) { # exclude pairs of identical points from consideration Iclash <- which(clash[I]) Jclash <- which(clash[J]) DIJ[cbind(Iclash,Jclash)] <- Inf } } else { D <- pairdist(X) diag(D) <- Inf DIJ <- D[I, J] } #--- compile into pair correlation function --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) g <- compilepcf(DIJ, r, denom=denom, check=FALSE, fname=fname) g <- rebadge.as.crossfun(g, "g", "net", "I", "J") unitname(g) <- unitname(X) attr(g, "correction") <- correction return(g) } if(correction == "none") edgewt <- 1 else { # inverse m weights (Ang's correction) # determine tolerance toler <- default.linnet.tolerance(L) # compute m[i,j] m <- matrix(1, nI, nJ) XI <- X[I] if(!has.clash) { for(k in seq_len(nJ)) { j <- whichJ[k] m[,k] <- countends(L, XI, DIJ[, k], toler=toler) } } else { # 
don't count identical pairs for(k in seq_len(nJ)) { j <- whichJ[k] inotj <- (whichI != j) m[inotj, k] <- countends(L, XI[inotj], DIJ[inotj, k], toler=toler) } } edgewt <- 1/m } # compute pcf wt <- if(!is.null(reweight)) edgewt * reweight else edgewt g <- compilepcf(DIJ, r, weights=wt, denom=denom, check=FALSE, ..., fname=fname) ## rebadge and tweak g <- rebadge.as.crossfun(g, "g", "L", "I", "J") fname <- attr(g, "fname") # tack on theoretical value g <- bind.fv(g, data.frame(theo=rep(1,length(r))), makefvlabel(NULL, NULL, fname, "pois"), "theoretical Poisson %s") unitname(g) <- unitname(X) fvnames(g, ".") <- rev(fvnames(g, ".")) # show working if(showworking) attr(g, "working") <- list(DIJ=DIJ, wt=wt) attr(g, "correction") <- correction return(g) } spatstat/R/lindirichlet.R0000644000176200001440000001065413161364630015124 0ustar liggesusers#' lindirichlet.R #' #' Dirichlet tessellation on a linear network #' #' $Revision: 1.8 $ $Date: 2017/09/23 04:56:23 $ lineardirichlet <- function(X) { stopifnot(is.lpp(X)) #' unique points, remembering original sequence ii <- which(!duplicated(X)) uX <- X[ii] #' local coordinates coUX <- coords(uX)[, c("seg", "tp")] #' add label from original sequence index coUX$lab <- ii #' reorder oo <- with(coUX, order(seg, tp)) coUXord <- coUX[oo, , drop=FALSE] seg <- coUXord$seg tp <- coUXord$tp #' network data L <- domain(X) nv <- nvertices(L) ns <- nsegments(L) seglen <- lengths.psp(as.psp(L)) from <- L$from to <- L$to #' upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance for nnwhich tol <- max(sqrt(.Machine$double.eps), diameter(Frame(L))/2^20) #' Find data point (in sorted pattern) nearest to each vertex of network a <- vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol) vnndist <- a$vnndist vnnwhich <- a$vnnwhich #' index back into original data pattern vnnlab <- coUXord$lab[vnnwhich] #' compute Dirichlet tessellation df <- ldtEngine(nv, ns, from, to, seglen, huge, coUXord, vnndist, vnnwhich, vnnlab) 
return(lintess(L, df)) } ldtEngine <- function(nv, ns, from, to, seglen, huge, # network coUXord, # point coordinates, sorted vnndist, vnnwhich, # nearest data point for each vertex vnnlab) { #' initialise tessellation data df <- data.frame(seg=integer(0), t0=numeric(0), t1=numeric(0), tile=integer(0)) #' split point data by segment, discarding segments which contain no points fseg <- factor(coUXord$seg, levels=1:ns) blist <- split(coUXord, fseg, drop=TRUE) #' process each segment containing data points for(b in blist) { n <- nrow(b) #' which segment? sygmund <- b$seg[[1L]] lenf <- seglen[sygmund] #' segment endpoints A <- from[sygmund] B <- to[sygmund] #' data points (from X) closest to endpoints jA <- vnnlab[A] jB <- vnnlab[B] dA <- vnndist[A] dB <- vnndist[B] #' data points (along segment) closest to endpoints iA <- b$lab[1L] iB <- b$lab[n] #' splits between consecutive data points btp <- b$tp tcut <- if(n < 2) numeric(0) else (btp[-1] + btp[-n])/2 labs <- b$lab #' consider left endpoint if(jA == iA) { #' leftmost data point covers left endpoint tcut <- c(0, tcut) } else { #' cut between left endpoint and leftmost data point dA1 <- lenf * btp[1L] dx <- (dA1 - dA)/2 if(dx > 0) { #' expected! tx <- dx/lenf tcut <- c(0, tx, tcut) labs <- c(jA, labs) } else { #' unexpected tcut <- c(0, tcut) } } #' consider right endpoint if(jB == iB) { #' rightmost data point covers right endpoint tcut <- c(tcut, 1) } else { #' cut between right endpoint and rightmost data point dB1 <- lenf * (1 - btp[n]) dx <- (dB1 - dB)/2 if(dx > 0) { #' expected! 
tx <- 1 - dx/lenf tcut <- c(tcut, tx, 1) labs <- c(labs, jB) } else { #' unexpected tcut <- c(tcut, 1) } } m <- length(tcut) newdf <- data.frame(seg=sygmund, t0=tcut[-m], t1=tcut[-1L], tile=labs) df <- rbind(df, newdf) } #' now deal with segments having no data points unloved <- (table(fseg) == 0) if(any(unloved)) { for(sygmund in which(unloved)) { lenf <- seglen[sygmund] #' segment endpoints A <- from[sygmund] B <- to[sygmund] #' data points (from X) closest to endpoints jA <- vnnlab[A] jB <- vnnlab[B] dA <- vnndist[A] dB <- vnndist[B] if(is.na(jA) || is.na(jB) || jA == jB) { #' entire segment is covered by one tile thetile <- if(is.na(jA)) jB else jA newdf <- data.frame(seg=sygmund, t0=0.0, t1=1.0, tile=thetile) } else { #' split somewhere tx <- (dB - dA + lenf)/(2 * lenf) if(tx >= 0 && tx <= 1) { newdf <- data.frame(seg=sygmund, t0=c(0,tx), t1=c(tx,1), tile=c(jA, jB)) } else if(tx < 0) { # weird newdf <- data.frame(seg=sygmund, t0=0.0, t1=1.0, tile=jB) } else { # weird newdf <- data.frame(seg=sygmund, t0=0.0, t1=1.0, tile=jA) } } df <- rbind(df, newdf) } } return(df) } spatstat/R/rotate.R0000755000176200001440000000524613115271120013742 0ustar liggesusers# # rotate.S # # $Revision: 1.21 $ $Date: 2014/10/24 00:22:30 $ # rotxy <- function(X, angle=pi/2) { co <- cos(angle) si <- sin(angle) list(x = co * X$x - si * X$y, y = si * X$x + co * X$y) } rotxypolygon <- function(p, angle=pi/2) { p[c("x","y")] <- rotxy(p, angle=angle) # area and hole status are invariant under rotation return(p) } rotate <- function(X, ...) 
{ UseMethod("rotate") } rotate.owin <- function(X, angle=pi/2, ..., rescue=TRUE, centre=NULL) { verifyclass(X, "owin") if(!is.null(centre)) { ## rotation about designated centre X <- shift(X, origin=centre) negorig <- getlastshift(X) } else negorig <- NULL switch(X$type, rectangle={ # convert rectangle to polygon P <- owin(X$xrange, X$yrange, poly= list(x=X$xrange[c(1,2,2,1)], y=X$yrange[c(1,1,2,2)]), unitname=unitname(X)) # call polygonal case Y <- rotate.owin(P, angle, rescue=rescue) }, polygonal={ # First rotate the polygonal boundaries bdry <- lapply(X$bdry, rotxypolygon, angle=angle) # wrap up Y <- owin(poly=bdry, check=FALSE, unitname=unitname(X)) if(rescue) Y <- rescue.rectangle(Y) }, mask={ newframe <- boundingbox(rotxy(corners(X), angle)) Y <- if(length(list(...)) > 0) as.mask(newframe, ...) else as.mask(newframe, eps=with(X, min(xstep, ystep))) pixelxy <- rasterxy.mask(Y) xybefore <- rotxy(pixelxy, -angle) Y$m[] <- with(xybefore, inside.owin(x, y, X)) Y <- intersect.owin(Y, boundingbox(Y)) if(rescue) Y <- rescue.rectangle(Y) unitname(Y) <- unitname(X) }, stop("Unrecognised window type") ) if(!is.null(negorig)) Y <- shift(Y, -negorig) return(Y) } rotate.ppp <- function(X, angle=pi/2, ..., centre=NULL) { verifyclass(X, "ppp") if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL r <- rotxy(X, angle) w <- rotate.owin(X$window, angle, ...) 
Y <- ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } rotate.im <- function(X, angle=pi/2, ..., centre=NULL) { if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL co <- cos(angle) si <- sin(angle) m <- matrix(c(co,si,-si,co), nrow=2, ncol=2) Y <- affine(X, mat=m) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } spatstat/R/unnormdensity.R0000644000176200001440000000453613115225157015371 0ustar liggesusers# # unnormdensity.R # # $Revision: 1.5 $ $Date: 2016/02/11 10:17:12 $ # unnormdensity <- function(x, ..., weights=NULL) { if(any(!nzchar(names(list(...))))) stop("All arguments must be named (tag=value)") if(is.null(weights)) { out <- do.call.matched(density.default, c(list(x=x), list(...))) out$y <- length(x) * out$y } else if(all(weights == 0)) { # result is zero out <- do.call.matched(density.default, c(list(x=x), list(...))) out$y <- 0 * out$y } else if(all(weights >= 0)) { # all masses are nonnegative w <- weights totmass <- sum(w) out <- do.call.matched(density.default, c(list(x=x), list(...), list(weights=w/totmass))) out$y <- out$y * totmass } else if(all(weights <= 0)) { # all masses are nonpositive w <- (- weights) totmass <- sum(w) out <- do.call.matched(density.default, c(list(x=x), list(...), list(weights=w/totmass))) out$y <- out$y * (- totmass) } else { # mixture of positive and negative masses w <- weights wabs <- abs(w) wpos <- pmax.int(0, w) wneg <- - pmin.int(0, w) # determine bandwidth using absolute masses dabs <- do.call.matched(density.default, c(list(x=x), list(...), list(weights=wabs/sum(wabs)))) bw <- dabs$bw # compute densities for positive and negative masses separately outpos <- do.call.matched(density.default, resolve.defaults(list(x=x), list(bw=bw, adjust=1), list(weights=wpos/sum(wpos)), list(...), .StripNull=TRUE)) outneg <- do.call.matched(density.default, resolve.defaults(list(x=x), 
list(bw=bw, adjust=1), list(weights=wneg/sum(wneg)), list(...), .StripNull=TRUE)) # combine out <- outpos out$y <- sum(wpos) * outpos$y - sum(wneg) * outneg$y } out$call <- match.call() return(out) } spatstat/R/rknn.R0000755000176200001440000000203213115271120013402 0ustar liggesusers# # rknn.R # # Distribution of distance to k-th nearest point in d dimensions # (Poisson process of intensity lambda) # # $Revision: 1.2 $ $Date: 2009/12/31 01:33:44 $ # dknn <- function(x, k=1, d=2, lambda=1) { validposint(k, "dknn") validposint(d, "dknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) y <- dgamma(x^d, shape=k, rate=lambda * alpha.d) y <- y * d * x^(d-1) return(y) } pknn <- function(q, k=1, d=2, lambda=1) { validposint(k, "pknn") validposint(d, "pknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) p <- pgamma(q^d, shape=k, rate=lambda * alpha.d) return(p) } qknn <- function(p, k=1, d=2, lambda=1) { validposint(k, "qknn") validposint(d, "qknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) y <- qgamma(p, shape=k, rate=lambda * alpha.d) z <- y^(1/d) return(z) } rknn <- function(n, k=1, d=2, lambda=1) { validposint(k, "rknn") validposint(d, "rknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) y <- rgamma(n, shape=k, rate=lambda * alpha.d) x <- y^(1/d) return(x) } spatstat/R/Kmulti.R0000755000176200001440000003055313115271075013721 0ustar liggesusers# # Kmulti.S # # Compute estimates of cross-type K functions # for multitype point patterns # # $Revision: 5.48 $ $Date: 2015/10/21 09:06:57 $ # # # -------- functions ---------------------------------------- # Kcross() cross-type K function K_{ij} # between types i and j # # Kdot() K_{i\bullet} # between type i and all points regardless of type # # Kmulti() (generic) # # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # including 'marks' vector # r distance values at which to compute K # # -------- standard output ------------------------------ # A data frame with columns named # # r: 
same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # using standard formula (denominator = count of points) # # bord.modif: K function estimated by border method # using modified formula # (denominator = area of eroded window # # ------------------------------------------------------------------------ "Lcross" <- function(X, i, j, ..., from, to) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- if(!missing(from)) from else levels(marks(X))[1] if(missing(j)) j <- if(!missing(to)) to else levels(marks(X))[2] K <- Kcross(X, i, j, ...) L <- eval.fv(sqrt(K/pi)) # relabel the fv object iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) L <- rebadge.fv(L, substitute(L[i,j](r), list(i=iname,j=jname)), c("L", paste0("list(", iname, ",", jname, ")")), new.yexp=substitute(L[list(i,j)](r), list(i=iname,j=jname))) attr(L, "labl") <- attr(K, "labl") return(L) } "Ldot" <- function(X, i, ..., from) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- if(!missing(from)) from else levels(marks(X))[1] K <- Kdot(X, i, ...) 
L <- eval.fv(sqrt(K/pi)) # relabel the fv object iname <- make.parseable(paste(i)) L <- rebadge.fv(L, substitute(L[i ~ dot](r), list(i=iname)), c("L", paste(iname, "~ symbol(\"\\267\")")), new.yexp=substitute(L[i ~ symbol("\267")](r), list(i=iname))) attr(L, "labl") <- attr(K, "labl") return(L) } "Kcross" <- function(X, i, j, r=NULL, breaks=NULL, correction =c("border", "isotropic", "Ripley", "translate") , ..., ratio=FALSE, from, to) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- if(!missing(from)) from else levels(marx)[1] if(missing(j)) j <- if(!missing(to)) to else levels(marx)[2] I <- (marx == i) if(!any(I)) stop(paste("No points have mark i =", i)) if(i == j) { result <- Kest(X[I], r=r, breaks=breaks, correction=correction, ..., ratio=ratio) } else { J <- (marx == j) if(!any(J)) stop(paste("No points have mark j =", j)) result <- Kmulti(X, I, J, r=r, breaks=breaks, correction=correction, ..., ratio=ratio) } iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(Kcross[i,j](r), list(i=iname,j=jname)), c("K", paste0("list(", iname, ",", jname, ")")), new.yexp=substitute(K[list(i,j)](r), list(i=iname,j=jname))) return(result) } "Kdot" <- function(X, i, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate") , ..., ratio=FALSE, from) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- if(!missing(from)) from else levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. 
all points if(!any(I)) stop(paste("No points have mark i =", i)) result <- Kmulti(X, I, J, r=r, breaks=breaks, correction=correction, ..., ratio=ratio) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(K[i ~ dot](r), list(i=iname)), c("K", paste0(iname, "~ symbol(\"\\267\")")), new.yexp=substitute(K[i ~ symbol("\267")](r), list(i=iname))) return(result) } "Kmulti"<- function(X, I, J, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate") , ..., ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- X$window areaW <- area(W) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") if(!any(I)) stop("no points belong to subset I") if(!any(J)) stop("no points belong to subset J") nI <- sum(I) nJ <- sum(J) lambdaI <- nI/areaW lambdaJ <- nJ/areaW # r values rmaxdefault <- rmax.rule("K", W, lambdaJ) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame # It will be given more columns later K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- fv(K, "r", quote(K[IJ](r)), "theo", , alim, c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("K", "list(I,J)"), yexp=quote(K[list(I,J)](r))) # save numerator and denominator? 
if(ratio) { denom <- lambdaI * lambdaJ * areaW numK <- eval.fv(denom * K) denK <- eval.fv(denom + K * 0) attributes(numK) <- attributes(denK) <- attributes(K) attr(numK, "desc")[2] <- "numerator for theoretical Poisson %s" attr(denK, "desc")[2] <- "denominator for theoretical Poisson %s" } # find close pairs of points XI <- X[I] XJ <- X[J] close <- crosspairs(XI, XJ, max(r), what="ijd") # close$i and close$j are serial numbers in XI and XJ respectively; # map them to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dcloseIJ <- close$d icloseI <- close$i jcloseJ <- close$j # Compute estimates by each of the selected edge corrections. if(any(correction == "none")) { # uncorrected! wh <- whist(dcloseIJ, breaks$val) # no weights numKun <- cumsum(wh) denKun <- lambdaI * lambdaJ * areaW Kun <- numKun/denKun K <- bind.fv(K, data.frame(un=Kun), "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(un=numKun), "{hat(%s)[%s]^{un}}(r)", "numerator of uncorrected estimate of %s", "un") denK <- bind.fv(denK, data.frame(un=denKun), "{hat(%s)[%s]^{un}}(r)", "denominator of uncorrected estimate of %s", "un") } } if(any(correction == "border" | correction == "bord.modif")) { # border method # distance to boundary from each point of type I bI <- bdist.points(XI) # distance to boundary from first element of each (i, j) pair bcloseI <- bI[icloseI] # apply reduced sample algorithm RS <- Kount(dcloseIJ, bcloseI, bI, breaks) if(any(correction == "bord.modif")) { denom.area <- eroded.areas(W, r) numKbm <- RS$numerator denKbm <- denom.area * nI * nJ Kbm <- numKbm/denKbm K <- bind.fv(K, 
data.frame(bord.modif=Kbm), "{hat(%s)[%s]^{bordm}}(r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "{hat(%s)[%s]^{bordm}}(r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, data.frame(bord.modif=denKbm), "{hat(%s)[%s]^{bordm}}(r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "border")) { numKb <- RS$numerator denKb <- lambdaJ * RS$denom.count Kb <- numKb/denKb K <- bind.fv(K, data.frame(border=Kb), "{hat(%s)[%s]^{bord}}(r)", "border-corrected estimate of %s", "border") if(ratio) { numK <- bind.fv(numK, data.frame(border=numKb), "{hat(%s)[%s]^{bord}}(r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=denKb), "{hat(%s)[%s]^{bord}}(r)", "denominator of border-corrected estimate of %s", "border") } } } if(any(correction == "translate")) { # translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) wh <- whist(dcloseIJ, breaks$val, edgewt) numKtrans <- cumsum(wh) denKtrans <- lambdaI * lambdaJ * areaW Ktrans <- numKtrans/denKtrans rmax <- diameter(W)/2 Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") if(ratio) { numK <- bind.fv(numK, data.frame(trans=numKtrans), "{hat(%s)[%s]^{trans}}(r)", "numerator of translation-corrected estimate of %s", "trans") denK <- bind.fv(denK, data.frame(trans=denKtrans), "{hat(%s)[%s]^{trans}}(r)", "denominator of translation-corrected estimate of %s", "trans") } } if(any(correction == "isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dcloseIJ, ncol=1)) wh <- whist(dcloseIJ, breaks$val, edgewt) numKiso <- cumsum(wh) denKiso <- lambdaI * lambdaJ * areaW Kiso <- numKiso/denKiso rmax <- diameter(W)/2 Kiso[r >= rmax] <- NA K <- 
bind.fv(K, data.frame(iso=Kiso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") if(ratio) { numK <- bind.fv(numK, data.frame(iso=numKiso), "{hat(%s)[%s]^{iso}}(r)", "numerator of Ripley isotropic correction estimate of %s", "iso") denK <- bind.fv(denK, data.frame(iso=denKiso), "{hat(%s)[%s]^{iso}}(r)", "denominator of Ripley isotropic correction estimate of %s", "iso") } } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) if(ratio) { # finish up numerator & denominator formula(numK) <- formula(denK) <- . ~ r unitname(numK) <- unitname(denK) <- unitname(K) # tack on to result K <- rat(K, numK, denK, check=FALSE) } return(K) } spatstat/R/areainter.R0000755000176200001440000002773313115271075014434 0ustar liggesusers# # # areainter.R # # $Revision: 1.44 $ $Date: 2017/06/05 10:31:58 $ # # The area interaction # # AreaInter() create an instance of the area-interaction process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # AreaInter <- local({ # area-interaction conditional intensity potential # corresponds to potential -C(x) = n(x) - A(x)/\pi r^2 areapot <- function(X,U,EqualPairs,pars,correction, ..., W=as.owin(X)) { uhoh <- !(correction %in% c("border", "none")) if(any(uhoh)) { nuh <- sum(uhoh) warning(paste(ngettext(nuh, "Correction", "Corrections"), commasep(sQuote(correction[uhoh])), ngettext(nuh, "is not supported and was ignored", "are not supported and were ignored"))) } r <- pars$r if(is.null(r)) stop("internal error: r parameter not found") n <- U$n areas <- numeric(n) dummies <- !(seq_len(n) %in% EqualPairs[,2L]) if(sum(dummies) > 0) areas[dummies] <- areaGain(U[dummies], X, r, W=W) ii <- EqualPairs[,1L] jj <- EqualPairs[,2L] areas[jj] <- areaLoss(X, r, subset=ii, W=W) return(1 - areas/(pi * r^2)) } #' fractional area of overlap of two unit discs at distance 2 * z discOverlap <- function(z) { z <- pmax(pmin(z, 1), -1) (2/pi) * (acos(z) 
- z * sqrt(1 - z^2)) } # template object without family, par, version BlankAI <- list( name = "Area-interaction process", creator = "AreaInter", family = "inforder.family", # evaluated later pot = areapot, par = list(r = NULL), # to be filled in parnames = "disc radius", init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("disc radius r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction unitz <- unitname(fint) if(!identical(inter$name, "Area-interaction process")) stop("Tried to plot the wrong kind of interaction") #' fitted interaction coefficient theta <- fint$coefs[fint$Vnames] #' interaction radius r <- inter$par$r xlim <- resolve.1.default(list(xlim=c(0, 1.25 * 2*r)), list(...)) rmax <- max(xlim, d) if(is.null(d)) { d <- seq(from=0, to=rmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) } #' compute interaction between two points at distance d y <- exp(theta * discOverlap(d/(2 * r))) #' compute `fv' object fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "maximal interaction h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(fun), list(...), list(ylim=range(0,1,y)))) return(invisible(fun)) }, #' end of function 'plot' interpret = function(coeffs, self) { logeta <- as.numeric(coeffs[1L]) eta <- exp(logeta) return(list(param=list(eta=eta), inames="interaction parameter eta", printable=signif(eta))) }, valid = function(coeffs, self) { eta <- ((self$interpret)(coeffs, self))$param$eta return(is.finite(eta)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r if(anyNA(coeffs)) return(2 * r) logeta <- coeffs[1L] if(abs(logeta) <= epsilon) return(0) else return(2 * r) }, delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { # Sufficient statistic for second order conditional intensity # Area-interaction model if(!(correction %in% c("border", "none"))) return(NULL) r <- inte$par$r areadelta2(X, r, sparseOK=sparseOK) }, version=NULL # to be added ) class(BlankAI) <- "interact" AreaInter <- function(r) { instantiate.interact(BlankAI, list(r=r)) } AreaInter <- intermaker(AreaInter, BlankAI) AreaInter }) areadelta2 <- local({ areadelta2 <- function(X, r, ..., sparseOK=FALSE) { # Sufficient statistic for second order conditional intensity # Area-interaction model if(is.ppp(X)) return(areadelppp(X, r, ..., sparseOK=sparseOK)) else if(inherits(X, "quad")) return(areadelquad(X, r, sparseOK=sparseOK)) else stop("internal error: X should be a ppp or quad object") } areadelppp <- function(X, r, algorithm=c("C", "nncross", "nnmap"), sparseOK=FALSE) { # Evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j # i.e. 
h(X[i]|X) - h(X[i]|X[-j]) # where h is first order cif statistic algorithm <- match.arg(algorithm) nX <- npoints(X) sparseOK <- sparseOK result <- if(!sparseOK) matrix(0, nX, nX) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nX,nX)) if(nX < 2) return(result) if(algorithm == "C") { # use special purpose C routine # called once for each interacting pair of points xx <- X$x yy <- X$y cl <- closepairs(X, 2 * r, what="indices", twice=FALSE, neat=FALSE) I <- cl$i J <- cl$j eps <- r/spatstat.options("ngrid.disc") for(k in seq_along(I)) { i <- I[k] j <- J[k] # all neighbours of i Ki <- union(J[I==i], I[J==i]) # all neighbours of j Kj <- union(J[I==j], I[J==j]) # relevant neighbours K <- setdiff(union(Ki, Kj), c(i,j)) # call C code z <- .C("delta2area", xa = as.double(xx[i]), ya = as.double(yy[i]), xb = as.double(xx[j]), yb = as.double(yy[j]), nother = as.integer(length(K)), xother = as.double(xx[K]), yother = as.double(yy[K]), radius = as.double(r), epsilon = as.double(eps), pixcount = as.integer(integer(1L)), PACKAGE = "spatstat") result[i,j] <- result[j,i] <- z$pixcount } # normalise result <- result * (eps^2)/(pi * r^2) return(result) } # non-C algorithms # confine attention to points which are interacting relevant <- (nndist(X) <= 2 * r) if(!all(relevant)) { if(any(relevant)) { # call self on subset Dok <- areadelppp(X[relevant], r, algorithm, sparseOK=sparseOK) result[relevant,relevant] <- Dok } return(result) } # .............. algorithm using interpreted code ........... 
# sort pattern in increasing order of x sortX <- (algorithm == "nnmap") if(sortX) { oX <- fave.order(X$x) X <- X[oX] } # area calculation may be restricted to window W for efficiency W <- as.owin(X) U <- as.rectangle(W) # decide pixel resolution eps <- r/spatstat.options("ngrid.disc") npix <- prod(ceiling(sidelengths(U)/eps)) if(npix <= 2^20) { # do it all in one go tile <- list(NULL) } else { # divide into rectangular tiles B <- as.rectangle(W) ntile0 <- ceiling(npix/(2^20)) tile0area <- area(B)/ntile0 tile0side <- sqrt(tile0area) nx <- ceiling(sidelengths(B)[1L]/tile0side) ny <- ceiling(sidelengths(B)[2L]/tile0side) tile <- tiles(quadrats(B, nx, ny)) } result <- matrix(0, nX, nX) for(i in seq_len(length(tile))) { # form pixel grid Ti <- tile[[i]] Wi <- if(is.null(Ti)) W else intersect.owin(W, Ti) if(algorithm == "nncross") { # Trusted, slow algorithm using nncross Z <- as.mask(Wi, eps=eps) G <- as.ppp(rasterxy.mask(Z), U, check=FALSE) # compute 3 nearest neighbours in X of each grid point v <- nncross(G, X, k=1:3) # select pixels which have exactly 2 neighbours within distance r ok <- with(v, dist.3 > r & dist.2 <= r) if(any(ok)) { v <- v[ok, , drop=FALSE] # accumulate pixel counts -> areas counts <- with(v, table(i=factor(which.1, levels=1L:nX), j=factor(which.2, levels=1L:nX))) pixarea <- with(Z, xstep * ystep) result <- result + pixarea * (counts + t(counts)) } } else { # Faster algorithm using nnmap # compute 3 nearest neighbours in X of each grid point stuff <- nnmap(X, k=1:3, W=Wi, eps=eps, is.sorted.X=TRUE, sortby="x", outputarray=TRUE) dist.2 <- stuff$dist[2L,,] dist.3 <- stuff$dist[3L,,] which.1 <- stuff$which[1L,,] which.2 <- stuff$which[2L,,] ok <- (dist.3 > r & dist.2 <= r) if(any(ok)) { which.1 <- as.vector(which.1[ok]) which.2 <- as.vector(which.2[ok]) counts <- table(i=factor(which.1, levels=1L:nX), j=factor(which.2, levels=1L:nX)) pixarea <- attr(stuff, "pixarea") result <- result + pixarea * (counts + t(counts)) } } } if(sortX) { # map back to 
original ordering result[oX, oX] <- result } # normalise result <- result/(pi * r^2) return(result) } areadelquad <- function(Q, r, sparseOK=FALSE) { # Sufficient statistic for second order conditional intensity # Area-interaction model # Evaluate \Delta_{u_j} \Delta_{u_i} S(x) for quadrature points # answer is area(b(u[i],r) \cap b(u[j],r)\setminus \bigcup_k b(x[k],r)) # where k ranges over all indices that are not equivalent to u[i,j] U <- union.quad(Q) Z <- is.data(Q) nU <- npoints(U) xx <- U$x yy <- U$y # identify all close pairs of quadrature points cl <- closepairs(U, 2 * r, what="indices") I <- cl$i J <- cl$j # find neighbours in X of each quadrature point zJ <- Z[J] neigh <- split(J[zJ], factor(I[zJ], levels=1L:nU)) # result <- if(!sparseOK) matrix(0, nU, nU) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nU,nU)) eps <- r/spatstat.options("ngrid.disc") # for(k in seq_along(I)) { i <- I[k] j <- J[k] # all points of X close to U[i] Ki <- neigh[[i]] # all points of X close to U[j] Kj <- neigh[[j]] # relevant neighbours K <- setdiff(union(Ki, Kj), c(i,j)) # call C code z <- .C("delta2area", xa = as.double(xx[i]), ya = as.double(yy[i]), xb = as.double(xx[j]), yb = as.double(yy[j]), nother = as.integer(length(K)), xother = as.double(xx[K]), yother = as.double(yy[K]), radius = as.double(r), epsilon = as.double(eps), pixcount = as.integer(integer(1L)), PACKAGE = "spatstat") result[i,j] <- z$pixcount } # normalise result <- result * (eps^2)/(pi * r^2) return(result) } areadelta2 }) spatstat/R/beginner.R0000644000176200001440000000174113115225157014237 0ustar liggesusers# # beginner.R # # Helpful information for beginners # # $Revision: 1.3 $ $Date: 2015/10/21 09:06:57 $ # print.autoexec <- function(x, ...) 
{ x() } beginner <- function(package="spatstat") { package <- as.character(substitute(package)) RShowDoc("BEGINNER.txt", type="txt", package=package) return(invisible(NULL)) } class(beginner) <- "autoexec" foo <- local({ fooText <- paste0("Error: object 'foo' not found.\n\n", "'foo' is not a defined variable or function.\n", "It is a placeholder name, which serves only to ", "demonstrate a concept. It represents the name of ", "any desired object or function. ", "Other placeholder names popular with computer scientists ", "are 'bar', 'foobar', 'qux' and 'mork'.") foo <- function() { splat(fooText) return(invisible(NULL)) } class(foo) <- "autoexec" foo }) plot.foo <- function(x, ...) foo() spatstat/R/marks.R0000755000176200001440000002540713115271120013562 0ustar liggesusers# # marks.R # # $Revision: 1.44 $ $Date: 2016/04/25 02:34:40 $ # # stuff for handling marks # # marks <- function(x, ...) { UseMethod("marks") } marks.default <- function(x, ...) { NULL } # The 'dfok' switch is temporary # while we convert the code to accept data frames of marks marks.ppp <- function(x, ..., dfok=TRUE, drop=TRUE) { ma <- x$marks if((is.data.frame(ma) || is.matrix(ma))) { if(!dfok) stop("Sorry, not implemented when the marks are a data frame") if(drop && ncol(ma) == 1) ma <- ma[,1,drop=TRUE] } return(ma) } # ------------------------------------------------------------------ "marks<-" <- function(x, ..., value) { UseMethod("marks<-") } "marks<-.ppp" <- function(x, ..., dfok=TRUE, drop=TRUE, value) { np <- npoints(x) m <- value switch(markformat(m), none = { return(unmark(x)) }, vector = { # vector of marks if(length(m) == 1) m <- rep.int(m, np) else if(np == 0) m <- rep.int(m, 0) # ensures marked pattern obtained else if(length(m) != np) stop("number of points != number of marks") marx <- m }, dataframe = { if(!dfok) stop("Sorry, data frames of marks are not yet implemented") m <- as.data.frame(m) # data frame of marks if(ncol(m) == 0) { # no mark variables marx <- NULL } else { # 
marks to be attached if(nrow(m) == np) { marx <- m } else { # lengths do not match if(nrow(m) == 1 || np == 0) { # replicate data frame marx <- as.data.frame(lapply(as.list(m), function(x, k) { rep.int(x, k) }, k=np)) } else stop("number of rows of data frame != number of points") } # convert single-column data frame to vector? if(drop && ncol(marx) == 1) marx <- marx[,1,drop=TRUE] } }, hyperframe = stop("Hyperframes of marks are not supported in ppp objects; use ppx"), stop("Format of marks is not understood") ) # attach/overwrite marks Y <- ppp(x$x,x$y,window=x$window,marks=marx, check=FALSE, drop=drop) return(Y) } "%mark%" <- setmarks <- function(x,value) { marks(x) <- value return(x) } # ------------------------------------------------- markformat <- function(x) { UseMethod("markformat") } markformat.ppp <- function(x) { mf <- x$markformat if(is.null(mf)) mf <- markformat(marks(x)) return(mf) } markformat.default <- function(x) { if(is.null(x)) return("none") if(is.null(dim(x))) { if(is.vector(x) || is.factor(x) || is.atomic(x)) return("vector") if(inherits(x, "POSIXt") || inherits(x, "Date")) return("vector") } if(is.data.frame(x) || is.matrix(x)) return("dataframe") if(is.hyperframe(x)) return("hyperframe") if(inherits(x, c("solist", "anylist", "listof"))) return("list") stop("Mark format not understood") } # ------------------------------------------------------------------ "is.marked" <- function(X, ...) { UseMethod("is.marked") } "is.marked.ppp" <- function(X, na.action="warn", ...) { marx <- marks(X, ...) if(is.null(marx)) return(FALSE) if((length(marx) > 0) && anyNA(marx)) { gripe <- paste("some mark values are NA in the point pattern", short.deparse(substitute(X))) switch(na.action, warn = warning(gripe, call.=FALSE), fatal = stop(gripe, call.=FALSE), ignore = {} ) } return(TRUE) } "is.marked.default" <- function(...) { return(!is.null(marks(...))) } # ------------------------------------------------------------------ is.multitype <- function(X, ...) 
{ UseMethod("is.multitype") } is.multitype.default <- function(X, ...) { m <- marks(X) if(is.null(m)) return(FALSE) if(!is.null(dim(m))) { # should have a single column if(dim(m)[2] != 1) return(FALSE) m <- m[,1,drop=TRUE] } return(is.factor(m)) } is.multitype.ppp <- function(X, na.action="warn", ...) { marx <- marks(X, dfok=TRUE) if(is.null(marx)) return(FALSE) if((is.data.frame(marx) || is.hyperframe(marx)) && ncol(marx) > 1) return(FALSE) if(!is.factor(marx)) return(FALSE) if((length(marx) > 0) && anyNA(marx)) switch(na.action, warn = { warning(paste("some mark values are NA in the point pattern", short.deparse(substitute(X)))) }, fatal = { return(FALSE) }, ignore = {} ) return(TRUE) } # ------------------------------------------------------------------ unmark <- function(X) { UseMethod("unmark") } unmark.ppp <- function(X) { X$marks <- NULL X$markformat <- "none" return(X) } unmark.splitppp <- function(X) { Y <- lapply(X, unmark.ppp) class(Y) <- c("splitppp", class(Y)) return(Y) } ##### utility functions for subsetting & combining marks ######### marksubset <- function(x, index, format=NULL) { if(is.null(format)) format <- markformat(x) switch(format, none={return(NULL)}, list=, vector={return(x[index])}, hyperframe=, dataframe={return(x[index,,drop=FALSE])}, stop("Internal error: unrecognised format of marks")) } "%msub%" <- marksubsetop <- function(x,i) { marksubset(x, i) } "%mrep%" <- markreplicateop <- function(x,n) { format <- markformat(x) switch(format, none={return(NULL)}, list=, vector={ return(rep.int(x,n))}, dataframe={ return(as.data.frame(lapply(x, rep, times=n))) }, hyperframe={ xcols <- as.list(x) repxcols <- lapply(xcols, rep, times=n) return(do.call(hyperframe, repxcols)) }, stop("Internal error: unrecognised format of marks")) } "%mapp%" <- markappendop <- function(x,y) { fx <- markformat(x) fy <- markformat(y) agree <- (fx == fy) if(all(c(fx,fy) %in% c("dataframe", "hyperframe"))) agree <- agree && identical(names(x),names(y)) if(!agree) 
stop("Attempted to concatenate marks that are not compatible") switch(fx, none = { return(NULL) }, vector = { if(is.factor(x) || is.factor(y)) return(cat.factor(x,y)) else return(c(x,y)) }, hyperframe=, dataframe = { return(rbind(x,y)) }, list = { z <- append(x,y) z <- as.solist(z, demote=TRUE) return(z) }, stop("Internal error: unrecognised format of marks")) } markappend <- function(...) { # combine marks from any number of patterns marxlist <- list(...) # check on compatibility of marks mkfmt <- sapply(marxlist,markformat) if(length(ufm <- unique(mkfmt))>1) stop(paste("Cannot append marks of different formats:", commasep(sQuote(ufm))), call.=FALSE) mkfmt <- mkfmt[1] # combine the marks switch(mkfmt, none = { return(NULL) }, vector = { marxlist <- lapply(marxlist, function(x){as.data.frame.vector(x,nm="v1")}) marx <- do.call(rbind, marxlist)[,1] return(marx) }, hyperframe =, dataframe = { # check compatibility of data frames # (this is redundant but gives more helpful message) nama <- lapply(marxlist, names) dims <- lengths(nama) if(length(unique(dims)) != 1) stop("Data frames of marks have different column dimensions.") samenames <- unlist(lapply(nama, function(x,y) { identical(x,y) }, y=nama[[1]])) if(!all(samenames)) stop("Data frames of marks have different names.\n") marx <- do.call(rbind, marxlist) return(marx) }, list = { marx <- do.call(c, marxlist) marx <- as.solist(marx, demote=TRUE) return(marx) }) stop("Unrecognised mark format") } markcbind <- function(...) { # cbind several columns of marks marxlist <- list(...) 
mkfmt <- unlist(lapply(marxlist, markformat)) if(any(vacuous <- (mkfmt == "none"))) { marxlist <- marxlist[!vacuous] mkfmt <- mkfmt[!vacuous] } if(any(isvec <- (mkfmt == "vector"))) { ## convert vectors to data frames with invented names for(i in which(isvec)) { mi <- as.data.frame(marxlist[i]) colnames(mi) <- paste0("V", i) marxlist[[i]] <- mi } mkfmt[isvec] <- "dataframe" } if(all(mkfmt == "dataframe")) { ## result is a data frame marx <- do.call(data.frame, marxlist) } else { ## result is a hyperframe if(!all(ishyp <- (mkfmt == "hyperframe"))) marxlist[!ishyp] <- lapply(marxlist[!ishyp], as.hyperframe) marx <- do.call(hyperframe, marxlist) } return(marx) } # extract only the columns of (passably) numeric data from a data frame numeric.columns <- function(M, logical=TRUE, others=c("discard", "na")) { others <- match.arg(others) M <- as.data.frame(M) if(ncol(M) == 1) colnames(M) <- NULL process <- function(z, logi, other) { if(is.numeric(z)) return(z) if(logi && is.logical(z)) return(as.integer(z)) switch(other, na=rep.int(NA_real_, length(z)), discard=NULL, NULL) } Mprocessed <- lapply(M, process, logi=logical, other=others) isnul <- unlist(lapply(Mprocessed, is.null)) if(all(isnul)) { # all columns have been removed # return a data frame with no columns return(as.data.frame(matrix(, nrow=nrow(M), ncol=0))) } Mout <- do.call(data.frame, Mprocessed[!isnul]) if(ncol(M) == 1 && ncol(Mout) == 1) colnames(Mout) <- NULL return(Mout) } coerce.marks.numeric <- function(X, warn=TRUE) { marx <- marks(X) if(is.null(dim(marx))) { if(is.factor(marx)) { if(warn) warning("Factor-valued marks were converted to integer codes", call.=FALSE) marx <- as.integer(marx) return(X %mark% marx) } } else { marx <- as.data.frame(marx) if(any(fax <- unlist(lapply(marx, is.factor)))) { if(warn) { nf <- sum(fax) whinge <- paste("Factor-valued mark", ngettext(nf, "variable", "variables"), commasep(sQuote(colnames(marx)[fax])), ngettext(nf, "was", "were"), "converted to integer codes") 
warning(whinge, call.=FALSE) } marx[fax] <- as.data.frame(lapply(marx[fax], as.integer)) return(X %mark% marx) } } return(X) } spatstat/R/disc.R0000755000176200001440000000700313115271075013370 0ustar liggesusers## ## disc.R ## ## discs and ellipses ## ## $Revision: 1.18 $ $Date: 2017/01/15 05:25:16 $ ## disc <- local({ indic <- function(x,y,x0,y0,r) { as.integer((x-x0)^2 + (y-y0)^2 < r^2) } disc <- function(radius=1, centre=c(0,0), ..., mask=FALSE, npoly=128, delta=NULL) { check.1.real(radius) stopifnot(radius > 0) centre <- as2vector(centre) if(!missing(npoly) && !is.null(npoly) && !is.null(delta)) stop("Specify either npoly or delta") if(!missing(npoly) && !is.null(npoly)) { stopifnot(length(npoly) == 1) stopifnot(npoly >= 3) } else if(!is.null(delta)) { check.1.real(delta) stopifnot(delta > 0) npoly <- pmax(16, ceiling(2 * pi * radius/delta)) } else npoly <- 128 if(!mask) { theta <- seq(from=0, to=2*pi, length.out=npoly+1)[-(npoly+1L)] x <- centre[1L] + radius * cos(theta) y <- centre[2L] + radius * sin(theta) W <- owin(poly=list(x=x, y=y), check=FALSE) } else { xr <- centre[1L] + radius * c(-1,1) yr <- centre[2L] + radius * c(-1,1) B <- owin(xr,yr) IW <- as.im(indic, B, x0=centre[1L], y0=centre[2L], r=radius, ...) 
W <- levelset(IW, 1, "==") } return(W) } disc }) hexagon <- function(edge=1, centre=c(0,0), ..., align=c("bottom", "top", "left", "right", "no")) { regularpolygon(6, edge, centre, align=align) } regularpolygon <- function(n, edge=1, centre=c(0,0), ..., align=c("bottom", "top", "left", "right", "no")) { check.1.integer(n) check.1.real(edge) stopifnot(n >= 3) stopifnot(edge > 0) align <- match.arg(align) theta <- 2 * pi/n radius <- edge/(2 * sin(theta/2)) result <- disc(radius, centre, npoly=n, mask=FALSE) if(align != "no") { k <- switch(align, bottom = 3/4, top = 1/4, left = 1/2, right = 1) alpha <- theta * (1/2 - (k * n) %% 1) result <- rotate(result, -alpha) } Frame(result) <- boundingbox(result) return(result) } ellipse <- local({ indic <- function(x,y,x0,y0,a,b,co,si){ x <- x-x0 y <- y-y0 as.integer(((x*co + y*si)/a)^2 + ((-x*si + y*co)/b)^2 < 1) } ellipse <- function(a, b, centre=c(0,0), phi=0, ..., mask=FALSE, npoly = 128) { ## Czechs: stopifnot(length(a) == 1) stopifnot(a > 0) stopifnot(length(b) == 1) stopifnot(b > 0) centre <- as2vector(centre) stopifnot(length(phi) == 1) stopifnot(length(npoly) == 1) stopifnot(npoly > 2) ## Rotator cuff: co <- cos(phi) si <- sin(phi) ## Mask: if(mask) { ## Thetas maximizing x and y. tx <- atan(-b*tan(phi)/a) ty <- atan(b/(a*tan(phi))) ## Maximal x and y (for centre = c(0,0)). xm <- a*co*cos(tx) - b*si*sin(tx) ym <- a*si*cos(ty) + b*co*sin(ty) ## Range of x and y. xr <- xm*c(-1,1)+centre[1L] yr <- ym*c(-1,1)+centre[2L] ## Wrecked-angle to contain the mask. B <- as.mask(owin(xr,yr),...) ## Build the mask as a level set. IW <- as.im(indic, B, x0=centre[1L], y0=centre[2L], a=a, b=b, co=co, si=si) return(levelset(IW, 1, "==")) } ## Polygonal. 
## Build "horizontal" ellipse centred at 0: theta <- seq(0, 2 * pi, length = npoly+1)[-(npoly+1L)] xh <- a * cos(theta) yh <- b * sin(theta) ## Rotate through angle phi and shift centre: x <- centre[1L] + co*xh - si*yh y <- centre[2L] + si*xh + co*yh owin(poly=list(x = x, y = y)) } ellipse }) spatstat/R/quadclass.R0000755000176200001440000002043013115271120014414 0ustar liggesusers# # quadclass.S # # Class 'quad' to define quadrature schemes # in (rectangular) windows in two dimensions. # # $Revision: 4.26 $ $Date: 2016/02/16 01:39:12 $ # # An object of class 'quad' contains the following entries: # # $data: an object of class 'ppp' # defining the OBSERVATION window, # giving the locations (& marks) of the data points. # # $dummy: object of class 'ppp' # defining the QUADRATURE window, # giving the locations (& marks) of the dummy points. # # $w: vector giving the nonnegative weights for the # data and dummy points (data first, followed by dummy) # # w may also have an attribute attr(w, "zeroes") # equivalent to (w == 0). If this is absent # then all points are known to have positive weights. # # $param: # parameters that were used to compute the weights # and possibly to create the dummy points (see below). # # The combined (data+dummy) vectors of x, y coordinates of the points, # and their weights, are extracted using standard functions # x.quad(), y.quad(), w.quad() etc. # # ---------------------------------------------------------------------- # Note about parameters: # # If the quadrature scheme was created by quadscheme(), # then $param contains # # $param$weight # list containing the values of all parameters # actually used to compute the weights. # # $param$dummy # list containing the values of all parameters # actually used to construct the dummy pattern # via default.dummy(); # or NULL if the dummy pattern was provided externally # # $param$sourceid # vector mapping the quadrature points to the # original data and dummy points. 
# # If you constructed the quadrature scheme manually, this # structure may not be present. # #------------------------------------------------------------- quad <- function(data, dummy, w, param=NULL) { data <- as.ppp(data) dummy <- as.ppp(dummy) n <- data$n + dummy$n if(missing(w)) w <- rep.int(1, n) else { w <- as.vector(w) if(length(w) != n) stop("length of weights vector w is not equal to total number of points") } if(is.null(attr(w, "zeroes")) && any( w == 0)) attr(w, "zeroes") <- (w == 0) Q <- list(data=data, dummy=dummy, w=w, param=param) class(Q) <- "quad" invisible(Q) } # ------------------ extractor functions ---------------------- x.quad <- function(Q) { verifyclass(Q, "quad") c(Q$data$x, Q$dummy$x) } y.quad <- function(Q) { verifyclass(Q, "quad") c(Q$data$y, Q$dummy$y) } w.quad <- function(Q) { verifyclass(Q, "quad") Q$w } param.quad <- function(Q) { verifyclass(Q, "quad") Q$param } n.quad <- function(Q) { verifyclass(Q, "quad") Q$data$n + Q$dummy$n } marks.quad <- function(x, dfok=FALSE, ...) { verifyclass(x, "quad") dat <- x$data dum <- x$dummy if(dfok) warning("ignored dfok = TRUE; not implemented") mdat <- marks(dat, dfok=FALSE, ...) mdum <- marks(dum, dfok=FALSE, ...) if(is.null(mdat) && is.null(mdum)) return(NULL) if(is.null(mdat)) mdat <- rep.int(NA_integer_, dat$n) if(is.null(mdum)) mdum <- rep.int(NA_integer_, dum$n) if(is.factor(mdat) && is.factor(mdum)) { mall <- cat.factor(mdat, mdum) } else mall <- c(mdat, mdum) return(mall) } is.marked.quad <- function(X, na.action="warn", ...) { marx <- marks(X, ...) if(is.null(marx)) return(FALSE) if(anyNA(marx)) switch(na.action, warn = { warning(paste("some mark values are NA in the point pattern", short.deparse(substitute(X)))) }, fatal = { return(FALSE) }, ignore = {} ) return(TRUE) } is.multitype.quad <- function(X, na.action="warn", ...) { marx <- marks(X, ...) 
if(is.null(marx)) return(FALSE) if(anyNA(marx)) switch(na.action, warn = { warning(paste("some mark values are NA in the point pattern", short.deparse(substitute(X)))) }, fatal = { return(FALSE) }, ignore = {} ) return(!is.data.frame(marx) && is.factor(marx)) } is.data <- function(Q) { verifyclass(Q, "quad") return(c(rep.int(TRUE, Q$data$n), rep.int(FALSE, Q$dummy$n))) } equals.quad <- function(Q) { # return matrix E such that E[i,j] = (X[i] == U[j]) # where X = Q$data and U = union.quad(Q) n <- Q$data$n m <- Q$dummy$n E <- matrix(FALSE, nrow=n, ncol=n+m) diag(E) <- TRUE E } equalsfun.quad <- function(Q) { stopifnot(inherits(Q, "quad")) return(function(i,j) { i == j }) } equalpairs.quad <- function(Q) { # return two-column matrix E such that # X[E[i,1]] == U[E[i,2]] for all i # where X = Q$data and U = union.quad(Q) n <- Q$data$n return(matrix(rep.int(seq_len(n),2), ncol=2)) } union.quad <- function(Q) { verifyclass(Q, "quad") ppp(x= c(Q$data$x, Q$dummy$x), y= c(Q$data$y, Q$dummy$y), window=Q$dummy$window, marks=marks.quad(Q), check=FALSE) } # # Plot a quadrature scheme # # plot.quad <- function(x, ..., main, add=FALSE, dum=list(), tiles=FALSE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) verifyclass(x, "quad") data <- x$data dummy <- x$dummy # determine plot parameters for dummy points dum <- resolve.defaults(dum, list(pch=".", add=TRUE)) tt <- NULL if(tiles) { # show tiles that determined the weights wp <- x$param$weight tt <- NULL if(is.null(wp) || is.null(wp$method)) { warning("Tile information is not available") } else { switch(wp$method, grid = { ntile <- wp$ntile tt <- quadrats(as.owin(x), ntile[1], ntile[2]) }, dirichlet = { U <- union.quad(x) if(wp$exact) { tt <- dirichlet(U) } else { win <- as.mask(as.owin(U)) tileid <- image(exactdt(U)$i, win$xcol, win$yrow, win$xrange, win$yrange) tt <- tess(image=tileid[win, drop=FALSE]) } }, warning("Unrecognised 'method' for tile weights") ) } } pixeltiles <- !is.null(tt) && tt$type == 
"image" tileargs <- resolve.defaults(list(x=tt, main=main, add=add), list(...), if(!pixeltiles) list(col="grey") else NULL) if(!is.marked(data)) { if(!is.null(tt)) { do.call(plot, tileargs) add <- TRUE } plot(data, main=main, add=add, ...) do.call(plot, append(list(x=dummy), dum)) } else if(is.multitype(data) && !add) { oldpar <- par(ask = interactive() && (.Device %in% c("X11", "GTK", "windows", "Macintosh"))) on.exit(par(oldpar)) data.marks <- marks(data) dummy.marks <- marks(dummy) types <- levels(data.marks) for(k in types) { add <- FALSE if(!is.null(tt)) { do.call(plot, tileargs) add <- TRUE } maink <- paste(main, "\n mark = ", k, sep="") plot(unmark(data[data.marks == k]), main=maink, add=add, ...) do.call(plot, append(list(x=unmark(dummy[dummy.marks == k])), dum)) } } else { if(!is.null(tt)) { do.call(plot, tileargs) add <- TRUE } plot(data, ..., main=main, add=add) do.call(plot, append(list(x=dummy), dum)) } invisible(NULL) } # subset operator "[.quad" <- function(x, ...) { U <- union.quad(x) Z <- is.data(x) w <- w.quad(x) # determine serial numbers of points to be included V <- U %mark% seq_len(U$n) i <- marks(V[...]) # extract corresponding subsets of vectors Z <- Z[i] w <- w[i] # take subset of points, using any type of subset index U <- U[...] # stick together quad(U[Z], U[!Z], w) } domain.quad <- Window.quad <- function(X, ...) 
{ as.owin(X) } "Window<-.quad" <- function(X, ..., value) { verifyclass(value, "owin") return(X[value]) } unitname.quad <- function(x) { return(unitname(x$data)) } "unitname<-.quad" <- function(x, value) { unitname(x$data) <- value unitname(x$dummy) <- value return(x) } spatstat/R/transect.R0000644000176200001440000000515613115225157014275 0ustar liggesusers# # transect.R # # Line transects of pixel images # # $Revision: 1.6 $ $Date: 2013/03/15 01:28:06 $ # transect.im <- local({ specify.location <- function(loc, rect) { lname <- short.deparse(substitute(loc)) if(is.numeric(loc) && length(loc) == 2) return(list(x=loc[1], y=loc[2])) if(is.list(loc)) return(xy.coords(loc)) if(!(is.character(loc) && length(loc) == 1)) stop(paste("Unrecognised format for", sQuote(lname)), call.=FALSE) xr <- rect$xrange yr <- rect$yrange switch(loc, bottomleft = list(x=xr[1], y=yr[1]), bottom = list(x=mean(xr), y=yr[1]), bottomright = list(x=xr[2], y=yr[1]), right = list(x=xr[2], y=mean(yr)), topright = list(x=xr[2], y=yr[2]), top = list(x=mean(xr), y=yr[2]), topleft = list(x=xr[1], y=yr[2]), left = list(x=xr[1], y=mean(yr)), centre=, center = list(x=mean(xr), y=mean(yr)), stop(paste("Unrecognised location", sQuote(lname), "=", dQuote(loc)), call.=FALSE) ) } transect.im <- function(X, ..., from="bottomleft", to="topright", click=FALSE, add=FALSE) { Xname <- short.deparse(substitute(X)) Xname <- sensiblevarname(Xname, "X") stopifnot(is.im(X)) # determine transect position if(click) { # interactive if(!add) plot(X) from <- locator(1) points(from) to <- locator(1) points(to) segments(from$x, from$y, to$x, to$y) } else { # data defining a line segment R <- as.rectangle(X) from <- specify.location(from, R) to <- specify.location(to, R) } # create sample points along transect if(identical(from,to)) stop(paste(sQuote("from"), "and", sQuote("to"), "must be distinct points"), call.=FALSE) u <- seq(0,1,length=512) x <- from$x + u * (to$x - from$x) y <- from$y + u * (to$y - from$y) leng <- sqrt( 
(to$x - from$x)^2 + (to$y - from$y)^2) t <- u * leng # look up pixel values (may be NA) v <- X[list(x=x, y=y), drop=FALSE] # package into fv object df <- data.frame(t=t, v=v) colnames(df)[2] <- Xname fv(df, argu = "t", ylab = substitute(Xname(t), list(Xname=as.name(Xname))), valu=Xname, labl = c("t", "%s(t)"), desc = c("distance along transect", "pixel value of %s"), unitname = unitname(X), fname = Xname) } transect.im }) spatstat/R/pcf.R0000755000176200001440000002575413115271120013222 0ustar liggesusers# # pcf.R # # $Revision: 1.64 $ $Date: 2017/06/05 10:31:58 $ # # # calculate pair correlation function # from point pattern (pcf.ppp) # or from estimate of K or Kcross (pcf.fv) # or from fasp object # # pcf <- function(X, ...) { UseMethod("pcf") } pcf.ppp <- function(X, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r", "d"), var.approx=FALSE, domain=NULL, ratio=FALSE, close=NULL) { verifyclass(X, "ppp") # r.override <- !is.null(r) win <- Window(X) areaW <- area(win) npts <- npoints(X) lambda <- npts/areaW lambda2area <- areaW * lambda^2 kernel <- match.kernel(kernel) if(!is.null(domain)) { # estimate based on contributions from a subdomain domain <- as.owin(domain) if(!is.subset.owin(domain, win)) stop(paste(dQuote("domain"), "is not a subset of the window of X")) # trick pcfdot() into doing it indom <- factor(inside.owin(X$x, X$y, domain), levels=c(FALSE,TRUE)) g <- pcfdot(X %mark% indom, i="TRUE", r=r, correction=correction, kernel=kernel, bw=bw, stoyan=stoyan, divisor=divisor, ...) 
if(!ratio) { ## relabel g <- rebadge.fv(g, quote(g(r)), "g") } else { ## construct ratfv object denom <- sum(indom == "TRUE") * lambda g <- ratfv(as.data.frame(g), NULL, denom, "r", quote(g(r)), "theo", NULL, alim, attr(g, "labl"), attr(g, "desc"), fname="g", ratio=TRUE) } if(var.approx) warning("var.approx is not implemented when 'domain' is given") return(g) } correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) divisor <- match.arg(divisor) # bandwidth if(is.null(bw) && (kernel == "epanechnikov")) { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(lambda) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(lambda) } ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, lambda) breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax), .StripNull = TRUE) ################################################# # compute pairwise distances if(npts > 1) { needall <- any(correction %in% c("translate", "isotropic")) if(is.null(close)) { what <- if(needall) "all" else "ijd" close <- closepairs(X, rmax + hmax, what=what) } else { #' check 'close' has correct format needed <- if(!needall) c("i", "j", "d") else c("i", 
"j", "xi", "yi", "xj", "yj", "dx", "dy", "d") if(any(is.na(match(needed, names(close))))) stop(paste("Argument", sQuote("close"), "should have components named", commasep(sQuote(needed))), call.=FALSE) } dIJ <- close$d } else { undefined <- rep(NaN, length(r)) } # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) out <- ratfv(df, NULL, lambda2area, "r", quote(g(r)), "theo", NULL, alim, c("r","%s[Pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="g", ratio=ratio) ###### compute ####### bw.used <- NULL if(any(correction=="translate")) { # translation correction if(npts > 1) { edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=win, paired=TRUE) kdenT <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor) gT <- kdenT$g bw.used <- attr(kdenT, "bw") } else gT <- undefined if(!ratio) { out <- bind.fv(out, data.frame(trans=gT), "hat(%s)[Trans](r)", "translation-corrected estimate of %s", "trans") } else { out <- bind.ratfv(out, data.frame(trans=gT * lambda2area), lambda2area, "hat(%s)[Trans](r)", "translation-corrected estimate of %s", "trans") } } if(any(correction=="isotropic")) { # Ripley isotropic correction if(npts > 1) { XI <- ppp(close$xi, close$yi, window=win, check=FALSE) edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) kdenR <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor) gR <- kdenR$g bw.used <- attr(kdenR, "bw") } else gR <- undefined if(!ratio) { out <- bind.fv(out, data.frame(iso=gR), "hat(%s)[Ripley](r)", "isotropic-corrected estimate of %s", "iso") } else { out <- bind.ratfv(out, data.frame(iso=gR * lambda2area), lambda2area, "hat(%s)[Ripley](r)", "isotropic-corrected estimate of %s", "iso") } } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } ## variance approximation ## Illian et al 2008 p 234 equation 4.3.42 if(var.approx) { gr <- if(any(correction == "isotropic")) gR else gT # integral of squared kernel intk2 <- kernel.squint(kernel, bw.used) # isotropised 
set covariance of window gWbar <- as.function(rotmean(setcov(win), result="fv")) vest <- gr * intk2/(pi * r * gWbar(r) * lambda^2) if(!ratio) { out <- bind.fv(out, data.frame(v=vest), "v(r)", "approximate variance of %s", "v") } else { vden <- rep((npts-1)^2, length(vest)) vnum <- vden * vest out <- bind.ratfv(out, data.frame(v=vnum), data.frame(c=vden), "v(r)", "approximate variance of %s", "v") } } ## Finish off ## default is to display all corrections formula(out) <- . ~ r fvnames(out, ".") <- setdiff(rev(colnames(out)), c("r", "v")) ## unitname(out) <- unitname(X) ## copy to other components if(ratio) out <- conform.ratfv(out) attr(out, "bw") <- bw.used return(out) } # Smoothing Estimate of Weighted Pair Correlation # d = vector of relevant distances # w = vector of edge correction weights (in normal use) # denargs = arguments to density.default # lambda2area = constant lambda^2 * areaW (in normal use) sewpcf <- function(d, w, denargs, lambda2area, divisor=c("r","d")) { divisor <- match.arg(divisor) if(divisor == "d") { w <- w/d if(!all(good <- is.finite(w))) { nbad <- sum(!good) warning(paste(nbad, "infinite or NA", ngettext(nbad, "contribution was", "contributions were"), "deleted from pcf estimate")) d <- d[good] w <- w[good] } } wtot <- sum(w) kden <- do.call.matched(density.default, append(list(x=d, weights=w/wtot), denargs)) r <- kden$x y <- kden$y * wtot if(divisor == "r") y <- y/r g <- y/(2 * pi * lambda2area) result <- data.frame(r=r,g=g) attr(result, "bw") <- kden$bw return(result) } # #---------- OTHER METHODS FOR pcf -------------------- # "pcf.fasp" <- function(X, ..., method="c") { verifyclass(X, "fasp") Y <- X Y$title <- paste("Array of pair correlation functions", if(!is.null(X$dataname)) "for", X$dataname) # go to work on each function for(i in seq_along(X$fns)) { Xi <- X$fns[[i]] PCFi <- pcf.fv(Xi, ..., method=method) Y$fns[[i]] <- PCFi if(is.fv(PCFi)) Y$default.formula[[i]] <- formula(PCFi) } return(Y) } pcf.fv <- local({ callmatched <- 
function(fun, argue) { formalnames <- names(formals(fun)) formalnames <- formalnames[formalnames != "..."] do.call(fun, argue[names(argue) %in% formalnames]) } pcf.fv <- function(X, ..., method="c") { verifyclass(X, "fv") # extract r and the recommended estimate of K r <- with(X, .x) K <- with(X, .y) alim <- attr(X, "alim") # remove NA's ok <- !is.na(K) K <- K[ok] r <- r[ok] switch(method, a = { ss <- callmatched(smooth.spline, list(x=r, y=K, ...)) dK <- predict(ss, r, deriv=1)$y g <- dK/(2 * pi * r) }, b = { y <- K/(2 * pi * r) y[!is.finite(y)] <- 0 ss <- callmatched(smooth.spline, list(x=r, y=y, ...)) dy <- predict(ss, r, deriv=1)$y g <- dy + y/r }, c = { z <- K/(pi * r^2) z[!is.finite(z)] <- 1 ss <- callmatched(smooth.spline, list(x=r, y=z, ...)) dz <- predict(ss, r, deriv=1)$y g <- (r/2) * dz + z }, d = { z <- sqrt(K) z[!is.finite(z)] <- 0 ss <- callmatched(smooth.spline, list(x=r, y=z, ...)) dz <- predict(ss, r, deriv=1)$y g <- z * dz/(pi * r) }, stop(paste("unrecognised method", sQuote(method))) ) # pack result into "fv" data frame Z <- fv(data.frame(r=r, theo=rep.int(1, length(r)), pcf=g), "r", substitute(g(r), NULL), "pcf", . 
~ r, alim, c("r", "%s[pois](r)", "%s(r)"), c("distance argument r", "theoretical Poisson value of %s", "estimate of %s by numerical differentiation"), fname="g") unitname(Z) <- unitname(X) return(Z) } pcf.fv }) spatstat/R/randomNS.R0000644000176200001440000003311513115225157014167 0ustar liggesusers## ## randomNS.R ## ## simulating from Neyman-Scott processes ## ## $Revision: 1.23 $ $Date: 2015/10/21 09:06:57 $ ## ## Original code for rCauchy and rVarGamma by Abdollah Jalilian ## Other code and modifications by Adrian Baddeley ## Bug fixes by Abdollah, Adrian, and Rolf Turner rNeymanScott <- function(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), ..., lmax=NULL, nsim=1, drop=TRUE, nonempty=TRUE, saveparents=TRUE) { ## Generic Neyman-Scott process ## Implementation for bounded cluster radius ## ## Catch old argument name rmax for expand if(missing(expand) && !is.null(rmax <- list(...)$rmax)) expand <- rmax ## 'rcluster' may be ## ## (1) a function(x,y, ...) that takes the coordinates ## (x,y) of the parent point and generates a list(x,y) of offspring ## if(is.function(rcluster)) return(rPoissonCluster(kappa, expand, rcluster, win, ..., lmax=lmax, nsim=nsim, drop=drop, saveparents=saveparents)) ## (2) a list(mu, f) where mu is a numeric value, function, or pixel image ## and f is a function(n, ...) generating n i.i.d. 
offspring at 0,0 if(!(is.list(rcluster) && length(rcluster) == 2)) stop("rcluster should be either a function, or a list of two elements") win <- as.owin(win) mu <- rcluster[[1]] rdisplace <- rcluster[[2]] if(is.numeric(mu)) { ## homogeneous if(!(length(mu) == 1 && mu >= 0)) stop("rcluster[[1]] should be a single nonnegative number") mumax <- mu } else if (is.im(mu) || is.function(mu)) { ## inhomogeneous if(is.function(mu)) mu <- as.im(mu, W=win, ..., strict=TRUE) mumax <- max(mu) } else stop("rcluster[[1]] should be a number, a function or a pixel image") if(!is.function(rdisplace)) stop("rcluster[[2]] should be a function") ## Generate parents in dilated window frame <- boundingbox(win) dilated <- grow.rectangle(frame, expand) if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa))) stop(paste("The window in which the image", sQuote("kappa"), "is defined\n", "is not large enough to contain the dilation of the window", sQuote("win"))) if(nonempty) { if(is.function(kappa)) { kappa <- as.im(kappa, W=dilated, ..., strict=TRUE) lmax <- NULL } ## intensity of parents with at least one offspring point kappa <- kappa * (1 - exp(-mumax)) } ## generate parentlist <- rpoispp(kappa, lmax=lmax, win=dilated, nsim=nsim, drop=FALSE, warnwin=FALSE) resultlist <- vector(mode="list", length=nsim) for(i in 1:nsim) { parents <- parentlist[[i]] np <- npoints(parents) ## generate cluster sizes if(np == 0) { ## no parents - empty pattern result <- ppp(numeric(0), numeric(0), window=win) parentid <- integer(0) } else { if(!nonempty) { ## cluster sizes are Poisson csize <- rpois(np, mumax) } else { ## cluster sizes are Poisson conditional on > 0 csize <- qpois(runif(np, min=dpois(0, mumax)), mumax) } noff <- sum(csize) xparent <- parents$x yparent <- parents$y x0 <- rep.int(xparent, csize) y0 <- rep.int(yparent, csize) ## invoke random generator dd <- rdisplace(noff, ...) 
mm <- if(is.ppp(dd)) marks(dd) else NULL ## validate xy <- xy.coords(dd) dx <- xy$x dy <- xy$y if(!(length(dx) == noff)) stop("rcluster returned the wrong number of points") ## create offspring and offspring-to-parent map xoff <- x0 + dx yoff <- y0 + dy parentid <- rep.int(1:np, csize) ## trim to window retain <- inside.owin(xoff, yoff, win) if(is.im(mu)) retain[retain] <- inside.owin(xoff[retain], yoff[retain], as.owin(mu)) xoff <- xoff[retain] yoff <- yoff[retain] parentid <- parentid[retain] if(!is.null(mm)) mm <- marksubset(mm, retain) ## done result <- ppp(xoff, yoff, window=win, check=FALSE, marks=mm) } if(is.im(mu)) { ## inhomogeneously modulated clusters a la Waagepetersen P <- eval.im(mu/mumax) result <- rthin(result, P) } if(saveparents) { attr(result, "parents") <- parents attr(result, "parentid") <- parentid attr(result, "expand") <- expand } resultlist[[i]] <- result } if(nsim == 1 && drop) return(resultlist[[1]]) names(resultlist) <- paste("Simulation", 1:nsim) return(as.solist(resultlist)) } rMatClust <- local({ ## like runifdisc but returns only the coordinates rundisk <- function(n, radius) { R <- radius * sqrt(runif(n, min=0, max=1)) Theta <- runif(n, min=0, max=2*pi) cbind(R * cos(Theta), R * sin(Theta)) } rMatClust <- function(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, saveLambda=FALSE, expand = scale, ..., poisthresh=1e-6, saveparents=TRUE) { ## Matern Cluster Process with Poisson (mu) offspring distribution ## Catch old scale syntax (r) if(missing(scale)) scale <- list(...)$r check.1.real(scale) stopifnot(scale > 0) ## trap case of large clusters, close to Poisson kok <- is.numeric(kappa) || is.im(kappa) if(kok) { kappamax <- max(kappa) } else { kim <- as.im(kappa, W=win, ..., strict=TRUE) kra <- range(kim) kappamax <- kra[2] + 0.05 * diff(kra) } if(1/(pi * kappamax * scale^2) < poisthresh) { kapmu <- mu * (if(kok) kappa else kim) result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE) return(result) } 
result <- rNeymanScott(kappa, scale, list(mu, rundisk), win, radius=scale, nsim=nsim, drop=FALSE, saveparents = saveparents || saveLambda) if(saveLambda){ for(i in 1:nsim) { parents <- attr(result[[i]], "parents") Lambda <- clusterfield("MatClust", parents, scale=scale, mu=mu, ...) attr(result[[i]], "Lambda") <- Lambda[win] } } return(if(nsim == 1 && drop) result[[1]] else result) } rMatClust }) rThomas <- local({ ## random displacements gaus <- function(n, sigma) { matrix(rnorm(2 * n, mean=0, sd=sigma), ncol=2) } ## main function rThomas <- function(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, saveLambda=FALSE, expand = 4*scale, ..., poisthresh=1e-6, saveparents=TRUE) { ## Thomas process with Poisson(mu) number of offspring ## at isotropic Normal(0,sigma^2) displacements from parent ## ## Catch old scale syntax (sigma) if(missing(scale)) scale <- list(...)$sigma check.1.real(scale) stopifnot(scale > 0) ## trap case of large clusters, close to Poisson kok <- is.numeric(kappa) || is.im(kappa) if(kok) { kappamax <- max(kappa) } else { kim <- as.im(kappa, W=win, ..., strict=TRUE) kra <- range(kim) kappamax <- kra[2] + 0.05 * diff(kra) } if(1/(4*pi * kappamax * scale^2) < poisthresh) { kapmu <- mu * (if(kok) kappa else kim) result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE) return(result) } ## determine the maximum radius of clusters if(missing(expand)) expand <- clusterradius("Thomas", scale = scale, ...) result <- rNeymanScott(kappa, expand, list(mu, gaus), win, sigma=scale, nsim=nsim, drop=FALSE, saveparents = saveparents || saveLambda) if(saveLambda){ for(i in 1:nsim) { parents <- attr(result[[i]], "parents") Lambda <- clusterfield("Thomas", parents, scale=scale, mu=mu, ...) 
attr(result[[i]], "Lambda") <- Lambda[win] } } return(if(nsim == 1 && drop) result[[1]] else result) } rThomas }) ## ================================================ ## Neyman-Scott process with Cauchy kernel function ## ================================================ ## scale / omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## eta = 2 * omega rCauchy <- local({ ## simulate mixture of normals with inverse-gamma distributed variance rnmix.invgam <- function(n = 1, rate) { V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2) s <- 1/rgamma(n, shape=1/2, rate=rate) return(sqrt(s) * V) } ## main function rCauchy <- function (kappa, scale, mu, win = owin(), thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL, ..., poisthresh=1e-6, saveparents=TRUE) { ## scale / omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## Catch old scale syntax (omega) dots <- list(...) if(missing(scale)) scale <- dots$omega ## Catch old name 'eps' for 'thresh': if(missing(thresh)) thresh <- dots$eps %orifnull% 0.001 ## trap case of large clusters, close to Poisson kok <- is.numeric(kappa) || is.im(kappa) if(kok) { kappamax <- max(kappa) } else { kim <- as.im(kappa, W=win, ..., strict=TRUE) kra <- range(kim) kappamax <- kra[2] + 0.05 * diff(kra) } if(1/(pi * kappamax * scale^2) < poisthresh) { kapmu <- mu * (if(kok) kappa else kim) result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE) return(result) } ## determine the maximum radius of clusters if(missing(expand)){ expand <- clusterradius("Cauchy", scale = scale, thresh = thresh, ...) 
} else if(!missing(thresh)){ warning("Argument ", sQuote("thresh"), " is ignored when ", sQuote("expand"), " is given") } ## simulate result <- rNeymanScott(kappa, expand, list(mu, rnmix.invgam), win, rate = scale^2/2, nsim=nsim, drop=FALSE, saveparents = saveparents || saveLambda) ## correction from Abdollah: the rate is beta = omega^2 / 2 = eta^2 / 8. if(saveLambda){ for(i in 1:nsim) { parents <- attr(result[[i]], "parents") Lambda <- clusterfield("Cauchy", parents, scale=scale, mu=mu, ...) attr(result[[i]], "Lambda") <- Lambda[win] } } return(if(nsim == 1 && drop) result[[1]] else result) } rCauchy }) ## ## ================================================================= ## Neyman-Scott process with Variance Gamma (Bessel) kernel function ## ================================================================= ## nu.ker: smoothness parameter of Variance Gamma kernel function ## omega: scale parameter of kernel function ## nu.pcf: smoothness parameter of Variance Gamma pair correlation function ## eta: scale parameter of Variance Gamma pair correlation function ## nu.pcf = 2 * nu.ker + 1 and eta = omega rVarGamma <- local({ ## simulates mixture of isotropic Normal points in 2D with gamma variances rnmix.gamma <- function(n = 1, shape, rate) { V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2) s <- rgamma(n, shape=shape, rate=rate) return(sqrt(s) * V) } ## main function rVarGamma <- function (kappa, nu, scale, mu, win = owin(), thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL, ..., poisthresh=1e-6, saveparents=TRUE) { ## nu / nu.ker: smoothness parameter of Variance Gamma kernel function ## scale / omega: scale parameter of kernel function ## Catch old nu.ker/nu.pcf syntax and resolve nu-value. dots <- list(...) 
if(missing(nu)){ nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker } else{ check.1.real(nu) stopifnot(nu > -1/2) } ## Catch old scale syntax (omega) if(missing(scale)) scale <- dots$omega ## Catch old name 'eps' for 'thresh': if(missthresh <- missing(thresh)) thresh <- dots$eps %orifnull% 0.001 ## trap case of large clusters, close to Poisson kok <- is.numeric(kappa) || is.im(kappa) if(kok) { kappamax <- max(kappa) } else { kim <- as.im(kappa, W=win, ..., strict=TRUE) kra <- range(kim) kappamax <- kra[2] + 0.05 * diff(kra) } if(1/(4 * pi * kappamax * scale^2) < poisthresh) { kapmu <- mu * (if(kok) kappa else kim) result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE) return(result) } ## determine the maximum radius of clusters if(missing(expand)){ expand <- clusterradius("VarGamma", scale = scale, nu = nu, thresh = thresh, ...) } else if(!missthresh){ warning("Argument ", sQuote("thresh"), " is ignored when ", sQuote("expand"), " is given") } ## simulate result <- rNeymanScott(kappa, expand, list(mu, rnmix.gamma), win, ## WAS: shape = 2 * (nu.ker + 1) shape = nu + 1, rate = 1/(2 * scale^2), nsim=nsim, drop=FALSE, saveparents = saveparents || saveLambda) if(saveLambda){ for(i in 1:nsim) { parents <- attr(result[[i]], "parents") Lambda <- clusterfield("VarGamma", parents, scale=scale, nu=nu, mu=mu, ...) 
attr(result[[i]], "Lambda") <- Lambda[win] } } return(if(nsim == 1 && drop) result[[1]] else result) } rVarGamma }) spatstat/R/pp3.R0000755000176200001440000001621013115271120013137 0ustar liggesusers# # pp3.R # # class of three-dimensional point patterns in rectangular boxes # # $Revision: 1.26 $ $Date: 2016/09/23 11:02:36 $ # box3 <- function(xrange=c(0,1), yrange=xrange, zrange=yrange, unitname=NULL) { stopifnot(is.numeric(xrange) && length(xrange) == 2 && diff(xrange) > 0) stopifnot(is.numeric(yrange) && length(yrange) == 2 && diff(yrange) > 0) stopifnot(is.numeric(zrange) && length(zrange) == 2 && diff(zrange) > 0) out <- list(xrange=xrange, yrange=yrange, zrange=zrange, units=as.units(unitname)) class(out) <- "box3" return(out) } as.box3 <- function(...) { a <- list(...) n <- length(a) if(n == 0) stop("No arguments given") if(n == 1) { a <- a[[1]] if(inherits(a, "box3")) return(a) if(inherits(a, "pp3")) return(a$domain) if(inherits(a, "boxx")){ if(ncol(a$ranges)==3) return(box3(a$ranges[,1], a$ranges[,2], a$ranges[,3])) stop("Supplied boxx object does not have dimension three") } if(inherits(a, "ppx")) return(as.box3(a$domain)) if(is.numeric(a)) { if(length(a) == 6) return(box3(a[1:2], a[3:4], a[5:6])) stop(paste("Don't know how to interpret", length(a), "numbers as a box")) } if(!is.list(a)) stop("Don't know how to interpret data as a box") } return(do.call(box3, a)) } print.box3 <- function(x, ...) 
{ bracket <- function(z) paste("[", paste(signif(z, 5), collapse=", "), "]", sep="") v <- paste(unlist(lapply(x[1:3], bracket)), collapse=" x ") s <- summary(unitname(x)) splat("Box:", v, s$plural, s$explain) invisible(NULL) } unitname.box3 <- function(x) { x$units } "unitname<-.box3" <- function(x, value) { x$units <- as.units(value) return(x) } grow.box3 <- function(W, left, right=left) { as.box3(grow.boxx(as.boxx(W), left, right)) } eroded.volumes <- function(x, r) { UseMethod("eroded.volumes") } eroded.volumes.box3 <- function(x, r) { b <- as.box3(x) ax <- pmax.int(0, diff(b$xrange) - 2 * r) ay <- pmax.int(0, diff(b$yrange) - 2 * r) az <- pmax.int(0, diff(b$zrange) - 2 * r) ax * ay * az } shortside <- function(x) { UseMethod("shortside") } shortside.box3 <- function(x) { min(sidelengths(x)) } sidelengths <- function(x) { UseMethod("sidelengths") } sidelengths.box3 <- function(x) { with(x, c(diff(xrange), diff(yrange), diff(zrange))) } bounding.box3 <- function(...) { wins <- list(...) boxes <- lapply(wins, as.box3) xr <- range(unlist(lapply(boxes, getElement, name="xrange"))) yr <- range(unlist(lapply(boxes, getElement, name="yrange"))) zr <- range(unlist(lapply(boxes, getElement, name="zrange"))) box3(xr, yr, zr) } pp3 <- function(x, y, z, ...) { stopifnot(is.numeric(x)) stopifnot(is.numeric(y)) stopifnot(is.numeric(z)) b <- as.box3(...) out <- ppx(data=data.frame(x=x,y=y,z=z), domain=b) class(out) <- c("pp3", class(out)) return(out) } domain.pp3 <- function(X, ...) { X$domain } is.pp3 <- function(x) { inherits(x, "pp3") } npoints.pp3 <- function(x) { nrow(x$data) } print.pp3 <- function(x, ...) { splat("Three-dimensional point pattern") sd <- summary(x$data) np <- sd$ncases splat(np, ngettext(np, "point", "points")) print(x$domain) invisible(NULL) } summary.pp3 <- function(object, ...) 
{ sd <- summary(object$data) np <- sd$ncases dom <- object$domain v <- volume.box3(dom) u <- summary(unitname(dom)) intens <- np/v out <- list(np=np, sumdat=sd, dom=dom, v=v, u=u, intensity=intens) class(out) <- "summary.pp3" return(out) } print.summary.pp3 <- function(x, ...) { splat("Three-dimensional point pattern") splat(x$np, ngettext(x$np, "point", "points")) print(x$dom) u <- x$u v <- x$v splat("Volume", v, "cubic", if(v == 1) u$singular else u$plural, u$explain) splat("Average intensity", x$intensity, "points per cubic", u$singular, u$explain) invisible(NULL) } plot.pp3 <- function(x, ..., eye=NULL, org=NULL, theta=25, phi=15, type=c("p", "n", "h"), box.back=list(col="pink"), box.front=list(col="blue", lwd=2)) { xname <- short.deparse(substitute(x)) type <- match.arg(type) # given arguments argh <- list(...) if(!missing(box.front)) argh$box.front <- box.front if(!missing(box.back)) argh$box.back <- box.back # Now apply formal defaults above formaldefaults <- list(box.front=box.front, box.back=box.back) #' coo <- as.matrix(coords(x)) xlim <- x$domain$xrange ylim <- x$domain$yrange zlim <- x$domain$zrange if(is.null(org)) org <- c(mean(xlim), mean(ylim), mean(zlim)) if(is.null(eye)) { theta <- theta * pi/180 phi <- phi * pi/180 d <- 2 * diameter(x$domain) eye <- org + d * c(cos(phi) * c(sin(theta), -cos(theta)), sin(phi)) } deefolts <- spatstat.options('par.pp3') ## determine default eye position and centre of view do.call(plot3Dpoints, resolve.defaults(list(xyz=coo, eye=eye, org=org, type=type), argh, deefolts, formaldefaults, list(main=xname, xlim=xlim, ylim=ylim, zlim=zlim))) } "[.pp3" <- function(x, i, drop=FALSE, ...) 
{ answer <- NextMethod("[") if(is.ppx(answer)) class(answer) <- c("pp3", class(answer)) return(answer) } unitname.pp3 <- function(x) { unitname(x$domain) } "unitname<-.pp3" <- function(x, value) { d <- x$domain unitname(d) <- value x$domain <- d return(x) } diameter.box3 <- function(x) { stopifnot(inherits(x, "box3")) with(x, sqrt(diff(xrange)^2+diff(yrange)^2+diff(zrange)^2)) } volume <- function(x) { UseMethod("volume") } volume.box3 <- function(x) { stopifnot(inherits(x, "box3")) with(x, prod(diff(xrange), diff(yrange), diff(zrange))) } runifpoint3 <- function(n, domain=box3(), nsim=1, drop=TRUE) { domain <- as.box3(domain) result <- vector(mode="list", length=nsim) dd <- as.list(domain)[c("xrange", "yrange", "zrange")] for(i in 1:nsim) { x <- with(dd, runif(n, min=xrange[1], max=xrange[2])) y <- with(dd, runif(n, min=yrange[1], max=yrange[2])) z <- with(dd, runif(n, min=zrange[1], max=zrange[2])) result[[i]] <- pp3(x,y,z,domain) } if(drop && nsim == 1) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } rpoispp3 <- function(lambda, domain=box3(), nsim=1, drop=TRUE) { domain <- as.box3(domain) v <- volume(domain) if(!(is.numeric(lambda) && length(lambda) == 1)) stop("lambda must be a single numeric value") np <- rpois(nsim, lambda * v) dd <- as.list(domain)[c("xrange", "yrange", "zrange")] result <- vector(mode="list", length=nsim) for(i in 1:nsim) { ni <- np[i] x <- with(dd, runif(ni, min=xrange[1], max=xrange[2])) y <- with(dd, runif(ni, min=yrange[1], max=yrange[2])) z <- with(dd, runif(ni, min=zrange[1], max=zrange[2])) result[[i]] <- pp3(x,y,z,domain) } if(drop && nsim == 1) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } spatstat/R/objsurf.R0000644000176200001440000000751213115225157014122 0ustar liggesusers# # objsurf.R # # surface of the objective function for an M-estimator # # $Revision: 1.5 $ $Date: 2016/02/11 10:17:12 $ # objsurf 
<- function(x, ...) { UseMethod("objsurf") } objsurf.kppm <- objsurf.dppm <- function(x, ..., ngrid=32, ratio=1.5, verbose=TRUE) { Fit <- x$Fit switch(Fit$method, mincon = { result <- objsurf(Fit$mcfit, ..., ngrid=ngrid, ratio=ratio, verbose=verbose) }, clik = { optpar <- x$par objfun <- Fit$objfun objargs <- Fit$objargs result <- objsurfEngine(objfun, optpar, objargs, ..., ngrid=ngrid, ratio=ratio, verbose=verbose) }) return(result) } objsurf.minconfit <- function(x, ..., ngrid=32, ratio=1.5, verbose=TRUE) { optpar <- x$par.canon %orifnull% x$par objfun <- x$objfun objargs <- x$objargs dotargs <- x$dotargs objsurfEngine(objfun, optpar, objargs, ..., dotargs=dotargs, ngrid=ngrid, ratio=ratio, verbose=verbose) } objsurfEngine <- function(objfun, optpar, objargs, ..., dotargs=list(), objname="objective", ngrid=32, ratio=1.5, verbose=TRUE) { trap.extra.arguments(...) if(!is.function(objfun)) stop("Object is in an outdated format and needs to be re-fitted") npar <- length(optpar) if(npar != 2) stop("Only implemented for functions of 2 arguments") # create grid of parameter values ratio <- ensure2vector(ratio) ngrid <- ensure2vector(ngrid) stopifnot(all(ratio > 1)) xgrid <- seq(optpar[1]/ratio[1], optpar[1] * ratio[1], length=ngrid[1]) ygrid <- seq(optpar[2]/ratio[2], optpar[2] * ratio[2], length=ngrid[2]) pargrid <- expand.grid(xgrid, ygrid) colnames(pargrid) <- names(optpar) # evaluate if(verbose) cat(paste("Evaluating", nrow(pargrid), "function values...")) values <- do.call(apply, append(list(pargrid, 1, objfun, objargs=objargs), dotargs)) if(verbose) cat("Done.\n") result <- list(x=xgrid, y=ygrid, z=matrix(values, ngrid[1], ngrid[2])) attr(result, "optpar") <- optpar attr(result, "objname") <- "contrast" class(result) <- "objsurf" return(result) } print.objsurf <- function(x, ...) 
{ cat("Objective function surface\n") optpar <- attr(x, "optpar") objname <- attr(x, "objname") nama <- names(optpar) cat("Parameter ranges:\n") cat(paste(paste0(nama[1], ":"), prange(range(x$x)), "\n")) cat(paste(paste0(nama[2], ":"), prange(range(x$y)), "\n")) cat(paste("Function value:", objname, "\n")) invisible(NULL) } image.objsurf <- plot.objsurf <- function(x, ...) { xname <- short.deparse(substitute(x)) optpar <- attr(x, "optpar") nama <- names(optpar) do.call(image, resolve.defaults(list(x=unclass(x)), list(...), list(xlab=nama[1], ylab=nama[2], main=xname))) abline(v=optpar[1], lty=3) abline(h=optpar[2], lty=3) invisible(NULL) } contour.objsurf <- function(x, ...) { xname <- short.deparse(substitute(x)) optpar <- attr(x, "optpar") nama <- names(optpar) do.call(contour, resolve.defaults(list(x=unclass(x)), list(...), list(xlab=nama[1], ylab=nama[2], main=xname))) abline(v=optpar[1], lty=3) abline(h=optpar[2], lty=3) invisible(NULL) } persp.objsurf <- function(x, ...) { xname <- short.deparse(substitute(x)) optpar <- attr(x, "optpar") objname <- attr(x, "objname") nama <- names(optpar) r <- do.call(persp, resolve.defaults(list(x=x$x, y=x$y, z=x$z), list(...), list(xlab=nama[1], ylab=nama[2], zlab=objname, main=xname))) invisible(r) } spatstat/R/linearmrkcon.R0000644000176200001440000000307613115225157015135 0ustar liggesusers# # linearmrkcon.R # # mark connection function & mark equality function for linear networks # # $Revision: 1.4 $ $Date: 2017/02/07 08:12:05 $ # linearmarkconnect <- function(X, i, j, r=NULL, ...) 
{ if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j) || is.null(j)) j <- lev[2L] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # pcfij <- linearpcfcross(X, i, j, r=r, ...) pcfall <- linearpcf(X, r=r, ...) qi <- mean(marx == i) qj <- mean(marx == j) result <- eval.fv(qi * qj * pcfij/pcfall) # rebrand result <- rebadge.as.crossfun(result, "p", "L", i, j) attr(result, "labl") <- attr(pcfij, "labl") return(result) } linearmarkequal <- local({ linearmarkequal <- function(X, r=NULL, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") ## ensure distance information is present X <- as.lpp(X, sparse=FALSE) lev <- levels(marks(X)) v <- list() for(l in lev) v[[l]] <- linearmarkconnect(X, l, l, r=r, ...) 
result <- Reduce(addfuns, v) result <-rebadge.fv(result, quote(p[L](r)), new.fname=c("p", "L")) attr(result, "labl") <- attr(v[[1L]], "labl") return(result) } addfuns <- function(f1, f2) eval.fv(f1 + f2) linearmarkequal }) spatstat/R/treebranches.R0000644000176200001440000001361413115225157015115 0ustar liggesusers#' #' treebranches.R #' #' Label branches in a tree #' #' $Revision: 1.4 $ $Date: 2016/07/16 03:14:51 $ #' compute branch labels for each *vertex* in the tree L treebranchlabels <- local({ treebranchlabels <- function(L, root=1) { stopifnot(inherits(L, "linnet")) stopifnot(length(root) == 1) V <- L$vertices #' M <- L$m #' assign label to each vertex e <- rep(NA_character_, npoints(V)) #' do root e[root] <- "" #' recurse descendtree(L, root, e) } descendtree <- function(L, at, labels, verbose=FALSE) { if(verbose) cat(paste("Descending from node", at, "\n")) below <- which(L$m[at, ] & is.na(labels)) while(length(below) == 1) { if(verbose) cat(paste("Line from", at, paren(labels[at]), "to", below, "\n")) labels[below] <- labels[at] at <- below below <- which(L$m[at, ] & is.na(labels)) } if(length(below) == 0) { if(verbose) cat("*\n") return(labels) } if(verbose) cat(paste("split into", length(below), "\n")) if(length(below) > 26) stop("Oops - degree > 27") labels[below] <- paste(labels[at], letters[1:length(below)], sep="") for(b in below) labels <- descendtree(L, b, labels) return(labels) } treebranchlabels }) #' Function which will return the branch label associated with #' any point on the network branchlabelfun <- function(L, root=1) { L <- as.linnet(L) vertexLabels <- treebranchlabels(L, root=root) labfrom <- vertexLabels[L$from] labto <- vertexLabels[L$to] segmentLabels <- ifelse(nchar(labfrom) < nchar(labto), labto, labfrom) f <- function(x, y, seg, tp) { segmentLabels[seg] } fL <- linfun(f, L) return(fL) } #' convenience function for use in model formulae begins <- function(x, firstbit) { stopifnot(is.character(firstbit) && length(firstbit) == 1) n 
<- nchar(firstbit) if(n == 0) rep(TRUE, length(x)) else (substr(x, 1, n) == firstbit) } #' extract the sub-tree for a particular label #' e.g. extractbranch(L, "a") extracts everything whose label begins with 'a' extractbranch <- function(X, ...) { UseMethod("extractbranch") } extractbranch.linnet <- function(X, code, labels, ..., which=NULL) { L <- X V <- L$vertices if(!is.null(which)) { stopifnot(is.logical(which)) if(length(which) != npoints(V)) stop("Argument 'which' is the wrong length") vin <- which } else { if(length(labels) != npoints(V)) stop("labels vector is the wrong length") #' which vertices are included #' (a) vertices with the right initial code vin <- (substr(labels, 1, nchar(code)) == code) #' (b) the apex isneighbour <- (rowSums(L$m[,vin]) > 0) apexcode <- if(nchar(code) > 1) substr(code, 1, nchar(code)-1) else "" vin <- vin | (isneighbour & (labels == apexcode)) } #' which edges are included ein <- vin[L$from] & vin[L$to] #' new serial numbers for vertices vId <- cumsum(vin) #' pack up sparse <- L$sparse out <- list(vertices=V[vin], m=L$m[vin,vin], lines=L$lines[ein], from=vId[L$from[ein]], to=vId[L$to[ein]], dpath=if(sparse) NULL else L$dpath[vin,vin], sparse=sparse, window=V$window) class(out) <- c("linnet", class(out)) #' pre-compute bounding radius if(sparse) out$boundingradius <- boundingradius(out) out$toler <- default.linnet.tolerance(out) attr(out, "which") <- vin return(out) } extractbranch.lpp <- function(X, code, labels, ..., which=NULL) { L <- as.linnet(X) #' make sub-network if(missing(code)) code <- NULL if(missing(labels)) labels <- NULL Lnew <- extractbranch(L, code, labels, which=which) #' which vertices are included vin <- attr(Lnew, "vin") #' which edges are included ein <- vin[L$from] & vin[L$to] #' which data points are included xin <- ein[coords(X)$seg] #' new serial numbers for edges eId <- cumsum(ein) #' construct subset Xnew <- X[xin] Xnew$domain <- Lnew #' apply new serial numbers to segment map coords(Xnew)$seg <- 
eId[coords(Xnew)$seg] #' return(Xnew) } deletebranch <- function(X, ...) { UseMethod("deletebranch") } deletebranch.linnet <- function(X, code, labels, ...) { L <- X V <- L$vertices if(length(labels) != npoints(V)) stop("labels vector is the wrong length") #' which vertices are retained vkeep <- (substr(labels, 1, nchar(code)) != code) #' which edges are retained ekeep <- vkeep[L$from] & vkeep[L$to] #' new serial numbers for vertices vId <- cumsum(vkeep) #' pack up sparse <- L$sparse out <- list(vertices=V[vkeep], m=L$m[vkeep,vkeep], lines=L$lines[ekeep], from=vId[L$from[ekeep]], to=vId[L$to[ekeep]], dpath=if(sparse) NULL else L$dpath[vkeep,vkeep], sparse=sparse, window=V$window) class(out) <- c("linnet", class(out)) #' recompute bounding radius if(sparse) out$boundingradius <- boundingradius(out) out$toler <- default.linnet.tolerance(out) attr(out, "which") <- vkeep return(out) } deletebranch.lpp <- function(X, code, labels, ...) { #' make sub-network L <- as.linnet(X) Lnew <- deletebranch(L, code=code, labels=labels) #' which vertices are retained vkeep <- attr(Lnew, "which") #' which edges are retained ekeep <- vkeep[L$from] & vkeep[L$to] #' which data points are retained xin <- ekeep[coords(X)$seg] #' new serial numbers for vertices # vId <- cumsum(vkeep) #' new serial numbers for edges eId <- cumsum(ekeep) #' construct subset Xnew <- X[xin] Xnew$domain <- Lnew #' apply new serial numbers to segment map coords(Xnew)$seg <- eId[coords(Xnew)$seg] #' return(Xnew) } treeprune <- function(X, root=1, level=0){ ## collect names of branches to be pruned tb <- treebranchlabels(as.linnet(X), root=root) keep <- (nchar(tb) <= level) Y <- extractbranch(X, which=keep) return(Y) } spatstat/R/hasclose.R0000644000176200001440000001234713115271075014253 0ustar liggesusers#' #' hasclose.R #' #' Determine whether each point has a close neighbour #' #' $Revision: 1.11 $ $Date: 2017/06/05 10:31:58 $ has.close <- function(X, r, Y=NULL, ...) 
{ UseMethod("has.close") } has.close.default <- function(X, r, Y=NULL, ..., periodic=FALSE) { trap.extra.arguments(...) if(!periodic) { nd <- if(is.null(Y)) nndist(X) else nncross(X, Y, what="dist") return(nd <= r) } if(is.null(Y)) { pd <- pairdist(X, periodic=TRUE) diag(pd) <- Inf } else { pd <- crossdist(X, Y, periodic=TRUE) } # return(apply(pd <= r, 1, any)) return(matrowany(pd <= r)) } has.close.ppp <- function(X, r, Y=NULL, ..., periodic=FALSE, sorted=FALSE) { trap.extra.arguments(...) nX <- npoints(X) if(nX <= 1) return(logical(nX)) #' sort by increasing x coordinate cX <- coords(X) if(!sorted) { oo <- order(cX$x) cX <- cX[oo,,drop=FALSE] } if(is.null(Y)) { if(!periodic) { zz <- .C("hasXclose", n = as.integer(nX), x = as.double(cX$x), y = as.double(cX$y), r = as.double(r), t = as.integer(integer(nX)), PACKAGE = "spatstat") } else { b <- sidelengths(Frame(X)) zz <- .C("hasXpclose", n = as.integer(nX), x = as.double(cX$x), y = as.double(cX$y), r = as.double(r), b = as.double(b), t = as.integer(integer(nX)), PACKAGE = "spatstat") } } else { stopifnot(is.ppp(Y)) nY <- npoints(Y) if(nY == 0) return(logical(nX)) cY <- coords(Y) #' sort Y by increasing x coordinate if(!sorted) { ooY <- order(cY$x) cY <- cY[ooY, , drop=FALSE] } if(!periodic) { zz <- .C("hasXYclose", n1 = as.integer(nX), x1 = as.double(cX$x), y1 = as.double(cX$y), n2 = as.integer(nY), x2 = as.double(cY$x), y2 = as.double(cY$y), r = as.double(r), t = as.integer(integer(nX)), PACKAGE = "spatstat") } else { bX <- sidelengths(Frame(X)) bY <- sidelengths(Frame(Y)) if(any(bX != bY)) warning("Windows are not equal: periodic distance may be erroneous") zz <- .C("hasXYpclose", n1 = as.integer(nX), x1 = as.double(cX$x), y1 = as.double(cX$y), n2 = as.integer(nY), x2 = as.double(cY$x), y2 = as.double(cY$y), r = as.double(r), b = as.double(bX), t = as.integer(integer(nX)), PACKAGE = "spatstat") } } tt <- as.logical(zz$t) if(sorted) return(tt) #' reinstate original order ans <- logical(nX) ans[oo] <- tt return(ans) 
} has.close.pp3 <- function(X, r, Y=NULL, ..., periodic=FALSE, sorted=FALSE) { trap.extra.arguments(...) nX <- npoints(X) if(nX <= 1) return(logical(nX)) cX <- coords(X) if(!sorted) { #' sort by increasing x coordinate oo <- order(cX$x) cX <- cX[oo,,drop=FALSE] } if(is.null(Y)) { if(!periodic) { zz <- .C("hasX3close", n = as.integer(nX), x = as.double(cX$x), y = as.double(cX$y), z = as.double(cX$z), r = as.double(r), t = as.integer(integer(nX)), PACKAGE = "spatstat") } else { b <- sidelengths(as.box3(X)) zz <- .C("hasX3pclose", n = as.integer(nX), x = as.double(cX$x), y = as.double(cX$y), z = as.double(cX$z), r = as.double(r), b = as.double(b), t = as.integer(integer(nX)), PACKAGE = "spatstat") } } else { stopifnot(is.pp3(Y)) nY <- npoints(Y) if(nY == 0) return(logical(nX)) cY <- coords(Y) if(!sorted) { #' sort Y by increasing x coordinate ooY <- order(cY$x) cY <- cY[ooY, , drop=FALSE] } if(!periodic) { zz <- .C("hasXY3close", n1 = as.integer(nX), x1 = as.double(cX$x), y1 = as.double(cX$y), z1 = as.double(cX$z), n2 = as.integer(nY), x2 = as.double(cY$x), y2 = as.double(cY$y), z2 = as.double(cY$z), r = as.double(r), t = as.integer(integer(nX)), PACKAGE = "spatstat") } else { bX <- sidelengths(as.box3(X)) bY <- sidelengths(as.box3(Y)) if(any(bX != bY)) warning("Domains are not equal: periodic distance may be erroneous") zz <- .C("hasXY3pclose", n1 = as.integer(nX), x1 = as.double(cX$x), y1 = as.double(cX$y), z1 = as.double(cX$z), n2 = as.integer(nY), x2 = as.double(cY$x), y2 = as.double(cY$y), z2 = as.double(cY$z), r = as.double(r), b = as.double(bX), t = as.integer(integer(nX)), PACKAGE = "spatstat") } } tt <- as.logical(zz$t) if(sorted) return(tt) #' reinstate original order ans <- logical(nX) ans[oo] <- tt return(ans) } spatstat/R/covariates.R0000755000176200001440000000306713115271075014614 0ustar liggesusers# # covariates.R # # evaluate covariates # # $Revision: 1.3 $ $Date: 2015/10/21 09:06:57 $ # evalCovariate <- function(covariate, locations) { # evaluate 
covariate of any kind at specified locations covvalues <- if(is.im(covariate)) safelookup(covariate, locations) else if(is.function(covariate)) covariate(locations$x, locations$y) else if(is.numeric(covariate) || is.factor(covariate)) { if(length(covariate) == 1L) rep.int(covariate, length(locations$x)) else if(length(covariate) == length(locations$x)) covariate else stop("Inappropriate length for covariate vector") } else stop("Covariate should be an image, a function or a factor/numeric vector") return(covvalues) } ppmCovariates <- function(model) { # generate list of all covariates in ppm (excluding marks) stopifnot(is.ppm(model)) co <- as.list(model$covariates) xy <- list(x=function(x,y){x}, y=function(x,y){y}) coplus <- append(co, xy) return(as.anylist(coplus)) } findCovariate <- function(covname, scope, scopename=NULL) { # find the named covariate in the given ppm object or list if(is.ppm(scope)) { covlist <- ppmCovariates(scope) if(missing(scopename)) scopename <- "covariates in model" } else if(is.list(scope)) { covlist <- scope } else stop("scope should be a named list of covariates, or a ppm object") if(!(covname %in% names(covlist))) stop(paste("covariate", dQuote(covname), "not found", if(!is.null(scopename)) paste("amongst", scopename) else NULL)) covlist[[covname]] } spatstat/R/matrixpower.R0000644000176200001440000000457113115225157015033 0ustar liggesusers#' #' matrixpower.R #' #' $Revision: 1.1 $ $Date: 2016/11/13 01:50:51 $ #' matrixsqrt <- function(x, complexOK=TRUE) { ## matrix square root if(length(dim(x)) != 2) stop("x must be a matrix") if(!is.matrix(x)) x <- as.matrix(x) if(missing(complexOK) && is.complex(x)) complexOK <- TRUE if(!complexOK) stopifnot(is.numeric(x)) else stopifnot(is.numeric(x) || is.complex(x)) e <- eigen(x) values <- e$values vectors <- e$vectors if(any(values < 0)) { if(complexOK) values <- as.complex(values) else stop("matrix has negative eigenvalues: square root is complex", call.=FALSE) } y <- vectors %*% 
diag(sqrt(values)) %*% t(vectors) if(!is.null(dn <- dimnames(x))) dimnames(y) <- rev(dn) return(y) } matrixinvsqrt <- function(x, complexOK=TRUE) { ## matrix inverse square root if(length(dim(x)) != 2) stop("x must be a matrix") if(!is.matrix(x)) x <- as.matrix(x) if(missing(complexOK) && is.complex(x)) complexOK <- TRUE if(!complexOK) stopifnot(is.numeric(x)) else stopifnot(is.numeric(x) || is.complex(x)) e <- eigen(x) values <- e$values vectors <- e$vectors if(any(values == 0)) stop("matrix is singular; cannot compute inverse square root", call.=FALSE) if(any(values < 0)) { if(complexOK) values <- as.complex(values) else stop("matrix has negative eigenvalues: inverse square root is complex", call.=FALSE) } y <- vectors %*% diag(1/sqrt(values)) %*% t(vectors) if(!is.null(dn <- dimnames(x))) dimnames(y) <- rev(dn) return(y) } matrixpower <- function(x, power, complexOK=TRUE) { check.1.real(power) if(length(dim(x)) != 2) stop("x must be a matrix") if(!is.matrix(x)) x <- as.matrix(x) if(missing(complexOK) && is.complex(x)) complexOK <- TRUE if(!complexOK) stopifnot(is.numeric(x)) else stopifnot(is.numeric(x) || is.complex(x)) e <- eigen(x) values <- e$values vectors <- e$vectors if(any(values == 0) && power < 0) stop("matrix is singular; cannot compute negative power", call.=FALSE) if(any(values < 0) && (power != ceiling(power))) { if(complexOK) values <- as.complex(values) else stop("matrix has negative eigenvalues: result is complex", call.=FALSE) } y <- vectors %*% diag(values^power) %*% t(vectors) if(!is.null(dn <- dimnames(x))) dimnames(y) <- rev(dn) return(y) } spatstat/R/progress.R0000644000176200001440000002616413115225157014320 0ustar liggesusers# # progress.R # # $Revision: 1.21 $ $Date: 2016/04/25 02:34:40 $ # # progress plots (envelope representations) # dclf.progress <- function(X, ...) mctest.progress(X, ..., exponent=2) mad.progress <- function(X, ...) 
mctest.progress(X, ..., exponent=Inf) mctest.progress <- local({ smoothquantile <- function(z, alpha) { min(quantile(density(z), 1-alpha), max(z)) } silentmax <- function(z) { if(all(is.nan(z))) return(NaN) z <- z[is.finite(z)] if(length(z) == 0) return(NA) else return(max(z)) } mctest.progress <- function(X, fun=Lest, ..., exponent=1, nrank=1, interpolate=FALSE, alpha, rmin=0) { check.1.real(exponent) explain.ifnot(exponent >= 0) if(missing(fun) && inherits(X, "envelope")) fun <- NULL Z <- envelopeProgressData(X, fun=fun, ..., rmin=rmin, exponent=exponent) R <- Z$R devdata <- Z$devdata devsim <- Z$devsim nsim <- ncol(devsim) # determine 'alpha' and 'nrank' if(missing(alpha)) { if((nrank %% 1) != 0) stop("nrank must be an integer") alpha <- nrank/(nsim + 1) } else { check.1.real(alpha) stopifnot(alpha > 0 && alpha < 1) if(!interpolate) { if(!missing(nrank)) warning("nrank was ignored because alpha was given", call.=FALSE) nrank <- alpha * (nsim + 1) if(abs(nrank - round(nrank)) > 1e-2) stop("alpha should be a multiple of 1/(nsim + 1)", call.=FALSE) nrank <- as.integer(round(nrank)) } } alphastring <- paste(100 * alpha, "%%", sep="") # compute critical values critval <- if(interpolate) apply(devsim, 1, smoothquantile, alpha=alpha) else if(nrank == 1) apply(devsim, 1, silentmax) else apply(devsim, 1, orderstats, k=nrank, decreasing=TRUE) # create fv object fname <- if(is.infinite(exponent)) "mad" else if(exponent == 2) "T" else paste("D[",exponent,"]", sep="") ylab <- if(is.infinite(exponent)) quote(mad(R)) else if(exponent == 2) quote(T(R)) else eval(substitute(quote(D[p](R)), list(p=exponent))) df <- data.frame(R=R, obs=devdata, crit=critval, zero=0) mcname <- if(interpolate) "interpolated Monte Carlo" else "Monte Carlo" p <- fv(df, argu="R", ylab=ylab, valu="obs", fmla = . 
~ R, desc = c("Interval endpoint R", "observed value of test statistic %s", paste(mcname, alphastring, "critical value for %s"), "zero"), labl=c("R", "%s(R)", "%s[crit](R)", "0"), unitname = unitname(X), fname = fname) fvnames(p, ".") <- c("obs", "crit", "zero") fvnames(p, ".s") <- c("zero", "crit") p <- hasenvelope(p, Z$envelope) # envelope may be NULL return(p) } mctest.progress }) # Do not call this function. # Performs underlying computations envelopeProgressData <- local({ envelopeProgressData <- function(X, fun=Lest, ..., exponent=1, alternative=c("two.sided", "less", "greater"), leaveout=1, scale=NULL, clamp=FALSE, normalize=FALSE, deflate=FALSE, rmin=0, save.envelope = savefuns || savepatterns, savefuns = FALSE, savepatterns = FALSE) { alternative <- match.arg(alternative) if(!(leaveout %in% 0:2)) stop("Argument leaveout should equal 0, 1 or 2") ## compute or extract simulated functions X <- envelope(X, fun=fun, ..., alternative=alternative, savefuns=TRUE, savepatterns=savepatterns) Y <- attr(X, "simfuns") ## extract values R <- with(X, .x) obs <- with(X, .y) sim <- as.matrix(as.data.frame(Y))[, -1] nsim <- ncol(sim) ## choose function as reference has.theo <- ("theo" %in% names(X)) use.theo <- identical(attr(X, "einfo")$use.theory, TRUE) if(use.theo && !has.theo) warning("No theoretical function available; use.theory ignored") if(use.theo && has.theo) { # theo.used <- TRUE reference <- with(X, theo) leaveout <- 0 } else { # theo.used <- FALSE if(leaveout == 2) { ## use sample mean of simulations only reference <- with(X, mmean) } else { ## use sample mean of simulations *and* observed reference <- (nsim * with(X, mmean) + obs)/(nsim + 1) } } ## restrict range if(rmin > 0) { if(sum(R >= rmin) < 2) stop("rmin is too large for the available range of r values") nskip <- sum(R < rmin) } else nskip <- 0 ## determine rescaling if any if(is.null(scale)) { scaling <- NULL scr <- 1 } else if(is.function(scale)) { scaling <- scale(R) sname <- "scale(r)" ans <- 
check.nvector(scaling, length(R), things="values of r", fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (scaling <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[R > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) scaling[bad] <- 1 } scr <- scaling } else stop("Argument scale should be a function") ## compute deviations rawdevDat <- Deviation(obs, reference, leaveout, nsim, sim[,1]) rawdevSim <- Deviation(sim, reference, leaveout, nsim) ## evaluate signed/absolute deviation relevant to alternative ddat <- RelevantDeviation(rawdevDat, alternative, clamp, scaling) dsim <- RelevantDeviation(rawdevSim, alternative, clamp, scaling) ## compute test statistics if(is.infinite(exponent)) { ## MAD devdata <- cummaxskip(ddat, nskip) devsim <- apply(dsim, 2, cummaxskip, nskip=nskip) if(deflate) { devdata <- scr * devdata devsim <- scr * devsim } testname <- "Maximum absolute deviation test" } else { dR <- c(0, diff(R)) if(clamp || (alternative == "two.sided")) { ## deviations are nonnegative devdata <- cumsumskip(dR * ddat^exponent, nskip) devsim <- apply(dR * dsim^exponent, 2, cumsumskip, nskip=nskip) } else { ## sign of deviations should be retained devdata <- cumsumskip(dR * sign(ddat) * abs(ddat)^exponent, nskip=nskip) devsim <- apply(dR * sign(dsim) * abs(dsim)^exponent, 2, cumsumskip, nskip=nskip) } if(normalize) { devdata <- devdata/R devsim <- sweep(devsim, 1, R, "/") } if(deflate) { devdata <- scr * sign(devdata) * abs(devdata)^(1/exponent) devsim <- scr * sign(devsim) * abs(devsim)^(1/exponent) } testname <- if(exponent == 2) "Diggle-Cressie-Loosmore-Ford test" else if(exponent == 1) "Integral absolute deviation test" else paste("Integrated", ordinal(exponent), "Power Deviation test") } result <- list(R=R, devdata=devdata, devsim=devsim, testname=testname, scaleR=scr, clamp=clamp) if(save.envelope) result$envelope <- X 
return(result) } cumsumskip <- function(x, nskip=0) { if(nskip == 0) cumsum(x) else c(rep(NA, nskip), cumsum(x[-seq_len(nskip)])) } cummaxskip <- function(x, nskip=0) { if(nskip == 0) cummax(x) else c(rep(NA, nskip), cummax(x[-seq_len(nskip)])) } envelopeProgressData }) dg.progress <- function(X, fun=Lest, ..., exponent=2, nsim=19, nsimsub=nsim-1, nrank=1, alpha, leaveout=1, interpolate=FALSE, rmin=0, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { env.here <- sys.frame(sys.nframe()) if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub)) stop("nsim and nsimsub must be relatively prime") ## determine 'alpha' and 'nrank' if(missing(alpha)) { if((nrank %% 1) != 0) stop("nrank must be an integer") alpha <- nrank/(nsim + 1) } else { check.1.real(alpha) stopifnot(alpha > 0 && alpha < 1) if(!interpolate) { if(!missing(nrank)) warning("nrank was ignored because alpha was given", call.=FALSE) nrank <- alpha * (nsim + 1) if(abs(nrank - round(nrank)) > 1e-2) stop("alpha should be a multiple of 1/(nsim + 1)", call.=FALSE) nrank <- as.integer(round(nrank)) } } if(verbose) cat("Computing first-level test data...") ## generate or extract simulated patterns and functions E <- envelope(X, fun=fun, ..., nsim=nsim, savepatterns=TRUE, savefuns=TRUE, verbose=FALSE, envir.simul=env.here) ## get progress data PD <- envelopeProgressData(E, fun=fun, ..., rmin=rmin, nsim=nsim, exponent=exponent, leaveout=leaveout, verbose=FALSE) ## get first level MC test significance trace T1 <- mctest.sigtrace(E, fun=fun, nsim=nsim, exponent=exponent, leaveout=leaveout, interpolate=interpolate, rmin=rmin, confint=FALSE, verbose=FALSE, ...) R <- T1$R phat <- T1$pest if(verbose) { cat("Done.\nComputing second-level data... 
") state <- list() } ## second level traces simpat <- attr(E, "simpatterns") phat2 <- matrix(, length(R), nsim) for(j in seq_len(nsim)) { simj <- simpat[[j]] sigj <- mctest.sigtrace(simj, fun=fun, nsim=nsimsub, exponent=exponent, interpolate=interpolate, leaveout=leaveout, rmin=rmin, confint=FALSE, verbose=FALSE, ...) phat2[,j] <- sigj$pest if(verbose) state <- progressreport(j, nsim, state=state) } if(verbose) cat("Done.\n") ## Dao-Genton procedure dgcritrank <- 1 + rowSums(phat > phat2) dgcritrank <- pmin(dgcritrank, nsim) devsim.sort <- t(apply(PD$devsim, 1, sort, decreasing=TRUE, na.last=TRUE)) ii <- cbind(seq_along(dgcritrank), dgcritrank) devcrit <- devsim.sort[ii] devdata <- PD$devdata ## create fv object fname <- if(is.infinite(exponent)) "mad" else if(exponent == 2) "T" else paste("D[",exponent,"]", sep="") ylab <- if(is.infinite(exponent)) quote(mad(R)) else if(exponent == 2) quote(T(R)) else eval(substitute(quote(D[p](R)), list(p=exponent))) df <- data.frame(R=R, obs=devdata, crit=devcrit, zero=0) mcname <- if(interpolate) "interpolated Monte Carlo" else "Monte Carlo" p <- fv(df, argu="R", ylab=ylab, valu="obs", fmla = . 
~ R, desc = c("Interval endpoint R", "observed value of test statistic %s", paste(mcname, paste0(100 * alpha, "%%"), "critical value for %s"), "zero"), labl=c("R", "%s(R)", "%s[crit](R)", "0"), unitname = unitname(X), fname = fname) fvnames(p, ".") <- c("obs", "crit", "zero") fvnames(p, ".s") <- c("zero", "crit") if(savefuns || savepatterns) p <- hasenvelope(p, E) return(p) } spatstat/R/adaptive.density.R0000755000176200001440000000315413115271075015724 0ustar liggesusers# # adaptive.density.R # # $Revision: 1.8 $ $Date: 2015/07/11 08:19:26 $ # # adaptive.density <- function(X, f=0.1, ..., nrep=1, verbose=TRUE) { stopifnot(is.ppp(X)) npts <- npoints(X) check.1.real(f) if(badprobability(f)) stop("f should be a probability between 0 and 1") ntess <- floor(f * npts) if(ntess == 0) { # naive estimate of intensity if(f > 0 && verbose) splat("Tiny threshold: returning uniform intensity estimate") W <- X$window lam <- npts/area(W) return(as.im(lam, W, ...)) } if(ntess == npts) { ## Voronoi/Dirichlet estimate tes <- dirichlet(X) # tesim <- as.im(tes, ...) tesim <- nnmap(X, what="which", ...) lam <- 1/tile.areas(tes) out <- eval.im(lam[tesim]) return(out) } if(nrep > 1) { # estimate is the average of nrep randomised estimates total <- 0 if(verbose) cat(paste("Computing", nrep, "intensity estimates...")) state <- list() for(i in seq_len(nrep)) { estimate <- adaptive.density(X, f, ..., nrep=1) total <- eval.im(total + estimate) if(verbose) state <- progressreport(i, nrep, state=state) } if(verbose) cat("Done.\n") average <- eval.im(total/nrep) return(average) } ncount <- npts - ntess fcount <- ncount/npts itess <- sample(seq_len(npts), ntess, replace=FALSE) Xtess <- X[itess] Xcount <- X[-itess] tes <- dirichlet(Xtess) lam <- unlist(lapply(split(Xcount, tes), intensity)) # tesim <- as.im(tes, ...) # out <- eval.im(lam[as.integer(tesim)]/fcount) tesim <- nnmap(Xtess, what="which", ...) 
out <- eval.im(lam[tesim]/fcount) return(out) } spatstat/R/lurking.R0000755000176200001440000004132013115271120014110 0ustar liggesusers# Lurking variable plot for arbitrary covariate. # # # $Revision: 1.52 $ $Date: 2017/02/07 08:12:05 $ # lurking <- local({ cumsumna <- function(x) { cumsum(ifelse(is.na(x), 0, x)) } ## main function lurking <- function(object, covariate, type="eem", cumulative=TRUE, clipwindow=default.clipwindow(object), rv = NULL, plot.sd=is.poisson(object), envelope=FALSE, nsim=39, nrank=1, plot.it=TRUE, typename, covname, oldstyle=FALSE, check=TRUE, ..., splineargs=list(spar=0.5), verbose=TRUE) { cl <- match.call() ## default name for covariate if(missing(covname) || is.null(covname)) { covname <- if(is.name(cl$covariate)) as.character(cl$covariate) else if(is.expression(cl$covariate)) cl$covariate else NULL } if(!identical(envelope, FALSE)) { ## compute simulation envelope Xsim <- NULL if(!identical(envelope, TRUE)) { ## some kind of object Y <- envelope if(is.list(Y) && all(sapply(Y, is.ppp))) { Xsim <- Y envelope <- TRUE } else if(inherits(Y, "envelope")) { Xsim <- attr(Y, "simpatterns") if(is.null(Xsim)) stop("envelope does not contain simulated point patterns") envelope <- TRUE } else stop("Unrecognised format of argument: envelope") nXsim <- length(Xsim) if(missing(nsim) && (nXsim < nsim)) { warning(paste("Only", nXsim, "simulated patterns available")) nsim <- nXsim } } } ## validate object if(is.ppp(object)) { X <- object object <- ppm(X ~1, forcefit=TRUE) dont.complain.about(X) } else verifyclass(object, "ppm") ## may need to refit the model if(plot.sd && is.null(getglmfit(object))) object <- update(object, forcefit=TRUE, use.internal=TRUE) ## match type argument type <- pickoption("type", type, c(eem="eem", raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, eem="exponential energy weights", raw="raw residuals", inverse="inverse-lambda residuals", pearson="Pearson 
residuals") ## extract spatial locations Q <- quad.ppm(object) datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) ## subset of quadrature points used to fit model subQset <- getglmsubset(object) if(is.null(subQset)) subQset <- rep.int(TRUE, n.quad(Q)) ################################################################# ## compute the covariate if(is.im(covariate)) { covvalues <- covariate[quadpoints, drop=FALSE] } else if(is.vector(covariate) && is.numeric(covariate)) { covvalues <- covariate if(length(covvalues) != quadpoints$n) stop("Length of covariate vector,", length(covvalues), "!=", quadpoints$n, ", number of quadrature points") } else if(is.expression(covariate)) { ## Expression involving covariates in the model glmdata <- getglmdata(object) ## Fix special cases if(is.null(glmdata)) { ## default glmdata <- data.frame(x=quadpoints$x, y=quadpoints$y) if(is.marked(quadpoints)) glmdata$marks <- marks(quadpoints) } ## ensure x and y are in data frame if(!all(c("x","y") %in% names(glmdata))) { glmdata$x <- quadpoints$x glmdata$y <- quadpoints$y } if(!is.null(object$covariates)) { ## Expression may involve an external covariate that's not used in model neednames <- all.vars(covariate) if(!all(neednames %in% colnames(glmdata))) { moredata <- mpl.get.covariates(object$covariates, quadpoints, covfunargs=object$covfunargs) use <- !(names(moredata) %in% colnames(glmdata)) glmdata <- cbind(glmdata, moredata[,use,drop=FALSE]) } } ## Evaluate expression sp <- parent.frame() covvalues <- eval(covariate, envir= glmdata, enclos=sp) if(!is.numeric(covvalues)) stop("The evaluated covariate is not numeric") } else stop(paste("The", sQuote("covariate"), "should be either", "a pixel image, an expression or a numeric vector")) ################################################################# ## Validate covariate values nbg <- is.na(covvalues) if(any(offending <- nbg && subQset)) { if(is.im(covariate)) warning(paste(sum(offending), "out of", 
length(offending), "quadrature points discarded because", ngettext(sum(offending), "it lies", "they lie"), "outside the domain of the covariate image")) else warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is NA", "they are NA"))) } ## remove points ok <- !nbg & subQset Q <- Q[ok] covvalues <- covvalues[ok] quadpoints <- quadpoints[ok] ## adjust Z <- is.data(Q) wts <- w.quad(Q) if(any(is.infinite(covvalues) | is.nan(covvalues))) stop("covariate contains Inf or NaN values") ## Quadrature points marked by covariate value covq <- quadpoints %mark% as.numeric(covvalues) ################################################################ ## Residuals/marks attached to appropriate locations. ## Stoyan-Grabarnik weights are attached to the data points only. ## Others (residuals) are attached to all quadrature points. resvalues <- if(!is.null(rv)) rv else if(type=="eem") eem(object, check=check) else residuals.ppm(object, type=type, check=check) if(inherits(resvalues, "msr")) { ## signed or vector-valued measure resvalues <- resvalues$val if(ncol(as.matrix(resvalues)) > 1) stop("Not implemented for vector measures; use [.msr to split into separate components") } if(type != "eem") resvalues <- resvalues[ok] res <- (if(type == "eem") datapoints else quadpoints) %mark% as.numeric(resvalues) ## ... 
and the same locations marked by the covariate covres <- if(type == "eem") covq[Z] else covq ## NAMES OF THINGS ## name of the covariate if(is.null(covname)) covname <- if(is.expression(covariate)) covariate else "covariate" ## type of residual/mark if(missing(typename)) typename <- if(!is.null(rv)) "rv" else "" ####################################################################### ## START ANALYSIS ## Clip to subwindow if needed clip <- (!is.poisson.ppm(object) || !missing(clipwindow)) && !is.null(clipwindow) if(clip) { covq <- covq[clipwindow] res <- res[clipwindow] covres <- covres[clipwindow] clipquad <- inside.owin(quadpoints$x, quadpoints$y, clipwindow) wts <- wts[ clipquad ] } ## ----------------------------------------------------------------------- ## (A) EMPIRICAL CUMULATIVE FUNCTION ## based on data points if type="eem", otherwise on quadrature points ## Reorder the data/quad points in order of increasing covariate value ## and then compute the cumulative sum of their residuals/marks markscovres <- marks(covres) o <- fave.order(markscovres) covsort <- markscovres[o] cummark <- cumsumna(marks(res)[o]) ## we'll plot(covsort, cummark) in the cumulative case ## (B) THEORETICAL MEAN CUMULATIVE FUNCTION ## based on all quadrature points ## Range of covariate values covqmarks <- marks(covq) covrange <- range(covqmarks, na.rm=TRUE) ## Suitable breakpoints cvalues <- seq(from=covrange[1L], to=covrange[2L], length.out=100) csmall <- cvalues[1L] - diff(cvalues[1:2]) cbreaks <- c(csmall, cvalues) ## cumulative area as function of covariate values covclass <- cut(covqmarks, breaks=cbreaks) increm <- tapply(wts, covclass, sum) cumarea <- cumsumna(increm) ## compute theoretical mean (when model is true) mean0 <- if(type == "eem") cumarea else numeric(length(cumarea)) ## we'll plot(cvalues, mean0) in the cumulative case ## (A'),(B') DERIVATIVES OF (A) AND (B) ## Required if cumulative=FALSE ## Estimated by spline smoothing (with x values jittered) if(!cumulative) { ## 
fit smoothing spline to (A) ss <- do.call(smooth.spline, append(list(covsort, cummark), splineargs) ) ## estimate derivative of (A) derivmark <- predict(ss, covsort, deriv=1)$y ## similarly for (B) ss <- do.call(smooth.spline, append(list(cvalues, mean0), splineargs) ) derivmean <- predict(ss, cvalues, deriv=1)$y } ## ----------------------------------------------------------------------- ## Store what will be plotted if(cumulative) { empirical <- data.frame(covariate=covsort, value=cummark) theoretical <- data.frame(covariate=cvalues, mean=mean0) } else { empirical <- data.frame(covariate=covsort, value=derivmark) theoretical <- data.frame(covariate=cvalues, mean=derivmean) } ## ------------------------------------------------------------------------ ## (C) STANDARD DEVIATION if desired ## (currently implemented only for Poisson) ## (currently implemented only for cumulative case) if(plot.sd && !is.poisson.ppm(object)) warning(paste("standard deviation is calculated for Poisson model;", "not valid for this model")) if(plot.sd && cumulative) { ## Fitted intensity at quadrature points lambda <- fitted.ppm(object, type="trend", check=check) lambda <- lambda[ok] ## Fisher information for coefficients asymp <- vcov(object,what="internals") Fisher <- asymp$fisher ## Local sufficient statistic at quadrature points suff <- asymp$suff suff <- suff[ok, ,drop=FALSE] ## Clip if required if(clip) { lambda <- lambda[clipquad] suff <- suff[clipquad, , drop=FALSE] ## suff is a matrix } ## First term: integral of lambda^(2p+1) switch(type, pearson={ varI <- cumarea }, raw={ ## Compute sum of w*lambda for quadrature points in each interval dvar <- tapply(wts * lambda, covclass, sum) ## tapply() returns NA when the table is empty dvar[is.na(dvar)] <- 0 ## Cumulate varI <- cumsum(dvar) }, inverse=, ## same as eem eem={ ## Compute sum of w/lambda for quadrature points in each interval dvar <- tapply(wts / lambda, covclass, sum) ## tapply() returns NA when the table is empty 
dvar[is.na(dvar)] <- 0 ## Cumulate varI <- cumsum(dvar) }) ## variance-covariance matrix of coefficients V <- try(solve(Fisher), silent=TRUE) if(inherits(V, "try-error")) { warning("Fisher information is singular; reverting to oldstyle=TRUE") oldstyle <- TRUE } ## Second term: B' V B if(oldstyle) { varII <- 0 } else { ## lamp = lambda^(p + 1) lamp <- switch(type, raw = lambda, pearson = sqrt(lambda), inverse =, eem = as.integer(lambda > 0)) ## Compute sum of w * lamp * suff for quad points in intervals Bcontrib <- as.vector(wts * lamp) * suff dB <- matrix(, nrow=length(cumarea), ncol=ncol(Bcontrib)) for(j in seq_len(ncol(dB))) dB[,j] <- tapply(Bcontrib[,j], covclass, sum, na.rm=TRUE) ## tapply() returns NA when the table is empty dB[is.na(dB)] <- 0 ## Cumulate columns B <- apply(dB, 2, cumsum) ## compute B' V B for each i varII <- diag(B %*% V %*% t(B)) } ## ## variance of residuals varR <- varI - varII ## trap numerical errors nbg <- (varR < 0) if(any(nbg)) { ran <- range(varR) varR[nbg] <- 0 relerr <- abs(ran[1L]/ran[2L]) nerr <- sum(nbg) if(relerr > 1e-6) { warning(paste(nerr, "negative", ngettext(nerr, "value (", "values (min="), signif(ran[1L], 4), ")", "of residual variance reset to zero", "(out of", length(varR), "values)")) } } theoretical$sd <- sqrt(varR) } ## if(envelope) { ## compute envelopes by simulation cl$plot.it <- FALSE cl$envelope <- FALSE cl$rv <- NULL if(is.null(Xsim)) Xsim <- simulate(object, nsim=nsim, progress=verbose) values <- NULL if(verbose) { cat("Processing.. 
") state <- list() } for(i in seq_len(nsim)) { cl$object <- update(object, Xsim[[i]]) result.i <- eval(cl, parent.frame()) ## interpolate empirical values onto common sequence f.i <- with(result.i$empirical, approxfun(covariate, value, rule=2)) val.i <- f.i(theoretical$covariate) values <- cbind(values, val.i) if(verbose) state <- progressreport(i, nsim, state=state) } if(verbose) cat("Done.\n") hilo <- if(nrank == 1) apply(values, 1, range) else apply(values, 1, orderstats, k=c(nrank, nsim-nrank+1)) theoretical$upper <- hilo[1L,] theoretical$lower <- hilo[2L,] } ## ---------------- RETURN COORDINATES ---------------------------- stuff <- list(empirical=empirical, theoretical=theoretical) attr(stuff, "info") <- list(typename=typename, cumulative=cumulative, covrange=covrange, covname=covname) class(stuff) <- "lurk" ## --------------- PLOT THEM ---------------------------------- if(plot.it) plot(stuff, ...) return(invisible(stuff)) } lurking }) # plot a lurk object plot.lurk <- function(x, ..., shade="grey") { xplus <- append(x, attr(x, "info")) with(xplus, { ## work out plot range mr <- range(0, empirical$value, theoretical$mean, na.rm=TRUE) if(!is.null(theoretical$sd)) mr <- range(mr, theoretical$mean + 2 * theoretical$sd, theoretical$mean - 2 * theoretical$sd, na.rm=TRUE) if(!is.null(theoretical$upper)) mr <- range(mr, theoretical$upper, theoretical$lower, na.rm=TRUE) ## start plot vname <- paste(if(cumulative)"cumulative" else "marginal", typename) do.call(plot, resolve.defaults( list(covrange, mr), list(type="n"), list(...), list(xlab=covname, ylab=vname))) ## Envelopes if(!is.null(theoretical$upper)) { Upper <- theoretical$upper Lower <- theoretical$lower } else if(!is.null(theoretical$sd)) { Upper <- with(theoretical, mean+2*sd) Lower <- with(theoretical, mean-2*sd) } else Upper <- Lower <- NULL if(!is.null(Upper) && !is.null(Lower)) { xx <- theoretical$covariate if(!is.null(shade)) { ## shaded envelope region shadecol <- if(is.colour(shade)) shade else 
"grey" xx <- c(xx, rev(xx)) yy <- c(Upper, rev(Lower)) do.call.matched(polygon, resolve.defaults(list(x=xx, y=yy), list(...), list(border=shadecol, col=shadecol))) } else { do.call(lines, resolve.defaults( list(x = xx, y=Upper), list(...), list(lty=3))) do.call(lines, resolve.defaults( list(x = xx, y = Lower), list(...), list(lty=3))) } } ## Empirical lines(value ~ covariate, empirical, ...) ## Theoretical mean do.call(lines, resolve.defaults( list(mean ~ covariate, theoretical), list(...), list(lty=2))) }) return(invisible(NULL)) } spatstat/R/Kmulti.inhom.R0000755000176200001440000004221513115271075015030 0ustar liggesusers# # Kmulti.inhom.S # # $Revision: 1.50 $ $Date: 2016/06/28 08:06:01 $ # # # ------------------------------------------------------------------------ Lcross.inhom <- function(X, i, j, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] if(missing(j)) j <- levels(marks(X))[2] K <- Kcross.inhom(X, i, j, ...) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) # relabel the fv object L <- rebadge.fv(L, substitute(L[inhom,i,j](r), list(i=iname,j=jname)), c("L", paste0("list", paren(paste("inhom", i, j, sep=",")))), new.yexp=substitute(L[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(L, "labl") <- attr(K, "labl") attr(L, "dangerous") <- attr(K, "dangerous") return(L) } Ldot.inhom <- function(X, i, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] K <- Kdot.inhom(X, i, ...) 
L <- eval.fv(sqrt(pmax.int(K,0)/pi))
  ## relabel the fv object so it prints and plots as L[inhom, i ~ dot](r);
  ## pmax.int above clips negative K values before the square root
  iname <- make.parseable(paste(i))
  L <- rebadge.fv(L,
                  substitute(L[inhom, i ~ dot](r), list(i=iname)),
                  c("L", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")),
                  new.yexp=substitute(L[list(inhom, i ~ symbol("\267"))](r),
                                      list(i=iname)))
  ## carry over the plot labels and any 'dangerous' flags from the K estimate
  attr(L, "labl") <- attr(K, "labl")
  attr(L, "dangerous") <- attr(K, "dangerous")
  return(L)
}

## Inhomogeneous cross-type K function K[inhom,i,j](r).
## Delegates the computation to Kmulti.inhom() with I = points of type i
## and J = points of type j, then relabels the returned fv object.
## All estimation arguments are passed through to Kmulti.inhom.
"Kcross.inhom" <-
function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ...,
         r=NULL, breaks=NULL,
         correction = c("border", "isotropic", "Ripley", "translate"),
         sigma=NULL, varcov=NULL,
         lambdaIJ=NULL,
         lambdaX=NULL, update=TRUE, leaveoneout=TRUE)
{
  verifyclass(X, "ppp")
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  if(missing(correction)) correction <- NULL
  ## record whether 'update'/'leaveoneout' were supplied explicitly,
  ## so Kmulti.inhom can tailor its warnings
  miss.update <- missing(update)
  miss.leave <- missing(leaveoneout)
  marx <- marks(X)
  ## default types: the first two levels of the mark factor
  if(missing(i)) i <- levels(marx)[1]
  if(missing(j)) j <- levels(marx)[2]
  I <- (marx == i)
  J <- (marx == j)
  Iname <- paste("points with mark i =", i)
  Jname <- paste("points with mark j =", j)
  K <- Kmulti.inhom(X, I, J,
                    lambdaI, lambdaJ, ...,
                    r=r,breaks=breaks,correction=correction,
                    sigma=sigma, varcov=varcov,
                    lambdaIJ=lambdaIJ,
                    Iname=Iname, Jname=Jname,
                    lambdaX=lambdaX, update=update, leaveoneout=leaveoneout,
                    miss.update=miss.update, miss.leave=miss.leave)
  ## relabel the fv object as K[inhom,i,j](r)
  iname <- make.parseable(paste(i))
  jname <- make.parseable(paste(j))
  result <-
    rebadge.fv(K,
               substitute(K[inhom,i,j](r),
                          list(i=iname,j=jname)),
               c("K", paste0("list", paren(paste("inhom", i, j, sep=",")))),
               new.yexp=substitute(K[list(inhom,i,j)](r),
                                   list(i=iname,j=jname)))
  attr(result, "dangerous") <- attr(K, "dangerous")
  return(result)
}

## Inhomogeneous "i-to-any" K function K[inhom, i ~ dot](r).
## J is taken to be all points of X; computation is delegated
## to Kmulti.inhom and the result relabelled.
"Kdot.inhom" <-
function(X, i, lambdaI=NULL, lambdadot=NULL, ...,
         r=NULL, breaks=NULL,
         correction = c("border", "isotropic", "Ripley", "translate"),
         sigma=NULL, varcov=NULL,
         lambdaIdot=NULL,
         lambdaX=NULL, update=TRUE, leaveoneout=TRUE)
{
  verifyclass(X, "ppp")
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
if(missing(correction)) correction <- NULL miss.update <- missing(update) miss.leave <- missing(leaveoneout) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. all points Iname <- paste("points with mark i =", i) Jname <- paste("points") K <- Kmulti.inhom(X, I, J, lambdaI, lambdadot, ..., r=r,breaks=breaks,correction=correction, sigma=sigma, varcov=varcov, lambdaIJ=lambdaIdot, Iname=Iname, Jname=Jname, lambdaX=lambdaX, update=update, leaveoneout=leaveoneout, miss.update=miss.update, miss.leave=miss.leave) iname <- make.parseable(paste(i)) result <- rebadge.fv(K, substitute(K[inhom, i ~ dot](r), list(i=iname)), c("K", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), new.yexp=substitute(K[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) if(!is.null(dang <- attr(K, "dangerous"))) { dang[dang == "lambdaJ"] <- "lambdadot" dang[dang == "lambdaIJ"] <- "lambdaIdot" attr(result, "dangerous") <- dang } return(result) } "Kmulti.inhom"<- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), lambdaIJ=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") dflt <- list(Iname="points satisfying condition I", Jname="points satisfying condition J", miss.update=missing(update), miss.leave=missing(leaveoneout)) extrargs <- resolve.defaults(list(...), dflt) if(length(extrargs) > length(dflt)) warning("Additional arguments unrecognised") Iname <- extrargs$Iname Jname <- extrargs$Jname miss.update <- extrargs$miss.update miss.leave <- extrargs$miss.leave npts <- npoints(X) W <- as.owin(X) areaW <- area(W) # validate edge correction correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", 
isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) # validate I, J I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") XI <- X[I] XJ <- X[J] nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) # r values rmaxdefault <- rmax.rule("K", W, nJ/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max dangerous <- c("lambdaI", "lambdaJ") dangerI <- dangerJ <- TRUE ## intensity data if(!is.null(lambdaX)) { ## Intensity values for all points of X if(!is.null(lambdaI)) warning("lambdaI was ignored, because lambdaX was given", call.=FALSE) if(!is.null(lambdaJ)) warning("lambdaJ was ignored, because lambdaX was given", call.=FALSE) if(is.im(lambdaX)) { ## Look up intensity values lambdaI <- safelookup(lambdaX, X[I]) lambdaJ <- safelookup(lambdaX, X[J]) } else if(is.function(lambdaX)) { ## evaluate function at locations lambdaI <- lambdaX(XI$x, XI$y) lambdaJ <- lambdaX(XJ$x, XJ$y) } else if(is.numeric(lambdaX) && is.vector(as.numeric(lambdaX))) { ## vector of intensity values if(length(lambdaX) != npts) stop(paste("The length of", sQuote("lambdaX"), "should equal the number of points of X")) lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] } else if(is.ppm(lambdaX) || is.kppm(lambdaX) || is.dppm(lambdaX)) { ## point process model provides intensity model <- lambdaX if(!update) { ## just use intensity of fitted model lambdaI <- predict(model, locations=XI, type="trend") lambdaJ <- predict(model, locations=XJ, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else if(is.kppm(model)) { model <- update(model, X=X) lambdaX <- fitted(model, 
dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) if(leaveoneout && !miss.leave) warn.once("dppm.leaveoneout", "fitted.dppm(leaveoneout=TRUE)", "is not yet implemented") lambdaX <- fitted(model, dataonly=TRUE) } lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] dangerI <- dangerJ <- FALSE dangerous <- "lambdaIJ" if(miss.update) warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste("Argument lambdaX is not understood:", "it should be a numeric vector,", "an image, a function(x,y)", "or a fitted point process model (ppm, kppm or dppm)")) } else { ## lambdaI, lambdaJ expected if(is.null(lambdaI)) { ## estimate intensity dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") lambdaI <- density(X[I], ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaI)) { ## look up intensity values lambdaI <- safelookup(lambdaI, X[I]) } else if(is.function(lambdaI)) { ## evaluate function at locations lambdaI <- lambdaI(XI$x, XI$y) } else if(is.numeric(lambdaI) && is.vector(as.numeric(lambdaI))) { ## validate intensity vector if(length(lambdaI) != nI) stop(paste("The length of", sQuote("lambdaI"), "should equal the number of", Iname)) } else if(is.ppm(lambdaI) || is.kppm(lambdaI) || is.dppm(lambdaI)) { ## point process model provides intensity model <- lambdaI if(!update) { ## just use intensity of fitted model lambdaI <- predict(model, locations=XI, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else if(is.kppm(model)) { model <- update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) if(leaveoneout && !miss.leave) warn.once("dppm.leaveoneout", "fitted.dppm(leaveoneout=TRUE)", "is not yet implemented") 
lambdaX <- fitted(model, dataonly=TRUE)
        }
        lambdaI <- lambdaX[I]
        dangerI <- FALSE
        dangerous <- setdiff(dangerous, "lambdaI")
        if(miss.update)
          warn.once(key="Kmulti.inhom.update",
                    "The behaviour of Kmulti.inhom when lambda is a ppm object",
                    "has changed (in spatstat 1.45-3 and later).",
                    "See help(Kmulti.inhom)")
      }
    } else
      stop(paste(sQuote("lambdaI"), "should be a vector or an image"))

    if(is.null(lambdaJ)) {
      ## estimate intensity of the J pattern by kernel smoothing
      dangerJ <- FALSE
      dangerous <- setdiff(dangerous, "lambdaJ")
      lambdaJ <- density(X[J], ..., sigma=sigma, varcov=varcov,
                         at="points", leaveoneout=leaveoneout)
    } else if(is.im(lambdaJ)) {
      ## look up intensity values
      lambdaJ <- safelookup(lambdaJ, X[J])
    } else if(is.function(lambdaJ)) {
      ## evaluate function at locations
      XJ <- X[J]
      lambdaJ <- lambdaJ(XJ$x, XJ$y)
    } else if(is.numeric(lambdaJ) && is.vector(as.numeric(lambdaJ))) {
      ## validate intensity vector
      if(length(lambdaJ) != nJ)
        stop(paste("The length of", sQuote("lambdaJ"),
                   "should equal the number of", Jname))
    } else if(is.ppm(lambdaJ) || is.kppm(lambdaJ) || is.dppm(lambdaJ)) {
      ## point process model provides intensity
      model <- lambdaJ
      if(!update) {
        ## just use intensity of fitted model
        lambdaJ <- predict(model, locations=XJ, type="trend")
      } else {
        ## re-fit model to data X
        if(is.ppm(model)) {
          model <- update(model, Q=X)
          ## Bug fix: compute the fitted intensity unconditionally, as in
          ## the parallel lambdaI/lambdaX ppm branches.  Previously this
          ## assignment was guarded by 'if(leaveoneout && !miss.leave)',
          ## which left lambdaX unset in the default case.
          lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
        } else if(is.kppm(model)) {
          model <- update(model, X=X)
          lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
        } else {
          model <- update(model, X=X)
          if(leaveoneout && !miss.leave)
            ## fixed typo: message previously said 'fitted.pppm';
            ## the other branches of this file say 'fitted.dppm'
            warn.once("dppm.leaveoneout",
                      "fitted.dppm(leaveoneout=TRUE)",
                      "is not yet implemented")
} else stop(paste(sQuote("lambdaJ"), "should be a vector or an image")) } ## Weight for each pair if(!is.null(lambdaIJ)) { dangerIJ <- TRUE dangerous <- union(dangerous, "lambdaIJ") if(!is.matrix(lambdaIJ)) stop("lambdaIJ should be a matrix") if(nrow(lambdaIJ) != nI) stop(paste("nrow(lambdaIJ) should equal the number of", Iname)) if(ncol(lambdaIJ) != nJ) stop(paste("ncol(lambdaIJ) should equal the number of", Jname)) } else { dangerIJ <- FALSE } danger <- dangerI || dangerJ || dangerIJ # Recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame # It will be given more columns later K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") fname <- c("K", "list(inhom,I,J)") K <- fv(K, "r", quote(K[inhom, I, J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, yexp=quote(K[list(inhom,I,J)](r))) # identify close pairs of points close <- crosspairs(XI, XJ, max(r), what="ijd") # map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i jcloseJ <- close$j # Form weight for each pair if(is.null(lambdaIJ)) weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ]) else weight <- 1/lambdaIJ[cbind(icloseI, jcloseJ)] # Compute estimates by each of the selected edge corrections. 
if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(XI) bI <- b[icloseI] # apply reduced sample algorithm RS <- Kwtsum(dclose, bI, weight, b, 1/lambdaI, breaks) if(any(correction == "border")) { Kb <- RS$ratio K <- bind.fv(K, data.frame(border=Kb), makefvlabel(NULL, "hat", fname, "bord"), "border-corrected estimate of %s", "border") } if(any(correction == "bord.modif")) { Kbm <- RS$numerator/eroded.areas(W, r) K <- bind.fv(K, data.frame(bord.modif=Kbm), makefvlabel(NULL, "hat", fname, "bordm"), "modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "translate")) { ## translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) allweight <- edgewt * weight wh <- whist(dclose, breaks$val, allweight) Ktrans <- cumsum(wh)/areaW rmax <- diameter(W)/2 Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), makefvlabel(NULL, "hat", fname, "trans"), "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) allweight <- edgewt * weight wh <- whist(dclose, breaks$val, allweight) Kiso <- cumsum(wh)/areaW rmax <- diameter(W)/2 Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), makefvlabel(NULL, "hat", fname, "iso"), "Ripley isotropic correction estimate of %s", "iso") } ## default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) if(danger) attr(K, "dangerous") <- dangerous return(K) } spatstat/R/First.R0000755000176200001440000000433113164372375013547 0ustar liggesusers# First.R # # $Revision: 1.46 $ $Date: 2017/10/02 08:12:29 $ # .onLoad <- function(...) 
reset.spatstat.options() .onAttach <- function(libname, pkgname) { store.versionstring.spatstat() ver <- versionstring.spatstat() descfile <- system.file("DESCRIPTION", package="spatstat") nickfile <- system.file("doc", "Nickname.txt", package="spatstat") ni <- scan(file=nickfile, what=character(), n=1, quiet=TRUE) msg <- paste("\nspatstat", ver, " ", paren(paste("nickname:", sQuote(ni))), "\nFor an introduction to spatstat, type", sQuote("beginner"), "\n") packageStartupMessage(msg) if(exists("getRversion") && getRversion() >= "3.2.2") { ## check versions rv <- R.Version() rdate <- with(rv, ISOdate(year, month, day)) if(Sys.Date() - as.Date(rdate) > 270) { ## R version is really old; just warn about this packageStartupMessage(paste("\nNote:", rv$version.string, "is more than 9 months old;", "we strongly recommend upgrading to the latest version")) } else { ## warn if spatstat version is old packdate <- as.Date(read.dcf(file=descfile, fields="Date")) elapsed <- Sys.Date() - packdate if(elapsed > 75) { if(elapsed > 365) { n <- floor(elapsed/365) unit <- "year" sowhat <- "we strongly recommend upgrading to the latest version." } else if(elapsed > 100) { n <- floor(elapsed/30) unit <- "month" sowhat <- "we recommend upgrading to the latest version." } else { n <- floor(elapsed/7) unit <- "week" sowhat <- "a newer version should be available." 
} expired <- if(n == 1) paste("a", unit) else paste(n, paste0(unit, "s")) packageStartupMessage(paste("\nNote: spatstat version", ver, "is out of date by more than", paste0(expired, ";"), sowhat)) } } } # hack to avoid namespace/load quirks # .C("attachRFoptions", package="RandomFields") #DontDeclare # invisible(NULL) } spatstat/R/detpointprocfamilyfun.R0000644000176200001440000004200613115225157017072 0ustar liggesusers## detpointprocfamilyfun.R ## ## $Revision: 1.5 $ $Date: 2015/10/19 02:27:17 $ ## ## This file contains the function `detpointprocfamilyfun' ## to define new DPP model family functions ## and a print method for class `detpointprocfamilyfun' ## as well as the currently defined ## - dppBessel ## - dppCauchy ## - dppGauss ## - dppMatern ## - dppPowerExp detpointprocfamilyfun <- local({ names_formals <- function(f, dots = FALSE){ nam <- names(formals(f)) if(!dots) nam <- nam[nam!="..."] return(nam) } detpointprocfamilyfun <- function(kernel=NULL, specden=NULL, basis="fourierbasis", convkernel=NULL, Kfun=NULL, valid=NULL, intensity=NULL, dim=2, name="User-defined", isotropic=TRUE, range=NULL, parbounds=NULL, specdenrange=NULL, startpar=NULL, ...) 
{ ## Check which functions are given, check them for sanity and ## extract argument names and other stuff given <- NULL if(!is.null(kernel)){ if(!is.function(kernel)) stop("If kernel is given it must be a function.") given <- "kernel" kernelnames <- names_formals(kernel) if(length(kernelnames)<1L) stop("kernel function must have at least one argument") kernelnames <- kernelnames[-1L] } if(!is.null(specden)){ if(!is.function(specden)) stop("If specden is given it must be a function.") given <- c(given, "specden") specdennames <- names_formals(specden) if(length(specdennames)<1L) stop("specden function must have at least one argument") specdennames <- specdennames[-1L] } if(is.null(given)) stop("At least one of kernel or specden must be provided.") if(length(given)==2){ if(!setequal(kernelnames,specdennames)) stop("argument names of kernel and specden must match.") } if(is.element("kernel",given)){ parnames <- kernelnames } else{ parnames <- specdennames } if(!is.null(convkernel)){ given <- c(given,"convkernel") if(!is.function(convkernel)||length(formals(convkernel))<2) stop("If convkernel is given it must be a function with at least two arguments.") if(!setequal(parnames,names_formals(convkernel)[-(1:2)])) stop("argument names of convkernel must match argument names of kernel and/or specden.") } if(!is.null(Kfun)){ given <- c(given,"Kfun") if(!is.function(Kfun)||length(formals(Kfun))<1L) stop("If Kfun is given it must be a function with at least one arguments.") if(!setequal(parnames,names_formals(Kfun)[-1L])) stop("argument names of Kfun must match argument names of kernel and/or specden.") } if(!is.null(valid)){ if(!(is.function(valid)&&setequal(parnames,names_formals(valid)))) stop("argument names of valid must match argument names of kernel and/or specden.") } else{ warning("No function for checking parameter validity provided. 
ANY numerical value for the parameters will be accepted.") } if(!is.null(intensity)&&!(is.character(intensity)&&length(intensity)==1L&&is.element(intensity, parnames))) stop("argument intensity must be NULL or have length one, be of class character and match a parameter name") if(!(is.character(dim)|is.numeric(dim))|length(dim)!=1L) stop("argument dim must have length one and be of class character or numeric") if(is.character(dim)){ if(!is.element(dim, parnames)) stop("When dim is a character it must agree with one of the parameter names of the model") } else{ dim <- round(dim) if(dim<1L) stop("When dim is a numeric it must be a positive integer") } ## Catch extra unknown args (will be appended to output object below). dots <- list(...) ## Create output object. out <- function(...){ caller <- match.call()[[1L]] caller <- eval(substitute(caller), parent.frame()) fixedpar <- list(...) nam <- names(fixedpar) if(length(fixedpar)>0&&is.null(nam)) stop(paste("Named arguments are required. Please supply parameter values in a", sQuote("tag=value"), "form")) match <- is.element(nam, parnames) if(sum(!match)>0) warning(paste("Not all supplied argument(s) make sense. Valid arguments are: ", paste(parnames, collapse = ", "), ". 
The following supplied argument(s) will be ignored: ", paste(nam[!match], collapse = ", "), sep = "")) fixedpar <- fixedpar[match] ## Code to always fix the dimension to a numeric when calling the function ####### if(is.character(dim) && !is.element(dim,names(fixedpar))){ dimpar <- structure(list(2), .Names=dim) fixedpar <- c(fixedpar, dimpar) } ## Detect inhomogeneous intensity (an image), and replace by max and an image for thinning thin <- NULL if(!is.null(intensity)){ lambda <- getElement(fixedpar, intensity) if(is.im(lambda)){ lambdamax <- max(lambda) thin <- lambda/lambdamax fixedpar[[intensity]] <- lambdamax } } obj <- list(fixedpar = fixedpar, freepar = parnames[!is.element(parnames,names(fixedpar))], kernel = kernel, specden = specden, convkernel = convkernel, intensity = intensity, thin = thin, dim = dim, name = name, range = range, valid = valid, parbounds = parbounds, specdenrange = specdenrange, startpar = startpar, isotropic = isotropic, caller = caller, basis = basis ) obj <- append(obj, dots) class(obj) <- "detpointprocfamily" return(obj) } class(out) <- c("detpointprocfamilyfun", "pointprocfamilyfun", class(out)) attr(out, "parnames") <- parnames attr(out, "name") <- name return(out) } detpointprocfamilyfun } ) print.detpointprocfamilyfun <- function(x, ...){ cat(paste(attr(x, "name"), "determinantal point process model family\n")) cat("The parameters of the family are:\n") cat(attr(x, "parnames"), sep = ", ") cat("\n") invisible(NULL) } dppBessel <- detpointprocfamilyfun( name="Bessel", kernel=function(x, lambda, alpha, sigma, d){ a <- 0.5*(sigma+d) y <- abs(x/alpha) # Kernel: lambda*2^a*gamma(a+1)*besselJ(2*y*sqrt(a),a) / (2*y*sqrt(a))^a logrslt <- log(lambda) + a*log(2) + lgamma(a+1) - a*log(2*y*sqrt(a)) rslt <- exp(logrslt) * besselJ(2*y*sqrt(a), a) rslt[x==0] <- lambda return(rslt) }, specden=function(x, lambda, alpha, sigma, d){ a <- sigma+d # specden: 
lambda*(2*pi)^(d/2)*alpha^d*gamma(0.5*a+1)/a^(d/2)/gamma(sigma/2+1)*(1-2*pi^2*alpha^2*x^2/a)^(sigma/2) logrslt <- log(lambda) + (d/2)*log(2*pi) + d*log(alpha) + lgamma(0.5*a+1) logrslt <- logrslt - (d/2)*log(a) - lgamma(sigma/2+1) tmp <- 1-2*pi^2*alpha^2*x^2/a warnopt <- options(warn=-1) logrslt <- logrslt + ifelse(tmp<0, -Inf, (sigma/2)*log(tmp)) options(warnopt) return(exp(logrslt)) }, specdenrange=function(model){ p <- model$fixedpar sqrt((p$sigma+p$d)/(2*pi^2*p$alpha^2)) }, valid=function(lambda, alpha, sigma, d){ a <- sigma+d OK <- lambda>0 && alpha>0 && d>=1 && sigma>=0 if(!OK) return(FALSE) ## Upper bound for alpha (using log-scale) lognum <- log(a^(0.5*d)) + lgamma(0.5*sigma+1) logdenom <- log( lambda*(2*pi^(0.5*d))) + lgamma(0.5*a+1) logalphamax <- (1/d) * (lognum - logdenom) return(OK && log(alpha) <= logalphamax) }, isotropic=TRUE, intensity="lambda", dim="d", parbounds=function(name, lambda, alpha, sigma, d){ lognum <- log((sigma+d)^(0.5*d)) + lgamma(0.5*sigma+1) logdenom <- log(2*pi^(0.5*d)) + lgamma(0.5*(sigma+d)+1) switch(name, lambda = c(0, exp(lognum - log( alpha^d) - logdenom)) , alpha = c(0, exp((1/d) * (lognum - log(lambda) - logdenom))), sigma = c(0, switch(as.character(d), "2"=Inf, NA)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("d" %in% model$freepar){ model <- update(model, d=spatdim(X)) } if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("sigma" %in% model$freepar){ sigma <- 2 while(!is.na(OK <- valid(model <- update(model, sigma=sigma)))&&!OK) sigma <- sigma/2 rslt <- c(rslt, "sigma" = sigma) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppCauchy <- detpointprocfamilyfun( name="Cauchy", 
kernel=function(x, lambda, alpha, nu, d){ rslt <- lambda * (1+(x/alpha)^2)^(-nu-d/2) rslt[x==0] <- lambda return(rslt) }, specden=function(x, lambda, alpha, nu, d){ y <- 2*x*alpha*pi rslt <- lambda * y^nu * besselK(x = y, nu = nu) * (sqrt(pi)*alpha)^d * exp((1-nu)*log(2) - lgamma(nu+d/2)) rslt[x==0] <- lambda * exp(lgamma(nu) - lgamma(nu+d/2)) * (sqrt(pi)*alpha)^d return(rslt) }, Kfun = function(x, lambda, alpha, nu, d){ rslt <- pi*x^2 - pi*alpha^2/(2*nu+1) * (1 - (alpha^2/(alpha^2+x^2))^(2*nu+1)) rslt[rslt<0] <- 0 return(rslt) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! lambda>0 && alpha>0 && nu>0 && nu<=50 && d>=1 && lambda <= gamma(nu+d/2)/(gamma(nu)*(sqrt(pi)*alpha)^d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, nu, d, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(missing(nu)) stop("The parameter nu is missing.") if(missing(d)) stop("The parameter d (giving the dimension) is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha * sqrt((1-bound)^(-1/(2*nu+d))-1)) }, parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, gamma(nu+d/2)/(gamma(nu)*(sqrt(pi)*alpha)^d)), alpha = c(0, (exp(lgamma(nu+d/2)-lgamma(nu))/lambda)^(1/d)/sqrt(pi)), ## nu bound only implemented for d = 2. 
nu = c(switch(as.character(d), "2"=pi*lambda*alpha^2, NA), Inf), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppGauss <- detpointprocfamilyfun( name="Gaussian", kernel=function(x, lambda, alpha, d){ rslt <- lambda*exp(-(x/alpha)^2) return(rslt) }, specden=function(x, lambda, alpha, d){ lambda * (sqrt(pi)*alpha)^d * exp(-(x*alpha*pi)^2) }, convkernel=function(x, k, lambda, alpha, d){ logres <- k*log(lambda*pi*alpha^2) - log(pi*k*alpha^2) - x^2/(k*alpha^2) return(exp(logres)) }, Kfun = function(x, lambda, alpha, d){ pi*x^2 - pi*alpha^2/2*(1-exp(-2*x^2/alpha^2)) }, valid=function(lambda, alpha, d){ lambda>0 && alpha>0 && d>=1 && lambda <= (sqrt(pi)*alpha)^(-d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha*sqrt(-log(sqrt(1-bound)))) }, parbounds=function(name, lambda, alpha, d){ switch(name, lambda = c(0, (sqrt(pi)*alpha)^(-d)), alpha = c(0, lambda^(-1/d)/sqrt(pi)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) rslt <- c(rslt, "lambda" = lambda) model <- update(model, lambda=lambda) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] rslt <- c(rslt, "alpha" = alpha) } 
return(rslt) } ) dppMatern <- detpointprocfamilyfun( name="Whittle-Matern", kernel=function(x, lambda, alpha, nu, d){ rslt <- lambda*2^(1-nu) / gamma(nu) * ((x/alpha)^nu) * besselK(x = x/alpha, nu = nu) rslt[x==0] <- lambda return(rslt) }, specden=function(x, lambda, alpha, nu, d){ lambda * exp(lgamma(nu+d/2) - lgamma(nu)) * (2*sqrt(pi)*alpha)^d * (1+(2*x*alpha*pi)^2)^(-nu-d/2) }, convkernel=function(x, k, lambda, alpha, nu, d){ nu2 <- k*(nu+d/2)-d/2 logres <- (nu2)*log(x/alpha) + log(besselK(x = x/alpha, nu = nu2, expon.scaled = TRUE)) - x/alpha logres[x == 0] <- (nu2-1)*log(2) + lgamma(nu2) logres <- logres + k*log(lambda) + k*(lgamma(nu+d/2)-lgamma(nu)) + (d*k-d+1-nu2)*log(2) + d*(k-1)*log(sqrt(pi)*alpha) - lgamma(nu2+d/2) index <- which(logres == Inf) logres[index] <- -Inf return(exp(logres)) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! lambda>0 && alpha>0 && nu>0 && nu<=50 && d>=1 && lambda <= gamma(nu)/(gamma(nu+d/2)*(2*sqrt(pi)*alpha)^d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, nu, d, bound = .99, exact = FALSE){ if(missing(alpha)) stop("The parameter alpha is missing.") if(missing(nu)) stop("The parameter nu is missing.") if(missing(d)) stop("The parameter d (giving the dimension) is missing.") if(!is.logical(exact)) stop("Argument exact must be a logical.") if(!exact&&d==2) return(alpha * sqrt(8*nu)) ## range suggested by Haavard Rue et al. if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") fun <- function(x) sqrt(1-bound)-2^(1-nu) / gamma(nu) * ((x/alpha)^nu) * besselK(x = x/alpha, nu = nu) return(uniroot(fun, c(sqrt(.Machine$double.eps),1e3*alpha*sqrt(nu)))$root) }, parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, gamma(nu)/(gamma(nu+d/2)*(2*sqrt(pi)*alpha)^d)), alpha = c(0, (exp(lgamma(nu)-lgamma(nu+d/2))/lambda)^(1/d)/2/sqrt(pi)), ## nu bound only implemented for d = 2 and d = 4. 
nu = c(0, switch(as.character(d), "2"=1/(4*pi*lambda*alpha^2), "4"=sqrt(1/4+1/(lambda*16*pi*pi*alpha^4))-1/2, NA)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppPowerExp <- detpointprocfamilyfun( name="Power Exponential Spectral", specden=function(x, lambda, alpha, nu, d){ lambda * gamma(d/2+1) * alpha^d / (pi^(d/2)*gamma(d/nu+1)) * exp(-(alpha*x)^nu) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! 
lambda>0 && alpha>0 && nu>0 && nu<=20 && d>=1 && lambda <= pi^(d/2)*gamma(d/nu+1) / (gamma(1+d/2)*alpha^d) }, isotropic=TRUE, intensity="lambda", dim="d", parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, pi^(d/2)*gamma(d/nu+1) / (gamma(d/2+1)*alpha^d)), alpha = c(0, (pi^(d/2)*gamma(d/nu+1) / (lambda * gamma(d/2+1)))^(1/d)), nu = c(NA, NA), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) spatstat/R/plot.owin.R0000755000176200001440000002530413115271120014372 0ustar liggesusers# # plot.owin.S # # The 'plot' method for observation windows (class "owin") # # $Revision: 1.58 $ $Date: 2016/07/16 06:11:47 $ # # # plot.owin <- function(x, main, add=FALSE, ..., box, edge=0.04, type = c("w", "n"), show.all=!add, hatch=FALSE, hatchargs=list(), invert=FALSE, do.plot=TRUE, claim.title.space=FALSE) { # # Function plot.owin. A method for plot. 
# if(missing(main)) main <- short.deparse(substitute(x)) W <- x verifyclass(W, "owin") if(!do.plot) return(invisible(as.rectangle(W))) type <- match.arg(type) if(missing(box) || is.null(box)) { box <- is.mask(W) && show.all } else stopifnot(is.logical(box) && length(box) == 1) #### pt <- prepareTitle(main) main <- pt$main nlines <- pt$nlines ######### xlim <- xr <- W$xrange ylim <- yr <- W$yrange #################################################### ## graphics parameters that can be overridden by user gparam <- resolve.defaults(list(...), par()) ## character expansion factors ## main title size = 'cex.main' * par(cex.main) * par(cex) ## user's graphics expansion factor (*multiplies* par) cex.main.user <- resolve.1.default(list(cex.main=1), list(...)) ## size of main title as multiple of par('cex') cex.main.rela <- cex.main.user * par('cex.main') ## absolute size cex.main.absol <- cex.main.rela * par('cex') if(!add) { ## new plot if(claim.title.space && nlines > 0) { ## allow space for main title (only in multi-panel plots) guesslinespace <- 0.07 * sqrt(diff(xr)^2 + diff(yr)^2) * cex.main.absol added <- (nlines + 1) * guesslinespace ylim[2] <- ylim[2] + added } ## set up plot with equal scales do.call.plotfun(plot.default, resolve.defaults(list(x=numeric(0), y=numeric(0), type="n"), list(...), list(xlim=xlim, ylim=ylim, ann=FALSE, axes=FALSE, asp=1.0, xaxs="i", yaxs="i"), .MatchNull=FALSE)) } if(show.all && nlines > 0) { ## add title if(claim.title.space) { mainheight <- sum(strheight(main, units="user", cex=cex.main.rela)) gapheight <- (strheight("b\nb", units="user", cex=cex.main.rela) - 2 * strheight("b", units="user", cex=cex.main.rela)) if(nlines > 1 && !is.expression(main)) main <- paste(main, collapse="\n") text(x=mean(xr), y=yr[2] + mainheight + 0.5 * gapheight, labels=main, cex=cex.main.rela, col=gparam$col.main, font=gparam$font.main) } else { title(main=main, cex=cex.main.rela, col=gparam$col.main, font=gparam$font.main) } } # Draw surrounding box if(box) 
do.call.plotfun(segments, resolve.defaults( list(x0=xr[c(1,2,2,1)], y0=yr[c(1,1,2,2)], x1=xr[c(2,2,1,1)], y1=yr[c(1,2,2,1)]), list(...))) # If type = "n", do not plot the window. if(type == "n") return(invisible(as.rectangle(W))) # Draw window switch(W$type, rectangle = { Wpoly <- as.polygonal(W) po <- Wpoly$bdry[[1]] do.call.plotfun(polygon, resolve.defaults(list(x=po), list(...)), extrargs="lwd") if(hatch) do.call(add.texture, append(list(W=W), hatchargs)) }, polygonal = { p <- W$bdry # Determine whether user wants to fill the interior col.poly <- resolve.defaults(list(...), list(col=NA))$col den.poly <- resolve.defaults(list(...), list(density=NULL))$density no.fill <- is.null(den.poly) && (is.null(col.poly) || is.na(col.poly)) # Determine whether we need to triangulate the interior. # If it is required to fill the interior, # this can be done directly using polygon() provided # there are no holes. Otherwise we must triangulate the interior. if(no.fill) triangulate <- FALSE else { # Determine whether there are any holes holes <- unlist(lapply(p, is.hole.xypolygon)) triangulate <- any(holes) } if(!triangulate) { # No triangulation required; # simply plot the polygons for(i in seq_along(p)) do.call.plotfun(polygon, resolve.defaults( list(x=p[[i]]), list(...)), extrargs="lwd") } else { # Try using polypath(): lucy <- names(dev.cur()) if(!(lucy %in% c("xfig","pictex","X11"))) { ppa <- owin2polypath(W) do.call.plotfun(polypath, resolve.defaults(ppa, list(border=col.poly), list(...))) } else { # decompose window into simply-connected pieces broken <- try(break.holes(W)) if(inherits(broken, "try-error")) { warning("Unable to plot filled polygons") } else { # Fill pieces with colour (and draw border in same colour) pp <- broken$bdry for(i in seq_len(length(pp))) do.call.plotfun(polygon, resolve.defaults(list(x=pp[[i]], border=col.poly), list(...))) } } # Now draw polygon boundaries for(i in seq_along(p)) do.call.plotfun(polygon, resolve.defaults( list(x=p[[i]]), 
list(density=0, col=NA), list(...)), extrargs="lwd") } if(hatch) do.call(add.texture, append(list(W=W), hatchargs)) }, mask = { # capture 'col' argument and ensure it's at least 2 values coldefault <- c(par("bg"), par("fg")) col <- resolve.defaults( list(...), spatstat.options("par.binary"), list(col=coldefault) )$col if(length(col) == 1) { col <- unique(c(par("bg"), col)) if(length(col) == 1) col <- c(par("fg"), col) } ## invert colours? if(invert) col <- rev(col) ## convert to greyscale? if(spatstat.options("monochrome")) col <- to.grey(col) do.call.matched(image.default, resolve.defaults( list(x=W$xcol, y=W$yrow, z=t(W$m), add=TRUE), list(col=col), list(...), spatstat.options("par.binary"), list(zlim=c(FALSE, TRUE)))) if(hatch) do.call(add.texture, append(list(W=W), hatchargs)) }, stop(paste("Don't know how to plot window of type", sQuote(W$type))) ) return(invisible(as.rectangle(W))) } break.holes <- local({ insect <- function(A, Box) { ## efficient version of intersect.owin which doesn't 'fix' the polygons a <- lapply(A$bdry, reverse.xypolygon) b <- lapply(as.polygonal(Box)$bdry, reverse.xypolygon) ab <- polyclip::polyclip(a, b, "intersection", fillA="nonzero", fillB="nonzero") if(length(ab)==0) return(emptywindow(Box)) # ensure correct polarity totarea <- sum(unlist(lapply(ab, Area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(Box$xrange, Box$yrange, poly=ab, check=FALSE, strict=FALSE, fix=FALSE, unitname=unitname(A)) return(AB) } break.holes <- function(x, splitby=NULL, depth=0, maxdepth=100) { if(is.null(splitby)) { ## first call: validate x stopifnot(is.owin(x)) splitby <- "x" } if(depth > maxdepth) stop("Unable to divide window into simply-connected pieces") p <- x$bdry holes <- unlist(lapply(p, is.hole.xypolygon)) if(!any(holes)) return(x) nholes <- sum(holes) maxdepth <- max(maxdepth, 4 * nholes) i <- min(which(holes)) p.i <- p[[i]] b <- as.rectangle(x) xr <- b$xrange yr <- b$yrange switch(splitby, x = { xsplit <- 
mean(range(p.i$x)) left <- c(xr[1], xsplit) right <- c(xsplit, xr[2]) xleft <- insect(x, owin(left, yr)) xright <- insect(x, owin(right, yr)) ## recurse xleft <- break.holes(xleft, splitby="y", depth=depth+1, maxdepth=maxdepth) xright <- break.holes(xright, splitby="y", depth=depth+1, maxdepth=maxdepth) ## recombine (without fusing polygons again!) result <- owin(xr, yr, poly=c(xleft$bdry, xright$bdry), check=FALSE, strict=FALSE, fix=FALSE) }, y = { ysplit <- mean(range(p.i$y)) lower <- c(yr[1], ysplit) upper <- c(ysplit, yr[2]) xlower <- insect(x, owin(xr, lower)) xupper <- insect(x, owin(xr, upper)) ## recurse xlower <- break.holes(xlower, splitby="x", depth=depth+1, maxdepth=maxdepth) xupper <- break.holes(xupper, splitby="x", depth=depth+1, maxdepth=maxdepth) ## recombine (without fusing polygons again!) result <- owin(xr, yr, poly=c(xlower$bdry, xupper$bdry), check=FALSE, strict=FALSE, fix=FALSE) }) return(result) } break.holes }) spatstat/R/colourtables.R0000755000176200001440000004200313115271075015143 0ustar liggesusers# # colourtables.R # # support for colour maps and other lookup tables # # $Revision: 1.37 $ $Date: 2016/02/16 01:39:12 $ # colourmap <- function(col, ..., range=NULL, breaks=NULL, inputs=NULL) { if(nargs() == 0) { ## null colour map f <- lut() } else { ## validate colour data col2hex(col) ## store without conversion f <- lut(col, ..., range=range, breaks=breaks, inputs=inputs) } class(f) <- c("colourmap", class(f)) f } lut <- function(outputs, ..., range=NULL, breaks=NULL, inputs=NULL) { if(nargs() == 0) { ## null lookup table f <- function(x, what="value"){NULL} class(f) <- c("lut", class(f)) attr(f, "stuff") <- list(n=0) return(f) } n <- length(outputs) given <- c(!is.null(range), !is.null(breaks), !is.null(inputs)) names(given) <- c("range", "breaks", "inputs") ngiven <- sum(given) if(ngiven == 0) stop(paste("One of the arguments", sQuote("range"), ",", sQuote("breaks"), "or", sQuote("inputs"), "should be given")) if(ngiven > 1L) { 
offending <- names(breaks)[given] stop(paste("The arguments", commasep(sQuote(offending)), "are incompatible")) } if(!is.null(inputs)) { # discrete set of input values mapped to output values stopifnot(length(inputs) == length(outputs)) stuff <- list(n=n, discrete=TRUE, inputs=inputs, outputs=outputs) f <- function(x, what="value") { m <- match(x, stuff$inputs) if(what == "index") return(m) cout <- stuff$outputs[m] return(cout) } } else if(!is.null(range) && inherits(range, c("Date", "POSIXt"))) { # date/time interval mapped to colours timeclass <- if(inherits(range, "Date")) "Date" else "POSIXt" if(is.null(breaks)) { breaks <- seq(from=range[1L], to=range[2L], length.out=length(outputs)+1L) } else { if(!inherits(breaks, timeclass)) stop(paste("breaks should belong to class", dQuote(timeclass)), call.=FALSE) stopifnot(length(breaks) >= 2) stopifnot(length(breaks) == length(outputs) + 1L) if(!all(diff(breaks) > 0)) stop("breaks must be increasing") } stuff <- list(n=n, discrete=FALSE, breaks=breaks, outputs=outputs) f <- function(x, what="value") { x <- as.vector(as.numeric(x)) z <- findInterval(x, stuff$breaks, rightmost.closed=TRUE) if(what == "index") return(z) cout <- stuff$outputs[z] return(cout) } } else { # interval of real line mapped to colours if(is.null(breaks)) { breaks <- seq(from=range[1L], to=range[2L], length.out=length(outputs)+1L) } else { stopifnot(is.numeric(breaks) && length(breaks) >= 2L) stopifnot(length(breaks) == length(outputs) + 1L) if(!all(diff(breaks) > 0)) stop("breaks must be increasing") } stuff <- list(n=n, discrete=FALSE, breaks=breaks, outputs=outputs) f <- function(x, what="value") { stopifnot(is.numeric(x)) x <- as.vector(x) z <- findInterval(x, stuff$breaks, rightmost.closed=TRUE) if(what == "index") return(z) cout <- stuff$outputs[z] return(cout) } } attr(f, "stuff") <- stuff class(f) <- c("lut", class(f)) f } print.lut <- function(x, ...) 
{ if(inherits(x, "colourmap")) { tablename <- "Colour map" outputname <- "colour" } else { tablename <- "Lookup table" outputname <- "output" } stuff <- attr(x, "stuff") n <- stuff$n if(n == 0) { ## Null map cat(paste("Null", tablename, "\n")) return(invisible(NULL)) } if(stuff$discrete) { cat(paste(tablename, "for discrete set of input values\n")) out <- data.frame(input=stuff$inputs, output=stuff$outputs) } else { b <- stuff$breaks cat(paste(tablename, "for the range", prange(b[c(1L,n+1L)]), "\n")) leftend <- rep("[", n) rightend <- c(rep(")", n-1), "]") inames <- paste(leftend, b[-(n+1L)], ", ", b[-1L], rightend, sep="") out <- data.frame(interval=inames, output=stuff$outputs) } colnames(out)[2L] <- outputname print(out) invisible(NULL) } print.colourmap <- function(x, ...) { NextMethod("print") } summary.lut <- function(object, ...) { s <- attr(object, "stuff") if(inherits(object, "colourmap")) { s$tablename <- "Colour map" s$outputname <- "colour" } else { s$tablename <- "Lookup table" s$outputname <- "output" } class(s) <- "summary.lut" return(s) } print.summary.lut <- function(x, ...) 
{ n <- x$n if(n == 0) { cat(paste("Null", x$tablename, "\n")) return(invisible(NULL)) } if(x$discrete) { cat(paste(x$tablename, "for discrete set of input values\n")) out <- data.frame(input=x$inputs, output=x$outputs) } else { b <- x$breaks cat(paste(x$tablename, "for the range", prange(b[c(1L,n+1L)]), "\n")) leftend <- rep("[", n) rightend <- c(rep(")", n-1L), "]") inames <- paste(leftend, b[-(n+1L)], ", ", b[-1L], rightend, sep="") out <- data.frame(interval=inames, output=x$outputs) } colnames(out)[2L] <- x$outputname print(out) } plot.colourmap <- local({ # recognised additional arguments to image.default() and axis() imageparams <- c("main", "asp", "sub", "axes", "ann", "cex", "font", "cex.axis", "cex.lab", "cex.main", "cex.sub", "col.axis", "col.lab", "col.main", "col.sub", "font.axis", "font.lab", "font.main", "font.sub") axisparams <- c("cex", "cex.axis", "cex.lab", "col.axis", "col.lab", "font.axis", "font.lab", "las", "mgp", "xaxp", "yaxp", "tck", "tcl", "xpd") linmap <- function(x, from, to) { to[1L] + diff(to) * (x - from[1L])/diff(from) } # rules to determine the ribbon dimensions when one dimension is given widthrule <- function(heightrange, separate, n, gap) { if(separate) 1 else diff(heightrange)/10 } heightrule <- function(widthrange, separate, n, gap) { (if(separate) (n + (n-1)*gap) else 10) * diff(widthrange) } plot.colourmap <- function(x, ..., main, xlim=NULL, ylim=NULL, vertical=FALSE, axis=TRUE, labelmap=NULL, gap=0.25, add=FALSE) { if(missing(main)) main <- short.deparse(substitute(x)) stuff <- attr(x, "stuff") col <- stuff$outputs n <- stuff$n if(n == 0) { ## Null map return(invisible(NULL)) } discrete <- stuff$discrete if(discrete) { check.1.real(gap, "In plot.colourmap") explain.ifnot(gap >= 0, "In plot.colourmap") } separate <- discrete && (gap > 0) if(is.null(labelmap)) { labelmap <- function(x) x } else if(is.numeric(labelmap) && length(labelmap) == 1L && !discrete) { labscal <- labelmap labelmap <- function(x) { x * labscal } } else 
stopifnot(is.function(labelmap)) # determine pixel entries 'v' and colour map breakpoints 'bks' # to be passed to 'image.default' if(!discrete) { # real numbers: continuous ribbon bks <- stuff$breaks rr <- range(bks) v <- seq(from=rr[1L], to=rr[2L], length.out=max(n+1L, 1024)) } else if(!separate) { # discrete values: blocks of colour, run together v <- (1:n) - 0.5 bks <- 0:n rr <- c(0,n) } else { # discrete values: separate blocks of colour vleft <- (1+gap) * (0:(n-1L)) vright <- vleft + 1 v <- vleft + 0.5 rr <- c(0, n + (n-1)*gap) } # determine position of ribbon or blocks of colour if(is.null(xlim) && is.null(ylim)) { u <- widthrule(rr, separate, n, gap) if(!vertical) { xlim <- rr ylim <- c(0,u) } else { xlim <- c(0,u) ylim <- rr } } else if(is.null(ylim)) { if(!vertical) ylim <- c(0, widthrule(xlim, separate, n, gap)) else ylim <- c(0, heightrule(xlim, separate, n, gap)) } else if(is.null(xlim)) { if(!vertical) xlim <- c(0, heightrule(ylim, separate, n, gap)) else xlim <- c(0, widthrule(ylim, separate, n, gap)) } # .......... initialise plot ............................... if(!add) do.call.matched(plot.default, resolve.defaults(list(x=xlim, y=ylim, type="n", main=main, axes=FALSE, xlab="", ylab="", asp=1.0), list(...))) if(separate) { # ................ plot separate blocks of colour ................. 
if(!vertical) { # horizontal arrangement of blocks xleft <- linmap(vleft, rr, xlim) xright <- linmap(vright, rr, xlim) y <- ylim z <- matrix(1, 1L, 1L) for(i in 1:n) { x <- c(xleft[i], xright[i]) do.call.matched(image.default, resolve.defaults(list(x=x, y=y, z=z, add=TRUE), list(...), list(col=col[i])), extrargs=imageparams) } } else { # vertical arrangement of blocks x <- xlim ylow <- linmap(vleft, rr, ylim) yupp <- linmap(vright, rr, ylim) z <- matrix(1, 1L, 1L) for(i in 1:n) { y <- c(ylow[i], yupp[i]) do.call.matched(image.default, resolve.defaults(list(x=x, y=y, z=z, add=TRUE), list(...), list(col=col[i])), extrargs=imageparams) } } } else { # ................... plot ribbon image ............................. if(!vertical) { # horizontal colour ribbon x <- linmap(v, rr, xlim) y <- ylim z <- matrix(v, ncol=1L) } else { # vertical colour ribbon y <- linmap(v, rr, ylim) z <- matrix(v, nrow=1L) x <- xlim } do.call.matched(image.default, resolve.defaults(list(x=x, y=y, z=z, add=TRUE), list(...), list(breaks=bks, col=col)), extrargs=imageparams) } if(axis) { # ................. draw annotation .................. 
if(!vertical) { # add horizontal axis/annotation if(discrete) { la <- paste(labelmap(stuff$inputs)) at <- linmap(v, rr, xlim) } else { la <- prettyinside(rr) at <- linmap(la, rr, xlim) la <- labelmap(la) } # default axis position is below the ribbon (side=1) sidecode <- resolve.1.default("side", list(...), list(side=1L)) if(!(sidecode %in% c(1L,3L))) warning(paste("side =", sidecode, "is not consistent with horizontal orientation")) pos <- c(ylim[1L], xlim[1L], ylim[2L], xlim[2L])[sidecode] # don't draw axis lines if plotting separate blocks lwd0 <- if(separate) 0 else 1 # draw axis do.call.matched(graphics::axis, resolve.defaults(list(...), list(side = 1L, pos = pos, at = at), list(labels=la, lwd=lwd0)), extrargs=axisparams) } else { # add vertical axis if(discrete) { la <- paste(labelmap(stuff$inputs)) at <- linmap(v, rr, ylim) } else { la <- prettyinside(rr) at <- linmap(la, rr, ylim) la <- labelmap(la) } # default axis position is to the right of ribbon (side=4) sidecode <- resolve.1.default("side", list(...), list(side=4)) if(!(sidecode %in% c(2L,4L))) warning(paste("side =", sidecode, "is not consistent with vertical orientation")) pos <- c(ylim[1L], xlim[1L], ylim[2L], xlim[2L])[sidecode] # don't draw axis lines if plotting separate blocks lwd0 <- if(separate) 0 else 1 # draw labels horizontally if plotting separate blocks las0 <- if(separate) 1 else 0 # draw axis do.call.matched(graphics::axis, resolve.defaults(list(...), list(side=4, pos=pos, at=at), list(labels=la, lwd=lwd0, las=las0)), extrargs=axisparams) } } invisible(NULL) } plot.colourmap }) # Interpolate a colourmap or lookup table defined on real numbers interp.colourmap <- function(m, n=512) { if(!inherits(m, "colourmap")) stop("m should be a colourmap") st <- attr(m, "stuff") if(st$discrete) { # discrete set of input values mapped to colours xknots <- st$inputs # Ensure the inputs are real numbers if(!is.numeric(xknots)) stop("Cannot interpolate: inputs are not numerical values") } else { # 
interval of real line, chopped into intervals, mapped to colours # Find midpoints of intervals bks <- st$breaks nb <- length(bks) xknots <- (bks[-1L] + bks[-nb])/2 } # corresponding colours in hsv coordinates yknots.hsv <- rgb2hsva(col2rgb(st$outputs, alpha=TRUE)) # transform 'hue' from polar to cartesian coordinate # divide domain into n equal intervals xrange <- range(xknots) xbreaks <- seq(xrange[1L], xrange[2L], length=n+1L) xx <- (xbreaks[-1L] + xbreaks[-(n+1L)])/2 # interpolate saturation and value in hsv coordinates yy.sat <- approx(x=xknots, y=yknots.hsv["s", ], xout=xx)$y yy.val <- approx(x=xknots, y=yknots.hsv["v", ], xout=xx)$y # interpolate hue by first transforming polar to cartesian coordinate yknots.hue <- 2 * pi * yknots.hsv["h", ] yy.huex <- approx(x=xknots, y=cos(yknots.hue), xout=xx)$y yy.huey <- approx(x=xknots, y=sin(yknots.hue), xout=xx)$y yy.hue <- (atan2(yy.huey, yy.huex)/(2 * pi)) %% 1 # handle transparency yknots.alpha <- yknots.hsv["alpha", ] if(all(yknots.alpha == 1)) { ## opaque colours: form using hue, sat, val yy <- hsv(yy.hue, yy.sat, yy.val) } else { ## transparent colours: interpolate alpha yy.alpha <- approx(x=xknots, y=yknots.alpha, xout=xx)$y ## form colours using hue, sat, val, alpha yy <- hsv(yy.hue, yy.sat, yy.val, yy.alpha) } # done f <- colourmap(yy, breaks=xbreaks) return(f) } interp.colours <- function(x, length.out=512) { y <- colourmap(x, range=c(0,1)) z <- interp.colourmap(y, length.out) oo <- attr(z, "stuff")$outputs return(oo) } tweak.colourmap <- local({ is.hex <- function(z) { is.character(z) && all(nchar(z, keepNA=TRUE) %in% c(7L,9L)) && identical(substr(z, 1L, 7L), substr(col2hex(z), 1L, 7L)) } tweak.colourmap <- function(m, col, ..., inputs=NULL, range=NULL) { if(!inherits(m, "colourmap")) stop("m should be a colourmap") if(is.null(inputs) && is.null(range)) stop("Specify either inputs or range") if(!is.null(inputs) && !is.null(range)) stop("Do not specify both inputs and range") ## determine indices of colours 
to be changed if(!is.null(inputs)) { ix <- m(inputs, what="index") } else { if(!(is.numeric(range) && length(range) == 2 && diff(range) > 0)) stop("range should be a numeric vector of length 2 giving (min, max)") if(length(col2hex(col)) != 1L) stop("When range is given, col should be a single colour value") ixr <- m(range, what="index") ix <- (ixr[1L]):(ixr[2L]) } ## reassign colours st <- attr(m, "stuff") outputs <- st$outputs result.hex <- FALSE if(is.hex(outputs)) { ## convert replacement data to hex col <- col2hex(col) result.hex <- TRUE } else if(is.hex(col)) { ## convert existing data to hex outputs <- col2hex(outputs) result.hex <- TRUE } else if(!(is.character(outputs) && is.character(col))) { ## unrecognised format - convert both to hex outputs <- col2hex(outputs) col <- col2hex(col) result.hex <- TRUE } if(result.hex) { ## hex codes may be 7 or 9 characters outlen <- nchar(outputs) collen <- nchar(col) if(length(unique(c(outlen, collen))) > 1L) { ## convert all to 9 characters if(any(bad <- (outlen == 7))) outputs[bad] <- paste0(outputs[bad], "FF") if(any(bad <- (collen == 7))) col[bad] <- paste0(col[bad], "FF") } } ## Finally, replace outputs[ix] <- col st$outputs <- outputs attr(m, "stuff") <- st assign("stuff", st, envir=environment(m)) return(m) } tweak.colourmap }) colouroutputs <- function(x) { stopifnot(inherits(x, "colourmap")) attr(x, "stuff")$outputs } "colouroutputs<-" <- function(x, value) { stopifnot(inherits(x, "colourmap")) st <- attr(x, "stuff") col2hex(value) # validates colours st$outputs[] <- value attr(x, "stuff") <- st assign("stuff", st, envir=environment(x)) return(x) } spatstat/R/exactMPLEstrauss.R0000644000176200001440000000400313115225157015647 0ustar liggesusers# # exactMPLEstrauss.R # # 'exact' MPLE for stationary Strauss process # # $Revision: 1.6 $ $Date: 2014/11/10 07:39:41 $ # exactMPLEstrauss <- local({ # main function exactMPLEstrauss <- function(X, R, ngrid=2048, plotit=FALSE, project=TRUE) { # n <- npoints(X) W <- 
as.owin(X) # border correction WminR <- erosion(W, R) bR <- (bdist.points(X) >= R) nR <- sum(bR) # evaluate neighbour counts for data points Tcounts <- crosspaircounts(X, X, R) - 1L sumT <- sum(Tcounts[bR]) # determine the coefficients a_k for k = 0, 1, ... Z <- scanmeasure(X, R, dimyx=ngrid) Z <- Z[WminR, drop=FALSE] kcounts <- tabulate(as.vector(Z$v) + 1L) pixarea <- with(Z, xstep * ystep) A <- kcounts * pixarea # find optimal log(gamma) op <- optim(log(0.5), lpl, sco, method="L-BFGS-B", control=list(fnscale=-1), lower=-Inf, upper=if(project) 0 else Inf, A=A, sumT=sumT, nR=nR) loggamma <- op$par # plot? if(plotit) { x <- seq(log(1e-4), if(project) 0 else log(1e4), length=512) plot(x, lpl(x, A, sumT, nR), type="l", xlab=expression(log(gamma)), ylab=expression(log(PL(gamma)))) abline(v=loggamma, lty=3) } # derive optimal beta kmax <-length(A) - 1L polypart <- A %*% exp(outer(0:kmax, loggamma)) beta <- nR/polypart logbeta <- log(beta) result <- c(logbeta, loggamma) names(result) <- c("(Intercept)", "Interaction") return(result) } # helper functions (vectorised) # log pseudolikelihood lpl <- function(theta, A=A, sumT=sumT, nR=nR) { kmax <-length(A) - 1L polypart <- A %*% exp(outer(0:kmax, theta)) nR * (log(nR) - log(polypart) - 1) + theta * sumT } # pseudoscore sco <- function(theta, A=A, sumT=sumT, nR=nR) { kmax <- length(A) - 1L kseq <- 0:kmax mat <- exp(outer(kseq, theta)) polypart <- A %*% mat Dpolypart <- (A * kseq) %*% mat sumT - nR * Dpolypart/polypart } exactMPLEstrauss }) spatstat/R/daogenton.R0000644000176200001440000002235713115271075014432 0ustar liggesusers## ## daogenton.R ## ## Dao-Genton adjusted p-values ## ## $Revision: 1.14 $ $Date: 2017/06/05 10:31:58 $ ## bits.test <- function(X, ..., exponent=2, nsim=19, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { twostage.test(X, ..., exponent=exponent, nsim=nsim, nsimsub=nsim, reuse=FALSE, alternative=match.arg(alternative), 
leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testblurb="Balanced Independent Two-stage Test") } dg.test <- function(X, ..., exponent=2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), reuse=TRUE, leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub)) stop("nsim and nsimsub must be relatively prime") twostage.test(X, ..., exponent=exponent, nsim=nsim, nsimsub=nsimsub, reuse=reuse, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testblurb="Dao-Genton adjusted goodness-of-fit test") } twostage.test <- function(X, ..., exponent=2, nsim=19, nsimsub=nsim, alternative=c("two.sided", "less", "greater"), reuse=FALSE, leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE, testblurb="Two-stage Monte Carlo test") { Xname <- short.deparse(substitute(X)) alternative <- match.arg(alternative) env.here <- sys.frame(sys.nframe()) Xismodel <- is.ppm(X) || is.kppm(X) || is.lppm(X) || is.slrm(X) # top-level test if(verbose) cat("Applying first-stage test to original data... ") tX <- envelopeTest(X, ..., nsim=nsim, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savefuns=savefuns, savepatterns=savepatterns || reuse, verbose=FALSE, envir.simul=env.here) pX <- tX$p.value ## check special case afortiori <- !interpolate && (nsimsub < nsim) && (pX == (1/(nsim+1)) || pX == 1) if(afortiori) { ## result is determined padj <- pX pY <- NULL } else { ## result is not yet determined if(!reuse) { if(verbose) cat("Repeating first-stage test... 
") tXX <- envelopeTest(X, ..., nsim=nsim, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savefuns=savefuns, savepatterns=TRUE, verbose=FALSE, envir.simul=env.here) ## extract simulated patterns Ylist <- attr(attr(tXX, "envelope"), "simpatterns") } else { Ylist <- attr(attr(tX, "envelope"), "simpatterns") } if(verbose) cat("Done.\n") ## apply same test to each simulated pattern if(verbose) cat(paste("Running second-stage tests on", nsim, "simulated patterns... ")) pY <- numeric(nsim) for(i in 1:nsim) { if(verbose) progressreport(i, nsim) Yi <- Ylist[[i]] ## if X is a model, fit it to Yi. Otherwise the implicit model is CSR. if(Xismodel) Yi <- update(X, Yi) tYi <- envelopeTest(Yi, ..., nsim=nsimsub, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savepatterns=TRUE, verbose=FALSE, envir.simul=env.here) pY[i] <- tYi$p.value } pY <- sort(pY) ## compute adjusted p-value padj <- (1 + sum(pY <= pX))/(1+nsim) } # pack up method <- tX$method method <- c(testblurb, paste("based on", method[1L]), paste("First stage:", method[2L]), method[-(1:2)], if(afortiori) { paren(paste("Second stage was omitted: p0 =", pX, "implies p-value =", padj)) } else if(reuse) { paste("Second stage: nested, ", nsimsub, "simulations for each first-stage simulation") } else { paste("Second stage:", nsim, "*", nsimsub, "nested simulations independent of first stage") } ) names(pX) <- "p0" result <- structure(list(statistic = pX, p.value = padj, method = method, data.name = Xname), class="htest") attr(result, "rinterval") <- attr(tX, "rinterval") attr(result, "pX") <- pX attr(result, "pY") <- pY if(savefuns || savepatterns) result <- hasenvelope(result, attr(tX, "envelope")) return(result) } dg.envelope <- function(X, ..., nsim=19, nsimsub=nsim-1, nrank=1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { # Xname <- 
short.deparse(substitute(X)) alternative <- match.arg(alternative) env.here <- sys.frame(sys.nframe()) Xismodel <- is.ppm(X) || is.kppm(X) || is.lppm(X) || is.slrm(X) # top-level test if(verbose) cat("Applying test to original data... ") tX <- envelopeTest(X, ..., alternative=alternative, leaveout=leaveout, interpolate = interpolate, nsim=nsim, nrank=nrank, exponent=Inf, savepatterns=TRUE, savefuns=TRUE, verbose=FALSE, envir.simul=env.here) if(verbose) cat("Done.\n") ## extract info envX <- attr(tX, "envelope") ## extract simulated patterns Ylist <- attr(envX, "simpatterns") ## SimFuns <- attr(envX, "simfuns") # apply same test to each simulated pattern if(verbose) cat(paste("Running tests on", nsim, "simulated patterns... ")) pvalY <- numeric(nsim) for(i in 1:nsim) { if(verbose) progressreport(i, nsim) Yi <- Ylist[[i]] # if X is a model, fit it to Yi. Otherwise the implicit model is CSR. if(Xismodel) Yi <- update(X, Yi) tYi <- envelopeTest(Yi, ..., alternative=alternative, leaveout=leaveout, interpolate = interpolate, save.interpolant = FALSE, nsim=nsimsub, nrank=nrank, exponent=Inf, savepatterns=TRUE, verbose=FALSE, envir.simul=env.here) pvalY[i] <- tYi$p.value } ## Find critical deviation if(!interpolate) { ## find critical rank 'l' rankY <- pvalY * (nsimsub + 1) dg.rank <- sort(rankY, na.last=TRUE)[nrank] if(verbose) cat("dg.rank=", dg.rank, fill=TRUE) ## extract deviation values from top-level simulation simdev <- attr(tX, "statistics")[["sim"]] ## find critical deviation dg.crit <- sort(simdev, decreasing=TRUE, na.last=TRUE)[dg.rank] if(verbose) cat("dg.crit=", dg.crit, fill=TRUE) } else { ## compute estimated cdf of t fhat <- attr(tX, "density")[c("x", "y")] fhat$z <- with(fhat, cumsum(y)/sum(y)) # 'within' upsets package checker ## find critical (second stage) p-value pcrit <- sort(pvalY, na.last=TRUE)[nrank] ## compute corresponding upper quantile of estimated density of t dg.crit <- with(fhat, { min(x[z >= 1 - pcrit]) }) } ## make fv object, for now 
refname <- if("theo" %in% names(envX)) "theo" else "mmean" fname <- attr(envX, "fname") result <- (as.fv(envX))[, c(fvnames(envX, ".x"), fvnames(envX, ".y"), refname)] refval <- envX[[refname]] ## newdata <- data.frame(hi=refval + dg.crit, lo=refval - dg.crit) newlabl <- c(makefvlabel(NULL, NULL, fname, "hi"), makefvlabel(NULL, NULL, fname, "lo")) alpha <- nrank/(nsim+1) alphatext <- paste0(100*alpha, "%%") newdesc <- c(paste("upper", alphatext, "critical boundary for %s"), paste("lower", alphatext, "critical boundary for %s")) switch(alternative, two.sided = { }, less = { newdata$hi <- Inf newlabl[1L] <- "infinity" newdesc[1L] <- "infinite upper limit" }, greater = { newdata$lo <- -Inf newlabl[2L] <- "infinity" newdesc[2L] <- "infinite lower limit" }) result <- bind.fv(result, newdata, newlabl, newdesc) fvnames(result, ".") <- rev(fvnames(result, ".")) fvnames(result, ".s") <- c("lo", "hi") if(savefuns || savepatterns) result <- hasenvelope(result, envX) return(result) } spatstat/R/summary.im.R0000755000176200001440000000757113115271120014550 0ustar liggesusers# # summary.im.R # # summary() method for class "im" # # $Revision: 1.21 $ $Date: 2016/09/01 02:31:52 $ # # summary.im() # print.summary.im() # print.im() # summary.im <- function(object, ...) { verifyclass(object, "im") x <- object y <- unclass(x)[c("dim", "xstep", "ystep")] pixelarea <- y$xstep * y$ystep # extract image values v <- x$v inside <- !is.na(v) v <- v[inside] # type of values? y$type <- x$type # factor-valued? 
lev <- levels(x) if(!is.null(lev) && !is.factor(v)) v <- factor(v, levels=seq_along(lev), labels=lev) switch(x$type, integer=, real={ y$mean <- mv <- mean(v) y$integral <- mv * length(v) * pixelarea y$range <- ra <- range(v) y$min <- ra[1] y$max <- ra[2] }, factor={ y$levels <- lev y$table <- table(v, dnn="") }, complex={ y$mean <- mv <- mean(v) y$integral <- mv * length(v) * pixelarea rr <- range(Re(v)) y$Re <- list(range=rr, min=rr[1], max=rr[2]) ri <- range(Im(v)) y$Im <- list(range=ri, min=ri[1], max=ri[2]) }, { # another unknown type pixelvalues <- v y$summary <- summary(pixelvalues) }) # summarise pixel raster win <- as.owin(x) y$window <- summary.owin(win) y$fullgrid <- (rescue.rectangle(win)$type == "rectangle") y$units <- unitname(x) class(y) <- "summary.im" return(y) } print.summary.im <- function(x, ...) { verifyclass(x, "summary.im") splat(paste0(x$type, "-valued"), "pixel image") unitinfo <- summary(x$units) pluralunits <- unitinfo$plural sigdig <- getOption('digits') di <- x$dim win <- x$window splat(di[1], "x", di[2], "pixel array (ny, nx)") splat("enclosing rectangle:", prange(signif(x$window$xrange, sigdig)), "x", prange(signif(x$window$yrange, sigdig)), unitinfo$plural, unitinfo$explain) splat("dimensions of each pixel:", signif(x$xstep, 3), "x", signif(x$ystep, sigdig), pluralunits) if(!is.null(explain <- unitinfo$explain)) splat(explain) fullgrid <- x$fullgrid if(fullgrid) { splat("Image is defined on the full rectangular grid") whatpart <- "Frame" } else { splat("Image is defined on a subset of the rectangular grid") whatpart <- "Subset" } splat(whatpart, "area =", win$area, "square", pluralunits) if(!fullgrid) { af <- signif(win$areafraction, min(3, sigdig)) splat(whatpart, "area fraction =", af) } if(fullgrid) splat("Pixel values") else splat("Pixel values (inside window):") switch(x$type, integer=, real={ splat("\trange =", prange(signif(x$range, sigdig))) splat("\tintegral =", signif(x$integral, sigdig)) splat("\tmean =", signif(x$mean, 
sigdig)) }, factor={ print(x$table) }, complex={ splat("\trange: Real", prange(signif(x$Re$range, sigdig)), "Imaginary", prange(signif(x$Im$range, sigdig))) splat("\tintegral =", signif(x$integral, sigdig)) splat("\tmean =", signif(x$mean, sigdig)) }, { print(x$summary) }) return(invisible(NULL)) } print.im <- function(x, ...) { splat(paste0(x$type, "-valued"), "pixel image") if(x$type == "factor") { splat("factor levels:") print(levels(x)) } sigdig <- min(5, getOption('digits')) unitinfo <- summary(unitname(x)) di <- x$dim splat(di[1], "x", di[2], "pixel array (ny, nx)") splat("enclosing rectangle:", prange(signif(zapsmall(x$xrange), sigdig)), "x", prange(signif(zapsmall(x$yrange), sigdig)), unitinfo$plural, unitinfo$explain) return(invisible(NULL)) } spatstat/R/persp.im.R0000644000176200001440000002511413115225157014203 0ustar liggesusers## ## persp.im.R ## ## 'persp' method for image objects ## plus annotation ## ## $Revision: 1.20 $ $Date: 2016/09/01 05:49:42 $ ## persp.im <- local({ persp.im <- function(x, ..., colmap=NULL, colin=x, apron=FALSE, visible=FALSE) { xname <- deparse(substitute(x)) xinfo <- summary(x) if(xinfo$type == "factor") stop("Perspective plot is inappropriate for factor-valued image") ## check whether 'col' was specified when 'colmap' was intended Col <- list(...)$col if(is.null(colmap) && !is.null(Col) && !is.matrix(Col) && length(Col) != 1) warning("Argument col is not a matrix. 
Did you mean colmap?") if(!missing(colin)) { ## separate image to determine colours verifyclass(colin, "im") if(!compatible(colin, x)) { ## resample 'colin' onto grid of 'x' colin <- as.im(colin, W=x) } if(is.null(colmap)) colmap <- spatstat.options("image.colfun")(128) } pop <- spatstat.options("par.persp") ## if(is.function(colmap) && !inherits(colmap, "colourmap")) { ## coerce to a 'colourmap' if possible clim <- range(colin, finite=TRUE) if(names(formals(colmap))[1] == "n") { colval <- colmap(128) colmap <- colourmap(colval, range=clim) } else { ## colour map determined by a rule (e.g. 'beachcolours') colmap <- invokeColourmapRule(colmap, colin, zlim=clim, colargs=list(...)) if(is.null(colmap)) stop("Unrecognised syntax for colour function") } } ## colour map? if(is.null(colmap)) { colinfo <- list(col=NULL) } else if(inherits(colmap, "colourmap")) { ## colour map object ## apply colour function to image data colval <- eval.im(colmap(colin)) colval <- t(as.matrix(colval)) ## strip one row and column for input to persp.default colval <- colval[-1, -1] ## replace NA by arbitrary value isna <- is.na(colval) if(any(isna)) { stuff <- attr(colmap, "stuff") colvalues <- stuff$outputs colval[isna] <- colvalues[1] } ## pass colour matrix (and suppress lines) colinfo <- list(col=colval, border=NA) } else { ## interpret 'colmap' as colour map if(is.list(colmap) && all(c("breaks", "col") %in% names(colmap))) { breaks <- colmap$breaks colvalues <- colmap$col } else if(is.vector(colmap)) { colvalues <- colmap breaks <- quantile(colin, seq(from=0,to=1,length.out=length(colvalues)+1)) if(!all(ok <- !duplicated(breaks))) { breaks <- breaks[ok] colvalues <- colvalues[ok[-1]] } } else warning("Unrecognised format for colour map") ## apply colour map to image values colid <- cut.im(colin, breaks=breaks, include.lowest=TRUE) colval <- eval.im(colvalues[unclass(colid)]) colval <- t(as.matrix(colval)) # nr <- nrow(colval) # nc <- ncol(colval) ## strip one row and column for input to 
persp.default colval <- colval[-1, -1] colval[is.na(colval)] <- colvalues[1] ## pass colour matrix (and suppress lines) colinfo <- list(col=colval, border=NA) } if(apron) { ## add an 'apron' zlim <- list(...)$zlim bottom <- if(!is.null(zlim)) zlim[1] else min(x) x <- na.handle.im(x, na.replace=bottom) x <- padimage(x, bottom) xinfo <- summary(x) if(is.matrix(colval <- colinfo$col)) { colval <- matrix(col2hex(colval), nrow(colval), ncol(colval)) grijs <- col2hex("lightgrey") colval <- cbind(grijs, rbind(grijs, colval, grijs), grijs) colinfo$col <- colval } } if(spatstat.options("monochrome")) colinfo$col <- to.grey(colinfo$col) ## get reasonable z scale while fixing x:y aspect ratio if(xinfo$type %in% c("integer", "real")) { zrange <- xinfo$range if(diff(zrange) > 0) { xbox <- as.rectangle(x) zscale <- 0.5 * mean(diff(xbox$xrange), diff(xbox$yrange))/diff(zrange) zlim <- zrange } else { zscale <- NULL mx <- xinfo$mean zlim <- mx + c(-1,1) * if(mx == 0) 0.1 else min(abs(mx), 1) } } else zscale <- zlim <- NULL dotargs <- list(...) if(spatstat.options("monochrome")) dotargs <- col.args.to.grey(dotargs) yargh <- resolve.defaults(list(x=x$xcol, y=x$yrow, z=t(x$v)), dotargs, pop, colinfo, list(xlab="x", ylab="y", zlab=xname), list(scale=FALSE, expand=zscale, zlim=zlim), list(main=xname), .StripNull=TRUE) jawab <- do.call.matched(persp, yargh, funargs=graphicsPars("persp")) attr(jawab, "expand") <- yargh$expand if(visible) attr(jawab, "visible") <- perspvis(x, M=jawab) return(invisible(jawab)) } diffit <- function(x) { y <- diff(x) return(c(y[1], y)) } perspvis <- function(X, ..., M=NULL) { stopifnot(is.im(X)) ## determine perspective matrix if(is.null(M)) M <- persp(X, ...) 
## project the coordinates ## onto (x,y) plane of plot and z axis pointing out of it xy <- rasterxy.im(X, drop=TRUE) z <- X[drop=TRUE] xyz <- cbind(xy, z) v <- cbind(xyz, 1) %*% M pxyz <- v[,1:3]/v[,4] px <- pxyz[,1] py <- pxyz[,2] pz <- pxyz[,3] ## determine greatest possible difference in 'depth' in one pixel step PZ <- as.matrix(X) ok <- !is.na(PZ) PZ[ok] <- pz maxslip <- max(0, abs(apply(PZ, 1, diff)), abs(apply(PZ, 2, diff)), na.rm=TRUE) ## determine which pixels are in front d <- ceiling(dim(X)/2) jx <- cut(px, breaks=d[2]) iy <- cut(py, breaks=d[1]) zmax <- tapply(pz, list(iy,jx), max) isvis <- infront <- (pz > zmax[cbind(iy,jx)] - 2 * maxslip) ## if(TRUE) { ## Additionally check whether unit normal to surface is pointing to viewer Xmat <- as.matrix(X) dzdx <- cbind(0, t(apply(Xmat, 1, diff)))/X$xstep dzdy <- rbind(0, apply(Xmat, 2, diff))/X$ystep dzdx <- as.vector(dzdx[ok]) dzdy <- as.vector(dzdy[ok]) ## unscaled normal is (-dzdx, -dzdy, 1) if(FALSE) { ## THIS DOESN'T WORK - not sure why. 
## rescale so that length is half diameter of pixel fac <- sqrt(X$xstep^2 + X$ystep^2)/(2 * sqrt(dzdx^2+dzdy^2+1)) ## add to spatial coordinates xyzplus <- xyz + fac * cbind(-dzdx, -dzdy, 1) ## transform vplus <- cbind(xyzplus, 1) %*% M pplus <- vplus[,1:3]/vplus[,4] ## determine whether normal is pointing toward viewer deltaz <- pplus[,3] - pz isvis <- infront & (deltaz > 0) } else { theta <- atan2(M[2,1],M[1,1]) + pi/2 phi <- - atan2(M[3,3], M[3,2]) ## check agreement ## cat(paste("Guess: theta=", theta * 180/pi, "\n")) ## cat(paste("Guess: phi=", phi * 180/pi, "\n")) ## view vector viewer <- cos(phi) * c(cos(theta), sin(theta), 0) + c(0, 0, sin(phi)) ## inner product dotprod <- -dzdx * viewer[1] - dzdy * viewer[2] + viewer[3] isvis <- infront & (dotprod < 0) } } ## put into image Y <- eval.im(X > 0) Y[] <- isvis ## replace 'NA' by 'FALSE' if(anyNA(Y)) Y <- as.im(Y, na.replace=FALSE) return(Y) } persp.im }) perspPoints <- function(x, y=NULL, ..., Z, M) { xy <- xy.coords(x, y) stopifnot(is.im(Z)) X <- as.ppp(xy, W=Frame(Z)) if(!(is.matrix(M) && all(dim(M) == 4))) stop("M should be a 4 x 4 matrix, returned from persp()") V <- attr(M, "visible") if(is.null(V)) { warning(paste("M does not contain visibility information;", "it should be recomputed by persp() with visible=TRUE")) } else { ## restrict to visible points VX <- V[X, drop=FALSE] VX[is.na(VX)] <- FALSE X <- X[VX] } #' determine heights ZX <- Z[X, drop=FALSE] # may contain NA #' transform and plot points(trans3d(X$x, X$y, ZX, M), ...) 
} perspSegments <- local({ perspSegments <- function(x0, y0=NULL, x1=NULL, y1=NULL, ..., Z, M) { stopifnot(is.im(Z)) if(!(is.matrix(M) && all(dim(M) == 4))) stop("M should be a 4 x 4 matrix, returned from persp()") V <- attr(M, "visible") if(is.null(V)) warning(paste("M does not contain visibility information;", "it should be recomputed by persp() with visible=TRUE")) if(is.psp(X <- x0) && is.null(y0) && is.null(x1) && is.null(y1)) { eX <- X$ends # nX <- nrow(eX) } else { # nX <- length(x0) check.nvector(x0, naok=TRUE) check.nvector(y0, naok=TRUE) check.nvector(x1, naok=TRUE) check.nvector(y1, naok=TRUE) eX <- cbind(x0, y0, x1, y1) } if(is.null(V)) { Y <- eX } else { ## chop segments to length of single pixel eps <- with(Z, min(xstep,ystep)) Y <- do.call(rbind, lapply(as.data.frame(t(eX)), chopsegment, eps=eps)) ## determine which segments are visible yleft <- list(x=Y[,1], y=Y[,2]) yright <- list(x=Y[,3], y=Y[,4]) ok <- V[yleft, drop=FALSE] & V[yright, drop=FALSE] ok[is.na(ok)] <- FALSE Y <- Y[ok, ,drop=FALSE] } if(nrow(Y) == 0) return(invisible(NULL)) ## map to projected plane x0y0 <- trans3d(Y[,1], Y[,2], Z[list(x=Y[,1],y=Y[,2]), drop=FALSE], M) x1y1 <- trans3d(Y[,3], Y[,4], Z[list(x=Y[,3],y=Y[,4]), drop=FALSE], M) segments(x0y0$x, x0y0$y, x1y1$x, x1y1$y, ...) } chopsegment <- function(x, eps) { len2 <- (x[3] - x[1])^2 + (x[4] - x[2])^2 if(len2 <= eps^2) return(x) n <- ceiling(sqrt(len2)/eps) b <- (1:n)/n a <- (0:(n-1))/n return(cbind(x[1] + a * (x[3]-x[1]), x[2] + a * (x[4]-x[2]), x[1] + b * (x[3]-x[1]), x[2] + b * (x[4]-x[2]))) } perspSegments }) perspLines <- function(x, y=NULL, ..., Z, M) { xy <- xy.coords(x, y) n <- length(xy$x) perspSegments(x[-n], y[-n], x[-1], y[-1], Z=Z, M=M, ...) 
} perspContour <- function(Z, M, ..., nlevels=10, levels=pretty(range(Z), nlevels)) { cl <- contourLines(x=Z$xcol, y=Z$yrow, z=t(Z$v), nlevels=nlevels, levels=levels) for(i in seq_along(cl)) { cli <- cl[[i]] perspLines(cli$x, cli$y, ..., Z=Z, M=M) } invisible(NULL) } spatstat/R/morisita.R0000755000176200001440000000237113115271120014267 0ustar liggesusers# # morisita.R # # $Revision: 1.2 $ $Date: 2016/02/11 10:17:12 $ # miplot <- function(X, ...) { Xname <- short.deparse(substitute(X)) X <- as.ppp(X) W <- X$window N <- X$n if(W$type != "rectangle") stop("Window of X is not a rectangle - Morisita index undefined") a <- min(diff(W$xrange), diff(W$yrange)) maxnquad <- floor(a/mean(nndist(X))) if(maxnquad <= 1) stop("Not enough points for a Morisita plot") mindex <- numeric(maxnquad) for(nquad in 1:maxnquad) { qq <- quadratcount(X, nquad, nquad) tt <- as.vector(as.table(qq)) mindex[nquad] <- length(tt) * sum(tt * (tt-1))/(N*(N-1)) } quadsize <- diameter(W)/(1:maxnquad) ok <- (quadsize <= a) quadsize <- quadsize[ok] mindex <- mindex[ok] unitinfo <- summary(unitname(W))$axis do.call(plot.default, resolve.defaults(list(quadsize, mindex), list(...), list(xlim=c(0,max(quadsize)), ylim=c(0,max(1, mindex)), xlab=paste("Diameter of quadrat", unitinfo), ylab="Morisita index", main=paste("Morisita plot for", Xname)))) abline(h=1, lty=2) return(invisible(NULL)) } spatstat/R/hackglmm.R0000755000176200001440000000656413115271120014233 0ustar liggesusers# hackglmm.R # $Revision: 1.5 $ $Date: 2017/02/07 07:35:32 $ hackglmmPQL <- function (fixed, random, family, data, correlation, weights, control, niter = 10, verbose = TRUE, subset, ..., reltol=1e-3) { if (is.character(family)) family <- get(family) if (is.function(family)) family <- family() if (is.null(family$family)) { print(family) stop("'family' not recognized") } m <- mcall <- Call <- match.call() nm <- names(m)[-1L] keep <- is.element(nm, c("weights", "data", "subset", "na.action")) for (i in nm[!keep]) m[[i]] <- NULL allvars 
<- if (is.list(random)) allvars <- c(all.vars(fixed), names(random), unlist(lapply(random, function(x) all.vars(formula(x))))) else c(all.vars(fixed), all.vars(random)) Terms <- if (missing(data)) terms(fixed) else terms(fixed, data = data) off <- attr(Terms, "offset") if (length(off <- attr(Terms, "offset"))) allvars <- c(allvars, as.character(attr(Terms, "variables"))[off + 1]) Call$fixed <- eval(fixed) Call$random <- eval(random) m$formula <- as.formula(paste("~", paste(allvars, collapse = "+"))) environment(m$formula) <- environment(fixed) m$drop.unused.levels <- TRUE m[[1L]] <- as.name("model.frame") mf <- eval.parent(m) off <- model.offset(mf) if (is.null(off)) off <- 0 w <- model.weights(mf) if (is.null(w)) w <- rep(1, nrow(mf)) wts <- mf$wts <- w if(missing(subset)) fit0 <- glm(formula = fixed, family = family, data = mf, weights = wts, ...) else { # hack to get around peculiar problem with `subset' argument glmmsubset <- eval(expression(subset), data) if(length(glmmsubset) != nrow(mf)) { if(sum(glmmsubset) != nrow(mf)) stop("Internal error: subset vector is wrong length") message("(Fixing subset index..)") glmmsubset <- glmmsubset[glmmsubset] } mf$glmmsubset <- glmmsubset fit0 <- glm(formula = fixed, family = family, data = mf, weights = wts, subset=glmmsubset, ...) 
} w <- fit0$prior.weights eta <- fit0$linear.predictor zz <- eta + fit0$residuals - off wz <- fit0$weights fam <- family nm <- names(mcall)[-1L] keep <- is.element(nm, c("fixed", "random", "data", "subset", "na.action", "control")) for (i in nm[!keep]) mcall[[i]] <- NULL fixed[[2L]] <- quote(zz) mcall[["fixed"]] <- fixed mcall[[1L]] <- as.name("lme") mcall$random <- random mcall$method <- "ML" if (!missing(correlation)) mcall$correlation <- correlation mcall$weights <- quote(varFixed(~invwt)) mf$zz <- zz mf$invwt <- 1/wz mcall$data <- mf for (i in 1:niter) { if (verbose) cat("iteration", i, "\n") fit <- eval(mcall) etaold <- eta eta <- fitted(fit) + off if (sum((eta - etaold)^2) < (reltol^2) * sum(eta^2)) break mu <- fam$linkinv(eta) mu.eta.val <- fam$mu.eta(eta) mf$zz <- eta + (fit0$y - mu)/mu.eta.val - off wz <- w * mu.eta.val^2/fam$variance(mu) mf$invwt <- 1/wz mcall$data <- mf } attributes(fit$logLik) <- NULL fit$call <- Call fit$family <- family fit$logLik <- as.numeric(NA) oldClass(fit) <- c("glmmPQL", oldClass(fit)) fit } spatstat/R/rmh.R0000755000176200001440000000010713115271120013221 0ustar liggesusers# # generic rmh rmh <- function(model, ...){ UseMethod("rmh") } spatstat/R/Math.imlist.R0000644000176200001440000000156613144467431014651 0ustar liggesusers## ## Math.imlist.R ## ## $Revision: 1.4 $ $Date: 2017/08/15 03:46:57 $ ## Math.imlist <- function(x, ...){ solapply(x, .Generic, ...) 
} Complex.imlist <- function(z){ solapply(z, .Generic) } Summary.imlist <- function(..., na.rm=TRUE){ argh <- expandSpecialLists(list(...)) if(length(names(argh)) > 0) { isim <- sapply(argh, is.im) names(argh)[isim] <- "" } do.call(.Generic, c(argh, list(na.rm=na.rm))) } Ops.imlist <- function(e1,e2=NULL){ if(nargs() == 1L) { #' unary operation return(solapply(e1, .Generic)) } #' binary operation if(inherits(e2, "imlist")) { #' two image lists - must have equal length v <- mapply(.Generic, unname(e1), unname(e2), SIMPLIFY=FALSE) names(v) <- names(e1) return(as.solist(v)) } #' other binary operation e.g. imlist + constant, imlist + im return(solapply(e1, .Generic, e2=e2)) } spatstat/R/weights.R0000755000176200001440000002344113115271120014113 0ustar liggesusers# # weights.S # # Utilities for computing quadrature weights # # $Revision: 4.39 $ $Date: 2017/01/18 06:28:17 $ # # # Main functions: # gridweights() Divide the window frame into a regular nx * ny # grid of rectangular tiles. Given an arbitrary # pattern of (data + dummy) points derive the # 'counting weights'. # # dirichletWeights() Compute the areas of the tiles of the # Dirichlet tessellation generated by the # given pattern of (data+dummy) points, # restricted to the window. # # Auxiliary functions: # # countingweights() compute the counting weights # for a GENERIC tiling scheme and an arbitrary # pattern of (data + dummy) points, # given the tile areas and the information # that point number k belongs to tile number id[k]. # # # gridindex() Divide the window frame into a regular nx * ny # grid of rectangular tiles. # Compute tile membership for arbitrary x,y. 
# # grid1index() 1-dimensional analogue of gridindex() # # #------------------------------------------------------------------- countingweights <- function(id, areas, check=TRUE) { # # id: cell indices of n points # (length n, values in 1:k) # # areas: areas of k cells # (length k) # id <- as.integer(id) fid <- factor(id, levels=seq_along(areas)) counts <- table(fid) w <- areas[id] / counts[id] # ensures denominator > 0 w <- as.vector(w) # # that's it; but check for funny business # if(check) { zerocount <- (counts == 0) zeroarea <- (areas == 0) if(any(!zeroarea & zerocount)) { lostfrac <- 1 - sum(w)/sum(areas) lostpc <- round(100 * lostfrac, 1) if(lostpc >= 1) warning(paste("some tiles with positive area", "do not contain any quadrature points:", "relative error =", paste0(lostpc, "%"))) } if(any(!zerocount & zeroarea)) { warning("Some tiles with zero area contain quadrature points") warning("Some weights are zero") attr(w, "zeroes") <- zeroarea[id] } } # names(w) <- NULL return(w) } gridindex <- function(x, y, xrange, yrange, nx, ny) { # # The box with dimensions xrange, yrange is divided # into nx * ny cells. # # For each point (x[i], y[i]) compute the index (ix, iy) # of the cell containing the point. # ix <- grid1index(x, xrange, nx) iy <- grid1index(y, yrange, ny) # return(list(ix=ix, iy=iy, index=as.integer((iy-1) * nx + ix))) } grid1index <- function(x, xrange, nx) { i <- ceiling( nx * (x - xrange[1])/diff(xrange)) i <- pmax.int(1, i) i <- pmin.int(i, nx) i } gridweights <- function(X, ntile=NULL, ..., window=NULL, verbose=FALSE, npix=NULL, areas=NULL) { # # Compute counting weights based on a regular tessellation of the # window frame into ntile[1] * ntile[2] rectangular tiles. # # Arguments X and (optionally) 'window' are interpreted as a # point pattern. # # The window frame is divided into a regular ntile[1] * ntile[2] grid # of rectangular tiles. The counting weights based on this tessellation # are computed for the points (x, y) of the pattern. 
# # 'npix' determines the dimensions of the pixel raster used to # approximate tile areas. X <- as.ppp(X, window) x <- X$x y <- X$y win <- X$window # determine number of tiles if(is.null(ntile)) ntile <- default.ntile(X) if(length(ntile) == 1) ntile <- rep.int(ntile, 2) nx <- ntile[1] ny <- ntile[2] if(verbose) cat(paste("grid weights for a", nx, "x", ny, "grid of tiles\n")) ## determine pixel resolution in case it is required if(!is.null(npix)) { npix <- ensure2vector(npix) } else { npix <- pmax(rev(spatstat.options("npixel")), c(nx, ny)) if(is.mask(win)) npix <- pmax(npix, rev(dim(win))) } if(is.null(areas)) { # compute tile areas switch(win$type, rectangle = { nxy <- nx * ny tilearea <- area(win)/nxy areas <- rep.int(tilearea, nxy) zeroareas <- rep(FALSE, nxy) }, polygonal = { areamat <- polytileareaEngine(win, win$xrange, win$yrange, nx, ny) ## convert from 'im' to 'gridindex' ordering areas <- as.vector(t(areamat)) zeroareas <- (areas == 0) if(verbose) splat("Split polygonal window of area", area(win), "into", nx, "x", ny, "grid of tiles", "of total area", sum(areas)) }, mask = { win <- as.mask(win, dimyx=rev(npix)) if(verbose) splat("Converting mask dimensions to", npix[1], "x", npix[2], "pixels") ## extract pixel coordinates inside window rxy <- rasterxy.mask(win, drop=TRUE) xx <- rxy$x yy <- rxy$y ## classify all pixels into tiles pixelid <- gridindex(xx, yy, win$xrange, win$yrange, nx, ny)$index pixelid <- factor(pixelid, levels=seq_len(nx * ny)) ## compute digital areas of tiles tilepixels <- as.vector(table(pixelid)) pixelarea <- win$xstep * win$ystep areas <- tilepixels * pixelarea zeroareas <- (tilepixels == 0) } ) } else zeroareas <- (areas == 0) id <- gridindex(x, y, win$xrange, win$yrange, nx, ny)$index if(win$type != "rectangle" && any(uhoh <- zeroareas[id])) { # this can happen: the tile has digital area zero # but contains a data/dummy point slivers <- unique(id[uhoh]) switch(win$type, mask = { offence <- "digital area zero" epsa <- pixelarea/2 
}, polygonal = { offence <- "very small area" epsa <- min(areas[!zeroareas])/10 }) areas[slivers] <- epsa nsliver <- length(slivers) extraarea <- nsliver * epsa extrafrac <- extraarea/area(win) if(verbose || extrafrac > 0.01) { splat(nsliver, ngettext(nsliver, "tile", "tiles"), "of", offence, ngettext(nsliver, "was", "were"), "given nominal area", signif(epsa, 3), "increasing the total area by", signif(extraarea, 5), "square units or", paste0(round(100 * extrafrac, 1), "% of total area")) if(extrafrac > 0.01) warning(paste("Repairing tiles with", offence, "caused a", paste0(round(100 * extrafrac), "%"), "change in total area"), call.=FALSE) } } # compute counting weights w <- countingweights(id, areas) # attach information about weight construction parameters attr(w, "weight.parameters") <- list(method="grid", ntile=ntile, npix=npix, areas=areas) return(w) } dirichlet.weights <- function(...) { .Deprecated("dirichletWeights", package="spatstat") dirichletWeights(...) } dirichletWeights <- function(X, window = NULL, exact=TRUE, ...) { #' #' Compute weights based on Dirichlet tessellation of the window #' induced by the point pattern X. #' The weights are just the tile areas. #' #' NOTE: X should contain both data and dummy points, #' if you need these weights for the B-T-B method. #' #' Arguments X and (optionally) 'window' are interpreted as a #' point pattern. #' #' If the window is a rectangle, we invoke Rolf Turner's "deldir" #' package to compute the areas of the tiles of the Dirichlet #' tessellation of the window frame induced by the points. #' [NOTE: the functionality of deldir to create dummy points #' is NOT used. ] #' if exact=TRUE compute the exact areas, using "deldir" #' if exact=FALSE compute the digital areas using exactdt() #' #' If the window is a mask, we compute the digital area of #' each tile of the Dirichlet tessellation by counting pixels. 
#' #' #' X <- as.ppp(X, window) if(!exact && is.polygonal(Window(X))) Window(X) <- as.mask(Window(X)) #' compute tile areas w <- dirichletAreas(X) #' zero areas can occur due to discretisation or weird geometry zeroes <- (w == 0) if(any(zeroes)) { #' compute weights for subset nX <- npoints(X) Xnew <- X[!zeroes] wnew <- dirichletAreas(Xnew) w <- numeric(nX) w[!zeroes] <- wnew #' map deleted points to nearest retained point jj <- nncross(X[zeroes], Xnew, what="which") #' map retained points to themselves ii <- Xseq <- seq_len(nX) ii[zeroes] <- (ii[!zeroes])[jj] #' redistribute weights nshare <- table(factor(ii, levels=Xseq)) w <- w[ii]/nshare[ii] } #' attach information about weight construction parameters attr(w, "weight.parameters") <- list(method="dirichlet", exact=exact) return(w) } default.ntile <- function(X) { # default number of tiles (n x n) for tile weights # when data and dummy points are X X <- as.ppp(X) guess.ngrid <- 10 * floor(sqrt(X$n)/10) max(5, guess.ngrid/2) } spatstat/R/bw.ppl.R0000644000176200001440000000177413115225157013656 0ustar liggesusers# # bw.ppl.R # # Likelihood cross-validation for kernel smoother of point pattern # # $Revision: 1.7 $ $Date: 2017/01/28 06:30:21 $ # bw.ppl <- function(X, ..., srange=NULL, ns=16, sigma=NULL, weights=NULL) { stopifnot(is.ppp(X)) if(!is.null(sigma)) { stopifnot(is.numeric(sigma) && is.vector(sigma)) ns <- length(sigma) } else { if(!is.null(srange)) check.range(srange) else { nnd <- nndist(X) srange <- c(min(nnd[nnd > 0]), diameter(as.owin(X))/2) } sigma <- geomseq(from=srange[1L], to=srange[2L], length.out=ns) } cv <- numeric(ns) for(i in 1:ns) { si <- sigma[i] lamx <- density(X, sigma=si, at="points", leaveoneout=TRUE, weights=weights) lam <- density(X, sigma=si, weights=weights) cv[i] <- sum(log(lamx)) - integral.im(lam) } result <- bw.optim(cv, sigma, iopt=which.max(cv), creator="bw.ppl", criterion="Likelihood Cross-Validation", unitname=unitname(X)) return(result) } 
spatstat/R/cdf.test.mppm.R0000755000176200001440000002202413115271120015117 0ustar liggesusers# # cdf.test.mppm.R # # $Revision: 1.16 $ $Date: 2016/04/14 02:34:50 $ # cdf.test.mppm <- local({ allpixelvalues <- function(z) { as.vector(as.matrix(z)) } xcoord <- function(x, y) { x } ycoord <- function(x, y) { y } cdf.test.mppm <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., nsim=19, verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE) { modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) test <- match.arg(test) result <- PoissonTest(model, covariate, test=test, ..., verbose=FALSE, interpolate=interpolate, fast=fast, jitter=jitter, modelname=modelname, covname=covname, gibbsok=TRUE) if(is.poisson(model)) return(result) result$poisson.p.value <- pobs <- result$p.value result$poisson.statistic <- tobs <- result$statistic ## Simulate ... Sims <- simulate(model, nsim=nsim, ..., verbose=verbose) if(verbose) cat("Processing ...") state <- list() Yname <- model$Info$Yname Data <- eval(getCall(model)$data, envir=environment(terms(model))) sim.pvals <- sim.stats <- numeric(nsim) for(isim in 1:nsim) { Data[,Yname] <- Sims[,isim,drop=FALSE] modeli <- update(model, data=Data) Ai <- PoissonTest(modeli, covariate, test=test, ..., verbose=FALSE, interpolate=interpolate, fast=fast, jitter=jitter, modelname=modelname, covname=covname, gibbsok=TRUE) sim.pvals[isim] <- Ai$p.value sim.stats[isim] <- Ai$statistic if(verbose) state <- progressreport(isim, nsim, state=state) } ### COMPUTE p-value and pack up result$sim.pvals <- sim.pvals result$sim.stats <- sim.stats ## Monte Carlo p-value ## For tied p-values, first compare values of test statistics ## (because p = 0 may occur due to rounding) ## otherwise resolve ties by randomisation nless <- sum(sim.pvals < pobs) nplus <- sum(sim.pvals == pobs & sim.stats > tobs) nties <- sum(sim.pvals == pobs & sim.stats == tobs) result$p.value <- (nless + nplus + sample(0:nties, 1L))/(nsim+1L) 
## result$method <- c("Monte Carlo test of fitted Gibbs model", paste("based on", nsim, "repetitions of"), sub("Spatial", "spatial", result$method)) return(result) } PoissonTest <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE, gibbsok=FALSE, modelname, covname) { if(missing(modelname)) modelname <- short.deparse(substitute(model)) if(missing(covname)) covname <- short.deparse(substitute(covariate)) test <- match.arg(test) stopifnot(is.mppm(model)) if(!gibbsok && !is.poisson.mppm(model)) stop("Only implemented for Poisson models") ## extract things from model data <- model$data npat <- model$npat Y <- data.mppm(model) if(fast) { ## extract original quadrature schemes and convert to point patterns QQ <- quad.mppm(model) PP <- lapply(QQ, union.quad) Zweights <- lapply(QQ, w.quad) } else Zweights <- list() ## `evaluate' covariate if(verbose) cat("Extracting covariate...") if(identical(covariate, "x")) covariate <- xcoord if(identical(covariate, "y")) covariate <- ycoord if(is.character(covariate)) { ## extract covariate with this name from data used to fit model if(!(covariate %in% names(data))) stop(paste("Model does not contain a covariate called", dQuote(covariate))) covname <- covariate covariate <- data[, covname, drop=TRUE] } else if(inherits(covariate, c("listof", "anylist"))) { if(length(covariate) != npat) stop(paste("Length of list of covariate values does not match", "number of point patterns in data of original model")) } else if(is.hyperframe(covariate)) { ## extract first column covariate <- covariate[,1L, drop=TRUE] if(length(covariate) != npat) stop(paste("Number of rows of covariate hyperframe does not match", "number of point patterns in data of original model")) } else if(is.function(covariate) || is.im(covariate)) { ## replicate to make a list covariate <- as.anylist(rep(list(covariate), npat)) } else stop(paste("Format of argument", sQuote("covariates"), "not understood")) 
if(verbose) { cat("done.\nComputing statistics for each pattern...") pstate <- list() } ## compile information for test from each row Zvalues <- ZX <- Win <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) XI <- Y[[i]] if(fast) PI <- PP[[i]] else WI <- XI$window covariateI <- covariate[[i]] if(is.im(covariateI)) { type <- "im" ## evaluate at data points ZXI <- if(interpolate) interp.im(covariateI, XI$x, XI$y) else covariateI[XI] if(fast) { ## covariate values for quadrature points ZI <- covariateI[PI] } else { ## covariate image inside window ZI <- covariateI[WI, drop=FALSE] ## corresponding mask WI <- as.owin(ZI) ## pixel areas Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim)) } } else if(is.function(covariateI)) { type <- "function" ## evaluate exactly at data points ZXI <- covariateI(XI$x, XI$y) if(fast) { ## covariate values for quadrature points ZI <- covariateI(PI$x, PI$y) } else { ## window WI <- as.mask(WI) ## covariate image inside window ZI <- as.im(covariateI, W=WI) ## pixel areas Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim)) } } else stop("covariate should be an image or a function(x,y)") ZX[[i]] <- ZXI if(fast) Zvalues[[i]] <- ZI else { Win[[i]] <- WI ## values of covariate in window Zvalues[[i]] <- allpixelvalues(ZI) } } if(verbose) cat("done.\nComputing predicted intensity...") ## compute predicted intensities trend <- if(fast) fitted(model, type="trend") else predict(model, type="trend", locations=Win, verbose=verbose)$trend if(verbose) cat("done.\nExtracting...") ## extract relevant values lambda <- if(fast) trend else lapply(trend, allpixelvalues) if(verbose) cat("done.\nPerforming test...") ## flatten to vectors lambda <- unlist(lambda) Zweights <- unlist(Zweights) Zvalues <- unlist(Zvalues) ZX <- unlist(ZX) if(length(lambda) != length(Zvalues)) stop("Internal error: mismatch between predicted values and Z values") if(length(Zvalues) != length(Zweights)) stop("Internal error: mismatch between 
Z values and Z weights") lambda <- lambda * Zweights ## form weighted cdf of Z values in window FZ <- ewcdf(Zvalues, lambda/sum(lambda)) ## Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) if(min(xxx) > min(ZX)) { xxx <- c(min(ZX), xxx) yyy <- c(0, yyy) } if(max(xxx) < max(ZX)) { xxx <- c(xxx, max(ZX)) yyy <- c(yyy, 1) } ## make piecewise linear approximation of cdf FZ <- approxfun(xxx, yyy, rule=2) ## evaluate at data points if(!jitter) U <- FZ(ZX) else { ## jitter observed values to avoid ties grain <- min(diff(sort(unique(ZX))))/8 jit <- runif(length(ZX), min=0, max=grain) sgn <- sample(c(-1L,1L), length(ZX), replace=TRUE) sgn[ZX==min(xxx)] <- 1L sgn[ZX==max(xxx)] <- -1L U <- FZ(ZX + sgn*jit) } ## Test uniformity result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") result$method <- paste("Spatial", testname, "test") result$data.name <- paste("predicted cdf of covariate", sQuote(paste(covname, collapse="")), "evaluated at data points of", sQuote(modelname)) if(verbose) cat("done.\n") class(result) <- c("cdftest", class(result)) attr(result, "prep") <- list(Zvalues = Zvalues, lambda = lambda, ZX = ZX, FZ = FZ, U = U, type = type) attr(result, "info") <- list(modelname = modelname, covname = covname) return(result) } cdf.test.mppm }) spatstat/R/fitted.mppm.R0000755000176200001440000000344213115271120014667 0ustar liggesusers# # fitted.mppm.R # # method for 'fitted' for mppm objects # # $Revision: 1.2 $ $Date: 2014/11/10 07:42:09 $ # fitted.mppm <- function(object, ..., type="lambda", dataonly=FALSE) { # sumry <- summary(object) type <- pickoption("type", type, c(lambda="lambda", cif ="lambda", trend ="trend"), multi=FALSE, exact=FALSE) # extract fitted model object and data frame glmfit <- object$Fit$FIT glmdata <- object$Fit$moadf # interaction names Vnames <- 
unlist(object$Fit$Vnamelist) interacting <- (length(Vnames) > 0) # Modification of `glmdata' may be required if(interacting) switch(type, trend={ # zero the interaction statistics glmdata[ , Vnames] <- 0 }, lambda={ # Find any dummy points with zero conditional intensity forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf) # exclude from predict.glm glmdata <- glmdata[!forbid, ] }) # Compute predicted [conditional] intensity values values <- predict(glmfit, newdata=glmdata, type="response") # Note: the `newdata' argument is necessary in order to obtain # predictions at all quadrature points. If it is omitted then # we would only get predictions at the quadrature points j # where glmdata$SUBSET[j]=TRUE. if(interacting && type=="lambda") { # reinsert zeroes vals <- numeric(length(forbid)) vals[forbid] <- 0 vals[!forbid] <- values values <- vals } names(values) <- NULL id <- glmdata$id if(dataonly) { # extract only data values isdata <- (glmdata$.mpl.Y != 0) values <- values[isdata] id <- id[isdata] } return(split(values, id)) } spatstat/R/pointsonlines.R0000755000176200001440000000273613115271120015351 0ustar liggesusers# # pointsonlines.R # # place points at regular intervals along line segments # # $Revision: 1.7 $ $Date: 2014/11/10 11:21:02 $ # pointsOnLines <- function(X, eps=NULL, np=1000, shortok=TRUE) { stopifnot(is.psp(X)) len <- lengths.psp(X) nseg <- length(len) if(is.null(eps)) { stopifnot(is.numeric(np) && length(np) == 1) stopifnot(is.finite(np) && np > 0) eps <- sum(len)/np } else { stopifnot(is.numeric(eps) && length(eps) == 1) stopifnot(is.finite(eps) && eps > 0) } # initialise Xdf <- as.data.frame(X) xmid <- with(Xdf, (x0+x1)/2) ymid <- with(Xdf, (y0+y1)/2) # handle very short segments # allsegs <- 1:nseg if(any(short <- (len <= eps)) && shortok) { # very short segments: use midpoints Z <- data.frame(x = xmid[short], y = ymid[short]) } else Z <- data.frame(x=numeric(0), y=numeric(0)) # handle other segments for(i in (1:nseg)[!short]) { # divide 
segment into pieces of length eps # with shorter bits at each end leni <- len[i] nwhole <- floor(leni/eps) if(leni/eps - nwhole < 0.5 && nwhole > 2) nwhole <- nwhole - 1 rump <- (leni - nwhole * eps)/2 brks <- c(0, rump + (0:nwhole) * eps, leni) nbrks <- length(brks) # points at middle of each piece ss <- (brks[-1] + brks[-nbrks])/2 x <- with(Xdf, x0[i] + (ss/leni) * (x1[i]-x0[i])) y <- with(Xdf, y0[i] + (ss/leni) * (y1[i]-y0[i])) Z <- rbind(Z, data.frame(x=x, y=y)) } Z <- as.ppp(Z, W=X$window) return(Z) } spatstat/R/indicator.R0000644000176200001440000000076013115225157014422 0ustar liggesusers#' indicator function for window as.function.owin <- function(x, ...) { W <- x g <- function(x, y=NULL) { xy <- xy.coords(x, y) inside.owin(xy$x, xy$y, W) } class(g) <- c("indicfun", class(g)) return(g) } print.indicfun <- function(x, ...) { W <- get("W", envir=environment(x)) nama <- names(formals(x)) splat(paste0("function", paren(paste(nama, collapse=",")))) splat("Indicator function (returns 1 inside window, 0 outside)") print(W) return(invisible(NULL)) } spatstat/R/mpl.R0000755000176200001440000015156013164653006013250 0ustar liggesusers# mpl.R # # $Revision: 5.209 $ $Date: 2017/10/03 09:15:36 $ # # mpl.engine() # Fit a point process model to a two-dimensional point pattern # by maximum pseudolikelihood # # mpl.prepare() # set up data for glm procedure # # ------------------------------------------------------------------- # "mpl" <- function(Q, trend = ~1, interaction = NULL, data = NULL, correction="border", rbord = 0, use.gam=FALSE) { .Deprecated("ppm", package="spatstat") ppm(Q=Q, trend=trend, interaction=interaction, covariates=data, correction=correction, rbord=rbord, use.gam=use.gam, method="mpl") } mpl.engine <- function(Q, trend = ~1, interaction = NULL, ..., covariates = NULL, subsetexpr = NULL, clipwin = NULL, covfunargs = list(), correction="border", rbord = 0, use.gam=FALSE, gcontrol=list(), GLM=NULL, GLMfamily=NULL, GLMcontrol=NULL, famille=NULL, 
forcefit=FALSE, nd = NULL, eps = eps, allcovar=FALSE, callstring="", precomputed=NULL, savecomputed=FALSE, preponly=FALSE, rename.intercept=TRUE, justQ = FALSE, weightfactor = NULL) { GLMname <- if(!missing(GLM)) short.deparse(substitute(GLM)) else NULL ## Extract precomputed data if available if(!is.null(precomputed$Q)) { Q <- precomputed$Q X <- precomputed$X P <- precomputed$U } else { ## Determine quadrature scheme from argument Q if(verifyclass(Q, "quad", fatal=FALSE)) { ## user-supplied quadrature scheme - validate it validate.quad(Q, fatal=TRUE, repair=FALSE, announce=TRUE) ## Extract data points X <- Q$data } else if(verifyclass(Q, "ppp", fatal = FALSE)) { ## point pattern - create default quadrature scheme X <- Q Q <- quadscheme(X, nd=nd, eps=eps, check=FALSE) } else stop("First argument Q should be a point pattern or a quadrature scheme") ## Data and dummy points together P <- union.quad(Q) } ## clip to subset? if(!is.null(clipwin)) { if(is.data.frame(covariates)) covariates <- covariates[inside.owin(P, w=clipwin), , drop=FALSE] Q <- Q[clipwin] X <- X[clipwin] P <- P[clipwin] } ## secret exit if(justQ) return(Q) ## computed <- if(savecomputed) list(X=X, Q=Q, U=P) else NULL ## ## Validate main arguments if(!is.null(trend) && !inherits(trend, "formula")) stop(paste("Argument", sQuote("trend"), "must be a formula")) if(!is.null(interaction) && !inherits(interaction, "interact")) stop(paste("Argument", sQuote("interaction"), "has incorrect format")) ## check.1.real(rbord, "In ppm") explain.ifnot(rbord >= 0, "In ppm") ## rbord applies only to border correction if(correction != "border") rbord <- 0 ## covfunargs <- as.list(covfunargs) ## ## Interpret the call if(is.null(trend)) { trend <- ~1 environment(trend) <- parent.frame() } want.trend <- !identical.formulae(trend, ~1) want.inter <- !is.null(interaction) && !is.null(interaction$family) ## Stamp with spatstat version number spv <- package_version(versionstring.spatstat()) the.version <- list(major=spv$major, 
minor=spv$minor, release=spv$patchlevel, date="$Date: 2017/10/03 09:15:36 $") if(want.inter) { ## ensure we're using the latest version of the interaction object if(outdated.interact(interaction)) interaction <- update(interaction) } ## if(!want.trend && !want.inter && !forcefit && !allcovar && is.null(subsetexpr)) { ## the model is the uniform Poisson process ## The MPLE (= MLE) can be evaluated directly npts <- npoints(X) W <- as.owin(X) if(correction == "border" && rbord > 0) { npts <- sum(bdist.points(X) >= rbord) areaW <- eroded.areas(W, rbord) } else { npts <- npoints(X) areaW <- area(W) } volume <- areaW * markspace.integral(X) lambda <- npts/volume ## fitted canonical coefficient co <- log(lambda) ## asymptotic variance of canonical coefficient varcov <- matrix(1/npts, 1, 1) fisher <- matrix(npts, 1, 1) se <- sqrt(1/npts) ## give names tag <- if(rename.intercept) "log(lambda)" else "(Intercept)" names(co) <- tag dimnames(varcov) <- dimnames(fisher) <- list(tag, tag) ## maximised log likelihood maxlogpl <- if(npts == 0) 0 else npts * (log(lambda) - 1) ## rslt <- list( method = "mpl", fitter = "exact", projected = FALSE, coef = co, trend = trend, interaction = NULL, fitin = fii(), Q = Q, maxlogpl = maxlogpl, satlogpl = NULL, internal = list(computed=computed, se=se), covariates = mpl.usable(covariates), ## covariates are still retained! covfunargs = covfunargs, subsetexpr = NULL, correction = correction, rbord = rbord, terms = terms(trend), fisher = fisher, varcov = varcov, version = the.version, problems = list()) class(rslt) <- "ppm" return(rslt) } ################# P r e p a r e D a t a ###################### prep <- mpl.prepare(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, "quadrature points", callstring, subsetexpr=subsetexpr, allcovar=allcovar, precomputed=precomputed, savecomputed=savecomputed, covfunargs=covfunargs, weightfactor=weightfactor, ...) 
## back door if(preponly) { ## exit now, returning prepared data frame and internal information prep$info <- list(want.trend=want.trend, want.inter=want.inter, correction=correction, rbord=rbord, interaction=interaction) return(prep) } fmla <- prep$fmla glmdata <- prep$glmdata problems <- prep$problems likelihood.is.zero <- prep$likelihood.is.zero is.identifiable <- prep$is.identifiable computed <- resolve.defaults(prep$computed, computed) IsOffset <- prep$IsOffset ## update covariates (if they were resolved from the environment) if(!is.null(prep$covariates)) covariates <- prep$covariates ################# F i t i t #################################### if(!is.identifiable) stop(paste("in", callstring, ":", problems$unidentifiable$print), call.=FALSE) ## to avoid problem with package checker .mpl.W <- glmdata$.mpl.W .mpl.SUBSET <- glmdata$.mpl.SUBSET ## determine algorithm control parameters if(is.null(gcontrol)) gcontrol <- list() else stopifnot(is.list(gcontrol)) gcontrol <- if(!is.null(GLMcontrol)) do.call(GLMcontrol, gcontrol) else if(use.gam) do.call(mgcv::gam.control, gcontrol) else do.call(stats::glm.control, gcontrol) ## Fit the generalized linear/additive model. if(is.null(GLM) && is.null(famille)) { ## the sanctioned technique, using `quasi' family if(want.trend && use.gam) { FIT <- gam(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) fittername <- "gam" } else { FIT <- glm(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol, model=FALSE) fittername <- "glm" } } else if(!is.null(GLM)) { ## alternative GLM fitting function or penalised GLM etc fam <- GLMfamily %orifnull% quasi(link="log", variance="mu") FIT <- GLM(fmla, family=fam, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) fittername <- GLMname } else { ## experimentation only! 
if(is.function(famille)) famille <- famille() stopifnot(inherits(famille, "family")) if(want.trend && use.gam) { FIT <- gam(fmla, family=famille, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) fittername <- "experimental" } else { FIT <- glm(fmla, family=famille, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol, model=FALSE) fittername <- "experimental" } } environment(FIT$terms) <- sys.frame(sys.nframe()) ################ I n t e r p r e t f i t ####################### ## Fitted coefficients co <- FIT$coef ## glm covariates W <- glmdata$.mpl.W SUBSET <- glmdata$.mpl.SUBSET Z <- is.data(Q) Vnames <- prep$Vnames ## saturated log pseudolikelihood satlogpl <- - (sum(log(W[Z & SUBSET])) + sum(Z & SUBSET)) ## attained value of max log pseudolikelihood maxlogpl <- if(likelihood.is.zero) -Inf else (satlogpl - deviance(FIT)/2) ## fitted interaction object fitin <- if(want.inter) fii(interaction, co, Vnames, IsOffset) else fii() unitname(fitin) <- unitname(X) ###################################################################### ## Clean up & return rslt <- list( method = "mpl", fitter = fittername, projected = FALSE, coef = co, trend = trend, interaction = if(want.inter) interaction else NULL, fitin = fitin, Q = Q, maxlogpl = maxlogpl, satlogpl = satlogpl, internal = list(glmfit=FIT, glmdata=glmdata, Vnames=Vnames, IsOffset=IsOffset, fmla=fmla, computed=computed, vnamebase=prep$vnamebase, vnameprefix=prep$vnameprefix), covariates = mpl.usable(covariates), covfunargs = covfunargs, subsetexpr = subsetexpr, correction = correction, rbord = rbord, terms = terms(trend), version = the.version, problems = problems) class(rslt) <- "ppm" return(rslt) } ########################################################################## ### ///////////////////////////////////////////////////////////////////// ########################################################################## mpl.prepare <- local({ mpl.prepare <- function(Q, X, P, trend, 
interaction, covariates, want.trend, want.inter, correction, rbord, Pname="quadrature points", callstring="", ..., subsetexpr=NULL, covfunargs=list(), allcovar=FALSE, precomputed=NULL, savecomputed=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL, warn.illegal=TRUE, warn.unidentifiable=TRUE, weightfactor=NULL, skip.border=FALSE) { ## Q: quadrature scheme ## X = data.quad(Q) ## P = union.quad(Q) if(missing(want.trend)) want.trend <- !is.null(trend) && !identical.formulae(trend, ~1) if(missing(want.inter)) want.inter <- !is.null(interaction) && !is.null(interaction$family) want.subset <- !is.null(subsetexpr) computed <- list() problems <- list() names.precomputed <- names(precomputed) likelihood.is.zero <- FALSE is.identifiable <- TRUE if(!missing(vnamebase)) { if(length(vnamebase) == 1) vnamebase <- rep.int(vnamebase, 2) if(!is.character(vnamebase) || length(vnamebase) != 2) stop("Internal error: illegal format of vnamebase") } if(!is.null(vnameprefix)) { if(!is.character(vnameprefix) || length(vnameprefix) != 1) stop("Internal error: illegal format of vnameprefix") } ################ C o m p u t e d a t a #################### ## Extract covariate values updatecovariates <- FALSE covariates.df <- NULL if(allcovar || want.trend || want.subset) { if("covariates.df" %in% names.precomputed) { covariates.df <- precomputed$covariates.df } else { if(!is.data.frame(covariates)) { ## names of 'external' covariates to be found covnames <- variablesinformula(trend) if(want.subset) covnames <- union(covnames, all.vars(subsetexpr)) if(allcovar) covnames <- union(covnames, names(covariates)) covnames <- setdiff(covnames, c("x", "y", "marks")) ## resolve 'external' covariates tenv <- environment(trend) covariates <- getdataobjects(covnames, tenv, covariates, fatal=TRUE) updatecovariates <- any(attr(covariates, "external")) } ## extract values of covariates ('internal' and 'external') covariates.df <- mpl.get.covariates(covariates, P, Pname, covfunargs) } 
if(savecomputed) computed$covariates.df <- covariates.df } ## Form the weights and the ``response variable''. if("dotmplbase" %in% names.precomputed) .mpl <- precomputed$dotmplbase else { nQ <- n.quad(Q) wQ <- w.quad(Q) mQ <- marks.quad(Q) ## is NULL for unmarked patterns zQ <- is.data(Q) yQ <- numeric(nQ) yQ[zQ] <- 1/wQ[zQ] zeroes <- attr(wQ, "zeroes") sQ <- if(is.null(zeroes)) rep.int(TRUE, nQ) else !zeroes ## tweak weights ONLY if(!is.null(weightfactor)) wQ <- wQ * weightfactor ## pack up .mpl <- list(W = wQ, Z = zQ, Y = yQ, MARKS = mQ, SUBSET = sQ) } if(savecomputed) computed$dotmplbase <- .mpl glmdata <- data.frame(.mpl.W = .mpl$W, .mpl.Y = .mpl$Y) ## count data and dummy points in specified subset izdat <- .mpl$Z[.mpl$SUBSET] ndata <- sum(izdat) # ndummy <- sum(!izdat) ## Determine the domain of integration for the pseudolikelihood. if(correction == "border") { bdP <- if("bdP" %in% names.precomputed) precomputed$bdP else bdist.points(P) if(savecomputed) computed$bdP <- bdP .mpl$DOMAIN <- (bdP >= rbord) } skip.border <- skip.border && (correction == "border") ####################### T r e n d ############################## internal.names <- c(".mpl.W", ".mpl.Y", ".mpl.Z", ".mpl.SUBSET", "SUBSET", ".mpl") reserved.names <- c("x", "y", "marks", internal.names) if(allcovar || want.trend || want.subset) { trendvariables <- variablesinformula(trend) ## Check for use of internal names in trend cc <- check.clashes(internal.names, trendvariables, "the model formula") if(cc != "") stop(cc) if(want.subset) { subsetvariables <- all.vars(subsetexpr) cc <- check.clashes(internal.names, trendvariables, "the subset expression") if(cc != "") stop(cc) trendvariables <- union(trendvariables, subsetvariables) } ## Standard variables if(allcovar || "x" %in% trendvariables) glmdata <- data.frame(glmdata, x=P$x) if(allcovar || "y" %in% trendvariables) glmdata <- data.frame(glmdata, y=P$y) if(("marks" %in% trendvariables) || !is.null(.mpl$MARKS)) { if(is.null(.mpl$MARKS)) 
stop("Model formula depends on marks, but data do not have marks", call.=FALSE) glmdata <- data.frame(glmdata, marks=.mpl$MARKS) } ## ## Check covariates if(!is.null(covariates.df)) { ## Check for duplication of reserved names cc <- check.clashes(reserved.names, names(covariates), sQuote("covariates")) if(cc != "") stop(cc) ## Take only those covariates that are named in the trend formula if(!allcovar) needed <- names(covariates.df) %in% trendvariables else needed <- rep.int(TRUE, ncol(covariates.df)) if(any(needed)) { covariates.needed <- covariates.df[, needed, drop=FALSE] ## Append to `glmdata' glmdata <- data.frame(glmdata,covariates.needed) ## Ignore any quadrature points that have NA's in the covariates nbg <- is.na(covariates.needed) if(any(nbg)) { offending <- matcolany(nbg) covnames.na <- names(covariates.needed)[offending] quadpoints.na <- matrowany(nbg) n.na <- sum(quadpoints.na) n.tot <- length(quadpoints.na) errate <- n.na/n.tot pcerror <- round(signif(100 * errate, 2), 2) complaint <- paste("Values of the", ngettext(length(covnames.na), "covariate", "covariates"), paste(sQuote(covnames.na), collapse=", "), "were NA or undefined at", paste(pcerror, "%", " (", n.na, " out of ", n.tot, ")", sep=""), "of the", Pname) warning(paste(complaint, ". Occurred while executing: ", callstring, sep=""), call. = FALSE) .mpl$SUBSET <- .mpl$SUBSET & !quadpoints.na details <- list(covnames.na = covnames.na, quadpoints.na = quadpoints.na, print = complaint) problems <- append(problems, list(na.covariates=details)) } } } } ###################### I n t e r a c t i o n #################### Vnames <- NULL IsOffset <- NULL if(want.inter) { ## Form the matrix of "regression variables" V. 
## The rows of V correspond to the rows of P (quadrature points) ## while the column(s) of V are the regression variables (log-potentials) E <- precomputed$E %orifnull% equalpairs.quad(Q) if(!skip.border) { ## usual case V <- evalInteraction(X, P, E, interaction, correction, ..., precomputed=precomputed, savecomputed=savecomputed) } else { ## evaluate only in eroded domain if(all(c("Esub", "Usub", "Retain") %in% names.precomputed)) { ## use precomputed data Psub <- precomputed$Usub Esub <- precomputed$Esub Retain <- precomputed$Retain } else { Retain <- .mpl$DOMAIN Psub <- P[Retain] ## map serial numbers in 'P[Retain]' to serial numbers in 'Psub' Pmap <- cumsum(Retain) keepE <- Retain[ E[,2] ] ## adjust equal pairs matrix Esub <- E[ keepE, , drop=FALSE] Esub[,2] <- Pmap[Esub[,2]] } ## call evaluator on reduced data ## with 'W=NULL' (currently detected only by AreaInter) if(all(c("X", "Q", "U") %in% names.precomputed)) { subcomputed <- resolve.defaults(list(E=Esub, U=Psub, Q=Q[Retain]), precomputed) } else subcomputed <- NULL V <- evalInteraction(X, Psub, Esub, interaction, correction, ..., W=NULL, precomputed=subcomputed, savecomputed=savecomputed) if(savecomputed) { computed$Usub <- Psub computed$Esub <- Esub computed$Retain <- Retain } } if(!is.matrix(V)) stop("interaction evaluator did not return a matrix") ## extract information about offsets IsOffset <- attr(V, "IsOffset") if(is.null(IsOffset)) IsOffset <- FALSE if(skip.border) { ## fill in the values in the border region with zeroes. Vnew <- matrix(0, nrow=npoints(P), ncol=ncol(V)) colnames(Vnew) <- colnames(V) Vnew[Retain, ] <- V ## retain attributes attr(Vnew, "IsOffset") <- IsOffset attr(Vnew, "computed") <- attr(V, "computed") attr(Vnew, "POT") <- attr(V, "POT") V <- Vnew } ## extract intermediate computation results if(savecomputed) computed <- resolve.defaults(attr(V, "computed"), computed) ## Augment data frame by appending the regression variables ## for interactions. 
## ## First determine the names of the variables ## Vnames <- dimnames(V)[[2]] if(is.null(Vnames)) { ## No names were provided for the columns of V. ## Give them default names. ## In ppm the names will be "Interaction" ## or "Interact.1", "Interact.2", ... ## In mppm an alternative tag will be specified by vnamebase. nc <- ncol(V) Vnames <- if(nc == 1) vnamebase[1] else paste0(vnamebase[2], 1:nc) dimnames(V) <- list(dimnames(V)[[1]], Vnames) } else if(!is.null(vnameprefix)) { ## Variable names were provided by the evaluator (e.g. MultiStrauss). ## Prefix the variable names by a string ## (typically required by mppm) Vnames <- paste(vnameprefix, Vnames, sep="") dimnames(V) <- list(dimnames(V)[[1]], Vnames) } ## Check the names are valid as column names in a dataframe okVnames <- make.names(Vnames, unique=TRUE) if(any(Vnames != okVnames)) { warning(paste("Names of interaction terms", "contained illegal characters;", "names have been repaired.")) Vnames <- okVnames } ## Check for name clashes between the interaction variables ## and the formula cc <- check.clashes(Vnames, termsinformula(trend), "model formula") if(cc != "") stop(cc) ## and with the variables in 'covariates' if(!is.null(covariates)) { cc <- check.clashes(Vnames, names(covariates), sQuote("covariates")) if(cc != "") stop(cc) } ## OK. append variables. glmdata <- data.frame(glmdata, V) ## check IsOffset matches Vnames if(length(IsOffset) != length(Vnames)) { if(length(IsOffset) == 1) IsOffset <- rep.int(IsOffset, length(Vnames)) else stop("Internal error: IsOffset has wrong length", call.=FALSE) } ## Keep only those quadrature points for which the ## conditional intensity is nonzero. 
##KEEP <- apply(V != -Inf, 1, all) .mpl$KEEP <- matrowall(V != -Inf) .mpl$SUBSET <- .mpl$SUBSET & .mpl$KEEP ## Check that there are at least some data and dummy points remaining datremain <- .mpl$Z[.mpl$SUBSET] somedat <- any(datremain) somedum <- !all(datremain) if(warn.unidentifiable && !(somedat && somedum)) { ## Model would be unidentifiable if it were fitted. ## Register problem is.identifiable <- FALSE if(ndata == 0) { complaint <- "model is unidentifiable: data pattern is empty" } else { offending <- !c(somedat, somedum) offending <- c("all data points", "all dummy points")[offending] offending <- paste(offending, collapse=" and ") complaint <- paste("model is unidentifiable:", offending, "have zero conditional intensity") } details <- list(data=!somedat, dummy=!somedum, print=complaint) problems <- append(problems, list(unidentifiable=details)) } ## check whether the model has zero likelihood: ## check whether ANY data points have zero conditional intensity if(any(.mpl$Z & !.mpl$KEEP)) { howmany <- sum(.mpl$Z & !.mpl$KEEP) complaint <- paste(howmany, "data point(s) are illegal", "(zero conditional intensity under the model)") details <- list(illegal=howmany, print=complaint) problems <- append(problems, list(zerolikelihood=details)) if(warn.illegal && is.identifiable) warning(paste(complaint, ". Occurred while executing: ", callstring, sep=""), call. 
= FALSE) likelihood.is.zero <- TRUE } } ################## S u b s e t ################### if(correction == "border") .mpl$SUBSET <- .mpl$SUBSET & .mpl$DOMAIN if(!is.null(subsetexpr)) { ## user-defined subset expression USER.SUBSET <- eval(subsetexpr, glmdata, environment(trend)) if(is.owin(USER.SUBSET)) { USER.SUBSET <- inside.owin(P$x, P$y, USER.SUBSET) } else if(is.im(USER.SUBSET)) { USER.SUBSET <- as.logical(USER.SUBSET[P, drop=FALSE]) if(anyNA(USER.SUBSET)) USER.SUBSET[is.na(USER.SUBSET)] <- FALSE } if(!(is.logical(USER.SUBSET) || is.numeric(USER.SUBSET))) stop("Argument 'subset' should yield logical values", call.=FALSE) if(anyNA(USER.SUBSET)) { USER.SUBSET[is.na(USER.SUBSET)] <- FALSE warning("NA values in argument 'subset' were changed to FALSE", call.=FALSE) } .mpl$SUBSET <- .mpl$SUBSET & USER.SUBSET } glmdata <- cbind(glmdata, data.frame(.mpl.SUBSET=.mpl$SUBSET, stringsAsFactors=FALSE)) ################# F o r m u l a ################################## if(!want.trend) trend <- ~1 trendpart <- paste(as.character(trend), collapse=" ") if(!want.inter) rhs <- trendpart else { VN <- Vnames ## enclose offset potentials in 'offset(.)' if(any(IsOffset)) VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="") rhs <- paste(c(trendpart, VN), collapse= "+") } fmla <- paste(".mpl.Y ", rhs) fmla <- as.formula(fmla) ## character string of trend formula (without Vnames) trendfmla <- paste(".mpl.Y ", trendpart) #### result <- list(fmla=fmla, trendfmla=trendfmla, covariates=if(updatecovariates) covariates else NULL, glmdata=glmdata, Vnames=Vnames, IsOffset=IsOffset, subsetexpr=subsetexpr, problems=problems, likelihood.is.zero=likelihood.is.zero, is.identifiable=is.identifiable, computed=computed, vnamebase=vnamebase, vnameprefix=vnameprefix) return(result) } check.clashes <- function(forbidden, offered, where) { name.match <- outer(forbidden, offered, "==") if(any(name.match)) { is.matched <- apply(name.match, 2, any) matched.names <- (offered)[is.matched] 
if(sum(is.matched) == 1) { return(paste("The variable",sQuote(matched.names), "in", where, "is a reserved name")) } else { return(paste("The variables", paste(sQuote(matched.names), collapse=", "), "in", where, "are reserved names")) } } return("") } mpl.prepare }) #################################################################### #################################################################### mpl.usable <- function(x) { ## silently remove covariates that don't have recognised format if(length(x) == 0 || is.data.frame(x)) return(x) isim <- sapply(x, is.im) isfun <- sapply(x, is.function) iswin <- sapply(x, is.owin) istess <- sapply(x, is.tess) isnum <- sapply(x, is.numeric) & (lengths(x) == 1) recognised <- isim | isfun | iswin | istess | isnum if(!all(recognised)) x <- x[recognised] return(x) } mpl.get.covariates <- local({ mpl.get.covariates <- function(covariates, locations, type="locations", covfunargs=list(), need.deriv=FALSE) { covargname <- sQuote(short.deparse(substitute(covariates))) locargname <- sQuote(short.deparse(substitute(locations))) if(is.null(covfunargs)) covfunargs <- list() ## x <- locations$x y <- locations$y if(is.null(x) || is.null(y)) { xy <- xy.coords(locations) x <- xy$x y <- xy$y } if(is.null(x) || is.null(y)) stop(paste("Can't interpret", locargname, "as x,y coordinates")) n <- length(x) if(is.data.frame(covariates)) { if(nrow(covariates) != n) stop(paste("Number of rows in", covargname, "does not equal the number of", type)) return(covariates) } else if(is.list(covariates)) { if(length(covariates) == 0) return(as.data.frame(matrix(, n, 0))) isim <- unlist(lapply(covariates, is.im)) isfun <- unlist(lapply(covariates, is.function)) iswin <- unlist(lapply(covariates, is.owin)) istess <- unlist(lapply(covariates, is.tess)) isnum <- unlist(lapply(covariates, is.number)) if(!all(isim | isfun | isnum | iswin | istess)) stop(paste("Each entry in the list", covargname, "should be an image, a function,", "a window, a tessellation or a 
single number")) if(sum(nzchar(names(covariates))) < length(covariates)) stop(paste("Some entries in the list", covargname, "are un-named")) ## look up values of each covariate at the quadrature points values <- unclass(covariates) values[isim] <- lapply(covariates[isim], lookup.im, x=x, y=y, naok=TRUE, strict=FALSE) values[isfun] <- vf <- lapply(covariates[isfun], evalfxy, x=x, y=y, extra=covfunargs) values[isnum] <- lapply(covariates[isnum], rep, length(x)) values[iswin] <- lapply(covariates[iswin], insidexy, x=x, y=y) values[istess] <- lapply(covariates[istess], tileindex, x=x, y=y) result <- as.data.frame(values) if(need.deriv && any(isfun)) { ## check for gradient/hessian attributes of function values grad <- lapply(vf, attr, which="gradient") hess <- lapply(vf, attr, which="hessian") grad <- grad[!unlist(lapply(grad, is.null))] hess <- hess[!unlist(lapply(hess, is.null))] if(length(grad) > 0 || length(hess) > 0) attr(result, "derivatives") <- list(gradient=grad, hessian=hess) } return(result) } stop(paste(covargname, "must be either a data frame or a list")) } ## functions for 'apply' evalfxy <- function(f, x, y, extra) { if(length(extra) == 0) return(f(x,y)) ## extra arguments must be matched explicitly by name ok <- names(extra) %in% names(formals(f)) z <- do.call(f, append(list(x,y), extra[ok])) return(z) } insidexy <- function(w, x, y) { inside.owin(x, y, w) } is.number <- function(x) { is.numeric(x) && (length(x) == 1) } mpl.get.covariates }) bt.frame <- function(Q, trend=~1, interaction=NULL, ..., covariates=NULL, correction="border", rbord=0, use.gam=FALSE, allcovar=FALSE) { prep <- mpl.engine(Q=Q, trend=trend, interaction=interaction, ..., covariates=covariates, correction=correction, rbord=rbord, use.gam=use.gam, allcovar=allcovar, preponly=TRUE, forcefit=TRUE) class(prep) <- c("bt.frame", class(prep)) return(prep) } print.bt.frame <- function(x, ...) 
{ cat("Model frame for Berman-Turner device\n") df <- x$glmdata cat(paste("$glmdata: Data frame with", nrow(df), "rows and", ncol(df), "columns\n")) cat(" Column names:\t") cat(paste(paste(names(df),collapse="\t"), "\n")) cat("Complete model formula ($fmla):\t") print(x$fmla) info <- x$info if(info$want.trend) { cat("Trend:\tyes\nTrend formula string ($trendfmla):\t") cat(paste(x$trendfmla, "\n")) } else cat("Trend:\tno\n") cat("Interaction ($info$interaction):\t") inte <- info$interaction if(is.null(inte)) inte <- Poisson() print(inte, family=FALSE, brief=TRUE) if(!is.poisson.interact(inte)) { cat("Internal names of interaction variables ($Vnames):\t") cat(paste(x$Vnames, collapse="\t")) cat("\n") } edge <- info$correction cat(paste("Edge correction ($info$correction):\t", sQuote(edge), "\n")) if(edge == "border") cat(paste("\tBorder width ($info$rbord):\t", info$rbord, "\n")) if(length(x$problems) > 0) { cat("Problems:\n") print(x$problems) } if(length(x$computed) > 0) cat(paste("Frame contains saved computations for", commasep(dQuote(names(x$computed))))) return(invisible(NULL)) } partialModelMatrix <- function(X, D, model, callstring="", ...) { ## X = 'data' ## D = 'dummy' Q <- quad(X,D) P <- union.quad(Q) trend <- model$trend inter <- model$interaction covar <- model$covariates prep <- mpl.prepare(Q, X, P, trend, inter, covar, correction=model$correction, rbord=model$rbord, Pname="data points", callstring=callstring, warn.unidentifiable=FALSE, ...) 
fmla <- prep$fmla glmdata <- prep$glmdata mof <- model.frame(fmla, glmdata) mom <- model.matrix(fmla, mof) if(!identical(all.equal(colnames(mom), names(coef(model))), TRUE)) warning(paste("Internal error: mismatch between", "column names of model matrix", "and names of coefficient vector in fitted model")) attr(mom, "mplsubset") <- glmdata$.mpl.SUBSET return(mom) } oversize.quad <- function(Q, ..., nU, nX, p=1) { ## Determine whether the quadrature scheme is ## too large to handle in one piece (in mpl) ## for a generic interaction ## nU = number of quadrature points ## nX = number of data points ## p = dimension of statistic if(missing(nU)) nU <- n.quad(Q) if(missing(nX)) nX <- npoints(Q$data) nmat <- as.double(nU) * nX nMAX <- spatstat.options("maxmatrix")/p needsplit <- (nmat > nMAX) return(needsplit) } quadBlockSizes <- function(nX, nD, p=1, nMAX=spatstat.options("maxmatrix")/p, announce=TRUE) { if(inherits(nX, "quad") && missing(nD)) { nD <- npoints(nX$dummy) nX <- npoints(nX$data) } ## Calculate number of dummy points in largest permissible X * (X+D) matrix nperblock <- max(1, floor(nMAX/nX - nX)) ## determine number of such blocks nblocks <- ceiling(nD/nperblock) ## make blocks roughly equal (except for the last one) nperblock <- min(nperblock, ceiling(nD/nblocks)) ## announce if(announce && nblocks > 1) { msg <- paste("Large quadrature scheme", "split into blocks to avoid memory size limits;", nD, "dummy points split into", nblocks, "blocks,") nfull <- nblocks - 1 nlastblock <- nD - nperblock * nfull if(nlastblock == nperblock) { msg <- paste(msg, "each containing", nperblock, "dummy points") } else { msg <- paste(msg, "the first", ngettext(nfull, "block", paste(nfull, "blocks")), "containing", nperblock, ngettext(nperblock, "dummy point", "dummy points"), "and the last block containing", nlastblock, ngettext(nlastblock, "dummy point", "dummy points")) } message(msg) } else nlastblock <- nperblock return(list(nblocks=nblocks, nperblock=nperblock, 
                 nlastblock=nlastblock))
}

## evalInteraction:
##   Entry point for evaluating interaction terms between quadrature
##   points P and data points X of a Gibbs model.
##   Decides between the fast C evaluator and the generic evaluator,
##   and splits oversized quadrature schemes into blocks of dummy points
##   to bound memory use.  Returns a matrix with one row per point of P
##   (one column per interaction statistic; zero columns for Poisson).

evalInteraction <- function(X, P, E = equalpairs(P, X),
                            interaction, correction,
                            ...,
                            precomputed=NULL,
                            savecomputed=FALSE) {
  ## evaluate the interaction potential
  ## (does not assign/touch the variable names)
  verifyclass(interaction, "interact")

  ## handle Poisson case: no interaction, return a 0-column matrix
  if(is.poisson(interaction)) {
    out <- matrix(, nrow=npoints(P), ncol=0)
    attr(out, "IsOffset") <- logical(0)
    return(out)
  }

  ## determine whether to use fast evaluation in C
  fastok <- (spatstat.options("fasteval") %in% c("on", "test"))
  if(fastok) {
    cando  <- interaction$can.do.fast
    par    <- interaction$par
    dofast <- !is.null(cando) && cando(X, correction, par)
  } else dofast <- FALSE

  ## determine whether to split quadscheme into blocks
  if(dofast) {
    dosplit <- FALSE
  } else {
    ## decide whether the quadrature scheme is too large to handle in one piece
    needsplit <- oversize.quad(nU=npoints(P), nX=npoints(X))
    ## not implemented when savecomputed=TRUE
    dosplit <- needsplit && !savecomputed
    if(needsplit && savecomputed)
      warning(paste("Oversize quadscheme cannot be split into blocks",
                    "because savecomputed=TRUE;",
                    "memory allocation error may occur"))
  }

  if(!dosplit) {
    ## normal case: evaluate in a single pass
    V <- evalInterEngine(X=X, P=P, E=E,
                         interaction=interaction,
                         correction=correction, ...,
                         precomputed=precomputed,
                         savecomputed=savecomputed)
  } else {
    ## Too many quadrature points: split into blocks of dummy points,
    ## evaluate each block against the full data pattern, then reassemble.
    nX <- npoints(X)
    nP <- npoints(P)
    ## Determine which evaluation points are data points
    Pdata <- E[,2]
    ## hence which are dummy points
    Pall <- seq_len(nP)
    Pdummy <- if(length(Pdata) > 0) Pall[-Pdata] else Pall
    nD <- length(Pdummy)
    ## calculate block sizes
    bls <- quadBlockSizes(nX, nD, announce=TRUE)
    nblocks <- bls$nblocks
    nperblock <- bls$nperblock
    ## each block pattern Pi is (all of X) followed by a slab of dummies,
    ## so the equal-pairs matrix for every block is the identity on X
    seqX <- seq_len(nX)
    EX <- cbind(seqX, seqX)
    ##
    for(iblock in 1:nblocks) {
      first <- min(nD, (iblock - 1) * nperblock + 1)
      last <- min(nD, iblock * nperblock)
      ## extract dummy points for this block
      Di <- P[Pdummy[first:last]]
      Pi <- superimpose(X, Di, check=FALSE, W=X$window)
      ## evaluate potential on this block
      Vi <- evalInterEngine(X=X, P=Pi, E=EX,
                            interaction=interaction,
                            correction=correction, ...,
                            savecomputed=FALSE)
      if(iblock == 1) {
        V <- Vi
      } else {
        ## tack on the glm variables for the extra DUMMY points only
        ## (rows for X would otherwise be duplicated in every block)
        V <- rbind(V, Vi[-seqX, , drop=FALSE])
      }
    }
    ## The first 'nX' rows of V contain values for X.
    ## The remaining rows of V contain values for dummy points.
    if(length(Pdata) == 0) {
      ## simply discard rows corresponding to data
      V <- V[-seqX, , drop=FALSE]
    } else {
      ## replace data in correct position, restoring the original order of P
      ii <- integer(nP)
      ii[Pdata] <- seqX
      ii[Pdummy] <- (nX+1):nrow(V)
      V <- V[ii, , drop=FALSE]
    }
  }
  return(V)
}

## evalInterEngine:
##   Workhorse that actually dispatches to the relevant evaluator:
##   the interaction's own C 'fasteval' routine if permitted, otherwise
##   the generic evaluator of the interaction family (which may accept
##   precomputed data for pairwise/pairsat families).

evalInterEngine <- function(X, P, E,
                            interaction, correction,
                            ...,
                            Reach = NULL,
                            precomputed=NULL,
                            savecomputed=FALSE) {
  ## fast evaluator (C code) may exist
  fasteval <- interaction$fasteval
  cando    <- interaction$can.do.fast
  par      <- interaction$par
  feopt    <- spatstat.options("fasteval")
  dofast   <- !is.null(fasteval) &&
              (is.null(cando) || cando(X, correction,par)) &&
              (feopt %in% c("on", "test"))

  V <- NULL
  if(dofast) {
    if(feopt == "test")
      message("Calling fasteval")
    ## a fast evaluator may return NULL to decline; fall through below
    V <- fasteval(X, P, E,
                  interaction$pot, interaction$par, correction, ...)
  }
  if(is.null(V)) {
    ## use generic evaluator for family
    evaluate <- interaction$family$eval
    if(is.null(Reach)) Reach <- reach(interaction)
    if("precomputed" %in% names(formals(evaluate))) {
      ## Use precomputed data
      ## version 1.9-3 onward (pairwise and pairsat families)
      V <- evaluate(X, P, E,
                    interaction$pot, interaction$par, correction, ...,
                    Reach=Reach,
                    precomputed=precomputed,
                    savecomputed=savecomputed)
    } else {
      ## Cannot use precomputed data
      ## Object created by earlier version of ppm
      ## or not pairwise/pairsat interaction
      V <- evaluate(X, P, E,
                    interaction$pot, interaction$par, correction, ...,
                    Reach=Reach)
    }
  }
  return(V)
}

## deltasuffstat:
##   Compute the change in the canonical sufficient statistic caused by
##   adding/deleting each point, for a fitted Gibbs model.  Returns a 3D
##   (possibly sparse) array delta[i,j,k]; see deltasufX/deltasufQ below.

deltasuffstat <- local({

  deltasuffstat <- function(model, ...,
                            restrict=c("pairs", "first", "none"),
                            dataonly=TRUE, force=FALSE,
                            quadsub=NULL, sparseOK=FALSE) {
    stopifnot(is.ppm(model))
    sparsegiven <- !missing(sparseOK)
    restrict <- match.arg(restrict)
    if(dataonly) {
      X <- data.ppm(model)
      nX <- npoints(X)
    } else {
      X <- quad.ppm(model)
      if(!is.null(quadsub)) {
        ## subsetting may drop dummy points only, never data points
        z <- is.data(X)
        z[quadsub] <- FALSE
        if(any(z))
          stop("subset 'quadsub' must include all data points", call.=FALSE)
        X <- X[quadsub]
      }
      nX <- n.quad(X)
    }
    ncoef <- length(coef(model))
    inte <- as.interact(model)
    ## fall back to sparse storage if the dense array would overflow
    if(!sparseOK && exceedsMaxArraySize(nX, nX, ncoef)) {
      if(sparsegiven)
        stop("Array dimensions too large", call.=FALSE)
      warning("Switching to sparse array code", call.=FALSE)
      sparseOK <- TRUE
    }
    zeroes <- if(!sparseOK) array(0, dim=c(nX, nX, ncoef)) else
              sparse3Darray(dims=c(nX, nX, ncoef))
    if(is.poisson(inte))
      return(zeroes)
    ## Get names of interaction terms in model (including offsets)
    f <- fitin(model)
    Inames <- f$Vnames
    IsOffset <- f$IsOffset
    ## Offset terms do not contribute to sufficient statistic
    if(all(IsOffset))
      return(zeroes)
    ## Nontrivial interaction terms must be computed.
    ## Look for member function $delta2 in the interaction
    v <- NULL
    if(!is.null(delta2 <- inte$delta2) && is.function(delta2)) {
      v <- delta2(X, inte, model$correction, sparseOK=sparseOK)
    }
    ## Look for generic $delta2 function for the family
    if(is.null(v) && !is.null(delta2 <- inte$family$delta2) &&
       is.function(delta2))
      v <- delta2(X, inte, model$correction, sparseOK=sparseOK)
    ## no luck?
    if(is.null(v)) {
      if(!force)
        return(NULL)
      ## use brute force algorithm (slow: refits partial model matrices)
      v <- if(dataonly) deltasufX(model, sparseOK) else
           deltasufQ(model, quadsub, sparseOK)
    }
    ## make it a 3D array
    if(length(dim(v)) != 3) {
      if(is.matrix(v)) {
        v <- array(v, dim=c(dim(v), 1))
      } else if(inherits(v, "sparseMatrix")) {
        v <- as.sparse3Darray(v)
      }
    }
    if(!sparseOK && inherits(v, "sparse3Darray"))
      v <- as.array(v)
    if(restrict != "none") {
      ## kill contributions from points outside the domain of pseudolikelihood
      ## (e.g. points in the border region)
      use <- if(dataonly) getppmdatasubset(model) else
             if(is.null(quadsub)) getglmsubset(model) else
             getglmsubset(model)[quadsub]
      if(any(kill <- !use))
        switch(restrict,
               pairs = { v[kill,kill,] <- 0 },
               first = { v[kill,,] <- 0 },
               none = {})
    }
    ## Output array: planes must correspond to model coefficients
    result <- zeroes
    ## Planes of 'v' correspond to interaction terms (including offsets)
    if(length(Inames) != dim(v)[3])
      stop(paste("Internal error: deltasuffstat:",
                 "number of planes of v =", dim(v)[3],
                 "!= number of interaction terms =", length(Inames)),
           call.=FALSE)
    ## Offset terms do not contribute to sufficient statistic
    if(any(IsOffset)) {
      v <- v[ , , !IsOffset, drop=FALSE]
      Inames <- Inames[!IsOffset]
    }
    ## Map planes of 'v' into coefficients
    Imap <- match(Inames, names(coef(model)))
    if(anyNA(Imap))
      stop(paste("Internal error: deltasuffstat:",
                 "cannot match interaction coefficients"))
    if(length(Imap) > 0) {
      ## insert 'v' into array
      result[ , , Imap] <- v
    }
    return(result)
  }

  ## deltasufX: brute-force computation of deltasuffstat for DATA points,
  ## using partialModelMatrix.  Only pairs closer than reach(model) can
  ## interact, so computation is restricted to close pairs.
  deltasufX <- function(model, sparseOK=FALSE) {
    stopifnot(is.ppm(model))

    X <- data.ppm(model)
    nX <- npoints(X)
    p <- length(coef(model))

    isdata <- is.data(quad.ppm(model))
    m <- model.matrix(model)[isdata, ]
    ok <- getppmdatasubset(model)

    ## canonical statistic before and after deleting X[j]
    ##   mbefore[ , i, j] = h(X[i] | X)
    ##   mafter[ , i, j] = h(X[i] | X[-j])
    mafter <- mbefore <- array(t(m), dim=c(p, nX, nX))

    ## identify close pairs
    R <- reach(model)
    if(is.finite(R)) {
      cl <- closepairs(X, R, what="indices")
      I <- cl$i
      J <- cl$j
      ## pairs within 2R: needed because deleting a point can affect
      ## the statistic of points up to distance 2R away via shared neighbours
      cl2 <- closepairs(X, 2*R, what="indices")
      I2 <- cl2$i
      J2 <- cl2$j
    } else {
      ## either infinite reach, or something wrong: use all distinct pairs
      IJ <- expand.grid(I=1:nX, J=1:nX)
      IJ <- subset(IJ, I != J)
      I2 <- I <- IJ$I
      J2 <- J <- IJ$J
    }
    ## filter: I and J must both belong to the nominated subset
    okIJ <- ok[I] & ok[J]
    I <- I[okIJ]
    J <- J[okIJ]
    ##
    if(length(I) > 0 && length(J) > 0) {
      ## .............. loop over pairs ........................
      ## The following ensures that 'empty' and 'X' have compatible marks
      empty <- X[integer(0)]
      ## Run through pairs
      for(i in unique(I)) {
        ## all points within 2R
        J2i <- unique(J2[I2==i])
        ## all points within R
        Ji <- unique(J[I==i])
        nJi <- length(Ji)
        if(nJi > 0) {
          Xi <- X[i]
          ## neighbours of X[i]
          XJi <- X[Ji]
          ## replace X[-i] by X[-i] \cap b(0, 2R)
          X.i <- X[J2i]
          nX.i <- length(J2i)
          ## index of XJi in X.i
          J.i <- match(Ji, J2i)
          if(anyNA(J.i))
            stop("Internal error: Ji not a subset of J2i")
          ## equalpairs matrix
          E.i <- cbind(J.i, seq_len(nJi))
          ## values of sufficient statistic
          ##    h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)]
          ## for all j
          pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE]
          ## sufficient statistic in reverse order
          ##    h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)]
          ## for all j
          pmi <- matrix(, nJi, p)
          for(k in 1:nJi) {
            j <- Ji[k]
            ## X.ij <- X[-c(i,j)]
            X.ij <- X.i[-J.i[k]]
            pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX.i, ]
          }
          ##
          mafter[ , Ji, i] <- t(pmj)
          mafter[ , i, Ji] <- t(pmi)
        }
      }
    }
    ## delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j])
    delta <- mbefore - mafter
    ## rearrange so that delta[i, j, ] = h(X[i] | X) - h(X[i] | X[-j])
    delta <- aperm(delta, c(2,3,1))
    return(delta)
  }

  ## deltasufQ: brute-force computation of deltasuffstat for all
  ## QUADRATURE points (data and dummy), using partialModelMatrix.
  deltasufQ <- function(model, quadsub, sparseOK) {
    stopifnot(is.ppm(model))

    p <- length(coef(model))

    Q <- quad.ppm(model)
    m <- model.matrix(model)
    ok <- getglmsubset(model)
    if(!is.null(quadsub)) {
      Q <- Q[quadsub]
      m <- m[quadsub, , drop=FALSE]
      ok <- ok[quadsub]
    }
    X <- Q$data
    U <- union.quad(Q)
    nU <- npoints(U)
    nX <- npoints(X)
    isdata <- is.data(Q)
    isdummy <- !isdata

    m <- m[isdata, ,drop=FALSE]

    ## canonical statistic before and after adding/deleting U[j]
    mafter <- mbefore <- array(t(m), dim=c(p, nU, nU))
    delta <- array(0, dim=dim(mafter))
    ## mbefore[ , i, j] = h(U[i] | X)
    ## For data points X[j]
    ##   mafter[ , i, j] = h(U[i] | X[-j])
    ##   delta[ , i, j] = h(U[i] | X) - h(U[i] | X[-j])
    ## For dummy points X[j]
    ##   mafter[ , i, j] = h(U[i] | X \cup U[j])
    ##   delta[ , i, j] = h(U[i] | X \cup U[j]) - h(U[i] | X)
    changesign <- ifelseAB(isdata, -1, 1)

    ## identify close pairs of quadrature points
    R <- reach(model)
    if(is.finite(R)) {
      cl <- closepairs(U, R, what="indices")
      I <- cl$i
      J <- cl$j
      cl2 <- closepairs(U, 2*R, what="indices")
      I2 <- cl2$i
      J2 <- cl2$j
    } else {
      ## either infinite reach, or something wrong: use all pairs
      IJ <- expand.grid(I=1:nU, J=1:nX)
      IJ <- IJ[ with(IJ, I != J), ]
      I2 <- I <- IJ$I
      J2 <- J <- IJ$J
    }
    ## filter: I and J must both belong to the nominated subset
    okIJ <- ok[I] & ok[J]
    I <- I[okIJ]
    J <- J[okIJ]
    ##
    if(length(I) > 0 && length(J) > 0) {
      ## .............. loop over pairs of quadrature points ...............
      ## Run through pairs
      uI <- unique(I)
      zI <- isdata[uI]
      uIdata <- uI[zI]
      uIdummy <- uI[!zI]
      ## Run through pairs i, j where 'i' is a data point
      for(i in uIdata) {
        ## all DATA points within 2R of X[i]
        ## This represents X[-i]
        J2i <- unique(J2[I2==i])
        J2i <- J2i[isdata[J2i]]
        ## all QUADRATURE points within R of X[i]
        Ji <- unique(J[I==i])
        nJi <- length(Ji)
        if(nJi > 0) {
          isd <- isdata[Ji]
          ## data points which are neighbours of X[i]
          XJi <- X[Ji[isd]]
          ## dummy points which are neighbours of X[i]
          DJi <- U[Ji[!isd]]
          ## replace X[-i] by X[-i] \cap b(0, 2R)
          X.i <- X[J2i]
          nX.i <- length(J2i)
          ## index of XJi in X.i
          J.i <- match(Ji[isd], J2i)
          if(anyNA(J.i))
            stop("Internal error: Ji[isd] not a subset of J2i")
          ## index of DJi in superimpose(X.i, DJi)
          JDi <- nX.i + seq_len(sum(!isd))
          ## values of sufficient statistic
          ##    h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)]
          ## for all j
          pmj <- partialModelMatrix(X.i, DJi, model)[c(J.i, JDi), ,
                                                     drop=FALSE]
          ##
          mafter[ , Ji, i] <- t(pmj)
        }
      }
      ## Run through pairs i, j where 'i' is a dummy point
      for(i in uIdummy) {
        ## all DATA points within 2R of U[i]
        J2i <- unique(J2[I2==i])
        J2i <- J2i[isdata[J2i]]
        ## all QUADRATURE points within R of U[i]
        Ji <- unique(J[I==i])
        nJi <- length(Ji)
        if(nJi > 0) {
          isd <- isdata[Ji]
          JiData <- Ji[isd]
          JiDummy <- Ji[!isd]
          ## data points which are neighbours of U[i]
          XJi <- X[JiData]
          ## dummy points which are neighbours of U[i]
          DJi <- U[JiDummy]
          ## replace X \cup U[i] by (X \cap b(0, 2R)) \cup U[i]
          J2Ui <- c(J2i, i)
          XUi <- U[J2Ui]
          nXUi <- length(J2Ui)
          ## index of XJi in X.i
          J.i <- match(JiData, J2Ui)
          if(anyNA(J.i))
            stop("Internal error: Ji[isd] not a subset of J2i")
          ## index of DJi in superimpose(X.i, DJi)
          JDi <- nXUi + seq_len(length(JiDummy))
          ## values of sufficient statistic
          ##     h(X[j] | X \cup U[i])
          ## for all j
          pmj <- partialModelMatrix(XUi, DJi, model)[c(J.i, JDi), ,
                                                     drop=FALSE]
          ##
          mafter[ , c(JiData, JiDummy), i] <- t(pmj)
        }
      }
    }
    ## delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j])
    delta[ , , isdata] <- mbefore[, , isdata] - mafter[ , , isdata]
    ## delta[ ,i,j] = h(X[i] | X \cup U[j]) - h(X[i] | X)
    delta[ , , isdummy] <- mafter[, , isdummy] - mbefore[ , , isdummy]
    ## rearrange: new delta[i,j,] = old delta[, i, j]
    delta <- aperm(delta, c(2,3,1))
    return(delta)
  }

  deltasuffstat
})
spatstat/R/flipxy.R0000755000176200001440000000247213115271075013766 0ustar liggesusers#
#  flipxy.R
#
#  flip x and y coordinates (transpose a spatial object)
#
#  $Revision: 1.3 $ $Date: 2017/02/07 07:22:47 $
#

## Generic: swap the x and y coordinates of a spatial object.
flipxy <- function(X) {
  UseMethod("flipxy")
}

## Method for point patterns: swap coordinates, flip the window, keep marks.
flipxy.ppp <- function(X) {
  stopifnot(is.ppp(X))
  ppp(X$y, X$x, marks=X$marks, window=flipxy(X$window),
      unitname=unitname(X), check=FALSE)
}

## Helper for a single polygon component.
flipxypolygon <- function(p) {
  # flip x and y coordinates, and reinstate anticlockwise order
  oldy <- p$y
  p$y <- rev(p$x)
  p$x <- rev(oldy)
  # area and hole status unchanged
  return(p)
}

## Method for windows: handles rectangle, polygonal and mask types.
flipxy.owin <- function(X) {
  verifyclass(X, "owin")
  switch(X$type,
         rectangle={
           W <- owin(X$yrange, X$xrange, unitname=unitname(X))
         },
         polygonal={
           bdry <- lapply(X$bdry, flipxypolygon)
           W <- owin(poly=bdry, check=FALSE, unitname=unitname(X))
         },
         mask={
           ## transposing the logical matrix swaps the axes
           W <- owin(mask=t(X$m),
                     xy=list(x=X$yrow, y=X$xcol),
                     unitname=unitname(X))
         },
         stop("Unrecognised window type")
         )
  return(W)
}

## Method for line segment patterns: swap (x0,y0,x1,y1) -> (y0,x0,y1,x1).
flipxy.psp <- function(X) {
  stopifnot(is.psp(X))
  flipends <- (X$ends)[, c(2L,1L,4L,3L), drop=FALSE]
  as.psp(flipends, window=flipxy(X$window), marks=X$marks,
         unitname=unitname(X), check=FALSE)
}

## Method for pixel images: transpose the pixel matrix.
flipxy.im <- function(X) {
  im(t(X$v), xcol=X$yrow, yrow=X$xcol, unitname=unitname(X))
}
spatstat/R/blur.R0000755000176200001440000000572413115271075013420 0ustar liggesusers#
#  blur.R
#
#  apply Gaussian blur to an image
#
#  $Revision: 1.16 $ $Date: 2016/04/25 02:34:40 $
#

## Replace NA pixel values of an image by a constant.
fillNA <- function(x, value=0) {
  stopifnot(is.im(x))
  v <- x$v
  v[is.na(v)] <- value
  x$v <- v
  return(x)
}

## Smooth method for images: delegates to blur().
Smooth.im <- function(X, sigma=NULL, ..., normalise=FALSE,
                      bleed=TRUE, varcov=NULL) {
  blur(X, sigma=sigma, ..., normalise=normalise, bleed=bleed, varcov=varcov)
}

## Gaussian blur of a pixel image; see body on following lines.
blur <- function(x, sigma=NULL, ..., normalise=FALSE,
                 bleed=TRUE, varcov=NULL) {
  stopifnot(is.im(x))
  ## determine smoothing kernel: either isotropic/axis-aligned 'sigma'
  ## or a full 2x2 variance-covariance matrix 'varcov', but not both
  sigma.given <- !is.null(sigma)
  varcov.given <- !is.null(varcov)
  if (sigma.given) {
    stopifnot(is.numeric(sigma))
    stopifnot(length(sigma) %in% c(1, 2))
    stopifnot(all(sigma > 0))
  }
  if (varcov.given)
    stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov) == 2)
  ngiven <- varcov.given + sigma.given
  switch(ngiven + 1L,
         {
           ## neither given: default bandwidth is 1/8 of the shorter side
           sigma <- (1/8) * min(diff(x$xrange), diff(x$yrange))
         },
         {
           ## exactly one given: normalise to a single representation
           if (sigma.given && length(sigma) == 2)
             varcov <- diag(sigma^2)
           if (!is.null(varcov))
             sigma <- NULL
         },
         {
           stop(paste("Give only one of the arguments",
                      sQuote("sigma"), "and", sQuote("varcov")))
         })
  ## replace NA's in image raster by zeroes
  X <- fillNA(x, 0)
  ## convolve with Gaussian
  Y <- second.moment.calc(X, sigma=sigma, varcov=varcov, what="smooth")
  ## if no bleeding, we restrict data to the original boundary
  if(!bleed)
    Y$v[is.na(x$v)] <- NA
  ##
  if(!normalise)
    return(Y)
  ## normalisation:
  ## convert original image to window (0/1 image)
  Xone <- x
  isna <- is.na(x$v)
  Xone$v[isna] <- 0
  Xone$v[!isna] <- 1
  ## convolve with Gaussian
  Ydenom <- second.moment.calc(Xone, sigma=sigma, ...,
                               varcov=varcov, what="smooth")
  ## normalise (edge-corrected smoothing)
  Z <- eval.im(Y/Ydenom)
  return(Z)
}

## safelookup: evaluate image Z at the points of a point pattern x,
## replacing NA lookups first by the nearest valid pixel, then (if still
## NA) by values of a blurred, extrapolated copy of Z.
safelookup <- function(Z, x, factor=2, warn=TRUE) {
  # x is a ppp
  # evaluates Z[x], replacing any NA's by blur(Z)[x]
  Zvals <- Z[x, drop=FALSE]
  if(any(isna <- is.na(Zvals))) {
    # First pass - look up values at neighbouring pixels if valid
    XX <- x[isna]
    rc <- nearest.valid.pixel(XX$x, XX$y, Z)
    Zvals[isna] <- Z$v[cbind(rc$row, rc$col)]
  }
  if(any(isna <- is.na(Zvals))) {
    # Second pass - extrapolate by Gaussian blur
    XX <- x[isna]
    pixdiam <- sqrt(Z$xstep^2 + Z$ystep^2)
    # expand domain of Z so the blur can reach points outside it
    RX <- as.rectangle(x)
    RZ <- as.rectangle(Z)
    bb <- boundingbox(RX, RZ)
    big <- grow.rectangle(bb, 2 * pixdiam)
    Z <- rebound.im(Z, big)
    # now blur
    Zblur <- blur(Z, factor * pixdiam, bleed=TRUE, normalise=TRUE)
    Bvals <- Zblur[XX, drop=FALSE]
    if(anyNA(Bvals))
      stop("Internal error: pixel values were NA, even after blurring")
    Zvals[isna] <- Bvals
    if(warn)
      warning(paste(sum(isna), "out of", npoints(x), "pixel values",
                    "were outside the pixel image domain",
                    "and were estimated by convolution"))
  }
  return(Zvals)
}
spatstat/R/distfun.R0000755000176200001440000000642313115271075014127 0ustar liggesusers#
#  distfun.R
#
#  distance function (returns a function of x,y)
#
#  $Revision: 1.23 $ $Date: 2017/06/05 10:31:58 $
#

## Generic: build a true function(x,y) giving distance to the object X.
distfun <- function(X, ...) {
  UseMethod("distfun")
}

## Method for point patterns: distance to the k-th nearest point of X.
distfun.ppp <- function(X, ..., k=1) {
  # this line forces X to be bound in the closure environment
  stopifnot(is.ppp(X))
  stopifnot(length(k) == 1)
  g <- function(x,y=NULL) {
    Y <- xy.coords(x, y)[c("x", "y")]
    nncross(Y, X, what="dist", k=k)
  }
  attr(g, "Xclass") <- "ppp"
  g <- funxy(g, as.rectangle(as.owin(X)))
  attr(g, "k") <- k
  class(g) <- c("distfun", class(g))
  return(g)
}

## Method for line segment patterns.
distfun.psp <- function(X, ...) {
  # this line forces X to be bound in the closure environment
  stopifnot(is.psp(X))
  g <- function(x,y=NULL) {
    Y <- xy.coords(x, y)[c("x", "y")]
    nncross(Y, X, what="dist")
  }
  attr(g, "Xclass") <- "psp"
  g <- funxy(g, as.rectangle(as.owin(X)))
  class(g) <- c("distfun", class(g))
  return(g)
}

## Method for windows: distance to the boundary, zero inside the window
## (or zero outside, if invert=TRUE).
distfun.owin <- function(X, ..., invert=FALSE) {
  # this line forces X to be bound in the closure environment
  stopifnot(is.owin(X))
  #
  P <- edges(X)
  #
  g <- function(x,y=NULL) {
    Y <- xy.coords(x, y)
    inside <- inside.owin(Y$x, Y$y, X)
    D <- nncross(Y, P, what="dist")
    out <- if(!invert) ifelseAX(inside, 0, D) else ifelseXB(inside, D, 0)
    return(out)
  }
  attr(g, "Xclass") <- "owin"
  g <- funxy(g, as.rectangle(as.owin(X)))
  class(g) <- c("distfun", class(g))
  return(g)
}

## Recover the frame of the underlying object from a distfun closure.
as.owin.distfun <- function(W, ..., fatal=TRUE) {
  X <- get("X", envir=environment(W))
  result <- if(is.owin(X)) as.rectangle(X) else as.owin(X, ..., fatal=fatal)
  return(result)
}

domain.distfun <- Window.distfun <- function(X, ...)
{ as.owin(X) }

## as.im.distfun:
##   Convert a 'distfun' to a pixel image.
##   Fast path: when no window is specified and k is 1 (or absent),
##   delegate to distmap() on the underlying object.
##   Otherwise use nnmap() for point patterns, or evaluate the function
##   at pixel centres.
## Arguments follow as.im(): W window, eps/dimyx/xy pixel resolution,
## na.replace value substituted for NA pixels, approx enables the fast path.
as.im.distfun <- function(X, W=NULL, ...,
                          eps=NULL, dimyx=NULL, xy=NULL,
                          na.replace=NULL, approx=TRUE) {
  k <- attr(X, "k")
  if(approx && is.null(W) && (is.null(k) || (k == 1))) {
    # use 'distmap' for speed
    env <- environment(X)
    Xdata <- get("X", envir=env)
    args <- list(X=Xdata, eps=eps, dimyx=dimyx, xy=xy)
    if(is.owin(Xdata)) {
      args <- append(args, list(invert = get("invert", envir=env)))
    }
    D <- do.call(distmap, args = args)
    ## BUG FIX: was 'is.null(D$v)', which is a scalar test and always
    ## FALSE for a matrix, so na.replace was silently ignored here.
    ## Use the elementwise test is.na(), matching the other branches
    ## where na.replace is passed through to nnmap/as.im.function.
    if(!is.null(na.replace))
      D$v[is.na(D$v)] <- na.replace
  } else if(identical(attr(X, "Xclass"), "ppp")) {
    # point pattern --- use nngrid/knngrid
    env <- environment(X)
    Xdata <- get("X", envir=env)
    D <- nnmap(Xdata, W=W, what="dist", k=k, eps=eps, dimyx=dimyx, xy=xy,
               na.replace=na.replace, ...)
  } else {
    # evaluate function at pixel centres
    D <- as.im.function(X, W=W, eps=eps, dimyx=dimyx, xy=xy,
                        na.replace=na.replace)
  }
  return(D)
}

## Print method for distfun objects: describes the underlying object
## and, for k > 1, which nearest neighbour is used.
print.distfun <- function(x, ...) {
  xtype <- attr(x, "Xclass")
  typestring <- switch(xtype,
                       ppp="point pattern",
                       psp="line segment pattern",
                       owin="window",
                       "unrecognised object")
  objname <- switch(xtype,
                    ppp="point",
                    psp="line segment",
                    "object")
  cat(paste("Distance function for", typestring, "\n"))
  X <- get("X", envir=environment(x))
  print(X)
  if(!is.null(k <- attr(x, "k")) && k > 1)
    cat(paste("Distance to", ordinal(k), "nearest", objname,
              "will be computed\n"))
  return(invisible(NULL))
}
spatstat/R/residppm.R0000755000176200001440000000702413115271120014263 0ustar liggesusers#
#  residppm.R
#
#  computes residuals for fitted point process model
#
#  $Revision: 1.24 $ $Date: 2016/06/30 03:29:54 $
#

## residuals.ppm: residual measure of a fitted Gibbs point process model.
## type: "raw", "inverse", "pearson" or "score".
residuals.ppm <- function(object, type="raw", ...,
                          check=TRUE, drop=FALSE,
                          fittedvalues = NULL,
                          new.coef=NULL, dropcoef=FALSE,
                          quad=NULL) {
  verifyclass(object, "ppm")
  trap.extra.arguments(..., .Context="In residuals.ppm")
  type <- pickoption("type", type,
                     c(inverse="inverse",
                       raw="raw",
                       pearson="pearson",
                       Pearson="pearson",
                       score="score"))
  typenames <- c(inverse="inverse-lambda residuals",
                 raw="raw residuals",
pearson="Pearson residuals", score="score residuals") typename <- typenames[[type]] given.fitted <- !missing(fittedvalues) && !is.null(fittedvalues) # ................. determine fitted values ................. NewCoef <- NULL if(is.null(new.coef) && is.null(quad)) { # use 'object' without modification # validate 'object' if(check && !given.fitted && damaged.ppm(object)) stop("object format corrupted; try update(object, use.internal=TRUE)") } else { # determine a new set of model coefficients if(!is.null(new.coef)) { # use specified model parameters NewCoef <- new.coef } else { # estimate model parameters using a (presumably) denser set of dummy pts # Determine new quadrature scheme if(inherits(quad, "quad")) hi.res.quad <- quad else if(is.ppp(quad)) hi.res.quad <- quadscheme(data=data.ppm(object), dummy=quad) else { # assume 'quad' is a list of arguments to 'quadscheme' hi.res.quad <- do.call(quadscheme, append(list(data.ppm(object)), quad)) } # refit the model with new quadscheme hi.res.fit <- update(object, hi.res.quad) NewCoef <- coef(hi.res.fit) } } #' now compute fitted values using new coefficients if(!given.fitted) fittedvalues <- fitted(object, drop=drop, check=check, new.coef=NewCoef, dropcoef=dropcoef) # ..................... compute residuals ..................... 
# Extract quadrature points and weights Q <- quad.ppm(object, drop=drop, clip=drop) # U <- union.quad(Q) # quadrature points Z <- is.data(Q) # indicator data/dummy # W <- w.quad(Q) # quadrature weights # Compute fitted conditional intensity at quadrature points lambda <- fittedvalues # indicator is 1 if lambda > 0 # (adjusted for numerical behaviour of predict.glm) indicator <- (lambda > .Machine$double.eps) if(type == "score") { # need the covariates X <- model.matrix(object) if(drop) { gs <- getglmsubset(object) ok <- !is.na(gs) & gs X <- X[ok,] } } # Evaluate residual measure components discrete <- switch(type, raw = rep.int(1, sum(Z)), inverse = 1/lambda[Z], pearson = 1/sqrt(lambda[Z]), score = X[Z, ] ) density <- switch(type, raw = -lambda, inverse = -indicator, pearson = -indicator * sqrt(lambda), score = -lambda * X) # Residual measure (return value) res <- msr(Q, discrete, density) # name the residuals attr(res, "type") <- type attr(res, "typename") <- typename return(res) } spatstat/R/levelset.R0000755000176200001440000000235313115271075014274 0ustar liggesusers# levelset.R # # $Revision: 1.5 $ $Date: 2015/01/15 07:10:37 $ # # level set of an image levelset <- function(X, thresh, compare="<=") { # force X and thresh to be evaluated in this frame verifyclass(X, "im") thresh <- thresh switch(compare, "<" = { A <- eval.im(X < thresh) }, ">" = { A <- eval.im(X > thresh) }, "<=" = { A <- eval.im(X <= thresh) }, ">=" = { A <- eval.im(X >= thresh) }, "==" = { A <- eval.im(X == thresh) }, "!=" = { A <- eval.im(X != thresh) }, stop(paste("unrecognised comparison operator", sQuote(compare)))) W <- as.owin(eval.im(ifelse1NA(A))) return(W) } # compute owin containing all pixels where image expression is TRUE solutionset <- function(..., envir) { if(missing(envir)) envir <- parent.frame() A <- try(eval.im(..., envir=envir), silent=TRUE) if(inherits(A, "try-error")) A <- try(eval(..., envir=envir), silent=TRUE) if(inherits(A, "try-error")) stop("Unable to evaluate 
expression") if(!is.im(A)) stop("Evaluating the expression did not yield a pixel image") if(A$type != "logical") stop("Evaluating the expression did not yield a logical-valued image") W <- as.owin(eval.im(ifelse1NA(A))) return(W) } spatstat/R/strauss.R0000755000176200001440000001333413115271120014145 0ustar liggesusers# # # strauss.R # # $Revision: 2.37 $ $Date: 2017/06/05 10:31:58 $ # # The Strauss process # # Strauss() create an instance of the Strauss process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Strauss <- local({ # create blank template object without family and pars BlankStrauss <- list( name = "Strauss process", creator = "Strauss", family = "pairwise.family", # evaluated later pot = function(d, par) { d <= par$r }, par = list(r = NULL), # to be filled in parnames = "interaction distance", init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) return(is.finite(loggamma) && (loggamma <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(anyNA(coeffs)) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(0) else return(r) }, version=NULL, # to be filled in # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) 
       {
         # fast evaluator for Strauss interaction
         if(!all(correction %in% c("border", "none")))
           return(NULL)
         if(spatstat.options("fasteval") == "test")
           message("Using fast eval for Strauss")
         r <- potpars$r
         ## number of r-close data points for each quadrature point
         answer <- strausscounts(U, X, r, EqualPairs)
         return(matrix(answer, ncol=1))
       },
       Mayer=function(coeffs, self) {
         # second Mayer cluster integral
         gamma <- exp(as.numeric(coeffs[1]))
         r <- self$par$r
         return((1-gamma) * pi * r^2)
       },
       Percy=function(d, coeffs, par, ...) {
         ## term used in Percus-Yevick type approximation
         gamma <- exp(as.numeric(coeffs[1]))
         R <- par$r
         t <- abs(d/(2*R))
         t <- pmin.int(t, 1)
         y <- 2 * R^2 * (pi * (1-gamma)
                         - (1-gamma)^2 * (acos(t) - t * sqrt(1 - t^2)))
         return(y)
       },
       ## second-order variation of the sufficient statistic:
       ## (weighted) adjacency matrix of r-close pairs
       delta2 = function(X, inte, correction, ..., sparseOK=FALSE) {
         r <- inte$par$r
         X <- as.ppp(X) # algorithm is the same for data and dummy points
         nX <- npoints(X)
         switch(correction,
                none = ,
                border = {
                  cl <- closepairs(X, r, what="indices")
                  weight <- 1
                },
                isotropic = ,
                Ripley = {
                  cl <- closepairs(X, r, what="ijd")
                  weight <- edge.Ripley(X[cl$i], cl$d)
                },
                translate = {
                  cl <- closepairs(X, r, what="all")
                  weight <- edge.Trans(dx = cl$dx, dy = cl$dy,
                                       W = Window(X), paired=TRUE)
                },
                return(NULL)
                )
         v <- sparseMatrix(i=cl$i, j=cl$j, x=as.numeric(weight),
                           dims=c(nX, nX))
         if(!sparseOK)
           v <- as.matrix(v)
         return(v)
       }
  )
  class(BlankStrauss) <- "interact"

  # Finally define main function
  Strauss <- function(r) {
    instantiate.interact(BlankStrauss, list(r=r))
  }

  Strauss <- intermaker(Strauss, BlankStrauss)

  Strauss
})

# generally accessible functions

## strausscounts: for each point of U, the number of points of X lying
## closer than r, excluding identical pairs listed in EqualPairs.
strausscounts <- function(U, X, r, EqualPairs=NULL) {
  answer <- crosspaircounts(U,X,r)
  # subtract counts of identical pairs
  if(length(EqualPairs) > 0) {
    nU <- npoints(U)
    idcount <- as.integer(table(factor(EqualPairs[,2L], levels=1:nU)))
    answer <- answer - idcount
  }
  return(answer)
}

## crosspaircounts: for each point of X, count points of Y within
## distance r (C implementation; inputs sorted by x coordinate first).
crosspaircounts <- function(X, Y, r) {
  stopifnot(is.ppp(X))
  stopifnot(is.numeric(r) && length(r) == 1)
  stopifnot(is.finite(r))
  stopifnot(r >= 0)
  # sort in increasing order of x coordinate
  oX <- fave.order(X$x)
  oY <- fave.order(Y$x)
  Xsort <- X[oX]
  Ysort <- Y[oY]
  nX <- npoints(X)
  nY <- npoints(Y)
  # call C routine
  out <- .C("Ccrosspaircounts",
            nnsource = as.integer(nX),
            xsource  = as.double(Xsort$x),
            ysource  = as.double(Xsort$y),
            nntarget = as.integer(nY),
            xtarget  = as.double(Ysort$x),
            ytarget  = as.double(Ysort$y),
            rrmax    = as.double(r),
            counts   = as.integer(integer(nX)),
            PACKAGE  = "spatstat")
  ## undo the sorting permutation
  answer <- integer(nX)
  answer[oX] <- out$counts
  return(answer)
}

## closepaircounts: for each point of X, count other points of X within
## distance r (C implementation).
closepaircounts <- function(X, r) {
  stopifnot(is.ppp(X))
  stopifnot(is.numeric(r) && length(r) == 1)
  stopifnot(is.finite(r))
  stopifnot(r >= 0)
  # sort in increasing order of x coordinate
  oX <- fave.order(X$x)
  Xsort <- X[oX]
  nX <- npoints(X)
  # call C routine
  out <- .C("Cclosepaircounts",
            nxy   = as.integer(nX),
            x     = as.double(Xsort$x),
            y     = as.double(Xsort$y),
            rmaxi = as.double(r),
            counts = as.integer(integer(nX)),
            PACKAGE = "spatstat")
  ## undo the sorting permutation
  answer <- integer(nX)
  answer[oX] <- out$counts
  return(answer)
}
spatstat/R/options.R0000755000176200001440000005274113115271120014141 0ustar liggesusers#
#  options.R
#
#  Spatstat options and other internal states
#
#  $Revision: 1.80 $ $Date: 2017/06/05 10:31:58 $
#

## Private environment holding spatstat's mutable internal state.
.spEnv <- new.env()

## Store a value in the internal state environment.
putSpatstatVariable <- function(name, value) {
  assign(name, value, envir=.spEnv)
}
## Retrieve a value from the internal state environment.
getSpatstatVariable <- function(name) {
  get(name, envir=.spEnv)
}
## Test whether a value exists in the internal state environment.
existsSpatstatVariable <- function(name) {
  exists(name, envir=.spEnv)
}

putSpatstatVariable("Spatstat.Options", list())
putSpatstatVariable("Spatstat.ProgressBar", NULL)
putSpatstatVariable("Spatstat.ProgressData", NULL)
putSpatstatVariable("warnedkeys", character(0))

## Kovesi's uniform colour map, row 29, linear 'bmy'
putSpatstatVariable("DefaultImageColours",
c("#000C7D", "#000D7E", "#000D80", "#000E81", "#000E83",
"#000E85", "#000F86", "#000F88", "#00108A", "#00108B",
"#00118D", "#00118F", "#001190", "#001292", "#001293",
"#001295", "#001396", "#001398", "#001399", "#00149A",
"#00149C", "#00149D", "#00149E", "#00159F", "#0015A0",
"#0015A1",
"#0015A2", "#0015A3", "#0015A4", "#0016A5", "#0016A6", "#0016A6", "#0016A7", "#0016A8", "#0016A8", "#0016A8", "#0A16A9", "#1516A9", "#1D15A9", "#2315A9", "#2915A9", "#2F15A8", "#3414A8", "#3914A7", "#3E13A6", "#4313A5", "#4712A4", "#4C12A3", "#5011A2", "#5311A1", "#5710A0", "#5A0F9F", "#5E0F9E", "#610E9E", "#640E9D", "#670D9C", "#6A0D9B", "#6C0C9A", "#6F0B99", "#720B98", "#740A98", "#770A97", "#790996", "#7C0896", "#7E0895", "#800794", "#810794", "#840693", "#860692", "#880692", "#8A0591", "#8C0591", "#8E0490", "#900490", "#92048F", "#94038F", "#96038E", "#98038E", "#9A028D", "#9C028D", "#9E028D", "#A0018C", "#A2018C", "#A4018B", "#A6018B", "#A8008A", "#AA008A", "#AB0089", "#AD0089", "#AF0088", "#B10088", "#B30087", "#B50087", "#B70086", "#B80086", "#BA0086", "#BC0085", "#BE0085", "#C00084", "#C20084", "#C30083", "#C50083", "#C70082", "#C90082", "#CB0081", "#CD0081", "#CE0080", "#D00080", "#D20080", "#D40080", "#D5007F", "#D7007F", "#D9007E", "#DA007E", "#DC007D", "#DD007C", "#DF017C", "#E1027B", "#E2047B", "#E4067A", "#E5087A", "#E70B79", "#E80D78", "#E91078", "#EB1277", "#EC1477", "#ED1676", "#EF1875", "#F01A75", "#F11C74", "#F31E73", "#F42073", "#F52272", "#F62471", "#F72671", "#F82870", "#FA2A6F", "#FB2C6F", "#FC2E6E", "#FD306D", "#FE326C", "#FE346C", "#FE366B", "#FE386A", "#FE3A6A", "#FE3D69", "#FE3F68", "#FE4167", "#FE4366", "#FE4566", "#FE4765", "#FE4964", "#FE4B63", "#FE4D62", "#FE5062", "#FE5261", "#FE5460", "#FE565F", "#FE585E", "#FE5A5D", "#FE5D5C", "#FE5F5B", "#FE615B", "#FE635A", "#FE6559", "#FE6758", "#FE6A57", "#FE6C56", "#FE6E55", "#FE7054", "#FE7253", "#FE7452", "#FE7651", "#FE7850", "#FE7A4E", "#FE7C4D", "#FE7E4C", "#FE7F4B", "#FE804A", "#FE8249", "#FE8448", "#FE8647", "#FE8745", "#FE8944", "#FE8B43", "#FE8D42", "#FE8E40", "#FE903F", "#FE923E", "#FE943C", "#FE953B", "#FE9739", "#FE9938", "#FE9A36", "#FE9C35", "#FE9E33", "#FE9F32", "#FEA130", "#FEA22F", "#FEA42E", "#FEA52C", "#FEA72B", "#FEA82A", "#FEAA29", "#FEAB28", "#FEAD27", "#FEAE26", 
"#FEB026", "#FEB125", "#FEB324", "#FEB423", "#FEB523", "#FEB722", "#FEB822", "#FEBA21", "#FEBB20", "#FEBC20", "#FEBE1F", "#FEBF1F", "#FEC11F", "#FEC21E", "#FEC31E", "#FEC51E", "#FEC61D", "#FEC71D", "#FEC91D", "#FECA1D", "#FECB1D", "#FECD1D", "#FECE1C", "#FECF1C", "#FED11C", "#FED21C", "#FED31C", "#FED51C", "#FED61D", "#FED71D", "#FED91D", "#FEDA1D", "#FEDB1D", "#FEDD1D", "#FEDE1E", "#FEDF1E", "#FEE11E", "#FEE21E", "#FEE31F", "#FEE51F", "#FEE61F", "#FEE720", "#FEE820", "#FEEA21", "#FEEB21", "#FEEC22", "#FEEE22", "#FEEF23", "#FEF023")) warn.once <- function(key, ...) { warned <- getSpatstatVariable("warnedkeys") if(!(key %in% warned)) { warning(paste(...), call.=FALSE) putSpatstatVariable("warnedkeys", c(warned, key)) } return(invisible(NULL)) } ".Spat.Stat.Opt.Table" <- list( checkpolygons = list( ## superseded superseded=TRUE, default=FALSE, check=function(x) { warning("spatstat.options('checkpolygons') will be ignored in future versions of spatstat", call.=FALSE) return(is.logical(x) && length(x) == 1) }, valid="a single logical value" ), checksegments = list( ## default value of 'check' for psp objects default=TRUE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), closepairs.newcode=list( ## use new code for 'closepairs' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), crossing.psp.useCall=list( ## use new code for 'crossing.psp' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), crosspairs.newcode=list( ## use new code for 'crosspairs' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), densityTransform=list( ## use experimental new C routines for 'density.ppp' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), densityC=list( ## use C routines for 'density.ppp' default=TRUE, check=function(x) { is.logical(x) && 
length(x) == 1 }, valid="a single logical value" ), dpp.maxmatrix=list( ## maximum size of matrix in dppeigen default=2^24, # 16,777,216 check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1024 }, valid="a single integer, greater than 1024" ), exactdt.checks.data=list( ## whether 'exactdt' checks validity of return value default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), expand=list( ## default area expansion factor default=2, check=function(x) { is.numeric(x) && length(x) == 1 && x > 1 }, valid="a single numeric value, greater than 1" ), expand.polynom=list( ## whether to expand polynom() in ppm formulae default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), fasteval=list( ## whether to use 'fasteval' code if available default="on", check=function(x) { x %in% c("off", "on", "test") }, valid="one of the strings \'off\', \'on\' or \'test\'" ), fastpois=list( # whether to use fast algorithm for rpoispp() when lambda is an image default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), fastthin=list( # whether to use fast C algorithm for rthin() when P is constant default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), fastK.lgcp=list( ## whether to cut a few corners in 'lgcp.estK' default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), fixpolygons = list( ## whether to repair polygons automatically default=TRUE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), gpclib=list( ## defunct! 
superseded=TRUE, default=FALSE, check=function(x) { message("gpclib is no longer needed") return(TRUE) }, valid="a single logical value" ), huge.npoints=list( ## threshold to trigger a warning from rpoispp default=1e6, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1024 }, valid="a single integer, greater than 1024" ), image.colfun=list( ## default colour scheme for plot.im # default=function(n){topo.colors(n)}, default=function(n) { z <- getSpatstatVariable("DefaultImageColours") interp.colours(z, n) }, check=function(x) { if(!is.function(x) || length(formals(x)) == 0) return(FALSE) y <- x(42) if(length(y) != 42 || !is.character(y)) return(FALSE) z <- try(col2rgb(y), silent=TRUE) return(!inherits(z, "try-error")) }, valid="a function f(n) that returns character strings, interpretable as colours" ), Kcom.remove.zeroes=list( ## whether Kcom removes zero distances default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), maxedgewt=list( ## maximum edge correction weight default=100, check=function(x){ is.numeric(x) && length(x) == 1 && is.finite(x) && x >= 1 }, valid="a finite numeric value, not less than 1" ), maxmatrix=list( ## maximum size of matrix of pairs of points in mpl.R default=2^24, # 16,777,216 check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1024 }, valid="a single integer, greater than 1024" ), monochrome = list( ## switch for monochrome colour scheme default=FALSE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), n.bandwidth=list( ## number of values of bandwidth to try in bandwidth selection default=32, check=function(x) { is.numeric(x) && (length(x) == 1) && (x == ceiling(x)) && (x > 2) }, valid="a single integer, greater than 2" ), ndummy.min=list( ## minimum grid size for dummy points default=32, check=function(x) { is.numeric(x) && length(x) <= 2 && all(x == ceiling(x)) && all(x > 1) }, valid="a single 
integer or a pair of integers, greater than 1" ), ngrid.disc=list( ## number of grid points used to calculate area in area-interaction default=128, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1 }, valid="a single integer, greater than 1" ), npixel=list( ## default pixel dimensions default=128, check=function(x){ is.numeric(x) && (length(x) %in% c(1,2)) && is.finite(x) && all(x == ceiling(x)) && all(x > 1) }, valid="an integer, or a pair of integers, greater than 1" ), nvoxel=list( ## default total number of voxels default=2^22, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 2^12 }, valid="a single integer, greater than 2^12" ), old.morpho.psp=list( ## use old code for morphological operations default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), par.binary=list( ## default graphics parameters for masks default=list(), check=is.list, valid="a list" ), par.contour=list( ## default graphics parameters for 'contour' default=list(), check=is.list, valid="a list" ), par.fv=list( ## default graphics parameters for 'plot.fv' default=list(), check=is.list, valid="a list" ), par.persp=list( ## default graphics parameters for 'persp' plots default=list(), check=is.list, valid="a list" ), par.points=list( ## default graphics parameters for 'points' default=list(), check=is.list, valid="a list" ), par.pp3=list( ## default graphics parameters for 'plot.pp3' default=list(), check=is.list, valid="a list" ), print.ppm.SE=list( ## under what conditions to print estimated SE in print.ppm default="poisson", check=function(x) { is.character(x) && length(x) == 1 && x %in% c("always", "poisson", "never") }, valid="one of the strings \'always\', \'poisson\' or \'never\'" ), progress = list( ## how to display progress reports default="tty", check=function(x){ x %in% c("tty", "tk", "txtbar") }, valid="one of the strings 'tty', 'tk' or 'txtbar'" ), project.fast=list( ## 
whether to cut corners when projecting an invalid ppm object default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), psstA.ngrid=list( ## size of point grid for computing areas in psstA default=32, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x >= 8 }, valid="a single integer, greater than or equal to 8" ), psstA.nr=list( ## number of 'r' values to consider in psstA default=30, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x >= 4 }, valid="a single integer, greater than or equal to 4" ), psstG.remove.zeroes=list( ## whether to remove zero distances in psstG default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), eroded.intensity=list( ## whether to compute intensity estimate in eroded window ## e.g. for Kcom, Gcom default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), rmh.nrep=list( ## default value of parameter 'nrep' in rmh default=5e5, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 0 }, valid="a single integer, greater than 0" ), rmh.p=list( ## default value of parameter 'p' in rmh default=0.9, check=function(x) { is.numeric(x) && length(x) == 1 && x >= 0 && x <= 1 }, valid="a single numerical value, between 0 and 1" ), rmh.q=list( ## default value of parameter 'q' in rmh default=0.9, check=function(x) { is.numeric(x) && length(x) == 1 && x > 0 && x < 1 }, valid="a single numerical value, strictly between 0 and 1" ), scalable = list( ## whether certain calculations in ppm should be scalable default=TRUE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), selfcrossing.psp.useCall=list( ## whether to use new code in selfcrossing.psp default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), terse = list( ## Level of terseness in printed output (higher 
=> more terse) default=0, check=function(x) { length(x) == 1 && (x %in% 0:4) }, valid="an integer between 0 and 4" ), transparent=list( ## whether to allow transparent colours in default colour maps default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), units.paren=list( default="(", check=function(x) { is.character(x) && (length(x) == 1) && (x %in% c("(", "[", "{", "")) }, valid="one of the strings '(', '[', '{' or '' " ), use.Krect=list( ## whether to use function Krect in Kest(X) when window is rectangle default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Cwhist=list( ## whether to use C code for whist default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Cbdrymask=list( ## whether to use C code for bdry.mask default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), kppm.canonical=list( ## whether to use 'canonical' parameters in kppm default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), kppm.adjusted=list( ## experimental default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), check.rpanel.loaded=list( # internal debugging default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), check.RandomFields.loaded=list( # this is working OK so no need to check unless debugging default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), check.RandomFieldsUtils.loaded=list( # this is working OK so no need to check unless debugging default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Clinequad = list( # use C code for 'linequad' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Ccountends = list( # 
use C code for 'countends' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Clinearradius = list( # use C code for 'boundingradius.linnet' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Cnndistlpp = list( # use C code for 'nndist.lpp'/'nnwhich.lpp' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Cnncrosslpp = list( # use C code for 'nncross.lpp' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), developer = list( # general purpose; user is a developer; use experimental code, etc default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ) ) # end of options list reset.spatstat.options <- function() { Spatstat.Options <- lapply(.Spat.Stat.Opt.Table, getElement, name="default") putSpatstatVariable("Spatstat.Options", Spatstat.Options) invisible(Spatstat.Options) } reset.spatstat.options() spatstat.options <- local({ spatstat.options <- function (...) { Spatstat.Options <- getSpatstatVariable("Spatstat.Options") called <- list(...) if(length(called) == 0) { # return all options, except superseded ones allofem <- .Spat.Stat.Opt.Table[names(Spatstat.Options)] retain <- sapply(lapply(allofem, getElement, name="superseded"), is.null) return(Spatstat.Options[retain]) } if(is.null(names(called)) && length(called)==1) { # spatstat.options(x) x <- called[[1]] if(is.null(x)) return(Spatstat.Options) # spatstat.options(NULL) if(is.list(x)) called <- x } if(is.null(names(called))) { # spatstat.options("par1", "par2", ...) 
ischar <- unlist(lapply(called, is.character)) if(all(ischar)) { choices <- unlist(called) ok <- choices %in% names(Spatstat.Options) if(!all(ok)) stop(paste("Unrecognised option(s):", called[!ok])) if(length(called) == 1) return(Spatstat.Options[[choices]]) else return(Spatstat.Options[choices]) } else { wrong <- called[!ischar] offending <- sapply(wrong, ShortDeparse) offending <- paste(offending, collapse=",") stop(paste("Unrecognised mode of argument(s) [", offending, "]: should be character string or name=value pair")) } } ## spatstat.options(name=value, name2=value2,...) assignto <- names(called) if (is.null(assignto) || !all(nzchar(assignto))) stop("options must all be identified by name=value") recog <- assignto %in% names(.Spat.Stat.Opt.Table) if(!all(recog)) stop(paste("Unrecognised option(s):", assignto[!recog])) ## validate new values for(i in seq_along(assignto)) { nama <- assignto[i] valo <- called[[i]] entry <- .Spat.Stat.Opt.Table[[nama]] ok <- entry$check(valo) if(!ok) stop(paste("Parameter", dQuote(nama), "should be", entry$valid)) } ## reassign changed <- Spatstat.Options[assignto] Spatstat.Options[assignto] <- called putSpatstatVariable("Spatstat.Options", Spatstat.Options) ## return invisible(changed) } ShortDeparse <- function(x) { y <- x dont.complain.about(y) short.deparse(substitute(y)) } spatstat.options }) spatstat/R/zclustermodel.R0000644000176200001440000000520213115225157015336 0ustar liggesusers#' #' zclustermodel.R #' #' Experimental #' zclustermodel <- function(name="Thomas", ..., mu, kappa, scale) { if(missing(kappa)) stop("The parent intensity kappa must be given") if(missing(mu)) stop("The mean cluster size mu must be given") if(missing(scale)) stop("The cluster scale must be given") rules <- spatstatClusterModelInfo(name) argh <- list(startpar=c(kappa=kappa, scale=scale), ...) 
argh <- do.call(rules$resolvedots, argh) par <- argh$startpar other <- argh[names(argh) != "startpar"] clustargs <- rules$checkclustargs(other$margs, old=FALSE) out <- list(name=name, rules=rules, par=par, mu=mu, clustargs=clustargs, other=other) class(out) <- "zclustermodel" return(out) } print.zclustermodel <- local({ print.zclustermodel <- function(x, ...) { with(x, { splat(rules$printmodelname(list(clustargs=clustargs))) newpar <- rules$checkpar(par, old=FALSE) splat("Parent intensity kappa =", blurb("kappa", newpar["kappa"])) splat("Cluster scale = ", newpar["scale"]) splat("Mean cluster size mu =", blurb("mu", mu)) if(length(clustargs) > 0) { hdr <- paste("Cluster shape", ngettext(length(clustargs), "parameter:", "parameters:")) if(is.list(clustargs) && all(sapply(clustargs, is.numeric)) && all(lengths(clustargs) == 1)) { splat(hdr, paste(names(clustargs), as.numeric(clustargs), sep="=", collapse=", ")) } else { splat(hdr) print(clustargs) } } }) return(invisible(NULL)) } blurb <- function(name, value) { if(is.numeric(value)) as.character(value) else if(is.im(value)) "[image]" else "[unrecognized format]" } print.zclustermodel }) pcfmodel.zclustermodel <- function(model, ...) { p <- model$rules$pcf mpar <- model$par other <- model$other f <- function(r) { do.call(p, c(list(par=mpar, rvals=r), other, model$rules["funaux"])) } return(f) } predict.zclustermodel <- function(object, ..., locations, type="intensity", ngrid=NULL) { ## limited use!!! if(!identical(type, "intensity")) stop("Sorry, only type='intensity' is implemented") lambda <- object$par["kappa"] * object$mu if(is.numeric(lambda)) { if(is.ppp(locations)) return(rep(lambda, npoints(locations))) W <- as.owin(locations) if(!is.mask(W)) W <- as.mask(W, dimyx=ngrid, ...) 
return(as.im(lambda, W=W)) } return(lambda[locations]) } spatstat/R/diagram.R0000644000176200001440000002422113115225157014050 0ustar liggesusers## ## diagram.R ## ## Simple objects for the elements of a diagram (text, arrows etc) ## that are compatible with plot.layered and plot.solist ## ## $Revision: 1.12 $ $Date: 2016/04/25 02:34:40 $ # ......... internal class 'diagramobj' supports other classes ......... diagramobj <- function(X, ...) { if(inherits(try(Frame(X), silent=TRUE), "try-error")) stop("X is not a spatial object") a <- list(...) if(sum(nzchar(names(a))) != length(a)) stop("All extra arguments must be named") attributes(X) <- append(attributes(X), a) class(X) <- c("diagramobj", class(X)) return(X) } "[.diagramobj" <- function(x, ...) { y <- NextMethod("[") attributes(y) <- attributes(x) return(y) } shift.diagramobj <- function(X, ...) { y <- NextMethod("shift") attributes(y) <- attributes(X) return(y) } scalardilate.diagramobj <- function(X, f, ...) { y <- NextMethod("scalardilate") attributes(y) <- attributes(X) return(y) } # .............. user-accessible classes ................ # ......... (these only need a creator and a plot method) ...... ## ........... text ................. textstring <- function(x, y, txt=NULL, ...) { if(is.ppp(x) && missing(y)) { X <- x Window(X) <- boundingbox(x) } else { if(missing(y) && checkfields(x, c("x", "y"))) { y <- x$y x <- x$x stopifnot(length(x) == length(y)) } X <- ppp(x, y, window=owin(range(x),range(y))) } marks(X) <- txt Y <- diagramobj(X, otherargs=list(...)) class(Y) <- c("textstring", class(Y)) return(Y) } plot.textstring <- function(x, ..., do.plot=TRUE) { txt <- marks(x) otha <- attr(x, "otherargs") if(do.plot) do.call.matched(text.default, resolve.defaults(list(...), list(x=x$x, y=x$y, labels=txt), otha), funargs=graphicsPars("text")) return(invisible(Frame(x))) } print.textstring <- function(x, ...) 
{ splat("Text string object") txt <- marks(x) if(npoints(x) == 1) { splat("Text:", dQuote(txt)) splat("Coordinates:", paren(paste(as.vector(coords(x)), collapse=", "))) } else { splat("Text:") print(txt) splat("Coordinates:") print(coords(x)) } return(invisible(NULL)) } ## ........... 'yardstick' to display scale information ................ yardstick <- function(x0, y0, x1, y1, txt=NULL, ...) { nomore <- missing(y0) && missing(x1) && missing(y1) if(is.ppp(x0) && nomore) { if(npoints(x0) != 2) stop("x0 should consist of exactly 2 points") X <- x0 } else if(is.psp(x0) && nomore) { if(nobjects(x0) != 1) stop("x0 should consist of exactly 1 segment") X <- endpoints.psp(x0) } else { xx <- c(x0, x1) yy <- c(y0, y1) B <- boundingbox(list(x=xx, y=yy)) X <- ppp(xx, yy, window=B, check=FALSE) } Window(X) <- boundingbox(X) Y <- diagramobj(X, txt=txt, otherargs=list(...)) class(Y) <- c("yardstick", class(Y)) return(Y) } plot.yardstick <- local({ mysegments <- function(x0, y0, x1, y1, ..., moreargs=list()) { ## ignore unrecognised arguments without whingeing do.call.matched(segments, resolve.defaults(list(x0=x0, y0=y0, x1=x1, y1=y1), list(...), moreargs), extrargs=c("col", "lty", "lwd", "xpd", "lend")) } myarrows <- function(x0, y0, x1, y1, ..., left=TRUE, right=TRUE, angle=20, frac=0.25, main, show.all, add) { mysegments(x0, y0, x1, y1, ...) if(left || right) { ang <- angle * pi/180 co <- cos(ang) si <- sin(ang) dx <- x1-x0 dy <- y1-y0 le <- sqrt(dx^2 + dy^2) rot <- matrix(c(dx, dy, -dy, dx)/le, 2, 2) arlen <- frac * le up <- arlen * (rot %*% c(co, si)) lo <- arlen * (rot %*% c(co, -si)) if(left) { mysegments(x0, y0, x0+up[1L], y0+up[2L], ...) mysegments(x0, y0, x0+lo[1L], y0+lo[2L], ...) } if(right) { mysegments(x1, y1, x1-up[1L], y1-up[2L], ...) mysegments(x1, y1, x1-lo[1L], y1-lo[2L], ...) 
} } return(invisible(NULL)) } plot.yardstick <- function(x, ..., angle=20, frac=1/8, split=FALSE, shrink=1/4, pos=NULL, txt.args=list(), txt.shift=c(0,0), do.plot=TRUE) { if(do.plot) { txt <- attr(x, "txt") argh <- resolve.defaults(list(...), attr(x, "otherargs")) A <- as.numeric(coords(x)[1L,]) B <- as.numeric(coords(x)[2L,]) M <- (A+B)/2 if(!split) { ## double-headed arrow myarrows(A[1L], A[2L], B[1L], y1=B[2L], angle=angle, frac=frac, moreargs=argh) if(is.null(pos) && !("adj" %in% names(txt.args))) pos <- if(abs(A[1L] - B[1L]) < abs(A[2L] - B[2L])) 4 else 3 } else { ## two single-headed arrows with text dM <- (shrink/2) * (B - A) AM <- M - dM BM <- M + dM newfrac <- frac/((1-shrink)/2) myarrows(AM[1L], AM[2L], A[1L], A[2L], angle=angle, frac=newfrac, left=FALSE, moreargs=argh) myarrows(BM[1L], BM[2L], B[1L], B[2L], angle=angle, frac=newfrac, left=FALSE, moreargs=argh) } if(is.null(txt.shift)) txt.shift <- rep(0, 2) else txt.shift <- ensure2vector(unlist(txt.shift)) do.call.matched(text.default, resolve.defaults(list(x=M[1L] + txt.shift[1L], y=M[2L] + txt.shift[2L]), txt.args, list(labels=txt, pos=pos), argh, .MatchNull=FALSE), funargs=graphicsPars("text")) } return(invisible(Window(x))) } plot.yardstick }) print.yardstick <- function(x, ...) { splat("Yardstick") if(!is.null(txt <- attr(x, "txt"))) splat("Text:", txt) ui <- summary(unitname(x)) splat("Length:", pairdist(x)[1L,2L], ui$plural, ui$explain) splat("Midpoint:", paren(paste(signif(c(mean(x$x), mean(x$y)), 3), collapse=", "))) dx <- diff(range(x$x)) dy <- diff(range(x$y)) orient <- if(dx == 0) "vertical" else if(dy == 0) "horizontal" else paste(atan2(dy, dx) * 180/pi, "degrees") splat("Orientation:", orient) return(invisible(NULL)) } ## code to draw a decent-looking arrow in spatstat diagrams ## (works in layered objects) ## The name 'onearrow' is used because R contains ## hidden functions [.arrow, length.arrow onearrow <- function(x0, y0, x1, y1, txt=NULL, ...) 
{ nomore <- missing(y0) && missing(x1) && missing(y1) if(is.ppp(x0) && nomore) { if(npoints(x0) != 2) stop("x0 should consist of exactly 2 points") X <- x0 } else if(is.psp(x0) && nomore) { if(nobjects(x0) != 1) stop("x0 should consist of exactly 1 segment") X <- endpoints.psp(x0) } else { xx <- c(x0, x1) yy <- c(y0, y1) B <- boundingbox(list(x=xx, y=yy)) X <- ppp(xx, yy, window=B, check=FALSE) } Window(X) <- boundingbox(X) Y <- diagramobj(X, txt=txt, otherargs=list(...)) class(Y) <- c("onearrow", class(Y)) return(Y) } print.onearrow <- function(x, ...) { cat("Single arrow", fill=TRUE) if(!is.null(txt <- attr(x, "txt"))) cat("Text:", txt, fill=TRUE) NextMethod("print") } plot.onearrow <- function(x, ..., add=FALSE, main="", retract=0.05, headfraction=0.25, headangle=12, # degrees headnick=0.1, # fraction of head length col.head=NA, lwd.head=lwd, lwd=1, col=1, zap=FALSE, zapfraction=0.07, pch=1, cex=1, do.plot=TRUE, do.points=FALSE, show.all=!add) { result <- plot.ppp(x, main=main, add=add, pch=pch, cex=cex, do.plot=do.plot && do.points, show.all=show.all) if(do.plot) { if(!do.points && !add) plot(Frame(x), main="", type="n") txt <- attr(x, "txt") argh <- resolve.defaults(list(...), attr(x, "otherargs")) A <- as.numeric(coords(x)[1L,]) B <- as.numeric(coords(x)[2L,]) V <- B - A AR <- A + retract * V BR <- B - retract * V H <- B - headfraction * V HN <- H + headnick * headfraction * V headlength <- headfraction * sqrt(sum(V^2)) halfwidth <- headlength * tan((headangle/2) * pi/180) alpha <- atan2(V[2L], V[1L]) + pi/2 U <- c(cos(alpha), sin(alpha)) HL <- H + halfwidth * U HR <- H - halfwidth * U Head <- rbind(HN, HL, BR, HR, HN) if(!is.na(col.head)) do.call.matched(polygon, resolve.defaults(list(x=Head), argh, list(col=col.head, lwd=lwd.head))) if(!zap) { Tail <- AR } else { M <- (AR+HN)/2 dM <- (zapfraction/2) * (1-headfraction) * V dM <- dM + c(-dM[2L], dM[1L]) ML <- M + dM MR <- M - dM Tail <- rbind(AR, ML, MR) } do.call.matched(lines, 
resolve.defaults(list(x=rbind(Tail, Head)), argh, list(col=col, lwd=lwd)), extrargs=c("col", "lwd", "lty", "xpd", "lend")) if(!is.null(txt <- attr(x, "txt"))) { H <- (A+B)/2 do.call.matched(text.default, resolve.defaults( list(x=H[1L], y=H[2L]), argh, list(labels=txt, pos=3 + (V[2L] != 0))), funargs=graphicsPars("text")) } } return(invisible(result)) } spatstat/R/ppp.R0000755000176200001440000005021513140747667013265 0ustar liggesusers# # ppp.R # # A class 'ppp' to define point patterns # observed in arbitrary windows in two dimensions. # # $Revision: 4.111 $ $Date: 2017/06/05 10:31:58 $ # # A point pattern contains the following entries: # # $window: an object of class 'owin' # defining the observation window # # $n: the number of points (for efficiency) # # $x: # $y: vectors of length n giving the Cartesian # coordinates of the points. # # It may also contain the entry: # # $marks: a vector of length n # whose entries are interpreted as the # 'marks' attached to the corresponding points. # #-------------------------------------------------------------------------- ppp <- function(x, y, ..., window, marks, check=TRUE, checkdup=check, drop=TRUE) { # Constructs an object of class 'ppp' # if(!missing(window)) verifyclass(window, "owin") else window <- owin(...) 
if((missing(x) && missing(y)) || (length(x) == 0 && length(y) == 0)) x <- y <- numeric(0) n <- length(x) if(length(y) != n) stop("coordinate vectors x and y are not of equal length") # validate x, y coordinates stopifnot(is.numeric(x)) stopifnot(is.numeric(y)) good <- is.finite(x) & is.finite(y) if(naughty <- !all(good)) { #' bad values will be discarded nbad <- sum(!good) nna <- sum(is.na(x) | is.na(y)) ninf <- nbad - nna if(nna > 0) warning(paste(nna, "out of", n, ngettext(n, "point", "points"), "had NA or NaN coordinate values, and", ngettext(nna, "was", "were"), "discarded")) if(ninf > 0) warning(paste(ninf, "out of", n, ngettext(n, "point", "points"), "had infinite coordinate values, and", ngettext(ninf, "was", "were"), "discarded")) #' chuck out x <- x[good] y <- y[good] n <- sum(good) } names(x) <- NULL names(y) <- NULL # check (x,y) points lie inside window if(check && n > 0) { ok <- inside.owin(x, y, window) nout <- sum(!ok) if(nout > 0) { warning(paste(nout, ngettext(nout, "point was", "points were"), "rejected as lying outside the specified window"), call.=FALSE) rr <- ripras(x,y) bb <- boundingbox(x,y) bb <- boundingbox(rr, bb, window) rejectwindow <- if(!is.null(rr)) rebound.owin(rr, bb) else bb rejects <- ppp(x[!ok], y[!ok], window=rejectwindow, check=FALSE) # discard illegal points x <- x[ok] y <- y[ok] n <- length(x) } } else nout <- 0 # initialise ppp object pp <- list(window=window, n=n, x=x, y=y) # coerce marks to appropriate format if(missing(marks)) marks <- NULL if(is.hyperframe(marks)) stop("Hyperframes of marks are not implemented for ppp objects; use ppx") if(is.matrix(marks)) marks <- as.data.frame(marks) ## drop dimensions? 
if(drop && is.data.frame(marks)) { nc <- ncol(marks) if(nc == 0) marks <- NULL else if(nc == 1) marks <- marks[,,drop=TRUE] } # attach marks if(is.null(marks)) { # no marks pp$markformat <- "none" } else if(is.data.frame(marks)) { # data frame of marks pp$markformat <- "dataframe" if(naughty) { #' remove marks attached to discarded points with non-finite coordinates marks <- marks[good, ] } if(nout > 0) { #' sequester marks of points falling outside window marks(rejects) <- marks[!ok,] marks <- marks[ok, ] } if(nrow(marks) != n) stop("number of rows of marks != length of x and y") pp$marks <- marks } else { # should be a vector or factor # To recognise vector, strip attributes isspecial <- is.factor(marks) || inherits(marks, "POSIXt") || inherits(marks, "Date") if(!isspecial) attributes(marks) <- NULL if(!(is.vector(marks) || isspecial)) stop("Format of marks not understood") # OK, it's a vector or factor pp$markformat <- "vector" if(naughty) { #' remove marks attached to discarded points with non-finite coordinates marks <- marks[good] } if(nout > 0) { #' sequester marks of points falling outside window marks(rejects) <- marks[!ok] marks <- marks[ok] } if(length(marks) != n) stop("length of marks vector != length of x and y") names(marks) <- NULL pp$marks <- marks } class(pp) <- "ppp" if(checkdup && anyDuplicated(pp)) warning("data contain duplicated points", call.=FALSE) if(nout > 0) attr(pp, "rejects") <- rejects pp } # #-------------------------------------------------------------------------- # is.ppp <- function(x) { inherits(x, "ppp") } # #-------------------------------------------------------------------------- # as.ppp <- function(X, ..., fatal=TRUE) { UseMethod("as.ppp") } as.ppp.ppp <- function(X, ..., fatal=TRUE) { check <- resolve.defaults(list(...), list(check=FALSE))$check return(ppp(X$x, X$y, window=X$window, marks=X$marks, check=check)) } as.ppp.quad <- function(X, ..., fatal=TRUE) { return(union.quad(X)) } as.ppp.data.frame <- function(X, W = 
NULL, ..., fatal=TRUE) { X <- as.data.frame(X) #' swim against the tidyverse check <- resolve.defaults(list(...), list(check=TRUE))$check if(ncol(X) < 2) return(complaining("X must have at least two columns", fatal, value=NULL)) if(is.null(W)) return(complaining("x,y coords given but no window specified", fatal, value=NULL)) # columns 1 and 2 are assumed to be coordinates # marks from other columns marx <- if(ncol(X) > 2) X[, -(1:2)] else NULL if(is.function(W)) Z <- cobble.xy(X[,1], X[,2], W, fatal, marks=marx, check=check) else { win <- as.owin(W) Z <- ppp(X[,1], X[,2], window = win, marks=marx, check=check) } return(Z) } as.ppp.matrix <- function(X, W = NULL, ..., fatal=TRUE) { check <- resolve.defaults(list(...), list(check=TRUE))$check if(!verifyclass(X, "matrix", fatal=fatal) || !is.numeric(X)) return(complaining("X must be a numeric matrix", fatal, value=NULL)) if(ncol(X) < 2) return(complaining("X must have at least two columns", fatal, value=NULL)) if(is.null(W)) return(complaining("x,y coords given but no window specified", fatal, value=NULL)) if(is.function(W)) Z <- cobble.xy(X[,1], X[,2], W, fatal) else { win <- as.owin(W) Z <- ppp(X[,1], X[,2], window = win, check=check) } # add marks from other columns if(ncol(X) > 2) marks(Z) <- X[, -(1:2)] return(Z) } as.ppp.default <- function(X, W=NULL, ..., fatal=TRUE) { # tries to coerce data X to a point pattern # X may be: # 1. a structure with entries x, y, xl, xu, yl, yu # 2. a structure with entries x, y, area where # 'area' has entries xl, xu, yl, yu # 3. a structure with entries x, y # 4. a vector of length 2, interpreted as a single point. # The second argument W is coerced to an object of class 'owin' by the # function "as.owin" in window.S # If X also has an entry X$marks # then this will be interpreted as the marks vector for the pattern. 
# check <- resolve.defaults(list(...), list(check=TRUE))$check if(checkfields(X, c("x", "y", "xl", "xu", "yl", "yu"))) { xrange <- c(X$xl, X$xu) yrange <- c(X$yl, X$yu) if(is.null(X$marks)) Z <- ppp(X$x, X$y, xrange, yrange, check=check) else Z <- ppp(X$x, X$y, xrange, yrange, marks=X$marks, check=check) return(Z) } else if(checkfields(X, c("x", "y", "area")) && checkfields(X$area, c("xl", "xu", "yl", "yu"))) { win <- as.owin(X$area) if (is.null(X$marks)) Z <- ppp(X$x, X$y, window=win, check=check) else Z <- ppp(X$x, X$y, window=win, marks = X$marks, check=check) return(Z) } else if(checkfields(X, c("x", "y"))) { if(is.function(W)) return(cobble.xy(X$x, X$y, W, fatal)) if(is.null(W)) { if(fatal) stop("x,y coords given but no window specified") else return(NULL) } win <- as.owin(W) if(is.null(X$marks)) Z <- ppp(X$x, X$y, window=win, check=check) else Z <- ppp(X$x, X$y, window=win, marks=X$marks, check=check) return(Z) } else if(is.vector(X) && length(X) == 2) { win <- as.owin(W) Z <- ppp(X[1], X[2], window=win, check=check) return(Z) } else { if(fatal) stop("Can't interpret X as a point pattern") else return(NULL) } } cobble.xy <- function(x, y, f=ripras, fatal=TRUE, ...) 
{ if(!is.function(f)) stop("f is not a function") w <- f(x,y) if(!is.owin(w)) { gripe <- "Supplied function f did not return an owin object" if(fatal) stop(gripe) else { warning(gripe) return(NULL) } } return(ppp(x, y, window=w, ...)) } # -------------------------------------------------------------- "[.ppp" <- function(x, i, j, drop=FALSE, ..., clip=FALSE) { verifyclass(x, "ppp") if(!missing(i)) { if(inherits(i, "owin")) { # i is a window window <- i if(clip) window <- intersect.owin(window, x$window) ok <- inside.owin(x$x, x$y, window) x <- ppp(x$x[ok], x$y[ok], window=window, #SIC marks=marksubset(x$marks, ok), check=FALSE) } else if(inherits(i, "im")) { # i is an image if(i$type != "logical") stop(paste("Subset operator X[i] undefined", "when i is a pixel image", "unless it has logical values"), call.=FALSE) # convert logical image to window e <- sys.frame(sys.nframe()) window <- solutionset(i, e) if(clip) window <- intersect.owin(window, x$window) ok <- inside.owin(x$x, x$y, window) x <- ppp(x$x[ok], x$y[ok], window=window, #SIC marks=marksubset(x$marks, ok), check=FALSE) } else { # assume i is a subset index nx <- x$n if(nx == 0) return(x) subset <- seq_len(nx)[i] if(anyNA(subset)) stop("Index out of bounds in [.ppp", call.=FALSE) x <- ppp(x$x[subset], x$y[subset], window=x$window, marks=marksubset(x$marks, subset), check=FALSE) } } if(!missing(j)) x <- x[j] # invokes code above if(drop) { mx <- x$marks switch(markformat(mx), none = { }, vector = { if(is.factor(mx)) marks(x) <- factor(mx) }, dataframe = { isfac <- sapply(mx, is.factor) if(any(isfac)) mx[, isfac] <- lapply(mx[, isfac], factor) }, hyperframe = { }) } return(x) } # ------------------------------------------------------------------ # # scanpp <- function(filename, window, header=TRUE, dir="", factor.marks = NULL, ...) 
{ filename <- if(dir=="") filename else paste(dir, filename, sep=.Platform$file.sep) df <- read.table(filename, header=header, stringsAsFactors = is.null(factor.marks)) if(header) { # check whether there are columns named 'x' and 'y' colnames <- dimnames(df)[[2]] xycolumns <- match(c("x", "y"), colnames, 0) named <- all(xycolumns > 0) } else { named <- FALSE } if(named) { x <- df$x y <- df$y } else { # assume x, y given in columns 1, 2 respectively x <- df[,1] y <- df[,2] xycolumns <- c(1,2) } if(ncol(df) == 2) X <- ppp(x, y, window=window) else { # Catch old argument "multitype": dots <- list(...) multi <- charmatch(names(dots), "multitype") argindex <- which(!is.na(multi)) if(length(argindex)>0){ if(missing(factor.marks)){ factor.marks <- dots[[argindex]] ignored <- "" } else{ ignored <- paste(" and it is ignored since", sQuote("factor.marks"), "is also supplied") } warning("It appears you have called scanpp ", " with (something partially matching) ", " the deprecated argument ", paste0(sQuote("multitype"), ignored, "."), " Please change to the new syntax.") } marks <- df[ , -xycolumns, drop=FALSE] if(any(factor.marks)){ # Find indices to convert to factors (recycling to obtain correct length) factorid <- (1:ncol(marks))[factor.marks] # Convert relevant columns to factors marks[,factorid] <- lapply(marks[,factorid,drop=FALSE], factor) } X <- ppp(x, y, window=window, marks = marks) } X } #------------------------------------------------------------------- "markspace.integral" <- function(X) { verifyclass(X, "ppp") if(!is.marked(X, dfok=TRUE)) return(1) if(is.multitype(X)) return(length(levels(marks(X)))) else stop("Don't know how to compute total mass of mark space") } #------------------------------------------------------------------- print.ppp <- function(x, ...) 
## Compute a summary of a planar point pattern (class "ppp").
## Returns an object of class "summary.ppp" (a list) holding the point
## count, window summary, average intensity, optional duplication and
## rounding diagnostics, and a description of the marks if present.
##
## object   : a point pattern of class "ppp"
## checkdup : if TRUE, also count duplicated points and detect the
##            rounding of the coordinates (can be slow for large patterns)
summary.ppp <- function(object, ..., checkdup=TRUE) {
  verifyclass(object, "ppp")
  result <- list()
  result$is.marked <- is.marked(object, dfok=TRUE)
  result$n <- object$n
  result$window <- summary(object$window)
  ## average intensity = points per unit area of the window
  result$intensity <- result$n/result$window$area
  if(checkdup) {
    result$nduplicated <- sum(duplicated(object))
    result$rounding <- rounding(object)
  }
  if(result$is.marked) {
    mks <- marks(object, dfok=TRUE)
    if(result$multiple.marks <- is.data.frame(mks)) {
      ## several mark variables: record their names only
      result$marknames <- names(mks)
      result$is.numeric <- FALSE
      result$marktype <- "dataframe"
      result$is.multitype <- FALSE
    } else {
      ## single column of marks
      result$is.numeric <- is.numeric(mks)
      result$marknames <- "marks"
      result$marktype <- typeof(mks)
      result$is.multitype <- is.multitype(object)
    }
    if(result$is.multitype) {
      ## frequency table of types, with per-type proportion and intensity
      tm <- as.vector(table(mks))
      tfp <- data.frame(frequency=tm,
                        proportion=tm/sum(tm),
                        intensity=tm/result$window$area,
                        row.names=levels(mks))
      result$marks <- tfp
    } else
      result$marks <- summary(mks)
  }
  class(result) <- "summary.ppp"
  ## pass through diagnostics attached by other spatstat code:
  ## rejected (illegal) points, and rmh simulation provenance
  if(!is.null(rejects <- attr(object, "rejects")))
    result$rejects <- rejects$n
  if(!is.null(info <- attr(object, "info")) && inherits(info, "rmhInfoList"))
    result$rmhinfo <- info
  return(result)
}
## Interactive identification of points in a plotted point pattern.
## Wraps graphics::identify(); for a marked pattern the result also
## carries the marks of the selected points.
identify.ppp <- function(x, ...) {
  verifyclass(x, "ppp")
  chosen <- identify(x$x, x$y, ...)
  if (!is.marked(x)) {
    ## unmarked: just return the selected indices
    return(chosen)
  }
  ## marked pattern: drop the coordinate columns, keep the marks
  markcols <- as.data.frame(x)[chosen, -(1:2)]
  result <- cbind(data.frame(id = chosen), markcols)
  row.names(result) <- NULL
  return(result)
}
## Find a set of points such that discs of radius r centred at these
## points cover the window W.
##
## W      : a window (coerced with as.owin)
## r      : disc radius
## ...    : passed to distmap() to control pixel resolution
## giveup : maximum number of extra points added in the refinement loop
covering <- function(W, r, ..., giveup=1000) {
  W <- as.owin(W)
  ## compute distance to boundary
  D <- distmap(W, invert=TRUE, ...)
  D <- D[W, drop=FALSE]
  M <- as.owin(D)
  pixstep <- max(M$xstep, M$ystep)
  ## very small distances: r below pixel resolution, so every pixel
  ## centre is needed anyway
  if(r <= pixstep) {
    warning("r is smaller than the pixel resolution: returning pixel centres",
            call.=FALSE)
    xy <- rasterxy.mask(M, drop=TRUE)
    return(ppp(xy[,1L], xy[,2L], window=W, check=FALSE))
  }
  ## find the point of W farthest from the boundary
  X <- where.max(D)
  ## build a hexagonal grid through this point;
  ## shrink the grid spacing for non-convex windows (factor 2/3),
  ## and by one pixel step to absorb discretisation error
  ruse <- if(is.convex(W)) r else (r * 2/3)
  ruse <- max(pixstep, ruse - pixstep)
  H <- hexgrid(W, ruse, offset=c(X$x, X$y), origin=c(0,0))
  if(npoints(H) == 0) H <- X
  ## the grid may not suffice if W is irregular:
  ## repeatedly add the point farthest from the current set until
  ## every location is within distance ruse of some point
  for(i in 1:giveup) {
    DH <- distmap(H)
    if(max(DH) < ruse && npoints(H) > 0) return(H)
    Hnew <- where.max(DH)
    H <- superimpose(H, Hnew, W=W)
  }
  stop(paste("Failed to converge after adding", giveup, "points"),
       call.=FALSE)
}
"direction", "arrowtype", "headlength", "headangle", "col", "cols", "fg", "bg", "lty", "lwd", "border", "fill", "etch") trycolourmap <- function(...) { try(colourmap(...), silent=TRUE) } symbolmap <- function(..., range=NULL, inputs=NULL) { if(!is.null(range) && !is.null(inputs)) stop("Arguments range and inputs are incompatible") ## graphics parameters parlist <- list(...) ## remove unrecognised parameters and NULL values if(length(parlist) > 0) { ok <- names(parlist) %in% known.unknowns ok <- ok & !unlist(lapply(parlist, is.null)) parlist <- parlist[ok] } got.pars <- (length(parlist) > 0) parnames <- names(parlist) type <- if(is.null(inputs) && is.null(range)) "constant" else if(!is.null(inputs)) "discrete" else "continuous" if(got.pars) { ## validate parameters if(is.null(parnames) || !all(nzchar(parnames))) stop("All graphics parameters must have names") atomic <- unlist(lapply(parlist, is.atomic)) functions <- unlist(lapply(parlist, is.function)) lenfs <- lengths(parlist) constants <- atomic & (lenfs == 1) if(any(bad <- !(constants | functions))) { if(type == "discrete" && any(repairable <- atomic[bad])) { ## recycle data to desired length parlist[repairable] <- lapply(parlist[repairable], reptolength, n=length(inputs)) bad[repairable] <- FALSE } if(type == "continuous") { ## look for vectors of colour values iscol <- bad & sapply(parlist, is.colour) & (names(parlist) %in% c("cols", "col", "fg", "bg")) ## convert colour values to colour map if(any(iscol)) { cmap <- lapply(parlist[iscol], trycolourmap, range=range) success <- sapply(cmap, inherits, what="colourmap") iscol[iscol] <- success if(any(iscol)) { parlist[iscol] <- cmap[success] bad[iscol] <- FALSE functions[iscol] <- TRUE } } } nbad <- sum(bad) if(nbad > 0) stop(paste(ngettext(nbad, "Argument", "Arguments"), commasep(sQuote(parnames[bad])), ngettext(nbad, "is neither a function nor a constant", "are neither functions nor constants"))) } if(type == "constant" && any(functions)) type <- "continuous" } 
## Return the type of a symbol map: one of "constant", "discrete"
## or "continuous", as recorded in its "stuff" attribute.
symbolmaptype <- function(x) {
  stuff <- attr(x, "stuff")
  stuff$type
}
## Print method for symbol maps.
## Describes the map type (constant / discrete / continuous) and then
## lists each graphics parameter with its value or mapping function.
print.symbolmap <- function(x, ...) {
  with(attr(x, "stuff"), {
    ## header line, depending on map type
    switch(type,
           constant = {
             if(length(parlist) == 0) {
               cat("Symbol map", "with no parameters", fill=TRUE)
             } else {
               cat("Symbol map", "with constant values", fill=TRUE)
             }
           },
           discrete = {
             cat("Symbol map", "for discrete inputs:", fill=TRUE)
             print(inputs)
           },
           continuous = {
             cat("Symbol map", "for",
                 switch(rangetype,
                        numeric="real numbers",
                        date = "dates",
                        datetime = "date/time values",
                        unknown = "unrecognised data"),
                 if(!is.null(range)) paste("in", prange(range)) else NULL,
                 fill=TRUE)
           })
    ## list each graphics parameter: scalars printed inline,
    ## functions/vectors printed on their own lines
    if(length(parlist) > 0) {
      for(i in seq_along(parlist)) {
        cat(paste0(names(parlist)[i], ": "))
        pari <- parlist[[i]]
        if(!is.function(pari) && length(pari) == 1)
          cat(pari, fill=TRUE)
        else
          print(pari)
      }
    }
    return(invisible(NULL))
  })
}
other.fixed, lapply(other.vec, "[", i=i), list(col=anti.col[i], lwd=anti.lwd[i])), extrargs=c("col", "pch", "type", "bg", "cex", "lwd", "lty")) } do.call.matched(points.default, resolve.defaults(list(x=x, y=y), other, list(col=col, lwd=lwd)), extrargs=c("col", "pch", "type", "bg", "cex", "lwd", "lty")) } return(max(cex %orifnull% 1)) } ## plot symbols likewise do.symbols <- function(x, y, ..., shape, size=cex, cex=NULL, fg=col, col=cols, cols=NULL, lwd=1, etch=FALSE, do.plot=TRUE) { if(do.plot) { ## zap tiny sizes tiny <- (size < (max(size)/1000)) size[tiny] <- 0 ## collect arguments n <- length(x) if(length(lwd) == 1) lwd <- rep(lwd, n) if(length(etch) == 1) etch <- rep(etch, n) if(length(fg) == 0) fg <- rep(par("col"), n) else if(length(fg) == 1) fg <- rep(fg, n) other <- resolve.defaults(list(...), list(add=TRUE, inches=FALSE)) ## infer which arguments are parallelised isvec <- (lengths(other) == n) other.fixed <- other[!isvec] other.vec <- other[isvec] ## if(any(as.logical(etch))) { anti.fg <- complementarycolour(fg) anti.lwd <- if(is.numeric(etch)) etch else 2 * lwd } ## plot if(any(i <- (shape == "circles") & as.logical(etch))) do.call.matched(symbols, c(list(x=x[i], y=y[i], circles=size[i]/2), other.fixed, lapply(other.vec, "[", i=i), list(lwd=anti.lwd[i], fg=anti.fg[i])), extrargs=c("lwd", "lty")) if(any(i <- (shape == "circles"))) do.call.matched(symbols, c(list(x=x[i], y=y[i], circles=size[i]/2), other.fixed, lapply(other.vec, "[", i=i), list(lwd=lwd[i], fg=fg[i])), extrargs=c("lwd", "lty")) if(any(i <- (shape == "squares") & as.logical(etch))) do.call.matched(symbols, c(list(x=x[i], y=y[i], squares=size[i]), other.fixed, lapply(other.vec, "[", i=i), list(lwd=anti.lwd[i], fg=anti.fg[i])), extrargs=c("lwd", "lty")) if(any(i <- (shape == "squares"))) do.call.matched(symbols, c(list(x=x[i], y=y[i], squares=size[i]), other.fixed, lapply(other.vec, "[", i=i), list(lwd=lwd[i], fg=fg[i])), extrargs=c("lwd", "lty")) if(any(i <- (shape == "arrows") & 
## Draw arrows centred at (x, y) with given length and direction.
## All data arguments are vectorised over the points.
##
## x, y       : arrow centre coordinates
## len        : total arrow length
## direction  : orientation in degrees
## arrowtype  : 0 = plain segment, 1 = head at start, 2 = head at end,
##              3 = heads at both ends
## headlength : length of the head strokes
##              NOTE(review): headlength is not included in the
##              vectorising data frame below, so it is recycled by
##              arithmetic rather than per-row -- confirm intended.
## headangle  : opening angle of the head, in degrees
do.arrows <- function(x, y, len, direction=0, arrowtype=2, ...,
                      headlength=len * 0.4, headangle=40,
                      cols=col, col=par('fg'), lwd=1, lty=1) {
  #' vectorise all arguments (recycled to a common length by data.frame)
  df <- data.frame(x=x, y=y, len=len,
                   direction=direction, arrowtype=arrowtype,
                   headangle=headangle,
                   cols=cols, lwd=lwd, lty=lty)
  with(df, {
    ## shaft endpoints: half the length on each side of the centre
    alpha <- direction * pi/180
    dx <- len * cos(alpha)/2
    dy <- len * sin(alpha)/2
    x0 <- x - dx
    x1 <- x + dx
    y0 <- y - dy
    y1 <- y + dy
    segments(x0, y0, x1, y1, ..., col=cols, lty=lty, lwd=lwd)
    if(any(arrowtype != 0)) {
      ## head strokes at +/- half the head angle from the shaft direction
      halfangle <- (headangle/2) * pi/180
      beta1 <- alpha + halfangle
      beta2 <- alpha - halfangle
      hx1 <- headlength * cos(beta1)
      hy1 <- headlength * sin(beta1)
      hx2 <- headlength * cos(beta2)
      hy2 <- headlength * sin(beta2)
      ## head at the start of the shaft (types 1 and 3)
      if(any(left <- (arrowtype %in% c(1,3)))) {
        segments(x0[left], y0[left],
                 (x0 + hx1)[left], (y0 + hy1)[left], ...,
                 col=cols[left], lwd=lwd[left], lty=lty[left])
        segments(x0[left], y0[left],
                 (x0 + hx2)[left], (y0 + hy2)[left], ...,
                 col=cols[left], lwd=lwd[left], lty=lty[left])
      }
      ## head at the end of the shaft (types 2 and 3)
      if(any(right <- (arrowtype %in% c(2,3)))) {
        segments(x1[right], y1[right],
                 (x1 - hx1)[right], (y1 - hy1)[right], ...,
                 col=cols[right], lwd=lwd[right], lty=lty[right])
        segments(x1[right], y1[right],
                 (x1 - hx2)[right], (y1 - hy2)[right], ...,
                 col=cols[right], lwd=lwd[right], lty=lty[right])
      }
    }
  })
  return(invisible(NULL))
}
if(hasxy <- (!is.null(x) || !is.null(y))) { xy <- xy.coords(x, y) x <- xy$x y <- xy$y } ## function will return maximum size of symbols plotted. maxsize <- 0 if(do.plot && !add) plot(x, y, type="n", ...) force(values) g <- map(values) parnames <- colnames(g) if(do.plot) { xydf <- data.frame(x=x, y=y) if(nrow(xydf) == 0) return(invisible(maxsize)) g <- if(prod(dim(g)) == 0) xydf else do.call(data.frame, c(as.list(g), as.list(xydf), list(stringsAsFactors=FALSE))) } n <- nrow(g) ## figure out which function does the graphics job need.points <- any(c("pch", "chars") %in% parnames) need.symbols <- "shape" %in% parnames if(need.symbols && need.points) { worker <- with(g, ifelse(!is.na(shape), "symbols", "points")) } else if(need.symbols) { worker <- rep.int("symbols", n) } else { worker <- rep.int("points", n) } ## split data according to graphics function involved z <- split(g, factor(worker)) ## display using 'pch' zpoints <- z[["points"]] if(!is.null(zpoints) && nrow(zpoints) > 0) { ms <- do.call(do.points, resolve.defaults(as.list(zpoints), list(...), list(do.plot=do.plot))) ## value is max(cex) ## guess size of one character charsize <- if(started) max(par('cxy')) else if(hasxy) max(sidelengths(boundingbox(x,y))/40) else 1/40 maxsize <- max(maxsize, charsize * ms) } ## display using 'symbols' zsymbols <- z[["symbols"]] if(!is.null(zsymbols) && nrow(zsymbols) > 0) { ms <- do.call(do.symbols, resolve.defaults(as.list(zsymbols), list(...), list(do.plot=do.plot))) ## ms value is max physical size. 
maxsize <- max(maxsize, ms) } return(invisible(maxsize)) } invoke.symbolmap }) ## Display the symbol map itself (`legend' style) plot.symbolmap <- function(x, ..., main, xlim=NULL, ylim=NULL, vertical=FALSE, side=c("bottom", "left", "top", "right"), annotate=TRUE, labelmap=NULL, add=FALSE, nsymbols=NULL) { if(missing(main)) main <- short.deparse(substitute(x)) miss.side <- missing(side) side <- match.arg(side) type <- symbolmaptype(x) map <- x stuff <- attr(map, "stuff") if(type == "constant" && length(stuff$parlist) == 0) return(invisible(NULL)) if(is.null(labelmap)) { labelmap <- function(x) x } else if(type == "continuous" && is.numeric(labelmap) && length(labelmap) == 1) { labscal <- labelmap labelmap <- function(x) { x * labscal } } else stopifnot(is.function(labelmap)) ## determine the 'example' input values and their graphical representations switch(type, constant = { vv <- NULL }, continuous = { ra <- stuff$range if(is.null(ra)) stop("Cannot plot symbolmap with an infinite range") vv <- if(is.null(nsymbols)) prettyinside(ra) else prettyinside(ra, n = nsymbols) if(is.numeric(vv)) vv <- signif(vv, 4) }, discrete = { vv <- if(is.null(nsymbols)) prettydiscrete(stuff$inputs) else prettydiscrete(stuff$inputs, n = nsymbols) if(vertical) vv <- rev(vv) }) nn <- length(vv) ## gg <- map(vv) ll <- paste(labelmap(vv)) ## determine position of plot and symbols if(add) { ## x and y limits must respect existing plot space usr <- par('usr') if(is.null(xlim)) xlim <- usr[1:2] if(is.null(ylim)) ylim <- usr[3:4] } else { ## create new plot maxdiam <- invoke.symbolmap(map, vv, do.plot=FALSE, started=FALSE) zz <- c(0, max(1, maxdiam)) if(is.null(xlim) && is.null(ylim)) { if(vertical) { xlim <- zz ylim <- length(vv) * zz } else { xlim <- length(vv) * zz ylim <- zz } } else if(is.null(ylim)) { ylim <- zz } else if(is.null(xlim)) { xlim <- zz } } ## .......... initialise plot ............................... 
if(!add) do.call.matched(plot.default, resolve.defaults(list(x=xlim, y=ylim, type="n", main=main, axes=FALSE, xlab="", ylab="", asp=1.0), list(...))) ## maximum symbol diameter maxdiam <- invoke.symbolmap(map, vv, do.plot=FALSE, started=TRUE) ## .......... plot symbols .................... if(type == "constant") { xp <- mean(xlim) yp <- mean(ylim) } else if(vertical) { ## vertical arrangement xp <- rep(mean(xlim), nn) vskip <- 1.1 * max(maxdiam, 3 * max(strheight(labelmap(vv)))) if(diff(ylim) > nn * vskip) { yp <- (1:nn) * vskip yp <- yp - mean(yp) + mean(ylim) } else { z <- seq(ylim[1], ylim[2], length=nn+1) yp <- z[-1] - diff(z)/2 } } else { ## horizontal arrangement yp <- rep(mean(ylim), nn) hskip <- 1.1 * max(maxdiam, max(strwidth(labelmap(vv)))) if(diff(xlim) > nn * hskip) { xp <- (1:nn) * hskip xp <- xp - mean(xp) + mean(xlim) } else { z <- seq(xlim[1], xlim[2], length=nn+1) xp <- z[-1] - diff(z)/2 } } invoke.symbolmap(map, vv, xp, yp, ..., add=TRUE) ## ................. draw annotation .................. 
## Determine size and position of a box containing a legend or symbol map
## attached to a plot occupying region 'B'.
##
## B         : plot region (coerced to a rectangle)
## side      : which side of B the legend is placed on
## sep, size : absolute separation / legend thickness;
## sep.frac, size.frac : fractions of the maximum sidelength of B,
##             used when sep/size are not given
## started   : whether a plot device is already initialised (so that
##             strwidth/strheight can be queried)
## map       : the symbolmap, used to estimate label text length
## Returns a list with the overall bounding box A, the plot region B,
## the symbol box b, the text box tt, and the axis side code iside.
plan.legend.layout <- function(B, ...,
                               side=c("bottom", "left", "top", "right"),
                               sep=NULL, size=NULL,
                               sep.frac=0.05, size.frac=0.05,
                               started=FALSE, map=NULL) {
  ## Determine size and position of a box containing legend or symbolmap
  ## attached to a plot in region 'B'.
  ## sep, size are absolute distances;
  ## sep.frac, size.frac are fractions of the maximum sidelength of B.
  side <- match.arg(side)
  B <- as.rectangle(B)
  Bsize <- max(sidelengths(B))
  if(is.null(size)) {
    size <- size.frac * Bsize
  } else {
    check.1.real(size)
    stopifnot(size > 0)
  }
  if(is.null(sep)) {
    sep <- sep.frac * Bsize
  } else {
    check.1.real(sep)
    stopifnot(sep > 0)
  }
  ## estimate how many characters the labels need
  if(is.null(map) || !inherits(map, "symbolmap")) {
    textlength <- 8
  } else {
    vv <- with(attr(map, "stuff"),
               if(type == "discrete") inputs else prettyinside(range))
    textlength <- max(nchar(paste(vv)))
  }
  if(started) {
    ## NOTE(review): if started=TRUE but map is NULL/not a symbolmap,
    ## 'vv' is undefined here -- confirm callers always pass a map
    ## when the device has been started.
    textwidth <- max(strwidth(vv))
    textheight <- max(strheight(vv))
  } else {
    ## the plot has not been initialised: guess character size
    charsize <- diff(if(side %in% c("left", "right")) B$yrange else B$xrange)/40
    textwidth <- charsize * textlength
    textheight <- charsize
  }
  ## NOTE(review): 'charsize' is only assigned in the !started branch,
  ## but is used below (top/bottom cases and return value) -- confirm
  ## this function is only reached with started=FALSE, or initialise
  ## charsize in both branches.
  switch(side,
         right={
           ## symbols to right of image
           b <- owin(B$xrange[2] + sep + c(0, size), B$yrange)
           ## text to right of symbols
           tt <- owin(b$xrange[2] + sep + c(0, textwidth), b$yrange)
           iside <- 4
         },
         left={
           ## symbols to left of image
           b <- owin(B$xrange[1] - sep - c(size, 0), B$yrange)
           ## text to left of symbols
           tt <- owin(b$xrange[1] - sep - c(textwidth, 0), b$yrange)
           iside <- 2
         },
         top={
           ## symbols above image
           b <- owin(B$xrange, B$yrange[2] + sep + c(0, size))
           ## text above symbols
           tt <- owin(b$xrange, b$yrange[2] + 3* charsize + c(0, textheight))
           iside <- 3
         },
         bottom={
           ## symbols below image
           b <- owin(B$xrange, B$yrange[1] - sep - c(size, 0))
           ## text below symbols
           tt <- owin(b$xrange, b$yrange[1] - 3 * charsize - c(textheight, 0))
           iside <- 1
         })
  A <- boundingbox(B, b, tt)
  return(list(A=A, B=B, b=b, tt=tt,
              iside=iside, side=side,
              size=size, charsize=charsize, sep=sep))
}
#' intermaker:
#' Tag an interaction creator function (such as 'Strauss') with the
#' class "intermaker", and attach selected fields of its prototype
#' interaction object, so that print.intermaker can show a readable
#' description of what the creator does.
intermaker <- function(f, blank) {
  ## f is the creator function like 'Strauss'
  ## blank is the prototype interaction object: keep only the fields
  ## needed for printing, in this fixed order
  wanted <- c("creator", "name", "par", "parnames", "pardesc")
  keep <- intersect(wanted, names(blank))
  attr(f, "b") <- blank[keep]
  class(f) <- c("intermaker", class(f))
  f
}
## Plot method for spatial cumulative distribution functions.
## Default axis labels come from the attributes stored by spatialcdf():
## "Zname" (the covariate name) and "ylab" (the cumulative-weight label).
plot.spatialcdf <- function(x, ..., xlab, ylab) {
  xmiss <- missing(xlab) || is.null(xlab)
  ymiss <- missing(ylab) || is.null(ylab)
  if (xmiss) {
    xlab <- attr(x, "Zname")
  }
  if (ymiss) {
    ylab <- attr(x, "ylab")
  }
  plot.ecdf(x, ..., xlab = xlab, ylab = ylab)
}
dup <- with(dat, c(FALSE, diff(i) == 0 & diff(j) == 0 & diff(k) == 0)) if(any(dup)) { #' accumulate values at the same array location retain <- !dup newrow <- cumsum(retain) newx <- as(tapply(dat$x, newrow, sum), typeof(dat$x)) newdat <- dat[retain,,drop=FALSE] newdat$x <- newx dat <- newdat } } result <- append(as.list(dat), list(dim=dims, dimnames=dimnames)) class(result) <- "sparse3Darray" return(result) } as.sparse3Darray <- function(x, ...) { if(inherits(x, "sparse3Darray")) { y <- x } else if(inherits(x, c("matrix", "sparseMatrix"))) { z <- as(x, Class="TsparseMatrix") dn <- dimnames(x) dn <- if(is.null(dn)) NULL else c(dn, list(NULL)) one <- if(length(z@i) > 0) 1L else integer(0) y <- sparse3Darray(i=z@i + 1L, j=z@j + 1L, k=one, x=z@x, dims=c(dim(x), 1L), dimnames=dn) } else if(is.array(x)) { stopifnot(length(dim(x)) == 3) dimx <- dim(x) if(prod(dimx) == 0) { y <- sparse3Darray(, dims=dimx, dimnames=dimnames(x)) } else { ijk <- which(x != RelevantZero(x), arr.ind=TRUE) ijk <- cbind(as.data.frame(ijk), x[ijk]) y <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L], x=ijk[,4L], dims=dimx, dimnames=dimnames(x)) } } else if(inherits(x, "sparseVector")) { one <- if(length(x@i) > 0) 1L else integer(0) y <- sparse3Darray(i=x@i, j=one, k=one, x=x@x, dims=c(x@length, 1L, 1L)) } else if(is.null(dim(x)) && is.atomic(x)) { n <- length(x) dn <- names(x) if(!is.null(dn)) dn <- list(dn, NULL, NULL) one <- if(n > 0) 1L else integer(0) y <- sparse3Darray(i=seq_len(n), j=one, k=one, x=x, dims=c(n, 1L, 1L), dimnames=dn) } else if(is.list(x) && length(x) > 0) { n <- length(x) if(all(sapply(x, is.matrix))) { z <- Reduce(abind, x) y <- as.sparse3Darray(z) } else if(all(sapply(x, inherits, what="sparseMatrix"))) { dimlist <- unique(lapply(x, dim)) if(length(dimlist) > 1) stop("Dimensions of matrices do not match") dimx <- c(dimlist[[1L]], n) dnlist <- lapply(x, dimnames) isnul <- sapply(dnlist, is.null) dnlist <- unique(dnlist[!isnul]) if(length(dnlist) > 1) stop("Dimnames of 
## Replacement method for the dimnames of a sparse 3D array.
## A bare vector, or a list of length 1, is recycled to all three margins.
"dimnames<-.sparse3Darray" <- function(x, value) {
  dn <- if (is.list(value)) value else list(value)
  if (length(dn) == 1) {
    ## same names on every margin
    dn <- rep(dn, 3)
  }
  x$dimnames <- dn
  x
}
## Permute the margins of a sparse 3D array (analogue of base::aperm).
## perm is a permutation of 1:3; if resize=TRUE the dim and dimnames
## are permuted to match, otherwise only the index triples are swapped.
aperm.sparse3Darray <- function(a, perm=NULL, resize=TRUE, ...) {
  if (is.null(perm)) {
    ## nothing to do
    return(a)
  }
  stopifnot(length(perm) == 3)
  b <- unclass(a)
  margins <- c("i", "j", "k")
  ## swap the three index vectors according to perm
  b[margins] <- b[margins][perm]
  if (resize) {
    b$dim <- b$dim[perm]
    if (length(b$dimnames) == 3) {
      b$dimnames <- b$dimnames[perm]
    }
  }
  class(b) <- c("sparse3Darray", class(b))
  b
}
{ dimx <- dim(x) dn <- dimnames(x) %orifnull% rep(list(NULL), 3) if(!missing(i) && length(dim(i)) == 2) { ## matrix index i <- as.matrix(i) if(!(missing(j) && missing(k))) stop("If i is a matrix, j and k should not be given", call.=FALSE) if(ncol(i) != 3) stop("If i is a matrix, it should have 3 columns", call.=FALSE) ## start with vector of 'zero' answers of the correct type answer <- sparseVector(x=RelevantZero(x$x)[integer(0)], i=integer(0), length=nrow(i)) ## values outside array return NA if(any(bad <- !inside3Darray(dim(x), i))) answer[bad] <- NA ## if entire array is zero, there is nothing to match if(length(x$x) == 0) return(answer) ## match desired indices to sparse entries varies <- (dimx > 1) nvary <- sum(varies) varying <- which(varies) if(nvary == 3) { ## ---- older code ----- ## convert triples of integers to character codes #### icode <- apply(i, 1, paste, collapse=",") << is too slow >> ## icode <- paste(i[,1], i[,2], i[,3], sep=",") ## dcode <- paste(x$i, x$j, x$k, sep=",") ## ------------------ m <- matchIntegerDataFrames(i, cbind(x$i, x$j, x$k)) } else if(nvary == 2) { ## effectively a sparse matrix ## ---- older code ----- ## icode <- paste(i[,varying[1]], i[,varying[2]], sep=",") ## ijk <- cbind(x$i, x$j, x$k) ## dcode <- paste(ijk[,varying[1]], ijk[,varying[2]], sep=",") ## ------------------ ijk <- cbind(x$i, x$j, x$k) m <- matchIntegerDataFrames(i[,varying,drop=FALSE], ijk[,varying,drop=FALSE]) } else if(nvary == 1) { ## effectively a sparse vector ## ---- older code ----- ## icode <- i[,varying] ## dcode <- switch(varying, x$i, x$j, x$k) ## ------------------ m <- match(i[,varying], switch(varying, x$i, x$j, x$k)) } else { ## effectively a single value ## ---- older code ----- ## icode <- rep(1, nrow(i)) ## dcode <- 1 # since we know length(x$x) > 0 m <- 1 } ## insert any found elements found <- !is.na(m) answer[found] <- x$x[m[found]] return(answer) } if(!(missing(i) && missing(j) && missing(k))) { I <- grokIndexVector(if(missing(i)) NULL 
else i, dimx[1], dn[[1]]) J <- grokIndexVector(if(missing(j)) NULL else j, dimx[2], dn[[2]]) K <- grokIndexVector(if(missing(k)) NULL else k, dimx[3], dn[[3]]) IJK <- list(I,J,K) if(!all(sapply(lapply(IJK, getElement, name="full"), is.null))) { ## indices exceed array bounds; result is a full array containing NA's result <- as.array(x)[I$full$i, J$full$j, K$full$k, drop=drop] return(result) } IJK <- lapply(IJK, getElement, name="strict") I <- IJK[[1]] J <- IJK[[2]] K <- IJK[[3]] #' number of values to be returned along each margin newdims <- sapply(IJK, getElement, name="n") #' dimnames of return array newdn <- lapply(IJK, getElement, name="s") #' find all required data (not necessarily in required order) inI <- I$lo inJ <- J$lo inK <- K$lo df <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x) use <- with(df, inI[i] & inJ[j] & inK[k]) df <- df[use, ,drop=FALSE] #' contract sub-array to (1:n) * (1:m) * (1:l) df <- transform(df, i = cumsum(inI)[i], j = cumsum(inJ)[j], k = cumsum(inK)[k]) Imap <- I$map Jmap <- J$map Kmap <- K$map if(nrow(df) == 0 || (is.null(Imap) && is.null(Jmap) && is.null(Kmap))) { ## return values are already in correct position outdf <- df } else { #' invert map to determine output positions (reorder/repeat entries) imap <- Imap %orifnull% df$i jmap <- Jmap %orifnull% df$j kmap <- Kmap %orifnull% df$k sn <- seq_len(nrow(df)) whichi <- split(seq_along(imap), factor(imap, levels=sn)) whichj <- split(seq_along(jmap), factor(jmap, levels=sn)) whichk <- split(seq_along(kmap), factor(kmap, levels=sn)) dat.i <- whichi[df$i] dat.j <- whichj[df$j] dat.k <- whichk[df$k] stuff <- mapply(expandwithdata, i=dat.i, j=dat.j, k=dat.k, x=df$x) outdf <- rbindCompatibleDataFrames(stuff) } x <- sparse3Darray(i=outdf$i, j=outdf$j, k=outdf$k, x=outdf$x, dims=newdims, dimnames=newdn) dimx <- newdims dn <- newdn } if(drop) { retain <- (dimx > 1) nretain <- sum(retain) if(nretain == 2) { #' result is a matrix retained <- which(retain) newi <- getElement(x, name=c("i","j","k")[ 
retained[1] ]) newj <- getElement(x, name=c("i","j","k")[ retained[2] ]) newdim <- dimx[retain] newdn <- dn[retain] return(sparseMatrix(i=newi, j=newj, x=x$x, dims=newdim, dimnames=newdn)) } else if(nretain == 1) { #' sparse vector retained <- which(retain) newi <- getElement(x, name=c("i","j","k")[retained]) #' ensure 'strict' ord <- order(newi) newi <- newi[ord] newx <- x$x[ord] if(any(dup <- c(FALSE, diff(newi) == 0))) { retain <- !dup ii <- cumsum(retain) newi <- newi[retain] newx <- as(tapply(newx, ii, sum), typeof(newx)) } x <- sparseVector(x=newx, i=newi, length=dimx[retained]) } else if(nretain == 0) { #' single value x <- as.vector(as.array(x)) } } return(x) } expandwithdata <- function(i, j, k, x) { z <- expand.grid(i=i, j=j, k=k) if(nrow(z) > 0) z$x <- x return(z) } Extract }) rbindCompatibleDataFrames <- function(x) { #' faster version of Reduce(rbind, x) when entries are known to be compatible nama2 <- colnames(x[[1]]) y <- vector(mode="list", length=length(nama2)) names(y) <- nama2 for(nam in nama2) y[[nam]] <- unlist(lapply(x, getElement, name=nam)) return(as.data.frame(y)) } "[<-.sparse3Darray" <- function(x, i, j, k, ..., value) { dimx <- dim(x) dn <- dimnames(x) %orifnull% rep(list(NULL), 3) #' interpret indices if(!missing(i) && length(dim(i)) == 2) { ## matrix index ijk <- as.matrix(i) if(!(missing(j) && missing(k))) stop("If i is a matrix, j and k should not be given", call.=FALSE) if(ncol(ijk) != 3) stop("If i is a matrix, it should have 3 columns", call.=FALSE) if(!all(inside3Darray(dimx, i))) stop("Some indices lie outside array limits", call.=FALSE) if(nrow(ijk) == 0) return(x) # no items to replace ## assemble data frame xdata <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x) ## match xdata into ijk (not necessarily the first match in original order) m <- matchIntegerDataFrames(xdata[,1:3,drop=FALSE], ijk) ## ------- OLDER VERSION: -------- ## convert triples of integers to character codes ## icode <- apply(ijk, 1, paste, collapse=",") << is too 
slow >> ## icode <- paste(ijk[,1], ijk[,2], ijk[,3], sep=",") ## xcode <- paste(x$i, x$j, x$k, sep=",") ## m <- match(xcode, icode) ## ------------------------------- ## remove any matches, retaining only data that do not match 'i' xdata <- xdata[is.na(m), , drop=FALSE] # sic ## ensure replacement value is vector-like value <- as.vector(value) nv <- length(value) if(nv != nrow(i) && nv != 1) stop(paste("Number of items to replace", paren(nrow(i)), "does not match number of items given", paren(nv)), call.=FALSE) vdata <- data.frame(i=ijk[,1], j=ijk[,2], k=ijk[,3], x=value) ## combine ydata <- rbind(xdata, vdata) y <- with(ydata, sparse3Darray(i=i,j=j,k=k,x=x, dims=dimx, dimnames=dn, strict=TRUE)) return(y) } I <- grokIndexVector(if(missing(i)) NULL else i, dimx[1], dn[[1]]) J <- grokIndexVector(if(missing(j)) NULL else j, dimx[2], dn[[2]]) K <- grokIndexVector(if(missing(k)) NULL else k, dimx[3], dn[[3]]) IJK <- list(I,J,K) if(!all(sapply(lapply(IJK, getElement, name="full"), is.null))) { warning("indices exceed array bounds; using full array", call.=FALSE) x <- as.array(x) x[I$full$i, J$full$j, K$full$k] <- value x <- as.sparse3Darray(x) return(x) } IJK <- lapply(IJK, getElement, name="strict") if(all(sapply(IJK, getElement, name="nind") == 0)) { # no elements are indexed return(x) } I <- IJK[[1]] J <- IJK[[2]] K <- IJK[[3]] #' extract current array entries xdata <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x) #' identify data volume that will be overwritten inI <- I$lo inJ <- J$lo inK <- K$lo #' remove data that will be overwritten retain <- !with(xdata, inI[i] & inJ[j] & inK[k]) xdata <- xdata[retain,,drop=FALSE] #' expected dimensions of 'value' implied by indices dimVshould <- sapply(IJK, getElement, name="nind") dimV <- dim(value) if(length(dimV) == 3) { #' both source and destination are 3D if(all(dimVshould == dimV)) { #' replace 3D block by 3D block of same dimensions value <- as.sparse3Darray(value) vdata <- data.frame(i=value$i, j=value$j, k=value$k, x=value$x) 
# determine positions of replacement data in original array vdata <- transform(vdata, i=replacementIndex(i, I), j=replacementIndex(j, J), k=replacementIndex(k, K)) } else stop(paste("Replacement value has wrong dimensions:", paste(dimV, collapse="x"), "instead of", paste(dimVshould, collapse="x")), call.=FALSE) } else if(is.null(dimV)) { #' replacement value is a vector or sparseVector value <- as(value, "sparseVector") iv <- value@i xv <- value@x nv <- value@length collapsing <- (dimVshould == 1) realdim <- sum(!collapsing) if(nv == 1) { #' replacement value is a constant value <- as.vector(value[1]) if(identical(value, RelevantZero(x$x))) { #' assignment causes relevant entries to be set to zero; #' these entries have already been deleted from 'xdata'; #' nothing to add vdata <- data.frame(i=integer(0), j=integer(0), k=integer(0), x=x$x[integer(0)]) } else { #' replicate the constant vdata <- expand.grid(i=I$i, j=J$i, k=K$i, x=as.vector(value[1])) } } else if(realdim == 0) { stop(paste("Replacement value has too many entries:", nv, "instead of 1"), call.=FALSE) } else if(realdim == 1) { theindex <- which(!collapsing) # target slice is one-dimensional if(nv != dimVshould[theindex]) stop(paste("Replacement value has wrong number of entries:", nv, "instead of", dimVshould[theindex]), call.=FALSE) newpos <- replacementIndex(iv, IJK[[theindex]]) vdata <- switch(theindex, data.frame(i=newpos, j=J$i, k=K$i, x=xv), data.frame(i=I$i, j=newpos, k=K$i, x=xv), data.frame(i=I$i, j=J$i, k=newpos, x=xv)) } else { # target slice is two-dimensional sdim <- dimVshould[!collapsing] sd1 <- sdim[1] sd2 <- sdim[2] if(nv != sd1) stop(paste("Length of replacement vector", paren(nv), "does not match dimensions of array subset", paren(paste(dimVshould, collapse="x"))), call.=FALSE) firstindex <- which(!collapsing)[1] secondindex <- which(!collapsing)[2] pos1 <- replacementIndex(iv, IJK[[firstindex]]) pos2 <- replacementIndex(seq_len(sd2), IJK[[secondindex]]) xv <- rep(xv, sd2) pos1 <- 
rep(pos1, sd2) pos2 <- rep(pos2, each=length(pos1)) pos3 <- if(length(pos1)) IJK[[which(collapsing)]]$i else integer(0) vdata <- data.frame(i=pos3, j=pos3, k=pos3, x=xv) vdata[,firstindex] <- pos1 vdata[,secondindex] <- pos2 } } else if(identical(dimVshould[dimVshould > 1], dimV[dimV > 1])) { #' lower dimensional sets of the same dimension value <- value[drop=TRUE] dimV <- dim(value) dropping <- (dimVshould == 1) if(length(dimV) == 2) { value <- as(value, "TsparseMatrix") iv <- value@i + 1L jv <- value@j + 1L xv <- value@x firstindex <- which(!dropping)[1] secondindex <- which(!dropping)[2] pos1 <- replacementIndex(iv, IJK[[firstindex]]) pos2 <- replacementIndex(jv, IJK[[secondindex]]) pos3 <- if(length(pos1)) IJK[[which(dropping)]]$i else integer(0) vdata <- data.frame(i=pos3, j=pos3, k=pos3, x=xv) vdata[,firstindex] <- pos1 vdata[,secondindex] <- pos2 } else { value <- as(value, "sparseVector") iv <- value@i xv <- value@x vdata <- data.frame(i=if(dropping[1]) I$i else replacementIndex(iv, I), j=if(dropping[2]) J$i else replacementIndex(iv, J), k=if(dropping[3]) K$i else replacementIndex(iv, K), x=xv) } } else stop(paste("Replacement value has wrong dimensions:", paste(dimV, collapse="x"), "instead of", paste(dimVshould, collapse="x")), call.=FALSE) ## combine if(nrow(vdata) > 0) xdata <- rbind(xdata, vdata) y <- with(xdata, sparse3Darray(i=i,j=j,k=k,x=x, dims=dimx, dimnames=dn, strict=TRUE)) return(y) } bind.sparse3Darray <- function(A,B,along) { A <- as.sparse3Darray(A) B <- as.sparse3Darray(B) check.1.integer(along) stopifnot(along %in% 1:3) dimA <- dim(A) dimB <- dim(B) if(!all(dimA[-along] == dimB[-along])) stop("dimensions of A and B do not match") dimC <- dimA dimC[along] <- dimA[along] + dimB[along] # extract data Adf <- SparseEntries(A) Bdf <- SparseEntries(B) # realign 'B' coordinate Bdf[,along] <- Bdf[,along] + dimA[along] # combine C <- EntriesToSparse(rbind(Adf, Bdf), dimC) # add dimnames dnA <- dimnames(A) dnB <- dimnames(B) if(!is.null(dnA) || 
!is.null(dnB)) { if(length(dnA) != 3) dnA <- rep(list(NULL), 3) if(length(dnB) != 3) dnB <- rep(list(NULL), 3) dnC <- dnA dnC[[along]] <- c(dnA[[along]] %orifnull% rep("", dimA[along]), dnB[[along]] %orifnull% rep("", dimB[along])) dimnames(C) <- dnC } return(C) } anyNA.sparse3Darray <- function(x, recursive=FALSE) { anyNA(x$x) } RelevantZero <- function(x) vector(mode=typeof(x), length=1L) isRelevantZero <- function(x) identical(x, RelevantZero(x)) RelevantEmpty <- function(x) vector(mode=typeof(x), length=0L) unionOfSparseIndices <- function(A, B) { #' A, B are data frames of indices i, j, k ijk <- unique(rbind(A, B)) colnames(ijk) <- c("i", "j", "k") return(ijk) } Ops.sparse3Darray <- function(e1,e2=NULL){ if(nargs() == 1L) { switch(.Generic, "!" = { result <- do.call(.Generic, list(as.array(e1))) }, "-" = , "+" = { result <- e1 result$x <- do.call(.Generic, list(e1$x)) }, stop(paste("Unary", sQuote(.Generic), "is undefined for sparse 3D arrays."), call.=FALSE)) return(result) } # binary operation # Decide whether full or sparse elist <- list(e1, e2) isfull <- sapply(elist, inherits, what=c("matrix", "array")) if(any(isfull) && any(sapply(lapply(elist[isfull], dim), prod) > 1)) { # full array n1 <- length(dim(e1)) n2 <- length(dim(e2)) e1 <- if(n1 == 3) as.array(e1) else if(n1 == 2) as.matrix(e1) else as.vector(as.matrix(as.array(e1))) e2 <- if(n2 == 3) as.array(e2) else if(n2 == 2) as.matrix(e2) else as.vector(as.matrix(as.array(e2))) result <- do.call(.Generic, list(e1, e2)) return(result) } # sparse result (usually) e1 <- as.sparse3Darray(e1) e2 <- as.sparse3Darray(e2) dim1 <- dim(e1) dim2 <- dim(e2) mode1 <- typeof(e1$x) mode2 <- typeof(e2$x) zero1 <- vector(mode=mode1, length=1L) zero2 <- vector(mode=mode2, length=1L) if(prod(dim1) == 1) { ## e1 is constant e1 <- as.vector(as.array(e1)) z12 <- do.call(.Generic, list(e1, zero2)) if(!isRelevantZero(z12)) { # full matrix/array will be generated result <- do.call(.Generic, list(e1, as.array(e2)[drop=TRUE])) } 
else { # sparse result <- e2 result$x <- do.call(.Generic, list(e1, e2$x)) } return(result) } if(prod(dim2) == 1) { ## e2 is constant e2 <- as.vector(as.array(e2)) z12 <- do.call(.Generic, list(zero1, e2)) if(!isRelevantZero(z12)) { # full matrix/array will be generated result <- do.call(.Generic, list(as.array(e1)[drop=TRUE], e2)) } else { # sparse result <- e1 result$x <- do.call(.Generic, list(e1$x, e2)) } return(result) } z12 <- do.call(.Generic, list(zero1, zero2)) if(!isRelevantZero(z12)) { #' Result is an array e1 <- as.array(e1) e2 <- as.array(e2) result <- do.call(.Generic, list(e1, e2)) return(result) } # Result is sparse if(identical(dim1, dim2)) { #' extents are identical ijk1 <- SparseIndices(e1) ijk2 <- SparseIndices(e2) if(identical(ijk1, ijk2)) { #' patterns of nonzero entries are identical ijk <- ijk1 values <- do.call(.Generic, list(e1$x, e2$x)) } else { #' different patterns of nonzero entries ijk <- unionOfSparseIndices(ijk1, ijk2) values <- as.vector(do.call(.Generic, list(e1[ijk], e2[ijk]))) } dn <- dimnames(e1) %orifnull% dimnames(e2) result <- sparse3Darray(i=ijk$i, j=ijk$j, k=ijk$k, x=values, dims=dim1, dimnames=dn, strict=TRUE) return(result) } drop1 <- (dim1 == 1) drop2 <- (dim2 == 1) if(!any(drop1 & !drop2) && identical(dim1[!drop2], dim2[!drop2])) { #' dim2 is a slice of dim1 ijk1 <- data.frame(i=e1$i, j=e1$j, k=e1$k) ijk2 <- data.frame(i=e2$i, j=e2$j, k=e2$k) expanding <- which(drop2 & !drop1) if(length(expanding) == 1) { n <- dim1[expanding] m <- nrow(ijk2) ijk2 <- as.data.frame(lapply(ijk2, rep, times=n)) ijk2[,expanding] <- rep(seq_len(n), each=m) ijk <- unionOfSparseIndices(ijk1, ijk2) ijkdrop <- ijk if(nrow(ijkdrop) > 0) ijkdrop[,expanding] <- 1 xout <- do.call(.Generic, list(e1[ijk], e2[ijkdrop])) result <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L], x=as.vector(xout), dims=dim1, dimnames=dimnames(e1), strict=TRUE) return(result) } } if(!any(drop2 & !drop1) && identical(dim2[!drop1], dim1[!drop1])) { #' dim1 is a slice of 
dim2 ijk1 <- data.frame(i=e1$i, j=e1$j, k=e1$k) ijk2 <- data.frame(i=e2$i, j=e2$j, k=e2$k) expanding <- which(drop1 & !drop2) if(length(expanding) == 1) { n <- dim2[expanding] m <- nrow(ijk1) ijk1 <- as.data.frame(lapply(ijk1, rep, times=n)) ijk1[,expanding] <- rep(seq_len(n), each=m) ijk <- unionOfSparseIndices(ijk1, ijk2) ijkdrop <- ijk if(nrow(ijkdrop) > 0) ijkdrop[,expanding] <- 1L xout <- do.call(.Generic, list(e1[ijkdrop], e2[ijk])) result <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L], x=as.vector(xout), dims=dim2, dimnames=dimnames(e2), strict=TRUE) return(result) } } if(all(drop1[-1]) && dim1[1L] == dim2[1L]) { #' e1 is a (sparse) vector matching the first extent of e2 if(.Generic %in% c("*", "&")) { # result is sparse ijk <- data.frame(i=e2$i, j=e2$j, k=e2$k) ones <- rep(1L, nrow(ijk)) i11 <- data.frame(i=e2$i, j=ones, k=ones) xout <- do.call(.Generic, list(e1[i11], e2[ijk])) result <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L], x=as.vector(xout), dims=dim2, dimnames=dimnames(e2), strict=TRUE) } else { # result is full array e1 <- as.array(e1)[,,,drop=TRUE] e2 <- as.array(e2) result <- do.call(.Generic, list(e1, e2)) } return(result) } stop(paste("Non-conformable arrays:", paste(dim1, collapse="x"), "and", paste(dim2, collapse="x")), call.=FALSE) } Math.sparse3Darray <- function(x, ...){ z <- RelevantZero(x$x) fz <- do.call(.Generic, list(z)) if(!isRelevantZero(fz)) { # result is a full array result <- do.call(.Generic, list(as.array(x), ...)) return(result) } x$x <- do.call(.Generic, list(x$x)) return(x) } Summary.sparse3Darray <- function(..., na.rm=FALSE) { argh <- list(...) 
is3D <- sapply(argh, inherits, what="sparse3Darray") if(any(is3D)) { xvalues <- lapply(argh[is3D], getElement, name="x") argh[is3D] <- lapply(xvalues, .Generic, na.rm=na.rm) zeroes <- lapply(xvalues, RelevantZero) fzeroes <- lapply(zeroes, .Generic, na.rm=na.rm) argh <- append(argh, fzeroes) } rslt <- do.call(.Generic, append(argh, list(na.rm=na.rm))) return(rslt) } SparseIndices <- function(x) { #' extract indices of entries of sparse vector/matrix/array nd <- length(dim(x)) if(nd > 3) stop("Arrays of more than 3 dimensions are not supported", call.=FALSE) if(nd == 0 || nd == 1) { x <- as(x, "sparseVector") df <- data.frame(i=x@i) } else if(nd == 2) { x <- as(x, "TsparseMatrix") df <- data.frame(i=x@i + 1L, j=x@j + 1L) } else if(nd == 3) { x <- as.sparse3Darray(x) df <- data.frame(i=x$i, j=x$j, k=x$k) } return(df) } SparseEntries <- function(x) { #' extract entries of sparse vector/matrix/array nd <- length(dim(x)) if(nd > 3) stop("Arrays of more than 3 dimensions are not supported", call.=FALSE) if(nd == 0 || nd == 1) { x <- as(x, "sparseVector") df <- data.frame(i=x@i, x=x@x) } else if(nd == 2) { x <- as(x, "TsparseMatrix") df <- data.frame(i=x@i + 1L, j=x@j + 1L, x=x@x) } else if(nd == 3) { x <- as.sparse3Darray(x) df <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x) } return(df) } EntriesToSparse <- function(df, dims) { #' convert data frame of indices and values #' to sparse vector/matrix/array nd <- length(dims) if(nd == 0) return(with(df, as(sum(x), typeof(x)))) sn <- seq_len(nd) colnames(df)[sn] <- c("i","j","k")[sn] if(nd == 1) { #' sparse vector: duplicate entries not allowed df <- df[with(df, order(i)), , drop=FALSE] dup <- c(FALSE, with(df, diff(i) == 0)) if(any(dup)) { #' accumulate values at the same array location first <- !dup newi <- cumsum(first) newx <- as(tapply(df$x, newi, sum), typeof(df$x)) df <- data.frame(i=newi[first], x=newx) } result <- with(df, sparseVector(i=i, x=x, length=dims)) } else if(nd == 2) { result <- with(df, sparseMatrix(i=i, j=j, 
x=x, dims=dims)) } else if(nd == 3) { result <- with(df, sparse3Darray(i=i, j=j, k=k, x=x, dims=dims)) } return(result) } evalSparse3Dentrywise <- function(expr, envir) { ## DANGER: this assumes all sparse arrays in the expression ## have the same pattern of nonzero elements! e <- as.expression(substitute(expr)) ## get names of all variables in the expression varnames <- all.vars(e) allnames <- all.names(e, unique=TRUE) funnames <- allnames[!(allnames %in% varnames)] if(length(varnames) == 0) stop("No variables in this expression") ## get the values of the variables if(missing(envir)) { envir <- parent.frame() # WAS: sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- mget(varnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) funs <- mget(funnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) ## find out which variables are sparse3Darray isSpud <- sapply(vars, inherits, what="sparse3Darray") if(!any(isSpud)) stop("No sparse 3D arrays in this expression") spuds <- vars[isSpud] template <- spuds[[1L]] ## replace each array by its entries, and evaluate spudvalues <- lapply(spuds, getElement, name="x") ## minimal safety check if(length(unique(lengths(spudvalues))) > 1) stop("Different numbers of sparse entries", call.=FALSE) vars[isSpud] <- spudvalues v <- eval(e, append(vars, funs)) ## reshape as 3D array result <- sparse3Darray(x=v, i=template$i, j=template$j, k=template$k, dims=dim(template), dimnames=dimnames(template)) return(result) } spatstat/R/clickpoly.R0000755000176200001440000000417413115271075014445 0ustar liggesusers# # clickpoly.R # # # $Revision: 1.10 $ $Date: 2015/10/21 09:06:57 $ # # clickpoly <- function(add=FALSE, nv=NULL, np=1, ...) { if((!add) | dev.cur() == 1L) { plot(0,0,type="n", xlab="", ylab="", xlim=c(0,1), ylim=c(0,1), asp=1.0, axes=FALSE) rect(0,0,1,1) } spatstatLocator(0) ## check locator is enabled gon <- list() stopifnot(np >= 1) # for(i in 1:np) { if(np > 1) cat(paste(".... 
Polygon number", i, ".....\n")) if(!is.null(nv)) cat(paste("click", nv, "times in window\n")) else cat(paste("to add points: click left mouse button in window\n", " to exit: press ESC or click middle mouse button\n", "[The last point should NOT repeat the first point]\n")) xy <- do.call(spatstatLocator, resolve.defaults(if(!is.null(nv)) list(n=nv) else list(), list(...), list(type="o"))) if(Area.xypolygon(xy) < 0) xy <- lapply(xy, rev) gon[[i]] <- xy plotPolygonBdry(owin(poly=xy), ...) } result <- owin(poly=gon) plotPolygonBdry(result, ...) return(result) } clickbox <- function(add=TRUE, ...) { spatstatLocator(0) # check locator enabled cat("Click two corners of a box\n") if(!add) plot(owin(), main="Click two corners of a box") a <- try(spatstatLocator(1), silent=TRUE) if(inherits(a, "try-error")) { ## add=TRUE but there is no current plot plot.new() a <- spatstatLocator(1, ...) } abline(v=a$x) abline(h=a$y) b <- spatstatLocator(1, ...) abline(v=b$x) abline(h=b$y) ab <- concatxy(a, b) result <- owin(range(ab$x), range(ab$y)) plotPolygonBdry(result, ...) return(result) } plotPolygonBdry <- function(x, ...) { # filter appropriate arguments argh <- list(...) polyPars <- union(graphicsPars("lines"), graphicsPars("owin")) polyargs <- argh[names(argh) %in% polyPars] # change 'col' to 'border' nama <- names(polyargs) if(any(nama == "col") && !any(nama == "border")) names(polyargs)[nama == "col"] <- "border" # plot do.call(plot.owin, append(list(x=x, add=TRUE), polyargs)) } spatstat/R/quadscheme.R0000755000176200001440000002351713115271120014564 0ustar liggesusers# # # quadscheme.S # # $Revision: 4.35 $ $Date: 2016/02/11 10:17:12 $ # # quadscheme() generate a quadrature scheme from # data and dummy point patterns. # # quadscheme.spatial() case where both patterns are unmarked # # quadscheme.replicated() case where data are multitype # # #--------------------------------------------------------------------- quadscheme <- function(data, dummy, method="grid", ...) 
{ # # generate a quadrature scheme from data and dummy patterns. # # Other arguments control how the quadrature weights are computed # data <- as.ppp(data) if(missing(dummy)) { # create dummy points dummy <- default.dummy(data, method=method, ...) # extract full set of parameters used to create dummy points dp <- attr(dummy, "dummy.parameters") # extract recommended parameters for computing weights wp <- attr(dummy, "weight.parameters") } else { # user-supplied dummy points if(!is.ppp(dummy)) { # convert to ppp object dummy <- as.ppp(dummy, data$window, check=FALSE) # confine dummy points to data window dummy <- dummy[data$window] wp <- dp <- list() } else { # if it's already a ppp, it may have been created by default.dummy dp <- attr(dummy, "dummy.parameters") wp <- attr(dummy, "weight.parameters") } } # arguments supplied directly to quadscheme() # override any arguments passed as attributes wp <- resolve.defaults(list(method=method), list(...), wp) mX <- is.marked(data) mD <- is.marked(dummy) if(!mX && !mD) Q <- do.call(quadscheme.spatial, append(list(data, dummy, check=FALSE), wp)) else if(mX && !mD) Q <- do.call(quadscheme.replicated, append(list(data, dummy, check=FALSE), wp)) else if(!mX && mD) stop("dummy points are marked but data are unmarked") else stop("marked data and marked dummy points -- sorry, this case is not implemented") # record parameters used to make dummy points Q$param$dummy <- dp return(Q) } quadscheme.spatial <- function(data, dummy, method=c("grid", "dirichlet"), ...) { # # generate a quadrature scheme from data and dummy patterns. # # The 'method' may be "grid" or "dirichlet" # # '...' are passed to gridweights() or dirichletWeights() # # quadscheme.spatial: # for unmarked point patterns. # # weights are determined only by spatial locations # (i.e. 
weight computations ignore any marks) # # No two points should have the same spatial location # check <- resolve.defaults(list(...), list(check=TRUE))$check method <- match.arg(method) data <- as.ppp(data, check=check) dummy <- as.ppp(dummy, data$window, check=check) # note data$window is the DEFAULT quadrature window # applicable when 'dummy' does not contain a window if(is.marked(data, dfok=TRUE)) warning("marks in data pattern - ignored") if(is.marked(dummy, dfok=TRUE)) warning("marks in dummy pattern - ignored") both <- as.ppp(concatxy(data, dummy), dummy$window, check=check) switch(method, grid={ w <- gridweights(both, window= dummy$window, ...) }, dirichlet = { w <- dirichletWeights(both, window=dummy$window, ...) }, { stop(paste("unrecognised method", sQuote(method))) } ) # parameters actually used to make weights wp <- attr(w, "weight.parameters") param <- list(weight = wp, dummy = NULL) Q <- quad(data, dummy, w, param) return(Q) } "quadscheme.replicated" <- function(data, dummy, method=c("grid", "dirichlet"), ...) { ## ## generate a quadrature scheme from data and dummy patterns. ## ## The 'method' may be "grid" or "dirichlet" ## ## '...' are passed to gridweights() or dirichletWeights() ## ## quadscheme.replicated: ## for multitype point patterns. ## ## No two points in 'data'+'dummy' should have the same spatial location check <- resolve.defaults(list(...), list(check=TRUE))$check method <- match.arg(method) data <- as.ppp(data, check=check) dummy <- as.ppp(dummy, data$window, check=check) ## note data$window is the DEFAULT quadrature window ## unless otherwise specified in 'dummy' ndata <- data$n ndummy <- dummy$n if(!is.marked(data)) stop("data pattern does not have marks") if(is.marked(dummy, dfok=TRUE) && npoints(dummy) > 0) warning("dummy points have marks --- ignored") ## first, ignore marks and compute spatial weights P <- quadscheme.spatial(unmark(data), dummy, method, ...) 
W <- w.quad(P) iz <- is.data(P) Wdat <- W[iz] Wdum <- W[!iz] ## find the set of all possible marks if(!is.multitype(data)) stop("data pattern is not multitype") data.marks <- marks(data) markset <- levels(data.marks) nmarks <- length(markset) ## replicate dummy points, one copy for each possible mark ## -> dummy x {1,..,K} dumdum <- cartesian(dummy, markset) Wdumdum <- rep.int(Wdum, nmarks) Idumdum <- rep.int(ndata + seq_len(ndummy), nmarks) ## also make dummy marked points at same locations as data points ## but with different marks dumdat <- cartesian(unmark(data), markset) Wdumdat <- rep.int(Wdat, nmarks) Mdumdat <- marks(dumdat) Idumdat <- rep.int(1:ndata, nmarks) Mrepdat <- rep.int(data.marks, nmarks) ok <- (Mdumdat != Mrepdat) dumdat <- dumdat[ok,] Wdumdat <- Wdumdat[ok] Idumdat <- Idumdat[ok] ## combine the two dummy patterns dumb <- superimpose(dumdum, dumdat, W=dummy$window, check=FALSE) Wdumb <- c(Wdumdum, Wdumdat) Idumb <- c(Idumdum, Idumdat) ## record the quadrature parameters param <- list(weight = P$param$weight, dummy = NULL, sourceid=c(1:ndata, Idumb)) ## wrap up Q <- quad(data, dumb, c(Wdat, Wdumb), param) return(Q) } "cartesian" <- function(pp, markset, fac=TRUE) { ## given an unmarked point pattern 'pp' ## and a finite set of marks, ## create the marked point pattern which is ## the Cartesian product, consisting of all pairs (u,k) ## where u is a point of 'pp' and k is a mark in 'markset' nmarks <- length(markset) result <- ppp(rep.int(pp$x, nmarks), rep.int(pp$y, nmarks), window=pp$window, check=FALSE) marx <- rep.int(markset, rep.int(pp$n, nmarks)) if(fac) marx <- factor(marx, levels=markset) marks(result) <- marx return(result) } validate.quad <- function(Q, fatal=FALSE, repair=TRUE, announce=FALSE) { X <- Q$data D <- Q$dummy mX <- is.marked(X) mD <- is.marked(D) nbg <- function(whinge, fatal=FALSE, announce=FALSE) { if(fatal) stop(whinge, call.=FALSE) else { if(announce) warning(whinge, call.=FALSE) return(FALSE) } } if(mX != mD) { whinge <- 
if(mX) "data points are marked, but dummy points are not" else "dummy points are marked, but data points are not" return(nbg(whinge, fatal, announce)) } if(!mX) return(TRUE) # marked points fX <- is.factor(Xmarx <- marks(X)) fD <- is.factor(Dmarx <- marks(D)) if(fX != fD) { whinge <- if(fX) "data points are multitype, but dummy points are not" else "dummy points are multitype, but data points are not" return(nbg(whinge, fatal, announce)) } if(!fX) return(TRUE) # multitype points lX <- levels(Xmarx) lD <- levels(Dmarx) if(length(lX) != length(lD) || any(lX != lD)) { whinge <- "data and dummy points have different sets of possible marks" return(nbg(whinge, fatal, announce)) } return(TRUE) } pixelquad <- function(X, W=as.owin(X)) { ## make a quadscheme with a dummy point at every pixel verifyclass(X, "ppp") ## convert window to mask if not already one W <- as.owin(W) M <- as.mask(W) MM <- M$m pixelarea <- M$xstep * M$ystep ## create pixel coordinates and corresponding row, column indices rxy <- rasterxy.mask(M, drop=TRUE) xx <- rxy$x yy <- rxy$y cc <- as.vector(col(MM)[MM]) rr <- as.vector(row(MM)[MM]) Nr <- M$dim[1] Nc <- M$dim[2] ## dummy point pattern dum <- ppp(xx, yy, window=W, check=FALSE) ## discretise data points ij <- nearest.raster.point(X$x, X$y, M) ijrow <- ij$row ijcol <- ij$col if(!is.marked(X)) { ## tabulate pixel locations of data points Xtab <- table(row=factor(ijrow, levels=1:Nr), col=factor(ijcol, levels=1:Nc)) ## every pixel contains exactly one dummy point, ## so the total count of quadrature points in each pixel is: Qtab <- Xtab + 1 ## compute counting weights for data points wdat <- 1/Qtab[cbind(ijrow, ijcol)] ## compute counting weights for dummy points wdum <- 1/Qtab[cbind(rr, cc)] } else { marx <- marks(X) ## tabulate pixel locations and marks of data points Xtab <- table(row=factor(ijrow, levels=1:Nr), col=factor(ijcol, levels=1:Nc), mark=marx) ## replicate dummy points (pixel centres) for each mark dum <- cartesian(dum, levels(marx)) ## 
## [fragment] tail of a quadrature-scheme helper whose definition begins
## before this chunk; reproduced verbatim (reformatted only).
## every marked pixel contains exactly one dummy point,
## so the total count of quadrature points in each marked pixel is:
    Qtab <- Xtab + 1
    ## compute counting weights for data points
    wdat <- 1/Qtab[cbind(ijrow, ijcol, as.integer(marx))]
    ## compute counting weights for dummy points
    nm <- length(levels(marx))
    wdum <- 1/Qtab[cbind(rep.int(rr, nm),
                         rep.int(cc, nm),
                         rep(1:nm, each=length(rr)))]
  }
  ## create quadrature scheme
  wboth <- pixelarea * c(wdat, wdum)
  Q <- quad(X, dum, wboth)
  attr(Q, "M") <- M
  return(Q)
}

#
# lohboot.R
#
# Loh's bootstrap CI's for local pcf, local K etc
#

#' Loh's bootstrap confidence bands for a summary function.
#'
#' @param X   point pattern (class "ppp").
#' @param fun name (or one of the listed functions) of the summary function.
#' @param block use Loh's block bootstrap instead of resampling points.
#' @param global compute a global (simultaneous) band instead of pointwise.
#' @param basicboot use the "basic bootstrap interval" from Loh's paper.
#' @param Vcorrection apply Loh's variance correction (K functions only).
#' @param confidence coverage level, strictly between 0.5 and 1.
#' @param nx,ny number of blocks in each direction (block bootstrap).
#' @param nsim  number of bootstrap resamples.
#' @param type  quantile algorithm, passed to stats::quantile().
#' @return an "fv" object with estimate and lower/upper confidence limits.
#'
#' BUG FIX: the original only assigned W <- Window(X) inside the
#' block-bootstrap branch, so block=FALSE with Vcorrection=TRUE failed with
#' "object 'W' not found".  W is now computed unconditionally before the
#' resampling branches.
lohboot <-
  function(X,
           fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom"),
           ...,
           block=FALSE, global=FALSE, basicboot=FALSE, Vcorrection=FALSE,
           confidence=0.95, nx = 4, ny = nx, nsim=200, type=7)
{
  stopifnot(is.ppp(X))
  fun.name <- short.deparse(substitute(fun))
  if(is.character(fun)) {
    fun <- match.arg(fun)
  } else if(is.function(fun)) {
    flist <- list(pcf=pcf, Kest=Kest, Lest=Lest,
                  pcfinhom=pcfinhom, Kinhom=Kinhom, Linhom=Linhom)
    id <- match(list(fun), flist)
    if(is.na(id))
      stop(paste("Loh's bootstrap is not supported for the function",
                 sQuote(fun.name)))
    fun <- names(flist)[id]
  } else stop("Unrecognised format for argument fun")
  ## validate confidence level
  stopifnot(confidence > 0.5 && confidence < 1)
  alpha <- 1 - confidence
  if(!global) {
    probs <- c(alpha/2, 1-alpha/2)
    rank <- nsim * probs[2L]
  } else {
    probs <- 1-alpha
    rank <- nsim * probs
  }
  if(abs(rank - round(rank)) > 0.001)
    warning(paste("confidence level", confidence,
                  "corresponds to a non-integer rank", paren(rank),
                  "so quantiles will be interpolated"))
  n <- npoints(X)
  ## compute local functions
  localfun <- switch(fun,
                     pcf=localpcf,
                     Kest=localK,
                     Lest=localL,
                     pcfinhom=localpcfinhom,
                     Kinhom=localKinhom,
                     Linhom=localLinhom)
  f <- localfun(X, ...)
  theo <- f$theo
  ## parse edge correction info
  correction <- attr(f, "correction")
  switch(correction,
         none      = { ctag <- "un";    cadj <- "uncorrected" },
         border    = { ctag <- "bord";  cadj <- "border-corrected" },
         translate = { ctag <- "trans"; cadj <- "translation-corrected" },
         isotropic = { ctag <- "iso";   cadj <- "Ripley isotropic corrected" })
  ## first n columns are the local pcfs (etc) for the n points of X
  y <- as.matrix(as.data.frame(f))[, 1:n]
  nr <- nrow(y)
  ## FIX: W is needed both by the block bootstrap and by the Vcorrection
  ## adjustment below, so define it before branching.
  W <- Window(X)
  ###### Modification by Christophe Biscio
  if(!block) {
    ## This is the loop in the former version of code
    ## average them
    ymean <- .rowMeans(y, na.rm=TRUE, nr, n)
    ## resample
    ystar <- matrix(, nrow=nr, ncol=nsim)
    for(i in 1:nsim) {
      ## resample n points with replacement
      ind <- sample(n, replace=TRUE)
      ## average their local pcfs
      ystar[,i] <- .rowMeans(y[,ind], nr, n, na.rm=TRUE)
    }
  } else {
    ## Block bootstrap as described by Loh.
    ## Block creation for the bootstrap
    blocks <- tiles(quadrats(boundingbox(W), nx = nx, ny =ny))
    if(!is.rectangle(W)){
      fullblocks <- sapply(blocks, is.subset.owin, B = W)
      if(sum(fullblocks)<2){
        stop("Not enough blocks are fully contained in the window.")
      }
      warning("For non-rectangular windows only blocks fully contained in the window are used:",
              paste(sum(fullblocks), "are used, and ",
                    sum(!fullblocks), "are omitted."))
      blocks <- blocks[fullblocks]
      n <- sum(sapply(blocks, function(w) npoints(X[w])))
    }
    ## Average the marks in each blocks
    nmarks <- length(blocks) # same as the number of columns in ymarks
    ## which point is in which block
    Xinblocks <- lapply( 1:nmarks,
                        FUN = function(i) {which(inside.owin(X, w=blocks[[i]]))})
    ymarks <- lapply(1:nmarks, FUN = function(i) {
      if(length(Xinblocks[[i]])==0) {
        rep(0,nr)
      } else {
        .rowSums(y[,Xinblocks[[i]]], nr , length(Xinblocks[[i]]),
                 na.rm=TRUE)*nmarks/n
      }
    } )
    ymarks <- do.call(cbind,ymarks)
    ## average all the marks
    ymean <- .rowMeans(ymarks, na.rm=TRUE, nr, nmarks)
    ## Average the marks in each blocks
    ystar <- matrix(, nrow=nr, ncol=nsim)
    for(i in 1:nsim) {
      ## resample nblocks blocks with replacement
      ind <- sample( nmarks , replace=TRUE)
      ## average their local pcfs
      ystar[,i] <- .rowMeans(ymarks[,ind], nr, nmarks, na.rm=TRUE)
    }
  }
  ## compute quantiles
  if(!global) {
    ## pointwise quantiles
    hilo <- apply(ystar, 1, quantile, probs=probs, na.rm=TRUE, type=type)
    ## Ripley's K function correction proposed by Loh
    ## (scalar condition, so && / || are the appropriate operators)
    if(Vcorrection && (fun=="Kest" || fun=="Kinhom")) {
      Vcov <- sqrt(1+2*pi*n*(f$r)^2/area.owin(W))
      hilo[1L,] <- ymean+(ymean-(hilo[1L,]) ) / Vcov
      hilo[2L,] <- ymean+(ymean-(hilo[2L,]) ) / Vcov
      ## switch of the index to have hilo[1,] as the lower bound
      ## and hilo[2,] as the upper bound
      hilo <- hilo[2:1,]
      ## The basic bootstrap interval is already used.
      ## Ensure that I do not modified hilo
      basicboot <- FALSE
    }
    ## So called "basic bootstrap interval" proposed in Loh's paper,
    ## the intervals are asymptotically the same
    if(basicboot) {
      hilo[1L,] <- 2*ymean-(hilo[1L,])
      hilo[2L,] <- 2*ymean-(hilo[2L,])
      ## switch of the index to have hilo[1,] as the lower bound
      ## and hilo[2,] as the upper bound
      hilo <- hilo[c(2,1),]
    }
  } else {
    ## quantiles of deviation
    ydif <- sweep(ystar, 1, ymean)
    ydev <- apply(abs(ydif), 2, max, na.rm=TRUE)
    crit <- quantile(ydev, probs=probs, na.rm=TRUE, type=type)
    hilo <- rbind(ymean - crit, ymean + crit)
  }
  ####### End Modification by Christophe Biscio
  ## create fv object
  df <- data.frame(r=f$r, theo=theo, ymean,
                   lo=hilo[1L,], hi=hilo[2L,])
  colnames(df)[3L] <- ctag
  CIlevel <- paste(100 * confidence, "%% confidence", sep="")
  desc <- c("distance argument r",
            "theoretical Poisson %s",
            paste(cadj, "estimate of %s"),
            paste("lower", CIlevel, "limit for %s"),
            paste("upper", CIlevel, "limit for %s"))
  clabl <- paste("hat(%s)[", ctag, "](r)", sep="")
  labl <- c("r", "%s[pois](r)", clabl, "%s[loCI](r)", "%s[hiCI](r)")
  switch(fun,
         pcf={ fname <- "g" ; ylab <- quote(g(r)) },
         Kest={ fname <- "K" ; ylab <- quote(K(r)) },
         Lest={ fname <- "L" ; ylab <- quote(L(r)) },
         pcfinhom={ fname <- "g[inhom]" ; ylab <- quote(g[inhom](r)) },
         Kinhom={ fname <- "K[inhom]" ; ylab <- quote(K[inhom](r)) },
         Linhom={ fname <- "L[inhom]" ; ylab <- quote(L[inhom](r)) })
  g <- fv(df, "r", ylab, ctag, , c(0, max(f$r)), labl, desc, fname=fname)
  formula(g) <- . ~ r
  fvnames(g, ".") <- c(ctag, "theo", "hi", "lo")
  fvnames(g, ".s") <- c("hi", "lo")
  unitname(g) <- unitname(X)
  g
}

#'
#' boundingcircle.R
#'
#' bounding circle and its centre
#'

## Deprecated aliases retained for backward compatibility.
circumradius <- function(x, ...) {
  .Deprecated("boundingradius")
  UseMethod("boundingradius")
}

circumradius.owin <- function(x, ...) {
  .Deprecated("boundingradius.owin")
  boundingradius.owin(x, ...)
}

circumradius.ppp <- function(x, ...) {
  .Deprecated("boundingradius.ppp")
  boundingradius.ppp(x, ...)
}

## generics
boundingradius <- function(x, ...) {
  UseMethod("boundingradius")
}

boundingcentre <- function(x, ...) {
  UseMethod("boundingcentre")
}

boundingcircle <- function(x, ...) {
  UseMethod("boundingcircle")
}

#' owin methods: radius/centre of the smallest enclosing circle,
#' obtained by minimising the distance to the farthest boundary point.
boundingradius.owin <- function(x, ...) {
  sqrt(min(fardist(x, ..., squared=TRUE)))
}

boundingcentre.owin <- function(x, ...) {
  z <- where.min(fardist(x, ..., squared=TRUE))
  Window(z) <- x
  return(z)
}

boundingcircle.owin <- function(x, ...) {
  d2 <- fardist(x, ..., squared=TRUE)
  z <- where.min(d2)
  r <- sqrt(min(d2))
  w <- disc(centre=z, radius=r)
  return(w)
}

#' ppp methods: reduce to the convex hull first (same bounding circle).
boundingradius.ppp <- function(x, ...) {
  boundingradius(convexhull(x), ...)
}

boundingcentre.ppp <- function(x, ...) {
  z <- boundingcentre(convexhull(x), ...)
  Window(z) <- Window(x)
  return(z)
}

boundingcircle.ppp <- function(x, ...) {
  boundingcircle(convexhull(x), ...)
}

#
# rho2hat.R
#
# Relative risk for pairs of covariate values
#

#' Estimate the intensity of a point pattern (or its ratio to a fitted model)
#' as a function of a PAIR of covariates.
#'
#' @param object point pattern, quadrature scheme, or fitted "ppm" model.
#' @param cov1,cov2 covariates: images, functions(x,y), or the strings
#'        "x"/"y" denoting the Cartesian coordinates.
#' @param method "ratio" (ratio of kernel estimates) or "reweight"
#'        (kernel estimate with weights 1/reference).
#' @return a pixel image of class c("rho2hat", "im") with metadata in
#'         attr(, "stuff").
rho2hat <- function(object, cov1, cov2, ..., method=c("ratio", "reweight")) {
  cov1name <- short.deparse(substitute(cov1))
  cov2name <- short.deparse(substitute(cov2))
  callstring <- short.deparse(sys.call())
  method <- match.arg(method)
  ## validate model: fit a trivial Poisson model if given raw data
  if(is.ppp(object) || inherits(object, "quad")) {
    model <- ppm(object, ~1, forcefit=TRUE)
    reference <- "area"
    modelcall <- NULL
  } else if(is.ppm(object)) {
    model <- object
    reference <- "model"
    modelcall <- model$call
    if(is.null(getglmfit(model)))
      model <- update(model, forcefit=TRUE)
  } else stop("object should be a point pattern or a point process model")
  ## interpret string "x" or "y" as a coordinate function
  getxyfun <- function(s) {
    switch(s,
           x = { function(x,y) { x } },
           y = { function(x,y) { y } },
           stop(paste("Unrecognised covariate name", sQuote(s))))
  }
  if(is.character(cov1) && length(cov1) == 1) {
    cov1name <- cov1
    cov1 <- getxyfun(cov1name)
  }
  if(is.character(cov2) && length(cov2) == 1) {
    cov2name <- cov2
    cov2 <- getxyfun(cov2name)
  }
  if( (cov1name == "x" && cov2name == "y") ||
      (cov1name == "y" && cov2name == "x")) {
    ## spatial relative risk: the two covariates are just the coordinates
    isxy <- TRUE
    needflip <- (cov1name == "y" && cov2name == "x")
    X <- data.ppm(model)
    if(needflip) X <- flipxy(X)
    switch(method,
           ratio = {
             ## ratio of smoothed intensity estimates
             den <- density(X, ...)
             sigma <- attr(den, "sigma")
             varcov <- attr(den, "varcov")
             W <- as.owin(den)
             if(!needflip) {
               lambda <- predict(model, locations=W)
             } else {
               lambda <- flipxy(predict(model, locations=flipxy(W)))
             }
             rslt <- switch(reference,
                            area = { den },
                            model = {
                              lam <- blur(lambda, sigma=sigma,
                                          varcov=varcov, normalise=TRUE)
                              eval.im(den/lam)
                            })
           },
           reweight = {
             ## smoothed point pattern with weights = 1/reference
             W <- do.call.matched(as.mask,
                                  append(list(w=as.owin(X)), list(...)))
             if(!needflip) {
               lambda <- predict(model, locations=W)
             } else {
               lambda <- flipxy(predict(model, locations=flipxy(W)))
             }
             gstarX <- switch(reference,
                              area = { rep.int(area(W), npoints(X)) },
                              model = { lambda[X] })
             rslt <- density(X, ..., weights=1/gstarX)
             sigma <- attr(rslt, "sigma")
             varcov <- attr(rslt, "varcov")
           })
    Z12points <- X
    r1 <- W$xrange
    r2 <- W$yrange
    lambda <- lambda[]
  } else {
    ## general case
    isxy <- FALSE
    ## harmonise covariates: promote functions to images on a common window
    if(is.function(cov1) && is.im(cov2)) {
      cov1 <- as.im(cov1, W=cov2)
    } else if(is.im(cov1) && is.function(cov2)) {
      cov2 <- as.im(cov2, W=cov1)
    }
    ## evaluate each covariate at data points and at pixels
    stuff1 <- evalCovar(model, cov1)
    stuff2 <- evalCovar(model, cov2)
    ## unpack
    values1 <- stuff1$values
    values2 <- stuff2$values
    ## covariate values at each data point
    Z1X <- values1$ZX
    Z2X <- values2$ZX
    ## covariate values at each pixel
    Z1values <- values1$Zvalues
    Z2values <- values2$Zvalues
    ## model intensity
    lambda <- values1$lambda
    ## ranges of each covariate
    r1 <- range(Z1X, Z1values, finite=TRUE)
    r2 <- range(Z2X, Z2values, finite=TRUE)
    scal <- function(x, r) { (x - r[1])/diff(r) }
    ## scatterplot coordinates, rescaled to the unit square
    Z12points <- ppp(scal(Z1X, r1), scal(Z2X, r2), c(0,1), c(0,1))
    Z12pixels <- ppp(scal(Z1values, r1), scal(Z2values, r2), c(0,1), c(0,1))
    ## normalising constants
    ## nX <- length(Z1X)
    npixel <- length(lambda)
    areaW <- area(Window(model))
    pixelarea <- areaW/npixel
    baseline <- if(reference == "area") rep.int(1, npixel) else lambda
    wts <- baseline * pixelarea
    switch(method,
           ratio = {
             ## estimate intensities
             fhat <- density(Z12points, ...)
             sigma <- attr(fhat, "sigma")
             varcov <- attr(fhat, "varcov")
             ghat <- do.call(density.ppp,
                             resolve.defaults(list(Z12pixels, weights=wts),
                                              list(...),
                                              list(sigma=sigma,
                                                   varcov=varcov)))
             ## compute ratio of smoothed densities
             rslt <- eval.im(fhat/ghat)
           },
           reweight = {
             ## compute smoothed intensity with weight = 1/reference
             ghat <- density(Z12pixels, weights=wts, ...)
             rslt <- density(Z12points, weights=1/ghat[Z12points], ...)
             sigma <- attr(rslt, "sigma")
             varcov <- attr(rslt, "varcov")
           })
  }
  ## add scale and label info
  attr(rslt, "stuff") <- list(isxy=isxy,
                              cov1=cov1, cov2=cov2,
                              cov1name=cov1name, cov2name=cov2name,
                              r1=r1, r2=r2,
                              reference=reference,
                              lambda=lambda,
                              modelcall=modelcall,
                              callstring=callstring,
                              Z12points=Z12points,
                              sigma=sigma, varcov=varcov)
  class(rslt) <- c("rho2hat", class(rslt))
  rslt
}

#' Plot method for rho2hat: image plot with optional axes and data points.
plot.rho2hat <- function(x, ..., do.points=FALSE) {
  xname <- short.deparse(substitute(x))
  s <- attr(x, "stuff")
  ## resolve "..." arguments
  rd <- resolve.defaults(list(...),
                         list(add=FALSE, axes=!s$isxy,
                              xlab=s$cov1name, ylab=s$cov2name))
  ## plot image
  plotparams <- graphicsPars("plot")
  do.call.matched(plot.im,
                  resolve.defaults(list(x=x, axes=FALSE),
                                   list(...),
                                   list(main=xname,
                                        ribargs=list(axes=TRUE))),
                  extrargs=c(plotparams, "add", "zlim", "breaks"))
  ## add axes
  if(rd$axes) {
    axisparams <- graphicsPars("axis")
    Axis <- function(..., extrargs=axisparams) {
      do.call.matched(graphics::axis,
                      resolve.defaults(list(...)),
                      extrargs=extrargs)
    }
    if(s$isxy) {
      ## for (x,y) plots the image is at the correct physical scale
      xr <- x$xrange
      yr <- x$yrange
      spak <- 0.05 * max(diff(xr), diff(yr))
      Axis(side=1, ..., at=pretty(xr), pos=yr[1] - spak)
      Axis(side=2, ..., at=pretty(yr), pos=xr[1] - spak)
    } else {
      ## for other plots the image was scaled to the unit square
      rx <- s$r1
      ry <- s$r2
      px <- pretty(rx)
      py <- pretty(ry)
      Axis(side=1, labels=px, at=(px - rx[1])/diff(rx), ...)
      Axis(side=2, labels=py, at=(py - ry[1])/diff(ry), ...)
    }
    title(xlab=rd$xlab)
    title(ylab=rd$ylab)
  }
  if(do.points) {
    do.call.matched(plot.ppp,
                    resolve.defaults(list(x=s$Z12points, add=TRUE),
                                     list(...)),
                    extrargs=c("pch", "col", "cols", "bg",
                               "cex", "lwd", "lty"))
  }
  invisible(NULL)
}

#' Print method for rho2hat: summarise provenance and smoothing parameters.
print.rho2hat <- function(x, ...) {
  s <- attr(x, "stuff")
  cat("Scatterplot intensity estimate (class rho2hat)\n")
  cat(paste("for the covariates", s$cov1name, "and", s$cov2name, "\n"))
  switch(s$reference,
         area=cat("Function values are absolute intensities\n"),
         model={
           cat("Function values are relative to fitted model\n")
           print(s$modelcall)
         })
  cat(paste("Call:", s$callstring, "\n"))
  if(s$isxy) {
    cat("Obtained by spatial smoothing of original data\n")
    cat("Smoothing parameters used by density.ppp:\n")
  } else {
    cat("Obtained by transforming to the unit square and smoothing\n")
    cat("Smoothing parameters (on unit square) used by density.ppp:\n")
  }
  if(!is.null(s$sigma)) cat(paste("\tsigma = ", signif(s$sigma, 5), "\n"))
  if(!is.null(s$varcov)) { cat("\tvarcov =\n") ; print(s$varcov) }
  cat("Intensity values:\n")
  NextMethod("print")
}

#' Predict method: evaluate the fitted rho2 surface back on the spatial
#' domain by looking up each pixel's pair of covariate values.
predict.rho2hat <- function(object, ..., relative=FALSE) {
  if(length(list(...)) > 0)
    warning("Additional arguments ignored in predict.rho2hat")
  ## extract info
  s <- attr(object, "stuff")
  reference <- s$reference
  ## extract images of covariate, scaled to [0,1]
  Z1 <- scaletointerval(s$cov1, xrange=s$r1)
  Z2 <- scaletointerval(s$cov2, xrange=s$r2)
  ## extract pairs of covariate values
  ZZ <- pairs(Z1, Z2, plot=FALSE)
  ## apply rho to Z
  YY <- safelookup(object, ppp(ZZ[,1], ZZ[,2], c(0,1), c(0,1)), warn=FALSE)
  ## reform as image
  Y <- Z1
  Y[] <- YY
  ## adjust to reference baseline
  if(!(relative || reference == "area")) {
    lambda <- s$lambda
    Y <- Y * lambda
  }
  return(Y)
}

#
# rescue.rectangle.R
#

#' Convert a window that is numerically a rectangle back to type "rectangle".
#' [fragment] definition continues on the next chunk line.
rescue.rectangle <- function(W) {
  verifyclass(W, "owin")
  if(W$type == "mask" && all(W$m))
return(owin(W$xrange, W$yrange, unitname=unitname(W))) if(W$type == "polygonal" && length(W$bdry) == 1) { x <- W$bdry[[1]]$x y <- W$bdry[[1]]$y if(length(x) == 4 && length(y) == 4) { # could be a rectangle veryunique <- function(z) { uz <- sort(unique(z)) epsilon <- 2 * .Machine$double.eps * diff(range(uz)) close <- (diff(uz) <= epsilon) uz <- uz[c(TRUE, !close)] return(uz) } ux <- veryunique(x) uy <- veryunique(y) if(length(ux) == 2 && length(uy) == 2) return(owin(ux,uy, unitname=unitname(W))) } } return(W) } spatstat/R/distfunlpp.R0000644000176200001440000000166613115225157014644 0ustar liggesusers# # distfunlpp.R # # method for 'distfun' for class 'lpp' # # $Revision: 1.2 $ $Date: 2016/02/11 09:36:11 $ # distfun.lpp <- local({ distfun.lpp <- function(X, ..., k=1) { stopifnot(inherits(X, "lpp")) force(X) force(k) stopifnot(length(k) == 1) L <- as.linnet(X) f <- function(x, y=NULL, seg=NULL, tp=NULL, ...) { # L is part of the environment Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L) d <- nncross.lpp(Y, X, what="dist", k=k) return(d) } f <- linfun(f, L) assign("k", k, envir=environment(f)) assign("X", X, envir=environment(f)) attr(f, "explain") <- uitleggen return(f) } uitleggen <- function(x, ...) 
{ splat("Distance function for lpp object") envx <- environment(x) k <- get("k", envir=envx) if(k != 1L) splat("Yields distance to", ordinal(k), "nearest point") X <- get("X", envir=envx) print(X) } distfun.lpp }) spatstat/R/fii.R0000755000176200001440000001423413115271075013221 0ustar liggesusers# # fii.R # # Class of fitted interpoint interactions # # fii <- function(interaction=NULL, coefs=numeric(0), Vnames=character(0), IsOffset=NULL) { if(is.null(interaction)) interaction <- Poisson() stopifnot(is.interact(interaction)) if(is.poisson.interact(interaction)) { if(length(Vnames) > 0) stop("Coefficients inappropriate for Poisson process") } if(is.null(IsOffset)) IsOffset <- rep.int(FALSE, length(Vnames)) else { stopifnot(is.logical(IsOffset)) stopifnot(length(IsOffset) == length(Vnames)) } out <- list(interaction=interaction, coefs=coefs, Vnames=Vnames, IsOffset=IsOffset) class(out) <- c("fii", class(out)) return(out) } summary.fii <- function(object, ...) { y <- unclass(object) INTERACT <- object$interaction coefs <- object$coefs Vnames <- object$Vnames IsOffset <- object$IsOffset y$poisson <- is.poisson.interact(INTERACT) thumbnail <- NULL if(y$poisson) { thumbnail <- "Poisson()" } else { if(!is.null(INTERACT$interpret)) { # invoke auto-interpretation feature sensible <- if(newstyle.coeff.handling(INTERACT)) (INTERACT$interpret)(coefs[Vnames[!IsOffset]], INTERACT) else (INTERACT$interpret)(coefs, INTERACT) if(!is.null(sensible)) { header <- paste("Fitted", sensible$inames) printable <- sensible$printable # Try to make a thumbnail description param <- sensible$param ipar <- INTERACT$par if(all(lengths(param) == 1) && all(lengths(ipar) == 1)) { allargs <- append(ipar, param) allargs <- lapply(allargs, signif, digits=4) thumbnail <- fakecallstring(INTERACT$creator, allargs) } } else { # no fitted interaction parameters (e.g. 
Hard Core) header <- NULL printable <- NULL thumbnail <- paste0(INTERACT$creator, "()") } } else { # fallback sensible <- NULL VN <- Vnames[!IsOffset] if(length(VN) > 0) { header <- "Fitted interaction terms" icoef <- coefs[VN] printable <- exp(unlist(icoef)) ricoef <- lapply(icoef, signif, digits=4) thumbnail <- fakecallstring(INTERACT$creator, ricoef) } else { header <- NULL printable <- NULL thumbnail <- paste0(INTERACT$creator, "()") } } y <- append(y, list(sensible=sensible, header=header, printable=printable, thumbnail=thumbnail)) } class(y) <- c("summary.fii", class(y)) return(y) } print.fii <- function(x, ...) { sx <- summary(x) do.call(print.summary.fii, resolve.defaults(list(x=sx, brief=TRUE), list(...))) return(invisible(NULL)) } print.summary.fii <- local({ #' hide internal arguments print.summary.fii <- function(x, ...) { PrintIt(x, ...) } PrintIt <- function(x, ..., prefix="Interaction: ", banner=TRUE, family = waxlyrical('extras'), brief = !family, tiny = !waxlyrical('errors')) { if(tiny) { #' use thumbnail if available thumbnail <- x$thumbnail if(!is.null(thumbnail)) { splat(thumbnail) return(invisible(NULL)) } } terselevel <- spatstat.options('terse') if(banner && !brief) cat(prefix) if(x$poisson) { splat("Poisson process") parbreak(terselevel) } else { print(x$interaction, family=family, brief=TRUE, banner=banner) if(!is.null(x$printable)) { nvalues <- length(x$printable) nheader <- length(x$header) if(nvalues == 1) { splat(paste(paste0(x$header, ":\t"), x$printable)) } else if(nvalues == nheader) { for(i in 1:nheader) { hdi <- x$header[i] xpi <- x$printable[[i]] if(!is.list(xpi) && length(xpi) == 1) { splat(paste0(hdi, ":\t", xpi)) } else { splat(paste0(hdi, ":")) print(xpi) } } } else { splat(x$header) print(x$printable) } } } if(!brief) { co <- x$coefs[x$Vnames[!x$IsOffset]] if(length(co) > 0) { parbreak(terselevel) splat("Relevant coefficients:") print(co) } } return(invisible(NULL)) } print.summary.fii }) parameters.fii <- function(model, 
...) { ss <- summary(model) out <- append(ss$interaction$par, ss$sensible$param) return(out) } coef.summary.fii <- function(object, ...) { object$printable } reach.fii <- function(x, ..., epsilon=0) { inte <- x$interaction coeffs <- x$coefs Vnames <- x$Vnames if(is.poisson.interact(inte)) return(0) # get 'irange' function from interaction object irange <- inte$irange if(is.null(irange)) return(Inf) # apply 'irange' function using fitted coefficients if(newstyle.coeff.handling(inte)) ir <- irange(inte, coeffs[Vnames], epsilon=epsilon) else ir <- irange(inte, coeffs, epsilon=epsilon) if(is.na(ir)) ir <- Inf return(ir) } plot.fii <- function(x, ...) { inte <- x$interaction if(is.poisson.interact(inte)) { message("Poisson interaction; nothing plotted") return(invisible(NULL)) } plfun <- inte$plot %orifnull% inte$family$plot if(is.null(plfun)) stop("Plotting not implemented for this type of interaction") plfun(x, ...) } fitin <- function(object) { UseMethod("fitin") } fitin.ppm <- function(object) { f <- object$fitin if(!is.null(f)) return(f) # For compatibility with older versions inte <- object$interaction if(is.null(inte)) f <- fii() # Poisson else { coefs <- coef(object) Vnames <- object$internal$Vnames IsOffset <- object$internal$IsOffset # Internal names of regressor variables f <- fii(inte, coefs, Vnames, IsOffset) } unitname(f) <- unitname(data.ppm(object)) return(f) } as.interact.fii <- function(object) { verifyclass(object, "fii") return(object$interaction) } coef.fii <- function(object, ...) 
{
  verifyclass(object, "fii")
  return(object$coefs)
}

#
# psst.R
#
# Computes the GNZ contrast of delta-f for any function f
#

#' Pseudo-residual diagnostic: compares the data pseudosum of
#' Delta f with its model compensator (GNZ contrast), for any summary
#' functional `fun` of the point pattern.
#'
#' @param object fitted "ppm" model, point pattern, or quadrature scheme.
#' @param fun functional of a point pattern, called as fun(X, r=rvals, ...).
#' @param r,breaks optional distance values / breakpoints.
#' @param model,trend,interaction,rbord model specification used when
#'        `object` is raw data.
#' @param truecoef,hi.res optional coefficients / high-resolution quadrature
#'        passed to residuals().
#' @param funargs extra arguments for `fun`.
#' @param verbose print progress reports.
#' @return an "fv" object containing the pseudosum, compensator,
#'         pseudovariance and standardised pseudoresidual.
psst <- function(object, fun, r=NULL, breaks=NULL, ...,
                 model=NULL, trend=~1, interaction=Poisson(),
                 rbord=reach(interaction), truecoef=NULL, hi.res=NULL,
                 funargs=list(correction="best"), verbose=TRUE) {
  if(inherits(object, "ppm")) {
    fit <- object
  } else if(is.ppp(object) || inherits(object, "quad")) {
    if(is.ppp(object)) object <- quadscheme(object, ...)
    if(!is.null(model)) {
      fit <- update(model, Q=object, forcefit=TRUE)
    } else {
      fit <- ppm(object, trend=trend, interaction=interaction,
                 rbord=rbord, forcefit=TRUE)
    }
  } else
    stop("object should be a fitted point process model or a point pattern")
  ## rfixed <- !is.null(r) || !is.null(breaks)
  ## Extract data and quadrature points
  Q <- quad.ppm(fit, drop=FALSE)
  X <- data.ppm(fit)
  U <- union.quad(Q)
  Z <- is.data(Q) # indicator data/dummy
  ## E <- equalsfun.quad(Q)
  ## WQ <- w.quad(Q)  # quadrature weights
  ## integrals will be restricted to quadrature points
  ## that were actually used in the fit
  ## USED <- getglmsubset(fit)
  if(fit$correction == "border") {
    rbord <- fit$rbord
    b <- bdist.points(U)
    USED <- (b > rbord)
  } else
    USED <- rep.int(TRUE, U$n)
  ## basic statistics
  Win <- Window(X)
  npts <- npoints(X)
  areaW <- area(Win)
  lambda <- npts/areaW
  ## determine breakpoints for r values
  rmaxdefault <- rmax.rule("G", Win, lambda)
  breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault)
  rvals <- breaks$r
  rmax <- breaks$max
  ## residuals
  resid <- residuals(fit, type="raw", drop=FALSE,
                     new.coef=truecoef, quad=hi.res)
  rescts <- with(resid, "continuous")
  ## absolute weight for continuous integrals
  wc <- -rescts
  ## initialise fv object
  df <- data.frame(r=rvals, theo=0)
  desc <- c("distance argument r",
            "value 0 corresponding to perfect fit")
  ans <- fv(df, "r", substitute(bold(R)~Delta~S(r), NULL),
            "theo", . ~ r,
            alim=c(0, rmax), c("r","%s[theo](r)"), desc,
            fname="bold(R)~Delta~S")
  ## evaluate fun(X) for data
  fX <- do.call(fun, append(list(X, r=rvals), funargs))
  fXunits <- unitname(fX)
  ## Extract 'best' estimate only
  fX <- with(fX, .y)
  zero <- numeric(length(fX))
  ## sum over all quadrature points
  iused <- seq(U$n)[USED]
  nused <- length(iused)
  if(verbose)
    cat(paste("\nProcessing", nused, "quadrature points..."))
  ## running sums & integrals
  sumX <- zero
  integ <- integ2 <- zero
  ## template for X \cup {u}
  uX <- superimpose(U[1], X, W=Win, check=FALSE)
  Ux <- U$x
  Uy <- U$y
  if(verbose) pstate <- list()
  for(j in seq(nused)) {
    i <- iused[j]
    wi <- wc[i]
    if(Z[i]) {
      ## data point: contribution of deleting point i
      fXi <- do.call(fun, append(list(X[-i], r=rvals), funargs))
      fXi <- with(fXi, .y)
      deltaf <- fX - fXi
      sumX <- sumX + deltaf
    } else {
      ## dummy point: contribution of adding point u
      uX$x[1] <- Ux[i]
      uX$y[1] <- Uy[i]
      fuX <- do.call(fun, append(list(uX, r=rvals), funargs))
      fuX <- with(fuX, .y)
      deltaf <- fuX - fX
    }
    integ <- integ + wi * deltaf
    integ2 <- integ2 + wi * deltaf^2
    if(verbose) pstate <- progressreport(j, nused, state=pstate)
  }
  sdv <- sqrt(integ2)
  res <- sumX - integ
  ans <- bind.fv(ans,
                 data.frame(dat=sumX,
                            com=integ,
                            var=integ2,
                            sd=sdv,
                            hi=2*sdv,
                            lo=-2*sdv,
                            res=res,
                            stdres=res/sdv),
                 c("Sigma~Delta~S(r)",
                   "bold(C)~Delta~S(r)",
                   "bold(C)^2~Delta~S(r)",
                   "sqrt(bold(C)^2~Delta~S(r))",
                   "%s[hi](r)",
                   "%s[lo](r)",
                   "bold(R)~Delta~S(r)",
                   "bold(T)~Delta~S(r)"),
                 c("data pseudosum (contribution to %s)",
                   "model compensator (contribution to %s)",
                   "pseudovariance of %s",
                   "sqrt(pseudovariance) of %s",
                   "upper 2 sigma critical band for %s",
                   "lower 2 sigma critical band for %s",
                   "pseudoresidual function %s",
                   "standardised pseudoresidual function %s"),
                 "res")
  fvnames(ans, ".") <- c("res", "hi", "lo", "theo")
  unitname(ans) <- fXunits
  return(ans)
}

#' Trivial functional: number of points, as an fv object.
npfun <- function(X, ..., r) {
  npts <- npoints(X)
  ## initialise fv object
  df <- data.frame(r=r, theo=0, npoint=npts)
  desc <- c("distance argument r",
            "value 0",
            "value equal to number of points")
  ans <- fv(df, "r", substitute(npoints(r), NULL), "npoint", . ~ r,
            alim=c(0, max(r)),
            c("r","%s[theo](r)", "%s[obs](r)"),
            desc, fname="npoints")
  unitname(ans) <- unitname(X)
  return(ans)
}

#' Histogram of nearest-neighbour distances, as an fv object.
nndcumfun <- function(X, ..., r) {
  nn <- nndist(X)
  bk <- breakpts.from.r(r)
  ## nn <- nn[nn <= bdist.points(X)]
  h <- whist(nn, bk$val)
  ## initialise fv object
  df <- data.frame(r=r, theo=0, obs=h)
  desc <- c("distance argument r",
            "value 0",
            "observed count")
  ans <- fv(df, "r", substitute(nndcount(r), NULL), "obs", . ~ r,
            alim=c(0, max(r)),
            c("r","%s[theo](r)", "%s[obs](r)"),
            desc, fname="nndcount")
  unitname(ans) <- unitname(X)
  return(ans)
}

#
# datasetup.R
#
# When the package is installed, this tells us
# the directory where the .tab files are stored
#
# Typically data/murgatroyd.R reads data-raw/murgatroyd.tab
# and applies special processing
#
spatstat.rawdata.location <- function(...) {
  locn <- system.file("data-raw", package="spatstat")
  ## optional path components are appended below the data-raw directory
  if(length(list(...)) != 0)
    locn <- paste(c(locn, ...), collapse=.Platform$file.sep)
  return(locn)
}

#
# unique.ppp.R
#
# Methods for 'multiplicity' co-authored by Sebastian Meyer
# Copyright 2013 Adrian Baddeley and Sebastian Meyer
#

#' Remove duplicated points from a point pattern.
#' [fragment] definition continues on the next chunk line.
unique.ppp <- function(x, ..., warn=FALSE) {
  verifyclass(x, "ppp")
  dupe <- duplicated.ppp(x, ...)
if(!any(dupe)) return(x) if(warn) warning(paste(sum(dupe), "duplicated points were removed"), call.=FALSE) return(x[!dupe]) } duplicated.ppp <- function(x, ..., rule=c("spatstat", "deldir", "unmark")) { verifyclass(x, "ppp") rule <- match.arg(rule) if(rule == "deldir") return(deldir::duplicatedxy(x)) if(rule == "unmark") x <- unmark(x) n <- npoints(x) switch(markformat(x), none = { # unmarked points # check for duplication of x and y separately (a necessary condition) xx <- x$x yy <- x$y possible <- duplicated(xx) & duplicated(yy) if(!any(possible)) return(possible) # split by x coordinate of duplicated x values result <- possible xvals <- unique(xx[possible]) for(xvalue in xvals) { sub <- (xx == xvalue) # compare y values result[sub] <- duplicated(yy[sub]) } }, vector = { # marked points - split by mark value m <- marks(x) um <- if(is.factor(m)) levels(m) else unique(m) xx <- unmark(x) result <- logical(n) for(i in seq_along(um)) { sub <- (m == um[i]) result[sub] <- duplicated.ppp(xx[sub]) } }, dataframe = { result <- duplicated(as.data.frame(x)) }, # the following are currently not supported hyperframe = { result <- duplicated(as.data.frame(x)) }, list = { result <- duplicated(as.data.frame(as.hyperframe(x))) }, stop(paste("Unknown mark type", sQuote(markformat(x)))) ) return(result) } anyDuplicated.ppp <- function(x, ...) { anyDuplicated(as.data.frame(x), ...) 
} ## utility to check whether two rows are identical IdenticalRows <- local({ id <- function(i,j, a, b=a) { ai <- a[i,] bj <- b[j,] row.names(ai) <- row.names(bj) <- NULL identical(ai, bj) } Vectorize(id, c("i", "j")) }) multiplicity <- function(x) { UseMethod("multiplicity") } multiplicity.ppp <- function(x) { verifyclass(x, "ppp") np <- npoints(x) if(np == 0) return(integer(0)) cl <- closepairs(x, 0, what="indices") I <- cl$i J <- cl$j if(length(I) == 0) return(rep.int(1L, np)) switch(markformat(x), none = { }, vector = { marx <- as.data.frame(marks(x)) agree <- IdenticalRows(I, J, marx) I <- I[agree] J <- J[agree] }, dataframe = { marx <- marks(x) agree <- IdenticalRows(I, J, marx) I <- I[agree] J <- J[agree] }, hyperframe = { marx <- as.data.frame(marks(x)) # possibly discards columns agree <- IdenticalRows(I, J, marx) I <- I[agree] J <- J[agree] }, list = stop("Not implemented for lists of marks") ) if(length(I) == 0) return(rep.int(1L, np)) JbyI <- split(J, factor(I, levels=1:np)) result <- 1 + lengths(JbyI) return(result) } multiplicity.data.frame <- function (x) { if(all(unlist(lapply(x, is.numeric)))) return(multiplicityNumeric(as.matrix(x))) ## result template (vector of 1's) result <- setNames(rep.int(1L, nrow(x)), rownames(x)) ## check for duplicates (works for data frames, arrays and vectors) ## CAVE: comparisons are based on a character representation of x if (!any(dup <- duplicated(x))) return(result) ux <- x[!dup, , drop=FALSE] dx <- x[dup, , drop=FALSE] nu <- nrow(ux) nd <- nrow(dx) hit <- outer(seq_len(nu), seq_len(nd), IdenticalRows, a=ux, b=dx) counts <- as.integer(1L + .rowSums(hit, nu, nd)) result[!dup] <- counts dumap <- apply(hit, 2, match, x=TRUE) # equivalent to min(which(z)) result[dup] <- counts[dumap] return(result) } ### multiplicity method for NUMERIC arrays, data frames, and vectors ### This implementation is simply based on checking for dist(x)==0 multiplicityNumeric <- function(x) { if (anyDuplicated(x)) { distmat <- 
as.matrix(dist(x, method="manhattan")) # faster than euclid. as.integer(rowSums(distmat == 0)) # labels are kept } else { # -> vector of 1's nx <- NROW(x) labels <- if (length(dim(x))) rownames(x) else names(x) if (is.null(labels)) labels <- seq_len(nx) setNames(rep.int(1L, nx), labels) } } ### multiplicity method for arrays, data frames, and vectors (including lists) ### It also works for non-numeric data, since it is based on duplicated(). multiplicity.default <- function (x) { if(is.numeric(x)) return(multiplicityNumeric(x)) nx <- NROW(x) # also works for a vector x ## result template (vector of 1's) labels <- if (length(dim(x))) rownames(x) else names(x) if (is.null(labels)) labels <- seq_len(nx) result <- setNames(rep.int(1L, nx), labels) ## check for duplicates (works for data frames, arrays and vectors) ## CAVE: comparisons are based on a character representation of x if (!any(dup <- duplicated(x))) return(result) ## convert x to a matrix for IdenticalRows() x <- as.matrix(x) dimnames(x) <- NULL # discard any names! ux <- x[!dup, , drop=FALSE] dx <- x[dup, , drop=FALSE] nu <- nrow(ux) nd <- nrow(dx) hit <- outer(seq_len(nu), seq_len(nd), IdenticalRows, a=ux, b=dx) counts <- as.integer(1L + .rowSums(hit, nu, nd)) dumap <- apply(hit, 2, match, x=TRUE) # was: function(z) min(which(z))) result[dup] <- counts[dumap] return(result) } spatstat/R/resid4plot.R0000755000176200001440000005765613115271120014551 0ustar liggesusers# # # Residual plots: # resid4plot four panels with matching coordinates # resid1plot one or more unrelated individual plots # resid1panel one panel of resid1plot # # $Revision: 1.34 $ $Date: 2016/04/19 00:11:51 $ # # resid4plot <- local({ Contour <- function(..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args) { ## avoid passing arguments of plot.ppp to contour.default contour(...) 
} do.clean <- function(fun, ..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args, nlevels, levels, labels, drawlabels, labcex) { ## avoid passing arguments of plot.ppp, contour.default to other functions do.call(fun, list(...)) } do.lines <- function(x, y, defaulty=1, ...) { do.call(lines, resolve.defaults(list(x, y), list(...), list(lty=defaulty))) } resid4plot <- function(RES, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL, xlab="x coordinate", ylab="y coordinate", rlab, col.neg=NULL, col.smooth=NULL, ...) { plot.neg <- match.arg(plot.neg) if(missing(rlab)) rlab <- NULL rlablines <- if(is.null(rlab)) 1 else sum(nzchar(rlab)) clip <- RES$clip Yclip <- RES$Yclip Z <- RES$smooth$Z W <- RES$W Wclip <- Yclip$window type <- RES$type typename <- RES$typename Ydens <- RES$Ydens[Wclip, drop=FALSE] Ymass <- RES$Ymass[Wclip] # set up 2 x 2 plot with space wide <- diff(W$xrange) high <- diff(W$yrange) space <- spacing * max(wide,high) width <- wide + space + wide height <- high + space + high outerspace <- outer * space outerRspace <- (outer - 1 + rlablines) * space plot(c(0, width) + c(-outerRspace, outerspace), c(0, height) + c(-outerspace, outerRspace), type="n", asp=1.0, axes=FALSE, xlab="", ylab="") # determine colour map for background if(is.null(srange)) { Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL Zrange <- if(!is.null(Z)) summary(Z)$range else NULL srange <- range(c(0, Yrange, Zrange), na.rm=TRUE) } else { stopifnot(is.numeric(srange) && length(srange) == 2) stopifnot(all(is.finite(srange))) } backcols <- beachcolours(srange, if(type=="eem") 1 else 0, monochrome) if(is.null(col.neg)) col.neg <- backcols if(is.null(col.smooth)) col.smooth <- backcols # ------ plot residuals/marks (in top left panel) ------------ Xlowleft <- c(W$xrange[1],W$yrange[1]) vec <- c(0, 
high) + c(0, space) - Xlowleft # shift the original window Ws <- shift(W, vec) # shift the residuals Ys <- shift(Yclip,vec) # determine whether pre-plotting the window(s) is redundant redundant <- (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask") # pre-plot the window(s) if(!redundant) { if(!clip) do.clean(plot, Ys$window, add=TRUE, ...) else do.clean(ploterodewin, Ws, Ys$window, add=TRUE, ...) } ## adjust position of legend associated with eroded window sep <- if(clip) Wclip$yrange[1] - W$yrange[1] else NULL ## decide whether mark scale should be shown showscale <- (type != "raw") switch(plot.neg, discrete={ neg <- (Ys$marks < 0) ## plot negative masses of discretised measure as squares if(any(c("maxsize","meansize","markscale") %in% names(list(...)))) { plot(Ys[neg], add=TRUE, legend=FALSE, ...) } else { hackmax <- 0.5 * sqrt(area(Wclip)/Yclip$n) plot(Ys[neg], add=TRUE, legend=FALSE, maxsize=hackmax, ...) } ## plot positive masses at atoms plot(Ys[!neg], add=TRUE, leg.side="left", leg.args=list(sep=sep), show.all=TRUE, main="", ...) }, contour = { Yds <- shift(Ydens, vec) Yms <- shift(Ymass, vec) Contour(Yds, add=TRUE, ...) do.call(plot, resolve.defaults(list(x=Yms, add=TRUE), list(...), list(use.marks=showscale, leg.side="left", show.all=TRUE, main="", leg.args=list(sep=sep)))) }, imagecontour=, image={ Yds <- shift(Ydens, vec) Yms <- shift(Ymass, vec) if(redundant) do.clean(ploterodeimage, Ws, Yds, rangeZ=srange, colsZ=col.neg, ...) else if(type != "eem") do.clean(image, Yds, add=TRUE, ribbon=FALSE, col=col.neg, zlim=srange, ...) if(plot.neg == "imagecontour") Contour(Yds, add=TRUE, ...) 
## plot positive masses at atoms do.call(plot, resolve.defaults(list(x=Yms, add=TRUE), list(...), list(use.marks=showscale, leg.side="left", show.all=TRUE, main="", leg.args=list(sep=sep)))) } ) # --------- plot smoothed surface (in bottom right panel) ------------ vec <- c(wide, 0) + c(space, 0) - Xlowleft Zs <- shift.im(Z, vec) switch(plot.smooth, image={ do.clean(image, Zs, add=TRUE, col=col.smooth, zlim=srange, ribbon=FALSE, ...) }, contour={ Contour(Zs, add=TRUE, ...) }, persp={ warning("persp not available in 4-panel plot") }, imagecontour={ do.clean(image, Zs, add=TRUE, col=col.smooth, zlim=srange, ribbon=FALSE, ...) Contour(Zs, add=TRUE, ...) } ) lines(Zs$xrange[c(1,2,2,1,1)], Zs$yrange[c(1,1,2,2,1)]) # -------------- lurking variable plots ----------------------- # --------- lurking variable plot for x coordinate ------------------ # (cumulative or marginal) # in bottom left panel if(!is.null(RES$xmargin)) { a <- RES$xmargin observedV <- a$xZ observedX <- a$x theoreticalV <- a$ExZ theoreticalX <- a$x theoreticalSD <- theoreticalHI <- theoreticalLO <- NULL if(is.null(rlab)) rlab <- paste("marginal of", typename) } else if(!is.null(RES$xcumul)) { a <- RES$xcumul observedX <- a$empirical$covariate observedV <- a$empirical$value theoreticalX <- a$theoretical$covariate theoreticalV <- a$theoretical$mean theoreticalSD <- a$theoretical$sd theoreticalHI <- a$theoretical$upper theoreticalLO <- a$theoretical$lower if(is.null(rlab)) rlab <- paste("cumulative sum of", typename) } # pretty axis marks pX <- pretty(theoreticalX) rV <- range(0, observedV, theoreticalV, theoreticalHI, theoreticalLO) if(!is.null(theoreticalSD)) rV <- range(rV, theoreticalV+2*theoreticalSD, theoreticalV-2*theoreticalSD) pV <- pretty(rV) # rescale smoothed values rr <- range(c(0, observedV, theoreticalV, pV)) yscale <- function(y) { high * (y - rr[1])/diff(rr) } xscale <- function(x) { x - W$xrange[1] } if(!is.null(theoreticalHI)) do.call.matched(polygon, resolve.defaults( 
list(x=xscale(c(theoreticalX, rev(theoreticalX))), y=yscale(c(theoreticalHI, rev(theoreticalLO)))), list(...), list(col="grey", border=NA))) do.clean(do.lines, xscale(observedX), yscale(observedV), 1, ...) do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV), 2, ...) if(!is.null(theoreticalSD)) { do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV + 2 * theoreticalSD), 3, ...) do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV - 2 * theoreticalSD), 3, ...) } axis(side=1, pos=0, at=xscale(pX), labels=pX) text(xscale(mean(theoreticalX)), - outerspace, xlab) axis(side=2, pos=0, at=yscale(pV), labels=pV) text(-outerRspace, yscale(mean(pV)), rlab, srt=90) # --------- lurking variable plot for y coordinate ------------------ # (cumulative or marginal) # in top right panel if(!is.null(RES$ymargin)) { a <- RES$ymargin observedV <- a$yZ observedY <- a$y theoreticalV <- a$EyZ theoreticalY <- a$y theoreticalSD <- NULL if(is.null(rlab)) rlab <- paste("marginal of", typename) } else if(!is.null(RES$ycumul)) { a <- RES$ycumul observedV <- a$empirical$value observedY <- a$empirical$covariate theoreticalY <- a$theoretical$covariate theoreticalV <- a$theoretical$mean theoreticalSD <- a$theoretical$sd theoreticalHI <- a$theoretical$upper theoreticalLO <- a$theoretical$lower if(is.null(rlab)) rlab <- paste("cumulative sum of", typename) } # pretty axis marks pY <- pretty(theoreticalY) rV <- range(0, observedV, theoreticalV, theoreticalHI, theoreticalLO) if(!is.null(theoreticalSD)) rV <- range(rV, theoreticalV+2*theoreticalSD, theoreticalV-2*theoreticalSD) pV <- pretty(rV) # rescale smoothed values rr <- range(c(0, observedV, theoreticalV, pV)) yscale <- function(y) { y - W$yrange[1] + high + space} xscale <- function(x) { wide + space + wide * (rr[2] - x)/diff(rr) } if(!is.null(theoreticalHI)) do.call.matched(polygon, resolve.defaults( list(x=xscale(c(theoreticalHI, rev(theoreticalLO))), y=yscale(c(theoreticalY, rev(theoreticalY)))), list(...), 
list(col="grey", border=NA))) do.clean(do.lines, xscale(observedV), yscale(observedY), 1, ...) do.clean(do.lines, xscale(theoreticalV), yscale(theoreticalY), 2, ...) if(!is.null(theoreticalSD)) { do.clean(do.lines, xscale(theoreticalV+2*theoreticalSD), yscale(theoreticalY), 3, ...) do.clean(do.lines, xscale(theoreticalV-2*theoreticalSD), yscale(theoreticalY), 3, ...) } axis(side=4, pos=width, at=yscale(pY), labels=pY) text(width + outerspace, yscale(mean(theoreticalY)), ylab, srt=90) axis(side=3, pos=height, at=xscale(pV), labels=pV) text(xscale(mean(pV)), height + outerRspace, rlab) # if(!is.null(main)) title(main=main) invisible(NULL) } resid4plot }) # # # Residual plot: single panel(s) # # resid1plot <- local({ Contour <- function(..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args) { ## avoid passing arguments of plot.ppp to contour.default contour(...) } do.clean <- function(fun, ..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args, nlevels, levels, labels, drawlabels, labcex) { ## avoid passing arguments of plot.ppp, contour.default to other functions do.call(fun, list(...)) } resid1plot <- function(RES, opt, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), srange=NULL, monochrome=FALSE, main=NULL, add=FALSE, show.all=!add, do.plot=TRUE, col.neg=NULL, col.smooth=NULL, ...) 
{ if(!any(unlist(opt[c("all", "marks", "smooth", "xmargin", "ymargin", "xcumul", "ycumul")]))) return(invisible(NULL)) if(!add && do.plot) { ## determine size of plot area by calling again with do.plot=FALSE cl <- match.call() cl$do.plot <- FALSE b <- eval(cl, parent.frame()) bb <- as.owin(b, fatal=FALSE) if(is.owin(bb)) { ## initialise plot area plot(bb, type="n", main="") force(show.all) add <- TRUE } } ## extract info clip <- RES$clip Y <- RES$Y Yclip <- RES$Yclip Z <- RES$smooth$Z W <- RES$W Wclip <- Yclip$window type <- RES$type Ydens <- RES$Ydens[Wclip, drop=FALSE] Ymass <- RES$Ymass[Wclip] ## determine colour map if(opt$all || opt$marks || opt$smooth) { if(is.null(srange)) { Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL Zrange <- if(!is.null(Z)) summary(Z)$range else NULL srange <- range(c(0, Yrange, Zrange), na.rm=TRUE) } backcols <- beachcolours(srange, if(type=="eem") 1 else 0, monochrome) if(is.null(col.neg)) col.neg <- backcols if(is.null(col.smooth)) col.smooth <- backcols } ## determine main heading if(is.null(main)) { prefix <- if(opt$marks) NULL else if(opt$smooth) "Smoothed" else if(opt$xcumul) "Lurking variable plot for x coordinate\n" else if(opt$ycumul) "Lurking variable plot for y coordinate\n" else if(opt$xmargin) "Lurking variable plot for x coordinate\n" else if(opt$ymargin) "Lurking variable plot for y coordinate\n" else NULL main <- paste(prefix, RES$typename) } ## ------------- residuals --------------------------------- if(opt$marks) { ## determine whether pre-plotting the window(s) is redundant redundant <- (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask") ## pre-plot the window(s) if(redundant && !add) { z <- do.clean(plot, as.rectangle(W), box=FALSE, main="", do.plot=do.plot, ...) } else { if(!clip) z <- do.clean(plot, W, main="", add=add, show.all=show.all, do.plot=do.plot, ...) else z <- do.clean(ploterodewin, W, Wclip, main="", add=add, show.all=show.all, do.plot=do.plot, ...) 
} bb <- as.owin(z) switch(plot.neg, discrete={ neg <- (Y$marks < 0) ## plot negative masses of discretised measure as squares if(any(c("maxsize", "markscale") %in% names(list(...)))) { z <- plot(Y[neg], add=TRUE, show.all=show.all, do.plot=do.plot, ...) } else { hackmax <- 0.5 * sqrt(area(Wclip)/Yclip$n) z <- plot(Y[neg], add=TRUE, maxsize=hackmax, show.all=show.all, do.plot=do.plot, ...) } ## plot positive masses at atoms zp <- plot(Y[!neg], add=TRUE, show.all=show.all, do.plot=do.plot, ...) bb <- boundingbox(bb, z, zp) }, contour = { z <- Contour(Ydens, add=TRUE, do.plot=do.plot, ...) bb <- boundingbox(bb, z) }, imagecontour=, image={ if(redundant) { z <- do.clean(ploterodeimage, W, Ydens, rangeZ=srange, colsZ=col.neg, add=add, show.all=show.all, main="", do.plot=do.plot, ...) } else if(type != "eem") { z <- do.clean(image, Ydens, col=col.neg, zlim=srange, ribbon=FALSE, add=TRUE, show.all=show.all, do.plot=do.plot, main="", ...) } bb <- boundingbox(bb, z) if(plot.neg == "imagecontour") { z <- Contour(Ydens, add=TRUE, show.all=show.all, do.plot=do.plot, ...) bb <- boundingbox(bb, z) } ## decide whether mark scale should be shown showscale <- (type != "raw") ## plot positive masses at atoms z <- do.call(plot, resolve.defaults(list(x=Ymass, add=TRUE), list(...), list(use.marks=showscale, do.plot=do.plot))) bb <- boundingbox(bb, z) } ) if(do.plot && show.all) title(main=main) } # ------------- smooth ------------------------------------- if(opt$smooth) { if(!clip) { switch(plot.smooth, image={ z <- do.clean(image, Z, main="", axes=FALSE, xlab="", ylab="", col=col.smooth, zlim=srange, ribbon=FALSE, do.plot=do.plot, add=add, show.all=show.all, ...) bb <- as.owin(z) }, contour={ z <- Contour(Z, main="", axes=FALSE, xlab="", ylab="", do.plot=do.plot, add=add, show.all=show.all, ...) bb <- as.owin(z) }, persp={ if(do.plot) do.clean(persp, Z, main="", axes=FALSE, xlab="", ylab="", ...) 
bb <- NULL }, imagecontour={ z <- do.clean(image, Z, main="", axes=FALSE, xlab="", ylab="", col=col.smooth, zlim=srange, ribbon=FALSE, do.plot=do.plot, add=add, show.all=show.all, ...) Contour(Z, add=TRUE, do.plot=do.plot, ...) bb <- as.owin(z) } ) if(do.plot && show.all) title(main=main) } else { switch(plot.smooth, image={ do.clean(plot, as.rectangle(W), box=FALSE, main=main, do.plot=do.plot, add=add, ...) z <- do.clean(ploterodeimage, W, Z, colsZ=col.smooth, rangeZ=srange, do.plot=do.plot, ...) bb <- boundingbox(as.rectangle(W), z) }, contour={ do.clean(plot, W, main=main, do.plot=do.plot, add=add, show.all=show.all, ...) z <- Contour(Z, add=TRUE, show.all=show.all, do.plot=do.plot, ...) bb <- as.owin(z) }, persp={ if(do.plot) do.clean(persp, Z, main=main, axes=FALSE, xlab="", ylab="", ...) bb <- NULL }, imagecontour={ do.clean(plot, as.rectangle(W), box=FALSE, main=main, do.plot=do.plot, add=add, ...) z <- do.clean(ploterodeimage, W, Z, colsZ=col.smooth, rangeZ=srange, do.plot=do.plot, ...) Contour(Z, add=TRUE, do.plot=do.plot, ...) 
bb <- as.owin(z) } ) } } # ------------ cumulative x ----------------------------------------- if(opt$xcumul) { a <- RES$xcumul obs <- a$empirical theo <- a$theoretical do.clean(resid1panel, obs$covariate, obs$value, theo$covariate, theo$mean, theo$sd, "x coordinate", "cumulative mark", main=main, ..., do.plot=do.plot) bb <- NULL } # ------------ cumulative y ----------------------------------------- if(opt$ycumul) { a <- RES$ycumul obs <- a$empirical theo <- a$theoretical do.clean(resid1panel, obs$covariate, obs$value, theo$covariate, theo$mean, theo$sd, "y coordinate", "cumulative mark", main=main, ..., do.plot=do.plot) bb <- NULL } ## ------------ x margin ----------------------------------------- if(opt$xmargin) { a <- RES$xmargin do.clean(resid1panel, a$x, a$xZ, a$x, a$ExZ, NULL, "x coordinate", "marginal of residuals", main=main, ..., do.plot=do.plot) bb <- NULL } # ------------ y margin ----------------------------------------- if(opt$ymargin) { a <- RES$ymargin do.clean(resid1panel, a$y, a$yZ, a$y, a$EyZ, NULL, "y coordinate", "marginal of residuals", main=main, ..., do.plot=do.plot) bb <- NULL } attr(bb, "bbox") <- bb return(invisible(bb)) } resid1plot }) resid1panel <- local({ do.lines <- function(x, y, defaulty=1, ...) { do.call(lines, resolve.defaults(list(x, y), list(...), list(lty=defaulty))) } resid1panel <- function(observedX, observedV, theoreticalX, theoreticalV, theoreticalSD, xlab, ylab, ..., do.plot=TRUE) { if(!do.plot) return(NULL) ## work out plot range rX <- range(observedX, theoreticalX) rV <- range(c(0, observedV, theoreticalV)) if(!is.null(theoreticalSD)) rV <- range(c(rV, theoreticalV + 2*theoreticalSD, theoreticalV - 2*theoreticalSD)) ## argument handling ## start plot plot(rX, rV, type="n", xlab=xlab, ylab=ylab, ...) do.lines(observedX, observedV, 1, ...) do.lines(theoreticalX, theoreticalV, 2, ...) if(!is.null(theoreticalSD)) { do.lines(theoreticalX, theoreticalV + 2 * theoreticalSD, 3, ...) 
do.lines(theoreticalX, theoreticalV - 2 * theoreticalSD, 3, ...)
    }
  }

  resid1panel
})

#
#

ploterodewin <- function(W1, W2, col.edge=grey(0.75), col.inside=rgb(1,0,0),
                         do.plot=TRUE, ...) {
  ## Internal helper: display the window W1 together with W2,
  ## where W2 is assumed to be an erosion of W1.
  switch(W1$type,
         rectangle=,
         polygonal={
           ## vector geometry: draw W1, then overlay W2 with dashed lines
           z <- plot(W1, ..., do.plot=do.plot)
           plot(W2, add=TRUE, lty=2, do.plot=do.plot)
         },
         mask={
           ## binary mask: build a two-level image,
           ## level 1 = edge zone of W1, level 2 = pixels inside W2
           img <- as.im(W1)
           px <- as.vector(rasterx.mask(W1))
           py <- as.vector(rastery.mask(W1))
           inner <- inside.owin(px, py, W2)
           img$v[inner] <- 2
           z <- plot(img, ..., col=c(col.edge, col.inside), add=TRUE,
                     ribbon=FALSE, do.plot=do.plot)
         }
         )
  return(z)
}

ploterodeimage <- function(W, Z, ..., Wcol=grey(0.75), rangeZ, colsZ,
                           do.plot=TRUE) {
  ## Internal helper.
  ## Image Z is assumed to live on a subset of the mask W;
  ## 'colsZ' are the colours for values in the range 'rangeZ'.
  if(!is.mask(W)) {
    if(do.plot) plot(W, add=TRUE)
    W <- as.mask(W)
  }
  ## Extend the colour map by one extra colour representing
  ## pixels of W that lie outside the domain of Z.
  ## (1) prepend the desired colour of W
  fullcols <- c(Wcol, colsZ)
  ## (2) breakpoints: the original breaks, extended downward by one bin
  brk <- seq(from=rangeZ[1], to=rangeZ[2], length=length(colsZ)+1)
  step <- diff(brk)[1]
  fullbreaks <- c(rangeZ[1] - step, brk)
  ## (3) a fake value, inside the reserved bin, to mark W-pixels
  Wvalue <- rangeZ[1] - step/2
  ## Composite image on the grid of W, initialised to the fake value
  X <- as.im(Wvalue, W)
  ## Look up the Z-value at each pixel of W
  gx <- as.vector(rasterx.mask(W))
  gy <- as.vector(rastery.mask(W))
  Zvals <- lookup.im(Z, gx, gy, naok = TRUE, strict=FALSE)
  ## Overwrite wherever Z is defined
  hit <- !is.na(Zvals)
  X$v[hit] <- Zvals[hit]
  z <- image(X, ..., add=TRUE, ribbon=FALSE,
             col=fullcols, breaks=fullbreaks, do.plot=do.plot)
  out <- list(X, fullcols, fullbreaks)
  attr(out, "bbox") <- as.owin(z)
  return(out)
}
spatstat/R/rmhstart.R0000755000176200001440000000473413115271120014311 0ustar liggesusers#
#
#   rmhstart.R
#
#   $Revision: 1.12 $  $Date: 2016/02/11 10:17:12 $
#
#
rmhstart <- function(start, ...) {
  ## Generic: construct Metropolis-Hastings starting-state parameters.
  UseMethod("rmhstart")
}

rmhstart.rmhstart <- function(start, ...) {
  ## Already an 'rmhstart' object: return it unchanged.
  start
}

rmhstart.list <- function(start, ...) {
  ## A plain list: match its entries against the arguments
  ## of rmhstart.default.
  do.call.matched(rmhstart.default, start)
}

rmhstart.default <- function(start=NULL, ..., n.start=NULL, x.start=NULL) {
  ## Build an 'rmhstart' object from either a fixed number of points
  ## (n.start) or an initial point configuration (x.start).
  if(!is.null(start) || length(list(...)) > 0)
    stop("Syntax should be rmhstart(n.start) or rmhstart(x.start)")

  has.n <- !is.null(n.start)
  has.x <- !is.null(x.start)

  ## n.start and x.start are mutually exclusive
  if(has.n && has.x)
    stop("Give only one of the arguments n.start and x.start")

  given <- if(has.n) "n" else if(has.x) "x" else "none"

  ## Validate arguments
  if(has.n && !is.numeric(n.start))
    stop("n.start should be numeric")

  xx <- NULL
  if(has.x) {
    ## x.start cannot be fully validated here because the relevant
    ## window is unknown; just check that it is INTERPRETABLE
    ## as a point pattern
    xx <- as.ppp(x.start, W=ripras, fatal=FALSE)
    if(is.null(xx))
      stop(paste("x.start should be a point pattern object,",
                 "or coordinate data in a format recognised by as.ppp"))
  }

  ## return augmented list
  out <- list(n.start=n.start, x.start=x.start, given=given, xx=xx)
  class(out) <- c("rmhstart", class(out))
  out
}

print.rmhstart <- function(x, ...) {
  ## Print method for 'rmhstart' objects.
  verifyclass(x, "rmhstart")
  cat("Metropolis-Hastings algorithm starting parameters\n")
  cat("Initial state: ")
  switch(x$given,
         none = {
           cat("not given\n")
         },
         x = {
           cat("given as x.start\n")
           if(is.ppp(x$x.start)) {
             print(x$x.start)
           } else {
             cat(paste("(x,y) coordinates of", x$xx$n,
                       "points (window unspecified)\n"))
           }
           cat("\n")
         },
         n = {
           nst <- x$n.start
           label <- if(length(nst) == 1) paste(nst) else
                    paste0("(", paste(nst, collapse=","), ")")
           cat(paste("number fixed at n.start =", label, "\n"))
         }
         )
}

update.rmhstart <- function(object, ...)
{
  do.call.matched(rmhstart.default,
                  resolve.defaults(list(...),
                                   as.list(object),
                                   .StripNull=TRUE))
}
spatstat/R/distances.psp.R0000755000176200001440000001124013115271075015222 0ustar liggesusers#
#  distances.psp.R
#
#  Hausdorff distance and Euclidean separation for psp objects
#
#  $Revision: 1.11 $ $Date: 2015/10/21 09:06:57 $
#
#

pairdist.psp <- function(X, ..., method="C", type="Hausdorff") {
  ## Matrix of distances between all pairs of segments in X,
  ## either in the Hausdorff metric or the separation metric.
  verifyclass(X, "psp")
  if(X$n == 0) return(matrix(, 0, 0))
  type <- pickoption("type", type,
                     c(Hausdorff="Hausdorff",
                       hausdorff="Hausdorff",
                       separation="separation"))
  D12 <- AsymmDistance.psp(X, X, metric=type, method=method)
  switch(type,
         Hausdorff={
           # maximum is Hausdorff metric
           D <- array(pmax.int(D12, t(D12)), dim=dim(D12))
         },
         separation={
           # Take minimum of endpoint-to-segment distances
           D <- array(pmin.int(D12, t(D12)), dim=dim(D12))
           # Identify any pairs of segments which cross
           cross <- test.selfcrossing.psp(X)
           # Assign separation = 0 to such pairs
           D[cross] <- 0
         })
  return(D)
}

crossdist.psp <- function(X, Y, ..., method="C", type="Hausdorff") {
  ## Matrix of distances between each segment of X and each segment of Y.
  verifyclass(X, "psp")
  Y <- as.psp(Y)
  if(X$n * Y$n == 0) return(matrix(, X$n, Y$n))
  type <- pickoption("type", type,
                     c(Hausdorff="Hausdorff",
                       hausdorff="Hausdorff",
                       separation="separation"))
  DXY <- AsymmDistance.psp(X, Y, metric=type, method=method)
  DYX <- AsymmDistance.psp(Y, X, metric=type, method=method)
  switch(type,
         Hausdorff={
           # maximum is Hausdorff metric
           D <- array(pmax.int(DXY, t(DYX)), dim=dim(DXY))
         },
         separation={
           # Take minimum of endpoint-to-segment distances
           D <- array(pmin.int(DXY, t(DYX)), dim=dim(DXY))
           # Identify pairs of segments which cross
           cross <- test.crossing.psp(X, Y)
           # Assign separation = 0 to such pairs
           D[cross] <- 0
         })
  return(D)
}

nndist.psp <- function(X, ..., k=1, method="C") {
  ## Distance from each segment of X to its k-th nearest
  ## other segment, for each requested order k.
  verifyclass(X, "psp")
  if(!(is.vector(k) && all(k %% 1 == 0) && all(k >= 1)))
    stop("k should be a positive integer or integers")
  n <- nobjects(X)
  kmax <- max(k)
  lenk <- length(k)
  result <- if(lenk == 1) numeric(n) else matrix(, nrow=n, ncol=lenk)
  if(n == 0) return(result)
  if(kmax >= n) {
    # not enough objects
    # fill with Infinite values
    result[] <- Inf
    # BUG FIX: test each requested order against n.
    # Previously this read 'ok <- (kmax < n)', which is always FALSE
    # inside this branch (the branch condition is kmax >= n), so the
    # achievable lower-order neighbour distances were never computed.
    if(any(ok <- (k < n))) {
      # compute the lower-order nnd's
      result[, ok] <- nndist.psp(X, ..., k=k[ok], method=method)
    }
    return(result)
  }
  # normal case:
  D <- pairdist.psp(X, ..., method=method)
  diag(D) <- Inf
  if(kmax == 1L)
    NND <- apply(D, 1L, min)
  else
    NND <- t(apply(D, 1L, orderstats, k=k))[, , drop=TRUE]
  return(NND)
}

# ..... AsymmDistance.psp .....
#
#    If metric="Hausdorff":
# this function computes, for each pair of segments A = X[i] and B = Y[j],
# the value  max_{a in A} d(a,B) = max_{a in A} min_{b in B} ||a-b||
# which appears in the definition of the Hausdorff metric.
# Since the distance function d(a,B) of a segment B is a convex function,
# the maximum is achieved at an endpoint of A. So the algorithm
# actually computes h(A,B) = max (d(e_1,B), d(e_2,B)) where e_1, e_2
# are the endpoints of A. And H(A,B) = max(h(A,B),h(B,A)).
#
#    If metric="separation":
# the function computes, for each pair of segments A = X[i] and B = Y[j],
# the MINIMUM distance from an endpoint of A to any point of B.
#         t(A,B) = min (d(e_1,B), d(e_2,B))
# where e_1, e_2 are the endpoints of A.
# Define the separation distance
#        s(A,B) = min_{a in A} min_{b in B} ||a-b||.
# The minimum (a*, b*) occurs either when a* is an endpoint of A,
# or when b* is an endpoint of B, or when a* = b* (so A and B intersect).
# (If A and B are parallel, the minimum is still achieved at an endpoint)
# Thus s(A,B) = min(t(A,B), t(B,A)) unless A and B intersect.
AsymmDistance.psp <- function(X, Y, metric="Hausdorff", method=c("C", "Fortran", "interpreted")) { method <- match.arg(method) # Extract endpoints of X EX <- endpoints.psp(X, "both") idX <- attr(EX, "id") # compute shortest dist from each endpoint of X to each segment of Y DPL <- distppll(cbind(EX$x,EX$y), Y$ends, mintype=0, method=method) # for each segment in X, maximise or minimise over the two endpoints Dist <- as.vector(DPL) Point <- as.vector(idX[row(DPL)]) Segment <- as.vector(col(DPL)) switch(metric, Hausdorff={ DXY <- tapply(Dist, list(factor(Point), factor(Segment)), max) }, separation={ DXY <- tapply(Dist, list(factor(Point), factor(Segment)), min) }) return(DXY) } spatstat/R/lineardisc.R0000755000176200001440000001714613115271120014563 0ustar liggesusers# # # disc.R # # $Revision: 1.27 $ $Date: 2017/06/05 10:31:58 $ # # Compute the disc of radius r in a linear network # # lineardisc <- function(L, x=locator(1), r, plotit=TRUE, cols=c("blue", "red", "green")) { # L is the linear network (object of class "linnet") # x is the centre point of the disc # r is the radius of the disc # stopifnot(inherits(L, "linnet")) check.1.real(r) if(L$sparse) { message("Converting linear network to non-sparse representation..") L <- as.linnet(L, sparse=FALSE) } lines <- L$lines vertices <- L$vertices lengths <- lengths.psp(lines) win <- L$window # # project x to nearest segment if(missing(x)) x <- clickppp(1, win, add=TRUE) else x <- as.ppp(x, win) pro <- project2segment(x, lines) # which segment? startsegment <- pro$mapXY # parametric position of x along this segment startfraction <- pro$tp # vertices at each end of this segment A <- L$from[startsegment] B <- L$to[startsegment] # distances from x to A and B dxA <- startfraction * lengths[startsegment] dxB <- (1-startfraction) * lengths[startsegment] # is r large enough to reach both A and B? 
startfilled <- (max(dxA, dxB) <= r) # compute vector of shortest path distances from x to each vertex j, # going through A: dxAv <- dxA + L$dpath[A,] # going through B: dxBv <- dxB + L$dpath[B,] # going either through A or through B: dxv <- pmin.int(dxAv, dxBv) # Thus dxv[j] is the shortest path distance from x to vertex j. # # Determine which vertices are inside the disc of radius r covered <- (dxv <= r) # Thus covered[j] is TRUE if the j-th vertex is inside the disc. # # Determine which line segments are completely inside the disc # from <- L$from to <- L$to # ( a line segment is inside the disc if the shortest distance # from x to one of its endpoints, plus the length of the segment, # is less than r .... allinside <- (dxv[from] + lengths <= r) | (dxv[to] + lengths <= r) # ... or alternatively, if the sum of the # two residual distances exceeds the length of the segment ) residfrom <- pmax.int(0, r - dxv[from]) residto <- pmax.int(0, r - dxv[to]) allinside <- allinside | (residfrom + residto >= lengths) # start segment is special allinside[startsegment] <- startfilled # Thus allinside[k] is TRUE if the k-th segment is inside the disc # Collect all these segments disclines <- lines[allinside] # # Determine which line segments cross the boundary of the disc boundary <- (covered[from] | covered[to]) & !allinside # For each of these, calculate the remaining distance at each end resid.from <- ifelseXB(boundary, pmax.int(r - dxv[from], 0), 0) resid.to <- ifelseXB(boundary, pmax.int(r - dxv[to], 0), 0) # Where the remaining distance is nonzero, create segment and endpoint okfrom <- (resid.from > 0) okfrom[startsegment] <- FALSE if(any(okfrom)) { v0 <- vertices[from[okfrom]] v1 <- vertices[to[okfrom]] tp <- (resid.from/lengths)[okfrom] vfrom <- ppp((1-tp)*v0$x + tp*v1$x, (1-tp)*v0$y + tp*v1$y, window=win) extralinesfrom <- as.psp(from=v0, to=vfrom) } else vfrom <- extralinesfrom <- NULL # okto <- (resid.to > 0) okto[startsegment] <- FALSE if(any(okto)) { v0 <- 
vertices[to[okto]] v1 <- vertices[from[okto]] tp <- (resid.to/lengths)[okto] vto <- ppp((1-tp)*v0$x + tp*v1$x, (1-tp)*v0$y + tp*v1$y, window=win) extralinesto <- as.psp(from=v0, to=vto) } else vto <- extralinesto <- NULL # # deal with special case where start segment is not fully covered if(!startfilled) { vA <- vertices[A] vB <- vertices[B] rfrac <- r/lengths[startsegment] tleft <- pmax.int(startfraction-rfrac, 0) tright <- pmin.int(startfraction+rfrac, 1) vleft <- ppp((1-tleft) * vA$x + tleft * vB$x, (1-tleft) * vA$y + tleft * vB$y, window=win) vright <- ppp((1-tright) * vA$x + tright * vB$x, (1-tright) * vA$y + tright * vB$y, window=win) startline <- as.psp(from=vleft, to=vright) startends <- superimpose(if(!covered[A]) vleft else NULL, if(!covered[B]) vright else NULL) } else startline <- startends <- NULL # # combine all lines disclines <- superimpose(disclines, extralinesfrom, extralinesto, startline, W=win, check=FALSE) # combine all disc endpoints discends <- superimpose(vfrom, vto, vertices[dxv == r], startends, W=win, check=FALSE) # if(plotit) { if(dev.cur() == 1) { # null device - initialise a plot plot(L, main="") } points(x, col=cols[1L], pch=16) plot(disclines, add=TRUE, col=cols[2L], lwd=2) plot(discends, add=TRUE, col=cols[3L], pch=16) } return(list(lines=disclines, endpoints=discends)) } countends <- function(L, x=locator(1), r, toler=NULL) { # L is the linear network (object of class "linnet") # x is the centre point of the disc # r is the radius of the disc # stopifnot(inherits(L, "linnet")) # get x if(missing(x)) x <- clickppp(1, Window(L), add=TRUE) if(!inherits(x, "lpp")) x <- as.lpp(x, L=L) np <- npoints(x) if(length(r) != np) stop("Length of vector r does not match number of points in x") # if(!is.connected(L)) { #' disconnected network - split into components result <- numeric(np) lab <- connected(L, what="labels") subsets <- split(seq_len(nvertices(L)), factor(lab)) for(subi in subsets) { xi <- thinNetwork(x, retainvertices=subi) witch <- 
which(attr(xi, "retainpoints")) ok <- is.finite(r[witch]) witchok <- witch[ok] result[witchok] <- countends(domain(xi), xi[ok], r[witchok], toler=toler) } return(result) } lines <- L$lines vertices <- L$vertices lengths <- lengths.psp(lines) dpath <- L$dpath nv <- vertices$n ns <- lines$n # if(!spatstat.options("Ccountends")) { #' interpreted code result <- integer(np) for(i in seq_len(np)) result[i] <- npoints(lineardisc(L, x[i], r[i], plotit=FALSE)$endpoints) return(result) } # extract coordinates coo <- coords(x) #' which segment startsegment <- coo$seg # parametric position of x along this segment startfraction <- coo$tp # convert indices to C seg0 <- startsegment - 1L from0 <- L$from - 1L to0 <- L$to - 1L # determine numerical tolerance if(is.null(toler)) { toler <- default.linnet.tolerance(L) } else { check.1.real(toler) stopifnot(toler > 0) } zz <- .C("Ccountends", np = as.integer(np), f = as.double(startfraction), seg = as.integer(seg0), r = as.double(r), nv = as.integer(nv), xv = as.double(vertices$x), yv = as.double(vertices$y), ns = as.integer(ns), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), lengths = as.double(lengths), toler=as.double(toler), nendpoints = as.integer(integer(np)), PACKAGE = "spatstat") zz$nendpoints } default.linnet.tolerance <- function(L) { # L could be a linnet or psp if(!is.null(toler <- L$toler)) return(toler) len2 <- lengths.psp(as.psp(L), squared=TRUE) len2pos <- len2[len2 > 0] toler <- if(length(len2pos) == 0) 0 else (0.001 * sqrt(min(len2pos))) toler <- makeLinnetTolerance(toler) return(toler) } makeLinnetTolerance <- function(toler) { max(sqrt(.Machine$double.xmin), toler[is.finite(toler)], na.rm=TRUE) } spatstat/R/edit.R0000644000176200001440000000133313115225157013370 0ustar liggesusers## edit.R ## ## Methods for 'edit' ## ## $Revision: 1.3 $ $Date: 2015/04/19 06:14:21 $ edit.ppp <- local({ edit.ppp <- function(name, ...) 
{
  X <- name
  df <- as.data.frame(X)
  df <- as.data.frame(lapply(df, as.num.or.char))
  Y <- edit(df, ...)
  Z <- as.ppp(Y, W=Window(X))
  return(Z)
}

as.num.or.char <- function(x) {
  if (is.character(x)) x else
  if (is.numeric(x)) {
    storage.mode(x) <- "double"
    x
  } else as.character(x)
}

edit.ppp
})

edit.im <- function(name, ...) {
  ## Method for 'edit' for pixel images (class "im").
  ## The pixel array is transposed into European (human-readable)
  ## orientation, presented to the user as an editable data frame,
  ## then mapped back into spatstat's internal orientation.
  img <- name
  arr <- transmat(as.matrix(img), from="spatstat", to="European")
  newarr <- edit(as.data.frame(arr), ...)
  img[] <- transmat(as.matrix(newarr), from="European", to="spatstat")
  return(img)
}
spatstat/R/deltametric.R0000755000176200001440000000136713115271075014750 0ustar liggesusers#
#  deltametric.R
#
#  Delta metric
#
#  $Revision: 1.4 $  $Date: 2014/10/24 00:22:30 $
#

deltametric <- function(A, B, p=2, c=Inf, ...) {
  stopifnot(is.numeric(p) && length(p) == 1L && p > 0)
  # ensure frames are identical
  bb <- boundingbox(as.rectangle(A), as.rectangle(B))
  # enforce identical frames
  A <- rebound(A, bb)
  B <- rebound(B, bb)
  # compute distance functions
  dA <- distmap(A, ...)
  dB <- distmap(B, ...)
if(!is.infinite(c)) { dA <- eval.im(pmin.int(dA, c)) dB <- eval.im(pmin.int(dB, c)) } if(is.infinite(p)) { # L^infinity Z <- eval.im(abs(dA-dB)) delta <- summary(Z)$max } else { # L^p Z <- eval.im(abs(dA-dB)^p) iZ <- summary(Z)$mean delta <- iZ^(1/p) } return(delta) } spatstat/R/eem.R0000755000176200001440000000062213115271075013214 0ustar liggesusers# eem.R # # Computes the Stoyan-Grabarnik "exponential energy weights" # # $Revision: 1.4 $ $Date: 2008/07/25 19:51:05 $ # eem <- function(fit, check=TRUE) { verifyclass(fit, "ppm") lambda <- fitted.ppm(fit, check=check) Q <- quad.ppm(fit) Z <- is.data(Q) eemarks <- 1/lambda[Z] attr(eemarks, "type") <- "eem" attr(eemarks, "typename") <- "exponential energy marks" return(eemarks) } spatstat/R/exactdt.R0000755000176200001440000000436013115271075014105 0ustar liggesusers# # exactdt.S # S function exactdt() for exact distance transform # # $Revision: 4.17 $ $Date: 2017/06/05 10:31:58 $ # exactdt <- local({ die <- function(why) { stop(paste("ppp object format corrupted:", why)) } exactdt <- function(X, ...) { verifyclass(X, "ppp") w <- X$window if(spatstat.options("exactdt.checks.data")) { ## check validity of ppp structure bb <- as.rectangle(w) xr <- bb$xrange yr <- bb$yrange rx <- range(X$x) ry <- range(X$y) if(rx[1L] < xr[1L] || rx[2L] > xr[2L]) die("x-coordinates out of bounds") if(ry[1L] < yr[1L] || ry[2L] > yr[2L]) die("y-coordinates out of bounds") if(length(X$x) != length(X$y)) die("x and y vectors have different length") if(length(X$x) != X$n) die("length of x,y vectors does not match n") } w <- as.mask(w, ...) 
## dimensions of result nr <- w$dim[1L] nc <- w$dim[2L] ## margins in C array mr <- 2 mc <- 2 ## full dimensions of allocated storage Nnr <- nr + 2 * mr Nnc <- nc + 2 * mc N <- Nnr * Nnc ## output rows & columns (R indexing) rmin <- mr + 1 rmax <- Nnr - mr cmin <- mc + 1 cmax <- Nnc - mc ## go res <- .C("exact_dt_R", as.double(X$x), as.double(X$y), as.integer(X$n), as.double(w$xrange[1L]), as.double(w$yrange[1L]), as.double(w$xrange[2L]), as.double(w$yrange[2L]), nr = as.integer(nr), nc = as.integer(nc), mr = as.integer(mr), mc = as.integer(mc), distances = as.double(double(N)), indices = as.integer(integer(N)), boundary = as.double(double(N)), PACKAGE = "spatstat") ## extract dist <- matrix(res$distances, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] inde <- matrix(res$indices, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] bdry <- matrix(res$boundary, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] ## convert index from C to R indexing inde <- inde + 1L return(list(d = dist, i = inde, b = bdry, w=w)) } exactdt }) spatstat/R/quadratresample.R0000755000176200001440000000223213115271120015626 0ustar liggesusers# # quadratresample.R # # resample a point pattern by resampling quadrats # # $Revision: 1.7 $ $Date: 2015/10/21 09:06:57 $ # quadratresample <- function(X, nx, ny=nx, ..., replace=FALSE, nsamples=1, verbose=(nsamples > 1)) { stopifnot(is.ppp(X)) if(X$window$type != "rectangle") stop("Resampling is only implemented for rectangular windows") # create tessellation A <- quadrats(X, nx=nx, ny=ny) # split data over tessellation B <- split(X, A) nq <- length(B) # determine bottom left corner of each tile V <- lapply(B, framebottomleft) out <- list() if(verbose) { cat("Generating resampled patterns...") pstate <- list() } for(i in 1:nsamples) { # resample tiles ind <- sample(1:nq, nq, replace=replace) Xresampled <- X Bresampled <- B for(j in 1:nq) { k <- ind[j] Bresampled[[j]] <- shift(B[[k]], unlist(V[[j]]) - unlist(V[[k]])) } 
split(Xresampled, A) <- Bresampled out[[i]] <- Xresampled if(verbose) pstate <- progressreport(i, nsamples, state=pstate) } if(nsamples == 1) return(out[[1]]) return(as.solist(out)) } spatstat/R/quadratmtest.R0000755000176200001440000000067113115271120015157 0ustar liggesusers# # method for 'quadrat.test' for class mppm # # $Revision: 1.8 $ $Date: 2015/08/12 07:29:17 $ # quadrat.test.mppm <- function(X, ...) { Xname <- short.deparse(substitute(X)) if(!is.poisson.mppm(X)) stop("Model is not a Poisson point process") subs <- subfits(X) tests <- anylapply(subs, quadrat.test.ppm, ..., fitname=Xname) df.est <- length(coef(X)) return(pool.quadrattest(tests, Xname=Xname, df.est=df.est)) } spatstat/R/images.R0000755000176200001440000010517713115271075013726 0ustar liggesusers# # images.R # # $Revision: 1.145 $ $Date: 2017/06/05 10:31:58 $ # # The class "im" of raster images # # im() object creator # # is.im() tests class membership # # rasterx.im(), rastery.im() # raster X and Y coordinates # # nearest.pixel() # lookup.im() # facilities for looking up pixel values # ################################################################ ######## basic support for class "im" ################################################################ # # creator im <- function(mat, xcol=seq_len(ncol(mat)), yrow=seq_len(nrow(mat)), xrange=NULL, yrange=NULL, unitname=NULL) { typ <- typeof(mat) if(typ == "double") typ <- "real" miss.xcol <- missing(xcol) miss.yrow <- missing(yrow) # determine dimensions if(!is.null(dim(mat))) { nr <- nrow(mat) nc <- ncol(mat) if(length(xcol) != nc) stop("Length of xcol does not match ncol(mat)") if(length(yrow) != nr) stop("Length of yrow does not match nrow(mat)") } else { if(miss.xcol || miss.yrow) stop(paste(sQuote("mat"), "is not a matrix and I can't guess its dimensions")) stopifnot(length(mat) == length(xcol) * length(yrow)) nc <- length(xcol) nr <- length(yrow) } # deal with factor case if(is.factor(mat)) { typ <- "factor" } else if(!is.null(lev <- 
levels(mat))) { typ <- "factor" mat <- factor(mat, levels=lev) } # Ensure 'mat' is a matrix (without destroying factor information) if(!is.matrix(mat)) dim(mat) <- c(nr, nc) # set up coordinates if((miss.xcol || length(xcol) <= 1) && !is.null(xrange) ) { # use 'xrange' xstep <- diff(xrange)/nc xcol <- seq(from=xrange[1L] + xstep/2, to=xrange[2L] - xstep/2, length.out=nc) } else if(length(xcol) > 1) { # use 'xcol' # ensure spacing is constant xcol <- seq(from=min(xcol), to=max(xcol), length.out=length(xcol)) xstep <- diff(xcol)[1L] xrange <- range(xcol) + c(-1,1) * xstep/2 } else stop("Cannot determine pixel width") if((miss.yrow || length(yrow) <= 1) && !is.null(yrange)) { # use 'yrange' ystep <- diff(yrange)/nr yrow <- seq(from=yrange[1L] + ystep/2, to=yrange[2L] - ystep/2, length.out=nr) } else if(length(yrow) > 1) { # use 'yrow' # ensure spacing is constant yrow <- seq(from=min(yrow), to=max(yrow), length.out=length(yrow)) ystep <- diff(yrow)[1L] yrange <- range(yrow) + c(-1,1) * ystep/2 } else stop("Cannot determine pixel height") unitname <- as.units(unitname) out <- list(v = mat, dim = c(nr, nc), xrange = xrange, yrange = yrange, xstep = xstep, ystep = ystep, xcol = xcol, yrow = yrow, type = typ, units = unitname) class(out) <- "im" return(out) } is.im <- function(x) { inherits(x,"im") } levels.im <- function(x) { levels(x$v) } "levels<-.im" <- function(x, value) { if(x$type != "factor") stop("image is not factor-valued") levels(x$v) <- value x } ################################################################ ######## methods for class "im" ################################################################ shift.im <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "im") if(!is.null(origin)) { stopifnot(is.character(origin)) if(!missing(vec)) warning("argument vec ignored; overruled by argument origin") origin <- pickoption("origin", origin, c(centroid="centroid", midpoint="midpoint", bottomleft="bottomleft")) W <- as.owin(X) locn <- switch(origin, 
centroid={ unlist(centroid.owin(W)) }, midpoint={ c(mean(W$xrange), mean(W$yrange)) }, bottomleft={ c(W$xrange[1L], W$yrange[1L]) }) return(shift(X, -locn)) } X$xrange <- X$xrange + vec[1L] X$yrange <- X$yrange + vec[2L] X$xcol <- X$xcol + vec[1L] X$yrow <- X$yrow + vec[2L] attr(X, "lastshift") <- vec return(X) } "Frame<-.im" <- function(X, value) { stopifnot(is.rectangle(value)) if(!is.subset.owin(value, Frame(X))) { ## first expand X <- X[value, drop=FALSE] } X[value, drop=TRUE] } "[.im" <- local({ disjoint <- function(r, s) { (r[2L] < s[1L]) || (r[1L] > s[2L]) } clip <- function(r, s) { c(max(r[1L],s[1L]), min(r[2L],s[2L])) } inrange <- function(x, r) { (x >= r[1L]) & (x <= r[2L]) } Extract.im <- function(x, i, j, ..., drop=TRUE, tight=FALSE, raster=NULL, rescue=is.owin(i)) { ## detect 'blank' arguments like second argument in x[i, ] ngiven <- length(sys.call()) nmatched <- length(match.call()) nblank <- ngiven - nmatched itype <- if(missing(i)) "missing" else "given" jtype <- if(missing(j)) "missing" else "given" if(nblank == 1) { if(!missing(i)) jtype <- "blank" if(!missing(j)) itype <- "blank" } else if(nblank == 2) { itype <- jtype <- "blank" } if(missing(rescue) && itype != "given") rescue <- FALSE if(itype == "missing" && jtype == "missing") { ## no indices: return entire image out <- if(is.null(raster)) x else as.im(raster) xy <- expand.grid(y=out$yrow,x=out$xcol) if(!is.null(raster)) { ## resample image on new pixel raster values <- lookup.im(x, xy$x, xy$y, naok=TRUE) out <- im(values, out$xcol, out$yrow, unitname=unitname(out)) } if(!drop) return(out) else { v <- out$v return(v[!is.na(v)]) } } if(itype == "given") { ## ................................................................. ## Try spatial index ## ................................................................. 
if(verifyclass(i, "owin", fatal=FALSE)) { if(jtype == "given") warning("Argument j ignored") ## 'i' is a window ## if drop = FALSE, just set values outside window to NA ## if drop = TRUE, extract values for all pixels inside window ## as an image (if 'i' is a rectangle) ## or as a vector (otherwise) ## determine pixel raster for output if(!is.null(raster)) { out <- as.im(raster) do.resample <- TRUE } else if(is.subset.owin(i, as.owin(x))) { out <- x do.resample <- FALSE } else { ## new window does not contain data window: expand it bb <- boundingbox(as.rectangle(i), as.rectangle(x)) rr <- if(is.mask(i)) i else x xcol <- prolongseq(rr$xcol, bb$xrange, rr$xstep) yrow <- prolongseq(rr$yrow, bb$yrange, rr$ystep) out <- list(xcol=xcol, yrow=yrow) do.resample <- TRUE } xy <- expand.grid(y=out$yrow,x=out$xcol) if(do.resample) { ## resample image on new pixel raster values <- lookup.im(x, xy$x, xy$y, naok=TRUE) out <- im(values, out$xcol, out$yrow, unitname=unitname(out)) } inside <- inside.owin(xy$x, xy$y, i) if(!drop) { ## set other pixels to NA and return image out$v[!inside] <- NA if(!tight) return(out) } else if(!(rescue && i$type == "rectangle")) { ## return pixel values values <- out$v[inside] return(values) } ## return image in smaller rectangle if(disjoint(i$xrange, x$xrange) || disjoint(i$yrange, x$yrange)) ## empty intersection return(numeric(0)) xr <- clip(i$xrange, x$xrange) yr <- clip(i$yrange, x$yrange) colsub <- inrange(out$xcol, xr) rowsub <- inrange(out$yrow, yr) ncolsub <- sum(colsub) nrowsub <- sum(rowsub) if(ncolsub == 0 || nrowsub == 0) return(numeric(0)) marg <- list(mat=out$v[rowsub, colsub, drop=FALSE], unitname=unitname(x)) xarg <- if(ncolsub > 1) list(xcol = out$xcol[colsub]) else list(xrange=xr) yarg <- if(nrowsub > 1) list(yrow = out$yrow[rowsub]) else list(yrange=yr) result <- do.call(im, c(marg, xarg, yarg)) return(result) } if(verifyclass(i, "im", fatal=FALSE)) { if(jtype == "given") warning("Argument j ignored") ## logical images OK 
if(i$type == "logical") { ## convert to window w <- as.owin(eval.im(ifelse1NA(i))) return(x[w, drop=drop, ..., raster=raster]) } else stop("Subset argument \'i\' is an image, but not of logical type") } if(inherits(i, "linnet")) { #' linear network if(jtype == "given") warning("Argument j ignored") W <- raster %orifnull% as.owin(x) M <- as.mask.psp(as.psp(i), W=W, ...) xM <- x[M, drop=drop] if(is.im(xM)) xM <- linim(i, xM) return(xM) } if(is.ppp(i)) { ## 'i' is a point pattern if(jtype == "given") warning("Argument j ignored") ## Look up the greyscale values for the points of the pattern values <- lookup.im(x, i$x, i$y, naok=TRUE) if(drop) values <- values[!is.na(values)] if(length(values) == 0) ## ensure the zero-length vector is of the right type values <- switch(x$type, factor={ factor(, levels=levels(x)) }, integer = { integer(0) }, logical = { logical(0) }, real = { numeric(0) }, complex = { complex(0) }, character = { character(0) }, { values } ) return(values) } } ## ............... not a spatial index ............................. 
## Try indexing as a matrix ## Construct a matrix index call for possible re-use M <- as.matrix(x) ## suppress warnings from code checkers dont.complain.about(M) ## ycall <- switch(itype, given = { switch(jtype, given = quote(M[i, j, drop=FALSE]), blank = quote(M[i, , drop=FALSE]), missing = quote(M[i, drop=FALSE])) }, blank = { switch(jtype, given = quote(M[ , j, drop=FALSE]), blank = quote(M[ , , drop=FALSE]), missing = quote(M[ , drop=FALSE])) }, missing = { switch(jtype, given = quote(M[j=j, drop=FALSE]), blank = quote(M[j= , drop=FALSE]), missing = quote(M[ drop=FALSE])) }) ## try it y <- try(eval(as.call(ycall)), silent=TRUE) if(!inherits(y, "try-error")) { ## valid subset index for a matrix if(rescue) { ## check whether it's a rectangular block, in correct order RR <- row(x$v) CC <- col(x$v) rcall <- ycall rcall[[2L]] <- quote(RR) ccall <- ycall ccall[[2L]] <- quote(CC) rr <- eval(as.call(rcall)) cc <- eval(as.call(ccall)) rseq <- sort(unique(as.vector(rr))) cseq <- sort(unique(as.vector(cc))) if(all(diff(rseq) == 1) && all(diff(cseq) == 1) && (length(rr) == length(rseq) * length(cseq)) && all(rr == RR[rseq, cseq]) && all(cc == CC[rseq,cseq])) { ## yes - make image dim(y) <- c(length(rseq), length(cseq)) Y <- x Y$v <- y Y$dim <- dim(y) Y$xcol <- x$xcol[cseq] Y$yrow <- x$yrow[rseq] Y$xrange <- range(Y$xcol) + c(-1,1) * x$xstep/2 Y$yrange <- range(Y$yrow) + c(-1,1) * x$ystep/2 return(Y) } } ## return pixel values (possibly as matrix) return(y) } ## Last chance! 
if(itype == "given" && !is.matrix(i) && !is.null(ip <- as.ppp(i, W=as.owin(x), fatal=FALSE, check=FALSE))) { ## 'i' is convertible to a point pattern ## Look up the greyscale values for the points of the pattern values <- lookup.im(x, ip$x, ip$y, naok=TRUE) if(drop) values <- values[!is.na(values)] if(length(values) == 0) ## ensure the zero-length vector is of the right type values <- switch(x$type, factor={ factor(, levels=levels(x)) }, integer = { integer(0) }, logical = { logical(0) }, real = { numeric(0) }, complex = { complex(0) }, character = { character(0) }, { values } ) return(values) } stop("The subset operation is undefined for this type of index") } Extract.im }) update.im <- function(object, ...) { ## update internal structure of image after manipulation X <- object mat <- X$v typ <- typeof(mat) if(typ == "double") typ <- "real" ## deal with factor case if(is.factor(mat)) { typ <- "factor" } else if(!is.null(lev <- levels(mat))) { typ <- "factor" X$v <- factor(mat, levels=lev) } X$type <- typ return(X) } "[<-.im" <- function(x, i, j, value) { # detect 'blank' arguments like second argument of x[i, ] ngiven <- length(sys.call()) nmatched <- length(match.call()) nblank <- ngiven - nmatched itype <- if(missing(i)) "missing" else "given" jtype <- if(missing(j)) "missing" else "given" if(nblank == 1) { if(!missing(i)) jtype <- "blank" if(!missing(j)) itype <- "blank" } else if(nblank == 2) { itype <- jtype <- "blank" } X <- x W <- as.owin(X) stopifnot(is.im(value) || is.vector(value) || is.matrix(value) || is.array(value) || is.factor(value)) if(is.im(value)) value <- value$v if(itype == "missing" && jtype == "missing") { # no index provided # set all pixels to 'value' v <- X$v if(!is.factor(value)) { v[!is.na(v)] <- value } else { vnew <- matrix(NA_integer_, ncol(v), nrow(v)) vnew[!is.na(v)] <- as.integer(value) v <- factor(vnew, labels=levels(value)) } X$v <- v return(update(X)) } if(itype == "given") { # ..................... 
Try a spatial index .................... if(verifyclass(i, "owin", fatal=FALSE)) { if(jtype == "given") warning("Index j ignored") # 'i' is a window if(is.empty(i)) return(X) rxy <- rasterxy.mask(W) xx <- rxy$x yy <- rxy$y ok <- inside.owin(xx, yy, i) X$v[ok] <- value X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v)) return(update(X)) } if(verifyclass(i, "im", fatal=FALSE) && i$type == "logical") { if(jtype == "given") warning("Index j ignored") # convert logical vector to window where entries are TRUE i <- as.owin(eval.im(ifelse1NA(i))) # continue as above rxy <- rasterxy.mask(W) xx <- rxy$x yy <- rxy$y ok <- inside.owin(xx, yy, i) X$v[ok] <- value X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v)) return(update(X)) } if(is.ppp(i)) { # 'i' is a point pattern if(jtype == "given") warning("Index j ignored") nv <- length(value) np <- npoints(i) if(nv != np && nv != 1) stop("Length of replacement value != number of point locations") # test whether all points are inside window FRAME ok <- inside.owin(i$x, i$y, as.rectangle(W)) if(any(!ok)) { warning("Some points are outside the outer frame of the image") if(nv == np) value <- value[ok] i <- i[ok] } if(npoints(i) > 0) { # determine row & column positions for each point loc <- nearest.pixel(i$x, i$y, X) # set values X$v[cbind(loc$row, loc$col)] <- value } X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v)) return(update(X)) } } # .................. 'i' is not a spatial index .................... 
# Construct a matrix replacement call ycall <- switch(itype, given = { switch(jtype, given = quote(X$v[i, j] <- value), blank = quote(X$v[i, ] <- value), missing = quote(X$v[i] <- value)) }, blank = { switch(jtype, given = quote(X$v[ , j] <- value), blank = quote(X$v[ , ] <- value), missing = quote(X$v[ ] <- value)) }, missing = { switch(jtype, given = quote(X$v[j=j] <- value), blank = quote(X$v[j= ] <- value), missing = quote(X$v[] <- value)) }) # try it litmus <- try(eval(as.call(ycall)), silent=TRUE) if(!inherits(litmus, "try-error")){ X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v)) return(update(X)) } # Last chance! if(itype == "given" && !is.matrix(i) && !is.null(ip <- as.ppp(i, W=W, fatal=FALSE, check=TRUE))) { # 'i' is convertible to a point pattern if(jtype == "given") warning("Index j ignored") nv <- length(value) np <- npoints(ip) if(nv != np && nv != 1) stop("Length of replacement value != number of point locations") # test whether all points are inside window FRAME ok <- inside.owin(ip$x, ip$y, as.rectangle(W)) if(any(!ok)) { warning("Some points are outside the outer frame of the image") if(nv == np) value <- value[ok] ip <- ip[ok] } if(npoints(ip) > 0) { # determine row & column positions for each point loc <- nearest.pixel(ip$x, ip$y, X) # set values X$v[cbind(loc$row, loc$col)] <- value } X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v)) return(update(X)) } stop("The subset operation is undefined for this type of index") } ################################################################ ######## other tools ################################################################ # # This function is similar to nearest.raster.point except for # the third argument 'im' and the different idiom for calculating # row & column - which could be used in nearest.raster.point() nearest.pixel <- function(x,y, Z) { stopifnot(is.im(Z) || is.mask(Z)) if(length(x) > 0) { nr <- Z$dim[1L] nc <- Z$dim[2L] cc <- round(1 + (x - Z$xcol[1L])/Z$xstep) rr <- round(1 
+ (y - Z$yrow[1L])/Z$ystep) cc <- pmax.int(1,pmin.int(cc, nc)) rr <- pmax.int(1,pmin.int(rr, nr)) } else cc <- rr <- integer(0) return(list(row=rr, col=cc)) } # Explores the 3 x 3 neighbourhood of nearest.pixel # and finds the nearest pixel that is not NA nearest.valid.pixel <- function(x, y, Z) { rc <- nearest.pixel(x,y,Z) # checks that Z is an 'im' or 'mask' rr <- rc$row cc <- rc$col # check whether any pixels are outside image domain inside <- as.owin(Z)$m miss <- !inside[cbind(rr, cc)] if(!any(miss)) return(rc) # for offending pixels, explore 3 x 3 neighbourhood nr <- Z$dim[1L] nc <- Z$dim[2L] xcol <- Z$xcol yrow <- Z$yrow for(i in which(miss)) { rows <- rr[i] + c(-1L,0L,1L) cols <- cc[i] + c(-1L,0L,1L) rows <- unique(pmax.int(1, pmin.int(rows, nr))) cols <- unique(pmax.int(1, pmin.int(cols, nc))) rcp <- expand.grid(row=rows, col=cols) ok <- inside[as.matrix(rcp)] if(any(ok)) { # At least one of the neighbours is valid # Find the closest one rcp <- rcp[ok,] dsq <- with(rcp, (x[i] - xcol[col])^2 + (y[i] - yrow[row])^2) j <- which.min(dsq) rc$row[i] <- rcp$row[j] rc$col[i] <- rcp$col[j] } } return(rc) } # This function is a generalisation of inside.owin() # to images other than binary-valued images. 
lookup.im <- function(Z, x, y, naok=FALSE, strict=TRUE) { verifyclass(Z, "im") if(Z$type == "factor") Z <- repair.old.factor.image(Z) if(length(x) != length(y)) stop("x and y must be numeric vectors of equal length") # initialise answer to NA if(Z$type != "factor") { niets <- NA mode(niets) <- mode(Z$v) } else { niets <- factor(NA, levels=levels(Z)) } value <- rep.int(niets, length(x)) # test whether inside bounding rectangle xr <- Z$xrange yr <- Z$yrange eps <- sqrt(.Machine$double.eps) frameok <- (x >= xr[1L] - eps) & (x <= xr[2L] + eps) & (y >= yr[1L] - eps) & (y <= yr[2L] + eps) if(!any(frameok)) { # all points OUTSIDE range - no further work needed if(!naok) warning("Internal error: all values NA") return(value) # all NA } # consider only those points which are inside the frame xf <- x[frameok] yf <- y[frameok] # map locations to raster (row,col) coordinates if(strict) loc <- nearest.pixel(xf,yf,Z) else loc <- nearest.valid.pixel(xf,yf,Z) # look up image values vf <- Z$v[cbind(loc$row, loc$col)] # insert into answer value[frameok] <- vf if(!naok && anyNA(value)) warning("Internal error: NA's generated") return(value) } ## low level rasterx.im <- function(x) { verifyclass(x, "im") xx <- x$xcol matrix(xx[col(x)], ncol=ncol(x), nrow=nrow(x)) } rastery.im <- function(x) { verifyclass(x, "im") yy <- x$yrow matrix(yy[row(x)], ncol=ncol(x), nrow=nrow(x)) } rasterxy.im <- function(x, drop=FALSE) { verifyclass(x, "im") xx <- x$xcol yy <- x$yrow ans <- cbind(x=as.vector(xx[col(x)]), y=as.vector(yy[row(x)])) if(drop) { ok <- as.vector(!is.na(x$v)) ans <- ans[ok, , drop=FALSE] } return(ans) } ## user interface raster.x <- function(w, drop=FALSE) { if(is.owin(w)) return(rasterx.mask(w, drop=drop)) if(!is.im(w)) stop("w should be a window or an image") x <- w$xcol[col(w)] x <- if(drop) x[!is.na(w$v), drop=TRUE] else array(x, dim=w$dim) return(x) } raster.y <- function(w, drop=FALSE) { if(is.owin(w)) return(rastery.mask(w, drop=drop)) if(!is.im(w)) stop("w should be a window 
or an image") y <- w$yrow[row(w)] y <- if(drop) y[!is.na(w$v), drop=TRUE] else array(y, dim=w$dim) return(y) } raster.xy <- function(w, drop=FALSE) { if(is.owin(w)) return(rasterxy.mask(w, drop=drop)) if(!is.im(w)) stop("w should be a window or an image") y <- w$xcol[col(w)] y <- w$yrow[row(w)] if(drop) { ok <- !is.na(w$v) x <- x[ok, drop=TRUE] y <- y[ok, drop=TRUE] } return(list(x=as.numeric(x), y=as.numeric(y))) } ############## # methods for other functions xtfrm.im <- function(x) { as.numeric(as.matrix.im(x)) } as.matrix.im <- function(x, ...) { return(x$v) } as.array.im <- function(x, ...) { m <- as.matrix(x) a <- do.call(array, resolve.defaults(list(m), list(...), list(dim=c(dim(m), 1)))) return(a) } as.data.frame.im <- function(x, ...) { verifyclass(x, "im") v <- x$v xx <- x$xcol[col(v)] yy <- x$yrow[row(v)] ok <- !is.na(v) xx <- as.vector(xx[ok]) yy <- as.vector(yy[ok]) # extract pixel values without losing factor info vv <- v[ok] dim(vv) <- NULL # data.frame(x=xx, y=yy, value=vv, ...) } mean.im <- function(x, trim=0, na.rm=TRUE, ...) { verifyclass(x, "im") xvalues <- x[drop=na.rm] return(mean(xvalues, trim=trim, na.rm=na.rm)) } ## arguments of generic 'median' will change in R 3.4 median.im <- if("..." %in% names(formals(median))) { function(x, na.rm=TRUE, ...) 
{ verifyclass(x, "im") xvalues <- x[drop=na.rm] return(median(xvalues, ...)) } } else { function(x, na.rm=TRUE) { verifyclass(x, "im") xvalues <- x[drop=na.rm] return(median(xvalues)) } } where.max <- function(x, first=TRUE) { stopifnot(is.im(x)) if(first) { ## find the first maximum v <- x$v locn <- which.max(as.vector(v)) # ignores NA, NaN locrow <- as.vector(row(v))[locn] loccol <- as.vector(col(v))[locn] } else { ## find all maxima xmax <- max(x) M <- solutionset(x == xmax) loc <- which(M$m, arr.ind=TRUE) locrow <- loc[,1L] loccol <- loc[,2L] } xx <- x$xcol[loccol] yy <- x$yrow[locrow] return(ppp(x=xx, y=yy, window=Window(x))) } where.min <- function(x, first=TRUE) { stopifnot(is.im(x)) if(first) { ## find the first minimum v <- x$v locn <- which.min(as.vector(v)) # ignores NA, NaN locrow <- as.vector(row(v))[locn] loccol <- as.vector(col(v))[locn] } else { ## find all minima xmin <- min(x) M <- solutionset(x == xmin) loc <- which(M$m, arr.ind=TRUE) locrow <- loc[,1L] loccol <- loc[,2L] } xx <- x$xcol[loccol] yy <- x$yrow[locrow] return(ppp(x=xx, y=yy, window=Window(x))) } ## the following ensures that 'sd' works as.double.im <- function(x, ...) { as.double(x[], ...) 
} ## hist.im <- function(x, ..., probability=FALSE, xname) { if(missing(xname) || is.null(xname)) xname <- short.deparse(substitute(x)) verifyclass(x, "im") main <- paste("Histogram of", xname) # default plot arguments # extract pixel values values <- as.matrix(x) dim(values) <- NULL # barplot or histogram if(x$type %in% c("logical", "factor")) { # barplot tab <- table(values) probs <- tab/sum(tab) if(probability) { heights <- probs ylab <- "Probability" } else { heights <- tab ylab <- "Number of pixels" } mids <- do.call(barplot, resolve.defaults(list(heights), list(...), list(xlab=paste("Pixel value"), ylab=ylab, main=main))) out <- list(counts=tab, probs=probs, heights=heights, mids=mids, xname=xname) class(out) <- "barplotdata" } else { # histogram values <- values[!is.na(values)] plotit <- resolve.defaults(list(...), list(plot=TRUE))$plot if(plotit) { ylab <- if(probability) "Probability density" else "Number of pixels" out <- do.call(hist.default, resolve.defaults(list(values), list(...), list(freq=!probability, xlab="Pixel value", ylab=ylab, main=main))) out$xname <- xname } else { # plot.default whinges if `probability' given when plot=FALSE out <- do.call(hist.default, resolve.defaults(list(values), list(...))) # hack! out$xname <- xname } } return(invisible(out)) } plot.barplotdata <- function(x, ...) { do.call(barplot, resolve.defaults(list(height=x$heights), list(...), list(main=paste("Histogram of ", x$xname)))) } cut.im <- function(x, ...) { verifyclass(x, "im") typ <- x$type if(typ %in% c("factor", "logical", "character")) stop(paste0("cut.im is not defined for ", typ, "-valued images"), call.=FALSE) vcut <- cut(as.numeric(as.matrix(x)), ...) return(im(vcut, xcol=x$xcol, yrow=x$yrow, xrange=x$xrange, yrange=x$yrange, unitname=unitname(x))) } quantile.im <- function(x, ...) { verifyclass(x, "im") q <- do.call(quantile, resolve.defaults(list(as.numeric(as.matrix(x))), list(...), list(na.rm=TRUE))) return(q) } integral <- function(f, domain=NULL, ...) 
{ UseMethod("integral") } integral.im <- function(f, domain=NULL, ...) { verifyclass(f, "im") typ <- f$type if(!any(typ == c("integer", "real", "complex", "logical"))) stop(paste("Don't know how to integrate an image of type", sQuote(typ))) if(!is.null(domain)) { if(is.tess(domain)) return(sapply(tiles(domain), integral.im, f=f)) f <- f[domain, drop=FALSE, tight=TRUE] } a <- with(f, sum(v, na.rm=TRUE) * xstep * ystep) return(a) } conform.imagelist <- function(X, Zlist) { # determine points of X where all images in Zlist are defined ok <- rep.int(TRUE, length(X$x)) for(i in seq_along(Zlist)) { Zi <- Zlist[[i]] ZiX <- Zi[X, drop=FALSE] ok <- ok & !is.na(ZiX) } return(ok) } split.im <- function(x, f, ..., drop=FALSE) { stopifnot(is.im(x)) if(inherits(f, "tess")) subsets <- tiles(f) else if(is.im(f)) { if(f$type != "factor") f <- eval.im(factor(f)) subsets <- tiles(tess(image=f)) } else stop("f should be a tessellation or a factor-valued image") if(!is.subset.owin(as.owin(x), as.owin(f))) stop("f does not cover the window of x") n <- length(subsets) out <- vector(mode="list", length=n) names(out) <- names(subsets) for(i in 1:n) out[[i]] <- x[subsets[[i]], drop=drop] if(drop) return(out) else return(as.solist(out)) } by.im <- function(data, INDICES, FUN, ...) { stopifnot(is.im(data)) V <- split(data, INDICES) U <- lapply(V, FUN, ...) 
return(as.solist(U, demote=TRUE)) } rebound.im <- function(x, rect) { stopifnot(is.im(x)) stopifnot(is.owin(rect)) rect <- as.rectangle(rect) stopifnot(is.subset.owin(as.rectangle(x), rect)) # compute number of extra rows/columns dx <- x$xstep nleft <- max(0, floor((x$xrange[1L]-rect$xrange[1L])/dx)) nright <- max(0, floor((rect$xrange[2L]-x$xrange[2L])/dx)) dy <- x$ystep nbot <- max(0, floor((x$yrange[1L]-rect$yrange[1L])/dy)) ntop <- max(0, floor((rect$yrange[2L]-x$yrange[2L])/dy)) # determine exact x and y ranges (to preserve original pixel locations) xrange.new <- x$xrange + c(-nleft, nright) * dx yrange.new <- x$yrange + c(-nbot, ntop) * dy # expand pixel data matrix nr <- x$dim[1L] nc <- x$dim[2L] nrnew <- nbot + nr + ntop ncnew <- nleft + nc + nright naval <- switch(x$type, factor=, integer=NA_integer_, real=NA_real_, character=NA_character_, complex=NA_complex_, NA) vnew <- matrix(naval, nrnew, ncnew) if(x$type != "factor") { vnew[nbot + (1:nr), nleft + (1:nc)] <- x$v } else { vnew[nbot + (1:nr), nleft + (1:nc)] <- as.integer(x$v) vnew <- factor(vnew, labels=levels(x)) dim(vnew) <- c(nrnew, ncnew) } # build new image object xnew <- im(vnew, xrange = xrange.new, yrange = yrange.new, unitname = unitname(x)) return(xnew) } sort.im <- function(x, ...) { verifyclass(x, "im") sort(as.vector(as.matrix(x)), ...) 
} dim.im <- function(x) { x$dim } # colour images rgbim <- function(R, G, B, A=NULL, maxColorValue=255, autoscale=FALSE) { if(autoscale) { R <- scaletointerval(R, 0, maxColorValue) G <- scaletointerval(G, 0, maxColorValue) B <- scaletointerval(B, 0, maxColorValue) if(!is.null(A)) A <- scaletointerval(A, 0, maxColorValue) } Z <- eval.im(factor(rgbNA(as.vector(R), as.vector(G), as.vector(B), as.vector(A), maxColorValue=maxColorValue))) return(Z) } hsvim <- function(H, S, V, A=NULL, autoscale=FALSE) { if(autoscale) { H <- scaletointerval(H, 0, 1) S <- scaletointerval(S, 0, 1) V <- scaletointerval(V, 0, 1) if(!is.null(A)) A <- scaletointerval(A, 0, 1) } Z <- eval.im(factor(hsvNA(as.vector(H), as.vector(S), as.vector(V), as.vector(A)))) return(Z) } scaletointerval <- function(x, from=0, to=1, xrange=range(x)) { UseMethod("scaletointerval") } scaletointerval.default <- function(x, from=0, to=1, xrange=range(x)) { x <- as.numeric(x) rr <- if(missing(xrange)) range(x, na.rm=TRUE) else as.numeric(xrange) b <- as.numeric(to - from)/diff(rr) if(is.finite(b)) { y <- from + b * (x - rr[1L]) } else { y <- (from+to)/2 + 0 * x } y[] <- pmin(pmax(y[], from), to) return(y) } scaletointerval.im <- function(x, from=0, to=1, xrange=range(x)) { v <- scaletointerval(x$v, from, to, xrange=xrange) y <- im(v, x$xcol, x$yrow, x$xrange, x$yrange, unitname(x)) return(y) } zapsmall.im <- function(x, digits) { if(missing(digits)) return(eval.im(zapsmall(x))) return(eval.im(zapsmall(x, digits=digits))) } domain.im <- Window.im <- function(X, ...) 
{ as.owin(X) } "Window<-.im" <- function(X, ..., value) { verifyclass(value, "owin") X[value, drop=FALSE] } padimage <- function(X, value=NA, n=1, W=NULL) { stopifnot(is.im(X)) stopifnot(length(value) == 1) if(!missing(n) && !is.null(W)) stop("Arguments n and W are incompatible", call.=FALSE) padW <- !is.null(W) if(isfac <- (X$type == "factor")) { ## handle factors levX <- levels(X) if(is.factor(value)) { stopifnot(identical(levels(X), levels(value))) } else { value <- factor(value, levels=levX) } X <- eval.im(as.integer(X)) value <- as.integer(value) } if(!padW) { ## pad by 'n' pixels nn <- rep(n, 4) nleft <- nn[1L] nright <- nn[2L] nbottom <- nn[3L] ntop <- nn[4L] } else { ## pad out to window W FX <- Frame(X) B <- boundingbox(Frame(W), FX) nleft <- max(1, round((FX$xrange[1L] - B$xrange[1L])/X$xstep)) nright <- max(1, round((B$xrange[2L] - FX$xrange[2L])/X$xstep)) nbottom <- max(1, round((FX$yrange[1L] - B$yrange[1L])/X$ystep)) ntop <- max(1, round((B$yrange[2L] - FX$yrange[2L])/X$ystep)) } mX <- as.matrix(X) dd <- dim(mX) mX <- cbind(matrix(value, dd[1L], nleft, byrow=TRUE), as.matrix(X), matrix(value, dd[1L], nright, byrow=TRUE)) dd <- dim(mX) mX <- rbind(matrix(rev(value), nbottom, dd[2L]), mX, matrix(value, ntop, dd[2L])) xcol <- with(X, c(xcol[1L] - (nleft:1) * xstep, xcol, xcol[length(xcol)] + (1:nright) * xstep)) yrow <- with(X, c(yrow[1L] - (nbottom:1) * ystep, yrow, yrow[length(yrow)] + (1:ntop) * ystep)) xr <- with(X, xrange + c(-nleft, nright) * xstep) yr <- with(X, yrange + c(-nbottom, ntop) * ystep) Y <- im(mX, xcol=xcol, yrow=yrow, xrange=xr, yrange=yr, unitname=unitname(X)) if(isfac) Y <- eval.im(factor(Y, levels=seq_along(levX), labels=levX)) if(padW && !is.rectangle(W)) Y <- Y[W, drop=FALSE] return(Y) } as.function.im <- function(x, ...) 
{ Z <- x f <- function(x,y) { Z[list(x=x, y=y)] } g <- funxy(f, Window(x)) return(g) } anyNA.im <- function(x, recursive=FALSE) { anyNA(x$v) } spatstat/R/kernels.R0000755000176200001440000002171313115271075014115 0ustar liggesusers# # kernels.R # # rXXX, dXXX, pXXX and qXXX for kernels # # $Revision: 1.17 $ $Date: 2016/07/02 03:36:46 $ # match.kernel <- function(kernel) { kernel.map <- c(Gaussian ="gaussian", gaussian ="gaussian", Normal ="gaussian", normal ="gaussian", rectangular ="rectangular", triangular ="triangular", Epanechnikov="epanechnikov", epanechnikov="epanechnikov", biweight ="biweight", cosine ="cosine", optcosine ="optcosine" ) ker <- pickoption("kernel", kernel, kernel.map) return(ker) } kernel.factor <- function(kernel="gaussian") { # This function returns the factor c such that # h = c * sigma # where sigma is the standard deviation of the kernel, and # h is the corresponding bandwidth parameter as conventionally defined. # Conventionally h is defined as a scale factor # relative to the `standard form' of the kernel, namely the # form with support [-1,1], except in the Gaussian case where # the standard form is N(0,1). # Thus the standard form of the kernel (h=1) has standard deviation 1/c. # The kernel with standard deviation 1 has support [-c,c] # except for gaussian case. 
kernel <- match.kernel(kernel) switch(kernel, gaussian = 1, rectangular = sqrt(3), triangular = sqrt(6), epanechnikov = sqrt(5), biweight = sqrt(7), cosine = 1/sqrt(1/3 - 2/pi^2), optcosine = 1/sqrt(1 - 8/pi^2)) } rkernel <- function(n, kernel="gaussian", mean=0, sd=1) { kernel <- match.kernel(kernel) if(kernel == "gaussian") return(rnorm(n, mean=mean, sd=sd)) # inverse cdf transformation u <- runif(n) qkernel(u, kernel, mean=mean, sd=sd) } dkernel <- function(x, kernel="gaussian", mean=0, sd=1) { kernel <- match.kernel(kernel) stopifnot(is.numeric(x)) stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0) a <- sd * kernel.factor(kernel) y <- abs(x-mean)/a dens <- switch(kernel, gaussian = { dnorm(y) }, rectangular = { ifelse(y < 1, 1/2, 0) }, triangular = { ifelse(y < 1, (1 - y), 0) }, epanechnikov = { ifelse(y < 1, (3/4) * (1 - y^2), 0) }, biweight = { ifelse(y < 1, (15/16) * (1 - y^2)^2, 0) }, cosine = { ifelse(y < 1, (1 + cos(pi * y))/2, 0) }, optcosine = { ifelse(y < 1, (pi/4) * cos(pi * y/2), 0) } ) dens/a } pkernel <- function(q, kernel="gaussian", mean=0, sd=1, lower.tail=TRUE){ kernel <- match.kernel(kernel) stopifnot(is.numeric(q)) stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0) a <- sd * kernel.factor(kernel) y <- (q-mean)/a switch(kernel, gaussian = { pnorm(y, lower.tail=lower.tail) }, rectangular = { punif(y, min=-1, max=1, lower.tail=lower.tail) }, triangular = { p <- ifelse(y < -1, 0, ifelse(y > 1, 1, ifelse(y < 0, y + y^2/2 + 1/2, y - y^2/2 + 1/2))) if(lower.tail) p else (1 - p) }, epanechnikov = { p <- ifelse(y < -1, 0, ifelse(y > 1, 1, (2 + 3 * y - y^3)/4)) if(lower.tail) p else (1 - p) }, biweight = { p <- ifelse(y < -1, 0, ifelse(y > 1, 1, (15 * y - 10 * y^3 + 3 * y^5 + 8)/16)) if(lower.tail) p else (1 - p) }, cosine = { p <- ifelse(y < -1, 0, ifelse(y > 1, 1, (y + sin(pi * y)/pi + 1)/2)) if(lower.tail) p else (1 - p) }, optcosine = { p <- ifelse(y < -1, 0, ifelse(y > 1, 1, (sin(pi * y/2) + 1)/2)) if(lower.tail) p else (1 - p) }) } 
qkernel <- function(p, kernel="gaussian", mean=0, sd=1, lower.tail=TRUE) { kernel <- match.kernel(kernel) stopifnot(is.numeric(p)) stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0) a <- sd * kernel.factor(kernel) if(!lower.tail) p <- 1 - p y <- switch(kernel, gaussian = { qnorm(p, lower.tail=lower.tail) }, rectangular = { qunif(p, min=-1, max=1, lower.tail=lower.tail) }, triangular = { ifelse(p < 1/2, sqrt(2 * p) - 1, 1 - sqrt(2 * (1-p))) }, epanechnikov = { # solve using `polyroot' yy <- numeric(n <- length(p)) yy[p == 0] <- -1 yy[p == 1] <- 1 inside <- (p != 0) & (p != 1) # coefficients of polynomial (2 + 3 y - y^3)/4 z <- c(2, 3, 0, -1)/4 for(i in seq(n)[inside]) { sol <- polyroot(z - c(p[i], 0, 0, 0)) ok <- abs(Im(sol)) < 1e-6 realpart <- Re(sol) ok <- ok & (abs(realpart) <= 1) if(sum(ok) != 1) stop(paste("Internal error:", sum(ok), "roots of polynomial")) yy[i] <- realpart[ok] } yy }, biweight = { # solve using `polyroot' yy <- numeric(n <- length(p)) yy[p == 0] <- -1 yy[p == 1] <- 1 inside <- (p != 0) & (p != 1) # coefficients of polynomial (8 + 15 * y - 10 * y^3 + 3 * y^5)/16 z <- c(8, 15, 0, -10, 0, 3)/16 for(i in seq(n)[inside]) { sol <- polyroot(z - c(p[i], 0, 0, 0, 0, 0)) ok <- abs(Im(sol)) < 1e-6 realpart <- Re(sol) ok <- ok & (abs(realpart) <= 1) if(sum(ok) != 1) stop(paste("Internal error:", sum(ok), "roots of polynomial")) yy[i] <- realpart[ok] } yy }, cosine = { # solve using `uniroot' g <- function(y, pval) { (y + sin(pi * y)/pi + 1)/2 - pval } yy <- numeric(n <- length(p)) yy[p == 0] <- -1 yy[p == 1] <- 1 inside <- (p != 0) & (p != 1) for(i in seq(n)[inside]) yy[i] <- uniroot(g, c(-1,1), pval=p[i])$root yy }, optcosine = { (2/pi) * asin(2 * p - 1) }) return(mean + a * y) } # integral of t^m k(t) dt from -Inf to r # was: nukernel(r, m, kernel) kernel.moment <- local({ kernel.moment <- function(m, r, kernel="gaussian") { ker <- match.kernel(kernel) if(ker != "gaussian") { r <- pmin(r, 1) r <- pmax(r, -1) } stopifnot(length(m) == 1) if(!(m %in% 
c(0,1,2)) || (ker %in% c("cosine", "optcosine"))) { ## use generic integration neginf <- if(ker == "gaussian") -10 else -1 result <- numeric(length(r)) for(i in seq_along(r)) result[i] <- integralvalue(kintegrand, lower=neginf, upper=r[i], m=m, ker=ker) return(result) } switch(ker, gaussian={ if(m == 0) return(pnorm(r)) else if(m == 1) return(-dnorm(r)) else return(pnorm(r) - r * dnorm(r)) }, rectangular = { if(m == 0) return((r + 1)/2) else if(m == 1) return((r^2 - 1)/4) else return((r^3 + 1)/6) }, triangular={ m1 <- m+1 m2 <- m+2 const <- ((-1)^m1)/m1 + ((-1)^m2)/m2 answer <- (r^m1)/m1 + ifelse(r < 0, 1, -1) * (r^m2)/m2 - const return(answer) }, epanechnikov = { if(m == 0) return((2 + 3*r - r^3)/4) else if(m == 1) return((-3 + 6*r^2 - 3*r^4)/16) else return(( 2 + 5*r^3 - 3* r^5)/20) }, biweight = { if(m == 0) return((3*r^5 - 10*r^3 + 15*r + 8)/16) else if(m == 1) return((5*r^6 - 15*r^4 + 15*r^2 -5)/32) else return((15*r^7 - 42*r^5 + 35*r^3 + 8)/112) }, # never reached! cosine={stop("Sorry, not yet implemented for cosine kernel")}, optcosine={stop("Sorry, not yet implemented for optcosine kernel")} ) } integralvalue <- function(...) 
integrate(...)$value kintegrand <- function(x, m, ker) { x^m * dkernel(x, ker) } kernel.moment }) kernel.squint <- function(kernel="gaussian", bw=1) { kernel <- match.kernel(kernel) check.1.real(bw) RK <- switch(kernel, gaussian = 1/(2 * sqrt(pi)), rectangular = sqrt(3)/6, triangular = sqrt(6)/9, epanechnikov = 3/(5 * sqrt(5)), biweight = 5 * sqrt(7)/49, cosine = 3/4 * sqrt(1/3 - 2/pi^2), optcosine = sqrt(1 - 8/pi^2) * pi^2/16) return(RK/bw) } spatstat/R/fourierbasis.R0000644000176200001440000000127213115225157015142 0ustar liggesusersfourierbasis <- function(x, k, win = boxx(rep(list(0:1), ncol(k)))) { x <- as.matrix(x) k <- as.matrix(k) if (nrow(k) == 0 | nrow(x) == 0) return(complex()) d <- ncol(x) if (ncol(k) != d) stop("Arguments x and k must have the same number of columns.") win <- as.boxx(win) boxlengths <- as.numeric(win$ranges[2L, ] - win$ranges[1L, ]) if (length(boxlengths) != d) stop("The box dimension differs from the number of columns in x and k") rslt <- exp(2 * pi * (0+1i) * outer(k[, 1L], x[, 1L]/boxlengths[1L])) if (d > 1) { for (i in 2:d) { rslt <- rslt * exp(2 * pi * (0+1i) * outer(k[, i], x[, i]/boxlengths[i])) } } return(rslt/prod(boxlengths)) } spatstat/R/ripras.R0000755000176200001440000000266713115271120013750 0ustar liggesusers# # ripras.S Ripley-Rasson estimator of domain # # # $Revision: 1.14 $ $Date: 2014/10/24 00:22:30 $ # # # # #------------------------------------- bounding.box.xy <- function(x, y=NULL) { xy <- xy.coords(x,y) if(length(xy$x) == 0) return(NULL) owin(range(xy$x), range(xy$y), check=FALSE) } convexhull.xy <- function(x, y=NULL) { xy <- xy.coords(x, y) x <- xy$x y <- xy$y if(length(x) < 3) return(NULL) h <- rev(chull(x, y)) # must be anticlockwise if(length(h) < 3) return(NULL) w <- owin(poly=list(x=x[h], y=y[h]), check=FALSE) return(w) } ripras <- function(x, y=NULL, shape="convex", f) { xy <- xy.coords(x, y) n <- length(xy$x) w <- switch(shape, convex = convexhull.xy(xy), rectangle = boundingbox(xy), 
              stop(paste("Unrecognised option: shape=", dQuote(shape))))
  if(is.null(w)) return(NULL)
  # expansion factor
  if(!missing(f))
    stopifnot(is.numeric(f) && length(f) == 1 && f >= 1)
  else switch(shape,
              convex = {
                # number of vertices
                m <- summary(w)$nvertices
                f <- if(m < n) 1/sqrt(1 - m/n) else 2
              },
              rectangle = {
                f <- (n+1)/(n-1)
              })
  # centroid
  ce <- unlist(centroid.owin(w))
  # shift centroid to origin
  W <- shift(w, -ce)
  # rescale
  W <- affine(W, mat=diag(c(f,f)))
  # shift origin to centroid
  W <- shift(W, ce)
  return(W)
}
spatstat/R/leverage.R0000755000176200001440000007475713164570352014260 0ustar liggesusers#
#  leverage.R
#
#  leverage and influence
#
#  $Revision: 1.82 $  $Date: 2017/10/03 02:04:21 $
#

leverage <- function(model, ...) {
  # Generic: leverage of a fitted point process model.
  UseMethod("leverage")
}

leverage.ppm <- function(model, ...,
                         drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) {
  # Leverage for a fitted point process model ('ppm').
  # Delegates the computation to ppmInfluence() and extracts
  # the 'leverage' component of its result.
  fitname <- short.deparse(substitute(model))
  a <- ppmInfluence(model, what="leverage", drop=drop,
                    iScore=iScore, iHessian=iHessian, iArgs=iArgs,
                    ...,
                    fitname=fitname)
  return(a$leverage)
}

influence.ppm <- function(model, ...,
                          drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) {
  # Influence measure for a fitted 'ppm'; see leverage.ppm above.
  fitname <- short.deparse(substitute(model))
  a <- ppmInfluence(model, what="influence", drop=drop,
                    iScore=iScore, iHessian=iHessian, iArgs=iArgs,
                    ...,
                    fitname=fitname)
  return(a$influence)
}

dfbetas.ppm <- function(model, ...,
                        drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) {
  # Parameter-deletion diagnostics (dfbetas) for a fitted 'ppm';
  # see leverage.ppm above.
  fitname <- short.deparse(substitute(model))
  a <- ppmInfluence(model, what="dfbetas", drop=drop,
                    iScore=iScore, iHessian=iHessian, iArgs=iArgs,
                    ...,
                    fitname=fitname)
  return(a$dfbetas)
}

ppmInfluence <- function(fit,
                         what=c("leverage", "influence", "dfbetas"),
                         ...,
                         iScore=NULL, iHessian=NULL, iArgs=NULL,
                         drop=FALSE,
                         fitname=NULL) {
  # User-level wrapper: run the engine once, then repackage the requested
  # diagnostics ('leverage', 'influence', 'dfbetas') as classed objects.
  stuff <- ppmInfluenceEngine(fit, what=what, ...,
                              iScore=iScore, iHessian=iHessian, iArgs=iArgs,
                              drop=drop, fitname=fitname)
  # metadata carried along with each diagnostic
  fnam <- c("fitname", "fit.is.poisson")
  result <- list()
  if("lev" %in% names(stuff)) {
    lev <- stuff[c(fnam, "lev")]
    class(lev) <-
"leverage.ppm" result$leverage <- lev } if("infl" %in% names(stuff)) { infl <- stuff[c(fnam, "infl")] class(infl) <- "influence.ppm" result$influence <- infl } if(!is.null(dfb <- stuff$dfbetas)) { attr(dfb, "info") <- stuff[fnam] result$dfbetas <- dfb } other <- setdiff(names(stuff), c("lev", "infl", "dfbetas")) result[other] <- stuff[other] return(result) } ppmInfluenceEngine <- function(fit, what=c("leverage", "influence", "dfbetas", "score", "derivatives", "increments"), ..., iScore=NULL, iHessian=NULL, iArgs=NULL, drop=FALSE, method=c("C", "interpreted"), precomputed=list(), sparseOK=TRUE, fitname=NULL, multitypeOK=FALSE, entrywise = TRUE, matrix.action = c("warn", "fatal", "silent"), geomsmooth = TRUE) { logi <- identical(fit$method, "logi") if(is.null(fitname)) fitname <- short.deparse(substitute(fit)) stopifnot(is.ppm(fit)) ## type of calculation method <- match.arg(method) what <- match.arg(what, several.ok=TRUE) matrix.action <- match.arg(matrix.action) influencecalc <- any(what %in% c("leverage", "influence", "dfbetas")) hesscalc <- influencecalc || any(what == "derivatives") sparse <- sparseOK target <- paste(what, collapse=",") ## Detect presence of irregular parameters if(is.null(iArgs)) iArgs <- fit$covfunargs gotScore <- !is.null(iScore) gotHess <- !is.null(iHessian) needHess <- gotScore && hesscalc # may be updated later if(!gotHess && needHess) stop("Must supply iHessian", call.=FALSE) ## extract values from model, using precomputed values if given theta <- precomputed$coef %orifnull% coef(fit) lam <- precomputed$lambda %orifnull% fitted(fit, check=FALSE) mom <- precomputed$mom %orifnull% model.matrix(fit) p <- length(theta) Q <- quad.ppm(fit) w <- w.quad(Q) loc <- union.quad(Q) isdata <- is.data(Q) if(length(w) != length(lam)) stop(paste("Internal error: length(w) = ", length(w), "!=", length(lam), "= length(lam)")) ## extract negative Hessian matrix of regular part of log composite likelihood ## hess = negative Hessian H ## fgrad = 
Fisher-scoring-like gradient G = estimate of E[H] if(logi){ ## Intensity of dummy points rho <- fit$Q$param$rho %orifnull% intensity(as.ppp(fit$Q)) logiprob <- lam / (lam + rho) vclist <- vcov(fit, what = "internals", matrix.action="silent") hess <- vclist$Slog fgrad <- vclist$fisher invhess <- if(is.null(hess)) NULL else checksolve(hess, "silent") invfgrad <- if(is.null(fgrad)) NULL else checksolve(fgrad, "silent") if(is.null(invhess) || is.null(invfgrad)) { #' use more expensive estimate of variance terms vclist <- vcov(fit, what = "internals", fine=TRUE, matrix.action=matrix.action) hess <- vclist$Slog fgrad <- vclist$fisher #' try again - exit if really singular invhess <- checksolve(hess, matrix.action, "Hessian", target) invfgrad <- checksolve(fgrad, matrix.action, "gradient matrix", target) } # vc <- invhess %*% (vclist$Sigma1log+vclist$Sigma2log) %*% invhess } else { invfgrad <- vcov(fit, hessian=TRUE, matrix.action="silent") fgrad <- hess <- if(is.null(invfgrad)) NULL else checksolve(invfgrad, "silent") if(is.null(fgrad)) { invfgrad <- vcov(fit, hessian=TRUE, fine=TRUE, matrix.action=matrix.action) fgrad <- hess <- checksolve(invfgrad, matrix.action, "Hessian", target) } } ## evaluate additional (`irregular') components of score, if any iscoremat <- ppmDerivatives(fit, "gradient", iScore, loc, covfunargs=iArgs) gotScore <- !is.null(iscoremat) needHess <- gotScore && hesscalc if(!gotScore) { REG <- 1:ncol(mom) } else { ## count regular and irregular parameters nreg <- ncol(mom) nirr <- ncol(iscoremat) ## add extra columns to model matrix mom <- cbind(mom, iscoremat) REG <- 1:nreg IRR <- nreg + 1:nirr ## evaluate additional (`irregular') entries of Hessian ihessmat <- if(!needHess) NULL else ppmDerivatives(fit, "hessian", iHessian, loc, covfunargs=iArgs) if(gotHess <- !is.null(ihessmat)) { ## recompute negative Hessian of log PL and its mean fgrad <- hessextra <- matrix(0, ncol(mom), ncol(mom)) } if(!logi) { ## pseudolikelihood switch(method, interpreted = { 
for(i in seq(loc$n)) { # weight for integrand wti <- lam[i] * w[i] if(all(is.finite(wti))) { # integral of outer product of score momi <- mom[i, ] v1 <- outer(momi, momi, "*") * wti if(all(is.finite(v1))) fgrad <- fgrad + v1 # integral of Hessian # contributions nonzero for irregular parameters if(gotHess) { v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) * wti if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] + v2 } } } # subtract sum over data points if(gotHess) { for(i in which(isdata)) { v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] - v2 } hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }, C = { wlam <- lam * w fgrad <- sumouter(mom, wlam) if(gotHess) { # integral term isfin <- is.finite(wlam) & matrowall(is.finite(ihessmat)) vintegral <- if(all(isfin)) wlam %*% ihessmat else wlam[isfin] %*% ihessmat[isfin,, drop=FALSE] # sum over data points vdata <- .colSums(ihessmat[isdata, , drop=FALSE], sum(isdata), ncol(ihessmat), na.rm=TRUE) vcontrib <- vintegral - vdata hessextra[IRR, IRR] <- hessextra[IRR, IRR] + matrix(vcontrib, nirr, nirr) hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }) } else { if(!spatstat.options('developer')) stop("Logistic fits are not yet supported") ## logistic fit switch(method, interpreted = { oweight <- logiprob * (1 - logiprob) hweight <- ifelse(isdata, -(1 - logiprob), logiprob) for(i in seq(loc$n)) { ## outer product of score momi <- mom[i, ] v1 <- outer(momi, momi, "*") * oweight[i] if(all(is.finite(v1))) fgrad <- fgrad + v1 ## Hessian term ## contributions nonzero for irregular parameters if(gotHess) { v2 <- hweight[i] * matrix(as.numeric(ihessmat[i,]), nirr, nirr) if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] + v2 } } if(gotHess) { hess <- fgrad + hessextra invhess <- 
checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }, C = { oweight <- logiprob * (1 - logiprob) hweight <- ifelse(isdata, -(1 - logiprob), logiprob) fgrad <- sumouter(mom, oweight) if(gotHess) { # Hessian term isfin <- is.finite(hweight) & matrowall(is.finite(ihessmat)) vcontrib <- if(all(isfin)) hweight %*% ihessmat else hweight[isfin] %*% ihessmat[isfin,, drop=FALSE] hessextra[IRR, IRR] <- hessextra[IRR, IRR] + matrix(vcontrib, nirr, nirr) hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }) } invfgrad <- checksolve(fgrad, matrix.action, "gradient matrix", target) } if(!needHess) { if(!logi){ hess <- fgrad invhess <- invfgrad } } # ok <- NULL if(drop) { ok <- complete.cases(mom) if(all(ok)) { ok <- NULL } else { if((nbad <- sum(isdata[!ok])) > 0) warning(paste("NA value of canonical statistic at", nbad, ngettext(nbad, "data point", "data points")), call.=FALSE) Q <- Q[ok] mom <- mom[ok, , drop=FALSE] loc <- loc[ok] lam <- lam[ok] w <- w[ok] isdata <- isdata[ok] } } # ........ start assembling results ..................... # ## start building result result <- list(fitname=fitname, fit.is.poisson=is.poisson(fit)) class(result) <- "ppmInfluence" if(any(c("score", "derivatives") %in% what)) { ## calculate the composite score rawmean <- if(logi) logiprob else (lam * w) rawresid <- isdata - rawmean score <- matrix(rawresid, nrow=1) %*% mom if("score" %in% what) result$score <- score if("derivatives" %in% what) result$deriv <- list(mom=mom, score=score, fgrad=fgrad, invfgrad=invfgrad, hess=hess, invhess=invhess) if(all(what %in% c("score", "derivatives"))) return(result) } # compute effect of adding/deleting each quadrature point # columns index the point being added/deleted # rows index the points affected # ........ Poisson case .................................. eff <- mom # ........ Gibbs case .................................... 
## second order interaction terms ddS <- ddSintegrand <- NULL if(!is.poisson(fit)) { ## goal is to compute these effect matrices: eff.data <- eff.back <- matrix(0, nrow(eff), ncol(eff), dimnames=dimnames(eff)) U <- union.quad(Q) nU <- npoints(U) zerocif <- (lam == 0) anyzerocif <- any(zerocif) ## decide whether to split into blocks nX <- Q$data$n nD <- Q$dummy$n bls <- quadBlockSizes(nX, nD, p, announce=TRUE) nblocks <- bls$nblocks nperblock <- bls$nperblock ## if(nblocks > 1 && ("increments" %in% what)) { warning("Oversize quadrature scheme: cannot return array of increments", call.=FALSE) what <- setdiff(what, "increments") } R <- reach(fit) ## indices into original quadrature scheme whichok <- if(!is.null(ok)) which(ok) else seq_len(nX+nD) whichokdata <- whichok[isdata] whichokdummy <- whichok[!isdata] ## loop for(iblock in 1:nblocks) { first <- min(nD, (iblock - 1) * nperblock + 1) last <- min(nD, iblock * nperblock) # corresponding subset of original quadrature scheme if(!is.null(ok) || nblocks > 1) { ## subset for which we will compute the effect current <- c(whichokdata, whichokdummy[first:last]) ## find neighbours, needed for calculation other <- setdiff(seq_len(nU), current) crx <- crosspairs(U[current], U[other], R, what="indices") nabers <- other[unique(crx$j)] ## subset actually requested requested <- c(current, nabers) ## corresponding stuff ('B' for block) isdataB <- isdata[requested] zerocifB <- zerocif[requested] anyzerocifB <- any(zerocifB) momB <- mom[requested, , drop=FALSE] lamB <- lam[requested] if(logi) logiprobB <- logiprob[requested] wB <- w[requested] currentB <- seq_along(current) } else { requested <- NULL isdataB <- isdata zerocifB <- zerocif anyzerocifB <- anyzerocif momB <- mom lamB <- lam if(logi) logiprobB <- logiprob wB <- w } ## compute second order terms ## ddS[i,j, ] = Delta_i Delta_j S(x) ddS <- deltasuffstat(fit, restrict = "first", dataonly=FALSE, quadsub=requested, sparseOK=sparse) ## if(is.null(ddS)) { warning("Second order 
interaction terms are not implemented", " for this model; they are treated as zero", call.=FALSE) break } else { sparse <- inherits(ddS, "sparse3Darray") if(gotScore) { ## add extra planes of zeroes to second-order model matrix ## (zero because the irregular components are part of the trend) paddim <- c(dim(ddS)[1:2], nirr) if(!sparse) { ddS <- abind::abind(ddS, array(0, dim=paddim), along=3) } else { ddS <- bind.sparse3Darray(ddS, sparse3Darray(dims=paddim), along=3) } } } ## effect of addition/deletion of U[j] ## on score contribution from data points (sum automatically restricted to ## interior for border correction by earlier call to ## deltasuffstat(..., restrict = "first")) ddSX <- ddS[isdataB, , , drop=FALSE] eff.data.B <- marginSums(ddSX, c(2,3)) ## check if any quadrature points have zero conditional intensity; ## these do not contribute; the associated values of the sufficient ## statistic may be Infinite and must be explicitly set to zero. if(anyzerocifB) eff.data.B[zerocifB, ] <- 0 ## save results for current subset of quadrature points if(is.null(requested)) { eff.data <- eff.data.B } else { eff.data[current,] <- as.matrix(eff.data.B[currentB,,drop=FALSE]) } ## rm(ddSX, eff.data.B) ## effect of addition/deletion of U[j] on integral term in score changesignB <- ifelse(isdataB, -1, 1) if(!sparse) { if(logi){ stop("Non-sparse method is not implemented for method = 'logi'!") } else{ ## model matrix after addition/deletion of each U[j] ## mombefore[i,j,] <- mom[i,] di <- dim(ddS) mombefore <- array(apply(momB, 2, rep, times=di[2]), dim=di) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] momafter <- mombefore + momchange ## effect of addition/deletion of U[j] on lambda(U[i], X) if(gotScore){ lamratio <- exp(tensor::tensor(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else{ lamratio <- exp(tensor::tensor(momchange, theta, 3, 1)) } lamratio <- array(lamratio, dim=dim(momafter)) ddSintegrand <- lamB * (momafter * lamratio - mombefore) 
rm(lamratio) } rm(momchange, mombefore, momafter) gc() } else { if(logi){ ## Delta suff. stat. with sign change for data/dummy (sparse3Darray) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] ## theta^T %*% ddS (with sign -1/+1 according to data/dummy) as triplet sparse matrix if(gotScore){ momchangeeffect <- tenseur(momchange[,,REG,drop=FALSE], theta, 3, 1) } else{ momchangeeffect <- tenseur(momchange, theta, 3, 1) } momchangeeffect <- expandSparse(momchangeeffect, n = dim(ddS)[3], across = 3) ijk <- SparseIndices(momchangeeffect) ## Entrywise calculations below momchange <- as.numeric(momchange[ijk]) ## Transform to change in probability expchange <- exp(momchangeeffect$x) lamBi <- lamB[ijk$i] logiprobBi <- logiprobB[ijk$i] changesignBj <- changesignB[ijk$j] pchange <- changesignBj*(lamBi * expchange / (lamBi*expchange + rho) - logiprobBi) mombefore <- mom[cbind(ijk$i,ijk$k)] ## Note: changesignBj * momchange == as.numeric(ddS[ijk]) ddSintegrand <- (mombefore + momchange) * pchange + logiprobBi * changesignBj * momchange ddSintegrand <- sparse3Darray(i = ijk$i, j = ijk$j, k = ijk$k, x = ddSintegrand, dims = dim(ddS)) } else{ if(entrywise){ momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] if(gotScore){ lamratiominus1 <- expm1(tenseur(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else{ lamratiominus1 <- expm1(tenseur(momchange, theta, 3, 1)) } lamratiominus1 <- expandSparse(lamratiominus1, n = dim(ddS)[3], across = 3) ijk <- SparseIndices(lamratiominus1) ## Everything entrywise with ijk now: # lamratiominus1 <- lamratiominus1[cbind(ijk$i, ijk$j)] lamratiominus1 <- as.numeric(lamratiominus1$x) momchange <- as.numeric(momchange[ijk]) mombefore <- momB[cbind(ijk$i, ijk$k)] momafter <- mombefore + momchange ## lamarray[i,j,k] <- lam[i] lamarray <- lamB[ijk$i] ddSintegrand <- lamarray * (momafter * lamratiominus1 + momchange) ddSintegrand <- sparse3Darray(i = ijk$i, j = ijk$j, k = ijk$k, x = ddSintegrand, dims = dim(ddS)) } else{ 
## Entries are required only for pairs i,j which interact. ## mombefore[i,j,] <- mom[i,] mombefore <- mapSparseEntries(ddS, 1, momB, conform=TRUE, across=3) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] momafter <- evalSparse3Dentrywise(mombefore + momchange) ## lamarray[i,j,k] <- lam[i] lamarray <- mapSparseEntries(ddS, 1, lamB, conform=TRUE, across=3) if(gotScore){ lamratiominus1 <- expm1(tenseur(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else{ lamratiominus1 <- expm1(tenseur(momchange,theta, 3, 1)) } lamratiominus1 <- expandSparse(lamratiominus1, n = dim(ddS)[3], across = 3) ddSintegrand <- evalSparse3Dentrywise(lamarray * (momafter* lamratiominus1 + momchange)) } rm(lamratiominus1, lamarray, momafter) } rm(momchange, mombefore) } if(anyzerocifB) { ddSintegrand[zerocifB,,] <- 0 ddSintegrand[,zerocifB,] <- 0 } ## integrate if(logi){ # eff.back.B <- tenseur(ddSintegrand, rep(1, length(wB)), 1, 1) eff.back.B <- marginSums(ddSintegrand, c(2,3)) } else{ eff.back.B <- changesignB * tenseur(ddSintegrand, wB, 1, 1) } ## save contribution if(is.null(requested)) { eff.back <- eff.back.B } else { eff.back[current,] <- as.matrix(eff.back.B[currentB, , drop=FALSE]) } } ## total eff <- eff + eff.data - eff.back eff <- as.matrix(eff) } if("increments" %in% what) { result$increm <- list(ddS=ddS, ddSintegrand=ddSintegrand, isdata=isdata, wQ=w) } if(!any(c("leverage", "influence", "dfbetas") %in% what)) return(result) # ............ compute leverage, influence, dfbetas .............. # compute basic contribution from each quadrature point nloc <- npoints(loc) switch(method, interpreted = { b <- numeric(nloc) for(i in seq(nloc)) { effi <- eff[i,, drop=FALSE] momi <- mom[i,, drop=FALSE] b[i] <- momi %*% invhess %*% t(effi) } }, C = { b <- bilinearform(mom, invhess, eff) }) # .......... leverage ............. 
if("leverage" %in% what) { ## values of leverage (diagonal) at points of 'loc' h <- b * lam ok <- is.finite(h) if(mt <- is.multitype(loc)) h <- data.frame(leverage=h, type=marks(loc)) levval <- (loc %mark% h)[ok] levvaldum <- levval[!isdata[ok]] geomsmooth <- geomsmooth && all(marks(levvaldum) >= 0) if(!mt) { levsmo <- Smooth(levvaldum, sigma=maxnndist(loc), geometric=geomsmooth) } else { levsplitdum <- split(levvaldum, reduce=TRUE) levsmo <- Smooth(levsplitdum, sigma=max(sapply(levsplitdum, maxnndist)), geometric=geomsmooth) } ## nominal mean level a <- area(Window(loc)) * markspace.integral(loc) levmean <- p/a lev <- list(val=levval, smo=levsmo, ave=levmean) result$lev <- lev } # .......... influence ............. if("influence" %in% what) { if(logi){ X <- loc effX <- as.numeric(isdata) * eff - mom * logiprob } else{ # values of influence at data points X <- loc[isdata] effX <- eff[isdata, ,drop=FALSE] } M <- (1/p) * quadform(effX, invhess) if(logi || is.multitype(X)) { # result will have several columns of marks M <- data.frame(influence=M) if(logi) M$isdata <- factor(isdata, levels = c(TRUE, FALSE), labels = c("data", "dummy")) if(is.multitype(X)) M$type <- marks(X) } V <- X %mark% M result$infl <- V } # .......... dfbetas ............. 
if("dfbetas" %in% what) { if(logi){ M <- as.numeric(isdata) * eff - mom * logiprob M <- t(invhess %*% t(M)) Mdum <- M Mdum[isdata,] <- 0 Mdum <- Mdum / w.quad(Q) result$dfbetas <- msr(Q, M[isdata, ], Mdum) } else{ vex <- invhess %*% t(mom) dex <- invhess %*% t(eff) switch(method, interpreted = { dis <- con <- matrix(0, nloc, ncol(mom)) for(i in seq(nloc)) { vexi <- vex[,i, drop=FALSE] dexi <- dex[,i, drop=FALSE] dis[i, ] <- if(isdata[i]) dexi else 0 con[i, ] <- - lam[i] * vexi } }, C = { dis <- t(dex) dis[!isdata,] <- 0 con <- - lam * t(vex) con[lam == 0,] <- 0 }) colnames(dis) <- colnames(con) <- colnames(mom) # result is a vector valued measure result$dfbetas <- msr(Q, dis[isdata, ], con) } } return(result) } ## extract derivatives from covariate functions ## WARNING: these are not the score components in general ppmDerivatives <- function(fit, what=c("gradient", "hessian"), Dcovfun=NULL, loc, covfunargs=list()) { what <- match.arg(what) if(!is.null(Dcovfun)) { ## use provided function Dcov to compute derivatives Dvalues <- mpl.get.covariates(Dcovfun, loc, covfunargs=covfunargs) result <- as.matrix(as.data.frame(Dvalues)) return(result) } ## any irregular parameters? 
if(length(covfunargs) == 0) return(NULL) ## Try to extract derivatives from covariate functions ## This often works if the functions were created by symbolic differentiation fvalues <- mpl.get.covariates(fit$covariates, loc, covfunargs=covfunargs, need.deriv=TRUE) Dlist <- attr(fvalues, "derivatives")[[what]] if(length(Dlist) == 0) return(NULL) switch(what, gradient = { result <- do.call(cbind, unname(lapply(Dlist, as.data.frame))) result <- as.matrix(result) }, hessian = { ## construct array containing Hessian matrices biga <- do.call(blockdiagarray, Dlist) ## flatten matrices result <- matrix(biga, nrow=dim(biga)[1L]) }) return(result) } plot.leverage.ppm <- function(x, ..., showcut=TRUE, col.cut=par("fg"), args.contour=list(), multiplot=TRUE) { fitname <- x$fitname defaultmain <- paste("Leverage for", fitname) y <- x$lev smo <- y$smo ave <- y$ave if(!multiplot && inherits(smo, "imlist")) { ave <- ave * length(smo) smo <- Reduce("+", smo) defaultmain <- c(defaultmain, "(sum over all types of point)") } cutinfo <- list(addcontour=showcut, args.contour=append(list(levels=ave, col=col.cut), args.contour)) if(is.im(smo)) { do.call(plot.im, resolve.defaults(list(smo), list(...), cutinfo, list(main=defaultmain))) } else if(inherits(smo, "imlist")) { do.call(plot.solist, resolve.defaults(list(smo), list(...), cutinfo, list(main=defaultmain))) } invisible(NULL) } plot.influence.ppm <- function(x, ..., multiplot=TRUE) { fitname <- x$fitname defaultmain <- paste("Influence for", fitname) y <- x$infl if(multiplot && isTRUE(ncol(marks(y)) > 1)) { # apart from the influence value, there may be additional columns of marks # containing factors: {type of point}, { data vs dummy in logistic case } ma <- as.data.frame(marks(y)) fax <- sapply(ma, is.factor) nfax <- sum(fax) if(nfax == 1) { # split on first available factor, and remove this factor y <- split(y, reduce=TRUE) } else if(nfax > 1) { # several factors: split according to them all, and remove them all f.all <- 
        do.call(interaction, ma[fax])
      z <- y %mark% ma[,!fax]
      y <- split(z, f.all)
    }
  }
  do.call(plot,
          resolve.defaults(list(y),
                           list(...),
                           list(main=defaultmain,
                                multiplot=multiplot,
                                which.marks=1)))
}

persp.leverage.ppm <- function(x, ..., main) {
  # Perspective plot of the smoothed leverage function.
  # When the smoothed leverage is a list of images (multitype case),
  # plot each in turn, prompting between plots.
  if(missing(main)) main <- deparse(substitute(x))
  y <- as.im(x)
  if(is.im(y)) return(persp(y, main=main, ...))
  pa <- par(ask=TRUE)
  lapply(y, persp, main=main, ...)
  par(pa)
  return(invisible(NULL))
}

as.im.leverage.ppm <- function(X, ...) {
  # Extract the smoothed leverage values.
  return(X$lev$smo) # could be either an image or a list of images
}

as.function.leverage.ppm <- function(x, ...) {
  # Convert the exact leverage values to a spatial function,
  # via a spatially sampled function ('ssf') object.
  X <- x$lev$val
  S <- ssf(unmark(X), marks(X))
  return(as.function(S))
}

as.ppp.influence.ppm <- function(X, ...) {
  # The influence values are stored as a marked point pattern.
  return(X$infl)
}

as.owin.leverage.ppm <- function(W, ..., fatal=TRUE) {
  # Window of the leverage object (taken from its smoothed image;
  # first image in the multitype case).
  y <- as.im(W)
  if(inherits(y, "imlist")) y <- y[[1L]]
  as.owin(y, ..., fatal=fatal)
}

as.owin.influence.ppm <- function(W, ..., fatal=TRUE) {
  # Window of the influence object (window of its point pattern).
  as.owin(as.ppp(W), ..., fatal=fatal)
}

# domain() and Window() are synonyms for as.owin() on these classes.
domain.leverage.ppm <- domain.influence.ppm <-
  Window.leverage.ppm <- Window.influence.ppm <-
  function(X, ...) { as.owin(X) }

print.leverage.ppm <- function(x, ...) {
  # Print method: show exact values, smoothed values, and (for Poisson
  # models) the nominal average leverage.
  splat("Point process leverage function")
  fitname <- x$fitname
  splat("for model:", fitname)
  lev <- x$lev
  splat("\nExact values:")
  print(lev$val)
  splat("\nSmoothed values:")
  print(lev$smo)
  ## for compatibility we retain the x$fit usage
  if(x$fit.is.poisson %orifnull% is.poisson(x$fit))
    splat("\nAverage value:", lev$ave)
  return(invisible(NULL))
}

print.influence.ppm <- function(x, ...) {
  # Print method for the influence diagnostic.
  splat("Point process influence measure")
  fitname <- x$fitname
  splat("for model:", fitname)
  splat("\nExact values:")
  print(x$infl)
  return(invisible(NULL))
}

"[.leverage.ppm" <- function(x, i, ..., update=TRUE) {
  # Subset method: restrict both the smoothed image(s) and the exact
  # values to the subset i; optionally recompute the average level.
  if(missing(i)) return(x)
  y <- x$lev
  smoi <- if(is.im(y$smo)) y$smo[i, ...] else solapply(y$smo, "[", i=i, ...)
  if(!inherits(smoi, c("im", "imlist"))) return(smoi)
  y$smo <- smoi
  y$val <- y$val[i, ...]
  if(update)
    y$ave <- if(is.im(smoi)) mean(smoi) else mean(sapply(smoi, mean))
  x$lev <- y
  return(x)
}

"[.influence.ppm" <- function(x, i, ...) {
  # Subset method: restrict the influence point pattern to the subset i.
  if(missing(i)) return(x)
  y <- x$infl[i, ...]
  if(!is.ppp(y)) return(y)
  x$infl <- y
  return(x)
}

shift.leverage.ppm <- function(X, ...) {
  # Apply a spatial shift to all components of the leverage object,
  # using the vector determined by shifting its window.
  vec <- getlastshift(shift(as.owin(X), ...))
  X$lev$val <- shift(X$lev$val, vec=vec)
  smo <- X$lev$smo
  X$lev$smo <-
    if(is.im(smo)) shift(smo, vec=vec) else solapply(smo, shift, vec=vec)
  return(putlastshift(X, vec))
}

shift.influence.ppm <- function(X, ...) {
  # Apply a spatial shift to the influence point pattern.
  X$infl <- shift(X$infl, ...)
  return(putlastshift(X, getlastshift(X$infl)))
}

spatstat/R/rmhcontrol.R0000755000176200001440000001664313115271120014636 0ustar liggesusers#
#
#   rmhcontrol.R
#
#   $Revision: 1.30 $  $Date: 2017/01/29 07:20:51 $
#
#

rmhcontrol <- function(...) {
  # Generic: assemble control parameters for the
  # Metropolis-Hastings algorithm (rmh).
  UseMethod("rmhcontrol")
}

rmhcontrol.rmhcontrol <- function(...) {
  # An existing rmhcontrol object is returned unchanged;
  # anything more than a single such argument is an error.
  argz <- list(...)
  if(length(argz) == 1) return(argz[[1]])
  stop("Arguments not understood")
}

rmhcontrol.list <- function(...) {
  # A single unnamed list is unpacked into the arguments of
  # rmhcontrol.default; otherwise arguments are matched by name.
  argz <- list(...)
  nama <- names(argz)
  if(length(argz) == 1 && !any(nzchar(nama)))
    do.call(rmhcontrol.default, argz[[1]])
  else
    do.call.matched(rmhcontrol.default, argz)
}

rmhcontrol.default <- function(..., p=0.9, q=0.5, nrep=5e5,
                               expand=NULL, periodic=NULL, ptypes=NULL,
                               x.cond=NULL, fixall=FALSE, nverb=0,
                               nsave=NULL, nburn=nsave, track=FALSE,
                               pstage=c("block", "start")) {
  argh <- list(...)
nargh <- length(argh) if(nargh > 0) { # allow rmhcontrol(NULL), otherwise flag an error if(!(nargh == 1 && is.null(argh[[1]]))) stop(paste("Unrecognised arguments to rmhcontrol;", "valid arguments are listed in help(rmhcontrol.default)")) } # impose default values if(missing(p)) p <- spatstat.options("rmh.p") if(missing(q)) q <- spatstat.options("rmh.q") if(missing(nrep)) nrep <- spatstat.options("rmh.nrep") # validate arguments if(!is.numeric(p) || length(p) != 1 || p < 0 || p > 1) stop("p should be a number in [0,1]") if(!is.numeric(q) || length(q) != 1 || q < 0 || q > 1) stop("q should be a number in [0,1]") if(!is.numeric(nrep) || length(nrep) != 1 || nrep < 1) stop("nrep should be an integer >= 1") nrep <- as.integer(nrep) if(!is.numeric(nverb) || length(nverb) != 1 || nverb < 0 || nverb > nrep) stop("nverb should be an integer <= nrep") nverb <- as.integer(nverb) if(!is.logical(fixall) || length(fixall) != 1) stop("fixall should be a logical value") if(!is.null(periodic) && (!is.logical(periodic) || length(periodic) != 1)) stop(paste(sQuote("periodic"), "should be a logical value or NULL")) if(saving <- !is.null(nsave)) { if(!is.numeric(nsave) || length(nsave) != 1 || nsave < 0 || nsave >= nrep) stop("nsave should be an integer < nrep") if(is.null(nburn)) nburn <- min(nsave, nrep-nsave) if(!is.null(nburn)) stopifnot(nburn + nsave <= nrep) } stopifnot(is.logical(track)) pstage <- match.arg(pstage) ################################################################# # Conditioning on point configuration # # condtype = "none": no conditioning # condtype = "Palm": conditioning on the presence of specified points # condtype = "window": conditioning on the configuration in a subwindow # if(is.null(x.cond)) { condtype <- "none" n.cond <- NULL } else if(is.ppp(x.cond)) { condtype <- "window" n.cond <- x.cond$n } else if(is.data.frame(x.cond)) { if(ncol(x.cond) %in% c(2,3)) { condtype <- "Palm" n.cond <- nrow(x.cond) } else stop("Wrong number of columns in data frame 
x.cond") } else if(is.list(x.cond)) { if(length(x.cond) %in% c(2,3)) { x.cond <- as.data.frame(x.cond) condtype <- "Palm" n.cond <- nrow(x.cond) } else stop("Wrong number of components in list x.cond") } else stop("Unrecognised format for x.cond") if(condtype == "Palm" && n.cond == 0) { warning(paste("Ignored empty configuration x.cond;", "conditional (Palm) simulation given an empty point pattern", "is equivalent to unconditional simulation")) condtype <- "none" x.cond <- NULL n.cond <- NULL } ################################################################# # Fixing the number of points? # # fixcode = 1 <--> no conditioning # fixcode = 2 <--> conditioning on n = number of points # fixcode = 3 <--> conditioning on the number of points of each type. fixcode <- 2 - (p<1) + fixall - fixall*(p<1) fixing <- switch(fixcode, "none", "n.total", "n.each.type") # Warn about silly combination if(fixall && p < 1) warning("fixall = TRUE conflicts with p < 1. Ignored.\n") ############################################################### # `expand' determines expansion of the simulation window expand <- rmhexpand(expand) # No expansion is permitted if we are conditioning on the # number of points if(fixing != "none") { if(expand$force.exp) stop(paste("When conditioning on the number of points,", "no expansion may be done.\n"), call.=FALSE) # no expansion expand <- .no.expansion } ################################################################### # return augmented list out <- list(p=p, q=q, nrep=nrep, nverb=nverb, expand=expand, periodic=periodic, ptypes=ptypes, fixall=fixall, fixcode=fixcode, fixing=fixing, condtype=condtype, x.cond=x.cond, saving=saving, nsave=nsave, nburn=nburn, track=track, pstage=pstage) class(out) <- c("rmhcontrol", class(out)) return(out) } print.rmhcontrol <- function(x, ...) 
{ verifyclass(x, "rmhcontrol") splat("Metropolis-Hastings algorithm control parameters") splat("Probability of shift proposal: p =", x$p) if(x$fixing == "none") { splat("Conditional probability of death proposal: q =", x$q) if(!is.null(x$ptypes)) { splat("Birth proposal probabilities for each type of point:") print(x$ptypes) } } switch(x$fixing, none={}, n.total=splat("The total number of points is fixed"), n.each.type=splat("The number of points of each type is fixed")) switch(x$condtype, none={}, window={ splat("Conditional simulation given the", "configuration in a subwindow") print(x$x.cond$window) }, Palm={ splat("Conditional simulation of Palm type") }) splat("Number of M-H iterations: nrep =", x$nrep) if(x$saving) splat("Save point pattern every", x$nsave, "iterations", "after a burn-in of", x$nburn, "iterations.") pstage <- x$pstage %orifnull% "start" hdr <- "Generate random proposal points:" switch(pstage, start = splat(hdr, "at start of simulations."), block = splat(hdr, "before each block of", x$nsave, "iterations.")) cat(paste("Track proposal type and acceptance/rejection?", if(x$track) "yes" else "no", "\n")) if(x$nverb > 0) cat(paste("Progress report every nverb=", x$nverb, "iterations\n")) else cat("No progress reports (nverb = 0).\n") # invoke print.rmhexpand print(x$expand) cat("Periodic edge correction? ") if(is.null(x$periodic)) cat("Not yet determined.\n") else if(x$periodic) cat("Yes.\n") else cat("No.\n") # return(invisible(NULL)) } default.rmhcontrol <- function(model, w=NULL) { # set default for 'expand' return(rmhcontrol(expand=default.expand(model, w=w))) } update.rmhcontrol <- function(object, ...) 
{ do.call.matched(rmhcontrol.default, resolve.defaults(list(...), as.list(object), .StripNull=TRUE)) } rmhResolveControl <- function(control, model) { # adjust control information once the model is known stopifnot(inherits(control, "rmhcontrol")) # change *default* expansion rule to something appropriate for model # (applies only if expansion rule is undecided) control$expand <- change.default.expand(control$expand, default.expand(model)) return(control) } spatstat/R/distances.R0000755000176200001440000001473013115271075014430 0ustar liggesusers# # distances.R # # $Revision: 1.46 $ $Date: 2017/06/05 10:31:58 $ # # # Interpoint distances between pairs # # pairdist <- function(X, ...) { UseMethod("pairdist") } pairdist.ppp <- function(X, ..., periodic=FALSE, method="C", squared=FALSE) { verifyclass(X, "ppp") if(!periodic) return(pairdist.default(X$x, X$y, method=method, squared=squared)) # periodic case W <- X$window if(W$type != "rectangle") stop(paste("periodic edge correction can't be applied", "in a non-rectangular window")) wide <- diff(W$xrange) high <- diff(W$yrange) return(pairdist.default(X$x, X$y, period=c(wide,high), method=method, squared=squared)) } pairdist.default <- function(X, Y=NULL, ..., period=NULL, method="C", squared=FALSE) { xy <- xy.coords(X,Y)[c("x","y")] x <- xy$x y <- xy$y n <- length(x) if(length(y) != n) stop("lengths of x and y do not match") # special cases if(n == 0) return(matrix(numeric(0), nrow=0, ncol=0)) else if(n == 1L) return(matrix(0,nrow=1L,ncol=1L)) if((periodic<- !is.null(period))) { stopifnot(is.numeric(period)) stopifnot(length(period) == 2 || length(period) == 1) stopifnot(all(period > 0)) if(length(period) == 1) period <- rep.int(period, 2) wide <- period[1L] high <- period[2L] } switch(method, interpreted={ xx <- matrix(rep.int(x, n), nrow = n) yy <- matrix(rep.int(y, n), nrow = n) if(!periodic) { d2 <- (xx - t(xx))^2 + (yy - t(yy))^2 } else { dx <- xx - t(xx) dy <- yy - t(yy) dx2 <- pmin.int(dx^2, (dx + wide)^2, (dx - 
wide)^2) dy2 <- pmin.int(dy^2, (dy + high)^2, (dy - high)^2) d2 <- dx2 + dy2 } if(squared) dout <- d2 else dout <- sqrt(d2) }, C={ d <- numeric( n * n) if(!periodic) { z<- .C("Cpairdist", n = as.integer(n), x= as.double(x), y= as.double(y), squared=as.integer(squared), d= as.double(d), PACKAGE = "spatstat") } else { z <- .C("CpairPdist", n = as.integer(n), x= as.double(x), y= as.double(y), xwidth=as.double(wide), yheight=as.double(high), squared = as.integer(squared), d= as.double(d), PACKAGE = "spatstat") } dout <- matrix(z$d, nrow=n, ncol=n) }, stop(paste("Unrecognised method", sQuote(method))) ) return(dout) } crossdist <- function(X, Y, ...) { UseMethod("crossdist") } crossdist.ppp <- function(X, Y, ..., periodic=FALSE, method="C", squared=FALSE) { verifyclass(X, "ppp") Y <- as.ppp(Y) if(!periodic) return(crossdist.default(X$x, X$y, Y$x, Y$y, method=method, squared=squared)) # periodic case WX <- X$window WY <- Y$window if(WX$type != "rectangle" || WY$type != "rectangle") stop(paste("periodic edge correction can't be applied", "in non-rectangular windows")) if(!is.subset.owin(WX,WY) || !is.subset.owin(WY, WX)) stop(paste("periodic edge correction is not implemented", "for the case when X and Y lie in different rectangles")) wide <- diff(WX$xrange) high <- diff(WX$yrange) return(crossdist.default(X$x, X$y, Y$x, Y$y, period=c(wide,high), method=method, squared=squared)) } crossdist.default <- function(X, Y, x2, y2, ..., period=NULL, method="C", squared=FALSE) { x1 <- X y1 <- Y # returns matrix[i,j] = distance from (x1[i],y1[i]) to (x2[j],y2[j]) if(length(x1) != length(y1)) stop("lengths of x and y do not match") if(length(x2) != length(y2)) stop("lengths of x2 and y2 do not match") n1 <- length(x1) n2 <- length(x2) if(n1 == 0 || n2 == 0) return(matrix(numeric(0), nrow=n1, ncol=n2)) if((periodic<- !is.null(period))) { stopifnot(is.numeric(period)) stopifnot(length(period) == 2 || length(period) == 1) stopifnot(all(period > 0)) if(length(period) == 1L) period <- 
rep.int(period, 2) wide <- period[1L] high <- period[2L] } switch(method, interpreted = { X1 <- matrix(rep.int(x1, n2), ncol = n2) Y1 <- matrix(rep.int(y1, n2), ncol = n2) X2 <- matrix(rep.int(x2, n1), ncol = n1) Y2 <- matrix(rep.int(y2, n1), ncol = n1) if(!periodic) d2 <- (X1 - t(X2))^2 + (Y1 - t(Y2))^2 else { dx <- X1 - t(X2) dy <- Y1 - t(Y2) dx2 <- pmin.int(dx^2, (dx + wide)^2, (dx - wide)^2) dy2 <- pmin.int(dy^2, (dy + high)^2, (dy - high)^2) d2 <- dx2 + dy2 } return(if(squared) d2 else sqrt(d2)) }, C = { if(!periodic) { z<- .C("Ccrossdist", nfrom = as.integer(n1), xfrom = as.double(x1), yfrom = as.double(y1), nto = as.integer(n2), xto = as.double(x2), yto = as.double(y2), squared = as.integer(squared), d = as.double(matrix(0, nrow=n1, ncol=n2)), PACKAGE = "spatstat") } else { z<- .C("CcrossPdist", nfrom = as.integer(n1), xfrom = as.double(x1), yfrom = as.double(y1), nto = as.integer(n2), xto = as.double(x2), yto = as.double(y2), xwidth = as.double(wide), yheight = as.double(high), squared = as.integer(squared), d = as.double(matrix(0, nrow=n1, ncol=n2)), PACKAGE = "spatstat") } return(matrix(z$d, nrow=n1, ncol=n2)) }, stop(paste("Unrecognised method", method)) ) } spatstat/R/rose.R0000644000176200001440000002424713115225157013424 0ustar liggesusers#' #' rose.R #' #' Rose diagrams #' #' $Revision: 1.9 $ $Date: 2015/08/25 08:19:19 $ #' rose <- function(x, ...) 
UseMethod("rose") rose.default <- local({ rose.default <- function(x, breaks = NULL, ..., weights=NULL, nclass=NULL, unit=c("degree", "radian"), start=0, clockwise=FALSE, main) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) stopifnot(is.numeric(x)) if(!is.null(weights)) check.nvector(weights, length(x), things="observations") #' determine units missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(x, unit, missu) FullCircle <- switch(unit, degree = 360, radian = 2*pi) #' reduce to [0, 2pi] x <- x %% FullCircle #' determine breakpoints strictly inside full circle breaks <- makebreaks(x, c(0, FullCircle), breaks, nclass) #' histogram without weights h <- do.call.matched(hist.default, list(x=x, breaks=breaks, ..., plot=FALSE), skipargs=graphicsAargh, sieve=TRUE) result <- h$result otherargs <- h$otherargs #' redo weights, if given if(!is.null(weights)) { wh <- whist(x=x, breaks=breaks, weights=weights) result$count <- wh result$density <- wh/diff(breaks) } # do.call(rose.histogram, c(list(x=result, main=main, unit=unit, start=start, clockwise=clockwise), otherargs)) } graphicsAargh <- c("density", "angle", "col", "border", "xlim", "ylim", "xlab", "ylab", "axes") makebreaks <- function(x, r, breaks=NULL, nclass=NULL) { use.br <- !is.null(breaks) if (use.br) { if (!is.null(nclass)) warning("'nclass' not used when 'breaks' is specified") } else if (!is.null(nclass) && length(nclass) == 1L) { breaks <- nclass } else breaks <- "Sturges" use.br <- use.br && (nB <- length(breaks)) > 1L if (use.br) breaks <- sort(breaks) else { if (is.character(breaks)) { breaks <- match.arg(tolower(breaks), c("sturges", "fd", "freedman-diaconis", "scott")) breaks <- switch(breaks, sturges = nclass.Sturges(x), `freedman-diaconis` = , fd = nclass.FD(x), scott = nclass.scott(x), stop("unknown 'breaks' algorithm")) } else if (is.function(breaks)) { breaks <- breaks(x) } if (length(breaks) == 1) { if (!is.numeric(breaks) || !is.finite(breaks) || breaks 
< 1L) stop("invalid number of 'breaks'") breaks <- seq(r[1], r[2], length.out=breaks) } else { if (!is.numeric(breaks) || length(breaks) <= 1) stop(gettextf("Invalid breakpoints produced by 'breaks(x)': %s", format(breaks)), domain = NA) breaks <- sort(breaks) } } return(breaks) } rose.default }) rose.histogram <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) #' determine units missu <- missing(unit) unit <- match.arg(unit) #' validate bks <- x$breaks unit <- validate.angles(bks, unit, missu) # FullCircle <- switch(unit, degree = 360, radian = 2*pi) #' get sector sizes y <- x$density ymax <- max(y) #' draw disc insideclearance <- 0.1 outsidespace <- if(!is.null(at) && length(at) == 0) 0 else if(identical(labels, FALSE)) 0.1 else 0.25 R <- (1+insideclearance) * ymax DD <- disc(R) Rout <- (1 + outsidespace) * R result <- do.call.matched(plot.owin, resolve.defaults(list(x=disc(Rout), main=main, type="n"), list(...))) do.call.matched(plot.owin, resolve.defaults(list(x=DD, hatch=FALSE, add=TRUE), list(...)), extrargs=graphicsPars("owin"), skipargs="col") if(do.plot) { #' draw sectors ang <- ang2rad(bks, unit=unit, start=start, clockwise=clockwise) eps <- min(diff(ang), pi/128)/2 for(i in seq_along(y)) { aa <- seq(ang[i], ang[i+1], by=eps) aa[length(aa)] <- ang[i+1] yi <- y[i] xx <- c(0, yi * cos(aa), 0) yy <- c(0, yi * sin(aa), 0) do.call.matched(polygon, list(x=xx, y=yy, ...)) } #' add tick marks circticks(R, at=at, unit=unit, start=start, clockwise=clockwise, labels=labels) } #' return(invisible(result)) } rose.density <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) ang <- x$x rad <- x$y missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(ang, unit, missu) #' result <- 
roseContinuous(ang, rad, unit, ..., start=start, clockwise=clockwise, main=main, labels=labels, at=at, do.plot=do.plot) return(invisible(result)) } rose.fv <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) ang <- with(x, .x) rad <- with(x, .y) missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(ang, unit, missu) #' result <- roseContinuous(ang, rad, unit, ..., start=start, clockwise=clockwise, main=main, labels=labels, at=at, do.plot=do.plot) return(invisible(result)) } roseContinuous <- function(ang, rad, unit, ..., start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { rmax <- max(rad) #' draw disc insideclearance <- 0.1 outsidespace <- if(!is.null(at) && length(at) == 0) 0 else if(identical(labels, FALSE)) 0.1 else 0.25 R <- (1+insideclearance) * rmax DD <- disc(R) Rout <- (1 + outsidespace) * R result <- do.call.matched(plot.owin, resolve.defaults(list(x=disc(Rout), main=main, type="n"), list(...))) do.call.matched(plot.owin, resolve.defaults(list(x=DD, add=TRUE, hatch=FALSE), list(...)), extrargs=graphicsPars("owin"), skipargs="col") #' draw plot if(do.plot) { ang <- ang2rad(ang, unit=unit, start=start, clockwise=clockwise) xx <- rad * cos(ang) yy <- rad * sin(ang) do.call.matched(polygon, list(x=xx, y=yy, ...), extrargs="lwd") circticks(R, at=at, unit=unit, start=start, clockwise=clockwise, labels=labels) } return(result) } ang2rad <- local({ compasspoints <- c(E=0,N=90,W=180,S=270) ang2rad <- function(ang, unit=c("degree", "radian"), start=0, clockwise=FALSE) { unit <- match.arg(unit) clocksign <- if(clockwise) -1 else 1 stopifnot(length(start) == 1) if(is.character(start)) { if(is.na(match(toupper(start), names(compasspoints)))) stop(paste("Unrecognised compass point", sQuote(start)), call.=FALSE) startdegrees <- compasspoints[[start]] start <- switch(unit, degree = startdegrees, radian 
= pi * (startdegrees/180)) # start is measured anticlockwise ang <- start + clocksign * ang } else { stopifnot(is.numeric(start)) # start is measured according to value of 'clockwise' ang <- clocksign * (start + ang) } rad <- switch(unit, degree = pi * (ang/180), radian = ang) return(rad) } ang2rad }) circticks <- function(R, at=NULL, unit=c("degree", "radian"), start=0, clockwise=FALSE, labels=TRUE) { unit <- match.arg(unit) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(is.null(at)) { at <- FullCircle * (0:23)/24 major <- ((0:23) %% 6 == 0) } else { if(length(at) == 0) return(invisible(NULL)) nat <- (at/FullCircle) * 4 major <- abs(nat - round(nat)) < 0.01 } atradians <- ang2rad(ang=at, unit=unit, start=start, clockwise=clockwise) tx <- R * cos(atradians) ty <- R * sin(atradians) expan <- ifelse(major, 1.1, 1.05) segments(tx, ty, expan * tx, expan * ty, lwd=major+1) if(!identical(labels, FALSE)) { if(identical(labels, TRUE)) { labels <- switch(unit, degree=paste(round(at)), radian=parse(text= simplenumber(at/pi, "pi", "*", 1e-3))) } else stopifnot(is.vector(labels) && length(labels) == length(at)) big <- expan + 0.1 text(big * tx, big * ty, labels=labels) } invisible(NULL) } validate.angles <- function(angles, unit=c("degree", "radian"), guess=TRUE) { #' validate width <- diff(range(angles)) if(missing(unit) && guess && width <= 6.2832) { warning("Very small range of angles: treating them as radian") unit <- "radian" } else unit <- match.arg(unit) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(width > 1.002 * FullCircle) stop("Range of angles exceeds a full circle") return(unit) } spatstat/R/reduceformula.R0000755000176200001440000000627413115271120015303 0ustar liggesusers# # reduceformula.R # # $Revision: 1.7 $ $Date: 2016/12/30 01:44:07 $ # # delete variable from formula # #...................................................... 
# reduceformula <- function(fmla, deletevar, verbose=FALSE) { ## removes the variable `deletevar' from the formula `fmla' ## returns a simplified formula, or NULL if it can't simplify. stopifnot(inherits(fmla, "formula")) stopifnot(is.character(deletevar) && length(deletevar) == 1) if(!(deletevar %in% all.vars(as.expression(fmla)))) { if(verbose) message(paste("The formula does not involve", dQuote(deletevar), "and is therefore unchanged")) return(fmla) } lhs <- if(length(fmla) < 3) NULL else fmla[[2]] ## create terms object tt <- attributes(terms(fmla)) ## formula.has.intercept <- (tt$intercept == 1) ## extract all variables appearing in the model vars <- as.list(tt$variables)[-1] nvars <- length(vars) varexprs <- lapply(vars, as.expression) varstrings <- sapply(varexprs, paste) ## identify any offsets offs <- tt$offset v.is.offset <- if(!is.null(offs)) (1:nvars) %in% offs else rep(FALSE, nvars) ## remove the response repo <- tt$response if(repo != 0) { vars <- vars[-repo] varstrings <- varstrings[-repo] varexprs <- varexprs[-repo] v.is.offset <- v.is.offset[-repo] } ## a term may be a variable name ## v.is.name <- sapply(vars, is.name) ## a term may be an expression like sin(x), poly(x,y,degree=2) v.args <- lapply(varexprs, all.vars) matches.delete <- lapply(v.args, "==", deletevar) v.has.delete <- sapply(matches.delete, any) v.has.other <- !sapply(matches.delete, all) v.is.mixed <- v.has.delete & v.has.other ## we can't handle mixed terms like sin(x-d), poly(x,d) ## where d is to be deleted. Handling these would require ## knowledge about the functions sin and poly. if(any(v.is.mixed)) { nmixed <- sum(v.is.mixed) if(verbose) message(paste("Don't know how to reduce the", ngettext(nmixed, "term", "terms"), paste(dQuote(varstrings[v.is.mixed]), collapse=","))) return(NULL) } ## OK. We have identified all first order terms to be deleted. 
condemned.names <- varstrings[v.has.delete] ## Determine the terms of all orders that include these first order terms ## (1) terms with model coefficients fax <- tt$factors if(prod(dim(fax)) == 0) retained.terms <- character(0) else { ## Rows are first order terms condemned.row <- rownames(fax) %in% condemned.names ## Columns are the terms of all orders allterms <- colnames(fax) ## Find all columns containing a 1 in a row that is to be deleted if(any(condemned.row)) { condemned.column <- matcolany(fax[condemned.row, , drop=FALSE] != 0) retained.terms <- allterms[!condemned.column] } else retained.terms <- allterms } ## (2) offsets if any if(any(v.is.offset)) retained.terms <- c(retained.terms, varstrings[v.is.offset & !v.has.delete]) ## (3) intercept forced? if(length(retained.terms) == 0) retained.terms <- "1" ## OK. Cut-and-paste f <- paste(lhs, "~", paste(retained.terms, collapse=" + ")) return(as.formula(f)) } spatstat/R/iplotlayered.R0000644000176200001440000002433313115225157015145 0ustar liggesusers# # interactive plot # # $Revision: 1.13 $ $Date: 2017/02/07 07:47:20 $ # # iplot.default <- function(x, ..., xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) x <- as.layered(x) iplot(x, ..., xname=xname) } iplot.layered <- local({ CommitAndRedraw <- function(panel) { ## hack to ensure that panel is immediately updated in rpanel kraever("rpanel") ## This is really a triple-colon! 
rpanel:::rp.control.put(panel$panelname, panel) ## now redraw it redraw.iplot.layered(panel) } faster.layers <- function(x, visible) { if(any(islinnet <- unlist(lapply(x, inherits, what="linnet")))) { # convert linnet layers to psp, for efficiency x[islinnet] <- lapply(x[islinnet], as.psp) } repeat{ islpp <- unlist(lapply(x, inherits, what="lpp")) if(!any(islpp)) break # convert an lpp layer to two layers: psp and ppp, for efficiency ii <- min(which(islpp)) pl <- layerplotargs(x) n <- length(x) xpre <- if(ii == 1) NULL else x[1:ii] xpost <- if(ii == n) NULL else x[(ii+1L):n] ppre <- if(ii == 1) NULL else pl[1:ii] ppost <- if(ii == n) NULL else pl[(ii+1):n] a <- as.psp(as.linnet(x[[ii]])) b <- as.ppp(x[[ii]]) x <- layered(LayerList=c(xpre, list(a, b), xpost), plotargs=unname(c(ppre, pl[ii], pl[ii], ppost))) visible <- visible[if(ii == 1) c(1, seq_len(n)) else if(ii == n) c(seq_len(n), n) else c(1:(ii-1), ii, ii, (ii+1):n)] } attr(x, "visible") <- visible return(x) } iplot.layered <- function(x, ..., xname, visible) { if(missing(xname)) xname <- short.deparse(substitute(x)) verifyclass(x, "layered") if(missing(visible) || is.null(visible)) { visible <- rep(TRUE, length(x)) } else if(length(visible) == 1) { visible <- rep(visible, length(x)) } else stopifnot(length(visible) == length(x)) kraever("rpanel") x <- faster.layers(x, visible) visible <- attr(x, "visible") x <- freeze.colourmaps(x) bb <- as.rectangle(as.owin(x)) bbmid <- unlist(centroid.owin(bb)) lnames <- names(x) if(sum(nzchar(lnames)) != length(x)) lnames <- paste("Layer", seq_len(length(x))) ## p <- rpanel::rp.control(paste("iplot(", xname, ")", sep=""), x=x, w=as.owin(x), xname=xname, layernames=lnames, bb=bb, bbmid=bbmid, zoomfactor=1, zoomcentre=bbmid, which = visible, size=c(700, 400)) # Split panel into three # Left: plot controls # Middle: data # Right: navigation/zoom rpanel::rp.grid(p, "gcontrols", pos=list(row=0,column=0)) rpanel::rp.grid(p, "gdisplay", pos=list(row=0,column=1)) 
rpanel::rp.grid(p, "gnavigate", pos=list(row=0,column=2)) #----- Data display ------------ # This line is to placate the package checker mytkr <- NULL # Create data display panel rpanel::rp.tkrplot(p, mytkr, plotfun=do.iplot.layered, action=click.iplot.layered, pos=list(row=0,column=0,grid="gdisplay")) #----- Plot controls ------------ nextrow <- 0 pozzie <- function(n=nextrow, ...) append(list(row=n,column=0,grid="gcontrols"), list(...)) # main title rpanel::rp.textentry(p, xname, action=redraw.iplot.layered, title="Plot title", pos=pozzie(0)) nextrow <- 1 # select some layers nx <- length(x) which <- rep.int(TRUE, nx) if(nx > 1) { rpanel::rp.checkbox(p, which, labels=lnames, action=redraw.iplot.layered, title="Select layers to plot", pos=pozzie(nextrow), sticky="") nextrow <- nextrow + 1 } # button to print a summary at console rpanel::rp.button(p, title="Print summary information", pos=pozzie(nextrow), action=function(panel) { lapply(lapply(panel$x, summary), print) return(panel) }) # #----- Navigation controls ------------ nextrow <- 0 navpos <- function(n=nextrow,cc=0, ...) 
append(list(row=n,column=cc,grid="gnavigate"), list(...)) rpanel::rp.button(p, title="Up", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2L] stepsize <- (height/4)/zo panel$zoomcentre <- ce + c(0, stepsize) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Left", pos=navpos(nextrow,0,sticky="w"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1L] stepsize <- (width/4)/zo panel$zoomcentre <- ce - c(stepsize, 0) CommitAndRedraw(panel) return(panel) }) rpanel::rp.button(p, title="Right", pos=navpos(nextrow,2,sticky="e"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1L] stepsize <- (width/4)/zo panel$zoomcentre <- ce + c(stepsize, 0) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Down", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2L] stepsize <- (height/4)/zo panel$zoomcentre <- ce - c(0, stepsize) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Zoom In", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor * 2 CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Zoom Out", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor / 2 CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Reset", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- 1 panel$zoomcentre <- panel$bbmid CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Redraw", pos=navpos(nextrow,1,sticky=""), action=redraw.iplot.layered) 
nextrow <- nextrow+1 # quit button rpanel::rp.button(p, title="Quit", quitbutton=TRUE, pos=navpos(nextrow, 1, sticky=""), action= function(panel) { panel }) invisible(NULL) } # Function to redraw the whole shebang redraw.iplot.layered <- function(panel) { rpanel::rp.tkrreplot(panel, mytkr) panel } # Function executed when data display is clicked click.iplot.layered <- function(panel, x, y) { panel$zoomcentre <- panel$zoomcentre + (c(x,y) - panel$bbmid)/panel$zoomfactor CommitAndRedraw(panel) return(panel) } # function that updates the plot when the control panel is operated do.iplot.layered <- function(panel) { # scale and clip the pattern x <- panel$x[panel$which] w <- panel$w z <- panel$zoomfactor if(is.null(z)) z <- 1 ce <- panel$zoomcentre bb <- panel$bb bbmid <- panel$bbmid scalex <- shift(scalardilate(shift(x, -ce), z), bbmid) scalew <- shift(scalardilate(shift(w, -ce), z), bbmid) scalex <- scalex[, bb] scalew <- intersect.owin(scalew, bb, fatal=FALSE) # determine what is plotted under the clipped pattern blankargs <- list(type="n") dashargs <- list(lty=3, border="red") panel.begin <- if(is.null(scalew)) { # empty intersection; just create the plot space layered(bb, plotargs=list(blankargs)) } else if(identical(bb, scalew)) { if(z == 1) { # original state # window is rectangular # plot the data window as a solid black rectangle layered(bb, scalew, plotargs=list(blankargs, list(lwd=2))) } else { # zoom view is entirely inside window # plot the clipping region as a red dashed rectangle layered(bb, plotargs=list(dashargs)) } } else { # field of view is not a subset of window # plot the clipping region as a red dashed rectangle # Then add the data window layered(bb, scalew, plotargs=list(dashargs, list(invert=TRUE))) } # draw it opa <- par(ask=FALSE) plot(panel.begin, main=panel$xname) plot(scalex, add=TRUE) par(opa) panel } freeze.colourmaps <- function(x) { # tweak a layered object to ensure that # the colours of image layers don't change with zoom/pan isim <- 
unlist(lapply(x, is.im)) if(any(isim)) { # ensure there are plotargs pl <- attr(x, "plotargs") if(is.null(pl)) pl <- rep.int(list(list()), length(x)) # make sure the plotargs include 'zlim' for(i in which(isim)) { x.i <- x[[i]] if(x.i$type %in% c("integer", "real")) pl[[i]] <- resolve.defaults(pl[[i]], list(zlim=range(x.i))) } # put back attr(x, "plotargs") <- pl } return(x) } iplot.layered }) spatstat/R/nncross.R0000755000176200001440000001623513115271120014131 0ustar liggesusers# # nncross.R # # # $Revision: 1.28 $ $Date: 2017/06/05 10:31:58 $ # # Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2012 # Licence: GNU Public Licence >= 2 nncross <- function(X, Y, ...) { UseMethod("nncross") } nncross.default <- function(X, Y, ...) { X <- as.ppp(X, W=boundingbox) nncross(X, Y, ...) } nncross.ppp <- function(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), ..., k = 1, sortby=c("range", "var", "x", "y"), is.sorted.X = FALSE, is.sorted.Y = FALSE) { stopifnot(is.ppp(Y) || is.psp(Y)) sortby <- match.arg(sortby) what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE) want.dist <- "dist" %in% what want.which <- "which" %in% what want.both <- want.dist && want.which if(!missing(k)) { # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } } k <- as.integer(k) kmax <- max(k) nk <- length(k) # trivial cases nX <- npoints(X) nY <- nobjects(Y) # deal with null cases if(nX == 0) return(as.data.frame(list(dist=matrix(0, nrow=0, ncol=nk), which=matrix(0L, nrow=0, ncol=nk))[what])) if(nY == 0) return(as.data.frame(list(dist=matrix(Inf, nrow=nX, ncol=nk), which=matrix(NA, nrow=nX, ncol=nk))[what])) # Y is a line segment pattern if(is.psp(Y)) { if(!identical(k, 1L)) stop("Sorry, the case k 
> 1 is not yet implemented for psp objects") return(ppllengine(X,Y,"distance")[, what]) } # Y is a point pattern if(is.null(iX) != is.null(iY)) stop("If one of iX, iY is given, then both must be given") exclude <- (!is.null(iX) || !is.null(iY)) if(exclude) { stopifnot(is.integer(iX) && is.integer(iY)) if(length(iX) != nX) stop("length of iX does not match the number of points in X") if(length(iY) != nY) stop("length of iY does not match the number of points in Y") } if((is.sorted.X || is.sorted.Y) && !(sortby %in% c("x", "y"))) stop(paste("If data are already sorted,", "the sorting coordinate must be specified explicitly", "using sortby = \"x\" or \"y\"")) # decide whether to sort on x or y coordinate switch(sortby, range = { WY <- as.owin(Y) sortby.y <- (diff(WY$xrange) < diff(WY$yrange)) }, var = { sortby.y <- (var(Y$x) < var(Y$y)) }, x={ sortby.y <- FALSE}, y={ sortby.y <- TRUE} ) # The C code expects points to be sorted by y coordinate. if(sortby.y) { Xx <- X$x Xy <- X$y Yx <- Y$x Yy <- Y$y } else { Xx <- X$y Xy <- X$x Yx <- Y$y Yy <- Y$x } # sort only if needed if(!is.sorted.X){ oX <- fave.order(Xy) Xx <- Xx[oX] Xy <- Xy[oX] if(exclude) iX <- iX[oX] } if (!is.sorted.Y){ oY <- fave.order(Yy) Yx <- Yx[oY] Yy <- Yy[oY] if(exclude) iY <- iY[oY] } # number of neighbours that are well-defined kmaxcalc <- min(nY, kmax) if(kmaxcalc == 1) { # ............... single nearest neighbour .................. 
# call C code nndv <- if(want.dist) numeric(nX) else numeric(1) nnwh <- if(want.which) integer(nX) else integer(1) if(!exclude) iX <- iY <- integer(1) huge <- 1.1 * diameter(boundingbox(as.rectangle(X), as.rectangle(Y))) z <- .C("nnXinterface", n1=as.integer(nX), x1=as.double(Xx), y1=as.double(Xy), id1=as.integer(iX), n2=as.integer(nY), x2=as.double(Yx), y2=as.double(Yy), id2=as.integer(iY), exclude = as.integer(exclude), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd=as.double(nndv), nnwhich=as.integer(nnwh), huge=as.double(huge), PACKAGE = "spatstat") if(want.which) { nnwcode <- z$nnwhich #sic. C code now increments by 1 if(any(uhoh <- (nnwcode == 0))) { warning("NA's produced in nncross()$which") nnwcode[uhoh] <- NA } } # reinterpret in original ordering if(is.sorted.X){ if(want.dist) nndv <- z$nnd if(want.which) nnwh <- if(is.sorted.Y) nnwcode else oY[nnwcode] } else { if(want.dist) nndv[oX] <- z$nnd if(want.which) nnwh[oX] <- if(is.sorted.Y) nnwcode else oY[nnwcode] } if(want.both) return(data.frame(dist=nndv, which=nnwh)) return(if(want.dist) nndv else nnwh) } else { # ............... k nearest neighbours .................. 
# call C code nndv <- if(want.dist) numeric(nX * kmaxcalc) else numeric(1) nnwh <- if(want.which) integer(nX * kmaxcalc) else integer(1) if(!exclude) iX <- iY <- integer(1) huge <- 1.1 * diameter(boundingbox(as.rectangle(X), as.rectangle(Y))) z <- .C("knnXinterface", n1=as.integer(nX), x1=as.double(Xx), y1=as.double(Xy), id1=as.integer(iX), n2=as.integer(nY), x2=as.double(Yx), y2=as.double(Yy), id2=as.integer(iY), kmax=as.integer(kmaxcalc), exclude = as.integer(exclude), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd=as.double(nndv), nnwhich=as.integer(nnwh), huge=as.double(huge), PACKAGE = "spatstat") # extract results nnD <- z$nnd nnW <- z$nnwhich # map 0 to NA if(want.which && any(uhoh <- (nnW == 0))) { nnW[uhoh] <- NA if(want.dist) nnD[uhoh] <- Inf } # reinterpret indices in original ordering if(!is.sorted.Y) nnW <- oY[nnW] # reform as matrices NND <- if(want.dist) matrix(nnD, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0 NNW <- if(want.which) matrix(nnW, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0 if(!is.sorted.X){ # rearrange rows to correspond to original ordering of points if(want.dist) NND[oX, ] <- NND if(want.which) NNW[oX, ] <- NNW } # the return value should correspond to the original vector k if(kmax > kmaxcalc) { # add columns of NA / Inf kextra <- kmax - kmaxcalc if(want.dist) NND <- cbind(NND, matrix(Inf, nrow=nX, ncol=kextra)) if(want.which) NNW <- cbind(NNW, matrix(NA_integer_, nrow=nX, ncol=kextra)) } if(length(k) < kmax) { # select only the specified columns if(want.dist) NND <- NND[, k, drop=TRUE] if(want.which) NNW <- NNW[, k, drop=TRUE] } result <- as.data.frame(list(dist=NND, which=NNW)[what]) colnames(result) <- c(if(want.dist) paste0("dist.", k) else NULL, if(want.which) paste0("which.",k) else NULL) if(ncol(result) == 1) result <- result[, , drop=TRUE] return(result) } } spatstat/R/randomImage.R0000644000176200001440000000057313115225157014673 0ustar liggesusers#' #' randomImage.R #' #' Functions for generating 
random images #' #' $Revision: 1.1 $ $Date: 2015/03/23 10:44:04 $ #' #' rnoise <- function(rgen=runif, w=square(1), ...) { a <- do.call.matched(as.mask, list(w=w, ...), sieve=TRUE) W <- a$result argh <- a$otherargs Z <- as.im(W) n <- sum(W$m) Z[] <- do.call(rgen, append(list(n=n), argh)) return(Z) } spatstat/R/discarea.R0000755000176200001440000000536113115271075014226 0ustar liggesusers# # discarea.R # # $Revision: 1.18 $ $Date: 2017/06/05 10:31:58 $ # # # Compute area of intersection between a disc and a window, # discpartarea <- function(X, r, W=as.owin(X)) { if(!missing(W)) { verifyclass(W, "owin") if(!inherits(X, "ppp")) X <- as.ppp(X, W) } verifyclass(X, "ppp") n <- X$n if(is.matrix(r) && nrow(r) != n) stop("the number of rows of r should match the number of points in X") if(!is.matrix(r)) { nr <- length(r) r <- matrix(r, nrow=n, ncol=nr, byrow=TRUE) } else { nr <- ncol(r) } W <- as.polygonal(W) # convert polygon to line segments Y <- edges(W) # remove vertical segments (contribution is zero) vert <- (Y$ends$x1 == Y$ends$x0) Y <- Y[!vert] # go z <- .C("discareapoly", nc=as.integer(n), xc=as.double(X$x), yc=as.double(X$y), nr=as.integer(nr), rmat=as.double(r), nseg=as.integer(Y$n), x0=as.double(Y$ends$x0), y0=as.double(Y$ends$y0), x1=as.double(Y$ends$x1), y1=as.double(Y$ends$y1), eps=as.double(.Machine$double.eps), out=as.double(numeric(length(r))), PACKAGE = "spatstat") areas <- matrix(z$out, n, nr) return(areas) } # Compute area of dilation of point pattern # using Dirichlet tessellation or distmap # (areas of other dilations using distmap) dilated.areas <- function(X, r, W=as.owin(X), ..., constrained=TRUE, exact=FALSE) { if(is.matrix(r)) { if(sum(dim(r) > 1) > 1L) stop("r should be a vector or single value") r <- as.vector(r) } if(exact && !is.ppp(X)) { exact <- FALSE warning("Option exact=TRUE is only available for ppp objects") } if(!constrained) { # unconstrained dilation bb <- as.rectangle(X) W <- grow.rectangle(bb, max(r)) if(is.owin(X)) X <- 
rebound.owin(X, W) else X$window <- W } else W <- as.owin(W) if(!exact) { D <- distmap(X) pixelarea <- D$xstep * D$ystep Dvals <- D[W, drop=TRUE] if(is.im(Dvals)) Dvals <- as.vector(as.matrix(Dvals)) Dvals <- Dvals[!is.na(Dvals)] rr <- c(-1, r) h <- cumsum(whist(Dvals, rr)) return(h * pixelarea) } X <- unique(X) npts <- npoints(X) nr <- length(r) if(npts == 0) return(numeric(nr)) else if(npts == 1L) return(discpartarea(X, r, W)) samebox <- (W$type == "rectangle") && identical(all.equal(W, as.owin(X)), "TRUE") needclip <- constrained && !samebox dd <- dirichlet(X) til <- tiles(dd) out <- matrix(0, npts, nr) for(i in 1:npts) { Ti <- til[[i]] if(needclip) Ti <- intersect.owin(Ti, W) out[i,] <- discpartarea(X[i], r, Ti) } return(apply(out, 2, sum)) } spatstat/R/anova.ppm.R0000755000176200001440000002627713115271075014363 0ustar liggesusers# # anova.ppm.R # # $Revision: 1.25 $ $Date: 2016/10/23 10:36:58 $ # anova.ppm <- local({ do.gripe <- function(...) warning(paste(...), call.=FALSE) dont.gripe <- function(...) NULL nquad <- function(x) { if(inherits(x, "quad")) n.quad(x) else 0 } fmlaString <- function(z) { paste(as.expression(formula(z))) } interString <- function(z) { as.interact(z)$creator } anova.ppm <- function(object, ..., test=NULL, adjust=TRUE, warn=TRUE, fine=FALSE) { gripe <- if(warn) do.gripe else dont.gripe if(!is.null(test)) { test <- match.arg(test, c("Chisq", "LRT", "Rao", "score", "F", "Cp")) if(test == "score") test <- "Rao" if(!(test %in% c("Chisq", "LRT", "Rao"))) stop("test=", dQuote(test), "is not yet implemented") } ## trap outmoded usage argh <- list(...) if("override" %in% names(argh)) { gripe("Argument 'override' is superseded and was ignored") argh <- argh[-which(names(argh) == "override")] } ## list of models objex <- append(list(object), argh) if(!all(sapply(objex, is.ppm))) stop(paste("Arguments must all be", sQuote("ppm"), "objects")) ## all models Poisson? 
pois <- all(sapply(objex, is.poisson.ppm)) gibbs <- !pois ## any models fitted by ippm? newton <- any(sapply(objex, inherits, what="ippm")) if(gibbs && !is.null(test) && test == "Rao") stop("Score test is only implemented for Poisson models", call.=FALSE) ## handle anova for a single object expandedfrom1 <- FALSE if(length(objex) == 1 && (gibbs || newton)) { ## we can't rely on anova.glm in this case ## so we have to re-fit explicitly Terms <- drop.scope(object) if((nT <- length(Terms)) > 0) { ## generate models by adding terms sequentially objex <- vector(mode="list", length=nT+1) for(n in 1L:nT) { ## model containing terms 1, ..., n-1 fmla <- paste(". ~ . - ", paste(Terms[n:nT], collapse=" - ")) fmla <- as.formula(fmla) objex[[n]] <- update(object, fmla) } ## full model objex[[nT+1L]] <- object expandedfrom1 <- TRUE } } ## all models fitted by same method? fitmethod <- unique(sapply(objex, getElement, name="method")) if(length(fitmethod) > 1) stop(paste("Models were fitted by different methods", commasep(sQuote(fitmethod)), "- comparison is not possible")) ## fitted by MPL or logistic? if(!(fitmethod %in% c("mpl", "logi"))) stop(paste("Not implemented for models fitted by method=", sQuote(fitmethod))) logi <- (fitmethod == "logi") refitargs <- list() fitz <- NULL ## fitted to same quadscheme using same edge correction? if(length(objex) > 1) { ## same data? datas <- lapply(objex, data.ppm) samedata <- all(sapply(datas[-1L], identical, y=datas[[1L]])) if(!samedata) stop("Models were fitted to different datasets") ## same dummy points? quads <- lapply(objex, quad.ppm) samequad <- all(sapply(quads[-1L], identical, y=quads[[1L]])) if(!samequad) { gripe("Models were re-fitted using a common quadrature scheme") sizes <- sapply(quads, nquad) imax <- which.max(sizes) bigQ <- quads[[imax]] refitargs$Q <- bigQ } ## same edge correction? 
corrxn <- unique(sapply(objex, getElement, name="correction")) if(length(corrxn) > 1) stop(paste("Models were fitting using different edge corrections", commasep(sQuote(corrxn)))) if(corrxn == "border") { rbord <- unique(sapply(objex, getElement, name="rbord")) if(length(rbord) > 1) { gripe("Models were re-fitted using a common value of 'rbord'") refitargs$rbord <- max(rbord) } } ## Extract glmfit objects fitz <- lapply(objex, getglmfit) ## Any trivial models? (uniform Poisson) trivial <- sapply(fitz, is.null) if(any(trivial)) refitargs$forcefit <- TRUE ## force all non-trivial models to be fitted using same method ## (all using GLM or all using GAM) isgam <- sapply(fitz, inherits, what="gam") isglm <- sapply(fitz, inherits, what="glm") usegam <- any(isgam) if(usegam && any(isglm)) { gripe("Models were re-fitted with use.gam=TRUE") refitargs$use.gam <- TRUE refitargs$forcefit <- TRUE } ## finally refit models if(length(refitargs) > 0) { objex <- do.call(lapply, append(list(X=objex, FUN=update), refitargs)) fitz <- lapply(objex, getglmfit) } } ## Ensure GLM/GAM objects all use the same 'subset' subz <- lapply(objex, getglmsubset) if(length(unique(subz)) > 1) { subsub <- Reduce("&", subz) fitz <- lapply(fitz, refittosubset, sub=subsub) gripe("Models were re-fitted after discarding quadrature points", "that were illegal under some of the models") } ## If any models were fitted by ippm we need to correct the df if(newton) { nfree <- sapply(lapply(objex, logLik), attr, which="df") ncanonical <- lengths(lapply(objex, coef)) nextra <- nfree - ncanonical if(is.null(fitz)) fitz <- lapply(objex, getglmfit) for(i in seq_along(fitz)) if(nextra[i] != 0) fitz[[i]]$df.residual <- fitz[[i]]$df.residual - nextra[i] } ## Finally do the appropriate ANOVA if(is.null(fitz)) fitz <- lapply(objex, getglmfit) result <- do.call(anova, append(fitz, list(test=test, dispersion=1))) ## Remove approximation-dependent columns if present result[, "Resid. 
Dev"] <- NULL ## replace 'residual df' by number of parameters in model if("Resid. Df" %in% names(result)) { ## count number of quadrature points used in each model obj1 <- objex[[1L]] ss <- getglmsubset(obj1) nq <- if(!is.null(ss)) sum(ss) else n.quad(quad.ppm(obj1)) result[, "Resid. Df"] <- nq - result[, "Resid. Df"] names(result)[match("Resid. Df", names(result))] <- "Npar" } ## edit header if(!is.null(h <- attr(result, "heading"))) { ## remove .mpl.Y and .logi.Y from formulae if present h <- gsub(".mpl.Y", "", h) h <- gsub(".logi.Y", "", h) ## delete GLM information if present h <- gsub("Model: quasi, link: log", "", h) h <- gsub("Model: binomial, link: logit", "", h) h <- gsub("Response: ", "", h) ## remove blank lines (up to 4 consecutive blanks can occur) for(i in 1L:5L) h <- gsub("\n\n", "\n", h) if(length(objex) > 1 && length(h) > 1) { ## anova(mod1, mod2, ...) ## change names of models fmlae <- sapply(objex, fmlaString) intrx <- sapply(objex, interString) h[2L] <- paste("Model", paste0(1L:length(objex), ":"), fmlae, "\t", intrx, collapse="\n") } ## Add explanation if we did the stepwise thing ourselves if(expandedfrom1) h <- c(h[1L], "Terms added sequentially (first to last)\n", h[-1L]) ## Contract spaces in output if spatstat.options('terse') >= 2 if(!waxlyrical('space')) h <- gsub("\n$", "", h) ## Put back attr(result, "heading") <- h } if(adjust && gibbs) { ## issue warning, if not already given if(warn) warn.once("anovaAdjust", "anova.ppm now computes the *adjusted* deviances", "when the models are not Poisson processes.") ## Corrected pseudolikelihood ratio nmodels <- length(objex) if(nmodels > 1) { cfac <- rep(1, nmodels) for(i in 2:nmodels) { a <- objex[[i-1]] b <- objex[[i]] df <- length(coef(a)) - length(coef(b)) if(df > 0) { ibig <- i-1 ismal <- i } else { ibig <- i ismal <- i-1 df <- -df } bigger <- objex[[ibig]] smaller <- objex[[ismal]] if(df == 0) { gripe("Models", i-1, "and", i, "have the same dimension") } else { bignames <- 
names(coef(bigger)) smallnames <- names(coef(smaller)) injection <- match(smallnames, bignames) if(any(uhoh <- is.na(injection))) { gripe("Unable to match", ngettext(sum(uhoh), "coefficient", "coefficients"), commasep(sQuote(smallnames[uhoh])), "of model", ismal, "to coefficients in model", ibig) } else { thetaDot <- 0 * coef(bigger) thetaDot[injection] <- coef(smaller) JH <- vcov(bigger, what="internals", new.coef=thetaDot, fine=fine) J <- if(!logi) JH$Sigma else (JH$Sigma1log+JH$Sigma2log) H <- if(!logi) JH$A1 else JH$Slog G <- H%*%solve(J)%*%H if(df == 1) { cfac[i] <- H[-injection,-injection]/G[-injection,-injection] } else { Res <- residuals(bigger, type="score", new.coef=thetaDot, drop=TRUE) U <- integral.msr(Res) Uo <- U[-injection] Uo <- matrix(Uo, ncol=1) Hinv <- solve(H) Ginv <- solve(G) Hoo <- Hinv[-injection,-injection, drop=FALSE] Goo <- Ginv[-injection,-injection, drop=FALSE] ScoreStat <- t(Uo) %*% Hoo %*% solve(Goo) %*% Hoo %*% Uo cfac[i] <- ScoreStat/(t(Uo) %*% Hoo %*% Uo) } } } } ## apply Pace et al (2011) adjustment to pseudo-deviances ## (save attributes of 'result' for later reinstatement) oldresult <- result result$Deviance <- AdjDev <- result$Deviance * cfac cn <- colnames(result) colnames(result)[cn == "Deviance"] <- "AdjDeviance" if("Pr(>Chi)" %in% colnames(result)) result[["Pr(>Chi)"]] <- c(NA, pchisq(abs(AdjDev[-1L]), df=abs(result$Df[-1L]), lower.tail=FALSE)) class(result) <- class(oldresult) attr(result, "heading") <- attr(oldresult, "heading") } } if(newton) { ## calculation does not include 'covfunargs' cfa <- lapply(lapply(objex, getElement, name="covfunargs"), names) cfa <- unique(unlist(cfa)) action <- if(adjust && gibbs) "Adjustment to composite likelihood" else if(test == "Rao") "Score test calculation" else NULL if(!is.null(action)) gripe(action, "does not account for", "irregular trend parameters (covfunargs)", commasep(sQuote(cfa))) } return(result) } refittosubset <- function(fut, sub) { etf <- environment(terms(fut)) gd <- 
get("glmdata", envir=etf) gd$.mpl.SUBSET <- sub assign("glmdata", gd, envir=etf) up <- update(fut, evaluate=FALSE) eval(up, envir=etf) } anova.ppm }) spatstat/R/qqplotppm.R0000755000176200001440000002656413115271120014507 0ustar liggesusers# # QQ plot of smoothed residual field against model # # qqplot.ppm() QQ plot (including simulation) # # $Revision: 1.30 $ $Date: 2016/04/25 02:34:40 $ # qqplot.ppm <- local({ ## How to refit the model refit <- function(fit, pattern) { update.ppm(fit, Q=pattern, use.internal=(fit$method != "mppm")) } ## how to compute the residual field residualfield <- function(fit, ...) { d <- diagnose.ppm(fit, which="smooth", plot.it=FALSE, compute.cts=FALSE, compute.sd=FALSE, check=FALSE, ...) return(d$smooth$Z$v) } qqplot.ppm <- function(fit, nsim=100, expr=NULL, ..., type="raw", style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE, dimyx=NULL, nrep=if(fast) 5e4 else 1e5, control=update(default.rmhcontrol(fit), nrep=nrep), saveall=FALSE, monochrome=FALSE, limcol=if(monochrome) "black" else "red", maxerr=max(100, ceiling(nsim/10)), check=TRUE, repair=TRUE, envir.expr) { verifyclass(fit, "ppm") if(check && damaged.ppm(fit)) { if(!repair) stop("object format corrupted; try update(fit, use.internal=TRUE)") message("object format corrupted; repairing it.") fit <- update(fit, use.internal=TRUE) } if(fast) { oldnpixel <- spatstat.options("npixel") if(is.null(dimyx)) dimyx <- pmin(40, rev(oldnpixel)) spatstat.options(npixel=rev(dimyx)) } ################ How to evaluate residuals ########################## ## Quantiles of the residual field will be computed. ## Data values dat <- residualfield(fit, type=type, ..., dimyx=dimyx) ################## How to perform simulations? 
####################### ## envir.call <- sys.parent() envir.here <- sys.frame(sys.nframe()) ## extract.from.list <- FALSE inext <- 0 # to placate package checker dont.complain.about(inext) if(is.null(expr)) { ## We will simulate from the fitted model 'nsim' times ## and refit the model to these simulations simsource <- "fit" how.simulating <- "simulating from fitted model" ## prepare rmh arguments rcontrol <- rmhcontrol(control) rmodel <- rmhmodel(fit, control=rcontrol, project=FALSE, verbose=verbose) rstart <- rmhstart(n.start=data.ppm(fit)$n) ## pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontrol, preponly=TRUE, verbose=FALSE) ## expression to be evaluated each time expr <- expression( refit(fit, rmhEngine(rmhinfolist, verbose=FALSE))) envir.expr <- envir.here ## pacify code checkers dont.complain.about(rmhinfolist) } else if(is.expression(expr)) { simsource <- "expr" how.simulating <- paste("evaluating", sQuote("expr")) if(missing(envir.expr) || is.null(envir.expr)) envir.expr <- parent.frame() } else if(inherits(expr, "envelope")) { simpat <- attr(expr, "simpatterns") if(!is.null(simpat) && all(sapply(simpat, is.ppp))) { expr <- expression(simpat[[inext]]) envir.expr <- envir.here dont.complain.about(simpat) simsource <- "list" how.simulating <- "extracting point pattern from list" } else stop(paste("Argument", sQuote("expr"), "is an envelope object,", "but does not contain point patterns"), call.=FALSE) } else if(is.list(expr) && all(sapply(expr, is.ppp))) { simpat <- expr expr <- expression(simpat[[inext]]) envir.expr <- envir.here dont.complain.about(simpat) simsource <- "list" how.simulating <- "extracting point pattern from list" } else stop(paste(sQuote("expr"), "should be an expression, or an envelope object,", "or a list of point patterns"), call.=FALSE) exprstring <- if(simsource == "expr") deparse(expr) else NULL ###### Perform simulations if(verbose) { cat(paste("Simulating", nsim, "realisations... 
")) pstate <- list() } simul.sizes <- numeric(nsim) isim <- 0 ierr <- 0 repeat { inext <- isim + 1 ## protect from randomly-generated crashes in gam ei <- try(eval(expr, envir=envir.expr), silent=!verbose) if(inherits(ei, "try-error")) { ## error encountered in evaluating 'expr' ierr <- ierr + 1 if(ierr > maxerr) stop(paste("Exceeded maximum of", maxerr, "failures in", how.simulating, "after generating only", isim, "realisations")) else break } else { ## simulation successful isim <- isim + 1 fiti <- if(simsource == "fit") ei else if(is.ppm(ei)) ei else if(is.ppp(ei)) refit(fit, ei) else stop("result of eval(expr) is not a ppm or ppp object") ## diagnostic info simul.sizes[isim] <- data.ppm(fiti)$n ## compute residual field resi <- residualfield(fiti, type=type, ..., dimyx=dimyx) if(isim == 1) sim <- array(, dim=c(dim(resi), nsim)) sim[,,isim] <- resi if(verbose) pstate <- progressreport(isim, nsim, state=pstate) if(isim >= nsim) break } } ###### Report diagnostics if(ierr > 0) cat(paste("\n\n**Alert:", ierr, "failures occurred in", how.simulating, "\n\n")) nempty <- sum(simul.sizes == 0) if(nempty > 0) cat(paste("\n\n**Alert:", nempty, "out of", nsim, "simulated patterns were empty.\n\n")) else cat(paste("\nDiagnostic info:\n", "simulated patterns contained an average of", mean(simul.sizes), "points.\n")) if(nempty == nsim) warning("All simulated patterns were empty") ############ Plot them switch(style, classical = { rr <- range(c(dat,sim)) result <- qqplot(sim, dat, xlim=rr, ylim=rr, asp=1.0, xlab="Quantiles of simulation", ylab="Quantiles of data",plot.it=plot.it) title(sub=paste("Residuals:", type)) abline(0,1, lty=2) result <- append(result, list(data=dat, sim=sim, xlim=rr, ylim=rr, xlab="Quantiles of simulation", ylab="Quantiles of data", rtype=type, nsim=nsim, fit=fit, expr=exprstring, simsource = simsource ) ) }, mean = { ## compute quantiles corresponding to probabilities p[i] ## separately in each realisation. 
if(verbose) cat("Calculating quantiles...") if(fast) { p <- ppoints(min(100,length(dat)), 3/8) qsim <- apply(sim, 3, quantile, probs=p, na.rm=TRUE) } else { qsim <- apply(sim, 3, sort, na.last=TRUE) } if(verbose) cat("averaging...") ## sample mean of each quantile meanq <- apply(qsim, 1, mean, na.rm=TRUE) ## et cetera varq <- apply(qsim, 1, var, na.rm=TRUE) sdq <- sqrt(varq) q.025 <- apply(qsim, 1, quantile, probs=0.025, na.rm=TRUE) q.975 <- apply(qsim, 1, quantile, probs=0.975, na.rm=TRUE) rr <- range(c(meanq,dat), na.rm=TRUE) dats <- if(fast) quantile(dat, probs=p, na.rm=TRUE) else sort(dat, na.last=TRUE) if(verbose) cat("..Done.\n") if(plot.it) { plot(meanq, dats, xlab="Mean quantile of simulations", ylab="data quantile", xlim=rr, ylim=rr, asp=1.0) abline(0,1) lines(meanq, q.025, lty=2, col=limcol) lines(meanq, q.975, lty=2, col=limcol) title(sub=paste("Residuals:", type)) } result <- list(x=meanq, y=dats, sdq=sdq, q.025=q.025, q.975=q.975, data=dat, sim=sim, xlim=rr, ylim=rr, xlab="Mean quantile of simulations", ylab="data quantile", rtype=type, nsim=nsim, fit=fit, expr=exprstring, simsource=simsource) }, stop(paste("Unrecognised option for", sQuote("style"))) ) ## Throw out baggage if not wanted if(!saveall) { result$fit <- summary(fit, quick=TRUE) result$sim <- NULL } ## reset npixel if(fast) spatstat.options(npixel=oldnpixel) ## class(result) <- c("qqppm", class(result)) return(invisible(result)) } qqplot.ppm }) plot.qqppm <- local({ plot.qqppm <- function(x, ..., limits=TRUE, monochrome=spatstat.options('monochrome'), limcol=if(monochrome) "black" else "red") { stopifnot(inherits(x, "qqppm")) default.type <- if(length(x$x) > 150) "l" else "p" do.call(myplot, resolve.defaults(list(x, ..., type=default.type, limits=limits, limcol=limcol))) return(invisible(x)) } myplot <- function(object, xlab = object$xlab, ylab = object$ylab, xlim = object$xlim, ylim = object$ylim, asp = 1, type = default.type, ..., limits=TRUE, limcol="red") { plot(object$x, object$y, xlab 
= xlab, ylab = ylab, xlim = xlim, ylim = ylim, asp = asp, type = type, ...) abline(0, 1) if(limits) { if(!is.null(object$q.025)) lines(object$x, object$q.025, lty = 2, col=limcol) if(!is.null(object$q.975)) lines(object$x, object$q.975, lty = 2, col=limcol) } title(sub=paste("Residuals:", object$rtype)) } plot.qqppm }) print.qqppm <- function(x, ...) { stopifnot(inherits(x, "qqppm")) splat("Q-Q plot of point process residuals", "of type", sQuote(x$rtype), "\n", "based on", x$nsim, "simulations") simsource <- x$simsource if(is.null(simsource)) # old version simsource <- if(x$simulate.from.fit) "fit" else "expr" switch(simsource, fit = { fit <- x$fit sumfit <- if(is.ppm(fit)) summary(fit, quick=TRUE) else if(inherits(fit, "summary.ppm")) fit else list(name="(unrecognised format)") splat("\nSimulations from fitted model:", sumfit$name) }, expr = { splat("Simulations obtained by evaluating the following expression:") print(x$expr) }, list = { splat("Simulated point patterns were provided in a list") }) invisible(NULL) } spatstat/R/eval.im.R0000755000176200001440000001755713115271075014020 0ustar liggesusers# # eval.im.R # # eval.im() Evaluate expressions involving images # # compatible.im() Check whether two images are compatible # # harmonise.im() Harmonise images # commonGrid() # # $Revision: 1.41 $ $Date: 2016/11/18 08:40:40 $ # eval.im <- local({ eval.im <- function(expr, envir, harmonize=TRUE) { e <- as.expression(substitute(expr)) ## get names of all variables in the expression varnames <- all.vars(e) allnames <- all.names(e, unique=TRUE) funnames <- allnames[!(allnames %in% varnames)] if(length(varnames) == 0) stop("No variables in this expression") ## get the values of the variables if(missing(envir)) { envir <- parent.frame() # WAS: sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- mget(varnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) funs <- mget(funnames, envir=envir, inherits=TRUE, 
ifnotfound=list(NULL)) ## WAS: vars <- lapply(as.list(varnames), get, envir=envir) ## WAS: funs <- lapply(as.list(funnames), get, envir=envir) ## ## find out which variables are images ims <- unlist(lapply(vars, is.im)) if(!any(ims)) stop("No images in this expression") images <- vars[ims] nimages <- length(images) ## test that the images are compatible if(!(do.call(compatible, unname(images)))) { whinge <- paste(if(nimages > 2) "some of" else NULL, "the images", commasep(sQuote(names(images))), if(!harmonize) "are" else "were", "not compatible") if(!harmonize) { stop(whinge, call.=FALSE) } else { warning(whinge, call.=FALSE) images <- do.call(harmonise.im, images) } } ## trap a common error: using fv object as variable isfun <- unlist(lapply(vars, is.fv)) if(any(isfun)) stop("Cannot use objects of class fv as variables in eval.im") ## replace each image by its matrix of pixel values, and evaluate imagevalues <- lapply(images, getImValues) template <- images[[1L]] ## This bit has been repaired: vars[ims] <- imagevalues v <- eval(e, append(vars, funs)) ## ## reshape, etc result <- im(v, xcol=template$xcol, yrow=template$yrow, xrange=template$xrange, yrange=template$yrange, unitname=unitname(template)) return(result) } ## extract pixel values without destroying type information getImValues <- function(x) { v <- as.matrix(x) dim(v) <- NULL return(v) } eval.im }) compatible.im <- function(A, B, ..., tol=1e-6) { verifyclass(A, "im") if(missing(B)) return(TRUE) verifyclass(B, "im") if(!all(A$dim == B$dim)) return(FALSE) xdiscrep <- max(abs(A$xrange - B$xrange), abs(A$xstep - B$xstep), abs(A$xcol - B$xcol)) ydiscrep <- max(abs(A$yrange - B$yrange), abs(A$ystep - B$ystep), abs(A$yrow - B$yrow)) xok <- (xdiscrep < tol * min(A$xstep, B$xstep)) yok <- (ydiscrep < tol * min(A$ystep, B$ystep)) uok <- compatible.units(unitname(A), unitname(B)) if(!(xok && yok && uok)) return(FALSE) ## A and B are compatible if(length(list(...)) == 0) return(TRUE) ## recursion 
return(compatible.im(B, ..., tol=tol)) } ## force a list of images to be compatible harmonize.im <- harmonise.im <- function(...) { argz <- list(...) n <- length(argz) if(n < 2) return(argz) result <- vector(mode="list", length=n) isim <- unlist(lapply(argz, is.im)) if(!any(isim)) stop("No images supplied") imgs <- argz[isim] ## if any windows are present, extract bounding box iswin <- unlist(lapply(argz, is.owin)) bb0 <- if(!any(iswin)) NULL else do.call(boundingbox, unname(argz[iswin])) if(length(imgs) == 1L && is.null(bb0)) { ## only one 'true' image: use it as template. result[isim] <- imgs Wtemplate <- imgs[[1L]] } else { ## test for compatible units un <- lapply(imgs, unitname) uok <- unlist(lapply(un, compatible.units, y=un[[1L]])) if(!all(uok)) stop("Images have incompatible units of length") ## find the image with the highest resolution xsteps <- unlist(lapply(imgs, getElement, name="xstep")) which.finest <- which.min(xsteps) finest <- imgs[[which.finest]] ## get the bounding box bb <- do.call(boundingbox, lapply(unname(imgs), as.rectangle)) if(!is.null(bb0)) bb <- boundingbox(bb, bb0) ## determine new raster coordinates xcol <- prolongseq(finest$xcol, bb$xrange) yrow <- prolongseq(finest$yrow, bb$yrange) xy <- list(x=xcol, y=yrow) ## resample all images on new raster newimgs <- lapply(imgs, as.im, xy=xy) result[isim] <- newimgs Wtemplate <- newimgs[[which.finest]] } ## convert other data to images if(any(notim <- !isim)) result[notim] <- lapply(argz[notim], as.im, W=as.mask(Wtemplate)) names(result) <- names(argz) return(result) } ## Return just the corresponding template window commonGrid <- local({ ## auxiliary function gettype <- function(x) { if(is.im(x) || is.mask(x)) "raster" else if(is.owin(x) || is.ppp(x) || is.psp(x)) "spatial" else "none" } commonGrid <- function(...) { argz <- list(...) 
type <- unlist(lapply(argz, gettype)) israster <- (type == "raster") haswin <- (type != "none") if(any(israster)) { ## Get raster data rasterlist <- argz[israster] } else { ## No existing raster data - apply default resolution if(!any(haswin)) stop("No spatial data supplied") wins <- lapply(argz[haswin], as.owin) rasterlist <- lapply(wins, as.mask) } ## Find raster object with finest resolution if(length(rasterlist) == 1L) { ## only one raster object finest <- rasterlist[[1L]] } else { ## test for compatible units un <- lapply(rasterlist, unitname) uok <- unlist(lapply(un, compatible.units, y=un[[1L]])) if(!all(uok)) stop("Objects have incompatible units of length") ## find the image/mask with the highest resolution xsteps <- unlist(lapply(rasterlist, getElement, name="xstep")) which.finest <- which.min(xsteps) finest <- rasterlist[[which.finest]] } ## determine the bounding box bb <- do.call(boundingbox, lapply(unname(argz[haswin]), as.rectangle)) ## determine new raster coordinates xcol <- prolongseq(finest$xcol, bb$xrange) yrow <- prolongseq(finest$yrow, bb$yrange) xy <- list(x=xcol, y=yrow) ## generate template Wtemplate <- as.mask(bb, xy=xy) return(Wtemplate) } commonGrid }) im.apply <- local({ im.apply <- function(X, FUN, ...) { stopifnot(is.list(X)) if(!all(sapply(X, is.im))) stop("All elements of X must be pixel images") fun <- if(is.character(FUN)) get(FUN) else if(is.function(FUN)) FUN else stop("Unrecognised format for FUN") ## ensure images are compatible X <- do.call(harmonise.im, X) template <- X[[1L]] ## extract numerical values and convert to matrix with one column per image vlist <- lapply(X, flatten) vals <- matrix(unlist(vlist), ncol=length(X)) colnames(vals) <- names(X) ok <- complete.cases(vals) if(!any(ok)) { ## empty result return(as.im(NA, W=template)) } ## apply function resultok <- apply(vals[ok,, drop=FALSE], 1L, fun, ...) 
if(length(resultok) != sum(ok)) stop("FUN should yield one value per pixel") ## pack up, with attention to type of data d <- dim(template) resultmat <- matrix(resultok[1L], d[1L], d[2L]) resultmat[ok] <- resultok resultmat[!ok] <- NA result <- as.im(resultmat, W=X[[1L]]) if(is.factor(resultok)) levels(result) <- levels(resultok) return(result) } flatten <- function(z) { as.vector(as.matrix(z)) } im.apply }) spatstat/R/suffstat.R0000755000176200001440000000633113115271120014277 0ustar liggesusers# # suffstat.R # # calculate sufficient statistic # # $Revision: 1.17 $ $Date: 2013/04/25 06:37:43 $ # # suffstat <- function(model, X=data.ppm(model)) { cl <- sys.call() callstring <- short.deparse(cl) verifyclass(model, "ppm") if(!missing(X)) verifyclass(X, "ppp") else X <- NULL inter <- model$interaction func <- if(is.null(inter) || is.poisson(inter)) suffstat.poisson else if(!is.null(ssinter <- inter$suffstat)) ssinter else if(!is.null(ssfamily <- inter$family$suffstat)) ssfamily else suffstat.generic return(func(model, X, callstring)) } suffstat.generic <- function(model, X=NULL, callstring="suffstat.generic") { # This should work for an arbitrary ppm # since it uses the fundamental relation between # conditional intensity and likelihood. # But it is computationally intensive. 
verifyclass(model, "ppm") coefnames <- names(coef(model)) if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") # refit the model to determine which points are used in pseudolikelihood modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] if(!any(contribute)) # result is zero vector return(0 * coef(model)) # Add points one-by-one # If there are points which don't contribute, condition on them use <- which(contribute) dontuse <- which(!contribute) for(i in seq_along(use)) { prior <- if(i == 1) c() else use[1:(i-1)] prior <- c(dontuse, prior) Xprior <- X[prior] Xcurrent <- X[use[i]] mom <- partialModelMatrix(Xprior, Xcurrent, model, "suffstat") lastrow <- length(prior) + 1 momrow <- mom[lastrow, ] if(i == 1) result <- momrow else result <- momrow + result } names(result) <- coefnames attr(result, "mplsubset") <- NULL return(result) } killinteraction <- function(model) { verifyclass(model, "ppm") ispoisson <- summary(model, quick=TRUE)$poisson if(ispoisson) return(model) # surgery required newmodel <- model newmodel$interaction <- NULL if(!is.null(Vnames <- model$internal$Vnames)) { matches <- names(model$coef) %in% Vnames newmodel$coef <- model$coef[!matches] newmodel$internal$Vnames <- NULL } # the other 'internal' stuff may still be wrong (or `preserved') return(newmodel) } suffstat.poisson <- function(model, X, callstring="suffstat.poisson") { verifyclass(model, "ppm") if(is.null(X)) X <- data.ppm(model) else verifyclass(X, "ppp") if(!is.poisson(model)) stop("Model is not a Poisson process") Empty <- X[numeric(0)] mom <- partialModelMatrix(X, Empty, model, "suffstat") nmom <- ncol(mom) ncoef <- length(coef(model)) if(nmom != ncoef) stop("Internal error: number of columns of model matrix does not match number of coefficients in fitted model") if(nmom > 1 && 
any(colnames(mom) != names(coef(model)))) warning("Internal error: mismatch between column names of model matrix and names of coefficient vector in fitted model") o1sum <- apply(mom, 2, sum) return(o1sum) } spatstat/R/clickjoin.R0000755000176200001440000000147213115271075014417 0ustar liggesusers# # clickjoin.R # # interactive addition/deletion of segments between vertices # clickjoin <- function(X, ..., add=TRUE, m=NULL, join=TRUE) { verifyclass(X, "ppp") if(!(is.logical(join) && length(join) == 1)) stop("join should be a single logical value") plot(X, add=add, pch=16) if(is.null(m)) { m <- matrix(FALSE, npoints(X), npoints(X)) } else { stopifnot(is.matrix(m) && is.logical(m)) stopifnot(all(dim(m) == npoints(X))) from <- as.vector(row(m)[m]) to <- as.vector(col(m)[m]) with(X, segments(x[from], y[from], x[to], y[to])) } while(TRUE) { twoid <- identify(X, plot=FALSE, n=2) n <- length(twoid) if(n == 0) break if(n == 2) { m[twoid[1L],twoid[2L]] <- m[twoid[2L],twoid[1L]] <- join lines(X$x[twoid], X$y[twoid], ...) 
} } return(m) } spatstat/R/minkowski.R0000644000176200001440000000476013115271120014454 0ustar liggesusers#' #' minkowski.R #' #' Minkowski Sum and related operations #' #' $Revision: 1.7 $ $Date: 2017/06/05 10:31:58 $ "%(+)%" <- MinkowskiSum <- local({ MinkowskiSum <- function(A, B) { if(is.ppp(A)) return(UnionOfShifts(B, A)) if(is.ppp(B)) return(UnionOfShifts(A, B)) ## extract lists of simply-connected polygons AA <- simplepolygons(A) BB <- simplepolygons(B) ## determine common resolution for polyclip operations eps <- mean(c(sidelengths(Frame(A)), sidelengths(Frame(B))))/2^30 ## compute Minkowski sums of pieces pieces <- NULL for(b in BB) pieces <- append(pieces, lapply(AA, MinkSumConnected, b=b, eps=eps)) ## form union in one step, to avoid artefacts result <- union.owin(solapply(pieces, poly2owin)) return(result) } poly2owin <- function(z) owin(poly=z, check=FALSE) MinkSumConnected <- function(a, b, eps) { ## a and b are list(x,y) simply-connected polygons out <- polyclip::polyminkowski(a, b, x0=0, y0=0, eps=eps) if(length(out) == 1) return(out) ispos <- (sapply(out, Area.xypolygon) >= 0) if(sum(ispos) > 1) { stop("Internal error: result of sumconnected is not simply connected", call.=FALSE) } return(out[ispos]) } simplepolygons <- function(A) { if(is.psp(A)) return(psp2poly(A)) ## convert to owin, then polygonal A <- as.polygonal(A) ## separate into simply-connected pieces AA <- break.holes(A)$bdry return(AA) } ## handle segment patterns as well psp2poly <- function(X) apply(as.matrix(X$ends), 1, seg2poly) seg2poly <- function(z) with(as.list(z), list(x=c(x0, x1, x0), y=c(y0,y1,y0))) ## UnionOfShifts <- function(X, V) { #' compute the union or superposition of copies of X by vectors in V v <- as.matrix(coords(V)) n <- nrow(v) Y <- vector(mode="list", length=n) for(i in seq_len(n)) Y[[i]] <- shift(X, v[i,]) Y <- as.solist(Y) if(is.owin(X)) { Z <- union.owin(Y) } else { #' X is a pattern of objects in a window W <- MinkowskiSum(Window(X), Window(V)) Z <- 
superimpose(Y, W=W) } return(Z) } MinkowskiSum }) dilationAny <- function(A, B) { MinkowskiSum(A, reflect(B)) } "%(-)%" <- erosionAny <- function(A, B) { D <- Frame(A) Dplus <- grow.rectangle(D, 0.1 * shortside(D)) Ac <- complement.owin(A, Dplus) AcB <- MinkowskiSum(Ac, reflect(B)) if(is.subset.owin(D, AcB)) return(emptywindow(D)) C <- complement.owin(AcB[Dplus], Dplus)[D] return(C) } spatstat/R/ppqq.R0000644000176200001440000000737213115225157013435 0ustar liggesusers## ## ppqq.R ## ## P-P and Q-Q versions of fv objects ## PPversion <- local({ PPversion <- function(f, theo="theo", columns=".") { if(!any(colnames(f) == theo)) stop(paste(sQuote(theo), "is not the name of a column of f")) ## set up inverse theoretical function f_0: 'theo' |-> 'r' xname <- fvnames(f, ".x") df <- as.data.frame(f) theo.table <- df[,theo] x.table <- df[,xname] invfun <- approxfun(x=theo.table, y=x.table, rule=1) ## evaluate f_0^{-1}(theo) for evenly-spaced grid of 'theo' values ra <- range(theo.table) theo.seq <- seq(from=ra[1], to=ra[2], length.out=nrow(df)) x.vals <- invfun(theo.seq) ## convert f to a function and evaluate at these 'r' values ynames <- setdiff(fvnames(f, columns), theo) ff <- as.function(f, value=ynames) y.vals <- lapply(ynames, evalselected, x=x.vals, f=ff) ## build data frame all.vals <- append(list(theo=theo.seq), y.vals) names(all.vals) <- c(theo, ynames) DF <- as.data.frame(all.vals) ## set up fv object atr <- attributes(f) cnames <- colnames(f) i.theo <- match(theo, cnames) i.yval <- match(ynames, cnames) ii <- c(i.theo, i.yval) old.best <- fvnames(f, ".y") best <- if(old.best %in% ynames) old.best else ynames[length(ynames)] result <- fv(DF, argu = theo, ylab = atr$ylab, valu = best, fmla = . 
~ .x, alim = ra, labl = atr$labl[ii], desc = atr$desc[ii], unitname = NULL, fname = atr$fname, yexp = atr$yexp) fvnames(result, ".") <- c(ynames, theo) return(result) } evalselected <- function(what, f, x){ f(x, what=what) } PPversion }) QQversion <- function(f, theo="theo", columns=".") { if(!any(colnames(f) == theo)) stop(paste(sQuote(theo), "is not the name of a column of f")) ## extract relevant columns of data xname <- fvnames(f, ".x") ynames <- fvnames(f, columns) df <- as.data.frame(f) theo.table <- df[,theo] x.table <- df[,xname] y.table <- df[,ynames, drop=FALSE] ## set up inverse theoretical function f_0: 'theo' |-> 'r' invfun <- approxfun(x=theo.table, y=x.table, rule=1) ## apply f_0^{-1} to tabulated function values z.table <- as.data.frame(lapply(y.table, invfun)) ## build data frame DF <- cbind(df[,xname,drop=FALSE], z.table) ## set up fv object atr <- attributes(f) cnames <- colnames(f) i.x <- match(xname, cnames) i.y <- match(ynames, cnames) ii <- c(i.x, i.y) old.best <- fvnames(f, ".y") best <- if(old.best %in% ynames) old.best else ynames[length(ynames)] if(versionstring.spatstat() < package_version("1.38-2")) { fvl <- fvlabels(f, expand=TRUE) theo.string <- fvl[colnames(f) == theo] } else { theo.string <- fvlabels(f, expand=TRUE)[[theo]] } ## remove '(r)' from outer function theo.string <- sub(paren(xname), "", theo.string, fixed=TRUE) theo.expr <- parse(text=theo.string) theo.lang <- theo.expr[[1]] ylab <- substitute({{THEO}^{-1}}(FUN), list(FUN=atr$ylab, THEO=theo.lang)) yexp <- substitute({{THEO}^{-1}}(FUN), list(FUN=atr$yexp, THEO=theo.lang)) oldlabl <- atr$labl labl.iy <- sprintf("{{%s}^{-1}}(%s)", theo.string, oldlabl[i.y]) labl.ii <- c(oldlabl[i.x], labl.iy) result <- fv(DF, argu = atr$argu, ylab = ylab, valu = best, fmla = . 
~ .x, alim = atr$alim, labl = labl.ii, desc = atr$desc[ii], unitname = NULL, fname = atr$fname, yexp = yexp) fvnames(result, ".") <- ynames unitname(result) <- unitname(f) return(result) } spatstat/R/dgs.R0000755000176200001440000000707613115271075013235 0ustar liggesusers# # # dgs.R # # $Revision: 1.9 $ $Date: 2017/06/05 10:31:58 $ # # Diggle-Gates-Stibbard process # # # ------------------------------------------------------------------- # DiggleGatesStibbard <- local({ # .......... auxiliary functions ................ dgsTerms <- function(X, Y, idX, idY, rho) { stopifnot(is.numeric(rho)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] idXsort <- idX[oX] idYsort <- idY[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine out <- .C("Ediggatsti", nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), rrho = as.double(rho), values = as.double(double(nX)), PACKAGE = "spatstat") answer <- integer(nX) answer[oX] <- out$values return(answer) } # ...... template object ...................... 
BlankDGS <- list( name = "Diggle-Gates-Stibbard process", creator = "DiggleGatesStibbard", family = "pairwise.family", # evaluated later pot = function(d, par) { rho <- par$rho v <- log(sin((pi/2) * d/rho)^2) v[ d > par$rho ] <- 0 attr(v, "IsOffset") <- TRUE v }, par = list(rho = NULL), # to be filled in later parnames = "interaction range", init = function(self) { rho <- self$par$rho if(!is.numeric(rho) || length(rho) != 1L || rho <= 0) stop("interaction range rho must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { rho <- self$par$rho return(rho) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) 
{ # fast evaluator for DiggleGatesStibbard interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for DiggleGatesStibbard") rho <- potpars$rho idX <- seq_len(npoints(X)) idU <- rep.int(-1L, npoints(U)) idU[EqualPairs[,2L]] <- EqualPairs[,1L] v <- dgsTerms(U, X, idU, idX, rho) v <- matrix(v, ncol=1L) attr(v, "IsOffset") <- TRUE return(v) }, Mayer=function(coeffs, self) { # second Mayer cluster integral rho <- self$par$rho return((pi/2 - 2/pi) * rho^2) } ) class(BlankDGS) <- "interact" DiggleGatesStibbard <- function(rho) { instantiate.interact(BlankDGS, list(rho = rho)) } DiggleGatesStibbard <- intermaker(DiggleGatesStibbard, BlankDGS) DiggleGatesStibbard }) spatstat/R/Gres.R0000755000176200001440000000502513115271075013350 0ustar liggesusers# # Gres.R # # Residual G # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # ############################################################################# # Gres <- function(object, ...) { if(!is.fv(object)) { # usual case where 'object' is a ppm, ppp or quad G <- Gcom(object, ...) } else { # case where 'object' is the output of 'Gcom' a <- attr(object, "maker") if(is.null(a) || a != "Gcom") stop("fv object was not created by Gcom") G <- object if(length(list(...)) > 0) warning("Extra arguments ignored") } # initialise fv object df <- data.frame(r=G$r, theo=numeric(length(G$r))) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~hat(G)(r), NULL), "theo", . 
~ r, attr(G, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="G") # add residual estimates nam <- names(G) if(all(c("border","bcom") %in% nam)) ans <- bind.fv(ans, data.frame(bres=with(G, border-bcom)), "bold(R)~hat(%s)[bord](r)", "border corrected residual of %s", "bres") if(all(c("han","hcom") %in% nam)) ans <- bind.fv(ans, data.frame(hres=with(G, han-hcom)), "bold(R)~hat(%s)[han](r)", "Hanisch corrected residual of %s", "hres") if("hvar" %in% nam) { savedotnames <- fvnames(ans, ".") hsd <- with(G, sqrt(hvar)) ans <- bind.fv(ans, data.frame(hvar=with(G, hvar), hsd = hsd, hi = 2*hsd, lo = -2*hsd), c("bold(C)^2~hat(%s)[han](r)", "sqrt(bold(C)^2~hat(%s)[han](r))", "bold(R)~hat(%s)[Hi](r)", "bold(R)~hat(%s)[Lo](r)"), c("pseudovariance of Hanisch corrected residual %s", "pseudo-SD of Hanisch corrected residual %s", "upper critical band for Hanisch corrected residual %s", "lower critical band for Hanisch corrected residual %s"), "hres") ans <- bind.fv(ans, data.frame(hstdres=with(ans, hres/hsd)), "bold(T)~hat(%s)[han](r)", "standardised Hanisch-corrected residual %s", "hres") fvnames(ans, ".") <- c(savedotnames, c("hi", "lo")) } unitname(ans) <- unitname(G) return(ans) } spatstat/R/kppm.R0000755000176200001440000016301313133604370013417 0ustar liggesusers# # kppm.R # # kluster/kox point process models # # $Revision: 1.134 $ $Date: 2017/07/19 07:18:54 $ # kppm <- function(X, ...) { UseMethod("kppm") } kppm.formula <- function(X, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), ..., data=NULL) { ## remember call callstring <- short.deparse(sys.call()) cl <- match.call() ########### INTERPRET FORMULA ############################## if(!inherits(X, "formula")) stop(paste("Argument 'X' should be a formula")) formula <- X if(spatstat.options("expand.polynom")) formula <- expand.polynom(formula) ## check formula has LHS and RHS. 
Extract them if(length(formula) < 3) stop(paste("Formula must have a left hand side")) Yexpr <- formula[[2L]] trend <- formula[c(1L,3L)] ## FIT ####################################### thecall <- call("kppm", X=Yexpr, trend=trend, data=data, clusters=clusters) ncall <- length(thecall) argh <- list(...) nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } # result <- eval(thecall, # envir=if(!is.null(data)) data else parent.frame(), # enclos=if(!is.null(data)) parent.frame() else baseenv()) callenv <- list2env(as.list(data), parent=parent.frame()) result <- eval(thecall, envir=callenv, enclos=baseenv()) result$call <- cl result$callframe <- parent.frame() if(!("callstring" %in% names(list(...)))) result$callstring <- callstring return(result) } kppm.ppp <- kppm.quad <- function(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data=NULL, ..., covariates = data, subset, method = c("mincon", "clik2", "palm"), improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), algorithm="Nelder-Mead", statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) { cl <- match.call() callstring <- paste(short.deparse(sys.call()), collapse="") Xname <- short.deparse(substitute(X)) clusters <- match.arg(clusters) improve.type <- match.arg(improve.type) method <- match.arg(method) if(method == "mincon") statistic <- pickoption("summary statistic", statistic, c(K="K", g="pcf", pcf="pcf")) ClusterArgs <- list(method = method, improve.type = improve.type, improve.args = improve.args, weightfun=weightfun, control=control, algorithm=algorithm, statistic=statistic, statargs=statargs, rmax = rmax) Xenv <- list2env(as.list(covariates), parent=parent.frame()) X <- eval(substitute(X), envir=Xenv, enclos=baseenv()) isquad <- inherits(X, "quad") if(!is.ppp(X) && !isquad) stop("X should be a point pattern (ppp) or 
quadrature scheme (quad)") if(is.marked(X)) stop("Sorry, cannot handle marked point patterns") if(!missing(subset)) { W <- eval(subset, covariates, parent.frame()) if(!is.null(W)) { if(is.im(W)) { W <- solutionset(W) } else if(!is.owin(W)) { stop("Argument 'subset' should yield a window or logical image", call.=FALSE) } X <- X[W] } } po <- ppm(Q=X, trend=trend, covariates=covariates, forcefit=TRUE, rename.intercept=FALSE, covfunargs=covfunargs, use.gam=use.gam, nd=nd, eps=eps) XX <- if(isquad) X$data else X # set default weight function if(is.null(weightfun) && method != "mincon") { RmaxW <- (rmax %orifnull% rmax.rule("K", Window(XX), intensity(XX))) / 2 weightfun <- function(d, rr=RmaxW) { as.integer(d <= rr) } formals(weightfun)[[2]] <- RmaxW attr(weightfun, "selfprint") <- paste0("Indicator(distance <= ", RmaxW, ")") } # fit out <- switch(method, mincon = kppmMinCon(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, statistic=statistic, statargs=statargs, rmax=rmax, algorithm=algorithm, ...), clik2 = kppmComLik(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, weightfun=weightfun, rmax=rmax, algorithm=algorithm, ...), palm = kppmPalmLik(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, weightfun=weightfun, rmax=rmax, algorithm=algorithm, ...)) # out <- append(out, list(ClusterArgs=ClusterArgs, call=cl, callframe=parent.frame(), callstring=callstring)) # Detect DPPs DPP <- list(...)$DPP class(out) <- c(ifelse(is.null(DPP), "kppm", "dppm"), class(out)) # Update intensity estimate with improve.kppm if necessary: if(improve.type != "none") out <- do.call(improve.kppm, append(list(object = out, type = improve.type), improve.args)) return(out) } kppmMinCon <- function(X, Xname, po, clusters, control, statistic, statargs, algorithm="Nelder-Mead", DPP=NULL, ...) 
{ # Minimum contrast fit stationary <- is.stationary(po) # compute intensity if(stationary) { lambda <- summary(po)$trend$value } else { # compute intensity at high resolution if available w <- as.owin(po, from="covariates") if(!is.mask(w)) w <- NULL lambda <- predict(po, locations=w) } # Detect DPP model and change clusters and intensity correspondingly if(!is.null(DPP)){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } mcfit <- clusterfit(X, clusters, lambda = lambda, dataname = Xname, control = control, statistic = statistic, statargs = statargs, algorithm=algorithm, ...) fitinfo <- attr(mcfit, "info") attr(mcfit, "info") <- NULL # all info that depends on the fitting method: Fit <- list(method = "mincon", statistic = statistic, Stat = fitinfo$Stat, StatFun = fitinfo$StatFun, StatName = fitinfo$StatName, FitFun = fitinfo$FitFun, statargs = statargs, mcfit = mcfit) # results if(!is.null(DPP)){ clusters <- update(clusters, as.list(mcfit$par)) out <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, po = po, Fit = Fit) } else{ out <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = fitinfo$modelname, isPCP = fitinfo$isPCP, po = po, lambda = lambda, mu = mcfit$mu, par = mcfit$par, par.canon = mcfit$par.canon, clustpar = mcfit$clustpar, clustargs = mcfit$clustargs, modelpar = mcfit$modelpar, covmodel = mcfit$covmodel, Fit = Fit) } return(out) } clusterfit <- function(X, clusters, lambda = NULL, startpar = NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., statistic = NULL, statargs = NULL, algorithm="Nelder-Mead"){ ## If possible get dataname from dots dataname <- list(...)$dataname ## Cluster info: info <- spatstatClusterModelInfo(clusters) ## Detect DPP usage isDPP <- inherits(clusters, "detpointprocfamily") if(inherits(X, "ppp")){ if(is.null(dataname)) dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(is.null(statistic)) statistic <- "K" # Startpar: if(is.null(startpar)) startpar <- info$selfstart(X) stationary <- is.null(lambda) || (is.numeric(lambda) && length(lambda)==1) # compute summary function if(stationary) { if(is.null(lambda)) lambda <- intensity(X) StatFun <- if(statistic == "K") "Kest" else "pcf" StatName <- if(statistic == "K") "K-function" else "pair correlation function" Stat <- do.call(StatFun, resolve.defaults(list(X=X), statargs, list(correction="best"))) } else { StatFun <- if(statistic == "K") "Kinhom" else "pcfinhom" StatName <- if(statistic == "K") "inhomogeneous K-function" else "inhomogeneous pair correlation function" Stat <- do.call(StatFun, resolve.defaults(list(X=X, lambda=lambda), statargs, list(correction="best"))) } } else if(inherits(X, "fv")){ Stat <- X ## Get statistic type stattype <- attr(Stat, "fname") StatFun <- paste0(stattype) StatName <- NULL if(is.null(statistic)){ if(is.null(stattype) || !is.element(stattype[1L], c("K", "pcf"))) stop("Cannot infer the type of summary statistic from argument ", sQuote("X"), " please specify this via argument ", sQuote("statistic")) statistic <- stattype[1L] } if(stattype[1L]!=statistic) stop("Statistic inferred from ", sQuote("X"), " not equal to supplied argument ", sQuote("statistic")) # Startpar: if(is.null(startpar)){ if(isDPP) stop("No rule for starting parameters in this case. 
Please set ", sQuote("startpar"), " explicitly.") startpar <- info$checkpar(startpar, old=FALSE) startpar[["scale"]] <- mean(range(Stat[[fvnames(Stat, ".x")]])) } } else{ stop("Unrecognised format for argument X") } ## avoid using g(0) as it may be infinite if(statistic=="pcf"){ argu <- fvnames(Stat, ".x") rvals <- Stat[[argu]] if(rvals[1L] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2L] } } ## DPP resolving algorithm and checking startpar changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar) algorithm <- alg$algorithm } isPCP <- info$isPCP if(isDPP && missing(q)) q <- 1/2 dots <- info$resolvedots(..., q = q, p = p, rmin = rmin, rmax = rmax) # determine initial values of parameters startpar <- info$checkpar(startpar) # fit theoret <- info[[statistic]] desc <- paste("minimum contrast fit of", info$descname) #' ............ experimental ......................... do.adjust <- spatstat.options("kppm.adjusted") if(do.adjust) { W <- Window(X) adjdata <- list(paircorr = info[["pcf"]], pairWcdf = distcdf(W), tohuman = NULL) adjfun <- function(theo, par, auxdata, ...) { with(auxdata, { if(!is.null(tohuman)) par <- tohuman(par) a <- as.numeric(stieltjes(paircorr, pairWcdf, par=par, ...)) return(theo/a) }) } adjustment <- list(fun=adjfun, auxdata=adjdata) } else adjustment <- NULL #' ............ experimental ......................... usecanonical <- spatstat.options("kppm.canonical") if(usecanonical) { tocanonical <- info$tocanonical tohuman <- info$tohuman if(is.null(tocanonical) || is.null(tohuman)) { warning("Canonical parameters are not yet supported for this model") usecanonical <- FALSE } } startpar.human <- startpar if(usecanonical) { htheo <- theoret startpar <- tocanonical(startpar) theoret <- function(par, ...) { htheo(tohuman(par), ...) } if(do.adjust) adjustment$data$tohuman <- tohuman } #' ................................................... 
mcargs <- resolve.defaults(list(observed=Stat, theoretical=theoret, startpar=startpar, ctrl=dots$ctrl, method=algorithm, fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=statistic, modelname=info$modelname), margs=dots$margs, model=dots$model, funaux=info$funaux, adjustment=adjustment), list(...)) if(isDPP && algorithm=="Brent" && changealgorithm){ mcargs <- resolve.defaults(mcargs, list(lower=alg$lower, upper=alg$upper)) } mcfit <- do.call(mincontrast, mcargs) # extract fitted parameters and reshape if(!usecanonical) { optpar.canon <- NULL optpar.human <- mcfit$par names(optpar.human) <- names(startpar.human) } else { optpar.canon <- mcfit$par names(optpar.canon) <- names(startpar) optpar.human <- tohuman(optpar.canon) names(optpar.human) <- names(startpar.human) } mcfit$par <- optpar.human mcfit$par.canon <- optpar.canon # Return results for DPPs if(isDPP){ extra <- list(Stat = Stat, StatFun = StatFun, StatName = StatName, modelname = info$modelabbrev, lambda = lambda) attr(mcfit, "info") <- extra return(mcfit) } ## Extra stuff for ordinary cluster/lgcp models ## imbue with meaning ## infer model parameters mcfit$modelpar <- info$interpret(optpar.human, lambda) mcfit$internal <- list(model=ifelse(isPCP, clusters, "lgcp")) mcfit$covmodel <- dots$covmodel if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- mcfit$par[["kappa"]] # mu = mean cluster size mu <- lambda/kappa } else { # LGCP: extract variance parameter sigma2 sigma2 <- mcfit$par[["sigma2"]] # mu = mean of log intensity mu <- log(lambda) - sigma2/2 } ## Parameter values (new format) mcfit$mu <- mu mcfit$clustpar <- info$checkpar(mcfit$par, old=FALSE) mcfit$clustargs <- info$checkclustargs(dots$margs, old=FALSE) ## The old fit fun that would have been used (DO WE NEED THIS?) 
FitFun <- paste0(tolower(clusters), ".est", statistic) extra <- list(FitFun = FitFun, Stat = Stat, StatFun = StatFun, StatName = StatName, modelname = info$modelabbrev, isPCP = isPCP, lambda = lambda) attr(mcfit, "info") <- extra return(mcfit) } kppmComLik <- function(X, Xname, po, clusters, control, weightfun, rmax, algorithm="Nelder-Mead", DPP=NULL, ...) { W <- as.owin(X) if(is.null(rmax)) rmax <- rmax.rule("K", W, intensity(X)) # identify pairs of points that contribute cl <- closepairs(X, rmax, what="ijd") # I <- cl$i # J <- cl$j dIJ <- cl$d # compute weights for pairs of points if(is.function(weightfun)) { wIJ <- weightfun(dIJ) sumweight <- sum(wIJ) } else { npairs <- length(dIJ) wIJ <- rep.int(1, npairs) sumweight <- npairs } # convert window to mask, saving other arguments for later dcm <- do.call.matched(as.mask, append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs ## Detect DPP usage isDPP <- inherits(clusters, "detpointprocfamily") # compute intensity at pairs of data points # and c.d.f. 
of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) # lambdaIJ <- lambda^2 # compute cdf of distance between two uniform random points in W g <- distcdf(W) # scaling constant is (area * intensity)^2 gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window # lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) # lambda(x_i) * lambda(x_j) # lambdaIJ <- lambdaX[I] * lambdaX[J] # compute cdf of distance between two random points in W # with density proportional to intensity function g <- distcdf(M, dW=lambdaM) # scaling constant is (integral of intensity)^2 gscale <- integral.im(lambdaM)^2 } # Detect DPP model and change clusters and intensity correspondingly isDPP <- !is.null(DPP) if(isDPP){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf funaux <- info$funaux selfstart <- info$selfstart isPCP <- info$isPCP parhandler <- info$parhandler modelname <- info$modelname # Assemble information required for computing pair correlation pcfunargs <- list(funaux=funaux) if(is.function(parhandler)) { # Additional parameters of cluster model are required. # These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs clargs <- do.call(parhandler, clustargs) pcfunargs <- append(clargs, pcfunargs) } else clargs <- NULL # determine starting parameter values startpar <- selfstart(X) #' ............ experimental ......................... 
usecanonical <- spatstat.options("kppm.canonical") if(usecanonical) { tocanonical <- info$tocanonical tohuman <- info$tohuman if(is.null(tocanonical) || is.null(tohuman)) { warning("Canonical parameters are not yet supported for this model") usecanonical <- FALSE } } startpar.human <- startpar if(usecanonical) { pcftheo <- pcfun startpar <- tocanonical(startpar) pcfun <- function(par, ...) { pcftheo(tohuman(par), ...) } } # ..................................................... # create local function to evaluate pair correlation # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) } # define objective function if(!is.function(weightfun)) { # pack up necessary information objargs <- list(dIJ=dIJ, sumweight=sumweight, g=g, gscale=gscale, envir=environment(paco)) # define objective function (with 'paco' in its environment) # Note that this is 1/2 of the log composite likelihood, # minus the constant term # sum(log(lambdaIJ)) - npairs * log(gscale) obj <- function(par, objargs) { with(objargs, sum(log(paco(dIJ, par))) - sumweight * log(unlist(stieltjes(paco, g, par=par))), enclos=objargs$envir) } } else { # create local function to evaluate pair correlation(d) * weight(d) # (with additional parameters 'pcfunargs', 'weightfun' in its environment) force(weightfun) wpaco <- function(d, par) { y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) w <- weightfun(d) return(y * w) } # pack up necessary information objargs <- list(dIJ=dIJ, wIJ=wIJ, sumweight=sumweight, g=g, gscale=gscale, envir=environment(wpaco)) # define objective function (with 'paco', 'wpaco' in its environment) # Note that this is 1/2 of the log composite likelihood, # minus the constant term # sum(wIJ * log(lambdaIJ)) - sumweight * log(gscale) obj <- function(par, objargs) { with(objargs, sum(wIJ * log(paco(dIJ, par))) - sumweight * log(unlist(stieltjes(wpaco, g, par=par))), enclos=objargs$envir) } } # 
arguments for optimization ctrl <- resolve.defaults(list(fnscale=-1), control, list(trace=0)) optargs <- list(par=startpar, fn=obj, objargs=objargs, control=ctrl, method=algorithm) ## DPP resolving algorithm and checking startpar changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar.human) algorithm <- optargs$method <- alg$algorithm if(algorithm=="Brent" && changealgorithm){ optargs$lower <- alg$lower optargs$upper <- alg$upper } } # optimize it opt <- do.call(optim, optargs) # raise warning/error if something went wrong signalStatus(optimStatus(opt), errors.only=TRUE) # fitted parameters if(!usecanonical) { optpar.canon <- NULL optpar.human <- opt$par names(optpar.human) <- names(startpar.human) } else { optpar.canon <- opt$par names(optpar.canon) <- names(startpar) optpar.human <- tohuman(optpar.canon) names(optpar.human) <- names(startpar.human) } opt$par <- optpar.human opt$par.canon <- optpar.canon # Finish in DPP case if(!is.null(DPP)){ # all info that depends on the fitting method: Fit <- list(method = "clik2", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs) # pack up clusters <- update(clusters, as.list(opt$par)) result <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, modelname = modelname, po = po, lambda = lambda, Fit = Fit) return(result) } # meaningful model parameters modelpar <- info$interpret(optpar.human, lambda) # infer parameter 'mu' if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- optpar.human[["kappa"]] # mu = mean cluster size mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) } else { # LGCP: extract variance parameter sigma2 sigma2 <- optpar.human[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) } # all info that depends on the fitting method: Fit <- list(method = "clik2", clfit 
= opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs) # pack up result <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = optpar.human, par.canon = optpar.canon, clustpar = info$checkpar(par=optpar.human, old=FALSE), clustargs = info$checkclustargs(clargs$margs, old=FALSE), #clargs$margs, modelpar = modelpar, covmodel = clargs, Fit = Fit) return(result) } kppmPalmLik <- function(X, Xname, po, clusters, control, weightfun, rmax, algorithm="Nelder-Mead", DPP=NULL, ...) { W <- as.owin(X) if(is.null(rmax)) rmax <- rmax.rule("K", W, intensity(X)) # identify pairs of points that contribute cl <- closepairs(X, rmax) # I <- cl$i J <- cl$j dIJ <- cl$d # compute weights for pairs of points if(is.function(weightfun)) { wIJ <- weightfun(dIJ) # sumweight <- sum(wIJ) } else { npairs <- length(dIJ) wIJ <- rep.int(1, npairs) # sumweight <- npairs } # convert window to mask, saving other arguments for later dcm <- do.call.matched(as.mask, append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs ## Detect DPP usage isDPP <- inherits(clusters, "detpointprocfamily") # compute intensity at data points # and c.d.f. 
of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) lambdaJ <- rep(lambda, length(J)) # compute cdf of distance between a uniform random point in W # and a randomly-selected point in X g <- distcdf(X, M) # scaling constant is (integral of intensity) * (number of points) gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) lambdaJ <- lambdaX[J] # compute cdf of distance between a uniform random point in X # and a random point in W with density proportional to intensity function g <- distcdf(X, M, dV=lambdaM) # scaling constant is (integral of intensity) * (number of points) gscale <- integral.im(lambdaM) * npoints(X) } # Detect DPP model and change clusters and intensity correspondingly isDPP <- !is.null(DPP) if(isDPP){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf funaux <- info$funaux selfstart <- info$selfstart isPCP <- info$isPCP parhandler <- info$parhandler modelname <- info$modelname # Assemble information required for computing pair correlation pcfunargs <- list(funaux=funaux) if(is.function(parhandler)) { # Additional parameters of cluster model are required. # These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs clargs <- do.call(parhandler, clustargs) pcfunargs <- append(clargs, pcfunargs) } else clargs <- NULL # determine starting parameter values startpar <- selfstart(X) #' ............ experimental ......................... 
usecanonical <- spatstat.options("kppm.canonical") if(usecanonical) { tocanonical <- info$tocanonical tohuman <- info$tohuman if(is.null(tocanonical) || is.null(tohuman)) { warning("Canonical parameters are not yet supported for this model") usecanonical <- FALSE } } startpar.human <- startpar if(usecanonical) { pcftheo <- pcfun startpar <- tocanonical(startpar) pcfun <- function(par, ...) { pcftheo(tohuman(par), ...) } } # ..................................................... # create local function to evaluate pair correlation # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) } # define objective function if(!is.function(weightfun)) { # pack up necessary information objargs <- list(dIJ=dIJ, g=g, gscale=gscale, sumloglam=sum(log(lambdaJ)), envir=environment(paco)) # define objective function (with 'paco' in its environment) # This is the log Palm likelihood obj <- function(par, objargs) { with(objargs, sumloglam + sum(log(paco(dIJ, par))) - gscale * unlist(stieltjes(paco, g, par=par)), enclos=objargs$envir) } } else { # create local function to evaluate pair correlation(d) * weight(d) # (with additional parameters 'pcfunargs', 'weightfun' in its environment) force(weightfun) wpaco <- function(d, par) { y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) w <- weightfun(d) return(y * w) } # pack up necessary information objargs <- list(dIJ=dIJ, wIJ=wIJ, g=g, gscale=gscale, wsumloglam=sum(wIJ * log(lambdaJ)), envir=environment(wpaco)) # define objective function (with 'paco', 'wpaco' in its environment) # This is the log Palm likelihood obj <- function(par, objargs) { with(objargs, wsumloglam + sum(wIJ * log(paco(dIJ, par))) - gscale * unlist(stieltjes(wpaco, g, par=par)), enclos=objargs$envir) } } # arguments for optimization ctrl <- resolve.defaults(list(fnscale=-1), control, list(trace=0)) optargs <- list(par=startpar, fn=obj, objargs=objargs, control=ctrl, 
method=algorithm) ## DPP resolving algorithm and checking startpar changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar.human) algorithm <- optargs$method <- alg$algorithm if(algorithm=="Brent" && changealgorithm){ optargs$lower <- alg$lower optargs$upper <- alg$upper } } # optimize it opt <- do.call(optim, optargs) # raise warning/error if something went wrong signalStatus(optimStatus(opt), errors.only=TRUE) # Extract optimal values of parameters if(!usecanonical) { optpar.canon <- NULL optpar.human <- opt$par names(optpar.human) <- names(startpar.human) } else { optpar.canon <- opt$par names(optpar.canon) <- names(startpar) optpar.human <- tohuman(optpar.canon) names(optpar.human) <- names(startpar.human) } # Finish in DPP case if(!is.null(DPP)){ opt$par <- optpar.human opt$par.canon <- optpar.canon # all info that depends on the fitting method: Fit <- list(method = "palm", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs) # pack up clusters <- update(clusters, as.list(optpar.human)) result <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, modelname = modelname, po = po, lambda = lambda, Fit = Fit) return(result) } # meaningful model parameters modelpar <- info$interpret(optpar.human, lambda) # infer parameter 'mu' if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- optpar.human[["kappa"]] # mu = mean cluster size mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) } else { # LGCP: extract variance parameter sigma2 sigma2 <- optpar.human[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) } # all info that depends on the fitting method: Fit <- list(method = "palm", clfit = opt, weightfun = weightfun, rmax = rmax) # pack up result <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, 
modelname = modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = optpar.human, par.canon = optpar.canon, clustpar = info$checkpar(par=optpar.human, old=FALSE), clustargs = info$checkclustargs(clargs$margs, old=FALSE), #clargs$margs, modelpar = modelpar, covmodel = clargs, Fit = Fit) return(result) } improve.kppm <- local({ fnc <- function(r, eps, g){ (g(r) - 1)/(g(0) - 1) - eps} improve.kppm <- function(object, type=c("quasi", "wclik1", "clik1"), rmax = NULL, eps.rmax = 0.01, dimyx = 50, maxIter = 100, tolerance = 1e-06, fast = TRUE, vcov = FALSE, fast.vcov = FALSE, verbose = FALSE, save.internals = FALSE) { verifyclass(object, "kppm") type <- match.arg(type) gfun <- pcfmodel(object) X <- object$X win <- as.owin(X) ## simple (rectangular) grid quadrature scheme ## (using pixels with centers inside owin only) mask <- as.mask(win, dimyx = dimyx) wt <- pixellate(win, W = mask) wt <- wt[mask] Uxy <- rasterxy.mask(mask) U <- ppp(Uxy$x, Uxy$y, window = win, check=FALSE) U <- U[mask] # nU <- npoints(U) Yu <- pixellate(X, W = mask) Yu <- Yu[mask] ## covariates at quadrature points po <- object$po Z <- model.images(po, mask) Z <- sapply(Z, "[", i=U) ##obtain initial beta estimate using composite likelihood beta0 <- coef(po) ## determining the dependence range if (type != "clik1" && is.null(rmax)) { diamwin <- diameter(win) rmax <- if(fnc(diamwin, eps.rmax, gfun) >= 0) diamwin else uniroot(fnc, lower = 0, upper = diameter(win), eps=eps.rmax, g=gfun)$root if(verbose) splat(paste0("type: ", type, ", ", "dependence range: ", rmax, ", ", "dimyx: ", dimyx, ", g(0) - 1:", gfun(0) -1)) } ## preparing the WCL case if (type == "wclik1") Kmax <- 2*pi * integrate(function(r){r * (gfun(r) - 1)}, lower=0, upper=rmax)$value * exp(c(Z %*% beta0)) ## the g()-1 matrix without tapering if (!fast || (vcov && !fast.vcov)){ if (verbose) cat("computing the g(u_i,u_j)-1 matrix ...") gminus1 <- matrix(gfun(c(pairdist(U))) - 1, U$n, U$n) if (verbose) cat("..Done.\n") } if ( (fast && 
type == "quasi") | fast.vcov ){ if (verbose) cat("computing the sparse G-1 matrix ...\n") ## Non-zero gminus1 entries (when using tapering) cp <- crosspairs(U,U,rmax,what="ijd") if (verbose) cat("crosspairs done\n") Gtap <- (gfun(cp$d) - 1) if(vcov){ if(fast.vcov){ gminus1 <- Matrix::sparseMatrix(i=cp$i, j=cp$j, x=Gtap, dims=c(U$n, U$n)) } else{ if(fast) gminus1 <- matrix(gfun(c(pairdist(U))) - 1, U$n, U$n) } } if (verbose & type!="quasi") cat("..Done.\n") } if (type == "quasi" && fast){ mu0 <- exp(c(Z %*% beta0)) * wt mu0root <- sqrt(mu0) sparseG <- Matrix::sparseMatrix(i=cp$i, j=cp$j, x=mu0root[cp$i] * mu0root[cp$j] * Gtap, dims=c(U$n, U$n)) Rroot <- Matrix::Cholesky(sparseG, perm = TRUE, Imult = 1) ##Imult=1 means that we add 1*I if (verbose) cat("..Done.\n") } ## iterative weighted least squares/Fisher scoring bt <- beta0 noItr <- 1 repeat { mu <- exp(c(Z %*% bt)) * wt mu.root <- sqrt(mu) ## the core of estimating equation: ff=phi ## in case of ql, \phi=V^{-1}D=V_\mu^{-1/2}x where (G+I)x=V_\mu^{1/2} Z ff <- switch(type, clik1 = Z, wclik1= Z/(1 + Kmax), quasi = if(fast){ Matrix::solve(Rroot, mu.root * Z)/mu.root } else{ solve(diag(U$n) + t(gminus1 * mu), Z) } ) ##alternative ##R=chol(sparseG+sparseMatrix(i=c(1:U$n),j=c(1:U$n), ## x=rep(1,U$n),dims=c(U$n,U$n))) ##ff2 <- switch(type, ## clik1 = Z, ## wclik1= Z/(1 + Kmax), ## quasi = if (fast) ## solve(R,solve(t(R), mu.root * Z))/mu.root ## else solve(diag(U$n) + t(gminus1 * mu), Z)) ## print(summary(as.numeric(ff)-as.numeric(ff2))) ## the estimating equation: u_f(\beta) uf <- (Yu - mu) %*% ff ## inverse of minus expectation of Jacobian matrix: I_f Jinv <- solve(t(Z * mu) %*% ff) if(maxIter==0){ ## This is a built-in early exit for vcov internal calculations break } deltabt <- as.numeric(uf %*% Jinv) if (any(!is.finite(deltabt))) { warning(paste("Infinite value, NA or NaN appeared", "in the iterative weighted least squares algorithm.", "Returning the initial intensity estimate unchanged."), call.=FALSE) 
return(object)
    }
    ## updating the present estimate of \beta
    ## (one Fisher-scoring / IWLS step: beta <- beta + u_f(beta) %*% I_f^{-1})
    bt <- bt + deltabt
    if (verbose) splat(paste0("itr: ", noItr,
                              ",\nu_f: ", as.numeric(uf),
                              "\nbeta:", bt,
                              "\ndeltabeta:", deltabt))
    ## convergence test: either the relative change in beta or the
    ## estimating function itself has fallen below 'tolerance'
    if (max(abs(deltabt/bt)) <= tolerance || max(abs(uf)) <= tolerance)
      break
    if (noItr > maxIter)
      stop("Maximum number of iterations reached without convergence.")
    noItr <- noItr + 1
  }
  ## assemble the result: a copy of the fitted model with improved coefficients
  out <- object
  out$po$coef.orig <- beta0   # keep the initial (composite-likelihood) estimate
  out$po$coef <- bt           # store the improved estimate
  ## recompute the fitted intensity, on the same raster as the original
  ## fit when one is available, otherwise on the quadrature mask
  loc <- if(is.sob(out$lambda)) as.mask(out$lambda) else mask
  out$lambda <- predict(out$po, locations = loc)
  ## record the tuning parameters used for this improvement step
  out$improve <- list(type = type,
                      rmax = rmax,
                      dimyx = dimyx,
                      fast = fast,
                      fast.vcov = fast.vcov)
  if(save.internals){
    ## optionally keep internal quantities for later inspection/debugging
    out$improve <- append(out$improve, list(ff=ff, uf=uf, J.inv=Jinv))
  }
  if(vcov){
    if (verbose)
      cat("computing the asymptotic variance ...\n")
    ## variance of the estimation equation: Sigma_f = Var(u_f(bt))
    ## ('trans' selects the sparse transpose when the fast path built
    ## Matrix-class objects)
    trans <- if(fast) Matrix::t else t
    Sig <- trans(ff) %*% (ff * mu) + trans(ff * mu) %*% gminus1 %*% (ff * mu)
    ## note Abdollah's G does not have mu.root inside...
    ## the asymptotic variance of \beta:
    ## inverse of the Godambe information matrix (sandwich estimator)
    out$vcov <- as.matrix(Jinv %*% Sig %*% Jinv)
  }
  return(out)
  }
  improve.kppm
})

## Test whether an object is a fitted cluster/Cox point process model ('kppm')
is.kppm <- function(x) { inherits(x, "kppm")}

## Print method for fitted cluster/Cox ('kppm') and determinantal ('dppm') models
print.kppm <- print.dppm <- function(x, ...)
{ isPCP <- x$isPCP # detect DPP isDPP <- inherits(x, "dppm") # handle outdated objects - which were all cluster processes if(!isDPP && is.null(isPCP)) isPCP <- TRUE terselevel <- spatstat.options('terse') digits <- getOption('digits') splat(if(x$stationary) "Stationary" else "Inhomogeneous", if(isDPP) "determinantal" else if(isPCP) "cluster" else "Cox", "point process model") if(waxlyrical('extras', terselevel) && nchar(x$Xname) < 20) splat("Fitted to point pattern dataset", sQuote(x$Xname)) if(waxlyrical('gory', terselevel)) { switch(x$Fit$method, mincon = { splat("Fitted by minimum contrast") splat("\tSummary statistic:", x$Fit$StatName) }, clik =, clik2 = { splat("Fitted by maximum second order composite likelihood") splat("\trmax =", x$Fit$rmax) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, palm = { splat("Fitted by maximum Palm likelihood") splat("\trmax =", x$Fit$rmax) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, warning(paste("Unrecognised fitting method", sQuote(x$Fit$method))) ) } parbreak(terselevel) # ............... trend ......................... if(!(isDPP && is.null(x$fitted$intensity))) print(x$po, what="trend") # ..................... clusters ................ 
# DPP case if(isDPP){ splat("Fitted DPP model:") print(x$fitted) return(invisible(NULL)) } tableentry <- spatstatClusterModelInfo(x$clusters) splat(if(isPCP) "Cluster" else "Cox", "model:", tableentry$printmodelname(x)) cm <- x$covmodel if(!isPCP) { # Covariance model - LGCP only splat("\tCovariance model:", cm$model) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\tCovariance parameters:", paste(tagvalue, collapse=", ")) } } pc <- x$par.canon if(!is.null(pc)) { splat("Fitted canonical parameters:") print(pc, digits=digits) } pa <- x$clustpar if (!is.null(pa)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(pa, digits=digits) } if(!is.null(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]") } else { splat("Fitted mean of log of random intensity:", if(!is.im(mu)) signif(mu, digits) else "[pixel image]") } } invisible(NULL) } plot.kppm <- local({ plotem <- function(x, ..., main=dmain, dmain) { plot(x, ..., main=main) } plot.kppm <- function(x, ..., what=c("intensity", "statistic", "cluster"), pause=interactive(), xname) { ## catch objectname from dots if present otherwise deparse x: if(missing(xname)) xname <- short.deparse(substitute(x)) nochoice <- missing(what) what <- pickoption("plot type", what, c(statistic="statistic", intensity="intensity", cluster="cluster"), multi=TRUE) ## handle older objects Fit <- x$Fit if(is.null(Fit)) { warning("kppm object is in outdated format") Fit <- x Fit$method <- "mincon" } ## Catch locations for clusters if given loc <- list(...)$locations inappropriate <- (nochoice & ((what == "intensity") & (x$stationary))) | ((what == "statistic") & (Fit$method != "mincon")) | ((what == "cluster") & (identical(x$isPCP, FALSE))) | ((what == "cluster") & (!x$stationary) & is.null(loc)) if(!nochoice && !x$stationary && "cluster" %in% what 
&& is.null(loc))
      ## an inhomogeneous cluster field cannot be plotted without
      ## explicit locations supplied by the caller
      stop("Please specify additional argument ", sQuote("locations"),
           " which will be passed to the function ",
           sQuote("clusterfield"), ".")
    ## silently drop plot types that make no sense for this fitted model
    if(any(inappropriate)) {
      what <- what[!inappropriate]
      if(length(what) == 0){
        message("Nothing meaningful to plot. Exiting...")
        return(invisible(NULL))
      }
    }
    ## only pause between successive plots when there are several
    pause <- pause && (length(what) > 1)
    if(pause) opa <- par(ask=TRUE)
    for(style in what)
      switch(style,
             intensity={
               ## fitted first-order intensity surface
               plotem(x$po, ...,
                      dmain=c(xname, "Intensity"),
                      how="image", se=FALSE)
             },
             statistic={
               ## summary statistic used in minimum contrast fitting
               plotem(Fit$mcfit, ...,
                      dmain=c(xname, Fit$StatName))
             },
             cluster={
               ## fitted cluster kernel, centred at 'loc' if given
               plotem(clusterfield(x, locations = loc, verbose=FALSE),
                      ...,
                      dmain=c(xname, "Fitted cluster"))
             })
    if(pause) par(opa)
    return(invisible(NULL))
  }
  plot.kppm
})

## Predict method for 'kppm'/'dppm' models.
## Delegates to the underlying Poisson model component; standard errors
## and intervals are therefore computed under a Poisson assumption, and
## a warning is issued when they are requested.
predict.kppm <- predict.dppm <- function(object, ...) {
  se <- resolve.1.default(list(se=FALSE), list(...))
  interval <- resolve.1.default(list(interval="none"), list(...))
  if(se)
    warning("Standard error calculation assumes a Poisson process")
  if(interval != "none")
    warning(paste(interval, "interval calculation assumes a Poisson process"))
  predict(as.ppm(object), ...)
}

## Fitted values, delegated to the underlying ppm component
fitted.kppm <- fitted.dppm <- function(object, ...) {
  fitted(as.ppm(object), ...)
}

## Residuals, delegated to the underlying ppm component.
## Only 'raw' residuals are exact; other types assume a Poisson process.
residuals.kppm <- residuals.dppm <- function(object, ...) {
  type <- resolve.1.default(list(type="raw"), list(...))
  if(type != "raw")
    warning(paste("calculation of", type, "residuals",
                  "assumes a Poisson process"))
  residuals(as.ppm(object), ...)
}

## Simulate realisations of a fitted cluster/Cox model.
## Returns a list of point patterns, or a single pattern when nsim=1
## and drop=TRUE.  Failed simulations are retried up to 'retry' times.
simulate.kppm <- function(object, nsim=1, seed=NULL, ...,
                          window=NULL, covariates=NULL,
                          verbose=TRUE, retry=10,
                          drop=FALSE) {
  starttime <- proc.time()
  verbose <- verbose && (nsim > 1)
  check.1.real(retry)
  # .... copied from simulate.lm ....
if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } # .................................. # determine window for simulation results if(!is.null(window)) { stopifnot(is.owin(window)) win <- window } else { win <- as.owin(object) } # .................................. # determine parameters mp <- as.list(object$modelpar) # parameter 'mu' # = parent intensity of cluster process # = mean log intensity of log-Gaussian Cox process if(is.null(covariates) && (object$stationary || is.null(window))) { # use existing 'mu' (scalar or image) mu <- object$mu } else { # recompute 'mu' using new data switch(object$clusters, Cauchy=, VarGamma=, Thomas=, MatClust={ # Poisson cluster process kappa <- mp$kappa lambda <- predict(object, window=win, covariates=covariates) mu <- eval.im(lambda/kappa) }, LGCP={ # log-Gaussian Cox process sigma2 <- mp$sigma2 lambda <- predict(object, window=win, covariates=covariates) mu <- eval.im(log(lambda) - sigma2/2) }, stop(paste("Simulation of", sQuote(object$clusters), "processes is not yet implemented")) ) } # prepare data for execution out <- list() switch(object$clusters, Thomas={ kappa <- mp$kappa sigma <- mp$sigma cmd <- expression(rThomas(kappa,sigma,mu,win)) dont.complain.about(kappa, sigma, mu) }, MatClust={ kappa <- mp$kappa r <- mp$R cmd <- expression(rMatClust(kappa,r,mu,win)) dont.complain.about(kappa, r) }, Cauchy = { kappa <- mp$kappa omega <- mp$omega cmd <- expression(rCauchy(kappa, omega, mu, win)) dont.complain.about(kappa, omega, mu) }, VarGamma = { kappa <- mp$kappa omega <- mp$omega nu.ker <- object$covmodel$margs$nu.ker cmd <- expression(rVarGamma(kappa, nu.ker, omega, mu, win)) dont.complain.about(kappa, nu.ker, omega, mu) }, LGCP={ sigma2 <- 
mp$sigma2 alpha <- mp$alpha cm <- object$covmodel model <- cm$model margs <- cm$margs param <- append(list(var=sigma2, scale=alpha), margs) #' if(!is.im(mu)) { # model will be simulated in 'win' cmd <- expression(rLGCP(model=model, mu=mu, param=param, ..., win=win)) #' check that RandomFields package recognises parameter format rfmod <- try(rLGCP(model, mu=mu, param=param, win=win, ..., modelonly=TRUE)) } else { # model will be simulated in as.owin(mu), then change window cmd <- expression(rLGCP(model=model, mu=mu, param=param, ...)[win]) #' check that RandomFields package recognises parameter format rfmod <- try(rLGCP(model, mu=mu, param=param, ..., modelonly=TRUE)) } #' suppress warnings from code checker dont.complain.about(model, mu, param) #' check that model is recognised if(inherits(rfmod, "try-error")) stop(paste("Internal error in simulate.kppm:", "unable to build Random Fields model", "for log-Gaussian Cox process")) }) # run if(verbose) { cat(paste("Generating", nsim, "simulations... 
"))
    state <- list()
  }
  ## generate the simulations one at a time, capturing any errors with try()
  for(i in 1:nsim) {
    out[[i]] <- try(eval(cmd))
    if(verbose) state <- progressreport(i, nsim, state=state)
  }
  # detect failures
  if(any(bad <- unlist(lapply(out, inherits, what="try-error")))) {
    nbad <- sum(bad)
    gripe <- paste(nbad,
                   ngettext(nbad, "simulation was", "simulations were"),
                   "unsuccessful")
    if(verbose) splat(gripe)
    if(retry <= 0) {
      ## give up: failed simulations become NULL entries in the result
      fate <- "returned as NULL"
      out[bad] <- list(NULL)
    } else {
      ## re-attempt only the failed simulations, up to 'retry' rounds
      if(verbose) cat("Retrying...")
      ntried <- 0
      while(ntried < retry) {
        ntried <- ntried + 1
        for(j in which(bad))
          out[[j]] <- try(eval(cmd))
        bad <- unlist(lapply(out, inherits, what="try-error"))
        nbad <- sum(bad)
        if(nbad == 0) break
      }
      if(verbose) cat("Done.\n")
      fate <- if(nbad == 0) "all recomputed" else
        paste(nbad, "simulations still unsuccessful")
      fate <- paste(fate, "after", ntried,
                    ngettext(ntried, "further try", "further tries"))
    }
    warning(paste(gripe, fate, sep=": "))
  }
  if(verbose) cat("Done.\n")
  # pack up
  if(nsim == 1 && drop) {
    out <- out[[1L]]
  } else {
    out <- as.solist(out)
    if(nsim > 0) names(out) <- paste("Simulation", 1:nsim)
  }
  ## record elapsed time and the RNG state used, for reproducibility
  out <- timed(out, starttime=starttime)
  attr(out, "seed") <- RNGstate
  return(out)
}

## Extract the trend formula of the underlying Poisson model
formula.kppm <- formula.dppm <- function(x, ...) {
  formula(x$po, ...)
}

## Extract the terms object of the underlying Poisson model
terms.kppm <- terms.dppm <- function(x, ...) {
  terms(x$po, ...)
}

## Variable labels, delegated to the underlying Poisson model
labels.kppm <- labels.dppm <- function(object, ...) {
  labels(object$po, ...)
}

## Update a fitted 'kppm': re-fit with a modified formula, new data,
## or changed fitting arguments.  Arguments in ... may be a formula,
## a point pattern / quadrature scheme, or any named argument accepted
## by kppm() or mincontrast().
update.kppm <- function(object, ..., evaluate=TRUE) {
  argh <- list(...)
nama <- names(argh) callframe <- object$callframe envir <- environment(terms(object)) #' look for a formula argument fmla <- formula(object) jf <- integer(0) if(!is.null(trend <- argh$trend)) { if(!can.be.formula(trend)) stop("Argument \"trend\" should be a formula") fmla <- newformula(formula(object), trend, callframe, envir) jf <- which(nama == "trend") } else if(any(isfo <- sapply(argh, can.be.formula))) { if(sum(isfo) > 1) { if(!is.null(nama)) isfo <- isfo & nzchar(nama) if(sum(isfo) > 1) stop(paste("Arguments not understood:", "there are two unnamed formula arguments")) } jf <- which(isfo) fmla <- argh[[jf]] fmla <- newformula(formula(object), fmla, callframe, envir) } #' look for a point pattern or quadscheme if(!is.null(X <- argh$X)) { if(!inherits(X, c("ppp", "quad"))) stop(paste("Argument X should be a formula,", "a point pattern or a quadrature scheme")) jX <- which(nama == "X") } else if(any(ispp <- sapply(argh, inherits, what=c("ppp", "quad")))) { if(sum(ispp) > 1) { if(!is.null(nama)) ispp <- ispp & nzchar(nama) if(sum(ispp) > 1) stop(paste("Arguments not understood:", "there are two unnamed point pattern/quadscheme arguments")) } jX <- which(ispp) X <- argh[[jX]] } else { X <- object$X jX <- integer(0) } Xexpr <- if(length(jX) > 0) sys.call()[[2L + jX]] else NULL #' remove arguments just recognised, if any jused <- c(jf, jX) if(length(jused) > 0) argh <- argh[-jused] #' update the matched call thecall <- getCall(object) methodname <- as.character(thecall[[1L]]) switch(methodname, kppm.formula = { # original call has X = [formula with lhs] if(!is.null(Xexpr)) { lhs.of.formula(fmla) <- Xexpr } else if(is.null(lhs.of.formula(fmla))) { lhs.of.formula(fmla) <- as.name('.') } oldformula <- as.formula(getCall(object)$X) thecall$X <- newformula(oldformula, fmla, callframe, envir) }, { # original call has X = ppp and trend = [formula without lhs] oldformula <- as.formula(getCall(object)$trend) fom <- newformula(oldformula, fmla, callframe, envir) 
if(!is.null(Xexpr)) lhs.of.formula(fom) <- Xexpr if(is.null(lhs.of.formula(fom))) { # new call has same format thecall$trend <- fom if(length(jX) > 0) thecall$X <- X } else { # new call has formula with lhs thecall$trend <- NULL thecall$X <- fom } }) knownnames <- union(names(formals(kppm.ppp)), names(formals(mincontrast))) knownnames <- setdiff(knownnames, c("X", "trend", "observed", "theoretical")) ok <- nama %in% knownnames thecall <- replace(thecall, nama[ok], argh[ok]) thecall$formula <- NULL # artefact of 'step', etc thecall[[1L]] <- as.name("kppm") if(!evaluate) return(thecall) out <- eval(thecall, envir=parent.frame(), enclos=envir) #' update name of data if(length(jX) == 1) { mc <- match.call() Xlang <- mc[[2L+jX]] out$Xname <- short.deparse(Xlang) } #' return(out) } unitname.kppm <- unitname.dppm <- function(x) { return(unitname(x$X)) } "unitname<-.kppm" <- "unitname<-.dppm" <- function(x, value) { unitname(x$X) <- value if(!is.null(x$Fit$mcfit)) { unitname(x$Fit$mcfit) <- value } else if(is.null(x$Fit)) { warning("kppm object in outdated format") if(!is.null(x$mcfit)) unitname(x$mcfit) <- value } return(x) } as.fv.kppm <- as.fv.dppm <- function(x) { if(x$Fit$method == "mincon") return(as.fv(x$Fit$mcfit)) gobs <- pcfinhom(x$X, lambda=x, correction="good", update=FALSE) gfit <- (pcfmodel(x))(gobs$r) g <- bind.fv(gobs, data.frame(fit=gfit), "%s[fit](r)", "predicted %s for fitted model") return(g) } coef.kppm <- coef.dppm <- function(object, ...) { return(coef(object$po)) } Kmodel.kppm <- function(model, ...) { Kpcf.kppm(model, what="K") } pcfmodel.kppm <- function(model, ...) 
{ Kpcf.kppm(model, what="pcf") } Kpcf.kppm <- function(model, what=c("K", "pcf", "kernel")) { what <- match.arg(what) # Extract function definition from internal table clusters <- model$clusters tableentry <- spatstatClusterModelInfo(clusters) if(is.null(tableentry)) stop("No information available for", sQuote(clusters), "cluster model") fun <- tableentry[[what]] if(is.null(fun)) stop("No expression available for", what, "for", sQuote(clusters), "cluster model") # Extract model parameters par <- model$par # Extract auxiliary definitions (if applicable) funaux <- tableentry$funaux # Extract covariance model (if applicable) cm <- model$covmodel model <- cm$model margs <- cm$margs # f <- function(r) as.numeric(fun(par=par, rvals=r, funaux=funaux, model=model, margs=margs)) return(f) } is.stationary.kppm <- is.stationary.dppm <- function(x) { return(x$stationary) } is.poisson.kppm <- function(x) { switch(x$clusters, Cauchy=, VarGamma=, Thomas=, MatClust={ # Poisson cluster process mu <- x$mu return(!is.null(mu) && (max(mu) == 0)) }, LGCP = { # log-Gaussian Cox process sigma2 <- x$par[["sigma2"]] return(sigma2 == 0) }, return(FALSE)) } # extract ppm component as.ppm.kppm <- as.ppm.dppm <- function(object) { object$po } # other methods that pass through to 'ppm' as.owin.kppm <- as.owin.dppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) { from <- match.arg(from) as.owin(as.ppm(W), ..., from=from, fatal=fatal) } domain.kppm <- Window.kppm <- domain.dppm <- Window.dppm <- function(X, ..., from=c("points", "covariates")) { from <- match.arg(from) as.owin(X, from=from) } model.images.kppm <- model.images.dppm <- function(object, W=as.owin(object), ...) { model.images(as.ppm(object), W=W, ...) 
}

## Design matrix of the model, delegated to the ppm component
model.matrix.kppm <- model.matrix.dppm <-
  function(object, data=model.frame(object, na.action=NULL), ...,
           Q=NULL, keepNA=TRUE) {
  if(missing(data)) data <- NULL
  model.matrix(as.ppm(object), data=data, ..., Q=Q, keepNA=keepNA)
}

## Model frame, delegated to the ppm component
model.frame.kppm <- model.frame.dppm <- function(formula, ...) {
  model.frame(as.ppm(formula), ...)
}

## Log-likelihood: the maximised log Palm likelihood.
## Only defined for models fitted with method='palm'.
logLik.kppm <- logLik.dppm <- function(object, ...) {
  pl <- object$Fit$clfit$value
  if(is.null(pl))
    stop("logLik is only available for kppm objects fitted with method='palm'",
         call.=FALSE)
  ll <- logLik(as.ppm(object)) # to inherit class and d.f.
  ll[] <- pl
  return(ll)
}

## AIC based on the maximised log Palm likelihood (penalty weight k)
AIC.kppm <- AIC.dppm <- function(object, ..., k=2) {
  # extract Palm loglikelihood
  pl <- object$Fit$clfit$value
  if(is.null(pl))
    stop("AIC is only available for kppm objects fitted with method='palm'",
         call.=FALSE)
  df <- length(coef(object))
  return(- 2 * as.numeric(pl) + k * df)
}

## extractAIC method (used by step() etc):
## returns c(equivalent degrees of freedom, AIC)
extractAIC.kppm <- extractAIC.dppm <- function (fit, scale = 0, k = 2, ...)
{
  edf <- length(coef(fit))
  aic <- AIC(fit, k=k)
  c(edf, aic)
}

## Number of observations, delegated to the ppm component
nobs.kppm <- nobs.dppm <- function(object, ...) { nobs(as.ppm(object)) }

## Sibling probability of a cluster model.
## Returns NA with a warning when the model is not a Poisson cluster process.
psib <- function(object) UseMethod("psib")

psib.kppm <- function(object) {
  clus <- object$clusters
  info <- spatstatClusterModelInfo(clus)
  if(!info$isPCP) {
    warning("The model is not a cluster process")
    return(NA)
  }
  ## p = 1 - 1/g(0), computed from the fitted pair correlation function
  ## at distance zero
  g <- pcfmodel(object)
  p <- 1 - 1/g(0)
  return(p)
}
spatstat/R/ppm.R0000755000176200001440000002111313165060304013234 0ustar liggesusers#
# $Revision: 1.58 $ $Date: 2017/10/04 04:10:33 $
#
# ppm()
# Fit a point process model to a two-dimensional point pattern
#
#

## Generic: fit a point process model; dispatches on the class of Q
ppm <- function(Q, ...)
{ UseMethod("ppm") } ppm.formula <- function(Q, interaction=NULL, ..., data=NULL, subset) { ## remember call callstring <- short.deparse(sys.call()) cl <- match.call() ## trap a common error to give a more informative message if(is.sob(data) || is.function(data)) stop(paste("The argument", sQuote("data"), "should not be a spatial object;", "it should be a list of spatial objects"), call.=FALSE) ########### INTERPRET FORMULA ############################## if(!inherits(Q, "formula")) stop(paste("Argument 'Q' should be a formula")) formula <- Q ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Formula must have a left hand side")) Yexpr <- formula[[2]] trend <- formula[c(1,3)] ## FIT ####################################### thecall <- if(missing(subset)) { call("ppm", Q=Yexpr, trend=trend, data=data, interaction=interaction) } else { call("ppm", Q=Yexpr, trend=trend, data=data, interaction=interaction, subset=substitute(subset)) } ncall <- length(thecall) argh <- list(...) 
nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } callenv <- list2env(as.list(data), parent=parent.frame()) result <- eval(thecall, envir=callenv) result$call <- cl result$callstring <- callstring result$callframe <- parent.frame() return(result) } ppm.quad <- ppm.ppp <- ppm.default <- function(Q, trend = ~1, interaction = Poisson(), ..., covariates = data, data = NULL, covfunargs = list(), subset, clipwin, correction="border", rbord = reach(interaction), use.gam=FALSE, method = "mpl", forcefit=FALSE, emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL ) { Qname <- short.deparse(substitute(Q)) subsetexpr <- if(!missing(subset)) substitute(subset) else NULL clipwin <- if(!missing(clipwin)) clipwin else NULL datalistname <- if(missing(covariates)) "data" else "covariates" if(!(method %in% c("mpl", "ho", "logi", "VBlogi"))) stop(paste("Unrecognised fitting method", sQuote(method))) if(!missing(emend) && !missing(project) && emend != project) stop("Conflicting options: emend != project") if(!is.null(prior.mean) | !is.null(prior.var)){ if(missing(method)) method <- "VBlogi" if(method!="VBlogi") stop("Prior specification only works with method ", sQuote("VBlogi")) } if(method=="VBlogi"){ VB <- TRUE method <- "logi" } else{ VB <- FALSE } if(is.sob(covariates) || is.function(covariates)) stop(paste("The argument", sQuote(datalistname), "should not be a spatial object;", "it should be a list of spatial objects"), call.=FALSE) if(inherits(Q, "logiquad")){ if(missing(method)) method <- "logi" if(method != "logi") stop(paste("Only method =", sQuote("logi"), "makes sense when Q is of type", sQuote("logiquad"))) } cl <- match.call() if(is.null(callstring)) callstring <- paste(short.deparse(sys.call()), collapse="") if(is.ppp(Q) && is.marked(Q) && !is.multitype(Q)) 
stop(paste("ppm is not yet implemented for marked point patterns,", "other than multitype patterns.")) if(!(is.ppp(Q) || inherits(Q, "quad") || checkfields(Q, c("data", "dummy")))) { stop("Argument Q must be a point pattern or a quadrature scheme") } X <- if(is.ppp(Q)) Q else Q$data ## Validate interaction if(is.null(interaction)) { interaction <- Poisson() } else if(inherits(interaction, "intermaker")) { ## e.g. 'interaction=Hardcore': invoke it without arguments interaction <- (f <- interaction)() dont.complain.about(f) } else if(!is.interact(interaction)) stop("Argument 'interaction' must be an object of class 'interact'") ## Ensure interaction is fully defined if(!is.null(ss <- interaction$selfstart)) { # invoke selfstart mechanism to fix all parameters interaction <- ss(X, interaction) } if(inherits(trend, "formula")) { ## handle "." in formula, representing all variables in 'data' if("." %in% variablesinformula(trend)) { if(is.null(covariates)) stop("Cannot expand '.' since 'data' is not present", call.=FALSE) rhs <- paste(names(covariates), collapse=" + ") allmaineffects <- as.formula(paste("~", rhs)) environment(allmaineffects) <- environment(trend) trend <- update(allmaineffects, trend) } ## expand polynom() in formula if(spatstat.options("expand.polynom")) trend <- expand.polynom(trend) } # validate choice of edge correction correction <- pickoption("correction", correction, c(border="border", periodic="periodic", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", none="none")) # validate rbord if(correction == "border") { # rbord for border correction rbord.given <- !missing(rbord) && !is.null(rbord) if(is.null(rbord)) rbord <- reach(interaction) infin <- is.infinite(rbord) too.large <- infin || (eroded.areas(as.owin(X), rbord) == 0) if(too.large) { whinge <- paste(if(rbord.given) "rbord" else "the reach of this interaction", if(infin) "is infinite or unknown;" else "is too large for this 
window;", "please specify", if(rbord.given) "a smaller value of", "rbord, or use a different edge correction") stop(whinge) } } else { # rbord must be numeric to satisfy mpl.engine if(is.null(rbord)) rbord <- 0 } if(method == "logi") { fitLOGI <- logi.engine(Q=Q, trend=trend, interaction=interaction, covariates=covariates, covfunargs=covfunargs, subsetexpr=subsetexpr, clipwin=clipwin, correction=correction, rbord=rbord, use.gam=use.gam, forcefit=forcefit, nd = nd, gcontrol=gcontrol, callstring=callstring, prior.mean=prior.mean, prior.var=prior.var, VB=VB, ...) fitLOGI$Qname <- Qname fitLOGI$call <- cl fitLOGI$callstring <- callstring fitLOGI$callframe <- parent.frame() if(emend && !valid.ppm(fitLOGI)) fitLOGI <- emend.ppm(fitLOGI) return(fitLOGI) } # fit by maximum pseudolikelihood fitMPL <- mpl.engine(Q=Q, trend=trend, interaction=interaction, covariates=covariates, covfunargs=covfunargs, subsetexpr=subsetexpr, clipwin=clipwin, correction=correction, rbord=rbord, use.gam=use.gam, forcefit=forcefit, nd = nd, eps = eps, gcontrol=gcontrol, callstring=callstring, ...) 
fitMPL$Qname <- Qname if(!is.ppm(fitMPL)) { # internal use only - returns some other data return(fitMPL) } fitMPL$call <- cl fitMPL$callstring <- callstring fitMPL$callframe <- parent.frame() if(emend && !valid.ppm(fitMPL)) fitMPL <- emend.ppm(fitMPL) if(method == "mpl" || is.poisson.ppm(fitMPL)) return(fitMPL) fitHO <- ho.engine(fitMPL, nsim=nsim, nrmh=nrmh, start=start, control=control, verb=verb) if(is.null(fitHO)) return(fitMPL) if(emend && !valid.ppm(fitHO)) fitHO <- emend.ppm(fitHO) return(fitHO) } spatstat/R/hierstrauss.R0000644000176200001440000002105413115225157015021 0ustar liggesusers## ## hierstrauss.R ## ## $Revision: 1.9 $ $Date: 2016/02/16 01:39:12 $ ## ## The hierarchical Strauss process ## ## HierStrauss() create an instance of the hierarchical Strauss process ## [an object of class 'interact'] ## ## ------------------------------------------------------------------- ## HierStrauss <- local({ # ......... define interaction potential HSpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrix of interaction radii r[ , ] r <- par$radii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] ## corresponding names mark1name <- (lxname[row(r)])[uptri] 
mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { ## assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] ## apply relevant threshold to each pair of points str <- (d <= rxu) ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- str[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. delHS <- function(which, types, radii, archy) { radii[which] <- NA if(all(is.na(radii))) return(Poisson()) return(HierStrauss(types=types, radii=radii, archy=archy)) } # Set up basic object except for family and parameters BlankHSobject <- list( name = "Hierarchical Strauss process", creator = "HierStrauss", family = "hierpair.family", # evaluated later pot = HSpotential, par = list(types=NULL, radii=NULL, archy=NULL), # filled in later parnames = c("possible types", "interaction distances", "hierarchical order"), selfstart = function(X, self) { if(!is.null(self$par$types) && !is.null(self$par$archy)) return(self) types <- self$par$types %orifnull% levels(marks(X)) archy <- self$par$archy %orifnull% types HierStrauss(types=types,radii=self$par$radii,archy=archy) }, init = function(self) { types <- self$par$types if(!is.null(types)) { radii <- self$par$radii nt <- length(types) MultiPair.checkmatrix(radii, nt, sQuote("radii"), asymmok=TRUE) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, 
update = NULL, # default OK print = function(self) { radii <- self$par$radii types <- self$par$types archy <- self$par$archy if(waxlyrical('gory')) splat(nrow(radii), "types of points") if(!is.null(types) && !is.null(archy)) { if(waxlyrical('space')) { splat("Possible types and ordering:") } else cat("Hierarchy: ") print(archy) } else if(!is.null(types)) { (if(waxlyrical('space')) splat else cat)("Possible types: ") print(types) } else if(waxlyrical('gory')) splat("Possible types:\t not yet determined") splat("Interaction radii:") print(hiermat(radii, self$par$archy)) invisible(NULL) }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrix of Strauss interaction radii r <- self$par$radii # list all unordered pairs of types uptri <- self$par$archy$relation & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(NA, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) gammas[ cbind(index1, index2) ] <- exp(coeffs) # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=hiermat(round(gammas, 4), self$par$archy))) }, valid = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii radii <- self$par$radii # parameters to estimate required <- !is.na(radii) & self$par$archy$relation # all required parameters must be finite if(!all(is.finite(gamma[required]))) return(FALSE) # DIAGONAL interaction parameters must be non-explosive d <- diag(rep(TRUE, nrow(radii))) return(all(gamma[required & d] <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii and types radii <- self$par$radii types <- self$par$types archy <- self$par$archy # problems? 
uptri <- archy$relation required <- !is.na(radii) & uptri okgamma <- !uptri | (is.finite(gamma) & (gamma <= 1)) naughty <- required & !okgamma # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delHS(naughty, types, radii, archy)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) ord <- self$par$archy$ordering uptri <- (ord[rn] <= ord[cn]) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matrix, ncol=2) inters <- lapply(mats, delHS, types=types, radii=radii, archy=archy) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$radii active <- !is.na(r) & self$par$archy$relation if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- active & (abs(log(gamma)) > epsilon) } if(any(active)) return(max(r[active])) else return(0) }, version=NULL # to be added ) class(BlankHSobject) <- "interact" # finally create main function HierStrauss <- function(radii, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } radii[radii == 0] <- NA out <- instantiate.interact(BlankHSobject, list(types=types, radii=radii, archy=archy)) if(!is.null(types)) dimnames(out$par$radii) <- list(types, types) return(out) } HierStrauss <- intermaker(HierStrauss, BlankHSobject) HierStrauss }) spatstat/R/terse.R0000644000176200001440000000251413115225157013567 0ustar liggesusers## terse.R ## ## code to control terseness and layout of printed output ## ## $Revision: 1.11 $ $Date: 2016/09/23 02:07:24 $ ## ## paragraph break in long output e.g. 
ppm  ## [sic] continuation of the header comment "... e.g. ppm" split across chunk lines

## Print a paragraph break (one blank line) in long console output,
## unless suppressed by the current terseness level
## (spatstat.options("terse")).  Returns NULL invisibly.
parbreak <- function(terse = spatstat.options("terse")) {
  if(waxlyrical('space', terse)) cat("\n")
  return(invisible(NULL))
}

waxlyrical <- local({

  ## Values of spatstat.options('terse'):
  ## 0    default
  ## 1    suppress obvious wastage e.g. 'gory details'
  ## 2    contract space between paragraphs in long output
  ## 3    suppress extras e.g. standard errors and CI
  ## 4    suppress error messages eg failed to converge

  ## Threshold for each category of output: output of a given type is
  ## permitted only while terse < TerseCutoff[[type]].
  TerseCutoff <- list(gory=1,
                      space=2,
                      extras=3,
                      errors=4)

  ## Return TRUE if output of category 'type' ("gory", "space",
  ## "extras" or "errors") is permitted at terseness level 'terse'.
  ## An unrecognised 'type' is a programming error and stops.
  waxlyrical <- function(type, terse = spatstat.options("terse")) {
    if(!(type %in% names(TerseCutoff)))
      stop(paste("Internal error: unrecognised permission request",
                 sQuote(type)),
           call.=TRUE)
    return(terse < TerseCutoff[[type]])
  }

  waxlyrical

})

## Print a horizontal rule made of the character 'ch', spanning the
## console width 'n', unless suppressed by the terseness setting.
## Returns NULL invisibly.
ruletextline <- function(ch="-", n=getOption('width'),
                         terse=spatstat.options('terse')) {
  if(waxlyrical('space', terse)) {
    chn <- paste(rep(ch, n), collapse="")
    chn <- substr(chn, 1, n)   ## truncate in case 'ch' has more than one character
    cat(chn, fill=TRUE)
  }
  return(invisible(NULL))
}
spatstat/R/pairorient.R0000644000176200001440000001623013115225157014621 0ustar liggesusers##
## pairorient.R
##
## point pair orientation distribution
##
## Function O_{r1,r2}(phi) defined in
## Stoyan & Stoyan (1994) equ (14.53) page 271
##
## and its derivative estimated by kernel smoothing
##
## $Revision: 1.9 $ $Date: 2014/12/05 06:59:53 $

## Estimate the point pair orientation distribution O_[r1,r2](phi)
## of the point pattern X, for pairs of points separated by distances
## in [r1, r2].  If cumulative=TRUE the cumulative distribution is
## returned, otherwise a kernel-smoothed density.  Several edge
## corrections may be computed; angles are reported in the chosen unit.
pairorient <- function(X, r1, r2, ..., cumulative=FALSE,
                       correction, ratio=FALSE,
                       unit=c("degree", "radian"),
                       domain=NULL) {
  stopifnot(is.ppp(X))
  check.1.real(r1)
  check.1.real(r2)
  stopifnot(r1 < r2)
  W <- Window(X)
  if(!is.null(domain))
    stopifnot(is.subset.owin(domain, W))
  unit <- match.arg(unit)
  ## constants converting radians to the requested angular unit
  switch(unit,
         degree = {
           FullCircle <- 360
           Convert <- 180/pi
         },
         radian = {
           FullCircle <- 2 * pi
           Convert <- 1
         })
  ## choose correction(s)
  correction.given <- !missing(correction) && !is.null(correction)
  if(!correction.given)
    correction <- c("border", "isotropic", "translate")
  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             bord.modif="bord.modif",
                             isotropic="isotropic",
Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) ## replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) ## retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) ## Find close pairs in range [r1, r2] close <- as.data.frame(closepairs(X, r2)) ok <- with(close, r1 <= d & d <= r2) if(!is.null(domain)) ok <- ok & with(close, inside.owin(xi, yi, domain)) if(!any(ok)) { warning(paste("There are no pairs of points in the distance range", prange(c(r1,r2)))) return(NULL) } close <- close[ok, , drop=FALSE] ANGLE <- with(close, atan2(dy, dx) * Convert) %% FullCircle ## initialise output object Nphi <- 512 breaks <- make.even.breaks(bmax=FullCircle, npos=Nphi-1) phi <- breaks$r Odf <- data.frame(phi = phi, theo = (if(cumulative) phi else 1)/FullCircle) desc <- c("angle argument phi", "theoretical isotropic %s") Oletter <- if(cumulative) "O" else "o" Osymbol <- as.name(Oletter) OO <- ratfv(Odf, NULL, denom=nrow(close), argu="phi", ylab=substitute(fn[R1,R2](phi), list(R1=r1, R2=r2, fn=Osymbol)), valu="theo", fmla = . ~ phi, alim = c(0, FullCircle), c("phi", "{%s[%s]^{pois}}(phi)"), desc, fname=c(Oletter, paste0("list(", r1, ",", r2, ")")), yexp=substitute(fn[list(R1,R2)](phi), list(R1=r1,R2=r2,fn=Osymbol))) ## ^^^^^^^^^^^^^^^ Compute edge corrected estimates ^^^^^^^^^^^^^^^^ nangles <- length(ANGLE) if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! 
if(cumulative) { wh <- whist(ANGLE, breaks$val) # no weights num.un <- cumsum(wh) } else { kd <- circdensity(ANGLE, ..., n=Nphi, unit=unit) num.un <- kd$y * nangles } den.un <- nangles ## uncorrected estimate OO <- bind.ratfv(OO, data.frame(un=num.un), den.un, "{hat(%s)[%s]^{un}}(phi)", "uncorrected estimate of %s", "un", ratio=ratio) } if(any(c("border", "bord.modif") %in% correction)) { ## border type corrections bX <- bdist.points(X) bI <- bX[close$i] if("border" %in% correction) { bok <- (bI > r2) ANGLEok <- ANGLE[bok] nok <- length(ANGLEok) if(cumulative) { wh <- whist(ANGLEok, breaks$val) num.bord <- cumsum(wh) } else { kd <- circdensity(ANGLEok, ..., n=Nphi, unit=unit) num.bord <- kd$y * nok } den.bord <- nok OO <- bind.ratfv(OO, data.frame(border=num.bord), den.bord, "{hat(%s)[%s]^{bord}}(phi)", "border-corrected estimate of %s", "border", ratio=ratio) } if("bord.modif" %in% correction) { ok <- (close$d < bI) nok <- sum(ok) inradius <- max(distmap(W, invert=TRUE)) rrr <- range(r2, inradius) rr <- seq(rrr[1], rrr[2], length=256) Ar <- eroded.areas(W, rr) Arf <- approxfun(rr, Ar, rule=2) AI <- (Arf(bX))[close$i] edgewt <- ifelse(ok, pmin(area(W)/AI, 100), 0) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.bm <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.bm <- kd$y * nok } den.bm <- nok OO <- bind.ratfv(OO, data.frame(bordm=num.bm), den.bm, "{hat(%s)[%s]^{bordm}}(phi)", "modified border-corrected estimate of %s", "bordm", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.trans <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.trans <- kd$y * nangles } den.trans <- nangles OO <- bind.ratfv(OO, data.frame(trans=num.trans), den.trans, 
"{hat(%s)[%s]^{trans}}(phi)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction == "isotropic")) { ## Ripley isotropic correction XI <- ppp(close$xi, close$yi, window=W, check=FALSE) DIJ <- close$d edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.iso <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.iso <- kd$y * nangles } den.iso <- nangles OO <- bind.ratfv(OO, data.frame(iso=num.iso), den.iso, "{hat(%s)[%s]^{iso}}(phi)", "Ripley isotropic-corrected estimate of %s", "iso", ratio=ratio) } unitname(OO) <- switch(unit, degree = c("degree", "degrees"), radian = c("radian", "radians")) return(OO) } spatstat/R/boundingbox.R0000644000176200001440000001353313115225157014766 0ustar liggesusers## ## boundingbox.R ## ## $Revision: 1.8 $ $Date: 2016/02/11 10:17:12 $ bounding.box <- function(...) { .Deprecated("boundingbox", "spatstat") boundingbox(...) } boundingbox <- function(...) { ## remove any NULL arguments arglist <- list(...) if(any(isnull <- sapply(arglist, is.null))) { if(length(arglist[!isnull])) return(do.call(boundingbox, arglist[!isnull])) stop("No non-null arguments given.\n") } UseMethod("boundingbox") } boundingbox.solist <- function(...) { argh <- list(...) issl <- sapply(argh, inherits, what="solist") yarg <- c(do.call(c, argh[issl]), argh[!issl]) do.call(bbEngine, yarg) } boundingbox.ppp <- boundingbox.psp <- boundingbox.owin <- boundingbox.list <- boundingbox.im <- function(...) { bbEngine(...) 
}   ## closes boundingbox.ppp / .psp / .owin / .list / .im (opened on the previous chunk line)

## Classify an object for dispatch inside the bounding-box engine.
## Returns one of: "ppp", "psp", "owin", "im" (recognised spatstat
## classes), "listxy" (a list with numeric x and y components of equal
## length), "as.owin" (convertible to a window via as.owin), or
## "unknown".
recognise.spatstat.type <- local({

  knowntypes <- c("ppp","psp","owin","im")

  function(x) {
    for(kt in knowntypes)
      if(inherits(x, kt)) return(kt)
    if(is.list(x) &&
       checkfields(x, c("x", "y")) &&
       is.numeric(x$x) && is.numeric(x$y) &&
       is.vector(x$x) && is.vector(x$y) &&
       length(x$x) == length(x$y))
      return("listxy")
    ## last resort: does an as.owin method succeed?
    aso <- try(as.owin(x), silent=TRUE)
    if(!inherits(aso, "try-error"))
      return("as.owin")
    return("unknown")
  }
})

bbEngine <- local({

  ## bounding box of a list(x, y) of coordinate vectors
  bb.listxy <- function(X) owin(range(X$x), range(X$y))

  ## Workhorse for boundingbox(): compute the common bounding box of
  ## any number of spatial objects (numeric coordinate vectors are
  ## first combined via the default method).
  bbEngine <- function(...) {
    wins <- list(...)
    ## first detect any numeric vector arguments
    if(any(isnumvec <- unlist(lapply(wins, is.vector)) &
                       unlist(lapply(wins, is.numeric)))) {
      ## invoke default method on these arguments
      bb <- do.call(boundingbox, wins[isnumvec])
      ## repack
      wins <- append(wins[!isnumvec], list(bb))
    }
    if(length(wins) > 1) {
      ## multiple arguments -- compute bounding box for each argument.
      objtype <- unlist(lapply(wins, recognise.spatstat.type))
      nbad <- sum(objtype == "unknown")
      if(nbad > 0) {
        whinge <- paste("Function boundingbox called with",
                        nbad,"unrecognised",
                        ngettext(nbad,"argument","arguments"))
        stop(whinge, call.=FALSE)
      }
      if(any(isppp <- (objtype == "ppp")))
        wins[isppp] <- lapply(wins[isppp], boundingbox)
      if(any(islistxy <- (objtype == "listxy")))
        wins[islistxy] <- lapply(wins[islistxy], bb.listxy)
      ## then convert all windows to owin
      wins <- lapply(wins, as.owin)
      ## then take bounding box of each window
      boxes <- lapply(wins, boundingbox)
      ## discard NULL values
      isnull <- unlist(lapply(boxes, is.null))
      boxes <- boxes[!isnull]
      ## take bounding box of these boxes
      xrange <- range(unlist(lapply(boxes, getElement, name="xrange")))
      yrange <- range(unlist(lapply(boxes, getElement, name="yrange")))
      W <- owin(xrange, yrange)
      ## If all of the windows have a common unit name, give
      ## that unit name to the bounding box.
youse <- unique(t(sapply(boxes,unitname))) if(nrow(youse)==1) { ute <- unlist(youse[1L,]) unitname(W) <- ute } return(W) } ## single argument w <- wins[[1L]] if(is.null(w)) return(NULL) wtype <- recognise.spatstat.type(w) ## point pattern? if(wtype == "ppp") return(boundingbox(coords(w))) ## list(x,y) if(wtype == "listxy") return(bb.listxy(w)) ## convert to window w <- as.owin(w) ## determine a tight bounding box for the window w switch(w$type, rectangle = { return(w) }, polygonal = { bdry <- w$bdry if(length(bdry) == 0) return(NULL) xr <- range(unlist(lapply(bdry, rangeofx))) yr <- range(unlist(lapply(bdry, rangeofy))) return(owin(xr, yr, unitname=unitname(w))) }, mask = { m <- w$m x <- rasterx.mask(w) y <- rastery.mask(w) xr <- range(x[m]) + c(-1,1) * w$xstep/2 yr <- range(y[m]) + c(-1,1) * w$ystep/2 return(owin(xr, yr, unitname=unitname(w))) }, stop("unrecognised window type", w$type) ) } rangeofx <- function(a) range(a$x) rangeofy <- function(a) range(a$y) bbEngine }) boundingbox.default <- local({ bb.listxy <- function(X) owin(range(X$x), range(X$y)) boundingbox.default <- function(...) { arglist <- list(...) bb <- NULL if(length(arglist) == 0) return(bb) ## handle numeric vector arguments if(any(isnumvec <- unlist(lapply(arglist, is.vector)) & unlist(lapply(arglist, is.numeric)))) { nvec <- sum(isnumvec) if(nvec != 2) stop(paste("boundingbox.default expects 2 numeric vectors:", nvec, "were supplied"), call.=FALSE) vecs <- arglist[isnumvec] x <- vecs[[1L]] y <- vecs[[2L]] bb <- if(length(x) == length(y)) owin(range(x), range(y)) else NULL arglist <- arglist[!isnumvec] } if(length(arglist) == 0) return(bb) ## other objects are present objtype <- unlist(lapply(arglist, recognise.spatstat.type)) ## Unrecognised? 
nbad <- sum(objtype == "unknown") if(nbad > 0) { whinge <- paste("Function boundingbox called with", nbad,"unrecognised", ngettext(nbad,"argument","arguments")) stop(whinge, call.=FALSE) } if(any(aso <- (objtype == "as.owin"))) { ## promote objects to owin (to avoid infinite recursion!) arglist[aso] <- lapply(arglist[aso], as.owin) } if(any(lxy <- (objtype == "listxy"))) { ## handle list(x,y) objects arglist[lxy] <- lapply(arglist[lxy], bb.listxy) } result <- do.call(boundingbox, if(is.null(bb)) arglist else append(list(bb), arglist)) return(result) } boundingbox.default }) spatstat/R/nncross3D.R0000644000176200001440000001615513115271120014316 0ustar liggesusers# # nncross3D.R # # $Revision: 1.8 $ $Date: 2017/06/05 10:31:58 $ # # Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 # Licence: GNU Public Licence >= 2 nncross.pp3 <- function(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), ..., k = 1, sortby=c("range", "var", "x", "y", "z"), is.sorted.X = FALSE, is.sorted.Y = FALSE) { stopifnot(is.pp3(Y)) sortby <- match.arg(sortby) what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE) want.dist <- "dist" %in% what want.which <- "which" %in% what want.both <- want.dist && want.which if(!missing(k)) { # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } } k <- as.integer(k) kmax <- max(k) nk <- length(k) # trivial cases nX <- npoints(X) nY <- nobjects(Y) # deal with null cases if(nX == 0) return(as.data.frame(list(dist=matrix(0, nrow=0, ncol=nk), which=matrix(0L, nrow=0, ncol=nk))[what])) if(nY == 0) return(as.data.frame(list(dist=matrix(Inf, nrow=nX, ncol=nk), which=matrix(NA, nrow=nX, ncol=nk))[what])) if(is.null(iX) != is.null(iY)) stop("If one of iX, iY is 
given, then both must be given") exclude <- (!is.null(iX) || !is.null(iY)) if(exclude) { stopifnot(is.integer(iX) && is.integer(iY)) if(length(iX) != nX) stop("length of iX does not match the number of points in X") if(length(iY) != nY) stop("length of iY does not match the number of points in Y") } if((is.sorted.X || is.sorted.Y) && !(sortby %in% c("x", "y", "z"))) stop(paste("If data are already sorted,", "the sorting coordinate must be specified explicitly", "using sortby = \"x\" or \"y\" or \"z\"")) # decide which coordinate to sort on switch(sortby, range = { s <- sidelengths(as.box3(Y)) sortcoord <- c("x", "y", "z")[which.min(s)] }, var = { v <- apply(coords(Y), 2, var) sortcoord <- c("x", "y", "z")[which.min(v)] }, x={ sortcoord <- "x" }, y={ sortcoord <- "y" }, z={ sortcoord <- "z" } ) # The C code expects points to be sorted by z coordinate. XX <- coords(X) YY <- coords(Y) switch(sortcoord, x = { # rotate x axis to z axis XX <- XX[, c(3,2,1)] YY <- YY[, c(3,2,1)] }, y = { # rotate y axis to z axis XX <- XX[, c(3,1,2)] YY <- YY[, c(3,1,2)] }, z = { }) # sort only if needed if(!is.sorted.X){ oX <- fave.order(XX[,3]) XX <- XX[oX, , drop=FALSE] if(exclude) iX <- iX[oX] } if (!is.sorted.Y){ oY <- fave.order(YY[,3]) YY <- YY[oY, , drop=FALSE] if(exclude) iY <- iY[oY] } # number of neighbours that are well-defined kmaxcalc <- min(nY, kmax) if(kmaxcalc == 1) { # ............... single nearest neighbour .................. 
# call C code nndv <- if(want.dist) numeric(nX) else numeric(1) nnwh <- if(want.which) integer(nX) else integer(1) if(!exclude) iX <- iY <- integer(1) huge <- 1.1 * diameter(bounding.box3(as.box3(X),as.box3(Y))) z <- .C("nnX3Dinterface", n1=as.integer(nX), x1=as.double(XX[,1]), y1=as.double(XX[,2]), z1=as.double(XX[,3]), id1=as.integer(iX), n2=as.integer(nY), x2=as.double(YY[,1]), y2=as.double(YY[,2]), z2=as.double(YY[,3]), id2=as.integer(iY), exclude = as.integer(exclude), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd=as.double(nndv), nnwhich=as.integer(nnwh), huge=as.double(huge), PACKAGE = "spatstat") if(want.which) { # conversion to R indexing is done in C code nnwcode <- z$nnwhich if(any(uhoh <- (nnwcode == 0))) { warning("Internal error: NA's produced in nncross()$which") nnwcode[uhoh] <- NA } } # reinterpret in original ordering if(is.sorted.X){ if(want.dist) nndv <- z$nnd if(want.which) nnwh <- if(is.sorted.Y) nnwcode else oY[nnwcode] } else { if(want.dist) nndv[oX] <- z$nnd if(want.which) nnwh[oX] <- if(is.sorted.Y) nnwcode else oY[nnwcode] } if(want.both) return(data.frame(dist=nndv, which=nnwh)) return(if(want.dist) nndv else nnwh) } else { # ............... k nearest neighbours .................. 
# call C code nndv <- if(want.dist) numeric(nX * kmaxcalc) else numeric(1) nnwh <- if(want.which) integer(nX * kmaxcalc) else integer(1) if(!exclude) iX <- iY <- integer(1) huge <- 1.1 * diameter(bounding.box3(as.box3(X),as.box3(Y))) z <- .C("knnX3Dinterface", n1=as.integer(nX), x1=as.double(XX[,1]), y1=as.double(XX[,2]), z1=as.double(XX[,3]), id1=as.integer(iX), n2=as.integer(nY), x2=as.double(YY[,1]), y2=as.double(YY[,2]), z2=as.double(YY[,3]), id2=as.integer(iY), kmax=as.integer(kmaxcalc), exclude = as.integer(exclude), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd=as.double(nndv), nnwhich=as.integer(nnwh), huge=as.double(huge), PACKAGE = "spatstat") # extract results nnD <- z$nnd nnW <- z$nnwhich # map 0 to NA if(want.which && any(uhoh <- (nnW == 0))) { nnW[uhoh] <- NA if(want.dist) nnD[uhoh] <- Inf } # reinterpret indices in original ordering if(!is.sorted.Y) nnW <- oY[nnW] # reform as matrices NND <- if(want.dist) matrix(nnD, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0 NNW <- if(want.which) matrix(nnW, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0 if(!is.sorted.X){ # rearrange rows to correspond to original ordering of points if(want.dist) NND[oX, ] <- NND if(want.which) NNW[oX, ] <- NNW } # the return value should correspond to the original vector k if(kmax > kmaxcalc) { # add columns of NA / Inf kextra <- kmax - kmaxcalc if(want.dist) NND <- cbind(NND, matrix(Inf, nrow=nX, ncol=kextra)) if(want.which) NNW <- cbind(NNW, matrix(NA_integer_, nrow=nX, ncol=kextra)) } if(length(k) < kmax) { # select only the specified columns if(want.dist) NND <- NND[, k, drop=TRUE] if(want.which) NNW <- NNW[, k, drop=TRUE] } result <- as.data.frame(list(dist=NND, which=NNW)[what]) if(ncol(result) == 1) result <- result[, , drop=TRUE] return(result) } } spatstat/R/sparselinalg.R0000644000176200001440000002040113115271120015113 0ustar liggesusers#' #' sparselinalg.R #' #' Counterpart of linalg.R for sparse matrices/arrays #' #' #' $Revision: 1.10 $ $Date: 
#' 2017/06/05 10:31:58 $   (continuation of the $Date:$ header comment split across chunk lines)

#' Sum the entries of X over the retained margins given in MARGIN.
#' Equivalent to apply(X, MARGIN, sum) but, for sparse input, works on
#' the nonzero entries only (via SparseEntries / EntriesToSparse,
#' project helpers) instead of densifying the array.
marginSums <- function(X, MARGIN) {
  #' equivalent to apply(X, MARGIN, sum)
  if(length(MARGIN) == 0) return(sum(X))
  #' dense input: defer to base apply()
  if(is.array(X) || is.matrix(X))
    return(apply(X, MARGIN, sum))
  dimX <- dim(X)
  #' all margins retained: nothing to sum, just permute
  if(length(MARGIN) == length(dimX))
    return(aperm(X, MARGIN))
  if(any(huh <- (MARGIN < 0 | MARGIN > length(dimX))))
    stop(paste(commasep(sQuote(paste0("MARGIN=", MARGIN[huh]))),
               ngettext(sum(huh), "is", "are"),
               "not defined"),
         call.=FALSE)
  df <- SparseEntries(X)
  # discard other indices
  nonmargin <- setdiff(seq_along(dimX), MARGIN)
  df <- df[ , -nonmargin, drop=FALSE]
  # implicitly accumulate
  result <- EntriesToSparse(df, dimX[MARGIN])
  return(result)
}

#' Contraction of a vector A with the first margin of a 3D array B.
#' Equivalent of tensor(A, B, 1, 1) when A is a vector and B is a
#' (possibly sparse) 3D array; returns a (sparse) matrix over the
#' remaining two margins of B.
tensor1x1 <- function(A, B) {
  ## equivalent of tensor(A, B, 1, 1)
  ## when A is a vector and B is a sparse array.
  stopifnot(length(dim(B)) == 3)
  A <- as.vector(as.matrix(A))
  stopifnot(length(A) == dim(B)[1])
  if(is.array(B)) {
    ## dense case: delegate to the tensor package
    result <- tensor::tensor(A,B,1,1)
  } else if(inherits(B, "sparse3Darray")) {
    result <- sparseMatrix(i=B$j,
                           j=B$k,
                           x=B$x * A[B$i],
                           # values for same (i,j) are summed
                           dims=dim(B)[-1],
                           dimnames=dimnames(B)[2:3])
  } else stop("Format of B not understood", call.=FALSE)
  return(result)
}

## General tensor contraction for sparse and dense arrays:
## sparse counterpart of tensor::tensor(A, B, alongA, alongB).
tenseur <- local({

  tenseur <- function(A, B, alongA=integer(0), alongB=integer(0)) {
    #' full arrays?
if(isfull(A) && isfull(B)) return(tensor::tensor(A=A, B=B, alongA=alongA, alongB=alongB)) #' check dimensions dimA <- dim(A) %orifnull% length(A) dnA <- dimnames(A) if(is.null(dnA)) dnA <- rep(list(NULL), length(dimA)) dimB <- dim(B) %orifnull% length(B) dnB <- dimnames(B) if(is.null(dnB)) dnB <- rep(list(NULL), length(dimB)) #' check 'along' if (length(alongA) != length(alongB)) stop("\"along\" vectors must be same length") mtch <- dimA[alongA] == dimB[alongB] if (any(is.na(mtch)) || !all(mtch)) stop("Mismatch in \"along\" dimensions") #' dimensions of result retainA <- !(seq_along(dimA) %in% alongA) retainB <- !(seq_along(dimB) %in% alongB) dimC <- c(dimA[retainA], dimB[retainB]) nC <- length(dimC) if(nC > 3) stop("Sorry, sparse arrays of more than 3 dimensions are not supported", call.=FALSE) #' fast code for special cases if(length(dimA) == 1 && length(alongA) == 1 && !isfull(B)) { BB <- SparseEntries(B) Bx <- BB[,ncol(BB)] ijk <- BB[,-ncol(BB),drop=FALSE] kalong <- ijk[,alongB] ABalong <- as.numeric(Bx * A[kalong]) ndimB <- ncol(ijk) switch(ndimB, { result <- sum(ABalong) }, { iout <- ijk[,-alongB] result <- sparseVectorCumul(i=iout, x=ABalong, # values aggregated by i length=dimC) }, { ijout <- ijk[,-alongB,drop=FALSE] result <- sparseMatrix(i=ijout[,1], j=ijout[,2], x=ABalong, # values aggregated by (i,j) dims=dimC, dimnames=dnB[-alongB]) result <- drop0(result) }) return(result) } if(length(dimB) == 1 && length(alongB) == 1 && !isfull(A)) { AA <- SparseEntries(A) Ax <- AA[,ncol(AA)] ijk <- AA[,-ncol(AA),drop=FALSE] kalong <- ijk[,alongA] ABalong <- as.numeric(Ax * B[kalong]) nA <- ncol(ijk) switch(nA, { result <- sum(ABalong) }, { iout <- ijk[,-alongA] result <- sparseVectorCumul(i=iout, x=ABalong, # values aggregated by i length=dimC) }, { ijout <- ijk[,-alongA,drop=FALSE] result <- sparseMatrix(i=ijout[,1], j=ijout[,2], x=ABalong, # values aggregated by (i,j) dims=dimC, dimnames=dnA[-alongA]) result <- drop0(result) }) return(result) } #' extract indices 
and values of nonzero entries dfA <- SparseEntries(A) dfB <- SparseEntries(B) #' assemble all tuples which contribute if(length(alongA) == 0) { #' outer product dfC <- outersparse(dfA, dfB) } else { if(length(alongA) == 1) { Acode <- dfA[,alongA] Bcode <- dfB[,alongB] } else { Along <- unname(as.list(dfA[,alongA, drop=FALSE])) Blong <- unname(as.list(dfB[,alongB, drop=FALSE])) Acode <- do.call(paste, append(Along, list(sep=","))) Bcode <- do.call(paste, append(Blong, list(sep=","))) } lev <- unique(c(Acode,Bcode)) Acode <- factor(Acode, levels=lev) Bcode <- factor(Bcode, levels=lev) splitA <- split(dfA, Acode) splitB <- split(dfB, Bcode) splitC <- mapply(outersparse, splitA, splitB, SIMPLIFY=FALSE) dfC <- rbindCompatibleDataFrames(splitC) } #' form product of contributing entries dfC$x <- with(dfC, A.x * B.x) #' retain only appropriate columns retain <- c(retainA, FALSE, retainB, FALSE, TRUE) dfC <- dfC[, retain, drop=FALSE] #' collect result result <- EntriesToSparse(dfC, dimC) return(result) } isfull <- function(z) { if(is.array(z) || is.matrix(z) || is.data.frame(z)) return(TRUE) if(inherits(z, c("sparseVector", "sparseMatrix", "sparse3Darray"))) return(FALSE) return(TRUE) } outersparse <- function(dfA, dfB) { if(is.null(dfA) || is.null(dfB)) return(NULL) IJ <- expand.grid(I=seq_len(nrow(dfA)), J=seq_len(nrow(dfB))) dfC <- with(IJ, cbind(A=dfA[I,,drop=FALSE], B=dfB[J,,drop=FALSE])) return(dfC) } tenseur }) sumsymouterSparse <- function(x, w=NULL, dbg=FALSE) { dimx <- dim(x) if(length(dimx) != 3) stop("x should be a 3D array") stopifnot(dim(x)[2] == dim(x)[3]) if(!is.null(w)) { stopifnot(inherits(w, "sparseMatrix")) stopifnot(all(dim(w) == dim(x)[2:3])) } m <- dimx[1] n <- dimx[2] if(inherits(x, "sparse3Darray")) { df <- data.frame(i = x$i - 1L, # need 0-based indices j = x$j - 1L, k = x$k - 1L, value = x$x) } else stop("x is not a recognised kind of sparse array") # trivial? 
if(nrow(df) < 2) { y <- matrix(0, m, m) dimnames(y) <- rep(dimnames(x)[1], 2) return(y) } # order by increasing j, then k oo <- with(df, order(j, k, i)) df <- df[oo, ] # now provide ordering by increasing k then j ff <- with(df, order(k,j,i)) # if(dbg) { cat("----------------- Data ---------------------\n") print(df) cat("-------------- Reordered data --------------\n") print(df[ff,]) cat("Calling......\n") } if(is.null(w)) { z <- .C("CspaSumSymOut", m = as.integer(m), n = as.integer(n), lenx = as.integer(nrow(df)), ix = as.integer(df$i), # indices are already 0-based jx = as.integer(df$j), kx = as.integer(df$k), x = as.double(df$value), flip = as.integer(ff - 1L), # convert 1-based to 0-based y = as.double(numeric(m * m)), PACKAGE = "spatstat") } else { # extract triplet representation of w w <- as(w, Class="TsparseMatrix") dfw <- data.frame(j=w@i, k=w@j, w=w@x) woo <- with(dfw, order(j, k)) dfw <- dfw[woo, , drop=FALSE] z <- .C("CspaWtSumSymOut", m = as.integer(m), n = as.integer(n), lenx = as.integer(nrow(df)), ix = as.integer(df$i), # indices are already 0-based jx = as.integer(df$j), kx = as.integer(df$k), x = as.double(df$value), flip = as.integer(ff - 1L), # convert 1-based to 0-based lenw = as.integer(nrow(dfw)), jw = as.integer(dfw$j), kw = as.integer(dfw$k), w = as.double(dfw$w), y = as.double(numeric(m * m)), PACKAGE = "spatstat") } y <- matrix(z$y, m, m) dimnames(y) <- rep(dimnames(x)[1], 2) return(y) } spatstat/R/localpcf.R0000755000176200001440000001420413115271120014221 0ustar liggesusers# # localpcf.R # # $Revision: 1.22 $ $Date: 2017/06/05 10:31:58 $ # # localpcf <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15) { if(length(list(...)) > 0) warning("Additional arguments ignored") stopifnot(is.ppp(X)) localpcfengine(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan) } localpcfinhom <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, sigma=NULL, varcov=NULL) { stopifnot(is.ppp(X)) if(is.null(lambda)) { # No 
intensity data provided # Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambda <- as.numeric(lambda) } else { # validate if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.ppm(lambda)) lambda <- predict(lambda, locations=X, type="trend") else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) check.nvector(lambda, npoints(X)) else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) } localpcfengine(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan, lambda=lambda) } localpcfengine <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL) { m <- localpcfmatrix(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan, lambda=lambda) r <- attr(m, "r") delta <- attr(m, "delta") nX <- npoints(X) if(nX == 0) { df <- data.frame(r=r, theo=rep.int(1, length(r))) nama <- desc <- labl <- NULL } else { # border correction dbord <- bdist.points(X) m[r[row(m)] > dbord[col(m)]] <- NA # df <- data.frame(m, r=r, theo=rep.int(1, length(r))) icode <- unlist(lapply(seq_len(nX), numalign, nmax=nX)) nama <- paste("est", icode, sep="") desc <- paste("estimate of %s for point", icode) labl <- paste("%s[", icode, "](r)", sep="") } names(df) <- c(nama, "r", "theo") desc <- c(desc, "distance argument r", "theoretical Poisson %s") labl <- c(labl, "r", "%s[pois](r)") # create fv object g <- fv(df, "r", quote(localg(r)), "theo", , c(0, max(r)), labl, desc, fname="localg") # default is to display them all formula(g) <- . 
~ r fvnames(g, ".") <- names(df)[names(df) != "r"] unitname(g) <- unitname(X) attr(g, "delta") <- delta attr(g, "correction") <- "border" return(g) } localpcfmatrix <- function(X, i=seq_len(npoints(X)), ..., lambda = NULL, delta=NULL, rmax=NULL, nr=512, stoyan=0.15) { missi <- missing(i) weighted <- !is.null(lambda) nX <- npoints(X) nY <- if(missi) nX else length(seq_len(nX)[i]) W <- as.owin(X) lambda.ave <- nX/area(W) if(is.null(delta)) delta <- stoyan/sqrt(lambda.ave) if(is.null(rmax)) rmax <- rmax.rule("K", W, lambda.ave) # if(nX == 0 || nY == 0) { out <- matrix(0, nr, 0) } else { # sort points in increasing order of x coordinate oX <- fave.order(X$x) Xsort <- X[oX] idXsort <- (1:nX)[oX] if(weighted) { lambdaXsort <- lambda[oX] weightXsort <- 1/lambdaXsort } if(missi) { Y <- X oY <- oX Ysort <- Xsort idYsort <- idXsort } else { # i is some kind of index Y <- X[i] idY <- (1:nX)[i] oY <- fave.order(Y$x) Ysort <- Y[oY] idYsort <- idY[oY] } nY <- npoints(Y) force(nr) # call C if(!weighted) { zz <- .C("locpcfx", nn1 = as.integer(nY), x1 = as.double(Ysort$x), y1 = as.double(Ysort$y), id1 = as.integer(idYsort), nn2 = as.integer(nX), x2 = as.double(Xsort$x), y2 = as.double(Xsort$y), id2 = as.integer(idXsort), nnr = as.integer(nr), rmaxi=as.double(rmax), del=as.double(delta), pcf=as.double(double(nr * nY)), PACKAGE = "spatstat") } else { zz <- .C("locWpcfx", nn1 = as.integer(nY), x1 = as.double(Ysort$x), y1 = as.double(Ysort$y), id1 = as.integer(idYsort), nn2 = as.integer(nX), x2 = as.double(Xsort$x), y2 = as.double(Xsort$y), id2 = as.integer(idXsort), w2 = as.double(weightXsort), nnr = as.integer(nr), rmaxi=as.double(rmax), del=as.double(delta), pcf=as.double(double(nr * nY)), PACKAGE = "spatstat") } out <- matrix(zz$pcf, nr, nY) # reorder columns to match original out[, oY] <- out # rescale out <- out/(2 * pi * if(!weighted) lambda.ave else 1) } # dress up attr(out, "r") <- seq(from=0, to=rmax, length.out=nr) attr(out, "delta") <- delta class(out) <- 
c("localpcfmatrix", class(out)) return(out) } print.localpcfmatrix <- function(x, ...) { cat("Matrix of local pair correlation estimates\n") nc <- ncol(x) nr <- nrow(x) cat(paste("pcf estimates for", nc, ngettext(nc, "point", "points"), "\n")) rval <- attr(x, "r") cat(paste("r values from 0 to", max(rval), "in", nr, "steps\n")) return(invisible(NULL)) } plot.localpcfmatrix <- function(x, ...) { xname <- short.deparse(substitute(x)) rval <- attr(x, "r") do.call(matplot, resolve.defaults(list(rval, x), list(...), list(type="l", main=xname, xlab="r", ylab="pair correlation"))) } "[.localpcfmatrix" <- function(x, i, ...) { r <- attr(x, "r") delta <- attr(x, "delta") class(x) <- "matrix" if(missing(i)) { x <- x[ , ...] } else { x <- x[i, ...] if(is.matrix(i)) return(x) r <- r[i] } if(!is.matrix(x)) x <- matrix(x, nrow=length(r)) attr(x, "r") <- r attr(x, "delta") <- delta class(x) <- c("localpcfmatrix", class(x)) return(x) } spatstat/R/varcount.R0000644000176200001440000000355013115225157014307 0ustar liggesusers#' #' varcount.R #' #' Variance of N(B) #' #' $Revision: 1.8 $ $Date: 2015/11/21 07:02:51 $ #' varcount <- function(model, B, ..., dimyx=NULL) { stopifnot(is.owin(B) || is.im(B) || is.function(B)) g <- pcfmodel(model) if(!is.function(g)) stop("Pair correlation function cannot be computed") if(is.owin(B)) { lambdaB <- predict(model, locations=B, ngrid=dimyx, type="intensity") v <- varcountEngine(g, B, lambdaB) } else { f <- if(is.im(B)) B else as.im(B, W=as.owin(model), ..., dimyx=dimyx) B <- as.owin(f) lambdaB <- predict(model, locations=B, type="intensity") v <- varcountEngine(g, B, lambdaB, f) } return(v) } varcountEngine <- local({ varcountEngine <- function(g, B, lambdaB, f=1) { if(missing(f) || identical(f, 1)) { v <- integral(lambdaB) + covterm(g, B, lambdaB) } else if(min(f) >= 0) { ## nonnegative integrand v <- integral(lambdaB * f^2) + covterm(g, B, lambdaB * f) } else if(max(f) <= 0) { ## nonpositive integrand v <- integral(lambdaB * f^2) + covterm(g, 
B, lambdaB * (-f)) } else { ## integrand has both positive and negative parts lamfplus <- eval.im(lambdaB * pmax(0, f)) lamfminus <- eval.im(lambdaB * pmax(0, -f)) v <- integral(lambdaB * f^2) + (covterm(g, B, lamfplus) + covterm(g, B, lamfminus) - covterm(g, B, lamfplus, lamfminus) - covterm(g, B, lamfminus, lamfplus)) } return(v) } covterm <- function(g, B, f, f2) { if(missing(f2)) { # \int_B \int_B (g(u-v) - 1) f(u) f(v) du dv H <- distcdf(B, dW=f) a <- integral(f)^2 * (as.numeric(stieltjes(g, H)) - 1) } else { # \int_B \int_B (g(u-v) - 1) f(u) f2(v) du dv H <- distcdf(B, dW=f, dV=f2) a <- integral(f) * integral(f2) * (as.numeric(stieltjes(g, H)) - 1) } return(a) } varcountEngine }) spatstat/R/copyExampleFiles.R0000644000176200001440000000272313115225157015720 0ustar liggesusers## copyExampleFiles.R ## $Revision: 1.10 $ $Date: 2015/03/11 05:58:50 $ copyExampleFiles <- function(which, folder=getwd()) { choices <- dir(system.file("rawdata", package="spatstat")) if(missing(which) || is.null(which)) { message(paste("Choices are: which=", commasep(sQuote(choices), " or "))) return(invisible(NULL)) } if(!interactive()) stop("Copying files requires an interactive session (by CRAN Policies).") whichdata <- match.arg(which, choices) sourcefolder <- system.file("rawdata", whichdata, package="spatstat") sourcefiles <- dir(sourcefolder) if(length(sourcefiles) == 0) stop("No files available") # set directory oldfolder <- getwd() setwd(folder) on.exit(setwd(oldfolder)) # Warn user: foldername <- if(identical(folder, oldfolder)) "the current folder" else paste("the folder", dQuote(folder)) splat("You are about to have been copying", ngettext(length(sourcefiles), "file", "files"), commasep(dQuote(sourcefiles)), "to", paste0(foldername, "."), "This may overwrite existing files.") # Ask permission: answer <- readline("Do you want to continue? 
(y/n)[y] ") if(!tolower(substr(answer, 1, 1)) %in% c("", "y")) { splat("Aborting...") return(invisible(NULL)) } # for(fn in sourcefiles) { frompath <- file.path(sourcefolder, fn) file.copy(from = frompath, to = fn, overwrite=TRUE) } splat("Copying completed.") return(invisible(NULL)) } spatstat/R/rmh.default.R0000755000176200001440000010063513115271120014653 0ustar liggesusers# # $Id: rmh.default.R,v 1.108 2017/06/05 10:31:58 adrian Exp adrian $ # rmh.default <- function(model,start=NULL, control=default.rmhcontrol(model), ..., nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, snoop=FALSE) { # # Function rmh. To simulate realizations of 2-dimensional point # patterns, given the conditional intensity function of the # underlying process, via the Metropolis-Hastings algorithm. # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # V A L I D A T E # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== if(verbose) cat("Checking arguments..") # validate arguments and fill in the defaults model <- rmhmodel(model) start <- rmhstart(start) if(is.null(control)) { control <- default.rmhcontrol(model) } else { control <- rmhcontrol(control) } # override if(length(list(...)) > 0) control <- update(control, ...) control <- rmhResolveControl(control, model) saveinfo <- as.logical(saveinfo) # retain "..." arguments unrecognised by rmhcontrol # These are assumed to be arguments of functions defining the trend argh <- list(...) known <- names(argh) %in% names(formals(rmhcontrol.default)) f.args <- argh[!known] #### Multitype models # Decide whether the model is multitype; if so, find the types. 
types <- rmhResolveTypes(model, start, control) ntypes <- length(types) mtype <- (ntypes > 1) # If the model is multitype, check that the model parameters agree with types # and digest them if(mtype && !is.null(model$check)) { model <- rmhmodel(model, types=types) } else { model$types <- types } ######## Check for illegal combinations of model, start and control ######## # No expansion can be done if we are using x.start if(start$given == "x") { if(control$expand$force.exp) stop("Cannot expand window when using x.start.\n", call.=FALSE) control$expand <- .no.expansion } # Warn about a silly value of fixall: if(control$fixall & ntypes==1) { warning("control$fixall applies only to multitype processes. Ignored. \n") control$fixall <- FALSE if(control$fixing == "n.each.type") control$fixing <- "n.total" } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # M O D E L P A R A M E T E R S # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ####### Determine windows ################################ if(verbose) cat("determining simulation windows...") # these may be NULL w.model <- model$w x.start <- start$x.start trend <- model$trend trendy <- !is.null(trend) singletrend <- trendy && (is.im(trend) || is.function(trend) || (is.numeric(trend) && length(trend) == 1)) trendlist <- if(singletrend) list(trend) else trend # window implied by trend image, if any w.trend <- if(is.im(trend)) as.owin(trend) else if(is.list(trend) && any(ok <- unlist(lapply(trend, is.im)))) as.owin((trend[ok])[[1L]]) else NULL ## Clipping window (for final result) w.clip <- if(!is.null(w.model)) w.model else if(!will.expand(control$expand)) { if(start$given == "x" && is.ppp(x.start)) x.start$window else if(is.owin(w.trend)) w.trend } else NULL if(!is.owin(w.clip)) stop("Unable to determine window for pattern") ## Simulation window xpn <- rmhResolveExpansion(w.clip, control, trendlist, "trend") w.sim <- xpn$wsim expanded <- xpn$expanded ## Check the 
fine print if(expanded) { if(control$fixing != "none") stop(paste("If we're conditioning on the number of points,", "we cannot clip the result to another window.\n")) if(!is.subset.owin(w.clip, w.sim)) stop("Expanded simulation window does not contain model window") } ####### Trend ################################ # Check that the expanded window fits inside the window # upon which the trend(s) live if there are trends and # if any trend is given by an image. if(expanded && !is.null(trend)) { trends <- if(is.im(trend)) list(trend) else trend images <- unlist(lapply(trends, is.im)) if(any(images)) { iwindows <- lapply(trends[images], as.owin) nimages <- length(iwindows) misfit <- !sapply(iwindows, is.subset.owin, A=w.sim) nmisfit <- sum(misfit) if(nmisfit > 1) stop(paste("Expanded simulation window is not contained in", "several of the trend windows.\n", "Bailing out.\n")) else if(nmisfit == 1) { warning(paste("Expanded simulation window is not contained in", if(nimages == 1) "the trend window.\n" else "one of the trend windows.\n", "Expanding to this trend window (only).\n")) w.sim <- iwindows[[which(misfit)]] } } } # Extract the 'beta' parameters if(length(model$cif) == 1) { # single interaction beta <- model$C.beta betalist <- list(beta) } else { # hybrid betalist <- model$C.betalist # multiply beta vectors for each component beta <- Reduce("*", betalist) } ##### .................. CONDITIONAL SIMULATION ................... 
##### #|| Determine windows for conditional simulation #|| #|| w.state = window for the full configuration #|| #|| w.sim = window for the 'free' (random) points #|| w.state <- w.sim condtype <- control$condtype x.cond <- control$x.cond # n.cond <- control$n.cond switch(condtype, none={ w.cond <- NULL }, window={ # conditioning on the realisation inside a subwindow w.cond <- as.owin(x.cond) # subtract from w.sim w.sim <- setminus.owin(w.state, w.cond) if(is.empty(w.sim)) stop(paste("Conditional simulation is undefined;", "the conditioning window", sQuote("as.owin(control$x.cond)"), "covers the entire simulation window")) }, Palm={ # Palm conditioning w.cond <- NULL }) ##### #|| Convert conditioning points to appropriate format x.condpp <- switch(condtype, none=NULL, window=x.cond, Palm=as.ppp(x.cond, w.state)) # validate if(!is.null(x.condpp)) { if(mtype) { if(!is.marked(x.condpp)) stop("Model is multitype, but x.cond is unmarked") if(!identical(all.equal(types, levels(marks(x.condpp))), TRUE)) stop("Types of points in x.cond do not match types in model") } } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # S T A R T I N G S T A T E # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ###################### Starting state data ############################ # whether the initial state should be thinned thin <- (start$given != "x") && (control$fixing == "none") # There must now be a starting state. if(start$given == "none") { # For conditional simulation, the starting state must be given if(condtype != "none") stop("No starting state given") # Determine integral of beta * trend over data window. # This is the expected number of points in the reference Poisson process. 
area.w.clip <- area(w.clip) if(trendy) { tsummaries <- summarise.trend(trend, w=w.clip, a=area.w.clip) En <- beta * sapply(tsummaries, getElement, name="integral") } else { En <- beta * area.w.clip } # Fix n.start equal to this integral n.start <- if(spatstat.options("scalable")) round(En) else ceiling(En) start <- rmhstart(n.start=n.start) } # In the case of conditional simulation, the start data determine # the 'free' points (i.e. excluding x.cond) in the initial state. switch(start$given, none={ stop("No starting state given") }, x = { # x.start was given # coerce it to a ppp object if(!is.ppp(x.start)) x.start <- as.ppp(x.start, w.state) if(condtype == "window") { # clip to simulation window xs <- x.start[w.sim] nlost <- x.start$n - xs$n if(nlost > 0) warning(paste(nlost, ngettext(nlost, "point","points"), "of x.start", ngettext(nlost, "was", "were"), "removed because", ngettext(nlost, "it", "they"), "fell in the window of x.cond")) x.start <- xs } npts.free <- x.start$n }, n = { # n.start was given n.start <- start$n.start # Adjust the number of points in the starting state in accordance # with the expansion that has occurred. if(expanded) { holnum <- if(spatstat.options("scalable")) round else ceiling n.start <- holnum(n.start * area(w.sim)/area(w.clip)) } # npts.free <- sum(n.start) # The ``sum()'' is redundant if n.start # is scalar; no harm, but. }, stop("Internal error: start$given unrecognized")) #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # C O N T R O L P A R A M E T E R S # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ################### Periodic boundary conditions ######################### periodic <- control$periodic if(is.null(periodic)) { # undecided. Use default rule control$periodic <- periodic <- expanded && is.rectangle(w.state) } else if(periodic && !is.rectangle(w.state)) { # if periodic is TRUE we have to be simulating in a rectangular window. 
stop("Need rectangular window for periodic simulation.\n") } # parameter passed to C: period <- if(periodic) c(diff(w.state$xrange), diff(w.state$yrange)) else c(-1,-1) #### vector of proposal probabilities if(!mtype) ptypes <- 1 else { ptypes <- control$ptypes if(is.null(ptypes)) { # default proposal probabilities ptypes <- if(start$given == "x" && (nx <- npoints(x.start)) > 0) { table(marks(x.start, dfok=FALSE))/nx } else rep.int(1/ntypes, ntypes) } else { # Validate ptypes if(length(ptypes) != ntypes | sum(ptypes) != 1) stop("Argument ptypes is mis-specified.\n") } } ######################################################################## # Normalising constant for proposal density # # Integral of trend over the expanded window (or area of window): # Iota == Integral Of Trend (or) Area. area.w.sim <- area(w.sim) if(trendy) { if(verbose) cat("Evaluating trend integral...") tsummaries <- summarise.trend(trend, w=w.sim, a=area.w.sim) mins <- sapply(tsummaries, getElement, name="min") if(any(mins < 0)) stop("Trend has negative values") iota <- sapply(tsummaries, getElement, name="integral") tmax <- sapply(tsummaries, getElement, name="max") } else { iota <- area.w.sim tmax <- NULL } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # A.S. 
EMPTY PROCESS # # for conditional simulation, 'empty' means there are no 'free' points # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== a.s.empty <- FALSE # # Empty pattern, simulated conditional on n # if(npts.free == 0 && control$fixing != "none") { a.s.empty <- TRUE if(verbose) { mess <- paste("Initial pattern has 0 random points,", "and simulation is conditional on the number of points -") if(condtype == "none") warning(paste(mess, "returning an empty pattern\n")) else warning(paste(mess, "returning a pattern with no random points\n")) } } # # If beta = 0, the process is almost surely empty # if(all(beta < .Machine$double.eps)) { if(control$fixing == "none" && condtype == "none") { # return empty pattern if(verbose) warning("beta = 0 implies an empty pattern\n") a.s.empty <- TRUE } else stop("beta = 0 implies an empty pattern, but we are simulating conditional on a nonzero number of points") } # # If we're conditioning on the contents of a subwindow, # and the subwindow covers the clipping region, # the result is deterministic. 
if(condtype == "window" && is.subset.owin(w.clip, w.cond)) { a.s.empty <- TRUE warning(paste("Model window is a subset of conditioning window:", "result is deterministic\n")) } # # if(a.s.empty) { # create empty pattern, to be returned if(!is.null(x.condpp)) empty <- x.condpp[w.clip] else { empty <- ppp(numeric(0), numeric(0), window=w.clip) if(mtype) { vide <- factor(types[integer(0)], levels=types) empty <- empty %mark% vide } } } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # PACK UP # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ######### Store decisions Model <- model Start <- start Control <- control Model$w <- w.clip Model$types <- types Control$expand <- if(expanded) rmhexpand(w.state) else .no.expansion Control$internal <- list(w.sim=w.sim, w.state=w.state, x.condpp=x.condpp, ptypes=ptypes, period=period, thin=thin) Model$internal <- list(a.s.empty=a.s.empty, empty=if(a.s.empty) empty else NULL, mtype=mtype, trendy=trendy, betalist=betalist, beta=beta, iota=iota, tmax=tmax) Start$internal <- list(npts.free=npts.free) InfoList <- list(model=Model, start=Start, control=Control) class(InfoList) <- c("rmhInfoList", class(InfoList)) # go if(nsim == 1 && drop) { result <- do.call(rmhEngine, append(list(InfoList, verbose=verbose, snoop=snoop, kitchensink=saveinfo), f.args)) } else { result <- vector(mode="list", length=nsim) if(verbose) { splat("Generating", nsim, "point patterns...") pstate <- list() } subverb <- verbose && (nsim == 1) for(isim in 1:nsim) { if(verbose) pstate <- progressreport(isim, nsim, state=pstate) result[[isim]] <- do.call(rmhEngine, append(list(InfoList, verbose=subverb, snoop=snoop, kitchensink=saveinfo), f.args)) } result <- as.solist(result) if(verbose) splat("Done.\n") } return(result) } print.rmhInfoList <- function(x, ...) 
{ cat("\nPre-digested Metropolis-Hastings algorithm parameters (rmhInfoList)\n") print(as.anylist(x)) } #--------------- rmhEngine ------------------------------------------- # # This is the interface to the C code. # # InfoList is a list of pre-digested, validated arguments # obtained from rmh.default. # # This function is called by rmh.default to generate one simulated # realisation of the model. # It's called repeatedly by ho.engine and qqplot.ppm to generate multiple # realisations (saving time by not repeating the argument checking # in rmh.default). # arguments: # kitchensink: whether to tack InfoList on to the return value as an attribute # preponly: whether to just return InfoList without simulating # # rmh.default digests arguments and calls rmhEngine with kitchensink=T # # qqplot.ppm first gets InfoList by calling rmh.default with preponly=T # (which digests the model arguments and calls rmhEngine # with preponly=T, returning InfoList), # then repeatedly calls rmhEngine(InfoList) to simulate. # # ------------------------------------------------------- rmhEngine <- function(InfoList, ..., verbose=FALSE, kitchensink=FALSE, preponly=FALSE, snoop=FALSE, overrideXstart=NULL, overrideclip=FALSE) { # Internal Use Only! # This is the interface to the C code. 
if(!inherits(InfoList, "rmhInfoList")) stop("data not in correct format for internal function rmhEngine") if(preponly) return(InfoList) model <- InfoList$model start <- InfoList$start control <- InfoList$control w.sim <- control$internal$w.sim w.state <- control$internal$w.state w.clip <- model$w condtype <- control$condtype x.condpp <- control$internal$x.condpp types <- model$types ntypes <- length(types) ptypes <- control$internal$ptypes period <- control$internal$period mtype <- model$internal$mtype trend <- model$trend trendy <- model$internal$trendy # betalist <- model$internal$betalist beta <- model$internal$beta iota <- model$internal$iota tmax <- model$internal$tmax npts.free <- start$internal$npts.free n.start <- start$n.start x.start <- start$x.start #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # E M P T Y P A T T E R N # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== if(model$internal$a.s.empty) { if(verbose) cat("\n") empty <- model$internal$empty attr(empty, "info") <- InfoList return(empty) } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # S I M U L A T I O N # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ############################################# #### #### Random number seed: initialisation & capture #### ############################################# if(!exists(".Random.seed")) runif(1L) saved.seed <- .Random.seed ############################################# #### #### Poisson case #### ############################################# if(is.poisson.rmhmodel(model)) { if(verbose) cat("\n") intensity <- if(!trendy) beta else model$trend Xsim <- switch(control$fixing, none= { # Poisson process if(!mtype) rpoispp(intensity, win=w.sim, ..., warnwin=FALSE) else rmpoispp(intensity, win=w.sim, types=types, warnwin=FALSE) }, n.total = { # Binomial/multinomial process with fixed total number of points if(!mtype) rpoint(npts.free, 
intensity, win=w.sim, verbose=verbose) else rmpoint(npts.free, intensity, win=w.sim, types=types, verbose=verbose) }, n.each.type = { # Multinomial process with fixed number of points of each type npts.each <- switch(start$given, n = n.start, x = as.integer(table(marks(x.start, dfok=FALSE))), stop("No starting state given; can't condition on fixed number of points")) rmpoint(npts.each, intensity, win=w.sim, types=types, verbose=verbose) }, stop("Internal error: control$fixing unrecognised") ) # if conditioning, add fixed points if(condtype != "none") Xsim <- superimpose(Xsim, x.condpp, W=w.state) # clip result to output window Xclip <- if(!overrideclip) Xsim[w.clip] else Xsim attr(Xclip, "info") <- InfoList return(Xclip) } ######################################################################## # M e t r o p o l i s H a s t i n g s s i m u l a t i o n ######################################################################## if(verbose) cat("Starting simulation.\nInitial state...") #### Build starting state npts.cond <- if(condtype != "none") x.condpp$n else 0 # npts.total <- npts.free + npts.cond #### FIRST generate the 'free' points #### First the marks, if any. #### The marks must be integers 0 to (ntypes-1) for passing to C Ctypes <- if(mtype) 0:(ntypes-1) else 0 Cmarks <- if(!mtype) 0 else switch(start$given, n = { # n.start given if(control$fixing=="n.each.type") rep.int(Ctypes,n.start) else sample(Ctypes,npts.free,TRUE,ptypes) }, x = { # x.start given as.integer(marks(x.start, dfok=FALSE))-1L }, stop("internal error: start$given unrecognised") ) # # Then the x, y coordinates # switch(start$given, x = { x <- x.start$x y <- x.start$y }, n = { xy <- if(!trendy) runifpoint(npts.free, w.sim, ...) else rpoint.multi(npts.free, trend, tmax, factor(Cmarks,levels=Ctypes), w.sim, ...) 
x <- xy$x y <- xy$y }) ## APPEND the free points AFTER the conditioning points if(condtype != "none") { x <- c(x.condpp$x, x) y <- c(x.condpp$y, y) if(mtype) Cmarks <- c(as.integer(marks(x.condpp))-1L, Cmarks) } if(!is.null(overrideXstart)) { #' override the previous data x <- overrideXstart$x y <- overrideXstart$y if(mtype) Cmarks <- as.integer(marks(overrideXstart))-1L } # decide whether to activate visual debugger if(snoop) { Xinit <- ppp(x, y, window=w.sim) if(mtype) marks(Xinit) <- Cmarks + 1L if(verbose) cat("\nCreating debugger environment..") snoopenv <- rmhSnoopEnv(Xinit=Xinit, Wclip=w.clip, R=reach(model)) if(verbose) cat("Done.\n") } else snoopenv <- "none" ####################################################################### # Set up C call ###################################################################### # Determine the name of the cif used in the C code C.id <- model$C.id ncif <- length(C.id) # Get the parameters in C-ese ipar <- model$C.ipar iparlist <- if(ncif == 1) list(ipar) else model$C.iparlist iparlen <- lengths(iparlist) beta <- model$internal$beta # Absorb the constants or vectors `iota' and 'ptypes' into the beta parameters beta <- (iota/ptypes) * beta # Algorithm control parameters p <- control$p q <- control$q nrep <- control$nrep # fixcode <- control$fixcode # fixing <- control$fixing fixall <- control$fixall nverb <- control$nverb saving <- control$saving nsave <- control$nsave nburn <- control$nburn track <- control$track thin <- control$internal$thin pstage <- control$pstage %orifnull% "start" if(pstage == "block" && !saving) pstage <- "start" temper <- FALSE invertemp <- 1.0 if(verbose) cat("Ready to simulate. 
") storage.mode(ncif) <- "integer" storage.mode(C.id) <- "character" storage.mode(beta) <- "double" storage.mode(ipar) <- "double" storage.mode(iparlen) <- "integer" storage.mode(period) <- "double" storage.mode(ntypes) <- "integer" storage.mode(nrep) <- "integer" storage.mode(p) <- storage.mode(q) <- "double" storage.mode(nverb) <- "integer" storage.mode(x) <- storage.mode(y) <- "double" storage.mode(Cmarks) <- "integer" storage.mode(fixall) <- "integer" storage.mode(npts.cond) <- "integer" storage.mode(track) <- "integer" storage.mode(thin) <- "integer" storage.mode(temper) <- "integer" storage.mode(invertemp) <- "double" if(pstage == "start" || !saving) { #' generate all proposal points now. if(verbose) cat("Generating proposal points...") #' If the pattern is multitype, generate the mark proposals (0 to ntypes-1) Cmprop <- if(mtype) sample(Ctypes,nrep,TRUE,prob=ptypes) else 0 storage.mode(Cmprop) <- "integer" #' Generate the ``proposal points'' in the expanded window. xy <- if(trendy) { rpoint.multi(nrep,trend,tmax, factor(Cmprop, levels=Ctypes), w.sim, ..., warn=FALSE) } else runifpoint(nrep, w.sim, warn=FALSE) xprop <- xy$x yprop <- xy$y storage.mode(xprop) <- storage.mode(yprop) <- "double" } if(!saving) { # ////////// Single block ///////////////////////////////// nrep0 <- 0 storage.mode(nrep0) <- "integer" # Call the Metropolis-Hastings C code: if(verbose) cat("Running Metropolis-Hastings.\n") out <- .Call("xmethas", ncif, C.id, beta, ipar, iparlen, period, xprop, yprop, Cmprop, ntypes, nrep, p, q, nverb, nrep0, x, y, Cmarks, npts.cond, fixall, track, thin, snoopenv, temper, invertemp, PACKAGE="spatstat") # Extract the point pattern returned from C X <- ppp(x=out[[1L]], y=out[[2L]], window=w.state, check=FALSE) if(mtype) { # convert integer marks from C to R marx <- factor(out[[3L]], levels=0:(ntypes-1)) # then restore original type levels levels(marx) <- types # glue to points marks(X) <- marx } # Now clip the pattern to the ``clipping'' window: 
if(!overrideclip && !control$expand$force.noexp) X <- X[w.clip] # Extract transition history: if(track) { usedout <- if(mtype) 3 else 2 proptype <- factor(out[[usedout+1]], levels=1:3, labels=c("Birth", "Death", "Shift")) accepted <- as.logical(out[[usedout+2]]) History <- data.frame(proposaltype=proptype, accepted=accepted) if(length(out) >= usedout + 4) { # history includes numerator & denominator of Hastings ratio numerator <- as.double(out[[usedout + 3]]) denominator <- as.double(out[[usedout + 4]]) History <- cbind(History, data.frame(numerator=numerator, denominator=denominator)) } } } else { # ////////// Multiple blocks ///////////////////////////////// # determine length of each block of simulations nblocks <- as.integer(1L + ceiling((nrep - nburn)/nsave)) block <- c(nburn, rep.int(nsave, nblocks-1L)) block[nblocks] <- block[nblocks] - (sum(block)-nrep) block <- block[block >= 1L] nblocks <- length(block) blockend <- cumsum(block) # set up list to contain the saved point patterns Xlist <- vector(mode="list", length=nblocks) # Call the Metropolis-Hastings C code repeatedly: xprev <- x yprev <- y Cmarksprev <- Cmarks # thinFALSE <- as.integer(FALSE) storage.mode(thinFALSE) <- "integer" # ................ loop ......................... 
for(I in 1:nblocks) { # number of iterations for this block nrepI <- block[I] storage.mode(nrepI) <- "integer" # number of previous iterations nrep0 <- if(I == 1) 0 else blockend[I-1] storage.mode(nrep0) <- "integer" # Generate or extract proposals switch(pstage, start = { #' extract proposals from previously-generated vectors if(verbose) cat("Extracting proposal points...") seqI <- 1:nrepI xpropI <- xprop[seqI] ypropI <- yprop[seqI] CmpropI <- Cmprop[seqI] storage.mode(xpropI) <- storage.mode(ypropI) <- "double" storage.mode(CmpropI) <- "integer" }, block = { # generate 'nrepI' random proposals if(verbose) cat("Generating proposal points...") #' If the pattern is multitype, generate the mark proposals CmpropI <- if(mtype) sample(Ctypes,nrepI,TRUE,prob=ptypes) else 0 storage.mode(CmpropI) <- "integer" #' Generate the ``proposal points'' in the expanded window. xy <- if(trendy) { rpoint.multi(nrepI,trend,tmax, factor(Cmprop, levels=Ctypes), w.sim, ..., warn=FALSE) } else runifpoint(nrepI, w.sim, warn=FALSE) xpropI <- xy$x ypropI <- xy$y storage.mode(xpropI) <- storage.mode(ypropI) <- "double" }) # no thinning in subsequent blocks if(I > 1) thin <- thinFALSE #' call if(verbose) cat("Running Metropolis-Hastings.\n") out <- .Call("xmethas", ncif, C.id, beta, ipar, iparlen, period, xpropI, ypropI, CmpropI, ntypes, nrepI, p, q, nverb, nrep0, xprev, yprev, Cmarksprev, npts.cond, fixall, track, thin, snoopenv, temper, invertemp, PACKAGE = "spatstat") # Extract the point pattern returned from C X <- ppp(x=out[[1L]], y=out[[2L]], window=w.state, check=FALSE) if(mtype) { # convert integer marks from C to R marx <- factor(out[[3L]], levels=0:(ntypes-1)) # then restore original type levels levels(marx) <- types # glue to points marks(X) <- marx } # Now clip the pattern to the ``clipping'' window: if(!overrideclip && !control$expand$force.noexp) X <- X[w.clip] # commit to list Xlist[[I]] <- X # Extract transition history: if(track) { usedout <- if(mtype) 3 else 2 proptype <- 
factor(out[[usedout+1]], levels=1:3, labels=c("Birth", "Death", "Shift")) accepted <- as.logical(out[[usedout+2]]) HistoryI <- data.frame(proposaltype=proptype, accepted=accepted) if(length(out) >= usedout + 4) { # history includes numerator & denominator of Hastings ratio numerator <- as.double(out[[usedout + 3]]) denominator <- as.double(out[[usedout + 4]]) HistoryI <- cbind(HistoryI, data.frame(numerator=numerator, denominator=denominator)) } # concatenate with histories of previous blocks History <- if(I == 1) HistoryI else rbind(History, HistoryI) } # update 'previous state' xprev <- out[[1L]] yprev <- out[[2L]] Cmarksprev <- if(!mtype) 0 else out[[3]] storage.mode(xprev) <- storage.mode(yprev) <- "double" storage.mode(Cmarksprev) <- "integer" if(pstage == "start") { #' discard used proposals xprop <- xprop[-seqI] yprop <- yprop[-seqI] Cmprop <- Cmprop[-seqI] } } # .............. end loop ............................... # Result of simulation is final state 'X' # Tack on the list of intermediate states names(Xlist) <- paste("Iteration", as.integer(blockend), sep="_") attr(X, "saved") <- as.solist(Xlist) } # Append to the result information about how it was generated. 
if(kitchensink) { attr(X, "info") <- InfoList attr(X, "seed") <- saved.seed } if(track) attr(X, "history") <- History return(X) } # helper function summarise.trend <- local({ # main function summarise.trend <- function(trend, w, a=area(w)) { tlist <- if(is.function(trend) || is.im(trend)) list(trend) else trend return(lapply(tlist, summarise1, w=w, a=a)) } # summarise1 <- function(x, w, a) { if(is.numeric(x)) { mini <- maxi <- x integ <- a*x } else { Z <- as.im(x, w)[w, drop=FALSE] ran <- range(Z) mini <- ran[1L] maxi <- ran[2L] integ <- integral.im(Z) } return(list(min=mini, max=maxi, integral=integ)) } summarise.trend }) spatstat/R/polygood.R0000644000176200001440000001354613115271120014277 0ustar liggesusers#' #' polygood.R #' #' Check validity of polygon data #' #' $Revision: 1.2 $ $Date: 2017/06/05 10:31:58 $ #' #' check validity of a polygonal owin owinpolycheck <- function(W, verbose=TRUE) { verifyclass(W, "owin") stopifnot(W$type == "polygonal") # extract stuff B <- W$bdry npoly <- length(B) outerframe <- owin(W$xrange, W$yrange) # can't use as.rectangle here; we're still checking validity boxarea.mineps <- area.owin(outerframe) * (1 - 0.00001) # detect very large datasets BS <- object.size(B) blowbyblow <- verbose & (BS > 1e4 || npoly > 20) # answer <- TRUE notes <- character(0) err <- character(0) # check for duplicated points, self-intersection, outer frame if(blowbyblow) { cat(paste("Checking", npoly, ngettext(npoly, "polygon...", "polygons..."))) pstate <- list() } dup <- self <- is.box <- logical(npoly) for(i in 1:npoly) { if(blowbyblow && npoly > 1L) pstate <- progressreport(i, npoly, state=pstate) Bi <- B[[i]] # check for duplicated vertices dup[i] <- as.logical(anyDuplicated(ppp(Bi$x, Bi$y, window=outerframe, check=FALSE))) if(dup[i] && blowbyblow) message(paste("Polygon", i, "contains duplicated vertices")) # check for self-intersection self[i] <- xypolyselfint(B[[i]], proper=TRUE, yesorno=TRUE) if(self[i] && blowbyblow) message(paste("Polygon", 
i, "is self-intersecting"))
    # check whether one of the current boundary polygons
    # is the bounding box itself (with + sign)
    is.box[i] <- (length(Bi$x) == 4) && (Area.xypolygon(Bi) >= boxarea.mineps)
  }
  if(blowbyblow)
    cat("done.\n")
  # report any polygons with duplicated vertices
  if((ndup <- sum(dup)) > 0) {
    whinge <- paste(ngettext(ndup, "Polygon", "Polygons"),
                    if(npoly == 1L) NULL else commasep(which(dup)),
                    ngettext(ndup, "contains", "contain"),
                    "duplicated vertices")
    notes <- c(notes, whinge)
    err <- c(err, "duplicated vertices")
    if(verbose) message(whinge)
    answer <- FALSE
  }
  # report any self-intersecting polygons
  if((nself <- sum(self)) > 0) {
    whinge <- paste(ngettext(nself, "Polygon", "Polygons"),
                    if(npoly == 1L) NULL else commasep(which(self)),
                    ngettext(nself, "is", "are"),
                    "self-intersecting")
    notes <- c(notes, whinge)
    if(verbose) message(whinge)
    err <- c(err, "self-intersection")
    answer <- FALSE
  }
  # at most one boundary polygon may coincide with the outer frame
  if(sum(is.box) > 1L) {
    answer <- FALSE
    whinge <- paste("Polygons", commasep(which(is.box)),
                    "coincide with the outer frame")
    notes <- c(notes, whinge)
    err <- c(err, "polygons duplicating the outer frame")
  }
  # check for crossings between different polygons
  cross <- matrix(FALSE, npoly, npoly)
  if(npoly > 1L) {
    if(blowbyblow) {
      cat(paste("Checking for cross-intersection between",
                npoly, "polygons..."))
      pstate <- list()
    }
    P <- lapply(B, xypolygon2psp, w=outerframe, check=FALSE)
    for(i in seq_len(npoly-1L)) {
      if(blowbyblow)
        pstate <- progressreport(i, npoly-1L, state=pstate)
      Pi <- P[[i]]
      for(j in (i+1L):npoly) {
        # a polygon equal to the outer frame cannot cross anything
        crosses <- if(is.box[i] || is.box[j]) FALSE else {
          anycrossing.psp(Pi, P[[j]])
        }
        cross[i,j] <- cross[j,i] <- crosses
        if(crosses) {
          answer <- FALSE
          whinge <- paste("Polygons", i, "and", j, "cross over")
          notes <- c(notes, whinge)
          if(verbose) message(whinge)
          err <- c(err, "overlaps between polygons")
        }
      }
    }
    if(blowbyblow)
      cat("done.\n")
  }
  err <- unique(err)
  attr(answer, "notes") <- notes
  attr(answer, "err") <- err
  return(answer)
}

#' check for self-intersections in an xypolygon
xypolyselfint <- function(p, eps=.Machine$double.eps,
                          proper=FALSE,
yesorno=FALSE, checkinternal=FALSE) {
  verify.xypolygon(p)
  n <- length(p$x)
  verbose <- (n > 1000)
  if(verbose) cat(paste("[Checking polygon with", n, "edges..."))
  x0 <- p$x
  y0 <- p$y
  # edge vectors (indices wrap back to the first vertex)
  dx <- diff(x0[c(1:n,1L)])
  dy <- diff(y0[c(1:n,1L)])
  if(yesorno) {
    # get a yes-or-no answer
    answer <- .C("xypsi",
                 n=as.integer(n),
                 x0=as.double(x0),
                 y0=as.double(y0),
                 dx=as.double(dx),
                 dy=as.double(dy),
                 xsep=as.double(2 * max(abs(dx))),
                 ysep=as.double(2 * max(abs(dy))),
                 eps=as.double(eps),
                 proper=as.integer(proper),
                 answer=as.integer(integer(1L)),
                 PACKAGE = "spatstat")$answer
    if(verbose) cat("]\n")
    return(answer != 0)
  }
  # full computation of all pairwise edge intersections (n x n matrices)
  out <- .C("Cxypolyselfint",
            n=as.integer(n),
            x0=as.double(x0),
            y0=as.double(y0),
            dx=as.double(dx),
            dy=as.double(dy),
            eps=as.double(eps),
            xx=as.double(numeric(n^2)),
            yy=as.double(numeric(n^2)),
            ti=as.double(numeric(n^2)),
            tj=as.double(numeric(n^2)),
            ok=as.integer(integer(n^2)),
            PACKAGE = "spatstat")
  uhoh <- (matrix(out$ok, n, n) != 0)
  if(proper) {
    # ignore cases where two vertices coincide
    ti <- matrix(out$ti, n, n)[uhoh]
    tj <- matrix(out$tj, n, n)[uhoh]
    i.is.vertex <- (abs(ti) < eps) | (abs(ti - 1) < eps)
    j.is.vertex <- (abs(tj) < eps) | (abs(tj - 1) < eps)
    dup <- i.is.vertex & j.is.vertex
    uhoh[uhoh] <- !dup
  }
  if(checkinternal && any(uhoh != t(uhoh)))
    warning("Internal error: incidence matrix is not symmetric")
  xx <- matrix(out$xx, n, n)
  yy <- matrix(out$yy, n, n)
  # keep each intersection point once (upper triangle only)
  uptri <- (row(uhoh) < col(uhoh))
  xx <- as.vector(xx[uhoh & uptri])
  yy <- as.vector(yy[uhoh & uptri])
  result <- list(x=xx, y=yy)
  if(verbose) cat("]\n")
  return(result)
}
spatstat/R/sharpen.R0000755000176200001440000000406213115271120014077 0ustar liggesusers#
# sharpen.R
#
# $Revision: 1.6 $ $Date: 2013/08/29 03:52:17 $
#
# Generic dispatcher for data sharpening
sharpen <- function(X, ...)
{
  UseMethod("sharpen")
}

# Data sharpening of a point pattern:
# each point is moved to the kernel-smoothed average of the coordinates
# of nearby points; optional edge correction adjusts for bias near the
# window boundary.
sharpen.ppp <- function(X, sigma=NULL, ..., varcov=NULL, edgecorrect=FALSE) {
  stopifnot(is.ppp(X))
  ## smoothed x and y coordinate values, evaluated at the data points
  Yx <- Smooth(X %mark% X$x, at="points", sigma=sigma, varcov=varcov,
               edge=TRUE)
  Yy <- Smooth(X %mark% X$y, at="points", sigma=sigma, varcov=varcov,
               edge=TRUE)
  # trap NaN etc
  nbad <- sum(!(is.finite(Yx) & is.finite(Yy)))
  if(nbad > 0)
    stop(paste(nbad, ngettext(nbad, "point is", "points are"),
               "undefined due to numerical problems;",
               "smoothing parameter is probably too small"))
  #
  W <- as.owin(X)
  if(edgecorrect) {
    # convolve x and y coordinate functions with kernel
    xim <- as.im(function(x,y){x}, W)
    yim <- as.im(function(x,y){y}, W)
    xblur <- blur(xim, sigma=sigma, varcov=varcov, normalise=TRUE, ...)
    yblur <- blur(yim, sigma=sigma, varcov=varcov, normalise=TRUE, ...)
    # evaluate at data locations
    xx <- safelookup(xblur, X, warn=FALSE)
    yy <- safelookup(yblur, X, warn=FALSE)
    # estimated vector bias of sharpening procedure
    xbias <- xx - X$x
    ybias <- yy - X$y
    # adjust
    Yx <- Yx - xbias
    Yy <- Yy - ybias
    # check this does not place points outside window
    if(any(uhoh <- !inside.owin(Yx, Yy, W))) {
      # determine mass of edge effect
      edgeim <- blur(as.im(W), sigma=sigma, varcov=varcov,
                     normalise=FALSE, ...)
edg <- safelookup(edgeim, X[uhoh], warn=FALSE)
      # contract bias correction
      Yx[uhoh] <- (1 - edg) * X$x[uhoh] + edg * Yx[uhoh]
      Yy[uhoh] <- (1 - edg) * X$y[uhoh] + edg * Yy[uhoh]
    }
    # check again
    if(any(nbg <- !inside.owin(Yx, Yy, W))) {
      # give up: revert these points to their original coordinates
      Yx[nbg] <- X$x[nbg]
      Yy[nbg] <- X$y[nbg]
    }
  }
  # make point pattern
  Y <- ppp(Yx, Yy, marks=marks(X), window=W)
  # tack on smoothing information
  attr(Y, "sigma") <- sigma
  attr(Y, "varcov") <- varcov
  attr(Y, "edgecorrected") <- edgecorrect
  return(Y)
}
spatstat/R/predict.ppm.R0000755000176200001440000006374013115271120014674 0ustar liggesusers#
# predict.ppm.S
#
# $Revision: 1.100 $ $Date: 2016/12/19 09:13:07 $
#
#    predict.ppm()
#    From fitted model obtained by ppm(),
#    evaluate the fitted trend or conditional intensity
#    at a grid/list of other locations
#
#
# -------------------------------------------------------------------
predict.ppm <- local({
  ##
  ## extract undocumented/outdated arguments, and trap others
  ##
  xtract <- function(..., newdata=NULL, sumobj=NULL, E=NULL, total=NULL,
                     getoutofjail=FALSE) {
    if(!is.null(newdata))
      warning(paste("The use of the argument", sQuote("newdata"),
                    "is out-of-date. See help(predict.ppm)"))
    if(!is.null(total))
      message(paste("The use of the argument", sQuote("total"),
                    "is out-of-date.
See help(predict.ppm)")) trap.extra.arguments(..., .Context="In predict.ppm") return(list(sumobj=sumobj, E=E, total=total, getoutofjail=getoutofjail)) } ## ## confidence/prediction intervals for number of points predconfPois <- function(region, object, level, what=c("estimate", "se", "confidence", "prediction")) { what <- match.arg(what) stopifnot(0 < level && level < 1) lam <- predict(object, window=region) mu.hat <- integral.im(lam) if(what == "estimate") return(mu.hat) mo <- model.images(object, W=as.owin(lam)) ZL <- unlist(lapply(mo, function(z, w) integral.im(eval.im(z * w)), w = lam)) ZL <- matrix(ZL, nrow=1) var.muhat <- as.numeric(ZL %*% vcov(object) %*% t(ZL)) sd.muhat <- sqrt(var.muhat) if(what == "se") return(sd.muhat) alpha2 <- (1-level)/2 pp <- sort(c(alpha2, 1-alpha2)) out <- switch(what, confidence = mu.hat + qnorm(pp) * sd.muhat, prediction = qmixpois(pp, mu.hat, sd.muhat, I)) names(out) <- paste0(signif(100 * pp, 3), "%") out } typepublic <- c("trend", "cif", "intensity", "count") typeaccept <- c(typepublic, "lambda", "se", "SE", "covariates") typeuse <- c(typepublic, "cif", "se", "se", "covariates") predict.ppm <- function(object, window=NULL, ngrid=NULL, locations=NULL, covariates=NULL, type=c("trend", "cif", "intensity", "count"), se=FALSE, interval=c("none", "confidence", "prediction"), level = 0.95, X=data.ppm(object), correction, ..., new.coef=NULL, check=TRUE, repair=TRUE) { interval <- match.arg(interval) ## extract undocumented arguments xarg <- xtract(...) 
sumobj <- xarg$sumobj E <- xarg$E total <- xarg$total getoutofjail <- xarg$getoutofjail ## match 'type' argument including 'legacy' options seonly <- FALSE if(missing(type)) type <- type[1] else { if(length(type) > 1) stop("Argument 'type' should be a single value") mt <- pmatch(type, typeaccept) if(is.na(mt)) stop("Argument 'type' should be one of", commasep(sQuote(typepublic), " or ")) type <- typeuse[mt] if(type == "se") { if(!getoutofjail) message(paste("Outdated syntax:", "type='se' should be replaced by se=TRUE;", "then the standard error is predict(...)$se")) type <- "trend" se <- TRUE seonly <- TRUE } } if(!is.null(total)) { message("Outdated argument 'total': use 'window' and set type='count'") type <- "count" if(!is.logical(total)) window <- if(is.tess(total)) total else as.owin(total) } ## model <- object verifyclass(model, "ppm") ## if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) } if(missing(correction) || is.null(correction)) correction <- object$correction fitcoef <- coef(object) if(!is.null(new.coef)) { ## validate coefs if(length(new.coef) != length(fitcoef)) stop(paste("Argument new.coef has wrong length", length(new.coef), ": should be", length(fitcoef))) coeffs <- new.coef } else { coeffs <- fitcoef } ## find out what kind of model it is if(is.null(sumobj)) sumobj <- summary(model, quick="entries") # undocumented hack! 
# stationary <- sumobj$stationary poisson <- sumobj$poisson marked <- sumobj$marked multitype <- sumobj$multitype notrend <- sumobj$no.trend changedcoef <- sumobj$changedcoef || !is.null(new.coef) trivial <- poisson && notrend need.covariates <- sumobj$uses.covars covnames.needed <- sumobj$covars.used if(sumobj$antiquated) warning("The model was fitted by an out-of-date version of spatstat") ## determine mark space if(marked) { if(!multitype) stop("Prediction not yet implemented for general marked point processes") else types <- levels(marks(sumobj$entries$data)) } ## For Poisson models cif=intensity=trend if(poisson && type %in% c("cif", "intensity")) type <- "trend" ## ............. trap un-implemented cases ................... ## Standard errors not yet available for cif, intensity if(se && type %in% c("cif", "intensity")) stop(paste("Standard error for", type, "is not yet implemented"), call.=FALSE) ## Intervals are only available for unmarked Poisson models if(type == "count" && interval != "none" && (marked || !poisson)) { stop(paste0(interval, " intervals for counts are only implemented for", if(marked) " unmarked" else "", if(!poisson) " Poisson", " models"), call.=FALSE) } if(interval == "prediction" && type != "count") stop("Prediction intervals are only available for type='count'", call.=FALSE) if(interval == "confidence" && type %in% c("intensity", "cif")) stop(paste("Confidence intervals are not yet available for", type), call.=FALSE) estimatename <- if(interval == "none") "estimate" else interval ## ............. start computing ............................. 
## Total count in a region if(type == "count") { ## point or interval estimate, optionally with SE if(is.null(window)) { ## domain of the original data if(!seonly) est <- predconfPois(NULL, model, level, estimatename) if(se) sem <- predconfPois(NULL, model, level, "se") } else if(is.tess(window)) { ## quadrats tilz <- tiles(window) if(!seonly) { est <- unlist(lapply(tilz, predconfPois, object=model, level=level, what=estimatename)) if(interval != "none") # reshape est <- matrix(unlist(est), byrow=TRUE, ncol=2, dimnames=list(names(est), names(est[[1]]))) } if(se) sem <- unlist(lapply(tilz, predconfPois, object=model, level=level, what="se")) } else { ## window if(!seonly) est <- predconfPois(window, model, level, estimatename) if(se) sem <- predconfPois(window, model, level, "se") } if(!se) return(est) if(seonly) return(sem) result <- list(est, sem) names(result) <- c(estimatename, "se") return(result) } ## ..... Predict a spatial function ....... if(interval != "none") { ## Prepare for confidence interval alpha2 <- (1-level)/2 pp <- sort(c(alpha2, 1-alpha2)) ci.names <- paste0(signif(100 * pp, 3), "%") ci.q <- qnorm(pp) } ## determine what kind of output is required: ## (arguments present) (output) ## window, ngrid -> image ## locations (mask) -> image ## locations (image) -> image ## locations (rectangle) -> treat locations as 'window' ## locations (polygonal) -> treat locations as 'window' ## locations (other) -> data frame ## if(is.im(locations)) locations <- as.owin(locations) if(is.null(window) && is.owin(locations) && !is.mask(locations)) { window <- locations locations <- NULL } if(!is.null(ngrid) && !is.null(locations)) stop(paste("Only one of", sQuote("ngrid"), "and", sQuote("locations"), "should be specified")) if(is.null(ngrid) && is.null(locations)) ## use regular grid ngrid <- rev(spatstat.options("npixel")) want.image <- is.null(locations) || is.mask(locations) make.grid <- !is.null(ngrid) ## ############## Determine prediction points 
##################### if(!want.image) { ## (A) list of (x,y) coordinates given by `locations' xpredict <- locations$x ypredict <- locations$y if(is.null(xpredict) || is.null(ypredict)) { xy <- xy.coords(locations) xpredict <- xy$x xpredict <- xy$y } if(is.null(xpredict) || is.null(ypredict)) stop(paste("Don't know how to extract x,y coordinates from", sQuote("locations"))) ## marks if required if(marked) { ## extract marks from data frame `locations' mpredict <- locations$marks if(is.null(mpredict)) stop(paste("The argument", sQuote("locations"), "does not contain a column of marks", "(required since the fitted model", "is a marked point process)")) if(is.factor(mpredict)) { ## verify mark levels match those in model if(!identical(all.equal(levels(mpredict), types), TRUE)) { if(all(levels(mpredict) %in% types)) mpredict <- factor(mpredict, levels=types) else stop(paste("The marks in", sQuote("locations"), "do not have the same levels as", "the marks in the model")) } } else { ## coerce to factor if possible if(all(mpredict %in% types)) mpredict <- factor(mpredict, levels=types) else stop(paste("The marks in", sQuote("locations"), "do not have the same values as the marks in the model")) } } } else { ## (B) pixel grid of points if(!make.grid) ## (B)(i) The grid is given in `locations' masque <- locations else { ## (B)(ii) We have to make the grid ourselves ## Validate ngrid if(!is.null(ngrid)) { if(!is.numeric(ngrid)) stop("ngrid should be a numeric vector") ngrid <- ensure2vector(ngrid) } if(is.null(window)) window <- sumobj$entries$data$window masque <- as.mask(window, dimyx=ngrid) } ## Hack ----------------------------------------------- ## gam with lo() will not allow extrapolation beyond the range of x,y ## values actually used for the fit. 
Check this: tums <- termsinformula(model$trend) if(any( tums == "lo(x)" | tums == "lo(y)" | tums == "lo(x,y)" | tums == "lo(y,x)") ) { ## determine range of x,y used for fit gg <- model$internal$glmdata gxr <- range(gg$x[gg$SUBSET]) gyr <- range(gg$y[gg$SUBSET]) ## trim window to this range masque <- intersect.owin(masque, owin(gxr, gyr)) } ## ------------------------------------ End Hack ## ## Finally, determine x and y vectors for grid rxy <- rasterxy.mask(masque, drop=TRUE) xpredict <- rxy$x ypredict <- rxy$y } ## ################ CREATE DATA FRAME ########################## ## ... to be passed to predict.glm() ## ## First the x, y coordinates if(!marked) newdata <- data.frame(x=xpredict, y=ypredict) else if(!want.image) newdata <- data.frame(x=xpredict, y=ypredict, marks=mpredict) else { ## replicate nt <- length(types) np <- length(xpredict) xpredict <- rep.int(xpredict,nt) ypredict <- rep.int(ypredict,nt) mpredict <- rep.int(types, rep.int(np, nt)) mpredict <- factor(mpredict, levels=types) newdata <- data.frame(x = xpredict, y = ypredict, marks=mpredict) } ## ## Next the external covariates, if any ## if(need.covariates) { if(is.null(covariates)) { ## Extract covariates from fitted model object ## They have to be images. 
oldcov <- model$covariates if(is.null(oldcov)) stop("External covariates are required, and are not available") if(is.data.frame(oldcov)) stop(paste("External covariates are required.", "Prediction is not possible at new locations")) covariates <- oldcov } ## restrict to covariates actually required for formula covariates <- if(is.data.frame(covariates)) { covariates[,covnames.needed, drop=FALSE] } else covariates[covnames.needed] covfunargs <- model$covfunargs covariates.df <- mpl.get.covariates(covariates, list(x=xpredict, y=ypredict), "prediction points", covfunargs) newdata <- cbind(newdata, covariates.df) } ## ###### Set up prediction variables ################################ ## ## Provide SUBSET variable ## if(is.null(newdata$SUBSET)) newdata$SUBSET <- rep.int(TRUE, nrow(newdata)) ## ## Dig out information used in Berman-Turner device ## Vnames: the names for the ``interaction variables'' ## glmdata: the data frame used for the glm fit ## glmfit: the fitted glm object ## if(!trivial) { Vnames <- model$internal$Vnames vnameprefix <- model$internal$vnameprefix glmdata <- getglmdata(model) glmfit <- getglmfit(model) if(object$method=="logi") newdata$.logi.B <- rep(glmdata$.logi.B[1], nrow(newdata)) } ## Undocumented secret exit if(type == "covariates") return(list(newdata=newdata, mask=if(want.image) masque else NULL)) ## ########## COMPUTE PREDICTION ############################## ## ## Compute the predicted value z[i] for each row of 'newdata' ## Store in a vector z and reshape it later ## ## ## ############################################################# needSE <- se || (interval != "none") if(trivial) { ## ########### UNIFORM POISSON PROCESS ##################### lambda <- exp(coeffs[[1]]) if(needSE) { npts <- nobs(model) se.lambda <- lambda/sqrt(npts) } switch(interval, none = { z <- rep.int(lambda, nrow(newdata)) }, confidence = { z <- matrix(lambda + se.lambda * ci.q, byrow=TRUE, nrow=nrow(newdata), ncol=2, dimnames=list(NULL, ci.names)) }, 
stop("Internal error: unreached")) if(se) zse <- rep.int(se.lambda, nrow(newdata)) ## ############################################################## } else if((type %in% c("trend", "intensity")) || poisson) { ## ## ########## COMPUTE TREND ################################### ## ## set explanatory variables to zero ## zeroes <- numeric(nrow(newdata)) for(vn in Vnames) newdata[[vn]] <- zeroes ## ## predict trend ## z <- lambda <- GLMpredict(glmfit, newdata, coeffs, changecoef=changedcoef) ## if(type == "intensity") z <- PoisSaddle(z, fitin(model)) ## if(needSE) { ## extract variance-covariance matrix of parameters vc <- vcov(model) ## compute model matrix fmla <- formula(model) # mf <- model.frame(fmla, newdata, ..., na.action=na.pass) # mm <- model.matrix(fmla, mf, ..., na.action=na.pass) mf <- model.frame(fmla, newdata, na.action=na.pass) mm <- model.matrix(fmla, mf, na.action=na.pass) if(nrow(mm) != nrow(newdata)) stop("Internal error: row mismatch in SE calculation") ## compute relative variance = diagonal of quadratic form vv <- quadform(mm, vc) ## standard error SE <- lambda * sqrt(vv) if(se) zse <- SE if(interval == "confidence") { z <- lambda + outer(SE, ci.q, "*") colnames(z) <- ci.names } } ## ############################################################ } else if(type == "cif" || type =="lambda") { ## ####### COMPUTE FITTED CONDITIONAL INTENSITY ################ ## ## set up arguments inter <- model$interaction if(!missing(X)) stopifnot(is.ppp(X)) W <- as.owin(data.ppm(model)) U <- ppp(newdata$x, y=newdata$y, window=W, check=FALSE) if(marked) marks(U) <- newdata$marks ## determine which prediction points are data points if(is.null(E)) E <- equalpairs(U, X, marked) ## evaluate interaction Vnew <- evalInteraction(X, U, E, inter, correction=correction, check=check) ## Negative infinite values signify cif = zero cif.equals.zero <- matrowany(Vnew == -Inf) ## Insert the potential into the relevant column(s) of `newdata' if(ncol(Vnew) == 1) { ## Potential is real 
valued (Vnew is a column vector) ## Assign values to a column of the same name in newdata newdata[[Vnames]] <- as.vector(Vnew) ## } else if(is.null(avail <- colnames(Vnew))) { ## Potential is vector-valued (Vnew is a matrix) ## with unnamed components. ## Assign the components, in order of their appearance, ## to the columns of newdata labelled Vnames[1], Vnames[2],... for(i in seq_along(Vnames)) newdata[[Vnames[i] ]] <- Vnew[,i] ## } else { ## Potential is vector-valued (Vnew is a matrix) ## with named components. ## Match variables by name if(all(Vnames %in% avail)) { for(vn in Vnames) newdata[[ vn ]] <- Vnew[ , vn] } else if(all(Vnames %in% (Pavail <- paste0(vnameprefix, avail)))) { for(vn in Vnames) newdata[[ vn ]] <- Vnew[ , match(vn, Pavail)] } else stop(paste( "Internal error: unable to match names", "of available interaction terms", commasep(sQuote(avail)), "to required interaction terms", commasep(sQuote(Vnames)) ), call.=FALSE) } ## invoke predict.glm or compute prediction z <- GLMpredict(glmfit, newdata, coeffs, changecoef=changedcoef) ## reset to zero if potential was zero if(any(cif.equals.zero)) z[cif.equals.zero] <- 0 ## ############################################################### } else stop(paste("Unrecognised type", sQuote(type))) ## ############################################################### ## ## reshape the result ## if(!want.image) { if(!se) { out <- as.vector(z) } else if(seonly) { out <- as.vector(zse) } else { out <- list(as.vector(z), as.vector(zse)) names(out) <- c(estimatename, "se") } } else { ## make an image of the right shape and value imago <- as.im(masque, value=1.0) if(!marked && interval=="none") { ## single image if(!se) { out <- imago ## set entries out[] <- z } else if(seonly) { out <- imago out[] <- zse } else { est <- std <- imago est[] <- z std[] <- zse out <- list(est, std) names(out) <- c(estimatename, "se") } } else if(interval != "none") { ## list of 2 images for CI if(!seonly) { hi <- lo <- imago hi[] <- z[,1] 
lo[] <- z[,2] est <- solist(hi, lo) names(est) <- ci.names } if(se) { std <- imago std[] <- zse } if(!se) { out <- est } else if(seonly) { out <- std } else { out <- list(est, std) names(out) <- c(estimatename, "se") } } else { ## list of images, one for each level of marks out <- list() for(i in seq_along(types)) { outi <- imago ## set entries outi[] <- z[newdata$marks == types[i]] out[[i]] <- outi } out <- as.solist(out) names(out) <- as.character(types) } } ## ## FINISHED ## return(out) } predict.ppm }) #################################################################### # # compute pointwise uncertainty of fitted intensity # model.se.image <- function(fit, W=as.owin(fit), ..., what="sd") { if(!is.poisson.ppm(fit)) stop("Only implemented for Poisson point process models", call.=FALSE) what <- pickoption("option", what, c(sd="sd", var="var", cv="cv", CV="cv", ce="ce", CE="ce")) W <- as.mask(as.owin(W)) # variance-covariance matrix of coefficients vc <- vcov(fit) np <- dim(vc)[1] # extract sufficient statistic for each coefficient mm <- model.images(fit, W, ...) 
# compute fitted intensity
  lam <- predict(fit, locations=W)
  # initialise resulting image
  U <- as.im(W)
  U[] <- 0
  # compute pointwise matrix product, assuming vc is symmetric
  for(i in 1:np) {
    Si <- mm[[i]]
    aii <- vc[i,i]
    U <- eval.im(U + aii * Si^2)
    if(i > 1) {
      # off-diagonal terms, counted twice by symmetry
      for(j in 1:(i-1)) {
        Sj <- mm[[j]]
        aij <- vc[i,j]
        twoaij <- 2 * aij
        U <- eval.im(U + twoaij * Si * Sj)
      }
    }
  }
  # the matrix product is the relative variance (CV)
  if(what=="cv")
    return(U)
  # relative sd
  if(what=="ce") {
    U <- eval.im(sqrt(U))
    return(U)
  }
  # multiply by squared intensity to obtain variance
  U <- eval.im(U * lam^2)
  # variance
  if(what=="var")
    return(U)
  # compute SD and return
  U <- eval.im(sqrt(U))
  return(U)
}

# Evaluate predictions from the fitted glm/gam 'fit' at new data,
# optionally substituting a modified coefficient vector 'coefs'.
# type = "response" gives fitted values, "link" the linear predictor.
GLMpredict <- function(fit, data, coefs, changecoef=TRUE,
                       type=c("response", "link")) {
  ok <- is.finite(coefs)
  type <- match.arg(type)
  if(!changecoef && all(ok)) {
    answer <- predict(fit, newdata=data, type=type)
  } else {
    # do it by hand
    fmla <- formula(fit)
    data$.mpl.Y <- 1
    fram <- model.frame(fmla, data=data, na.action=NULL)
    # linear predictor
    mm <- model.matrix(fmla, data=fram)
    # ensure all required coefficients are present
    coefs <- fill.coefs(coefs, colnames(mm))
    ok <- is.finite(coefs)
    #
    if(all(ok)) {
      eta <- as.vector(mm %*% coefs)
    } else {
      #' ensure 0 * anything = 0
      eta <- as.vector(mm[ , ok, drop=FALSE] %*% coefs[ok])
      for(j in which(!ok)) {
        mmj <- mm[, j]
        nonzero <- is.na(mmj) | (mmj != 0)
        if(any(nonzero))
          eta[nonzero] <- eta[nonzero] + mmj[nonzero] * coefs[j]
      }
    }
    # offset
    mo <- model.offset(fram)
    if(!is.null(mo)) {
      if(is.matrix(mo))
        mo <- apply(mo, 1, sum)
      eta <- mo + eta
    }
    switch(type,
           link = {
             answer <- eta
           },
           response = {
             linkinv <- family(fit)$linkinv
             answer <- linkinv(eta)
           })
  }
  # Convert from fitted logistic prob. to lambda for logistic fit
  if(type == "response" && family(fit)$family=="binomial")
    answer <- fit$data$.logi.B[1] * answer/(1-answer)
  return(answer)
}

# An 'equalpairs' matrix E is needed in the ppm class
# to determine which quadrature points and data points are identical
# (not just which quadrature points are data points).
# It is a two-column matrix specifying all the identical pairs.
# The first column gives the index of a data point (in the data pattern X)
# and the second column gives the corresponding index in U.

# The following function determines the equal pair information
# from the coordinates (and marks) of U and X alone;
# it should be used only if we can't figure out this information otherwise.

equalpairs <- function(U, X, marked=FALSE) {
  nn <- nncross(U, X)
  coincides <- (nn$dist == 0)
  Xind <- nn$which[coincides]
  Uind <- which(coincides)
  if(marked) {
    # keep only pairs whose marks also agree
    samemarks <- (marks(X)[Xind] == marks(U)[Uind])
    Xind <- Xind[samemarks]
    Uind <- Uind[samemarks]
  }
  return(cbind(Xind, Uind))
}

# Expand/reorder the coefficient vector 'coefs' so that it has exactly
# the entries named in 'required'; entries not supplied become zero.
fill.coefs <- function(coefs, required) {
  # 'coefs' should contain all the 'required' values
  coefsname <- deparse(substitute(coefs))
  nama <- names(coefs)
  if(is.null(nama)) {
    #' names cannot be matched
    if(length(coefs) != length(required))
      stop(paste("The unnamed argument", sQuote(coefsname),
                 "has", length(coefs), "entries, but",
                 length(required), "are required"),
           call.=FALSE)
    # blithely assume they match 1-1
    names(coefs) <- required
    return(coefs)
  }
  stopifnot(is.character(required))
  if(identical(nama, required))
    return(coefs)
  inject <- match(nama, required)
  if(any(notneeded <- is.na(inject))) {
    warning(paste("Internal glitch: some coefficients were not required:",
                  commasep(sQuote(nama[notneeded]))),
            call.=FALSE)
    coefs <- coefs[!notneeded]
    nama <- names(coefs)
    inject <- match(nama, required)
  }
  y <- numeric(length(required))
  names(y) <- required
  y[inject] <- coefs
  return(y)
}
spatstat/R/dclftest.R0000644000176200001440000003162313115225157014256 0ustar liggesusers#
# dclftest.R
#
#
# $Revision: 1.35 $ $Date: 2016/12/30 01:44:07 $
#
# Monte Carlo tests for CSR (etc)
#

# Deprecated alias: 'clf.test' was renamed 'dclf.test'.
clf.test <- function(...) {
  .Deprecated("dclf.test", package="spatstat")
  dclf.test(...)
}

# Diggle-Cressie-Loosmore-Ford test:
# integrated squared deviation (exponent = 2) between the summary
# function of X and its reference.
dclf.test <- function(X, ...,
                      alternative=c("two.sided", "less", "greater"),
                      rinterval=NULL, leaveout=1,
                      scale=NULL, clamp=FALSE,
                      interpolate=FALSE) {
  ## capture the name of X before it is evaluated
  Xname <- short.deparse(substitute(X))
  envelopeTest(X, ..., exponent=2, alternative=alternative,
               rinterval=rinterval, leaveout=leaveout,
               scale=scale, clamp=clamp, interpolate=interpolate,
               Xname=Xname)
}

# Maximum Absolute Deviation test: sup-norm deviation (exponent = Inf).
mad.test <- function(X, ...,
                     alternative=c("two.sided", "less", "greater"),
                     rinterval=NULL, leaveout=1,
                     scale=NULL, clamp=FALSE,
                     interpolate=FALSE) {
  ## capture the name of X before it is evaluated
  Xname <- short.deparse(substitute(X))
  envelopeTest(X, ..., exponent=Inf, alternative=alternative,
               rinterval=rinterval, leaveout=leaveout,
               scale=scale, clamp=clamp, interpolate=interpolate,
               Xname=Xname)
}

## measure deviation of summary function
## leaveout = 0: typically 'ref' is exact theoretical value.
##               Compute raw deviation.
## leaveout = 1: 'ref' is mean of simulations *and* observed.
##               Use algebra to compute leave-one-out deviation.
## leaveout = 2: 'ref' is mean of simulations
##               Use algebra to compute leave-two-out deviation.
# Deviation between a summary function 'x' and a reference 'ref'.
# 'n' is the number of simulations; 'xi' is the observed function
# (defaults to 'x') used in the leave-two-out jackknife correction.
Deviation <- function(x, ref, leaveout, n, xi=x) {
  if(leaveout == 0)
    return(x-ref)
  if(leaveout == 1)
    return((x-ref) * (n+1)/n)
  # leaveout = 2: subtract the jackknife mean
  # (the reference recomputed with 'xi' removed)
  jackmean <- (n * ref - xi)/(n-1)
  return(x - jackmean)
}

## Evaluate signed or absolute deviation,
## taking account of alternative hypothesis and possible scaling
## (Large positive values always favorable to alternative)

RelevantDeviation <- local({

  # positive part, preserving matrix shape when input is a matrix
  positivepart <- function(x) {
    d <- dim(x)
    y <- pmax(0, x)
    if(!is.null(d))
      y <- matrix(y, d[1L], d[2L])
    return(y)
  }

  negativepart <- function(x) positivepart(-x)

  RelevantDeviation <- function(x, alternative, clamp=FALSE, scaling=NULL) {
    if(!is.null(scaling)) x <- x/scaling
    switch(alternative,
           two.sided = abs(x),
           less = if(clamp) negativepart(x) else -x,
           greater = if(clamp) positivepart(x) else x)
  }

  RelevantDeviation
})

## workhorse function
envelopeTest <- function(X, ...,
                         exponent=1,
                         alternative=c("two.sided", "less", "greater"),
                         rinterval=NULL,
                         leaveout=1,
                         scale=NULL,
                         clamp=FALSE,
                         tie.rule=c("randomise","mean"),
                         interpolate=FALSE,
                         save.interpolant = TRUE,
                         save.envelope = savefuns || savepatterns,
                         savefuns = FALSE,
                         savepatterns = FALSE,
                         Xname=NULL,
                         verbose=TRUE) {
  if(is.null(Xname)) Xname <- short.deparse(substitute(X))
  tie.rule <- match.arg(tie.rule)
  alternative <- match.arg(alternative)
  if(!(leaveout %in% 0:2))
    stop("Argument leaveout should equal 0, 1 or 2")
  force(save.envelope)
  check.1.real(exponent)
  explain.ifnot(exponent >= 0)
  deviationtype <- switch(alternative,
                          two.sided = "absolute",
                          greater = if(clamp) "positive" else "signed",
                          less = if(clamp) "negative" else "signed")
  deviationblurb <- paste(deviationtype, "deviation")
  ## compute or extract simulated functions
  X <- envelope(X, ..., savefuns=TRUE, savepatterns=savepatterns,
                Yname=Xname, verbose=verbose)
  Y <- attr(X, "simfuns")
  ## extract values
  r <- with(X, .x)
  obs <- with(X, .y)
  sim <- as.matrix(as.data.frame(Y))[, -1L]
  nsim <- ncol(sim)
  nr <- length(r)
  ## choose function as reference
  has.theo <- ("theo" %in% names(X))
  use.theo <-
identical(attr(X, "einfo")$use.theory, TRUE) if(use.theo && !has.theo) warning("No theoretical function available; use.theory ignored") if(use.theo && has.theo) { theo.used <- TRUE reference <- with(X, theo) leaveout <- 0 } else { theo.used <- FALSE if(leaveout == 2) { ## use sample mean of simulations only reference <- apply(sim, 1L, mean, na.rm=TRUE) } else { ## use sample mean of simulations *and* observed reference <- apply(cbind(sim, obs), 1L, mean, na.rm=TRUE) } } ## determine interval of r values for computation rok <- r if(!is.null(rinterval)) { check.range(rinterval) if(max(r) < rinterval[2L]) { oldrinterval <- rinterval rinterval <- intersect.ranges(rinterval, range(r), fatal=FALSE) if(is.null(rinterval)) stop(paste("The specified rinterval", prange(oldrinterval), "has empty intersection", "with the range of r values", prange(range(r)), "computed by the summary function"), call.=FALSE) if(verbose) warning(paste("The interval", prange(oldrinterval), "is too large for the available data;", "it has been trimmed to", prange(rinterval))) } ok <- (rinterval[1L] <= r & r <= rinterval[2L]) nr <- sum(ok) if(nr == 0) { ## rinterval is very short: pick nearest r value ok <- which.min(abs(r - mean(rinterval))) nr <- 1L } rok <- r[ok] obs <- obs[ok] sim <- sim[ok, , drop=FALSE] reference <- reference[ok] } else { rinterval <- range(r) bad <- !matrowall(is.finite(as.matrix(X))) if(any(bad)) { if(bad[1L] && !any(bad[-1L])) { ## ditch r = 0 rinterval <- c(r[2L], max(r)) if(verbose) warning(paste("Some function values were infinite or NaN", "at distance r = 0;", "interval of r values was reset to", prange(rinterval))) ok <- (rinterval[1L] <= r & r <= rinterval[2L]) rok <- r[ok] obs <- obs[ok] sim <- sim[ok, ] reference <- reference[ok] nr <- sum(ok) } else { ## problem rbadmax <- paste(max(r[bad]), summary(unitname(X))$plural) stop(paste("Some function values were infinite or NaN", "at distances r up to", paste(rbadmax, ".", sep=""), "Please specify a shorter", 
sQuote("rinterval"))) } } } ## determine rescaling if any if(is.null(scale)) { scaling <- NULL } else if(is.function(scale)) { scaling <- scale(rok) sname <- "scale(r)" ans <- check.nvector(scaling, nr, things="values of r", fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (scaling <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[rok > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) scaling[bad] <- 1 } } else stop("Argument scale should be a function") ## compute deviations rawdevDat <- Deviation(obs, reference, leaveout, nsim, sim[,1L]) rawdevSim <- Deviation(sim, reference, leaveout, nsim) ## evaluate signed/absolute deviation relevant to alternative ddat <- RelevantDeviation(rawdevDat, alternative, clamp, scaling) dsim <- RelevantDeviation(rawdevSim, alternative, clamp, scaling) ## compute test statistic if(is.infinite(exponent)) { ## MAD devdata <- max(ddat) devsim <- apply(dsim, 2, max) names(devdata) <- "mad" testname <- paste("Maximum", deviationblurb, "test") statisticblurb <- paste("Maximum", deviationblurb) } else { L <- if(nr > 1) diff(rinterval) else 1 if(exponent == 2) { ## Cramer-von Mises ddat2 <- if(clamp) ddat^2 else (sign(ddat) * ddat^2) dsim2 <- if(clamp) dsim^2 else (sign(dsim) * dsim^2) devdata <- L * mean(ddat2) devsim <- L * .colMeans(dsim2, nr, nsim) names(devdata) <- "u" testname <- "Diggle-Cressie-Loosmore-Ford test" statisticblurb <- paste("Integral of squared", deviationblurb) } else if(exponent == 1) { ## integral absolute deviation devdata <- L * mean(ddat) devsim <- L * .colMeans(dsim, nr, nsim) names(devdata) <- "L1" testname <- paste("Integral", deviationblurb, "test") statisticblurb <- paste("Integral of", deviationblurb) } else { ## general p if(clamp) { ddatp <- ddat^exponent dsimp <- dsim^exponent } else { ddatp <- sign(ddat) * (abs(ddat)^exponent) dsimp <- sign(dsim) * (abs(dsim)^exponent) } 
devdata <- L * mean(ddatp) devsim <- L * .colMeans(dsimp, nr, nsim) names(devdata) <- "Lp" testname <- paste("Integrated", ordinal(exponent), "Power Deviation test") statisticblurb <- paste("Integral of", ordinal(exponent), "power of", deviationblurb) } } if(!interpolate) { ## standard Monte Carlo test ## compute rank and p-value datarank <- sum(devdata < devsim) + 1 nties <- sum(devdata == devsim) if(nties > 0) { tierank <- switch(tie.rule, mean = nties/2, randomise = sample(1:nties, 1L)) datarank <- datarank + tierank if(verbose) message("Ties were encountered") } pvalue <- datarank/(nsim+1) ## bookkeeping statistic <- data.frame(devdata, rank=datarank) colnames(statistic)[1L] <- names(devdata) } else { ## Dao-Genton style interpolation fhat <- density(devsim) pvalue <- with(fhat, { if(max(x) <= devdata) 0 else mean(y[x >= devdata]) * (max(x) - devdata) }) statistic <- data.frame(devdata) colnames(statistic)[1L] <- names(devdata) nties <- 0 } e <- attr(X, "einfo") nullmodel <- if(identical(e$csr, TRUE)) "CSR" else if(!is.null(e$simtype)) { switch(e$simtype, csr = "CSR", rmh = paste("fitted", if(identical(e$pois, TRUE)) "Poisson" else "Gibbs", "model"), kppm = "fitted cluster model", expr = "model simulated by evaluating expression", list = "model simulated by drawing patterns from a list", "unrecognised model") } else "unrecognised model" fname <- deparse(attr(X, "ylab")) uname <- with(summary(unitname(X)), if(!vanilla) paste(plural, explain) else NULL) testtype <- paste0(if(interpolate) "Interpolated " else NULL, "Monte Carlo") scaleblurb <- if(is.null(scale)) NULL else paste("Scale function:", paste(deparse(scale), collapse=" ")) refblurb <- if(theo.used) "theoretical" else "sample mean" leaveblurb <- if(leaveout == 0) paste("observed minus", refblurb) else if(leaveout == 1) "leave-one-out" else "leave-two-out" testname <- c(paste(testname, "of", nullmodel), paste(testtype, "test based on", nsim, "simulations", e$constraints), paste("Summary function:", fname), 
paste("Reference function:", refblurb), paste("Alternative:", alternative), paste("Interval of distance values:", prange(rinterval), uname), scaleblurb, paste("Test statistic:", statisticblurb), paste("Deviation =", leaveblurb) ) result <- structure(list(statistic = statistic, p.value = pvalue, method = testname, data.name = e$Yname), class="htest") attr(result, "rinterval") <- rinterval if(save.interpolant && interpolate) attr(result, "density") <- fhat if(save.envelope) { result <- hasenvelope(result, X) attr(result, "statistics") <- list(data=devdata, sim=devsim) attr(result, "info") <- list(exponent=exponent, alternative=alternative, nties=nties, leaveout=leaveout, interpolate=interpolate, scale=scale, clamp=clamp, tie.rule=tie.rule, use.theo=use.theo) } return(result) } spatstat/R/plot.ppp.R0000755000176200001440000004457213156200767014243 0ustar liggesusers# # plot.ppp.R # # $Revision: 1.93 $ $Date: 2017/09/13 09:52:28 $ # # #-------------------------------------------------------------------------- plot.ppp <- local({ transparencyfun <- function(n) { if(n <= 100) 1 else (0.2 + 0.8 * exp(-(n-100)/1000)) } ## determine symbol map for marks of points default.symap.points <- function(x, ..., chars=NULL, cols=NULL, maxsize=NULL, meansize=NULL, markscale=NULL, markrange=NULL, marklevels=NULL) { marx <- marks(x) if(is.null(marx)) { ## null or constant symbol map ## consider using transparent colours if(is.null(cols) && !any(c("col", "fg", "bg") %in% names(list(...))) && (nx <- npoints(x)) > 100 && identical(dev.capabilities()$semiTransparency, TRUE) && spatstat.options("transparent")) cols <- rgb(0,0,0,transparencyfun(nx)) return(symbolmap(..., chars=chars, cols=cols)) } if(!is.null(dim(marx))) stop("Internal error: multivariate marks in default.symap.points") argnames <- names(list(...)) shapegiven <- "shape" %in% argnames chargiven <- (!is.null(chars)) || ("pch" %in% argnames) assumecircles <- !(shapegiven || chargiven) sizegiven <- ("size" %in% argnames) || 
(("cex" %in% argnames) && !shapegiven) if(inherits(marx, c("Date", "POSIXt"))) { ## ......... marks are dates or date/times ..................... timerange <- range(marx, na.rm=TRUE) shapedefault <- if(!assumecircles) list() else list(shape="circles") if(sizegiven) { g <- do.call(symbolmap, resolve.defaults(list(range=timerange), list(...), shapedefault, list(chars=chars, cols=cols))) return(g) } ## attempt to determine a scale for the marks y <- scaletointerval(marx, 0, 1, timerange) y <- y[is.finite(y)] if(length(y) == 0) return(symbolmap(..., chars=chars, cols=cols)) scal <- mark.scale.default(y, as.owin(x), markscale=markscale, maxsize=maxsize, meansize=meansize, characters=chargiven) if(is.na(scal)) return(symbolmap(..., chars=chars, cols=cols)) ## scale determined sizefun <- function(x, scal=1) { (scal/2) * scaletointerval(x, 0, 1, timerange) } formals(sizefun)[[2]] <- scal ## ensures value of 'scal' is printed ## g <- do.call(symbolmap, resolve.defaults(list(range=timerange), list(...), shapedefault, list(size=sizefun))) return(g) } if(is.numeric(marx)) { ## ............. marks are numeric values ................... 
marx <- marx[is.finite(marx)] if(is.null(markrange)) { #' usual case if(length(marx) == 0) return(symbolmap(..., chars=chars, cols=cols)) markrange <- range(marx) } else { if(!all(inside.range(marx, markrange))) warning("markrange does not encompass the range of mark values", call.=FALSE) } ## if(sizegiven) { g <- do.call(symbolmap, resolve.defaults(list(range=markrange), list(...), if(assumecircles) list(shape="circles") else list(), list(chars=chars, cols=cols))) return(g) } ## attempt to determine a scale for the marks if(all(markrange == 0)) return(symbolmap(..., chars=chars, cols=cols)) scal <- mark.scale.default(marx, as.owin(x), markscale=markscale, maxsize=maxsize, meansize=meansize, characters=chargiven) if(is.na(scal)) return(symbolmap(..., chars=chars, cols=cols)) ## scale determined if(markrange[1] >= 0) { ## all marks are nonnegative shapedefault <- if(!assumecircles) list() else list(shape="circles") cexfun <- function(x, scal=1) { scal * x } circfun <- function(x, scal=1) { scal * x } formals(cexfun)[[2]] <- formals(circfun)[[2]] <- scal sizedefault <- if(sizegiven) list() else if(chargiven) list(cex=cexfun) else list(size=circfun) } else { ## some marks are negative shapedefault <- if(!assumecircles) list() else list(shape=function(x) { ifelse(x >= 0, "circles", "squares") }) cexfun <- function(x, scal=1) { scal * abs(x) } circfun <- function(x, scal=1) { scal * abs(x) } formals(cexfun)[[2]] <- formals(circfun)[[2]] <- scal sizedefault <- if(sizegiven) list() else if(chargiven) list(cex=cexfun) else list(size=circfun) } g <- do.call(symbolmap, resolve.defaults(list(range=markrange), list(...), shapedefault, sizedefault, list(chars=chars, cols=cols))) return(g) } ## ........... non-numeric marks ......................... 
um <- marklevels %orifnull% if(is.factor(marx)) levels(marx) else sort(unique(marx)) ntypes <- length(um) if(!is.null(cols)) cols <- rep.int(cols, ntypes)[1:ntypes] if(shapegiven && sizegiven) { #' values mapped to symbols (shape and size specified) g <- symbolmap(inputs=um, ..., cols=cols) } else if(!shapegiven) { #' values mapped to 'pch' chars <- default.charmap(ntypes, chars) g <- symbolmap(inputs=um, ..., chars=chars, cols=cols) } else { #' values mapped to symbols #' determine size scal <- mark.scale.default(rep(1, npoints(x)), Window(x), maxsize=maxsize, meansize=meansize, characters=FALSE) g <- symbolmap(inputs=um, ..., size=scal, cols=cols) } return(g) } default.charmap <- function(n, ch=NULL) { if(!is.null(ch)) return(rep.int(ch, n)[1:n]) if(n <= 25) return(1:n) ltr <- c(letters, LETTERS) if(n <= 52) return(ltr[1:n]) ## wrapped sequence of letters warning("Too many types to display every type as a different character") return(ltr[1 + (0:(n - 1) %% 52)]) } ## main function plot.ppp <- function(x, main, ..., clipwin=NULL, chars=NULL, cols=NULL, use.marks=TRUE, which.marks=NULL, add=FALSE, type=c("p", "n"), legend=TRUE, leg.side=c("left", "bottom", "top", "right"), leg.args=list(), symap=NULL, maxsize=NULL, meansize=NULL, markscale=NULL, zap=0.01, show.window=show.all, show.all=!add, do.plot=TRUE, multiplot=TRUE) { if(missing(main)) main <- short.deparse(substitute(x)) type <- match.arg(type) if(missing(legend)) legend <- (type == "p") if(!missing(maxsize) || !missing(markscale) || !missing(meansize)) warn.once("circlescale", "Interpretation of arguments maxsize and markscale", "has changed (in spatstat version 1.37-0 and later).", "Size of a circle is now measured by its diameter.") if(clipped <- !is.null(clipwin)) { stopifnot(is.owin(clipwin)) W <- Window(x) clippy <- if(is.mask(W)) intersect.owin(W, clipwin) else edges(W)[clipwin] x <- x[clipwin] } else clippy <- NULL ## sensible default position legend <- legend && show.all if(legend) { leg.side <- 
match.arg(leg.side) vertical <- (leg.side %in% c("left", "right")) } # if(type == "n" || npoints(x) == 0) { # ## plot the window only # xwindow <- x$window # if(do.plot) # do.call(plot.owin, # resolve.defaults(list(xwindow), # list(...), # list(main=main, invert=TRUE, add=add, # type=if(show.window) "w" else "n"))) # if(is.null(symap)) symap <- symbolmap() # attr(symap, "bbox") <- as.rectangle(xwindow) # return(invisible(symap)) # } ## ................................................................ ## Handle multiple columns of marks as separate plots ## (unless add=TRUE or which.marks selects a single column ## or multipage = FALSE) if(use.marks && is.data.frame(mx <- marks(x))) { implied.all <- is.null(which.marks) want.several <- implied.all || is.data.frame(mx <- mx[,which.marks]) do.several <- want.several && !add && multiplot if(do.several) { ## generate one plot for each column of marks y <- solapply(mx, setmarks, x=x) out <- do.call(plot, resolve.defaults(list(x=y, main=main, show.window=show.window && !clipped, do.plot=do.plot, type=type), list(...), list(equal.scales=TRUE), list(panel.end=clippy), list(legend=legend, leg.side=leg.side, leg.args=leg.args), list(chars=chars, cols=cols, maxsize=maxsize, meansize=meansize, markscale=markscale, zap=zap))) return(invisible(out)) } if(is.null(which.marks)) { which.marks <- 1 if(do.plot) message("Plotting the first column of marks") } } ## ............... unmarked, or single column of marks .................... ## Determine symbol map and mark values to be used y <- x if(!is.marked(x, na.action="ignore") || !use.marks) { ## Marks are not mapped. 
marx <- NULL if(is.null(symap)) symap <- default.symap.points(unmark(x), ..., chars=chars, cols=cols) } else { ## Marked point pattern marx <- marks(y, dfok=TRUE) if(is.data.frame(marx)) { ## select column or take first colum marx <- marx[, which.marks] y <- setmarks(y, marx) } if(npoints(y) > 0) { ok <- complete.cases(as.data.frame(y)) if(!any(ok)) { warning("All mark values are NA; plotting locations only.") if(is.null(symap)) symap <- default.symap.points(unmark(x), ..., chars=chars, cols=cols) } else if(any(!ok)) { warning(paste("Some marks are NA;", "corresponding points are omitted.")) x <- x[ok] y <- y[ok] marx <- marks(y) } } ## apply default symbol map if(is.null(symap)) symap <- default.symap.points(y, chars=chars, cols=cols, maxsize=maxsize, meansize=meansize, markscale=markscale, ...) } # gtype <- symbolmaptype(symap) ## Determine bounding box for main plot BB <- as.rectangle(x) sick <- inherits(x, "ppp") && !is.null(rejects <- attr(x, "rejects")) if(sick) { ## Get relevant parameters par.direct <- list(main=main, use.marks=use.marks, maxsize=maxsize, meansize=meansize, markscale=markscale) par.rejects <- resolve.1.default(list(par.rejects=list(pch="+")), list(...)) par.all <- resolve.defaults(par.rejects, par.direct) rw <- resolve.defaults(list(...), list(rejectwindow=NULL))$rejectwindow ## determine window for rejects rwin <- if(is.null(rw)) rejects$window else if(is.logical(rw) && rw) rejects$window else if(inherits(rw, "owin")) rw else if(is.character(rw)) { switch(rw, box={boundingbox(rejects, x)}, ripras={ripras(c(rejects$x, x$x), c(rejects$y, x$y))}, stop(paste("Unrecognised option: rejectwindow=", rw))) } else stop("Unrecognised format for rejectwindow") if(is.null(rwin)) stop("Selected window for rejects pattern is NULL") BB <- boundingbox(BB, as.rectangle(rwin)) } ## Augment bounding box with space for legend, if appropriate legend <- legend && (symbolmaptype(symap) != "constant") if(legend) { ## guess maximum size of symbols maxsize <- 
invoke.symbolmap(symap, marx, corners(as.rectangle(x)), add=add, do.plot=FALSE) sizeguess <- if(maxsize <= 0) NULL else (1.5 * maxsize) leg.args <- append(list(side=leg.side, vertical=vertical), leg.args) ## draw up layout legbox <- do.call.matched(plan.legend.layout, append(list(B=BB, size = sizeguess, started=FALSE, map=symap), leg.args)) ## bounding box for everything BB <- legbox$A } ## return now if not plotting attr(symap, "bbox") <- BB if(!do.plot) return(invisible(symap)) ## ............. start plotting ....................... pt <- prepareTitle(main) main <- pt$main nlines <- pt$nlines blankmain <- if(nlines == 0) "" else rep(" ", nlines) rez <- resolve.defaults(list(...), list(cex.main=1, xlim=NULL, ylim=NULL)) plot(BB, type="n", add=add, main=blankmain, show.all=show.all, cex.main=rez$cex.main, xlim=rez$xlim, ylim=rez$ylim) if(sick) { if(show.window) { ## plot windows if(!is.null(rw)) { ## plot window for rejects rwinpardefault <- list(lty=2,lwd=1,border=1) rwinpars <- resolve.defaults(par.rejects, rwinpardefault)[names(rwinpardefault)] do.call(plot.owin, append(list(rwin, add=TRUE), rwinpars)) } ## plot window of main pattern if(!clipped) { do.call(plot.owin, resolve.defaults(list(x$window, add=TRUE), list(...), list(invert=TRUE))) } else plot(clippy, add=TRUE, ...) } if(type != "n") { ## plot reject points do.call(plot.ppp, append(list(rejects, add=TRUE), par.all)) warning(paste(rejects$n, "illegal points also plotted")) } ## the rest is added add <- TRUE } ## Now convert to bona fide point pattern x <- as.ppp(x) xwindow <- x$window ## Plot observation window (or at least the main title) do.call(plot.owin, resolve.defaults(list(x=xwindow, add=TRUE, main=main, type=if(show.window && !clipped) "w" else "n", show.all=show.all), list(...), list(invert=TRUE))) ## If clipped, plot visible part of original window if(show.window && clipped) plot(clippy, add=TRUE, ...) # else if(show.all) fakemaintitle(as.rectangle(xwindow), main, ...) 
if(type != "n") { ## plot symbols ## invoke.symbolmap(symap, marx, x, add=TRUE) } ## add legend if(legend) { b <- legbox$b legendmap <- if(length(leg.args) == 0) symap else do.call(update, append(list(object=symap), leg.args)) do.call(plot, append(list(x=legendmap, main="", add=TRUE, xlim=b$xrange, ylim=b$yrange), leg.args)) } return(invisible(symap)) } plot.ppp }) mark.scale.default <- function(marx, w, ..., markscale=NULL, maxsize=NULL, meansize=NULL, characters=FALSE) { ## establish values of markscale, maxsize, meansize ngiven <- (!is.null(markscale)) + (!is.null(maxsize)) + (!is.null(meansize)) if(ngiven > 1) stop("Only one of the arguments markscale, maxsize, meansize", " should be given", call.=FALSE) if(ngiven == 0) { ## if ALL are absent, enforce the spatstat defaults ## (which could also be null) pop <- spatstat.options("par.points") markscale <- pop$markscale maxsize <- pop$maxsize meansize <- pop$meansize } ## Now check whether markscale is fixed if(!is.null(markscale)) { stopifnot(markscale > 0) return(markscale) } # Usual case: markscale is to be determined from maximum/mean physical size if(is.null(maxsize) && is.null(meansize)) { ## compute default value of 'maxsize' ## guess appropriate max physical size of symbols bb <- as.rectangle(w) maxsize <- 1.4/sqrt(pi * length(marx)/area(bb)) maxsize <- min(maxsize, diameter(bb) * 0.07) ## updated: maxsize now represents *diameter* maxsize <- 2 * maxsize } else { if(!is.null(maxsize)) stopifnot(maxsize > 0) else stopifnot(meansize > 0) } # Examine mark values absmarx <- abs(marx) maxabs <- max(absmarx) tiny <- (maxabs < 4 * .Machine$double.eps) if(tiny) return(NA) ## finally determine physical scale for symbols if(!is.null(maxsize)) { scal <- maxsize/maxabs } else { meanabs <- mean(absmarx) scal <- meansize/meanabs } if(!characters) return(scal) ## if using characters ('pch') we need to ## convert physical sizes to 'cex' values charsize <- max(sidelengths(as.rectangle(w)))/40 return(scal/charsize) } 
fakemaintitle <- function(bb, main, ...) {
  ## Try to imitate effect of 'title(main=main)' above a specified box.
  ## bb: object coercible to a rectangle; main: character vector of title lines.
  ## Draws the title manually with text.default() so it can be positioned
  ## relative to an arbitrary bounding box.
  if(!any(nzchar(main))) return(invisible(NULL))
  bb <- as.rectangle(bb)
  ## horizontal centre of the box
  x0 <- mean(bb$xrange)
  ## place text above the top edge; 1/12 of the box height per title line
  y0 <- bb$yrange[2] + length(main) * diff(bb$yrange)/12
  ## current 'title' graphical parameters, overridden by any given in ...
  parnames <- c('cex.main', 'col.main', 'font.main')
  parlist <- par(parnames)
  parlist <- resolve.defaults(list(...), parlist)[parnames]
  ## rename to the argument names understood by text.default()
  names(parlist) <- c('cex', 'col', 'font')
  ## do.call.matched / graphicsPars are spatstat internals -- presumably they
  ## filter the argument list down to those accepted by text.default (TODO confirm)
  do.call.matched(text.default,
                  resolve.defaults(list(x=x0, y=y0, labels=main),
                                   parlist,
                                   list(...)),
                  funargs=graphicsPars("text"))
  return(invisible(NULL))
}

## text() method for point patterns: draw labels at the point coordinates
text.ppp <- function(x, ...) {
  graphics::text.default(x=x$x, y=x$y, ...)
}
spatstat/R/hermite.R0000644000176200001440000000417013115225157014102 0ustar liggesusers##
## hermite.R
##
## Gauss-Hermite quadrature
##
## $Revision: 1.5 $ $Date: 2017/02/07 07:35:32 $
##
HermiteCoefs <- function(order) {
  ## compute coefficients of Hermite polynomial (unnormalised)
  ## Returns coefficients in ascending powers of x, using the recurrence
  ## H_{n+1}(x) = 2 x H_n(x) - H_n'(x).  H_0 = 1.
  x <- 1
  if(order > 0)
    for(n in 1:order)
      ## c(0, 2*x) shifts coefficients (multiply by 2x);
      ## the second term subtracts the derivative's coefficients
      x <- c(0, 2 * x) - c(((0:(n-1)) * x)[-1L], 0, 0)
  return(x)
}

gauss.hermite <- function(f, mu=0, sd=1, ..., order=5) {
  ## Approximate E[f(X)] for X ~ Normal(mu, sd^2)
  ## by Gauss-Hermite quadrature with the given order (number of nodes).
  ## Exact for f polynomial of degree <= 2*order - 1.
  stopifnot(is.function(f))
  stopifnot(length(mu) == 1)
  stopifnot(length(sd) == 1)
  ## Hermite polynomial coefficients (un-normalised)
  Hn <- HermiteCoefs(order)
  Hn1 <- HermiteCoefs(order-1)
  ## quadrature points: roots of H_order (real by construction)
  x <- sort(Re(polyroot(Hn)))
  ## weights w_i = 2^(n-1) n! sqrt(pi) / (n H_{n-1}(x_i))^2
  Hn1x <- matrix(Hn1, nrow=1) %*% t(outer(x, 0:(order-1), "^"))
  w <- 2^(order-1) * factorial(order) * sqrt(pi)/(order * Hn1x)^2
  ## adjust: change of variables from standard GH weight exp(-t^2)
  ## to the Normal(mu, sd^2) density
  ww <- w/sqrt(pi)
  xx <- mu + sd * sqrt(2) * x
  ## compute the weighted sum of f evaluated at the transformed nodes
  ans <- 0
  for(i in seq_along(x))
    ans <- ans + ww[i] * f(xx[i], ...)
  return(ans)
}

## Density of the Poisson-lognormal style mixture:
## N ~ Poisson(invlink(Z)), Z ~ Normal(mu, sd^2),
## integrated numerically by Gauss-Hermite quadrature.
dmixpois <- local({
  dpoisG <- function(x, ..., k, g) dpois(k, g(x))
  function(x, mu, sd, invlink=exp, GHorder=5)
    gauss.hermite(dpoisG, mu=mu, sd=sd, g=invlink, k=x, order=GHorder)
})

## Distribution function of the same mixture (see dmixpois).
pmixpois <- local({
  ppoisG <- function(x, ..., q, g, lot) ppois(q, g(x), lower.tail=lot)
  function(q, mu, sd, invlink=exp, lower.tail = TRUE, GHorder=5)
    gauss.hermite(ppoisG, mu=mu, sd=sd, g=invlink, q=q,
                  order=GHorder, lot=lower.tail)
})

## Quantile function of the mixture: bracket the support, tabulate the cdf,
## then invert it by table lookup.
qmixpois <- function(p, mu, sd, invlink=exp, lower.tail = TRUE, GHorder=5) {
  ## Guess upper and lower limits for the Poisson mean,
  ## using extreme quantiles of the latent Normal
  pmin <- min(p, 1-p)/2
  lam.hi <- invlink(qnorm(pmin, mean=max(mu), sd=max(sd), lower.tail=FALSE))
  lam.lo <- invlink(qnorm(pmin, mean=min(mu), sd=max(sd), lower.tail=TRUE))
  ## corresponding range of plausible counts
  kmin <- qpois(pmin, lam.lo, lower.tail=TRUE)
  kmax <- qpois(pmin, lam.hi, lower.tail=FALSE)
  kk <- kmin:kmax
  ## tabulated cdf over that range
  pp <- pmixpois(kk, mu, sd, invlink, lower.tail=TRUE, GHorder)
  ## invert by interval lookup (reversed tables for upper tail)
  ans <- if(lower.tail) kk[findInterval(p, pp, all.inside=TRUE)] else
         rev(kk)[findInterval(1-p, rev(1-pp), all.inside=TRUE)]
  return(ans)
}

## Random generation: simulate the latent Normal, transform through the
## inverse link, then draw Poisson counts.
rmixpois <- function(n, mu, sd, invlink=exp) {
  lam <- invlink(rnorm(n, mean=mu, sd=sd))
  y <- rpois(n, lam)
  return(y)
}
spatstat/R/areadiff.R0000755000176200001440000002015413115271075014211 0ustar liggesusers#
# areadiff.R
#
# $Revision: 1.33 $ $Date: 2017/06/05 10:31:58 $
#
# Computes sufficient statistic for area-interaction process
#
# Invokes areadiff.c
#
# areaLoss = area lost by removing X[i] from X

## Dispatcher: exact computation via Dirichlet tessellation,
## or approximate computation by grid counting.
areaLoss <- function(X, r, ...,
                     W=as.owin(X), subset=NULL,
                     exact=FALSE,
                     ngrid=spatstat.options("ngrid.disc")) {
  if(exact)
    areaLoss.diri(X, r, ..., W=W, subset=subset)
  else
    areaLoss.grid(X, r, ..., W=W, subset=subset, ngrid=ngrid)
}

# areaGain = area gained by adding u[i] to X

## Dispatcher, analogous to areaLoss.
## NOTE(review): the grid branch does not forward '...' while the exact
## branch does -- looks inconsistent; confirm whether intentional.
areaGain <- function(u, X, r, ...,
                     W=as.owin(X),
                     exact=FALSE,
                     ngrid=spatstat.options("ngrid.disc")) {
  if(exact)
    areaGain.diri(u, X, r, ..., W=W)
  else
    areaGain.grid(u, X, r, W=W, ngrid=ngrid)
}

#////////////////////////////////////////////////////////////
#           algorithms 
using Dirichlet tessellation
#///////////////////////////////////////////////////////////

## Exact computation of area lost by deleting each point X[i]:
## only the Delaunay neighbours of X[i] can affect the lost area,
## so the dilated areas are computed on that local configuration only.
## Returns a matrix, one row per selected point, one column per radius.
areaLoss.diri <- function(X, r, ..., W=as.owin(X), subset=NULL) {
  stopifnot(is.ppp(X))
  npts <- npoints(X)
  if(is.matrix(r)) {
    if(sum(dim(r) > 1) > 1)
      stop("r should be a vector or single value")
    r <- as.vector(r)
  }
  nr <- length(r)
  ## trivial cases: empty pattern, or single point (whole disc is lost)
  if(npts == 0)
    return(matrix(, nrow=0, ncol=nr))
  else if(npts == 1)
    return(matrix(discpartarea(X, r, W), nrow=1))
  # set up output array
  indices <- 1L:npts
  if(!is.null(subset))
    indices <- indices[subset]
  out <- matrix(, nrow=length(indices), ncol=nr)
  #
  w <- X$window
  pir2 <- pi * r^2
  # dirichlet neighbour relation in entire pattern
  ## delsgs columns 5 and 6 hold the indices of each Delaunay edge's endpoints
  dd <- deldir(X$x, X$y, rw=c(w$xrange, w$yrange))
  a <- dd$delsgs[,5L]
  b <- dd$delsgs[,6L]
  for(k in seq_along(indices)) {
    i <- indices[k]
    # find all Delaunay neighbours of i
    jj <- c(b[a==i], a[b==i])
    jj <- sort(unique(jj))
    # extract only these points
    Yminus <- X[jj]
    Yplus <- X[c(jj, i)]
    # dilate: area lost = area of dilation with i minus dilation without i
    aplus <- dilated.areas(Yplus, r, W, exact=TRUE)
    aminus <- dilated.areas(Yminus, r, W, exact=TRUE)
    areas <- aplus - aminus
    # area/(pi * r^2) must be positive and nonincreasing;
    # enforce these constraints to clean up numerical error
    y <- ifelseAX(r == 0, 1, areas/pir2)
    y <- pmin.int(1, y)
    ok <- is.finite(y)
    y[ok] <- rev(cummax(rev(y[ok])))
    areas <- pmax.int(0, y * pir2)
    # save
    out[k, ] <- areas
  }
  return(out)
}

## Exact computation of area gained by adding each query point u[i] to X,
## again localised to the Delaunay neighbours of the added point.
## Returns a matrix, one row per query point, one column per radius.
areaGain.diri <- function(u, X, r, ..., W=as.owin(X), verbose=FALSE) {
  stopifnot(is.ppp(X))
  Y <- as.ppp(u, W=W)
  nX <- X$n
  nY <- Y$n
  if(is.matrix(r)) {
    if(sum(dim(r) > 1) > 1)
      stop("r should be a vector or single value")
    r <- as.vector(r)
  }
  nr <- length(r)
  ## trivial cases: no query points, or empty X (gain is the whole disc)
  if(nY == 0)
    return(matrix(, nrow=0, ncol=nr))
  if(nX == 0)
    return(matrix(pi * r^2, nrow=nY, ncol=nr, byrow=TRUE))
  if(verbose)
    splat("areaGain,", nY, ngettext(nY, "point,", "points,"),
          nr, ngettext(nr, "rvalue", "r values"))
  out <- matrix(0, nrow=nY, ncol=nr)
  pir2 <- pi * r^2
  wbox <- as.rectangle(as.owin(X))
  #
  state <- list()
  for(i in 1L:nY) {
    if(verbose) state <- progressreport(i, nY, state=state)
    ## place the query point first, so it has index 1 in V
    V <- superimpose(Y[i], X, W=wbox, check=FALSE)
    # Dirichlet neighbour relation for V
    dd <- deldir(V$x, V$y, rw=c(wbox$xrange, wbox$yrange))
    aa <- dd$delsgs[,5L]
    bb <- dd$delsgs[,6L]
    # find all Delaunay neighbours of Y[1] in V
    jj <- c(bb[aa==1L], aa[bb==1L])
    jj <- sort(unique(jj))
    # extract only these points
    Zminus <- V[jj]
    Zplus <- V[c(1, jj)]
    # dilate: gain = dilation with the query point minus dilation without it
    aplus <- dilated.areas(Zplus, r, W, exact=TRUE)
    aminus <- dilated.areas(Zminus, r, W, exact=TRUE)
    areas <- aplus - aminus
    # area/(pi * r^2) must be in [0,1] and nonincreasing;
    # clean up numerical error as in areaLoss.diri
    y <- ifelseAX(r == 0, 1, areas/pir2)
    y <- pmin.int(1, y)
    ok <- is.finite(y)
    y[ok] <- rev(cummax(rev(y[ok])))
    areas <- pmax.int(0, y * pir2)
    # save
    out[i,] <- areas
  }
  return(out)
}

#////////////////////////////////////////////////////////////////////////
#     alternative implementations using grid counting in C
#////////////////////////////////////////////////////////////////////////

## Approximate areaGain by grid counting (C routines areadifs/areaBdif),
## with a separate image-based algorithm for irregular windows.
areaGain.grid <- function(u, X, r, ...,
                          W=NULL,
                          ngrid=spatstat.options("ngrid.disc")) {
  verifyclass(X, "ppp")
  u <- as.ppp(u, W=as.owin(X))
  stopifnot(is.numeric(r) && all(is.finite(r)) && all(r >= 0))
  #
  nu <- u$n
  nr <- length(r)
  if(nr == 0) return(numeric(0))
  rmax <- max(r)
  #
  constrain <- !is.null(W)
  if(constrain && (W$type != "rectangle")) {
    # Constrained to an irregular window
    # initialise to value for small-r (disc entirely uncovered)
    result <- matrix(pi * r^2, nrow=nu, ncol=nr, byrow=TRUE)
    # vector of radii below which b(u,r) is disjoint from U(X,r)
    rcrit.u <- nncross(u, X, what="dist")/2
    rcrit.min <- min(rcrit.u)
    # Use distance transform and set covariance
    D <- distmap(X, ...)
    DW <- D[W, drop=FALSE]
    # distance from (0,0) - thresholded to make digital discs
    discWin <- owin(c(-rmax,rmax),c(-rmax,rmax))
    discWin <- as.mask(discWin, eps=min(D$xstep, rmax/4))
    rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W=discWin)
    #
    for(j in which(r > rcrit.min)) {
      # rj is above the critical radius rcrit.u[i] for at least one point u[i]
      rj <- r[j]
      if(any(above <- (rj > rcrit.u))) {
        ## uncovered region at distance > rj from X, convolved with disc of
        ## radius rj, gives the gained area at each location
        Uncovered <- levelset(DW, rj, ">")
        DiscRj <- levelset(rad, rj, "<=")
        AreaGainIm <- setcov(Uncovered, DiscRj)
        result[above, j] <- safelookup(AreaGainIm, u[above])
      }
    }
    return(result)
  }
  #
  #
  ## unconstrained case, or rectangular window: delegate to C grid counting
  xx <- X$x
  yy <- X$y
  result <- matrix(, nrow=nu, ncol=nr)
  #
  for(i in 1L:nu) {
    # shift u[i] to origin
    xu <- u$x[i]
    yu <- u$y[i]
    xshift <- xx - xu
    yshift <- yy - yu
    # find points within distance 2 rmax of origin
    # (only these can overlap the disc of radius rmax around u[i])
    close <- (xshift^2 + yshift^2 < 4 * rmax^2)
    nclose <- sum(close)
    # invoke C routine
    if(!constrain) {
      z <- .C("areadifs",
              rad = as.double(r),
              nrads = as.integer(nr),
              x = as.double(xshift[close]),
              y = as.double(yshift[close]),
              nn = as.integer(nclose),
              ngrid = as.integer(ngrid),
              answer = as.double(numeric(nr)),
              PACKAGE = "spatstat")
      result[i,] <- z$answer
    } else {
      ## window edges passed in coordinates relative to the shifted origin
      z <- .C("areaBdif",
              rad = as.double(r),
              nrads = as.integer(nr),
              x = as.double(xshift[close]),
              y = as.double(yshift[close]),
              nn = as.integer(nclose),
              ngrid = as.integer(ngrid),
              x0 = as.double(W$xrange[1L] - xu),
              y0 = as.double(W$yrange[1L] - yu),
              x1 = as.double(W$xrange[2L] - xu),
              y1 = as.double(W$yrange[2L] - yu),
              answer = as.double(numeric(nr)),
              PACKAGE = "spatstat")
      result[i,] <- z$answer
    }
  }
  return(result)
}

## Approximate areaLoss: either repeated grid-counting calls to areaGain
## ("count"), or a distance-transform method better suited to many radii
## ("distmap").  Default choice depends on the number of radii.
areaLoss.grid <- function(X, r, ..., W=as.owin(X), subset=NULL,
                          method = c("count", "distmap"),
                          ngrid = spatstat.options("ngrid.disc"),
                          exact = FALSE) {
  verifyclass(X, "ppp")
  n <- npoints(X)
  nr <- length(r)
  indices <- if(is.null(subset)) 1L:n else (1L:n)[subset]
  answer <- matrix(, nrow=length(indices), ncol=nr)
  if(missing(method)) {
    ## heuristic: grid counting for few radii, distance transform otherwise
    method <- if(nr <= 20 || exact) "count" else "distmap"
  } else method <- match.arg(method)
  switch(method,
         count = {
           # one value of r: use grid-counting
           ## area lost by deleting X[i] = area gained by adding it back
           for(k in seq_along(indices)) {
             i <- indices[k]
             answer[k,] <- areaGain(X[i], X[-i], r, W=W,
                                    ngrid=ngrid, exact=exact)
           }
         },
         distmap = {
           # Many values of r: use distance transform
           D <- distmap(X, ...)
           DW <- D[W, drop=FALSE]
           a <- area(Window(DW))
           # empirical cdf of distance values
           FW <- ecdf(DW[drop=TRUE])
           # radii below which there are no overlaps
           rcrit <- nndist(X)/2
           for(k in seq_along(indices)) {
             i <- indices[k]
             ## distance transform with point i removed
             Di <- distmap(X[-i], ...)
             FiW <- ecdf(Di[W, drop=TRUE])
             ## below rcrit[i] the disc is disjoint from the others,
             ## so the whole disc area pi r^2 is lost;
             ## above it, the loss is the difference of covered fractions
             answer[k, ] <- ifelseXY(r > rcrit[i],
                                     a * (FW(r) - FiW(r)),
                                     pi * r^2)
           }
         })
  return(answer)
}
spatstat/R/pool.R0000644000176200001440000000647113131624754013426 0ustar liggesusers#'
#' pool.R
#'
#' $Revision: 1.5 $ $Date: 2017/06/05 10:31:58 $

#' Generic: pool several objects (e.g. summary functions) into one estimate.
pool <- function(...) {
  UseMethod("pool")
}

#' Method for 'fv' (function value) objects: pooled ratio-of-sums estimate
#' with optional weights, plus a delta-method variance and two-sigma CI.
pool.fv <- local({
  ## small helpers operating on fv objects without relabelling
  Square <- function(A) { force(A); eval.fv(A^2, relabel=FALSE) }
  Add <- function(A,B){ force(A); force(B); eval.fv(A+B, relabel=FALSE) }
  Cmul <- function(A, f) { force(A); force(f); eval.fv(f * A, relabel=FALSE) }
  pool.fv <- function(..., weights=NULL, relabel=TRUE, variance=TRUE) {
    argh <- list(...)
    n <- narg <- length(argh)
    ## degenerate cases: nothing to pool
    if(narg == 0) return(NULL)
    if(narg == 1) return(argh[[1]])
    ## validate
    isfv <- unlist(lapply(argh, is.fv))
    if(!all(isfv))
      stop("All arguments must be fv objects")
    ## force all fv objects onto a common domain / column structure
    argh <- do.call(harmonise, append(argh, list(strict=TRUE)))
    template <- vanilla.fv(argh[[1]])
    ## compute products
    ## (Y = weighted functions, XY = functions times squared weights,
    ##  sumX / sumX2 = sums of weights and squared weights)
    if(!is.null(weights)) {
      check.nvector(weights, narg, things="Functions")
      Y <- Map(Cmul, argh, weights)
      XY <- Map(Cmul, argh, weights^2)
      sumX <- sum(weights)
      sumX2 <- sum(weights^2)
    } else {
      ## default: weights=1
      Y <- XY <- argh
      sumX <- sumX2 <- narg
    }
    ## sum
    sumY <- Reduce(Add, Y)
    attributes(sumY) <- attributes(template)
    ## ratio-of-sums: the pooled estimate
    Ratio <- eval.fv(sumY/sumX, relabel=FALSE)
    if(variance) {
      ## variance calculation
      ## sample moments of the numerator and denominator
      meanX <- sumX/n
      meanY <- eval.fv(sumY/n, relabel=FALSE)
      sumY2 <- Reduce(Add, lapply(Y, Square))
      varX <- (sumX2 - n * meanX^2)/(n-1)
      varY <- eval.fv((sumY2 - n * meanY^2)/(n-1), relabel=FALSE)
      sumXY <- Reduce(Add, XY)
      covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1), relabel=FALSE)
      ## variance by delta method
      ## relative variance of a ratio, clipped at zero
      relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2
                                    - 2 * covXY/(meanX * meanY)),
                        relabel=FALSE)
      Variance <- eval.fv(Ratio^2 * relvar/n, relabel=FALSE)
      ## two sigma CI
      hiCI <- eval.fv(Ratio + 2 * sqrt(Variance), relabel=FALSE)
      loCI <- eval.fv(Ratio - 2 * sqrt(Variance), relabel=FALSE)
    }
    ## tweak labels of main estimate
    attributes(Ratio) <- attributes(template)
    if(relabel)
      Ratio <- prefixfv(Ratio,
                        tagprefix="pool",
                        descprefix="pooled ",
                        lablprefix="")
    if(!variance) return(Ratio)
    ## tweak labels of variance terms
    attributes(Variance) <- attributes(template)
    Variance <- prefixfv(Variance,
                         tagprefix="var",
                         descprefix="delta-method variance estimate of ",
                         lablprefix="bold(var)~")
    attributes(hiCI) <- attributes(loCI) <- attributes(template)
    hiCI <- prefixfv(hiCI,
                     tagprefix="hi",
                     descprefix="upper limit of two-sigma CI based on ",
                     lablprefix="bold(hi)~")
    loCI <- prefixfv(loCI,
                     tagprefix="lo",
                     descprefix="lower limit of two-sigma CI based on ",
                     lablprefix="bold(lo)~")
    ## glue together: estimate, variance and CI limits in one fv object
    result <- Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI))
    ## don't plot variances, by default
    fvnames(result, ".") <- setdiff(fvnames(result, "."),
                                    fvnames(Variance, "."))
    return(result)
  }
  pool.fv
})
spatstat/R/units.R0000755000176200001440000001210213115271120013603 0ustar liggesusers#
# Functions for extracting and setting the name of the unit of length
#
# $Revision: 1.23 $ $Date: 2016/09/23 07:42:46 $
#
#
## Generic accessor for the unit of length of a spatial object.
unitname <- function(x) { UseMethod("unitname") }

## windows store their unit in x$units
unitname.owin <- function(x) {
  u <- as.units(x$units)
  return(u)
}

## point patterns delegate to their observation window
unitname.ppp <- function(x) {
  u <- as.units(x$window$units)
  return(u)
}

## pixel images store their unit in x$units
unitname.im <- function(x) {
  u <- as.units(x$units)
  return(u)
}

## fallback: unit stored as an attribute
unitname.default <- function(x) { return(as.units(attr(x, "units"))) }

## Generic replacement function for the unit of length.
"unitname<-" <- function(x, value) { UseMethod("unitname<-") }

"unitname<-.owin" <- function(x, value) {
  x$units <- as.units(value)
  return(x)
}

## setting the unit of a point pattern updates its window
"unitname<-.ppp" <- function(x, value) {
  w <- x$window
  unitname(w) <- value
  x$window <- w
  return(x)
}

"unitname<-.im" <- function(x, value) {
  x$units <- as.units(value)
  return(x)
}

## fallback: store as attribute; silently ignore NULL objects
"unitname<-.default" <- function(x, value) {
  if(is.null(x)) return(x)
  attr(x, "units") <- as.units(value)
  return(x)
}

### class 'units'

## Constructor for a 'units' object:
## singular name, plural name, and a positive numeric multiplier
## (one spatial unit = mul * named units).
makeunits <- function(sing="unit", plur="units", mul = 1) {
  if(!is.character(sing))
    stop("In unit name, first entry should be a character string")
  if(!is.character(plur))
    stop("In unit name, second entry should be a character string")
  if(!is.numeric(mul)) {
    mul <- try(as.numeric(mul), silent=TRUE)
    if(inherits(mul, "try-error"))
      stop("In unit name, third entry should be a number")
  }
  if(length(mul) != 1 || mul <= 0)
    stop("In unit name, third entry should be a single positive number")
  u <- list(singular=sing, plural=plur, multiplier=mul)
  ## a multiplier only makes sense for a specifically named unit
  if(mul != 1 && (sing=="unit" || plur=="units"))
    stop(paste("A multiplier is not allowed",
               "if the unit does not have a specific name"))
  class(u) <- "units"
  return(u)
}

## Coerce to 'units': accepts an existing units object, a single name,
## c(singular, plural), or list(singular, plural, multiplier).
as.units <- function(s) {
  if(inherits(s,
              "units"))
    return(s)
  s <- as.list(s)
  n <- length(s)
  if(n > 3)
    stop(paste("Unit name should be a character string,",
               "or a vector/list of 2 character strings,",
               "or a list(character, character, numeric)"))
  ## dispatch on the number of components supplied (switch index is n+1
  ## so that an empty specification yields the default unnamed unit)
  out <- switch(n+1,
                makeunits(),
                makeunits(s[[1]], s[[1]]),
                makeunits(s[[1]], s[[2]]),
                makeunits(s[[1]], s[[2]], s[[3]]))
  return(out)
}

## Print method: "singular / plural", or "multiplier plural" when scaled.
print.units <- function(x, ...) {
  mul <- x$multiplier
  if(mul == 1)
    cat(paste(x$singular, "/", x$plural, "\n"))
  else
    cat(paste(mul, x$plural, "\n"))
  return(invisible(NULL))
}

as.character.units <- function(x, ...) {
  mul <- x$multiplier
  return(if(mul == 1) x$plural else paste(mul, x$plural))
}

## Summary method: precompute the text used in plot legends and axes.
## 'vanilla' = unnamed, unscaled default unit.
summary.units <- function(object, ...) {
  x <- object
  scaled <- (x$multiplier != 1)
  named <- (x$singular != "unit")
  vanilla <- !named && !scaled
  out <- if(vanilla) {
    list(legend = NULL, axis = NULL, explain = NULL,
         singular = "unit", plural = "units")
  } else if(named & !scaled) {
    list(legend = paste("Unit of length: 1", x$singular),
         axis = paren(x$plural, type=spatstat.options('units.paren')),
         explain = NULL,
         singular = x$singular, plural = x$plural)
  } else {
    ## scaled unit: explain the conversion, e.g. "(one unit = 42 metres)"
    expanded <- paste(x$multiplier, x$plural)
    expla <- paren(paste("one unit =", expanded),
                   type=spatstat.options('units.paren'))
    list(legend = paste("Unit of length:", expanded),
         axis = expla,
         explain = expla,
         singular = "unit", plural = "units")
  }
  out <- append(out, list(scaled = scaled, named = named, vanilla = vanilla))
  class(out) <- "summary.units"
  return(out)
}

print.summary.units <- function(x, ...)
{ if(x$vanilla) cat("Unit of length (unnamed)\n") else cat(paste(x$legend, "\n")) invisible(NULL) } compatible.units <- function(A, B, ..., coerce=TRUE) { stopifnot(inherits(A, "units")) if(missing(B)) return(TRUE) stopifnot(inherits(B, "units")) # check for null units Anull <- summary(A)$vanilla Bnull <- summary(B)$vanilla # `coerce' determines whether `vanilla' units are compatible with other units coerce <- as.logical(coerce) # agree <- if(!Anull && !Bnull) identical(all.equal(A,B), TRUE) else if(Anull && Bnull) TRUE else coerce # if(!agree) return(FALSE) # A and B agree if(length(list(...)) == 0) return(TRUE) # recursion return(compatible.units(B, ...)) } # class 'numberwithunit': numeric value(s) with unit of length numberwithunit <- function(x, u) { u <- as.units(u) x <- as.numeric(x) unitname(x) <- u class(x) <- c(class(x), "numberwithunit") return(x) } "%unit%" <- function(x, u) { numberwithunit(x, u) } format.numberwithunit <- function(x, ..., collapse=" x ", modifier=NULL) { u <- summary(unitname(x)) uname <- if(all(x == 1)) u$singular else u$plural y <- format(as.numeric(x), ...) z <- pasteN(paste(y, collapse=collapse), modifier, uname, u$explain) return(z) } as.character.numberwithunit <- function(x, ...) { return(format(x)) } print.numberwithunit <- function(x, ...) { cat(format(x, ...), fill=TRUE) return(invisible(NULL)) } spatstat/R/polynom.R0000644000176200001440000000450513115225157014144 0ustar liggesusers#' #' polynom.R #' #' $Revision: 1.1 $ $Date: 2017/01/02 09:48:36 $ #' polynom <- function(x, ...) { rest <- list(...) # degree not given if(length(rest) == 0) stop("degree of polynomial must be given") #call with single variable and degree if(length(rest) == 1) { degree <- ..1 if((degree %% 1) != 0 || length(degree) != 1 || degree < 1) stop("degree of polynomial should be a positive integer") # compute values result <- outer(x, 1:degree, "^") # compute column names - the hard part ! 
namex <- deparse(substitute(x)) # check whether it needs to be parenthesised if(!is.name(substitute(x))) namex <- paste("(", namex, ")", sep="") # column names namepowers <- if(degree == 1) namex else c(namex, paste(namex, "^", 2:degree, sep="")) namepowers <- paste("[", namepowers, "]", sep="") # stick them on dimnames(result) <- list(NULL, namepowers) return(result) } # call with two variables and degree if(length(rest) == 2) { y <- ..1 degree <- ..2 # list of exponents of x and y, in nice order xexp <- yexp <- numeric() for(i in 1:degree) { xexp <- c(xexp, i:0) yexp <- c(yexp, 0:i) } nterms <- length(xexp) # compute result <- matrix(, nrow=length(x), ncol=nterms) for(i in 1:nterms) result[, i] <- x^xexp[i] * y^yexp[i] # names of these terms namex <- deparse(substitute(x)) # namey <- deparse(substitute(..1)) ### seems not to work in R zzz <- as.list(match.call()) namey <- deparse(zzz[[3]]) # check whether they need to be parenthesised # if so, add parentheses if(!is.name(substitute(x))) namex <- paste("(", namex, ")", sep="") if(!is.name(zzz[[3]])) namey <- paste("(", namey, ")", sep="") nameXexp <- c("", namex, paste(namex, "^", 2:degree, sep="")) nameYexp <- c("", namey, paste(namey, "^", 2:degree, sep="")) # make the term names termnames <- paste(nameXexp[xexp + 1], ifelse(xexp > 0 & yexp > 0, ".", ""), nameYexp[yexp + 1], sep="") termnames <- paste("[", termnames, "]", sep="") dimnames(result) <- list(NULL, termnames) # return(result) } stop("Can't deal with more than 2 variables yet") } spatstat/R/timed.R0000644000176200001440000000625113137505710013551 0ustar liggesusers#' #' timed.R #' #' Timed objects #' #' $Revision: 1.3 $ $Date: 2017/07/31 01:08:55 $ timed <- function(x, ..., starttime=NULL, timetaken=NULL) { if(is.null(starttime) && is.null(timetaken)) # time starts now. 
starttime <- proc.time() # evaluate expression if any object <- x if(is.null(timetaken)) timetaken <- proc.time() - starttime if(!inherits(object, "timed")) class(object) <- c("timed", class(object)) attr(object, "timetaken") <- timetaken return(object) } print.timed <- function(x, ...) { # strip the timing information and print the rest. taken <- attr(x, "timetaken") cx <- class(x) attr(x, "timetaken") <- NULL class(x) <- cx[cx != "timed"] NextMethod("print") # Now print the timing info cat(paste("\nTime taken:", codetime(taken), "\n")) return(invisible(NULL)) } timeTaken <- function(..., warn=TRUE) { allargs <- list(...) hastime <- sapply(allargs, inherits, what="timed") if(!any(hastime)) { if(warn) warning("Data did not contain timing information", call.=FALSE) return(NULL) } if(warn && !all(hastime)) warning("Some arguments did not contain timing information", call.=FALSE) times <- sapply(allargs[hastime], attr, which="timetaken") tottime <- rowSums(times) class(tottime) <- "proc_time" return(tottime) } #' .............. codetime .................................... 
#' Basic utility for converting times in seconds to text strings codetime <- local({ uname <- c("min", "hours", "days", "years", "thousand years", "million years", "billion years") u1name <- c("min", "hour", "day", "year", "thousand years", "million years", "billion years") multiple <- c(60, 60, 24, 365, 1e3, 1e3, 1e3) codehms <- function(x) { sgn <- if(x < 0) "-" else "" x <- round(abs(x)) hours <- x %/% 3600 mins <- (x %/% 60) %% 60 secs <- x %% 60 h <- if(hours > 0) paste(hours, ":", sep="") else "" started <- (hours > 0) m <- if(mins > 0) { paste(if(mins < 10 && started) "0" else "", mins, ":", sep="") } else if(started) "00:" else "" started <- started | (mins > 0) s <- if(secs > 0) { paste(if(secs < 10 && started) "0" else "", secs, sep="") } else if(started) "00" else "0" if(!started) s <- paste(s, "sec") paste(sgn, h, m, s, sep="") } codetime <- function(x, hms=TRUE, what=c("elapsed","user","system")) { if(inherits(x, "proc_time")) { what <- match.arg(what) x <- summary(x)[[match(what, c("user", "system", "elapsed"))]] } if(!is.numeric(x) || length(x) != 1) stop("codetime: x must be a proc_time object or a single number") sgn <- if(x < 0) "-" else "" x <- abs(x) if(x < 60) return(paste(sgn, signif(x, 3), " sec", sep="")) # more than 1 minute: round to whole number of seconds x <- round(x) if(hms && (x < 60 * 60 * 24)) return(paste(sgn, codehms(x), sep="")) u <- u1 <- "sec" for(k in seq_along(multiple)) { if(x >= multiple[k]) { x <- x/multiple[k] u <- uname[k] u1 <- u1name[k] } else break } xx <- round(x, 1) ux <- if(xx == 1) u1 else u paste(sgn, xx, " ", ux, sep="") } codetime }) spatstat/R/randomonlines.R0000755000176200001440000001442013115271120015306 0ustar liggesusers# # randomOnLines.R # # $Revision: 1.8 $ $Date: 2014/11/17 04:40:14 $ # # Generate random points on specified lines # runifpointOnLines <- function(n, L, nsim=1) { if(!is.numeric(n) || any(n < 0) || any(n %% 1 != 0)) stop("n should be a nonnegative integer or integers") if(!is.psp(L)) L <- 
as.psp(L) W <- as.owin(L) result <- vector(mode="list", length=nsim) for(i in 1:nsim) { X <- datagen.runifpointOnLines(n, L) Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE) result[[i]] <- Y } if(nsim == 1) return(result[[1]]) names(result) <- paste("Simulation", 1:nsim) return(as.solist(result)) } datagen.runifpointOnLines <- function(n, L) { stopifnot(is.psp(L)) m <- length(n) ismarked <- (m > 1) if(m == 0 || (m == 1 && n == 0)) return(data.frame(x=numeric(0), y=numeric(0), seg=integer(0), tp=numeric(0))) # extract segment information len <- lengths.psp(L) sumlen <- sum(len) cumlen <- cumsum(len) cum0len <- c(0, cumlen) Ldf <- as.data.frame(L) x0 <- with(Ldf, x0) y0 <- with(Ldf, y0) dx <- with(Ldf, x1-x0) dy <- with(Ldf, y1-y0) # determine mark space if(ismarked) { markvalues <- names(n) if(sum(nzchar(markvalues)) < m) markvalues <- paste(1:m) } # initialise output data.frame out <- data.frame(x=numeric(0), y=numeric(0), seg=integer(0), tp=numeric(0)) if(ismarked) out <- cbind(out, data.frame(marks=character(0))) # generate points of each mark in turn for(j in 1:m) { if(n[[j]] > 0) { # generate random positions uu <- runif(n[[j]], min=0, max=sumlen) # identify segment for each point kk <- findInterval(uu, cum0len, rightmost.closed=TRUE, all.inside=TRUE) # parametric position along segment tt <- (uu - cum0len[kk])/len[kk] tt[!is.finite(tt)] <- 0 # convert to (x,y) x <- x0[kk] + tt * dx[kk] y <- y0[kk] + tt * dy[kk] # assemble result if(!ismarked) { out <- data.frame(x=x, y=y, seg=kk, tp=tt) } else { outj <- data.frame(x=x, y=y, seg=kk, tp=tt, marks=markvalues[j]) out <- rbind(out, outj) } } } if(ismarked) out$marks <- factor(out$marks, levels=markvalues) return(out) } runifpoisppOnLines <- function(lambda, L, nsim=1) { if(!is.numeric(lambda) || !all(is.finite(lambda) && (lambda >= 0))) stop("lambda should be a finite, nonnegative number or numbers") if(!is.psp(L)) L <- as.psp(L) W <- as.owin(L) result <- vector(mode="list", length=nsim) for(i in 1:nsim) { 
X <- datagen.runifpoisppOnLines(lambda, L) Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE) result[[i]] <- Y } if(nsim == 1) return(result[[1]]) names(result) <- paste("Simulation", 1:nsim) return(as.solist(result)) } datagen.runifpoisppOnLines <- function(lambda, L) { stopifnot(is.psp(L)) mu <- lambda * sum(lengths.psp(L)) n <- rpois(rep.int(1, length(mu)), mu) if(length(n) > 1) names(n) <- names(lambda) df <- datagen.runifpointOnLines(n, L) return(df) } rpoisppOnLines <- function(lambda, L, lmax=NULL, ..., nsim=1) { if(!is.psp(L)) L <- as.psp(L) W <- as.owin(L) result <- vector(mode="list", length=nsim) for(i in 1:nsim) { X <- datagen.rpoisppOnLines(lambda, L, lmax=lmax, ...) Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE) result[[i]] <- Y } if(nsim == 1) return(result[[1]]) names(result) <- paste("Simulation", 1:nsim) return(as.solist(result)) } datagen.rpoisppOnLines <- function(lambda, L, lmax=NULL, ..., check=TRUE) { stopifnot(is.psp(L)) if(is.numeric(lambda)) return(datagen.runifpoisppOnLines(lambda, L)) # ensure lambda is a list if(is.function(lambda) || is.im(lambda)) lambda <- list(lambda) m <- length(lambda) # determine type of argument argtype <- if(all(unlist(lapply(lambda, is.im)))) "im" else if(all(unlist(lapply(lambda, is.function)))) "function" else stop(paste(sQuote("lambda"), "must be a numeric vector, a function, an image,", "a list of functions, or a list of images")) # check values of lambda if(argtype == "im") { for(j in seq_len(m)) { lamj <- lambda[[j]] if(!(lamj$type %in% c("real", "integer"))) stop("lambda must be numeric-valued or integer-valued") lrange <- range(lamj) if(any(is.infinite(lrange))) stop("Infinite pixel values not permitted") if(lrange[1] < 0) stop("Negative pixel values not permitted") } } # determine uniform bound if(!is.null(lmax)) { stopifnot(is.numeric(lmax)) if(length(lmax) != m) { if(length(lmax) == 1) { lmax <- rep.int(lmax, m) } else stop("Length of lmax does not match length of lambda") } } else { 
# compute lmax lmax <- numeric(m) for(j in seq_len(m)) { lamj <- lambda[[j]] if(is.function(lamj)) { X <- pointsOnLines(L, np=10000) lambdaX <- lamj(X$x, X$y, ...) lmax[j] <- max(lambdaX, na.rm=TRUE) } else if(is.im(lamj)) lmax[j] <- max(lamj) } if(!all(is.finite(lmax))) stop("Infinite values of lambda obtained") if(any(lmax < 0)) stop("Negative upper bound for lambda obtained") names(lmax) <- names(lambda) } # Lewis-Shedler (rejection) method Y <- datagen.runifpoisppOnLines(lmax, L) n <- nrow(Y) if(n == 0) return(Y) # evaluate lambda at each simulated point if(m == 1) { lambda <- lambda[[1]] markindex <- 1 if(is.function(lambda)) lambdaY <- lambda(Y$x, Y$y, ...) else lambdaY <- safelookup(lambda, as.ppp(Y, W=as.owin(L))) } else { lambdaY <- numeric(n) markindex <- as.integer(Y$marks) for(j in seq_len(m)) { lamj <- lambda[[j]] jrows <- (markindex == j) Yj <- Y[jrows, , drop=FALSE] if(is.function(lamj)) lambdaY[jrows] <- lamj(Yj$x, Yj$y, ...) else lambdaY[jrows] <- safelookup(lamj, as.ppp(Yj, W=as.owin(L))) } } lambdaY[is.na(lambdaY)] <- 0 # accept/reject pY <- lambdaY/lmax[markindex] if(check) { if(any(pY < 0)) warning("Negative values of lambda obtained") if(any(pY > 1)) warning("lmax is not an upper bound for lambda") } retain <- (runif(n) < pY) Y <- Y[retain, , drop=FALSE] return(Y) } spatstat/R/dg.R0000755000176200001440000001331213115271075013040 0ustar liggesusers# # dg.S # # $Revision: 1.19 $ $Date: 2017/06/05 10:31:58 $ # # Diggle-Gratton pair potential # # DiggleGratton <- local({ # .... auxiliary functions ...... 
diggraterms <- function(X, Y, idX, idY, delta, rho) { stopifnot(is.numeric(delta)) stopifnot(is.numeric(rho)) stopifnot(delta < rho) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] idXsort <- idX[oX] idYsort <- idY[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine out <- .C("Ediggra", nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), ddelta = as.double(delta), rrho = as.double(rho), values = as.double(double(nX)), PACKAGE = "spatstat") answer <- integer(nX) answer[oX] <- out$values return(answer) } # .......... template object .......... BlankDG <- list( name = "Diggle-Gratton process", creator = "DiggleGratton", family = "pairwise.family", #evaluated later pot = function(d, par) { delta <- par$delta rho <- par$rho above <- (d > rho) inrange <- (!above) & (d > delta) h <- above + inrange * (d - delta)/(rho - delta) return(log(h)) }, par = list(delta=NULL, rho=NULL), # to be filled in later parnames = list("lower limit delta", "upper limit rho"), selfstart = function(X, self) { # self starter for DiggleGratton nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(!is.na(delta <- self$par$delta)) { # value fixed by user or previous invocation # check it if(md < delta) warning(paste("Hard core distance delta is too large;", "some data points will have zero probability")) return(self) } if(md == 0) warning(paste("Pattern contains duplicated points:", "hard core distance delta must be zero")) # take hard core = minimum interpoint distance * n/(n+1) deltaX <- md * nX/(nX+1) DiggleGratton(delta=deltaX, rho=self$par$rho) }, init = function(self) { delta <- self$par$delta rho <- self$par$rho if(!is.numeric(rho) || length(rho) != 1L) stop("upper limit rho must be 
a single number") stopifnot(is.finite(rho)) if(!is.na(delta)) { if(!is.numeric(delta) || length(delta) != 1L) stop("lower limit delta must be a single number") stopifnot(delta >= 0) stopifnot(rho > delta) } else stopifnot(rho >= 0) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { kappa <- as.numeric(coeffs[1L]) return(list(param=list(kappa=kappa), inames="exponent kappa", printable=dround(kappa))) }, valid = function(coeffs, self) { kappa <- as.numeric(coeffs[1L]) return(is.finite(kappa) && (kappa >= 0)) }, project = function(coeffs, self) { kappa <- as.numeric(coeffs[1L]) if(is.finite(kappa) && (kappa >= 0)) return(NULL) return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { rho <- self$par$rho if(all(is.na(coeffs))) return(rho) kappa <- coeffs[1L] delta <- self$par$delta if(abs(kappa) <= epsilon) return(delta) else return(rho) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) 
{ # fast evaluator for DiggleGratton interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for DiggleGratton") delta <- potpars$delta rho <- potpars$rho idX <- seq_len(npoints(X)) idU <- rep.int(-1L, npoints(U)) idU[EqualPairs[,2L]] <- EqualPairs[,1L] answer <- diggraterms(U, X, idU, idX, delta, rho) answer <- log(pmax.int(0, answer)) return(matrix(answer, ncol=1L)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral rho <- self$par$rho delta <- self$par$delta width <- rho - delta kappa <- coeffs[1L] ans <- pi * (rho^2 - 2 * rho* width/(kappa + 1) + 2 * width^2/((kappa + 1) * (kappa + 2))) return(ans) } ) class(BlankDG) <- "interact" DiggleGratton <- function(delta=NA, rho) { instantiate.interact(BlankDG, list(delta=delta, rho=rho)) } DiggleGratton <- intermaker(DiggleGratton, BlankDG) DiggleGratton }) spatstat/R/split.ppp.R0000755000176200001440000002215613115271120014374 0ustar liggesusers# # split.ppp.R # # $Revision: 1.32 $ $Date: 2015/08/05 02:50:25 $ # # split.ppp and "split<-.ppp" # ######################################### split.ppp <- function(x, f = marks(x), drop=FALSE, un=NULL, reduce=FALSE, ...) 
{ verifyclass(x, "ppp") mf <- markformat(x) fgiven <- !missing(f) if(is.null(un)) { un <- !fgiven && (mf != "dataframe") } else un <- as.logical(un) if(!fgiven) { # f defaults to marks of x switch(mf, none={ stop("f is missing and there are no marks") }, vector={ if(!is.multitype(x)) stop("f is missing and the pattern is not multitype") f <- fsplit <- marks(x) }, dataframe={ f <- fsplit <- firstfactor(marks(x)) if(is.null(f)) stop("Data frame of marks contains no factors") }) splittype <- "factor" } else { # f was given fsplit <- f if(is.factor(f)) { splittype <- "factor" } else if(is.logical(f)) { splittype <- "factor" f <- factor(f) } else if(is.tess(f)) { # f is a tessellation: determine the grouping f <- marks(cut(x, fsplit)) splittype <- "tess" } else if(is.owin(f)) { # f is a window: coerce to a tessellation W <- as.owin(x) fsplit <- tess(tiles=list(fsplit, setminus.owin(W, fsplit)), window=W) f <- marks(cut(x, fsplit)) splittype <- "tess" } else if(is.im(f)) { # f is an image: coerce to a tessellation fsplit <- tess(image=f) f <- marks(cut(x, fsplit)) splittype <- "tess" } else if(is.character(f) && length(f) == 1) { # f is the name of a column of marks marx <- marks(x) if(is.data.frame(marx) && (f %in% names(marx))) fsplit <- f <- marx[[f]] else stop(paste("The name", sQuote(f), "does not match any column of marks")) splittype <- "factor" } else stop(paste("f must be", "a factor, a logical vector,", "a tessellation, a window, an image,", "or the name of a column of marks")) if(length(f) != npoints(x)) stop("length(f) must equal the number of points in x") } # At this point # 'f' is a factor that can be used to separate the points # 'fsplit' is the object (either a factor or a tessellation) # that determines the split (and can be "un-split") lev <- levels(f) if(drop) { # remove components that don't contain points retain <- (table(f) > 0) lev <- lev[retain] switch(splittype, tess = { # remove tiles that don't contain points fsplit <- fsplit[retain] }, factor 
= { # delete levels that don't occur fsplit <- factor(fsplit, levels=lev) }, stop("Internal error: wrong format for fsplit")) } ## remove marks that will not be retained if(un && reduce && mf == "dataframe") warning("Incompatible arguments un=TRUE and reduce=TRUE: assumed un=TRUE") if(un) { x <- unmark(x) } else if(reduce && !fgiven && mf == "dataframe") { # remove the column of marks that determined the split j <- findfirstfactor(marks(x)) if(!is.null(j)) marks(x) <- marks(x)[, -j] } ## split the data out <- list() fok <- !is.na(f) for(l in lev) out[[paste(l)]] <- x[fok & (f == l)] ## if(splittype == "tess") { til <- tiles(fsplit) for(i in seq_along(out)) out[[i]]$window <- til[[i]] } class(out) <- c("splitppp", "ppplist", "solist", class(out)) attr(out, "fsplit") <- fsplit attr(out, "fgroup") <- f return(out) } "split<-.ppp" <- function(x, f=marks(x), drop=FALSE, un=missing(f), ..., value) { verifyclass(x, "ppp") W <- x$window mf <- markformat(x) # evaluate `un' before assigning value of 'f' force(un) # validate assignment value stopifnot(is.list(value)) if(!all(unlist(lapply(value, is.ppp)))) stop(paste("Each entry of", sQuote("value"), "must be a point pattern")) ismark <- unlist(lapply(value, is.marked)) if(any(ismark) && !all(ismark)) stop(paste("Some entries of", sQuote("value"), "are marked, and others are unmarked")) vmarked <- all(ismark) # determine type of splitting if(missing(f)) { # f defaults to marks of x switch(mf, none={ stop("f is missing and there are no marks") }, vector={ if(!is.multitype(x)) stop("f is missing and the pattern is not multitype") f <- fsplit <- marks(x) }, dataframe={ f <- fsplit <- firstfactor(marks(x)) if(is.null(f)) stop("Data frame of marks contains no factors") }) } else { # f given fsplit <- f if(is.tess(f)) { # f is a tessellation: determine the grouping f <- marks(cut(x, fsplit)) } else if(is.im(f)) { # f is an image: determine the grouping fsplit <- tess(image=f) f <- marks(cut(x, fsplit)) } else if(is.character(f) && 
length(f) == 1) { # f is the name of a column of marks marx <- marks(x) if(is.data.frame(marx) && (f %in% names(marx))) fsplit <- f <- marx[[f]] else stop(paste("The name", sQuote(f), "does not match any column of marks")) } else if(is.logical(f)) { f <- factor(f) } else if(!is.factor(f)) stop(paste("f must be", "a factor, a logical vector, a tessellation, an image,", "or the name of a column of marks")) if(length(f) != x$n) stop("length(f) must equal the number of points in x") } # all.levels <- lev <- levels(f) if(!drop) levtype <- "levels of f" else { levtype <- "levels which f actually takes" # remove components that don't contain points lev <- lev[table(f) > 0] } if(length(value) != length(lev)) stop(paste("length of", sQuote("value"), "should equal the number of", levtype)) # ensure value[[i]] is associated with lev[i] if(!is.null(names(value))) { if(!all(names(value) %in% as.character(lev))) stop(paste("names of", sQuote("value"), "should be levels of f")) value <- value[lev] } names(value) <- NULL # restore the marks, if they were discarded if(un && is.marked(x)) { if(vmarked) warning(paste(sQuote("value"), "contains marked point patterns:", "this is inconsistent with un=TRUE; marks ignored.")) for(i in seq_along(value)) value[[i]] <- value[[i]] %mark% factor(lev[i], levels=all.levels) } # handle NA's in splitting factor if(any(isNA <- is.na(f))) { xNA <- x[isNA] if(un && is.marked(x)) xNA <- xNA %mark% factor(NA, levels=all.levels) value <- append(value, list(xNA)) } # put Humpty together again if(npoints(x) == length(f) && length(levels(f)) == length(value) && all(table(f) == sapply(value, npoints))) { ## exact correspondence out <- x for(i in seq_along(levels(f))) out[ f == lev[i] ] <- value[[i]] } else { out <- do.call(superimpose,c(value,list(W=W))) } return(out) } print.splitppp <- function(x, ...) 
{ f <- attr(x, "fsplit") what <- if(is.tess(f)) "tessellation" else if(is.factor(f)) "factor" else "unknown data" cat(paste("Point pattern split by", what, "\n")) nam <- names(x) for(i in seq_along(x)) { cat(paste("\n", nam[i], ":\n", sep="")) print(x[[i]]) } return(invisible(NULL)) } summary.splitppp <- function(object, ...) { x <- lapply(object, summary, ...) class(x) <- "summary.splitppp" x } print.summary.splitppp <- function(x, ...) { class(x) <- "anylist" print(x) invisible(NULL) } "[.splitppp" <- function(x, ...) { f <- attr(x, "fsplit") # invoke list method on x class(x) <- "list" y <- x[...] # then make it a 'splitppp' object too class(y) <- c("splitppp", class(y)) if(is.tess(f)) { fsplit <- f[...] } else if(is.factor(f)) { lev <- levels(f) sublev <- lev[...] subf <- f[f %in% sublev] fsplit <- factor(subf, levels=lev) } else stop("Unknown splitting type") attr(y, "fsplit") <- fsplit y } "[<-.splitppp" <- function(x, ..., value) { if(!all(unlist(lapply(value, is.ppp)))) stop("replacement value must be a list of point patterns") f <- attr(x, "fsplit") # invoke list method class(x) <- "list" x[...] 
<- value # then make it a 'splitppp' object too class(x) <- c("splitppp", class(x)) if(is.tess(f)) { fsplit <- f } else if(is.factor(f)) { lev <- levels(f) fsplit <- factor(rep.int(lev, unlist(lapply(x, npoints))), levels=lev) } attr(x, "fsplit") <- fsplit x } density.splitppp <- function(x, ..., se=FALSE) { density.ppplist(x, ..., se=se) } plot.splitppp <- function(x, ..., main) { if(missing(main)) main <- short.deparse(substitute(x)) do.call(plot.solist, resolve.defaults(list(x=x, main=main), list(...), list(equal.scales=TRUE))) } as.layered.splitppp <- function(X) { do.call(layered, X) } spatstat/R/fv.R0000755000176200001440000013262613131546203013067 0ustar liggesusers## ## ## fv.R ## ## class "fv" of function value objects ## ## $Revision: 1.148 $ $Date: 2017/07/13 01:26:02 $ ## ## ## An "fv" object represents one or more related functions ## of the same argument, such as different estimates of the K function. ## ## It is a data.frame with additional attributes ## ## argu column name of the function argument (typically "r") ## ## valu column name of the recommended function ## ## ylab generic label for y axis e.g. 
K(r) ## ## fmla default plot formula ## ## alim recommended range of function argument ## ## labl recommended xlab/ylab for each column ## ## desc longer description for each column ## ## unitname name of unit of length for 'r' ## ## shade (optional) column names of upper & lower limits ## of shading - typically a confidence interval ## ## Objects of this class are returned by Kest(), etc ## ################################################################## ## creator fv <- function(x, argu="r", ylab=NULL, valu, fmla=NULL, alim=NULL, labl=names(x), desc=NULL, unitname=NULL, fname=NULL, yexp=ylab) { stopifnot(is.data.frame(x)) ## check arguments stopifnot(is.character(argu)) if(!is.null(ylab)) stopifnot(is.character(ylab) || is.language(ylab)) if(!missing(yexp)) { if(is.null(yexp)) yexp <- ylab else stopifnot(is.language(yexp)) } stopifnot(is.character(valu)) if(!(argu %in% names(x))) stop(paste(sQuote("argu"), "must be the name of a column of x")) if(!(valu %in% names(x))) stop(paste(sQuote("valu"), "must be the name of a column of x")) if(is.null(fmla)) fmla <- paste(valu, "~", argu) else if(inherits(fmla, "formula")) { ## convert formula to string fmla <- flat.deparse(fmla) } else if(!is.character(fmla)) stop(paste(sQuote("fmla"), "should be a formula or a string")) if(missing(alim)) { ## Note: if alim is given as NULL, it is not changed. 
argue <- x[[argu]] alim <- range(argue[is.finite(argue)]) } else if(!is.null(alim)) { if(!is.numeric(alim) || length(alim) != 2) stop(paste(sQuote("alim"), "should be a vector of length 2")) } if(!is.character(labl)) stop(paste(sQuote("labl"), "should be a vector of strings")) stopifnot(length(labl) == ncol(x)) if(is.null(desc)) desc <- character(ncol(x)) else { stopifnot(is.character(desc)) stopifnot(length(desc) == ncol(x)) nbg <- is.na(desc) if(any(nbg)) desc[nbg] <- "" } if(!is.null(fname)) stopifnot(is.character(fname) && length(fname) %in% 1:2) ## pack attributes attr(x, "argu") <- argu attr(x, "valu") <- valu attr(x, "ylab") <- ylab attr(x, "yexp") <- yexp attr(x, "fmla") <- fmla attr(x, "alim") <- alim attr(x, "labl") <- labl attr(x, "desc") <- desc attr(x, "units") <- as.units(unitname) attr(x, "fname") <- fname attr(x, "dotnames") <- NULL attr(x, "shade") <- NULL ## class(x) <- c("fv", class(x)) return(x) } .Spatstat.FvAttrib <- c( "argu", "valu", "ylab", "yexp", "fmla", "alim", "labl", "desc", "units", "fname", "dotnames", "shade") as.data.frame.fv <- function(x, ...) { stopifnot(is.fv(x)) fva <- .Spatstat.FvAttrib attributes(x)[fva] <- NULL class(x) <- "data.frame" x } is.fv <- function(x) { inherits(x, "fv") } ## as.fv <- function(x) { UseMethod("as.fv") } as.fv.fv <- function(x) x as.fv.data.frame <- function(x) { if(ncol(x) < 2) stop("Need at least 2 columns") return(fv(x, names(x)[1L], , names(x)[2L])) } as.fv.matrix <- function(x) { y <- as.data.frame(x) if(any(bad <- is.na(names(y)))) names(y)[bad] <- paste0("V", which(bad)) return(as.fv.data.frame(y)) } ## other methods for as.fv are described in the files for the relevant classes. 
vanilla.fv <- function(x) { ## remove everything except basic fv characteristics retain <- c("names", "row.names", .Spatstat.FvAttrib) attributes(x) <- attributes(x)[retain] class(x) <- c("fv", "data.frame") return(x) } print.fv <- local({ maxwords <- function(z, m) { max(0, which(cumsum(nchar(z) + 1) <= m+1)) } usewords <- function(z, n) paste(z[1:n], collapse=" ") print.fv <- function(x, ..., tight=FALSE) { verifyclass(x, "fv") terselevel <- spatstat.options("terse") showlabels <- waxlyrical('space', terselevel) showextras <- waxlyrical('extras', terselevel) nama <- names(x) a <- attributes(x) if(!is.null(ylab <- a$ylab)) { if(is.language(ylab)) ylab <- flat.deparse(ylab) } if(!inherits(x, "envelope")) { splat("Function value object", paren(paste("class", sQuote("fv")))) if(!is.null(ylab)) { xlab <- fvlabels(x, expand=TRUE)[[a$argu]] splat("for the function", xlab, "->", ylab) } } ## Descriptions .. desc <- a$desc ## .. may require insertion of ylab if(!is.null(ylab)) desc <- sprintf(desc, ylab) ## Labels .. 
labl <- fvlabels(x, expand=TRUE) ## Avoid overrunning text margin maxlinewidth <- options('width')[[1L]] key.width <- max(nchar(nama)) labl.width <- if(showlabels) max(nchar(labl), nchar("Math.label")) else 0 desc.width <- max(nchar(desc), nchar("Description")) fullwidth <- key.width + labl.width + desc.width + 2 if(fullwidth > maxlinewidth && tight) { ## try shortening the descriptions so that it all fits on one line spaceleft <- maxlinewidth - (key.width + labl.width + 2) desc <- truncline(desc, spaceleft) desc.width <- max(nchar(desc), nchar("Description")) fullwidth <- key.width + labl.width + desc.width + 2 } spaceleft <- maxlinewidth - (key.width + 1) if(desc.width > spaceleft) { ## Descriptions need to be truncated to max line width desc <- truncline(desc, spaceleft) desc.width <- max(nchar(desc), nchar("Description")) fullwidth <- key.width + labl.width + desc.width + 2 } if(showextras) { fullwidth <- pmin(maxlinewidth, fullwidth) fullline <- paste0(rep(".", fullwidth), collapse="") cat(fullline, fill=TRUE) } df <- data.frame(Math.label=labl, Description=desc, row.names=nama, stringsAsFactors=FALSE) if(!showlabels) df <- df[,-1L,drop=FALSE] print(df, right=FALSE) ## if(showextras) { cat(fullline, fill=TRUE) splat("Default plot formula: ", flat.deparse(as.formula(a$fmla))) splat("where", dQuote("."), "stands for", commasep(sQuote(fvnames(x, ".")), ", ")) if(!is.null(a$shade)) splat("Columns", commasep(sQuote(a$shade)), "will be plotted as shading (by default)") alim <- a$alim splat("Recommended range of argument", paste0(a$argu, ":"), if(!is.null(alim)) prange(signif(alim, 5)) else "not specified") rang <- signif(range(with(x, .x)), 5) splat("Available range", "of argument", paste0(a$argu, ":"), prange(rang)) ledge <- summary(unitname(x))$legend if(!is.null(ledge)) splat(ledge) } return(invisible(NULL)) } print.fv }) ## manipulating the names in fv objects .Spatstat.FvAbbrev <- c( ".x", ".y", ".s", ".", "*", ".a") fvnames <- function(X, a=".") { 
verifyclass(X, "fv") if(!is.character(a) || length(a) > 1) stop("argument a must be a character string") switch(a, ".y"={ return(attr(X, "valu")) }, ".x"={ return(attr(X, "argu")) }, ".s"={ return(attr(X, "shade")) }, "." = { ## The specified 'dotnames' dn <- attr(X, "dotnames") if(is.null(dn)) dn <- fvnames(X, "*") return(dn) }, "*"=, ".a"={ ## all column names other than the function argument allvars <- names(X) argu <- attr(X, "argu") nam <- allvars[allvars != argu] nam <- rev(nam) ## convention return(nam) }, { if(a %in% names(X)) return(a) stop(paste("Unrecognised abbreviation", dQuote(a))) } ) } "fvnames<-" <- function(X, a=".", value) { verifyclass(X, "fv") if(!is.character(a) || length(a) > 1) stop(paste("argument", sQuote("a"), "must be a character string")) ## special cases if(a == "." && length(value) == 0) { ## clear the dotnames attr(X, "dotnames") <- NULL return(X) } if(a == ".a" || a == "*") { warning("Column names unchanged: use names(x) <- value to change them") return(X) } ## validate the names switch(a, ".x"=, ".y"={ if(!is.character(value) || length(value) != 1) stop("value should be a single string") }, ".s"={ if(!is.character(value) || length(value) != 2) stop("value should be a vector of 2 character strings") }, "."={ if(!is.character(value)) stop("value should be a character vector") }, stop(paste("Unrecognised abbreviation", dQuote(a))) ) ## check the names match existing column names tags <- names(X) if(any(nbg <- !(value %in% tags))) stop(paste(ngettext(sum(nbg), "The string", "The strings"), commasep(dQuote(value[nbg])), ngettext(sum(nbg), "does not match the name of any column of X", "do not match the names of any columns of X"))) ## reassign names switch(a, ".x"={ attr(X, "argu") <- value }, ".y"={ attr(X, "valu") <- value }, ".s"={ attr(X, "shade") <- value }, "."={ attr(X, "dotnames") <- value }) return(X) } "names<-.fv" <- function(x, value) { nama <- colnames(x) indx <- which(nama == fvnames(x, ".x")) indy <- which(nama == 
fvnames(x, ".y")) inds <- which(nama %in% fvnames(x, ".s")) ind. <- which(nama %in% fvnames(x, ".")) ## rename columns of data frame x <- NextMethod("names<-") ## adjust other tags fvnames(x, ".x") <- value[indx] fvnames(x, ".y") <- value[indy] fvnames(x, ".") <- value[ind.] if(length(inds) > 0) fvnames(x, ".s") <- value[inds] return(x) } fvlabels <- function(x, expand=FALSE) { lab <- attr(x, "labl") if(expand && !is.null(fname <- attr(x, "fname"))) { ## expand plot labels using function name nstrings <- max(substringcount("%s", lab)) ## pad with blanks nextra <- nstrings - length(fname) if(nextra > 0) fname <- c(fname, rep("", nextra)) ## render lab <- do.call(sprintf, append(list(lab), as.list(fname))) } ## remove empty space lab <- gsub(" ", "", lab) names(lab) <- names(x) return(lab) } "fvlabels<-" <- function(x, value) { stopifnot(is.fv(x)) stopifnot(is.character(value)) stopifnot(length(value) == length(fvlabels(x))) attr(x, "labl") <- value return(x) } flatfname <- function(x) { fn <- if(is.character(x)) x else attr(x, "fname") if(length(fn) > 1) fn <- paste0(fn[1L], "[", paste(fn[-1L], collapse=" "), "]") as.name(fn) } makefvlabel <- function(op=NULL, accent=NULL, fname, sub=NULL, argname="r") { ## de facto standardised label a <- "%s" if(!is.null(accent)) a <- paste0(accent, paren(a)) ## eg hat(%s) if(!is.null(op)) a <- paste0("bold", paren(op), "~", a) ## eg bold(var)~hat(%s) if(is.null(sub)) { if(length(fname) != 1) { a <- paste0(a, "[%s]") a <- paren(a, "{") } } else { if(length(fname) == 1) { a <- paste0(a, paren(sub, "[")) } else { a <- paste0(a, paren("%s", "["), "^", paren(sub, "{")) a <- paren(a, "{") } } a <- paste0(a, paren(argname)) return(a) } fvlabelmap <- local({ magic <- function(x) { subx <- paste("substitute(", x, ", NULL)") out <- try(eval(parse(text=subx)), silent=TRUE) if(inherits(out, "try-error")) out <- as.name(make.names(subx)) out } fvlabelmap <- function(x, dot=TRUE) { labl <- fvlabels(x, expand=TRUE) ## construct mapping from 
identifiers to labels map <- as.list(labl) map <- lapply(map, magic) names(map) <- colnames(x) if(dot) { ## also map "." and ".a" to name of target function if(!is.null(ye <- attr(x, "yexp"))) map <- append(map, list("."=ye, ".a"=ye)) ## map other fvnames to their corresponding labels map <- append(map, list(".x"=map[[fvnames(x, ".x")]], ".y"=map[[fvnames(x, ".y")]])) if(!is.null(fvnames(x, ".s"))) { shex <- unname(map[fvnames(x, ".s")]) shadexpr <- substitute(c(A,B), list(A=shex[[1L]], B=shex[[2L]])) map <- append(map, list(".s" = shadexpr)) } } return(map) } fvlabelmap }) ## map from abbreviations to expressions involving the column names, ## for use in eval(substitute(...)) fvexprmap <- function(x) { dotnames <- fvnames(x, ".") u <- if(length(dotnames) == 1) as.name(dotnames) else as.call(lapply(c("cbind", dotnames), as.name)) ux <- as.name(fvnames(x, ".x")) uy <- as.name(fvnames(x, ".y")) umap <- list(.=u, .a=u, .x=ux, .y=uy) if(!is.null(fvnames(x, ".s"))) { shnm <- fvnames(x, ".s") shadexpr <- substitute(cbind(A,B), list(A=as.name(shnm[1L]), B=as.name(shnm[2L]))) umap <- append(umap, list(.s = shadexpr)) } return(umap) } fvlegend <- local({ fvlegend <- function(object, elang) { ## Compute mathematical legend(s) for column(s) in fv object ## transformed by language expression 'elang'. ## The expression must already be in 'expanded' form. ## The result is an expression, or expression vector. ## The j-th entry of the vector is an expression for the ## j-th column of function values. 
ee <- distributecbind(as.expression(elang)) map <- fvlabelmap(object, dot = TRUE) eout <- as.expression(lapply(ee, invokemap, map=map)) return(eout) } invokemap <- function(ei, map) { eval(substitute(substitute(e, mp), list(e = ei, mp = map))) } fvlegend }) bind.fv <- function(x, y, labl=NULL, desc=NULL, preferred=NULL, clip=FALSE) { verifyclass(x, "fv") ax <- attributes(x) if(is.fv(y)) { ## y is already an fv object ay <- attributes(y) if(!identical(ax$fname, ay$fname)) { ## x and y represent different functions ## expand the labels separately fvlabels(x) <- fvlabels(x, expand=TRUE) fvlabels(y) <- fvlabels(y, expand=TRUE) ax <- attributes(x) ay <- attributes(y) } ## check compatibility of 'r' values xr <- ax$argu yr <- ay$argu rx <- x[[xr]] ry <- y[[yr]] if(length(rx) != length(ry)) { if(!clip) stop("fv objects x and y have incompatible domains") # restrict both objects to a common domain ra <- intersect.ranges(range(rx), range(ry)) x <- x[inside.range(rx, ra), ] y <- y[inside.range(ry, ra), ] rx <- x[[xr]] ry <- y[[yr]] } if(length(rx) != length(ry) || max(abs(rx-ry)) > .Machine$double.eps) stop("fv objects x and y have incompatible values of r") ## reduce y to data frame and strip off 'r' values ystrip <- as.data.frame(y) yrpos <- which(colnames(ystrip) == yr) ystrip <- ystrip[, -yrpos, drop=FALSE] ## determine descriptors if(is.null(labl)) labl <- attr(y, "labl")[-yrpos] if(is.null(desc)) desc <- attr(y, "desc")[-yrpos] ## y <- ystrip } else { ## y is a matrix or data frame y <- as.data.frame(y) } ## check for duplicated column names allnames <- c(colnames(x), colnames(y)) if(any(dup <- duplicated(allnames))) { nbg <- unique(allnames[dup]) nn <- length(nbg) warning(paste("The column", ngettext(nn, "name", "names"), commasep(sQuote(nbg)), ngettext(nn, "was", "were"), "duplicated. 
Unique names were generated")) allnames <- make.names(allnames, unique=TRUE, allow_ = FALSE) colnames(y) <- allnames[ncol(x) + seq_len(ncol(y))] } if(is.null(labl)) labl <- paste("%s[", colnames(y), "](r)", sep="") else if(length(labl) != ncol(y)) stop(paste("length of", sQuote("labl"), "does not match number of columns of y")) if(is.null(desc)) desc <- character(ncol(y)) else if(length(desc) != ncol(y)) stop(paste("length of", sQuote("desc"), "does not match number of columns of y")) if(is.null(preferred)) preferred <- ax$valu xy <- cbind(as.data.frame(x), y) z <- fv(xy, ax$argu, ax$ylab, preferred, ax$fmla, ax$alim, c(ax$labl, labl), c(ax$desc, desc), unitname=unitname(x), fname=ax$fname, yexp=ax$yexp) return(z) } cbind.fv <- function(...) { a <- list(...) n <- length(a) if(n == 0) return(NULL) if(n == 1) { ## single argument - extract it a <- a[[1L]] ## could be an fv object if(is.fv(a)) return(a) n <- length(a) } z <- a[[1L]] if(!is.fv(z)) stop("First argument should be an object of class fv") if(n > 1) for(i in 2:n) z <- bind.fv(z, a[[i]]) return(z) } collapse.anylist <- collapse.fv <- local({ collapse.fv <- function(object, ..., same=NULL, different=NULL) { if(is.fv(object)) { x <- list(object, ...) 
} else if(inherits(object, "anylist")) { x <- append(object, list(...)) } else if(is.list(object) && all(sapply(object, is.fv))) { x <- append(object, list(...)) } else stop("Format not understood") if(!all(unlist(lapply(x, is.fv)))) stop("arguments should be objects of class fv") if(is.null(same)) same <- character(0) if(is.null(different)) different <- character(0) if(anyDuplicated(c(same, different))) stop(paste("The arguments", sQuote("same"), "and", sQuote("different"), "should not have entries in common")) either <- c(same, different) ## validate if(length(either) == 0) stop(paste("At least one column of values must be selected", "using the arguments", sQuote("same"), "and", sQuote("different"))) nbg <- unique(unlist(lapply(x, missingnames, expected=either))) if((nbad <- length(nbg)) > 0) stop(paste(ngettext(nbad, "The name", "The names"), commasep(sQuote(nbg)), ngettext(nbad, "is", "are"), "not present in the function objects")) ## names for different versions versionnames <- names(x) if(is.null(versionnames)) versionnames <- paste("x", seq_along(x), sep="") shortnames <- abbreviate(versionnames, minlength=12) ## extract the common values y <- x[[1L]] xname <- fvnames(y, ".x") yname <- fvnames(y, ".y") if(length(same) == 0) { ## The column of 'preferred values' .y cannot be deleted ## retain .y for now and delete it later. 
z <- y[, c(xname, yname)] } else { if(!(yname %in% same)) fvnames(y, ".y") <- same[1L] z <- y[, c(xname, same)] } dotnames <- same ## now merge the different values for(i in seq_along(x)) { ## extract values for i-th object xi <- x[[i]] wanted <- (names(xi) %in% different) y <- as.data.frame(xi)[, wanted, drop=FALSE] desc <- attr(xi, "desc")[wanted] labl <- attr(xi, "labl")[wanted] ## relabel prefix <- shortnames[i] preamble <- versionnames[i] names(y) <- if(ncol(y) == 1) prefix else paste(prefix,names(y),sep="") dotnames <- c(dotnames, names(y)) ## glue onto fv object z <- bind.fv(z, y, labl=paste(prefix, labl, sep="~"), desc=paste(preamble, desc)) } if(length(same) == 0) { ## remove the second column which was retained earlier fvnames(z, ".y") <- names(z)[3L] z <- z[, -2L] } fvnames(z, ".") <- dotnames return(z) } missingnames <- function(z, expected) { expected[!(expected %in% names(z))] } collapse.fv }) ## rename one of the columns of an fv object tweak.fv.entry <- function(x, current.tag, new.labl=NULL, new.desc=NULL, new.tag=NULL) { hit <- (names(x) == current.tag) if(!any(hit)) return(x) ## update descriptions of column i <- min(which(hit)) if(!is.null(new.labl)) attr(x, "labl")[i] <- new.labl if(!is.null(new.desc)) attr(x, "desc")[i] <- new.desc ## adjust column tag if(!is.null(new.tag)) { names(x)[i] <- new.tag ## update dotnames dn <- fvnames(x, ".") if(current.tag %in% dn ) { dn[dn == current.tag] <- new.tag fvnames(x, ".") <- dn } ## if the tweaked column is the preferred value, adjust accordingly if(attr(x, "valu") == current.tag) attr(x, "valu") <- new.tag ## if the tweaked column is the function argument, adjust accordingly if(attr(x, "argu") == current.tag) attr(x, "valu") <- new.tag } return(x) } ## change some or all of the auxiliary text in an fv object rebadge.fv <- function(x, new.ylab, new.fname, tags, new.desc, new.labl, new.yexp=new.ylab, new.dotnames, new.preferred, new.formula, new.tags) { if(!missing(new.ylab)) attr(x, "ylab") <- new.ylab 
if(!missing(new.yexp) || !missing(new.ylab)) attr(x, "yexp") <- new.yexp if(!missing(new.fname)) attr(x, "fname") <- new.fname if(!missing(tags) && !(missing(new.desc) && missing(new.labl) && missing(new.tags))) { nama <- names(x) desc <- attr(x, "desc") labl <- attr(x, "labl") valu <- attr(x, "valu") for(i in seq_along(tags)) if(!is.na(m <- match(tags[i], nama))) { if(!missing(new.desc)) desc[m] <- new.desc[i] if(!missing(new.labl)) labl[m] <- new.labl[i] if(!missing(new.tags)) { names(x)[m] <- new.tags[i] if(tags[i] == valu) attr(x, "valu") <- new.tags[i] } } attr(x, "desc") <- desc attr(x, "labl") <- labl } if(!missing(new.dotnames)) fvnames(x, ".") <- new.dotnames if(!missing(new.preferred)) { stopifnot(new.preferred %in% names(x)) attr(x, "valu") <- new.preferred } if(!missing(new.formula)) formula(x) <- new.formula return(x) } ## common invocations to label a function like Kdot or Kcross rebadge.as.crossfun <- function(x, main, sub=NULL, i, j) { i <- make.parseable(i) j <- make.parseable(j) if(is.null(sub)) { ylab <- substitute(main[i, j](r), list(main=main, i=i, j=j)) fname <- c(main, paste0("list", paren(paste(i, j, sep=",")))) yexp <- substitute(main[list(i, j)](r), list(main=main, i=i, j=j)) } else { ylab <- substitute(main[sub, i, j](r), list(main=main, sub=sub, i=i, j=j)) fname <- c(main, paste0("list", paren(paste(sub, i, j, sep=",")))) yexp <- substitute(main[list(sub, i, j)](r), list(main=main, sub=sub, i=i, j=j)) } y <- rebadge.fv(x, new.ylab=ylab, new.fname=fname, new.yexp=yexp) return(y) } rebadge.as.dotfun <- function(x, main, sub=NULL, i) { i <- make.parseable(i) if(is.null(sub)) { ylab <- substitute(main[i ~ dot](r), list(main=main, i=i)) fname <- c(main, paste0(i, "~symbol(\"\\267\")")) yexp <- substitute(main[i ~ symbol("\267")](r), list(main=main, i=i)) } else { ylab <- substitute(main[sub, i ~ dot](r), list(main=main, sub=sub, i=i)) fname <- c(main, paste0("list", paren(paste0(sub, ",", i, "~symbol(\"\\267\")")))) yexp <- 
substitute(main[list(sub, i ~ symbol("\267"))](r), list(main=main, sub=sub, i=i)) } y <- rebadge.fv(x, new.ylab=ylab, new.fname=fname, new.yexp=yexp) return(y) } ## even simpler wrapper for rebadge.fv rename.fv <- function(x, fname, ylab, yexp=ylab) { stopifnot(is.fv(x)) stopifnot(is.character(fname) && (length(fname) %in% 1:2)) argu <- fvnames(x, ".x") if(missing(ylab) || is.null(ylab)) ylab <- switch(length(fname), substitute(fn(argu), list(fn=as.name(fname), argu=as.name(argu))), substitute(fn[fsub](argu), list(fn=as.name(fname[1]), fsub=as.name(fname[2]), argu=as.name(argu)))) if(missing(yexp) || is.null(yexp)) yexp <- ylab y <- rebadge.fv(x, new.fname=fname, new.ylab=ylab, new.yexp=yexp) return(y) } ## subset extraction operator "[.fv" <- function(x, i, j, ..., drop=FALSE) { igiven <- !missing(i) jgiven <- !missing(j) y <- as.data.frame(x) if(igiven && jgiven) z <- y[i, j, drop=drop] else if(igiven) z <- y[i, , drop=drop] else if(jgiven) z <- y[ , j, drop=drop] else z <- y ## return only the selected values as a data frame or vector. if(drop) return(z) if(!jgiven) selected <- seq_len(ncol(x)) else { nameindices <- seq_along(names(x)) names(nameindices) <- names(x) selected <- as.vector(nameindices[j]) } # validate choice of selected/dropped columns nama <- names(z) argu <- attr(x, "argu") if(!(argu %in% nama)) stop(paste("The function argument", sQuote(argu), "must not be removed")) valu <- attr(x, "valu") if(!(valu %in% nama)) stop(paste("The default column of function values", sQuote(valu), "must not be removed")) # if the plot formula involves explicit mention of dropped columns, # replace it by a generic formula fmla <- as.formula(attr(x, "fmla")) if(!all(variablesinformula(fmla) %in% nama)) fmla <- as.formula(. 
~ .x, env=environment(fmla)) ## If range of argument was implicitly changed, adjust "alim" alim <- attr(x, "alim") rang <- range(z[[argu]]) alim <- intersect.ranges(alim, rang, fatal=FALSE) result <- fv(z, argu=attr(x, "argu"), ylab=attr(x, "ylab"), valu=attr(x, "valu"), fmla=fmla, alim=alim, labl=attr(x, "labl")[selected], desc=attr(x, "desc")[selected], unitname=attr(x, "units"), fname=attr(x,"fname"), yexp=attr(x, "yexp")) ## carry over preferred names, if possible dotn <- fvnames(x, ".") fvnames(result, ".") <- dotn[dotn %in% colnames(result)] shad <- fvnames(x, ".s") if(!is.null(shad) && all(shad %in% colnames(result))) fvnames(result, ".s") <- shad return(result) } ## Subset and column replacement methods ## to guard against deletion of columns "[<-.fv" <- function(x, i, j, value) { if(!missing(j)) { ## check for alterations to structure of object if((is.character(j) && !all(j %in% colnames(x))) || (is.numeric(j) && any(j > ncol(x)))) stop("Use bind.fv to add new columns to an object of class fv") if(is.null(value) && missing(i)) { ## column(s) will be removed co <- seq_len(ncol(x)) names(co) <- colnames(x) keepcol <- setdiff(co, co[j]) return(x[ , keepcol, drop=FALSE]) } } NextMethod("[<-") } "$<-.fv" <- function(x, name, value) { j <- which(colnames(x) == name) if(is.null(value)) { ## column will be removed if(length(j) != 0) return(x[, -j, drop=FALSE]) return(x) } if(length(j) == 0) { ## new column df <- data.frame(1:nrow(x), value)[,-1L,drop=FALSE] colnames(df) <- name y <- bind.fv(x, df, desc=paste("Additional variable", sQuote(name))) return(y) } NextMethod("$<-") } ## method for 'formula' formula.fv <- function(x, ...) 
{ attr(x, "fmla") } # new generic "formula<-" <- function(x, ..., value) { UseMethod("formula<-") } "formula<-.fv" <- function(x, ..., value) { if(is.null(value)) value <- paste(fvnames(x, ".y"), "~", fvnames(x, ".x")) else if(inherits(value, "formula")) { ## convert formula to string value <- flat.deparse(value) } else if(!is.character(value)) stop("Assignment value should be a formula or a string") attr(x, "fmla") <- value return(x) } ## method for with() with.fv <- function(data, expr, ..., fun=NULL, enclos=NULL) { if(any(names(list(...)) == "drop")) stop("Outdated argument 'drop' used in with.fv") cl <- short.deparse(sys.call()) verifyclass(data, "fv") if(is.null(enclos)) enclos <- parent.frame() ## convert syntactic expression to 'expression' object # e <- as.expression(substitute(expr)) ## convert syntactic expression to call elang <- substitute(expr) ## map "." etc to names of columns of data datanames <- names(data) xname <- fvnames(data, ".x") yname <- fvnames(data, ".y") ux <- as.name(xname) uy <- as.name(yname) dnames <- datanames[datanames %in% fvnames(data, ".")] ud <- as.call(lapply(c("cbind", dnames), as.name)) anames <- datanames[datanames %in% fvnames(data, ".a")] ua <- as.call(lapply(c("cbind", anames), as.name)) if(!is.null(fvnames(data, ".s"))) { snames <- datanames[datanames %in% fvnames(data, ".s")] us <- as.call(lapply(c("cbind", snames), as.name)) } else us <- NULL expandelang <- eval(substitute(substitute(ee, list(.=ud, .x=ux, .y=uy, .s=us, .a=ua)), list(ee=elang))) dont.complain.about(ua, ud, us, ux, uy) evars <- all.vars(expandelang) used.dotnames <- evars[evars %in% dnames] ## evaluate expression datadf <- as.data.frame(data) results <- eval(expandelang, as.list(datadf), enclos=enclos) ## -------------------- ## commanded to return numerical values only? 
if(!is.null(fun) && !fun) return(results) if(!is.matrix(results) && !is.data.frame(results)) { ## result is a vector if(is.null(fun)) fun <- FALSE if(!fun || length(results) != nrow(datadf)) return(results) results <- matrix(results, ncol=1) } else { ## result is a matrix or data frame if(is.null(fun)) fun <- TRUE if(!fun || nrow(results) != nrow(datadf)) return(results) } ## result is a matrix or data frame of the right dimensions ## make a new fv object ## ensure columns of results have names if(is.null(colnames(results))) colnames(results) <- paste("col", seq_len(ncol(results)), sep="") resultnames <- colnames(results) ## get values of function argument xvalues <- datadf[[xname]] ## tack onto result matrix results <- cbind(xvalues, results) colnames(results) <- c(xname, resultnames) results <- data.frame(results) ## check for alteration of column names oldnames <- resultnames resultnames <- colnames(results)[-1L] if(any(resultnames != oldnames)) warning("some column names were illegal and have been changed") ## determine mapping (if any) from columns of output to columns of input namemap <- match(colnames(results), names(datadf)) okmap <- !is.na(namemap) ## Build up fv object ## decide which of the columns should be the preferred value newyname <- if(yname %in% resultnames) yname else resultnames[1L] ## construct default plot formula fmla <- flat.deparse(as.formula(paste(". ~", xname))) dotnames <- resultnames ## construct description strings desc <- character(ncol(results)) desc[okmap] <- attr(data, "desc")[namemap[okmap]] desc[!okmap] <- paste("Computed value", resultnames[!okmap]) ## function name (fname) and mathematical expression for function (yexp) oldyexp <- attr(data, "yexp") oldfname <- attr(data, "fname") if(is.null(oldyexp)) { fname <- cl yexp <- substitute(f(xname), list(f=as.name(fname), xname=as.name(xname))) } else { ## map 'cbind(....)' to "." 
for name of function only cb <- paste("cbind(", paste(used.dotnames, collapse=","), ")", sep="") compresselang <- gsub(cb, ".", flat.deparse(expandelang), fixed=TRUE) compresselang <- as.formula(paste(compresselang, "~1"))[[2L]] ## construct mapping using original function name labmap <- fvlabelmap(data, dot=TRUE) labmap[["."]] <- oldyexp yexp <- eval(substitute(substitute(ee, ff), list(ee=compresselang, ff=labmap))) labmap2 <- labmap labmap2[["."]] <- as.name(oldfname) fname <- eval(substitute(substitute(ee, ff), list(ee=compresselang, ff=labmap2))) fname <- paren(flat.deparse(fname)) } ## construct mathematical labels mathlabl <- as.character(fvlegend(data, expandelang)) mathlabl <- gsub("[[:space:]]+", " ", mathlabl) labl <- colnames(results) mathmap <- match(labl, used.dotnames) okmath <- !is.na(mathmap) labl[okmath] <- mathlabl[mathmap[okmath]] ## form fv object and return out <- fv(results, argu=xname, valu=newyname, labl=labl, desc=desc, alim=attr(data, "alim"), fmla=fmla, unitname=unitname(data), fname=fname, yexp=yexp, ylab=yexp) fvnames(out, ".") <- dotnames return(out) } ## method for 'range' range.fv <- local({ getValues <- function(x) { xdat <- as.matrix(as.data.frame(x)) yall <- fvnames(x, ".") vals <- xdat[, yall] return(as.vector(vals)) } range.fv <- function(..., na.rm=TRUE, finite=na.rm) { aarg <- list(...) isfun <- sapply(aarg, is.fv) if(any(isfun)) aarg[isfun] <- lapply(aarg[isfun], getValues) z <- do.call(range, append(aarg, list(na.rm=na.rm, finite=finite))) return(z) } range.fv }) min.fv <- function(..., na.rm=TRUE, finite=na.rm) { range(..., na.rm=TRUE, finite=na.rm)[1L] } max.fv <- function(..., na.rm=TRUE, finite=na.rm) { range(..., na.rm=TRUE, finite=na.rm)[2L] } ## stieltjes integration for fv objects stieltjes <- function(f, M, ...) { ## stieltjes integral of f(x) dM(x) stopifnot(is.function(f)) if(is.stepfun(M)) { envM <- environment(M) #' jump locations x <- get("x", envir=envM) #' values of integrand fx <- f(x, ...) 
#' jump amounts xx <- c(-Inf, (x[-1L] + x[-length(x)])/2, Inf) dM <- diff(M(xx)) #' integrate f(x) dM(x) f.dM <- fx * dM result <- sum(f.dM[is.finite(f.dM)]) return(list(result)) } else if(is.fv(M)) { ## integration variable argu <- attr(M, "argu") x <- M[[argu]] ## values of integrand fx <- f(x, ...) ## estimates of measure valuenames <- names(M) [names(M) != argu] Mother <- as.data.frame(M)[, valuenames] Mother <- as.matrix(Mother, nrow=nrow(M)) ## increments of measure dM <- apply(Mother, 2, diff) dM <- rbind(dM, 0) ## integrate f(x) dM(x) f.dM <- fx * dM f.dM[!is.finite(f.dM)] <- 0 results <- colSums(f.dM) results <- as.list(results) names(results) <- valuenames return(results) } else stop("M must be an object of class fv or stepfun") } prefixfv <- function(x, tagprefix="", descprefix="", lablprefix=tagprefix, whichtags=fvnames(x, "*")) { ## attach a prefix to fv information stopifnot(is.fv(x)) att <- attributes(x) relevant <- names(x) %in% whichtags oldtags <- names(x)[relevant] newtags <- paste(tagprefix, oldtags, sep="") newlabl <- paste(lablprefix, att$labl[relevant], sep="") newdesc <- paste(descprefix, att$desc[relevant]) y <- rebadge.fv(x, tags=oldtags, new.desc=newdesc, new.labl=newlabl, new.tags=newtags) return(y) } reconcile.fv <- local({ reconcile.fv <- function(...) { ## reconcile several fv objects by finding the columns they share in common z <- list(...) 
if(!all(unlist(lapply(z, is.fv)))) { if(length(z) == 1 && is.list(z[[1L]]) && all(unlist(lapply(z[[1L]], is.fv)))) z <- z[[1L]] else stop("all arguments should be fv objects") } n <- length(z) if(n <= 1) return(z) ## find columns that are common to all estimates keepcolumns <- names(z[[1L]]) keepvalues <- fvnames(z[[1L]], "*") for(i in 2:n) { keepcolumns <- intersect(keepcolumns, names(z[[i]])) keepvalues <- intersect(keepvalues, fvnames(z[[i]], "*")) } if(length(keepvalues) == 0) stop("cannot reconcile fv objects: they have no columns in common") ## determine name of the 'preferred' column prefs <- unlist(lapply(z, fvnames, a=".y")) prefskeep <- prefs[prefs %in% keepvalues] if(length(prefskeep) > 0) { ## pick the most popular chosen <- unique(prefskeep)[which.max(table(prefskeep))] } else { ## drat - pick a value arbitrarily chosen <- keepvalues[1L] } z <- lapply(z, rebadge.fv, new.preferred=chosen) z <- lapply(z, "[.fv", j=keepcolumns) ## also clip to the same r values rmax <- min(sapply(z, maxrval)) z <- lapply(z, cliprmax, rmax=rmax) return(z) } maxrval <- function(x) { max(with(x, .x)) } cliprmax <- function(x, rmax) { x[ with(x, .x) <= rmax, ] } reconcile.fv }) as.function.fv <- function(x, ..., value=".y", extrapolate=FALSE) { trap.extra.arguments(...) 
value.orig <- value ## extract function argument xx <- with(x, .x) ## extract all function values yy <- as.data.frame(x)[, fvnames(x, "*"), drop=FALSE] ## determine which value(s) to supply if(!is.character(value)) stop("value should be a string or vector specifying columns of x") if(!all(value %in% colnames(yy))) { expandvalue <- try(fvnames(x, value)) if(!inherits(expandvalue, "try-error")) { value <- expandvalue } else stop("Unable to determine columns of x") } yy <- yy[,value, drop=FALSE] argname <- fvnames(x, ".x") ## determine extrapolation rule (1=NA, 2=most extreme value) stopifnot(is.logical(extrapolate)) stopifnot(length(extrapolate) %in% 1:2) endrule <- 1 + extrapolate ## make function(s) if(length(value) == 1 && !identical(value.orig, "*")) { ## make a single 'approxfun' and return it f <- approxfun(xx, yy[,,drop=TRUE], rule=endrule) ## magic names(formals(f))[1L] <- argname body(f)[[4L]] <- as.name(argname) } else { ## make a list of 'approxfuns' with different function values funs <- lapply(yy, approxfun, x = xx, rule = endrule) ## return a function which selects the appropriate 'approxfun' and executes f <- function(xxxx, what=value) { what <- match.arg(what) funs[[what]](xxxx) } ## recast function definition ## ('any sufficiently advanced technology is ## indistinguishable from magic' -- Arthur C. Clarke) formals(f)[[2L]] <- value names(formals(f))[1L] <- argname ## body(f)[[3L]][[2L]] <- as.name(argname) body(f) <- eval(substitute(substitute(z, list(xxxx=as.name(argname))), list(z=body(f)))) } class(f) <- c("fvfun", class(f)) attr(f, "fname") <- attr(x, "fname") attr(f, "yexp") <- attr(x, "yexp") return(f) } print.fvfun <- function(x, ...) { y <- args(x) yexp <- as.expression(attr(x, "yexp")) body(y) <- as.name(paste("Returns interpolated value of", yexp)) print(y, ...) 
return(invisible(NULL)) } findcbind <- function(root, depth=0, maxdepth=1000) { ## recursive search through a parse tree to find calls to 'cbind' if(depth > maxdepth) stop("Reached maximum depth") if(length(root) == 1) return(NULL) if(identical(as.name(root[[1L]]), as.name("cbind"))) return(list(numeric(0))) out <- NULL for(i in 2:length(root)) { di <- findcbind(root[[i]], depth+1, maxdepth) if(!is.null(di)) out <- append(out, lapply(di, append, values=i, after=FALSE)) } return(out) } .MathOpNames <- c("+", "-", "*", "/", "^", "%%", "%/%", "&", "|", "!", "==", "!=", "<", "<=", ">=", ">") distributecbind <- local({ distributecbind <- function(x) { ## x is an expression involving a call to 'cbind' ## return a vector of expressions, each obtained by replacing 'cbind(...)' ## by one of its arguments in turn. stopifnot(typeof(x) == "expression") xlang <- x[[1L]] locations <- findcbind(xlang) if(length(locations) == 0) return(x) ## cbind might occur more than once ## check that the number of arguments is the same each time narg <- unique(sapply(locations, nargs.in.expr, e=xlang)) if(length(narg) > 1) return(NULL) out <- NULL if(narg > 0) { for(i in 1:narg) { ## make a version of the expression ## in which cbind() is replaced by its i'th argument fakexlang <- xlang for(loc in locations) { if(length(loc) > 0) { ## usual case: 'loc' is integer vector representing nested index cbindcall <- xlang[[loc]] ## extract i-th argument argi <- cbindcall[[i+1]] ## if argument is an expression, enclose it in parentheses if(length(argi) > 1 && paste(argi[[1L]]) %in% .MathOpNames) argi <- substitute((x), list(x=argi)) ## replace cbind call by its i-th argument fakexlang[[loc]] <- argi } else { ## special case: 'loc' = integer(0) representing xlang itself cbindcall <- xlang ## extract i-th argument argi <- cbindcall[[i+1L]] ## replace cbind call by its i-th argument fakexlang <- cbindcall[[i+1L]] } } ## add to final expression out <- c(out, as.expression(fakexlang)) } } return(out) } 
nargs.in.expr <- function(loc, e) { n <- if(length(loc) > 0) length(e[[loc]]) else length(e) return(n - 1L) } distributecbind }) ## Form a new 'fv' object as a ratio ratfv <- function(df, numer, denom, ..., ratio=TRUE) { ## Determine y if(!missing(df)) { y <- fv(df, ...) num <- NULL } else { ## Compute numer/denom ## Numerator must be a data frame num <- fv(numer, ...) ## Denominator may be a data frame or a constant force(denom) y <- eval.fv(num/denom) ## relabel y <- fv(as.data.frame(y), ...) } if(!ratio) return(y) if(is.null(num)) { ## Compute num = y * denom ## Denominator may be a data frame or a constant force(denom) num <- eval.fv(y * denom) ## ditch labels num <- fv(as.data.frame(num), ...) } ## make denominator an fv object if(is.data.frame(denom)) { den <- fv(denom, ...) } else { ## scalar check.1.real(denom, "Unless it is a data frame,") ## replicate it in all the data columns dendf <- as.data.frame(num) valuecols <- (names(num) != fvnames(num, ".x")) dendf[, valuecols] <- denom den <- fv(dendf, ...) 
} ## tweak the descriptions ok <- (names(y) != fvnames(y, ".x")) attr(num, "desc")[ok] <- paste("numerator of", attr(num, "desc")[ok]) attr(den, "desc")[ok] <- paste("denominator of", attr(den, "desc")[ok]) ## form ratio object y <- rat(y, num, den, check=FALSE) return(y) } ## Tack new column(s) onto a ratio fv object bind.ratfv <- function(x, numerator=NULL, denominator=NULL, labl = NULL, desc = NULL, preferred = NULL, ratio=TRUE, quotient=NULL) { if(ratio && !inherits(x, "rat")) stop("ratio=TRUE is set, but x has no ratio information", call.=FALSE) if(is.null(numerator) && !is.null(denominator) && !is.null(quotient)) numerator <- quotient * denominator if(is.null(denominator) && inherits(numerator, "rat")) { ## extract numerator & denominator from ratio object both <- numerator denominator <- attr(both, "denominator") usenames <- fvnames(both, ".a") numerator <- as.data.frame(both)[,usenames] denominator <- as.data.frame(denominator)[,usenames] ## labels default to those of ratio object if(is.null(labl)) labl <- attr(both, "labl") if(is.null(desc)) desc <- attr(both, "desc") if(is.null(labl)) labl <- attr(both, "labl") } # calculate ratio # The argument 'quotient' is rarely needed # except to avoid 0/0 or to improve accuracy if(is.null(quotient)) quotient <- numerator/denominator # bind new column to x y <- bind.fv(x, quotient, labl=labl, desc=desc, preferred=preferred) if(!ratio) return(y) ## convert scalar denominator to data frame if(!is.data.frame(denominator)) { if(!is.numeric(denominator) || !is.vector(denominator)) stop("Denominator should be a data frame or a numeric vector") nd <- length(denominator) if(nd != 1 && nd != nrow(x)) stop("Denominator has wrong length") dvalue <- denominator denominator <- numerator denominator[] <- dvalue } ## Now fuse with x num <- attr(x, "numerator") den <- attr(x, "denominator") num <- bind.fv(num, numerator, labl=labl, desc=paste("numerator of", desc), preferred=preferred) den <- bind.fv(den, denominator, labl=labl, 
desc=paste("denominator of", desc), preferred=preferred) y <- rat(y, num, den, check=FALSE) return(y) } conform.ratfv <- function(x) { ## harmonise display properties in components of a ratio stopifnot(inherits(x, "rat"), is.fv(x)) num <- attr(x, "numerator") den <- attr(x, "denominator") formula(num) <- formula(den) <- formula(x) fvnames(num, ".") <- fvnames(den, ".") <- fvnames(x, ".") unitname(num) <- unitname(den) <- unitname(x) attr(x, "numerator") <- num attr(x, "denominator") <- den return(x) } spatstat/R/Kscaled.R0000755000176200001440000001344713115271075014025 0ustar liggesusers# # Kscaled.R Estimation of K function for locally-scaled process # # $Revision: 1.16 $ $Date: 2015/02/22 03:00:48 $ # "Lscaled" <- function(...) { K <- Kscaled(...) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object L <- rebadge.fv(L, quote(L[scaled](r)), c("L","scaled")) attr(L, "labl") <- attr(K, "labl") return(L) } "Kscaled"<- function (X, lambda=NULL, ..., r = NULL, breaks = NULL, rmax = 2.5, correction=c("border", "isotropic", "translate"), renormalise=FALSE, normpower=1, sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") # rfixed <- !missing(r) || !missing(breaks) ## determine basic parameters W <- X$window npts <- X$n areaW <- area(W) halfdiameter <- diameter(W)/2 ## match corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) correction <- implemented.for.K(correction, W$type, correction.given) ########################################################### ## DETERMINE WEIGHTS AND VALIDATE ## if(missing(lambda)) { ## No intensity data provided ## Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambda <- 
as.numeric(lambda) } else { ## lambda values provided if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.ppm(lambda)) lambda <- safelookup(predict(lambda, type="trend"), X) else if(!is.numeric(lambda) || !is.null(dim(lambda))) stop(paste(sQuote("lambda"), "should be a vector, a pixel image, a function or a ppm")) check.nvector(lambda, npts) } if(renormalise) { ## renormalise. Here we only need half the power ;-) check.1.real(normpower) stopifnot(normpower %in% 1:2) renorm.factor <- (areaW/sum(1/lambda))^(normpower/2) lambda <- lambda/renorm.factor } ## Calculate range of r values using max lambda sra <- sqrt(range(lambda)) minrescale <- sra[1] maxrescale <- sra[2] ## convert arguments to absolute distances absr <- if(!is.null(r)) r/maxrescale else NULL absrmaxdefault <- min(rmax.rule("K", W), rmax/maxrescale) absbreaks <- if(!is.null(breaks)) scalardilate(breaks, 1/maxrescale) else NULL ## determine absolute distances absbreaks <- handle.r.b.args(absr, absbreaks, W, rmaxdefault=absrmaxdefault) absr <- absbreaks$r ## convert to rescaled distances breaks <- scalardilate(absbreaks, maxrescale) r <- breaks$r rmax <- breaks$max ## recommended range of scaled r values alim <- c(0, min(rmax, maxrescale * absrmaxdefault)) rthresh <- minrescale * halfdiameter ## maximum absolute distance ever needed maxabsdist <- min(rmax/minrescale, halfdiameter) ## this will be the output data frame K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- fv(K, "r", quote(K[scaled](r)), "theo", , alim, c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("K", "scaled")) ## identify all relevant close pairs what <- if(any(correction == "translate")) "all" else "ijd" close <- closepairs(X, maxabsdist, what=what) I <- close$i J <- close$j ## locally-scaled distances sqrtLambda <- sqrt(lambda) lamIJ <- (sqrtLambda[I] + sqrtLambda[J])/2 absDIJ <- close$d DIJ <- absDIJ * lamIJ XI <- ppp(close$xi, 
close$yi, window=W, check=FALSE) if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! wh <- whist(DIJ, breaks$val) # no weights Kun <- cumsum(wh)/npts K <- bind.fv(K, data.frame(un=Kun), "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un") } if(any(correction == "border")) { ## border method ## Compute SCALED distances to boundary b <- bdist.points(X) * sqrtLambda bI <- b[I] ## apply reduced sample algorithm to scaled distances RS <- Kount(DIJ, bI, b, breaks) Kb <- RS$numerator/RS$denom.count Kb[r > rthresh] <- NA K <- bind.fv(K, data.frame(border=Kb), "{hat(%s)[%s]^{bord}}(r)", "border-corrected estimate of %s", "border") } if(any(correction == "translate")) { ## translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) wh <- whist(DIJ, breaks$val, edgewt) Ktrans <- cumsum(wh)/npts Ktrans[r >= rthresh] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction (using UN-SCALED distances) edgewt <- edge.Ripley(XI, matrix(absDIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) Kiso <- cumsum(wh)/npts Kiso[r >= rthresh] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") } ## default plot will display all edge corrections formula(K) <- . 
~ r nama <- rev(colnames(K)) fvnames(K, ".") <- nama[!(nama %in% c("r", "rip", "ls"))] ## unitname(K) <- c("normalised unit", "normalised units") return(K) } spatstat/R/slrm.R0000755000176200001440000004531713115271120013424 0ustar liggesusers# # slrm.R # # Spatial Logistic Regression # # $Revision: 1.28 $ $Date: 2016/04/25 02:34:40 $ # slrm <- function(formula, ..., data=NULL, offset=TRUE, link="logit", dataAtPoints=NULL, splitby=NULL) { # remember call CallInfo <- list(callstring = short.deparse(sys.call()), cl = match.call(), formula = formula, offset=offset, link=link, splitby=splitby, dotargs=list(...)) if(!(link %in% c("logit", "cloglog"))) stop(paste("Unrecognised link", dQuote(link))) ########### INTERPRET FORMULA ############################## if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) # check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) Yname <- formula[[2]] trend <- rhs <- formula[c(1,3)] if(!is.name(Yname)) stop("Left hand side of formula should be a single name") Yname <- paste(Yname) if(!inherits(trend, "formula")) stop("Internal error: failed to extract RHS of formula") varnames <- unique(variablesinformula(trend)) specials <- c("x", "y", "logpixelarea") covnames <- varnames[!(varnames %in% specials)] # add 'splitby' to covariate names if(!is.null(splitby)) { if(!is.character(splitby) || length(splitby) != 1) stop("splitby should be a single character string") covnames <- unique(c(covnames, splitby)) } CallInfo$responsename <- Yname CallInfo$varnames <- varnames CallInfo$covnames <- covnames # Parent environment parenv <- environment(formula) ######## FIND DATA AND RESHAPE ####################### Data <- slr.prepare(CallInfo, parenv, data, dataAtPoints, splitby) # W <- Data$W df <- Data$df ######## FIT MODEL ############################### dformula <- formula if(offset) { # insert offset term in formula 
rhs <- paste(as.character(rhs), collapse=" ") rhs <- paste(c(rhs, "offset(logpixelarea)"), collapse="+") dformula <- as.formula(paste(Yname, rhs)) } linkname <- link FIT <- glm(dformula, family=binomial(link=linkname), data=df, na.action=na.exclude) result <- list(call = CallInfo$cl, CallInfo = CallInfo, Data = Data, Fit = list(FIT=FIT, dformula=dformula), terms = terms(formula)) class(result) <- c("slrm", class(result)) return(result) } ################ UTILITY TO FIND AND RESHAPE DATA ################# slr.prepare <- function(CallInfo, envir, data, dataAtPoints=NULL, splitby=NULL, clip=TRUE) { # CallInfo is produced by slrm() # envir is parent environment of model formula # data is 'data' argument that takes precedence over 'envir' # 'clip' is TRUE if the data should be clipped to the domain of Y Yname <- CallInfo$responsename # varnames <- CallInfo$varnames covnames <- CallInfo$covnames dotargs <- CallInfo$dotargs # getobj <- function(nama, env, dat) { if(!is.null(dat) && !is.null(x <- dat[[nama]])) return(x) else return(get(nama, envir=env)) } # Get the response point pattern Y Y <- getobj(Yname, envir, data) if(!is.ppp(Y)) stop(paste("The response", sQuote(Yname), "must be a point pattern")) # if(!is.null(dataAtPoints)) { dataAtPoints <- as.data.frame(dataAtPoints) if(nrow(dataAtPoints) != npoints(Y)) stop(paste("dataAtPoints should have one row for each point in", dQuote(Yname))) } # Find the covariates ncov <- length(covnames) covlist <- lapply(as.list(covnames), getobj, env = envir, dat=data) names(covlist) <- covnames # Each covariate should be an image, a window, a function, or a single number if(ncov == 0) { isim <- isowin <- ismask <- isfun <- isnum <- isspatial <- israster <- logical(0) } else { isim <- sapply(covlist, is.im) isowin <- sapply(covlist, is.owin) ismask <- sapply(covlist, is.mask) isfun <- sapply(covlist, is.function) isspatial <- isim | isowin | isfun israster <- isim | ismask isnum <- sapply(covlist, is.numeric) & (lengths(covlist) == 
1) } if(!all(ok <- (isspatial | isnum))) { n <- sum(!ok) stop(paste(ngettext(n, "The argument", "Each of the arguments"), commasep(sQuote(covnames[!ok])), "should be either an image, a window, or a single number")) } # 'splitby' if(!is.null(splitby)) { splitwin <- covlist[[splitby]] if(!is.owin(splitwin)) stop("The splitting covariate must be a window") # ensure it is a polygonal window covlist[[splitby]] <- splitwin <- as.polygonal(splitwin) # delete splitting covariate from lists to be processed issplit <- (covnames == splitby) isspatial[issplit] <- FALSE israster[issplit] <- FALSE } # # nnum <- sum(isnum) # nspatial <- sum(isspatial) nraster <- sum(israster) # numlist <- covlist[isnum] spatiallist <- covlist[isspatial] rasterlist <- covlist[israster] # numnames <- names(numlist) spatialnames <- names(spatiallist) # rasternames <- names(rasterlist) # ######## CONVERT TO RASTER DATA ############################### convert <- function(x,W) { if(is.im(x) || is.function(x)) return(as.im(x,W)) if(is.owin(x)) return(as.im(x, W, value=TRUE, na.replace=FALSE)) return(NULL) } # determine spatial domain & common resolution: convert all data to it if(length(dotargs) > 0 || nraster == 0) { # Pixel resolution is determined by explicit arguments if(clip) { # Window extent is determined by response point pattern D <- as.owin(Y) } else { # Window extent is union of domains of data domains <- lapply(append(spatiallist, list(Y)), as.owin) D <- do.call(union.owin, domains) } # Create template mask W <- do.call(as.mask, append(list(D), dotargs)) # Convert all spatial objects to this resolution spatiallist <- lapply(spatiallist, convert, W=W) } else { # Pixel resolution is determined implicitly by covariate data W <- do.call(commonGrid, rasterlist) if(clip) { # Restrict data to spatial extent of response point pattern W <- intersect.owin(W, as.owin(Y)) } # Adjust spatial objects to this resolution spatiallist <- lapply(spatiallist, convert, W=W) } # images containing coordinate 
values xcoordim <- as.im(function(x,y){x}, W=W) ycoordim <- as.im(function(x,y){y}, W=W) # # create a list of covariate images, with names as in formula covimages <- append(list(x=xcoordim, y=ycoordim), spatiallist) basepixelarea <- W$xstep * W$ystep ######## ASSEMBLE DATA FRAME ############################### if(is.null(splitby)) { df <- slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, basepixelarea) sumYloga <- Y$n * log(basepixelarea) serial <- attr(df, "serial") } else { # fractional pixel areas pixsplit <- pixellate(splitwin, W) splitpixelarea <- as.vector(as.matrix(pixsplit)) # determine which points of Y are inside/outside window ins <- inside.owin(Y$x, Y$y, splitwin) # split processing dfIN <- slrAssemblePixelData(Y[ins], Yname, W, covimages, dataAtPoints[ins, ], splitpixelarea) serialIN <- attr(dfIN, "serial") dfIN[[splitby]] <- TRUE dfOUT <- slrAssemblePixelData(Y[!ins], Yname, W, covimages, dataAtPoints[!ins, ], basepixelarea - splitpixelarea) serialOUT <- attr(dfOUT, "serial") dfOUT[[splitby]] <- FALSE df <- rbind(dfIN, dfOUT) serial <- c(serialIN, serialOUT) # sum of log pixel areas associated with points Ysplit <- pixsplit[Y] sumYloga <- sum(log(ifelseXY(ins, Ysplit, basepixelarea - Ysplit))) } # tack on any numeric values df <- do.call(cbind, append(list(df), numlist)) ### RETURN ALL Data <- list(response=Y, covariates=covlist, spatialnames=spatialnames, numnames=numnames, W=W, df=df, serial=serial, sumYloga=sumYloga, dataAtPoints=dataAtPoints) return(Data) } # slrAssemblePixelData <- function(Y, Yname, W, covimages, dataAtPoints, pixelarea) { # pixellate point pattern Z <- pixellate(Y, W=W) Z <- eval.im(as.integer(Z>0)) # overwrite pixel entries for data points using exact values # coordinates xcoordim <- covimages[["x"]] ycoordim <- covimages[["y"]] xcoordim[Y] <- Y$x ycoordim[Y] <- Y$y covimages[["x"]] <- xcoordim covimages[["y"]] <- ycoordim # overwrite pixel entries if(!is.null(dataAtPoints)) { enames <- colnames(dataAtPoints) relevant 
<- enames %in% names(covimages) for(v in enames[relevant]) { cova <- covimages[[v]] cova[Y] <- dataAtPoints[, v, drop=TRUE] covimages[[v]] <- cova } } # assemble list of all images Ylist <- list(Z) names(Ylist) <- Yname allimages <- append(Ylist, covimages) # extract pixel values of each image pixelvalues <- function(z) { v <- as.vector(as.matrix(z)) if(z$type != "factor") return(v) lev <- levels(z) return(factor(v, levels=seq_along(lev), labels=lev)) } pixdata <- lapply(allimages, pixelvalues) df <- as.data.frame(pixdata) serial <- seq_len(nrow(df)) # add log(pixel area) column if(length(pixelarea) == 1) { df <- cbind(df, logpixelarea=log(pixelarea)) } else { ok <- (pixelarea > 0) df <- cbind(df[ok, ], logpixelarea=log(pixelarea[ok])) serial <- serial[ok] } attr(df, "serial") <- serial return(df) } is.slrm <- function(x) { inherits(x, "slrm") } coef.slrm <- function(object, ...) { coef(object$Fit$FIT) } print.slrm <- function(x, ...) { lk <- x$CallInfo$link switch(lk, logit= { splat("Fitted spatial logistic regression model") }, cloglog= { splat("Fitted spatial regression model (complementary log-log)") }, { splat("Fitted spatial regression model") splat("Link =", dQuote(lk)) }) cat("Formula:\t") print(x$CallInfo$formula) splat("Fitted coefficients:") print(coef(x)) return(invisible(NULL)) } logLik.slrm <- function(object, ..., adjust=TRUE) { FIT <- object$Fit$FIT ll <- -deviance(FIT)/2 if(adjust) { sumYloga <- object$Data$sumYloga ll <- ll - sumYloga } attr(ll, "df") <- length(coef(object)) class(ll) <- "logLik" return(ll) } fitted.slrm <- function(object, ...) 
{ if(length(list(...)) > 0) warning("second argument (and any subsequent arguments) ignored") predict(object, type="probabilities") } predict.slrm <- function(object, ..., type="intensity", newdata=NULL, window=NULL) { type <- pickoption("type", type, c(probabilities="probabilities", link="link", intensity="intensity", lambda="intensity")) FIT <- object$Fit$FIT link <- object$CallInfo$link W <- object$Data$W df <- object$Data$df loga <- df$logpixelarea if(is.null(newdata) && is.null(window)) { # fitted values from existing fit switch(type, probabilities={ values <- fitted(FIT) }, link={ values <- predict(FIT, type="link") }, intensity={ # this calculation applies whether an offset was included or not if(link == "cloglog") { linkvalues <- predict(FIT, type="link") values <- exp(linkvalues - loga) } else { probs <- fitted(FIT) values <- -log(1-probs)/exp(loga) } } ) out <- im(values, xcol=W$xcol, yrow=W$yrow, unitname=unitname(W)) return(out) } else { # prediction using new values # update arguments that may affect pixel resolution CallInfo <- object$CallInfo CallInfo$dotargs <- resolve.defaults(list(...), CallInfo$dotargs) # if(!is.null(window)) { # insert fake response in new window if(is.null(newdata)) newdata <- list() window <- as.owin(window) newdata[[CallInfo$responsename]] <- ppp(numeric(0), numeric(0), window=window) } # process new data newData <- slr.prepare(CallInfo, environment(CallInfo$formula), newdata, clip=!is.null(window)) newdf <- newData$df newW <- newData$W newloga <- newdf$logpixelarea # avoid NA etc npixel <- nrow(newdf) ok <- complete.cases(newdf) if(!all(ok)) { newdf <- newdf[ok, , drop=FALSE] newloga <- newloga[ok] } # compute link values linkvalues <- predict(FIT, newdata=newdf, type="link") # transform to desired scale linkinv <- family(FIT)$linkinv switch(type, probabilities={ values <- linkinv(linkvalues) }, link={ values <- linkvalues }, intensity={ # this calculation applies whether an offset was included or not if(link == "cloglog") { 
values <- exp(linkvalues - newloga) } else { probs <- linkinv(linkvalues) values <- -log(1-probs)/exp(newloga) } } ) # form image v <- rep.int(NA_real_, npixel) v[ok] <- values out <- im(v, xcol=newW$xcol, yrow=newW$yrow, unitname=unitname(W)) return(out) } } plot.slrm <- function(x, ..., type="intensity") { xname <- short.deparse(substitute(x)) y <- predict(x, type=type) do.call(plot.im, resolve.defaults(list(x=y), list(...), list(main=xname))) } formula.slrm <- function(x, ...) { f <- x$CallInfo$formula return(f) } terms.slrm <- function(x, ...) { terms(formula(x), ...) } labels.slrm <- function(object, ...) { # extract fitted trend coefficients co <- coef(object) # model terms tt <- terms(object) lab <- attr(tt, "term.labels") if(length(lab) == 0) return(character(0)) # model matrix mm <- model.matrix(object) ass <- attr(mm, "assign") # 'ass' associates coefficients with model terms # except ass == 0 for the Intercept coef.ok <- is.finite(co) relevant <- (ass > 0) okterms <- unique(ass[coef.ok & relevant]) return(lab[okterms]) } extractAIC.slrm <- function (fit, scale = 0, k = 2, ...) { edf <- length(coef(fit)) aic <- AIC(fit) c(edf, aic + (k - 2) * edf) } model.matrix.slrm <- function(object,..., keepNA=TRUE) { FIT <- object$Fit$FIT mm <- model.matrix(FIT, ...) if(!keepNA) return(mm) df <- object$Data$df comp <- complete.cases(df) if(all(comp)) return(mm) if(sum(comp) != nrow(mm)) stop("Internal error in patching NA's") mmplus <- matrix(NA, nrow(df), ncol(mm)) mmplus[comp, ] <- mm return(mmplus) } model.images.slrm <- function(object, ...) { mm <- model.matrix(object, ...) 
mm <- as.data.frame(mm) Data <- object$Data W <- Data$W serial <- Data$serial splitby <- object$CallInfo$splitby blank <- as.im(NA_real_, W) assignbyserial <- function(values, serial, template) { Z <- template Z$v[serial] <- values return(Z) } if(is.null(splitby)) { result <- lapply(as.list(mm), assignbyserial, serial=serial, template=blank) } else { df <- Data$df IN <- as.logical(df[[splitby]]) OUT <- !IN mmIN <- mm[IN, , drop=FALSE] mmOUT <- mm[OUT, , drop=FALSE] resultIN <- lapply(as.list(mmIN), assignbyserial, serial=serial[IN], template=blank) resultOUT <- lapply(as.list(mmOUT), assignbyserial, serial=serial[OUT], template=blank) names(resultIN) <- paste(names(resultIN), splitby, "TRUE", sep="") names(resultOUT) <- paste(names(resultOUT), splitby, "FALSE", sep="") result <- c(resultIN, resultOUT) } return(as.solist(result)) } update.slrm <- function(object, ..., evaluate=TRUE, env=parent.frame()) { e <- update.default(object, ..., evaluate=FALSE) if(evaluate) e <- eval(e, envir=env) return(e) } anova.slrm <- local({ anova.slrm <- function(object, ..., test=NULL) { objex <- append(list(object), list(...)) if(!all(unlist(lapply(objex, is.slrm)))) stop("Some arguments are not of class slrm") fitz <- lapply(objex, getFIT) do.call(anova, append(fitz, list(test=test))) } getFIT <- function(z) {z$Fit$FIT} anova.slrm }) vcov.slrm <- function(object, ..., what=c("vcov", "corr", "fisher", "Fisher")) { stopifnot(is.slrm(object)) what <- match.arg(what) vc <- vcov(object$Fit$FIT) result <- switch(what, vcov = vc, corr = { sd <- sqrt(diag(vc)) vc / outer(sd, sd, "*") }, fisher=, Fisher={ solve(vc) }) return(result) } unitname.slrm <- function(x) { return(unitname(x$Data$response)) } "unitname<-.slrm" <- function(x, value) { unitname(x$Data$response) <- value return(x) } is.stationary.slrm <- function(x) { fo <- formula(x) trend <- fo[c(1,3)] return(identical.formulae(trend, ~1)) } is.poisson.slrm <- function(x) { TRUE } simulate.slrm <- function(object, nsim=1, seed=NULL, 
..., window=NULL, covariates=NULL, verbose=TRUE, drop=FALSE) { # .... copied from simulate.lm .... if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } starttime <- proc.time() # determine simulation window and compute intensity if(!is.null(window)) stopifnot(is.owin(window)) lambda <- predict(object, type="intensity", newdata=covariates, window=window) # max lambda (for efficiency) summ <- summary(lambda) lmax <- summ$max + 0.05 * diff(summ$range) # run out <- list() verbose <- verbose && (nsim > 1) if(verbose) { cat(paste("Generating", nsim, "simulations... ")) pstate <- list() } for(i in 1:nsim) { out[[i]] <- rpoispp(lambda, lmax=lmax) if(verbose) pstate <- progressreport(i, nsim, state=pstate) } # pack up if(nsim == 1 && drop) { out <- out[[1]] } else { out <- as.solist(out) if(nsim > 0) names(out) <- paste("Simulation", 1:nsim) } out <- timed(out, starttime=starttime) attr(out, "seed") <- RNGstate return(out) } spatstat/R/centroid.R0000755000176200001440000001016213115271075014255 0ustar liggesusers# # centroid.S Centroid of a window # and related operations # # $Revision: 1.6 $ $Date: 2014/11/10 08:20:59 $ # # Function names (followed by "xypolygon" or "owin") # # intX integral of x dx dy # intY integral of y dx dy # meanX mean of x dx dy # meanY mean of y dx dy # centroid (meanX, meanY) # #------------------------------------- intX.xypolygon <- function(polly) { # # polly: list(x,y) vertices of a single polygon (n joins to 1) # verify.xypolygon(polly) x <- polly$x y <- polly$y # nedges <- length(x) # sic # place x axis below polygon y <- y - min(y) # join vertex n to vertex 1 xr <- c(x, x[1L]) yr <- c(y, y[1L]) # slope dx <- diff(xr) dy <- diff(yr) slope <- ifelseAX(dx == 0, 0, 
dy/dx) # integrate integrals <- x * y * dx + (y + slope * x) * (dx^2)/2 + slope * (dx^3)/3 -sum(integrals) } intX.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) answer <- width * height * mean(w$xrange) }, polygonal = { answer <- sum(unlist(lapply(w$bdry, intX.xypolygon))) }, mask = { pixelarea <- abs(w$xstep * w$ystep) x <- rasterx.mask(w, drop=TRUE) answer <- (pixelarea * length(x)) * mean(x) }, stop("Unrecognised window type") ) return(answer) } meanX.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { answer <- mean(w$xrange) }, polygonal = { area <- sum(unlist(lapply(w$bdry, Area.xypolygon))) integrated <- sum(unlist(lapply(w$bdry, intX.xypolygon))) answer <- integrated/area }, mask = { x <- rasterx.mask(w, drop=TRUE) answer <- mean(x) }, stop("Unrecognised window type") ) return(answer) } intY.xypolygon <- function(polly) { # # polly: list(x,y) vertices of a single polygon (n joins to 1) # verify.xypolygon(polly) x <- polly$x y <- polly$y # nedges <- length(x) # sic # place x axis below polygon yadjust <- min(y) y <- y - yadjust # join vertex n to vertex 1 xr <- c(x, x[1L]) yr <- c(y, y[1L]) # slope dx <- diff(xr) dy <- diff(yr) slope <- ifelseAX(dx == 0, 0, dy/dx) # integrate integrals <- (1/2) * (dx * y^2 + slope * y * dx^2 + slope^2 * dx^3/3) total <- sum(integrals) - yadjust * Area.xypolygon(polly) # change sign to adhere to anticlockwise convention -total } intY.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) answer <- width * height * mean(w$yrange) }, polygonal = { answer <- sum(unlist(lapply(w$bdry, intY.xypolygon))) }, mask = { pixelarea <- abs(w$xstep * w$ystep) y <- rastery.mask(w, drop=TRUE) answer <- (pixelarea * length(y)) * mean(y) }, stop("Unrecognised window type") ) return(answer) } meanY.owin <- function(w) { verifyclass(w, "owin") switch(w$type, 
rectangle = { answer <- mean(w$yrange) }, polygonal = { area <- sum(unlist(lapply(w$bdry, Area.xypolygon))) integrated <- sum(unlist(lapply(w$bdry, intY.xypolygon))) answer <- integrated/area }, mask = { y <- rastery.mask(w, drop=TRUE) answer <- mean(y) }, stop("Unrecognised window type") ) return(answer) } centroid.owin <- function(w, as.ppp = FALSE) { w <- as.owin(w) out <- list(x=meanX.owin(w), y=meanY.owin(w)) if(as.ppp){ if(!inside.owin(out$x, out$y, w)) w <- as.rectangle(w) out <- as.ppp(out, W=w) } return(out) } spatstat/R/ord.R0000755000176200001440000000224013115271120013217 0ustar liggesusers# # # ord.S # # $Revision: 1.7 $ $Date: 2015/10/21 09:06:57 $ # # Ord process with user-supplied potential # # Ord() create an instance of the Ord process # [an object of class 'interact'] # with user-supplied potential # # # ------------------------------------------------------------------- # Ord <- local({ BlankOrd <- list( name = "Ord process with user-defined potential", creator = "Ord", family = "ord.family", pot = NULL, par = NULL, parnames = NULL, init = NULL, update = function(self, ...){ do.call(Ord, resolve.defaults(list(...), list(pot=self$pot, name=self$name))) } , print = function(self) { cat("Potential function:\n") print(self$pot) invisible() }, version=NULL ) class(BlankOrd) <- "interact" Ord <- function(pot, name) { out <- instantiate.interact(BlankOrd) out$pot <- pot if(!missing(name)) out$name <- name } Ord <- intermaker(Ord, BlankOrd) }) spatstat/R/Fest.R0000755000176200001440000001406513115271075013355 0ustar liggesusers# # Fest.R # # Computes estimates of the empty space function # # $Revision: 4.43 $ $Date: 2016/10/04 01:04:13 $ # Fhazard <- function(X, ...) { Z <- Fest(X, ...) 
if(!any(names(Z) == "km")) stop("Kaplan-Meier estimator 'km' is required for hazard rate") ## strip off Poisson F Z <- Z[, (colnames(Z) != "theo")] ## relabel the fv object Z <- rebadge.fv(Z, new.ylab=quote(h(r)), new.fname="h", tags=c("hazard", "theohaz"), new.tags=c("hazard", "theo"), new.labl=c("hat(%s)[km](r)", "%s[pois](r)"), new.desc=c( "Kaplan-Meier estimate of %s", "theoretical Poisson %s"), new.dotnames=c("hazard", "theo"), new.preferred="hazard") ## strip off unwanted bits Z <- Z[, c("r", "hazard", "theo")] return(Z) } Fest <- function(X, ..., eps = NULL, r=NULL, breaks=NULL, correction=c("rs", "km", "cs"), domain=NULL) { verifyclass(X, "ppp") if(!is.null(domain)) stopifnot(is.subset.owin(domain, Window(X))) rorbgiven <- !is.null(r) || !is.null(breaks) ## Intensity estimate W <- X$window npts <- npoints(X) lambda <- npts/area(W) ## First discretise dwin <- as.mask(W, eps=eps) dX <- ppp(X$x, X$y, window=dwin, check=FALSE) ## histogram breakpoints rmaxdefault <- rmax.rule("F", dwin, lambda) breaks <- handle.r.b.args(r, breaks, dwin, eps, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max if(rorbgiven) check.finespacing(rvals, if(is.null(eps)) NULL else eps/4, dwin, rmaxdefault=rmaxdefault, action="fatal", rname="r", context="in Fest(X, r)") ## choose correction(s) # correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) { correction <- c("rs", "km", "cs") } else correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", cs="cs", ChiuStoyan="cs", Hanisch="cs", han="cs", best="km"), multi=TRUE) ## initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2)) Z <- fv(df, "r", substitute(F(r), NULL), "theo", . 
~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="F") nr <- length(rvals) zeroes <- numeric(nr) ## compute distances and censoring distances if(X$window$type == "rectangle") { ## original data were in a rectangle ## output of exactdt() is sufficient e <- exactdt(dX) dist <- e$d bdry <- e$b if(!is.null(domain)) { ok <- inside.owin(raster.xy(e$w), , domain) dist <- dist[ok] bdry <- bdry[ok] } } else { ## window is irregular.. # Distance transform & boundary distance for all pixels e <- exactdt(dX) b <- bdist.pixels(dX$window, style="matrix") ## select only those pixels inside mask mm <- dwin$m if(!is.null(domain)) { ok <- inside.owin(raster.xy(e$w), , domain) mm <- as.vector(mm) & ok } dist <- e$d[mm] bdry <- b[mm] } ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) ## start calculating estimates of F if("none" %in% correction) { ## UNCORRECTED e.d.f. of empty space distances if(npts == 0) edf <- zeroes else { hh <- hist(dist[dist <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(dist) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)", "uncorrected estimate of %s", "raw") } if("cs" %in% correction) { ## Chiu-Stoyan correction if(npts == 0) cs <- zeroes else { ## uncensored distances x <- dist[d] ## weights a <- eroded.areas(W, rvals) ## calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts H <- cumsum(h/a) cs <- H/max(H[is.finite(H)]) } ## add to fv object Z <- bind.fv(Z, data.frame(cs=cs), "hat(%s)[cs](r)", "Chiu-Stoyan estimate of %s", "cs") } if(any(correction %in% c("rs", "km"))) { ## calculate Kaplan-Meier and/or border corrected (Reduced Sample) estimators want.rs <- "rs" %in% correction want.km <- "km" %in% correction selection <- c(want.rs, want.km, want.km, want.km) tags <- c("rs", "km", "hazard", "theohaz")[selection] labels <- c("hat(%s)[bord](r)", "hat(%s)[km](r)", "hat(h)[km](r)", 
"h[pois](r)")[selection] descr <- c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function h(r)", "theoretical Poisson hazard h(r)")[selection] if(npts == 0) { result <- as.data.frame(matrix(0, nr, length(tags))) names(result) <- tags } else { result <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs) result$theohaz <- 2 * pi * lambda * rvals result <- as.data.frame(result[tags]) } ## add to fv object Z <- bind.fv(Z, result, labels, descr, if(want.km) "km" else "rs") } ## wrap up unitname(Z) <- unitname(X) ## remove 'hazard' from the dotnames nama <- names(Z) fvnames(Z, ".") <- rev(setdiff(nama, c("r", "hazard", "theohaz"))) ## determine recommended plot range attr(Z, "alim") <- with(Z, range(.x[is.finite(.y) & .y <= 0.9])) return(Z) } spatstat/R/density.ppp.R0000755000176200001440000007017313115271075014733 0ustar liggesusers# # density.ppp.R # # Method for 'density' for point patterns # # $Revision: 1.86 $ $Date: 2017/06/05 10:31:58 $ # ksmooth.ppp <- function(x, sigma, ..., edge=TRUE) { .Deprecated("density.ppp", package="spatstat") density.ppp(x, sigma, ..., edge=edge) } density.ppp <- local({ density.ppp <- function(x, sigma=NULL, ..., weights=NULL, edge=TRUE, varcov=NULL, at="pixels", leaveoneout=TRUE, adjust=1, diggle=FALSE, se=FALSE, kernel="gaussian", scalekernel=is.character(kernel), positive=FALSE, verbose=TRUE) { verifyclass(x, "ppp") output <- pickoption("output location type", at, c(pixels="pixels", points="points")) if(!identical(kernel, "gaussian")) { validate2Dkernel(kernel) ## kernel is only partly implemented! 
if(se) stop("Standard errors are not implemented for non-Gaussian kernel") if(verbose && (is.function(sigma) || (is.null(sigma) && is.null(varcov)))) warning("Bandwidth selection will be based on Gaussian kernel") } ker <- resolve.2D.kernel(..., sigma=sigma, varcov=varcov, x=x, adjust=adjust) sigma <- ker$sigma varcov <- ker$varcov if(is.im(weights)) { weights <- safelookup(weights, x) # includes warning if NA } else if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(x), enclos=parent.frame()) if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0)) weights <- NULL if(se) { # compute standard error SE <- denspppSEcalc(x, sigma=sigma, varcov=varcov, ..., weights=weights, edge=edge, at=output, leaveoneout=leaveoneout, adjust=adjust, diggle=diggle) if(positive) SE <- posify(SE) } if(output == "points") { # VALUES AT DATA POINTS ONLY result <- densitypointsEngine(x, sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, weights=weights, edge=edge, leaveoneout=leaveoneout, diggle=diggle, ...) 
if(verbose && !is.null(uhoh <- attr(result, "warnings"))) { switch(uhoh, underflow=warning("underflow due to very small bandwidth"), warning(uhoh)) } ## constrain values to be positive if(positive) result <- posify(result) if(se) result <- list(estimate=result, SE=SE) return(result) } # VALUES AT PIXELS if(!edge) { # no edge correction edg <- NULL raw <- second.moment.calc(x, sigma, what="smooth", ..., kernel=kernel, scalekernel=scalekernel, weights=weights, varcov=varcov) raw <- divide.by.pixelarea(raw) smo <- raw } else if(!diggle) { # edge correction e(u) both <- second.moment.calc(x, sigma, what="smoothedge", ..., kernel=kernel, scalekernel=scalekernel, weights=weights, varcov=varcov) raw <- divide.by.pixelarea(both$smooth) edg <- both$edge smo <- if(is.im(raw)) eval.im(raw/edg) else lapply(raw, divideimage, denom=edg) } else { # edge correction e(x_i) edg <- second.moment.calc(x, sigma, what="edge", ..., scalekernel=scalekernel, kernel=kernel, varcov=varcov) wi <- 1/safelookup(edg, x, warn=FALSE) wi[!is.finite(wi)] <- 0 # edge correction becomes weight attached to points if(is.null(weights)) { newweights <- wi } else if(is.matrix(weights) || is.data.frame(weights)) { stopifnot(nrow(weights) == npoints(x)) newweights <- weights * wi } else { stopifnot(length(weights) == npoints(x)) newweights <- weights * wi } raw <- second.moment.calc(x, sigma, what="smooth", ..., kernel=kernel, scalekernel=scalekernel, weights=newweights, varcov=varcov) raw <- divide.by.pixelarea(raw) smo <- raw } result <- if(is.im(smo)) smo[x$window, drop=FALSE] else solapply(smo, "[", i=x$window, drop=FALSE) # internal use only spill <- resolve.1.default(list(spill=FALSE), list(...)) if(spill) return(list(result=result, sigma=sigma, varcov=varcov, raw = raw, edg=edg)) # constrain values to be positive if(positive) result <- posify(result) # normal return attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "kernel") <- kernel if(se) result <- list(estimate=result, 
SE=SE) return(result) } divideimage <- function(numer, denom) eval.im(numer/denom) posify <- function(x, eps=.Machine$double.xmin) { force(eps) # scalpel if(is.im(x)) return(eval.im(pmax(eps, x))) if(inherits(x, "solist")) return(solapply(x, posify, eps=eps)) if(is.numeric(x)) return(pmax(eps, x)) # data frame or list if(is.list(x) && all(sapply(x, is.numeric))) return(lapply(x, posify, eps=eps)) warning("Internal error: posify did not recognise data format") return(x) } divide.by.pixelarea <- function(x) { if(is.im(x)) { x$v <- x$v/(x$xstep * x$ystep) } else { for(i in seq_along(x)) x[[i]]$v <- with(x[[i]], v/(xstep * ystep)) } return(x) } denspppSEcalc <- function(x, sigma, varcov, ..., weights, edge, diggle, at) { ## Calculate standard error, rather than estimate tau <- taumat <- NULL if(is.null(varcov)) { varconst <- 1/(4 * pi * prod(sigma)) tau <- sigma/sqrt(2) } else { varconst <- 1/(4 * pi * sqrt(det(varcov))) taumat <- varcov/2 } ## Calculate edge correction weights if(edge) { edgeim <- second.moment.calc(x, sigma, what="edge", ..., varcov=varcov) if(diggle || at == "points") { edgeX <- safelookup(edgeim, x, warn=FALSE) diggleX <- 1/edgeX diggleX[!is.finite(diggleX)] <- 0 } edgeim <- edgeim[Window(x), drop=FALSE] } ## Perform smoothing if(!edge) { ## no edge correction V <- density(x, sigma=tau, varcov=taumat, ..., weights=weights, edge=edge, diggle=diggle, at=at) } else if(!diggle) { ## edge correction e(u) V <- density(x, sigma=tau, varcov=taumat, ..., weights=weights, edge=edge, diggle=diggle, at=at) V <- if(at == "pixels") (V/edgeim) else (V * diggleX) } else { ## Diggle edge correction e(x_i) wts <- diggleX * (weights %orifnull% 1) V <- density(x, sigma=tau, varcov=taumat, ..., weights=wts, edge=edge, diggle=diggle, at=at) } V <- V * varconst return(sqrt(V)) } density.ppp }) densitypointsEngine <- function(x, sigma, ..., kernel="gaussian", scalekernel=is.character(kernel), weights=NULL, edge=TRUE, varcov=NULL, leaveoneout=TRUE, diggle=FALSE, 
sorted=FALSE, spill=FALSE, cutoff=NULL) { debugging <- spatstat.options("developer") stopifnot(is.logical(leaveoneout)) validate2Dkernel(kernel) if(is.character(kernel)) kernel <- match2DkernelName(kernel) isgauss <- identical(kernel, "gaussian") # constant factor in density computations if(is.null(varcov)) { const <- 1/sigma^2 } else { detSigma <- det(varcov) Sinv <- solve(varcov) const <- 1/sqrt(detSigma) } if(isgauss) { # absorb leading constant in Gaussian density const <- const/(2 * pi) } if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0)) weights <- NULL # Leave-one-out computation # cutoff: contributions from pairs of distinct points # closer than 8 standard deviations sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov))) if(is.null(cutoff)) cutoff <- 8 * sd if(debugging) cat(paste("cutoff=", cutoff, "\n")) if(leaveoneout && npoints(x) > 1) { # ensure each point has its closest neighbours within the cutoff nndmax <- maxnndist(x) cutoff <- max(2 * nndmax, cutoff) if(debugging) cat(paste("adjusted cutoff=", cutoff, "\n")) } # validate weights if(is.null(weights)) { k <- 1L } else if(is.matrix(weights) || is.data.frame(weights)) { k <- ncol(weights) stopifnot(nrow(weights) == npoints(x)) weights <- as.data.frame(weights) weightnames <- colnames(weights) } else { k <- 1L stopifnot(length(weights) == npoints(x) || length(weights) == 1L) } # evaluate edge correction weights at points if(edge) { win <- x$window if(isgauss && is.null(varcov) && win$type == "rectangle") { # evaluate Gaussian probabilities directly xr <- win$xrange yr <- win$yrange xx <- x$x yy <- x$y xprob <- pnorm(xr[2L], mean=xx, sd=sigma) - pnorm(xr[1L], mean=xx, sd=sigma) yprob <- pnorm(yr[2L], mean=yy, sd=sigma) - pnorm(yr[1L], mean=yy, sd=sigma) edgeweight <- xprob * yprob } else { edg <- second.moment.calc(x, sigma=sigma, kernel=kernel, scalekernel=scalekernel, what="edge", varcov=varcov) edgeweight <- safelookup(edg, x, warn=FALSE) } if(diggle) { # Diggle edge 
correction # edgeweight is attached to each point if(is.null(weights)) { k <- 1L weights <- 1/edgeweight } else { weights <- weights/edgeweight } } } if(isgauss && spatstat.options("densityTransform") && spatstat.options("densityC")) { ## .................. experimental C code ..................... if(debugging) cat('Using experimental code!\n') npts <- npoints(x) result <- if(k == 1L) numeric(npts) else matrix(, npts, k) xx <- x$x yy <- x$y ## transform to standard coordinates if(is.null(varcov)) { xx <- xx/(sqrt(2) * sigma) yy <- yy/(sqrt(2) * sigma) } else { xy <- cbind(xx, yy) %*% matrixsqrt(Sinv/2) xx <- xy[,1L] yy <- xy[,2L] sorted <- FALSE } ## cutoff in standard coordinates cutoff <- cutoff/(sqrt(2) * sd) ## sort into increasing order of x coordinate (required by C code) if(!sorted) { oo <- fave.order(xx) xx <- xx[oo] yy <- yy[oo] } if(is.null(weights)) { zz <- .C("Gdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result result <- result * const } else if(k == 1L) { wtsort <- if(sorted) weights else weights[oo] zz <- .C("Gwtdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result result <- result * const } else { ## matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C("Gwtdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), weight = as.double(wtsort[,j]), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } result <- result * const } } else if(isgauss && spatstat.options("densityC")) { # .................. C code ........................... 
if(debugging) cat('Using standard code.\n') npts <- npoints(x) result <- if(k == 1L) numeric(npts) else matrix(, npts, k) # sort into increasing order of x coordinate (required by C code) if(sorted) { xx <- x$x yy <- x$y } else { oo <- fave.order(x$x) xx <- x$x[oo] yy <- x$y[oo] } if(is.null(varcov)) { # isotropic kernel if(is.null(weights)) { zz <- .C("denspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else if(k == 1L) { wtsort <- if(sorted) weights else weights[oo] zz <- .C("wtdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sd), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else { # matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C("wtdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sd), weight = as.double(wtsort[,j]), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } } } else { # anisotropic kernel flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C("adenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else if(k == 1L) { # vector of weights wtsort <- if(sorted) weights else weights[oo] zz <- .C("awtdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), weight = as.double(wtsort), result = 
as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else { # matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C("awtdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), weight = as.double(wtsort[,j]), result = as.double(double(npts)), PACKAGE = "spatstat") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } } } } else { # ..... interpreted code ......................................... close <- closepairs(x, cutoff) i <- close$i j <- close$j d <- close$d npts <- npoints(x) result <- if(k == 1L) numeric(npts) else matrix(, npts, k) # evaluate contribution from each close pair (i,j) if(isgauss) { if(is.null(varcov)) { contrib <- const * exp(-d^2/(2 * sigma^2)) } else { ## anisotropic kernel dx <- close$dx dy <- close$dy contrib <- const * exp(-(dx * (dx * Sinv[1L,1L] + dy * Sinv[1L,2L]) + dy * (dx * Sinv[2L,1L] + dy * Sinv[2L,2L]))/2) } } else { contrib <- evaluate2Dkernel(kernel, close$dx, close$dy, sigma=sigma, varcov=varcov, ...) } ## sum (weighted) contributions ifac <- factor(i, levels=1:npts) if(is.null(weights)) { result <- tapply(contrib, ifac, sum) } else if(k == 1L) { wcontrib <- contrib * weights[j] result <- tapply(wcontrib, ifac, sum) } else { for(kk in 1:k) { wcontribkk <- contrib * weights[j, kk] result[,kk] <- tapply(wcontribkk, ifac, sum) } } result[is.na(result)] <- 0 # } # ----- contribution from point itself ---------------- if(!leaveoneout) { # add contribution from point itself self <- const if(!is.null(weights)) self <- self * weights result <- result + self } # ........ Edge correction ........................................ if(edge && !diggle) result <- result/edgeweight # ............. validate ................................. 
npts <- npoints(x) if(k == 1L) { result <- as.numeric(result) if(length(result) != npts) stop(paste("Internal error: incorrect number of lambda values", "in leave-one-out method:", "length(lambda) = ", length(result), "!=", npts, "= npoints")) if(anyNA(result)) { nwrong <- sum(is.na(result)) stop(paste("Internal error:", nwrong, "NA or NaN", ngettext(nwrong, "value", "values"), "generated in leave-one-out method")) } } else { if(ncol(result) != k) stop(paste("Internal error: incorrect number of columns returned:", ncol(result), "!=", k)) colnames(result) <- weightnames if(nrow(result) != npts) stop(paste("Internal error: incorrect number of rows of lambda values", "in leave-one-out method:", "nrow(lambda) = ", nrow(result), "!=", npts, "= npoints")) if(anyNA(result)) { nwrong <- sum(!complete.cases(result)) stop(paste("Internal error:", nwrong, ngettext(nwrong, "row", "rows"), "of NA values generated in leave-one-out method")) } } if(spill) return(list(result=result, sigma=sigma, varcov=varcov, edg=edgeweight)) # tack on bandwidth attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov # return(result) } resolve.2D.kernel <- function(..., sigma=NULL, varcov=NULL, x, mindist=NULL, adjust=1, bwfun=NULL, allow.zero=FALSE) { if(is.function(sigma)) { bwfun <- sigma sigma <- NULL } if(is.null(sigma) && is.null(varcov) && !is.null(bwfun)) { # call bandwidth selection function bw <- do.call.matched(bwfun, resolve.defaults(list(X=x), list(...))) # interpret the result as either sigma or varcov if(!is.numeric(bw)) stop("bandwidth selector returned a non-numeric result") if(length(bw) %in% c(1L,2L)) { sigma <- as.numeric(bw) if(!all(sigma > 0)) { gripe <- "bandwidth selector returned negative value(s)" if(allow.zero) warning(gripe) else stop(gripe) } } else if(is.matrix(bw) && nrow(bw) == 2 && ncol(bw) == 2) { varcov <- bw if(!all(eigen(varcov)$values > 0)) stop("bandwidth selector returned matrix with negative eigenvalues") } else stop("bandwidth selector did not 
return a matrix or numeric value") } sigma.given <- !is.null(sigma) varcov.given <- !is.null(varcov) if(sigma.given) { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1L,2L)) if(!allow.zero) stopifnot(all(sigma > 0)) } if(varcov.given) stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 ) # reconcile ngiven <- varcov.given + sigma.given switch(ngiven+1L, { # default w <- x$window sigma <- (1/8) * shortside(as.rectangle(w)) }, { if(sigma.given && length(sigma) == 2) varcov <- diag(sigma^2) if(!is.null(varcov)) sigma <- NULL }, { stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) }) # apply adjustments if(!is.null(sigma)) sigma <- adjust * sigma if(!is.null(varcov)) varcov <- (adjust^2) * varcov # sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov))) cutoff <- 8 * sd uhoh <- if(!is.null(mindist) && cutoff < mindist) "underflow" else NULL result <- list(sigma=sigma, varcov=varcov, cutoff=cutoff, warnings=uhoh) return(result) } densitycrossEngine <- function(Xdata, Xquery, sigma, ..., weights=NULL, edge=TRUE, varcov=NULL, diggle=FALSE, sorted=FALSE) { if(!is.null(varcov)) { detSigma <- det(varcov) Sinv <- solve(varcov) } if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0)) weights <- NULL ## Leave-one-out computation ## cutoff: contributions from pairs of distinct points ## closer than 8 standard deviations sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov))) cutoff <- 8 * sd # validate weights if(is.null(weights)) { k <- 1L } else if(is.matrix(weights) || is.data.frame(weights)) { k <- ncol(weights) stopifnot(nrow(weights) == npoints(Xdata)) weights <- as.data.frame(weights) weightnames <- colnames(weights) } else { k <- 1L stopifnot(length(weights) == npoints(Xdata) || length(weights) == 1L) } # evaluate edge correction weights at points if(edge) { win <- Xdata$window if(diggle) { ## edge correction weights are attached to data points xedge <- Xdata } else { ## edge 
correction weights are applied at query points xedge <- Xquery if(!all(inside.owin(Xquery, , win))) stop(paste("Edge correction is not possible:", "some query points lie outside the data window"), call.=FALSE) } if(is.null(varcov) && win$type == "rectangle") { ## evaluate Gaussian probabilities directly xr <- win$xrange yr <- win$yrange xx <- xedge$x yy <- xedge$y xprob <- pnorm(xr[2L], mean=xx, sd=sigma) - pnorm(xr[1L], mean=xx, sd=sigma) yprob <- pnorm(yr[2L], mean=yy, sd=sigma) - pnorm(yr[1L], mean=yy, sd=sigma) edgeweight <- xprob * yprob } else { edg <- second.moment.calc(Xdata, sigma=sigma, what="edge", varcov=varcov) edgeweight <- safelookup(edg, xedge, warn=FALSE) } if(diggle) { ## Diggle edge correction ## edgeweight is attached to each data point if(is.null(weights)) { k <- 1L weights <- 1/edgeweight } else { weights <- weights/edgeweight } } } ndata <- npoints(Xdata) nquery <- npoints(Xquery) result <- if(k == 1L) numeric(nquery) else matrix(, nquery, k) ## coordinates xq <- Xquery$x yq <- Xquery$y xd <- Xdata$x yd <- Xdata$y if(!sorted) { ## sort into increasing order of x coordinate (required by C code) ooq <- fave.order(Xquery$x) xq <- xq[ooq] yq <- yq[ooq] ood <- fave.order(Xdata$x) xd <- xd[ood] yd <- yd[ood] } if(is.null(varcov)) { ## isotropic kernel if(is.null(weights)) { zz <- .C("crdenspt", nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(nquery)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[ooq] <- zz$result } else if(k == 1L) { wtsort <- if(sorted) weights else weights[ood] zz <- .C("wtcrdenspt", nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(nquery)), PACKAGE = "spatstat") 
if(sorted) result <- zz$result else result[ooq] <- zz$result } else { ## matrix of weights wtsort <- if(sorted) weights else weights[ood, ] for(j in 1:k) { zz <- .C("wtcrdenspt", nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort[,j]), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(nquery)), PACKAGE = "spatstat") if(sorted) result[,j] <- zz$result else result[ooq,j] <- zz$result } colnames(result) <- weightnames } } else { ## anisotropic kernel flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C("acrdenspt", nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[ooq] <- zz$result } else if(k == 1L) { ## vector of weights wtsort <- if(sorted) weights else weights[ood] zz <- .C("awtcrdenspt", nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE = "spatstat") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { ## matrix of weights wtsort <- if(sorted) weights else weights[ood, ] for(j in 1:k) { zz <- .C("awtcrdenspt", nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort[,j]), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE = "spatstat") if(sorted) result[,j] <- zz$result else result[ooq,j] <- zz$result } colnames(result) <- 
weightnames } } # ........ Edge correction ........................................ if(edge && !diggle) result <- result/edgeweight # tack on bandwidth attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov # return(result) } spatstat/R/pcfmulti.R0000644000176200001440000001641513115225157014275 0ustar liggesusers# # pcfmulti.R # # $Revision: 1.8 $ $Date: 2016/09/21 07:28:58 $ # # multitype pair correlation functions # pcfcross <- function(X, i, j, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), divisor=c("r","d")) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL divisor <- match.arg(divisor) ## marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) ## result <- pcfmulti(X, I, J, ..., r=r, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, divisor=divisor, Iname=Iname, Jname=Jname) ## iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(g[i,j](r), list(i=iname,j=jname)), c("g", paste0("list", paren(paste(iname, jname, sep=",")))), new.yexp=substitute(g[list(i,j)](r), list(i=iname,j=jname))) return(result) } pcfdot <- function(X, i, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), divisor=c("r", "d")) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL divisor <- match.arg(divisor) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. 
all points Iname <- paste("points with mark i =", i) Jname <- "points" result <- pcfmulti(X, I, J, ..., r=r, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, divisor=divisor, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(g[i ~ dot](r), list(i=iname)), c("g", paste0(iname, "~symbol(\"\\267\")")), new.yexp=substitute(g[i ~ symbol("\267")](r), list(i=iname))) return(result) } pcfmulti <- function(X, I, J, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r","d"), Iname="points satisfying condition I", Jname="points satisfying condition J") { verifyclass(X, "ppp") # r.override <- !is.null(r) divisor <- match.arg(divisor) win <- X$window areaW <- area(win) npts <- npoints(X) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("translate", "Ripley") correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) ## .......... indices I and J ............................. I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) XI <- X[I] XJ <- X[J] # lambdaI <- nI/areaW lambdaJ <- nJ/areaW nIJ <- sum(I & J) lambdaIJarea <- (nI * nJ - nIJ)/areaW ## ........... kernel bandwidth and support ......................... 
if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(lambdaJ) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(lambdaJ) } ########## r values ############################ # handle argument r rmaxdefault <- rmax.rule("K", win, lambdaJ) breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) fname <- c("g", "list(I,J)") yexp <- quote(g[list(I,J)](r)) out <- fv(df, "r", quote(g[I,J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "Pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=yexp) ########## smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# ## compute pairwise distances ## identify close pairs of points what <- if(any(correction == "translate")) "all" else "ijd" close <- crosspairs(XI, XJ, rmax+hmax, what=what) ## map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] ## eliminate any identical pairs if(nIJ > 0) { ok <- (iX != jX) if(!all(ok)) close <- as.list(as.data.frame(close)[ok, , drop=FALSE]) } ## extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i # jcloseJ <- close$j ###### compute ####### if(any(correction=="translate")) { # translation correction 
edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=win, paired=TRUE) gT <- sewpcf(dclose, edgewt, denargs, lambdaIJarea, divisor)$g out <- bind.fv(out, data.frame(trans=gT), makefvlabel(NULL, "hat", fname, "Trans"), "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) gR <- sewpcf(dclose, edgewt, denargs, lambdaIJarea, divisor)$g out <- bind.fv(out, data.frame(iso=gR), makefvlabel(NULL, "hat", fname, "Ripley"), "isotropic-corrected estimate of %s", "iso") } ## sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . ~ r fvnames(out, ".") <- corrxns # unitname(out) <- unitname(X) return(out) } spatstat/R/lppm.R0000755000176200001440000002326313155562665013440 0ustar liggesusers# # lppm.R # # Point process models on a linear network # # $Revision: 1.41 $ $Date: 2017/09/11 19:35:22 $ # lppm <- function(X, ...) { UseMethod("lppm") } lppm.formula <- function(X, interaction=NULL, ..., data=NULL) { ## remember call callstring <- paste(short.deparse(sys.call()), collapse = "") cl <- match.call() ########### INTERPRET FORMULA ############################## if(!inherits(X, "formula")) stop(paste("Argument 'X' should be a formula")) formula <- X if(spatstat.options("expand.polynom")) formula <- expand.polynom(formula) ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Formula must have a left hand side")) Yexpr <- formula[[2L]] trend <- formula[c(1L,3L)] ## FIT ####################################### thecall <- call("lppm", X=Yexpr, trend=trend, data=data, interaction=interaction) ncall <- length(thecall) argh <- list(...) 
nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } result <- eval(thecall, parent.frame()) result$call <- cl result$callstring <- callstring return(result) } lppm.lpp <- function(X, ..., eps=NULL, nd=1000, random=FALSE) { Xname <- short.deparse(substitute(X)) callstring <- paste(short.deparse(sys.call()), collapse = "") cl <- match.call() nama <- names(list(...)) resv <- c("method", "forcefit") if(any(clash <- resv %in% nama)) warning(paste(ngettext(sum(clash), "Argument", "Arguments"), commasep(sQuote(resv[clash])), "must not be used")) stopifnot(inherits(X, "lpp")) Q <- linequad(X, eps=eps, nd=nd, random=random) fit <- ppm(Q, ..., method="mpl", forcefit=TRUE) if(!is.poisson.ppm(fit)) warning("Non-Poisson models currently use Euclidean distance") out <- list(X=X, fit=fit, Xname=Xname, call=cl, callstring=callstring) class(out) <- "lppm" return(out) } is.lppm <- function(x) { inherits(x, "lppm") } # undocumented as.ppm.lppm <- function(object) { object$fit } fitted.lppm <- function(object, ..., dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE) { pfit <- object$fit v <- fitted(pfit, dataonly=dataonly, new.coef=new.coef, leaveoneout=leaveoneout) return(v) } predict.lppm <- function(object, ..., type="trend", locations=NULL, new.coef=NULL) { type <- pickoption("type", type, c(trend="trend", cif="cif", lambda="cif")) X <- object$X fit <- object$fit L <- as.linnet(X) if(!is.null(locations)) { #' locations given; return a vector of predicted values if(is.lpp(locations)) locations <- as.ppp(locations) values <- predict(fit, locations=locations, type=type, new.coef=new.coef) return(values) } # locations not given; want a pixel image # pixellate the lines Llines <- as.psp(L) linemask <- as.mask.psp(Llines, ...) 
lineimage <- as.im(linemask) # extract pixel centres xx <- rasterx.mask(linemask) yy <- rastery.mask(linemask) mm <- linemask$m xx <- as.vector(xx[mm]) yy <- as.vector(yy[mm]) pixelcentres <- ppp(xx, yy, window=as.rectangle(linemask), check=FALSE) pixdf <- data.frame(xc=xx, yc=yy) # project pixel centres onto lines p2s <- project2segment(pixelcentres, Llines) projloc <- as.data.frame(p2s$Xproj) projmap <- as.data.frame(p2s[c("mapXY", "tp")]) projdata <- cbind(pixdf, projloc, projmap) # predict at the projected points if(!is.multitype(fit)) { values <- predict(fit, locations=projloc, type=type, new.coef=new.coef) # map to nearest pixels Z <- lineimage Z[pixelcentres] <- values # attach exact line position data df <- cbind(projdata, values) out <- linim(L, Z, df=df, restrict=FALSE) } else { # predict for each type lev <- levels(marks(data.ppm(fit))) out <- list() for(k in seq(length(lev))) { markk <- factor(lev[k], levels=lev) locnk <- cbind(projloc, data.frame(marks=markk)) values <- predict(fit, locations=locnk, type=type, new.coef=new.coef) Z <- lineimage Z[pixelcentres] <- values df <- cbind(projdata, values) out[[k]] <- linim(L, Z, df=df, restrict=FALSE) } out <- as.solist(out) names(out) <- as.character(lev) } return(out) } coef.lppm <- function(object, ...) { coef(object$fit) } print.lppm <- function(x, ...) { splat("Point process model on linear network") print(x$fit) terselevel <- spatstat.options('terse') if(waxlyrical('extras', terselevel)) splat("Original data:", x$Xname) if(waxlyrical('gory', terselevel)) print(as.linnet(x)) return(invisible(NULL)) } summary.lppm <- function(object, ...) 
{ splat("Point process model on linear network") print(summary(object$fit)) terselevel <- spatstat.options('terse') if(waxlyrical('extras', terselevel)) splat("Original data:", object$Xname) if(waxlyrical('gory', terselevel)) print(summary(as.linnet(object))) return(invisible(NULL)) } plot.lppm <- function(x, ..., type="trend") { xname <- short.deparse(substitute(x)) y <- predict(x, type=type) do.call(plot, resolve.defaults(list(y), list(...), list(main=xname))) } anova.lppm <- function(object, ..., test=NULL) { stuff <- list(object=object, ...) if(!is.na(hit <- match("override", names(stuff)))) { warning("Argument 'override' is outdated and was ignored") stuff <- stuff[-hit] } #' extract ppm objects where appropriate mod <- sapply(stuff, is.lppm) stuff[mod] <- lapply(stuff[mod], getElement, name="fit") #' analysis of deviance or adjusted composite deviance do.call(anova.ppm, append(stuff, list(test=test))) } update.lppm <- function(object, ...) { stopifnot(inherits(object, "lppm")) X <- object$X fit <- object$fit Xname <- object$Xname callframe <- environment(formula(fit)) aargh <- list(...) 
islpp <- sapply(aargh, is.lpp) if(any(islpp)) { # trap point pattern argument & convert to quadscheme ii <- which(islpp) if((npp <- length(ii)) > 1) stop(paste("Arguments not understood:", npp, "lpp objects given")) X <- aargh[[ii]] aargh[[ii]] <- linequad(X) } isfmla <- sapply(aargh, inherits, what="formula") if(any(isfmla)) { # trap formula pattern argument, update it, evaluate LHS if required jj <- which(isfmla) if((nf <- length(jj)) > 1) stop(paste("Arguments not understood:", nf, "formulae given")) fmla <- aargh[[jj]] fmla <- update(formula(object), fmla) if(!is.null(lhs <- lhs.of.formula(fmla))) { X <- eval(lhs, envir=list2env(list("."=X), parent=callframe)) Qpos <- if(any(islpp)) ii else (length(aargh) + 1L) aargh[[Qpos]] <- linequad(X) } aargh[[jj]] <- rhs.of.formula(fmla) } newfit <- do.call(update.ppm, append(list(fit), aargh), envir=callframe) if(!is.poisson.ppm(newfit)) warning("Non-Poisson models currently use Euclidean distance") out <- list(X=X, fit=newfit, Xname=Xname) class(out) <- "lppm" return(out) } terms.lppm <- function(x, ...) { terms(x$fit, ...) } logLik.lppm <- function(object, ...) { logLik(object$fit, ...) } deviance.lppm <- function(object, ...) { as.numeric(-2 * logLik(object, ...)) } pseudoR2.lppm <- function(object, ...) { dres <- deviance(object, ..., warn=FALSE) nullmod <- update(object, . ~ 1) dnul <- deviance(nullmod, warn=FALSE) return(1 - dres/dnul) } formula.lppm <- function(x, ...) { formula(x$fit, ...) } extractAIC.lppm <- function(fit, ...) { extractAIC(fit$fit, ...) } as.owin.lppm <- function(W, ..., fatal=TRUE) { stopifnot(inherits(W, "lppm")) as.owin(as.linnet(W), ..., fatal=fatal) } Window.lppm <- function(X, ...) { as.owin(X) } model.images.lppm <- local({ model.images.lppm <- function(object, L=as.linnet(object), ...) { stopifnot(inherits(object, "lppm")) stopifnot(inherits(L, "linnet")) m <- model.images(object$fit, W=as.rectangle(L), ...) 
if(length(m) > 0) { ## restrict images to L rasta <- as.mask(m[[1L]]) DL <- as.mask.psp(as.psp(L), xy=rasta) ZL <- as.im(DL) if(!is.hyperframe) { ## list of images: convert to list of linims m <- tolinims(m, L=L, imL=ZL) } else { ## hyperframe, each column being a list of images mm <- lapply(as.list(m), tolinims, L=L, imL=ZL) m <- do.call(hyperframe, mm) } } return(m) } tolinim <- function(x, L, imL) linim(L, eval.im(x * imL), restrict=FALSE) tolinims <- function(x, L, imL) solapply(x, tolinim, L=L, imL=imL) model.images.lppm }) model.matrix.lppm <- function(object, data=model.frame(object, na.action=NULL), ..., keepNA=TRUE) { stopifnot(is.lppm(object)) if(missing(data)) data <- NULL model.matrix(object$fit, data=data, ..., keepNA=keepNA) } model.frame.lppm <- function(formula, ...) { stopifnot(inherits(formula, "lppm")) model.frame(formula$fit, ...) } domain.lppm <- as.linnet.lppm <- function(X, ...) { as.linnet(X$X, ...) } nobs.lppm <- function(object, ...) { npoints(object$X) } is.poisson.lppm <- function(x) { is.poisson(x$fit) } is.stationary.lppm <- function(x) { is.stationary(x$fit) } is.multitype.lppm <- function(X, ...) { is.multitype(X$fit) } is.marked.lppm <- function(X, ...) { is.marked(X$fit) } vcov.lppm <- function(object, ...) { if(!is.poisson(object)) stop("vcov.lppm is only implemented for Poisson models") vcov(object$fit, ...) } valid.lppm <- function(object, ...) { valid(object$fit, ...) } emend.lppm <- function(object, ...) { object$fit <- emend(object$fit, ...) 
return(object) } spatstat/R/rat.R0000644000176200001440000001260213131624754013236 0ustar liggesusers# # rat.R # # Ratio objects # # Numerator and denominator are stored as attributes # # $Revision: 1.11 $ $Date: 2017/07/13 08:02:16 $ # rat <- function(ratio, numerator, denominator, check=TRUE) { if(check) { stopifnot(compatible(numerator, denominator)) stopifnot(compatible(ratio, denominator)) } attr(ratio, "numerator") <- numerator attr(ratio, "denominator") <- denominator class(ratio) <- c("rat", class(ratio)) return(ratio) } print.rat <- function(x, ...) { NextMethod("print") cat("[Contains ratio information]\n") return(invisible(NULL)) } compatible.rat <- function(A, B, ...) { NextMethod("compatible") } pool.rat <- local({ Add <- function(A,B){ force(A); force(B); eval.fv(A+B, relabel=FALSE) } Square <- function(A) { force(A); eval.fv(A^2, relabel=FALSE) } Mul <- function(A,B){ force(A); force(B); eval.fv(A*B, relabel=FALSE) } pool.rat <- function(..., weights=NULL, relabel=TRUE, variance=TRUE) { argh <- list(...) 
n <- narg <- length(argh)
    if(narg == 0) return(NULL)
    if(narg == 1) return(argh[[1]])
    ##
    ## every argument must carry numerator/denominator information
    israt <- unlist(lapply(argh, inherits, what="rat"))
    if(any(bad <- !israt)) {
      nbad <- sum(bad)
      stop(paste(ngettext(nbad, "Argument", "Arguments"),
                 commasep(which(bad)),
                 ngettext(nbad, "does not", "do not"),
                 "contain ratio (numerator/denominator) information"))
    }
    isfv <- unlist(lapply(argh, is.fv))
    if(!all(isfv))
      stop("All arguments must be fv objects")
    ## extract numerators (Y) and denominators (X),
    ## harmonised to a common grid of function argument values
    template <- vanilla.fv(argh[[1]])
    Y <- lapply(argh, attr, which="numerator")
    X <- lapply(argh, attr, which="denominator")
    X <- do.call(harmonise, X)
    Y <- do.call(harmonise, Y)
    templateX <- vanilla.fv(X[[1]])
    templateY <- vanilla.fv(Y[[1]])
    ## compute products (apply optional per-function weights)
    if(!is.null(weights)) {
      check.nvector(weights, narg, things="Functions")
      X <- Map(Mul, X, weights)
      Y <- Map(Mul, Y, weights)
    }
    ## sum numerators and denominators separately
    sumX <- Reduce(Add, X)
    sumY <- Reduce(Add, Y)
    attributes(sumX) <- attributes(templateX)
    attributes(sumY) <- attributes(templateY)
    ## pooled estimate is the ratio-of-sums
    Ratio <- eval.fv(sumY/sumX, relabel=FALSE)
    attributes(Ratio) <- attributes(template)
    ## variance calculation
    if(variance) {
      ## sample means and variances of numerator and denominator
      meanX <- eval.fv(sumX/n, relabel=FALSE)
      meanY <- eval.fv(sumY/n, relabel=FALSE)
      sumX2 <- Reduce(Add, lapply(X, Square))
      sumY2 <- Reduce(Add, lapply(Y, Square))
      varX <- eval.fv((sumX2 - n * meanX^2)/(n-1), relabel=FALSE)
      varY <- eval.fv((sumY2 - n * meanY^2)/(n-1), relabel=FALSE)
      ## covariance between numerator and denominator
      XY <- Map(Mul, X, Y)
      sumXY <- Reduce(Add, XY)
      covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1), relabel=FALSE)
      ## variance of the ratio by the delta method
      ## (relative variance clamped at zero)
      relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2
                                    - 2 * covXY/(meanX * meanY)),
                        relabel=FALSE)
      Variance <- eval.fv(Ratio^2 * relvar/n, relabel=FALSE)
      attributes(Variance) <- attributes(template)
      ## two sigma CI
      hiCI <- eval.fv(Ratio + 2 * sqrt(Variance), relabel=FALSE)
      loCI <- eval.fv(Ratio - 2 * sqrt(Variance), relabel=FALSE)
      attributes(hiCI) <- attributes(loCI) <- attributes(template)
    }
    ## dress up: relabel columns to indicate pooling
    if(relabel) {
      Ratio <- prefixfv(Ratio,
                        tagprefix="pool",
                        descprefix="pooled ",
                        lablprefix="")
      if(variance) {
        Variance <- prefixfv(Variance,
                             tagprefix="var",
                             descprefix="delta-method variance estimate of ",
                             lablprefix="bold(var)~")
        hiCI <- prefixfv(hiCI,
                         tagprefix="hi",
                         descprefix="upper limit of two-sigma CI based on ",
                         lablprefix="bold(hi)~")
        loCI <- prefixfv(loCI,
                         tagprefix="lo",
                         descprefix="lower limit of two-sigma CI based on ",
                         lablprefix="bold(lo)~")
      }
    }
    ## bind estimate, variance and CI limits into one fv object
    result <- if(!variance) Ratio else
              Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI))
    return(result)
  }

  pool.rat
})

## Rescale the function values, and the stored numerator and/or
## denominator, of a ratio fv object by constant factors, keeping
## the three components mutually consistent.
adjust.ratfv <- function(f, columns=fvnames(f, "*"),
                         numfactor=1, denfactor=1) {
  stopifnot(is.fv(f))
  f[,columns] <- (numfactor/denfactor) * as.data.frame(f)[,columns]
  if(numfactor != 1 && !is.null(num <- attr(f, "numerator"))) {
    num[,columns] <- numfactor * as.data.frame(num)[,columns]
    attr(f, "numerator") <- num
  }
  if(denfactor != 1 && !is.null(den <- attr(f, "denominator"))) {
    den[,columns] <- denfactor * as.data.frame(den)[,columns]
    attr(f, "denominator") <- den
  }
  return(f)
}

tweak.ratfv.entry <- function(x, ...) {
  # apply same tweak to function, numerator and denominator.
  x <- tweak.fv.entry(x, ...)
  if(!is.null(num <- attr(x, "numerator")))
    attr(x, "numerator") <- tweak.fv.entry(num, ...)
  if(!is.null(den <- attr(x, "denominator")))
    attr(x, "denominator") <- tweak.fv.entry(den, ...)
  return(x)
}

## Subset method for ratio objects:
## apply the same subset operation to the ratio itself and to the
## stored numerator and denominator, then restore the 'rat' class.
"[.rat" <- function(x, ...) {
  if(!is.fv(x)) stop("Not yet implemented for non-fv ratios")
  num <- attr(x, "numerator")
  den <- attr(x, "denominator")
  class(x) <- "fv"
  x <- x[...]
  den <- den[...]
  num <- num[...]
attr(x, "numerator") <- num attr(x, "denominator") <- den class(x) <- c("rat", class(x)) return(x) } spatstat/R/Iest.R0000755000176200001440000000506613115271075013361 0ustar liggesusers# Iest.R # # I function # # $Revision: 1.15 $ $Date: 2016/04/25 02:34:40 $ # # # Iest <- local({ Iest <- function(X, ..., eps=NULL, r = NULL, breaks = NULL, correction=NULL) { X <- as.ppp(X) if(!is.multitype(X)) stop("Only applicable to multitype point patterns") marx <- marks(X, dfok=FALSE) ntypes <- length(levels(marx)) Y <- unmark(split(X)) ## relative proportions ni <- sapply(Y, npoints) fi <- ni/sum(ni) ## J function of pattern regardless of type Jdotdot <- Jest(unmark(X), correction=correction, r=r, eps=eps, breaks=breaks) rvals <- Jdotdot$r ## J function of subpattern of each type i Jii <- lapply(Y, Jest, r=rvals, correction=correction) nrvals <- lengths(lapply(Jii, getElement, name="r")) if(length(unique(nrvals)) != 1 || nrvals[1] != length(rvals)) stop("Internal error: J function objects have different lengths") ## initialise fv object alim <- attr(Jdotdot, "alim") Z <- fv(data.frame(r=rvals, theo=0), "r", substitute(I(r), NULL), "theo", . 
~ r, alim, c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="I") ## Estimates of each type namii <- unlist(lapply(Jii, names)) namdd <- names(Jdotdot) bothnames <- namii[namii %in% namdd] if("un" %in% bothnames) { Jun <- matrix(extract(Jii, "un"), nrow=ntypes, byrow=TRUE) Iun <- apply(fi * Jun, 2, sum) - Jdotdot$un Z <- bind.fv(Z, data.frame(un=Iun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") } if("rs" %in% bothnames) { Jrs <- matrix(extract(Jii, "rs"), nrow=ntypes, byrow=TRUE) Irs <- apply(fi * Jrs, 2, sum) - Jdotdot$rs Z <- bind.fv(Z, data.frame(rs=Irs), "hat(%s)[rs](r)", "border corrected estimate of %s", "rs") } if("han" %in% bothnames) { Jhan <- matrix(extract(Jii, "han"), nrow=ntypes, byrow=TRUE) Ihan <- apply(fi * Jhan, 2, sum) - Jdotdot$han Z <- bind.fv(Z, data.frame(han=Ihan), "hat(%s)[han](r)", "Hanisch-style estimate of %s", "han") } if("km" %in% bothnames) { Jkm <- matrix(extract(Jii, "km"), nrow=ntypes, byrow=TRUE) Ikm <- apply(fi * Jkm, 2, sum) - Jdotdot$km Z <- bind.fv(Z, data.frame(km=Ikm), "hat(%s)[km](r)", "Kaplan-Meier estimate of %s", "km") } unitname(Z) <- unitname(X) return(Z) } extract <- function(Zlist, what) sapply(Zlist, "[[", i=what) Iest }) spatstat/R/rmh.ppm.R0000755000176200001440000001212013115271120014012 0ustar liggesusers# # simulation of FITTED model # # $Revision: 1.34 $ $Date: 2016/07/31 07:39:32 $ # # rmh.ppm <- function(model, start = NULL, control = default.rmhcontrol(model, w=w), ..., w = NULL, project=TRUE, nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, new.coef=NULL) { verifyclass(model, "ppm") argh <- list(...) if(is.null(control)) { control <- default.rmhcontrol(model, w=w) } else { control <- rmhcontrol(control) } # override if(length(list(...)) > 0) control <- update(control, ...) 
# convert fitted model object to list of parameters for rmh.default
  X <- rmhmodel(model, w=w, verbose=verbose, project=project,
                control=control, new.coef=new.coef)
  # set initial state: default is the observed number of data points
  if(is.null(start)) {
    datapattern <- data.ppm(model)
    start <- rmhstart(n.start=datapattern$n)
  }
  # call rmh.default
  # passing only arguments unrecognised by rmhcontrol
  known <- names(argh) %in% names(formals(rmhcontrol.default))
  fargs <- argh[!known]
  Y <- do.call(rmh.default,
               append(list(model=X, start=start, control=control,
                           nsim=nsim, drop=drop, saveinfo=saveinfo,
                           verbose=verbose),
                      fargs))
  return(Y)
}

# Simulate a fitted point process model ('ppm') by Metropolis-Hastings.
# Either runs 'nsim' independent chains, or (singlerun=TRUE) one long
# chain sampled every 'nsave' iterations after a burn-in of 'nburn'.
# Returns a list of patterns, or a single pattern if nsim=1 and drop=TRUE.
simulate.ppm <- function(object, nsim=1, ..., singlerun=FALSE,
                         start = NULL,
                         control = default.rmhcontrol(object, w=w),
                         w = NULL,
                         project=TRUE, new.coef=NULL,
                         verbose=FALSE,
                         progress=(nsim > 1),
                         drop=FALSE) {
  verifyclass(object, "ppm")
  argh <- list(...)
  if(nsim == 0) return(list())
  starttime <- proc.time()   # fixed: was '=' ; use '<-' for assignment
  # set up control parameters
  if(missing(control) || is.null(control)) {
    rcontr <- default.rmhcontrol(object, w=w)
  } else {
    rcontr <- rmhcontrol(control)
  }
  if(singlerun) {
    # allow nsave, nburn to determine nrep
    nsave <- resolve.1.default("nsave", list(...), as.list(rcontr),
                               .MatchNull=FALSE)
    nburn <- resolve.1.default("nburn", list(...), as.list(rcontr),
                               list(nburn=nsave), .MatchNull=FALSE)
    if(!is.null(nsave)) {
      nrep <- nburn + (nsim-1) * nsave
      rcontr <- update(rcontr, nrep=nrep, nsave=nsave, nburn=nburn)
    }
  }
  # other overrides
  if(length(list(...)) > 0) rcontr <- update(rcontr, ...)
  # Set up model parameters for rmh
  rmodel <- rmhmodel(object, w=w, verbose=FALSE, project=TRUE,
                     control=rcontr, new.coef=new.coef)
  # default initial state: observed number of points
  if(is.null(start)) {
    datapattern <- data.ppm(object)
    start <- rmhstart(n.start=datapattern$n)
  }
  rstart <- rmhstart(start)
  #########
  if(singlerun && nsim > 1) {
    # //////////////////////////////////////////////////
    # execute one long run and save every k-th iteration
    if(is.null(rcontr$nsave)) {
      # determine spacing between subsamples
      if(!is.null(rcontr$nburn)) {
        nsave <- max(1, with(rcontr, floor((nrep - nburn)/(nsim-1))))
      } else {
        # assume nburn = 2 * nsave
        nsave <- max(1, with(rcontr, floor(nrep/(nsim+1))))
        nburn <- 2 * nsave
      }
      rcontr <- update(rcontr, nsave=nsave, nburn=nburn)
    }
    # check nrep is enough
    nrepmin <- with(rcontr, nburn + (nsim-1) * nsave)
    if(rcontr$nrep < nrepmin)
      rcontr <- update(rcontr, nrep=nrepmin)
    # OK, run it
    if(progress) {
      cat(paste("Generating", nsim, "simulated patterns in a single run ... "))
      flush.console()
    }
    Y <- rmh(rmodel, rstart, rcontr, verbose=verbose)
    if(progress) cat("Done.\n")
    # extract sampled states
    out <- attr(Y, "saved")
    if(length(out) != nsim)
      stop(paste("Internal error: wrong number of simulations generated:",
                 length(out), "!=", nsim))
  } else {
    # //////////////////////////////////////////////////
    # execute 'nsim' independent runs
    out <- list()
    # pre-digest arguments (run the expensive preparation step only once)
    rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=verbose)
    # go
    if(nsim > 0) {
      if(progress) {
        cat(paste("Generating", nsim, "simulated",
                  ngettext(nsim, "pattern", "patterns"), "..."))
        flush.console()
      }
      # call rmh
      # passing only arguments unrecognised by rmhcontrol
      known <- names(argh) %in% names(formals(rmhcontrol.default))
      fargs <- argh[!known]
      rmhargs <- append(list(InfoList=rmhinfolist, verbose=verbose), fargs)
      if(progress) pstate <- list()
      for(i in seq_len(nsim)) {   # fixed: seq_len() is safe, unlike 1:nsim
        out[[i]] <- do.call(rmhEngine, rmhargs)
        if(progress) pstate <- progressreport(i, nsim, state=pstate)
      }
    }
  }
  if(nsim == 1 && drop) {
    out <- out[[1]]
  } else {
    out <-
as.solist(out) if(nsim > 0) names(out) <- paste("Simulation", 1:nsim) } out <- timed(out, starttime=starttime) return(out) } spatstat/R/detPPF-class.R0000644000176200001440000001756013115225157014701 0ustar liggesusers## support for class 'detpointprocfamily' print.detpointprocfamily <- function(x, ...){ splat(x$name, "determinantal point process model", ifelse(is.numeric(x$dim), paste("in dimension", x$dim), "")) #' Not used: #' parnames <- names(x$par) anyfixed <- length(x$fixedpar)>0 if(anyfixed){ fixedlambda <- NULL if(!is.null(x$intensity) && is.element(x$intensity, names(x$fixedpar))){ lambda <- signif(x$fixedpar[[x$intensity]], 4) x$fixedpar <- x$fixedpar[names(x$fixedpar)!=x$intensity] fixedlambda <- paste(x$intensity, ifelse(is.null(x$thin), paste("=", lambda), "= an image")) } if(length(x$fixedpar)>0){ fixedparstring <- paste(names(x$fixedpar), signif(unlist(x$fixed),4), sep = " = ", collapse = ", ") fixedparstring <- paste(fixedlambda, fixedparstring, sep=", ") } else{ fixedparstring <- fixedlambda } } ## Partially specified model: if(length(x$freepar)>0){ splat("The model is only partially specified.") splat("The following parameters are free (e.g. 
to be estimated by dppm):")
    cat(x$freepar, sep = ", ")
    cat("\n")
    if(anyfixed){
      cat("The fixed parameters are: ")
      cat(fixedparstring, sep = ", ")
    } else{
      splat("There are no fixed parameters.")
    }
  } else{
    cat("The parameters are: ")
    ## NOTE(review): 'fixedparstring' is only assigned when anyfixed is
    ## TRUE; this branch may error for a model with no fixed and no free
    ## parameters -- confirm whether that state is reachable.
    cat(fixedparstring, sep = ", ")
  }
  cat("\n")
  if(!is.null(x$intensity)){
    splat("The parameter", x$intensity,
          "specifies the intensity of the process.")
  }
  if(is.character(x$dim)){
    splat("The parameter", x$dim,
          "specifies the dimension of the state space.")
  }
  invisible(NULL)
}

## Interaction range of a determinantal point process family:
## delegates to the family's own 'range' function, passing only the
## fixed parameters that this function accepts.
reach.detpointprocfamily <- function(x, ...){
  model <- x
  fun <- model$range
  nam <- names(formals(fun))
  do.call(model$range,
          c(model$fixedpar[is.element(names(model$fixedpar),nam)],
            list(...)))
}

## Lower/upper bounds for the named parameter(s) of a determinantal
## point process model (dppm) or family (detpointprocfamily).
## Rows that cannot be computed are filled with NA.
dppparbounds <- function(model, name, ...){
  if(inherits(model, "dppm"))
    model <- model$fitted
  if(!inherits(model, "detpointprocfamily"))
    stop("input model must be of class detpointprocfamily or dppm")
  fun <- model$parbounds
  nam <- names(formals(fun))
  if(missing(name))
    name <- nam[!is.element(nam, c("name", model$dim))]
  rslt <- matrix(0, length(name), 2,
                 dimnames = list(name, c("lower", "upper")))
  for(nn in name){
    tmp <- try(do.call(fun,
                       c(model$fixedpar[is.element(names(model$fixedpar),nam)],
                         list(...),
                         list(name=nn))),
               silent=TRUE)
    ## Fixed: test with inherits() instead of 'class(tmp)=="try-error"';
    ## comparing class() with '==' is unreliable for multi-class objects.
    if(inherits(tmp, "try-error")){
      rslt[nn,] <- c(NA, NA)
    } else {
      rslt[nn,] <- tmp
    }
  }
  rslt
}

valid.detpointprocfamily <- function(object, ...){
  ## validity is undecidable while some parameters are still free
  if(length(object$freepar)>0)
    return(NA)
  ## If there is no function for checking validity we always return TRUE:
  if(is.null(object$valid))
    return(TRUE)
  do.call(object$valid, object$fixedpar)
}

dppspecdenrange <- function(model){
  ## If there is no function for checking finite range of spectral density we always return Inf:
  fun <- model$specdenrange
  if(is.null(fun)) return(Inf)
  xx <- try(fun(model), silent = TRUE)
  ## Fixed: scalar condition, so use if/else with inherits() rather than
  ## ifelse(class(xx)=="try-error", ...), which is fragile for
  ## multi-class objects and silently truncates vector results.
  if(inherits(xx, "try-error")) Inf else xx
}

## Extract the spectral density of a fully specified model,
## as a function of frequency.
dppspecden <- function(model){
  fun <- model$specden
  if(is.null(fun))
    stop("Spectral density unknown for this model!")
  if(length(model$freepar)>0)
    stop("Cannot extract the spectral density of a partially specified model. Please supply all parameters.")
  specden <- function(x, ...){
    allargs <- c(list(x), model$fixedpar, list(...))
    do.call(fun, allargs)
  }
  return(specden)
}

## Extract the kernel of a fully specified model as a function of
## distance; falls back on a numerical approximation when the kernel
## is not known analytically.
dppkernel <- function(model, ...){
  if(inherits(model, "dppm"))
    model <- model$fitted
  fun <- model$kernel
  if(is.null(fun))
    return(dppapproxkernel(model, ...))
  if(length(model$freepar)>0)
    stop("Cannot extract the kernel of a partially specified model. Please supply all parameters.")
  firstarg <- names(formals(fun))[1L]
  kernel <- function(x){
    allargs <- c(structure(list(x), .Names=firstarg), model$fixedpar)
    do.call(fun, allargs)
  }
  return(kernel)
}

## Numerical approximation of the kernel by a truncated Fourier
## expansion on the window W; 'trunc' is the fraction of mass retained.
dppapproxkernel <- function(model, trunc = .99, W = NULL){
  if(inherits(model, "dppm")){
    W <- model$window
    model <- model$fitted
  }
  ####### BACKDOOR TO SPHERICAL CASE ########
  if(!is.null(spherefun <- model$approxkernelfun)){
    spherefun <- get(spherefun)
    rslt <- spherefun(model, trunc)
    return(rslt)
  }
  ###########################################
  d <- dim(model)
  if(is.null(W))
    W <- boxx(replicate(d, c(-.5,.5), simplify=FALSE))
  W <- as.boxx(W)
  if(d!=ncol(W$ranges))
    stop(paste("The dimension of the window:", ncol(W$ranges),
               "is inconsistent with the dimension of the model:", d))
  Wscale <- as.numeric(W$ranges[2L,]-W$ranges[1L,])
  tmp <- dppeigen(model, trunc, Wscale, stationary=FALSE)
  index <- tmp$index
  eig <- tmp$eig
  prec <- tmp$prec
  trunc <- tmp$trunc
  rm(tmp)
  ## kernel evaluated along the first coordinate axis (isotropy assumed)
  f <- function(r){
    x <- matrix(0, nrow=length(r), ncol=d)
    x[,1L] <- r
    basis <- fourierbasis(x, index, win = W)
    approx <- matrix(eig, nrow=length(eig), ncol=length(r)) * basis
    return(Re(colSums(approx)))
  }
  attr(f, "dpp") <- list(prec = prec, trunc = trunc)
  return(f)
}

## Pair correlation function derived from the kernel:
## g(r) = 1 - (K(r)/K(0))^2
pcfmodel.detpointprocfamily <- function(model, ...){
  kernel <- dppkernel(model, ...)
f <- function(x){ 1 - (kernel(x)/kernel(0))^2 } return(f) } dppapproxpcf <- function(model, trunc = .99, W = NULL){ kernel <- dppapproxkernel(model, trunc = trunc, W = W) f <- function(x){ 1 - (kernel(x)/kernel(0))^2 } attr(f, "dpp") <- attr(kernel, "dpp") return(f) } Kmodel.detpointprocfamily <- function(model, ...){ if(length(model$freepar)>0) stop("Cannot extract the K function of a partially specified model. Please supply all parameters.") fun <- model$Kfun if(!is.null(fun)){ firstarg <- names(formals(fun))[1L] Kfun <- function(r){ allargs <- c(structure(list(r), .Names=firstarg), model$fixedpar) do.call(fun, allargs) } } else{ pcf <- pcfmodel(model, ...) intfun <- function(xx){ 2*pi*xx*pcf(xx) } Kfun <- function(r){ r <- sort(r) if(r[1L]<0) stop("Negative values not allowed in K function!") r <- c(0,r) int <- unlist(lapply(2:length(r), function(i) integrate(intfun, r[i-1L], r[i], subdivisions=10)$value)) return(cumsum(int)) } } return(Kfun) } update.detpointprocfamily <- function(object, ...){ newpar <- list(...) if(length(newpar)==1L && is.list(newpar[[1L]]) && !is.im(newpar[[1L]])) newpar <- newpar[[1L]] nam <- names(newpar) if(length(newpar)>0&&is.null(nam)) stop(paste("Named arguments are required. 
Please supply parameter values in a", sQuote("tag=value"), "form")) oldpar <- object$fixedpar[!is.element(names(object$fixedpar), nam)] thin <- object$thin object <- do.call(object$caller, c(newpar,oldpar)) if(is.null(object$thin)) object$thin <- thin return(object) } is.stationary.detpointprocfamily <- function(x){ if(is.null(x$intensity)) return(FALSE) lambda <- getElement(x$fixedpar, x$intensity) if(!is.null(lambda)&&is.numeric(lambda)&&is.null(x$thin)) return(TRUE) return(FALSE) } intensity.detpointprocfamily <- function(X, ...){ lambda <- NULL if(!is.null(X$intensity)) lambda <- getElement(X$fixedpar, X$intensity) if(!is.null(lambda)){ if(!is.null(X$thin)) lambda <- lambda*X$thin return(lambda) } return(NA) } parameters.dppm <- parameters.detpointprocfamily <- function(model, ...){ if(inherits(model, "dppm")) model <- model$fitted c(model$fixed, structure(rep(NA,length(model$freepar)), .Names = model$freepar)) } dim.detpointprocfamily <- function(x){ if(is.numeric(d <- x$dim)){ return(d) } else{ return(getElement(x$fixedpar, d)) } } spatstat/R/is.subset.owin.R0000755000176200001440000000412513115271075015342 0ustar liggesusers# # is.subset.owin.R # # $Revision: 1.14 $ $Date: 2017/02/07 07:47:20 $ # # Determine whether a window is a subset of another window # # is.subset.owin() # is.subset.owin <- local({ is.subset.owin <- function(A, B) { A <- as.owin(A) B <- as.owin(B) if(identical(A, B)) return(TRUE) A <- rescue.rectangle(A) B <- rescue.rectangle(B) if(is.rectangle(B)) { # Some cases can be resolved using convexity of B # (1) A is also a rectangle if(is.rectangle(A)) { xx <- A$xrange[c(1L,2L,2L,1L)] yy <- A$yrange[c(1L,1L,2L,2L)] ok <- inside.owin(xx, yy, B) return(all(ok)) } # (2) A is polygonal # Then A is a subset of B iff, # for every constituent polygon of A with positive sign, # the vertices are all in B if(is.polygonal(A)) { ok <- unlist(lapply(A$bdry, okpolygon, B=B)) return(all(ok)) } # (3) Feeling lucky # Test whether the bounding box of A is a 
subset of B # Then a fortiori, A is a subset of B AA <- boundingbox(A) if(is.subset.owin(AA, B)) return(TRUE) } if(!is.mask(A) && !is.mask(B)) { # rectangles or polygonal domains if(!all(inside.owin(vertices(A), , B))) return(FALSE) # all vertices of A are inside B. if(is.convex(B)) return(TRUE) A <- as.polygonal(A) B <- as.polygonal(B) if(length(B$bdry) == 1 && length(A$bdry) == 1) { # two simply-connected sets # check for boundary crossings bx <- crossing.psp(edges(A), edges(B)) return(npoints(bx) == 0) } else { # compare area of intersection with area of A return(overlap.owin(A,B) >= area(A)) } } # Discretise a <- as.mask(A) b <- as.mask(B) rxy <- rasterxy.mask(a, drop=TRUE) xx <- rxy$x yy <- rxy$y ok <- inside.owin(xx, yy, b) return(all(ok)) } okpolygon <- function(a, B) { if(Area.xypolygon(a) < 0) return(TRUE) ok <- inside.owin(a$x, a$y, B) return(all(ok)) } is.subset.owin }) spatstat/R/scriptUtils.R0000644000176200001440000000273313115225157014775 0ustar liggesusers## scriptUtils.R ## $Revision: 1.4 $ $Date: 2014/02/07 06:58:43 $ ## slick way to use precomputed data ## If the named file exists, it is loaded, giving access to the data. ## Otherwise, 'expr' is evaluated, and all objects created ## are saved in the designated file, for loading next time. 
reload.or.compute <- function(filename, expr, objects=NULL, destination=parent.frame()) { stopifnot(is.character(filename) && length(filename) == 1) if(!file.exists(filename)) { ## evaluate 'expr' in a fresh environment ee <- as.expression(substitute(expr)) en <- new.env() local(eval(ee), envir=en) ## default is to save all objects that were created if(is.null(objects)) objects <- ls(envir=en) ## save them in the designated file evalq(save(list=objects, file=filename, compress=TRUE), envir=en) ## assign them into the parent frame for(i in seq_along(objects)) assign(objects[i], get(objects[i], envir=en), envir=destination) result <- objects } else { result <- load(filename, envir=destination) if(!all(ok <- (objects %in% result))) { nbad <- sum(!ok) warning(paste(ngettext(nbad, "object", "objects"), commasep(sQuote(objects[!ok])), ngettext(nbad, "was", "were"), "not present in data file", dQuote(filename)), call.=FALSE) } } return(invisible(result)) } spatstat/R/scanstat.R0000644000176200001440000002520713115271120014260 0ustar liggesusers## ## scanstat.R ## ## Spatial scan statistics ## ## $Revision: 1.17 $ $Date: 2017/06/05 10:31:58 $ ## scanmeasure <- function(X, ...){ UseMethod("scanmeasure") } scanmeasure.ppp <- function(X, r, ..., method=c("counts", "fft")) { method <- match.arg(method) check.1.real(r) ## enclosing window R <- as.rectangle(as.owin(X)) ## determine pixel resolution M <- as.mask(R, ...) 
## expand domain to include centres of all circles intersecting R W <- grow.mask(M, r) ## switch(method, counts = { ## direct calculation using C code ## get new dimensions dimyx <- W$dim xr <- W$xrange yr <- W$yrange nr <- dimyx[1] nc <- dimyx[2] ## n <- npoints(X) zz <- .C("scantrans", x=as.double(X$x), y=as.double(X$y), n=as.integer(n), xmin=as.double(xr[1]), ymin=as.double(yr[1]), xmax=as.double(xr[2]), ymax=as.double(yr[2]), nr=as.integer(nr), nc=as.integer(nc), R=as.double(r), counts=as.integer(numeric(prod(dimyx))), PACKAGE = "spatstat") zzz <- matrix(zz$counts, nrow=dimyx[1], ncol=dimyx[2], byrow=TRUE) Z <- im(zzz, xrange=xr, yrange=yr, unitname=unitname(X)) }, fft = { ## Previous version of scanmeasure.ppp had ## Y <- pixellate(X, ..., padzero=TRUE) ## but this is liable to Gibbs phenomena. ## Instead, convolve with small Gaussian (sd = 1 pixel width) sigma <- with(W, unique(c(xstep, ystep))) Y <- density(X, ..., sigma=sigma) ## invoke scanmeasure.im Z <- scanmeasure(Y, r) Z <- eval.im(as.integer(round(Z))) }) return(Z) } scanmeasure.im <- function(X, r, ...) 
{ D <- disc(radius=r) eps <- with(X, c(xstep,ystep)) if(any(eps >= 2 * r)) return(eval.im(X * pi * r^2)) D <- as.im(as.mask(D, eps=eps)) Z <- imcov(X, D) return(Z) } scanPoisLRTS <- function(nZ, nG, muZ, muG, alternative) { nZco <- nG - nZ muZco <- muG - muZ nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a)) ll <- nlogn(nZ, muZ) + nlogn(nZco, muZco) - nlogn(nG, muG) criterion <- (nZ * muZco - muZ * nZco) switch(alternative, less={ ll[criterion > 0] <- 0 }, greater={ ll[criterion < 0] <- 0 }, two.sided={}) return(2 * ll) } scanBinomLRTS <- function(nZ, nG, muZ, muG, alternative) { nZco <- nG - nZ muZco <- muG - muZ nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a)) logbin <- function(k, n) { nlogn(k, n) + nlogn(n-k, n) } ll <- logbin(nZ, muZ) + logbin(nZco, muZco) - logbin(nG, muG) criterion <- (nZ * muZco - muZ * nZco) switch(alternative, less={ ll[criterion > 0] <- 0 }, greater={ ll[criterion < 0] <- 0 }, two.sided={}) return(2 * ll) } scanLRTS <- function(X, r, ..., method=c("poisson", "binomial"), baseline=NULL, case=2, alternative=c("greater", "less", "two.sided"), saveopt = FALSE, Xmask=NULL) { stopifnot(is.ppp(X)) stopifnot(check.nvector(r)) method <- match.arg(method) alternative <- match.arg(alternative) if(is.null(Xmask)) Xmask <- as.mask(as.owin(X), ...) 
switch(method, poisson={ Y <- X if(is.null(baseline)) { mu <- as.im(Xmask, value=1) } else if(is.ppm(baseline)) { if(is.marked(baseline)) stop("baseline is a marked point process: not supported") mu <- predict(baseline, locations=Xmask) } else if(is.im(baseline) || is.function(baseline)) { mu <- as.im(baseline, W=Xmask) } else stop(paste("baseline should be", "a pixel image, a function, or a fitted model")) nG <- npoints(Y) }, binomial={ stopifnot(is.multitype(X)) lev <- levels(marks(X)) if(length(lev) != 2) warning("X should usually be a bivariate (2-type) point pattern") if(!is.null(baseline)) stop("baseline is not supported in the binomial case") if(is.character(case) && !(case %in% lev)) stop(paste("Unrecognised label for cases:", sQuote(case))) if(is.numeric(case) && !(case %in% seq_along(lev))) stop(paste("Undefined level:", case)) Y <- split(X)[[case]] nG <- npoints(Y) mu <- unmark(X) }) ## The following line ensures that the same pixel resolution information ## is passed to the two calls to 'scanmeasure' below Y$window <- Xmask ## nr <- length(r) lrts <- vector(mode="list", length=nr) for(i in 1:nr) { ri <- r[i] nZ <- scanmeasure(Y, ri) muZ <- scanmeasure(mu, ri) if(!compatible.im(nZ, muZ)) { ha <- harmonise.im(nZ, muZ) nZ <- ha[[1]] muZ <- ha[[2]] } switch(method, poisson = { muG <- integral.im(mu) lrts[[i]] <- eval.im(scanPoisLRTS(nZ, nG, muZ, muG, alternative)) }, binomial = { muG <- npoints(mu) lrts[[i]] <- eval.im(scanBinomLRTS(nZ, nG, muZ, muG, alternative)) }) } if(length(lrts) == 1) { result <- lrts[[1]] } else { result <- im.apply(lrts, max) if(saveopt) attr(result, "iopt") <- im.apply(lrts, which.max) } return(result) } scan.test <- function(X, r, ..., method=c("poisson", "binomial"), nsim = 19, baseline=NULL, case = 2, alternative=c("greater", "less", "two.sided"), verbose=TRUE) { dataname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) method <- match.arg(method) alternative <- match.arg(alternative) stopifnot(is.numeric(r)) 
check.1.real(nsim) if(!(round(nsim) == nsim && nsim > 1)) stop("nsim should be an integer > 1") regionname <- paste("circles of radius", if(length(r) == 1) r else paste("between", min(r), "and", max(r))) ## ## compute observed loglikelihood function ## This also validates the arguments. obsLRTS <- scanLRTS(X=X, r=r, method=method, alternative=alternative, baseline=baseline, case=case, ..., saveopt=TRUE) obs <- max(obsLRTS) sim <- numeric(nsim) ## determine how to simulate switch(method, binomial={ methodname <- c("Spatial scan test", "Null hypothesis: constant relative risk", paste("Candidate cluster regions:", regionname), "Likelihood: binomial", paste("Monte Carlo p-value based on", nsim, "simulations")) lev <- levels(marks(X)) names(lev) <- lev casename <- lev[case] counted <- paste("points with mark", sQuote(casename), "inside cluster region") simexpr <- expression(rlabel(X)) }, poisson={ counted <- paste("points inside cluster region") X <- unmark(X) Xwin <- as.owin(X) Xmask <- as.mask(Xwin, ...) 
if(is.null(baseline)) { nullname <- "Complete Spatial Randomness (CSR)" lambda <- intensity(X) simexpr <- expression(runifpoispp(lambda, Xwin)) dont.complain.about(lambda) } else if(is.ppm(baseline)) { nullname <- baseline$callstring rmhstuff <- rmh(baseline, preponly=TRUE, verbose=FALSE) simexpr <- expression(rmhEngine(rmhstuff)) dont.complain.about(rmhstuff) } else if(is.im(baseline) || is.function(baseline)) { nullname <- "Poisson process with intensity proportional to baseline" base <- as.im(baseline, W=Xmask) alpha <- npoints(X)/integral.im(base) lambda <- eval.im(alpha * base) simexpr <- expression(rpoispp(lambda)) dont.complain.about(lambda) } else stop(paste("baseline should be", "a pixel image, a function, or a fitted model")) methodname <- c("Spatial scan test", paste("Null hypothesis:", nullname), paste("Candidate cluster regions:", regionname), "Likelihood: Poisson", paste("Monte Carlo p-value based on", nsim, "simulations")) }) if(verbose) { cat("Simulating...") pstate <- list() } for(i in 1:nsim) { if(verbose) pstate <- progressreport(i, nsim, state=pstate) Xsim <- eval(simexpr) simLRTS <- scanLRTS(X=Xsim, r=r, method=method, alternative=alternative, baseline=baseline, case=case, ...) 
sim[i] <- max(simLRTS) } pval <- mean(c(sim,obs) >= obs, na.rm=TRUE) names(obs) <- "maxLRTS" nm.alternative <- switch(alternative, greater="Excess of", less="Deficit of", two.sided="Two-sided: excess or deficit of", stop("Unknown alternative")) nm.alternative <- paste(nm.alternative, counted) result <- list(statistic = obs, p.value = pval, alternative = nm.alternative, method = methodname, data.name = dataname) class(result) <- c("scan.test", "htest") attr(result, "obsLRTS") <- obsLRTS attr(result, "X") <- X attr(result, "r") <- r return(result) } plot.scan.test <- function(x, ..., what=c("statistic", "radius"), do.window=TRUE) { xname <- short.deparse(substitute(x)) what <- match.arg(what) Z <- as.im(x, what=what) do.call(plot, resolve.defaults(list(x=Z), list(...), list(main=xname))) if(do.window) { X <- attr(x, "X") plot(as.owin(X), add=TRUE, invert=TRUE) } invisible(NULL) } as.im.scan.test <- function(X, ..., what=c("statistic", "radius")) { Y <- attr(X, "obsLRTS") what <- match.arg(what) if(what == "radius") { iopt <- attr(Y, "iopt") r <- attr(X, "r") Y <- eval.im(r[iopt]) } return(as.im(Y, ...)) } spatstat/R/hierarchy.R0000644000176200001440000000257413115225157014431 0ustar liggesusers## hierarchy.R ## ## Support functions for hierarchical interactions ## ## $Revision: 1.1 $ $Date: 2015/05/26 08:39:56 $ hierarchicalordering <- function(i, s) { s <- as.character(s) if(inherits(i, "hierarchicalordering")) { ## already a hierarchical ordering if(length(s) != length(i$labels)) stop("Tried to change the number of types in the hierarchical order") i$labels <- s return(i) } n <- length(s) possible <- if(is.character(i)) s else seq_len(n) j <- match(i, possible) if(any(uhoh <- is.na(j))) stop(paste("Unrecognised", ngettext(sum(uhoh), "level", "levels"), commasep(sQuote(i[uhoh])), "amongst possible levels", commasep(sQuote(s)))) if(length(j) < n) stop("Ordering is incomplete") ord <- order(j) m <- matrix(, n, n) rel <- matrix(ord[row(m)] <= ord[col(m)], n, n) 
dimnames(rel) <- list(s, s) x <- list(indices=j, ordering=ord, labels=s, relation=rel) class(x) <- "hierarchicalordering" x } print.hierarchicalordering <- function(x, ...) { splat(x$labels[x$indices], collapse=" ~> ") invisible(NULL) } hiermat <- function (x, h) { stopifnot(is.matrix(x)) isna <- is.na(x) x[] <- as.character(x) x[isna] <- "" if(inherits(h, "hierarchicalordering")) ## allows h to be NULL, etc x[!(h$relation)] <- "" return(noquote(x)) } spatstat/R/studpermutest.R0000644000176200001440000005460113115225157015401 0ustar liggesusers#' #' studpermtest.R #' #' Original by Ute Hahn 2014 #' #' $Revision: 1.5 $ $Date: 2015/10/21 09:06:57 $ #' #' Studentized permutation test for comparison of grouped point patterns; #' functions to generate these grouped point patterns; #' wrapper for test of reweighted second order stationarity. #' #' studpermu.test #' studentized permutation test for grouped point patterns #' interpreted version, random permutations only. #' A group needs to contain at least two point patterns with at least minpoints each. # #' X the data, may be a list of lists of point patterns, or a hyperframe #' formula if X is a hyperframe, relates point patterns to factor variables that #' determine the groups. If missing, the first column of X that contains #' a factor variable is used. #' summaryfunction the function used in the test #' ... additional arguments for summaryfunction #' rinterval r-interval where summaryfunction is evaluated. If NULL, the #' interval is calculated from spatstat defaults #' (intersection for all patterns) #' nperm number of random permutations #' use.Tbar use the alternative test statistic, for summary functions with #' roughly constant variance, such as K/r or L #' minpoints the minimum number of points a pattern needs to have. Patterns #' with fewer points are not used. 
#' rsteps discretization steps of the r-interval #' r arguments at which to evaluate summaryfunction, overrides rinterval #' Should normally not be given, replace by rinterval instead, #' this allows r_0 > 0. Also, there is no plausibility check for r so far #' arguments.in.data if TRUE, individual extra arguments to summary function that #' change are taken from X (which has to be a hyperframe then). #' Assumes that the first argument of summaryfunction always is the #' point pattern. #' This is meant for internal purposes (automatisation) # #' returns an object of classes htest and studpermutest, that can be plotted. The #' plot shows the summary functions for the groups (and the means if requested) studpermu.test <- local({ studpermu.test <- function (X, formula, summaryfunction = Kest, ..., rinterval = NULL, nperm = 999, use.Tbar = FALSE, # the alternative statistic, use with K/r or L minpoints = 20, rsteps = 128, r = NULL, arguments.in.data = FALSE) { #' ---- the loooong preliminaries ------- #' ---- argument checking paranoia ---- if (arguments.in.data & !is.hyperframe(X)) stop(paste("X needs to be a hyperframe", "if arguments for summary function are to be retrieved"), call.=FALSE) stopifnot(is.function(summaryfunction)) #' there could be more... #' first prepare the data if(is.hyperframe(X)) { if(dim(X)[2] < 2) stop(paste("Hyperframe X needs to contain at least 2 columns,", "one for patterns, one indicating groups"), call.=FALSE) data <- X # renaming for later. 
Xclass <- unclass(X)$vclass factorcandidate <- Xclass %in% c("integer", "numeric", "character", "factor") ppcandidate <- Xclass == "ppp" names(factorcandidate) <- names(ppcandidate) <- names(Xclass) <- Xnames <- names(X) if(all(!factorcandidate) || all(!ppcandidate)) stop(paste("Hyperframe X needs to contain at least a column", "with point patterns, and one indicating groups"), call.=FALSE) if(!missing(formula)){ #' safety precautions ;-) if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) if (length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) rhs <- rhs.of.formula(formula) ppname <- formula[[2]] if (!is.name(ppname)) stop("Left hand side of formula should be a single name") ppname <- paste(ppname) if(!ppcandidate[ppname]) stop(paste("Left hand side of formula", "should be the name of a column of point patterns"), call.=FALSE) groupvars <- all.vars(as.expression(rhs)) if(!all(groupvars %in% Xnames) || any(!factorcandidate[groupvars])) stop(paste("Not all variables on right hand side of formula", "can be interpreted as factors"), call.=FALSE) #' make the groups to be compared group <- interaction(lapply(as.data.frame(data[ , groupvars, drop=FALSE]), factor)) #' rename the point patterns, needs the patch newnames <- Xnames newnames[Xnames == ppname] <- "pp" names(data) <- newnames data$group <- group } else { #' No formula supplied. 
#' Choose first ppp column and first factor column to make pp and groups thepp <- which.max(ppcandidate) thegroup <- which.max(factorcandidate) #' fake formula for output of test result formula <- as.formula(paste( Xnames[thepp],"~", Xnames[thegroup])) newnames <- Xnames newnames[thepp] <- "pp" newnames[thegroup] <- "group" names(data) <- newnames data$group <- as.factor(data$group) } } else { #' X is not a hyperframe, but hopefully a list of ppp if(!is.list(X)) stop("X should be a hyperframe or a list of lists of point patterns") if (!is.list(X[[1]]) || !is.ppp(X[[1]][[1]])) stop("X is a list, but not a list of lists of point patterns") nams <- names(X) if(is.null(nams)) nams <- paste("group", seq_along(X)) pp <- list() group <- NULL for (i in seq_along(X)) { pp <- c(pp, X[[i]]) group <- c(group, rep(nams[i], length(X[[i]]))) } group <- as.factor(group) data <- hyperframe(pp = pp, group = group) ppname <- "pp" } framename <- deparse(substitute(X)) fooname <- deparse(substitute(summaryfunction)) #' sorting out the patterns that contain too few points OK <- sapply(data$pp, npoints) >= minpoints if((nbad <- sum(!OK)) > 0) warning(paste(nbad, "patterns have been discarded", "because they contained fewer than", minpoints, "points"), call.=FALSE) data <- data[OK, ,drop=FALSE] pp <- data$pp #' ---- the groups, #' or what remains after discarding the poor patterns with few points ----- #' check if at least two observations in each group groupi <- as.integer(data$group) ngroups <- max(groupi) if(ngroups < 2) stop(paste("Sorry, after discarding patterns with fewer than", minpoints, "points,", if(ngroups < 1) "nothing" else "only one group", "is left over.", "\n- nothing to compare, take a break!"), call.=FALSE) lev <- 1:ngroups m <- as.vector(table(groupi)) if (any(m < 3)) stop(paste("Data groups need to contain at least two patterns;", "\nafter discarding those with fewer than", minpoints, "points, the remaining group sizes are", commasep(m)), call.=FALSE) #' check if 
number of possible outcomes is small npossible <- factorial(sum(m))/prod(factorial(m))/prod(factorial(table(m))) if (npossible < max(100, nperm)) warning("Don't expect exact results - group sizes are too small") #' --------- real preliminaries now ------ #' get interval for arguments if(!is.null(r)){ rinterval <- range(r) rsteps <- length(r) } else if (is.null(rinterval)) { foochar <- substr(fooname, 1, 1) if (foochar %in% c("p", "L")) foochar <- "K" if (fooname %in% c("Kscaled", "Lscaled")) foochar <- "Kscaled" rinterval <- c(0, min(with(data, rmax.rule(foochar, Window(pp), intensity(pp))))) } ranger <- diff(range(rinterval)) #' r sequence needs to start at 0 for Kest and such rr <- r %orifnull% seq(0, rinterval[2], length.out = rsteps + 1) taker <- rr >= rinterval[1] & rr <= rinterval[2] # used for testing #' now estimate the summary function, finally... #' TO DO!!!! Match function call of summary function with data columns! #' use arguments.in.data, if applicable. This is for inhomogeneous summary #' functions #' --------- retrieve arguments for summary function from data, hvis det er if(arguments.in.data) fvlist <- multicall(summaryfunction, pp, data, r = rr, ...) else fvlist <- with(data, summaryfunction(pp, r = rr, ...)) fvtemplate <- fvlist[[1]] valu <- attr(fvtemplate, "valu") argu <- attr(fvtemplate, "argu") foar <- sapply(lapply(fvlist, "[[", valu), "[", taker) #' --------- the real stuff -------------- #' function that calculates the discrepancy #' slow version combs <- combn(lev, 2) #' --------- now do the real real stuff :-) ------------- #' generate "simulated values" from random permutations. 
#' possible improvement for future work: #' If the number of all permutations (combis) is small, #' first generate all permutations and then #' sample from them to improve precision predigested <- list(lev=lev, foar=foar, m=m, combs=combs, rrr=rr[taker], ranger=ranger) if(use.Tbar) { Tobs <- Tbarstat(groupi, predigested) Tsim <- replicate(nperm, Tbarstat(sample(groupi), predigested)) } else { Tobs <- Tstat(groupi, predigested) Tsim <- replicate(nperm, Tstat(sample(groupi), predigested)) } names(Tobs) <- if(use.Tbar) "Tbar" else "T" pval <- (1 + sum(Tobs < Tsim))/(1 + nperm) #' ----- making a test object ----- method <- c("Studentized permutation test for grouped point patterns", if(is.hyperframe(X)) pasteFormula(formula) else NULL, choptext(ngroups, "groups:", paste(levels(data$group), collapse=", ")), choptext("summary function:", paste0(fooname, ","), "evaluated on r in", prange(rinterval)), choptext("test statistic:", if(use.Tbar) "Tbar," else "T,", nperm, "random permutations")) fooshort <- switch(fooname, pcf = "pair correlation ", Kinhom = "inhomogeneous K-", Linhom = "inhomogeneous L-", Kscaled = "locally scaled K-", Lscaled = "locally scaled L-", paste(substr(fooname, 1, 1),"-",sep="")) alternative <- c(paste("not the same ",fooshort,"function", sep="")) testerg <- list(statistic = Tobs, p.value = pval, alternative = alternative, method = method, data.name = framename) class(testerg) <- c("studpermutest", "htest") #' Add things for plotting #' prepare the fvlist, so that it only contains the estimates used, fvs <- lapply(fvlist, "[.fv", j=c(argu, valu)) #' with rinterval as alim fvs <- lapply(fvs, "attr<-", which="alim", value=rinterval) testerg$curves <- hyperframe(fvs = fvs, groups = data$group) fvtheo <- fvlist[[1]] fvnames(fvtheo, ".y") <- "theo" attr(fvtheo, "alim") <- rinterval testerg$curvtheo <- fvtheo[ , c(argu, "theo")] #' group means grmn <- lapply(lev, splitmean, ind=groupi, f=foar) testerg$groupmeans <- lapply(grmn, makefv, xvals=rr[taker], 
template=fvtheo) return(testerg) } splitmean <- function(l, ind, f) { apply(f[ , ind == l], 1, mean) } splitvarn <- function(l, ind, f, m) { apply(f[ , ind == l], 1, var) / m[l] } studentstat <- function(i, grmean, grvar) { (grmean[, i[1]] - grmean[, i[2]])^2 / (grvar[i[1],] + grvar[i[2], ]) } Tstat <- function (ind = groupi, predigested) { #' predigested should be a list with entries lev, foar, m, combs, rrr with(predigested, { grmean <- sapply(lev, splitmean, ind=ind, f=foar) grvar <- t(sapply(lev, splitvarn, ind=ind, f=foar, m=m)) y <- apply(combs, 2, studentstat, grmean=grmean, grvar=grvar) sum(apply(y, 2, trapint, x = rrr)) }) } intstudent <- function(i, rrr, grmean, meangrvar) { trapint(rrr, (grmean[, i[1]] - grmean[, i[2]])^2 / (meangrvar[i[1]] + meangrvar[i[2]])) } Tbarstat <- function (ind = groupi, predigested) { #' predigested should be a list #' with entries lev, foar, m, combs, rrr, ranger with(predigested, { grmean <- sapply(lev, splitmean, ind=ind, f=foar) grvar <- t(sapply(lev, splitvarn, ind=ind, f=foar, m=m)) meangrvar <- apply(grvar, 1, trapint, x = rrr)/ranger sum(apply(combs, 2, intstudent, rrr=rrr, grmean=grmean, meangrvar=meangrvar)) #' trapint(rr[taker], grvar[i[1],] + grvar[i[2], ])))) }) } makefv <- function(yvals, xvals, template) { fdf <- data.frame(r = xvals, y = yvals) argu <- fvnames(template, ".x") valu <- fvnames(template, ".y") names(fdf) <- c(argu,valu) fv(fdf, argu = argu, ylab = attr(template, "ylab"), valu = valu, fmla = attr(template,"fmla"), alim = attr(template, "alim")) } #' Trapezoidal rule approximation to integral #' ------- Trapezregel, mit Behandlung von NAns: #' die werden einfach ignoriert ---- trapint <- function(x, y) { nonan <- !is.na(y) nn <- sum(nonan) if(nn < 2L) return(0) Y <- y[nonan] X <- x[nonan] 0.5 * sum( (Y[-1] + Y[-nn]) * diff(X)) } #' call foo(x, further arguments) repeatedly #' further arguments are taken from hyperframe H and ... 
multicall <- function(foo, x, H, ...){ stopifnot(is.hyperframe(H)) if (is.hyperframe(x)) { x <- as.list(x)[[1]] } else if(!is.list(x)) stop("in multicall: x should be a hyperframe or list", call.=FALSE) #' check if same length nrows <- dim(H)[1] if (length(x) != nrows) stop(paste("in multicall: x and H need to have", "the same number of rows or list elements"), call.=FALSE) dotargs <- list(...) hnames <- names(H) argnames <- names(formals(foo))#' always assume first argument is given ppname <- argnames[1] argnames <- argnames[-1] dotmatch <- pmatch(names(dotargs), argnames) dotmatched <- dotmatch[!is.na(dotmatch)] dotuseargs <- dotargs[!is.na(dotmatch)] restargs <- if(length(dotmatched) >0) argnames[-dotmatched] else argnames hmatch <- pmatch(hnames, restargs) huse <- !is.na(hmatch) lapply(seq_len(nrows), function (i) do.call(foo, c(list(x[[i]]), as.list(H[i, huse, drop=TRUE, strip=FALSE]), dotargs))) } studpermu.test }) #' ------------------- plot studpermutest --------------------------------------- # #' plot.studpermutest #' plot the functions that were used in studperm.test #' also plot group means, if requested # #' x a studpermtest object, the test result #' fmla a plot formula as in plot.fv, should be generic, using "." for values #' ... further plot parameters #' col, lty, lwd parameter (vectors) for plotting the individual summary functions, #' according to group, if vectors #' col.theo, lty.theo, lwd.theo if not all are NULL, the "theo" curve is also plotted #' lwd.mean a multiplyer for the line width of the group means. #' if NULL, group means are not plotted, defaults to NULL #' lty.mean, col.mean selbsterklaerend #' separately generate a separate plot for each group (then no legends are plotted) #' meanonly do not plot individual summary functions #' legend if TRUE, and plots are not separate, plot a legend #' legendpos ... #' lbox if TRUE, draw box around legend. Defaults to FALSE #' add ... 
plot.studpermutest <- local({ plot.studpermutest <- function(x, fmla, ..., lty = NULL, col = NULL, lwd = NULL, lty.theo = NULL, col.theo = NULL, lwd.theo = NULL, lwd.mean = if(meanonly) 1 else NULL, lty.mean = lty, col.mean = col, separately = FALSE, meanonly = FALSE, main = if(meanonly) "group means" else NULL, xlim = NULL, ylim = NULL, ylab = NULL, legend = !add, legendpos = "topleft", lbox=FALSE, add = FALSE) { stopifnot(inherits(x, "studpermutest")) env.user <- parent.frame() curvlists <- split(x$curves, x$curves$groups) ngroups <- length(curvlists) gnames <- names(curvlists) #' check if theoretical functions shall be plottet plottheo <- !(is.null(lty.theo) & is.null(col.theo) & is.null(lwd.theo)) #' prepare plot parameters for groups if (is.null(lty)) lty <- 1:ngroups if (is.null(col)) col <- 1:ngroups if (is.null(lwd)) lwd <- par("lwd") if (is.null(col.mean)) col.mean <- col if (is.null(lty.mean)) lty.mean <- lty lty <- rep(lty, length.out = ngroups) col <- rep(col, length.out = ngroups) lwd <- rep(lwd, length.out = ngroups) col.mean <- rep(col.mean, length.out = ngroups) lty.mean <- rep(lty.mean, length.out = ngroups) if (plottheo){ if (is.null(lty.theo)) lty.theo <- ngroups + 1#par("lty") if (is.null(col.theo)) col.theo <- ngroups + 1 #par("col") if (is.null(lwd.theo)) lwd.theo <- par("lwd") } #' transporting the formula in ... unfortunately does not work #' for the axis labels, because the fvs contain only one variable. #' Have to knit them self if (is.null(ylab)) { if (!missing(fmla)) { #' puha. det bliver noget lappevaerk. 
fmla <- as.formula(fmla, env=env.user) map <- fvlabelmap(x$curvtheo) lhs <- lhs.of.formula(as.formula(fmla)) ylab <- eval(substitute(substitute(le, mp), list(le = lhs, mp = map))) } else ylab <- attr(x$curvtheo, "yexp") } if (missing(fmla)) fmla <- attr(x$curvtheo, "fmla") if(!is.null(lwd.mean)) lwd.Mean <- lwd.mean*lwd if(separately) { for (i in seq_along(gnames)) { if(!meanonly) plot.fvlist(curvlists[[i]]$fvs, fmla, ..., col = col[i], lwd = lwd[i], lty= lty[i], xlim = xlim, ylim = ylim, ylab = ylab, main = gnames[i]) if (!is.null(lwd.mean)) plot(x$groupmeans[[i]], fmla, ..., col = col.mean[i], lwd = lwd.Mean[i], lty = lty.mean[i], main = gnames[i], add = !meanonly, ylim = ylim) if (plottheo) plot(x$curvtheo, fmla, ..., add = TRUE, col = col.theo, lwd = lwd.theo, lty = lty.theo) } } else { #' ---- TODO SIMPLIFY! they should all have the same x-range, #' just check y-range ---- lims <- if (meanonly) { plot.fvlist(x$groupmeans, fmla,..., limitsonly = TRUE) } else { as.data.frame(apply(sapply(curvlists, function(C) plot.fvlist(C$fvs, fmla,..., limitsonly = TRUE)), 1, range)) } if(is.null(xlim)) xlim <- lims$xlim if(is.null(ylim)) ylim <- lims$ylim iadd <- add for (i in seq_along(gnames)) { if(!meanonly) plot.fvlist(curvlists[[i]]$fvs, fmla, ..., col = col[i], lwd = lwd[i], lty= lty[i], xlim = xlim, ylim = ylim, ylab= ylab, main = main, add = iadd) iadd <- iadd | !meanonly if (!is.null(lwd.mean)) plot(x$groupmeans[[i]], fmla, ..., col = col.mean[i], lwd = lwd.Mean[i], lty = lty.mean[i], add = iadd, xlim = xlim, ylim = ylim, ylab= ylab, main=main) if (plottheo) plot(x$curvtheo, fmla, ..., add = TRUE, col = col.theo, lwd = lwd.theo, lty = lty.theo, xlim = xlim, ylim = ylim, ylab= ylab, main=main) iadd <- TRUE } if(legend) { if(meanonly) { lwd <- lwd.Mean col <- col.mean lty <- lty.mean } if(plottheo){ gnames <- c(gnames, "Poisson mean") col <- c(col, col.theo) lty <- c(lty, lty.theo) lwd <- c(lwd, lwd.theo) } legend(legendpos, gnames, col = col, lty = lty, lwd = lwd, 
bty=ifelse(lbox, "o", "n")) } } return(invisible(NULL)) } #' ------------------ Helper function---------------- #' flist: list of fv, with plot method plot.fvlist <- function(x, fmla, ..., xlim=NULL, ylim=NULL, add = FALSE, limitsonly = FALSE, main=NULL){ #' no safety precautions if (missing(fmla)) fmla <- attr(x[[1]], "fmla") if (!add | limitsonly) { lims <- sapply(x, plot, fmla, ..., limitsonly = TRUE) if(is.null(xlim)) xlim = range(unlist(lims[1,])) if(is.null(ylim)) ylim = range(unlist(lims[2,])) lims=list(xlim=xlim, ylim=ylim) if(limitsonly) return(lims) plot(x[[1]], fmla, ..., xlim = xlim, ylim = ylim, main = main) } else plot(x[[1]], fmla,..., add=T) for (foo in x[-1]) plot(foo, fmla, ..., add=T) } plot.studpermutest }) spatstat/R/headtail.R0000644000176200001440000000103513115225157014215 0ustar liggesusers#' #' headtail.R #' #' Methods for head() and tail() #' #' $Revision: 1.1 $ $Date: 2016/12/20 01:11:29 $ head.tess <- head.psp <- head.ppx <- head.ppp <- function(x, n=6L, ...) { stopifnot(length(n) == 1L) xlen <- nobjects(x) n <- if (n < 0L) max(xlen + n, 0L) else min(n, xlen) x[seq_len(n)] } tail.tess <- tail.psp <- tail.ppx <- tail.ppp <- function (x, n = 6L, ...) { stopifnot(length(n) == 1L) xlen <- nobjects(x) n <- if (n < 0L) max(xlen + n, 0L) else min(n, xlen) x[seq.int(to = xlen, length.out = n)] } spatstat/R/weightedStats.R0000644000176200001440000000477413115271120015265 0ustar liggesusers#' #' weightedStats.R #' #' weighted versions of hist, var, median, quantile #' #' $Revision: 1.3 $ $Date: 2017/06/05 10:31:58 $ #' #' #' whist weighted histogram #' whist <- function(x, breaks, weights=NULL) { N <- length(breaks) if(length(x) == 0) h <- numeric(N+1) else { # classify data into histogram cells (breaks need not span range of data) cell <- findInterval(x, breaks, rightmost.closed=TRUE) # values of 'cell' range from 0 to N. 
nb <- N + 1L if(is.null(weights)) { ## histogram h <- tabulate(cell+1L, nbins=nb) } else { ## weighted histogram if(!spatstat.options("Cwhist")) { cell <- factor(cell, levels=0:N) h <- unlist(lapply(split(weights, cell), sum, na.rm=TRUE)) } else { h <- .Call("Cwhist", as.integer(cell), as.double(weights), as.integer(nb), PACKAGE = "spatstat") } } } h <- as.numeric(h) y <- h[2:N] attr(y, "low") <- h[1] attr(y, "high") <- h[N+1] return(y) } #' wrapper for computing weighted variance of a vector #' Note: this includes a factor 1 - sum(v^2) in the denominator #' where v = w/sum(w). See help(cov.wt) weighted.var <- function(x, w, na.rm=TRUE) { bad <- is.na(w) | is.na(x) if(any(bad)) { if(!na.rm) return(NA_real_) ok <- !bad x <- x[ok] w <- w[ok] } cov.wt(matrix(x, ncol=1),w)$cov[] } #' weighted median weighted.median <- function(x, w, na.rm=TRUE) { unname(weighted.quantile(x, probs=0.5, w=w, na.rm=na.rm)) } #' weighted quantile weighted.quantile <- function(x, w, probs=seq(0,1,0.25), na.rm=TRUE) { x <- as.numeric(as.vector(x)) w <- as.numeric(as.vector(w)) if(anyNA(x) || anyNA(w)) { ok <- !(is.na(x) | is.na(w)) x <- x[ok] w <- w[ok] } stopifnot(all(w >= 0)) if(all(w == 0)) stop("All weights are zero", call.=FALSE) #' oo <- order(x) x <- x[oo] w <- w[oo] Fx <- cumsum(w)/sum(w) #' result <- numeric(length(probs)) for(i in seq_along(result)) { p <- probs[i] lefties <- which(Fx <= p) if(length(lefties) == 0) { result[i] <- x[1] } else { left <- max(lefties) result[i] <- x[left] if(Fx[left] < p && left < length(x)) { right <- left+1 y <- x[left] + (x[right]-x[left]) * (p-Fx[left])/(Fx[right]-Fx[left]) if(is.finite(y)) result[i] <- y } } } names(result) <- paste0(format(100 * probs, trim = TRUE), "%") return(result) } spatstat/R/inforder.family.R0000755000176200001440000000653013115271075015542 0ustar liggesusers# # # inforder.family.R # # $Revision: 1.2 $ $Date: 2010/07/10 10:22:09 $ # # Family of `infinite-order' point process models # # inforder.family: object of class 
'isf' # # # ------------------------------------------------------------------- # inforder.family <- list( name = "inforder", print = function(self) { cat("Family of infinite-order interactions\n") }, plot = NULL, # ---------------------------------------------------- eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # # This is the eval function for the `inforder' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `inforder' family of interactions with infinite order, # there are no structures common to all interactions. # So this function simply invokes the potential 'pot' directly # and expects 'pot' to return the values of the sufficient statistic S(u,X). # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pairpot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # In this case the potential function 'pot' should have arguments # pot(X, U, EqualPairs, pars, correction, ...) # # It must return a vector with length equal to the number of points in U, # or a matrix with as many rows as there are points in U. 
if(!is.ppp(U)) U <- ppp(U$x, U$y, window=X$window) POT <- pot(X, U, EqualPairs, pars, correction, ...) if(is.matrix(POT)) { if(nrow(POT) != U$n) stop("Internal error: the potential returned a matrix with the wrong number of rows") } else if(is.array(POT) && length(dim(POT)) > 2) stop("Internal error: the potential returned an array with more than 2 dimensions") else if(is.vector(POT)) { if(length(POT) != U$n) stop("Internal error: the potential returned a vector with the wrong length") POT <- matrix(POT, ncol=1) } else stop("Internal error: the return value from the potential is not understood") return(POT) }, ######### end of function $eval suffstat = NULL ######### end of function $suffstat ) ######### end of list class(inforder.family) <- "isf" spatstat/R/util.R0000755000176200001440000003031213115271120013411 0ustar liggesusers# # util.R miscellaneous utilities # # $Revision: 1.237 $ $Date: 2017/06/05 10:31:58 $ # # common invocation of matrixsample rastersample <- function(X, Y) { stopifnot(is.im(X) || is.mask(X)) stopifnot(is.im(Y) || is.mask(Y)) phase <- c((Y$yrow[1] - X$yrow[1])/X$ystep, (Y$xcol[1] - X$xcol[1])/X$xstep) scale <- c(Y$ystep/X$ystep, Y$xstep/X$xstep) if(is.im(X)) { # resample an image if(!is.im(Y)) Y <- as.im(Y) Xtype <- X$type Xv <- X$v # handle factor-valued image as integer if(Xtype == "factor") Xv <- array(as.integer(Xv), dim=X$dim) # resample naval <- switch(Xtype, factor=, integer= NA_integer_, logical = as.logical(NA_integer_), real = NA_real_, complex = NA_complex_, character = NA_character_, NA) Y$v <- matrixsample(Xv, Y$dim, phase=phase, scale=scale, na.value=naval) # inherit pixel data type from X Y$type <- Xtype if(Xtype == "factor") { Y$v <- factor(Y$v, labels=levels(X)) dim(Y$v) <- Y$dim } } else { # resample a mask if(!is.mask(Y)) Y <- as.mask(Y) Y$m <- matrixsample(X$m, Y$dim, phase=phase, scale=scale, na.value=FALSE) } return(Y) } pointgrid <- function(W, ngrid) { W <- as.owin(W) masque <- as.mask(W, dimyx=ngrid) rxy <- 
rasterxy.mask(masque, drop=TRUE) xx <- rxy$x yy <- rxy$y return(ppp(xx, yy, W)) } onecolumn <- function(m) { switch(markformat(m), none=stop("No marks provided"), vector=m, dataframe=m[,1, drop=TRUE], NA) } checkbigmatrix <- function(n, m, fatal=FALSE, silent=FALSE) { if(n * m <= spatstat.options("maxmatrix")) return(TRUE) whinge <- paste("Attempted to create binary mask with", n, "*", m, "=", n * m, "entries") if(fatal) stop(whinge, call.=FALSE) if(!silent) warning(whinge, call.=FALSE) return(FALSE) } ## ........... progress reports ..................... progressreport <- local({ Put <- function(name, value, state) { if(is.null(state)) { putSpatstatVariable(paste0("Spatstat.", name), value) } else { state[[name]] <- value } return(state) } Get <- function(name, state) { if(is.null(state)) { value <- getSpatstatVariable(paste0("Spatstat.", name)) } else { value <- state[[name]] } return(value) } IterationsPerLine <- function(charsperline, n, every, tick, showtime, showevery) { # Calculate number of iterations that triggers a newline. # A dot is printed every 'tick' iterations # Iteration number is printed every 'every' iterations. # If showtime=TRUE, the time is shown every 'showevery' iterations # where showevery \in {1, every, n}. 
chars.report <- max(1, ceiling(log10(n))) if(showtime) { chars.time <- nchar(' [etd 12:00:00] ') timesperreport <- if(showevery == 1) every else if(showevery == every) 1 else 0 chars.report <- chars.report + timesperreport * chars.time } chars.ticks <- floor((every-1)/tick) chars.block <- chars.report + chars.ticks nblocks <- max(1, floor(charsperline/chars.block)) nperline <- nblocks * every leftover <- charsperline - nblocks * chars.block if(leftover > 0) nperline <- nperline + min(leftover * tick, showevery - 1) return(nperline) } progressreport <- function(i, n, every=min(100,max(1, ceiling(n/100))), tick=1, nperline=NULL, charsperline=getOption("width"), style=spatstat.options("progress"), showtime=NULL, state=NULL) { missevery <- missing(every) nperline.fixed <- !is.null(nperline) showtime.optional <- is.null(showtime) if(showtime.optional) showtime <- FALSE # initialise only if(i > n) { warning(paste("progressreport called with i =", i, "> n =", n)) return(invisible(NULL)) } if(style == "tk" && !requireNamespace("tcltk")) { warning("tcltk is unavailable; switching to style='txtbar'", call.=FALSE) style <- "txtbar" } if(is.null(state) && style != "tty") stop(paste("Argument 'state' is required when style =",sQuote(style)), call.=FALSE) switch(style, txtbar={ if(i == 1) { ## initialise text bar state <- Put("ProgressBar", txtProgressBar(1, n, 1, style=3), state) } else { ## get text bar pbar <- Get("ProgressBar", state) ## update setTxtProgressBar(pbar, i) if(i == n) { close(pbar) state <- Put("ProgressBar", NULL, state) } } }, tk={ requireNamespace("tcltk") if(i == 1) { ## initialise text bar state <- Put("ProgressBar", tcltk::tkProgressBar(title="progress", min=0, max=n, width=300), state) } else { ## get text bar pbar <- Get("ProgressBar", state) ## update tcltk::setTkProgressBar(pbar, i, label=paste0(round(100 * i/n), "%")) if(i == n) { close(pbar) state <- Put("ProgressBar", NULL, state) } } }, tty={ now <- proc.time() if(i == 1 || is.null(state)) { ## 
Initialise stuff if(missevery && every > 1 && n > 10) every <- niceround(every) showevery <- if(showtime) every else n if(!nperline.fixed) nperline <- IterationsPerLine(charsperline, n, every, tick, showtime, showevery) state <- Put("ProgressData", list(every=every, tick=tick, nperline=nperline, starttime=now, showtime=showtime, showevery=showevery, nperline.fixed=nperline.fixed, showtime.optional=showtime.optional), state) } else { pd <- Get("ProgressData", state) if(is.null(pd)) stop(paste("progressreport called with i =", i, "before i = 1")) every <- pd$every tick <- pd$tick nperline <- pd$nperline showtime <- pd$showtime showevery <- pd$showevery showtime.optional <- pd$showtime.optional nperline.fixed <- pd$nperline.fixed if(i < n) { if(showtime || showtime.optional) { ## estimate time remaining starttime <- pd$starttime elapsed <- now - starttime elapsed <- unname(elapsed[3]) rate <- elapsed/(i-1) remaining <- rate * (n-i) if(!showtime) { ## show time remaining if.. if(rate > 20) { ## .. rate is very slow showtime <- TRUE showevery <- 1 } else if(remaining > 180) { ## ... 
more than 3 minutes remaining showtime <- TRUE showevery <- every aminute <- ceiling(60/rate) if(aminute < showevery) showevery <- min(niceround(aminute), showevery) } # update number of iterations per line if(showtime && !nperline.fixed) nperline <- IterationsPerLine(charsperline, n, every, tick, showtime, showevery) } } state <- Put("ProgressData", list(every=every, tick=tick, nperline=nperline, starttime=starttime, showtime=showtime, showevery=showevery, nperline.fixed=nperline.fixed, showtime.optional=showtime.optional), state) } } if(i == n) cat(paste(" ", n, ".\n", sep="")) else if(every == 1 || i <= 3) cat(paste(i, ",", if(i %% nperline == 0) "\n" else " ", sep="")) else { if(i %% every == 0) cat(i) else if(i %% tick == 0) cat(".") if(i %% nperline == 0) cat("\n") } if(i < n && showtime && (i %% showevery == 0)) { st <- paste("etd", codetime(round(remaining))) st <- paren(st, "[") cat(paste("", st, "")) } flush.console() }, stop(paste("Unrecognised option for style:", dQuote(style))) ) return(invisible(state)) } progressreport }) multiply.only.finite.entries <- function(x, a) { # In ppm a potential value that is -Inf must remain -Inf # and a potential value that is 0 multiplied by NA remains 0 y <- x ok <- is.finite(x) & (x != 0) y[ok] <- a * x[ok] return(y) } ## print names and version numbers of libraries loaded sessionLibs <- function() { a <- sessionInfo() b <- unlist(lapply(a$otherPkgs, getElement, name="Version")) g <- rbind(names(b), unname(b)) d <- apply(g, 2, paste, collapse=" ") if(length(d) > 0) { cat("Libraries loaded:\n") for(di in d) cat(paste("\t", di, "\n")) } else cat("Libraries loaded: None\n") return(invisible(d)) } # .................. 
prepareTitle <- function(main) { ## Count the number of lines in a main title ## Convert title to a form usable by plot.owin if(is.expression(main)) { nlines <- 1 } else { main <- paste(main) ## break at newline main <- unlist(strsplit(main, "\n")) nlines <- if(sum(nchar(main)) == 0) 0 else length(main) } return(list(main=main, nlines=nlines, blank=rep(' ', nlines))) } requireversion <- function(pkg, ver) { pkgname <- deparse(substitute(pkg)) v <- read.dcf(file=system.file("DESCRIPTION", package=pkgname), fields="Version") if(package_version(v) < ver) stop(paste("Package", sQuote(pkgname), "is out of date: version >=", ver, "is needed"), call.=FALSE) invisible(NULL) } spatstatDiagnostic <- function(msg) { cat("-----------------------------\n") cat(paste(" >>> Spatstat Diagnostic: ", msg, "<<<\n")) cat("-----------------------------\n") invisible(NULL) } allElementsIdentical <- function(x, entry=NULL) { if(length(x) <= 1) return(TRUE) if(!is.null(entry)) { x1 <- x[[1]] for(i in 2:length(x)) if(!identical(x[[i]], x1)) return(FALSE) } else { e1 <- x[[1]][[entry]] for(i in 2:length(x)) if(!identical(x[[i]][[entry]], e1)) return(FALSE) } return(TRUE) } representativeRows <- function(x) { # select a unique representative of each equivalence class of rows, # in a numeric matrix or data frame of numeric values. 
ord <- do.call(order, as.list(as.data.frame(x))) y <- x[ord, , drop=FALSE] dy <- apply(y, 2, diff) answer <- logical(nrow(y)) answer[ord] <- c(TRUE, !matrowall(dy == 0)) return(answer) }spatstat/R/lennard.R0000755000176200001440000000704013115271075014072 0ustar liggesusers# # # lennard.R # # $Revision: 1.21 $ $Date: 2017/02/07 08:12:05 $ # # Lennard-Jones potential # # # ------------------------------------------------------------------- # LennardJones <- local({ BlankLJ <- list( name = "Lennard-Jones process", creator = "LennardJones", family = "pairwise.family", # evaluated later pot = function(d, par) { sig0 <- par$sigma0 if(is.na(sig0)) { d6 <- d^{-6} p <- array(c(-d6^2,d6),dim=c(dim(d),2)) } else { # expand around sig0 and set large numbers to Inf drat <- d/sig0 d6 <- drat^{-6} p <- array(c(-d6^2,d6),dim=c(dim(d),2)) small <- (drat < 1/4) small <- array(c(small, small), dim=c(dim(d), 2)) p[small] <- -Inf big <- (drat > 4) big <- array(c(big, big), dim=c(dim(d), 2)) p[big] <- 0 } return(p) }, par = list(sigma0=NULL), # filled in later parnames = "Initial approximation to sigma", selfstart = function(X, self) { # self starter for Lennard Jones # attempt to set value of 'sigma0' if(!is.na(self$par$sigma0)) { # value fixed by user or previous invocation return(self) } if(npoints(X) < 2) { # not enough points return(self) } s0 <- minnndist(X) if(s0 == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Lennard-Jones model")) s0 <- mean(nndist(X)) if(s0 == 0) return(self) } LennardJones(s0) }, init = function(...){}, # do nothing update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta1 <- as.numeric(coeffs[1L]) theta2 <- as.numeric(coeffs[2L]) sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 if(sign(theta1) * sign(theta2) == 1) { sigma <- sig0 * (theta1/theta2)^(1/6) epsilon <- (theta2^2)/(4 * theta1) } else { sigma <- NA epsilon <- NA } return(list(param=list(sigma=sigma, epsilon=epsilon), 
inames="interaction parameters", printable=signif(c(sigma=sigma,epsilon=epsilon)))) }, valid = function(coeffs, self) { p <- unlist(self$interpret(coeffs, self)$param) return(all(is.finite(p) & (p > 0))) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { if(anyNA(coeffs) || epsilon == 0) return(Inf) sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 theta1 <- abs(coeffs[1L]) theta2 <- abs(coeffs[2L]) return(sig0 * max((theta1/epsilon)^(1/12), (theta2/epsilon)^(1/6))) }, version=NULL # filled in later ) class(BlankLJ) <- "interact" LennardJones <- function(sigma0=NA) { if(is.null(sigma0) || !is.finite(sigma0)) sigma0 <- NA instantiate.interact(BlankLJ, list(sigma0=sigma0)) } LennardJones <- intermaker(LennardJones, BlankLJ) LennardJones }) spatstat/R/lixellate.R0000644000176200001440000000551613161101170014422 0ustar liggesusers#' #' lixellate.R #' #' Divide each segment of a linear network into several pieces #' #' $Revision: 1.6 $ $Date: 2017/09/22 02:20:48 $ #' lixellate <- function(X, ..., nsplit, eps, sparse=TRUE) { missn <- missing(nsplit) misse <- missing(eps) if(missn && misse) stop("One of the arguments 'nsplit' or 'eps' must be given") if(!missn && !misse) stop("The arguments 'nsplit' or 'eps' are incompatible") if(!missn) { stopifnot(is.numeric(nsplit)) stopifnot(all(is.finite(nsplit))) stopifnot(all(nsplit >= 0)) } if(is.lpp(X)) { rtype <- "lpp" np <- npoints(X) L <- as.linnet(X) } else if(inherits(X, "linnet")) { rtype <- "linnet" L <- X X <- runiflpp(1, L) np <- 0 } else stop("X should be a linnet or lpp object") if(is.null(sparse)) sparse <- identical(L$sparse, TRUE) from <- L$from to <- L$to ns <- length(from) if(missn) { lenfs <- lengths.psp(as.psp(L)) nsplit <- ceiling(lenfs/eps) } else { if(length(nsplit) == 1) { nsplit <- rep(nsplit, ns) } else if(length(nsplit) != ns) { stop(paste("nsplit should be a single number,", "or a vector of 
length equal to the number of segments")) } } sumN <- sum(nsplit) sumN1 <- sum(nsplit-1) V <- vertices(L) nv <- npoints(V) xv <- V$x yv <- V$y coordsX <- coords(X) sp <- coordsX$seg tp <- coordsX$tp ## sort data in increasing order of 'sp' oo <- order(sp) z <- .C("Clixellate", ns=as.integer(ns), fromcoarse=as.integer(from-1L), tocoarse = as.integer(to-1L), fromfine=as.integer(integer(sumN)), tofine = as.integer(integer(sumN)), nv = as.integer(nv), xv = as.double(c(xv, numeric(sumN1))), yv = as.double(c(yv, numeric(sumN1))), svcoarse = as.integer(integer(nv + sumN1)), tvcoarse = as.double(numeric(nv + sumN1)), nsplit = as.integer(nsplit), np = as.integer(np), spcoarse = as.integer(sp[oo]-1L), tpcoarse = as.double(tp[oo]), spfine = as.integer(integer(np)), tpfine = as.double(numeric(np)), PACKAGE = "spatstat") Lfine <- with(z, { ii <- seq_len(nv) Vnew <- ppp(xv[ii], yv[ii], window=Frame(L), check=FALSE) Lfine <- linnet(Vnew, edges=cbind(fromfine,tofine)+1, sparse=sparse) marks(Lfine$vertices) <- markcbind(marks(Lfine$vertices), data.frame(segcoarse=svcoarse+1, tpcoarse=tvcoarse)) Lfine }) if(rtype == "linnet") return(Lfine) ## put coordinates back in original order sp[oo] <- as.integer(z$spfine + 1L) tp[oo] <- z$tpfine coordsX$seg <- sp coordsX$tp <- tp ## make lpp Xfine <- lpp(coordsX, Lfine) marks(Xfine) <- marks(X) return(Xfine) } spatstat/R/rPerfect.R0000755000176200001440000002156113115271120014214 0ustar liggesusers# # Perfect Simulation # # $Revision: 1.21 $ $Date: 2017/06/05 10:31:58 $ # # rStrauss # rHardcore # rStraussHard # rDiggleGratton # rDGS # rPenttinen rStrauss <- function(beta, gamma=1, R=0, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(gamma) check.1.real(R) check.finite(beta) check.finite(gamma) check.finite(R) stopifnot(beta > 0) stopifnot(gamma >= 0) stopifnot(gamma <= 1) stopifnot(R >= 0) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R)) xrange <- 
Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectStrauss", beta, gamma, R, xrange, yrange, PACKAGE = "spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] times <- c(start=z[[4]], end=z[[5]]) if(nout<0) stop("internal error: copying failed in PerfectStrauss") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] attr(P, "times") <- times if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } # Perfect Simulation of Hardcore process rHardcore <- function(beta, R=0, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(R) check.finite(beta) check.finite(R) stopifnot(beta > 0) stopifnot(R >= 0) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- storage.mode(R) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectHardcore", beta, R, xrange, yrange, PACKAGE = "spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectHardcore") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } # # Perfect simulation of hybrid Strauss-Hardcore # provided gamma <= 1 # rStraussHard <- function(beta, gamma=1, R=0, H=0, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(gamma) check.1.real(R) check.1.real(H) 
check.finite(beta) check.finite(gamma) check.finite(R) check.finite(H) stopifnot(beta > 0) stopifnot(gamma >= 0) if(gamma > 1) stop("Sorry, perfect simulation is only implemented for gamma <= 1") stopifnot(R >= 0) stopifnot(H >= 0) stopifnot(H <= R) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- storage.mode(H) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectStraussHard", beta, gamma, R, H, xrange, yrange, PACKAGE = "spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectStraussHard") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } # # Perfect Simulation of Diggle-Gratton process # rDiggleGratton <- function(beta, delta, rho, kappa=1, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(delta) check.1.real(rho) check.1.real(kappa) check.finite(beta) check.finite(delta) check.finite(rho) check.finite(kappa) stopifnot(beta > 0) stopifnot(delta >= 0) stopifnot(rho >= 0) stopifnot(delta <= rho) stopifnot(kappa >= 0) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*rho)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- "double" storage.mode(delta) <- storage.mode(rho) <- storage.mode(kappa) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectDiggleGratton", beta, delta, rho, kappa, xrange, yrange, PACKAGE = "spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying 
failed in PerfectDiggleGratton") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } # # Perfect Simulation of Diggle-Gates-Stibbard process # rDGS <- function(beta, rho, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(rho) check.finite(beta) check.finite(rho) stopifnot(beta > 0) stopifnot(rho >= 0) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*rho)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- "double" storage.mode(rho) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectDGS", beta, rho, xrange, yrange, PACKAGE = "spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectDGS") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } # # Perfect Simulation of Penttinen process # rPenttinen <- function(beta, gamma=1, R, W=owin(), expand=TRUE, nsim=1, drop=TRUE) { if(!missing(W)) verifyclass(W, "owin") check.1.real(beta) check.1.real(gamma) check.1.real(R) check.finite(beta) check.finite(gamma) check.finite(R) stopifnot(beta > 0) stopifnot(gamma >= 0) stopifnot(gamma <= 1) stopifnot(R >= 0) runif(1) Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R)) xrange <- Wsim$xrange yrange <- Wsim$yrange result <- vector(mode="list", length=nsim) for(i in 1:nsim) { storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectPenttinen", beta, 
gamma, R, xrange, yrange, PACKAGE = "spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectPenttinen") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE) if(attr(Wsim, "changed")) P <- P[W] if(nsim == 1 && drop) return(P) result[[i]] <- P } result <- as.solist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } ## ....... utilities ................................. expandwinPerfect <- function(W, expand, amount) { ## expand 'W' if expand=TRUE according to default 'amount' ## or expand 'W' using rmhexpand(expand) if(!is.logical(expand)) { amount <- rmhexpand(expand) expand <- TRUE } changed <- FALSE if(expand) { W <- expand.owin(W, amount) changed <- TRUE } if(!is.rectangle(W)) { W <- as.rectangle(W) changed <- TRUE warning(paste("Simulation will be performed in the containing rectangle", "and clipped to the original window."), call.=FALSE) } attr(W, "changed") <- changed return(W) } spatstat/R/harmonic.R0000755000176200001440000000327613115271075014256 0ustar liggesusers# # # harmonic.R # # $Revision: 1.2 $ $Date: 2004/01/07 08:57:39 $ # # harmonic() # Analogue of polynom() for harmonic functions only # # ------------------------------------------------------------------- # harmonic <- function(x,y,n) { if(missing(n)) stop("the order n must be specified") n <- as.integer(n) if(is.na(n) || n <= 0) stop("n must be a positive integer") if(n > 3) stop("Sorry, harmonic() is not implemented for degree > 3") namex <- deparse(substitute(x)) namey <- deparse(substitute(y)) if(!is.name(substitute(x))) namex <- paste("(", namex, ")", sep="") if(!is.name(substitute(y))) namey <- paste("(", namey, ")", sep="") switch(n, { result <- cbind(x, y) names <- c(namex, namey) }, { result <- cbind(x, y, x*y, x^2-y^2) names <- c(namex, namey, paste("(", namex, ".", namey, ")", sep=""), paste("(", namex, "^2-", namey, "^2)", sep="")) }, { result <- cbind(x, y, x * y, x^2-y^2, x^3 - 3 * x * y^2, 
y^3 - 3 * x^2 * y) names <- c(namex, namey, paste("(", namex, ".", namey, ")", sep=""), paste("(", namex, "^2-", namey, "^2)", sep=""), paste("(", namex, "^3-3", namex, ".", namey, "^2)", sep=""), paste("(", namey, "^3-3", namex, "^2.", namey, ")", sep="") ) } ) dimnames(result) <- list(NULL, names) return(result) } spatstat/R/rshift.R0000755000176200001440000001134513115271120013740 0ustar liggesusers# # rshift.R # # random shift with optional toroidal boundary # # $Revision: 1.17 $ $Date: 2016/02/11 10:17:12 $ # # rshift <- function(X, ...) { UseMethod("rshift") } rshift.splitppp <- function(X, ..., which=seq_along(X)) { verifyclass(X, "splitppp") if("group" %in% names(list(...))) stop(paste("argument", sQuote("group"), "not implemented for splitppp objects")) if(is.null(which)) { iwhich <- which <- seq_along(X) } else { id <- seq_along(X) names(id) <- names(X) iwhich <- id[which] if(length(iwhich) == 0) stop(paste("Argument", sQuote("which"), "did not match any marks")) } # validate arguments and determine common clipping window arglist <- handle.rshift.args(X[[1]]$window, ..., edgedefault="torus") if(!is.null(clip <- arglist$clip)) { # clip the patterns that are not to be shifted if(length(iwhich) < length(X)) X[-iwhich] <- lapply(X[-iwhich], "[.ppp", i=clip) } # perform shift on selected patterns # (setting group = NULL ensures each pattern is not split further) shiftXsub <- do.call(lapply, append(list(X[iwhich], rshift.ppp, group=NULL), arglist)) # put back X[iwhich] <- shiftXsub return(X) } rshift.ppp <- function(X, ..., which=NULL, group) { verifyclass(X, "ppp") # validate arguments and determine common clipping window arglist <- handle.rshift.args(X$window, ..., edgedefault="torus") # default grouping # (NULL is not the default) # (NULL means all points shifted in parallel) if(missing(group)) group <- if(is.multitype(X)) marks(X) else NULL # if no grouping, use of `which' is undefined if(is.null(group) && !is.null(which)) stop(paste("Cannot apply 
argument", sQuote("which"), "; no grouping defined")) # if grouping, use split if(!is.null(group)) { Y <- split(X, group) split(X, group) <- do.call(rshift.splitppp, append(list(Y, which=which), arglist)) return(X) } # ungrouped point pattern # shift all points in parallel # recover arguments radius <- arglist$radius width <- arglist$width height <- arglist$height edge <- arglist$edge clip <- arglist$clip W <- X$window W <- rescue.rectangle(W) if(W$type != "rectangle" && edge=="torus") stop("Torus (periodic) boundary is only meaningful for rectangular windows") # generate random translation vector if(!is.null(radius)) jump <- runifdisc(1, radius=radius) else { jump <- list(x=runif(1, min=0, max=width), y=runif(1, min=0, max=height)) } # translate points x <- X$x + jump$x y <- X$y + jump$y # wrap points if(edge == "torus") { xr <- W$xrange yr <- W$yrange Wide <- diff(xr) High <- diff(yr) x <- xr[1] + (x - xr[1]) %% Wide y <- yr[1] + (y - yr[1]) %% High } # put back into point pattern X$x <- x X$y <- y # clip to window if(!is.null(clip)) X <- X[clip] return(X) } handle.rshift.args <- function(W, ..., radius=NULL, width=NULL, height=NULL, edge=NULL, clip=NULL, edgedefault) { verifyclass(W, "owin") W <- rescue.rectangle(W) if(length(aargh <- list(...)) > 0) stop(paste("Unrecognised arguments:", paste(names(aargh), collapse=","))) if(!is.null(radius)) { # radial generator if(!(is.null(width) && is.null(height))) stop(paste(sQuote("radius"), "is incompatible with", sQuote("width"), "and", sQuote("height"))) } else { # rectangular generator if(is.null(width) != is.null(height)) stop("Must specify both width and height, if one is specified") if(is.null(width)) width <- diff(W$xrange) if(is.null(height)) height <- diff(W$yrange) } if(is.null(edge)) edge <- edgedefault else if(!(edge %in% c("torus", "erode", "none"))) stop(paste("Unrecognised option erode=", sQuote(edge))) # determine whether clipping window is needed if(is.null(clip)) clip <- switch(edge, torus= NULL, none= 
W, erode={ if(!is.null(radius)) erosion.owin(W, radius) else if(W$type == "rectangle") trim.rectangle(W, width, height) else erosion.owin(W, max(width, height)) }) return(list(radius=radius, width=width, height=height, edge=edge, clip=clip)) } rtoro <- function(X, which=NULL, radius=NULL, width=NULL, height=NULL) { .Deprecated("rshift", package="spatstat") rshift(X, which=which, radius=radius, width=width, height=height) } spatstat/R/replace.ppp.R0000755000176200001440000000354413115271120014654 0ustar liggesusers# # replace.ppp.R # "[<-.ppp" <- function(x, i, j, value) { verifyclass(x, "ppp") verifyclass(value, "ppp") if(missing(i) && missing(j)) return(value) if(missing(i)) { message("The use of argument j in [<-.ppp is deprecated; use argument i") # invoke code below x[j] <- value return(x) } xmf <- markformat(x) vmf <- markformat(value) if(xmf != vmf) { if(xmf == "none") stop("Replacement points are marked, but x is not marked") else if(vmf == "none") stop("Replacement points have no marks, but x is marked") else stop("Format of marks in replacement is incompatible with original") } if(inherits(i, "owin")) { win <- i vok <- inside.owin(value$x, value$y, win) if(!all(vok)) { warning("Replacement points outside the specified window were deleted") value <- value[vok] } # convert to vector index i <- inside.owin(x$x, x$y, win) } if(!is.vector(i)) stop("Unrecognised format for subset index i") # vector index # determine index subset n <- x$n SUB <- seq_len(n)[i] # anything to replace? if(length(SUB) == 0) return(x) # sanity checks if(anyNA(SUB)) stop("Invalid subset: the resulting subscripts include NAs") # exact replacement of this subset? 
if(value$n == length(SUB)) { x$x[SUB] <- value$x x$y[SUB] <- value$y switch(xmf, none={}, list=, vector={ x$marks[SUB] <- value$marks }, dataframe={ x$marks[SUB,] <- value$marks }) } else x <- superimpose(x[-SUB], value, W=x$window) if(!missing(j)) { warning("The use of argument j in [<-.ppp is deprecated; use argument i") # invoke code above x[j] <- value } return(x) } spatstat/R/nnfunlpp.R0000644000176200001440000000162113115225157014303 0ustar liggesusers# # nnfunlpp.R # # method for 'nnfun' for class 'lpp' # # $Revision: 1.2 $ $Date: 2016/08/21 04:33:47 $ # nnfun.lpp <- local({ nnfun.lpp <- function(X, ..., k=1) { stopifnot(inherits(X, "lpp")) force(X) force(k) L <- as.linnet(X) f <- function(x, y=NULL, seg=NULL, tp=NULL, ...) { # L is part of the environment Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L) i <- nncross.lpp(Y, X, what="which", k=k) return(i) } f <- linfun(f, L) attr(f, "explain") <- uitleggen return(f) } uitleggen <- function(x, ...) { env <- environment(attr(x, "f")) X <- get("X", envir=env) k <- get("k", envir=env) if(identical(k, 1)) { cat("Nearest-neighbour function for lpp object\n") } else { cat("k-th nearest neighbour function for lpp object\n") cat(paste("k =", commasep(k), "\n")) } print(X) } nnfun.lpp }) spatstat/R/pairsat.family.R0000755000176200001440000002071313115271120015363 0ustar liggesusers# # # pairsat.family.S # # $Revision: 1.44 $ $Date: 2016/02/11 09:36:11 $ # # The saturated pairwise interaction family of point process models # # (an extension of Geyer's saturation process to all pairwise interactions) # # pairsat.family: object of class 'isf' # defining saturated pairwise interaction # # # ------------------------------------------------------------------- # pairsat.family <- list( name = "saturated pairwise", print = function(self) { cat("Saturated pairwise interaction family\n") }, eval = function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, halfway=FALSE) { # # This is 
the eval function for the `pairsat' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `pairsat' family of pairwise-interaction processes, # this eval function computes the distances between points, # invokes 'pairpot' to evaluate the potential between each pair of points, # applies edge corrections, and then sums the pair potential terms # applying the saturation threshold. # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ######################################################################## # # POTENTIAL: # The pair potential function 'pairpot' will be called as # pairpot(M, potpars) where M is a matrix of interpoint distances. # It must return a matrix with the same dimensions as M # or an array with its first two dimensions the same as the dimensions of M. # # NOTE: # Note the Geyer saturation threshold must be given in 'potpars$sat' ########################################################################## # coercion should be unnecessary, but this is useful for debugging X <- as.ppp(X) U <- as.ppp(U, X$window) # i.e. 
X$window is DEFAULT window # saturation parameter(s) saturate <- potpars$sat # interaction distance of corresponding pairwise interaction PairReach <- if(!is.null(Reach) && is.finite(Reach)) Reach/2 else NULL if(is.null(saturate)) { # pairwise interaction V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction, ..., Reach=PairReach, precomputed=precomputed, savecomputed=savecomputed) return(V) } # first ensure all data points are included in the quadrature points nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points originalrows <- seq_len(nU) nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window, check=FALSE) # correspondingly augment the list of equal pairs newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } # compute the pair potentials POT and the unsaturated potential sums V V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction, ..., Reach=PairReach) POT <- attr(V, "POT") computed <- attr(V, "computed") # could be NULL # # V is a matrix with rows = quadrature points, # columns = coordinates of potential # POT is an array with rows = data points # columns = quadrature points # planes = coordinates of potential ################################################################# ################## saturation part ############################## ################################################################# # check dimensions and ensure 'saturate' is a vector ns <- length(saturate) np <- ncol(V) if(ns == 1 && np > 1) saturate <- rep.int(saturate, np) else if(ns != np) stop("Length of vector of saturation parameters is incompatible with the pair potential", call.=FALSE) # 
replicate as a matrix and as an array saturate2 <- array(saturate[slice.index(V, 2)], dim=dim(V)) saturate3 <- array(saturate[slice.index(POT, 3)], dim=dim(POT)) # # (a) compute SATURATED potential sums V.sat <- pmin(V, saturate2) if(halfway) return(V.sat) # # (b) compute effect of addition/deletion of dummy/data point j # on the UNSATURATED potential sum of each data point i # # Identify data points is.data <- seq_len(npoints(U)) %in% EqualPairs[,2] # logical vector corresp. to rows of V # Extract potential sums for data points only V.data <- V[is.data, , drop=FALSE] # replicate them so that V.dat.rep[i,j,k] = V.data[i, k] V.dat.rep <- aperm(array(V.data, dim=c(dim(V.data), U$n)), c(1,3,2)) # make a logical array col.is.data[i,j,k] = is.data[j] col.is.data <- array(is.data[slice.index(POT, 2)], dim=dim(POT)) # compute value of unsaturated potential sum for each data point i # obtained after addition/deletion of each dummy/data point j if(!(correction %in% c("isotropic", "Ripley"))) { dV <- ifelseNegPos(col.is.data, POT) ## equivalent to ifelse(col.is.data, -POT, POT) } else { ## Weighted potential is not exactly symmetric dV <- POT dV[col.is.data] <- - aperm(POT[ , is.data, , drop=FALSE], c(2,1,3)) } V.after <- V.dat.rep + dV # # # (c) difference of SATURATED potential sums for each data point i # before & after increment/decrement of each dummy/data point j # # saturated values after increment/decrement V.after.sat <- array(pmin.int(saturate3, V.after), dim=dim(V.after)) # saturated values before V.dat.rep.sat <- array(pmin.int(saturate3, V.dat.rep), dim=dim(V.dat.rep)) # difference V.delta <- V.after.sat - V.dat.rep.sat V.delta <- ifelseNegPos(col.is.data, V.delta) # # (d) Sum (c) over all data points i V.delta.sum <- apply(V.delta, c(2,3), sum) # # (e) Result V <- V.sat + V.delta.sum ########################################## # remove rows corresponding to supplementary points if(somemissing) V <- V[originalrows, , drop=FALSE] ### tack on the saved computations 
from pairwise.family$eval if(savecomputed) attr(V, "computed") <- computed return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="pairsat.family$suffstat") { # for saturated pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"saturated pairwise")) stop("Model is not a saturated pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] Empty <- X[integer(0)] mom <- partialModelMatrix(X, Empty, model, "suffstat", halfway=TRUE) # halfway=TRUE is passed to pairsat.family$eval # and yields matrix of saturated potential sums # take only those terms that contribute to the pseudolikelihood mom <- mom[contribute, , drop=FALSE] result <- apply(mom, 2, sum) return(result) } ######### end of function $suffstat ) ######### end of list class(pairsat.family) <- "isf" spatstat/R/compareFit.R0000755000176200001440000000503113115271075014536 0ustar liggesusers# # compareFit.R # # $Revision: 1.3 $ $Date: 2015/10/21 09:06:57 $ compareFit <- function(object, Fun, r=NULL, breaks=NULL, ..., trend=~1, interaction=Poisson(), rbord=NULL, modelnames=NULL, same=NULL, different=NULL) { dotargs <- list(...) 
h <- hyperframe(obj=object, tren=trend, inte=interaction) N <- nrow(h) if(N == 0) stop("No objects specified") # determine rbord for summary statistics if(is.null(rbord) && !is.null(interaction)) rbord <- max(with(h, reach(inte))) h$rbord <- rbord # try to get nice model names if(is.null(modelnames)) { if(inherits(trend, "formula") && is.interact(interaction) && inherits(object, c("anylist", "listof")) && all(nzchar(names(object))) && length(names(object)) == nrow(h)) modelnames <- names(object) else if(inherits(trend, c("anylist", "listof")) && all(nzchar(names(trend))) && length(names(trend)) == nrow(h)) modelnames <- names(trend) else if(inherits(interaction, c("anylist", "listof")) && all(nzchar(names(interaction))) && length(names(interaction)) == nrow(h)) modelnames <- names(interaction) else modelnames <- row.names(h) } row.names(h) <- make.names(modelnames) # fix a common vector of r values if(is.null(r)) { # compute first function fun1 <- with(h[1L,,drop=TRUE,strip=FALSE], do.call(Fun, append(list(object=obj, trend=tren, interaction=inte, rbord=rbord, r=NULL, breaks=breaks), dotargs))) # extract r values r <- with(fun1, .x) } # compute the subsequent functions if(N == 1L) funs2toN <- NULL else funs2toN <- with(h[-1L, , drop=TRUE, strip=FALSE], do.call(Fun, append(list(object=obj, trend=tren, interaction=inte, rbord=rbord, r=r), dotargs))) if(N == 2) funs2toN <- list(funs2toN) # collect all functions in a list funs <- as.anylist(append(list(fun1), funs2toN)) names(funs) <- row.names(h) # collapse together out <- collapse.fv(funs, same=same, different=different) return(out) } spatstat/R/transmat.R0000644000176200001440000000364413115225157014303 0ustar liggesusers## transmat.R ## ## transform matrices between different spatial indexing conventions ## ## $Revision: 1.1 $ $Date: 2015/03/04 07:13:10 $ transmat <- local({ euro <- matrix(c(0,-1,1,0), 2, 2) spat <- matrix(c(0,1,1,0), 2, 2) cart <- diag(c(1,1)) dimnames(euro) <- dimnames(spat) <- dimnames(cart) <- 
list(c("x","y"), c("i","j")) known <- list(spatstat=spat, cartesian=cart, Cartesian=cart, european=euro, European=euro) cmap <- list(x=c(1,0), y=c(0,1), i=c(1,0), j=c(0,1)) maptocoef <- function(s) { e <- parse(text=s)[[1]] eval(eval(substitute(substitute(f, cmap), list(f=e)))) } as.convention <- function(x) { if(is.character(x) && length(x) == 1) { k <- pmatch(x, names(known)) if(is.na(k)) stop(paste("Unrecognised convention", sQuote(x)), call.=FALSE) return(known[[k]]) } if(is.list(x) && is.character(unlist(x))) { xx <- lapply(x, maptocoef) if(all(c("x", "y") %in% names(xx))) z <- rbind(xx$x, xx$y) else if(all(c("i", "j") %in% names(xx))) z <- cbind(xx$x, xx$y) else stop("entries should be named i,j or x,y", call.=FALSE) dimnames(z) <- list(c("x","y"), c("i","j")) if(!(all(z == 0 | z == 1 | z == -1) && all(rowSums(abs(z)) == 1) && all(colSums(abs(z)) == 1))) stop("Illegal convention", call.=FALSE) return(z) } stop("Unrecognised format for spatial convention", call.=FALSE) } transmat <- function(m, from, to) { m <- as.matrix(m) from <- as.convention(from) to <- as.convention(to) conv <- solve(from) %*% to flip <- apply(conv == -1, 2, any) if(flip[["i"]]) m <- m[nrow(m):1, , drop=FALSE] if(flip[["j"]]) m <- m[ , ncol(m):1, drop=FALSE] if(all(diag(conv) == 0)) m <- t(m) return(m) } transmat }) spatstat/R/distbdry.R0000755000176200001440000001610513115271075014275 0ustar liggesusers# # distbdry.S Distance to boundary # # $Revision: 4.42 $ $Date: 2017/02/01 10:26:09 $ # # -------- functions ---------------------------------------- # # bdist.points() # compute vector of distances # from each point of point pattern # to boundary of window # # bdist.pixels() # compute matrix of distances from each pixel # to boundary of window # # erodemask() erode the window mask by a distance r # [yields a new window] # # # "bdist.points"<- function(X) { verifyclass(X, "ppp") if(X$n == 0) return(numeric(0)) x <- X$x y <- X$y window <- X$window switch(window$type, rectangle = { xmin <- 
min(window$xrange) xmax <- max(window$xrange) ymin <- min(window$yrange) ymax <- max(window$yrange) result <- pmin.int(x - xmin, xmax - x, y - ymin, ymax - y) }, polygonal = { xy <- cbind(x,y) result <- rep.int(Inf, X$n) bdry <- window$bdry for(i in seq_along(bdry)) { polly <- bdry[[i]] px <- polly$x py <- polly$y nsegs <- length(px) for(j in seq_len(nsegs)) { j1 <- if(j < nsegs) j + 1L else 1L seg <- c(px[j], py[j], px[j1], py[j1]) result <- pmin.int(result, distppl(xy, seg)) } } }, mask = { b <- bdist.pixels(window, style="matrix") loc <- nearest.raster.point(x,y,window) result <- b[cbind(loc$row, loc$col)] }, stop("Unrecognised window type", window$type) ) return(result) } "bdist.pixels" <- function(w, ..., style="image", method=c("C", "interpreted")) { verifyclass(w, "owin") masque <- as.mask(w, ...) switch(w$type, mask = { neg <- complement.owin(masque) m <- exactPdt(neg) b <- pmin.int(m$d,m$b) }, rectangle = { rxy <- rasterxy.mask(masque) x <- rxy$x y <- rxy$y xmin <- w$xrange[1L] xmax <- w$xrange[2L] ymin <- w$yrange[1L] ymax <- w$yrange[2L] b <- pmin.int(x - xmin, xmax - x, y - ymin, ymax - y) }, polygonal = { # set up pixel raster method <- match.arg(method) rxy <- rasterxy.mask(masque) x <- rxy$x y <- rxy$y b <- numeric(length(x)) # test each pixel in/out, analytically inside <- inside.owin(x, y, w) # compute distances for these pixels xy <- cbind(x[inside], y[inside]) switch(method, C = { #' C code ll <- as.data.frame(edges(w)) dxy <- distppllmin(xy, ll)$min.d }, interpreted = { #' ancient R code dxy <- rep.int(Inf, sum(inside)) bdry <- w$bdry for(i in seq_along(bdry)) { polly <- bdry[[i]] nsegs <- length(polly$x) for(j in 1:nsegs) { j1 <- if(j < nsegs) j + 1L else 1L seg <- c(polly$x[j], polly$y[j], polly$x[j1], polly$y[j1]) dxy <- pmin.int(dxy, distppl(xy, seg)) } } }) b[inside] <- dxy }, stop("unrecognised window type", w$type) ) # reshape it b <- matrix(b, nrow=masque$dim[1L], ncol=masque$dim[2L]) switch(style, coords={ # format which can be plotted 
by image(), persp() etc return(list(x=masque$xcol, y=masque$yrow, z=t(b))) }, matrix={ # return matrix (for internal use by package) return(b) }, image={ bim <- im(b, xcol=masque$xcol, yrow=masque$yrow, unitname=unitname(masque)) return(bim) }, stop(paste("Unrecognised option for style:", style))) } erodemask <- function(w, r, strict=FALSE) { # erode a binary image mask without changing any other entries verifyclass(w, "owin") if(w$type != "mask") stop(paste("window w is not of type", sQuote("mask"))) if(!is.numeric(r) || length(r) != 1L) stop("r must be a single number") if(r < 0) stop("r must be nonnegative") bb <- bdist.pixels(w, style="matrix") if(r > max(bb)) warning("eroded mask is empty") if(identical(strict, TRUE)) w$m <- (bb > r) else w$m <- (bb >= r) return(w) } "Frame<-.owin" <- function(X, value) { stopifnot(is.rectangle(value)) W <- Frame(X) if(!is.subset.owin(W, value)) W <- intersect.owin(W, value) rebound.owin(X, value) } rebound.owin <- function(x, rect) { w <- x verifyclass(rect, "owin") if(is.empty(w)) return(emptywindow(rect)) verifyclass(w, "owin") if(!is.subset.owin(as.rectangle(w), rect)) { bb <- boundingbox(w) if(!is.subset.owin(bb, rect)) stop(paste("The new rectangle", sQuote("rect"), "does not contain the window", sQuote("win"))) } xr <- rect$xrange yr <- rect$yrange switch(w$type, rectangle={ return(owin(xr, yr, poly=list(x=w$xrange[c(1L,2L,2L,1L)], y=w$yrange[c(1L,1L,2L,2L)]), check=FALSE)) }, polygonal={ return(owin(xr, yr, poly=w$bdry, check=FALSE)) }, mask={ newseq <- function(oldseq, newrange, dstep) { oldends <- range(oldseq) nleft <- max(0, floor((oldends[1L] - newrange[1L])/dstep)) nright <- max(0, floor((newrange[2L] - oldends[2L])/dstep)) newstart <- max(oldends[1L] - nleft * dstep, newrange[1L]) newend <- min(oldends[2L] + nright * dstep, newrange[2L]) seq(from=newstart, by=dstep, to=newend) } xcol <- newseq(w$xcol, xr, mean(diff(w$xcol))) yrow <- newseq(w$yrow, yr, mean(diff(w$yrow))) newmask <- as.mask(xy=list(x=xcol, 
y=yrow)) xx <- rasterx.mask(newmask) yy <- rastery.mask(newmask) newmask$m <- inside.owin(xx, yy, w) return(newmask) } ) } spatstat/R/pcfinhom.R0000755000176200001440000001647413115271120014254 0ustar liggesusers# # pcfinhom.R # # $Revision: 1.21 $ $Date: 2017/06/05 10:31:58 $ # # inhomogeneous pair correlation function of point pattern # # pcfinhom <- function(X, lambda=NULL, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r","d"), renormalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, reciplambda=NULL, sigma=NULL, varcov=NULL, close=NULL) { verifyclass(X, "ppp") # r.override <- !is.null(r) miss.update <- missing(update) win <- X$window areaW <- area(win) npts <- npoints(X) kernel <- match.kernel(kernel) correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) divisor <- match.arg(divisor) if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(npts/areaW) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(npts/areaW) } ########## intensity values ######################### dangerous <- c("lambda", "reciplambda") danger <- TRUE if(npts == 0) { lambda <- reciplambda <- numeric(0) danger <- FALSE } else if(missing(lambda) && is.null(reciplambda)) { # No intensity data provided danger <- FALSE # Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambda <- as.numeric(lambda) reciplambda <- 1/lambda } else 
if(!is.null(reciplambda)) { # 1/lambda values provided if(is.im(reciplambda)) reciplambda <- safelookup(reciplambda, X) else if(is.function(reciplambda)) reciplambda <- reciplambda(X$x, X$y) else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda))) check.nvector(reciplambda, npts) else stop(paste(sQuote("reciplambda"), "should be a vector, a pixel image, or a function")) } else { # lambda values provided if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## just use intensity of fitted model lambda <- predict(model, locations=X, type="trend") } else { if(is.ppm(model)) { model <- update(model, Q=X) lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else if(is.kppm(model)) { model <- update(model, X=X) lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) lambda <- fitted(model, dataonly=TRUE) } danger <- FALSE if(miss.update) warn.once(key="pcfinhom.update", "The behaviour of pcfinhom when lambda is a ppm object", "has changed (in spatstat 1.45-0 and later).", "See help(pcfinhom)") } } else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) check.nvector(lambda, npts) else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, a function, or a fitted model")) # evaluate reciprocal reciplambda <- 1/lambda } # renormalise if(renormalise && npts > 0) { check.1.real(normpower) stopifnot(normpower %in% 1:2) renorm.factor <- (areaW/sum(reciplambda))^normpower } ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, lambda) breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) 
########## smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# # compute pairwise distances if(npts > 1) { if(is.null(close)) { #' find close pairs close <- closepairs(X, rmax+hmax) } else { #' check 'close' has correct format needed <- c("i", "j", "xi", "yi", "xj", "yj", "dx", "dy", "d") if(any(is.na(match(needed, names(close))))) stop(paste("Argument", sQuote("close"), "should have components named", commasep(sQuote(needed))), call.=FALSE) } dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=win, check=FALSE) wIJ <- reciplambda[I] * reciplambda[J] } else { undefined <- rep(NaN, length(r)) } # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) out <- fv(df, "r", quote(g[inhom](r)), "theo", , alim, c("r","{%s[%s]^{pois}}(r)"), c("distance argument r", "theoretical Poisson %s"), fname=c("g", "inhom")) ###### compute ####### if(any(correction=="translate")) { # translation correction if(npts > 1) { XJ <- ppp(close$xj, close$yj, window=win, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) gT <- sewpcf(dIJ, edgewt * wIJ, denargs, areaW, divisor)$g if(renormalise) gT <- gT * renorm.factor } else gT <- undefined out <- bind.fv(out, data.frame(trans=gT), "{hat(%s)[%s]^{Trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction if(npts > 1) { edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) gR <- sewpcf(dIJ, edgewt * wIJ, denargs, areaW, divisor)$g if(renormalise) gR <- gR * renorm.factor } else gR <- undefined out <- bind.fv(out, data.frame(iso=gR), "{hat(%s)[%s]^{Ripley}}(r)", "isotropic-corrected estimate of %s", "iso") } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? 
corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . ~ r fvnames(out, ".") <- corrxns unitname(out) <- unitname(X) if(danger) attr(out, "dangerous") <- dangerous return(out) } spatstat/R/Kinhom.R0000755000176200001440000004177113157174467013722 0ustar liggesusers# # Kinhom.S Estimation of K function for inhomogeneous patterns # # $Revision: 1.93 $ $Date: 2017/07/18 10:14:42 $ # # Kinhom() compute estimate of K_inhom # # # Reference: # Non- and semiparametric estimation of interaction # in inhomogeneous point patterns # A.Baddeley, J.Moller, R.Waagepetersen # Statistica Neerlandica 54 (2000) 329--350. # # -------- functions ---------------------------------------- # Kinhom() compute estimate of K # using various edge corrections # # Kwtsum() internal routine for border correction # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # # r distance values at which to compute K # # lambda vector of intensity values for points of X # # -------- standard output ------------------------------ # A data frame (class "fv") with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # (denominator = sum of weights of points) # # bord.modif: K function estimated by border method # (denominator = area of eroded window) # # ------------------------------------------------------------------------ "Linhom" <- function(...) { K <- Kinhom(...) 
L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object L <- rebadge.fv(L, quote(L[inhom](r)), c("L", "inhom"), names(K), new.labl=attr(K, "labl")) attr(L, "labl") <- attr(K, "labl") attr(L, "dangerous") <- attr(K, "dangerous") # return(L) } "Kinhom"<- function (X, lambda=NULL, ..., r = NULL, breaks = NULL, correction=c("border", "bord.modif", "isotropic", "translate"), renormalise=TRUE, normpower=1, update = TRUE, leaveoneout = TRUE, nlarge = 1000, lambda2=NULL, reciplambda=NULL, reciplambda2=NULL, diagonal=TRUE, sigma=NULL, varcov=NULL, ratio=FALSE) { verifyclass(X, "ppp") nlarge.given <- !missing(nlarge) rfixed <- !missing(r) || !missing(breaks) miss.update <- missing(update) # determine basic parameters W <- X$window npts <- npoints(X) areaW <- area(W) diamW <- diameter(W) rmaxdefault <- rmax.rule("K", W, npts/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # match corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) ## replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) ## retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) ########################################################### # DETERMINE WEIGHTS AND VALIDATE # # The matrix 'lambda2' or 'reciplambda2' is sufficient information # unless we want the border correction. 
lambda2.given <- !is.null(lambda2) || !is.null(reciplambda2) lambda2.suffices <- !any(correction %in% c("bord", "bord.modif")) ## Arguments that are 'dangerous' for envelope, if fixed dangerous <- c("lambda", "reciplambda", "lambda2", "reciplambda2") danger <- TRUE # Use matrix of weights if it was provided and if it is sufficient if(lambda2.suffices && lambda2.given) { if(!is.null(reciplambda2)) { check.nmatrix(reciplambda2, npts) validate.weights(reciplambda2, recip=TRUE) } else { check.nmatrix(lambda2, npts) validate.weights(lambda2) reciplambda2 <- 1/lambda2 } # renormalise if(renormalise) { check.1.real(normpower) stopifnot(normpower %in% 1:2) rlam2 <- reciplambda2 if(!diagonal) diag(rlam2) <- 0 renorm.factor <- (areaW^2/sum(rlam2))^(normpower/2) } } else { # Vector lambda or reciplambda is required if(missing(lambda) && is.null(reciplambda)) { # No intensity data provided danger <- FALSE # Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) lambda <- as.numeric(lambda) validate.weights(lambda, how="density estimation") reciplambda <- 1/lambda } else if(!is.null(reciplambda)) { # 1/lambda values provided if(is.im(reciplambda)) reciplambda <- safelookup(reciplambda, X) else if(is.function(reciplambda)) reciplambda <- reciplambda(X$x, X$y) else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda))) check.nvector(reciplambda, npts) else stop(paste(sQuote("reciplambda"), "should be a vector, a pixel image, or a function")) validate.weights(reciplambda, recip=TRUE) } else { # lambda values provided if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## just use intensity of fitted model lambda <- predict(model, locations=X, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) lambda <- fitted(model, dataonly=TRUE, 
leaveoneout=leaveoneout) } else if(is.kppm(model)) { model <- update(model, X=X) lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) lambda <- fitted(model, dataonly=TRUE) } danger <- FALSE if(miss.update) warn.once(key="Kinhom.update", "The behaviour of Kinhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Kinhom)") } } else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) check.nvector(lambda, npts) else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) validate.weights(lambda) # evaluate reciprocal reciplambda <- 1/lambda } # renormalise if(renormalise) { check.1.real(normpower) stopifnot(normpower %in% 1:2) if(!diagonal && normpower == 2) { renorm.factor <- (areaW^2)/(sum(reciplambda)^2 - sum(reciplambda^2)) } else { renorm.factor <- (areaW/sum(reciplambda))^normpower } } } # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ########################################### # Efficient code for border correction and no correction # Usable only if r values are evenly spaced from 0 to rmax # Invoked automatically if number of points is large can.do.fast <- breaks$even && !lambda2.given large.n <- (npts >= nlarge) # demand.best <- correction.given && best.wanted large.n.trigger <- large.n && !correction.given fastcorrections <- c("border", "bord.modif", "none") fastdefault <- "border" correction.fast <- all(correction %in% fastcorrections) will.do.fast <- can.do.fast && (correction.fast || large.n.trigger) asked.fast <- (correction.given && correction.fast) || (nlarge.given && large.n.trigger) if(!can.do.fast && asked.fast) { whynot <- if(!(breaks$even)) "r values not evenly spaced" else if(!missing(lambda)) "matrix lambda2 was given" else NULL warning(paste("cannot use efficient code", whynot, sep="; ")) } if(will.do.fast) { ## Compute Kinhom using fast algorithm(s) ## 
determine correction(s) ok <- correction %in% fastcorrections correction <- if(any(ok)) correction[ok] else fastdefault bord <- any(correction %in% c("border", "bord.modif")) none <- any(correction =="none") if(!all(ok)) { ## some corrections were overridden; notify user corx <- c(if(bord) "border correction estimate" else NULL, if(none) "uncorrected estimate" else NULL) corx <- paste(corx, collapse=" and ") message(paste("number of data points exceeds", nlarge, "- computing", corx , "only")) } ## restrict r values to recommended range, unless specifically requested if(!rfixed) r <- seq(from=0, to=alim[2], length.out=length(r)) ## border method if(bord) { Kb <- Kborder.engine(X, max(r), length(r), correction, weights=reciplambda, ratio=ratio) if(renormalise) { ynames <- setdiff(fvnames(Kb, "*"), "theo") Kb <- adjust.ratfv(Kb, ynames, denfactor=1/renorm.factor) } Kb <- tweak.ratfv.entry(Kb, "border", new.labl="{hat(%s)[%s]^{bord}} (r)") Kb <- tweak.ratfv.entry(Kb, "bord.modif", new.labl="{hat(%s)[%s]^{bordm}} (r)") } ## uncorrected if(none) { Kn <- Knone.engine(X, max(r), length(r), weights=reciplambda, ratio=ratio) if(renormalise) Kn <- adjust.ratfv(Kn, "un", denfactor=1/renorm.factor) Kn <- tweak.ratfv.entry(Kn, "un", new.labl="{hat(%s)[%s]^{un}} (r)") } K <- if(bord && !none) Kb else if(!bord && none) Kn else if(!ratio) cbind.fv(Kb, Kn[, c("r", "un")]) else bind.ratfv(Kb, Kn[, c("r", "un")], ratio=TRUE) ## tweak labels K <- rebadge.fv(K, quote(K[inhom](r)), c("K", "inhom")) if(danger) attr(K, "dangerous") <- dangerous return(K) } ########################################### # Fast code for rectangular window ########################################### if(can.do.fast && is.rectangle(W) && spatstat.options("use.Krect")) { K <- Krect.engine(X, rmax, length(r), correction, weights=reciplambda, ratio=ratio, fname=c("K", "inhom")) if(renormalise) { allfun <- setdiff(fvnames(K, "*"), "theo") K <- adjust.ratfv(K, allfun, denfactor=1/renorm.factor) } K <- rebadge.fv(K, 
quote(K[inhom](r)), c("K", "inhom")) attr(K, "alim") <- alim if(danger) attr(K, "dangerous") <- dangerous return(K) } ########################################### # Slower code ########################################### # this will be the output data frame K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- if(renormalise) (areaW / renorm.factor) else areaW K <- ratfv(K, NULL, denom, argu="r", ylab=quote(K[inhom](r)), valu="theo", fmla=NULL, alim=alim, labl=c("r","{%s[%s]^{pois}}(r)"), desc=desc, fname=c("K", "inhom"), ratio=ratio) # identify all close pairs rmax <- max(r) what <- if(any(correction == "translate")) "all" else "ijd" close <- closepairs(X, rmax, what=what) dIJ <- close$d # compute weights for these pairs I <- close$i J <- close$j # wI <- reciplambda[I] wIJ <- if(!lambda2.given) reciplambda[I] * reciplambda[J] else reciplambda2[cbind(I,J)] # # compute edge corrected estimates if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) bI <- b[I] # apply reduced sample algorithm RS <- Kwtsum(dIJ, bI, wIJ, b, w=reciplambda, breaks) if(any(correction == "border")) { Kb <- RS$ratio if(renormalise) Kb <- Kb * renorm.factor K <- bind.ratfv(K, quotient = data.frame(border=Kb), denominator = denom, labl = "{hat(%s)[%s]^{bord}}(r)", desc = "border-corrected estimate of %s", preferred = "border", ratio=ratio) } if(any(correction == "bord.modif")) { Kbm <- RS$numerator/eroded.areas(W, r) if(renormalise) Kbm <- Kbm * renorm.factor K <- bind.ratfv(K, quotient = data.frame(bord.modif=Kbm), denominator = denom, labl = "{hat(%s)[%s]^{bordm}}(r)", desc = "modified border-corrected estimate of %s", preferred = "bord.modif", ratio=ratio) } } if(any(correction == "translate")) { # translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE) allweight <- edgewt * wIJ wh <- whist(dIJ, breaks$val, allweight) Ktrans <- 
cumsum(wh)/areaW if(renormalise) Ktrans <- Ktrans * renorm.factor rmax <- diamW/2 Ktrans[r >= rmax] <- NA K <- bind.ratfv(K, quotient = data.frame(trans=Ktrans), denominator = denom, labl ="{hat(%s)[%s]^{trans}}(r)", desc = "translation-correction estimate of %s", preferred = "trans", ratio=ratio) } if(any(correction == "isotropic" | correction == "Ripley")) { # Ripley isotropic correction edgewt <- edge.Ripley(X[I], matrix(dIJ, ncol=1)) allweight <- edgewt * wIJ wh <- whist(dIJ, breaks$val, allweight) Kiso <- cumsum(wh)/areaW if(renormalise) Kiso <- Kiso * renorm.factor rmax <- diamW/2 Kiso[r >= rmax] <- NA K <- bind.ratfv(K, quotient = data.frame(iso=Kiso), denominator = denom, labl = "{hat(%s)[%s]^{iso}}(r)", desc = "Ripley isotropic correction estimate of %s", preferred = "iso", ratio=ratio) } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) if(danger) attr(K, "dangerous") <- dangerous return(K) } Kwtsum <- function(dIJ, bI, wIJ, b, w, breaks) { # # "internal" routine to compute border-correction estimates of Kinhom # # dIJ: vector containing pairwise distances for selected I,J pairs # bI: corresponding vector of boundary distances for I # wIJ: product weight for selected I, J pairs # # b: vector of ALL distances to window boundary # w: weights for ALL points # # breaks : breakpts object # stopifnot(length(dIJ) == length(bI)) stopifnot(length(bI) == length(wIJ)) stopifnot(length(w) == length(b)) if(!is.finite(sum(w, wIJ))) stop("Weights in K-function were infinite or NA") bkval <- breaks$val # determine which distances d_{ij} were observed without censoring uncen <- (dIJ <= bI) # # histogram of noncensored distances nco <- whist(dIJ[uncen], bkval, wIJ[uncen]) # histogram of censoring times for noncensored distances ncc <- whist(bI[uncen], bkval, wIJ[uncen]) # histogram of censoring times (yes, this is a different total size) cen <- whist(b, bkval, w) # total weight of censoring times beyond rightmost breakpoint uppercen <- sum(w[b 
> breaks$max]) # go RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen) # extract results numerator <- RS$numerator denominator <- RS$denominator ratio <- RS$numerator/RS$denominator # check if(length(numerator) != breaks$ncells) stop("internal error: length(numerator) != breaks$ncells") if(length(denominator) != breaks$ncells) stop("internal error: length(denom.count) != breaks$ncells") return(list(numerator=numerator, denominator=denominator, ratio=ratio)) } validate.weights <- function(x, recip=FALSE, how = NULL, allowzero = recip, allowinf = !recip) { xname <- deparse(substitute(x)) ra <- range(x) offence <- if(!allowinf && !all(is.finite(ra))) "infinite" else if(ra[1] < 0) "negative" else if(!allowzero && ra[1] == 0) "zero" else NULL if(!is.null(offence)) { offenders <- paste(offence, "values of", sQuote(xname)) if(is.null(how)) stop(paste(offenders, "are not allowed"), call.=FALSE) stop(paste(how, "yielded", offenders), call.=FALSE) } return(TRUE) } spatstat/R/simulatelppm.R0000644000176200001440000000176013115225157015163 0ustar liggesusers## ## simulatelppm.R ## ## Simulation of lppm objects ## ## $Revision: 1.6 $ $Date: 2015/07/11 08:19:26 $ ## simulate.lppm <- function(object, nsim=1, ..., new.coef=NULL, progress=(nsim > 1), drop=FALSE) { starttime <- proc.time() if(!is.poisson(object$fit)) stop("Simulation of non-Poisson models is not yet implemented") lambda <- predict(object, ..., new.coef=new.coef) lmax <- if(is.im(lambda)) max(lambda) else unlist(lapply(lambda, max)) L <- as.linnet(object) result <- vector(mode="list", length=nsim) pstate <- list() for(i in seq_len(nsim)) { if(progress) pstate <- progressreport(i, nsim, state=pstate) result[[i]] <- rpoislpp(lambda, L, lmax=lmax) } if(nsim == 1 && drop) { result <- result[[1]] } else { result <- as.solist(result) if(nsim > 0) names(result) <- paste("Simulation", 1:nsim) } result <- timed(result, starttime=starttime) return(result) } 
spatstat/R/saturated.R0000755000176200001440000000332213115271120014431 0ustar liggesusers# # # saturated.S # # $Revision: 1.8 $ $Date: 2015/10/21 09:06:57 $ # # Saturated pairwise process with user-supplied potential # # Saturated() create a saturated pairwise process # [an object of class 'interact'] # with user-supplied potential # # # ------------------------------------------------------------------- # Saturated <- function(pot, name) { if(missing(name)) name <- "Saturated process with user-defined potential" fop <- names(formals(pot)) if(!identical(all.equal(fop, c("d", "par")), TRUE) && !identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE)) stop(paste("Formal arguments of pair potential function", sQuote("pot"), "must be either (d, par) or (d, tx, tu, par)")) out <- list( name = name, creator = "Saturated", family = pairsat.family, pot = pot, par = NULL, parnames = NULL, init = NULL, update = function(self, ...){ do.call(Saturated, resolve.defaults(list(...), list(pot=self$pot, name=self$name))) } , print = function(self) { cat("Potential function:\n") print(self$pot) invisible() }, version=versionstring.spatstat() ) class(out) <- "interact" return(out) } Saturated <- intermaker(Saturated, list(creator="Saturated", name="saturated process with user-defined potential", par=formals(Saturated), parnames=list("the potential", "the name of the interaction"))) spatstat/R/psstA.R0000755000176200001440000001150013115271120013524 0ustar liggesusers# # psstA.R # # Pseudoscore residual for unnormalised F (area-interaction) # # $Revision: 1.7 $ $Date: 2014/11/11 02:31:44 $ # ################################################################################ # psstA <- function(object, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), ppmcorrection="border", correction="all", truecoef=NULL, hi.res=NULL, nr=spatstat.options("psstA.nr"), ngrid=spatstat.options("psstA.ngrid")) { if(inherits(object, "ppm")) fit <- object else 
if(inherits(object, "ppp") || inherits(object, "quad")) { # convert to quadscheme if(inherits(object, "ppp")) object <- quadscheme(object, ...) # fit model if(!is.null(model)) fit <- update(model, Q=object, forcefit=TRUE) else if(ppmcorrection == "border") fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) else fit <- ppm(object, trend=trend, interaction=interaction, correction=ppmcorrection, forcefit=TRUE) } else stop("object should be a fitted point process model or a point pattern") rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy # E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) bX <- bdist.points(X) USEDX <- (bX > rbord) } else { USED <- rep.int(TRUE, U$n) USEDX <- rep.int(TRUE, X$n) } # basic statistics Win <- Window(X) npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # determine breakpoints for r values rmaxdefault <- rmax.rule("F", Win, lambda) if(rfixed) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) else { # create fairly coarse 'r' values r <- seq(0, rmaxdefault, length=nr) breaks <- breakpts.from.r(r) } rvals <- breaks$r rmax <- breaks$max # residuals res <- residuals(fit, type="raw", drop=FALSE, new.coef=truecoef, quad=hi.res) # rescts <- with(res, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~V[A](r), NULL), "theo", . 
~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~V[A]") # # for efficiency, compute the largest value of distance transform Dmax <- 0 for(i in 1:npts) { Di <- distmap(X[-i]) Dimax <- summary(Di)$max Dmax <- max(Dmax, Dimax) } Rmax <- min(max(rvals), Dmax * 1.1) nontrivial <- (rvals <= Rmax) trivialzeroes <- numeric(sum(!nontrivial)) # pseudosum Ax <- areaLoss.grid(X, rvals[nontrivial], subset=USEDX, ngrid=ngrid) C1 <- apply(Ax, 2, sum) C1 <- c(C1, trivialzeroes) # pseudocompensator OK <- USED & !Z Au <- areaGain.grid(U[OK], X, rvals[nontrivial], W=Win, ngrid=ngrid) lamu <- matrix(wc[OK], nrow=nrow(Au), ncol=ncol(Au)) C2 <- apply(lamu * Au, 2, sum) C2 <- c(C2, trivialzeroes) # pseudoscore residual Ctot <- C1 - C2 # tack on ans <- bind.fv(ans, data.frame(dat=C1, com=C2, res=Ctot), c("Sigma~Delta~V[A](r)", "bold(C)~Delta~V[A](r)", "%s(r)"), c("data pseudosum (contribution to %s)", "model pseudocompensator (contribution to %s)", "pseudoscore residual %s"), "res") # # pseudovariance # (skipped if called by envelope() etc) # if(correction == "all") { lamX <- matrix(wc[USED & Z], nrow=nrow(Ax), ncol=ncol(Ax)) Var <- apply(lamu * Au^2, 2, sum) + apply(lamX * Ax^2, 2, sum) Var <- c(Var, trivialzeroes) # two-sigma limits TwoSig <- 2 * sqrt(Var) # tack on ans <- bind.fv(ans, data.frame(var=Var, up=TwoSig, lo=-TwoSig), c("bold(C)^2~Delta~V[A](r)", "%s[up](r)", "%s[lo](r)"), c("pseudovariance of %s", "upper 2sigma critical limit for %s", "lower 2sigma critical limit for %s"), "res") fvnames(ans, ".") <- c("res", "up", "lo", "theo") } unitname(ans) <- unitname(fit) # return(ans) } spatstat/R/linequad.R0000755000176200001440000002253313115271120014244 0ustar liggesusers# # linequad.R # # $Revision: 1.14 $ $Date: 2017/06/05 10:31:58 $ # # create quadscheme for a pattern of points lying *on* line segments linequad <- function(X, Y, ..., eps=NULL, nd=1000, random=FALSE) { epsgiven <- !is.null(eps) if(is.lpp(X)) { # extract local coordinates from lpp object coo 
<- coords(X) mapXY <- coo$seg tp <- coo$tp Xproj <- as.ppp(X) if(!missing(Y) && !is.null(Y)) warning("Argument Y ignored when X is an lpp object") Y <- as.psp(X) } else if(is.ppp(X)) { # project data points onto segments stopifnot(is.psp(Y)) v <- project2segment(X, Y) Xproj <- v$Xproj mapXY <- v$mapXY tp <- v$tp } else stop("X should be an object of class lpp or ppp") # handle multitype ismulti <- is.multitype(X) if(is.marked(X) && !ismulti) stop("Not implemented for marked patterns") if(ismulti) { marx <- marks(X) flev <- factor(levels(marx)) } # win <- as.owin(Y) len <- lengths.psp(Y) nseg <- length(len) if(is.null(eps)) { stopifnot(is.numeric(nd) && length(nd) == 1L & is.finite(nd) && nd > 0) eps <- sum(len)/nd } else stopifnot(is.numeric(eps) && length(eps) == 1L && is.finite(eps) && eps > 0) ## if(is.lpp(X) && spatstat.options('Clinequad')) { L <- as.linnet(X) W <- Frame(L) V <- vertices(L) nV <- npoints(V) coordsV <- coords(V) coordsX <- coords(X) nX <- npoints(X) ooX <- order(coordsX$seg) ndumeach <- ceiling(len/eps) + 1L ndummax <- sum(ndumeach) maxdataperseg <- max(table(factor(coordsX$seg, levels=1:nsegments(L)))) maxscratch <- max(ndumeach) + maxdataperseg if(!ismulti) { if(!random) { z <- .C("Clinequad", ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ndat = as.integer(nX), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE = "spatstat") } else { z <- .C("ClineRquad", ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), 
eps = as.double(eps), ndat = as.integer(nX), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE = "spatstat") } seqdum <- seq_len(z$ndum) dum <- with(z, ppp(xdum[seqdum], ydum[seqdum], window=W, check=FALSE)) wdum <- z$wdum[seqdum] wdat <- numeric(nX) wdat[ooX] <- z$wdat dat <- as.ppp(X) } else { ntypes <- length(flev) ndummax <- ntypes * (ndummax + nX) maxscratch <- ntypes * maxscratch if(!random) { z <- .C("ClineMquad", ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ntypes = as.integer(ntypes), ndat = as.integer(nX), xdat = as.double(coordsX$x), ydat = as.double(coordsX$y), mdat = as.integer(as.integer(marx)-1L), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), mdum = as.integer(integer(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE = "spatstat") } else { z <- .C("ClineRMquad", ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ntypes = as.integer(ntypes), ndat = as.integer(nX), xdat = as.double(coordsX$x), ydat = as.double(coordsX$y), mdat = as.integer(as.integer(marx)-1L), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = 
as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), mdum = as.integer(integer(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE = "spatstat") } seqdum <- seq_len(z$ndum) marques <- factor(z$mdum[seqdum] + 1L, labels=flev) dum <- with(z, ppp(xdum[seqdum], ydum[seqdum], marks=marques, window=W, check=FALSE)) wdum <- z$wdum[seqdum] wdat <- numeric(nX) wdat[ooX] <- z$wdat dat <- as.ppp(X) } } else { ## older, interpreted code ## initialise quad scheme dat <- dum <- ppp(numeric(0), numeric(0), window=win) wdat <- wdum <- numeric(0) if(ismulti) marks(dat) <- marks(dum) <- marx[integer(0)] ## consider each segment in turn YY <- as.data.frame(Y) for(i in 1:nseg) { ## divide segment into pieces of length eps ## with shorter bits at each end leni <- len[i] nwhole <- floor(leni/eps) if(leni/eps - nwhole < 0.5 && nwhole > 2) nwhole <- nwhole - 1 rump <- (leni - nwhole * eps)/2 brks <- c(0, rump + (0:nwhole) * eps, leni) nbrks <- length(brks) ## dummy points at middle of each piece sdum <- (brks[-1L] + brks[-nbrks])/2 x <- with(YY, x0[i] + (sdum/leni) * (x1[i]-x0[i])) y <- with(YY, y0[i] + (sdum/leni) * (y1[i]-y0[i])) newdum <- list(x=x, y=y) ndum <- length(sdum) IDdum <- 1:ndum ## relevant data points relevant <- (mapXY == i) newdat <- Xproj[relevant] sdat <- leni * tp[relevant] IDdat <- findInterval(sdat, brks, rightmost.closed=TRUE, all.inside=TRUE) ## determine weights w <- countingweights(id=c(IDdum, IDdat), areas=diff(brks)) wnewdum <- w[1:ndum] wnewdat <- w[-(1:ndum)] ## if(!ismulti) { ## unmarked pattern dat <- superimpose(dat, newdat, W=win, check=FALSE) dum <- superimpose(dum, newdum, W=win, check=FALSE) wdat <- c(wdat, wnewdat) wdum <- c(wdum, wnewdum) } else { ## marked point pattern ## attach correct marks to data points marks(newdat) <- marx[relevant] dat <- superimpose(dat, newdat, W=win, check=FALSE) wdat <- c(wdat, wnewdat) newdum <- 
as.ppp(newdum, W=win, check=FALSE) ## replicate dummy points with each mark ## also add points at data locations with other marks for(k in seq_len(length(flev))) { le <- flev[k] avoid <- (marks(newdat) != le) dum <- superimpose(dum, newdum %mark% le, newdat[avoid] %mark% le, W=win, check=FALSE) wdum <- c(wdum, wnewdum, wnewdat[avoid]) } } } } ## save parameters dmethod <- paste("Equally spaced along each segment at spacing eps =", signif(eps, 4), summary(unitname(X))$plural) if(!epsgiven) dmethod <- paste0(dmethod, "\nOriginal parameter nd = ", nd) wmethod <- "Counting weights based on segment length" param <- list(dummy = list(method=dmethod), weight = list(method=wmethod)) ## make quad scheme Qout <- quad(dat, dum, c(wdat, wdum), param=param) ## silently attach lines attr(Qout, "lines") <- Y return(Qout) } spatstat/R/logistic.R0000644000176200001440000003350213164653006014265 0ustar liggesusers# # logistic.R # # $Revision: 1.24 $ $Date: 2017/10/03 09:15:23 $ # # Logistic likelihood method - under development # logi.engine <- function(Q, trend = ~1, interaction, ..., covariates=NULL, subsetexpr=NULL, clipwin=NULL, correction="border", rbord=reach(interaction), covfunargs=list(), allcovar=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL, justQ = FALSE, savecomputed = FALSE, precomputed = NULL, VB=FALSE ){ if(is.null(trend)) trend <- ~1 if(is.null(interaction)) interaction <- Poisson() want.trend <- !identical.formulae(trend, ~1) want.inter <- !is.poisson(interaction) want.subset <- !is.null(subsetexpr) # validate choice of edge correction correction <- pickoption("correction", correction, c(border="border", periodic="periodic", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", none="none")) # rbord applies only to border correction if(correction == "border") { check.1.real(rbord, "In ppm") explain.ifnot(rbord >= 0, "In ppm") } else rbord <- 0 # backdoor stuff if(!missing(vnamebase)) { 
if(length(vnamebase) == 1) vnamebase <- rep.int(vnamebase, 2) if(!is.character(vnamebase) || length(vnamebase) != 2) stop("Internal error: illegal format of vnamebase") } if(!is.null(vnameprefix)) { if(!is.character(vnameprefix) || length(vnameprefix) != 1) stop("Internal error: illegal format of vnameprefix") } # create dummy points if(inherits(Q, "ppp")){ Xplus <- Q Q <- quadscheme.logi(Xplus, ...) D <- Q$dummy Dinfo <- Q$param } else if(checkfields(Q, c("data", "dummy"))) { Xplus <- Q$data D <- Q$dummy Dinfo <- Q$param if(is.null(Dinfo)){ Dinfo <- list(how="given", rho=npoints(D)/(area(D)*markspace.integral(D))) } Q <- quadscheme.logi(Xplus, D) } else stop("Format of object Q is not understood") ## clip to subset? if(!is.null(clipwin)) { if(is.data.frame(covariates)) { ok <- inside.owin(union.quad(Q), w=clipwin) covariates <- covariates[ok, , drop=FALSE] } Q <- Q[clipwin] Xplus <- Q$data D <- Q$dummy } if (justQ) return(Q) ### Dirty way of recording arguments so that the model can be refitted later (should probably be done using call, eval, envir, etc.): extraargs <- list(covfunargs = covfunargs, allcovar = allcovar, vnamebase = vnamebase, vnameprefix = vnameprefix) extraargs <- append(extraargs, list(...)) ## Dummy intensity if(correction == "border" && Dinfo$how=="grid"){ Dbord <- D[bdist.points(D)>=rbord] Dinfo$rho <- npoints(Dbord)/(eroded.areas(as.owin(Dbord), rbord)*markspace.integral(Dbord)) } rho <- Dinfo$rho ##Setting the B from Barker dynamics (relative to dummy intensity) B <- list(...)$Barker if(is.null(B)) B <- 1 B <- B*rho Dinfo <- append(Dinfo, list(B=B)) Dinfo <- append(Dinfo, list(extraargs=extraargs)) # Wplus <- as.owin(Xplus) nXplus <- npoints(Xplus) U <- superimpose(Xplus, D, W=Wplus, check=FALSE) # E <- equalpairs(U, Xplus, marked = is.marked(Xplus)) E <- cbind(1:nXplus, 1:nXplus) # computed <- if (savecomputed) list(X = Xplus, Q = Q, U = U) else list() # assemble covariate data frame if(want.trend || want.subset) { tvars <- 
variablesinformula(trend) if(want.subset) tvars <- union(tvars, all.vars(subsetexpr)) if(!is.data.frame(covariates)) { ## resolve 'external' covariates externalvars <- setdiff(tvars, c("x", "y", "marks")) tenv <- environment(trend) covariates <- getdataobjects(externalvars, tenv, covariates, fatal=TRUE) } wantxy <- c("x", "y") %in% tvars wantxy <- wantxy | rep.int(allcovar, 2) cvdf <- data.frame(x=U$x, y=U$y)[, wantxy, drop=FALSE] if(!is.null(covariates)) { df <- mpl.get.covariates(covariates, U, "quadrature points", covfunargs) cvdf <- cbind(cvdf, df) } wantmarks <- "marks" %in% tvars if(wantmarks) cvdf <- cbind(cvdf, marks = marks(U)) } else cvdf <- NULL # evaluate interaction sufficient statistics if (!is.null(ss <- interaction$selfstart)) interaction <- ss(Xplus, interaction) V <- evalInteraction(Xplus, U, E, interaction, correction, precomputed = precomputed, savecomputed = savecomputed) if(!is.matrix(V)) stop("evalInteraction did not return a matrix") if (savecomputed) computed <- append(computed, attr(V, "computed")) IsOffset <- attr(V, "IsOffset") if(is.null(IsOffset)) IsOffset <- rep.int(FALSE, ncol(V)) # determine names if(ncol(V) > 0) { Vnames <- colnames(V) if(is.null(Vnames)) { nc <- ncol(V) Vnames <- if(nc == 1) vnamebase[1L] else paste(vnamebase[2L], 1:nc, sep="") colnames(V) <- Vnames } else if(!is.null(vnameprefix)) { Vnames <- paste(vnameprefix, Vnames, sep="") colnames(V) <- Vnames } } else Vnames <- character(0) # combine all data glmdata <- as.data.frame(V) if(!is.null(cvdf)) glmdata <- cbind(glmdata, cvdf) # construct response and weights ok <- if(correction == "border") (bdist.points(U) >= rbord) else rep.int(TRUE, npoints(U)) # Keep only those quadrature points for which the # conditional intensity is nonzero. 
KEEP <- if(ncol(V)>0) matrowall(V != -Inf) else rep.int(TRUE, npoints(U)) ok <- ok & KEEP wei <- c(rep.int(1,npoints(Xplus)),rep.int(B/rho,npoints(D))) resp <- c(rep.int(1,npoints(Xplus)),rep.int(0,npoints(D))) ## User-defined subset: if(!is.null(subsetexpr)) { USERSUBSET <- eval(subsetexpr, glmdata, environment(trend)) ok <- ok & USERSUBSET } # add offset, subset and weights to data frame # using reserved names beginning with ".logi." glmdata <- cbind(glmdata, .logi.Y = resp, .logi.B = B, .logi.w = wei, .logi.ok =ok) # build glm formula # (reserved names begin with ".logi.") trendpart <- paste(as.character(trend), collapse=" ") fmla <- paste(".logi.Y ", trendpart) # Interaction terms if(want.inter) { VN <- Vnames # enclose offset potentials in 'offset(.)' if(any(IsOffset)) VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="") fmla <- paste(c(fmla, VN), collapse="+") } # add offset intrinsic to logistic technique fmla <- paste(fmla, "offset(-log(.logi.B))", sep="+") fmla <- as.formula(fmla) # to satisfy package checker: .logi.B <- B .logi.w <- wei .logi.ok <- ok .logi.Y <- resp # suppress warnings from code checkers dont.complain.about(.logi.B, .logi.w, .logi.ok, .logi.Y) # go ##fit <- glm(fmla, data=glmdata, ## family=binomial(), subset = .logi.ok, weights = .logi.w) fit <- if(VB) vblogit.fmla(fmla, data = glmdata, subset = .logi.ok, weights = .logi.w, ...) else glm(fmla, data = glmdata, family = binomial(), subset = .logi.ok, weights = .logi.w) environment(fit$terms) <- sys.frame(sys.nframe()) ## Fitted coeffs co <- coef(fit) fitin <- fii(interaction, co, Vnames, IsOffset) ## Saturated log-likelihood: satlogpl <- sum(ok*resp*log(B)) ## Max. 
value of log-likelihood: maxlogpl <- logLik(fit) + satlogpl # Stamp with spatstat version number spv <- package_version(versionstring.spatstat()) the.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2017/10/03 09:15:23 $") ## Compile results fit <- list(method = "logi", fitter = "glm", projected = FALSE, coef = co, trend = trend, interaction = interaction, Q = Q, correction = correction, rbord = rbord, terms = terms(trend), version = the.version, fitin = fitin, maxlogpl = maxlogpl, satlogpl = satlogpl, covariates = mpl.usable(covariates), # varcov = if(VB) fit$S else NULL, internal = list(Vnames = Vnames, IsOffset=IsOffset, glmdata = glmdata, glmfit = fit, logistic = Dinfo, computed = computed, vnamebase=vnamebase, vnameprefix=vnameprefix, VB = if(VB) TRUE else NULL, priors = if(VB) fit$priors else NULL ) ) class(fit) <- "ppm" return(fit) } forbid.logi <- function(object) { if(object$method == "logi") stop("Sorry, this is not implemented for method=\'logi\'") return(invisible(NULL)) } logi.dummy <- function(X, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, ...){ ## Resolving nd inspired by default.n.tiling if(is.null(nd)){ nd <- spatstat.options("ndummy.min") if(inherits(X, "ppp")) nd <- pmax(nd, 10 * ceiling(2 * sqrt(X$n)/10)) } nd <- ensure2vector(nd) marx <- is.multitype(X) if(marx) lev <- levels(marks(X)) if(marx && mark.repeat){ N <- length(lev) Dlist <- inDlist <- vector("list", N) } else{ N <- 1 } W <- as.owin(X) type <- match.arg(dummytype, c("stratrand", "binomial", "poisson", "grid", "transgrid")) B <- boundingbox(W) rho <- nd[1L]*nd[2L]/area(B) Dinfo <- list(nd=nd, rho=rho, how=type) ## Repeating dummy process for each mark type 1:N (only once if unmarked or mark.repeat = FALSE) for(i in 1:N){ switch(type, stratrand={ D <- as.ppp(stratrand(B, nd[1L], nd[2L]), W = B) inD <- which(inside.owin(D, w = W)) D <- D[W] inD <- paste(i,inD,sep="_") }, binomial={ D <- runifpoint(nd[1L]*nd[2L], win=B) D <- D[W] }, 
poisson={ D <- rpoispp(rho, win = W) }, grid={ D <- as.ppp(gridcenters(B, nd[1L], nd[2L]), W = B) inD <- which(inside.owin(D, w = W)) D <- D[W] inD <- paste(i,inD,sep="_") }, transgrid={ D <- as.ppp(gridcenters(B, nd[1L], nd[2L]), W = B) dxy <- c(diff(D$window$xrange),diff(D$window$yrange))/(2*nd) coords(D) <- coords(D)+matrix(runif(2,-dxy,dxy),npoints(D),2,byrow=TRUE) inD <- which(inside.owin(D, w = W)) D <- D[W] inD <- paste(i,inD,sep="_") }, stop("unknown dummy type")) if(marx && mark.repeat){ marks(D) <- factor(lev[i], levels = lev) Dlist[[i]] <- D if(type %in% c("stratrand","grid","transgrid")) inDlist[[i]] <- inD } } if(marx && mark.repeat){ inD <- Reduce(append, inDlist) D <- Reduce(superimpose, Dlist) } if(type %in% c("stratrand","grid","transgrid")) Dinfo <- append(Dinfo, list(inD=inD)) if(marx && !mark.repeat){ marks(D) <- sample(factor(lev, levels=lev), npoints(D), replace = TRUE) Dinfo$rho <- Dinfo$rho/length(lev) } attr(D, "dummy.parameters") <- Dinfo return(D) } quadscheme.logi <- function(data, dummy, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, ...){ data <- as.ppp(data) ## If dummy is missing we generate dummy pattern with logi.dummy. if(missing(dummy)) dummy <- logi.dummy(data, dummytype, nd, mark.repeat, ...) 
Dinfo <- attr(dummy, "dummy.parameters") D <- as.ppp(dummy) if(is.null(Dinfo)) Dinfo <- list(how="given", rho=npoints(D)/(area(D)*markspace.integral(D))) ## Weights: n <- npoints(data)+npoints(D) w <- area(Window(data))/n Q <- quad(data, D, rep(w,n), param=Dinfo) class(Q) <- c("logiquad", class(Q)) return(Q) } summary.logiquad <- function(object, ..., checkdup=FALSE) { verifyclass(object, "logiquad") s <- list( data = summary.ppp(object$data, checkdup=checkdup), dummy = summary.ppp(object$dummy, checkdup=checkdup), param = object$param) class(s) <- "summary.logiquad" return(s) } print.summary.logiquad <- function(x, ..., dp=3) { cat("Quadrature scheme = data + dummy\n") Dinfo <- x$param if(is.null(Dinfo)) cat("created by an unknown function.\n") cat("Data pattern:\n") print(x$data, dp=dp) cat("\n\nDummy pattern:\n") # How they were computed switch(Dinfo$how, stratrand={ cat(paste("(Stratified random dummy points,", paste(Dinfo$nd, collapse=" x "), "grid of cells)\n")) }, binomial={ cat("(Binomial dummy points)\n") }, poisson={ cat("(Poisson dummy points)\n") }, grid={ cat(paste("(Fixed grid of dummy points,", paste(Dinfo$nd, collapse=" x "), "grid)\n")) }, transgrid={ cat(paste("(Random translation of fixed grid of dummy points,", paste(Dinfo$nd, collapse=" x "), "grid)\n")) }, given=cat("(Dummy points given by user)\n") ) # Description of them print(x$dummy, dp=dp) return(invisible(NULL)) } spatstat/R/ordthresh.R0000755000176200001440000000325713115271120014446 0ustar liggesusers# # # ordthresh.S # # $Revision: 1.11 $ $Date: 2015/10/21 09:06:57 $ # # Ord process with threshold potential # # OrdThresh() create an instance of the Ord process # [an object of class 'interact'] # with threshold potential # # # ------------------------------------------------------------------- # OrdThresh <- local({ BlankOrdThresh <- list( name = "Ord process with threshold potential", creator = "OrdThresh", family = "ord.family", pot = function(d, par) { (d <= par$r) }, par = list(r = 
NULL), parnames = "threshold distance", init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("threshold distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) is.finite(loggamma) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(...) { return(Inf) }, version=NULL ) class(BlankOrdThresh) <- "interact" OrdThresh <- function(r) { instantiate.interact(BlankOrdThresh, list(r=r)) } OrdThresh <- intermaker(OrdThresh, BlankOrdThresh) OrdThresh }) spatstat/R/fgk3.R0000755000176200001440000003757613115271075013322 0ustar liggesusers# # $Revision: 1.26 $ $Date: 2017/06/05 10:31:58 $ # # Estimates of F, G and K for three-dimensional point patterns # # # ............ user interface ............................. 
# K3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("translation", "isotropic"), ratio=FALSE) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(translation="translation", trans="translation", isotropic="isotropic", iso="isotropic", best="isotropic"), multi=TRUE) trap.extra.arguments(..., .Context="In K3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) np <- npoints(X) denom <- np * (np-1)/volume(B) # this will be the output data frame K <- data.frame(r=r, theo= (4/3) * pi * r^3) desc <- c("distance argument r", "theoretical Poisson %s") K <- ratfv(K, NULL, denom, "r", quote(K[3](r)), "theo", NULL, c(0,rmax/2), c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("K", "3"), ratio=ratio) # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # extract coordinates coo <- coords(X) if(any(correction %in% "translation")) { u <- k3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="translation") K <- bind.ratfv(K, data.frame(trans=u$num), u$denom, "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction %in% "isotropic")) { u <- k3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="isotropic") K <- bind.ratfv(K, data.frame(iso=u$num), u$denom, "{hat(%s)[%s]^{iso}}(r)", "isotropic-corrected estimate of %s", "iso", ratio=ratio) } # default is to display them all formula(K) <- . 
~ r unitname(K) <- unitname(X) return(K) } G3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("rs", "km", "Hanisch")) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(rs="rs", border="rs", km="km", KM="km", Hanisch="han", hanisch="han", best="km"), multi=TRUE) trap.extra.arguments(..., .Context="In G3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) coo <- coords(X) lambda <- nrow(coo)/volume(B) # this will be the output data frame G <- data.frame(r=r, theo= 1 - exp( - lambda * (4/3) * pi * r^3)) desc <- c("distance argument r", "theoretical Poisson %s") G <- fv(G, "r", substitute(G3(r), NULL), "theo", , c(0,rmax/2), c("r","%s[pois](r)"), desc, fname="G3") # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # collect four histograms for censored data u <- g3Cengine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval) if("rs" %in% correction) G <- bind.fv(G, data.frame(rs=u$rs), "%s[rs](r)", "reduced sample estimate of %s", "rs") if("km" %in% correction) G <- bind.fv(G, data.frame(km=u$km), "%s[km](r)", "Kaplan-Meier estimate of %s", "km") if("han" %in% correction) G <- bind.fv(G, data.frame(han=u$han), "%s[han](r)", "Normalised Hanisch estimate of %s", "han") # default is to display them all formula(G) <- . 
~ r unitname(G) <- unitname(X) return(G) } F3est <- function(X, ..., rmax=NULL, nrval=128, vside=NULL, correction=c("rs", "km", "cs"), sphere=c("fudge", "ideal", "digital")) { stopifnot(inherits(X, "pp3")) sphere <- match.arg(sphere) correction <- pickoption("correction", correction, c(rs="rs", border="rs", km="km", KM="km", Kaplan="km", cs="cs", CS="cs", best="km"), multi=TRUE) trap.extra.arguments(..., .Context="In F3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) coo <- coords(X) vol <- volume(B) lambda <- nrow(coo)/vol # determine voxel size if(missing(vside)) { voxvol <- vol/spatstat.options("nvoxel") vside <- voxvol^(1/3) # ensure the shortest side is a whole number of voxels s <- shortside(B) m <- ceiling(s/vside) vside <- s/m } # compute theoretical value switch(sphere, ideal = { volsph <- (4/3) * pi * r^3 spherename <- "ideal sphere" }, fudge = { volsph <- 0.78 * (4/3) * pi * r^3 spherename <- "approximate sphere" }, digital = { volsph <- digital.volume(c(0, rmax), nrval, vside) spherename <- "digital sphere" }) theo.desc <- paste("theoretical Poisson %s using", spherename) # this will be the output data frame FF <- data.frame(r = r, theo = 1 - exp( - lambda * volsph)) desc <- c("distance argument r", theo.desc) labl <- c("r","%s[pois](r)") FF <- fv(FF, "r", substitute(F3(r), NULL), "theo", , c(0,rmax/2), labl, desc, fname="F3") # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # go u <- f3Cengine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, vside=vside) if("rs" %in% correction) FF <- bind.fv(FF, data.frame(rs=u$rs), "%s[rs](r)", "reduced sample estimate of %s", "rs") if("km" %in% correction) FF <- bind.fv(FF, data.frame(km=u$km), "%s[km](r)", "Kaplan-Meier estimate of %s", "km") if("cs" %in% correction) FF <- bind.fv(FF, data.frame(cs=u$cs), "%s[cs](r)", "Chiu-Stoyan estimate of %s", "cs") # default is to display them all formula(FF) <- . 
~ r unitname(FF) <- unitname(X) return(FF) } pcf3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("translation", "isotropic"), delta=NULL, adjust=1, biascorrect=TRUE) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(translation="translation", trans="translation", isotropic="isotropic", iso="isotropic", best="isotropic"), multi=TRUE) trap.extra.arguments(..., .Context="In pcf3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) if(is.null(delta)) { lambda <- npoints(X)/volume(B) delta <- adjust * 0.26/lambda^(1/3) } if(biascorrect) { # bias correction rondel <- r/delta biasbit <- ifelseAX(rondel > 1, 1, (3/4)*(rondel + 2/3 - (1/3)*rondel^3)) } # this will be the output data frame g <- data.frame(r=r, theo=rep.int(1, length(r))) desc <- c("distance argument r", "theoretical Poisson %s") g <- fv(g, "r", quote(g[3](r)), "theo", , c(0,rmax/2), c("r", "{%s[%s]^{pois}}(r)"), desc, fname=c("g", "3")) # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # extract coordinates coo <- coords(X) if(any(correction %in% "translation")) { u <- pcf3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="translation", delta=delta) gt <- u$f if(biascorrect) gt <- gt/biasbit g <- bind.fv(g, data.frame(trans=gt), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction %in% "isotropic")) { u <- pcf3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="isotropic", delta=delta) gi <- u$f if(biascorrect) gi <- gi/biasbit g <- bind.fv(g, data.frame(iso=gi), "{hat(%s)[%s]^{iso}}(r)", "isotropic-corrected estimate of %s", "iso") } # default is to display them all formula(g) <- . ~ r unitname(g) <- unitname(X) attr(g, "delta") <- delta return(g) } # ............ low level code .............................. 
# k3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=100, correction="translation") { code <- switch(correction, translation=0, isotropic=1) res <- .C("RcallK3", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), as.integer(code), PACKAGE = "spatstat") return(list(range = c(0,rmax), f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # # g3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=10, correction="Hanisch G3") { code <- switch(correction, "minus sampling"=1, "Hanisch G3"=3) res <- .C("RcallG3", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), as.integer(code), PACKAGE = "spatstat") return(list(range = range, f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # f3engine <- function(x, y, z, box=c(0,1,0,1,0,1), vside=0.05, range=c(0,1.414), nval=25, correction="minus sampling") { # code <- switch(correction, "minus sampling"=1, no=0) res <- .C("RcallF3", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(vside), as.double(range[1L]), as.double(range[2L]), m=as.integer(nval), num = as.integer(integer(nval)), denom = as.integer(integer(nval)), as.integer(code), PACKAGE = "spatstat") r <- seq(from=range[1L], to=range[2L], length.out=nval) f <- with(res, ifelseXB(denom > 0, num/denom, 1)) return(list(r = r, 
f = f, num=res$num, denom=res$denom, correction=correction)) } f3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1), vside=0.05, rmax=1, nrval=25) { # res <- .C("RcallF3cen", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(vside), as.double(0), as.double(rmax), m=as.integer(nrval), obs = as.integer(integer(nrval)), nco = as.integer(integer(nrval)), cen = as.integer(integer(nrval)), ncc = as.integer(integer(nrval)), upperobs = as.integer(integer(1L)), uppercen = as.integer(integer(1L)), PACKAGE = "spatstat") r <- seq(from=0, to=rmax, length.out=nrval) # obs <- res$obs nco <- res$nco cen <- res$cen ncc <- res$ncc upperobs <- res$upperobs uppercen <- res$uppercen # breaks <- breakpts.from.r(r) km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # ero <- eroded.volumes(as.box3(box), r) H <- cumsum(nco/ero) cs <- H/max(H[is.finite(H)]) # return(list(rs=rs, km=km$km, hazard=km$lambda, cs=cs, r=r)) } g3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=25) { # res <- .C("RcallG3cen", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), m=as.integer(nrval), obs = as.integer(integer(nrval)), nco = as.integer(integer(nrval)), cen = as.integer(integer(nrval)), ncc = as.integer(integer(nrval)), upperobs = as.integer(integer(1L)), uppercen = as.integer(integer(1L)), PACKAGE = "spatstat") r <- seq(from=0, to=rmax, length.out=nrval) # obs <- res$obs nco <- res$nco cen <- res$cen ncc <- res$ncc upperobs <- res$upperobs uppercen <- res$uppercen # breaks <- breakpts.from.r(r) km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # ero <- 
eroded.volumes(as.box3(box), r) H <- cumsum(nco/ero) han <- H/max(H[is.finite(H)]) return(list(rs=rs, km=km$km, hazard=km$lambda, han=han, r=r)) } pcf3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=100, correction="translation", delta=rmax/10) { code <- switch(correction, translation=0, isotropic=1) res <- .C("Rcallpcf3", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), method=as.integer(code), delta=as.double(delta), PACKAGE = "spatstat") return(list(range = c(0,rmax), f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # ------------------------------------------------------------ # volume of a sphere (exact and approximate) # sphere.volume <- function(range=c(0,1.414), nval=10) { rr <- seq(from=range[1L], to=range[2L], length.out=nval) return( (4/3) * pi * rr^3) } digital.volume <- function(range=c(0, 1.414), nval=25, vside= 0.05) { # Calculate number of points in digital sphere # by performing distance transform for a single point # in the middle of a suitably large box # # This takes EIGHT TIMES AS LONG as the corresponding empirical F-hat !!! 
# w <- 2 * range[2L] + 2 * vside # dvol <- .C("RcallF3", as.double(w/2), as.double(w/2), as.double(w/2), as.integer(1L), as.double(0), as.double(w), as.double(0), as.double(w), as.double(0), as.double(w), as.double(vside), as.double(range[1L]), as.double(range[2L]), as.integer(nval), num = as.integer(integer(nval)), denom = as.integer(integer(nval)), as.integer(0), PACKAGE = "spatstat")$num # (vside^3) * dvol } spatstat/R/mppm.R0000755000176200001440000005660513115271120013422 0ustar liggesusers# # mppm.R # # $Revision: 1.81 $ $Date: 2016/12/30 01:44:07 $ # mppm <- local({ mppm <- function(formula, data, interaction=Poisson(), ..., iformula=NULL, #%^!ifdef RANDOMEFFECTS random=NULL, #%^!endif use.gam=FALSE, #%^!ifdef RANDOMEFFECTS reltol.pql=1e-3, #%^!endif gcontrol=list() ) { ## remember call cl <- match.call() callstring <- paste(short.deparse(sys.call()), collapse="") ## Validate arguments if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) stopifnot(is.hyperframe(data)) data.sumry <- summary(data, brief=TRUE) npat <- data.sumry$ncases if(npat == 0) stop(paste("Hyperframe", sQuote("data"), "has zero rows")) if(!is.null(iformula) && !inherits(iformula, "formula")) stop(paste("Argument", sQuote("iformula"), "should be a formula or NULL")) #%^!ifdef RANDOMEFFECTS if(has.random <- !is.null(random)) { if(!inherits(random, "formula")) stop(paste(sQuote("random"), "should be a formula or NULL")) if(use.gam) stop("Sorry, random effects are not available in GAMs") } #%^!endif if(! 
(is.interact(interaction) || is.hyperframe(interaction))) stop(paste("The argument", sQuote("interaction"), "should be a point process interaction object (class", dQuote("interact"), "), or a hyperframe containing such objects", sep="")) backdoor <- list(...)$backdoor if(is.null(backdoor) || !is.logical(backdoor)) backdoor <- FALSE ############## HANDLE FORMULAS ############################ ##------ Trend Formula ------------------ ## check all variables in trend formula are recognised checkvars(formula, data.sumry$col.names, extra=c("x","y","id","marks"), bname="data") ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) Yname <- formula[[2]] trend <- formula[c(1,3)] if(!is.name(Yname)) stop("Left hand side of formula should be a single name") Yname <- paste(Yname) if(!inherits(trend, "formula")) stop("Internal error: failed to extract RHS of formula") allvars <- variablesinformula(trend) ## --- Interaction formula ----- ## names of interactions as they may appear in formulae itags <- if(is.hyperframe(interaction)) names(interaction) else "Interaction" ninteract <- length(itags) ## ensure `iformula' is a formula without a LHS ## and determine which columns of `interaction' are actually used if(is.null(iformula)) { if(ninteract > 1) stop(paste("interaction hyperframe has more than 1 column;", "you must specify the choice of interaction", "using argument", sQuote("iformula"))) iused <- TRUE iformula <- as.formula(paste("~", itags)) } else { if(length(iformula) > 2) stop(paste("The interaction formula", sQuote("iformula"), "should not have a left hand side")) ## valid variables in `iformula' are interactions and data frame columns permitted <- paste(sQuote("interaction"), "or permitted name in", sQuote("data")) checkvars(iformula, itags, extra=c(data.sumry$dfnames, "id"), bname=permitted) ivars <- variablesinformula(iformula) ## check which columns of `interaction' are actually 
used iused <- itags %in% ivars if(sum(iused) == 0) stop("No interaction specified in iformula") ## OK allvars <- c(allvars, ivars) } #%^!ifdef RANDOMEFFECTS ## --- Random effects formula ---- if(!is.null(random)) { if(length(random) > 2) stop(paste("The random effects formula", sQuote("random"), "should not have a left hand side")) checkvars(random, itags, extra=c(data.sumry$col.names, "x", "y", "id"), bname="either data or interaction") allvars <- c(allvars, variablesinformula(random)) } #%^!endif ## ---- variables required (on RHS of one of the above formulae) ----- allvars <- unique(allvars) ######## EXTRACT DATA ##################################### ## Insert extra variable 'id' data <- cbind.hyperframe(data, id=factor(1:npat)) data.sumry <- summary(data, brief=TRUE) allvars <- unique(c(allvars, "id")) ## Extract the list of responses (point pattern/quadscheme) Y <- data[, Yname, drop=TRUE] if(npat == 1) Y <- solist(Y) Yclass <- data.sumry$classes[Yname] if(Yclass == "ppp") { ## convert to quadrature schemes, for efficiency's sake Y <- solapply(Y, quadscheme) } else { if(Yclass != "quad") stop(paste("Column", dQuote(Yname), "of data", "does not consist of point patterns (class ppp)", "nor of quadrature schemes (class quad)")) Y <- as.solist(Y) } ## Extract sub-hyperframe of data named in formulae datanames <- names(data) used.cov.names <- allvars[allvars %in% datanames] has.covar <- (length(used.cov.names) > 0) if(has.covar) { dfvar <- used.cov.names %in% data.sumry$dfnames imvar <- data.sumry$types[used.cov.names] == "im" if(any(nbg <- !(dfvar | imvar))) stop(paste("Inappropriate format for", ngettext(sum(nbg), "covariate", "covariates"), paste(sQuote(used.cov.names[nbg]), collapse=", "), ": should contain image objects or vector/factor")) covariates.hf <- data[, used.cov.names, drop=FALSE] has.design <- any(dfvar) dfvarnames <- used.cov.names[dfvar] datadf <- if(has.design) as.data.frame(covariates.hf, discard=TRUE, warn=FALSE) else NULL if(has.design) { ## 
check for NA's in design covariates # if(any(nbg <- apply(is.na(datadf), 2, any))) if(any(nbg <- matcolany(is.na(datadf)))) stop(paste("There are NA's in the", ngettext(sum(nbg), "covariate", "covariates"), commasep(dQuote(names(datadf)[nbg])))) } } else { has.design <- FALSE datadf <- NULL } ############### INTERACTION ################################### ## ensure `interaction' is a hyperframe of `interact' objects ## with the right number of rows. ## All entries in a column must represent the same process ## (possibly with different values of the irregular parameters). ## Extract the names of the point processes. if(is.interact(interaction)) { ninteract <- 1 processes <- list(Interaction=interaction$name) interaction <- hyperframe(Interaction=interaction, id=1:npat)[,1] constant <- c(Interaction=TRUE) } else if(is.hyperframe(interaction)) { inter.sumry <- summary(interaction) ninteract <- inter.sumry$nvars ## ensure it has the same number of rows as 'data' nr <- inter.sumry$ncases if(nr == 1 && npat > 1) { interaction <- cbind.hyperframe(id=1:npat, interaction)[,-1] inter.sumry <- summary(interaction) } else if(nr != npat) stop(paste("Number of rows in", sQuote("interaction"), "=", nr, "!=", npat, "=", "number of rows in", sQuote("data"))) ## check all columns contain interaction objects ok <- (inter.sumry$classes == "interact") if(!all(ok)) { nbg <- names(interaction)[!ok] nn <- sum(!ok) stop(paste(ngettext(nn, "Column", "Columns"), paste(sQuote(nbg), collapse=", "), ngettext(nn, "does", "do"), "not consist of interaction objects")) } ## all entries in a column must represent the same process type ## (with possibly different values of the irregular parameters) ok <- unlist(lapply(as.list(interaction), consistentname)) if(!all(ok)) { nbg <- names(interaction)[!ok] stop(paste("Different interactions may not appear in a single column.", "Violated by", paste(sQuote(nbg), collapse=", "))) } processes <- lapply(as.list(interaction), firstname) ## determine whether all 
entries in a column are EXACTLY the same ## (=> have the same parameters) constant <- (inter.sumry$storage == "hyperatom") if(any(!constant)) { others <- interaction[,!constant] constant[!constant] <- sapply(lapply(as.list(others), unique), length) == 1 } } ## check for trivial (Poisson) interactions trivial <- unlist(lapply(as.list(interaction), allpoisson)) ## check that iformula does not combine two interactions on one row nondfnames <- datanames[!(datanames %in% data.sumry$dfnames)] ip <- impliedpresence(itags, iformula, datadf, nondfnames) if(any(rowSums(ip) > 1)) stop("iformula invokes more than one interaction on a single row") ## #################### BERMAN-TURNER DEVICE ######################### ## ## set up list to contain the glm variable names for each interaction. Vnamelist <- rep(list(character(0)), ninteract) names(Vnamelist) <- itags ## set up list to contain 'IsOffset' Isoffsetlist <- rep(list(logical(0)), ninteract) names(Isoffsetlist) <- itags #### ## ---------------- L O O P --------------------------------- for(i in 1:npat) { ## extract responses and covariates for presentation to ppm() Yi <- Y[[i]] covariates <- if(has.covar) covariates.hf[i, , drop=TRUE, strip=FALSE] else NULL if(has.design) { ## convert each data frame value to an image covariates[dfvarnames] <- lapply(as.list(as.data.frame(covariates[dfvarnames])), as.im, W=Yi$data$window) } ## Generate data frame and glm info for this point pattern ## First the trend covariates prep0 <- bt.frame(Yi, trend, Poisson(), ..., covariates=covariates, allcovar=TRUE, use.gam=use.gam) glmdat <- prep0$glmdata ## now the nontrivial interaction terms for(j in (1:ninteract)[iused & !trivial]) { inter <- interaction[i,j,drop=TRUE] prepj <- bt.frame(Yi, ~1, inter, ..., covariates=covariates, allcovar=TRUE, use.gam=use.gam, vnamebase=itags[j], vnameprefix=itags[j]) ## store GLM variable names & check consistency vnameij <- prepj$Vnames if(i == 1) Vnamelist[[j]] <- vnameij else if(!identical(vnameij, 
Vnamelist[[j]])) stop("Internal error: Unexpected conflict in glm variable names") ## store offset indicator vectors isoffset.ij <- prepj$IsOffset if(i == 1) Isoffsetlist[[j]] <- isoffset.ij else if(!identical(isoffset.ij, Isoffsetlist[[j]])) stop("Internal error: Unexpected conflict in offset indicators") ## GLM data frame for this interaction glmdatj <- prepj$glmdata if(nrow(glmdatj) != nrow(glmdat)) stop("Internal error: differing numbers of rows in glm data frame") iterms.ij <- glmdatj[vnameij] subset.ij <- glmdatj$.mpl.SUBSET ## tack on columns of interaction terms glmdat <- cbind(glmdat, iterms.ij) ## update subset (quadrature points where cif is positive) glmdat$.mpl.SUBSET <- glmdat$.mpl.SUBSET & subset.ij } ## assemble the Mother Of All Data Frames if(i == 1) { moadf <- glmdat } else { ## There may be new or missing columns recognised <- names(glmdat) %in% names(moadf) if(any(!recognised)) { newnames <- names(glmdat)[!recognised] zeroes <- as.data.frame(matrix(0, nrow(moadf), length(newnames))) names(zeroes) <- newnames moadf <- cbind(moadf, zeroes) } provided <- names(moadf) %in% names(glmdat) if(any(!provided)) { absentnames <- names(moadf)[!provided] zeroes <- as.data.frame(matrix(0, nrow(glmdat), length(absentnames))) names(zeroes) <- absentnames glmdat <- cbind(glmdat, zeroes) } ## Ensure factor columns are consistent m.isfac <- sapply(as.list(glmdat), is.factor) g.isfac <- sapply(as.list(glmdat), is.factor) if(any(uhoh <- (m.isfac != g.isfac))) errorInconsistentRows("values (factor and non-factor)", colnames(moadf)[uhoh]) if(any(m.isfac)) { m.levels <- lapply(as.list(moadf)[m.isfac], levels) g.levels <- lapply(as.list(glmdat)[g.isfac], levels) clash <- !mapply(identical, x=m.levels, y=g.levels) if(any(clash)) errorInconsistentRows("factor levels", (colnames(moadf)[m.isfac])[clash]) } ## Finally they are compatible moadf <- rbind(moadf, glmdat) } } ## ---------------- E N D o f L O O P -------------------------- ## ## backdoor exit - Berman-Turner 
frame only - used by predict.mppm if(backdoor) return(moadf) ## ## ## -------------------------------------------------------------------- ## ## Construct the glm formula for the Berman-Turner device ## ## Get trend part from the last-computed prep0 fmla <- prep0$trendfmla ## Tack on the RHS of the interaction formula if(!all(trivial)) fmla <- paste(fmla, "+", as.character(iformula)[[2]]) ## Make it a formula fmla <- as.formula(fmla) ## Ensure that each interaction name is recognised. ## ## To the user, an interaction is identified by its `tag' name ## (default tag: "Interaction") ## ## Internally, an interaction is fitted using its sufficient statistic ## which may be 0, 1 or k-dimensional. ## The column names of the sufficient statistic are the Vnames ## returned from ppm. ## The Poisson process is a special case: it is 0-dimensional (no Vnames). ## ## For k-dimensional sufficient statistics, we modify the formulae, ## replacing the interaction name by (vname1 + vname2 + .... + vnamek) ## for(j in (1:ninteract)[iused]) { vnames <- Vnamelist[[j]] tag <- itags[j] isoffset <- Isoffsetlist[[j]] if(any(isoffset)) { ## enclose names of offset variables in 'offset()' vnames[isoffset] <- paste("offset(", vnames[isoffset], ")", sep="") } if(trivial[j]) ## Poisson case: add a column of zeroes moadf[[tag]] <- 0 else if(!identical(vnames, tag)) { if(length(vnames) == 1) ## tag to be replaced by vname vn <- paste("~", vnames[1]) else ## tag to be replaced by (vname1 + vname2 + .... 
+ vnamek) vn <- paste("~(", paste(vnames, collapse=" + "), ")") ## pull out formula representation of RHS vnr <- as.formula(vn)[[2]] ## make substitution rule: list(=) vnsub <- list(vnr) names(vnsub) <- tag ## perform substitution in trend formula fmla <- eval(substitute(substitute(fom, vnsub), list(fom=fmla))) #%^!ifdef RANDOMEFFECTS ## perform substitution in random effects formula if(has.random && tag %in% variablesinformula(random)) random <- eval(substitute(substitute(fom, vnsub), list(fom=random))) #%^!endif } } fmla <- as.formula(fmla) ## Fix scoping problem assign("glmmsubset", moadf$.mpl.SUBSET, envir=environment(fmla)) ## Satisfy package checker glmmsubset <- .mpl.SUBSET <- moadf$.mpl.SUBSET .mpl.W <- moadf$.mpl.W ## ---------------- FIT THE MODEL ------------------------------------ want.trend <- prep0$info$want.trend if(want.trend && use.gam) { fitter <- "gam" ctrl <- do.call(gam.control, resolve.defaults(gcontrol, list(maxit=50))) FIT <- gam(fmla, family=quasi(link=log, variance=mu), weights=.mpl.W, data=moadf, subset=(.mpl.SUBSET=="TRUE"), control=ctrl) deviants <- deviance(FIT) #%^!ifdef RANDOMEFFECTS } else if(!is.null(random)) { fitter <- "glmmPQL" ctrl <- do.call(lmeControl, resolve.defaults(gcontrol, list(maxIter=50))) attr(fmla, "ctrl") <- ctrl # very strange way to pass argument fixed <- 42 # to satisfy package checker FIT <- hackglmmPQL(fmla, random=random, family=quasi(link=log, variance=mu), weights=.mpl.W, data=moadf, subset=glmmsubset, control=attr(fixed, "ctrl"), reltol=reltol.pql) deviants <- -2 * logLik(FIT) #%^!endif } else { fitter <- "glm" ctrl <- do.call(glm.control, resolve.defaults(gcontrol, list(maxit=50))) FIT <- glm(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W, data=moadf, subset=(.mpl.SUBSET=="TRUE"), control=ctrl) deviants <- deviance(FIT) } ## maximised log-pseudolikelihood W <- moadf$.mpl.W SUBSET <- moadf$.mpl.SUBSET Z <- (moadf$.mpl.Y != 0) maxlogpl <- -(deviants/2 + sum(log(W[Z & SUBSET])) + sum(Z & 
SUBSET)) ## ## ---------------- PACK UP THE RESULT -------------------------------- ## result <- list(Call = list(callstring=callstring, cl=cl), Info = list( #%^!ifdef RANDOMEFFECTS has.random=has.random, #%^!endif has.covar=has.covar, has.design=has.design, Yname=Yname, used.cov.names=used.cov.names, allvars=allvars, names.data=names(data), is.df.column=(data.sumry$storage == "dfcolumn"), rownames=row.names(data), correction=prep0$info$correction, rbord=prep0$info$rbord ), Fit= list( fitter=fitter, use.gam=use.gam, fmla=fmla, FIT=FIT, moadf=moadf, Vnamelist=Vnamelist ), Inter = list( ninteract=ninteract, interaction=interaction, iformula=iformula, iused=iused, itags=itags, processes=processes, trivial=trivial, constant=constant ), formula=formula, trend=trend, iformula=iformula, #%^!ifdef RANDOMEFFECTS random=random, #%^!endif npat=npat, data=data, Y=Y, maxlogpl=maxlogpl, datadf=datadf) class(result) <- c("mppm", class(result)) return(result) } # helper functions checkvars <- function(f, b, extra=NULL, bname=short.deparse(substitute(b))){ fname <- short.deparse(substitute(f)) fvars <- variablesinformula(f) bvars <- if(is.character(b)) b else names(b) bvars <- c(bvars, extra) nbg <- !(fvars %in% bvars) if(any(nbg)) { nn <- sum(nbg) stop(paste(ngettext(nn, "Variable", "Variables"), commasep(dQuote(fvars[nbg])), "in", fname, ngettext(nn, "is not one of the", "are not"), "names in", bname)) } return(NULL) } consistentname <- function(x) { xnames <- unlist(lapply(x, getElement, name="name")) return(length(unique(xnames)) == 1) } firstname <- function(z) { z[[1]]$name } allpoisson <- function(x) all(sapply(x, is.poisson.interact)) errorInconsistentRows <- function(what, offending) { stop(paste("There are inconsistent", what, "for the", ngettext(length(offending), "variable", "variables"), commasep(sQuote(offending)), "between different rows of the hyperframe 'data'"), call.=FALSE) } mppm }) is.mppm <- function(x) { inherits(x, "mppm") } coef.mppm <- function(object, 
...) { coef(object$Fit$FIT) } #%^!ifdef RANDOMEFFECTS fixef.mppm <- function(object, ...) { if(object$Fit$fitter == "glmmPQL") fixef(object$Fit$FIT) else coef(object$Fit$FIT) } ranef.mppm <- function(object, ...) { if(object$Fit$fitter == "glmmPQL") ranef(object$Fit$FIT) else as.data.frame(matrix(, nrow=object$npat, ncol=0)) } #%^!endif print.mppm <- function(x, ...) { print(summary(x, ..., brief=TRUE)) } is.poisson.mppm <- function(x) { trivial <- x$Inter$trivial iused <- x$Inter$iused all(trivial[iused]) } quad.mppm <- function(x) { as.solist(x$Y) } data.mppm <- function(x) { solapply(x$Y, getElement, name="data") } windows.mppm <- function(x) { solapply(data.mppm(x), Window) } logLik.mppm <- function(object, ..., warn=TRUE) { if(warn && !is.poisson.mppm(object)) warning(paste("log likelihood is not available for non-Poisson model;", "log-pseudolikelihood returned")) ll <- object$maxlogpl #%^!ifdef RANDOMEFFECTS attr(ll, "df") <- length(fixef(object)) #%^!else # attr(ll, "df") <- length(coef(object)) #%^!endif class(ll) <- "logLik" return(ll) } AIC.mppm <- function(object, ..., k=2, takeuchi=TRUE) { ll <- logLik(object, warn=FALSE) pen <- attr(ll, "df") if(takeuchi && !is.poisson(object)) { vv <- vcov(object, what="all") J <- vv$fisher H <- vv$internals$A1 ## Takeuchi penalty = trace of J H^{-1} = trace of H^{-1} J JiH <- try(solve(H, J), silent=TRUE) if(!inherits(JiH, "try-error")) pen <- sum(diag(JiH)) } return(- 2 * as.numeric(ll) + k * pen) } extractAIC.mppm <- function(fit, scale = 0, k = 2, ..., takeuchi = TRUE) { edf <- length(coef(fit)) aic <- AIC(fit, k = k, takeuchi = takeuchi) c(edf, aic) } getCall.mppm <- function(x, ...) { x$Call$cl } terms.mppm <- function(x, ...) { terms(formula(x)) } nobs.mppm <- function(object, ...) 
{ sum(sapply(data.mppm(object), npoints)) }

## Simulate realisations of a fitted mppm (multiple point process) model.
## Each realisation is a list of point patterns, one per row of the
## original data hyperframe; the 'nsim' realisations are returned as
## columns Sim1, Sim2, ... of a hyperframe.
## Delegates to subfits() to split the mppm into per-row ppm fits,
## then calls simulate() on each sub-model.
simulate.mppm <- function(object, nsim=1, ..., verbose=TRUE) {
  ## one fitted sub-model for each row of the data hyperframe
  subs <- subfits(object)
  nr <- length(subs)
  sims <- list()
  if(verbose) {
    splat("Generating simulated realisations of", nr, "models..")
    state <- list()
  }
  for(irow in seq_len(nr)) {
    ## nsim simulated patterns for row 'irow';
    ## drop=FALSE guarantees a list even when nsim = 1
    sims[[irow]] <- do.call(simulate,
                            resolve.defaults(list(object=subs[[irow]],
                                                  nsim=nsim,
                                                  drop=FALSE),
                                             list(...),
                                             list(progress=FALSE)))
    if(verbose) state <- progressreport(irow, nr, state=state)
  }
  ## transpose the results: gather the j-th simulation from every row
  ## into a single hyperframe column 'Simj'
  sim1list <- lapply(sims, "[[", i=1)
  h <- hyperframe("Sim1"=sim1list)
  if(nsim > 1) {
    for(j in 2:nsim) {
      simjlist <- lapply(sims, "[[", i=j)
      hj <- hyperframe(Sim=simjlist)
      names(hj) <- paste0("Sim", j)
      h <- cbind(h, hj)
    }
  }
  return(h)
}
spatstat/R/softcore.R0000755000176200001440000000724513115271120014271 0ustar liggesusers#
#
#    softcore.S
#
#    $Revision: 2.15 $ $Date: 2016/02/16 01:39:12 $
#
#    Soft core processes.
#
#    Softcore()   create an instance of a soft core process
#                 [an object of class 'interact']
#
#
# -------------------------------------------------------------------
#
Softcore <- local({
  BlankSoftcore <- list(
    name     = "Soft core process",
    creator  = "Softcore",
    family   = "pairwise.family",  # evaluated later
    ## pair potential: log interaction term for two points at distance d
    pot      = function(d, par) {
      sig0 <- par$sigma0
      if(is.na(sig0)) {
        p <- -d^(-2/par$kappa)
      } else {
        # expand around sigma0 and set large negative numbers to -Inf
        drat <- d/sig0
        p <- -drat^(-2/par$kappa)
        p[p < -25] <- -Inf
      }
      return(p)
    },
    par      = list(kappa = NULL, sigma0=NA),  # filled in later
    parnames = c("Exponent kappa", "Initial approximation to sigma"),
    selfstart = function(X, self) {
      # self starter for Softcore
      if(npoints(X) < 2) {
        # not enough points to make any decisions
        return(self)
      }
      md <- minnndist(X)
      if(md == 0) {
        warning(paste("Pattern contains duplicated points:",
                      "impossible under Softcore model"))
        return(self)
      }
      kappa <- self$par$kappa
      if(!is.na(sigma0 <- self$par$sigma0)) {
        # value fixed by user or previous invocation
        # check it
        if((md/sigma0)^(-2/kappa) > 25)
warning(paste("Initial approximation sigma0 is too large;", "some data points will have zero probability")) return(self) } # take sigma0 = minimum interpoint distance Softcore(kappa=kappa, sigma0=md) }, init = function(self) { kappa <- self$par$kappa if(!is.numeric(kappa) || length(kappa) != 1 || kappa <= 0 || kappa >= 1) stop(paste("Exponent kappa must be a", "positive number less than 1")) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta <- as.numeric(coeffs[1]) sigma <- theta^(self$par$kappa/2) if(!is.na(sig0 <- self$par$sigma0)) sigma <- sigma * sig0 return(list(param=list(sigma=sigma), inames="interaction parameter sigma", printable=signif(sigma))) }, valid = function(coeffs, self) { theta <- coeffs[1] return(is.finite(theta) && (theta >= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { # distance d beyond which log(interaction factor) <= epsilon if(anyNA(coeffs) || epsilon == 0) return(Inf) theta <- as.numeric(coeffs[1]) kappa <- self$par$kappa sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 return(sig0 * (theta/epsilon)^(kappa/2)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral kappa <- self$par$kappa sigma <- (self$interpret)(coeffs, self)$param$sigma return(pi * (sigma^2) * gamma(1 - kappa)) }, version=NULL # filled in later ) class(BlankSoftcore) <- "interact" Softcore <- function(kappa, sigma0=NA) { instantiate.interact(BlankSoftcore, list(kappa=kappa, sigma0=sigma0)) } Softcore <- intermaker(Softcore, BlankSoftcore) Softcore }) spatstat/R/dist2dpath.R0000755000176200001440000000415313115271075014517 0ustar liggesusers# # dist2dpath.R # # $Revision: 1.10 $ $Date: 2017/06/05 10:31:58 $ # # dist2dpath compute shortest path distances # dist2dpath <- function(dist, method="C") { ## given a matrix of distances between adjacent vertices ## (value = Inf if not adjacent) ## 
## compute the matrix of shortest path distances
  stopifnot(is.matrix(dist) && isSymmetric(dist))
  stopifnot(all(diag(dist) == 0))
  findist <- dist[is.finite(dist)]
  if(any(findist < 0))
    stop("Some distances are negative")
  ##
  n <- nrow(dist)
  if(n <= 1L) return(dist)
  cols <- col(dist)
  ## numerical tolerance for deciding whether path lengths have changed
  tol <- .Machine$double.eps
  posdist <- findist[findist > 0]
  if(length(posdist) > 0) {
    shortest <- min(posdist)
    tol2 <- shortest/max(n,1024)
    tol <- max(tol, tol2)
  }
  ##
  switch(method,
         interpreted={
           ## iterative relaxation in pure R (slow; retained for checking)
           dpathnew <- dpath <- dist
           changed <- TRUE
           while(changed) {
             for(j in 1:n)
               dpathnew[,j] <- apply(dpath + dist[j,][cols], 1L, min)
             unequal <- (dpathnew != dpath)
             changed <- any(unequal) &
               any(abs(dpathnew-dpath)[unequal] > tol)
             dpath <- dpathnew
           }
         },
         C={
           ## compiled code; non-adjacent pairs are encoded as -1
           adj <- is.finite(dist)
           diag(adj) <- TRUE
           d <- dist
           d[!adj] <- -1
           z <- .C("Ddist2dpath",
                   nv=as.integer(n),
                   d=as.double(d),
                   adj=as.integer(adj),
                   dpath=as.double(numeric(n*n)),
                   tol=as.double(tol),
                   niter=as.integer(integer(1L)),
                   status=as.integer(integer(1L)),
                   PACKAGE = "spatstat")
           if(z$status == -1L)
             warning(paste("C algorithm did not converge to tolerance",
                           tol,
                           "after", z$niter, "iterations",
                           "on", n, "vertices and",
                           sum(adj) - n, "edges"))
           dpath <- matrix(z$dpath, n, n)
           ## value=-1 implies unreachable
           dpath[dpath < 0] <- Inf
         },
         stop(paste("Unrecognised method", sQuote(method))))
  return(dpath)
}
spatstat/R/vblogistic.R0000644000176200001440000002127013115225157014612 0ustar liggesusers#' Variational Bayesian Logistic regression
#'
#' author: Tuomas Rajala < tuomas.rajala a iki.fi >
#'
#' Copyright (C) Tuomas Rajala 2014
#' GNU Public License GPL 2.0 | 3.0
#'
#' Special version for 'spatstat'
#'
#' $Revision: 1.5 $ $Date: 2015/04/02 02:17:19 $
#'
####################################################
#' Used inside ppm
vblogit.fmla <- function(formula, offset, data, subset, weights,
                         verbose=FALSE, epsilon=0.01, ...)
{ mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data", "subset", "offset"), names(mf), 0L) mf <- mf[c(1L, m)] mf$drop.unused.levels <- TRUE mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) mt <- attr(mf, "terms") offset <- model.offset(mf) y <- model.response(mf, "any") X <- model.matrix(mt, mf) colnames(X)[1] <- "(Intercept)" Vnames <- colnames(X) #' then we fit: fit <- vblogit(y=y, X=X, offset=offset, verb=verbose, eps=epsilon, ...) #' names(fit$coefficients) <- names(fit$coef) <- Vnames #' add some variables to conform to summary.ppm fit$se <- sqrt(diag(as.matrix(fit$S))) fit$call <- match.call(expand.dots=FALSE) fit$formula <- formula fit$method <- "vblogit" fit$model <- mf fit$terms <- mt fit$offset <- offset fit$data <- data fit$xlevels <- .getXlevels(mt, mf) fit } ################################################### # the fitting function: vblogit <- local({ ## helper functions needed: lambda <- function(x) { -tanh(x/2)/(4*x) } mygamma <- function(x) { x/2 - log(1+exp(x)) + x*tanh(x/2)/4 } vblogit <- function(y, X, offset, eps=1e-2, m0, S0, S0i, xi0, verb=FALSE, maxiter=1000, ...) { ## Logistic regression using JJ96 idea. Ormeron00 notation. ## p(y, w, t) = p(y | w) p(w | t) p(t) ## ## Y ~ Bern(logit(Xw + offset)) ## w ~ N(m0, S0) iid ## ## "*0" are fixed priors. ## cat2 <- if(verb) cat else function(...) NULL varnames <- colnames(data.frame(as.matrix(X[1:2,]))) ## Write N <- length(y) K <- ncol(X) #' #' #' offset if(missing('offset')) offset <- 0 if(length(offset) eps & (iter<-iter+1) <= maxiter le_hist <- c(le_hist, le) cat2("diff:", devi, " \r") } if(iter == maxiter) warning("Maximum iteration limit reached.") cat2("\n") ## done. 
Compile: est <- list(m=m, S=S, Si=Si, xi=xi, lambda_xi=la) #' Marginal evidence est$logLik <- le #' Compute max logLik with the Bernoulli model; #' this should be what glm gives: est$logLik_ML <- as.numeric( t(y)%*%(X%*%m+offset) - sum( log( 1 + exp(X%*%m+offset)) ) ) #' Max loglik with the approximation est$logLik_ML2 <- as.numeric( t(y)%*%(X%*%m + offset) + t(m)%*%t(X*la)%*%X%*%m - 0.5*sum(X%*%m) + sum(mygamma(xi)) + 2*t(offset*la)%*%X%*%m + t(offset*la)%*%offset - 0.5 * sum(offset) ) #' some additional parts, like in glm output est$coefficients <- est$m[,1] names(est$coefficients) <- varnames est$call <- sys.call() est$converged <- !(maxiter==iter) #' more additional stuff est$logp_hist <- le_hist est$parameters <- list(eps=eps, maxiter=maxiter) est$priors <- list(m=m0, S=S0) est$iterations <- iter class(est) <- "vblogit" ## return est } vblogit }) ################################################### #' Predict method predict.vblogit <- local({ sigmoid <- function(e) 1/(1+exp(-e)) predict.vblogit <- function(object, newdata = NULL, type = c("link", "response", "terms"), se.fit = FALSE, dispersion = NULL, terms = NULL, na.action = na.pass, ...) 
{
    type <- match.arg(type)
    if(type != "response") stop("type not supported.")
    if(missing(newdata)) {
      stop("not implemented.")
    } else{ # newdata
      #' build the new covariate matrix, inspired by predict.lm
      tt <- terms(object)
      Terms <- delete.response(tt)
      m <- model.frame(Terms, newdata, na.action = na.action,
                       xlev = object$xlevels)
      X <- model.matrix(Terms, m, contrasts.arg = object$contrasts)
      #' reconstruct the offset, combining any offset() term in the
      #' formula with an offset supplied in the original call
      offset <- rep(0, nrow(X))
      if (!is.null(off.num <- attr(tt, "offset")))
        for (i in off.num) offset <- offset +
          eval(attr(tt, "variables")[[i + 1]], newdata)
      if (!is.null(object$call$offset))
        offset <- offset + eval(object$call$offset, newdata)
      #' predict using probit approximation to logit-function
      mu <- object$m
      S <- object$S
      mua <- as.numeric(X%*%mu)+offset
      s2a <- diag(X%*%S%*%t(X) )
      predictor <- sigmoid( as.numeric( mua/sqrt(1+pi*s2a/8) ) )
      names(predictor) <- rownames(X)
    }
    predictor
  }
  predict.vblogit
})
#
###################################################
# print method
#' Print a brief description of a fitted vblogit object
#' (coefficients, evidence lower bound and convergence status).
print.vblogit <- function(x, ...) {
  splat("Variational Bayes logistic regression fit")
  cat("\nCall: ")
  print(x$call)
  cat("\nCoefficients:\n")
  print(x$coefficients)
  cat("\n")
  splat("Log-likelihood:", x$logLik)
  splat("Converged:", x$converged)
  splat("Convergence threshold:", x$parameters$eps)
  splat("Iterations / max:", x$iterations, "/", x$parameters$maxiter)
  splat("* Caution: the estimates are conditional on convergence.")
  invisible(NULL)
}
####################################################
# vblogit family method
family.vblogit <- function(object, ...) binomial()
####################################################
#' vblogit fit summary method
summary.vblogit <- function(object, ...)
{ splat("Variational Bayes logistic regression fit") cat("\nCall: ") print(object$call) splat("\nCoefficients and posterior 95% central regions:") vna <- names(object$coefficients) s <- sqrt(diag(object$S)) q0 <- qnorm(c(0.025, 0.975)) m <- as.numeric(object$m) df <- data.frame(estimate=m, "low 0.05"=m+s*q0[1], "high 97.5"=m+s*q0[2], "prior mean"=object$priors$m, "prior var"=diag(object$priors$S)) rownames(df) <- vna print(df) cat("\n") splat("Lower bound for log-likelihood:", object$logLik) invisible(NULL) } #################################################### # Coef coef.vblogit <- function(object, ...) object$coefficients #################################################### # Log-evidence logLik.vblogit <- function(object, ...) { object$logLik } spatstat/R/eval.fasp.R0000755000176200001440000000560513115271075014333 0ustar liggesusers# # eval.fasp.R # # # eval.fasp() Evaluate expressions involving fasp objects # # compatible.fasp() Check whether two fasp objects are compatible # # $Revision: 1.11 $ $Date: 2016/02/11 10:17:12 $ # eval.fasp <- local({ eval.fasp <- function(expr, envir, dotonly=TRUE) { #' convert syntactic expression to 'expression' object e <- as.expression(substitute(expr)) #' convert syntactic expression to call ## elang <- substitute(expr) #' find names of all variables in the expression varnames <- all.vars(e) if(length(varnames) == 0) stop("No variables in this expression") ## get the actual variables if(missing(envir)) { envir <- sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- lapply(as.list(varnames), get, envir=envir) names(vars) <- varnames ## find out which ones are fasp objects isfasp <- unlist(lapply(vars, inherits, what="fasp")) if(!any(isfasp)) stop("No fasp objects in this expression") fasps <- vars[isfasp] nfasps <- length(fasps) ## test whether the fasp objects are compatible if(nfasps > 1L && !(do.call(compatible, unname(fasps)))) stop(paste(if(nfasps > 2) "some of" else NULL, 
"the objects", commasep(sQuote(names(fasps))), "are not compatible")) ## copy first object as template result <- fasps[[1L]] which <- result$which nr <- nrow(which) nc <- ncol(which) ## create environment for evaluation fenv <- new.env() ## for each [i,j] extract fv objects and evaluate expression for(i in seq_len(nr)) for(j in seq_len(nc)) { ## extract fv objects at position [i,j] funs <- lapply(fasps, getpanel, i=i, j=j) ## insert into list of argument values vars[isfasp] <- funs ## assign them into the right environment for(k in seq_along(vars)) assign(varnames[k], vars[[k]], envir=fenv) ## evaluate resultij <- eval(substitute(eval.fv(ee,ff,dd), list(ee=e, ff=fenv, dd=dotonly))) ## insert back into fasp result$fns[[which[i,j] ]] <- resultij } result$title <- paste("Result of eval.fasp(", e, ")", sep="") return(result) } getpanel <- function(x, i, j) { as.fv(x[i,j]) } eval.fasp }) compatible.fasp <- function(A, B, ...) { verifyclass(A, "fasp") if(missing(B)) return(TRUE) verifyclass(B, "fasp") dimA <- dim(A$which) dimB <- dim(B$which) if(!all(dimA == dimB)) return(FALSE) for(i in seq_len(dimA[1L])) for(j in seq_len(dimA[2L])) { Aij <- as.fv(A[i,j]) Bij <- as.fv(B[i,j]) if(!compatible.fv(Aij, Bij)) return(FALSE) } # A and B agree if(length(list(...)) == 0) return(TRUE) # recursion return(compatible.fasp(B, ...)) } spatstat/R/geyer.R0000755000176200001440000003073013115271075013564 0ustar liggesusers# # # geyer.S # # $Revision: 2.38 $ $Date: 2017/06/05 10:31:58 $ # # Geyer's saturation process # # Geyer() create an instance of Geyer's saturation process # [an object of class 'interact'] # # Geyer <- local({ # .......... template .......... 
BlankGeyer <- list( name = "Geyer saturation process", creator = "Geyer", family = "pairsat.family", # evaluated later pot = function(d, par) { (d <= par$r) # same as for Strauss }, par = list(r = NULL, sat=NULL), # filled in later parnames = c("interaction distance","saturation parameter"), init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") if(!is.numeric(sat) || length(sat) != 1 || sat < 0) stop("saturation parameter sat must be a positive number") }, update = NULL, # default OK print = NULL, # default OK plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction unitz <- unitname(fint) if(!identical(inter$name, "Geyer saturation process")) stop("Tried to plot the wrong kind of interaction") #' fitted interaction coefficient theta <- fint$coefs[fint$Vnames] #' interaction radius r <- inter$par$r sat <- inter$par$sat xlim <- resolve.1.default(list(xlim=c(0, 1.25 * r)), list(...)) rmax <- max(xlim, d) if(is.null(d)) { d <- seq(from=0, to=rmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) } #' compute interaction between two points at distance d y <- exp(theta * sat * (d <= r)) #' compute `fv' object fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "maximal interaction h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(fun), list(...), list(ylim=range(0,1,y)))) return(invisible(fun)) }, #' end of function 'plot' interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1L]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1L]) sat <- self$par$sat return(is.finite(loggamma) && 
(is.finite(sat) || loggamma <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(any(!is.na(coeffs))) { loggamma <- coeffs[1L] if(!is.na(loggamma) && (abs(loggamma) <= epsilon)) return(0) } return(2 * r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ..., halfway=FALSE, check=TRUE) { # fast evaluator for Geyer interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Geyer") r <- potpars$r sat <- potpars$sat # first ensure all data points are in U nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1L] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window, check=check) # correspondingly augment the list of equal pairs originalrows <- seq_len(nU) newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } # determine saturated pair counts counts <- strausscounts(U, X, r, EqualPairs) satcounts <- pmin.int(sat, counts) satcounts <- matrix(satcounts, ncol=1) if(halfway) { # trapdoor used by suffstat() answer <- satcounts } else if(sat == Inf) { # no saturation: fast code answer <- 2 * satcounts } else { # extract counts for data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xcounts <- integer(npoints(X)) Xcounts[Xindex] <- counts[Uindex] # evaluate change in saturated counts of other data points 
change <- geyercounts(U, X, r, sat, Xcounts, EqualPairs) answer <- satcounts + change answer <- matrix(answer, ncol=1) } if(somemissing) answer <- answer[originalrows, , drop=FALSE] return(answer) }, delta2 = function(X,inte,correction, ..., sparseOK=FALSE) { # Sufficient statistic for second order conditional intensity # h(X[i] | X) - h(X[i] | X[-j]) # Geyer interaction if(!(correction %in% c("border", "none"))) return(NULL) r <- inte$par$r sat <- inte$par$sat result <- geyerdelta2(X, r, sat, sparseOK=sparseOK) return(result) } ) class(BlankGeyer) <- "interact" Geyer <- function(r, sat) { instantiate.interact(BlankGeyer, list(r = r, sat=sat)) } Geyer <- intermaker(Geyer, BlankGeyer) Geyer }) # ........... externally visible auxiliary functions ......... geyercounts <- function(U, X, r, sat, Xcounts, EqualPairs) { # evaluate effect of adding dummy point or deleting data point # on saturated counts of other data points stopifnot(is.numeric(r)) stopifnot(is.numeric(sat)) # for C calls we need finite numbers stopifnot(is.finite(r)) stopifnot(is.finite(sat)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oU <- fave.order(U$x) Xsort <- X[oX] Usort <- U[oU] nX <- npoints(X) nU <- npoints(U) Xcountsort <- Xcounts[oX] # inverse: data point i has sorted position i' = rankX[i] rankX <- integer(nX) rankX[oX] <- seq_len(nX) rankU <- integer(nU) rankU[oU] <- seq_len(nU) # map from quadrature points to data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xsortindex <- rankX[Xindex] Usortindex <- rankU[Uindex] Cmap <- rep.int(-1L, nU) Cmap[Usortindex] <- Xsortindex - 1L # call C routine zz <- .C("Egeyer", nnquad = as.integer(nU), xquad = as.double(Usort$x), yquad = as.double(Usort$y), quadtodata = as.integer(Cmap), nndata = as.integer(nX), xdata = as.double(Xsort$x), ydata = as.double(Xsort$y), tdata = as.integer(Xcountsort), rrmax = as.double(r), ssat = as.double(sat), result = as.double(numeric(nU)), PACKAGE = "spatstat") result <- zz$result[rankU] 
return(result) } geyerdelta2 <- local({ geyerdelta2 <- function(X, r, sat, ..., sparseOK=FALSE) { # Sufficient statistic for second order conditional intensity # Geyer model stopifnot(is.numeric(sat) && length(sat) == 1 && sat > 0) # X could be a ppp or quad. if(is.ppp(X)) { # evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j # i.e. h(X[i]|X) - h(X[i]|X[-j]) where h is first order cif statistic return(geydelppp(X, r, sat, sparseOK)) } else if(inherits(X, "quad")) { # evaluate \Delta_{u_i} \Delta_{u_j} S(x) for quadrature points u_i, u_j return(geydelquad(X, r, sat, sparseOK)) } else stop("Internal error: X should be a ppp or quad object") } geydelppp <- function(X, r, sat, sparseOK) { # initialise nX <- npoints(X) result <- if(!sparseOK) matrix(0, nX, nX) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nX, nX)) # identify all r-close pairs (ordered pairs i ~ j) a <- closepairs(X, r, what="indices") I <- a$i J <- a$j IJ <- cbind(I,J) # count number of r-neighbours for each point # (consistently with the above) tvals <- table(factor(I, levels=1:nX)) # Compute direct part # (arising when i~j) tI <- tvals[I] tJ <- tvals[J] result[IJ] <- pmin(sat, tI) - pmin(sat, tI - 1) + pmin(sat, tJ) - pmin(sat, tJ - 1) # Compute indirect part # (arising when i~k and j~k for another point k) # First find all such triples ord <- (I < J) vees <- edges2vees(I[ord], J[ord], nX) # evaluate contribution of (k, i, j) KK <- vees$i tKK <- tvals[KK] contribKK <- pmin(sat, tKK) - 2 * pmin(sat, tKK-1) + pmin(sat, tKK-2) # for each (i, j), sum the contributions over k II <- vees$j JJ <- vees$k if(!sparseOK) { II <- factor(II, levels=1:nX) JJ <- factor(JJ, levels=1:nX) # was: # delta3 <- tapply(contribKK, list(I=II, J=JJ), sum) # delta3[is.na(delta3)] <- 0 delta3 <- tapplysum(contribKK, list(I=II, J=JJ)) } else { delta3 <- sparseMatrix(i=II, j=JJ, x=contribKK, dims=c(nX, nX)) } # symmetrise and combine result <- result + delta3 + t(delta3) return(result) } 
geydelquad <- function(Q, r, sat, sparseOK) { Z <- is.data(Q) U <- union.quad(Q) nU <- npoints(U) nX <- npoints(Q$data) result <- if(!sparseOK) matrix(0, nU, nU) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nU, nU)) # identify all r-close pairs U[i], U[j] a <- closepairs(U, r, what="indices") I <- a$i J <- a$j IJ <- cbind(I, J) # tag which ones are data points zI <- Z[I] zJ <- Z[J] # count t(U[i], X) IzJ <- I[zJ] JzJ <- J[zJ] tvals <- table(factor(IzJ, levels=1:nU)) # Compute direct part # (arising when U[i]~U[j]) tI <- tvals[I] tJ <- tvals[J] tIJ <- tI - zJ tJI <- tJ - zI result[IJ] <- pmin(sat, tIJ + 1L) - pmin(sat, tIJ) + pmin(sat, tJI + 1L) - pmin(sat, tJI) # Compute indirect part # (arising when U[i]~X[k] and U[j]~X[k] for another point X[k]) # First find all such triples # Group close pairs X[k] ~ U[j] by index k spl <- split(IzJ, factor(JzJ, levels=1:nX)) grlen <- lengths(spl) # Assemble list of triples U[i], X[k], U[j] # by expanding each pair U[i], X[k] JJ <- unlist(spl[JzJ]) II <- rep(IzJ, grlen[JzJ]) KK <- rep(JzJ, grlen[JzJ]) # remove identical pairs i = j ok <- II != JJ II <- II[ok] JJ <- JJ[ok] KK <- KK[ok] # evaluate contribution of each triple tKK <- tvals[KK] zII <- Z[II] zJJ <- Z[JJ] tKIJ <- tKK - zII - zJJ contribKK <- pmin(sat, tKIJ + 2) - 2 * pmin(sat, tKIJ + 1) + pmin(sat, tKIJ) # for each (i, j), sum the contributions over k if(!sparseOK) { II <- factor(II, levels=1:nU) JJ <- factor(JJ, levels=1:nU) # was: # delta4 <- tapply(contribKK, list(I=II, J=JJ), sum) # delta4[is.na(delta4)] <- 0 delta4 <- tapplysum(contribKK, list(I=II, J=JJ)) } else { delta4 <- sparseMatrix(i=II, j=JJ, x=contribKK, dims=c(nU, nU)) } # combine result <- result + delta4 return(result) } geyerdelta2 }) spatstat/R/quantiledensity.R0000644000176200001440000000513313115225157015667 0ustar liggesusers#' #' quantiledensity.R #' #' quantile method for class 'density' #' #' Also a CDF from a 'density' #' #' $Revision: 1.3 $ $Date: 2015/09/01 11:53:15 $ 
quantile.density <- local({ quantile.density <- function(x, probs = seq(0, 1, 0.25), names = TRUE, ..., warn=TRUE) { stopifnot(inherits(x, "density")) #' check whether density estimate was restricted to an interval if(warn && is.call(cl <- x$call) && any(c("from", "to") %in% names(cl))) warning(paste("Density was normalised within the computed range", "of x values", prange(c(cl$from, cl$to))), call.=FALSE) #' validate probs eps <- 100 * .Machine$double.eps if(any((p.ok <- !is.na(probs)) & (probs < -eps | probs > 1 + eps))) stop("'probs' outside [0,1]") if (na.p <- any(!p.ok)) { o.pr <- probs probs <- probs[p.ok] probs <- pmax(0, pmin(1, probs)) } np <- length(probs) qs <- rep(NA_real_, np) if (np > 0) { #' extract density values xx <- x$x yy <- x$y nn <- length(xx) #' integrate, normalise Fx <- cumsum(yy * c(0, diff(xx))) Fx <- Fx/Fx[nn] #' quantile for(j in 1:np) { ii <- min(which(Fx >= probs[j])) if(!is.na(ii) && ii >= 1 && ii <= nn) qs[j] <- xx[ii] } if (names && np > 0L) { names(qs) <- format_perc(probs) } } if (na.p) { o.pr[p.ok] <- qs names(o.pr) <- rep("", length(o.pr)) names(o.pr)[p.ok] <- names(qs) return(o.pr) } else return(qs) } format_perc <- function (x, digits = max(2L, getOption("digits")), probability = TRUE, use.fC = length(x) < 100, ...) { if (length(x)) { if (probability) x <- 100 * x paste0(if (use.fC) formatC(x, format = "fg", width = 1, digits = digits) else format(x, trim = TRUE, digits = digits, ...), "%") } else character(0) } quantile.density }) CDF <- function(f, ...) 
{ UseMethod("CDF") } CDF.density <- function(f, ..., warn=TRUE) { stopifnot(inherits(f, "density")) #' check whether density estimate was restricted to an interval if(warn && is.call(cl <- f$call) && any(c("from", "to") %in% names(cl))) warning(paste("Density was normalised within the computed range", "of x values", prange(c(cl$from, cl$to))), call.=FALSE) #' integrate xx <- f$x yy <- f$y nn <- length(xx) Fx <- cumsum(yy * c(0, diff(xx))) #' normalise Fx <- Fx/Fx[nn] #' FF <- approxfun(xx, Fx, method="linear", rule=2) return(FF) } spatstat/R/rshift.psp.R0000755000176200001440000000270513115271120014541 0ustar liggesusers# # rshift.psp.R # # $Revision: 1.6 $ $Date: 2011/05/18 09:10:12 $ # rshift.psp <- function(X, ..., group=NULL, which=NULL) { verifyclass(X, "psp") # process arguments W <- rescue.rectangle(X$window) arglist <- handle.rshift.args(W, ..., edgedefault="erode") radius <- arglist$radius width <- arglist$width height <- arglist$height edge <- arglist$edge clip <- arglist$clip if(W$type != "rectangle") stop("Not yet implemented for non-rectangular windows") if(edge != "erode") stop(paste("Only implemented for edge=", dQuote("erode"))) # split into groups if(is.null(group)) Y <- list(X) else { stopifnot(is.factor(group)) stopifnot(length(group) == X$n) Y <- lapply(levels(group), function(l, X, group) {X[group == l]}, X=X, group=group) } ############ loop ################ result <- psp(numeric(0), numeric(0), numeric(0), numeric(0), X$window) for(i in seq_along(Y)) { Z <- Y[[i]] # generate random translation vector if(!is.null(radius)) jump <- runifdisc(1, radius=radius) else { jump <- list(x=runif(1, min=0, max=width), y=runif(1, min=0, max=height)) } # translate segments Zsh <- shift(Z, c(jump$x, jump$y)) Zsh$window <- W # append to result result <- append.psp(result, Zsh) } # clip if(!is.null(clip)) result <- result[clip] return(result) } spatstat/R/periodify.R0000755000176200001440000000751113115271120014433 0ustar liggesusers# # periodify.R # # 
replicate a pattern periodically # # $Revision: 1.3 $ $Date: 2011/04/17 05:52:50 $ # periodify <- function(X, ...) { UseMethod("periodify") } periodify.ppp <- function(X, nx=1, ny=1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) { # sanity checks if(!missing(nx) || !missing(ny)) { if(is.null(nx)) nx <- 1 if(is.null(ny)) ny <- 1 if(length(nx) != 1 || length(ny) != 1) stop("nx and ny should be single integers") if(nx != round(nx) || ny != round(ny)) stop("nx and ny should be integers") } force(ixy) W <- X$window isrect <- (W$type == "rectangle") if(warn && combine && !isrect) warning("X has a non-rectangular window") else isrect <- isrect && all(diff(nx) == 1) && all(diff(ny) == 1) width <- diff(W$xrange) height <- diff(W$yrange) shifts <- cbind(ixy[,1] * width, ixy[,2] * height) Xshift <- list() for(i in 1:nrow(shifts)) Xshift[[i]] <- shift(X, vec=as.numeric(shifts[i, ])) if(!combine) return(Xshift) Wnew <- if(isrect) { owin(range(range(W$xrange) + range(shifts[,1])), range(range(W$yrange) + range(shifts[,2]))) } else NULL Z <- do.call(superimpose, append(Xshift, list(W=Wnew, check=check))) return(Z) } periodify.psp <- function(X, nx=1, ny=1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) { # sanity checks if(!missing(nx) || !missing(ny)) { if(is.null(nx)) nx <- 1 if(is.null(ny)) ny <- 1 if(length(nx) != 1 || length(ny) != 1) stop("nx and ny should be single integers") if(nx != round(nx) || ny != round(ny)) stop("nx and ny should be integers") } force(ixy) W <- X$window isrect <- (W$type == "rectangle") if(warn && combine && !isrect) warning("X has a non-rectangular window") else isrect <- isrect && all(diff(nx) == 1) && all(diff(ny) == 1) width <- diff(W$xrange) height <- diff(W$yrange) shifts <- cbind(ixy[,1] * width, ixy[,2] * height) Xshift <- list() for(i in 1:nrow(shifts)) Xshift[[i]] <- shift(X, vec=as.numeric(shifts[i, ])) if(!combine) return(Xshift) Wnew 
<- if(isrect) { owin(range(range(W$xrange) + range(shifts[,1])), range(range(W$yrange) + range(shifts[,2]))) } else NULL Z <- do.call(superimpose, append(Xshift, list(W=Wnew, check=check))) return(Z) } periodify.owin <- function(X, nx=1, ny=1, ..., combine=TRUE, warn=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) { # sanity checks if(!missing(nx) || !missing(ny)) { if(is.null(nx)) nx <- 1 if(is.null(ny)) ny <- 1 if(length(nx) != 1 || length(ny) != 1) stop("nx and ny should be single integers") if(nx != round(nx) || ny != round(ny)) stop("nx and ny should be integers") } force(ixy) isrect <- (X$type == "rectangle") if(warn && combine && !isrect) warning("X is not rectangular") else isrect <- isrect && all(diff(nx) == 1) && all(diff(ny) == 1) width <- diff(X$xrange) height <- diff(X$yrange) shifts <- cbind(ixy[,1] * width, ixy[,2] * height) if(combine) { if(isrect) { # result is a rectangle Y <- owin(range(range(X$xrange) + range(shifts[,1])), range(range(X$yrange) + range(shifts[,2]))) } else { # result is another type of window for(i in 1:nrow(shifts)) { Xi <- shift(X, vec=as.numeric(shifts[i, ])) Y <- if(i == 1) Xi else union.owin(Y, Xi) } } } else { # result is a list Y <- list() for(i in 1:nrow(shifts)) Y[[i]] <- shift(X, vec=as.numeric(shifts[i, ])) } return(Y) } spatstat/R/idw.R0000755000176200001440000000436613115271075013242 0ustar liggesusers# # idw.R # # Inverse-distance weighted smoothing # # $Revision: 1.9 $ $Date: 2017/06/05 10:31:58 $ idw <- function(X, power=2, at="pixels", ...) { stopifnot(is.ppp(X) && is.marked(X)) marx <- marks(X) if(is.data.frame(marx)) { if(ncol(marx) > 1) { # multiple columns of marks - process one-by-one out <- list() for(j in 1:ncol(marx)) out[[j]] <- idw(X %mark% marx[,j], power=power, at=at, ...) 
names(out) <- names(marx) switch(at, pixels = { out <- as.solist(out) }, points = { out <- as.data.frame(out) }) return(out) } else marx <- marx[,1L] } if(!is.numeric(marx)) stop("Marks must be numeric") check.1.real(power) switch(at, pixels = { # create grid W <- as.mask(as.owin(X), ...) dim <- W$dim npixels <- prod(dim) # call C z <- .C("Cidw", x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npoints(X)), xstart = as.double(W$xcol[1L]), xstep = as.double(W$xstep), nx = as.integer(dim[2L]), ystart = as.double(W$yrow[1L]), ystep = as.double(W$ystep), ny = as.integer(dim[1L]), power = as.double(power), num = as.double(numeric(npixels)), den = as.double(numeric(npixels)), rat = as.double(numeric(npixels)), PACKAGE = "spatstat") out <- as.im(matrix(z$rat, dim[1L], dim[2L]), W=W) out <- out[W, drop=FALSE] }, points={ npts <- npoints(X) z <- .C("idwloo", x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npts), power = as.double(power), num = as.double(numeric(npts)), den = as.double(numeric(npts)), rat = as.double(numeric(npts)), PACKAGE = "spatstat") out <- z$rat }) return(out) } spatstat/R/close3Dpairs.R0000644000176200001440000001421313115271075014777 0ustar liggesusers# # close3Dpairs.R # # $Revision: 1.9 $ $Date: 2017/06/05 10:31:58 $ # # extract the r-close pairs from a 3D dataset # # closepairs.pp3 <- local({ closepairs.pp3 <- function(X, rmax, twice=TRUE, what=c("all", "indices"), distinct=TRUE, neat=TRUE, ...) 
{ verifyclass(X, "pp3") what <- match.arg(what) stopifnot(is.numeric(rmax) && length(rmax) == 1L) stopifnot(is.finite(rmax)) stopifnot(rmax >= 0) ordered <- list(...)$ordered if(missing(twice) && !is.null(ordered)) { warning("Obsolete argument 'ordered' has been replaced by 'twice'") twice <- ordered } npts <- npoints(X) nama <- switch(what, all = c("i", "j", "xi", "yi", "zi", "xj", "yj", "zj", "dx", "dy", "dz", "d"), indices = c("i", "j")) names(nama) <- nama if(npts == 0) { null.answer <- lapply(nama, nuttink) return(null.answer) } ## sort points by increasing x coordinate oo <- fave.order(coords(X)$x) Xsort <- X[oo] ## First make an OVERESTIMATE of the number of pairs nsize <- ceiling(5 * pi * (npts^2) * (rmax^3)/volume(as.box3(X))) nsize <- max(1024, nsize) if(nsize > .Machine$integer.max) { warning( "Estimated number of close pairs exceeds maximum possible integer", call.=FALSE) nsize <- .Machine$integer.max } ## Now extract pairs XsortC <- coords(Xsort) x <- XsortC$x y <- XsortC$y z <- XsortC$z r <- rmax ng <- nsize storage.mode(x) <- "double" storage.mode(y) <- "double" storage.mode(z) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" ## go a <- switch(what, all = { .Call("close3pairs", xx=x, yy=y, zz=z, rr=r, nguess=ng, PACKAGE = "spatstat") }, indices = { .Call("close3IJpairs", xx=x, yy=y, zz=z, rr=r, nguess=ng, PACKAGE = "spatstat") }) names(a) <- nama ## convert i,j indices to original sequence a$i <- oo[a$i] a$j <- oo[a$j] ## handle options if(twice) { ## both (i, j) and (j, i) should be returned a <- as.data.frame(a) a <- as.list(rbind(a, swapdata(a, what))) } else if(neat) { ## enforce i < j swap <- with(a, (j < i)) if(any(swap)) { a <- as.data.frame(a) a[swap,] <- swapdata(a[swap, ,drop=FALSE], what) a <- as.list(a) } } ## add pairs of identical points? 
if(!distinct) { ii <- seq_len(npts) xtra <- data.frame(i = ii, j=ii) if(what == "all") { coo <- coords(X)[, c("x","y","z")] zeroes <- rep(0, npts) xtra <- cbind(xtra, coo, coo, zeroes, zeroes, zeroes, zeroes) } a <- as.list(rbind(as.data.frame(a), xtra)) } ## done return(a) } swapdata <- function(a, what) { switch(what, all = { with(a, data.frame(i = j, j = i, xi = xj, yi = yj, zi = zj, xj = xi, yj = yi, zj = zi, dx = -dx, dy = -dy, dz = -dz, d = d)) }, indices = { with(a, data.frame(i=j, j=i)) }) } nuttink <- function(x) numeric(0) closepairs.pp3 }) ####################### crosspairs.pp3 <- local({ crosspairs.pp3 <- function(X, Y, rmax, what=c("all", "indices"), ...) { verifyclass(X, "pp3") verifyclass(Y, "pp3") what <- match.arg(what) stopifnot(is.numeric(rmax) && length(rmax) == 1L && rmax >= 0) nama <- switch(what, all = c("i", "j", "xi", "yi", "zi", "xj", "yj", "zj", "dx", "dy", "dz", "d"), indices = c("i", "j")) names(nama) <- nama nX <- npoints(X) nY <- npoints(Y) if(nX == 0 || nY == 0) { null.answer <- lapply(nama, nuttink) return(null.answer) } ## order patterns by increasing x coordinate ooX <- fave.order(coords(X)$x) Xsort <- X[ooX] ooY <- fave.order(coords(Y)$x) Ysort <- Y[ooY] ## First (over)estimate the number of pairs nsize <- ceiling(3 * pi * (rmax^3) * nX * nY/volume(as.box3(Y))) nsize <- max(1024, nsize) if(nsize > .Machine$integer.max) { warning( "Estimated number of close pairs exceeds maximum possible integer", call.=FALSE) nsize <- .Machine$integer.max } ## .Call XsortC <- coords(Xsort) YsortC <- coords(Ysort) Xx <- XsortC$x Xy <- XsortC$y Xz <- XsortC$z Yx <- YsortC$x Yy <- YsortC$y Yz <- YsortC$z r <- rmax ng <- nsize storage.mode(Xx) <- storage.mode(Xy) <- storage.mode(Xz) <- "double" storage.mode(Yx) <- storage.mode(Yy) <- storage.mode(Yz) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" ## go a <- switch(what, all = { .Call("cross3pairs", xx1=Xx, yy1=Xy, zz1=Xz, xx2=Yx, yy2=Yy, zz2=Yz, rr=r, nguess=ng, PACKAGE = 
"spatstat") }, indices = { .Call("cross3IJpairs", xx1=Xx, yy1=Xy, zz1=Xz, xx2=Yx, yy2=Yy, zz2=Yz, rr=r, nguess=ng, PACKAGE = "spatstat") }) names(a) <- nama ## convert i,j indices to original sequence a$i <- ooX[a$i] a$j <- ooY[a$j] return(a) } nuttink <- function(x) numeric(0) crosspairs.pp3 }) spatstat/R/istat.R0000755000176200001440000001430713115271075013577 0ustar liggesusers# # interactive analysis of point patterns # # $Revision: 1.23 $ $Date: 2015/10/21 09:06:57 $ # # istat <- function(x, xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) verifyclass(x, "ppp") kraever("rpanel") # generate simulations of CSR for use in envelopes simx <- envelope(x, fun=NULL, nsim=39, verbose=FALSE, internal=list(csr=TRUE, eject="patterns")) # initial value of smoothing parameter sigma0 <- with(x$window, min(diff(xrange),diff(yrange)))/8 # create panel p <- rpanel::rp.control(title=paste("istat(", xname, ")", sep=""), panelname="istat", size=c(600,400), x=x, # point pattern xname=xname, # name of point pattern simx=simx, # simulated realisations of CSR stat="data", envel="none", sigma=sigma0) # Split panel into two halves # Left half of panel: display # Right half of panel: controls rpanel::rp.grid(p, "gdisplay", pos=list(row=0,column=0), width=400, height=400) rpanel::rp.grid(p, "gcontrols", pos=list(row=0,column=1), width=200, height=400) #----- Display side ------------ # This line is to placate the package checker mytkr2 <- NULL rpanel::rp.tkrplot(p, mytkr2, do.istat, pos=list(row=0,column=0,grid="gdisplay")) redraw <- function(panel) { rpanel::rp.tkrreplot(panel, mytkr2) panel } #----- Control side ------------ nextrow <- 0 pozzie <- function(n=nextrow,s='w') list(row=n,column=0,grid="gcontrols",sticky=s) # choice of summary statistic ftable <- c(data="data", density="kernel smoothed", Kest="K-function", Lest="L-function", pcf="pair correlation", Kinhom="inhomogeneous K", Linhom="inhomogeneous L", Fest="empty space function F", Gest="nearest neighbour 
function G", Jest="J-function") fvals <- names(ftable) flabs <- as.character(ftable) stat <- NULL rpanel::rp.radiogroup(p, stat, vals=fvals, labels=flabs, title="statistic", action=redraw, pos=pozzie(0)) nextrow <- 1 # envelopes? envel <- NULL evals <- c("none", "pointwise", "simultaneous") elabs <- c("No simulation envelopes", "Pointwise envelopes under CSR", "Simultaneous envelopes under CSR") rpanel::rp.radiogroup(p, envel, vals=evals, labels=elabs, title="Simulation envelopes", action=redraw, pos=pozzie(nextrow)) nextrow <- nextrow + 1 # smoothing parameters sigma <- NULL rect <- as.rectangle(x$window) winwid <- min(abs(diff(rect$xrange)), abs(diff(rect$yrange))) rpanel::rp.slider(p, sigma, winwid/80, winwid/2, action=redraw, title="sigma", initval=winwid/8, showvalue=TRUE, pos=pozzie(nextrow, '')) nextrow <- nextrow + 1 pcfbw <- pcfbwinit <- 0.15/sqrt(5 * x$n/area(x$window)) rpanel::rp.slider(p, pcfbw, pcfbwinit/10, 4 * pcfbwinit, action=redraw, title="bw", initval=pcfbwinit, showvalue=TRUE, pos=pozzie(nextrow, '')) nextrow <- nextrow + 1 # button to print a summary at console rpanel::rp.button(p, title="Print summary information", action=function(panel) { print(summary(panel$x)); panel}, pos=pozzie(nextrow)) nextrow <- nextrow + 1 # quit button rpanel::rp.button(p, title="Quit", quitbutton=TRUE, action= function(panel) { panel }, pos=pozzie(nextrow)) invisible(NULL) } # function that updates the plot when the control panel is operated do.istat <- function(panel) { x <- panel$x xname <- panel$xname envel <- panel$envel stat <- panel$stat sigma <- panel$sigma simx <- panel$simx if(stat=="data") { plot(x, main=xname) return(panel) } out <- switch(envel, none=switch(stat, density=density(x, sigma=sigma), Kest=Kest(x), Lest=Lest(x), pcf=pcf(x, bw=panel$pcfbw), Kinhom=Kinhom(x, sigma=sigma), Linhom=Linhom(x, sigma=sigma), Fest=Fest(x), Gest=Gest(x), Jest=Jest(x)), pointwise=switch(stat, density=density(x, sigma=sigma), Kest=envelope(x, Kest, nsim=39, 
#' Determine, for each row of 'df', whether each tag variable actually
#' influences the model matrix generated by 'formula'.
#'
#' For row i and tag j, the formula is evaluated on that row twice --
#' once with the tag variable set to 0 and once set to 1, all other
#' tag/extra variables held at 0 -- and the two model matrices are
#' compared. The tag is deemed "present" for that row iff they differ.
#'
#' tags       : character vector of variable names to test
#' formula    : model formula
#' df         : data frame of covariate values (one row per case)
#' extranames : further variable names to be added as zero columns
#'
#' Returns a logical matrix with nrow(df) rows and length(tags) columns.
#'
#' Changes from the previous version: removed the unused local
#' 'allvars' (a dead call to variablesinformula), and replaced
#' seq(nrow(df))/seq(nvars) with seq_len, which is safe when df has
#' zero rows or tags is empty.
impliedpresence <- function(tags, formula, df, extranames=character(0)) {
  stopifnot(is.data.frame(df))
  stopifnot(inherits(formula, "formula"))
  stopifnot(is.character(tags))
  stopifnot(is.character(extranames))
  ## tag/extra names must not clash with genuine covariate columns
  if(any(tags %in% names(df)))
    stop(paste(sQuote("tags"),
               "conflicts with the name of a column of",
               sQuote("df")))
  if(any(extranames %in% names(df)))
    stop(paste(sQuote("extranames"),
               "conflicts with the name of a column of",
               sQuote("df")))
  # answer is a matrix
  nvars <- length(tags)
  nrows <- nrow(df)
  answer <- matrix(TRUE, nrows, nvars)
  ## append a zero-valued column for every tag and extra variable,
  ## so the formula can always be evaluated
  for(v in unique(c(tags, extranames)))
    df[ , v] <- 0
  for(i in seq_len(nrows)) {
    ## baseline model matrix for row i with all tag variables at 0
    pseudat <- df[i, , drop=FALSE]
    mof0 <- model.frame(formula, pseudat)
    mom0 <- model.matrix(formula, mof0)
    for(j in seq_len(nvars)) {
      ## switch tag j on and see whether the model matrix changes
      pseudatj <- pseudat
      pseudatj[ , tags[j]] <- 1
      mofj <- model.frame(formula, pseudatj)
      momj <- model.matrix(formula, mofj)
      answer[i,j] <- any(momj != mom0)
    }
  }
  return(answer)
}
pasteFormula(random), fixed=TRUE) random <- as.formula(rnd, env=environment(random)) } answer2 <- impliedpresence(itags, random, dfdata, nondfnames) answer <- answer | answer2 } #%^!endif colnames(answer) <- names(interaction) return(answer) } impliedcoefficients <- function(object, tag) { stopifnot(inherits(object, "mppm")) stopifnot(is.character(tag) && length(tag) == 1) fitobj <- object$Fit$FIT Vnamelist <- object$Fit$Vnamelist has.random <- object$Info$has.random # Not currently used: # fitter <- object$Fit$fitter # interaction <- object$Inter$interaction # ninteract <- object$Inter$ninteract # trivial <- object$Inter$trivial # iused <- object$Inter$iused itags <- object$Inter$itags if(!(tag %in% itags)) stop(paste("Argument", dQuote("tag"), "is not one of the interaction names")) # (0) Set up # Identify the columns of the glm data frame # that are associated with this interpoint interaction vnames <- Vnamelist[[tag]] if(!is.character(vnames)) stop("Internal error - wrong format for vnames") # Check atomic type of each covariate Moadf <- as.list(object$Fit$moadf) islog <- sapply(Moadf, is.logical) isnum <- sapply(Moadf, is.numeric) isfac <- sapply(Moadf, is.factor) # Interaction variables must be numeric or logical if(any(bad <- !(isnum | islog)[vnames])) stop(paste("Internal error: the", ngettext(sum(bad), "variable", "variables"), commasep(sQuote(vnames[bad])), "should be numeric or logical"), call.=FALSE) # The answer is a matrix of coefficients, # with one row for each point pattern, # and one column for each vname answer <- matrix(, nrow=object$npat, ncol=length(vnames)) colnames(answer) <- vnames # (1) make a data frame of covariates # Names of all columns in glm data frame allnames <- names(Moadf) # Extract the design covariates df <- as.data.frame(object$data, warn=FALSE) # Names of all covariates other than design covariates othernames <- allnames[!(allnames %in% names(df))] # Add columns in which all other covariates are set to 0, FALSE, etc for(v in 
othernames) { df[, v] <- if(isnum[[v]]) 0 else if(islog[[v]]) FALSE else if(isfac[[v]]) { lev <- levels(Moadf[[v]]) factor(lev[1], levels=lev) } else sort(unique(Moadf[[v]]))[1] } # (2) evaluate linear predictor Coefs <- if(!has.random) coef(fitobj) else fixef(fitobj) opt <- options(warn= -1) # eta0 <- predict(fitobj, newdata=df, type="link") eta0 <- GLMpredict(fitobj, data=df, coefs=Coefs, changecoef=TRUE, type="link") options(opt) # (3) for each vname in turn, # set the value of the vname to 1 and predict again for(j in seq_along(vnames)) { vnj <- vnames[j] df[[vnj]] <- 1 opt <- options(warn= -1) # etaj <- predict(fitobj, newdata=df, type="link") etaj <- GLMpredict(fitobj, data=df, coefs=Coefs, changecoef=TRUE, type="link") options(opt) answer[ ,j] <- etaj - eta0 # set the value of this vname back to 0 df[[vnj]] <- 0 } return(answer) } illegal.iformula <- local({ illegal.iformula <- function(ifmla, itags, dfvarnames) { ## THIS IS TOO STRINGENT! ## Check validity of the interaction formula. ## ifmla is the formula. ## itags is the character vector of interaction names. ## Check whether the occurrences of `itags' in `iformula' are valid: ## e.g. no functions applied to `itags[i]'. 
## Returns NULL if legal, otherwise a character string stopifnot(inherits(ifmla, "formula")) stopifnot(is.character(itags)) ## formula must not have a LHS if(length(ifmla) > 2) return("iformula must not have a left-hand side") ## variables in formula must be interaction tags or data frame variables varsinf <- variablesinformula(ifmla) if(!all(ok <- varsinf %in% c(itags, dfvarnames))) return(paste( ngettext(sum(!ok), "variable", "variables"), paste(dQuote(varsinf[!ok]), collapse=", "), "not allowed in iformula")) ## if formula uses no interaction tags, it's trivial if(!any(itags %in% variablesinformula(ifmla))) return(NULL) ## create terms object tt <- attributes(terms(ifmla)) ## extract all variables appearing in the formula vars <- as.list(tt$variables)[-1] ## nvars <- length(vars) varexprs <- lapply(vars, as.expression) varstrings <- sapply(varexprs, paste) ## Each variable may be a name or an expression v.is.name <- sapply(vars, is.name) ## a term may be an expression like sin(x), poly(x,y,degree=2) v.args <- lapply(varexprs, all.vars) ## v.n.args <- sapply(v.args, length) v.has.itag <- sapply(lapply(v.args, "%in%", x=itags), any) ## interaction tags may only appear as names, not in functions if(any(nbg <- v.has.itag & !v.is.name)) return(paste("interaction tags may not appear inside a function:", paste(dQuote(varstrings[nbg]), collapse=", "))) ## Interaction between two itags is not defined ## Inspect the higher-order terms fax <- tt$factors if(prod(dim(fax)) == 0) return(NULL) ## rows are first order terms, columns are terms of order >= 1 fvars <- rownames(fax) fterms <- colnames(fax) fv.args <- lapply(fvars, variablesintext) ft.args <- lapply(fterms, variables.in.term, factors=fax, varnamelist=fv.args) ft.itags <- lapply(ft.args, intersect, y=itags) if(any(lengths(ft.itags) > 1)) return("Interaction between itags is not defined") return(NULL) } variables.in.term <- function(term, factors, varnamelist) { basis <- (factors[, term] != 0) 
unlist(varnamelist[basis]) } illegal.iformula }) spatstat/R/factors.R0000644000176200001440000000310313115225157014101 0ustar liggesusers#' #' factors.R #' #' Tools for manipulating factors and factor-valued things #' #' $Revision: 1.4 $ $Date: 2016/04/25 02:34:40 $ relevel.im <- function(x, ref, ...) { if(x$type != "factor") stop("Only valid for factor-valued images") x[] <- relevel(x[], ref, ...) return(x) } relevel.ppp <- relevel.ppx <- function(x, ref, ...) { stopifnot(is.multitype(x)) marks(x) <- relevel(marks(x), ref, ...) return(x) } mergeLevels <- function(.f, ...) { if(is.im(.f)) { aa <- mergeLevels(.f[], ...) .f[] <- aa return(.f) } if(is.multitype(.f)) { marks(.f) <- mergeLevels(marks(.f), ...) return(.f) } stopifnot(is.factor(.f)) map <- list(...) n <- length(map) if(n == 0) return(.f) # mapping for 'other' if(any(isnul <- (lengths(map) == 0))) { if(sum(isnul) > 1) stop("At most one argument should be NULL or character(0)") otherlevels <- setdiff(levels(.f), unlist(map)) map[[which(isnul)]] <- otherlevels } newlevels <- names(map) oldlevels <- levels(.f) mappedlevels <- unlist(map) if(sum(nzchar(newlevels)) != n) stop("Arguments must be in the form name=value") if(!all(mappedlevels %in% oldlevels)) stop("Argument values must be levels of .f") ## construct mapping fullmap <- oldlevels for(i in seq_len(n)) { relevant <- oldlevels %in% map[[i]] fullmap[relevant] <- newlevels[i] } ## apply mapping newf <- factor(fullmap[.f], levels=unique(fullmap)) return(newf) } levelsAsFactor <- function(x) { lev <- levels(x) if(is.null(lev)) return(NULL) return(factor(lev, levels=lev)) } spatstat/R/by.ppp.R0000755000176200001440000000062413115271075013660 0ustar liggesusers# # by.ppp.R # # $Revision: 1.6 $ $Date: 2015/10/21 09:06:57 $ # by.ppp <- function(data, INDICES=marks(data), FUN, ...) 
{ if(missing(INDICES)) INDICES <- marks(data, dfok=FALSE) if(missing(FUN)) stop("FUN is missing") y <- split(data, INDICES) z <- list() for(i in seq_along(y)) z[[i]] <- FUN(y[[i]], ...) names(z) <- names(y) z <- as.solist(z, demote=TRUE) return(z) } spatstat/R/diagnoseppm.R0000755000176200001440000003457013115271075014765 0ustar liggesusers# # diagnoseppm.R # # Makes diagnostic plots based on residuals or energy weights # # $Revision: 1.43 $ $Date: 2016/07/06 06:56:56 $ # diagnose.ppm.engine <- function(object, ..., type="eem", typename, opt, sigma=NULL, rbord=reach(object), compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, rv=NULL, oldstyle=FALSE, splineargs = list(spar=0.5), verbose=TRUE) { if(is.marked.ppm(object)) stop("Sorry, this is not yet implemented for marked models") # quadrature points Q <- quad.ppm(object) U <- union.quad(Q) Qweights <- w.quad(Q) # -------------- Calculate residuals/weights ------------------- # Discretised residuals if(type == "eem") { residval <- if(!is.null(rv)) rv else eem(object, check=FALSE) residval <- as.numeric(residval) X <- data.ppm(object) Y <- X %mark% residval } else { if(!is.null(rv) && !inherits(rv, "msr")) stop("rv should be a measure (object of class msr)") residobj <- if(!is.null(rv)) rv else residuals.ppm(object, type=type, check=FALSE) residval <- with(residobj, "increment") if(ncol(as.matrix(residval)) > 1L) stop("Not implemented for vector-valued residuals; use [.msr to split into separate components") Y <- U %mark% residval } # Atoms and density of measure Ymass <- NULL Ycts <- NULL Ydens <- NULL if(compute.cts) { if(type == "eem") { Ymass <- Y Ycts <- U %mark% (-1) Ydens <- as.im(-1, Y$window) } else { atoms <- with(residobj, "is.atom") masses <- with(residobj, "discrete") cts <- with(residobj, "density") if(!is.null(atoms) && !is.null(masses) && !is.null(cts)) { Ymass <- (U %mark% masses)[atoms] Ycts <- U %mark% cts # remove NAs (as opposed to zero cif points) if(!all(ok <- 
is.finite(cts))) { U <- U[ok] Ycts <- Ycts[ok] cts <- cts[ok] Qweights <- Qweights[ok] } # interpolate continuous part to yield an image for plotting if(type == "inverse" && all(cts > 0)) { Ydens <- as.im(-1, Y$window) } else if(is.stationary.ppm(object) && is.poisson.ppm(object)) { # all values of `cts' will be equal Ydens <- as.im(cts[1L], Y$window) } else { smallsigma <- maxnndist(Ycts) Ujitter <- U Ujitter$x <- U$x + runif(U$n, -smallsigma, smallsigma) Ujitter$y <- U$y + runif(U$n, -smallsigma, smallsigma) Ydens <- Smooth(Ujitter %mark% marks(Ycts), sigma=smallsigma, weights=Qweights, edge=TRUE, ...) } } } } #---------------- Erode window --------------------------------- # ## Compute windows W <- Y$window # Erode window if required clip <- !is.null(rbord) && is.finite(rbord) && (rbord > 0) if(clip) { Wclip <- erosion.owin(W, rbord) Yclip <- Y[Wclip] Qweightsclip <- Qweights[inside.owin(U, , Wclip)] if(!is.null(Ycts)) Ycts <- Ycts[Wclip] if(!is.null(Ydens)) Ydens <- Ydens[Wclip, drop=FALSE] } else { Wclip <- W Yclip <- Y } # ------------ start collecting results ------------------------- result <- list(type=type, clip=clip, Y=Y, W=W, Yclip=Yclip, Ymass=Ymass, Ycts=Ycts, Ydens=Ydens) # ------------- smoothed field ------------------------------ Z <- NULL if(opt$smooth | opt$xcumul | opt$ycumul | opt$xmargin | opt$ymargin) { if(is.null(sigma)) sigma <- 0.1 * diameter(Wclip) Z <- density.ppp(Yclip, sigma, weights=Yclip$marks, edge=TRUE, ...) 
} if(opt$smooth) { result$smooth <- list(Z = Z, sigma=sigma) if(type == "pearson") result$smooth$sdp <- 1/(2 * sigma * sqrt(pi)) } # -------------- marginals of smoothed field ------------------------ if(opt$xmargin) { xZ <- apply(Z$v, 2, sum, na.rm=TRUE) * Z$xstep if(type == "eem") ExZ <- colSums(!is.na(Z$v)) * Z$xstep else ExZ <- numeric(length(xZ)) result$xmargin <- list(x=Z$xcol, xZ=xZ, ExZ=ExZ) } if(opt$ymargin) { yZ <- apply(Z$v, 1L, sum, na.rm=TRUE) * Z$ystep if(type == "eem") EyZ <- rowSums(!is.na(Z$v)) * Z$ystep else EyZ <- numeric(length(yZ)) result$ymargin <- list(y=Z$yrow, yZ=yZ, EyZ=EyZ) } # -------------- cumulative (lurking variable) plots -------------- ## precompute simulated patterns for envelopes if(identical(envelope, TRUE)) envelope <- simulate(object, nsim=nsim, progress=verbose) if(opt$xcumul) result$xcumul <- lurking(object, covariate=expression(x), type=type, clipwindow= if(clip) Wclip else NULL, rv=residval, plot.sd=compute.sd, envelope=envelope, nsim=nsim, nrank=nrank, plot.it=FALSE, typename=typename, covname="x coordinate", oldstyle=oldstyle, check=FALSE, splineargs=splineargs, ...) if(opt$ycumul) result$ycumul <- lurking(object, covariate=expression(y), type=type, clipwindow= if(clip) Wclip else NULL, rv=residval, plot.sd=compute.sd, envelope=envelope, nsim=nsim, nrank=nrank, plot.it=FALSE, typename=typename, covname="y coordinate", oldstyle=oldstyle, check=FALSE, splineargs=splineargs, ...) 
# -------------- summary numbers -------------- if(opt$sum) result$sum <- list(marksum=sum(Yclip$marks, na.rm=TRUE), areaWclip=area(Wclip), areaquad=if(clip) sum(Qweightsclip) else sum(Qweights), range=if(!is.null(Z)) range(Z) else NULL) return(invisible(result)) } ######################################################################## diagnose.ppm <- function(object, ..., type="raw", which="all", sigma=NULL, rbord =reach(object), cumulative=TRUE, plot.it = TRUE, rv = NULL, compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, typename, check=TRUE, repair=TRUE, oldstyle=FALSE, splineargs=list(spar=0.5)) { asked.newstyle <- !missing(oldstyle) && !oldstyle if(is.marked.ppm(object)) stop("Sorry, this is not yet implemented for marked models") # check whether model originally came from replicated data is.subfit <- (object$method == "mppm") Coefs <- coef(object) if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) object <- tweak.coefs(object, Coefs) } else if(compute.sd && is.null(getglmfit(object))) { object <- update(object, forcefit=TRUE, use.internal=TRUE) object <- tweak.coefs(object, Coefs) } # ------------- Interpret arguments -------------------------- # Edge-effect avoidance if(missing(rbord) && !is.finite(rbord)) { ## Model has infinite reach ## Use correction rule employed when fitting rbord <- if(object$correction == "border") object$rbord else 0 } # match type argument type <- pickoption("type", type, c(eem="eem", raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, eem="exponential energy weights", raw="raw residuals", inverse="inverse-lambda residuals", pearson="Pearson residuals") # 'which' is multiple choice with exact matching optionlist <- c("all", "marks", "smooth", "x", "y", "sum") if(!all(m <- which 
%in% optionlist)) stop(paste("Unrecognised choice(s) of", paste(sQuote("which"), ":", sep=""), paste(which[!m], collapse=", "))) opt <- list() opt$all <- "all" %in% which opt$marks <- ("marks" %in% which) | opt$all opt$smooth <- ("smooth" %in% which) | opt$all opt$xmargin <- (("x" %in% which) | opt$all) && !cumulative opt$ymargin <- (("y" %in% which) | opt$all) && !cumulative opt$xcumul <- (("x" %in% which) | opt$all) && cumulative opt$ycumul <- (("y" %in% which) | opt$all) && cumulative opt$sum <- ("sum" %in% which) | opt$all # compute and plot estimated standard deviations? # yes for Poisson, no for other models, unless overridden if(!missing(compute.sd)) plot.sd <- compute.sd else plot.sd <- list(...)$plot.sd if(is.null(plot.sd)) plot.sd <- is.poisson.ppm(object) if(missing(compute.sd)) compute.sd <- plot.sd # default for mppm objects is oldstyle=TRUE if(compute.sd && is.subfit) { if(!asked.newstyle) { # silently change default oldstyle <- TRUE } else { stop(paste("Variance calculation for a subfit of an mppm object", "is only implemented for oldstyle=TRUE"), call.=FALSE) } } # interpolate the density of the residual measure? if(missing(compute.cts)) { plot.neg <- resolve.defaults(list(...), formals(plot.diagppm)["plot.neg"])$plot.neg # only if it is needed for the mark plot compute.cts <- opt$marks && (plot.neg != "discrete") } # ------- DO THE CALCULATIONS ----------------------------------- RES <- diagnose.ppm.engine(object, type=type, typename=typename, opt=opt, sigma=sigma, rbord=rbord, compute.sd=compute.sd, compute.cts=compute.cts, envelope=envelope, nsim=nsim, nrank=nrank, rv=rv, oldstyle=oldstyle, splineargs=splineargs, ...) RES$typename <- typename RES$opt <- opt RES$compute.sd <- compute.sd RES$compute.cts <- compute.cts class(RES) <- "diagppm" # ------- PLOT -------------------------------------------------- if(plot.it) plot(RES, ...) 
return(RES) } plot.diagppm <- function(x, ..., which, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), plot.sd, spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL) { opt <- x$opt plot.neg <- match.arg(plot.neg) plot.smooth <- match.arg(plot.smooth) if(!missing(which)) { witches <- c("all", "marks", "smooth", "x", "y", "sum") unknown <- is.na(match(which, witches)) if(any(unknown)) warning(paste("Unrecognised", ngettext(sum(unknown), "option", "options"), "which =", commasep(sQuote(which[unknown])), ": valid options are", commasep(sQuote(witches))), call.=FALSE) oldopt <- opt newopt <- list() newopt$all <- "all" %in% which newopt$marks <- ("marks" %in% which) | newopt$all newopt$smooth <- ("smooth" %in% which) | newopt$all newopt$xmargin <- (("x" %in% which) | newopt$all) && oldopt$xmargin newopt$ymargin <- (("y" %in% which) | newopt$all) && oldopt$ymargin newopt$xcumul <- (("x" %in% which) | newopt$all) && oldopt$xcumul newopt$ycumul <- (("y" %in% which) | newopt$all) && oldopt$ycumul newopt$sum <- ("sum" %in% which) | newopt$all illegal <- (unlist(newopt) > unlist(oldopt)) if(any(illegal)) { offending <- paste(names(newopt)[illegal], collapse=", ") whinge <- paste("cannot display the following components;\n", "they were not computed: - \n", offending, "\n") stop(whinge) } opt <- newopt } if(missing(plot.sd)) { plot.sd <- x$compute.sd } else if(plot.sd && !(x$compute.sd)) { warning("can't plot standard deviations; they were not computed") plot.sd <- FALSE } if(!(x$compute.cts) && (plot.neg != "discrete") && (opt$marks || opt$all)) { if(!missing(plot.neg)) warning("can't plot continuous component of residuals; it was not computed") plot.neg <- "discrete" } if(opt$all) resid4plot(RES=x, plot.neg=plot.neg, plot.smooth=plot.smooth, spacing=spacing, outer=outer, srange=srange, monochrome=monochrome, main=main, ...) 
else resid1plot(RES=x, opt=opt, plot.neg=plot.neg, plot.smooth=plot.smooth, srange=srange, monochrome=monochrome, main=main, ...) } print.diagppm <- function(x, ...) { opt <- x$opt typename <- x$typename splat("Model diagnostics", paren(typename)) splat("Diagnostics available:") optkey <- list(all="four-panel plot", marks=paste("mark plot", if(!x$compute.cts) "(discrete representation only)" else NULL), smooth="smoothed residual field", xmargin="x marginal density", ymargin="y marginal density", xcumul="x cumulative residuals", ycumul="y cumulative residuals", sum="sum of all residuals") avail <- unlist(optkey[names(opt)[unlist(opt)]]) names(avail) <- NULL cat(paste("\t", paste(avail, collapse="\n\t"), "\n", sep="")) if(opt$sum) { xs <- x$sum windowname <- if(x$clip) "clipped window" else "entire window" splat("sum of", typename, "in", windowname, "=", signif(sum(xs$marksum),4)) splat("area of", windowname, "=", signif(xs$areaWclip, 4)) splat("quadrature area =", signif(xs$areaquad, 4)) } if(opt$smooth) { splat("range of smoothed field = ", prange(signif(range(x$smooth$Z),4))) if(!is.null(sdp <- x$smooth$sdp)) splat("Null standard deviation of smoothed Pearson residual field:", signif(sdp, 4)) } return(invisible(NULL)) } spatstat/R/edgeTrans.R0000755000176200001440000001045413115271075014366 0ustar liggesusers# # edgeTrans.R # # $Revision: 1.15 $ $Date: 2016/04/25 02:34:40 $ # # Translation edge correction weights # # edge.Trans(X) compute translation correction weights # for each pair of points from point pattern X # # edge.Trans(X, Y, W) compute translation correction weights # for all pairs of points X[i] and Y[j] # (i.e. one point from X and one from Y) # in window W # # edge.Trans(X, Y, W, paired=TRUE) # compute translation correction weights # for each corresponding pair X[i], Y[i]. 
# # To estimate the K-function see the idiom in "Kest.R" # ####################################################################### edge.Trans <- function(X, Y=X, W=Window(X), exact=FALSE, paired=FALSE, ..., trim=spatstat.options("maxedgewt"), dx=NULL, dy=NULL, give.rmax=FALSE, gW = NULL) { given.dxdy <- !is.null(dx) && !is.null(dy) if(!given.dxdy) { ## dx, dy will be computed from X, Y X <- as.ppp(X, W) W <- X$window Y <- if(!missing(Y)) as.ppp(Y, W) else X nX <- X$n nY <- Y$n if(paired) { if(nX != nY) stop("X and Y should have equal length when paired=TRUE") dx <- Y$x - X$x dy <- Y$y - X$y } else { dx <- outer(X$x, Y$x, "-") dy <- outer(X$y, Y$y, "-") } } else { ## dx, dy given if(paired) { ## dx, dy are vectors check.nvector(dx) check.nvector(dy) stopifnot(length(dx) == length(dy)) } else { ## dx, dy are matrices check.nmatrix(dx) check.nmatrix(dy) stopifnot(all(dim(dx) == dim(dy))) nX <- nrow(dx) nY <- ncol(dx) } stopifnot(is.owin(W)) } ## For irregular polygons, exact evaluation is very slow; ## so use pixel approximation, unless exact=TRUE if(W$type == "polygonal" && !exact) W <- as.mask(W) ## compute if(!paired) { dx <- as.vector(dx) dy <- as.vector(dy) } switch(W$type, rectangle={ ## Fast code for this case wide <- diff(W$xrange) high <- diff(W$yrange) weight <- wide * high / ((wide - abs(dx)) * (high - abs(dy))) }, polygonal={ ## This code is SLOW n <- length(dx) weight <- numeric(n) if(n > 0) { for(i in seq_len(n)) { Wshift <- shift(W, c(dx[i], dy[i])) weight[i] <- overlap.owin(W, Wshift) } weight <- area(W)/weight } }, mask={ ## compute set covariance of window if(is.null(gW)) gW <- setcov(W) ## evaluate set covariance at these vectors gvalues <- lookup.im(gW, dx, dy, naok=TRUE, strict=FALSE) weight <- area(W)/gvalues } ) ## clip high values if(length(weight) > 0) weight <- pmin.int(weight, trim) if(!paired) weight <- matrix(weight, nrow=nX, ncol=nY) if(give.rmax) attr(weight, "rmax") <- rmax.Trans(W, gW) return(weight) } ## maximum radius for translation 
correction ## = radius of largest circle centred at 0 contained in W + ^W rmax.Trans <- function(W, g=setcov(W)) { ## calculate maximum permissible 'r' value ## for validity of translation correction W <- as.owin(W) if(is.rectangle(W)) return(shortside(W)) ## find support of set covariance if(is.null(g)) g <- setcov(W) eps <- 2 * max(1, max(g)) * .Machine$double.eps gsupport <- solutionset(g > eps) gboundary <- bdry.mask(gsupport) xy <- rasterxy.mask(gboundary, drop=TRUE) rmax <- with(xy, sqrt(min(x^2 + y^2))) return(rmax) } ## maximum radius for rigid motion correction ## = radius of smallest circle centred at 0 containing W + ^W rmax.Rigid <- function(X, g=setcov(Window(X))) { stopifnot(is.ppp(X) || is.owin(X)) if(is.ppp(X)) return(max(pairdist(X[chull(X)]))) W <- X if(is.rectangle(W)) return(diameter(W)) if(is.null(g)) g <- setcov(W) eps <- 2 * max(1, max(g)) * .Machine$double.eps gsupport <- solutionset(g > eps) gboundary <- bdry.mask(gsupport) xy <- rasterxy.mask(gboundary, drop=TRUE) rmax <- with(xy, sqrt(max(x^2 + y^2))) return(rmax) } spatstat/R/applynbd.R0000755000176200001440000000501113115271075014254 0ustar liggesusers# applynbd.R # # $Revision: 1.17 $ $Date: 2016/10/23 10:36:58 $ # # applynbd() # For each point, identify either # - all points within distance R # - the closest N points # - those points satisfying some constraint # and apply the function FUN to them # # markstat() # simple application of applynbd ################################################################# applynbd <- function(X, FUN, N=NULL, R=NULL, criterion=NULL, exclude=FALSE, ...) 
{ if(is.null(N) && is.null(R) && is.null(criterion)) stop(paste("must specify at least one of the arguments", commasep(sQuote(c("N","R","criterion"))))) X <- as.ppp(X) npts <- npoints(X) # compute matrix of pairwise distances dist <- pairdist(X) # compute row ranks (avoid ties) rankit <- function(x) { u <- numeric(length(x)); u[fave.order(x)] <- seq_along(x); return(u) } drank <- t(apply(dist, 1L, rankit)) - 1L included <- matrix(TRUE, npts, npts) if(!is.null(R)) { # select points closer than R included <- included & (dist <= R) } if(!is.null(N)) { # select N closest points if(N < 1) stop("Value of N must be at least 1") if(exclude) included <- included & (drank <= N) else included <- included & (drank <= N-1) } if(!is.null(criterion)) { # some funny criterion for(i in 1L:npts) included[i,] <- included[i,] & criterion(dist[i,], drank[i,]) } if(exclude) diag(included) <- FALSE # bind into an array a <- array(c(included, dist, drank, row(included)), dim=c(npts,npts,4)) # what to do with a[i, , ] if(!is.marked(X)) go <- function(ai, Z, fun, ...) { which <- as.logical(ai[,1L]) distances <- ai[,2L] dranks <- ai[,3L] here <- ai[1L,4L] fun(Y=Z[which], current=c(x=Z$x[here], y=Z$y[here]), dists=distances[which], dranks=dranks[which], ...) } else go <- function(ai, Z, fun, ...) { which <- as.logical(ai[,1L]) distances <- ai[,2L] dranks <- ai[,3L] here <- ai[1L,4L] fun(Y=Z[which], current=Z[here], dists=distances[which], dranks=dranks[which], ...) } # do it result <- apply(a, 1, go, Z=X, fun=FUN, ...) return(result) } markstat <- function(X, fun, N=NULL, R=NULL, ...) { verifyclass(X, "ppp") stopifnot(is.function(fun)) statfun <- function(Y, current, dists, dranks, func, ...) { func(marks(Y, dfok=TRUE), ...) } applynbd(X, statfun, R=R, N=N, func=fun, ...) 
} spatstat/R/envelopeArray.R0000644000176200001440000000536413115271075015267 0ustar liggesusers# # envelopeArray.R # # $Revision: 1.1 $ $Date: 2017/06/05 10:31:58 $ # # envelopeArray <- function(X, fun, ..., dataname=NULL,verb=FALSE,reuse=TRUE) { #' if(is.null(dataname)) dataname <- short.deparse(substitute(X)) #' determine function name f.is.name <- is.name(substitute(fun)) fname <- if(f.is.name) paste(as.name(substitute(fun))) else if(is.character(fun)) fun else sQuote("fun") #' determine function to be called if(is.character(fun)) { fun <- get(fun, mode="function") } else if(!is.function(fun)) stop(paste(sQuote("fun"), "should be a function or a character string")) #' Apply function to data pattern, to test it #' and to determine array dimensions, margin labels etc. fX <- do.call.matched(fun, append(list(X), list(...)), matchfirst=TRUE) if(!inherits(fX, "fasp")) stop("function did not return an object of class 'fasp'") d <- dim(fX) witch <- matrix(1:prod(d), nrow=d[1L], ncol=d[2L], dimnames=dimnames(fX)) #' make function that extracts [i,j] entry of result ijfun <- function(X, ..., i=1, j=1, expectdim=d) { fX <- fun(X, ...) 
if(!inherits(fX, "fasp")) stop("function did not return an object of class 'fasp'") if(!all(dim(fX) == expectdim)) stop("function returned an array with different dimensions") return(fX[i,j]) } # ------------ start computing ------------------------------- if(reuse) { L <- do.call(spatstat::envelope, resolve.defaults( list(X, fun=ijfun), list(internal=list(eject="patterns")), list(...), list(verbose=verb))) intern <- attr(L, "internal") } else intern <- L <- NULL # compute function array and build up 'fasp' object fns <- list() k <- 0 for(i in 1:nrow(witch)) { for(j in 1:ncol(witch)) { if(verb) cat("i =",i,"j =",j,"\n") currentfv <- do.call(spatstat::envelope, resolve.defaults( list(X, ijfun), list(simulate=L, internal=intern), list(verbose=FALSE), list(...), list(Yname=dataname), list(i=i, j=j))) k <- k+1 fns[[k]] <- as.fv(currentfv) } } # wrap up into 'fasp' object title <- paste("array of envelopes of", fname, "for", dataname) rslt <- fasp(fns, which=witch, formulae=NULL, dataname=dataname, title=title, checkfv=FALSE) return(rslt) } spatstat/R/psstG.R0000755000176200001440000001342713115271120013544 0ustar liggesusers# # psstG.R # # Pseudoscore residual for unnormalised G (saturation process) # # $Revision: 1.9 $ $Date: 2015/10/21 09:06:57 $ # ################################################################################ # psstG <- function(object, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), truecoef=NULL, hi.res=NULL) { if(inherits(object, "ppm")) fit <- object else if(inherits(object, "ppp") || inherits(object, "quad")) { # convert to quadscheme if(inherits(object, "ppp")) object <- quadscheme(object, ...) 
# fit model if(!is.null(model)) fit <- update(model, Q=object, forcefit=TRUE) else fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } else stop("object should be a fitted point process model or a point pattern") # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) } else USED <- rep.int(TRUE, U$n) # basic statistics Win <- Window(X) npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # adjustments to account for restricted domain of pseudolikelihood # if(any(!USED)) { # npts.used <- sum(Z & USED) # area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # } else { # npts.used <- npts # area.used <- areaW # lambda.used <- lambda # } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", Win, lambda) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals res <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) # resval <- with(res, "increment") rescts <- with(res, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~V[S](r), NULL), "theo", . ~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~V[S]") # First phase: ................................................. 
# nearest neighbours (quadrature point to data point) nn <- nncross(U, X, seq(U$n), seq(X$n)) # excludes identical pairs dIJ <- nn$dist I <- seq(U$n) J <- nn$which DD <- (I <= X$n) # TRUE for data points wcIJ <- wc okI <- USED[I] # histogram of nndist for data points only (without edge correction) Bsum <- cumsum(whist(dIJ[DD & okI], breaks$val)) # weighted histogram of nncross (without edge correction) Bint <- cumsum(whist(dIJ[okI], breaks$val, wcIJ[okI])) # residual Bres <- Bsum - Bint # tack on ans <- bind.fv(ans, data.frame(dat1=Bsum, com1=Bint, res1=Bres), c("%s[dat1](r)", "%s[com1](r)", "%s[res1](r)"), c("phase 1 pseudosum (contribution to %s)", "phase 1 pseudocompensator (contribution to %s)", "phase 1 pseudoresidual (contribution to %s)")) # Second phase: ................................................ # close pairs (quadrature point to data point) close <- crosspairs(U, X, rmax, what="ijd") dIJ <- close$d I <- close$i J <- close$j # UI <- U[I] # XJ <- X[J] EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] ZI <- Z[I] # TRUE if U[I[k]] is a data point DD <- ZI & !EIJ # TRUE for pairs of distinct data points only # nDD <- sum(DD) okI <- USED[I] # residual weights # wIJ <- ifelseXY(EIJ, rescts[I], resval[I]) # absolute weight for continuous integrals wc <- -rescts wcIJ <- -rescts[I] # nearest and second-nearest neighbour distances in X nn1 <- nndist(X) nn2 <- nndist(X, k=2) nn1J <- nn1[J] nn2J <- nn2[J] # weird use of the reduced sample estimator # data sum: RSX <- Kount(dIJ[DD & okI], nn2J[DD & okI], nn2J[ZI & okI], breaks) Csum <- RSX$numerator # integral: if(spatstat.options("psstG.remove.zeroes")) okE <- okI & !EIJ else okE <- okI RSD <- Kwtsum(dIJ[okE], nn1J[okE], wcIJ[okE], nn1, rep.int(1, length(nn1)), breaks) Cint <- RSD$numerator # Cres <- Bres + Csum - Cint # tack on ans <- bind.fv(ans, data.frame(dat2=Csum, com2=Cint, res2=Cres, dat=Bsum+Csum, com=Bint+Cint, res=Bres+Cres), c("%s[dat2](r)", "%s[com2](r)", "%s[res2](r)", 
"Sigma~Delta~V[S](r)", "bold(C)~Delta~V[S](r)", "bold(R)~Delta~V[S](r)"), c("phase 2 pseudosum (contribution to %s)", "phase 2 pseudocompensator (contribution to %s)", "phase 2 pseudoresidual (contribution to %s)", "pseudosum (contribution to %s)", "pseudocompensator (contribution to %s)", "pseudoresidual function %s"), "res") # restrict choice of curves in default plot fvnames(ans, ".") <- c("dat", "com", "res", "theo") # return(ans) } spatstat/R/addvar.R0000755000176200001440000003065313115271075013716 0ustar liggesusers# # addvar.R # # added variable plot # # $Revision: 1.11 $ $Date: 2016/10/23 10:36:58 $ # addvar <- function(model, covariate, ..., subregion=NULL, bw="nrd0", adjust=1, from=NULL, to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname, crosscheck=FALSE) { if(missing(covname)) covname <- sensiblevarname(deparse(substitute(covariate)), "X") callstring <- paste(deparse(sys.call()), collapse = "") if(is.marked(model)) stop("Sorry, this is not yet implemented for marked models") if(is.null(adjust)) adjust <- 1 bw.input <- match.arg(bw.input) # validate model stopifnot(is.ppm(model)) if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) modelcall <- model$callstring if(is.null(modelcall)) modelcall <- model$call # extract spatial locations Q <- quad.ppm(model) # datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) nQ <- n.quad(Q) # fitted intensity lam <- fitted(model, type="trend") # subset of quadrature points used to fit model subQset <- getglmsubset(model) if(is.null(subQset)) subQset <- rep.int(TRUE, nQ) # restriction to subregion insubregion <- if(!is.null(subregion)) { inside.owin(quadpoints, w=subregion) } else rep.int(TRUE, nQ) ################################################################ # Pearson residuals from point process model yr <- residuals(model, type="Pearson") yresid <- with(yr, "increment") # averaged (then sum with weight 'wts') yresid <- yresid/wts 
################################################################# # Covariates # # covariate data frame df <- getglmdata(model) if(!all(c("x", "y") %in% names(df))) { xy <- as.data.frame(quadpoints) notxy <- !(colnames(df) %in% c("x", "y")) other <- df[, notxy] df <- cbind(xy, other) } # avail.covars <- names(df) # covariates used in model used.covars <- model.covariates(model) fitted.covars <- model.covariates(model, offset=FALSE) # ################################################################# # identify the covariate # if(!is.character(covariate)) { # Covariate is some kind of data, treated as external covariate if(covname %in% fitted.covars) stop(paste("covariate named", dQuote(covname), "is already used in model")) covvalues <- evalCovariate(covariate, quadpoints) # validate covvalues if(is.null(covvalues)) stop("Unable to extract covariate values") else if(length(covvalues) != npoints(quadpoints)) stop(paste("Internal error: number of covariate values =", length(covvalues), "!=", npoints(quadpoints), "= number of quadrature points")) # tack onto data frame covdf <- data.frame(covvalues) names(covdf) <- covname df <- cbind(df, covdf) } else { # Argument is name of covariate covname <- covariate if(length(covname) > 1) stop("Must specify only one covariate") # if(covname %in% fitted.covars) stop(paste("covariate", dQuote(covname), "already used in model")) # if(!(covname %in% avail.covars)) stop(paste("covariate", dQuote(covname), "not available")) # covvalues <- df[, covname] } ################################################################ # Pearson residuals from weighted linear regression of new covariate on others rhs <- formula(model) fo <- as.formula(paste(covname, paste(rhs, collapse=" "))) fit <- lm(fo, data=df, weights=lam * wts) xresid <- residuals(fit, type="pearson")/sqrt(wts) if(crosscheck) { message("Cross-checking...") X <- model.matrix(fo, data=df) V <- diag(lam * wts) sqrtV <- diag(sqrt(lam * wts)) Info <- t(X) %*% V %*% X H <- sqrtV %*% X 
%*% solve(Info) %*% t(X) %*% sqrtV nQ <- length(lam) Id <- diag(1, nQ, nQ) xresid.pearson <- (Id - H) %*% sqrtV %*% covvalues xresid.correct <- xresid.pearson/sqrt(wts) abserr <- max(abs(xresid - xresid.correct), na.rm=TRUE) relerr <- abserr/diff(range(xresid.correct, finite=TRUE)) if(is.finite(relerr) && relerr > 0.01) { warning("Large relative error in residual computation") } message("Done.") } # experiment suggests residuals(fit, "pearson") == xresid.correct # and residuals(fit) equivalent to # covvalues - X %*% solve(t(X) %*% V %*% X) %*% t(X) %*% V %*% covvalues ################################################################# # check for NA's etc # locations that must have finite values operative <- if(bw.restrict) insubregion & subQset else subQset nbg <- !is.finite(xresid) | !is.finite(yresid) if(any(offending <- nbg & operative)) { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } ################################################################# # Restrict data to 'operative' points # with finite values ok <- !nbg & operative Q <- Q[ok] xresid <- xresid[ok] yresid <- yresid[ok] covvalues <- covvalues[ok] df <- df[ok, ] lam <- lam[ok] wts <- wts[ok] Z <- Z[ok] insubregion <- insubregion[ok] #################################################### # assemble data for smoothing xx <- xresid yy <- yresid ww <- wts if(makefrom <- is.null(from)) from <- min(xresid) if(maketo <- is.null(to)) to <- max(xresid) #################################################### # determine smoothing bandwidth # from 'operative' data switch(bw.input, quad = { # bandwidth selection from covariate values at all quadrature points numer <- unnormdensity(xx, weights=yy * ww, bw=bw, adjust=adjust, n=n,from=from,to=to, ...) 
sigma <- numer$bw }, points= { # bandwidth selection from covariate values at data points fake <- unnormdensity(xx[Z], weights=1/lam[Z], bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- fake$bw numer <- unnormdensity(xx, weights=yy * ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) }) #################################################### # Restrict data and recompute numerator if required if(!is.null(subregion) && !bw.restrict) { # Bandwidth was computed on all data # Restrict to subregion and recompute numerator xx <- xx[insubregion] yy <- yy[insubregion] ww <- ww[insubregion] lam <- lam[insubregion] Z <- Z[insubregion] if(makefrom) from <- min(xx) if(maketo) to <- max(xx) numer <- unnormdensity(xx, weights=yy * ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) } #################################################### # Compute denominator denom <- unnormdensity(xx,weights=ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) #################################################### # Determine recommended plot range xr <- range(xresid[Z], finite=TRUE) alim <- xr + 0.1 * diff(xr) * c(-1,1) alim <- intersect.ranges(alim, c(from, to)) #################################################### # Compute terms interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } numfun <- interpolate(numer) denfun <- interpolate(denom) xxx <- numer$x ratio <- function(y, x) { ifelseXB(x != 0, y/x, NA) } yyy <- ratio(numfun(xxx), denfun(xxx)) # Null variance estimation # smooth with weight 1 and smaller bandwidth tau <- sigma/sqrt(2) varnumer <- unnormdensity(xx,weights=ww, bw=tau,adjust=1, n=n,from=from,to=to, ...) 
varnumfun <- interpolate(varnumer) vvv <- ratio(varnumfun(xxx), 2 * sigma * sqrt(pi) * denfun(xxx)^2) safesqrt <- function(x) { ok <- is.finite(x) & (x >= 0) y <- rep.int(NA_real_, length(x)) y[ok] <- sqrt(x[ok]) return(y) } twosd <- 2 * safesqrt(vvv) # pack into fv object rslt <- data.frame(rcov=xxx, rpts=yyy, theo=0, var=vvv, hi=twosd, lo=-twosd) nuc <- length(used.covars) if(nuc == 0) { given <- givenlab <- 1 } else if(nuc == 1) { given <- givenlab <- used.covars } else { given <- commasep(used.covars, ", ") givenlab <- paste("list", paren(given)) } given <- paste("|", given) xlab <- sprintf("r(paste(%s, '|', %s))", covname, givenlab) ylab <- sprintf("r(paste(points, '|', %s))", givenlab) yexpr <- parse(text=ylab)[[1L]] desc <- c(paste("Pearson residual of covariate", covname, given), paste("Smoothed Pearson residual of point process", given), "Null expected value of point process residual", "Null variance of point process residual", "Upper limit of pointwise 5%% significance band", "Lower limit of pointwise 5%% significance band") rslt <- fv(rslt, argu="rcov", ylab=yexpr, valu="rpts", fmla= (. 
~ rcov), alim=alim, labl=c(xlab, "%s", "0", "bold(var) ~ %s", "%s[hi]", "%s[lo]"), desc=desc, fname=ylab) attr(rslt, "dotnames") <- c("rpts", "theo", "hi", "lo") # data associated with quadrature points reserved <- (substr(colnames(df), 1L, 4L) == ".mpl") isxy <- colnames(df) %in% c("x", "y") dfpublic <- cbind(df[, !(reserved | isxy)], data.frame(xresid, yresid)) attr(rslt, "spatial") <- union.quad(Q) %mark% dfpublic # auxiliary data attr(rslt, "stuff") <- list(covname = covname, xresid = xresid, yresid = yresid, covvalues = covvalues, wts = wts, bw = bw, adjust = adjust, sigma = sigma, used.covars = used.covars, modelcall = modelcall, callstring = callstring, xlim = c(from, to), xlab = xlab, ylab = ylab, lmcoef = coef(fit), bw.input = bw.input, bw.restrict = bw.restrict, restricted = !is.null(subregion)) # finish class(rslt) <- c("addvar", class(rslt)) return(rslt) } print.addvar <- function(x, ...) { cat("Added variable plot diagnostic (class addvar)\n") s <- attr(x, "stuff") mc <- paste(s$modelcall, collapse="") cat(paste("for the covariate", dQuote(s$covname), "for the fitted model:", if(nchar(mc) <= 30) "" else "\n\t", mc, "\n\n")) if(identical(s$restricted, TRUE)) cat("\t--Diagnostic computed for a subregion--\n") cat(paste("Call:", s$callstring, "\n")) cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5), "\n\n")) NextMethod("print") } plot.addvar <- function(x, ..., do.points=FALSE) { xname <- deparse(substitute(x)) s <- attr(x, "stuff") # covname <- s$covname xresid <- s$xresid yresid <- s$yresid # adjust y limits if intending to plot points as well ylimcover <- if(do.points) range(yresid, finite=TRUE) else NULL # do.call(plot.fv, resolve.defaults(list(x), list(...), list(main=xname, shade=c("hi", "lo"), legend=FALSE, ylim.covers=ylimcover))) # plot points if(do.points) do.call(points, resolve.defaults(list(x=xresid, y=yresid), list(...), list(pch=3, cex=0.5))) return(invisible(x)) } 
spatstat/R/Gmulti.R0000755000176200001440000001625713115271075013722 0ustar liggesusers# Gmulti.S # # Compute estimates of nearest neighbour distance distribution functions # for multitype point patterns # # S functions: # Gcross G_{ij} # Gdot G_{i\bullet} # Gmulti (generic) # # $Revision: 4.43 $ $Date: 2015/10/21 09:06:57 $ # ################################################################################ "Gcross" <- function(X, i, j, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han")) { # computes G_{ij} estimates # # X marked point pattern (of class 'ppp') # i,j the two mark values to be compared # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X, dfok=FALSE)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] # I <- (marx == i) if(sum(I) == 0) stop("No points are of type i") if(i == j) result <- Gest(X[I], r=r, breaks=breaks, ...) 
else { J <- (marx == j) if(sum(J) == 0) stop("No points are of type j") result <- Gmulti(X, I, J, r=r, breaks=breaks, disjoint=FALSE, ..., correction=correction) } iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(G[i,j](r), list(i=iname, j=jname)), c("G", paste0("list(", iname, ",", jname, ")")), new.yexp=substitute(G[list(i,j)](r), list(i=iname,j=jname))) return(result) } "Gdot" <- function(X, i, r=NULL, breaks=NULL, ..., correction=c("km","rs","han")) { # Computes estimate of # G_{i\bullet}(t) = # P( a further point of pattern in B(0,t)| a type i point at 0 ) # # X marked point pattern (of class ppp) # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) if(sum(I) == 0) stop("No points are of type i") J <- rep.int(TRUE, X$n) # i.e. all points # result <- Gmulti(X, I, J, r, breaks, disjoint=FALSE, ..., correction=correction) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(G[i ~ dot](r), list(i=iname)), c("G", paste(iname, "~ symbol(\"\\267\")")), new.yexp=substitute(G[i ~ symbol("\267")](r), list(i=iname))) return(result) } ########## "Gmulti" <- function(X, I, J, r=NULL, breaks=NULL, ..., disjoint=NULL, correction=c("rs", "km", "han")) { # # engine for computing the estimate of G_{ij} or G_{i\bullet} # depending on selection of I, J # # X marked point pattern (of class ppp) # # I,J logical vectors of length equal to the number of points # and identifying the two subsets of points to be # compared. 
# # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # verifyclass(X, "ppp") W <- X$window npts <- npoints(X) areaW <- area(W) # check I and J I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") nI <- sum(I) nJ <- sum(J) if(nI == 0) stop("No points satisfy condition I") if(nJ == 0) stop("No points satisfy condition J") if(is.null(disjoint)) disjoint <- !any(I & J) # choose correction(s) # correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("rs", "km", "han") correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) # determine breakpoints for r values lamJ <- nJ/areaW rmaxdefault <- rmax.rule("G", W, lamJ) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) # brks <- breaks$val rmax <- breaks$max rvals <- breaks$r zeroes <- numeric(length(rvals)) # initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lamJ * pi * rvals^2)) fname <- c("G", "list(I,J)") Z <- fv(df, "r", quote(G[I,J](r)), "theo", . ~ r, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=quote(G[list(I,J)](r))) # "type I to type J" nearest neighbour distances XI <- X[I] XJ <- X[J] if(disjoint) nnd <- nncross(XI, XJ, what="dist") else { seqnp <- seq_len(npts) iX <- seqnp[I] iY <- seqnp[J] nnd <- nncross(XI, XJ, iX, iY, what="dist") } # distance to boundary from each type i point bdry <- bdist.points(XI) # observations o <- pmin.int(nnd,bdry) # censoring indicators d <- (nnd <= bdry) # # calculate estimates if("none" %in% correction) { # UNCORRECTED e.d.f. 
of nearest neighbour distances: use with care if(npts == 0) edf <- zeroes else { hh <- hist(nnd[nnd <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(nnd) } Z <- bind.fv(Z, data.frame(raw=edf), makefvlabel(NULL, "hat", fname, "raw"), "uncorrected estimate of %s", "raw") } if("han" %in% correction) { # Hanisch style estimator if(npts == 0) G <- zeroes else { # uncensored distances x <- nnd[d] # weights a <- eroded.areas(W, rvals) # calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts G <- cumsum(h/a) G <- G/max(G[is.finite(G)]) } # add to fv object Z <- bind.fv(Z, data.frame(han=G), makefvlabel(NULL, "hat", fname, "han"), "Hanisch estimate of %s", "han") # modify recommended plot range attr(Z, "alim") <- range(rvals[G <= 0.9]) } if(any(correction %in% c("rs", "km"))) { # calculate Kaplan-Meier and border correction (Reduced Sample) estimators if(npts == 0) result <- data.frame(rs=zeroes, km=zeroes, hazard=zeroes) else { result <- km.rs(o, bdry, d, breaks) result <- as.data.frame(result[c("rs", "km", "hazard")]) } # add to fv object Z <- bind.fv(Z, result, c(makefvlabel(NULL, "hat", fname, "bord"), makefvlabel(NULL, "hat", fname, "km"), "hazard(r)"), c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function lambda(r)"), "km") # modify recommended plot range attr(Z, "alim") <- range(rvals[result$km <= 0.9]) } nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) unitname(Z) <- unitname(X) return(Z) } spatstat/R/satpiece.R0000755000176200001440000001114613115271120014235 0ustar liggesusers# # # satpiece.S # # $Revision: 1.16 $ $Date: 2016/02/16 01:39:12 $ # # Saturated pairwise interaction process with piecewise constant potential # # SatPiece() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # SatPiece <- local({ # ..... 
auxiliary functions ...... delSP <- function(i, r, sat) { r <- r[-i] sat <- sat[-i] nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Geyer(r, sat)) return(SatPiece(r, sat)) } # ....... template object .......... BlankSatPiece <- list( name = "piecewise constant Saturated pairwise interaction process", creator = "SatPiece", family = "pairsat.family", # evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) out[,,1] <- (d < r[1]) if(nr > 1) { for(i in 2:nr) out[,,i] <- (d >= r[i-1]) & (d < r[i]) } out }, par = list(r = NULL, sat=NULL), # filled in later parnames = c("interaction thresholds", "saturation parameters"), init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || !all(r > 0)) stop("interaction thresholds r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction thresholds r must be strictly increasing") if(!is.numeric(sat) || any(sat < 0)) stop("saturation parameters must be nonnegative numbers") if(any(ceiling(sat) != floor(sat))) warning("saturation parameter has a non-integer value") if(length(sat) != length(r) && length(sat) != 1) stop("vectors r and sat must have equal length") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma must be # non-NA # finite, if sat > 0 # less than 1, if sat = Inf gamma <- (self$interpret)(coeffs, self)$param$gammas sat <- self$par$sat if(anyNA(gamma)) return(FALSE) return(all((is.finite(gamma) | sat == 0) & (gamma <= 1 | sat != Inf))) }, project = function(coeffs, self){ loggammas <- as.numeric(coeffs) sat <- 
self$par$sat r <- self$par$r ok <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0) if(all(ok)) return(NULL) if(!any(ok)) return(Poisson()) bad <- !ok if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delSP(firstbad, r, sat)) } else { # consider all candidate submodels subs <- lapply(which(bad), delSP, r=r, sat=sat) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r sat <- self$par$sat if(all(is.na(coeffs))) return(2 * max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) & (sat > 0) if(!any(active)) return(0) else return(2 * max(r[active])) }, version=NULL # added later ) class(BlankSatPiece) <- "interact" SatPiece <- function(r, sat) { instantiate.interact(BlankSatPiece, list(r=r, sat=sat)) } SatPiece <- intermaker(SatPiece, BlankSatPiece) SatPiece }) spatstat/R/pspcross.R0000755000176200001440000002032413115271120014312 0ustar liggesusers# # pspcross.R # # Intersections of line segments # # $Revision: 1.23 $ $Date: 2017/06/05 10:31:58 $ # # crossing.psp <- function(A,B,fatal=TRUE,details=FALSE) { verifyclass(A, "psp") verifyclass(B, "psp") # first check for intersection of windows ABW <- intersect.owin(A$window, B$window, fatal=fatal) if(is.null(ABW)) return(NULL) eps <- .Machine$double.eps na <- A$n eA <- A$ends x0a <- eA$x0 y0a <- eA$y0 dxa <- eA$x1 - eA$x0 dya <- eA$y1 - eA$y0 nb <- B$n eB <- B$ends x0b <- eB$x0 y0b <- eB$y0 dxb <- eB$x1 - eB$x0 dyb <- eB$y1 - eB$y0 useCall <- spatstat.options("crossing.psp.useCall") if(!useCall) { # old C routine out <- .C("xysegint", na=as.integer(na), x0a=as.double(x0a), y0a=as.double(y0a), dxa=as.double(dxa), dya=as.double(dya), nb=as.integer(nb), x0b=as.double(x0b), y0b=as.double(y0b), dxb=as.double(dxb), dyb=as.double(dyb), eps=as.double(eps), xx=as.double(numeric(na * nb)), yy=as.double(numeric(na * nb)), 
ta=as.double(numeric(na * nb)), tb=as.double(numeric(na * nb)), ok=as.integer(integer(na * nb)), PACKAGE = "spatstat") ok <- (matrix(out$ok, na, nb) != 0) xx <- matrix(out$xx, na, nb) yy <- matrix(out$yy, na, nb) xx <- as.vector(xx[ok]) yy <- as.vector(yy[ok]) if(details) { ia <- as.vector(row(ok)[ok]) jb <- as.vector(col(ok)[ok]) ta <- as.vector(matrix(out$ta, na, nb)[ok]) tb <- as.vector(matrix(out$tb, na, nb)[ok]) } } else { # new storage.mode(x0a) <- storage.mode(y0a) <- "double" storage.mode(dxa) <- storage.mode(dya) <- "double" storage.mode(x0b) <- storage.mode(y0b) <- "double" storage.mode(dxb) <- storage.mode(dyb) <- "double" storage.mode(eps) <- "double" out <- .Call("Cxysegint", x0a, y0a, dxa, dya, x0b, y0b, dxb, dyb, eps, PACKAGE="spatstat") xx <- out[[5]] yy <- out[[6]] if(details) { ia <- out[[1L]] + 1L jb <- out[[2L]] + 1L ta <- out[[3L]] tb <- out[[4L]] } } result <- ppp(xx, yy, window=ABW, check=FALSE) if(details) marks(result) <- data.frame(iA=ia, jB=jb, tA=ta, tB=tb) return(result) } test.crossing.psp <- function(A,B) { # return logical matrix specifying whether A[i] and B[j] cross verifyclass(A, "psp") verifyclass(B, "psp") eps <- .Machine$double.eps na <- A$n eA <- A$ends x0a <- eA$x0 y0a <- eA$y0 dxa <- eA$x1 - eA$x0 dya <- eA$y1 - eA$y0 nb <- B$n eB <- B$ends x0b <- eB$x0 y0b <- eB$y0 dxb <- eB$x1 - eB$x0 dyb <- eB$y1 - eB$y0 out <- .C("xysi", na=as.integer(na), x0a=as.double(x0a), y0a=as.double(y0a), dxa=as.double(dxa), dya=as.double(dya), nb=as.integer(nb), x0b=as.double(x0b), y0b=as.double(y0b), dxb=as.double(dxb), dyb=as.double(dyb), eps=as.double(eps), ok=as.integer(integer(na * nb)), PACKAGE = "spatstat") hit <- (matrix(out$ok, na, nb) != 0) return(hit) } anycrossing.psp <- function(A,B) { # equivalent to: any(test.crossing.psp(A,B)) # Test whether two psp objects have at least one crossing point verifyclass(A, "psp") verifyclass(B, "psp") eps <- .Machine$double.eps na <- A$n eA <- A$ends x0a <- eA$x0 y0a <- eA$y0 dxa <- eA$x1 - eA$x0 
dya <- eA$y1 - eA$y0 nb <- B$n eB <- B$ends x0b <- eB$x0 y0b <- eB$y0 dxb <- eB$x1 - eB$x0 dyb <- eB$y1 - eB$y0 out <- .C("xysiANY", na=as.integer(na), x0a=as.double(x0a), y0a=as.double(y0a), dxa=as.double(dxa), dya=as.double(dya), nb=as.integer(nb), x0b=as.double(x0b), y0b=as.double(y0b), dxb=as.double(dxb), dyb=as.double(dyb), eps=as.double(eps), ok=as.integer(integer(1L)), PACKAGE = "spatstat") hit <- (out$ok != 0) return(hit) } selfcrossing.psp <- function(A) { verifyclass(A, "psp") eps <- .Machine$double.eps n <- A$n eA <- A$ends x0 <- eA$x0 y0 <- eA$y0 dx <- eA$x1 - eA$x0 dy <- eA$y1 - eA$y0 useCall <- spatstat.options("selfcrossing.psp.useCall") if(!useCall) { # old C routine out <- .C("xysegXint", n=as.integer(n), x0=as.double(x0), y0=as.double(y0), dx=as.double(dx), dy=as.double(dy), eps=as.double(eps), xx=as.double(numeric(n^2)), yy=as.double(numeric(n^2)), ti=as.double(numeric(n^2)), tj=as.double(numeric(n^2)), ok=as.integer(integer(n^2)), PACKAGE = "spatstat") ok <- (matrix(out$ok, n, n) != 0) xx <- matrix(out$xx, n, n) yy <- matrix(out$yy, n, n) xx <- as.vector(xx[ok]) yy <- as.vector(yy[ok]) } else { # new storage.mode(x0) <- storage.mode(y0) <- "double" storage.mode(dx) <- storage.mode(dy) <- "double" storage.mode(eps) <- "double" out <- .Call("CxysegXint", x0, y0, dx, dy, eps, PACKAGE="spatstat") xx <- out[[5L]] yy <- out[[6L]] } result <- ppp(xx, yy, window=A$window, check=FALSE) return(result) } test.selfcrossing.psp <- function(A) { verifyclass(A, "psp") eps <- .Machine$double.eps n <- A$n eA <- A$ends x0 <- eA$x0 y0 <- eA$y0 dx <- eA$x1 - eA$x0 dy <- eA$y1 - eA$y0 out <- .C("xysxi", na=as.integer(n), x0=as.double(x0), y0=as.double(y0), dx=as.double(dx), dy=as.double(dy), eps=as.double(eps), ok=as.integer(integer(n*n)), PACKAGE = "spatstat") hit <- (matrix(out$ok, n, n) != 0) return(hit) } selfcut.psp <- function(A, ..., eps) { stopifnot(is.psp(A)) # n <- A$n eA <- A$ends x0 <- eA$x0 y0 <- eA$y0 dx <- eA$x1 - eA$x0 dy <- eA$y1 - eA$y0 
if(missing(eps) || is.null(eps)) { eps <- sqrt(.Machine$double.eps) * diameter(Frame(A)) } else { check.1.real(eps) stopifnot(eps >= 0) } ## identify self-crossings eps <- .Machine$double.eps storage.mode(x0) <- storage.mode(y0) <- "double" storage.mode(dx) <- storage.mode(dy) <- "double" storage.mode(eps) <- "double" zz <- .Call("CxysegXint", x0, y0, dx, dy, eps, PACKAGE = "spatstat") if(length(zz[[1]]) == 0) return(A) ## names(zz) <- c("i", "j", "ti", "tj", "x", "y") df <- as.data.frame(zz) df$i <- df$i + 1L df$j <- df$j + 1L ## gone <- with(df, unique(c(i,j))) newends <- as.matrix(eA) newends <- newends[-gone, , drop=FALSE] newmarx <- marx <- marks(A) if(mama <- !is.null(marx)) { marx <- as.data.frame(marx) newmarx <- marx[-gone, ,drop=FALSE] } ## cut each segment using the *provided* values of x,y for(ii in gone) { ## assemble cuts through segment ii imatch <- with(df, which(i == ii)) jmatch <- with(df, which(j == ii)) df.i <- with(df, data.frame(t=c(ti[imatch], tj[jmatch]), x=x[c(imatch, jmatch)], y=y[c(imatch, jmatch)])) # discard T-junctions ok <- with(df.i, t > 0 & t < 1) df.i <- df.i[ok, ,drop=FALSE] # order the pieces ord <- with(df.i, order(t)) df.i <- df.i[ord, , drop=FALSE] ## add endpoints xnew <- c(eA[ii,"x0"], df.i$x, eA[ii,"x1"]) ynew <- c(eA[ii,"y0"], df.i$y, eA[ii,"y1"]) m <- length(xnew) newsegs <- cbind(xnew[-m], ynew[-m], xnew[-1], ynew[-1]) newends <- rbind(newends, newsegs) if(mama) newmarx <- rbind(newmarx, marx[rep(ii, m-1), , drop=FALSE]) } Y <- as.psp(newends, window=Window(A), marks=newmarx) if(eps > 0) { ok <- (lengths.psp(Y) > eps) if(any(!ok)) Y <- Y[ok] } return(Y) } spatstat/R/distcdf.R0000644000176200001440000000642213115225157014067 0ustar liggesusers#' #' distcdf.R #' #' cdf of |X1-X2| when X1,X2 are iid uniform in W, etc #' #' $Revision: 1.10 $ $Date: 2016/02/11 10:17:12 $ #' distcdf <- function(W, V=W, ..., dW=1, dV=dW, nr=1024, regularise=TRUE) { reflexive <- missing(V) && missing(dV) diffuse <- is.owin(W) && is.owin(V) 
uniformW <- identical(dW, 1) uniformV <- identical(dV, 1) uniform <- uniformW && uniformV if(is.owin(W)) { W <- as.mask(as.owin(W), ...) dW <- as.im(dW, W=W) } else if(is.ppp(W)) { if(uniformW) { #' discrete uniform distribution on W dW <- pixellate(W, ...) } else { #' dW should be a weight or vector of weights if(!is.vector(dW) || !is.numeric(dW)) stop("If W is a point pattern, dW should be a vector of weights") if(length(dW) == 1L) { dW <- rep(dW, npoints(W)) } else stopifnot(length(dW) == npoints(W)) dW <- pixellate(W, weights=dW, ...) } } else stop("W should be a point pattern or a window") if(is.owin(V)) { V <- as.mask(as.owin(V), ...) dV <- as.im(dV, W=V) } else if(is.ppp(V)) { if(uniformV) { #' discrete uniform distribution on V dV <- pixellate(V, ...) } else { #' dV should be a weight or vector of weights if(!is.vector(dV) || !is.numeric(dV)) stop("If V is a point pattern, dV should be a vector of weights") if(length(dV) == 1L) { dV <- rep(dV, npoints(V)) } else stopifnot(length(dV) == npoints(V)) dV <- pixellate(V, weights=dV, ...) } } else stop("V should be a point pattern or a window") if(!uniformW && min(dW) < 0) stop("Negative values encountered in dW") if(!uniformV && min(dV) < 0) stop("Negative values encountered in dV") #' compute if(diffuse && uniform) { #' uniform distributions on windows g <- if(reflexive) setcov(W, ...) else setcov(W, V, ...) 
} else { g <- if(reflexive) imcov(dW) else imcov(dW, dV) } r <- as.im(function(x,y) { sqrt(x^2 + y^2) }, g) rvals <- as.vector(as.matrix(r)) gvals <- as.vector(as.matrix(g)) rgrid <- seq(0, max(rvals), length=nr) dr <- max(rvals)/(nr-1) h <- whist(rvals, breaks=rgrid, weights=gvals/sum(gvals)) ch <- c(0,cumsum(h)) #' regularise at very short distances if(regularise) { sevenpix <- 7 * with(r, max(xstep, ystep)) ii <- round(sevenpix/dr) ch[1:ii] <- ch[ii] * (rgrid[1:ii]/rgrid[ii])^2 } #' ok result <- fv(data.frame(r=rgrid, f=ch), "r", quote(CDF(r)), "f", , range(rvals), c("r","%s(r)"), c("Interpoint distance","Cumulative probability"), fname="CDF") return(result) } bw.frac <- function(X, ..., f=1/4) { X <- as.owin(X) g <- distcdf(X, ...) r <- with(g, .x) Fr <- with(g, .y) iopt <- min(which(Fr >= f)) ropt <- r[iopt] attr(ropt, "f") <- f attr(ropt, "g") <- g class(ropt) <- c("bw.frac", class(ropt)) return(ropt) } print.bw.frac <- function(x, ...) { print(as.numeric(x), ...) } plot.bw.frac <- function(x, ...) 
{ xname <- short.deparse(substitute(x)) g <- attr(x, "g") f <- attr(x, "f") ropt <- as.numeric(x) do.call(plot, resolve.defaults(list(g), list(...), list(main=xname))) abline(v=ropt, lty=3) abline(h=f, lty=3) invisible(NULL) } spatstat/R/minnndist.R0000644000176200001440000000307313115271120014440 0ustar liggesusers## ## minnndist.R ## ## Fast versions of min(nndist(X)), max(nndist(X)) ## ## $Revision: 1.5 $ $Date: 2017/06/05 10:31:58 $ minnndist <- function(X, positive=FALSE) { stopifnot(is.ppp(X)) n <- npoints(X) if(n <= 1) return(NA) x <- X$x y <- X$y o <- fave.order(y) big <- sqrt(.Machine$double.xmax) if(positive) { z <- .C("minPnnd2", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), as.double(big), result = as.double(numeric(1)), PACKAGE = "spatstat") } else { z <- .C("minnnd2", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), as.double(big), result = as.double(numeric(1)), PACKAGE = "spatstat") } return(sqrt(z$result)) } maxnndist <- function(X, positive=FALSE) { stopifnot(is.ppp(X)) n <- npoints(X) if(n <= 1) return(NA) x <- X$x y <- X$y o <- fave.order(y) big <- sqrt(.Machine$double.xmax) if(positive) { z <- .C("maxPnnd2", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), as.double(big), result = as.double(numeric(1)), PACKAGE = "spatstat") } else { z <- .C("maxnnd2", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), as.double(big), result = as.double(numeric(1)), PACKAGE = "spatstat") } return(sqrt(z$result)) } spatstat/R/summary.ppm.R0000755000176200001440000004206413115271120014733 0ustar liggesusers# # summary.ppm.R # # summary() method for class "ppm" # # $Revision: 1.77 $ $Date: 2016/12/30 01:44:07 $ # # summary.ppm() # print.summary.ppm() # summary.ppm <- local({ covtype <- function(x) { if(is.im(x)) "im" else if(is.function(x)) "function" else if(is.owin(x)) "owin" else if(is.numeric(x) && length(x) == 1) "number" else if(is.factor(x)) "factor" else if(is.integer(x)) "integer" else if(is.numeric(x)) 
"numeric" else storage.mode(x) } xargs <- function(f) { ar <- names(formals(f))[-(1:2)] return(ar[ar != "..."]) } summary.ppm <- function(object, ..., quick=FALSE, fine=FALSE) { verifyclass(object, "ppm") x <- object y <- list() class(y) <- "summary.ppm" ####### Extract main data components ######################### QUAD <- object$Q DATA <- QUAD$data TREND <- x$trend INTERACT <- x$interaction if(is.null(INTERACT)) INTERACT <- Poisson() ####### Check version ######################### mpl.ver <- versionstring.ppm(object) int.ver <- versionstring.interact(INTERACT) current <- versionstring.spatstat() virgin <- min(package_version(c(mpl.ver, int.ver))) y$antiquated <- antiquated <- (virgin <= package_version("1.5")) y$old <- (virgin < majorminorversion(current)) y$version <- as.character(virgin) ####### Determine type of model ############################ y$entries <- list() y$no.trend <- identical.formulae(TREND, NULL) || identical.formulae(TREND, ~1) y$trendvar <- trendvar <- variablesinformula(TREND) y$stationary <- y$no.trend || all(trendvar == "marks") y$poisson <- is.poisson.interact(INTERACT) y$marked <- is.marked.ppp(DATA) y$multitype <- is.multitype.ppp(DATA) y$marktype <- if(y$multitype) "multitype" else if(y$marked) "marked" else "unmarked" if(y$marked) y$entries$marks <- marks(DATA) y$name <- paste(if(y$stationary) "Stationary " else "Nonstationary ", if(y$poisson) { if(y$multitype) "multitype " else if(y$marked) "marked " else "" }, INTERACT$name, sep="") ###### Fitting algorithm ######################################## y$method <- x$method y$VB <- x$internal$VB y$problems <- x$problems y$fitter <- if(!is.null(x$fitter)) x$fitter else "unknown" if(y$fitter %in% c("glm", "gam")) y$converged <- x$internal$glmfit$converged ###### Coefficients were changed after fit? 
##################### y$projected <- yproj <- identical(x$projected, TRUE) y$changedcoef <- yproj || !is.null(x$coef.orig) y$valid <- valid.ppm(x, warn=FALSE) ###### Extract fitted model coefficients ######################### y$entries$coef <- COEFS <- x$coef y$coef.orig <- x$coef.orig y$entries$Vnames <- Vnames <- x$internal$Vnames y$entries$IsOffset <- x$internal$IsOffset ###### Extract fitted interaction and summarise ################# FITIN <- fitin(x) y$interaction <- summary(FITIN) # Exit here if quick=TRUE if(identical(quick, TRUE)) return(y) ###### Does it have external covariates? #################### # defaults y <- append(y, list(has.covars = FALSE, covnames = character(0), covars.used = character(0), uses.covars = FALSE, covars.are.df = FALSE, expandable = TRUE, covar.type = character(0), covar.descrip = character(0), has.funcs = FALSE, covfunargs = NULL, has.xargs = FALSE, xargmap = NULL)) class(y) <- "summary.ppm" if(!antiquated) { covars <- x$covariates y$has.covars <- hc <- !is.null(covars) && (length(covars) > 0) if(hc) { y$covnames <- names(covars) used <- (y$trendvar %in% names(covars)) y$covars.used <- y$trendvar[used] y$uses.covars <- any(used) y$covars.are.df <- is.data.frame(covars) # describe covariates ctype <- unlist(lapply(covars, covtype)) y$expandable <- all(ctype[used] %in%c("function", "number")) names(ctype) <- names(covars) y$covar.type <- ctype y$covar.descrip <- ctype # are there any functions? y$has.funcs <- any(isfun <- (ctype == "function")) # do covariates depend on additional arguments? 
if(y$has.funcs) { y$covfunargs <- x$covfunargs y$cfafitter <- attr(x$covfunargs, "fitter") funs <- covars[isfun] fdescrip <- function(f) { if(inherits(f, "distfun")) return("distfun") alist <- paste(names(formals(f)), collapse=", ") paste("function(", alist, ")", sep="") } y$covar.descrip[isfun] <- unlist(lapply(funs, fdescrip)) # find any extra arguments (after args 1 & 2) explicitly named fargs <- lapply(funs, xargs) nxargs <- lengths(fargs) y$has.xargs <- any(nxargs > 0) if(y$has.xargs) { # identify which function arguments are fixed in the call fmap <- data.frame(Covariate=rep.int(names(funs), nxargs), Argument=unlist(fargs)) fmap$Given <- (fmap$Argument %in% names(y$covfunargs)) y$xargmap <- fmap } } } } else { # Antiquated format # Interpret the function call instead callexpr <- parse(text=x$call) callargs <- names(as.list(callexpr[[1]])) # Data frame of covariates was called 'data' in versions up to 1.4-x y$has.covars <- !is.null(callargs) && !is.na(pmatch("data", callargs)) # conservative guess y$uses.covars <- y$has.covars y$covfunargs <- NULL } ###### Arguments in call #################################### y$args <- x[c("call", "correction", "rbord")] ####### Main data components ######################### y$entries <- append(list(quad=QUAD, data=DATA, interaction=INTERACT), y$entries) if(is.character(quick) && (quick == "entries")) return(y) ####### Summarise data ############################ y$data <- summary(DATA, checkdup=FALSE) y$quad <- summary(QUAD, checkdup=FALSE) if(is.character(quick) && (quick == "no prediction")) return(y) ###### Trend component ######################### y$trend <- list() y$trend$name <- if(y$poisson) "Intensity" else "Trend" y$trend$formula <- if(y$no.trend) NULL else TREND if(y$poisson && y$no.trend) { # uniform Poisson process y$trend$value <- exp(COEFS[[1]]) y$trend$label <- switch(y$marktype, unmarked="Uniform intensity", multitype="Uniform intensity for each mark level", marked="Uniform intensity in product space", "") } 
else if(y$stationary) { # stationary switch(y$marktype, unmarked={ # stationary non-poisson non-marked y$trend$label <- "First order term" y$trend$value <- c(beta=exp(COEFS[[1]])) }, multitype={ # stationary, multitype mrk <- marks(DATA) y$trend$label <- if(y$poisson) "Intensities" else "First order terms" # Use predict.ppm to evaluate the fitted intensities lev <- factor(levels(mrk), levels=levels(mrk)) nlev <- length(lev) marx <- list(x=rep.int(0, nlev), y=rep.int(0, nlev), marks=lev) betas <- predict(x, locations=marx, type="trend") names(betas) <- paste("beta_", as.character(lev), sep="") y$trend$value <- betas }, marked={ # stationary, marked y$trend$label <- "Fitted intensity coefficients" y$trend$value <- blankcoefnames(COEFS) }) } else { # not stationary # extract trend terms without trying to understand them much if(is.null(Vnames)) trendbits <- COEFS else { agree <- outer(names(COEFS), Vnames, "==") whichbits <- matrowall(!agree) trendbits <- COEFS[whichbits] } y$trend$label <- ngettext(length(trendbits), "Fitted trend coefficient", "Fitted trend coefficients") y$trend$value <- blankcoefnames(trendbits) } # ----- parameters with SE -------------------------- if(is.character(quick) && (quick == "no variances")) return(y) # Exit before SE for variational Bayes if(!is.null(x$internal$VB)) return(y) if(length(COEFS) > 0) { # compute standard errors se <- x$internal$se if(is.null(se)) { vc <- vcov(x, fine=fine, matrix.action="warn") if(!is.null(vc)) { se <- if(is.matrix(vc)) sqrt(diag(vc)) else if(length(vc) == 1) sqrt(vc) else NULL } } if(!is.null(se)) { two <- qnorm(0.975) lo <- COEFS - two * se hi <- COEFS + two * se zval <- COEFS/se pval <- 2 * pnorm(abs(zval), lower.tail=FALSE) psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1), labels=c("***", "**", "*", " "), include.lowest=TRUE) # table of coefficient estimates with SE and 95% CI y$coefs.SE.CI <- data.frame(Estimate=COEFS, S.E.=se, CI95.lo=lo, CI95.hi=hi, Ztest=psig, Zval=zval) } } return(y) } summary.ppm }) 
coef.summary.ppm <- function(object, ...) { object$coefs.SE.CI } print.summary.ppm <- function(x, ...) { if(x$old) warning("Model was fitted by an older version of spatstat") if(is.null(x$args)) { # this is the quick version splat(x$name) return(invisible(NULL)) } # otherwise - full details splat("Point process model") fitter <- if(!is.null(x$fitter)) x$fitter else "unknown" methodchosen <- if(is.null(x$method)) "unspecified method" else if(fitter == "exact") "maximum likelihood" else switch(x$method, mpl={ if(x$poisson) { # Poisson process "maximum likelihood (Berman-Turner approximation)" } else { "maximum pseudolikelihood (Berman-Turner approximation)" } }, logi={ if(is.null(x$VB)){ if(x$poisson) { # Poisson process "maximum likelihood (logistic regression approximation)" } else { "maximum pseudolikelihood (logistic regression approximation)" } } else { "maximum posterior density (variational Bayes approximation)" } }, ho="Huang-Ogata method (approximate maximum likelihood)", paste("unrecognised method", sQuote(x$method))) splat("Fitting method:", methodchosen) howfitted <- switch(fitter, exact= "analytically", gam = "using gam()", glm = "using glm()", ho = NULL, paste("using unrecognised fitter", sQuote(fitter))) if(!is.null(howfitted)) splat("Model was fitted", howfitted) if(fitter %in% c("glm", "gam")) { if(x$converged) splat("Algorithm converged") else splat("*** Algorithm did not converge ***") } if(x$projected) splat("Fit was projected to obtain a valid point process model") cat("Call:\n") print(x$args$call) if(x$old) splat("** Executed by old spatstat version", x$version, " **") splat("Edge correction:", dQuote(x$args$correction)) if(x$args$correction == "border") splat("\t[border correction distance r =", x$args$rbord,"]") # print summary of quadrature scheme if(is.null(x$quad)) return(invisible(NULL)) ruletextline() print(x$quad) ## start printing trend information if(is.null(x$no.trend)) return(invisible(NULL)) ruletextline() splat("FITTED MODEL:") 
parbreak() # This bit is currently identical to print.ppm() # except for a bit more fanfare # and the inclusion of the 'gory details' bit notrend <- x$no.trend # stationary <- x$stationary poisson <- x$poisson markeddata <- x$marked multitype <- x$multitype # markedpoisson <- poisson && markeddata # ----------- Print model type ------------------- cat(x$name) cat("\n") if(markeddata) mrk <- x$entries$marks if(multitype) { splat("Possible marks:") cat(paste(levels(mrk))) } # ----- trend -------------------------- if(length(x$trend) == 0) return(invisible(NULL)) parbreak() splat(paste0("---- ", x$trend$name, ": ----")) parbreak() if(!notrend) { splat("Log", if(poisson) "intensity:" else "trend:", pasteFormula(x$trend$formula)) if(x$uses.covars) splat("Model depends on external", ngettext(length(x$covars.used), "covariate", "covariates"), commasep(sQuote(x$covars.used))) } if(x$has.covars) { if(notrend || !x$uses.covars) splat("Model object contains external covariates") isdf <- identical(x$covars.are.df, TRUE) if(!is.null(cd <- x$covar.descrip)) { # print description of each covariate splat(paste0("Covariates provided", if(isdf) " (in data frame)" else NULL, ":")) namescd <- names(cd) for(i in seq_along(cd)) splat(paste0("\t", namescd[i], ": ", cd[i])) } if(!is.null(cfa <- x$covfunargs) && length(cfa) > 0) { splat("Covariate function arguments (covfunargs) provided:") namescfa <- names(cfa) for(i in seq_along(cfa)) { cat(paste(namescfa[i], "= ")) cfai <- cfa[[i]] if(is.numeric(cfai) && length(cfai) == 1) { cat(paste(cfai, "\n")) } else print(cfa[[i]]) } } } parbreak() splat(paste0(x$trend$label, ":")) tv <- x$trend$value if(!is.list(tv)) print(tv) else for(i in seq_along(tv)) print(tv[[i]]) # table of coefficient estimates with SE and 95% CI if(!is.null(cose <- x$coefs.SE.CI)) { cat("\n") print(cose) } # ---- Interaction ---------------------------- if(!poisson) { parbreak() splat(" ---- Interaction: -----") parbreak() print(x$interaction) } ####### Gory details 
no.trend.ppm <- function(x) {
  ## TRUE when the fitted model has no trend component
  ## (read off the quick summary of the ppm object).
  summary.ppm(x, quick=TRUE)$no.trend
}

is.stationary <- function(x) {
  ## Generic: is this fitted model stationary?
  UseMethod("is.stationary")
}

is.poisson <- function(x) {
  ## Generic: is this fitted model a Poisson process?
  UseMethod("is.poisson")
}

is.stationary.ppm <- function(x) {
  ## A fitted ppm is stationary when its trend is absent,
  ## trivial (~1), or depends only on the marks.
  tren <- x$trend
  if(is.null(tren) || identical.formulae(tren, ~1))
    return(TRUE)
  all(variablesinformula(tren) == "marks")
}

is.poisson.ppm <- function(x) {
  ## A fitted ppm is Poisson iff its interpoint interaction is Poisson;
  ## a missing interaction component means Poisson by convention.
  stopifnot(is.ppm(x))
  inte <- x$interaction
  if(is.null(inte))
    inte <- Poisson()
  is.poisson.interact(inte)
}

is.marked.ppm <- function(X, ...) {
  ## TRUE when the model was fitted to a marked point pattern.
  summary.ppm(X, quick=TRUE)$marked
}
is.expandable.ppm <- function(x) {
  ## TRUE when the fitted model can be expanded to a larger window,
  ## as recorded in the 'entries' part of the model summary.
  return(identical(summary(x, quick="entries")$expandable, TRUE))
}

blankcoefnames <- function(x) {
  ## Remove name labels from ppm coefficients.
  ##
  ## x is a list of coefficient pieces.  A piece that carries its own
  ## internal names ('labels within labels') is kept as-is; unlabelled
  ## pieces are flattened.  If every piece is unlabelled, the whole
  ## list collapses to a single vector (retaining the list-level names,
  ## since unlist() propagates them).
  ##
  ## Improvements over the previous version:
  ##  - vapply() instead of unlist(lapply(...)) for a type-stable
  ##    logical result;
  ##  - result list preallocated instead of grown element-by-element.
  unlabelled <- vapply(x, function(z) is.null(names(z)), logical(1))
  if(all(unlabelled))
    return(unlist(x))
  value <- vector("list", length(x))
  for(i in seq_along(x))
    value[[i]] <- if(unlabelled[i]) unlist(x[i]) else x[[i]]
  return(value)
}
stop(paste("Column lengths are inconsistent:", paste(u, collapse=","))) } ncases <- u if(ncases > 1 && all(heights[dfcolumns] == 1)) { ## force the data frame to have 'ncases' rows aarg[dfcolumns] <- lapply(aarg[dfcolumns], rep, ncases) heights[dfcolumns] <- ncases } if(any(stubs <- hypercolumns & (heights != ncases))) { ## hypercolumns of height 1 should be hyperatoms aarg[stubs] <- lapply(aarg[stubs], "[[", i=1L) hypercolumns[stubs] <- FALSE hyperatoms[stubs] <- TRUE } } ## Collect the data frame columns into a data frame if(!any(dfcolumns)) df <- as.data.frame(matrix(, ncases, 0), row.names=row.names) else { df <- do.call(data.frame, append(aarg[dfcolumns], list(row.names=row.names, check.rows=check.rows, check.names=check.names, stringsAsFactors=stringsAsFactors))) names(df) <- nama[dfcolumns] } ## Storage type of each variable vtype <- character(nvars) vtype[dfcolumns] <- "dfcolumn" vtype[hypercolumns] <- "hypercolumn" vtype[hyperatoms] <- "hyperatom" vtype=factor(vtype, levels=c("dfcolumn","hypercolumn","hyperatom")) ## Class of each variable vclass <- character(nvars) if(any(dfcolumns)) vclass[dfcolumns] <- unlist(lapply(as.list(df), class1)) if(any(hyperatoms)) vclass[hyperatoms] <- unlist(lapply(aarg[hyperatoms], class1)) if(any(hypercolumns)) vclass[hypercolumns] <- unlist(lapply(aarg[hypercolumns], class1of1)) ## Put the result together result <- list(nvars=nvars, ncases=ncases, vname=nama, vtype=vtype, vclass=vclass, df=df, hyperatoms=aarg[hyperatoms], hypercolumns=aarg[hypercolumns]) class(result) <- c("hyperframe", class(result)) return(result) } is.dfcolumn <- function(x) { is.atomic(x) && (is.vector(x) || is.factor(x)) } is.hypercolumn <- function(x) { if(!is.list(x)) return(FALSE) if(inherits(x, c("listof", "anylist"))) return(TRUE) if(length(x) <= 1) return(TRUE) cla <- lapply(x, class) return(length(unique(cla)) == 1) } class1 <- function(x) { class(x)[1L] } class1of1 <- function(x) { class(x[[1L]])[1L] } hyperframe }) is.hyperframe <- 
function(x) inherits(x, "hyperframe")  # completes the 'is.hyperframe <-' assignment begun in the preceding chunk

print.hyperframe <- function(x, ...) {
  ## Print method for hyperframes.
  dat <- unclass(x)
  nv <- dat$nvars
  nc <- dat$ncases
  if(nv * nc == 0) {
    ## degenerate hyperframe: just describe its dimensions
    splat("NULL hyperframe with",
          nc, ngettext(nc, "row (=case)", "rows (=cases)"),
          "and",
          nv, ngettext(nv, "column (=variable)", "columns (=variables)"))
  } else {
    if(waxlyrical('gory'))
      cat("Hyperframe:\n")
    print(as.data.frame(x, discard=FALSE), ...)
  }
  invisible(NULL)
}

dim.hyperframe <- function(x) {
  ## dimensions: c(number of rows/cases, number of columns/variables)
  ux <- unclass(x)
  c(ux$ncases, ux$nvars)
}

summary.hyperframe <- function(object, ..., brief=FALSE) {
  ## Summary method for hyperframes.
  ## Collects dimensions, per-variable classes and storage types,
  ## and (unless brief=TRUE) a printable table of all columns.
  ux <- unclass(object)
  cls <- ux$vclass
  names(cls) <- ux$vname
  out <- list(nvars     = ux$nvars,
              ncases    = ux$ncases,
              dim       = c(ux$ncases, ux$nvars),
              typeframe = data.frame(VariableName=ux$vname, Class=ux$vclass),
              storage   = ux$vtype,
              col.names = ux$vname)
  out$classes <- cls
  ## ordinary data frame columns
  df <- ux$df
  out$dfnames <- colnames(df)
  ## NB: assigning NULL via $<- deliberately leaves the element absent,
  ## matching the original list structure when brief or empty
  out$df <- if(length(df) > 0 && !brief) summary(df) else NULL
  out$row.names <- row.names(df)
  ## insert into full array
  if(!brief && ux$nvars > 0) {
    isobj <- (ux$vtype != "dfcolumn")
    nobj <- sum(isobj)
    if(nobj == 0) {
      allcols <- out$df
    } else {
      ## pad the non-data-frame columns with NA placeholders,
      ## then reorder to the original variable order
      blanks <- rep(list(NA_character_), nobj)
      names(blanks) <- ux$vname[isobj]
      allcols <- do.call(cbind, append(list(out$df), blanks))
      acnames <- c(colnames(df), names(blanks))
      allcols <- allcols[ , match(ux$vname, acnames), drop=FALSE]
    }
    ## add a top row showing the class of each column
    pclass <- padtowidth(paren(cls), colnames(allcols), justify="right")
    allcols <- as.table(rbind(class=pclass, as.table(allcols)))
    row.names(allcols) <- rep("", nrow(allcols))
    out$allcols <- allcols
  }
  class(out) <- c("summary.hyperframe", class(out))
  out
}
{ nvars <- x$nvars ncases <- x$ncases splat(if(nvars * ncases == 0) "NULL hyperframe" else "hyperframe", "with", ncases, ngettext(ncases, "row", "rows"), "and", nvars, ngettext(nvars, "column", "columns")) if(nvars == 0) return(invisible(NULL)) print(if(any(x$storage == "dfcolumn")) x$allcols else noquote(x$classes)) return(invisible(NULL)) } names.hyperframe <- function(x) { unclass(x)$vname } "names<-.hyperframe" <- function(x, value) { x <- unclass(x) stopifnot(is.character(value)) value <- make.names(value) if(length(value) != x$nvars) stop("Incorrect length for vector of names") vtype <- x$vtype names(x$df) <- value[vtype == "dfcolumn"] names(x$hyperatoms) <- value[vtype == "hyperatom"] names(x$hypercolumns) <- value[vtype == "hypercolumn"] x$vname <- value class(x) <- c("hyperframe", class(x)) return(x) } row.names.hyperframe <- function(x) { return(row.names(unclass(x)$df)) } "row.names<-.hyperframe" <- function(x, value) { y <- unclass(x) df <- y$df row.names(df) <- value y$df <- df class(y) <- c("hyperframe", class(y)) return(y) } ## conversion to hyperframe as.hyperframe <- function(x, ...) { UseMethod("as.hyperframe") } as.hyperframe.hyperframe <- function(x, ...) { return(x) } as.hyperframe.data.frame <- function(x, ..., stringsAsFactors=FALSE) { xlist <- if(missing(x)) NULL else as.list(x) do.call(hyperframe, resolve.defaults( xlist, list(...), list(row.names=rownames(x), stringsAsFactors=stringsAsFactors), .StripNull=TRUE)) } as.hyperframe.anylist <- as.hyperframe.listof <- function(x, ...) { if(!missing(x)) { xname <- sensiblevarname(short.deparse(substitute(x)), "x") xlist <- list(x) names(xlist) <- xname } else xlist <- NULL do.call(hyperframe, resolve.defaults( xlist, list(...), list(row.names=rownames(x)), .StripNull=TRUE)) } as.hyperframe.default <- function(x, ...) 
{ as.hyperframe(as.data.frame(x, ...)) } #### conversion to other types as.data.frame.hyperframe <- function(x, row.names = NULL, optional = FALSE, ..., discard=TRUE, warn=TRUE) { ux <- unclass(x) if(is.null(row.names)) row.names <- row.names(ux$df) vtype <- ux$vtype vclass <- ux$vclass dfcol <- (vtype == "dfcolumn") if(discard) { nhyper <- sum(!dfcol) if(nhyper > 0 && warn) warning(paste(nhyper, ngettext(nhyper, "variable", "variables"), "discarded in conversion to data frame")) df <- as.data.frame(ux$df, row.names=row.names, optional=optional, ...) } else { lx <- as.list(x) nrows <- ux$ncases vclassstring <- paren(vclass) if(any(!dfcol)) lx[!dfcol] <- lapply(as.list(vclassstring[!dfcol]), rep.int, times=nrows) df <- do.call(data.frame, append(lx, list(row.names=row.names))) colnames(df) <- ux$vname } return(df) } as.list.hyperframe <- function(x, ...) { ux <- unclass(x) out <- vector(mode="list", length=ux$nvars) vtype <- ux$vtype df <- ux$df if(any(dfcol <- (vtype == "dfcolumn"))) out[dfcol] <- as.list(df) if(any(hypcol <- (vtype == "hypercolumn"))) { hc <- lapply(ux$hypercolumns, as.solist, demote=TRUE) out[hypcol] <- hc } if(any(hatom <- (vtype == "hyperatom"))) { ha <- ux$hyperatoms names(ha) <- NULL hacol <- lapply(ha, list) hacol <- lapply(hacol, rep.int, times=ux$ncases) hacol <- lapply(hacol, as.solist, demote=TRUE) out[hatom] <- hacol } out <- lapply(out, "names<-", value=row.names(df)) names(out) <- names(x) return(out) } # evaluation eval.hyper <- function(e, h, simplify=TRUE, ee=NULL) { .Deprecated("with.hyperframe", package="spatstat") if(is.null(ee)) ee <- as.expression(substitute(e)) with.hyperframe(h, simplify=simplify, ee=ee) } with.hyperframe <- function(data, expr, ..., simplify=TRUE, ee=NULL, enclos=NULL) { if(!inherits(data, "hyperframe")) stop("data must be a hyperframe") if(is.null(ee)) ee <- as.expression(substitute(expr)) if(is.null(enclos)) enclos <- parent.frame() n <- nrow(data) out <- vector(mode="list", length=n) datalist <- 
as.list(data) for(i in 1:n) { rowi <- lapply(datalist, "[[", i=i) # ensures the result is always a list outi <- eval(ee, rowi, enclos) if(!is.null(outi)) out[[i]] <- outi } names(out) <- row.names(data) if(simplify && all(unlist(lapply(out, is.vector)))) { # if all results are atomic vectors of equal length, # return a matrix or vector. lenfs <- lengths(out) if(all(unlist(lapply(out, is.atomic))) && length(unique(lenfs)) == 1) { out <- t(as.matrix(as.data.frame(out))) row.names(out) <- row.names(data) out <- out[,,drop=TRUE] return(out) } } out <- hyperframe(result=out, row.names=row.names(data))$result return(out) } cbind.hyperframe <- function(...) { aarg <- list(...) narg <- length(aarg) if(narg == 0) return(hyperframe()) namarg <- names(aarg) if(is.null(namarg)) namarg <- rep.int("", narg) ishyper <- unlist(lapply(aarg, inherits, what="hyperframe")) isdf <- unlist(lapply(aarg, inherits, what="data.frame")) columns <- list() for(i in 1:narg) { if(ishyper[i] || isdf[i]){ if(ncol(aarg[[i]]) > 0) { newcolumns <- as.list(aarg[[i]]) if(namarg[i] != "") names(newcolumns) <- paste(namarg[i], ".", names(newcolumns), sep="") columns <- append(columns, newcolumns) } } else { nextcolumn <- list(aarg[[i]]) names(nextcolumn) <- namarg[i] columns <- append(columns, nextcolumn) } } result <- do.call(hyperframe, columns) return(result) } rbind.hyperframe <- function(...) { argh <- list(...) 
if(length(argh) == 0) return(NULL) # convert them all to hyperframes argh <- lapply(argh, as.hyperframe) # nargh <- length(argh) if(nargh == 1) return(argh[[1L]]) # check for compatibility of dimensions & names dfs <- lapply(argh, as.data.frame, discard=FALSE) dfall <- do.call(rbind, dfs) # check that data frame columns also match dfs0 <- lapply(argh, as.data.frame, discard=TRUE, warn=FALSE) df0all <- do.call(rbind, dfs0) # assemble data rslt <- list() nam <- names(dfall) nam0 <- names(df0all) for(k in seq_along(nam)) { nama <- nam[k] if(nama %in% nam0) { # data frame column: already made rslt[[k]] <- dfall[,k] } else { # hypercolumns or hyperatoms: extract them hdata <- lapply(argh, "[", j=nama, drop=FALSE) hdata <- lapply(lapply(hdata, as.list), getElement, name=nama) # append them hh <- hdata[[1L]] for(j in 2:nargh) { hh <- append(hh, hdata[[j]]) } rslt[[k]] <- hh } } # make hyperframe names(rslt) <- nam out <- do.call(hyperframe, append(rslt, list(stringsAsFactors=FALSE))) return(out) } plot.hyperframe <- function(x, e, ..., main, arrange=TRUE, nrows=NULL, ncols=NULL, parargs=list(mar=mar * marsize), marsize=1, mar=c(1,1,3,1)) { xname <- short.deparse(substitute(x)) main <- if(!missing(main)) main else xname mar <- rep(mar, 4)[1:4] if(missing(e)) { # default: plot first column that contains objects ok <- (summary(x)$storage %in% c("hypercolumn", "hyperatom")) if(any(ok)) { j <- min(which(ok)) x <- x[,j, drop=TRUE, strip=FALSE] x <- as.solist(x, demote=TRUE) plot(x, ..., main=main, arrange=arrange, nrows=nrows, ncols=ncols) return(invisible(NULL)) } else { # hyperframe does not contain any objects # invoke plot.data.frame x <- as.data.frame(x) plot(x, ..., main=main) return(invisible(NULL)) } } if(!is.language(e)) stop(paste("Argument e should be a call or an expression;", "use quote(...) 
or expression(...)")) ee <- as.expression(e) if(!arrange) { # No arrangement specified: just evaluate the plot expression 'nr' times with(x, ee=ee) return(invisible(NULL)) } # Arrangement # Decide whether to plot a main header banner <- (sum(nchar(as.character(main))) > 0) if(length(main) > 1) main <- paste(main, collapse="\n") nlines <- if(!is.character(main)) 1 else length(unlist(strsplit(main, "\n"))) # determine arrangement of plots # arrange like mfrow(nrows, ncols) plus a banner at the top n <- summary(x)$ncases if(is.null(nrows) && is.null(ncols)) { nrows <- as.integer(floor(sqrt(n))) ncols <- as.integer(ceiling(n/nrows)) } else if(!is.null(nrows) && is.null(ncols)) ncols <- as.integer(ceiling(n/nrows)) else if(is.null(nrows) && !is.null(ncols)) nrows <- as.integer(ceiling(n/ncols)) else stopifnot(nrows * ncols >= length(x)) nblank <- ncols * nrows - n # declare layout mat <- matrix(c(seq_len(n), numeric(nblank)), byrow=TRUE, ncol=ncols, nrow=nrows) heights <- rep.int(1, nrows) if(banner) { # Increment existing panel numbers # New panel 1 is the banner panels <- (mat > 0) mat[panels] <- mat[panels] + 1L mat <- rbind(rep.int(1,ncols), mat) heights <- c(0.1 * (1 + nlines), heights) } # initialise plot layout(mat, heights=heights) # plot banner if(banner) { opa <- par(mar=rep.int(0,4), xpd=TRUE) plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) cex <- resolve.defaults(list(...), list(cex.title=2))$cex.title text(0,0,main, cex=cex) } # plot panels npa <- do.call(par, parargs) if(!banner) opa <- npa with(x, ee=ee) # revert layout(1) par(opa) return(invisible(NULL)) } str.hyperframe <- function(object, ...) 
{ d <- dim(object) x <- unclass(object) argh <- resolve.defaults(list(...), list(nest.lev=0, indent.str=" ..")) cat(paste("'hyperframe':\t", d[1L], ngettext(d[1L], "row", "rows"), "and", d[2L], ngettext(d[2L], "column", "columns"), "\n")) nr <- d[1L] nc <- d[2L] if(nc > 0) { vname <- x$vname vclass <- x$vclass vtype <- as.character(x$vtype) indentstring <- with(argh, paste(rep.int(indent.str, nest.lev), collapse="")) for(j in 1:nc) { tag <- paste("$", vname[j]) switch(vtype[j], dfcolumn={ desc <- vclass[j] if(nr > 0) { vals <- object[1:min(nr,3),j,drop=TRUE] vals <- paste(paste(format(vals), collapse=" "), "...") } else vals <- "" }, hypercolumn=, hyperatom={ desc <- "objects of class" vals <- vclass[j] }) cat(paste(paste(indentstring, tag, sep=""), ":", desc, vals, "\n")) } } return(invisible(NULL)) } subset.hyperframe <- function(x, subset, select, ...) { stopifnot(is.hyperframe(x)) r <- if(missing(subset)) { rep_len(TRUE, nrow(x)) } else { r <- eval(substitute( with(x, e, enclos=parent.frame()), list(e=substitute(subset)))) if (!is.logical(r)) stop("'subset' must be logical") r & !is.na(r) } vars <- if(missing(select)) { TRUE } else { nl <- as.list(seq_len(ncol(x))) names(nl) <- names(x) eval(substitute(select), nl, parent.frame()) } nama <- names(x) names(nama) <- nama vars <- nama[vars] z <- x[i=r, j=vars, ...] return(z) } head.hyperframe <- function (x, n = 6L, ...) { stopifnot(length(n) == 1L) n <- if(n < 0L) max(nrow(x) + n, 0L) else min(n, nrow(x)) x[seq_len(n), , drop = FALSE] } tail.hyperframe <- function(x, n = 6L, ...) { stopifnot(length(n) == 1L) nrx <- nrow(x) n <- if(n < 0L) max(nrx + n, 0L) else min(n, nrx) sel <- seq.int(to = nrx, length.out = n) x[sel, , drop = FALSE] } edit.hyperframe <- function(name, ...) { x <- name isdf <- unclass(x)$vtype == "dfcolumn" if(!any(isdf)) { warning("No columns of editable data", call.=FALSE) return(x) } y <- x[,isdf] ynew <- edit(as.data.frame(y), ...) 
xnew <- x for(na in names(ynew)) xnew[,na] <- ynew[,na] losenames <- setdiff(names(y), names(ynew)) for(na in losenames) xnew[,na] <- NULL return(xnew) } spatstat/R/fryplot.R0000755000176200001440000000450013115271075014144 0ustar liggesusers# # fryplot.R # # $Revision: 1.15 $ $Date: 2017/02/07 07:22:47 $ # fryplot <- function(X, ..., width=NULL, from=NULL, to=NULL, axes=FALSE) { Xname <- short.deparse(substitute(X)) X <- as.ppp(X) b <- as.rectangle(X) halfspan <- with(b, c(diff(xrange), diff(yrange))) if(!is.null(width)) { halfwidth <- ensure2vector(width)/2 halfspan <- pmin.int(halfspan, halfwidth) } bb <- owin(c(-1,1) * halfspan[1L], c(-1,1) * halfspan[2L]) Y <- frypoints(X, from=from, to=to, dmax=diameter(bb))[bb] do.call(plot.ppp, resolve.defaults(list(x=Y), list(...), list(main=paste("Fry plot of", Xname)))) if(axes) { lines(c(0,0), c(-1,1) * halfspan[1L]) lines(c(-1,1) * halfspan[2L], c(0,0)) } return(invisible(NULL)) } frypoints <- function(X, from=NULL, to=NULL, dmax=Inf) { X <- as.ppp(X) b <- as.rectangle(X) bb <- owin(c(-1,1) * diff(b$xrange), c(-1,1) * diff(b$yrange)) n <- X$n xx <- X$x yy <- X$y ## determine (dx, dy) for all relevant pairs if(is.null(from) && is.null(to)) { if(is.infinite(dmax)) { dx <- outer(xx, xx, "-") dy <- outer(yy, yy, "-") notsame <- matrix(TRUE, n, n) diag(notsame) <- FALSE DX <- as.vector(dx[notsame]) DY <- as.vector(dy[notsame]) I <- row(notsame)[notsame] } else { cl <- closepairs(X, dmax) DX <- cl$dx DY <- cl$dy I <- cl$j ## sic: I is the index of the 'TO' element } } else { seqn <- seq_len(n) from <- if(is.null(from)) seqn else seqn[from] to <- if(is.null(to)) seqn else seqn[to] if(is.infinite(dmax)) { dx <- outer(xx[to], xx[from], "-") dy <- outer(yy[to], yy[from], "-") notsame <- matrix(TRUE, n, n) diag(notsame) <- FALSE notsame <- notsame[to, from, drop=FALSE] DX <- as.vector(dx[notsame]) DY <- as.vector(dy[notsame]) I <- row(notsame)[notsame] } else { cl <- crosspairs(X[from], X[to], dmax) ok <- with(cl, from[i] != 
to[j]) DX <- cl$dx[ok] DY <- cl$dy[ok] I <- cl$j[ok] } } ## form into point pattern Fry <- ppp(DX, DY, window=bb, check=FALSE) if(is.marked(X)) { marx <- as.data.frame(marks(X)) marxto <- if(is.null(to)) marx else marx[to, ,drop=FALSE] marks(Fry) <- marxto[I, ] } return(Fry) } spatstat/R/indices.R0000644000176200001440000001620213115225157014062 0ustar liggesusers#' #' indices.R #' #' Code for handling vector/array indices #' #' $Revision: 1.7 $ $Date: 2017/02/07 07:47:20 $ #' grokIndexVector <- function(ind, len, nama=NULL) { #' Parse any kind of index vector, #' returning #' a logical index 'lo' (the subset of elements), #' a positive integer index 'i' ( = which(lo) ), #' the number 'n' of values required #' the number 'nind' of values indexed #' and if appropriate #' a character vector 's' of names #' a mapping 'map' (matching 'ind' to 'i') #' #' There are two versions: #' 'strict' (confined to specified bounds 1:len and specified names 'nama') #' 'full' (allowing implied extension of array bounds) named <- !is.null(nama) if(missing(len) && named) len <- length(nama) force(len) # special cases if(is.null(ind)) { #' all entries (implied) return(list(strict=list(lo=rep(TRUE, len), i=seq_len(len), n=len, s=nama, nind=len, map=NULL))) } if(length(ind) == 0) { #' no entries return(list(strict=list(lo=logical(len), i=integer(0), n=0L, s=character(0), nind=0L, map=NULL))) } #' main cases if(is.logical(ind)) { # logical (subset) index into 1:len lo <- ind m <- length(lo) if(m < len) { #' recycle oldlo <- lo lo <- logical(len) lo[oldlo] <- TRUE m <- len } if(m == len) { n <- sum(lo) result <- list(strict=list(lo=lo, i=which(lo), n=n, s=nama, nind=n, map=NULL)) return(result) } #' new elements implied lostrict <- lo[1:len] newones <- (len+1L):m nstrict <- sum(lostrict) strict <- list(lo=lostrict, i=which(lostrict), n=nstrict, s=nama, nind=nstrict, map=NULL) nfull <- sum(lo) full <- list(newones=newones, fullset=1:m, lo=lo, i=which(lo), n=nfull, s=if(named) c(nama, rep("", 
length(newones))) else NULL, nind=nfull, map=NULL) return(list(strict=strict, full=full)) } if(is.character(ind)) { #' character index into 'nama' #' order is important imap <- match(ind, nama) unknown <- is.na(imap) i <- sort(unique(imap[!unknown])) lo <- logical(len) lo[i] <- TRUE map <- match(imap, i) n <- length(ind) s <- nama[map] nind <- length(ind) if(identical(map, seq_along(map))) map <- NULL strict <- list(lo=lo, i=i, n=n, s=s, nind, map=map) if(!any(unknown)) return(list(strict=strict)) # some unrecognised strings newones <- unique(ind[unknown]) fullset <- c(nama, newones) imapfull <- match(ind, fullset) ifull <- sort(unique(imapfull)) lofull <- logical(length(fullset)) lofull[ifull] <- TRUE mapfull <- match(imapfull, ifull) nfull <- length(ind) sfull <- fullset[mapfull] if(identical(mapfull, seq_along(mapfull))) mapfull <- NULL full <- list(newones=newones, fullset=fullset, lo=lofull, i=ifull, n=nfull, s=sfull, nind=nind, map=mapfull) return(list(strict=strict, full=full)) } if(is.numeric(ind)) { if(all(ind > 0)) { #' integer index into 1:len #' order is important ifull <- sort(unique(ind)) inside <- (ifull <= len) i <- ifull[inside] map <- match(ind, i) lo <- logical(len) lo[i] <- TRUE n <- length(ind) s <- nama[ind] if(identical(map, seq_along(map))) map <- NULL strict <- list(lo=lo,i=i,n=n,s=s,nind=length(i),map=map) if(all(inside)) return(list(strict=strict)) newones <- ifull[!inside] mapfull <- match(ind, ifull) fullset <- 1:max(ifull) lofull <- logical(length(fullset)) lofull[ifull] <- TRUE nfull <- length(ind) sfull <- if(named) c(nama, rep("", length(newones)))[ind] else NULL if(identical(mapfull, seq_along(mapfull))) mapfull <- NULL return(list(strict=strict, full=list(newones=newones, fullset=fullset, lo=lofull, i=ifull, n=nfull, s=sfull, nind=nfull, map=mapfull))) } if(all(ind < 0)) { #' exclusion index #' ignore indices outside bounds negind <- -ind negind <- negind[negind <= len] lo <- rep(TRUE, len) lo[negind] <- FALSE i <- which(lo) n <- 
replacementIndex <- function(ii, stuff) {
  ## 'stuff' is predigested information about a subset index
  ## (as produced by grokIndexVector).
  ## Find the location in the original array whose value should be
  ## replaced by the 'ii'-th replacement value according to this info.
  with(stuff, {
    jj <- if(is.null(map)) ii else map[ii]
    i[jj]
  })
}

positiveIndex <- function(i, nama, len=length(nama)) {
  ## Convert any kind of index to a positive integer sequence.
  full <- seq_len(len)
  if(is.null(i)) return(full)
  stopifnot(is.vector(i))
  ## numeric indices outside the array bounds are dropped with a warning
  if(is.numeric(i) && !all(inrange <- (abs(i) <= len))) {
    warning("Index values lie outside array bounds", call.=FALSE)
    i <- i[inrange]
  }
  names(full) <- nama
  unname(full[i])
}

logicalIndex <- function(i, nama, len=length(nama)) {
  ## Convert any kind of index to a logical inclusion vector.
  if(is.null(i)) return(rep(TRUE, len))
  stopifnot(is.vector(i))
  ## numeric indices outside the array bounds are dropped with a warning
  if(is.numeric(i) && !all(inrange <- (abs(i) <= len))) {
    warning("Index values lie outside array bounds", call.=FALSE)
    i <- i[inrange]
  }
  out <- logical(len)
  names(out) <- nama
  out[i] <- TRUE
  unname(out)
}
logical I <- grokIndexVector(I, npoints(X))$strict$lo if(anyNA(I)) { #' illegal entries whinge <- paste("Indices in", sQuote(Iname), "exceed array limits") if(fatal) stop(whinge, call.=FALSE) warning(whinge, call.=FALSE) return(NULL) } return(I) } spatstat/R/nndist.R0000644000176200001440000002500313115271120013731 0ustar liggesusers# # nndist.R # # nearest neighbour distances (nndist) and identifiers (nnwhich) # # $Revision: 1.8 $ $Date: 2017/06/05 10:31:58 $ # nndist <- function(X, ...) { UseMethod("nndist") } nndist.ppp <- local({ nndist.ppp <- function(X, ..., k=1, by=NULL, method="C") { verifyclass(X, "ppp") trap.extra.arguments(..., .Context="In nndist.ppp") if(is.null(by)) # usual case return(nndist.default(X$x, X$y, k=k, by=by, method=method)) return(nndistby(X, k=k, by=by)) } nndistby <- function(X, k, by) { # split by factor idX <- seq_len(npoints(X)) Y <- split(X %mark% idX, f=by, un=FALSE) distY <- lapply(Y, nndistsub, XX=X, iX=idX, k=k) result <- do.call(cbind, distY) return(result) } nndistsub <- function(Z, XX, iX, k) { nncross(XX, Z, iX=iX, iY=marks(Z), k=k, what="dist") } nndist.ppp }) nndist.default <- function(X, Y=NULL, ..., k=1, by=NULL, method="C") { # computes the vector of nearest-neighbour distances # for the pattern of points (x[i],y[i]) # xy <- xy.coords(X,Y)[c("x","y")] x <- xy$x y <- xy$y # validate n <- length(x) if(length(y) != n) stop("lengths of x and y do not match") # other arguments ignored trap.extra.arguments(..., .Context="In nndist.default") # split by factor ? 
if(!is.null(by)) { X <- as.ppp(xy, W=boundingbox) return(nndist(X, by=by, k=k)) } # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # trivial cases if(n <= 1) { # empty pattern => return numeric(0) # or pattern with only 1 point => return Inf nnd <- matrix(Inf, nrow=n, ncol=kmax) nnd <- nnd[,k, drop=TRUE] return(nnd) } # number of neighbours that are well-defined kmaxcalc <- min(n-1, kmax) # calculate k-nn distances for k <= kmaxcalc if(kmaxcalc == 1) { # calculate nearest neighbour distance only switch(method, interpreted={ # matrix of squared distances between all pairs of points sq <- function(a, b) { (a-b)^2 } squd <- outer(x, x, sq) + outer(y, y, sq) # reset diagonal to a large value so it is excluded from minimum diag(squd) <- Inf # nearest neighbour distances nnd <- sqrt(apply(squd,1,min)) }, C={ nnd<-numeric(n) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) z<- .C("nndistsort", n= as.integer(n), x= as.double(x[o]), y= as.double(y[o]), nnd= as.double(nnd), as.double(big), PACKAGE = "spatstat") nnd[o] <- z$nnd }, stop(paste("Unrecognised method", sQuote(method))) ) } else { # case kmaxcalc > 1 switch(method, interpreted={ if(n <= 1000) { # form n x n matrix of squared distances D2 <- pairdist.default(x, y, method=method, squared=TRUE) # find k'th smallest squared distance diag(D2) <- Inf NND2 <- t(apply(D2, 1, sort))[, 1:kmaxcalc] nnd <- sqrt(NND2) } else { # avoid creating huge matrix # handle one row of D at a time NND2 <- matrix(numeric(n * kmaxcalc), nrow=n, ncol=kmaxcalc) for(i in seq_len(n)) { D2i <- (x - x[i])^2 + (y - y[i])^2 D2i[i] <- Inf NND2[i,] <- sort(D2i)[1:kmaxcalc] } nnd <- sqrt(NND2) } }, C={ nnd<-numeric(n * kmaxcalc) o <- fave.order(y) 
big <- sqrt(.Machine$double.xmax) z<- .C("knndsort", n = as.integer(n), kmax = as.integer(kmaxcalc), x = as.double(x[o]), y = as.double(y[o]), nnd = as.double(nnd), huge = as.double(big), PACKAGE = "spatstat") nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc) nnd[o, ] <- matrix(z$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE) }, stop(paste("Unrecognised method", sQuote(method))) ) } # post-processing if(kmax > kmaxcalc) { # add columns of Inf infs <- matrix(Inf, nrow=n, ncol=kmax-kmaxcalc) nnd <- cbind(nnd, infs) } if(kmax > 1) colnames(nnd) <- paste0("dist.", 1:kmax) if(length(k) < kmax) { # select only the specified columns nnd <- nnd[, k, drop=TRUE] } return(nnd) } nnwhich <- function(X, ...) { UseMethod("nnwhich") } nnwhich.ppp <- local({ nnwhich.ppp <- function(X, ..., k=1, by=NULL, method="C") { verifyclass(X, "ppp") trap.extra.arguments(..., .Context="In nnwhich.ppp") if(is.null(by)) return(nnwhich.default(X$x, X$y, k=k, method=method)) return(nnwhichby(X, k=k, by=by)) } nnwhichby <- function(X, k, by) { # split by factor idX <- seq_len(npoints(X)) Y <- split(X %mark% idX, f=by, un=FALSE) whichY <- lapply(Y, nnwhichsub, XX=X, iX=idX, k=k) result <- do.call(cbind, whichY) return(result) } nnwhichsub <- function(Z, XX, iX, k) { # marks(Z) gives original serial numbers of subset Z iY <- marks(Z) Zid <- nncross(XX, Z, iX=iX, iY=iY, k=k, what="which") nk <- length(k) if(nk == 1) { Yid <- iY[Zid] } else { Zid <- as.vector(as.matrix(Zid)) Yid <- iY[Zid] Yid <- data.frame(which=matrix(Yid, ncol=nk)) } return(Yid) } nnwhich.ppp }) nnwhich.default <- function(X, Y=NULL, ..., k=1, by=NULL, method="C") { # identifies nearest neighbour of each point in # the pattern of points (x[i],y[i]) # xy <- xy.coords(X,Y)[c("x","y")] x <- xy$x y <- xy$y # validate n <- length(x) if(length(y) != n) stop("lengths of x and y do not match") # other arguments ignored trap.extra.arguments(..., .Context="In nnwhich.default") # split by factor ? 
if(!is.null(by)) { X <- as.ppp(xy, W=boundingbox) return(nnwhich(X, by=by, k=k)) } # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # special cases if(n <= 1) { # empty pattern => return integer(0) # or pattern with only 1 point => return NA nnw <- matrix(as.integer(NA), nrow=n, ncol=kmax) nnw <- nnw[,k, drop=TRUE] return(nnw) } # number of neighbours that are well-defined kmaxcalc <- min(n-1, kmax) # identify k-nn for k <= kmaxcalc if(kmaxcalc == 1) { # identify nearest neighbour only switch(method, interpreted={ # matrix of squared distances between all pairs of points sq <- function(a, b) { (a-b)^2 } squd <- outer(x, x, sq) + outer(y, y, sq) # reset diagonal to a large value so it is excluded from minimum diag(squd) <- Inf # nearest neighbours nnw <- apply(squd,1,which.min) }, C={ nnw <- integer(n) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) z<- .C("nnwhichsort", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") witch <- z$nnwhich # sic if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") nnw[o] <- o[witch] }, stop(paste("Unrecognised method", sQuote(method))) ) } else { # case kmaxcalc > 1 switch(method, interpreted={ if(n <= 1000) { # form n x n matrix of squared distances D2 <- pairdist.default(x, y, method=method, squared=TRUE) # find k'th smallest squared distance diag(D2) <- Inf nnw <- t(apply(D2, 1, fave.order))[, 1:kmaxcalc] } else { # avoid creating huge matrix # handle one row of D at a time nnw <- matrix(as.integer(NA), nrow=n, ncol=kmaxcalc) for(i in 
seq_len(n)) { D2i <- (x - x[i])^2 + (y - y[i])^2 D2i[i] <- Inf nnw[i,] <- fave.order(D2i)[1:kmaxcalc] } } }, C={ nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) z<- .C("knnsort", n = as.integer(n), kmax = as.integer(kmaxcalc), x = as.double(x[o]), y = as.double(y[o]), nnd = as.double(numeric(n * kmaxcalc)), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") witch <- z$nnwhich # sic witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE) if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") # convert back to original ordering nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc) }, stop(paste("Unrecognised method", sQuote(method))) ) } # post-processing if(kmax > kmaxcalc) { # add columns of NA's nas <- matrix(as.numeric(NA), nrow=n, ncol=kmax-kmaxcalc) nnw <- cbind(nnw, nas) } if(kmax > 1) colnames(nnw) <- paste0("which.", 1:kmax) if(length(k) < kmax) { # select only the specified columns nnw <- nnw[, k, drop=TRUE] } return(nnw) } spatstat/R/clusterinfo.R0000644000176200001440000007123313115644030015001 0ustar liggesusers## lookup table of explicitly-known K functions and pcf ## and algorithms for computing sensible starting parameters .Spatstat.ClusterModelInfoTable <- list( Thomas=list( ## Thomas process: old par = (kappa, sigma2) (internally used everywhere) ## Thomas process: new par = (kappa, scale) (officially recommended for input/output) modelname = "Thomas process", # In modelname field of mincon fv obj. descname = "Thomas process", # In desc field of mincon fv obj. modelabbrev = "Thomas process", # In fitted obj. printmodelname = function(...) 
"Thomas process", # Used by print.kppm parnames = c("kappa", "sigma2"), clustargsnames = NULL, checkpar = function(par, old = TRUE){ if(is.null(par)) par <- c(kappa=1,scale=1) if(any(par<=0)) stop("par values must be positive.") nam <- check.named.vector(par, c("kappa","sigma2"), onError="null") if(is.null(nam)) { check.named.vector(par, c("kappa","scale")) names(par)[2L] <- "sigma2" par[2L] <- par[2L]^2 } if(!old){ names(par)[2L] <- "scale" par[2L] <- sqrt(par[2L]) } return(par) }, checkclustargs = function(margs, old = TRUE) list(), resolvedots = function(...){ ## resolve dots for kppm and friends allowing for old/new par syntax dots <- list(...) nam <- names(dots) out <- list() if("ctrl" %in% nam){ out$ctrl <- dots$ctrl } else{ out$ctrl <- dots[nam %in% c("p", "q", "rmin", "rmax")] } chk <- .Spatstat.ClusterModelInfoTable$Thomas$checkpar if(!is.null(dots$startpar)) out$startpar <- chk(dots$startpar) return(out) }, # density function for the distance to offspring ddist = function(r, scale, ...) { 2 * pi * r * dnorm(r, 0, scale)/sqrt(2*pi*scale^2) }, ## Practical range of clusters range = function(...){ dots <- list(...) par <- dots$par # Choose the first of the possible supplied values for scale: scale <- c(dots$scale, dots$par[["scale"]], dots$sigma, dots$par[["sigma"]])[1L] if(is.null(scale)) stop("Argument ", sQuote("scale"), " must be given.") thresh <- dots$thresh if(!is.null(thresh)){ ## The squared length of isotropic Gaussian (sigma) ## is exponential with mean 2 sigma^2 rmax <- scale * sqrt(2 * qexp(thresh, lower.tail=FALSE)) ## old code ## ddist <- .Spatstat.ClusterModelInfoTable$Thomas$ddist ## kernel0 <- clusterkernel("Thomas", scale = scale)(0,0) ## f <- function(r) ddist(r, scale = scale)-thresh*kernel0 ## rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root } else{ rmax <- 4*scale } return(rmax) }, kernel = function(par, rvals, ...) 
{ scale <- sqrt(par[2L]) dnorm(rvals, 0, scale)/sqrt(2*pi*scale^2) }, isPCP=TRUE, ## K-function K = function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) pi*rvals^2+(1-exp(-rvals^2/(4*par[2L])))/par[1L] }, ## pair correlation function pcf= function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) 1 + exp(-rvals^2/(4 * par[2L]))/(4 * pi * par[1L] * par[2L]) }, ## sensible starting parameters selfstart = function(X) { kappa <- intensity(X) sigma2 <- 4 * mean(nndist(X))^2 c(kappa=kappa, sigma2=sigma2) }, ## meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] sigma <- sqrt(par[["sigma2"]]) mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, sigma=sigma, mu=mu) }, ## Experimental: convert to/from canonical cluster parameters tocanonical = function(par) { kappa <- par[[1L]] sigma2 <- par[[2L]] c(strength=1/(kappa * sigma2), scale=sqrt(sigma2)) }, tohuman = function(can) { strength <- can[[1L]] scale <- can[[2L]] sigma2 <- scale^2 c(kappa=1/(strength * sigma2), sigma2=sigma2) } ), ## ............................................... MatClust=list( ## Matern Cluster process: old par = (kappa, R) (internally used everywhere) ## Matern Cluster process: new par = (kappa, scale) (officially recommended for input/output) modelname = "Matern cluster process", # In modelname field of mincon fv obj. descname = "Matern cluster process", # In desc field of mincon fv obj. modelabbrev = "Matern cluster process", # In fitted obj. printmodelname = function(...) 
"Matern cluster process", # Used by print.kppm parnames = c("kappa", "R"), clustargsnames = NULL, checkpar = function(par, old = TRUE){ if(is.null(par)) par <- c(kappa=1,scale=1) if(any(par<=0)) stop("par values must be positive.") nam <- check.named.vector(par, c("kappa","R"), onError="null") if(is.null(nam)) { check.named.vector(par, c("kappa","scale")) names(par)[2L] <- "R" } if(!old){ names(par)[2L] <- "scale" } return(par) }, # density function for the distance to offspring ddist = function(r, scale, ...) { ifelse(r>scale, 0, 2 * r / scale^2) }, ## Practical range of clusters range = function(...){ dots <- list(...) par <- dots$par # Choose the first of the possible supplied values for scale: scale <- c(dots$scale, dots$par[["scale"]], dots$R, dots$par[["R"]])[1L] if(is.null(scale)) stop("Argument ", sQuote("scale"), " must be given.") if(!is.null(dots$thresh)) warning("Argument ", sQuote("thresh"), " is ignored for Matern Cluster model") return(scale) }, checkclustargs = function(margs, old = TRUE) list(), resolvedots = function(...){ ## resolve dots for kppm and friends allowing for old/new par syntax dots <- list(...) nam <- names(dots) out <- list() if("ctrl" %in% nam){ out$ctrl <- dots$ctrl } else{ out$ctrl <- dots[nam %in% c("p", "q", "rmin", "rmax")] } chk <- .Spatstat.ClusterModelInfoTable$MatClust$checkpar if(!is.null(dots$startpar)) out$startpar <- chk(dots$startpar) return(out) }, kernel = function(par, rvals, ...) 
{ scale <- par[2L] ifelse(rvals>scale, 0, 1/(pi*scale^2)) }, isPCP=TRUE, K = function(par,rvals, ..., funaux){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) kappa <- par[1L] R <- par[2L] Hfun <- funaux$Hfun y <- pi * rvals^2 + (1/kappa) * Hfun(rvals/(2 * R)) return(y) }, pcf= function(par,rvals, ..., funaux){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) kappa <- par[1L] R <- par[2L] g <- funaux$g y <- 1 + (1/(pi * kappa * R^2)) * g(rvals/(2 * R)) return(y) }, funaux=list( Hfun=function(zz) { ok <- (zz < 1) h <- numeric(length(zz)) h[!ok] <- 1 z <- zz[ok] h[ok] <- 2 + (1/pi) * ( (8 * z^2 - 4) * acos(z) - 2 * asin(z) + 4 * z * sqrt((1 - z^2)^3) - 6 * z * sqrt(1 - z^2) ) return(h) }, DOH=function(zz) { ok <- (zz < 1) h <- numeric(length(zz)) h[!ok] <- 0 z <- zz[ok] h[ok] <- (16/pi) * (z * acos(z) - (z^2) * sqrt(1 - z^2)) return(h) }, ## g(z) = DOH(z)/z has a limit at z=0. g=function(zz) { ok <- (zz < 1) h <- numeric(length(zz)) h[!ok] <- 0 z <- zz[ok] h[ok] <- (2/pi) * (acos(z) - z * sqrt(1 - z^2)) return(h) }), ## sensible starting paramters selfstart = function(X) { kappa <- intensity(X) R <- 2 * mean(nndist(X)) c(kappa=kappa, R=R) }, ## meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] R <- par[["R"]] mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, R=R, mu=mu) } ), ## ............................................... Cauchy=list( ## Neyman-Scott with Cauchy clusters: old par = (kappa, eta2) (internally used everywhere) ## Neyman-Scott with Cauchy clusters: new par = (kappa, scale) (officially recommended for input/output) modelname = "Neyman-Scott process with Cauchy kernel", # In modelname field of mincon fv obj. descname = "Neyman-Scott process with Cauchy kernel", # In desc field of mincon fv obj. modelabbrev = "Cauchy process", # In fitted obj. printmodelname = function(...) 
"Cauchy process", # Used by print.kppm parnames = c("kappa", "eta2"), clustargsnames = NULL, checkpar = function(par, old = TRUE){ if(is.null(par)) par <- c(kappa=1,scale=1) if(any(par<=0)) stop("par values must be positive.") nam <- check.named.vector(par, c("kappa","eta2"), onError="null") if(is.null(nam)) { check.named.vector(par, c("kappa","scale")) names(par)[2L] <- "eta2" par[2L] <- (2*par[2L])^2 } if(!old){ names(par)[2L] <- "scale" par[2L] <- sqrt(par[2L])/2 } return(par) }, checkclustargs = function(margs, old = TRUE) list(), resolvedots = function(...){ ## resolve dots for kppm and friends allowing for old/new par syntax dots <- list(...) nam <- names(dots) out <- list() if("ctrl" %in% nam){ out$ctrl <- dots$ctrl } else{ out$ctrl <- dots[nam %in% c("p", "q", "rmin", "rmax")] } chk <- .Spatstat.ClusterModelInfoTable$Cauchy$checkpar if(!is.null(dots$startpar)) out$startpar <- chk(dots$startpar) return(out) }, # density function for the distance to offspring ddist = function(r, scale, ...) { r/(scale^2) * (1 + (r / scale)^2)^(-3/2) }, ## Practical range of clusters range = function(...){ dots <- list(...) # Choose the first of the possible supplied values for scale: scale <- c(dots$scale, dots$par[["scale"]])[1L] if(is.null(scale)) stop("Argument ", sQuote("scale"), " must be given.") thresh <- dots$thresh %orifnull% 0.01 ## integral of ddist(r) dr is 1 - (1+(r/scale)^2)^(-1/2) ## solve for integral = 1-thresh: rmax <- scale * sqrt(1/thresh^2 - 1) ## old code ## ddist <- .Spatstat.ClusterModelInfoTable$Cauchy$ddist ## kernel0 <- clusterkernel("Cauchy", scale = scale)(0,0) ## f <- function(r) ddist(r, scale = scale)-thresh*kernel0 ## rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root return(rmax) }, kernel = function(par, rvals, ...) 
{ scale <- sqrt(par[2L])/2 1/(2*pi*scale^2)*((1 + (rvals/scale)^2)^(-3/2)) }, isPCP=TRUE, K = function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) pi*rvals^2 + (1 - 1/sqrt(1 + rvals^2/par[2L]))/par[1L] }, pcf= function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) 1 + ((1 + rvals^2/par[2L])^(-1.5))/(2 * pi * par[2L] * par[1L]) }, selfstart = function(X) { kappa <- intensity(X) eta2 <- 4 * mean(nndist(X))^2 c(kappa = kappa, eta2 = eta2) }, ## meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] omega <- sqrt(par[["eta2"]])/2 mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, omega=omega, mu=mu) } ), ## ............................................... VarGamma=list( ## Neyman-Scott with VarianceGamma/Bessel clusters: old par = (kappa, eta) (internally used everywhere) ## Neyman-Scott with VarianceGamma/Bessel clusters: new par = (kappa, scale) (officially recommended for input/output) modelname = "Neyman-Scott process with Variance Gamma kernel", # In modelname field of mincon fv obj. descname = "Neyman-Scott process with Variance Gamma kernel", # In desc field of mincon fv obj. modelabbrev = "Variance Gamma process", # In fitted obj. 
printmodelname = function(obj){ # Used by print.kppm paste0("Variance Gamma process (nu=", signif(obj$clustargs[["nu"]], 2), ")") }, parnames = c("kappa", "eta"), clustargsnames = "nu", checkpar = function(par, old = TRUE){ if(is.null(par)) par <- c(kappa=1,scale=1) if(any(par<=0)) stop("par values must be positive.") nam <- check.named.vector(par, c("kappa","eta"), onError="null") if(is.null(nam)) { check.named.vector(par, c("kappa","scale")) names(par)[2L] <- "eta" } if(!old) names(par)[2L] <- "scale" return(par) }, checkclustargs = function(margs, old = TRUE){ if(!old) margs <- list(nu=margs$nu.ker) return(margs) }, resolvedots = function(...){ ## resolve dots for kppm and friends allowing for old/new par syntax dots <- list(...) nam <- names(dots) out <- list() if("ctrl" %in% nam){ out$ctrl <- dots$ctrl } else{ out$ctrl <- dots[nam %in% c("p", "q", "rmin", "rmax")] } chk <- .Spatstat.ClusterModelInfoTable$VarGamma$checkpar if(!is.null(dots$startpar)) out$startpar <- chk(dots$startpar) nu <- dots$nu if(is.null(nu)){ nu <- try(resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker, silent = TRUE) if(inherits(nu, "try-error")) nu <- -1/4 } else{ check.1.real(nu) stopifnot(nu > -1/2) } out$margs <- list(nu.ker=nu, nu.pcf=2*nu+1) out$covmodel <- list(type="Kernel", model="VarGamma", margs=out$margs) return(out) }, # density function for the distance to offspring ddist = function(r, scale, nu, ...) { numer <- ((r/scale)^(nu+1)) * besselK(r/scale, nu) numer[r==0] <- 0 denom <- (2^nu) * scale * gamma(nu + 1) numer/denom }, ## Practical range of clusters range = function(...){ dots <- list(...) # Choose the first of the possible supplied values for scale: scale <- c(dots$scale, dots$par[["scale"]])[1L] if(is.null(scale)) stop("Argument ", sQuote("scale"), " must be given.") # Find value of nu: extra <- .Spatstat.ClusterModelInfoTable$VarGamma$resolvedots(...) 
nu <- .Spatstat.ClusterModelInfoTable$VarGamma$checkclustargs(extra$margs, old=FALSE)$nu if(is.null(nu)) stop("Argument ", sQuote("nu"), " must be given.") thresh <- dots$thresh if(is.null(thresh)) thresh <- .001 ddist <- .Spatstat.ClusterModelInfoTable$VarGamma$ddist f1 <- function(rmx) { integrate(ddist, 0, rmx, scale=scale, nu=nu)$value - (1 - thresh) } f <- Vectorize(f1) ## old code ## kernel0 <- clusterkernel("VarGamma", scale = scale, nu = nu)(0,0) ## f <- function(r) ddist(r, scale = scale, nu = nu) - thresh*kernel0 rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root return(rmax) }, ## kernel function in polar coordinates (no angular argument). kernel = function(par, rvals, ..., margs) { scale <- as.numeric(par[2L]) nu <- margs$nu if(is.null(nu)) stop("Argument ", sQuote("nu"), " is missing.") numer <- ((rvals/scale)^nu) * besselK(rvals/scale, nu) numer[rvals==0] <- ifelse(nu>0, 2^(nu-1)*gamma(nu), Inf) denom <- pi * (2^(nu+1)) * scale^2 * gamma(nu + 1) numer/denom }, isPCP=TRUE, K = local({ ## K function requires integration of pair correlation xgx <- function(x, par, nu.pcf) { ## x * pcf(x) without check on par values numer <- (x/par[2L])^nu.pcf * besselK(x/par[2L], nu.pcf) denom <- 2^(nu.pcf+1) * pi * par[2L]^2 * par[1L] * gamma(nu.pcf + 1) return(x * (1 + numer/denom)) } vargammaK <- function(par,rvals, ..., margs){ ## margs = list(.. nu.pcf.. ) if(any(par <= 0)) return(rep.int(Inf, length(rvals))) nu.pcf <- margs$nu.pcf out <- numeric(length(rvals)) ok <- (rvals > 0) rvalsok <- rvals[ok] outok <- numeric(sum(ok)) for (i in 1:length(rvalsok)) outok[i] <- 2 * pi * integrate(xgx, lower=0, upper=rvalsok[i], par=par, nu.pcf=nu.pcf)$value out[ok] <- outok return(out) } ## Initiated integration in sub-subintervals, but it is unfinished! ## vargammaK <- function(par,rvals, ..., margs){ ## ## margs = list(.. nu.pcf.. 
) ## if(any(par <= 0)) ## return(rep.int(Inf, length(rvals))) ## nu.pcf <- margs$nu.pcf ## out <- numeric(length(rvals)) ## out[1L] <- if(rvals[1L] == 0) 0 else ## integrate(xgx, lower=0, upper=rvals[1L], ## par = par, nu.pcf=nu.pcf)$value ## for (i in 2:length(rvals)) { ## delta <- integrate(xgx, ## lower=rvals[i-1L], upper=rvals[i], ## par=par, nu.pcf=nu.pcf) ## out[i]=out[i-1L]+delta$value ## } ## return(out) ## } vargammaK }), ## end of 'local' pcf= function(par,rvals, ..., margs){ ## margs = list(..nu.pcf..) if(any(par <= 0)) return(rep.int(Inf, length(rvals))) nu.pcf <- margs$nu.pcf sig2 <- 1 / (4 * pi * (par[2L]^2) * nu.pcf * par[1L]) denom <- 2^(nu.pcf - 1) * gamma(nu.pcf) rr <- rvals / par[2L] ## Matern correlation function fr <- ifelseXB(rr > 0, (rr^nu.pcf) * besselK(rr, nu.pcf) / denom, 1) return(1 + sig2 * fr) }, parhandler = function(..., nu.ker = -1/4) { check.1.real(nu.ker) stopifnot(nu.ker > -1/2) nu.pcf <- 2 * nu.ker + 1 return(list(type="Kernel", model="VarGamma", margs=list(nu.ker=nu.ker, nu.pcf=nu.pcf))) }, ## sensible starting values selfstart = function(X) { kappa <- intensity(X) eta <- 2 * mean(nndist(X)) c(kappa=kappa, eta=eta) }, ## meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] omega <- par[["eta"]] mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, omega=omega, mu=mu) } ), ## ............................................... LGCP=list( ## Log Gaussian Cox process: old par = (sigma2, alpha) (internally used everywhere) ## Log Gaussian Cox process: new par = (var, scale) (officially recommended for input/output) modelname = "Log-Gaussian Cox process", # In modelname field of mincon fv obj. descname = "LGCP", # In desc field of mincon fv obj. modelabbrev = "log-Gaussian Cox process", # In fitted obj. printmodelname = function(...) 
"log-Gaussian Cox process", # Used by print.kppm parnames = c("sigma2", "alpha"), checkpar = function(par, old = TRUE){ if(is.null(par)) par <- c(var=1,scale=1) if(any(par<=0)) stop("par values must be positive.") nam <- check.named.vector(par, c("sigma2","alpha"), onError="null") if(is.null(nam)) { check.named.vector(par, c("var","scale")) names(par) <- c("sigma2", "alpha") } if(!old) names(par) <- c("var", "scale") return(par) }, checkclustargs = function(margs, old = TRUE) return(margs), resolvedots = function(...){ ## resolve dots for kppm and friends allowing for old/new par syntax dots <- list(...) nam <- names(dots) out <- list() if("ctrl" %in% nam){ out$ctrl <- dots$ctrl } else{ out$ctrl <- dots[nam %in% c("p", "q", "rmin", "rmax")] } chk <- .Spatstat.ClusterModelInfoTable$LGCP$checkpar if(!is.null(dots$startpar)) out$startpar <- chk(dots$startpar) cmod <- dots$covmodel model <- cmod$model %orifnull% dots$model %orifnull% "exponential" margs <- NULL if(!identical(model, "exponential")) { ## get the 'model generator' modgen <- getRandomFieldsModelGen(model) attr(model, "modgen") <- modgen if(is.null(cmod)){ margsnam <- names(formals(modgen)) margsnam <- margsnam[!(margsnam %in% c("var", "scale"))] margs <- dots[nam %in% margsnam] } else{ margs <- cmod[names(cmod)!="model"] } } if(length(margs)==0) { margs <- NULL } else { # detect anisotropic model if("Aniso" %in% names(margs)) stop("Anisotropic covariance models cannot be used", call.=FALSE) } out$margs <- margs out$model <- model out$covmodel <- list(type="Covariance", model=model, margs=margs) return(out) }, isPCP=FALSE, ## calls relevant covariance function from RandomFields package K = function(par, rvals, ..., model, margs) { if(any(par <= 0)) return(rep.int(Inf, length(rvals))) if(model == "exponential") { ## For efficiency and to avoid need for RandomFields package integrand <- function(r,par,...) 
2*pi*r*exp(par[1L]*exp(-r/par[2L])) } else { kraeverRandomFields() integrand <- function(r,par,model,margs) { modgen <- attr(model, "modgen") if(length(margs) == 0) { mod <- modgen(var=par[1L], scale=par[2L]) } else { mod <- do.call(modgen, append(list(var=par[1L], scale=par[2L]), margs)) } 2*pi *r *exp(RandomFields::RFcov(model=mod, x=r)) } } nr <- length(rvals) th <- numeric(nr) if(spatstat.options("fastK.lgcp")) { ## integrate using Simpson's rule fvals <- integrand(r=rvals, par=par, model=model, margs=margs) th[1L] <- rvals[1L] * fvals[1L]/2 if(nr > 1) for(i in 2:nr) th[i] <- th[i-1L] + (rvals[i] - rvals[i-1L]) * (fvals[i] + fvals[i-1L])/2 } else { ## integrate using 'integrate' th[1L] <- if(rvals[1L] == 0) 0 else integrate(integrand,lower=0,upper=rvals[1L], par=par,model=model,margs=margs)$value for (i in 2:length(rvals)) { delta <- integrate(integrand, lower=rvals[i-1L],upper=rvals[i], par=par,model=model,margs=margs) th[i]=th[i-1L]+delta$value } } return(th) }, pcf= function(par, rvals, ..., model, margs) { if(any(par <= 0)) return(rep.int(Inf, length(rvals))) if(model == "exponential") { ## For efficiency and to avoid need for RandomFields package gtheo <- exp(par[1L]*exp(-rvals/par[2L])) } else { kraeverRandomFields() modgen <- attr(model, "modgen") if(length(margs) == 0) { mod <- modgen(var=par[1L], scale=par[2L]) } else { mod <- do.call(modgen, append(list(var=par[1L], scale=par[2L]), margs)) } gtheo <- exp(RandomFields::RFcov(model=mod, x=rvals)) } return(gtheo) }, parhandler=function(model = "exponential", ...) { if(!is.character(model)) stop("Covariance function model should be specified by name") margs <- c(...) 
if(!identical(model, "exponential")) { ## get the 'model generator' modgen <- getRandomFieldsModelGen(model) attr(model, "modgen") <- modgen } return(list(type="Covariance", model=model, margs=margs)) }, ## sensible starting values selfstart = function(X) { alpha <- 2 * mean(nndist(X)) c(sigma2=1, alpha=alpha) }, ## meaningful model parameters interpret = function(par, lambda) { sigma2 <- par[["sigma2"]] alpha <- par[["alpha"]] mu <- if(is.numeric(lambda) && length(lambda) == 1 && lambda > 0) log(lambda) - sigma2/2 else NA c(sigma2=sigma2, alpha=alpha, mu=mu) } ) ) spatstatClusterModelInfo <- function(name, onlyPCP = FALSE) { if(inherits(name, "detpointprocfamily")) return(spatstatDPPModelInfo(name)) if(!is.character(name) || length(name) != 1) stop("Argument must be a single character string", call.=FALSE) TheTable <- .Spatstat.ClusterModelInfoTable nama2 <- names(TheTable) if(onlyPCP){ ok <- sapply(TheTable, getElement, name="isPCP") nama2 <- nama2[ok] } if(!(name %in% nama2)) stop(paste(sQuote(name), "is not recognised;", "valid names are", commasep(sQuote(nama2))), call.=FALSE) out <- TheTable[[name]] return(out) } spatstat/R/measures.R0000755000176200001440000005605713151747347014321 0ustar liggesusers# # measures.R # # signed/vector valued measures with atomic and diffuse components # # $Revision: 1.71 $ $Date: 2017/08/31 08:27:12 $ # msr <- function(qscheme, discrete, density, check=TRUE) { if(!inherits(qscheme, "quad")) stop("qscheme should be a quadrature scheme") nquad <- n.quad(qscheme) U <- union.quad(qscheme) wt <- w.quad(qscheme) Z <- is.data(qscheme) ndata <- sum(Z) # ensure conformable vectors/matrices stopifnot(is.numeric(discrete) || is.logical(discrete)) stopifnot(is.numeric(density)) if(is.vector(discrete) && is.vector(density)) { # handle constants if(length(discrete) == 1) discrete <- rep.int(discrete, ndata) if(length(density) == 1) density <- rep.int(density, nquad) # check lengths if(check) { check.nvector(discrete, ndata, things="data 
points", naok=TRUE) check.nvector(density, nquad, things="quadrature points", naok=TRUE) } discretepad <- numeric(nquad) discretepad[Z] <- discrete } else { if(length(discrete) == 1 && is.matrix(density)) { # replicate constant 'discrete' component to matrix of correct size discrete <- matrix(discrete, ndata, ncol(density)) } else if(length(density) == 1 && is.matrix(discrete)) { # replicate constant 'density' to matrix of correct size density <- matrix(density, nquad, ncol(discrete)) } else { discrete <- as.matrix(discrete) density <- as.matrix(density) } if(check) { # check numbers of rows check.nmatrix(discrete, ndata, things="data points", naok=TRUE, squarematrix=FALSE) check.nmatrix(density, nquad, things="quadrature points", naok=TRUE, squarematrix=FALSE) } nd <- ncol(discrete) nc <- ncol(density) if(nd != nc) { if(nd == 1) { # replicate columns of discrete component discrete <- matrix(rep.int(discrete, nc), ndata, nc) colnames(discrete) <- colnames(density) } else if(nc == 1) { # replicate columns of density component density <- matrix(rep.int(density, nd), nquad, nd) colnames(density) <- colnames(discrete) } else stop(paste("Incompatible numbers of columns in", sQuote("discrete"), paren(nd), "and", sQuote("density"), paren(nc))) } discretepad <- matrix(0, nquad, max(nd, nc)) discretepad[Z, ] <- discrete colnames(discretepad) <- colnames(density) } # # # Discretised measure (value of measure for each quadrature tile) val <- discretepad + wt * density if(is.matrix(density)) colnames(val) <- colnames(density) # out <- list(loc = U, val = val, atoms = Z, discrete = discretepad, density = density, wt = wt) class(out) <- "msr" return(out) } # Translation table for usage of measures # # e.g. res <- residuals(fit, ...) 
#          OLD                        NEW
#          res[ ]                     res$val[ ]      with(res, "increment")
#          attr(res, "atoms")         res$atoms       with(res, "is.atom")
#          attr(res, "discrete")      res$discrete    with(res, "discrete")
#          attr(res, "continuous")    res$density     with(res, "density")
#          w.quad(quad.ppm(fit))      res$wt          with(res, "qweights")
#          union.quad(quad.ppm(fit))  res$loc         with(res, "qlocations")
# .................................................

## Evaluate an expression in an environment exposing the components of a
## measure under user-friendly names (see translation table above).
## A result that is a single character string naming one of the components
## is replaced by that component itself.
with.msr <- function(data, expr, ...) {
  stopifnot(inherits(data, "msr"))
  ## dictionary of quantities visible to 'expr'
  vars <- list(increment  = data$val,
               is.atom    = data$atoms,
               discrete   = data$discrete,
               density    = data$density,
               continuous = data$density * data$wt,
               qweights   = data$wt,
               qlocations = data$loc,
               atoms      = data$loc[data$atoms],
               atommass   = marksubset(data$discrete, data$atoms))
  answer <- eval(substitute(expr), envir=vars, enclos=parent.frame())
  ## a bare component name means: extract that component
  if(is.character(answer) && length(answer) == 1 && answer %in% names(vars))
    answer <- vars[[answer]]
  return(answer)
}

## Print method for measures ("msr" objects).
## Verbosity of each section is controlled by waxlyrical().
print.msr <- function(x, ...) {
  locations <- x$loc
  npts <- npoints(locations)
  dimen <- ncol(as.matrix(x$val))
  cn <- colnames(x$val)
  splat(paste0(if(dimen == 1) "Scalar" else paste0(dimen, "-dimensional vector"),
               "-valued measure"))
  if(dimen > 1 && !is.null(cn) && waxlyrical("space"))
    splat("vector components:", commasep(sQuote(cn)))
  if(is.marked(locations)) {
    splat("\tDefined on 2-dimensional space x marks")
    if(is.multitype(locations))
      exhibitStringList("\tPossible marks: ", levels(marks(locations)))
  }
  if(waxlyrical("gory")) {
    splat("Approximated by", npts, "quadrature points")
    print(as.owin(locations))
    splat(sum(x$atoms), "atoms")
  }
  if(waxlyrical("extras")) {
    splat("Total mass:")
    if(dimen == 1) {
      splat("discrete =", signif(sum(with(x, "discrete")), 5),
            " continuous =", signif(sum(with(x, "continuous")), 5),
            " total =", signif(sum(with(x, "increment")), 5))
    } else {
      ## fall back to generic component names when none are given
      if(is.null(cn)) cn <- paste("component", 1:dimen)
      for(j in 1:dimen) {
        splat(paste0(cn[j], ":\t"),
              "discrete =", signif(sum(with(x, "discrete")[,j]), 5),
              " continuous =", signif(sum(with(x, "continuous")[,j]), 5),
              " total =", signif(sum(with(x, "increment")[,j]), 5))
      }
    }
  }
  return(invisible(NULL))
}
## A measure is multitype/marked iff its quadrature locations are.
is.multitype.msr <- function(X, ...) {
  is.multitype(X$loc, ...)
}

is.marked.msr <- function(X, ...) {
  is.marked(X$loc, ...)
}

## Split a measure into component measures, using the same splitting
## rules as split.ppp on the quadrature locations.
split.msr <- function(x, f, drop=FALSE, ...) {
  locations <- x$loc
  ## determine split using rules for split.ppp
  locsplit <- if(missing(f)) {
    split(locations, drop=drop)
  } else {
    split(locations, f, drop=drop)
  }
  ## extract grouping factor computed by split.ppp
  grp <- attr(locsplit, "fgroup")
  ## split the pointwise contributions to the measure
  atomsplit <- split(x$atoms, grp, drop=drop) # hyuk
  wtsplit <- split(x$wt, grp, drop=drop)
  if(ncol(x) == 1) {
    ## scalar measure: components are plain vectors
    valsplit  <- split(x$val,      grp, drop=drop)
    discsplit <- split(x$discrete, grp, drop=drop)
    denssplit <- split(x$density,  grp, drop=drop)
  } else {
    ## vector measure: split row-wise, keeping matrix structure
    splitmat <- function(m) lapply(split(as.data.frame(m), grp, drop=drop),
                                   as.matrix)
    valsplit  <- splitmat(x$val)
    discsplit <- splitmat(x$discrete)
    denssplit <- splitmat(x$density)
  }
  ## assemble the component measures
  pieces <- mapply(list,
                   loc=locsplit, val=valsplit, atoms=atomsplit,
                   discrete=discsplit, density=denssplit, wt=wtsplit,
                   SIMPLIFY=FALSE)
  names(pieces) <- names(locsplit)
  pieces <- lapply(pieces, "class<-", value="msr")
  ## optionally discard empty components
  if(drop && any(isnul <- (sapply(locsplit, npoints) == 0)))
    pieces[isnul] <- NULL
  return(as.solist(pieces))
}

## Total mass of a measure, optionally restricted to a spatial domain.
## When 'domain' is a tessellation, return one total per tile.
integral.msr <- function(f, domain=NULL, ...) {
  stopifnot(inherits(f, "msr"))
  if(!is.null(domain)) {
    if(is.tess(domain))
      return(sapply(tiles(domain), integral.msr, f = f))
    f <- f[domain]
  }
  incr <- with(f, "increment")
  if(is.matrix(incr)) apply(incr, 2, sum) else sum(incr)
}

augment.msr <- function(x, ..., sigma) {
  ## add a pixel image of the smoothed density component
  stopifnot(inherits(x, "msr"))
  if(!is.null(attr(x, "smoothdensity"))) return(x)
  d <- ncol(as.matrix(x$val))
  xloc <- x$loc
  W <- as.owin(xloc)
  if(missing(sigma)) sigma <- maxnndist(xloc, positive=TRUE)
  if(is.multitype(xloc)) {
    ## multitype case - split by type, smooth, sum
    y <- lapply(split(x), augment.msr, sigma=sigma, ...)
z <- lapply(y, attr, which="smoothdensity") if((nc <- ncol(x)) == 1) { ## scalar valued smo <- Reduce("+", z) } else { ## vector valued smo <- vector(mode="list", length=nc) for(j in 1:nc) smo[[j]] <- Reduce("+", lapply(z, "[[", i=j)) smo <- as.solist(smo) } attr(x, "smoothdensity") <- smo return(x) } ## smooth density unless constant xdensity <- as.matrix(x$density) ra <- apply(xdensity, 2, range) varble <- apply(as.matrix(ra), 2, diff) > sqrt(.Machine$double.eps) ## if(d == 1) { smo <- if(!varble) as.im(mean(xdensity), W=W) else do.call(Smooth, resolve.defaults(list(X=xloc %mark% xdensity), list(...), list(sigma=sigma))) } else { smo <- vector(mode="list", length=d) names(smo) <- colnames(x) if(any(varble)) smo[varble] <- do.call(Smooth, resolve.defaults(list(X=xloc %mark% xdensity[,varble, drop=FALSE]), list(...), list(sigma=sigma))) if(any(!varble)) smo[!varble] <- lapply(apply(xdensity[, !varble, drop=FALSE], 2, mean), as.im, W=W) smo <- as.solist(smo) } attr(x, "smoothdensity") <- smo return(x) } plot.msr <- function(x, ..., add=FALSE, how=c("image", "contour", "imagecontour"), main=NULL, do.plot=TRUE, multiplot=TRUE, massthresh=0, equal.markscale=FALSE, equal.ribbon=FALSE) { if(is.null(main)) main <- short.deparse(substitute(x)) how <- match.arg(how) if(!multiplot) { ## compress everything to a single panel x$loc <- unmark(x$loc) if(is.matrix(x$val)) x$val <- rowSums(x$val) if(is.matrix(x$discrete)) x$discrete <- rowSums(x$discrete) if(is.matrix(x$density)) x$density <- rowSums(x$density) if(!is.null(smo <- attr(x, "smoothdensity")) && inherits(smo, "solist")) attr(x, "smoothdensity") <- Reduce("+", smo) } d <- dim(x)[2] k <- if(is.multitype(x)) length(levels(marks(x$loc))) else 1 ## multiple plot panels may be generated if(k == 1 && d == 1) { ## single plot y <- solist(x) } else if(k > 1 && d == 1) { ## multitype y <- split(x) } else if(k == 1 && d > 1) { ## vector-valued y <- unstack(x) } else if(k > 1 && d > 1) { ## both multitype and vector-valued y <- 
split(x) typenames <- names(y) vecnames <- colnames(x$val) y <- as.solist(Reduce(append, lapply(y, unstack))) names(y) <- as.vector(t(outer(typenames, vecnames, paste, sep="."))) } #' ensure image of density is present y <- solapply(y, augment.msr) #' ready to plot if(length(y) > 1) { ## plot as an array of panels userarg <- list(...) rowcol <- list(nrows=k, ncols=d) if(any(c("nrows", "ncols") %in% names(userarg))) rowcol <- list() #' determine common scales if required scaleinfo <- list() if(equal.markscale) { W <- Window(x) #' extract vectors of atomic masses from each panel marx <- lapply(y, with, "atommass") #' make a separate scale calculation for each panel scales <- sapply(marx, mark.scale.default, w=W, ...) scaleinfo$markscale <- min(scales) scaleinfo$markrange <- range(unlist(marx)) } if(equal.ribbon) { images <- lapply(y, attr, which="smoothdensity") scaleinfo$zlim <- range(sapply(images, range)) } ## go result <- do.call(plot.solist, resolve.defaults(list(y), userarg, rowcol, scaleinfo, list(how=how, main=main, equal.scales=TRUE, halign=TRUE, valign=TRUE, claim.title.space=TRUE))) return(invisible(result)) } ## scalar measure x <- y[[1]] ## get atoms xatomic <- (x$loc %mark% x$discrete)[x$atoms] if(length(massthresh) && all(is.finite(massthresh))) { ## ignore atoms with absolute mass <= massthresh check.1.real(massthresh) xatomic <- xatomic[abs(marks(xatomic)) > massthresh] } xtra.im <- graphicsPars("image") xtra.pp <- setdiff(graphicsPars("ppp"), c("box", "col")) xtra.pp <- union(xtra.pp, c("markrange", "marklevels")) xtra.ow <- graphicsPars("owin") smo <- attr(x, "smoothdensity") ## do.image <- how %in% c("image", "imagecontour") do.contour <- how %in% c("contour", "imagecontour") ## allocate space for plot and legend using do.plot=FALSE mechanism pdata <- do.call.matched(plot.ppp, resolve.defaults(list(x=xatomic, do.plot=FALSE, main=main), list(...), list(show.all=TRUE)), extrargs=xtra.pp) result <- pdata bb <- attr(pdata, "bbox") if(do.image) { idata 
<- do.call.matched(plot.im, resolve.defaults(list(x=smo, main=main, do.plot=FALSE), list(...)), extrargs=xtra.im) result <- idata bb <- boundingbox(bb, attr(idata, "bbox")) } ## attr(result, "bbox") <- bb ## if(do.plot) { if(!add) { blankmain <- prepareTitle(main)$blank ## initialise plot do.call.matched(plot.owin, resolve.defaults(list(x=bb, type="n", main=blankmain), list(...)), extrargs=xtra.ow) } ## display density if(do.image) do.call.matched(plot.im, resolve.defaults(list(x=smo, add=TRUE), list(...), list(main=main, show.all=TRUE)), extrargs=xtra.im) if(do.contour) do.call.matched(contour.im, resolve.defaults(list(x=smo, add=TRUE), list(...), list(main=main, axes=FALSE, show.all=!do.image)), extrargs=c("zlim", "labels", "labcex", ## DO NOT ALLOW 'col' "drawlabels", "method", "vfont", "lty", "lwd", "claim.title.space")) ## display atoms do.call.matched(plot.ppp, resolve.defaults(list(x=xatomic, add=TRUE, main=""), list(...), list(show.all=TRUE)), extrargs=xtra.pp) } return(invisible(result)) } "[.msr" <- function(x, i, j, ...) { valu <- as.matrix(x$val) disc <- as.matrix(x$discrete) dens <- as.matrix(x$density) wt <- x$wt atoms <- x$atoms # if(!missing(j)) { valu <- valu[, j] disc <- disc[, j] dens <- dens[, j] } loc <- x$loc if(!missing(i)) { # use [.ppp to identify which points are retained locn <- loc %mark% seq_len(npoints(loc)) loci <- locn[i, clip=TRUE] loc <- unmark(loci) id <- marks(loci) # extract valu <- valu[id, ] disc <- disc[id, ] dens <- dens[id, ] wt <- wt[id] atoms <- atoms[id] } out <- list(loc=loc, val=valu, atoms=atoms, discrete=disc, density=dens, wt=wt) class(out) <- "msr" return(out) } dim.msr <- function(x) { dim(as.matrix(x$val)) } dimnames.msr <- function(x) { list(NULL, colnames(x$val)) } smooth.msr <- function(X, ...) { .Deprecated("Smooth.msr", package="spatstat", msg="smooth.msr is deprecated: use the generic Smooth with a capital S") Smooth(X, ...) 
}

## Smooth method for measures:
## kernel-smoothed density estimate weighted by the measure values.
Smooth.msr <- function(X, ..., drop=TRUE) {
  verifyclass(X, "msr")
  sm <- density(X$loc, weights=X$val, ...)
  if(!drop && is.im(sm))
    sm <- solist(sm)
  return(sm)
}

## Extract the window containing a measure.
as.owin.msr <- function(W, ..., fatal=TRUE) {
  as.owin(W$loc, ..., fatal=fatal)
}

## Domain of a measure is the window of its quadrature points.
domain.msr <- Window.msr <- function(X, ...) { as.owin(X) }

## Vector shift of a measure.
## The cached smoothed density, if present, is shifted by the same vector.
shift.msr <- function(X, ...) {
  X$loc <- Xloc <- shift(X$loc, ...)
  smo <- attr(X, "smoothdensity")
  if(!is.null(smo))
    attr(X, "smoothdensity") <- shift(smo, getlastshift(Xloc))
  putlastshift(X, getlastshift(Xloc))
}

## Convert a (possibly vector-valued) measure to a layered object,
## one layer per column of the measure.
as.layered.msr <- local({

  as.layered.msr <- function(X) {
    nc <- ncol(X)
    if(nc == 0) return(layered())
    if(nc == 1) return(layered(X))
    Y <- lapply(seq_len(nc), pickcol, x=X)
    names(Y) <- colnames(X)
    return(layered(LayerList=Y))
  }

  ## extract column j of the measure x
  pickcol <- function(j,x) x[,j]

  as.layered.msr
})

## Scalar dilation of a measure.
scalardilate.msr <- function(X, f, ...) {
  X$loc <- Xloc <- scalardilate(X$loc, f, ...)
  putlastshift(X, getlastshift(Xloc))
}

## Group generic for arithmetic operations on measures.
## Supported: unary +A, -A; A+B, A-B for two measures;
## A*z, A/z and z*A for a measure A and numeric z.
Ops.msr <- function(e1,e2=NULL){
  ## the numeric components of a measure that arithmetic acts on
  vn <- c("val", "discrete", "density")
  if(nargs() == 1L) {
    ## unary operator: only +A and -A are meaningful
    if(!is.element(.Generic, c("+", "-")))
      stop(paste("Unary operation", sQuote(paste0(.Generic, "A")),
                 "is undefined for a measure A."), call.=FALSE)
    e1 <- unclass(e1)
    e1[vn] <- lapply(e1[vn], .Generic)
    class(e1) <- "msr"
    return(e1)
  } else {
    ## binary operator: dispatch on which operands are measures
    m1 <- inherits(e1, "msr")
    m2 <- inherits(e2, "msr")
    if(m1 && m2) {
      ## measure op measure: only addition and subtraction
      if(!is.element(.Generic, c("+", "-")))
        stop(paste("Operation", sQuote(paste0("A", .Generic, "B")),
                   "is undefined for measures A, B"), call.=FALSE)
      k1 <- dim(e1)[2]
      k2 <- dim(e2)[2]
      if(k1 != k2)
        stop(paste("Operation", sQuote(paste0("A", .Generic, "B")),
                   "is undefined because A, B have incompatible dimensions:",
                   "A is", ngettext(k1, "scalar", paste0(k1, "-vector")),
                   ", B is", ngettext(k2, "scalar", paste0(k2, "-vector"))),
             call.=FALSE)
      if(!identical(e1$loc, e2$loc)) {
        ## transfer both measures to a common quadrature scheme first
        haha <- harmonise(e1, e2)
        e1 <- haha[[1L]]
        e2 <- haha[[2L]]
      }
      e1 <- unclass(e1)
      e2 <- unclass(e2)
      e1[vn] <- mapply(.Generic, e1[vn],
e2[vn], SIMPLIFY=FALSE) class(e1) <- "msr" return(e1) } else if(m1 && is.numeric(e2)) { if(!is.element(.Generic, c("/", "*"))) stop(paste("Operation", sQuote(paste0("A", .Generic, "z")), "is undefined for a measure A and numeric z."), call.=FALSE) e1 <- unclass(e1) e1[vn] <- lapply(e1[vn], .Generic, e2=e2) class(e1) <- "msr" return(e1) } else if(m2 && is.numeric(e1)) { if(.Generic != "*") stop(paste("Operation", sQuote(paste0("z", .Generic, "A")), "is undefined for a measure A and numeric z."), call.=FALSE) e2 <- unclass(e2) e2[vn] <- lapply(e2[vn], .Generic, e1=e1) class(e2) <- "msr" return(e2) } stop(paste("Operation", sQuote(paste0("e1", .Generic, "e2")), "is undefined for this kind of data"), call.=FALSE) } } measurePositive <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- pmax(0, x$discrete) y$density <- pmax(0, x$density) y$val <- y$discrete + y$wt * y$density return(y) } measureNegative <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- -pmin(0, x$discrete) y$density <- -pmin(0, x$density) y$val <- y$discrete + y$wt * y$density return(y) } measureVariation <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- abs(x$discrete) y$density <- abs(x$density) y$val <- y$discrete + y$wt * y$density return(y) } totalVariation <- function(x) integral(measureVariation(x)) harmonise.msr <- local({ harmonise.msr <- function(...) { argz <- list(...) 
n <- length(argz) if(n == 0) return(argz) ismeasure <- sapply(argz, inherits, what="msr") if(!any(ismeasure)) stop("No measures supplied") if(!all(ismeasure)) stop("All arguments should be measures (objects of class msr)") if(n < 2) return(argz) result <- vector(mode="list", length=n) ## extract entries loclist <- lapply(argz, getElement, name="loc") atomlist <- lapply(argz, getElement, name="atoms") masslist <- lapply(argz, getElement, name="discrete") denslist <- lapply(argz, getElement, name="density") ## check for compatible dimensions of measure values dimen <- unique(sapply(argz, ncol)) if(length(dimen) > 1) stop("Measures have different dimensions:", commasep(sort(dimen))) ## check for marked points ismarked <- sapply(loclist, is.marked) if(any(ismarked) && !all(ismarked)) stop("Some, but not all, quadrature schemes are marked") ismarked <- all(ismarked) ## union of all quadrature points in all measures Uloc <- do.call(superimpose, append(unname(loclist), list(check=FALSE))) Uloc <- unique(Uloc) nU <- npoints(Uloc) ## match each quadrature set to the union ## and find nearest data point to each point in the union if(!ismarked) { matchlist <- lapply(loclist, nncross, Y=Uloc, what="which") nearlist <- lapply(loclist, ssorcnn, xx=Uloc, what="which") } else { stop("Not yet implemented for marked quadrature schemes") } ## nearest neighbour interpolation of density values of each argument ## onto the common quadrature set Udenslist <- mapply(extract, x=denslist, i=nearlist, SIMPLIFY=FALSE) ## initialise other bits noatoms <- logical(nU) zeromass <- if(dimen == 1) numeric(nU) else matrix(0, nU, dimen) Uatomlist <- rep(list(noatoms), n) Umasslist <- rep(list(zeromass), n) ## assign atoms in each argument Uatomlist <- mapply(subsetgets, x=Uatomlist, i=matchlist, value=atomlist, SIMPLIFY=FALSE) Umasslist <- mapply(subsetgets, x=Umasslist, i=matchlist, value=masslist, SIMPLIFY=FALSE) ## union of atoms isatom <- Reduce("|", Uatomlist) ## masses at atoms Umasslist <- 
lapply(Umasslist, extract, i=isatom) ## make common quadrature scheme UQ <- quadscheme(Uloc[isatom], Uloc[!isatom]) ## reorder density data correspondingly neworder <- c(which(isatom), which(!isatom)) Udenslist <- lapply(Udenslist, extract, i=neworder) ## make new measures result <- mapply(msr, MoreArgs=list(qscheme=UQ), discrete=Umasslist, density=Udenslist, SIMPLIFY=FALSE) names(result) <- names(argz) class(result) <- unique(c("solist", class(result))) return(result) } ssorcnn <- function(xx, yy, what) nncross(xx, yy, what=what) extract <- function(x, i) { if(is.matrix(x)) x[i, , drop=FALSE] else x[i] } subsetgets <- function(x, i, value) { if(is.matrix(x)) { x[i, ] <- value } else { x[i] <- value } return(x) } harmonise.msr }) spatstat/R/setcov.R0000755000176200001440000000634213115271120013745 0ustar liggesusers# # # setcov.R # # $Revision: 1.15 $ $Date: 2017/06/05 10:31:58 $ # # Compute the set covariance function of a window # or the (noncentred) spatial covariance function of an image # setcov <- function(W, V=W, ...) { W <- as.owin(W) # pixel approximation mW <- as.mask(W, ...) Z <- as.im(mW, na.replace=0) if(missing(V)) return(imcov(Z)) # cross-covariance V <- as.owin(V) mV <- as.mask(V, ...) 
Z2 <- as.im(mV, na.replace=0) imcov(Z, Z2) } imcov <- function(X, Y=X) { if(missing(Y)) Y <- NULL convolve.im(X, Y, reflectX = FALSE, reflectY=TRUE) } convolve.im <- function(X, Y=X, ..., reflectX=FALSE, reflectY=FALSE) { stopifnot(is.im(X)) have.Y <- !missing(Y) && !is.null(Y) crosscov <- have.Y || reflectX || reflectY trap.extra.arguments(..., .Context="In convolve.im") #' Check whether Fastest Fourier Transform in the West is available west <- fftwAvailable() #' if(have.Y) { # cross-covariance stopifnot(is.im(Y)) Xbox <- as.rectangle(X) Ybox <- as.rectangle(Y) # first shift images to common midpoint, to reduce storage Xmid <- centroid.owin(Xbox) Ymid <- centroid.owin(Ybox) svec <- as.numeric(Xmid) - as.numeric(Ymid) Y <- shift(Y, svec) # ensure images are compatible XY <- harmonise.im(X=X, Y=Y) X <- XY$X Y <- XY$Y } else { # Y is missing or NULL Y <- X Xbox <- Ybox <- as.rectangle(X) } M <- X$v M[is.na(M)] <- 0 xstep <- X$xstep ystep <- X$ystep # pad with zeroes nr <- nrow(M) nc <- ncol(M) Mpad <- matrix(0, ncol=2*nc, nrow=2*nr) Mpad[1:nr, 1:nc] <- M lengthMpad <- 4 * nc * nr fM <- fft2D(Mpad, west=west) if(!crosscov) { # compute convolution square G <- fft2D(fM^2, inverse=TRUE, west=west)/lengthMpad } else { # compute set cross-covariance or convolution by FFT N <- Y$v N[is.na(N)] <- 0 Npad <- matrix(0, ncol=2*nc, nrow=2*nr) Npad[1:nr, 1:nc] <- N fN <- fft2D(Npad, west=west) if(reflectY) fN <- Conj(fN) if(reflectX) fM <- Conj(fM) G <- fft2D(fM * fN, inverse=TRUE, west=west)/lengthMpad } # cat(paste("maximum imaginary part=", max(Im(G)), "\n")) G <- Mod(G) * xstep * ystep if(reflectX != reflectY) { # Currently G[i,j] corresponds to a vector shift of # dy = (i-1) mod nr, dx = (j-1) mod nc. 
# Rearrange this periodic function so that # the origin of translations (0,0) is at matrix position (nr,nc) # NB this introduces an extra row and column G <- G[ ((-nr):nr) %% (2 * nr) + 1, (-nc):nc %% (2*nc) + 1] } # Determine spatial domain of full raster image XB <- as.rectangle(X) YB <- as.rectangle(Y) # undo shift if(have.Y) YB <- shift(YB, -svec) # reflect if(reflectX) XB <- reflect(XB) if(reflectY) YB <- reflect(YB) # Minkowski sum of covering boxes xran <- XB$xrange + YB$xrange yran <- XB$yrange + YB$yrange # Declare spatial domain out <- im(G, xrange = xran, yrange=yran) if(crosscov) { # restrict to actual spatial domain of function if(reflectX) Xbox <- reflect(Xbox) if(reflectY) Ybox <- reflect(Ybox) # Minkowski sum xran <- Xbox$xrange + Ybox$xrange yran <- Xbox$yrange + Ybox$yrange XYbox <- owin(xran, yran) out <- out[XYbox, rescue=TRUE] } return(out) } spatstat/R/multistrauss.R0000755000176200001440000002040613115271120015216 0ustar liggesusers# # # multistrauss.S # # $Revision: 2.23 $ $Date: 2015/03/31 03:57:11 $ # # The multitype Strauss process # # MultiStrauss() create an instance of the multitype Strauss process # [an object of class 'interact'] # # ------------------------------------------------------------------- # MultiStrauss <- local({ # ......... 
define interaction potential MSpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrix of interaction radii r[ , ] r <- par$radii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be checked # (the interaction must be symmetric in type, and scored as such) uptri <- (row(r) <= col(r)) & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] # corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be checked # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
if(length(z) > 0) { # assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] # apply relevant threshold to each pair of points str <- (d <= rxu) # assign str[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- str[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. delMS <- function(which, types, radii) { radii[which] <- NA if(all(is.na(radii))) return(Poisson()) return(MultiStrauss(types, radii)) } # Set up basic object except for family and parameters BlankMSobject <- list( name = "Multitype Strauss process", creator = "MultiStrauss", family = "pairwise.family", # evaluated later pot = MSpotential, par = list(types=NULL, radii = NULL), # to be filled in later parnames = c("possible types", "interaction distances"), pardesc = c("vector of possible types", "matrix of hardcore distances"), selfstart = function(X, self) { if(!is.null(self$par$types)) return(self) types <- levels(marks(X)) MultiStrauss(types=types,radii=self$par$radii) }, init = function(self) { types <- self$par$types if(!is.null(types)) { radii <- self$par$radii nt <- length(types) MultiPair.checkmatrix(radii, nt, sQuote("radii")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { radii <- self$par$radii types <- self$par$types if(waxlyrical('gory')) { splat(nrow(radii), "types of points") if(!is.null(types)) { splat("Possible types: ") print(noquote(types)) } else splat("Possible types:\t not yet determined") } cat("Interaction radii:\n") print(signif(radii, 
getOption("digits"))) invisible() }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrix of Strauss interaction radii r <- self$par$radii # list all unordered pairs of types uptri <- (row(r) <= col(r)) & (!is.na(r)) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) expcoef <- exp(coeffs) gammas[ cbind(index1, index2) ] <- expcoef gammas[ cbind(index2, index1) ] <- expcoef # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=dround(gammas))) }, valid = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii radii <- self$par$radii # parameters to estimate required <- !is.na(radii) gr <- gamma[required] return(all(is.finite(gr) & gr <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii and types radii <- self$par$radii types <- self$par$types # problems? required <- !is.na(radii) okgamma <- is.finite(gamma) & (gamma <= 1) naughty <- required & !okgamma # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delMS(naughty, types, radii)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) uptri <- (rn <= cn) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) } mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex) inters <- lapply(mats, delMS, types=types, radii=radii) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$radii active <- !is.na(r) if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- active & (abs(log(gamma)) > epsilon) } if(any(active)) return(max(r[active])) else return(0) }, version=NULL # to be added ) class(BlankMSobject) <- "interact" # finally create main function MultiStrauss <- function(radii, types=NULL) { if((missing(radii) || !is.matrix(radii)) && is.matrix(types)) { ## old syntax: (types=NULL, radii) radii <- types types <- NULL } radii[radii == 0] <- NA out <- instantiate.interact(BlankMSobject, list(types=types, radii = radii)) if(!is.null(types)) dimnames(out$par$radii) <- list(types, types) return(out) } MultiStrauss <- intermaker(MultiStrauss, BlankMSobject) MultiStrauss }) spatstat/R/morphology.R0000755000176200001440000002415413115271120014642 0ustar liggesusers# # morphology.R # # dilation, erosion, opening, closing # # generic functions # and methods for owin, psp, ppp # # $Revision: 1.30 $ $Date: 2016/07/30 05:13:53 $ # # ............ generic ............................ erosion <- function(w, r, ...) { UseMethod("erosion") } dilation <- function(w, r, ...) { UseMethod("dilation") } closing <- function(w, r, ...) { UseMethod("closing") } opening <- function(w, r, ...) { UseMethod("opening") } # ............ methods for class 'owin' ............................ erode.owin <- function(...) { .Deprecated("erosion.owin", package="spatstat") erosion.owin(...) 
} erosion.owin <- function(w, r, shrink.frame=TRUE, ..., strict=FALSE, polygonal=NULL) { verifyclass(w, "owin") validradius(r, "erosion") if(r == 0 && !strict) return(w) xr <- w$xrange yr <- w$yrange if(2 * r >= max(diff(xr), diff(yr))) stop("erosion distance r too large for frame of window") # compute the dimensions of the eroded frame exr <- xr + c(r, -r) eyr <- yr + c(r, -r) ebox <- list(x=exr[c(1,2,2,1)], y=eyr[c(1,1,2,2)]) ismask <- is.mask(w) if(is.empty(w)) return(emptywindow(ebox)) # determine type of computation if(is.null(polygonal)) polygonal <- !ismask else { stopifnot(is.logical(polygonal)) if(polygonal && ismask) { # try to convert w <- as.polygonal(w) if(is.mask(w)) polygonal <- FALSE } } if(is.rectangle(w) && polygonal) { # result is a smaller rectangle if(shrink.frame) { return(owin(exr, eyr)) # type 'rectangle' } else { return(owin(xr, yr, poly=ebox, check=FALSE)) # type 'polygonal' } } if(polygonal) { # compute polygonal region using polyclip package pnew <- polyclip::polyoffset(w$bdry, -r, jointype="round") # ensure correct polarity totarea <- sum(unlist(lapply(pnew, Area.xypolygon))) if(totarea < 0) pnew <- lapply(pnew, reverse.xypolygon) if(shrink.frame) { return(owin(poly=pnew, check=FALSE)) } else { return(owin( xr, yr, poly=pnew, check=FALSE)) } } # otherwise erode the window in pixel image form if(w$type == "mask") wnew <- erodemask(w, r, strict=strict) else { D <- distmap(w, invert=TRUE, ...) wnew <- levelset(D, r, if(strict) ">" else ">=") } if(shrink.frame) { # trim off some rows & columns of pixel raster keepcol <- (wnew$xcol >= exr[1] & wnew$xcol <= exr[2]) keeprow <- (wnew$yrow >= eyr[1] & wnew$yrow <= eyr[2]) wnew$xcol <- wnew$xcol[keepcol] wnew$yrow <- wnew$yrow[keeprow] wnew$dim <- c(sum(keeprow), sum(keepcol)) wnew$m <- wnew$m[keeprow, keepcol] wnew$xrange <- exr wnew$yrange <- eyr } return(wnew) } dilate.owin <- function(...) { .Deprecated("dilation.owin", package="spatstat") dilation.owin(...) 
}

## Morphological dilation of a window by radius r.
## polygonal=TRUE computes an exact polygonal result via 'polyclip';
## otherwise a pixel approximation based on the distance map is used.
## tight=TRUE shrinks the enclosing frame to the bounding box first.
dilation.owin <- function(w, r, ..., polygonal=NULL, tight=TRUE) {
  verifyclass(w, "owin")
  validradius(r, "dilation")
  if(r == 0) return(w)
  ismask <- is.mask(w)
  if(is.empty(w)) return(w)
  ## decide between polygonal and pixel computation
  if(is.null(polygonal)) {
    polygonal <- !ismask
  } else stopifnot(is.logical(polygonal))
  if(polygonal) {
    ## try to convert to polygonal; fall back to pixels if impossible
    w <- as.polygonal(w)
    if(!is.polygonal(w)) polygonal <- FALSE
  }
  ## enclosing frame, expanded by r
  bb <- if(tight) boundingbox(w) else as.rectangle(w)
  newbox <- grow.rectangle(bb, r)
  if(!polygonal) {
    ## pixel approximation: threshold the distance map at r
    epsilon <- sqrt(w$xstep^2 + w$ystep^2)
    r <- max(r, epsilon)
    w <- rebound.owin(w, newbox)
    distant <- distmap(w, ...)
    dil <- levelset(distant, r, "<=")
    return(dil)
  } else {
    ## exact polygonal dilation using the 'polyclip' package
    pnew <- polyclip::polyoffset(w$bdry, r, jointype="round")
    ## ensure correct polarity of the polygon boundaries
    totarea <- sum(unlist(lapply(pnew, Area.xypolygon)))
    if(totarea < 0)
      pnew <- lapply(pnew, reverse.xypolygon)
    ## assemble the result with the appropriate frame
    out <- if(tight) {
      owin(poly=pnew, check=FALSE)
    } else {
      owin(newbox$xrange, newbox$yrange, poly=pnew, check=FALSE)
    }
    return(out)
  }
}

## Morphological closing of a window: dilation followed by erosion.
closing.owin <- function(w, r, ..., polygonal=NULL) {
  if(missing(r)) stop("r is required")
  validradius(r, "closing")
  wplus <- dilation.owin(w, r, ..., polygonal=polygonal, tight=FALSE)
  if(is.empty(wplus)) return(wplus)
  wclose <- erosion.owin(wplus, r, strict=TRUE)
  ## restore the original enclosing frame
  b <- as.rectangle(w)
  wclose <- rebound.owin(wclose[b], b)
  return(wclose)
}

## Morphological opening of a window: erosion followed by dilation.
opening.owin <- function(w, r, ..., polygonal=NULL) {
  if(missing(r)) stop("r is required")
  validradius(r, "opening")
  wminus <- erosion.owin(w, r, ..., polygonal=polygonal, shrink.frame=FALSE)
  if(is.empty(wminus)) return(wminus)
  wopen <- dilation.owin(wminus, r, tight=FALSE)
  ## restore the original enclosing frame
  b <- as.rectangle(w)
  wopen <- rebound.owin(wopen[b], b)
  return(wopen)
}

## Border region of a window:
## inside (w minus eroded w) or outside (dilated w minus w).
border <- function(w, r, outside=FALSE, ...) {
  w <- as.owin(w)
  if(!outside) {
    e <- erosion(w, r, ...)
b <- setminus.owin(w, e) } else { d <- dilation(w, r, ...) b <- setminus.owin(d, w) } return(b) } # ............ methods for class 'psp' ............................ dilation.psp <- function(w, r, ..., polygonal=TRUE, tight=TRUE) { verifyclass(w, "psp") x <- w validradius(r, "dilation") if(r == 0) return(w) if(is.empty(x)) return(emptywindow(as.owin(w))) # bounding frame bb <- if(tight) boundingbox(x) else as.rectangle(x) newbox <- grow.rectangle(bb, r) # compute dilation if(!polygonal) { x <- rebound.psp(x, newbox) distant <- distmap(x, ...) dil <- levelset(distant, r, "<=") return(dil) } else if(spatstat.options("old.morpho.psp")) { # old code for polygonal case ends <- x$ends angles <- angles.psp(x, directed=TRUE) # lengths <- lengths.psp(x) out <- NULL # dilate individual segments halfcircle <- seq(from=0, to=pi, length.out=128)[-c(1,128)] for(i in seq_len(x$n)) { seg <- ends[i,] co <- cos(angles[i]) si <- sin(angles[i]) # draw sausage around i-th segment xx <- c(seg$x0, seg$x1) + r * si yy <- c(seg$y0, seg$y1) - r * co rightcircle <- angles[i] - pi/2 + halfcircle xx <- c(xx, seg$x1 + r * cos(rightcircle)) yy <- c(yy, seg$y1 + r * sin(rightcircle)) xx <- c(xx, c(seg$x1, seg$x0) - r * si) yy <- c(yy, c(seg$y1, seg$y0) + r * co) leftcircle <- angles[i] + pi/2 + halfcircle xx <- c(xx, seg$x0 + r * cos(leftcircle)) yy <- c(yy, seg$y0 + r * sin(leftcircle)) sausage <- owin(newbox$xrange, newbox$yrange, poly=list(x=xx, y=yy), check=FALSE) # add to set out <- union.owin(out, sausage, ...) 
} return(out) } else { # new code using 'polyclip' package # convert to list of list(x,y) ends <- as.matrix(x$ends) n <- nrow(ends) plines <- vector(mode="list", length=n) for(i in 1:n) plines[[i]] <- list(x=ends[i, c("x0","x1")], y=ends[i, c("y0","y1")]) # call pnew <- polyclip::polylineoffset(plines, r, jointype="round", endtype="openround") # ensure correct polarity totarea <- sum(unlist(lapply(pnew, Area.xypolygon))) if(totarea < 0) pnew <- lapply(pnew, reverse.xypolygon) # convert to owin object out <- if(tight) owin(poly=pnew, check=FALSE) else owin(newbox$xrange, newbox$yrange, poly=pnew, check=FALSE) return(out) } } closing.psp <- function(w, r, ..., polygonal=TRUE) { if(missing(r)) stop("r is required") validradius(r, "closing") wplus <- dilation.psp(w, r, ..., polygonal=polygonal, tight=FALSE) if(is.empty(wplus)) return(emptywindow(as.owin(w))) wclose <- erosion.owin(wplus, r, strict=TRUE) wclose <- rebound.owin(wclose, as.rectangle(w)) return(wclose) } erosion.psp <- function(w, r, ...) { idorempty(w, r, "erosion") } opening.psp <- function(w, r, ...) { idorempty(w, r,"opening") } # ............ methods for class 'ppp' ............................ dilation.ppp <- function(w, r, ..., polygonal=TRUE, tight=TRUE) { verifyclass(w, "ppp") validradius(r, "dilation") x <- w if(r == 0) return(x) if(is.empty(w)) return(emptywindow(as.owin(w))) # bounding frame bb <- if(tight) boundingbox(x) else as.rectangle(x) newbox <- grow.rectangle(bb, r) # compute dilation if(!polygonal) { # compute pixel approximation x <- rebound.ppp(x, newbox) distant <- distmap(x, ...) dil <- levelset(distant, r, "<=") return(dil) } else { # compute polygonal approximation # generate discs coo <- coords(x) nn <- npoints(x) balls <- vector(mode="list", length=nn) ball0 <- disc(r, c(0,0), ...) 
for(i in seq_len(nn)) balls[[i]] <- shift(ball0, vec=coo[i,]) class(balls) <- c("solist", class(balls)) out <- union.owin(balls) return(out) } } closing.ppp <- function(w, r, ..., polygonal=TRUE) { if(missing(r)) stop("r is required") validradius(r, "closing") if(is.empty(w) || w$n <= 3) return(emptywindow(as.owin(w))) # remove `isolated' points ok <- (nndist(w) <= 2 * r) if(sum(ok) <= 3) return(emptywindow(as.owin(w))) w <- w[ok] # dilate wplus <- dilation.ppp(w, r, ..., polygonal=polygonal, tight=FALSE) wclose <- erosion.owin(wplus, r, strict=TRUE) wclose <- rebound.owin(wclose, as.rectangle(w)) return(wclose) } erosion.ppp <- function(w, r, ...) { idorempty(w, r, "erosion") } opening.ppp <- function(w, r, ...) { idorempty(w, r,"opening") } # ............ utilities ............................ validradius <- local({ validradius <- function(r, caller="morphological operator") { # rname <- short.deparse(substitute(r)) if(!is.numeric(r) || length(r) != 1) groan("radius r must be a single number", caller) if(r < 0) groan("radius r must be nonnegative", caller) return(TRUE) } groan <- function(whinge, caller) { stop(paste("for", paste(caller, ",", sep=""), whinge), call.=FALSE) } validradius }) idorempty <- function(w, r, caller="morphological operator") { validradius(r, caller) if(r == 0) return(w) else return(emptywindow(w)) } spatstat/R/ppmclass.R0000755000176200001440000007153313131557170014302 0ustar liggesusers# # ppmclass.R # # Class 'ppm' representing fitted point process models. 
# # # $Revision: 2.134 $ $Date: 2017/07/13 02:03:11 $ # # An object of class 'ppm' contains the following: # # $method model-fitting method (currently "mpl") # # $coef vector of fitted regular parameters # as given by coef(glm(....)) # # $trend the trend formula # or NULL # # $interaction the interaction family # (an object of class 'interact') or NULL # # $Q the quadrature scheme used # # $maxlogpl the maximised value of log pseudolikelihood # # $internal list of internal calculation results # # $correction name of edge correction method used # $rbord erosion distance for border correction (or NULL) # # $the.call the originating call to ppm() # # $the.version version of mpl() which yielded the fit # # #------------------------------------------------------------------------ is.ppm <- function(x) { inherits(x, "ppm") } print.ppm <- function(x, ..., what=c("all", "model", "trend", "interaction", "se", "errors")) { verifyclass(x, "ppm") misswhat <- missing(what) opts <- c("model", "trend", "interaction", "se", "errors") what <- match.arg(what, c("all", opts), several.ok=TRUE) if("all" %in% what) what <- opts np <- length(coef(x)) terselevel <- spatstat.options("terse") digits <- getOption('digits') # If SE was explicitly requested, calculate it. 
# Otherwise, do it only if the model is Poisson (by default) do.SE <- force.no.SE <- force.SE <- FALSE if(np == 0) { force.no.SE <- TRUE } else if(!is.null(x$internal$VB)) { force.no.SE <- TRUE } else if(!misswhat && ("se" %in% what)) { force.SE <- TRUE } else switch(spatstat.options("print.ppm.SE"), always = { force.SE <- TRUE }, never = { force.no.SE <- TRUE }, poisson = { do.SE <- is.poisson(x) && !identical(x$fitter, "gam") && (!is.null(x$varcov) || x$method != "logi") && waxlyrical("extras", terselevel) }) do.SE <- (do.SE || force.SE) && !force.no.SE s <- summary.ppm(x, quick=if(do.SE) FALSE else "no variances") notrend <- s$no.trend # stationary <- s$stationary poisson <- s$poisson markeddata <- s$marked multitype <- s$multitype # markedpoisson <- poisson && markeddata csr <- poisson && notrend && !markeddata special <- csr && all(c("model", "trend") %in% what) if(special) { ## ---------- Trivial/special cases ----------------------- splat("Stationary Poisson process") cat("Intensity:", signif(s$trend$value, digits), fill=TRUE) } else { ## ----------- Print model type ------------------- if("model" %in% what) { splat(s$name) parbreak(terselevel) if(markeddata) mrk <- s$entries$marks if(multitype) { splat(paste("Possible marks:", commasep(sQuote(levels(mrk))))) parbreak(terselevel) } } ## ----- trend -------------------------- if("trend" %in% what) { if(!notrend) { splat("Log", if(poisson) "intensity: " else "trend: ", pasteFormula(s$trend$formula)) parbreak(terselevel) } if(waxlyrical('space', terselevel) || !do.SE) { ## print trend coefficients, unless redundant and space is tight tv <- s$trend$value if(length(tv) == 0) splat("[No trend coefficients]") else { thead <- paste0(s$trend$label, ":") if(is.list(tv)) { splat(thead) for(i in seq_along(tv)) print(tv[[i]]) } else if(is.numeric(tv) && length(tv) == 1) { ## single number: append to end of current line tvn <- names(tv) tveq <- if(is.null(tvn)) "\t" else paste(" ", tvn, "= ") splat(paste0(thead, tveq, 
signif(tv, digits))) } else { ## some other format splat(thead) print(tv) } } parbreak(terselevel) } } if(waxlyrical("space", terselevel) && !is.null(cfa <- s$covfunargs) && length(cfa) > 0) { cfafitter <- s$cfafitter if(is.null(cfafitter)) { cat("Covariate", "function", "arguments", "(covfunargs)", "provided:", fill=TRUE) } else { cat("Irregular", "parameters", "(covfunargs)", "fitted", "by", paste0(sQuote(cfafitter), ":"), fill=TRUE) } for(i in seq_along(cfa)) { cat(paste(names(cfa)[i], "= ")) cfai <- cfa[[i]] if(is.numeric(cfai) && length(cfai) == 1) { cfai <- signif(cfai, digits) cat(paste(cfai, "\n")) } else print(cfai) } } } # ---- Interaction ---------------------------- if("interaction" %in% what) { if(!poisson) { print(s$interaction, family=FALSE, banner=FALSE, brief=!waxlyrical("extras")) parbreak(terselevel) } } # ----- parameter estimates with SE and 95% CI -------------------- if(waxlyrical("extras", terselevel) && ("se" %in% what) && (np > 0)) { if(!is.null(cose <- s$coefs.SE.CI)) { print(cose, digits=digits) } else if(do.SE) { # standard error calculation failed splat("Standard errors unavailable; variance-covariance matrix is singular") } else if(!force.no.SE) { # standard error was voluntarily omitted if(waxlyrical('space', terselevel)) splat("For standard errors, type coef(summary(x))\n") } } # ---- Warnings issued in mpl.prepare --------------------- if(waxlyrical("errors", terselevel) && "errors" %in% what) { probs <- s$problems if(!is.null(probs) && is.list(probs) && (length(probs) > 0)) lapply(probs, function(x) { if(is.list(x) && !is.null(p <- x$print)) splat(paste("Problem:\n", p, "\n\n")) }) if(s$old) warning(paste("Model fitted by old spatstat version", s$version)) # ---- Algorithm status ---------------------------- fitter <- s$fitter converged <- s$converged if(!is.null(fitter) && fitter %in% c("glm", "gam") && !converged) splat("*** Fitting algorithm for", sQuote(fitter), "did not converge ***") } if(waxlyrical("extras", terselevel) && 
s$projected) { parbreak() splat("Fit was emended to obtain a valid point process model") } if(identical(s$valid, FALSE) && waxlyrical("errors", terselevel)) { parbreak() splat("*** Model is not valid ***") if(!all(is.finite(s$entries$coef))) { splat("*** Some coefficients are NA or Inf ***") } else { splat("*** Interaction parameters are outside valid range ***") } } else if(is.na(s$valid) && waxlyrical("extras", terselevel)) { parbreak() splat("[Validity of model could not be checked]") } return(invisible(NULL)) } quad.ppm <- function(object, drop=FALSE, clip=FALSE) { if(!is.ppm(object)) { if(is.kppm(object)) object <- object$po else if(is.lppm(object)) object <- object$fit else stop("object is not of class ppm, kppm or lppm") } Q <- object$Q if(is.null(Q)) return(Q) if(drop || clip) { ok <- getglmsubset(object) if(!is.null(ok)) Q <- Q[ok] } if(clip && object$correction == "border") { Wminus <- erosion(as.owin(object), object$rbord) Q <- Q[Wminus] } return(Q) } data.ppm <- function(object) { verifyclass(object, "ppm") object$Q$data } dummy.ppm <- function(object, drop=FALSE) { return(quad.ppm(object, drop=drop)$dummy) } # method for 'coef' coef.ppm <- function(object, ...) 
{ verifyclass(object, "ppm") object$coef } getglmfit <- function(object) { verifyclass(object, "ppm") glmfit <- object$internal$glmfit if(is.null(glmfit)) return(NULL) if(object$method != "mpl") glmfit$coefficients <- object$coef return(glmfit) } getglmdata <- function(object, drop=FALSE) { verifyclass(object, "ppm") gd <- object$internal$glmdata if(!drop) return(gd) return(gd[getglmsubset(object), , drop=FALSE]) } getglmsubset <- function(object) { gd <- object$internal$glmdata if(object$method=="logi") return(gd$.logi.ok) return(gd$.mpl.SUBSET) } getppmdatasubset <- function(object) { # Equivalent to getglmsubset(object)[is.data(quad.ppm(object))] # but also works for models fitted exactly, etc # if(object$method %in% c("mpl", "ho")) { sub <- getglmsubset(object) if(!is.null(sub)) { Z <- is.data(quad.ppm(object)) return(sub[Z]) } } X <- data.ppm(object) sub <- if(object$correction == "border") { (bdist.points(X) >= object$rbord) } else rep(TRUE, npoints(X)) return(sub) } getppmOriginalCovariates <- function(object) { df <- as.data.frame(as.ppp(quad.ppm(object))) cova <- object$covariates if(length(cova) > 0) { df2 <- mpl.get.covariates(object$covariates, union.quad(quad.ppm(object)), "quadrature points", object$covfunargs) df <- cbind(df, df2) } return(df) } # ??? method for 'effects' ??? valid <- function(object, ...) { UseMethod("valid") } valid.ppm <- function(object, warn=TRUE, ...) 
{ verifyclass(object, "ppm") coeffs <- coef(object) # ensure all coefficients are fitted, and finite if(!all(is.finite(coeffs))) return(FALSE) # inspect interaction inte <- object$interaction if(is.poisson(object)) return(TRUE) # Poisson process # extract fitted interaction coefficients Vnames <- object$internal$Vnames IsOffset <- object$internal$IsOffset Icoeffs <- coeffs[Vnames[!IsOffset]] # check interaction checker <- inte$valid if(is.null(checker) || !newstyle.coeff.handling(inte)) { if(warn) warning("Internal error: unable to check validity of model") return(NA) } answer <- checker(Icoeffs, inte) return(answer) } emend <- function(object, ...) { UseMethod("emend") } emend.ppm <- project.ppm <- local({ tracemessage <- function(depth, ...) { if(depth == 0) return(NULL) spacer <- paste(rep.int(" ", depth), collapse="") marker <- ngettext(depth, "trace", paste("trace", depth)) marker <- paren(marker, "[") splat(paste0(spacer, marker, " ", paste(...))) } leaving <- function(depth) { tracemessage(depth, ngettext(depth, "Returning.", "Exiting level.")) } emend.ppm <- function(object, ..., fatal=FALSE, trace=FALSE) { verifyclass(object, "ppm") fast <- spatstat.options("project.fast") # user specifies 'trace' as logical # but 'trace' can also be integer representing trace depth td <- as.integer(trace) trace <- (td > 0) tdnext <- if(trace) td+1 else 0 if(valid.ppm(object)) { tracemessage(td, "Model is valid.") leaving(td) return(object) } # First ensure trend coefficients are all finite coeffs <- coef(object) # Which coefficients are trend coefficients coefnames <- names(coeffs) internames <- object$internal$Vnames trendnames <- coefnames[!(coefnames %in% internames)] # Trend terms in trend formula trendterms <- attr(terms(object), "term.labels") # Mapping from coefficients to terms of GLM coef2term <- attr(model.matrix(object), "assign") istrend <- (coef2term > 0) & (coefnames %in% trendnames) # Identify non-finite trend coefficients bad <- istrend & 
!is.finite(coeffs) if(!any(bad)) { tracemessage(td, "Trend terms are valid.") } else { nbad <- sum(bad) tracemessage(td, "Non-finite ", ngettext(nbad, "coefficient for term ", "coefficients for terms "), commasep(sQuote(trendterms[coef2term[bad]]))) if(fast) { # remove first illegal term firstbad <- min(which(bad)) badterm <- trendterms[coef2term[firstbad]] # remove this term from model tracemessage(td, "Removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) newobject <- update(object, removebad) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- emend.ppm(newobject, fatal=fatal, trace=tdnext) # return leaving(td) return(newobject) } else { # consider all illegal terms bestobject <- NULL for(i in which(bad)) { badterm <- trendterms[coef2term[i]] # remove this term from model tracemessage(td, "Considering removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) object.i <- update(object, removebad) if(trace) { tracemessage(td, "Considering updated model:") print(object.i) } # recurse object.i <- emend.ppm(object.i, fatal=fatal, trace=tdnext) # evaluate logPL logPL.i <- logLik(object.i, warn=FALSE) tracemessage(td, "max log pseudolikelihood = ", logPL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i)) bestobject <- object.i } if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } # return leaving(td) return(bestobject) } } # Now handle interaction inte <- object$interaction if(is.null(inte)) { tracemessage(td, "No interaction to check.") leaving(td) return(object) } tracemessage(td, "Inspecting interaction terms.") proj <- inte$project if(is.null(proj)) { whinge <- "Internal error: interaction has no projection operator" if(fatal) stop(whinge) warning(whinge) leaving(td) return(object) } # ensure the same edge correction is used! 
correction <- object$correction rbord <- object$rbord # apply projection coef.orig <- coeffs <- coef(object) Vnames <- object$internal$Vnames Icoeffs <- coeffs[Vnames] change <- proj(Icoeffs, inte) if(is.null(change)) { tracemessage(td, "Interaction does not need updating.") leaving(td) return(object) } tracemessage(td, "Interaction is not valid.") if(is.numeric(change)) { tracemessage(td, "Interaction coefficients updated without re-fitting.") # old style: 'project' returned a vector of updated coefficients Icoeffs <- change # tweak interaction coefficients object$coef[Vnames] <- Icoeffs # recompute fitted interaction object$fitin <- NULL object$fitin <- fitin(object) } else if(is.interact(change)) { # new style: 'project' returns an interaction if(trace) { tracemessage(td, "Interaction changed to:") print(change) } # refit the whole model # (using the same edge correction) # (and the same quadrature scheme) newobject <- update(object, interaction=change, correction=correction, rbord=rbord, forcefit=TRUE, envir=object$callframe) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- emend.ppm(newobject, fatal=fatal, trace=tdnext) object <- newobject } else if(is.list(change) && all(unlist(lapply(change, is.interact)))) { # new style: 'project' returns a list of candidate interactions nchange <- length(change) tracemessage(td, "Considering", nchange, ngettext(nchange, "submodel", "submodels")) bestobject <- NULL for(i in seq_len(nchange)) { change.i <- change[[i]] if(trace) { tracemessage(td, "Considering", ordinal(i), "candidate submodel, with interaction:") print(change.i) } # refit the whole model object.i <- update(object, interaction=change.i, correction=correction, rbord=rbord, forcefit=TRUE, envir=object$callframe) if(trace) { tracemessage(td, "Considering", ordinal(i), "candidate updated model:") print(object.i) } # recurse object.i <- emend.ppm(object.i, fatal=fatal, trace=tdnext) # evaluate logPL logPL.i <- 
logLik(object.i, warn=FALSE) tracemessage(td, "max log pseudolikelihood = ", logPL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i)) bestobject <- object.i } # end loop through submodels if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } object <- bestobject } else stop("Internal error: unrecognised format of update") object$projected <- TRUE object$coef.orig <- coef.orig leaving(td) return(object) } emend.ppm }) # more methods deviance.ppm <- function(object, ...) { satlogpl <- object$satlogpl if(is.null(satlogpl)) { object <- update(object, forcefit=TRUE) satlogpl <- object$satlogpl } if(is.null(satlogpl) || !is.finite(satlogpl)) return(NA) ll <- do.call(logLik, resolve.defaults(list(object=object, absolute=FALSE), list(...))) ll <- as.numeric(ll) 2 * (satlogpl - ll) } logLik.ppm <- function(object, ..., new.coef=NULL, warn=TRUE, absolute=FALSE) { if(!is.poisson.ppm(object) && warn) warn.once("ppmLogLik", "log likelihood is not available for non-Poisson model;", "log pseudolikelihood returned") ## degrees of freedom nip <- if(!inherits(object, "ippm")) 0 else length(attr(object$covfunargs, "free")) df <- length(coef(object)) + nip ## compute adjustment constant if(absolute && object$method %in% c("exact", "mpl", "ho")) { X <- data.ppm(object) W <- Window(X) areaW <- if(object$correction == "border" && object$rbord > 0) eroded.areas(W, object$rbord) else area(W) constant <- areaW * markspace.integral(X) } else constant <- 0 ## if(is.null(new.coef)) { ## extract from object ll <- object$maxlogpl + constant attr(ll, "df") <- df class(ll) <- "logLik" return(ll) } ## recompute for new parameter values method <- object$method if(method == "exact") method <- update(method, forcefit=TRUE) Q <- quad.ppm(object, drop=TRUE) Z <- is.data(Q) cif <- fitted(object, type="cif", new.coef=new.coef, drop=TRUE) cifdata <- cif[Z] switch(method, mpl=, exact=, ho = { w <- w.quad(Q) ll <- sum(log(cifdata[cifdata > 0])) - sum(w * cif) }, 
logi=, VBlogi={ B <- getglmdata(object, drop=TRUE)$.logi.B p <- cif/(B+cif) ll <- sum(log(p/(1-p))[Z]) + sum(log(1-p)) + sum(log(B[Z])) }, stop(paste("Internal error: unrecognised ppm method:", dQuote(method))) ) ll <- ll + constant attr(ll, "df") <- df class(ll) <- "logLik" return(ll) } pseudoR2 <- function(object, ...) { UseMethod("pseudoR2") } pseudoR2.ppm <- function(object, ...) { dres <- deviance(object, ..., warn=FALSE) nullmod <- update(object, . ~ 1, forcefit=TRUE) dnul <- deviance(nullmod, warn=FALSE) return(1 - dres/dnul) } formula.ppm <- function(x, ...) { return(x$trend) } terms.ppm <- function(x, ...) { terms(x$terms, ...) } labels.ppm <- function(object, ...) { # extract fitted trend coefficients co <- coef(object) Vnames <- object$internal$Vnames is.trend <- !(names(co) %in% Vnames) # model terms tt <- terms(object) lab <- attr(tt, "term.labels") if(length(lab) == 0) return(character(0)) # model matrix mm <- model.matrix(object) ass <- attr(mm, "assign") # 'ass' associates coefficients with model terms # except ass == 0 for the Intercept coef.ok <- is.finite(co) relevant <- (ass > 0) & is.trend okterms <- unique(ass[coef.ok & relevant]) return(lab[okterms]) } AIC.ppm <- function(object, ..., k=2, takeuchi=TRUE) { ll <- logLik(object, warn=FALSE) pen <- attr(ll, "df") if(takeuchi && !is.poisson(object)) { vv <- vcov(object, what="internals") logi <- (object$method == "logi") J <- with(vv, if(!logi) Sigma else (Sigma1log+Sigma2log)) H <- with(vv, if(!logi) A1 else Slog) ## Takeuchi penalty = trace of J H^{-1} = trace of H^{-1} J JiH <- try(solve(H, J), silent=TRUE) if(!inherits(JiH, "try-error")) pen <- sum(diag(JiH)) } return(- 2 * as.numeric(ll) + k * pen) } extractAIC.ppm <- function (fit, scale = 0, k = 2, ..., takeuchi=TRUE) { edf <- length(coef(fit)) aic <- AIC(fit, k=k, takeuchi=takeuchi) c(edf, aic) } # # method for model.frame model.frame.ppm <- function(formula, ...) 
{ object <- formula gf <- getglmfit(object) if(is.null(gf)) { warning("Model re-fitted with forcefit=TRUE") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) } # gd <- getglmdata(object) # model.frame(gf, data=gd, ...) if(object$fitter == "gam") modelFrameGam(gf, ...) else model.frame(gf, ...) } #' a hacked version of model.frame.glm that works for gam objects (mgcv) modelFrameGam <- function(formula, ...) { dots <- list(...) nargs <- dots[match(c("data", "na.action", "subset"), names(dots), 0L)] if (length(nargs) || is.null(formula$model)) { fcall <- formula$call # fcall$method <- "model.frame" fcall[[1L]] <- quote(mgcv::gam) fcall[names(nargs)] <- nargs env <- environment(formula$terms) if (is.null(env)) env <- parent.frame() refut <- eval(fcall, env) refut$model } else formula$model } # # method for model.matrix model.matrix.ppm <- function(object, data=model.frame(object, na.action=NULL), ..., Q=NULL, keepNA=TRUE) { if(missing(data)) data <- NULL PPMmodelmatrix(object, data=data, ..., Q=Q, keepNA=keepNA) } model.matrix.ippm <- function(object, data=model.frame(object, na.action=NULL), ..., Q=NULL, keepNA=TRUE, irregular=FALSE) { if(missing(data)) data <- NULL PPMmodelmatrix(object, data=data, ..., Q=Q, keepNA=keepNA, irregular=irregular) } PPMmodelmatrix <- function(object, data=model.frame(object, na.action=NULL), ..., Q=NULL, keepNA=TRUE, irregular=FALSE) { # handles ppm and ippm data.given <- !is.null(data) irregular <- irregular && inherits(object, "ippm") && !is.null(object$iScore) if(!is.null(Q)) { if(data.given) stop("Arguments Q and data are incompatible") if(!inherits(Q, c("ppp", "quad"))) stop("Q should be a point pattern or quadrature scheme") if(is.ppp(Q)) Q <- quad(Q, Q[FALSE]) ## construct Berman-Turner frame needed <- c("trend", "interaction", "covariates", "covfunargs", "correction", "rbord") bt <- do.call(bt.frame, append(list(Q), object[needed])) ## compute model matrix mf <- model.frame(bt$fmla, bt$glmdata, ...) 
mm <- model.matrix(bt$fmla, mf, ...) if(irregular) { ## add irregular score components U <- union.quad(Q) mi <- sapply(object$iScore, do.call, args=append(list(x=U$x, y=U$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) } ## remove NA's ? if(!keepNA) mm <- mm[complete.cases(mm), , drop=FALSE] return(mm) } gf <- getglmfit(object) if(is.null(gf)) { warning("Model re-fitted with forcefit=TRUE") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) if(is.null(gf)) stop("internal error: unable to extract a glm fit") } if(data.given) { # new data. Must contain the Berman-Turner variables as well. bt <- list(.mpl.Y=1, .mpl.W=1, .mpl.SUBSET=TRUE) if(any(forgot <- !(names(bt) %in% names(data)))) data <- do.call(cbind, append(list(data), bt[forgot])) mm <- model.matrix(gf, data=data, ...) if(irregular) { ## add irregular score components mi <- sapply(object$iScore, do.call, args=append(list(x=data$x, y=data$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) } if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign return(mm) } if(!keepNA && !irregular) { # extract model matrix of glm fit object # restricting to its 'subset' mm <- model.matrix(gf, ...) 
if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign return(mm) } # extract model matrix for all cases mm <- model.matrix(gf, ..., subset=NULL, na.action=NULL) cn <- colnames(mm) gd <- getglmdata(object, drop=FALSE) if(nrow(mm) != nrow(gd)) { # can occur if covariates include NA's or interaction is -Inf insubset <- getglmsubset(object) isna <- is.na(insubset) | !insubset if(sum(isna) + nrow(mm) == nrow(gd)) { # insert rows of NA's mmplus <- matrix( , nrow(gd), ncol(mm)) mmplus[isna, ] <- NA mmplus[!isna, ] <- mm mm <- mmplus } else stop("internal error: model matrix does not match glm data frame") } if(irregular) { ## add irregular score components U <- union.quad(quad.ppm(object, drop=FALSE)) mi <- sapply(object$iScore, do.call, args=append(list(x=U$x, y=U$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) cn <- c(cn, colnames(mi)) } if(!keepNA) mm <- mm[complete.cases(mm), , drop=FALSE] if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign colnames(mm) <- cn return(mm) } model.images <- function(object, ...) { UseMethod("model.images") } model.images.ppm <- function(object, W=as.owin(object), ...) { X <- data.ppm(object) # irregular <- resolve.1.default(list(irregular=FALSE), list(...)) ## make a quadscheme with a dummy point at every pixel Q <- pixelquad(X, W) ## compute model matrix mm <- model.matrix(object, Q=Q, ...) 
## retain only the entries for dummy points (pixels) mm <- mm[!is.data(Q), , drop=FALSE] mm <- as.data.frame(mm) ## create template image Z <- as.im(attr(Q, "M")) ok <- !is.na(Z$v) ## make images imagenames <- colnames(mm) if(!is.multitype(object)) { result <- lapply(as.list(mm), replace, list=ok, x=Z) result <- as.solist(result) names(result) <- imagenames } else { marx <- marks(Q$dummy) mmsplit <- split(mm, marx) result <- vector(mode="list", length=length(mmsplit)) for(i in seq_along(mmsplit)) result[[i]] <- as.solist(lapply(as.list(mmsplit[[i]]), replace, list=ok, x=Z)) names(result) <- names(mmsplit) result <- do.call(hyperframe, result) row.names(result) <- imagenames } return(result) } unitname.ppm <- function(x) { return(unitname(x$Q)) } "unitname<-.ppm" <- function(x, value) { unitname(x$Q) <- value return(x) } nobs.ppm <- function(object, ...) { npoints(data.ppm(object)) } as.interact.ppm <- function(object) { verifyclass(object, "ppm") inte <- object$interaction if(is.null(inte)) inte <- Poisson() return(inte) } as.ppm <- function(object) { UseMethod("as.ppm") } as.ppm.ppm <- function(object) { object } ## method for as.owin as.owin.ppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) { if(!verifyclass(W, "ppm", fatal=fatal)) return(NULL) from <- match.arg(from) datawin <- as.owin(data.ppm(W)) if(from == "points") return(datawin) covs <- W$covariates isim <- unlist(lapply(covs, is.im)) if(!any(isim)) return(datawin) cwins <- lapply(covs[isim], as.owin) covwin <- do.call(intersect.owin, unname(cwins)) result <- intersect.owin(covwin, datawin) return(result) } domain.ppm <- Window.ppm <- function(X, ..., from=c("points", "covariates")) { from <- match.arg(from) as.owin(X, ..., from=from) } ## change the coefficients in a ppm or other model tweak.coefs <- function(model, new.coef) { if(is.null(new.coef)) return(model) co <- coef(model) check.nvector(new.coef, length(co), things="coefficients") model$coef.orig <- co model$coef <- new.coef 
return(model) } spatstat/R/randomtess.R0000755000176200001440000000270113115271120014614 0ustar liggesusers# # randomtess.R # # Random tessellations # # $Revision: 1.7 $ $Date: 2015/10/21 09:06:57 $ # # Poisson line tessellation rpoislinetess <- function(lambda, win=owin()) { win <- as.owin(win) if(win$type == "mask") stop("Not implemented for masks") # determine circumcircle xr <- win$xrange yr <- win$yrange xmid <- mean(xr) ymid <- mean(yr) width <- diff(xr) height <- diff(yr) rmax <- sqrt(width^2 + height^2)/2 boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax) # generate poisson lines through circumcircle n <- rpois(1, lambda * 2 * pi * rmax) if(n == 0) return(tess(tiles=list(win))) theta <- runif(n, max= 2 * pi) p <- runif(n, max=rmax) Y <- infline(p=p, theta=theta) # form the induced tessellation in bounding box Z <- chop.tess(boundbox, Y) # clip to window Z <- intersect.tess(Z, win) attr(Z, "lines") <- Y return(Z) } rMosaicSet <- function(X, p=0.5) { stopifnot(is.tess(X)) Y <- tiles(X) Y <- Y[runif(length(Y)) < p] if(length(Y) == 0) return(NULL) Z <- NULL for(i in seq_along(Y)) Z <- union.owin(Z, Y[[i]]) return(Z) } rMosaicField <- function(X, rgen=function(n) { sample(0:1, n, replace=TRUE)}, ..., rgenargs=NULL ) { stopifnot(is.tess(X)) Y <- as.im(X, ...) ntiles <- length(levels(Y)) values <- do.call(rgen, append(list(ntiles),rgenargs)) Z <- eval.im(values[as.integer(Y)]) return(Z) } spatstat/R/rmhResolveTypes.R0000755000176200001440000000613413115271120015614 0ustar liggesusers# # # rmhResolveTypes.R # # $Revision: 1.9 $ $Date: 2009/10/31 01:52:54 $ # # rmhResolveTypes <- function(model, start, control) { # Decide whether a multitype point process is to be simulated. # If so, determine the vector of types. 
verifyclass(model, "rmhmodel") verifyclass(start, "rmhstart") verifyclass(control, "rmhcontrol") # Different ways of specifying types directly types.model <- model$types types.start <- if(start$given=="x" && is.marked(x.start <- start$x.start)) levels(marks(x.start, dfok=FALSE)) else NULL # Check for inconsistencies if(!is.null(types.model) && !is.null(types.start)) if(!identical(all.equal(types.model, types.start), TRUE)) stop("marks in start$x.start do not match model$types") types.given <- if(!is.null(types.model)) types.model else types.start types.given.source <- if(!is.null(types.model)) "model$types" else "marks of x.start" # Different ways of implying the number of types ntypes.beta <- length(model$par[["beta"]]) ntypes.ptypes <- length(control$ptypes) ntypes.nstart <- if(start$given == "n") length(start$n.start) else 0 mot <- model$trend ntypes.trend <- if(is.null(mot)) 0 else if(is.im(mot)) 1 else if(is.list(mot) && all(unlist(lapply(mot, is.im)))) length(mot) else 0 # Check for inconsistencies in implied number of types (only for numbers > 1) nty <- c(ntypes.beta, ntypes.ptypes, ntypes.nstart, ntypes.trend) nam <- c("model$par$beta", "control$ptypes", "start$n.start", "model$trend") implied <- (nty > 1) if(!any(implied)) ntypes.implied <- 1 else { if(length(unique(nty[implied])) > 1) stop(paste("Mismatch in numbers of types implied by", commasep(sQuote(nam[implied])))) ntypes.implied <- unique(nty[implied]) ntypes.implied.source <- (nam[implied])[1] } # Check consistency between types.given and ntypes.implied if(!is.null(types.given) && ntypes.implied > 1) if(length(types.given) != ntypes.implied) stop(paste("Mismatch between number of types in", types.given.source, "and length of", ntypes.implied.source)) # Finally determine the types if(model$multitype.interact) { # There MUST be a types vector types <- if(!is.null(types.given)) types.given else if(ntypes.implied > 1) 1:ntypes.implied else stop("Cannot determine types for multitype process") } else { 
types <- if(!is.null(types.given)) types.given else if(ntypes.implied > 1) 1:ntypes.implied else 1 } ntypes <- length(types) # If we are conditioning on the number of points of each type, # make sure the starting state is appropriate if(control$fixing == "n.each.type") { if(start$given == "n" && ntypes.nstart != ntypes) stop("Length of start$n.start not equal to number of types.\n") else if(start$given == "x" && length(types.given) != ntypes) stop("Marks of start$x.start do not match number of types.\n") } return(types) } spatstat/R/smoothfun.R0000644000176200001440000000323513115225157014470 0ustar liggesusers## ## smoothfun.R ## ## Exact 'funxy' counterpart of Smooth.ppp ## ## $Revision: 1.2 $ $Date: 2016/02/11 10:17:12 $ Smoothfun <- function(X, ...) { UseMethod("Smoothfun") } Smoothfun.ppp <- function(X, sigma=NULL, ..., weights=NULL, edge=TRUE, diggle=FALSE) { verifyclass(X, "ppp") if(!is.marked(X, dfok=TRUE)) stop("X should be a marked point pattern") stuff <- list(X=X, weights=weights, edge=edge, diggle=diggle) X <- coerce.marks.numeric(X) ## determine smoothing parameters ker <- resolve.2D.kernel(sigma=sigma, ..., x=X, bwfun=bw.smoothppp, allow.zero=TRUE) stuff <- append(stuff, ker[c("sigma", "varcov")]) ## g <- function(x, y=NULL) { Y <- xy.coords(x, y)[c("x", "y")] with(stuff, smoothcrossEngine(Xdata=X, Xquery=as.ppp(Y, X$window), values=marks(X), sigma=sigma, varcov=varcov, weights=weights, edge=edge, diggle=diggle)) } g <- funxy(g, as.rectangle(as.owin(X))) class(g) <- c("Smoothfun", class(g)) return(g) } print.Smoothfun <- function(x, ...) { cat("function(x,y)", "which returns", "values", "interpolated from", fill=TRUE) X <- get("X", envir=environment(x)) print(X, ...) return(invisible(NULL)) } ## Method for as.im ## (enables plot.funxy, persp.funxy, contour.funxy to work for this class) as.im.Smoothfun <- function(X, W=NULL, ...) 
{ stuff <- get("stuff", envir=environment(X)) if(!is.null(W)) stuff$X <- stuff$X[W] do.call(Smooth, resolve.defaults(list(...), stuff)) } spatstat/R/quadratcount.R0000755000176200001440000001577613115271120015167 0ustar liggesusers# # quadratcount.R # # $Revision: 1.57 $ $Date: 2016/08/15 03:05:15 $ # quadratcount <- function(X, ...) { UseMethod("quadratcount") } quadratcount.splitppp <- function(X, ...) { solapply(X, quadratcount, ...) } quadratcount.ppp <- function(X, nx=5, ny=nx, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL) { verifyclass(X, "ppp") W <- X$window if(is.null(tess)) { # rectangular boundaries if(!is.numeric(nx)) stop("nx should be numeric") # start with rectangular tessellation tess <- quadrats(as.rectangle(W), nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks) # fast code for counting points in rectangular grid Xcount <- rectquadrat.countEngine(X$x, X$y, tess$xgrid, tess$ygrid) # if(W$type != "rectangle") { # intersections of rectangles with window including empty intersections tess <- quadrats(X, nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks, keepempty=TRUE) # now delete the empty quadrats and the corresponding counts nonempty <- !tiles.empty(tess) # WAS: nonempty <- !unlist(lapply(tiles(tess), is.empty)) if(!any(nonempty)) stop("All tiles are empty") if(!all(nonempty)) { # ntiles <- sum(nonempty) tess <- tess[nonempty] Xcount <- t(Xcount)[nonempty] # matrices and tables are in row-major order, # tiles in a rectangular tessellation are in column-major order Xcount <- array(Xcount, dimnames=list(tile=tilenames(tess))) class(Xcount) <- "table" } } } else { # user-supplied tessellation if(!inherits(tess, "tess")) { tess <- try(as.tess(tess), silent=TRUE) if(inherits(tess, "try-error")) stop("The argument tess should be a tessellation", call.=FALSE) } if(tess$type == "rect") { # fast code for counting points in rectangular grid Xcount <- rectquadrat.countEngine(X$x, X$y, tess$xgrid, tess$ygrid) } else { # quadrats are another type of tessellation Y <- 
cut(X, tess) if(anyNA(marks(Y))) warning("Tessellation does not contain all the points of X") Xcount <- table(tile=marks(Y)) } } attr(Xcount, "tess") <- tess class(Xcount) <- c("quadratcount", class(Xcount)) return(Xcount) } plot.quadratcount <- function(x, ..., add=FALSE, entries=as.vector(t(as.table(x))), dx=0, dy=0, show.tiles=TRUE, textargs = list()) { xname <- short.deparse(substitute(x)) tess <- attr(x, "tess") # add=FALSE, show.tiles=TRUE => plot tiles + numbers # add=FALSE, show.tiles=FALSE => plot window (add=FALSE) + numbers # add=TRUE, show.tiles=TRUE => plot tiles (add=TRUE) + numbers # add=TRUE, show.tiles=FALSE => plot numbers if(show.tiles || !add) { context <- if(show.tiles) tess else as.owin(tess) do.call(plot, resolve.defaults(list(context, add=add), list(...), list(main=xname), .StripNull=TRUE)) } if(!is.null(entries)) { labels <- paste(as.vector(entries)) til <- tiles(tess) incircles <- lapply(til, incircle) x0 <- sapply(incircles, getElement, name="x") y0 <- sapply(incircles, getElement, name="y") ra <- sapply(incircles, getElement, name="r") do.call.matched(text.default, resolve.defaults(list(x=x0 + dx * ra, y = y0 + dy * ra), list(labels=labels), textargs, list(...)), funargs=graphicsPars("text")) } return(invisible(NULL)) } rectquadrat.breaks <- function(xr, yr, nx=5, ny=nx, xbreaks=NULL, ybreaks=NULL) { if(is.null(xbreaks)) xbreaks <- seq(from=xr[1], to=xr[2], length.out=nx+1) else if(min(xbreaks) > xr[1] || max(xbreaks) < xr[2]) stop("xbreaks do not span the range of x coordinates in the window") if(is.null(ybreaks)) ybreaks <- seq(from=yr[1], to=yr[2], length.out=ny+1) else if(min(ybreaks) > yr[1] || max(ybreaks) < yr[2]) stop("ybreaks do not span the range of y coordinates in the window") return(list(xbreaks=xbreaks, ybreaks=ybreaks)) } rectquadrat.countEngine <- function(x, y, xbreaks, ybreaks, weights) { if(length(x) > 0) { # check validity of breaks if(!all(inside.range(range(x), range(xbreaks)))) stop("xbreaks do not span the actual 
range of x coordinates in data") if(!all(inside.range(range(y), range(ybreaks)))) stop("ybreaks do not span the actual range of y coordinates in data") } # WAS: # xg <- cut(x, breaks=xbreaks, include.lowest=TRUE) # yg <- cut(y, breaks=ybreaks, include.lowest=TRUE) xg <- fastFindInterval(x, xbreaks, labels=TRUE) yg <- fastFindInterval(y, ybreaks, labels=TRUE) if(missing(weights)) { sumz <- table(list(y=yg, x=xg)) } else { # was: # sumz <- tapply(weights, list(y=yg, x=xg), sum) # if(any(nbg <- is.na(sumz))) # sumz[nbg] <- 0 sumz <- tapplysum(weights, list(y=yg, x=xg), do.names=TRUE) } # reverse order of y sumz <- sumz[rev(seq_len(nrow(sumz))), ] sumz <- as.table(sumz) # attr(sumz, "xbreaks") <- xbreaks attr(sumz, "ybreaks") <- ybreaks return(sumz) } quadrats <- function(X, nx=5, ny=nx, xbreaks = NULL, ybreaks = NULL, keepempty=FALSE) { W <- as.owin(X) xr <- W$xrange yr <- W$yrange b <- rectquadrat.breaks(xr, yr, nx, ny, xbreaks, ybreaks) # rectangular tiles Z <- tess(xgrid=b$xbreaks, ygrid=b$ybreaks, unitname=unitname(W)) if(W$type != "rectangle") { # intersect rectangular tiles with window W if(!keepempty) { Z <- intersect.tess(Z, W) } else { til <- tiles(Z) for(i in seq_along(til)) til[[i]] <- intersect.owin(til[[i]], W) Z <- tess(tiles=til, window=W, keepempty=TRUE) } } return(Z) } as.tess.quadratcount <- function(X) { return(attr(X, "tess")) } as.owin.quadratcount <- function(W, ..., fatal=TRUE) { return(as.owin(as.tess(W), ..., fatal=fatal)) } domain.quadratcount <- Window.quadratcount <- function(X, ...) { as.owin(X) } intensity.quadratcount <- function(X, ..., image=FALSE) { Y <- as.tess(X) a <- tile.areas(Y) ## in the rectangular case, tiles are indexed in column-major order if(Y$type == "rect" && length(dim(X)) > 1) a <- matrix(a, byrow=TRUE, nrow(X), ncol(X)) lambda <- X/a if(!image) { trap.extra.arguments(...) 
class(lambda) <- "table" attr(lambda, "tess") <- NULL return(lambda) } ## again to handle rectangular case lambda <- as.vector(t(lambda)) tileid <- as.im(Y, ...) result <- eval.im(lambda[tileid]) return(result) } ## The shift method is undocumented. ## It is only needed in plot.listof / plot.solist / plot.layered shift.quadratcount <- function(X, ...) { attr(X, "tess") <- te <- shift(attr(X, "tess"), ...) attr(X, "lastshift") <- getlastshift(te) return(X) } spatstat/R/strausshard.R0000755000176200001440000001105313115271120015000 0ustar liggesusers# # # strausshard.S # # $Revision: 2.22 $ $Date: 2016/02/16 01:39:12 $ # # The Strauss/hard core process # # StraussHard() create an instance of the Strauss-hardcore process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # StraussHard <- local({ BlankStraussHard <- list( name = "Strauss - hard core process", creator = "StraussHard", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- 1 * (d <= par$r) v[ d <= par$hc ] <- (-Inf) v }, par = list(r = NULL, hc = NULL), # filled in later parnames = c("interaction distance", "hard core distance"), selfstart = function(X, self) { # self starter for StraussHard nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } r <- self$par$r md <- minnndist(X) if(md == 0) { warning(paste("Pattern contains duplicated points:", "hard core must be zero")) return(StraussHard(r=r, hc=0)) } if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) StraussHard(r=r, hc = hcX) }, init = function(self) { r <- self$par$r hc <- self$par$hc if(length(hc) != 1) stop("hard core distance must be a single value") if(!is.na(hc)) { if(!is.numeric(hc) || hc <= 0) stop("hard 
core distance hc must be a positive number, or NA") if(!is.numeric(r) || length(r) != 1 || r <= hc) stop("interaction distance r must be a number greater than hc") } }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) return(is.finite(loggamma)) }, project = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) if(is.finite(loggamma)) return(NULL) hc <- self$par$hc if(hc > 0) return(Hardcore(hc)) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r hc <- self$par$hc if(anyNA(coeffs)) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(hc) else return(r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) 
{ # fast evaluator for StraussHard interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for StraussHard") r <- potpars$r hc <- potpars$hc hclose <- strausscounts(U, X, hc, EqualPairs) rclose <- strausscounts(U, X, r, EqualPairs) answer <- ifelseXB(hclose == 0, rclose, -Inf) return(matrix(answer, ncol=1)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral gamma <- exp(as.numeric(coeffs[1])) r <- self$par$r hc <- self$par$hc return(pi * (hc^2 + (1-gamma) * (r^2 - hc^2))) } ) class(BlankStraussHard) <- "interact" StraussHard <- function(r, hc=NA) { instantiate.interact(BlankStraussHard, list(r=r, hc=hc)) } StraussHard <- intermaker(StraussHard, BlankStraussHard) StraussHard }) spatstat/R/summary.mppm.R0000755000176200001440000001667413115271120015120 0ustar liggesusers# # summary.mppm.R # # $Revision: 1.15 $ $Date: 2016/04/25 02:34:40 $ # summary.mppm <- function(object, ..., brief=FALSE) { # y will be the summary y <- object[c("Call", "Info", "Inter", "trend", "iformula", #%^!ifdef RANDOMEFFECTS "random", #%^!endif "npat", "maxlogpl")] y$brief <- brief Info <- object$Info Inter <- object$Inter FIT <- object$Fit$FIT moadf <- object$Fit$moadf y$Fit <- object$Fit[c("fitter", "use.gam", "fmla", "Vnamelist")] y$Fit$FIT <- summary(FIT) y$Fit$moadf <- list(nrow=nrow(moadf), colnames=colnames(moadf)) ninteract <- Inter$ninteract interaction <- Inter$interaction iused <- Inter$iused itags <- Inter$itags processnames <- Inter$processes constant <- Inter$constant trivial <- Inter$trivial npat <- y$npat iformula <- y$iformula #%^!ifdef RANDOMEFFECTS random <- y$random #%^!endif Vnamelist <- y$Fit$Vnamelist allVnames <- unlist(Vnamelist) poistags <- itags[trivial] # rownames <- y$Info$rownames switch(y$Fit$fitter, #%^!ifdef RANDOMEFFECTS glmmPQL={ y$coef <- co <- fixed.effects(FIT) systematic <- !(names(co) %in% c(allVnames, poistags)) y$coef.syst <- co[systematic] y$coef.rand <- 
random.effects(FIT) }, #%^!endif gam=, glm={ y$coef <- co <- coef(FIT) systematic <- !(names(co) %in% c(allVnames, poistags)) y$coef.syst <- co[systematic] }) # model depends on covariates y$depends.covar <- Info$has.covar && (length(Info$used.cov.names) > 0) #%^!ifdef RANDOMEFFECTS # random effects y$ranef <- if(Info$has.random) summary(FIT$modelStruct) else NULL #%^!endif ### Interactions # model is Poisson y$poisson <- all(trivial[iused]) # Determine how complicated the interactions are: #%^!ifdef RANDOMEFFECTS # (0) are there random effects involving the interactions randominteractions <- !is.null(random) && any(variablesinformula(random) %in% itags) #%^!endif # (1) is the interaction formula of the form ~ tag + tag + ... + tag isimple <- identical(sort(variablesinformula(iformula)), sort(termsinformula(iformula))) # (2) is it of the form ~tag trivialformula <- (isimple && ninteract == 1) # (3) is it of the form ~tag where the interaction is the same in each row #%^!ifdef RANDOMEFFECTS fixedinteraction <- (trivialformula && constant && !randominteractions) #%^!else # fixedinteraction <- trivialformula && constant #%^!endif ### Determine printing of interactions, accordingly ### iprint <- list() #%^!ifdef RANDOMEFFECTS if(randominteractions) { toohard <- TRUE printeachrow <- FALSE } else #%^!endif if(fixedinteraction) { # exactly the same interaction for all patterns interaction <- interaction[1,1,drop=TRUE] fi.all <- fii(interaction, co, Vnamelist[[1]]) iprint <- list("Interaction for all patterns"=fi.all) printeachrow <- FALSE toohard <- FALSE } else if(trivialformula) { # same type of process for all patterns pname <- unlist(processnames)[iused] iprint <- list("Interaction for each pattern" = pname) printeachrow <- TRUE toohard <- FALSE } else if(isimple && all(constant)) { # several interactions involved, each of which is the same for all patterns iprint <- list("Interaction formula"=iformula, "Interactions defined for each pattern"=NULL) for(j in 
(1:ninteract)[iused]) { name.j <- paste("Interaction", sQuote(itags[j])) int.j <- Inter$interaction[1,j,drop=TRUE] Vnames.j <- Vnamelist[[j]] fii.j <- fii(int.j, co, Vnames.j) extra.j <- list(fii.j) names(extra.j) <- name.j iprint <- append(iprint, extra.j) } printeachrow <- FALSE toohard <- FALSE } else { # general case # determine which interaction(s) are active on each row active <- active.interactions(object) if(ninteract > 1 || !all(active)) iprint <- list("Active interactions"=active) printeachrow <- TRUE toohard <- any(rowSums(active) > 1) } y$ikind <- list( #%^!ifdef RANDOMEFFECTS randominteractions=randominteractions, #%^!endif isimple =isimple, trivialformula =trivialformula, fixedinteraction =fixedinteraction, toohard =toohard, printeachrow =printeachrow) if(toohard) iprint <- append(iprint, list("(Sorry, cannot interpret fitted interactions)")) else if(printeachrow) { subs <- subfits(object, what="interactions") names(subs) <- paste("Interaction", 1:npat) iprint <- append(iprint, subs) } y$iprint <- iprint class(y) <- c("summary.mppm", class(list)) return(y) } print.summary.mppm <- function(x, ..., brief=x$brief) { # NB: x is an object of class "summary.mppm" npat <- x$npat # Inter <- x$Inter # ninteract <- Inter$ninteract # interaction <- Inter$interaction # iused <- Inter$iused # constant <- Inter$constant # iformula <- x$iformula # processnames <- Inter$processes # itags <- Inter$itags # trivial <- Inter$trivial #%^!ifdef RANDOMEFFECTS # random <- x$random #%^!endif FIT <- x$Fit$FIT # Vnamelist <- x$Fit$Vnamelist # allVnames <- unlist(Vnamelist) # poistags <- itags[trivial] terselevel <- spatstat.options("terse") # rownames <- x$Info$rownames splat("Point process model fitted to", npat, "point patterns") if(waxlyrical('gory', terselevel)) splat("Call:", x$Call$callstring) splat("Log trend formula:", pasteFormula(x$trend)) switch(x$Fit$fitter, #%^!ifdef RANDOMEFFECTS glmmPQL={ cat("Fixed effects:\n") print(x$coef.syst) cat("Random effects:\n") 
print(x$coef.rand) co <- fixed.effects(FIT) }, #%^!endif gam=, glm={ cat("Fitted trend coefficients:\n") print(x$coef.syst) co <- coef(FIT) }) if(!brief && waxlyrical('extras', terselevel)) { cat("All fitted coefficients:\n") print(co) } parbreak(terselevel) #%^!ifdef RANDOMEFFECTS if(!is.null(x$ranef)) { splat("Random effects summary:") print(x$ranef) parbreak(terselevel) } #%^!endif ### Print interaction information ### if(waxlyrical('extras', terselevel)) { iprint <- x$iprint nama <- names(iprint) %orifnull% rep("", length(iprint)) for(i in seq_along(iprint)) { nami <- nama[i] vali <- iprint[[i]] if(brief && is.matrix(vali)) vali <- paren(paste(nrow(vali), "x", ncol(vali), "matrix")) if(nami != "") { inline <- inherits(vali, "formula") || is.character(vali) || (brief && inherits(vali, "fii")) if(inline) cat(paste0(nami, ":\t")) else splat(paste0(nami, ":")) } if(!is.null(vali)) { if(inherits(vali, "fii")) { print(vali, tiny=brief) } else if(is.character(vali)) { splat(vali) } else { print(vali) } } parbreak(terselevel) } } if(!brief && waxlyrical('gory', terselevel)) { splat("--- Gory details: ---") splat("Combined data frame has", x$Fit$moadf$nrow, "rows") print(FIT) } invisible(NULL) } spatstat/R/predictmppm.R0000755000176200001440000002750413115271120014771 0ustar liggesusers# # predictmppm.R # # $Revision: 1.9 $ $Date: 2015/10/21 09:06:57 $ # # # ------------------------------------------------------------------- predict.mppm <- local({ predict.mppm <- function(object, ..., newdata=NULL, type=c("trend", "cif"), ngrid=40, locations=NULL, verbose=FALSE) { ## ## 'object' is the output of mppm() ## model <- object verifyclass(model, "mppm") ## ## ## 'type' type <- pickoption("type", type, c(trend="trend", lambda="cif", cif="cif"), multi=TRUE) want.trend <- "trend" %in% type want.cif <- "cif" %in% type selfcheck <- resolve.defaults(list(...), list(selfcheck=FALSE))$selfcheck ## ## if(verbose) cat("Inspecting arguments...") ## ## 'newdata' use.olddata <- 
is.null(newdata) if(use.olddata) { newdata <- model$data newdataname <- "Original data" } else { stopifnot(is.data.frame(newdata) || is.hyperframe(newdata)) newdataname <- sQuote("newdata") } ## ## ## Locations for prediction if(is.hyperframe(locations)) locations <- locations[,1,drop=TRUE] if(is.list(locations)) cls <- unique(sapply(locations, class)) loctype <- if(is.null(locations)) "null" else if(is.data.frame(locations)) "data.frame" else if(is.list(locations)) { if(any(c("ppp", "quad") %in% cls)) "points" else if("owin" %in% cls) { if(all(sapply(locations, is.mask))) "mask" else "window" } else "unknown" } else "unknown" need.grid <- switch(loctype, null =TRUE, data.frame=FALSE, points =FALSE, mask =FALSE, window =TRUE, unknown =stop("Unrecognised format for locations")) make.image <- need.grid || (loctype == "mask") ## locationvars <- c("x", "y", "id") ## ## if(verbose) cat("done.\nDetermining locations for prediction...") if(need.grid) { ## prediction on a grid is required if(is.data.frame(newdata)) stop(paste("Cannot predict model on a grid;", newdataname, "are a data frame")) } else { ## prediction at `locations' is required if(is.hyperframe(newdata)) { ## check consistency between locations and newdata nloc <- length(locations) nnew <- summary(newdata)$ncases if(nloc != nnew) stop(paste("Length of argument", sQuote("locations"), paren(nloc), "does not match number of rows in", newdataname, paren(nnew))) } else { ## newdata is a data frame if(!is.data.frame(locations)) stop(paste(newdataname, "is a data frame; locations must be a data frame")) else { stopifnot(nrow(locations) == nrow(newdata)) dup <- names(newdata) %in% names(locations) if(any(dup)) for(nam in names(newdata)[dup]) if(!all.equal(newdata[,nam], locations[,nam])) stop(paste("The data frames newdata and locations", "both have a column called", sQuote(nam), "but the entries differ")) nbg <- !(locationvars %in% c(names(newdata),names(locations))) if(any(nbg)) stop(paste(ngettext(sum(nbg), 
"Variable", "Variables"), commasep(locationvars[nbg]), "not provided")) ## merge the two data frames newdata <- cbind(newdata[,!dup], locations) locations <- NULL } } } if(verbose) cat("done.\n Constructing data for prediction...") ## ## ## extract fitted glm/gam/glmm object FIT <- model$Fit$FIT ## extract names of interaction variables Vnamelist <- model$Fit$Vnamelist vnames <- unlist(Vnamelist) ## ## ## newdata is data frame if(is.data.frame(newdata)) { if(verbose) cat("(data frame)...") if(need.grid) stop("Cannot predict model on a grid; newdata is a data frame") ## use newdata as covariates nbg <- !(locationvars %in% names(newdata)) if(any(nbg)) stop(paste(ngettext(sum(nbg), "variable", "variables"), commasep(locationvars[nbg]), "not provided")) ## create output data frame answer <- as.data.frame(matrix(, nrow=nrow(newdata), ncol=0), row.names=row.names(newdata)) if(want.trend) { ## add interaction components, set to zero (if any) if(length(vnames) > 0) newdata[, vnames] <- 0 ## compute fitted values answer$trend <- predict(FIT, newdata=newdata, type="response") } if(want.cif) { warning("Not yet implemented (computation of cif in data frame case)") ## split data frame by 'id' ## compute interaction components using existing point patterns ## compute fitted values } return(answer) } ## newdata is a hyperframe if(verbose) cat("(hyperframe)...") sumry <- summary(newdata) npat.new <- sumry$ncases ## name of response point pattern in model Yname <- model$Info$Yname ## ## Determine response point patterns if known. 
## Extract from newdata if available ## Otherwise from the original data if appropriate if(verbose) cat("(responses)...") Y <- if(Yname %in% sumry$col.names) newdata[, Yname, drop=TRUE, strip=FALSE] else if(npat.new == model$npat) data[, Yname, drop=TRUE, strip=FALSE] else NULL ## if(want.cif && is.null(Y)) stop(paste("Cannot compute cif:", "newdata does not contain column", dQuote(Yname), "of response point patterns")) ## ## Determine windows for prediction if(verbose) cat("(windows)...") Wins <- if(!need.grid) lapply(locations, as.owin, fatal=FALSE) else if(!is.null(Y)) lapply(Y, as.owin, fatal=FALSE) else NULL if(is.null(Wins) || any(sapply(Wins, is.null))) stop("Cannot determine windows where predictions should be made") ## ## if(is.null(Y)) { ## only want trend; empty patterns will do Y <- lapply(Wins, emptypattern) } ## ensure Y contains data points only if(inherits(Y[[1]], "quad")) Y <- lapply(Y, getElement, name="data") ## Determine locations for prediction if(need.grid) { ## Generate grids of dummy locations if(verbose) cat("(grids)...") Gridded <- lapply(Wins, gridsample, ngrid=ngrid) Dummies <- lapply(Gridded, getElement, name="D") Templates <- lapply(Gridded, getElement, name="I") } else { ## locations are given somehow if(verbose) cat("(locations)...") if(loctype == "points") Dummies <- locations else if(loctype == "mask") { Dummies <- lapply(locations, punctify) Templates <- lapply(locations, as.im) } else stop("Internal error: illegal loctype") } ## Pack into quadschemes if(verbose) cat("(quadschemes)...") Quads <- list() for(i in seq(npat.new)) Quads[[i]] <- quad(data=Y[[i]], dummy=Dummies[[i]]) ## Insert quadschemes into newdata newdata[, Yname] <- Quads ## Determine interactions to be used if(verbose) cat("(interactions)...") interactions <- model$Inter$interaction ninter <- if(is.hyperframe(interactions)) nrow(interactions) else 1 nnew <- nrow(newdata) if(ninter != nnew && ninter != 1) { if(!all(model$Inter$constant)) stop(paste("Number of rows 
of newdata", paren(nnew), "does not match number of interactions in model", paren(ninter))) interactions <- interactions[1, ] } ## compute the Berman-Turner frame if(verbose) cat("done.\nStarting prediction...(Berman-Turner frame)...") moadf <- mppm(formula = model$formula, data = newdata, interaction = interactions, iformula = model$iformula, #%^!ifdef RANDOMEFFECTS random = model$random, #%^!endif use.gam = model$Fit$use.gam, correction = model$Info$correction, rbord = model$Info$rbord, backdoor = TRUE) ## compute fitted values if(verbose) cat("(glm prediction)...") values <- moadf[, c("x", "y", "id")] if(want.cif) values$cif <- predict(FIT, newdata=moadf, type="response") if(want.trend) { if(length(vnames) == 0) { ## Poisson model: trend = cif values$trend <- if(want.cif) values$cif else predict(FIT, newdata=moadf, type="response") } else { ## zero the interaction components moadf[, vnames] <- 0 ## compute fitted values values$trend <- predict(FIT, newdata=moadf, type="response") } } if(verbose) cat("done.\nReshaping results...") ## ## Reshape results ## separate answers for each image values <- split(values, values$id) ## Trends <- list() Lambdas <- list() if(!make.image) { if(verbose) cat("(marked point patterns)...") ## values become marks attached to locations for(i in seq(npat.new)) { Val <- values[[i]] Loc <- Dummies[[i]] isdum <- !is.data(Quads[[i]]) if(selfcheck) if(length(isdum) != length(Val$trend)) stop("Internal error: mismatch between data frame and locations") if(want.trend) Trends[[i]] <- Loc %mark% (Val$trend[isdum]) if(want.cif) Lambdas[[i]] <- Loc %mark% (Val$cif[isdum]) } } else { if(verbose) cat("(pixel images)...") ## assign values to pixel images for(i in seq(npat.new)) { values.i <- values[[i]] Q.i <- Quads[[i]] values.i <- values.i[!is.data(Q.i), ] Template.i <- Templates[[i]] ok.i <- !is.na(Template.i$v) if(sum(ok.i) != nrow(values.i)) stop("Internal error: mismatch between data frame and image") if(selfcheck) { dx <- 
rasterx.im(Template.i)[ok.i] - values.i$x dy <- rastery.im(Template.i)[ok.i] - values.i$y cat(paste("i=", i, "range(dx) =", paste(range(dx), collapse=", "), "range(dy) =", paste(range(dy), collapse=", "), "\n")) } if(want.trend) { Trend.i <- Template.i Trend.i$v[ok.i] <- values.i$trend Trends[[i]] <- Trend.i } if(want.cif) { Lambda.i <- Template.i Lambda.i$v[ok.i] <- values.i$cif Lambdas[[i]] <- Lambda.i } } } if(verbose) cat("done.\n") ## answer is a hyperframe Answer <- hyperframe(id=factor(levels(moadf$id)), row.names=sumry$row.names) if(want.trend) Answer$trend <- Trends if(want.cif) Answer$cif <- Lambdas return(Answer) } ## helper functions emptypattern <- function(w) { ppp(numeric(0), numeric(0), window=w) } gridsample <- function(W, ngrid) { masque <- as.mask(W, dimyx=ngrid) xx <- raster.x(masque) yy <- raster.y(masque) xpredict <- xx[masque$m] ypredict <- yy[masque$m] Dummy <- ppp(xpredict, ypredict, window=W) Image <- as.im(masque) return(list(D=Dummy, I=Image)) } punctify <- function(M) { xx <- raster.x(M) yy <- raster.y(M) xpredict <- xx[M$m] ypredict <- yy[M$m] return(ppp(xpredict, ypredict, window=M)) } predict.mppm }) spatstat/R/rmhmodel.ppm.R0000755000176200001440000003321513115271120015043 0ustar liggesusers# # rmhmodel.ppm.R # # convert ppm object into format palatable to rmh.default # # $Revision: 2.64 $ $Date: 2017/06/05 10:31:58 $ # # .Spatstat.rmhinfo # rmhmodel.ppm() # .Spatstat.Rmhinfo <- list( "Multitype Hardcore process" = function(coeffs, inte) { # hard core radii r[i,j] hradii <- inte$par[["hradii"]] return(list(cif='multihard', par=list(hradii=hradii), ntypes=ncol(hradii))) }, "Lennard-Jones process" = function(coeffs, inte) { pa <- inte$interpret(coeffs,inte)$param sigma <- pa[["sigma"]] epsilon <- pa[["epsilon"]] return(list(cif='lennard', par=list(sigma=sigma, epsilon=epsilon), ntypes=1)) }, "Fiksel process" = function(coeffs, inte) { hc <- inte$par[["hc"]] r <- inte$par[["r"]] kappa <- inte$par[["kappa"]] a <- 
inte$interpret(coeffs,inte)$param$a return(list(cif='fiksel', par=list(r=r,hc=hc,kappa=kappa,a=a), ntypes=1)) }, "Diggle-Gates-Stibbard process" = function(coeffs, inte) { rho <- inte$par[["rho"]] return(list(cif='dgs', par=list(rho=rho), ntypes=1)) }, "Diggle-Gratton process" = function(coeffs, inte) { kappa <- inte$interpret(coeffs,inte)$param$kappa delta <- inte$par[["delta"]] rho <- inte$par[["rho"]] return(list(cif='diggra', par=list(kappa=kappa,delta=delta,rho=rho), ntypes=1)) }, "Hard core process" = function(coeffs, inte) { hc <- inte$par[["hc"]] return(list(cif='hardcore', par=list(hc=hc), ntypes=1)) }, "Geyer saturation process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] sat <- inte$par[["sat"]] return(list(cif='geyer', par=list(gamma=gamma,r=r,sat=sat), ntypes=1)) }, "Soft core process" = function(coeffs, inte) { kappa <- inte$par[["kappa"]] sigma <- inte$interpret(coeffs,inte)$param$sigma return(list(cif="sftcr", par=list(sigma=sigma,kappa=kappa), ntypes=1)) }, "Strauss process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif = "strauss", par = list(gamma = gamma, r = r), ntypes=1)) }, "Strauss - hard core process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] hc <- inte$par[["hc"]] return(list(cif='straush', par=list(gamma=gamma,r=r,hc=hc), ntypes=1)) }, "Triplets process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif = "triplets", par = list(gamma = gamma, r = r), ntypes=1)) }, "Penttinen process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif='penttinen', par=list(gamma=gamma, r=r), ntypes=1)) }, "Multitype Strauss process" = function(coeffs, inte) { # interaction radii r[i,j] radii <- inte$par[["radii"]] # interaction parameters gamma[i,j] gamma <- 
(inte$interpret)(coeffs, inte)$param$gammas return(list(cif='straussm', par=list(gamma=gamma,radii=radii), ntypes=ncol(radii))) }, "Multitype Strauss Hardcore process" = function(coeffs, inte) { # interaction radii r[i,j] iradii <- inte$par[["iradii"]] # hard core radii r[i,j] hradii <- inte$par[["hradii"]] # interaction parameters gamma[i,j] gamma <- (inte$interpret)(coeffs, inte)$param$gammas return(list(cif='straushm', par=list(gamma=gamma,iradii=iradii,hradii=hradii), ntypes=ncol(iradii))) }, "Piecewise constant pairwise interaction process" = function(coeffs, inte) { r <- inte$par[["r"]] gamma <- (inte$interpret)(coeffs, inte)$param$gammas h <- stepfun(r, c(gamma, 1)) return(list(cif='lookup', par=list(h=h), ntypes=1)) }, "Area-interaction process" = function(coeffs, inte) { r <- inte$par[["r"]] eta <- (inte$interpret)(coeffs, inte)$param$eta return(list(cif='areaint', par=list(eta=eta,r=r), ntypes=1)) }, "hybrid Geyer process" = function(coeffs, inte) { r <- inte$par[["r"]] sat <- inte$par[["sat"]] gamma <- (inte$interpret)(coeffs,inte)$param$gammas return(list(cif='badgey',par=list(gamma=gamma,r=r,sat=sat), ntypes=1)) }, "Hybrid interaction"= function(coeffs, inte){ # for hybrids, $par is a list of the component interactions interlist <- inte$par # check for Poisson components ispois <- unlist(lapply(interlist, is.poisson)) if(all(ispois)) { # reduces to Poisson Z <- list(cif='poisson', par=list()) return(Z) } else if(any(ispois)) { # remove Poisson components interlist <- interlist[!ispois] } # N <- length(interlist) cifs <- character(N) pars <- vector(mode="list", length=N) ntyp <- integer(N) for(i in 1:N) { interI <- interlist[[i]] # forbid hybrids-of-hybrids - these should not occur anyway if(interI$name == "Hybrid interaction") stop("Simulation of a hybrid-of-hybrid interaction is not implemented") # get RMH mapping for I-th component siminfoI <- .Spatstat.Rmhinfo[[interI$name]] if(is.null(siminfoI)) stop(paste("Simulation of a fitted", 
sQuote(interI$name), "has not yet been implemented"), call.=FALSE) # nameI is the tag that identifies I-th component in hybrid nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) # extract coefficients # (there may be none, if this interaction is an 'offset') coeffsI <- coeffs[relevant] # remove the prefix so the coefficients are recognisable to 'siminfoI' if(any(relevant)) names(coeffsI) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # compute RMH info ZI <- siminfoI(coeffsI, interI) cifs[i] <- ZI$cif pars[[i]] <- ZI$par ntyp[i] <- ZI$ntypes } nt <- unique(ntyp[ntyp != 1]) if(length(nt) > 1) stop(paste("Hybrid components have different numbers of types:", commasep(nt))) if(N == 1) { # single cif: revert to original format: par is a list of parameters Z <- list(cif=cifs[1], par=pars[[1]], ntypes=ntyp) } else { # hybrid cif: par is a list of lists of parameters Z <- list(cif=cifs, par=pars, ntypes=ntyp) } return(Z) } ) # OTHER MODELS not yet implemented: # # # interaction object rmh.default # ------------------ ----------- # # OrdThresh # rmhmodel.ppm <- function(model, w, ..., verbose=TRUE, project=TRUE, control=rmhcontrol(), new.coef=NULL) { ## converts ppm object `model' into format palatable to rmh.default verifyclass(model, "ppm") argh <- list(...) if(!is.null(new.coef)) model <- tweak.coefs(model, new.coef) ## Ensure the fitted model is valid ## (i.e. 
exists mathematically as a point process) if(!valid.ppm(model)) { if(project) { if(verbose) cat("Model is invalid - projecting it\n") model <- project.ppm(model, fatal=TRUE) } else stop("The fitted model is not a valid point process") } if(verbose) cat("Extracting model information...") ## Extract essential information Y <- summary(model, quick="no variances") if(Y$marked && !Y$multitype) stop("Not implemented for marked point processes other than multitype") if(Y$uses.covars && is.data.frame(model$covariates)) stop(paste("This model cannot be simulated, because the", "covariate values were given as a data frame.")) ## enforce defaults for `control' control <- rmhcontrol(control) ## adjust to peculiarities of model control <- rmhResolveControl(control, model) ######## Interpoint interaction if(Y$poisson) { Z <- list(cif="poisson", par=list()) # par is filled in later } else { ## First check version number of ppm object if(Y$antiquated) stop(paste("This model was fitted by a very old version", "of the package: spatstat", Y$version, "; simulation is not possible.", "Re-fit the model using your original code")) else if(Y$old) warning(paste("This model was fitted by an old version", "of the package: spatstat", Y$version, ". 
Re-fit the model using update.ppm", "or your original code")) ## Extract the interpoint interaction object inte <- Y$entries$interaction ## Determine whether the model can be simulated using rmh siminfo <- .Spatstat.Rmhinfo[[inte$name]] if(is.null(siminfo)) stop(paste("Simulation of a fitted", sQuote(inte$name), "has not yet been implemented")) ## Get fitted model's canonical coefficients coeffs <- Y$entries$coef if(newstyle.coeff.handling(inte)) { ## extract only the interaction coefficients Vnames <- Y$entries$Vnames IsOffset <- Y$entries$IsOffset coeffs <- coeffs[Vnames[!IsOffset]] } ## Translate the model to the format required by rmh.default Z <- siminfo(coeffs, inte) if(is.null(Z)) stop("The model cannot be simulated") else if(is.null(Z$cif)) stop(paste("Internal error: no cif returned from .Spatstat.Rmhinfo")) } ## Don't forget the types if(Y$multitype && is.null(Z$types)) Z$types <- levels(Y$entries$marks) ######## Window for result if(missing(w) || is.null(w)) { ## check for outdated argument name 'win' if(!is.na(m <- match("win", names(argh)))) { warning("Argument 'win' to rmhmodel.ppm is deprecated; use 'w'") w <- argh[[m]] argh <- argh[-m] } else w <- Y$entries$data$window } Z$w <- w ######## Expanded window for simulation? 
covims <- if(Y$uses.covars) model$covariates[Y$covars.used] else NULL wsim <- rmhResolveExpansion(w, control, covims, "covariate")$wsim ###### Trend or Intensity ############ if(verbose) cat("Evaluating trend...") if(Y$stationary) { ## first order terms (beta or beta[i]) are carried in Z$par beta <- as.numeric(Y$trend$value) Z$trend <- NULL } else { ## trend terms present ## all first order effects are subsumed in Z$trend beta <- if(!Y$marked) 1 else rep.int(1, length(Z$types)) ## predict on window possibly larger than original data window Z$trend <- if(wsim$type == "mask") predict(model, window=wsim, type="trend", locations=wsim) else predict(model, window=wsim, type="trend") } Ncif <- length(Z$cif) if(Ncif == 1) { ## single interaction Z$par[["beta"]] <- beta } else { ## hybrid interaction if(all(Z$ntypes == 1)) { ## unmarked model: scalar 'beta' is absorbed in first cif absorb <- 1 } else { ## multitype model: vector 'beta' is absorbed in a multitype cif absorb <- min(which(Z$ntypes > 1)) } Z$par[[absorb]]$beta <- beta ## other cifs have par$beta = 1 for(i in (1:Ncif)[-absorb]) Z$par[[i]]$beta <- rep.int(1, Z$ntypes[i]) } if(verbose) cat("done.\n") Z <- do.call(rmhmodel, append(list(Z), argh)) return(Z) } rmhResolveExpansion <- function(win, control, imagelist, itype="covariate") { # Determine expansion window for simulation ex <- control$expand # The following is redundant because it is implied by !will.expand(ex) # if(ex$force.noexp) { # # Expansion prohibited # return(list(wsim=win, expanded=FALSE)) # } # Is expansion contemplated? 
if(!will.expand(ex)) return(list(wsim=win, expanded=FALSE)) # Proposed expansion window wexp <- expand.owin(win, ex) # Check feasibility isim <- unlist(lapply(imagelist, is.im)) imagelist <- imagelist[isim] if(length(imagelist) == 0) { # Unlimited expansion is feasible return(list(wsim=wexp, expanded=TRUE)) } # Expansion is limited to domain of image data # Determine maximum possible expansion window wins <- lapply(imagelist, as.owin) cwin <- do.call(intersect.owin, unname(wins)) if(!is.subset.owin(wexp, cwin)) { # Cannot expand to proposed window if(ex$force.exp) stop(paste("Cannot expand the simulation window,", "because the", itype, "images do not cover", "the expanded window"), call.=FALSE) # Take largest possible window wexp <- intersect.owin(wexp, cwin) } return(list(wsim=wexp, expanded=TRUE)) } spatstat/R/linearK.R0000755000176200001440000002233513142453243014037 0ustar liggesusers# # linearK # # $Revision: 1.48 $ $Date: 2017/08/09 00:21:51 $ # # K function for point pattern on linear network # # linearK <- function(X, r=NULL, ..., correction="Ang", ratio=FALSE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) np <- npoints(X) lengthL <- volume(domain(X)) denom <- np * (np - 1)/lengthL K <- linearKengine(X, r=r, ..., denom=denom, correction=correction, ratio=ratio) # set appropriate y axis label switch(correction, Ang = { ylab <- quote(K[L](r)) fname <- c("K", "L") }, none = { ylab <- quote(K[net](r)) fname <- c("K", "net") }) K <- rebadge.fv(K, new.ylab=ylab, new.fname=fname) return(K) } linearKinhom <- function(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, ratio=FALSE) { stopifnot(inherits(X, "lpp")) loo.given <- !missing(leaveoneout) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) if(is.null(lambda)) linearK(X, r=r, ..., ratio=ratio, correction=correction) 
if(normalise) { check.1.real(normpower) stopifnot(normpower >= 1) } lambdaX <- getlambda.lpp(lambda, X, ..., update=update, leaveoneout=leaveoneout, loo.given=loo.given, lambdaname="lambda") invlam <- 1/lambdaX invlam2 <- outer(invlam, invlam, "*") lengthL <- volume(domain(X)) denom <- if(!normalise) lengthL else if(normpower == 1) sum(invlam) else lengthL * (sum(invlam)/lengthL)^normpower K <- linearKengine(X, reweight=invlam2, denom=denom, r=r, correction=correction, ratio=ratio, ...) # set appropriate y axis label switch(correction, Ang = { ylab <- quote(K[L, inhom](r)) yexp <- quote(K[list(L, "inhom")](r)) fname <- c("K", "list(L, inhom)") }, none = { ylab <- quote(K[net, inhom](r)) yexp <- quote(K[list(net, "inhom")](r)) fname <- c("K", "list(net, inhom)") }) K <- rebadge.fv(K, new.fname=fname, new.ylab=ylab, new.yexp=yexp) attr(K, "dangerous") <- attr(lambdaX, "dangerous") return(K) } getlambda.lpp <- function(lambda, X, subset=NULL, ..., update=TRUE, leaveoneout=TRUE, loo.given=TRUE, lambdaname) { missup <- missing(update) if(missing(lambdaname)) lambdaname <- deparse(substitute(lambda)) Y <- if(is.null(subset)) X else X[subset] danger <- TRUE if(is.ppm(lambda) || is.lppm(lambda)) { ## fitted model if(update) { ## refit the model to the full dataset X lambda <- if(is.lppm(lambda)) update(lambda, X) else update(lambda, as.ppp(X)) ## now evaluate lambdaX <- fitted(lambda, dataonly=TRUE, leaveoneout=leaveoneout) ## restrict if required lambdaY <- if(is.null(subset)) lambdaX else lambdaX[subset] ## danger <- FALSE if(missup) warn.once("lin.inhom.update", "The behaviour of linearKinhom and similar functions", "when lambda is an lppm object", "has changed in spatstat 1.41-0,", "and again in spatstat 1.52-0.", "See help(linearKinhom)") } else { if(loo.given && leaveoneout) stop("leave-one-out calculation for fitted models is only available when update=TRUE", call.=FALSE) lambdaY <- predict(lambda, locations=as.data.frame(as.ppp(Y))) } } else { ## lambda is some 
other kind of object lambdaY <- if(is.vector(lambda)) lambda else if(inherits(lambda, "linfun")) lambda(Y, ...) else if(inherits(lambda, "linim")) lambda[Y, drop=FALSE] else if(is.function(lambda)) { coo <- coords(Y) do.call.matched(lambda, list(x=coo$x, y=coo$y, ...)) } else if(is.im(lambda)) safelookup(lambda, as.ppp(Y)) else stop(paste(lambdaname, "should be", "a numeric vector, function, pixel image, or fitted model")) } if(!is.numeric(lambdaY)) stop(paste("Values of", lambdaname, "are not numeric")) if((nv <- length(lambdaY)) != (np <- npoints(Y))) stop(paste("Obtained", nv, "values of", lambdaname, "but point pattern contains", np, "points")) if(any(lambdaY < 0)) stop(paste("Negative values of", lambdaname, "obtained")) if(any(lambdaY == 0)) stop(paste("Zero values of", lambdaname, "obtained")) if(danger) attr(lambdaY, "dangerous") <- lambdaname return(lambdaY) } linearKengine <- function(X, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", ratio=FALSE, showworking=FALSE) { # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # extract info about pattern np <- npoints(X) # extract linear network L <- domain(X) W <- Window(L) # determine r values rmaxdefault <- 0.98 * boundingradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # type <- if(correction == "Ang") "L" else "net" fname <- c("K", type) ylab <- substitute(K[type](r), list(type=type)) # if(np < 2) { # no pairs to count: return zero function zeroes <- numeric(length(r)) df <- data.frame(r = r, est = zeroes) K <- ratfv(df, NULL, 0, "r", ylab, "est", . 
~ r, c(0, rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname = fname, ratio=ratio) if(correction == "Ang") { # tack on theoretical value K <- bind.ratfv(K, quotient = data.frame(theo=r), denominator = 0, labl = makefvlabel(NULL, NULL, fname, "theo"), desc = "theoretical Poisson %s", ratio = ratio) } return(K) } # compute pairwise distances D <- pairdist(X) #--- compile into K function --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) K <- compileK(D, r, denom=denom, fname=fname, ratio=ratio) K <- rebadge.fv(K, ylab, fname) unitname(K) <- unitname(X) return(K) } if(correction == "none") edgewt <- 1 else { # inverse m weights (Wei's correction) # determine tolerance toler <- default.linnet.tolerance(L) # compute m[i,j] m <- matrix(1, np, np) for(j in 1:np) m[ -j, j] <- countends(L, X[-j], D[-j,j], toler=toler) if(any(uhoh <- (m == 0) & is.finite(D))) { warning("Internal error: disc boundary count equal to zero") m[uhoh] <- 1 } edgewt <- 1/m } # compute K wt <- if(!is.null(reweight)) edgewt * reweight else edgewt K <- compileK(D, r, weights=wt, denom=denom, fname=fname, ratio=ratio) # tack on theoretical value if(ratio) { K <- bind.ratfv(K, quotient = data.frame(theo = r), denominator = denom, labl = makefvlabel(NULL, NULL, fname, "theo"), desc = "theoretical Poisson %s") } else { K <- bind.fv(K, data.frame(theo=r), makefvlabel(NULL, NULL, fname, "theo"), "theoretical Poisson %s") } K <- rebadge.fv(K, ylab, fname) unitname(K) <- unitname(X) fvnames(K, ".") <- rev(fvnames(K, ".")) # show working if(showworking) attr(K, "working") <- list(D=D, wt=wt) attr(K, "correction") <- correction return(K) } ApplyConnected <- function(X, Engine, r=NULL, ..., rule, auxdata=NULL) { # Apply 'Engine' to each connected component of domain(X) stopifnot(is.function(rule)) # Ensure distance information is present X <- as.lpp(X, sparse=FALSE) L <- domain(X) # check network connectivity br <- boundingradius(L) 
if(disco <- is.infinite(br)) { # disconnected network XX <- connected(X) LL <- lapply(XX, domain) br <- max(sapply(LL, boundingradius)) } else XX <- NULL # determine r values rmaxdefault <- 0.98 * br breaks <- handle.r.b.args(r, NULL, Window(L), rmaxdefault=rmaxdefault) r <- breaks$r if(!disco) { # single connected network stuff <- rule(X=X, auxdata=auxdata, ...) result <- do.call(Engine, append(list(X=X, r=r), stuff)) return(result) } # disconnected network nsub <- length(XX) results <- anylist() denoms <- numeric(nsub) for(i in seq_len(nsub)) { X.i <- XX[[i]] sub.i <- attr(X.i, "retainpoints") # identifies which points of X aux.i <- if(length(auxdata) == 0) NULL else lapply(auxdata, marksubset, index=sub.i) stuff.i <- rule(X=X.i, auxdata=aux.i, ...) denoms[i] <- stuff.i$denom %orifnull% 1 results[[i]] <- do.call(Engine, append(list(X=X.i, r=r), stuff.i)) } result <- do.call(pool, append(results, list(weights=denoms, relabel=FALSE, variance=FALSE))) return(result) } spatstat/R/plot.mppm.R0000755000176200001440000000125013115271120014361 0ustar liggesusers# # plot.mppm.R # # $Revision: 1.4 $ $Date: 2016/02/11 10:17:12 $ # # plot.mppm <- function(x, ..., trend=TRUE, cif=FALSE, se=FALSE, how=c("image", "contour", "persp")) { xname <- deparse(substitute(x)) how <- match.arg(how) subs <- subfits(x) arglist <- resolve.defaults(list(x=subs, how=how), list(...), list(main=xname)) if(trend) do.call(plot, c(arglist, list(trend=TRUE, cif=FALSE, se=FALSE))) if(cif) do.call(plot, c(arglist, list(trend=FALSE, cif=TRUE, se=FALSE))) if(se) do.call(plot, c(arglist, list(trend=FALSE, cif=FALSE, se=TRUE))) invisible(NULL) } spatstat/R/parameters.R0000644000176200001440000000121313115225157014603 0ustar liggesusers## ## parameters.R ## ## $Revision: 1.2 $ $Date: 2015/05/08 04:27:15 $ ## parameters <- function(model, ...) { UseMethod("parameters") } parameters.ppm <- function(model, ...) 
hopskel.test <- function(X, ...,
                         alternative=c("two.sided", "less", "greater",
                                       "clustered", "regular"),
                         method=c("asymptotic", "MonteCarlo"),
                         nsim=999) {
  ## Hopkins-Skellam test of Complete Spatial Randomness.
  ## X:           point pattern (class 'ppp')
  ## alternative: alternative hypothesis; "clustered"/"regular" are
  ##              synonyms for "less"/"greater" respectively
  ## method:      "asymptotic" (F distribution) or "MonteCarlo"
  ## nsim:        number of simulations for the Monte Carlo test
  ## Returns an object of class 'htest'.
  Xname <- short.deparse(substitute(X))
  verifyclass(X, "ppp")
  W <- Window(X)
  n <- npoints(X)
  method <- match.arg(method)
  ## alternative hypothesis
  alternative <- match.arg(alternative)
  if(alternative == "clustered") alternative <- "less"
  if(alternative == "regular") alternative <- "greater"
  altblurb <- switch(alternative,
                     two.sided="two-sided",
                     less="clustered (A < 1)",
                     greater="regular (A > 1)")
  ## compute observed value of the Hopkins-Skellam statistic
  statistic <- hopskel(X)
  ## p-value
  switch(method,
         asymptotic = {
           ## under CSR the statistic follows an F(2n, 2n) distribution
           nn <- 2 * n
           p.value <- switch(alternative,
                             less = pf(statistic, nn, nn, lower.tail=TRUE),
                             greater = pf(statistic, nn, nn, lower.tail=FALSE),
                             two.sided = 2 * pf(statistic, nn, nn,
                                                lower.tail=(statistic < 1)))
           pvblurb <- "using F distribution"
         },
         MonteCarlo = {
           ## Monte Carlo p-value: simulate CSR with fixed n
           sims <- numeric(nsim)
           for(i in seq_len(nsim)) {
             Xsim <- runifpoint(n, win=W)
             sims[i] <- hopskel(Xsim)
           }
           ## bug fix: the p-values are now computed once, after the
           ## simulation loop, instead of being recomputed (against a
           ## partially-filled 'sims' vector) on every iteration
           p.upper <- (1 + sum(sims >= statistic))/(1 + nsim)
           p.lower <- (1 + sum(sims <= statistic))/(1 + nsim)
           p.value <- switch(alternative,
                             less=p.lower,
                             greater=p.upper,
                             two.sided=2*min(p.lower, p.upper))
           pvblurb <- paste("Monte Carlo test based on", nsim,
                            "simulations of CSR with fixed n")
         })
  statistic <- as.numeric(statistic)
  names(statistic) <- "A"
  out <- list(statistic=statistic,
              p.value=p.value,
              alternative=altblurb,
              method=c("Hopkins-Skellam test of CSR", pvblurb),
              data.name=Xname)
  class(out) <- "htest"
  return(out)
}
wantL=FALSE, lambda=NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { npts <- npoints(X) W <- X$window areaW <- area(W) lambda.ave <- npts/areaW lambda1.ave <- (npts - 1)/areaW weighted <- !is.null(lambda) if(is.null(rvalue)) rmaxdefault <- rmax.rule("K", W, lambda.ave) else { stopifnot(is.numeric(rvalue)) stopifnot(length(rvalue) == 1) stopifnot(rvalue >= 0) rmaxdefault <- rvalue } breaks <- handle.r.b.args(NULL, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(none="none", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=FALSE) correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax) DIJ <- close$d XI <- ppp(close$xi, close$yi, window=W, check=FALSE) I <- close$i if(weighted) { J <- close$j lambdaJ <- lambda[J] weightJ <- 1/lambdaJ } # initialise df <- as.data.frame(matrix(NA, length(r), npts)) labl <- desc <- character(npts) bkt <- function(x) { paste("[", x, "]", sep="") } if(verbose) state <- list() switch(correction, none={ # uncorrected! For demonstration purposes only! 
for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, if(weighted) weightJ[ii] else NULL) # no edge weights df[,i] <- cumsum(wh) icode <- numalign(i, npts) names(df)[i] <- paste("un", icode, sep="") labl[i] <- paste("%s", bkt(icode), "(r)", sep="") desc[i] <- paste("uncorrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, npts, state=state) } if(!weighted) df <- df/lambda1.ave }, translate={ # Translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) if(weighted) edgewt <- edgewt * weightJ for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Ktrans <- cumsum(wh) df[,i] <- Ktrans icode <- numalign(i, npts) names(df)[i] <- paste("trans", icode, sep="") labl[i] <- paste("%s", bkt(icode), "(r)", sep="") desc[i] <- paste("translation-corrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, npts, state=state) } if(!weighted) df <- df/lambda1.ave h <- diameter(W)/2 df[r >= h, ] <- NA }, isotropic={ # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(weighted) edgewt <- edgewt * weightJ for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Kiso <- cumsum(wh) df[,i] <- Kiso icode <- numalign(i, npts) names(df)[i] <- paste("iso", icode, sep="") labl[i] <- paste("%s", bkt(icode), "(r)", sep="") desc[i] <- paste("Ripley isotropic correction estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, npts, state=state) } if(!weighted) df <- df/lambda1.ave h <- diameter(W)/2 df[r >= h, ] <- NA }) # transform values if L required if(wantL) df <- sqrt(df/pi) # return vector of values at r=rvalue, if desired if(!is.null(rvalue)) { nr <- length(r) if(r[nr] != rvalue) stop("Internal error - rvalue not attained") return(as.numeric(df[nr,])) } # function value table required # add r and theo if(!wantL) { df <- cbind(df, data.frame(r=r, theo=pi * r^2)) 
## The Poisson process.
##
## Poisson() creates an object of class 'interact' describing the
## (null) interpoint interaction structure of the Poisson process.
## Every interaction slot is trivial, since points do not interact.
Poisson <- local({

  ## prototype interaction object shared by all calls to Poisson()
  proto <- list(
    name     = "Poisson process",
    creator  = "Poisson",
    family   = NULL,
    pot      = NULL,
    par      = NULL,
    parnames = NULL,
    init     = function(...) { },
    update   = function(...) { },
    print    = function(self) {
      cat("Poisson process\n")
      invisible()
    },
    valid    = function(...) { TRUE },
    project  = function(...) NULL,
    irange   = function(...) { 0 },
    version  = NULL
  )
  class(proto) <- "interact"

  ## the constructor simply hands back the prototype
  Poisson <- function() { proto }
  Poisson <- intermaker(Poisson, proto)
  Poisson
})
har <- harmonise(D=D, B=B) D <- har$D[W, drop=FALSE] B <- har$B[W, drop=FALSE] } ## histogram breakpoints dmax <- max(D) breaks <- handle.r.b.args(r, breaks, W, NULL, rmaxdefault=dmax) rval <- breaks$r if(rorbgiven) check.finespacing(rval, rname="r", eps=pixeps/4, W, rmaxdefault=dmax, context="in Hest(X,r)", action="fatal") ## extract distances and censoring distances dist <- as.vector(as.matrix(D)) bdry <- as.vector(as.matrix(B)) ok <- !is.na(dist) & !is.na(bdry) dist <- dist[ok] bdry <- bdry[ok] ## delete zero distances if(is.owin(X) || is.im(X)) { pos <- (dist > 0) areafraction <- 1 - mean(pos) dist <- dist[pos] bdry <- bdry[pos] } ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) ## calculate estimates Z <- censtimeCDFest(o, bdry, d, breaks, KM=corx$km, RS=corx$rs, HAN=corx$han, RAW=corx$none, han.denom=if(corx$han) eroded.areas(W, rval) else NULL, tt=dist) ## conditional on d > 0 ? if(is.owin(X) || is.im(X)) { if(conditional) { if(corx$km) Z$km <- condition(Z$km) if(corx$rs) Z$rs <- condition(Z$rs) if(corx$han) Z$han <- condition(Z$han) if(corx$none) Z$raw <- condition(Z$raw) } else { if(corx$km) Z$km <- reconstitute(Z$km, areafraction) if(corx$rs) Z$rs <- reconstitute(Z$rs, areafraction) if(corx$han) Z$han <- reconstitute(Z$han, areafraction) if(corx$none) Z$raw <- reconstitute(Z$raw, areafraction) } } ## relabel Z <- rebadge.fv(Z, substitute(H(r), NULL), "H") unitname(Z) <- unitname(X) return(Z) } condition <- function(x) { (x - x[1])/(1-x[1]) } reconstitute <- function(x, p) { p + (1-p) * x } Hest }) spatstat/R/interp.im.R0000755000176200001440000000365213115271075014361 0ustar liggesusers# # interp.im.R # # $Revision: 1.4 $ $Date: 2017/02/07 07:47:20 $ # interp.im <- local({ lukimyu <- function(ccc, rrr, mat, defaults) { dimm <- dim(mat) within <- (rrr >= 1 & rrr <= dimm[1L] & ccc >= 1 & ccc <= dimm[2L]) result <- defaults result[within] <- mat[cbind(rrr[within], ccc[within])] result } interp.im <- function(Z, x, 
#' Combine an existing model formula with an update formula,
#' expanding polynom() terms first when the spatstat option
#' 'expand.polynom' is enabled. NULL formulae default to ~1.
newformula <- function(old, change, eold, enew) {
  base.f <- if(is.null(old)) ~1 else eval(old, eold)
  delta.f <- if(is.null(change)) ~1 else eval(change, enew)
  base.f <- as.formula(base.f, env=eold)
  delta.f <- as.formula(delta.f, env=enew)
  if(spatstat.options("expand.polynom")) {
    base.f <- expand.polynom(base.f)
    delta.f <- expand.polynom(delta.f)
  }
  update.formula(base.f, delta.f)
}
"Jcross" <- function(X, i, j, eps=NULL, r=NULL, breaks=NULL, ...,
                     correction=NULL) {
  ## Multitype J function J[i,j](r).
  ## X:      multitype point pattern (class 'ppp')
  ## i, j:   mark levels to compare (defaults: first and second levels)
  ## eps:    raster mesh size for the distance transform
  ## r, breaks: optional argument values / breakpoints for r
  X <- as.ppp(X)
  if(!is.marked(X))
    stop(paste("point pattern has no", sQuote("marks")))
  stopifnot(is.multitype(X))
  mks <- marks(X, dfok=FALSE)
  if(missing(i)) i <- levels(mks)[1]
  if(missing(j)) j <- levels(mks)[2]
  seli <- (mks == i)
  if(sum(seli) == 0)
    stop(paste("No points have mark = ", i))
  result <-
    if(i == j) {
      ## identical types: reduces to the ordinary J function
      Jest(X[seli], eps=eps, r=r, breaks=breaks, correction=correction)
    } else {
      selj <- (mks == j)
      Jmulti(X, seli, selj, eps=eps, r=r, breaks=breaks,
             disjoint=TRUE, correction=correction)
    }
  ## relabel the function object with the mark names
  iname <- make.parseable(paste(i))
  jname <- make.parseable(paste(j))
  result <- rebadge.fv(result,
                       substitute(J[i,j](r), list(i=iname, j=jname)),
                       c("J", paste0("list(", iname, ",", jname, ")")),
                       new.yexp=substitute(J[list(i,j)](r),
                                           list(i=iname, j=jname)))
  return(result)
}
breaks=breaks, disjoint=FALSE, correction=correction) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(J[i ~ dot](r), list(i=iname)), c("J", paste(iname, "~ symbol(\"\\267\")")), new.yexp=substitute(J[i ~ symbol("\267")](r), list(i=iname))) return(result) } "Jmulti" <- function(X, I, J, eps=NULL, r=NULL, breaks=NULL, ..., disjoint=NULL, correction=NULL) { # # multitype J function (generic engine) # # X marked point pattern (of class ppp) # # I,J logical vectors of length equal to the number of points # and identifying the two subsets of points to be # compared. # # eps: raster grid mesh size for distance transform # (unless specified by X$window) # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # # X <- as.ppp(X) W<- X$window rmaxdefault <- rmax.rule("J", W) brks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)$val I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") FJ <- Fest(X[J], eps, breaks=brks, correction=correction) GIJ <- Gmulti(X, I, J, breaks=brks, disjoint=disjoint, correction=correction) rvals <- FJ$r Fnames <- names(FJ) Gnames <- names(GIJ) bothnames <- Fnames[Fnames %in% Gnames] # initialise fv object alim <- attr(FJ, "alim") fname <- c("J", "list(I,J)") Z <- fv(data.frame(r=rvals, theo=1), "r", quote(J[I,J](r)), "theo", . 
~ r, alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=quote(J[list(I,J)](r))) # add pieces manually ratio <- function(a, b) { result <- a/b result[ b == 0 ] <- NA result } if("raw" %in% bothnames) { Jun <- ratio(1-GIJ$raw, 1-FJ$raw) Z <- bind.fv(Z, data.frame(un=Jun), makefvlabel(NULL, "hat", fname, "un"), "uncorrected estimate of %s", "un") } if("rs" %in% bothnames) { Jrs <- ratio(1-GIJ$rs, 1-FJ$rs) Z <- bind.fv(Z, data.frame(rs=Jrs), makefvlabel(NULL, "hat", fname, "rs"), "border corrected estimate of %s", "rs") } if("han" %in% Gnames && "cs" %in% Fnames) { Jhan <- ratio(1-GIJ$han, 1-FJ$cs) Z <- bind.fv(Z, data.frame(han=Jhan), makefvlabel(NULL, "hat", fname, "han"), "Hanisch-style estimate of %s", "han") } if("km" %in% bothnames) { Jkm <- ratio(1-GIJ$km, 1-FJ$km) Z <- bind.fv(Z, data.frame(km=Jkm), makefvlabel(NULL, "hat", fname, "km"), "Kaplan-Meier estimate of %s", "km") if("hazard" %in% names(GIJ) && "hazard" %in% names(FJ)) { Jhaz <- GIJ$hazard - FJ$hazard Z <- bind.fv(Z, data.frame(hazard=Jhaz), "hazard(r)", "Kaplan-Meier estimate of derivative of log(%s)") } } # set default plotting values and order nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) # add other info attr(Z, "G") <- GIJ attr(Z, "F") <- FJ unitname(Z) <- unitname(X) return(Z) } spatstat/R/plot.plotppm.R0000755000176200001440000001050713115271120015110 0ustar liggesusers# # plot.plotppm.R # # engine of plot method for ppm # # $Revision: 1.20 $ $Date: 2016/12/30 01:44:07 $ # # plot.plotppm <- function(x,data=NULL,trend=TRUE,cif=TRUE,se=TRUE, pause=interactive(), how=c("persp","image","contour"), ..., pppargs=list()) { verifyclass(x,"plotppm") # determine main plotting actions superimposed <- !is.null(data) if(!missing(trend) && (trend & is.null(x[["trend"]]))) stop("No trend to plot.\n") trend <- trend & !is.null(x[["trend"]]) if(!missing(cif) && (cif & is.null(x[["cif"]]))) stop("No cif to 
plot.\n") cif <- cif & !is.null(x[["cif"]]) if(!missing(se) && (se & is.null(x[["se"]]))) stop("No SE to plot.\n") se <- se & !is.null(x[["se"]]) surftypes <- c("trend", "cif", "se")[c(trend, cif, se)] # marked point process? mrkvals <- attr(x,"mrkvals") marked <- (length(mrkvals) > 1) if(marked) data.marks <- marks(data) if(marked & superimposed) { data.types <- levels(data.marks) if(any(sort(data.types) != sort(mrkvals))) stop(paste("Data marks are different from mark", "values for argument x.\n")) } # plotting style howmat <- outer(how, c("persp", "image", "contour"), "==") howmatch <- matrowany(howmat) if (any(!howmatch)) stop(paste("unrecognised option", how[!howmatch])) # no pause required for single display if(missing(pause) || is.null(pause)) { nplots <- length(surftypes) * length(mrkvals) pause <- interactive() && (nplots != 1) } # start plotting if(pause) oldpar <- par(ask = TRUE) on.exit(if(pause) par(oldpar)) for(ttt in surftypes) { xs <- x[[ttt]] for (i in seq_along(mrkvals)) { level <- mrkvals[i] main <- paste(if(ttt == "se") "Estimated" else "Fitted", ttt, if(marked) paste("\n mark =", level) else NULL) for (style in how) { switch(style, persp = { do.call(persp, resolve.defaults(list(xs[[i]]), list(...), spatstat.options("par.persp"), list(xlab="x", zlab=ttt, main=main))) }, image = { do.call(image, resolve.defaults(list(xs[[i]]), list(...), list(main=main))) if(superimposed) { X <- if(marked) data[data.marks == level] else data do.call(plot.ppp, append(list(x=X, add=TRUE), pppargs)) } }, contour = { do.call(contour, resolve.defaults(list(xs[[i]]), list(...), list(main=main))) if(superimposed) { X <- if(marked) data[data.marks == level] else data do.call(plot.ppp, append(list(x=X, add=TRUE), pppargs)) } }, { stop(paste("Unrecognised plot style", style)) }) } } } return(invisible()) } print.plotppm <- function(x, ...) 
edges2triangles <- function(iedge, jedge, nvert=max(iedge, jedge), ...,
                            check=TRUE, friendly=rep(TRUE, nvert)) {
  ## List all triangles in the graph whose edges join vertex iedge[k]
  ## to vertex jedge[k]. Returns a matrix with 3 columns, one row per
  ## triangle. 'friendly' optionally restricts which vertices may
  ## participate; the restricted C routine is used only when at least
  ## one vertex is unfriendly.
  usefriends <- !missing(friendly)
  if(check) {
    stopifnot(length(iedge) == length(jedge))
    stopifnot(all(iedge > 0))
    stopifnot(all(jedge > 0))
    if(!missing(nvert)) {
      stopifnot(all(iedge <= nvert))
      stopifnot(all(jedge <= nvert))
    }
    if(usefriends) {
      stopifnot(is.logical(friendly))
      stopifnot(length(friendly) == nvert)
      ## all vertices friendly => plain algorithm suffices
      usefriends <- !all(friendly)
    }
  }
  ## fewer than 3 edges cannot form a triangle
  if(length(iedge) < 3)
    return(matrix(, nrow=0, ncol=3))
  ## sort edges into increasing order of 'iedge' (required by C code)
  ord <- fave.order(iedge)
  iedge <- iedge[ord]
  jedge <- jedge[ord]
  storage.mode(nvert) <- storage.mode(iedge) <- storage.mode(jedge) <- "integer"
  if(usefriends) {
    fr <- as.logical(friendly)
    storage.mode(fr) <- "integer"
    res <- .Call("trioxgraph",
                 nv=nvert, iedge=iedge, jedge=jedge, friendly=fr,
                 PACKAGE="spatstat")
  } else {
    res <- .Call("triograph",
                 nv=nvert, iedge=iedge, jedge=jedge,
                 PACKAGE="spatstat")
  }
  as.matrix(as.data.frame(res))
}
rocData <- function(covariate, nullmodel, ..., high=TRUE) {
  ## Compute the ROC curve for a spatial covariate against a null model.
  ## covariate: spatial covariate (image, function, ...)
  ## nullmodel: fitted null model (ppm or lppm)
  ## high:      if TRUE, high covariate values are associated with presence
  ## Returns an 'fv' object containing the observed curve (fobs) and the
  ## diagonal reference curve (fnull).
  d <- spatialCDFframe(nullmodel, covariate, ...)
  U <- d$values$U
  ## reverse orientation when high covariate values indicate presence
  ec <- if(high) ecdf(1-U) else ecdf(U)
  ## 'length.out' spelled out in full (was partially matched as 'length')
  p <- seq(0, 1, length.out=1024)
  df <- data.frame(p=p, fobs=ec(p), fnull=p)
  result <- fv(df,
               argu="p",
               ylab=quote(roc(p)),
               valu="fobs",
               desc=c("fraction of area",
                      "observed fraction of points",
                      "expected fraction if no effect"),
               fname="roc")
  fvnames(result, ".") <- c("fobs", "fnull")
  return(result)
}
return(result) } roc.lppm <- function(X, ...) { stopifnot(is.lppm(X)) model <- X lambda <- predict(model, ...) Y <- X$X nullmodel <- lppm(Y) result <- rocModel(lambda, nullmodel, ...) return(result) } rocModel <- function(lambda, nullmodel, ..., high) { if(!missing(high)) warning("Argument 'high' is ignored when computing ROC for a fitted model") d<- spatialCDFframe(nullmodel, lambda, ...) U <- d$values$U ec <- ecdf(1-U) p <- seq(0,1,length=1024) fobs <- ec(p) FZ <- d$values$FZ lambdavalues <- if(is.im(lambda)) lambda[] else unlist(lapply(lambda, "[")) F1Z <- ewcdf(lambdavalues, lambdavalues/sum(lambdavalues)) pZ <- get("y", environment(FZ)) qZ <- get("x", environment(FZ)) FZinverse <- approxfun(pZ, qZ, rule=2) ftheo <- 1 - F1Z(FZinverse(1-p)) df <- data.frame(p=p, fobs=fobs, ftheo=ftheo, fnull=p) result <- fv(df, argu="p", ylab=quote(roc(p)), valu="fobs", fmla = . ~ p, desc=c("fraction of area", "observed fraction of points", "expected fraction of points", "expected fraction if no effect"), fname="roc") fvnames(result, ".") <- c("fobs", "ftheo", "fnull") return(result) } # ...................................................... auc <- function(X, ...) { UseMethod("auc") } auc.ppp <- function(X, covariate, ..., high=TRUE) { d <- spatialCDFframe(ppm(X), covariate, ...) U <- d$values$U EU <- mean(U) result <- if(high) EU else (1 - EU) return(result) } auc.lpp <- function(X, covariate, ..., high=TRUE) { d <- spatialCDFframe(lppm(X), covariate, ...) U <- d$values$U EU <- mean(U) result <- if(high) EU else (1 - EU) return(result) } auc.kppm <- function(X, ...) { auc(as.ppm(X), ...) } auc.ppm <- function(X, ...) { model <- X if(is.multitype(model)) { # cheat ro <- roc(model, ...) 
aobs <- with(ro, mean(fobs)) atheo <- with(ro, mean(ftheo)) } else if(is.stationary(model)) { aobs <- atheo <- 1/2 } else { lambda <- intensity(model) Fl <- ecdf(lambda[]) lambda <- as.im(lambda, Window(model)) X <- data.ppm(model) lamX <- lambda[X] aobs <- mean(Fl(lamX)) atheo <- mean(lambda[] * Fl(lambda[]))/mean(lambda) } result <- c(aobs, atheo) names(result) <- c("obs", "theo") return(result) } auc.lppm <- function(X, ...) { stopifnot(inherits(X, "lppm")) model <- X if(is.multitype(model)) { # cheat ro <- roc(model, ...) aobs <- with(ro, mean(fobs)) atheo <- with(ro, mean(ftheo)) } else { lambda <- predict(model, ...) Fl <- ecdf(lambda[]) lamX <- lambda[model$X] aobs <- mean(Fl(lamX)) atheo <- mean(lambda[] * Fl(lambda[]))/mean(lambda) } result <- c(aobs, atheo) names(result) <- c("obs", "theo") return(result) } spatstat/R/concom.R0000644000176200001440000000737213115225157013732 0ustar liggesusers# # # concom.R # # $Revision: 1.4 $ $Date: 2016/04/25 02:34:40 $ # # The connected component interaction # # Concom() create an instance of the connected component interaction # [an object of class 'interact'] # # ------------------------------------------------------------------- # Concom <- local({ connectedlabels <- function(X, R) { connected(X, R, internal=TRUE) } countcompo <- function(X, R) { length(unique(connectedlabels(X, R))) } # change in number of components when point i is deleted cocoDel <- function(X, R, subset=seq_len(npoints(X))) { n <- length(subset) ans <- integer(n) if(n > 0) { cX <- countcompo(X, R) for(i in 1:n) ans[i] = countcompo(X[-subset[i]], R) - cX } return(ans) } # change in number of components when new point is added cocoAdd <- function(U, X, R) { U <- as.ppp(U, W=as.owin(X)) nU <- npoints(U) cr <- crosspairs(U, X, R, what="indices") lab <- connectedlabels(X, R) hitcomp <- tapply(X=lab[cr$j], INDEX=factor(cr$i, levels=1:nU), FUN=unique, simplify=FALSE) nhit <- unname(lengths(hitcomp)) change <- 1L - nhit return(change) } # connected 
# component potential
cocopot <- function(X, U, EqualPairs, pars, correction, ...) {
  ## Potential of the connected-component interaction:
  ## for each quadrature point in U, the change (plus 1) in the number of
  ## connected components of the pattern when that point is added/deleted.
  bad <- !(correction %in% c("border", "none"))
  if((nbad <- sum(bad)) > 0)
    warning(paste("The", ngettext(nbad, "correction", "corrections"),
                  ## BUG FIX: original code indexed 'correction[!ok]'
                  ## but 'ok' was never defined; the unsupported
                  ## entries are exactly those flagged in 'bad'.
                  commasep(sQuote(correction[bad])),
                  ngettext(nbad, "is", "are"),
                  "not implemented"))
  n <- U$n
  answer <- numeric(n)
  r <- pars$r
  if(is.null(r))
    stop("internal error: r parameter not found")
  ## dummy points: quadrature points not matched to a data point of X
  dummies <- !(seq_len(n) %in% EqualPairs[,2L])
  if(any(dummies))
    answer[dummies] <- -cocoAdd(U[dummies], X, r)
  ## data points: change in component count when each point is deleted
  ii <- EqualPairs[,1L]
  jj <- EqualPairs[,2L]
  answer[jj] <- cocoDel(X, r, subset=ii)
  return(answer + 1L)
}
# template object without family, par, version
BlankCoco <- list(
  name = "Connected component process",
  creator = "Concom",
  family = "inforder.family", # evaluated later
  pot = cocopot,
  par = list(r = NULL), # to be filled in
  parnames = "distance threshold",
  init = function(self) {
    ## validate the distance threshold supplied by the user
    r <- self$par$r
    if(!is.numeric(r) || length(r) != 1L || r <= 0)
      stop("distance threshold r must be a positive number")
  },
  update = NULL, # default OK
  print = NULL, # default OK
  interpret = function(coeffs, self) {
    ## map the fitted canonical coefficient to the interaction
    ## parameter eta = exp(log eta)
    logeta <- as.numeric(coeffs[1L])
    eta <- exp(logeta)
    return(list(param=list(eta=eta),
                inames="interaction parameter eta",
                printable=signif(eta)))
  },
  valid = function(coeffs, self) {
    eta <- ((self$interpret)(coeffs, self))$param$eta
    return(is.finite(eta))
  },
  project = function(coeffs, self) {
    ## project an invalid fit to the nearest valid model (Poisson)
    if((self$valid)(coeffs, self)) return(NULL)
    return(Poisson())
  },
  irange = function(self, coeffs=NA, epsilon=0, ...)
{ if(anyNA(coeffs)) return(Inf) logeta <- coeffs[1L] if(abs(logeta) <= epsilon) return(0) else return(Inf) }, version=NULL # to be added ) class(BlankCoco) <- "interact" Concom <- function(r) { instantiate.interact(BlankCoco, list(r=r)) } Concom <- intermaker(Concom, BlankCoco) Concom }) spatstat/R/clusterfunctions.R0000644000176200001440000000574513115225157016070 0ustar liggesusers## clusterfunctions.R ## ## Contains the generic functions: ## - clusterkernel ## - clusterfield ## - clusterradius. ## ## $Revision: 1.3 $ $Date: 2015/02/23 00:21:39 $ ## clusterkernel <- function(model, ...) { UseMethod("clusterkernel") } clusterkernel.kppm <- function(model, ...) { kernelR <- Kpcf.kppm(model, what = "kernel") f <- function(x, y = 0, ...){ kernelR(sqrt(x^2+y^2)) } return(f) } clusterkernel.character <- function(model, ...){ info <- spatstatClusterModelInfo(model, onlyPCP = TRUE) internalkernel <- info$kernel dots <- list(...) par <- c(kappa = 1, scale = dots$scale) par <- info$checkpar(par, old = TRUE) nam <- info$clustargsnames margs <- NULL if(!is.null(nam)) margs <- dots[nam] f <- function(x, y = 0, ...){ internalkernel(par = par, rvals = sqrt(x^2+y^2), margs = margs) } return(f) } clusterfield <- function(model, locations = NULL, ...) { UseMethod("clusterfield") } clusterfield.kppm <- function(model, locations = NULL, ...) { f <- clusterkernel(model) if(is.null(locations)){ if(!is.stationary(model)) stop("The model is non-stationary. The argument ", sQuote("locations"), " must be given.") locations <- centroid.owin(Window(model), as.ppp = TRUE) } clusterfield.function(f, locations, ..., mu = model$mu) } clusterfield.character <- function(model, locations = NULL, ...){ f <- clusterkernel(model, ...) clusterfield.function(f, locations, ...) 
} clusterfield.function <- function(model, locations = NULL, ..., mu = NULL) { if(is.null(locations)){ locations <- ppp(.5, .5, window=square(1)) } if(!inherits(locations, "ppp")) stop("Argument ", sQuote("locations"), " must be a point pattern (ppp).") if("sigma" %in% names(list(...)) && "sigma" %in% names(formals(model))) warning("Currently ", sQuote("sigma"), "cannot be passed as an extra argument to the kernel function. ", "Please redefine the kernel function to use another argument name.") rslt <- density(locations, kernel=model, ...) if(is.null(mu)) return(rslt) mu <- as.im(mu, W=rslt) if(min(mu)<0) stop("Cluster reference intensity ", sQuote("mu"), " is negative.") return(rslt*mu) } clusterradius <- function(model, ...){ UseMethod("clusterradius") } clusterradius.character <- function(model, ..., thresh = NULL, precision = FALSE){ info <- spatstatClusterModelInfo(model, onlyPCP = TRUE) rmax <- info$range(..., thresh = thresh) if(precision){ ddist <- function(r) info$ddist(r, ...) prec <- integrate(ddist, 0, rmax) attr(rmax, "prec") <- prec } return(rmax) } clusterradius.kppm <- function(model, ..., thresh = NULL, precision = FALSE){ a <- list(model = model$clusters, thresh = thresh, precision = precision) a <- append(a, as.list(c(model$clustpar, model$clustargs))) do.call(clusterradius.character, a) } spatstat/R/bw.pcf.R0000644000176200001440000001360113115271075013623 0ustar liggesusers#' #' bw.pcf.R #' #' $Revision: 1.4 $ $Date: 2017/06/05 10:31:58 $ #' #' bandwidth selection for pcf #' with least-squares cross-validation method #' #' Original code by: Rasmus Waagepetersen and Abdollah Jalilian #' #' References: #' Guan, Y. (2007). A composite likelihood cross-validation approach in #' selecting bandwidth for the estimation of the pair correlation function. #' Scandinavian Journal of Statistics, 34(2), 336--346. #' DOI: http://doi.org/10.1111/j.1467-9469.2006.00533.x #' Guan, Y. (2007). 
A least-squares cross-validation bandwidth #' selection approach in pair correlation function estimations. #' Statistics & Probability Letters, 77(18), 1722--1729. #' DOI: http://doi.org/10.1016/j.spl.2007.04.016 bw.pcf <- function(X, rmax=NULL, lambda=NULL, divisor="r", kernel="epanechnikov", nr=10000, bias.correct=TRUE, cv.method=c("compLik", "leastSQ"), simple=TRUE, srange=NULL, ..., verbose=FALSE) { stopifnot(is.ppp(X)) X <- unmark(X) win <- Window(X) areaW <- area(win) nX <- npoints(X) cv.method <- match.arg(cv.method) kernel <- match.kernel(kernel) #' maximum distance lag: rmax if (is.null(rmax)) rmax <- rmax.rule("K", win, nX/areaW) if(is.null(srange)) srange <- c(0, rmax/4) #' number of subintervals for discretization of [0, rmax]: nr #' length of subintervals discr <- rmax / nr #' breaks of subintervals rs <- seq(0, rmax, length.out= nr + 1) #' closepairs distances: \\ u - v \\ #' Pre-compute close pair distances for use in 'pcf' #' we need close pairs up to a distance rmax + smax #' where 'smax' is the maximum halfwidth of the support of the kernel smax <- srange[2] * (if(kernel == "gaussian") 2 else kernel.factor(kernel)) cpfull <- closepairs(X, rmax + smax, what="all", twice=TRUE) #' For cross-validation, restrict close pairs to distance rmax ok <- (cpfull$d <= rmax) cp <- lapply(cpfull, "[", i=ok) ds <- cp$d #' determining closepairs distances are in which subinterval idx <- round(ds / discr) + 1L idx <- pmin.int(idx, nr+1L) #' translation edge correction factor: /W|/|W \cap W_{u-v}| edgewt <- edge.Trans(dx=cp$dx, dy=cp$dy, W=win, paired=TRUE) if(homogeneous <- is.null(lambda)) { #' homogeneous case lambda <- nX/areaW lambda2area <- lambda^2 * areaW pcfargs <- list(X=X, r=rs, divisor=divisor, kernel=kernel, correction="translate", close=cpfull) renorm.factor <- 1 } else { # inhomogeneous case: lambda is assumed to be a numeric vector giving # the intensity at the points of the point pattern X check.nvector(lambda, nX) lambda2area <- lambda[cp$i] * 
lambda[cp$j] * areaW pcfargs <- list(X=X, lambda=lambda, r=rs, divisor=divisor, kernel=kernel, correction="translate", close=cpfull) renorm.factor <- (areaW/sum(1/lambda)) } stuff <- list(cv.method=cv.method, kernel=kernel, homogeneous=homogeneous, bias.correct=bias.correct, simple = simple, discr=discr, rs=rs, cp=cp, ds=ds, idx=idx, edgewt=edgewt, pcfargs=pcfargs, lambda=lambda, lambda2area=lambda2area, renorm.factor=renorm.factor, show=verbose) stuff <- list2env(stuff) #' find optimum bandwidth z <- optimizeWithTrace(CVforPCF, srange, maximum=TRUE, stuff=stuff) #' pack up ox <- order(z$x) sigma <- z$x[ox] cv <- z$y[ox] criterion <- switch(cv.method, compLik = "composite likelihood cross-validation", leastSQ = "least squares cross-validation") result <- bw.optim(cv, sigma, which.max(cv), criterion = criterion, unitname=unitname(X)) return(result) } CVforPCF <- function(bw, stuff) { stuff$bw <- bw with(stuff, { if(show) splat("bw=", bw) #' values of pair correlation at breaks of subintervals a <- append(pcfargs, list(bw=bw)) grs <- if(homogeneous) do.call(pcf.ppp, a) else do.call(pcfinhom, a) grs <- grs$trans #' bias correction if (bias.correct) { grs <- grs / pkernel(rs, kernel, 0, bw) dcorrec <- pkernel(ds, kernel, 0, bw) } else { dcorrec <- 1 } #' make sure that the estimated pair correlation at origin is finite if (!is.finite(grs[1])) grs[1] <- grs[2] #' approximate the pair correlation values at closepairs distances gds <- grs[idx] wt <- edgewt / (2 * pi * ds * lambda2area * dcorrec) * renorm.factor #' remove pairs to approximate the cross-validation term: g^{-(u, v)} if (simple) { gds <- gds - 2 * wt * dkernel(0, kernel, 0, bw) } else { cpi <- cp$i cpj <- cp$j for (k in 1:length(ds)) { exclude <- (cpi == cpi[k]) | (cpj == cpj[k]) gds[k] <- gds[k] - 2 * sum(wt[exclude] * dkernel(ds[k] - ds[exclude], kernel, 0, bw)) } } #' remove negative and zero values gds <- pmax.int(.Machine$double.eps, gds) switch(cv.method, compLik={ #' composite likelihood 
cross-validation #' the integral term: 2 \pi \int_{0}^{rmax} \hat g(r) r dr normconst <- 2 * pi * sum(grs * rs) * discr value <- mean(log(gds)) - log(normconst) }, leastSQ={ #' least squares cross-validation #' the integral term: 2 \pi \int_{0}^{rmax} \hat g^2(r) r dr normconst <- 2 * pi * sum(grs^2 * rs) * discr value <- 2 * sum(gds * edgewt / (lambda2area)) - normconst }, stop("Unrecognised cross-validation method")) if(show) splat("value=", value) return(value) }) } spatstat/R/fasp.R0000755000176200001440000001367213115271075013410 0ustar liggesusers# # fasp.R # # $Revision: 1.35 $ $Date: 2017/02/07 07:22:47 $ # # #----------------------------------------------------------------------------- # # creator fasp <- function(fns, which, formulae=NULL, dataname=NULL, title=NULL, rowNames=NULL, colNames=NULL, checkfv=TRUE) { stopifnot(is.list(fns)) stopifnot(is.matrix(which)) stopifnot(length(fns) == length(which)) n <- length(which) if(checkfv) for(i in seq_len(n)) if(!is.fv(fns[[i]])) stop(paste("fns[[", i, "]] is not an fv object", sep="")) # set row and column labels if(!is.null(rowNames)) rownames(which) <- rowNames if(!is.null(colNames)) colnames(which) <- colNames if(!is.null(formulae)) { # verify format and convert to character vector formulae <- FormatFaspFormulae(formulae, "formulae") # ensure length matches length of "fns" if(length(formulae) == 1L && n > 1L) # single formula - replicate it formulae <- rep.int(formulae, n) else stopifnot(length(formulae) == length(which)) } rslt <- list(fns=fns, which=which, default.formula=formulae, dataname=dataname, title=title) class(rslt) <- "fasp" return(rslt) } # subset extraction operator "[.fasp" <- function(x, I, J, drop=TRUE, ...) 
{ verifyclass(x, "fasp") m <- nrow(x$which) n <- ncol(x$which) if(missing(I)) I <- 1:m if(missing(J)) J <- 1:n if(!is.vector(I) || !is.vector(J)) stop("Subset operator is only implemented for vector indices") # determine index subset for lists 'fns', 'titles' etc included <- rep.int(FALSE, length(x$fns)) w <- as.vector(x$which[I,J]) if(length(w) == 0) stop("result is empty") included[w] <- TRUE # if only one cell selected, and drop=TRUE: if((sum(included) == 1L) && drop) return(x$fns[included][[1L]]) # determine positions in shortened lists whichIJ <- x$which[I,J,drop=FALSE] newk <- cumsum(included) newwhich <- matrix(newk[whichIJ], ncol=ncol(whichIJ), nrow=nrow(whichIJ)) rownames(newwhich) <- rownames(x$which)[I] colnames(newwhich) <- colnames(x$which)[J] # default plotting formulae - could be NULL deform <- x$default.formula # create new fasp object Y <- fasp(fns = x$fns[included], formulae = if(!is.null(deform)) deform[included] else NULL, which = newwhich, dataname = x$dataname, title = x$title) return(Y) } dim.fasp <- function(x) { dim(x$which) } # print method print.fasp <- function(x, ...) { verifyclass(x, "fasp") cat(paste("Function array (class", sQuote("fasp"), ")\n")) dim <- dim(x$which) cat(paste("Dimensions: ", dim[1L], "x", dim[2L], "\n")) cat(paste("Title:", if(is.null(x$title)) "(None)" else x$title, "\n")) invisible(NULL) } # other methods as.fv.fasp <- function(x) do.call(cbind.fv, x$fns) dimnames.fasp <- function(x) { return(dimnames(x$which)) } "dimnames<-.fasp" <- function(x, value) { w <- x$which dimnames(w) <- value x$which <- w return(x) } pool.fasp <- local({ pool.fasp <- function(...) { Alist <- list(...) Yname <- short.deparse(sys.call()) if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1L, 40L), "[..]") nA <- length(Alist) if(nA == 0) return(NULL) ## validate.... 
## All arguments must be fasp objects notfasp <- !unlist(lapply(Alist, inherits, what="fasp")) if(any(notfasp)) { n <- sum(notfasp) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notfasp)), ngettext(n, "does not", "do not"), "belong to the class", dQuote("fasp")) stop(why) } ## All arguments must have envelopes notenv <- !unlist(lapply(Alist, has.env)) if(any(notenv)) { n <- sum(notenv) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notenv)), ngettext(n, "does not", "do not"), "contain envelope data") stop(why) } if(nA == 1L) return(Alist[[1L]]) ## All arguments must have the same dimensions witches <- lapply(Alist, getElement, name="which") witch1 <- witches[[1L]] same <- unlist(lapply(witches, identical, y=witch1)) if(!all(same)) stop("Function arrays do not have the same array dimensions") ## OK. ## Pool envelopes at each position result <- Alist[[1L]] fns <- result$fns for(k in seq_along(fns)) { funks <- lapply(Alist, extractfun, k=k) fnk <- do.call(pool.envelope, funks) attr(fnk, "einfo")$Yname <- Yname fns[[k]] <- fnk } result$fns <- fns return(result) } has.env <- function(z) { all(unlist(lapply(z$fns, inherits, what="envelope"))) } extractfun <- function(z, k) { z$fns[[k]] } pool.fasp }) ## other functions FormatFaspFormulae <- local({ zapit <- function(x, argname) { if(inherits(x, "formula")) deparse(x) else if(is.character(x)) x else stop(paste("The entries of", sQuote(argname), "must be formula objects or strings")) } FormatFaspFormulae <- function(f, argname) { ## f should be a single formula object, a list of formula objects, ## a character vector, or a list containing formulae and strings. ## It will be converted to a character vector. 
result <- if(is.character(f)) f else if(inherits(f, "formula")) deparse(f) else if(is.list(f)) unlist(lapply(f, zapit, argname=argname)) else stop(paste(sQuote(argname), "should be a formula, a list of formulae,", "or a character vector")) return(result) } FormatFaspFormulae }) spatstat/R/Kmeasure.R0000755000176200001440000004136113134610642014225 0ustar liggesusers# # Kmeasure.R # # $Revision: 1.63 $ $Date: 2017/07/22 08:33:10 $ # # Kmeasure() compute an estimate of the second order moment measure # # Kest.fft() use Kmeasure() to form an estimate of the K-function # # second.moment.calc() underlying algorithm # Kmeasure <- function(X, sigma, edge=TRUE, ..., varcov=NULL) { stopifnot(is.ppp(X)) sigma.given <- !missing(sigma) && !is.null(sigma) varcov.given <- !is.null(varcov) ngiven <- sigma.given + varcov.given if(ngiven == 2) stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) if(ngiven == 0) stop(paste("Please specify smoothing bandwidth", sQuote("sigma"), "or", sQuote("varcov"))) if(varcov.given) { stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 ) sigma <- NULL } else { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1,2)) stopifnot(all(sigma > 0)) if(length(sigma) == 2) { varcov <- diag(sigma^2) sigma <- NULL } } second.moment.calc(x=X, sigma=sigma, edge=edge, what="Kmeasure", varcov=varcov, ...) 
} second.moment.calc <- function(x, sigma=NULL, edge=TRUE, what=c("Kmeasure", "kernel", "smooth", "Bartlett", "edge", "smoothedge", "all"), ..., varcov=NULL, expand=FALSE, debug=FALSE) { if(is.null(sigma) && is.null(varcov)) stop("must specify sigma or varcov") what <- match.arg(what) sig <- if(!is.null(sigma)) sigma else max(c(diag(varcov), sqrt(det(varcov)))) xtype <- if(is.ppp(x)) "ppp" else if(is.im(x)) "im" else if(inherits(x, "imlist")) "imlist" else if(all(sapply(x, is.im))) "imlist" else stop("x should be a point pattern or a pixel image") nimages <- switch(xtype, ppp = 1, im = 1, imlist = length(x)) win <- if(nimages == 1) as.owin(x) else as.owin(x[[1]]) win <- rescue.rectangle(win) rec <- as.rectangle(win) across <- min(diff(rec$xrange), diff(rec$yrange)) # determine whether to expand window if(!expand || (6 * sig < across)) { result <- second.moment.engine(x, sigma=sigma, edge=edge, what=what, debug=debug, ..., varcov=varcov) return(result) } # need to expand window bigger <- grow.rectangle(rec, (7 * sig - across)/2) switch(xtype, ppp = { # pixellate first (to preserve pixel resolution) X <- pixellate(x, ..., padzero=TRUE) np <- npoints(x) }, im = { X <- x np <- NULL }, imlist = { X <- x np <- NULL }) # Now expand if(nimages == 1) { X <- rebound.im(X, bigger) X <- na.handle.im(X, 0) } else { X <- lapply(X, rebound.im, rect=bigger) X <- lapply(X, na.handle.im, na.replace=0) } # Compute! 
out <- second.moment.engine(X, sigma=sigma, edge=edge, what=what, debug=debug, ..., obswin=win, varcov=varcov, npts=np) # Now clip it fbox <- shift(rec, origin="midpoint") if(nimages == 1) { result <- switch(what, kernel = out[fbox], smooth = out[win], Kmeasure = out[fbox], Bartlett = out[fbox], edge = out[win], smoothedge = list(smooth=out$smooth[win], edge =out$edge[win]), all = list(kernel=out$kernel[fbox], smooth=out$smooth[win], Kmeasure=out$Kmeasure[fbox], Bartlett=out$Bartlett[fbox], edge=out$edge[win])) } else { result <- switch(what, kernel = out[fbox], smooth = lapply(out, "[", i=win), Kmeasure = lapply(out, "[", i=fbox), Bartlett = lapply(out, "[", i=fbox), edge = out[win], smoothedge = list( smooth = lapply(out$smooth, "[", i=win), edge = out$edge[win]), all = list( kernel=out$kernel[fbox], smooth=lapply(out$smooth, "[", i=win), Kmeasure=lapply(out$Kmeasure, "[", i=fbox), Bartlett=lapply(out$Bartlett, "[", i=fbox), edge=out$edge[win])) } return(result) } second.moment.engine <- function(x, sigma=NULL, edge=TRUE, what=c("Kmeasure", "kernel", "smooth", "Bartlett", "edge", "smoothedge", "all"), ..., kernel="gaussian", obswin = as.owin(x), varcov=NULL, npts=NULL, debug=FALSE) { what <- match.arg(what) validate2Dkernel(kernel) is.second.order <- what %in% c("Kmeasure", "Bartlett", "all") needs.kernel <- what %in% c("kernel", "all", "Kmeasure") returns.several <- what %in% c("all", "smoothedge") # check whether Fastest Fourier Transform in the West is available west <- fftwAvailable() if(returns.several) result <- list() # several results will be returned in a list if(is.ppp(x)) { # convert list of points to mass distribution X <- pixellate(x, ..., padzero=TRUE) if(is.null(npts)) npts <- npoints(x) } else X <- x if(is.im(X)) { Xlist <- list(X) nimages <- 1 } else if(all(unlist(lapply(X, is.im)))) { Xlist <- X X <- Xlist[[1]] nimages <- length(Xlist) blanklist <- vector(mode="list", length=nimages) names(blanklist) <- names(Xlist) } else stop("internal error: 
unrecognised format for x") unitsX <- unitname(X) xstep <- X$xstep ystep <- X$ystep # ensure obswin has same bounding frame as X if(!missing(obswin)) obswin <- rebound.owin(obswin, as.rectangle(X)) # go to work Y <- X$v Ylist <- lapply(Xlist, getElement, name="v") # pad with zeroes nr <- nrow(Y) nc <- ncol(Y) Ypad <- matrix(0, ncol=2*nc, nrow=2*nr) Ypadlist <- rep(list(Ypad), nimages) for(i in 1:nimages) Ypadlist[[i]][1:nr, 1:nc] <- Ylist[[i]] Ypad <- Ypadlist[[1]] lengthYpad <- 4 * nc * nr # corresponding coordinates xcol.pad <- X$xcol[1] + xstep * (0:(2*nc-1)) yrow.pad <- X$yrow[1] + ystep * (0:(2*nr-1)) # compute kernel and its Fourier transform if(!needs.kernel && identical(kernel, "gaussian") && is.numeric(sigma) && (length(sigma) == 1) && spatstat.options('developer')) { # compute Fourier transform of kernel directly (*experimental*) ii <- c(0:(nr-1), nr:1) jj <- c(0:(nc-1), nc:1) zz <- -sigma^2 * pi^2/2 uu <- exp(zz * ii^2) vv <- exp(zz * jj^2) fK <- outer(uu, vv, "*") } else { # set up kernel xcol.ker <- xstep * c(0:(nc-1),-(nc:1)) yrow.ker <- ystep * c(0:(nr-1),-(nr:1)) kerpixarea <- xstep * ystep if(identical(kernel, "gaussian")) { if(!is.null(sigma)) { densX.ker <- dnorm(xcol.ker, sd=sigma) densY.ker <- dnorm(yrow.ker, sd=sigma) #' WAS: Kern <- outer(densY.ker, densX.ker, "*") * kerpixarea Kern <- outer(densY.ker, densX.ker, "*") Kern <- Kern/sum(Kern) } else if(!is.null(varcov)) { ## anisotropic kernel detSigma <- det(varcov) Sinv <- solve(varcov) halfSinv <- Sinv/2 constker <- kerpixarea/(2 * pi * sqrt(detSigma)) xsq <- matrix((xcol.ker^2)[col(Ypad)], ncol=2*nc, nrow=2*nr) ysq <- matrix((yrow.ker^2)[row(Ypad)], ncol=2*nc, nrow=2*nr) xy <- outer(yrow.ker, xcol.ker, "*") Kern <- constker * exp(-(xsq * halfSinv[1,1] + xy * (halfSinv[1,2]+halfSinv[2,1]) + ysq * halfSinv[2,2])) Kern <- Kern/sum(Kern) } else stop("Must specify either sigma or varcov") } else { ## non-Gaussian kernel ## evaluate kernel at array of points xker <- as.vector(xcol.ker[col(Ypad)]) 
yker <- as.vector(yrow.ker[row(Ypad)]) Kern <- evaluate2Dkernel(kernel, xker, yker, sigma=sigma, varcov=varcov, ...) * kerpixarea Kern <- matrix(Kern, ncol=2*nc, nrow=2*nr) Kern <- Kern/sum(Kern) } if(what %in% c("kernel", "all")) { ## kernel will be returned ## first rearrange it into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(debug) { if(any(fave.order(xcol.ker) != rtwist)) cat("something round the twist\n") } Kermit <- Kern[ rtwist, ctwist] ker <- im(Kermit, xcol.ker[ctwist], yrow.ker[ rtwist], unitname=unitsX) if(what == "kernel") return(ker) else result$kernel <- ker } ## convolve using fft fK <- fft2D(Kern, west=west) } if(what != "edge") { if(nimages == 1) { fY <- fft2D(Ypad, west=west) sm <- fft2D(fY * fK, inverse=TRUE, west=west)/lengthYpad if(debug) { cat(paste("smooth: maximum imaginary part=", signif(max(Im(sm)),3), "\n")) if(!is.null(npts)) cat(paste("smooth: mass error=", signif(sum(Mod(sm))-npts,3), "\n")) } } else { fYlist <- smlist <- blanklist for(i in 1:nimages) { fYlist[[i]] <- fY.i <- fft2D(Ypadlist[[i]], west=west) smlist[[i]] <- sm.i <- fft2D(fY.i * fK, inverse=TRUE, west=west)/lengthYpad if(debug) { cat(paste("smooth component", i, ": maximum imaginary part=", signif(max(Im(sm.i)),3), "\n")) if(!is.null(npts)) cat(paste("smooth component", i, ": mass error=", signif(sum(Mod(sm.i))-npts,3), "\n")) } } } } if(what %in% c("smooth", "all", "smoothedge")) { # compute smoothed point pattern without edge correction if(nimages == 1) { smo <- im(Re(sm)[1:nr, 1:nc], xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) if(what == "smooth") { return(smo) } else { result$smooth <- smo } } else { smolist <- blanklist for(i in 1:nimages) smolist[[i]] <- im(Re(smlist[[i]])[1:nr, 1:nc], xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) smolist <- as.solist(smolist) if(what == "smooth") { return(smolist) } else { result$smooth <- smolist } } } if(is.second.order) { # compute Bartlett 
spectrum if(nimages == 1) { bart <- BartCalc(fY, fK) ## bart <- Mod(fY)^2 * fK } else { bartlist <- lapply(fYlist, BartCalc, fK=fK) } } if(what %in% c("Bartlett", "all")) { # Bartlett spectrum will be returned # rearrange into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(nimages == 1) { Bart <- bart[ rtwist, ctwist] Bartlett <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1)) if(what == "Bartlett") return(Bartlett) else result$Bartlett <- Bartlett } else { Bartlist <- blanklist for(i in 1:nimages) { Bart <- (bartlist[[i]])[ rtwist, ctwist] Bartlist[[i]] <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1)) } Bartlist <- as.solist(Bartlist) if(what == "Bartlett") return(Bartlist) else result$Bartlett <- Bartlist } } #### ------- Second moment measure -------------- # if(is.second.order) { if(nimages == 1) { mom <- fft2D(bart, inverse=TRUE, west=west)/lengthYpad if(debug) { cat(paste("2nd moment measure: maximum imaginary part=", signif(max(Im(mom)),3), "\n")) if(!is.null(npts)) cat(paste("2nd moment measure: mass error=", signif(sum(Mod(mom))-npts^2, 3), "\n")) } mom <- Mod(mom) # subtract (delta_0 * kernel) * npts if(is.null(npts)) stop("Internal error: second moment measure requires npts") mom <- mom - npts* Kern } else { momlist <- blanklist for(i in 1:nimages) { mom.i <- fft2D(bartlist[[i]], inverse=TRUE, west=west)/lengthYpad if(debug) { cat(paste("2nd moment measure: maximum imaginary part=", signif(max(Im(mom.i)),3), "\n")) if(!is.null(npts)) cat(paste("2nd moment measure: mass error=", signif(sum(Mod(mom.i))-npts^2, 3), "\n")) } mom.i <- Mod(mom.i) # subtract (delta_0 * kernel) * npts if(is.null(npts)) stop("Internal error: second moment measure requires npts") mom.i <- mom.i - npts* Kern momlist[[i]] <- mom.i } } } # edge correction if(edge || what %in% c("edge", "all", "smoothedge")) { M <- as.mask(obswin, xy=list(x=X$xcol, y=X$yrow))$m # previous line ensures M has same dimensions and scale as 
Y Mpad <- matrix(0, ncol=2*nc, nrow=2*nr) Mpad[1:nr, 1:nc] <- M lengthMpad <- 4 * nc * nr fM <- fft2D(Mpad, west=west) if(edge && is.second.order) { # compute kernel-smoothed set covariance # apply edge correction co <- fft2D(Mod(fM)^2 * fK, inverse=TRUE, west=west)/lengthMpad co <- Mod(co) a <- sum(M) wt <- a/co me <- spatstat.options("maxedgewt") weight <- matrix(pmin.int(me, wt), ncol=2*nc, nrow=2*nr) # apply edge correction to second moment measure if(nimages == 1) { mom <- mom * weight # set to NA outside 'reasonable' region mom[wt > 10] <- NA } else { wgt10 <- (wt > 10) for(i in 1:nimages) { mom.i <- momlist[[i]] mom.i <- mom.i * weight # set to NA outside 'reasonable' region mom.i[wgt10] <- NA momlist[[i]] <- mom.i } } } } if(is.second.order) { # rearrange second moment measure # into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(nimages == 1) { mom <- mom[ rtwist, ctwist] } else { momlist <- lapply(momlist, "[", i=rtwist, j=ctwist) } if(debug) { if(any(fave.order(xcol.ker) != rtwist)) cat("internal error: something round the twist\n") } } if(what %in% c("edge", "all", "smoothedge")) { # return convolution of window with kernel # (evaluated inside window only) con <- fft2D(fM * fK, inverse=TRUE, west=west)/lengthMpad edg <- Mod(con[1:nr, 1:nc]) edg <- im(edg, xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) if(what == "edge") return(edg) else result$edge <- edg } if(what == "smoothedge") return(result) # Second moment measure, density estimate # Divide by number of points * lambda and convert mass to density pixarea <- xstep * ystep if(nimages == 1) { mom <- mom * area(obswin) / (pixarea * npts^2) # this is the second moment measure mm <- im(mom, xcol.ker[ctwist], yrow.ker[rtwist], unitname=unitsX) if(what == "Kmeasure") return(mm) else result$Kmeasure <- mm } else { ccc <- area(obswin) / (pixarea * npts^2) mmlist <- blanklist for(i in 1:nimages) { mom.i <- momlist[[i]] mom.i <- 
mom.i * ccc # this is the second moment measure mmlist[[i]] <- im(mom.i, xcol.ker[ctwist], yrow.ker[rtwist], unitname=unitsX) } mmlist <- as.solist(mmlist) if(what == "Kmeasure") return(mmlist) else result$Kmeasure <- mmlist } # what = "all", so return all computed objects return(result) } BartCalc <- function(fY, fK) { Mod(fY)^2 * fK } Kest.fft <- function(X, sigma, r=NULL, ..., breaks=NULL) { verifyclass(X, "ppp") W <- Window(X) lambda <- npoints(X)/area(W) rmaxdefault <- rmax.rule("K", W, lambda) bk <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) breaks <- bk$val rvalues <- bk$r u <- Kmeasure(X, sigma, ...) xx <- rasterx.im(u) yy <- rastery.im(u) rr <- sqrt(xx^2 + yy^2) tr <- whist(rr, breaks, u$v) K <- cumsum(tr) * with(u, xstep * ystep) rmax <- min(rr[is.na(u$v)]) K[rvalues >= rmax] <- NA result <- data.frame(r=rvalues, theo=pi * rvalues^2, border=K) w <- X$window alim <- c(0, min(diff(w$xrange), diff(w$yrange))/4) out <- fv(result, "r", quote(K(r)), "border", . ~ r, alim, c("r", "%s[pois](r)", "hat(%s)[fb](r)"), c("distance argument r", "theoretical Poisson %s", "border-corrected FFT estimate of %s"), fname="K", unitname=unitname(X) ) return(out) } spatstat/R/dppm.R0000644000176200001440000001261413115271075013407 0ustar liggesusers#' #' dppm.R #' #' $Revision: 1.8 $ $Date: 2017/06/05 10:31:58 $ dppm <- function(formula, family, data=NULL, ..., startpar = NULL, method = c("mincon", "clik2", "palm"), weightfun=NULL, control=list(), algorithm="Nelder-Mead", statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) { # Instantiate family if not already done. 
if(is.character(family)) family <- get(family, mode="function") if(inherits(family, "detpointprocfamilyfun")) { familyfun <- family family <- familyfun() } verifyclass(family, "detpointprocfamily") # Check for intensity as only unknown and exit (should be changed for likelihood method) if(length(family$freepar)==1 && (family$freepar %in% family$intensity)) stop("Only the intensity needs to be estimated. Please do this with ppm yourself.") # Detect missing rhs of 'formula' and fix if(inherits(formula, c("ppp", "quad"))){ Xname <- short.deparse(substitute(formula)) formula <- as.formula(paste(Xname, "~ 1")) } if(!inherits(formula, "formula")) stop(paste("Argument 'formula' should be a formula")) # kppm(formula, DPP = family, data = data, covariates = data, # startpar = startpar, method = method, weightfun = weightfun, # control = control, algorithm = algorithm, statistic = statistic, # statargs = statargs, rmax = rmax, covfunargs = covfunargs, # use.gam = use.gam, nd = nd, eps = eps, ...) thecall <- call("kppm", X=formula, DPP=family, data = data, covariates = data, startpar = startpar, method = method, weightfun = weightfun, control = control, algorithm = algorithm, statistic = statistic, statargs = statargs, rmax = rmax, covfunargs = covfunargs, use.gam = use.gam, nd = nd, eps = eps) ncall <- length(thecall) argh <- list(...) nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } callenv <- parent.frame() if(!is.null(data)) callenv <- list2env(data, parent=callenv) result <- eval(thecall, envir=callenv, enclos=baseenv()) return(result) } ## Auxiliary function to mimic cluster models for DPPs in kppm code spatstatDPPModelInfo <- function(model){ out <- list( modelname = paste(model$name, "DPP"), # In modelname field of mincon fv obj. descname = paste(model$name, "DPP"), # In desc field of mincon fv obj. modelabbrev = paste(model$name, "DPP"), # In fitted obj. printmodelname = function(...) 
paste(model$name, "DPP"), # Used by print.kppm parnames = model$freepar, checkpar = function(par){ return(par) }, checkclustargs = function(margs, old = TRUE) list(), resolvedots = function(...){ ## returning the input arguments p, q, rmin, rmax in list with one element 'ctrl' dots <- list(...) return(list(ctrl = dots[c("p", "q", "rmin", "rmax")])) }, ## K-function K = function(par, rvals, ...){ if(length(par)==1 && is.null(names(par))) names(par) <- model$freepar mod <- update(model, as.list(par)) if(!valid(mod)){ return(rep(Inf, length(rvals))) } else{ return(Kmodel(mod)(rvals)) } }, ## pair correlation function pcf = function(par, rvals, ...){ if(length(par)==1 && is.null(names(par))) names(par) <- model$freepar mod <- update(model, as.list(par)) if(!valid(mod)){ return(rep(Inf, length(rvals))) } else{ return(pcfmodel(mod)(rvals)) } }, ## sensible starting parameters selfstart = function(X) { return(model$startpar(model, X)) } ) return(out) } ## Auxilliary function used for DPP stuff in kppm.R dppmFixIntensity <- function(DPP, lambda, po){ lambdaname <- DPP$intensity if(is.null(lambdaname)) warning("The model has no intensity parameter.\n", "Prediction from the fitted model is invalid ", "(but no warning or error will be given by predict.dppm).") ## Update model object with estimated intensity if it is a free model parameter if(lambdaname %in% DPP$freepar){ clusters <- update(DPP, structure(list(lambda), .Names=lambdaname)) } else{ clusters <- DPP lambda <- intensity(clusters) ## Overwrite po object with fake version X <- po$Q$data dont.complain.about(X) po <- ppm(X~offset(log(lambda))-1) po$fitter <- "dppm" ## update pseudolikelihood value using code in logLik.ppm po$maxlogpl.orig <- po$maxlogpl po$maxlogpl <- logLik(po, warn=FALSE) ######################################### } return(list(clusters=clusters, lambda=lambda, po=po)) } ## Auxiliary function used for DPP stuff in kppm.R dppmFixAlgorithm <- function(algorithm, changealgorithm, clusters, startpar){ 
if(!setequal(clusters$freepar, names(startpar))) stop("Names of startpar vector does not match the free parameters of the model.") lower <- upper <- NULL if(changealgorithm){ bb <- dppparbounds(clusters, names(startpar)) if(all(is.finite(bb))){ algorithm <- "Brent" lower <- bb[1L] upper <- bb[2L] } else{ algorithm <- "BFGS" } } return(list(algorithm = algorithm, lower = lower, upper = upper)) } spatstat/R/GJfox.R0000755000176200001440000000556413115271075013475 0ustar liggesusers# # GJfox.R # # Foxall G-function and J-function # # $Revision: 1.7 $ $Date: 2014/10/14 04:00:43 $ # Gfox <- function(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), ...) { stopifnot(is.ppp(X)) if(!(is.ppp(Y) || is.psp(Y) || is.owin(Y))) stop("Y should be an object of class ppp, psp or owin") if(!identical(unitname(X), unitname(Y))) warning("X and Y are not in the same units") # if(is.null(correction)) correction <- c("rs", "km", "cs") correction <- pickoption("correction", correction, c(none="none", raw="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) corxtable <- c("km", "rs", "han", "none") corx <- as.list(corxtable %in% correction) names(corx) <- corxtable # ensure compatible windows WX <- as.owin(X) WY <- as.owin(Y) if(!is.subset.owin(WX, WY)) { warning("Trimming the window of X to be a subset of the window of Y") WX <- intersect.owin(WX, WY) X <- X[WX] } # compute distances and censoring distances D <- distfun(Y) dist <- D(X) bdry <- bdist.points(X[WY]) # histogram breakpoints dmax <- max(dist) breaks <- handle.r.b.args(r, breaks, WX, NULL, rmaxdefault=dmax) rval <- breaks$r # censoring indicators d <- (dist <= bdry) # observed distances o <- pmin.int(dist, bdry) # calculate estimates Z <- censtimeCDFest(o, bdry, d, breaks, KM=corx$km, RS=corx$rs, HAN=corx$han, RAW=corx$none, han.denom=if(corx$han) eroded.areas(WX, rval) else NULL, tt=dist) # relabel Z <- rebadge.fv(Z, quote(G[fox](r)), c("G", "fox")) 
unitname(Z) <- unitname(Y) return(Z) } Jfox <- function(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), ...) { H <- Hest(Y, r=r, breaks=breaks, correction=correction, ...) G <- Gfox(X, Y, r=H$r, correction=correction, ...) # derive J-function J <- eval.fv((1-G)/(1-H), dotonly=FALSE) # correct calculation of hazard is different if("hazard" %in% names(J)) J$hazard <- G$hazard - H$hazard # base labels on 'J' rather than full expression attr(J, "labl") <- attr(H, "labl") # add column of 1's J <- bind.fv(J, data.frame(theo=rep.int(1, nrow(J))), "%s[theo](r)", "theoretical value of %s for independence") # rename J <- rebadge.fv(J, quote(J[fox](r)), c("J", "fox")) funs <- c("km", "han", "rs", "raw", "theo") fvnames(J, ".") <- funs[funs %in% names(J)] unitname(J) <- unitname(Y) return(J) } spatstat/R/hierpair.family.R0000644000176200001440000002511313115225157015530 0ustar liggesusers# # # hierpair.family.R # # $Revision: 1.6 $ $Date: 2017/02/07 07:35:32 $ # # The family of hierarchical pairwise interactions # # # ------------------------------------------------------------------- # hierpair.family <- list( name = "hierpair", print = function(self) { splat("Hierarchical pairwise interaction family") }, plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || is.null(inter$family) || inter$family$name != "hierpair") stop("Tried to plot the wrong kind of interaction") # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # pairpot <- inter$pot potpars <- inter$par rmax <- reach(fint, epsilon=1e-3) xlim <- list(...)$xlim if(is.infinite(rmax)) { if(!is.null(xlim)) rmax <- max(xlim) else { warning("Reach of interaction is infinite; need xlim to plot it") return(invisible(NULL)) } } if(is.null(d)) { dmax <- 1.25 
* rmax d <- seq(from=0, to=dmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) dmax <- max(d) } if(is.null(xlim)) xlim <- c(0, dmax) types <- potpars$types if(is.null(types)) stop("Unable to determine types of points") if(!is.factor(types)) types <- factor(types, levels=types) ## compute each potential and store in `fasp' object m <- length(types) nd <- length(d) dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m) tx <- rep.int(types, rep.int(nd, m)) ty <- types p <- pairpot(dd, tx, ty, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3L] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3L])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fns <- vector(m^2, mode="list") which <- matrix(, m, m) for(i in seq_len(m)) { for(j in seq_len(m)) { ## relevant position in matrix ijpos <- i + (j-1L) * m which[i,j] <- ijpos ## extract values of potential yy <- y[tx == types[i], j] ## make fv object fns[[ijpos]] <- fv(data.frame(r=d, h=yy, one=1), "r", quote(h(r)), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1")) } } funz <- fasp(fns, which=which, formulae=list(cbind(h, one) ~ r), title="Fitted pairwise interactions", rowNames=paste(types), colNames=paste(types)) if(plotit) do.call(plot.fasp, resolve.defaults(list(funz), list(...), list(ylim=ylim, ylab="Pairwise interaction", xlab="Distance"))) return(invisible(funz)) }, # end of function `plot' # ---------------------------------------------------- eval = function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, pot.only=FALSE) { ## ## This is the eval function for the `hierpair' family. 
## fop <- names(formals(pairpot)) if(identical(all.equal(fop, c("d", "par")), TRUE)) marx <- FALSE else if(identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE)) marx <- TRUE else stop("Formal arguments of pair potential function are not understood") ## edge correction argument if(length(correction) > 1) stop("Only one edge correction allowed at a time!") if(!any(correction == c("periodic", "border", "translate", "translation", "isotropic", "Ripley", "none"))) stop(paste("Unrecognised edge correction", sQuote(correction))) no.correction <- #### Compute basic data # Decide whether to apply faster algorithm using 'closepairs' use.closepairs <- FALSE && (correction %in% c("none", "border", "translate", "translation")) && !is.null(Reach) && is.finite(Reach) && is.null(precomputed) && !savecomputed if(!is.null(precomputed)) { # precomputed X <- precomputed$X U <- precomputed$U EqualPairs <- precomputed$E M <- precomputed$M } else { U <- as.ppp(U, X$window) # i.e. X$window is DEFAULT window if(!use.closepairs) # Form the matrix of distances M <- crossdist(X, U, periodic=(correction=="periodic")) } nX <- npoints(X) nU <- npoints(U) dimM <- c(nX, nU) # Evaluate the pairwise potential without edge correction if(use.closepairs) POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach) else if(!marx) POT <- pairpot(M, potpars) else POT <- pairpot(M, marks(X), marks(U), potpars) # Determine whether each column of potential is an offset IsOffset <- attr(POT, "IsOffset") # Check errors and special cases if(!is.matrix(POT) && !is.array(POT)) { if(length(POT) == 0 && X$n == 0) # empty pattern POT <- array(POT, dim=c(dimM,1)) else stop("Pair potential did not return a matrix or array") } if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) { whinge <- paste0( "The pair potential function ",short.deparse(substitute(pairpot)), " must produce a matrix or array with its first two dimensions\n", "the same as the dimensions of its input.\n") stop(whinge) } # make it a 3D 
array if(length(dim(POT))==2) POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL) if(correction == "translate" || correction == "translation") { edgewt <- edge.Trans(X, U) # sanity check ("everybody knows there ain't no...") if(!is.matrix(edgewt)) stop("internal error: edge.Trans() did not yield a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions") POT <- c(edgewt) * POT } else if(correction == "isotropic" || correction == "Ripley") { # weights are required for contributions from QUADRATURE points edgewt <- t(edge.Ripley(U, t(M), X$window)) if(!is.matrix(edgewt)) stop("internal error: edge.Ripley() did not return a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Ripley() has wrong dimensions") POT <- c(edgewt) * POT } # No pair potential term between a point and itself if(length(EqualPairs) > 0) { nplanes <- dim(POT)[3L] for(k in 1:nplanes) POT[cbind(EqualPairs, k)] <- 0 } # Return just the pair potential? 
if(pot.only) return(POT) # Sum the pairwise potentials V <- apply(POT, c(2,3), sum) # attach the original pair potentials attr(V, "POT") <- POT # attach the offset identifier attr(V, "IsOffset") <- IsOffset # pass computed information out the back door if(savecomputed) attr(V, "computed") <- list(E=EqualPairs, M=M) return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="hierpair.family$suffstat") { # for hierarchical pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"hierpair")) stop("Model is not a hierarchical pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] Xin <- X[contribute] Xout <- X[!contribute] # partial model matrix arising from ordered pairs of data points # which both contribute to the pseudolikelihood Empty <- X[integer(0)] momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat") # partial model matrix at data points which contribute to the pseudolikelihood momIN <- partialModelMatrix(X, Empty, model, "suffstat")[contribute, , drop=FALSE] # partial model matrix arising from ordered pairs of data points # the second of which does not contribute to the pseudolikelihood mom <- partialModelMatrix(Xout, Xin, model, "suffstat") indx <- Xout$n + seq_len(Xin$n) momINxOUT <- mom[indx, , drop=FALSE] ## determine which canonical covariates are true second-order terms ## eg 'mark1x1' typ <- levels(marks(X)) vn <- paste0("mark", typ, "x", typ) order2 <- names(coef(model)) %in% vn order1 <- !order2 result <- 0 * coef(model) if(any(order1)) { # first order contributions (including 'mark1x2' etc) o1terms <- momIN[ , order1, drop=FALSE] o1sum <- colSums(o1terms) result[order1] <- 
o1sum } if(any(order2)) { # adjust for double counting of ordered pairs in INxIN but not INxOUT o2termsINxIN <- momINxIN[, order2, drop=FALSE] o2termsINxOUT <- momINxOUT[, order2, drop=FALSE] o2sum <- colSums(o2termsINxIN)/2 + colSums(o2termsINxOUT) result[order2] <- o2sum } return(result) }, ######### end of function $suffstat delta2 = function(X, inte, correction, ...) { # Sufficient statistic for second order conditional intensity # for hierarchical pairwise interaction processes # Equivalent to evaluating pair potential. X <- as.ppp(X) nX <- npoints(X) E <- cbind(1:nX, 1:nX) R <- reach(inte) result <- hierpair.family$eval(X,X,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R) } ######### end of function $delta2 ) ######### end of list class(hierpair.family) <- "isf" spatstat/R/simplepanel.R0000644000176200001440000001646713115225157014772 0ustar liggesusers# # simplepanel.R # # A simple, robust point & click interface # used in rmh visual debugger. # # $Revision: 1.14 $ $Date: 2016/04/25 02:34:40 $ # simplepanel <- function(title, B, boxes, clicks, redraws=NULL, exit=NULL, env) { stopifnot(is.rectangle(B)) stopifnot(is.list(boxes)) if(!all(unlist(lapply(boxes, is.rectangle)))) stop("some of the boxes are not rectangles") if(!all(unlist(lapply(boxes, is.subset.owin, B=B)))) stop("Some boxes do not lie inside the bounding box B") stopifnot(is.list(clicks) && length(clicks) == length(boxes)) if(!all(unlist(lapply(clicks, is.function)))) stop("clicks must be a list of functions") if(is.null(redraws)) { redraws <- rep.int(list(dflt.redraw), length(boxes)) } else { stopifnot(is.list(redraws) && length(redraws) == length(boxes)) if(any(isnul <- unlist(lapply(redraws, is.null)))) redraws[isnul] <- rep.int(list(dflt.redraw), sum(isnul)) if(!all(unlist(lapply(redraws, is.function)))) stop("redraws must be a list of functions") } if(is.null(exit)) { exit <- function(...) 
{ NULL} } else stopifnot(is.function(exit)) stopifnot(is.environment(env)) n <- length(boxes) bnames <- names(boxes) %orifnull% rep("", n) cnames <- names(clicks) %orifnull% rep("", n) dnames <- paste("Button", seq_len(n)) nama <- ifelse(nzchar(bnames), bnames, ifelse(nzchar(cnames), cnames, dnames)) out <- list(title=title, B=B, nama=nama, boxes=boxes, clicks=clicks, redraws=redraws, exit=exit, env=env) class(out) <- c("simplepanel", class(out)) return(out) } grow.simplepanel <- function(P, side=c("right","left","top","bottom"), len=NULL, new.clicks, new.redraws=NULL, ..., aspect) { verifyclass(P, "simplepanel") side <- match.arg(side) stopifnot(is.list(new.clicks)) if(!all(unlist(lapply(new.clicks, is.function)))) stop("new.clicks must be a list of functions") if(is.null(new.redraws)) { new.redraws <- rep.int(list(dflt.redraw), length(new.clicks)) } else { stopifnot(is.list(new.redraws) && length(new.redraws) == length(new.clicks)) if(any(isnul <- sapply(new.redraws, is.null))) new.redraws[isnul] <- rep.int(list(dflt.redraw), sum(isnul)) if(!all(unlist(lapply(new.redraws, is.function)))) stop("new.redraws must be a list of functions") } if(missing(aspect) || is.null(aspect)) { # determine aspect ratio from length of longest text string n <- length(new.clicks) nama <- names(new.clicks) if(sum(nzchar(nama)) != n) nama <- names(new.redraws) if(sum(nzchar(nama)) != n) nama <- paste("Box", seq_len(n)) aspect <- 3/max(4, nchar(nama)) } B <- P$B n <- length(new.clicks) switch(side, right={ new.width <- if(!is.null(len)) len else sidelengths(B)[1]/2 extraspace <- owin(B$xrange[2] + c(0, new.width), B$yrange) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect) }, left={ new.width <- if(!is.null(len)) len else sidelengths(B)[1]/2 extraspace <- owin(B$xrange[1] - c(new.width, 0), B$yrange) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect) }, top={ new.height <- if(!is.null(len)) len else sidelengths(B)[2]/2 extraspace <- owin(B$xrange, B$yrange[2] + c(0, 
new.height)) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect, horizontal=TRUE) }, bottom={ new.height <- if(!is.null(len)) len else sidelengths(B)[2]/2 extraspace <- owin(B$xrange, B$yrange[1] - c(new.height, 0)) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect, horizontal=TRUE) }) with(P, simplepanel(title, boundingbox(B, extraspace), append(boxes, new.boxes), append(clicks, new.clicks), append(redraws, new.redraws), exit, env)) } redraw.simplepanel <- function(P, verbose=FALSE) { verifyclass(P, "simplepanel") if(verbose) cat("Redrawing entire panel\n") with(P, { # ntitle <- sum(nzchar(title)) plot(B, type="n", main=title) for(j in seq_along(nama)) (redraws[[j]])(boxes[[j]], nama[j], env) }) invisible(NULL) } clear.simplepanel <- function(P) { verifyclass(P, "simplepanel") plot(P$B, main="") invisible(NULL) } run.simplepanel <- function(P, popup=TRUE, verbose=FALSE) { verifyclass(P, "simplepanel") if(popup) dev.new() ntitle <- sum(nzchar(P$title)) opa <- par(mar=c(0,0,ntitle+0.2,0),ask=FALSE) with(P, { # interaction loop more <- TRUE while(more) { redraw.simplepanel(P, verbose=verbose) xy <- locator(1) if(is.null(xy)) { if(verbose) cat("No (x,y) coordinates\n") break } found <- FALSE for(j in seq_along(boxes)) { if(inside.owin(xy$x, xy$y, boxes[[j]])) { found <- TRUE if(verbose) cat(paste("Caught click on", sQuote(nama[j]), "\n")) more <- (clicks[[j]])(env, xy) if(!is.logical(more) || length(more) != 1) { warning(paste("Click function for", sQuote(nama[j]), "did not return TRUE/FALSE")) more <- FALSE } if(verbose) cat(if(more) "Continuing\n" else "Terminating\n") break } } if(verbose && !found) cat(paste("Coordinates", paren(paste(xy, collapse=",")), "not matched to any box\n")) } }) if(verbose) cat("Calling exit function\n") rslt <- with(P, exit(env)) # revert to original graphics parameters par(opa) # close popup window? 
if(popup) dev.off() # return value of 'exit' function return(rslt) } layout.boxes <- function(B, n, horizontal=FALSE, aspect=0.5, usefrac=0.9){ # make n boxes in B stopifnot(is.rectangle(B)) stopifnot(n > 0) width <- sidelengths(B)[1] height <- sidelengths(B)[2] if(!horizontal) { heightshare <- height/n useheight <- min(width * aspect, heightshare * usefrac) usewidth <- min(useheight /aspect, width * usefrac) lostwidth <- width - usewidth lostheightshare <- heightshare - useheight template <- owin(c(0, usewidth), c(0, useheight)) boxes <- list() boxes[[1]] <- shift(template, c(B$xrange[1]+lostwidth/2, B$yrange[1] + lostheightshare/2)) if(n > 1) for(j in 2:n) boxes[[j]] <- shift(boxes[[j-1]], c(0, heightshare)) } else { boxes <- layout.boxes(flipxy(B), n, horizontal=FALSE, aspect=1/aspect, usefrac=usefrac) boxes <- lapply(boxes, flipxy) } return(boxes) } # default redraw function for control buttons dflt.redraw <- function(button, name, env) { plot(button, add=TRUE, border="pink") text(centroid.owin(button), labels=name) } print.simplepanel <- function(x, ...) 
{ nama <- x$nama cat("simplepanel object\n") cat(paste("\tTitle:", sQuote(x$title), "\n")) cat("\tPanel names:") for(i in seq_along(nama)) { if(i %% 6 == 1) cat("\n\t") cat(paste0(sQuote(nama[i]), " ")) } cat("\n") return(invisible(NULL)) } spatstat/R/Ksector.R0000644000176200001440000001722613115225157014065 0ustar liggesusers# # Ksector.R Estimation of 'sector K function' # # $Revision: 1.5 $ $Date: 2014/11/10 10:41:14 $ # Ksector <- function(X, begin=0, end=360, ..., units=c("degrees", "radians"), r=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), domain = NULL, ratio=FALSE, verbose=TRUE) { verifyclass(X, "ppp") # rfixed <- !is.null(r) || !is.null(breaks) npts <- npoints(X) W <- Window(X) areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) rmaxdefault <- rmax.rule("K", W, lambda) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max if(!is.null(domain)) { domain <- as.owin(domain) stopifnot(is.subset.owin(domain, Window(X))) areaW <- area(domain) } units <- match.arg(units) switch(units, radians = { if(missing(end)) end <- 2 * pi check.1.real(begin) check.1.real(end) check.in.range(begin, c(-pi, 2*pi)) check.in.range(end, c(0, 2*pi)) stopifnot(begin < end) stopifnot((end - begin) <= 2 * pi) BEGIN <- begin END <- end Bname <- simplenumber(begin/pi, "pi") %orifnull% signif(begin, 3) Ename <- simplenumber(end/pi, "pi") %orifnull% signif(end, 3) }, degrees = { check.1.real(begin) check.1.real(end) check.in.range(begin, c(-90, 360)) check.in.range(end, c(0, 360)) stopifnot(begin < end) stopifnot((end - begin) <= 360) if(verbose && (end - begin) <= 2 * pi) warning("Very small interval in degrees: did you mean radians?") BEGIN <- pi* (begin/180) END <- pi * (end/180) Bname <- signif(begin, 3) Ename <- signif(end, 3) }) ## choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", 
"Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) ## replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) ## retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) ## recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ## labels subscripts <- paste("sector", Bname, Ename, sep=",") ylabel <- paste("K[", subscripts, "]") ylab <- eval(parse(text=paste("quote(", ylabel, ")"))) # ylab <- parse(text=paste("K[sector,", Bname, ",", Ename, "]")) # yexp <- substitute(K[list(sector,B,E)](r), # list(B=Bname, E=Ename)) yexp <- parse(text=paste("K[list(", subscripts, ")]")) fname <- c("K", paste("list", paren(subscripts))) ## this will be the output data frame Kdf <- data.frame(r=r, theo = ((END-BEGIN)/2) * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- lambda2 * areaW K <- ratfv(Kdf, NULL, denom, "r", ylab = ylab, valu = "theo", fmla = NULL, alim =alim, labl = c("r","{%s[%s]^{pois}}(r)"), desc = desc, fname=fname, yexp=yexp, ratio=ratio) ## identify all close pairs rmax <- max(r) close <- as.data.frame(closepairs(X, rmax)) if(!is.null(domain)) { ## restrict to pairs with first point in 'domain' indom <- with(close, inside.owin(xi, yi, domain)) close <- close[indom, , drop=FALSE] } ## select pairs in angular range ang <- with(close, atan2(dy, dx)) %% (2*pi) if(BEGIN >= 0) { ## 0 <= begin < end ok <- (BEGIN <= ang) & (ang <= END) } else { ## begin < 0 <= end ok <- (ang >= 2 * pi + BEGIN) | (ang <= END) } close <- close[ok, , drop=FALSE] ## pairwise distances DIJ <- close$d if(any(correction == "none")) { # 
uncorrected! For demonstration purposes only! wh <- whist(DIJ, breaks$val) # no weights numKun <- cumsum(wh) denKun <- lambda2 * areaW # uncorrected estimate of K K <- bind.ratfv(K, data.frame(un=numKun), denKun, "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un", ratio=ratio) } if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) I <- close$i bI <- b[I] if(!is.null(domain)) b <- b[inside.owin(X, , w=domain)] # apply reduced sample algorithm RS <- Kount(DIJ, bI, b, breaks) if(any(correction == "bord.modif")) { # modified border correction denom.area <- eroded.areas(W, r, subset=domain) numKbm <- RS$numerator denKbm <- lambda2 * denom.area K <- bind.ratfv(K, data.frame(bord.modif=numKbm), data.frame(bord.modif=denKbm), "{hat(%s)[%s]^{bordm}}(r)", "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } if(any(correction == "border")) { numKb <- RS$numerator denKb <- lambda * RS$denom.count K <- bind.ratfv(K, data.frame(border=numKb), data.frame(border=denKb), "{hat(%s)[%s]^{bord}}(r)", "border-corrected estimate of %s", "border", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE) wh <- whist(DIJ, breaks$val, edgewt) numKtrans <- cumsum(wh) denKtrans <- lambda2 * areaW h <- diameter(as.rectangle(W))/2 numKtrans[r >= h] <- NA K <- bind.ratfv(K, data.frame(trans=numKtrans), denKtrans, "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction == "isotropic")) { ## Ripley isotropic correction XI <- ppp(close$xi, close$yi, window=W, check=FALSE) edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) numKiso <- cumsum(wh) denKiso <- lambda2 * areaW h <- diameter(W)/2 numKiso[r >= h] <- NA K <- bind.ratfv(K, data.frame(iso=numKiso), denKiso, "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic 
correction estimate of %s", "iso", ratio=ratio) } # # default plot will display all edge corrections formula(K) <- . ~ r nama <- rev(colnames(K)) nama <- nama[!(nama %in% c("r", "rip", "ls"))] fvnames(K, ".") <- nama unitname(K) <- unitname(X) # copy to other components if(ratio) K <- conform.ratfv(K) return(K) } spatstat/R/envelope.R0000755000176200001440000022673213115271075014277 0ustar liggesusers# # envelope.R # # computes simulation envelopes # # $Revision: 2.86 $ $Date: 2017/06/05 10:31:58 $ # envelope <- function(Y, fun, ...) { UseMethod("envelope") } # ................................................................. # A 'simulation recipe' contains the following variables # # type = Type of simulation # "csr": uniform Poisson process # "rmh": simulated realisation of fitted Gibbs or Poisson model # "kppm": simulated realisation of fitted cluster model # "expr": result of evaluating a user-supplied expression # "list": user-supplied list of point patterns # # expr = expression that is repeatedly evaluated to generate simulations # # envir = environment in which to evaluate the expression `expr' # # 'csr' = TRUE iff the model is (known to be) uniform Poisson # # pois = TRUE if model is known to be Poisson # # constraints = additional information about simulation (e.g. 'with fixed n') # # ................................................................... 
simulrecipe <- function(type, expr, envir, csr, pois=csr, constraints="") { if(csr && !pois) warning("Internal error: csr=TRUE but pois=FALSE") out <- list(type=type, expr=expr, envir=envir, csr=csr, pois=pois, constraints=constraints) class(out) <- "simulrecipe" out } envelope.ppp <- function(Y, fun=Kest, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) ismarked <- is.marked(Y) ismulti <- is.multitype(Y) fix.marks <- fix.marks && ismarked if(!is.null(simulate)) { # ................................................... # Simulations are determined by 'simulate' argument if(fix.n || fix.marks) warning("fix.n and fix.marks were ignored, because 'simulate' was given") # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } else if(!fix.n && !fix.marks) { # ................................................... 
# Realisations of complete spatial randomness # will be generated by rpoispp # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y sY <- summary(Y, checkdup=FALSE) Yintens <- sY$intensity nY <- npoints(Y) Ywin <- Y$window Ymarx <- marks(Y) # expression that will be evaluated simexpr <- if(is.null(Ymarx)) { # unmarked point pattern expression(rpoispp(Yintens, win=Ywin)) } else if(is.null(dim(Ymarx))) { # single column of marks expression({ A <- rpoispp(Yintens, win=Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { # multiple columns of marks expression({ A <- rpoispp(Yintens, win=Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, , drop=FALSE] }) } dont.complain.about(Yintens, Ywin) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE) } else if(fix.marks) { # ................................................... # Data pattern is argument Y X <- Y # Realisations of binomial process # with fixed number of points and fixed marks # will be generated by runifpoint nY <- npoints(Y) Ywin <- as.owin(Y) Ymarx <- marks(Y) # expression that will be evaluated simexpr <- expression(runifpoint(nY, Ywin) %mark% Ymarx) # suppress warnings from code checkers dont.complain.about(nY, Ywin, Ymarx) # simulation constraints (explanatory string) constraints <- if(ismulti) "with fixed number of points of each type" else "with fixed number of points and fixed marks" # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE, constraints = constraints) } else { # ................................................... 
    # Data pattern is argument Y
    X <- Y
    # Realisations of binomial process
    # will be generated by runifpoint
    nY <- npoints(Y)
    Ywin <- as.owin(Y)
    Ymarx <- marks(Y)
    # expression that will be evaluated
    ## fix.n case: conditional on the observed number of points;
    ## marks (if any) are resampled from the data marks
    simexpr <- if(is.null(Ymarx)) {
      ## unmarked
      expression(runifpoint(nY, Ywin))
    } else if(is.null(dim(Ymarx))) {
      ## single column of marks
      expression({
        A <- runifpoint(nY, Ywin);
        j <- sample(nY, npoints(A), replace=TRUE);
        A %mark% Ymarx[j]
      })
    } else {
      ## multiple columns of marks
      expression({
        A <- runifpoint(nY, Ywin);
        j <- sample(nY, npoints(A), replace=TRUE);
        A %mark% Ymarx[j, ,drop=FALSE]
      })
    }
    dont.complain.about(nY, Ywin)
    # evaluate in THIS environment
    simrecipe <- simulrecipe(type = "csr",
                             expr = simexpr,
                             envir = envir.here,
                             csr = TRUE,
                             pois = TRUE,
                             constraints = "with fixed number of points")
  }
  ## hand over to the engine, which runs the simulations and builds envelopes
  envelopeEngine(X=X, fun=fun, simul=simrecipe,
                 nsim=nsim, nrank=nrank, ...,
                 funargs=funargs, funYargs=funYargs,
                 verbose=verbose, clipdata=clipdata,
                 transform=transform, global=global, ginterval=ginterval,
                 use.theory=use.theory,
                 alternative=alternative, scale=scale, clamp=clamp,
                 savefuns=savefuns, savepatterns=savepatterns,
                 nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD,
                 Yname=Yname, maxnerr=maxnerr, cl=cl,
                 envir.user=envir.user, do.pwrong=do.pwrong)
}

## Simulation envelopes for a fitted Gibbs/Poisson model (class "ppm").
## Null simulations come from the fitted model via Metropolis-Hastings (rmh),
## optionally conditioned on the number of points (fix.n) or on the number
## of points of each type (fix.marks).
envelope.ppm <- function(Y, fun=Kest, nsim=99, nrank=1, ...,
                         funargs=list(), funYargs=funargs,
                         simulate=NULL, fix.n=FALSE, fix.marks=FALSE,
                         verbose=TRUE, clipdata=TRUE,
                         start=NULL,
                         control=update(default.rmhcontrol(Y), nrep=nrep),
                         nrep=1e5,
                         transform=NULL, global=FALSE, ginterval=NULL,
                         use.theory=NULL,
                         alternative=c("two.sided", "less", "greater"),
                         scale=NULL, clamp=FALSE,
                         savefuns=FALSE, savepatterns=FALSE,
                         nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL,
                         maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) {
  cl <- short.deparse(sys.call())
  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
  if(is.null(fun)) fun <- Kest
  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
  envir.here <- sys.frame(sys.nframe())
  # Extract data pattern X from fitted model Y
  X <- data.ppm(Y)
  if(is.null(simulate)) {
    # ...................................................
    # Simulated realisations of the fitted model Y
    # will be generated
    pois <- is.poisson(Y)
    ## stationary Poisson model is CSR
    csr <- is.stationary(Y) && pois
    type <- if(csr) "csr" else "rmh"
    # Set up parameters for rmh
    rmodel <- rmhmodel(Y, verbose=FALSE)
    if(is.null(start))
      start <- list(n.start=npoints(X))
    rstart <- rmhstart(start)
    rcontr <- rmhcontrol(control)
    if(fix.marks) {
      ## condition on the number of points of each type:
      ## shifts only (p=1), no expansion, marks not switched
      rcontr <- update(rcontr, fixall=TRUE, p=1, expand=1)
      nst <- if(is.multitype(X)) table(marks(X)) else npoints(X)
      rstart <- update(rstart, n.start=nst)
      constraints <- "with fixed number of points of each type"
    } else if(fix.n) {
      ## condition on the total number of points
      rcontr <- update(rcontr, p=1, expand=1)
      rstart <- update(rstart, n.start=X$n)
      constraints <- "with fixed number of points"
    } else constraints <- ""
    # pre-digest arguments
    ## preponly=TRUE does the expensive setup once; rmhEngine reuses it
    rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=FALSE)
    # expression that will be evaluated
    simexpr <- expression(rmhEngine(rmhinfolist, verbose=FALSE))
    dont.complain.about(rmhinfolist)
    # evaluate in THIS environment
    simrecipe <- simulrecipe(type = type,
                             expr = simexpr,
                             envir = envir.here,
                             csr = csr,
                             pois = pois,
                             constraints = constraints)
  } else {
    # ...................................................
    # Simulations are determined by 'simulate' argument
    # Processing is deferred to envelopeEngine
    simrecipe <- simulate
  }
  ## delegate the simulation loop and envelope computation to the engine
  envelopeEngine(X=X, fun=fun, simul=simrecipe,
                 nsim=nsim, nrank=nrank, ...,
                 funargs=funargs, funYargs=funYargs,
                 verbose=verbose, clipdata=clipdata,
                 transform=transform, global=global, ginterval=ginterval,
                 use.theory=use.theory,
                 alternative=alternative, scale=scale, clamp=clamp,
                 savefuns=savefuns, savepatterns=savepatterns,
                 nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD,
                 Yname=Yname, maxnerr=maxnerr, cl=cl,
                 envir.user=envir.user, do.pwrong=do.pwrong)
}

## Simulation envelopes for a fitted cluster/Cox model (class "kppm").
## Null simulations are draws from the fitted model via simulate.kppm.
envelope.kppm <- function(Y, fun=Kest, nsim=99, nrank=1, ...,
                          funargs=list(), funYargs=funargs,
                          simulate=NULL,
                          verbose=TRUE, clipdata=TRUE,
                          transform=NULL, global=FALSE, ginterval=NULL,
                          use.theory=NULL,
                          alternative=c("two.sided", "less", "greater"),
                          scale=NULL, clamp=FALSE,
                          savefuns=FALSE, savepatterns=FALSE,
                          nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL,
                          maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) {
  cl <- short.deparse(sys.call())
  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
  if(is.null(fun)) fun <- Kest
  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
  envir.here <- sys.frame(sys.nframe())
  # Extract data pattern X from fitted model Y
  X <- Y$X
  if(is.null(simulate)) {
    # Simulated realisations of the fitted model Y
    # will be generated using simulate.kppm
    kmodel <- Y
    # expression that will be evaluated
    ## simulate() returns a list of patterns; take the single realisation
    simexpr <- expression(simulate(kmodel)[[1L]])
    dont.complain.about(kmodel)
    # evaluate in THIS environment
    simrecipe <- simulrecipe(type = "kppm",
                             expr = simexpr,
                             envir = envir.here,
                             csr = FALSE,
                             pois = FALSE)
  } else {
    # ...................................................
    # Simulations are determined by 'simulate' argument
    # Processing is deferred to envelopeEngine
    simrecipe <- simulate
  }
  envelopeEngine(X=X, fun=fun, simul=simrecipe,
                 nsim=nsim, nrank=nrank, ...,
                 funargs=funargs, funYargs=funYargs,
                 verbose=verbose, clipdata=clipdata,
                 transform=transform, global=global, ginterval=ginterval,
                 use.theory=use.theory,
                 alternative=alternative, scale=scale, clamp=clamp,
                 savefuns=savefuns, savepatterns=savepatterns,
                 nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD,
                 Yname=Yname, maxnerr=maxnerr, cl=cl,
                 envir.user=envir.user, do.pwrong=do.pwrong)
}

## .................................................................
## Engine for simulating and computing envelopes
## .................................................................
#
#  X is the data point pattern, which could be ppp, pp3, ppx etc
#  X determines the class of pattern expected from the simulations
#
envelopeEngine <- function(X, fun, simul,
                           nsim=99, nrank=1, ...,
                           funargs=list(), funYargs=funargs,
                           verbose=TRUE, clipdata=TRUE,
                           transform=NULL, global=FALSE, ginterval=NULL,
                           use.theory=NULL,
                           alternative=c("two.sided", "less", "greater"),
                           scale=NULL, clamp=FALSE,
                           savefuns=FALSE, savepatterns=FALSE,
                           saveresultof=NULL,
                           weights=NULL,
                           nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL,
                           maxnerr=nsim, internal=NULL, cl=NULL,
                           envir.user=envir.user,
                           expected.arg="r",
                           do.pwrong=FALSE,
                           foreignclass=NULL,
                           collectrubbish=FALSE) {
  #
  envir.here <- sys.frame(sys.nframe())
  alternative <- match.arg(alternative)
  ## 'foreignclass' allows patterns of a class unknown to spatstat;
  ## clipping is only implemented for the native classes
  foreignclass <- as.character(foreignclass)
  if(length(foreignclass) != 0 && clipdata) {
    warning(paste("Ignoring clipdata=TRUE:",
                  "I don't know how to clip objects of class",
                  sQuote(paste(foreignclass, collapse=","))))
    clipdata <- FALSE
  }
  # ----------------------------------------------------------
  # Determine Simulation
  # ----------------------------------------------------------
  # Identify class of patterns to be simulated, from data pattern X
  Xclass <- if(is.ppp(X)) "ppp" else
            if(is.pp3(X)) "pp3" else
            if(is.ppx(X)) "ppx" else
            if(inherits(X, foreignclass)) foreignclass else
            stop("Unrecognised class of point pattern")
  Xobjectname <- paste("point pattern of class", sQuote(Xclass))
  # Option to use weighted average
  if(use.weights <- !is.null(weights)) {
    # weight can be either a numeric vector or a function
    if(is.numeric(weights)) {
      compute.weights <- FALSE
      weightfun <- NULL
    } else if(is.function(weights)) {
      ## weights will be computed from each simulated pattern as it is generated
      compute.weights <- TRUE
      weightfun <- weights
      weights <- NULL
    } else stop("weights should be either a function or a numeric vector")
  } else compute.weights <- FALSE
  # Undocumented option to generate patterns only.
  patterns.only <- identical(internal$eject, "patterns")
  # Undocumented option to evaluate 'something' for each pattern
  if(savevalues <- !is.null(saveresultof)) {
    stopifnot(is.function(saveresultof))
    SavedValues <- list()
  }
  # Identify type of simulation from argument 'simul'
  if(inherits(simul, "simulrecipe")) {
    # ..................................................
    # simulation recipe is given
    simtype <- simul$type
    simexpr <- simul$expr
    envir <- simul$envir
    csr <- simul$csr
    pois <- simul$pois
    constraints <- simul$constraints
  } else {
    # ...................................................
    # simulation is specified by argument `simulate' to envelope()
    simulate <- simul
    # which should be an expression, or a list of point patterns,
    # or an envelope object.
    csr <- FALSE
    # override
    if(!is.null(icsr <- internal$csr)) csr <- icsr
    pois <- csr
    constraints <- ""
    #
    model <- NULL
    if(inherits(simulate, "envelope")) {
      # envelope object: see if it contains stored point patterns
      simpat <- attr(simulate, "simpatterns")
      if(!is.null(simpat))
        simulate <- simpat
      else
        stop(paste("The argument", sQuote("simulate"),
                   "is an envelope object but does not contain",
                   "any saved point patterns."))
    }
    if(is.expression(simulate)) {
      # The user-supplied expression 'simulate' will be evaluated repeatedly
      simtype <- "expr"
      simexpr <- simulate
      ## evaluated in the caller's environment, not this one
      envir <- envir.user
    } else if(is.list(simulate) &&
              all(sapply(simulate, inherits, what=Xclass))) {
      # The user-supplied list of point patterns will be used
      simtype <- "list"
      SimDataList <- simulate
      # expression that will be evaluated
      ## indexes into the list using the loop counter 'i' of this frame
      simexpr <- expression(SimDataList[[i]])
      dont.complain.about(SimDataList)
      envir <- envir.here
      # ensure that `i' is defined
      i <- 1L
      # any messages?
      if(!is.null(mess <- attr(simulate, "internal"))) {
        # determine whether these point patterns are realisations of CSR
        csr <- !is.null(mc <- mess$csr) && mc
      }
    } else stop(paste(sQuote("simulate"),
                      "should be an expression, or a list of point patterns"))
  }
  # -------------------------------------------------------------------
  # Determine clipping window
  # ------------------------------------------------------------------
  if(clipdata) {
    # Generate one realisation
    ## a throwaway simulation, used only to discover the simulation window
    Xsim <- eval(simexpr, envir=envir)
    if(!inherits(Xsim, Xclass))
      switch(simtype,
             csr=stop(paste("Internal error:", Xobjectname, "not generated")),
             rmh=stop(paste("Internal error: rmh did not return an",
                            Xobjectname)),
             kppm=stop(paste("Internal error: simulate.kppm did not return an",
                             Xobjectname)),
             expr=stop(paste("Evaluating the expression", sQuote("simulate"),
                             "did not yield an", Xobjectname)),
             list=stop(paste("Internal error: list entry was not an",
                             Xobjectname)),
             stop(paste("Internal error:", Xobjectname, "not generated"))
             )
    # Extract window
    clipwin <- Xsim$window
    if(!is.subset.owin(clipwin, X$window))
      warning("Window containing simulated patterns is not a subset of data window")
  }
  # ------------------------------------------------------------------
  # Summary function to be applied
  # ------------------------------------------------------------------
  if(is.null(fun))
    stop("Internal error: fun is NULL")
  # Name of function, for error messages
  fname <- if(is.name(substitute(fun))) short.deparse(substitute(fun))
           else if(is.character(fun)) fun
           else "fun"
  fname <- sQuote(fname)
  # R function to apply
  if(is.character(fun)) {
    gotfun <- try(get(fun, mode="function"))
    if(inherits(gotfun, "try-error"))
      stop(paste("Could not find a function named", sQuote(fun)))
    fun <- gotfun
  } else if(!is.function(fun))
    stop(paste("unrecognised format for function", fname))
  ## the summary function must accept the distance argument(s) or '...'
  fargs <- names(formals(fun))
  if(!any(c(expected.arg, "...") %in% fargs))
    stop(paste(fname, "should have",
               ngettext(length(expected.arg), "an argument", "arguments"),
               "named", commasep(sQuote(expected.arg)),
               "or a", sQuote("..."), "argument"))
  usecorrection <- any(c("correction", "...") %in% fargs)
  # ---------------------------------------------------------------------
  # validate other arguments
  if((nrank %% 1) != 0)
    stop("nrank must be an integer")
  if((nsim %% 1) != 0)
    stop("nsim must be an integer")
  stopifnot(nrank > 0 && nrank < nsim/2)
  ## was an 'r'-type argument given explicitly by the user?
  rgiven <- any(expected.arg %in% names(list(...)))
  if(tran <- !is.null(transform)) {
    stopifnot(is.expression(transform))
    # prepare expressions to be evaluated each time
    transform.funX <- inject.expr("with(funX,.)", transform)
    transform.funXsim <- inject.expr("with(funXsim,.)", transform)
    # .... old code using 'eval.fv' ......
    # transform.funX <- dotexpr.to.call(transform, "funX", "eval.fv")
    # transform.funXsim <- dotexpr.to.call(transform, "funXsim", "eval.fv")
    # 'transform.funX' and 'transform.funXsim' are unevaluated calls to eval.fv
  }
  if(!is.null(ginterval))
    stopifnot(is.numeric(ginterval) && length(ginterval) == 2)
  # ---------------------------------------------------------------------
  # Evaluate function for data pattern X
  # ------------------------------------------------------------------
  Xarg <- if(!clipdata) X else X[clipwin]
  corrx <- if(usecorrection) list(correction="best") else NULL
  funX <- do.call(fun,
                  resolve.defaults(list(Xarg),
                                   list(...),
                                   funYargs,
                                   corrx))
  if(!inherits(funX, "fv"))
    stop(paste("The function", fname,
               "must return an object of class", sQuote("fv")))
  ## warn about 'dangerous' arguments
  ## (arguments that, when fixed by the user, can invalidate the envelope)
  if(!is.null(dang <- attr(funX, "dangerous")) &&
     any(uhoh <- dang %in% names(list(...)))) {
    nuh <- sum(uhoh)
    warning(paste("Envelope may be invalid;",
                  ngettext(nuh, "argument", "arguments"),
                  commasep(sQuote(dang[uhoh])),
                  ngettext(nuh, "appears", "appear"),
                  "to have been fixed."),
            call.=FALSE)
  }
  argname <- fvnames(funX, ".x")
  valname <- fvnames(funX, ".y")
  has.theo <- "theo" %in% fvnames(funX, "*")
  csr.theo <- csr && has.theo
  use.theory <- if(is.null(use.theory)) csr.theo else (use.theory && has.theo)
  if(tran) {
    # extract only the recommended value
    if(use.theory)
      funX <- funX[, c(argname, valname, "theo")]
    else
      funX <- funX[, c(argname, valname)]
    # apply the transformation to it
    funX <- eval(transform.funX)
  }
  rvals <- funX[[argname]]
  #
  fX <- funX[[valname]]
  # default domain over which to maximise
  alim <- attr(funX, "alim")
  if(global && is.null(ginterval))
    ginterval <- if(rgiven || is.null(alim)) range(rvals) else alim
  #--------------------------------------------------------------------
  # Determine number of simulations
  # ------------------------------------------------------------------
  #
  ## determine whether dual simulations are required
  ## (one set of simulations to calculate the theoretical mean,
  ## another independent set of simulations to obtain the critical point.)
  dual <- (global && !use.theory && !VARIANCE)
  Nsim <- if(!dual) nsim else (nsim + nsim2)
  # if taking data from a list of point patterns,
  # check there are enough of them
  if(simtype == "list" && Nsim > length(SimDataList))
    stop(paste("Number of simulations",
               paren(if(!dual) paste(nsim) else
                     paste(nsim, "+", nsim2, "=", Nsim)
                     ),
               "exceeds number of point pattern datasets supplied"))
  # Undocumented secret exit
  # ------------------------------------------------------------------
  if(patterns.only) {
    # generate simulated realisations and return only these patterns
    if(verbose) {
      action <- if(simtype == "list") "Extracting" else "Generating"
      descrip <- switch(simtype,
                        csr = "simulations of CSR",
                        rmh = paste("simulated realisations of fitted",
                                    if(pois) "Poisson" else "Gibbs",
                                    "model"),
                        kppm = "simulated realisations of fitted cluster model",
                        expr = "simulations by evaluating expression",
                        list = "point patterns from list",
                        "simulated realisations")
      if(!is.null(constraints) && nzchar(constraints))
        descrip <- paste(descrip, constraints)
      explan <- if(dual) paren(paste(nsim2, "to estimate the mean and", nsim,
                                     "to calculate envelopes")) else ""
      splat(action, Nsim, descrip, explan, "...")
    }
    XsimList <- list()
    # start simulation loop
    sstate <- list()
    for(i in 1:Nsim) {
      if(verbose) sstate <- progressreport(i, Nsim, state=sstate)
      Xsim <- eval(simexpr, envir=envir)
      if(!inherits(Xsim, Xclass))
        switch(simtype,
               csr={
                 stop(paste("Internal error:", Xobjectname, "not generated"))
               },
               rmh={
                 stop(paste("Internal error: rmh did not return an",
                            Xobjectname))
               },
               kppm={
                 stop(paste("Internal error: simulate.kppm did not return an",
                            Xobjectname))
               },
               expr={
                 stop(paste("Evaluating the expression", sQuote("simulate"),
                            "did not yield an", Xobjectname))
               },
               list={
                 stop(paste("Internal error: list entry was not an",
                            Xobjectname))
               },
               stop(paste("Internal error:", Xobjectname, "not generated"))
               )
      XsimList[[i]] <- Xsim
    }
    if(verbose) {
      cat(paste("Done.\n"))
      flush.console()
    }
    attr(XsimList, "internal") <- list(csr=csr)
    return(XsimList)
  }
  # capture main decision parameters
  ## this metadata travels with the result as attr(, "einfo") and drives
  ## the print/summary methods
  envelopeInfo <- list(call=cl,
                       Yname=Yname,
                       valname=valname,
                       csr=csr,
                       csr.theo=csr.theo,
                       use.theory=use.theory,
                       pois=pois,
                       simtype=simtype,
                       constraints=constraints,
                       nrank=nrank,
                       nsim=nsim,
                       Nsim=Nsim,
                       global=global,
                       ginterval=ginterval,
                       dual=dual,
                       nsim2=nsim2,
                       VARIANCE=VARIANCE,
                       nSD=nSD,
                       alternative=alternative,
                       scale=scale,
                       clamp=clamp,
                       use.weights=use.weights,
                       do.pwrong=do.pwrong)
  # ----------------------------------------
  ######### SIMULATE #######################
  # ----------------------------------------
  if(verbose) {
    action <- if(simtype == "list") "Extracting" else "Generating"
    descrip <- switch(simtype,
                      csr = "simulations of CSR",
                      rmh = paste("simulated realisations of fitted",
                                  if(pois) "Poisson" else "Gibbs",
                                  "model"),
                      kppm = "simulated realisations of fitted cluster model",
                      expr = "simulations by evaluating expression",
                      list = "point patterns from list",
                      "simulated patterns")
    if(!is.null(constraints) && nzchar(constraints))
      descrip <- paste(descrip, constraints)
    explan <- if(dual) paren(paste(nsim2, "to estimate the mean and", nsim,
                                   "to calculate envelopes")) else ""
    splat(action, Nsim, descrip, explan, "...")
  }
  # determine whether simulated point patterns should be saved
  catchpatterns <- savepatterns && simtype != "list"
  Caughtpatterns <- list()
  # allocate space for computed function values
  nrvals <- length(rvals)
  simvals <- matrix(, nrow=nrvals, ncol=Nsim)
  # allocate space for weights to be computed
  if(compute.weights)
    weights <- numeric(Nsim)
  # inferred values of function argument 'r' or equivalent parameters
  ## the simulated curves must be evaluated at the same r values as the data
  if(identical(expected.arg, "r")) {
    # Kest, etc
    inferred.r.args <- list(r=rvals)
  } else if(identical(expected.arg, c("rmax", "nrval"))) {
    # K3est, etc
    inferred.r.args <- list(rmax=max(rvals), nrval=length(rvals))
  } else
    stop(paste("Don't know how to infer values of", commasep(expected.arg)))
  # arguments for function when applied to simulated patterns
  funargs <- resolve.defaults(funargs,
                              inferred.r.args,
                              list(...),
                              if(usecorrection) list(correction="best") else NULL)
  # start simulation loop
  nerr <- 0
  if(verbose) pstate <- list()
  for(i in 1:Nsim) {
    ok <- FALSE
    # safely generate a random pattern and apply function
    ## on error in 'fun', retry with a fresh pattern, up to 'maxnerr' times
    while(!ok) {
      Xsim <- eval(simexpr, envir=envir)
      # check valid point pattern
      if(!inherits(Xsim, Xclass))
        switch(simtype,
               csr=stop(paste("Internal error:", Xobjectname, "not generated")),
               rmh=stop(paste("Internal error: rmh did not return an",
                              Xobjectname)),
               kppm=stop(paste("Internal error:",
                               "simulate.kppm did not return an",
                               Xobjectname)),
               expr=stop(paste("Evaluating the expression", sQuote("simulate"),
                               "did not yield an", Xobjectname)),
               list=stop(paste("Internal error: list entry was not an",
                               Xobjectname)),
               stop(paste("Internal error:", Xobjectname, "not generated"))
               )
      if(catchpatterns)
        Caughtpatterns[[i]] <- Xsim
      if(savevalues)
        SavedValues[[i]] <- saveresultof(Xsim)
      if(compute.weights) {
        wti <- weightfun(Xsim)
        if(!is.numeric(wti))
          stop("weightfun did not return a numeric value")
        if(length(wti) != 1L)
          stop("weightfun should return a single numeric value")
        weights[i] <- wti
      }
      # apply function safely
      funXsim <- try(do.call(fun, append(list(Xsim), funargs)))
      ok <- !inherits(funXsim, "try-error")
      if(!ok) {
        nerr <- nerr + 1L
        if(nerr > maxnerr)
          stop("Exceeded maximum number of errors")
        cat("[retrying]\n")
      }
    }
    # sanity checks
    ## on the first iteration, verify the simulated curve is compatible
    ## with the data curve (same class, argument name, value name, r grid)
    if(i == 1L) {
      if(!inherits(funXsim, "fv"))
        stop(paste("When applied to a simulated pattern, the function",
                   fname, "did not return an object of class",
                   sQuote("fv")))
      argname.sim <- fvnames(funXsim, ".x")
      valname.sim <- fvnames(funXsim, ".y")
      if(argname.sim != argname)
        stop(paste("The objects returned by", fname,
                   "when applied to a simulated pattern",
                   "and to the data pattern",
                   "are incompatible. They have different argument names",
                   sQuote(argname.sim), "and", sQuote(argname),
                   "respectively"))
      if(valname.sim != valname)
        stop(paste("When", fname,
                   "is applied to a simulated pattern",
                   "it provides an estimate named", sQuote(valname.sim),
                   "whereas the estimate for the data pattern is named",
                   sQuote(valname),
                   ". Try using the argument", sQuote("correction"),
                   "to make them compatible"))
      rfunX <- with(funX, ".x")
      rfunXsim <- with(funXsim, ".x")
      if(!identical(rfunX, rfunXsim))
        stop(paste("When", fname,
                   "is applied to a simulated pattern,",
                   "the values of the argument", sQuote(argname.sim),
                   "are different from those used for the data."))
    }
    if(tran) {
      # extract only the recommended value
      if(use.theory)
        funXsim <- funXsim[, c(argname, valname, "theo")]
      else
        funXsim <- funXsim[, c(argname, valname)]
      # apply the transformation to it
      funXsim <- eval(transform.funXsim)
    }
    # extract the values for simulation i
    simvals.i <- funXsim[[valname]]
    if(length(simvals.i) != nrvals)
      stop("Vectors of function values have incompatible lengths")
    simvals[ , i] <- funXsim[[valname]]
    if(verbose)
      pstate <- progressreport(i, Nsim, state=pstate)
    if(collectrubbish) {
      ## aggressively reclaim memory between iterations (for huge patterns)
      rm(Xsim)
      rm(funXsim)
      gc()
    }
  }
  ## end simulation loop
  if(verbose) {
    cat("\nDone.\n")
    flush.console()
  }
  # ...........................................................
  # save functions and/or patterns if so commanded
  if(savefuns) {
    ## assemble all simulated curves into a single fv object
    alldata <- cbind(rvals, simvals)
    simnames <- paste("sim", 1:Nsim, sep="")
    colnames(alldata) <- c("r", simnames)
    alldata <- as.data.frame(alldata)
    SimFuns <- fv(alldata,
                  argu="r",
                  ylab=attr(funX, "ylab"),
                  valu="sim1",
                  fmla= deparse(. ~ r),
                  alim=attr(funX, "alim"),
                  labl=names(alldata),
                  desc=c("distance argument r",
                         paste("Simulation ", 1:Nsim, sep="")),
                  fname=attr(funX, "fname"),
                  yexp=attr(funX, "yexp"),
                  unitname=unitname(funX))
    fvnames(SimFuns, ".") <- simnames
  }
  if(savepatterns)
    SimPats <- if(simtype == "list") SimDataList else Caughtpatterns
  ######### COMPUTE ENVELOPES #######################
  etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise"
  if(dual) {
    ## first nsim columns -> envelopes; next nsim2 columns -> mean estimate
    jsim <- 1:nsim
    jsim.mean <- nsim + 1:nsim2
  } else {
    jsim <- jsim.mean <- NULL
  }
  result <- envelope.matrix(simvals, funX=funX,
                            jsim=jsim, jsim.mean=jsim.mean,
                            type=etype, alternative=alternative,
                            scale=scale, clamp=clamp,
                            csr=csr, use.theory=use.theory,
                            nrank=nrank, ginterval=ginterval, nSD=nSD,
                            Yname=Yname, do.pwrong=do.pwrong,
                            weights=weights)
  # tack on envelope information
  attr(result, "einfo") <- envelopeInfo
  # tack on functions and/or patterns if so commanded
  if(savefuns)
    attr(result, "simfuns") <- SimFuns
  if(savepatterns) {
    attr(result, "simpatterns") <- SimPats
    attr(result, "datapattern") <- X
  }
  # save function weights
  if(use.weights)
    attr(result, "weights") <- weights
  # undocumented - tack on values of some other quantity
  if(savevalues) {
    attr(result, "simvalues") <- SavedValues
    attr(result, "datavalue") <- saveresultof(X)
  }
  return(result)
}

## Plot method for envelope objects: ensures the upper/lower envelope
## columns are used as the shaded region, then defers to plot.fv.
plot.envelope <- function(x, ..., main) {
  if(missing(main)) main <- short.deparse(substitute(x))
  shade.given <- ("shade" %in% names(list(...)))
  shade.implied <- !is.null(fvnames(x, ".s"))
  if(!(shade.given || shade.implied)) {
    # ensure x has default 'shade' attribute
    # (in case x was produced by an older version of spatstat)
    if(all(c("lo", "hi") %in% colnames(x)))
      fvnames(x, ".s") <- c("lo", "hi")
    else warning("Unable to determine shading for envelope")
  }
  NextMethod("plot", main=main)
}

## Print method for envelope objects: describes how the envelope was
## obtained (simulation type, constraints, significance level) using the
## metadata stored in attr(x, "einfo"), then defers to print.fv.
print.envelope <- function(x, ...)
{
  e <- attr(x, "einfo")
  g <- e$global
  csr <- e$csr
  pois <- e$pois
  ## older objects may lack 'pois'; CSR implies Poisson
  if(is.null(pois)) pois <- csr
  simtype <- e$simtype
  constraints <- e$constraints
  nr <- e$nrank
  nsim <- e$nsim
  V <- e$VARIANCE
  fname <- flat.deparse(attr(x, "ylab"))
  type <- if(V) paste("Pointwise", e$nSD, "sigma") else
          if(g) "Simultaneous" else "Pointwise"
  splat(type, "critical envelopes for", fname,
        "\nand observed value for", sQuote(e$Yname))
  if(!is.null(valname <- e$valname) && waxlyrical('extras'))
    splat("Edge correction:", dQuote(valname))
  ## determine *actual* type of simulation
  descrip <-
    if(csr) "simulations of CSR" else
    if(!is.null(simtype)) {
      switch(simtype,
             csr="simulations of CSR",
             rmh=paste("simulations of fitted",
                       if(pois) "Poisson" else "Gibbs",
                       "model"),
             kppm="simulations of fitted cluster model",
             expr="evaluations of user-supplied expression",
             list="point pattern datasets in user-supplied list",
             funs="columns of user-supplied data")
    } else "simulations of fitted model"
  if(!is.null(constraints) && nzchar(constraints))
    descrip <- paste(descrip, constraints)
  #
  splat("Obtained from", nsim, descrip)
  #
  if(waxlyrical('extras')) {
    if(!is.null(e$dual) && e$dual)
      splat("Theoretical (i.e. null) mean value of", fname,
            "estimated from a separate set of",
            e$nsim2, "simulations")
    if(!is.null(attr(x, "simfuns")))
      splat("(All simulated function values are stored)")
    if(!is.null(attr(x, "simpatterns")))
      splat("(All simulated point patterns are stored)")
  }
  splat("Alternative:", e$alternative)
  if(!V && waxlyrical('extras')) {
    ## significance interpretation!
    ## two-sided pointwise test uses both tails, hence factor 2
    alpha <- if(g) { nr/(nsim+1) } else { 2 * nr/(nsim+1) }
    splat("Significance level of",
          if(g) "simultaneous" else "pointwise",
          "Monte Carlo test:",
          paste0(if(g) nr else 2 * nr,
                 "/", nsim+1),
          "=", signif(alpha, 3))
  }
  if(waxlyrical('gory') && !is.null(pwrong <- attr(x, "pwrong"))) {
    splat("\t[Estimated significance level of pointwise excursions:",
          paste0("pwrong=", signif(pwrong, 3), "]"))
  }
  NextMethod("print")
}

## Summary method for envelope objects: a more detailed account than print,
## including how the upper/lower bounds were constructed.
summary.envelope <- function(object, ...)
{
  e <- attr(object, "einfo")
  g <- e$global
  V <- e$VARIANCE
  nr <- e$nrank
  nsim <- e$nsim
  csr <- e$csr
  pois <- e$pois
  ## older objects may lack 'pois'; CSR implies Poisson
  if(is.null(pois)) pois <- csr
  simtype <- e$simtype
  constraints <- e$constraints
  alternative <- e$alternative
  use.theory <- e$use.theory
  has.theo <- "theo" %in% fvnames(object, "*")
  csr.theo <- csr && has.theo
  use.theory <- if(is.null(use.theory)) csr.theo else (use.theory && has.theo)
  fname <- deparse(attr(object, "ylab"))
  type <- if(V) paste("Pointwise", e$nSD, "sigma") else
          if(g) "Simultaneous" else "Pointwise"
  splat(type, "critical envelopes for", fname,
        "\nand observed value for", sQuote(e$Yname))
  # determine *actual* type of simulation
  descrip <-
    if(csr) "simulations of CSR" else
    if(!is.null(simtype)) {
      switch(simtype,
             csr="simulations of CSR",
             rmh=paste("simulations of fitted",
                       if(pois) "Poisson" else "Gibbs",
                       "model"),
             kppm="simulations of fitted cluster model",
             expr="evaluations of user-supplied expression",
             list="point pattern datasets in user-supplied list",
             funs="columns of user-supplied data",
             "simulated point patterns")
    } else "simulations of fitted model"
  if(!is.null(constraints) && nzchar(constraints))
    descrip <- paste(descrip, constraints)
  #
  splat("Obtained from", nsim, descrip)
  #
  if(waxlyrical('extras')) {
    if(!is.null(e$dual) && e$dual)
      splat("Theoretical (i.e. null) mean value of", fname,
            "estimated from a separate set of",
            e$nsim2, "simulations")
    if(!is.null(attr(object, "simfuns")))
      splat("(All", nsim, "simulated function values",
            "are stored in attr(,", dQuote("simfuns"), ") )")
    if(!is.null(attr(object, "simpatterns")))
      splat("(All", nsim, "simulated point patterns",
            "are stored in attr(,", dQuote("simpatterns"), ") )")
  }
  #
  splat("Alternative:", alternative)
  if(V) {
    # nSD envelopes
    splat(switch(alternative,
                 two.sided = "Envelopes",
                 "Critical boundary"),
          "computed as sample mean",
          switch(alternative,
                 two.sided="plus/minus",
                 less="minus",
                 greater="plus"),
          e$nSD, "sample standard deviations")
  } else {
    # critical envelopes
    lo.ord <- if(nr == 1L) "minimum" else paste(ordinal(nr), "smallest")
    hi.ord <- if(nr == 1L) "maximum" else paste(ordinal(nr), "largest")
    if(g)
      splat(switch(alternative,
                   two.sided = "Envelopes",
                   "Critical boundary"),
            "computed as",
            if(use.theory) "theoretical curve" else "mean of simulations",
            switch(alternative,
                   two.sided="plus/minus",
                   less="minus",
                   greater="plus"),
            hi.ord,
            "simulated value of maximum",
            switch(alternative,
                   two.sided="absolute",
                   less="negative",
                   greater="positive"),
            "deviation")
    else {
      if(alternative != "less")
        splat("Upper envelope: pointwise", hi.ord, "of simulated curves")
      if(alternative != "greater")
        splat("Lower envelope: pointwise", lo.ord, "of simulated curves")
    }
    ## two-sided pointwise test uses both tails, hence factor 2
    symmetric <- (alternative == "two.sided") && !g
    alpha <- if(!symmetric) { nr/(nsim+1) } else { 2 * nr/(nsim+1) }
    splat("Significance level of Monte Carlo test:",
          paste0(if(!symmetric) nr else 2 * nr,
                 "/", nsim+1),
          "=", alpha)
  }
  splat("Data:", e$Yname)
  return(invisible(NULL))
}

#  envelope.matrix
#  core functionality to compute envelope values
#  theory = funX[["theo"]]
#  observed = fX
envelope.matrix <- function(Y, ...,
                            rvals=NULL, observed=NULL, theory=NULL,
                            funX=NULL,
                            nsim=NULL, nsim2=NULL,
                            jsim=NULL, jsim.mean=NULL,
                            type=c("pointwise", "global", "variance"),
                            alternative=c("two.sided", "less", "greater"),
                            scale = NULL,
                            clamp=FALSE,
                            csr=FALSE, use.theory = csr,
                            nrank=1, ginterval=NULL, nSD=2,
                            savefuns=FALSE,
                            check=TRUE,
                            Yname=NULL,
                            do.pwrong=FALSE,
                            weights=NULL,
                            precomputed=NULL) {
  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
  type <- match.arg(type)
  alternative <- match.arg(alternative)
  if(!is.null(funX))
    stopifnot(is.fv(funX))
  pwrong <- NULL
  use.weights <- !is.null(weights)
  ## 'cheat' mode: envelope limits were supplied ready-made in 'precomputed'
  cheat <- !is.null(precomputed)
  if(is.null(rvals) && is.null(observed) && !is.null(funX)) {
    # assume funX is summary function for observed data
    rvals <- with(funX, .x)
    observed <- with(funX, .y)
    theory <- if(use.theory && "theo" %in% names(funX)) with(funX, theo) else NULL
  } else if(check) {
    # validate vectors of data
    if(is.null(rvals)) stop("rvals must be supplied")
    if(is.null(observed)) stop("observed must be supplied")
    if(!is.null(Y)) stopifnot(length(rvals) == nrow(Y))
    stopifnot(length(observed) == length(rvals))
  }
  if(use.theory) {
    ## can only use the theoretical curve if one was actually provided
    use.theory <- !is.null(theory)
    if(use.theory && check) stopifnot(length(theory) == length(rvals))
  }
  ## columns of Y are the simulated curves; fX is the observed curve
  simvals <- Y
  fX <- observed
  atr <- if(!is.null(funX)) attributes(funX) else
         list(alim=range(rvals),
              ylab=quote(f(r)),
              yexp=quote(f(r)),
              fname="f")
  fname <- atr$fname
  if(!cheat) {
    # ................   standard calculation  .....................
    # validate weights
    if(use.weights)
      check.nvector(weights, ncol(simvals),
                    things="simulated functions", naok=TRUE)
    # determine numbers of columns used
    Ncol <- ncol(simvals)
    if(Ncol < 2)
      stop("Need at least 2 columns of function values")
    if(is.null(jsim) && !is.null(nsim)) {
      # usual case - 'nsim' determines 'jsim'
      if(nsim > Ncol)
        stop(paste(nsim, "simulations are not available; only",
                   Ncol, "columns provided"))
      jsim <- 1:nsim
      if(!is.null(nsim2)) {
        # 'nsim2' determines 'jsim.mean'
        if(nsim + nsim2 > Ncol)
          stop(paste(nsim, "+", nsim2, "=", nsim+nsim2,
                     "simulations are not available; only",
                     Ncol, "columns provided"))
        jsim.mean <- nsim + 1:nsim2
      }
    }
    restrict.columns <- !is.null(jsim)
    dual <- !is.null(jsim.mean)
  } else {
    # ................ precomputed values ..................
    # validate weights
    if(use.weights)
      check.nvector(weights, nsim,
                    things="simulations", naok=TRUE)
    restrict.columns <- FALSE
    dual <- FALSE
  }
  shadenames <- NULL
  switch(type,
         pointwise = {
           # ....... POINTWISE ENVELOPES ...............................
           if(cheat) {
             stopifnot(checkfields(precomputed, c("lo", "hi")))
             lo <- precomputed$lo
             hi <- precomputed$hi
           } else {
             simvals[is.infinite(simvals)] <- NA
             if(restrict.columns) {
               simvals <- simvals[,jsim]
               if(use.weights) weights <- weights[jsim]
             }
             nsim <- ncol(simvals)
             nsim.mean <- NULL
             ## pointwise limits are the nrank-th extreme order statistics
             if(nrank == 1L) {
               lohi <- apply(simvals, 1L, range)
             } else {
               lohi <- apply(simvals, 1L,
                             # function(x, n) { sort(x)[n] },
                             orderstats,
                             k=c(nrank, nsim-nrank+1L))
             }
             lo <- lohi[1L,]
             hi <- lohi[2L,]
           }
           lo.name <- "lower pointwise envelope of %s from simulations"
           hi.name <- "upper pointwise envelope of %s from simulations"
           ##
           ## one-sided alternatives: the unused boundary is set to +/-Inf
           switch(alternative,
                  two.sided = { },
                  less = {
                    hi <- rep.int(Inf, length(hi))
                    hi.name <- "infinite upper limit"
                  },
                  greater = {
                    lo <- rep.int(-Inf, length(lo))
                    lo.name <- "infinite lower limit"
                  })
           #
           if(use.theory) {
             results <- data.frame(r=rvals,
                                   obs=fX,
                                   theo=theory,
                                   lo=lo,
                                   hi=hi)
           } else {
             m <- if(cheat) precomputed$mmean else
                  if(!use.weights) apply(simvals, 1L, mean, na.rm=TRUE) else
                  apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE)
             results <- data.frame(r=rvals,
                                   obs=fX,
                                   mmean=m,
                                   lo=lo,
                                   hi=hi)
           }
           shadenames <- c("lo", "hi")
           if(do.pwrong) {
             # estimate the p-value for the 'wrong test'
             ## i.e. the chance that the observed curve exits the pointwise
             ## band SOMEWHERE, under the null (multiple-testing rate)
             if(cheat) {
               pwrong <- precomputed$pwrong
               do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE)
             } else {
               dataranks <- t(apply(simvals, 1, rank, ties.method="random"))
               upper.signif <- (dataranks <= nrank)
               lower.signif <- (dataranks >= nsim-nrank+1L)
               is.signif <- switch(alternative,
                                   less = lower.signif,
                                   greater = upper.signif,
                                   two.sided = lower.signif | upper.signif)
               # is.signif.somewhere <- apply(is.signif, 2, any)
               is.signif.somewhere <- matcolany(is.signif)
               pwrong <- sum(is.signif.somewhere)/nsim
             }
           }
}, global = { # ..... SIMULTANEOUS ENVELOPES .......................... if(cheat) { # ... use precomputed values .. stopifnot(checkfields(precomputed, c("lo", "hi"))) lo <- precomputed$lo hi <- precomputed$hi if(use.theory) { reference <- theory } else { stopifnot(checkfields(precomputed, "mmean")) reference <- precomputed$mmean } nsim.mean <- NULL domain <- rep.int(TRUE, length(rvals)) } else { # ... normal case: compute envelopes from simulations if(!is.null(ginterval)) { domain <- (rvals >= ginterval[1L]) & (rvals <= ginterval[2L]) funX <- funX[domain, ] simvals <- simvals[domain, ] } else domain <- rep.int(TRUE, length(rvals)) simvals[is.infinite(simvals)] <- NA if(use.theory) { reference <- theory[domain] if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } nsim.mean <- NULL } else if(dual) { # Estimate the mean from one set of columns # Form envelopes from another set of columns simvals.mean <- simvals[, jsim.mean] # mmean <- reference <- if(!use.weights) apply(simvals.mean, 1L, mean, na.rm=TRUE) else apply(simvals.mean, 1L, weighted.mean, w=weights[jsim.mean], na.rm=TRUE) nsim.mean <- ncol(simvals.mean) # retain only columns used for envelope simvals <- simvals[, jsim] } else { # Compute the mean and envelopes using the same data if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } # mmean <- reference <- if(!use.weights) apply(simvals.mean, 1L, mean, na.rm=TRUE) else apply(simvals.mean, 1L, weighted.mean, w=weights, na.rm=TRUE) nsim.mean <- NULL } nsim <- ncol(simvals) # compute deviations deviations <- sweep(simvals, 1L, reference) deviations <- switch(alternative, two.sided = abs(deviations), greater = if(clamp) pmax(0, deviations) else deviations, less = if(clamp) pmax(0, -deviations) else (-deviations)) deviations <- matrix(deviations, nrow=nrow(simvals), ncol=ncol(simvals)) ## rescale ? 
sc <- 1 if(!is.null(scale)) { stopifnot(is.function(scale)) sc <- scale(rvals) sname <- "scale(r)" ans <- check.nvector(sc, length(rvals), things="values of r", fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (sc <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[rvals > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) sc[bad] <- 1 } deviations <- sweep(deviations, 1L, sc, "/") } ## compute max (scaled) deviations suprema <- apply(deviations, 2L, max, na.rm=TRUE) # ranked deviations dmax <- sort(suprema)[nsim-nrank+1L] # simultaneous bands lo <- reference - sc * dmax hi <- reference + sc * dmax } lo.name <- "lower critical boundary for %s" hi.name <- "upper critical boundary for %s" switch(alternative, two.sided = { }, less = { hi <- rep.int(Inf, length(hi)) hi.name <- "infinite upper boundary" }, greater = { lo <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower boundary" }) if(use.theory) results <- data.frame(r=rvals[domain], obs=fX[domain], theo=reference, lo=lo, hi=hi) else results <- data.frame(r=rvals[domain], obs=fX[domain], mmean=reference, lo=lo, hi=hi) shadenames <- c("lo", "hi") if(do.pwrong) warning(paste("Argument", sQuote("do.pwrong=TRUE"), "ignored;", "it is not relevant to global envelopes")) }, variance={ # ....... POINTWISE MEAN, VARIANCE etc ...................... if(cheat) { # .... use precomputed values .... stopifnot(checkfields(precomputed, c("Ef", "varf"))) Ef <- precomputed$Ef varf <- precomputed$varf } else { # .... 
normal case: compute from simulations simvals[is.infinite(simvals)] <- NA if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } nsim <- ncol(simvals) if(!use.weights) { Ef <- apply(simvals, 1L, mean, na.rm=TRUE) varf <- apply(simvals, 1L, var, na.rm=TRUE) } else { Ef <- apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE) varf <- apply(simvals, 1L, weighted.var, w=weights, na.rm=TRUE) } } nsim.mean <- NULL # derived quantities sd <- sqrt(varf) stdres <- (fX-Ef)/sd stdres[!is.finite(stdres)] <- NA # critical limits lo <- Ef - nSD * sd hi <- Ef + nSD * sd lo.name <- paste("lower", nSD, "sigma critical limit for %s") hi.name <- paste("upper", nSD, "sigma critical limit for %s") # confidence interval loCI <- Ef - nSD * sd/sqrt(nsim) hiCI <- Ef + nSD * sd/sqrt(nsim) loCI.name <- paste("lower", nSD, "sigma confidence bound", "for mean of simulated %s") hiCI.name <- paste("upper", nSD, "sigma confidence bound", "for mean of simulated %s") ## switch(alternative, two.sided = { }, less = { hi <- hiCI <- rep.int(Inf, length(hi)) hi.name <- "infinite upper boundary" hiCI.name <- "infinite upper confidence limit" }, greater = { lo <- loCI <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower boundary" loCI.name <- "infinite lower confidence limit" }) # put together if(use.theory) { results <- data.frame(r=rvals, obs=fX, theo=theory, lo=lo, hi=hi) shadenames <- c("lo", "hi") morestuff <- data.frame(mmean=Ef, var=varf, res=fX-Ef, stdres=stdres, loCI=loCI, hiCI=hiCI) loCIlabel <- if(alternative == "greater") "-infinity" else makefvlabel(NULL, NULL, fname, "loCI") hiCIlabel <- if(alternative == "less") "infinity" else makefvlabel(NULL, NULL, fname, "hiCI") mslabl <- c(makefvlabel(NULL, "bar", fname), makefvlabel("var", "hat", fname), makefvlabel("res", "hat", fname), makefvlabel("stdres", "hat", fname), loCIlabel, hiCIlabel) wted <- if(use.weights) "weighted " else NULL msdesc <- c(paste0(wted, "sample mean of %s from simulations"), 
paste0(wted, "sample variance of %s from simulations"), "raw residual", "standardised residual", loCI.name, hiCI.name) } else { results <- data.frame(r=rvals, obs=fX, mmean=Ef, lo=lo, hi=hi) shadenames <- c("lo", "hi") morestuff <- data.frame(var=varf, res=fX-Ef, stdres=stdres, loCI=loCI, hiCI=hiCI) loCIlabel <- if(alternative == "greater") "-infinity" else makefvlabel(NULL, NULL, fname, "loCI") hiCIlabel <- if(alternative == "less") "infinity" else makefvlabel(NULL, NULL, fname, "hiCI") mslabl <- c(makefvlabel("var", "hat", fname), makefvlabel("res", "hat", fname), makefvlabel("stdres", "hat", fname), loCIlabel, hiCIlabel) msdesc <- c(paste0(if(use.weights) "weighted " else NULL, "sample variance of %s from simulations"), "raw residual", "standardised residual", loCI.name, hiCI.name) } if(do.pwrong) { # estimate the p-value for the 'wrong test' if(cheat) { pwrong <- precomputed$pwrong do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE) } else { upper.signif <- (simvals > hi) lower.signif <- (simvals < lo) is.signif <- switch(alternative, less = lower.signif, greater = upper.signif, two.sided = lower.signif | upper.signif) # is.signif.somewhere <- apply(is.signif, 2, any) is.signif.somewhere <- matcolany(is.signif) pwrong <- sum(is.signif.somewhere)/nsim } } } ) ############ WRAP UP ######################### if(use.theory) { # reference is computed curve `theo' reflabl <- makefvlabel(NULL, NULL, fname, "theo") refdesc <- paste0("theoretical value of %s", if(csr) " for CSR" else NULL) } else { # reference is sample mean of simulations reflabl <- makefvlabel(NULL, "bar", fname) refdesc <- paste0(if(use.weights) "weighted " else NULL, "sample mean of %s from simulations") } lolabl <- if(alternative == "greater") "-infinity" else makefvlabel(NULL, "hat", fname, "lo") hilabl <- if(alternative == "less") "infinity" else makefvlabel(NULL, "hat", fname, "hi") result <- fv(results, argu="r", ylab=atr$ylab, valu="obs", fmla= deparse(. 
~ r), alim=intersect.ranges(atr$alim, range(results$r)), labl=c("r", makefvlabel(NULL, "hat", fname, "obs"), reflabl, lolabl, hilabl), desc=c("distance argument r", "observed value of %s for data pattern", refdesc, lo.name, hi.name), fname=atr$fname, yexp =atr$yexp) # columns to be plotted by default dotty <- c("obs", if(use.theory) "theo" else "mmean", "hi", "lo") if(type == "variance") { # add more stuff result <- bind.fv(result, morestuff, mslabl, msdesc) if(use.theory) dotty <- c(dotty, "mmean") } fvnames(result, ".") <- dotty fvnames(result, ".s") <- shadenames unitname(result) <- unitname(funX) class(result) <- c("envelope", class(result)) # tack on envelope information attr(result, "einfo") <- list(global = (type =="global"), ginterval = ginterval, alternative=alternative, scale = scale, clamp = clamp, csr = csr, use.theory = use.theory, csr.theo = csr && use.theory, simtype = "funs", constraints = "", nrank = nrank, nsim = nsim, VARIANCE = (type == "variance"), nSD = nSD, valname = NULL, dual = dual, nsim = nsim, nsim2 = nsim.mean, Yname = Yname, do.pwrong=do.pwrong, use.weights=use.weights) # tack on saved functions if(savefuns) { alldata <- cbind(rvals, simvals) simnames <- paste("sim", 1:nsim, sep="") colnames(alldata) <- c("r", simnames) alldata <- as.data.frame(alldata) SimFuns <- fv(alldata, argu="r", ylab=atr$ylab, valu="sim1", fmla= deparse(. ~ r), alim=atr$alim, labl=names(alldata), desc=c("distance argument r", paste("Simulation ", 1:nsim, sep="")), unitname=unitname(funX)) fvnames(SimFuns, ".") <- simnames attr(result, "simfuns") <- SimFuns } if(do.pwrong) attr(result, "pwrong") <- pwrong if(use.weights) attr(result, "weights") <- weights return(result) } envelope.envelope <- function(Y, fun=NULL, ..., transform=NULL, global=FALSE, VARIANCE=FALSE) { Yname <- short.deparse(substitute(Y)) stopifnot(inherits(Y, "envelope")) Yorig <- Y aargh <- list(...) 
X <- attr(Y, "datapattern") sf <- attr(Y, "simfuns") sp <- attr(Y, "simpatterns") wt <- attr(Y, "weights") einfo <- attr(Y, "einfo") csr <- aargh$internal$csr %orifnull% einfo$csr if(is.null(fun) && is.null(sf)) { # No simulated functions - must compute them from simulated patterns if(is.null(sp)) stop(paste("Cannot compute envelope:", "Y does not contain simulated functions", "(was not generated with savefuns=TRUE)", "and does not contain simulated patterns", "(was not generated with savepatterns=TRUE)")) # set default fun=Kest fun <- Kest } if(!is.null(fun)) { # apply new function # point patterns are required if(is.null(sp)) stop(paste("Object Y does not contain simulated point patterns", "(attribute", dQuote("simpatterns"), ");", "cannot apply a new", sQuote("fun"))) if(is.null(X)) stop(paste("Cannot apply a new", sQuote("fun"), "; object Y generated by an older version of spatstat")) ## send signal if simulations were CSR internal <- aargh$internal if(csr) { if(is.null(internal)) internal <- list() internal$csr <- TRUE } ## compute new envelope result <- do.call(envelope, resolve.defaults(list(Y=X, fun=fun, simulate=sp), aargh, list(transform=transform, global=global, VARIANCE=VARIANCE, internal=internal, Yname=Yname, nsim=einfo$nsim, nsim2=einfo$nsim2, weights=wt), .StripNull=TRUE)) } else { # compute new envelope with existing simulated functions if(is.null(sf)) stop(paste("Y does not contain a", dQuote("simfuns"), "attribute", "(it was not generated with savefuns=TRUE)")) if(!is.null(transform)) { # Apply transformation to Y and sf stopifnot(is.expression(transform)) ## cc <- dotexpr.to.call(transform, "Y", "eval.fv") cc <- inject.expr("with(Y, .)", transform) Y <- eval(cc) ## cc <- dotexpr.to.call(transform, "sf", "eval.fv") cc <- inject.expr("with(sf, .)", transform) sf <- eval(cc) } # extract simulated function values df <- as.data.frame(sf) rname <- fvnames(sf, ".x") df <- df[, (names(df) != rname)] # interface with 'envelope.matrix' etype <- if(global) 
"global" else if(VARIANCE) "variance" else "pointwise" result <- do.call(envelope.matrix, resolve.defaults(list(Y=as.matrix(df)), aargh, list(type=etype, csr=csr, funX=Y, Yname=Yname, weights=wt), .StripNull=TRUE)) } if(!is.null(transform)) { # post-process labels labl <- attr(result, "labl") dnames <- colnames(result) dnames <- dnames[dnames %in% fvnames(result, ".")] # expand "." ud <- as.call(lapply(c("cbind", dnames), as.name)) dont.complain.about(ud) expandtransform <- eval(substitute(substitute(tr, list(.=ud)), list(tr=transform[[1L]]))) # compute new labels attr(result, "fname") <- attr(Yorig, "fname") mathlabl <- as.character(fvlegend(result, expandtransform)) # match labels to columns evars <- all.vars(expandtransform) used.dotnames <- evars[evars %in% dnames] mathmap <- match(colnames(result), used.dotnames) okmath <- !is.na(mathmap) # update appropriate labels labl[okmath] <- mathlabl[mathmap[okmath]] attr(result, "labl") <- labl } # Tack on envelope info copyacross <- c("Yname", "csr.theo", "use.theory", "simtype", "constraints") attr(result, "einfo")[copyacross] <- attr(Yorig, "einfo")[copyacross] attr(result, "einfo")$csr <- csr # Save data return(result) } pool.envelope <- local({ pool.envelope <- function(..., savefuns=FALSE, savepatterns=FALSE) { Yname <- short.deparse(sys.call()) if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1L, 40L), "[..]") Elist <- unname(list(...)) nE <- length(Elist) if(nE == 0) return(NULL) #' ........ validate envelopes ..................... #' All arguments must be envelopes notenv <- !unlist(lapply(Elist, inherits, what="envelope")) if(any(notenv)) { n <- sum(notenv) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notenv)), ngettext(n, "does not", "do not"), "belong to the class", dQuote("envelope")) stop(why) } ## Only one envelope? if(nE == 1) return(Elist[[1L]]) ## envelopes must be compatible ok <- do.call(compatible, Elist) if(!ok) stop("Envelopes are not compatible") ## ... 
reconcile parameters in different envelopes ....... eilist <- lapply(Elist, attr, which="einfo") global <- resolveEinfo(eilist, "global", FALSE) ginterval <- resolveEinfo(eilist, "ginterval", NULL, atomic=FALSE) VARIANCE <- resolveEinfo(eilist, "VARIANCE", FALSE) alternative <- resolveEinfo(eilist, "alternative", FALSE) scale <- resolveEinfo(eilist, "scale", NULL, atomic=FALSE) clamp <- resolveEinfo(eilist, "clamp", FALSE) resolveEinfo(eilist, "simtype", "funs", "Envelopes were generated using different types of simulation") resolveEinfo(eilist, "constraints", "", "Envelopes were generated using different types of conditioning") resolveEinfo(eilist, "csr.theo", FALSE, NULL) csr <- resolveEinfo(eilist, "csr", FALSE, NULL) use.weights <- resolveEinfo(eilist, "use.weights" , FALSE, "Weights were used in some, but not all, envelopes: they will be ignored") use.theory <- resolveEinfo(eilist, "use.theory", csr, NULL) ## weights <- if(use.weights) unlist(lapply(Elist, attr, which="weights")) else NULL type <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" ## ........ validate saved functions ..................... 
if(savefuns || !VARIANCE) { ## Individual simulated functions are required SFlist <- lapply(Elist, attr, which="simfuns") isnul <- unlist(lapply(SFlist, is.null)) if(any(isnul)) { n <- sum(isnul) comply <- if(!VARIANCE) "compute the envelope:" else "save the simulated functions:" why <- paste("Cannot", comply, ngettext(n, "argument", "arguments"), commasep(which(isnul)), ngettext(n, "does not", "do not"), "contain a", dQuote("simfuns"), "attribute", "(not generated with savefuns=TRUE)") stop(why) } ## Simulated functions must be the same function fnames <- unique(lapply(SFlist, attr, which="fname")) if(length(fnames) > 1L) { fnames <- unlist(lapply(fnames, flatfname)) stop(paste("Envelope objects contain values", "of different functions:", commasep(sQuote(fnames)))) } ## vectors of r values must be identical rlist <- lapply(SFlist, getrvals) rvals <- rlist[[1L]] samer <- unlist(lapply(rlist, identical, y=rvals)) if(!all(samer)) stop(paste("Simulated function values are not compatible", "(different values of function argument)")) ## Extract function values and assemble into one matrix matlist <- lapply(SFlist, getdotvals) SFmatrix <- do.call(cbind, matlist) } ## compute pooled envelope switch(type, pointwise = { result <- envelope(SFmatrix, funX=Elist[[1L]], type=type, alternative=alternative, clamp=clamp, csr=csr, use.theory=use.theory, Yname=Yname, weights=weights, savefuns=savefuns) }, global = { simfunmatrix <- if(is.null(ginterval)) SFmatrix else { ## savefuns have not yet been clipped to ginterval ## while envelope data have been clipped. 
domain <- (rvals >= ginterval[1L]) & (rvals <= ginterval[2L]) SFmatrix[domain, , drop=FALSE] } result <- envelope(simfunmatrix, funX=Elist[[1L]], type=type, alternative=alternative, scale=scale, clamp=clamp, csr=csr, use.theory=use.theory, ginterval=ginterval, Yname=Yname, weights=weights, savefuns=savefuns) }, variance = { ## Pool sample means and variances nsims <- unlist(lapply(eilist, getElement, name="nsim")) mmeans <- lapply(Elist, getElement, name="mmean") vars <- lapply(Elist, getElement, name="var") mmeans <- matrix(unlist(mmeans), ncol=nE) vars <- matrix(unlist(vars), ncol=nE) if(!use.weights) { w.mean <- nsims d.mean <- sum(nsims) w.var <- nsims - 1 d.var <- sum(nsims) - 1 } else { weightlist <- lapply(Elist, attr, which="weights") w.mean <- unlist(lapply(weightlist, sum)) d.mean <- sum(w.mean) ssw <- unlist(lapply(weightlist, meansqfrac)) ## meansqfrac : function(x) {sum((x/sum(x))^2)})) w.var <- w.mean * (1 - ssw) d.var <- d.mean * (1 - sum(ssw)) } poolmmean <- as.numeric(mmeans %*% matrix(w.mean/d.mean, ncol=1L)) within <- vars %*% matrix(w.var, ncol=1L) between <- ((mmeans - poolmmean[])^2) %*% matrix(w.mean, ncol=1L) poolvar <- as.numeric((within + between)/d.var) ## feed precomputed data to envelope.matrix pc <- list(Ef=poolmmean[], varf=poolvar[]) nsim <- sum(nsims) result <- envelope.matrix(NULL, funX=Elist[[1L]], type=type, alternative=alternative, csr=csr, Yname=Yname, weights=weights, savefuns=savefuns, nsim=nsim, precomputed=pc) }) ## Copy envelope info that is not handled by envelope.matrix copyacross <- c("Yname", "csr.theo", "use.theory", "simtype", "constraints") attr(result, "einfo")[copyacross] <- attr(Elist[[1L]], "einfo")[copyacross] ## ..............saved patterns ..................... 
if(savepatterns) { SPlist <- lapply(Elist, attr, which="simpatterns") isnul <- unlist(lapply(SPlist, is.null)) if(any(isnul)) { n <- sum(isnul) why <- paste("Cannot save the simulated patterns:", ngettext(n, "argument", "arguments"), commasep(which(isnul)), ngettext(n, "does not", "do not"), "contain a", dQuote("simpatterns"), "attribute", "(not generated with savepatterns=TRUE)") warning(why) } else { attr(result, "simpatterns") <- Reduce(append, SPlist) } } ## ..............saved summary functions ................ if(savefuns) { alldata <- cbind(rvals, SFmatrix) Nsim <- ncol(SFmatrix) simnames <- paste0("sim", 1:Nsim) colnames(alldata) <- c("r", simnames) alldata <- as.data.frame(alldata) SFtemplate <- SFlist[[1L]] SimFuns <- fv(alldata, argu="r", ylab=attr(SFtemplate, "ylab"), valu="sim1", fmla= deparse(. ~ r), alim=attr(SFtemplate, "alim"), labl=names(alldata), desc=c("distance argument r", paste("Simulation ", 1:Nsim, sep="")), fname=attr(SFtemplate, "fname"), yexp=attr(SFtemplate, "yexp"), unitname=unitname(SFtemplate)) fvnames(SimFuns, ".") <- simnames attr(result, "simfuns") <- SimFuns } dotnames <- lapply(Elist, fvnames, a=".") dn <- dotnames[[1L]] if(all(unlist(lapply(dotnames, identical, y=dn)))) fvnames(result, ".") <- dn shadenames <- lapply(Elist, fvnames, a=".s") sh <- shadenames[[1L]] if(all(unlist(lapply(shadenames, identical, y=sh)))) fvnames(result, ".s") <- sh return(result) } getrvals <- function(z) { as.matrix(z)[, fvnames(z, ".x")] } getdotvals <- function(z) { as.matrix(z)[, fvnames(z, "."), drop=FALSE] } meansqfrac <- function(x) {sum((x/sum(x))^2)} pool.envelope }) # resolve matching entries in different envelope objects # x is a list of envelope info objects resolveEinfo <- function(x, what, fallback, warn, atomic=TRUE) { if(atomic) { y <- unique(unlist(lapply(x, getElement, name=what))) if(length(y) == 1L) return(y) } else { y <- unique(lapply(x, getElement, name=what)) if(length(y) == 1L) return(y[[1L]]) } if(missing(warn)) warn <- 
paste("Envelopes were generated using different values",
        "of argument", paste(sQuote(what), ";", sep=""),
        "reverting to default value")
  if(!is.null(warn)) warning(warn, call.=FALSE)
  return(fallback)
}

as.data.frame.envelope <- function(x, ..., simfuns=FALSE) {
  ## Convert an 'envelope' object to a data frame.
  ## If simfuns=TRUE and the object carries its individual simulated
  ## functions (attribute "simfuns", saved when the envelope was created
  ## with savefuns=TRUE), those columns are appended before conversion.
  if(simfuns && !is.null(sf <- attr(x, "simfuns"))) {
    # tack on the simulated functions as well
    y <- as.data.frame(bind.fv(x, sf, clip=TRUE))
    return(y)
  }
  ## otherwise defer to the inherited (fv) conversion method
  NextMethod("as.data.frame")
}
spatstat/R/listof.R0000755000176200001440000000214713115271120013741 0ustar liggesusers#
#   listof.R
#
#   Methods for class `listof'
#
#   plot.listof is defined in plot.splitppp.R
#
"[<-.listof" <- function(x, i, value) {
  ## Subset-replacement method for 'listof' objects.
  # invoke list method
  class(x) <- "list"
  x[i] <- value
  # then make it a 'listof' object too
  class(x) <- c("listof", class(x))
  x
}

summary.listof <- function(object, ...) {
  ## Summarise each component of a 'listof' object;
  ## the result prints via print.summary.listof below.
  x <- lapply(object, summary, ...)
  class(x) <- "summary.listof"
  x
}

print.summary.listof <- function(x, ...) {
  ## Print a 'summary.listof' by re-using the 'listof' print method.
  class(x) <- "listof"
  print(x)
  invisible(NULL)
}

listof <- function(...) {
  ## Create a 'listof' object from the given components.
  ## (Deprecation warning is currently disabled.)
  # warn.once("listof",
  #           "The class listof will be Deprecated",
  #           "in future versions of spatstat.",
  #           "Use anylist or solist")
  stuff <- list(...)
class(stuff) <- c("listof", class(stuff)) return(stuff) } as.listof <- function(x) { if(!is.list(x)) x <- list(x) if(!inherits(x, "listof")) class(x) <- c("listof", class(x)) # warn.once("listof", # "The class listof will be Deprecated", # "in future versions of spatstat.", # "Use anylist or solist") return(x) } as.layered.listof <- function(X) { layered(LayerList=X) } spatstat/R/linalg.R0000755000176200001440000001532513115271120013711 0ustar liggesusers# # linalg.R # # Linear Algebra # # $Revision: 1.23 $ $Date: 2017/06/05 10:31:58 $ # sumouter <- function(x, w=NULL, y=x) { #' compute matrix sum_i (w[i] * outer(x[i,], y[i,])) stopifnot(is.matrix(x)) weighted <- !is.null(w) symmetric <- missing(y) || identical(x,y) if(weighted) { if(length(dim(w)) > 1) stop("w should be a vector") w <- as.numeric(w) check.nvector(w, nrow(x), things="rows of x") } if(!symmetric) { stopifnot(is.matrix(y)) stopifnot(nrow(x) == nrow(y)) } #' transpose (compute outer squares of columns) tx <- t(x) if(!symmetric) ty <- t(y) #' check for NA etc ok <- apply(is.finite(tx), 2, all) if(!symmetric) ok <- ok & apply(is.finite(ty), 2, all) if(weighted) ok <- ok & is.finite(w) #' remove NA etc if(!all(ok)) { tx <- tx[ , ok, drop=FALSE] if(!symmetric) ty <- ty[ , ok, drop=FALSE] if(weighted) w <- w[ok] } #' call C code if(symmetric) { n <- ncol(tx) p <- nrow(tx) if(is.null(w)) { zz <- .C("Csumouter", x=as.double(tx), n=as.integer(n), p=as.integer(p), y=as.double(numeric(p * p)), PACKAGE = "spatstat") } else { zz <- .C("Cwsumouter", x=as.double(tx), n=as.integer(n), p=as.integer(p), w=as.double(w), y=as.double(numeric(p * p)), PACKAGE = "spatstat") } out <- matrix(zz$y, p, p) if(!is.null(nama <- colnames(x))) dimnames(out) <- list(nama, nama) } else { n <- ncol(tx) px <- nrow(tx) py <- nrow(ty) if(is.null(w)) { zz <- .C("Csum2outer", x=as.double(tx), y=as.double(ty), n=as.integer(n), px=as.integer(px), py=as.integer(py), z=as.double(numeric(px * py)), PACKAGE = "spatstat") } else { zz <- 
.C("Cwsum2outer",
                 x=as.double(tx),
                 y=as.double(ty),
                 n=as.integer(n),
                 px=as.integer(px),
                 py=as.integer(py),
                 w=as.double(w),
                 z=as.double(numeric(px * py)),
                 PACKAGE = "spatstat")
      ## fix: PACKAGE was missing from this one .C call (every sibling
      ## call specifies it); without it, symbol lookup scans all loaded
      ## DLLs and could bind a foreign "Cwsum2outer" routine.
    }
    out <- matrix(zz$z, px, py)
    namx <- colnames(x)
    namy <- colnames(y)
    if(!is.null(namx) || !is.null(namy))
      dimnames(out) <- list(namx, namy)
  }
  return(out)
}

quadform <- function(x, v) {
  #' compute vector of values y[i] = x[i, ] %*% v %*% t(x[i,])
  #' (v defaults to the identity, giving squared row norms);
  #' rows of x containing non-finite values yield NA in the result
  stopifnot(is.matrix(x))
  p <- ncol(x)
  n <- nrow(x)
  nama <- rownames(x)
  # transpose (evaluate quadratic form for each column)
  tx <- t(x)
  # keep only columns (original rows) that are entirely finite
  ok <- apply(is.finite(tx), 2, all)
  allok <- all(ok)
  if(!allok) {
    tx <- tx[ , ok, drop=FALSE]
    n <- ncol(tx)
  }
  if(missing(v)) {
    v <- diag(rep.int(1, p))
  } else {
    stopifnot(is.matrix(v))
    if(nrow(v) != ncol(v)) stop("v should be a square matrix")
    stopifnot(ncol(x) == nrow(v))
  }
  z <- .C("Cquadform",
          x=as.double(tx),
          n=as.integer(n),
          p=as.integer(p),
          v=as.double(v),
          y=as.double(numeric(n)),
          PACKAGE = "spatstat")
  result <- z$y
  names(result) <- nama[ok]
  if(allok) return(result)
  # re-insert NA for the rows that were dropped above
  fullresult <- rep.int(NA_real_, length(ok))
  fullresult[ok] <- result
  names(fullresult) <- nama
  return(fullresult)
}

bilinearform <- function(x, v, y) {
  #' compute vector of values z[i] = x[i, ] %*% v %*% t(y[i,])
  #' x and y must have identical dimensions; rows with non-finite
  #' values in either matrix yield NA in the result
  stopifnot(is.matrix(x))
  stopifnot(is.matrix(y))
  stopifnot(identical(dim(x), dim(y)))
  p <- ncol(x)
  n <- nrow(x)
  nama <- rownames(x)
  # transpose (evaluate quadratic form for each column)
  tx <- t(x)
  ty <- t(y)
  ok <- matcolall(is.finite(tx)) & matcolall(is.finite(ty))
  allok <- all(ok)
  if(!allok) {
    tx <- tx[ , ok, drop=FALSE]
    ty <- ty[ , ok, drop=FALSE]
    n <- ncol(tx)
  }
  if(missing(v)) {
    v <- diag(rep.int(1, p))
  } else {
    stopifnot(is.matrix(v))
    if(nrow(v) != ncol(v)) stop("v should be a square matrix")
    stopifnot(ncol(x) == nrow(v))
  }
  z <- .C("Cbiform",
          x=as.double(tx),
          y=as.double(ty),
          n=as.integer(n),
          p=as.integer(p),
          v=as.double(v),
          z=as.double(numeric(n)),
          PACKAGE = "spatstat")
  result <- z$z
  names(result) <- nama[ok]
  if(allok) return(result)
  # re-insert NA for the rows that were dropped above
  fullresult <- rep.int(NA_real_, length(ok))
  fullresult[ok] <- result
  names(fullresult) <- nama
  return(fullresult)
}

sumsymouter <- function(x, w=NULL) {
  ## x is a 3D array
  ## w is a matrix
  ## Computes the sum of outer(x[,i,j], x[,j,i]) * w[i,j]
  ## over all pairs i != j.
  ## Sparse inputs are delegated to the sparse implementation.
  if(inherits(x, c("sparseSlab", "sparse3Darray")) &&
     (is.null(w) || inherits(w, "sparseMatrix")))
    return(sumsymouterSparse(x, w))
  x <- as.array(x)
  stopifnot(length(dim(x)) == 3)
  if(dim(x)[2L] != dim(x)[3L])
    stop("The second and third dimensions of x should be equal")
  if(!is.null(w)) {
    w <- as.matrix(w)
    if(!all(dim(w) == dim(x)[-1L]))
      stop("Dimensions of w should match the second and third dimensions of x")
  }
  p <- dim(x)[1L]
  n <- dim(x)[2L]
  if(is.null(w)) {
    zz <- .C("Csumsymouter",
             x = as.double(x),
             p = as.integer(p),
             n = as.integer(n),
             y = as.double(numeric(p * p)),
             PACKAGE = "spatstat")
  } else {
    zz <- .C("Cwsumsymouter",
             x = as.double(x),
             w = as.double(w),
             p = as.integer(p),
             n = as.integer(n),
             y = as.double(numeric(p * p)),
             PACKAGE = "spatstat")
  }
  matrix(zz$y, p, p)
}

checksolve <- function(M, action, descrip, target="") {
  ## Invert the matrix M, handling singularity according to 'action'
  ## ("fatal" = stop, "warn" = warning, "silent" = no message).
  ## Returns the inverse, or NULL if M could not be inverted.
  Mname <- short.deparse(substitute(M))
  Minv <- try(solve(M), silent=(action=="silent"))
  if(!inherits(Minv, "try-error"))
    return(Minv)
  if(missing(descrip))
    descrip <- paste("the matrix", sQuote(Mname))
  whinge <- paste0("Cannot compute ", target, ": ", descrip, " is singular")
  switch(action,
         fatal=stop(whinge, call.=FALSE),
         warn= warning(whinge, call.=FALSE),
         silent={})
  return(NULL)
}

check.mat.mul <- function(A, B, Acols="columns of A", Brows="rows of B",
                          fatal=TRUE) {
  # check whether A %*% B would be valid: if not, print a useful message
  if(!is.matrix(A))
    A <- matrix(A, nrow=1, dimnames=list(NULL, names(A)))
  if(!is.matrix(B))
    B <- matrix(B, ncol=1, dimnames=list(names(B), NULL))
  nA <- ncol(A)
  nB <- nrow(B)
  if(nA == nB) return(TRUE)
  if(!fatal) return(FALSE)
  if(any(nzchar(Anames <- colnames(A))))
    message(paste0("Names of ", Acols, ": ", commasep(Anames)))
  if(any(nzchar(Bnames <- rownames(B))))
message(paste0("Names of ", Brows, ": ", commasep(Bnames))) stop(paste("Internal error: number of", Acols, paren(nA), "does not match number of", Brows, paren(nB)), call.=FALSE) } spatstat/R/lpp.R0000755000176200001440000004652113156200767013256 0ustar liggesusers# # lpp.R # # $Revision: 1.56 $ $Date: 2017/08/08 03:24:35 $ # # Class "lpp" of point patterns on linear networks lpp <- function(X, L, ...) { stopifnot(inherits(L, "linnet")) if(missing(X) || is.null(X)) { ## empty pattern df <- data.frame(x=numeric(0), y=numeric(0)) lo <- data.frame(seg=integer(0), tp=numeric(0)) } else { localnames <- c("seg", "tp") spatialnames <- c("x", "y") allcoordnames <- c(spatialnames, localnames) if(is.matrix(X)) X <- as.data.frame(X) if(checkfields(X, localnames)) { #' X includes at least local coordinates X <- as.data.frame(X) #' validate local coordinates if(nrow(X) > 0) { nedge <- nsegments(L) if(with(X, any(seg < 1 || seg > nedge))) stop("Segment index coordinate 'seg' exceeds bounds") if(with(X, any(tp < 0 || tp > 1))) stop("Local coordinate 'tp' outside [0,1]") } if(!checkfields(X, spatialnames)) { #' data give local coordinates only #' reconstruct x,y coordinates from local coordinates Y <- local2lpp(L, X$seg, X$tp, df.only=TRUE) X[,spatialnames] <- Y[,spatialnames,drop=FALSE] } #' local coordinates lo <- X[ , localnames, drop=FALSE] #' spatial coords and marks marknames <- setdiff(names(X), allcoordnames) df <- X[, c(spatialnames, marknames), drop=FALSE] } else { #' local coordinates must be computed from spatial coordinates if(!is.ppp(X)) X <- as.ppp(X, W=L$window, ...) 
#' project to segment pro <- project2segment(X, as.psp(L)) #' projected points (spatial coordinates and marks) df <- as.data.frame(pro$Xproj) #' local coordinates lo <- data.frame(seg=pro$mapXY, tp=pro$tp) } } # combine spatial, local, marks nmark <- ncol(df) - 2 if(nmark == 0) { df <- cbind(df, lo) ctype <- c(rep("s", 2), rep("l", 2)) } else { df <- cbind(df[,1:2], lo, df[, -(1:2), drop=FALSE]) ctype <- c(rep("s", 2), rep("l", 2), rep("m", nmark)) } out <- ppx(data=df, domain=L, coord.type=ctype) class(out) <- c("lpp", class(out)) return(out) } print.lpp <- function(x, ...) { stopifnot(inherits(x, "lpp")) splat("Point pattern on linear network") sd <- summary(x$data) np <- sd$ncases nama <- sd$col.names splat(np, ngettext(np, "point", "points")) ## check for unusual coordinates ctype <- x$ctype nam.m <- nama[ctype == "mark"] nam.t <- nama[ctype == "temporal"] nam.c <- setdiff(nama[ctype == "spatial"], c("x","y")) nam.l <- setdiff(nama[ctype == "local"], c("seg", "tp")) if(length(nam.c) > 0) splat("Additional spatial coordinates", commasep(sQuote(nam.c))) if(length(nam.l) > 0) splat("Additional local coordinates", commasep(sQuote(nam.l))) if(length(nam.t) > 0) splat("Additional temporal coordinates", commasep(sQuote(nam.t))) if((nmarks <- length(nam.m)) > 0) { if(nmarks > 1) { splat(nmarks, "columns of marks:", commasep(sQuote(nam.m))) } else { marx <- marks(x) if(is.factor(marx)) { exhibitStringList("Multitype, with possible types:", levels(marx)) } else splat("Marks of type", sQuote(typeof(marx))) } } print(x$domain, ...) 
return(invisible(NULL))
}

plot.lpp <- function(x, ..., main, add=FALSE,
                     use.marks=TRUE, which.marks=NULL,
                     show.all=!add, show.window=FALSE, show.network=TRUE,
                     do.plot=TRUE, multiplot=TRUE) {
  ## Plot method for point patterns on a linear network (class 'lpp').
  ## Returns (invisibly) the value of the underlying plot.ppp call,
  ## or the layout object when do.plot=FALSE.
  if(missing(main))
    main <- short.deparse(substitute(x))
  ## Handle multiple columns of marks as separate plots
  ## (unless add=TRUE or which.marks selects a single column
  ## or multiplot = FALSE)
  mx <- marks(x)
  if(use.marks && !is.null(dim(mx))) {
    implied.all <- is.null(which.marks)
    want.several <- implied.all || !is.null(dim(mx <- mx[,which.marks]))
    do.several <- want.several && !add && multiplot
    if(want.several)
      mx <- as.data.frame(mx) #' ditch hyperframe columns
    if(do.several) {
      ## generate one plot for each column of marks
      y <- solapply(mx, setmarks, x=x)
      out <- do.call(plot,
                     c(list(x=y, main=main, do.plot=do.plot,
                            show.window=show.window),
                       list(...)))
      return(invisible(out))
    }
    if(is.null(which.marks)) {
      # single-panel plot requested: default to the first mark column
      which.marks <- 1
      if(do.plot) message("Plotting the first column of marks")
    }
  }
  ## determine space required, including legend
  P <- as.ppp(x)
  a <- plot(P, ..., do.plot=FALSE)
  if(!do.plot) return(a)
  ## initialise graphics space
  if(!add) {
    if(show.window) {
      plot(Window(P), main=main, invert=TRUE, ...)
    } else {
      b <- attr(a, "bbox")
      plot(b, type="n", main=main, ..., show.all=FALSE)
    }
  }
  ## plot linear network
  if(show.network) {
    L <- as.linnet(x)
    do.call.matched(plot.linnet,
                    resolve.defaults(list(x=L, add=TRUE),
                                     list(...)),
                    extrargs=c("lty", "lwd", "col"))
  }
  ## plot points, legend, title
  ans <- do.call.matched(plot.ppp,
                         c(list(x=P, add=TRUE, main=main,
                                show.all=show.all, show.window=FALSE),
                           list(...)),
                         extrargs=c("shape", "size", "pch", "cex",
                                    "fg", "bg", "cols", "lty", "lwd", "etch",
                                    "cex.main", "col.main", "line",
                                    "outer", "sub"))
  return(invisible(ans))
}

summary.lpp <- function(object, ...)
{
  ## Summary method for 'lpp' objects: network summary augmented with
  ## point count, intensity (points per unit length) and mark information.
  stopifnot(inherits(object, "lpp"))
  L <- object$domain
  result <- summary(L)
  np <- npoints(object)
  ## NOTE(review): 'npoints(object)' is recomputed here; the previous
  ## line already holds it in 'np' (harmless duplication).
  result$npoints <- np <- npoints(object)
  result$intensity <- np/result$totlength
  result$is.marked <- is.marked(object)
  ## NOTE(review): this initial value equals is.marked(object), not
  ## is.multitype(object); when the pattern is marked it is always
  ## overwritten in the branches below, and when unmarked it is FALSE,
  ## so the end result is correct - but the expression looks like a typo.
  result$is.multitype <- is.marked(object)
  if(result$is.marked) {
    mks <- marks(object)
    if(result$multiple.marks <- is.data.frame(mks)) {
      # several columns of marks: record names, no single mark type
      result$marknames <- names(mks)
      result$is.numeric <- FALSE
      result$marktype <- "dataframe"
      result$is.multitype <- FALSE
    } else {
      # single column of marks
      result$is.numeric <- is.numeric(mks)
      result$marknames <- "marks"
      result$marktype <- typeof(mks)
      result$is.multitype <- is.multitype(object)
    }
    if(result$is.multitype) {
      # frequency table of types, with per-type intensity
      tm <- as.vector(table(mks))
      tfp <- data.frame(frequency=tm,
                        proportion=tm/sum(tm),
                        intensity=tm/result$totlength,
                        row.names=levels(mks))
      result$marks <- tfp
    } else
      result$marks <- summary(mks)
  }
  class(result) <- "summary.lpp"
  return(result)
}

print.summary.lpp <- function(x, ...) {
  ## Print method for 'summary.lpp' objects.
  splat("Point pattern on linear network")
  splat(x$npoints, "points")
  splat("Linear network with",
        x$nvert, "vertices and",
        x$nline, "lines")
  u <- x$unitinfo
  dig <- getOption('digits')
  splat("Total length", signif(x$totlength, dig), u$plural, u$explain)
  splat("Average intensity", signif(x$intensity, dig),
        "points per", if(u$vanilla) "unit length" else u$singular)
  if(x$is.marked) {
    if(x$multiple.marks) {
      splat("Mark variables:", commasep(x$marknames, ", "))
      cat("Summary:\n")
      print(x$marks)
    } else if(x$is.multitype) {
      cat("Multitype:\n")
      print(signif(x$marks,dig))
    } else {
      splat("marks are ",
            if(x$is.numeric) "numeric, ",
            "of type ", sQuote(x$marktype),
            sep="")
      cat("Summary:\n")
      print(x$marks)
    }
  }
  print(x$win, prefix="Enclosing window: ")
  invisible(NULL)
}

intensity.lpp <- function(X, ...) {
  ## Average intensity: points per unit length of network.
  ## For multitype patterns, one intensity per type.
  len <- sum(lengths.psp(as.psp(as.linnet(X))))
  if(is.multitype(X)) table(marks(X))/len else npoints(X)/len
}

is.lpp <- function(x) {
  ## Test whether x is a point pattern on a linear network.
  inherits(x, "lpp")
}

is.multitype.lpp <- function(X, na.action="warn", ...)
{ marx <- marks(X) if(is.null(marx)) return(FALSE) if((is.data.frame(marx) || is.hyperframe(marx)) && ncol(marx) > 1) return(FALSE) if(!is.factor(marx)) return(FALSE) if((length(marx) > 0) && anyNA(marx)) switch(na.action, warn = { warning(paste("some mark values are NA in the point pattern", short.deparse(substitute(X)))) }, fatal = { return(FALSE) }, ignore = {} ) return(TRUE) } as.lpp <- function(x=NULL, y=NULL, seg=NULL, tp=NULL, ..., marks=NULL, L=NULL, check=FALSE, sparse) { nomore <- is.null(y) && is.null(seg) && is.null(tp) if(inherits(x, "lpp") && nomore) { X <- x if(!missing(sparse) && !is.null(sparse)) X$domain <- as.linnet(domain(X), sparse=sparse) } else { if(!inherits(L, "linnet")) stop("L should be a linear network") if(!missing(sparse) && !is.null(sparse)) L <- as.linnet(L, sparse=sparse) if(is.ppp(x) && nomore) { X <- lpp(x, L) } else if(is.null(x) && is.null(y) && !is.null(seg) && !is.null(tp)){ X <- lpp(data.frame(seg=seg, tp=tp), L=L) } else { if(is.numeric(x) && length(x) == 2 && is.null(y)) { xy <- list(x=x[1L], y=x[2L]) } else { xy <- xy.coords(x,y)[c("x", "y")] } if(!is.null(seg) && !is.null(tp)) { # add segment map information xy <- append(xy, list(seg=seg, tp=tp)) } else { # convert to ppp, typically suppressing check mechanism xy <- as.ppp(xy, W=as.owin(L), check=check) } X <- lpp(xy, L) } } if(!is.null(marks)) marks(X) <- marks return(X) } as.ppp.lpp <- function(X, ..., fatal=TRUE) { verifyclass(X, "lpp", fatal=fatal) L <- X$domain Y <- as.ppp(coords(X, temporal=FALSE, local=FALSE), W=L$window, check=FALSE) if(!is.null(marx <- marks(X))) { if(is.hyperframe(marx)) marx <- as.data.frame(marx) marks(Y) <- marx } return(Y) } Window.lpp <- function(X, ...) { as.owin(X) } "Window<-.lpp" <- function(X, ..., check=TRUE, value) { if(check) { X <- X[value] } else { Window(X$domain, check=FALSE) <- value } return(X) } as.owin.lpp <- function(W, ..., fatal=TRUE) { as.owin(as.ppp(W, ..., fatal=fatal)) } domain.lpp <- function(X, ...) 
{ as.linnet(X) } as.linnet.lpp <- function(X, ..., fatal=TRUE, sparse) { verifyclass(X, "lpp", fatal=fatal) L <- X$domain if(!missing(sparse)) L <- as.linnet(L, sparse=sparse) return(L) } unitname.lpp <- function(x) { u <- unitname(x$domain) return(u) } "unitname<-.lpp" <- function(x, value) { w <- x$domain unitname(w) <- value x$domain <- w return(x) } "marks<-.lpp" <- function(x, ..., value) { NextMethod("marks<-") } unmark.lpp <- function(X) { NextMethod("unmark") } as.psp.lpp <- function(x, ..., fatal=TRUE){ verifyclass(x, "lpp", fatal=fatal) return(x$domain$lines) } nsegments.lpp <- function(x) { return(x$domain$lines$n) } local2lpp <- function(L, seg, tp, X=NULL, df.only=FALSE) { stopifnot(inherits(L, "linnet")) if(is.null(X)) { # map to (x,y) Ldf <- as.data.frame(L$lines) dx <- with(Ldf, x1-x0) dy <- with(Ldf, y1-y0) x <- with(Ldf, x0[seg] + tp * dx[seg]) y <- with(Ldf, y0[seg] + tp * dy[seg]) } else { x <- X$x y <- X$y } # compile into data frame data <- data.frame(x=x, y=y, seg=seg, tp=tp) if(df.only) return(data) ctype <- c("s", "s", "l", "l") out <- ppx(data=data, domain=L, coord.type=ctype) class(out) <- c("lpp", class(out)) return(out) } #################################################### # subset extractor #################################################### "[.lpp" <- function (x, i, j, drop=FALSE, ..., snip=TRUE) { if(!missing(i) && !is.null(i)) { if(is.owin(i)) { # spatial domain: call code for 'j' xi <- x[,i,snip=snip] } else { # usual row-type index da <- x$data daij <- da[i, , drop=FALSE] xi <- ppx(data=daij, domain=x$domain, coord.type=as.character(x$ctype)) if(drop) xi <- xi[drop=TRUE] # call [.ppx to remove unused factor levels class(xi) <- c("lpp", class(xi)) } x <- xi } if(missing(j) || is.null(j)) return(x) stopifnot(is.owin(j)) w <- j L <- x$domain # Find vertices that lie inside 'w' vertinside <- inside.owin(L$vertices, w=w) from <- L$from to <- L$to if(snip) { ## For efficiency, first restrict network to relevant segments. 
## Find segments EITHER OF whose endpoints lie in 'w' okedge <- vertinside[from] | vertinside[to] ## extract relevant subset of network graph x <- thinNetwork(x, retainedges=okedge) ## Now add vertices at crossing points with boundary of 'w' b <- crossing.psp(as.psp(L), edges(w)) x <- insertVertices(x, unique(b)) boundarypoints <- attr(x, "id") ## update data L <- x$domain from <- L$from to <- L$to vertinside <- inside.owin(L$vertices, w=w) vertinside[boundarypoints] <- TRUE } ## find segments whose endpoints BOTH lie in 'w' edgeinside <- vertinside[from] & vertinside[to] ## extract relevant subset of network xnew <- thinNetwork(x, retainedges=edgeinside) ## adjust window without checking Window(xnew, check=FALSE) <- w return(xnew) } #################################################### # affine transformations #################################################### scalardilate.lpp <- function(X, f, ...) { trap.extra.arguments(..., .Context="In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) Y <- X Y$data$x <- f * as.numeric(X$data$x) Y$data$y <- f * as.numeric(X$data$y) Y$domain <- scalardilate(X$domain, f) return(Y) } affine.lpp <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "lpp") Y <- X Y$data[, c("x","y")] <- affinexy(X$data[, c("x","y")], mat=mat, vec=vec) Y$domain <- affine(X$domain, mat=mat, vec=vec, ...) 
return(Y) } shift.lpp <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "lpp") Y <- X Y$domain <- shift(X$domain, vec=vec, ..., origin=origin) vec <- getlastshift(Y$domain) Y$data[, c("x","y")] <- shiftxy(X$data[, c("x","y")], vec=vec) # tack on shift vector attr(Y, "lastshift") <- vec return(Y) } rotate.lpp <- function(X, angle=pi/2, ..., centre=NULL) { verifyclass(X, "lpp") if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL Y <- X Y$data[, c("x","y")] <- rotxy(X$data[, c("x","y")], angle=angle) Y$domain <- rotate(X$domain, angle=angle, ...) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } rescale.lpp <- function(X, s, unitname) { if(missing(unitname)) unitname <- NULL if(missing(s)) s <- 1/unitname(X)$multiplier Y <- scalardilate(X, f=1/s) unitname(Y) <- rescale(unitname(X), s, unitname) return(Y) } superimpose.lpp <- function(..., L=NULL) { objects <- list(...) if(!is.null(L) && !inherits(L, "linnet")) stop("L should be a linear network") if(length(objects) == 0) { if(is.null(L)) return(NULL) emptyX <- lpp(list(x=numeric(0), y=numeric(0)), L) return(emptyX) } islpp <- unlist(lapply(objects, is.lpp)) if(is.null(L) && !any(islpp)) stop("Cannot determine linear network: no lpp objects given") nets <- unique(lapply(objects[islpp], as.linnet)) if(length(nets) > 1) stop("Point patterns are defined on different linear networks") if(!is.null(L)) { nets <- unique(append(nets, list(L))) if(length(nets) > 1) stop("Argument L is a different linear network") } L <- nets[[1L]] ## convert list(x,y) to linear network, etc if(any(!islpp)) objects[!islpp] <- lapply(objects[!islpp], lpp, L=L) ## concatenate coordinates locns <- do.call(rbind, lapply(objects, coords)) ## concatenate marks (or use names of arguments) marx <- superimposeMarks(objects, sapply(objects, npoints)) ## make combined pattern Y <- lpp(locns, L) marks(Y) <- marx return(Y) } # # interactive plot for lpp objects # iplot.lpp <- 
function(x, ..., xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) stopifnot(is.lpp(x)) ## predigest L <- domain(x) v <- vertices(L) deg <- vertexdegree(L) dv <- textstring(v, txt=paste(deg)) y <- layered(lines=as.psp(L), vertices=v, degree=dv, points=as.ppp(x)) iplot(y, ..., xname=xname, visible=c(TRUE, FALSE, FALSE, TRUE)) } identify.lpp <- function(x, ...) { verifyclass(x, "lpp") P <- as.ppp(x) id <- identify(P$x, P$y, ...) if(!is.marked(x)) return(id) marks <- as.data.frame(P)[id, -(1:2)] out <- cbind(data.frame(id=id), marks) row.names(out) <- NULL return(out) } cut.lpp <- function(x, z=marks(x), ...) { if(missing(z) || is.null(z)) { z <- marks(x, dfok=TRUE) if(is.null(z)) stop("x has no marks to cut") } else { #' special objects if(inherits(z, "linim")) { z <- z[x, drop=FALSE] } else if(inherits(z, "linfun")) { z <- z(x) } else if(inherits(z, "lintess")) { z <- (as.linfun(z))(x) } } #' standard data types if(is.character(z)) { if(length(z) == npoints(x)) { # interpret as a factor z <- factor(z) } else if((length(z) == 1) && (z %in% colnames(df <- as.data.frame(x)))) { # interpret as the name of a column of marks or a coordinate zname <- z z <- df[, zname] if(zname == "seg") z <- factor(z) } else stop("format of argument z not understood") } if(is.factor(z) || is.vector(z)) { stopifnot(length(z) == npoints(x)) g <- if(is.factor(z)) z else if(is.numeric(z)) cut(z, ...) else factor(z) marks(x) <- g return(x) } if(is.data.frame(z) || is.matrix(z)) { stopifnot(nrow(z) == npoints(x)) # take first column z <- z[,1L] g <- if(is.numeric(z)) cut(z, ...) else factor(z) marks(x) <- g return(x) } stop("Format of z not understood") } points.lpp <- function(x, ...) { points(coords(x, spatial=TRUE, local=FALSE), ...) 
} connected.lpp <- function(X, R=Inf, ..., dismantle=TRUE) { if(!dismantle) { if(is.infinite(R)) { Y <- X %mark% factor(1) attr(Y, "retainpoints") <- attr(X, "retainpoints") return(Y) } check.1.real(R) stopifnot(R >= 0) nv <- npoints(X) close <- (pairdist(X) <= R) diag(close) <- FALSE ij <- which(close, arr.ind=TRUE) ie <- ij[,1] - 1L je <- ij[,2] - 1L ne <- length(ie) zz <- .C("cocoGraph", nv=as.integer(nv), ne=as.integer(ne), ie=as.integer(ie), je=as.integer(je), label=as.integer(integer(nv)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(zz$status != 0) stop("Internal error: connected.ppp did not converge") lab <- zz$label + 1L # Renumber labels sequentially lab <- as.integer(factor(lab)) # Convert labels to factor lab <- factor(lab) # Apply to points Y <- X %mark% lab attr(Y, "retainpoints") <- attr(X, "retainpoints") return(Y) } # first break the *network* into connected components L <- domain(X) lab <- connected(L, what="labels") if(length(levels(lab)) == 1) { XX <- solist(X) } else { subsets <- split(seq_len(nvertices(L)), lab) XX <- solist() for(i in seq_along(subsets)) XX[[i]] <- thinNetwork(X, retainvertices=subsets[[i]]) } # now find R-connected components in each dismantled piece YY <- solapply(XX, connected.lpp, R=R, dismantle=FALSE) if(length(YY) == 1) YY <- YY[[1]] return(YY) } text.lpp <- function(x, ...) { co <- coords(x) graphics::text.default(x=co$x, y=co$y, ...) 
} spatstat/R/pickoption.R0000755000176200001440000000247513115271120014624 0ustar liggesusers# # pickoption.R # # $Revision: 1.6 $ $Date: 2016/04/25 02:34:40 $ # pickoption <- function(what="option", key, keymap, ..., exact=FALSE, list.on.err=TRUE, die=TRUE, multi=FALSE, allow.all=TRUE) { keyname <- short.deparse(substitute(key)) if(!is.character(key)) stop(paste(keyname, "must be a character string", if(multi) "or strings" else NULL)) if(length(key) == 0) stop(paste("Argument", sQuote(keyname), "has length zero")) key <- unique(key) if(!multi && length(key) > 1) stop(paste("Must specify only one", what, sQuote(keyname))) allow.all <- allow.all && multi id <- if(allow.all && identical(key, "all")) { seq_along(keymap) } else if(exact) { match(key, names(keymap), nomatch=NA) } else { pmatch(key, names(keymap), nomatch=NA) } if(any(nbg <- is.na(id))) { # no match whinge <- paste("unrecognised", what, paste(dQuote(key[nbg]), collapse=", "), "in argument", sQuote(keyname)) if(list.on.err) { cat(paste(whinge, "\n", "Options are:"), paste(dQuote(names(keymap)), collapse=","), "\n") } if(die) stop(whinge, call.=FALSE) else return(NULL) } key <- keymap[id] names(key) <- NULL return(key) } spatstat/R/multipair.util.R0000755000176200001440000000173013115271120015420 0ustar liggesusers## ## ## multipair.util.R ## ## $Revision: 1.13 $ $Date: 2014/04/29 01:13:35 $ ## ## Utilities for multitype pairwise interactions ## ## ------------------------------------------------------------------- ## MultiPair.checkmatrix <- function(mat, n, matname, naok=TRUE, zerook=TRUE, asymmok=FALSE) { if(missing(matname)) matname <- short.deparse(substitute(mat)) if(!is.matrix(mat)) stop(paste(matname, "must be a matrix")) if(any(dim(mat) != rep.int(n,2))) stop(paste(matname, "must be a square matrix,", "of size", n, "x", n)) isna <- is.na(mat) if(!naok && any(isna)) stop(paste("NA entries not allowed in", matname)) if(any(mat[!isna] < 0)) stop(paste("Negative entries not allowed in", matname)) 
if(!zerook && any(mat[!isna] == 0)) stop(paste("Zero entries not allowed in", matname)) if(!asymmok && !isSymmetric(mat)) stop(paste(matname, "must be a symmetric matrix")) } spatstat/vignettes/0000755000176200001440000000000013166361210014124 5ustar liggesusersspatstat/vignettes/datasets.Rnw0000644000176200001440000006364313161127550016442 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Datasets Provided for the Spatstat Package} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \newcommand{\sdat}{\pkg{spatstat.data}} \newcommand{\Sdat}{\pkg{Spatstat.data}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") spatstat.options(transparent=FALSE) options(useFancyQuotes=FALSE) @ \title{Datasets provided for \spst} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle This document is an overview of the spatial datasets that are provided for the \spst\ package. To flick through a nice display of all the data sets that come with \spst\ type \texttt{demo(data)}. To see information about a given data set, type \texttt{help({\em name})} where \emph{name} is the name of the data set. To plot a given data set, type \texttt{plot({\em name})}. Datasets in \spst\ are ``lazy-loaded'', which means that they can be accessed simply by typing their name. Not all packages do this; in some packages you have to type \texttt{data({\em name})} in order to access a data set. 
To list all the datasets in \spst, you need to type \texttt{data(package="spatstat.data")}. This is because, for efficiency, the datasets are actually installed in a sub-package \sdat. This is the only time you should ever need to mention \sdat\ explicitly. When the \spst\ package is loaded by the command \texttt{library(spatstat)}, the sub-package \sdat\ is automatically loaded. \section{List of datasets} \subsection{Point patterns in 2D} Here is a list of the standard point pattern data sets that are supplied with the current installation of \sdat: \newcommand{\recto}{\framebox{\hphantom{re}\vphantom{re}}} \newcommand{\irregpoly}{\includegraphics*[width=6mm]{irregpoly}} \newcommand{\convpoly}{\includegraphics*[width=4mm]{hexagon}} \newcommand{\disc}{$\bigcirc$} \newcommand{\nomarks}{$\cdot$} \newcommand{\nocov}{$\cdot$} \begin{tabular}{l|l|ccc} {\sf name} & {\sf description} & {\sf marks} & {\sf covariates} & {\sf window} \\ \hline {\tt amacrine} & rabbit amacrine cells & cell type & \nocov & \recto \\ {\tt anemones} & sea anemones & diameter & \nocov & \recto \\ {\tt ants} & ant nests& species & zones & \convpoly \\ {\tt bdspots} & breakdown spots & \nomarks & \nocov & \disc \\ {\tt bei} & rainforest trees & \nomarks & topography & \recto \\ {\tt betacells} & cat retinal ganglia & cell type, area & \nocov & \recto \\ {\tt bramblecanes} & bramble canes & age & \nocov & \recto \\ {\tt bronzefilter} & bronze particles & diameter & \nocov & \recto \\ {\tt cells} & biological cells & \nomarks &\nocov & \recto \\ {\tt chorley} & cancers & case/control &\nocov & \irregpoly \\ {\tt clmfires} & forest fires & cause, size, date & \shortstack[c]{elevation, orientation,\\ slope, land use} & \irregpoly \\ {\tt copper} & copper deposits & \nomarks & fault lines & \recto \\ {\tt demopat} & artificial data & type & \nocov & \irregpoly \\ {\tt finpines} & trees & diam, height & \nocov & \recto \\ {\tt gordon} & people in a park & \nomarks & \nocov & \irregpoly \\ {\tt gorillas} 
& gorilla nest sites & group, season & \shortstack[c]{terrain, vegetation,\\ heat, water} & \irregpoly \\ {\tt hamster} & hamster tumour cells & cell type &\nocov & \recto \\ {\tt humberside} & child leukaemia & case/control & \nocov & \irregpoly\\ {\tt hyytiala} & mixed forest & species &\nocov & \recto \\ {\tt japanesepines} & Japanese pines & \nomarks &\nocov & \recto \\ {\tt lansing} & mixed forest & species & \nocov & \recto \\ {\tt longleaf} & trees & diameter & \nocov & \recto \\ {\tt mucosa} & gastric mucosa cells & cell type & \nocov & \recto \\ {\tt murchison} & gold deposits & \nomarks & faults, rock type & \irregpoly \\ {\tt nbfires} & wildfires & several & \nocov & \irregpoly \\ {\tt nztrees} & trees & \nomarks & \nocov & \recto \\ {\tt paracou} & trees & adult/juvenile & \nocov & \recto \\ {\tt ponderosa} & trees & \nomarks & \nocov & \recto \\ {\tt redwood} & saplings & \nomarks & \nocov & \recto \\ {\tt redwood3} & saplings & \nomarks & \nocov & \recto \\ {\tt redwoodfull} & saplings & \nomarks & zones & \recto \\ {\tt shapley} & galaxies & magnitude, recession, SE & \nocov & \convpoly \\ {\tt simdat} & simulated pattern & \nomarks & \nocov & \recto \\ {\tt sporophores} & fungi & species & \nocov & \disc \\ {\tt spruces} & trees & diameter & \nocov & \recto \\ {\tt swedishpines} & trees & \nomarks & \nocov & \recto \\ {\tt urkiola} & mixed forest & species & \nocov & \irregpoly \\ {\tt vesicles} & synaptic vesicles & \nomarks & zones & \irregpoly \\ {\tt waka} & trees & diameter & \nocov & \recto \\ \hline \end{tabular} \bigskip \noindent The shape of the window containing the point pattern is indicated by the symbols \recto\ (rectangle), \disc\ (disc), \convpoly\ (convex polygon) and \irregpoly\ (irregular polygon). Additional information about the data set \texttt{\em name} may be stored in a separate list \texttt{{\em name}.extra}. 
Currently these are the available options: \begin{tabular}[!h]{ll} {\sc Name} & {\sc Contents} \\ \hline {\tt ants.extra} & field and scrub subregions; \\ & additional map elements; plotting function \\ {\tt bei.extra} & covariate images \\ {\tt chorley.extra} & incinerator location; plotting function \\ {\tt gorillas.extra} & covariate images\\ {\tt nbfires.extra} & inscribed rectangle; border type labels \\ {\tt ponderosa.extra} & data points of interest; plotting function\\ {\tt redwoodfull.extra} & subregions; plotting function \\ {\tt shapley.extra} & individual survey fields; plotting function \\ {\tt vesicles.extra} & anatomical regions \\ \hline \end{tabular} For demonstration and instruction purposes, raw data files are available for the datasets \texttt{vesicles}, \texttt{gorillas} and \texttt{osteo}. \subsection{Other Data Types} There are also the following spatial data sets which are not 2D point patterns: \begin{tabular}[c]{l|l|l} {\sf name} & {\sf description} & {\sf format} \\ \hline {\tt austates} & Australian states & tessellation \\ {\tt chicago} & crimes & point pattern on linear network \\ {\tt dendrite} & dendritic spines & point pattern on linear network \\ {\tt spiders} & spider webs & point pattern on linear network \\ {\tt flu} & virus proteins & replicated 2D point patterns \\ {\tt heather} & heather mosaic & binary image (three versions) \\ {\tt demohyper} & simulated data & replicated 2D point patterns with covariates\\ {\tt osteo} & osteocyte lacunae & replicated 3D point patterns with covariates\\ {\tt pyramidal} & pyramidal neurons & replicated 2D point patterns in 3 groups\\ {\tt residualspaper} & data \& code from Baddeley et al (2005) & 2D point patterns, \R\ function \\ {\tt simba} & simulated data & replicated 2D point patterns in 2 groups\\ {\tt waterstriders} & insects on water & replicated 2D point patterns\\ \hline \end{tabular} Additionally there is a dataset \texttt{Kovesi} containing several colour maps with perceptually 
uniform contrast. \section{Information on each dataset} Here we give basic information about each dataset. For further information, consult the help file for the particular dataset. <>= opa <- par() ## How to set all margins to zero and eliminate all outer spaces zeromargins <- function() { par( mar=rep(0,4), omd=c(0,1,0,1), xaxs="i", yaxs="i" ) invisible(NULL) } ## Set 'mar' setmargins <- function(...) { x <- c(...) x <- rep(x, 4)[1:4] par(mar=x) invisible(NULL) } @ \subsubsection*{\texttt{amacrine}: Amacrine cells} Locations of displaced amacrine cells in the retina of a rabbit. There are two types of points, ``on'' and ``off''. \SweaveOpts{width=5.5,height=3}\setkeys{Gin}{width=0.8\textwidth} <>= plot(amacrine) @ <>= setmargins(0,1,2,0) plot(amacrine) @ \subsubsection*{\texttt{anemones}: Sea Anemones} These data give the spatial locations and diameters of sea anemones on a boulder near sea level. \SweaveOpts{width=7,height=4.5}\setkeys{Gin}{width=0.8\textwidth} <>= plot(anemones, markscale=1) @ <>= setmargins(0,0,2,0) plot(anemones, markscale=1) @ \subsubsection*{\texttt{ants}: Ants' nests} Spatial locations of nests of two species of ants at a site in Greece. The full dataset (supplied here) has an irregular polygonal boundary, while most analyses have been confined to two rectangular subsets of the pattern (also supplied here). % Parameters for Ants data with key at right \SweaveOpts{width=6.3,height=4}\setkeys{Gin}{width=0.7\textwidth} <>= ants.extra$plotit() @ %$ <>= setmargins(0,0,1,0) ants.extra$plotit() @ %$ \subsubsection*{\texttt{austates}: Australian states} The states and large mainland territories of Australia are represented as polygonal regions forming a tessellation. <>= plot(austates) @ \subsubsection*{\texttt{bdspots}: Breakdown spots} A list of three point patterns, each giving the locations of electrical breakdown spots on a circular electrode in a microelectronic capacitor. 
\SweaveOpts{width=12,height=6}\setkeys{Gin}{width=\textwidth} <>= plot(bdspots, equal.scales=TRUE, pch="+", panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) @ <>= zeromargins() plot(bdspots, equal.scales=TRUE, pch="+", main="", mar.panel=0, hsep=1, panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) @ \subsubsection*{\texttt{bei}: Beilschmiedia data} Locations of 3605 trees in a tropical rain forest. Accompanied by covariate data giving the elevation (altitude) and slope of elevation in the study region. \SweaveOpts{width=12,height=6}\setkeys{Gin}{width=0.8\textwidth} <>= plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) @ <>= setmargins(0,0,2,0) plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) @ <>= M <- persp(bei.extra$elev, theta=-45, phi=18, expand=7, border=NA, apron=TRUE, shade=0.3, box=FALSE, visible=TRUE, main="") perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3) @ \subsubsection*{\texttt{betacells}: Beta ganglion cells} Locations of beta ganglion cells in cat retina, each cell classified as `on' or `off' and also labelled with the cell profile area. <>= plot(betacells) @ \subsubsection*{\texttt{bramblecanes}: Bramble canes} <>= plot(bramblecanes, cols=1:3) @ <>= plot(split(bramblecanes)) @ \subsubsection*{\texttt{bronzefilter}: Bronze filter section profiles} Spatially inhomogeneous pattern of circular section profiles of particles, observed in a longitudinal plane section through a gradient sinter filter made from bronze powder. <>= plot(bronzefilter,markscale=2) @ \subsubsection*{\texttt{cells}: Biological cells} Locations of the centres of 42 biological cells observed under optical microscopy in a histological section. Often used as a demonstration example. <>= plot(cells) @ \subsubsection*{\texttt{chicago}: Chicago crimes} Locations (street addresses) of crimes reported in a two-week period in an area close to the University of Chicago. 
A multitype point pattern on a linear network. <>= plot(chicago, main="Chicago Crimes", col="grey", cols=c("red", "blue", "black", "blue", "red", "blue", "blue"), chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE) @ \subsubsection*{\texttt{chorley}: Chorley-Ribble cancer data} Spatial locations of cases of cancer of the larynx and cancer of the lung, and the location of a disused industrial incinerator. A marked point pattern, with an irregular window and a simple covariate. <>= chorley.extra$plotit() @ %$ \subsubsection*{\texttt{clmfires}: Castilla-La Mancha Fires} Forest fires in the Castilla-La Mancha region of Spain between 1998 and 2007. A point pattern with 4 columns of marks: \begin{tabular}{ll} \texttt{cause} & cause of fire\\ \texttt{burnt.area} & total area burned, in hectares \\ \texttt{date} & date of fire \\ \texttt{julian.date} & date of fire in days since 1.1.1998 \end{tabular} <>= plot(clmfires, which.marks="cause", cols=2:5, cex=0.25, main="Castilla-La Mancha forest fires") @ The accompanying dataset \texttt{clmfires.extra} is a list of two items \texttt{clmcov100} and \texttt{clmcov200} containing covariate information for the entire Castilla-La Mancha region. Each of these two elements is a list of four pixel images named \texttt{elevation}, \texttt{orientation}, \texttt{slope} and \texttt{landuse}. <>= plot(clmfires.extra$clmcov200, main="Covariates for forest fires") @ %$ \subsubsection*{\texttt{copper}: Queensland copper data} These data come from an intensive geological survey in central Queensland, Australia. They consist of 67 points representing copper ore deposits, and 146 line segments representing geological `lineaments', mostly faults. <>= plot(copper$Points, main="Copper") plot(copper$Lines, add=TRUE) @ \subsubsection*{\texttt{demohyper}} A synthetic example of a \texttt{hyperframe} for demonstration purposes. 
<>= plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }), parargs=list(mar=rep(1,4))) @ \subsubsection*{\texttt{demopat}} A synthetic example of a point pattern for demonstration purposes. <>= plot(demopat) @ \subsubsection*{\texttt{dendrite}} Dendrites are branching filaments which extend from the main body of a neuron (nerve cell) to propagate electrochemical signals. Spines are small protrusions on the dendrites. This dataset gives the locations of 566 spines observed on one branch of the dendritic tree of a rat neuron. The spines are classified according to their shape into three types: mushroom, stubby or thin. <>= plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4) @ \subsubsection*{\texttt{finpines}: Finnish pine saplings} Locations of 126 pine saplings in a Finnish forest, their heights and their diameters. <>= plot(finpines, main="Finnish pines") @ \subsubsection*{\texttt{flu}: Influenza virus proteins} The \texttt{flu} dataset contains replicated spatial point patterns giving the locations of two different virus proteins on the membranes of cells infected with influenza virus. It is a \texttt{hyperframe} containing point patterns and explanatory variables. <>= wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1") plot(flu[wildM1, 1, drop=TRUE], main=c("flu data", "wild type virus, M2-M1 stain"), chars=c(16,3), cex=0.4, cols=2:3) @ \subsubsection*{\texttt{gordon}: People in Gordon Square} Locations of people sitting on a grass patch on a sunny afternoon. <>= plot(gordon, main="People in Gordon Square", pch=16) @ \subsubsection*{\texttt{gorillas}: Gorilla nesting sites} Locations of nesting sites of gorillas, and associated covariates, in a National Park in Cameroon. \texttt{gorillas} is a marked point pattern (object of class \texttt{"ppp"}) representing nest site locations. \texttt{gorillas.extra} is a named list of 7 pixel images (objects of class \texttt{"im"}) containing spatial covariates. 
It also belongs to the class \texttt{"listof"}. <>= plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites") @ The \texttt{vegetation} covariate is also available as a raw ASCII format file, <>= system.file("rawdata/gorillas/vegetation.asc", package="spatstat") @ \subsubsection*{\texttt{hamster}: Hamster kidney cells} Cell nuclei in hamster kidney, each nucleus classified as either `dividing' or `pyknotic'. A multitype point pattern. <>= plot(hamster, cols=c(2,4)) @ \subsubsection*{\texttt{heather}: Heather mosaic} The spatial mosaic of vegetation of the heather plant, recorded in a 10 by 20 metre sampling plot in Sweden. A list with three entries, representing the same data at different spatial resolutions. <>= plot(heather) @ \subsubsection*{\texttt{humberside}: Childhood Leukemia and Lymphoma} Spatial locations of cases of childhood leukaemia and lymphoma, and randomly-selected controls, in North Humberside. A marked point pattern. <>= plot(humberside) @ The dataset \texttt{humberside.convex} is an object of the same format, representing the same point pattern data, but contained in a larger, 5-sided convex polygon. \subsubsection*{\texttt{hyytiala}: Mixed forest} Spatial locations and species classification for trees in a Finnish forest. <>= plot(hyytiala, cols=2:5) @ \subsubsection*{\texttt{japanesepines}: Japanese black pine saplings} Locations of Japanese black pine saplings in a square sampling region in a natural forest. Often used as a standard example. <>= plot(japanesepines) @ \subsubsection*{\texttt{lansing}: Lansing Woods} Locations and botanical classification of trees in a forest. A multitype point pattern with 6 different types of points. Includes duplicated points. <>= plot(lansing) @ <>= plot(split(lansing)) @ \subsubsection*{\texttt{longleaf}: Longleaf Pines} Locations and diameters of Longleaf pine trees. 
<>= plot(longleaf) @ \subsubsection*{\texttt{mucosa}: Gastric Mucosa Cells} A bivariate inhomogeneous point pattern, giving the locations of the centres of two types of cells in a cross-section of the gastric mucosa of a rat. <>= plot(mucosa, chars=c(1,3), cols=c("red", "green")) plot(mucosa.subwin, add=TRUE, lty=3) @ \subsubsection*{\texttt{murchison}: Murchison Gold Deposits} Spatial locations of gold deposits and associated geological features in the Murchison area of Western Australia. A list of three elements: \begin{itemize} \item \texttt{gold}, the point pattern of gold deposits; \item \texttt{faults}, the line segment pattern of geological faults; \item \texttt{greenstone}, the subregion of greenstone outcrop. \end{itemize} <>= plot(murchison$greenstone, main="Murchison data", col="lightgreen") plot(murchison$gold, add=TRUE, pch=3, col="blue") plot(murchison$faults, add=TRUE, col="red") @ \subsubsection*{\texttt{nbfires}: New Brunswick Fires} Fires in New Brunswick (Canada) with marks giving information about each fire. <>= plot(nbfires, use.marks=FALSE, pch=".") @ <>= plot(split(nbfires), use.marks=FALSE, chars=".") @ <>= par(mar=c(0,0,2,0)) plot(split(nbfires)$"2000", which.marks="fire.type", main=c("New Brunswick fires 2000", "by fire type"), cols=c("blue", "green", "red", "cyan"), leg.side="left") @ \subsubsection*{\texttt{nztrees}: New Zealand Trees} Locations of trees in a forest plot in New Zealand. Often used as a demonstration example. <>= plot(nztrees) plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3) @ \subsubsection*{\texttt{osteo}: Osteocyte Lacunae} Replicated three-dimensional point patterns: the three-dimensional locations of osteocyte lacunae observed in rectangular volumes of solid bone using a confocal microscope. A \texttt{hyperframe} containing 3D point patterns and explanatory variables. 
<>= plot(osteo[1:10,], main.panel="", pch=21, bg='white') @ For demonstration and instruction purposes, the raw data from the 36th point pattern are available in a plain ascii file in the \texttt{spatstat} installation, <>= system.file("rawdata/osteo/osteo36.txt", package="spatstat") @ \subsubsection*{\texttt{paracou}: Kimboto trees} Point pattern of adult and juvenile Kimboto trees recorded at Paracou in French Guiana. A bivariate point pattern. <>= plot(paracou, cols=2:3, chars=c(16,3)) @ \subsubsection*{\texttt{ponderosa}: Ponderosa Pines} Locations of Ponderosa Pine trees in a forest. Several special points are identified. <>= ponderosa.extra$plotit() @ %$ \subsubsection*{\texttt{pyramidal}: Pyramidal Neurons in Brain} Locations of pyramidal neurons in sections of human brain. There is one point pattern from each of 31 human subjects. The subjects are divided into three groups: controls (12 subjects), schizoaffective (9 subjects) and schizophrenic (10 subjects). <>= pyr <- pyramidal pyr$grp <- abbreviate(pyramidal$group, minlength=7) plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons") @ \subsubsection*{\texttt{redwood}, \texttt{redwood3}, \texttt{redwoodfull}: Redwood seedlings and saplings} California Redwood seedlings and saplings in a forest. There are two versions of this dataset: \texttt{redwood} and \texttt{redwoodfull}. The \texttt{redwoodfull} dataset is the full data. It is spatially inhomogeneous in density and spacing of points. The \texttt{redwood} dataset is a subset of the full data, selected because it is apparently homogeneous, and has often been used as a demonstration example. This comes in two versions commonly used in the literature: \texttt{redwood} (coordinates given to 2 decimal places) and \texttt{redwood3} (coordinates given to 3 decimal places). 
<>= plot(redwood) plot(redwood3, add=TRUE, pch=20) @ <>= redwoodfull.extra$plotit() @ %$ \subsubsection*{\texttt{residualspaper}: Data from residuals paper} Contains the point patterns used as examples in \begin{quote} A. Baddeley, R. Turner, J. M{\o}ller and M. Hazelton (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \textbf{67}, 617--666 \end{quote} along with {\sf R} code. <>= plot(as.listof(residualspaper[c("Fig1", "Fig4a", "Fig4b", "Fig4c")]), main="") @ \subsubsection*{\texttt{shapley}: Shapley Galaxy Concentration} Sky positions of 4215 galaxies in the Shapley Supercluster (mapped by radioastronomy). <>= shapley.extra$plotit(main="Shapley") @ %$ \subsubsection*{\texttt{simdat}: Simulated data} Another simulated dataset used for demonstration purposes. <>= plot(simdat) @ \subsubsection*{\texttt{spiders}: Spider webs} Spider webs across the mortar lines of a brick wall. A point pattern on a linear network. <>= plot(spiders, pch=16, show.window=FALSE) @ \subsubsection*{\texttt{sporophores}: Sporophores} Sporophores of three species of fungi around a tree. <>= plot(sporophores, chars=c(16,1,2), cex=0.6) points(0,0,pch=16, cex=2) text(15,8,"Tree", cex=0.75) @ \subsubsection*{\texttt{spruces}: Spruces in Saxony} Locations of Norwegian spruce trees in a natural forest stand in Saxonia, Germany. Each tree is marked with its diameter at breast height. <>= plot(spruces, maxsize=min(nndist(spruces))) @ \subsubsection*{\texttt{swedishpines}: Swedish Pines} Locations of pine saplings in a Swedish forest. Often used as a demonstration example. <>= plot(swedishpines) @ \subsubsection*{\texttt{urkiola}: trees in a wood} Locations of birch and oak trees in a secondary wood in Urkiola Natural Park (Basque country, northern Spain). Irregular window, bivariate point pattern. 
<>= plot(urkiola, cex=0.5, cols=2:3) @ \subsubsection*{\texttt{waka}: trees in Waka National Park} Spatial coordinates of each tree, marked by the tree diameter at breast height. <>= par(mar=c(0,0,2,0)) plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters")) @ \subsubsection*{\texttt{vesicles}: synaptic vesicles} Point pattern of synaptic vesicles observed in rat brain tissue. <>= v <- rotate(vesicles, pi/2) ve <- lapply(vesicles.extra, rotate, pi/2) plot(v, main="Vesicles") plot(ve$activezone, add=TRUE, lwd=3) @ The auxiliary dataset \texttt{vesicles.extra} is a list with entries\\ \begin{tabular}{ll} \texttt{presynapse} & outer polygonal boundary of presynapse \\ \texttt{mitochondria} & polygonal boundary of mitochondria \\ \texttt{mask} & binary mask representation of vesicles window \\ \texttt{activezone} & line segment pattern representing the active zone. \end{tabular} For demonstration and training purposes, the raw data files for this dataset are also provided in the \pkg{spatstat} package installation:\\ \begin{tabular}{ll} \texttt{vesicles.txt} & spatial locations of vesicles \\ \texttt{presynapse.txt} & vertices of \texttt{presynapse} \\ \texttt{mitochondria.txt} & vertices of \texttt{mitochondria} \\ \texttt{vesiclesimage.tif} & greyscale microscope image \\ \texttt{vesiclesmask.tif} & binary image of \texttt{mask} \\ \texttt{activezone.txt} & coordinates of \texttt{activezone} \end{tabular} The files are in the folder \texttt{rawdata/vesicles} in the \texttt{spatstat} installation directory. The precise location of the files can be obtained using \texttt{system.file}, for example <>= system.file("rawdata/vesicles/mitochondria.txt", package="spatstat") @ \subsubsection*{\texttt{waterstriders}: Insects on a pond} Three independent replications of a point pattern formed by insects on the surface of a pond. 
<>= plot(waterstriders) @ \end{document} spatstat/vignettes/irregpoly.eps0000755000176200001440000000646113115273007016663 0ustar liggesusers%!PS-Adobe-2.0 EPSF-2.0 %%Title: irregpoly.fig %%Creator: fig2dev Version 3.2 Patchlevel 5a %%CreationDate: Tue Nov 23 11:04:01 2010 %%BoundingBox: 0 0 226 144 %Magnification: 1.0000 %%EndComments %%BeginProlog /$F2psDict 200 dict def $F2psDict begin $F2psDict /mtrx matrix put /col-1 {0 setgray} bind def /col0 {0.000 0.000 0.000 srgb} bind def /col1 {0.000 0.000 1.000 srgb} bind def /col2 {0.000 1.000 0.000 srgb} bind def /col3 {0.000 1.000 1.000 srgb} bind def /col4 {1.000 0.000 0.000 srgb} bind def /col5 {1.000 0.000 1.000 srgb} bind def /col6 {1.000 1.000 0.000 srgb} bind def /col7 {1.000 1.000 1.000 srgb} bind def /col8 {0.000 0.000 0.560 srgb} bind def /col9 {0.000 0.000 0.690 srgb} bind def /col10 {0.000 0.000 0.820 srgb} bind def /col11 {0.530 0.810 1.000 srgb} bind def /col12 {0.000 0.560 0.000 srgb} bind def /col13 {0.000 0.690 0.000 srgb} bind def /col14 {0.000 0.820 0.000 srgb} bind def /col15 {0.000 0.560 0.560 srgb} bind def /col16 {0.000 0.690 0.690 srgb} bind def /col17 {0.000 0.820 0.820 srgb} bind def /col18 {0.560 0.000 0.000 srgb} bind def /col19 {0.690 0.000 0.000 srgb} bind def /col20 {0.820 0.000 0.000 srgb} bind def /col21 {0.560 0.000 0.560 srgb} bind def /col22 {0.690 0.000 0.690 srgb} bind def /col23 {0.820 0.000 0.820 srgb} bind def /col24 {0.500 0.190 0.000 srgb} bind def /col25 {0.630 0.250 0.000 srgb} bind def /col26 {0.750 0.380 0.000 srgb} bind def /col27 {1.000 0.500 0.500 srgb} bind def /col28 {1.000 0.630 0.630 srgb} bind def /col29 {1.000 0.750 0.750 srgb} bind def /col30 {1.000 0.880 0.880 srgb} bind def /col31 {1.000 0.840 0.000 srgb} bind def end /cp {closepath} bind def /ef {eofill} bind def /gr {grestore} bind def /gs {gsave} bind def /sa {save} bind def /rs {restore} bind def /l {lineto} bind def /m {moveto} bind def /rm {rmoveto} bind def /n {newpath} bind def /s {stroke} bind def /sh 
{show} bind def /slc {setlinecap} bind def /slj {setlinejoin} bind def /slw {setlinewidth} bind def /srgb {setrgbcolor} bind def /rot {rotate} bind def /sc {scale} bind def /sd {setdash} bind def /ff {findfont} bind def /sf {setfont} bind def /scf {scalefont} bind def /sw {stringwidth} bind def /tr {translate} bind def /tnt {dup dup currentrgbcolor 4 -2 roll dup 1 exch sub 3 -1 roll mul add 4 -2 roll dup 1 exch sub 3 -1 roll mul add 4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb} bind def /shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul 4 -2 roll mul srgb} bind def /$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def /$F2psEnd {$F2psEnteredState restore end} def /pageheader { save newpath 0 144 moveto 0 0 lineto 226 0 lineto 226 144 lineto closepath clip newpath -3.6 146.6 translate 1 -1 scale $F2psBegin 10 setmiterlimit 0 slj 0 slc 0.06299 0.06299 sc } bind def /pagefooter { $F2psEnd restore } bind def %%EndProlog pageheader % % Fig objects follow % % % here starts figure with depth 50 % Polyline 0 slj 0 slc 30.000 slw n 945 180 m 1170 1035 l 225 315 l 135 405 l 90 1215 l 675 1350 l 675 1665 l 135 1755 l 180 2205 l 990 2295 l 1260 1350 l 1530 1440 l 1440 2205 l 2250 2115 l 1890 1350 l 2520 1305 l 2250 1530 l 2475 2250 l 3330 2250 l 3330 1575 l 2790 1530 l 3600 1260 l 3465 720 l 2790 810 l 2475 765 l 3465 585 l 3510 360 l 2430 90 l 2115 225 l 2070 630 l 1800 945 l 1935 135 l 990 225 l gs col0 s gr % here ends figure; pagefooter showpage %%Trailer %EOF spatstat/vignettes/hexagon.eps0000755000176200001440000000577013115273007016302 0ustar liggesusers%!PS-Adobe-2.0 EPSF-2.0 %%Title: hexagon.fig %%Creator: fig2dev Version 3.2 Patchlevel 5a %%CreationDate: Tue Nov 23 11:04:35 2010 %%BoundingBox: 0 0 98 98 %Magnification: 1.0000 %%EndComments %%BeginProlog /$F2psDict 200 dict def $F2psDict begin $F2psDict /mtrx matrix put /col-1 {0 setgray} bind def /col0 {0.000 0.000 0.000 srgb} bind def /col1 {0.000 0.000 1.000 srgb} bind def /col2 {0.000 1.000 0.000 
srgb} bind def /col3 {0.000 1.000 1.000 srgb} bind def /col4 {1.000 0.000 0.000 srgb} bind def /col5 {1.000 0.000 1.000 srgb} bind def /col6 {1.000 1.000 0.000 srgb} bind def /col7 {1.000 1.000 1.000 srgb} bind def /col8 {0.000 0.000 0.560 srgb} bind def /col9 {0.000 0.000 0.690 srgb} bind def /col10 {0.000 0.000 0.820 srgb} bind def /col11 {0.530 0.810 1.000 srgb} bind def /col12 {0.000 0.560 0.000 srgb} bind def /col13 {0.000 0.690 0.000 srgb} bind def /col14 {0.000 0.820 0.000 srgb} bind def /col15 {0.000 0.560 0.560 srgb} bind def /col16 {0.000 0.690 0.690 srgb} bind def /col17 {0.000 0.820 0.820 srgb} bind def /col18 {0.560 0.000 0.000 srgb} bind def /col19 {0.690 0.000 0.000 srgb} bind def /col20 {0.820 0.000 0.000 srgb} bind def /col21 {0.560 0.000 0.560 srgb} bind def /col22 {0.690 0.000 0.690 srgb} bind def /col23 {0.820 0.000 0.820 srgb} bind def /col24 {0.500 0.190 0.000 srgb} bind def /col25 {0.630 0.250 0.000 srgb} bind def /col26 {0.750 0.380 0.000 srgb} bind def /col27 {1.000 0.500 0.500 srgb} bind def /col28 {1.000 0.630 0.630 srgb} bind def /col29 {1.000 0.750 0.750 srgb} bind def /col30 {1.000 0.880 0.880 srgb} bind def /col31 {1.000 0.840 0.000 srgb} bind def end /cp {closepath} bind def /ef {eofill} bind def /gr {grestore} bind def /gs {gsave} bind def /sa {save} bind def /rs {restore} bind def /l {lineto} bind def /m {moveto} bind def /rm {rmoveto} bind def /n {newpath} bind def /s {stroke} bind def /sh {show} bind def /slc {setlinecap} bind def /slj {setlinejoin} bind def /slw {setlinewidth} bind def /srgb {setrgbcolor} bind def /rot {rotate} bind def /sc {scale} bind def /sd {setdash} bind def /ff {findfont} bind def /sf {setfont} bind def /scf {scalefont} bind def /sw {stringwidth} bind def /tr {translate} bind def /tnt {dup dup currentrgbcolor 4 -2 roll dup 1 exch sub 3 -1 roll mul add 4 -2 roll dup 1 exch sub 3 -1 roll mul add 4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb} bind def /shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll 
mul 4 -2 roll mul srgb} bind def /$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def /$F2psEnd {$F2psEnteredState restore end} def /pageheader { save newpath 0 98 moveto 0 0 lineto 98 0 lineto 98 98 lineto closepath clip newpath -11.0 102.4 translate 1 -1 scale $F2psBegin 10 setmiterlimit 0 slj 0 slc 0.06299 0.06299 sc } bind def /pagefooter { $F2psEnd restore } bind def %%EndProlog pageheader % % Fig objects follow % % % here starts figure with depth 50 % Polyline 0 slj 0 slc 30.000 slw n 1485 1395 m 1683 657 l 1143 117 l 405 315 l 207 1053 l 747 1593 l cp gs col0 s gr % here ends figure; pagefooter showpage %%Trailer %EOF spatstat/vignettes/shapefiles.Rnw0000755000176200001440000004360113123356342016751 0ustar liggesusers\documentclass[twoside,11pt]{article} % \VignetteIndexEntry{Handling shapefiles in the spatstat package} \SweaveOpts{eps=TRUE} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage[colorlinks=true,urlcolor=blue]{hyperref} \usepackage{color} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \begin{document} %\bibliographystyle{plain} \thispagestyle{empty} <>= library(spatstat) options(useFancyQuotes=FALSE) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") @ \title{Handling shapefiles in the \texttt{spatstat} package} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{ \Sexpr{sdate} \\ \pkg{spatstat} version \texttt{\Sexpr{sversion}} } \maketitle This vignette explains how to read data into the \pkg{spatstat} package from files in the popular `shapefile' format. This vignette is part of the documentation included in \pkg{spatstat} version \texttt{\Sexpr{sversion}}. The information applies to \pkg{spatstat} versions \texttt{1.36-0} and above. 
\section{Shapefiles} A shapefile represents a list of spatial objects --- a list of points, a list of lines, or a list of polygonal regions --- and each object in the list may have additional variables attached to it. A dataset stored in shapefile format is actually stored in a collection of text files, for example \begin{verbatim} mydata.shp mydata.prj mydata.sbn mydata.dbf \end{verbatim} which all have the same base name \texttt{mydata} but different file extensions. To refer to this collection you will always use the filename with the extension \texttt{shp}, for example \texttt{mydata.shp}. \section{Helper packages} \label{S:helpers} We'll use two other packages% \footnote{In previous versions of \pkg{spatstat}, the package \pkg{gpclib} was also needed for some tasks. This is no longer required.} to handle shapefile data. The \pkg{maptools} package is designed specifically for handling file formats for spatial data. It contains facilities for reading and writing files in shapefile format. The \pkg{sp} package supports a standard set of spatial data types in \R. These standard data types can be handled by many other packages, so it is useful to convert your spatial data into one of the data types supported by \pkg{sp}. \section{How to read shapefiles into \pkg{spatstat}} To read shapefile data into \pkg{spatstat}, you follow two steps: \begin{enumerate} \item using the facilities of \pkg{maptools}, read the shapefiles and store the data in one of the standard formats supported by \pkg{sp}. \item convert the \pkg{sp} data type into one of the data types supported by \pkg{spatstat}. \end{enumerate} \subsection{Read shapefiles using \pkg{maptools}} Here's how to read shapefile data. \begin{enumerate} \item ensure that the package \pkg{maptools} is installed. You will need version \texttt{0.7-16} or later. 
\item start R and load the package: <>= library(maptools) @ \item read the shapefile into an object in the \pkg{sp} package using \texttt{readShapeSpatial}, for example <>= x <- readShapeSpatial("mydata.shp") @ \item To find out what kind of spatial objects are represented by the dataset, inspect its class: <>= class(x) @ The class may be either \texttt{SpatialPoints} indicating a point pattern, \texttt{SpatialLines} indicating a list of polygonal lines, or \texttt{SpatialPolygons} indicating a list of polygons. It may also be \texttt{SpatialPointsDataFrame}, \texttt{SpatialLinesDataFrame} or \texttt{SpatialPolygonsDataFrame} indicating that, in addition to the spatial objects, there is a data frame of additional variables. The classes \texttt{SpatialPixelsDataFrame} and \texttt{SpatialGridDataFrame} represent pixel image data. \end{enumerate} Here are some examples, using the example shapefiles supplied in the \pkg{maptools} package itself. % fake data because we don't want spatstat to depend on maptools <>= baltim <- columbus <- fylk <- list() class(baltim) <- "SpatialPointsDataFrame" class(columbus) <- "SpatialPolygonsDataFrame" class(fylk) <- "SpatialLinesDataFrame" @ <>= setwd(system.file("shapes", package="maptools")) baltim <- readShapeSpatial("baltim.shp") columbus <- readShapeSpatial("columbus.shp") fylk <- readShapeSpatial("fylk-val.shp") @ <<>>= class(baltim) class(columbus) class(fylk) @ \subsection{Convert data to \pkg{spatstat} format} To convert the dataset to an object in the \pkg{spatstat} package, the procedure depends on the type of data, as explained below. Both packages \pkg{maptools} and \pkg{spatstat} must be loaded in order to convert the data. \subsubsection{Objects of class \texttt{SpatialPoints}} An object \texttt{x} of class \texttt{SpatialPoints} represents a spatial point pattern. Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert it to a spatial point pattern in \pkg{spatstat}. 
(The conversion is performed by \texttt{as.ppp.SpatialPoints}, a function in \pkg{maptools}.) The window for the point pattern will be taken from the bounding box of the points. You will probably wish to change this window, usually by taking another dataset to provide the window information. Use \verb![.ppp! to change the window: if \texttt{X} is a point pattern object of class \verb!"ppp"! and \texttt{W} is a window object of class \verb!"owin"!, type <>= X <- X[W] @ \subsubsection{Objects of class \texttt{SpatialPointsDataFrame }} An object \texttt{x} of class \texttt{SpatialPointsDataFrame} represents a pattern of points with additional variables (`marks') attached to each point. It includes an object of class \texttt{SpatialPoints} giving the point locations, and a data frame containing the additional variables attached to the points. Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert an object \texttt{x} of class \texttt{SpatialPointsDataFrame} to a spatial point pattern in \pkg{spatstat}. In this conversion, the data frame of additional variables in \texttt{x} will become the \texttt{marks} of the point pattern \texttt{z}. <>= y <- as(x, "ppp") @ (The conversion is performed by \texttt{as.ppp.SpatialPointsDataFrame}, a function in \pkg{maptools}.) Before the conversion you can extract the data frame of auxiliary data by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. After the conversion you can extract these data by \verb!df <- marks(y)!. For example: <>= balt <- as(baltim, "ppp") bdata <- slot(baltim, "data") @ \subsubsection{Objects of class \texttt{SpatialLines}} \label{spatiallines.2.psp} A ``line segment'' is the straight line between two points in the plane. In the \pkg{spatstat} package, an object of class \texttt{psp} (``planar segment pattern'') represents a pattern of line segments, which may or may not be connected to each other (like matches which have fallen at random on the ground). 
In the \pkg{sp} package, an object of class \texttt{SpatialLines} represents a \textbf{list of lists} of \textbf{connected curves}, each curve consisting of a sequence of straight line segments that are joined together (like several pieces of a broken bicycle chain.) So these two data types do not correspond exactly. The list-of-lists hierarchy in a \texttt{SpatialLines} object is useful when representing internal divisions in a country. For example, if \texttt{USA} is an object of class \texttt{SpatialLines} representing the borders of the United States of America, then \verb!USA@lines! might be a list of length 52, with \verb!USA@lines[[i]]! representing the borders of the \texttt{i}-th State. The borders of each State consist of several different curved lines. Thus \verb!USA@lines[[i]]@Lines[[j]]! would represent the \texttt{j}th piece of the boundary of the \texttt{i}-th State. If \texttt{x} is an object of class \texttt{SpatialLines}, there are several things that you might want to do: \begin{enumerate} \item collect together all the line segments (all the segments that make up all the connected curves) and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a spatial line segment pattern. \end{quote} \item convert each connected curve to an object of class \texttt{psp}, keeping different connected curves separate. To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) @ The result will be a \textbf{list of lists} of objects of class \texttt{psp}. Each one of these objects represents a connected curve, although the \pkg{spatstat} package does not know that. The list structure will reflect the list structure of the original \texttt{SpatialLines} object \texttt{x}. If that's not what you want, then use \verb!curvelist <- do.call("c", out)! 
or <>= curvegroup <- lapply(out, function(z) { do.call("superimpose", z)}) @ to collapse the list-of-lists-of-\texttt{psp}'s into a list-of-\texttt{psp}'s. In the first case, \texttt{curvelist[[i]]} is a \texttt{psp} object representing the \texttt{i}-th connected curve. In the second case, \texttt{curvegroup[[i]]} is a \texttt{psp} object containing all the line segments in the \texttt{i}-th group of connected curves (for example the \texttt{i}-th State in the \texttt{USA} example). \end{enumerate} The window for the spatial line segment pattern can be specified as an argument \texttt{window} to the function \texttt{as.psp}. (The conversion is performed by \texttt{as.psp.SpatialLines} or \texttt{as.psp.Lines}, which are functions in \pkg{maptools}.) \subsubsection{Objects of class \texttt{SpatialLinesDataFrame}} An object \texttt{x} of class \texttt{SpatialLinesDataFrame} is a \texttt{SpatialLines} object with additional data. The additional data is stored as a data frame \verb!x@data! with one row for each entry in \verb!x@lines!, that is, one row for each group of connected curves. In the \pkg{spatstat} package, an object of class \texttt{psp} (representing a collection of line segments) may have a data frame of marks. Note that each \emph{line segment} in a \texttt{psp} object may have different mark values. If \texttt{x} is an object of class \texttt{SpatialLinesDataFrame}, there are two things that you might want to do: \begin{enumerate} \item collect together all the line segments that make up all the connected lines, and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a marked spatial line segment pattern. \end{quote} \item keep each connected curve separate, and convert each connected curve to an object of class \texttt{psp}. 
To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) dat <- x@data for(i in seq(nrow(dat))) out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE]) @ The result is a list-of-lists-of-\texttt{psp}'s. See the previous subsection for explanation on how to change this using \texttt{c()} or \texttt{superimposePSP}. \end{enumerate} In either case, the mark variables attached to a particular \emph{group of connected lines} in the \texttt{SpatialLinesDataFrame} object, will be duplicated and attached to each \emph{line segment} in the resulting \texttt{psp} object. \subsubsection{Objects of class \texttt{SpatialPolygons}} First, so that we don't go completely crazy, let's introduce some terminology. A \emph{polygon} is a closed curve that is composed of straight line segments. You can draw a polygon without lifting your pen from the paper. \setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(chorley) plot(as.owin(chorley), lwd=3, main="polygon") @ \end{center} A \emph{polygonal region} is a region in space whose boundary is composed of straight line segments. A polygonal region may consist of several unconnected pieces, and each piece may have holes. The boundary of a polygonal region consists of one or more polygons. To draw the boundary of a polygonal region, you may need to lift and drop the pen several times. \setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(demopat) plot(as.owin(demopat), col="blue", main="polygonal region") @ \end{center} An object of class \texttt{owin} in \pkg{spatstat} represents a polygonal region. It is a region of space that is delimited by boundaries made of lines. An object \texttt{x} of class \texttt{SpatialPolygons} represents a \textbf{list of polygonal regions}. For example, a single object of class \texttt{SpatialPolygons} could store information about every State in the United States of America (or the United States of Malaysia). 
Each State would be a separate polygonal region (and it might contain holes such as lakes). There are two things that you might want to do with an object of class \texttt{SpatialPolygons}: \begin{enumerate} \item combine all the polygonal regions together into a single polygonal region, and convert this to a single object of class \texttt{owin}. \begin{quote} For example, you could combine all the States of the USA together and obtain a single object that represents the territory of the USA. To do this, use \verb!as(x, "owin")! or \texttt{as.owin(x)}. The result is a single window (object of class \texttt{"owin"}) in the \pkg{spatstat} package. \end{quote} \item keep the different polygonal regions separate; convert each one of the polygonal regions to an object of class \texttt{owin}. \begin{quote} For example, you could keep the States of the USA separate, and convert each State to an object of class \texttt{owin}. \end{quote} To do this, type the following: <>= regions <- slot(x, "polygons") regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) }) windows <- lapply(regions, as.owin) @ The result is a list of objects of class \texttt{owin}. Often it would make sense to convert this to a tessellation object, by typing <>= te <- tess(tiles=windows) @ \end{enumerate} {\bf The following is different from what happened in previous versions of \pkg{spatstat}} (prior to version \texttt{1.36-0}.) During the conversion process, the geometry of the polygons will be automatically ``repaired'' if needed. Polygon data from shapefiles often contain geometrical inconsistencies such as self-intersecting boundaries and overlapping pieces. For example, these can arise from small errors in curve-tracing. Geometrical inconsistencies are tolerated in an object of class \texttt{SpatialPolygons} which is a list of lists of polygonal curves. 
However, they are not tolerated in an object of class \texttt{owin}, because an \texttt{owin} must specify a well-defined region of space. These data inconsistencies must be repaired to prevent technical problems. \pkg{Spatstat} uses polygon-clipping code to automatically convert polygonal lines into valid polygon boundaries. The repair process changes the number of vertices in each polygon, and the number of polygons (if you chose option 1). To disable the repair process, set \texttt{spatstat.options(fixpolygons=FALSE)}. \subsubsection{Objects of class \texttt{SpatialPolygonsDataFrame}} What a mouthful! An object \texttt{x} of class \texttt{SpatialPolygonsDataFrame} represents a list of polygonal regions, with additional variables attached to each region. It includes an object of class \texttt{SpatialPolygons} giving the spatial regions, and a data frame containing the additional variables attached to the regions. The regions are extracted by <>= y <- as(x, "SpatialPolygons") @ and you then proceed as above to convert the curves to \pkg{spatstat} format. The data frame of auxiliary data is extracted by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. For example: <>= cp <- as(columbus, "SpatialPolygons") cregions <- slot(cp, "polygons") cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) }) cwindows <- lapply(cregions, as.owin) @ There is currently no facility in \pkg{spatstat} for attaching marks to an \texttt{owin} object directly. However, \pkg{spatstat} supports objects called \textbf{hyperframes}, which are like data frames except that the entries can be any type of object. Thus we can represent the \texttt{columbus} data in \pkg{spatstat} as follows: <>= ch <- hyperframe(window=cwindows) ch <- cbind.hyperframe(ch, columbus@data) @ Then \texttt{ch} is a hyperframe containing a column of \texttt{owin} objects followed by the columns of auxiliary data. 
\subsubsection{Objects of class \texttt{SpatialGridDataFrame} and \texttt{SpatialPixelsDataFrame}} An object \texttt{x} of class \texttt{SpatialGridDataFrame} represents a pixel image on a rectangular grid. It includes a \texttt{SpatialGrid} object \texttt{slot(x, "grid")} defining the full rectangular grid of pixels, and a data frame \texttt{slot(x, "data")} containing the pixel values (which may include \texttt{NA} values). The command \texttt{as(x, "im")} converts \texttt{x} to a pixel image of class \texttt{"im"}, taking the pixel values from the \emph{first column} of the data frame. If the data frame has multiple columns, these have to be converted to separate pixel images in \pkg{spatstat}. For example <>= y <- as(x, "im") ylist <- lapply(slot(x, "data"), function(z, y) { y[,] <- z; y }, y=y) @ An object \texttt{x} of class \texttt{SpatialPixelsDataFrame} represents a \emph{subset} of a pixel image. To convert this to a \pkg{spatstat} object, it should first be converted to a \texttt{SpatialGridDataFrame} by \texttt{as(x, "SpatialGridDataFrame")}, then handled as described above. \end{document} spatstat/vignettes/irregpoly.pdf0000644000176200001440000000524113115225157016640 0ustar liggesusers%PDF-1.4 %쏢 5 0 obj <> stream xm=n1 =O0?}n #EJH6@ob%^EsMo? 
'P*fvjqp}]",D <qe]X/pRiuS\ *qX2B*XkӜ\lDU1J卼PX]rrZ0CP$!7D,(ƃ[Q5C2)RHQ5A70=x"EuZ1m=@4)YJ9VL,6}9f,q^ѕ:'1V8'3&R[UXߑX |endstream endobj 6 0 obj 311 endobj 4 0 obj <> /Contents 5 0 R >> endobj 3 0 obj << /Type /Pages /Kids [ 4 0 R ] /Count 1 >> endobj 1 0 obj <> endobj 7 0 obj <>endobj 8 0 obj <> endobj 9 0 obj <>stream 2013-12-23T19:50:47+08:00 2013-12-23T19:50:47+08:00 fig2dev Version 3.2 Patchlevel 5d irregpoly.fig endstream endobj 2 0 obj <>endobj xref 0 10 0000000000 65535 f 0000000605 00000 n 0000002179 00000 n 0000000546 00000 n 0000000415 00000 n 0000000015 00000 n 0000000396 00000 n 0000000669 00000 n 0000000710 00000 n 0000000739 00000 n trailer << /Size 10 /Root 1 0 R /Info 2 0 R /ID [] >> startxref 2368 %%EOF spatstat/vignettes/updates.Rnw0000644000176200001440000023463313166356056016310 0ustar liggesusers\documentclass[11pt]{article} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} % \VignetteIndexEntry{Summary of Recent Updates to Spatstat} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} <>= library(spatstat) sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Summary of recent updates to \spst} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \thispagestyle{empty} This is a summary of changes that have been made to the \spst\ package since the publication of the accompanying book \cite{baddrubaturn15}. The book, published in December 2015, covers everything in \spst\ up to version \texttt{1.42-0}, released in May 2015. 
<>= readSizeTable <- function(fname) { if(is.null(fname) || !file.exists(fname)) return(NULL) a <- read.table(fname, header=TRUE) a$date <- as.Date(a$date) return(a) } getSizeTable <- function(packagename="spatstat", tablename="packagesizes.txt") { fname <- system.file("doc", tablename, package=packagename) readSizeTable(fname) } counts <- c("nhelpfiles", "nobjects", "ndatasets", "Rlines", "srclines") mergeSizeTables <- function(a, b) { if(is.null(b)) return(a) for(i in seq_len(nrow(a))) { j <- which(b$date <= a$date[i]) if(length(j) > 0) a[i,counts] <- a[i,counts] + b[max(j), counts] } return(a) } z <- getSizeTable() zutils <- getSizeTable("spatstat.utils") zdata <- getSizeTable("spatstat.data") zlocal <- getSizeTable("spatstat", "spatstatlocalsize.txt") z <- mergeSizeTables(z, zutils) z <- mergeSizeTables(z, zdata) z <- mergeSizeTables(z, zlocal) # changes <- z[nrow(z), ] - z[z$version == "1.42-0", ] newobj <- changes[["nobjects"]] newdat <- changes[["ndatasets"]] + 1 # counting rule doesn't detect redwood3 @ %$ The current version of \spst\ is \texttt{\Sexpr{sversion}}. It contains \Sexpr{newobj} new functions and \Sexpr{newdat} new datasets introduced after May 2015. This document summarises the most important changes. This document also lists all \emph{important} bugs detected \emph{since 2010}. 
<>= options(SweaveHooks=list(fig=function() par(mar=0.2+c(2,4,2,0)))) Plot <- function(fmla, ..., dat=z) { yvals <- eval(as.expression(fmla[[2]]), envir=dat) plot(fmla, ..., data=dat, type="l", xlab="", lwd=2, ylim=c(0, max(yvals))) } @ \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.45\textwidth} \centerline{ <>= Plot((Rlines + srclines)/1000 ~ date, ylab="Lines of code (x 1000)", main="Spatstat growth") lines(srclines/1000 ~ date, data=z) text(as.Date("2015-01-01"), 9.5, "C code") text(as.Date("2015-01-01"), 60, "R code") @ } \tableofcontents \newpage \section{\pkg{spatstat} is splitting into parts} \pkg{spatstat} is being split into several sub-packages, to satisfy the requirements of CRAN. This should not affect the user: existing code will continue to work in the same way. Currently there are two sub-packages, called \pkg{spatstat.utils} and \pkg{spatstat}. Typing \code{library(spatstat)} will load the familiar \pkg{spatstat} package which can be used as before, and will silently import the \pkg{spatstat.utils} package. The \pkg{spatstat.utils} package contains utility functions that were originally written for \pkg{spatstat}: they were undocumented internal functions in \pkg{spatstat}, but are now documented and accessible in a separate package because they may be useful for other purposes. To access these functions, you need to type \code{library(spatstat.utils)}. \section{Precis of all changes} Here is the text from the `overview' sections of the News and Release Notes for each update. \begin{itemize} \item \spst\ now Imports the package \pkg{spatstat.utils}. \item \spst\ now requires the package \pkg{spatstat.data} which contains the datasets. \item \spst\ now suggests the package \pkg{fftwtools}. \item Improvements to \texttt{ppm} and \texttt{update.ppm}. \item Correction to \texttt{lohboot} \item Numerous bug fixes for linear networks code. \item Now handles disconnected linear networks. \item Effect function is now available for all types of fitted model. 
\item Geometric-mean smoothing. \item A model can be fitted or re-fitted to a sub-region of data. \item New fast algorithm for kernel smoothing on a linear network. \item Leverage and influence diagnostics extended to Poisson/Gibbs models fitted by logistic composite likelihood. \item Two-stage Monte Carlo test. \item Dirichlet/Voronoi tessellation on a linear network. \item Thinning of point patterns on a linear network. \item More support for functions and tessellations on a linear network. \item Bandwidth selection for pair correlation function. \item Pooling operations improved. \item Operations on signed measures. \item Operations on lists of pixel images. \item Improved pixellation of point patterns. \item Stieltjes integral extended. \item Subset operators extended. \item Greatly accelerated \texttt{rmh} when using \texttt{nsave} \item Sufficient Dimension Reduction for point processes. \item Alternating Gibbs Sampler for point process simulation. \item New class of spatially sampled functions. \item ROC and AUC extended to other types of point patterns and models. \item More support for linear networks. \item More support for infinite straight lines. \item \spst\ now depends on the packages \pkg{nlme} and \pkg{rpart}. \item Important bug fix in \code{linearK}, \code{linearpcf} \item Changed internal format of \code{linnet} and \code{lpp} objects. \item Faster computation in linear networks. \item Bias correction techniques. \item Bounding circle of a spatial object. \item Option to plot marked points as arrows. \item Kernel smoothing accelerated. \item Workaround for bug in some graphics drivers affecting image orientation. \item Non-Gaussian smoothing kernels. \item Improvements to inhomogeneous multitype $K$ and $L$ functions. \item Variance approximation for pair correlation function. \item Leverage and influence for multitype point process models. \item Functions for extracting components of vector-valued objects. 
\item Recursive-partition point process models. \item Minkowski sum, morphological dilation and erosion with any shape. \item Minkowski sum also applicable to point patterns and line segment patterns. \item Important bug fix in Smooth.ppp \item Important bug fix in spatial CDF tests. \item More bug fixes for replicated patterns. \item Simulate a model fitted to replicated point patterns. \item Inhomogeneous multitype $F$ and $G$ functions. \item Summary functions recognise \texttt{correction="all"} \item Leverage and influence code handles bigger datasets. \item More support for pixel images. \item Improved progress reports. \item New dataset \texttt{redwood3} \item Fixed namespace problems arising when spatstat is not loaded. \item Important bug fix in leverage/influence diagnostics for Gibbs models. \item Surgery with linear networks. \item Tessellations on a linear network. \item Laslett's Transform. \item Colour maps for point patterns with continuous marks are easier to define. \item Pair correlation function estimates can be pooled. \item Stipulate a particular version of a package. \item More support for replicated point patterns. \item More support for tessellations. \item More support for multidimensional point patterns and point processes. \item More options for one-sided envelopes. \item More support for model comparison. \item Convexifying operation. \item Subdivide a linear network. \item Penttinen process can be simulated (by Metropolis-Hastings or CFTP). \item Calculate the predicted variance of number of points. \item Accelerated algorithms for linear networks. \item Quadrat counting accelerated, in some cases. \item Simulation algorithms have been accelerated; simulation outcomes are \emph{not} identical to those obtained from previous versions of \spst. \item Determinantal point process models. \item Random-effects and mixed-effects models for replicated patterns. \item Dao-Genton test, and corresponding simulation envelopes. 
\item Simulated annealing and simulated tempering. \item spatstat colour tools now handle transparent colours. \item Improvements to \verb![! and \texttt{subset} methods \item Extensions to kernel smoothing on a linear network. \item Support for one-dimensional smoothing kernels. \item Mark correlation function may include weights. \item Cross-correlation version of the mark correlation function. \item Penttinen pairwise interaction model. \item Improvements to simulation of Neyman-Scott processes. \item Improvements to fitting of Neyman-Scott models. \item Extended functionality for pixel images. \item Fitted intensity on linear network \item Triangulation of windows. \item Corrected an edge correction. \end{itemize} \section{New datasets} The following datasets have been added to the package. \begin{itemize} \item \texttt{austates}: The states and large mainland territories of Australia represented as polygonal regions forming a tessellation. \item \texttt{redwood3}: a more accurate version of the \texttt{redwood} data. \end{itemize} \section{New classes} \begin{itemize} \item \texttt{ssf}: Class of spatially sampled functions. \end{itemize} \section{New Functions} Following is a list of all the functions that have been added. \begin{itemize} \item \texttt{fitin.profilepl}: Extract the fitted interaction from a model fitted by profile likelihood. \item \verb![<-.linim!: Subset assignment method for pixel images on a linear network. \item \texttt{nnfromvertex}: Given a point pattern on a linear network, find the nearest data point from each vertex of the network. \item \texttt{tile.lengths}: Calculate the length of each tile in a tessellation on a network. \item \texttt{text.ppp}, \texttt{text.lpp}, \texttt{text.psp}: Methods for \texttt{text} for spatial patterns. \item \texttt{as.data.frame.envelope}: Extract function data from an envelope object, including the functions for the simulated data ('simfuns') if they were saved. 
\item \texttt{is.connected}, \texttt{is.connected.default}, \texttt{is.connected.linnet}: Determines whether a spatial object consists of one topologically connected piece, or several pieces. \item \texttt{is.connected.ppp}: Determines whether a point pattern is connected after all pairs of points closer than distance R are joined. \item \texttt{hist.funxy}: Histogram of values of a spatial function. \item \texttt{model.matrix.ippm}: Method for \texttt{model.matrix} which allows computation of regular and irregular score components. \item \texttt{harmonise.msr}: Convert several measures (objects of class \texttt{msr}) to a common quadrature scheme. \item \texttt{bits.test}: Balanced Independent Two-Stage Monte Carlo test, an improvement on the Dao-Genton test. \item \texttt{lineardirichlet}: Computes the Dirichlet-Voronoi tessellation associated with a point pattern on a linear network. \item \texttt{domain.lintess}, \texttt{domain.linfun}: Extract the linear network from a \texttt{lintess} or \texttt{linfun} object. \item \texttt{summary.lintess}: Summary of a tessellation on a linear network. \item \texttt{clicklpp}: Interactively add points on a linear network. \item \texttt{envelopeArray}: Generate an array of envelopes using a function that returns \texttt{fasp} objects. \item \texttt{bw.pcf}: Bandwidth selection for pair correlation function. \item \texttt{grow.box3}: Expand a three-dimensional box. \item \texttt{hexagon}, \texttt{regularpolygon}: Create regular polygons. \item \texttt{Ops.msr}: Arithmetic operations for measures. \item \texttt{Math.imlist}, \texttt{Ops.imlist}, \texttt{Summary.imlist}, \texttt{Complex.imlist}: Arithmetic operations for lists of pixel images. \item \texttt{measurePositive}, \texttt{measureNegative}, \texttt{measureVariation}, \texttt{totalVariation}: Positive and negative parts of a measure, and variation of a measure. 
\item \texttt{as.function.owin}: Convert a spatial window to a \texttt{function(x,y)}, the indicator function. \item \texttt{as.function.ssf}: Convert an object of class \texttt{ssf} to a \texttt{function(x,y)} \item \texttt{as.function.leverage.ppm} Convert an object of class \texttt{leverage.ppm} to a \texttt{function(x,y)} \item \texttt{sdr}, \texttt{dimhat}: Sufficient Dimension Reduction for point processes. \item \texttt{simulate.rhohat}: Simulate a Poisson point process with the intensity estimated by \texttt{rhohat}. \item \texttt{rlpp}: Random points on a linear network with a specified probability density. \item \texttt{cut.lpp}: Method for \texttt{cut} for point patterns on a linear network. \item \texttt{has.close}: Faster way to check whether a point has a close neighbour. \item \texttt{psib}: Sibling probability (index of clustering strength in a cluster process). \item \texttt{rags}, \texttt{ragsAreaInter}, \texttt{ragsMultiHard}: Alternating Gibbs Sampler for point processes. \item \texttt{bugfixes}: List all bug fixes in recent versions of a package. 
\item \texttt{ssf}: Create a spatially sampled function \item \texttt{print.ssf}, \texttt{plot.ssf}, \texttt{contour.ssf}, \texttt{image.ssf}: Display a spatially sampled function \item \texttt{as.im.ssf}, \texttt{as.ppp.ssf}, \texttt{marks.ssf}, \verb!marks<-.ssf!, \texttt{unmark.ssf}, \verb![.ssf!, \texttt{with.ssf}: Manipulate data in a spatially sampled function \item \texttt{Smooth.ssf}: Smooth a spatially sampled function \item \texttt{integral.ssf}: Approximate integral of spatially sampled function \item \texttt{roc.kppm}, \texttt{roc.lppm}, \texttt{roc.lpp}: Methods for \texttt{roc} for fitted models of class \texttt{"kppm"} and \texttt{"lppm"} and point patterns of class \texttt{"lpp"} \item \texttt{auc.kppm}, \texttt{auc.lppm}, \texttt{auc.lpp}: Methods for \texttt{auc} for fitted models of class \texttt{"kppm"} and \texttt{"lppm"} and point patterns of class \texttt{"lpp"} \item \texttt{timeTaken}: Extract the timing data from a \texttt{"timed"} object or objects. \item \texttt{rotate.infline}, \texttt{shift.infline}, \texttt{reflect.infline}, \texttt{flipxy.infline}: Geometrical transformations for infinite straight lines. \item \texttt{whichhalfplane}: Determine which side of an infinite line a point lies on. \item \texttt{matrixpower}, \texttt{matrixsqrt}, \texttt{matrixinvsqrt}: Raise a matrix to any power. \item \texttt{points.lpp}: Method for \texttt{points} for point patterns on a linear network. \item \texttt{pairs.linim}: Pairs plot for images on a linear network. \item \texttt{closetriples}: Find close triples of points. \item \texttt{anyNA.im}: Method for \texttt{anyNA} for pixel images. \item \texttt{bc}: Bias correction (Newton-Raphson) for fitted model parameters. \item \texttt{rex}: Richardson extrapolation for numerical integrals and statistical model parameter estimates. \item \texttt{boundingcircle}, \texttt{boundingcentre}: Find the smallest circle enclosing a window or point pattern. \item \verb![.linim! 
: Subset operator for pixel images on a linear network. \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}: The mean, median, or quantiles of pixel values in a pixel image on a linear network. \item \texttt{weighted.median}, \texttt{weighted.quantile}: Median or quantile of numerical data with associated weights. \item \verb!"[.linim"!: Subset operator for pixel images on a linear network. \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}: The mean, median, or quantiles of pixel values in a pixel image on a linear network. \item \texttt{boundingcircle}, \texttt{boundingcentre}: Smallest circle enclosing a spatial object. \item \texttt{split.msr}: Decompose a measure into parts. \item \texttt{unstack.msr}: Decompose a vector-valued measure into its component measures. \item \texttt{unstack.ppp}, \texttt{unstack.psp}, \texttt{unstack.lpp}: Given a spatial pattern with several columns of marks, separate the columns and return a list of spatial patterns, each having only one column of marks. \item \texttt{kernel.squint}: Integral of squared kernel, for the kernels used in density estimation. \item \texttt{as.im.data.frame}: Build a pixel image from a data frame of coordinates and pixel values. \item \texttt{covering}: Cover a window using discs of a given radius. \item \texttt{dilationAny}, \texttt{erosionAny}, \verb!%(-)%! : Morphological dilation and erosion by any shape. \item \texttt{FmultiInhom}, \texttt{GmultiInhom} Inhomogeneous multitype/marked versions of the summary functions \texttt{Fest}, \texttt{Gest}. \item \texttt{kernel.moment} Moment or incomplete moment of smoothing kernel. \item \texttt{MinkowskiSum}, \verb!%(+)%!: Minkowski sum of two windows: \verb!A %(+)% B!, or \texttt{MinkowskiSum(A,B)} \item \texttt{nobjects}: New generic function for counting the number of 'things' in a dataset. There are methods for \texttt{ppp}, \texttt{ppx}, \texttt{psp}, \texttt{tess}. 
\item \texttt{parameters.interact}, \texttt{parameters.fii}: Extract parameters from interpoint interactions. (These existing functions are now documented.) \item \texttt{ppmInfluence}: Calculate \texttt{leverage.ppm}, \texttt{influence.ppm} and \texttt{dfbetas.ppm} efficiently. \item \texttt{rppm}, \texttt{plot.rppm}, \texttt{predict.rppm}, \texttt{prune.rppm}: Recursive-partition point process models. \item \texttt{simulate.mppm} Simulate a point process model fitted to replicated point patterns. \item \texttt{update.interact}: Update the parameters of an interpoint interaction. [This existing function is now documented.] \item \texttt{where.max}, \texttt{where.min} Find the spatial location(s) where a pixel image achieves its maximum or minimum value. \item \texttt{compileK}, \texttt{compilepcf}: make a $K$ function or pair correlation function given the pairwise distances and their weights. [These existing internal functions are now documented.] \item \texttt{laslett}: Laslett's Transform. \item \texttt{lintess}: Tessellation on a linear network. \item \texttt{divide.linnet}: Divide a linear network into pieces demarcated by a point pattern. \item \texttt{insertVertices}: Insert new vertices in a linear network. \item \texttt{thinNetwork}: Remove vertices and/or segments from a linear network etc. \item \texttt{connected.linnet}: Find connected components of a linear network. \item \texttt{nvertices}, \texttt{nvertices.linnet}, \texttt{nvertices.owin}: Count the number of vertices in a linear network or vertices of the boundary of a window. \item \texttt{as.data.frame.linim}, \texttt{as.data.frame.linfun}: Extract a data frame of spatial locations and function values from an object of class \texttt{linim} or \texttt{linfun}. \item \texttt{as.linfun}, \texttt{as.linfun.linim}, \texttt{as.linfun.lintess}: Convert other kinds of data to a \texttt{linfun} object. 
\item \texttt{requireversion}: Require a particular version of a package (for use in stand-alone R scripts). \item \texttt{as.function.tess}: Convert a tessellation to a \texttt{function(x,y)}. The function value indicates which tile of the tessellation contains the point $(x,y)$. \item \texttt{tileindex}: Determine which tile of a tessellation contains a given point $(x,y)$. \item \texttt{persp.leverage.ppm}: Method for persp plots for objects of class \texttt{leverage.ppm} \item \texttt{AIC.mppm}, \texttt{extractAIC.mppm}: AIC for point process models fitted to replicated point patterns. \item \texttt{nobs.mppm}, \texttt{terms.mppm}, \texttt{getCall.mppm}: Methods for point process models fitted to replicated point patterns. \item \texttt{rPenttinen}: Simulate the Penttinen process using perfect simulation. \item \texttt{varcount}: Given a point process model, compute the predicted variance of the number of points falling in a window. \item \texttt{inside.boxx}: Test whether multidimensional points lie inside a specified multidimensional box. \item \texttt{lixellate}: Divide each segment of a linear network into smaller segments. \item \texttt{nsegments.linnet}, \texttt{nsegments.lpp}: Count the number of line segments in a linear network. \item \texttt{grow.boxx}: Expand a multidimensional box. \item \texttt{deviance.ppm}, \texttt{deviance.lppm}: Deviance for a fitted point process model. \item \texttt{pseudoR2}: Pseudo-R-squared for a fitted point process model. \item \texttt{tiles.empty} Checks whether each tile of a tessellation is empty or nonempty. \item \texttt{summary.linim}: Summary for a pixel image on a linear network. \item Determinantal Point Process models: \begin{itemize} \item \texttt{dppm}: Fit a determinantal point process model. \item \texttt{fitted.dppm}, \texttt{predict.dppm}, \texttt{intensity.dppm}: prediction for a fitted determinantal point process model. 
\item \texttt{Kmodel.dppm}, \texttt{pcfmodel.dppm}: Second moments of a determinantal point process model. \item \texttt{rdpp}, \texttt{simulate.dppm}: Simulation of a determinantal point process model. \item \texttt{logLik.dppm}, \texttt{AIC.dppm}, \texttt{extractAIC.dppm}, \texttt{nobs.dppm}: Likelihood and AIC for a fitted determinantal point process model. \item \texttt{print.dppm}, \texttt{reach.dppm}, \texttt{valid.dppm}: Basic information about a \texttt{dpp} model. \item \texttt{coef.dppm}, \texttt{formula.dppm}, \texttt{print.dppm}, \texttt{terms.dppm}, \texttt{labels.dppm}, \texttt{model.frame.dppm}, \texttt{model.matrix.dppm}, \texttt{model.images.dppm}, \texttt{is.stationary.dppm}, \texttt{reach.dppm}, \texttt{unitname.dppm}, \verb!unitname<-.dppm!, \texttt{Window.dppm}: Various methods for \texttt{dppm} objects. \item \texttt{parameters.dppm}: Extract meaningful list of model parameters. \item \texttt{objsurf.dppm}: Objective function surface of a \texttt{dppm} object. \item \texttt{residuals.dppm}: Residual measure for a \texttt{dppm} object. \end{itemize} \item Determinantal Point Process model families: \begin{itemize} \item \texttt{dppBessel}, \texttt{dppCauchy}, \texttt{dppGauss}, \texttt{dppMatern}, \texttt{dppPowerExp}: Determinantal Point Process family functions. \item \texttt{detpointprocfamilyfun}: Create a family function. \item \texttt{update.detpointprocfamily}: Set parameter values in a determinantal point process model family. \item \texttt{simulate.dppm}: Simulation. \item \texttt{is.stationary.detpointprocfamily}, \texttt{intensity.detpointprocfamily}, \texttt{Kmodel.detpointprocfamily}, \texttt{pcfmodel.detpointprocfamily}: Moments. \item \texttt{dim.detpointprocfamily}, \texttt{dppapproxkernel}, \texttt{dppapproxpcf}, \texttt{dppeigen}, \texttt{dppkernel}, \texttt{dppparbounds}, \texttt{dppspecdenrange}, \texttt{dppspecden}: Helper functions. 
\end{itemize} \item \texttt{dg.envelope}: Simulation envelopes corresponding to Dao-Genton test. \item \texttt{dg.progress}: Progress plot (envelope representation) for the Dao-Genton test. \item \texttt{dg.sigtrace}: significance trace for the Dao-Genton test. \item \texttt{markcrosscorr}: Mark cross-correlation function for point patterns with several columns of marks. \item \texttt{rtemper}: Simulated annealing or simulated tempering. \item \texttt{rgb2hsva}: Convert RGB to HSV data, like \texttt{rgb2hsv}, but preserving transparency. \item \texttt{superimpose.ppplist}, \texttt{superimpose.splitppp}: New methods for 'superimpose' for lists of point patterns. \item \texttt{dkernel}, \texttt{pkernel}, \texttt{qkernel}, \texttt{rkernel}: Probability density, cumulative probability, quantiles and random generation from distributions used in basic one-dimensional kernel smoothing. \item \texttt{kernel.factor}: Auxiliary calculations for one-dimensional kernel smoothing. \item \texttt{spatdim}: Spatial dimension of any object in the \spst\ package. \item \texttt{as.boxx}: Convert data to a multi-dimensional box. \item \texttt{intensity.ppx}: Method for \texttt{intensity} for multi-dimensional space-time point patterns. \item \texttt{fourierbasis}: Evaluate Fourier basis functions in any number of dimensions. \item \texttt{valid}: New generic function, with methods \texttt{valid.ppm}, \texttt{valid.lppm}, \texttt{valid.dppm}. \item \texttt{emend}, \texttt{emend.ppm}, \texttt{emend.lppm}: New generic function with methods for \texttt{ppm} and \texttt{lppm}. \texttt{emend.ppm} is equivalent to \texttt{project.ppm}. \item \texttt{Penttinen}: New pairwise interaction model. \item \texttt{quantile.density}: Calculates quantiles from kernel density estimates. \item \texttt{CDF.density}: Calculates cumulative distribution function from kernel density estimates. \item \texttt{triangulate.owin}: decompose a spatial window into triangles. 
\item \texttt{fitted.lppm}: fitted intensity values for a point process on a linear network. \item \texttt{parameters}: Extract all parameters from a fitted model. \end{itemize} \section{Alphabetical list of changes} Here is a list of all changes made to existing functions, listed alphabetically. \begin{itemize} %%A \item \texttt{affine.owin}: Allows transformation matrix to be singular, if the window is polygonal. \item \texttt{anova.mppm}: Now handles Gibbs models, and performs the adjusted composite likelihood ratio test. New argument \texttt{fine}. \item \texttt{as.function.tess}: New argument \texttt{values} specifies the function values. \item \texttt{as.im.distfun}: New argument \texttt{approx} specifies the choice of algorithm. \item \texttt{as.im.function}: New argument \texttt{strict}. \item \texttt{as.layered}: Default method now handles a (vanilla) list of spatial objects. \item \texttt{as.linfun.lintess}: \begin{itemize} \item New argument \texttt{values} specifies the function value for each tile. \item New argument \texttt{navalue}. \end{itemize} \item \texttt{as.linim.default}: New argument \texttt{delta} controls spacing of sample points in internal data. \item \texttt{as.linnet.psp}: If the line segment pattern has marks, then the resulting linear network also carries these marks in the \verb!$lines! component. \item \texttt{as.owin}: Now refuses to convert a \code{box3} to a two-dimensional window. \item \texttt{as.owin.data.frame}: New argument \texttt{step} \item \texttt{as.polygonal}: \begin{itemize} \item Can now repair errors in polygon data, if \texttt{repair=TRUE}. \item Accelerated when \texttt{w} is a pixel mask. \end{itemize} \item \texttt{as.solist}: The argument \texttt{x} can now be a spatial object; \texttt{as.solist(cells)} is the same as \texttt{solist(cells)}. %%B \item \texttt{bdist.pixels}: Accelerated for polygonal windows. New argument \texttt{method}. \item \texttt{bind.fv}: New argument \texttt{clip}. 
\item \texttt{bw.ppl}: New arguments \texttt{weights} and \texttt{sigma}. \item \texttt{bw.diggle}, \texttt{bw.ppl}, \texttt{bw.relrisk}, \texttt{bw.smoothppp}, These functions now extract and store the name of the unit of length from the point pattern dataset. When the bandwidth selection criterion is plotted, the name of the unit of length is shown on the x-axis. %%C \item \texttt{cdf.test}: \begin{itemize} \item Calculations are more robust against numerical rounding effects. \item The methods for classes \texttt{ppp}, \texttt{ppm}, \texttt{lpp}, \texttt{lppm}, \texttt{slrm} have a new argument \texttt{interpolate}. \end{itemize} \item \texttt{cdf.test.mppm}: \begin{itemize} \item Now handles Gibbs models. \item Now recognises \texttt{covariate="x"} or \texttt{"y"}. \end{itemize} \item \texttt{clarkevans}: The argument \texttt{correction="all"} is now recognised: it selects all the available options. [This is also the default.] \item \texttt{clickpoly}: The polygon is now drawn progressively as the user clicks new vertices. \item \texttt{closepairs.ppp}, \texttt{closepairs.pp3}: \begin{itemize} \item New arguments \texttt{distinct} and \texttt{neat} allow more options. \item Argument \texttt{ordered} has been replaced by \texttt{twice} (but \texttt{ordered} is still accepted, with a warning). \item Performance improved (computation time and memory requirements reduced.) This should improve the performance of many functions in \texttt{spatstat}. \end{itemize} \item \texttt{clusterset}: Improved behaviour. \item \texttt{clusterfit}: New argument \texttt{algorithm} specifies the choice of optimisation algorithm. \item \texttt{collapse.fv}: This is now treated as a method for the \texttt{nlme} generic \texttt{collapse}. Its syntax has been adjusted slightly. \item \texttt{connected.im}: Now handles a logical-valued image properly. Arguments \texttt{...} now determine pixel resolution. 
\item \texttt{connected.owin}: Arguments \texttt{...} now determine pixel resolution. \item \texttt{contour.im}: New argument \texttt{col} specifies the colour of the contour lines. If \texttt{col} is a colour map, then the contours are drawn in different colours. \item \texttt{crossing.psp}: New argument \texttt{details} gives more information about the intersections between the segments. \item \texttt{cut.ppp}: Argument \texttt{z} can be \texttt{"x"} or \texttt{"y"} indicating one of the spatial coordinates. %%D \item \texttt{dclf.test, mad.test, dclf.progress, mad.progress,} \texttt{dclf.sigtrace, mad.sigtrace}, \texttt{dg.progress, dg.sigtrace}: \begin{itemize} \item New argument \texttt{clamp} determines the test statistic for one-sided tests. \item New argument \texttt{rmin} determines the left endpoint of the test interval. \item New argument \texttt{leaveout} specifies how to calculate discrepancy between observed and simulated function values. \item New argument \texttt{scale} allows summary function values to be rescaled before the comparison is performed. \item New argument \texttt{interpolate} supports interpolation of $p$-value. \item New argument \texttt{interpolate} supports interpolation of critical value of test. \end{itemize} \item \texttt{default.rmhcontrol, default.rmhexpand}: New argument \texttt{w}. \item \texttt{density.lpp}: \begin{itemize} \item New fast algorithm (up to 1000 times faster) for the default case where \texttt{kernel="gaussian"} and \texttt{continuous=TRUE}. Generously contributed by Greg McSwiggan. \item Fast algorithm has been further accelerated. \item New argument \texttt{kernel} specifies the smoothing kernel. Any of the standard one-dimensional smoothing kernels can be used. \item Now supports both the `equal-split continuous' and `equal-split discontinuous' smoothers. New argument \texttt{continuous} determines the choice of smoother. \item New arguments \texttt{weights} and \texttt{old}. 
\end{itemize} \item \texttt{density.ppp}: \begin{itemize} \item A non-Gaussian kernel can now be specified using the argument \texttt{kernel}. \item Argument \texttt{weights} can now be a pixel image. \item Accelerated by about 30\% when \texttt{at="pixels"}. \item Accelerated by about 15\% in the case where \texttt{at="points"} and \texttt{kernel="gaussian"}. \item Accelerated in the cases where weights are given or \texttt{diggle=TRUE}. \item New argument \texttt{verbose}. \end{itemize} \item \texttt{density.psp}: \begin{itemize} \item New argument \texttt{method}. \item Accelerated by 1 to 2 orders of magnitude. \end{itemize} \item \texttt{dfbetas.ppm}: For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. \item \texttt{diagnose.ppm}: Infinite values of \texttt{rbord} are now ignored and treated as zero. This ensures that \texttt{diagnose.ppm} has a sensible default when the fitted model has infinite reach. \item \texttt{diagnose.ppm, plot.diagppm}: New arguments \texttt{col.neg, col.smooth} control the colour maps. \item \texttt{dilation.ppp}: Improved geometrical accuracy. Now accepts arguments to control resolution of polygonal approximation. \item \texttt{discs}: \begin{itemize} \item Now accepts a single numeric value for \texttt{radii}. \item New argument \texttt{npoly}. \item Accelerated in some cases. \end{itemize} \item \texttt{distfun}: When the user calls a distance function that was created by \texttt{distfun}, the user may now give a \texttt{ppp} or \texttt{lpp} object for the argument \texttt{x}, instead of giving two coordinate vectors \texttt{x} and \texttt{y}. %%E \item \texttt{edge.Trans}: New argument \texttt{gW} for efficiency. \item \texttt{effectfun}: Now works for \texttt{ppm}, \texttt{kppm}, \texttt{lppm}, \texttt{dppm}, \texttt{rppm} and \texttt{profilepl} objects. 
\item \texttt{envelope}: \begin{itemize} \item New argument \texttt{clamp} gives greater control over one-sided envelopes. \item New argument \texttt{funargs} \item New argument \texttt{scale} allows global envelopes to have width proportional to a specified function of $r$, rather than constant width. \item New argument \texttt{funYargs} contains arguments to the summary function when applied to the data pattern only. \end{itemize} \item \texttt{envelope.lpp}, \texttt{envelope.lppm}: New arguments \texttt{fix.n} and \texttt{fix.marks} allow envelopes to be computed using simulations conditional on the observed number of points. \item \texttt{ewcdf}: Argument \texttt{weights} can now be \texttt{NULL}. %%F \item \texttt{Fest}: Additional checks for errors in input data. \item \texttt{fitted.lppm}: New argument \texttt{leaveoneout} allows leave-one-out computation of fitted value. \item \texttt{fitted.ppm}: New option, \texttt{type="link"}. \item \texttt{funxy}: When the user calls a function that was created by \texttt{funxy}, the user may now give a \texttt{ppp} or \texttt{lpp} object for the argument \texttt{x}, instead of giving two coordinate vectors \texttt{x} and \texttt{y}. %%G \item \texttt{Geyer}: The saturation parameter \texttt{sat} can now be less than 1. \item \texttt{grow.rectangle}: New argument \texttt{fraction}. %%H \item \texttt{Hest}: \begin{itemize} \item Argument \texttt{X} can now be a pixel image with logical values. \item New argument \texttt{W}. [Based on code by Kassel Hingee.] \item Additional checks for errors in input data. \end{itemize} \item \texttt{hist.im}: New argument \texttt{xname}. %%I \item \texttt{identify.psp}: Improved placement of labels. Arguments can be passed to \texttt{text.default} to control the plotting of labels. \item \texttt{influence.ppm}: For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. 
\item \texttt{integral.linfun}: New argument \texttt{delta} controls step length of approximation to integral. \item \texttt{intensity.ppm}: Intensity approximation is now implemented for area-interaction model, and Geyer saturation model. \item \texttt{ippm}: \begin{itemize} \item Accelerated. \item The internal format of the result has been extended slightly. \item Improved defaults for numerical algorithm parameters. \end{itemize} %%J %%K \item \texttt{Kcross.inhom}, \texttt{Kdot.inhom}, \texttt{Kmulti.inhom}: These functions now allow intensity values to be given by a fitted point process model. New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}. \item \texttt{Kest} Accelerated computation (for translation and rigid corrections) when window is an irregular shape. \item \texttt{Kest.fft}: Now has \verb!...! arguments allowing control of spatial resolution. \item \texttt{Kinhom}: \begin{itemize} \item New argument \texttt{ratio}. \item Stops gracefully if \texttt{lambda} contains any zero values. \end{itemize} \item \texttt{kppm}: \begin{itemize} \item Fitting a model with \texttt{clusters="LGCP"} no longer requires the package \pkg{RandomFields} to be loaded explicitly. \item New argument \texttt{algorithm} specifies the choice of optimisation algorithm. \item Left hand side of formula can now involve entries in the list \texttt{data}. \item refuses to fit a log-Gaussian Cox model with anisotropic covariance. \item A warning about infinite values of the summary function no longer occurs when the default settings are used. Also affects \texttt{mincontrast}, \texttt{cauchy.estpcf}, \texttt{lgcp.estpcf}, \texttt{matclust.estpcf}, \texttt{thomas.estpcf}, \texttt{vargamma.estpcf}. \item Improved printed output. \end{itemize} %%L \item \texttt{Lcross.inhom}, \texttt{Ldot.inhom}: These functions now allow intensity values to be given by a fitted point process model. New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}. 
\item \texttt{lengths.psp}: New argument \texttt{squared}. \item \texttt{leverage.ppm}: For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: These methods now work for models that were fitted by logistic composite likelihood (\texttt{method='logi'}). \item \texttt{linearK}, \texttt{linearpcf} and relatives: \\ \begin{itemize} \item substantially accelerated. \item ratio calculations are now supported. \item new argument \texttt{ratio}. \end{itemize} \item \texttt{linearKinhom}: new argument \texttt{normpower}. \item \texttt{linearKinhom}, \texttt{linearpcfinhom}: \begin{itemize} \item Changed behaviour when \texttt{lambda} is a fitted model. \item New arguments \texttt{update} and \texttt{leaveoneout}. \end{itemize} \item \texttt{linearpcf}: new argument \texttt{normpower}. \item \texttt{linim}: \begin{itemize} \item The image \texttt{Z} is now automatically restricted to the network. \item New argument \texttt{restrict}. \end{itemize} \item \texttt{linnet}: \begin{itemize} \item The internal format of a \texttt{linnet} (linear network) object has been changed. Existing datasets of class \texttt{linnet} are still supported. However, computation will be faster if they are converted to the new format. To convert a linnet object \texttt{L} to the new format, use \verb!L <- as.linnet(L)!. \item If the argument \texttt{edges} is given, then this argument now determines the ordering of the sequence of line segments. For example, the \texttt{i}-th row of \texttt{edges} specifies the \texttt{i}-th line segment in \texttt{as.psp(L)}. \item New argument \texttt{warn}. \end{itemize} \item \texttt{lintess}: Argument \texttt{df} can be missing or \texttt{NULL}, resulting in a tessellation with only one tile. \item \texttt{logLik.ppm}: \begin{itemize} \item New argument \texttt{absolute}. 
\item The warning about pseudolikelihood (`log likelihood not available') is given only once, and is not repeated in subsequent calls, within a spatstat session. \end{itemize} \item \texttt{logLik.mppm}: new argument \texttt{warn}. \item \texttt{lohboot}: Algorithm has been corrected and extended thanks to Christophe Biscio and Rasmus Waagepetersen. New arguments \texttt{block}, \texttt{basicboot}, \texttt{Vcorrection}. \item \texttt{lpp}: \begin{itemize} \item The internal format of an \texttt{lpp} object has been changed. Existing datasets of class \texttt{lpp} are still supported. However, computation will be faster if they are converted to the new format. To convert an \texttt{lpp} object \texttt{X} to the new format, use \verb!X <- as.lpp(X)!. \item \texttt{X} can be missing or \texttt{NULL}, resulting in an empty point pattern. \end{itemize} \item \texttt{lpp}, \texttt{as.lpp}: These functions now handle the case where coordinates \texttt{seg} and \texttt{tp} are given but \texttt{x} and \texttt{y} are missing. \item \texttt{lppm}: \begin{itemize} \item New argument \texttt{random} controls placement of dummy points. \item Computation accelerated. \end{itemize} %%M \item \texttt{markcorr}: New argument \texttt{weights} allows computation of the weighted version of the mark correlation function. \item \texttt{mppm}: \begin{itemize} \item Now handles models with a random effect component. (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.) \item New argument \texttt{random} is a formula specifying the random effect. (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.) \item Performs more checks for consistency of the input data. \item New arguments \texttt{gcontrol} and \texttt{reltol.pql} control the fitting algorithm. \end{itemize} %%N \item \texttt{nbfires}: \begin{itemize} \item the unit of length for the coordinates is now specified in this dataset. \item This dataset now includes information about the different land and sea borders of New Brunswick. 
\end{itemize} \item \texttt{nndist.lpp, nnwhich.lpp, nncross.lpp, distfun.lpp}: New argument \texttt{k} allows computation of $k$-th nearest point. Computation accelerated. \item \texttt{nnfun.lpp}: New argument \texttt{k}. %%O %%P \item \texttt{padimage}: New argument \texttt{W} allows an image to be padded out to fill any window. \item \texttt{pcf.ppp}: \begin{itemize} \item New argument \code{close} for advanced use. \item New argument \texttt{ratio} allows several estimates of pcf to be pooled. \item Now calculates an analytic approximation to the variance of the estimate of the pair correlation function (when \texttt{var.approx=TRUE}). \item Now returns the smoothing bandwidth used, as an attribute of the result. \end{itemize} \item \texttt{pcfinhom}: \begin{itemize} \item New argument \code{close} for advanced use. \item Default behaviour is changed when \texttt{lambda} is a fitted model. The default is now to re-fit the model to the data before computing pcf. New arguments \texttt{update} and \texttt{leaveoneout} control this. \end{itemize} \item \texttt{pixellate.ppp}: \begin{itemize} \item If the pattern is empty, the result is an integer-valued image (by default) for consistency with the results for non-empty patterns. \item Accelerated in the case where weights are given. \item New arguments \texttt{fractional} and \texttt{preserve} for more accurate discretisation. \end{itemize} \item \texttt{plot.anylist}: \begin{itemize} \item If a list entry \verb!x[[i]]! belongs to class \texttt{"anylist"}, it will be expanded so that each entry \verb!x[[i]][[j]]! will be plotted as a separate panel. \item New arguments \texttt{panel.begin.args}, \texttt{panel.end.args} \item Result is now an (invisible) list containing the result from executing the plot of each panel. \end{itemize} \item \texttt{plot.im}: \begin{itemize} \item Now handles complex-valued images. 
\item New argument \texttt{workaround} to avoid a bug in some MacOS device drivers that causes the image to be displayed in the wrong spatial orientation. \item The number of tick marks in the colour ribbon can now be controlled using the argument \texttt{nint} in \texttt{ribargs}. \end{itemize} \item \texttt{plot.imlist}: Result is now an (invisible) list containing the result from executing the plot of each panel. \item \texttt{plot.influence.ppm}: New argument \texttt{multiplot}. \item \texttt{plot.kppm}: \begin{itemize} \item New arguments \texttt{pause} and \texttt{xname}. \item The argument \texttt{what="all"} is now recognised: it selects all the available options. [This is also the default.] \end{itemize} \item \texttt{plot.leverage.ppm}: \begin{itemize} \item New argument \texttt{multiplot}. \item A contour line showing the average value of leverage is now drawn on the colour ribbon, as well as on the main image. New argument \texttt{args.contour}. \end{itemize} \item \texttt{plot.linfun}: \begin{itemize} \item Now passes arguments to the function being plotted. \item A scale bar is now plotted when \texttt{style="width"}. \item New argument \texttt{legend}. \item The return value has a different format. \end{itemize} \item \texttt{plot.linim}: \begin{itemize} \item The return value has a different format. \item A scale bar is now plotted when \texttt{style="width"}. \item When \texttt{style="width"}, negative values are plotted in red (by default). New argument \texttt{negative.args} controls this. \item New argument \texttt{zlim} specifies the range of values to be mapped. \end{itemize} \item \texttt{plot.lintess}: Improved plot method, with more options. \item \texttt{plot.lpp}: \begin{itemize} \item New argument \texttt{show.network}. 
\item For a point pattern with continuous marks (``real numbers'') the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now be vectors of colour values, and will be used to determine the default colour map for the marks. \end{itemize} \item \texttt{plot.mppm}: New argument \texttt{se}. \item \texttt{plot.msr}: \begin{itemize} \item Now handles multitype measures. \item New argument \texttt{multiplot}. \item New argument \texttt{massthresh}. \item New arguments \texttt{equal.markscale} and \texttt{equal.ribbon}. \end{itemize} \item \texttt{plot.pp3}: New arguments \texttt{box.front}, \texttt{box.back} control plotting of the box. \item \texttt{plot.ppp}: \begin{itemize} \item The default colour for the points is now a transparent grey, if this is supported by the plot device. \item For a point pattern with continuous marks (``real numbers'') the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now be vectors of colour values, and will be used to determine the default colour map for the marks. \item Now recognises graphics parameters for text, such as \texttt{family} and \texttt{srt} \item When \texttt{clipwin} is given, any parts of the boundary of the window of \texttt{x} that lie inside \texttt{clipwin} will also be plotted. \end{itemize} \item \texttt{plot.profilepl} ,\texttt{plot.quadratcount}, \texttt{plot.quadrattest}, \texttt{plot.tess}: Now recognise graphics parameters for text, such as \texttt{family} and \texttt{srt} \item \texttt{plot.solist}: \begin{itemize} \item New arguments \texttt{panel.begin.args}, \texttt{panel.end.args} \item Result is now an (invisible) list containing the result from executing the plot of each panel. \end{itemize} \item \texttt{plot.symbolmap}: New argument \texttt{nsymbols} controls the number of symbols plotted. \item \code{ponderosa}: In this installed dataset, the function \code{ponderosa.extra\$plotit} has changed slightly (to accommodate the dependence on the package \pkg{spatstat.utils}). 
\item \texttt{polynom}: This function now has a help file. \item \texttt{pool.fv}: \begin{itemize} \item The default plot of the pooled function no longer includes the variance curves. \item New arguments \texttt{relabel} and \texttt{variance}. \end{itemize} \item \texttt{pool.rat}: New arguments \texttt{weights}, \texttt{relabel} and \texttt{variance}. \item \texttt{ppm}: \begin{itemize} \item Argument \code{interaction} can now be a function that makes an interaction, such as \code{Poisson}, \code{Hardcore}, \code{MultiHard}. \item Argument \texttt{subset} can now be a window (class \texttt{"owin"}) specifying the sub-region of data to which the model should be fitted. \end{itemize} \item \texttt{ppm.ppp, ppm.quad}: \begin{itemize} \item New argument \texttt{emend}, equivalent to \texttt{project}. \item New arguments \texttt{subset} and \texttt{clipwin}. \end{itemize} \item \texttt{ppp}: \begin{itemize} \item New argument \texttt{checkdup}. \item If the coordinate vectors \code{x} and \code{y} contain \code{NA}, \code{NaN} or infinite values, these points are deleted with a warning, instead of causing a fatal error. \end{itemize} \item \texttt{predict.kppm, residuals.kppm} Now issues a warning when the calculation ignores the cluster/Cox component and treats the model as if it were Poisson. (This currently happens in predict.kppm when se=TRUE or interval != "none", and in residuals.kppm when type != "raw"). \item \texttt{predict.lppm}: Argument \texttt{locations} can now be an \texttt{lpp} object. \item \texttt{predict.mppm}: The argument \texttt{type="all"} is now recognised: it selects all the available options. [This is also the default.] \item \texttt{predict.rhohat}: New argument \texttt{what} determines which value should be calculated: the function estimate, the upper/lower confidence limits, or the standard error. \item \texttt{print.linim}: More information is printed. \item \texttt{print.quad}: More information is printed. 
\item \texttt{progressreport} \begin{itemize} \item Behaviour improved. \item New arguments \texttt{state}, \texttt{tick}, \texttt{showtime}. \item New option: \verb!style="tk"! \end{itemize} %%Q \item \texttt{quadratcount.ppp}: Computation accelerated in some cases. \item \texttt{quadrat.test.ppm}: Computation accelerated in some cases. \item \texttt{quantile.ewcdf}: The function is now normalised to the range \verb![0,1]! before the quantiles are computed. This can be suppressed by setting \texttt{normalise=FALSE}. \item \texttt{qqplot.ppm} Argument \texttt{expr} can now be a list of point patterns, or an envelope object containing a list of point patterns. %%R \item \texttt{rcellnumber}: New argument \texttt{mu}. \item \texttt{rgbim, hsvim}: New argument \texttt{A} controls the alpha (transparency) channel. \item \texttt{rgb2hex, col2hex, paletteindex, is.colour, samecolour,} \texttt{complementarycolour, is.grey, to.grey} These colour tools now handle transparent colours. \item \texttt{rgb2hex}: New argument \texttt{maxColorValue}. \item \texttt{rhohat}: New argument \texttt{subset} allows computation for a subset of the data. \item \texttt{rhohat.lpp}: New argument \texttt{random} controls placement of dummy points. \item \texttt{rLGCP}: This function no longer requires the package \pkg{RandomFields} to be loaded explicitly. \item \texttt{rMaternI, rMaternII}: These functions can now generate random patterns in three dimensions and higher dimensions, when the argument \texttt{win} is of class \texttt{box3} or \texttt{boxx}. \item \texttt{rmh}: Accelerated, in the case where multiple patterns are saved using \texttt{nsave}. \item \texttt{rmh.ppm, rmhmodel.ppm, simulate.ppm}: A model fitted using the \texttt{Penttinen} interaction can now be simulated. \item \texttt{rmh.default, rmhmodel.default}: \begin{itemize} \item These functions now recognise \verb!cif='penttinen'! for the Penttinen interaction. \item New arguments \texttt{nsim}, \texttt{saveinfo}. 
\end{itemize} \item \texttt{rmhcontrol}: New parameter \texttt{pstage} determines when to generate random proposal points. \item \texttt{rose.default} New argument \texttt{weights}. \item \texttt{rose} New arguments \texttt{start} and \texttt{clockwise} specify the convention for measuring and plotting angles. \item \texttt{rotmean}: New argument \texttt{padzero}. Default behaviour has changed. \item \texttt{rpoispp}: Accelerated, when \texttt{lambda} is a pixel image. \item \texttt{rpoisppx}: New argument \code{drop}. \item \texttt{rpoisline}: Also returns information about the original infinite random lines. \item \texttt{rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS, rPenttinen:} New argument \texttt{drop}. \item \texttt{rthin} \begin{itemize} \item Accelerated, when \texttt{P} is a single number. \item \texttt{X} can now be a point pattern on a linear network (class \texttt{lpp}). \end{itemize} \item \texttt{rThomas, rMatClust, rCauchy, rVarGamma}: \begin{itemize} \item When the model is approximately Poisson, it is simulated using rpoispp. This avoids computations which would require huge amounts of memory. New argument \texttt{poisthresh} controls this behaviour. \item New argument \texttt{saveparents}. \end{itemize} \item \texttt{runifpointx}: New argument \code{drop}. %%S \item Simulation: Several basic simulation algorithms have been accelerated. Consequently, simulation outcomes are not identical to those obtained with previous versions of \spst, even when the same random seed is used. To ensure compatibility with previous versions of spatstat, revert to the slower code by setting \texttt{spatstat.options(fastthin=FALSE, fastpois=FALSE)}. \item \code{shapley}: In this installed dataset, the function \code{shapley.extra\$plotit} has changed slightly (to accommodate the dependence on the package \pkg{spatstat.utils}). \item \texttt{simulate.ppm} New argument \texttt{w} controls the window of the simulated patterns. New argument \texttt{verbose}. 
\item \texttt{Smooth.ppp}: \begin{itemize} \item A non-Gaussian kernel can now be specified using the argument \texttt{kernel}. \item Argument \texttt{weights} can now be a pixel image. \item Accelerated by about 30\% in the case where \texttt{at="pixels"}. \item Accelerated by about 15\% in the case where \texttt{at="points"} and \texttt{kernel="gaussian"}. \item Now exits gracefully if any mark values are \texttt{NA}, \texttt{NaN} or \texttt{Inf}. \item New argument \texttt{geometric} supports geometric-mean smoothing. \end{itemize} \item \texttt{spatstat.options} New options \texttt{fastthin} and \texttt{fastpois} enable fast simulation algorithms. Set these options to \texttt{FALSE} to reproduce results obtained with previous versions of \spst. \item \texttt{split.ppp} The splitting variable \texttt{f} can now be a logical vector. \item \texttt{square}: Handles a common error in the format of the arguments. \item \texttt{step}: now works for models of class \texttt{"mppm"}. \item \texttt{stieltjes}: Argument \texttt{M} can be a stepfun object (such as an empirical CDF). \item \texttt{subset.ppp}, \texttt{subset.lpp}, \texttt{subset.pp3}, \texttt{subset.ppx}: The argument \texttt{subset} can now be any argument acceptable to the \verb!"["! method. \item summary functions The argument \texttt{correction="all"} is now recognised: it selects all the available options. 
\begin{quote} This applies to \texttt{Fest}, \texttt{F3est}, \texttt{Gest}, \texttt{Gcross}, \texttt{Gdot}, \texttt{Gmulti}, \texttt{G3est}, \texttt{Gfox}, \texttt{Gcom}, \texttt{Gres}, \texttt{Hest}, \texttt{Jest}, \texttt{Jmulti}, \texttt{Jcross}, \texttt{Jdot}, \texttt{Jfox}, \texttt{Kest}, \texttt{Kinhom}, \texttt{Kmulti}, \texttt{Kcross}, \texttt{Kdot}, \texttt{Kcom}, \texttt{Kres}, \texttt{Kmulti.inhom}, \texttt{Kcross.inhom}, \texttt{Kdot.inhom}, \texttt{Kscaled}, \texttt{Ksector}, \texttt{Kmark}, \texttt{K3est}, \texttt{Lscaled}, \texttt{markcorr}, \texttt{markcrosscorr}, \texttt{nnorient}, \texttt{pairorient}, \texttt{pcfinhom}, \texttt{pcfcross.inhom}, \texttt{pcfcross}, \texttt{pcf}, \texttt{Tstat}. \end{quote} \item \texttt{Summary.linim}: Recognises the argument \texttt{finite} so that \texttt{range(x, finite=TRUE)} works for a linim object \texttt{x}. \item \texttt{summary.ppm}: New argument \texttt{fine} selects the algorithm for variance estimation. \item \texttt{summary.owin}, \texttt{summary.im}: The fraction of frame area that is occupied by the window/image is now reported. \item \texttt{sumouter}: New argument \texttt{y} allows computation of asymmetric outer products. \item \texttt{symbolmap}: \begin{itemize} \item Now accepts a vector of colour values for the arguments \texttt{col}, \texttt{cols}, \texttt{fg}, \texttt{bg} if the argument \texttt{range} is given. \item New option: \texttt{shape="arrows"}. \end{itemize} %%T \item \texttt{tess}: Argument \texttt{window} is ignored when xgrid, ygrid are given. \item \texttt{texturemap}: Argument \texttt{textures} can be missing or NULL. \item \texttt{textureplot}: Argument \texttt{x} can now be something acceptable to \texttt{as.im}. \item \texttt{to.grey} New argument \texttt{transparent}. %%U \item \texttt{union.owin}: Improved behaviour when there are more than 2 windows. \item \texttt{update}: now works for models of class \texttt{"mppm"}. 
\item \texttt{update.kppm}: \begin{itemize} \item New argument \texttt{evaluate}. \item Now handles additional arguments in any order, with or without names. \item Changed arguments. \item Improved behaviour. \end{itemize} \item \texttt{update.ppm}: For the case \texttt{update(model, X)} where \texttt{X} is a point pattern, if the window of \texttt{X} is different from the original window, then the model is re-fitted from scratch (i.e. \texttt{use.internal=FALSE}). %%V \item \texttt{valid.ppm} This is now a method for the generic function \texttt{valid}. \item \texttt{vcov.mppm}: Now handles models with Gibbs interactions. \item \texttt{vcov.ppm}: Performance slightly improved, for Gibbs models. %%W %%X %%Y %%Z \item \verb![<-.im! Accepts an array for \texttt{value}. \item \verb![.im! The subset index \texttt{i} can now be a linear network. Then the result of \verb!x[i, drop=FALSE]! is a pixel image of class \texttt{linim}. \item \verb![.layered!: \begin{itemize} \item Subset index \texttt{i} can now be an \texttt{owin} object. \item Additional arguments \verb!...! are now passed to other methods. \end{itemize} \item \verb![.leverage.ppm!: New argument \texttt{update}. \item \verb![.linnet!: \begin{itemize} \item New argument \texttt{snip} determines what to do with segments of the network that cross the boundary of the window. Default behaviour has changed. \item More robust against artefacts when the subset index is a pixel mask. \end{itemize} \item \verb![.linim!: More robust against artefacts. \item \verb![.lpp!: New argument \texttt{snip} determines what to do with segments of the network that cross the boundary of the window. Default behaviour has changed. \item \verb![.ppx!: The subset index \texttt{i} may now be a spatial domain of class \texttt{boxx} or \texttt{box3}. \item \verb![.ppp! New argument \texttt{clip} determines whether the window is clipped. \item \verb![.ppp! 
The previously-unused argument \texttt{drop} now determines whether to remove unused levels of a factor. \item \verb![.pp3!, \verb![.lpp!, \verb![.ppx!, \texttt{subset.ppp, subset.pp3, subset.lpp, subset.ppx}: These methods now have an argument \texttt{drop} which determines whether to remove unused levels of a factor. \item \verb![.psp!: New argument \texttt{fragments} specifies whether to keep fragments of line segments that are cut by the new window, or only to retain segments that lie entirely inside the window. \item \verb![.solist!: Subset index \texttt{i} can now be an \texttt{owin} object. \end{itemize} \section{Serious Bugs Fixed} <>= nbugs <- nrow(news(grepl("^BUG", Category), package="spatstat")) nbugssince <- nrow(news(Version > "1.42-0" & grepl("^BUG", Category), package="spatstat")) @ Hundreds of bugs have been detected and fixed in \spst. Bugs that may have affected the user are listed in the package \texttt{NEWS} file. To read all these bug reports, type <>= news(grepl("^BUG", Category), package="spatstat") @ which currently produces a list of \Sexpr{nbugs} bugs, of which \Sexpr{nbugssince} were detected after publication of the book \cite{baddrubaturn15}. Following is a list of the {\bf most serious bugs} only, in order of potential impact. \newcommand\bugger[4]{% \\ {} % {\small (Bug introduced in \texttt{spatstat {#1}}, {#2}; % fixed in \texttt{spatstat {#3}}, {#4})}% } \begin{itemize} %% LEVEL 1: always completely wrong, broad impact \item \texttt{nncross.ppp}: Results were completely incorrect if $k > 1$. \bugger{1.31-2}{april 2013}{1.35-0}{december 2013} \item \texttt{nncross.pp3}: Results were completely incorrect in some cases. \bugger{1.32-0}{august 2013}{1.34-0}{october 2013} \item \texttt{cdf.test.ppm}: Calculation of $p$-values was incorrect for Gibbs models: $1-p$ was computed instead of $p$. \bugger{1.40-0}{december 2014}{1.45-2}{may 2016} \item \texttt{Smooth.ppp}: Results of \verb!Smooth(X, at="points", leaveoneout=FALSE)! 
were completely incorrect. \bugger{1.20-5}{august 2010}{1.46-0}{july 2016} \item \texttt{rmh}: \begin{itemize} \item Simulation was completely incorrect in the case of a multitype point process with an interaction that does not depend on the marks, such as \verb!ppm(betacells, ~marks, Strauss(60))! due to a coding error in the \texttt{C} interface. \bugger{1.22-3}{march 2010}{1.22-3}{june 2011} \item Simulation of the Area-Interaction model was completely incorrect. \bugger{1.23-6}{october 2011}{1.31-0}{january 2013} \item Simulation of the Geyer saturation process was completely incorrect. \bugger{1.31-0}{january 2013}{1.31-1}{march 2013} \item Simulation of the Strauss-Hard Core process was partially incorrect, giving point patterns with a slightly lower intensity. \bugger{1.31-0}{january 2013}{1.37-0}{may 2014} \item The result of simulating a model with a hard core did not necessarily respect the hard core constraint, and simulation of a model with strong inhibition did not necessarily converge. This only happened if the first order trend was large, the starting state (\texttt{n.start} or \texttt{x.start}) was not given, and the number of iterations \texttt{nrep} was not very large. It occurred because of a poor choice for the default starting state. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.40-0}, december 2014)} \item Simulation was incorrect in the case of an inhomogeneous multitype model with \texttt{fixall=TRUE} (i.e.\ with a fixed number of points of each type) if the model was segregated (i.e.\ if different types of points had different first order trend). The effect of the error was that all types of points had the same first order trend. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.43-0}, september 2015)} \item Simulation of the Geyer saturation process was incorrectly initialised, so that the results of a short run (i.e. small value of \texttt{nrep}) were incorrect, while long runs were correct. 
\bugger{1.17-0}{october 2009}{1.31-1}{march 2013} \end{itemize} \item \texttt{rVarGamma}: Simulations were incorrect; they were generated using the wrong value of the parameter \texttt{nu.ker}. \bugger{1.25-0}{december 2011}{1.35-0}{december 2013} \item \texttt{rCauchy}: Simulations were incorrect; they were generated using the wrong value of the parameter \texttt{omega}. \bugger{1.25-0}{december 2011}{1.25-2}{january 2012} \item \texttt{lppm}: For multitype patterns, the fitted model was completely incorrect due to an error in constructing the quadrature scheme. \bugger{1.23-0}{july 2011}{1.30-0}{december 2012} \item \verb![.lpp!: The local coordinate \texttt{seg} was completely incorrect, when \texttt{i} was a window. \bugger{1.31-2}{april 2013}{1.45-0}{march 2016} \item \texttt{lohboot}: Implementation was completely incorrect. \bugger{1.26-1}{april 2012}{1.53-2}{october 2017} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Results were incorrect for non-Poisson processes due to a mathematical error. \bugger{1.25-0}{december 2011}{1.51-0}{may 2017} %% LEVEL 2: often completely wrong, moderate impact \item \texttt{bw.pcf}: Results were totally incorrect due to a typo. \bugger{1.51-0}{may 2017}{1.52-0}{august 2017} \item \texttt{predict.rho2hat}: Results were incorrect for a \texttt{rho2hat} object computed from a point pattern. \bugger{1.42-0}{may 2015}{1.52-0}{august 2017} \item \texttt{envelope.ppm}: If the model was an inhomogeneous Poisson process, the resulting envelope object was incorrect (the simulations were correct, but the envelopes were calculated assuming the model was CSR). \bugger{1.23-5}{september 2011}{1.23-6}{october 2011} \item \texttt{linearK}, \texttt{linearpcf}, \texttt{linearKinhom}, \texttt{linearpcfinhom} and multitype versions: These functions were sometimes greatly underestimated when the network had segments shorter than 10 coordinate units. 
\bugger{1.44-0}{december 2015}{1.46-2}{july 2016} \item \texttt{nncross}, \texttt{distfun}, \texttt{AreaInter}: Results of \texttt{nncross} were possibly incorrect when \code{X} and \code{Y} did not have the same window. This bug affected values of \texttt{distfun} and may also have affected ppm objects with interaction \texttt{AreaInter}. \bugger{1.9-4}{june 2006}{1.25-2}{january 2012} \item \texttt{update.kppm}: If the call to \texttt{update} did not include a formula argument or a point pattern argument, then all arguments were ignored. Example: \texttt{update(fit, improve.type="quasi")} was identical to \texttt{fit}. \bugger{1.42-2}{june 2015}{1.45-0}{march 2016} \item \texttt{markcorrint}: Results were completely incorrect. \bugger{1.39-0}{october 2014}{1.40-0}{december 2014} %% LEVEL 3: substantially incorrect, moderate impact \item \texttt{density.ppp}: Values of \verb!density(X, at="points")! and \verb!Smooth(X, at="points")! were sometimes incorrect, due to omission of the contribution from the data point with the smallest $x$ coordinate. \bugger{1.26-0}{april 2012}{1.46-1}{july 2016} \item \texttt{update.ppm}: If the argument \texttt{Q} was given, the results were usually incorrect, or an error was generated. \bugger{1.38-0}{august 2014}{1.38-1}{august 2014} \item \texttt{subfits}: The interaction coefficients of the submodels were incorrect for Gibbs models with a multitype interaction (\texttt{MultiStrauss}, etc). \bugger{1.35-0}{december 2013}{1.45-2}{may 2016} \item \texttt{F3est}: Estimates of $F(r)$ for the largest value of $r$ were wildly incorrect. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.48-0}, december 2016)} \item \texttt{kppm}, \texttt{matclust.estpcf}, \texttt{pcfmodel}: The pair correlation function of the M\'atern Cluster Process was evaluated incorrectly at distances close to 0. This could have affected the fitted parameters in \texttt{matclust.estpcf()} or \texttt{kppm(clusters="MatClust")}. 
\bugger{1.20-2}{august 2010}{1.33-0}{september 2013} \item \texttt{ppm}: Results were incorrect for the Geyer saturation model with a non-integer value of the saturation parameter \texttt{sat}. \bugger{1.20-0}{july 2010}{1.31-2}{april 2013} \item \texttt{clip.infline}: Results were incorrect unless the midpoint of the window was the coordinate origin. \bugger{1.15-1}{april 2009}{1.48-0}{december 2016} \item \texttt{intensity.ppm}: Result was incorrect for Gibbs models if the model was exactly equivalent to a Poisson process (i.e. if all interaction coefficients were exactly zero). \bugger{1.28-1}{june 2012}{1.47-0}{october 2016} \item \texttt{funxy}: Did not correctly handle one-line functions. The resulting objects evaluated the wrong function in some cases. \bugger{1.45-0}{march 2016}{1.46-0}{july 2016} %% LEVEL 4: partially incorrect \item \texttt{density.ppp}: If the smoothing bandwidth \texttt{sigma} was very small (e.g.\ less than the width of a pixel), results were inaccurate if the default resolution was used, and completely incorrect if a user-specified resolution was given. \bugger{1.26-0}{april 2012}{1.52-0}{august 2017} \item \texttt{selfcrossing.psp}: $y$ coordinate values were incorrect. \bugger{1.23-2}{august 2011}{1.25-3}{february 2012} \item \texttt{Geyer}: For point process models with the \texttt{Geyer} interaction, \texttt{vcov.ppm} and \texttt{suffstat} sometimes gave incorrect answers. \bugger{1.27-0}{may 2012}{1.30-0}{december 2012} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Calculations were incorrect for a Geyer model fitted using an edge correction other than \texttt{"border"} or \texttt{"none"}. \bugger{1.25-0}{december 2011}{1.51-0}{may 2017} \item \texttt{vcov.ppm}, \texttt{suffstat}: These functions sometimes gave incorrect values for marked point process models. 
\bugger{1.27-0}{may 2012}{1.29-0}{october 2012} \item \texttt{diagnose.ppm}: When applied to a model obtained from \texttt{subfits()}, in the default case (\texttt{oldstyle=FALSE}) the variance calculations were incorrect. Consequently the dotted lines representing significance bands were incorrect. An error or warning about negative variances occurred sometimes. However, calculations with \texttt{oldstyle=TRUE} were correct. The default has now been changed to \texttt{oldstyle=TRUE} for such models. \bugger{1.35-0}{december 2013}{1.45-0}{march 2016} \item \texttt{Smooth.ppp}: Results for \verb!at="points"! were garbled, for some values of \texttt{sigma}, if \texttt{X} had more than one column of marks. \bugger{1.38-0}{october 2014}{1.46-0}{july 2016} \item \texttt{linearK}, \texttt{linearKinhom}: If any data points were located exactly at a vertex of the linear network, the weights for Ang's correction were incorrect, due to numerical error. This sometimes produced infinite or NA values of the linear $K$ function. \bugger{1.23-0}{july 2011}{1.27-0}{may 2012} \item \texttt{Kinhom}, \texttt{Linhom}: the results were not renormalised (even if \texttt{renormalise=TRUE}) in some cases. \bugger{1.21-0}{december 2010}{1.37-0}{may 2014} \item \texttt{Kinhom}, \texttt{Linhom}: Ignored argument \texttt{reciplambda2} in some cases. \bugger{1.39-0}{october 2014}{1.40-0}{december 2014} \item \texttt{Kinhom}, \texttt{Linhom}: Calculations were incorrect if \texttt{lambda} was a fitted point process model. \bugger{1.38-0}{august 2014}{1.38-1}{august 2014} \item \texttt{integral.linim}, \texttt{integral.linfun}: \begin{itemize} \item results were inaccurate because of a bias in the distribution of sample points. \bugger{1.41-0}{february 2015}{1.47-0}{october 2016} \item results were inaccurate if many of the segment lengths were shorter than the width of a pixel. 
\bugger{1.41-0}{february 2015}{1.48-0}{december 2016} \end{itemize} \item \texttt{predict.ppm}: Calculation of the conditional intensity omitted the edge correction if \texttt{correction='translate'} or \texttt{correction='periodic'}. \bugger{1.17-0}{october 2009}{1.31-3}{may 2013} \item \texttt{varblock}: Calculations were incorrect if more than one column of edge corrections was computed. \bugger{1.21-1}{november 2010}{1.39-0}{october 2014} \item \texttt{scan.test} Results were sometimes incorrect due to numerical instability (a 'Gibbs phenomenon'). \bugger{1.24-1}{october 2011}{1.26-1}{april 2012} \item \texttt{relrisk}: When \verb!at="pixels"!, a small fraction of pixel values were sometimes wildly inaccurate, due to numerical errors. This affected the range of values in the result, and therefore the appearance of plots. {\small (Bug fixed in \texttt{spatstat 1.40-0}, december 2014)} \item \texttt{predict.slrm}: Results of \texttt{predict(object, newdata)} were incorrect if the spatial domain of \texttt{newdata} was larger than the original domain. \bugger{1.21-0}{november 2010}{1.25-3}{february 2012} \item \texttt{Lest}: The variance approximations (Lotwick-Silverman and Ripley) obtained with \texttt{var.approx=TRUE} were incorrect for \texttt{Lest} (although they were correct for \texttt{Kest}) due to a coding error. \bugger{1.24-1}{october 2011}{1.24-2}{november 2011} \item \texttt{bw.diggle}: Bandwidth was too large by a factor of 2. \bugger{1.23-4}{september 2011}{1.23-5}{september 2011} \item pair correlation functions (\texttt{pcf.ppp}, \texttt{pcfdot}, \texttt{pcfcross} etc:) The result had a negative bias at the maximum $r$ value, because contributions to the pcf estimate from interpoint distances greater than \texttt{max(r)} were mistakenly omitted. {\small (Bugs fixed in \texttt{spatstat 1.35-0}, december 2013)} \item \texttt{Kest}, \texttt{Lest}: Gave incorrect values in very large datasets, due to numerical overflow. 
`Very large' typically means about 1 million points in a random pattern, or 100,000 points in a tightly clustered pattern. [Overflow cannot occur unless there are at least 46,341 points.] \item \texttt{bw.relrisk}: Implementation of \texttt{method="weightedleastsquares"} was incorrect and was equivalent to \texttt{method="leastsquares"}. \bugger{1.21-0}{november 2010}{1.23-4}{september 2011} \item \texttt{triangulate.owin}: Results were incorrect in some special cases. \bugger{1.42-2}{june 2015}{1.44-0}{december 2015} \item \texttt{crosspairs}: If \texttt{X} and \texttt{Y} were identical point patterns, the result was not necessarily symmetric (on some machines) due to numerical artifacts. \bugger{1.35-0}{december 2013}{1.44-0}{december 2015} \item \texttt{bdist.tiles}: Values were incorrect in some cases due to numerical error. {\small (Bug fixed in \texttt{spatstat 1.29-0}, october 2012)} \item \texttt{Kest.fft}: Result was incorrectly normalised. \bugger{1.21-2}{january 2011}{1.44-0}{december 2015} \item \texttt{crossdist.ppp}: Ignored argument \texttt{squared} if \texttt{periodic=FALSE}. {\small (Bug fixed in \texttt{spatstat 1.38-0}, july 2014)} \item polygon geometry: The point-in-polygon test gave the wrong answer in some boundary cases. {\small (Bug fixed in \texttt{spatstat 1.23-2}, august 2011)} \item \texttt{MultiStraussHard}: If a fitted model with \texttt{MultiStraussHard} interaction was invalid, \texttt{project.ppm} sometimes yielded a model that was still invalid. {\small (Bug fixed in \texttt{spatstat 1.42-0}, may 2015)} \item \texttt{pool.envelope}: Did not always respect the value of \texttt{use.theory}. \bugger{1.23-5}{september 2011}{1.43-0}{september 2015} \item \texttt{nncross.lpp}, \texttt{nnwhich.lpp}, \texttt{distfun.lpp}: Sometimes caused a segmentation fault. 
\bugger{1.44-0}{december 2015}{1.44-1}{december 2015} \item \texttt{anova.ppm}: If a single \texttt{object} was given, and it was a Gibbs model, then \texttt{adjust} was effectively set to \texttt{FALSE}. \bugger{1.39-0}{october 2014}{1.44-1}{december 2015} \end{itemize} \begin{thebibliography}{1} \bibitem{badd10wshop} A.~Baddeley. \newblock Analysing spatial point patterns in {{R}}. \newblock Technical report, CSIRO, 2010. \newblock Version 4. \newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/} \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}. \newblock Chapman \& Hall/CRC Press, 2015. \end{thebibliography} \end{document} spatstat/vignettes/replicated.Rnw0000644000176200001440000014151513115225157016742 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Analysing Replicated Point Patterns in Spatstat} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\indicate}[1]{\boldmaths{1}\{ {#1} \}} \newcommand{\dee}[1]{\, {\rm d}{#1}} \newcommand{\boldmaths}[1]{{\ensuremath\boldsymbol{#1}}} \newcommand{\xx}{\boldmaths{x}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Analysing replicated point patterns in 
\texttt{spatstat}} \author{Adrian Baddeley} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \begin{abstract} This document describes \spst's capabilities for fitting models to replicated point patterns. More generally it applies to data from a designed experiment in which the response from each unit is a spatial point pattern. \end{abstract} \tableofcontents \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Introduction} `Replicated point patterns' are datasets consisting of several point patterns which can be regarded as independent repetitions of the same experiment. For example, three point patterns taken from micrographs of three pipette samples of the same jug of milk, could be assumed to be replicated observations. More generally we could have several experimental groups, with replicated point pattern data in each group. For example there may be two jugs of milk that were treated differently, and we take three pipette samples from each jug. Even more generally our point patterns could be the result of a designed experiment involving control and treatment groups, covariates such as temperature, and even spatial covariates (such as image data). This document describes some capabilities available in the \spst\ package for analysing such data. \textbf{For further detail, see Chapter 16 of the spatstat book \cite{TheBook}.} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Overview of software} The main components needed are: \begin{itemize} \item the model-fitting function \texttt{mppm}, an extension of the \texttt{spatstat} function \texttt{ppm}, that will fit Gibbs point process models to multiple point pattern datasets; \item support for the class \texttt{"mppm"} of point process models fitted by \texttt{mppm} (e.g. 
functions to print and plot the fitted model, analysis of deviance for Poisson models) \item some tools for exploratory data analysis; \item basic support for the data from such experiments by storing the data in a \emph{``hyperframe''}. A hyperframe is like a data frame, except that each entry in a column can be a point pattern or a pixel image, as well as a single number or categorical value. \item four example datasets. \end{itemize} \section{Formulating the problem} We view the experiment as involving a series of {\em `units'\/}. Each unit is subjected to a known set of experimental conditions (described by the values of the {\em covariates\/}), and each unit yields a {\em response\/} which is a spatial point pattern. The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a pixel image. Three important cases are: \begin{description} \item[independent replicates:] We observe $n$ different point patterns that can be regarded as independent replicates, i.e.\ independent realisations of the same point process. The `responses' are the point patterns; there are no covariates. \item[replication in groups:] there are $K$ different experimental groups (e.g. control, aspirin, nurofen). In group $k$ ($k=1,\ldots,K$) we observe $n_k$ point patterns which can be regarded as independent replicates within this group. We regard this as an experiment with $n = \sum_k n_k$ units. The responses are the point patterns; there is one covariate which is a factor (categorical variable) identifying which group each point pattern belongs to. \item[general case:] there are covariates other than factors that influence the response. The point patterns are assumed to be independent, but no two patterns have the same distribution. \end{description} Examples of these three cases are given in the datasets \texttt{waterstriders}, \texttt{pyramidal} and \texttt{demohyper} respectively, which are installed in \spst. 
\section{Installed datasets} The following datasets are currently installed in \spst. \begin{itemize} \item \texttt{waterstriders}: Penttinen's \cite{pent84} waterstriders data recording the locations of insect larvae on a pond in 3 independent experiments. \item \texttt{pyramidal}: data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain, 31 human subjects grouped into 3 groups (controls, schizoaffective and schizophrenic). \item \texttt{flu}: data from Chen et al \cite{chenetal08} giving the locations of two different virus proteins on the membranes of cells infected with influenza virus; 41 multitype point patterns divided into two virus types (wild and mutant) and two stain types. \item \texttt{simba}: simulated data from an experiment with two groups and 5 replicate point patterns per group. \item \texttt{demohyper}: simulated data from an experiment with two groups in which each experimental unit has a point pattern response and a pixel image covariate. \end{itemize} \section{Lists of point patterns} First we need a convenient way to store the \emph{responses} from all the units in an experiment. An individual point pattern is stored as an object of class \verb!"ppp"!. The easiest way to store all the responses is to form a list of \verb!"ppp"! objects. \subsection{Waterstriders data} The \texttt{waterstriders} data are an example of this type. The data consist of 3 independent point patterns representing the locations of insect larvae on a pond. See \texttt{help(waterstriders)}. <<>>= waterstriders @ The \texttt{waterstriders} dataset is a list of point patterns. It is a list, each of whose entries is a point pattern (object of class \verb!"ppp"!). Note that the observation windows of the three point patterns are {\tt not\/} identical. \subsection{The class \texttt{listof}} For convenience, the \texttt{waterstriders} dataset also belongs to the class \verb!"listof"!. 
This is a simple mechanism to allow us to handle the list neatly --- for example, we can provide special methods for printing, plotting and summarising the list. \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(waterstriders, main="") @ Notice that the plot method displays each entry of the list in a separate panel. There's also the summary method: <<>>= summary(waterstriders) @ \subsection{Creating a \texttt{listof} object} For example, here is a simulated dataset containing three independent realisations of the Poisson process with intensity 100. <<>>= X <- listof(rpoispp(100), rpoispp(100), rpoispp(100)) @ Then it can be printed and plotted. <>= plot(X) X @ To convert an existing list to the class \code{listof}, use \code{as.listof}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Hyperframes} A \emph{hyperframe} is like a data frame, except that its entries can be objects of any kind. A hyperframe is effectively a two-dimensional array in which each column consists of values of one type (as in a data frame) or consists of objects of one class. The entries in a hyperframe can be point patterns, pixel images, windows, or any other objects. To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). \subsection{Creating hyperframes} The function \texttt{hyperframe} will create a hyperframe. <>= hyperframe(...) @ The arguments \verb!...! are any number of arguments of the form \texttt{tag=value}. Each \texttt{value} will become a column of the array. The \texttt{tag} determines the name of the column. Each \texttt{value} can be either \begin{itemize} \item an atomic vector or factor (i.e. 
numeric vector, integer vector, character vector, logical vector, complex vector or factor) \item a list of objects which are all of the same class \item one atomic value, which will be replicated to make an atomic vector or factor \item one object, which will be replicated to make a list of identical objects. \end{itemize} All columns (vectors, factors and lists) must be of the same length, if their length is greater than 1. For example, here is a hyperframe containing a column of numbers and a column of \emph{functions}: <<>>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) H @ Note that a column of character strings will be converted to a factor, unless you set \texttt{stringsAsFactors=FALSE} in the call to \code{hyperframe}. This is the same behaviour as for the function \code{data.frame}. <<>>= G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]), W=list(rpoispp(100),rpoispp(100), rpoispp(100)), U=42, V=rpoispp(100), stringsAsFactors=FALSE) G @ This hyperframe has 3 rows. The columns named \texttt{U} and \texttt{V} are constant (all entries in a column are the same). The column named \texttt{Y} is a character vector. \subsection{Hyperframes of data} To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). Several examples of hyperframes are provided with the package, including \texttt{demohyper}, \texttt{flu}, \texttt{simba} and \texttt{pyramidal}, described above. The \texttt{simba} dataset contains simulated data from an experiment with a `control' group and a `treatment' group, each group containing 5 experimental units. The responses in the control group are independent Poisson point patterns with intensity 80. The responses in the treatment group are independent realisations of a Strauss process (see \texttt{help(simba)} for details). 
The \texttt{simba} dataset is a hyperframe with 10 rows and 2 columns: \texttt{Points} (the point patterns) and \texttt{group} (a factor with levels \texttt{control} and \texttt{treatment}). <<>>= simba @ The \texttt{pyramidal} dataset contains data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain. One point pattern was observed in each of 31 human subjects. The subjects were classified into 3 groups (controls, schizoaffective and schizophrenic). The \texttt{pyramidal} dataset is a hyperframe with 31 rows and 2 columns: \code{Neurons} (the point patterns) and \code{group} (a factor with levels \texttt{control}, \texttt{schizoaffective} and \texttt{schizophrenic}). <<>>= pyramidal @ The \texttt{waterstriders} dataset is not a hyperframe; it's just a list of point patterns. It can easily be converted into a hyperframe: <<>>= ws <- hyperframe(Striders=waterstriders) @ \subsection{Columns of a hyperframe} Individual columns of a hyperframe can be extracted using \verb!$!: <<>>= H$X H$Y @ The result of \verb!$! is a vector or factor if the column contains atomic values; otherwise it is a list of objects (with class \texttt{"listof"} to make it easier to print and plot). Individual columns can also be assigned (overwritten or created) using \verb!$<-!: <<>>= H$U <- letters[1:3] H @ This can be used to build up a hyperframe column-by-column: <<>>= G <- hyperframe() G$X <- waterstriders G$Y <- 1:3 G @ \subsection{Subsets of a hyperframe} Other subsets of a hyperframe can be extracted with \verb![!: <<>>= H[,1] H[2,] H[2:3, ] H[1,1] @ The result of \verb![! is a hyperframe, unless you set \verb!drop=TRUE! and the subset consists of only one element or one column: <<>>= H[,1,drop=TRUE] H[1,1,drop=TRUE] H[1,2,drop=TRUE] @ Currently there is no method for \verb![<-! that would allow you to assign values to a subset of a hyperframe. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Plotting} \subsection{Plotting a \code{listof} object} The plot method for \code{listof} objects has formal arguments <>= plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL) @ where \code{main} is a title for the entire page. If \code{arrange=TRUE} then the entries of the list are displayed in separate panels on the same page (with \code{nrows} rows and \code{ncols} columns of panels), while if \code{arrange=FALSE} then the entries are just plotted as a series of plot frames. The extra arguments \verb!...! control the individual plot panels. These arguments will be passed to the plot method that displays each entry of the list. Suitable arguments depend on the type of entries. <>= plot(waterstriders, pch=16, nrows=1) @ \subsection{Plotting a hyperframe} \subsubsection{Plotting one column} If \code{h} is a hyperframe, then the default action of \code{plot(h)} is to extract the first column of \code{h} and plot each of the entries in a separate panel on one page (actually using the plot method for class \verb!"listof"!). \SweaveOpts{width=7,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(simba) @ This only works if the entries in the first column are objects for which a plot method is defined (for example, point patterns, images, windows). To select a different column, use \verb!$! or \verb![!: \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) plot(H$Y) @ The plot can be controlled using the arguments for \code{plot.listof} (and, in this case, \code{plot.function}, since \verb!H$Y! consists of functions). \subsubsection{Complex plots} More generally, we can display any kind of higher-order plot involving one or more columns of a hyperframe: <>= plot(h, e) @ where \code{h} is a hyperframe and \code{e} is an \R\ language call or expression that must be evaluated in each row to generate each plot panel. 
\SweaveOpts{width=9,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) })) @ Note the use of \code{quote}, which prevents the code inside the braces from being evaluated immediately. To plot the $K$-functions of each of the patterns in the \code{waterstriders} dataset, \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(Bugs=waterstriders) plot(H, quote(plot(Kest(Bugs))), marsize=1) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Data analysis} \subsection{Computing with hyperframes} Often we want to perform some computation on each row of a hyperframe. In a data frame, this can be done using the command \code{with}: <<>>= df <- data.frame(A=1:10, B=10:1) with(df, A-B) @ In this example, the expression \code{A-B} is evaluated in each row of the data frame, and the result is a vector containing the computed values for each row. The function \code{with} is generic, and has a method for data frames, \code{with.data.frame}. The computation above was executed by \code{with.data.frame}. The same syntax is available for hyperframes using the method \code{with.hyperframe}: <>= with(h,e) @ Here \code{h} is a hyperframe, and \code{e} is an {\sf R} language construct involving the names of columns in \code{h}. For each row of \code{h}, the expression \code{e} will be evaluated in such a way that each entry in the row is identified by its column name. <<>>= H <- hyperframe(Bugs=waterstriders) with(H, npoints(Bugs)) with(H, distmap(Bugs)) @ The result of \code{with.hyperframe} is a list of objects (of class \verb!"listof"!), or a vector or factor if appropriate. Notice that (unlike the situation for data frames) the operations in the expression \code{e} do not have to be vectorised. For example, \code{distmap} expects a single point pattern, and is not vectorised to deal with a list of point patterns. 
Instead, the expression \code{distmap(Bugs)} is evaluated separately in each row of the hyperframe. \subsection{Summary statistics} One application of \code{with.hyperframe} is to calculate summary statistics for each row of a hyperframe. For example, the number of points in a point pattern \code{X} is returned by \code{npoints(X)}. To calculate this for each of the responses in the \code{simba} dataset, <<>>= with(simba, npoints(Points)) @ The summary statistic can be any kind of object. For example, to compute the empirical $K$-functions for each of the patterns in the \code{waterstriders} dataset, <<>>= H <- hyperframe(Bugs=waterstriders) K <- with(H, Kest(Bugs)) @ To plot these $K$-functions you can then just type \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(K) @ The summary statistic for each row could be a numeric vector: <<>>= H <- hyperframe(Bugs=waterstriders) with(H, nndist(Bugs)) @ The result is a list, each entry being a vector of nearest neighbour distances. To find the minimum interpoint distance in each pattern: <<>>= with(H, min(nndist(Bugs))) @ \subsection{Generating new columns} New columns of a hyperframe can be created by computation from the existing columns. For example, I can add a new column to the \code{simba} dataset that contains pixel images of the distance maps for each of the point pattern responses. <>= simba$Dist <- with(simba, distmap(Points)) @ \subsection{Simulation} This can be useful for simulation. 
For example, to generate Poisson point patterns with different intensities, where the intensities are given by a numeric vector \code{lambda}: \SweaveOpts{width=6,height=6} \setkeys{Gin}{width=0.7\textwidth} <>= lambda <- rexp(6, rate=1/50) H <- hyperframe(lambda=lambda) H$Points <- with(H, rpoispp(lambda)) plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4))))) @ It's even simpler to generate 10 independent Poisson point patterns with the \emph{same} intensity 50, say: <>= H$X <- with(H, rpoispp(50)) @ (the expression \code{rpoispp(50)} is evaluated once in each row, yielding a different point pattern in each row because of the randomness). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Exploratory data analysis} Before fitting models to the data, it is prudent to explore the data to detect unusual features and to suggest appropriate models. \subsection{Exploring spatial trend and covariate effects} Points may be distributed non-uniformly either because they are intrinsically non-uniform (``spatial trend'') or because their abundance depends on a spatial covariate (``covariate effects''). Non-uniformity of a point pattern can be investigated using the kernel smoothed intensity. This is the convolution of the point pattern with a smooth density called the kernel. Effectively each point in the pattern is replaced by a copy of the kernel, and the sum of all copies of the kernel is the kernel-smoothed intensity function. It is computed by \texttt{density.ppp} separately for each point pattern. <>= plot(simba, quote(plot(density(Points), main="")), nrows=2) @ Covariate effects due to a real-valued spatial covariate (a real-valued pixel image) can be investigated using the command \code{rhohat}. This uses a kernel smoothing technique to fit a model of the form \[ \lambda(u) = \rho(Z(u)) \] where $\lambda(u)$ is the point process intensity at a location $u$, and $Z(u)$ is the value of the spatial covariate at that location. 
Here $\rho$ is an unknown, smooth function which is to be estimated. The function $\rho$ expresses the effect of the spatial covariate on the point process intensity. If $\rho$ turns out to be constant, then the covariate has no effect on point process intensity (and the constant value of $\rho$ is the constant intensity of the point process). <>= rhos <- with(demohyper, rhohat(Points, Image)) plot(rhos) @ \SweaveOpts{width=6,height=4} \setkeys{Gin}{width=0.9\textwidth} \subsection{Exploring interpoint interaction} Still to be written. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Fitting models of spatial trend} The command \code{mppm} fits models to multiple point patterns. Its syntax is very similar to that of \code{lm} and \code{glm}: <>= mppm(formula, data, interaction, ...) @ where \code{formula} is a formula describing the systematic trend part of the model, \code{data} is a hyperframe containing all the data (responses and covariates), and \code{interaction} determines the stochastic interpoint interaction part of the model. For example: <>= mppm(Points ~ group, simba, Poisson()) @ Note that the formula has a left hand side, which identifies the response. This should be the name of a column of \code{data}. \subsection{Trend formula} The right side of \code{formula} is an expression for the linear predictor (effectively the {\bf logarithm} of the spatial trend). The variables appearing in the right hand side of \code{formula} should be either \begin{itemize} \item names of columns in \code{data} \item objects in the {\sf R} global environment (such as \code{pi} and \code{log}) \item the reserved names \code{x}, \code{y} (representing Cartesian coordinates), \code{marks} (representing mark values attached to points) or \code{id} (a factor representing the row number in the hyperframe). \end{itemize} \subsubsection{Design covariates} The variables in the trend could be `design covariates'. 
For example, to fit a model to the \code{simba} dataset in which all patterns are independent replicates of the same uniform Poisson process, with the same constant intensity: <<>>= mppm(Points ~ 1, simba) @ To fit a model in which the two groups of patterns (control and treatment groups) each consist of independent replicates of a uniform Poisson process, but with possibly different intensity in each group: <<>>= mppm(Points ~ group, simba) @ To fit a uniform Poisson process to each pattern, with different intensity for each pattern: <<>>= mppm(Points ~ id, simba) @ \subsubsection{Spatial covariates} The variables in the trend could be `spatial covariates'. For example, the \code{demohyper} dataset has a column \code{Image} containing pixel images. <<>>= mppm(Points ~ Image, data=demohyper) @ This model postulates that each pattern is a Poisson process with intensity of the form \[ \lambda(u) = \exp(\beta_0 + \beta_1 Z(u)) \] at location $u$, where $\beta_0, \beta_1$ are coefficients to be estimated, and $Z(u)$ is the value of the pixel image \code{Image} at location $u$. It may or may not be appropriate to assume that the intensity of the points is an exponential function of the image pixel value $Z$. If instead we wanted the intensity $\lambda(u)$ to be \emph{proportional} to $Z(u)$, the appropriate model is <>= mppm(Points ~ offset(log(Image)), data=demohyper) @ which corresponds to an intensity proportional to \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \log Z(u)) = e^{\beta_0} \; Z(u). \] The \code{offset} indicates that there is no coefficient in front of $\log Z(u)$. Alternatively we could allow a coefficient: <>= mppm(Points ~ log(Image), data=demop) @ which corresponds to a gamma transformation of \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \beta_1 \log Z(u)) = e^{\beta_0} \; Z(u)^{\beta_1}. 
\] %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Interpoint interaction} The stochastic interpoint interaction in a point process model is specified by the arguments \code{interaction} and (optionally) \code{iformula} in <>= mppm(formula, data, interaction, ..., iformula=NULL) @ \subsection{Same interaction for all patterns} In the simplest case, the argument \texttt{interaction} is one of the familiar objects that describe the point process interaction structure. It is an object of class \texttt{"interact"} created by calling one of the functions \begin{center} \begin{tabular}{rl} \texttt{Poisson()} & the Poisson point process\\ \texttt{Hardcore()} & the hard core process \\ \texttt{Strauss()} & the Strauss process \\ \texttt{StraussHard()} & the Strauss/hard core point process\\ \texttt{Softcore()} & pairwise interaction, soft core potential\\ \texttt{PairPiece()} & pairwise interaction, piecewise constant \\ \texttt{DiggleGatesStibbard() } & Diggle-Gates-Stibbard pair potential \\ \texttt{DiggleGratton() } & Diggle-Gratton pair potential \\ \texttt{Fiksel() } & Fiksel pair potential \\ \texttt{LennardJones() } & Lennard-Jones pair potential \\ \texttt{Pairwise()} & pairwise interaction, user-supplied potential\\ \texttt{AreaInter()} & area-interaction potential\\ \texttt{Geyer()} & Geyer's saturation process\\ \texttt{BadGey()} & multiscale Geyer saturation process\\ \texttt{Saturated()} & Saturated pair model, user-supplied potential\\ \texttt{OrdThresh()} & Ord process, threshold potential\\ \texttt{Ord()} & Ord model, user-supplied potential \\ \texttt{MultiStrauss()} & multitype Strauss process \\ \texttt{MultiStraussHard()} & multitype Strauss/hard core process \\ \texttt{Concom()} & connected component interaction \\ \texttt{Hybrid()} & hybrid of several interactions \\ \end{tabular} \end{center} In this `simple' usage of \texttt{mppm}, the point process model assumes that all point patterns have exactly the same 
interpoint interaction, (with the same interaction parameters), and only differ in their spatial trend. \subsection{Hyperframe of interactions} More generally the argument \code{interaction} can be a hyperframe containing objects of class \texttt{"interact"}. For example, we might want to fit a Strauss process to each point pattern, but with a different Strauss interaction radius for each pattern. <>= radii <- with(simba, mean(nndist(Points))) @ Then \code{radii} is a vector of numbers which we could use as the values of the interaction radius for each case. First we need to make the interaction objects: <<>>= Rad <- hyperframe(R=radii) Str <- with(Rad, Strauss(R)) @ Then we put them into a hyperframe and fit the model: <<>>= Int <- hyperframe(str=Str) mppm(Points ~ 1, simba, interaction=Int) @ An important constraint is that all of the interaction objects in one column must be \emph{instances of the same process} (e.g. Strauss) albeit possibly having different parameter values. For example, you cannot put Poisson and Strauss processes in the same column. \subsection{Interaction formula} If \code{interaction} is a hyperframe, then the additional argument \code{iformula} may be used to fully specify the interaction. (An \code{iformula} is also required if \code{interaction} has more than one column.) The \code{iformula} should be a formula without a left hand side. Variables on the right hand side are typically the names of columns in \code{interaction}. \subsubsection{Selecting one column} If the right hand side of \code{iformula} is a single name, then this identifies the column in \code{interaction} to be used as the interpoint interaction structure. 
<<>>= h <- hyperframe(Y=waterstriders) g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7)) mppm(Y ~ 1, data=h, interaction=g, iformula=~str4) @ \subsubsection{Interaction depending on design} The \code{iformula} can also involve columns of \code{data}, but only those columns that are vectors or factors. This allows us to specify an interaction that depends on the experimental design. [This feature is {\bf experimental}.] For example <<>>= fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group) @ Since \code{Strauss(0.1)} is not a hyperframe, it is first converted to a hyperframe with a single column named \code{Interaction}. The \code{iformula = ~Interaction*group} specifies (since \code{group} is a factor) that the interpoint interaction shall have a different coefficient in each experimental group. That is, we fit a model which has two different values for the Strauss interaction parameter $\gamma$, one for the control group and one for the treatment group. When you print the result of such a fit, the package tries to do `automatic interpretation' of the fitted model (translating the fitted interaction coefficients into meaningful numbers like $\gamma$). This will be successful in \emph{most} cases: <<>>= fit @ <>= co <- coef(fit) si <- function(x) { signif(x, 4) } @ Thus we see that the estimate of the Strauss parameter $\gamma$ for the control group is \Sexpr{si(exp(co[2]))}, and for the treatment group \Sexpr{si(exp(sum(co[c(2,4)])))} (the correct values in this simulated dataset were $1$ and $0.5$). The fitted model can also be interpreted directly from the fitted canonical coefficients: <<>>= coef(fit) @ The last output shows all the coefficients $\beta_j$ in the linear predictor for the (log) conditional intensity. The interpretation of the model coefficients, for any fitted model in \R, depends on the \emph{contrasts} which were applicable when the model was fitted. 
This is part of the core {\sf R} system: see \code{help(contrasts)} or \code{options(contrasts)}. If you did not specify otherwise, the default is to use \emph{treatment contrasts}. This means that, for an explanatory variable which is a \texttt{factor} with $N$ levels, the first level of the factor is used as a baseline, and the fitted model coefficients represent the factor levels $2, 3, \ldots, N$ relative to this baseline. In the output above, there is a coefficient for \code{(Intercept)} and one for \code{grouptreatment}. These are coefficients related to the \code{group} factor. According to the ``treatment contrasts'' rule, the \code{(Intercept)} coefficient is the estimated effect for the control group, and the \code{grouptreatment} coefficient is the estimated difference between the treatment and control groups. Thus the fitted first order trend is $\exp(\Sexpr{si(co[1])}) = \Sexpr{si(exp(co[1]))}$ for the control group and $\exp(\Sexpr{si(co[1])} + \Sexpr{si(co[3])}) = \Sexpr{si(exp(sum(co[c(1,3)])))}$ for the treatment group. The correct values in this simulated dataset were $80$ and $100$. The remaining coefficients in the output are \code{Interaction} and \code{Interaction:grouptreatment}. Recall that the Strauss process interaction term is $\gamma^{t(u,\xx)} = \exp(t(u,\xx) \log\gamma)$ at a spatial location $u$, for a point pattern $\xx$. Since we're using treatment contrasts, the coefficient \code{Interaction} is the estimate of $\log\gamma$ for the control group. The coefficient \code{Interaction:grouptreatment} is the estimate of the difference in $\log\gamma$ between the treatment and control groups. Thus the estimated Strauss interaction parameter $\gamma$ is $\exp(\Sexpr{si(co[2])}) = \Sexpr{si(exp(co[2]))}$ for the control group and $\exp(\Sexpr{si(co[2])} + (\Sexpr{si(co[4])})) = \Sexpr{si(exp(co[2]+co[4]))}$ for the treatment group. The correct values were $1$ and $0.5$. 
\subsubsection{Completely different interactions for different cases} In the previous example, when we fitted a Strauss model to all point patterns in the \code{simba} dataset, the fitted model for the patterns in the control group was close to Poisson ($\gamma \approx 1$). Suppose we now want to fit a model which {\it is} Poisson in the control group, and Strauss in the treatment group. The Poisson and Strauss interactions must be given as separate columns in a hyperframe of interactions: <>= interaction=hyperframe(po=Poisson(), str=Strauss(0.07)) @ What do we write for the \code{iformula}? The following \emph{will not} work: <>= iformula=~ifelse(group=="control", po, str) @ This does not work because the Poisson and Strauss models are `incompatible' inside such expressions. The canonical sufficient statistics for the Poisson and Strauss processes do not have the same dimension. Internally in \code{mppm} we translate the symbols \code{po} and \code{str} into matrices; the dimensions of these matrices are different, so the \code{ifelse} expression cannot be evaluated. Instead we need something like the following: <>= iformula=~I((group=="control")*po) + I((group=="treatment") * str) @ The letter \code{I} here is a standard R function that prevents its argument from being interpreted as a formula (thus the \code{*} is interpreted as multiplication instead of a model interaction). The expression \code{(group=="control")} is logical, and when multiplied by the matrix \code{po}, yields a matrix. So the following does work: <<>>= g <- hyperframe(po=Poisson(), str=Strauss(0.07)) fit2 <- mppm(Points ~ 1, simba, g, iformula=~I((group=="control")*po) + I((group=="treatment") * str)) fit2 @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %#%^!ifdef RANDOMEFFECTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Random effects} \subsection{Mixed effects models} It is also possible to fit models that include `random effects'. 
Effectively, some of the coefficients in the model are assumed to be Normally-distributed random variables instead of constants. \subsubsection{Mixed Poisson model} Consider the simplest model of a uniform Poisson process which we fitted to the 3 point patterns of waterstriders. It might be sensible to assume that each pattern is a realisation of a Poisson process, but with {\em random intensity\/}. In each realisation the intensity $\lambda$ is constant across different locations, but it is a different, random value in different realisations. This example is called a `mixed Poisson process' and belongs to the class of `Cox processes' (Poisson processes with random intensity functions). Let's assume further that the log-intensity is a Normal random variable. Then the model is a (very degenerate) special case of a `log-Gaussian Cox process'. To fit such a model we use the standard techniques of mixed effects models \cite{lairware82,davigilt95,pinhbate00}. The mixed Poisson process which we discussed above would be written in standard form \begin{equation} \label{mixPois} \lambda_i(u) = \exp(\mu + Z_i) \end{equation} for the $i$th point pattern, where $\mu$ is a parameter to be estimated (the `fixed effect') and $Z_i \sim N(0, \sigma^2)$ is a zero-mean Normal random variable (the `random effect' for point pattern $i$). In the simplest case we would assume that $Z_1, \ldots, Z_n$ are independent. The variance $\sigma^2$ of the random effects would be estimated. One can also estimate the individual realised values $z_i$ of the random effects for each point pattern, although these are usually not of such great interest. Since the model includes both fixed and random effects, it is called a ``mixed-effects'' model. \subsubsection{Dependence structure} When we formulate a random-effects or mixed-effects model, we must specify the dependence structure of the random effects. In the model above we assumed that the $Z_i$ are independent for all point patterns $i$. 
If the experiment consists of two groups, we could alternatively assume that $Z_i = Z_j$ whenever $i$ and $j$ belong to the same group. In other words all the patterns in one group have the same value of the random effect. So the random effect is associated with the group rather than with individual patterns. This could be appropriate if, for example, the groups represent different batches of a chemical. Each batch is prepared under slightly different conditions so we believe that there are random variations between batches, but within a batch we believe that the chemical is well-mixed. \subsubsection{Random effects are coefficients} In the mixed Poisson model (\ref{mixPois}), the random effect is an additive constant (with a random value) in the log-intensity. In general, a random effect is a \emph{coefficient} of one of the covariates. For example if $v$ is a real-valued design covariate (e.g. `temperature'), with value $v_i$ for the $i$th point pattern, then we could assume \begin{equation} \label{ranef2} \lambda_i(u) = \exp(\mu + Z_i v_i) \end{equation} where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$. This model has a random effect in the dependence on $v$. We could also have a random effect for a spatial covariate $V$. Suppose $V_i$ is a real-valued image for the $i$th pattern (so that $V_i(u)$ is the value of some covariate at the location $u$ for the $i$th case). Then we could assume \begin{equation} \label{ranef3} \lambda_i(u) = \exp(\mu + Z_i V_i(u)) \end{equation} where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$. This kind of random effect would be appropriate if, for example, the images $V_i$ are not `normalised' or `standardised' relative to each other (e.g.\ they are images taken under different illumination). Then the coefficients $Z_i$ effectively include the rescaling necessary to standardise the images. 
\subsection{Fitting a mixed-effects model} The call to \texttt{mppm} can also include the argument \texttt{random}. This should be a formula (with no left-hand side) describing the structure of random effects. The formula for random effects must be recognisable to \texttt{lme}. It is typically of the form \begin{verbatim} ~x1 + ... + xn | g \end{verbatim} or \begin{verbatim} ~x1 + ... + xn | g1/.../gm \end{verbatim} where \verb!x1 + ... + xn! specifies the covariates for the random effects and \texttt{g} or \verb!g1/.../gm! determines the grouping (dependence) structure. Here \code{g} or \code{g1, \ldots, gm} should be factors. To fit the mixed Poisson model (\ref{mixPois}) to the waterstriders, we want to have a random intercept coefficient (so \texttt{x} is \texttt{1}) that varies for different point patterns (so \texttt{g} is \texttt{id}). The reserved name \code{id} is a factor referring to the individual point pattern. Thus <<>>= H <- hyperframe(P=waterstriders) mppm(P ~ 1, H, random=~1|id) @ To fit the mixed effects model (\ref{ranef2}) to the coculture data with the \code{AstroIm} covariate, with a random effect associated with each well, <>= mppm(Neurons ~ AstroIm, random=~AstroIm|WellNumber) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %#%^!endif %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Studying the fitted model} Fitted models produced by \code{mppm} can be examined and validated in many ways. \subsection{Fits for each pattern} \subsubsection{Subfits} The command \code{subfits} takes an \code{mppm} object and extracts, for each individual point pattern, the fitted point process model for that pattern \emph{that is implied by the overall fit}. It returns a list of objects of class \code{ppm}. 
<<>>= H <- hyperframe(W=waterstriders) fit <- mppm(W ~ 1, H) subfits(fit) @ In this example the result is a list of three \code{ppm} objects representing the implied fits for each of the three point patterns in the \code{waterstriders} dataset. Notice that {\bf the fitted coefficients are the same} in all three models. Note that there are some unresolved difficulties with the implementation of \code{subfits}. Two completely different implementations are supplied in the package; they are called \code{subfits.old} %(used in versions 0.1--1 and earlier) and \code{subfits.new}.% (introduced in 0.1--2). The old version would occasionally crash. Unfortunately the newer version \code{subfits.new} is quite memory-hungry and sometimes causes R to hang. We're still working on this problem. So for the time being, \code{subfits} is the same as \code{subfits.old}. You can change this simply by reassigning, e.g. <>= subfits <- subfits.new @ \subsubsection{Fitting separately to each pattern} For comparison, we could fit a point process model separately to each point pattern dataset using \code{ppm}. The easy way to do this is with \code{with.hyperframe}. To fit a \emph{separate} uniform Poisson point process to each of the three waterstriders patterns, <<>>= H <- hyperframe(W=waterstriders) with(H, ppm(W)) @ The result is again a list of three fitted point process models (objects of class \code{ppm}), but now the fitted coefficients are different. \subsection{Residuals} One standard way to check a fitted model is to examine the residuals. \subsubsection{Point process residuals} Some recent papers \cite{baddetal05,baddmollpake08} have defined residuals for a fitted point process model (fitted to a \emph{single} point pattern). These residuals are implemented in \code{spatstat} as \code{residuals.ppm} and apply to an object of class \code{ppm}, that is, a model fitted to a \emph{single} point pattern. 
The command \code{residuals.mppm} computes the point process residuals for an \code{mppm} object. <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) @ The result is a list, with one entry for each of the point pattern datasets. Each list entry contains the point process residuals for the corresponding point pattern dataset. Each entry in the list is a signed measure (object of class \code{"msr"}) as explained in the help for \code{residuals.ppm}). It can be plotted: <>= plot(res) @ You probably want the smoothed residual field: <>= smor <- with(hyperframe(res=res), Smooth(res, sigma=4)) plot(smor) @ \subsubsection{Sums of residuals} It would be useful to have a residual that is a single value for each point pattern (representing how much that point pattern departs from the model fitted to all the point patterns). That can be computed by \emph{integrating} the residual measures using the function \code{integral.msr}: <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) totres <- sapply(res, integral.msr) @ In designed experiments we can plot these total residuals against the design covariates: <>= fit <- mppm(Points~Image, data=demohyper) resids <- residuals(fit, type="Pearson") totres <- sapply(resids, integral.msr) areas <- with(demohyper, area.owin(as.owin(Points))) df <- as.data.frame(demohyper[, "Group"]) df$resids <- totres/areas plot(resids~Group, df) @ \subsubsection{Four-panel diagnostic plots} Sometimes a more useful tool is the function \code{diagnose.ppm} which produces a four-panel diagnostic plot based on the point process residuals. However, it is only available for \code{ppm} objects. To obtain a four-panel diagnostic plot for each of the point patterns, do the following: \begin{enumerate} \item fit a model to multiple point patterns using \code{mppm}. \item extract the individual fits using \code{subfits}. \item plot the residuals of the individual fits. 
\end{enumerate} For example: <>= fit <- mppm(P ~ 1, hyperframe(P=waterstriders)) sub <- hyperframe(Model=subfits(fit)) plot(sub, quote(diagnose.ppm(Model))) @ (One could also do this for models fitted separately to the individual point patterns.) \subsubsection{Residuals of the parameter estimates} We can also compare the parameter estimates obtained by fitting the model simultaneously to all patterns (using \code{mppm}) with those obtained by fitting the model separately to each pattern (using \code{ppm}). <<>>= H <- hyperframe(P = waterstriders) fitall <- mppm(P ~ 1, H) together <- subfits(fitall) separate <- with(H, ppm(P)) Fits <- hyperframe(Together=together, Separate=separate) dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together))) dr exp(dr) @ One could also try deletion residuals, etc. \subsection{Goodness-of-fit tests} \subsubsection{Quadrat count test} The $\chi^2$ goodness-of-fit test based on quadrat counts is implemented for objects of class \code{ppm} (in \code{quadrat.test.ppm}) and also for objects of class \code{mppm} (in \code{quadrat.test.mppm}). This is a goodness-of-fit test for a fitted {\bf Poisson} point process model only. The model could be uniform or non-uniform and the intensity might depend on covariates. <<>>= H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) @ See the help for \code{quadrat.test.ppm} and \code{quadrat.test.mppm} for further details. \subsubsection{Kolmogorov-Smirnov test} The Kolmogorov-Smirnov test of goodness-of-fit of a Poisson point process model compares the observed and predicted distributions of the values of a spatial covariate. 
We want to test the null hypothesis $H_0$ that the observed point pattern ${\mathbf x}$ is a realisation from the Poisson process with intensity function $\lambda(u)$ (for locations $u$ in the window $W$). Let $Z(u)$ be a given, real-valued covariate defined at each spatial location $u$. Under $H_0$, the \emph{observed} values of $Z$ at the data points, $Z(x_i)$ for each $x_i \in {\mathbf x}$, are independent random variables with common probability distribution function \[ F_0(z) = \frac{\int_W \lambda(u) \indicate{Z(u) \le z} \dee u} {\int_W \lambda(u) \dee u}. \] We can therefore apply the Kolmogorov-Smirnov test of goodness-of-fit. This compares the empirical cumulative distribution of the observed values $Z(x_i)$ to the predicted c.d.f. $F_0$. The test is implemented as \code{kstest.ppm}. The syntax is <>= kstest.mppm(model, covariate) @ where \code{model} is a fitted model (of class \texttt{"mppm"}) and \code{covariate} is either \begin{itemize} \item a \code{function(x,y)} making it possible to compute the value of the covariate at any location \code{(x,y)} \item a pixel image containing the covariate values \item a list of functions, one for each row of the hyperframe of original data \item a list of pixel images, one for each row of the hyperframe of original data \item a hyperframe with one column containing either functions or pixel images. \end{itemize} \newpage \addcontentsline{toc}{section}{Bibliography} %\bibliography{% %extra,% %extra2,% %biblio/badd,% %biblio/bioscience,% %biblio/censoring,% %biblio/mcmc,% %biblio/spatstat,% %biblio/stat,% %biblio/stochgeom% %} \begin{thebibliography}{1} \bibitem{baddmollpake08} A. Baddeley, J. M{\o}ller, and A.G. Pakes. \newblock Properties of residuals for spatial point processes. \newblock {\em Annals of the Institute of Statistical Mathematics}, 60:627--649, 2008. \bibitem{TheBook} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with R}. 
\newblock Chapman \& Hall/CRC Press, 2015. \bibitem{statpaper} A. Baddeley, I. Sintorn, L. Bischof, R. Turner, and S. Heggarty. \newblock Analysing designed experiments where the response is a spatial point pattern. \newblock In preparation. \bibitem{baddetal05} A. Baddeley, R. Turner, J. M{\o}ller, and M. Hazelton. \newblock Residual analysis for spatial point processes (with discussion). \newblock {\em Journal of the Royal Statistical Society, series B}, 67(5):617--666, 2005. \bibitem{chenetal08} B.J. Chen, G.P. Leser, D. Jackson, and R.A. Lamb. \newblock The influenza virus {M2} protein cytoplasmic tail interacts with the {M1} protein and influences virus assembly at the site of virus budding. \newblock {\em Journal of Virology}, 82:10059--10070, 2008. %#%^!ifdef RANDOMEFFECTS \bibitem{davigilt95} M. Davidian and D.M. Giltinan. \newblock {\em Nonlinear Mixed Effects Models for Repeated Measurement Data}. \newblock Chapman and Hall, 1995. %#%^!endif \bibitem{digglangbene91} P.J. Diggle, N. Lange, and F. M. Benes. \newblock Analysis of variance for replicated spatial point patterns in clinical neuroanatomy. \newblock {\em Journal of the {A}merican {S}tatistical {A}ssociation}, 86:618--625, 1991. %#%^!ifdef RANDOMEFFECTS \bibitem{lairware82} N.M. Laird and J.H. Ware. \newblock Random-effects models for longitudinal data. \newblock {\em Biometrics}, 38:963--974, 1982. %#%^!endif \bibitem{pent84} A. Penttinen. \newblock {\em Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method}. \newblock Number 7 in {Jyv\"askyl\"a} Studies in Computer Science, Economics and Statistics. University of {Jyv\"askyl\"a}, 1984. %#%^!ifdef RANDOMEFFECTS \bibitem{pinhbate00} J.C. Pinheiro and D.M. Bates. \newblock {\em Mixed-Effects Models in {S} and {S-PLUS}}. \newblock Springer, 2000. 
%#%^!endif \end{thebibliography} %\addcontentsline{toc}{section}{Index} %\printindex \end{document} spatstat/vignettes/getstart.Rnw0000644000176200001440000003144413115225157016462 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Getting Started with Spatstat} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Getting started with \texttt{spatstat}} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle Welcome to \spst, a package in the \R\ language for analysing spatial point patterns. This document will help you to get started with \spst. It gives you a quick overview of \spst, and some cookbook recipes for doing basic calculations. \section*{What kind of data does \spst\ handle?} \Spst\ is mainly designed for analysing \emph{spatial point patterns}. For example, suppose you are an ecologist studying plant seedlings. You have pegged out a $10 \times 10$ metre rectangle for your survey. Inside the rectangle you identify all the seedlings of the species you want, and record their $(x,y)$ locations. You can plot the $(x,y)$ locations: <>= data(redwood) plot(redwood, pch=16, main="") @ This is a \emph{spatial point pattern} dataset. 
Methods for analysing this kind of data are summarised in the highly recommended book by Diggle \cite{digg03} and other references in the bibliography. \nocite{handbook10,bivapebegome08} Alternatively the points could be locations in one dimension (such as road accidents recorded on a road network) or in three dimensions (such as cells observed in 3D microscopy). You might also have recorded additional information about each seedling, such as its height, or the number of fronds. Such information, attached to each point in the point pattern, is called a \emph{mark} variable. For example, here is a stand of pine trees, with each tree marked by its diameter at breast height (dbh). The circle radii represent the dbh values (not to scale). <>= data(longleaf) plot(longleaf, main="") @ You might also have recorded supplementary data, such as the terrain elevation, which might serve as explanatory variables. These data can be in any format. \Spst\ does not usually provide capabilities for analysing such data in their own right, but \spst\ does allow such explanatory data to be taken into account in the analysis of a spatial point pattern. \Spst\ is \underline{\bf not} designed to handle point data where the $(x,y)$ locations are fixed (e.g.\ temperature records from the state capital cities in Australia) or where the different $(x,y)$ points represent the same object at different times (e.g.\ hourly locations of a tiger shark with a GPS tag). These are different statistical problems, for which you need different methodology. 
\section*{What can \spst\ do?} \Spst\ supports a very wide range of popular techniques for statistical analysis for spatial point patterns, for example \begin{itemize} \item kernel estimation of density/intensity \item quadrat counting and clustering indices \item detection of clustering using Ripley's $K$-function \item spatial logistic regression \item model-fitting \item Monte Carlo tests \end{itemize} as well as some advanced statistical techniques. \Spst\ is one of the largest packages available for \R, containing over 1000 commands. It is the product of 15 years of software development by leading researchers in spatial statistics. \section*{How do I start using \spst?} \begin{enumerate} \item Install \R\ on your computer \begin{quote} Go to \texttt{r-project.org} and follow the installation instructions. \end{quote} \item Install the \spst\ package in your \R\ system \begin{quote} Start \R\ and type \verb!install.packages("spatstat")!. If that doesn't work, go to \texttt{r-project.org} to learn how to install Contributed Packages. \end{quote} \item Start \R\ \item Type \texttt{library(spatstat)} to load the package. \item Type \texttt{help(spatstat)} for information. \end{enumerate} \section*{How do I get my data into \spst?} <>= data(finpines) mypattern <- unmark(finpines) mydata <- round(as.data.frame(finpines), 2) @ Here is a cookbook example. Suppose you've recorded the $(x,y)$ locations of seedlings, in an Excel spreadsheet. You should also have recorded the dimensions of the survey area in which the seedlings were mapped. \begin{enumerate} \item In Excel, save the spreadsheet into a comma-separated values (CSV) file. \item Start \R\ \item Read your data into \R\ using \texttt{read.csv}. \begin{quote} If your CSV file is called \texttt{myfile.csv} then you could type something like <>= mydata <- read.csv("myfile.csv") @ to read the data from the file and save them in an object called \texttt{mydata} (or whatever you want to call it). 
You may need to set various options to get this to work for your file format: type \texttt{help(read.csv)} for information. \end{quote} \item Check that \texttt{mydata} contains the data you expect. \begin{quote} For example, to see the first few rows of data from the spreadsheet, type <<>>= head(mydata) @ To select a particular column of data, you can type \texttt{mydata[,3]} to extract the third column, or \verb!mydata$x! to extract the column labelled \texttt{x}. \end{quote} \item Type \texttt{library(spatstat)} to load the \spst\ package \item Now convert the data to a point pattern object using the \spst\ command \texttt{ppp}. \begin{quote} Suppose that the \texttt{x} and \texttt{y} coordinates were stored in columns 3 and 7 of the spreadsheet. Suppose that the sampling plot was a rectangle, with the $x$ coordinates ranging from 100 to 200, and the $y$ coordinates ranging from 10 to 90. Then you would type <>= mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90)) @ The general form is <>= ppp(x.coordinates, y.coordinates, x.range, y.range) @ Note that this only stores the seedling locations. If you have additional columns of data (such as seedling height, seedling sex, etc) these can be added as \emph{marks}, later. \end{quote} \item Check that the point pattern looks right by plotting it: <>= plot(mypattern) @ \item Now you are ready to do some statistical analysis. 
Try the following: \begin{itemize} \item Basic summary of data: type <>= summary(mypattern) @ \item Ripley's $K$-function: <>= options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1))) @ <>= plot(Kest(mypattern)) @ For more information, type \texttt{help(Kest)} \item Envelopes of $K$-function: <>= plot(envelope(mypattern,Kest)) @ <>= env <- envelope(mypattern,Kest, nsim=39) @ <>= plot(env, main="envelope(mypattern, Kest)") @ <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ For more information, type \texttt{help(envelope)} \item kernel smoother of point density: <>= plot(density(mypattern)) @ For more information, type \texttt{help(density.ppp)} \end{itemize} \item Next if you have additional columns of data recording (for example) the seedling height and seedling sex, you can add these data as \emph{marks}. Suppose that columns 5 and 9 of the spreadsheet contained such values. Then do something like <>= marks(mypattern) <- mydata[, c(5,9)] @ <>= mypattern <-finpines @ Now you can try things like the kernel smoother of mark values: <>= plot(Smooth(mypattern)) @ \setkeys{Gin}{width=0.8\textwidth} <>= plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)") @ \setkeys{Gin}{width=0.4\textwidth} \item You are airborne! Now look at the book \cite{baddrubaturn15} for more hints. \end{enumerate} \section*{How do I find out which command to use?} Information sources for \spst\ include: \begin{itemize} \item the Quick Reference guide: a list of the most useful commands. \begin{quote} To view the quick reference guide, start \R, then type \texttt{library(spatstat)} and then \texttt{help(spatstat)}. Alternatively you can download a pdf of the Quick Reference guide from the website \texttt{www.spatstat.org} \end{quote} \item online help: \begin{quote} The online help files are useful --- they give detailed information and advice about each command. They are available when you are running \spst. 
To get help about a particular command \texttt{blah}, type \texttt{help(blah)}. There is a graphical help interface, which you can start by typing \texttt{help.start()}. Alternatively you can download a pdf of the entire manual (1000 pages!) from the website \texttt{www.spatstat.org}. \end{quote} \item vignettes: \begin{quote} \Spst\ comes installed with several `vignettes' (introductory documents with examples) which can be accessed using the graphical help interface. They include a document about \texttt{Handling shapefiles}. \end{quote} \item workshop notes: \begin{quote} The notes from a two-day workshop on \spst\ are available online \cite{badd10wshop}. These are now rather out-of-date, but still useful. \end{quote} \item book: \begin{quote} The forthcoming book \cite{baddrubaturn15} contains a complete course on \texttt{spatstat}. \end{quote} \item website: \begin{quote} Visit the \spst\ package website \texttt{www.spatstat.org} \end{quote} \item forums: \begin{quote} Join the forum \texttt{R-sig-geo} by visiting \texttt{r-project.org}. Then email your questions to the forum. Alternatively you can ask the authors of the \spst\ package (their email addresses are given in the package documentation). \end{quote} \end{itemize} \begin{thebibliography}{10} \bibitem{badd10wshop} A. Baddeley. \newblock Analysing spatial point patterns in {{R}}. \newblock Technical report, CSIRO, 2010. \newblock Version 4. \newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/} \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}. \newblock Chapman \& Hall/CRC Press, 2015. \bibitem{bivapebegome08} R. Bivand, E.J. Pebesma, and V. G{\'{o}}mez-Rubio. \newblock {\em Applied spatial data analysis with {R}}. \newblock Springer, 2008. \bibitem{cres93} N.A.C. Cressie. \newblock {\em Statistics for Spatial Data}. \newblock {John Wiley and Sons}, {New York}, second edition, 1993. 
\bibitem{digg03} P.J. Diggle. \newblock {\em Statistical Analysis of Spatial Point Patterns}. \newblock Hodder Arnold, London, second edition, 2003. \bibitem{fortdale05} M.J. Fortin and M.R.T. Dale. \newblock {\em Spatial analysis: a guide for ecologists}. \newblock Cambridge University Press, Cambridge, UK, 2005. \bibitem{fothroge09handbook} A.S. Fotheringham and P.A. Rogers, editors. \newblock {\em The {SAGE} {H}andbook on {S}patial {A}nalysis}. \newblock SAGE Publications, London, 2009. \bibitem{gaetguyo09} C. Gaetan and X. Guyon. \newblock {\em Spatial statistics and modeling}. \newblock Springer, 2009. \newblock Translated by Kevin Bleakley. \bibitem{handbook10} A.E. Gelfand, P.J. Diggle, M. Fuentes, and P. Guttorp, editors. \newblock {\em Handbook of Spatial Statistics}. \newblock CRC Press, 2010. \bibitem{illietal08} J. Illian, A. Penttinen, H. Stoyan, and D. Stoyan. \newblock {\em Statistical Analysis and Modelling of Spatial Point Patterns}. \newblock John Wiley and Sons, Chichester, 2008. \bibitem{mollwaag04} J. M{\o}ller and R.P. Waagepetersen. \newblock {\em Statistical Inference and Simulation for Spatial Point Processes}. \newblock Chapman and Hall/CRC, Boca Raton, 2004. \bibitem{pfeietal08} D.U. Pfeiffer, T. Robinson, M. Stevenson, K. Stevens, D. Rogers, and A. Clements. \newblock {\em Spatial analysis in epidemiology}. \newblock Oxford University Press, Oxford, UK, 2008. \bibitem{wallgotw04} L.A. Waller and C.A. Gotway. \newblock {\em Applied spatial statistics for public health data}. \newblock Wiley, 2004. 
\end{thebibliography} \end{document} spatstat/vignettes/hexagon.pdf0000644000176200001440000000473213115225157016261 0ustar liggesusers%PDF-1.4 %쏢 5 0 obj <> stream x-1As^1/@`X^` 2E0T YA=qNOc?4M 9,T;SvzI!ŕehV)OkbJ#S|8xh+] bendstream endobj 6 0 obj 118 endobj 4 0 obj <> /Contents 5 0 R >> endobj 3 0 obj << /Type /Pages /Kids [ 4 0 R ] /Count 1 >> endobj 1 0 obj <> endobj 7 0 obj <>endobj 8 0 obj <> endobj 9 0 obj <>stream 2013-12-23T19:49:36+08:00 2013-12-23T19:49:36+08:00 fig2dev Version 3.2 Patchlevel 5d hexagon.fig endstream endobj 2 0 obj <>endobj xref 0 10 0000000000 65535 f 0000000410 00000 n 0000001982 00000 n 0000000351 00000 n 0000000222 00000 n 0000000015 00000 n 0000000203 00000 n 0000000474 00000 n 0000000515 00000 n 0000000544 00000 n trailer << /Size 10 /Root 1 0 R /Info 2 0 R /ID [<7169BA68125AE1AEC0984268ECC4E10A><7169BA68125AE1AEC0984268ECC4E10A>] >> startxref 2169 %%EOF spatstat/MD50000644000176200001440000024013113166520620012427 0ustar liggesusersd33fbc2233f16d564499cc69844fb82b *DESCRIPTION 49a37f2f02625bf381aec0c2bede43e8 *NAMESPACE af935be9e20d655e9adab9cbb90cfb80 *NEWS 5e1711cba2ad40a53219b172b6fb99dc *R/FGmultiInhom.R 380a93554202560db6d72939b511ec99 *R/Fest.R 75ecfdd9626f487f191a991dded636d1 *R/First.R de486529eadf1b89d727c4865135efd7 *R/GJfox.R 227d455ca897f91d7dd879c3f8539a65 *R/Gcom.R 634f9eb8fec91a40ed0eb1389dbd7042 *R/Gest.R d3fa3abc1515437f625927587ac80278 *R/Gmulti.R 1101d753c8f35af184bfa8ff36a64486 *R/Gres.R f23e2cbb6ee07011049657331935581f *R/Hest.R 2a2cef04f4aa319405856086ffed48e2 *R/Iest.R d3e7548bb5eb3f305e9da1e5485ece35 *R/Jest.R d49147625234ff5703f9ff441ada2813 *R/Jinhom.R e840187383b058d56058fa83b82db02f *R/Jmulti.R 47d818e8f09b9eaae578d7a94a4f6e71 *R/Kcom.R b048e9405af29aa208aebf691ad05a68 *R/Kest.R 6b9860067099f470e17def25c9c9f246 *R/Kinhom.R fce7353c575a80a31b49a8c7d515aaab *R/Kmeasure.R a8742620874e75312563d5844b76ac3d *R/Kmodel.R 1d1f72a7c0fffefd2f96b93bec8ffafd *R/Kmulti.R 5e417749366d82e0a0a0bf33d08b55fb 
*R/Kmulti.inhom.R abbf6db440f53371a4f757cb17274de6 *R/Kres.R ea1f01b5689ec60ffa0488a28ffac454 *R/Kscaled.R 94033aebde371087c4c246cb9fc7ef16 *R/Ksector.R 4e2cdb96fc1f583ebab0b39195656906 *R/Math.im.R 6ea4db23fe667009de99cf5bacfc5511 *R/Math.imlist.R 3de7fd9c7b3b8ea381d7aeb667a5b6d7 *R/Math.linim.R cd21940efea7c50e968c4f6775c8e5cf *R/Tstat.R 69fd69139c158314a9cfc75cdf7c4762 *R/aaaa.R d528b57827c35f5df4ee4d01ed20a3ff *R/adaptive.density.R 286e37ecff8524697d89955543166505 *R/addvar.R d6804d59baa350c92b853c7d9e1ca7d2 *R/affine.R 948e229cfe013701e32cbf3690856c2d *R/allstats.R fc73a78175654eac4f754898bb0de421 *R/alltypes.R 11baa91da1e8a2e68529ccc3f21b3c09 *R/anova.mppm.R 24f6d3abeff639bc98c5226cb79e64c5 *R/anova.ppm.R 842580bf1e8ff51f0749f24582ffe646 *R/applynbd.R 2ffb106a46f58e99a86469d6e9c71174 *R/areadiff.R 6823bd6c644835b979f7407d60099de3 *R/areainter.R 6bbac6df44f40f42ffdae2d3fb7a5862 *R/as.im.R e933ffbde340ca92d90226034dc027eb *R/auc.R aed3d05661b614e76d2ca1181af71d5c *R/badgey.R 85b90545fe7081b0deef6d0b2ed2481a *R/bc.R 789f24a9b671b204cca2bea93c57a49e *R/beginner.R f1193955614b0b88873d0c653b99bdbd *R/bermantest.R 64f8eee699bbfcf3c0d90ad1814f963d *R/blur.R 7434cff6e94c6ac8eac21623c7532e8c *R/boundingbox.R cc3f8bffb96b961a96ba8ad83c30886c *R/boundingcircle.R 7ec0a56277da1b01a244c3749806934a *R/breakpts.R 5c98f522a635f8da3fdad5ccf60cb388 *R/bugtable.R 5ff3ffed95f449815b9432494eeae8b3 *R/bw.diggle.R 42f231888d5fdce7e53c41efb64f9fd7 *R/bw.optim.R 378dc97af1f488f3610f31c61a00d960 *R/bw.pcf.R a0e399a8f635ed69a7904e2f5164f2a6 *R/bw.ppl.R af810a4aec1706ad6f5b8ea1de775f0e *R/by.ppp.R f6c50293c09bd8b03c89f5f8a6051a6d *R/cdf.test.mppm.R cd63b2734588421c555f8fb9572575b8 *R/cdftest.R 5efd1803e37d7b6109ce6a9e60879748 *R/centroid.R ab416fd2e977a6192d5764c5dce2eda9 *R/circdensity.R c17c44e7db04f6205bbcc061b9bdf7fe *R/clarkevans.R 55b8ccb851f8a2cf879ff18a57963304 *R/classes.R e317941f2ea501d3f8f31733bd17173b *R/clickjoin.R 46f71fabebc3f2cd3060bbc35dcab8d4 *R/clicklpp.R 
661b2e31b502ac5b57ddd74f8c5b89c6 *R/clickpoly.R 3c61b16ed3d95747d6e1eda91adb36bc *R/clickppp.R 75551661b0aed9ecd75bed68400e638c *R/clip.psp.R 77d64459b26599f5ec5590d8a798ad8b *R/close3Dpairs.R c4d4821b820bffba026deedc8277c62d *R/closepairs.R 665d650e33e2625b0f275b7ec449b48f *R/clusterfunctions.R 510010060f80bc874ded543566e267d8 *R/clusterinfo.R b2a666ee329acd511ee4f5f10dad5c79 *R/clusterset.R 04a6a7ee95ebfb72d926543a1f648253 *R/colourschemes.R af5b68ffeff7856ea2d22c9f8b9a918c *R/colourtables.R d04b501a2d7090f736d1ab04d4ffb713 *R/colourtools.R 8b56cc31365f5a6fa4fa148894eb0a67 *R/compareFit.R a4b6af14e0796fe45a4abd3f37f69f66 *R/compileK.R ee9efbe243dad84530fab391beea5b45 *R/concom.R 4db6c643d4110b3966eb17866823f230 *R/connected.R eecb29c4bf786f00b4eff413d5a04efd *R/convexify.R bab6320f88300558ee6bd2f361a22cb4 *R/copyExampleFiles.R 507c554ad23eb96d9f5e7dde076fb80e *R/covariates.R 34513811a31ead96ea98ba0fd695390b *R/covering.R da21dc26f7cdb0a08c645116a05481ce *R/crossdistlpp.R 96e52355fc9f3275a0d195245a047f77 *R/cut.ppp.R 4b5abf8151752cda014ad4404c4d3b3d *R/daogenton.R f955b7d8038fa1585e10430b6bf9fc01 *R/datasetup.R 18bc06f2ec35c5681ed5cffcb3c384eb *R/dclftest.R 53e0fb6d1af23e321722eea5e595653a *R/defaultwin.R 9c0bd6afbb507bedcf7f6283f47f7b7b *R/deldir.R 539a7b69048787ca2824e60941858704 *R/deltametric.R ee23eb1b6a982aa6b9ff074a13d4ae56 *R/density.lpp.R 3a6da5d450f5bda8ce033e5352de1547 *R/density.ppp.R 5aa565ac5016411dd2c9f00a0fcb0f46 *R/density.psp.R d158fb2a1a99c51f82b61aca2073d15d *R/derivfv.R c027a0425cb16790aa2f605b68e48a75 *R/detPPF-class.R 2926aabd8533958849d4b1d4858823d0 *R/detpointprocfamilyfun.R 3fe6c1fd4e144554204c5789944d6e83 *R/dg.R 528ad0fe5084e7c9772de1fd40cca1d5 *R/dgs.R b2d3b97a49bfce78011088375dfbc7a2 *R/diagnoseppm.R 0ee9ce9fa1700351bfc71402908a6b46 *R/diagram.R 4f08e0198ab68d963ac80012063fb6fb *R/digestCovariates.R 2382f86d9fe3dc1ad12175d6249706d0 *R/disc.R afc28b279d4ee328a6e2935dced9175e *R/discarea.R a8ec36b9d6c7dae5266f2c1961296c74 
*R/dist2dpath.R eb8b9b627f5ffdb68e93a208ff165a77 *R/distan3D.R 9820e884830f7694ed97358c6bb04913 *R/distances.R a90f2d580f886a276c423ff8cdc0221a *R/distances.psp.R 7a9087886b66301769fcf94a6e20a8cf *R/distanxD.R 37abb1b9eb553fee07699fd9c78b5a9c *R/distbdry.R aa66fd2c2cb128704755391efffe402f *R/distcdf.R e077a6b98a5774abf85b0023d392ff7c *R/distfun.R fb30d8c97ce5a52baa12b6cd400ce371 *R/distfunlpp.R 3d5b8e18bb3fd5ff0176a9c0e008bf27 *R/distmap.R d0a40c2539530b80a2c56153fb9c5491 *R/dppm.R cf9f2dca552d561b0a9b1a31bbcaa0be *R/dppmclass.R ed7beb4930906f00a1d7b9f0fa47f18c *R/dummify.R 7815c8e89c75969fa1166564cff61227 *R/dummy.R aea30200bddaa63d824a1727405795fc *R/edgeRipley.R 11c2b02b8bd7ddf3c422a19b1cf9b451 *R/edgeTrans.R 5dedf671d0e785e963af44634c27c62e *R/edges2triangles.R 1539189a0ae07f00bc8488fb54b8f071 *R/edit.R f41cfef5a017692ee84c46e395aa4c36 *R/eem.R fa491205118f62630678f36924d233ca *R/effectfun.R 16ca4a0d6243bf8ac92e38d1be870f3c *R/envelope.R 137a3968b56d57a1316076471b8e028a *R/envelope3.R 70b7255db85f551617e5ffac3c610299 *R/envelopeArray.R a207b14a959b369c5d7d837a2f7b940f *R/envelopelpp.R e330bb6fa7a99e028497cf61aa37905c *R/eval.fasp.R ff3cd0276897dc1e4fb035339e201e63 *R/eval.fv.R acf0120150f94dc97578d37c7098613d *R/eval.im.R b20038e7fc2c56af1db1000f4331c69c *R/evalcovar.R ca0aae5d3534b1aa8f816aa1b8bf7c81 *R/ewcdf.R cf259b89afe368a0b167a6c58c615648 *R/exactMPLEstrauss.R 93f4d4218957ab82492423dfb49f2522 *R/exactPdt.R ed28a5a602cccb2462693d267888c468 *R/exactdt.R 55128c282f0fbecda3fbccec5c55be21 *R/factors.R 6c3913843ec541a115f8b3c8a2cb1fb3 *R/fardist.R dd968a2f9b87caf7bbd83ecb20ca7f40 *R/fasp.R a613494a2f7dfc0e060ad785ebbbdc78 *R/fgk3.R 8c54fb7878e2deddad520a63fe98b826 *R/fii.R 53803076aff675d0ac5fd7a4722e4f1c *R/fiksel.R d0c7203c1b8b5bd514913583fb2733c2 *R/fitted.mppm.R 6ae89590c88402caf9a722d2fbfa9fd8 *R/fitted.ppm.R 9b4accc71b99440893950bbb817ba851 *R/flipxy.R d9aa777d1b59addc43b9a2fcb6d8711b *R/fourierbasis.R 78fdc58026c4b2c6747b07289224f1b6 *R/fryplot.R 
86264b95ef1b50e8fd33e9b8f14f243c *R/funxy.R 512956a4c8e8d65b6984e7c37eb7e8c7 *R/fv.R 99f1495bb864342fd9a86339e2fc9c49 *R/geyer.R eb1f3c876a7c6627ca82ec3d8c193260 *R/hackglmm.R 3d1c0a72df0c59e40b784cb04029694f *R/hardcore.R f1e16ee9c975eda27002f44980d1ea57 *R/harmonic.R 27d1b0c9a1d43eab62f466db5fa90b85 *R/hasclose.R 7576337da12b9579db9130c9e609822f *R/hasenvelope.R 44ad75b9d79a08a4689e2a101d8c6bd5 *R/headtail.R 123085aba3859361ca3f643e7c956c6f *R/hermite.R 1e6f05124674b83e0d62227fa4588b2c *R/hexagons.R 0fb7e0bb6cdf5a5a43a68bf3f63b04c4 *R/hierarchy.R e076a46be3a1610a4c6627bc52f50c71 *R/hierhard.R b54beaf8c8cd0e0a447bb11d1e7e4972 *R/hierpair.family.R 0bbb0d6e10a91c54f17142006cb5811a *R/hierstrauss.R 7d769e6877d34cf5b2c9ca9d4b5c78af *R/hierstrhard.R cb789e61235a77960c6b51d880da4c8d *R/ho.R e3447ff115c6062ebb3c5d3f30b68f1b *R/hopskel.R 8f27935d61bd20d7f3db6b1ee67ca06d *R/hybrid.R a941bb1777fdc6a6ba28776240487a33 *R/hybrid.family.R 3079dd3bd618f23c663071f5c7a7e755 *R/hyperframe.R 148c4d628f13507ae4e5cc5783d1f274 *R/hypersub.R 80057f9f11a0a26a9a3b75407a0c8f05 *R/idw.R c3df9c2ff02a1dfc0de1ae145004f6be *R/images.R d2a3e9a8e03f10a0978d3d8af8b32180 *R/indicator.R a87e74c429097d3740cb20466f0ecfc9 *R/indices.R 97169059771b69f8f89ea8dc15d2867f *R/infline.R b5d86a0ef4b0631df4e32747acd13171 *R/inforder.family.R e302e9fadd6204cbb46265a58e415c4a *R/intensity.R 870fa33ac789dc5dd7bb031d70cb01e6 *R/interact.R 475c2212ff5bb6318748b0c8243c4caf *R/interactions.R 39a5a07c0cb7f85bf84d6c9d8edde030 *R/interp.im.R db5574753bf54b88204aee53f13f86e3 *R/iplot.R e60ea20fa4389884ee22c550d12ef058 *R/iplotlayered.R d5ec9fb4f8c91e3183b71d7f4ebf6783 *R/ippm.R be78559ca577d66f04b72bbc5a2684cb *R/is.cadlag.R 3daa3f95a778a039d0aec8f57b6ebfe4 *R/is.subset.owin.R e4b9b1f05b000b5a69c2e992ca7aaa09 *R/istat.R 587fae1d11000f1650e45d955b2675ef *R/kernel2d.R 9a419fe9b5320d538bbd6c8b61faa8b5 *R/kernels.R 79abe3ad5c54621a3dc219c8051ad102 *R/kmrs.R 5b4ffe1f13fcc25da22bb23e97328d8e *R/kppm.R 
c6a963c659a5874585a22522cc07d285 *R/laslett.R a44ea103d52b7cfc4445578bf5636ac6 *R/layered.R bea5de9ece8720246fe8c7932645a492 *R/lennard.R 5affa028f6bbd36a38ebb1ecba7e7f5e *R/levelset.R 9ff04dcf17ac70c82db08e1147d1d656 *R/leverage.R 9df10e52c06ff2e69c7560f8c4379068 *R/linalg.R 9a1c867e7de72d005b245e777c105685 *R/lindirichlet.R fe2945637341ddce52b478f75d30d3b9 *R/linearK.R 429271155d7ad823164444bec3068e76 *R/linearKmulti.R 2af0a9aa60f0e5dc19b92fff09e576f5 *R/lineardisc.R f2f04bfaec86cf00552471bb235c43db *R/linearmrkcon.R fffeaff44ff9ac5712b6e309bfa6c051 *R/linearpcf.R ea3c990d1867f7d1361ceb2b3609b82c *R/linearpcfmulti.R d4afccb12a075e58904517028bdcc4e6 *R/linequad.R f656ced26a02f15f41a5c9fefe01d6f5 *R/linfun.R b92393a2a6ddde1922c9491c5af1edad *R/linim.R 36ebfeefaca5dc9ea58e6a1b04000f0d *R/linnet.R 69565ef1de12c467e47fa78b18391b20 *R/linnetsurgery.R c3466b8e9c2613362539deb2d204478e *R/lintess.R 52a89f9b4655094430fc919796ae31c7 *R/listof.R 1cf11ae8fa00ac9c5ce88f2596beb8c3 *R/lixellate.R d6c15515249187e299711d037a496eb0 *R/localK.R 046b3c934e596c11a3ef23c6d3050e8c *R/localpcf.R 227a83bb1bf3b2979f0e15517b238473 *R/logistic.R 267a45c8603e55403351f74582504e99 *R/lohboot.R a3248357bb457e9f26aebdb7ec7f07e2 *R/lpp.R 716eea8507eceb7c73f92383a966327f *R/lppm.R 638aaf3c095860e6cd3383c2b374d8a3 *R/lurking.R 37deccc3696c5a48c6acef19dba2c0cc *R/markcorr.R e03a3921cbb451137f495c068fcd33e6 *R/marks.R 6a3d9ee234a765226a04a6c3c23663eb *R/marktable.R ea3c639566286389d8c3aa4549f1cb6e *R/matrixpower.R 1f01f9f3cc3b0aa01e4b7b555dd18c5c *R/measures.R 11274026a6676f44edd79ba3ddc7a80b *R/mincontrast.R 6abbec9263d9ff7773029523af1c1aaa *R/minkowski.R 7948768c9430d5b3b2365726523e78dc *R/minnndist.R ba8782eb90ee38cc0a41750b16962bda *R/model.depends.R 5a89d6b1f9f3df7749d8953db2583f14 *R/morisita.R a69e2a67f2135cfb2e9414b0d9154bb2 *R/morphology.R 6dcf38af8f56cb3cc23fdb5406e1ea76 *R/mpl.R 6fa509593629ac83c8956f7bdaa462c4 *R/mppm.R dc1d4c0c4b31cd65c3ddbc6b86ba4ff4 *R/multihard.R 
23ade743ff6f39f31ff1bf52ee73f088 *R/multipair.util.R 8f2545648901dd2988e32bd2cd61a99c *R/multistrauss.R fd6a391d09ffa9ea20e479950bf1d3c8 *R/multistrhard.R a2995358cffbe933b3a5c22933b03537 *R/nearestsegment.R d369ba3ff42b8dded86eee5209751352 *R/newformula.R 1cbd67649b5abf2fb99bd67b1f14b3a4 *R/news.R ab0739722eeb45406471a20c68fbc49a *R/nnclean.R 56e256896deacb82338889e6124f3644 *R/nncorr.R 26b334ec773508a099b853feca7feee2 *R/nncross.R 09b95d6428ea9d64f0ab8caed513f688 *R/nncross3D.R eba47f8399e92f6bdd92ad6504973280 *R/nndensity.R dd035ea9b9c38630ad82778b8bea2734 *R/nndist.R b17089e11b6fc28803715e0c2c4c80a5 *R/nndistlpp.R ee673cb9586c442138794718ee2bdb2d *R/nnfromvertex.R f68e4d831bd072237a5c8fb60ff294d2 *R/nnfun.R 074d8beee30549e746082d7146b82f11 *R/nnfunlpp.R 02c47fe1a3b2104bbf0935da6b8f25dc *R/nnmap.R b221ae2d690a07454c1faae331565807 *R/nnmark.R 9b9905828a135e8b36e2d031b06e3a56 *R/nnorient.R a2b94b1e048d56444616460d62da489d *R/objsurf.R b80aa0341a5e4d53548273662bcb7ff0 *R/options.R 39b46ec1a232b9a703ad0760cda45f5a *R/ord.R 351116d5be6f0c962874e927ccf6c416 *R/ord.family.R ef444b3f933f760902368994e197c59e *R/ordthresh.R 930fed507a70fa0c78d412c296228e1b *R/otherpackages.R 717ef43c7fa89d127ff9919f740f1b21 *R/pairdistlpp.R 04a1d68481b2f4f676610397cdaf0b21 *R/pairorient.R 19584df2d0ad0061648ee51f70d0bb2a *R/pairpiece.R 7a32a6552c99225a840ac73444a48ed4 *R/pairs.im.R 466b544caf507d4c55ab60882d0b7945 *R/pairsat.family.R 64f20b43bc0da9058da304d59354e44a *R/pairwise.R 88f67d802f23a0958625804f2865c606 *R/pairwise.family.R 7e219c0e44487ff76d3c06bb86a48984 *R/parameters.R 72113584f098ace9ff784e8f1bbae131 *R/parres.R afbbcf7829d779789195527e235ae8b9 *R/pcf.R b67128056bc43645c83e5386d5744045 *R/pcfinhom.R c76e896661080247e9b5e95e3d1cab0b *R/pcfmulti.R 8dfd5c29c944568943c7144165e91bab *R/pcfmulti.inhom.R dc3c2e276c6590be4760bb56d26b4bdf *R/penttinen.R dfb884749a05ca2792358f6d1ff35f0e *R/percy.R 4e2cfc0fbe156faabfc39466b943f522 *R/periodify.R 1d16cfd9960cff167575d94927c65244 
*R/persp.im.R 8ef771abd0f6cd80c90354fc70e205f5 *R/pickoption.R 750978b020d2374379a5ceec9daf914b *R/pixellate.R daeae8671f459318105d860ec917a026 *R/plot.anylist.R 6cd130d91ac285c2220504f42fda6c46 *R/plot.fasp.R cfac4f99a5923164d43b9600d198faa1 *R/plot.fv.R 8756fd2ad17305eba64122f832828203 *R/plot.im.R 2773c384cfbd702d010b28ec7852c1d6 *R/plot.mppm.R e587b76b875421c8abbf6e2a123a52fe *R/plot.owin.R d5a505a3595402321313152da0cf42a2 *R/plot.plotppm.R 125840a6a0920802ff82d6989a807046 *R/plot.ppm.R c022ec3902fff1cfdd06eef5a651e3c0 *R/plot.ppp.R afe5f45c9640d77644e16d89f7ccf620 *R/plot3d.R 3700a0ace938e63861194b9aa6de5a83 *R/pointsonlines.R 1a0c9f29d3c585dd7bbb9c9872188e05 *R/poisson.R d219c34acd3456bdf4bebff5254f6ab8 *R/polygood.R eff91a3e3e11682e74644666801c4fc9 *R/polynom.R ced1f0fff95ddcc732a15c28d5374c5d *R/pool.R c90a0106194e05afb693abf65b6fbf98 *R/pp3.R 703b5127b412ddd7ac9ebdedd01f47e9 *R/ppm.R cc99af57b4ae25100643e24c974108f5 *R/ppmclass.R d7e7ab96efb78bd01ab9a37ad8361cb2 *R/ppp.R 711ec61ac0331e821a46dfd8fab43f27 *R/pppmatch.R d06c9bdab82a9d64ccabe3154e91bcb9 *R/ppqq.R c11360c41b18ce0b5b3d65e7987f214f *R/ppx.R 14f10941149da57fd2835cb84916fb4a *R/predict.ppm.R 67236f840124ed31fe5be47d6724e4a0 *R/predictmppm.R 46e63193b40fffba5a5b96509ef6212a *R/profilepl.R f1a20af4200e23511ecbc06c75be2e33 *R/progress.R b277464f26417697b752bb43c63c0f90 *R/psp.R 810df9bfc77d25ffae6eb75f376a2e9f *R/psp2pix.R 64a4d0dc641b780930791d125a5e8978 *R/pspcross.R 0f544843cf76c9e96904d9cb9b758c3d *R/psst.R b794c41496b10af9b552c6c57a2b81df *R/psstA.R de957e4255898c05cced10e7f08c065e *R/psstG.R 037230fd025f8fd9308b0b4b98760799 *R/qqplotppm.R b5d703a4b5c573d3f70a7bae6dc86620 *R/quadclass.R 427ceadf3465047c40765f37a1b8d0e6 *R/quadratcount.R 99aa240d18a1c29ee5156bc18017f63f *R/quadratmtest.R cc7fc0270683fcf28f0f85df7986c0cf *R/quadratresample.R 2951d9945ef610ae7a314070db1823c6 *R/quadrattest.R ba81abd6fe6794df409e9434dc1897f4 *R/quadscheme.R 98e0c4bd3399ee5af59ee8bff8311876 *R/quantess.R 
24f39fe4ae6c36437f7fb5caa8cab62c *R/quantiledensity.R 115a41aaf2d3c55b68f56802fcd56020 *R/quasirandom.R c94cc3c652b342b1f61d7b1a4c2d3cf2 *R/rLGCP.R 826c16455c2ff593392615c64c4eada5 *R/rPerfect.R 6e0b2255bf8b42f5a236c391b10306de *R/rags.R c3e2ce9e0b3eb45a04f06ff0f9bbf0c8 *R/random.R 73b70afa74d324923fd971bc1a4f8bbc *R/randomImage.R 33cf2609e42688de9d12fb8511bc6871 *R/randomNS.R 8207a3585cc3256849c84509d3482718 *R/randomlpp.R 6a53fb3470cda89aaf6ecfb6467dc33c *R/randommk.R e202d8e0becb58b38447cea341089432 *R/randomonlines.R 2e3a3bb0b90807144b8d36e69d4df3cb *R/randomseg.R 24972883102c87da259be0da8e982db7 *R/randomtess.R 6483654e7478864c9833dbf2247f6881 *R/rat.R a8a6f895acc18aa94be66b546be6c17f *R/reach.R 511b88658d51796d9a0daf71b44b9cb4 *R/reduceformula.R 73737547daa1a7dff58c5f68c6beb350 *R/relrisk.R 10087f7be400e45e753c146c9c36a054 *R/relrisk.ppm.R 99f3f901362509f3494658b3b853981a *R/replace.ppp.R 75609e221d9edcba28f52c6554cc8639 *R/rescale.R 7a685c4e7cf92aa99ec1c6bebe0e7cd5 *R/rescue.rectangle.R fc977246af62b82b80bdf1ecec365664 *R/resid4plot.R 7dda79e949765275356a0552b47e2a2b *R/residppm.R 15f1ea6eff30e3b2b696ca6e9a3f5d4f *R/residuals.mppm.R 45e4675f61f4637fbfbb4f2c22b86ffd *R/rho2hat.R 5a8fe62283785c37f31f8c5856f6f020 *R/rhohat.R 9f131d0cb36ed648e36e623b90b674a9 *R/ripras.R f47c0eccaa94d6655967d988635109f5 *R/rknn.R 568ec3eb53def1d7b801f58b3dc58452 *R/rlabel.R 35f1e78c4ec615ebdf02bf522d15dbe7 *R/rmh.R 7a1597cd81e8bf23f5c1f1923a2485de *R/rmh.default.R 167c21797276863ddd5ce4086c4cdd13 *R/rmh.ppm.R 7968359dbc6c8929bbae8698ca339565 *R/rmhResolveTypes.R 6d56dc58f41590dcf1f7ae9187a609a4 *R/rmhcontrol.R 15998f271ee8c97b82a16a551a524cf4 *R/rmhexpand.R 1d184c09d72a11669946213002d04fda *R/rmhmodel.R e9b62883c43b618c2eed5ad3729c0a23 *R/rmhmodel.ppm.R cf21774f29b957fa630c08d4db3c9cb5 *R/rmhsnoop.R 112482932314aa9b1dba3ec93d6d0892 *R/rmhstart.R bf21ab55a2690786c4631d4efb808e9f *R/rmhtemper.R e9b546ea6437c8c859621b2d15e211ea *R/rose.R f55cbf2dddbd415b9d1be26b0f2e2af0 *R/rotate.R 
3e578da354709729b7022d4764c6a1ec *R/rotmean.R 00c58075e8e3a791b26efd1f11b7f3e6 *R/round.R 4fcd2dee7f630b38180d057ea7802a20 *R/rppm.R cc177bd23c6c65d1dc16d42dbda7dc8f *R/rshift.R d232999f512896464f8a1cc92f6d1661 *R/rshift.psp.R e64dda6db52ba50d4e4049ded9c6fc8a *R/satpiece.R 39289933fcd94c7fefc4c303bf6887f5 *R/saturated.R 42850d1f9770d402d5c115f8437ace70 *R/scanstat.R 154960e7b9e6f83009976521bcae5734 *R/scriptUtils.R b78826700a4657b758d4f198b635f9d0 *R/sdr.R 6e9989251f16919b9802f21093e8ac69 *R/segtest.R 4e79b8bc03182442d9246b637a42c7cb *R/setcov.R 048ef0497b1b7b5b715a35c0d88bd4f9 *R/sharpen.R c0e7adf01137747788fad043b581c8e7 *R/sigtrace.R a6f1e5c108c1a2700af50c9fbe28d417 *R/simplepanel.R 726d8b7d4040f25222b228f1d99beea3 *R/simulate.detPPF.R 1770fb0dff8c1a7a9a7ad77431489b00 *R/simulatelppm.R 1ccfb5949adbf54095c4f314e7c7db12 *R/slrm.R bd8f2db009e68112b36e1b12e83a0fcf *R/smooth.ppp.R cbecefbec629cf2780114a39e1b29eff *R/smoothfun.R 385402b5f2a439e787efb62f28aa65e9 *R/smoothfv.R d013ffbb016df9e12452a90d803c9b18 *R/softcore.R 3f487a15f7f755b7e12b38ca0d2b0d07 *R/solist.R ecf9a1367b59dc3c38a434e1be27f6f8 *R/sparse3Darray.R 9e0d912170f34c8517000d089cb88f03 *R/sparsecommon.R 8f0ae7e41ae1795c6e0386b1bd8b5718 *R/sparselinalg.R e424c7d5c7eecff9656de3d1844d69ba *R/spatialcdf.R 7c54cdbe15c58ab578c6e6a56fae631d *R/split.ppp.R 5db355095825c7dd19a2b30df08f4652 *R/split.ppx.R 503eeb0499d4ebec56f62b4805416799 *R/ssf.R 3fa186fc8b0829bee18e98a5f76e1afb *R/stienen.R fd1e6bb816634ff979534dbd6f951c20 *R/strauss.R aa3c672e1030e60e1dbce383181f9f09 *R/strausshard.R 8a634ba847c42ffc724f3f888a864b69 *R/studpermutest.R 943cb60f12087230d18f53f21b4515e8 *R/subfits.R f4585ca133a2ae2c121018e12a765e76 *R/subset.R 5ce5c7351ab113de0450a01fa495759d *R/suffstat.R 0bcf7dd8d2ba1f985741420fc8c7c0f3 *R/summary.im.R e3e70ccc44580a8f7bbd86be9d3f47db *R/summary.kppm.R 398d2b3d789d26c1e7f285134c4f8fce *R/summary.mppm.R f770a5206c8b9e290f91bdf99fbfadad *R/summary.ppm.R f3c8a499c7512c3d0d7bea8db3564fe9 
*R/summary.quad.R 17ac6b693751c046d1f9d2d8ecdff095 *R/superimpose.R 281e382142d37951b3726b7909f404cb *R/symbolmap.R 09369c4f72f175f60bdb3cc3f590a505 *R/sysdata.rda 52c7080a314ab087e20d9303f89d7d05 *R/terse.R 60680732fdb1509bd32f6275b6e74e35 *R/tess.R 911200b0d21f6772f56dce486f9d07d9 *R/texture.R c3032a7090caf3ec6606ec27f85a6772 *R/timed.R ac4c53fd6fb6d473d0477a842831fa4c *R/transect.R 7143eaf3c90beea6b15d60fc9fb65c77 *R/transmat.R 98ac37e50a983e82784c24f1b41bc91f *R/treebranches.R 746a816204b6077878f2bb7e3bdb4fdb *R/triangulate.R 6852ab624a8f0e62a8e6a362efb8e055 *R/triplet.family.R 101269992fdb2a14ef41176c3f61d697 *R/triplets.R 652c45292114c90424b3b7f657a7b096 *R/unique.ppp.R d9d1240c99738775d158dafac3dced4b *R/units.R 0b29ba3c73fde1d1d67bea586bcf29eb *R/unnormdensity.R 07416272f0608019708764e89ec51944 *R/unstack.R 600d317510863cfba04bd16968bdaabb *R/update.ppm.R 28c6bc1a35ca918f09fee0153493e030 *R/util.R 7ed4538851138ac6212059a289531087 *R/varblock.R 902d97c8541b066b0b2d8f5bf86be07e *R/varcount.R 02692ff1a7f933f21c52ddb780ec580e *R/vblogistic.R ef46faf16027dd9ccf4e6785680b2699 *R/vcov.kppm.R 1394c73ff04627b29af0e76e78027b6a *R/vcov.mppm.R c591ed93e6809f3e917c6a3f7bb79f25 *R/vcov.ppm.R 81e6b9cd4f4b32664161bea0fe03a84d *R/versions.R 466c043d66aebf5056e3a8ea2970d74d *R/weightedStats.R fc84b5f741b3893c2683b7d12250b053 *R/weights.R fc780d786f60a57e8f7bb9e66f957f88 *R/window.R c8cec96411760c4c3e6611c87b98ae07 *R/wingeom.R 32a5d788998a986177a7360f4434ec13 *R/zclustermodel.R 0246cd2389064d092b1edb4901c20991 *build/vignette.rds 864845ec756c9bdd720c4399e3aff644 *demo/00Index 68d9e760c09449956b5a218bf1c91fbd *demo/data.R 69fec98d1b46c8c3205c44b677e6da15 *demo/diagnose.R 7b2ce490538f0c5f81ec16714d8eac9c *demo/spatstat.R b88a8fdba50e164d51309cfb78c2028e *demo/sumfun.R 407a8cca8bfc8a3407cc02f319e92e98 *inst/CITATION 40b7c225a16a40129627f60b05dcbe32 *inst/doc/BEGINNER.txt 6ce6ecfbb918ce3120ca3b4f107f5324 *inst/doc/Nickname.txt a8aceee49515b91de5fe873de081bca6 
*inst/doc/datasets.R f7d63ee0d4773b5af29d7264a5ebcfb2 *inst/doc/datasets.Rnw 6c13e9cc7a1ef712a3d00c7826d164b8 *inst/doc/datasets.pdf 85bf7ddebb18b1556010a9d2c6a22e61 *inst/doc/getstart.R 3cc6b729932901e25778c21cb679eab0 *inst/doc/getstart.Rnw 85375ef27c1f3edff47ec32bffeb8145 *inst/doc/getstart.pdf acf90340ff3db767dc5be73de4fe3dab *inst/doc/packagesizes.txt 04c10abc1b6237495542834c2e9c7868 *inst/doc/replicated.R f420eafa6b29f360b72269c91c485167 *inst/doc/replicated.Rnw 4280dcdddb6dc67741ae1c6d44463b60 *inst/doc/replicated.pdf 6ad41658f8ffaf90445efc191679fef8 *inst/doc/shapefiles.R 593a297bf4451016def2e174ea62b099 *inst/doc/shapefiles.Rnw e2ac1d05f75a1f5c95e913bf4a135d53 *inst/doc/shapefiles.pdf fbb9c768d0595adca9e5325b7fbd849b *inst/doc/spatstatlocalsize.txt 309f1d1f19c160d7776a2a8c55994f3a *inst/doc/updates.R b4605a8b9810b15e15d42cd0844a0fd2 *inst/doc/updates.Rnw 2dad96d54a128d73c8fa8376802cc28e *inst/doc/updates.pdf 12e68895fef0d3aa0bde45a0ddbadfa4 *inst/ratfor/Makefile 22e8a5189942ba190c13475b35459c7f *inst/ratfor/dppll.r 6d471ec061ea91398ba16323de56b9db *inst/ratfor/inxypOld.r 8fedcb4ea9b0d19e2da232b4b45c0626 *man/AreaInter.Rd f441ed342c42aab693974f9d6482997b *man/BadGey.Rd 1d8d9afeacbb8e67e56abf455ebfe6d6 *man/CDF.Rd fd2b04c59c0df2498ca684f01f3f1f86 *man/Concom.Rd 282a83310afb9506c9a035c96e55bedd *man/DiggleGatesStibbard.Rd c59392fc5fa60782bf91c8c0675de144 *man/DiggleGratton.Rd 7c842797134bed1851c806d99de4a0a0 *man/Emark.Rd 951ee2df828ad3de8c17340e0c5255b9 *man/Extract.anylist.Rd d6cce504ef640b6cdd4e88a012fd52de *man/Extract.fasp.Rd e50f40eb875c124095941fc99a486f36 *man/Extract.fv.Rd 6baaa916bde99c7c78b8ee52dd949a1f *man/Extract.hyperframe.Rd 7e4de82b92ba6d2135c405cc5ec7564b *man/Extract.im.Rd 62c8609253cd6cca6b78e76ead3864f0 *man/Extract.influence.ppm.Rd 9df24cebfa86a43b332426d55b0d65cf *man/Extract.layered.Rd c165f89bfa18ceb0aafb57ba7c3977f9 *man/Extract.leverage.ppm.Rd de0b5e2b05e4341b1f5de7261804a993 *man/Extract.linim.Rd dc6c0e3c01c25332b25485da48c1f918 
*man/Extract.linnet.Rd bfa41eea5bb69facbbecd9de7dc33106 *man/Extract.listof.Rd 5538d78ad2f67efa3bbf9fea43483a5f *man/Extract.lpp.Rd 91e440f304e3e4a4e021236bcce45108 *man/Extract.msr.Rd 740e2f7917d0e8e8df64e3f223ea09d6 *man/Extract.owin.Rd 5c9311308cb414457f540e618d59078a *man/Extract.ppp.Rd 3dc6242c1c60d1c44b59899b58f07129 *man/Extract.ppx.Rd 8ff65133fc2b94fdba9cf10fed0a92b0 *man/Extract.psp.Rd 298518688a3bb5c06e7f4b332965f163 *man/Extract.quad.Rd 4555b288918db5e360c216aad5b314e9 *man/Extract.solist.Rd d19bef46a23ecb0ab93af0ed35850d76 *man/Extract.splitppp.Rd 279ecfbecb82ff618333037b97bb953b *man/Extract.ssf.Rd 7f25d30fc29a62aa0499b509dcc22d52 *man/Extract.tess.Rd 563f9349613f728ccdc5c7e2edd9db37 *man/F3est.Rd 2ebdff1ff498c5d21fe8920a6255a6a5 *man/Fest.Rd 21ddb0ef529a342599d30a4ed85da941 *man/Fiksel.Rd 403214182a5ae9ab45bd0ed4ed5b2b96 *man/Finhom.Rd 2cde77506f7625f97adcda2d2056a7d2 *man/FmultiInhom.Rd ba39391af699fcb8246c5b2af6a2ce06 *man/Frame.Rd 4d35bfd3f54d1f1733a73d4727ae6f94 *man/G3est.Rd ebf156c5c9a5852a3f6ece9a52ea3d0c *man/Gcom.Rd 5a51479a48d4247feef59af98acc62cc *man/Gcross.Rd 0f7fb9b0fb9c1a658f34bf706cb1cc73 *man/Gdot.Rd d876a87fd0e28f9a30d2deb945f9ac7c *man/Gest.Rd 7bc2fc829a69f32d91fc2f8bae9bbf57 *man/Geyer.Rd c22248f619df98176a5f15bd24c15602 *man/Gfox.Rd 1c0f40bfecfeb2b67f90fa3ea8c9b83e *man/Ginhom.Rd fc20837f195b91fff99f485ff9403fe2 *man/Gmulti.Rd 887f381bda44dabd416d26336263f902 *man/GmultiInhom.Rd 4020d4c91210a91557500533d771652c *man/Gres.Rd d3d2b569cf8a6c694141d49f8484a80c *man/Hardcore.Rd cddf79075d4c3849899b29d6a4552132 *man/Hest.Rd 77c49a32e912ecaced766cadad6476ee *man/HierHard.Rd ebcb391ba5dcf25006f76797e8140278 *man/HierStrauss.Rd 1234e600c429e1b4e513e6aafa007cec *man/HierStraussHard.Rd 4759ad6c9c0cdee6c1759f7d13a4fa0d *man/Hybrid.Rd 857f637abeb713f20381e6ad5277852c *man/Iest.Rd f349addeda8ccc8db7f7663a57cbc067 *man/Jcross.Rd 5cdbc2a706600d5a77de0c44976184ab *man/Jdot.Rd a4b9764604f960f6ba23305ba69332f5 *man/Jest.Rd 
342fa010fd85fe9d2d58c313e40c390a *man/Jinhom.Rd c4c794997556f644475e44bfeaaab636 *man/Jmulti.Rd 1e284fb57855d43a5579dd9fe62e8f2d *man/K3est.Rd 102e8e2aa4917c2eb31986fa9749a45a *man/Kcom.Rd 7911449fe5a316294577d1badf980820 *man/Kcross.Rd 9a8ff697e04046d15b5d648c9185de4a *man/Kcross.inhom.Rd 0906bcda182df31e3b9690933b189a3e *man/Kdot.Rd 4f6a45d1aed62498f357ae00833e640a *man/Kdot.inhom.Rd 5591528fc6e40dd45438938a335e4985 *man/Kest.Rd e8604ed0e68a6c592107645cb6555b62 *man/Kest.fft.Rd ee1996918d537d56874e84afb1b5b2c9 *man/Kinhom.Rd 1a6b32af2b8f01a02c072267551d29be *man/Kmark.Rd eab68e49bbe614323b1a9a6efab6121b *man/Kmeasure.Rd c396e17ccc63be7995b7902317b7f3e6 *man/Kmodel.Rd 89f20169df3dfbd7ea1826d2e87003f4 *man/Kmodel.dppm.Rd 421fcb36cf31cd17e9514bea3346fed8 *man/Kmodel.kppm.Rd 9adad596d661756331cac79aa953ec94 *man/Kmodel.ppm.Rd 37b26fec6bb4442cb7f0cc82cd1bd64e *man/Kmulti.Rd 07a5f036384ed840eb006ac2d24e1f10 *man/Kmulti.inhom.Rd af6036e4b4a4c599c1b233bdf5d92d7e *man/Kres.Rd 2409f9d2191ef5ef6e05b5655c9d094e *man/Kscaled.Rd c5987d1db8f0582adf5d742e70cd7377 *man/Ksector.Rd 9115a22a373040ef2d7209718e4fbe29 *man/LambertW.Rd b443285d64e6b9f2755d92c3f06e6120 *man/Lcross.Rd 42f98a6d2a6dc5671ab0a5cbd4883d33 *man/Lcross.inhom.Rd 50e55c7b1f5ffd7badeac28cd2a80adf *man/Ldot.Rd ec909f6e04ba4dcd4f9d1579900be538 *man/Ldot.inhom.Rd 4794ecef3ec1412d2423325f35cfec42 *man/LennardJones.Rd a668e989958013ed86179a52f603952f *man/Lest.Rd cded9f0225c7ade832b012dff7941d39 *man/Linhom.Rd 019a96958fd216110d6c221a32cc5605 *man/Math.im.Rd 7fd06b632c6c5cd04786c925bd2c3999 *man/Math.imlist.Rd cd470834ba92eb4d985d63e956fab73d *man/Math.linim.Rd fcd0b10c4c71c6d01500b70c894e578d *man/MinkowskiSum.Rd 26a9db71cd8fa55fdc1eb42afaa2907f *man/MultiHard.Rd 62f6b6f26e3d078704b4742b9e43bb13 *man/MultiStrauss.Rd bf2dcf70457431c00a3049bb814dbb33 *man/MultiStraussHard.Rd 176bbee178c7111abc5d6a0fe97ba0fd *man/Ops.msr.Rd e61d4cfd0d9bacea2346f5c064f28fe6 *man/Ord.Rd 37b2dff8a8916eea7e7927961b3c86bc *man/OrdThresh.Rd 
3856350ef8ce867f1b9fa855082b74f4 *man/PPversion.Rd e5df8b20b03a422103c24fa834a8f32c *man/PairPiece.Rd 404b13dc8185a43d0206f2e54e3878a0 *man/Pairwise.Rd 084575ea7ae835815f09f0f3db1824f4 *man/Penttinen.Rd 04bb79d763acc68544149340fc7b7dd9 *man/Poisson.Rd 5470d9f56e7d27ffd495d9dc345f0e75 *man/Replace.im.Rd b747613a11bc6f45761958859dec5b48 *man/Replace.linim.Rd d4fddff9acfab9982c86ae4f9b79343d *man/SatPiece.Rd 586b157510810340fd0b1f34adba6819 *man/Saturated.Rd 89216a124723c5fb1c2347c7446f8ce6 *man/Smooth.Rd 466dcdc6cc4b3995e072f9ff9c958ccf *man/Smooth.fv.Rd 2348f3e403116fe986e0f4b056cabbe3 *man/Smooth.msr.Rd 30109888bd3df0c3940bbdff3d1d05c0 *man/Smooth.ppp.Rd 8ad465a3b182d156f6e38d0c928542bc *man/Smooth.ssf.Rd 1c62395938f274307c386238cbf0ebe1 *man/Smoothfun.ppp.Rd 10e77a36abf54decc7cab4196a6989af *man/Softcore.Rd 8ba2cb10456824cd83c49a70fe1d41a8 *man/Strauss.Rd dfb8b417187d6dfae4408a4caa8fefa0 *man/StraussHard.Rd d5569b50779abf3f0033d9381c6fc13c *man/Triplets.Rd 207c090d4efc85174fc0d99a0464f89d *man/Tstat.Rd e900b1d972859fc84b3204e049bf9968 *man/Window.Rd f116c40218b20e1743e1febdb81a4287 *man/WindowOnly.Rd 2f49d9f7b5b69c352ab0c8b90ff470fb *man/adaptive.density.Rd da5e83b6e5a25aedb3393de676c087eb *man/add.texture.Rd 96d3635cd31ad5fa40a142d03ebf11a6 *man/addvar.Rd ad01dbc80f6193c73a59989b0e1d03c1 *man/affine.Rd f321206a180ad57fe5acecc685b7644d *man/affine.im.Rd ef8a00b237a0279520f8c333a533b44d *man/affine.linnet.Rd 86d9daaea62cea829ae99d97391db561 *man/affine.lpp.Rd 2bda2d4b230d431cf1158e897dac57f9 *man/affine.owin.Rd 21f132fd230e2ece3fdd1d9e9e372093 *man/affine.ppp.Rd 4ca5484af674c88bbcbfc0a54368835d *man/affine.psp.Rd b87eed274e5d9c4bea4722c59b418c16 *man/affine.tess.Rd f936f4c065ca59299257510de3a62fc9 *man/allstats.Rd 117241411b6dac48f191670497a39e80 *man/alltypes.Rd bcd959ac0b1e77e9bb3a19c00681bdf3 *man/angles.psp.Rd 5fcd23067d4ca2c676f57bf3dc7c71d5 *man/anova.lppm.Rd b843c4f3e8f1ff429a7972f995548da9 *man/anova.mppm.Rd d30797ca2a0b728a7ece2cb01daf07b1 
*man/anova.ppm.Rd d8d90d340989a5c40405dc5a97f5487d *man/anova.slrm.Rd 63c074e020a138be1c4661864b75e937 *man/anyNA.im.Rd 3afe3645a550e7085f8ee4813516f8e8 *man/anylist.Rd aee56da55b0f47fb6cc49314ccafaf8a *man/append.psp.Rd ebafa74187c6ebc98eaec232d17e43af *man/applynbd.Rd 57adf249c91fde7afa6a3e4932f5ae54 *man/area.owin.Rd 4ac55fdf97cfc648eb839bf267e35255 *man/areaGain.Rd 34ea12a4ef430cccd02fb208a0980d11 *man/areaLoss.Rd ceacda934ca7dddd84f18eae8e28ae9c *man/as.box3.Rd 037c6fddb0fde0d07a30da270c7d804c *man/as.boxx.Rd 2dd8d36757c24a29aaed2c837229539f *man/as.data.frame.envelope.Rd 0ec47d1f67cdada328f5a9d4b9b71916 *man/as.data.frame.hyperframe.Rd 2b0d501dcd65f686140f155e2079cdab *man/as.data.frame.im.Rd 436c8c6f3941b1c98b4f310161ef71db *man/as.data.frame.owin.Rd 9b1b64af3fe5a91f74937c9622aee20b *man/as.data.frame.ppp.Rd 625df6c52e611eb00c67e1b12efd4efd *man/as.data.frame.psp.Rd d643ee803cbff8a55324e84208db6787 *man/as.data.frame.tess.Rd 028d508f316097c621dcbaef53a7d9b4 *man/as.function.fv.Rd 3097e1e816d23fd1e56fc1d63ebbad45 *man/as.function.im.Rd ac2273a069a3ce20d19cd8e5a1c4bcb6 *man/as.function.leverage.ppm.Rd 9d688576d849022fa3326ff0a5a251b8 *man/as.function.owin.Rd 216c2723695f08917d5bc6dccb80c483 *man/as.function.tess.Rd e7f77be0adf5a08ab2c965f624a83f80 *man/as.fv.Rd 14eb58bdc3b207105128c6dc51fb86e5 *man/as.hyperframe.Rd 53a356f14c2d00ff5328f09fe2b6d210 *man/as.hyperframe.ppx.Rd 7d359e7a3e6d41a9bae0d141c8f541ff *man/as.im.Rd 5e20b5864259039bd8ca272aee68027f *man/as.interact.Rd 1e916c8013dbf03efc8967072d5a147b *man/as.layered.Rd 949d97f0ac584fa259d33e0c49e6910f *man/as.linfun.Rd a485b734be9b2fc8236c648d0013aae2 *man/as.linim.Rd ffbac6474298e0323e33c45b918f4437 *man/as.linnet.linim.Rd d52772e67af0ba3019677890f016af27 *man/as.linnet.psp.Rd a87cd4c1ecdfa262347b3a0d97b41192 *man/as.lpp.Rd 8574dfba2a3941a4217c9cf2ffd9a2a0 *man/as.mask.Rd 4092204230e2003cb69e4659f0772972 *man/as.mask.psp.Rd 36c4175e14e918f18f124fb401e25943 *man/as.matrix.im.Rd 
4f0323fe12b26266d955603f279fe9fe *man/as.matrix.owin.Rd 4e69bae442947b114c8e71434ce2f44c *man/as.owin.Rd 5c624477d81097c64ae4879ebf3f18e3 *man/as.polygonal.Rd dc165e3f06b729b80bd883fb226f700a *man/as.ppm.Rd 272b9e0258c8af6bcaea3f272a67cd84 *man/as.ppp.Rd 13cbca3dff795f0ce585afe910e25d10 *man/as.psp.Rd 43498a7fed17e562b24c5141290e177a *man/as.rectangle.Rd 6a714102b91252c9af411c4426119bc3 *man/as.solist.Rd df020957289f4dbcd83e1800b6efb124 *man/as.tess.Rd 1ffc758f7822de70bbbad12b6a2f7961 *man/auc.Rd 3df7907e462038f9d2f410983c4be948 *man/bc.ppm.Rd 5d2ca9b9cb773088b584b20013096828 *man/bdist.pixels.Rd 6f91e4b42fe96806b3677d7bec18d9cd *man/bdist.points.Rd 2001c44828382ca663e828c03a993233 *man/bdist.tiles.Rd 8fc8b29789a472e000c4c22734b12865 *man/beachcolours.Rd a843cd73ef1835fe4ea1a0ae21377f01 *man/beginner.Rd 34bfe7bb20f5983fe4266325b268d30b *man/begins.Rd ec3d1d0d1e0275d05f8c8620ae5ae3bc *man/berman.test.Rd 5da7cfc96a5c926f20691a304f45d901 *man/bind.fv.Rd 0e95688703c12c3882be360885d45f53 *man/bits.test.Rd 2bd2f8d1858fcf9dc15783bfae44b7e6 *man/blur.Rd e6ba5d3a73902ccab3c5756130de7e44 *man/border.Rd ffc9dcc131ee08a2de771dc48376ba9a *man/bounding.box.xy.Rd 0ce808e45591c98f62d77bbb46d4287d *man/boundingbox.Rd c3a04159e912cbcde9a325492fdde92c *man/boundingcircle.Rd 3d31bf6cfa4a213f6ef7524001440339 *man/box3.Rd 54834701b5ec9fb27880597f2e7593e3 *man/boxx.Rd eed7fe041f626623f67b718b48f0c387 *man/branchlabelfun.Rd 67668120b7f4ffdeaa3ea8f442d749e7 *man/bugfixes.Rd 2ddd26ffb3961c8d0dd7e222e611bd2a *man/bw.diggle.Rd 64d95c72ae1007af65d991f6e02dcf80 *man/bw.frac.Rd 7a26763b16c9994d15ca4e530f854e66 *man/bw.pcf.Rd 3c8bb59eae14904e02f3d38de3987176 *man/bw.ppl.Rd 02615cf3a642d7ce83c6a8c41878c74e *man/bw.relrisk.Rd 814c3f54ba9dad42e6eafb4531cb27d5 *man/bw.scott.Rd d98e541b77851d88f1b0d4cf31cad3b4 *man/bw.smoothppp.Rd 0640aa92927b2ff274cfdbbc620cbc7c *man/bw.stoyan.Rd 4b6e4a877960de8cb174776093ba332d *man/by.im.Rd c431579f078cbfa38cb40ff75266f494 *man/by.ppp.Rd 
93cc9742468264a22db86420d04ac4bf *man/cauchy.estK.Rd 74388c55b993f154afaa7a6bd14fd048 *man/cauchy.estpcf.Rd 4e6af84798c586d0fb2639e82d615251 *man/cbind.hyperframe.Rd 85f17f8c9f597df93b4c60efa8038e76 *man/cdf.test.Rd 18015a55635c4993c9b9c5309f37fdaa *man/cdf.test.mppm.Rd 721dafe0173a124d815016e25927e960 *man/centroid.owin.Rd ac1f611664f0e91ed78191eabe1d6ecd *man/chop.tess.Rd 48ff174a1fddbefc4c47fbf7bb09f816 *man/circdensity.Rd 3b351b5f30e29753d670d961d8415c17 *man/clarkevans.Rd f8f1dce1636d1274a13844b7ba1ebf05 *man/clarkevans.test.Rd 7654a284e984253e10452f98b495237f *man/clickbox.Rd bc2149002879baf26a34499534af39e1 *man/clickdist.Rd ca00f4d880a5cd81ce7d9a4b125bf2e0 *man/clickjoin.Rd 6dd835e65a81b6072b8d24272c47b600 *man/clicklpp.Rd 3295d098a0ee9741a140b28cad7307c9 *man/clickpoly.Rd 0db2141942eebc8b8461975ca3ed3dc1 *man/clickppp.Rd bde6cf2f59210136c60d71b1b2923138 *man/clip.infline.Rd 0251bef6f229a72adbfa0b1e7209f32a *man/closepairs.Rd 17fa0c49ef74a43f240889d90665acea *man/closepairs.pp3.Rd 8e7548e11708ceb8cda1b98cca310fd3 *man/closetriples.Rd c655b9c9bcd0726251bb8239cff191ff *man/closing.Rd 20b584adbf9de04a16dd5d241887ea03 *man/clusterfield.Rd 3d329a051f092321d473c272564b3e6e *man/clusterfit.Rd d4547fd8acb0c39986339c0e6aadca9d *man/clusterkernel.Rd 06c717935a0c03f8b50b63ef8d7b35bb *man/clusterradius.Rd 9969967ef0c3cb82ce73a0be6f08fe39 *man/clusterset.Rd dd5b0370ff9308b4ff96985941b94cd7 *man/coef.mppm.Rd 0c3bbbf66c63e7ff5c00665c2d6692dc *man/coef.ppm.Rd 8e1270ae95df370c3c9ef1ec6ec8d3bd *man/coef.slrm.Rd cb6e2c28b4393eaae15198da6c0a6028 *man/collapse.fv.Rd 815b557eec83bb3e480b1f238446c245 *man/colourmap.Rd fccf4a85a80abff62c0f97657fd5021c *man/colourtools.Rd 7577667cef680abd3a2ec8d13fa413c0 *man/commonGrid.Rd 4fced4c12b414fa7eb95606a0aee8e59 *man/compareFit.Rd daa2f0ca4dbf0a35bf279c038507ec42 *man/compatible.Rd bbdd91aecc3f370e6d5349d7a72d56fa *man/compatible.fasp.Rd f38f5c88a38c76ceff89005c2cfcc8b7 *man/compatible.fv.Rd 0dbb6f2874f36a2409367b425a20970b 
*man/compatible.im.Rd 86f39d6bbc2448fa0a8ea7c8f5405c1b *man/compileK.Rd edc5b84dd7d0d6f60e5c27a2a7b8874f *man/complement.owin.Rd 9c37062f39c1f519e59ef50d6dabf3fe *man/concatxy.Rd 855d8ab6682b817023e28929db270f91 *man/connected.Rd 66c243bbd2743d13c1b5d5745b1f1d08 *man/connected.linnet.Rd 601e1ae3f06558038923e882c069b456 *man/connected.lpp.Rd 280b7a290611fa3a37ed7a049bd5640e *man/connected.ppp.Rd 7c22775202e81c10f23a210beba00e2c *man/contour.im.Rd 5fb384aadaccd28b925cc1ebc69f135a *man/contour.imlist.Rd 4cf1b288cffdead7f85cf0c0b86d42ea *man/convexhull.Rd 6ae9c7cf8d7a0140679b72f24916669f *man/convexhull.xy.Rd b323e7ff70db6054fe6b1412bd88e92f *man/convexify.Rd b79d752bb9228bc32fec25a2c488fb2f *man/convolve.im.Rd 94c8ba44e070e19b5aeb426294a865ae *man/coords.Rd 043d477a1eb8019a72195306231fa2be *man/corners.Rd 2c34a94c784871f85be912723b7bfb46 *man/covering.Rd 270668697df2da8368c716055fa16a39 *man/crossdist.Rd 0a3b28ff053854b6d9cb321304a3cfd0 *man/crossdist.default.Rd b0c7f58b1d9393deb439402c83ad0fbb *man/crossdist.lpp.Rd cbf5b84279b9901707b0e61ba9b80290 *man/crossdist.pp3.Rd 86bc3c6c78087ee16d8523de2ba09428 *man/crossdist.ppp.Rd b69412588854da90e9af9cc0c38a49c9 *man/crossdist.ppx.Rd ef11492e48f734cbdf46f6768ab05be5 *man/crossdist.psp.Rd 58cac3a3319f60ca98f5572ad296bd41 *man/crossing.linnet.Rd 661c50d342e9e32e7cc02d041c7ac0be *man/crossing.psp.Rd af5b5fcc49b56411f212cb487cb1b0ce *man/cut.im.Rd a6cd74dea247cd759420023c3a9fd0ea *man/cut.lpp.Rd b3403e532bd0990c22a6d08cb888a8d9 *man/cut.ppp.Rd 827aff2a554b82e1613672fd2baa0542 *man/data.ppm.Rd 5a937a4d7cd2da5b2babdd2066816ae6 *man/dclf.progress.Rd cfe58cc740d905ec11772f662a1115a2 *man/dclf.sigtrace.Rd 5a7f493aac4e5f2be9a88712eb73fe9c *man/dclf.test.Rd c53a24542be4a9eb16621ec99a4bb45e *man/default.dummy.Rd 0ec93174356b4c09b9e90c5886dd50b8 *man/default.expand.Rd abb5e748d59b40a306e411526a5b2b17 *man/default.rmhcontrol.Rd aa8044cc7f49b4534077806138c7bbd6 *man/delaunay.Rd 0c1110c95832a3655e0db53d8d809ea7 *man/delaunayDistance.Rd 
652c6ff5511e6a5ad1fd5c338590fef8 *man/delaunayNetwork.Rd c9aabaae8e19078decca8cb19c6b7ab5 *man/deletebranch.Rd b374a9ff6e2ac8635022a2d778d3e8a5 *man/deltametric.Rd 062e5ffe5a4b34071de57ac2c4ea6486 *man/density.lpp.Rd 48e490ba4c9e04344f9be91aa6eb3704 *man/density.ppp.Rd 09926a4ef79118437810d30413f3acac *man/density.psp.Rd 2513790a42beb1924372b3d6a9bef779 *man/density.splitppp.Rd 50fca06c24aac752c750d95c8f56f7f6 *man/deriv.fv.Rd cbdbe94416949b5299d9ae68b0875705 *man/detpointprocfamilyfun.Rd 453e9e64e024c7d9b2bf3a16a265dd3d *man/dfbetas.ppm.Rd bf827df6f97572729e9e8c4268d94c77 *man/dg.envelope.Rd d81c8d8e2470d6081243c61dd0829a14 *man/dg.progress.Rd 869ceab579b8674d6f7a686c01d3197b *man/dg.sigtrace.Rd 5edbb7cfbde253a31a72d93c3659126b *man/dg.test.Rd 0cbe18d4651cf05e1fba2b83d7aab0ec *man/diagnose.ppm.Rd feca6dece00297bcde1938f768c71985 *man/diameter.Rd 464efabd5c13eb3ea9c32c6f63c786f5 *man/diameter.box3.Rd 4c28781cc6cdbb4a0122e422396920f3 *man/diameter.boxx.Rd 041bedc39fc1f905ec3c6964cbed8119 *man/diameter.linnet.Rd 03f9b542f39c6984b4c97467e4c8b482 *man/diameter.owin.Rd 40a782e809b0b3254e8ec2e8a0eea4a7 *man/dilated.areas.Rd 5c772598d07d22fd501e097fec2efa95 *man/dilation.Rd 382a56f92a804d52582cf716cdf79b09 *man/dim.detpointprocfamily.Rd b72e48220d7edbac9fc1686c28abd50f *man/dimhat.Rd 6a1b619a11efac3fd663196971650713 *man/dirichlet.Rd 1ad1403e16fd89dac665e9a8aa252076 *man/dirichletAreas.Rd b8cb279192ea77edccb3110fc3319209 *man/dirichletVertices.Rd dd23abfd3e95775f341cac33045517e1 *man/dirichletWeights.Rd d76568876272da7abd5e822e75d8097a *man/disc.Rd b8173608c10e7504c3a424e9a45388e9 *man/discpartarea.Rd 972e9a47b875c69b39489b73231909c1 *man/discretise.Rd 78a3f03297cf56c59e91165460890f93 *man/discs.Rd ea33970727fe02497a888d725d9f24ea *man/distcdf.Rd b40ca490fdec737f0b3a62fbdb0502b3 *man/distfun.Rd 645074b7fae18a7483a1a15b0ba8a371 *man/distfun.lpp.Rd faf4889f5aa8f5a78ec409ceadff1866 *man/distmap.Rd e3c58049d4baecc3370bf36eae896977 *man/distmap.owin.Rd 
39d6077ce1b03560432870885cc17985 *man/distmap.ppp.Rd d342c07a66859851fde8c661f720b0d2 *man/distmap.psp.Rd 873905c02500608230d9ebd9d245a839 *man/divide.linnet.Rd 80cc6cd76ccc874db32f2df71e16435b *man/dkernel.Rd 8203fba31ada9ac91ebc681f14b3ab27 *man/dmixpois.Rd a6aa371b1538557b295c207c2541d1dc *man/domain.Rd e93b2a5629b7d0568ee47583179a89eb *man/dppBessel.Rd b86411019f323d1c651babd1b0b3d1af *man/dppCauchy.Rd 4c062a1337877fd761fbf42041a09de8 *man/dppGauss.Rd 6d17cd8399d74a1baeffe9689e09e00d *man/dppMatern.Rd f7b2bc9db23620c49b897a61935ccede *man/dppPowerExp.Rd 43fca14b1c64f1a91fbdd8ec2e56b40f *man/dppapproxkernel.Rd 71f1f80278289aab6dbf5823a5131626 *man/dppapproxpcf.Rd edb8c34118a8284a8946cddfda3392d6 *man/dppeigen.Rd 96abd9ffe6be8d538ddffd6bcab72001 *man/dppkernel.Rd 8a55e7897568dfd9f7991420051a449b *man/dppm.Rd 435c26403f233030ea066d2135f121c8 *man/dppparbounds.Rd 976360c41648d086a5d572c80803ee46 *man/dppspecden.Rd 1f7ad57545508cbaf3ebdf359509c96f *man/dppspecdenrange.Rd cfe1652069012d2187f56f99553015aa *man/dummify.Rd c24c8a8fa5eb8b63614db81c18feb432 *man/dummy.ppm.Rd f6b361ee19802275e239b31f06d60879 *man/duplicated.ppp.Rd c7dafd56c8c1989b45484ebf5d49371a *man/edge.Ripley.Rd 57d514e98cfdcf5106306495284b167f *man/edge.Trans.Rd 2c8a2832c36a28895f184e502e3b6624 *man/edges.Rd 34b1bf16cb0a8c40bffabcd2d64655ed *man/edges2triangles.Rd b1150d865b52154abe4431d659e9142f *man/edges2vees.Rd 1e473e02de85c9fcf7691e98ff40d1f1 *man/edit.hyperframe.Rd b7283957228c1bd0b45da50abde3bc0b *man/edit.ppp.Rd 87e2d038d7d67ecc47943d0ab8648c67 *man/eem.Rd 67ac031a69f72a6d41fe228e101637df *man/effectfun.Rd 73298504ab6eb8d5f0a67fd42576eb9d *man/ellipse.Rd 62f625d02e3671424d4280d6c7de0428 *man/emend.Rd 98047ebf1418a60d55f2609f089ef970 *man/emend.ppm.Rd c05922d67d600d554bd789b4811359c3 *man/endpoints.psp.Rd c4e6b043ea0551041d560626facc5849 *man/envelope.Rd 0cc8bc4984ea85d93d847009e5777e48 *man/envelope.envelope.Rd 5f5c7922a4e28946c70f6ff5c5b9de06 *man/envelope.lpp.Rd 
c74cb2b8a121a0866ca70d2e09d8e8c9 *man/envelope.pp3.Rd 417ab6fe29f16cd10ec714b550af8374 *man/envelopeArray.Rd 406ab1cde1b506f881777c6895445180 *man/eroded.areas.Rd 417361fe6d1aaa8a94539398ce43533e *man/erosion.Rd efb075b7df9320223a187d100cc85c27 *man/erosionAny.Rd e5b8141ede5431bec1877bd7cc706ae0 *man/eval.fasp.Rd fe65f0c5f1631b2f524b77123f982895 *man/eval.fv.Rd 87a4a3bb708ab0b130c2580149e5e674 *man/eval.im.Rd 285508b0731c9de89e05f8f99d73a982 *man/eval.linim.Rd 9d5bb092702c6a62cde37a5c4e330e10 *man/ewcdf.Rd 9376bcd787b75df61fbdea8934659ec4 *man/exactMPLEstrauss.Rd 6e1d4f4674976dcd96b752dcf9063a90 *man/expand.owin.Rd 0cf5188fd022cb4c001997fafddc5600 *man/fardist.Rd 60624a18119aca69226f751383d3d871 *man/fasp.object.Rd 0daf1da7e9646e66c05f1202aac28638 *man/fitin.Rd c0ea753a8c1dd4801157715779193173 *man/fitted.lppm.Rd 786b1dd29f8c2ef4ac4dcb377d273734 *man/fitted.mppm.Rd d5745de4c00845499688737c8f9c7ab4 *man/fitted.ppm.Rd 9f6a06b4a7c477ca26c6bcc62a345213 *man/fitted.slrm.Rd ced7b79616a49227632d3d793f3fbeb1 *man/fixef.mppm.Rd 753f4eafa062d230841d8009565c22a1 *man/flipxy.Rd 782d7ab24890616e64a5bacb50c9fbc3 *man/foo.Rd 590d81abc370d01da0718fe8c8ed0c77 *man/formula.fv.Rd da28ccd90714621c7ba25c37b8297431 *man/formula.ppm.Rd 87606fcae485faaaf60a53c3136bf71a *man/fourierbasis.Rd e411490a37b7ee3668c8ce893aa27a51 *man/fryplot.Rd 88544b48ae1f433d914a1bd882642a5e *man/funxy.Rd cd5826c148618e9ff20da78773361299 *man/fv.Rd 81a53ee2dd2c8afdda608a90d37c9f04 *man/fv.object.Rd b6564e6129a8973dfde9e469969cf283 *man/fvnames.Rd 8786b2679753de57cff66911fd4822a9 *man/gauss.hermite.Rd 4014660ad0eea9fd14e7848f88612216 *man/gridcentres.Rd 7c4ae72e764a190364f40fc736c9907f *man/gridweights.Rd 8682c3236a127ecaa7ae430c3f9e72e3 *man/grow.boxx.Rd 30375bff5e98a7539edce32aa22edb24 *man/grow.rectangle.Rd c341ff01dd0540df584ea23c2e055443 *man/harmonic.Rd e5f06d04dbaca03d1cbbf798e5a313ae *man/harmonise.Rd 15ceb7a4e4d0ff84b3769143d69f82da *man/harmonise.fv.Rd 7657b089c0d5ba1e359906786c7d80f8 
*man/harmonise.im.Rd b2477cde55321142f298c95657f38e34 *man/harmonise.msr.Rd 05eae00175dbeeaf8860df3f4b2559eb *man/harmonise.owin.Rd ba4ff281b41ed359b2c19118c4266f13 *man/has.close.Rd aed9f3cceb2feee6b031b650b76de1e3 *man/headtail.Rd 316a20049d16deea0ef33177d5fde00d *man/hextess.Rd 059480c5911a91480fa9c38114732e51 *man/hierpair.family.Rd 724e1c23039b744db082d4f33aabb3e5 *man/hist.funxy.Rd b47d63a2c205b434fe8d474bf5743b7a *man/hist.im.Rd 823fe8a8414ef12e57e9da5e850bb61c *man/hopskel.Rd ca60f43560f10690985bc9e1144ff921 *man/hybrid.family.Rd adaeecad93471373f5014890c54b3084 *man/hyperframe.Rd 96f775face0dab2a7597a80a87dd5f99 *man/identify.ppp.Rd f9d14cd7306cd179193746eb85db3abe *man/identify.psp.Rd 5d8223387c54210727270d31ebc8dd14 *man/idw.Rd dcdf56d0f94e5f1848a701defb8615b6 *man/im.Rd fb116c8c2e11187dcd5792dd08ab18f0 *man/im.apply.Rd 569c8cf7b3cec628e96e74ff545dfa8b *man/im.object.Rd 5cf099bfda09c699261c0f8f04e2d2d0 *man/imcov.Rd 4247b7dde78daaaf7aed382865eb0302 *man/improve.kppm.Rd 36fb37cec394cd3a85fe7e53fa7f83e4 *man/incircle.Rd 15c74c0a3626c007f606b73c46c658a0 *man/increment.fv.Rd af42f01a5a5904b676358687d1615fee *man/infline.Rd ceb9faaca226d310b73fca29f226f14b *man/influence.ppm.Rd 75939f951b1f8f20b1a585050ee676c9 *man/inforder.family.Rd f7d7ae965369db9c0bb7fc97665a9008 *man/insertVertices.Rd a1ebb4c40a9876e357c402dd4952d415 *man/inside.boxx.Rd 5f2fb734bab533e325c12831ddf3295d *man/inside.owin.Rd b02df1b8b7baaa48fe54339c406c4ba7 *man/integral.im.Rd 91177756f188db2d9f75c3f6ba611350 *man/integral.linim.Rd fe606542bccb1f91c5de90f4d4e86cbd *man/integral.msr.Rd 60293f2d3c2c7afa1f197d3b5ce74afb *man/intensity.Rd 753620f42fe3e253ec926fc3a250add3 *man/intensity.dppm.Rd e696007219b960dc30c74bf2cfdcd222 *man/intensity.lpp.Rd 8255a135069e2b020f2bb2a536805556 *man/intensity.ppm.Rd bb796724312d4a8c5861ea710aaeefea *man/intensity.ppp.Rd 5460b9db190b08032b212593e6575759 *man/intensity.ppx.Rd 09c1f979ba1643a0765b78dea079aabe *man/intensity.quadratcount.Rd 
9603be44e328138b5dc4e9d71f939ed2 *man/interp.colourmap.Rd 8e4a20bcbfe9daa593882d3800ba570e *man/interp.im.Rd f67a7a66cdbba06a5feae81cd6d648ac *man/intersect.owin.Rd 59de099fdf6f0a824ca78e977871859d *man/intersect.tess.Rd bc0d899193807240689833f26a3b01b6 *man/invoke.symbolmap.Rd 61c0965c0544368c8f81d8f57495a0d9 *man/iplot.Rd e3c53ab29f18a056079a7113e287c288 *man/ippm.Rd 8912e01a6e2ff5bddbe831d50d93ac6f *man/is.connected.Rd 4ca865e9e624a8130ee672fbd01dd78f *man/is.connected.ppp.Rd a3704f6c85f8c3d9fa4d111160467730 *man/is.convex.Rd 342ef62d8db2ccc867de36df82a4cec6 *man/is.dppm.Rd 034fc8f4553fa6fb630179b1a8f4af1a *man/is.empty.Rd 9760b1ba9982fd060eeb48c4fb3f5d4f *man/is.hybrid.Rd d6d24c19b232dd6551c046c8e5077e8b *man/is.im.Rd 35e01c299472deb5431bca514719f6df *man/is.lpp.Rd bd911908bcb00c0812bcf7e5b0a5d543 *man/is.marked.Rd 5ff0e261c80b99f8c541a0402eadcb08 *man/is.marked.ppm.Rd b2c5082d855b24db4d45b8baa3e0e27d *man/is.marked.ppp.Rd bc221a7db503a4d7017a66b3c83f27f4 *man/is.multitype.Rd 429f15ef85108a2834c9591eaeb3b8ac *man/is.multitype.ppm.Rd f425ee2ba8b468d239158951f776472a *man/is.multitype.ppp.Rd 0e40fc24543a9a2b342776b6ff5973ee *man/is.owin.Rd 68c1be4581db533f711fc7534b037741 *man/is.ppm.Rd 6b810b065b12c1b3272e6cfd52f317c2 *man/is.ppp.Rd 006f6e69e5c82636c59f1c8f31b42e99 *man/is.rectangle.Rd 4a19e24fdd91dcf51f330c5a0d7b452d *man/is.stationary.Rd 6402d47263125b9acf4330fa429db005 *man/is.subset.owin.Rd c3bb8b1ff39d47aaa9b7e7740758d2c5 *man/istat.Rd 55a3c5b55340b4242fe64a2e244235f9 *man/kaplan.meier.Rd 4baac51cd91d96502fc48a9953680066 *man/kernel.factor.Rd c3c9b102625d526bf520ddfcb16eb08b *man/kernel.moment.Rd 549a8a0da9ff9822fed5d908f2c602bd *man/kernel.squint.Rd 1efcd196df498043d360a07bacdda65e *man/km.rs.Rd 041f7d395a50d6041df8bb160e60a260 *man/kppm.Rd a93fc9982cfd2e1ef0f14f2d2413b871 *man/laslett.Rd 1887eaf6447de3ac419dac4de48c479f *man/latest.news.Rd e3ddd04a6557fd348a3345aef1f75d6b *man/layered.Rd 15b9725c1210edccb56275d9aa304aa4 *man/layerplotargs.Rd 
7fc06f7ce92236daa4481801dfa2cf11 *man/layout.boxes.Rd bc40e1e1903d77258899e0a24554cbc1 *man/lengths.psp.Rd 210fc7aaf9caf507e3a1fe5364b1cab4 *man/levelset.Rd 57e2adfb239f48e0e96329b0d99df7d6 *man/leverage.ppm.Rd d83d1b8412b1fd69adb36643f3402aa0 *man/lgcp.estK.Rd 337ec904c2cd88c7a0954e011599684e *man/lgcp.estpcf.Rd 5670b9dced04d800e69b6fe9a7b0166a *man/linearK.Rd d9cd6ac53d4ee887cfadd5daa6804086 *man/linearKcross.Rd 1a3f04915ac19e6ffb73e14296ba6a0e *man/linearKcross.inhom.Rd 8870826dd05afbb3073eaddfdc5720fb *man/linearKdot.Rd 22b5b3e8b9f8a8d2aea8e655702f6646 *man/linearKdot.inhom.Rd ec0561370f5cd03b012d18df42178259 *man/linearKinhom.Rd 61a9c0ee9a32fd5cb5cbf1f3c7cbe7a4 *man/lineardirichlet.Rd 7e56d02c053058c6dc7131f5b8251dfd *man/lineardisc.Rd 371c104feea0d1006a03244b6a7da51d *man/linearmarkconnect.Rd 82a2fcf819afea83c7ec4dd19444b28d *man/linearmarkequal.Rd 68490464e097e0b5444c9d6661239168 *man/linearpcf.Rd 7ca5a2a446f5988f03a55ef23a41f008 *man/linearpcfcross.Rd 60dbcb91f212de8c899f15461b434785 *man/linearpcfcross.inhom.Rd 48a488b33f714b2f10d03b9e46d6c419 *man/linearpcfdot.Rd 7591a888809ac480e0e4b99a824d9e6b *man/linearpcfdot.inhom.Rd 00213d6b96fdc117fcd312c2e252d0b3 *man/linearpcfinhom.Rd 5d975d941b25a1a85efa787f5ce8445e *man/linequad.Rd d84c050f2b06d80230539d24eade3fb4 *man/linfun.Rd 06adc9370b13c10de5c898592d0b4b26 *man/linim.Rd 714b65bb20936037e3ee67e8a80573eb *man/linnet.Rd bc7c9a4a640118fd29bb9be9f2a8e183 *man/lintess.Rd cc314c90ddb3ebc15144776d5d532b6e *man/lixellate.Rd 35c1383dd89ecbec5d86cc0599ce2a3a *man/localK.Rd c04ad5c7ef4dd4df2e6a7d383f6afb2b *man/localKinhom.Rd 28c667be5ea403f73423832e10a02511 *man/localpcf.Rd 7caeac313765d2713f998e992babcd1b *man/logLik.dppm.Rd 7f0b8d2d587fb6a191ffccb73a3f40ec *man/logLik.kppm.Rd de9a9690e5ef55aaddd586b960f3a0a5 *man/logLik.mppm.Rd 92a5886b04d3ec37055d6911fd104738 *man/logLik.ppm.Rd ca7223a4495046290195eadc2f265c6f *man/logLik.slrm.Rd c88c32c12685c9e4f14df878651140ec *man/lohboot.Rd 5e041885fdbf52c04d05b05709f75699 
*man/lpp.Rd bf49bae5aaa3f6fed410486f152c7317 *man/lppm.Rd 69bc04df7ec86067ffda73ac1531a574 *man/lurking.Rd 77bd8c2a20c818ce6deaf30547dfdd2c *man/lut.Rd 8ff61b15a83accee7cc5be6b9363b347 *man/macros/defns.Rd f4b50c29a62b24f51e0ab42ae004ed57 *man/markconnect.Rd 9a69d529c353003ad90ffbbbe239a6e0 *man/markcorr.Rd bb18727cba02e6e22e104c57e95382e3 *man/markcrosscorr.Rd d2a9ee8b6a6e538dbf76f281f811f7da *man/marks.Rd cbe8805c7a3c8b2b452921800ab86f4e *man/marks.psp.Rd 439fc0facc5077a83a691e35f1b1721c *man/marks.tess.Rd ed22c51331fd52293a4bb2be76a9b3d6 *man/markstat.Rd 6001c3ed60cf32729c2272a2411ee82a *man/marktable.Rd 9cff51b3f36ee7c8d536cc0cc6d9c1a5 *man/markvario.Rd c6b7406bc7d4c64c92ed0ff18466d388 *man/matchingdist.Rd f9904c9903eaa1d3acd8275691f3a8b4 *man/matclust.estK.Rd 09e2aba2c8a675f5959dc3ca460ea2c3 *man/matclust.estpcf.Rd 2bbafee72c33faca8f1edf1c0a747419 *man/matrixpower.Rd b3a9f6b4f7be5b70bde09bf76a15e81c *man/maxnndist.Rd 85c96d136f6c8e9dc5c9c8aa8870c98e *man/mean.im.Rd 29c7e9c341f6f886f03a1879daf361b7 *man/mean.linim.Rd 03ac1df8cc2261c02be9b71361c5f963 *man/measureVariation.Rd d7deffaef7f7438de2c1fb8b261838e7 *man/mergeLevels.Rd fa199b0a326a764f216644737864ad6e *man/methods.box3.Rd 7d4203dcec605900f9384915a75a1ab6 *man/methods.boxx.Rd a9506c6df413353de7decacd955bc0b7 *man/methods.dppm.Rd bc0d58ebfe4623fbd2189083d39694de *man/methods.fii.Rd 7df59a0670ba7bb652fbd49d23ff57eb *man/methods.funxy.Rd 8a584ba69dc28ef1c9d3b8f4e60896aa *man/methods.kppm.Rd f9e72503a0844daef9fae2ee9e9d6fd2 *man/methods.layered.Rd c7092d0e6e5ec9cb37a4570dec353e2a *man/methods.linfun.Rd 2a61d8e256f05f52d82a00a9a26e1cb1 *man/methods.linim.Rd 9d28b79bc945da4be167dddf404e24a1 *man/methods.linnet.Rd 246f2128f8257fe960c3ab49cdd2e434 *man/methods.lpp.Rd d4b958e06f1771e797d014f3eba2c6b5 *man/methods.lppm.Rd 7134071842e63af453a7f892b219c80d *man/methods.objsurf.Rd 1def2b75a472a68c70d30228b758972b *man/methods.pp3.Rd 97fbcb516f976097d46319b4e6e2ce3a *man/methods.ppx.Rd 4af538fd19d3df6cbf3e5594fef186c0 
*man/methods.rho2hat.Rd 07479c50b6c25452ceed595a27321a33 *man/methods.rhohat.Rd 149524760f50d0c8a3464db5658e5d0b *man/methods.slrm.Rd 8b16e399ce9824952249f0f5c191ed42 *man/methods.ssf.Rd d1305f3f7d5688a0279c483f30c7177a *man/methods.units.Rd 1a20b184eb2ada390fc37b75dd319fbc *man/methods.zclustermodel.Rd 0e3707b8d67aa954eb254c5e8c23ee75 *man/midpoints.psp.Rd 49777e619db7fb9cd8c155956f34bf01 *man/mincontrast.Rd aa070c264b489fd2cf5873bc3cd8a7b4 *man/miplot.Rd bc47733e01454abe17ed482062b87989 *man/model.depends.Rd 88e8bf9a4c5321313dc8ee242b8befc8 *man/model.frame.ppm.Rd e1070be931a9d90999dc20c01222b778 *man/model.images.Rd f00ed50435704c29e0b0efb94c13eb0d *man/model.matrix.ppm.Rd 79e1e90120fcca70b67df62340a8fe59 *man/model.matrix.slrm.Rd 13f1e66542a80cef46b4ccca75fa286f *man/moribund.Rd ffc32bc802cc35ba416a1d0072288e1e *man/mppm.Rd 924a30e423945269945021ef8235ca78 *man/msr.Rd fd7c2d5c8c968ca3d3e04a464f725b30 *man/multiplicity.ppp.Rd e05c4f7abf7dc0c6542a15c9f492932f *man/nearest.raster.point.Rd dccd900d22e8fe855ae3c65eaf8b7dc1 *man/nearestsegment.Rd 238f51712a59ac7b198bd400cbcaf693 *man/nestsplit.Rd 0c3d278f757e4c193c54966f4146f6d6 *man/nnclean.Rd 3200cea0010f61f950984b088c1982a5 *man/nncorr.Rd 9e8545168f071d57745cf82c82373ed9 *man/nncross.Rd af1283be6eb9ac2adee7c074df7f2db8 *man/nncross.lpp.Rd 73f03fa0d438544d0d3e869fadbc8cb4 *man/nncross.pp3.Rd 50ece9f5db4cda508c95aa459fe1a38b *man/nndensity.Rd 30f7b4e6f339e889be7d466511d9f430 *man/nndist.Rd e451198f4e4ed0016adf019c46ed98d7 *man/nndist.lpp.Rd 1f5b99cb663fe52063e037e0858b7909 *man/nndist.pp3.Rd b35b16b4268865e5de862c1eb1fd961b *man/nndist.ppx.Rd 65f0c650eb79dfc75348587c3519cf79 *man/nndist.psp.Rd 6f96676af04f6ce9098bbbce11655196 *man/nnfromvertex.Rd 95f8e34ec2dc3f3e57b119b3520ed30a *man/nnfun.Rd b9e62915866e51f520d1e6ec63023195 *man/nnfun.lpp.Rd d0696d6e56df786abdc2c9b6949e12d5 *man/nnmap.Rd 481424540c1db4f01a71e201d1162412 *man/nnmark.Rd 418a896aa7d1c53313e7022184ea350a *man/nnorient.Rd 
092cf57239d359afc339f54592310756 *man/nnwhich.Rd 85383deb36661f8e585646c68b914b59 *man/nnwhich.lpp.Rd 0c2cde6a1be83d4ca0fee3da38549f49 *man/nnwhich.pp3.Rd c42d68ad1588309b050beb4a53d5ec6b *man/nnwhich.ppx.Rd 5ca7e87fe4f00c669c03283a114f734c *man/nobjects.Rd f67feb8a8e18129e39306376861386f5 *man/npfun.Rd fbe2ea78291bdba8b54502e7dc2098f2 *man/npoints.Rd 20d138bd69544c6250d3dadb29305c6f *man/nsegments.Rd 4e831d956a71811de072131538ffa9f0 *man/nvertices.Rd 291f0f4c0c6d0f396f6c793b6b432f1b *man/objsurf.Rd 52f27b19004ee43069f7d9a187bf71c5 *man/opening.Rd fbb2162039c49aa32691d13932f84263 *man/ord.family.Rd 0b059320eb292ee2c82683b6630bac7e *man/overlap.owin.Rd ae656f0bd4d46dc1596e9ca11f74dbcb *man/owin.Rd 09a475e019a4a377357f721f11bb5ff9 *man/owin.object.Rd a334b67ef716e9124152624f15662c5f *man/padimage.Rd 8853d6a32603f3fa8a5d8314c23139d7 *man/pairdist.Rd 009548ceb3b1273a5d50f1fb404df79b *man/pairdist.default.Rd 4165070b062fb4d950205c19e2464b52 *man/pairdist.lpp.Rd 55dfd9519eb3f69a926a3ffdfcf345b0 *man/pairdist.pp3.Rd 6c69280a6330cdbf13fa31eb8c424641 *man/pairdist.ppp.Rd a9042dfb1c08d23195d8d3d85ff372e9 *man/pairdist.ppx.Rd 24967c12a5bfd7520004da9088fb1d55 *man/pairdist.psp.Rd 918a8ff0eeda4cbba23369b7b8ace2c0 *man/pairorient.Rd f818fa41fa27f203d98e291eb930fb83 *man/pairs.im.Rd 8b8744286691b257a63805405a626ed0 *man/pairs.linim.Rd e693f86f09458e493af2b7b6712fd770 *man/pairsat.family.Rd 175a81ea3c116a4f938a8cec99adb42c *man/pairwise.family.Rd 499e39168a72133ae33c5bf8f2461a94 *man/panel.contour.Rd 7c1d6a7436f6cbf698c044221d39c622 *man/parameters.Rd 59e860a968da911fbe03bef31987f44b *man/parres.Rd 9b06494a831f88b18f8196c687770fa4 *man/pcf.Rd dfadd4ff1016669603491af6a4362311 *man/pcf.fasp.Rd aeb5cdc38dbcd2acefb53b9b192eb5a5 *man/pcf.fv.Rd 2c231d6b423c8e9b48aba24a6b0fad3c *man/pcf.ppp.Rd 1cab396336b8eab6fca38a9d12e8ec79 *man/pcf3est.Rd 35e9c91309a30d2ed93ea6ceecf41442 *man/pcfcross.Rd f9f8dbf05c3a13aa2d765b14a84f6865 *man/pcfcross.inhom.Rd c745bbb2ee3919ce065ea50628f8e657 
*man/pcfdot.Rd b84a4cb9f3aa18d23a8c8e34a452a240 *man/pcfdot.inhom.Rd 3a2b13ef8e8011a2c7a7289b8d099bde *man/pcfinhom.Rd 5bcf6eda621e887fdcb11d5b309a97ef *man/pcfmulti.Rd b55454aa2048e9b74f11307440aecfe1 *man/perimeter.Rd 330185579b97739a5fbbd58d4d11cf5c *man/periodify.Rd 7a9781d54573d010f6fe988f6f26ccdb *man/persp.im.Rd 699bce269544143e18548ec248b25212 *man/perspPoints.Rd 218a10f9f210cd6a8832231405a01fc5 *man/pixelcentres.Rd 7365e405b0ee8bff0d995d463d094ea4 *man/pixellate.Rd fe37268ae52121b4177a1774d1b2b952 *man/pixellate.owin.Rd 73171e37934907d02d3215c9dc100edc *man/pixellate.ppp.Rd 956fc61c9d1adbb69b41cf26b45520c8 *man/pixellate.psp.Rd a2aafee99f73fb9b36ce11734cf8fbd2 *man/pixelquad.Rd 7e1e9e78288eb74fde9af85eddcf00ce *man/plot.anylist.Rd 9ff682b1593457aa4003c2b041400b96 *man/plot.bermantest.Rd d308f7035d40be415c0a1d4959e2bd80 *man/plot.cdftest.Rd ff7e74301e126b393e855ebf323aace0 *man/plot.colourmap.Rd 40a182b39983f3f816409890bfffaf17 *man/plot.dppm.Rd d3467a14b7475b1bd3d345be2413305e *man/plot.envelope.Rd 00112835ac7e9ca52154b8d7e8b15bc4 *man/plot.fasp.Rd 2902f7b797e405cc56a82bfbde45558b *man/plot.fv.Rd bbceedd23382559bced05aeab5d33761 *man/plot.hyperframe.Rd a148ed8131518c36cd6d6f12ac7865a2 *man/plot.im.Rd 0115240b221ea63bb355f83505a4c08c *man/plot.imlist.Rd 7b833d3f4991ea3ac2b66dc6a2e2f105 *man/plot.influence.ppm.Rd 2b31e001e7b3cae5affd64006249ea77 *man/plot.kppm.Rd 3ef61cef6dcb869e4bdfa7de9b14ba78 *man/plot.laslett.Rd 9aa99e1d1b95df354762b42cdf4dc356 *man/plot.layered.Rd 8110086f49584196f17f13144d15e6d9 *man/plot.leverage.ppm.Rd 6c8b22eb2ba51163b502c904ae1992e9 *man/plot.linim.Rd c2a1e4dc6ad004174de60d028e7ee574 *man/plot.linnet.Rd a403097554aa2970b4d347d9c8ec9d8b *man/plot.lintess.Rd 0c939757473bce2b6586d6c6ff0f5b22 *man/plot.listof.Rd aa59caa5ecc3fc10efa0b2ec3a5cfae1 *man/plot.lpp.Rd 8af4ffb510808a99e8df8abed117eedf *man/plot.lppm.Rd 7480127473564ad5c3a57efdf68d9d36 *man/plot.mppm.Rd 3bcbfcd3b88b35277c8d1d8531cc5dfb *man/plot.msr.Rd 
cabf331ae711a9fa76a4499c17f699c9 *man/plot.onearrow.Rd 096cd0aa4d9e07f1d00b0e81df9fb8ba *man/plot.owin.Rd b3f8636aee9f1ddea0a9baabcf2d9e37 *man/plot.plotppm.Rd 94ab7f2bc0e1f75829355964133d1866 *man/plot.pp3.Rd 165d0b38adc114a488d82a308cc2c30c *man/plot.ppm.Rd babab0c051ae4b6a10fbfc7fc2505be3 *man/plot.ppp.Rd c85403475d9fb92f79f106ab8f415b42 *man/plot.psp.Rd f2a2afff874266688981a56ba0f0887d *man/plot.quad.Rd 5d1d72327dba7d662ec2ab7b8ea72a28 *man/plot.quadratcount.Rd 4be5e426df9bf6b93ab71ac270e35417 *man/plot.quadrattest.Rd 29a48bdc9d2be508ee8f66afaf0f475d *man/plot.rppm.Rd 623d09d5790ab06711fbdbc9e72b145c *man/plot.scan.test.Rd 8c87c3c115a50447499916049d547a01 *man/plot.slrm.Rd 5c5b810250001e46c3d09d5fa659a376 *man/plot.solist.Rd 4ef4ce06a8d1027d9988db609fbb92b8 *man/plot.splitppp.Rd 05201ed055b58b3a7219bad00c740d20 *man/plot.ssf.Rd 7a7dd6a11007c6401fbd10b20b02c663 *man/plot.symbolmap.Rd a74733004df1d1c7984a38b4ac7c3dc9 *man/plot.tess.Rd 33824b3b288724f2e4f23977f60d8564 *man/plot.textstring.Rd 00cd55cb42db85a314c8511ce34128cb *man/plot.texturemap.Rd 9eaa193b31b3538c61dfc1d41f0686d6 *man/plot.yardstick.Rd d8fc082a4e08900675049aa011262b07 *man/points.lpp.Rd b43cc7007474913e4029cebf81b0d9b8 *man/pointsOnLines.Rd daf959532330f2c700243ef0693ffa37 *man/polynom.Rd 1e4ffe51385b95fa44f17d5ebbc1e023 *man/pool.Rd 7b0c3d7a78fc8ff459956f54d439c066 *man/pool.anylist.Rd ab8ac452b1a923e9577b138e7d4be21b *man/pool.envelope.Rd 22838536f87dc87f6bb34bd5761f1966 *man/pool.fasp.Rd a0059ae9ec4770fc7f7760eb11e28344 *man/pool.fv.Rd 29a3c5eb8f035c5c972e7bc8f5b25ae4 *man/pool.quadrattest.Rd a0cf222f8b437868440d320d8febb4b4 *man/pool.rat.Rd 91d70b91a0ac2d74cda084c57f6bc76d *man/pp3.Rd 2528a47d883094f9f3ba758660efc7e9 *man/ppm.Rd e812bf103f917895aed07763d3edaf58 *man/ppm.object.Rd f872d02037ecfaa8afaf65dc88062807 *man/ppm.ppp.Rd 06ded16b9b26cd20a35a660fb3d0140a *man/ppmInfluence.Rd c691a22008d88d1874b5ff1ad005ea9d *man/ppp.Rd 485b77516b954a0461c22531e21952d2 *man/ppp.object.Rd 
6f75e88f3528f9c911993cf41f935f04 *man/pppdist.Rd 98837b590eaee95efc01b043300ec182 *man/pppmatching.Rd 4bdb6dcbce07348faefcb5b76986e72b *man/pppmatching.object.Rd 7c4452d5ed6b0d2fb04c2d829d62ebec *man/ppx.Rd 55186ded7f38c9d5289aeb25034517aa *man/predict.dppm.Rd 3136a25f701450a7b2ed5c0caf25b3f6 *man/predict.kppm.Rd 0a4a7f74691f676391a543f30d8c4a20 *man/predict.lppm.Rd 60e0b9c0c4f793dc28883f44719b614e *man/predict.mppm.Rd 722265a38047d3a640598a42c9055117 *man/predict.ppm.Rd baf7a18910afda5c22e97a8b394f35ec *man/predict.rppm.Rd cfb7e1f07e6b80ba77bdaa92e2fcc486 *man/predict.slrm.Rd 600900c6d48dfab3d5aef02377c99e6a *man/print.im.Rd d245091b62b8fe10f8913b76dad465fe *man/print.owin.Rd 9963e52e777b7d2d562f0ced86f1148d *man/print.ppm.Rd 9efd9c2dc831f6343afb506be3497144 *man/print.ppp.Rd 6e0624fc0182d41c6b557eb57c682a31 *man/print.psp.Rd bdfd0ef9b27f33d246fb7a02b2008eae *man/print.quad.Rd f6d45d749d48c4a76d4c9e306c4c0b3f *man/profilepl.Rd 5cfbfd93e44996df886024331b390082 *man/progressreport.Rd a0abb0988a6e917ae0eba09cffb0a7b3 *man/project2segment.Rd 9df38351cc29ede4dd7ffae6706f5d68 *man/project2set.Rd a8d0bb1cb4832789478c2fd9fdff494c *man/prune.rppm.Rd e8261efc22ee48c1163505690ad6a058 *man/pseudoR2.Rd e42dda996499718a515fc0c75d079256 *man/psib.Rd 679efdc74051fcbf726f1a5ab2704715 *man/psp.Rd e5eeca299bd8b3b0e45c1ed7d5f5c0e3 *man/psp.object.Rd 65b965bae874bef9d0cd00f478b8defe *man/psst.Rd bd21593fe299ad1aa20407c1ab1148ee *man/psstA.Rd fd787ca540ddd5b4a08004d2a405fff1 *man/psstG.Rd 642843730b1200deb20fc22a6708dcfc *man/qqplot.ppm.Rd 64ff11570ca6ac940fddc6c7bdb26b0b *man/quad.object.Rd 72261006dfc38b28a929ebbf29310c7a *man/quad.ppm.Rd dc5d046a5b2e31d19a41bdafd60b3ebc *man/quadrat.test.Rd e5e8567142ba69f23a29231576d8a6c0 *man/quadrat.test.mppm.Rd ea895b1d7a9679c3c48123012a7e01e0 *man/quadrat.test.splitppp.Rd 58514018045e526b5126305571b87c9e *man/quadratcount.Rd 399b5a89c244df698c3c85b7c620d388 *man/quadratresample.Rd 80dae4e98404c3d5507e8245fc8b296b *man/quadrats.Rd 
1753816156050fb9daf986acb7462896 *man/quadscheme.Rd 8837f075074d53be595ccd9f7184a746 *man/quadscheme.logi.Rd 70b14b52d6c94fdeceffceb6035f4c6d *man/quantess.Rd f74a00203731aed9b80c02c66ff765a1 *man/quantile.density.Rd 7ebe2c0d4c1bbd72eb5dc80ce6c7bba2 *man/quantile.ewcdf.Rd dd719c0c8110bc2772f8d7912ecebcda *man/quantile.im.Rd 68961b333902f94238d28d7dff64bfdf *man/quasirandom.Rd e10e5d6e7d8fbd709e9562445bd65293 *man/rCauchy.Rd c44b5314f861024182e01c7c4c3af77d *man/rDGS.Rd aa1f43d63a6a816cc1fc01c64bfb97ac *man/rDiggleGratton.Rd 08e89870e624574222db2a21bd3cb9b7 *man/rGaussPoisson.Rd c35b42f9c82b60228d8bc46b57603d48 *man/rHardcore.Rd 883b0d553c3fd612afb9ba11ff2479d0 *man/rLGCP.Rd e695a3ed67890793342883aededb7b66 *man/rMatClust.Rd 511430ade45d202f94a1f900ec32e11a *man/rMaternI.Rd e299efe394110eb20ec344b65b84798d *man/rMaternII.Rd 40d40454aa82ff81249c9d31e8b930a6 *man/rMosaicField.Rd 168e3c311208ef80aebb1b8fa91a1010 *man/rMosaicSet.Rd bbbe71903aabcf8ceecfef2706a0f8c2 *man/rNeymanScott.Rd 90ca689e7c131820d65877d1861a3558 *man/rPenttinen.Rd 958b981db668a82a6e9f119302584b10 *man/rPoissonCluster.Rd 7266a51131d3884bf96b03e561721671 *man/rQuasi.Rd 946044fbcef67d750f2a19149852d447 *man/rSSI.Rd 93d08f7522dd01a17f3ce0f98c25a7e1 *man/rStrauss.Rd decf0b6738f33dbe72f5d9fb4440e236 *man/rStraussHard.Rd 945e082e1dfc1677d07440e78737d41a *man/rThomas.Rd 1e3830535c87494f824cfc8afe448078 *man/rVarGamma.Rd 4aa8c9349b680a3eae25a4ef140a0338 *man/rags.Rd d799247ffbfec55868bfcb7ba832bcef *man/ragsAreaInter.Rd ba10e2653bf0888cecb2e0cc2d7005e1 *man/ragsMultiHard.Rd 94ccf22fc4d72433a17890fabb98cf4a *man/ranef.mppm.Rd f5859cdb173e22e9551ab30f0c78f1d0 *man/range.fv.Rd cb18fac0c337eab9dd887f2de53fdbe7 *man/raster.x.Rd 48db7d85e4d70016c314a208457d4c86 *man/rat.Rd b0e60f3b6aabef35da869dbae738e921 *man/rcell.Rd 55aeb0c742804dd2fd18971d10ebdce1 *man/rcellnumber.Rd 4f00b83f740761ad77278241656c18ee *man/rdpp.Rd 15e63641e557c242a4e892e75db64181 *man/reach.Rd dab1cca5d71faa0a2925104fcda1cffd *man/reach.dppm.Rd 
396ba365547cdcad60faa9d6210ece8c *man/reduced.sample.Rd a0c68ea64422a6edba5f9231338f0807 *man/reflect.Rd df95624d2972e9f5eb296c1ee9856092 *man/regularpolygon.Rd 20be7aeda8e4da71d02f871e2202115b *man/relevel.im.Rd b141bc23f32e04b067b2c4c969071f29 *man/reload.or.compute.Rd 8738ccac6e37447e056972c18eb48254 *man/relrisk.Rd 86698c033a7ba6d0c3fe448bdf389e1d *man/relrisk.ppm.Rd 23ac60965284d4e0ba9ecbf98bba4ab4 *man/relrisk.ppp.Rd 6a34e089d79f2632c80c0772cea5a9c9 *man/requireversion.Rd 1fe871400bf9e2fe2c9b94a40153baa1 *man/rescale.Rd 20004859dc29aa4363ad80a948fe23db *man/rescale.im.Rd 058b59b5213d55db80da61e4a0de97fc *man/rescale.owin.Rd d0dded0a368a3eaefcd26b1e5636d998 *man/rescale.ppp.Rd 13920d3f7b1391f8bd02f8e2b325a40d *man/rescale.psp.Rd 12334801657f6ed3d3b0e6b3c80eee35 *man/rescue.rectangle.Rd bcd155a7da4cc55760a6bded7ddc8a8e *man/residuals.dppm.Rd 0418405470085449656f5fc8d4d87999 *man/residuals.kppm.Rd 97247522acadec62cece039cefef1232 *man/residuals.mppm.Rd 8a53451a5f8633fb83ad5a80ab337090 *man/residuals.ppm.Rd 627fcde54f29940a08a9c1def1673bfc *man/rex.Rd c324aefed511422dad008c9391a9ec75 *man/rgbim.Rd fa83ddd0842b49a342c54511d97b787c *man/rho2hat.Rd 419d53816d43851d1ec76cabe10e45c3 *man/rhohat.Rd ee9d83dbf3d66ff2f0ee41dd85c5d319 *man/ripras.Rd 3dd03a5b2c65b157024e2525b6502630 *man/rjitter.Rd 6dc4bbb5b1b2e45f381673a7488bbd44 *man/rknn.Rd b8233b7e743adb538fb6fe0945a22551 *man/rlabel.Rd 1303979d82a3cc654db7fbe57f3a8b90 *man/rlinegrid.Rd 4c0dc89855eeaef976d52181c2ec7184 *man/rlpp.Rd 3a88872dff11d1c5d3ce1e2414fce8ce *man/rmh.Rd 184c9687898d2c737cdb8c9de7cddb10 *man/rmh.default.Rd a45639e352f724bc9b50a25f26eb24b8 *man/rmh.ppm.Rd f521aabb67e4121f2ba35c162d1d5949 *man/rmhcontrol.Rd 7fb92fafe4152451c5f54116faae6d69 *man/rmhexpand.Rd e8f0e4807085e833452c79734ba703e9 *man/rmhmodel.Rd c2a413171b993aba80c492b6fa400f1a *man/rmhmodel.default.Rd 754d31bbe18493634e1fd5021d3bc163 *man/rmhmodel.list.Rd b74fce12c103d1c45c14c78ebf3e4495 *man/rmhmodel.ppm.Rd 
0e15021edfdc11ed67d82edb9279d1d7 *man/rmhstart.Rd 6daa23722b901914bfec1925fe57ec22 *man/rmpoint.Rd c9efb98fb4600489851034e4914d0cbc *man/rmpoispp.Rd 00b9cb8b6413301c0182c77f3c7180d6 *man/rnoise.Rd b3f9f224e26c1d86df005584781d2bd9 *man/roc.Rd b062a825c0b76bc5292d540ff065b8bf *man/rose.Rd 46de1489970165e679298e0bfa806389 *man/rotate.Rd 4d6db4921d7dc47a815b93d02076a05c *man/rotate.im.Rd 1cca2bf91ce0897c70c83eebe2e0df46 *man/rotate.infline.Rd 420f42b78f4b75d037ce9e68ab287e90 *man/rotate.owin.Rd c8f5365f2f6e58785862f72a7d6e8244 *man/rotate.ppp.Rd 9f3fade667205c62415a1f97fd609bcb *man/rotate.psp.Rd 23e4e349594aaf9d57f6307a596e0feb *man/rotmean.Rd 51349aa10f2e3d2f2cae88a16c642a39 *man/round.ppp.Rd c9d186c7862c636325ad11cad7a62bfb *man/rounding.Rd e7439e3db078d957ad0bb78411706335 *man/rpoint.Rd b6a91ef76fbc45e3cb1bef941d8e4b83 *man/rpoisline.Rd c7a03bb1f0e2e57e0fe02e29d9e5c935 *man/rpoislinetess.Rd 431cc7fdc28659d5404cbacc19720b52 *man/rpoislpp.Rd 1267b0b52b75d4626575776dabc3e18c *man/rpoispp.Rd 5a98dd78a76b9d187fa5cc2fce68d8e5 *man/rpoispp3.Rd 94244193c38e8b8d8d051949eaca339b *man/rpoisppOnLines.Rd a6b80bce2cc88f746bf34ad4e7048d6f *man/rpoisppx.Rd 2071b7797faa3874b4cafa424d149b3c *man/rppm.Rd df2d3a4e251d836e48a93416afc150ce *man/rshift.Rd 48db298e9fc094f8d5f422336d44cdb7 *man/rshift.ppp.Rd 7025e64603cca3771c59a17930a9d413 *man/rshift.psp.Rd 7e169778102b366e7422e82c1f8b424f *man/rshift.splitppp.Rd 1638325f01a8308a7eba1f4d3efc5b5b *man/rstrat.Rd 034d6d14ca1d6cf729f94f7e17531169 *man/rsyst.Rd 907a1f4d777f2a09565480f0202197f1 *man/rtemper.Rd a0c8a5fd715c2806a4cfc73bf89da58b *man/rthin.Rd c5d3d8890255ea2ed99542aa58eb4e81 *man/run.simplepanel.Rd 0f58540ffbc0d6b01fc785934fde788c *man/runifdisc.Rd d5d02f9cd0793e69a1c46b8eadeca5a9 *man/runiflpp.Rd f00c10fda16472141dae745742629b39 *man/runifpoint.Rd 2de1693c1362e6e987c05312d0f8a150 *man/runifpoint3.Rd 08314d4010367c3ab6e780e4632b3226 *man/runifpointOnLines.Rd a9273f2fccb179783c06c7ff39ec6492 *man/runifpointx.Rd 
d7fd8b0fe60dd663e1559c6893526f5b *man/scalardilate.Rd a847cfd828fed5a9b2405240961865c5 *man/scaletointerval.Rd be7df2e3d96dd962d36880cb3c21d655 *man/scan.test.Rd b9fab8b1b77760071c342225e9d34130 *man/scanLRTS.Rd a8c5e46c67438271cec58b90060c1da4 *man/scanpp.Rd 645cc8ca418fd2d56b14df5a54f15420 *man/sdr.Rd 20d7ec0572c3f2faa5b0916ae5f5398b *man/sdrPredict.Rd f7720a4e8908af1298edd04ad488848b *man/segregation.test.Rd 844656835d998b29a13720cf3dc80200 *man/selfcrossing.psp.Rd 400bd1e88e42c09193aabe2ec5b719a3 *man/selfcut.psp.Rd ab09297e840f225a974b8cba729ad138 *man/sessionLibs.Rd 903812cd2ac3c69daed146cbac19ec4d *man/setcov.Rd 1072ec85cf03b2046765d9b449371fb9 *man/sharpen.Rd c9d619e191ae1c73f0df5fe95c1185ef *man/shift.Rd 874df23a3e9ff4ba46a58c6b5ef0c117 *man/shift.im.Rd 53a3724667d6deab8c4bd7349cd5f8b0 *man/shift.owin.Rd 1ea27441fa6ec6477756c169b4938c86 *man/shift.ppp.Rd bccef0619f0c7eac00444722e187366b *man/shift.psp.Rd ecbaeaebcafe20952c1a38fb8410e0ce *man/sidelengths.owin.Rd 881207cb3e615e02e85e14abf7bdf82c *man/simplepanel.Rd 99ebdd81548bc884bd7dc69feed637a2 *man/simplify.owin.Rd 2bf34305fa23d735a296918cfaa07ecc *man/simulate.dppm.Rd 9b3094be5f1b129a78ea3e4e0e709a4e *man/simulate.kppm.Rd a77f193e9fc39cc11d662323d2194f43 *man/simulate.lppm.Rd 33b7b6d9e3c230b14c9197929c78676d *man/simulate.mppm.Rd a327ab464756e2b6c6a447e6b99967a7 *man/simulate.ppm.Rd 4e92e07224d7111e24cadf4b88a12b6b *man/simulate.slrm.Rd 66e8d8b3849d7ccefb9704b881e445c8 *man/slrm.Rd efad4027d784fc5e84d50f618186763a *man/solapply.Rd 9e7d2dd1f496c85303346a3ae6bfff89 *man/solist.Rd 364be560f6b2de1dcffa0e5fd6d923eb *man/solutionset.Rd 6ed1fc01d32b892f519262a6bd79631a *man/spatdim.Rd 485b15a8d2d0e0a32aa9b0cca58db576 *man/spatialcdf.Rd 6b4d49938686bfe2e954db93576dfbcd *man/spatstat-deprecated.Rd 7f1c93424765c5c8683695f0bc9ae8b4 *man/spatstat-internal.Rd 070adaf27faddca39a7828f2dd501a06 *man/spatstat-package.Rd f53c1df3734c3d34c08885d33ee29aa1 *man/spatstat.options.Rd c542af7c96e45067fd97f43574d48da6 
*man/split.hyperframe.Rd c46240165ce8970f5b8d1cf27ade18e1 *man/split.im.Rd a85f30741ee936b330a7aba10aa312d9 *man/split.msr.Rd bea1ff73e6daf2875a0220816856d99a *man/split.ppp.Rd d6988b958f39b67bdfd3579061ec50c5 *man/split.ppx.Rd f8ca3f4632db9ba53e39edb98c39e95c *man/spokes.Rd 4a8813dd800e5b74f847a19947e1d682 *man/square.Rd 079af91858f6ac8d4e2dde7034740579 *man/ssf.Rd 8d86f7821a1c4b1b4bbbb1b95279fa33 *man/stieltjes.Rd 4badd1f6e4c47d32dadaac43970b5422 *man/stienen.Rd a6106ff5f15272f6e0bcd6920caf56c5 *man/stratrand.Rd f7843d1eb6cd970539806af30d1a561b *man/studpermu.test.Rd b6eb34e0f266961c3a67430ba21f1e62 *man/subfits.Rd 623138d90e1dc24eba152d8c2b5928c2 *man/subset.hyperframe.Rd 0affd4c192dbe497ed6458020faff409 *man/subset.ppp.Rd f1c7accea61aea9da256092d74cce2aa *man/subspaceDistance.Rd 26cd85f0b3b557bd27cf21dc6f68a63a *man/suffstat.Rd 6f40b323e1ce8a8774f8a5368bed3167 *man/summary.anylist.Rd d0fe66866ca1d4901ad822a22de28094 *man/summary.im.Rd beed1c5e94dfbb7907d0803c88b879a0 *man/summary.kppm.Rd 48df7eebf9876aa61c2a0b5271fac5d9 *man/summary.listof.Rd 4892b549bbe36df282b1b29bb478858f *man/summary.owin.Rd db0406db36fe20690fbc5ac295ce28d1 *man/summary.ppm.Rd 3539853c347f338355fd6a84a8f493e2 *man/summary.ppp.Rd f3e0a6f7d1ecd0c771e02c3ecf8f2bf9 *man/summary.psp.Rd 7a1165051c8ab100aab5b7f05d4dd02e *man/summary.quad.Rd 935671509f14b24888c6baa8152b53b7 *man/summary.solist.Rd 38e24aa9efb53685da86c344a2159827 *man/summary.splitppp.Rd 0c537cfad89f4f0caa8739f96dc44977 *man/sumouter.Rd 4e61d58afb5646fa59a4f0daf6cfadec *man/superimpose.Rd 9555c94642212f7cfbb22fe921eab696 *man/superimpose.lpp.Rd 09a3c8bb94a975eb23a398d8123bc4f0 *man/symbolmap.Rd e10b29a48f2f3fad126dc0e911d6f997 *man/tess.Rd e72165080367c49df535892b9af379be *man/text.ppp.Rd 44e4516ec3b5e2d588d381b7ac48697e *man/texturemap.Rd 2c95ed0887fe249bd0c7f20649fc29d8 *man/textureplot.Rd 99e7580b6a1f46033c1e2d67611a671a *man/thinNetwork.Rd 85668ad3685e742f5f540a3269406d5d *man/thomas.estK.Rd 5083b5bec798fe2f17b0c8986d8ac24c 
*man/thomas.estpcf.Rd 2f381662f92656dc153405817e035cc8 *man/tile.areas.Rd b7e64c4e39d547e9bb2a2ad12ff3972a *man/tile.lengths.Rd 1859aa0f8ece255a91fcbb2a689a211f *man/tileindex.Rd f15cbd559cc22c0cbca8d85ce131c8c6 *man/tilenames.Rd 1e0468de33d16a5faf3848ec1f6a6693 *man/tiles.Rd fd49521e7c21490bf42ec11b02aca531 *man/tiles.empty.Rd ede1768dec67583e60706b8e226d5481 *man/timeTaken.Rd 992911899fba6ca5fc6439da3cf51805 *man/timed.Rd f05325c96c5a7f9e85e8e420af4d9726 *man/transect.im.Rd 33855ed0e811bb2984cdf15156ca0a21 *man/transmat.Rd ac1d70b547af8d8efc12a3d4c28ee0ed *man/treebranchlabels.Rd a76fcd8c05b143c22a30edb3248e45a9 *man/treeprune.Rd fc56759d36af58ff75ffddb35ed1fca5 *man/triangulate.owin.Rd df2c4468d4c21b91dca2b6b89cf20bd9 *man/trim.rectangle.Rd b64a871fdee9d5938466314f3b4e4a11 *man/triplet.family.Rd 487c7d402407cfc551069cd97000c0e9 *man/tweak.colourmap.Rd 9363f374d1d9638193af018b2b9b956b *man/union.quad.Rd 372e71d05d84621b1d50af8470af914f *man/unique.ppp.Rd 570b6a76b685cd738eadb3715c3251c5 *man/unitname.Rd 61992a11d7919419002356813917f96b *man/unmark.Rd 898e839b9ce21c5f019b69814bd91220 *man/unnormdensity.Rd d11a2ad5dd1042b6caff2a7aac7aa935 *man/unstack.msr.Rd ce187617e16d3d669f8f8462ffbc0aa2 *man/unstack.ppp.Rd d97b7f4286814cf89ce5f06a76409744 *man/update.detpointprocfamily.Rd 7e613050b5075767ca9d48d7070dd386 *man/update.interact.Rd 5f73a555da54aa666f7e9d8f39d3f412 *man/update.kppm.Rd 98ce0cb16c8a4fe6980b9265881b58ea *man/update.ppm.Rd 70f976c07e44c9fe6bf41b9d55d326cc *man/update.rmhcontrol.Rd 8ca5be2ace0c48113afbaf2822674a55 *man/update.symbolmap.Rd 47bd28833a40a74899d25734078640d6 *man/valid.Rd 9449cb5c1fec24621c998a506be0eac2 *man/valid.detpointprocfamily.Rd 1ed9f6e59dad62161fc6867d14156a24 *man/valid.ppm.Rd 9b9f643ceb5ba73627013d63dd7515d6 *man/varblock.Rd 75ce8887852822a4d08a9e44076c5976 *man/varcount.Rd dfa61aa27f9908e772b4dbfc8db2d708 *man/vargamma.estK.Rd 20bdec51627e17637f8e487de060250e *man/vargamma.estpcf.Rd 60e741ac8e8fafd23cba745a70bc5ec0 
*man/vcov.kppm.Rd f85824c3c9ec3a4c31f04be59552caf7 *man/vcov.mppm.Rd b3a372e669c2c9bb08716b065974bf18 *man/vcov.ppm.Rd eb7578d51b7ad9f21067d0bbba362167 *man/vcov.slrm.Rd bc6d0d00510542ec7b835bf5fc94fbd1 *man/vertices.Rd 1d850409bd4f915052fa4be2b1e62ae1 *man/volume.Rd b3fe75f22e0494e4484532f8b079a50d *man/weighted.median.Rd dd00a09e89c41f24564ceab5dba86724 *man/where.max.Rd 03f030dc0305af42897046f755430da8 *man/whichhalfplane.Rd cd1d44aff4d46566233ded55e833a25e *man/whist.Rd 513778fbca80df00f2ea2b710263fe3c *man/will.expand.Rd f42290be6d3a75590b290967ad76c483 *man/with.fv.Rd b135bf634a66784497a9cb068415a686 *man/with.hyperframe.Rd bcdfdae48e2a3d9d36fc53fa185801c4 *man/with.msr.Rd d02099194c00a636349b66309fed886e *man/with.ssf.Rd be681008100cebc209f8f1b6e89c65e5 *man/yardstick.Rd 33376b40fd1dfd0f3fad8c6ec146fcd4 *man/zapsmall.im.Rd 46bdd584bf584fb298bfe431934e36cd *man/zclustermodel.Rd d61686fa57dc89a776b964008f2ab788 *src/Ediggatsti.c 30c67666b16108cd865c83c4a9136d1f *src/Ediggra.c 4afd831630f91c00e28b7d0ac10f0318 *src/Efiksel.c 2eb027adf82b6e2fbee693030507ea46 *src/Egeyer.c e7911c3e7d963bdd0ecc494457ccaa43 *src/Estrauss.c 183f2fb6304e391e54ab0ed6c6928237 *src/Kborder.c f5ddb0253e3280cc428e534f1c3b76a6 *src/Kborder.h 83589fde9faccd8b888bce02b02fddfc *src/Knone.c c394c76e3bf450475cc8cc5789b8ebf5 *src/Knone.h 5d190c3c4bf0a137fd9ef0d9918b5c5d *src/Krect.c 707949facbb1443a42c08d275604ce41 *src/KrectBody.h 30d4b06e1c13087513d74d41528b75e4 *src/KrectFunDec.h f7ad99f0f44158cc2c9100e38cc1b185 *src/KrectIncrem.h 08a4f417aed6959d94da3abc8bc55d0f *src/KrectV1.h 92268e0af4222764daf4b106a12bbadc *src/KrectV2.h 263f2296e12aee28f73dff92cef5dd61 *src/KrectV3.h 4ab4852a66f7a56b52e999f326f179c2 *src/KrectV4.h 34e856d0118899ad881a1b784cbbf13e *src/Perfect.cc c0ac68682e8c7e26ce52b8c76fd3d4ab *src/PerfectDGS.h 3a040c136a2b157e7854524d2c5c9c11 *src/PerfectDiggleGratton.h 5bc6a1478093ba8401ef8ff47d939298 *src/PerfectHardcore.h af1babec03bedabd8cdd2ccb694352f4 *src/PerfectPenttinen.h 
786067ae57897a4392667fa2acab7595 *src/PerfectStrauss.h ec004bfa0111b9b3f3c55e7ede7fb4a4 *src/PerfectStraussHard.h 051ca2afe6d6e5904ebba115bdcbf8e4 *src/areadiff.c 6ba6b3e1b37ebe8ac0680c5e071188f6 *src/areaint.c 9255d4a6902de2807401c997cd223174 *src/areapair.c 15c96da5753675c30125760d4cd874a7 *src/auctionbf.c 89cad006e13a81a4b793d89b2b3bb7cf *src/badgey.c af433219da832d098301a5bb0321c3f9 *src/bdrymask.c 27c73a0901b9d346ba84974f7481ff3c *src/call3d.c 3c6ee73e77c45154372fe8dbb893902f *src/chunkloop.h 55666e0e32f21396ec026be9f10432f4 *src/close3pair.c 3026aae4bccf4129d9571ece001f2d47 *src/closefuns.h 347f9c6df447899f91d2ac76f0388a5d *src/closepair.c 72e219e7bf86c2a0eba609edb6be5aeb *src/connect.c e41c08db1ce0cf5deab867112738c6ac *src/constants.h 7436c08a79ce611f48ebac47564a24b1 *src/corrections.c 635ab7b038a7cd6f11e4410584f3af0d *src/crossloop.h d758abce63375967e345d4da2fe6511d *src/denspt.c 787ec21b1dbf8298ca482a2751cfc2cd *src/densptcross.c fd5c0ecd545b4d9e50d0d4e1e202beb3 *src/dgs.c d7d12f4878e93a3932efc744f93aaa35 *src/digber.c 57fcffca69c7d7deb5937acbb6955dad *src/diggra.c 9d438b3137f879ab21f85378bba410a6 *src/dinfty.c 7e1e7445f9d0faeac100e98764c73391 *src/discarea.c d045f6d3df16aef7f02d55321072f53a *src/discs.c 69884b5b42f142e6d0e863b1eafc3ea8 *src/dist2.c e36dd81e7d3589ea68b5d51e232d2139 *src/dist2.h 30e6782eea1d200aeb5af29554e86e99 *src/dist2dpath.c a1809fc2566fabacd87f85780fed4431 *src/dist2dpath.h 3bbbb08f1bd01cd2632e7f68329e34a8 *src/distan3.c fef8cbc507da19739ace9ba4b4530f7d *src/distances.c 7114501c777e2bf2f5cf36bcfb13494b *src/distmapbin.c 7028f09de82307e1e46de69c104b7423 *src/dwpure.c e73182d2331b6f41c46a793bf8606e88 *src/exactPdist.c 60642750dc3fb722bc05b35a2237f5ca *src/exactdist.c 79bdd83f960746df0bef5cea8bbaaead *src/f3.c 1df01cb8fdb284bf568e84b097777f9e *src/fardist.c 392c9edaa96b756912bf59450dd72ebd *src/fardist.h ab7588df53688ba2bb383afaaa58d0d7 *src/fexitc.c 9ad3159a4625df6b45245dedba8b816d *src/fiksel.c 0ba81075007b6ab741f3eda829da8e99 
*src/functable.h f2bb0d35bc86962384f01c6959409981 *src/g3.c 3280a084e3cdcb931801c42fcd111d2e *src/geom3.h 5e13151e750d3fedb93005afc8c67954 *src/getcif.c c4d587523e2d2e58615eb0d2084a2167 *src/geyer.c 3228576b7ca41179fe5a99fd0a4d4001 *src/hardcore.c f64c32ad80b0c295009994ffb7299670 *src/hasclose.c 4b4909519c2235d56140ed94a66374b8 *src/hasclose.h 50f80d5b4ec82eb0b7e86bbf5a98c1d0 *src/idw.c 09de7f4f3337a1957d9880fa46a703c4 *src/init.c 9c79e8972c24446466e9dcb30ad82217 *src/k3.c 04fa575485349dece8695de21e011188 *src/knn3Ddist.h 493c3e5d2f83c09bf713b173a9e6658a *src/knn3DdistX.h f129ad504bf8946cf6a755c7f04fe253 *src/knnXdist.h 07039b406a87967282c97eb2bfd60707 *src/knndist.h 7ecc9842efabd93bc452bbcf485dbbb8 *src/knndistance.c 55295547fd74e0cdd004ba3547e181e2 *src/knngrid.c d2225a652d72aa868364c0cbaedc4a68 *src/knngrid.h 5ca88ac5e99e094f0b91183500a4f433 *src/lennard.c a81dc0ace22bc97932e31ddbb1e7b95f *src/linSnncross.c 4dc17cc3b593bbc0c362ce52ec762afb *src/linSnncross.h 82ee6cc3ed8a206c2a61fa929efc078e *src/linalg.c 0a65286b5bc50c1afd304fbc390db156 *src/lincrossdist.c 899d81e77cf5c2150a8bbe1e735bca12 *src/lineardisc.c a1abe981338133807bc3c97f98f06bb1 *src/linearradius.c e816be5e76902429899caba859ad35a9 *src/linequad.c b6334a6d4f756bef8eabaf5188201630 *src/linequad.h 8633ccb9f797ad3d142543da4ac94e94 *src/linknnd.c cc1a021dae521e4b69f5572a72d494b5 *src/linknnd.h bb011958a4675e5acc2fcd32718c3dde *src/linnncross.c 76444dbcb11ebd259c5b9a0cd96448a3 *src/linnncross.h 82b4b8dad34974380b777eb5c38f4c07 *src/linnndist.c 79080eb00c6ea7482a81cf056d83c0a5 *src/linpairdist.c 4e28ba1603b36e8900ed9ae2a5468f32 *src/linvdist.c 7b18944df45bde5b578c54c204005349 *src/linvdist.h 33d43b2b976a913012876a9268fbe00b *src/linvknndist.c e717a4283c2f753738a147d6f5d27b92 *src/lixel.c ea49927ad721530df8bc059694b46df9 *src/localpcf.c cc9a75e32ca0e80ff4e45b9721888faa *src/localpcf.h 65afd0160cdcbe16b1905b34878c685e *src/loccum.c af8d07773e8fff1a7b1eee6cbe26d45d *src/loccums.h 576eb8024f1b70c876b3f81359ec7f97 
*src/loccumx.h ec8a6f16dafb28572ff44a370cb1ab02 *src/lookup.c d4f690790bb0e2585fd2a2645e0556d2 *src/looptest.h 458aaf8343056834e27096e20cfb4a98 *src/massdisthack.c a9c90573cb2c76ea5a936e8c6d8f53d5 *src/maxnnd.h 9e0b28ecd67dd085ab596f3ae9fafa50 *src/methas.c 69d57274cda1c955d631a7c241cb9a00 *src/methas.h 20c0be6b768369562ef2ce594041f394 *src/mhloop.h 4bfdc5f406a612060458999e9319bbbc *src/mhsnoop.c 3aec5d482a96acefc8bcf9ccef064f57 *src/mhsnoop.h dbcb22b795dda5203ac34fc61e930365 *src/mhsnoopdef.h 19ca30742351e422fac32fe27613a504 *src/mhv1.h 9729a578722aa471c9330a01af5a5c09 *src/mhv2.h 3d9d655e94d771cbf55ffdfbb1124492 *src/mhv3.h 4bca34bd89e9bcb606838b28f3ea8eaf *src/mhv4.h 3c46198668482eb6d0fa28ab76a0abdf *src/mhv5.h 302994d092504354cf1ffb8ccdbcf892 *src/minnnd.c 2b6fcc4df2171c2f5924bc9ea120e076 *src/minnnd.h dc4453e1a8317eab70e9710371c384d2 *src/multihard.c bbf9e1d275d180289b9155f04db9de6b *src/nn3Ddist.c ee5ed316bb3f302e25ab36eab25078fe *src/nn3Ddist.h 609029dcaa0bbcf85efbe6f330d1ddce *src/nn3DdistX.h e56ce2952ae715addc650117b828caa3 *src/nnMDdist.c 77417067aa7539794fef337365495dff *src/nndist.h af1ef3af29ac5dc30a190234e9b28e0b *src/nndistX.h 5deb863d2ce3f7706c0c6c35244017ff *src/nndistance.c 93dff60f269800a42b3dc330294e8c97 *src/nngrid.c 74149ebdd825d1d392ce4636d9e5fc7e *src/nngrid.h 4e3eeae474afde5606e63d6e9e49a508 *src/pairloop.h 9d1981b78382e7e368e2cf9cee262342 *src/pcf3.c 887daec80901782cc831ba2dbcd5b3be *src/penttinen.c 0f0d9de8e74630e340e60769b8dce84f *src/poly2im.c e0091d7c5731f6158eaf2299b8996d8a *src/proto.h dc7d8f0ee5ffe7f9397c25b648d93c1e *src/quasirandom.c a387ad5b47dd254334ec4bdf510e7b35 *src/raster.h 7de5b856327c5b46ba0b02afdfc7a179 *src/rthin.c cdda9b160cf3edae4c6cadbce7bad53f *src/scan.c abe76267dd3491976da91e9c6c4c9d6f *src/seg2pix.c 5fdaae31c5336c9f4f4ca799c7faf722 *src/seg2pix.h c85b38af977450a3adb73c3e9d168574 *src/segdens.c 3a5e04ac4ad9fc0efd10ef39dc55f041 *src/sftcr.c d6299bd9553d4ecbc7b84482f4c0dcd3 *src/sparselinalg.c 
1f5554a9241e29019ef254f6781aff22 *src/spasumsymout.h 616cfb8ef04f625dd3395fb5e6a38f14 *src/sphefrac.c 7877dac5d832e95257442e8b7fa8f02b *src/sphevol.c 18b99b034669b76b4b9ccaef945200f0 *src/straush.c e072e3a74914a74af746481c3a3b8b3b *src/straushm.c 28d7ac41aaef4367e9d57b020ed5fb3c *src/strauss.c 0cf60fa5405e4b7f31cde35a0d390351 *src/straussm.c 2143b5d2f472c4190dea1114a8fef54a *src/sumsymouter.h 7e5839c8b2ed13edc4206ffb94d3862a *src/trigraf.c 03e65a27588194512db2649bec6e5277 *src/triplets.c 94d4b6605e4a2c5271dd567ea7648fd0 *src/veegraf.c 04de34ab52bb76737583dd68367bb10a *src/whist.c 41552329d886ee7870caddcf0f580243 *src/xyseg.c 5c127a9d5ddeaee8cc8f34b32218a3a5 *src/yesno.h 5662cc3feeb8b25a9e413547d82f4074 *tests/badwindow.txt c1624129ac630ef4653e3119d70ffa5b *tests/selfcross.txt 9aed2f34d444c81eca7c865bf3bc6661 *tests/testsAtoF.R c118a4f58a3607a2865b92e07342715d *tests/testsGtoK.R 079b444d735bec083c8513311c5d561b *tests/testsLtoM.R 5cc2ab3c075b448f3c2f7758013eefa6 *tests/testsNtoP.R de6343ad4989615bf62cc92fb3807039 *tests/testsQtoR.R ebebd7e7aee6f456169e9f416accbca7 *tests/testsStoZ.R f7d63ee0d4773b5af29d7264a5ebcfb2 *vignettes/datasets.Rnw 3cc6b729932901e25778c21cb679eab0 *vignettes/getstart.Rnw 8cda84badf5153c61e2b6b2e7bf14322 *vignettes/hexagon.eps 28c409e6cfde065a32cdc922787086ec *vignettes/hexagon.pdf 5d818e3b6c4cc36b55b35289c3282394 *vignettes/irregpoly.eps 1dd34a3acaa93d24bf0388fa83caf892 *vignettes/irregpoly.pdf f420eafa6b29f360b72269c91c485167 *vignettes/replicated.Rnw 593a297bf4451016def2e174ea62b099 *vignettes/shapefiles.Rnw b4605a8b9810b15e15d42cd0844a0fd2 *vignettes/updates.Rnw spatstat/build/0000755000176200001440000000000013166361210013213 5ustar liggesusersspatstat/build/vignette.rds0000644000176200001440000000056213166361210015555 0ustar liggesusersSMK1Mmg!/ 7 ^Vk؝m$$Koo:n"!MLeD钤% }#C\Osfk&sQleFعUYcԬl꡶T7CN ]](T-MYʖ3.Z.t!vU=Nrkm^g-ha(i7LjhΎ vTb^3@ Py[jesoEVy 9B"7h]dFMOt4 XM ^b0ً?C%fh.$MAȍa[Iq[he5 
bgyi}:JfN\I1/7-tspatstat/DESCRIPTION0000644000176200001440000001430213166520620013624 0ustar liggesusersPackage: spatstat Version: 1.53-2 Date: 2017-10-08 Title: Spatial Point Pattern Analysis, Model-Fitting, Simulation, Tests Author: Adrian Baddeley , Rolf Turner and Ege Rubak , with substantial contributions of code by Kasper Klitgaard Berthelsen; Ottmar Cronie; Yongtao Guan; Ute Hahn; Abdollah Jalilian; Marie-Colette van Lieshout; Greg McSwiggan; Tuomas Rajala; Suman Rakshit; Dominic Schuhmacher; Rasmus Waagepetersen; and Hangsheng Wang. Additional contributions by M. Adepeju; C. Anderson; Q.W. Ang; M. Austenfeld; S. Azaele; M. Baddeley; C. Beale; M. Bell; R. Bernhardt; T. Bendtsen; A. Bevan; B. Biggerstaff; A. Bilgrau; L. Bischof; C. Biscio; R. Bivand; J.M. Blanco Moreno; F. Bonneu; J. Burgos; S. Byers; Y.M. Chang; J.B. Chen; I. Chernayavsky; Y.C. Chin; B. Christensen; J.-F. Coeurjolly; K. Colyvas; R. Corria Ainslie; R. Cotton; M. de la Cruz; P. Dalgaard; M. D'Antuono; S. Das; T. Davies; P.J. Diggle; P. Donnelly; I. Dryden; S. Eglen; A. El-Gabbas; B. Fandohan; O. Flores; E.D. Ford; P. Forbes; S. Frank; J. Franklin; N. Funwi-Gabga; O. Garcia; A. Gault; J. Geldmann; M. Genton; S. Ghalandarayeshi; J. Gilbey; J. Goldstick; P. Grabarnik; C. Graf; U. Hahn; A. Hardegen; M.B. Hansen; M. Hazelton; J. Heikkinen; M. Hering; M. Herrmann; P. Hewson; K. Hingee; K. Hornik; P. Hunziker; J. Hywood; R. Ihaka; C. Icos; A. Jammalamadaka; R. John-Chandran; D. Johnson; M. Khanmohammadi; R. Klaver; P. Kovesi; M. Kuhn; J. Laake; F. Lavancier; T. Lawrence; R.A. Lamb; J. Lee; G.P. Leser; H.T. Li; G. Limitsios; A. Lister; B. Madin; M. Maechler; J. Marcus; K. Marchikanti; R. Mark; J. Mateu; P. McCullagh; U. Mehlig; F. Mestre; S. Meyer; X.C. Mi; L. De Middeleer; R.K. Milne; E. Miranda; J. Moller; M. Moradi; V. Morera Pujol; E. Mudrak; G.M. Nair; N. Najari; N. Nava; L.S. Nielsen; F. Nunes; J.R. Nyengaard; J. Oehlschlaegel; T. Onkelinx; S. O'Riordan; E. Parilov; J. Picka; N. Picard; M. 
Porter; S. Protsiv; A. Raftery; S. Rakshit; B. Ramage; P. Ramon; X. Raynaud; N. Read; M. Reiter; I. Renner; T.O. Richardson; B.D. Ripley; E. Rosenbaum; B. Rowlingson; J. Rudokas; J. Rudge; C. Ryan; F. Safavimanesh; A. Sarkka; C. Schank; K. Schladitz; S. Schutte; B.T. Scott; O. Semboli; F. Semecurbe; V. Shcherbakov; G.C. Shen; P. Shi; H.-J. Ship; T.L. Silva; I.-M. Sintorn; Y. Song; M. Spiess; M. Stevenson; K. Stucki; M. Sumner; P. Surovy; B. Taylor; T. Thorarinsdottir; B. Turlach; T. Tvedebrink; K. Ummer; M. Uppala; A. van Burgel; T. Verbeke; M. Vihtakari; A. Villers; F. Vinatier; S. Voss; S. Wagner; H. Wang; H. Wendrock; J. Wild; C. Witthoft; S. Wong; M. Woringer; M.E. Zamboni and A. Zeileis. Maintainer: Adrian Baddeley Depends: R (>= 3.3.0), spatstat.data (>= 1.1-0), stats, graphics, grDevices, utils, methods, nlme, rpart Imports: spatstat.utils (>= 1.7-1), mgcv, Matrix, deldir (>= 0.0-21), abind, tensor, polyclip (>= 1.5-0), goftest Suggests: sm, maptools, gsl, locfit, spatial, rpanel, tkrplot, RandomFields (>= 3.1.24.1), RandomFieldsUtils(>= 0.3.3.1), fftwtools (>= 0.9-8) Description: Comprehensive open-source toolbox for analysing Spatial Point Patterns. Focused mainly on two-dimensional point patterns, including multitype/marked points, in any spatial region. Also supports three-dimensional point patterns, space-time point patterns in any number of dimensions, point patterns on a linear network, and patterns of other geometrical objects. Supports spatial covariate data such as pixel images. Contains over 2000 functions for plotting spatial data, exploratory data analysis, model-fitting, simulation, spatial sampling, model diagnostics, and formal inference. Data types include point patterns, line segment patterns, spatial windows, pixel images, tessellations, and linear networks. 
Exploratory methods include quadrat counts, K-functions and their simulation envelopes, nearest neighbour distance and empty space statistics, Fry plots, pair correlation function, kernel smoothed intensity, relative risk estimation with cross-validated bandwidth selection, mark correlation functions, segregation indices, mark dependence diagnostics, and kernel estimates of covariate effects. Formal hypothesis tests of random pattern (chi-squared, Kolmogorov-Smirnov, Monte Carlo, Diggle-Cressie-Loosmore-Ford, Dao-Genton, two-stage Monte Carlo) and tests for covariate effects (Cox-Berman-Waller-Lawson, Kolmogorov-Smirnov, ANOVA) are also supported. Parametric models can be fitted to point pattern data using the functions ppm(), kppm(), slrm(), dppm() similar to glm(). Types of models include Poisson, Gibbs and Cox point processes, Neyman-Scott cluster processes, and determinantal point processes. Models may involve dependence on covariates, inter-point interaction, cluster formation and dependence on marks. Models are fitted by maximum likelihood, logistic regression, minimum contrast, and composite likelihood methods. A model can be fitted to a list of point patterns (replicated point pattern data) using the function mppm(). The model can include random effects and fixed effects depending on the experimental design, in addition to all the features listed above. Fitted point process models can be simulated, automatically. Formal hypothesis tests of a fitted model are supported (likelihood ratio test, analysis of deviance, Monte Carlo tests) along with basic tools for model selection (stepwise(), AIC()). Tools for validating the fitted model include simulation envelopes, residuals, residual plots and Q-Q plots, leverage and influence diagnostics, partial residuals, and added variable plots. 
License: GPL (>= 2) URL: http://www.spatstat.org LazyData: true NeedsCompilation: yes ByteCompile: true BugReports: https://github.com/spatstat/spatstat/issues Packaged: 2017-10-08 08:32:19 UTC; adrian Repository: CRAN Date/Publication: 2017-10-08 22:07:12 UTC spatstat/man/0000755000176200001440000000000013164364124012674 5ustar liggesusersspatstat/man/where.max.Rd0000644000176200001440000000334013160710621015052 0ustar liggesusers\name{where.max} \alias{where.max} \alias{where.min} \title{ Find Location of Maximum in a Pixel Image } \description{ Finds the spatial location(s) where a given pixel image attains its maximum or minimum value. } \usage{ where.max(x, first = TRUE) where.min(x, first = TRUE) } \arguments{ \item{x}{ A pixel image (object of class \code{"im"}). } \item{first}{ Logical value. If \code{TRUE} (the default), then only one location will be returned. If \code{FALSE}, then all locations where the maximum is achieved will be returned. } } \details{ This function finds the spatial location or locations where the pixel image \code{x} attains its maximum or minimum value. The result is a point pattern giving the locations. If \code{first=TRUE} (the default), then only one location will be returned, namely the location with the smallest \eqn{y} coordinate value which attains the maximum or minimum. This behaviour is analogous to the functions \code{\link[base]{which.min}} and \code{\link[base:which.min]{which.max}}. If \code{first=FALSE}, then the function returns the locations of all pixels where the maximum (or minimum) value is attained. This could be a large number of points. } \value{ A point pattern (object of class \code{"ppp"}). } \author{ \adrian } \seealso{ \code{\link{Summary.im}} for computing the minimum and maximum of pixel values; \code{\link{eval.im}} and \code{\link{Math.im}} for mathematical expressions involving images; \code{\link{solutionset}} for finding the set of pixels where a statement is true. 
} \examples{ D <- distmap(letterR, invert=TRUE) plot(D) plot(where.max(D), add=TRUE, pch=16, cols="green") } \keyword{spatial} \keyword{math} spatstat/man/rpoislpp.Rd0000644000176200001440000000345513160710621015033 0ustar liggesusers\name{rpoislpp} \alias{rpoislpp} \title{ Poisson Point Process on a Linear Network } \description{ Generates a realisation of the Poisson point process with specified intensity on the given linear network. } \usage{ rpoislpp(lambda, L, \dots, nsim=1, drop=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single number, a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or a vector of numbers, a list of functions, or a list of images. } \item{L}{ A linear network (object of class \code{"linnet"}, see \code{\link{linnet}}). Can be omitted in some cases: see Details. } \item{\dots}{ Arguments passed to \code{\link{rpoisppOnLines}}. } \item{nsim}{Number of simulated realisations to generate.} \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), the result is a point pattern. If \code{drop=FALSE}, the result is a list with one entry which is a point pattern. } } \details{ This function uses \code{\link{rpoisppOnLines}} to generate the random points. Argument \code{L} can be omitted, and defaults to \code{as.linnet(lambda)}, when \code{lambda} is a function on a linear network (class \code{"linfun"}) or a pixel image on a linear network (\code{"linim"}). } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. Otherwise, a list of such point patterns. 
} \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{runiflpp}}, \code{\link{rlpp}}, \code{\link{lpp}}, \code{\link{linnet}} } \examples{ X <- rpoislpp(5, simplenet) plot(X) # multitype X <- rpoislpp(c(a=5, b=5), simplenet) } \keyword{spatial} \keyword{datagen} spatstat/man/as.boxx.Rd0000644000176200001440000000217413160710571014546 0ustar liggesusers\name{as.boxx} \alias{as.boxx} \title{Convert Data to Multi-Dimensional Box} \description{Interprets data as the dimensions of a multi-dimensional box.} \usage{ as.boxx(\dots, warn.owin = TRUE) } \arguments{ \item{\dots}{ Data that can be interpreted as giving the dimensions of a multi-dimensional box. See Details. } \item{warn.owin}{ Logical value indicating whether to print a warning if a non-rectangular window (object of class \code{"owin"}) is supplied. } } \details{ Either a single argument should be provided which is one of the following: \itemize{ \item an object of class \code{"boxx"} \item an object of class \code{"box3"} \item an object of class \code{"owin"} \item a numeric vector of even length, specifying the corners of the box. See Examples } or a list of arguments acceptable to \code{\link{boxx}}. } \value{A \code{"boxx"} object.} \author{ \adrian \rolf and \ege } \examples{ # Convert unit square to two dimensional box. W <- owin() as.boxx(W) # Make three dimensional box [0,1]x[0,1]x[0,1] from numeric vector as.boxx(c(0,1,0,1,0,1)) } spatstat/man/harmonic.Rd0000644000176200001440000000405613160710621014761 0ustar liggesusers\name{harmonic} \alias{harmonic} \title{Basis for Harmonic Functions} \description{ Evaluates a basis for the harmonic polynomials in \eqn{x} and \eqn{y} of degree less than or equal to \eqn{n}. 
} \usage{ harmonic(x, y, n) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates } \item{y}{ Vector of \eqn{y} coordinates } \item{n}{ Maximum degree of polynomial } } \value{ A data frame with \code{2 * n} columns giving the values of the basis functions at the coordinates. Each column is labelled by an algebraic expression for the corresponding basis function. } \details{ This function computes a basis for the harmonic polynomials in two variables \eqn{x} and \eqn{y} up to a given degree \eqn{n} and evaluates them at given \eqn{x,y} locations. It can be used in model formulas (for example in the model-fitting functions \code{\link{lm},\link{glm},\link{gam}} and \code{\link{ppm}}) to specify a linear predictor which is a harmonic function. A function \eqn{f(x,y)} is harmonic if \deqn{\frac{\partial^2}{\partial x^2} f + \frac{\partial^2}{\partial y^2}f = 0.}{ (d/dx)^2 f + (d/dy)^2 f = 0.} The harmonic polynomials of degree less than or equal to \eqn{n} have a basis consisting of \eqn{2 n} functions. This function was implemented on a suggestion of P. McCullagh for fitting nonstationary spatial trend to point process models. } \seealso{ \code{\link{ppm}}, \code{\link{polynom}} } \examples{ # inhomogeneous point pattern X <- unmark(longleaf) \testonly{ # smaller dataset X <- X[seq(1,npoints(X), by=50)] } # fit Poisson point process with log-cubic intensity fit.3 <- ppm(X ~ polynom(x,y,3), Poisson()) # fit Poisson process with log-cubic-harmonic intensity fit.h <- ppm(X ~ harmonic(x,y,3), Poisson()) # Likelihood ratio test lrts <- 2 * (logLik(fit.3) - logLik(fit.h)) df <- with(coords(X), ncol(polynom(x,y,3)) - ncol(harmonic(x,y,3))) pval <- 1 - pchisq(lrts, df=df) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{models} spatstat/man/plot.ppm.Rd0000644000176200001440000001531613160710621014733 0ustar liggesusers\name{plot.ppm} \alias{plot.ppm} \title{plot a Fitted Point Process Model} \description{ Given a fitted point process model obtained by \code{\link{ppm}}, create spatial trend and conditional intensity surfaces of the model, in a form suitable for plotting, and (optionally) plot these surfaces. } \usage{ \method{plot}{ppm}(x, ngrid = c(40,40), superimpose = TRUE, trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(), how=c("persp","image", "contour"), plot.it = TRUE, locations = NULL, covariates=NULL, \dots) } \arguments{ \item{x}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"}. } \item{ngrid}{ The dimensions for a grid on which to evaluate, for plotting, the spatial trend and conditional intensity. A vector of 1 or 2 integers. If it is of length 1, \code{ngrid} is replaced by \code{c(ngrid,ngrid)}. } \item{superimpose}{ logical flag; if \code{TRUE} (and if \code{plot=TRUE}) the original data point pattern will be superimposed on the plots. } \item{trend}{ logical flag; if \code{TRUE}, the spatial trend surface will be produced. } \item{cif}{ logical flag; if \code{TRUE}, the conditional intensity surface will be produced. } \item{se}{ logical flag; if \code{TRUE}, the estimated standard error of the spatial trend surface will be produced. } \item{pause}{ logical flag indicating whether to pause with a prompt after each plot. Set \code{pause=FALSE} if plotting to a file. (This flag is ignored if \code{plot=FALSE}). } \item{how}{ character string or character vector indicating the style or styles of plots to be performed. Ignored if \code{plot=FALSE}. } \item{plot.it}{ logical scalar; should a plot be produced immediately? } \item{locations}{ If present, this determines the locations of the pixels at which predictions are computed. 
It must be a binary pixel image (an object of class \code{"owin"} with type \code{"mask"}). (Incompatible with \code{ngrid}). } \item{covariates}{ Values of external covariates required by the fitted model. Passed to \code{\link{predict.ppm}}. } \item{\dots}{ extra arguments to the plotting functions \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}. } } \value{ An object of class \code{plotppm}. Such objects may be plotted by \code{\link{plot.plotppm}()}. This is a list with components named \code{trend} and \code{cif}, either of which may be missing. They will be missing if the corresponding component does not make sense for the model, or if the corresponding argument was set equal to \code{FALSE}. Both \code{trend} and \code{cif} are lists of images. If the model is an unmarked point process, then they are lists of length 1, so that \code{trend[[1]]} is an image of the spatial trend and \code{cif[[1]]} is an image of the conditional intensity. If the model is a marked point process, then \code{trend[[i]]} is an image of the spatial trend for the mark \code{m[i]}, and \code{cif[[1]]} is an image of the conditional intensity for the mark \code{m[i]}, where \code{m} is the vector of levels of the marks. } \details{ This is the \code{plot} method for the class \code{"ppm"} (see \code{\link{ppm.object}} for details of this class). It invokes \code{\link{predict.ppm}} to compute the spatial trend and conditional intensity of the fitted point process model. See \code{\link{predict.ppm}} for more explanation about spatial trend and conditional intensity. The default action is to create a rectangular grid of points in (the bounding box of) the observation window of the data point pattern, and evaluate the spatial trend and conditional intensity of the fitted spatial point process model \code{x} at these locations. 
If the argument \code{locations=} is supplied, then the spatial trend and conditional intensity are calculated at the grid of points specified by this argument. The argument \code{locations}, if present, should be a binary image mask (an object of class \code{"owin"} and type \code{"mask"}). This determines a rectangular grid of locations, or a subset of such a grid, at which predictions will be computed. Binary image masks are conveniently created using \code{\link{as.mask}}. The argument \code{covariates} gives the values of any spatial covariates at the prediction locations. If the trend formula in the fitted model involves spatial covariates (other than the Cartesian coordinates \code{x}, \code{y}) then \code{covariates} is required. The argument \code{covariates} has the same format and interpretation as in \code{\link{predict.ppm}}. It may be either a data frame (the number of whose rows must match the number of pixels in \code{locations} multiplied by the number of possible marks in the point pattern), or a list of images. If argument \code{locations} is not supplied, and \code{covariates} \bold{is} supplied, then it \bold{must} be a list of images. If the fitted model was a marked (multitype) point process, then predictions are made for each possible mark value in turn. If the fitted model had no spatial trend, then the default is to omit calculating this (flat) surface, unless \code{trend=TRUE} is set explicitly. If the fitted model was Poisson, so that there were no spatial interactions, then the conditional intensity and spatial trend are identical, and the default is to omit the conditional intensity, unless \code{cif=TRUE} is set explicitly. If \code{plot.it=TRUE} then \code{\link{plot.plotppm}()} is called upon to plot the class \code{plotppm} object which is produced. (That object is also returned, silently.) 
Plots are produced successively using \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}} (or only a selection of these three, if \code{how} is given). Extra graphical parameters controlling the display may be passed directly via the arguments \code{...} or indirectly reset using \code{\link{spatstat.options}}. } \seealso{ \code{\link{plot.plotppm}}, \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{predict.ppm}}, \code{\link{print.ppm}}, \code{\link{persp}}, \code{\link{image}}, \code{\link{contour}}, \code{\link{plot}}, \code{\link{spatstat.options}} } \section{Warnings}{ See warnings in \code{\link{predict.ppm}}. } \examples{ m <- ppm(cells ~1, Strauss(0.05)) pm <- plot(m) # The object ``pm'' will be plotted as well as saved # for future plotting. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/Kmodel.kppm.Rd0000644000176200001440000000365513160710571015352 0ustar liggesusers\name{Kmodel.kppm} \alias{Kmodel.kppm} \alias{pcfmodel.kppm} \title{K Function or Pair Correlation Function of Cluster Model or Cox model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a cluster point process model or Cox point process model. } \usage{ \method{Kmodel}{kppm}(model, \dots) \method{pcfmodel}{kppm}(model, \dots) } \arguments{ \item{model}{ A fitted cluster point process model (object of class \code{"kppm"}) typically obtained from the model-fitting algorithm \code{\link{kppm}}. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ For certain types of point process models, it is possible to write down a mathematical expression for the \eqn{K} function or the pair correlation function of the model. In particular this is possible for a fitted cluster point process model (object of class \code{"kppm"} obtained from \code{\link{kppm}}). 
The functions \code{\link{Kmodel}} and \code{\link{pcfmodel}} are generic. The functions documented here are the methods for the class \code{"kppm"}. The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{kppm}} to fit cluster models. \code{\link{Kmodel}} for the generic functions. \code{\link{Kmodel.ppm}} for the method for Gibbs processes. } \examples{ data(redwood) fit <- kppm(redwood, ~x, "MatClust") K <- Kmodel(fit) K(c(0.1, 0.2)) curve(K(x), from=0, to=0.25) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/thomas.estpcf.Rd0000644000176200001440000001472513160710621015743 0ustar liggesusers\name{thomas.estpcf} \alias{thomas.estpcf} \title{Fit the Thomas Point Process by Minimum Contrast} \description{ Fits the Thomas point process to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ thomas.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the Thomas model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Thomas process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. 
} \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Thomas point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function \code{\link{pcf}}. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Thomas point process to \code{X}, by finding the parameters of the Thomas model which give the closest match between the theoretical pair correlation function of the Thomas process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Thomas point process is described in \Moller and Waagepetersen (2003, pp. 61--62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and isotropically Normally distributed around the parent point with standard deviation \eqn{\sigma}{sigma} which is equal to the parameter \code{scale}. 
The named vector of starting values can use either \code{sigma2} (\eqn{\sigma^2}{sigma^2}) or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical pair correlation function of the Thomas process is \deqn{ g(r) = 1 + \frac 1 {4\pi \kappa \sigma^2} \exp(-\frac{r^2}{4\sigma^2}). }{ g(r) = 1 + exp(-r^2/(4 * sigma^2))/(4 * pi * kappa * sigma^2). } The theoretical intensity of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Thomas process can be simulated, using \code{\link{rThomas}}. Homogeneous or inhomogeneous Thomas process models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}.
There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian } \seealso{ \code{\link{thomas.estK}} \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{rThomas}} to simulate the fitted model. } \examples{ data(redwood) u <- thomas.estpcf(redwood, c(kappa=10, scale=0.1)) u plot(u, legendpos="topright") u2 <- thomas.estpcf(redwood, c(kappa=10, scale=0.1), pcfargs=list(stoyan=0.12)) } \keyword{spatial} \keyword{models} spatstat/man/Kcross.Rd0000644000176200001440000001714413160710571014433 0ustar liggesusers\name{Kcross} \alias{Kcross} \title{ Multitype K Function (Cross-type) } \description{ For a multitype point pattern, estimate the multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ Kcross(X, i, j, r=NULL, breaks=NULL, correction, \dots, ratio=FALSE, from, to ) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. 
It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{from,to}{ An alternative way to specify \code{i} and \code{j} respectively. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{ij}(r)}{Kij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{ij}(r)}{Kij(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{ij}(r)}{Kij(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ This function \code{Kcross} and its companions \code{\link{Kdot}} and \code{\link{Kmulti}} are generalisations of the function \code{\link{Kest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. 
The ``cross-type'' (type \eqn{i} to type \eqn{j}) \eqn{K} function of a stationary multitype point process \eqn{X} is defined so that \eqn{\lambda_j K_{ij}(r)}{lambda[j] Kij(r)} equals the expected number of additional random points of type \eqn{j} within a distance \eqn{r} of a typical point of type \eqn{i} in the process \eqn{X}. Here \eqn{\lambda_j}{lambda[j]} is the intensity of the type \eqn{j} points, i.e. the expected number of points of type \eqn{j} per unit area. The function \eqn{K_{ij}}{Kij} is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K_{ij}(r)}{Kij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{K_{ij}(r)}{Kij(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{ij}}{Kij} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} may suggest dependence between the points of types \eqn{i} and \eqn{j}. This algorithm estimates the distribution function \eqn{K_{ij}(r)}{Kij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}, using the border correction. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The pair correlation function can also be applied to the result of \code{Kcross}; see \code{\link{pcf}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. 
John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{Kmulti}}, \code{\link{pcf}} } \examples{ # amacrine cells data K01 <- Kcross(amacrine, "off", "on") plot(K01) \testonly{ K01 <- Kcross(amacrine, "off", "on", ratio=TRUE) } \dontrun{ K10 <- Kcross(amacrine, "on", "off") # synthetic example: point pattern with marks 0 and 1 pp <- runifpoispp(50) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) K <- Kcross(pp, "0", "1") K <- Kcross(pp, 0, 1) # equivalent } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/anova.ppm.Rd0000644000176200001440000001456713160710571015074 0ustar liggesusers\name{anova.ppm} \alias{anova.ppm} \title{ANOVA for Fitted Point Process Models} \description{ Performs analysis of deviance for one or more fitted point process models. } \usage{ \method{anova}{ppm}(object, \dots, test=NULL, adjust=TRUE, warn=TRUE, fine=FALSE) } \arguments{ \item{object}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Optional. Additional objects of class \code{"ppm"}. 
} \item{test}{ Character string, partially matching one of \code{"Chisq"}, \code{"LRT"}, \code{"Rao"}, \code{"score"}, \code{"F"} or \code{"Cp"}, or \code{NULL} indicating that no test should be performed. } \item{adjust}{ Logical value indicating whether to correct the pseudolikelihood ratio when some of the models are not Poisson processes. } \item{warn}{ Logical value indicating whether to issue warnings if problems arise. } \item{fine}{ Logical value, passed to \code{\link{vcov.ppm}}, indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}) of variance terms. Relevant only when some of the models are not Poisson and \code{adjust=TRUE}. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link[stats]{anova}} for fitted point process models (objects of class \code{"ppm"}, usually generated by the model-fitting function \code{\link{ppm}}). If the fitted models are all Poisson point processes, then by default, this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"} or \code{test="LRT"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link[stats]{anova.glm}}. If \code{test="Rao"} or \code{test="score"}, the \emph{score test} (Rao, 1948) is performed instead. If some of the fitted models are \emph{not} Poisson point processes, the `deviance' differences in this table are 'pseudo-deviances' equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{ppm}}). It is not valid to compare these values to the chi-squared distribution. 
In this case, if \code{adjust=TRUE} (the default), the pseudo-deviances will be adjusted using the method of Pace et al (2011) and Baddeley et al (2015) so that the chi-squared test is valid. It is strongly advisable to perform this adjustment. } \section{Errors and warnings}{ \describe{ \item{models not nested:}{ There may be an error message that the models are not \dQuote{nested}. For an Analysis of Deviance the models must be nested, i.e. one model must be a special case of the other. For example the point process model with formula \code{~x} is a special case of the model with formula \code{~x+y}, so these models are nested. However the two point process models with formulae \code{~x} and \code{~y} are not nested. If you get this error message and you believe that the models should be nested, the problem may be the inability of \R to recognise that the two formulae are nested. Try modifying the formulae to make their relationship more obvious. } \item{different sizes of dataset:}{ There may be an error message from \code{anova.glmlist} that \dQuote{models were not all fitted to the same size of dataset}. This implies that the models were fitted using different quadrature schemes (see \code{\link{quadscheme}}) and/or with different edge corrections or different values of the border edge correction distance \code{rbord}. To ensure that models are comparable, check the following: \itemize{ \item the models must all have been fitted to the same point pattern dataset, in the same window. \item all models must have been fitted by the same fitting method as specified by the argument \code{method} in \code{\link{ppm}}. \item If some of the models depend on covariates, then they should all have been fitted using the same list of covariates, and using \code{allcovar=TRUE} to ensure that the same quadrature scheme is used. \item all models must have been fitted using the same edge correction as specified by the arguments \code{correction} and \code{rbord}. 
If you did not specify the value of \code{rbord}, then it may have taken a different value for different models. The default value of \code{rbord} is equal to zero for a Poisson model, and otherwise equals the reach (interaction distance) of the interaction term (see \code{\link{reach}}). To ensure that the models are comparable, set \code{rbord} to equal the maximum reach of the interactions that you are fitting. } } } } \seealso{ \code{\link{ppm}}, \code{\link{vcov.ppm}} } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \examples{ mod0 <- ppm(swedishpines ~1) modx <- ppm(swedishpines ~x) # Likelihood ratio test anova(mod0, modx, test="Chi") # Score test anova(mod0, modx, test="Rao") # Single argument modxy <- ppm(swedishpines ~x + y) anova(modxy, test="Chi") # Adjusted composite likelihood ratio test modP <- ppm(swedishpines ~1, rbord=9) modS <- ppm(swedishpines ~1, Strauss(9)) anova(modP, modS, test="Chi") } \references{ Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530. Pace, L., Salvan, A. and Sartori, N. (2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148. Rao, C.R. (1948) Large sample tests of statistical hypotheses concerning several parameters with applications to problems of estimation. \emph{Proceedings of the Cambridge Philosophical Society} \bold{44}, 50--57. } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/distmap.owin.Rd0000644000176200001440000000556413160710571015602 0ustar liggesusers\name{distmap.owin} \alias{distmap.owin} \title{Distance Map of Window} \description{ Computes the distance from each pixel to the nearest point in the given window. } \usage{ \method{distmap}{owin}(X, \dots, discretise=FALSE, invert=FALSE) } \arguments{ \item{X}{ A window (object of class \code{"owin"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to control pixel resolution. } \item{discretise}{ Logical flag controlling the choice of algorithm when \code{X} is a polygonal window. See Details. } \item{invert}{ If \code{TRUE}, compute the distance transform of the complement of the window. } } \value{ A pixel image (object of class \code{"im"}) whose greyscale values are the values of the distance map. The return value has an attribute \code{"bdry"} which is a pixel image. } \details{ The ``distance map'' of a window \eqn{W} is the function \eqn{f} whose value \code{f(u)} is defined for any two-dimensional location \eqn{u} as the shortest distance from \eqn{u} to \eqn{W}. This function computes the distance map of the window \code{X} and returns the distance map as a pixel image. The greyscale value at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest pixel in \code{X}. Additionally, the return value has an attribute \code{"bdry"} which is also a pixel image. The grey values in \code{"bdry"} give the distance from each pixel to the bounding rectangle of the image. If \code{X} is a binary pixel mask, the distance values computed are not the usual Euclidean distances. Instead the distance between two pixels is measured by the length of the shortest path connecting the two pixels. A path is a series of steps between neighbouring pixels (each pixel has 8 neighbours). This is the standard `distance transform' algorithm of image processing (Rosenfeld and Pfalz, 1968; Borgefors, 1986).
If \code{X} is a polygonal window, then exact Euclidean distances will be computed if \code{discretise=FALSE}. If \code{discretise=TRUE} then the window will first be converted to a binary pixel mask and the discrete path distances will be computed. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. This function is a method for the generic \code{\link{distmap}}. } \seealso{ \code{\link{distmap}}, \code{\link{distmap.ppp}}, \code{\link{distmap.psp}} } \examples{ data(letterR) U <- distmap(letterR) \dontrun{ plot(U) plot(attr(U, "bdry")) } } \references{ Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344--371. Rosenfeld, A. and Pfalz, J.L. Distance functions on digital pictures. \emph{Pattern Recognition} \bold{1} (1968) 33-61. } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/MultiStrauss.Rd0000644000176200001440000000765313160710571015652 0ustar liggesusers\name{MultiStrauss} \alias{MultiStrauss} \title{The Multitype Strauss Point Process Model} \description{ Creates an instance of the multitype Strauss point process model which can then be fitted to point pattern data. } \usage{ MultiStrauss(radii, types=NULL) } \arguments{ \item{radii}{Matrix of interaction radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype Strauss process with interaction radii \eqn{radii[i,j]}. 
} \details{ The (stationary) multitype Strauss process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is the pairwise interaction point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density. The nonstationary multitype Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the multitype Strauss process pairwise interaction is yielded by the function \code{MultiStrauss()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. The matrix \code{radii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii are specified in \code{MultiStrauss}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiStrauss()}. 
} \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}, \code{\link{MultiHard}} } \examples{ r <- matrix(c(1,2,2,1), nrow=2,ncol=2) MultiStrauss(r) # prints a sensible description of itself r <- 0.03 * matrix(c(1,2,2,1), nrow=2,ncol=2) X <- amacrine \testonly{ X <- X[ owin(c(0, 0.8), c(0, 1)) ] } ppm(X ~1, MultiStrauss(r)) # fit the stationary multitype Strauss process to `amacrine' \dontrun{ ppm(X ~polynom(x,y,3), MultiStrauss(r, c("off","on"))) # fit a nonstationary multitype Strauss process with log-cubic trend } } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype Strauss model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiStrauss(types=NULL, radii)}. The new code attempts to handle the old syntax as well. } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/vertices.Rd0000644000176200001440000000257713160710621015013 0ustar liggesusers\name{vertices} \alias{vertices} \alias{vertices.owin} \title{Vertices of a Window} \description{ Finds the vertices of a window, or similar object. } \usage{ vertices(w) \method{vertices}{owin}(w) } \arguments{ \item{w}{A window (object of class \code{"owin"}) or similar object.} } \value{ A list with components \code{x} and \code{y} giving the coordinates of the vertices. } \details{ This function computes the vertices (`corners') of a spatial window or other object. For \code{vertices.owin}, the argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details). 
If \code{w} is a rectangle, the coordinates of the four corner points are returned. If \code{w} is a polygonal window (consisting of one or more polygons), the coordinates of the vertices of all polygons are returned. If \code{w} is a binary mask, then a `boundary pixel' is defined to be a pixel inside the window which has at least one neighbour outside the window. The coordinates of the centres of all boundary pixels are returned. } \seealso{ \code{\link{owin.object}}. } \examples{ data(letterR) vert <- vertices(letterR) plot(letterR, main="Polygonal vertices") points(vert) plot(letterR, main="Boundary pixels") points(vertices(as.mask(letterR))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/nearest.raster.point.Rd0000644000176200001440000000412613160710621017247 0ustar liggesusers\name{nearest.raster.point} \alias{nearest.raster.point} \title{Find Pixel Nearest to a Given Point} \description{ Given cartesian coordinates, find the nearest pixel. } \usage{ nearest.raster.point(x,y,w, indices=TRUE) } \arguments{ \item{x}{Numeric vector of \eqn{x} coordinates of any points} \item{y}{Numeric vector of \eqn{y} coordinates of any points} \item{w}{An image (object of class \code{"im"}) or a binary mask window (an object of class \code{"owin"} of type \code{"mask"}). } \item{indices}{Logical flag indicating whether to return the row and column indices, or the actual \eqn{x,y} coordinates. } } \value{ If \code{indices=TRUE}, a list containing two vectors \code{rr} and \code{cc} giving row and column positions (in the image matrix). If \code{indices=FALSE}, a list containing vectors \code{x} and \code{y} giving actual coordinates of the pixels. } \details{ The argument \code{w} should be either a pixel image (object of class \code{"im"}) or a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) of type \code{"mask"}. The arguments \code{x} and \code{y} should be numeric vectors of equal length. 
They are interpreted as the coordinates of points in space. For each point \code{(x[i], y[i])}, the function finds the nearest pixel in the grid of pixels for \code{w}. If \code{indices=TRUE}, this function returns a list containing two vectors \code{rr} and \code{cc} giving row and column positions (in the image matrix). For the location \code{(x[i],y[i])} the nearest pixel is at row \code{rr[i]} and column \code{cc[i]} of the image. If \code{indices=FALSE}, the function returns a list containing two vectors \code{x} and \code{y} giving the actual coordinates of the pixels. } \seealso{ \code{\link{owin.object}}, \code{\link{as.mask}} } \examples{ w <- owin(c(0,1), c(0,1), mask=matrix(TRUE, 100,100)) # 100 x 100 grid nearest.raster.point(0.5, 0.3, w) nearest.raster.point(0.5, 0.3, w, indices=FALSE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/boundingcircle.Rd0000644000176200001440000000423413160710571016152 0ustar liggesusers\name{boundingcircle} \alias{boundingradius} \alias{boundingradius.owin} \alias{boundingradius.ppp} \alias{boundingcentre} \alias{boundingcircle} \alias{boundingcentre.owin} \alias{boundingcircle.owin} \alias{boundingcentre.ppp} \alias{boundingcircle.ppp} \title{ Smallest Enclosing Circle } \description{ Find the smallest circle enclosing a spatial window or other object. Return its radius, or the location of its centre, or the circle itself. } \usage{ boundingradius(x, \dots) boundingcentre(x, \dots) boundingcircle(x, \dots) \method{boundingradius}{owin}(x, \dots) \method{boundingcentre}{owin}(x, \dots) \method{boundingcircle}{owin}(x, \dots) \method{boundingradius}{ppp}(x, \dots) \method{boundingcentre}{ppp}(x, \dots) \method{boundingcircle}{ppp}(x, \dots) } \arguments{ \item{x}{ A window (object of class \code{"owin"}), or another spatial object. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution for the calculation. 
} } \details{ The \code{boundingcircle} of a spatial region \eqn{W} is the smallest circle that contains \eqn{W}. The \code{boundingradius} is the radius of this circle, and the \code{boundingcentre} is the centre of the circle. The functions \code{boundingcircle}, \code{boundingcentre} and \code{boundingradius} are generic. There are methods for objects of class \code{"owin"}, \code{"ppp"} and \code{"linnet"}. } \value{ The result of \code{boundingradius} is a single numeric value. The result of \code{boundingcentre} is a point pattern containing a single point. The result of \code{boundingcircle} is a window representing the boundingcircle. } \author{ \adrian } \seealso{ \code{\link{boundingradius.linnet}} } \examples{ boundingradius(letterR) plot(grow.rectangle(Frame(letterR), 0.2), main="", type="n") plot(letterR, add=TRUE, col="grey") plot(boundingcircle(letterR), add=TRUE, border="green", lwd=2) plot(boundingcentre(letterR), pch="+", cex=2, col="blue", add=TRUE) X <- runifpoint(5) plot(X) plot(boundingcircle(X), add=TRUE) plot(boundingcentre(X), pch="+", cex=2, col="blue", add=TRUE) } \keyword{spatial} \keyword{math} spatstat/man/Extract.lpp.Rd0000644000176200001440000000655013160710621015366 0ustar liggesusers\name{Extract.lpp} \alias{[.lpp} \title{Extract Subset of Point Pattern on Linear Network} \description{ Extract a subset of a point pattern on a linear network. } \usage{ \method{[}{lpp}(x, i, j, drop=FALSE, \dots, snip=TRUE) } \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{i}{ Subset index. A valid subset index in the usual \R sense, indicating which points should be retained. } \item{j}{ Spatial window (object of class \code{"owin"}) delineating the region that should be retained. } \item{drop}{ Logical value indicating whether to remove unused levels of the marks, if the marks are a factor. } \item{snip}{ Logical. 
If \code{TRUE} (the default), segments of the network which cross the boundary of the window \code{j} will be cut by the boundary. If \code{FALSE}, these segments will be deleted. } \item{\dots}{ Ignored. } } \value{ A point pattern on a linear network (of class \code{"lpp"}). } \details{ This function extracts a designated subset of a point pattern on a linear network. The function \code{[.lpp} is a method for \code{\link{[}} for the class \code{"lpp"}. It extracts a designated subset of a point pattern. The argument \code{i} should be a subset index in the usual \R sense: either a numeric vector of positive indices (identifying the points to be retained), a numeric vector of negative indices (identifying the points to be deleted) or a logical vector of length equal to the number of points in the point pattern \code{x}. In the latter case, the points \code{(x$x[i], x$y[i])} for which \code{subset[i]=TRUE} will be retained, and the others will be deleted. The argument \code{j}, if present, should be a spatial window. The pattern inside the region will be retained. \emph{Line segments that cross the boundary of the window are either cut or deleted}, depending on the argument \code{snip} described below. The argument \code{drop} determines whether to remove unused levels of a factor, if the point pattern is multitype (i.e. the marks are a factor) or if the marks are a data frame or hyperframe in which some of the columns are factors. The argument \code{snip} specifies what to do with segments of the network which cross the boundary of the window \code{j}. If \code{snip=FALSE}, such segments are simply deleted. If \code{snip=TRUE} (the default), such segments are cut into pieces by the boundary of \code{j}, and those pieces which lie inside the window \code{j} are included in the resulting network. Use \code{\link{unmark}} to remove all the marks in a marked point pattern, and \code{\link{subset.lpp}} to remove only some columns of marks. 
} \seealso{ \code{\link{lpp}}, \code{\link{subset.lpp}} } \examples{ # Chicago crimes data - remove cases of assault chicago[marks(chicago) != "assault"] # equivalent to subset(chicago, select=-assault) # spatial window subset B <- owin(c(350, 700), c(600, 1000)) plot(chicago) plot(B, add=TRUE, lty=2, border="red", lwd=3) op <- par(mfrow=c(1,2), mar=0.6+c(0,0,1,0)) plot(B, main="chicago[B, snip=FALSE]", lty=3, border="red") plot(chicago[, B, snip=FALSE], add=TRUE) plot(B, main="chicago[B, snip=TRUE]", lty=3, border="red") plot(chicago[, B, snip=TRUE], add=TRUE) par(op) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/addvar.Rd0000644000176200001440000001507313160710571014427 0ustar liggesusers\name{addvar} \alias{addvar} \title{ Added Variable Plot for Point Process Model } \description{ Computes the coordinates for an Added Variable Plot for a fitted point process model. } \usage{ addvar(model, covariate, ..., subregion=NULL, bw="nrd0", adjust=1, from=NULL, to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname, crosscheck=FALSE) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{covariate}{ The covariate to be added to the model. Either a pixel image, a \code{function(x,y)}, or a character string giving the name of a covariate that was supplied when the model was fitted. } \item{subregion}{ Optional. A window (object of class \code{"owin"}) specifying a subset of the spatial domain of the data. The calculation will be confined to the data in this subregion. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link[stats]{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link[stats]{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link[stats]{density.default}} to control the number and range of values at which the function will be estimated. 
} \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}. } \item{bw.input}{ Character string specifying the input data used for automatic bandwidth selection. } \item{bw.restrict}{ Logical value, specifying whether bandwidth selection is performed using data from the entire spatial domain or from the \code{subregion}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{crosscheck}{ For developers only. Logical value indicating whether to perform cross-checks on the validity of the calculation. } } \details{ This command generates the plot coordinates for an Added Variable Plot for a spatial point process model. Added Variable Plots (Cox, 1958, sec 4.5; Wang, 1985) are commonly used in linear models and generalized linear models, to decide whether a model with response \eqn{y} and predictors \eqn{x} would be improved by including another predictor \eqn{z}. In a (generalised) linear model with response \eqn{y} and predictors \eqn{x}, the Added Variable Plot for a new covariate \eqn{z} is a plot of the smoothed Pearson residuals from the original model against the scaled residuals from a weighted linear regression of \eqn{z} on \eqn{x}. If this plot has nonzero slope, then the new covariate \eqn{z} is needed. For general advice see Cook and Weisberg (1999); Harrell (2001). Essentially the same technique can be used for a spatial point process model (Baddeley et al, 2013). The argument \code{model} should be a fitted spatial point process model (object of class \code{"ppm"}). The argument \code{covariate} identifies the covariate that is to be considered for addition to the model. It should be either a pixel image (object of class \code{"im"}) or a \code{function(x,y)} giving the values of the covariate at any spatial location. 
Alternatively \code{covariate} may be a character string, giving the name of a covariate that was supplied (in the \code{covariates} argument to \code{\link{ppm}}) when the model was fitted, but was not used in the model. The result of \code{addvar(model, covariate)} is an object belonging to the classes \code{"addvar"} and \code{"fv"}. Plot this object to generate the added variable plot. Note that the plot method shows the pointwise significance bands for a test of the \emph{null} model, i.e. the null hypothesis that the new covariate has no effect. The smoothing bandwidth is controlled by the arguments \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}. If \code{bw} is a numeric value, then the bandwidth is taken to be \code{adjust * bw}. If \code{bw} is a string representing a bandwidth selection rule (recognised by \code{\link[stats]{density.default}}) then the bandwidth is selected by this rule. The data used for automatic bandwidth selection are specified by \code{bw.input} and \code{bw.restrict}. If \code{bw.input="points"} (the default) then bandwidth selection is based on the covariate values at the points of the original point pattern dataset to which the model was fitted. If \code{bw.input="quad"} then bandwidth selection is based on the covariate values at every quadrature point used to fit the model. If \code{bw.restrict=TRUE} then the bandwidth selection is performed using only data from inside the \code{subregion}. } \section{Slow computation}{ In a large dataset, computation can be very slow if the default settings are used, because the smoothing bandwidth is selected automatically. To avoid this, specify a numerical value for the bandwidth \code{bw}. One strategy is to use a coarser subset of the data to select \code{bw} automatically. The selected bandwidth can be read off the print output for \code{addvar}. } \value{ An object of class \code{"addvar"} containing the coordinates for the added variable plot. 
There is a \code{plot} method. } \section{Internal data}{ The return value has an attribute \code{"spatial"} which contains the internal data: the computed values of the residuals, and of all relevant covariates, at each quadrature point of the model. It is an object of class \code{"ppp"} with a data frame of marks. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2013) Residual diagnostics for covariate effects in spatial point process models. \emph{Journal of Computational and Graphical Statistics}, \bold{22}, 886--905. Cook, R.D. and Weisberg, S. (1999) \emph{Applied regression, including computing and graphics}. New York: Wiley. Cox, D.R. (1958) \emph{Planning of Experiments}. New York: Wiley. Harrell, F. (2001) \emph{Regression Modeling Strategies}. New York: Springer. Wang, P. (1985) Adding a variable in generalized linear models. \emph{Technometrics} \bold{27}, 273--276. } \author{ \adrian, \rolf, Ya-Mei Chang and Yong Song. } \seealso{ \code{\link{parres}}, \code{\link{rhohat}}, \code{\link{rho2hat}}. } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) model <- ppm(X, ~y) adv <- addvar(model, "x") plot(adv) adv <- addvar(model, "x", subregion=square(0.5)) } \keyword{spatial} \keyword{models} spatstat/man/Lest.Rd0000644000176200001440000000546213160710571014076 0ustar liggesusers\name{Lest} \alias{Lest} \title{L-function} \description{ Calculates an estimate of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) for a spatial point pattern. } \usage{ Lest(X, ...) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of \eqn{L(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{ Other arguments passed to \code{\link{Kest}} to control the estimation procedure. } } \details{ This command computes an estimate of the \eqn{L}-function for the spatial point pattern \code{X}. 
The \eqn{L}-function is a transformation of Ripley's \eqn{K}-function, \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)} where \eqn{K(r)} is the \eqn{K}-function. See \code{\link{Kest}} for information about Ripley's \eqn{K}-function. The transformation to \eqn{L} was proposed by Besag (1977). The command \code{Lest} first calls \code{\link{Kest}} to compute the estimate of the \eqn{K}-function, and then applies the square root transformation. For a completely random (uniform Poisson) point pattern, the theoretical value of the \eqn{L}-function is \eqn{L(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L(r)} is more appropriate for use in simulation envelopes and hypothesis tests. See \code{\link{Kest}} for the list of arguments. } \section{Variance approximations}{ If the argument \code{var.approx=TRUE} is given, the return value includes columns \code{rip} and \code{ls} containing approximations to the variance of \eqn{\hat L(r)}{Lest(r)} under CSR. These are obtained by the delta method from the variance approximations described in \code{\link{Kest}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L} has been estimated } \item{theo}{the theoretical value \eqn{L(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L(r)} obtained by the edge corrections named. } \references{ Besag, J. (1977) Discussion of Dr Ripley's paper. \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 193--195. 
} \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ data(cells) L <- Lest(cells) plot(L, main="L function for cells") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/dfbetas.ppm.Rd0000644000176200001440000000635413160710571015373 0ustar liggesusers\name{dfbetas.ppm} \alias{dfbetas.ppm} \title{ Parameter influence measure } \description{ Computes the deletion influence measure for each parameter in a fitted point process model. } \usage{ \method{dfbetas}{ppm}(model, ..., drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ Given a fitted spatial point process \code{model}, this function computes the influence measure for each parameter, as described in Baddeley, Chang and Song (2013). This is a method for the generic function \code{\link[stats]{dfbetas}}. The influence measure for each parameter \eqn{\theta}{\theta} is a signed measure in two-dimensional space. It consists of a discrete mass on each data point (i.e. each point in the point pattern to which the \code{model} was originally fitted) and a continuous density at all locations. The mass at a data point represents the change in the fitted value of the parameter \eqn{\theta}{\theta} that would occur if this data point were to be deleted. 
The density at other non-data locations represents the effect (on the fitted value of \eqn{\theta}{\theta}) of deleting these locations (and their associated covariate values) from the input to the fitting procedure. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. } \value{ An object of class \code{"msr"} representing a signed or vector-valued measure. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}, \code{\link{influence.ppm}}, \code{\link{ppmInfluence}} } \examples{ \testonly{op <- spatstat.options(npixel=32)} X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) \testonly{fit <- ppm(X, ~x+y, nd=16)} plot(dfbetas(fit)) plot(Smooth(dfbetas(fit))) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{models} spatstat/man/plot.hyperframe.Rd0000644000176200001440000000667013160710621016304 0ustar liggesusers\name{plot.hyperframe} \alias{plot.hyperframe} \title{Plot Entries in a Hyperframe} \description{ Plots the entries in a hyperframe, in a series of panels, one panel for each row of the hyperframe. 
} \usage{ \method{plot}{hyperframe}(x, e, \dots, main, arrange=TRUE, nrows=NULL, ncols=NULL, parargs=list(mar=mar * marsize), marsize=1, mar=c(1,1,3,1)) } \arguments{ \item{x}{ Data to be plotted. A hyperframe (object of class \code{"hyperframe"}, see \code{\link{hyperframe}}). } \item{e}{ How to plot each row. Optional. An \R language call or expression (typically enclosed in \code{\link{quote}()}) that will be evaluated in each row of the hyperframe to generate the plots. } \item{\dots}{ Extra arguments controlling the plot (when \code{e} is missing). } \item{main}{Overall title for the array of plots.} \item{arrange}{ Logical flag indicating whether to plot the objects side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{parargs}{ Optional list of arguments passed to \code{\link{par}} before plotting each panel. Can be used to control margin sizes, etc. } \item{marsize}{ Optional scale parameter controlling the sizes of margins around the panels. Incompatible with \code{parargs}. } \item{mar}{ Optional numeric vector of length 1, 2 or 4 controlling the relative sizes of margins between the panels. Incompatible with \code{parargs}. } } \details{ This is the \code{plot} method for the class \code{"hyperframe"}. The argument \code{x} must be a hyperframe (like a data frame, except that the entries can be objects of any class; see \code{\link{hyperframe}}). This function generates a series of plots, one plot for each row of the hyperframe. If \code{arrange=TRUE} (the default), then these plots are arranged in a neat array of panels within a single plot frame. If \code{arrange=FALSE}, the plots are simply executed one after another. Exactly what is plotted, and how it is plotted, depends on the argument \code{e}. 
The default (if \code{e} is missing) is to plot only the first column of \code{x}. Each entry in the first column is plotted using the generic \code{\link{plot}} command, together with any extra arguments given in \code{\dots}. If \code{e} is present, it should be an \R language expression involving the column names of \code{x}. (It is typically created using \code{\link{quote}} or \code{\link{expression}}.) The expression will be evaluated once for each row of \code{x}. It will be evaluated in an environment where each column name of \code{x} is interpreted as meaning the object in that column in the current row. See the Examples. } \value{ \code{NULL}. } \seealso{ \code{\link{hyperframe}}, \code{\link{with.hyperframe}} } \examples{ H <- hyperframe(id=1:10) H$X <- with(H, rpoispp(100)) H$D <- with(H, distmap(X)) # points only plot(H[,"X"]) plot(H, quote(plot(X, main=id))) # points superimposed on images plot(H, quote({plot(D, main=id); plot(X, add=TRUE)})) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/valid.ppm.Rd0000644000176200001440000000532513160710621015053 0ustar liggesusers\name{valid.ppm} \alias{valid.ppm} \title{ Check Whether Point Process Model is Valid } \description{ Determines whether a fitted point process model satisfies the integrability conditions for existence of the point process. } \usage{ \method{valid}{ppm}(object, warn=TRUE, \dots) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"}). } \item{warn}{ Logical value indicating whether to issue a warning if the validity of the model cannot be checked (due to unavailability of the required code). } \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{valid}} for Poisson and Gibbs point process models (class \code{"ppm"}). The model-fitting function \code{\link{ppm}} fits Gibbs point process models to point pattern data. 
By default, \code{\link{ppm}} does not check whether the fitted model actually exists as a point process. This checking is done by \code{valid.ppm}. Unlike a regression model, which is well-defined for any values of the fitted regression coefficients, a Gibbs point process model is only well-defined if the fitted interaction parameters satisfy some constraints. A famous example is the Strauss process (see \code{\link{Strauss}}) which exists only when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1}, the probability density is not integrable and the process does not exist (and cannot be simulated). By default, \code{\link{ppm}} does not enforce the constraint that a fitted Strauss process (for example) must satisfy \eqn{\gamma \le 1}{gamma <= 1}. This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} could be useful information for data analysis, as it indicates that the Strauss model is not appropriate, and suggests a clustered model should be fitted. The function \code{valid.ppm} checks whether the fitted model \code{object} specifies a well-defined point process. It returns \code{TRUE} if the model is well-defined. Another possible reason for invalid models is that the data may not be adequate for estimation of the model parameters. In this case, some of the fitted coefficients could be \code{NA} or infinite values. If this happens then \code{valid.ppm} returns \code{FALSE}. Use the function \code{\link{project.ppm}} to force the fitted model to be valid. } \value{ A logical value, or \code{NA}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{ppm}}, \code{\link{project.ppm}} } \examples{ fit1 <- ppm(cells, ~1, Strauss(0.1)) valid(fit1) fit2 <- ppm(redwood, ~1, Strauss(0.1)) valid(fit2) } \keyword{spatial} \keyword{models} spatstat/man/Kdot.Rd0000644000176200001440000001701113160710571014061 0ustar liggesusers\name{Kdot} \alias{Kdot} \title{ Multitype K Function (i-to-any) } \description{ For a multitype point pattern, estimate the multitype \eqn{K} function which counts the expected number of other points of the process within a given distance of a point of type \eqn{i}. } \usage{ Kdot(X, i, r=NULL, breaks=NULL, correction, ..., ratio=FALSE, from) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{K} function \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{ratio}{ Logical. 
If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{from}{An alternative way to specify \code{i}.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ This function \code{Kdot} and its companions \code{\link{Kcross}} and \code{\link{Kmulti}} are generalisations of the function \code{\link{Kest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. 
If \code{i} is missing, it defaults to the first level of the marks factor, \code{i = levels(X$marks)[1]}. The ``type \eqn{i} to any type'' multitype \eqn{K} function of a stationary multitype point process \eqn{X} is defined so that \eqn{\lambda K_{i\bullet}(r)}{lambda Ki.(r)} equals the expected number of additional random points within a distance \eqn{r} of a typical point of type \eqn{i} in the process \eqn{X}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The function \eqn{K_{i\bullet}}{Ki.} is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K_{i\bullet}(r)}{Ki.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the subprocess of type \eqn{i} points were independent of the subprocess of points of all types not equal to \eqn{i}, then \eqn{K_{i\bullet}(r)}{Ki.(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} may suggest dependence between types. This algorithm estimates the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}, using the border correction. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must exceed the radius of the largest disc contained in the window. 
The pair correlation function can also be applied to the result of \code{Kdot}; see \code{\link{pcf}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. The reduced sample estimator of \eqn{K_{i\bullet}}{Ki.} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. } \seealso{ \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{Kmulti}}, \code{\link{pcf}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{woods <- woods[seq(1, npoints(woods), by=80)]} Kh. <- Kdot(woods, "hickory") # diagnostic plot for independence between hickories and other trees plot(Kh.) 
\dontrun{ # synthetic example with two marks "a" and "b" pp <- runifpoispp(50) pp <- pp \%mark\% factor(sample(c("a","b"), npoints(pp), replace=TRUE)) K <- Kdot(pp, "a") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.lppm.Rd0000644000176200001440000000250613160710621015104 0ustar liggesusers\name{plot.lppm} \alias{plot.lppm} \title{ Plot a Fitted Point Process Model on a Linear Network } \description{ Plots the fitted intensity of a point process model on a linear network. } \usage{ \method{plot}{lppm}(x, ..., type="trend") } \arguments{ \item{x}{ An object of class \code{"lppm"} representing a fitted point process model on a linear network. } \item{\dots}{ Arguments passed to \code{\link{plot.linim}} to control the plot. } \item{type}{ Character string (either \code{"trend"} or \code{"cif"}) determining whether to plot the fitted first order trend or the conditional intensity. } } \details{ This function is the plot method for the class \code{"lppm"}. It computes the fitted intensity of the point process model, and displays it using \code{\link{plot.linim}}. The default is to display intensity values as colours. Alternatively if the argument \code{style="width"} is given, intensity values are displayed as the widths of thick lines drawn over the network. } \value{ Null. } \author{ \adrian } \seealso{ \code{\link{lppm}}, \code{\link{plot.linim}}, \code{\link{methods.lppm}}, \code{\link{predict.lppm}}. } \examples{ X <- runiflpp(10, simplenet) fit <- lppm(X ~x) plot(fit) plot(fit, style="width") } \keyword{spatial} \keyword{models} spatstat/man/bw.diggle.Rd0000644000176200001440000000677613160710571015042 0ustar liggesusers\name{bw.diggle} \alias{bw.diggle} \title{ Cross Validated Bandwidth Selection for Kernel Density } \description{ Uses cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity. 
} \usage{ bw.diggle(X, ..., correction="good", hmax=NULL, nr=512) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{correction}{ Character string passed to \code{\link{Kest}} determining the edge correction to be used to calculate the \eqn{K} function. } \item{hmax}{ Numeric. Maximum value of bandwidth that should be considered. } \item{nr}{ Integer. Number of steps in the distance value \eqn{r} to use in computing numerical integrals. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{\sigma} is chosen to minimise the mean-square error criterion defined by Diggle (1985). The algorithm uses the method of Berman and Diggle (1989) to compute the quantity \deqn{ M(\sigma) = \frac{\mbox{MSE}(\sigma)}{\lambda^2} - g(0) }{ M(\sigma) = MSE(\sigma)/\lambda^2 - g(0) } as a function of bandwidth \eqn{\sigma}{\sigma}, where \eqn{\mbox{MSE}(\sigma)}{MSE(\sigma)} is the mean squared error at bandwidth \eqn{\sigma}{\sigma}, while \eqn{\lambda}{\lambda} is the mean intensity, and \eqn{g} is the pair correlation function. See Diggle (2003, pages 115-118) for a summary of this method. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) mean-square error as a function of \code{sigma}. } \section{Definition of bandwidth}{ The smoothing parameter \code{sigma} returned by \code{bw.diggle} (and displayed on the horizontal axis of the plot) corresponds to \code{h/2}, where \code{h} is the smoothing parameter described in Diggle (2003, pages 116-118) and Berman and Diggle (1989). In those references, the smoothing kernel is the uniform density on the disc of radius \code{h}. 
In \code{\link{density.ppp}}, the smoothing kernel is the isotropic Gaussian density with standard deviation \code{sigma}. When replacing one kernel by another, the usual practice is to adjust the bandwidths so that the kernels have equal variance (cf. Diggle 2003, page 118). This implies that \code{sigma = h/2}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.ppl}}, \code{\link{bw.scott}} } \examples{ data(lansing) attach(split(lansing)) b <- bw.diggle(hickory) plot(b, ylim=c(-2, 0), main="Cross validation for hickories") \donttest{ plot(density(hickory, b)) } } \references{ Berman, M. and Diggle, P. (1989) Estimating weighted integrals of the second-order intensity of a spatial point process. \emph{Journal of the Royal Statistical Society, series B} \bold{51}, 81--92. Diggle, P.J. (1985) A kernel method for smoothing point process data. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C) \bold{34} (1985) 138--147. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/clicklpp.Rd0000644000176200001440000000550713160710571014770 0ustar liggesusers\name{clicklpp} \alias{clicklpp} \title{Interactively Add Points on a Linear Network} \description{ Allows the user to create a point pattern on a linear network by point-and-click in the display. } \usage{ clicklpp(L, n=NULL, types=NULL, \dots, add=FALSE, main=NULL, hook=NULL) } \arguments{ \item{L}{ Linear network on which the points will be placed. An object of class \code{"linnet"}. } \item{n}{ Number of points to be added (if this is predetermined). } \item{types}{ Vector of types, when creating a multitype point pattern. 
} \item{\dots}{ Optional extra arguments to be passed to \code{\link[graphics]{locator}} to control the display. } \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). } \item{main}{ Main heading for plot. } \item{hook}{For internal use only. Do not use this argument.} } \value{ A point pattern (object of class \code{"lpp"}). } \details{ This function allows the user to create a point pattern on a linear network by interactively clicking on the screen display. First the linear network \code{L} is plotted on the current screen device. Then the user is prompted to point the mouse at any desired locations and click the left mouse button to add each point. Interactive input stops after \code{n} clicks (if \code{n} was given) or when the middle mouse button is pressed. The return value is a point pattern on the network \code{L}, containing the locations of all the clicked points, after they have been projected onto the network \code{L}. Any points that were clicked outside the bounding window of the network will be ignored. If the argument \code{types} is given, then a multitype point pattern will be created. The user is prompted to input the locations of points of type \code{type[i]}, for each successive index \code{i}. (If the argument \code{n} was given, there will be \code{n} points of \emph{each} type.) The return value is a multitype point pattern on a linear network. This function uses the \R{} command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be passed to \code{\link[graphics]{locator}} through \code{\dots} include \code{pch} (plotting character), \code{cex} (character expansion factor) and \code{col} (colour). See \code{\link[graphics]{locator}} and \code{\link[graphics]{par}}. 
} \seealso{ \code{\link{clickppp}}, \code{\link{identify.lpp}}, \code{\link[graphics]{locator}}, \code{\link{clickpoly}}, \code{\link{clickbox}}, \code{\link{clickdist}} } \author{ \spatstatAuthors, based on an idea by Dominic Schuhmacher. } \keyword{spatial} \keyword{iplot} spatstat/man/insertVertices.Rd0000644000176200001440000000435113160710621016170 0ustar liggesusers\name{insertVertices} \alias{insertVertices} \title{ Insert New Vertices in a Linear Network } \description{ Adds new vertices to a linear network at specified locations along the network. } \usage{ insertVertices(L, \dots) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Additional arguments passed to \code{\link{as.lpp}} specifying the positions of the new vertices along the network. } } \details{ This function adds new vertices at locations along an existing linear network. The argument \code{L} can be either a linear network (class \code{"linnet"}) or some other object that includes a linear network. The new vertex locations can be specified either as a point pattern (class \code{"lpp"} or \code{"ppp"}) or using coordinate vectors \code{x,y} or \code{seg,tp} or \code{x,y,seg,tp} as explained in the help for \code{\link{as.lpp}}. This function breaks the existing line segments of \code{L} into pieces at the locations specified by the coordinates \code{seg,tp} and creates new vertices at these locations. The result is the modified object, with an attribute \code{"id"} such that the \code{i}th added vertex has become the \code{id[i]}th vertex of the new network. } \value{ An object of the same class as \code{L} representing the result of adding the new vertices. The result also has an attribute \code{"id"} as described in Details. 
} \author{ Adrian Baddeley } \seealso{ \code{\link{as.lpp}} } \examples{ opa <- par(mfrow=c(1,3), mar=rep(0,4)) simplenet plot(simplenet, main="") plot(vertices(simplenet), add=TRUE) # add two new vertices at specified local coordinates L <- insertVertices(simplenet, seg=c(3,7), tp=c(0.2, 0.5)) L plot(L, main="") plot(vertices(L), add=TRUE) id <- attr(L, "id") id plot(vertices(L)[id], add=TRUE, pch=16) # add new vertices at three randomly-generated points X <- runiflpp(3, simplenet) LL <- insertVertices(simplenet, X) plot(LL, main="") plot(vertices(LL), add=TRUE) ii <- attr(LL, "id") plot(vertices(LL)[ii], add=TRUE, pch=16) par(opa) } \keyword{spatial} \keyword{manip} spatstat/man/Kmodel.dppm.Rd0000644000176200001440000000200313160710571015325 0ustar liggesusers\name{Kmodel.dppm} \alias{Kmodel.detpointprocfamily} \alias{pcfmodel.detpointprocfamily} \alias{Kmodel.dppm} \alias{pcfmodel.dppm} \title{ K-function or Pair Correlation Function of a Determinantal Point Process Model } \description{Returns the theoretical \eqn{K}-function or theoretical pair correlation function of a determinantal point process model as a function of one argument \eqn{r}. 
} \usage{ \method{Kmodel}{dppm}(model, \dots) \method{pcfmodel}{dppm}(model, \dots) \method{Kmodel}{detpointprocfamily}(model, \dots) \method{pcfmodel}{detpointprocfamily}(model, \dots) } \arguments{ \item{model}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.} \item{\dots}{Ignored (not quite true -- there is some undocumented internal use)} } \author{ \adrian \rolf and \ege } \examples{ model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) KMatern <- Kmodel(model) pcfMatern <- pcfmodel(model) plot(KMatern, xlim = c(0,0.05)) plot(pcfMatern, xlim = c(0,0.05)) } spatstat/man/union.quad.Rd0000644000176200001440000000200713160710621015234 0ustar liggesusers\name{union.quad} \alias{union.quad} \title{Union of Data and Dummy Points} \description{ Combines the data and dummy points of a quadrature scheme into a single point pattern. } \usage{ union.quad(Q) } \arguments{ \item{Q}{A quadrature scheme (an object of class \code{"quad"}).} } \value{ A point pattern (of class \code{"ppp"}). } \details{ The argument \code{Q} should be a quadrature scheme (an object of class \code{"quad"}, see \code{\link{quad.object}} for details). This function combines the data and dummy points of \code{Q} into a single point pattern. If either the data or the dummy points are marked, the result is a marked point pattern. The function \code{\link{as.ppp}} will perform the same task. } \seealso{ \code{\link{quad.object}}, \code{\link{as.ppp}} } \examples{ data(simdat) Q <- quadscheme(simdat, default.dummy(simdat)) U <- union.quad(Q) \dontrun{plot(U)} # equivalent: U <- as.ppp(Q) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/compatible.Rd0000644000176200001440000000163613160710571015305 0ustar liggesusers\name{compatible} \alias{compatible} \title{Test Whether Objects Are Compatible} \description{ Tests whether two or more objects of the same class are compatible. 
} \usage{ compatible(A, B, \dots) } \arguments{ \item{A,B,\dots}{Two or more objects of the same class} } \details{ This generic function is used to check whether the objects \code{A} and \code{B} (and any additional objects \code{\dots}) are compatible. What is meant by \sQuote{compatible} depends on the class of object. There are methods for the classes \code{"fv"}, \code{"fasp"}, \code{"im"} and \code{"units"}. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{compatible.fv}}, \code{\link{compatible.fasp}}, \code{\link{compatible.im}}, \code{\link{compatible.units}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/crossdist.lpp.Rd0000644000176200001440000000411413160710571015767 0ustar liggesusers\name{crossdist.lpp} \alias{crossdist.lpp} \title{Pairwise distances between two point patterns on a linear network} \description{ Computes the distances between pairs of points taken from two different point patterns on the same linear network. } \usage{ \method{crossdist}{lpp}(X, Y, \dots, method="C") } \arguments{ \item{X,Y}{ Point patterns on a linear network (objects of class \code{"lpp"}). They must lie on the \emph{same} network. } \item{\dots}{ Ignored. } \item{method}{String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. Matrix entries are nonnegative numbers or infinity (\code{Inf}). } \details{ Given two point patterns on a linear network, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, measuring distance by the shortest path in the network. This is a method for the generic function \code{\link{crossdist}} for point patterns on a linear network (objects of class \code{"lpp"}). 
This function expects two point pattern objects \code{X} and \code{Y} on the \emph{same} linear network, and returns the matrix whose \code{[i,j]} entry is the shortest-path distance from \code{X[i]} to \code{Y[j]}. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is much faster. If two points cannot be joined by a path, the distance between them is infinite (\code{Inf}). } \seealso{ \code{\link{crossdist}}, \code{\link{crossdist.ppp}}, \code{\link{pairdist}}, \code{\link{nndist}} } \examples{ v <- split(chicago) X <- v$cartheft Y <- v$burglary d <- crossdist(X, Y) } \author{ \adrian. } \keyword{spatial} \keyword{math} spatstat/man/mean.linim.Rd0000644000176200001440000000306413160710621015206 0ustar liggesusers\name{mean.linim} \alias{mean.linim} \alias{median.linim} \alias{quantile.linim} \title{Mean, Median, Quantiles of Pixel Values on a Linear Network} \description{ Calculates the mean, median, or quantiles of the pixel values in a pixel image on a linear network. } \usage{ \method{mean}{linim}(x, \dots) \method{median}{linim}(x, \dots) \method{quantile}{linim}(x, probs=seq(0,1,0.25), \dots) } \arguments{ \item{x}{ A pixel image on a linear network (object of class \code{"linim"}). } \item{probs}{ Vector of probabilities for which quantiles should be calculated. } \item{\dots}{Arguments passed to other methods.} } \details{ These functions calculate the mean, median and quantiles of the pixel values in the image \code{x} on a linear network. An object of class \code{"linim"} describes a pixel image on a linear network. See \code{\link{linim}}. The functions described here are methods for the generic \code{\link{mean}}, \code{\link[stats]{median}} and \code{\link[stats]{quantile}} for the class \code{"linim"}. 
} \value{ For \code{mean} and \code{median}, a single number. For \code{quantile}, a numeric vector of the same length as \code{probs}. } \seealso{ \code{\link{mean}}, \code{\link[stats]{median}}, \code{\link[stats]{quantile}}, \code{\link{mean.im}}. } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y) {x-y}, W=M) X <- linim(simplenet, Z) X mean(X) median(X) quantile(X) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/segregation.test.Rd0000644000176200001440000000557313160710621016453 0ustar liggesusers\name{segregation.test} \alias{segregation.test} \alias{segregation.test.ppp} \title{ Test of Spatial Segregation of Types } \description{ Performs a Monte Carlo test of spatial segregation of the types in a multitype point pattern. } \usage{ segregation.test(X, \dots) \method{segregation.test}{ppp}(X, \dots, nsim = 19, permute = TRUE, verbose = TRUE, Xname) } \arguments{ \item{X}{ Multitype point pattern (object of class \code{"ppp"} with factor-valued marks). } \item{\dots}{ Additional arguments passed to \code{\link{relrisk.ppp}} to control the smoothing parameter or bandwidth selection. } \item{nsim}{ Number of simulations for the Monte Carlo test. } \item{permute}{ Argument passed to \code{\link{rlabel}}. If \code{TRUE} (the default), randomisation is performed by randomly permuting the labels of \code{X}. If \code{FALSE}, randomisation is performing by resampling the labels with replacement. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{Xname}{ Optional character string giving the name of the dataset \code{X}. } } \details{ The Monte Carlo test of spatial segregation of types, proposed by Kelsall and Diggle (1995) and Diggle et al (2005), is applied to the point pattern \code{X}. 
The test statistic is \deqn{ T = \sum_i \sum_m \left( \widehat p(m \mid x_i) - \overline p_m \right)^2 }{ T = sum[i] sum[m] (phat(m | x[i]) - pbar[m])^2 } where \eqn{\widehat p(m \mid x_i)}{phat(m | x[i])} is the leave-one-out kernel smoothing estimate of the probability that the \eqn{i}-th data point has type \eqn{m}, and \eqn{\overline p_m}{pbar[m]} is the average fraction of data points which are of type \eqn{m}. The statistic \eqn{T} is evaluated for the data and for \code{nsim} randomised versions of \code{X}, generated by randomly permuting or resampling the marks. Note that, by default, automatic bandwidth selection will be performed separately for each randomised pattern. This computation can be very time-consuming but is necessary for the test to be valid in most conditions. A short-cut is to specify the value of the smoothing bandwidth \code{sigma} as shown in the examples. } \value{ An object of class \code{"htest"} representing the result of the test. } \references{ Kelsall, J.E. and Diggle, P.J. (1995) Kernel estimation of relative risk. \emph{Bernoulli} \bold{1}, 3--16. Diggle, P.J., Zheng, P. and Durr, P. (2005) Non-parametric estimation of spatial segregation in a multivariate point process: bovine tuberculosis in Cornwall, UK. \emph{Applied Statistics} \bold{54}, 645--658. } \seealso{ \code{\link{relrisk}} } \examples{ segregation.test(hyytiala, 5) if(interactive()) segregation.test(hyytiala, hmin=0.05) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{htest} spatstat/man/linearK.Rd0000644000176200001440000000406413160710621014545 0ustar liggesusers\name{linearK} \alias{linearK} \title{ Linear K Function } \description{ Computes an estimate of the linear \eqn{K} function for a point pattern on a linear network. } \usage{ linearK(X, r=NULL, ..., correction="Ang", ratio=FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{r}{ Optional. 
Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Ignored. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the linear \eqn{K} function from point pattern data on a linear network. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. The result is the network \eqn{K} function as defined by Okabe and Yamada (2001). If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010; Ang et al, 2012). } \value{ Function value table (object of class \code{"fv"}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. 
} \seealso{ \code{\link{compileK}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) linearK(X) linearK(X, correction="none") } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.colourmap.Rd0000644000176200001440000000615313160710621016137 0ustar liggesusers\name{plot.colourmap} \alias{plot.colourmap} \title{Plot a Colour Map} \description{ Displays a colour map as a colour ribbon } \usage{ \method{plot}{colourmap}(x, ..., main, xlim = NULL, ylim = NULL, vertical = FALSE, axis = TRUE, labelmap=NULL, gap=0.25, add=FALSE) } \arguments{ \item{x}{Colour map to be plotted. An object of class \code{"colourmap"}.} \item{\dots}{ Graphical arguments passed to \code{\link{image.default}} or \code{\link{axis}}. } \item{main}{Main title for plot. A character string.} \item{xlim}{ Optional range of \code{x} values for the location of the colour ribbon. } \item{ylim}{ Optional range of \code{y} values for the location of the colour ribbon. } \item{vertical}{Logical flag determining whether the colour ribbon is plotted as a horizontal strip (\code{FALSE}) or a vertical strip (\code{TRUE}).} \item{axis}{Logical flag determining whether an axis should be plotted showing the numerical values that are mapped to the colours. } \item{labelmap}{ Function. If this is present, then the labels on the plot, which indicate the input values corresponding to particular colours, will be transformed by \code{labelmap} before being displayed on the plot. Typically used to simplify or shorten the labels on the plot. } \item{gap}{ Distance between separate blocks of colour, as a fraction of the width of one block, if the colourmap is discrete. } \item{add}{ Logical value indicating whether to add the colourmap to the existing plot (\code{add=TRUE}), or to start a new plot (\code{add=FALSE}, the default). } } \details{ This is the plot method for the class \code{"colourmap"}. 
An object of this class (created by the function \code{\link{colourmap}}) represents a colour map or colour lookup table associating colours with each data value. The command \code{plot.colourmap} displays the colour map as a colour ribbon or as a colour legend (a sequence of blocks of colour). This plot can be useful on its own to inspect the colour map. If the domain of the colourmap is an interval of real numbers, the colourmap is displayed as a continuous ribbon of colour. If the domain of the colourmap is a finite set of inputs, the colours are displayed as separate blocks of colour. The separation between blocks is equal to \code{gap} times the width of one block. To annotate an existing plot with an explanatory colour ribbon or colour legend, specify \code{add=TRUE} and use the arguments \code{xlim} and/or \code{ylim} to control the physical position of the ribbon on the plot. Labels explaining the colour map are drawn by \code{\link[graphics]{axis}} and can be modified by specifying arguments that will be passed to this function. } \value{ None. } \seealso{\code{\link{colourmap}}} \examples{ co <- colourmap(rainbow(100), breaks=seq(-1,1,length=101)) plot(co) plot(co, col.ticks="pink") ca <- colourmap(rainbow(8), inputs=letters[1:8]) plot(ca, vertical=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{color} \keyword{hplot} spatstat/man/predict.kppm.Rd0000644000176200001440000000277113160710621015563 0ustar liggesusers\name{predict.kppm} \alias{predict.kppm} \alias{fitted.kppm} \title{Prediction from a Fitted Cluster Point Process Model} \description{ Given a fitted cluster point process model, these functions compute the fitted intensity. } \usage{ \method{fitted}{kppm}(object, ...) \method{predict}{kppm}(object, ...) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{\dots}{ Arguments passed to \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. 
} } \details{ These functions are methods for the generic functions \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}. The argument \code{object} should be a cluster point process model (object of class \code{"kppm"}) obtained using the function \code{\link{kppm}}. The \emph{intensity} of the fitted model is computed, using \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } \value{ The value of \code{fitted.kppm} is a numeric vector giving the fitted values at the quadrature points. The value of \code{predict.kppm} is usually a pixel image (object of class \code{"im"}), but see \code{\link{predict.ppm}} for details. } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{vcov.kppm}}, \code{\link{fitted.ppm}}, \code{\link{predict.ppm}} } \examples{ data(redwood) fit <- kppm(redwood ~ x, "Thomas") predict(fit) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/nnmark.Rd0000644000176200001440000000670113160710621014446 0ustar liggesusers\name{nnmark} \alias{nnmark} \title{ Mark of Nearest Neighbour } \description{ Given a marked point pattern dataset \code{X} this function computes, for each desired location \code{y}, the mark attached to the nearest neighbour of \code{y} in \code{X}. The desired locations \code{y} can be either a pixel grid or the point pattern \code{X} itself. } \usage{ nnmark(X, \dots, k = 1, at=c("pixels", "points")) } \arguments{ \item{X}{ A marked point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{k}{ Single integer. The \code{k}th nearest data point will be used. } \item{at}{ String specifying whether to compute the values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). 
} } \details{ Given a marked point pattern dataset \code{X} this function computes, for each desired location \code{y}, the mark attached to the point of \code{X} that is nearest to \code{y}. The desired locations \code{y} can be either a pixel grid or the point pattern \code{X} itself. The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The marks are allowed to be a vector or a data frame. \itemize{ \item If \code{at="points"}, then for each point in \code{X}, the algorithm finds the nearest \emph{other} point in \code{X}, and extracts the mark attached to it. The result is a vector or data frame containing the marks of the neighbours of each point. \item If \code{at="pixels"} (the default), then for each pixel in a rectangular grid, the algorithm finds the nearest point in \code{X}, and extracts the mark attached to it. The result is an image or a list of images containing the marks of the neighbours of each pixel. The pixel resolution is controlled by the arguments \code{\dots} passed to \code{\link{as.mask}}. } If the argument \code{k} is given, then the \code{k}-th nearest neighbour will be used. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). The value at each pixel is the mark attached to the nearest point of \code{X}. \item If \code{at="points"}, the result is a vector or factor of length equal to the number of points in \code{X}. Entries are the mark values of the nearest neighbours of each point of \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{"solist"}, for which there is a plot method. 
\item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, Entries are the mark values of the nearest neighbours of each point of \code{X}. } } \author{\adrian \rolf and \ege } \seealso{ \code{\link{Smooth.ppp}}, \code{\link{marktable}}, \code{\link{nnwhich}} } \examples{ plot(nnmark(ants)) v <- nnmark(ants, at="points") v[1:10] plot(nnmark(finpines)) vf <- nnmark(finpines, at="points") vf[1:5,] } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/Kres.Rd0000644000176200001440000000571713160710571014076 0ustar liggesusers\name{Kres} \Rdversion{1.1} \alias{Kres} \title{ Residual K Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the residual \eqn{K} function, which serves as a diagnostic for goodness-of-fit of the model. } \usage{ Kres(object, ...) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}), or the value returned by a previous call to \code{\link{Kcom}}. } \item{\dots}{ Arguments passed to \code{\link{Kcom}}. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes a residual version of the \eqn{K} function of the dataset, which should be approximately zero if the model is a good fit to the data. In normal use, \code{object} is a fitted point process model or a point pattern. Then \code{Kres} first calls \code{\link{Kcom}} to compute both the nonparametric estimate of the \eqn{K} function and its model compensator. Then \code{Kres} computes the difference between them, which is the residual \eqn{K}-function. Alternatively, \code{object} may be a function value table (object of class \code{"fv"}) that was returned by a previous call to \code{\link{Kcom}}. 
Then \code{Kres} computes the residual from this object. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}. } \seealso{ Related functions: \code{\link{Kcom}}, \code{\link{Kest}}. Alternative functions: \code{\link{Gres}}, \code{\link{psstG}}, \code{\link{psstA}}, \code{\link{psst}}. Point process models: \code{\link{ppm}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{ fit0 <- ppm(cells, ~1, nd=16)} K0 <- Kres(fit0) K0 plot(K0) # isotropic-correction estimate plot(K0, ires ~ r) # uniform Poisson is clearly not correct fit1 <- ppm(cells, ~1, Strauss(0.08)) \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)} K1 <- Kres(fit1) if(interactive()) { plot(K1, ires ~ r) # fit looks approximately OK; try adjusting interaction distance plot(Kres(cells, interaction=Strauss(0.12))) } # How to make envelopes \dontrun{ E <- envelope(fit1, Kres, model=fit1, nsim=19) plot(E) } # For computational efficiency Kc <- Kcom(fit1) K1 <- Kres(Kc) } \keyword{spatial} \keyword{models} spatstat/man/inforder.family.Rd0000644000176200001440000000225313160710621016246 0ustar liggesusers\name{inforder.family} \alias{inforder.family} \title{Infinite Order Interaction Family} \description{ An object describing the family of all Gibbs point processes with infinite interaction order. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the interaction structure of Gibbs point processes which have infinite order of interaction, such as the area-interaction process \cite{\link{AreaInter}}. 
Anyway, \code{inforder.family} is an object of class \code{"isf"} containing a function \code{inforder.family$eval} for evaluating the sufficient statistics of a Gibbs point process model taking an exponential family form. } \seealso{ \code{\link{AreaInter}} to create the area interaction process structure. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/interp.im.Rd0000644000176200001440000000305613160710621015065 0ustar liggesusers\name{interp.im} \alias{interp.im} \title{Interpolate a Pixel Image} \description{ Interpolates the values of a pixel image at any desired location in the frame. } \usage{ interp.im(Z, x, y=NULL) } \arguments{ \item{Z}{ Pixel image (object of class \code{"im"}) with numeric or integer values. } \item{x,y}{ Vectors of Cartesian coordinates. Alternatively \code{x} can be a point pattern and \code{y} can be missing. } } \details{ A value at each location \code{(x[i],y[i])} will be interpolated using the pixel values of \code{Z} at the four surrounding pixel centres, by simple bilinear interpolation. At the boundary (where \code{(x[i],y[i])} is not surrounded by four pixel centres) the value at the nearest pixel is taken. The arguments \code{x,y} can be anything acceptable to \code{\link[grDevices]{xy.coords}}. } \value{ Vector of interpolated values, with \code{NA} for points that lie outside the domain of the image. 
} \examples{ opa <- par(mfrow=c(1,2)) # coarse image V <- as.im(function(x,y) { x^2 + y }, owin(), dimyx=10) plot(V, main="coarse image", col=terrain.colors(256)) # lookup value at location (0.5,0.5) V[list(x=0.5,y=0.5)] # interpolated value at location (0.5,0.5) interp.im(V, 0.5, 0.5) # true value is 0.75 # how to obtain an interpolated image at a desired resolution U <- as.im(interp.im, W=owin(), Z=V, dimyx=256) plot(U, main="interpolated image", col=terrain.colors(256)) par(opa) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/kernel.moment.Rd0000644000176200001440000000321413160710621015732 0ustar liggesusers\name{kernel.moment} \alias{kernel.moment} \title{Moment of Smoothing Kernel} \description{ Computes the complete or incomplete \eqn{m}th moment of a smoothing kernel. } \usage{ kernel.moment(m, r, kernel = "gaussian") } \arguments{ \item{m}{ Exponent (order of moment). An integer. } \item{r}{ Upper limit of integration for the incomplete moment. A numeric value or numeric vector. Set \code{r=Inf} to obtain the complete moment. } \item{kernel}{ String name of the kernel. Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } } \details{ Kernel estimation of a probability density in one dimension is performed by \code{\link[stats]{density.default}} using a kernel function selected from the list above. For more information about these kernels, see \code{\link[stats]{density.default}}. The function \code{kernel.moment} computes the partial integral \deqn{ \int_{-\infty}^r t^m k(t) dt }{ integral[-Inf][r] t^m k(t) dt } where \eqn{k(t)} is the selected kernel, \eqn{r} is the upper limit of integration, and \eqn{m} is the exponent or order. } \value{ A single number, or a numeric vector of the same length as \code{r}. 
} \seealso{ \code{\link[stats]{density.default}}, \code{\link{dkernel}}, \code{\link{kernel.factor}}, } \examples{ kernel.moment(1, 0.1, "epa") curve(kernel.moment(2, x, "epa"), from=-1, to=1) } \author{ \adrian and Martin Hazelton. } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat/man/matclust.estpcf.Rd0000644000176200001440000001504213160710621016275 0ustar liggesusers\name{matclust.estpcf} \alias{matclust.estpcf} \title{Fit the Matern Cluster Point Process by Minimum Contrast Using Pair Correlation} \description{ Fits the Matern Cluster point process to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ matclust.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the Matern Cluster model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Matern Cluster process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Matern Cluster point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. 
The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Matern Cluster point process to \code{X}, by finding the parameters of the Matern Cluster model which give the closest match between the theoretical pair correlation function of the Matern Cluster process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Matern Cluster point process is described in \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2003, p. 62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and uniformly distributed inside a circle of radius \eqn{R} centred on the parent point, where \eqn{R}{R} is equal to the parameter \code{scale}. The named vector of starting values can use either \code{R} or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models.
The theoretical pair correlation function of the Matern Cluster process is \deqn{ g(r) = 1 + \frac 1 {4\pi R \kappa r} h(\frac{r}{2R}) }{ g(r) = 1 + h(r/(2*R))/(4 * pi * R * kappa * r) } where the radius R is the parameter \code{scale} and \deqn{ h(z) = \frac {16} \pi [ z \mbox{arccos}(z) - z^2 \sqrt{1 - z^2} ] }{ h(z) = (16/pi) * ((z * arccos(z) - z^2 * sqrt(1 - z^2)) } for \eqn{z <= 1}, and \eqn{h(z) = 0} for \eqn{z > 1}. The theoretical intensity of the Matern Cluster process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{R}{R}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Matern Cluster process can be simulated, using \code{\link{rMatClust}}. Homogeneous or inhomogeneous Matern Cluster models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. 
There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian } \seealso{ \code{\link{kppm}}, \code{\link{matclust.estK}}, \code{\link{thomas.estpcf}}, \code{\link{thomas.estK}}, \code{\link{lgcp.estK}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{rMatClust}} to simulate the fitted model. } \examples{ data(redwood) u <- matclust.estpcf(redwood, c(kappa=10, R=0.1)) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat/man/varcount.Rd0000644000176200001440000000656613160710621015030 0ustar liggesusers\name{varcount} \alias{varcount} \title{ Predicted Variance of the Number of Points } \description{ Given a fitted point process model, calculate the predicted variance of the number of points in a nominated set \code{B}. } \usage{ varcount(model, B, \dots, dimyx = NULL) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). } \item{B}{ A window (object of class \code{"owin"}) specifying the region in which the points are counted. Alternatively a pixel image (object of class \code{"im"}) or a function of spatial coordinates specifying a numerical weight for each random point. } \item{\dots}{ Additional arguments passed to \code{B} when it is a function.
} \item{dimyx}{ Spatial resolution for the calculations. Argument passed to \code{\link{as.mask}}. } } \details{ This command calculates the variance of the number of points falling in a specified window \code{B} according to the \code{model}. It can also calculate the variance of a sum of weights attached to each random point. The \code{model} should be a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). \itemize{ \item{ If \code{B} is a window, this command calculates the variance of the number of points falling in \code{B}, according to the fitted \code{model}. If the \code{model} depends on spatial covariates other than the Cartesian coordinates, then \code{B} should be a subset of the domain in which these covariates are defined. } \item{ If \code{B} is a pixel image, this command calculates the variance of \eqn{T = \sum_i B(x_i)}{T = sum[i] B(x[i])}, the sum of the values of \code{B} over all random points falling in the domain of the image. If the \code{model} depends on spatial covariates other than the Cartesian coordinates, then the domain of the pixel image, \code{as.owin(B)}, should be a subset of the domain in which these covariates are defined. } \item{ If \code{B} is a \code{function(x,y)} or \code{function(x,y,...)} this command calculates the variance of \eqn{T = \sum_i B(x_i)}{T = sum[i] B(x[i])}, the sum of the values of \code{B} over all random points falling inside the window \code{W=as.owin(model)}, the window in which the original data were observed. } } The variance calculation involves the intensity and the pair correlation function of the model. The calculation is exact (up to discretisation error) for models of class \code{"kppm"} and \code{"dppm"}, and for Poisson point process models of class \code{"ppm"}. 
For Gibbs point process models of class \code{"ppm"} the calculation depends on the Poisson-saddlepoint approximations to the intensity and pair correlation function, which are rough approximations. The approximation is not yet implemented for some Gibbs models. } \value{ A single number. } \author{ \spatstatAuthors } \seealso{ \code{\link{predict.ppm}}, \code{\link{predict.kppm}}, \code{\link{predict.dppm}} } \examples{ fitT <- kppm(redwood ~ 1, "Thomas") B <- owin(c(0, 0.5), c(-0.5, 0)) varcount(fitT, B) fitS <- ppm(swedishpines ~ 1, Strauss(9)) BS <- square(50) varcount(fitS, BS) } \keyword{spatial} \keyword{models} spatstat/man/pcf.fv.Rd0000644000176200001440000001134213160710621014337 0ustar liggesusers\name{pcf.fv} \alias{pcf.fv} \title{Pair Correlation Function obtained from K Function} \description{ Estimates the pair correlation function of a point pattern, given an estimate of the K function. } \usage{ \method{pcf}{fv}(X, \dots, method="c") } \arguments{ \item{X}{ An estimate of the \eqn{K} function or one of its variants. An object of class \code{"fv"}. } \item{\dots}{ Arguments controlling the smoothing spline function \code{smooth.spline}. } \item{method}{ Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the method for deriving the pair correlation function from the \code{K} function. } } \value{ A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) representing a pair correlation function. Essentially a data frame containing (at least) the variables \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{pcf}{vector of values of \eqn{g(r)} } } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. 
See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} from an estimate of \eqn{K(r)} or its variants, using smoothing splines to approximate the derivative. It is a method for the generic function \code{\link{pcf}} for the class \code{"fv"}. The argument \code{X} should be an estimated \eqn{K} function, given as a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). This object should be the value returned by \code{\link{Kest}}, \code{\link{Kcross}}, \code{\link{Kmulti}} or \code{\link{Kinhom}}. The smoothing spline operations are performed by \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}} from the \code{modreg} library. Four numerical methods are available: \itemize{ \item \bold{"a"} apply smoothing to \eqn{K(r)}, estimate its derivative, and plug in to the formula above; \item \bold{"b"} apply smoothing to \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)} constraining \eqn{Y(0) = 0}, estimate the derivative of \eqn{Y}, and solve; \item \bold{"c"} apply smoothing to \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Y(r) = K(r)/(pi * r^2)} constraining \eqn{Z(0)=1}, estimate its derivative, and solve. \item \bold{"d"} apply smoothing to \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))}, estimate its derivative, and solve. } Method \code{"c"} seems to be the best at suppressing variability for small values of \eqn{r}. 
However it effectively constrains \eqn{g(0) = 1}. If the point pattern seems to have inhibition at small distances, you may wish to experiment with method \code{"b"} which effectively constrains \eqn{g(0)=0}. Method \code{"a"} seems comparatively unreliable. Useful arguments to control the splines include the smoothing tradeoff parameter \code{spar} and the degrees of freedom \code{df}. See \code{\link{smooth.spline}} for details. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{pcf}}, \code{\link{pcf.ppp}}, \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}}, \code{\link{smooth.spline}}, \code{\link{predict.smooth.spline}} } \examples{ # univariate point pattern X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } K <- Kest(X) p <- pcf.fv(K, spar=0.5, method="b") plot(p, main="pair correlation function for simdat") # indicates inhibition at distances r < 0.3 } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/as.linfun.Rd0000644000176200001440000000373513160710571015065 0ustar liggesusers\name{as.linfun} \alias{as.linfun} \alias{as.linfun.linim} \alias{as.linfun.lintess} \title{ Convert Data to a Function on a Linear Network } \description{ Convert some kind of data to an object of class \code{"linfun"} representing a function on a linear network. } \usage{ as.linfun(X, \dots) \method{as.linfun}{linim}(X, \dots) \method{as.linfun}{lintess}(X, \dots, values, navalue=NA) } \arguments{ \item{X}{ Some kind of data to be converted. } \item{\dots}{ Other arguments passed to methods. } \item{values}{ Optional. Vector of function values, one entry associated with each tile of the tessellation. 
} \item{navalue}{ Optional. Function value associated with locations that do not belong to a tile of the tessellation. } } \details{ An object of class \code{"linfun"} represents a function defined on a linear network. The function \code{as.linfun} is generic. The method \code{as.linfun.linim} converts objects of class \code{"linim"} (pixel images on a linear network) to functions on the network. The method \code{as.linfun.lintess} converts a tessellation on a linear network into a function with a different value on each tile of the tessellation. If the argument \code{values} is missing or null, then the function returns factor values identifying which tile contains each given point. If \code{values} is given, it should be a vector with one entry for each tile of the tessellation: any point lying in tile number \code{i} will return the value \code{v[i]}. } \value{ Object of class \code{"linfun"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{linfun}} } \examples{ X <- runiflpp(2, simplenet) Y <- runiflpp(5, simplenet) # image on network D <- density(Y, 0.1, verbose=FALSE) f <- as.linfun(D) f f(X) # tessellation on network Z <- lineardirichlet(Y) g <- as.linfun(Z) g(X) h <- as.linfun(Z, values = runif(5)) h(X) } \keyword{spatial} \keyword{manip} spatstat/man/erosionAny.Rd0000644000176200001440000000362313160710621015306 0ustar liggesusers\name{erosionAny} \alias{erosionAny} \alias{\%(-)\%} %DoNotExport %NAMESPACE export("%(-)%") \title{Morphological Erosion of Windows} \description{ Compute the morphological erosion of one spatial window by another. } \usage{ erosionAny(A, B) A \%(-)\% B } \arguments{ \item{A,B}{ Windows (objects of class \code{"owin"}). } } \value{ Another window (object of class \code{"owin"}). } \details{ The operator \code{A \%(-)\% B} and function \code{erosionAny(A,B)} are synonymous: they both compute the morphological erosion of the window \code{A} by the window \code{B}. 
The morphological erosion \eqn{A \ominus B}{A \%(-)\% B} of region \eqn{A} by region \eqn{B} is the spatial region consisting of all vectors \eqn{z} such that, when \eqn{B} is shifted by the vector \eqn{z}, the result is a subset of \eqn{A}. Equivalently \deqn{ A \ominus B = (A^c \oplus (-B))^c }{ (A^c \%+\% (-B))^c } where \eqn{\oplus}{\%+\%} is the Minkowski sum, \eqn{A^c} denotes the set complement, and \eqn{(-B)} is the reflection of \eqn{B} through the origin, consisting of all vectors \eqn{-b} where \eqn{b} is a point in \eqn{B}. If \code{B} is a disc of radius \code{r}, then \code{erosionAny(A, B)} is equivalent to \code{erosion(A, r)}. See \code{\link{erosion}}. The algorithm currently computes the result as a polygonal window using the \pkg{polyclip} library. It will be quite slow if applied to binary mask windows. } \seealso{ \code{\link{erosion}}, \code{\link{MinkowskiSum}} } \examples{ B <- square(c(-0.1, 0.1)) RminusB <- letterR \%(-)\% B FR <- grow.rectangle(Frame(letterR), 0.3) plot(FR, main="", type="n") plot(letterR, add=TRUE, lwd=2, hatch=TRUE, box=FALSE) plot(RminusB, add=TRUE, col="blue", box=FALSE) plot(shift(B, vec=c(3.49, 2.98)), add=TRUE, border="red", lwd=2) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/beginner.Rd0000644000176200001440000000142413160710571014752 0ustar liggesusers\name{beginner} \alias{beginner} \title{ Print Introduction For Beginners } \description{ Prints an introduction for beginners to the \code{spatstat} package, or another specified package. } \usage{ beginner(package = "spatstat") } \arguments{ \item{package}{ Name of package. } } \details{ This function prints an introduction for beginners to the \pkg{spatstat} package. The function can be executed simply by typing \code{beginner} without parentheses. If the argument \code{package} is given, then the function prints the beginner's help file \code{BEGINNER.txt} from the specified package (if it has one). } \value{ Null.
} \author{\adrian and \rolf } \seealso{ \code{\link{latest.news}} } \examples{ beginner } \keyword{documentation} spatstat/man/spatstat-package.Rd0000644000176200001440000023651213160710571016425 0ustar liggesusers\name{spatstat-package} \alias{spatstat-package} \alias{spatstat} \docType{package} \title{The Spatstat Package} \description{ This is a summary of the features of \pkg{spatstat}, a package in \R for the statistical analysis of spatial point patterns. } \details{ \pkg{spatstat} is a package for the statistical analysis of spatial data. Its main focus is the analysis of spatial patterns of points in two-dimensional space. The points may carry auxiliary data (`marks'), and the spatial region in which the points were recorded may have arbitrary shape. The package is designed to support a complete statistical analysis of spatial data. It supports \itemize{ \item creation, manipulation and plotting of point patterns; \item exploratory data analysis; \item spatial random sampling; \item simulation of point process models; \item parametric model-fitting; \item non-parametric smoothing and regression; \item formal inference (hypothesis tests, confidence intervals); \item model diagnostics. } Apart from two-dimensional point patterns and point processes, \pkg{spatstat} also supports point patterns in three dimensions, point patterns in multidimensional space-time, point patterns on a linear network, patterns of line segments in two dimensions, and spatial tessellations and random sets in two dimensions. 
The package can fit several types of point process models to a point pattern dataset: \itemize{ \item Poisson point process models (by Berman-Turner approximate maximum likelihood or by spatial logistic regression) \item Gibbs/Markov point process models (by Baddeley-Turner approximate maximum pseudolikelihood, Coeurjolly-Rubak logistic likelihood, or Huang-Ogata approximate maximum likelihood) \item Cox/cluster point process models (by Waagepetersen's two-step fitting procedure and minimum contrast, composite likelihood, or Palm likelihood) \item determinantal point process models (by Waagepetersen's two-step fitting procedure and minimum contrast, composite likelihood, or Palm likelihood) } The models may include spatial trend, dependence on covariates, and complicated interpoint interactions. Models are specified by a \code{formula} in the \R language, and are fitted using a function analogous to \code{\link{lm}} and \code{\link{glm}}. Fitted models can be printed, plotted, predicted, simulated and so on. } \section{Getting Started}{ For a quick introduction to \pkg{spatstat}, read the package vignette \emph{Getting started with spatstat} installed with \pkg{spatstat}. To read that document, you can either \itemize{ \item visit \url{cran.r-project.org/web/packages/spatstat} and click on \code{Getting Started with Spatstat} \item start \R, type \code{library(spatstat)} and \code{vignette('getstart')} \item start \R, type \code{help.start()} to open the help browser, and navigate to \code{Packages > spatstat > Vignettes}. } Once you have installed \pkg{spatstat}, start \R and type \code{library(spatstat)}. Then type \code{beginner} for a beginner's introduction, or \code{demo(spatstat)} for a demonstration of the package's capabilities. For a complete course on \pkg{spatstat}, and on statistical analysis of spatial point patterns, read the book by Baddeley, Rubak and Turner (2015). 
Other recommended books on spatial point process methods are Diggle (2014), Gelfand et al (2010) and Illian et al (2008). The \pkg{spatstat} package includes over 50 datasets, which can be useful when learning the package. Type \code{demo(data)} to see plots of all datasets available in the package. Type \code{vignette('datasets')} for detailed background information on these datasets, and plots of each dataset. For information on converting your data into \pkg{spatstat} format, read Chapter 3 of Baddeley, Rubak and Turner (2015). This chapter is available free online, as one of the sample chapters at the book companion website, \url{spatstat.github.io/book}. For information about handling data in \bold{shapefiles}, see Chapter 3, or the Vignette \emph{Handling shapefiles in the spatstat package}, installed with \pkg{spatstat}, accessible as \code{vignette('shapefiles')}. } \section{Updates}{ New versions of \pkg{spatstat} are released every 8 weeks. Users are advised to update their installation of \pkg{spatstat} regularly. Type \code{latest.news} to read the news documentation about changes to the current installed version of \pkg{spatstat}. See the Vignette \emph{Summary of recent updates}, installed with \pkg{spatstat}, which describes the main changes to \pkg{spatstat} since the book (Baddeley, Rubak and Turner, 2015) was published. It is accessible as \code{vignette('updates')}. Type \code{news(package="spatstat")} to read news documentation about all previous versions of the package. } \section{FUNCTIONS AND DATASETS}{ Following is a summary of the main functions and datasets in the \pkg{spatstat} package. Alternatively an alphabetical list of all functions and datasets is available by typing \code{library(help=spatstat)}. For further information on any of these, type \code{help(name)} or \code{?name} where \code{name} is the name of the function or dataset. } \section{CONTENTS:}{ \tabular{ll}{ I. \tab Creating and manipulating data \cr II. 
\tab Exploratory Data Analysis \cr III. \tab Model fitting (Cox and cluster models) \cr IV. \tab Model fitting (Poisson and Gibbs models) \cr V. \tab Model fitting (determinantal point processes)\cr VI. \tab Model fitting (spatial logistic regression)\cr VII. \tab Simulation \cr VIII. \tab Tests and diagnostics\cr IX. \tab Documentation } } \section{I. CREATING AND MANIPULATING DATA}{ \bold{Types of spatial data:} The main types of spatial data supported by \pkg{spatstat} are: \tabular{ll}{ \code{\link{ppp}} \tab point pattern \cr \code{\link{owin}} \tab window (spatial region) \cr \code{\link{im}} \tab pixel image \cr \code{\link{psp}} \tab line segment pattern \cr \code{\link{tess}} \tab tessellation \cr \code{\link{pp3}} \tab three-dimensional point pattern \cr \code{\link{ppx}} \tab point pattern in any number of dimensions \cr \code{\link{lpp}} \tab point pattern on a linear network } \bold{To create a point pattern:} \tabular{ll}{ \code{\link{ppp}} \tab create a point pattern from \eqn{(x,y)} and window information \cr \tab \code{ppp(x, y, xlim, ylim)} for rectangular window\cr \tab \code{ppp(x, y, poly)} for polygonal window \cr \tab \code{ppp(x, y, mask)} for binary image window \cr \code{\link{as.ppp}} \tab convert other types of data to a \code{ppp} object \cr \code{\link{clickppp}} \tab interactively add points to a plot \cr \code{\link{marks<-}}, \code{\%mark\%} \tab attach/reassign marks to a point pattern } \bold{To simulate a random point pattern:} \tabular{ll}{ \code{\link{runifpoint}} \tab generate \eqn{n} independent uniform random points \cr \code{\link{rpoint}} \tab generate \eqn{n} independent random points \cr \code{\link{rmpoint}} \tab generate \eqn{n} independent multitype random points \cr \code{\link{rpoispp}} \tab simulate the (in)homogeneous Poisson point process \cr \code{\link{rmpoispp}} \tab simulate the (in)homogeneous multitype Poisson point process \cr \code{\link{runifdisc}} \tab generate \eqn{n} independent uniform random points 
in disc\cr \code{\link{rstrat}} \tab stratified random sample of points \cr \code{\link{rsyst}} \tab systematic random sample of points \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rMaternI}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model I inhibition process\cr \code{\link{rMaternII}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model II inhibition process\cr \code{\link{rSSI}} \tab simulate Simple Sequential Inhibition process\cr \code{\link{rStrauss}} \tab simulate Strauss process (perfect simulation)\cr \code{\link{rHardcore}} \tab simulate Hard Core process (perfect simulation)\cr \code{\link{rStraussHard}} \tab simulate Strauss-hard core process (perfect simulation)\cr \code{\link{rDiggleGratton}} \tab simulate Diggle-Gratton process (perfect simulation)\cr \code{\link{rDGS}} \tab simulate Diggle-Gates-Stibbard process (perfect simulation)\cr \code{\link{rPenttinen}} \tab simulate Penttinen process (perfect simulation)\cr \code{\link{rNeymanScott}} \tab simulate a general Neyman-Scott process\cr \code{\link{rPoissonCluster}} \tab simulate a general Poisson cluster process\cr \code{\link{rMatClust}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster process\cr \code{\link{rThomas}} \tab simulate the Thomas process \cr \code{\link{rGaussPoisson}} \tab simulate the Gauss-Poisson cluster process\cr \code{\link{rCauchy}} \tab simulate Neyman-Scott Cauchy cluster process \cr \code{\link{rVarGamma}} \tab simulate Neyman-Scott Variance Gamma cluster process \cr \code{\link{rthin}} \tab random thinning \cr \code{\link{rcell}} \tab simulate the Baddeley-Silverman cell process \cr \code{\link{rmh}} \tab simulate Gibbs point process using Metropolis-Hastings \cr \code{\link{simulate.ppm}} \tab simulate Gibbs point process using Metropolis-Hastings \cr \code{\link{runifpointOnLines}} \tab generate \eqn{n} random points along specified line segments \cr \code{\link{rpoisppOnLines}} 
\tab generate Poisson random points along specified line segments } \bold{To randomly change an existing point pattern:} \tabular{ll}{ \code{\link{rshift}} \tab random shifting of points \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rthin}} \tab random thinning \cr \code{\link{rlabel}} \tab random (re)labelling of a multitype point pattern \cr \code{\link{quadratresample}} \tab block resampling } \bold{Standard point pattern datasets:} Datasets in \pkg{spatstat} are lazy-loaded, so you can simply type the name of the dataset to use it; there is no need to type \code{\link{data}(amacrine)} etc. Type \code{demo(data)} to see a display of all the datasets installed with the package. Type \code{vignette('datasets')} for a document giving an overview of all datasets, including background information, and plots. \tabular{ll}{ \code{\link[spatstat.data]{amacrine}} \tab Austin Hughes' rabbit amacrine cells \cr \code{\link[spatstat.data]{anemones}} \tab Upton-Fingleton sea anemones data\cr \code{\link[spatstat.data]{ants}} \tab Harkness-Isham ant nests data\cr \code{\link[spatstat.data]{bdspots}} \tab Breakdown spots in microelectrodes \cr \code{\link[spatstat.data]{bei}} \tab Tropical rainforest trees \cr \code{\link[spatstat.data]{betacells}} \tab Waessle et al. 
cat retinal ganglia data \cr \code{\link[spatstat.data]{bramblecanes}} \tab Bramble Canes data \cr \code{\link[spatstat.data]{bronzefilter}} \tab Bronze Filter Section data \cr \code{\link[spatstat.data]{cells}} \tab Crick-Ripley biological cells data \cr \code{\link[spatstat.data]{chicago}} \tab Chicago crimes \cr \code{\link[spatstat.data]{chorley}} \tab Chorley-Ribble cancer data \cr \code{\link[spatstat.data]{clmfires}} \tab Castilla-La Mancha forest fires \cr \code{\link[spatstat.data]{copper}} \tab Berman-Huntington copper deposits data \cr \code{\link[spatstat.data]{dendrite}} \tab Dendritic spines \cr \code{\link[spatstat.data]{demohyper}} \tab Synthetic point patterns\cr \code{\link[spatstat.data]{demopat}} \tab Synthetic point pattern \cr \code{\link[spatstat.data]{finpines}} \tab Finnish Pines data \cr \code{\link[spatstat.data]{flu}} \tab Influenza virus proteins \cr \code{\link[spatstat.data]{gordon}} \tab People in Gordon Square, London \cr \code{\link[spatstat.data]{gorillas}} \tab Gorilla nest sites \cr \code{\link[spatstat.data]{hamster}} \tab Aherne's hamster tumour data \cr \code{\link[spatstat.data]{humberside}} \tab North Humberside childhood leukaemia data \cr \code{\link[spatstat.data]{hyytiala}} \tab {Mixed forest in \ifelse{latex}{\out{Hyyti{\"a}l{\"a}}}{Hyytiala}, Finland}\cr \code{\link[spatstat.data]{japanesepines}} \tab Japanese Pines data \cr \code{\link[spatstat.data]{lansing}} \tab Lansing Woods data \cr \code{\link[spatstat.data]{longleaf}} \tab Longleaf Pines data \cr \code{\link[spatstat.data]{mucosa}} \tab Cells in gastric mucosa \cr \code{\link[spatstat.data]{murchison}} \tab Murchison gold deposits \cr \code{\link[spatstat.data]{nbfires}} \tab New Brunswick fires data \cr \code{\link[spatstat.data]{nztrees}} \tab Mark-Esler-Ripley trees data \cr \code{\link[spatstat.data]{osteo}} \tab Osteocyte lacunae (3D, replicated) \cr \code{\link[spatstat.data]{paracou}} \tab Kimboto trees in Paracou, French Guiana \cr 
\code{\link[spatstat.data]{ponderosa}} \tab Getis-Franklin ponderosa pine trees data \cr \code{\link[spatstat.data]{pyramidal}} \tab Pyramidal neurons from 31 brains \cr \code{\link[spatstat.data]{redwood}} \tab Strauss-Ripley redwood saplings data \cr \code{\link[spatstat.data]{redwoodfull}} \tab Strauss redwood saplings data (full set) \cr \code{\link[spatstat.data]{residualspaper}} \tab Data from Baddeley et al (2005) \cr \code{\link[spatstat.data]{shapley}} \tab Galaxies in an astronomical survey \cr \code{\link[spatstat.data]{simdat}} \tab Simulated point pattern (inhomogeneous, with interaction) \cr \code{\link[spatstat.data]{spiders}} \tab Spider webs on mortar lines of brick wall \cr \code{\link[spatstat.data]{sporophores}} \tab Mycorrhizal fungi around a tree \cr \code{\link[spatstat.data]{spruces}} \tab Spruce trees in Saxonia \cr \code{\link[spatstat.data]{swedishpines}} \tab Strand-Ripley Swedish pines data \cr \code{\link[spatstat.data]{urkiola}} \tab Urkiola Woods data \cr \code{\link[spatstat.data]{waka}} \tab Trees in Waka national park \cr \code{\link[spatstat.data]{waterstriders}} \tab Insects on water surface } \bold{To manipulate a point pattern:} \tabular{ll}{ \code{\link{plot.ppp}} \tab plot a point pattern (e.g. 
\code{plot(X)}) \cr \code{\link{iplot}} \tab plot a point pattern interactively \cr \code{\link{edit.ppp}} \tab interactive text editor \cr \code{\link{[.ppp}} \tab extract or replace a subset of a point pattern \cr \tab \code{pp[subset]} or \code{pp[subwindow]} \cr \code{\link{subset.ppp}} \tab extract subset of point pattern satisfying a condition \cr \code{\link{superimpose}} \tab combine several point patterns \cr \code{\link{by.ppp}} \tab apply a function to sub-patterns of a point pattern \cr \code{\link{cut.ppp}} \tab classify the points in a point pattern \cr \code{\link{split.ppp}} \tab divide pattern into sub-patterns \cr \code{\link{unmark}} \tab remove marks \cr \code{\link{npoints}} \tab count the number of points \cr \code{\link{coords}} \tab extract coordinates, change coordinates \cr \code{\link{marks}} \tab extract marks, change marks or attach marks \cr \code{\link{rotate}} \tab rotate pattern \cr \code{\link{shift} } \tab translate pattern \cr \code{\link{flipxy} } \tab swap \eqn{x} and \eqn{y} coordinates \cr \code{\link{reflect} } \tab reflect in the origin \cr \code{\link{periodify} } \tab make several translated copies \cr \code{\link{affine}} \tab apply affine transformation\cr \code{\link{scalardilate}} \tab apply scalar dilation\cr \code{\link{density.ppp}} \tab kernel estimation of point pattern intensity\cr \code{\link{Smooth.ppp}} \tab kernel smoothing of marks of point pattern\cr \code{\link{nnmark}} \tab mark value of nearest data point\cr \code{\link{sharpen.ppp}} \tab data sharpening\cr \code{\link{identify.ppp}} \tab interactively identify points \cr \code{\link{unique.ppp}} \tab remove duplicate points \cr \code{\link{duplicated.ppp}} \tab determine which points are duplicates \cr \code{\link{connected.ppp}} \tab find clumps of points \cr \code{\link{dirichlet}} \tab compute Dirichlet-Voronoi tessellation \cr \code{\link{delaunay}} \tab compute Delaunay triangulation \cr \code{\link{delaunayDistance}} \tab graph distance in 
Delaunay triangulation \cr \code{\link{convexhull}} \tab compute convex hull \cr \code{\link{discretise}} \tab discretise coordinates \cr \code{\link{pixellate.ppp}} \tab approximate point pattern by pixel image \cr \code{\link{as.im.ppp}} \tab approximate point pattern by pixel image } See \code{\link{spatstat.options}} to control plotting behaviour. \bold{To create a window:} An object of class \code{"owin"} describes a spatial region (a window of observation). \tabular{ll}{ \code{\link{owin}} \tab Create a window object \cr \tab \code{owin(xlim, ylim)} for rectangular window \cr \tab \code{owin(poly)} for polygonal window \cr \tab \code{owin(mask)} for binary image window \cr \code{\link{Window}} \tab Extract window of another object \cr \code{\link{Frame}} \tab Extract the containing rectangle ('frame') of another object \cr \code{\link{as.owin}} \tab Convert other data to a window object \cr \code{\link{square}} \tab make a square window \cr \code{\link{disc}} \tab make a circular window \cr \code{\link{ellipse}} \tab make an elliptical window \cr \code{\link{ripras}} \tab Ripley-Rasson estimator of window, given only the points \cr \code{\link{convexhull}} \tab compute convex hull of something \cr \code{\link[spatstat.data]{letterR}} \tab polygonal window in the shape of the \R logo \cr \code{\link{clickpoly}} \tab interactively draw a polygonal window \cr \code{\link{clickbox}} \tab interactively draw a rectangle } \bold{To manipulate a window:} \tabular{ll}{ \code{\link{plot.owin}} \tab plot a window. 
\cr \tab \code{plot(W)}\cr \code{\link{boundingbox}} \tab Find a tight bounding box for the window \cr \code{\link{erosion}} \tab erode window by a distance r\cr \code{\link{dilation}} \tab dilate window by a distance r\cr \code{\link{closing}} \tab close window by a distance r\cr \code{\link{opening}} \tab open window by a distance r\cr \code{\link{border}} \tab difference between window and its erosion/dilation \cr \code{\link{complement.owin}} \tab invert (swap inside and outside)\cr \code{\link{simplify.owin}} \tab approximate a window by a simple polygon \cr \code{\link{rotate}} \tab rotate window \cr \code{\link{flipxy}} \tab swap \eqn{x} and \eqn{y} coordinates \cr \code{\link{shift} } \tab translate window \cr \code{\link{periodify} } \tab make several translated copies \cr \code{\link{affine}} \tab apply affine transformation \cr \code{\link{as.data.frame.owin}} \tab convert window to data frame } \bold{Digital approximations:} \tabular{ll}{ \code{\link{as.mask}} \tab Make a discrete pixel approximation of a given window \cr \code{\link{as.im.owin}} \tab convert window to pixel image \cr \code{\link{pixellate.owin}} \tab convert window to pixel image \cr \code{\link{commonGrid}} \tab find common pixel grid for windows \cr \code{\link{nearest.raster.point}} \tab map continuous coordinates to raster locations\cr \code{\link{raster.x}} \tab raster x coordinates \cr \code{\link{raster.y}} \tab raster y coordinates \cr \code{\link{raster.xy}} \tab raster x and y coordinates \cr \code{\link{as.polygonal}} \tab convert pixel mask to polygonal window } See \code{\link{spatstat.options}} to control the approximation \bold{Geometrical computations with windows:} \tabular{ll}{ \code{\link{edges}} \tab extract boundary edges \cr \code{\link{intersect.owin}} \tab intersection of two windows\cr \code{\link{union.owin}} \tab union of two windows\cr \code{\link{setminus.owin}} \tab set subtraction of two windows\cr \code{\link{inside.owin}} \tab determine whether a point 
is inside a window\cr \code{\link{area.owin}} \tab compute area \cr \code{\link{perimeter}} \tab compute perimeter length \cr \code{\link{diameter.owin}} \tab compute diameter\cr \code{\link{incircle}} \tab find largest circle inside a window \cr \code{\link{inradius}} \tab radius of incircle \cr \code{\link{connected.owin}} \tab find connected components of window \cr \code{\link{eroded.areas}} \tab compute areas of eroded windows\cr \code{\link{dilated.areas}} \tab compute areas of dilated windows\cr \code{\link{bdist.points}} \tab compute distances from data points to window boundary \cr \code{\link{bdist.pixels}} \tab compute distances from all pixels to window boundary \cr \code{\link{bdist.tiles}} \tab boundary distance for each tile in tessellation \cr \code{\link{distmap.owin}} \tab distance transform image \cr \code{\link{distfun.owin}} \tab distance transform \cr \code{\link{centroid.owin}} \tab compute centroid (centre of mass) of window\cr \code{\link{is.subset.owin}} \tab determine whether one window contains another \cr \code{\link{is.convex}} \tab determine whether a window is convex \cr \code{\link{convexhull}} \tab compute convex hull \cr \code{\link{triangulate.owin}} \tab decompose into triangles \cr \code{\link{as.mask}} \tab pixel approximation of window \cr \code{\link{as.polygonal}} \tab polygonal approximation of window \cr \code{\link{is.rectangle}} \tab test whether window is a rectangle \cr \code{\link{is.polygonal}} \tab test whether window is polygonal \cr \code{\link{is.mask}} \tab test whether window is a mask \cr \code{\link{setcov}} \tab spatial covariance function of window \cr \code{\link{pixelcentres}} \tab extract centres of pixels in mask \cr \code{\link{clickdist}} \tab measure distance between two points clicked by user } \bold{Pixel images:} An object of class \code{"im"} represents a pixel image. 
Such objects are returned by some of the functions in \pkg{spatstat} including \code{\link{Kmeasure}}, \code{\link{setcov}} and \code{\link{density.ppp}}. \tabular{ll}{ \code{\link{im}} \tab create a pixel image\cr \code{\link{as.im}} \tab convert other data to a pixel image\cr \code{\link{pixellate}} \tab convert other data to a pixel image\cr \code{\link{as.matrix.im}} \tab convert pixel image to matrix\cr \code{\link{as.data.frame.im}} \tab convert pixel image to data frame\cr \code{\link{as.function.im}} \tab convert pixel image to function\cr \code{\link{plot.im}} \tab plot a pixel image on screen as a digital image\cr \code{\link{contour.im}} \tab draw contours of a pixel image \cr \code{\link{persp.im}} \tab draw perspective plot of a pixel image \cr \code{\link{rgbim}} \tab create colour-valued pixel image \cr \code{\link{hsvim}} \tab create colour-valued pixel image \cr \code{\link{[.im}} \tab extract a subset of a pixel image\cr \code{\link{[<-.im}} \tab replace a subset of a pixel image\cr \code{\link{rotate.im}} \tab rotate pixel image \cr \code{\link{shift.im}} \tab apply vector shift to pixel image \cr \code{\link{affine.im}} \tab apply affine transformation to image \cr \code{X} \tab print very basic information about image \code{X}\cr \code{\link{summary}(X)} \tab summary of image \code{X} \cr \code{\link{hist.im}} \tab histogram of image \cr \code{\link{mean.im}} \tab mean pixel value of image \cr \code{\link{integral.im}} \tab integral of pixel values \cr \code{\link{quantile.im}} \tab quantiles of image \cr \code{\link{cut.im}} \tab convert numeric image to factor image \cr \code{\link{is.im}} \tab test whether an object is a pixel image\cr \code{\link{interp.im}} \tab interpolate a pixel image\cr \code{\link{blur}} \tab apply Gaussian blur to image\cr \code{\link{Smooth.im}} \tab apply Gaussian blur to image\cr \code{\link{connected.im}} \tab find connected components \cr \code{\link{compatible.im}} \tab test whether two images have compatible 
dimensions \cr \code{\link{harmonise.im}} \tab make images compatible \cr \code{\link{commonGrid}} \tab find a common pixel grid for images \cr \code{\link{eval.im}} \tab evaluate any expression involving images\cr \code{\link{scaletointerval}} \tab rescale pixel values \cr \code{\link{zapsmall.im}} \tab set very small pixel values to zero \cr \code{\link{levelset}} \tab level set of an image\cr \code{\link{solutionset}} \tab region where an expression is true \cr \code{\link{imcov}} \tab spatial covariance function of image \cr \code{\link{convolve.im}} \tab spatial convolution of images \cr \code{\link{transect.im}} \tab line transect of image \cr \code{\link{pixelcentres}} \tab extract centres of pixels \cr \code{\link{transmat}} \tab convert matrix of pixel values \cr \tab to a different indexing convention \cr \code{\link{rnoise}} \tab random pixel noise } \bold{Line segment patterns} An object of class \code{"psp"} represents a pattern of straight line segments. \tabular{ll}{ \code{\link{psp}} \tab create a line segment pattern \cr \code{\link{as.psp}} \tab convert other data into a line segment pattern \cr \code{\link{edges}} \tab extract edges of a window \cr \code{\link{is.psp}} \tab determine whether a dataset has class \code{"psp"} \cr \code{\link{plot.psp}} \tab plot a line segment pattern \cr \code{\link{print.psp}} \tab print basic information \cr \code{\link{summary.psp}} \tab print summary information \cr \code{\link{[.psp}} \tab extract a subset of a line segment pattern \cr \code{\link{as.data.frame.psp}} \tab convert line segment pattern to data frame \cr \code{\link{marks.psp}} \tab extract marks of line segments \cr \code{\link{marks<-.psp}} \tab assign new marks to line segments \cr \code{\link{unmark.psp}} \tab delete marks from line segments \cr \code{\link{midpoints.psp}} \tab compute the midpoints of line segments \cr \code{\link{endpoints.psp}} \tab extract the endpoints of line segments \cr \code{\link{lengths.psp}} \tab compute the 
lengths of line segments \cr \code{\link{angles.psp}} \tab compute the orientation angles of line segments \cr \code{\link{superimpose}} \tab combine several line segment patterns \cr \code{\link{flipxy}} \tab swap \eqn{x} and \eqn{y} coordinates \cr \code{\link{rotate.psp}} \tab rotate a line segment pattern \cr \code{\link{shift.psp}} \tab shift a line segment pattern \cr \code{\link{periodify}} \tab make several shifted copies \cr \code{\link{affine.psp}} \tab apply an affine transformation \cr \code{\link{pixellate.psp}} \tab approximate line segment pattern by pixel image \cr \code{\link{as.mask.psp}} \tab approximate line segment pattern by binary mask \cr \code{\link{distmap.psp}} \tab compute the distance map of a line segment pattern \cr \code{\link{distfun.psp}} \tab compute the distance map of a line segment pattern \cr \code{\link{density.psp}} \tab kernel smoothing of line segments\cr \code{\link{selfcrossing.psp}} \tab find crossing points between line segments \cr \code{\link{selfcut.psp}} \tab cut segments where they cross \cr \code{\link{crossing.psp}} \tab find crossing points between two line segment patterns \cr \code{\link{nncross}} \tab find distance to nearest line segment from a given point\cr \code{\link{nearestsegment}} \tab find line segment closest to a given point \cr \code{\link{project2segment}} \tab find location along a line segment closest to a given point \cr \code{\link{pointsOnLines}} \tab generate points evenly spaced along line segment \cr \code{\link{rpoisline}} \tab generate a realisation of the Poisson line process inside a window\cr \code{\link{rlinegrid}} \tab generate a random array of parallel lines through a window } \bold{Tessellations} An object of class \code{"tess"} represents a tessellation. 
\tabular{ll}{ \code{\link{tess}} \tab create a tessellation \cr \code{\link{quadrats}} \tab create a tessellation of rectangles\cr \code{\link{hextess}} \tab create a tessellation of hexagons \cr \code{\link{quantess}} \tab quantile tessellation \cr \code{\link{as.tess}} \tab convert other data to a tessellation \cr \code{\link{plot.tess}} \tab plot a tessellation \cr \code{\link{tiles}} \tab extract all the tiles of a tessellation \cr \code{\link{[.tess}} \tab extract some tiles of a tessellation \cr \code{\link{[<-.tess}} \tab change some tiles of a tessellation \cr \code{\link{intersect.tess}} \tab intersect two tessellations \cr \tab or restrict a tessellation to a window \cr \code{\link{chop.tess}} \tab subdivide a tessellation by a line \cr \code{\link{dirichlet}} \tab compute Dirichlet-Voronoi tessellation of points\cr \code{\link{delaunay}} \tab compute Delaunay triangulation of points\cr \code{\link{rpoislinetess}} \tab generate tessellation using Poisson line process \cr \code{\link{tile.areas}} \tab area of each tile in tessellation \cr \code{\link{bdist.tiles}} \tab boundary distance for each tile in tessellation } \bold{Three-dimensional point patterns} An object of class \code{"pp3"} represents a three-dimensional point pattern in a rectangular box. The box is represented by an object of class \code{"box3"}. 
\tabular{ll}{ \code{\link{pp3}} \tab create a 3-D point pattern \cr \code{\link{plot.pp3}} \tab plot a 3-D point pattern \cr \code{\link{coords}} \tab extract coordinates \cr \code{\link{as.hyperframe}} \tab extract coordinates \cr \code{\link{subset.pp3}} \tab extract subset of 3-D point pattern \cr \code{\link{unitname.pp3}} \tab name of unit of length \cr \code{\link{npoints}} \tab count the number of points \cr \code{\link{runifpoint3}} \tab generate uniform random points in 3-D \cr \code{\link{rpoispp3}} \tab generate Poisson random points in 3-D \cr \code{\link{envelope.pp3}} \tab generate simulation envelopes for 3-D pattern \cr \code{\link{box3}} \tab create a 3-D rectangular box \cr \code{\link{as.box3}} \tab convert data to 3-D rectangular box \cr \code{\link{unitname.box3}} \tab name of unit of length \cr \code{\link{diameter.box3}} \tab diameter of box \cr \code{\link{volume.box3}} \tab volume of box \cr \code{\link{shortside.box3}} \tab shortest side of box \cr \code{\link{eroded.volumes}} \tab volumes of erosions of box } \bold{Multi-dimensional space-time point patterns} An object of class \code{"ppx"} represents a point pattern in multi-dimensional space and/or time. 
\tabular{ll}{ \code{\link{ppx}} \tab create a multidimensional space-time point pattern \cr \code{\link{coords}} \tab extract coordinates \cr \code{\link{as.hyperframe}} \tab extract coordinates \cr \code{\link{subset.ppx}} \tab extract subset \cr \code{\link{unitname.ppx}} \tab name of unit of length \cr \code{\link{npoints}} \tab count the number of points \cr \code{\link{runifpointx}} \tab generate uniform random points \cr \code{\link{rpoisppx}} \tab generate Poisson random points \cr \code{\link{boxx}} \tab define multidimensional box \cr \code{\link{diameter.boxx}} \tab diameter of box \cr \code{\link{volume.boxx}} \tab volume of box \cr \code{\link{shortside.boxx}} \tab shortest side of box \cr \code{\link{eroded.volumes.boxx}} \tab volumes of erosions of box } \bold{Point patterns on a linear network} An object of class \code{"linnet"} represents a linear network (for example, a road network). \tabular{ll}{ \code{\link{linnet}} \tab create a linear network \cr \code{\link{clickjoin}} \tab interactively join vertices in network \cr \code{\link{iplot.linnet}} \tab interactively plot network \cr \code{\link[spatstat.data]{simplenet}} \tab simple example of network \cr \code{\link{lineardisc}} \tab disc in a linear network \cr \code{\link{delaunayNetwork}} \tab network of Delaunay triangulation \cr \code{\link{dirichletNetwork}} \tab network of Dirichlet edges \cr \code{\link{methods.linnet}} \tab methods for \code{linnet} objects\cr \code{\link{vertices.linnet}} \tab nodes of network \cr \code{\link{pixellate.linnet}} \tab approximate by pixel image } An object of class \code{"lpp"} represents a point pattern on a linear network (for example, road accidents on a road network). 
\tabular{ll}{ \code{\link{lpp}} \tab create a point pattern on a linear network \cr \code{\link{methods.lpp}} \tab methods for \code{lpp} objects \cr \code{\link{subset.lpp}} \tab method for \code{subset} \cr \code{\link{rpoislpp}} \tab simulate Poisson points on linear network \cr \code{\link{runiflpp}} \tab simulate random points on a linear network \cr \code{\link[spatstat.data]{chicago}} \tab Chicago crime data \cr \code{\link[spatstat.data]{dendrite}} \tab Dendritic spines data \cr \code{\link[spatstat.data]{spiders}} \tab Spider webs on mortar lines of brick wall } \bold{Hyperframes} A hyperframe is like a data frame, except that the entries may be objects of any kind. \tabular{ll}{ \code{\link{hyperframe}} \tab create a hyperframe \cr \code{\link{as.hyperframe}} \tab convert data to hyperframe \cr \code{\link{plot.hyperframe}} \tab plot hyperframe \cr \code{\link{with.hyperframe}} \tab evaluate expression using each row of hyperframe \cr \code{\link{cbind.hyperframe}} \tab combine hyperframes by columns\cr \code{\link{rbind.hyperframe}} \tab combine hyperframes by rows\cr \code{\link{as.data.frame.hyperframe}} \tab convert hyperframe to data frame \cr \code{\link{subset.hyperframe}} \tab method for \code{subset} \cr \code{\link{head.hyperframe}} \tab first few rows of hyperframe \cr \code{\link{tail.hyperframe}} \tab last few rows of hyperframe } \bold{Layered objects} A layered object represents data that should be plotted in successive layers, for example, a background and a foreground. \tabular{ll}{ \code{\link{layered}} \tab create layered object \cr \code{\link{plot.layered}} \tab plot layered object\cr \code{\link{[.layered}} \tab extract subset of layered object } \bold{Colour maps} A colour map is a mechanism for associating colours with data. It can be regarded as a function, mapping data to colours. Using a \code{colourmap} object in a plot command ensures that the mapping from numbers to colours is the same in different plots. 
\tabular{ll}{ \code{\link{colourmap}} \tab create a colour map \cr \code{\link{plot.colourmap}} \tab plot the colour map only\cr \code{\link{tweak.colourmap}} \tab alter individual colour values \cr \code{\link{interp.colourmap}} \tab make a smooth transition between colours \cr \code{\link{beachcolourmap}} \tab one special colour map } } \section{II. EXPLORATORY DATA ANALYSIS}{ \bold{Inspection of data:} \tabular{ll}{ \code{\link{summary}(X)} \tab print useful summary of point pattern \code{X}\cr \code{X} \tab print basic description of point pattern \code{X} \cr \code{any(duplicated(X))} \tab check for duplicated points in pattern \code{X} \cr \code{\link{istat}(X)} \tab Interactive exploratory analysis \cr \code{\link{View}(X)} \tab spreadsheet-style viewer } \bold{Classical exploratory tools:} \tabular{ll}{ \code{\link{clarkevans}} \tab Clark and Evans aggregation index \cr \code{\link{fryplot}} \tab Fry plot \cr \code{\link{miplot}} \tab Morisita Index plot } \bold{Smoothing:} \tabular{ll}{ \code{\link{density.ppp}} \tab kernel smoothed density/intensity\cr \code{\link{relrisk}} \tab kernel estimate of relative risk\cr \code{\link{Smooth.ppp}} \tab spatial interpolation of marks \cr \code{\link{bw.diggle}} \tab cross-validated bandwidth selection for \code{\link{density.ppp}}\cr \code{\link{bw.ppl}} \tab likelihood cross-validated bandwidth selection for \code{\link{density.ppp}}\cr \code{\link{bw.scott}} \tab Scott's rule of thumb for density estimation\cr \code{\link{bw.relrisk}} \tab cross-validated bandwidth selection for \code{\link{relrisk}} \cr \code{\link{bw.smoothppp}} \tab cross-validated bandwidth selection for \code{\link{Smooth.ppp}} \cr \code{\link{bw.frac}} \tab bandwidth selection using window geometry\cr \code{\link{bw.stoyan}} \tab Stoyan's rule of thumb for bandwidth for \code{\link{pcf}} } \bold{Modern exploratory tools:} \tabular{ll}{ \code{\link{clusterset}} \tab Allard-Fraley feature detection \cr \code{\link{nnclean}} \tab Byers-Raftery 
feature detection \cr \code{\link{sharpen.ppp}} \tab Choi-Hall data sharpening \cr \code{\link{rhohat}} \tab Kernel estimate of covariate effect\cr \code{\link{rho2hat}} \tab Kernel estimate of effect of two covariates\cr \code{\link{spatialcdf}} \tab Spatial cumulative distribution function\cr \code{\link{roc}} \tab Receiver operating characteristic curve } \bold{Summary statistics for a point pattern:} Type \code{demo(sumfun)} for a demonstration of many of the summary statistics. \tabular{ll}{ \code{\link{intensity}} \tab Mean intensity \cr \code{\link{quadratcount}} \tab Quadrat counts \cr \code{\link{intensity.quadratcount}} \tab Mean intensity in quadrats \cr \code{\link{Fest}} \tab empty space function \eqn{F} \cr \code{\link{Gest}} \tab nearest neighbour distribution function \eqn{G} \cr \code{\link{Jest}} \tab \eqn{J}-function \eqn{J = (1-G)/(1-F)} \cr \code{\link{Kest}} \tab Ripley's \eqn{K}-function\cr \code{\link{Lest}} \tab Besag \eqn{L}-function\cr \code{\link{Tstat}} \tab Third order \eqn{T}-function \cr \code{\link{allstats}} \tab all four functions \eqn{F}, \eqn{G}, \eqn{J}, \eqn{K} \cr \code{\link{pcf}} \tab pair correlation function \cr \code{\link{Kinhom}} \tab \eqn{K} for inhomogeneous point patterns \cr \code{\link{Linhom}} \tab \eqn{L} for inhomogeneous point patterns \cr \code{\link{pcfinhom}} \tab pair correlation for inhomogeneous patterns\cr \code{\link{Finhom}} \tab \eqn{F} for inhomogeneous point patterns \cr \code{\link{Ginhom}} \tab \eqn{G} for inhomogeneous point patterns \cr \code{\link{Jinhom}} \tab \eqn{J} for inhomogeneous point patterns \cr \code{\link{localL}} \tab Getis-Franklin neighbourhood density function\cr \code{\link{localK}} \tab neighbourhood K-function\cr \code{\link{localpcf}} \tab local pair correlation function\cr \code{\link{localKinhom}} \tab local \eqn{K} for inhomogeneous point patterns \cr \code{\link{localLinhom}} \tab local \eqn{L} for inhomogeneous point patterns \cr \code{\link{localpcfinhom}} \tab local 
pair correlation for inhomogeneous patterns\cr \code{\link{Ksector}} \tab Directional \eqn{K}-function\cr \code{\link{Kscaled}} \tab locally scaled \eqn{K}-function \cr \code{\link{Kest.fft}} \tab fast \eqn{K}-function using FFT for large datasets \cr \code{\link{Kmeasure}} \tab reduced second moment measure \cr \code{\link{envelope}} \tab simulation envelopes for a summary function \cr \code{\link{varblock}} \tab variances and confidence intervals\cr \tab for a summary function \cr \code{\link{lohboot}} \tab bootstrap for a summary function } Related facilities: \tabular{ll}{ \code{\link{plot.fv}} \tab plot a summary function\cr \code{\link{eval.fv}} \tab evaluate any expression involving summary functions\cr \code{\link{harmonise.fv}} \tab make functions compatible \cr \code{\link{eval.fasp}} \tab evaluate any expression involving an array of functions\cr \code{\link{with.fv}} \tab evaluate an expression for a summary function\cr \code{\link{Smooth.fv}} \tab apply smoothing to a summary function\cr \code{\link{deriv.fv}} \tab calculate derivative of a summary function\cr \code{\link{pool.fv}} \tab pool several estimates of a summary function\cr \code{\link{nndist}} \tab nearest neighbour distances \cr \code{\link{nnwhich}} \tab find nearest neighbours \cr \code{\link{pairdist}} \tab distances between all pairs of points\cr \code{\link{crossdist}} \tab distances between points in two patterns\cr \code{\link{nncross}} \tab nearest neighbours between two point patterns \cr \code{\link{exactdt}} \tab distance from any location to nearest data point\cr \code{\link{distmap}} \tab distance map image\cr \code{\link{distfun}} \tab distance map function\cr \code{\link{nnmap}} \tab nearest point image \cr \code{\link{nnfun}} \tab nearest point function \cr \code{\link{density.ppp}} \tab kernel smoothed density\cr \code{\link{Smooth.ppp}} \tab spatial interpolation of marks \cr \code{\link{relrisk}} \tab kernel estimate of relative risk\cr \code{\link{sharpen.ppp}} \tab data 
sharpening \cr \code{\link{rknn}} \tab theoretical distribution of nearest neighbour distance } \bold{Summary statistics for a multitype point pattern:} A multitype point pattern is represented by an object \code{X} of class \code{"ppp"} such that \code{marks(X)} is a factor. \tabular{ll}{ \code{\link{relrisk}} \tab kernel estimation of relative risk \cr \code{\link{scan.test}} \tab spatial scan test of elevated risk \cr \code{\link{Gcross},\link{Gdot},\link{Gmulti}} \tab multitype nearest neighbour distributions \eqn{G_{ij}, G_{i\bullet}}{G[i,j], G[i.]} \cr \code{\link{Kcross},\link{Kdot}, \link{Kmulti}} \tab multitype \eqn{K}-functions \eqn{K_{ij}, K_{i\bullet}}{K[i,j], K[i.]} \cr \code{\link{Lcross},\link{Ldot}} \tab multitype \eqn{L}-functions \eqn{L_{ij}, L_{i\bullet}}{L[i,j], L[i.]} \cr \code{\link{Jcross},\link{Jdot},\link{Jmulti}} \tab multitype \eqn{J}-functions \eqn{J_{ij}, J_{i\bullet}}{J[i,j],J[i.]} \cr \code{\link{pcfcross}} \tab multitype pair correlation function \eqn{g_{ij}}{g[i,j]} \cr \code{\link{pcfdot}} \tab multitype pair correlation function \eqn{g_{i\bullet}}{g[i.]} \cr \code{\link{pcfmulti}} \tab general pair correlation function \cr \code{\link{markconnect}} \tab marked connection function \eqn{p_{ij}}{p[i,j]} \cr \code{\link{alltypes}} \tab estimates of the above for all \eqn{i,j} pairs \cr \code{\link{Iest}} \tab multitype \eqn{I}-function\cr \code{\link{Kcross.inhom},\link{Kdot.inhom}} \tab inhomogeneous counterparts of \code{Kcross}, \code{Kdot} \cr \code{\link{Lcross.inhom},\link{Ldot.inhom}} \tab inhomogeneous counterparts of \code{Lcross}, \code{Ldot} \cr \code{\link{pcfcross.inhom},\link{pcfdot.inhom}} \tab inhomogeneous counterparts of \code{pcfcross}, \code{pcfdot} } \bold{Summary statistics for a marked point pattern:} A marked point pattern is represented by an object \code{X} of class \code{"ppp"} with a component \code{X$marks}. The entries in the vector \code{X$marks} may be numeric, complex, string or any other atomic type. 
For numeric marks, there are the following functions: \tabular{ll}{ \code{\link{markmean}} \tab smoothed local average of marks \cr \code{\link{markvar}} \tab smoothed local variance of marks \cr \code{\link{markcorr}} \tab mark correlation function \cr \code{\link{markcrosscorr}} \tab mark cross-correlation function \cr \code{\link{markvario}} \tab mark variogram \cr \code{\link{Kmark}} \tab mark-weighted \eqn{K} function \cr \code{\link{Emark}} \tab mark independence diagnostic \eqn{E(r)} \cr \code{\link{Vmark}} \tab mark independence diagnostic \eqn{V(r)} \cr \code{\link{nnmean}} \tab nearest neighbour mean index \cr \code{\link{nnvario}} \tab nearest neighbour mark variance index } For marks of any type, there are the following: \tabular{ll}{ \code{\link{Gmulti}} \tab multitype nearest neighbour distribution \cr \code{\link{Kmulti}} \tab multitype \eqn{K}-function \cr \code{\link{Jmulti}} \tab multitype \eqn{J}-function } Alternatively use \code{\link{cut.ppp}} to convert a marked point pattern to a multitype point pattern. \bold{Programming tools:} \tabular{ll}{ \code{\link{applynbd}} \tab apply function to every neighbourhood in a point pattern \cr \code{\link{markstat}} \tab apply function to the marks of neighbours in a point pattern \cr \code{\link{marktable}} \tab tabulate the marks of neighbours in a point pattern \cr \code{\link{pppdist}} \tab find the optimal match between two point patterns } \bold{Summary statistics for a point pattern on a linear network:} These are for point patterns on a linear network (class \code{lpp}). 
For unmarked patterns: \tabular{ll}{ \code{\link{linearK}} \tab \eqn{K} function on linear network \cr \code{\link{linearKinhom}} \tab inhomogeneous \eqn{K} function on linear network \cr \code{\link{linearpcf}} \tab pair correlation function on linear network \cr \code{\link{linearpcfinhom}} \tab inhomogeneous pair correlation on linear network } For multitype patterns: \tabular{ll}{ \code{\link{linearKcross}} \tab \eqn{K} function between two types of points \cr \code{\link{linearKdot}} \tab \eqn{K} function from one type to any type \cr \code{\link{linearKcross.inhom}} \tab Inhomogeneous version of \code{\link{linearKcross}} \cr \code{\link{linearKdot.inhom}} \tab Inhomogeneous version of \code{\link{linearKdot}} \cr \code{\link{linearmarkconnect}} \tab Mark connection function on linear network \cr \code{\link{linearmarkequal}} \tab Mark equality function on linear network \cr \code{\link{linearpcfcross}} \tab Pair correlation between two types of points \cr \code{\link{linearpcfdot}} \tab Pair correlation from one type to any type \cr \code{\link{linearpcfcross.inhom}} \tab Inhomogeneous version of \code{\link{linearpcfcross}} \cr \code{\link{linearpcfdot.inhom}} \tab Inhomogeneous version of \code{\link{linearpcfdot}} } Related facilities: \tabular{ll}{ \code{\link{pairdist.lpp}} \tab distances between pairs \cr \code{\link{crossdist.lpp}} \tab distances between pairs \cr \code{\link{nndist.lpp}} \tab nearest neighbour distances \cr \code{\link{nncross.lpp}} \tab nearest neighbour distances \cr \code{\link{nnwhich.lpp}} \tab find nearest neighbours \cr \code{\link{nnfun.lpp}} \tab find nearest data point \cr \code{\link{density.lpp}} \tab kernel smoothing estimator of intensity \cr \code{\link{distfun.lpp}} \tab distance transform \cr \code{\link{envelope.lpp}} \tab simulation envelopes \cr \code{\link{rpoislpp}} \tab simulate Poisson points on linear network \cr \code{\link{runiflpp}} \tab simulate random points on a linear network } It is also possible to 
fit point process models to \code{lpp} objects. See Section IV. \bold{Summary statistics for a three-dimensional point pattern:} These are for 3-dimensional point pattern objects (class \code{pp3}). \tabular{ll}{ \code{\link{F3est}} \tab empty space function \eqn{F} \cr \code{\link{G3est}} \tab nearest neighbour function \eqn{G} \cr \code{\link{K3est}} \tab \eqn{K}-function \cr \code{\link{pcf3est}} \tab pair correlation function } Related facilities: \tabular{ll}{ \code{\link{envelope.pp3}} \tab simulation envelopes \cr \code{\link{pairdist.pp3}} \tab distances between all pairs of points \cr \code{\link{crossdist.pp3}} \tab distances between points in two patterns \cr \code{\link{nndist.pp3}} \tab nearest neighbour distances \cr \code{\link{nnwhich.pp3}} \tab find nearest neighbours \cr \code{\link{nncross.pp3}} \tab find nearest neighbours in another pattern } \bold{Computations for multi-dimensional point pattern:} These are for multi-dimensional space-time point pattern objects (class \code{ppx}). \tabular{ll}{ \code{\link{pairdist.ppx}} \tab distances between all pairs of points \cr \code{\link{crossdist.ppx}} \tab distances between points in two patterns \cr \code{\link{nndist.ppx}} \tab nearest neighbour distances \cr \code{\link{nnwhich.ppx}} \tab find nearest neighbours } \bold{Summary statistics for random sets:} These work for point patterns (class \code{ppp}), line segment patterns (class \code{psp}) or windows (class \code{owin}). \tabular{ll}{ \code{\link{Hest}} \tab spherical contact distribution \eqn{H} \cr \code{\link{Gfox}} \tab Foxall \eqn{G}-function \cr \code{\link{Jfox}} \tab Foxall \eqn{J}-function } } \section{III. MODEL FITTING (COX AND CLUSTER MODELS)}{ Cluster process models (with homogeneous or inhomogeneous intensity) and Cox processes can be fitted by the function \code{\link{kppm}}. Its result is an object of class \code{"kppm"}. The fitted model can be printed, plotted, predicted, simulated and updated. 
\tabular{ll}{ \code{\link{kppm}} \tab Fit model\cr \code{\link{plot.kppm}} \tab Plot the fitted model\cr \code{\link{summary.kppm}} \tab Summarise the fitted model\cr \code{\link{fitted.kppm}} \tab Compute fitted intensity \cr \code{\link{predict.kppm}} \tab Compute fitted intensity \cr \code{\link{update.kppm}} \tab Update the model \cr \code{\link{improve.kppm}} \tab Refine the estimate of trend \cr \code{\link{simulate.kppm}} \tab Generate simulated realisations \cr \code{\link{vcov.kppm}} \tab Variance-covariance matrix of coefficients \cr \code{\link[spatstat:methods.kppm]{coef.kppm}} \tab Extract trend coefficients \cr \code{\link[spatstat:methods.kppm]{formula.kppm}} \tab Extract trend formula \cr \code{\link{parameters}} \tab Extract all model parameters \cr \code{\link{clusterfield}} \tab Compute offspring density \cr \code{\link{clusterradius}} \tab Radius of support of offspring density \cr \code{\link{Kmodel.kppm}} \tab \eqn{K} function of fitted model \cr \code{\link{pcfmodel.kppm}} \tab Pair correlation of fitted model } For model selection, you can also use the generic functions \code{\link{step}}, \code{\link{drop1}} and \code{\link{AIC}} on fitted point process models. The theoretical models can also be simulated, for any choice of parameter values, using \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, and \code{\link{rLGCP}}. 
Lower-level fitting functions include: \tabular{ll}{ \code{\link{lgcp.estK}} \tab fit a log-Gaussian Cox process model\cr \code{\link{lgcp.estpcf}} \tab fit a log-Gaussian Cox process model\cr \code{\link{thomas.estK}} \tab fit the Thomas process model \cr \code{\link{thomas.estpcf}} \tab fit the Thomas process model \cr \code{\link{matclust.estK}} \tab fit the Matern Cluster process model \cr \code{\link{matclust.estpcf}} \tab fit the Matern Cluster process model \cr \code{\link{cauchy.estK}} \tab fit a Neyman-Scott Cauchy cluster process \cr \code{\link{cauchy.estpcf}} \tab fit a Neyman-Scott Cauchy cluster process\cr \code{\link{vargamma.estK}} \tab fit a Neyman-Scott Variance Gamma process\cr \code{\link{vargamma.estpcf}} \tab fit a Neyman-Scott Variance Gamma process\cr \code{\link{mincontrast}} \tab low-level algorithm for fitting models \cr \tab by the method of minimum contrast } } \section{IV. MODEL FITTING (POISSON AND GIBBS MODELS)}{ \bold{Types of models} Poisson point processes are the simplest models for point patterns. A Poisson model assumes that the points are stochastically independent. It may allow the points to have a non-uniform spatial density. The special case of a Poisson process with a uniform spatial density is often called Complete Spatial Randomness. Poisson point processes are included in the more general class of Gibbs point process models. In a Gibbs model, there is \emph{interaction} or dependence between points. Many different types of interaction can be specified. For a detailed explanation of how to fit Poisson or Gibbs point process models to point pattern data using \pkg{spatstat}, see Baddeley and Turner (2005b) or Baddeley (2008). \bold{To fit a Poisson or Gibbs point process model:} Model fitting in \pkg{spatstat} is performed mainly by the function \code{\link{ppm}}. Its result is an object of class \code{"ppm"}. 
Here are some examples, where \code{X} is a point pattern (class \code{"ppp"}): \tabular{ll}{ \emph{command} \tab \emph{model} \cr \code{ppm(X)} \tab Complete Spatial Randomness \cr \code{ppm(X ~ 1)} \tab Complete Spatial Randomness \cr \code{ppm(X ~ x)} \tab Poisson process with \cr \tab intensity loglinear in \eqn{x} coordinate \cr \code{ppm(X ~ 1, Strauss(0.1))} \tab Stationary Strauss process \cr \code{ppm(X ~ x, Strauss(0.1))} \tab Strauss process with \cr \tab conditional intensity loglinear in \eqn{x} } It is also possible to fit models that depend on other covariates. \bold{Manipulating the fitted model:} \tabular{ll}{ \code{\link{plot.ppm}} \tab Plot the fitted model\cr \code{\link{predict.ppm}} \tab Compute the spatial trend and conditional intensity\cr \tab of the fitted point process model \cr \code{\link{coef.ppm}} \tab Extract the fitted model coefficients\cr \code{\link{parameters}} \tab Extract all model parameters\cr \code{\link{formula.ppm}} \tab Extract the trend formula\cr \code{\link{intensity.ppm}} \tab Compute fitted intensity \cr \code{\link{Kmodel.ppm}} \tab \eqn{K} function of fitted model \cr \code{\link{pcfmodel.ppm}} \tab pair correlation of fitted model \cr \code{\link{fitted.ppm}} \tab Compute fitted conditional intensity at quadrature points \cr \code{\link{residuals.ppm}} \tab Compute point process residuals at quadrature points \cr \code{\link{update.ppm}} \tab Update the fit \cr \code{\link{vcov.ppm}} \tab Variance-covariance matrix of estimates\cr \code{\link{rmh.ppm}} \tab Simulate from fitted model \cr \code{\link{simulate.ppm}} \tab Simulate from fitted model \cr \code{\link{print.ppm}} \tab Print basic information about a fitted model\cr \code{\link{summary.ppm}} \tab Summarise a fitted model\cr \code{\link{effectfun}} \tab Compute the fitted effect of one covariate\cr \code{\link{logLik.ppm}} \tab log-likelihood or log-pseudolikelihood\cr \code{\link{anova.ppm}} \tab Analysis of deviance \cr \code{\link{model.frame.ppm}} 
\tab Extract data frame used to fit model \cr \code{\link{model.images}} \tab Extract spatial data used to fit model \cr \code{\link{model.depends}} \tab Identify variables in the model \cr \code{\link{as.interact}} \tab Interpoint interaction component of model \cr \code{\link{fitin}} \tab Extract fitted interpoint interaction \cr \code{\link{is.hybrid}} \tab Determine whether the model is a hybrid \cr \code{\link{valid.ppm}} \tab Check the model is a valid point process \cr \code{\link{project.ppm}} \tab Ensure the model is a valid point process } For model selection, you can also use the generic functions \code{\link{step}}, \code{\link{drop1}} and \code{\link{AIC}} on fitted point process models. See \code{\link{spatstat.options}} to control plotting of fitted model. \bold{To specify a point process model:} The first order ``trend'' of the model is determined by an \R language formula. The formula specifies the form of the \emph{logarithm} of the trend. \tabular{ll}{ \code{X ~ 1} \tab No trend (stationary) \cr \code{X ~ x} \tab Loglinear trend \eqn{\lambda(x,y) = \exp(\alpha + \beta x)}{lambda(x,y) = exp(alpha + beta * x)} \cr \tab where \eqn{x,y} are Cartesian coordinates \cr \code{X ~ polynom(x,y,3)} \tab Log-cubic polynomial trend \cr \code{X ~ harmonic(x,y,2)} \tab Log-harmonic polynomial trend \cr \code{X ~ Z} \tab Loglinear function of covariate \code{Z} \cr \tab \eqn{\lambda(x,y) = \exp(\alpha + \beta Z(x,y))}{lambda(x,y) = exp(alpha + beta * Z(x,y))} } The higher order (``interaction'') components are described by an object of class \code{"interact"}. 
Such objects are created by: \tabular{ll}{ \code{\link{Poisson}()} \tab the Poisson point process\cr \code{\link{AreaInter}()} \tab Area-interaction process\cr \code{\link{BadGey}()} \tab multiscale Geyer process\cr \code{\link{Concom}()} \tab connected component interaction\cr \code{\link{DiggleGratton}() } \tab Diggle-Gratton potential \cr \code{\link{DiggleGatesStibbard}() } \tab Diggle-Gates-Stibbard potential \cr \code{\link{Fiksel}()} \tab Fiksel pairwise interaction process\cr \code{\link{Geyer}()} \tab Geyer's saturation process\cr \code{\link{Hardcore}()} \tab Hard core process\cr \code{\link{HierHard}()} \tab Hierarchical multitype hard core process\cr \code{\link{HierStrauss}()} \tab Hierarchical multitype Strauss process\cr \code{\link{HierStraussHard}()} \tab Hierarchical multitype Strauss-hard core process\cr \code{\link{Hybrid}()} \tab Hybrid of several interactions\cr \code{\link{LennardJones}() } \tab Lennard-Jones potential \cr \code{\link{MultiHard}()} \tab multitype hard core process \cr \code{\link{MultiStrauss}()} \tab multitype Strauss process \cr \code{\link{MultiStraussHard}()} \tab multitype Strauss/hard core process \cr \code{\link{OrdThresh}()} \tab Ord process, threshold potential\cr \code{\link{Ord}()} \tab Ord model, user-supplied potential \cr \code{\link{PairPiece}()} \tab pairwise interaction, piecewise constant \cr \code{\link{Pairwise}()} \tab pairwise interaction, user-supplied potential\cr \code{\link{Penttinen}()} \tab Penttinen pairwise interaction\cr \code{\link{SatPiece}()} \tab Saturated pair model, piecewise constant potential\cr \code{\link{Saturated}()} \tab Saturated pair model, user-supplied potential\cr \code{\link{Softcore}()} \tab pairwise interaction, soft core potential\cr \code{\link{Strauss}()} \tab Strauss process \cr \code{\link{StraussHard}()} \tab Strauss/hard core point process \cr \code{\link{Triplets}()} \tab Geyer triplets process } Note that it is also possible to combine several such interactions using 
\code{\link{Hybrid}}. \bold{Finer control over model fitting:} A quadrature scheme is represented by an object of class \code{"quad"}. To create a quadrature scheme, typically use \code{\link{quadscheme}}. \tabular{ll}{ \code{\link{quadscheme}} \tab default quadrature scheme \cr \tab using rectangular cells or Dirichlet cells\cr \code{\link{pixelquad}} \tab quadrature scheme based on image pixels \cr \code{\link{quad}} \tab create an object of class \code{"quad"} } To inspect a quadrature scheme: \tabular{ll}{ \code{plot(Q)} \tab plot quadrature scheme \code{Q}\cr \code{print(Q)} \tab print basic information about quadrature scheme \code{Q}\cr \code{\link{summary}(Q)} \tab summary of quadrature scheme \code{Q} } A quadrature scheme consists of data points, dummy points, and weights. To generate dummy points: \tabular{ll}{ \code{\link{default.dummy}} \tab default pattern of dummy points \cr \code{\link{gridcentres}} \tab dummy points in a rectangular grid \cr \code{\link{rstrat}} \tab stratified random dummy pattern \cr \code{\link{spokes}} \tab radial pattern of dummy points \cr \code{\link{corners}} \tab dummy points at corners of the window } To compute weights: \tabular{ll}{ \code{\link{gridweights}} \tab quadrature weights by the grid-counting rule \cr \code{\link{dirichletWeights}} \tab quadrature weights are Dirichlet tile areas } \bold{Simulation and goodness-of-fit for fitted models:} \tabular{ll}{ \code{\link{rmh.ppm}} \tab simulate realisations of a fitted model \cr \code{\link{simulate.ppm}} \tab simulate realisations of a fitted model \cr \code{\link{envelope}} \tab compute simulation envelopes for a fitted model } \bold{Point process models on a linear network:} An object of class \code{"lpp"} represents a pattern of points on a linear network. Point process models can also be fitted to these objects. Currently only Poisson models can be fitted. 
\tabular{ll}{ \code{\link{lppm}} \tab point process model on linear network \cr \code{\link{anova.lppm}} \tab analysis of deviance for \cr \tab point process model on linear network \cr \code{\link{envelope.lppm}} \tab simulation envelopes for \cr \tab point process model on linear network \cr \code{\link{fitted.lppm}} \tab fitted intensity values \cr \code{\link{predict.lppm}} \tab model prediction on linear network \cr \code{\link{linim}} \tab pixel image on linear network \cr \code{\link{plot.linim}} \tab plot a pixel image on linear network \cr \code{\link{eval.linim}} \tab evaluate expression involving images \cr \code{\link{linfun}} \tab function defined on linear network \cr \code{\link{methods.linfun}} \tab conversion facilities } } \section{V. MODEL FITTING (DETERMINANTAL POINT PROCESS MODELS)}{ Code for fitting \emph{determinantal point process models} has recently been added to \pkg{spatstat}. For information, see the help file for \code{\link{dppm}}. } \section{VI. MODEL FITTING (SPATIAL LOGISTIC REGRESSION)}{ \bold{Logistic regression} Pixel-based spatial logistic regression is an alternative technique for analysing spatial point patterns that is widely used in Geographical Information Systems. It is approximately equivalent to fitting a Poisson point process model. In pixel-based logistic regression, the spatial domain is divided into small pixels, the presence or absence of a data point in each pixel is recorded, and logistic regression is used to model the presence/absence indicators as a function of any covariates. Facilities for performing spatial logistic regression are provided in \pkg{spatstat} for comparison purposes. \bold{Fitting a spatial logistic regression} Spatial logistic regression is performed by the function \code{\link{slrm}}. Its result is an object of class \code{"slrm"}. 
There are many methods for this class, including methods for \code{print}, \code{fitted}, \code{predict}, \code{simulate}, \code{anova}, \code{coef}, \code{logLik}, \code{terms}, \code{update}, \code{formula} and \code{vcov}. For example, if \code{X} is a point pattern (class \code{"ppp"}): \tabular{ll}{ \emph{command} \tab \emph{model} \cr \code{slrm(X ~ 1)} \tab Complete Spatial Randomness \cr \code{slrm(X ~ x)} \tab Poisson process with \cr \tab intensity loglinear in \eqn{x} coordinate \cr \code{slrm(X ~ Z)} \tab Poisson process with \cr \tab intensity loglinear in covariate \code{Z} } \bold{Manipulating a fitted spatial logistic regression} \tabular{ll}{ \code{\link{anova.slrm}} \tab Analysis of deviance \cr \code{\link{coef.slrm}} \tab Extract fitted coefficients \cr \code{\link{vcov.slrm}} \tab Variance-covariance matrix of fitted coefficients \cr \code{\link{fitted.slrm}} \tab Compute fitted probabilities or intensity \cr \code{\link{logLik.slrm}} \tab Evaluate loglikelihood of fitted model \cr \code{\link{plot.slrm}} \tab Plot fitted probabilities or intensity \cr \code{\link{predict.slrm}} \tab Compute predicted probabilities or intensity with new data \cr \code{\link{simulate.slrm}} \tab Simulate model } There are many other undocumented methods for this class, including methods for \code{print}, \code{update}, \code{formula} and \code{terms}. Stepwise model selection is possible using \code{step} or \code{stepAIC}. } \section{VII. SIMULATION}{ There are many ways to generate a random point pattern, line segment pattern, pixel image or tessellation in \pkg{spatstat}. 
\bold{Random point patterns:} \tabular{ll}{ \code{\link{runifpoint}} \tab generate \eqn{n} independent uniform random points \cr \code{\link{rpoint}} \tab generate \eqn{n} independent random points \cr \code{\link{rmpoint}} \tab generate \eqn{n} independent multitype random points \cr \code{\link{rpoispp}} \tab simulate the (in)homogeneous Poisson point process \cr \code{\link{rmpoispp}} \tab simulate the (in)homogeneous multitype Poisson point process \cr \code{\link{runifdisc}} \tab generate \eqn{n} independent uniform random points in disc\cr \code{\link{rstrat}} \tab stratified random sample of points \cr \code{\link{rsyst}} \tab systematic random sample (grid) of points \cr \code{\link{rMaternI}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model I inhibition process\cr \code{\link{rMaternII}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model II inhibition process\cr \code{\link{rSSI}} \tab simulate Simple Sequential Inhibition process\cr \code{\link{rHardcore}} \tab simulate hard core process (perfect simulation)\cr \code{\link{rStrauss}} \tab simulate Strauss process (perfect simulation)\cr \code{\link{rStraussHard}} \tab simulate Strauss-hard core process (perfect simulation)\cr \code{\link{rDiggleGratton}} \tab simulate Diggle-Gratton process (perfect simulation)\cr \code{\link{rDGS}} \tab simulate Diggle-Gates-Stibbard process (perfect simulation)\cr \code{\link{rPenttinen}} \tab simulate Penttinen process (perfect simulation)\cr \code{\link{rNeymanScott}} \tab simulate a general Neyman-Scott process\cr \code{\link{rMatClust}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster process\cr \code{\link{rThomas}} \tab simulate the Thomas process \cr \code{\link{rLGCP}} \tab simulate the log-Gaussian Cox process \cr \code{\link{rGaussPoisson}} \tab simulate the Gauss-Poisson cluster process\cr \code{\link{rCauchy}} \tab simulate Neyman-Scott process with Cauchy clusters \cr \code{\link{rVarGamma}} \tab simulate 
Neyman-Scott process with Variance Gamma clusters \cr \code{\link{rcell}} \tab simulate the Baddeley-Silverman cell process \cr \code{\link{runifpointOnLines}} \tab generate \eqn{n} random points along specified line segments \cr \code{\link{rpoisppOnLines}} \tab generate Poisson random points along specified line segments } \bold{Resampling a point pattern:} \tabular{ll}{ \code{\link{quadratresample}} \tab block resampling \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rshift}} \tab random shifting of (subsets of) points\cr \code{\link{rthin}} \tab random thinning } See also \code{\link{varblock}} for estimating the variance of a summary statistic by block resampling, and \code{\link{lohboot}} for another bootstrap technique. \bold{Fitted point process models:} If you have fitted a point process model to a point pattern dataset, the fitted model can be simulated. Cluster process models are fitted by the function \code{\link{kppm}} yielding an object of class \code{"kppm"}. To generate one or more simulated realisations of this fitted model, use \code{\link{simulate.kppm}}. Gibbs point process models are fitted by the function \code{\link{ppm}} yielding an object of class \code{"ppm"}. To generate a simulated realisation of this fitted model, use \code{\link{rmh}}. To generate one or more simulated realisations of the fitted model, use \code{\link{simulate.ppm}}. 
\bold{Other random patterns:} \tabular{ll}{ \code{\link{rlinegrid}} \tab generate a random array of parallel lines through a window \cr \code{\link{rpoisline}} \tab simulate the Poisson line process within a window \cr \code{\link{rpoislinetess}} \tab generate random tessellation using Poisson line process \cr \code{\link{rMosaicSet}} \tab generate random set by selecting some tiles of a tessellation \cr \code{\link{rMosaicField}} \tab generate random pixel image by assigning random values in each tile of a tessellation } \bold{Simulation-based inference} \tabular{ll}{ \code{\link{envelope}} \tab critical envelope for Monte Carlo test of goodness-of-fit \cr \code{\link{qqplot.ppm}} \tab diagnostic plot for interpoint interaction \cr \code{\link{scan.test}} \tab spatial scan statistic/test \cr \code{\link{studpermu.test}} \tab studentised permutation test\cr \code{\link{segregation.test}} \tab test of segregation of types } } \section{VIII. TESTS AND DIAGNOSTICS}{ \bold{Hypothesis tests:} \tabular{ll}{ \code{\link{quadrat.test}} \tab \eqn{\chi^2}{chi^2} goodness-of-fit test on quadrat counts \cr \code{\link{clarkevans.test}} \tab Clark and Evans test \cr \code{\link{cdf.test}} \tab Spatial distribution goodness-of-fit test\cr \code{\link{berman.test}} \tab Berman's goodness-of-fit tests\cr \code{\link{envelope}} \tab critical envelope for Monte Carlo test of goodness-of-fit \cr \code{\link{scan.test}} \tab spatial scan statistic/test \cr \code{\link{dclf.test}} \tab Diggle-Cressie-Loosmore-Ford test \cr \code{\link{mad.test}} \tab Mean Absolute Deviation test \cr \code{\link{anova.ppm}} \tab Analysis of Deviance for point process models } More recently-developed tests: \tabular{ll}{ \code{\link{dg.test}} \tab Dao-Genton test \cr \code{\link{bits.test}} \tab Balanced independent two-stage test \cr \code{\link{dclf.progress}} \tab Progress plot for DCLF test \cr \code{\link{mad.progress}} \tab Progress plot for MAD test } \bold{Sensitivity diagnostics:} Classical 
measures of model sensitivity such as leverage and influence have been adapted to point process models. \tabular{ll}{ \code{\link{leverage.ppm}} \tab Leverage for point process model\cr \code{\link{influence.ppm}} \tab Influence for point process model\cr \code{\link{dfbetas.ppm}} \tab Parameter influence\cr } \bold{Diagnostics for covariate effect:} Classical diagnostics for covariate effects have been adapted to point process models. \tabular{ll}{ \code{\link{parres}} \tab Partial residual plot\cr \code{\link{addvar}} \tab Added variable plot \cr \code{\link{rhohat}} \tab Kernel estimate of covariate effect\cr \code{\link{rho2hat}} \tab Kernel estimate of covariate effect (bivariate) } \bold{Residual diagnostics:} Residuals for a fitted point process model, and diagnostic plots based on the residuals, were introduced in Baddeley et al (2005) and Baddeley, Rubak and \ifelse{latex}{\out{M\o ller}}{Moller} (2011). Type \code{demo(diagnose)} for a demonstration of the diagnostics features. 
\tabular{ll}{ \code{\link{diagnose.ppm}} \tab diagnostic plots for spatial trend\cr \code{\link{qqplot.ppm}} \tab diagnostic Q-Q plot for interpoint interaction\cr \code{\link[spatstat.data]{residualspaper}} \tab examples from Baddeley et al (2005) \cr \code{\link{Kcom}} \tab model compensator of \eqn{K} function \cr \code{\link{Gcom}} \tab model compensator of \eqn{G} function \cr \code{\link{Kres}} \tab score residual of \eqn{K} function \cr \code{\link{Gres}} \tab score residual of \eqn{G} function \cr \code{\link{psst}} \tab pseudoscore residual of summary function \cr \code{\link{psstA}} \tab pseudoscore residual of empty space function \cr \code{\link{psstG}} \tab pseudoscore residual of \eqn{G} function \cr \code{\link{compareFit}} \tab compare compensators of several fitted models } \bold{Resampling and randomisation procedures} You can build your own tests based on randomisation and resampling using the following capabilities: \tabular{ll}{ \code{\link{quadratresample}} \tab block resampling \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rshift}} \tab random shifting of (subsets of) points\cr \code{\link{rthin}} \tab random thinning } } \section{IX. DOCUMENTATION}{ The online manual entries are quite detailed and should be consulted first for information about a particular function. The book Baddeley, Rubak and Turner (2015) is a complete course on analysing spatial point patterns, with full details about \pkg{spatstat}. Older material (which is now out-of-date but is freely available) includes Baddeley and Turner (2005a), a brief overview of the package in its early development; Baddeley and Turner (2005b), a more detailed explanation of how to fit point process models to data; and Baddeley (2010), a complete set of notes from a 2-day workshop on the use of \pkg{spatstat}. Type \code{citation("spatstat")} to get a list of these references. } \references{ Baddeley, A. 
(2010) \emph{Analysing spatial point patterns in R}. Workshop notes, Version 4.1. Online technical publication, CSIRO. \url{https://research.csiro.au/software/wp-content/uploads/sites/6/2015/02/Rspatialcourse_CMIS_PDF-Standard.pdf} Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. Baddeley, A. and Turner, R. (2005a) Spatstat: an R package for analyzing spatial point patterns. \emph{Journal of Statistical Software} \bold{12}:6, 1--42. URL: \code{www.jstatsoft.org}, ISSN: 1548-7660. Baddeley, A. and Turner, R. (2005b) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \url{http://www.jstatsoft.org/v55/i11/} Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Diggle, P.J. (2014) \emph{Statistical Analysis of Spatial and Spatio-Temporal Point Patterns}, Third edition. {Chapman and Hall/CRC}. Gelfand, A.E., Diggle, P.J., Fuentes, M. and Guttorp, P., editors (2010) \emph{Handbook of Spatial Statistics}. CRC Press. Huang, F. and Ogata, Y. 
(1999) Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8}, 510--530. Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical Analysis and Modelling of Spatial Point Patterns.} Wiley. Waagepetersen, R. An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63} (2007) 252--258. } \section{Licence}{ This library and its documentation are usable under the terms of the "GNU General Public License", a copy of which is distributed with the package. } \author{ \spatstatAuthors. } \section{Acknowledgements}{ Kasper Klitgaard Berthelsen, Ottmar Cronie, Yongtao Guan, Ute Hahn, Abdollah Jalilian, Marie-Colette van Lieshout, Greg McSwiggan, Tuomas Rajala, Suman Rakshit, Dominic Schuhmacher, Rasmus Waagepetersen and Hangsheng Wang made substantial contributions of code. Additional contributions and suggestions from Monsuru Adepeju, Corey Anderson, Ang Qi Wei, Marcel Austenfeld, Sandro Azaele, Malissa Baddeley, Guy Bayegnak, Colin Beale, Melanie Bell, Thomas Bendtsen, Ricardo Bernhardt, Andrew Bevan, Brad Biggerstaff, Anders Bilgrau, Leanne Bischof, Christophe Biscio, Roger Bivand, Jose M. Blanco Moreno, Florent Bonneu, Julian Burgos, Simon Byers, Ya-Mei Chang, Jianbao Chen, Igor Chernayavsky, Y.C. Chin, Bjarke Christensen, Jean-Francois Coeurjolly, Kim Colyvas, Robin Corria Ainslie, Richard Cotton, Marcelino de la Cruz, Peter Dalgaard, Mario D'Antuono, Sourav Das, Tilman Davies, Peter Diggle, Patrick Donnelly, Ian Dryden, Stephen Eglen, Ahmed El-Gabbas, Belarmain Fandohan, Olivier Flores, David Ford, Peter Forbes, Shane Frank, Janet Franklin, Funwi-Gabga Neba, Oscar Garcia, Agnes Gault, Jonas Geldmann, Marc Genton, Shaaban Ghalandarayeshi, Julian Gilbey, Jason Goldstick, Pavel Grabarnik, C. 
Graf, Ute Hahn, Andrew Hardegen, Martin \ifelse{latex}{\out{B{\o}gsted}}{Bogsted} Hansen, Martin Hazelton, Juha Heikkinen, Mandy Hering, Markus Herrmann, Paul Hewson, Kassel Hingee, Kurt Hornik, Philipp Hunziker, Jack Hywood, Ross Ihaka, \ifelse{latex}{\out{\u{C}enk I\c{c}\"{o}s}}{Cenk Icos}, Aruna Jammalamadaka, Robert John-Chandran, Devin Johnson, Mahdieh Khanmohammadi, Bob Klaver, Peter Kovesi, Mike Kuhn, Jeff Laake, Frederic Lavancier, Tom Lawrence, Robert Lamb, Jonathan Lee, George Leser, Li Haitao, George Limitsios, Andrew Lister, Ben Madin, Martin Maechler, Kiran Marchikanti, Jeff Marcus, Robert Mark, Peter McCullagh, Monia Mahling, Jorge Mateu Mahiques, Ulf Mehlig, Frederico Mestre, Sebastian Wastl Meyer, Mi Xiangcheng, Lore De Middeleer, Robin Milne, Enrique Miranda, Jesper \ifelse{latex}{\out{M\o ller}}{Moller}, Mehdi Moradi, Virginia Morera Pujol, Erika Mudrak, Gopalan Nair, Nader Najari, Nicoletta Nava, Linda Stougaard Nielsen, Felipe Nunes, Jens Randel Nyengaard, Jens \ifelse{latex}{\out{Oehlschl\"{a}gel}}{Oehlschlaegel}, Thierry Onkelinx, Sean O'Riordan, Evgeni Parilov, Jeff Picka, Nicolas Picard, Mike Porter, Sergiy Protsiv, Adrian Raftery, Suman Rakshit, Ben Ramage, Pablo Ramon, Xavier Raynaud, Nicholas Read, Matt Reiter, Ian Renner, Tom Richardson, Brian Ripley, Ted Rosenbaum, Barry Rowlingson, Jason Rudokas, John Rudge, Christopher Ryan, Farzaneh Safavimanesh, Aila \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, Cody Schank, Katja Schladitz, Sebastian Schutte, Bryan Scott, Olivia Semboli, \ifelse{latex}{\out{Fran\c{c}ois S\'{e}m\'{e}curbe}}{Francois Semecurbe}, Vadim Shcherbakov, Shen Guochun, Shi Peijian, Harold-Jeffrey Ship, Tammy L Silva, Ida-Maria Sintorn, Yong Song, Malte Spiess, Mark Stevenson, Kaspar Stucki, Michael Sumner, P. 
Surovy, Ben Taylor, Thordis Linda Thorarinsdottir, Berwin Turlach, Torben Tvedebrink, Kevin Ummer, Medha Uppala, Andrew van Burgel, Tobias Verbeke, Mikko Vihtakari, Alexendre Villers, Fabrice Vinatier, Sasha Voss, Sven Wagner, Hao Wang, H. Wendrock, Jan Wild, Carl G. Witthoft, Selene Wong, Maxime Woringer, Mike Zamboni and Achim Zeileis. } \keyword{spatial} \keyword{package} spatstat/man/distmap.psp.Rd0000644000176200001440000000444213160710571015426 0ustar liggesusers\name{distmap.psp} \alias{distmap.psp} \title{ Distance Map of Line Segment Pattern } \description{ Computes the distance from each pixel to the nearest line segment in the given line segment pattern. } \usage{ \method{distmap}{psp}(X, \dots) } \arguments{ \item{X}{A line segment pattern (object of class \code{"psp"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control pixel resolution. } } \value{ A pixel image (object of class \code{"im"}) whose greyscale values are the values of the distance map. The return value has attributes \code{"index"} and \code{"bdry"} which are also pixel images. } \details{ The ``distance map'' of a line segment pattern \eqn{X} is the function \eqn{f} whose value \code{f(u)} is defined for any two-dimensional location \eqn{u} as the shortest distance from \eqn{u} to \eqn{X}. This function computes the distance map of the line segment pattern \code{X} and returns the distance map as a pixel image. The greyscale value at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest line segment of the pattern \code{X}. Distances are computed using analytic geometry. Additionally, the return value has two attributes, \code{"index"} and \code{"bdry"}, which are also pixel images. The grey values in \code{"bdry"} give the distance from each pixel to the bounding rectangle of the image. The grey values in \code{"index"} are integers identifying which line segment of \code{X} is closest. This is a method for the generic function \code{\link{distmap}}. 
Note that this function gives the exact distance from the centre of each pixel to the nearest line segment. To compute the exact distance from the points in a point pattern to the nearest line segment, use \code{\link{distfun}} or one of the low-level functions \code{\link{nncross}} or \code{\link{project2segment}}. } \seealso{ \code{\link{distmap}}, \code{\link{distmap.owin}}, \code{\link{distmap.ppp}}, \code{\link{distfun}}, \code{\link{nncross}}, \code{\link{nearestsegment}}, \code{\link{project2segment}}. } \examples{ a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) Z <- distmap(a) plot(Z) plot(a, add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/emend.ppm.Rd0000644000176200001440000000776513160710571015062 0ustar liggesusers\name{emend.ppm} \alias{emend.ppm} \alias{project.ppm} \title{ Force Point Process Model to be Valid } \description{ Ensures that a fitted point process model satisfies the integrability conditions for existence of the point process. } \usage{ project.ppm(object, \dots, fatal=FALSE, trace=FALSE) \method{emend}{ppm}(object, \dots, fatal=FALSE, trace=FALSE) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{Ignored.} \item{fatal}{ Logical value indicating whether to generate an error if the model cannot be projected to a valid model. } \item{trace}{ Logical value indicating whether to print a trace of the decision process. } } \details{ The functions \code{emend.ppm} and \code{project.ppm} are identical: \code{emend.ppm} is a method for the generic \code{\link{emend}}, while \code{project.ppm} is an older name for the same function. The purpose of the function is to ensure that a fitted model is valid. The model-fitting function \code{\link{ppm}} fits Gibbs point process models to point pattern data. By default, the fitted model returned by \code{\link{ppm}} may not actually exist as a point process. 
First, some of the fitted coefficients of the model may be \code{NA} or infinite values. This usually occurs when the data are insufficient to estimate all the parameters. The model is said to be \emph{unidentifiable} or \emph{confounded}. Second, unlike a regression model, which is well-defined for any finite values of the fitted regression coefficients, a Gibbs point process model is only well-defined if the fitted interaction parameters satisfy some constraints. A famous example is the Strauss process (see \code{\link{Strauss}}) which exists only when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1}, the probability density is not integrable and the process does not exist (and cannot be simulated). By default, \code{\link{ppm}} does not enforce the constraint that a fitted Strauss process (for example) must satisfy \eqn{\gamma \le 1}{gamma <= 1}. This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} could be useful information for data analysis, as it indicates that the Strauss model is not appropriate, and suggests a clustered model should be fitted. The function \code{emend.ppm} or \code{project.ppm} modifies the model \code{object} so that the model is valid. It identifies the terms in the model \code{object} that are associated with illegal parameter values (i.e. parameter values which are either \code{NA}, infinite, or outside their permitted range). It considers all possible sub-models of \code{object} obtained by deleting one or more of these terms. It identifies which of these submodels are valid, and chooses the valid submodel with the largest pseudolikelihood. The result of \code{emend.ppm} or \code{project.ppm} is the true maximum pseudolikelihood fit to the data. For large datasets or complex models, the algorithm used in \code{emend.ppm} or \code{project.ppm} may be time-consuming, because it takes time to compute all the sub-models. 
A faster, approximate algorithm can be applied by setting \code{spatstat.options(project.fast=TRUE)}. This produces a valid submodel, which may not be the maximum pseudolikelihood submodel. Use the function \code{\link{valid.ppm}} to check whether a fitted model object specifies a well-defined point process. Use the expression \code{all(is.finite(coef(object)))} to determine whether all parameters are identifiable. } \value{ Another point process model (object of class \code{"ppm"}). } \author{\adrian and \rolf } \seealso{ \code{\link{ppm}}, \code{\link{valid.ppm}}, \code{\link{emend}}, \code{\link{spatstat.options}} } \examples{ fit <- ppm(redwood, ~1, Strauss(0.1)) coef(fit) fit2 <- emend(fit) coef(fit2) } \keyword{spatial} \keyword{models} spatstat/man/eval.im.Rd0000644000176200001440000000534613160710621014517 0ustar liggesusers\name{eval.im} \alias{eval.im} \title{Evaluate Expression Involving Pixel Images} \description{ Evaluates any expression involving one or more pixel images, and returns a pixel image. } \usage{ eval.im(expr, envir, harmonize=TRUE) } \arguments{ \item{expr}{An expression.} \item{envir}{Optional. The environment in which to evaluate the expression, or a named list containing pixel images to be used in the expression.} \item{harmonize}{ Logical. Whether to resolve inconsistencies between the pixel grids. } } \details{ This function is a wrapper to make it easier to perform pixel-by-pixel calculations in an image. Pixel images in \pkg{spatstat} are represented by objects of class \code{"im"} (see \code{\link{im.object}}). These are essentially matrices of pixel values, with extra attributes recording the pixel dimensions, etc. Suppose \code{X} is a pixel image. Then \code{eval.im(X+3)} will add 3 to the value of every pixel in \code{X}, and return the resulting pixel image. 
Suppose \code{X} and \code{Y} are two pixel images with compatible dimensions: they have the same number of pixels, the same physical size of pixels, and the same bounding box. Then \code{eval.im(X + Y)} will add the corresponding pixel values in \code{X} and \code{Y}, and return the resulting pixel image. In general, \code{expr} can be any expression in the R language involving (a) the \emph{names} of pixel images, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.im} determines which of the \emph{variable names} in the expression \code{expr} refer to pixel images. Each such name is replaced by a matrix containing the pixel values. The expression is then evaluated. The result should be a matrix; it is taken as the matrix of pixel values. The expression \code{expr} must be vectorised. There must be at least one pixel image in the expression. All images must have compatible dimensions. If \code{harmonize=TRUE}, images that have incompatible dimensions will be resampled so that they are compatible. If \code{harmonize=FALSE}, images that are incompatible will cause an error. } \value{ An image object of class \code{"im"}. } \seealso{ \code{\link{as.im}}, \code{\link{compatible.im}}, \code{\link{harmonise.im}}, \code{\link{im.object}} } \examples{ # test images X <- as.im(function(x,y) { x^2 - y^2 }, unit.square()) Y <- as.im(function(x,y) { 3 * x + y }, unit.square()) eval.im(X + 3) eval.im(X - Y) eval.im(abs(X - Y)) Z <- eval.im(sin(X * pi) + Y) ## Use of 'envir' W <- eval.im(sin(U), list(U=density(cells))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/rags.Rd0000644000176200001440000000337313160710621014116 0ustar liggesusers\name{rags} \alias{rags} \title{ Alternating Gibbs Sampler for Multitype Point Processes } \description{ Simulate a realisation of a point process model using the alternating Gibbs sampler. 
} \usage{ rags(model, \dots, ncycles = 100) } \arguments{ \item{model}{ Data specifying some kind of point process model. } \item{\dots}{ Additional arguments passed to other code. } \item{ncycles}{ Number of cycles of the alternating Gibbs sampler that should be performed. } } \details{ The Alternating Gibbs Sampler for a multitype point process is an iterative simulation procedure. Each step of the sampler updates the pattern of points of a particular type \code{i}, by drawing a realisation from the conditional distribution of points of type \code{i} given the points of all other types. Successive steps of the sampler update the points of type 1, then type 2, type 3, and so on. This is an experimental implementation which currently works only for multitype hard core processes (see \code{\link{MultiHard}}) in which there is no interaction between points of the same type. The argument \code{model} should be an object describing a point process model. At the moment, the only permitted format for \code{model} is of the form \code{list(beta, hradii)} where \code{beta} gives the first order trend and \code{hradii} is the matrix of interaction radii. See \code{\link{ragsMultiHard}} for full details. } \value{ A point pattern (object of class \code{"ppp"}). } \author{ \adrian } \seealso{ \code{\link{ragsMultiHard}}, \code{\link{ragsAreaInter}} } \examples{ mo <- list(beta=c(30, 20), hradii = 0.05 * matrix(c(0,1,1,0), 2, 2)) rags(mo, ncycles=10) } \keyword{spatial} \keyword{datagen} spatstat/man/methods.dppm.Rd0000644000176200001440000000302313160710621015554 0ustar liggesusers\name{methods.dppm} \alias{methods.dppm} %DoNotExport \alias{coef.dppm} \alias{formula.dppm} \alias{print.dppm} \alias{terms.dppm} \alias{labels.dppm} \title{ Methods for Determinantal Point Process Models } \description{ These are methods for the class \code{"dppm"}. } \usage{ \method{coef}{dppm}(object, \dots) \method{formula}{dppm}(x, \dots) \method{print}{dppm}(x, ...) 
\method{terms}{dppm}(x, \dots) \method{labels}{dppm}(object, \dots) } \arguments{ \item{x,object}{ An object of class \code{"dppm"}, representing a fitted determinantal point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{coef}}, \code{\link{formula}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"dppm"}. An object of class \code{"dppm"} represents a fitted determinantal point process model. It is obtained from \code{\link{dppm}}. The method \code{coef.dppm} returns the vector of \emph{regression coefficients} of the fitted model. It does not return the interaction parameters. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{dppm}}, \code{\link{plot.dppm}}, \code{\link{predict.dppm}}, \code{\link{simulate.dppm}}, \code{\link{as.ppm.dppm}}. } \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss()) coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat/man/Fiksel.Rd0000644000176200001440000000743213160710571014403 0ustar liggesusers\name{Fiksel} \alias{Fiksel} \title{The Fiksel Interaction} \description{ Creates an instance of Fiksel's double exponential pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ Fiksel(r, hc=NA, kappa) } \arguments{ \item{r}{The interaction radius of the Fiksel model} \item{hc}{The hard core distance} \item{kappa}{The rate parameter} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Fiksel process with interaction radius \eqn{r}, hard core distance \code{hc} and rate parameter \code{kappa}. } \details{ Fiksel (1984) introduced a pairwise interaction point process with the following interaction function \eqn{c}. 
For two points \eqn{u} and \eqn{v} separated by a distance \eqn{d=||u-v||}, the interaction \eqn{c(u,v)} is equal to \eqn{0} if \eqn{d < h}, equal to \eqn{1} if \eqn{d > r}, and equal to \deqn{ \exp(a \exp(-\kappa d))}{exp(a * exp(-kappa * d))} if \eqn{h \le d \le r}{h <= d <= r}, where \eqn{h,r,\kappa,a}{h,r,kappa,a} are parameters. A graph of this interaction function is shown in the Examples. The interpretation of the parameters is as follows. \itemize{ \item \eqn{h} is the hard core distance: distinct points are not permitted to come closer than a distance \eqn{h} apart. \item \eqn{r} is the interaction range: points further than this distance do not interact. \item \eqn{\kappa}{kappa} is the rate or slope parameter, controlling the decay of the interaction as distance increases. \item \eqn{a} is the interaction strength parameter, controlling the strength and type of interaction. If \eqn{a} is zero, the process is Poisson. If \code{a} is positive, the process is clustered. If \code{a} is negative, the process is inhibited (regular). } The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Fiksel pairwise interaction is yielded by the function \code{Fiksel()}. See the examples below. The parameters \eqn{h}, \eqn{r} and \eqn{\kappa}{kappa} must be fixed and given in the call to \code{Fiksel}, while the canonical parameter \eqn{a} is estimated by \code{\link{ppm}()}. To estimate \eqn{h}, \eqn{r} and\eqn{\kappa}{kappa} it is possible to use \code{\link{profilepl}}. The maximum likelihood estimator of\eqn{h} is the minimum interpoint distance. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. 
The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. See also Stoyan, Kendall and Mecke (1987) page 161. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{StraussHard}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Fiksel, T. (1984) Estimation of parameterized pair potentials of marked and non-marked Gibbsian point processes. \emph{Electronische Informationsverabeitung und Kybernetika} \bold{20}, 270--278. Stoyan, D, Kendall, W.S. and Mecke, J. (1987) \emph{Stochastic geometry and its applications}. Wiley. } \examples{ Fiksel(r=1,hc=0.02, kappa=2) # prints a sensible description of itself data(spruces) X <- unmark(spruces) fit <- ppm(X ~ 1, Fiksel(r=3.5, kappa=1)) plot(fitin(fit)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/summary.kppm.Rd0000644000176200001440000000532713160710621015626 0ustar liggesusers\name{summary.kppm} \alias{summary.kppm} \alias{print.summary.kppm} \title{Summarizing a Fitted Cox or Cluster Point Process Model} \description{ \code{summary} method for class \code{"kppm"}. } \usage{ \method{summary}{kppm}(object, \dots, quick=FALSE) \method{print}{summary.kppm}(x, \dots) } \arguments{ \item{object}{ A fitted Cox or cluster point process model (object of class \code{"kppm"}). } \item{quick}{Logical value controlling the scope of the summary.} \item{\dots}{Arguments passed to \code{\link{summary.ppm}} or \code{\link{print.summary.ppm}} controlling the treatment of the trend component of the model.} \item{x}{Object of class \code{"summary.kppm"} as returned by \code{summary.kppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"kppm"}. 
An object of class \code{"kppm"} describes a fitted Cox or cluster point process model. See \code{\link{kppm}}. \code{summary.kppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. \code{print.summary.kppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.kppm} is invoked implicitly when the user calls \code{summary.kppm} without assigning its value to anything. See the examples. You can also type \code{coef(summary(object))} to extract a table of the fitted coefficients of the point process model \code{object} together with standard errors and confidence limits. } \value{ \code{summary.kppm} returns an object of class \code{"summary.kppm"}, while \code{print.summary.kppm} returns \code{NULL}. The result of \code{summary.kppm} includes at least the following components: \item{Xname}{character string name of the original point pattern data} \item{stationary}{logical value indicating whether the model is stationary} \item{clusters}{the \code{clusters} argument to \code{\link{kppm}}} \item{modelname}{character string describing the model} \item{isPCP}{\code{TRUE} if the model is a Poisson cluster process, \code{FALSE} if it is a log-Gaussian Cox process} \item{lambda}{Estimated intensity: numeric value, or pixel image} \item{mu}{Mean cluster size: numeric value, pixel image, or \code{NULL}} \item{clustpar}{list of fitted parameters for the cluster model} \item{clustargs}{list of fixed parameters for the cluster model, if any} \item{callstring}{character string representing the original call to \code{\link{kppm}}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") summary(fit) coef(summary(fit)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/default.dummy.Rd0000644000176200001440000000644613160710571015750 0ustar liggesusers\name{default.dummy} \alias{default.dummy} \title{Generate a 
Default Pattern of Dummy Points} \description{ Generates a default pattern of dummy points for use in a quadrature scheme. } \usage{ default.dummy(X, nd, random=FALSE, ntile=NULL, npix=NULL, quasi=FALSE, \dots, eps=NULL, verbose=FALSE) } \arguments{ \item{X}{ The observed data point pattern. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{nd}{ Optional. Integer, or integer vector of length 2, specifying an \code{nd * nd} or \code{nd[1] * nd[2]} rectangular array of dummy points. } \item{random}{ Logical value. If \code{TRUE}, the dummy points are generated randomly. } \item{quasi}{ Logical value. If \code{TRUE}, the dummy points are generated by a quasirandom sequence. } \item{ntile}{ Optional. Integer or pair of integers specifying the number of rows and columns of tiles used in the counting rule. } \item{npix}{ Optional. Integer or pair of integers specifying the number of rows and columns of pixels used in computing approximate areas. } \item{\dots}{ Ignored. } \item{eps}{ Optional. Grid spacing. A positive number, or a vector of two positive numbers, giving the horizontal and vertical spacing, respectively, of the grid of dummy points. Incompatible with \code{nd}. } \item{verbose}{ If \code{TRUE}, information about the construction of the quadrature scheme is printed. } } \value{ A point pattern (an object of class \code{"ppp"}, see \code{\link{ppp.object}}) containing the dummy points. } \details{ This function provides a sensible default for the dummy points in a quadrature scheme. A quadrature scheme consists of the original data point pattern, an additional pattern of dummy points, and a vector of quadrature weights for all these points. See \code{\link{quad.object}} for further information about quadrature schemes. If \code{random} and \code{quasi} are both false (the default), then the function creates dummy points in a regular \code{nd[1]} by \code{nd[1]} rectangular grid. 
If \code{random} is true and \code{quasi} is false, then the frame of the window is divided into an \code{nd[1]} by \code{nd[1]} array of tiles, and one dummy point is generated at random inside each tile. If \code{quasi} is true, a quasirandom pattern of \code{nd[1] * nd[2]} points is generated. In all cases, the four corner points of the frame of the window are added. Then if the window is not rectangular, any dummy points lying outside it are deleted. If \code{nd} is missing, a default value (depending on the data pattern \code{X}) is computed by \code{default.ngrid}. Alternative functions for creating dummy patterns include \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}} and \code{\link{spokes}}. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}}, \code{\link{spokes}} } \examples{ data(simdat) P <- simdat D <- default.dummy(P, 100) \dontrun{plot(D)} Q <- quadscheme(P, D, "grid") \dontrun{plot(union.quad(Q))} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/rotate.im.Rd0000644000176200001440000000211113160710621015051 0ustar liggesusers\name{rotate.im} \alias{rotate.im} \title{Rotate a Pixel Image} \description{ Rotates a pixel image } \usage{ \method{rotate}{im}(X, angle=pi/2, \dots, centre=NULL) } \arguments{ \item{X}{A pixel image (object of class \code{"im"}).} \item{angle}{Angle of rotation, in radians.} \item{\dots}{Ignored.} \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } } \value{ Another object of class \code{"im"} representing the rotated pixel image. } \details{ The image is rotated by the angle specified. Angles are measured in radians, anticlockwise. The default is to rotate the image 90 degrees anticlockwise. 
} \seealso{ \code{\link{affine.im}}, \code{\link{shift.im}}, \code{\link{rotate}} } \examples{ Z <- distmap(letterR) X <- rotate(Z) \dontrun{ plot(X) } Y <- rotate(X, centre="midpoint") } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/rshift.Rd0000644000176200001440000000300713160710621014453 0ustar liggesusers\name{rshift} \alias{rshift} \title{Random Shift} \description{ Randomly shifts the points of a point pattern or line segment pattern. Generic. } \usage{ rshift(X, \dots) } \arguments{ \item{X}{Pattern to be subjected to a random shift. A point pattern (class \code{"ppp"}), a line segment pattern (class \code{"psp"}) or an object of class \code{"splitppp"}. } \item{\dots}{ Arguments controlling the generation of the random shift vector, or specifying which parts of the pattern will be shifted. } } \value{ An object of the same type as \code{X}. } \details{ This operation applies a random shift (vector displacement) to the points in a point pattern, or to the segments in a line segment pattern. The argument \code{X} may be \itemize{ \item a point pattern (an object of class \code{"ppp"}) \item a line segment pattern (an object of class \code{"psp"}) \item an object of class \code{"splitppp"} (basically a list of point patterns, obtained from \code{\link{split.ppp}}). } The function \code{rshift} is generic, with methods for the three classes \code{"ppp"}, \code{"psp"} and \code{"splitppp"}. See the help pages for these methods, \code{\link{rshift.ppp}}, \code{\link{rshift.psp}} and \code{\link{rshift.splitppp}}, for further information. 
} \seealso{ \code{\link{rshift.ppp}}, \code{\link{rshift.psp}}, \code{\link{rshift.splitppp}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/subspaceDistance.Rd0000644000176200001440000000263213160710621016437 0ustar liggesusers\name{subspaceDistance} \alias{subspaceDistance} \title{ Distance Between Linear Spaces } \description{ Evaluate the distance between two linear subspaces using the measure proposed by Li, Zha and Chiaromonte (2005). } \usage{ subspaceDistance(B0, B1) } \arguments{ \item{B0}{ Matrix whose columns are a basis for the first subspace. } \item{B1}{ Matrix whose columns are a basis for the second subspace. } } \details{ This algorithm calculates the maximum absolute value of the eigenvalues of \eqn{P1-P0} where \eqn{P0,P1} are the projection matrices onto the subspaces generated by \code{B0,B1}. This measure of distance was proposed by Li, Zha and Chiaromonte (2005). See also Xia (2007). } \value{ A single numeric value. } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. Li, B., Zha, H. and Chiaromonte, F. (2005) Contour regression: a general approach to dimension reduction. \emph{Annals of Statistics} \bold{33}, 1580--1616. Xia, Y. (2007) A constructive approach to the estimation of dimension reduction directions. \emph{Annals of Statistics} \bold{35}, 2654--2690. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. 
} \keyword{multivariate} \keyword{algebra} spatstat/man/rotate.owin.Rd0000644000176200001440000000273313160710621015432 0ustar liggesusers\name{rotate.owin} \alias{rotate.owin} \title{Rotate a Window} \description{ Rotates a window } \usage{ \method{rotate}{owin}(X, angle=pi/2, \dots, rescue=TRUE, centre=NULL) } \arguments{ \item{X}{A window (object of class \code{"owin"}).} \item{angle}{Angle of rotation.} \item{rescue}{ Logical. If \code{TRUE}, the rotated window will be processed by \code{\link{rescue.rectangle}}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} controlling the resolution of the rotated window, if \code{X} is a binary pixel mask. Ignored if \code{X} is not a binary mask. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } } \value{ Another object of class \code{"owin"} representing the rotated window. } \details{ Rotates the window by the specified angle. Angles are measured in radians, anticlockwise. The default is to rotate the window 90 degrees anticlockwise. The centre of rotation is the origin, by default, unless \code{centre} is specified. } \seealso{ \code{\link{owin.object}} } \examples{ w <- owin(c(0,1),c(0,1)) v <- rotate(w, pi/3) e <- rotate(w, pi/2, centre="midpoint") \dontrun{ plot(v) } w <- as.mask(letterR) v <- rotate(w, pi/5) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/fixef.mppm.Rd0000644000176200001440000000305213160710621015225 0ustar liggesusers\name{fixef.mppm} \alias{fixef.mppm} \title{ Extract Fixed Effects from Point Process Model } \description{ Given a point process model fitted to a list of point patterns, extract the fixed effects of the model. A method for \code{fixef}. 
} \usage{ \method{fixef}{mppm}(object, \dots) } \arguments{ \item{object}{ A fitted point process model (an object of class \code{"mppm"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[nlme]{fixef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}). This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the coefficients of the fixed effects of the model. } \value{ A numeric vector of coefficients. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{coef.mppm}} } \examples{ H <- hyperframe(Y = waterstriders) # Tweak data to exaggerate differences H$Y[[1]] <- rthin(H$Y[[1]], 0.3) m1 <- mppm(Y ~ id, data=H, Strauss(7)) fixef(m1) m2 <- mppm(Y ~ 1, random=~1|id, data=H, Strauss(7)) fixef(m2) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/berman.test.Rd0000644000176200001440000001553013160710571015406 0ustar liggesusers\name{berman.test} \alias{berman.test} \alias{berman.test.ppm} \alias{berman.test.ppp} \alias{berman.test.lppm} \alias{berman.test.lpp} \title{Berman's Tests for Point Process Model} \description{ Tests the goodness-of-fit of a Poisson point process model using methods of Berman (1986). } \usage{ berman.test(...) \method{berman.test}{ppp}(X, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) \method{berman.test}{ppm}(model, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) 
\method{berman.test}{lpp}(X, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) \method{berman.test}{lppm}(model, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{model}{ A fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{covariate}{ The spatial covariate on which the test will be based. An image (object of class \code{"im"}) or a function. } \item{which}{ Character string specifying the choice of test. } \item{alternative}{ Character string specifying the alternative hypothesis. } \item{\dots}{ Additional arguments controlling the pixel resolution (arguments \code{dimyx} and \code{eps} passed to \code{\link{as.mask}}) or other undocumented features. } } \details{ These functions perform a goodness-of-fit test of a Poisson point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using either of two test statistics \eqn{Z_1}{Z[1]} and \eqn{Z_2}{Z[2]} proposed by Berman (1986). The \eqn{Z_1}{Z[1]} test is also known as the Lawson-Waller test. The function \code{berman.test} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}) and point process models (\code{"ppm"} or \code{"lppm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"} or \code{"lpp"}), then \code{berman.test(X, ...)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{berman.test(model, ...)} performs a test of goodness-of-fit for this fitted model. In this case, \code{model} should be a Poisson point process. 
} The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model. Thus, you must nominate a spatial covariate for this test. The argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"} containing the values of a spatial function. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. Next the values of the \code{covariate} at all locations in the observation window are evaluated. The point process intensity of the fitted model is also evaluated at all locations in the window. \itemize{ \item If \code{which="Z1"}, the test statistic \eqn{Z_1}{Z[1]} is computed as follows. The sum \eqn{S} of the covariate values at all data points is evaluated. The predicted mean \eqn{\mu}{\mu} and variance \eqn{\sigma^2}{\sigma^2} of \eqn{S} are computed from the values of the covariate at all locations in the window. Then we compute \eqn{Z_1 = (S-\mu)/\sigma}{Z[1]=(S-\mu)/\sigma}. Closely-related tests were proposed independently by Waller et al (1993) and Lawson (1993) so this test is often termed the Lawson-Waller test in epidemiological literature. \item If \code{which="Z2"}, the test statistic \eqn{Z_2}{Z[2]} is computed as follows. The values of the \code{covariate} at all locations in the observation window, weighted by the point process intensity, are compiled into a cumulative distribution function \eqn{F}. 
The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. The standardised sample mean of these numbers is the statistic \eqn{Z_2}{Z[2]}. } In both cases the null distribution of the test statistic is the standard normal distribution, approximately. The return value is an object of class \code{"htest"} containing the results of the hypothesis test. The print method for this class gives an informative summary of the test outcome. } \value{ An object of class \code{"htest"} (hypothesis test) and also of class \code{"bermantest"}, containing the results of the test. The return value can be plotted (by \code{\link{plot.bermantest}}) or printed to give an informative summary of the test. } \section{Warning}{ The meaning of a one-sided test must be carefully scrutinised: see the printed output. } \author{\adrian , \rolf and \ege. } \seealso{ \code{\link{cdf.test}}, \code{\link{quadrat.test}}, \code{\link{ppm}} } \references{ Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. Lawson, A.B. (1993) On the analysis of mortality events around a prespecified fixed point. \emph{Journal of the Royal Statistical Society, Series A} \bold{156} (3) 363--377. Waller, L., Turnbull, B., Clark, L.C. and Nasca, P. (1992) Chronic Disease Surveillance and testing of clustering of disease and exposure: Application to leukaemia incidence and TCE-contaminated dumpsites in upstate New York. \emph{Environmetrics} \bold{3}, 281--300. 
} \examples{ # Berman's data data(copper) X <- copper$SouthPoints L <- copper$SouthLines D <- distmap(L, eps=1) # test of CSR berman.test(X, D) berman.test(X, D, "Z2") } \keyword{htest} \keyword{spatial} spatstat/man/pool.Rd0000644000176200001440000000171713160710621014133 0ustar liggesusers\name{pool} \alias{pool} \title{ Pool Data } \description{ Pool the data from several objects of the same class. } \usage{ pool(...) } \arguments{ \item{\dots}{ Objects of the same type. } } \details{ The function \code{pool} is generic. There are methods for several classes, listed below. \code{pool} is used to combine the data from several objects of the same type, and to compute statistics based on the combined dataset. It may be used to pool the estimates obtained from replicated datasets. It may also be used in high-performance computing applications, when the objects \code{\dots} have been computed on different processors or in different batch runs, and we wish to combine them. } \value{ An object of the same class as the arguments \code{\dots}. } \seealso{ \code{\link{pool.envelope}}, \code{\link{pool.fasp}}, \code{\link{pool.rat}}, \code{\link{pool.fv}} } \author{\adrian and \rolf } \keyword{spatial} spatstat/man/BadGey.Rd0000644000176200001440000000777113160710571014327 0ustar liggesusers\name{BadGey} \alias{BadGey} \title{Hybrid Geyer Point Process Model} \description{ Creates an instance of the Baddeley-Geyer point process model, defined as a hybrid of several Geyer interactions. The model can then be fitted to point pattern data. } \usage{ BadGey(r, sat) } \arguments{ \item{r}{vector of interaction radii} \item{sat}{ vector of saturation parameters, or a single common value of saturation parameter } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. 
} \details{ This is Baddeley's generalisation of the Geyer saturation point process model, described in \code{\link{Geyer}}, to a process with multiple interaction distances. The BadGey point process with interaction radii \eqn{r_1,\ldots,r_k}{r[1], \ldots, r[k]}, saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],\ldots,s[k]}, intensity parameter \eqn{\beta}{\beta} and interaction parameters \eqn{\gamma_1,\ldots,gamma_k}{\gamma[1], \ldots, \gamma[k]}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma_1^{v_1(x_i, X)} \ldots gamma_k^{v_k(x_i,X)} }{ \beta \gamma[1]^v(1, x_i, X) \ldots \gamma[k]^v(k, x_i, X) } to the probability density of the point pattern, where \deqn{ v_j(x_i, X) = \min( s_j, t_j(x_i,X) ) }{ v(j, x[i], X) = min(s[j], t(j, x[i], X)) } where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the number of points in the pattern \eqn{X} which lie within a distance \eqn{r_j}{r[j]} from the point \eqn{x_i}{x[i]}. \code{BadGey} is used to fit this model to data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant Saturated pairwise interaction is yielded by the function \code{BadGey()}. See the examples below. The argument \code{r} specifies the vector of interaction distances. The entries of \code{r} must be strictly increasing, positive numbers. The argument \code{sat} specifies the vector of saturation parameters that are applied to the point counts \eqn{t_j(x_i, X)}{t(j,x[i],X)}. It should be a vector of the same length as \code{r}, and its entries should be nonnegative numbers. Thus \code{sat[1]} is applied to the count of points within a distance \code{r[1]}, and \code{sat[2]} to the count of points within a distance \code{r[2]}, etc. 
Alternatively \code{sat} may be a single number, and this saturation value will be applied to every count. Infinite values of the saturation parameters are also permitted; in this case \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x[i], X) = t(j, x[i], X)} and there is effectively no `saturation' for the distance range in question. If all the saturation parameters are set to \code{Inf} then the model is effectively a pairwise interaction process, equivalent to \code{\link{PairPiece}} (however the interaction parameters \eqn{\gamma}{\gamma} obtained from \code{\link{BadGey}} have a complicated relationship to the interaction parameters \eqn{\gamma}{\gamma} obtained from \code{\link{PairPiece}}). If \code{r} is a single number, this model is virtually equivalent to the Geyer process, see \code{\link{Geyer}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{PairPiece}}, \code{\link{SatPiece}} } \examples{ BadGey(c(0.1,0.2), c(1,1)) # prints a sensible description of itself BadGey(c(0.1,0.2), 1) data(cells) # fit a stationary Baddeley-Geyer model ppm(cells, ~1, BadGey(c(0.07, 0.1, 0.13), 2)) # nonstationary process with log-cubic polynomial trend \dontrun{ ppm(cells, ~polynom(x,y,3), BadGey(c(0.07, 0.1, 0.13), 2)) } } \author{ \adrian and \rolf in collaboration with Hao Wang and Jeff Picka } \keyword{spatial} \keyword{models} spatstat/man/rHardcore.Rd0000644000176200001440000000704613160710621015074 0ustar liggesusers\name{rHardcore} \alias{rHardcore} \title{Perfect Simulation of the Hardcore Process} \description{ Generate a random pattern of points, a simulated realisation of the Hardcore process, using a perfect simulation algorithm. } \usage{ rHardcore(beta, R = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{R}{ hard core distance (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. 
Currently this must be a rectangular window. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Hardcore point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Hardcore process is a model for strong spatial inhibition. Two points of the process are forbidden to lie closer than \code{R} units apart. The Hardcore process is the special case of the Strauss process (see \code{\link{rStrauss}}) with interaction parameter \eqn{\gamma}{gamma} equal to zero. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. 
(2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ \adrian based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rHardcore(0.05,1.5,square(141.4)) Z <- rHardcore(100,0.05) } \seealso{ \code{\link{rmh}}, \code{\link{Hardcore}}, \code{\link{rStrauss}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}. \code{\link{rDGS}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat/man/is.subset.owin.Rd0000644000176200001440000000246213160710621016052 0ustar liggesusers\name{is.subset.owin} \alias{is.subset.owin} \title{Determine Whether One Window is Contained In Another} \description{ Tests whether window \code{A} is a subset of window \code{B}. } \usage{ is.subset.owin(A, B) } \arguments{ \item{A}{A window object (see Details).} \item{B}{A window object (see Details).} } \value{ Logical scalar; \code{TRUE} if \code{A} is a sub-window of \code{B}, otherwise \code{FALSE}. } \details{ This function tests whether the window \code{A} is a subset of the window \code{B}. The arguments \code{A} and \code{B} must be window objects (either objects of class \code{"owin"}, or data that can be coerced to this class by \code{\link{as.owin}}). Various algorithms are used, depending on the geometrical type of the two windows. Note that if \code{B} is not rectangular, the algorithm proceeds by discretising \code{A}, converting it to a pixel mask using \code{\link{as.mask}}. 
In this case the resulting answer is only ``approximately correct''. The accuracy of the approximation can be controlled: see \code{\link{as.mask}}. } \author{\adrian and \rolf } \examples{ w1 <- as.owin(c(0,1,0,1)) w2 <- as.owin(c(-1,2,-1,2)) is.subset.owin(w1,w2) # Returns TRUE. is.subset.owin(w2,w1) # Returns FALSE. } \keyword{spatial} \keyword{math} spatstat/man/infline.Rd0000644000176200001440000000603013160710621014577 0ustar liggesusers\name{infline} \alias{infline} \alias{plot.infline} \alias{print.infline} \title{Infinite Straight Lines} \description{ Define the coordinates of one or more straight lines in the plane } \usage{ infline(a = NULL, b = NULL, h = NULL, v = NULL, p = NULL, theta = NULL) \method{print}{infline}(x, \dots) \method{plot}{infline}(x, \dots) } \arguments{ \item{a,b}{Numeric vectors of equal length giving the intercepts \eqn{a} and slopes \eqn{b} of the lines. Incompatible with \code{h,v,p,theta} } \item{h}{Numeric vector giving the positions of horizontal lines when they cross the \eqn{y} axis. Incompatible with \code{a,b,v,p,theta} } \item{v}{Numeric vector giving the positions of vertical lines when they cross the \eqn{x} axis. Incompatible with \code{a,b,h,p,theta} } \item{p,theta}{Numeric vectors of equal length giving the polar coordinates of the line. Incompatible with \code{a,b,h,v} } \item{x}{An object of class \code{"infline"}} \item{\dots}{ Extra arguments passed to \code{\link[base]{print}} for printing or \code{\link[graphics]{abline}} for plotting } } \details{ The class \code{infline} is a convenient way to handle infinite straight lines in the plane. The position of a line can be specified in several ways: \itemize{ \item its intercept \eqn{a} and slope \eqn{b} in the equation \eqn{y = a + b x}{y = a + b * x} can be used unless the line is vertical. 
\item for vertical lines we can use the position \eqn{v} where the line crosses the \eqn{x} axis \item for horizontal lines we can use the position \eqn{h} where the line crosses the \eqn{y} axis
} \arguments{ \item{X}{ Data to which the Thomas model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Thomas process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Thomas point process model to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Thomas point process to \code{X}, by finding the parameters of the Thomas model which give the closest match between the theoretical \eqn{K} function of the Thomas process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Thomas point process is described in \Moller and Waagepetersen (2003, pp. 61--62). 
It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and isotropically Normally distributed around the parent point with standard deviation \eqn{\sigma}{sigma} which is equal to the parameter \code{scale}. The named vector of stating values can use either \code{sigma2} (\eqn{\sigma^2}{sigma^2}) or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical \eqn{K}-function of the Thomas process is \deqn{ K(r) = \pi r^2 + \frac 1 \kappa (1 - \exp(-\frac{r^2}{4\sigma^2})). }{ K(r) = pi r^2 + (1 - exp(-r^2/(4 sigma^2)))/kappa. } The theoretical intensity of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Thomas process can be simulated, using \code{\link{rThomas}}. Homogeneous or inhomogeneous Thomas process models can also be fitted using the function \code{\link{kppm}}. 
The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ Rasmus Waagepetersen \email{rw@math.auc.dk} Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{matclust.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{rThomas}} to simulate the fitted model. 
} \examples{ data(redwood) u <- thomas.estK(redwood, c(kappa=10, scale=0.1)) u plot(u) } \keyword{spatial} \keyword{models} spatstat/man/shift.Rd0000644000176200001440000000170413160710621014273 0ustar liggesusers\name{shift} \alias{shift} \title{Apply Vector Translation} \description{ Applies a vector shift of the plane to a geometrical object, such as a point pattern or a window. } \usage{ shift(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} \item{\dots}{Arguments determining the shift vector.} } \value{ Another object of the same type, representing the result of applying the shift. } \details{ This is generic. Methods are provided for point patterns (\code{\link{shift.ppp}}) and windows (\code{\link{shift.owin}}). The object is translated by the vector \code{vec}. } \seealso{ \code{\link{shift.ppp}}, \code{\link{shift.owin}}, \code{\link{rotate}}, \code{\link{affine}}, \code{\link{periodify}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/edges2triangles.Rd0000644000176200001440000000332113160710571016241 0ustar liggesusers\name{edges2triangles} \alias{edges2triangles} \title{ List Triangles in a Graph } \description{ Given a list of edges between vertices, compile a list of all triangles formed by these edges. } \usage{ edges2triangles(iedge, jedge, nvert=max(iedge, jedge), \dots, check=TRUE, friendly=rep(TRUE, nvert)) } \arguments{ \item{iedge,jedge}{ Integer vectors, of equal length, specifying the edges. } \item{nvert}{ Number of vertices in the network. } \item{\dots}{Ignored} \item{check}{Logical. Whether to check validity of input data.} \item{friendly}{ Optional. For advanced use. See Details. } } \details{ This low level function finds all the triangles (cliques of size 3) in a finite graph with \code{nvert} vertices and with edges specified by \code{iedge, jedge}. 
The interpretation of \code{iedge, jedge} is that each pair of corresponding entries specifies an edge in the graph.
It is faster (for a circular window) than the general code used in \code{\link{runifpoint}}. To generate random points in an ellipse, first generate points in a circle using \code{runifdisc}, then transform to an ellipse using \code{\link{affine}}, as shown in the examples. To generate random points in other windows, use \code{\link{runifpoint}}. To generate non-uniform random points, use \code{\link{rpoint}}. } \seealso{ \code{\link{disc}}, \code{\link{runifpoint}}, \code{\link{rpoint}} } \examples{ # 100 random points in the unit disc plot(runifdisc(100)) # 42 random points in the ellipse with major axis 3 and minor axis 1 X <- runifdisc(42) Y <- affine(X, mat=diag(c(3,1))) plot(Y) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/tile.areas.Rd0000644000176200001440000000175413160710621015212 0ustar liggesusers\name{tile.areas} \alias{tile.areas} \title{Compute Areas of Tiles in a Tessellation} \description{ Computes the area of each tile in a tessellation. } \usage{ tile.areas(x) } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. This command computes the area of each of the tiles that make up the tessellation \code{x}. The result is a numeric vector in the same order as the tiles would be listed by \code{tiles(x)}. } \value{ A numeric vector. 
} \seealso{ \code{\link{tess}}, \code{\link{tiles}}, \code{\link{tilenames}}, \code{\link{tiles.empty}} } \examples{ A <- tess(xgrid=0:2,ygrid=0:2) tile.areas(A) v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin()) E <- tess(image=v) tile.areas(E) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/F3est.Rd0000644000176200001440000001227513160710571014153 0ustar liggesusers\name{F3est} \Rdversion{1.1} \alias{F3est} \title{ Empty Space Function of a Three-Dimensional Point Pattern } \description{ Estimates the empty space function \eqn{F_3(r)}{F3(r)} from a three-dimensional point pattern. } \usage{ F3est(X, ..., rmax = NULL, nrval = 128, vside = NULL, correction = c("rs", "km", "cs"), sphere = c("fudge", "ideal", "digital")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{F_3(r)}{F3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{F_3(r)}{F3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{vside}{ Optional. Side length of the voxels in the discrete approximation. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{sphere}{ Optional. Character string specifying how to calculate the theoretical value of \eqn{F_3(r)}{F3(r)} for a Poisson process. See Details. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the empty space function is \deqn{ F_3(r) = P(d(0,\Phi) \le r) }{ F3(r) = P(d(0,Phi) <= r) } where \eqn{d(0,\Phi)}{d(0,Phi)} denotes the distance from a fixed origin \eqn{0} to the nearest point of \eqn{\Phi}{Phi}. The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. 
The empty space function of \eqn{\Phi}{Phi} can then be estimated using techniques described in the References. The box containing the point pattern is discretised into cubic voxels of side length \code{vside}. The distance function \eqn{d(u,\Phi)}{d(u,Phi)} is computed for every voxel centre point \eqn{u} using a three-dimensional version of the distance transform algorithm (Borgefors, 1986). The empirical cumulative distribution function of these values, with appropriate edge corrections, is the estimate of \eqn{F_3(r)}{F3(r)}. The available edge corrections are: \describe{ \item{\code{"rs"}:}{ the reduced sample (aka minus sampling, border correction) estimator (Baddeley et al, 1993) } \item{\code{"km"}:}{ the three-dimensional version of the Kaplan-Meier estimator (Baddeley and Gill, 1997) } \item{\code{"cs"}:}{ the three-dimensional generalisation of the Chiu-Stoyan or Hanisch estimator (Chiu and Stoyan, 1998). } } Alternatively \code{correction="all"} selects all options. The result includes a column \code{theo} giving the theoretical value of \eqn{F_3(r)}{F3(r)} for a uniform Poisson process (Complete Spatial Randomness). This value depends on the volume of the sphere of radius \code{r} measured in the discretised distance metric. The argument \code{sphere} determines how this will be calculated. \itemize{ \item If \code{sphere="ideal"} the calculation will use the volume of an ideal sphere of radius \eqn{r} namely \eqn{(4/3) \pi r^3}{(4/3) * pi * r^3}. This is not recommended because the theoretical values of \eqn{F_3(r)}{F3(r)} are inaccurate. \item If \code{sphere="fudge"} then the volume of the ideal sphere will be multiplied by 0.78, which gives the approximate volume of the sphere in the discretised distance metric. \item If \code{sphere="digital"} then the volume of the sphere in the discretised distance metric is computed exactly using another distance transform. This takes longer to compute, but is exact. 
} } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42} (1993) 641--668. Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25}, 263--292. Borgefors, G. (1986) Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34}, 344--371. Chiu, S.N. and Stoyan, D. (1998) Estimators of distance distributions for spatial patterns. \emph{Statistica Neerlandica} \bold{52}, 239--246. } \author{ \adrian and Rana Moyeed. } \section{Warnings}{ A small value of \code{vside} and a large value of \code{nrval} are required for reasonable accuracy. The default value of \code{vside} ensures that the total number of voxels is \code{2^22} or about 4 million. To change the default number of voxels, see \code{\link{spatstat.options}("nvoxel")}. } \seealso{ \code{\link{G3est}}, \code{\link{K3est}}, \code{\link{pcf3est}}. 
} \examples{ \testonly{op <- spatstat.options(nvoxel=2^18)} X <- rpoispp3(42) Z <- F3est(X) if(interactive()) plot(Z) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{nonparametric} spatstat/man/colourtools.Rd0000644000176200001440000001437413160710571015555 0ustar liggesusers\name{colourtools} \alias{colourtools} %DoNotExport \alias{paletteindex} \alias{rgb2hex} \alias{rgb2hsva} \alias{col2hex} \alias{paletteindex} \alias{samecolour} \alias{complementarycolour} \alias{interp.colours} \alias{is.colour} \alias{is.grey} \alias{to.grey} \alias{to.opaque} \alias{to.transparent} \title{ Convert and Compare Colours in Different Formats } \description{ These functions convert between different formats for specifying a colour in \R, determine whether colours are equivalent, and convert colour to greyscale. } \usage{ col2hex(x) rgb2hex(v, maxColorValue=255) rgb2hsva(red, green=NULL, blue=NULL, alpha=NULL, maxColorValue=255) paletteindex(x) samecolour(x,y) complementarycolour(x) interp.colours(x, length.out=512) is.colour(x) to.grey(x, weights=c(0.299, 0.587, 0.114), transparent=FALSE) is.grey(x) to.opaque(x) to.transparent(x, fraction) } \arguments{ \item{x,y}{ Any valid specification for a colour or sequence of colours accepted by \code{\link[grDevices]{col2rgb}}. } \item{v}{ A numeric vector of length 3, giving the RGB values of a single colour, or a 3-column matrix giving the RGB values of several colours. Alternatively a vector of length 4 or a matrix with 4 columns, giving the RGB and alpha (transparency) values. } \item{red,green,blue,alpha}{ Arguments acceptable to \code{\link[grDevices]{rgb}} determining the red, green, blue channels and optionally the alpha (transparency) channel. Note that \code{red} can also be a matrix with 3 \bold{rows} giving the RGB values, or a matrix with 4 rows giving RGB and alpha values. } \item{maxColorValue}{ Number giving the maximum possible value for the entries in \code{v} or \code{red,green,blue,alpha}. 
} \item{weights}{ Numeric vector of length 3 giving relative weights for the red, green, and blue channels respectively. } \item{transparent}{ Logical value indicating whether transparent colours should be converted to transparent grey values (\code{transparent=TRUE}) or converted to opaque grey values (\code{transparent=FALSE}, the default). } \item{fraction}{ Transparency fraction. Numerical value or vector of values between 0 and 1, giving the opaqueness of a colour. A fully opaque colour has \code{fraction=1}. } \item{length.out}{ Integer. Length of desired sequence. } } \details{ \code{is.colour(x)} can be applied to any kind of data \code{x} and returns \code{TRUE} if \code{x} can be interpreted as a colour or colours. The remaining functions expect data that can be interpreted as colours. \code{col2hex} converts colours specified in any format into their hexadecimal character codes. \code{rgb2hex} converts RGB colour values into their hexadecimal character codes. It is a very minor extension to \code{\link[grDevices]{rgb}}. Arguments to \code{rgb2hex} should be similar to arguments to \code{\link[grDevices]{rgb}}. \code{rgb2hsva} converts RGB colour values into HSV colour values including the alpha (transparency) channel. It is an extension of \code{\link[grDevices]{rgb2hsv}}. Arguments to \code{rgb2hsva} should be similar to arguments to \code{\link[grDevices]{rgb2hsv}}. \code{paletteindex} checks whether the colour or colours specified by \code{x} are available in the default palette returned by \code{\link[grDevices]{palette}()}. If so, it returns the index or indices of the colours in the palette. If not, it returns \code{NA}. \code{samecolour} decides whether two colours \code{x} and \code{y} are equivalent. \code{is.grey} determines whether each entry of \code{x} is a greyscale colour, and returns a logical vector. \code{to.grey} converts the colour data in \code{x} to greyscale colours. 
Alternatively \code{x} can be an object of class \code{"colourmap"} and \code{to.grey(x)} is the modified colour map. \code{to.opaque} converts the colours in \code{x} to opaque (non-transparent) colours, and \code{to.transparent} converts them to transparent colours with a specified transparency value. Note that \code{to.transparent(x,1)} is equivalent to \code{to.opaque(x)}. For \code{to.grey}, \code{to.opaque} and \code{to.transparent}, if all the data in \code{x} specifies colours from the standard palette, and if the result would be equivalent to \code{x}, then the result is identical to \code{x}. \code{complementarycolour} replaces each colour by its complementary colour in RGB space (the colour obtained by replacing RGB values \code{(r, g, b)} by \code{(255-r, 255-g, 255-b)}). The transparency value is not changed. Alternatively \code{x} can be an object of class \code{"colourmap"} and \code{complementarycolour(x)} is the modified colour map. \code{interp.colours} interpolates between each successive pair of colours in a sequence of colours, to generate a more finely-spaced sequence. It uses linear interpolation in HSV space (with hue represented as a two-dimensional unit vector). } \section{Warning}{ \code{paletteindex("green")} returns \code{NA} because the green colour in the default palette is called \code{"green3"}. } \value{ For \code{col2hex} and \code{rgb2hex} a character vector containing hexadecimal colour codes. For \code{to.grey}, \code{to.opaque} and \code{to.transparent}, either a character vector containing hexadecimal colour codes, or a value identical to the input \code{x}. For \code{rgb2hsva}, a matrix with 3 or 4 rows containing HSV colour values. For \code{paletteindex}, an integer vector, possibly containing \code{NA} values. For \code{samecolour} and \code{is.grey}, a logical value or logical vector. 
} \author{\adrian and \rolf } \seealso{ \code{\link[grDevices]{col2rgb}}, \code{\link[grDevices]{rgb2hsv}}, \code{\link[grDevices]{palette}}. See also the class of colour map objects in the \pkg{spatstat} package: \code{\link{colourmap}}, \code{\link{interp.colourmap}}, \code{\link{tweak.colourmap}}. } \examples{ samecolour("grey", "gray") paletteindex("grey") col2hex("orange") to.grey("orange") complementarycolour("orange") is.grey("lightgrey") is.grey(8) to.transparent("orange", 0.5) to.opaque("red") interp.colours(c("orange", "red", "violet"), 5) } \keyword{color} spatstat/man/Tstat.Rd0000644000176200001440000000562513160710571014267 0ustar liggesusers\name{Tstat} \alias{Tstat} \title{ Third order summary statistic } \description{ Computes the third order summary statistic \eqn{T(r)} of a spatial point pattern. } \usage{ Tstat(X, ..., r = NULL, rmax = NULL, correction = c("border", "translate"), ratio = FALSE, verbose=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{T(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{T(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{rmax}{ Optional. Numeric. The maximum value of \eqn{r} for which \eqn{T(r)} should be estimated. } \item{correction}{ Optional. A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"translate"}, \code{"translation"}, or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{verbose}{ Logical. 
If \code{TRUE}, an estimate of the computation time is printed. } } \details{ This command calculates the third-order summary statistic \eqn{T(r)} for a spatial point patterns, defined by Schladitz and Baddeley (2000). The definition of \eqn{T(r)} is similar to the definition of Ripley's \eqn{K} function \eqn{K(r)}, except that \eqn{K(r)} counts pairs of points while \eqn{T(r)} counts triples of points. Essentially \eqn{T(r)} is a rescaled cumulative distribution function of the diameters of triangles in the point pattern. The diameter of a triangle is the length of its longest side. } \section{Computation time}{ If the number of points is large, the algorithm can take a very long time to inspect all possible triangles. A rough estimate of the total computation time will be printed at the beginning of the calculation. If this estimate seems very large, stop the calculation using the user interrupt signal, and call \code{Tstat} again, using \code{rmax} to restrict the range of \code{r} values, thus reducing the number of triangles to be inspected. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Schladitz, K. and Baddeley, A. (2000) A third order point process characteristic. \emph{Scandinavian Journal of Statistics} \bold{27} (2000) 657--671. } \seealso{ \code{\link{Kest}} } \examples{ plot(Tstat(redwood)) } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/dppMatern.Rd0000644000176200001440000000257313160710571015121 0ustar liggesusers\name{dppMatern} \alias{dppMatern} \title{Whittle-Matern Determinantal Point Process Model} \description{ Function generating an instance of the Whittle-Matern determinantal point process model } \usage{dppMatern(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details. 
} } \details{ The Whittle-\ifelse{latex}{\out{Mat\' ern}}{Matern} DPP is defined in (Lavancier, \ifelse{latex}{\out{M\o ller}}{Moller} and Rubak, 2015) The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian \rolf and \ege } \references{ Lavancier, F. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015) Determinantal point process models and statistical inference \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--977. } \examples{ m <- dppMatern(lambda=100, alpha=.02, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppPowerExp}} } spatstat/man/CDF.Rd0000644000176200001440000000245413160710571013561 0ustar liggesusers\name{CDF} \alias{CDF} \alias{CDF.density} \title{ Cumulative Distribution Function From Kernel Density Estimate } \description{ Given a kernel estimate of a probability density, compute the corresponding cumulative distribution function. } \usage{ CDF(f, \dots) \method{CDF}{density}(f, \dots, warn = TRUE) } \arguments{ \item{f}{ Density estimate (object of class \code{"density"}). } \item{\dots}{ Ignored. } \item{warn}{ Logical value indicating whether to issue a warning if the density estimate \code{f} had to be renormalised because it was computed in a restricted interval. } } \details{ \code{CDF} is generic, with a method for class \code{"density"}. This calculates the cumulative distribution function whose probability density has been estimated and stored in the object \code{f}. 
The object \code{f} must belong to the class \code{"density"}, and would typically have been obtained from a call to the function \code{\link[stats]{density}}. } \value{ A function, which can be applied to any numeric value or vector of values. } \author{ \spatstatAuthors } \seealso{ \code{\link[stats]{density}}, \code{\link{quantile.density}} } \examples{ b <- density(runif(10)) f <- CDF(b) f(0.5) plot(f) } \keyword{nonparametric} \keyword{univar} spatstat/man/quadrat.test.Rd0000644000176200001440000002465513160710621015607 0ustar liggesusers\name{quadrat.test} \alias{quadrat.test} \alias{quadrat.test.ppp} \alias{quadrat.test.ppm} \alias{quadrat.test.quadratcount} \title{Dispersion Test for Spatial Point Pattern Based on Quadrat Counts} \description{ Performs a test of Complete Spatial Randomness for a given point pattern, based on quadrat counts. Alternatively performs a goodness-of-fit test of a fitted inhomogeneous Poisson model. By default performs chi-squared tests; can also perform Monte Carlo based tests. } \usage{ quadrat.test(X, ...) \method{quadrat.test}{ppp}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{ppm}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{quadratcount}(X, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, ..., nsim=1999) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) to be subjected to the goodness-of-fit test. Alternatively a fitted point process model (object of class \code{"ppm"}) to be tested. Alternatively \code{X} can be the result of applying \code{\link{quadratcount}} to a point pattern. 
} \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{alternative}{ Character string (partially matched) specifying the alternative hypothesis. } \item{method}{ Character string (partially matched) specifying the test to use: either \code{method="Chisq"} for the chi-squared test (the default), or \code{method="MonteCarlo"} for a Monte Carlo test. } \item{conditional}{ Logical. Should the Monte Carlo test be conducted conditionally upon the observed number of points of the pattern? Ignored if \code{method="Chisq"}. } \item{CR}{ Optional. Numerical value of the index \eqn{\lambda}{lambda} for the Cressie-Read test statistic. } \item{lambda}{ Optional. Pixel image (object of class \code{"im"}) or function (class \code{"funxy"}) giving the predicted intensity of the point process. } \item{\dots}{Ignored.} \item{xbreaks}{ Optional. Numeric vector giving the \eqn{x} coordinates of the boundaries of the quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Optional. Numeric vector giving the \eqn{y} coordinates of the boundaries of the quadrats. Incompatible with \code{ny}. } \item{tess}{ Tessellation (object of class \code{"tess"} or something acceptable to \code{\link{as.tess}}) determining the quadrats. Incompatible with \code{nx, ny, xbreaks, ybreaks}. } \item{nsim}{ The number of simulated samples to generate when \code{method="MonteCarlo"}. } } \details{ These functions perform \eqn{\chi^2}{chi^2} tests or Monte Carlo tests of goodness-of-fit for a point process model, based on quadrat counts. The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), split point patterns (class \code{"splitppp"}), point process models (class \code{"ppm"}) and quadrat count tables (class \code{"quadratcount"}). 
\itemize{ \item if \code{X} is a point pattern, we test the null hypothesis that the data pattern is a realisation of Complete Spatial Randomness (the uniform Poisson point process). Marks in the point pattern are ignored. (If \code{lambda} is given then the null hypothesis is the Poisson process with intensity \code{lambda}.) \item if \code{X} is a split point pattern, then for each of the component point patterns (taken separately) we test the null hypotheses of Complete Spatial Randomness. See \code{\link{quadrat.test.splitppp}} for documentation. \item If \code{X} is a fitted point process model, then it should be a Poisson point process model. The data to which this model was fitted are extracted from the model object, and are treated as the data point pattern for the test. We test the null hypothesis that the data pattern is a realisation of the (inhomogeneous) Poisson point process specified by \code{X}. } In all cases, the window of observation is divided into tiles, and the number of data points in each tile is counted, as described in \code{\link{quadratcount}}. The quadrats are rectangular by default, or may be regions of arbitrary shape specified by the argument \code{tess}. The expected number of points in each quadrat is also calculated, as determined by CSR (in the first case) or by the fitted model (in the second case). Then the Pearson \eqn{X^2} statistic \deqn{ X^2 = sum((observed - expected)^2/expected) } is computed. If \code{method="Chisq"} then a \eqn{\chi^2}{chi^2} test of goodness-of-fit is performed by comparing the test statistic to the \eqn{\chi^2}{chi^2} distribution with \eqn{m-k} degrees of freedom, where \code{m} is the number of quadrats and \eqn{k} is the number of fitted parameters (equal to 1 for \code{quadrat.test.ppp}). The default is to compute the \emph{two-sided} \eqn{p}-value, so that the test will be declared significant if \eqn{X^2} is either very large or very small. 
One-sided \eqn{p}-values can be obtained by specifying the \code{alternative}. An important requirement of the \eqn{\chi^2}{chi^2} test is that the expected counts in each quadrat be greater than 5. If \code{method="MonteCarlo"} then a Monte Carlo test is performed, obviating the need for all expected counts to be at least 5. In the Monte Carlo test, \code{nsim} random point patterns are generated from the null hypothesis (either CSR or the fitted point process model). The Pearson \eqn{X^2} statistic is computed as above. The \eqn{p}-value is determined by comparing the \eqn{X^2} statistic for the observed point pattern, with the values obtained from the simulations. Again the default is to compute the \emph{two-sided} \eqn{p}-value. If \code{conditional} is \code{TRUE} then the simulated samples are generated from the multinomial distribution with the number of \dQuote{trials} equal to the number of observed points and the vector of probabilities equal to the expected counts divided by the sum of the expected counts. Otherwise the simulated samples are independent Poisson counts, with means equal to the expected counts. If the argument \code{CR} is given, then instead of the Pearson \eqn{X^2} statistic, the Cressie-Read (1984) power divergence test statistic \deqn{ 2nI = \frac{2}{\lambda(\lambda+1)} \sum_i \left[ \left( \frac{X_i}{E_i} \right)^\lambda - 1 \right] }{ 2nI = (2/(lambda * (lambda+1))) * sum((X[i]/E[i])^lambda - 1) } is computed, where \eqn{X_i}{X[i]} is the \eqn{i}th observed count and \eqn{E_i}{E[i]} is the corresponding expected count, and the exponent \eqn{\lambda}{lambda} is equal to \code{CR}. The value \code{CR=1} gives the Pearson \eqn{X^2} statistic; \code{CR=0} gives the likelihood ratio test statistic \eqn{G^2}; \code{CR=-1/2} gives the Freeman-Tukey statistic \eqn{T^2}; \code{CR=-1} gives the modified likelihood ratio test statistic \eqn{GM^2}; and \code{CR=-2} gives Neyman's modified statistic \eqn{NM^2}. 
In all cases the asymptotic distribution of this test statistic is the same \eqn{\chi^2}{chi^2} distribution as above. The return value is an object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The return value also belongs to the special class \code{"quadrat.test"}. Plotting the object will display the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. } \seealso{ \code{\link{quadrat.test.splitppp}}, \code{\link{quadratcount}}, \code{\link{quadrats}}, \code{\link{quadratresample}}, \code{\link{chisq.test}}, \code{\link{cdf.test}}. To test a Poisson point process model against a specific alternative, use \code{\link{anova.ppm}}. } \value{ An object of class \code{"htest"}. See \code{\link{chisq.test}} for explanation. The return value is also an object of the special class \code{"quadrattest"}, and there is a plot method for this class. See the examples. } \references{ Cressie, N. and Read, T.R.C. (1984) Multinomial goodness-of-fit tests. \emph{Journal of the Royal Statistical Society, Series B} \bold{46}, 440--464. } \examples{ data(simdat) quadrat.test(simdat) quadrat.test(simdat, 4, 3) quadrat.test(simdat, alternative="regular") quadrat.test(simdat, alternative="clustered") # Using Monte Carlo p-values quadrat.test(swedishpines) # Get warning, small expected values. 
\dontrun{ quadrat.test(swedishpines, method="M", nsim=4999) quadrat.test(swedishpines, method="M", nsim=4999, conditional=FALSE) } \testonly{ quadrat.test(swedishpines, method="M", nsim=19) quadrat.test(swedishpines, method="M", nsim=19, conditional=FALSE) } # quadrat counts qS <- quadratcount(simdat, 4, 3) quadrat.test(qS) # fitted model: inhomogeneous Poisson fitx <- ppm(simdat, ~x, Poisson()) quadrat.test(fitx) te <- quadrat.test(simdat, 4) residuals(te) # Pearson residuals plot(te) plot(simdat, pch="+", cols="green", lwd=2) plot(te, add=TRUE, col="red", cex=1.4, lty=2, lwd=3) sublab <- eval(substitute(expression(p[chi^2]==z), list(z=signif(te$p.value,3)))) title(sub=sublab, cex.sub=3) # quadrats of irregular shape B <- dirichlet(runifpoint(6, Window(simdat))) qB <- quadrat.test(simdat, tess=B) plot(simdat, main="quadrat.test(simdat, tess=B)", pch="+") plot(qB, add=TRUE, col="red", lwd=2, cex=1.2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} spatstat/man/quadrats.Rd0000644000176200001440000000550613160710621015006 0ustar liggesusers\name{quadrats} \alias{quadrats} \title{Divide Region into Quadrats} \description{ Divides window into rectangular quadrats and returns the quadrats as a tessellation. } \usage{ quadrats(X, nx = 5, ny = nx, xbreaks = NULL, ybreaks = NULL, keepempty=FALSE) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or anything that can be coerced to a window using \code{\link{as.owin}}, such as a point pattern. } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{xbreaks}{ Numeric vector giving the \eqn{x} coordinates of the boundaries of the quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Numeric vector giving the \eqn{y} coordinates of the boundaries of the quadrats. Incompatible with \code{ny}. } \item{keepempty}{ Logical value indicating whether to delete or retain empty quadrats. See Details. 
} } \details{ If the window \code{X} is a rectangle, it is divided into an \code{nx * ny} grid of rectangular tiles or `quadrats'. If \code{X} is not a rectangle, then the bounding rectangle of \code{X} is first divided into an \code{nx * ny} grid of rectangular tiles, and these tiles are then intersected with the window \code{X}. The resulting tiles are returned as a tessellation (object of class \code{"tess"}) which can be plotted and used in other analyses. If \code{xbreaks} is given, it should be a numeric vector giving the \eqn{x} coordinates of the quadrat boundaries. If it is not given, it defaults to a sequence of \code{nx+1} values equally spaced over the range of \eqn{x} coordinates in the window \code{Window(X)}. Similarly if \code{ybreaks} is given, it should be a numeric vector giving the \eqn{y} coordinates of the quadrat boundaries. It defaults to a vector of \code{ny+1} values equally spaced over the range of \eqn{y} coordinates in the window. The lengths of \code{xbreaks} and \code{ybreaks} may be different. By default (if \code{keepempty=FALSE}), any rectangular tile which does not intersect the window \code{X} is ignored, and only the non-empty intersections are treated as quadrats, so the tessellation may consist of fewer than \code{nx * ny} tiles. If \code{keepempty=TRUE}, empty intersections are retained, and the tessellation always contains exactly \code{nx * ny} tiles, some of which may be empty. } \value{ A tessellation (object of class \code{"tess"}) as described under \code{\link{tess}}. 
} \examples{ W <- square(10) Z <- quadrats(W, 4, 5) plot(Z) data(letterR) plot(quadrats(letterR, 5, 7)) } \seealso{ \code{\link{tess}}, \code{\link{quadratcount}}, \code{\link{quadrat.test}}, \code{\link{quadratresample}} } \author{\adrian and \rolf } \keyword{utilities} \keyword{datagen} spatstat/man/logLik.ppm.Rd0000644000176200001440000001125313160710621015172 0ustar liggesusers\name{logLik.ppm} \alias{logLik.ppm} \alias{deviance.ppm} \alias{AIC.ppm} \alias{extractAIC.ppm} \alias{nobs.ppm} \title{Log Likelihood and AIC for Point Process Model} \description{ Extracts the log likelihood, deviance, and AIC of a fitted Poisson point process model, or analogous quantities based on the pseudolikelihood or logistic likelihood for a fitted Gibbs point process model. } \usage{ \method{logLik}{ppm}(object, \dots, new.coef=NULL, warn=TRUE, absolute=FALSE) \method{deviance}{ppm}(object, \dots) \method{AIC}{ppm}(object, \dots, k=2, takeuchi=TRUE) \method{extractAIC}{ppm}(fit, scale=0, k=2, \dots, takeuchi=TRUE) \method{nobs}{ppm}(object, \dots) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"ppm"}. } \item{\dots}{Ignored.} \item{warn}{ If \code{TRUE}, a warning is given when the pseudolikelihood or logistic likelihood is returned instead of the likelihood. } \item{absolute}{ Logical value indicating whether to include constant terms in the loglikelihood. } \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(object)}. } \item{takeuchi}{ Logical value specifying whether to use the Takeuchi penalty (\code{takeuchi=TRUE}) or the number of fitted parameters (\code{takeuchi=FALSE}) in calculating AIC. 
} } \details{ These functions are methods for the generic commands \code{\link[stats]{logLik}}, \code{\link[stats]{deviance}}, \code{\link[stats]{extractAIC}} and \code{\link[stats]{nobs}} for the class \code{"ppm"}. An object of class \code{"ppm"} represents a fitted Poisson or Gibbs point process model. It is obtained from the model-fitting function \code{\link{ppm}}. The method \code{logLik.ppm} computes the maximised value of the log likelihood for the fitted model \code{object} (as approximated by quadrature using the Berman-Turner approximation) is extracted. If \code{object} is not a Poisson process, the maximised log \emph{pseudolikelihood} is returned, with a warning (if \code{warn=TRUE}). The Akaike Information Criterion AIC for a fitted model is defined as \deqn{ AIC = -2 \log(L) + k \times \mbox{penalty} }{ AIC = -2 * log(L) + k * penalty } where \eqn{L} is the maximised likelihood of the fitted model, and \eqn{\mbox{penalty}}{penalty} is a penalty for model complexity, usually equal to the effective degrees of freedom of the model. The method \code{extractAIC.ppm} returns the \emph{analogous} quantity \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*}, the quadrature approximation to the likelihood (if \code{fit} is a Poisson model) or the pseudolikelihood or logistic likelihood (if \code{fit} is a Gibbs model). The \eqn{\mbox{penalty}}{penalty} term is calculated as follows. If \code{takeuchi=FALSE} then \eqn{\mbox{penalty}}{penalty} is the number of fitted parameters. If \code{takeuchi=TRUE} then \eqn{\mbox{penalty} = \mbox{trace}(J H^{-1})}{penalty = trace(J H^(-1))} where \eqn{J} and \eqn{H} are the estimated variance and hessian, respectively, of the composite score. These two choices are equivalent for a Poisson process. The method \code{nobs.ppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link[stats]{step}} uses these methods. 
} \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value. \code{nobs} returns an integer value. } \references{ Varin, C. and Vidoni, P. (2005) A note on composite likelihood inference and model selection. \emph{Biometrika} \bold{92}, 519--528. } \seealso{ \code{\link{ppm}}, \code{\link{as.owin}}, \code{\link{coef.ppm}}, \code{\link{fitted.ppm}}, \code{\link{formula.ppm}}, \code{\link{model.frame.ppm}}, \code{\link{model.matrix.ppm}}, \code{\link{plot.ppm}}, \code{\link{predict.ppm}}, \code{\link{residuals.ppm}}, \code{\link{simulate.ppm}}, \code{\link{summary.ppm}}, \code{\link{terms.ppm}}, \code{\link{update.ppm}}, \code{\link{vcov.ppm}}. } \author{\adrian \rolf and \ege } \examples{ data(cells) fit <- ppm(cells, ~x) nobs(fit) logLik(fit) deviance(fit) extractAIC(fit) AIC(fit) step(fit) } \keyword{spatial} \keyword{models} spatstat/man/anova.mppm.Rd0000644000176200001440000001113213160710621015226 0ustar liggesusers\name{anova.mppm} \alias{anova.mppm} \title{ANOVA for Fitted Point Process Models for Replicated Patterns} \description{ Performs analysis of deviance for one or more point process models fitted to replicated point pattern data. } \usage{ \method{anova}{mppm}(object, \dots, test=NULL, adjust=TRUE, fine=FALSE, warn=TRUE) } \arguments{ \item{object}{ Object of class \code{"mppm"} representing a point process model that was fitted to replicated point patterns. } \item{\dots}{ Optional. Additional objects of class \code{"mppm"}. } \item{test}{ Type of hypothesis test to perform. A character string, partially matching one of \code{"Chisq"}, \code{"LRT"}, \code{"Rao"}, \code{"score"}, \code{"F"} or \code{"Cp"}, or \code{NULL} indicating that no test should be performed. 
} \item{adjust}{ Logical value indicating whether to correct the pseudolikelihood ratio when some of the models are not Poisson processes. } \item{fine}{ Logical value passed to \code{\link{vcov.ppm}} indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}) of the variance of the fitted coefficients of each model. Relevant only when some of the models are not Poisson and \code{adjust=TRUE}. } \item{warn}{ Logical value indicating whether to issue warnings if problems arise. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link{anova}} for comparing several fitted point process models of class \code{"mppm"}, usually generated by the model-fitting function \code{\link{mppm}}). If the fitted models are all Poisson point processes, then this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link{anova.glm}}. If some of the fitted models are \emph{not} Poisson point processes, the `deviance' differences in this table are 'pseudo-deviances' equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{ppm}}). It is not valid to compare these values to the chi-squared distribution. In this case, if \code{adjust=TRUE} (the default), the pseudo-deviances will be adjusted using the method of Pace et al (2011) and Baddeley, Turner and Rubak (2015) so that the chi-squared test is valid. It is strongly advisable to perform this adjustment. The argument \code{test} determines which hypothesis test, if any, will be performed to compare the models. 
The argument \code{test} should be a character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, or \code{NULL}. The first option \code{"Chisq"} gives the likelihood ratio test based on the asymptotic chi-squared distribution of the deviance difference. The meaning of the other options is explained in \code{\link{anova.glm}}. For random effects models, only \code{"Chisq"} is available, and again gives the likelihood ratio test. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \seealso{ \code{\link{mppm}} } \examples{ H <- hyperframe(X=waterstriders) mod0 <- mppm(X~1, data=H, Poisson()) modx <- mppm(X~x, data=H, Poisson()) anova(mod0, modx, test="Chi") mod0S <- mppm(X~1, data=H, Strauss(2)) modxS <- mppm(X~x, data=H, Strauss(2)) anova(mod0S, modxS, test="Chi") } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530. Pace, L., Salvan, A. and Sartori, N. (2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. 
} \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/suffstat.Rd0000644000176200001440000001072513160710621015020 0ustar liggesusers\name{suffstat} \alias{suffstat} \title{Sufficient Statistic of Point Process Model} \description{ The canonical sufficient statistic of a point process model is evaluated for a given point pattern. } \usage{ suffstat(model, X=data.ppm(model)) } \arguments{ \item{model}{A fitted point process model (object of class \code{"ppm"}). } \item{X}{ A point pattern (object of class \code{"ppp"}). } } \value{ A numeric vector of sufficient statistics. The entries correspond to the model coefficients \code{coef(model)}. } \details{ The canonical sufficient statistic of \code{model} is evaluated for the point pattern \code{X}. This computation is useful for various Monte Carlo methods. Here \code{model} should be a point process model (object of class \code{"ppm"}, see \code{\link{ppm.object}}), typically obtained from the model-fitting function \code{\link{ppm}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). Every point process model fitted by \code{\link{ppm}} has a probability density of the form \deqn{f(x) = Z(\theta) \exp(\theta^T S(x))}{f(x) = Z(theta) exp(theta * S(x))} where \eqn{x} denotes a typical realisation (i.e. a point pattern), \eqn{\theta}{theta} is the vector of model coefficients, \eqn{Z(\theta)}{Z(theta)} is a normalising constant, and \eqn{S(x)} is a function of the realisation \eqn{x}, called the ``canonical sufficient statistic'' of the model. For example, the stationary Poisson process has canonical sufficient statistic \eqn{S(x)=n(x)}, the number of points in \eqn{x}. The stationary Strauss process with interaction range \eqn{r} (and fitted with no edge correction) has canonical sufficient statistic \eqn{S(x)=(n(x),s(x))} where \eqn{s(x)} is the number of pairs of points in \eqn{x} which are closer than a distance \eqn{r} to each other. 
\code{suffstat(model, X)} returns the value of \eqn{S(x)}, where \eqn{S} is the canonical sufficient statistic associated with \code{model}, evaluated when \eqn{x} is the given point pattern \code{X}. The result is a numeric vector, with entries which correspond to the entries of the coefficient vector \code{coef(model)}. The sufficient statistic \eqn{S} does not depend on the fitted coefficients of the model. However it does depend on the irregular parameters which are fixed in the original call to \code{\link{ppm}}, for example, the interaction range \code{r} of the Strauss process. The sufficient statistic also depends on the edge correction that was used to fit the model. For example in a Strauss process, \itemize{ \item If the model is fitted with \code{correction="none"}, the sufficient statistic is \eqn{S(x) = (n(x), s(x))} where \eqn{n(x)} is the number of points and \eqn{s(x)} is the number of pairs of points which are closer than \eqn{r} units apart. \item If the model is fitted with \code{correction="periodic"}, the sufficient statistic is the same as above, except that distances are measured in the periodic sense. \item If the model is fitted with \code{correction="translate"}, then \eqn{n(x)} is unchanged but \eqn{s(x)} is replaced by a weighted sum (the sum of the translation correction weights for all pairs of points which are closer than \eqn{r} units apart). \item If the model is fitted with \code{correction="border"} (the default), then points lying less than \eqn{r} units from the boundary of the observation window are treated as fixed. Thus \eqn{n(x)} is replaced by the number \eqn{n_r(x)}{n[r](x)} of points lying at least \eqn{r} units from the boundary of the observation window, and \eqn{s(x)} is replaced by the number \eqn{s_r(x)}{s[r](x)} of pairs of points, which are closer than \eqn{r} units apart, and at least one of which lies more than \eqn{r} units from the boundary of the observation window. 
} Non-finite values of the sufficient statistic (\code{NA} or \code{-Inf}) may be returned if the point pattern \code{X} is not a possible realisation of the model (i.e. if \code{X} has zero probability of occurring under \code{model} for all values of the canonical coefficients \eqn{\theta}{theta}). } \seealso{ \code{\link{ppm}} } \examples{ fitS <- ppm(swedishpines~1, Strauss(7)) X <- rpoispp(intensity(swedishpines), win=Window(swedishpines)) suffstat(fitS, X) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/WindowOnly.Rd0000644000176200001440000000455213160710571015277 0ustar liggesusers\name{WindowOnly} \alias{Window.ppm} \alias{Window.kppm} \alias{Window.dppm} \alias{Window.lpp} \alias{Window.lppm} \alias{Window.msr} \alias{Window.quad} \alias{Window.quadratcount} \alias{Window.quadrattest} \alias{Window.tess} \alias{Window.layered} \alias{Window.distfun} \alias{Window.nnfun} \alias{Window.funxy} \alias{Window.rmhmodel} \alias{Window.leverage.ppm} \alias{Window.influence.ppm} \title{Extract Window of Spatial Object} \description{ Given a spatial object (such as a point pattern or pixel image) in two dimensions, these functions extract the window in which the object is defined. 
} \usage{ \method{Window}{ppm}(X, \dots, from=c("points", "covariates")) \method{Window}{kppm}(X, \dots, from=c("points", "covariates")) \method{Window}{dppm}(X, \dots, from=c("points", "covariates")) \method{Window}{lpp}(X, \dots) \method{Window}{lppm}(X, \dots) \method{Window}{msr}(X, \dots) \method{Window}{quad}(X, \dots) \method{Window}{quadratcount}(X, \dots) \method{Window}{quadrattest}(X, \dots) \method{Window}{tess}(X, \dots) \method{Window}{layered}(X, \dots) \method{Window}{distfun}(X, \dots) \method{Window}{nnfun}(X, \dots) \method{Window}{funxy}(X, \dots) \method{Window}{rmhmodel}(X, \dots) \method{Window}{leverage.ppm}(X, \dots) \method{Window}{influence.ppm}(X, \dots) } \arguments{ \item{X}{A spatial object.} \item{\dots}{Ignored.} \item{from}{Character string. See Details.} } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying an observation window. } \details{ These are methods for the generic function \code{\link{Window}} which extract the spatial window in which the object \code{X} is defined. The argument \code{from} applies when \code{X} is a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{from="data"} (the default), \code{Window} extracts the window of the original point pattern data to which the model was fitted. If \code{from="covariates"} then \code{Window} returns the window in which the spatial covariates of the model were provided. } \seealso{ \code{\link{Window}}, \code{\link{Window.ppp}}, \code{\link{Window.psp}}. \code{\link{owin.object}} } \examples{ X <- quadratcount(cells, 4) Window(X) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/edges.Rd0000644000176200001440000000202213160710571014243 0ustar liggesusers\name{edges} \alias{edges} \title{ Extract Boundary Edges of a Window. } \description{ Extracts the boundary edges of a window and returns them as a line segment pattern. 
} \usage{ edges(x, \dots, window = NULL, check = FALSE) } \arguments{ \item{x}{ A window (object of class \code{"owin"}), or data acceptable to \code{\link{as.owin}}, specifying the window whose boundary is to be extracted. } \item{\dots}{ Ignored. } \item{window}{ Window to contain the resulting line segments. Defaults to \code{as.rectangle(x)}. } \item{check}{ Logical. Whether to check the validity of the resulting segment pattern. } } \details{ The boundary edges of the window \code{x} will be extracted as a line segment pattern. } \value{ A line segment pattern (object of class \code{"psp"}). } \seealso{ \code{\link{perimeter}} for calculating the total length of the boundary. } \examples{ edges(square(1)) edges(letterR) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/MultiStraussHard.Rd0000644000176200001440000000673613160710571016452 0ustar liggesusers\name{MultiStraussHard} \alias{MultiStraussHard} \title{The Multitype/Hard Core Strauss Point Process Model} \description{ Creates an instance of the multitype/hard core Strauss point process model which can then be fitted to point pattern data. } \usage{ MultiStraussHard(iradii, hradii, types=NULL) } \arguments{ \item{iradii}{Matrix of interaction radii} \item{hradii}{Matrix of hard core radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype/hard core Strauss process with interaction radii \eqn{iradii[i,j]} and hard core radii \eqn{hradii[i,j]}. } \details{ This is a hybrid of the multitype Strauss process (see \code{\link{MultiStrauss}}) and the hard core process (case \eqn{\gamma=0}{gamma = 0} of the Strauss process). 
A pair of points of types \eqn{i} and \eqn{j} must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart; if the pair lies more than \eqn{h_{ij}}{h[i,j]} and less than \eqn{r_{ij}}{r[i,j]} units apart, it contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the probability density. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStraussHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrices \code{iradii} and \code{hradii}. The matrices \code{iradii} and \code{hradii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii and hardcore radii are specified in \code{MultiStraussHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiStraussHard()}. 
} \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{MultiStrauss}}, \code{\link{MultiHard}}, \code{\link{Strauss}} } \examples{ r <- matrix(3, nrow=2,ncol=2) h <- matrix(c(1,2,2,1), nrow=2,ncol=2) MultiStraussHard(r,h) # prints a sensible description of itself r <- 0.04 * matrix(c(1,2,2,1), nrow=2,ncol=2) h <- 0.02 * matrix(c(1,NA,NA,1), nrow=2,ncol=2) X <- amacrine \testonly{ X <- X[owin(c(0,0.8), c(0,1))] } fit <- ppm(X ~1, MultiStraussHard(r,h)) # fit stationary multitype hardcore Strauss process to `amacrine' } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype/hard core Strauss model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiStraussHard(types=NULL, iradii, hradii)}. The new code attempts to handle the old syntax as well. } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/duplicated.ppp.Rd0000644000176200001440000000550313160710571016077 0ustar liggesusers\name{duplicated.ppp} \alias{duplicated.ppp} \alias{duplicated.ppx} \alias{anyDuplicated.ppp} \alias{anyDuplicated.ppx} \title{Determine Duplicated Points in a Spatial Point Pattern} \description{ Determines which points in a spatial point pattern are duplicates of previous points, and returns a logical vector. } \usage{ \method{duplicated}{ppp}(x, \dots, rule=c("spatstat", "deldir", "unmark")) \method{duplicated}{ppx}(x, \dots) \method{anyDuplicated}{ppp}(x, \dots) \method{anyDuplicated}{ppx}(x, \dots) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"ppx"}). } \item{\dots}{ Ignored. 
} \item{rule}{ Character string. The rule for determining duplicated points. } } \value{ \code{duplicated(x)} returns a logical vector of length equal to the number of points in \code{x}. \code{anyDuplicated(x)} is a number equal to 0 if there are no duplicated points, and otherwise is equal to the index of the first duplicated point. } \details{ These are methods for the generic functions \code{\link{duplicated}} and \code{\link{anyDuplicated}} for point pattern datasets (of class \code{"ppp"}, see \code{\link{ppp.object}}, or class \code{"ppx"}). \code{anyDuplicated(x)} is a faster version of \code{any(duplicated(x))}. Two points in a point pattern are deemed to be identical if their \eqn{x,y} coordinates are the same, and their marks are also the same (if they carry marks). The Examples section illustrates how it is possible for a point pattern to contain a pair of identical points. This function determines which points in \code{x} duplicate other points that appeared earlier in the sequence. It returns a logical vector with entries that are \code{TRUE} for duplicated points and \code{FALSE} for unique (non-duplicated) points. If \code{rule="spatstat"} (the default), two points are deemed identical if their coordinates are equal according to \code{==}, \emph{and} their marks are equal according to \code{==}. This is the most stringent possible test. If \code{rule="unmark"}, duplicated points are determined by testing equality of their coordinates only, using \code{==}. If \code{rule="deldir"}, duplicated points are determined by testing equality of their coordinates only, using the function \code{\link[deldir]{duplicatedxy}} in the package \pkg{deldir}, which currently uses \code{\link{duplicated.data.frame}}. Setting \code{rule="deldir"} will ensure consistency with functions in the \pkg{deldir} package. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{unique.ppp}}, \code{\link{multiplicity.ppp}} } \examples{ X <- ppp(c(1,1,0.5), c(2,2,1), window=square(3)) duplicated(X) duplicated(X, rule="deldir") } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/iplot.Rd0000644000176200001440000000470313160710621014307 0ustar liggesusers\name{iplot} \alias{iplot} \alias{iplot.ppp} \alias{iplot.layered} \alias{iplot.linnet} \alias{iplot.lpp} \alias{iplot.default} \title{Point and Click Interface for Displaying Spatial Data} \description{ Plot spatial data with interactive (point-and-click) control over the plot. } \usage{ iplot(x, ...) \method{iplot}{ppp}(x, ..., xname) \method{iplot}{linnet}(x, ..., xname) \method{iplot}{lpp}(x, ..., xname) \method{iplot}{layered}(x, ..., xname, visible) \method{iplot}{default}(x, ..., xname) } \arguments{ \item{x}{ The spatial object to be plotted. An object of class \code{"ppp"}, \code{"psp"}, \code{"im"}, \code{"owin"}, \code{"linnet"}, \code{"lpp"} or \code{"layered"}. } \item{\dots}{Ignored.} \item{xname}{ Optional. Character string to use as the title of the dataset. } \item{visible}{ Optional. Logical vector indicating which layers of \code{x} should initially be turned on (visible). } } \value{ \code{NULL}. } \details{ The function \code{iplot} generates a plot of the spatial dataset \code{x} and allows interactive control over the appearance of the plot using a point-and-click interface. The function \code{iplot} is generic, with methods for for point patterns (\code{\link{iplot.ppp}}), layered objects (\code{\link{iplot.layered}}) and a default method. The default method will handle objects of class \code{"psp"}, \code{"im"} and \code{"owin"} at least. A new popup window is launched. The spatial dataset \code{x} is displayed in the middle of the window using the appropriate \code{plot} method. The left side of the window contains buttons and sliders allowing the user to change the plot parameters. 
The right side of the window contains navigation controls for zooming (changing magnification), panning (shifting the field of view relative to the data), redrawing and exiting. If the user clicks in the area where the point pattern is displayed, the field of view will be re-centred at the point that was clicked. } \seealso{ \code{\link{istat}} } \section{Package Dependence}{ This function requires the package \pkg{rpanel} to be loaded. } \examples{ if(interactive() && require(rpanel)) { iplot(cells) iplot(amacrine) iplot(lansing) L <- layered(D=distmap(cells), P=cells, plotargs=list(list(ribbon=FALSE), list(pch=16))) iplot(L) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/dg.sigtrace.Rd0000644000176200001440000001503313160710571015354 0ustar liggesusers\name{dg.sigtrace} \alias{dg.sigtrace} \title{ Significance Trace of Dao-Genton Test } \description{ Generates a Significance Trace of the Dao and Genton (2014) test for a spatial point pattern. } \usage{ dg.sigtrace(X, fun = Lest, \dots, exponent = 2, nsim = 19, nsimsub = nsim - 1, alternative = c("two.sided", "less", "greater"), rmin=0, leaveout=1, interpolate = FALSE, confint = TRUE, alpha = 0.05, savefuns=FALSE, savepatterns=FALSE, verbose=FALSE) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. } \item{exponent}{ Positive number. Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. See Details. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. 
There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{confint}{ Logical value indicating whether to compute a confidence interval for the \sQuote{true} \eqn{p}-value. } \item{alpha}{ Significance level to be plotted (this has no effect on the calculation but is simply plotted as a reference value). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ The Dao and Genton (2014) test for a spatial point pattern is described in \code{\link{dg.test}}. This test depends on the choice of an interval of distance values (the argument \code{rinterval}). 
A \emph{significance trace} (Bowman and Azzalini, 1997; Baddeley et al, 2014, 2015) of the test is a plot of the \eqn{p}-value obtained from the test against the length of the interval \code{rinterval}. The command \code{dg.sigtrace} effectively performs \code{\link{dg.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting \eqn{p}-values as a function of \eqn{R}. The result is an object of class \code{"fv"} that can be plotted to obtain the significance trace. The plot shows the Dao-Genton adjusted \eqn{p}-value (solid black line), the critical value \code{0.05} (dashed red line), and a pointwise 95\% confidence band (grey shading) for the \sQuote{true} (Neyman-Pearson) \eqn{p}-value. The confidence band is based on the Agresti-Coull (1998) confidence interval for a binomial proportion. If \code{X} is an envelope object and \code{fun=NULL} then the code will re-use the simulated functions stored in \code{X}. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ An object of class \code{"fv"} that can be plotted to obtain the significance trace. } \references{ Agresti, A. and Coull, B.A. (1998) Approximate is better than \dQuote{Exact} for interval estimation of binomial proportions. 
\emph{American Statistician} \bold{52}, 119--126. Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. Bowman, A.W. and Azzalini, A. (1997) \emph{Applied smoothing techniques for data analysis: the kernel approach with S-Plus illustrations}. Oxford University Press, Oxford. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ \code{\link{dg.test}} for the Dao-Genton test, \code{\link{dclf.sigtrace}} for significance traces of other tests. } \examples{ ns <- if(interactive()) 19 else 5 plot(dg.sigtrace(cells, nsim=ns)) } \keyword{spatial} \keyword{htest} spatstat/man/ppp.Rd0000644000176200001440000002304013160710621013752 0ustar liggesusers\name{ppp} \alias{ppp} \title{Create a Point Pattern} \description{ Creates an object of class \code{"ppp"} representing a point pattern dataset in the two-dimensional plane. } \usage{ ppp(x,y, \dots, window, marks, check=TRUE, checkdup=check, drop=TRUE) } \arguments{ \item{x}{Vector of \eqn{x} coordinates of data points} \item{y}{Vector of \eqn{y} coordinates of data points} \item{window}{window of observation, an object of class \code{"owin"}} \item{\dots}{arguments passed to \code{\link{owin}} to create the window, if \code{window} is missing} \item{marks}{(optional) mark values for the points. 
A vector or data frame.} \item{check}{ Logical value indicating whether to check that all the \eqn{(x,y)} points lie inside the specified window. Do not set this to \code{FALSE} unless you are absolutely sure that this check is unnecessary. See Warnings below. } \item{checkdup}{ Logical value indicating whether to check for duplicated coordinates. See Warnings below. } \item{drop}{ Logical flag indicating whether to simplify data frames of marks. See Details. } } \value{ An object of class \code{"ppp"} describing a point pattern in the two-dimensional plane (see \code{\link{ppp.object}}). } \details{ In the \pkg{spatstat} library, a point pattern dataset is described by an object of class \code{"ppp"}. This function creates such objects. The vectors \code{x} and \code{y} must be numeric vectors of equal length. They are interpreted as the cartesian coordinates of the points in the pattern. Note that \code{x} and \code{y} are permitted to have length zero, corresponding to an empty point pattern; this is the default if these arguments are missing. A point pattern dataset is assumed to have been observed within a specific region of the plane called the observation window. An object of class \code{"ppp"} representing a point pattern contains information specifying the observation window. This window must always be specified when creating a point pattern dataset; there is intentionally no default action of ``guessing'' the window dimensions from the data points alone. You can specify the observation window in several (mutually exclusive) ways: \itemize{ \item \code{xrange, yrange} specify a rectangle with these dimensions; \item \code{poly} specifies a polygonal boundary. If the boundary is a single polygon then \code{poly} must be a list with components \code{x,y} giving the coordinates of the vertices. 
If the boundary consists of several disjoint polygons then \code{poly} must be a list of such lists so that \code{poly[[i]]$x} gives the \eqn{x} coordinates of the vertices of the \eqn{i}th boundary polygon. \item \code{mask} specifies a binary pixel image with entries that are \code{TRUE} if the corresponding pixel is inside the window. \item \code{window} is an object of class \code{"owin"} specifying the window. A window object can be created by \code{\link{owin}} from raw coordinate data. Special shapes of windows can be created by the functions \code{\link{square}}, \code{\link{hexagon}}, \code{\link{regularpolygon}}, \code{\link{disc}} and \code{\link{ellipse}}. See the Examples. } The arguments \code{xrange, yrange} or \code{poly} or \code{mask} are passed to the window creator function \code{\link{owin}} for interpretation. See \code{\link{owin}} for further details. The argument \code{window}, if given, must be an object of class \code{"owin"}. It is a full description of the window geometry, and could have been obtained from \code{\link{owin}} or \code{\link{as.owin}}, or by just extracting the observation window of another point pattern, or by manipulating such windows. See \code{\link{owin}} or the Examples below. The points with coordinates \code{x} and \code{y} \bold{must} lie inside the specified window, in order to define a valid object of this class. Any points which do not lie inside the window will be removed from the point pattern, and a warning will be issued. See the section on Rejected Points. The name of the unit of length for the \code{x} and \code{y} coordinates can be specified in the dataset, using the argument \code{unitname}, which is passed to \code{\link{owin}}. See the examples below, or the help file for \code{\link{owin}}. The optional argument \code{marks} is given if the point pattern is marked, i.e. if each data point carries additional information. 
For example, points which are classified into two or more different types, or colours, may be regarded as having a mark which identifies which colour they are. Data recording the locations and heights of trees in a forest can be regarded as a marked point pattern where the mark is the tree height. The argument \code{marks} can be either \itemize{ \item a vector, of the same length as \code{x} and \code{y}, which is interpreted so that \code{marks[i]} is the mark attached to the point \code{(x[i],y[i])}. If the mark is a real number then \code{marks} should be a numeric vector, while if the mark takes only a finite number of possible values (e.g. colours or types) then \code{marks} should be a \code{factor}. \item a data frame, with the number of rows equal to the number of points in the point pattern. The \code{i}th row of the data frame is interpreted as containing the mark values for the \code{i}th point in the point pattern. The columns of the data frame correspond to different mark variables (e.g. tree species and tree diameter). } If \code{drop=TRUE} (the default), then a data frame with only one column will be converted to a vector, and a data frame with no columns will be converted to \code{NULL}. See \code{\link{ppp.object}} for a description of the class \code{"ppp"}. Users would normally invoke \code{ppp} to create a point pattern, but the functions \code{\link{as.ppp}} and \code{scanpp} may sometimes be convenient. } \section{Invalid coordinate values}{ The coordinate vectors \code{x} and \code{y} must contain only finite numerical values. If the coordinates include any of the values \code{NA}, \code{NaN}, \code{Inf} or \code{-Inf}, these will be removed. } \section{Rejected points}{ The points with coordinates \code{x} and \code{y} \bold{must} lie inside the specified window, in order to define a valid object of class \code{"ppp"}. Any points which do not lie inside the window will be removed from the point pattern, and a warning will be issued. 
The rejected points are still accessible: they are stored as an attribute of the point pattern called \code{"rejects"} (which is an object of class \code{"ppp"} containing the rejected points in a large window). However, rejected points in a point pattern will be ignored by all other functions except \code{\link{plot.ppp}}. To remove the rejected points altogether, use \code{\link{as.ppp}}. To include the rejected points, you will need to find a larger window that contains them, and use this larger window in a call to \code{ppp}. } \section{Warnings}{ The code will check for problems with the data, and issue a warning if any problems are found. The checks and warnings can be switched off, for efficiency's sake, but this should only be done if you are confident that the data do not have these problems. Setting \code{check=FALSE} will disable all the checking procedures: the check for points outside the window, and the check for duplicated points. This is extremely dangerous, because points lying outside the window will break many of the procedures in \pkg{spatstat}, causing crashes and strange errors. Set \code{check=FALSE} only if you are absolutely sure that there are no points outside the window. If duplicated points are found, a warning is issued, but no action is taken. Duplicated points are not illegal, but may cause unexpected problems later. Setting \code{checkdup=FALSE} will disable the check for duplicated points. Do this only if you already know the answer. Methodology and software for spatial point patterns often assume that all points are distinct so that there are no duplicated points. If duplicated points are present, the consequence could be an incorrect result or a software crash. To the best of our knowledge, all \pkg{spatstat} code handles duplicated points correctly. However, if duplicated points are present, we advise using \code{\link{unique.ppp}} or \code{\link{multiplicity.ppp}} to eliminate duplicated points and re-analyse the data. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{as.ppp}}, \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.owin}} } \examples{ # some arbitrary coordinates in [0,1] x <- runif(20) y <- runif(20) # the following are equivalent X <- ppp(x, y, c(0,1), c(0,1)) X <- ppp(x, y) X <- ppp(x, y, window=owin(c(0,1),c(0,1))) # specify that the coordinates are given in metres X <- ppp(x, y, c(0,1), c(0,1), unitname=c("metre","metres")) \dontrun{plot(X)} # marks m <- sample(1:2, 20, replace=TRUE) m <- factor(m, levels=1:2) X <- ppp(x, y, c(0,1), c(0,1), marks=m) \dontrun{plot(X)} # polygonal window X <- ppp(x, y, poly=list(x=c(0,10,0), y=c(0,0,10))) \dontrun{plot(X)} # circular window of radius 2 X <- ppp(x, y, window=disc(2)) # copy the window from another pattern data(cells) X <- ppp(x, y, window=Window(cells)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/density.lpp.Rd0000644000176200001440000001024713160710571015435 0ustar liggesusers\name{density.lpp} \alias{density.lpp} \alias{density.splitppx} \title{ Kernel Estimate of Intensity on a Linear Network } \description{ Estimates the intensity of a point process on a linear network by applying kernel smoothing to the point pattern data. } \usage{ \method{density}{lpp}(x, sigma, \dots, weights=NULL, kernel="gaussian", continuous=TRUE, epsilon = 1e-06, verbose = TRUE, debug = FALSE, savehistory = TRUE, old=FALSE) \method{density}{splitppx}(x, sigma, \dots) } \arguments{ \item{x}{ Point pattern on a linear network (object of class \code{"lpp"}) to be smoothed. } \item{sigma}{ Smoothing bandwidth (standard deviation of the kernel) in the same units as the spatial coordinates of \code{x}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the resolution of the result. } \item{weights}{ Optional. Numeric vector of weights associated with the points of \code{x}. Weights may be positive, negative or zero. 
} \item{kernel}{ Character string specifying the smoothing kernel. See \code{\link{dkernel}} for possible options. } \item{continuous}{ Logical value indicating whether to compute the \dQuote{equal-split continuous} smoother (\code{continuous=TRUE}, the default) or the \dQuote{equal-split discontinuous} smoother (\code{continuous=FALSE}). } \item{epsilon}{ Tolerance value. A tail of the kernel with total mass less than \code{epsilon} may be deleted. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{debug}{ Logical value indicating whether to print debugging information. } \item{savehistory}{ Logical value indicating whether to save the entire history of the algorithm, for the purposes of evaluating performance. } \item{old}{ Logical value indicating whether to use the old, very slow algorithm for the equal-split continuous estimator. } } \details{ Kernel smoothing is applied to the points of \code{x} using one of the rules described in Okabe and Sugihara (2012) and McSwiggan et al (2016). The result is a pixel image on the linear network (class \code{"linim"}) which can be plotted. If \code{continuous=TRUE} (the default), smoothing is performed using the \dQuote{equal-split continuous} rule described in Section 9.2.3 of Okabe and Sugihara (2012). The resulting function is continuous on the linear network. If \code{continuous=FALSE}, smoothing is performed using the \dQuote{equal-split discontinuous} rule described in Section 9.2.2 of Okabe and Sugihara (2012). The resulting function is not continuous. In the default case (where \code{continuous=TRUE} and \code{kernel="gaussian"} and \code{old=FALSE}), computation is performed rapidly by solving the classical heat equation on the network, as described in McSwiggan et al (2016). Computational time is short, but increases quadratically with \code{sigma}. The arguments \code{epsilon,debug,verbose,savehistory} are ignored. 
In all other cases, computation is performed by path-tracing as described in Okabe and Sugihara (2012); computation can be extremely slow, and time increases exponentially with \code{sigma}. There is also a method for split point patterns on a linear network (class \code{"splitppx"}) which will return a list of pixel images. } \value{ A pixel image on the linear network (object of class \code{"linim"}). } \references{ McSwiggan, G., Baddeley, A. and Nair, G. (2016) Kernel density estimation on a linear network. \emph{Scandinavian Journal of Statistics}, In press. Okabe, A. and Sugihara, K. (2012) \emph{Spatial analysis along networks}. Wiley. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{lpp}}, \code{\link{linim}} } \examples{ X <- runiflpp(3, simplenet) D <- density(X, 0.2, verbose=FALSE) plot(D, style="w", main="", adjust=2) Dw <- density(X, 0.2, weights=c(1,2,-1), verbose=FALSE) De <- density(X, 0.2, kernel="epanechnikov", verbose=FALSE) Ded <- density(X, 0.2, kernel="epanechnikov", continuous=FALSE, verbose=FALSE) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/rmh.ppm.Rd0000644000176200001440000002116113160710621014536 0ustar liggesusers\name{rmh.ppm} \alias{rmh.ppm} \title{Simulate from a Fitted Point Process Model} \description{ Given a point process model fitted to data, generate a random simulation of the model, using the Metropolis-Hastings algorithm. } \usage{ \method{rmh}{ppm}(model, start=NULL, control=default.rmhcontrol(model, w=w), \dots, w = NULL, project=TRUE, nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, new.coef=NULL) } \arguments{ \item{model}{A fitted point process model (object of class \code{"ppm"}, see \code{\link{ppm.object}}) which it is desired to simulate. This fitted model is usually the result of a call to \code{\link{ppm}}. See \bold{Details} below. } \item{start}{Data determining the initial state of the Metropolis-Hastings algorithm. 
See \code{\link{rmhstart}} for description of these arguments. Defaults to \code{list(x.start=data.ppm(model))} } \item{control}{Data controlling the iterative behaviour of the Metropolis-Hastings algorithm. See \code{\link{rmhcontrol}} for description of these arguments. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}}, or to \code{\link{rmh.default}}, or to covariate functions in the model. } \item{w}{ Optional. Window in which the simulations should be generated. Default is the window of the original data. } \item{project}{ Logical flag indicating what to do if the fitted model is invalid (in the sense that the values of the fitted coefficients do not specify a valid point process). If \code{project=TRUE} the closest valid model will be simulated; if \code{project=FALSE} an error will occur. } \item{nsim}{ Number of simulated point patterns that should be generated. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } \item{saveinfo}{ Logical value indicating whether to save auxiliary information. } \item{verbose}{ Logical flag indicating whether to print progress reports. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(model)}. } } \value{ A point pattern (an object of class \code{"ppp"}; see \code{\link{ppp.object}}) or a list of point patterns. } \details{ This function generates simulated realisations from a point process model that has been fitted to point pattern data. It is a method for the generic function \code{\link{rmh}} for the class \code{"ppm"} of fitted point process models. To simulate other kinds of point process models, see \code{\link{rmh}} or \code{\link{rmh.default}}. The argument \code{model} describes the fitted model. 
It must be an object of class \code{"ppm"} (see \code{\link{ppm.object}}), and will typically be the result of a call to the point process model fitting function \code{\link{ppm}}. The current implementation enables simulation from any fitted model involving the interactions \code{\link{AreaInter}}, \code{\link{DiggleGratton}}, \code{\link{DiggleGatesStibbard}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Softcore}}, including nonstationary models. See the examples. It is also possible to simulate \emph{hybrids} of several such models. See \code{\link{Hybrid}} and the examples. It is possible that the fitted coefficients of a point process model may be ``illegal'', i.e. that there may not exist a mathematically well-defined point process with the given parameter values. For example, a Strauss process with interaction parameter \eqn{\gamma > 1}{gamma > 1} does not exist, but the model-fitting procedure used in \code{\link{ppm}} will sometimes produce values of \eqn{\gamma}{gamma} greater than 1. In such cases, if \code{project=FALSE} then an error will occur, while if \code{project=TRUE} then \code{rmh.ppm} will find the nearest legal model and simulate this model instead. (The nearest legal model is obtained by projecting the vector of coefficients onto the set of valid coefficient vectors. The result is usually the Poisson process with the same fitted intensity.) The arguments \code{start} and \code{control} are lists of parameters determining the initial state and the iterative behaviour, respectively, of the Metropolis-Hastings algorithm. The argument \code{start} is passed directly to \code{\link{rmhstart}}. See \code{\link{rmhstart}} for details of the parameters of the initial state, and their default values. The argument \code{control} is first passed to \code{\link{rmhcontrol}}. 
Then if any additional arguments \code{\dots} are given, \code{\link{update.rmhcontrol}} is called to update the parameter values. See \code{\link{rmhcontrol}} for details of the iterative behaviour parameters, and \code{\link{default.rmhcontrol}} for their default values. Note that if you specify expansion of the simulation window using the parameter \code{expand} (so that the model will be simulated on a window larger than the original data window) then the model must be capable of extrapolation to this larger window. This is usually not possible for models which depend on external covariates, because the domain of a covariate image is usually the same as the domain of the fitted model. After extracting the relevant information from the fitted model object \code{model}, \code{rmh.ppm} invokes the default \code{rmh} algorithm \code{\link{rmh.default}}, unless the model is Poisson. If the model is Poisson then the Metropolis-Hastings algorithm is not needed, and the model is simulated directly, using one of \code{\link{rpoispp}}, \code{\link{rmpoispp}}, \code{\link{rpoint}} or \code{\link{rmpoint}}. See \code{\link{rmh.default}} for further information about the implementation, or about the Metropolis-Hastings algorithm. } \section{Warnings}{ See Warnings in \code{\link{rmh.default}}. 
} \seealso{ \code{\link{simulate.ppm}}, \code{\link{rmh}}, \code{\link{rmhmodel}}, \code{\link{rmhcontrol}}, \code{\link{default.rmhcontrol}}, \code{\link{update.rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{rmh.default}}, \code{\link{ppp.object}}, \code{\link{ppm}}, Interactions: \code{\link{AreaInter}}, \code{\link{DiggleGratton}}, \code{\link{DiggleGatesStibbard}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}} } \examples{ live <- interactive() op <- spatstat.options() spatstat.options(rmh.nrep=1e5) Nrep <- 1e5 X <- swedishpines if(live) plot(X, main="Swedish Pines data") # Poisson process fit <- ppm(X, ~1, Poisson()) Xsim <- rmh(fit) if(live) plot(Xsim, main="simulation from fitted Poisson model") # Strauss process fit <- ppm(X, ~1, Strauss(r=7)) Xsim <- rmh(fit) if(live) plot(Xsim, main="simulation from fitted Strauss model") \dontrun{ # Strauss process simulated on a larger window # then clipped to original window Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE)) Xsim <- rmh(fit, nrep=Nrep, expand=2, periodic=TRUE) } \dontrun{ X <- rSSI(0.05, 100) # piecewise-constant pairwise interaction function fit <- ppm(X, ~1, PairPiece(seq(0.02, 0.1, by=0.01))) Xsim <- rmh(fit) } # marked point pattern Y <- amacrine \dontrun{ # marked Poisson models fit <- ppm(Y) fit <- ppm(Y,~marks) fit <- ppm(Y,~polynom(x,2)) fit <- ppm(Y,~marks+polynom(x,2)) fit <- ppm(Y,~marks*polynom(x,y,2)) Ysim <- rmh(fit) } # multitype Strauss models MS <- MultiStrauss(radii=matrix(0.07, ncol=2, nrow=2), types = levels(Y$marks)) \dontrun{ fit <- ppm(Y ~marks, MS) Ysim <- rmh(fit) } fit <- ppm(Y ~ marks*polynom(x,y,2), MS) Ysim <- rmh(fit) if(live) plot(Ysim, main="simulation from fitted inhomogeneous Multitype Strauss") spatstat.options(op) \dontrun{ # Hybrid model fit <- 
ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2))) Y <- rmh(fit) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{datagen} spatstat/man/nncross.Rd0000644000176200001440000001602213160710621014642 0ustar liggesusers\name{nncross} \alias{nncross} \alias{nncross.ppp} \alias{nncross.default} \title{Nearest Neighbours Between Two Patterns} \description{ Given two point patterns \code{X} and \code{Y}, finds the nearest neighbour in \code{Y} of each point of \code{X}. Alternatively \code{Y} may be a line segment pattern. } \usage{ nncross(X, Y, \dots) \method{nncross}{ppp}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, k = 1, sortby=c("range", "var", "x", "y"), is.sorted.X = FALSE, is.sorted.Y = FALSE) \method{nncross}{default}(X, Y, \dots) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{Y}{Either a point pattern (object of class \code{"ppp"}) or a line segment pattern (object of class \code{"psp"}).} \item{iX, iY}{Optional identifiers, applicable only in the case where \code{Y} is a point pattern, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{sortby}{ Determines which coordinate to use to sort the point patterns. See Details. } \item{is.sorted.X, is.sorted.Y}{ Logical values attesting whether the point patterns \code{X} and \code{Y} have been sorted. See Details. } \item{\dots}{Ignored.} } \details{ Given two point patterns \code{X} and \code{Y} this function finds, for each point of \code{X}, the nearest point of \code{Y}. The distance between these points is also computed. 
If the argument \code{k} is specified, then the \code{k}-th nearest neighbours will be found. Alternatively if \code{X} is a point pattern and \code{Y} is a line segment pattern, the function finds the nearest line segment to each point of \code{X}, and computes the distance. The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. The argument \code{k} may be an integer or an integer vector. If it is a single integer, then the \code{k}-th nearest neighbours are computed. If it is a vector, then the \code{k[i]}-th nearest neighbours are computed for each entry \code{k[i]}. For example, setting \code{k=1:3} will compute the nearest, second-nearest and third-nearest neighbours. The result is a data frame. Note that this function is not symmetric in \code{X} and \code{Y}. To find the nearest neighbour in \code{X} of each point in \code{Y}, where \code{Y} is a point pattern, use \code{nncross(Y,X)}. The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. 
} \section{Sorting data and pre-sorted data}{ Read this section if you care about the speed of computation. For efficiency, the algorithm sorts the point patterns \code{X} and \code{Y} into increasing order of the \eqn{x} coordinate or increasing order of the \eqn{y} coordinate. Sorting is only an intermediate step; it does not affect the output, which is always given in the same order as the original data. By default (if \code{sortby="range"}), the sorting will occur on the coordinate that has the larger range of values (according to the frame of the enclosing window of \code{Y}). If \code{sortby = "var"}, sorting will occur on the coordinate that has the greater variance (in the pattern \code{Y}). Setting \code{sortby="x"} or \code{sortby = "y"} will specify that sorting should occur on the \eqn{x} or \eqn{y} coordinate, respectively. If the point pattern \code{X} is already sorted, then the corresponding argument \code{is.sorted.X} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. Similarly if \code{Y} is already sorted, then \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. If both \code{X} and \code{Y} are sorted \emph{on the same coordinate axis} then both \code{is.sorted.X} and \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. } \value{ A data frame, or a vector if the data frame would contain only one column. By default (if \code{what=c("dist", "which")} and \code{k=1}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"} and \code{k=1}, a vector of nearest neighbour distances. If \code{what="which"} and \code{k=1}, a vector of nearest neighbour indices. 
If \code{k} is specified, the result is a data frame with columns containing the \code{k}-th nearest neighbour distances and/or nearest neighbour indices. } \seealso{ \code{\link{nndist}} for nearest neighbour distances in a single point pattern. } \examples{ # two different point patterns X <- runifpoint(15) Y <- runifpoint(20) N <- nncross(X,Y)$which # note that length(N) = 15 plot(superimpose(X=X,Y=Y), main="nncross", cols=c("red","blue")) arrows(X$x, X$y, Y[N]$x, Y[N]$y, length=0.15) # third-nearest neighbour NXY <- nncross(X, Y, k=3) NXY[1:3,] # second and third nearest neighbours NXY <- nncross(X, Y, k=2:3) NXY[1:3,] # two patterns with some points in common Z <- runifpoint(50) X <- Z[1:30] Y <- Z[20:50] iX <- 1:30 iY <- 20:50 N <- nncross(X,Y, iX, iY)$which N <- nncross(X,Y, iX, iY, what="which") #faster plot(superimpose(X=X, Y=Y), main="nncross", cols=c("red","blue")) arrows(X$x, X$y, Y[N]$x, Y[N]$y, length=0.15) # point pattern and line segment pattern X <- runifpoint(15) Y <- rpoisline(10) N <- nncross(X,Y) } \author{ \adrian , \rolf , and Jens Oehlschlaegel } \keyword{spatial} \keyword{math} spatstat/man/as.rectangle.Rd0000644000176200001440000000335413160710571015533 0ustar liggesusers\name{as.rectangle} \alias{as.rectangle} \title{Window Frame} \description{ Extract the window frame of a window or other spatial dataset } \usage{ as.rectangle(w, \dots) } \arguments{ \item{w}{A window, or a dataset that has a window. Either a window (object of class \code{"owin"}), a pixel image (object of class \code{"im"}) or other data determining such a window. } \item{\dots}{ Optional. Auxiliary data to help determine the window. If \code{w} does not belong to a recognised class, the arguments \code{w} and \code{\dots} are passed to \code{\link{as.owin}} to determine the window. } } \value{ A window (object of class \code{"owin"}) of type \code{"rectangle"} representing a rectangle. 
} \details{ This function is the quickest way to determine a bounding rectangle for a spatial dataset. If \code{w} is a window, the function just extracts the outer bounding rectangle of \code{w} as given by its elements \code{xrange,yrange}. The function can also be applied to any spatial dataset that has a window: for example, a point pattern (object of class \code{"ppp"}) or a line segment pattern (object of class \code{"psp"}). The bounding rectangle of the window of the dataset is extracted. Use the function \code{\link{boundingbox}} to compute the \emph{smallest} bounding rectangle of a dataset. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{boundingbox}} } \examples{ w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7))) r <- as.rectangle(w) # returns a 10 x 10 rectangle data(lansing) as.rectangle(lansing) data(copper) as.rectangle(copper$SouthLines) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/timeTaken.Rd0000644000176200001440000000213413160710621015075 0ustar liggesusers\name{timeTaken} \alias{timeTaken} \title{ Extract the Total Computation Time } \description{ Given an object or objects that contain timing information (reporting the amount of computer time taken to compute each object), this function extracts the timing data and evaluates the total time taken. } \usage{ timeTaken(..., warn=TRUE) } \arguments{ \item{\dots}{ One or more objects of class \code{"timed"} containing timing data. } \item{warn}{ Logical value indicating whether a warning should be issued if some of the arguments do not contain timing information. } } \details{ An object of class \code{"timed"} contains information on the amount of computer time that was taken to compute the object. See \code{\link{timed}}. This function extracts the timing information from one or more such objects, and calculates the total time. } \value{ An object inheriting the class \code{"timed"}. 
} \examples{ A <- timed(Kest(cells)) B <- timed(Gest(cells)) A B timeTaken(A,B) } \seealso{ \code{\link{timed}} } \author{ \spatstatAuthors. } \keyword{utilities} spatstat/man/rpoisppx.Rd0000644000176200001440000000307313160710621015043 0ustar liggesusers\name{rpoisppx} \alias{rpoisppx} \title{ Generate Poisson Point Pattern in Any Dimensions } \description{ Generate a random multi-dimensional point pattern using the homogeneous Poisson process. } \usage{ rpoisppx(lambda, domain, nsim=1, drop=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single positive number. } \item{domain}{ Multi-dimensional box in which the process should be generated. An object of class \code{"boxx"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern (an object of class \code{"ppx"}). If \code{nsim > 1} or \code{drop=FALSE}, a list of such point patterns. } \details{ This function generates a realisation of the homogeneous Poisson process in multi dimensions, with intensity \code{lambda} (points per unit volume). The realisation is generated inside the multi-dimensional region \code{domain} which currently must be a rectangular box (object of class \code{"boxx"}). } \note{ The intensity \code{lambda} is the expected number of points \emph{per unit volume}. 
} \seealso{ \code{\link{runifpointx}}, \code{\link{ppx}}, \code{\link{boxx}} } \examples{ w <- boxx(x=c(0,1), y=c(0,1), z=c(0,1), t=c(0,3)) X <- rpoisppx(10, w) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/crossdist.default.Rd0000644000176200001440000000525013160710571016622 0ustar liggesusers\name{crossdist.default} \alias{crossdist.default} \title{Pairwise distances between two different sets of points} \description{ Computes the distances between each pair of points taken from two different sets of points. } \usage{ \method{crossdist}{default}(X, Y, x2, y2, \dots, period=NULL, method="C", squared=FALSE) } \arguments{ \item{X,Y}{ Numeric vectors of equal length specifying the coordinates of the first set of points. } \item{x2,y2}{ Numeric vectors of equal length specifying the coordinates of the second set of points. } \item{\dots}{ Ignored. } \item{period}{ Optional. Dimensions for periodic edge correction. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in the first set of points to the \code{j}-th point in the second set of points. } \details{ Given two sets of points, this function computes the Euclidean distance from each point in the first set to each point in the second set, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}}. This function expects \code{X} and \code{Y} to be numeric vectors of equal length specifying the coordinates of the first set of points. The arguments \code{x2},\code{y2} specify the coordinates of the second set of points. Alternatively if \code{period} is given, then the distances will be computed in the `periodic' sense (also known as `torus' distance). 
The points will be treated as if they are in a rectangle of width \code{period[1]} and height \code{period[2]}. Opposite edges of the rectangle are regarded as equivalent. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by a factor of 4. } \seealso{ \code{\link{crossdist}}, \code{\link{crossdist.ppp}}, \code{\link{crossdist.psp}}, \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ d <- crossdist(runif(7), runif(7), runif(12), runif(12)) d <- crossdist(runif(7), runif(7), runif(12), runif(12), period=c(1,1)) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/rcellnumber.Rd0000644000176200001440000000312713160710621015471 0ustar liggesusers\name{rcellnumber} \alias{rcellnumber} \title{ Generate Random Numbers of Points for Cell Process } \description{ Generates random integers for the Baddeley-Silverman counterexample. } \usage{ rcellnumber(n, N = 10, mu=1) } \arguments{ \item{n}{ Number of random integers to be generated. } \item{N}{ Distributional parameter: the largest possible value (when \code{mu <= 1}). An integer greater than 1. } \item{mu}{ Mean of the distribution (equals the variance). Any positive real number. } } \details{ If \code{mu = 1} (the default), this function generates random integers which have mean and variance equal to 1, but which do not have a Poisson distribution. The random integers take the values \eqn{0}, \eqn{1} and \eqn{N} with probabilities \eqn{1/N}, \eqn{(N-2)/(N-1)} and \eqn{1/(N(N-1))} respectively. See Baddeley and Silverman (1984). If \code{mu} is another positive number, the random integers will have mean and variance equal to \code{mu}. 
They are obtained by generating the one-dimensional counterpart of the cell process and counting the number of points in the interval from \code{0} to \code{mu}. The maximum possible value of each random integer is \code{N * ceiling(mu)}. } \value{ An integer vector of length \code{n}. } \references{ Baddeley, A.J. and Silverman, B.W. (1984) A cautionary example on the use of second-order methods for analyzing point patterns. \emph{Biometrics} \bold{40}, 1089-1094. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rcell}} } \examples{ rcellnumber(30, 3) } \keyword{datagen} spatstat/man/summary.quad.Rd0000644000176200001440000000312713160710621015605 0ustar liggesusers\name{summary.quad} \alias{summary.quad} \alias{print.summary.quad} \title{Summarizing a Quadrature Scheme} \description{ \code{summary} method for class \code{"quad"}. } \usage{ \method{summary}{quad}(object, \dots, checkdup=FALSE) \method{print}{summary.quad}(x, \dots, dp=3) } \arguments{ \item{object}{A quadrature scheme.} \item{\dots}{Ignored.} \item{checkdup}{ Logical value indicating whether to test for duplicated points. } \item{dp}{Number of significant digits to print.} \item{x}{Object of class \code{"summary.quad"} returned by \code{summary.quad}.} } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"quad"}. An object of class \code{"quad"} describes a quadrature scheme, used to fit a point process model. See \code{\link{quad.object}}) for details of this class. \code{summary.quad} extracts information about the quadrature scheme, and \code{print.summary.quad} prints this information in a comprehensible format. In normal usage, \code{print.summary.quad} is invoked implicitly when the user calls \code{summary.quad} without assigning its value to anything. See the examples. } \value{ \code{summary.quad} returns an object of class \code{"summary.quad"}, while \code{print.summary.quad} returns \code{NULL}. 
} \examples{ # make a quadrature scheme Q <- quadscheme(rpoispp(42)) # summarize it summary(Q) # save the summary s <- summary(Q) # print it print(s) s # extract total quadrature weight s$w$all$sum } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/Extract.ssf.Rd0000644000176200001440000000145613160710621015366 0ustar liggesusers\name{[.ssf} \alias{[.ssf} \title{ Subset of spatially sampled function } \description{ Extract a subset of the data for a spatially sampled function. } \usage{ \method{[}{ssf}(x, i, j, ..., drop) } \arguments{ \item{x}{ Object of class \code{"ssf"}. } \item{i}{ Subset index applying to the locations where the function is sampled. } \item{j}{ Subset index applying to the columns (variables) measured at each location. } \item{\dots, drop}{ Ignored. } } \details{ This is the subset operator for the class \code{"ssf"}. } \value{ Another object of class \code{"ssf"}. } \author{ \adrian. } \seealso{ \code{\link{ssf}}, \code{\link{with.ssf}} } \examples{ f <- ssf(cells, data.frame(d=nndist(cells), i=1:42)) f f[1:10,] f[ ,1] } \keyword{spatial} \keyword{manip} spatstat/man/harmonise.Rd0000644000176200001440000000261013160710621015140 0ustar liggesusers\name{harmonise} \alias{harmonise} \alias{harmonize} \title{Make Objects Compatible} \description{ Converts several objects of the same class to a common format so that they can be combined or compared. } \usage{ harmonise(\dots) harmonize(\dots) } \arguments{ \item{\dots}{ Any number of objects of the same class. } } \details{ This generic command takes any number of objects of the same class, and \emph{attempts} to make them compatible in the sense of \code{\link{compatible}} so that they can be combined or compared. There are methods for the classes \code{"fv"} (\code{\link{harmonise.fv}}) and \code{"im"} (\code{\link{harmonise.im}}). All arguments \code{\dots} must be objects of the same class. 
The result will be a list, of length equal to the number of arguments \code{\dots}, containing new versions of each of these objects, converted to a common format. If the arguments were named (\code{name=value}) then the return value also carries these names. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are objects of the same class. If the arguments were named (\code{name=value}) then the return value also carries these names. } \author{\adrian , \rolf and \ege. } \seealso{ \code{\link{compatible}}, \code{\link{harmonise.fv}}, \code{\link{harmonise.im}} } \keyword{spatial} \keyword{manip} spatstat/man/edges2vees.Rd0000644000176200001440000000303713160710571015217 0ustar liggesusers\name{edges2vees} \alias{edges2vees} \title{ List Dihedral Triples in a Graph } \description{ Given a list of edges between vertices, compile a list of all \sQuote{vees} or dihedral triples formed by these edges. } \usage{ edges2vees(iedge, jedge, nvert=max(iedge, jedge), \dots, check=TRUE) } \arguments{ \item{iedge,jedge}{ Integer vectors, of equal length, specifying the edges. } \item{nvert}{ Number of vertices in the network. } \item{\dots}{Ignored} \item{check}{Logical. Whether to check validity of input data.} } \details{ Given a finite graph with \code{nvert} vertices and with edges specified by \code{iedge, jedge}, this low-level function finds all \sQuote{vees} or \sQuote{dihedral triples} in the graph, that is, all triples of vertices \code{(i,j,k)} where \code{i} and \code{j} are joined by an edge and \code{i} and \code{k} are joined by an edge. The interpretation of \code{iedge, jedge} is that each successive pair of entries specifies an edge in the graph. The \eqn{k}th edge joins vertex \code{iedge[k]} to vertex \code{jedge[k]}. Entries of \code{iedge} and \code{jedge} must be integers from 1 to \code{nvert}. 
} \value{ A 3-column matrix of integers, in which each row represents a triple of vertices, with the first vertex joined to the other two vertices. } \seealso{ \code{\link{edges2triangles}} } \author{\adrian and \rolf } \examples{ i <- c(1, 2, 5, 5, 1, 4, 2) j <- c(2, 3, 3, 1, 3, 2, 5) edges2vees(i, j) } \keyword{spatial} \keyword{manip} spatstat/man/sdr.Rd0000644000176200001440000000663513160710621013756 0ustar liggesusers\name{sdr} \alias{sdr} \title{ Sufficient Dimension Reduction } \description{ Given a point pattern and a set of predictors, find a minimal set of new predictors, each constructed as a linear combination of the original predictors. } \usage{ sdr(X, covariates, method = c("DR", "NNIR", "SAVE", "SIR", "TSE"), Dim1 = 1, Dim2 = 1, predict=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{covariates}{ A list of pixel images (objects of class \code{"im"}) to serve as predictor variables. } \item{method}{ Character string indicating which method to use. See Details. } \item{Dim1}{ Dimension of the first order Central Intensity Subspace (applicable when \code{method} is \code{"DR"}, \code{"NNIR"}, \code{"SAVE"} or \code{"TSE"}). } \item{Dim2}{ Dimension of the second order Central Intensity Subspace (applicable when \code{method="TSE"}). } \item{predict}{ Logical value indicating whether to compute the new predictors as well. } } \details{ Given a point pattern \eqn{X} and predictor variables \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}, Sufficient Dimension Reduction methods (Guan and Wang, 2010) attempt to find a minimal set of new predictor variables, each constructed by taking a linear combination of the original predictors, which explain the dependence of \eqn{X} on \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}. The methods do not assume any particular form of dependence of the point pattern on the predictors. The predictors are assumed to be Gaussian random fields. 
Available methods are: \tabular{ll}{ \code{method="DR"} \tab directional regression \cr \code{method="NNIR"} \tab nearest neighbour inverse regression \cr \code{method="SAVE"} \tab sliced average variance estimation \cr \code{method="SIR"} \tab sliced inverse regression \cr \code{method="TSE"} \tab two-step estimation \cr } The result includes a matrix \code{B} whose columns are estimates of the basis vectors of the space of new predictors. That is, the \code{j}th column of \code{B} expresses the \code{j}th new predictor as a linear combination of the original predictors. If \code{predict=TRUE}, the new predictors are also evaluated. They can also be evaluated using \code{\link{sdrPredict}}. } \value{ A list with components \code{B, M} or \code{B, M1, M2} where \code{B} is a matrix whose columns are estimates of the basis vectors for the space, and \code{M} or \code{M1,M2} are matrices containing estimates of the kernel. If \code{predict=TRUE}, the result also includes a component \code{Y} which is a list of pixel images giving the values of the new predictors. } \examples{ A <- sdr(bei, bei.extra, predict=TRUE) A Y1 <- A$Y[[1]] plot(Y1) points(bei, pch=".", cex=2) # investigate likely form of dependence plot(rhohat(bei, Y1)) } \seealso{ \code{\link{sdrPredict}} to compute the new predictors from the coefficient matrix. \code{\link{dimhat}} to estimate the subspace dimension. \code{\link{subspaceDistance}} } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. 
} \keyword{spatial} \keyword{multivariate} spatstat/man/rho2hat.Rd0000644000176200001440000000730513160710621014530 0ustar liggesusers\name{rho2hat} \alias{rho2hat} \title{ Smoothed Relative Density of Pairs of Covariate Values } \description{ Given a point pattern and two spatial covariates \eqn{Z_1}{Z1} and \eqn{Z_2}{Z2}, construct a smooth estimate of the relative risk of the pair \eqn{(Z_1,Z_2)}{(Z1, Z2)}. } \usage{ rho2hat(object, cov1, cov2, ..., method=c("ratio", "reweight")) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"}). } \item{cov1,cov2}{ The two covariates. Each argument is either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location, or one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}} to smooth the scatterplots. } \item{method}{ Character string determining the smoothing method. See Details. } } \details{ This is a bivariate version of \code{\link{rhohat}}. If \code{object} is a point pattern, this command produces a smoothed version of the scatterplot of the values of the covariates \code{cov1} and \code{cov2} observed at the points of the point pattern. The covariates \code{cov1,cov2} must have continuous values. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z_1(u), Z_2(u)) \kappa(u) }{ lambda(u) = rho(Z1(u), Z2(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}, and \eqn{\rho(z_1,z_2)}{rho(z1, z2)} is a function to be estimated. 
The algorithm computes a smooth estimate of the function \eqn{\rho}{rho}. The \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z_1, z_2)}{rho(z1, z2)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is estimated by the ratio of two density estimates. The numerator is a (rescaled) density estimate obtained by smoothing the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} obtained by evaluating the two covariate \eqn{Z_1, Z_2}{Z1, Z2} at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{(Z_1,Z_2)}{(Z1, Z2)}. \item If \code{method="reweight"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is estimated by applying density estimation to the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} obtained by evaluating the two covariate \eqn{Z_1, Z_2}{Z1, Z2} at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{(Z_1,Z_2)}{(Z1, Z2)}. } } \value{ A pixel image (object of class \code{"im"}). Also belongs to the special class \code{"rho2hat"} which has a plot method. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. 
} \author{ \adrian } \seealso{ \code{\link{rhohat}}, \code{\link{methods.rho2hat}} } \examples{ data(bei) attach(bei.extra) plot(rho2hat(bei, elev, grad)) fit <- ppm(bei, ~elev, covariates=bei.extra) \dontrun{ plot(rho2hat(fit, elev, grad)) } plot(rho2hat(fit, elev, grad, method="reweight")) } \keyword{spatial} \keyword{models} spatstat/man/G3est.Rd0000644000176200001440000000701213160710571014145 0ustar liggesusers\name{G3est} \Rdversion{1.1} \alias{G3est} \title{ Nearest Neighbour Distance Distribution Function of a Three-Dimensional Point Pattern } \description{ Estimates the nearest-neighbour distance distribution function \eqn{G_3(r)}{G3(r)} from a three-dimensional point pattern. } \usage{ G3est(X, ..., rmax = NULL, nrval = 128, correction = c("rs", "km", "Hanisch")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{G_3(r)}{G3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{G_3(r)}{G3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the nearest-neighbour function is \deqn{ G_3(r) = P(d^\ast(x,\Phi) \le r \mid x \in \Phi) }{ G3(r) = P(d*(x,Phi) <= r | x in Phi) } the cumulative distribution function of the distance \eqn{d^\ast(x,\Phi)}{d*(x,Phi)} from a typical point \eqn{x} in \eqn{\Phi}{Phi} to its nearest neighbour, i.e. to the nearest \emph{other} point of \eqn{\Phi}{Phi}. The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The nearest neighbour function of \eqn{\Phi}{Phi} can then be estimated using techniques described in the References. 
For each data point, the distance to the nearest neighbour is computed. The empirical cumulative distribution function of these values, with appropriate edge corrections, is the estimate of \eqn{G_3(r)}{G3(r)}. The available edge corrections are: \describe{ \item{\code{"rs"}:}{ the reduced sample (aka minus sampling, border correction) estimator (Baddeley et al, 1993) } \item{\code{"km"}:}{ the three-dimensional version of the Kaplan-Meier estimator (Baddeley and Gill, 1997) } \item{\code{"Hanisch"}:}{ the three-dimensional generalisation of the Hanisch estimator (Hanisch, 1984). } } Alternatively \code{correction="all"} selects all options. } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25}, 263--292. Hanisch, K.-H. (1984) Some remarks on estimators of the distribution function of nearest neighbour distance in stationary spatial point patterns. \emph{Mathematische Operationsforschung und Statistik, series Statistics} \bold{15}, 409--412. } \author{ \adrian and Rana Moyeed. } \section{Warnings}{ A large value of \code{nrval} is required in order to avoid discretisation effects (due to the use of histograms in the calculation). 
} \seealso{ \code{\link{F3est}}, \code{\link{K3est}}, \code{\link{pcf3est}} } \examples{ X <- rpoispp3(42) Z <- G3est(X) if(interactive()) plot(Z) } \keyword{spatial} \keyword{nonparametric} spatstat/man/pcfdot.Rd0000644000176200001440000001400413160710621014432 0ustar liggesusers\name{pcfdot} \alias{pcfdot} \title{Multitype pair correlation function (i-to-any)} \description{ Calculates an estimate of the multitype pair correlation function (from points of type \code{i} to points of any type) for a multitype point pattern. } \usage{ pcfdot(X, i, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("isotropic", "Ripley", "translate"), divisor = c("r", "d")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot-type pair correlation function \eqn{g_{i\bullet}(r)}{gdot[i](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule; see Details. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } } \details{ This is a generalisation of the pair correlation function \code{\link{pcf}} to multitype point patterns. 
For two locations \eqn{x} and \eqn{y} separated by a nonzero distance \eqn{r}, the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and a point of any type at location \eqn{y} is \deqn{ p(r) = \lambda_i \lambda g_{i\bullet}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i] * lambda * gdot[i](r) dx dy } where \eqn{\lambda}{lambda} is the intensity of all points, and \eqn{\lambda_i}{lambda[i]} is the intensity of the points of type \eqn{i}. For a completely random Poisson marked point process, \eqn{p(r) = \lambda_i \lambda}{p(r) = lambda[i] * lambda} so \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1}. For a stationary multitype point process, the type-\code{i}-to-any-type pair correlation function between marks \eqn{i} and \eqn{j} is formally defined as \deqn{ g_{i\bullet}(r) = \frac{K_{i\bullet}^\prime(r)}{2\pi r} }{ g(r) = Kdot[i]'(r)/ ( 2 * pi * r) } where \eqn{K_{i\bullet}^\prime}{Kdot[i]'(r)} is the derivative of the type-\code{i}-to-any-type \eqn{K} function \eqn{K_{i\bullet}(r)}{Kdot[i](r)}. of the point process. See \code{\link{Kdot}} for information about \eqn{K_{i\bullet}(r)}{Kdot[i](r)}. The command \code{pcfdot} computes a kernel estimate of the multitype pair correlation function from points of type \eqn{i} to points of any type. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. 
} There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285). That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the unmarked point process, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. The companion function \code{\link{pcfcross}} computes the corresponding analogue of \code{\link{Kcross}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}}{gdot[i]} has been estimated } \item{theo}{the theoretical value \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1} for independent marks. } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{g_{i\bullet}}{gdot[i]} obtained by the edge corrections named. } \seealso{ Mark connection function \code{\link{markconnect}}. 
Multitype pair correlation \code{\link{pcfcross}}, \code{\link{pcfmulti}}. Pair correlation \code{\link{pcf}},\code{\link{pcf.ppp}}. \code{\link{Kdot}} } \examples{ data(amacrine) p <- pcfdot(amacrine, "on") p <- pcfdot(amacrine, "on", stoyan=0.1) plot(p) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/dkernel.Rd0000644000176200001440000000611713160710571014611 0ustar liggesusers\name{dkernel} \alias{dkernel} \alias{pkernel} \alias{qkernel} \alias{rkernel} \title{Kernel distributions and random generation} \description{Density, distribution function, quantile function and random generation for several distributions used in kernel estimation for numerical data. } \usage{ dkernel(x, kernel = "gaussian", mean = 0, sd = 1) pkernel(q, kernel = "gaussian", mean = 0, sd = 1, lower.tail = TRUE) qkernel(p, kernel = "gaussian", mean = 0, sd = 1, lower.tail = TRUE) rkernel(n, kernel = "gaussian", mean = 0, sd = 1) } \arguments{ \item{x, q}{Vector of quantiles.} \item{p}{Vector of probabilities.} \item{kernel}{ String name of the kernel. Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } \item{n}{Number of observations.} \item{mean}{Mean of distribution.} \item{sd}{Standard deviation of distribution.} \item{lower.tail}{logical; if \code{TRUE} (the default), then probabilities are \eqn{P(X \le x)}{P[X \le x]}, otherwise, \eqn{P(X > x)}. } } \details{ These functions give the probability density, cumulative distribution function, quantile function and random generation for several distributions used in kernel estimation for one-dimensional (numerical) data. The available kernels are those used in \code{\link[stats]{density.default}}, namely \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. 
For more information about these kernels, see \code{\link[stats]{density.default}}. \code{dkernel} gives the probability density, \code{pkernel} gives the cumulative distribution function, \code{qkernel} gives the quantile function, and \code{rkernel} generates random deviates. } \value{ A numeric vector. For \code{dkernel}, a vector of the same length as \code{x} containing the corresponding values of the probability density. For \code{pkernel}, a vector of the same length as \code{q} containing the corresponding values of the cumulative distribution function. For \code{qkernel}, a vector of the same length as \code{p} containing the corresponding quantiles. For \code{rkernel}, a vector of length \code{n} containing randomly generated values. } \examples{ x <- seq(-3,3,length=100) plot(x, dkernel(x, "epa"), type="l", main=c("Epanechnikov kernel", "probability density")) plot(x, pkernel(x, "opt"), type="l", main=c("OptCosine kernel", "cumulative distribution function")) p <- seq(0,1, length=256) plot(p, qkernel(p, "biw"), type="l", main=c("Biweight kernel", "quantile function")) y <- rkernel(100, "tri") hist(y, main="Random variates from triangular density") rug(y) } \seealso{ \code{\link[stats]{density.default}}, \code{\link{kernel.factor}} } \author{\adrian \email{adrian@maths.uwa.edu.au} and Martin Hazelton } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat/man/quadratresample.Rd0000644000176200001440000000460513160710621016353 0ustar liggesusers\name{quadratresample} \alias{quadratresample} \title{Resample a Point Pattern by Resampling Quadrats} \description{ Given a point pattern dataset, create a resampled point pattern by dividing the window into rectangular quadrats and randomly resampling the list of quadrats. } \usage{ quadratresample(X, nx, ny=nx, ..., replace = FALSE, nsamples = 1, verbose = (nsamples > 1)) } \arguments{ \item{X}{ A point pattern dataset (object of class \code{"ppp"}). 
} \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. } \item{\dots}{Ignored.} \item{replace}{ Logical value. Specifies whether quadrats should be sampled with or without replacement. } \item{nsamples}{Number of randomised point patterns to be generated.} \item{verbose}{Logical value indicating whether to print progress reports.} } \details{ This command implements a very simple bootstrap resampling procedure for spatial point patterns \code{X}. The dataset \code{X} must be a point pattern (object of class \code{"ppp"}) and its observation window must be a rectangle. The window is first divided into \code{N = nx * ny} rectangular tiles (quadrats) of equal size and shape. To generate one resampled point pattern, a random sample of \code{N} quadrats is selected from the list of \code{N} quadrats, with replacement (if \code{replace=TRUE}) or without replacement (if \code{replace=FALSE}). The \eqn{i}th quadrat in the original dataset is then replaced by the \eqn{i}th sampled quadrat, after the latter is shifted so that it occupies the correct spatial position. The quadrats are then reconstituted into a point pattern inside the same window as \code{X}. If \code{replace=FALSE}, this procedure effectively involves a random permutation of the quadrats. The resulting resampled point pattern has the same number of points as \code{X}. If \code{replace=TRUE}, the number of points in the resampled point pattern is random. } \value{ A point pattern (if \code{nsamples = 1}) or a list of point patterns (if \code{nsamples > 1}). } \author{\adrian and \rolf } \seealso{ \code{\link{quadrats}}, \code{\link{quadratcount}}. See \code{\link{varblock}} to estimate the variance of a summary statistic by block resampling. 
} \examples{ data(bei) quadratresample(bei, 6, 3) } \keyword{spatial} \keyword{datagen} spatstat/man/delaunayNetwork.Rd0000644000176200001440000000250413160710571016335 0ustar liggesusers\name{delaunayNetwork} \alias{delaunayNetwork} \alias{dirichletNetwork} \title{ Linear Network of Delaunay Triangulation or Dirichlet Tessellation } \description{ Computes the edges of the Delaunay triangulation or Dirichlet tessellation of a point pattern, and returns the result as a linear network object. } \usage{ delaunayNetwork(X) dirichletNetwork(X, \dots) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{\dots}{Arguments passed to \code{\link{as.linnet.psp}}} } \details{ For \code{delaunayNetwork}, points of \code{X} which are neighbours in the Delaunay triangulation (see \code{\link{delaunay}}) will be joined by a straight line. The result will be returned as a linear network (object of class \code{"linnet"}). For \code{dirichletNetwork}, the Dirichlet tessellation is computed (see \code{\link{dirichlet}}) and the edges of the tiles of the tessellation are extracted. This is converted to a linear network using \code{\link{as.linnet.psp}}. } \value{ Linear network (object of class \code{"linnet"}) or \code{NULL}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{delaunay}}, \code{\link{dirichlet}}, \code{\link{delaunayDistance}} } \examples{ LE <- delaunayNetwork(cells) LI <- dirichletNetwork(cells) } \keyword{spatial} \keyword{manip} spatstat/man/Poisson.Rd0000644000176200001440000000345313160710571014617 0ustar liggesusers\name{Poisson} \alias{Poisson} \title{Poisson Point Process Model} \description{ Creates an instance of the Poisson point process model which can then be fitted to point pattern data. } \usage{ Poisson() } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Poisson point process (namely, there are no interactions). 
} \details{ The function \code{\link{ppm}}, which fits point process models to point pattern data, requires an argument \code{interaction} of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Poisson process is provided by the value of the function \code{Poisson}. This works for all types of Poisson processes including multitype and nonstationary Poisson processes. } \seealso{ \code{\link{ppm}}, \code{\link{Strauss}} } \examples{ ppm(nztrees ~1, Poisson()) # fit the stationary Poisson process to 'nztrees' # no edge correction needed lon <- longleaf \testonly{ lon <- lon[seq(1, npoints(lon), by=50)] } longadult <- unmark(subset(lon, marks >= 30)) ppm(longadult ~ x, Poisson()) # fit the nonstationary Poisson process # with intensity lambda(x,y) = exp( a + bx) # trees marked by species lans <- lansing \testonly{ lans <- lans[seq(1, npoints(lans), by=30)] } ppm(lans ~ marks, Poisson()) # fit stationary marked Poisson process # with different intensity for each species \dontrun{ ppm(lansing ~ marks * polynom(x,y,3), Poisson()) } # fit nonstationary marked Poisson process # with different log-cubic trend for each species \testonly{ # equivalent functionality - smaller dataset ppm(amacrine ~ marks * polynom(x,y,2), Poisson()) } } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/treebranchlabels.Rd0000644000176200001440000000442113160710621016455 0ustar liggesusers\name{treebranchlabels} \alias{treebranchlabels} \title{ Label Vertices of a Tree by Branch Membership } \description{ Given a linear network which is a tree (acyclic graph), this function assigns a label to each vertex, indicating its position in the tree. } \usage{ treebranchlabels(L, root = 1) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). The network must have no loops. } \item{root}{ Root of the tree. 
An integer index identifying which point in \code{vertices(L)} is the root of the tree. } } \details{ The network \code{L} should be a tree, that is, it must have no loops. This function computes a character string label for each vertex of the network \code{L}. The vertex identified by \code{root} (that is, \code{vertices(L)[root]}) is taken as the root of the tree and is given the empty label \code{""}. \itemize{ \item If there are several line segments which meet at the root vertex, each of these segments is the start of a new branch of the tree; the other endpoints of these segments are assigned the labels \code{"a"}, \code{"b"}, \code{"c"} and so on. \item If only one segment issues from the root vertex, the other endpoint of this segment is assigned the empty label \code{""}. } A similar rule is then applied to each of the newly-labelled vertices. If the vertex labelled \code{"a"} is joined to two other unlabelled vertices, these will be labelled \code{"aa"} and \code{"ab"}. The rule is applied recursively until all vertices have been labelled. If \code{L} is not a tree, the algorithm will terminate, but the results will be nonsense. } \value{ A vector of character strings, with one entry for each point in \code{vertices(L)}. } \author{ \spatstatAuthors } \seealso{ \code{\link{deletebranch}}, \code{\link{extractbranch}}, \code{\link{treeprune}} for manipulating a network using the branch labels. \code{\link{linnet}} for creating a network. 
} \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) plot(L, main="") # compute branch labels tb <- treebranchlabels(L, 1) tbc <- paste0("[", tb, "]") text(vertices(L), labels=tbc, cex=2) } \keyword{spatial} \keyword{math} spatstat/man/distcdf.Rd0000644000176200001440000000676213160710571014613 0ustar liggesusers\name{distcdf} \alias{distcdf} \title{Distribution Function of Interpoint Distance } \description{ Computes the cumulative distribution function of the distance between two independent random points in a given window or windows. } \usage{ distcdf(W, V=W, \dots, dW=1, dV=dW, nr=1024, regularise=TRUE) } \arguments{ \item{W}{ A window (object of class \code{"owin"}) containing the first random point. } \item{V}{ Optional. Another window containing the second random point. Defaults to \code{W}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution for the calculation. } \item{dV, dW}{ Optional. Probability densities (not necessarily normalised) for the first and second random points respectively. Data in any format acceptable to \code{\link{as.im}}, for example, a \code{function(x,y)} or a pixel image or a numeric value. The default corresponds to a uniform distribution over the window. } \item{nr}{ Integer. The number of values of interpoint distance \eqn{r} for which the CDF will be computed. Should be a large value! } \item{regularise}{ Logical value indicating whether to smooth the results for very small distances, to avoid discretisation artefacts. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \details{ This command computes the Cumulative Distribution Function \eqn{ CDF(r) = Prob(T \le r) }{ CDF(r) = Prob(T \le r) } of the Euclidean distance \eqn{T = \|X_1 - X_2\|}{T = |X1-X2|} between two independent random points \eqn{X_1}{X1} and \eqn{X_2}{X2}. 
In the simplest case, the command \code{distcdf(W)}, the random points are assumed to be uniformly distributed in the same window \code{W}. Alternatively the two random points may be uniformly distributed in two different windows \code{W} and \code{V}. In the most general case the first point \eqn{X_1}{X1} is random in the window \code{W} with a probability density proportional to \code{dW}, and the second point \eqn{X_2}{X2} is random in a different window \code{V} with probability density proportional to \code{dV}. The values of \code{dW} and \code{dV} must be finite and nonnegative. The calculation is performed by numerical integration of the set covariance function \code{\link{setcov}} for uniformly distributed points, and by computing the covariance function \code{\link{imcov}} in the general case. The accuracy of the result depends on the pixel resolution used to represent the windows: this is controlled by the arguments \code{\dots} which are passed to \code{\link{as.mask}}. For example use \code{eps=0.1} to specify pixels of size 0.1 units. The arguments \code{W} or \code{V} may also be point patterns (objects of class \code{"ppp"}). The result is the cumulative distribution function of the distance from a randomly selected point in the point pattern, to a randomly selected point in the other point pattern or window. If \code{regularise=TRUE} (the default), values of the cumulative distribution function for very short distances are smoothed to avoid discretisation artefacts. Smoothing is applied to all distances shorter than the width of 7 pixels. } \seealso{ \code{\link{setcov}}, \code{\link{as.mask}}. } \examples{ # The unit disc B <- disc() plot(distcdf(B)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/plot.splitppp.Rd0000644000176200001440000000306613160710621016011 0ustar liggesusers\name{plot.splitppp} \alias{plot.splitppp} \title{Plot a List of Point Patterns} \description{ Plots a list of point patterns. 
} \usage{ \method{plot}{splitppp}(x, \dots, main) } \arguments{ \item{x}{ A named list of point patterns, typically obtained from \code{\link{split.ppp}}. } \item{\dots}{ Arguments passed to \code{\link{plot.listof}} which control the layout of the plot panels, their appearance, and the plot behaviour in individual plot panels. } \item{main}{ Optional main title for the plot. } } \value{ Null. } \details{ This is the \code{plot} method for the class \code{"splitppp"}. It is typically used to plot the result of the function \code{\link{split.ppp}}. The argument \code{x} should be a named list of point patterns (objects of class \code{"ppp"}, see \code{\link{ppp.object}}). Each of these point patterns will be plotted in turn using \code{\link{plot.ppp}}. Plotting is performed by \code{\link{plot.listof}}. } \seealso{ \code{\link{plot.listof}} for arguments controlling the plot. \code{\link{split.ppp}}, \code{\link{plot.ppp}}, \code{\link{ppp.object}}. } \section{Error messages}{ If the error message \sQuote{Figure margins too large} occurs, ensure that \code{equal.scales=FALSE} and increase the values of \code{mar.panel}. } \examples{ # Multitype point pattern plot(split(amacrine)) plot(split(amacrine), main="", panel.begin=function(i, y, ...) { plot(density(y), ribbon=FALSE, ...) }) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/selfcrossing.psp.Rd0000644000176200001440000000200013160710621016446 0ustar liggesusers\name{selfcrossing.psp} \alias{selfcrossing.psp} \title{Crossing Points in a Line Segment Pattern} \description{ Finds any crossing points between the line segments in a line segment pattern. } \usage{ selfcrossing.psp(A) } \arguments{ \item{A}{ Line segment pattern (object of class \code{"psp"}). } } \value{ Point pattern (object of class \code{"ppp"}). } \details{ This function finds any crossing points between different line segments in the line segment pattern \code{A}. 
A crossing point occurs whenever one of the line segments in \code{A} intersects another line segment in \code{A}, at a nonzero angle of intersection. } \seealso{ \code{\link{crossing.psp}}, \code{\link{psp.object}}, \code{\link{ppp.object}}. } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(a, col="green", main="selfcrossing.psp") P <- selfcrossing.psp(a) plot(P, add=TRUE, col="red") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/plot.fv.Rd0000644000176200001440000002113213160710621014543 0ustar liggesusers\name{plot.fv} \alias{plot.fv} \title{Plot Function Values} \description{ Plot method for the class \code{"fv"}. } \usage{ \method{plot}{fv}(x, fmla, \dots, subset=NULL, lty=NULL, col=NULL, lwd=NULL, xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, ylim.covers=NULL, legend=!add, legendpos="topleft", legendavoid=missing(legendpos), legendmath=TRUE, legendargs=list(), shade=fvnames(x, ".s"), shadecol="grey", add=FALSE, log="", mathfont=c("italic", "plain", "bold", "bolditalic"), limitsonly=FALSE) } \arguments{ \item{x}{ An object of class \code{"fv"}, containing the variables to be plotted or variables from which the plotting coordinates can be computed. } \item{fmla}{ an R language formula determining which variables or expressions are plotted. Either a formula object, or a string that can be parsed as a formula. See Details. } \item{subset}{ (optional) subset of rows of the data frame that will be plotted. } \item{lty}{ (optional) numeric vector of values of the graphical parameter \code{lty} controlling the line style of each plot. } \item{col}{ (optional) numeric vector of values of the graphical parameter \code{col} controlling the colour of each plot. } \item{lwd}{ (optional) numeric vector of values of the graphical parameter \code{lwd} controlling the line width of each plot. 
} \item{xlim}{ (optional) range of x axis } \item{ylim}{ (optional) range of y axis } \item{xlab}{ (optional) label for x axis } \item{ylab}{ (optional) label for y axis } \item{\dots}{ Extra arguments passed to \code{plot.default}. } \item{ylim.covers}{ Optional vector of \eqn{y} values that must be included in the \eqn{y} axis. For example \code{ylim.covers=0} will ensure that the \eqn{y} axis includes the origin. } \item{legend}{ Logical flag or \code{NULL}. If \code{legend=TRUE}, the algorithm plots a legend in the top left corner of the plot, explaining the meaning of the different line types and colours. } \item{legendpos}{ The position of the legend. Either a character string keyword (see \code{\link[graphics]{legend}} for keyword options) or a pair of coordinates in the format \code{list(x,y)}. Alternatively if \code{legendpos="float"}, a location will be selected inside the plot region, avoiding the graphics. } \item{legendavoid}{ Whether to avoid collisions between the legend and the graphics. Logical value. If \code{TRUE}, the code will check for collisions between the legend box and the graphics, and will override \code{legendpos} if a collision occurs. If \code{FALSE}, the value of \code{legendpos} is always respected. } \item{legendmath}{ Logical. If \code{TRUE}, the legend will display the mathematical notation for each curve. If \code{FALSE}, the legend text is the identifier (column name) for each curve. } \item{legendargs}{ Named list containing additional arguments to be passed to \code{\link{legend}} controlling the appearance of the legend. } \item{shade}{ A character vector giving the names of two columns of \code{x}, or another type of index that identifies two columns. When the corresponding curves are plotted, the region between the curves will be shaded in light grey. The object \code{x} may or may not contain two columns which are designated as boundaries for shading; they are identified by \code{fvnames(x, ".s")}. 
The default is to shade between these two curves if they exist. To suppress this behaviour, set \code{shade=NULL}. } \item{shadecol}{ The colour to be used in the \code{shade} plot. A character string or an integer specifying a colour. } \item{add}{ Logical. Whether the plot should be added to an existing plot } \item{log}{ A character string which contains \code{"x"} if the x axis is to be logarithmic, \code{"y"} if the y axis is to be logarithmic and \code{"xy"} or \code{"yx"} if both axes are to be logarithmic. } \item{mathfont}{ Character string. The font to be used for mathematical expressions in the axis labels and the legend. } \item{limitsonly}{ Logical. If \code{FALSE}, plotting is performed normally. If \code{TRUE}, no plotting is performed at all; just the \eqn{x} and \eqn{y} limits of the plot are computed and returned. } } \value{ Invisible: either \code{NULL}, or a data frame giving the meaning of the different line types and colours. } \details{ This is the \code{plot} method for the class \code{"fv"}. The use of the argument \code{fmla} is like \code{plot.formula}, but offers some extra functionality. The left and right hand sides of \code{fmla} are evaluated, and the results are plotted against each other (the left side on the \eqn{y} axis against the right side on the \eqn{x} axis). The left and right hand sides of \code{fmla} may be the names of columns of the data frame \code{x}, or expressions involving these names. If a variable in \code{fmla} is not the name of a column of \code{x}, the algorithm will search for an object of this name in the environment where \code{plot.fv} was called, and then in the enclosing environment, and so on. Multiple curves may be specified by a single formula of the form \code{cbind(y1,y2,\dots,yn) ~ x}, where \code{x,y1,y2,\dots,yn} are expressions involving the variables in the data frame. Each of the variables \code{y1,y2,\dots,yn} in turn will be plotted against \code{x}. See the examples. 
Convenient abbreviations which can be used in the formula are \itemize{ \item the symbol \code{.} which represents all the columns in the data frame that will be plotted by default; \item the symbol \code{.x} which represents the function argument; \item the symbol \code{.y} which represents the recommended value of the function. } For further information, see \code{\link{fvnames}}. The value returned by this plot function indicates the meaning of the line types and colours in the plot. It can be used to make a suitable legend for the plot if you want to do this by hand. See the examples. The argument \code{shade} can be used to display critical bands or confidence intervals. If it is not \code{NULL}, then it should be a subset index for the columns of \code{x}, that identifies exactly 2 columns. When the corresponding curves are plotted, the region between the curves will be shaded in light grey. See the Examples. The default values of \code{lty}, \code{col} and \code{lwd} can be changed using \code{\link{spatstat.options}("plot.fv")}. Use \code{type = "n"} to create the plot region and draw the axes without plotting any data. Use \code{limitsonly=TRUE} to suppress all plotting and just compute the \eqn{x} and \eqn{y} limits. This can be used to calculate common \eqn{x} and \eqn{y} scales for several plots. To change the kind of parenthesis enclosing the explanatory text about the unit of length, use \code{\link{spatstat.options}('units.paren')} } \examples{ K <- Kest(cells) # K is an object of class "fv" plot(K, iso ~ r) # plots iso against r plot(K, sqrt(iso/pi) ~ r) # plots sqrt(iso/pi) against r plot(K, cbind(iso,theo) ~ r) # plots iso against r AND theo against r plot(K, .
~ r) # plots all available estimates of K against r plot(K, sqrt(./pi) ~ r) # plots all estimates of L-function # L(r) = sqrt(K(r)/pi) plot(K, cbind(iso,theo) ~ r, col=c(2,3)) # plots iso against r in colour 2 # and theo against r in colour 3 plot(K, iso ~ r, subset=quote(r < 0.2)) # plots iso against r for r < 0.2 # Can't remember the names of the columns? No problem.. plot(K, sqrt(./pi) ~ .x) # making a legend by hand v <- plot(K, . ~ r, legend=FALSE) legend("topleft", legend=v$meaning, lty=v$lty, col=v$col) # significance bands KE <- envelope(cells, Kest, nsim=19) plot(KE, shade=c("hi", "lo")) # how to display two functions on a common scale Kr <- Kest(redwood) a <- plot(K, limitsonly=TRUE) b <- plot(Kr, limitsonly=TRUE) xlim <- range(a$xlim, b$xlim) ylim <- range(a$ylim, b$ylim) opa <- par(mfrow=c(1,2)) plot(K, xlim=xlim, ylim=ylim) plot(Kr, xlim=xlim, ylim=ylim) par(opa) } \seealso{ \code{\link{fv.object}}, \code{\link{Kest}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/as.data.frame.owin.Rd0000644000176200001440000000375113160710571016545 0ustar liggesusers\name{as.data.frame.owin} \alias{as.data.frame.owin} \title{Convert Window to Data Frame} \description{ Converts a window object to a data frame. } \usage{ \method{as.data.frame}{owin}(x, \dots, drop=TRUE) } \arguments{ \item{x}{ Window (object of class \code{"owin"}). } \item{\dots}{Further arguments passed to \code{\link[base:as.data.frame]{as.data.frame.default}} to determine the row names and other features. } \item{drop}{ Logical value indicating whether to discard pixels that are outside the window, when \code{x} is a binary mask. } } \details{ This function returns a data frame specifying the coordinates of the window. If \code{x} is a binary mask window, the result is a data frame with columns \code{x} and \code{y} containing the spatial coordinates of each \emph{pixel}. If \code{drop=TRUE} (the default), only pixels inside the window are retained.
If \code{drop=FALSE}, all pixels are retained, and the data frame has an extra column \code{inside} containing the logical value of each pixel (\code{TRUE} for pixels inside the window, \code{FALSE} for outside). If \code{x} is a rectangle or a polygonal window, the result is a data frame with columns \code{x} and \code{y} containing the spatial coordinates of the \emph{vertices} of the window. If the boundary consists of several polygons, the data frame has additional columns \code{id}, identifying which polygon is being traced, and \code{sign}, indicating whether the polygon is an outer or inner boundary (\code{sign=1} and \code{sign=-1} respectively). } \value{ A data frame with columns named \code{x} and \code{y}, and possibly other columns. } \author{ \spatstatAuthors. } \seealso{ \code{\link{as.data.frame.im}} } \examples{ as.data.frame(square(1)) holey <- owin(poly=list( list(x=c(0,10,0), y=c(0,0,10)), list(x=c(2,2,4,4), y=c(2,4,4,2)))) as.data.frame(holey) } \keyword{spatial} \keyword{methods} spatstat/man/plot.influence.ppm.Rd0000644000176200001440000000474113160710621016702 0ustar liggesusers\name{plot.influence.ppm} \alias{plot.influence.ppm} \title{ Plot Influence Measure } \description{ Plots an influence measure that has been computed by \code{\link{influence.ppm}}. } \usage{ \method{plot}{influence.ppm}(x, ..., multiplot=TRUE) } \arguments{ \item{x}{ Influence measure (object of class \code{"influence.ppm"}) computed by \code{\link{influence.ppm}}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppp}} to control the plotting. } \item{multiplot}{ Logical value indicating whether it is permissible to plot more than one panel. This happens if the original point process model is multitype. } } \details{ This is the plot method for objects of class \code{"influence.ppm"}. These objects are computed by the command \code{\link{influence.ppm}}. 
For a point process model fitted by maximum likelihood or maximum pseudolikelihood (the default), influence values are associated with the data points. The display shows circles centred at the data points with radii proportional to the influence values. If the original data were a multitype point pattern, then if \code{multiplot=TRUE} (the default), there is one such display for each possible type of point, while if \code{multiplot=FALSE} there is a single plot combining all data points regardless of type. For a model fitted by logistic composite likelihood (\code{method="logi"} in \code{\link{ppm}}) influence values are associated with the data points and also with the dummy points used to fit the model. The display consists of two panels, for the data points and dummy points respectively, showing circles with radii proportional to the influence values. If the original data were a multitype point pattern, then if \code{multiplot=TRUE} (the default), there is one pair of panels for each possible type of point, while if \code{multiplot=FALSE} there is a single plot combining all data and dummy points regardless of type. Use the argument \code{clipwin} to restrict the plot to a subset of the full data. } \value{ None. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. } \seealso{ \code{\link{influence.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) plot(influence(fit)) } \keyword{spatial} \keyword{models} spatstat/man/rSSI.Rd0000644000176200001440000001111313160710621013771 0ustar liggesusers\name{rSSI} \alias{rSSI} \title{Simulate Simple Sequential Inhibition} \description{ Generate a random point pattern, a realisation of the Simple Sequential Inhibition (SSI) process.
} \usage{ rSSI(r, n=Inf, win = square(1), giveup = 1000, x.init=NULL, ..., f=NULL, fmax=NULL, nsim=1, drop=TRUE) } \arguments{ \item{r}{ Inhibition distance. } \item{n}{ Maximum number of points allowed. If \code{n} is finite, stop when the \emph{total} number of points in the point pattern reaches \code{n}. If \code{n} is infinite (the default), stop only when it is apparently impossible to add any more points. See \bold{Details}. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. The default window is the unit square, unless \code{x.init} is specified, when the default window is the window of \code{x.init}. } \item{giveup}{ Number of rejected proposals after which the algorithm should terminate. } \item{x.init}{ Optional. Initial configuration of points. A point pattern (object of class \code{"ppp"}). The pattern returned by \code{rSSI} consists of this pattern together with the points added via simple sequential inhibition. See \bold{Details}. } \item{\dots}{Ignored.} \item{f,fmax}{ Optional arguments passed to \code{\link{rpoint}} to specify a non-uniform probability density for the random points. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This algorithm generates one or more realisations of the Simple Sequential Inhibition point process inside the window \code{win}. Starting with an empty window (or with the point pattern \code{x.init} if specified), the algorithm adds points one-by-one. Each new point is generated uniformly in the window and independently of preceding points. 
If the new point lies closer than \code{r} units from an existing point, then it is rejected and another random point is generated. The algorithm terminates when either \describe{ \item{(a)}{ the desired number \code{n} of points is reached, or } \item{(b)}{ the current point configuration has not changed for \code{giveup} iterations, suggesting that it is no longer possible to add new points. } } If \code{n} is infinite (the default) then the algorithm terminates only when (b) occurs. The result is sometimes called a \emph{Random Sequential Packing}. Note that argument \code{n} specifies the maximum permitted \bold{total} number of points in the pattern returned by \code{rSSI()}. If \code{x.init} is not \code{NULL} then the number of points that are \emph{added} is at most \code{n - npoints(x.init)} if \code{n} is finite. Thus if \code{x.init} is not \code{NULL} then argument \code{n} must be at least as large as \code{npoints(x.init)}, otherwise an error is given. If \code{n==npoints(x.init)} then a warning is given and the call to \code{rSSI()} has no real effect; \code{x.init} is returned. There is no requirement that the points of \code{x.init} be at a distance at least \code{r} from each other. All of the \emph{added} points will be at a distance at least \code{r} from each other and from any point of \code{x.init}. The points will be generated inside the window \code{win} and the result will be a point pattern in the same window. The default window is the unit square, \code{win = square(1)}, unless \code{x.init} is specified, when the default is \code{win=Window(x.init)}, the window of \code{x.init}. If both \code{win} and \code{x.init} are specified, and if the two windows are different, then a warning will be issued. Any points of \code{x.init} lying outside \code{win} will be removed, with a warning. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMaternI}}, \code{\link{rMaternII}}. 
} \examples{ Vinf <- rSSI(0.07) V100 <- rSSI(0.07, 100) X <- runifpoint(100) Y <- rSSI(0.03,142,x.init=X) # Y consists of X together with # 42 added points. plot(Y, main="rSSI") plot(X,add=TRUE,chars=20,cols="red") ## inhomogeneous Z <- rSSI(0.07, 50, f=function(x,y){x}) plot(Z) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat/man/contour.imlist.Rd0000644000176200001440000000251013160710571016147 0ustar liggesusers\name{contour.imlist} \alias{contour.imlist} \alias{contour.listof} \title{Array of Contour Plots} \description{ Generates an array of contour plots. } \usage{ \method{contour}{imlist}(x, \dots) \method{contour}{listof}(x, \dots) } \arguments{ \item{x}{ An object of the class \code{"imlist"} representing a list of pixel images. Alternatively \code{x} may belong to the outdated class \code{"listof"}. } \item{\dots}{ Arguments passed to \code{\link{plot.solist}} to control the spatial arrangement of panels, and arguments passed to \code{\link{contour.im}} to control the display of each panel. } } \value{ Null. } \details{ This is a method for the generic command \code{contour} for the class \code{"imlist"}. An object of class \code{"imlist"} represents a list of pixel images. (The outdated class \code{"listof"} is also handled.) Each entry in the list \code{x} will be displayed as a contour plot, in an array of panels laid out on the same graphics display, using \code{\link{plot.solist}}. Individual panels are plotted by \code{\link{contour.im}}.
} \seealso{ \code{\link{plot.solist}}, \code{\link{contour.im}} } \examples{ # Multitype point pattern contour(D <- density(split(amacrine))) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/pcf.fasp.Rd0000644000176200001440000001110013160710621014645 0ustar liggesusers\name{pcf.fasp} \alias{pcf.fasp} \title{Pair Correlation Function obtained from array of K functions} \description{ Estimates the (bivariate) pair correlation functions of a point pattern, given an array of (bivariate) K functions. } \usage{ \method{pcf}{fasp}(X, \dots, method="c") } \arguments{ \item{X}{ An array of multitype \eqn{K} functions (object of class \code{"fasp"}). } \item{\dots}{ Arguments controlling the smoothing spline function \code{smooth.spline}. } \item{method}{ Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the method for deriving the pair correlation function from the \code{K} function. } } \value{ A function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) representing an array of pair correlation functions. This can be thought of as a matrix \code{Y} each of whose entries \code{Y[i,j]} is a function value table (class \code{"fv"}) representing the pair correlation function between points of type \code{i} and points of type \code{j}. } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. 
We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} from an array of estimates of \eqn{K(r)} or its variants, using smoothing splines to approximate the derivatives. It is a method for the generic function \code{\link{pcf}}. The argument \code{X} should be a function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) containing several estimates of \eqn{K} functions. This should have been obtained from \code{\link{alltypes}} with the argument \code{fun="K"}. The smoothing spline operations are performed by \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}} from the \code{modreg} library. Four numerical methods are available: \itemize{ \item \bold{"a"} apply smoothing to \eqn{K(r)}, estimate its derivative, and plug in to the formula above; \item \bold{"b"} apply smoothing to \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)} constraining \eqn{Y(0) = 0}, estimate the derivative of \eqn{Y}, and solve; \item \bold{"c"} apply smoothing to \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Z(r) = K(r)/(pi * r^2)} constraining \eqn{Z(0)=1}, estimate its derivative, and solve. \item \bold{"d"} apply smoothing to \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))}, estimate its derivative, and solve. } Method \code{"c"} seems to be the best at suppressing variability for small values of \eqn{r}. However it effectively constrains \eqn{g(0) = 1}. If the point pattern seems to have inhibition at small distances, you may wish to experiment with method \code{"b"} which effectively constrains \eqn{g(0)=0}. Method \code{"a"} seems comparatively unreliable.
Useful arguments to control the splines include the smoothing tradeoff parameter \code{spar} and the degrees of freedom \code{df}. See \code{\link{smooth.spline}} for details. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}}, \code{\link{smooth.spline}}, \code{\link{predict.smooth.spline}} } \examples{ # multitype point pattern KK <- alltypes(amacrine, "K") p <- pcf.fasp(KK, spar=0.5, method="b") plot(p) # strong inhibition between points of the same type } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/cut.ppp.Rd0000644000176200001440000001070713160710571014556 0ustar liggesusers\name{cut.ppp} \alias{cut.ppp} \title{Classify Points in a Point Pattern} \description{ Classifies the points in a point pattern into distinct types according to the numerical marks in the pattern, or according to another variable. } \usage{ \method{cut}{ppp}(x, z=marks(x), ...) } \arguments{ \item{x}{ A two-dimensional point pattern. An object of class \code{"ppp"}. } \item{z}{ Data determining the classification. A numeric vector, a factor, a pixel image, a window, a tessellation, or a string giving the name of a column of marks or the name of a spatial coordinate. } \item{\dots}{ Arguments passed to \code{\link{cut.default}}. They determine the breakpoints for the mapping from numerical values in \code{z} to factor values in the output. See \code{\link{cut.default}}. } } \value{ A multitype point pattern, that is, a point pattern object (of class \code{"ppp"}) with a \code{marks} vector that is a factor. 
} \details{ This function has the effect of classifying each point in the point pattern \code{x} into one of several possible types. The classification is based on the dataset \code{z}, which may be either \itemize{ \item a factor (of length equal to the number of points in \code{z}) determining the classification of each point in \code{x}. Levels of the factor determine the classification. \item a numeric vector (of length equal to the number of points in \code{z}). The range of values of \code{z} will be divided into bands (the number of bands is determined by \code{\dots}) and \code{z} will be converted to a factor using \code{\link{cut.default}}. \item a pixel image (object of class \code{"im"}). The value of \code{z} at each point of \code{x} will be used as the classifying variable. \item a tessellation (object of class \code{"tess"}, see \code{\link{tess}}). Each point of \code{x} will be classified according to the tile of the tessellation into which it falls. \item a window (object of class \code{"owin"}). Each point of \code{x} will be classified according to whether it falls inside or outside this window. \item a character string, giving the name of one of the columns of \code{marks(x)}, if this is a data frame. \item a character string \code{"x"} or \code{"y"} identifying one of the spatial coordinates. } The default is to take \code{z} to be the vector of marks in \code{x} (or the first column in the data frame of marks of \code{x}, if it is a data frame). If the marks are numeric, then the range of values of the numerical marks is divided into several intervals, and each interval is associated with a level of a factor. The result is a marked point pattern, with the same window and point locations as \code{x}, but with the numeric mark of each point discretised by replacing it by the factor level. This is a convenient way to transform a marked point pattern which has numeric marks into a multitype point pattern, for example to plot it or analyse it. 
See the examples. To select some points from a point pattern, use the subset operators \code{\link{[.ppp}} or \code{\link{subset.ppp}} instead. } \seealso{ \code{\link{cut}}, \code{\link{ppp.object}}, \code{\link{tess}} } \examples{ # (1) cutting based on numeric marks of point pattern trees <- longleaf # Longleaf Pines data # the marks are positive real numbers indicating tree diameters. \testonly{ # smaller dataset trees <- trees[seq(1, npoints(trees), by=80)] } \dontrun{ plot(trees) } # cut the range of tree diameters into three intervals long3 <- cut(trees, breaks=3) \dontrun{ plot(long3) } # adult trees defined to have diameter at least 30 cm long2 <- cut(trees, breaks=c(0,30,100), labels=c("Sapling", "Adult")) plot(long2) plot(long2, cols=c("green","blue")) # (2) cutting based on another numeric vector # Divide Swedish Pines data into 3 classes # according to nearest neighbour distance swedishpines plot(cut(swedishpines, nndist(swedishpines), breaks=3)) # (3) cutting based on tessellation # Divide Swedish Pines study region into a 4 x 4 grid of rectangles # and classify points accordingly tes <- tess(xgrid=seq(0,96,length=5),ygrid=seq(0,100,length=5)) plot(cut(swedishpines, tes)) plot(tes, lty=2, add=TRUE) # (4) multivariate marks finpines cut(finpines, "height", breaks=4) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/unnormdensity.Rd0000644000176200001440000000435113160710621016075 0ustar liggesusers\name{unnormdensity} \alias{unnormdensity} \title{ Weighted kernel smoother } \description{ An unnormalised version of kernel density estimation where the weights are not required to sum to 1. The weights may be positive, negative or zero. } \usage{ unnormdensity(x, ..., weights = NULL) } \arguments{ \item{x}{ Numeric vector of data } \item{\dots}{ Arguments passed to \code{\link{density.default}}. Arguments must be \emph{named}. } \item{weights}{ Optional numeric vector of weights for the data.
} } \details{ This is an alternative to the standard \R kernel density estimation function \code{\link{density.default}}. The standard \code{\link{density.default}} requires the \code{weights} to be nonnegative numbers that add up to 1, and returns a probability density (a function that integrates to 1). This function \code{unnormdensity} does not impose any requirement on the \code{weights} except that they be finite. Individual weights may be positive, negative or zero. The result is a function that does not necessarily integrate to 1 and may be negative. The result is the convolution of the kernel \eqn{k} with the weighted data, \deqn{ f(x) = \sum_i w_i k(x- x_i) }{ f(x) = sum of w[i] * k(x - x[i]) } where \eqn{x_i}{x[i]} are the data points and \eqn{w_i}{w[i]} are the weights. The algorithm first selects the kernel bandwidth by applying \code{\link{density.default}} to the data \code{x} with normalised, positive weight vector \code{w = abs(weights)/sum(abs(weights))} and extracting the selected bandwidth. Then the result is computed by applying \code{\link{density.default}} to \code{x} twice using the normalised positive and negative parts of the weights. Note that the arguments \code{\dots} must be passed by name, i.e. in the form (\code{name=value}). Arguments that do not match an argument of \code{\link{density.default}} will be ignored \emph{silently}. } \value{ Object of class \code{"density"} as described in \code{\link{density.default}}. } \author{\adrian and \rolf } \seealso{ \code{\link{density.default}} } \examples{ d <- unnormdensity(1:3, weights=c(-1,0,1)) if(interactive()) plot(d) } \keyword{smooth} spatstat/man/pairwise.family.Rd0000644000176200001440000000273713160710621016270 0ustar liggesusers\name{pairwise.family} \alias{pairwise.family} \title{Pairwise Interaction Process Family} \description{ An object describing the family of all pairwise interaction Gibbs point processes.
} \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the pairwise interaction family of point process models. If you need to create a specific pairwise interaction model for use in modelling, use the function \code{\link{Pairwise}} or one of the existing functions listed below. Anyway, \code{pairwise.family} is an object of class \code{"isf"} containing a function \code{pairwise.family$eval} for evaluating the sufficient statistics of any pairwise interaction point process model taking an exponential family form. } \seealso{ Other families: \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. Pairwise interactions: \code{\link{Poisson}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Fiksel}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}}. Other interactions: \code{\link{AreaInter}}, \code{\link{Geyer}}, \code{\link{Saturated}}, \code{\link{Ord}}, \code{\link{OrdThresh}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/markvario.Rd0000644000176200001440000000713513160710621015155 0ustar liggesusers\name{markvario} \alias{markvario} \title{Mark Variogram} \description{ Estimate the mark variogram of a marked point pattern. } \usage{ markvario(X, correction = c("isotropic", "Ripley", "translate"), r = NULL, method = "density", ..., normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. It must have marks which are numeric. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{r}{numeric vector. 
The values of the argument \eqn{r} at which the mark variogram \eqn{\gamma(r)}{gamma(r)} should be evaluated. There is a sensible default. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{If \code{TRUE}, normalise the variogram by dividing it by the estimated mark variance. } } \details{ The mark variogram \eqn{\gamma(r)}{gamma(r)} of a marked point process \eqn{X} is a measure of the dependence between the marks of two points of the process a distance \eqn{r} apart. It is informally defined as \deqn{ \gamma(r) = E[\frac 1 2 (M_1 - M_2)^2] }{ gamma(r) = E[(1/2) * (M1 - M2)^2 ] } where \eqn{E[ ]} denotes expectation and \eqn{M_1,M_2}{M1,M2} are the marks attached to two points of the process a distance \eqn{r} apart. The mark variogram of a marked point process is analogous, but \bold{not equivalent}, to the variogram of a random field in geostatistics. See Waelder and Stoyan (1996). } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark variogram \eqn{\gamma(r)}{gamma(r)} has been estimated } \item{theo}{the theoretical value of \eqn{\gamma(r)}{gamma(r)} when the marks attached to different points are independent; equal to the sample variance of the marks } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{\gamma(r)}{gamma(r)} obtained by the edge corrections named. } \references{ Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Mase, S. 
(1996) The threshold method for estimating annual rainfall. \emph{Annals of the Institute of Statistical Mathematics} \bold{48} (1996) 201-213. Waelder, O. and Stoyan, D. (1996) On variograms in point process statistics. \emph{Biometrical Journal} \bold{38} (1996) 895-905. } \seealso{ Mark correlation function \code{\link{markcorr}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. } \examples{ # Longleaf Pine data # marks represent tree diameter data(longleaf) # Subset of this large pattern swcorner <- owin(c(0,100),c(0,100)) sub <- longleaf[ , swcorner] # mark correlation function mv <- markvario(sub) plot(mv) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/nncorr.Rd0000644000176200001440000001737413160710621014471 0ustar liggesusers\name{nncorr} \alias{nncorr} \alias{nnmean} \alias{nnvario} \title{Nearest-Neighbour Correlation Indices of Marked Point Pattern} \description{ Computes nearest-neighbour correlation indices of a marked point pattern, including the nearest-neighbour mark product index (default case of \code{nncorr}), the nearest-neighbour mark index (\code{nnmean}), and the nearest-neighbour variogram index (\code{nnvario}). } \usage{ nncorr(X, f = function(m1, m2) { m1 * m2 }, k = 1, \dots, use = "all.obs", method = c("pearson", "kendall", "spearman"), denominator=NULL) nnmean(X, k=1) nnvario(X, k=1) } \arguments{ \item{X}{ The observed point pattern. An object of class \code{"ppp"}. } \item{f}{ Function \eqn{f} used in the definition of the nearest neighbour correlation. There is a sensible default that depends on the type of marks of \code{X}. } \item{k}{ Integer. The \code{k}-th nearest neighbour of each point will be used. } \item{\dots}{ Extra arguments passed to \code{f}. } \item{use,method}{ Arguments passed to the standard correlation function \code{\link{cor}}. 
} \item{denominator}{ Internal use only. } } \details{ The nearest neighbour correlation index \eqn{\bar n_f}{nbar} of a marked point process \eqn{X} is a number measuring the dependence between the mark of a typical point and the mark of its nearest neighbour. The command \code{nncorr} computes the nearest neighbour correlation index based on any test function \code{f} provided by the user. The default behaviour of \code{nncorr} is to compute the nearest neighbour mark product index. The commands \code{nnmean} and \code{nnvario} are convenient abbreviations for other special choices of \code{f}. In the default case, \code{nncorr(X)} computes three different versions of the nearest-neighbour correlation index: the unnormalised, normalised, and classical correlations. \describe{ \item{unnormalised:}{ The \bold{unnormalised} nearest neighbour correlation (Stoyan and Stoyan, 1994, section 14.7) is defined as \deqn{\bar n_f = E[f(M, M^\ast)]}{nbar[f] = E[f(M, M*)]} where \eqn{E[]} denotes mean value, \eqn{M} is the mark attached to a typical point of the point process, and \eqn{M^\ast}{M*} is the mark attached to its nearest neighbour (i.e. the nearest other point of the point process). Here \eqn{f} is any function \eqn{f(m_1,m_2)}{f(m1,m2)} with two arguments which are possible marks of the pattern, and which returns a nonnegative real value. Common choices of \eqn{f} are: for continuous real-valued marks, \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2} for discrete marks (multitype point patterns), \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)} and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)}, \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2).} For example, in the second case, the unnormalised nearest neighbour correlation \eqn{\bar n_f}{nbar[f]} equals the proportion of points in the pattern which have the same mark as their nearest neighbour. Note that \eqn{\bar n_f}{nbar[f]} is not a ``correlation'' in the usual statistical sense. 
It can take values greater than 1. } \item{normalised:}{ We can define a \bold{normalised} nearest neighbour correlation by \deqn{\bar m_f = \frac{E[f(M,M^\ast)]}{E[f(M,M')]}}{mbar[f] = E[f(M,M*)]/E[f(M,M')]} where again \eqn{M} is the mark attached to a typical point, \eqn{M^\ast}{M*} is the mark attached to its nearest neighbour, and \eqn{M'} is an independent copy of \eqn{M} with the same distribution. This normalisation is also not a ``correlation'' in the usual statistical sense, but is normalised so that the value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{\bar m_f = 1}{mbar[f] = 1}. The interpretation of values larger or smaller than 1 depends on the choice of function \eqn{f}. } \item{classical:}{ Finally if the marks of \code{X} are real numbers, we can also compute the \bold{classical} correlation, that is, the correlation coefficient of the two random variables \eqn{M} and \eqn{M^\ast}{M*}. The classical correlation has a value between \eqn{-1} and \eqn{1}. Values close to \eqn{-1} or \eqn{1} indicate strong dependence between the marks. } } In the default case where \code{f} is not given, \code{nncorr(X)} computes \itemize{ \item If the marks of \code{X} are real numbers, the unnormalised and normalised versions of the nearest-neighbour product index \eqn{E[M \, M^\ast]}{E[M * M*]}, and the classical correlation between \eqn{M} and \eqn{M^\ast}{M*}. \item If the marks of \code{X} are factor valued, the unnormalised and normalised versions of the nearest-neighbour equality index \eqn{P[M = M^\ast]}{P[M = M*]}. } The wrapper functions \code{nnmean} and \code{nnvario} compute the correlation indices for two special choices of the function \eqn{f(m_1,m_2)}{f(m1,m2)}. \itemize{ \item \code{nnmean} computes the correlation indices for \eqn{f(m_1,m_2) = m_1}{f(m1,m2) = m1}. 
The unnormalised index is simply the mean value of the mark of the neighbour of a typical point, \eqn{E[M^\ast]}{E[M*]}, while the normalised index is \eqn{E[M^\ast]/E[M]}{E[M*]/E[M]}, the ratio of the mean mark of the neighbour of a typical point to the mean mark of a typical point. \item \code{nnvario} computes the correlation indices for \eqn{f(m_1,m_2) = (1/2) (m_1-m_2)^2}{f(m1,m2) = (1/2) * (m1-m2)^2}. } The argument \code{X} must be a point pattern (object of class \code{"ppp"}) and must be a marked point pattern. (The marks may be a data frame, containing several columns of mark variables; each column is treated separately.) If the argument \code{f} is given, it must be a function, accepting two arguments \code{m1} and \code{m2} which are vectors of equal length containing mark values (of the same type as the marks of \code{X}). It must return a vector of numeric values of the same length as \code{m1} and \code{m2}. The values must be non-negative. The arguments \code{use} and \code{method} control the calculation of the classical correlation using \code{\link{cor}}, as explained in the help file for \code{\link{cor}}. Other arguments may be passed to \code{f} through the \code{...} argument. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated using the \sQuote{border method} edge correction. } \value{ Labelled vector of length 2 or 3 containing the unnormalised and normalised nearest neighbour correlations, and the classical correlation if appropriate. Alternatively a matrix with 2 or 3 rows, containing this information for each mark variable. 
} \examples{ data(finpines) nncorr(finpines) # heights of neighbouring trees are slightly negatively correlated data(amacrine) nncorr(amacrine) # neighbouring cells are usually of different type } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/is.ppp.Rd0000644000176200001440000000130613160710621014365 0ustar liggesusers\name{is.ppp} \alias{is.ppp} \title{Test Whether An Object Is A Point Pattern} \description{ Checks whether its argument is a point pattern (object of class \code{"ppp"}). } \usage{ is.ppp(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the object \code{x} is a point pattern object of class \code{"ppp"}. See \code{\link{ppm.object}} for details of this class. The result is determined to be \code{TRUE} if \code{x} inherits from \code{"ppp"}, i.e. if \code{x} has \code{"ppp"} amongst its classes. } \value{ \code{TRUE} if \code{x} is a point pattern, otherwise \code{FALSE}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/anylist.Rd0000644000176200001440000000204713160710571014646 0ustar liggesusers\name{anylist} \alias{anylist} \alias{as.anylist} \title{ List of Objects } \description{ Make a list of objects of any type. } \usage{ anylist(\dots) as.anylist(x) } \arguments{ \item{\dots}{ Any number of arguments of any type. } \item{x}{ A list. } } \details{ An object of class \code{"anylist"} is a list of objects that the user intends to treat in a similar fashion. For example it may be desired to plot each of the objects side-by-side: this can be done using the function \code{\link{plot.anylist}}. The objects can belong to any class; they may or may not all belong to the same class. In the \pkg{spatstat} package, various functions produce an object of class \code{"anylist"}. 
} \value{ A list, belonging to the class \code{"anylist"}, containing the original objects. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{solist}}, \code{\link{as.solist}}, \code{\link{anylapply}}. } \examples{ anylist(cells, intensity(cells), Kest(cells)) } \keyword{list} \keyword{manip} spatstat/man/Ord.Rd0000644000176200001440000000352613160710571013712 0ustar liggesusers\name{Ord} \alias{Ord} \title{Generic Ord Interaction model} \description{ Creates an instance of an Ord-type interaction point process model which can then be fitted to point pattern data. } \usage{ Ord(pot, name) } \arguments{ \item{pot}{An S language function giving the user-supplied interaction potential.} \item{name}{Character string.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Ord's point process model (Ord, 1977) is a Gibbs point process of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the tile associated with \eqn{x_i}{x[i]} in the Dirichlet tessellation of \eqn{x}. Ord (1977) proposed fitting this model to forestry data when \eqn{g(a)} has a simple ``threshold'' form. That model is implemented in our function \code{\link{OrdThresh}}. The present function \code{Ord} implements the case of a completely general Ord potential \eqn{g(a)} specified as an S language function \code{pot}. This is experimental. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). 
\emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{OrdThresh}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/as.data.frame.tess.Rd0000644000176200001440000000327613160710571016551 0ustar liggesusers\name{as.data.frame.tess} \alias{as.data.frame.tess} \title{Convert Tessellation to Data Frame} \description{ Converts a spatial tessellation object to a data frame. } \usage{ \method{as.data.frame}{tess}(x, \dots) } \arguments{ \item{x}{ Tessellation (object of class \code{"tess"}). } \item{\dots}{Further arguments passed to \code{\link{as.data.frame.owin}} or \code{\link{as.data.frame.im}} and ultimately to \code{\link[base:as.data.frame]{as.data.frame.default}} to determine the row names and other features. } } \details{ This function converts the tessellation \code{x} to a data frame. If \code{x} is a pixel image tessellation (a pixel image with factor values specifying the tile membership of each pixel) then this pixel image is converted to a data frame by \code{\link{as.data.frame.im}}. The result is a data frame with columns \code{x} and \code{y} giving the pixel coordinates, and \code{Tile} identifying the tile containing the pixel. If \code{x} is a tessellation consisting of a rectangular grid of tiles or a list of polygonal tiles, then each tile is converted to a data frame by \code{\link{as.data.frame.owin}}, and these data frames are joined together, yielding a single large data frame containing columns \code{x}, \code{y} giving the coordinates of vertices of the polygons, and \code{Tile} identifying the tile. } \value{ A data frame with columns named \code{x}, \code{y}, \code{Tile}, and possibly other columns. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{as.data.frame.owin}}, \code{\link{as.data.frame.im}} } \examples{ Z <- as.data.frame(dirichlet(cells)) head(Z, 10) } \keyword{spatial} \keyword{methods} spatstat/man/Jdot.Rd0000644000176200001440000001640013160710571014061 0ustar liggesusers\name{Jdot} \alias{Jdot} \title{ Multitype J Function (i-to-any) } \description{ For a multitype point pattern, estimate the multitype \eqn{J} function summarising the interpoint dependence between the type \eqn{i} points and the points of any type. } \usage{ Jdot(X, i, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{i\bullet}(r)}{Ji.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{eps}{A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
Essentially a data frame containing six numeric columns \item{J}{the recommended estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)}, currently the Kaplan-Meier estimator. } \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} has been estimated } \item{km}{the Kaplan-Meier estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{un}{the ``uncorrected'' estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{i\bullet}(r)}{1 - Gi.(r)} and \eqn{1 - F_{\bullet}(r)}{1 - F.(r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{i\bullet}(r)}{Ji.(r)} for a marked Poisson process, namely 1. } The result also has two attributes \code{"G"} and \code{"F"} which are respectively the outputs of \code{\link{Gdot}} and \code{\link{Fest}} for the point pattern. } \details{ This function \code{Jdot} and its companions \code{\link{Jcross}} and \code{\link{Jmulti}} are generalisations of the function \code{\link{Jest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. 
(Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level.) The ``type \eqn{i} to any type'' multitype \eqn{J} function of a stationary multitype point process \eqn{X} was introduced by Van Lieshout and Baddeley (1999). It is defined by \deqn{J_{i\bullet}(r) = \frac{1 - G_{i\bullet}(r)}{1 - F_{\bullet}(r)}}{Ji.(r) = (1 - Gi.(r))/(1-F.(r))} where \eqn{G_{i\bullet}(r)}{Gi.(r)} is the distribution function of the distance from a type \eqn{i} point to the nearest other point of the pattern, and \eqn{F_{\bullet}(r)}{F.(r)} is the distribution function of the distance from a fixed point in space to the nearest point of the pattern. An estimate of \eqn{J_{i\bullet}(r)}{Ji.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the pattern is a marked Poisson point process, then \eqn{J_{i\bullet}(r) \equiv 1}{Ji.(r) = 1}. If the subprocess of type \eqn{i} points is independent of the subprocess of points of all types not equal to \eqn{i}, then \eqn{J_{i\bullet}(r)}{Ji.(r)} equals \eqn{J_{ii}(r)}{Jii(r)}, the ordinary \eqn{J} function (see \code{\link{Jest}} and Van Lieshout and Baddeley (1996)) of the points of type \eqn{i}. Hence deviations from zero of the empirical estimate of \eqn{J_{i\bullet} - J_{ii}}{Ji.-Jii} may suggest dependence between types. This algorithm estimates \eqn{J_{i\bullet}(r)}{Ji.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}, using the Kaplan-Meier and border corrections. The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}.
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must exceed the radius of the largest disc contained in the window. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Jcross}}, \code{\link{Jest}}, \code{\link{Jmulti}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{ woods <- woods[seq(1,npoints(woods), by=30), ] } Jh. <- Jdot(woods, "hickory") plot(Jh.) # diagnostic plot for independence between hickories and other trees Jhh <- Jest(split(woods)$hickory) plot(Jhh, add=TRUE, legendpos="bottom") \dontrun{ # synthetic example with two marks "a" and "b" pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE)) J <- Jdot(pp, "a") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/midpoints.psp.Rd0000644000176200001440000000127013160710621015763 0ustar liggesusers\name{midpoints.psp} \alias{midpoints.psp} \title{Midpoints of Line Segment Pattern} \description{ Computes the midpoints of each line segment in a line segment pattern. } \usage{ midpoints.psp(x) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } } \value{ Point pattern (object of class \code{"ppp"}). 
} \details{ The midpoint of each line segment is computed. } \seealso{ \code{\link{summary.psp}}, \code{\link{lengths.psp}}, \code{\link{angles.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- midpoints.psp(a) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/stieltjes.Rd0000644000176200001440000000434313160710621015166 0ustar liggesusers\name{stieltjes} \alias{stieltjes} \title{Compute Integral of Function Against Cumulative Distribution} \description{ Computes the Stieltjes integral of a function \eqn{f} with respect to a function \eqn{M}. } \usage{ stieltjes(f, M, ...) } \arguments{ \item{f}{ The integrand. A function in the \R language. } \item{M}{ The cumulative function against which \code{f} will be integrated. An object of class \code{"fv"} or \code{"stepfun"}. } \item{\dots}{ Additional arguments passed to \code{f}. } } \details{ This command computes the Stieltjes integral \deqn{I = \int f(x) dM(x)}{I = integral f(x) dM(x)} of a real-valued function \eqn{f(x)} with respect to a nondecreasing function \eqn{M(x)}. One common use of the Stieltjes integral is to find the mean value of a random variable from its cumulative distribution function \eqn{F(x)}. The mean value is the Stieltjes integral of \eqn{f(x)=x} with respect to \eqn{F(x)}. The argument \code{f} should be a \code{function} in the \R language. It should accept a numeric vector argument \code{x} and should return a numeric vector of the same length. The argument \code{M} should be either a step function (object of class \code{"stepfun"}) or a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). Objects of class \code{"stepfun"} are returned by \code{\link[stats]{ecdf}}, \code{\link{ewcdf}}, \code{\link{spatialcdf}} and other utilities. Objects of class \code{"fv"} are returned by the commands \code{\link{Kest}}, \code{\link{Gest}}, etc. 
} \value{ A list containing the value of the Stieltjes integral computed using each of the versions of the function \code{M}. } \seealso{ \code{\link{fv.object}}, \code{\link{Gest}} } \examples{ # estimate cdf of nearest neighbour distance in redwood data G <- Gest(redwood) # compute estimate of mean nearest neighbour distance stieltjes(function(x){x}, G) # estimated probability of a distance in the interval [0.1,0.2] stieltjes(function(x,a,b){ (x >= a) & (x <= b)}, G, a=0.1, b=0.2) # stepfun example H <- spatialcdf(bei.extra$elev, normalise=TRUE) stieltjes(function(x){x}, H) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/fitted.lppm.Rd0000644000176200001440000000527113160710621015407 0ustar liggesusers\name{fitted.lppm} \alias{fitted.lppm} \title{ Fitted Intensity for Point Process on Linear Network } \description{ Given a point process model fitted to a point pattern on a linear network, compute the fitted intensity of the model at the points of the pattern, or at the points of the quadrature scheme used to fit the model. } \usage{ \method{fitted}{lppm}(object, \dots, dataonly = FALSE, new.coef = NULL, leaveoneout = FALSE) } \arguments{ \item{object}{ Fitted point process model on a linear network (object of class \code{"lppm"}). } \item{\dots}{ Ignored. } \item{dataonly}{ Logical value indicating whether to computed fitted intensities at the points of the original point pattern dataset (\code{dataonly=TRUE}) or at all the quadrature points of the quadrature scheme used to fit the model (\code{dataonly=FALSE}, the default). } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{leaveoneout}{ Logical. If \code{TRUE} the fitted value at each data point will be computed using a leave-one-out method. See Details. 
} } \details{ This is a method for the generic function \code{\link[stats]{fitted}} for the class \code{"lppm"} of fitted point process models on a linear network. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated, are the points of the quadrature scheme used to fit the model in \code{\link{ppm}}. They include the data points (the points of the original point pattern dataset \code{x}) and other ``dummy'' points in the window of observation. If \code{leaveoneout=TRUE}, fitted values will be computed for the data points only, using a \sQuote{leave-one-out} rule: the fitted value at \code{X[i]} is effectively computed by deleting this point from the data and re-fitting the model to the reduced pattern \code{X[-i]}, then predicting the value at \code{X[i]}. (Instead of literally performing this calculation, we apply a Taylor approximation using the influence function computed in \code{\link{dfbetas.ppm}}.) } \value{ A vector containing the values of the fitted spatial trend. Entries in this vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{union.quad(quad.ppm(object))}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{lppm}}, \code{\link{predict.lppm}} } \examples{ fit <- lppm(spiders~x+y) a <- fitted(fit) b <- fitted(fit, dataonly=TRUE) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/predict.ppm.Rd0000644000176200001440000003510313160710621015403 0ustar liggesusers\name{predict.ppm} \alias{predict.ppm} \title{Prediction from a Fitted Point Process Model} \description{ Given a fitted point process model obtained by \code{\link{ppm}}, evaluate the spatial trend or the conditional intensity of the model at new locations.
} \usage{ \method{predict}{ppm}(object, window=NULL, ngrid=NULL, locations=NULL, covariates=NULL, type=c("trend", "cif", "intensity", "count"), se=FALSE, interval=c("none", "confidence", "prediction"), level = 0.95, X=data.ppm(object), correction, \dots, new.coef=NULL, check=TRUE, repair=TRUE) } \arguments{ \item{object}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"} (see \code{\link{ppm.object}}). } \item{window}{ Optional. A window (object of class \code{"owin"}) \emph{delimiting} the locations where predictions should be computed. Defaults to the window of the original data used to fit the model \code{object}. } \item{ngrid}{ Optional. Dimensions of a rectangular grid of locations inside \code{window} where the predictions should be computed. An integer, or an integer vector of length 2, specifying the number of grid points in the \eqn{y} and \eqn{x} directions. (Incompatible with \code{locations}) } \item{locations}{ Optional. Data giving the exact \eqn{x,y} coordinates (and marks, if required) of locations at which predictions should be computed. Either a point pattern, or a data frame with columns named \code{x} and \code{y}, or a binary image mask, or a pixel image. (Incompatible with \code{ngrid}) } \item{covariates}{ Values of external covariates required by the model. Either a data frame or a list of images. See Details. } \item{type}{ Character string. Indicates which property of the fitted model should be predicted. Options are \code{"trend"} for the spatial trend, \code{"cif"} or \code{"lambda"} for the conditional intensity, \code{"intensity"} for the intensity, and \code{"count"} for the total number of points in \code{window}. } \item{se}{ Logical value indicating whether to calculate standard errors as well. 
} \item{interval}{ String (partially matched) indicating whether to produce estimates (\code{interval="none"}, the default) or a confidence interval (\code{interval="confidence"}) or a prediction interval (\code{interval="prediction"}). } \item{level}{ Coverage probability for the confidence or prediction interval. } \item{X}{ Optional. A point pattern (object of class \code{"ppp"}) to be taken as the data point pattern when calculating the conditional intensity. The default is to use the original data to which the model was fitted. } \item{correction}{ Name of the edge correction to be used in calculating the conditional intensity. Options include \code{"border"} and \code{"none"}. Other options may include \code{"periodic"}, \code{"isotropic"} and \code{"translate"} depending on the model. The default correction is the one that was used to fit \code{object}. } \item{\dots}{ Ignored. } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } } \value{ \emph{If \code{total} is given:} a numeric vector or matrix. \emph{If \code{locations} is given and is a data frame:} a vector of predicted values for the spatial locations (and marks, if required) given in \code{locations}. \emph{If \code{ngrid} is given, or if \code{locations} is given and is a binary image mask or a pixel image:} If \code{object} is an unmarked point process, the result is a pixel image object (of class \code{"im"}, see \code{\link{im.object}}) containing the predictions. 
If \code{object} is a multitype point process, the result is a list of pixel images, containing the predictions for each type at the same grid of locations. The ``predicted values'' are either values of the spatial trend (if \code{type="trend"}), values of the conditional intensity (if \code{type="cif"} or \code{type="lambda"}), values of the intensity (if \code{type="intensity"}) or numbers of points (if \code{type="count"}). If \code{se=TRUE}, then the result is a list with two entries, the first being the predicted values in the format described above, and the second being the standard errors in the same format. } \details{ This function computes properties of a fitted spatial point process model (object of class \code{"ppm"}). For a Poisson point process it can compute the fitted intensity function, or the expected number of points in a region. For a Gibbs point process it can compute the spatial trend (first order potential), conditional intensity, and approximate intensity of the process. Point estimates, standard errors, confidence intervals and prediction intervals are available. Given a point pattern dataset, we may fit a point process model to the data using the model-fitting algorithm \code{\link{ppm}}. This returns an object of class \code{"ppm"} representing the fitted point process model (see \code{\link{ppm.object}}). The parameter estimates in this fitted model can be read off simply by printing the \code{ppm} object. The spatial trend, conditional intensity and intensity of the fitted model are evaluated using this function \code{predict.ppm}. The default action is to create a rectangular grid of points in the observation window of the data point pattern, and evaluate the spatial trend at these locations. The argument \code{type} specifies the values that are desired: \describe{ \item{If \code{type="trend"}:}{ the ``spatial trend'' of the fitted model is evaluated at each required spatial location \eqn{u}. See below. 
} \item{If \code{type="cif"}:}{ the conditional intensity \eqn{\lambda(u, X)}{lambda(u,X)} of the fitted model is evaluated at each required spatial location \eqn{u}, with respect to the data point pattern \eqn{X}. } \item{If \code{type="intensity"}:}{ the intensity \eqn{\lambda(u)}{lambda(u)} of the fitted model is evaluated at each required spatial location \eqn{u}. } \item{If \code{type="count"}:}{ the expected total number of points (or the expected number of points falling in \code{window}) is evaluated. If \code{window} is a tessellation, the expected number of points in each tile of the tessellation is evaluated. } } The spatial trend, conditional intensity, and intensity are all equivalent if the fitted model is a Poisson point process. However, if the model is not a Poisson process, then they are all different. The ``spatial trend'' is the (exponentiated) first order potential, and not the intensity of the process. [For example if we fit the stationary Strauss process with parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, then the spatial trend is constant and equal to \eqn{\beta}{beta}, while the intensity is a smaller value.] The default is to compute an estimate of the desired quantity. If \code{interval="confidence"} or \code{interval="prediction"}, the estimate is replaced by a confidence interval or prediction interval. If \code{se=TRUE}, then a standard error is also calculated, and is returned together with the (point or interval) estimate. The spatial locations where predictions are required, are determined by the (incompatible) arguments \code{ngrid} and \code{locations}. \itemize{ \item If the argument \code{ngrid} is present, then predictions are performed at a rectangular grid of locations in the window \code{window}. The result of prediction will be a pixel image or images. \item If \code{locations} is present, then predictions will be performed at the spatial locations given by this dataset. 
These may be an arbitrary list of spatial locations, or they may be a rectangular grid. The result of prediction will be either a numeric vector or a pixel image or images. \item If neither \code{ngrid} nor \code{locations} is given, then \code{ngrid} is assumed. The value of \code{ngrid} defaults to \code{\link{spatstat.options}("npixel")}, which is initialised to 128 when \pkg{spatstat} is loaded. } The argument \code{locations} may be a point pattern, a data frame or a list specifying arbitrary locations; or it may be a binary image mask (an object of class \code{"owin"} with type \code{"mask"}) or a pixel image (object of class \code{"im"}) specifying (a subset of) a rectangular grid of locations. \itemize{ \item If \code{locations} is a point pattern (object of class \code{"ppp"}), then prediction will be performed at the points of the point pattern. The result of prediction will be a vector of predicted values, one value for each point. If the model is a marked point process, then \code{locations} should be a marked point pattern, with marks of the same kind as the model; prediction will be performed at these marked points. The result of prediction will be a vector of predicted values, one value for each (marked) point. \item If \code{locations} is a data frame or list, then it must contain vectors \code{locations$x} and \code{locations$y} specifying the \eqn{x,y} coordinates of the prediction locations. Additionally, if the model is a marked point process, then \code{locations} must also contain a factor \code{locations$marks} specifying the marks of the prediction locations. These vectors must have equal length. The result of prediction will be a vector of predicted values, of the same length. \item If \code{locations} is a binary image mask, then prediction will be performed at each pixel in this binary image where the pixel value is \code{TRUE} (in other words, at each pixel that is inside the window). 
If the fitted model is an unmarked point process, then the result of prediction will be an image. If the fitted model is a marked point process, then prediction will be performed for each possible value of the mark at each such location, and the result of prediction will be a list of images, one for each mark value. \item If \code{locations} is a pixel image (object of class \code{"im"}), then prediction will be performed at each pixel in this image where the pixel value is defined (i.e.\ where the pixel value is not \code{NA}). } The argument \code{covariates} gives the values of any spatial covariates at the prediction locations. If the trend formula in the fitted model involves spatial covariates (other than the Cartesian coordinates \code{x}, \code{y}) then \code{covariates} is required. The format and use of \code{covariates} are analogous to those of the argument of the same name in \code{\link{ppm}}. It is either a data frame or a list of images. \itemize{ \item If \code{covariates} is a list of images, then the names of the entries should correspond to the names of covariates in the model formula \code{trend}. Each entry in the list must be an image object (of class \code{"im"}, see \code{\link{im.object}}). The software will look up the pixel values of each image at the quadrature points. \item If \code{covariates} is a data frame, then the \code{i}th row of \code{covariates} is assumed to contain covariate data for the \code{i}th location. When \code{locations} is a data frame, this just means that each row of \code{covariates} contains the covariate data for the location specified in the corresponding row of \code{locations}. When \code{locations} is a binary image mask, the row \code{covariates[i,]} must correspond to the location \code{x[i],y[i]} where \code{x = as.vector(raster.x(locations))} and \code{y = as.vector(raster.y(locations))}. 
} Note that if you only want to use prediction in order to generate a plot of the predicted values, it may be easier to use \code{\link{plot.ppm}} which calls this function and plots the results. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{plot.ppm}}, \code{\link{print.ppm}}, \code{\link{fitted.ppm}}, \code{\link{spatstat.options}} } \section{Warnings}{ The current implementation invokes \code{\link{predict.glm}} so that \bold{prediction is wrong} if the trend formula in \code{object} involves terms in \code{ns()}, \code{bs()} or \code{poly()}. This is a weakness of \code{\link{predict.glm}} itself! Error messages may be very opaque, as they tend to come from deep in the workings of \code{\link{predict.glm}}. If you are passing the \code{covariates} argument and the function crashes, it is advisable to start by checking that all the conditions listed above are satisfied. 
} \examples{ \testonly{op <- spatstat.options(npixel=32)} m <- ppm(cells ~ polynom(x,y,2), Strauss(0.05)) trend <- predict(m, type="trend") \dontrun{ image(trend) points(cells) } cif <- predict(m, type="cif") \dontrun{ persp(cif) } data(japanesepines) mj <- ppm(japanesepines ~ harmonic(x,y,2)) se <- predict(mj, se=TRUE) # prediction interval for total number of points predict(mj, type="count", interval="p") # prediction at arbitrary locations predict(mj, locations=data.frame(x=0.3, y=0.4)) X <- runifpoint(5, Window(japanesepines)) predict(mj, locations=X, se=TRUE) # multitype rr <- matrix(0.06, 2, 2) ma <- ppm(amacrine ~ marks, MultiStrauss(rr)) Z <- predict(ma) Z <- predict(ma, type="cif") predict(ma, locations=data.frame(x=0.8, y=0.5,marks="on"), type="cif") \testonly{spatstat.options(op)} } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/envelope.Rd0000644000176200001440000007575613160710621015015 0ustar liggesusers\name{envelope} \alias{envelope} \alias{envelope.ppp} \alias{envelope.ppm} \alias{envelope.kppm} \title{Simulation Envelopes of Summary Function} \description{ Computes simulation envelopes of a summary function. 
} \usage{ envelope(Y, fun, \dots) \method{envelope}{ppp}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{ppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, start=NULL, control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{kppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ Object containing point pattern data. A point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. 
} \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{fix.n}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points as the original data pattern. This option is currently not available for \code{envelope.kppm}. } \item{fix.marks}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points \emph{and} the same marks as the original data pattern. In a multitype point pattern this means that the simulated patterns will have the same number of points \emph{of each type} as the original data. This option is currently not available for \code{envelope.kppm}. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{clipdata}{ Logical flag indicating whether the data point pattern should be clipped to the same window as the simulated patterns, before the summary function for the data is computed. This should usually be \code{TRUE} to ensure that the data and simulations are properly comparable. } \item{start,control}{ Optional. These specify the arguments \code{start} and \code{control} of \code{rmh}, giving complete control over the simulation algorithm. Applicable only when \code{Y} is a fitted model of class \code{"ppm"}. 
} \item{nrep}{ Number of iterations in the Metropolis-Hastings simulation algorithm. Applicable only when \code{Y} is a fitted model of class \code{"ppm"}. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. Applicable only when \code{global=TRUE}. Default is \code{use.theory=TRUE} if \code{Y} is a point pattern, or a point process model equivalent to Complete Spatial Randomness, and \code{use.theory=FALSE} otherwise. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{alternative="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{alternative="less"}) or a one-sided test with an upper critical boundary (\code{alternative="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}.
Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields an error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. 
} \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ An object of class \code{"envelope"} and \code{"fv"}, see \code{\link{fv.object}}, which can be printed and plotted directly. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the summary function \code{fun} has been estimated } \item{obs}{ values of the summary function for the data point pattern } \item{lo}{ lower envelope of simulations } \item{hi}{ upper envelope of simulations } and \emph{either} \item{theo}{ theoretical value of the summary function under CSR (Complete Spatial Randomness, a uniform Poisson point process) if the simulations were generated according to CSR } \item{mmean}{ estimated theoretical value of the summary function, computed by averaging simulated values, if the simulations were not generated according to CSR. } Additionally, if \code{savepatterns=TRUE}, the return value has an attribute \code{"simpatterns"} which is a list containing the \code{nsim} simulated patterns. If \code{savefuns=TRUE}, the return value has an attribute \code{"simfuns"} which is an object of class \code{"fv"} containing the summary functions computed for each of the \code{nsim} simulated patterns. } \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. For the most basic use, if you have a point pattern \code{X} and you want to test Complete Spatial Randomness (CSR), type \code{plot(envelope(X, Kest,nsim=39))} to see the \eqn{K} function for \code{X} plotted together with the envelopes of the \eqn{K} function for 39 simulations of CSR. 
The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"} and \code{"kppm"} described here. There are also methods for the classes \code{"pp3"}, \code{"lpp"} and \code{"lppm"} which are described separately under \code{\link{envelope.pp3}} and \code{\link{envelope.lpp}}. Envelopes can also be computed from other envelopes, using \code{\link{envelope.envelope}}. To create simulation envelopes, the command \code{envelope(Y, ...)} first generates \code{nsim} random point patterns in one of the following ways. \itemize{ \item If \code{Y} is a point pattern (an object of class \code{"ppp"}) and \code{simulate=NULL}, then we generate \code{nsim} simulations of Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns each being a realisation of the uniform Poisson point process) with the same intensity as the pattern \code{Y}. (If \code{Y} is a multitype point pattern, then the simulated patterns are also given independent random marks; the probability distribution of the random marks is determined by the relative frequencies of marks in \code{Y}.) \item If \code{Y} is a fitted point process model (an object of class \code{"ppm"} or \code{"kppm"}) and \code{simulate=NULL}, then this routine generates \code{nsim} simulated realisations of that model. \item If \code{simulate} is supplied, then it determines how the simulated point patterns are generated. It may be either \itemize{ \item an expression in the R language, typically containing a call to a random generator. This expression will be evaluated \code{nsim} times to yield \code{nsim} point patterns. For example if \code{simulate=expression(runifpoint(100))} then each simulated pattern consists of exactly 100 independent uniform random points. \item a list of point patterns. The entries in this list will be taken as the simulated patterns. \item an object of class \code{"envelope"}. 
This should have been produced by calling \code{envelope} with the argument \code{savepatterns=TRUE}. The simulated point patterns that were saved in this object will be extracted and used as the simulated patterns for the new envelope computation. This makes it possible to plot envelopes for two different summary functions based on exactly the same set of simulated point patterns. } } The summary statistic \code{fun} is applied to each of these simulated patterns. Typically \code{fun} is one of the functions \code{Kest}, \code{Gest}, \code{Fest}, \code{Jest}, \code{pcf}, \code{Kcross}, \code{Kdot}, \code{Gcross}, \code{Gdot}, \code{Jcross}, \code{Jdot}, \code{Kmulti}, \code{Gmulti}, \code{Jmulti} or \code{Kinhom}. It may also be a character string containing the name of one of these functions. The statistic \code{fun} can also be a user-supplied function; if so, then it must have arguments \code{X} and \code{r} like those in the functions listed above, and it must return an object of class \code{"fv"}. Upper and lower critical envelopes are computed in one of the following ways: \describe{ \item{pointwise:}{by default, envelopes are calculated pointwise (i.e. for each value of the distance argument \eqn{r}), by sorting the \code{nsim} simulated values, and taking the \code{m}-th lowest and \code{m}-th highest values, where \code{m = nrank}. For example if \code{nrank=1}, the upper and lower envelopes are the pointwise maximum and minimum of the simulated values. The pointwise envelopes are \bold{not} \dQuote{confidence bands} for the true value of the function! Rather, they specify the critical points for a Monte Carlo test (Ripley, 1981). The test is constructed by choosing a \emph{fixed} value of \eqn{r}, and rejecting the null hypothesis if the observed function value lies outside the envelope \emph{at this value of} \eqn{r}. This test has exact significance level \code{alpha = 2 * nrank/(1 + nsim)}. 
} \item{simultaneous:}{if \code{global=TRUE}, then the envelopes are determined as follows. First we calculate the theoretical mean value of the summary statistic (if we are testing CSR, the theoretical value is supplied by \code{fun}; otherwise we perform a separate set of \code{nsim2} simulations, compute the average of all these simulated values, and take this average as an estimate of the theoretical mean value). Then, for each simulation, we compare the simulated curve to the theoretical curve, and compute the maximum absolute difference between them (over the interval of \eqn{r} values specified by \code{ginterval}). This gives a deviation value \eqn{d_i}{d[i]} for each of the \code{nsim} simulations. Finally we take the \code{m}-th largest of the deviation values, where \code{m=nrank}, and call this \code{dcrit}. Then the simultaneous envelopes are of the form \code{lo = expected - dcrit} and \code{hi = expected + dcrit} where \code{expected} is either the theoretical mean value \code{theo} (if we are testing CSR) or the estimated theoretical value \code{mmean} (if we are testing another model). The simultaneous critical envelopes have constant width \code{2 * dcrit}. The simultaneous critical envelopes allow us to perform a different Monte Carlo test (Ripley, 1981). The test rejects the null hypothesis if the graph of the observed function lies outside the envelope \bold{at any value of} \eqn{r}. This test has exact significance level \code{alpha = nrank/(1 + nsim)}. This test can also be performed using \code{\link{mad.test}}. } \item{based on sample moments:}{if \code{VARIANCE=TRUE}, the algorithm calculates the (pointwise) sample mean and sample variance of the simulated functions. Then the envelopes are computed as mean plus or minus \code{nSD} standard deviations. These envelopes do not have an exact significance interpretation. 
They are a naive approximation to the critical points of the Neyman-Pearson test assuming the summary statistic is approximately Normally distributed. } } The return value is an object of class \code{"fv"} containing the summary function for the data point pattern, the upper and lower simulation envelopes, and the theoretical expected value (exact or estimated) of the summary function for the model being tested. It can be plotted using \code{\link{plot.envelope}}. If \code{VARIANCE=TRUE} then the return value also includes the sample mean, sample variance and other quantities. Arguments can be passed to the function \code{fun} through \code{...}. This means that you simply specify these arguments in the call to \code{envelope}, and they will be passed to \code{fun}. In particular, the argument \code{correction} determines the edge correction to be used to calculate the summary statistic. See the section on Edge Corrections, and the Examples. Arguments can also be passed to the function \code{fun} through the list \code{funargs}. This mechanism is typically used if an argument of \code{fun} has the same name as an argument of \code{envelope}. The list \code{funargs} should contain entries of the form \code{name=value}, where each \code{name} is the name of an argument of \code{fun}. There is also an option, rarely used, in which different function arguments are used when computing the summary function for the data \code{Y} and for the simulated patterns. If \code{funYargs} is given, it will be used when the summary function for the data \code{Y} is computed, while \code{funargs} will be used when computing the summary function for the simulated patterns. This option is only needed in rare cases: usually the basic principle requires that the data and simulated patterns must be treated equally, so that \code{funargs} and \code{funYargs} should be identical. 
If \code{Y} is a fitted cluster point process model (object of class \code{"kppm"}), and \code{simulate=NULL}, then the model is simulated directly using \code{\link{simulate.kppm}}. If \code{Y} is a fitted Gibbs point process model (object of class \code{"ppm"}), and \code{simulate=NULL}, then the model is simulated by running the Metropolis-Hastings algorithm \code{\link{rmh}}. Complete control over this algorithm is provided by the arguments \code{start} and \code{control} which are passed to \code{\link{rmh}}. For simultaneous critical envelopes (\code{global=TRUE}) the following options are also useful: \describe{ \item{\code{ginterval}}{determines the interval of \eqn{r} values over which the deviation between curves is calculated. It should be a numeric vector of length 2. There is a sensible default (namely, the recommended plotting interval for \code{fun(X)}, or the range of \code{r} values if \code{r} is explicitly specified). } \item{\code{transform}}{specifies a transformation of the summary function \code{fun} that will be carried out before the deviations are computed. Such transforms are useful if \code{global=TRUE} or \code{VARIANCE=TRUE}. The \code{transform} must be an expression object using the symbol \code{.} to represent the function value (and possibly other symbols recognised by \code{\link{with.fv}}). For example, the conventional way to normalise the \eqn{K} function (Ripley, 1981) is to transform it to the \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/\pi)} and this is implemented by setting \code{transform=expression(sqrt(./pi))}. } } It is also possible to extract the summary functions for each of the individual simulated point patterns, by setting \code{savefuns=TRUE}. Then the return value also has an attribute \code{"simfuns"} containing all the summary functions for the individual simulated patterns. 
It is an \code{"fv"} object containing functions named \code{sim1, sim2, ...} representing the \code{nsim} summary functions. It is also possible to save the simulated point patterns themselves, by setting \code{savepatterns=TRUE}. Then the return value also has an attribute \code{"simpatterns"} which is a list of length \code{nsim} containing all the simulated point patterns. See \code{\link{plot.envelope}} and \code{\link{plot.fv}} for information about how to plot the envelopes. Different envelopes can be recomputed from the same data using \code{\link{envelope.envelope}}. Envelopes can be combined using \code{\link{pool.envelope}}. } \section{Errors and warnings}{ An error may be generated if one of the simulations produces a point pattern that is empty, or is otherwise unacceptable to the function \code{fun}. The upper envelope may be \code{NA} (plotted as plus or minus infinity) if some of the function values computed for the simulated point patterns are \code{NA}. Whether this occurs will depend on the function \code{fun}, but it usually happens when the simulated point pattern does not contain enough points to compute a meaningful value. } \section{Confidence intervals}{ Simulation envelopes do \bold{not} compute confidence intervals; they generate significance bands. If you really need a confidence interval for the true summary function of the point process, use \code{\link{lohboot}}. See also \code{\link{varblock}}. } \section{Edge corrections}{ It is common to apply a correction for edge effects when calculating a summary function such as the \eqn{K} function. Typically the user has a choice between several possible edge corrections. In a call to \code{envelope}, the user can specify the edge correction to be applied in \code{fun}, using the argument \code{correction}. See the Examples below. 
\describe{ \item{Summary functions in \pkg{spatstat}}{ Summary functions that are available in \pkg{spatstat}, such as \code{\link{Kest}}, \code{\link{Gest}} and \code{\link{pcf}}, have a standard argument called \code{correction} which specifies the name of one or more edge corrections. The list of available edge corrections is different for each summary function, and may also depend on the kind of window in which the point pattern is recorded. In the case of \code{Kest} (the default and most frequently used value of \code{fun}) the best edge correction is Ripley's isotropic correction if the window is rectangular or polygonal, and the translation correction if the window is a binary mask. See the help files for the individual functions for more information. All the summary functions in \pkg{spatstat} recognise the option \code{correction="best"} which gives the \dQuote{best} (most accurate) available edge correction for that function. In a call to \code{envelope}, if \code{fun} is one of the summary functions provided in \pkg{spatstat}, then the default is \code{correction="best"}. This means that \emph{by default, the envelope will be computed using the \dQuote{best} available edge correction}. The user can override this default by specifying the argument \code{correction}. For example the computation can be accelerated by choosing another edge correction which is less accurate than the \dQuote{best} one, but faster to compute. } \item{User-written summary functions}{ If \code{fun} is a function written by the user, then \code{envelope} has to guess what to do. If \code{fun} has an argument called \code{correction}, or has \code{\dots} arguments, then \code{envelope} assumes that the function can handle a correction argument. To compute the envelope, \code{fun} will be called with a \code{correction} argument. The default is \code{correction="best"}, unless overridden in the call to \code{envelope}. 
Otherwise, if \code{fun} does not have an argument called \code{correction} and does not have \code{\dots} arguments, then \code{envelope} assumes that the function \emph{cannot} handle a correction argument. To compute the envelope, \code{fun} is called without a correction argument. } } } \references{ Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Arnold, 2003. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{dclf.test}}, \code{\link{mad.test}} for envelope-based tests. \code{\link{fv.object}}, \code{\link{plot.envelope}}, \code{\link{plot.fv}}, \code{\link{envelope.envelope}}, \code{\link{pool.envelope}} for handling envelopes. There are also methods for \code{print} and \code{summary}. 
\code{\link{Kest}}, \code{\link{Gest}}, \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{pcf}}, \code{\link{ppp}}, \code{\link{ppm}}, \code{\link{default.expand}} } \examples{ X <- simdat # Envelope of K function under CSR \dontrun{ plot(envelope(X)) } \testonly{ plot(envelope(X, nsim=3)) } # Translation edge correction (this is also FASTER): \dontrun{ plot(envelope(X, correction="translate")) } \testonly{ E <- envelope(X, nsim=3, correction="translate") } # Global envelopes \dontrun{ plot(envelope(X, Lest, global=TRUE)) plot(envelope(X, Kest, global=TRUE, scale=function(r) { r })) } \testonly{ E <- envelope(X, Lest, nsim=3, global=TRUE) E <- envelope(X, Kest, nsim=3, global=TRUE, scale=function(r) { r }) E summary(E) } # Envelope of K function for simulations from Gibbs model \dontrun{ fit <- ppm(cells ~1, Strauss(0.05)) plot(envelope(fit)) plot(envelope(fit), global=TRUE) } \testonly{ fit <- ppm(cells ~1, Strauss(0.05), nd=20) E <- envelope(fit, nsim=3, correction="border", nrep=100) E <- envelope(fit, nsim=3, correction="border", global=TRUE, nrep=100) } # Envelope of K function for simulations from cluster model fit <- kppm(redwood ~1, "Thomas") \dontrun{ plot(envelope(fit, Gest)) plot(envelope(fit, Gest, global=TRUE)) } \testonly{ E <- envelope(fit, Gest, correction="rs", nsim=3, global=TRUE, nrep=100) } # Envelope of G function under CSR \dontrun{ plot(envelope(X, Gest)) } \testonly{ E <- envelope(X, Gest, correction="rs", nsim=3) } # Envelope of L function under CSR # L(r) = sqrt(K(r)/pi) \dontrun{ E <- envelope(X, Kest) plot(E, sqrt(./pi) ~ r) } \testonly{ E <- envelope(X, Kest, correction="border", nsim=3) plot(E, sqrt(./pi) ~ r) } # Simultaneous critical envelope for L function # (alternatively, use Lest) \dontrun{ plot(envelope(X, Kest, transform=expression(sqrt(./pi)), global=TRUE)) } \testonly{ E <- envelope(X, Kest, nsim=3, correction="border", transform=expression(sqrt(./pi)), global=TRUE) } ## One-sided envelope \dontrun{ plot(envelope(X, Lest, 
alternative="less")) } \testonly{ E <- envelope(X, Lest, nsim=3, alternative="less") } # How to pass arguments needed to compute the summary functions: # We want envelopes for Jcross(X, "A", "B") # where "A" and "B" are types of points in the dataset 'demopat' data(demopat) \dontrun{ plot(envelope(demopat, Jcross, i="A", j="B")) } \testonly{ plot(envelope(demopat, Jcross, correction="rs", i="A", j="B", nsim=3)) } # Use of `simulate' \dontrun{ plot(envelope(cells, Gest, simulate=expression(runifpoint(42)))) plot(envelope(cells, Gest, simulate=expression(rMaternI(100,0.02)))) } \testonly{ plot(envelope(cells, Gest, correction="rs", simulate=expression(runifpoint(42)), nsim=3)) plot(envelope(cells, Gest, correction="rs", simulate=expression(rMaternI(100, 0.02)), nsim=3, global=TRUE)) } # Envelope under random toroidal shifts data(amacrine) \dontrun{ plot(envelope(amacrine, Kcross, i="on", j="off", simulate=expression(rshift(amacrine, radius=0.25)))) } # Envelope under random shifts with erosion \dontrun{ plot(envelope(amacrine, Kcross, i="on", j="off", simulate=expression(rshift(amacrine, radius=0.1, edge="erode")))) } # Envelope of INHOMOGENEOUS K-function with fitted trend # The following is valid. # Setting lambda=fit means that the fitted model is re-fitted to # each simulated pattern to obtain the intensity estimates for Kinhom. # (lambda=NULL would also be valid) fit <- kppm(redwood ~1, clusters="MatClust") \dontrun{ plot(envelope(fit, Kinhom, lambda=fit, nsim=19)) } \testonly{ envelope(fit, Kinhom, lambda=fit, nsim=3) } # Note that the principle of symmetry, essential to the validity of # simulation envelopes, requires that both the observed and # simulated patterns be subjected to the same method of intensity # estimation. 
In the following example it would be incorrect to set the # argument 'lambda=red.dens' in the envelope command, because this # would mean that the inhomogeneous K functions of the simulated # patterns would be computed using the intensity function estimated # from the original redwood data, violating the symmetry. There is # still a concern about the fact that the simulations are generated # from a model that was fitted to the data; this is only a problem in # small datasets. \dontrun{ red.dens <- density(redwood, sigma=bw.diggle) plot(envelope(redwood, Kinhom, sigma=bw.diggle, simulate=expression(rpoispp(red.dens)))) } # Precomputed list of point patterns \dontrun{ nX <- npoints(X) PatList <- list() for(i in 1:19) PatList[[i]] <- runifpoint(nX) E <- envelope(X, Kest, nsim=19, simulate=PatList) } \testonly{ PatList <- list() for(i in 1:3) PatList[[i]] <- runifpoint(10) E <- envelope(X, Kest, nsim=3, simulate=PatList) } # re-using the same point patterns \dontrun{ EK <- envelope(X, Kest, savepatterns=TRUE) EG <- envelope(X, Gest, simulate=EK) } \testonly{ EK <- envelope(X, Kest, nsim=3, savepatterns=TRUE) EG <- envelope(X, Gest, nsim=3, simulate=EK) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/methods.objsurf.Rd0000644000176200001440000000256013160710621016273 0ustar liggesusers\name{methods.objsurf} \Rdversion{1.1} \alias{methods.objsurf} %DoNotExport \alias{print.objsurf} \alias{plot.objsurf} \alias{persp.objsurf} \alias{image.objsurf} \alias{contour.objsurf} \title{ Methods for Objective Function Surfaces } \description{ Methods for printing and plotting an objective function surface. } \usage{ \method{print}{objsurf}(x, ...) \method{plot}{objsurf}(x, ...) \method{image}{objsurf}(x, ...) \method{contour}{objsurf}(x, ...) \method{persp}{objsurf}(x, ...) } \arguments{ \item{x}{ Object of class \code{"objsurf"} representing an objective function surface. 
} \item{\dots}{ Additional arguments passed to plot methods. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{plot}}, \code{\link{image}}, \code{\link{contour}} and \code{\link{persp}} for the class \code{"objsurf"}. } \value{ For \code{print.objsurf}, \code{plot.objsurf} and \code{image.objsurf} the value is \code{NULL}. For \code{contour.objsurf} and \code{persp.objsurf} the value is described in the help for \code{\link{contour.default}} and \code{\link{persp.default}} respectively. } \author{\adrian and \ege. } \seealso{ \code{\link{objsurf}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") os <- objsurf(fit) os plot(os) contour(os, add=TRUE) persp(os) } \keyword{spatial} \keyword{hplot} spatstat/man/npfun.Rd0000644000176200001440000000147613160710621014312 0ustar liggesusers\name{npfun} \alias{npfun} \title{ Dummy Function Returns Number of Points } \description{ Returns a summary function which is constant with value equal to the number of points in the point pattern. } \usage{ npfun(X, ..., r) } \arguments{ \item{X}{ Point pattern. } \item{\dots}{ Ignored. } \item{r}{ Vector of values of the distance argument \eqn{r}. } } \details{ This function is normally not called by the user. Instead it is passed as an argument to the function \code{\link{psst}}. } \value{ Object of class \code{"fv"} representing a constant function. } \author{ \adrian \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}. } \seealso{ \code{\link{psst}} } \examples{ fit0 <- ppm(cells, ~1, nd=10) v <- psst(fit0, npfun) } \keyword{spatial} \keyword{nonparametric} spatstat/man/connected.ppp.Rd0000644000176200001440000000344413160710571015725 0ustar liggesusers\name{connected.ppp} \Rdversion{1.1} \alias{connected.ppp} \title{ Connected Components of a Point Pattern } \description{ Finds the topologically-connected components of a point pattern, when all pairs of points closer than a threshold distance are joined. 
} \usage{ \method{connected}{ppp}(X, R, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{R}{ Threshold distance. Pairs of points closer than \code{R} units apart will be joined together. } \item{\dots}{ Other arguments, not recognised by these methods. } } \details{ This function can be used to identify clumps of points in a point pattern. The function \code{connected} is generic. This is the method for point patterns (objects of class \code{"ppp"}). The point pattern \code{X} is first converted into an abstract graph by joining every pair of points that lie closer than \code{R} units apart. Then the connected components of this graph are identified. Two points in \code{X} belong to the same connected component if they can be reached by a series of steps between points of \code{X}, each step being shorter than \code{R} units in length. The result is a vector of labels for the points of \code{X} where all the points in a connected component have the same label. } \value{ A point pattern, equivalent to \code{X} except that the points have factor-valued marks, with levels corresponding to the connected components. } \seealso{ \code{\link{connected.im}}, \code{\link{im.object}}, \code{\link{tess}} } \examples{ Y <- connected(redwoodfull, 0.1) if(interactive()) { plot(Y, cols=1:length(levels(marks(Y))), main="connected(redwoodfull, 0.1)") } } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/nnwhich.pp3.Rd0000644000176200001440000000477713160710621015332 0ustar liggesusers\name{nnwhich.pp3} \alias{nnwhich.pp3} \title{Nearest neighbours in three dimensions} \description{ Finds the nearest neighbour of each point in a three-dimensional point pattern. } \usage{ \method{nnwhich}{pp3}(X, \dots, k=1) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{k}{ Integer, or integer vector. 
If \code{k = 1} (the default), the return value is a numeric vector \code{v} giving the indices of the nearest neighbours (the nearest neighbour of the \code{i}th point is the \code{j}th point where \code{j = v[i]}).
interaction parameters \eqn{\gamma_1,\ldots,\gamma_k}{gamma[1], ..., gamma[k]}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma_1^{v_1(x_i, X)} \ldots \gamma_k^{v_k(x_i,X)} }{ beta gamma[1]^v(1, x_i, X) ... gamma[k]^v(k, x_i, X) }
If all the saturation parameters are set to \code{Inf} then the model is effectively a pairwise interaction process, equivalent to \code{\link{PairPiece}} (however the interaction parameters \eqn{\gamma}{gamma} obtained from \code{\link{SatPiece}} are the square roots of the parameters \eqn{\gamma}{gamma} obtained from \code{\link{PairPiece}}). If \code{r} is a single number, this model is virtually equivalent to the Geyer process, see \code{\link{Geyer}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{PairPiece}}, \code{\link{BadGey}}. } \examples{ SatPiece(c(0.1,0.2), c(1,1)) # prints a sensible description of itself SatPiece(c(0.1,0.2), 1) data(cells) ppm(cells, ~1, SatPiece(c(0.07, 0.1, 0.13), 2)) # fit a stationary piecewise constant Saturated pairwise interaction process \dontrun{ ppm(cells, ~polynom(x,y,3), SatPiece(c(0.07, 0.1, 0.13), 2)) # nonstationary process with log-cubic polynomial trend } } \author{\adrian and \rolf in collaboration with Hao Wang and Jeff Picka } \keyword{spatial} \keyword{models} spatstat/man/area.owin.Rd0000644000176200001440000000323213160710571015043 0ustar liggesusers\name{area.owin} \alias{area} \alias{area.owin} \alias{area.default} \alias{volume.owin} \title{Area of a Window} \description{ Computes the area of a window } \usage{ area(w) \method{area}{owin}(w) \method{area}{default}(w) \method{volume}{owin}(x) } \arguments{ \item{w}{A window, whose area will be computed. This should be an object of class \code{\link{owin}}, or can be given in any format acceptable to \code{\link{as.owin}()}. } \item{x}{Object of class \code{\link{owin}}} } \value{ A numerical value giving the area of the window. } \details{ If the window \code{w} is of type \code{"rectangle"} or \code{"polygonal"}, the area of this rectangular window is computed by analytic geometry. 
If \code{w} is of type \code{"mask"} the area of the discrete raster approximation of the window is computed by summing the binary image values and adjusting for pixel size. The function \code{volume.owin} is identical to \code{area.owin} except for the argument name. It is a method for the generic function \code{volume}. } \seealso{ \code{\link{perimeter}}, \code{\link{diameter.owin}}, \code{\link{owin.object}}, \code{\link{as.owin}} } \examples{ w <- unit.square() area(w) # returns 1.00000 k <- 6 theta <- 2 * pi * (0:(k-1))/k co <- cos(theta) si <- sin(theta) mas <- owin(c(-1,1), c(-1,1), poly=list(x=co, y=si)) area(mas) # returns approx area of k-gon mas <- as.mask(square(2), eps=0.01) X <- raster.x(mas) Y <- raster.y(mas) mas$m <- ((X - 1)^2 + (Y - 1)^2 <= 1) area(mas) # returns 3.14 approx } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/tiles.Rd0000644000176200001440000000160713160710621014300 0ustar liggesusers\name{tiles} \alias{tiles} \title{Extract List of Tiles in a Tessellation} \description{ Extracts a list of the tiles that make up a tessellation. } \usage{ tiles(x) } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. The tiles that make up the tessellation \code{x} are returned in a list. } \value{ A list of windows (objects of class \code{"owin"}). 
} \seealso{ \code{\link{tess}}, \code{\link{tilenames}}, \code{\link{tile.areas}}, \code{\link{tiles.empty}} } \examples{ A <- tess(xgrid=0:2,ygrid=0:2) tiles(A) v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin()) E <- tess(image=v) tiles(E) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/unmark.Rd0000644000176200001440000000240013160710621014445 0ustar liggesusers\name{unmark} \alias{unmark} \alias{unmark.ppp} \alias{unmark.splitppp} \alias{unmark.psp} \alias{unmark.ppx} \title{Remove Marks} \description{ Remove the mark information from a spatial dataset. } \usage{ unmark(X) \method{unmark}{ppp}(X) \method{unmark}{splitppp}(X) \method{unmark}{psp}(X) \method{unmark}{ppx}(X) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}), a split point pattern (object of class \code{"splitppp"}), a line segment pattern (object of class \code{"psp"}) or a multidimensional space-time point pattern (object of class \code{"ppx"}). } } \value{ An object of the same class as \code{X} with any mark information deleted. } \details{ A `mark' is a value attached to each point in a spatial point pattern, or attached to each line segment in a line segment pattern, etc. The function \code{unmark} is a simple way to remove the marks from such a dataset. } \seealso{ \code{\link{ppp.object}}, \code{\link{psp.object}} } \examples{ data(lansing) hicks <- lansing[lansing$marks == "hickory", ] \dontrun{ plot(hicks) # still a marked point pattern, but only 1 value of marks plot(unmark(hicks)) # unmarked } } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/sharpen.Rd0000644000176200001440000000644513160710621014625 0ustar liggesusers\name{sharpen} \alias{sharpen} \alias{sharpen.ppp} \title{Data Sharpening of Point Pattern} \description{ Performs Choi-Hall data sharpening of a spatial point pattern. 
That is, points \eqn{X_i}{X[i]} that lie quite close to the edge of the window of the point pattern tend to be displaced inward.
If \code{edgecorrect=TRUE} then the algorithm is modified to correct this vector bias. } \value{ A point pattern (object of class \code{"ppp"}) in the same window as the original pattern \code{X}, and with the same marks as \code{X}. } \seealso{ \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}. } \examples{ data(shapley) X <- unmark(shapley) \dontshow{ if(!(interactive())) X <- rthin(X, 0.05) } Y <- sharpen(X, sigma=0.5) Z <- sharpen(X, sigma=0.5, edgecorrect=TRUE) opa <- par(mar=rep(0.2, 4)) plot(solist(X, Y, Z), main= " ", main.panel=c("data", "sharpen", "sharpen, correct"), pch=".", equal.scales=TRUE, mar.panel=0.2) par(opa) } \references{ Choi, E. and Hall, P. (2001) Nonparametric analysis of earthquake point-process data. In M. de Gunst, C. Klaassen and A. van der Vaart (eds.) \emph{State of the art in probability and statistics: Festschrift for Willem R. van Zwet}, Institute of Mathematical Statistics, Beachwood, Ohio. Pages 324--344. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat/man/mincontrast.Rd0000644000176200001440000001443613160710621015525 0ustar liggesusers\name{mincontrast} \alias{mincontrast} \title{Method of Minimum Contrast} \description{ A general low-level algorithm for fitting theoretical point process models to point pattern data by the Method of Minimum Contrast. } \usage{ mincontrast(observed, theoretical, startpar, \dots, ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL), fvlab=list(label=NULL, desc="minimum contrast fit"), explain=list(dataname=NULL, modelname=NULL, fname=NULL), adjustment=NULL) } \arguments{ \item{observed}{ Summary statistic, computed for the data. An object of class \code{"fv"}. } \item{theoretical}{ An R language function that calculates the theoretical expected value of the summary statistic, given the model parameters. See Details. } \item{startpar}{ Vector of initial values of the parameters of the point process model (passed to \code{theoretical}). 
} \item{\dots}{ Additional arguments passed to the function \code{theoretical} and to the optimisation algorithm \code{\link[stats]{optim}}. } \item{ctrl}{ Optional. List of arguments controlling the optimisation. See Details. } \item{fvlab}{ Optional. List containing some labels for the return value. See Details. } \item{explain}{ Optional. List containing strings that give a human-readable description of the model, the data and the summary statistic. } \item{adjustment}{ Internal use only. } } \details{ This function is a general algorithm for fitting point process models by the Method of Minimum Contrast. If you want to fit the Thomas process, see \code{\link{thomas.estK}}. If you want to fit a log-Gaussian Cox process, see \code{\link{lgcp.estK}}. If you want to fit the Matern cluster process, see \code{\link{matclust.estK}}. The Method of Minimum Contrast (Diggle and Gratton, 1984) is a general technique for fitting a point process model to point pattern data. First a summary function (typically the \eqn{K} function) is computed from the data point pattern. Second, the theoretical expected value of this summary statistic under the point process model is derived (if possible, as an algebraic expression involving the parameters of the model) or estimated from simulations of the model. Then the model is fitted by finding the optimal parameter values for the model to give the closest match between the theoretical and empirical curves. The argument \code{observed} should be an object of class \code{"fv"} (see \code{\link{fv.object}}) containing the values of a summary statistic computed from the data point pattern. Usually this is the function \eqn{K(r)} computed by \code{\link{Kest}} or one of its relatives. The argument \code{theoretical} should be a user-supplied function that computes the theoretical expected value of the summary statistic. 
It must have an argument named \code{par} that will be the vector of parameter values for the model (the length and format of this vector are determined by the starting values in \code{startpar}). The function \code{theoretical} should also expect a second argument (the first argument other than \code{par}) containing values of the distance \eqn{r} for which the theoretical value of the summary statistic \eqn{K(r)} should be computed. The value returned by \code{theoretical} should be a vector of the same length as the given vector of \eqn{r} values. The argument \code{ctrl} determines the contrast criterion (the objective function that will be minimised). The algorithm minimises the criterion \deqn{ D(\theta)= \int_{r_{\mbox{\scriptsize min}}}^{r_{\mbox{\scriptsize max}}} |\hat F(r)^q - F_\theta(r)^q|^p \, {\rm d}r }{ D(theta) = integral from rmin to rmax of abs(Fhat(r)^q - F(theta,r)^q)^p } where \eqn{\theta}{theta} is the vector of parameters of the model, \eqn{\hat F(r)}{Fhat(r)} is the observed value of the summary statistic computed from the data, \eqn{F_\theta(r)}{F(theta,r)} is the theoretical expected value of the summary statistic, and \eqn{p,q} are two exponents. The default is \code{q = 1/4}, \code{p=2} so that the contrast criterion is the integrated squared difference between the fourth roots of the two functions (Waagepetersen, 2006). The other arguments just make things print nicely. The argument \code{fvlab} contains labels for the component \code{fit} of the return value. The argument \code{explain} contains human-readable strings describing the data, the model and the summary statistic. The \code{"..."} argument of \code{mincontrast} can be used to pass extra arguments to the function \code{theoretical} and/or to the optimisation function \code{\link[stats]{optim}}. In this case, the function \code{theoretical} should also have a \code{"..."} argument and should ignore it (so that it ignores arguments intended for \code{\link[stats]{optim}}). 
\item{ctrl }{The control parameters of the algorithm.}
logical value indicating whether debug information should be output.
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link{valid.ppm}}, \code{\link{valid.detpointprocfamily}} } \keyword{spatial} \keyword{models} spatstat/man/Pairwise.Rd0000644000176200001440000000675113160710571014754 0ustar liggesusers\name{Pairwise} \alias{Pairwise} \title{Generic Pairwise Interaction model} \description{ Creates an instance of a pairwise interaction point process model which can then be fitted to point pattern data. } \usage{ Pairwise(pot, name, par, parnames, printfun) } \arguments{ \item{pot}{An R language function giving the user-supplied pairwise interaction potential.} \item{name}{Character string.} \item{par}{List of numerical values for irregular parameters} \item{parnames}{Vector of names of irregular parameters} \item{printfun}{Do not specify this argument: for internal use only.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This code constructs a member of the pairwise interaction family \code{\link{pairwise.family}} with arbitrary pairwise interaction potential given by the user. Each pair of points in the point pattern contributes a factor \eqn{h(d)} to the probability density, where \eqn{d} is the distance between the two points. The factor term \eqn{h(d)} is \deqn{h(d) = \exp(-\theta \mbox{pot}(d))}{h(d) = exp(-theta * pot(d))} provided \eqn{\mbox{pot}(d)}{pot(d)} is finite, where \eqn{\theta}{theta} is the coefficient vector in the model. The function \code{pot} must take as its first argument a matrix of interpoint distances, and evaluate the potential for each of these distances. The result must be either a matrix with the same dimensions as its input, or an array with its first two dimensions the same as its input (the latter case corresponds to a vector-valued potential). If irregular parameters are present, then the second argument to \code{pot} should be a vector of the same type as \code{par} giving those parameter values. 
The values returned by \code{pot} may be finite numeric values, or \code{-Inf} indicating a hard core (that is, the corresponding interpoint distance is forbidden). We define \eqn{h(d) = 0} if \eqn{\mbox{pot}(d) = -\infty}{pot(d) = -Inf}. Thus, a potential value of minus infinity is \emph{always} interpreted as corresponding to \eqn{h(d) = 0}, regardless of the sign and magnitude of \eqn{\theta}{theta}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \examples{ #This is the same as StraussHard(r=0.7,h=0.05) strpot <- function(d,par) { r <- par$r h <- par$h value <- (d <= r) value[d < h] <- -Inf value } mySH <- Pairwise(strpot, "StraussHard process", list(r=0.7,h=0.05), c("interaction distance r", "hard core distance h")) data(cells) ppm(cells, ~ 1, mySH, correction="isotropic") # Fiksel (1984) double exponential interaction # see Stoyan, Kendall, Mecke 1987 p 161 fikspot <- function(d, par) { r <- par$r h <- par$h zeta <- par$zeta value <- exp(-zeta * d) value[d < h] <- -Inf value[d > r] <- 0 value } Fiksel <- Pairwise(fikspot, "Fiksel double exponential process", list(r=3.5, h=1, zeta=1), c("interaction distance r", "hard core distance h", "exponential coefficient zeta")) data(spruces) fit <- ppm(unmark(spruces), ~1, Fiksel, rbord=3.5) fit plot(fitin(fit), xlim=c(0,4)) coef(fit) # corresponding values obtained by Fiksel (1984) were -1.9 and -6.0 } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/plot.cdftest.Rd0000644000176200001440000000642613160710621015575 0ustar liggesusers\name{plot.cdftest} \alias{plot.cdftest} \title{Plot a Spatial Distribution Test} \description{ Plot the result of a spatial distribution test computed by \code{cdf.test}. } \usage{ \method{plot}{cdftest}(x, ..., style=c("cdf", "PP", "QQ"), lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2, do.legend) } \arguments{ \item{x}{ Object to be plotted. 
An object of class \code{"cdftest"} produced by a method for \code{\link{cdf.test}}. } \item{\dots}{ extra arguments that will be passed to the plotting function \code{\link{plot.default}}. } \item{style}{ Style of plot. See Details. } \item{col,lwd,lty}{ The width, colour and type of lines used to plot the empirical curve (the empirical distribution, or PP plot or QQ plot). } \item{col0,lwd0,lty0}{ The width, colour and type of lines used to plot the reference curve (the predicted distribution, or the diagonal). } \item{do.legend}{ Logical value indicating whether to add an explanatory legend. Applies only when \code{style="cdf"}. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"cdftest"}. An object of this class represents the outcome of a spatial distribution test, computed by \code{\link{cdf.test}}, and based on either the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test. If \code{style="cdf"} (the default), the plot displays the two cumulative distribution functions that are compared by the test: namely the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, both plotted against the value of the covariate. The Kolmogorov-Smirnov test statistic (for example) is the maximum vertical separation between the two curves. If \code{style="PP"} then the P-P plot is drawn. The \eqn{x} coordinates of the plot are cumulative probabilities for the covariate under the model. The \eqn{y} coordinates are cumulative probabilities for the covariate at the data points. The diagonal line \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov test statistic is the maximum vertical separation between the P-P plot and the diagonal reference line. If \code{style="QQ"} then the Q-Q plot is drawn. The \eqn{x} coordinates of the plot are quantiles of the covariate under the model. 
The \eqn{y} coordinates are quantiles of the covariate at the data points. The diagonal line \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov test statistic cannot be read off the Q-Q plot. } \seealso{ \code{\link{cdf.test}} } \examples{ op <- options(useFancyQuotes=FALSE) # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X, ~1) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model k <- cdf.test(fit0, xcoord) # plot result of test plot(k, lwd0=3) plot(k, style="PP") plot(k, style="QQ") options(op) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/hyperframe.Rd0000644000176200001440000000663013160710621015323 0ustar liggesusers\name{hyperframe} \alias{hyperframe} \title{Hyper Data Frame} \description{ Create a hyperframe: a two-dimensional array in which each column consists of values of the same atomic type (like the columns of a data frame) or objects of the same class. } \usage{ hyperframe(..., row.names=NULL, check.rows=FALSE, check.names=TRUE, stringsAsFactors=default.stringsAsFactors()) } \arguments{ \item{\dots}{ Arguments of the form \code{value} or \code{tag=value}. Each \code{value} is either an atomic vector, or a list of objects of the same class, or a single atomic value, or a single object. Each \code{value} will become a column of the array. The \code{tag} determines the name of the column. See Details. } \item{row.names,check.rows,check.names,stringsAsFactors}{ Arguments passed to \code{\link{data.frame}} controlling the names of the rows, whether to check that rows are consistent, whether to check validity of the column names, and whether to convert character columns to factors. } } \details{ A hyperframe is like a data frame, except that its entries can be objects of any kind. 
A hyperframe is a two-dimensional array in which each column consists of values of one atomic type (as in a data frame) or consists of objects of one class. The arguments \code{\dots} are any number of arguments of the form \code{value} or \code{tag=value}. Each \code{value} will become a column of the array. The \code{tag} determines the name of the column. Each \code{value} can be either \itemize{ \item an atomic vector or factor (i.e. numeric vector, integer vector, character vector, logical vector, complex vector or factor) \item a list of objects which are all of the same class \item one atomic value, which will be replicated to make an atomic vector or factor \item one object, which will be replicated to make a list of objects. } All columns (vectors, factors and lists) must be of the same length, if their length is greater than 1. } \section{Methods for Hyperframes}{ There are methods for \code{print}, \code{plot}, \code{summary}, \code{with}, \code{split}, \code{[}, \code{[<-}, \code{$}, \code{$<-}, \code{names}, \code{as.data.frame}, \code{as.list}, \code{cbind} and \code{rbind} for the class of hyperframes. There is also \code{is.hyperframe} and \code{\link{as.hyperframe}}. } \value{ An object of class \code{"hyperframe"}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{as.hyperframe}}, \code{\link{as.hyperframe.ppx}}, \code{\link{plot.hyperframe}}, \code{\link{[.hyperframe}}, \code{\link{with.hyperframe}}, \code{\link{split.hyperframe}}, \code{\link{as.data.frame.hyperframe}}, \code{\link{cbind.hyperframe}}, \code{\link{rbind.hyperframe}} } \examples{ # equivalent to a data frame hyperframe(X=1:10, Y=3) # list of functions hyperframe(f=list(sin, cos, tan)) # table of functions and matching expressions hyperframe(f=list(sin, cos, tan), e=list(expression(sin(x)), expression(cos(x)), expression(tan(x)))) hyperframe(X=1:10, Y=letters[1:10], Z=factor(letters[1:10]), stringsAsFactors=FALSE) lambda <- runif(4, min=50, max=100) X <- lapply(as.list(lambda), function(x) { rpoispp(x) }) h <- hyperframe(lambda=lambda, X=X) h h$lambda2 <- lambda^2 h[, "lambda3"] <- lambda^3 h[, "Y"] <- X } \keyword{spatial} \keyword{manip} spatstat/man/incircle.Rd0000644000176200001440000000240013160710621014740 0ustar liggesusers\name{incircle} \alias{incircle} \alias{inradius} \title{Find Largest Circle Inside Window} \description{ Find the largest circle contained in a given window. } \usage{ incircle(W) inradius(W) } \arguments{ \item{W}{A window (object of class \code{"owin"}).} } \details{ Given a window \code{W} of any type and shape, the function \code{incircle} determines the largest circle that is contained inside \code{W}, while \code{inradius} computes its radius only. For non-rectangular windows, the incircle is computed approximately by finding the maximum of the distance map (see \code{\link{distmap}}) of the complement of the window. } \value{ The result of \code{incircle} is a list with entries \code{x,y,r} giving the location \code{(x,y)} and radius \code{r} of the incircle. The result of \code{inradius} is the numerical value of radius. 
} \seealso{ \code{\link{centroid.owin}} } \examples{ W <- square(1) Wc <- incircle(W) plot(W) plot(disc(Wc$r, c(Wc$x, Wc$y)), add=TRUE) plot(letterR) Rc <- incircle(letterR) plot(disc(Rc$r, c(Rc$x, Rc$y)), add=TRUE) W <- as.mask(letterR) plot(W) Rc <- incircle(W) plot(disc(Rc$r, c(Rc$x, Rc$y)), add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/HierHard.Rd0000644000176200001440000001054713160710571014655 0ustar liggesusers\name{HierHard} \alias{HierHard} \title{The Hierarchical Hard Core Point Process Model} \description{ Creates an instance of the hierarchical hard core point process model which can then be fitted to point pattern data. } \usage{ HierHard(hradii=NULL, types=NULL, archy=NULL) } \arguments{ \item{hradii}{Optional matrix of hard core distances} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical hard core process with hard core distances \eqn{hradii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. The hierarchical version of the (stationary) hard core process with \eqn{m} types, with hard core distances \eqn{h_{ij}}{h[i,j]} and parameters \eqn{\beta_j}{beta[j]}, is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern. 
If any pair of points of types \eqn{i} and \eqn{j} lies closer than \eqn{h_{ij}}{h[i,j]} units apart, the configuration of points is impossible (probability density zero). The nonstationary hierarchical hard core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical hard core process pairwise interaction is yielded by the function \code{HierHard()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{hradii}. The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrix \code{hradii} must be square, with entries which are either positive numbers, or zero or \code{NA}. A value of zero or \code{NA} indicates that no hard core interaction term should be included for this combination of types. Note that only the hard core distances are specified in \code{HierHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierHard()}. } \seealso{ \code{\link{MultiHard}} for the corresponding symmetrical interaction. 
\code{\link{HierStrauss}}, \code{\link{HierStraussHard}}. } \examples{ h <- matrix(c(4, NA, 10, 15), 2, 2) HierHard(h) # prints a sensible description of itself ppm(ants ~1, HierHard(h)) # fit the stationary hierarchical hard core process to ants data } \author{\adrian , \rolf and \ege. } \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240. \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. } \keyword{spatial} \keyword{models} spatstat/man/Replace.linim.Rd0000644000176200001440000000345413160710621015644 0ustar liggesusers\name{Replace.linim} \alias{[<-.linim} \title{Reset Values in Subset of Image on Linear Network} \description{ Reset the values in a subset of a pixel image on a linear network. } \usage{ \method{[}{linim}(x, i, j) <- value } \arguments{ \item{x}{ A pixel image on a linear network. An object of class \code{"linim"}. } \item{i}{ Object defining the subregion or subset to be replaced. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or a point pattern (an object of class \code{"ppp"}), or any type of index that applies to a matrix, or something that can be converted to a point pattern by \code{\link{as.ppp}} (using the window of \code{x}). } \item{j}{ An integer or logical vector serving as the column index if matrix indexing is being used. Ignored if \code{i} is appropriate to some sort of replacement \emph{other than} matrix indexing. } \item{value}{ Vector, matrix, factor or pixel image containing the replacement values. Short vectors will be recycled. } } \value{ The image \code{x} with the values replaced. 
} \details{ This function changes some of the pixel values in a pixel image. The image \code{x} must be an object of class \code{"linim"} representing a pixel image on a linear network. The pixel values are replaced according to the rules described in the help for \code{\link{[<-.im}}. Then the auxiliary data are updated. } \seealso{ \code{\link{[<-.im}}. } \examples{ # make a function Y <- as.linim(distfun(runiflpp(5, simplenet))) # replace some values B <- square(c(0.25, 0.55)) Y[B] <- 2 plot(Y, main="") plot(B, add=TRUE, lty=3) X <- runiflpp(4, simplenet) Y[X] <- 5 } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/is.hybrid.Rd0000644000176200001440000000350313160710621015050 0ustar liggesusers\name{is.hybrid} \alias{is.hybrid} \alias{is.hybrid.ppm} \alias{is.hybrid.interact} \title{ Test Whether Object is a Hybrid } \description{ Tests where a point process model or point process interaction is a hybrid of several interactions. } \usage{ is.hybrid(x) \method{is.hybrid}{ppm}(x) \method{is.hybrid}{interact}(x) } \arguments{ \item{x}{ A point process model (object of class \code{"ppm"}) or a point process interaction structure (object of class \code{"interact"}). } } \details{ A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2012) is a point process model created by combining two or more point process models, or an interpoint interaction created by combining two or more interpoint interactions. The function \code{is.hybrid} is generic, with methods for point process models (objects of class \code{"ppm"}) and point process interactions (objects of class \code{"interact"}). These functions return \code{TRUE} if the object \code{x} is a hybrid, and \code{FALSE} if it is not a hybrid. Hybrids of two or more interpoint interactions are created by the function \code{\link{Hybrid}}. Such a hybrid interaction can then be fitted to point pattern data using \code{\link{ppm}}. 
} \value{ \code{TRUE} if the object is a hybrid, and \code{FALSE} otherwise. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \url{http://www.jstatsoft.org/v55/i11/} } \seealso{ \code{\link{Hybrid}} } \examples{ S <- Strauss(0.1) is.hybrid(S) H <- Hybrid(Strauss(0.1), Geyer(0.2, 3)) is.hybrid(H) data(redwood) fit <- ppm(redwood, ~1, H) is.hybrid(fit) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/kernel.squint.Rd0000644000176200001440000000274313160710621015764 0ustar liggesusers\name{kernel.squint} \alias{kernel.squint} \title{Integral of Squared Kernel} \description{ Computes the integral of the squared kernel, for the kernels used in density estimation for numerical data. } \usage{ kernel.squint(kernel = "gaussian", bw=1) } \arguments{ \item{kernel}{ String name of the kernel. Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } \item{bw}{ Bandwidth (standard deviation) of the kernel. } } \details{ Kernel estimation of a probability density in one dimension is performed by \code{\link[stats]{density.default}} using a kernel function selected from the list above. This function computes the integral of the squared kernel, \deqn{ R = \int_{-\infty}^{\infty} k(x)^2 \, {\rm d}x }{ R = integral of k(x)^2 dx from x = -infinity to x = +infinity } where \eqn{k(x)} is the kernel with bandwidth \code{bw}. } \value{ A single number. 
} \seealso{ \code{\link[stats]{density.default}}, \code{\link{dkernel}}, \code{\link{kernel.moment}}, \code{\link{kernel.factor}} } \examples{ kernel.squint("gaussian", 3) # integral of squared Epanechnikov kernel with half-width h=1 h <- 1 bw <- h/kernel.factor("epa") kernel.squint("epa", bw) } \author{ \spatstatAuthors and Martin Hazelton } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat/man/diagnose.ppm.Rd0000644000176200001440000004137713160710571015560 0ustar liggesusers\name{diagnose.ppm} \alias{diagnose.ppm} \alias{plot.diagppm} \title{ Diagnostic Plots for Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, produce diagnostic plots based on residuals. } \usage{ diagnose.ppm(object, \dots, type="raw", which="all", sigma=NULL, rbord=reach(object), cumulative=TRUE, plot.it=TRUE, rv = NULL, compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, typename, check=TRUE, repair=TRUE, oldstyle=FALSE, splineargs=list(spar=0.5)) \method{plot}{diagppm}(x, \dots, which, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), plot.sd, spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which diagnostics should be produced. This object is usually obtained from \code{\link{ppm}}. } \item{type}{ String indicating the type of residuals or weights to be used. Current options are \code{"eem"} for the Stoyan-Grabarnik exponential energy weights, \code{"raw"} for the raw residuals, \code{"inverse"} for the inverse-lambda residuals, and \code{"pearson"} for the Pearson residuals. A partial match is adequate. } \item{which}{ Character string or vector indicating the choice(s) of plots to be generated. Options are \code{"all"}, \code{"marks"}, \code{"smooth"}, \code{"x"}, \code{"y"} and \code{"sum"}. 
Multiple choices may be given but must be matched exactly. See Details. } \item{sigma}{ Bandwidth for kernel smoother in \code{"smooth"} option. } \item{rbord}{ Width of border to avoid edge effects. The diagnostic calculations will be confined to those points of the data pattern which are at least \code{rbord} units away from the edge of the window. (An infinite value of \code{rbord} will be ignored.) } \item{cumulative}{ Logical flag indicating whether the lurking variable plots for the \eqn{x} and \eqn{y} coordinates will be the plots of cumulative sums of marks (\code{cumulative=TRUE}) or the plots of marginal integrals of the smoothed residual field (\code{cumulative=FALSE}). } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, the computed diagnostic quantities are returned without plotting them. } \item{plot.neg}{ String indicating how the density part of the residual measure should be plotted. } \item{plot.smooth}{ String indicating how the smoothed residual field should be plotted. } \item{compute.sd,plot.sd}{ Logical values indicating whether error bounds should be computed and added to the \code{"x"} and \code{"y"} plots. The default is \code{TRUE} for Poisson models and \code{FALSE} for non-Poisson models. See Details. } \item{envelope,nsim,nrank}{ Arguments passed to \code{\link{lurking}} in order to plot simulation envelopes for the lurking variable plots. } \item{rv}{ Usually absent. Advanced use only. If this argument is present, the values of the residuals will not be calculated from the fitted model \code{object} but will instead be taken directly from \code{rv}. } \item{spacing}{ The spacing between plot panels (when a four-panel plot is generated) expressed as a fraction of the width of the window of the point pattern. } \item{outer}{ The distance from the outermost line of text to the nearest plot panel, expressed as a multiple of the spacing between plot panels. 
} \item{srange}{ Vector of length 2 that will be taken as giving the range of values of the smoothed residual field, when generating an image plot of this field. This is useful if you want to generate diagnostic plots for two different fitted models using the same colour map. } \item{monochrome}{ Flag indicating whether images should be displayed in greyscale (suitable for publication) or in colour (suitable for the screen). The default is to display in colour. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{splineargs}{ Argument passed to \code{\link{lurking}} to control the smoothing in the lurking variable plot. } \item{x}{The value returned from a previous call to \code{diagnose.ppm}. An object of class \code{"diagppm"}. } \item{typename}{String to be used as the name of the residuals.} \item{main}{Main title for the plot.} \item{\dots}{ Extra arguments, controlling either the resolution of the smoothed image (passed from \code{diagnose.ppm} to \code{\link{density.ppp}}) or the appearance of the plots (passed from \code{diagnose.ppm} to \code{plot.diagppm} and from \code{plot.diagppm} to \code{\link{plot.default}}). } \item{compute.cts}{Advanced use only.} } \value{ An object of class \code{"diagppm"} which contains the coordinates needed to reproduce the selected plots. This object can be plotted using \code{plot.diagppm} and printed using \code{print.diagppm}. 
} \details{ The function \code{diagnose.ppm} generates several diagnostic plots for a fitted point process model. The plots display the residuals from the fitted model (Baddeley et al, 2005) or alternatively the `exponential energy marks' (Stoyan and Grabarnik, 1991). These plots can be used to assess goodness-of-fit, to identify outliers in the data, and to reveal departures from the fitted model. See also the companion function \code{\link{qqplot.ppm}}. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}) typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). The argument \code{type} selects the type of residual or weight that will be computed. Current options are: \describe{ \item{\code{"eem"}:}{ exponential energy marks (Stoyan and Grabarnik, 1991) computed by \code{\link{eem}}. These are positive weights attached to the data points (i.e. the points of the point pattern dataset to which the model was fitted). If the fitted model is correct, then the sum of these weights for all data points in a spatial region \eqn{B} has expected value equal to the area of \eqn{B}. See \code{\link{eem}} for further explanation. } \item{\code{"raw"}, \code{"inverse"} or \code{"pearson"}:}{ point process residuals (Baddeley et al, 2005) computed by the function \code{\link{residuals.ppm}}. These are residuals attached both to the data points and to some other points in the window of observation (namely, to the dummy points of the quadrature scheme used to fit the model). If the fitted model is correct, then the sum of the residuals in a spatial region \eqn{B} has mean zero. The options are \itemize{ \item \code{"raw"}: the raw residuals; \item \code{"inverse"}: the `inverse-lambda' residuals, a counterpart of the exponential energy weights; \item \code{"pearson"}: the Pearson residuals. } See \code{\link{residuals.ppm}} for further explanation. 
} } The argument \code{which} selects the type of plot that is produced. Options are: \describe{ \item{\code{"marks"}:}{ plot the residual measure. For the exponential energy weights (\code{type="eem"}) this displays circles centred at the points of the data pattern, with radii proportional to the exponential energy weights. For the residuals (\code{type="raw"}, \code{type="inverse"} or \code{type="pearson"}) this again displays circles centred at the points of the data pattern with radii proportional to the (positive) residuals, while the plotting of the negative residuals depends on the argument \code{plot.neg}. If \code{plot.neg="image"} then the negative part of the residual measure, which is a density, is plotted as a colour image. If \code{plot.neg="discrete"} then the discretised negative residuals (obtained by approximately integrating the negative density using the quadrature scheme of the fitted model) are plotted as squares centred at the dummy points with side lengths proportional to the (negative) residuals. [To control the size of the circles and squares, use the argument \code{maxsize}.] } \item{\code{"smooth"}:}{ plot a kernel-smoothed version of the residual measure. Each data or dummy point is taken to have a `mass' equal to its residual or exponential energy weight. (Note that residuals can be negative). This point mass is then replaced by a bivariate isotropic Gaussian density with standard deviation \code{sigma}. The value of the smoothed residual field at any point in the window is the sum of these weighted densities. If the fitted model is correct, this smoothed field should be flat, and its height should be close to 0 (for the residuals) or 1 (for the exponential energy weights). The field is plotted either as an image, contour plot or perspective view of a surface, according to the argument \code{plot.smooth}. The range of values of the smoothed field is printed if the option \code{which="sum"} is also selected. 
} \item{\code{"x"}:}{ produce a `lurking variable' plot for the \eqn{x} coordinate. This is a plot of \eqn{h(x)} against \eqn{x} (solid lines) and of \eqn{E(h(x))} against \eqn{x} (dashed lines), where \eqn{h(x)} is defined below, and \eqn{E(h(x))} denotes the expectation of \eqn{h(x)} assuming the fitted model is true. \itemize{ \item if \code{cumulative=TRUE} then \eqn{h(x)} is the cumulative sum of the weights or residuals for all points which have \eqn{X} coordinate less than or equal to \eqn{x}. For the residuals \eqn{E(h(x)) = 0}, and for the exponential energy weights \eqn{E(h(x)) = } area of the subset of the window to the left of the line \eqn{X=x}. \item if \code{cumulative=FALSE} then \eqn{h(x)} is the marginal integral of the smoothed residual field (see the case \code{which="smooth"} described above) on the \eqn{x} axis. This is approximately the derivative of the plot for \code{cumulative=TRUE}. The value of \eqn{h(x)} is computed by summing the values of the smoothed residual field over all pixels with the given \eqn{x} coordinate. For the residuals \eqn{E(h(x)) = 0}, and for the exponential energy weights \eqn{E(h(x)) = } length of the intersection between the observation window and the line \eqn{X=x}. } If \code{plot.sd = TRUE}, then superimposed on the lurking variable plot are the pointwise two-standard-deviation error limits for \eqn{h(x)} calculated for the inhomogeneous Poisson process. The default is \code{plot.sd = TRUE} for Poisson models and \code{plot.sd = FALSE} for non-Poisson models. } \item{\code{"y"}:}{ produce a similar lurking variable plot for the \eqn{y} coordinate. } \item{\code{"sum"}:}{ print the sum of the weights or residuals for all points in the window (clipped by a margin \code{rbord} if required) and the area of the same window. If the fitted model is correct the sum of the exponential energy weights should equal the area of the window, while the sum of the residuals should equal zero. 
Also print the range of values of the smoothed field displayed in the \code{"smooth"} case. } \item{\code{"all"}:}{ All four of the diagnostic plots listed above are plotted together in a two-by-two display. Top left panel is \code{"marks"} plot. Bottom right panel is \code{"smooth"} plot. Bottom left panel is \code{"x"} plot. Top right panel is \code{"y"} plot, rotated 90 degrees. } } The argument \code{rbord} ensures there are no edge effects in the computation of the residuals. The diagnostic calculations will be confined to those points of the data pattern which are at least \code{rbord} units away from the edge of the window. The value of \code{rbord} should be greater than or equal to the range of interaction permitted in the model. By default, the two-standard-deviation limits are calculated from the exact formula for the asymptotic variance of the residuals under the asymptotic normal approximation, equation (37) of Baddeley et al (2006). However, for compatibility with the original paper of Baddeley et al (2005), if \code{oldstyle=TRUE}, the two-standard-deviation limits are calculated using the innovation variance, an over-estimate of the true variance of the residuals. (However, see the section about Replicated Data). The argument \code{rv} would normally be used only by experts. It enables the user to substitute arbitrary values for the residuals or marks, overriding the usual calculations. If \code{rv} is present, then instead of calculating the residuals from the fitted model, the algorithm takes the residuals from the object \code{rv}, and plots them in the manner appropriate to the type of residual or mark selected by \code{type}. If \code{type ="eem"} then \code{rv} should be similar to the return value of \code{\link{eem}}, namely, a numeric vector of length equal to the number of points in the original data point pattern. 
Otherwise, \code{rv} should be similar to the return value of \code{\link{residuals.ppm}}, that is, it should be an object of class \code{"msr"} (see \code{\link{msr}}) representing a signed measure. The return value of \code{diagnose.ppm} is an object of class \code{"diagppm"}. The \code{plot} method for this class is documented here. There is also a \code{print} method. See the Examples. In \code{plot.diagppm}, if a four-panel diagnostic plot is produced (the default), then the extra arguments \code{xlab}, \code{ylab}, \code{rlab} determine the text labels for the \eqn{x} and \eqn{y} coordinates and the residuals, respectively. The undocumented arguments \code{col.neg} and \code{col.smooth} control the colour maps used in the top left and bottom right panels respectively. See also the companion functions \code{\link{qqplot.ppm}}, which produces a Q-Q plot of the residuals, and \code{\link{lurking}}, which produces lurking variable plots for any spatial covariate. } \section{Replicated Data}{ Note that if \code{object} is a model that was obtained by first fitting a model to replicated point pattern data using \code{\link{mppm}} and then using \code{\link{subfits}} to extract a model for one of the individual point patterns, then the variance calculations are only implemented for the innovation variance (\code{oldstyle=TRUE}) and this is the default in such cases. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten}, 151:95--100. 
} \seealso{ \code{\link{residuals.ppm}}, \code{\link{eem}}, \code{\link{ppm.object}}, \code{\link{qqplot.ppm}}, \code{\link{lurking}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells ~x, Strauss(r=0.15)) diagnose.ppm(fit) \dontrun{ diagnose.ppm(fit, type="pearson") } diagnose.ppm(fit, which="marks") diagnose.ppm(fit, type="raw", plot.neg="discrete") diagnose.ppm(fit, type="pearson", which="smooth") # save the diagnostics and plot them later u <- diagnose.ppm(fit, rbord=0.15, plot.it=FALSE) \dontrun{ plot(u) plot(u, which="marks") } } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat/man/crossing.linnet.Rd0000644000176200001440000000200213160710571016271 0ustar liggesusers\name{crossing.linnet} \alias{crossing.linnet} \title{ Crossing Points between Linear Network and Other Lines } \description{ Find all the crossing-points between a linear network and another pattern of lines or line segments. } \usage{ crossing.linnet(X, Y) } \arguments{ \item{X}{ Linear network (object of class \code{"linnet"}). } \item{Y}{ A linear network, or a spatial pattern of line segments (class \code{"psp"}) or infinite lines (class \code{"infline"}). } } \details{ All crossing-points between \code{X} and \code{Y} are determined. The result is a point pattern on the network \code{X}. } \value{ Point pattern on a linear network (object of class \code{"lpp"}). } \author{ \adrian. 
} \seealso{ \code{\link{crossing.psp}} % \code{\link{chop.linnet}} } \examples{ plot(simplenet, main="") L <- infline(p=runif(3), theta=runif(3, max=pi/2)) plot(L, col="red") Y <- crossing.linnet(simplenet, L) plot(Y, add=TRUE, cols="blue") } \keyword{spatial} \keyword{manip} spatstat/man/rotate.infline.Rd0000644000176200001440000000361613160710621016103 0ustar liggesusers\name{rotate.infline} \alias{rotate.infline} \alias{shift.infline} \alias{reflect.infline} \alias{flipxy.infline} \title{ Rotate or Shift Infinite Lines } \description{ Given the coordinates of one or more infinite straight lines in the plane, apply a rotation or shift. } \usage{ \method{rotate}{infline}(X, angle = pi/2, \dots) \method{shift}{infline}(X, vec = c(0,0), \dots) \method{reflect}{infline}(X) \method{flipxy}{infline}(X) } \arguments{ \item{X}{ Object of class \code{"infline"} representing one or more infinite straight lines in the plane. } \item{angle}{ Angle of rotation, in radians. } \item{vec}{ Translation (shift) vector: a numeric vector of length 2, or a \code{list(x,y)}, or a point pattern containing one point. } \item{\dots}{ Ignored. } } \details{ These functions are methods for the generic \code{\link{shift}}, \code{\link{rotate}}, \code{\link{reflect}} and \code{\link{flipxy}} for the class \code{"infline"}. An object of class \code{"infline"} represents one or more infinite lines in the plane. } \value{ Another \code{"infline"} object representing the result of the transformation. } \author{ \adrian. 
} \seealso{ \code{\link{infline}} } \examples{ L <- infline(v=0.5) plot(square(c(-1,1)), main="rotate lines", type="n") points(0, 0, pch=3) plot(L, col="green") plot(rotate(L, pi/12), col="red") plot(rotate(L, pi/6), col="red") plot(rotate(L, pi/4), col="red") L <- infline(p=c(0.4, 0.9), theta=pi* c(0.2, 0.6)) plot(square(c(-1,1)), main="shift lines", type="n") L <- infline(p=c(0.7, 0.8), theta=pi* c(0.2, 0.6)) plot(L, col="green") plot(shift(L, c(-0.5, -0.4)), col="red") plot(square(c(-1,1)), main="reflect lines", type="n") points(0, 0, pch=3) L <- infline(p=c(0.7, 0.8), theta=pi* c(0.2, 0.6)) plot(L, col="green") plot(reflect(L), col="red") } \keyword{spatial} \keyword{manip} spatstat/man/linearpcfcross.inhom.Rd0000644000176200001440000001074413160710621017310 0ustar liggesusers\name{linearpcfcross.inhom} \alias{linearpcfcross.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype pair correlation function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearpcfcross.inhom(X, i, j, lambdaI, lambdaJ, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{ij}(r)}{g[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. 
Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdaJ}{ Intensity values for the points of type \code{j}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfcross.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. 
If \code{lambdaI} or \code{lambdaJ} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfdot}}, \code{\link{linearpcf}}, \code{\link{pcfcross.inhom}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lamJ <- function(x,y,const=lam[["robbery"]]){ rep(const, length(x)) } g <- linearpcfcross.inhom(chicago, "assault", "robbery", lamI, lamJ) \dontrun{ fit <- lppm(chicago, ~marks + x) linearpcfcross.inhom(chicago, "assault", "robbery", fit, fit) } } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/has.close.Rd0000644000176200001440000000401213160710621015030 0ustar liggesusers\name{has.close} \alias{has.close} \alias{has.close.default} \alias{has.close.ppp} \alias{has.close.pp3} \title{ Check Whether Points Have Close Neighbours } \description{ For each point in a point pattern, determine whether the point has a close neighbour in the same pattern. } \usage{ has.close(X, r, Y=NULL, \dots) \method{has.close}{default}(X,r, Y=NULL, \dots, periodic=FALSE) \method{has.close}{ppp}(X,r, Y=NULL, \dots, periodic=FALSE, sorted=FALSE) \method{has.close}{pp3}(X,r, Y=NULL, \dots, periodic=FALSE, sorted=FALSE) } \arguments{ \item{X,Y}{ Point patterns of class \code{"ppp"} or \code{"pp3"} or \code{"lpp"}. 
} \item{r}{ Threshold distance: a number greater than zero. } \item{periodic}{ Logical value indicating whether to measure distances in the periodic sense, so that opposite sides of the (rectangular) window are treated as identical. } \item{sorted}{ Logical value, indicating whether the points of \code{X} (and \code{Y}, if given) are already sorted into increasing order of the \eqn{x} coordinates. } \item{\dots}{Other arguments are ignored.} } \details{ This is simply a faster version of \code{(nndist(X) <= r)} or \code{(nncross(X,Y,what="dist") <= r)}. \code{has.close(X,r)} determines, for each point in the pattern \code{X}, whether or not this point has a neighbour in the same pattern \code{X} which lies at a distance less than or equal to \code{r}. \code{has.close(X,r,Y)} determines, for each point in the pattern \code{X}, whether or not this point has a neighbour in the \emph{other} pattern \code{Y} which lies at a distance less than or equal to \code{r}. The function \code{has.close} is generic, with methods for \code{"ppp"} and \code{"pp3"} and a default method. } \value{ A logical vector, with one entry for each point of \code{X}. } \author{ \adrian. } \seealso{ \code{\link{nndist}} } \examples{ has.close(redwood, 0.05) with(split(amacrine), has.close(on, 0.05, off)) } \keyword{spatial} \keyword{math} spatstat/man/summary.listof.Rd0000644000176200001440000000150313160710621016147 0ustar liggesusers\name{summary.listof} \alias{summary.listof} \title{Summary of a List of Things} \description{ Prints a useful summary of each item in a list of things. } \usage{ \method{summary}{listof}(object, \dots) } \arguments{ \item{object}{ An object of class \code{"listof"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{summary}}. An object of the class \code{"listof"} is effectively a list of things which are all of the same class. This function extracts a useful summary of each of the items in the list. 
} \seealso{ \code{\link{summary}}, \code{\link{plot.listof}} } \examples{ x <- list(A=runif(10), B=runif(10), C=runif(10)) class(x) <- c("listof", class(x)) summary(x) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/box3.Rd0000644000176200001440000000301013160710571014025 0ustar liggesusers\name{box3} \Rdversion{1.1} \alias{box3} \title{ Three-Dimensional Box } \description{ Creates an object representing a three-dimensional box. } \usage{ box3(xrange = c(0, 1), yrange = xrange, zrange = yrange, unitname = NULL) } \arguments{ \item{xrange, yrange, zrange}{ Dimensions of the box in the \eqn{x,y,z} directions. Each of these arguments should be a numeric vector of length 2. } \item{unitname}{ Optional. Name of the unit of length. See Details. } } \details{ This function creates an object representing a three-dimensional rectangular parallelepiped (box) with sides parallel to the coordinate axes. The object can be used to specify the domain of a three-dimensional point pattern (see \code{\link{pp3}}) and in various geometrical calculations (see \code{\link{volume.box3}}, \code{\link{diameter.box3}}, \code{\link{eroded.volumes}}). The optional argument \code{unitname} specifies the name of the unit of length. See \code{\link{unitname}} for valid formats. The function \code{\link{as.box3}} can be used to convert other kinds of data to this format. } \value{ An object of class \code{"box3"}. There is a print method for this class. } \author{\adrian and \rolf } \seealso{ \code{\link{as.box3}}, \code{\link{pp3}}, \code{\link{volume.box3}}, \code{\link{diameter.box3}}, \code{\link{eroded.volumes}}. 
} \examples{ box3() box3(c(0,10),c(0,10),c(0,5), unitname=c("metre","metres")) box3(c(-1,1)) } \keyword{spatial} \keyword{datagen} spatstat/man/as.data.frame.envelope.Rd0000644000176200001440000000260713160710571017405 0ustar liggesusers\name{as.data.frame.envelope} \alias{as.data.frame.envelope} \title{Coerce Envelope to Data Frame} \description{ Converts an envelope object to a data frame. } \usage{ \method{as.data.frame}{envelope}(x, \dots, simfuns=FALSE) } \arguments{ \item{x}{Envelope object (class \code{"envelope"}).} \item{\dots}{Ignored.} \item{simfuns}{Logical value indicating whether the result should include the values of the simulated functions that were used to build the envelope. } } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class of envelopes (see \code{\link{envelope}}. The result is a data frame with columns containing the values of the function argument (usually named \code{r}), the function estimate for the original point pattern data (\code{obs}), the upper and lower envelope limits (\code{hi} and \code{lo}), and possibly additional columns. If \code{simfuns=TRUE}, the result also includes columns of values of the simulated functions that were used to compute the envelope. This is possible only when the envelope was computed with the argument \code{savefuns=TRUE} in the call to \code{\link{envelope}}. } \value{ A data frame. } \examples{ E <- envelope(cells, nsim=5, savefuns=TRUE) tail(as.data.frame(E)) tail(as.data.frame(E, simfuns=TRUE)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/identify.psp.Rd0000644000176200001440000000351413160710621015573 0ustar liggesusers\name{identify.psp} \alias{identify.psp} \title{Identify Segments in a Line Segment Pattern} \description{ If a line segment pattern is plotted in the graphics window, this function will find the segment which is nearest to the mouse position, and print its serial number. 
} \usage{ \method{identify}{psp}(x, \dots, labels=seq_len(nsegments(x)), n=nsegments(x), plot=TRUE) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{labels}{ Labels associated with the segments, to be plotted when the segments are identified. A character vector or numeric vector of length equal to the number of segments in \code{x}. } \item{n}{ Maximum number of segments to be identified. } \item{plot}{ Logical. Whether to plot the labels when a segment is identified. } \item{\dots}{ Arguments passed to \code{\link[graphics]{text.default}} controlling the plotting of the labels. } } \value{ Vector containing the serial numbers of the segments in the pattern \code{x} that were identified. } \details{ This is a method for the generic function \code{\link[graphics]{identify}} for line segment pattern objects. The line segment pattern \code{x} should first be plotted using \code{\link{plot.psp}}. Then \code{identify(x)} reads the position of the graphics pointer each time the left mouse button is pressed. It then finds the segment in the pattern \code{x} that is closest to the mouse position. This segment's index will be returned as part of the value of the call. Each time a segment is identified, text will be displayed next to the point, showing its serial number (or the relevant entry of \code{labels}). } \seealso{ \code{\link[graphics]{identify}}, \code{\link{identify.ppp}}. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{iplot} spatstat/man/periodify.Rd0000644000176200001440000000732213160710621015152 0ustar liggesusers\name{periodify} \alias{periodify} \alias{periodify.owin} \alias{periodify.ppp} \alias{periodify.psp} \title{ Make Periodic Copies of a Spatial Pattern } \description{ Given a spatial pattern (point pattern, line segment pattern, window, etc) make shifted copies of the pattern and optionally combine them to make a periodic pattern. } \usage{ periodify(X, ...) 
\method{periodify}{ppp}(X, nx = 1, ny = 1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) \method{periodify}{psp}(X, nx = 1, ny = 1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) \method{periodify}{owin}(X, nx = 1, ny = 1, ..., combine=TRUE, warn=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) } \arguments{ \item{X}{ An object representing a spatial pattern (point pattern, line segment pattern or window). } \item{nx,ny}{ Integers. Numbers of additional copies of \code{X} in each direction. The result will be a grid of \code{2 * nx + 1} by \code{2 * ny + 1} copies of the original object. (Overruled by \code{ix, iy, ixy}). } \item{\dots}{ Ignored. } \item{combine}{ Logical flag determining whether the copies should be superimposed to make an object like \code{X} (if \code{combine=TRUE}) or simply returned as a list of objects (\code{combine=FALSE}). } \item{warn}{ Logical flag determining whether to issue warnings. } \item{check}{ Logical flag determining whether to check the validity of the combined pattern. } \item{ix, iy}{ Integer vectors determining the grid positions of the copies of \code{X}. (Overruled by \code{ixy}). } \item{ixy}{ Matrix or data frame with two columns, giving the grid positions of the copies of \code{X}. } } \details{ Given a spatial pattern (point pattern, line segment pattern, etc) this function makes a number of shifted copies of the pattern and optionally combines them. The function \code{periodify} is generic, with methods for various kinds of spatial objects. The default is to make a 3 by 3 array of copies of \code{X} and combine them into a single pattern of the same kind as \code{X}. This can be used (for example) to compute toroidal or periodic edge corrections for various operations on \code{X}. 
If the arguments \code{nx}, \code{ny} are given and other arguments are missing, the original object will be copied \code{nx} times to the right and \code{nx} times to the left, then \code{ny} times upward and \code{ny} times downward, making \code{(2 * nx + 1) * (2 * ny + 1)} copies altogether, arranged in a grid, centred on the original object. If the arguments \code{ix}, \code{iy} or \code{ixy} are specified, then these determine the grid positions of the copies of \code{X} that will be made. For example \code{(ix,iy) = (1, 2)} means a copy of \code{X} shifted by the vector \code{(ix * w, iy * h)} where \code{w,h} are the width and height of the bounding rectangle of \code{X}. If \code{combine=TRUE} (the default) the copies of \code{X} are superimposed to create an object of the same kind as \code{X}. If \code{combine=FALSE} the copies of \code{X} are returned as a list. } \value{ If \code{combine=TRUE}, an object of the same class as \code{X}. If \code{combine=FALSE}, a list of objects of the same class as \code{X}. } \seealso{ \code{\link{shift}} } \examples{ data(cells) plot(periodify(cells)) a <- lapply(periodify(Window(cells), combine=FALSE), plot, add=TRUE,lty=2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/methods.zclustermodel.Rd0000644000176200001440000000261413160710621017515 0ustar liggesusers\name{methods.zclustermodel} \alias{methods.zclustermodel} % DoNotExport \alias{pcfmodel.zclustermodel} \alias{predict.zclustermodel} \alias{print.zclustermodel} \title{ Methods for Cluster Models } \description{ Methods for the experimental class of cluster models. } \usage{ \method{pcfmodel}{zclustermodel}(model, \dots) \method{predict}{zclustermodel}(object, \dots, locations, type = "intensity", ngrid = NULL) \method{print}{zclustermodel}(x, \dots) } \arguments{ \item{model,object,x}{ Object of class \code{"zclustermodel"}. } \item{\dots}{ Arguments passed to other methods. 
} \item{locations}{ Locations where prediction should be performed. A window or a point pattern. } \item{type}{ Currently must equal \code{"intensity"}. } \item{ngrid}{ Pixel grid dimensions for prediction, if \code{locations} is a rectangle or polygon. } } \details{ Experimental. } \value{ Same as for other methods. } \author{ \adrian } \seealso{ \code{\link{zclustermodel}} } \examples{ m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1) m2 <- zclustermodel("VarGamma", kappa=10, mu=10, scale=0.1, nu=0.7) m m2 g <- pcfmodel(m) g(0.2) g2 <- pcfmodel(m2) g2(1) Z <- predict(m, locations=square(2)) Z2 <- predict(m2, locations=square(1)) varcount(m, square(1)) varcount(m2, square(1)) } \keyword{spatial} \keyword{models} spatstat/man/begins.Rd0000644000176200001440000000147413160710571014435 0ustar liggesusers\name{begins} \alias{begins} \title{ Check Start of Character String } \description{ Checks whether a character string begins with a particular prefix. } \usage{ begins(x, firstbit) } \arguments{ \item{x}{ Character string, or vector of character strings, to be tested. } \item{firstbit}{ A single character string. } } \details{ This simple wrapper function checks whether (each entry in) \code{x} begins with the string \code{firstbit}, and returns a logical value or logical vector with one entry for each entry of \code{x}. This function is useful mainly for reducing complexity in model formulae. } \value{ Logical vector of the same length as \code{x}. } \author{ \adrian \rolf and \ege } \examples{ begins(c("Hello", "Goodbye"), "Hell") begins("anything", "") } \keyword{character} spatstat/man/areaLoss.Rd0000644000176200001440000000402413160710571014731 0ustar liggesusers\name{areaLoss} \alias{areaLoss} \title{Difference of Disc Areas} \description{ Computes the area of that part of a disc that is not covered by other discs. 
} \usage{ areaLoss(X, r, ..., W=as.owin(X), subset=NULL, exact=FALSE, ngrid=spatstat.options("ngrid.disc")) } \arguments{ \item{X}{ Locations of the centres of discs. A point pattern (object of class \code{"ppp"}). } \item{r}{ Disc radius, or vector of disc radii. } \item{\dots}{Ignored.} \item{W}{ Optional. Window (object of class \code{"owin"}) inside which the area should be calculated. } \item{subset}{ Optional. Index identifying a subset of the points of \code{X} for which the area difference should be computed. } \item{exact}{ Choice of algorithm. If \code{exact=TRUE}, areas are computed exactly using analytic geometry. If \code{exact=FALSE} then a faster algorithm is used to compute a discrete approximation to the areas. } \item{ngrid}{ Integer. Number of points in the square grid used to compute the discrete approximation, when \code{exact=FALSE}. } } \value{ A matrix with one row for each point in \code{X} (or \code{X[subset]}) and one column for each value in \code{r}. } \details{ This function computes, for each point \code{X[i]} in \code{X} and for each radius \code{r}, the area of that part of the disc of radius \code{r} centred at the location \code{X[i]} that is \emph{not} covered by any of the other discs of radius \code{r} centred at the points \code{X[j]} for \code{j} not equal to \code{i}. This area is important in some calculations related to the area-interaction model \code{\link{AreaInter}}. The result is a matrix, with one row for each point in \code{X} and one column for each entry of \code{r}. 
} \seealso{ \code{\link{AreaInter}}, \code{\link{areaGain}}, \code{\link{dilated.areas}} } \examples{ data(cells) areaLoss(cells, 0.1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/Gcom.Rd0000644000176200001440000002264213160710571014053 0ustar liggesusers\name{Gcom} \Rdversion{1.1} \alias{Gcom} \title{ Model Compensator of Nearest Neighbour Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the \emph{compensator} of the nearest neighbour distance distribution function \eqn{G} based on the fitted model (as well as the usual nonparametric estimates of \eqn{G} based on the data alone). Comparison between the nonparametric and model-compensated \eqn{G} functions serves as a diagnostic for the model. } \usage{ Gcom(object, r = NULL, breaks = NULL, ..., correction = c("border", "Hanisch"), conditional = !is.poisson(object), restrict=FALSE, model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), ppmcorrection="border", truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{G(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Edge correction(s) to be employed in calculating the compensator. Options are \code{"border"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{conditional}{ Optional. Logical value indicating whether to compute the estimates for the conditional case. See Details. 
} \item{restrict}{ Logical value indicating whether to compute the restriction estimator (\code{restrict=TRUE}) or the reweighting estimator (\code{restrict=FALSE}, the default). Applies only if \code{conditional=TRUE}. See Details. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{\dots}{ Extra arguments passed to \code{\link{ppm}}. } \item{ppmcorrection}{ The \code{correction} argument to \code{\link{ppm}}. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes different estimates of the nearest neighbour distance distribution function \eqn{G} of the dataset, which should be approximately equal if the model is a good fit to the data. The first argument, \code{object}, is usually a fitted point process model (object of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. 
For convenience, \code{object} can also be a point pattern (object of class \code{"ppp"}). In that case, a point process model will be fitted to it, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. The algorithm first extracts the original point pattern dataset (to which the model was fitted) and computes the standard nonparametric estimates of the \eqn{G} function. It then also computes the \emph{model-compensated} \eqn{G} function. The different functions are returned as columns in a data frame (of class \code{"fv"}). The interpretation of the columns is as follows (ignoring edge corrections): \describe{ \item{\code{bord}:}{ the nonparametric border-correction estimate of \eqn{G(r)}, \deqn{ \hat G(r) = \frac{\sum_i I\{ d_i \le r\} I\{ b_i > r \}}{\sum_i I\{ b_i > r\}} }{ G(r) = (sum[i] I(d[i] <= r) I(b[i] > r))/(sum[i] I(b[i] > r)) } where \eqn{d_i}{d[i]} is the distance from the \eqn{i}-th data point to its nearest neighbour, and \eqn{b_i}{b[i]} is the distance from the \eqn{i}-th data point to the boundary of the window \eqn{W}. } \item{\code{bcom}:}{ the model compensator of the border-correction estimate \deqn{ {\bf C}\, \hat G(r) = \frac{\int \lambda(u,x) I\{ b(u) > r\} I\{ d(u,x) \le r\}}{ 1 + \sum_i I\{ b_i > r\} } }{ C G(r) = (integral[u] lambda(u,x) I(b(u) > r) I( d(u,x) <= r ))/(1 + sum[i] I(b[i] > r)) } where \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity of the model at the location \eqn{u}, and \eqn{d(u,x)} denotes the distance from \eqn{u} to the nearest point in \eqn{x}, while \eqn{b(u)} denotes the distance from \eqn{u} to the boundary of the window\eqn{W}. 
} \item{\code{han}:}{ the nonparametric Hanisch estimate of \eqn{G(r)} \deqn{ \hat G(r) = \frac{D(r)}{D(\infty)} }{ G(r) = D(r)/D(infty) } where \deqn{ D(r) = \sum_i \frac{ I\{x_i \in W_{\ominus d_i}\} I\{d_i \le r\} }{ \mbox{area}(W_{\ominus d_i}) } }{ D(r) = sum[i] I(x[i] in W[-r]) I(d[i] <= r)/area(W[-d[i]]) } in which \eqn{W_{\ominus r}}{W[-r]} denotes the erosion of the window \eqn{W} by a distance \eqn{r}. } \item{\code{hcom}:}{ the corresponding model-compensated function \deqn{ {\bf C} \, G(r) = \int_W \frac{ \lambda(u,x) I(u \in W_{\ominus d(u)}) I(d(u) \le r) }{ \hat D(\infty) \mbox{area}(W_{\ominus d(u)}) + 1 } }{ C G(r) = integral[u] lambda(u,x) I(u in W[-d(u)]) I(d(u) <= r)/ (1 + D(infty) area(W[-d(u)])) } where \eqn{d(u) = d(u, x)} is the (`empty space') distance from location \eqn{u} to the nearest point of \eqn{x}. } } If the fitted model is a Poisson point process, then the formulae above are exactly what is computed. If the fitted model is not Poisson, the formulae above are modified slightly to handle edge effects. The modification is determined by the arguments \code{conditional} and \code{restrict}. The value of \code{conditional} defaults to \code{FALSE} for Poisson models and \code{TRUE} for non-Poisson models. If \code{conditional=FALSE} then the formulae above are not modified. If \code{conditional=TRUE}, then the algorithm calculates the \emph{restriction estimator} if \code{restrict=TRUE}, and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}. See Appendix E of Baddeley, Rubak and \ifelse{latex}{\out{M\o ller}}{Moller} (2011). See also \code{\link{spatstat.options}('eroded.intensity')}. Thus, by default, the reweighting estimator is computed for non-Poisson models. The border-corrected and Hanisch-corrected estimates of \eqn{G(r)} are approximately unbiased estimates of the \eqn{G}-function, assuming the point process is stationary. 
The model-compensated functions are unbiased estimates \emph{of the mean value of the corresponding nonparametric estimate}, assuming the model is true. Thus, if the model is a good fit, the mean value of the difference between the nonparametric and model-compensated estimates is approximately zero. To compute the difference between the nonparametric and model-compensated functions, use \code{\link{Gres}}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}. } \seealso{ Related functions: \code{\link{Gest}}, \code{\link{Gres}}. Alternative functions: \code{\link{Kcom}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{psst}}. Model fitting: \code{\link{ppm}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson G0 <- Gcom(fit0) G0 plot(G0) # uniform Poisson is clearly not correct # Hanisch estimates only plot(Gcom(fit0), cbind(han, hcom) ~ r) fit1 <- ppm(cells, ~1, Strauss(0.08)) plot(Gcom(fit1), cbind(han, hcom) ~ r) # Try adjusting interaction distance fit2 <- update(fit1, Strauss(0.10)) plot(Gcom(fit2), cbind(han, hcom) ~ r) G3 <- Gcom(cells, interaction=Strauss(0.12)) plot(G3, cbind(han, hcom) ~ r) } \keyword{spatial} \keyword{models} spatstat/man/pcfmulti.Rd0000644000176200001440000001144213160710621015001 0ustar liggesusers\name{pcfmulti} \alias{pcfmulti} \title{ Marked pair correlation function } \description{ For a marked point pattern, estimate the multitype pair correlation function using kernel methods. 
} \usage{ pcfmulti(X, I, J, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), divisor = c("r", "d"), Iname = "points satisfying condition I", Jname = "points satisfying condition J") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. } \item{Iname,Jname}{ Optional. Character strings describing the members of the subsets \code{I} and \code{J}. } } \details{ This is a generalisation of \code{\link{pcfcross}} to arbitrary collections of points. The algorithm measures the distance from each data point in subset \code{I} to each data point in subset \code{J}, excluding identical pairs of points. The distances are kernel-smoothed and renormalised to form a pair correlation function. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. 
\item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. The arguments \code{I} and \code{J} specify two subsets of the point pattern \code{X}. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285) applied to the points of type \code{j}. 
That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process of type \code{j}, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. } \value{ An object of class \code{"fv"}. } \seealso{ \code{\link{pcfcross}}, \code{\link{pcfdot}}, \code{\link{pcf.ppp}}. } \examples{ adult <- (marks(longleaf) >= 30) juvenile <- !adult p <- pcfmulti(longleaf, adult, juvenile) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/quad.object.Rd0000644000176200001440000000567713160710621015372 0ustar liggesusers\name{quad.object} \alias{quad.object} %DoNotExport \title{Class of Quadrature Schemes} \description{ A class \code{"quad"} to represent a quadrature scheme. } \details{ A (finite) quadrature scheme is a list of quadrature points \eqn{u_j}{u[j]} and associated weights \eqn{w_j}{w[j]} which is used to approximate an integral by a finite sum: \deqn{ \int f(x) dx \approx \sum_j f(u_j) w_j }{ integral(f(x) dx) ~= sum( f(u[j]) w[j] ) } Given a point pattern dataset, a \emph{Berman-Turner} quadrature scheme is one which includes all these data points, as well as a nonzero number of other (``dummy'') points. These quadrature schemes are used to approximate the pseudolikelihood of a point process, in the method of Baddeley and Turner (2000) (see Berman and Turner (1992)). Accuracy and computation time both increase with the number of points in the quadrature scheme. An object of class \code{"quad"} represents a Berman-Turner quadrature scheme. It can be passed as an argument to the model-fitting function \code{\link{ppm}}, which requires a quadrature scheme. 
An object of this class contains at least the following elements: \tabular{ll}{ \code{data}: \tab an object of class \code{"ppp"} \cr \tab giving the locations (and marks) of the data points.\cr \code{dummy}: \tab an object of class \code{"ppp"} \cr \tab giving the locations (and marks) of the dummy points.\cr \code{w}: \tab vector of nonnegative weights for the quadrature points\cr } Users are strongly advised not to manipulate these entries directly. The domain of quadrature is specified by \code{Window(dummy)} while the observation window (if this needs to be specified separately) is taken to be \code{Window(data)}. The weights vector \code{w} may also have an attribute \code{attr(w, "zeroes")} equivalent to the logical vector \code{(w == 0)}. If this is absent then all points are known to have positive weights. To create an object of class \code{"quad"}, users would typically call the high level function \code{\link{quadscheme}}. (They are actually created by the low level function \code{quad}.) Entries are extracted from a \code{"quad"} object by the functions \code{x.quad}, \code{y.quad}, \code{w.quad} and \code{marks.quad}, which extract the \eqn{x} coordinates, \eqn{y} coordinates, weights, and marks, respectively. The function \code{n.quad} returns the total number of quadrature points (dummy plus data). An object of class \code{"quad"} can be converted into an ordinary point pattern by the function \code{\link{union.quad}} which simply takes the union of the data and dummy points. Quadrature schemes can be plotted using \code{\link{plot.quad}} (a method for the generic \code{\link{plot}}). 
} \seealso{ \code{\link{quadscheme}}, \code{\link{ppm}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/Kmodel.Rd0000644000176200001440000000326413160710571014400 0ustar liggesusers\name{Kmodel} \alias{Kmodel} \alias{pcfmodel} \title{K Function or Pair Correlation Function of a Point Process Model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a point process model. } \usage{ Kmodel(model, \dots) pcfmodel(model, \dots) } \arguments{ \item{model}{ A fitted point process model of some kind. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ For certain types of point process models, it is possible to write down a mathematical expression for the \eqn{K} function or the pair correlation function of the model. The functions \code{Kmodel} and \code{pcfmodel} give the theoretical \eqn{K}-function and the theoretical pair correlation function for a point process model that has been fitted to data. The functions \code{Kmodel} and \code{pcfmodel} are generic, with methods for the classes \code{"kppm"} (cluster processes and Cox processes) and \code{"ppm"} (Gibbs processes). The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{Kmodel.kppm}} for the method for cluster processes and Cox processes. \code{\link{Kmodel.ppm}} for the method for Gibbs processes. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/model.frame.ppm.Rd0000644000176200001440000000433513160710621016145 0ustar liggesusers\name{model.frame.ppm} \alias{model.frame.ppm} \alias{model.frame.kppm} \alias{model.frame.dppm} \alias{model.frame.lppm} \title{ Extract the Variables in a Point Process Model } \description{ Given a fitted point process model, this function returns a data frame containing all the variables needed to fit the model using the Berman-Turner device. } \usage{ \method{model.frame}{ppm}(formula, ...) \method{model.frame}{kppm}(formula, ...) \method{model.frame}{dppm}(formula, ...) \method{model.frame}{lppm}(formula, ...) } \arguments{ \item{formula}{ A fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"lppm"}. } \item{\dots}{ Additional arguments passed to \code{\link{model.frame.glm}}. } } \details{ The function \code{\link{model.frame}} is generic. These functions are methods for \code{\link{model.frame}} for fitted point process models (objects of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"lppm"}). The first argument should be a fitted point process model; it has to be named \code{formula} for consistency with the generic function. The result is a data frame containing all the variables used in fitting the model. The data frame has one row for each quadrature point used in fitting the model. The quadrature scheme can be extracted using \code{\link{quad.ppm}}. } \value{ A \code{data.frame} containing all the variables used in the fitted model, plus additional variables specified in \code{\dots}. It has an additional attribute \code{"terms"} containing information about the model formula. For details see \code{\link{model.frame.glm}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. 
} \seealso{ \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}}, \code{\link{lppm}}, \code{\link{model.frame}}, \code{\link{model.matrix.ppm}} } \examples{ fit <- ppm(cells ~ x) mf <- model.frame(fit) kfit <- kppm(redwood ~ x, "Thomas") kmf <- model.frame(kfit) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/as.im.Rd0000644000176200001440000002243313160710571014173 0ustar liggesusers\name{as.im} \alias{as.im} \alias{as.im.im} \alias{as.im.leverage.ppm} \alias{as.im.owin} \alias{as.im.matrix} \alias{as.im.tess} \alias{as.im.function} \alias{as.im.funxy} \alias{as.im.distfun} \alias{as.im.nnfun} \alias{as.im.Smoothfun} \alias{as.im.data.frame} \alias{as.im.default} \title{Convert to Pixel Image} \description{ Converts various kinds of data to a pixel image } \usage{ as.im(X, \dots) \method{as.im}{im}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) \method{as.im}{owin}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL, value=1) \method{as.im}{matrix}(X, W=NULL, \dots) \method{as.im}{tess}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) \method{as.im}{function}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL, strict=FALSE) \method{as.im}{funxy}(X, W=Window(X), \dots) \method{as.im}{distfun}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL, approx=TRUE) \method{as.im}{nnfun}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) \method{as.im}{Smoothfun}(X, W=NULL, \dots) \method{as.im}{leverage.ppm}(X, \dots) \method{as.im}{data.frame}(X, \dots, step, fatal=TRUE, drop=TRUE) \method{as.im}{default}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) } \arguments{ \item{X}{Data to be converted to a pixel image.} \item{W}{Window object which determines the spatial domain and pixel array geometry. 
} \item{\dots}{Additional arguments passed to \code{X} when \code{X} is a function.} \item{eps,dimyx,xy}{ Optional parameters passed to \code{\link{as.mask}} which determine the pixel array geometry. See \code{\link{as.mask}}. } \item{na.replace}{Optional value to replace \code{NA} entries in the output image. } \item{value}{Optional. The value to be assigned to pixels inside the window, if \code{X} is a window. } \item{strict}{ Logical value indicating whether to match formal arguments of \code{X} when \code{X} is a function. If \code{strict=FALSE} (the default), all the \code{\dots} arguments are passed to \code{X}. If \code{strict=TRUE}, only named arguments are passed, and only if they match the names of formal arguments of \code{X}. } \item{step}{ Optional. A single number, or numeric vector of length 2, giving the grid step lengths in the \eqn{x} and \eqn{y} directions. } \item{fatal}{ Logical value indicating what to do if the resulting image would be too large for available memory. If \code{fatal=TRUE} (the default), an error occurs. If \code{fatal=FALSE}, a warning is issued and \code{NULL} is returned. } \item{drop}{ Logical value indicating what to do when \code{X} is a data frame with 3 columns. If \code{drop=TRUE} (the default), the result is a pixel image. If \code{drop=FALSE}, the result is a list containing one image. } \item{approx}{ Logical value indicating whether to compute an approximate result at faster speed, by using \code{\link{distmap}}, when \code{X} is a distance function. } } \details{ This function converts the data \code{X} into a pixel image object of class \code{"im"} (see \code{\link{im.object}}). The function \code{as.im} is generic, with methods for the classes listed above. Currently \code{X} may be any of the following: \itemize{ \item a pixel image object, of class \code{"im"}. \item a window object, of class \code{"owin"} (see \code{\link{owin.object}}). 
The result is an image with all pixel entries equal to \code{value} inside the window \code{X}, and \code{NA} outside. \item a matrix. \item a tessellation (object of class \code{"tess"}). The result is a factor-valued image, with one factor level corresponding to each tile of the tessellation. Pixels are classified according to the tile of the tessellation into which they fall. \item a single number (or a single logical, complex, factor or character value). The result is an image with all pixel entries equal to this constant value inside the window \code{W} (and \code{NA} outside, unless the argument \code{na.replace} is given). Argument \code{W} is required. \item a function of the form \code{function(x, y, ...)} which is to be evaluated to yield the image pixel values. In this case, the additional argument \code{W} must be present. This window will be converted to a binary image mask. Then the function \code{X} will be evaluated in the form \code{X(x, y, ...)} where \code{x} and \code{y} are \bold{vectors} containing the \eqn{x} and \eqn{y} coordinates of all the pixels in the image mask, and \code{...} are any extra arguments given. This function must return a vector or factor of the same length as the input vectors, giving the pixel values. \item an object of class \code{"funxy"} representing a \code{function(x,y,...)} \item an object of class \code{"distfun"} representing a distance function (created by the command \code{\link{distfun}}). \item an object of class \code{"nnfun"} representing a nearest neighbour function (created by the command \code{\link{nnfun}}). \item a list with entries \code{x, y, z} in the format expected by the standard \code{R} functions \code{\link{image.default}} and \code{\link{contour.default}}. That is, \code{z} is a matrix of pixel values, \code{x} and \code{y} are vectors of \eqn{x} and \eqn{y} coordinates respectively, and \code{z[i,j]} is the pixel value for the location \code{(x[i],y[j])}. 
\item a point pattern (object of class \code{"ppp"}). See the separate documentation for \code{\link{as.im.ppp}}. \item A data frame with at least three columns. Columns named \code{x}, \code{y} and \code{z}, if present, will be assumed to contain the spatial coordinates and the pixel values, respectively. Otherwise the \code{x} and \code{y} coordinates will be taken from the first two columns of the data frame, and any remaining columns will be interpreted as pixel values. } The spatial domain (enclosing rectangle) of the pixel image is determined by the argument \code{W}. If \code{W} is absent, the spatial domain is determined by \code{X}. When \code{X} is a function, a matrix, or a single numerical value, \code{W} is required. The pixel array dimensions of the final resulting image are determined by (in priority order) \itemize{ \item the argument \code{eps}, \code{dimyx} or \code{xy} if present; \item the pixel dimensions of the window \code{W}, if it is present and if it is a binary mask; \item the pixel dimensions of \code{X} if it is an image, a binary mask, or a \code{list(x,y,z)}; \item the default pixel dimensions, controlled by \code{\link{spatstat.options}}. } Note that if \code{eps}, \code{dimyx} or \code{xy} is given, this will override the pixel dimensions of \code{X} if it has them. Thus, \code{as.im} can be used to change an image's pixel dimensions. If the argument \code{na.replace} is given, then all \code{NA} entries in the image will be replaced by this value. The resulting image is then defined everywhere on the full rectangular domain, instead of a smaller window. Here \code{na.replace} should be a single value, of the same type as the other entries in the image. If \code{X} is a pixel image that was created by an older version of \pkg{spatstat}, the command \code{X <- as.im(X)} will repair the internal format of \code{X} so that it conforms to the current version of \pkg{spatstat}. 
If \code{X} is a data frame with \code{m} columns, then \code{m-2} columns of data are interpreted as pixel values, yielding \code{m-2} pixel images. The result of \code{as.im.data.frame} is a list of pixel images, belonging to the class \code{"imlist"}. If \code{m = 3} and \code{drop=TRUE} (the default), then the result is a pixel image rather than a list containing this image. } \value{ A pixel image (object of class \code{"im"}), or a list of pixel images, or \code{NULL} if the conversion failed. } \seealso{ Separate documentation for \code{\link{as.im.ppp}} } \examples{ data(demopat) # window object W <- Window(demopat) plot(W) Z <- as.im(W) image(Z) # function Z <- as.im(function(x,y) {x^2 + y^2}, unit.square()) image(Z) # function with extra arguments f <- function(x, y, x0, y0) { sqrt((x - x0)^2 + (y-y0)^2) } Z <- as.im(f, unit.square(), x0=0.5, y0=0.5) image(Z) # Revisit the Sixties data(letterR) Z <- as.im(f, letterR, x0=2.5, y0=2) image(Z) # usual convention in S stuff <- list(x=1:10, y=1:10, z=matrix(1:100, nrow=10)) Z <- as.im(stuff) # convert to finer grid Z <- as.im(Z, dimyx=256) # pixellate the Dirichlet tessellation Di <- dirichlet(runifpoint(10)) plot(as.im(Di)) plot(Di, add=TRUE) # as.im.data.frame is the reverse of as.data.frame.im grad <- bei.extra$grad slopedata <- as.data.frame(grad) slope <- as.im(slopedata) unitname(slope) <- c("metre","metres") all.equal(slope, grad) # TRUE } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/pool.rat.Rd0000644000176200001440000000655713160710621014727 0ustar liggesusers\name{pool.rat} \alias{pool.rat} \title{ Pool Data from Several Ratio Objects } \description{ Pool the data from several ratio objects (objects of class \code{"rat"}) and compute a pooled estimate. } \usage{ \method{pool}{rat}(..., weights=NULL, relabel=TRUE, variance=TRUE) } \arguments{ \item{\dots}{ Objects of class \code{"rat"}. } \item{weights}{ Numeric vector of weights. 
} \item{relabel}{ Logical value indicating whether the result should be relabelled to show that it was obtained by pooling. } \item{variance}{ Logical value indicating whether to compute the sample variance and related terms. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"rat"} of ratio objects. It is used to combine several estimates of the same quantity when each estimate is a ratio. Each of the arguments \code{\dots} must be an object of class \code{"rat"} representing a ratio object (basically a numerator and a denominator; see \code{\link{rat}}). We assume that these ratios are all estimates of the same quantity. If the objects are called \eqn{R_1, \ldots, R_n}{R[1], \dots, R[n]} and if \eqn{R_i}{R[i]} has numerator \eqn{Y_i}{Y[i]} and denominator \eqn{X_i}{X[i]}, so that notionally \eqn{R_i = Y_i/X_i}{R[i] = Y[i]/X[i]}, then the pooled estimate is the ratio-of-sums estimator \deqn{ R = \frac{\sum_i Y_i}{\sum_i X_i}. }{ R = (Y[1]+\dots+Y[n])/(X[1]+\dots+X[n]). } The standard error of \eqn{R} is computed using the delta method as described in Baddeley \emph{et al.} (1993) or Cochran (1977, pp 154, 161). If the argument \code{weights} is given, it should be a numeric vector of length equal to the number of objects to be pooled. The pooled estimator is the ratio-of-sums estimator \deqn{ R = \frac{\sum_i w_i Y_i}{\sum_i w_i X_i} }{ R = (w[1] * Y[1]+\dots+ w[n] * Y[n])/(w[1] * X[1]+\dots+w[n] * X[n]) } where \eqn{w_i}{w[i]} is the \code{i}th weight. This calculation is implemented only for certain classes of objects where the arithmetic can be performed. This calculation is currently implemented only for objects which also belong to the class \code{"fv"} (function value tables). For example, if \code{\link{Kest}} is called with argument \code{ratio=TRUE}, the result is a suitable object (belonging to the classes \code{"rat"} and \code{"fv"}). 
Warnings or errors will be issued if the ratio objects \code{\dots} appear to be incompatible. However, the code is not smart enough to decide whether it is sensible to pool the data. } \value{ An object of the same class as the input. } \seealso{ \code{\link{rat}}, \code{\link{pool}}, \code{\link{pool.fv}}, \code{\link{Kest}} } \examples{ K1 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K2 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K3 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K <- pool(K1, K2, K3) plot(K, pooliso ~ r, shade=c("hiiso", "loiso")) } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Cochran, W.G. (1977) \emph{Sampling techniques}, 3rd edition. New York: John Wiley and Sons. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/Saturated.Rd0000644000176200001440000000136513160710571015121 0ustar liggesusers\name{Saturated} \alias{Saturated} \title{Saturated Pairwise Interaction model} \description{ Experimental. } \usage{ Saturated(pot, name) } \arguments{ \item{pot}{An S language function giving the user-supplied pairwise interaction potential.} \item{name}{Character string.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is experimental. It constructs a member of the ``saturated pairwise'' family \code{\link{pairsat.family}}. 
} \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{SatPiece}}, \code{\link{ppm.object}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/cbind.hyperframe.Rd0000644000176200001440000000324613160710571016405 0ustar liggesusers\name{cbind.hyperframe} \alias{cbind.hyperframe} \alias{rbind.hyperframe} \title{ Combine Hyperframes by Rows or by Columns } \description{ Methods for \code{cbind} and \code{rbind} for hyperframes. } \usage{ \method{cbind}{hyperframe}(...) \method{rbind}{hyperframe}(...) } \arguments{ \item{\dots}{ Any number of hyperframes (objects of class \code{\link{hyperframe}}). } } \details{ These are methods for \code{\link{cbind}} and \code{\link{rbind}} for hyperframes. Note that \emph{all} the arguments must be hyperframes (because of the peculiar dispatch rules of \code{\link{cbind}} and \code{\link{rbind}}). To combine a hyperframe with a data frame, one should either convert the data frame to a hyperframe using \code{\link{as.hyperframe}}, or explicitly invoke the function \code{cbind.hyperframe} or \code{rbind.hyperframe}. In other words: if \code{h} is a hyperframe and \code{d} is a data frame, the result of \code{cbind(h,d)} will be the same as \code{cbind(as.data.frame(h), d)}, so that all hypercolumns of \code{h} will be deleted (and a warning will be issued). To combine \code{h} with \code{d} so that all columns of \code{h} are retained, type either \code{cbind(h, as.hyperframe(d))} or \code{cbind.hyperframe(h,d)}. } \value{ Another hyperframe. 
} \author{\adrian and \rolf } \seealso{ \code{\link{hyperframe}}, \code{\link{as.hyperframe}} } \examples{ lambda <- runif(5, min=10, max=30) X <- lapply(as.list(lambda), function(x) { rpoispp(x) }) h <- hyperframe(lambda=lambda, X=X) g <- hyperframe(id=letters[1:5], Y=rev(X)) gh <- cbind(h, g) hh <- rbind(h, h) } \keyword{spatial} \keyword{manip} spatstat/man/intensity.lpp.Rd0000644000176200001440000000203713160710621015776 0ustar liggesusers\name{intensity.lpp} \alias{intensity.lpp} \title{ Empirical Intensity of Point Pattern on Linear Network } \description{ Computes the average number of points per unit length in a point pattern on a linear network. } \usage{ \method{intensity}{lpp}(X, ...) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{intensity}}. It computes the empirical intensity of a point pattern on a linear network (object of class \code{"lpp"}), i.e. the average density of points per unit length. If the point pattern is multitype, the intensities of the different types are computed separately. } \value{ A numeric value (giving the intensity) or numeric vector (giving the intensity for each possible type). } \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppp}} } \examples{ intensity(chicago) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/ssf.Rd0000644000176200001440000000307013160710621013750 0ustar liggesusers\name{ssf} \alias{ssf} \title{ Spatially Sampled Function } \description{ Create an object that represents a spatial function which has been evaluated or sampled at an irregular set of points. } \usage{ ssf(loc, val) } \arguments{ \item{loc}{ The spatial locations at which the function has been evaluated. A point pattern (object of class \code{"ppp"}). } \item{val}{ The function values at these locations. 
A numeric vector with one entry for each point of \code{loc}, or a data frame with one row for each point of \code{loc}. } } \details{ An object of class \code{"ssf"} represents a real-valued or vector-valued function that has been evaluated or sampled at an irregular set of points. An example would be a spatial covariate that has only been measured at certain locations. An object of this class also inherits the class \code{"ppp"}, and is essentially the same as a marked point pattern, except for the class membership which enables it to be handled in a different way. There are methods for \code{plot}, \code{print} etc; see \code{\link{plot.ssf}} and \code{\link{methods.ssf}}. Use \code{\link[spatstat]{unmark}} to extract only the point locations, and \code{\link{marks.ssf}} to extract only the function values. } \value{ Object of class \code{"ssf"}. } \author{ \adrian } \seealso{ \code{\link{plot.ssf}}, \code{\link{methods.ssf}}, \code{\link{Smooth.ssf}}, \code{\link{with.ssf}}, \code{\link{[.ssf}}. } \examples{ ssf(cells, nndist(cells, k=1:3)) } \keyword{spatial} \keyword{datagen} spatstat/man/msr.Rd0000644000176200001440000001166413160710621013765 0ustar liggesusers\name{msr} \alias{msr} \title{ Signed or Vector-Valued Measure } \description{ Defines an object representing a signed measure or vector-valued measure on a spatial domain. } \usage{ msr(qscheme, discrete, density, check=TRUE) } \arguments{ \item{qscheme}{ A quadrature scheme (object of class \code{"quad"} usually extracted from a fitted point process model). } \item{discrete}{ Vector or matrix containing the values (masses) of the discrete component of the measure, for each of the data points in \code{qscheme}. } \item{density}{ Vector or matrix containing values of the density of the diffuse component of the measure, for each of the quadrature points in \code{qscheme}. } \item{check}{ Logical. Whether to check validity of the arguments. 
} } \details{ This function creates an object that represents a signed or vector valued \emph{measure} on the two-dimensional plane. It is not normally called directly by the user. A signed measure is a classical mathematical object (Diestel and Uhl, 1977) which can be visualised as a collection of electric charges, positive and/or negative, spread over the plane. Electric charges may be concentrated at specific points (atoms), or spread diffusely over a region. An object of class \code{"msr"} represents a signed (i.e. real-valued) or vector-valued measure in the \pkg{spatstat} package. Spatial residuals for point process models (Baddeley et al, 2005, 2008) take the form of a real-valued or vector-valued measure. The function \code{\link{residuals.ppm}} returns an object of class \code{"msr"} representing the residual measure. The function \code{msr} would not normally be called directly by the user. It is the low-level creator function that makes an object of class \code{"msr"} from raw data. The first argument \code{qscheme} is a quadrature scheme (object of class \code{"quad"}). It is typically created by \code{\link{quadscheme}} or extracted from a fitted point process model using \code{\link{quad.ppm}}. A quadrature scheme contains both data points and dummy points. The data points of \code{qscheme} are used as the locations of the atoms of the measure. All quadrature points (i.e. both data points and dummy points) of \code{qscheme} are used as sampling points for the density of the continuous component of the measure. The argument \code{discrete} gives the values of the atomic component of the measure for each \emph{data point} in \code{qscheme}. It should be either a numeric vector with one entry for each data point, or a numeric matrix with one row for each data point. The argument \code{density} gives the values of the \emph{density} of the diffuse component of the measure, at each \emph{quadrature point} in \code{qscheme}. 
It should be either a numeric vector with one entry for each quadrature point, or a numeric matrix with one row for each quadrature point. If both \code{discrete} and \code{density} are vectors (or one-column matrices) then the result is a signed (real-valued) measure. Otherwise, the result is a vector-valued measure, with the dimension of the vector space being determined by the number of columns in the matrices \code{discrete} and/or \code{density}. (If one of these is a \eqn{k}-column matrix and the other is a 1-column matrix, then the latter is replicated to \eqn{k} columns). The class \code{"msr"} has methods for \code{print}, \code{plot} and \code{[}. There is also a function \code{\link{Smooth.msr}} for smoothing a measure. } \value{ An object of class \code{"msr"} that can be plotted by \code{\link{plot.msr}}. } \references{ Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Diestel, J. and Uhl, J.J. Jr (1977) \emph{Vector measures}. Providence, RI, USA: American Mathematical Society. Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian } \seealso{ \code{\link{plot.msr}}, \code{\link{Smooth.msr}}, \code{\link{[.msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{Ops.msr}}, \code{\link{measureVariation}}. 
} \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp rs <- residuals(fit, type="score") rs colnames(rs) # An equivalent way to construct the Pearson residual measure by hand Q <- quad.ppm(fit) lambda <- fitted(fit) slam <- sqrt(lambda) Z <- is.data(Q) m <- msr(Q, discrete=1/slam[Z], density = -slam) m } \keyword{spatial} \keyword{models} spatstat/man/clickpoly.Rd0000644000176200001440000000410513160710571015151 0ustar liggesusers\name{clickpoly} \alias{clickpoly} \title{Interactively Define a Polygon} \description{ Allows the user to create a polygon by point-and-click in the display. } \usage{ clickpoly(add=FALSE, nv=NULL, np=1, \dots) } \arguments{ \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). } \item{nv}{ Number of vertices of the polygon (if this is predetermined). } \item{np}{ Number of polygons to create. } \item{\dots}{ Arguments passed to \code{\link[graphics]{locator}} to control the interactive plot, and to \code{\link[graphics]{polygon}} to plot the polygons. } } \value{ A window (object of class \code{"owin"}) representing the polygon. } \details{ This function allows the user to create a polygonal window by interactively clicking on the screen display. The user is prompted to point the mouse at any desired locations for the polygon vertices, and click the left mouse button to add each point. Interactive input stops after \code{nv} clicks (if \code{nv} was given) or when the middle mouse button is pressed. The return value is a window (object of class \code{"owin"}) representing the polygon. This function uses the \R command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. 
Arguments that can be passed to \code{\link[graphics]{locator}} through \code{\dots} include \code{pch} (plotting character), \code{cex} (character expansion factor) and \code{col} (colour). See \code{\link[graphics]{locator}} and \code{\link[graphics]{par}}. Multiple polygons can also be drawn, by specifying \code{np > 1}. The polygons must be disjoint. The result is a single window object consisting of all the polygons. } \seealso{ \code{\link{identify.ppp}}, \code{\link{clickbox}}, \code{\link{clickppp}}, \code{\link{clickdist}}, \code{\link[graphics]{locator}} } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{iplot} spatstat/man/runiflpp.Rd0000644000176200001440000000272113160710621015015 0ustar liggesusers\name{runiflpp} \alias{runiflpp} \title{ Uniform Random Points on a Linear Network } \description{ Generates \eqn{n} random points, independently and uniformly distributed, on a linear network. } \usage{ runiflpp(n, L, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of random points to generate. A nonnegative integer, or a vector of integers specifying the number of points of each type. } \item{L}{ A linear network (object of class \code{"linnet"}, see \code{\link{linnet}}). } \item{nsim}{Number of simulated realisations to generate.} \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), the result is a point pattern. If \code{drop=FALSE}, the result is a list with one entry which is a point pattern. } } \details{ This function uses \code{\link{runifpointOnLines}} to generate the random points. } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. Otherwise, a list of such point patterns. 
} \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{rlpp}} for non-uniform random points; \code{\link{rpoislpp}} for Poisson point process; \code{\link{lpp}}, \code{\link{linnet}} } \examples{ data(simplenet) X <- runiflpp(10, simplenet) plot(X) # marked Z <- runiflpp(c(a=10, b=3), simplenet) } \keyword{spatial} \keyword{datagen} spatstat/man/clickjoin.Rd0000644000176200001440000000432513160710571015131 0ustar liggesusers\name{clickjoin} \alias{clickjoin} \title{ Interactively join vertices on a plot } \description{ Given a point pattern representing a set of vertices, this command gives a point-and-click interface allowing the user to join pairs of selected vertices by edges. } \usage{ clickjoin(X, \dots, add = TRUE, m = NULL, join = TRUE) } \arguments{ \item{X}{ Point pattern of vertices. An object of class \code{"ppp"}. } \item{\dots}{ Arguments passed to \code{\link{segments}} to control the plotting of the new edges. } \item{add}{ Logical. Whether the point pattern \code{X} should be added to the existing plot (\code{add=TRUE}) or a new plot should be created (\code{add=FALSE}). } \item{m}{ Optional. Logical matrix specifying an initial set of edges. There is an edge between vertices \code{i} and \code{j} if \code{m[i,j] = TRUE}. } \item{join}{ Optional. If \code{TRUE}, then each user click will join a pair of vertices. If \code{FALSE}, then each user click will delete an existing edge. This is only relevant if \code{m} is supplied. } } \details{ This function makes it easier for the user to create a linear network or a planar graph, given a set of vertices. The function first displays the point pattern \code{X}, then repeatedly prompts the user to click on a pair of points in \code{X}. Each selected pair of points will be joined by an edge. The function returns a logical matrix which has entries equal to \code{TRUE} for each pair of vertices joined by an edge. 
The selection of points is performed using \code{\link{identify.ppp}} which typically expects the user to click the left mouse button. This point-and-click interaction continues until the user terminates it, by pressing the middle mouse button, or pressing the right mouse button and selecting \code{stop}. The return value can be used in \code{\link{linnet}} to create a linear network. } \value{ Logical matrix \code{m} with value \code{m[i,j] = TRUE} for every pair of vertices \code{X[i]} and \code{X[j]} that should be joined by an edge. } \author{ \adrian. } \seealso{ \code{\link{linnet}}, \code{\link{clickppp}} } \keyword{spatial} \keyword{datagen} spatstat/man/idw.Rd0000644000176200001440000000765313160710621013752 0ustar liggesusers\name{idw} \alias{idw} \title{Inverse-distance weighted smoothing of observations at irregular points} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations using inverse-distance weighting. } \usage{ idw(X, power=2, at="pixels", ...) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{power}{Numeric. Power of distance used in the weighting.} \item{at}{ String specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the pixel resolution of the result.} } \details{ This function performs spatial smoothing of numeric values observed at a set of irregular locations. Smoothing is performed by inverse distance weighting. 
If the observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]} at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively, then the smoothed value at a location \eqn{u} is \deqn{ g(u) = \frac{\sum_i w_i v_i}{\sum_i w_i} }{ g(u) = (sum of w[i] * v[i])/(sum of w[i]) } where the weights are the inverse \eqn{p}-th powers of distance, \deqn{ w_i = \frac 1 {d(u,x_i)^p} }{ w[i] = 1/d(u,x[i])^p } where \eqn{d(u,x_i) = ||u - x_i||}{d(u,x[i])} is the Euclidean distance from \eqn{u} to \eqn{x_i}{x[i]}. The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The points of the pattern are taken to be the observation locations \eqn{x_i}{x[i]}, and the marks of the pattern are taken to be the numeric values \eqn{v_i}{v[i]} observed at these locations. The marks are allowed to be a data frame. Then the smoothing procedure is applied to each column of marks. If \code{at="pixels"} (the default), the smoothed mark value is calculated at a grid of pixels, and the result is a pixel image. The arguments \code{\dots} control the pixel resolution. See \code{\link{as.mask}}. If \code{at="points"}, the smoothed mark values are calculated at the data points only, using a leave-one-out rule (the mark value at a data point is excluded when calculating the smoothed value for that point). An alternative to inverse-distance weighting is kernel smoothing, which is performed by \code{\link{Smooth.ppp}}. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). Pixel values are values of the interpolated function. \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. Entries are values of the interpolated function at the points of \code{X}. 
} \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{"solist"}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } } \seealso{ \code{\link{density.ppp}}, \code{\link{ppp.object}}, \code{\link{im.object}}. See \code{\link{Smooth.ppp}} for kernel smoothing and \code{\link{nnmark}} for nearest-neighbour interpolation. To perform other kinds of interpolation, see also the \code{akima} package. } \examples{ # data frame of marks: trees marked by diameter and height data(finpines) plot(idw(finpines)) idw(finpines, at="points")[1:5,] } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/rlabel.Rd0000644000176200001440000000423513160710621014421 0ustar liggesusers\name{rlabel} \alias{rlabel} \title{Random Re-Labelling of Point Pattern} \description{ Randomly allocates marks to a point pattern, or permutes the existing marks, or resamples from the existing marks. } \usage{ rlabel(X, labels=marks(X), permute=TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}). } \item{labels}{ Vector of values from which the new marks will be drawn at random. Defaults to the vector of existing marks. } \item{permute}{ Logical value indicating whether to generate new marks by randomly permuting \code{labels} or by drawing a random sample with replacement. } } \value{ A marked point pattern (of the same class as \code{X}). } \details{ This very simple function allocates random marks to an existing point pattern \code{X}. It is useful for hypothesis testing purposes. 
In the simplest case, the command \code{rlabel(X)} yields a point pattern obtained from \code{X} by randomly permuting the marks of the points. If \code{permute=TRUE}, then \code{labels} should be a vector of length equal to the number of points in \code{X}. The result of \code{rlabel} will be a point pattern with locations given by \code{X} and marks given by a random permutation of \code{labels} (i.e. a random sample without replacement). If \code{permute=FALSE}, then \code{labels} may be a vector of any length. The result of \code{rlabel} will be a point pattern with locations given by \code{X} and marks given by a random sample from \code{labels} (with replacement). } \seealso{ \code{\link{marks<-}} to assign arbitrary marks. } \examples{ data(amacrine) # Randomly permute the marks "on" and "off" # Result always has 142 "off" and 152 "on" Y <- rlabel(amacrine) # randomly allocate marks "on" and "off" # with probabilities p(off) = 0.48, p(on) = 0.52 Y <- rlabel(amacrine, permute=FALSE) # randomly allocate marks "A" and "B" with equal probability data(cells) Y <- rlabel(cells, labels=factor(c("A", "B")), permute=FALSE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/Kscaled.Rd0000644000176200001440000002165313160710571014535 0ustar liggesusers\name{Kscaled} \alias{Kscaled} \alias{Lscaled} \title{Locally Scaled K-function} \description{ Estimates the locally-rescaled \eqn{K}-function of a point process. } \usage{ Kscaled(X, lambda=NULL, \dots, r = NULL, breaks = NULL, rmax = 2.5, correction=c("border", "isotropic", "translate"), renormalise=FALSE, normpower=1, sigma=NULL, varcov=NULL) Lscaled(\dots) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the locally scaled \eqn{K} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()}. } \item{lambda}{ Optional. Values of the estimated intensity function. 
Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a \code{function(x,y)} which can be evaluated to give the intensity value at any location, or a fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Arguments passed from \code{Lscaled} to \code{Kscaled} and from \code{Kscaled} to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{r}{ vector of values for the argument \eqn{r} at which the locally scaled \eqn{K} function should be evaluated. (These are rescaled distances.) Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{rmax}{ maximum value of the argument \eqn{r} that should be used. (This is the rescaled distance). } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
Essentially a data frame containing at least the following columns, \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2}, the theoretical value of \eqn{K_{\rm scaled}(r)}{Kscaled(r)} for an inhomogeneous Poisson process } and containing additional columns according to the choice specified in the \code{correction} argument. The additional columns are named \code{border}, \code{trans} and \code{iso} and give the estimated values of \eqn{K_{\rm scaled}(r)}{Kscaled(r)} using the border correction, translation correction, and Ripley isotropic correction, respectively. } \details{ \code{Kscaled} computes an estimate of the \eqn{K} function for a locally scaled point process. \code{Lscaled} computes the corresponding \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. Locally scaled point processes are a class of models for inhomogeneous point patterns, introduced by Hahn et al (2003). They include inhomogeneous Poisson processes, and many other models. The template \eqn{K} function of a locally-scaled process is a counterpart of the ``ordinary'' Ripley \eqn{K} function, in which the distances between points of the process are measured on a spatially-varying scale (such that the locally rescaled process has unit intensity). The template \eqn{K} function is an indicator of interaction between the points. For an inhomogeneous Poisson process, the theoretical template \eqn{K} function is approximately equal to \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}. Values \eqn{K_{\rm scaled}(r) > \pi r^2}{Kscaled(r) > pi * r^2} are suggestive of clustering. \code{Kscaled} computes an estimate of the template \eqn{K} function and \code{Lscaled} computes the corresponding \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. The locally scaled interpoint distances are computed using an approximation proposed by Hahn (2007). 
The Euclidean distance between two points is multiplied by the average of the square roots of the intensity values at the two points. The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda}. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). 
The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. If \code{renormalise=TRUE}, the estimated intensity \code{lambda} is multiplied by \eqn{c^(normpower/2)} before performing other calculations, where \eqn{c = area(W)/sum[i] (1/lambda(x[i]))}. This renormalisation has about the same effect as in \code{\link{Kinhom}}, reducing the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. Edge corrections are used to correct bias in the estimation of \eqn{K_{\rm scaled}}{Kscaled}. First the interpoint distances are rescaled, and then edge corrections are applied as in \code{\link{Kest}}. See \code{\link{Kest}} for details of the edge corrections and the options for the argument \code{correction}. The pair correlation function can also be applied to the result of \code{Kscaled}; see \code{\link{pcf}} and \code{\link{pcf.fv}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. Hahn, U. (2007) \emph{Global and Local Scaling in the Statistics of Spatial Point Processes}. Habilitationsschrift, Universitaet Augsburg. Hahn, U., Jensen, E.B.V., van Lieshout, M.N.M. and Nielsen, L.S. (2003) Inhomogeneous spatial point processes by location-dependent scaling. \emph{Advances in Applied Probability} \bold{35}, 319--336. \Prokesova, M., Hahn, U. and Vedel Jensen, E.B. (2006) Statistics for locally scaled point patterns. In A. Baddeley, P. Gregori, J. Mateu, R. Stoica and D. Stoyan (eds.) 
\emph{Case Studies in Spatial Point Pattern Modelling}. Lecture Notes in Statistics 185. New York: Springer Verlag. Pages 99--123. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ data(bronzefilter) X <- unmark(bronzefilter) K <- Kscaled(X) fit <- ppm(X, ~x) lam <- predict(fit) K <- Kscaled(X, lam) } \author{Ute Hahn, \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/append.psp.Rd0000644000176200001440000000212613160710571015231 0ustar liggesusers\name{append.psp} \alias{append.psp} \title{Combine Two Line Segment Patterns} \description{ Combine two line segment patterns into a single pattern. } \usage{ append.psp(A, B) } \arguments{ \item{A,B}{ Line segment patterns (objects of class \code{"psp"}). } } \value{ Another line segment pattern (object of class \code{"psp"}). } \details{ This function is used to superimpose two line segment patterns \code{A} and \code{B}. The two patterns must have \bold{identical} windows. If one pattern has marks, then the other must also have marks of the same type. It the marks are data frames then the number of columns of these data frames, and the names of the columns must be identical. (To combine two point patterns, see \code{superimpose}). } \seealso{ \code{\link{psp}}, \code{\link{as.psp}}, \code{\link{superimpose}}, } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) Y <- psp(runif(5), runif(5), runif(5), runif(5), window=owin()) append.psp(X,Y) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/ppm.object.Rd0000644000176200001440000001357513160710621015230 0ustar liggesusers\name{ppm.object} \alias{ppm.object} %DoNotExport \alias{methods.ppm} %DoNotExport \title{Class of Fitted Point Process Models} \description{ A class \code{ppm} to represent a fitted stochastic model for a point process. The output of \code{\link{ppm}}. 
} \details{ An object of class \code{ppm} represents a stochastic point process model that has been fitted to a point pattern dataset. Typically it is the output of the model fitter, \code{\link{ppm}}. The class \code{ppm} has methods for the following standard generic functions: \tabular{lll}{ generic \tab method \tab description \cr \code{print} \tab \code{\link{print.ppm}} \tab print details \cr \code{plot} \tab \code{\link{plot.ppm}} \tab plot fitted model \cr \code{predict} \tab \code{\link{predict.ppm}} \tab fitted intensity and conditional intensity \cr \code{fitted} \tab \code{\link{fitted.ppm}} \tab fitted intensity \cr \code{coef} \tab \code{\link{coef.ppm}} \tab fitted coefficients of model \cr \code{anova} \tab \code{\link{anova.ppm}} \tab Analysis of Deviance \cr \code{formula} \tab \code{\link{formula.ppm}} \tab Extract model formula \cr \code{terms} \tab \code{\link{terms.ppm}} \tab Terms in the model formula \cr \code{labels} \tab \code{labels.ppm} \tab Names of estimable terms in the model formula \cr \code{residuals} \tab \code{\link{residuals.ppm}} \tab Point process residuals \cr \code{simulate} \tab \code{\link{simulate.ppm}} \tab Simulate the fitted model \cr \code{update} \tab \code{\link{update.ppm}} \tab Change or refit the model \cr \code{vcov} \tab \code{\link{vcov.ppm}} \tab Variance/covariance matrix of parameter estimates \cr \code{model.frame} \tab \code{\link{model.frame.ppm}} \tab Model frame \cr \code{model.matrix} \tab \code{\link{model.matrix.ppm}} \tab Design matrix \cr \code{logLik} \tab \code{\link{logLik.ppm}} \tab log \emph{pseudo} likelihood \cr \code{extractAIC} \tab \code{\link{extractAIC.ppm}} \tab pseudolikelihood counterpart of AIC \cr \code{nobs} \tab \code{\link{nobs.ppm}} \tab number of observations } Objects of class \code{ppm} can also be handled by the following standard functions, without requiring a special method: \tabular{ll}{ name \tab description \cr \code{\link{confint}} \tab Confidence intervals for 
parameters \cr \code{\link{step}} \tab Stepwise model selection \cr \code{\link{drop1}} \tab One-step model improvement \cr \code{\link{add1}} \tab One-step model improvement } The class \code{ppm} also has methods for the following generic functions defined in the \pkg{spatstat} package: \tabular{lll}{ generic \tab method \tab description \cr \code{\link{as.interact}} \tab \code{\link{as.interact.ppm}} \tab Interpoint interaction structure \cr \code{\link{as.owin}} \tab \code{\link{as.owin.ppm}} \tab Observation window of data \cr \code{\link{berman.test}} \tab \code{\link{berman.test.ppm}} \tab Berman's test \cr \code{\link{envelope}} \tab \code{\link{envelope.ppm}} \tab Simulation envelopes \cr \code{\link{fitin}} \tab \code{\link{fitin.ppm}} \tab Fitted interaction \cr \code{\link{is.marked}} \tab \code{\link{is.marked.ppm}} \tab Determine whether the model is marked \cr \code{\link{is.multitype}} \tab \code{\link{is.multitype.ppm}} \tab Determine whether the model is multitype \cr \code{\link{is.poisson}} \tab \code{\link{is.poisson.ppm}} \tab Determine whether the model is Poisson \cr \code{\link{is.stationary}} \tab \code{\link{is.stationary.ppm}} \tab Determine whether the model is stationary \cr \code{\link{cdf.test}} \tab \code{\link{cdf.test.ppm}} \tab Spatial distribution test \cr \code{\link{quadrat.test}} \tab \code{\link{quadrat.test.ppm}} \tab Quadrat counting test \cr \code{\link{reach}} \tab \code{\link{reach.ppm}} \tab Interaction range of model \cr \code{\link{rmhmodel}} \tab \code{\link{rmhmodel.ppm}} \tab Model in a form that can be simulated \cr \code{\link{rmh}} \tab \code{\link{rmh.ppm}} \tab Perform simulation \cr \code{\link{unitname}} \tab \code{\link{unitname.ppm}} \tab Name of unit of length } Information about the data (to which the model was fitted) can be extracted using \code{\link{data.ppm}}, \code{\link{dummy.ppm}} and \code{\link{quad.ppm}}. 
} \section{Internal format}{ If you really need to get at the internals, a \code{ppm} object contains at least the following entries: \tabular{ll}{ \code{coef} \tab the fitted regular parameters (as returned by \code{glm}) \cr \code{trend} \tab the trend formula or \code{NULL} \cr \code{interaction} \tab the point process interaction family (an object of class \code{"interact"}) or \code{NULL} \cr \code{Q} \tab the quadrature scheme used \cr \code{maxlogpl} \tab the maximised value of log pseudolikelihood \cr \code{correction} \tab name of edge correction method used \cr } See \code{\link{ppm}} for explanation of these concepts. The irregular parameters (e.g. the interaction radius of the Strauss process) are encoded in the \code{interaction} entry. However see the Warnings. } \seealso{ \code{\link{ppm}}, \code{\link{coef.ppm}}, \code{\link{fitted.ppm}}, \code{\link{print.ppm}}, \code{\link{predict.ppm}}, \code{\link{plot.ppm}}. } \section{Warnings}{ The internal representation of \code{ppm} objects may change slightly between releases of the \pkg{spatstat} package. } \examples{ data(cells) fit <- ppm(cells, ~ x, Strauss(0.1), correction="periodic") fit coef(fit) \dontrun{ pred <- predict(fit) } pred <- predict(fit, ngrid=20, type="trend") \dontrun{ plot(fit) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/rcell.Rd0000644000176200001440000000646113160710621014264 0ustar liggesusers\name{rcell} \alias{rcell} \title{Simulate Baddeley-Silverman Cell Process} \description{ Generates a random point pattern, a simulated realisation of the Baddeley-Silverman cell process model. } \usage{ rcell(win=square(1), nx=NULL, ny=nx, \dots, dx=NULL, dy=dx, N=10, nsim=1, drop=TRUE) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{ Number of columns of cells in the window. Incompatible with \code{dx}. } \item{ny}{ Number of rows of cells in the window. 
Incompatible with \code{dy}. } \item{\dots}{Ignored.} \item{dx}{ Width of the cells. Incompatible with \code{nx}. } \item{dy}{ Height of the cells. Incompatible with \code{ny}. } \item{N}{ Integer. Distributional parameter: the maximum number of random points in each cell. Passed to \code{\link{rcellnumber}}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates a simulated realisation of the \dQuote{cell process} (Baddeley and Silverman, 1984), a random point process with the same second-order properties as the uniform Poisson process. In particular, the \eqn{K} function of this process is identical to the \eqn{K} function of the uniform Poisson process (aka Complete Spatial Randomness). The same holds for the pair correlation function and all other second-order properties. The cell process is a counterexample to the claim that the \eqn{K} function completely characterises a point pattern. A cell process is generated by dividing space into equal rectangular tiles. In each tile, a random number of random points is placed. By default, there are either \eqn{0}, \eqn{1} or \eqn{10} points, with probabilities \eqn{1/10}, \eqn{8/9} and \eqn{1/90} respectively. The points within a tile are independent and uniformly distributed in that tile, and the numbers of points in different tiles are independent random integers. The tile width is determined either by the number of columns \code{nx} or by the horizontal spacing \code{dx}. The tile height is determined either by the number of rows \code{ny} or by the vertical spacing \code{dy}. The cell process is then generated in these tiles. 
The random numbers of points are generated by \code{\link{rcellnumber}}. Some of the resulting random points may lie outside the window \code{win}: if they do, they are deleted. The result is a point pattern inside the window \code{win}. } \seealso{ \code{\link{rcellnumber}}, \code{\link{rstrat}}, \code{\link{rsyst}}, \code{\link{runifpoint}}, \code{\link{Kest}} } \examples{ X <- rcell(nx=15) plot(X) plot(Kest(X)) } \references{ Baddeley, A.J. and Silverman, B.W. (1984) A cautionary example on the use of second-order methods for analyzing point patterns. \emph{Biometrics} \bold{40}, 1089-1094. } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/interp.colourmap.Rd0000644000176200001440000000261613160710621016462 0ustar liggesusers\name{interp.colourmap} \alias{interp.colourmap} \title{ Interpolate smoothly between specified colours } \description{ Given a colourmap object which maps numbers to colours, this function interpolates smoothly between the colours, yielding a new colour map. } \usage{ interp.colourmap(m, n = 512) } \arguments{ \item{m}{ A colour map (object of class \code{"colourmap"}). } \item{n}{ Number of colour steps to be created in the new colour map. } } \details{ Given a colourmap object \code{m}, which maps numerical values to colours, this function interpolates the mapping, yielding a new colour map. This makes it easy to build a colour map that has smooth gradation between different colours or shades. First specify a small vector of numbers \code{x} which should be mapped to specific colours \code{y}. Use \code{m <- colourmap(y, inputs=x)} to create a colourmap that represents this simple mapping. Then apply \code{interp.colourmap(m)} to obtain a smooth transition between these points. } \value{ Another colour map (object of class \code{"colourmap"}). } \seealso{ \code{\link{colourmap}}, \code{\link{tweak.colourmap}}, \code{\link[spatstat:colourtools]{colourtools}}. 
} \examples{ co <- colourmap(inputs=c(0, 0.5, 1), c("black", "red", "white")) plot(interp.colourmap(co)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{color} spatstat/man/overlap.owin.Rd0000644000176200001440000000160013160710621015574 0ustar liggesusers\name{overlap.owin} \alias{overlap.owin} \title{ Compute Area of Overlap } \description{ Computes the area of the overlap (intersection) of two windows. } \usage{ overlap.owin(A, B) } \arguments{ \item{A,B}{ Windows (objects of class \code{"owin"}). } } \details{ This function computes the area of the overlap between the two windows \code{A} and \code{B}. If one of the windows is a binary mask, then both windows are converted to masks on the same grid, and the area is computed by counting pixels. Otherwise, the area is computed analytically (using the discrete Stokes theorem). } \value{ A single numeric value. } \seealso{ \code{\link{intersect.owin}}, \code{\link{area.owin}}, \code{\link{setcov}}. } \examples{ A <- square(1) B <- shift(A, c(0.3, 0.2)) overlap.owin(A, B) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math}spatstat/man/ellipse.Rd0000644000176200001440000000411613160710571014617 0ustar liggesusers\name{ellipse} \alias{ellipse} \title{ Elliptical Window. } \description{ Create an elliptical window. } \usage{ ellipse(a, b, centre=c(0,0), phi=0, \dots, mask=FALSE, npoly = 128) } \arguments{ \item{a,b}{ The half-lengths of the axes of the ellipse. } \item{centre}{ The centre of the ellipse. } \item{phi}{ The (anti-clockwise) angle through which the ellipse should be rotated (about its centre) starting from an orientation in which the axis of half-length \code{a} is horizontal. } \item{mask}{ Logical value controlling the type of approximation to a perfect ellipse. See Details. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution, if \code{mask} is \code{TRUE}. } \item{npoly}{ The number of edges in the polygonal approximation to the ellipse. 
} } \details{ This command creates a window object representing an ellipse with the given centre and axes. By default, the ellipse is approximated by a polygon with \code{npoly} edges. If \code{mask=TRUE}, then the ellipse is approximated by a binary pixel mask. The resolution of the mask is controlled by the arguments \code{\dots} which are passed to \code{\link{as.mask}}. The arguments \code{a} and \code{b} must be single positive numbers. The argument \code{centre} specifies the ellipse centre: it can be either a numeric vector of length 2 giving the coordinates, or a \code{list(x,y)} giving the coordinates of exactly one point, or a point pattern (object of class \code{"ppp"}) containing exactly one point. } \value{ An object of class \code{owin} (either of type \dQuote{polygonal} or of type \dQuote{mask}) specifying an elliptical window. } \author{\adrian and \rolf } \seealso{ \code{\link{disc}}, \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.mask}} } \examples{ W <- ellipse(a=5,b=2,centre=c(5,1),phi=pi/6) plot(W,lwd=2,border="red") WM <- ellipse(a=5,b=2,centre=c(5,1),phi=pi/6,mask=TRUE,dimyx=512) plot(WM,add=TRUE,box=FALSE) } \keyword{spatial} \keyword{datagen} spatstat/man/cauchy.estK.Rd0000644000176200001440000001302013160710571015335 0ustar liggesusers\name{cauchy.estK} \alias{cauchy.estK} \title{Fit the Neyman-Scott cluster process with Cauchy kernel} \description{ Fits the Neyman-Scott Cluster point process with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast. } \usage{ cauchy.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. 
} \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Neyman-Scott cluster point process model with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Neyman-Scott cluster point process with Cauchy kernel to \code{X}, by finding the parameters of the Neyman-Scott Cauchy model which give the closest match between the theoretical \eqn{K} function of the Neyman-Scott Cauchy process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{\kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{\mu}, and the locations of the offspring points of one parent follow a common distribution described in Jalilian et al (2013). If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{\lambda}. 
Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{\lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{\lambda} cannot be estimated, and the parameter \eqn{\mu}{\mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rCauchy}}. For computational reasons, the optimisation procedure uses the parameter \code{eta2}, which is equivalent to \code{4 * scale^2} where \code{scale} is the scale parameter for the model as used in \code{\link{rCauchy}}. Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Ghorbani, M. (2012) Cauchy cluster process. \emph{Metrika}, to appear. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. 
\emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{cauchy.estpcf}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{vargamma.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{Kmodel}}. \code{\link{rCauchy}} to simulate the model. } \examples{ u <- cauchy.estK(redwood) u plot(u) } \keyword{spatial} \keyword{models} spatstat/man/triangulate.owin.Rd0000644000176200001440000000236713160710621016456 0ustar liggesusers\name{triangulate.owin} \alias{triangulate.owin} \title{ Decompose Window into Triangles } \description{ Given a spatial window, this function decomposes the window into disjoint triangles. The result is a tessellation of the window in which each tile is a triangle. } \usage{ triangulate.owin(W) } \arguments{ \item{W}{Window (object of class \code{"owin"}).} } \details{ The window \code{W} will be decomposed into disjoint triangles. The result is a tessellation of \code{W} in which each tile is a triangle. All triangle vertices lie on the boundary of the original polygon. The window is first converted to a polygonal window using \code{\link{as.polygonal}}. The vertices of the polygonal window are extracted, and the Delaunay triangulation of these vertices is computed using \code{\link{delaunay}}. Each Delaunay triangle is intersected with the window: if the result is not a triangle, the triangulation procedure is applied recursively to this smaller polygon. } \value{ Tessellation (object of class \code{"tess"}). 
} \author{ \spatstatAuthors } \seealso{ \code{\link{tess}}, \code{\link{delaunay}}, \code{\link{as.polygonal}} } \examples{ plot(triangulate.owin(letterR)) } \keyword{spatial} \keyword{manip} spatstat/man/linearpcfdot.Rd0000644000176200001440000000537413160710621015637 0ustar liggesusers\name{linearpcfdot} \alias{linearpcfdot} \title{ Multitype Pair Correlation Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype pair correlation function from points of type \eqn{i} to points of any type. } \usage{ linearpcfdot(X, i, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfdot}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. 
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}. \code{\link{pcfcross}}. } \examples{ data(chicago) g <- linearpcfdot(chicago, "assault") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/pairdist.Rd0000644000176200001440000000257713160710621015004 0ustar liggesusers\name{pairdist} \alias{pairdist} \title{Pairwise distances} \description{ Computes the matrix of distances between all pairs of `things' in a dataset } \usage{ pairdist(X, \dots) } \arguments{ \item{X}{ Object specifying the locations of a set of `things' (such as a set of points or a set of line segments). } \item{\dots}{ Further arguments depending on the method. } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the `things' numbered \code{i} and \code{j}. } \details{ Given a dataset \code{X} (representing either a point pattern or a line segment pattern) \code{pairdist} computes the distance between each pair of `things' in the dataset, and returns a matrix containing these distances. The function \code{pairdist} is generic, with methods for point patterns (objects of class \code{"ppp"}), line segment patterns (objects of class \code{"psp"}) and a default method. 
See the documentation for \code{\link{pairdist.ppp}}, \code{\link{pairdist.psp}} or \code{\link{pairdist.default}} for details. } \seealso{ \code{\link{pairdist.ppp}}, \code{\link{pairdist.psp}}, \code{\link{pairdist.default}}, \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{Kest}} } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/residuals.mppm.Rd0000644000176200001440000000476113160710621016127 0ustar liggesusers\name{residuals.mppm} \alias{residuals.mppm} \title{Residuals for Point Process Model Fitted to Multiple Point Patterns} \description{ Given a point process model fitted to multiple point patterns, compute residuals for each pattern. } \usage{ \method{residuals}{mppm}(object, type = "raw", ..., fittedvalues = fitted.mppm(object)) } \arguments{ \item{object}{Fitted point process model (object of class \code{"mppm"}).} \item{\dots}{Ignored.} \item{type}{Type of residuals: either \code{"raw"}, \code{"pearson"} or \code{"inverse"}. Partially matched.} \item{fittedvalues}{Advanced use only. Fitted values of the model to be used in the calculation. } } \details{ Baddeley et al (2005) defined residuals for the fit of a point process model to spatial point pattern data. For an explanation of these residuals, see the help file for \code{\link{residuals.ppm}}. This function computes the residuals for a point process model fitted to \emph{multiple} point patterns. The \code{object} should be an object of class \code{"mppm"} obtained from \code{\link{mppm}}. The return value is a list. The number of entries in the list equals the number of point patterns in the original data. Each entry in the list has the same format as the output of \code{\link{residuals.ppm}}. That is, each entry in the list is a signed measure (object of class \code{"msr"}) giving the residual measure for the corresponding point pattern. 
} \value{ A list of signed measures (objects of class \code{"msr"}) giving the residual measure for each of the original point patterns. See Details. } \examples{ fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders)) r <- residuals(fit) # compute total residual for each point pattern rtot <- sapply(r, integral.msr) # standardise the total residuals areas <- sapply(windows.mppm(fit), area.owin) rtot/sqrt(areas) } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{mppm}}, \code{\link{residuals.ppm}} } \keyword{spatial} \keyword{models} spatstat/man/as.mask.Rd0000644000176200001440000000662713160710571014520 0ustar liggesusers\name{as.mask} \alias{as.mask} \title{Pixel Image Approximation of a Window} \description{ Obtain a discrete (pixel image) approximation of a given window } \usage{ as.mask(w, eps=NULL, dimyx=NULL, xy=NULL) } \arguments{ \item{w}{A window (object of class \code{"owin"}) or data acceptable to \code{\link{as.owin}}.} \item{eps}{(optional) width and height of pixels.} \item{dimyx}{(optional) pixel array dimensions} \item{xy}{(optional) data containing pixel coordinates} } \value{ A window (object of class \code{"owin"}) of type \code{"mask"} representing a binary pixel image. } \details{ This function generates a rectangular grid of locations in the plane, tests whether each of these locations lies inside the window \code{w}, and stores the results as a binary pixel image or `mask' (an object of class \code{"owin"}, see \code{\link{owin.object}}). 
The most common use of this function is to approximate the shape of another window \code{w} by a binary pixel image. In this case, we will usually want to have a very fine grid of pixels. This function can also be used to generate a coarsely-spaced grid of locations inside a window, for purposes such as subsampling and prediction. The grid spacing and location are controlled by the arguments \code{eps}, \code{dimyx} and \code{xy}, which are mutually incompatible. If \code{eps} is given, then it determines the grid spacing. If \code{eps} is a single number, then the grid spacing will be approximately \code{eps} in both the \eqn{x} and \eqn{y} directions. If \code{eps} is a vector of length 2, then the grid spacing will be approximately \code{eps[1]} in the \eqn{x} direction and \code{eps[2]} in the \eqn{y} direction. If \code{dimyx} is given, then the pixel grid will be an \eqn{m \times n}{m * n} rectangular grid where \eqn{m, n} are given by \code{dimyx[2]}, \code{dimyx[1]} respectively. \bold{Warning:} \code{dimyx[1]} is the number of pixels in the \eqn{y} direction, and \code{dimyx[2]} is the number in the \eqn{x} direction. If \code{xy} is given, then this should be some kind of data specifying the coordinates of a pixel grid. It may be \itemize{ \item a list or structure containing elements \code{x} and \code{y} which are numeric vectors of equal length. These will be taken as \eqn{x} and \eqn{y} coordinates of the margins of the grid. The pixel coordinates will be generated from these two vectors. \item a pixel image (object of class \code{"im"}). \item a window (object of class \code{"owin"}) which is of type \code{"mask"} so that it contains pixel coordinates. } If \code{xy} is given, \code{w} may be omitted. If neither \code{eps} nor \code{dimyx} nor \code{xy} is given, the pixel raster dimensions are obtained from \code{\link{spatstat.options}("npixel")}. There is no inverse of this function. 
However, the function \code{\link{as.polygonal}} will compute a polygonal approximation of a binary mask. } \seealso{ \code{\link{owin.object}}, \code{\link{as.rectangle}}, \code{\link{as.polygonal}}, \code{\link{spatstat.options}} } \examples{ w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7))) \dontrun{plot(w)} m <- as.mask(w) \dontrun{plot(m)} x <- 1:9 y <- seq(0.25, 9.75, by=0.5) m <- as.mask(w, xy=list(x=x, y=y)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/as.data.frame.hyperframe.Rd0000644000176200001440000000275113160710571017732 0ustar liggesusers\name{as.data.frame.hyperframe} \alias{as.data.frame.hyperframe} \title{Coerce Hyperframe to Data Frame} \description{ Converts a hyperframe to a data frame. } \usage{ \method{as.data.frame}{hyperframe}(x, row.names = NULL, optional = FALSE, ..., discard=TRUE, warn=TRUE) } \arguments{ \item{x}{Hyperframe (object of class \code{"hyperframe"}).} \item{row.names}{Optional character vector of row names.} \item{optional}{Argument passed to \code{\link{as.data.frame}} controlling what happens to row names.} \item{\dots}{Ignored.} \item{discard}{Logical. Whether to discard columns of the hyperframe that do not contain atomic data. See Details. } \item{warn}{Logical. Whether to issue a warning when columns are discarded.} } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class of hyperframes (see \code{\link{hyperframe}}. If \code{discard=TRUE}, any columns of the hyperframe that do not contain atomic data will be removed (and a warning will be issued if \code{warn=TRUE}). If \code{discard=FALSE}, then such columns are converted to strings indicating what class of data they originally contained. } \value{ A data frame. 
} \examples{ h <- hyperframe(X=1:3, Y=letters[1:3], f=list(sin, cos, tan)) as.data.frame(h, discard=TRUE, warn=FALSE) as.data.frame(h, discard=FALSE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/affine.linnet.Rd0000644000176200001440000000505513160710571015705 0ustar liggesusers\name{affine.linnet} \alias{affine.linnet} \alias{shift.linnet} \alias{rotate.linnet} \alias{rescale.linnet} \alias{scalardilate.linnet} \title{Apply Geometrical Transformations to a Linear Network} \description{ Apply geometrical transformations to a linear network. } \usage{ \method{affine}{linnet}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) \method{shift}{linnet}(X, vec=c(0,0), \dots, origin=NULL) \method{rotate}{linnet}(X, angle=pi/2, \dots, centre=NULL) \method{scalardilate}{linnet}(X, f, \dots) \method{rescale}{linnet}(X, s, unitname) } \arguments{ \item{X}{Linear network (object of class \code{"linnet"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{angle}{Rotation angle in radians.} \item{f}{Scalar dilation factor.} \item{s}{ Unit conversion factor: the new units are \code{s} times the old units. } \item{\dots}{ Arguments passed to other methods. } \item{origin}{ Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } \item{unitname}{ Optional. New name for the unit of length. A value acceptable to the function \code{\link{unitname<-}} } } \value{ Another linear network (of class \code{"linnet"}) representing the result of applying the geometrical transformation. 
} \details{ These functions are methods for the generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{rescale}} and \code{\link{scalardilate}} applicable to objects of class \code{"linnet"}. All of these functions perform geometrical transformations on the object \code{X}, except for \code{rescale}, which simply rescales the units of length. } \seealso{ \code{\link{linnet}} and \code{\link{as.linnet}}. Generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{rescale}}. } \examples{ U <- rotate(simplenet, pi) stretch <- diag(c(2,3)) Y <- affine(simplenet, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(simplenet, mat=shear, vec=c(0, 1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/layered.Rd0000644000176200001440000000564313160710621014611 0ustar liggesusers\name{layered} \alias{layered} \title{ Create List of Plotting Layers } \description{ Given several objects which are capable of being plotted, create a list containing these objects as if they were successive layers of a plot. The list can then be plotted in different ways. } \usage{ layered(..., plotargs = NULL, LayerList=NULL) } \arguments{ \item{\dots}{ Objects which can be plotted by \code{plot}. } \item{plotargs}{ Default values of the plotting arguments for each of the objects. A list of lists of arguments of the form \code{name=value}. } \item{LayerList}{ A list of objects. Incompatible with \code{\dots}. } } \details{ Layering is a simple mechanism for controlling a high-level plot that is composed of several successive plots, for example, a background and a foreground plot. The layering mechanism makes it easier to issue the plot command, to switch on or off the plotting of each individual layer, to control the plotting arguments that are passed to each layer, and to zoom in. 
Each individual layer in the plot should be saved as an object that can be plotted using \code{plot}. It will typically belong to some class, which has a method for the generic function \code{plot}. The command \code{layered} simply saves the objects \code{\dots} as a list of class \code{"layered"}. This list can then be plotted by the method \code{\link{plot.layered}}. Thus, you only need to type a single \code{plot} command to produce the multi-layered plot. Individual layers of the plot can be switched on or off, or manipulated, using arguments to \code{\link{plot.layered}}. The argument \code{plotargs} contains default values of the plotting arguments for each layer. It should be a list, with one entry for each object in \code{\dots}. Each entry of \code{plotargs} should be a list of arguments in the form \code{name=value}, which are recognised by the \code{plot} method for the relevant layer. The \code{plotargs} can also include an argument named \code{.plot} specifying (the name of) a function to perform the plotting instead of the generic \code{plot}. The length of \code{plotargs} should either be equal to the number of layers, or equal to 1. In the latter case it will be replicated to the appropriate length. } \value{ A list, belonging to the class \code{"layered"}. There are methods for \code{plot}, \code{"["}, \code{"shift"}, \code{"affine"}, \code{"rotate"} and \code{"rescale"}. } \author{\adrian and \rolf } \seealso{ \code{\link{plot.layered}}, \code{\link{methods.layered}}, \code{\link{as.layered}}, \code{\link{[.layered}}, \code{\link{layerplotargs}}. 
} \examples{ D <- distmap(cells) L <- layered(D, cells) L L <- layered(D, cells, plotargs=list(list(ribbon=FALSE), list(pch=16))) plot(L) layerplotargs(L)[[1]] <- list(.plot="contour") plot(L) } \keyword{spatial} \keyword{hplot} spatstat/man/pairs.linim.Rd0000644000176200001440000000402113160710621015376 0ustar liggesusers\name{pairs.linim} \alias{pairs.linim} \title{ Scatterplot Matrix for Pixel Images on a Linear Network } \description{ Produces a scatterplot matrix of the pixel values in two or more pixel images on a linear network. } \usage{ \method{pairs}{linim}(..., plot=TRUE, eps=NULL) } \arguments{ \item{\dots}{ Any number of arguments, each of which is either a pixel image on a linear network (object of class \code{"linim"}), a pixel image (object of class \code{"im"}), or a named argument to be passed to \code{\link{pairs.default}}. } \item{plot}{ Logical. If \code{TRUE}, the scatterplot matrix is plotted. } \item{eps}{ Optional. Spacing between sample points on the network. A positive number. } } \details{ This is a method for the generic function \code{\link{pairs}} for the class of pixel images on a linear network. It produces a square array of plot panels, in which each panel shows a scatterplot of the pixel values of one image against the corresponding pixel values of another image. At least two of the arguments \code{\dots} should be a pixel image on a linear network (object of class \code{"linim"}). They should be defined on the \bold{same} linear network, but may have different pixel resolutions. First the pixel values of each image are extracted at a set of sample points equally-spaced across the network. Then \code{\link{pairs.default}} is called to plot the scatterplot matrix. Any arguments in \code{\dots} which are not pixel images will be passed to \code{\link{pairs.default}} to control the plot. } \value{ Invisible. A \code{data.frame} containing the corresponding pixel values for each image. 
The return value also belongs to the class \code{plotpairsim} which has a plot method, so that it can be re-plotted. } \seealso{ \code{\link{pairs.default}}, \code{\link{pairs.im}} } \examples{ fit <- lppm(chicago ~ marks * (x+y)) lam <- predict(fit) do.call(pairs, lam) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{hplot} spatstat/man/progressreport.Rd0000644000176200001440000000756113160710621016265 0ustar liggesusers\name{progressreport} \alias{progressreport} \title{Print Progress Reports} \description{ Prints Progress Reports during a loop or iterative calculation. } \usage{ progressreport(i, n, every = min(100,max(1, ceiling(n/100))), tick = 1, nperline = NULL, charsperline = getOption("width"), style = spatstat.options("progress"), showtime = NULL, state=NULL) } \arguments{ \item{i}{ Integer. The current iteration number (from 1 to \code{n}). } \item{n}{ Integer. The (maximum) number of iterations to be computed. } \item{every}{ Optional integer. Iteration number will be printed when \code{i} is a multiple of \code{every}. } \item{tick}{ Optional integer. A tick mark or dot will be printed when \code{i} is a multiple of \code{tick}. } \item{nperline}{ Optional integer. Number of iterations per line of output. } \item{charsperline}{ Optional integer. The number of characters in a line of output. } \item{style}{ Character string determining the style of display. Options are \code{"tty"} (the default), \code{"tk"} and \code{"txtbar"}. See Details. } \item{showtime}{ Optional. Logical value indicating whether to print the estimated time remaining. Applies only when \code{style="tty"}. } \item{state}{ Optional. A list containing the internal data. } } \details{ This is a convenient function for reporting progress during an iterative sequence of calculations or a suite of simulations. \itemize{ \item If \code{style="tk"} then \code{tcltk::tkProgressBar} is used to pop-up a new graphics window showing a progress bar. This requires the package \pkg{tcltk}. 
As \code{i} increases from 1 to \code{n}, the bar will lengthen. The arguments \code{every, tick, nperline, showtime} are ignored. \item If \code{style="txtbar"} then \code{\link[utils]{txtProgressBar}} is used to represent progress as a bar made of text characters in the \R interpreter window. As \code{i} increases from 1 to \code{n}, the bar will lengthen. The arguments \code{every, tick, nperline, showtime} are ignored. \item If \code{style="tty"} (the default), then progress reports are printed to the console. This only seems to work well under Linux. As \code{i} increases from 1 to \code{n}, the output will be a sequence of dots (one dot for every \code{tick} iterations), iteration numbers (printed when iteration number is a multiple of \code{every} or is less than 4), and optionally the estimated time remaining. For example \code{[etd 1:20:05]} means an estimated time of 1 hour, 20 minutes and 5 seconds until finished. The estimated time remaining will be printed only if \code{style="tty"}, and the argument \code{state} is given, and either \code{showtime=TRUE}, or \code{showtime=NULL} and the iterations are slow (defined as: the estimated time remaining is longer than 3 minutes, or the average time per iteration is longer than 20 seconds). } It is optional, but strongly advisable, to use the argument \code{state} to store and update the internal data for the progress reports (such as the cumulative time taken for computation) as shown in the last example below. This avoids conflicts with other programs that might be calling \code{progressreport} at the same time. } \value{ If \code{state} was \code{NULL}, the result is \code{NULL}. Otherwise the result is the updated value of \code{state}. } \author{ \spatstatAuthors. } \examples{ for(i in 1:40) { # # code that does something... # progressreport(i, 40) } # saving internal state: *recommended* sta <- list() for(i in 1:20) { # some code ... 
sta <- progressreport(i, 20, state=sta) } } \keyword{print} spatstat/man/summary.im.Rd0000644000176200001440000000360613160710621015262 0ustar liggesusers\name{summary.im} \alias{summary.im} \alias{print.summary.im} \title{Summarizing a Pixel Image} \description{ \code{summary} method for class \code{"im"}. } \usage{ \method{summary}{im}(object, \dots) \method{print}{summary.im}(x, \dots) } \arguments{ \item{object}{A pixel image.} \item{\dots}{Ignored.} \item{x}{Object of class \code{"summary.im"} as returned by \code{summary.im}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"im"}. An object of class \code{"im"} describes a pixel image. See \code{\link{im.object}}) for details of this class. \code{summary.im} extracts information about the pixel image, and \code{print.summary.im} prints this information in a comprehensible format. In normal usage, \code{print.summary.im} is invoked implicitly when the user calls \code{summary.im} without assigning its value to anything. See the examples. The information extracted by \code{summary.im} includes \describe{ \item{range}{The range of the image values.} \item{mean}{The mean of the image values.} \item{integral}{The ``integral'' of the image values, calculated as the sum of the image values multiplied by the area of one pixel.} \item{dim}{The dimensions of the pixel array: \code{dim[1]} is the number of rows in the array, corresponding to the \bold{y} coordinate.} } } \value{ \code{summary.im} returns an object of class \code{"summary.im"}, while \code{print.summary.im} returns \code{NULL}. 
} \seealso{ \code{\link{mean.im}}, \code{\link{integral.im}}, \code{\link{anyNA.im}} } \examples{ # make an image X <- as.im(function(x,y) {x^2}, unit.square()) # summarize it summary(X) # save the summary s <- summary(X) # print it print(s) s # extract stuff s$dim s$range s$integral } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/Kest.Rd0000644000176200001440000003166513160710571014101 0ustar liggesusers\name{Kest} \alias{Kest} \title{K-function} \description{ Estimates Ripley's reduced second moment function \eqn{K(r)} from a point pattern in a window of arbitrary shape. } \usage{ Kest(X, \dots, r=NULL, rmax=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. If necessary, specify \code{rmax}. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"rigid"}, \code{"none"}, \code{"good"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{nlarge}{ Optional. Efficiency threshold. If the number of points exceeds \code{nlarge}, then only the border correction will be computed (by default), using a fast algorithm. } \item{domain}{ Optional.
Calculations will be restricted to this subset of the window. See Details. } \item{var.approx}{Logical. If \code{TRUE}, the approximate variance of \eqn{\hat K(r)}{Kest(r)} under CSR will also be computed. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K(r)} obtained by the edge corrections named. If \code{var.approx=TRUE} then the return value also has columns \code{rip} and \code{ls} containing approximations to the variance of \eqn{\hat K(r)}{Kest(r)} under CSR. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ The \eqn{K} function (variously called ``Ripley's K-function'' and the ``reduced second moment function'') of a stationary point process \eqn{X} is defined so that \eqn{\lambda K(r)}{lambda K(r)} equals the expected number of additional random points within a distance \eqn{r} of a typical random point of \eqn{X}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The \eqn{K} function is determined by the second order moment properties of \eqn{X}. 
An estimate of \eqn{K} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1977, 1988). In exploratory analyses, the estimate of \eqn{K} is a useful statistic summarising aspects of inter-point ``dependence'' and ``clustering''. For inferential purposes, the estimate of \eqn{K} is usually compared to the true value of \eqn{K} for a completely random (Poisson) point process, which is \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}. Deviations between the empirical and theoretical \eqn{K} curves may suggest spatial clustering or spatial regularity. This routine \code{Kest} estimates the \eqn{K} function of a stationary point process, given observation of the process inside a known, bounded window. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The estimation of \eqn{K} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented for rectangular and polygonal windows (not for binary masks). } \item{translate/translation}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } \item{rigid}{Rigid motion correction (Ohser and Stoyan, 1981). Implemented for all window geometries, but slow for complex windows. } \item{none}{ Uncorrected estimate. 
An estimate of the K function \emph{without} edge correction. (i.e. setting \eqn{e_{ij} = 1}{e[i,j] = 1} in the equation below. This estimate is \bold{biased} and should not be used for data analysis, \emph{unless} you have an extremely large point pattern (more than 100,000 points). } \item{best}{ Selects the best edge correction that is available for the geometry of the window. Currently this is Ripley's isotropic correction for a rectangular or polygonal window, and the translation correction for masks. } \item{good}{ Selects the best edge correction that can be computed in a reasonable time. This is the same as \code{"best"} for datasets with fewer than 3000 points; otherwise the selected edge correction is \code{"border"}, unless there are more than 100,000 points, when it is \code{"none"}. } } The estimates of \eqn{K(r)} are of the form \deqn{ \hat K(r) = \frac a {n (n-1) } \sum_i \sum_j I(d_{ij}\le r) e_{ij} }{ Kest(r) = (a/(n * (n-1))) * sum[i,j] I(d[i,j] <= r) e[i,j]) } where \eqn{a} is the area of the window, \eqn{n} is the number of data points, and the sum is taken over all ordered pairs of points \eqn{i} and \eqn{j} in \code{X}. Here \eqn{d_{ij}}{d[i,j]} is the distance between the two points, and \eqn{I(d_{ij} \le r)}{I(d[i,j] <= r)} is the indicator that equals 1 if the distance is less than or equal to \eqn{r}. The term \eqn{e_{ij}}{e[i,j]} is the edge correction weight (which depends on the choice of edge correction listed above). Note that this estimator assumes the process is stationary (spatially homogeneous). For inhomogeneous point patterns, see \code{\link{Kinhom}}. If the point pattern \code{X} contains more than about 3000 points, the isotropic and translation edge corrections can be computationally prohibitive. The computations for the border method are much faster, and are statistically efficient when there are large numbers of points. 
Accordingly, if the number of points in \code{X} exceeds the threshold \code{nlarge}, then only the border correction will be computed. Setting \code{nlarge=Inf} or \code{correction="best"} will prevent this from happening. Setting \code{nlarge=0} is equivalent to selecting only the border correction with \code{correction="border"}. If \code{X} contains more than about 100,000 points, even the border correction is time-consuming. You may want to consider setting \code{correction="none"} in this case. There is an even faster algorithm for the uncorrected estimate. Approximations to the variance of \eqn{\hat K(r)}{Kest(r)} are available, for the case of the isotropic edge correction estimator, \bold{assuming complete spatial randomness} (Ripley, 1988; Lotwick and Silverman, 1982; Diggle, 2003, pp 51-53). If \code{var.approx=TRUE}, then the result of \code{Kest} also has a column named \code{rip} giving values of Ripley's (1988) approximation to \eqn{\mbox{var}(\hat K(r))}{var(Kest(r))}, and (if the window is a rectangle) a column named \code{ls} giving values of Lotwick and Silverman's (1982) approximation. If the argument \code{domain} is given, the calculations will be restricted to a subset of the data. In the formula for \eqn{K(r)} above, the \emph{first} point \eqn{i} will be restricted to lie inside \code{domain}. The result is an approximately unbiased estimate of \eqn{K(r)} based on pairs of points in which the first point lies inside \code{domain} and the second point is unrestricted. This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The estimator \code{Kest} ignores marks. Its counterparts for multitype point patterns are \code{\link{Kcross}}, \code{\link{Kdot}}, and for general marked point patterns see \code{\link{Kmulti}}. 
Some writers, particularly Stoyan (1994, 1995) advocate the use of the ``pair correlation function'' \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}. See \code{\link{pcf}} on how to estimate this function. } \section{Envelopes, significance bands and confidence intervals}{ To compute simulation envelopes for the \eqn{K}-function under CSR, use \code{\link{envelope}}. To compute a confidence interval for the true \eqn{K}-function, use \code{\link{varblock}} or \code{\link{lohboot}}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37--78. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ohser, J. and Stoyan, D. (1981) On the second-order and orientation analysis of planar stationary point processes. \emph{Biometrical Journal} \bold{23}, 523--533. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \section{Warnings}{ The estimator of \eqn{K(r)} is approximately unbiased for each fixed \eqn{r}. Bias increases with \eqn{r} and depends on the window geometry. 
For a rectangular window it is prudent to restrict the \eqn{r} values to a maximum of \eqn{1/4} of the smaller side length of the rectangle. Bias may become appreciable for point patterns consisting of fewer than 15 points. While \eqn{K(r)} is always a non-decreasing function, the estimator of \eqn{K} is not guaranteed to be non-decreasing. This is rarely a problem in practice. } \seealso{ \code{\link{localK}} to extract individual summands in the \eqn{K} function. \code{\link{pcf}} for the pair correlation. \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} for alternative summary functions. \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kinhom}}, \code{\link{Kmulti}} for counterparts of the \eqn{K} function for multitype point patterns. \code{\link{reduced.sample}} for the calculation of reduced sample estimators. } \examples{ X <- runifpoint(50) K <- Kest(X) K <- Kest(cells, correction="isotropic") plot(K) plot(K, main="K function for cells") # plot the L function plot(K, sqrt(iso/pi) ~ r) plot(K, sqrt(./pi) ~ r, ylab="L(r)", main="L function for cells") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/influence.ppm.Rd0000644000176200001440000000653313160710621015726 0ustar liggesusers\name{influence.ppm} \alias{influence.ppm} \title{ Influence Measure for Spatial Point Process Model } \description{ Computes the influence measure for a fitted spatial point process model. } \usage{ \method{influence}{ppm}(model, ..., drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. 
} \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ Given a fitted spatial point process model \code{model}, this function computes the influence measure described in Baddeley, Chang and Song (2013). The function \code{\link[stats]{influence}} is generic, and \code{influence.ppm} is the method for objects of class \code{"ppm"} representing point process models. The influence of a point process model is a value attached to each data point (i.e. each point of the point pattern to which the \code{model} was fitted). The influence value \eqn{s(x_i)}{s(x[i])} at a data point \eqn{x_i}{x[i]} represents the change in the maximised log (pseudo)likelihood that occurs when the point \eqn{x_i}{x[i]} is deleted. A relatively large value of \eqn{s(x_i)}{s(x[i])} indicates a data point with a large influence on the fitted model. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. The result of \code{influence.ppm} is an object of class \code{"influence.ppm"}. It can be plotted (by \code{\link{plot.influence.ppm}}), or converted to a marked point pattern by \code{as.ppp} (see \code{\link{as.ppp.influence.ppm}}). } \value{ An object of class \code{"influence.ppm"} that can be plotted (by \code{\link{plot.influence.ppm}}). 
There are also methods for \code{print}, \code{[}, \code{as.ppp} and \code{as.owin}. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{ppmInfluence}}, \code{\link{plot.influence.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) plot(influence(fit)) } \keyword{spatial} \keyword{models} spatstat/man/dg.test.Rd0000644000176200001440000001250413160710571014532 0ustar liggesusers\name{dg.test} \alias{dg.test} \title{ Dao-Genton Adjusted Goodness-Of-Fit Test } \description{ Performs the Dao and Genton (2014) adjusted goodness-of-fit test of spatial pattern. } \usage{ dg.test(X, \dots, exponent = 2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), reuse = TRUE, leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{dclf.test}} or \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{use.theory} described under Details. } \item{exponent}{ Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. 
There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. } \item{reuse}{ Logical value indicating whether to re-use the first stage simulations at the second stage, as described by Dao and Genton (2014). } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ Performs the Dao-Genton (2014) adjusted Monte Carlo goodness-of-fit test, in the equivalent form described by Baddeley et al (2014). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). 
The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). The Dao-Genton test is biased when the significance level is very small (small \eqn{p}-values are not reliable) and we recommend \code{\link{bits.test}} in this case. } \value{ A hypothesis test (object of class \code{"htest"} which can be printed to show the outcome of the test. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis}, in press. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. 
} \seealso{ \code{\link{bits.test}}, \code{\link{dclf.test}}, \code{\link{mad.test}} } \examples{ ns <- if(interactive()) 19 else 4 dg.test(cells, nsim=ns) dg.test(cells, alternative="less", nsim=ns) dg.test(cells, nsim=ns, interpolate=TRUE) } \keyword{spatial} \keyword{htest} spatstat/man/nnorient.Rd0000644000176200001440000000675513160710621015025 0ustar liggesusers\name{nnorient} \alias{nnorient} \title{ Nearest Neighbour Orientation Distribution } \description{ Computes the distribution of the orientation of the vectors from each point to its nearest neighbour. } \usage{ nnorient(X, \dots, cumulative = FALSE, correction, k = 1, unit = c("degree", "radian"), domain = NULL, ratio = FALSE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{circdensity}} to control the kernel smoothing, if \code{cumulative=FALSE}. } \item{cumulative}{ Logical value specifying whether to estimate the probability density (\code{cumulative=FALSE}, the default) or the cumulative distribution function (\code{cumulative=TRUE}). } \item{correction}{ Character vector specifying edge correction or corrections. Options are \code{"none"}, \code{"bord.modif"}, \code{"good"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{k}{ Integer. The \eqn{k}th nearest neighbour will be used. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{unit}{ Unit in which the angles should be expressed. Either \code{"degree"} or \code{"radian"}. } \item{domain}{ Optional window. The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. } } \details{ This algorithm considers each point in the pattern \code{X} and finds its nearest neighbour (or \eqn{k}th nearest neighour). 
The \emph{direction} of the arrow joining the data point to its neighbour is measured, as an angle in degrees or radians, anticlockwise from the \eqn{x} axis. If \code{cumulative=FALSE} (the default), a kernel estimate of the probability density of the angles is calculated using \code{\link{circdensity}}. This is the function \eqn{\vartheta(\phi)}{theta(phi)} defined in Illian et al (2008), equation (4.5.3), page 253. If \code{cumulative=TRUE}, then the cumulative distribution function of these angles is calculated. In either case the result can be plotted as a rose diagram by \code{\link{rose}}, or as a function plot by \code{\link{plot.fv}}. The algorithm gives each observed direction a weight, determined by an edge correction, to adjust for the fact that some interpoint distances are more likely to be observed than others. The choice of edge correction or corrections is determined by the argument \code{correction}. It is also possible to calculate an estimate of the probability density from the cumulative distribution function, by numerical differentiation. Use \code{\link{deriv.fv}} with the argument \code{Dperiodic=TRUE}. } \value{ A function value table (object of class \code{"fv"}) containing the estimates of the probability density or the cumulative distribution function of angles, in degrees (if \code{unit="degree"}) or radians (if \code{unit="radian"}). } \references{ Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical Analysis and Modelling of Spatial Point Patterns.} Wiley. 
} \seealso{ \code{\link{pairorient}} } \examples{ rose(nnorient(redwood, adjust=0.6), col="grey") plot(CDF <- nnorient(redwood, cumulative=TRUE)) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{nonparametric} spatstat/man/rmh.Rd0000644000176200001440000000565313160710621013753 0ustar liggesusers\name{rmh} \alias{rmh} \title{Simulate point patterns using the Metropolis-Hastings algorithm.} \description{ Generic function for running the Metropolis-Hastings algorithm to produce simulated realisations of a point process model. } \usage{rmh(model, \dots)} \arguments{ \item{model}{The point process model to be simulated. } \item{\dots}{Further arguments controlling the simulation. } } \details{ The Metropolis-Hastings algorithm can be used to generate simulated realisations from a wide range of spatial point processes. For caveats, see below. The function \code{rmh} is generic; it has methods \code{\link{rmh.ppm}} (for objects of class \code{"ppm"}) and \code{\link{rmh.default}} (the default). The actual implementation of the Metropolis-Hastings algorithm is contained in \code{\link{rmh.default}}. For details of its use, see \code{\link{rmh.ppm}} or \code{\link{rmh.default}}. [If the model is a Poisson process, then Metropolis-Hastings is not used; the Poisson model is generated directly using \code{\link{rpoispp}} or \code{\link{rmpoispp}}.] In brief, the Metropolis-Hastings algorithm is a Markov Chain, whose states are spatial point patterns, and whose limiting distribution is the desired point process. After running the algorithm for a very large number of iterations, we may regard the state of the algorithm as a realisation from the desired point process. However, there are difficulties in deciding whether the algorithm has run for ``long enough''. The convergence of the algorithm may indeed be extremely slow. No guarantees of convergence are given! 
While it is fashionable to decry the Metropolis-Hastings algorithm for its poor convergence and other properties, it has the advantage of being easy to implement for a wide range of models. } \section{Warning}{ As of version 1.22-1 of \code{spatstat} a subtle change was made to \code{rmh.default()}. We had noticed that the results produced were sometimes not ``scalable'' in that two models, differing in effect only by the units in which distances are measured and starting from the same seed, gave different results. This was traced to an idiosyncracy of floating point arithmetic. The code of \code{rmh.default()} has been changed so that the results produced by \code{rmh} are now scalable. The downside of this is that code which users previously ran may now give results which are different from what they formerly were. In order to recover former behaviour (so that previous results can be reproduced) set \code{spatstat.options(scalable=FALSE)}. See the last example in the help for \code{\link{rmh.default}}. } \value{ A point pattern, in the form of an object of class \code{"ppp"}. See \code{\link{rmh.default}} for details. } \seealso{ \code{\link{rmh.default}} } \examples{ # See examples in rmh.default and rmh.ppm } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/Smooth.msr.Rd0000644000176200001440000000411213160710621015223 0ustar liggesusers\name{Smooth.msr} \alias{Smooth.msr} \title{ Smooth a Signed or Vector-Valued Measure } \description{ Apply kernel smoothing to a signed measure or vector-valued measure. } \usage{ \method{Smooth}{msr}(X, ..., drop=TRUE) } \arguments{ \item{X}{ Object of class \code{"msr"} representing a signed measure or vector-valued measure. } \item{\dots}{ Arguments passed to \code{\link{density.ppp}} controlling the smoothing bandwidth and the pixel resolution. } \item{drop}{ Logical. If \code{TRUE} (the default), the result of smoothing a scalar-valued measure is a pixel image. 
If \code{FALSE}, the result of smoothing a scalar-valued measure is a list containing one pixel image. } } \details{ This function applies kernel smoothing to a signed measure or vector-valued measure \code{X}. The Gaussian kernel is used. The object \code{X} would typically have been created by \code{\link{residuals.ppm}} or \code{\link{msr}}. } \value{ A pixel image or a list of pixel images. For scalar-valued measures, a pixel image (object of class \code{"im"}) provided \code{drop=TRUE}. For vector-valued measures (or if \code{drop=FALSE}), a list of pixel images; the list also belongs to the class \code{"solist"} so that it can be printed and plotted. } \references{ Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. } \author{ \adrian } \seealso{ \code{\link{Smooth}}, \code{\link{msr}}, \code{\link{plot.msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") plot(Smooth(rp)) plot(Smooth(rs)) } \keyword{spatial} \keyword{models} spatstat/man/quadscheme.Rd0000644000176200001440000001305513160710621015277 0ustar liggesusers\name{quadscheme} \alias{quadscheme} \title{Generate a Quadrature Scheme from a Point Pattern} \description{ Generates a quadrature scheme (an object of class \code{"quad"}) from point patterns of data and dummy points. } \usage{ quadscheme(data, dummy, method="grid", \dots) } \arguments{ \item{data}{ The observed data point pattern. 
An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{dummy}{ The pattern of dummy points for the quadrature. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} Defaults to \code{default.dummy(data, ...)} } \item{method}{ The name of the method for calculating quadrature weights: either \code{"grid"} or \code{"dirichlet"}. } \item{\dots}{ Parameters of the weighting method (see below) and parameters for constructing the dummy points if necessary. } } \value{ An object of class \code{"quad"} describing the quadrature scheme (data points, dummy points, and quadrature weights) suitable as the argument \code{Q} of the function \code{\link{ppm}()} for fitting a point process model. The quadrature scheme can be inspected using the \code{print} and \code{plot} methods for objects of class \code{"quad"}. } \details{ This is the primary method for producing a quadrature schemes for use by \code{\link{ppm}}. The function \code{\link{ppm}} fits a point process model to an observed point pattern using the Berman-Turner quadrature approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the pseudolikelihood of the model. It requires a quadrature scheme consisting of the original data point pattern, an additional pattern of dummy points, and a vector of quadrature weights for all these points. Such quadrature schemes are represented by objects of class \code{"quad"}. See \code{\link{quad.object}} for a description of this class. Quadrature schemes are created by the function \code{quadscheme}. The arguments \code{data} and \code{dummy} specify the data and dummy points, respectively. There is a sensible default for the dummy points (provided by \code{\link{default.dummy}}). Alternatively the dummy points may be specified arbitrarily and given in any format recognised by \code{\link{as.ppp}}. 
There are also functions for creating dummy patterns including \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}} and \code{\link{spokes}}. The quadrature region is the region over which we are integrating, and approximating integrals by finite sums. If \code{dummy} is a point pattern object (class \code{"ppp"}) then the quadrature region is taken to be \code{Window(dummy)}. If \code{dummy} is just a list of \eqn{x, y} coordinates then the quadrature region defaults to the observation window of the data pattern, \code{Window(data)}. If \code{dummy} is missing, then a pattern of dummy points will be generated using \code{\link{default.dummy}}, taking account of the optional arguments \code{...}. By default, the dummy points are arranged in a rectangular grid; recognised arguments include \code{nd} (the number of grid points in the horizontal and vertical directions) and \code{eps} (the spacing between dummy points). If \code{random=TRUE}, a systematic random pattern of dummy points is generated instead. See \code{\link{default.dummy}} for details. If \code{method = "grid"} then the optional arguments (for \code{\dots}) are \code{(nd, ntile, eps)}. The quadrature region (defined above) is divided into an \code{ntile[1]} by \code{ntile[2]} grid of rectangular tiles. The weight for each quadrature point is the area of a tile divided by the number of quadrature points in that tile. If \code{method="dirichlet"} then the optional arguments are \code{(exact=TRUE, nd, eps)}. The quadrature points (both data and dummy) are used to construct the Dirichlet tessellation. The quadrature weight of each point is the area of its Dirichlet tile inside the quadrature region. If \code{exact == TRUE} then this area is computed exactly using the package \code{deldir}; otherwise it is computed approximately by discretisation. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. 
\emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. } \seealso{ \code{\link{ppm}}, \code{\link{as.ppp}}, \code{\link{quad.object}}, \code{\link{gridweights}}, \code{\link{dirichletWeights}}, \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}}, \code{\link{spokes}} } \examples{ data(simdat) # grid weights Q <- quadscheme(simdat) Q <- quadscheme(simdat, method="grid") Q <- quadscheme(simdat, eps=0.5) # dummy point spacing 0.5 units Q <- quadscheme(simdat, nd=50) # 1 dummy point per tile Q <- quadscheme(simdat, ntile=25, nd=50) # 4 dummy points per tile # Dirichlet weights Q <- quadscheme(simdat, method="dirichlet", exact=FALSE) # random dummy pattern \dontrun{ D <- runifpoint(250, Window(simdat)) Q <- quadscheme(simdat, D, method="dirichlet", exact=FALSE) } # polygonal window data(demopat) X <- unmark(demopat) Q <- quadscheme(X) # mask window Window(X) <- as.mask(Window(X)) Q <- quadscheme(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/hextess.Rd0000644000176200001440000000461213160710621014642 0ustar liggesusers\name{hextess} \alias{hexgrid} \alias{hextess} \title{ Hexagonal Grid or Tessellation } \description{ Construct a hexagonal grid of points, or a hexagonal tessellation. } \usage{ hexgrid(W, s, offset = c(0, 0), origin=NULL, trim = TRUE) hextess(W, s, offset = c(0, 0), origin=NULL, trim = TRUE) } \arguments{ \item{W}{ Window in which to construct the hexagonal grid or tessellation. An object of class \code{"owin"}. } \item{s}{ Side length of hexagons. A positive number. } \item{offset}{ Numeric vector of length 2 specifying a shift of the hexagonal grid. See Details. } \item{origin}{ Numeric vector of length 2 specifying the initial origin of the hexagonal grid, before the offset is applied. See Details. 
The grid can be shifted relative to this origin by specifying the \code{offset}.
Similar to the result of \code{\link{apply}}. If each call to \code{fun} returns a single numeric value, the result is a vector of length \code{npoints(X)}, the number of points in \code{X}.
Each point of \code{X} is visited; the neighbourhood of the current point is determined; the marks of these points are extracted as a vector \code{v}; then the function \code{fun} is called as: \code{fun(v, \dots)} where \code{\dots} are the arguments passed from the call to \code{markstat}. The results of each call to \code{fun} are collected and returned according to the usual rules for \code{\link{apply}} and its relatives. See the section on \bold{Value}. This function is just a convenient wrapper for a common use of the function \code{\link{applynbd}}. For more complex tasks, use \code{\link{applynbd}}. To simply tabulate the marks in every \code{R}-neighbourhood, use \code{\link{marktable}}. } \seealso{ \code{\link{applynbd}}, \code{\link{marktable}}, \code{\link{ppp.object}}, \code{\link{apply}} } \examples{ trees <- longleaf \testonly{ trees <- trees[seq(1, npoints(trees), by=6)] } # average diameter of 5 closest neighbours of each tree md <- markstat(trees, mean, N=5) # range of diameters of trees within 10 metre radius rd <- markstat(trees, range, R=10) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{programming} spatstat/man/clusterfield.Rd0000644000176200001440000000674013160710571015654 0ustar liggesusers\name{clusterfield} \alias{clusterfield} \alias{clusterfield.character} \alias{clusterfield.function} \alias{clusterfield.kppm} \title{Field of clusters} \description{ Calculate the superposition of cluster kernels at the location of a point pattern. } \usage{ clusterfield(model, locations = NULL, \dots) \method{clusterfield}{character}(model, locations = NULL, \dots) \method{clusterfield}{function}(model, locations = NULL, \dots, mu = NULL) \method{clusterfield}{kppm}(model, locations = NULL, \dots) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster model (object of class \code{"kppm"}), a character string specifying the type of cluster model, or a function defining the cluster kernel. See Details. 
The actual calculations are performed by \code{\link{density.ppp}} and \code{\dots} arguments are passed thereto for control over the pixel resolution etc.
} \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{density.ppp}} and \code{\link{kppm}} Simulation algorithms for cluster models: \code{\link{rCauchy}} \code{\link{rMatClust}} \code{\link{rThomas}} \code{\link{rVarGamma}} } \examples{ # method for fitted model fit <- kppm(redwood~1, "Thomas") clusterfield(fit, eps = 0.01) # method for functions kernel <- function(x,y,scal) { r <- sqrt(x^2 + y^2) ifelse(r > 0, dgamma(r, shape=5, scale=scal)/(2 * pi * r), 0) } X <- runifpoint(10) clusterfield(kernel, X, scal=0.05) } \author{\adrian , \rolf and \ege . } \keyword{spatial} spatstat/man/methods.rho2hat.Rd0000644000176200001440000000412413160710621016166 0ustar liggesusers\name{methods.rho2hat} \alias{methods.rho2hat} %DoNotExport \alias{predict.rho2hat} \alias{print.rho2hat} \alias{plot.rho2hat} \title{ Methods for Intensity Functions of Two Spatial Covariates } \description{ These are methods for the class \code{"rho2hat"}. } \usage{ \method{plot}{rho2hat}(x, \dots, do.points=FALSE) \method{print}{rho2hat}(x, \dots) \method{predict}{rho2hat}(object, \dots, relative=FALSE) } \arguments{ \item{x,object}{ An object of class \code{"rho2hat"}. } \item{\dots}{ Arguments passed to other methods. } \item{do.points}{ Logical value indicating whether to plot the observed values of the covariates at the data points. } \item{relative}{ Logical value indicating whether to compute the estimated point process intensity (\code{relative=FALSE}) or the relative risk (\code{relative=TRUE}) in the case of a relative risk estimate. } } \details{ These functions are methods for the generic commands \code{\link{print}}, \code{\link{predict}} and \code{\link{plot}} for the class \code{"rho2hat"}. An object of class \code{"rho2hat"} is an estimate of the intensity of a point process, as a function of two given spatial covariates. See \code{\link{rho2hat}}. 
The method \code{plot.rho2hat} displays the estimated function \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally adds a \code{\link{rug}} plot of the observed values of the covariate. In this plot the two axes represent possible values of the two covariates. The method \code{predict.rho2hat} computes a pixel image of the intensity \eqn{\rho(Z_1(u), Z_2(u))}{rho(Z1(u), Z2(u))} at each spatial location \eqn{u}, where \eqn{Z_1(u)}{Z1(u)} and \eqn{Z_2(u)}{Z2(u)} are the two spatial covariates. } \value{ For \code{predict.rho2hat} the value is a pixel image (object of class \code{"im"}). For other functions, the value is \code{NULL}. } \author{ \adrian } \seealso{ \code{\link{rho2hat}} } \examples{ r2 <- with(bei.extra, rho2hat(bei, elev, grad)) r2 plot(r2) plot(predict(r2)) } \keyword{spatial} \keyword{methods} spatstat/man/vargamma.estK.Rd0000644000176200001440000001470113160710621015657 0ustar liggesusers\name{vargamma.estK} \alias{vargamma.estK} \title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel} \description{ Fits the Neyman-Scott cluster point process, with Variance Gamma kernel, to a point pattern dataset by the Method of Minimum Contrast. } \usage{ vargamma.estK(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{nu}{ Numerical value controlling the shape of the tail of the clusters. A number greater than \code{-1/2}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. 
} } \details{ This algorithm fits the Neyman-Scott Cluster point process model with Variance Gamma kernel (Jalilian et al, 2013) to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Neyman-Scott Cluster point process with Variance Gamma kernel to \code{X}, by finding the parameters of the model which give the closest match between the theoretical \eqn{K} function of the model and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Neyman-Scott cluster point process with Variance Gamma kernel is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent have a common distribution described in Jalilian et al (2013). The shape of the kernel is determined by the dimensionless index \code{nu}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). 
The parameter \code{scale} appearing in \code{startpar} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}.
There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{vargamma.estpcf}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{cauchy.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{Kmodel}}. \code{\link{rVarGamma}} to simulate the model. } \examples{ \testonly{ u <- vargamma.estK(redwood, startpar=c(kappa=15, eta=0.075)) } if(interactive()) { u <- vargamma.estK(redwood) u plot(u) } } \keyword{spatial} \keyword{models} spatstat/man/pool.anylist.Rd0000644000176200001440000000235213160710621015611 0ustar liggesusers\name{pool.anylist} \alias{pool.anylist} \title{ Pool Data from a List of Objects } \description{ Pool the data from the objects in a list. } \usage{ \method{pool}{anylist}(x, ...) } \arguments{ \item{x}{ A list, belonging to the class \code{"anylist"}, containing objects that can be pooled. } \item{\dots}{ Optional additional objects which can be pooled with the elements of \code{x}. } } \details{ The function \code{\link{pool}} is generic. Its purpose is to combine data from several objects of the same type (typically computed from different datasets) into a common, pooled estimate. 
The function \code{pool.anylist} is the method for the class \code{"anylist"}.
} \seealso{ \code{\link{scale}} } \examples{ X <- as.im(function(x,y) {x+y+3}, unit.square()) summary(X) Y <- scaletointerval(X) summary(Y) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/intensity.ppp.Rd0000644000176200001440000000536013160710621016004 0ustar liggesusers\name{intensity.ppp} \alias{intensity.ppp} \alias{intensity.splitppp} \title{ Empirical Intensity of Point Pattern } \description{ Computes the average number of points per unit area in a point pattern dataset. } \usage{ \method{intensity}{ppp}(X, ..., weights=NULL) \method{intensity}{splitppp}(X, ..., weights=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{weights}{ Optional. Numeric vector of weights attached to the points of \code{X}. Alternatively, an \code{expression} which can be evaluated to give a vector of weights. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{intensity}}. It computes the empirical intensity of a point pattern (object of class \code{"ppp"}), i.e. the average density of points per unit area. If the point pattern is multitype, the intensities of the different types are computed separately. Note that the intensity will be computed as the number of points per square unit, based on the unit of length for \code{X}, given by \code{unitname(X)}. If the unit of length is a strange multiple of a standard unit, like \code{5.7 metres}, then it can be converted to the standard unit using \code{\link{rescale}}. See the Examples. If \code{weights} are given, then the intensity is computed as the total \emph{weight} per square unit. The argument \code{weights} should be a numeric vector of weights for each point of \code{X} (weights may be negative or zero). Alternatively \code{weights} can be an \code{expression} which will be evaluated for the dataset to yield a vector of weights. 
The expression may involve the Cartesian coordinates \eqn{x,y} of the points, and the marks of the points, if any. Variable names permitted in the expression include \code{x} and \code{y}, the name \code{marks} if \code{X} has a single column of marks, the names of any columns of marks if \code{X} has a data frame of marks, and the names of constants or functions that exist in the global environment. See the Examples. } \value{ A numeric value (giving the intensity) or numeric vector (giving the intensity for each possible type). } \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppm}} } \examples{ japanesepines intensity(japanesepines) unitname(japanesepines) intensity(rescale(japanesepines)) intensity(amacrine) intensity(split(amacrine)) # numeric vector of weights volumes <- with(marks(finpines), (pi/4) * height * diameter^2) intensity(finpines, weights=volumes) # expression for weights intensity(finpines, weights=expression((pi/4) * height * diameter^2)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/rgbim.Rd0000644000176200001440000000454013160710621014257 0ustar liggesusers\name{rgbim} \alias{rgbim} \alias{hsvim} \title{Create Colour-Valued Pixel Image} \description{ Creates an object of class \code{"im"} representing a two-dimensional pixel image whose pixel values are colours. } \usage{ rgbim(R, G, B, A, maxColorValue=255, autoscale=FALSE) hsvim(H, S, V, A, autoscale=FALSE) } \arguments{ \item{R,G,B}{ Pixel images (objects of class \code{"im"}) or constants giving the red, green, and blue components of a colour, respectively. } \item{A}{ Optional. Pixel image or constant value giving the alpha (transparency) component of a colour. } \item{maxColorValue}{ Maximum colour channel value for \code{R,G,B,A}. } \item{H,S,V}{ Pixel images (objects of class \code{"im"}) or constants giving the hue, saturation, and value components of a colour, respectively. } \item{autoscale}{ Logical. 
If \code{TRUE}, input values are automatically rescaled to fit the permitted range. RGB values are scaled to lie between 0 and \code{maxColorValue}. HSV values are scaled to lie between 0 and 1. } } \details{ These functions take three pixel images, with real or integer pixel values, and create a single pixel image whose pixel values are colours recognisable to \R. Some of the arguments may be constant numeric values, but at least one of the arguments must be a pixel image. The image arguments should be compatible (in array dimension and in spatial position). \code{rgbim} calls \code{\link{rgb}} to compute the colours, while \code{hsvim} calls \code{\link{hsv}}. See the help for the relevant function for more information about the meaning of the colour channels. } \seealso{ \code{\link{im.object}}, \code{\link{rgb}}, \code{\link{hsv}}. See \code{\link[spatstat:colourtools]{colourtools}} for additional colour tools. } \examples{ \testonly{ op <- spatstat.options(npixel=32) } # create three images with values in [0,1] X <- setcov(owin()) X <- eval.im(pmin(1,X)) M <- Window(X) Y <- as.im(function(x,y){(x+1)/2}, W=M) Z <- as.im(function(x,y){(y+1)/2}, W=M) RGB <- rgbim(X, Y, Z, maxColorValue=1) HSV <- hsvim(X, Y, Z) plot(RGB, valuesAreColours=TRUE) plot(HSV, valuesAreColours=TRUE) \testonly{ spatstat.options(op) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{datagen} spatstat/man/nndist.psp.Rd0000644000176200001440000000540313160710621015256 0ustar liggesusers\name{nndist.psp} \alias{nndist.psp} \title{Nearest neighbour distances between line segments} \description{ Computes the distance from each line segment to its nearest neighbour in a line segment pattern. Alternatively finds the distance to the second nearest, third nearest etc. } \usage{ \method{nndist}{psp}(X, \dots, k=1, method="C") } \arguments{ \item{X}{ A line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{k}{ Integer, or integer vector. 
The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each line segment. If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th segment. If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th segment. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th segment. } \details{ This is a method for the generic function \code{\link{nndist}} for the class \code{"psp"}. If \code{k=1}, this function computes the distance from each line segment to the nearest other line segment in \code{X}. In general it computes the distance from each line segment to the \code{k}th nearest other line segment. The argument \code{k} can also be a vector, and this computation will be performed for each value of \code{k}. Distances are calculated using the Hausdorff metric. The Hausdorff distance between two line segments is the maximum distance from any point on one of the segments to the nearest point on the other segment. If there are fewer than \code{max(k)+1} line segments in the pattern, some of the nearest neighbour distances will be infinite (\code{Inf}). The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted \R code only. If \code{method="C"} (the default) then compiled \code{C} code is used. The \code{C} code is somewhat faster. 
} \seealso{ \code{\link{nndist}}, \code{\link{nndist.ppp}} } \examples{ L <- psp(runif(10), runif(10), runif(10), runif(10), owin()) D <- nndist(L) D <- nndist(L, k=1:3) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/edge.Ripley.Rd0000644000176200001440000000574613160710571015343 0ustar liggesusers\name{edge.Ripley} \alias{edge.Ripley} \alias{rmax.Ripley} \title{ Ripley's Isotropic Edge Correction } \description{ Computes Ripley's isotropic edge correction weights for a point pattern. } \usage{ edge.Ripley(X, r, W = Window(X), method = "C", maxweight = 100) rmax.Ripley(W) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{W}{ Window for which the edge correction is required. } \item{r}{ Vector or matrix of interpoint distances for which the edge correction should be computed. } \item{method}{ Choice of algorithm. Either \code{"interpreted"} or \code{"C"}. This is needed only for debugging purposes. } \item{maxweight}{ Maximum permitted value of the edge correction weight. } } \details{ The function \code{edge.Ripley} computes Ripley's (1977) isotropic edge correction weight, which is used in estimating the \eqn{K} function and in many other contexts. The function \code{rmax.Ripley} computes the maximum value of distance \eqn{r} for which the isotropic edge correction estimate of \eqn{K(r)} is valid. For a single point \eqn{x} in a window \eqn{W}, and a distance \eqn{r > 0}, the isotropic edge correction weight is \deqn{ e(u, r) = \frac{2\pi r}{\mbox{length}(c(u,r) \cap W)} }{ e(u, r) = 2 * \pi * r/length(intersection(c(u,r), W)) } where \eqn{c(u,r)} is the circle of radius \eqn{r} centred at the point \eqn{u}. The denominator is the length of the overlap between this circle and the window \eqn{W}. The function \code{edge.Ripley} computes this edge correction weight for each point in the point pattern \code{X} and for each corresponding distance value in the vector or matrix \code{r}. 
If \code{r} is a vector, with one entry for each point in \code{X}, then the result is a vector containing the edge correction weights \code{e(X[i], r[i])} for each \code{i}. If \code{r} is a matrix, with one row for each point in \code{X}, then the result is a matrix whose \code{i,j} entry gives the edge correction weight \code{e(X[i], r[i,j])}. For example \code{edge.Ripley(X, pairdist(X))} computes all the edge corrections required for the \eqn{K}-function. If any value of the edge correction weight exceeds \code{maxwt}, it is set to \code{maxwt}. The function \code{rmax.Ripley} computes the smallest distance \eqn{r} such that it is possible to draw a circle of radius \eqn{r}, centred at a point of \code{W}, such that the circle does not intersect the interior of \code{W}. } \value{ A numeric vector or matrix. } \references{ Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \seealso{ \code{\link{edge.Trans}}, \code{\link{rmax.Trans}}, \code{\link{Kest}} } \examples{ v <- edge.Ripley(cells, pairdist(cells)) rmax.Ripley(Window(cells)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/dppapproxpcf.Rd0000644000176200001440000000156613160710571015676 0ustar liggesusers\name{dppapproxpcf} \alias{dppapproxpcf} \title{Approximate Pair Correlation Function of Determinantal Point Process Model} \description{ Returns an approximation to the theoretical pair correlation function of a determinantal point process model, as a function of one argument \eqn{x}. } \usage{dppapproxpcf(model, trunc = 0.99, W = NULL)} \arguments{ \item{model}{Object of class \code{"detpointprocfamily"}.} \item{trunc}{Numeric specifying how the model truncation is performed. See Details section of \code{\link{simulate.detpointprocfamily}}.} \item{W}{Optional window -- undocumented at the moment.} } \details{This function is usually NOT needed for anything. 
It only exists for investigative purposes.} \author{ \adrian \rolf and \ege } \examples{ f <- dppapproxpcf(dppMatern(lambda = 100, alpha=.028, nu=1, d=2)) plot(f, xlim = c(0,0.1)) } spatstat/man/quadrat.test.splitppp.Rd0000644000176200001440000000362113160710621017447 0ustar liggesusers\name{quadrat.test.splitppp} \alias{quadrat.test.splitppp} \title{Dispersion Test of CSR for Split Point Pattern Based on Quadrat Counts} \description{ Performs a test of Complete Spatial Randomness for each of the component patterns in a split point pattern, based on quadrat counts. By default performs chi-squared tests; can also perform Monte Carlo based tests. } \usage{ \method{quadrat.test}{splitppp}(X, ..., df=NULL, df.est=NULL, Xname=NULL) } \arguments{ \item{X}{ A split point pattern (object of class \code{"splitppp"}), each component of which will be subjected to the goodness-of-fit test. } \item{\dots}{Arguments passed to \code{\link{quadrat.test.ppp}}.} \item{df,df.est,Xname}{Arguments passed to \code{\link{pool.quadrattest}}.} } \details{ The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), split point patterns (class \code{"splitppp"}) and point process models (class \code{"ppm"}). If \code{X} is a split point pattern, then for each of the component point patterns (taken separately) we test the null hypotheses of Complete Spatial Randomness, then combine the result into a single test. The method \code{quadrat.test.ppp} is applied to each component point pattern. Then the results are pooled using \code{\link{pool.quadrattest}} to obtain a single test. } \seealso{ \code{\link{quadrat.test}}, \code{\link{quadratcount}}, \code{\link{quadrats}}, \code{\link{quadratresample}}, \code{\link{chisq.test}}, \code{\link{cdf.test}}. To test a Poisson point process model against a specific Poisson alternative, use \code{\link{anova.ppm}}. } \value{ An object of class \code{"quadrattest"} which can be printed and plotted. 
} \examples{ data(humberside) qH <- quadrat.test(split(humberside), 2, 3) plot(qH) qH } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} spatstat/man/lengths.psp.Rd0000644000176200001440000000213113160710621015416 0ustar liggesusers\name{lengths.psp} \alias{lengths.psp} \title{Lengths of Line Segments} \description{ Computes the length of each line segment in a line segment pattern. } \usage{ lengths.psp(x, squared=FALSE) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{squared}{ Logical value indicating whether to return the squared lengths (\code{squared=TRUE}) or the lengths themselves (\code{squared=FALSE}, the default). } } \value{ Numeric vector. } \details{ The length of each line segment is computed and the lengths are returned as a numeric vector. Using squared lengths may be more efficient for some purposes, for example, to find the length of the shortest segment, \code{sqrt(min(lengths.psp(x, squared=TRUE)))} is faster than \code{min(lengths.psp(x))}. } \seealso{ \code{\link{summary.psp}}, \code{\link{midpoints.psp}}, \code{\link{angles.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- lengths.psp(a) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/ppm.Rd0000644000176200001440000003677413160710621013771 0ustar liggesusers\name{ppm} \alias{ppm} \alias{ppm.formula} \concept{point process model} \concept{Poisson point process} \concept{Gibbs point process} \title{ Fit Point Process Model to Data } \description{ Fits a point process model to an observed point pattern. } \usage{ ppm(Q, \dots) \method{ppm}{formula}(Q, interaction=NULL, \dots, data=NULL, subset) } \arguments{ \item{Q}{ A \code{formula} in the \R language describing the model to be fitted. 
} \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or a function that makes such an object, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{\dots}{ Arguments passed to \code{\link{ppm.ppp}} or \code{\link{ppm.quad}} to control the model-fitting process. } \item{data}{ Optional. The values of spatial covariates (other than the Cartesian coordinates) required by the model. Either a data frame, or a list whose entries are images, functions, windows, tessellations or single numbers. See Details. } \item{subset}{ Optional. An expression (which may involve the names of the Cartesian coordinates \code{x} and \code{y} and the names of entries in \code{data}) defining a subset of the spatial domain, to which the model-fitting should be restricted. The result of evaluating the expression should be either a logical vector, or a window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } } \value{ An object of class \code{"ppm"} describing a fitted point process model. See \code{\link{ppm.object}} for details of the format of this object and methods available for manipulating it. } \details{ This function fits a point process model to an observed point pattern. The model may include spatial trend, interpoint interaction, and dependence on covariates. The model fitted by \code{ppm} is either a Poisson point process (in which different points do not interact with each other) or a Gibbs point process (in which different points typically inhibit each other). For clustered point process models, use \code{\link{kppm}}. The function \code{ppm} is generic, with methods for the classes \code{formula}, \code{ppp} and \code{quad}. This page describes the method for a \code{formula}. The first argument is a \code{formula} in the \R language describing the spatial trend model to be fitted. 
It has the general form \code{pattern ~ trend} where the left hand side \code{pattern} is usually the name of a spatial point pattern (object of class \code{"ppp"}) to which the model should be fitted, or an expression which evaluates to a point pattern; and the right hand side \code{trend} is an expression specifying the spatial trend of the model. Systematic effects (spatial trend and/or dependence on spatial covariates) are specified by the \code{trend} expression on the right hand side of the formula. The trend may involve the Cartesian coordinates \code{x}, \code{y}, the marks \code{marks}, the names of entries in the argument \code{data} (if supplied), or the names of objects that exist in the \R session. The trend formula specifies the \bold{logarithm} of the intensity of a Poisson process, or in general, the logarithm of the first order potential of the Gibbs process. The formula should not use any names beginning with \code{.mpl} as these are reserved for internal use. If the formula is \code{pattern~1}, then the model to be fitted is stationary (or at least, its first order potential is constant). The symbol \code{.} in the trend expression stands for all the covariates supplied in the argument \code{data}. For example the formula \code{pattern ~ .} indicates an additive model with a main effect for each covariate in \code{data}. Stochastic interactions between random points of the point process are defined by the argument \code{interaction}. This is an object of class \code{"interact"} which is initialised in a very similar way to the usage of family objects in \code{\link{glm}} and \code{gam}. The interaction models currently available are: \GibbsInteractionsList. See the examples below. Note that it is possible to combine several interactions using \code{\link{Hybrid}}. 
If \code{interaction} is missing or \code{NULL}, then the model to be fitted has no interpoint interactions, that is, it is a Poisson process (stationary or nonstationary according to \code{trend}). In this case the methods of maximum pseudolikelihood and maximum logistic likelihood coincide with maximum likelihood. The fitted point process model returned by this function can be printed (by the print method \code{\link{print.ppm}}) to inspect the fitted parameter values. If a nonparametric spatial trend was fitted, this can be extracted using the predict method \code{\link{predict.ppm}}. To fit a model involving spatial covariates other than the Cartesian coordinates \eqn{x} and \eqn{y}, the values of the covariates should either be supplied in the argument \code{data}, or should be stored in objects that exist in the \R session. Note that it is not sufficient to have observed the covariate only at the points of the data point pattern; the covariate must also have been observed at other locations in the window. If it is given, the argument \code{data} is typically a list, with names corresponding to variables in the \code{trend} formula. Each entry in the list is either \describe{ \item{a pixel image,}{ giving the values of a spatial covariate at a fine grid of locations. It should be an object of class \code{"im"}, see \code{\link{im.object}}. } \item{a function,}{ which can be evaluated at any location \code{(x,y)} to obtain the value of the spatial covariate. It should be a \code{function(x, y)} or \code{function(x, y, ...)} in the \R language. The first two arguments of the function should be the Cartesian coordinates \eqn{x} and \eqn{y}. The function may have additional arguments; if the function does not have default values for these additional arguments, then the user must supply values for them, in \code{covfunargs}. See the Examples. } \item{a window,}{ interpreted as a logical variable which is \code{TRUE} inside the window and \code{FALSE} outside it. 
This should be an object of class \code{"owin"}. } \item{a tessellation,}{ interpreted as a factor covariate. For each spatial location, the factor value indicates which tile of the tessellation it belongs to. This should be an object of class \code{"tess"}. } \item{a single number,}{indicating a covariate that is constant in this dataset. } } The software will look up the values of each covariate at the required locations (quadrature points). Note that, for covariate functions, only the \emph{name} of the function appears in the trend formula. A covariate function is treated as if it were a single variable. The function arguments do not appear in the trend formula. See the Examples. If \code{data} is a list, the list entries should have names corresponding to (some of) the names of covariates in the model formula \code{trend}. The variable names \code{x}, \code{y} and \code{marks} are reserved for the Cartesian coordinates and the mark values, and these should not be used for variables in \code{data}. Alternatively, \code{data} may be a data frame giving the values of the covariates at specified locations. Then \code{pattern} should be a quadrature scheme (object of class \code{"quad"}) giving the corresponding locations. See \code{\link{ppm.quad}} for details. } \section{Interaction parameters}{ Apart from the Poisson model, every point process model fitted by \code{ppm} has parameters that determine the strength and range of \sQuote{interaction} or dependence between points. These parameters are of two types: \describe{ \item{regular parameters:}{ A parameter \eqn{\phi}{phi} is called \emph{regular} if the log likelihood is a linear function of \eqn{\theta}{theta} where \eqn{\theta = \theta(\psi)}{theta = theta(psi)} is some transformation of \eqn{\psi}{psi}. [Then \eqn{\theta}{theta} is called the canonical parameter.] } \item{irregular parameters}{ Other parameters are called \emph{irregular}. 
} } Typically, regular parameters determine the \sQuote{strength} of the interaction, while irregular parameters determine the \sQuote{range} of the interaction. For example, the Strauss process has a regular parameter \eqn{\gamma}{gamma} controlling the strength of interpoint inhibition, and an irregular parameter \eqn{r} determining the range of interaction. The \code{ppm} command is only designed to estimate regular parameters of the interaction. It requires the values of any irregular parameters of the interaction to be fixed. For example, to fit a Strauss process model to the \code{cells} dataset, you could type \code{ppm(cells ~ 1, Strauss(r=0.07))}. Note that the value of the irregular parameter \code{r} must be given. The result of this command will be a fitted model in which the regular parameter \eqn{\gamma}{gamma} has been estimated. To determine the irregular parameters, there are several practical techniques, but no general statistical theory available. Useful techniques include maximum profile pseudolikelihood, which is implemented in the command \code{\link{profilepl}}, and Newton-Raphson maximisation, implemented in the experimental command \code{\link{ippm}}. Some irregular parameters can be estimated directly from data: the hard-core radius in the model \code{\link{Hardcore}} and the matrix of hard-core radii in \code{\link{MultiHard}} can be estimated easily from data. In these cases, \code{ppm} allows the user to specify the interaction without giving the value of the irregular parameter. The user can give the hard core interaction as \code{interaction=Hardcore()} or even \code{interaction=Hardcore}, and the hard core radius will then be estimated from the data. } \section{Technical Warnings and Error Messages}{ See \code{\link{ppm.ppp}} for some technical warnings about the weaknesses of the algorithm, and explanation of some common error messages. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. 
(2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} 283--322. Berman, M. and Turner, T.R. (1992) Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41}, 31--38. Besag, J. (1975) Statistical analysis of non-lattice data. \emph{The Statistician} \bold{24}, 179-195. Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and Tanemura, M. (1994) On parameter estimation for pairwise interaction processes. \emph{International Statistical Review} \bold{62}, 99-117. Huang, F. and Ogata, Y. (1999) Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8}, 510--530. Jensen, J.L. and Moeller, M. (1991) Pseudolikelihood for exponential family models of spatial point processes. \emph{Annals of Applied Probability} \bold{1}, 445--461. Jensen, J.L. and Kuensch, H.R. (1994) On asymptotic normality of pseudo likelihood estimates for pairwise interaction processes, \emph{Annals of the Institute of Statistical Mathematics} \bold{46}, 475--486. } \seealso{ \code{\link{ppm.ppp}} and \code{\link{ppm.quad}} for more details on the fitting technique and edge correction. \code{\link{ppm.object}} for details of how to print, plot and manipulate a fitted model. \code{\link{ppp}} and \code{\link{quadscheme}} for constructing data. Interactions: \GibbsInteractionsList. See \code{\link{profilepl}} for advice on fitting nuisance parameters in the interaction, and \code{\link{ippm}} for irregular parameters in the trend. See \code{\link{valid.ppm}} and \code{\link{project.ppm}} for ensuring the fitted model is a valid point process. 
See \code{\link{kppm}} for fitting Cox point process models and cluster point process models, and \code{\link{dppm}} for fitting determinantal point process models. } \examples{ # fit the stationary Poisson process # to point pattern 'nztrees' ppm(nztrees ~ 1) \dontrun{ Q <- quadscheme(nztrees) ppm(Q ~ 1) # equivalent. } fit1 <- ppm(nztrees ~ x) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx) # where x,y are the Cartesian coordinates # and a,b are parameters to be estimated fit1 coef(fit1) coef(summary(fit1)) \dontrun{ ppm(nztrees ~ polynom(x,2)) } \testonly{ ppm(nztrees ~ polynom(x,2), nd=16) } # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx + cx^2) \dontrun{ library(splines) ppm(nztrees ~ bs(x,df=3)) } # WARNING: do not use predict.ppm() on this result # Fits the nonstationary Poisson process # with intensity function lambda(x,y) = exp(B(x)) # where B is a B-spline with df = 3 \dontrun{ ppm(nztrees ~ 1, Strauss(r=10), rbord=10) } \testonly{ ppm(nztrees ~ 1, Strauss(r=10), rbord=10, nd=16) } # Fit the stationary Strauss process with interaction range r=10 # using the border method with margin rbord=10 \dontrun{ ppm(nztrees ~ x, Strauss(13), correction="periodic") } \testonly{ ppm(nztrees ~ x, Strauss(13), correction="periodic", nd=16) } # Fit the nonstationary Strauss process with interaction range r=13 # and exp(first order potential) = activity = beta(x,y) = exp(a+bx) # using the periodic correction. 
# Compare Maximum Pseudolikelihood, Huang-Ogata and Variational Bayes fits: \dontrun{ppm(swedishpines ~ 1, Strauss(9))} \dontrun{ppm(swedishpines ~ 1, Strauss(9), method="ho")} \testonly{ppm(swedishpines ~ 1, Strauss(9), method="ho", nd=16, nsim=8)} ppm(swedishpines ~ 1, Strauss(9), method="VBlogi") # COVARIATES # X <- rpoispp(42) weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) } # # (a) covariate values as function ppm(X ~ y + weirdfunction) # # (b) covariate values in pixel image Zimage <- as.im(weirdfunction, unit.square()) ppm(X ~ y + Z, covariates=list(Z=Zimage)) # # (c) covariate values in data frame Q <- quadscheme(X) xQ <- x.quad(Q) yQ <- y.quad(Q) Zvalues <- weirdfunction(xQ,yQ) ppm(Q ~ y + Z, data=data.frame(Z=Zvalues)) # Note Q not X # COVARIATE FUNCTION WITH EXTRA ARGUMENTS # f <- function(x,y,a){ y - a } ppm(X ~ x + f, covfunargs=list(a=1/2)) # COVARIATE: inside/outside window b <- owin(c(0.1, 0.6), c(0.1, 0.9)) ppm(X ~ b) ## MULTITYPE POINT PROCESSES ### # fit stationary marked Poisson process # with different intensity for each species \dontrun{ppm(lansing ~ marks, Poisson())} \testonly{ ama <- amacrine[square(0.7)] a <- ppm(ama ~ marks, Poisson(), nd=16) } # fit nonstationary marked Poisson process # with different log-cubic trend for each species \dontrun{ppm(lansing ~ marks * polynom(x,y,3), Poisson())} \testonly{b <- ppm(ama ~ marks * polynom(x,y,2), Poisson(), nd=16)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/nnfun.lpp.Rd0000644000176200001440000000532113160710621015073 0ustar liggesusers\name{nnfun.lpp} \Rdversion{1.1} \alias{nnfun.lpp} \title{ Nearest Neighbour Map on Linear Network } \description{ Compute the nearest neighbour function of a point pattern on a linear network. } \usage{ \method{nnfun}{lpp}(X, ..., k=1) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{k}{ Integer. 
The algorithm finds the \code{k}th nearest neighbour in \code{X} from any spatial location. } \item{\dots}{ Other arguments are ignored. } } \details{ The (geodesic) \emph{nearest neighbour function} of a point pattern \code{X} on a linear network \code{L} tells us which point of \code{X} is closest to any given location. If \code{X} is a point pattern on a linear network \code{L}, the \emph{nearest neighbour function} of \code{X} is the mathematical function \eqn{f} defined for any location \eqn{s} on the network by \code{f(s) = i}, where \code{X[i]} is the closest point of \code{X} to the location \code{s} measured by the shortest path. In other words the value of \code{f(s)} is the identifier or serial number of the closest point of \code{X}. The command \code{nnfun.lpp} is a method for the generic command \code{\link{nnfun}} for the class \code{"lpp"} of point patterns on a linear network. If \code{X} is a point pattern on a linear network, \code{f <- nnfun(X)} returns a \emph{function} in the \R language, with arguments \code{x,y, \dots}, that represents the nearest neighbour function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields a vector of identifiers or serial numbers of the data points closest to these spatial locations. More efficiently \code{f} can take the arguments \code{x, y, seg, tp} where \code{seg} and \code{tp} are the local coordinates on the network. The result of \code{f <- nnfun(X)} also belongs to the class \code{"linfun"}. It can be printed and plotted immediately as shown in the Examples. It can be converted to a pixel image using \code{\link{as.linim}}. } \value{ A \code{function} in the \R language, with arguments \code{x,y} and optional arguments \code{seg,tp}. It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. 
} \seealso{ \code{\link{linfun}}, \code{\link{methods.linfun}}. To compute the \emph{distance} to the nearest neighbour, see \code{\link{distfun.lpp}}. } \examples{ data(letterR) X <- runiflpp(3, simplenet) f <- nnfun(X) f plot(f) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/quad.ppm.Rd0000644000176200001440000000625013160710621014704 0ustar liggesusers\name{quad.ppm} \alias{quad.ppm} \title{Extract Quadrature Scheme Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the quadrature scheme used to fit the model. } \usage{ quad.ppm(object, drop=FALSE, clip=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. } \item{clip}{ Logical value determining whether to erode the window, if \code{object} was fitted using the border correction. See Details. } } \value{ A quadrature scheme (object of class \code{"quad"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. This function \code{quad.ppm} extracts the quadrature scheme. A typical use of this function would be to inspect the quadrature scheme (points and weights) to gauge the accuracy of the approximation to the exact pseudolikelihood. 
Some quadrature points may not have been used in fitting the model. This happens if the border correction is used, and in other cases (e.g. when the value of a covariate is \code{NA} at these points). The argument \code{drop} specifies whether these unused quadrature points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. The quadrature scheme has a \emph{window}, which by default is set to equal the window of the original data. However this window may be larger than the actual domain of integration of the pseudolikelihood or composite likelihood that was used to fit the model. If \code{clip=TRUE} then the window of the quadrature scheme is set to the actual domain of integration. This option only has an effect when the model was fitted using the border correction; then the window is obtained by eroding the original data window by the border correction distance. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. See \code{\link{quad.object}} for a list of all operations that can be performed on objects of class \code{"quad"}. This function can also be applied to objects of class \code{"kppm"} and \code{"lppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{quad.object}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells ~1, Strauss(r=0.1)) Q <- quad.ppm(fit) \dontrun{plot(Q)} npoints(Q$data) npoints(Q$dummy) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/Math.im.Rd0000644000176200001440000000673213160710571014465 0ustar liggesusers\name{Math.im} \alias{Math.im} \alias{Ops.im} \alias{Complex.im} \alias{Summary.im} \title{S3 Group Generic methods for images} \description{ These are group generic methods for images of class \code{"im"}, which allows for usual mathematical functions and operators to be applied directly to images. See Details for a list of implemented functions. 
} \usage{ ## S3 methods for group generics have prototypes: \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Complex(z)} \special{Summary(\dots, na.rm=FALSE, drop=TRUE)} %NAMESPACE S3method("Math", "im") %NAMESPACE S3method("Ops", "im") %NAMESPACE S3method("Complex", "im") %NAMESPACE S3method("Summary", "im") } \arguments{ \item{x, z, e1, e2}{objects of class \code{"im"}.} \item{\dots}{further arguments passed to methods.} \item{na.rm,drop}{ Logical values specifying whether missing values should be removed. This will happen if either \code{na.rm=TRUE} or \code{drop=TRUE}. See Details. } } \details{ Below is a list of mathematical functions and operators which are defined for images. Not all functions will make sense for all types of images. For example, none of the functions in the \code{"Math"} group make sense for character-valued images. Note that the \code{"Ops"} group methods are implemented using \code{\link{eval.im}}, which tries to harmonise images via \code{\link{harmonise.im}} if they aren't compatible to begin with. 
\enumerate{ \item Group \code{"Math"}: \itemize{ \item \code{abs}, \code{sign}, \code{sqrt},\cr \code{floor}, \code{ceiling}, \code{trunc},\cr \code{round}, \code{signif} \item \code{exp}, \code{log}, \code{expm1}, \code{log1p},\cr \code{cos}, \code{sin}, \code{tan},\cr \code{cospi}, \code{sinpi}, \code{tanpi},\cr \code{acos}, \code{asin}, \code{atan} \code{cosh}, \code{sinh}, \code{tanh},\cr \code{acosh}, \code{asinh}, \code{atanh} \item \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma} \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin} } \item Group \code{"Ops"}: \itemize{ \item \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"}, \code{"^"}, \code{"\%\%"}, \code{"\%/\%"} \item \code{"&"}, \code{"|"}, \code{"!"} \item \code{"=="}, \code{"!="}, \code{"<"}, \code{"<="}, \code{">="}, \code{">"} } \item Group \code{"Summary"}: \itemize{ \item \code{all}, \code{any} \item \code{sum}, \code{prod} \item \code{min}, \code{max} \item \code{range} } \item Group \code{"Complex"}: \itemize{ \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re} } } For the \code{Summary} group, the generic has an argument \code{na.rm=FALSE}, but for pixel images it makes sense to set \code{na.rm=TRUE} so that pixels outside the domain of the image are ignored. To enable this, we added the argument \code{drop}. Pixel values that are \code{NA} are removed if \code{drop=TRUE} or if \code{na.rm=TRUE}. } \seealso{ \code{\link{eval.im}} for evaluating expressions involving images. } \examples{ ## Convert gradient values to angle of inclination: V <- atan(bei.extra$grad) * 180/pi ## Make logical image which is TRUE when heat equals 'Moderate': A <- (gorillas.extra$heat == "Moderate") ## Summary: any(A) ## Complex: Z <- exp(1 + V * 1i) Z Re(Z) } \author{ \spatstatAuthors and Kassel Hingee. 
} \keyword{spatial} \keyword{methods} spatstat/man/allstats.Rd0000644000176200001440000000603213160710571015010 0ustar liggesusers\name{allstats} \alias{allstats} \title{Calculate four standard summary functions of a point pattern.} \description{ Calculates the \eqn{F}, \eqn{G}, \eqn{J}, and \eqn{K} summary functions for an unmarked point pattern. Returns them as a function array (of class \code{"fasp"}, see \code{\link{fasp.object}}). } \usage{ allstats(pp, \dots, dataname=NULL, verb=FALSE) } \arguments{ \item{pp}{The observed point pattern, for which summary function estimates are required. An object of class \code{"ppp"}. It must not be marked. } \item{\dots}{ Optional arguments passed to the summary functions \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and \code{\link{Kest}}. } \item{dataname}{A character string giving an optional (alternative) name for the point pattern. } \item{verb}{A logical value meaning ``verbose''. If \code{TRUE}, progress reports are printed during calculation. } } \details{ This computes four standard summary statistics for a point pattern: the empty space function \eqn{F(r)}, nearest neighbour distance distribution function \eqn{G(r)}, van Lieshout-Baddeley function \eqn{J(r)} and Ripley's function \eqn{K(r)}. The real work is done by \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and \code{\link{Kest}} respectively. Consult the help files for these functions for further information about the statistical interpretation of \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K}. If \code{verb} is \code{TRUE}, then ``progress reports'' (just indications of completion) are printed out when the calculations are finished for each of the four function types. The overall title of the array of four functions (for plotting by \code{\link{plot.fasp}}) will be formed from the argument \code{dataname}. If this is not given, it defaults to the expression for \code{pp} given in the call to \code{allstats}. 
} \value{ A list of length 4 containing the \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K} functions respectively. The list can be plotted directly using \code{plot} (which dispatches to \code{\link{plot.solist}}). Each list entry retains the format of the output of the relevant estimating routine \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} or \code{\link{Kest}}. Thus each entry in the list is a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). The default formulae for plotting these functions are \code{cbind(km,theo) ~ r} for F, G, and J, and \code{cbind(trans,theo) ~ r} for K. } \author{\adrian and \rolf } \seealso{ \code{\link{plot.solist}}, \code{\link{plot.fv}}, \code{\link{fv.object}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} } \examples{ data(swedishpines) a <- allstats(swedishpines,dataname="Swedish Pines") \dontrun{ plot(a) plot(a, subset=list("r<=15","r<=15","r<=15","r<=50")) } } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.linim.Rd0000644000176200001440000001001213160710621015233 0ustar liggesusers\name{plot.linim} \alias{plot.linim} \title{ Plot Pixel Image on Linear Network } \description{ Given a pixel image on a linear network, the pixel values are displayed either as colours or as line widths. } \usage{ \method{plot}{linim}(x, ..., style = c("colour", "width"), scale, adjust = 1, negative.args = list(col=2), legend=TRUE, leg.side=c("right", "left", "bottom", "top"), leg.sep=0.1, leg.wid=0.1, leg.args=list(), leg.scale=1, zlim, do.plot=TRUE) } \arguments{ \item{x}{ The pixel image to be plotted. An object of class \code{"linim"}. } \item{\dots}{ Extra graphical parameters, passed to \code{\link{plot.im}} if \code{style="colour"}, or to \code{\link{polygon}} if \code{style="width"}. } \item{style}{ Character string specifying the type of plot. See Details. } \item{scale}{ Physical scale factor for representing the pixel values as line widths. 
} \item{adjust}{ Adjustment factor for the default scale. } \item{negative.args}{ A list of arguments to be passed to \code{\link[graphics]{polygon}} specifying how to plot negative values of \code{x} when \code{style="width"}. } \item{legend}{ Logical value indicating whether to plot a legend (colour ribbon or scale bar). } \item{leg.side}{ Character string indicating where to display the legend relative to the main image. } \item{leg.sep}{ Factor controlling the space between the legend and the image. } \item{leg.wid}{ Factor controlling the width of the legend. } \item{leg.scale}{ Rescaling factor for annotations on the legend. The values on the numerical scale printed beside the legend will be multiplied by this rescaling factor. } \item{leg.args}{ List of additional arguments passed to \code{\link[graphics]{image.default}}, \code{\link[graphics]{axis}} or \code{\link[graphics]{text.default}} to control the display of the legend. These may override the \code{\dots} arguments. } \item{zlim}{ The range of numerical values that should be mapped. A numeric vector of length 2. Defaults to the range of values of \code{x}. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. } } \details{ This is the \code{plot} method for objects of class \code{"linim"}. Such an object represents a pixel image defined on a linear network. If \code{style="colour"} (the default) then the pixel values of \code{x} are plotted as colours, using \code{\link{plot.im}}. If \code{style="width"} then the pixel values of \code{x} are used to determine the widths of thick lines centred on the line segments of the linear network. } \value{ If \code{style="colour"}, the result is an object of class \code{"colourmap"} specifying the colour map used. If \code{style="width"}, the result is a numeric value \code{v} giving the physical scale: one unit of pixel value is represented as \code{v} physical units on the plot. 
The result also has an attribute \code{"bbox"} giving a bounding box for the plot. The bounding box includes the ribbon or scale bar, if present, but not the main title. } \author{ \adrian } \seealso{ \code{\link{linim}}, \code{\link{plot.im}}, \code{\link{polygon}} } \references{ Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. } \examples{ X <- linfun(function(x,y,seg,tp){y^2+x}, simplenet) X <- as.linim(X) plot(X) plot(X, style="width", main="Width proportional to function value") # signed values f <- linfun(function(x,y,seg,tp){y-x}, simplenet) plot(f, style="w", main="Negative values in red") plot(f, style="w", negative.args=list(density=10), main="Negative values are hatched") } \keyword{spatial} spatstat/man/qqplot.ppm.Rd0000644000176200001440000003570013160710621015274 0ustar liggesusers\name{qqplot.ppm} \alias{qqplot.ppm} \title{ Q-Q Plot of Residuals from Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, produce a Q-Q plot based on residuals from the model. } \usage{ qqplot.ppm(fit, nsim=100, expr=NULL, \dots, type="raw", style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE, dimyx=NULL, nrep=if(fast) 5e4 else 1e5, control=update(default.rmhcontrol(fit), nrep=nrep), saveall=FALSE, monochrome=FALSE, limcol=if(monochrome) "black" else "red", maxerr=max(100, ceiling(nsim/10)), check=TRUE, repair=TRUE, envir.expr) } \arguments{ \item{fit}{ The fitted point process model, which is to be assessed using the Q-Q plot. An object of class \code{"ppm"}. Smoothed residuals obtained from this fitted model will provide the ``data'' quantiles for the Q-Q plot. } \item{nsim}{ The number of simulations from the ``reference'' point process model. 
} \item{expr}{ Determines the simulation mechanism which provides the ``theoretical'' quantiles for the Q-Q plot. See Details. } \item{\dots}{ Arguments passed to \code{\link{diagnose.ppm}} influencing the computation of residuals. } \item{type}{ String indicating the type of residuals or weights to be used. Current options are \code{"eem"} for the Stoyan-Grabarnik exponential energy weights, \code{"raw"} for the raw residuals, \code{"inverse"} for the inverse-lambda residuals, and \code{"pearson"} for the Pearson residuals. A partial match is adequate. } \item{style}{ Character string controlling the type of Q-Q plot. Options are \code{"classical"} and \code{"mean"}. See Details. } \item{fast}{ Logical flag controlling the speed and accuracy of computation. Use \code{fast=TRUE} for interactive use and \code{fast=FALSE} for publication standard plots. See Details. } \item{verbose}{ Logical flag controlling whether the algorithm prints progress reports during long computations. } \item{plot.it}{ Logical flag controlling whether the function produces a plot or simply returns a value (silently). } \item{dimyx}{ Dimensions of the pixel grid on which the smoothed residual field will be calculated. A vector of two integers. } \item{nrep}{ If \code{control} is absent, then \code{nrep} gives the number of iterations of the Metropolis-Hastings algorithm that should be used to generate one simulation of the fitted point process. } \item{control}{ List of parameters controlling the Metropolis-Hastings algorithm \code{\link{rmh}} which generates each simulated realisation from the model (unless the model is Poisson). This list becomes the argument \code{control} of \code{\link{rmh.default}}. It overrides \code{nrep}. } \item{saveall}{ Logical flag indicating whether to save all the intermediate calculations. } \item{monochrome}{ Logical flag indicating whether the plot should be in black and white (\code{monochrome=TRUE}), or in colour (\code{monochrome=FALSE}). 
} \item{limcol}{ String. The colour to be used when plotting the 95\% limit curves. } \item{maxerr}{ Maximum number of failures tolerated while generating simulated realisations. See Details. } \item{check}{ Logical value indicating whether to check the internal format of \code{fit}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{fit}, if it is found to be damaged. } \item{envir.expr}{ Optional. An environment in which the expression \code{expr} should be evaluated. } } \value{ An object of class \code{"qqppm"} containing the information needed to reproduce the Q-Q plot. Entries \code{x} and \code{y} are numeric vectors containing quantiles of the simulations and of the data, respectively. } \details{ This function generates a Q-Q plot of the residuals from a fitted point process model. It is an addendum to the suite of diagnostic plots produced by the function \code{\link{diagnose.ppm}}, kept separate because it is computationally intensive. The quantiles of the theoretical distribution are estimated by simulation. In classical statistics, a Q-Q plot of residuals is a useful diagnostic for checking the distributional assumptions. Analogously, in spatial statistics, a Q-Q plot of the (smoothed) residuals from a fitted point process model is a useful way to check the interpoint interaction part of the model (Baddeley et al, 2005). The systematic part of the model (spatial trend, covariate effects, etc) is assessed using other plots made by \code{\link{diagnose.ppm}}. The argument \code{fit} represents the fitted point process model. It must be an object of class \code{"ppm"} (typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). 
Residuals will be computed for this fitted model using \code{\link{residuals.ppm}}, and the residuals will be kernel-smoothed to produce a ``residual field''. The values of this residual field will provide the ``data'' quantiles for the Q-Q plot. The argument \code{expr} is not usually specified. It provides a way to modify the ``theoretical'' or ``reference'' quantiles for the Q-Q plot. In normal usage we set \code{expr=NULL}. The default is to generate \code{nsim} simulated realisations of the fitted model \code{fit}, re-fit this model to each of the simulated patterns, evaluate the residuals from these fitted models, and use the kernel-smoothed residual field from these fitted models as a sample from the reference distribution for the Q-Q plot. In advanced use, \code{expr} may be an \code{expression}. It will be re-evaluated \code{nsim} times, and should include random computations so that the results are not identical each time. The result of evaluating \code{expr} should be either a point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"}). If the value is a point pattern, then the original fitted model \code{fit} will be fitted to this new point pattern using \code{\link{update.ppm}}, to yield another fitted model. Smoothed residuals obtained from these \code{nsim} fitted models will yield the ``theoretical'' quantiles for the Q-Q plot. Alternatively \code{expr} can be a list of point patterns, or an \code{envelope} object that contains a list of point patterns (typically generated by calling \code{\link{envelope}} with \code{savepatterns=TRUE}). These point patterns will be used as the simulated patterns. Simulation is performed (if \code{expr=NULL}) using the Metropolis-Hastings algorithm \code{\link{rmh}}. Each simulated realisation is the result of running the Metropolis-Hastings algorithm from an independent random starting state each time. 
The iterative and termination behaviour of the Metropolis-Hastings algorithm are governed by the argument \code{control}. See \code{\link{rmhcontrol}} for information about this argument. As a shortcut, the argument \code{nrep} determines the number of Metropolis-Hastings iterations used to generate each simulated realisation, if \code{control} is absent. By default, simulations are generated in an expanded window. Use the argument \code{control} to change this, as explained in the section on \emph{Warning messages}. The argument \code{type} selects the type of residual or weight that will be computed. For options, see \code{\link{diagnose.ppm}}. The argument \code{style} determines the type of Q-Q plot. It is highly recommended to use the default, \code{style="mean"}. \describe{ \item{\code{style="classical"}}{ The quantiles of the residual field for the data (on the \eqn{y} axis) are plotted against the quantiles of the \bold{pooled} simulations (on the \eqn{x} axis). This plot is biased, and therefore difficult to interpret, because of strong autocorrelations in the residual field and the large differences in sample size. } \item{\code{style="mean"}}{ The order statistics of the residual field for the data are plotted against the sample means, over the \code{nsim} simulations, of the corresponding order statistics of the residual field for the simulated datasets. Dotted lines show the 2.5 and 97.5 percentiles, over the \code{nsim} simulations, of each order statistic. } } The argument \code{fast} is a simple way to control the accuracy and speed of computation. If \code{fast=FALSE}, the residual field is computed on a fine grid of pixels (by default 100 by 100 pixels, see below) and the Q-Q plot is based on the complete set of order statistics (usually 10,000 quantiles). If \code{fast=TRUE}, the residual field is computed on a coarse grid (at most 40 by 40 pixels) and the Q-Q plot is based on the \emph{percentiles} only. This is about 7 times faster. 
It is recommended to use \code{fast=TRUE} for interactive data analysis and \code{fast=FALSE} for definitive plots for publication. The argument \code{dimyx} gives full control over the resolution of the pixel grid used to calculate the smoothed residuals. Its interpretation is the same as the argument \code{dimyx} to the function \code{\link{as.mask}}. Note that \code{dimyx[1]} is the number of pixels in the \eqn{y} direction, and \code{dimyx[2]} is the number in the \eqn{x} direction. If \code{dimyx} is not present, then the default pixel grid dimensions are controlled by \code{spatstat.options("npixel")}. Since the computation is so time-consuming, \code{qqplot.ppm} returns a list containing all the data necessary to re-display the Q-Q plot. It is advisable to assign the result of \code{qqplot.ppm} to something (or use \code{.Last.value} if you forgot to.) The return value is an object of class \code{"qqppm"}. There are methods for \code{\link{plot.qqppm}} and \code{\link{print.qqppm}}. See the Examples. The argument \code{saveall} is usually set to \code{FALSE}. If \code{saveall=TRUE}, then the intermediate results of calculation for each simulated realisation are saved and returned. The return value includes a 3-dimensional array \code{sim} containing the smoothed residual field images for each of the \code{nsim} realisations. When \code{saveall=TRUE}, the return value is an object of very large size, and should not be saved on disk. Errors may occur during the simulation process, because random data are generated. For example: \itemize{ \item one of the simulated patterns may be empty. \item one of the simulated patterns may cause an error in the code that fits the point process model. \item the user-supplied argument \code{expr} may have a bug. } Empty point patterns do not cause a problem for the code, but they are reported. Other problems that would lead to a crash are trapped; the offending simulated data are discarded, and the simulation is retried. 
The argument \code{maxerr} determines the maximum number of times that such errors will be tolerated (mainly as a safeguard against an infinite loop). } \section{Side Effects}{ Produces a Q-Q plot if \code{plot.it} is TRUE. } \section{Warning messages}{ A warning message will be issued if any of the simulations trapped an error (a potential crash). A warning message will be issued if all, or many, of the simulated point patterns are empty. This usually indicates a problem with the simulation procedure. The default behaviour of \code{qqplot.ppm} is to simulate patterns on an expanded window (specified through the argument \code{control}) in order to avoid edge effects. The model's trend is extrapolated over this expanded window. If the trend is strongly inhomogeneous, the extrapolated trend may have very large (or even infinite) values. This can cause the simulation algorithm to produce empty patterns. The only way to suppress this problem entirely is to prohibit the expansion of the window, by setting the \code{control} argument to something like \code{control=list(nrep=1e6, expand=1)}. Here \code{expand=1} means there will be no expansion. See \code{\link{rmhcontrol}} for more information about the argument \code{control}. } \references{ Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten}, 151:95--100. 
} \seealso{ \code{\link{diagnose.ppm}}, \code{\link{lurking}}, \code{\link{residuals.ppm}}, \code{\link{eem}}, \code{\link{ppm.object}}, \code{\link{ppm}}, \code{\link{rmh}}, \code{\link{rmhcontrol}} } \examples{ data(cells) fit <- ppm(cells, ~1, Poisson()) diagnose.ppm(fit) # no suggestion of departure from stationarity \dontrun{qqplot.ppm(fit, 80) # strong evidence of non-Poisson interaction} \testonly{qqplot.ppm(fit, 4)} \dontrun{ diagnose.ppm(fit, type="pearson") qqplot.ppm(fit, type="pearson") } \testonly{qqplot.ppm(fit, 4, type="pearson")} ########################################### ## oops, I need the plot coordinates mypreciousdata <- .Last.value \dontrun{mypreciousdata <- qqplot.ppm(fit, type="pearson")} \testonly{mypreciousdata <- qqplot.ppm(fit, 4, type="pearson")} plot(mypreciousdata) ###################################################### # Q-Q plots based on fixed n # The above QQ plots used simulations from the (fitted) Poisson process. # But I want to simulate conditional on n, instead of Poisson # Do this by setting rmhcontrol(p=1) fixit <- list(p=1) \dontrun{qqplot.ppm(fit, 100, control=fixit)} \testonly{qqplot.ppm(fit, 4, control=fixit)} ###################################################### # Inhomogeneous Poisson data X <- rpoispp(function(x,y){1000 * exp(-3*x)}, 1000) plot(X) # Inhomogeneous Poisson model fit <- ppm(X, ~x, Poisson()) \dontrun{qqplot.ppm(fit, 100)} \testonly{qqplot.ppm(fit, 4)} # conclusion: fitted inhomogeneous Poisson model looks OK ###################################################### # Advanced use of 'expr' argument # # set the initial conditions in Metropolis-Hastings algorithm # expr <- expression(rmh(fit, start=list(n.start=42), verbose=FALSE)) \dontrun{qqplot.ppm(fit, 100, expr)} \testonly{qqplot.ppm(fit, 4, expr)} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat/man/as.matrix.im.Rd0000644000176200001440000000232513160710571015474 0ustar liggesusers\name{as.matrix.im} 
\alias{as.matrix.im} \alias{as.array.im} \title{Convert Pixel Image to Matrix or Array} \description{ Converts a pixel image to a matrix or an array. } \usage{ \method{as.matrix}{im}(x, ...) \method{as.array}{im}(x, ...) } \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{\dots}{See below.} } \details{ The function \code{as.matrix.im} converts the pixel image \code{x} into a matrix containing the pixel values. It is handy when you want to extract a summary of the pixel values. See the Examples. The function \code{as.array.im} converts the pixel image to an array. By default this is a three-dimensional array of dimension \eqn{n} by \eqn{m} by \eqn{1}. If the extra arguments \code{\dots} are given, they will be passed to \code{\link{array}}, and they may change the dimensions of the array. } \value{ A matrix or array. } \seealso{ \code{\link{as.matrix.owin}} } \examples{ # artificial image Z <- setcov(square(1)) M <- as.matrix(Z) median(M) \dontrun{ # plot the cumulative distribution function of pixel values plot(ecdf(as.matrix(Z))) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/distfun.Rd0000644000176200001440000000560213160710571014637 0ustar liggesusers\name{distfun} \Rdversion{1.1} \alias{distfun} \alias{distfun.ppp} \alias{distfun.psp} \alias{distfun.owin} \title{ Distance Map as a Function } \description{ Compute the distance function of an object, and return it as a function. } \usage{ distfun(X, \dots) \method{distfun}{ppp}(X, \dots, k=1) \method{distfun}{psp}(X, \dots) \method{distfun}{owin}(X, \dots, invert=FALSE) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}) or a line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Extra arguments are ignored. } \item{k}{ An integer. The distance to the \code{k}th nearest point will be computed. 
} \item{invert}{ If \code{TRUE}, compute the distance transform of the complement of \code{X}. } } \details{ The \dQuote{distance function} of a set of points \eqn{A} is the mathematical function \eqn{f} such that, for any two-dimensional spatial location \eqn{(x,y)}, the function value \code{f(x,y)} is the shortest distance from \eqn{(x,y)} to \eqn{A}. The command \code{f <- distfun(X)} returns a \emph{function} in the \R language, with arguments \code{x,y}, that represents the distance function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the values of the distance function at these locations. Alternatively \code{x} can be a point pattern (object of class \code{"ppp"} or \code{"lpp"}) of locations at which the distance function should be computed (and then \code{y} should be missing). This should be contrasted with the related command \code{\link{distmap}} which computes the distance function of \code{X} on a grid of locations, and returns the distance values in the form of a pixel image. The result of \code{f <- distfun(X)} also belongs to the class \code{"funxy"} and to the special class \code{"distfun"}. It can be printed and plotted immediately as shown in the Examples. A \code{distfun} object can be converted to a pixel image using \code{\link{as.im}}. } \value{ A \code{function} with arguments \code{x,y}. The function also belongs to the class \code{"distfun"} which has a method for \code{print}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{distmap}}, \code{\link{plot.funxy}} } \examples{ data(letterR) f <- distfun(letterR) f plot(f) f(0.2, 0.3) plot(distfun(letterR, invert=TRUE), eps=0.1) d <- distfun(cells) d2 <- distfun(cells, k=2) d(0.5, 0.5) d2(0.5, 0.5) z <- d(japanesepines) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{math} spatstat/man/Ldot.inhom.Rd0000644000176200001440000000656313160710571015205 0ustar liggesusers\name{Ldot.inhom} \alias{Ldot.inhom} \title{ Inhomogeneous Multitype L Dot Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the dot \eqn{L} function. } \usage{ Ldot.inhom(X, i, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{L} function \eqn{L_{i\bullet}(r)}{Li.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{\dots}{ Other arguments passed to \code{\link{Kdot.inhom}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{L_{i\bullet}(r)}{Li.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{L_{i\bullet}(r)}{Li.(r)} for a marked Poisson process, identical to \eqn{r}. } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{i\bullet}(r)}{Li.(r)} obtained by the edge corrections named. } \details{ This a generalisation of the function \code{\link{Ldot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Linhom}}. All the arguments are passed to \code{\link{Kdot.inhom}}, which estimates the inhomogeneous multitype K function \eqn{K_{i\bullet}(r)}{Ki.(r)} for the point pattern. 
The resulting values are then transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \references{ \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Ldot}}, \code{\link{Linhom}}, \code{\link{Kdot.inhom}}, \code{\link{Lcross.inhom}}. } \examples{ # Lansing Woods data lan <- lansing lan <- lan[seq(1,npoints(lan), by=10)] ma <- split(lan)$maple lg <- unmark(lan) # Estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdadot <- density.ppp(lg, sigma=0.15, at="points") L <- Ldot.inhom(lan, "maple", lambdaI=lambdaM, lambdadot=lambdadot) # synthetic example: type A points have intensity 50, # type B points have intensity 50 + 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) L <- Ldot.inhom(X, "B", lambdaI=lamB, lambdadot=lamdot) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat/man/nnfromvertex.Rd0000644000176200001440000000250313160710621015711 0ustar liggesusers\name{nnfromvertex} \alias{nnfromvertex} \title{ Nearest Data Point From Each Vertex in a Network } \description{ Given a point pattern on a linear network, for each vertex of the network find the nearest data point. } \usage{ nnfromvertex(X, what = c("dist", "which"), k = 1) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{what}{ Character string specifying whether to return the nearest-neighbour distances, nearest-neighbour identifiers, or both. 
} \item{k}{ Integer, or integer vector, specifying that the \code{k}th nearest neighbour should be returned. } } \details{ For each vertex (node) of the linear network, this algorithm finds the nearest data point to the vertex, and returns either the distance from the vertex to its nearest neighbour in \code{X}, or the serial number of the nearest neighbour in \code{X}, or both. If \code{k} is an integer, then the \code{k}-th nearest neighbour is found instead. If \code{k} is an integer vector, this is repeated for each integer in \code{k}. } \value{ A numeric vector, matrix, or data frame. } \author{ \adrian. } \seealso{ \code{\link{nndist.lpp}} } \examples{ X <- runiflpp(5, simplenet) nnfromvertex(X) nnfromvertex(X, k=1:3) } \keyword{spatial} \keyword{math} spatstat/man/inside.boxx.Rd0000644000176200001440000000353513160710621015414 0ustar liggesusers\name{inside.boxx} \alias{inside.boxx} \title{Test Whether Points Are Inside A Multidimensional Box} \description{ Test whether points lie inside or outside a given multidimensional box. } \usage{ inside.boxx(\dots, w) } \arguments{ \item{\dots}{ Coordinates of points to be tested. One vector for each dimension (all of same length). (Alternatively, a single point pattern object of class \code{"\link{ppx}"} or its coordinates as a \code{"\link{hyperframe}"}) } \item{w}{A window. This should be an object of class \code{\link{boxx}}, or can be given in any format acceptable to \code{\link{as.boxx}()}. } } \value{ Logical vector whose \code{i}th entry is \code{TRUE} if the corresponding point is inside \code{w}. } \details{ This function tests whether each of the points \code{(x[i],y[i])} lies inside or outside the window \code{w} and returns \code{TRUE} if it is inside. The boundary of the window is treated as being inside. Normally each argument provided (except \code{w}) must be numeric vectors of equal length (length zero is allowed) containing the coordinates of points. 
Alternatively a single point pattern (object of class \code{"ppx"}) can be given; then the coordinates of the point pattern are extracted. } \seealso{ \code{\link{boxx}}, \code{\link{as.boxx}} } \examples{ # Random points in box with side [0,2] w <- boxx(c(0,2), c(0,2), c(0,2)) # Random points in box with side [-1,3] x <- runif(30, min=-1, max=3) y <- runif(30, min=-1, max=3) z <- runif(30, min=-1, max=3) # Points falling in smaller box ok <- inside.boxx(x, y, z, w=w) # Same using a point pattern as argument: X <- ppx(data = cbind(x, y, z), domain = boxx(c(0,3), c(0,3), c(0,3))) ok2 <- inside.boxx(X, w=w) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/npoints.Rd0000644000176200001440000000162313160710621014650 0ustar liggesusers\name{npoints} \alias{npoints} \alias{npoints.ppp} \alias{npoints.pp3} \alias{npoints.ppx} \title{Number of Points in a Point Pattern} \description{ Returns the number of points in a point pattern of any kind. } \usage{ npoints(x) \method{npoints}{ppp}(x) \method{npoints}{pp3}(x) \method{npoints}{ppx}(x) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}, \code{"pp3"}, \code{"ppx"} or some other suitable class). } } \value{ Integer. } \details{ This function returns the number of points in a point pattern. The function \code{npoints} is generic with methods for the classes \code{"ppp"}, \code{"pp3"}, \code{"ppx"} and possibly other classes. } \seealso{ \code{\link{ppp.object}}, \code{\link{print.pp3}}, \code{\link{print.ppx}}. 
} \examples{ data(cells) npoints(cells) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/default.rmhcontrol.Rd0000644000176200001440000000277513160710571017005 0ustar liggesusers\name{default.rmhcontrol} \alias{default.rmhcontrol} \title{Set Default Control Parameters for Metropolis-Hastings Algorithm.} \description{ Given a fitted point process model, this command sets appropriate default values of the parameters controlling the iterative behaviour of the Metropolis-Hastings algorithm. } \usage{ default.rmhcontrol(model, w=NULL) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}) } \item{w}{ Optional. Window for the resulting simulated patterns. } } \value{ An object of class \code{"rmhcontrol"}. See \code{\link{rmhcontrol}}. } \details{ This function sets the values of the parameters controlling the iterative behaviour of the Metropolis-Hastings simulation algorithm. It uses default values that would be appropriate for the fitted point process model \code{model}. The expansion parameter \code{expand} is set to \code{\link{default.expand}(model, w)}. All other parameters revert to their defaults given in \code{\link{rmhcontrol.default}}. See \code{\link{rmhcontrol}} for the full list of control parameters. To override default parameters, use \code{\link{update.rmhcontrol}}. } \seealso{ \code{\link{rmhcontrol}}, \code{\link{update.rmhcontrol}}, \code{\link{ppm}}, \code{\link{default.expand}} } \examples{ fit <- ppm(cells, ~1, Strauss(0.1)) default.rmhcontrol(fit) default.rmhcontrol(fit, w=square(2)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/update.rmhcontrol.Rd0000644000176200001440000000210413160710621016621 0ustar liggesusers\name{update.rmhcontrol} \alias{update.rmhcontrol} \title{Update Control Parameters of Metropolis-Hastings Algorithm} \description{ \code{update} method for class \code{"rmhcontrol"}. 
} \usage{ \method{update}{rmhcontrol}(object, \dots) } \arguments{ \item{object}{ Object of class \code{"rmhcontrol"} containing control parameters for a Metropolis-Hastings algorithm. } \item{\dots}{ Arguments to be updated in the new call to \code{\link{rmhcontrol}}. } } \details{ This is a method for the generic function \code{\link{update}} for the class \code{"rmhcontrol"}. An object of class \code{"rmhcontrol"} describes a set of control parameters for the Metropolis-Hastings simulation algorithm. See \code{\link{rmhcontrol}}. \code{update.rmhcontrol} will modify the parameters specified by \code{object} according to the new arguments given. } \value{ Another object of class \code{"rmhcontrol"}. } \examples{ a <- rmhcontrol(expand=1) update(a, expand=2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/blur.Rd0000644000176200001440000000671313160710571014133 0ustar liggesusers\name{blur} \alias{blur} \alias{Smooth.im} \title{Apply Gaussian Blur to a Pixel Image} \description{ Applies a Gaussian blur to a pixel image. } \usage{ blur(x, sigma = NULL, ..., normalise=FALSE, bleed = TRUE, varcov=NULL) \method{Smooth}{im}(X, sigma = NULL, ..., normalise=FALSE, bleed = TRUE, varcov=NULL) } \arguments{ \item{x,X}{The pixel image. An object of class \code{"im"}.} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{\dots}{ Ignored. } \item{normalise}{ Logical flag indicating whether the output values should be divided by the corresponding blurred image of the window itself. See Details. } \item{bleed}{ Logical flag indicating whether to allow blur to extend outside the original domain of the image. See Details. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel. Incompatible with \code{sigma}. } } \details{ This command applies a Gaussian blur to the pixel image \code{x}. \code{Smooth.im} is a method for the generic \code{\link{Smooth}} for pixel images.
It is currently identical to \code{blur}, apart from the name of the first argument. The blurring kernel is the isotropic Gaussian kernel with standard deviation \code{sigma}, or the anisotropic Gaussian kernel with variance-covariance matrix \code{varcov}. The arguments \code{sigma} and \code{varcov} are incompatible. Also \code{sigma} may be a vector of length 2 giving the standard deviations of two independent Gaussian coordinates, thus equivalent to \code{varcov = diag(sigma^2)}. If the pixel values of \code{x} include some \code{NA} values (meaning that the image domain does not completely fill the rectangular frame) then these \code{NA} values are first reset to zero. The algorithm then computes the convolution \eqn{x \ast G}{x * G} of the (zero-padded) pixel image \eqn{x} with the specified Gaussian kernel \eqn{G}. If \code{normalise=FALSE}, then this convolution \eqn{x\ast G}{x * G} is returned. If \code{normalise=TRUE}, then the convolution \eqn{x \ast G}{x * G} is normalised by dividing it by the convolution \eqn{w \ast G}{w * G} of the image domain \code{w} with the same Gaussian kernel. Normalisation ensures that the result can be interpreted as a weighted average of input pixel values, without edge effects due to the shape of the domain. If \code{bleed=FALSE}, then pixel values outside the original image domain are set to \code{NA}. Thus the output is a pixel image with the same domain as the input. If \code{bleed=TRUE}, then no such alteration is performed, and the result is a pixel image defined everywhere in the rectangular frame containing the input image. Computation is performed using the Fast Fourier Transform. } \value{ A pixel image with the same pixel array as the input image \code{x}. } \seealso{ \code{\link{interp.im}} for interpolating a pixel image to a finer resolution, \code{\link{density.ppp}} for blurring a point pattern, \code{\link{Smooth.ppp}} for interpolating marks attached to points. 
} \examples{ data(letterR) Z <- as.im(function(x,y) { 4 * x^2 + 3 * y }, letterR) par(mfrow=c(1,3)) plot(Z) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=TRUE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) par(mfrow=c(1,1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/rjitter.Rd0000644000176200001440000000427413160710621014646 0ustar liggesusers\name{rjitter} \alias{rjitter} \title{Random Perturbation of a Point Pattern} \description{ Applies independent random displacements to each point in a point pattern. } \usage{ rjitter(X, radius, retry=TRUE, giveup = 10000, \dots, nsim=1, drop=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{radius}{ Scale of perturbations. A positive numerical value. The displacement vectors will be uniformly distributed in a circle of this radius. There is a sensible default. } \item{retry}{ What to do when a perturbed point lies outside the window of the original point pattern. If \code{retry=FALSE}, the point will be lost; if \code{retry=TRUE}, the algorithm will try again. } \item{giveup}{ Maximum number of unsuccessful attempts. } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}, in the same window as \code{X}. } \details{ Each of the points in the point pattern \code{X} is subjected to an independent random displacement. The displacement vectors are uniformly distributed in a circle of radius \code{radius}. If a displaced point lies outside the window, then if \code{retry=FALSE} the point will be lost. 
However if \code{retry=TRUE}, the algorithm will try again: each time a perturbed point lies outside the window, the algorithm will reject it and generate another proposed perturbation of the original point, until one lies inside the window, or until \code{giveup} unsuccessful attempts have been made. In the latter case, any unresolved points will be included without any perturbation. The return value will always be a point pattern with the same number of points as \code{X}. } \examples{ X <- rsyst(owin(), 10, 10) Y <- rjitter(X, 0.02) plot(Y) Z <- rjitter(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/pairs.im.Rd0000644000176200001440000000561113160710621014701 0ustar liggesusers\name{pairs.im} \alias{pairs.im} \title{ Scatterplot Matrix for Pixel Images } \description{ Produces a scatterplot matrix of the pixel values in two or more pixel images. } \usage{ \method{pairs}{im}(..., plot=TRUE) } \arguments{ \item{\dots}{ Any number of arguments, each of which is either a pixel image (object of class \code{"im"}) or a named argument to be passed to \code{\link{pairs.default}}. } \item{plot}{ Logical. If \code{TRUE}, the scatterplot matrix is plotted. } } \details{ This is a method for the generic function \code{\link{pairs}} for the class of pixel images. It produces a square array of plot panels, in which each panel shows a scatterplot of the pixel values of one image against the corresponding pixel values of another image. At least two of the arguments \code{\dots} should be pixel images (objects of class \code{"im"}). Their spatial domains must overlap, but need not have the same pixel dimensions. First the pixel image domains are intersected, and converted to a common pixel resolution. Then the corresponding pixel values of each image are extracted. Then \code{\link{pairs.default}} is called to plot the scatterplot matrix. 
Any arguments in \code{\dots} which are not pixel images will be passed to \code{\link{pairs.default}} to control the plot. } \section{Image or Contour Plots}{ Since the scatterplots may show very dense concentrations of points, it may be useful to set \code{panel=panel.image} or \code{panel=panel.contour} to draw a colour image or contour plot of the kernel-smoothed density of the scatterplot in each panel. The argument \code{panel} is passed to \code{\link{pairs.default}}. See the help for \code{\link{panel.image}} and \code{\link{panel.contour}}. } \section{Low Level Control of Graphics}{ To control the appearance of the individual scatterplot panels, see \code{\link{pairs.default}}, \code{\link{points}} or \code{\link{par}}. To control the plotting symbol for the points in the scatterplot, use the arguments \code{pch}, \code{col}, \code{bg} as described under \code{\link{points}} (because the default panel plotter is the function \code{\link{points}}). To suppress the tick marks on the plot axes, type \code{par(xaxt="n", yaxt="n")} before calling \code{pairs}. } \value{ Invisible. A \code{data.frame} containing the corresponding pixel values for each image. The return value also belongs to the class \code{plotpairsim} which has a plot method, so that it can be re-plotted. } \seealso{ \code{\link{pairs}}, \code{\link{pairs.default}}, \code{\link{panel.contour}}, \code{\link{panel.image}}, \code{\link{plot.im}}, \code{\link{im}}, \code{\link{par}} } \examples{ X <- density(rpoispp(30)) Y <- density(rpoispp(40)) Z <- density(rpoispp(30)) pairs(X,Y,Z) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/as.data.frame.psp.Rd0000644000176200001440000000253413160710571016371 0ustar liggesusers\name{as.data.frame.psp} \alias{as.data.frame.psp} \title{Coerce Line Segment Pattern to a Data Frame} \description{ Extracts the coordinates of the endpoints in a line segment pattern, and their marks if any, and returns them in a data frame. 
} \usage{ \method{as.data.frame}{psp}(x, row.names = NULL, ...) } \arguments{ \item{x}{Line segment pattern (object of class \code{"psp"}).} \item{row.names}{Optional character vector of row names.} \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class \code{"psp"} of line segment patterns. It extracts the coordinates of the endpoints of the line segments, and returns them as columns named \code{x0}, \code{y0}, \code{x1} and \code{y1} in a data frame. If the line segments were marked, the marks are appended as an extra column or columns to the data frame which is returned. If the marks are a vector then a single column named \code{marks} is appended to the data frame, with the same type as in the line segment pattern dataset. If the marks are a data frame, then the columns of this data frame are appended (retaining their names). } \value{ A data frame with 4 or 5 columns. } \examples{ data(copper) df <- as.data.frame(copper$Lines) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/contour.im.Rd0000644000176200001440000000636213160710571015264 0ustar liggesusers\name{contour.im} \alias{contour.im} \title{Contour plot of pixel image} \description{ Generates a contour plot of a pixel image. } \usage{ \method{contour}{im}(x, \dots, main, axes=FALSE, add=FALSE, col=par("fg"), clipwin=NULL, show.all=!add, do.plot=TRUE) } \arguments{ \item{x}{ Pixel image to be plotted. An object of class \code{"im"}. } \item{main}{ Character string to be displayed as the main title. } \item{axes}{ Logical. If \code{TRUE}, coordinate axes are plotted (with tick marks) around a region slightly larger than the image window. If \code{FALSE} (the default), no axes are plotted, and a box is drawn tightly around the image window. Ignored if \code{add=TRUE}. } \item{add}{ Logical. If \code{FALSE}, a new plot is created. If \code{TRUE}, the contours are drawn over the existing plot.
} \item{col}{ Colour in which to draw the contour lines. Either a single value that can be interpreted as a colour value, or a \code{colourmap} object. } \item{clipwin}{ Optional. A window (object of class \code{"owin"}). Only this subset of the data will be displayed. } \item{\dots}{ Other arguments passed to \code{\link{contour.default}} controlling the contour plot; see Details. } \item{show.all}{ Logical value indicating whether to display all plot elements including the main title, bounding box, and (if \code{axis=TRUE}) coordinate axis markings. Default is \code{TRUE} for new plots and \code{FALSE} for added plots. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. } } \details{ This is a method for the generic \code{contour} function, for objects of the class \code{"im"}. An object of class \code{"im"} represents a pixel image; see \code{\link{im.object}}. This function displays the values of the pixel image \code{x} as a contour plot on the current plot device, using equal scales on the \eqn{x} and \eqn{y} axes. The appearance of the plot can be modified using any of the arguments listed in the help for \code{\link{contour.default}}. Useful ones include: \describe{ \item{nlevels}{ Number of contour levels to plot. } \item{drawlabels}{ Whether to label the contour lines with text. } \item{col,lty,lwd}{ Colour, type, and width of contour lines. } } See \code{\link{contour.default}} for a full list of these arguments. The defaults for any of the abovementioned arguments can be reset using \code{\link{spatstat.options}("par.contour")}. If \code{col} is a colour map (object of class \code{"colourmap"}, see \code{\link{colourmap}}) then the contours will be plotted in different colours as determined by the colour map. The contour at level \code{z} will be plotted in the colour \code{col(z)} associated with this level in the colour map. } \value{ none. 
} \examples{ # an image Z <- setcov(owin()) contour(Z, axes=TRUE) contour(Z) co <- colourmap(rainbow(100), range=c(0,1)) contour(Z, col=co, lwd=2) } \seealso{ \code{\link{im.object}}, \code{\link{plot.im}}, \code{\link{persp.im}} } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/nndist.ppx.Rd0000644000176200001440000000611513160710621015264 0ustar liggesusers\name{nndist.ppx} \alias{nndist.ppx} \title{Nearest Neighbour Distances in Any Dimensions} \description{ Computes the distance from each point to its nearest neighbour in a multi-dimensional point pattern. Alternatively computes the distance to the second nearest neighbour, or third nearest, etc. } \usage{ \method{nndist}{ppx}(X, \dots, k=1) } \arguments{ \item{X}{ Multi-dimensional point pattern (object of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each point. If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th data point. If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th data point. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th data point. } \details{ This function computes the Euclidean distance from each point in a multi-dimensional point pattern to its nearest neighbour (the nearest other point of the pattern). If \code{k} is specified, it computes the distance to the \code{k}th nearest neighbour. 
The function \code{nndist} is generic; this function \code{nndist.ppx} is the method for the class \code{"ppx"}. The argument \code{k} may be a single integer, or an integer vector. If it is a vector, then the \eqn{k}th nearest neighbour distances are computed for each value of \eqn{k} specified in the vector. If there is only one point (if \code{x} has length 1), then a nearest neighbour distance of \code{Inf} is returned. If there are no points (if \code{x} has length zero) a numeric vector of length zero is returned. To identify \emph{which} point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. To find the nearest neighbour distances from one point pattern to another point pattern, use \code{\link{nncross}}. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. } \section{Warnings}{ An infinite or \code{NA} value is returned if the distance is not defined (e.g. if there is only one point in the point pattern). } \seealso{ \code{\link{nndist}}, \code{\link{pairdist}}, \code{\link{nnwhich}} } \examples{ df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5)) X <- ppx(data=df) # nearest neighbours d <- nndist(X) # second nearest neighbours d2 <- nndist(X, k=2) # first, second and third nearest d1to3 <- nndist(X, k=1:3) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/affine.owin.Rd0000644000176200001440000000330313160710571015362 0ustar liggesusers\name{affine.owin} \alias{affine.owin} \title{Apply Affine Transformation To Window} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a window. 
} \usage{ \method{affine}{owin}(X, mat=diag(c(1,1)), vec=c(0,0), \dots, rescue=TRUE) } \arguments{ \item{X}{Window (object of class \code{"owin"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{rescue}{ Logical. If \code{TRUE}, the transformed window will be processed by \code{\link{rescue.rectangle}}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} controlling the pixel resolution of the transformed window, if \code{X} is a binary pixel mask. } } \value{ Another window (of class \code{"owin"}) representing the result of applying the affine transformation. } \details{ The window is subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and then the result is translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. This is a method for the generic function \code{\link{affine}}. } \seealso{ \code{\link{affine}}, \code{\link{affine.ppp}}, \code{\link{affine.psp}}, \code{\link{affine.im}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ # shear transformation shear <- matrix(c(1,0,0.6,1),ncol=2) X <- affine(owin(), shear) \dontrun{ plot(X) } data(letterR) affine(letterR, shear, c(0, 0.5)) affine(as.mask(letterR), shear, c(0, 0.5)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/as.ppm.Rd0000644000176200001440000000365013160710571014362 0ustar liggesusers\name{as.ppm} \alias{as.ppm} \alias{as.ppm.ppm} \alias{as.ppm.profilepl} \alias{as.ppm.kppm} \alias{as.ppm.dppm} \title{Extract Fitted Point Process Model} \description{ Extracts the fitted point process model from some kind of fitted model. 
} \usage{ as.ppm(object) \method{as.ppm}{ppm}(object) \method{as.ppm}{profilepl}(object) \method{as.ppm}{kppm}(object) \method{as.ppm}{dppm}(object) } \arguments{ \item{object}{An object that includes a fitted Poisson or Gibbs point process model. An object of class \code{"ppm"}, \code{"profilepl"}, \code{"kppm"} or \code{"dppm"} or possibly other classes. } } \details{ The function \code{as.ppm} extracts the fitted point process model (of class \code{"ppm"}) from a suitable object. The function \code{as.ppm} is generic, with methods for the classes \code{"ppm"}, \code{"profilepl"}, \code{"kppm"} and \code{"dppm"}, and possibly for other classes. For the class \code{"profilepl"} of models fitted by maximum profile pseudolikelihood, the method \code{as.ppm.profilepl} extracts the fitted point process model (with the optimal values of the irregular parameters). For the class \code{"kppm"} of models fitted by minimum contrast (or Palm or composite likelihood) using Waagepetersen's two-step estimation procedure (see \code{\link{kppm}}), the method \code{as.ppm.kppm} extracts the Poisson point process model that is fitted in the first stage of the procedure. The behaviour for the class \code{"dppm"} is analogous to the \code{"kppm"} case above. } \value{ An object of class \code{"ppm"}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{ppm}}, \code{\link{profilepl}}. } \examples{ # fit a model by profile maximum pseudolikelihood rvals <- data.frame(r=(1:10)/100) pfit <- profilepl(rvals, Strauss, cells, ~1) # extract the fitted model fit <- as.ppm(pfit) } \keyword{spatial} \keyword{models} spatstat/man/bdist.tiles.Rd0000644000176200001440000000206313160710571015405 0ustar liggesusers\name{bdist.tiles} \alias{bdist.tiles} \title{Distance to Boundary of Window} \description{ Computes the shortest distances from each tile in a tessellation to the boundary of the window. 
} \usage{ bdist.tiles(X) } \arguments{ \item{X}{A tessellation (object of class \code{"tess"}).} } \value{ A numeric vector, giving the shortest distance from each tile in the tessellation to the boundary of the window. Entries of the vector correspond to the entries of \code{tiles(X)}. } \details{ This function computes, for each tile \eqn{s_i}{s[[i]]} in the tessellation \code{X}, the shortest distance from \eqn{s_i}{s[[i]]} to the boundary of the window \eqn{W} containing the tessellation. } \seealso{ \code{\link{tess}}, \code{\link{bdist.points}}, \code{\link{bdist.pixels}} } \examples{ P <- runifpoint(15) X <- dirichlet(P) plot(X, col="red") B <- bdist.tiles(X) # identify tiles that do not touch the boundary plot(X[B > 0], add=TRUE, col="green", lwd=3) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/polynom.Rd0000644000176200001440000000324713160710621014657 0ustar liggesusers\name{polynom} \alias{polynom} \title{ Polynomial in One or Two Variables } \description{ This function is used to represent a polynomial term in a model formula. It computes the homogeneous terms in the polynomial of degree \code{n} in one variable \code{x} or two variables \code{x,y}. } \usage{ polynom(x, \dots) } \arguments{ \item{x}{ A numerical vector. } \item{\dots}{ Either a single integer \code{n} specifying the degree of the polynomial, or two arguments \code{y,n} giving another vector of data \code{y} and the degree of the polynomial. } } \details{ This function is typically used inside a model formula in order to specify the most general possible polynomial of order \code{n} involving one numerical variable \code{x} or two numerical variables \code{x,y}. It is equivalent to \code{\link[stats]{poly}(, raw=TRUE)}. If only one numerical vector argument \code{x} is given, the function computes the vectors \code{x^k} for \code{k = 1, 2, \dots, n}. These vectors are combined into a matrix with \code{n} columns. 
If two numerical vector arguments \code{x,y} are given, the function computes the vectors \code{x^k * y^m} for \code{k >= 0} and \code{m >= 0} satisfying \code{0 < k + m <= n}. These vectors are combined into a matrix with one column for each homogeneous term. } \value{ A numeric matrix, with rows corresponding to the entries of \code{x}, and columns corresponding to the terms in the polynomial. } \author{ \spatstatAuthors. } \seealso{ \code{\link[stats]{poly}}, \code{\link{harmonic}} } \examples{ x <- 1:4 y <- 10 * (0:3) polynom(x, 3) polynom(x, y, 3) } \keyword{arith} spatstat/man/boundingbox.Rd0000644000176200001440000000403613160710571015501 0ustar liggesusers\name{boundingbox} \alias{boundingbox} \alias{boundingbox.default} \alias{boundingbox.im} \alias{boundingbox.owin} \alias{boundingbox.ppp} \alias{boundingbox.solist} \title{ Bounding Box of a Window, Image, or Point Pattern } \description{ Find the smallest rectangle containing a given window(s), image(s) or point pattern(s). } \usage{ boundingbox(\dots) \method{boundingbox}{default}(\dots) \method{boundingbox}{im}(\dots) \method{boundingbox}{owin}(\dots) \method{boundingbox}{ppp}(\dots) \method{boundingbox}{solist}(\dots) } \arguments{ \item{\dots}{One or more windows (objects of class \code{"owin"}), pixel images (objects of class \code{"im"}) or point patterns (objects of class \code{"ppp"}). Alternatively, the argument may be a list of such objects, of class \code{"solist"}. } } \details{ This function finds the smallest rectangle (with sides parallel to the coordinate axes) that contains all the given objects. For a window (object of class \code{"owin"}), the bounding box is the smallest rectangle that contains all the vertices of the window (this is generally smaller than the enclosing frame, which is returned by \code{\link{as.rectangle}}). For a point pattern (object of class \code{"ppp"}), the bounding box is the smallest rectangle that contains all the points of the pattern. 
For a pixel image (object of class \code{"im"}), the image will be converted to a window using \code{\link{as.owin}}, and the bounding box of this window is obtained. If the argument is a list of several objects, then this function finds the smallest rectangle that contains all the bounding boxes of the objects. } \value{ A window (object of class \code{"owin"}) of type \code{"rectangle"} representing the bounding box. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{as.rectangle}} } \examples{ w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7))) r <- boundingbox(w) # returns rectangle [1,3] x [2,7] w2 <- unit.square() r <- boundingbox(w, w2) # returns rectangle [0,3] x [0,7] } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/lintess.Rd0000644000176200001440000000515313160710621014641 0ustar liggesusers\name{lintess} \alias{lintess} \title{ Tessellation on a Linear Network } \description{ Create a tessellation on a linear network. } \usage{ lintess(L, df) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{df}{ Data frame of coordinates of endpoints of the tiles of the tessellation. } } \details{ A tessellation on a linear network \code{L} is a partition of the network into non-overlapping pieces (tiles). Each tile consists of one or more line segments which are subsets of the line segments making up the network. A tile can consist of several disjoint pieces. The data frame \code{df} should have columns named \code{seg}, \code{t0}, \code{t1} and \code{tile}. Each row of the data frame specifies one sub-segment of the network and allocates it to a particular tile. The \code{seg} column specifies which line segment of the network contains the sub-segment. Values of \code{seg} are integer indices for the segments in \code{as.psp(L)}. The \code{t0} and \code{t1} columns specify the start and end points of the sub-segment. They should be numeric values between 0 and 1 inclusive, where the values 0 and 1 represent the network vertices that are joined by this network segment.
The \code{tile} column specifies which tile of the tessellation includes this sub-segment. It will be coerced to a factor and its levels will be the names of the tiles. If \code{df} is missing or \code{NULL}, the result is a tessellation with only one tile, consisting of the entire network \code{L}. } \value{ An object of class \code{"lintess"}. There are methods for \code{print}, \code{plot} and \code{summary} for this object. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{linnet}} for linear networks. \code{\link{plot.lintess}} for plotting. \code{\link{divide.linnet}} to make a tessellation demarcated by given points. \code{\link{lineardirichlet}} to create the Dirichlet-Voronoi tessellation from a point pattern on a linear network. \code{\link{as.linfun.lintess}}, \code{\link{as.linnet.lintess}} and \code{\link{as.linim}} to convert to other classes. \code{\link{tile.lengths}} to compute the length of each tile in the tessellation. The undocumented methods \code{Window.lintess} and \code{as.owin.lintess} extract the spatial window. } \examples{ # tessellation consisting of one tile for each existing segment ns <- nsegments(simplenet) df <- data.frame(seg=1:ns, t0=0, t1=1, tile=letters[1:ns]) u <- lintess(simplenet, df) u plot(u) } \keyword{spatial} \keyword{datagen} spatstat/man/dclf.test.Rd0000644000176200001440000002576613160710571015066 0ustar liggesusers\name{dclf.test} \alias{dclf.test} \alias{mad.test} \title{ Diggle-Cressie-Loosmore-Ford and Maximum Absolute Deviation Tests } \description{ Perform the Diggle (1986) / Cressie (1991) / Loosmore and Ford (2006) test or the Maximum Absolute Deviation test for a spatial point pattern. 
} \usage{ dclf.test(X, \dots, alternative=c("two.sided", "less", "greater"), rinterval = NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) mad.test(X, \dots, alternative=c("two.sided", "less", "greater"), rinterval = NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) } \arguments{ \item{X}{ Data for the test. Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class), a simulation envelope (object of class \code{"envelope"}) or a previous result of \code{dclf.test} or \code{mad.test}. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{verbose=FALSE} to turn off the messages, \code{savefuns} or \code{savepatterns} to save the simulation results, and \code{use.theory} described under Details. } \item{alternative}{ The alternative hypothesis. A character string. The default is a two-sided alternative. See Details. } \item{rinterval}{ Interval of values of the summary function argument \code{r} over which the maximum absolute deviation, or the integral, will be computed for the test. A numeric vector of length 2. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{scale}{ Optional. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the test statistic is computed. } \item{clamp}{ Logical value indicating how to compute deviations in a one-sided test. 
Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{interpolate}{ Logical value specifying whether to calculate the \eqn{p}-value by interpolation. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, yielding a \eqn{p}-value of the form \eqn{(k+1)/(n+1)} where \eqn{n} is the number of simulations and \eqn{k} is the number of simulated values which are more extreme than the observed value. If \code{interpolate=TRUE}, the \eqn{p}-value is calculated by applying kernel density estimation to the simulated values, and computing the tail probability for this estimated distribution. } } \details{ These functions perform hypothesis tests for goodness-of-fit of a point pattern dataset to a point process model, based on Monte Carlo simulation from the model. \code{dclf.test} performs the test advocated by Loosmore and Ford (2006) which is also described in Diggle (1986), Cressie (1991, page 667, equation (8.5.42)) and Diggle (2003, page 14). See Baddeley et al (2014) for detailed discussion. \code{mad.test} performs the \sQuote{global} or \sQuote{Maximum Absolute Deviation} test described by Ripley (1977, 1981). See Baddeley et al (2014). The type of test depends on the type of argument \code{X}. \itemize{ \item If \code{X} is some kind of point pattern, then a test of Complete Spatial Randomness (CSR) will be performed. That is, the null hypothesis is that the point pattern is completely random. \item If \code{X} is a fitted point process model, then a test of goodness-of-fit for the fitted model will be performed. The model object contains the data point pattern to which it was originally fitted. 
The null hypothesis is that the data point pattern is a realisation of the model. \item If \code{X} is an envelope object generated by \code{\link{envelope}}, then it should have been generated with \code{savefuns=TRUE} or \code{savepatterns=TRUE} so that it contains simulation results. These simulations will be treated as realisations from the null hypothesis. \item Alternatively \code{X} could be a previously-performed test of the same kind (i.e. the result of calling \code{dclf.test} or \code{mad.test}). The simulations used to perform the original test will be re-used to perform the new test (provided these simulations were saved in the original test, by setting \code{savefuns=TRUE} or \code{savepatterns=TRUE}). } The argument \code{alternative} specifies the alternative hypothesis, that is, the direction of deviation that will be considered statistically significant. If \code{alternative="two.sided"} (the default), both positive and negative deviations (between the observed summary function and the theoretical function) are significant. If \code{alternative="less"}, then only negative deviations (where the observed summary function is lower than the theoretical function) are considered. If \code{alternative="greater"}, then only positive deviations (where the observed summary function is higher than the theoretical function) are considered. In all cases, the algorithm will first call \code{\link{envelope}} to generate or extract the simulated summary functions. The number of simulations that will be generated or extracted, is determined by the argument \code{nsim}, and defaults to 99. The summary function that will be computed is determined by the argument \code{fun} (or the first unnamed argument in the list \code{\dots}) and defaults to \code{\link{Kest}} (except when \code{X} is an envelope object generated with \code{savefuns=TRUE}, when these functions will be taken). The choice of summary function \code{fun} affects the power of the test. 
It is normally recommended to apply a variance-stabilising transformation (Ripley, 1981). If you are using the \eqn{K} function, the normal practice is to replace this by the \eqn{L} function (Besag, 1977) computed by \code{\link{Lest}}. If you are using the \eqn{F} or \eqn{G} functions, the recommended practice is to apply Fisher's variance-stabilising transformation \eqn{\sin^{-1}\sqrt x}{asin(sqrt(x))} using the argument \code{transform}. See the Examples. The argument \code{rinterval} specifies the interval of distance values \eqn{r} which will contribute to the test statistic (either maximising over this range of values for \code{mad.test}, or integrating over this range of values for \code{dclf.test}). This affects the power of the test. General advice and experiments in Baddeley et al (2014) suggest that the maximum \eqn{r} value should be slightly larger than the maximum possible range of interaction between points. The \code{dclf.test} is quite sensitive to this choice, while the \code{mad.test} is relatively insensitive. It is also possible to specify a pointwise test (i.e. taking a single, fixed value of distance \eqn{r}) by specifying \code{rinterval = c(r,r)}. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). 
} \section{Handling Ties}{ If the observed value of the test statistic is equal to one or more of the simulated values (called a \emph{tied value}), then the tied values will be assigned a random ordering, and a message will be printed. } \value{ An object of class \code{"htest"}. Printing this object gives a report on the result of the test. The \eqn{p}-value is contained in the component \code{p.value}. } \references{ Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3) 477--489. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2015) \emph{Pushing the envelope}. In preparation. Besag, J. (1977) Discussion of Dr Ripley's paper. \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 193--195. Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Computational and Graphical Statistics} \bold{23}, 497--517. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neuroscience Methods} \bold{18}, 115--125. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Loosmore, N.B. and Ford, E.D. (2006) Statistical inference using the \emph{G} or \emph{K} point pattern spatial statistics. \emph{Ecology} \bold{87}, 1925--1931. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. } \author{ \adrian , Andrew Hardegen and Suman Rakshit. 
} \seealso{ \code{\link{envelope}}, \code{\link{dclf.progress}} } \examples{ dclf.test(cells, Lest, nsim=39) m <- mad.test(cells, Lest, verbose=FALSE, rinterval=c(0, 0.1), nsim=19) m # extract the p-value m$p.value # variance stabilised G function dclf.test(cells, Gest, transform=expression(asin(sqrt(.))), verbose=FALSE, nsim=19) ## one-sided test ml <- mad.test(cells, Lest, verbose=FALSE, nsim=19, alternative="less") ## scaled mad.test(cells, Kest, verbose=FALSE, nsim=19, rinterval=c(0.05, 0.2), scale=function(r) { r }) } \keyword{spatial} \keyword{htest} spatstat/man/pairdist.ppp.Rd0000644000176200001440000000465113160710621015577 0ustar liggesusers\name{pairdist.ppp} \alias{pairdist.ppp} \title{Pairwise distances} \description{ Computes the matrix of distances between all pairs of points in a point pattern. } \usage{ \method{pairdist}{ppp}(X, \dots, periodic=FALSE, method="C", squared=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ This is a method for the generic function \code{pairdist}. Given a point pattern \code{X} (an object of class \code{"ppp"}), this function computes the Euclidean distances between all pairs of points in \code{X}, and returns the matrix of distances. Alternatively if \code{periodic=TRUE} and the window containing \code{X} is a rectangle, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite edges of the rectangle are regarded as equivalent. 
This is meaningless if the window is not a rectangle. If \code{squared=TRUE} then the \emph{squared} Euclidean distances \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}. The squared distances are faster to calculate, and are sufficient for many purposes (such as finding the nearest neighbour of a point). The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is somewhat faster. } \seealso{ \code{\link{pairdist}}, \code{\link{pairdist.default}}, \code{\link{pairdist.psp}}, \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{Kest}} } \examples{ data(cells) d <- pairdist(cells) d <- pairdist(cells, periodic=TRUE) d <- pairdist(cells, squared=TRUE) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/as.function.leverage.ppm.Rd0000644000176200001440000000243213160710571017774 0ustar liggesusers\name{as.function.leverage.ppm} \alias{as.function.leverage.ppm} \title{ Convert Leverage Object to Function of Coordinates } \description{ Converts an object of class \code{"leverage.ppm"} to a function of the \eqn{x} and \eqn{y} coordinates. } \usage{ \method{as.function}{leverage.ppm}(x, ...) } \arguments{ \item{x}{ Object of class \code{"leverage.ppm"} produced by \code{\link{leverage.ppm}}. } \item{\dots}{ Ignored. } } \details{ An object of class \code{"leverage.ppm"} represents the leverage function of a fitted point process model. This command converts the object to a \code{function(x,y)} where the arguments \code{x} and \code{y} are (vectors of) spatial coordinates. This function returns the leverage values at the specified locations (calculated by referring to the nearest location where the leverage has been computed). 
} \value{ A function in the \R language, also belonging to the class \code{"funxy"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{as.im.leverage.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) lev <- leverage(fit) f <- as.function(lev) f(0.2, 0.3) # evaluate at (x,y) coordinates y <- f(X) # evaluate at a point pattern } \keyword{spatial} \keyword{manip} spatstat/man/envelope.pp3.Rd0000644000176200001440000002106013160710621015471 0ustar liggesusers\name{envelope.pp3} \alias{envelope.pp3} \title{Simulation Envelopes of Summary Function for 3D Point Pattern} \description{ Computes simulation envelopes of a summary function for a three-dimensional point pattern. } \usage{ \method{envelope}{pp3}(Y, fun=K3est, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL,use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ A three-dimensional point pattern (object of class \code{"pp3"}). } \item{fun}{ Function that computes the desired summary statistic for a 3D point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. 
If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. Applicable only when \code{global=TRUE}. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{side="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{side="less"}) or a one-sided test with an upper critical boundary (\code{side="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. 
The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields an error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{do.pwrong}{ Logical. 
If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ A function value table (object of class \code{"fv"}) which can be plotted directly. See \code{\link{envelope}} for further details. } \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"} and \code{"kppm"} described in the help file for \code{\link{envelope}}. This function \code{envelope.pp3} is the method for three-dimensional point patterns (objects of class \code{"pp3"}). For the most basic use, if you have a 3D point pattern \code{X} and you want to test Complete Spatial Randomness (CSR), type \code{plot(envelope(X, K3est,nsim=39))} to see the three-dimensional \eqn{K} function for \code{X} plotted together with the envelopes of the three-dimensional \eqn{K} function for 39 simulations of CSR. To create simulation envelopes, the command \code{envelope(Y, ...)} first generates \code{nsim} random point patterns in one of the following ways. \itemize{ \item If \code{simulate=NULL}, then we generate \code{nsim} simulations of Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns each being a realisation of the uniform Poisson point process) with the same intensity as the pattern \code{Y}. 
\item If \code{simulate} is supplied, then it determines how the simulated point patterns are generated. See \code{\link{envelope}} for details. } The summary statistic \code{fun} is applied to each of these simulated patterns. Typically \code{fun} is one of the functions \code{K3est}, \code{G3est}, \code{F3est} or \code{pcf3est}. It may also be a character string containing the name of one of these functions. For further information, see the documentation for \code{\link{envelope}}. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. } \seealso{ \code{\link{pp3}}, \code{\link{rpoispp3}}, \code{\link{K3est}}, \code{\link{G3est}}, \code{\link{F3est}}, \code{\link{pcf3est}}. } \examples{ X <- rpoispp3(20, box3()) \dontrun{ plot(envelope(X, nsim=39)) } \testonly{ plot(envelope(X, nsim=4)) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/localKinhom.Rd0000644000176200001440000001150613160710621015417 0ustar liggesusers\name{localKinhom} \alias{localKinhom} \alias{localLinhom} \title{Inhomogeneous Neighbourhood Density Function} \description{ Computes spatially-weighted versions of the local \eqn{K}-function or \eqn{L}-function. } \usage{ localKinhom(X, lambda, ..., correction = "Ripley", verbose = TRUE, rvalue=NULL, sigma = NULL, varcov = NULL) localLinhom(X, lambda, ..., correction = "Ripley", verbose = TRUE, rvalue=NULL, sigma = NULL, varcov = NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{lambda}{ Optional. Values of the estimated intensity function. 
Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"Ripley"}, \code{"translation"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } \item{sigma, varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the kernel smoothing procedure for estimating \code{lambda}, if \code{lambda} is missing. } } \details{ The functions \code{localKinhom} and \code{localLinhom} are inhomogeneous or weighted versions of the neighbourhood density function implemented in \code{\link{localK}} and \code{\link{localL}}. Given a spatial point pattern \code{X}, the inhomogeneous neighbourhood density function \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ L_i(r) = \sqrt{\frac 1 \pi \sum_j \frac{e_{ij}}{\lambda_j}} }{ L[i](r) = sqrt( (1/pi) * sum[j] e[i,j]/lambda[j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{\lambda_j}{\lambda[j]} is the estimated intensity of the point pattern at the point \eqn{j}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). 
The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one of the summands that contributes to the global estimate of the inhomogeneous L function (see \code{\link{Linhom}}). By default, the function \eqn{L_i(r)}{L[i](r)} or \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kinhom}}, \code{\link{Linhom}}, \code{\link{localK}}, \code{\link{localL}}. 
} \examples{ data(ponderosa) X <- ponderosa # compute all the local L functions L <- localLinhom(X) # plot all the local L functions against r plot(L, main="local L functions for ponderosa", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 12 metres L12 <- localLinhom(X, rvalue=12) } \author{ Mike Kuhn, \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/update.symbolmap.Rd0000644000176200001440000000226113160710621016441 0ustar liggesusers\name{update.symbolmap} \alias{update.symbolmap} \title{ Update a Graphics Symbol Map. } \description{ This command updates the \code{object} using the arguments given. } \usage{ \method{update}{symbolmap}(object, \dots) } \arguments{ \item{object}{ Graphics symbol map (object of class \code{"symbolmap"}). } \item{\dots}{ Additional or replacement arguments to \code{\link{symbolmap}}. } } \details{ This is a method for the generic function \code{\link[stats]{update}} for the class \code{"symbolmap"} of graphics symbol maps. It updates the \code{object} using the parameters given in the extra arguments \code{\dots}. The extra arguments must be given in the form \code{name=value} and must be recognisable to \code{\link{symbolmap}}. They override any parameters of the same name in \code{object}. } \value{ Another object of class \code{"symbolmap"}. } \author{\adrian , \rolf and \ege. } \seealso{ \code{\link{symbolmap}} to create a graphics symbol map. 
} \examples{ g <- symbolmap(size=function(x) x/50) g update(g, range=c(0,1)) update(g, size=42) update(g, shape="squares", range=c(0,1)) } \keyword{spatial} \keyword{hplot} spatstat/man/split.ppp.Rd0000644000176200001440000001447213160710621015115 0ustar liggesusers\name{split.ppp} \alias{split.ppp} \alias{split<-.ppp} \title{Divide Point Pattern into Sub-patterns} \description{ Divides a point pattern into several sub-patterns, according to their marks, or according to any user-specified grouping. } \usage{ \method{split}{ppp}(x, f = marks(x), drop=FALSE, un=NULL, reduce=FALSE, \dots) \method{split}{ppp}(x, f = marks(x), drop=FALSE, un=missing(f), \dots) <- value } \arguments{ \item{x}{ A two-dimensional point pattern. An object of class \code{"ppp"}. } \item{f}{ Data determining the grouping. Either a factor, a logical vector, a pixel image with factor values, a tessellation, a window, or the name of one of the columns of marks. } \item{drop}{ Logical. Determines whether empty groups will be deleted. } \item{un}{ Logical. Determines whether the resulting subpatterns will be unmarked (i.e. whether marks will be removed from the points in each subpattern). } \item{reduce}{ Logical. Determines whether to delete the column of marks used to split the pattern, when the marks are a data frame. } \item{\dots}{ Other arguments are ignored. } \item{value}{ List of point patterns. } } \value{ The value of \code{split.ppp} is a list of point patterns. The components of the list are named by the levels of \code{f}. The list also has the class \code{"splitppp"}. The assignment form \code{split<-.ppp} returns the updated point pattern \code{x}. } \details{ The function \code{split.ppp} divides up the points of the point pattern \code{x} into several sub-patterns according to the values of \code{f}. The result is a list of point patterns. The argument \code{f} may be \itemize{ \item a factor, of length equal to the number of points in \code{x}. 
The levels of \code{f} determine the destination of each point in \code{x}. The \code{i}th point of \code{x} will be placed in the sub-pattern \code{split.ppp(x)$l} where \code{l = f[i]}. \item a pixel image (object of class \code{"im"}) with factor values. The pixel value of \code{f} at each point of \code{x} will be used as the classifying variable. \item a tessellation (object of class \code{"tess"}). Each point of \code{x} will be classified according to the tile of the tessellation into which it falls. \item a window (object of class \code{"owin"}). Each point of \code{x} will be classified according to whether it falls inside or outside this window. \item a character string, matching the name of one of the columns of marks, if \code{marks(x)} is a data frame. This column should be a factor. } If \code{f} is missing, then it will be determined by the marks of the point pattern. The pattern \code{x} can be either \itemize{ \item a multitype point pattern (a marked point pattern whose marks vector is a factor). Then \code{f} is taken to be the marks vector. The effect is that the points of each type are separated into different point patterns. \item a marked point pattern with a data frame of marks, containing at least one column that is a factor. The first such column will be used to determine the splitting factor \code{f}. } Some of the sub-patterns created by the split may be empty. If \code{drop=TRUE}, then empty sub-patterns will be deleted from the list. If \code{drop=FALSE} then they are retained. The argument \code{un} determines how to handle marks in the case where \code{x} is a marked point pattern. If \code{un=TRUE} then the marks of the points will be discarded when they are split into groups, while if \code{un=FALSE} then the marks will be retained. If \code{f} and \code{un} are both missing, then the default is \code{un=TRUE} for multitype point patterns and \code{un=FALSE} for marked point patterns with a data frame of marks. 
If the marks of \code{x} are a data frame, then \code{split(x, reduce=TRUE)} will discard only the column of marks that was used to split the pattern. This applies only when the argument \code{f} is missing. The result of \code{split.ppp} has class \code{"splitppp"} and can be plotted using \code{\link{plot.splitppp}}. The assignment function \code{split<-.ppp} updates the point pattern \code{x} so that it satisfies \code{split(x, f, drop, un) = value}. The argument \code{value} is expected to be a list of point patterns, one for each level of \code{f}. These point patterns are expected to be compatible with the type of data in the original pattern \code{x}. Splitting can also be undone by the function \code{\link{superimpose}}, but this typically changes the ordering of the data. } \seealso{ \code{\link{cut.ppp}}, \code{\link{plot.splitppp}}, \code{\link{superimpose}}, \code{\link{im}}, \code{\link{tess}}, \code{\link{ppp.object}} } \examples{ # (1) Splitting by marks # Multitype point pattern: separate into types u <- split(amacrine) # plot them plot(split(amacrine)) # the following are equivalent: amon <- split(amacrine)$on amon <- unmark(amacrine[amacrine$marks == "on"]) amon <- subset(amacrine, marks == "on", -marks) # the following are equivalent: amon <- split(amacrine, un=FALSE)$on amon <- amacrine[amacrine$marks == "on"] # Scramble the locations of the 'on' cells X <- amacrine u <- split(X) u$on <- runifpoint(ex=amon) split(X) <- u # Point pattern with continuous marks trees <- longleaf \testonly{ # smaller dataset trees <- trees[seq(1, npoints(trees), by=80)] } # cut the range of tree diameters into three intervals # using cut.ppp long3 <- cut(trees, breaks=3) # now split them long3split <- split(long3) # (2) Splitting by a factor # Unmarked point pattern swedishpines # cut & split according to nearest neighbour distance f <- cut(nndist(swedishpines), 3) u <- split(swedishpines, f) # (3) Splitting over a tessellation tes <- 
tess(xgrid=seq(0,96,length=5),ygrid=seq(0,100,length=5)) v <- split(swedishpines, tes) # (4) how to apply an operation to selected points: # split into components, transform desired component, then un-split # e.g. apply random jitter to 'on' points only X <- amacrine Y <- split(X) Y$on <- rjitter(Y$on, 0.1) split(X) <- Y } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/default.expand.Rd0000644000176200001440000000771513160710571016074 0ustar liggesusers\name{default.expand} \alias{default.expand} \title{Default Expansion Rule for Simulation of Model} \description{ Defines the default expansion window or expansion rule for simulation of a fitted point process model. } \usage{ default.expand(object, m=2, epsilon=1e-6, w=Window(object)) } \arguments{ \item{object}{ A point process model (object of class \code{"ppm"} or \code{"rmhmodel"}). } \item{m}{ A single numeric value. The window will be expanded by a distance \code{m * reach(object)} along each side. } \item{epsilon}{ Threshold argument passed to \code{\link{reach}} to determine \code{reach(object)}. } \item{w}{ Optional. The un-expanded window in which the model is defined. The resulting simulated point patterns will lie in this window. } } \value{ A window expansion rule (object of class \code{"rmhexpand"}). } \details{ This function computes a default value for the expansion rule (the argument \code{expand} in \code{\link{rmhcontrol}}) given a fitted point process model \code{object}. This default is used by \code{\link{envelope}}, \code{\link{qqplot.ppm}}, \code{\link{simulate.ppm}} and other functions. Suppose we wish to generate simulated realisations of a fitted point process model inside a window \code{w}. It is advisable to first simulate the pattern on a larger window, and then clip it to the original window \code{w}. This avoids edge effects in the simulation. It is called \emph{expansion} of the simulation window. 
Accordingly, for the Metropolis-Hastings simulation algorithm \code{\link{rmh}}, the algorithm control parameters specified by \code{\link{rmhcontrol}} include an argument \code{expand} that determines the expansion of the simulation window. The function \code{default.expand} determines the default expansion rule for a fitted point process model \code{object}. If the model is Poisson, then no expansion is necessary. No expansion is performed by default, and \code{default.expand} returns a rule representing no expansion. The simulation window is the original window \code{w = Window(object)}. If the model depends on external covariates (i.e.\ covariates other than the Cartesian covariates \code{x} and \code{y} and the \code{marks}) then no expansion is feasible, in general, because the spatial domain of the covariates is not guaranteed to be large enough. \code{default.expand} returns a rule representing no expansion. The simulation window is the original window \code{w = Window(object)}. If the model depends on the Cartesian covariates \code{x} and \code{y}, it would be feasible to expand the simulation window, and this was the default for \pkg{spatstat} version 1.24-1 and earlier. However this sometimes produces artefacts (such as an empty point pattern) or memory overflow, because the fitted trend, extrapolated outside the original window of the data, may become very large. In \pkg{spatstat} version 1.24-2 and later, the default rule is \emph{not} to expand if the model depends on \code{x} or \code{y}. Again \code{default.expand} returns a rule representing no expansion. Otherwise, expansion will occur. The original window \code{w = Window(object)} is expanded by a distance \code{m * rr}, where \code{rr} is the interaction range of the model, computed by \code{\link{reach}}. If \code{w} is a rectangle then each edge of \code{w} is displaced outward by distance \code{m * rr}. 
If \code{w} is not a rectangle then \code{w} is dilated by distance \code{m * rr} using \code{\link{dilation}}. } \seealso{ \code{\link{rmhexpand}}, \code{\link{rmhcontrol}}, \code{\link{rmh}}, \code{\link{envelope}}, \code{\link{qqplot.ppm}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(0.07)) default.expand(fit) mod <- rmhmodel(cif="strauss", par=list(beta=100, gamma=0.5, r=0.07)) default.expand(mod) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/Smooth.fv.Rd0000644000176200001440000000550013160710621015043 0ustar liggesusers\name{Smooth.fv} \alias{Smooth.fv} \title{ Apply Smoothing to Function Values } \description{ Applies smoothing to the values in selected columns of a function value table. } \usage{ \method{Smooth}{fv}(X, which = "*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) } \arguments{ \item{X}{ Values to be smoothed. A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). } \item{which}{ Character vector identifying which columns of the table should be smoothed. Either a vector containing names of columns, or one of the wildcard strings \code{"*"} or \code{"."} explained below. } \item{\dots}{ Extra arguments passed to \code{\link[stats]{smooth.spline}} or \code{\link[stats]{loess}} to control the smoothing. } \item{method}{ Smoothing algorithm. A character string, partially matched to either \code{"smooth.spline"} or \code{"loess"}. } \item{xinterval}{ Optional. Numeric vector of length 2 specifying a range of \eqn{x} values. Smoothing will be performed only on the part of the function corresponding to this range. } } \details{ The command \code{Smooth.fv} applies smoothing to the function values in a function value table (object of class \code{"fv"}). \code{Smooth.fv} is a method for the generic function \code{\link{Smooth}}. The smoothing is performed either by \code{\link[stats]{smooth.spline}} or by \code{\link[stats]{loess}}. 
Smoothing is applied to every column (or to each of the selected columns) of function values in turn, using the function argument as the \eqn{x} coordinate and the selected column as the \eqn{y} coordinate. The original function values are then replaced by the corresponding smooth interpolated function values. The optional argument \code{which} specifies which of the columns of function values in \code{X} will be smoothed. The default (indicated by the wildcard \code{which="*"}) is to smooth all function values, i.e.\ all columns except the function argument. Alternatively \code{which="."} designates the subset of function values that are displayed in the default plot. Alternatively \code{which} can be a character vector containing the names of columns of \code{X}. If the argument \code{xinterval} is given, then smoothing will be performed only in the specified range of \eqn{x} values. } \value{ Another function value table (object of class \code{"fv"}) of the same format. } \author{\adrian and \rolf } \seealso{ \code{\link{Smooth}}, \code{\link{with.fv}}, \code{\link{fv.object}}, \code{\link[stats]{smooth.spline}}, \code{\link[stats]{loess}} } \examples{ data(cells) G <- Gest(cells) plot(G) plot(Smooth(G, df=9), add=TRUE) } \keyword{spatial} \keyword{nonparametric} spatstat/man/valid.detpointprocfamily.Rd0000644000176200001440000000121413160710621020164 0ustar liggesusers\name{valid.detpointprocfamily} \alias{valid.detpointprocfamily} \title{Check Validity of a Determinantal Point Process Model} \description{ Checks the validity of a determinantal point process model. 
} \usage{ \method{valid}{detpointprocfamily}(object, \dots) } \arguments{ \item{object}{Model of class \code{"detpointprocfamily"}.} \item{\dots}{Ignored.} } \value{Logical} \author{ \adrian \rolf and \ege } \examples{ model1 <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) valid(model1) model2 <- dppMatern(lambda=100, alpha=1, nu=1, d=2) valid(model2) } \seealso{ \code{\link{valid}} } \keyword{spatial} \keyword{models} spatstat/man/grow.rectangle.Rd0000644000176200001440000000325513160710621016102 0ustar liggesusers\name{grow.rectangle} \alias{grow.rectangle} \title{Add margins to rectangle} \description{ Adds a margin to a rectangle. } \usage{ grow.rectangle(W, xmargin=0, ymargin=xmargin, fraction=NULL) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). Must be of type \code{"rectangle"}. } \item{xmargin}{Width of horizontal margin to be added. A single nonnegative number, or a vector of length 2 indicating margins of unequal width at left and right. } \item{ymargin}{Height of vertical margin to be added. A single nonnegative number, or a vector of length 2 indicating margins of unequal width at bottom and top. } \item{fraction}{ Fraction of width and height to be added. A number greater than zero, or a numeric vector of length 2 indicating different fractions of width and of height, respectively. Incompatible with specifying \code{xmargin} and \code{ymargin}. } } \value{ Another object of class \code{"owin"} representing the window after margins are added. } \details{ This is a simple convenience function to add a margin of specified width and height on each side of a rectangular window. Unequal margins can also be added. } \seealso{ \code{\link{trim.rectangle}}, \code{\link{dilation}}, \code{\link{erosion}}, \code{\link{owin.object}} } \examples{ w <- square(10) # add a margin of width 1 on all four sides square12 <- grow.rectangle(w, 1) # add margin of width 3 on the right side # and margin of height 4 on top. 
v <- grow.rectangle(w, c(0,3), c(0,4)) # grow by 5 percent on all sides grow.rectangle(w, fraction=0.05) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/rpoispp3.Rd0000644000176200001440000000300613160710621014732 0ustar liggesusers\name{rpoispp3} \alias{rpoispp3} \title{ Generate Poisson Point Pattern in Three Dimensions } \description{ Generate a random three-dimensional point pattern using the homogeneous Poisson process. } \usage{ rpoispp3(lambda, domain = box3(), nsim=1, drop=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single positive number. } \item{domain}{ Three-dimensional box in which the process should be generated. An object of class \code{"box3"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern in three dimensions (an object of class \code{"pp3"}). If \code{nsim > 1}, a list of such point patterns. } \details{ This function generates a realisation of the homogeneous Poisson process in three dimensions, with intensity \code{lambda} (points per unit volume). The realisation is generated inside the three-dimensional region \code{domain} which currently must be a rectangular box (object of class \code{"box3"}). } \note{ The intensity \code{lambda} is the expected number of points \emph{per unit volume}. 
} \seealso{ \code{\link{runifpoint3}}, \code{\link{pp3}}, \code{\link{box3}} } \examples{ X <- rpoispp3(50) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/methods.linim.Rd0000644000176200001440000000546313160710621015736 0ustar liggesusers\name{methods.linim} \Rdversion{1.1} \alias{methods.linim} %DoNotExport \alias{as.im.linim} \alias{as.data.frame.linim} \alias{print.linim} \alias{summary.linim} \alias{affine.linim} \alias{scalardilate.linim} \alias{shift.linim} \title{ Methods for Images on a Linear Network } \description{ Methods for the class \code{"linim"} of functions on a linear network. } \usage{ \method{print}{linim}(x, \dots) \method{summary}{linim}(object, \dots) \method{as.im}{linim}(X, \dots) \method{as.data.frame}{linim}(x, \dots) \method{shift}{linim}(X, \dots) \method{scalardilate}{linim}(X, f, \dots, origin=NULL) \method{affine}{linim}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X,x,object}{ A pixel image on a linear network (object of class \code{"linim"}). } \item{\dots}{ Extra arguments passed to other methods. } \item{f}{Numeric. Scalar dilation factor.} \item{mat}{Numeric matrix representing the linear transformation.} \item{vec}{Numeric vector of length 2 specifying the shift vector.} \item{origin}{Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{summary}} and \code{\link{as.data.frame}}, and the \pkg{spatstat} generic functions \code{\link{as.im}}, \code{\link{shift}}, \code{\link{scalardilate}} and \code{\link{affine}}. An object of class \code{"linim"} represents a pixel image defined on a linear network. The method \code{as.im.linim} extracts the pixel values and returns a pixel image of class \code{"im"}. 
The method \code{as.data.frame.linim} returns a data frame giving spatial locations (in cartesian and network coordinates) and corresponding function values. The methods \code{shift.linim}, \code{scalardilate.linim} and \code{affine.linim} apply geometric transformations to the pixels and the underlying linear network, without changing the pixel values. } \value{ For \code{print.linim} the result is \code{NULL}. The function \code{summary.linim} returns an object of class \code{"summary.linim"}. In normal usage this summary is automatically printed by \code{\link{print.summary.linim}}. For \code{as.im.linim} the result is an object of class \code{"im"}. For the geometric transformations \code{shift.linim}, \code{scalardilate.linim} and \code{affine.linim}, the result is another object of class \code{"linim"}. } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y) {x-y}, W=M) X <- linim(simplenet, Z) X shift(X, c(1,1)) scalardilate(X, 2) head(as.data.frame(X)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/Extract.ppp.Rd0000644000176200001440000001437713160710621015400 0ustar liggesusers\name{Extract.ppp} \alias{[.ppp} \alias{[<-.ppp} \title{Extract or Replace Subset of Point Pattern} \description{ Extract or replace a subset of a point pattern. Extraction of a subset has the effect of thinning the points and/or trimming the window. } \usage{ \method{[}{ppp}(x, i, j, drop=FALSE, \dots, clip=FALSE) \method{[}{ppp}(x, i, j) <- value } \arguments{ \item{x}{ A two-dimensional point pattern. An object of class \code{"ppp"}. } \item{i}{ Subset index. Either a valid subset index in the usual \R sense, indicating which points should be retained, or a window (an object of class \code{"owin"}) delineating a subset of the original observation window, or a pixel image with logical values defining a subset of the original observation window. } \item{value}{ Replacement value for the subset. A point pattern. } \item{j}{ Redundant. 
Included for backward compatibility. } \item{drop}{ Logical value indicating whether to remove unused levels of the marks, if the marks are a factor. } \item{clip}{ Logical value indicating how to form the window of the resulting point pattern, when \code{i} is a window. If \code{clip=FALSE} (the default), the result has window equal to \code{i}. If \code{clip=TRUE}, the resulting window is the intersection between the window of \code{x} and the window \code{i}. } \item{\dots}{ Ignored. This argument is required for compatibility with the generic function. } } \value{ A point pattern (of class \code{"ppp"}). } \details{ These functions extract a designated subset of a point pattern, or replace the designated subset with another point pattern. The function \code{[.ppp} is a method for \code{\link{[}} for the class \code{"ppp"}. It extracts a designated subset of a point pattern, either by ``\emph{thinning}'' (retaining/deleting some points of a point pattern) or ``\emph{trimming}'' (reducing the window of observation to a smaller subregion and retaining only those points which lie in the subregion) or both. The pattern will be ``thinned'' if \code{i} is a subset index in the usual \R sense: either a numeric vector of positive indices (identifying the points to be retained), a numeric vector of negative indices (identifying the points to be deleted) or a logical vector of length equal to the number of points in the point pattern \code{x}. In the latter case, the points \code{(x$x[i], x$y[i])} for which \code{subset[i]=TRUE} will be retained, and the others will be deleted. The pattern will be ``trimmed'' if \code{i} is an object of class \code{"owin"} specifying a window of observation. The points of \code{x} lying inside the new window \code{i} will be retained. Alternatively \code{i} may be a pixel image (object of class \code{"im"}) with logical values; the pixels with the value \code{TRUE} will be interpreted as a window. 
The argument \code{drop} determines whether to remove unused levels of a factor, if the point pattern is multitype (i.e. the marks are a factor) or if the marks are a data frame in which some of the columns are factors. The function \code{[<-.ppp} is a method for \code{\link{[<-}} for the class \code{"ppp"}. It replaces the designated subset with the point pattern \code{value}. The subset of \code{x} to be replaced is designated by the argument \code{i} as above. The replacement point pattern \code{value} must lie inside the window of the original pattern \code{x}. The ordering of points in \code{x} will be preserved if the replacement pattern \code{value} has the same number of points as the subset to be replaced. Otherwise the ordering is unpredictable. If the original pattern \code{x} has marks, then the replacement pattern \code{value} must also have marks, of the same type. Use the function \code{\link{unmark}} to remove marks from a marked point pattern. Use the function \code{\link{split.ppp}} to select those points in a marked point pattern which have a specified mark. } \seealso{ \code{\link{subset.ppp}}. \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{unmark}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}} } \section{Warnings}{ The function does not check whether \code{i} is a subset of \code{Window(x)}. Nor does it check whether \code{value} lies inside \code{Window(x)}. } \examples{ # Longleaf pines data lon <- longleaf \dontrun{ plot(lon) } \testonly{lon <- lon[seq(1,npoints(lon),by=10)]} # adult trees defined to have diameter at least 30 cm longadult <- subset(lon, marks >= 30) \dontrun{ plot(longadult) } # note that the marks are still retained. 
# Use unmark(longadult) to remove the marks # New Zealand trees data \dontrun{ plot(nztrees) # plot shows a line of trees at the far right abline(v=148, lty=2) # cut along this line } nzw <- owin(c(0,148),c(0,95)) # the subwindow # trim dataset to this subwindow nzsub <- nztrees[nzw] \dontrun{ plot(nzsub) } # Redwood data \dontrun{ plot(redwood) } # Random thinning: delete 60\% of data retain <- (runif(npoints(redwood)) < 0.4) thinred <- redwood[retain] \dontrun{ plot(thinred) } # Scramble 60\% of data X <- redwood modif <- (runif(npoints(X)) < 0.6) X[modif] <- runifpoint(ex=X[modif]) # Lansing woods data - multitype points lan <- lansing \testonly{ lan <- lan[seq(1, npoints(lan), length=100)] } # Hickory trees hicks <- split(lansing)$hickory # Trees in subwindow win <- owin(c(0.3, 0.6),c(0.2, 0.5)) lsub <- lan[win] # Scramble the locations of trees in subwindow, retaining their marks lan[win] <- runifpoint(ex=lsub) \%mark\% marks(lsub) # Extract oaks only oaknames <- c("redoak", "whiteoak", "blackoak") oak <- lan[marks(lan) \%in\% oaknames, drop=TRUE] oak <- subset(lan, marks \%in\% oaknames, drop=TRUE) # To clip or not to clip X <- runifpoint(25, letterR) B <- owin(c(2.2, 3.9), c(2, 3.5)) opa <- par(mfrow=c(1,2)) plot(X, main="X[B]") plot(X[B], border="red", cols="red", add=TRUE, show.all=TRUE, main="") plot(X, main="X[B, clip=TRUE]") plot(B, add=TRUE, lty=2) plot(X[B, clip=TRUE], border="blue", cols="blue", add=TRUE, show.all=TRUE, main="") par(opa) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/dirichletAreas.Rd0000644000176200001440000000173513160710571016111 0ustar liggesusers\name{dirichletAreas} \alias{dirichletAreas} \title{ Compute Areas of Tiles in Dirichlet Tessellation } \description{ Calculates the area of each tile in the Dirichlet-Voronoi tessellation of a point pattern. } \usage{ dirichletAreas(X) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). 
} } \details{ This is an efficient algorithm to calculate the areas of the tiles in the Dirichlet-Voronoi tessellation. If the window of \code{X} is a binary pixel mask, the tile areas are computed by counting pixels. Otherwise the areas are computed exactly using analytic geometry. If any points of \code{X} are duplicated, the duplicates will have tile area zero. } \value{ Numeric vector with one entry for each point of \code{X}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{dirichlet}}, \code{\link{dirichletVertices}} } \examples{ aa <- dirichletAreas(cells) } \keyword{spatial} \keyword{math} \keyword{manip} spatstat/man/spokes.Rd0000644000176200001440000000536613160710621014472 0ustar liggesusers\name{spokes} \alias{spokes} \title{Spokes pattern of dummy points} \description{ Generates a pattern of dummy points in a window, given a data point pattern. The dummy points lie on the radii of circles emanating from each data point. } \usage{ spokes(x, y, nrad = 3, nper = 3, fctr = 1.5, Mdefault = 1) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates of data points, or a list with components \code{x} and \code{y}, or a point pattern (an object of class \code{ppp}). } \item{y}{ Vector of \eqn{y} coordinates of data points. Ignored unless \code{x} is a vector. } \item{nrad}{ Number of radii emanating from each data point. } \item{nper}{ Number of dummy points per radius. } \item{fctr}{ Scale factor. Length of largest spoke radius is \code{fctr * M} where \code{M} is the mean nearest neighbour distance for the data points. } \item{Mdefault}{ Value of \code{M} to be used if \code{x} has length 1. } } \value{ If argument \code{x} is a point pattern, a point pattern with window equal to that of \code{x}. Otherwise a list with two components \code{x} and \code{y}. In either case the components \code{x} and \code{y} of the value are numeric vectors giving the coordinates of the dummy points. 
} \details{ This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}). Given the data points, the function creates a collection of \code{nrad * nper * length(x)} dummy points. Around each data point \code{(x[i],y[i])} there are \code{nrad * nper} dummy points, lying on \code{nrad} radii emanating from \code{(x[i],y[i])}, with \code{nper} dummy points equally spaced along each radius. The (equal) spacing of dummy points along each radius is controlled by the factor \code{fctr}. The distance from a data point to the furthest of its associated dummy points is \code{fctr * M} where \code{M} is the mean nearest neighbour distance for the data points. If there is only one data point the nearest neighbour distance is infinite, so the value \code{Mdefault} will be used in place of \code{M}. If \code{x} is a point pattern, then the value returned is also a point pattern, which is clipped to the window of \code{x}. Hence there may be fewer than \code{nrad * nper * length(x)} dummy points in the pattern returned. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{inside.owin}}, \code{\link{gridcentres}}, \code{\link{stratrand}} } \examples{ dat <- runifrect(10) dum <- spokes(dat$x, dat$y, 5, 3, 0.7) plot(dum) Q <- quadscheme(dat, dum, method="dirichlet") plot(Q, tiles=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/predict.mppm.Rd0000644000176200001440000001140613160710621015560 0ustar liggesusers\name{predict.mppm} \alias{predict.mppm} \title{Prediction for Fitted Multiple Point Process Model} \description{ Given a fitted multiple point process model obtained by \code{\link{mppm}}, evaluate the spatial trend and/or the conditional intensity of the model. By default, predictions are evaluated over a grid of locations, yielding pixel images of the trend and conditional intensity. 
Alternatively predictions may be evaluated at specified locations with specified values of the covariates. } \usage{ \method{predict}{mppm}(object, ..., newdata = NULL, type = c("trend", "cif"), ngrid = 40, locations=NULL, verbose=FALSE) } \arguments{ \item{object}{The fitted model. An object of class \code{"mppm"} obtained from \code{\link{mppm}}. } \item{\dots}{Ignored.} \item{newdata}{ New values of the covariates, for which the predictions should be computed. If \code{newdata=NULL}, predictions are computed for the original values of the covariates, to which the model was fitted. Otherwise \code{newdata} should be a hyperframe (see \code{\link{hyperframe}}) containing columns of covariates as required by the model. If \code{type} includes \code{"cif"}, then \code{newdata} must also include a column of spatial point pattern responses, in order to compute the conditional intensity. } \item{type}{ Type of predicted values required. A character string or vector of character strings. Options are \code{"trend"} for the spatial trend (first-order term) and \code{"cif"} or \code{"lambda"} for the conditional intensity. Alternatively \code{type="all"} selects all options. } \item{ngrid}{ Dimensions of the grid of spatial locations at which prediction will be performed (if \code{locations=NULL}). An integer or a pair of integers. } \item{locations}{ Optional. The locations at which predictions should be performed. A list of point patterns, with one entry for each row of \code{newdata}. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ This function computes the spatial trend and the conditional intensity of a fitted multiple spatial point process model. See Baddeley and Turner (2000) and Baddeley et al (2007) for explanation and examples. Note that by ``spatial trend'' we mean the (exponentiated) first order potential and not the intensity of the process. 
[For example if we fit the stationary Strauss process with parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, then the spatial trend is constant and equal to \eqn{\beta}{beta}.] The conditional intensity \eqn{\lambda(u,X)}{lambda(u,X)} of the fitted model is evaluated at each required spatial location u, with respect to the response point pattern X. If \code{locations=NULL}, then predictions are performed at an \code{ngrid} by \code{ngrid} grid of locations in the window for each response point pattern. The result will be a hyperframe containing a column of images of the trend (if selected) and a column of images of the conditional intensity (if selected). The result can be plotted. If \code{locations} is given, then it should be a list of point patterns (objects of class \code{"ppp"}). Predictions are performed at these points. The result is a hyperframe containing a column of marked point patterns in which the predicted values are attached as marks at each of the given locations. } \value{ A hyperframe with columns named \code{trend} and \code{cif}. If \code{locations=NULL}, the entries of the hyperframe are pixel images. If \code{locations} is not null, the entries are marked point patterns constructed by attaching the predicted values to the \code{locations} point patterns. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S., Bell, M. and Turner, R. Analysis of a designed experiment where the response is a spatial point pattern. In preparation. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. 
Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{mppm}}, \code{\link{fitted.mppm}}, \code{\link{hyperframe}} } \examples{ h <- hyperframe(Bugs=waterstriders) fit <- mppm(Bugs ~ x, data=h, interaction=Strauss(7)) # prediction on a grid p <- predict(fit) plot(p$trend) # prediction at specified locations loc <- with(h, runifpoint(20, Window(Bugs))) p2 <- predict(fit, locations=loc) plot(p2$trend) } \keyword{spatial} \keyword{models} spatstat/man/update.kppm.Rd0000644000176200001440000000450213160710621015405 0ustar liggesusers\name{update.kppm} \alias{update.kppm} \title{Update a Fitted Cluster Point Process Model} \description{ \code{update} method for class \code{"kppm"}. } \usage{ \method{update}{kppm}(object, \dots, evaluate=TRUE) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}, obtained from \code{\link{kppm}}. } \item{\dots}{ Arguments passed to \code{\link{kppm}}. } \item{evaluate}{ Logical value indicating whether to return the updated fitted model (\code{evaluate=TRUE}, the default) or just the updated call to \code{kppm} (\code{evaluate=FALSE}). } } \details{ \code{object} should be a fitted cluster point process model, obtained from the model-fitting function \code{\link{kppm}}. The model will be updated according to the new arguments provided. If the argument \code{trend} is provided, it determines the intensity in the updated model. It should be an \R formula (with or without a left hand side). It may include the symbols \code{+} or \code{-} to specify addition or deletion of terms in the current model formula, as shown in the Examples below. The symbol \code{.} refers to the current contents of the formula. The intensity in the updated model is determined by the argument \code{trend} if it is provided, or otherwise by any unnamed argument that is a formula, or otherwise by the formula of the original model, \code{formula(object)}. 
The spatial point pattern data to which the new model is fitted is determined by the left hand side of the updated model formula, if this is present. Otherwise it is determined by the argument \code{X} if it is provided, or otherwise by any unnamed argument that is a point pattern or a quadrature scheme. The model is refitted using \code{\link{kppm}}. } \value{ Another fitted cluster point process model (object of class \code{"kppm"}). } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{methods.kppm}}, \code{\link{vcov.kppm}} } \examples{ fit <- kppm(redwood ~1, "Thomas") fitx <- update(fit, ~ . + x) fitM <- update(fit, clusters="MatClust") fitC <- update(fit, cells) fitCx <- update(fit, cells ~ x) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/nndist.pp3.Rd0000644000176200001440000000601113160710621015152 0ustar liggesusers\name{nndist.pp3} \alias{nndist.pp3} \title{Nearest neighbour distances in three dimensions} \description{ Computes the distance from each point to its nearest neighbour in a three-dimensional point pattern. Alternatively computes the distance to the second nearest neighbour, or third nearest, etc. } \usage{ \method{nndist}{pp3}(X, \dots, k=1) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each point. If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th data point. If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th data point. 
If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th data point. } \details{ This function computes the Euclidean distance from each point in a three-dimensional point pattern to its nearest neighbour (the nearest other point of the pattern). If \code{k} is specified, it computes the distance to the \code{k}th nearest neighbour. The function \code{nndist} is generic; this function \code{nndist.pp3} is the method for the class \code{"pp3"}. The argument \code{k} may be a single integer, or an integer vector. If it is a vector, then the \eqn{k}th nearest neighbour distances are computed for each value of \eqn{k} specified in the vector. If there is only one point (if \code{X} has length 1), then a nearest neighbour distance of \code{Inf} is returned. If there are no points (if \code{X} has length zero) a numeric vector of length zero is returned. To identify \emph{which} point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. To use the nearest neighbour distances for statistical inference, it is often advisable to use the edge-corrected empirical distribution, computed by \code{\link{G3est}}. To find the nearest neighbour distances from one point pattern to another point pattern, use \code{\link{nncross}}. 
} \seealso{ \code{\link{nndist}}, \code{\link{pairdist}}, \code{\link{G3est}}, \code{\link{nnwhich}} } \examples{ X <- runifpoint3(40) # nearest neighbours d <- nndist(X) # second nearest neighbours d2 <- nndist(X, k=2) # first, second and third nearest d1to3 <- nndist(X, k=1:3) } \author{ \adrian based on code for two dimensions by Pavel Grabarnik } \keyword{spatial} \keyword{math} spatstat/man/Extract.hyperframe.Rd0000644000176200001440000000735513160710621016741 0ustar liggesusers\name{Extract.hyperframe} \alias{[.hyperframe} \alias{[<-.hyperframe} \alias{$.hyperframe} \alias{$<-.hyperframe} \title{Extract or Replace Subset of Hyperframe} \description{ Extract or replace a subset of a hyperframe. } \usage{ \method{[}{hyperframe}(x, i, j, drop, strip=drop, \dots) \method{[}{hyperframe}(x, i, j) <- value \method{$}{hyperframe}(x, name) \method{$}{hyperframe}(x, name) <- value } \arguments{ \item{x}{ A hyperframe (object of class \code{"hyperframe"}). } \item{i,j}{ Row and column indices. } \item{drop,strip}{ Logical values indicating what to do when the hyperframe has only one row or column. See Details. } \item{\dots}{ Ignored. } \item{name}{ Name of a column of the hyperframe. } \item{value}{ Replacement value for the subset. A hyperframe or (if the subset is a single column) a list or an atomic vector. } } \value{ A hyperframe (of class \code{"hyperframe"}). } \details{ These functions extract a designated subset of a hyperframe, or replace the designated subset with another hyperframe. The function \code{[.hyperframe} is a method for the subset operator \code{\link{[}} for the class \code{"hyperframe"}. It extracts the subset of \code{x} specified by the row index \code{i} and column index \code{j}. The argument \code{drop} determines whether the array structure will be discarded if possible. The argument \code{strip} determines whether the list structure in a row or column or cell will be discarded if possible. 
If \code{drop=FALSE} (the default), the return value is always a hyperframe or data frame. If \code{drop=TRUE}, and if the selected subset has only one row, or only one column, or both, then \itemize{ \item{ if \code{strip=FALSE}, the result is a list, with one entry for each array cell that was selected. } \item{ if \code{strip=TRUE}, \itemize{ \item if the subset has one row containing several columns, the result is a list or (if possible) an atomic vector; \item if the subset has one column containing several rows, the result is a list or (if possible) an atomic vector; \item if the subset has exactly one row and exactly one column, the result is the object (or atomic value) contained in this row and column. } } } The function \code{[<-.hyperframe} is a method for the subset replacement operator \code{\link{[<-}} for the class \code{"hyperframe"}. It replaces the designated subset with the hyperframe \code{value}. The subset of \code{x} to be replaced is designated by the arguments \code{i} and \code{j} as above. The replacement \code{value} should be a hyperframe with the appropriate dimensions, or (if the specified subset is a single column) a list of the appropriate length. The function \code{$.hyperframe} is a method for \code{\link{$}} for hyperframes. It extracts the relevant column of the hyperframe. The result is always a list (i.e. equivalent to using \code{[.hyperframe} with \code{strip=FALSE}). The function \code{$<-.hyperframe} is a method for \code{\link{$<-}} for hyperframes. It replaces the relevant column of the hyperframe. The replacement value should be a list of the appropriate length. 
} \seealso{ \code{\link{hyperframe}} } \examples{ h <- hyperframe(X=list(square(1), square(2)), Y=list(sin, cos)) h h[1, ] h[1, ,drop=TRUE] h[ , 1] h[ , 1, drop=TRUE] h[1,1] h[1,1,drop=TRUE] h[1,1,drop=TRUE,strip=FALSE] h[1,1] <- list(square(3)) # extract column h$X # replace existing column h$Y <- list(cells, cells) # add new column h$Z <- list(cells, cells) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/pppmatching.Rd0000644000176200001440000000512113160710621015465 0ustar liggesusers\name{pppmatching} \alias{pppmatching} \title{Create a Point Matching} \description{ Creates an object of class \code{"pppmatching"} representing a matching of two planar point patterns (objects of class \code{"ppp"}). } \usage{ pppmatching(X, Y, am, type = NULL, cutoff = NULL, q = NULL, mdist = NULL) } \arguments{ \item{X,Y}{Two point patterns (objects of class \code{"ppp"}).} \item{am}{ An \code{npoints(X)} by \code{npoints(Y)} matrix with entries \eqn{\geq 0}{>= 0} that specifies which points are matched and with what weight; alternatively, an object that can be coerced to this form by \code{as.matrix}. } \item{type}{ A character string giving the type of the matching. One of \code{"spa"}, \code{"ace"} or \code{"mat"}, or \code{NULL} for a generic or unknown matching. } \item{cutoff, q}{ Numerical values specifying the cutoff value \eqn{> 0} for interpoint distances and the order \eqn{q \in [1,\infty]}{q in [1,Inf]} of the average that is applied to them. \code{NULL} if not applicable or unknown. } \item{mdist}{ Numerical value for the distance to be associated with the matching. } } \details{ The argument \code{am} is interpreted as a "generalized adjacency matrix": if the \code{[i,j]}-th entry is positive, then the \code{i}-th point of \code{X} and the \code{j}-th point of \code{Y} are matched and the value of the entry gives the corresponding weight of the match. For an unweighted matching all the weights should be set to \eqn{1}. 
The remaining arguments are optional and allow one to save additional information about the matching. See the help files for \code{\link{pppdist}} and \code{\link{matchingdist}} for details on the meaning of these parameters. } \author{ Dominic Schuhmacher \email{dominic.schuhmacher@stat.unibe.ch} \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{pppmatching.object}} \code{\link{matchingdist}} } \examples{ # a random unweighted complete matching X <- runifpoint(10) Y <- runifpoint(10) am <- r2dtable(1, rep(1,10), rep(1,10))[[1]] # generates a random permutation matrix m <- pppmatching(X, Y, am) summary(m) m$matrix \dontrun{ plot(m) } # a random weighted complete matching X <- runifpoint(7) Y <- runifpoint(7) am <- r2dtable(1, rep(10,7), rep(10,7))[[1]]/10 # generates a random doubly stochastic matrix m2 <- pppmatching(X, Y, am) summary(m2) m2$matrix \dontrun{ # Note: plotting currently does not distinguish # between different weights plot(m2) } } \keyword{spatial} \keyword{datagen} spatstat/man/subset.ppp.Rd0000644000176200001440000001147513160710621015267 0ustar liggesusers\name{subset.ppp} \alias{subset.ppp} \alias{subset.pp3} \alias{subset.lpp} \alias{subset.ppx} \title{ Subset of Point Pattern Satisfying A Condition } \description{ Given a point pattern, return the subset of points which satisfy a specified condition. } \usage{ \method{subset}{ppp}(x, subset, select, drop=FALSE, \dots) \method{subset}{pp3}(x, subset, select, drop=FALSE, \dots) \method{subset}{lpp}(x, subset, select, drop=FALSE, \dots) \method{subset}{ppx}(x, subset, select, drop=FALSE, \dots) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}). } \item{subset}{ Logical expression indicating which points are to be kept. The expression may involve the names of spatial coordinates (\code{x}, \code{y}, etc), the \code{marks}, and (if there is more than one column of marks) the names of individual columns of marks. 
Missing values are taken as false. See Details. } \item{select}{ Expression indicating which columns of marks should be kept. The \emph{names} of columns of marks can be used in this expression, and will be treated as if they were column indices. See Details. } \item{drop}{ Logical value indicating whether to remove unused levels of the marks, if the marks are a factor. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{subset}}. It extracts the subset of points of \code{x} that satisfy the logical expression \code{subset}, and retains only the columns of marks that are specified by the expression \code{select}. The result is always a point pattern, with the same window as \code{x}. The argument \code{subset} determines the subset of points that will be extracted. It should be a logical expression. It may involve the variable names \code{x} and \code{y} representing the Cartesian coordinates; the names of other spatial coordinates or local coordinates; the name \code{marks} representing the marks; and (if there is more than one column of marks) the names of individual columns of marks. The default is to keep all points. The argument \code{select} determines which columns of marks will be retained (if there are several columns of marks). It should be an expression involving the names of columns of marks (which will be interpreted as integers representing the positions of these columns). For example if there are columns of marks named \code{A} to \code{Z}, then \code{select=D:F} is a valid expression and means that columns \code{D}, \code{E} and \code{F} will be retained. Similarly \code{select=-(A:C)} is valid and means that columns \code{A} to \code{C} will be deleted. The default is to retain all columns. Setting \code{subset=FALSE} will produce an empty point pattern (i.e. containing zero points) in the same window as \code{x}. Setting \code{select=FALSE} or \code{select= -marks} will remove all the marks from \code{x}. 
The argument \code{drop} determines whether to remove unused levels of a factor, if the resulting point pattern is multitype (i.e. the marks are a factor) or if the marks are a data frame in which some of the columns are factors. The result is always a point pattern, of the same class as \code{x}. Spatial coordinates (and local coordinates) are always retained. To extract only some columns of marks or coordinates as a data frame, use \code{subset(as.data.frame(x), ...)} } \section{Other kinds of subset arguments}{ Alternatively the argument \code{subset} can be any kind of subset index acceptable to \code{\link{[.ppp}}, \code{\link{[.pp3}}, \code{\link{[.ppx}}. This argument selects which points of \code{x} will be retained. \bold{Warning:} if the argument \code{subset} is a window, this is interpreted as specifying the subset of points that fall inside that window, but the resulting point pattern has the same window as the original pattern \code{x}. } \value{ A point pattern of the same class as \code{x}, in the same spatial window as \code{x}. The result is a subset of \code{x}, possibly with some columns of marks removed. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link[base]{subset}}, \code{\link{[.ppp}}, \code{\link{[.pp3}}, \code{\link{[.lpp}}, \code{\link{[.ppx}} } \examples{ plot(subset(cells, x > 0.5)) subset(amacrine, marks == "on") subset(amacrine, marks == "on", drop=TRUE) subset(redwood, nndist(redwood) > 0.04) subset(finpines, select=height) subset(finpines, diameter > 2, height) subset(nbfires, year==1999 & ign.src == "campfire", select=cause:fnl.size) v <- subset(chicago, x + y > 1100 & marks == "assault") vv <- subset(chicago, x + y > 1100 & marks == "assault", drop=TRUE) a <- subset(rpoispp3(40), z > 0.5) } \keyword{spatial} \keyword{manip} spatstat/man/coef.ppm.Rd0000644000176200001440000000370613160710571014675 0ustar liggesusers\name{coef.ppm} \alias{coef.ppm} \title{ Coefficients of Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, extract the coefficients of the fitted model. A method for \code{coef}. } \usage{ \method{coef}{ppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) } \item{\dots}{ Ignored. } } \value{ A vector containing the fitted coefficients. } \details{ This function is a method for the generic function \code{\link{coef}}. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. This function extracts the vector of coefficients of the fitted model. This is the estimate of the parameter vector \eqn{\theta}{theta} such that the conditional intensity of the model is of the form \deqn{ \lambda(u,x) = \exp(\theta S(u,x)) }{ lambda(u,x) = exp(theta . S(u,x)) } where \eqn{S(u,x)} is a (vector-valued) statistic. For example, if the model \code{object} is the uniform Poisson process, then \code{coef(object)} will yield a single value (named \code{"(Intercept)"}) which is the logarithm of the fitted intensity of the Poisson process. 
Use \code{\link{print.ppm}} to print a more useful description of the fitted model. } \seealso{ \code{\link{print.ppm}}, \code{\link{ppm.object}}, \code{\link{ppm}} } \examples{ data(cells) poi <- ppm(cells, ~1, Poisson()) coef(poi) # This is the log of the fitted intensity of the Poisson process stra <- ppm(cells, ~1, Strauss(r=0.07)) coef(stra) # The two entries "(Intercept)" and "Interaction" # are respectively log(beta) and log(gamma) # in the usual notation for Strauss(beta, gamma, r) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/plot.kppm.Rd0000644000176200001440000000563613160710621015112 0ustar liggesusers\name{plot.kppm} \alias{plot.kppm} \title{Plot a fitted cluster point process} \description{ Plots a fitted cluster point process model, displaying the fitted intensity and the fitted \eqn{K}-function. } \usage{ \method{plot}{kppm}(x, \dots, what=c("intensity", "statistic", "cluster"), pause=interactive(), xname) } \arguments{ \item{x}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} and \code{\link{plot.fv}} to control the plot. } \item{what}{ Character vector determining what will be plotted. } \item{pause}{ Logical value specifying whether to pause between plots. } \item{xname}{ Optional. Character string. The name of the object \code{x} for use in the title of the plot. } } \details{ This is a method for the generic function \code{\link[graphics]{plot}} for the class \code{"kppm"} of fitted cluster point process models. The argument \code{x} should be a cluster point process model (object of class \code{"kppm"}) obtained using the function \code{\link{kppm}}. The choice of plots (and the order in which they are displayed) is controlled by the argument \code{what}. The options (partially matched) are \code{"intensity"}, \code{"statistic"} and \code{"cluster"}. 
This command is capable of producing three different plots: \describe{ \item{what="intensity"}{specifies the fitted intensity of the model, which is plotted using \code{\link{plot.ppm}}. By default this plot is not produced for stationary models.} \item{what="statistic"}{specifies the empirical and fitted summary statistics, which are plotted using \code{\link{plot.fv}}. This is only meaningful if the model has been fitted using the Method of Minimum Contrast, and it is turned off otherwise.} \item{what="cluster"}{specifies a fitted cluster, which is computed by \code{\link{clusterfield}} and plotted by \code{\link{plot.im}}. It is only meaningful for Poisson cluster (incl. Neyman-Scott) processes, and it is turned off for log-Gaussian Cox processes (LGCP). If the model is stationary (and non-LGCP) this option is turned on by default and shows a fitted cluster positioned at the centroid of the observation window. For non-stationary (and non-LGCP) models this option is only invoked if explicitly requested, and in that case an additional argument \code{locations} (see \code{\link{clusterfield}}) must be given to specify where to position the parent point(s).} } Alternatively \code{what="all"} selects all available options. } \value{ Null. } \examples{ data(redwood) fit <- kppm(redwood~1, "Thomas") plot(fit) } \seealso{ \code{\link{kppm}}, \code{\link{plot.ppm}}, } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/selfcut.psp.Rd0000644000176200001440000000244713160710621015431 0ustar liggesusers\name{selfcut.psp} \alias{selfcut.psp} \title{Cut Line Segments Where They Intersect} \description{ Finds any crossing points between the line segments in a line segment pattern, and cuts the segments into pieces at these crossing-points. } \usage{ selfcut.psp(A, \dots, eps) } \arguments{ \item{A}{ Line segment pattern (object of class \code{"psp"}). } \item{eps}{ Optional. Smallest permissible length of the resulting line segments. 
There is a sensible default. } \item{\dots}{Ignored.} } \details{ This function finds any crossing points between different line segments in the line segment pattern \code{A}, and cuts the line segments into pieces at these intersection points. A crossing point occurs whenever one of the line segments in \code{A} intersects another line segment in \code{A}, at a nonzero angle of intersection. } \value{ Another line segment pattern (object of class \code{"psp"}) in the same window as \code{A} with the same kind of marks as \code{A}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{selfcrossing.psp}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) Y <- selfcut.psp(X) n <- nsegments(Y) plot(Y \%mark\% factor(sample(seq_len(n), n, replace=TRUE))) } \keyword{spatial} \keyword{manip} spatstat/man/pairdist.lpp.Rd0000644000176200001440000000264013160710621015567 0ustar liggesusers\name{pairdist.lpp} \alias{pairdist.lpp} \title{ Pairwise shortest-path distances between points on a linear network } \description{ Given a pattern of points on a linear network, compute the matrix of distances between all pairs of points, measuring distance by the shortest path in the network. } \usage{ \method{pairdist}{lpp}(X, ..., method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function computes the matrix of distances between all pairs of points, measuring distance by the shortest path in the network. If \code{method="C"} the distances are computed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. The \R code is much slower, but is provided for checking purposes. 
If two points cannot be joined by a path, the distance between them is infinite (\code{Inf}). } \value{ A symmetric matrix, whose values are nonnegative numbers or infinity (\code{Inf}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \seealso{ \code{\link{lpp}} } \examples{ X <- runiflpp(12, simplenet) pairdist(X) } \keyword{spatial} spatstat/man/convolve.im.Rd0000644000176200001440000000446513160710571015430 0ustar liggesusers\name{convolve.im} \alias{convolve.im} \title{Convolution of Pixel Images} \description{ Computes the convolution of two pixel images. } \usage{ convolve.im(X, Y=X, \dots, reflectX=FALSE, reflectY=FALSE) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}. } \item{Y}{ Optional. Another pixel image. } \item{\dots}{Ignored.} \item{reflectX,reflectY}{ Logical values specifying whether the images \code{X} and \code{Y} (respectively) should be reflected in the origin before computing the convolution. } } \value{ A pixel image (an object of class \code{"im"}) representing the convolution of \code{X} and \code{Y}. } \details{ The \emph{convolution} of two pixel images \eqn{X} and \eqn{Y} in the plane is the function \eqn{C(v)} defined for each vector \eqn{v} as \deqn{ C(v) = \int X(u)Y(v-u)\, {\rm d}u }{ C(v) = integral of X(u) * Y(v-u) du } where the integral is over all spatial locations \eqn{u}, and where \eqn{X(u)} and \eqn{Y(u)} denote the pixel values of \eqn{X} and \eqn{Y} respectively at location \eqn{u}. This command computes a discretised approximation to the convolution, using the Fast Fourier Transform. The return value is another pixel image (object of class \code{"im"}) whose greyscale values are values of the convolution. If \code{reflectX = TRUE} then the pixel image \code{X} is reflected in the origin (see \code{\link{reflect}}) before the convolution is computed, so that \code{convolve.im(X,Y,reflectX=TRUE)} is mathematically equivalent to \code{convolve.im(reflect(X), Y)}. 
(These two commands are not exactly equivalent, because the reflection is performed in the Fourier domain in the first command, and reflection is performed in the spatial domain in the second command). Similarly if \code{reflectY = TRUE} then the pixel image \code{Y} is reflected in the origin before the convolution is computed, so that \code{convolve.im(X,Y,reflectY=TRUE)} is mathematically equivalent to \code{convolve.im(X, reflect(Y))}. } \seealso{ \code{\link{imcov}}, \code{\link{reflect}} } \examples{ X <- as.im(letterR) Y <- as.im(square(1)) plot(convolve.im(X, Y)) plot(convolve.im(X, Y, reflectX=TRUE)) plot(convolve.im(X)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/methods.pp3.Rd0000644000176200001440000000311313160710621015316 0ustar liggesusers\name{methods.pp3} \Rdversion{1.1} \alias{methods.pp3} %DoNotExport \alias{print.pp3} \alias{summary.pp3} \alias{print.summary.pp3} \alias{unitname.pp3} \alias{unitname<-.pp3} \title{ Methods for three-dimensional point patterns } \description{ Methods for class \code{"pp3"}. } \usage{ \method{print}{pp3}(x, ...) \method{print}{summary.pp3}(x, ...) \method{summary}{pp3}(object, ...) \method{unitname}{pp3}(x) \method{unitname}{pp3}(x) <- value } \arguments{ \item{x,object}{ Object of class \code{"pp3"}. } \item{\dots}{ Ignored. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}} and \code{\link{unitname<-}} for the class \code{"pp3"} of three-dimensional point patterns. The \code{print} and \code{summary} methods print a description of the point pattern. The \code{unitname} method extracts the name of the unit of length in which the point coordinates are expressed. The \code{unitname<-} method assigns the name of the unit of length. } \value{ For \code{print.pp3} the value is \code{NULL}. 
For \code{unitname.pp3} an object of class \code{"units"}. } \author{\adrian and \rolf } \seealso{ \code{\link{pp3}}, \code{\link{print}}, \code{\link{unitname}} \code{\link{unitname<-}} } \examples{ X <- pp3(runif(42),runif(42),runif(42), box3(c(0,1), unitname="mm")) X unitname(X) unitname(X) <- c("foot", "feet") summary(X) } \keyword{spatial} \keyword{methods} spatstat/man/compatible.fv.Rd0000644000176200001440000000215613160710571015715 0ustar liggesusers\name{compatible.fv} \alias{compatible.fv} \title{Test Whether Function Objects Are Compatible} \description{ Tests whether two or more function objects (class \code{"fv"}) are compatible. } \usage{ \method{compatible}{fv}(A, B, \dots) } \arguments{ \item{A,B,\dots}{Two or more function value objects (class \code{"fv"}).} } \details{ An object of class \code{"fv"} is essentially a data frame containing several different statistical estimates of the same function. Such objects are returned by \code{\link{Kest}} and its relatives. This command tests whether such objects are compatible (so that, for example, they could be added or subtracted). It is a method for the generic command \code{\link{compatible}}. The functions are compatible if they have been evaluated at the same sequence of values of the argument \code{r}, and if the statistical estimates have the same names. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{eval.fv}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/rescale.psp.Rd0000644000176200001440000000376113160710621015402 0ustar liggesusers\name{rescale.psp} \alias{rescale.psp} \title{Convert Line Segment Pattern to Another Unit of Length} \description{ Converts a line segment pattern dataset to another unit of length. 
} \usage{ \method{rescale}{psp}(X, s, unitname) } \arguments{ \item{X}{Line segment pattern (object of class \code{"psp"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{unitname}{ Optional. New name for the unit of length. See \code{\link{unitname}}. } } \value{ Another line segment pattern (of class \code{"psp"}), representing the same data, but expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates in the line segment pattern \code{X} (and its window) will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). The result is a line segment pattern representing the \emph{same} data but re-expressed in a different unit. Mark values are unchanged. If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. } \section{Note}{ The result of this operation is equivalent to the original segment pattern. If you want to actually change the coordinates by a linear transformation, producing a segment pattern that is not equivalent to the original one, use \code{\link{affine}}. } \seealso{ \code{\link{units}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(copper) X <- copper$Lines X # data are in km # convert to metres rescale(X, 1/1000) # convert data and rename unit rescale(X, 1/1000, c("metre", "metres")) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/Extract.psp.Rd0000644000176200001440000000633613160710621015377 0ustar liggesusers\name{Extract.psp} \alias{[.psp} \title{Extract Subset of Line Segment Pattern} \description{ Extract a subset of a line segment pattern. 
} \usage{ \method{[}{psp}(x, i, j, drop, \dots, fragments=TRUE) } \arguments{ \item{x}{ A two-dimensional line segment pattern. An object of class \code{"psp"}. } \item{i}{ Subset index. Either a valid subset index in the usual \R sense, indicating which segments should be retained, or a window (an object of class \code{"owin"}) delineating a subset of the original observation window. } \item{j}{ Redundant - included for backward compatibility. } \item{drop}{ Ignored. Required for compatibility with generic function. } \item{\dots}{ Ignored. } \item{fragments}{ Logical value indicating whether to retain all pieces of line segments that intersect the new window (\code{fragments=TRUE}, the default) or to retain only those line segments that lie entirely inside the new window (\code{fragments=FALSE}). } } \value{ A line segment pattern (of class \code{"psp"}). } \details{ These functions extract a designated subset of a line segment pattern. The function \code{[.psp} is a method for \code{\link{[}} for the class \code{"psp"}. It extracts a designated subset of a line segment pattern, either by ``\emph{thinning}'' (retaining/deleting some line segments of a line segment pattern) or ``\emph{trimming}'' (reducing the window of observation to a smaller subregion and clipping the line segments to this boundary) or both. The pattern will be ``thinned'' if the subset index \code{i} is a vector. The line segments designated by \code{i} will be retained. Here \code{i} can be a numeric vector of positive indices (identifying the line segments to be retained), a numeric vector of negative indices (identifying the line segments to be deleted) or a logical vector of length equal to the number of line segments in the line segment pattern \code{x}. In the latter case, the line segments for which the corresponding entry of \code{i} is \code{TRUE} will be retained, and the others will be deleted. The pattern will be ``trimmed'' if \code{i} is a window. 
This should be an object of class \code{\link{owin}} specifying a window of observation to which the line segment pattern \code{x} will be trimmed. Line segments of \code{x} lying inside the new \code{window} will be retained unchanged. Line segments lying partially inside the new \code{window} and partially outside it will, by default, be clipped so that they lie entirely inside the window; but if \code{fragments=FALSE}, such segments will be removed. Both ``thinning'' and ``trimming'' can be performed together. } \seealso{ \code{\link{psp.object}}, \code{\link{owin.object}} } \examples{ a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) plot(a) # thinning id <- sample(c(TRUE, FALSE), 20, replace=TRUE) b <- a[id] plot(b, add=TRUE, lwd=3) # trimming plot(a) w <- owin(c(0.1,0.7), c(0.2, 0.8)) b <- a[w] plot(b, add=TRUE, col="red", lwd=2) plot(w, add=TRUE) u <- a[w, fragments=FALSE] plot(u, add=TRUE, col="blue", lwd=3) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/im.apply.Rd0000644000176200001440000000245413160710621014712 0ustar liggesusers\name{im.apply} \alias{im.apply} \title{ Apply Function Pixelwise to List of Images } \description{ Returns a pixel image obtained by applying a function to the values of corresponding pixels in several pixel images. } \usage{ im.apply(X, FUN, ...) } \arguments{ \item{X}{ A list of pixel images (objects of class \code{"im"}). } \item{FUN}{ A function that can be applied to vectors, or a character string giving the name of such a function. } \item{\dots}{ Additional arguments to \code{FUN}. } } \details{ The argument \code{X} should be a list of pixel images (objects of class \code{"im"}). If the images do not have identical pixel grids, they will be converted to a common grid using \code{\link{harmonise.im}}. At each pixel location, the values of the images in \code{X} at that pixel will be extracted as a vector. The function \code{FUN} will be applied to this vector. 
The result (which should be a single value) becomes the pixel value of the resulting image. } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{eval.im}} for algebraic operations with images. } \examples{ DA <- density(split(amacrine)) DA im.apply(DA, max) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/Jcross.Rd0000644000176200001440000001604213160710571014426 0ustar liggesusers\name{Jcross} \alias{Jcross} \title{ Multitype J Function (i-to-j) } \description{ For a multitype point pattern, estimate the multitype \eqn{J} function summarising the interpoint dependence between points of type \eqn{i} and of type \eqn{j}. } \usage{ Jcross(X, i, j, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{ij}(r)}{Jij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{eps}{A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the function \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. 
} \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{J}{the recommended estimator of \eqn{J_{ij}(r)}{Jij(r)}, currently the Kaplan-Meier estimator. } \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{ij}(r)}{Jij(r)} has been estimated } \item{km}{the Kaplan-Meier estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{un}{the ``uncorrected'' estimator of \eqn{J_{ij}(r)}{Jij(r)} formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{ij}(r)}{1 - Gij(r)} and \eqn{1 - F_{j}(r)}{1 - Fj(r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{ij}(r)}{Jij(r)} for a marked Poisson process, namely 1. } The result also has two attributes \code{"G"} and \code{"F"} which are respectively the outputs of \code{\link{Gcross}} and \code{\link{Fest}} for the point pattern. } \details{ This function \code{Jcross} and its companions \code{\link{Jdot}} and \code{\link{Jmulti}} are generalisations of the function \code{\link{Jest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. 
The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). The ``type \eqn{i} to type \eqn{j}'' multitype \eqn{J} function of a stationary multitype point process \eqn{X} was introduced by Van Lieshout and Baddeley (1999). It is defined by \deqn{J_{ij}(r) = \frac{1 - G_{ij}(r)}{1 - F_{j}(r)}}{Jij(r) = (1 - Gij(r))/(1-Fj(r))} where \eqn{G_{ij}(r)}{Gij(r)} is the distribution function of the distance from a type \eqn{i} point to the nearest point of type \eqn{j}, and \eqn{F_{j}(r)}{Fj(r)} is the distribution function of the distance from a fixed point in space to the nearest point of type \eqn{j} in the pattern. An estimate of \eqn{J_{ij}(r)}{Jij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the subprocess of type \eqn{i} points is independent of the subprocess of points of type \eqn{j}, then \eqn{J_{ij}(r) \equiv 1}{Jij(r) = 1}. Hence deviations of the empirical estimate of \eqn{J_{ij}}{Jij} from the value 1 may suggest dependence between types. This algorithm estimates \eqn{J_{ij}(r)}{Jij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}, using the Kaplan-Meier and border corrections. The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}. 
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must exceed the radius of the largest disc contained in the window. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Jdot}}, \code{\link{Jest}}, \code{\link{Jmulti}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{ woods <- woods[seq(1,npoints(woods), by=30)] } Jhm <- Jcross(woods, "hickory", "maple") # diagnostic plot for independence between hickories and maples plot(Jhm) # synthetic example with two types "a" and "b" pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE)) J <- Jcross(pp) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{nonparametric} spatstat/man/unitname.Rd0000644000176200001440000000660713160710621015005 0ustar liggesusers\name{unitname} \alias{unitname} \alias{unitname.dppm} \alias{unitname.im} \alias{unitname.kppm} \alias{unitname.minconfit} \alias{unitname.owin} \alias{unitname.ppp} \alias{unitname.ppm} \alias{unitname.psp} \alias{unitname.quad} \alias{unitname.slrm} \alias{unitname.tess} \alias{unitname<-} \alias{unitname<-.dppm} \alias{unitname<-.im} \alias{unitname<-.kppm} \alias{unitname<-.minconfit} \alias{unitname<-.owin} \alias{unitname<-.ppp} \alias{unitname<-.ppm} \alias{unitname<-.psp} \alias{unitname<-.quad} \alias{unitname<-.slrm} \alias{unitname<-.tess} \title{Name for Unit of Length} \description{ Inspect or change the name of the unit of length in a spatial dataset. } \usage{ unitname(x) \method{unitname}{dppm}(x) \method{unitname}{im}(x) \method{unitname}{kppm}(x) \method{unitname}{minconfit}(x) \method{unitname}{owin}(x) \method{unitname}{ppm}(x) \method{unitname}{ppp}(x) \method{unitname}{psp}(x) \method{unitname}{quad}(x) \method{unitname}{slrm}(x) \method{unitname}{tess}(x) unitname(x) <- value \method{unitname}{dppm}(x) <- value \method{unitname}{im}(x) <- value \method{unitname}{kppm}(x) <- value \method{unitname}{minconfit}(x) <- value \method{unitname}{owin}(x) <- value \method{unitname}{ppm}(x) <- value \method{unitname}{ppp}(x) <- value \method{unitname}{psp}(x) <- value \method{unitname}{quad}(x) <- value \method{unitname}{slrm}(x) <- value \method{unitname}{tess}(x) <- value } \arguments{ \item{x}{A spatial dataset. 
Either a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), a window (object of class \code{"owin"}), a pixel image (object of class \code{"im"}), a tessellation (object of class \code{"tess"}), a quadrature scheme (object of class \code{"quad"}), or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"} or \code{"dppm"} or \code{"minconfit"}). } \item{value}{ Name of the unit of length. See Details. } } \details{ Spatial datasets in the \pkg{spatstat} package may include the name of the unit of length. This name is used when printing or plotting the dataset, and in some other applications. \code{unitname(x)} extracts this name, and \code{unitname(x) <- value} sets the name to \code{value}. A valid name is either \itemize{ \item a single character string \item a vector of two character strings giving the singular and plural forms of the unit name \item a list of length 3, containing two character strings giving the singular and plural forms of the basic unit, and a number specifying the multiple of this unit. } Note that re-setting the name of the unit of length \emph{does not} affect the numerical values in \code{x}. It changes only the string containing the name of the unit of length. To rescale the numerical values, use \code{\link{rescale}}. } \value{ The return value of \code{unitname} is an object of class \code{"units"} containing the name of the unit of length in \code{x}. There are methods for \code{print} and \code{summary}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{rescale}}, \code{\link{owin}}, \code{\link{ppp}} } \examples{ X <- runifpoint(20) # if the unit of length is 1 metre: unitname(X) <- c("metre", "metres") # if the unit of length is 6 inches: unitname(X) <- list("inch", "inches", 6) } \keyword{spatial} \keyword{manip} spatstat/man/dppkernel.Rd0000644000176200001440000000121513160710571015143 0ustar liggesusers\name{dppkernel} \alias{dppkernel} \title{Extract Kernel from Determinantal Point Process Model Object} \description{ Returns the kernel of a determinantal point process model as a function of one argument \code{x}. } \usage{dppkernel(model, \dots)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} \item{\dots}{Arguments passed to \code{\link{dppapproxkernel}} if the exact kernel is unknown} } \value{A function} \author{ \adrian \rolf and \ege } \examples{ kernelMatern <- dppkernel(dppMatern(lambda = 100, alpha=.01, nu=1, d=2)) plot(kernelMatern, xlim = c(0,0.1)) } \keyword{spatial} \keyword{models} spatstat/man/treeprune.Rd0000644000176200001440000000306013160710621015164 0ustar liggesusers\name{treeprune} \alias{treeprune} \title{ Prune Tree to Given Level } \description{ Prune a tree by removing all the branches above a given level. } \usage{ treeprune(X, root = 1, level = 0) } \arguments{ \item{X}{ Object of class \code{"linnet"} or \code{"lpp"}. } \item{root}{ Index of the root vertex amongst the vertices of \code{as.linnet(X)}. } \item{level}{ Integer specifying the level above which the tree should be pruned. } } \details{ The object \code{X} must be either a linear network, or a derived object such as a point pattern on a linear network. The linear network must be an acyclic graph (i.e. must not contain any loops) so that it can be interpreted as a tree. This function removes all vertices for which \code{\link{treebranchlabels}} gives a string more than \code{level} characters long. } \value{ Object of the same kind as \code{X}. 
} \author{ \spatstatAuthors } \seealso{ \code{\link{treebranchlabels}} for calculating the branch labels. \code{\link{deletebranch}} for removing entire branches. \code{\link{extractbranch}} for extracting entire branches. \code{\link{linnet}} for creating networks. } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) plot(L, main="") # compute branch labels tb <- treebranchlabels(L, 1) tbc <- paste0("[", tb, "]") text(vertices(L), labels=tbc, cex=2) # prune tree tp <- treeprune(L, root=1, 1) plot(tp, add=TRUE, col="blue", lwd=3) } \keyword{spatial} \keyword{manip} spatstat/man/quadscheme.logi.Rd0000644000176200001440000001230413160710621016224 0ustar liggesusers\name{quadscheme.logi} \alias{quadscheme.logi} \title{Generate a Logistic Regression Quadrature Scheme from a Point Pattern} \description{ Generates a logistic regression quadrature scheme (an object of class \code{"logiquad"} inheriting from \code{"quad"}) from point patterns of data and dummy points. } \usage{ quadscheme.logi(data, dummy, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, \dots) } \arguments{ \item{data}{ The observed data point pattern. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{dummy}{ The pattern of dummy points for the quadrature. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()}. If missing a sensible default is generated. } \item{dummytype}{ The name of the type of dummy points to use when \code{"dummy"} is missing. Currently available options are: \code{"stratrand"} (default), \code{"binomial"}, \code{"poisson"}, \code{"grid"} and \code{"transgrid"}. } \item{nd}{ Integer, or integer vector of length 2 controlling the intensity of dummy points when \code{"dummy"} is missing. } \item{mark.repeat}{ Repeating the dummy points for each level of a marked data pattern when \code{"dummy"} is missing. (See details.) } \item{\dots}{ Ignored. 
} } \value{ An object of class \code{"logiquad"} inheriting from \code{"quad"} describing the quadrature scheme (data points, dummy points, and quadrature weights) suitable as the argument \code{Q} of the function \code{\link{ppm}()} for fitting a point process model. The quadrature scheme can be inspected using the \code{print} and \code{plot} methods for objects of class \code{"quad"}. } \details{ This is the primary method for producing a quadrature schemes for use by \code{\link{ppm}} when the logistic regression approximation (Baddeley et al. 2013) to the pseudolikelihood of the model is applied (i.e. when \code{method="logi"} in \code{\link{ppm}}). The function \code{\link{ppm}} fits a point process model to an observed point pattern. When used with the option \code{method="logi"} it requires a quadrature scheme consisting of the original data point pattern and an additional pattern of dummy points. Such quadrature schemes are represented by objects of class \code{"logiquad"}. Quadrature schemes are created by the function \code{quadscheme.logi}. The arguments \code{data} and \code{dummy} specify the data and dummy points, respectively. There is a sensible default for the dummy points. Alternatively the dummy points may be specified arbitrarily and given in any format recognised by \code{\link{as.ppp}}. The quadrature region is the region over which we are integrating, and approximating integrals by finite sums. If \code{dummy} is a point pattern object (class \code{"ppp"}) then the quadrature region is taken to be \code{Window(dummy)}. If \code{dummy} is just a list of \eqn{x, y} coordinates then the quadrature region defaults to the observation window of the data pattern, \code{Window(data)}. If \code{dummy} is missing, then a pattern of dummy points will be generated, taking account of the optional arguments \code{dummytype}, \code{nd}, and \code{mark.repeat}. 
The currently accepted values for \code{dummytype} are: \itemize{ \item \code{"grid"} where the frame of the window is divided into a \code{nd * nd} or \code{nd[1] * nd[2]} regular grid of tiles and the centers constitute the dummy points. \item \code{"transgrid"} where a regular grid as above is translated by a random vector. \item \code{"stratrand"} where each point of a regular grid as above is randomly translated within its tile. \item \code{"binomial"} where \code{nd * nd} or \code{nd[1] * nd[2]} points are generated uniformly in the frame of the window. \item \code{"poisson"} where a homogeneous Poisson point process with intensity \code{nd * nd} or \code{nd[1] * nd[2]} is generated within the frame of the observation window. } Then if the window is not rectangular, any dummy points lying outside it are deleted. If \code{data} is a multitype point pattern the dummy points should also be marked (with the same levels of the marks as \code{data}). If \code{dummy} is missing and the dummy pattern is generated by \code{quadscheme.logi} the default behaviour is to attach a uniformly distributed mark (from the levels of the marks) to each dummy point. Alternatively, if \code{mark.repeat=TRUE} each dummy point is repeated as many times as there are levels of the marks with a distinct mark value attached to it. Finally, each point (data and dummy) is assigned the weight 1. The weights are never used and only appear to be compatible with the class \code{"quad"} from which the \code{"logiquad"} object inherits. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. } \seealso{ \code{\link{ppm}}, \code{\link{as.ppp}} } \examples{ data(simdat) Q <- quadscheme.logi(simdat) } \author{\adrian , \rolf and \ege . 
} \keyword{spatial} \keyword{datagen} spatstat/man/spatdim.Rd0000644000176200001440000000176213160710621014623 0ustar liggesusers\name{spatdim} \alias{spatdim} \title{Spatial Dimension of a Dataset} \description{ Extracts the spatial dimension of an object in the \pkg{spatstat} package. } \usage{spatdim(X)} \arguments{ \item{X}{Object belonging to any class defined in the \pkg{spatstat} package.} } \value{ An integer, or \code{NA}. } \details{ This function returns the number of spatial coordinate dimensions of the dataset \code{X}. The results for some of the more common types of objects are as follows: \tabular{ll}{ \bold{object class} \tab \bold{dimension} \cr \code{"ppp"} \tab 2 \cr \code{"lpp"} \tab 2 \cr \code{"pp3"} \tab 3 \cr \code{"ppx"} \tab number of \emph{spatial} dimensions \cr \code{"owin"} \tab 2 \cr \code{"psp"} \tab 2 \cr \code{"ppm"} \tab 2 } Note that time dimensions are not counted. If \code{X} is not a recognised spatial object, the result is \code{NA}. } \author{ \adrian \rolf and \ege } \examples{ spatdim(lansing) } spatstat/man/anova.lppm.Rd0000644000176200001440000000733713160710571015245 0ustar liggesusers\name{anova.lppm} \alias{anova.lppm} \title{ANOVA for Fitted Point Process Models on Linear Network} \description{ Performs analysis of deviance for two or more fitted point process models on a linear network. } \usage{ \method{anova}{lppm}(object, \dots, test=NULL) } \arguments{ \item{object}{A fitted point process model on a linear network (object of class \code{"lppm"}). } \item{\dots}{ One or more fitted point process models on the same linear network. } \item{test}{ Character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link{anova}} for fitted point process models on a linear network (objects of class \code{"lppm"}, usually generated by the model-fitting function \code{\link{lppm}}). 
If the fitted models are all Poisson point processes, then this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link{anova.glm}}. If some of the fitted models are \emph{not} Poisson point processes, then the deviance difference is replaced by the adjusted composite likelihood ratio (Pace et al, 2011; Baddeley et al, 2014). } \section{Errors and warnings}{ \describe{ \item{models not nested:}{ There may be an error message that the models are not \dQuote{nested}. For an Analysis of Deviance the models must be nested, i.e. one model must be a special case of the other. For example the point process model with formula \code{~x} is a special case of the model with formula \code{~x+y}, so these models are nested. However the two point process models with formulae \code{~x} and \code{~y} are not nested. If you get this error message and you believe that the models should be nested, the problem may be the inability of \R to recognise that the two formulae are nested. Try modifying the formulae to make their relationship more obvious. } \item{different sizes of dataset:}{ There may be an error message from \code{anova.glmlist} that \dQuote{models were not all fitted to the same size of dataset}. This generally occurs when the point process models are fitted on different linear networks. } } } \seealso{ \code{\link{lppm}} } \examples{ X <- runiflpp(10, simplenet) mod0 <- lppm(X ~1) modx <- lppm(X ~x) anova(mod0, modx, test="Chi") } \author{\adrian } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. 
(2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. Pace, L., Salvan, A. and Sartori, N. (2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/MinkowskiSum.Rd0000644000176200001440000000730413160710571015624 0ustar liggesusers\name{MinkowskiSum} \alias{MinkowskiSum} \alias{\%(+)\%} %DoNotExport %NAMESPACE export("%(+)%") \alias{dilationAny} \title{Minkowski Sum of Windows} \description{ Compute the Minkowski sum of two spatial windows. } \usage{ MinkowskiSum(A, B) A \%(+)\% B dilationAny(A, B) } \arguments{ \item{A,B}{ Windows (objects of class \code{"owin"}), point patterns (objects of class \code{"ppp"}) or line segment patterns (objects of class \code{"psp"}) in any combination. } } \value{ A window (object of class \code{"owin"}) except that if \code{A} is a point pattern, then the result is an object of the same type as \code{B} (and vice versa). } \details{ The operator \code{A \%(+)\% B} and function \code{MinkowskiSum(A,B)} are synonymous: they both compute the Minkowski sum of the windows \code{A} and \code{B}. The function \code{dilationAny} computes the Minkowski dilation \code{A \%(+)\% reflect(B)}. The Minkowski sum of two spatial regions \eqn{A} and \eqn{B} is another region, formed by taking all possible pairs of points, one in \eqn{A} and one in \eqn{B}, and adding them as vectors. 
The Minkowski Sum \eqn{A \oplus B}{A \%(+)\% B} is the set of all points \eqn{a+b} where \eqn{a} is in \eqn{A} and \eqn{b} is in \eqn{B}. A few common facts about the Minkowski sum are: \itemize{ \item The sum is symmetric: \eqn{A \oplus B = B \oplus A}{A \%(+)\% B = B \%(+)\% A}. \item If \eqn{B} is a single point, then \eqn{A \oplus B}{A \%(+)\% B} is a shifted copy of \eqn{A}. \item If \eqn{A} is a square of side length \eqn{a}, and \eqn{B} is a square of side length \eqn{b}, with sides that are parallel to the coordinate axes, then \eqn{A \oplus B}{A \%(+)\% B} is a square of side length \eqn{a+b}. \item If \eqn{A} and \eqn{B} are discs of radius \eqn{r} and \eqn{s} respectively, then \eqn{A \oplus B}{A \%(+)\% B} is a disc of radius \eqn{r+s}. \item If \eqn{B} is a disc of radius \eqn{r} centred at the origin, then \eqn{A \oplus B}{A \%(+)\% B} is equivalent to the \emph{morphological dilation} of \eqn{A} by distance \eqn{r}. See \code{\link{dilation}}. } The Minkowski dilation is the closely-related region \eqn{A \oplus (-B)}{A \%(+)\% (-B)} where \eqn{(-B)} is the reflection of \eqn{B} through the origin. The Minkowski dilation is the set of all vectors \eqn{z} such that, if \eqn{B} is shifted by \eqn{z}, the resulting set \eqn{B+z} has nonempty intersection with \eqn{A}. The algorithm currently computes the result as a polygonal window using the \pkg{polyclip} library. It will be quite slow if applied to binary mask windows. The arguments \code{A} and \code{B} can also be point patterns or line segment patterns. These are interpreted as spatial regions, the Minkowski sum is computed, and the result is returned as an object of the most appropriate type. The Minkowski sum of two point patterns is another point pattern. The Minkowski sum of a point pattern and a line segment pattern is another line segment pattern. 
} \seealso{ \code{\link{dilation}}, \code{\link{erosionAny}} } \examples{ B <- square(0.2) RplusB <- letterR \%(+)\% B opa <- par(mfrow=c(1,2)) FR <- grow.rectangle(Frame(letterR), 0.3) plot(FR, main="") plot(letterR, add=TRUE, lwd=2, hatch=TRUE, hatchargs=list(texture=5)) plot(shift(B, vec=c(3.675, 3)), add=TRUE, border="red", lwd=2) plot(FR, main="") plot(letterR, add=TRUE, lwd=2, hatch=TRUE, hatchargs=list(texture=5)) plot(RplusB, add=TRUE, border="blue", lwd=2, hatch=TRUE, hatchargs=list(col="blue")) par(opa) plot(cells \%(+)\% square(0.1)) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/shift.im.Rd0000644000176200001440000000336413160710621014703 0ustar liggesusers\name{shift.im} \alias{shift.im} \title{Apply Vector Translation To Pixel Image} \description{ Applies a vector shift to a pixel image } \usage{ \method{shift}{im}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Pixel image (object of class \code{"im"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{ Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \value{ Another pixel image (of class \code{"im"}) representing the result of applying the vector shift. } \details{ The spatial location of each pixel in the image is translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. If \code{origin} is given, then it should be one of the character strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}. The argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the origin. If \code{origin="centroid"} then the centroid of the image window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the image will be shifted to the origin. 
If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the image will be shifted to the origin. } \seealso{ \code{\link{shift}} } \examples{ # make up an image X <- setcov(unit.square()) plot(X) Y <- shift(X, c(10,10)) plot(Y) # no discernible difference except coordinates are different shift(X, origin="mid") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/pool.fasp.Rd0000644000176200001440000000350713160710621015062 0ustar liggesusers\name{pool.fasp} \alias{pool.fasp} \title{ Pool Data from Several Function Arrays } \description{ Pool the simulation data from several function arrays (objects of class \code{"fasp"}) and compute a new function array. } \usage{ \method{pool}{fasp}(...) } \arguments{ \item{\dots}{ Objects of class \code{"fasp"}. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"fasp"} of function arrays. It is used to combine the simulation data from several arrays of simulation envelopes and to compute a new array of envelopes based on the combined data. Each of the arguments \code{\dots} must be a function array (object of class \code{"fasp"}) containing simulation envelopes. This is typically created by running the command \code{\link{alltypes}} with the arguments \code{envelope=TRUE} and \code{savefuns=TRUE}. This ensures that each object is an array of simulation envelopes, and that each envelope contains the simulated data (summary function values) that were used to construct the envelope. The simulated data are extracted from each object and combined. A new array of envelopes is computed from the combined set of simulations. Warnings or errors will be issued if the objects \code{\dots} appear to be incompatible. However, the code is not smart enough to decide whether it is sensible to pool the data. } \value{ An object of class \code{"fasp"}. 
} \seealso{ \code{\link{fasp}}, \code{\link{alltypes}}, \code{\link{pool.envelope}}, \code{\link{pool}} } \examples{ data(amacrine) A1 <- alltypes(amacrine,"K",nsim=9,envelope=TRUE,savefuns=TRUE) A2 <- alltypes(amacrine,"K",nsim=10,envelope=TRUE,savefuns=TRUE) pool(A1, A2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/methods.fii.Rd0000644000176200001440000000504413160710621015370 0ustar liggesusers\name{methods.fii} \alias{methods.fii} %DoNotExport \Rdversion{1.1} \alias{print.fii} \alias{plot.fii} \alias{coef.fii} \alias{summary.fii} \alias{print.summary.fii} \alias{coef.summary.fii} \title{ Methods for Fitted Interactions } \description{ These are methods specifically for the class \code{"fii"} of fitted interpoint interactions. } \usage{ \method{print}{fii}(x, \dots) \method{coef}{fii}(object, \dots) \method{plot}{fii}(x, \dots) \method{summary}{fii}(object,\dots) \method{print}{summary.fii}(x, ...) \method{coef}{summary.fii}(object, ...) } \arguments{ \item{x,object}{ An object of class \code{"fii"} representing a fitted interpoint interaction. } \item{\dots}{ Arguments passed to other methods. } } \details{ These are methods for the class \code{"fii"}. An object of class \code{"fii"} represents a fitted interpoint interaction. It is usually obtained by using the command \code{\link{fitin}} to extract the fitted interaction part of a fitted point process model. See \code{\link{fitin}} for further explanation of this class. The commands listed here are methods for the generic functions \code{\link{print}}, \code{\link{summary}}, \code{\link{plot}} and \code{\link{coef}} for objects of the class \code{"fii"}. Following the usual convention, \code{summary.fii} returns an object of class \code{summary.fii}, for which there is a print method. 
The effect is that, when the user types \code{summary(x)}, the summary is printed, but when the user types \code{y <- summary(x)}, the summary information is saved. The method \code{coef.fii} extracts the canonical coefficients of the fitted interaction, and returns them as a numeric vector. The method \code{coef.summary.fii} transforms these values into quantities that are more easily interpretable, in a format that depends on the particular model. There are also methods for the generic commands \code{\link{reach}} and \code{\link{as.interact}}, described elsewhere. } \value{ The \code{print} and \code{plot} methods return \code{NULL}. The \code{summary} method returns an object of class \code{summary.fii}. \code{coef.fii} returns a numeric vector. \code{coef.summary.fii} returns data whose structure depends on the model. } \author{ \adrian } \seealso{ \code{\link{fitin}}, \code{\link{reach.fii}}, \code{\link{as.interact.fii}} } \examples{ mod <- ppm(cells, ~1, Strauss(0.1)) f <- fitin(mod) f summary(f) plot(f) coef(f) coef(summary(f)) } \keyword{spatial} \keyword{methods} spatstat/man/lppm.Rd0000644000176200001440000000723113160710621014127 0ustar liggesusers\name{lppm} \alias{lppm} \alias{lppm.formula} \alias{lppm.lpp} \title{ Fit Point Process Model to Point Pattern on Linear Network } \description{ Fit a point process model to a point pattern dataset on a linear network } \usage{ lppm(X, ...) \method{lppm}{formula}(X, interaction=NULL, ..., data=NULL) \method{lppm}{lpp}(X, ..., eps=NULL, nd=1000, random=FALSE) } \arguments{ \item{X}{ Either an object of class \code{"lpp"} specifying a point pattern on a linear network, or a \code{formula} specifying the point process model. } \item{\dots}{ Arguments passed to \code{\link{ppm}}. } \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{data}{ Optional. 
The values of spatial covariates (other than the Cartesian coordinates) required by the model. A list whose entries are images, functions, windows, tessellations or single numbers. } \item{eps}{ Optional. Spacing between dummy points along each segment of the network. } \item{nd}{ Optional. Total number of dummy points placed on the network. Ignored if \code{eps} is given. } \item{random}{ Logical value indicating whether the grid of dummy points should be placed at a randomised starting position. } } \details{ This function fits a point process model to data that specify a point pattern on a linear network. It is a counterpart of the model-fitting function \code{\link{ppm}} designed to work with objects of class \code{"lpp"} instead of \code{"ppp"}. The function \code{lppm} is generic, with methods for the classes \code{formula} and \code{lpp}. In \code{lppm.lpp} the first argument \code{X} should be an object of class \code{"lpp"} (created by the command \code{\link{lpp}}) specifying a point pattern on a linear network. In \code{lppm.formula}, the first argument is a \code{formula} in the \R language describing the spatial trend model to be fitted. It has the general form \code{pattern ~ trend} where the left hand side \code{pattern} is usually the name of a point pattern on a linear network (object of class \code{"lpp"}) to which the model should be fitted, or an expression which evaluates to such a point pattern; and the right hand side \code{trend} is an expression specifying the spatial trend of the model. Other arguments \code{...} are passed from \code{lppm.formula} to \code{lppm.lpp} and from \code{lppm.lpp} to \code{\link{ppm}}. } \value{ An object of class \code{"lppm"} representing the fitted model. There are methods for \code{print}, \code{predict}, \code{coef} and similar functions. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{methods.lppm}}, \code{\link{predict.lppm}}, \code{\link{ppm}}, \code{\link{lpp}}. 
} \examples{ X <- runiflpp(15, simplenet) lppm(X ~1) lppm(X ~x) marks(X) <- factor(rep(letters[1:3], 5)) lppm(X ~ marks) lppm(X ~ marks * x) } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. } \keyword{spatial} \keyword{models} spatstat/man/Extract.fv.Rd0000644000176200001440000000613613160710621015206 0ustar liggesusers\name{Extract.fv} \alias{[.fv} \alias{[<-.fv} \alias{$<-.fv} \title{Extract or Replace Subset of Function Values} \description{ Extract or replace a subset of an object of class \code{"fv"}. } \usage{ \method{[}{fv}(x, i, j, \dots, drop=FALSE) \method{[}{fv}(x, i, j) <- value \method{$}{fv}(x, name) <- value } \arguments{ \item{x}{ a function value object, of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame. } \item{i}{ any appropriate subset index. Selects a subset of the rows of the data frame, i.e. a subset of the domain of the function(s) represented by \code{x}. } \item{j}{ any appropriate subset index for the columns of the data frame. Selects some of the functions present in \code{x}. } \item{name}{ the name of a column of the data frame. } \item{\dots}{ Ignored. } \item{drop}{ Logical. If \code{TRUE}, the result is a data frame or vector containing the selected rows and columns of data. If \code{FALSE} (the default), the result is another object of class \code{"fv"}. } \item{value}{ Replacement value for the column or columns selected by \code{name} or \code{j}. 
} } \value{ The result of \code{[.fv} with \code{drop=TRUE} is a data frame or vector. Otherwise, the result is another object of class \code{"fv"}. } \details{ These functions extract a designated subset of an object of class \code{"fv"}, or replace the designated subset with other data, or delete the designated subset. The subset is specified by the row index \code{i} and column index \code{j}, or by the column name \code{name}. Either \code{i} or \code{j} may be missing, or both may be missing. The function \code{[.fv} is a method for the generic operator \code{\link{[}} for the class \code{"fv"}. It extracts the designated subset of \code{x}, and returns it as another object of class \code{"fv"} (if \code{drop=FALSE}) or as a data frame or vector (if \code{drop=TRUE}). The function \code{[<-.fv} is a method for the generic operator \code{\link{[<-}} for the class \code{"fv"}. If \code{value} is \code{NULL}, the designated subset of \code{x} will be deleted from \code{x}. Otherwise, the designated subset of \code{x} will be replaced by the data contained in \code{value}. The return value is the modified object \code{x}. The function \code{$<-.fv} is a method for the generic operator \code{\link{$<-}} for the class \code{"fv"}. If \code{value} is \code{NULL}, the designated column of \code{x} will be deleted from \code{x}. Otherwise, the designated column of \code{x} will be replaced by the data contained in \code{value}. The return value is the modified object \code{x}. 
} \seealso{ \code{\link{fv.object}} } \examples{ K <- Kest(cells) # discard the estimates of K(r) for r > 0.1 Ksub <- K[K$r <= 0.1, ] # extract the border method estimates bor <- K[ , "border", drop=TRUE] # or equivalently bor <- K$border # remove the border-method estimates K$border <- NULL K } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/Extract.listof.Rd0000644000176200001440000000212613160710621016066 0ustar liggesusers\name{Extract.listof} \alias{[<-.listof} \title{Extract or Replace Subset of a List of Things} \description{ Replace a subset of a list of things. } \usage{ \method{[}{listof}(x, i) <- value } \arguments{ \item{x}{ An object of class \code{"listof"} representing a list of things which all belong to one class. } \item{i}{ Subset index. Any valid subset index in the usual \R sense. } \item{value}{ Replacement value for the subset. } } \value{ Another object of class \code{"listof"}. } \details{ This is a subset replacement method for the class \code{"listof"}. The argument \code{x} should be an object of class \code{"listof"} representing a list of things that all belong to one class. The method replaces a designated subset of \code{x}, and returns an object of class \code{"listof"}. } \seealso{ \code{\link{plot.listof}}, \code{\link{summary.listof}} } \examples{ x <- list(A=runif(10), B=runif(10), C=runif(10)) class(x) <- c("listof", class(x)) x[1] <- list(A=rnorm(10)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/inside.owin.Rd0000644000176200001440000000446213160710621015410 0ustar liggesusers\name{inside.owin} \alias{inside.owin} \title{Test Whether Points Are Inside A Window} \description{ Test whether points lie inside or outside a given window. } \usage{ inside.owin(x, y, w) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates of points to be tested. (Alternatively, a point pattern object providing both \eqn{x} and \eqn{y} coordinates.) 
} \item{y}{ Vector of \eqn{y} coordinates of points to be tested. } \item{w}{A window. This should be an object of class \code{\link{owin}}, or can be given in any format acceptable to \code{\link{as.owin}()}. } } \value{ Logical vector whose \code{i}th entry is \code{TRUE} if the corresponding point \code{(x[i],y[i])} is inside \code{w}. } \details{ This function tests whether each of the points \code{(x[i],y[i])} lies inside or outside the window \code{w} and returns \code{TRUE} if it is inside. The boundary of the window is treated as being inside. If \code{w} is of type \code{"rectangle"} or \code{"polygonal"}, the algorithm uses analytic geometry (the discrete Stokes theorem). Computation time is linear in the number of points and (for polygonal windows) in the number of vertices of the boundary polygon. Boundary cases are correct to single precision accuracy. If \code{w} is of type \code{"mask"} then the pixel closest to \code{(x[i],y[i])} is tested. The results may be incorrect for points lying within one pixel diameter of the window boundary. Normally \code{x} and \code{y} must be numeric vectors of equal length (length zero is allowed) containing the coordinates of points. Alternatively \code{x} can be a point pattern (object of class \code{"ppp"}) while \code{y} is missing; then the coordinates of the point pattern are extracted. 
} \seealso{ \code{\link{owin.object}}, \code{\link{as.owin}} } \examples{ # hexagonal window k <- 6 theta <- 2 * pi * (0:(k-1))/k co <- cos(theta) si <- sin(theta) mas <- owin(c(-1,1), c(-1,1), poly=list(x=co, y=si)) \dontrun{ plot(mas) } # random points in rectangle x <- runif(30,min=-1, max=1) y <- runif(30,min=-1, max=1) ok <- inside.owin(x, y, mas) \dontrun{ points(x[ok], y[ok]) points(x[!ok], y[!ok], pch="x") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/LambertW.Rd0000644000176200001440000000252613160710571014702 0ustar liggesusers\name{LambertW} \alias{LambertW} \title{ Lambert's W Function } \description{ Computes Lambert's W-function. } \usage{ LambertW(x) } \arguments{ \item{x}{ Vector of nonnegative numbers. } } \details{ Lambert's W-function is the inverse function of \eqn{f(y) = y e^y}{f(y) = y * exp(y)}. That is, \eqn{W} is the function such that \deqn{ W(x) e^{W(x)} = x }{ W(x) * exp(W(x)) = x } This command \code{LambertW} computes \eqn{W(x)} for each entry in the argument \code{x}. If the library \pkg{gsl} has been installed, then the function \code{lambert_W0} in that library is invoked. Otherwise, values of the W-function are computed by root-finding, using the function \code{\link[stats]{uniroot}}. Computation using \pkg{gsl} is about 100 times faster. If any entries of \code{x} are infinite or \code{NA}, the corresponding results are \code{NA}. } \value{ Numeric vector. } \references{ Corless, R, Gonnet, G, Hare, D, Jeffrey, D and Knuth, D (1996), On the Lambert W function. \emph{Computational Mathematics}, \bold{5}, 325--359. Roy, R and Olver, F (2010), Lambert W function. In Olver, F, Lozier, D and Boisvert, R (eds.), \emph{{NIST} Handbook of Mathematical Functions}, Cambridge University Press. 
} \author{\adrian and \rolf } \examples{ LambertW(exp(1)) } \keyword{math} spatstat/man/bw.frac.Rd0000644000176200001440000000420413160710571014502 0ustar liggesusers\name{bw.frac} \alias{bw.frac} \title{ Bandwidth Selection Based on Window Geometry } \description{ Select a smoothing bandwidth for smoothing a point pattern, based only on the geometry of the spatial window. The bandwidth is a specified quantile of the distance between two independent random points in the window. } \usage{ bw.frac(X, \dots, f=1/4) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or point pattern (object of class \code{"ppp"}) or other data which can be converted to a window using \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{\link{distcdf}}. } \item{f}{ Probability value (between 0 and 1) determining the quantile of the distribution. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{\sigma} is computed as a quantile of the distance between two independent random points in the window. The default is the lower quartile of this distribution. If \eqn{F(r)} is the cumulative distribution function of the distance between two independent random points uniformly distributed in the window, then the value returned is the quantile with probability \eqn{f}. That is, the bandwidth is the value \eqn{r} such that \eqn{F(r) = f}. The cumulative distribution function \eqn{F(r)} is computed using \code{\link{distcdf}}. We then we compute the smallest number \eqn{r} such that \eqn{F(r) \ge f}{F(r) >= f}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.frac"} which can be plotted to show the cumulative distribution function and the selected quantile. 
} \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{bw.relrisk}}, \code{\link{bw.scott}}, \code{\link{bw.smoothppp}}, \code{\link{bw.stoyan}} } \examples{ h <- bw.frac(letterR) h plot(h, main="bw.frac(letterR)") } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/kaplan.meier.Rd0000644000176200001440000000604513160710621015527 0ustar liggesusers\name{kaplan.meier} \alias{kaplan.meier} \title{Kaplan-Meier Estimator using Histogram Data} \description{ Compute the Kaplan-Meier estimator of a survival time distribution function, from histogram data } \usage{ kaplan.meier(obs, nco, breaks, upperobs=0) } \arguments{ \item{obs}{vector of \eqn{n} integers giving the histogram of all observations (censored or uncensored survival times) } \item{nco}{vector of \eqn{n} integers giving the histogram of uncensored observations (those survival times that are less than or equal to the censoring time) } \item{breaks}{Vector of \eqn{n+1} breakpoints which were used to form both histograms. } \item{upperobs}{ Number of observations beyond the rightmost breakpoint, if any. } } \value{ A list with two elements: \item{km}{Kaplan-Meier estimate of the survival time c.d.f. \eqn{F(t)} } \item{lambda}{corresponding Nelson-Aalen estimate of the hazard rate \eqn{\lambda(t)}{lambda(t)} } These are numeric vectors of length \eqn{n}. } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the Kaplan-Meier estimator from a huge dataset. Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}. 
Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. If the number of observations \eqn{M} is large, it is efficient to use histograms. Form the histogram \code{obs} of all observed times \eqn{\tilde T_i}{T*[i]}. That is, \code{obs[k]} counts the number of values \eqn{\tilde T_i}{T*[i]} in the interval \code{(breaks[k],breaks[k+1]]} for \eqn{k > 1} and \code{[breaks[1],breaks[2]]} for \eqn{k = 1}. Also form the histogram \code{nco} of all uncensored times, i.e. those \eqn{\tilde T_i}{T*[i]} such that \eqn{D_i=1}{D[i]=1}. These two histograms are the arguments passed to \code{kaplan.meier}. The vectors \code{km} and \code{lambda} returned by \code{kaplan.meier} are (histogram approximations to) the Kaplan-Meier estimator of \eqn{F(t)} and its hazard rate \eqn{\lambda(t)}{lambda(t)}. Specifically, \code{km[k]} is an estimate of \code{F(breaks[k+1])}, and \code{lambda[k]} is an estimate of the average of \eqn{\lambda(t)}{lambda(t)} over the interval \code{(breaks[k],breaks[k+1])}. The histogram breaks must include \eqn{0}. If the histogram breaks do not span the range of the observations, it is important to count how many survival times \eqn{\tilde T_i}{T*[i]} exceed the rightmost breakpoint, and give this as the value \code{upperobs}. 
} \seealso{ \code{\link{reduced.sample}}, \code{\link{km.rs}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/simulate.slrm.Rd0000644000176200001440000000503713160710621015760 0ustar liggesusers\name{simulate.slrm} \alias{simulate.slrm} \title{Simulate a Fitted Spatial Logistic Regression Model} \description{ Generates simulated realisations from a fitted spatial logistic regresson model } \usage{ \method{simulate}{slrm}(object, nsim = 1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, drop=FALSE) } \arguments{ \item{object}{ Fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{nsim}{ Number of simulated realisations. } \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Ignored.} \item{window}{ Optional. Window (object of class \code{"owin"}) in which the model should be simulated. } \item{covariates}{ Optional. A named list containing new values for the covariates in the model. } \item{verbose}{ Logical. Whether to print progress reports (when \code{nsim > 1}). } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"slrm"} of fitted spatial logistic regression models. Simulations are performed by \code{\link{rpoispp}} after the intensity has been computed by \code{\link{predict.slrm}}. The return value is a list of point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). 
It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \examples{ X <- copper$SouthPoints fit <- slrm(X ~ 1) simulate(fit, 2) fitxy <- slrm(X ~ x+y) simulate(fitxy, 2, window=square(2)) } \seealso{ \code{\link{slrm}}, \code{\link{rpoispp}}, \code{\link{simulate.ppm}}, \code{\link{simulate.kppm}}, \code{\link[stats]{simulate}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/points.lpp.Rd0000644000176200001440000000364713160710621015274 0ustar liggesusers\name{points.lpp} \alias{points.lpp} \title{ Draw Points on Existing Plot } \description{ For a point pattern on a linear network, this function draws the coordinates of the points only, on the existing plot display. } \usage{ \method{points}{lpp}(x, \dots) } \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Additional arguments passed to \code{\link[graphics]{points.default}}. } } \details{ This is a method for the generic function \code{\link[graphics]{points}} for the class \code{"lpp"} of point patterns on a linear network. If \code{x} is a point pattern on a linear network, then \code{points(x)} plots the spatial coordinates of the points only, on the existing plot display, without plotting the underlying network. It is an error to call this function if a plot has not yet been initialised. The spatial coordinates are extracted and passed to \code{\link[graphics]{points.default}} along with any extra arguments. Arguments controlling the colours and the plot symbols are interpreted by \code{\link[graphics]{points.default}}. 
For example, if the argument \code{col} is a vector, then the \code{i}th point is drawn in the colour \code{col[i]}. } \section{Difference from plot method}{ The more usual way to plot the points is using \code{\link{plot.lpp}}. For example \code{plot(x)} would plot both the points and the underlying network, while \code{plot(x, add=TRUE)} would plot only the points. The interpretation of arguments controlling the colours and plot symbols is different here: they determine a symbol map, as explained in the help for \code{\link{plot.ppp}}. } \value{ Null. } \author{ \spatstatAuthors. } \seealso{ \code{\link{plot.lpp}}, \code{\link[graphics]{points.default}} } \examples{ plot(Frame(spiders), main="Spiders on a Brick Wall") points(spiders) } \keyword{spatial} \keyword{hplot} spatstat/man/as.polygonal.Rd0000644000176200001440000000362213160710571015571 0ustar liggesusers\name{as.polygonal} \Rdversion{1.1} \alias{as.polygonal} \title{ Convert a Window to a Polygonal Window } \description{ Given a window \code{W} of any geometric type (rectangular, polygonal or binary mask), this function returns a polygonal window that represents the same spatial domain. } \usage{ as.polygonal(W, repair=FALSE) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). } \item{repair}{ Logical value indicating whether to check the validity of the polygon data and repair it, if \code{W} is already a polygonal window. } } \details{ Given a window \code{W} of any geometric type (rectangular, polygonal or binary mask), this function returns a polygonal window that represents the same spatial domain. If \code{W} is a rectangle, it is converted to a polygon with 4 vertices. If \code{W} is already polygonal, it is returned unchanged, by default. However if \code{repair=TRUE} then the validity of the polygonal coordinates will be checked (for example to check the boundary is not self-intersecting) and repaired if necessary, so that the result could be different from \code{W}. 
If \code{W} is a binary mask, then each pixel in the mask is replaced by a small square or rectangle, and the union of these squares or rectangles is computed. The result is a polygonal window that has only horizontal and vertical edges. (Use \code{\link{simplify.owin}} to remove the staircase appearance, if desired). } \value{ A polygonal window (object of class \code{"owin"} and of type \code{"polygonal"}). } \author{ \spatstatAuthors } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{as.mask}}, \code{\link{simplify.owin}} } \examples{ data(letterR) m <- as.mask(letterR, dimyx=32) p <- as.polygonal(m) if(interactive()) { plot(m) plot(p, add=TRUE, lwd=2) } } \keyword{spatial} \keyword{manip} spatstat/man/hist.im.Rd0000644000176200001440000000462213160710621014533 0ustar liggesusers\name{hist.im} \alias{hist.im} \title{Histogram of Pixel Values in an Image} \description{ Computes and displays a histogram of the pixel values in a pixel image. The \code{hist} method for class \code{"im"}. } \usage{ \method{hist}{im}(x, \dots, probability=FALSE, xname) } \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{\dots}{Arguments passed to \code{\link{hist.default}} or \code{\link{barplot}}.} \item{probability}{Logical. If \code{TRUE}, the histogram will be normalised to give probabilities or probability densities. } \item{xname}{Optional. Character string to be used as the name of the dataset \code{x}. } } \details{ This function computes and (by default) displays a histogram of the pixel values in the image \code{x}. An object of class \code{"im"} describes a pixel image. See \code{\link{im.object}}) for details of this class. The function \code{hist.im} is a method for the generic function \code{\link{hist}} for the class \code{"im"}. Any arguments in \code{...} are passed to \code{\link{hist.default}} (for numeric valued images) or \code{\link{barplot}} (for factor or logical images). 
For example, such arguments control the axes, and may be used to suppress the plotting. } \value{ For numeric-valued images, an object of class \code{"histogram"} as returned by \code{\link[graphics:hist]{hist.default}}. This object can be plotted. For factor-valued or logical images, an object of class \code{"barplotdata"}, which can be plotted. This is a list with components called \code{counts} (contingency table of counts of the numbers of pixels taking each possible value), \code{probs} (corresponding relative frequencies) and \code{mids} (graphical \eqn{x}-coordinates of the midpoints of the bars in the barplot). } \seealso{ \code{\link{spatialcdf}} for the cumulative distribution function of an image. \code{\link{hist}}, \code{\link{hist.default}}, \code{\link{barplot}}. For other statistical graphics such as Q-Q plots, use \code{X[]} to extract the pixel values of image \code{X}, and apply the usual statistical graphics commands. For information about pixel images see \code{\link{im.object}}, \code{\link{summary.im}}. } \examples{ X <- as.im(function(x,y) {x^2}, unit.square()) hist(X) hist(cut(X,3)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/colourmap.Rd0000644000176200001440000000776413160710571015177 0ustar liggesusers\name{colourmap} \alias{colourmap} \title{Colour Lookup Tables} \description{ Create a colour map (colour lookup table). } \usage{ colourmap(col, ..., range=NULL, breaks=NULL, inputs=NULL) } \arguments{ \item{col}{Vector of values specifying colours} \item{\dots}{Ignored.} \item{range}{ Interval to be mapped. A numeric vector of length 2, specifying the endpoints of the range of values to be mapped. Incompatible with \code{breaks} or \code{inputs}. } \item{inputs}{ Values to which the colours are associated. A factor or vector of the same length as \code{col}. Incompatible with \code{breaks} or \code{range}. } \item{breaks}{ Breakpoints for the colour map. 
A numeric vector of length equal to \code{length(col)+1}. Incompatible with \code{range} or \code{inputs}. } } \details{ A colour map is a mechanism for associating colours with data. It can be regarded as a function, mapping data to colours. The command \code{colourmap} creates an object representing a colour map, which can then be used to control the plot commands in the \pkg{spatstat} package. It can also be used to compute the colour assigned to any data value. The argument \code{col} specifies the colours to which data values will be mapped. It should be a vector whose entries can be interpreted as colours by the standard \R graphics system. The entries can be string names of colours like \code{"red"}, or integers that refer to colours in the standard palette, or strings containing six-letter hexadecimal codes like \code{"#F0A0FF"}. Exactly one of the arguments \code{range}, \code{inputs} or \code{breaks} must be specified by name. If \code{inputs} is given, then it should be a vector or factor, of the same length as \code{col}. The entries of \code{inputs} can be any atomic type (e.g. numeric, logical, character, complex) or factor values. The resulting colour map associates the value \code{inputs[i]} with the colour \code{col[i]}. If \code{range} is given, then it determines the interval of the real number line that will be mapped. It should be a numeric vector of length 2. If \code{breaks} is given, then it determines the precise intervals of the real number line which are mapped to each colour. It should be a numeric vector, of length at least 2, with entries that are in increasing order. Infinite values are allowed. Any number in the range between \code{breaks[i]} and \code{breaks[i+1]} will be mapped to the colour \code{col[i]}. The result is an object of class \code{"colourmap"}. There are \code{print} and \code{plot} methods for this class. 
Some plot commands in the \pkg{spatstat} package accept an object of this class as a specification of the colour map. The result is also a function \code{f} which can be used to compute the colour assigned to any data value. That is, \code{f(x)} returns the character value of the colour assigned to \code{x}. This also works for vectors of data values. } \value{ A function, which is also an object of class \code{"colourmap"}. } \seealso{ The plot method \code{\link{plot.colourmap}}. See the \R help file on \code{\link[grDevices:colors]{colours}} for information about the colours that \R recognises, and how to manipulate them. To make a smooth transition between colours, see \code{\link{interp.colourmap}}. To alter individual colour values, see \code{\link{tweak.colourmap}}. See \code{\link[spatstat:colourtools]{colourtools}} for more tools to manipulate colour values. See \code{\link{lut}} for lookup tables. } \examples{ # colour map for real numbers, using breakpoints cr <- colourmap(c("red", "blue", "green"), breaks=c(0,5,10,15)) cr cr(3.2) cr(c(3,5,7)) # a large colour map co <- colourmap(rainbow(100), range=c(-1,1)) co(0.2) # colour map for discrete set of values ct <- colourmap(c("red", "green"), inputs=c(FALSE, TRUE)) ct(TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{color} spatstat/man/Window.Rd0000644000176200001440000000521313160710571014430 0ustar liggesusers\name{Window} \alias{Window} \alias{Window<-} \alias{Window.ppp} \alias{Window<-.ppp} \alias{Window.psp} \alias{Window<-.psp} \alias{Window.im} \alias{Window<-.im} \title{ Extract or Change the Window of a Spatial Object } \description{ Given a spatial object (such as a point pattern or pixel image) in two dimensions, these functions extract or change the window in which the object is defined. 
} \usage{ Window(X, \dots) Window(X, \dots) <- value \method{Window}{ppp}(X, \dots) \method{Window}{ppp}(X, \dots) <- value \method{Window}{psp}(X, \dots) \method{Window}{psp}(X, \dots) <- value \method{Window}{im}(X, \dots) \method{Window}{im}(X, \dots) <- value } \arguments{ \item{X}{ A spatial object such as a point pattern, line segment pattern or pixel image. } \item{\dots}{ Extra arguments. They are ignored by all the methods listed here. } \item{value}{ Another window (object of class \code{"owin"}) to be used as the window for \code{X}. } } \details{ The functions \code{Window} and \code{Window<-} are generic. \code{Window(X)} extracts the spatial window in which \code{X} is defined. \code{Window(X) <- W} changes the window in which \code{X} is defined to the new window \code{W}, and \emph{discards any data outside} \code{W}. In particular: \itemize{ \item If \code{X} is a point pattern (object of class \code{"ppp"}) then \code{Window(X) <- W} discards any points of \code{X} which fall outside \code{W}. \item If \code{X} is a line segment pattern (object of class \code{"psp"}) then \code{Window(X) <- W} clips the segments of \code{X} to the boundaries of \code{W}. \item If \code{X} is a pixel image (object of class \code{"im"}) then \code{Window(X) <- W} has the effect that pixels lying outside \code{W} are retained but their pixel values are set to \code{NA}. } Many other classes of spatial object have a method for \code{Window}, but not \code{Window<-}. See \code{\link{Window.ppm}}. } \value{ The result of \code{Window} is a window (object of class \code{"owin"}). The result of \code{Window<-} is the updated object \code{X}, of the same class as \code{X}. 
\author{\adrian, \rolf and \ege}
} \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}}, or to \code{\link{rmh.default}}, or to covariate functions in the model. } \item{project}{ Logical flag indicating what to do if the fitted model is invalid (in the sense that the values of the fitted coefficients do not specify a valid point process). If \code{project=TRUE} the closest valid model will be simulated; if \code{project=FALSE} an error will occur. } \item{verbose}{ Logical flag indicating whether to print progress reports from \code{\link{rmh.ppm}} during the simulation of each point pattern. } \item{progress}{ Logical flag indicating whether to print progress reports for the sequence of simulations. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(object)}. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"ppm"} of fitted point process models. Simulations are performed by \code{\link{rmh.ppm}}. If \code{singlerun=FALSE} (the default), the simulated patterns are the results of independent runs of the Metropolis-Hastings algorithm. If \code{singlerun=TRUE}, a single long run of the algorithm is performed, and the state of the simulation is saved every \code{nsave} iterations to yield the simulated patterns. In the case of a single run, the behaviour is controlled by the parameters \code{nsave,nburn,nrep}. These are described in \code{\link{rmhcontrol}}. They may be passed in the \code{\dots} arguments or included in \code{control}. It is sufficient to specify two of the three parameters \code{nsave,nburn,nrep}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). 
It also belongs to the class \code{"solist"}, so that it can be plotted, and the class \code{"timed"}, so that the total computation time is recorded. } \examples{ \testonly{op <- spatstat.options(rmh.nrep=10)} fit <- ppm(japanesepines, ~1, Strauss(0.1)) simulate(fit, 2) simulate(fit, 2, singlerun=TRUE, nsave=1e4, nburn=1e4) \testonly{spatstat.options(op)} } \seealso{ \code{\link{ppm}}, \code{\link{simulate.kppm}}, \code{\link[stats]{simulate}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/ewcdf.Rd0000644000176200001440000000337313160710621014252 0ustar liggesusers\name{ewcdf} \alias{ewcdf} \title{Weighted Empirical Cumulative Distribution Function} \description{ Compute a weighted version of the empirical cumulative distribution function. } \usage{ ewcdf(x, weights = rep(1/length(x), length(x))) } \arguments{ \item{x}{Numeric vector of observations.} \item{weights}{Numeric vector of non-negative weights for \code{x}.} } \details{ This is a modification of the standard function \code{\link{ecdf}} allowing the observations \code{x} to have weights. The weighted e.c.d.f. (empirical cumulative distribution function) \code{Fn} is defined so that, for any real number \code{y}, the value of \code{Fn(y)} is equal to the total weight of all entries of \code{x} that are less than or equal to \code{y}. That is \code{Fn(y) = sum(weights[x <= y])}. Thus \code{Fn} is a step function which jumps at the values of \code{x}. The height of the jump at a point \code{y} is the total weight of all entries in \code{x} number of tied observations at that value. Missing values are ignored. If \code{weights} is omitted, the default is equivalent to \code{ecdf(x)} except for the class membership. The result of \code{ewcdf} is a function, of class \code{"ewcdf"}, inheriting from the classes \code{"ecdf"} and \code{"stepfun"}. The class \code{ewcdf} has methods for \code{print} and \code{quantile}. 
The inherited class \code{ecdf} has methods for \code{plot} and \code{summary}. } \value{ A function, of class \code{"ewcdf"}, inheriting from \code{"ecdf"} and \code{"stepfun"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{ecdf}}. \code{\link{quantile.ewcdf}} } \examples{ x <- rnorm(100) w <- runif(100) plot(ewcdf(x,w)) } \keyword{nonparametric} \keyword{univar} spatstat/man/pixelcentres.Rd0000644000176200001440000000237013160710621015663 0ustar liggesusers\name{pixelcentres} \alias{pixelcentres} \title{ Extract Pixel Centres as Point Pattern } \description{ Given a pixel image or binary mask window, extract the centres of all pixels and return them as a point pattern. } \usage{ pixelcentres(X, W = NULL, ...) } \arguments{ \item{X}{ Pixel image (object of class \code{"im"}) or window (object of class \code{"owin"}). } \item{W}{ Optional window to contain the resulting point pattern. } \item{\dots}{ Optional arguments defining the pixel resolution. } } \details{ If the argument \code{X} is a pixel image, the result is a point pattern, consisting of the centre of every pixel whose pixel value is not \code{NA}. If \code{X} is a window which is a binary mask, the result is a point pattern consisting of the centre of every pixel inside the window (i.e. every pixel for which the mask value is \code{TRUE}). Otherwise, \code{X} is first converted to a window, then converted to a mask using \code{\link{as.mask}}, then handled as above. } \value{ A point pattern (object of class \code{"ppp"}). } \seealso{ \code{\link{raster.xy}} } \examples{ pixelcentres(letterR, dimyx=5) } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/with.fv.Rd0000644000176200001440000000745213160710621014551 0ustar liggesusers\name{with.fv} \alias{with.fv} \title{Evaluate an Expression in a Function Table} \description{ Evaluate an R expression in a function value table (object of class \code{"fv"}). 
} \usage{ \method{with}{fv}(data, expr, ..., fun = NULL, enclos=NULL) } \arguments{ \item{data}{A function value table (object of class \code{"fv"}) in which the expression will be evaluated. } \item{expr}{The expression to be evaluated. An \R language expression, which may involve the names of columns in \code{data}, the special abbreviations \code{.}, \code{.x} and \code{.y}, and global constants or functions. } \item{\dots}{Ignored.} \item{fun}{Logical value, specifying whether the result should be interpreted as another function (\code{fun=TRUE}) or simply returned as a numeric vector or array (\code{fun=FALSE}). See Details. } \item{enclos}{ An environment in which to search for variables that are not found in \code{data}. Defaults to \code{\link{parent.frame}()}. } } \details{ This is a method for the generic command \code{\link{with}} for an object of class \code{"fv"} (function value table). An object of class \code{"fv"} is a convenient way of storing and plotting several different estimates of the same function. It is effectively a data frame with extra attributes. See \code{\link{fv.object}} for further explanation. This command makes it possible to perform computations that involve different estimates of the same function. For example we use it to compute the arithmetic difference between two different edge-corrected estimates of the \eqn{K} function of a point pattern. The argument \code{expr} should be an \R language expression. The expression may involve \itemize{ \item the name of any column in \code{data}, referring to one of the estimates of the function; \item the symbol \code{.} which stands for all the available estimates of the function; \item the symbol \code{.y} which stands for the recommended estimate of the function (in an \code{"fv"} object, one of the estimates is always identified as the recommended estimate); \item the symbol \code{.x} which stands for the argument of the function; \item global constants or functions. 
} See the Examples. The expression should be capable of handling vectors and matrices. The interpretation of the argument \code{fun} is as follows: \itemize{ \item If \code{fun=FALSE}, the result of evaluating the expression \code{expr} will be returned as a numeric vector, matrix or data frame. \item If \code{fun=TRUE}, then the result of evaluating \code{expr} will be interpreted as containing the values of a new function. The return value will be an object of class \code{"fv"}. (This can only happen if the result has the right dimensions.) \item The default is \code{fun=TRUE} if the result of evaluating \code{expr} has more than one column, and \code{fun=FALSE} otherwise. } To perform calculations involving \emph{several} objects of class \code{"fv"}, use \code{\link{eval.fv}}. } \value{ A function value table (object of class \code{"fv"}) or a numeric vector or data frame. } \seealso{ \code{\link{with}}, \code{\link{fv.object}}, \code{\link{eval.fv}}, \code{\link{Kest}} } \examples{ # compute 4 estimates of the K function X <- rpoispp(42) K <- Kest(X) plot(K) # derive 4 estimates of the L function L(r) = sqrt(K(r)/pi) L <- with(K, sqrt(./pi)) plot(L) # compute 4 estimates of V(r) = L(r)/r V <- with(L, ./.x) plot(V) # compute the maximum absolute difference between # the isotropic and translation correction estimates of K(r) D <- with(K, max(abs(iso - trans))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/as.matrix.owin.Rd0000644000176200001440000000220213160710571016035 0ustar liggesusers\name{as.matrix.owin} \alias{as.matrix.owin} \title{Convert Pixel Image to Matrix} \description{ Converts a pixel image to a matrix. } \usage{ \method{as.matrix}{owin}(x, ...) } \arguments{ \item{x}{A window (object of class \code{"owin"}).} \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the pixel resolution.} } \details{ The function \code{as.matrix.owin} converts a window to a logical matrux. 
It first converts the window \code{x} into a binary pixel mask using \code{\link{as.mask}}. It then extracts the pixel entries as a logical matrix. The resulting matrix has entries that are \code{TRUE} if the corresponding pixel is inside the window, and \code{FALSE} if it is outside. The function \code{as.matrix} is generic. The function \code{as.matrix.owin} is the method for windows (objects of class \code{"owin"}). Use \code{\link{as.im}} to convert a window to a pixel image. } \value{ A logical matrix. } \examples{ m <- as.matrix(letterR) } \seealso{ \code{\link{as.matrix.im}}, \code{\link{as.im}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/logLik.slrm.Rd0000644000176200001440000000324013160710621015350 0ustar liggesusers\name{logLik.slrm} \Rdversion{1.1} \alias{logLik.slrm} \title{ Loglikelihood of Spatial Logistic Regression } \description{ Computes the (maximised) loglikelihood of a fitted Spatial Logistic Regression model. } \usage{ \method{logLik}{slrm}(object, ..., adjust = TRUE) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } \item{adjust}{ Logical value indicating whether to adjust the loglikelihood of the model to make it comparable with a point process likelihood. See Details. } } \details{ This is a method for \code{\link[stats]{logLik}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). It computes the log-likelihood of a fitted spatial logistic regression model. If \code{adjust=FALSE}, the loglikelihood is computed using the standard formula for the loglikelihood of a logistic regression model for a finite set of (pixel) observations. 
If \code{adjust=TRUE} then the loglikelihood is adjusted so that it is approximately comparable with the likelihood of a point process in continuous space, by subtracting the value \eqn{n \log(a)}{n * log(a)} where \eqn{n} is the number of points in the original point pattern dataset, and \eqn{a} is the area of one pixel. } \value{ A numerical value. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) logLik(fit) logLik(fit, adjust=FALSE) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/Gmulti.Rd0000644000176200001440000001722713160710571014432 0ustar liggesusers\name{Gmulti} \alias{Gmulti} \title{ Marked Nearest Neighbour Distance Function } \description{ For a marked point pattern, estimate the distribution of the distance from a typical point in subset \code{I} to the nearest point of subset \eqn{J}. } \usage{ Gmulti(X, I, J, r=NULL, breaks=NULL, \dots, disjoint=NULL, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype distance distribution function \eqn{G_{IJ}(r)}{GIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset of points of \code{X} from which distances are measured. } \item{J}{Subset of points in \code{X} to which distances are measured. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are disjoint. If missing, this value will be computed by inspecting the vectors \code{I} and \code{J}. } \item{correction}{ Optional. 
Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{IJ}(r)}{GIJ(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{IJ}(r)}{GIJ(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{IJ}(r)}{GIJ(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest point of type \eqn{j} } \item{theo}{the theoretical value of \eqn{G_{IJ}(r)}{GIJ(r)} for a marked Poisson process with the same estimated intensity } } \details{ The function \code{Gmulti} generalises \code{\link{Gest}} (for unmarked point patterns) and \code{\link{Gdot}} and \code{\link{Gcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. This function computes an estimate of the cumulative distribution function \eqn{G_{IJ}(r)}{GIJ(r)} of the distance from a typical point of \eqn{X_I}{X[I]} to the nearest distinct point of \eqn{X_J}{X[J]}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. 
They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. This algorithm estimates the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{IJ}(r)}{GIJ(r)}. This estimate should be used with caution as \eqn{G_{IJ}(r)}{GIJ(r)} is not necessarily differentiable. 
The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{IJ}}{GIJ}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{IJ}}{GIJ} as if it were an unbiased estimator of \eqn{G_{IJ}}{GIJ}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The function \eqn{G_{IJ}}{GIJ} does not necessarily have a density. The reduced sample estimator of \eqn{G_{IJ}}{GIJ} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{IJ}}{GIJ} is always nondecreasing but its maximum value may be less than \eqn{1}. 
} \seealso{ \code{\link{Gcross}}, \code{\link{Gdot}}, \code{\link{Gest}} } \examples{ trees <- longleaf # Longleaf Pine data: marks represent diameter \testonly{ trees <- trees[seq(1, npoints(trees), by=50), ] } Gm <- Gmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(Gm) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/intersect.owin.Rd0000644000176200001440000000605613160710621016136 0ustar liggesusers\name{intersect.owin} \alias{intersect.owin} \alias{union.owin} \alias{setminus.owin} \title{Intersection, Union or Set Subtraction of Windows} \description{ Yields the intersection, union or set subtraction of windows. } \usage{ intersect.owin(\dots, fatal=TRUE, p) union.owin(\dots, p) setminus.owin(A, B, \dots, p) } \arguments{ \item{A,B}{Windows (objects of class \code{"owin"}).} \item{\dots}{ Windows, or arguments passed to \code{\link{as.mask}} to control the discretisation. } \item{fatal}{Logical. Determines what happens if the intersection is empty. } \item{p}{ Optional list of parameters passed to \code{\link[polyclip]{polyclip}} to control the accuracy of polygon geometry. } } \value{ A window (object of class \code{"owin"}) or possibly \code{NULL}. } \details{ The function \code{intersect.owin} computes the intersection between the windows given in \code{\dots}, while \code{union.owin} computes their union. The function \code{setminus.owin} computes the intersection of \code{A} with the complement of \code{B}. For \code{intersect.owin} and \code{union.owin}, the arguments \code{\dots} must be either \itemize{ \item window objects of class \code{"owin"}, \item data that can be coerced to this class by \code{\link{as.owin}}), \item lists of windows, of class \code{"solist"}, \item named arguments of \code{\link{as.mask}} to control the discretisation if required. } For \code{setminus.owin}, the arguments \code{\dots} must be named arguments of \code{\link{as.mask}}. 
If the intersection is empty, then if \code{fatal=FALSE} the result is \code{NULL}, while if \code{fatal=TRUE} an error occurs. } \author{ \adrian , \rolf and \ege }
Logical matrix giving binary image of window. Incompatible with \code{poly}. } \item{unitname}{ Optional. Name of unit of length. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } \item{xy}{ Optional. List with components \code{x} and \code{y} specifying the pixel coordinates for \code{mask}. } } \value{ An object of class \code{"owin"} describing a window in the two-dimensional plane. } \details{ In the \pkg{spatstat} library, a point pattern dataset must include information about the window of observation. This is represented by an object of class \code{"owin"}. See \code{\link{owin.object}} for an overview. To create a window in its own right, users would normally invoke \code{owin}, although sometimes \code{\link{as.owin}} may be convenient. A window may be rectangular, polygonal, or a mask (a binary image). \itemize{ \item \bold{rectangular windows:} If only \code{xrange} and \code{yrange} are given, then the window will be rectangular, with its \eqn{x} and \eqn{y} coordinate dimensions given by these two arguments (which must be vectors of length 2). If no arguments are given at all, the default is the unit square with dimensions \code{xrange=c(0,1)} and \code{yrange=c(0,1)}. \item \bold{polygonal windows:} If \code{poly} is given, then the window will be polygonal. \itemize{ \item \emph{single polygon:} If \code{poly} is a matrix or data frame with two columns, or a structure with two component vectors \code{x} and \code{y} of equal length, then these values are interpreted as the cartesian coordinates of the vertices of a polygon circumscribing the window. The vertices must be listed \emph{anticlockwise}. No vertex should be repeated (i.e. do not repeat the first vertex). 
\item \emph{multiple polygons or holes:} If \code{poly} is a list, each entry \code{poly[[i]]} of which is a matrix or data frame with two columns or a structure with two component vectors \code{x} and \code{y} of equal length, then the successive list members \code{poly[[i]]} are interpreted as separate polygons which together make up the boundary of the window. The vertices of each polygon must be listed \emph{anticlockwise} if the polygon is part of the external boundary, but \emph{clockwise} if the polygon is the boundary of a hole in the window. Again, do not repeat any vertex. } \item \bold{binary masks:} If \code{mask} is given, then the window will be a binary image. \itemize{ \item \emph{Specified by logical matrix:} Normally the argument \code{mask} should be a logical matrix such that \code{mask[i,j]} is \code{TRUE} if the point \code{(x[j],y[i])} belongs to the window, and \code{FALSE} if it does not. Note carefully that rows of \code{mask} correspond to the \eqn{y} coordinate, and columns to the \eqn{x} coordinate. Here \code{x} and \code{y} are vectors of \eqn{x} and \eqn{y} coordinates equally spaced over \code{xrange} and \code{yrange} respectively. The pixel coordinate vectors \code{x} and \code{y} may be specified explicitly using the argument \code{xy}, which should be a list containing components \code{x} and \code{y}. Alternatively there is a sensible default. \item \emph{Specified by list of pixel coordinates:} Alternatively the argument \code{mask} can be a data frame with 2 or 3 columns. If it has 2 columns, it is expected to contain the spatial coordinates of all the pixels which are inside the window. If it has 3 columns, it should contain the spatial coordinates \eqn{(x,y)} of every pixel in the grid, and the logical value associated with each pixel. The pixels may be listed in any order. 
} } To create a window which is mathematically defined by inequalities in the Cartesian coordinates, use \code{\link{raster.x}()} and \code{\link{raster.y}()} as in the examples below. Functions \code{\link{square}} and \code{\link{disc}} will create square and circular windows, respectively. } \section{Validity of polygon data}{ Polygon data may contain geometrical inconsistencies such as self-intersections and overlaps. These inconsistencies must be removed to prevent problems in other \pkg{spatstat} functions. By default, polygon data will be repaired automatically using polygon-clipping code. The repair process may change the number of vertices in a polygon and the number of polygon components. To disable the repair process, set \code{spatstat.options(fixpolygons=FALSE)}. } \seealso{ \code{\link{owin.object}}, \code{\link{as.owin}}, \code{\link{complement.owin}}, \code{\link{ppp.object}}, \code{\link{ppp}} \code{\link{square}}, \code{\link{hexagon}}, \code{\link{regularpolygon}}, \code{\link{disc}}, \code{\link{ellipse}}. 
} \examples{ w <- owin() w <- owin(c(0,1), c(0,1)) # the unit square w <- owin(c(10,20), c(10,30), unitname=c("foot","feet")) # a rectangle of dimensions 10 x 20 feet # with lower left corner at (10,10) # polygon (diamond shape) w <- owin(poly=list(x=c(0.5,1,0.5,0),y=c(0,1,2,1))) w <- owin(c(0,1), c(0,2), poly=list(x=c(0.5,1,0.5,0),y=c(0,1,2,1))) # polygon with hole ho <- owin(poly=list(list(x=c(0,1,1,0), y=c(0,0,1,1)), list(x=c(0.6,0.4,0.4,0.6), y=c(0.2,0.2,0.4,0.4)))) w <- owin(c(-1,1), c(-1,1), mask=matrix(TRUE, 100,100)) # 100 x 100 image, all TRUE X <- raster.x(w) Y <- raster.y(w) wm <- owin(w$xrange, w$yrange, mask=(X^2 + Y^2 <= 1)) # discrete approximation to the unit disc \dontrun{ if(FALSE) { plot(c(0,1),c(0,1),type="n") bdry <- locator() # click the vertices of a polygon (anticlockwise) } } \testonly{ bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2), y=c(0.1,0.1,0.5,0.7,0.3)) } w <- owin(poly=bdry) \dontrun{plot(w)} \dontrun{ im <- as.logical(matrix(scan("myfile"), nrow=128, ncol=128)) # read in an arbitrary 128 x 128 digital image from text file rim <- im[, 128:1] # Assuming it was given in row-major order in the file # i.e. scanning left-to-right in rows from top-to-bottom, # the use of matrix() has effectively transposed rows & columns, # so to convert it to our format just reverse the column order. w <- owin(mask=rim) plot(w) # display it to check! } } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/Gfox.Rd0000644000176200001440000000706213160710571014070 0ustar liggesusers\name{Gfox} \alias{Gfox} \alias{Jfox} \title{ Foxall's Distance Functions } \description{ Given a point pattern \code{X} and a spatial object \code{Y}, compute estimates of Foxall's \eqn{G} and \eqn{J} functions. } \usage{ Gfox(X, Y, r = NULL, breaks = NULL, correction = c("km", "rs", "han"), ...) Jfox(X, Y, r = NULL, breaks = NULL, correction = c("km", "rs", "han"), ...) 
} \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) from which distances will be measured. } \item{Y}{ An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"} to which distances will be measured. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{Gfox(r)} or \eqn{Jfox(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{Gfox(r)} or \eqn{Jfox(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{\dots}{ Extra arguments affecting the discretisation of distances. These arguments are ignored by \code{Gfox}, but \code{Jfox} passes them to \code{\link{Hest}} to determine the discretisation of the spatial domain. } } \details{ Given a point pattern \code{X} and another spatial object \code{Y}, these functions compute two nonparametric measures of association between \code{X} and \code{Y}, introduced by Foxall (Foxall and Baddeley, 2002). Let the random variable \eqn{R} be the distance from a typical point of \code{X} to the object \code{Y}. Foxall's \eqn{G}-function is the cumulative distribution function of \eqn{R}: \deqn{G(r) = P(R \le r)}{P(R <= r)} Let the random variable \eqn{S} be the distance from a \emph{fixed} point in space to the object \code{Y}. The cumulative distribution function of \eqn{S} is the (unconditional) spherical contact distribution function \deqn{H(r) = P(S \le r)}{H(r) = P(S <= r)} which is computed by \code{\link{Hest}}. Foxall's \eqn{J}-function is the ratio \deqn{ J(r) = \frac{1-G(r)}{1-H(r)} }{ J(r) = (1-G(r))/(1-H(r)) } For further interpretation, see Foxall and Baddeley (2002). 
Accuracy of \code{Jfox} depends on the pixel resolution, which is controlled by the arguments \code{eps}, \code{dimyx} and \code{xy} passed to \code{\link{as.mask}}. For example, use \code{eps=0.1} to specify square pixels of side 0.1 units, and \code{dimyx=256} to specify a 256 by 256 grid of pixels. } \value{ A function value table (object of class \code{"fv"}) which can be printed, plotted, or converted to a data frame of values. } \references{ Foxall, R. and Baddeley, A. (2002) Nonparametric measures of association between a spatial point process and a random set, with geological applications. \emph{Applied Statistics} \bold{51}, 165--182. } \seealso{ \code{\link{Gest}}, \code{\link{Hest}}, \code{\link{Jest}}, \code{\link{Fest}} } \examples{ data(copper) X <- copper$SouthPoints Y <- copper$SouthLines G <- Gfox(X,Y) J <- Jfox(X,Y, correction="km") \testonly{ J <- Jfox(X,Y, correction="km", eps=1) } \dontrun{ J <- Jfox(X,Y, correction="km", eps=0.25) } } \author{Rob Foxall and \adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/distmap.ppp.Rd0000644000176200001440000000420113160710571015414 0ustar liggesusers\name{distmap.ppp} \alias{distmap.ppp} \title{ Distance Map of Point Pattern } \description{ Computes the distance from each pixel to the nearest point in the given point pattern. } \usage{ \method{distmap}{ppp}(X, \dots) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control pixel resolution. } } \value{ A pixel image (object of class \code{"im"}) whose greyscale values are the values of the distance map. The return value has attributes \code{"index"} and \code{"bdry"} which are also pixel images. } \details{ The ``distance map'' of a point pattern \eqn{X} is the function \eqn{f} whose value \code{f(u)} is defined for any two-dimensional location \eqn{u} as the shortest distance from \eqn{u} to \eqn{X}. 
This function computes the distance map of the point pattern \code{X} and returns the distance map as a pixel image. The greyscale value at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest point of the pattern \code{X}. Additionally, the return value has two attributes, \code{"index"} and \code{"bdry"}, which are also pixel images. The grey values in \code{"bdry"} give the distance from each pixel to the bounding rectangle of the image. The grey values in \code{"index"} are integers identifying which point of \code{X} is closest. This is a method for the generic function \code{\link{distmap}}. Note that this function gives the distance from the \emph{centre of each pixel} to the nearest data point. To compute the exact distance from a given spatial location to the nearest data point in \code{X}, use \code{\link{distfun}} or \code{\link{nncross}}. } \seealso{ Generic function \code{\link{distmap}} and other methods \code{\link{distmap.psp}}, \code{\link{distmap.owin}}. Generic function \code{\link{distfun}}. 
Nearest neighbour distance \code{\link{nncross}} } \examples{ data(cells) U <- distmap(cells) \dontrun{ plot(U) plot(attr(U, "bdry")) plot(attr(U, "index")) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/is.dppm.Rd0000644000176200001440000000062113160710621014525 0ustar liggesusers\name{is.dppm} \alias{is.dppm} \title{Recognise Fitted Determinantal Point Process Models} \description{Check that an object inherits the class dppm} \usage{is.dppm(x)} \arguments{ \item{x}{Any object.} } \value{A single logical value.} \author{\ege , \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/nndist.lpp.Rd0000644000176200001440000000343613160710621015253 0ustar liggesusers\name{nndist.lpp} \alias{nndist.lpp} \title{ Nearest neighbour distances on a linear network } \description{ Given a pattern of points on a linear network, compute the nearest-neighbour distances, measured by the shortest path in the network. } \usage{ \method{nndist}{lpp}(X, ..., k=1, method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function computes the nearest neighbour distance for each point (i.e. the distance from each point to the nearest other point), measuring distance by the shortest path in the network. If \code{method="C"} the distances are computed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. The \R code is much slower, but is provided for checking purposes. The \code{k}th nearest neighbour distance is infinite if the \code{k}th nearest neighbour does not exist. 
This can occur if there are fewer than \code{k+1} points in the dataset, or if the linear network is not connected. } \value{ A numeric vector, of length equal to the number of points in \code{X}, or a matrix, with one row for each point in \code{X} and one column for each entry of \code{k}. Entries are nonnegative numbers or infinity (\code{Inf}). } \author{ \adrian } \seealso{ \code{\link{lpp}} } \examples{ X <- runiflpp(12, simplenet) nndist(X) nndist(X, k=2) } \keyword{spatial} spatstat/man/integral.msr.Rd0000644000176200001440000000302513160710621015561 0ustar liggesusers\name{integral.msr} \alias{integral.msr} \title{ Integral of a Measure } \description{ Computes the integral (total value) of a measure over its domain. } \usage{ \method{integral}{msr}(f, domain=NULL, \dots) } \arguments{ \item{f}{ A signed measure or vector-valued measure (object of class \code{"msr"}). } \item{domain}{ Optional window specifying the domain of integration. Alternatively a tessellation. } \item{\dots}{ Ignored. } } \details{ The integral (total value of the measure over its domain) is calculated. If \code{domain} is a window (class \code{"owin"}) then the integration will be restricted to this window. If \code{domain} is a tessellation (class \code{"tess"}) then the integral of \code{f} in each tile of \code{domain} will be computed. For a multitype measure \code{m}, use \code{\link{split.msr}} to separate the contributions for each type of point, as shown in the Examples. } \value{ A numeric value (for a signed measure) or a vector of values (for a vector-valued measure). } \seealso{ \code{\link{msr}}, \code{\link{integral}} } \examples{ fit <- ppm(cells ~ x) rr <- residuals(fit) integral(rr) # vector-valued measure rs <- residuals(fit, type="score") integral(rs) # multitype fitA <- ppm(amacrine ~ x) rrA <- residuals(fitA) sapply(split(rrA), integral) # multitype and vector-valued rsA <- residuals(fitA, type="score") sapply(split(rsA), integral) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{math} spatstat/man/coef.slrm.Rd0000644000176200001440000000177613160710571015063 0ustar liggesusers\name{coef.slrm} \Rdversion{1.1} \alias{coef.slrm} \title{ Coefficients of Fitted Spatial Logistic Regression Model } \description{ Extracts the coefficients (parameters) from a fitted Spatial Logistic Regression model. } \usage{ \method{coef}{slrm}(object, ...) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } } \details{ This is a method for \code{\link{coef}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). It extracts the fitted canonical parameters, i.e.\ the coefficients in the linear predictor of the spatial logistic regression. } \value{ Numeric vector of coefficients. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) coef(fit) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/plot.quadrattest.Rd0000644000176200001440000000311413160710621016471 0ustar liggesusers\name{plot.quadrattest} \alias{plot.quadrattest} \title{ Display the result of a quadrat counting test. } \description{ Given the result of a quadrat counting test, graphically display the quadrats that were used, the observed and expected counts, and the residual in each quadrat. } \usage{ \method{plot}{quadrattest}(x, ..., textargs=list()) } \arguments{ \item{x}{ Object of class \code{"quadrattest"} containing the result of \code{\link{quadrat.test}}. } \item{\dots}{ Additional arguments passed to \code{\link{plot.tess}} to control the display of the quadrats. } \item{textargs}{ List of additional arguments passed to \code{\link[graphics]{text.default}} to control the appearance of the text. } } \details{ This is the plot method for objects of class \code{"quadrattest"}. 
Such an object is produced by \code{\link{quadrat.test}} and represents the result of a \eqn{\chi^2}{chi^2} test for a spatial point pattern. The quadrats are first plotted using \code{\link{plot.tess}}. Then in each quadrat, the observed and expected counts and the Pearson residual are displayed as text using \code{\link[graphics]{text.default}}. Observed count is displayed at top left; expected count at top right; and Pearson residual at bottom. } \value{ Null. } \examples{ plot(quadrat.test(swedishpines, 3)) } \seealso{ \code{\link{quadrat.test}}, \code{\link{plot.tess}}, \code{\link[graphics]{text.default}}, \code{\link{plot.quadratcount}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} spatstat/man/as.function.owin.Rd0000644000176200001440000000203413160710571016361 0ustar liggesusers\name{as.function.owin} \alias{as.function.owin} \title{ Convert Window to Indicator Function } \description{ Converts a spatial window to a function of the \eqn{x} and \eqn{y} coordinates returning the value 1 inside the window and 0 outside. } \usage{ \method{as.function}{owin}(x, \dots) } \arguments{ \item{x}{ Pixel image (object of class \code{"owin"}). } \item{\dots}{ Ignored. } } \details{ This command converts a spatial window (object of class \code{"owin"}) to a \code{function(x,y)} where the arguments \code{x} and \code{y} are (vectors of) spatial coordinates. This is the indicator function of the window: it returns the value 1 for locations inside the window, and returns 0 for values outside the window. } \value{ A function in the \R language. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{as.im.owin}} } \examples{ W <- Window(humberside) f <- as.function(W) f(5000, 4500) f(123456, 78910) X <- runifpoint(5, Frame(humberside)) f(X) } \keyword{spatial} \keyword{manip} spatstat/man/anova.slrm.Rd0000644000176200001440000000307013160710571015240 0ustar liggesusers\name{anova.slrm} \Rdversion{1.1} \alias{anova.slrm} \title{ Analysis of Deviance for Spatial Logistic Regression Models } \description{ Performs Analysis of Deviance for two or more fitted Spatial Logistic Regression models. } \usage{ \method{anova}{slrm}(object, ..., test = NULL) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ additional objects of the same type (optional). } \item{test}{ a character string, (partially) matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, indicating the reference distribution that should be used to compute \eqn{p}-values. } } \details{ This is a method for \code{\link[stats]{anova}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided \eqn{p}-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link[stats]{anova.glm}}. } \value{ An object of class \code{"anova"}, inheriting from class \code{"data.frame"}, representing the analysis of deviance table. 
} \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit0 <- slrm(X ~ 1) fit1 <- slrm(X ~ x+y) anova(fit0, fit1, test="Chi") } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/model.matrix.slrm.Rd0000644000176200001440000000324213160710621016534 0ustar liggesusers\name{model.matrix.slrm} \alias{model.matrix.slrm} \title{Extract Design Matrix from Spatial Logistic Regression Model} \description{ This function extracts the design matrix of a spatial logistic regression model. } \usage{ \method{model.matrix}{slrm}(object, ..., keepNA=TRUE) } \arguments{ \item{object}{ A fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } \item{keepNA}{ Logical. Determines whether rows containing NA values will be deleted or retained. } } \details{ This command is a method for the generic function \code{\link{model.matrix}}. It extracts the design matrix of a spatial logistic regression. The \code{object} must be a fitted spatial logistic regression (object of class \code{"slrm"}). Such objects are produced by the model-fitting function \code{\link{slrm}}. Usually the result is a matrix with one column for every constructed covariate in the model, and one row for every pixel in the grid used to fit the model. If \code{object} was fitted using split pixels (by calling \code{\link{slrm}} using the argument \code{splitby}) then the matrix has one row for every pixel or half-pixel. } \value{ A matrix. Columns of the matrix are canonical covariates in the model. } \author{\adrian and \rolf } \seealso{ \code{\link{model.matrix}}, \code{\link{model.images}}, \code{\link{slrm}}. 
} \examples{ fit <- slrm(japanesepines ~x) head(model.matrix(fit)) # matrix with two columns: '(Intercept)' and 'x' } \keyword{spatial} \keyword{models} spatstat/man/sidelengths.owin.Rd0000644000176200001440000000304313160710621016440 0ustar liggesusers\name{sidelengths.owin} \alias{sidelengths.owin} \alias{shortside.owin} \title{Side Lengths of Enclosing Rectangle of a Window} \description{ Computes the side lengths of the (enclosing rectangle of) a window. } \usage{ \method{sidelengths}{owin}(x) \method{shortside}{owin}(x) } \arguments{ \item{x}{ A window whose side lengths will be computed. Object of class \code{"owin"}. } } \value{ For \code{sidelengths.owin}, a numeric vector of length 2 giving the side-lengths (\eqn{x} then \eqn{y}) of the enclosing rectangle. For \code{shortside.owin}, a numeric value. } \details{ The functions \code{shortside} and \code{sidelengths} are generic. The functions documented here are the methods for the class \code{"owin"}. \code{sidelengths.owin} computes the side-lengths of the enclosing rectangle of the window \code{x}. For safety, both functions give a warning if the window is not a rectangle. To suppress the warning, first convert the window to a rectangle using \code{\link{as.rectangle}}. \code{shortside.owin} computes the minimum of the two side-lengths. } \seealso{ \code{\link{shortside}}, \code{\link{sidelengths}} for the generic functions. \code{\link{area.owin}}, \code{\link{diameter.owin}}, \code{\link{perimeter}} for other geometric calculations on \code{"owin"} objects. \code{\link{owin}}, \code{\link{as.owin}}. } \examples{ w <- owin(c(0,2),c(-1,3)) sidelengths(w) shortside(as.rectangle(letterR)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/update.ppm.Rd0000644000176200001440000001431513160710621015235 0ustar liggesusers\name{update.ppm} \alias{update.ppm} \title{Update a Fitted Point Process Model} \description{ \code{update} method for class \code{"ppm"}. 
} \usage{ \method{update}{ppm}(object, \dots, fixdummy=TRUE, use.internal=NULL, envir=environment(terms(object))) } \arguments{ \item{object}{ An existing fitted point process model, typically produced by \code{\link{ppm}}. } \item{\dots}{ Arguments to be updated in the new call to \code{\link{ppm}}. } \item{fixdummy}{ Logical flag indicating whether the quadrature scheme for the call to \code{\link{ppm}} should use the same set of dummy points as that in the original call. } \item{use.internal}{ Optional. Logical flag indicating whether the model should be refitted using the internally saved data (\code{use.internal=TRUE}) or by re-evaluating these data in the current frame (\code{use.internal=FALSE}). } \item{envir}{ Environment in which to re-evaluate the call to \code{\link{ppm}}. } } \details{ This is a method for the generic function \code{\link{update}} for the class \code{"ppm"}. An object of class \code{"ppm"} describes a fitted point process model. See \code{\link{ppm.object}}) for details of this class. \code{update.ppm} will modify the point process model specified by \code{object} according to the new arguments given, then re-fit it. The actual re-fitting is performed by the model-fitting function \code{\link{ppm}}. If you are comparing several model fits to the same data, or fits of the same model to different data, it is strongly advisable to use \code{update.ppm} rather than trying to fit them by hand. This is because \code{update.ppm} re-fits the model in a way which is comparable to the original fit. The arguments \code{...} are matched to the formal arguments of \code{\link{ppm}} as follows. First, all the \emph{named} arguments in \code{...} are matched with the formal arguments of \code{\link{ppm}}. Use \code{name=NULL} to remove the argument \code{name} from the call. Second, any \emph{unnamed} arguments in \code{...} are matched with formal arguments of \code{\link{ppm}} if the matching is obvious from the class of the object. 
Thus \code{...} may contain \itemize{ \item exactly one argument of class \code{"ppp"} or \code{"quad"}, which will be interpreted as the named argument \code{Q}; \item exactly one argument of class \code{"formula"}, which will be interpreted as the named argument \code{trend} (or as specifying a change to the trend formula); \item exactly one argument of class \code{"interact"}, which will be interpreted as the named argument \code{interaction}; \item exactly one argument of class \code{"data.frame"}, which will be interpreted as the named argument \code{covariates}. } The \code{trend} argument can be a formula that specifies a \emph{change} to the current trend formula. For example, the formula \code{~ . + Z} specifies that the additional covariate \code{Z} will be added to the right hand side of the trend formula in the existing \code{object}. The argument \code{fixdummy=TRUE} ensures comparability of the objects before and after updating. When \code{fixdummy=FALSE}, calling \code{update.ppm} is exactly the same as calling \code{ppm} with the updated arguments. However, the original and updated models are not strictly comparable (for example, their pseudolikelihoods are not strictly comparable) unless they used the same set of dummy points for the quadrature scheme. Setting \code{fixdummy=TRUE} ensures that the re-fitting will be performed using the same set of dummy points. This is highly recommended. The value of \code{use.internal} determines where to find data to re-evaluate the model (data for the arguments mentioned in the original call to \code{ppm} that are not overwritten by arguments to \code{update.ppm}). If \code{use.internal=FALSE}, then arguments to \code{ppm} are \emph{re-evaluated} in the frame where you call \code{update.ppm}. This is like the behaviour of the other methods for \code{\link{update}}. This means that if you have changed any of the objects referred to in the call, these changes will be taken into account. 
Also if the original call to \code{ppm} included any calls to random number generators, these calls will be recomputed, so that you will get a different outcome of the random numbers. If \code{use.internal=TRUE}, then arguments to \code{ppm} are extracted from internal data stored inside the current fitted model \code{object}. This is useful if you don't want to re-evaluate anything. It is also necessary if if \code{object} has been restored from a dump file using \code{\link{load}} or \code{\link{source}}. In such cases, we have lost the environment in which \code{object} was fitted, and data cannot be re-evaluated. By default, if \code{use.internal} is missing, \code{update.ppm} will re-evaluate the arguments if this is possible, and use internal data if not. } \value{ Another fitted point process model (object of class \code{"ppm"}). } \examples{ data(nztrees) data(cells) # fit the stationary Poisson process fit <- ppm(nztrees, ~ 1) # fit a nonstationary Poisson process fitP <- update(fit, trend=~x) fitP <- update(fit, ~x) # change the trend formula: add another term to the trend fitPxy <- update(fitP, ~ . + y) # change the trend formula: remove the x variable fitPy <- update(fitPxy, ~ . - x) # fit a stationary Strauss process fitS <- update(fit, interaction=Strauss(13)) fitS <- update(fit, Strauss(13)) # refit using a different edge correction fitS <- update(fitS, correction="isotropic") # re-fit the model to a subset # of the original point pattern nzw <- owin(c(0,148),c(0,95)) nzsub <- nztrees[,nzw] fut <- update(fitS, Q=nzsub) fut <- update(fitS, nzsub) # WARNING: the point pattern argument is called 'Q' ranfit <- ppm(rpoispp(42), ~1, Poisson()) ranfit # different random data! 
update(ranfit) # the original data update(ranfit, use.internal=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/rotate.Rd0000644000176200001440000000147213160710621014456 0ustar liggesusers\name{rotate} \alias{rotate} \title{Rotate} \description{ Applies a rotation to any two-dimensional object, such as a point pattern or a window. } \usage{ rotate(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} \item{\dots}{Data specifying the rotation.} } \value{ Another object of the same type, representing the result of rotating \code{X} through the specified angle. } \details{ This is generic. Methods are provided for point patterns (\code{\link{rotate.ppp}}) and windows (\code{\link{rotate.owin}}). } \seealso{ \code{\link{rotate.ppp}}, \code{\link{rotate.owin}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/rescue.rectangle.Rd0000644000176200001440000000252613160710621016412 0ustar liggesusers\name{rescue.rectangle} \alias{rescue.rectangle} \title{Convert Window Back To Rectangle} \description{ Determines whether the given window is really a rectangle aligned with the coordinate axes, and if so, converts it to a rectangle object. } \usage{ rescue.rectangle(W) } \arguments{ \item{W}{A window (object of class \code{"owin"}).} } \value{ Another object of class \code{"owin"} representing the same window. } \details{ This function decides whether the window \code{W} is actually a rectangle aligned with the coordinate axes. 
This will be true if \code{W} is \itemize{ \item a rectangle (window object of type \code{"rectangle"}); \item a polygon (window object of type \code{"polygonal"} with a single polygonal boundary) that is a rectangle aligned with the coordinate axes; \item a binary mask (window object of type \code{"mask"}) in which all the pixel entries are \code{TRUE}. } If so, the function returns this rectangle, a window object of type \code{"rectangle"}. If not, the function returns \code{W}. } \seealso{ \code{\link{as.owin}}, \code{\link{owin.object}} } \examples{ w <- owin(poly=list(x=c(0,1,1,0),y=c(0,0,1,1))) rw <- rescue.rectangle(w) w <- as.mask(unit.square()) rw <- rescue.rectangle(w) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/dirichletVertices.Rd0000644000176200001440000000326313160710571016640 0ustar liggesusers\name{dirichletVertices} \alias{dirichletVertices} \alias{dirichletEdges} \title{ Vertices and Edges of Dirichlet Tessellation } \description{ Computes the Dirichlet-Voronoi tessellation of a point pattern and extracts the vertices or edges of the tiles. } \usage{ dirichletVertices(X) dirichletEdges(X) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } } \details{ These function compute the Dirichlet-Voronoi tessellation of \code{X} (see \code{\link{dirichlet}}) and extract the vertices or edges of the tiles of the tessellation. The Dirichlet vertices are the spatial locations which are locally farthest away from \code{X}, that is, where the distance function of \code{X} reaches a local maximum. The Dirichlet edges are the dividing lines equally distant between a pair of points of \code{X}. The Dirichlet tessellation of \code{X} is computed using \code{\link{dirichlet}}. The vertices or edges of all tiles of the tessellation are extracted. For \code{dirichletVertices}, any vertex which lies on the boundary of the window of \code{X} is deleted. 
The remaining vertices are returned, as a point pattern, without duplicated entries. } \value{ \code{dirichletVertices} returns a point pattern (object of class \code{"ppp"}) in the same window as \code{X}. \code{dirichletEdges} returns a line segment pattern (object of class \code{"psp"}). } \seealso{ \code{\link{dirichlet}}, \code{\link{dirichletAreas}} } \examples{ plot(dirichlet(cells)) plot(dirichletVertices(cells), add=TRUE) ed <- dirichletEdges(cells) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{math} \keyword{manip} spatstat/man/intensity.dppm.Rd0000644000176200001440000000133213160710621016140 0ustar liggesusers\name{intensity.dppm} \alias{intensity.dppm} \alias{intensity.detpointprocfamily} \title{Intensity of Determinantal Point Process Model} \description{Extracts the intensity of a determinantal point process model.} \usage{ \method{intensity}{detpointprocfamily}(X, \dots) \method{intensity}{dppm}(X, \dots) } \arguments{ \item{X}{ A determinantal point process model (object of class \code{"detpointprocfamily"} or \code{"dppm"}). } \item{\dots}{Ignored.} } \value{ A numeric value (if the model is stationary), a pixel image (if the model is non-stationary) or \code{NA} if the intensity is unknown for the model. } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/psib.Rd0000644000176200001440000000304313160710621014111 0ustar liggesusers\name{psib} \alias{psib} \alias{psib.kppm} \title{ Sibling Probability of Cluster Point Process } \description{ Computes the sibling probability of a cluster point process model. } \usage{ psib(object) \method{psib}{kppm}(object) } \arguments{ \item{object}{ Fitted cluster point process model (object of class \code{"kppm"}). } } \details{ In a Poisson cluster process, two points are called \emph{siblings} if they belong to the same cluster, that is, if they had the same parent point. 
If two points of the process are separated by a distance \eqn{r}, the probability that they are siblings is \eqn{p(r) = 1 - 1/g(r)} where \eqn{g} is the pair correlation function of the process. The value \eqn{p(0) = 1 - 1/g(0)} is the probability that, if two points of the process are situated very close to each other, they came from the same cluster. This probability is an index of the strength of clustering, with high values suggesting strong clustering. This concept was proposed in Baddeley, Rubak and Turner (2015, page 479) and Baddeley (2016). } \value{ A single number. } \references{ Baddeley, A. (2016) Local composite likelihood for spatial point processes. \emph{Spatial Statistics}, in press. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } \author{ \adrian. } \seealso{ \code{\link[spatstat]{kppm}} } \examples{ fit <- kppm(redwood ~1, "Thomas") psib(fit) } \keyword{spatial} \keyword{models} spatstat/man/emend.Rd0000644000176200001440000000165713160710571014261 0ustar liggesusers\name{emend} \alias{emend} \title{ Force Model to be Valid } \description{ Check whether a model is valid, and if not, find the nearest model which is valid. } \usage{ emend(object, \dots) } \arguments{ \item{object}{ A statistical model, belonging to some class. } \item{\dots}{Arguments passed to methods.} } \details{ The function \code{emend} is generic, and has methods for several classes of statistical models in the \pkg{spatstat} package (mostly point process models). Its purpose is to check whether a given model is valid (for example, that none of the model parameters are \code{NA}) and, if not, to find the nearest model which is valid. See the methods for more information. } \value{ Another model of the same kind. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{emend.ppm}}, \code{\link{emend.lppm}}, \code{\link{valid}}. 
} \keyword{spatial} \keyword{models} spatstat/man/eem.Rd0000644000176200001440000000520313160710571013726 0ustar liggesusers\name{eem} \alias{eem} \title{ Exponential Energy Marks } \description{ Given a point process model fitted to a point pattern, compute the Stoyan-Grabarnik diagnostic ``exponential energy marks'' for the data points. } \usage{ eem(fit, check=TRUE) } \arguments{ \item{fit}{ The fitted point process model. An object of class \code{"ppm"}. } \item{check}{ Logical value indicating whether to check the internal format of \code{fit}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } } \value{ A vector containing the values of the exponential energy mark for each point in the pattern. } \details{ Stoyan and Grabarnik (1991) proposed a diagnostic tool for point process models fitted to spatial point pattern data. Each point \eqn{x_i}{x[i]} of the data pattern \eqn{X} is given a `mark' or `weight' \deqn{m_i = \frac 1 {\hat\lambda(x_i,X)}}{m[i] = 1/\lambda(x[i],X)} where \eqn{\hat\lambda(x_i,X)}{\lambda(x[i],X)} is the conditional intensity of the fitted model. If the fitted model is correct, then the sum of these marks for all points in a region \eqn{B} has expected value equal to the area of \eqn{B}. The argument \code{fit} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). This fitted model object contains complete information about the original data pattern and the model that was fitted to it. The value returned by \code{eem} is the vector of weights \eqn{m[i]}{m_i} associated with the points \eqn{x[i]}{x_i} of the original data pattern. The original data pattern (in corresponding order) can be extracted from \code{fit} using \code{\link{data.ppm}}. 
The function \code{\link{diagnose.ppm}} produces a set of sensible diagnostic plots based on these weights. } \references{ Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten}, 151:95--100. } \seealso{ \code{\link{diagnose.ppm}}, \code{\link{ppm.object}}, \code{\link{data.ppm}}, \code{\link{residuals.ppm}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~x, Strauss(r=0.15)) ee <- eem(fit) sum(ee)/area(Window(cells)) # should be about 1 if model is correct Y <- setmarks(cells, ee) plot(Y, main="Cells data\n Exponential energy marks") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/edge.Trans.Rd0000644000176200001440000001124513160710571015155 0ustar liggesusers\name{edge.Trans} \alias{edge.Trans} \alias{rmax.Trans} \title{ Translation Edge Correction } \description{ Computes Ohser and Stoyan's translation edge correction weights for a point pattern. } \usage{ edge.Trans(X, Y = X, W = Window(X), exact = FALSE, paired = FALSE, ..., trim = spatstat.options("maxedgewt"), dx=NULL, dy=NULL, give.rmax=FALSE, gW=NULL) rmax.Trans(W, g=setcov(W)) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{W}{ Window for which the edge correction is required. } \item{exact}{ Logical. If \code{TRUE}, a slow algorithm will be used to compute the exact value. If \code{FALSE}, a fast algorithm will be used to compute the approximate value. } \item{paired}{ Logical value indicating whether \code{X} and \code{Y} are paired. If \code{TRUE}, compute the edge correction for corresponding points \code{X[i], Y[i]} for all \code{i}. If \code{FALSE}, compute the edge correction for each possible pair of points \code{X[i], Y[j]} for all \code{i} and \code{j}. } \item{\dots}{Ignored.} \item{trim}{ Maximum permitted value of the edge correction weight. 
} \item{dx,dy}{ Alternative data giving the \eqn{x} and \eqn{y} coordinates of the vector differences between the points. Incompatible with \code{X} and \code{Y}. See Details. } \item{give.rmax}{ Logical. If \code{TRUE}, also compute the value of \code{rmax.Trans(W)} and return it as an attribute of the result. } \item{g, gW}{ Optional. Set covariance of \code{W}, if it has already been computed. Not required if \code{W} is a rectangle. } } \details{ The function \code{edge.Trans} computes Ohser and Stoyan's translation edge correction weight, which is used in estimating the \eqn{K} function and in many other contexts. The function \code{rmax.Trans} computes the maximum value of distance \eqn{r} for which the translation edge correction estimate of \eqn{K(r)} is valid. For a pair of points \eqn{x} and \eqn{y} in a window \eqn{W}, the translation edge correction weight is \deqn{ e(u, r) = \frac{\mbox{area}(W)}{\mbox{area}(W \cap (W + y - x))} }{ e(u, r) = area(W) / area(intersect(W, W + y - x)) } where \eqn{W + y - x} is the result of shifting the window \eqn{W} by the vector \eqn{y - x}. The denominator is the area of the overlap between this shifted window and the original window. The function \code{edge.Trans} computes this edge correction weight. If \code{paired=TRUE}, then \code{X} and \code{Y} should contain the same number of points. The result is a vector containing the edge correction weights \code{e(X[i], Y[i])} for each \code{i}. If \code{paired=FALSE}, then the result is a matrix whose \code{i,j} entry gives the edge correction weight \code{e(X[i], Y[j])}. Computation is exact if the window is a rectangle. Otherwise, \itemize{ \item if \code{exact=TRUE}, the edge correction weights are computed exactly using \code{\link{overlap.owin}}, which can be quite slow. \item if \code{exact=FALSE} (the default), the weights are computed rapidly by evaluating the set covariance function \code{\link{setcov}} using the Fast Fourier Transform. 
} If any value of the edge correction weight exceeds \code{trim}, it is set to \code{trim}. The arguments \code{dx} and \code{dy} can be provided as an alternative to \code{X} and \code{Y}. If \code{paired=TRUE} then \code{dx,dy} should be vectors of equal length such that the vector difference of the \eqn{i}th pair is \code{c(dx[i], dy[i])}. If \code{paired=FALSE} then \code{dx,dy} should be matrices of the same dimensions, such that the vector difference between \code{X[i]} and \code{Y[j]} is \code{c(dx[i,j], dy[i,j])}. The argument \code{W} is needed. The value of \code{rmax.Trans} is the shortest distance from the origin \eqn{(0,0)} to the boundary of the support of the set covariance function of \code{W}. It is computed by pixel approximation using \code{\link{setcov}}, unless \code{W} is a rectangle, when \code{rmax.Trans(W)} is the length of the shortest side of the rectangle. } \value{ Numeric vector or matrix. } \references{ Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. } \seealso{ \code{\link{rmax.Trans}}, \code{\link{edge.Ripley}}, \code{\link{setcov}}, \code{\link{Kest}} } \examples{ v <- edge.Trans(cells) rmax.Trans(Window(cells)) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat/man/Kdot.inhom.Rd0000644000176200001440000002770013160710571015200 0ustar liggesusers\name{Kdot.inhom} \alias{Kdot.inhom} \title{ Inhomogeneous Multitype K Dot Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the dot \eqn{K} function, which counts the expected number of points of any type within a given distance of a point of type \eqn{i}, adjusted for spatially varying intensity. 
} \usage{ Kdot.inhom(X, i, lambdaI=NULL, lambdadot=NULL, \dots, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIdot=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{K} function \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdadot}{ Optional. Values of the estimated intensity of the entire point process, Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Ignored. } \item{r}{ Optional. Numeric vector giving the values of the argument \eqn{r} at which the cross K function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. 
} \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdadot} if they are omitted. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdadot} if they are omitted. Incompatible with \code{sigma}. } \item{lambdaIdot}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdadot} for each pair of points, the first point of type \code{i} and the second of any type. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdadot}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdadot} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. 
} \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Kdot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Kinhom}}. Briefly, given a multitype point process, consider the points without their types, and suppose this unmarked point process has intensity function \eqn{\lambda(u)}{lambda(u)} at spatial locations \eqn{u}. Suppose we place a mass of \eqn{1/\lambda(\zeta)}{1/lambda(z)} at each point \eqn{\zeta}{z} of the process. Then the expected total mass per unit area is 1. The inhomogeneous ``dot-type'' \eqn{K} function \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} equals the expected total mass within a radius \eqn{r} of a point of the process of type \eqn{i}, discounting this point itself. If the process of type \eqn{i} points were independent of the points of other types, then \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} suggest dependence between the points of types \eqn{i} and \eqn{j} for \eqn{j\neq i}{j != i}. 
The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). If \code{i} is missing, it defaults to the first level of the marks factor, \code{i = levels(X$marks)[1]}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. 
The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly the argument \code{lambdadot} should contain estimated values of the intensity of the entire point process. It may be either a pixel image, a numeric vector of length equal to the number of points in \code{X}, a function, or omitted. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdadot} will be ignored. (The two arguments \code{lambdaI}, \code{lambdadot} allow the user to specify two different methods for calculating the intensities of the two kinds of points, while \code{lambdaX} ensures that the same method is used for both kinds of points.) For advanced use only, the optional argument \code{lambdaIdot} is a matrix containing estimated values of the products of these two intensities for each pair of points, the first point of type \code{i} and the second of any type. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must exceed the radius of the largest disc contained in the window. The argument \code{correction} chooses the edge correction as explained e.g. in \code{\link{Kest}}. The pair correlation function can also be applied to the result of \code{Kdot.inhom}; see \code{\link{pcf}}. } \references{ \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. 
The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Kdot}}, \code{\link{Kinhom}}, \code{\link{Kcross.inhom}}, \code{\link{Kmulti.inhom}}, \code{\link{pcf}} } \examples{ # Lansing Woods data woods <- lansing woods <- woods[seq(1,npoints(woods), by=10)] ma <- split(woods)$maple lg <- unmark(woods) # Estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdadot <- density.ppp(lg, sigma=0.15, at="points") K <- Kdot.inhom(woods, "maple", lambdaI=lambdaM, lambdadot=lambdadot) # Equivalent K <- Kdot.inhom(woods, "maple", sigma=0.15) # Fit model fit <- ppm(woods ~ marks * polynom(x,y,2)) K <- Kdot.inhom(woods, "maple", lambdaX=fit, update=FALSE) # synthetic example: type A points have intensity 50, # type B points have intensity 50 + 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) K <- Kdot.inhom(X, "B", lambdaI=lamB, lambdadot=lamdot) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat/man/pairsat.family.Rd0000644000176200001440000000435213160710621016103 0ustar liggesusers\name{pairsat.family} \alias{pairsat.family} \title{Saturated Pairwise Interaction Point Process Family} \description{ An object describing the Saturated Pairwise Interaction family of point process models } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the ``saturated pairwise interaction'' family of point process models. If you need to create a specific interaction model for use in spatial pattern analysis, use the function \code{\link{Saturated}()} or the two existing implementations of models in this family, \code{\link{Geyer}()} and \code{\link{SatPiece}()}. 
Geyer (1999) introduced the ``saturation process'', a modification of the Strauss process in which the total contribution to the potential from each point (from its pairwise interaction with all other points) is trimmed to a maximum value \eqn{c}. This model is implemented in the function \code{\link{Geyer}()}. The present class \code{pairsat.family} is the extension of this saturation idea to all pairwise interactions. Note that the resulting models are no longer pairwise interaction processes - they have interactions of infinite order. \code{pairsat.family} is an object of class \code{"isf"} containing a function \code{pairwise$eval} for evaluating the sufficient statistics of any saturated pairwise interaction point process model in which the original pair potentials take an exponential family form. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{Geyer}} to create the Geyer saturation process. \code{\link{SatPiece}} to create a saturated process with piecewise constant pair potential. \code{\link{Saturated}} to create a more general saturation model. Other families: \code{\link{inforder.family}}, \code{\link{ord.family}}, \code{\link{pairwise.family}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/FmultiInhom.Rd0000644000176200001440000000425313160710571015417 0ustar liggesusers\name{FmultiInhom} \alias{FmultiInhom} \title{ Inhomogeneous Marked F-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{F} function, effectively the cumulative distribution function of the distance from a fixed point to the nearest point in subset \eqn{J}, adjusted for spatially varying intensity. 
} \usage{ FmultiInhom(X, J, lambda = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{J}{ A subset index specifying the subset of points to which distances are measured. Any kind of subset index acceptable to \code{\link{[.ppp}}. } \item{lambda}{ Intensity estimates for each point of \code{X}. A numeric vector of length equal to \code{npoints(X)}. Incompatible with \code{lambdaJ}. } \item{lambdaJ}{ Intensity estimates for each point of \code{X[J]}. A numeric vector of length equal to \code{npoints(X[J])}. Incompatible with \code{lambda}. } \item{lambdamin}{ A lower bound for the intensity, or at least a lower bound for the values in \code{lambdaJ} or \code{lambda[J]}. } \item{\dots}{ Ignored. } \item{r}{ Vector of distance values at which the inhomogeneous \eqn{F} function should be estimated. There is a sensible default. } } \details{ See Cronie and Van Lieshout (2015). } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{F} function. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Ottmar Cronie and Marie-Colette van Lieshout. Rewritten for \pkg{spatstat} by \adrian. 
} \seealso{ \code{\link{Finhom}} } \examples{ X <- amacrine J <- (marks(X) == "off") mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 plot(FmultiInhom(X, J, lambda=lam, lambdamin=lmin)) } \keyword{spatial} \keyword{nonparametric} spatstat/man/dppPowerExp.Rd0000644000176200001440000000257513160710571015446 0ustar liggesusers\name{dppPowerExp} \alias{dppPowerExp} \title{Power Exponential Spectral Determinantal Point Process Model} \description{Function generating an instance of the Power Exponential Spectral determinantal point process model.} \usage{dppPowerExp(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details.} } \details{ The Power Exponential Spectral DPP is defined in (Lavancier, \ifelse{latex}{\out{M\o ller}}{Moller} and Rubak, 2015) The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian \rolf and \ege } \references{ Lavancier, F. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015) Determinantal point process models and statistical inference \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--977. } \examples{ m <- dppPowerExp(lambda=100, alpha=.01, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppMatern}} } spatstat/man/Extract.anylist.Rd0000644000176200001440000000226613160710621016256 0ustar liggesusers\name{Extract.anylist} \alias{[.anylist} \alias{[<-.anylist} \title{Extract or Replace Subset of a List of Things} \description{ Extract or replace a subset of a list of things. 
} \usage{ \method{[}{anylist}(x, i, \dots) \method{[}{anylist}(x, i) <- value } \arguments{ \item{x}{ An object of class \code{"anylist"} representing a list of things. } \item{i}{ Subset index. Any valid subset index in the usual \R sense. } \item{value}{ Replacement value for the subset. } \item{\dots}{Ignored.} } \value{ Another object of class \code{"anylist"}. } \details{ These are the methods for extracting and replacing subsets for the class \code{"anylist"}. The argument \code{x} should be an object of class \code{"anylist"} representing a list of things. See \code{\link{anylist}}. The method replaces a designated subset of \code{x}, and returns an object of class \code{"anylist"}. } \seealso{ \code{\link{anylist}}, \code{\link{plot.anylist}}, \code{\link{summary.anylist}} } \examples{ x <- anylist(A=runif(10), B=runif(10), C=runif(10)) x[1] <- list(A=rnorm(10)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{list} \keyword{manip} spatstat/man/rescale.ppp.Rd0000644000176200001440000000375313160710621015400 0ustar liggesusers\name{rescale.ppp} \alias{rescale.ppp} \title{Convert Point Pattern to Another Unit of Length} \description{ Converts a point pattern dataset to another unit of length. } \usage{ \method{rescale}{ppp}(X, s, unitname) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{unitname}{ Optional. New name for the unit of length. See \code{\link{unitname}}. } } \value{ Another point pattern (of class \code{"ppp"}), representing the same data, but expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates in the point pattern \code{X} (and its window) will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. 
(Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). The result is a point pattern representing the \emph{same} data but re-expressed in a different unit. Mark values are unchanged. If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. } \section{Note}{ The result of this operation is equivalent to the original point pattern. If you want to actually change the coordinates by a linear transformation, producing a point pattern that is not equivalent to the original one, use \code{\link{affine}}. } \seealso{ \code{\link{unitname}}, \code{\link{rescale}}, \code{\link{rescale.owin}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ # Bramble Canes data: 1 unit = 9 metres data(bramblecanes) # convert to metres bram <- rescale(bramblecanes, 1/9) # or equivalently bram <- rescale(bramblecanes) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/Jest.Rd0000644000176200001440000002246213160710571014073 0ustar liggesusers\name{Jest} \alias{Jest} \title{Estimate the J-function} \description{ Estimates the summary function \eqn{J(r)} for a point pattern in a window of arbitrary shape. } \usage{ Jest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{J(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{ the resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{vector of values for the argument \eqn{r} at which \eqn{J(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \code{r}. 
} \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. Character string specifying the choice of edge correction(s) in \code{\link{Fest}} and \code{\link{Gest}}. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{J} has been estimated} \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J(r)} computed from the border-corrected estimates of \eqn{F} and \eqn{G} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{J(r)} computed from the Kaplan-Meier estimates of \eqn{F} and \eqn{G} } \item{han}{the Hanisch-style estimator of \eqn{J(r)} computed from the Hanisch estimate of \eqn{G} and the Chiu-Stoyan estimate of \eqn{F} } \item{un}{the uncorrected estimate of \eqn{J(r)} computed from the uncorrected estimates of \eqn{F} and \eqn{G} } \item{theo}{the theoretical value of \eqn{J(r)} for a stationary Poisson process: identically equal to \eqn{1} } The data frame also has \bold{attributes} \item{F}{ the output of \code{\link{Fest}} for this point pattern, containing three estimates of the empty space function \eqn{F(r)} and an estimate of its hazard function } \item{G}{ the output of \code{\link{Gest}} for this point pattern, containing three estimates of the nearest neighbour distance distribution function \eqn{G(r)} and an estimate of its hazard function } } \note{ Sizeable amounts of memory may be needed during the calculation. } \details{ The \eqn{J} function (Van Lieshout and Baddeley, 1996) of a stationary point process is defined as \deqn{J(r) = \frac{1-G(r)}{1-F(r)} }{ % J(r) = (1-G(r))/(1-F(r))} where \eqn{G(r)} is the nearest neighbour distance distribution function of the point process (see \code{\link{Gest}}) and \eqn{F(r)} is its empty space function (see \code{\link{Fest}}). 
For a completely random (uniform Poisson) point process, the \eqn{J}-function is identically equal to \eqn{1}. Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1} typically indicate spatial clustering or spatial regularity, respectively. The \eqn{J}-function is one of the few characteristics that can be computed explicitly for a wide range of point processes. See Van Lieshout and Baddeley (1996), Baddeley et al (2000), Thonnes and Van Lieshout (1999) for further information. An estimate of \eqn{J} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern. The estimate of \eqn{J(r)} is compared against the constant function \eqn{1}. Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1} may suggest spatial clustering or spatial regularity, respectively. This algorithm estimates the \eqn{J}-function from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The functions \code{\link{Fest}} and \code{\link{Gest}} are called to compute estimates of \eqn{F(r)} and \eqn{G(r)} respectively. These estimates are then combined by simply taking the ratio \eqn{J(r) = (1-G(r))/(1-F(r))}. In fact several different estimates are computed using different edge corrections (Baddeley, 1998). The Kaplan-Meier estimate (returned as \code{km}) is the ratio \code{J = (1-G)/(1-F)} of the Kaplan-Meier estimates of \eqn{1-F} and \eqn{1-G} computed by \code{\link{Fest}} and \code{\link{Gest}} respectively. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"km"}. 
The Hanisch-style estimate (returned as \code{han}) is the ratio \code{J = (1-G)/(1-F)} where \code{F} is the Chiu-Stoyan estimate of \eqn{F} and \code{G} is the Hanisch estimate of \eqn{G}. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"cs"} or \code{"han"}. The reduced-sample or border corrected estimate (returned as \code{rs}) is the same ratio \code{J = (1-G)/(1-F)} of the border corrected estimates. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"rs"} or \code{"border"}. These edge-corrected estimators are slightly biased for \eqn{J}, since they are ratios of approximately unbiased estimators. The logarithm of the Kaplan-Meier estimate is exactly unbiased for \eqn{\log J}{log J}. The uncorrected estimate (returned as \code{un} and computed only if \code{correction} includes \code{"none"}) is the ratio \code{J = (1-G)/(1-F)} of the uncorrected (``raw'') estimates of the survival functions of \eqn{F} and \eqn{G}, which are the empirical distribution functions of the empty space distances \code{Fest(X,\dots)$raw} and of the nearest neighbour distances \code{Gest(X,\dots)$raw}. The uncorrected estimates of \eqn{F} and \eqn{G} are severely biased. However the uncorrected estimate of \eqn{J} is approximately unbiased (if the process is close to Poisson); it is insensitive to edge effects, and should be used when edge effects are severe (see Baddeley et al, 2000). The algorithm for \code{\link{Fest}} uses two discrete approximations which are controlled by the parameter \code{eps} and by the spacing of values of \code{r} respectively. See \code{\link{Fest}} for details. First-time users are strongly advised not to specify these arguments. Note that the value returned by \code{Jest} includes the output of \code{\link{Fest}} and \code{\link{Gest}} as attributes (see the last example below). 
If the user is intending to compute the \code{F,G} and \code{J} functions for the point pattern, it is only necessary to call \code{Jest}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37--78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263--292. Baddeley, A., Kerscher, M., Schladitz, K. and Scott, B.T. Estimating the \emph{J} function without edge correction. \emph{Statistica Neerlandica} \bold{54} (2000) 315--328. Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344--371. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Thonnes, E. and Van Lieshout, M.N.M, A comparative study on the power of Van Lieshout and Baddeley's J-function. \emph{Biometrical Journal} \bold{41} (1999) 721--734. Van Lieshout, M.N.M. and Baddeley, A.J. A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50} (1996) 344--361. 
} \seealso{ \code{\link{Jinhom}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ data(cells) J <- Jest(cells, 0.01) plot(J, main="cells data") # values are far above J = 1, indicating regular pattern data(redwood) J <- Jest(redwood, 0.01, legendpos="center") plot(J, main="redwood data") # values are below J = 1, indicating clustered pattern } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/spatstat-deprecated.Rd0000644000176200001440000000621213160710571017122 0ustar liggesusers\name{spatstat-deprecated} \alias{as.psp.owin} \alias{bounding.box} \alias{clf.test} \alias{conspire} \alias{eval.hyper} \alias{ksmooth.ppp} \alias{smooth.ppp} \alias{smooth.fv} \alias{smooth.msr} \alias{ks.test.ppm} \alias{mpl} \alias{rtoro} \alias{superimposePSP} \alias{which.max.im} \alias{delaunay.distance} \alias{delaunay.network} \alias{dirichlet.edges} \alias{dirichlet.network} \alias{dirichlet.vertices} \alias{dirichlet.weights} \alias{circumradius} \alias{circumradius.owin} \alias{circumradius.ppp} \alias{circumradius.linnet} \title{Deprecated spatstat functions} \description{ Deprecated spatstat functions. } \usage{ \method{as.psp}{owin}(x, \dots, window=NULL, check=spatstat.options("checksegments"), fatal=TRUE) bounding.box(\dots) clf.test(\dots) conspire(\dots) ksmooth.ppp(x, sigma, \dots, edge=TRUE) smooth.ppp(X, ..., weights = rep(1, npoints(X)), at="pixels") smooth.fv(x, which = "*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) smooth.msr(X, ...) 
ks.test.ppm(\dots) mpl(Q, trend, interaction, data, correction, rbord, use.gam) rtoro(X, which=NULL, radius=NULL, width=NULL, height=NULL) eval.hyper(e, h, simplify=TRUE, ee=NULL) superimposePSP(\dots, W=NULL, check=TRUE) which.max.im(x) delaunay.distance(\dots) delaunay.network(\dots) dirichlet.edges(\dots) dirichlet.network(\dots) dirichlet.vertices(\dots) dirichlet.weights(\dots) circumradius(x, \dots) \method{circumradius}{owin}(x, \dots) \method{circumradius}{ppp}(x, \dots) \method{circumradius}{linnet}(x, \dots) } \details{ These functions are deprecated, and will eventually be deleted from the \pkg{spatstat} package. \code{as.psp.owin} has been replaced by \code{\link{edges}}. \code{bounding.box} has been replaced by \code{\link{boundingbox}}. \code{clf.test} has been renamed \code{\link{dclf.test}}. \code{conspire} has been replaced by \code{\link{plot.fv}}. \code{ksmooth.ppp} has been replaced by \code{\link{density.ppp}}. \code{smooth.ppp} has been replaced by \code{\link{Smooth.ppp}}. \code{smooth.fv} has been replaced by \code{\link{Smooth.fv}}. \code{smooth.msr} has been replaced by \code{\link{Smooth.msr}}. \code{mpl} has been replaced by \code{\link{ppm}}. \code{ks.test.ppm} has been replaced by \code{\link{cdf.test}}. \code{rtoro} has been replaced by \code{\link{rshift}}. \code{eval.hyper} has been replaced by \code{\link{with.hyperframe}}. \code{superimposePSP} has been replaced by \code{\link{superimpose.psp}} which is a method for the generic function \code{\link{superimpose}}. \code{which.max.im(x)} is replaced by \code{\link{im.apply}(x, which.max)}. \code{delaunay.distance} is replaced by \code{delaunayDistance}. \code{delaunay.network} is replaced by \code{delaunayNetwork}. \code{dirichlet.edges} is replaced by \code{dirichletEdges}. \code{dirichlet.network} is replaced by \code{dirichletNetwork}. \code{dirichlet.vertices} is replaced by \code{dirichletVertices}. \code{dirichlet.weights} is replaced by \code{dirichletWeights}. 
\code{circumradius} is replaced by the more appropriately named \code{boundingradius}. } \keyword{internal} spatstat/man/markcorr.Rd0000644000176200001440000002606313160710621015003 0ustar liggesusers\name{markcorr} \alias{markcorr} \title{ Mark Correlation Function } \description{ Estimate the marked correlation function of a marked point pattern. } \usage{ markcorr(X, f = function(m1, m2) { m1 * m2}, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, weights=NULL, f1=NULL, normalise=TRUE, fargs=NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{f}{Optional. Test function \eqn{f} used in the definition of the mark correlation function. An \R function with at least two arguments. There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{weights}{ Optional numeric vector of weights for each data point in \code{X}. } \item{f1}{ An alternative to \code{f}. If this argument is given, then \eqn{f} is assumed to take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}. 
} \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{fargs}{ Optional. A list of extra arguments to be passed to the function \code{f} or \code{f1}. } } \value{ A function value table (object of class \code{"fv"}) or a list of function value tables, one for each column of marks. An object of class \code{"fv"} (see \code{\link{fv.object}}) is essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} has been estimated } \item{theo}{the theoretical value of \eqn{k_f(r)}{k[f](r)} when the marks attached to different points are independent, namely 1 } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the mark correlation function \eqn{k_f(r)}{k[f](r)} obtained by the edge corrections named. } \details{ By default, this command calculates an estimate of Stoyan's mark correlation \eqn{k_{mm}(r)}{k[mm](r)} for the point pattern. Alternatively if the argument \code{f} or \code{f1} is given, then it calculates Stoyan's generalised mark correlation \eqn{k_f(r)}{k[f](r)} with test function \eqn{f}. Theoretical definitions are as follows (see Stoyan and Stoyan (1994, p. 262)): \itemize{ \item For a point process \eqn{X} with numeric marks, Stoyan's mark correlation function \eqn{k_{mm}(r)}{k[mm](r)}, is \deqn{ k_{mm}(r) = \frac{E_{0u}[M(0) M(u)]}{E[M,M']} }{ k[mm](r) = E[0u](M(0) * M(u))/E(M * M') } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}, and where \eqn{M(0),M(u)} denote the marks attached to these two points. On the denominator, \eqn{M,M'} are random marks drawn independently from the marginal distribution of marks, and \eqn{E} is the usual expectation. 
\item For a multitype point process \eqn{X}, the mark correlation is \deqn{ k_{mm}(r) = \frac{P_{0u}[M(0) = M(u)]}{P[M = M']} }{ k[mm](r) = P[0u](M(0) = M(u))/P(M = M') } where \eqn{P} and \eqn{P_{0u}}{P[0u]} denote the probability and conditional probability. \item The \emph{generalised} mark correlation function \eqn{k_f(r)}{k[f](r)} of a marked point process \eqn{X}, with test function \eqn{f}, is \deqn{ k_f(r) = \frac{E_{0u}[f(M(0),M(u))]}{E[f(M,M')]} }{ k[f](r) = E[0u](f(M(0),M(u)))/E(f(M,M')) } } The test function \eqn{f} is any function \eqn{f(m_1,m_2)}{f(m1,m2)} with two arguments which are possible marks of the pattern, and which returns a nonnegative real value. Common choices of \eqn{f} are: for continuous nonnegative real-valued marks, \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2} for discrete marks (multitype point patterns), \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)} and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)}, \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2)}. Note that \eqn{k_f(r)}{k[f](r)} is not a ``correlation'' in the usual statistical sense. It can take any nonnegative real value. The value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{k_f(r) \equiv 1}{k[f](r) = 1}. The interpretation of values larger or smaller than 1 depends on the choice of function \eqn{f}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern. The argument \code{f} determines the function to be applied to pairs of marks. It has a sensible default, which depends on the kind of marks in \code{X}. If the marks are numeric values, then \code{f <- function(m1, m2) { m1 * m2}} computes the product of two marks. If the marks are a factor (i.e. 
if \code{X} is a multitype point pattern) then \code{f <- function(m1, m2) { m1 == m2}} yields the value 1 when the two marks are equal, and 0 when they are unequal. These are the conventional definitions for numerical marks and multitype points respectively. The argument \code{f} may be specified by the user. It must be an \R function, accepting two arguments \code{m1} and \code{m2} which are vectors of equal length containing mark values (of the same type as the marks of \code{X}). (It may also take additional arguments, passed through \code{fargs}). It must return a vector of numeric values of the same length as \code{m1} and \code{m2}. The values must be non-negative, and \code{NA} values are not permitted. Alternatively the user may specify the argument \code{f1} instead of \code{f}. This indicates that the test function \eqn{f} should take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)} where \eqn{f_1(u)}{f1(u)} is given by the argument \code{f1}. The argument \code{f1} should be an \R function with at least one argument. (It may also take additional arguments, passed through \code{fargs}). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{k_f(r)}{k[f](r)} is estimated. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. 
} } Note that the estimator assumes the process is stationary (spatially homogeneous). The numerator and denominator of the mark correlation function (in the expression above) are estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } If \code{normalise=FALSE} then the algorithm will compute only the numerator \deqn{ c_f(r) = E_{0u} f(M(0),M(u)) }{ c[f](r) = E[0u] f(M(0),M(u)) } of the expression for the mark correlation function. } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ Mark variogram \code{\link{markvario}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. Mark cross-correlation function \code{\link{markcrosscorr}} for point patterns with several columns of marks. \code{\link{Kmark}} to estimate a cumulative function related to the mark correlation function. 
} \examples{ # CONTINUOUS-VALUED MARKS: # (1) Spruces # marks represent tree diameter # mark correlation function ms <- markcorr(spruces) plot(ms) # (2) simulated data with independent marks X <- rpoispp(100) X <- X \%mark\% runif(npoints(X)) \dontrun{ Xc <- markcorr(X) plot(Xc) } # MULTITYPE DATA: # Hughes' amacrine data # Cells marked as 'on'/'off' # (3) Kernel density estimate with Epanechnikov kernel # (as proposed by Stoyan & Stoyan) M <- markcorr(amacrine, function(m1,m2) {m1==m2}, correction="translate", method="density", kernel="epanechnikov") plot(M) # Note: kernel="epanechnikov" comes from help(density) # (4) Same again with explicit control over bandwidth \dontrun{ M <- markcorr(amacrine, correction="translate", method="density", kernel="epanechnikov", bw=0.02) # see help(density) for correct interpretation of 'bw' } \testonly{ niets <- markcorr(amacrine, function(m1,m2){m1 == m2}, method="loess") if(require(sm)) niets <- markcorr(X, correction="isotropic", method="smrep", hmult=2) } # weighted mark correlation Y <- subset(betacells, select=type) a <- marks(betacells)$area v <- markcorr(Y, weights=a) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/split.msr.Rd0000644000176200001440000000476513160710621015123 0ustar liggesusers\name{split.msr} \alias{split.msr} \title{ Divide a Measure into Parts } \description{ Decomposes a measure into components, each component being a measure. } \usage{ \method{split}{msr}(x, f, drop = FALSE, \dots) } \arguments{ \item{x}{ Measure (object of class \code{"msr"}) to be decomposed. } \item{f}{ Factor or tessellation determining the decomposition. Argument passed to \code{\link{split.ppp}}. See Details. } \item{drop}{ Logical value indicating whether empty components should be retained in the list (\code{drop=FALSE}, the default) or deleted (\code{drop=TRUE}). } \item{\dots}{ Ignored. } } \details{ An object of class \code{"msr"} represents a signed (i.e. 
real-valued) or vector-valued measure in the \pkg{spatstat} package. See \code{\link{msr}} for explanation. This function is a method for the generic \code{\link[base]{split}}. It divides the measure \code{x} into components, each of which is a measure. A measure \code{x} is represented in \pkg{spatstat} by a finite set of sample points with values attached to them. The function \code{split.msr} divides this pattern of sample points into several sub-patterns of points using \code{\link{split.ppp}}. For each sub-pattern, the values attached to these points are extracted from \code{x}, and these values and sample points determine a measure, which is a component or piece of the original \code{x}. The argument \code{f} can be missing, if the sample points of \code{x} are multitype points. In this case, \code{x} represents a measure associated with marked spatial locations, and the command \code{split(x)} separates \code{x} into a list of component measures, one for each possible mark. Otherwise the argument \code{f} is passed to \code{\link{split.ppp}}. It should be either a factor (of length equal to the number of sample points of \code{x}) or a tessellation (object of class \code{"tess"} representing a division of space into tiles) as documented under \code{\link{split.ppp}}. } \value{ A list, each of whose entries is a measure (object of class \code{"msr"}). 
} \author{ \spatstatAuthors } \seealso{ \code{\link{msr}}, \code{\link{[.msr}}, \code{\link{with.msr}} } \examples{ ## split by tessellation a <- residuals(ppm(cells ~ x)) aa <- split(a, dirichlet(runifpoint(4))) aa sapply(aa, integral) ## split by type of point b <- residuals(ppm(amacrine ~ marks + x)) bb <- split(b) bb } \keyword{spatial} \keyword{manip} spatstat/man/detpointprocfamilyfun.Rd0000644000176200001440000001473513160710571017617 0ustar liggesusers\name{detpointprocfamilyfun} \alias{detpointprocfamilyfun} \title{Construct a New Determinantal Point Process Model Family Function} \description{ Function to ease the implementation of a new determinantal point process model family. } \usage{detpointprocfamilyfun(kernel = NULL, specden = NULL, basis = "fourierbasis", convkernel = NULL, Kfun = NULL, valid = NULL, intensity = NULL, dim = 2, name = "User-defined", isotropic = TRUE, range = NULL, parbounds = NULL, specdenrange = NULL, startpar = NULL, \dots) } \arguments{ \item{kernel}{ function specifying the kernel. May be set to \code{NULL}. See Details. } \item{specden}{ function specifying the spectral density. May be set to \code{NULL}. See Details. } \item{basis}{ character string giving the name of the basis. Defaults to the Fourier basis. See Details. } \item{convkernel}{ function specifying the k-fold auto-convolution of the kernel. May be set to \code{NULL}. See Details. } \item{Kfun}{ function specifying the K-function. May be set to \code{NULL}. See Details. } \item{valid}{ function determining whether a given set of parameter values yields a valid model. May be set to \code{NULL}. See Examples. } \item{intensity}{ character string specifying which parameter is the intensity in the model family. Should be \code{NULL} if the model family has no intensity parameter. } \item{dim}{ character strig specifying which parameter is the dimension of the state space in this model family (if any). Alternatively a positive integer specifying the dimension. 
} \item{name}{ character string giving the name of the model family used for printing. } \item{isotropic}{ logical value indicating whether or not the model is isotropic. } \item{range}{ function determining the interaction range of the model. May be set to \code{NULL}. See Examples. } \item{parbounds}{ function determining the bounds for each model parameter when all other parameters are fixed. May be set to \code{NULL}. See Examples. } \item{specdenrange}{ function specifying the range of the spectral density if it is finite (only the case for very few models). May be set to \code{NULL}. } \item{startpar}{ function determining starting values for parameters in any estimation algorithm. May be set to \code{NULL}. See Examples. } \item{\dots}{ Additional arguments for inclusion in the returned model object. These are not checked in any way. } } \details{ A determinantal point process family is specified either in terms of a kernel (a positive semi-definite function, i.e. a covariance function) or a spectral density, or preferably both. One of these can be \code{NULL} if it is unknown, but not both. When both are supplied they must have the same arguments. The first argument gives the values at which the function should be evaluated. In general the function should accept an \eqn{n} by \eqn{d} matrix or \code{data.frame} specifying \eqn{n (>=0)} points in dimension \eqn{d}. If the model is isotropic it only needs to accept a non-negative valued numeric of length \eqn{n}. (In fact there is currently almost no support for non-isotropic models, so it is recommended not to specify such a model.) The name of this argument could be chosen freely, but \eqn{x} is recommended. The remaining arguments are the parameters of the model. If one of these is an intensity parameter the name should be mentioned in the argument \code{intensity}. If one of these specifies the dimension of the model it should be mentioned in the argument \code{dim}. 
The kernel and spectral density is with respect to a specific set of basis functions, which is typically the Fourier basis. However this can be changed to any user-supplied basis in the argument \code{basis}. If such an alternative is supplied it must be the name of a function expecting the same arguments as \code{\link{fourierbasis}} and returning the results in the same form as \code{\link{fourierbasis}}. If supplied, the arguments of convkernel must obey the following: first argument should be like the first argument of kernel and/or specden (see above). The second argument (preferably called \code{k}) should be the positive integer specifying how many times the auto-convolution is done (i.e. the \eqn{k} in \eqn{k}-fold auto-convolution). The remaining arguments must agree with the arguments of \code{kernel} and/or \code{specden} (see above). If supplied, the arguments of \code{Kfun} should be like the arguments of \code{kernel} and \code{specden} (see above). } \author{ \adrian \rolf and \ege } \examples{ ## Example of how to define the Gauss family exGauss <- detpointprocfamilyfun( name="Gaussian", kernel=function(x, lambda, alpha, d){ lambda*exp(-(x/alpha)^2) }, specden=function(x, lambda, alpha, d){ lambda * (sqrt(pi)*alpha)^d * exp(-(x*alpha*pi)^2) }, convkernel=function(x, k, lambda, alpha, d){ logres <- k*log(lambda*pi*alpha^2) - log(pi*k*alpha^2) - x^2/(k*alpha^2) return(exp(logres)) }, Kfun = function(x, lambda, alpha, d){ pi*x^2 - pi*alpha^2/2*(1-exp(-2*x^2/alpha^2)) }, valid=function(lambda, alpha, d){ lambda>0 && alpha>0 && d>=1 && lambda <= (sqrt(pi)*alpha)^(-d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha*sqrt(-log(sqrt(1-bound)))) }, parbounds=function(name, lambda, alpha, d){ switch(name, lambda = c(0, (sqrt(pi)*alpha)^(-d)), alpha = c(0, 
lambda^(-1/d)/sqrt(pi)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" \%in\% model$freepar){ lambda <- intensity(X) rslt <- c(rslt, "lambda" = lambda) model <- update(model, lambda=lambda) } if("alpha" \%in\% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2] rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) exGauss m <- exGauss(lambda=100, alpha=.05, d=2) m } \keyword{spatial} \keyword{models} spatstat/man/plot.msr.Rd0000644000176200001440000000702713160710621014740 0ustar liggesusers\name{plot.msr} \alias{plot.msr} \title{Plot a Signed or Vector-Valued Measure} \description{ Plot a signed measure or vector-valued measure. } \usage{ \method{plot}{msr}(x, \dots, add = FALSE, how = c("image", "contour", "imagecontour"), main = NULL, do.plot = TRUE, multiplot = TRUE, massthresh = 0, equal.markscale = FALSE, equal.ribbon = FALSE) } \arguments{ \item{x}{ The signed or vector measure to be plotted. An object of class \code{"msr"} (see \code{\link{msr}}). } \item{\dots}{ Extra arguments passed to \code{\link{Smooth.ppp}} to control the interpolation of the continuous density component of \code{x}, or passed to \code{\link{plot.im}} or \code{\link{plot.ppp}} to control the appearance of the plot. } \item{add}{ Logical flag; if \code{TRUE}, the graphics are added to the existing plot. If \code{FALSE} (the default) a new plot is initialised. } \item{how}{ String indicating how to display the continuous density component. } \item{main}{ String. Main title for the plot. } \item{do.plot}{ Logical value determining whether to actually perform the plotting. } \item{multiplot}{ Logical value indicating whether it is permissible to display a plot with multiple panels (representing different components of a vector-valued measure, or different types of points in a multitype measure.) } \item{massthresh}{ Threshold for plotting atoms. A single numeric value or \code{NULL}. 
If \code{massthresh=0} (the default) then only atoms with nonzero mass will be plotted. If \code{massthresh > 0} then only atoms whose absolute mass exceeds \code{massthresh} will be plotted. If \code{massthresh=NULL}, then all atoms of the measure will be plotted. } \item{equal.markscale}{ Logical value indicating whether different panels should use the same symbol map (to represent the masses of atoms of the measure). } \item{equal.ribbon}{ Logical value indicating whether different panels should use the same colour map (to represent the density values in the diffuse component of the measure). } } \value{ (Invisible) colour map (object of class \code{"colourmap"}) for the colour image. } \details{ This is the \code{plot} method for the class \code{"msr"}. The continuous density component of \code{x} is interpolated from the existing data by \code{\link{Smooth.ppp}}, and then displayed as a colour image by \code{\link{plot.im}}. The discrete atomic component of \code{x} is then superimposed on this image by plotting the atoms as circles (for positive mass) or squares (for negative mass) by \code{\link{plot.ppp}}. By default, atoms with zero mass are not plotted at all. To smooth both the discrete and continuous components, use \code{\link{Smooth.msr}}. Use the argument \code{clipwin} to restrict the plot to a subset of the full data. To remove atoms with tiny masses, use the argument \code{massthresh}. } \seealso{ \code{\link{msr}}, \code{\link{Smooth.ppp}}, \code{\link{Smooth.msr}}, \code{\link{plot.im}}, \code{\link{plot.ppp}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") plot(rp) plot(rs) plot(rs, how="contour") } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{hplot} spatstat/man/as.layered.Rd0000644000176200001440000000375713160710571015223 0ustar liggesusers\name{as.layered} \alias{as.layered} \alias{as.layered.default} \alias{as.layered.ppp} \alias{as.layered.splitppp} \alias{as.layered.solist} \alias{as.layered.listof} \alias{as.layered.msr} \title{Convert Data To Layered Object} \description{ Converts spatial data into a layered object. } \usage{ as.layered(X) \method{as.layered}{default}(X) \method{as.layered}{ppp}(X) \method{as.layered}{splitppp}(X) \method{as.layered}{solist}(X) \method{as.layered}{listof}(X) \method{as.layered}{msr}(X) } \arguments{ \item{X}{ Some kind of spatial data. } } \value{ An object of class \code{"layered"} (see \code{\link{layered}}). } \details{ This function converts the object \code{X} into an object of class \code{"layered"}. The argument \code{X} should contain some kind of spatial data such as a point pattern, window, or pixel image. If \code{X} is a simple object then it will be converted into a \code{layered} object containing only one layer which is equivalent to \code{X}. If \code{X} can be interpreted as consisting of multiple layers of data, then the result will be a \code{layered} object consisting of these separate layers of data. \itemize{ \item if \code{X} is a list of class \code{"listof"} or \code{"solist"}, then \code{as.layered(X)} consists of several layers, one for each entry in the list \code{X}; \item if \code{X} is a multitype point pattern, then \code{as.layered(X)} consists of several layers, each containing the sub-pattern consisting of points of one type; \item if \code{X} is a vector-valued measure, then \code{as.layered(X)} consists of several layers, each containing a scalar-valued measure. 
} } \seealso{ \code{\link{layered}}, \code{\link{split.ppp}} } \examples{ as.layered(cells) as.layered(amacrine) P <- rpoispp(100) fit <- ppm(P ~ x+y) rs <- residuals(fit, type="score") as.layered(rs) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/summary.anylist.Rd0000644000176200001440000000156013160710621016335 0ustar liggesusers\name{summary.anylist} \alias{summary.anylist} \title{Summary of a List of Things} \description{ Prints a useful summary of each item in a list of things. } \usage{ \method{summary}{anylist}(object, \dots) } \arguments{ \item{object}{ An object of class \code{"anylist"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{summary}}. An object of the class \code{"anylist"} is effectively a list of things which are intended to be treated in a similar way. See \code{\link{anylist}}. This function extracts a useful summary of each of the items in the list. } \seealso{ \code{\link{anylist}}, \code{\link{summary}}, \code{\link{plot.anylist}} } \examples{ x <- anylist(A=runif(10), B=runif(10), C=runif(10)) summary(x) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/bind.fv.Rd0000644000176200001440000000675213160710571014520 0ustar liggesusers\name{bind.fv} \alias{bind.fv} \alias{cbind.fv} \title{ Combine Function Value Tables } \description{ Advanced Use Only. Combine objects of class \code{"fv"}, or glue extra columns of data onto an existing \code{"fv"} object. } \usage{ \method{cbind}{fv}(...) bind.fv(x, y, labl = NULL, desc = NULL, preferred = NULL, clip=FALSE) } \arguments{ \item{\dots}{ Any number of arguments, which are objects of class \code{"fv"}. } \item{x}{ An object of class \code{"fv"}. } \item{y}{ Either a data frame or an object of class \code{"fv"}. } \item{labl}{ Plot labels (see \code{\link{fv}}) for columns of \code{y}. A character vector. } \item{desc}{ Descriptions (see \code{\link{fv}}) for columns of \code{y}. 
A character vector. } \item{preferred}{ Character string specifying the column which is to be the new recommended value of the function. } \item{clip}{ Logical value indicating whether each object must have exactly the same domain, that is, the same sequence of values of the function argument (\code{clip=FALSE}, the default) or whether objects with different domains are permissible and will be restricted to a common domain (\code{clip=TRUE}). } } \details{ This documentation is provided for experienced programmers who want to modify the internal behaviour of \pkg{spatstat}. The function \code{cbind.fv} is a method for the generic \R function \code{\link{cbind}}. It combines any number of objects of class \code{"fv"} into a single object of class \code{"fv"}. The objects must be compatible, in the sense that they have identical values of the function argument. The function \code{bind.fv} is a lower level utility which glues additional columns onto an existing object \code{x} of class \code{"fv"}. It has two modes of use: \itemize{ \item If the additional dataset \code{y} is an object of class \code{"fv"}, then \code{x} and \code{y} must be compatible as described above. Then the columns of \code{y} that contain function values will be appended to the object \code{x}. \item Alternatively if \code{y} is a data frame, then \code{y} must have the same number of rows as \code{x}. All columns of \code{y} will be appended to \code{x}. } The arguments \code{labl} and \code{desc} provide plot labels and description strings (as described in \code{\link{fv}}) for the \emph{new} columns. If \code{y} is an object of class \code{"fv"} then \code{labl} and \code{desc} are optional, and default to the relevant entries in the object \code{y}. If \code{y} is a data frame then \code{labl} and \code{desc} must be provided. } \value{ An object of class \code{"fv"}. } \author{ \spatstatAuthors. 
} \examples{ data(cells) K1 <- Kest(cells, correction="border") K2 <- Kest(cells, correction="iso") # remove column 'theo' to avoid duplication K2 <- K2[, names(K2) != "theo"] cbind(K1, K2) bind.fv(K1, K2, preferred="iso") # constrain border estimate to be monotonically increasing bm <- cumsum(c(0, pmax(0, diff(K1$border)))) bind.fv(K1, data.frame(bmono=bm), "\%s[bmo](r)", "monotone border-corrected estimate of \%s", "bmono") } \seealso{ \code{\link{fv}}, \code{\link{with.fv}}. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{fvnames}, \code{fvnames<-}, \code{tweak.fv.entry} and \code{rebadge.fv}. } \keyword{spatial} \keyword{attribute} spatstat/man/perimeter.Rd0000644000176200001440000000221613160710621015151 0ustar liggesusers\name{perimeter} \Rdversion{1.1} \alias{perimeter} \title{ Perimeter Length of Window } \description{ Computes the perimeter length of a window } \usage{ perimeter(w) } \arguments{ \item{w}{ A window (object of class \code{"owin"}) or data that can be converted to a window by \code{\link{as.owin}}. } } \details{ This function computes the perimeter (length of the boundary) of the window \code{w}. If \code{w} is a rectangle or a polygonal window, the perimeter is the sum of the lengths of the edges of \code{w}. If \code{w} is a mask, it is first converted to a polygonal window using \code{\link{as.polygonal}}, then staircase edges are removed using \code{\link{simplify.owin}}, and the perimeter of the resulting polygon is computed. } \value{ A numeric value giving the perimeter length of the window. 
} \seealso{ \code{\link{area.owin}} \code{\link{diameter.owin}}, \code{\link{owin.object}}, \code{\link{as.owin}} } \examples{ perimeter(square(3)) data(letterR) perimeter(letterR) if(interactive()) print(perimeter(as.mask(letterR))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/fitin.Rd0000644000176200001440000000472313165362536014310 0ustar liggesusers\name{fitin.ppm} \alias{fitin} \alias{fitin.ppm} \alias{fitin.profilepl} \title{Extract the Interaction from a Fitted Point Process Model} \description{ Given a point process model that has been fitted to point pattern data, this function extracts the interpoint interaction part of the model as a separate object. } \usage{ fitin(object) \method{fitin}{ppm}(object) \method{fitin}{profilepl}(object) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"} or \code{"profilepl"}). } } \details{ An object of class \code{"ppm"} describes a fitted point process model. It contains information about the original data to which the model was fitted, the spatial trend that was fitted, the interpoint interaction that was fitted, and other data. See \code{\link{ppm.object}}) for details of this class. The function \code{fitin} extracts from this model the information about the fitted interpoint interaction only. The information is organised as an object of class \code{"fii"} (fitted interpoint interaction). This object can be printed or plotted. Users may find this a convenient way to plot the fitted interpoint interaction term, as shown in the Examples. For a pairwise interaction, the plot of the fitted interaction shows the pair interaction function (the contribution to the probability density from a pair of points as a function of the distance between them). For a higher-order interaction, the plot shows the strongest interaction (the value most different from 1) that could ever arise at the given distance. 
The fitted interaction coefficients can also be extracted from this object using \code{\link{coef}}. } \value{ An object of class \code{"fii"} representing the fitted interpoint interaction. This object can be printed and plotted. } \author{ \spatstatAuthors. } \seealso{ Methods for handling fitted interactions: \code{\link{methods.fii}}, \code{\link{reach.fii}}, \code{\link{as.interact.fii}}. Background: \code{\link{ppm}}, \code{\link{ppm.object}}. } \examples{ # unmarked model <- ppm(swedishpines ~1, PairPiece(seq(3,19,by=4))) f <- fitin(model) f plot(f) # extract fitted interaction coefficients coef(f) # multitype # fit the stationary multitype Strauss process to `amacrine' r <- 0.02 * matrix(c(1,2,2,1), nrow=2,ncol=2) model <- ppm(amacrine ~1, MultiStrauss(r)) f <- fitin(model) f plot(f) } \keyword{spatial} \keyword{models} spatstat/man/Hardcore.Rd0000644000176200001440000000522313160710571014711 0ustar liggesusers\name{Hardcore} \alias{Hardcore} \title{The Hard Core Point Process Model} \description{ Creates an instance of the hard core point process model which can then be fitted to point pattern data. } \usage{ Hardcore(hc=NA) } \arguments{ \item{hc}{The hard core distance} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hard core process with hard core distance \code{hc}. } \details{ A hard core process with hard core distance \eqn{h} and abundance parameter \eqn{\beta}{beta} is a pairwise interaction point process in which distinct points are not allowed to come closer than a distance \eqn{h} apart. The probability density is zero if any pair of points is closer than \eqn{h} units apart, and otherwise equals \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{\alpha}{alpha} is the normalising constant. 
The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hard core process pairwise interaction is yielded by the function \code{Hardcore()}. See the examples below. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiHard}}, \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. } \examples{ Hardcore(0.02) # prints a sensible description of itself \dontrun{ ppm(cells, ~1, Hardcore(0.05)) # fit the stationary hard core process to `cells' } # estimate hard core radius from data ppm(cells, ~1, Hardcore()) ppm(cells, ~1, Hardcore) ppm(cells, ~ polynom(x,y,3), Hardcore(0.05)) # fit a nonstationary hard core process # with log-cubic polynomial trend } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/harmonise.fv.Rd0000644000176200001440000000575413160710621015566 0ustar liggesusers\name{harmonise.fv} \alias{harmonise.fv} \alias{harmonize.fv} \title{Make Function Tables Compatible} \description{ Convert several objects of class \code{"fv"} to the same values of the function argument. } \usage{ \method{harmonise}{fv}(\dots, strict=FALSE) \method{harmonize}{fv}(\dots, strict=FALSE) } \arguments{ \item{\dots}{ Any number of function tables (objects of class \code{"fv"}). 
} \item{strict}{ Logical. If \code{TRUE}, a column of data will be deleted if columns of the same name do not appear in every object. } } \details{ A function value table (object of class \code{"fv"}) is essentially a data frame giving the values of a function \eqn{f(x)} (or several alternative estimates of this value) at equally-spaced values of the function argument \eqn{x}. The command \code{\link{harmonise}} is generic. This is the method for objects of class \code{"fv"}. This command makes any number of \code{"fv"} objects compatible, in the loose sense that they have the same sequence of values of \eqn{x}. They can then be combined by \code{\link{cbind.fv}}, but not necessarily by \code{\link{eval.fv}}. All arguments \code{\dots} must be function value tables (objects of class \code{"fv"}). The result will be a list, of length equal to the number of arguments \code{\dots}, containing new versions of each of these functions, converted to a common sequence of \eqn{x} values. If the arguments were named (\code{name=value}) then the return value also carries these names. The range of \eqn{x} values in the resulting functions will be the intersection of the ranges of \eqn{x} values in the original functions. The spacing of \eqn{x} values in the resulting functions will be the finest (narrowest) of the spacings of the \eqn{x} values in the original functions. Function values are interpolated using \code{\link[stats]{approxfun}}. If \code{strict=TRUE}, each column of data will be retained only if a column of the same name appears in all of the arguments \code{\dots}. This ensures that the resulting objects are strictly compatible in the sense of \code{\link{compatible.fv}}, and can be combined using \code{\link{eval.fv}} or \code{\link{collapse.fv}}. If \code{strict=FALSE} (the default), this does not occur, and then the resulting objects are \bold{not} guaranteed to be compatible in the sense of \code{\link{compatible.fv}}. 
} \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are objects of class \code{"fv"}. If the arguments were named (\code{name=value}) then the return value also carries these names. } \author{\adrian , \rolf and \ege. } \examples{ H <- harmonise(K=Kest(cells), G=Gest(cells)) H \dontrun{ ## generates a warning about duplicated columns try(cbind(H$K, H$G)) } } \seealso{ \code{\link{fv.object}}, \code{\link{cbind.fv}}, \code{\link{eval.fv}}, \code{\link{compatible.fv}} } \keyword{spatial} \keyword{manip} spatstat/man/pppmatching.object.Rd0000644000176200001440000000663213160710621016742 0ustar liggesusers\name{pppmatching.object} \alias{pppmatching.object} %DoNotExport \title{Class of Point Matchings} \description{ A class \code{"pppmatching"} to represent a matching of two planar point patterns. Optionally includes information about the construction of the matching and its associated distance between the point patterns. } \details{ This class represents a (possibly weighted and incomplete) matching between two planar point patterns (objects of class \code{"ppp"}). A matching can be thought of as a bipartite weighted graph where the vertices are given by the two point patterns and edges of positive weights are drawn each time a point of the first point pattern is "matched" with a point of the second point pattern. 
If \code{m} is an object of type \code{pppmatching}, it contains the following elements \tabular{ll}{ \code{pp1, pp2} \tab the two point patterns to be matched (vertices) \cr \code{matrix} \tab a matrix specifying which points are matched \cr \tab and with what weights (edges) \cr \code{type} \tab (optional) a character string for the type of \cr \tab the matching (one of \code{"spa"}, \code{"ace"} or \code{"mat"}) \cr \code{cutoff} \tab (optional) cutoff value for interpoint distances \cr \code{q} \tab (optional) the order for taking averages of \cr \tab interpoint distances \cr \code{distance} \tab (optional) the distance associated with the matching } The element \code{matrix} is a "generalized adjacency matrix". The numbers of rows and columns match the cardinalities of the first and second point patterns, respectively. The \code{[i,j]}-th entry is positive if the \code{i}-th point of \code{X} and the \code{j}-th point of \code{Y} are matched (zero otherwise) and its value then gives the corresponding weight of the match. For an unweighted matching all the weights are set to \eqn{1}. The optional elements are for saving details about matchings in the context of optimal point matching techniques. \code{type} can be one of \code{"spa"} (for "subpattern assignment"), \code{"ace"} (for "assignment only if cardinalities differ") or \code{"mat"} (for "mass transfer"). \code{cutoff} is a positive numerical value that specifies the maximal interpoint distance and \code{q} is a value in \eqn{[1,\infty]}{[1,Inf]} that gives the order of the average applied to the interpoint distances. See the help files for \code{\link{pppdist}} and \code{\link{matchingdist}} for detailed information about these elements. Objects of class \code{"pppmatching"} may be created by the function \code{\link{pppmatching}}, and are most commonly obtained as output of the function \code{\link{pppdist}}. There are methods \code{plot}, \code{print} and \code{summary} for this class. 
} \author{ Dominic Schuhmacher \email{dominic.schuhmacher@stat.unibe.ch} \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{matchingdist}} \code{\link{pppmatching}} } \examples{ # a random complete unweighted matching X <- runifpoint(10) Y <- runifpoint(10) am <- r2dtable(1, rep(1,10), rep(1,10))[[1]] # generates a random permutation matrix m <- pppmatching(X, Y, am) summary(m) m$matrix \dontrun{ plot(m) } # an optimal complete unweighted matching m2 <- pppdist(X,Y) summary(m2) m2$matrix \dontrun{ plot(m2) } } \keyword{spatial} \keyword{attribute} spatstat/man/rDiggleGratton.Rd0000644000176200001440000001065613160710621016100 0ustar liggesusers\name{rDiggleGratton} \alias{rDiggleGratton} \title{Perfect Simulation of the Diggle-Gratton Process} \description{ Generate a random pattern of points, a simulated realisation of the Diggle-Gratton process, using a perfect simulation algorithm. } \usage{ rDiggleGratton(beta, delta, rho, kappa=1, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{delta}{ hard core distance (a non-negative number). } \item{rho}{ interaction range (a number greater than \code{delta}). } \item{kappa}{ interaction exponent (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. 
If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Diggle-Gratton point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. Diggle and Gratton (1984, pages 208-210) introduced the pairwise interaction point process with pair potential \eqn{h(t)} of the form \deqn{ h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa \quad\quad \mbox{ if } \delta \le t \le \rho }{ h(t) = ((t - delta)/(rho - delta))^kappa, { } delta <= t <= rho } with \eqn{h(t) = 0} for \eqn{t < \delta}{t < delta} and \eqn{h(t) = 1} for \eqn{t > \rho}{t > rho}. Here \eqn{\delta}{delta}, \eqn{\rho}{rho} and \eqn{\kappa}{kappa} are parameters. Note that we use the symbol \eqn{\kappa}{kappa} where Diggle and Gratton (1984) use \eqn{\beta}{beta}, since in \pkg{spatstat} we reserve the symbol \eqn{\beta}{beta} for an intensity parameter. The parameters must all be nonnegative, and must satisfy \eqn{\delta \le \rho}{delta <= rho}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002) A primer on perfect simulation for spatial point processes. 
\emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ \adrian based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rDiggleGratton(50, 0.02, 0.07) } \seealso{ \code{\link{rmh}}, \code{\link{DiggleGratton}}. \code{\link{rStrauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDGS}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat/man/logLik.mppm.Rd0000644000176200001440000001023513160710621015346 0ustar liggesusers\name{logLik.mppm} \alias{logLik.mppm} \alias{AIC.mppm} \alias{extractAIC.mppm} \alias{nobs.mppm} \alias{getCall.mppm} \alias{terms.mppm} \title{Log Likelihood and AIC for Multiple Point Process Model} \description{ For a point process model that has been fitted to multiple point patterns, these functions extract the log likelihood and AIC, or analogous quantities based on the pseudolikelihood. } \usage{ \method{logLik}{mppm}(object, \dots, warn=TRUE) \method{AIC}{mppm}(object, \dots, k=2, takeuchi=TRUE) \method{extractAIC}{mppm}(fit, scale = 0, k = 2, \dots, takeuchi = TRUE) \method{nobs}{mppm}(object, \dots) \method{getCall}{mppm}(x, \dots) \method{terms}{mppm}(x, \dots) } \arguments{ \item{object,fit,x}{ Fitted point process model (fitted to multiple point patterns). An object of class \code{"mppm"}. 
} \item{\dots}{Ignored.} \item{warn}{ If \code{TRUE}, a warning is given when the pseudolikelihood is returned instead of the likelihood. } \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } \item{takeuchi}{ Logical value specifying whether to use the Takeuchi penalty (\code{takeuchi=TRUE}) or the number of fitted parameters (\code{takeuchi=FALSE}) in calculating AIC. } } \details{ These functions are methods for the generic commands \code{\link[stats]{logLik}}, \code{\link[stats]{AIC}}, \code{\link[stats]{extractAIC}}, \code{\link[stats]{terms}} and \code{\link[stats:update]{getCall}} for the class \code{"mppm"}. An object of class \code{"mppm"} represents a fitted Poisson or Gibbs point process model fitted to several point patterns. It is obtained from the model-fitting function \code{\link{mppm}}. The method \code{logLik.mppm} extracts the maximised value of the log likelihood for the fitted model (as approximated by quadrature using the Berman-Turner approximation). If \code{object} is not a Poisson process, the maximised log \emph{pseudolikelihood} is returned, with a warning. The Akaike Information Criterion AIC for a fitted model is defined as \deqn{ AIC = -2 \log(L) + k \times \mbox{penalty} }{ AIC = -2 * log(L) + k * penalty } where \eqn{L} is the maximised likelihood of the fitted model, and \eqn{\mbox{penalty}}{penalty} is a penalty for model complexity, usually equal to the effective degrees of freedom of the model. The method \code{extractAIC.mppm} returns the \emph{analogous} quantity \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*}, the quadrature approximation to the likelihood (if \code{fit} is a Poisson model) or the pseudolikelihood (if \code{fit} is a Gibbs model). The \eqn{\mbox{penalty}}{penalty} term is calculated as follows. If \code{takeuchi=FALSE} then \eqn{\mbox{penalty}}{penalty} is the number of fitted parameters. 
If \code{takeuchi=TRUE} then \eqn{\mbox{penalty} = \mbox{trace}(J H^{-1})}{penalty = trace(J H^(-1))} where \eqn{J} and \eqn{H} are the estimated variance and hessian, respectively, of the composite score. These two choices are equivalent for a Poisson process. The method \code{nobs.mppm} returns the total number of points in the original data point patterns to which the model was fitted. The method \code{getCall.mppm} extracts the original call to \code{\link{mppm}} which caused the model to be fitted. The method \code{terms.mppm} extracts the covariate terms in the model formula as a \code{terms} object. Note that these terms do not include the interaction component of the model. The \R function \code{\link[stats]{step}} uses these methods. } \value{ See the help files for the corresponding generic functions. } \seealso{ \code{\link{mppm}} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \examples{ fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders)) logLik(fit) AIC(fit) nobs(fit) getCall(fit) } \keyword{spatial} \keyword{models} spatstat/man/quadratcount.Rd0000644000176200001440000001357413160710621015700 0ustar liggesusers\name{quadratcount} \alias{quadratcount} \alias{quadratcount.ppp} \alias{quadratcount.splitppp} \title{Quadrat counting for a point pattern} \description{ Divides window into quadrats and counts the numbers of points in each quadrat. } \usage{ quadratcount(X, \dots) \method{quadratcount}{ppp}(X, nx=5, ny=nx, \dots, xbreaks=NULL, ybreaks=NULL, tess=NULL) \method{quadratcount}{splitppp}(X, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or a split point pattern (object of class \code{"splitppp"}). } \item{nx,ny}{ Numbers of rectangular quadrats in the \eqn{x} and \eqn{y} directions. 
Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{\dots}{Additional arguments passed to \code{quadratcount.ppp}.} \item{xbreaks}{ Numeric vector giving the \eqn{x} coordinates of the boundaries of the rectangular quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Numeric vector giving the \eqn{y} coordinates of the boundaries of the rectangular quadrats. Incompatible with \code{ny}. } \item{tess}{ Tessellation (object of class \code{"tess"} or something acceptable to \code{\link{as.tess}}) determining the quadrats. Incompatible with \code{nx,ny,xbreaks,ybreaks}. } } \value{ The value of \code{quadratcount.ppp} is a contingency table containing the number of points in each quadrat. The table is also an object of the special class \code{"quadratcount"} and there is a plot method for this class. The value of \code{quadratcount.splitppp} is a list of such contingency tables, each containing the quadrat counts for one of the component point patterns in \code{X}. This list also has the class \code{"solist"} which has print and plot methods. } \details{ Quadrat counting is an elementary technique for analysing spatial point patterns. See Diggle (2003). \bold{If \code{X} is a point pattern}, then by default, the window containing the point pattern \code{X} is divided into an \code{nx * ny} grid of rectangular tiles or `quadrats'. (If the window is not a rectangle, then these tiles are intersected with the window.) The number of points of \code{X} falling in each quadrat is counted. These numbers are returned as a contingency table. If \code{xbreaks} is given, it should be a numeric vector giving the \eqn{x} coordinates of the quadrat boundaries. If it is not given, it defaults to a sequence of \code{nx+1} values equally spaced over the range of \eqn{x} coordinates in the window \code{Window(X)}. Similarly if \code{ybreaks} is given, it should be a numeric vector giving the \eqn{y} coordinates of the quadrat boundaries. 
It defaults to a vector of \code{ny+1} values equally spaced over the range of \eqn{y} coordinates in the window. The lengths of \code{xbreaks} and \code{ybreaks} may be different. Alternatively, quadrats of any shape may be used. The argument \code{tess} can be a tessellation (object of class \code{"tess"}) whose tiles will serve as the quadrats. The algorithm counts the number of points of \code{X} falling in each quadrat, and returns these counts as a contingency table. The return value is a \code{table} which can be printed neatly. The return value is also a member of the special class \code{"quadratcount"}. Plotting the object will display the quadrats, annotated by their counts. See the examples. To perform a chi-squared test based on the quadrat counts, use \code{\link{quadrat.test}}. To calculate an estimate of intensity based on the quadrat counts, use \code{\link{intensity.quadratcount}}. To extract the quadrats used in a \code{quadratcount} object, use \code{\link{as.tess}}. \bold{If \code{X} is a split point pattern} (object of class \code{"splitppp"} then quadrat counting will be performed on each of the components point patterns, and the resulting contingency tables will be returned in a list. This list can be printed or plotted. Marks attached to the points are ignored by \code{quadratcount.ppp}. To obtain a separate contingency table for each type of point in a multitype point pattern, first separate the different points using \code{\link{split.ppp}}, then apply \code{quadratcount.splitppp}. See the Examples. } \note{ To perform a chi-squared test based on the quadrat counts, use \code{\link{quadrat.test}}. } \section{Warning}{ If \code{Q} is the result of \code{quadratcount} using rectangular tiles, then \code{as.numeric(Q)} extracts the counts \bold{in the wrong order}. To obtain the quadrat counts in the same order as the tiles of the corresponding tessellation would be listed, use \code{as.vector(t(Q))}, which works in all cases. 
} \seealso{ \code{\link{plot.quadratcount}}, \code{\link{intensity.quadratcount}}, \code{\link{quadrats}}, \code{\link{quadrat.test}}, \code{\link{tess}}, \code{\link{hextess}}, \code{\link{quadratresample}}, \code{\link{miplot}} } \references{ Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 2003. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics.} John Wiley and Sons. } \examples{ X <- runifpoint(50) quadratcount(X) quadratcount(X, 4, 5) quadratcount(X, xbreaks=c(0, 0.3, 1), ybreaks=c(0, 0.4, 0.8, 1)) qX <- quadratcount(X, 4, 5) # plotting: plot(X, pch="+") plot(qX, add=TRUE, col="red", cex=1.5, lty=2) # irregular window data(humberside) plot(humberside) qH <- quadratcount(humberside, 2, 3) plot(qH, add=TRUE, col="blue", cex=1.5, lwd=2) # multitype - split plot(quadratcount(split(humberside), 2, 3)) # quadrats determined by tessellation: B <- dirichlet(runifpoint(6)) qX <- quadratcount(X, tess=B) plot(X, pch="+") plot(qX, add=TRUE, col="red", cex=1.5, lty=2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/plot.fasp.Rd0000644000176200001440000001251713160710621015070 0ustar liggesusers\name{plot.fasp} \alias{plot.fasp} \title{Plot a Function Array} \description{ Plots an array of summary functions, usually associated with a point pattern, stored in an object of class \code{"fasp"}. A method for \code{plot}. } \usage{ \method{plot}{fasp}(x,formule=NULL, \dots, subset=NULL, title=NULL, banner=TRUE, transpose=FALSE, samex=FALSE, samey=FALSE, mar.panel=NULL, outerlabels=TRUE, cex.outerlabels=1.25, legend=FALSE) } \arguments{ \item{x}{An object of class \code{"fasp"} representing a function array. } \item{formule}{ A formula or list of formulae indicating what variables are to be plotted against what variable. Each formula is either an R language formula object, or a string that can be parsed as a formula. 
If \code{formule} is a list, its \eqn{k^{th}}{k-th} component should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot where \code{x$which[i,j]=k}. If the formula is left as \code{NULL}, then \code{plot.fasp} attempts to use the component \code{default.formula} of \code{x}. If that component is NULL as well, it gives up. } \item{\dots}{ Arguments passed to \code{\link{plot.fv}} to control the individual plot panels. } \item{subset}{ A logical vector, or a vector of indices, or an expression or a character string, or a \bold{list} of such, indicating a subset of the data to be included in each plot. If \code{subset} is a list, its \eqn{k^{th}}{k-th} component should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot where \code{x$which[i,j]=k}. } \item{title}{ Overall title for the plot. } \item{banner}{ Logical. If \code{TRUE}, the overall title is plotted. If \code{FALSE}, the overall title is not plotted and no space is allocated for it. } \item{transpose}{ Logical. If \code{TRUE}, rows and columns will be exchanged. } \item{samex,samey}{ Logical values indicating whether all individual plot panels should have the same x axis limits and the same y axis limits, respectively. This makes it easier to compare the plots. } \item{mar.panel}{ Vector of length 4 giving the value of the graphics parameter \code{mar} controlling the size of plot margins for each individual plot panel. See \code{\link{par}}. } \item{outerlabels}{Logical. If \code{TRUE}, the row and column names of the array of functions are plotted in the margins of the array of plot panels. If \code{FALSE}, each individual plot panel is labelled by its row and column name. } \item{cex.outerlabels}{ Character expansion factor for row and column labels of array. } \item{legend}{ Logical flag determining whether to plot a legend in each panel. } } \details{ An object of class \code{"fasp"} represents an array of summary functions, usually associated with a point pattern. 
See \code{\link{fasp.object}} for details. Such an object is created, for example, by \code{\link{alltypes}}. The function \code{plot.fasp} is a method for \code{plot}. It calls \code{\link{plot.fv}} to plot the individual panels. For information about the interpretation of the arguments \code{formule} and \code{subset}, see \code{\link{plot.fv}}. Arguments that are often passed through \code{...} include \code{col} to control the colours of the different lines in a panel, and \code{lty} and \code{lwd} to control the line type and line width of the different lines in a panel. The argument \code{shade} can also be used to display confidence intervals or significance bands as filled grey shading. See \code{\link{plot.fv}}. The argument \code{title}, if present, will determine the overall title of the plot. If it is absent, it defaults to \code{x$title}. Titles for the individual plot panels will be taken from \code{x$titles}. } \value{None.} \section{Warnings}{ (Each component of) the \code{subset} argument may be a logical vector (of the same length as the vectors of data which are extracted from \code{x}), or a vector of indices, or an \bold{expression} such as \code{expression(r<=0.2)}, or a text string, such as \code{"r<=0.2"}. Attempting a syntax such as \code{subset = r<=0.2} (without wrapping \code{r<=0.2} either in quote marks or in \code{expression()}) will cause this function to fall over. Variables referred to in any formula must exist in the data frames stored in \code{x}. What the names of these variables are will of course depend upon the nature of \code{x}. } \seealso{ \code{\link{alltypes}}, \code{\link{plot.fv}}, \code{\link{fasp.object}} } \examples{ \dontrun{ # Bramble Canes data. data(bramblecanes) X.G <- alltypes(bramblecanes,"G",dataname="Bramblecanes",verb=TRUE) plot(X.G) plot(X.G,subset="r<=0.2") plot(X.G,formule=asin(sqrt(cbind(km,theo))) ~ asin(sqrt(theo))) plot(X.G,fo=cbind(km,theo) - theo~r,subset="r<=0.2") # Simulated data. 
pp <- runifpoint(350, owin(c(0,1),c(0,1))) pp <- pp \%mark\% factor(c(rep(1,50),rep(2,100),rep(3,200))) X.K <- alltypes(pp,"K",verb=TRUE,dataname="Fake Data") plot(X.K,fo=cbind(border,theo)~theo,subset="theo<=0.75") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/plot.layered.Rd0000644000176200001440000000706613160710621015567 0ustar liggesusers\name{plot.layered} \alias{plot.layered} \title{ Layered Plot } \description{ Generates a layered plot. The plot method for objects of class \code{"layered"}. } \usage{ \method{plot}{layered}(x, ..., which = NULL, plotargs = NULL, add=FALSE, show.all=!add, main=NULL, do.plot=TRUE) } \arguments{ \item{x}{ An object of class \code{"layered"} created by the function \code{\link{layered}}. } \item{\dots}{ Arguments to be passed to the \code{plot} method for \emph{every} layer. } \item{which}{ Subset index specifying which layers should be plotted. } \item{plotargs}{ Arguments to be passed to the \code{plot} methods for individual layers. A list of lists of arguments of the form \code{name=value}. } \item{add}{Logical value indicating whether to add the graphics to an existing plot. } \item{show.all}{ Logical value indicating whether the \emph{first} layer should be displayed in full (including the main title, bounding window, coordinate axes, colour ribbon, and so on). } \item{main}{Main title for the plot} \item{do.plot}{Logical value indicating whether to actually do the plotting.} } \details{ Layering is a simple mechanism for controlling a high-level plot that is composed of several successive plots, for example, a background and a foreground plot. The layering mechanism makes it easier to plot, to switch on or off the plotting of each individual layer, to control the plotting arguments that are passed to each layer, and to zoom in on a subregion. The layers of data to be plotted should first be converted into a single object of class \code{"layered"} using the function \code{\link{layered}}. 
Then the layers can be plotted using the method \code{plot.layered}. To zoom in on a subregion, apply the subset operator \code{\link{[.layered}} to \code{x} before plotting. Graphics parameters for each layer are determined by (in order of precedence) \code{\dots}, \code{plotargs}, and \code{\link{layerplotargs}(x)}. The graphics parameters may also include the special argument \code{.plot} specifying (the name of) a function which will be used to perform the plotting instead of the generic \code{plot}. The argument \code{show.all} is recognised by many plot methods in \pkg{spatstat}. It determines whether a plot is drawn with all its additional components such as the main title, bounding window, coordinate axes, colour ribbons and legends. The default is \code{TRUE} for new plots and \code{FALSE} for added plots. In \code{plot.layered}, the argument \code{show.all} applies only to the \bold{first} layer. The subsequent layers are plotted with \code{show.all=FALSE}. To override this, that is, if you really want to draw all the components of \bold{all} layers of \code{x}, insert the argument \code{show.all=TRUE} in each entry of \code{plotargs} or \code{\link{layerplotargs}(x)}. } \value{ (Invisibly) a list containing the return values from the plot commands for each layer. This list has an attribute \code{"bbox"} giving a bounding box for the entire plot. } \author{\adrian and \rolf } \seealso{ \code{\link{layered}}, \code{\link{layerplotargs}}, \code{\link{[.layered}}, \code{\link{plot}}. 
} \examples{ data(cells) D <- distmap(cells) L <- layered(D, cells) plot(L) plot(L, which = 2) plot(L, plotargs=list(list(ribbon=FALSE), list(pch=3, cols="white"))) # plot a subregion plot(L[, square(0.5)]) } \keyword{spatial} \keyword{hplot} spatstat/man/relrisk.ppm.Rd0000644000176200001440000001720313160710621015425 0ustar liggesusers\name{relrisk.ppm} \alias{relrisk.ppm} \title{ Parametric Estimate of Spatially-Varying Relative Risk } \description{ Given a point process model fitted to a multitype point pattern, this function computes the fitted spatially-varying probability of each type of point, or the ratios of such probabilities, according to the fitted model. Optionally the standard errors of the estimates are also computed. } \usage{ \method{relrisk}{ppm}(X, \dots, at = c("pixels", "points"), relative = FALSE, se = FALSE, casecontrol = TRUE, control = 1, case, ngrid = NULL, window = NULL) } \arguments{ \item{X}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{at}{ String specifying whether to compute the probability values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{relative}{ Logical. If \code{FALSE} (the default) the algorithm computes the probabilities of each type of point. If \code{TRUE}, it computes the \emph{relative risk}, the ratio of probabilities of each type relative to the probability of a control. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{casecontrol}{ Logical. Whether to treat a bivariate point pattern as consisting of cases and controls, and return only the probability or relative risk of a case. Ignored if there are more than 2 types of points. See Details. } \item{control}{ Integer, or character string, identifying which mark value corresponds to a control. 
} \item{case}{ Integer, or character string, identifying which mark value corresponds to a case (rather than a control) in a bivariate point pattern. This is an alternative to the argument \code{control} in a bivariate point pattern. Ignored if there are more than 2 types of points. } \item{ngrid}{ Optional. Dimensions of a rectangular grid of locations inside \code{window} where the predictions should be computed. An integer, or an integer vector of length 2, specifying the number of grid points in the \eqn{y} and \eqn{x} directions. (Applies only when \code{at="pixels"}.) } \item{window}{ Optional. A window (object of class \code{"owin"}) \emph{delimiting} the locations where predictions should be computed. Defaults to the window of the original data used to fit the model \code{object}. (Applies only when \code{at="pixels"}.) } } \details{ The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. This function \code{relrisk.ppm} is the method for fitted point process models (class \code{"ppm"}). It computes \emph{parametric} estimates of relative risk, using the fitted model. If \code{X} is a bivariate point pattern (a multitype point pattern consisting of two types of points) then by default, the points of the first type (the first level of \code{marks(X)}) are treated as controls or non-events, and points of the second type are treated as cases or events. Then by default this command computes the spatially-varying \emph{probability} of a case, i.e. the probability \eqn{p(u)} that a point at spatial location \eqn{u} will be a case. If \code{relative=TRUE}, it computes the spatially-varying \emph{relative risk} of a case relative to a control, \eqn{r(u) = p(u)/(1- p(u))}. 
If \code{X} is a multitype point pattern with \eqn{m > 2} types, or if \code{X} is a bivariate point pattern and \code{casecontrol=FALSE}, then by default this command computes, for each type \eqn{j}, a nonparametric estimate of the spatially-varying \emph{probability} of an event of type \eqn{j}. This is the probability \eqn{p_j(u)}{p[j](u)} that a point at spatial location \eqn{u} will belong to type \eqn{j}. If \code{relative=TRUE}, the command computes the \emph{relative risk} of an event of type \eqn{j} relative to a control, \eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)}, where events of type \eqn{k} are treated as controls. The argument \code{control} determines which type \eqn{k} is treated as a control. If \code{at = "pixels"} the calculation is performed for every spatial location \eqn{u} on a fine pixel grid, and the result is a pixel image representing the function \eqn{p(u)} or a list of pixel images representing the functions \eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)} for \eqn{j = 1,\ldots,m}{j = 1,...,m}. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{NA}. If \code{at = "points"} the calculation is performed only at the data points \eqn{x_i}{x[i]}. By default the result is a vector of values \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case at each data point, or a matrix of values \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of each possible type \eqn{j} at each data point. If \code{relative=TRUE} then the relative risks \eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are returned. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{Inf}. Probabilities and risks are computed from the fitted intensity of the model, using \code{\link{predict.ppm}}. If \code{se=TRUE} then standard errors will also be computed, based on asymptotic theory, using \code{\link{vcov.ppm}}. 
} \value{ If \code{se=FALSE} (the default), the format is described below. If \code{se=TRUE}, the result is a list of two entries, \code{estimate} and \code{SE}, each having the format described below. If \code{X} consists of only two types of points, and if \code{casecontrol=TRUE}, the result is a pixel image (if \code{at="pixels"}) or a vector (if \code{at="points"}). The pixel values or vector values are the probabilities of a case if \code{relative=FALSE}, or the relative risk of a case (probability of a case divided by the probability of a control) if \code{relative=TRUE}. If \code{X} consists of more than two types of points, or if \code{casecontrol=FALSE}, the result is: \itemize{ \item (if \code{at="pixels"}) a list of pixel images, with one image for each possible type of point. The result also belongs to the class \code{"solist"} so that it can be printed and plotted. \item (if \code{at="points"}) a matrix of probabilities, with rows corresponding to data points \eqn{x_i}{x[i]}, and columns corresponding to types \eqn{j}. } The pixel values or matrix entries are the probabilities of each type of point if \code{relative=FALSE}, or the relative risk of each type (probability of each type divided by the probability of a control) if \code{relative=TRUE}. If \code{relative=FALSE}, the resulting values always lie between 0 and 1. If \code{relative=TRUE}, the results are either non-negative numbers, or the values \code{Inf} or \code{NA}. } \author{\adrian \rolf and \ege } \seealso{ There is another method \code{\link{relrisk.ppp}} for point pattern datasets which computes \emph{nonparametric} estimates of relative risk by kernel smoothing. 
See also \code{\link{relrisk}}, \code{\link{relrisk.ppp}}, \code{\link{ppm}} } \examples{ fit <- ppm(chorley ~ marks * (x+y)) rr <- relrisk(fit, relative=TRUE, control="lung", se=TRUE) plot(rr$estimate) plot(rr$SE) rrX <- relrisk(fit, at="points", relative=TRUE, control="lung") } \keyword{spatial} \keyword{models} spatstat/man/lut.Rd0000644000176200001440000000623513160710621013766 0ustar liggesusers\name{lut} \alias{lut} \title{Lookup Tables} \description{ Create a lookup table. } \usage{ lut(outputs, ..., range=NULL, breaks=NULL, inputs=NULL) } \arguments{ \item{outputs}{Vector of output values} \item{\dots}{Ignored.} \item{range}{ Interval of numbers to be mapped. A numeric vector of length 2, specifying the ends of the range of values to be mapped. Incompatible with \code{breaks} or \code{inputs}. } \item{inputs}{ Input values to which the output values are associated. A factor or vector of the same length as \code{outputs}. Incompatible with \code{breaks} or \code{range}. } \item{breaks}{ Breakpoints for the lookup table. A numeric vector of length equal to \code{length(outputs)+1}. Incompatible with \code{range} or \code{inputs}. } } \details{ A lookup table is a function, mapping input values to output values. The command \code{lut} creates an object representing a lookup table, which can then be used to control various behaviour in the \pkg{spatstat} package. It can also be used to compute the output value assigned to any input value. The argument \code{outputs} specifies the output values to which input data values will be mapped. It should be a vector of any atomic type (e.g. numeric, logical, character, complex) or factor values. Exactly one of the arguments \code{range}, \code{inputs} or \code{breaks} must be specified by name. If \code{inputs} is given, then it should be a vector or factor, of the same length as \code{outputs}. The entries of \code{inputs} can be any atomic type (e.g. numeric, logical, character, complex) or factor values. 
The resulting lookup table associates the value \code{inputs[i]} with the value \code{outputs[i]}. If \code{range} is given, then it determines the interval of the real number line that will be mapped. It should be a numeric vector of length 2. If \code{breaks} is given, then it determines intervals of the real number line which are mapped to each output value. It should be a numeric vector, of length at least 2, with entries that are in increasing order. Infinite values are allowed. Any number in the range between \code{breaks[i]} and \code{breaks[i+1]} will be mapped to the value \code{outputs[i]}. The result is an object of class \code{"lut"}. There is a \code{print} method for this class. Some plot commands in the \pkg{spatstat} package accept an object of this class as a specification of a lookup table. The result is also a function \code{f} which can be used to compute the output value assigned to any input data value. That is, \code{f(x)} returns the output value assigned to \code{x}. This also works for vectors of input data values. } \value{ A function, which is also an object of class \code{"lut"}. } \seealso{ \code{\link{colourmap}}. } \examples{ # lookup table for real numbers, using breakpoints cr <- lut(factor(c("low", "medium", "high")), breaks=c(0,5,10,15)) cr cr(3.2) cr(c(3,5,7)) # lookup table for discrete set of values ct <- lut(c(0,1), inputs=c(FALSE, TRUE)) ct(TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/density.ppp.Rd0000644000176200001440000003457213160710571015450 0ustar liggesusers\name{density.ppp} \alias{density.ppp} \title{Kernel Smoothed Intensity of Point Pattern} \description{ Compute a kernel smoothed intensity function from a point pattern. 
} \usage{ \method{density}{ppp}(x, sigma=NULL, \dots, weights=NULL, edge=TRUE, varcov=NULL, at="pixels", leaveoneout=TRUE, adjust=1, diggle=FALSE, se=FALSE, kernel="gaussian", scalekernel=is.character(kernel), positive=FALSE, verbose=TRUE) } \arguments{ \item{x}{ Point pattern (object of class \code{"ppp"}). } \item{sigma}{ Standard deviation of isotropic smoothing kernel. Either a numerical value, or a function that computes an appropriate value of \code{sigma}. } \item{weights}{ Optional weights to be attached to the points. A numeric vector, numeric matrix, an \code{expression}, or a pixel image. } \item{\dots}{ Additional arguments passed to \code{\link{pixellate.ppp}} and \code{\link{as.mask}} to determine the pixel resolution, or passed to \code{sigma} if it is a function. } \item{edge}{ Logical value indicating whether to apply edge correction. } \item{varcov}{ Variance-covariance matrix of anisotropic smoothing kernel. Incompatible with \code{sigma}. } \item{at}{ String specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{x} (\code{at="points"}). } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. } \item{adjust}{ Optional. Adjustment factor for the smoothing parameter. } \item{diggle}{ Logical. If \code{TRUE}, use the Jones-Diggle improved edge correction, which is more accurate but slower to compute than the default correction. } \item{kernel}{ The smoothing kernel. A character string specifying the smoothing kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}), or a pixel image (object of class \code{"im"}) containing values of the kernel, or a \code{function(x,y)} which yields values of the kernel. } \item{scalekernel}{ Logical value. 
If \code{scalekernel=TRUE}, then the kernel will be rescaled to the bandwidth determined by \code{sigma} and \code{varcov}: this is the default behaviour when \code{kernel} is a character string. If \code{scalekernel=FALSE}, then \code{sigma} and \code{varcov} will be ignored: this is the default behaviour when \code{kernel} is a function or a pixel image. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{positive}{ Logical value indicating whether to force all density values to be positive numbers. Default is \code{FALSE}. } \item{verbose}{ Logical value indicating whether to issue warnings about numerical problems and conditions. } } \value{ By default, the result is a pixel image (object of class \code{"im"}). Pixel values are estimated intensity values, expressed in \dQuote{points per unit area}. If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{x}. Values are estimated intensity values at the points of \code{x}. In either case, the return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. If \code{weights} is a matrix with more than one column, then the result is a list of images (if \code{at="pixels"}) or a matrix of numerical values (if \code{at="points"}). If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. } \details{ This is a method for the generic function \code{density}. It computes a fixed-bandwidth kernel estimate (Diggle, 1985) of the intensity function of the point process that generated the point pattern \code{x}. By default it computes the convolution of the isotropic Gaussian kernel of standard deviation \code{sigma} with point masses at each of the data points in \code{x}. Anisotropic Gaussian kernels are also supported. Each point has unit weight, unless the argument \code{weights} is given. 
If \code{edge=TRUE}, the intensity estimate is corrected for edge effect bias in one of two ways: \itemize{ \item If \code{diggle=FALSE} (the default) the intensity estimate is correted by dividing it by the convolution of the Gaussian kernel with the window of observation. This is the approach originally described in Diggle (1985). Thus the intensity value at a point \eqn{u} is \deqn{ \hat\lambda(u) = e(u) \sum_i k(x_i - u) w_i }{ \lambda(u) = e(u) \sum[i] k(x[i] - u) w[i] } where \eqn{k} is the Gaussian smoothing kernel, \eqn{e(u)} is an edge correction factor, and \eqn{w_i}{w[i]} are the weights. \item If \code{diggle=TRUE} then the code uses the improved edge correction described by Jones (1993) and Diggle (2010, equation 18.9). This has been shown to have better performance (Jones, 1993) but is slightly slower to compute. The intensity value at a point \eqn{u} is \deqn{ \hat\lambda(u) = \sum_i k(x_i - u) w_i e(x_i) }{ \lambda(u) = \sum[i] k(x[i] - u) w[i] e(x[i]) } where again \eqn{k} is the Gaussian smoothing kernel, \eqn{e(x_i)}{e(x[i])} is an edge correction factor, and \eqn{w_i}{w[i]} are the weights. } In both cases, the edge correction term \eqn{e(u)} is the reciprocal of the kernel mass inside the window: \deqn{ \frac{1}{e(u)} = \int_W k(v-u) \, {\rm d}v }{ 1/e(u) = integral[v in W] k(v-u) dv } where \eqn{W} is the observation window. The smoothing kernel is determined by the arguments \code{sigma}, \code{varcov} and \code{adjust}. \itemize{ \item if \code{sigma} is a single numerical value, this is taken as the standard deviation of the isotropic Gaussian kernel. \item alternatively \code{sigma} may be a function that computes an appropriate bandwidth for the isotropic Gaussian kernel from the data point pattern by calling \code{sigma(x)}. To perform automatic bandwidth selection using cross-validation, it is recommended to use the functions \code{\link{bw.diggle}} or \code{\link{bw.ppl}}. 
\item The smoothing kernel may be chosen to be any Gaussian kernel, by giving the variance-covariance matrix \code{varcov}. The arguments \code{sigma} and \code{varcov} are incompatible. \item Alternatively \code{sigma} may be a vector of length 2 giving the standard deviations of two independent Gaussian coordinates, thus equivalent to \code{varcov = diag(rep(sigma^2, 2))}. \item if neither \code{sigma} nor \code{varcov} is specified, an isotropic Gaussian kernel will be used, with a default value of \code{sigma} calculated by a simple rule of thumb that depends only on the size of the window. \item The argument \code{adjust} makes it easy for the user to change the bandwidth specified by any of the rules above. The value of \code{sigma} will be multiplied by the factor \code{adjust}. The matrix \code{varcov} will be multiplied by \code{adjust^2}. To double the smoothing bandwidth, set \code{adjust=2}. } If \code{at="pixels"} (the default), intensity values are computed at every location \eqn{u} in a fine grid, and are returned as a pixel image. The point pattern is first discretised using \code{\link{pixellate.ppp}}, then the intensity is computed using the Fast Fourier Transform. Accuracy depends on the pixel resolution and the discretisation rule. The pixel resolution is controlled by the arguments \code{\dots} passed to \code{\link{as.mask}} (specify the number of pixels by \code{dimyx} or the pixel size by \code{eps}). The discretisation rule is controlled by the arguments \code{\dots} passed to \code{\link{pixellate.ppp}} (the default rule is that each point is allocated to the nearest pixel centre; this can be modified using the arguments \code{fractional} and \code{preserve}). If \code{at="points"}, the intensity values are computed to high accuracy at the points of \code{x} only. Computation is performed by directly evaluating and summing the Gaussian kernel contributions without discretising the data. 
The result is a numeric vector giving the density values. The intensity value at a point \eqn{x_i}{x[i]} is (if \code{diggle=FALSE}) \deqn{ \hat\lambda(x_i) = e(x_i) \sum_j k(x_j - x_i) w_j }{ \lambda(x[i]) = e(x[i]) \sum[j] k(x[j] - x[i]) w[j] } or (if \code{diggle=TRUE}) \deqn{ \hat\lambda(x_i) = \sum_j k(x_j - x_i) w_j e(x_j) }{ \lambda(x[i]) = \sum[j] k(x[j] - x[i]) w[j] e(x[j]) } If \code{leaveoneout=TRUE} (the default), then the sum in the equation is taken over all \eqn{j} not equal to \eqn{i}, so that the intensity value at a data point is the sum of kernel contributions from all \emph{other} data points. If \code{leaveoneout=FALSE} then the sum is taken over all \eqn{j}, so that the intensity value at a data point includes a contribution from the same point. If \code{weights} is a matrix with more than one column, then the calculation is effectively repeated for each column of weights. The result is a list of images (if \code{at="pixels"}) or a matrix of numerical values (if \code{at="points"}). The argument \code{weights} can also be an \code{expression}. It will be evaluated in the data frame \code{as.data.frame(x)} to obtain a vector or matrix of weights. The expression may involve the symbols \code{x} and \code{y} representing the Cartesian coordinates, the symbol \code{marks} representing the mark values if there is only one column of marks, and the names of the columns of marks if there are several columns. The argument \code{weights} can also be a pixel image (object of class \code{"im"}). numerical weights for the data points will be extracted from this image (by looking up the pixel values at the locations of the data points in \code{x}). To select the bandwidth \code{sigma} automatically by cross-validation, use \code{\link{bw.diggle}} or \code{\link{bw.ppl}}. To perform spatial interpolation of values that were observed at the points of a point pattern, use \code{\link{Smooth.ppp}}. 
For adaptive nonparametric estimation, see \code{\link{adaptive.density}}. For data sharpening, see \code{\link{sharpen.ppp}}. To compute a relative risk surface or probability map for two (or more) types of points, use \code{\link{relrisk}}. } \seealso{ \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{Smooth.ppp}}, \code{\link{sharpen.ppp}}, \code{\link{adaptive.density}}, \code{\link{relrisk}}, \code{\link{ppp.object}}, \code{\link{im.object}} } \note{ This function is often misunderstood. The result of \code{density.ppp} is not a spatial smoothing of the marks or weights attached to the point pattern. To perform spatial interpolation of values that were observed at the points of a point pattern, use \code{\link{Smooth.ppp}}. The result of \code{density.ppp} is not a probability density. It is an estimate of the \emph{intensity function} of the point process that generated the point pattern data. Intensity is the expected number of random points per unit area. The units of intensity are \dQuote{points per unit area}. Intensity is usually a function of spatial location, and it is this function which is estimated by \code{density.ppp}. The integral of the intensity function over a spatial region gives the expected number of points falling in this region. Inspecting an estimate of the intensity function is usually the first step in exploring a spatial point pattern dataset. For more explanation, see Baddeley, Rubak and Turner (2015) or Diggle (2003, 2010). If you have two (or more) types of points, and you want a probability map or relative risk surface (the spatially-varying probability of a given type), use \code{\link{relrisk}}. } \section{Negative Values}{ Negative and zero values of the density estimate are possible when \code{at="pixels"} because of numerical errors in finite-precision arithmetic. By default, \code{density.ppp} does not try to repair such errors. This would take more computation time and is not always needed. 
(Also it would not be appropriate if \code{weights} include negative values.) To ensure that the resulting density values are always positive, set \code{positive=TRUE}. } \examples{ if(interactive()) { opa <- par(mfrow=c(1,2)) plot(density(cells, 0.05)) plot(density(cells, 0.05, diggle=TRUE)) par(opa) v <- diag(c(0.05, 0.07)^2) plot(density(cells, varcov=v)) } \donttest{ Z <- density(cells, 0.05) Z <- density(cells, 0.05, diggle=TRUE) Z <- density(cells, 0.05, se=TRUE) Z <- density(cells, varcov=diag(c(0.05^2, 0.07^2))) Z <- density(cells, 0.05, weights=data.frame(a=1:42,b=42:1)) Z <- density(cells, 0.05, weights=expression(x)) } # automatic bandwidth selection plot(density(cells, sigma=bw.diggle(cells))) # equivalent: plot(density(cells, bw.diggle)) # evaluate intensity at points density(cells, 0.05, at="points") plot(density(cells, sigma=0.4, kernel="epanechnikov")) # relative risk calculation by hand (see relrisk.ppp) lung <- split(chorley)$lung larynx <- split(chorley)$larynx D <- density(lung, sigma=2) plot(density(larynx, sigma=2, weights=1/D)) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. Diggle, P.J. (1985) A kernel method for smoothing point process data. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C) \bold{34} (1985) 138--147. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Diggle, P.J. (2010) Nonparametric methods. Chapter 18, pp. 299--316 in A.E. Gelfand, P.J. Diggle, M. Fuentes and P. Guttorp (eds.) \emph{Handbook of Spatial Statistics}, CRC Press, Boca Raton, FL. Jones, M.C. (1993) Simple boundary corrections for kernel density estimation. \emph{Statistics and Computing} \bold{3}, 135--146. 
} \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/dimhat.Rd0000644000176200001440000000267513160710571014440 0ustar liggesusers\name{dimhat} \alias{dimhat} \title{ Estimate Dimension of Central Subspace } \description{ Given the kernel matrix that characterises a central subspace, this function estimates the dimension of the subspace. } \usage{ dimhat(M) } \arguments{ \item{M}{ Kernel of subspace. A symmetric, non-negative definite, numeric matrix, typically obtained from \code{\link{sdr}}. } } \details{ This function computes the maximum descent estimate of the dimension of the central subspace with a given kernel matrix \code{M}. The matrix \code{M} should be the kernel matrix of a central subspace, which can be obtained from \code{\link{sdr}}. It must be a symmetric, non-negative-definite, numeric matrix. The algorithm finds the eigenvalues \eqn{\lambda_1 \ge \ldots \ge \lambda_n}{lambda[1] \ge ...\ge lambda[n]} of \eqn{M}, and then determines the index \eqn{k} for which \eqn{\lambda_k/\lambda_{k-1}}{lambda[k]/lambda[k-1]} is greatest. } \value{ A single integer giving the estimated dimension. } \seealso{ \code{\link{sdr}}, \code{\link{subspaceDistance}} } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. } \keyword{array} \keyword{algebra} \keyword{multivariate} spatstat/man/auc.Rd0000644000176200001440000000663713160710571013744 0ustar liggesusers\name{auc} \alias{auc} \alias{auc.ppp} \alias{auc.lpp} \alias{auc.ppm} \alias{auc.kppm} \alias{auc.lppm} \title{ Area Under ROC Curve } \description{ Compute the AUC (area under the Receiver Operating Characteristic curve) for a fitted point process model. 
} \usage{ auc(X, \dots) \method{auc}{ppp}(X, covariate, \dots, high = TRUE) \method{auc}{ppm}(X, \dots) \method{auc}{kppm}(X, \dots) \method{auc}{lpp}(X, covariate, \dots, high = TRUE) \method{auc}{lppm}(X, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). } \item{covariate}{ Spatial covariate. Either a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution for calculations. } \item{high}{ Logical value indicating whether the threshold operation should favour high or low values of the covariate. } } \details{ This command computes the AUC, the area under the Receiver Operating Characteristic curve. The ROC itself is computed by \code{\link{roc}}. For a point pattern \code{X} and a covariate \code{Z}, the AUC is a numerical index that measures the ability of the covariate to separate the spatial domain into areas of high and low density of points. Let \eqn{x_i}{x[i]} be a randomly-chosen data point from \code{X} and \eqn{U} a randomly-selected location in the study region. The AUC is the probability that \eqn{Z(x_i) > Z(U)}{Z(x[i]) > Z(U)} assuming \code{high=TRUE}. That is, AUC is the probability that a randomly-selected data point has a higher value of the covariate \code{Z} than does a randomly-selected spatial location. The AUC is a number between 0 and 1. A value of 0.5 indicates a complete lack of discriminatory power. For a fitted point process model \code{X}, the AUC measures the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. Suppose \eqn{\lambda(u)}{\lambda(u)} is the intensity function of the model. 
The AUC is the probability that \eqn{\lambda(x_i) > \lambda(U)}{\lambda(x[i]) > \lambda(U)}. That is, AUC is the probability that a randomly-selected data point has higher predicted intensity than does a randomly-selected spatial location. The AUC is \bold{not} a measure of the goodness-of-fit of the model (Lobo et al, 2007). } \value{ A numeric vector of length 2 giving the AUC value and the theoretically expected AUC value for this model. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. (2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. } \seealso{ \code{\link{roc}} } \examples{ fit <- ppm(swedishpines ~ x+y) auc(fit) auc(swedishpines, "x") } \keyword{spatial} spatstat/man/linearKdot.inhom.Rd0000644000176200001440000001033513160710621016363 0ustar liggesusers\name{linearKdot.inhom} \alias{linearKdot.inhom} \title{ Inhomogeneous multitype K Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype \eqn{K} function which counts the expected number of points (of any type) within a given distance of a point of type \eqn{i}. } \usage{ linearKdot.inhom(X, i, lambdaI, lambdadot, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot type \eqn{K} function \eqn{K_{i\bullet}(r)}{K[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). 
} \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdadot}{ Intensity values for all points of \code{X}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{lambdaI} and \code{lambdadot} if they are functions. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kdot.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. 
The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. If \code{lambdaI} or \code{lambdadot} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link{linearK}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lam. <- function(x,y,const=sum(lam)){ rep(const, length(x)) } K <- linearKdot.inhom(chicago, "assault", lamI, lam.) \dontrun{ fit <- lppm(chicago, ~marks + x) linearKdot.inhom(chicago, "assault", fit, fit) } } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/adaptive.density.Rd0000644000176200001440000000737513160710571016447 0ustar liggesusers\name{adaptive.density} \alias{adaptive.density} \title{Intensity Estimate of Point Pattern Using Tessellation} \description{ Computes an adaptive estimate of the intensity function of a point pattern. } \usage{ adaptive.density(X, f = 0.1, ..., nrep = 1, verbose=TRUE) } \arguments{ \item{X}{Point pattern dataset (object of class \code{"ppp"}).} \item{f}{Fraction (between 0 and 1 inclusive) of the data points that will be removed from the data and used to determine a tessellation for the intensity estimate. 
} \item{\dots}{Arguments passed to \code{\link{as.im}} determining the pixel resolution of the result. } \item{nrep}{Number of independent repetitions of the randomised procedure.} \item{verbose}{Logical value indicating whether to print progress reports.} } \details{ This function is an alternative to \code{\link{density.ppp}}. It computes an estimate of the intensity function of a point pattern dataset. The result is a pixel image giving the estimated intensity, If \code{f=1}, the Voronoi estimate (Barr and Schoenberg, 2010) is computed: the point pattern \code{X} is used to construct a Voronoi/Dirichlet tessellation (see \code{\link{dirichlet}}); the areas of the Dirichlet tiles are computed; the estimated intensity in each tile is the reciprocal of the tile area. If \code{f=0}, the intensity estimate at every location is equal to the average intensity (number of points divided by window area). If \code{f} is strictly between 0 and 1, the dataset \code{X} is randomly split into two patterns \code{A} and \code{B} containing a fraction \code{f} and \code{1-f}, respectively, of the original data. The subpattern \code{A} is used to construct a Dirichlet tessellation, while the subpattern \code{B} is retained for counting. For each tile of the Dirichlet tessellation, we count the number of points of \code{B} falling in the tile, and divide by the area of the same tile, to obtain an estimate of the intensity of the pattern \code{B} in the tile. This estimate is divided by \code{1-f} to obtain an estimate of the intensity of \code{X} in the tile. The result is a pixel image of intensity estimates which are constant on each tile of the tessellation. If \code{nrep} is greater than 1, this randomised procedure is repeated \code{nrep} times, and the results are averaged. This technique has been used by Ogata et al. (2003), Ogata (2004) and Baddeley (2007). } \value{ A pixel image (object of class \code{"im"}) whose values are estimates of the intensity of \code{X}. 
} \seealso{ \code{\link{density.ppp}}, \code{\link{dirichlet}}, \code{\link{im.object}}. } \references{ Baddeley, A. (2007) Validation of statistical models for spatial point patterns. In J.G. Babu and E.D. Feigelson (eds.) \emph{SCMA IV: Statistical Challenges in Modern Astronomy IV}, volume 317 of Astronomical Society of the Pacific Conference Series, San Francisco, California USA, 2007. Pages 22--38. Barr, C., and Schoenberg, F.P. (2010). On the Voronoi estimator for the intensity of an inhomogeneous planar Poisson process. \emph{Biometrika} \bold{97} (4), 977--984. Ogata, Y. (2004) Space-time model for regional seismicity and detection of crustal stress changes. \emph{Journal of Geophysical Research}, \bold{109}, 2004. Ogata, Y., Katsura, K. and Tanemura, M. (2003). Modelling heterogeneous space-time occurrences of earthquakes and its residual analysis. \emph{Applied Statistics} \bold{52} 499--509. } \examples{ plot(adaptive.density(nztrees, 1), main="Voronoi estimate") nr <- if(interactive()) 100 else 5 plot(adaptive.density(nztrees, nrep=nr), main="Adaptive estimate") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/Emark.Rd0000644000176200001440000001417413160710571014226 0ustar liggesusers\name{Emark} \alias{Emark} \alias{Vmark} \title{ Diagnostics for random marking } \description{ Estimate the summary functions \eqn{E(r)} and \eqn{V(r)} for a marked point pattern, proposed by Schlather et al (2004) as diagnostics for dependence between the points and the marks. } \usage{ Emark(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) Vmark(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. The pattern should have numeric marks. } \item{r}{Optional. Numeric vector. 
The values of the argument \eqn{r} at which the function \eqn{E(r)} or \eqn{V(r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{ If \code{TRUE}, normalise the estimate of \eqn{E(r)} or \eqn{V(r)} so that it would have value equal to 1 if the marks are independent of the points. } } \value{ If \code{marks(X)} is a numeric vector, the result is an object of class \code{"fv"} (see \code{\link{fv.object}}). If \code{marks(X)} is a data frame, the result is a list of objects of class \code{"fv"}, one for each column of marks. An object of class \code{"fv"} is essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{E(r)} or \eqn{V(r)} has been estimated } \item{theo}{the theoretical, constant value of \eqn{E(r)} or \eqn{V(r)} when the marks attached to different points are independent } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{E(r)} or \eqn{V(r)} obtained by the edge corrections named. } \details{ For a marked point process, Schlather et al (2004) defined the functions \eqn{E(r)} and \eqn{V(r)} to be the conditional mean and conditional variance of the mark attached to a typical random point, given that there exists another random point at a distance \eqn{r} away from it. 
More formally, \deqn{ E(r) = E_{0u}[M(0)] }{ E(r) = E[0u] M(0) } and \deqn{ V(r) = E_{0u}[(M(0)-E(u))^2] }{ V(r) = E[0u]((M(0)-E(u))^2) } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}, and where \eqn{M(0)} denotes the mark attached to the point \eqn{0}. These functions may serve as diagnostics for dependence between the points and the marks. If the points and marks are independent, then \eqn{E(r)} and \eqn{V(r)} should be constant (not depending on \eqn{r}). See Schlather et al (2004). The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern with numeric marks. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{E(r)} and \eqn{V(r)} are estimated. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } } Note that the estimator assumes the process is stationary (spatially homogeneous). The numerator and denominator of the mark correlation function (in the expression above) are estimated using density estimation techniques. 
The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } } \references{ Schlather, M. and Ribeiro, P. and Diggle, P. (2004) Detecting dependence between marks and locations of marked point processes. \emph{Journal of the Royal Statistical Society, series B} \bold{66} (2004) 79-83. } \seealso{ Mark correlation \code{\link{markcorr}}, mark variogram \code{\link{markvario}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. } \examples{ plot(Emark(spruces)) E <- Emark(spruces, method="density", kernel="epanechnikov") plot(Vmark(spruces)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/fasp.object.Rd0000644000176200001440000000621413160710621015355 0ustar liggesusers\name{fasp.object} \alias{fasp.object} %DoNotExport \title{Function Arrays for Spatial Patterns} \description{ A class \code{"fasp"} to represent a \dQuote{matrix} of functions, amenable to plotting as a matrix of plot panels. } \details{ An object of this class is a convenient way of storing (and later plotting, editing, etc) a set of functions \eqn{f_{i,j}(r)}{f[i,j](r)} of a real argument \eqn{r}, defined for each possible pair \eqn{(i,j)} of indices \eqn{1 \le i,j \le n}{1 <= i,j <= n}. We may think of this as a matrix or array of functions \eqn{f_{i,j}}{f[i,j]}. 
Function arrays are particularly useful in the analysis of a multitype point pattern (a point pattern in which the points are identified as belonging to separate types). We may want to compute a summary function for the points of type \eqn{i} only, for each of the possible types \eqn{i}. This produces a \eqn{1 \times m}{1 * m} array of functions. Alternatively we may compute a summary function for each possible pair of types \eqn{(i,j)}. This produces an \eqn{m \times m}{m * m} array of functions. For multitype point patterns the command \code{\link{alltypes}} will compute arrays of summary functions for each possible type or for each possible pair of types. The function \code{\link{alltypes}} returns an object of class \code{"fasp"}. An object of class \code{"fasp"} is a list containing at least the following components: \describe{ \item{fns}{ A list of data frames, each representing one of the functions. } \item{which}{ A matrix representing the spatial arrangement of the functions. If \code{which[i,j] = k} then the function represented by \code{fns[[k]]} should be plotted in the panel at position \eqn{(i,j)}. If \code{which[i,j] = NA} then nothing is plotted in that position. } \item{titles}{ A list of character strings, providing suitable plotting titles for the functions. } \item{default.formulae}{ A list of default formulae for plotting each of the functions. } \item{title}{ A character string, giving a default title for the array when it is plotted. } } } \section{Functions available}{ There are methods for \code{plot}, \code{print} and \code{"["} for this class. The plot method displays the entire array of functions. The method \code{\link{[.fasp}} selects a sub-array using the natural indices \code{i,j}. The command \code{\link{eval.fasp}} can be used to apply a transformation to each function in the array, and to combine two arrays. 
} \seealso{ \code{\link{alltypes}}, \code{\link{plot.fasp}}, \code{\link{[.fasp}}, \code{\link{eval.fasp}} } \examples{ # multitype point pattern data(amacrine) GG <- alltypes(amacrine, "G") plot(GG) # select the row corresponding to cells of type "on" Gon <- GG["on", ] plot(Gon) # extract the G function for i = "on", j = "off" Gonoff <- GG["on", "off", drop=TRUE] # Fisher variance stabilising transformation GGfish <- eval.fasp(asin(sqrt(GG))) plot(GGfish) } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/rmhmodel.default.Rd0000644000176200001440000005120113160710621016405 0ustar liggesusers\name{rmhmodel.default} \alias{rmhmodel.default} \title{Build Point Process Model for Metropolis-Hastings Simulation.} \description{ Builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ \method{rmhmodel}{default}(..., cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL) } \arguments{ \item{\dots}{Ignored.} \item{cif}{Character string specifying the choice of model} \item{par}{Parameters of the model} \item{w}{Spatial window in which to simulate} \item{trend}{Specification of the trend in the model} \item{types}{A vector of factor levels defining the possible marks, for a multitype process. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.default} is the default method. It builds a description of the point process model from the simple arguments listed. 
The argument \code{cif} is a character string specifying the choice of interpoint interaction for the point process. The current options are \describe{ \item{\code{'areaint'}}{Area-interaction process.} \item{\code{'badgey'}}{Baddeley-Geyer (hybrid Geyer) process.} \item{\code{'dgs'}}{Diggle, Gates and Stibbard (1987) process} \item{\code{'diggra'}}{Diggle and Gratton (1984) process} \item{\code{'fiksel'}}{Fiksel double exponential process (Fiksel, 1984).} \item{\code{'geyer'}}{Saturation process (Geyer, 1999).} \item{\code{'hardcore'}}{Hard core process} \item{\code{'lennard'}}{Lennard-Jones process} \item{\code{'lookup'}}{General isotropic pairwise interaction process, with the interaction function specified via a ``lookup table''.} \item{\code{'multihard'}}{Multitype hardcore process} \item{\code{'penttinen'}}{The Penttinen process} \item{\code{'strauss'}}{The Strauss process} \item{\code{'straush'}}{The Strauss process with hard core} \item{\code{'sftcr'}}{The Softcore process} \item{\code{'straussm'}}{ The multitype Strauss process} \item{\code{'straushm'}}{Multitype Strauss process with hard core} \item{\code{'triplets'}}{Triplets process (Geyer, 1999).} } It is also possible to specify a \emph{hybrid} of these interactions in the sense of Baddeley et al (2013). In this case, \code{cif} is a character vector containing names from the list above. For example, \code{cif=c('strauss', 'geyer')} would specify a hybrid of the Strauss and Geyer models. The argument \code{par} supplies parameter values appropriate to the conditional intensity function being invoked. For the interactions listed above, these parameters are: \describe{ \item{areaint:}{ (Area-interaction process.) A \bold{named} list with components \code{beta,eta,r} which are respectively the ``base'' intensity, the scaled interaction parameter and the interaction radius. } \item{badgey:}{ (Baddeley-Geyer process.) 
A \bold{named} list with components \code{beta} (the ``base'' intensity), \code{gamma} (a vector of non-negative interaction parameters), \code{r} (a vector of interaction radii, of the same length as \code{gamma}, in \emph{increasing} order), and \code{sat} (the saturation parameter(s); this may be a scalar, or a vector of the same length as \code{gamma} and \code{r}; all values should be at least 1). Note that because of the presence of ``saturation'' the \code{gamma} values are permitted to be larger than 1. } \item{dgs:}{ (Diggle, Gates, and Stibbard process. See Diggle, Gates, and Stibbard (1987)) A \bold{named} list with components \code{beta} and \code{rho}. This process has pairwise interaction function equal to \deqn{ e(t) = \sin^2\left(\frac{\pi t}{2\rho}\right) }{ e(t) = sin^2((pi * t)/(2 * rho)) } for \eqn{t < \rho}{t < rho}, and equal to 1 for \eqn{t \ge \rho}{t >= rho}. } \item{diggra:}{ (Diggle-Gratton process. See Diggle and Gratton (1984) and Diggle, Gates and Stibbard (1987).) A \bold{named} list with components \code{beta}, \code{kappa}, \code{delta} and \code{rho}. This process has pairwise interaction function \eqn{e(t)} equal to 0 for \eqn{t < \delta}{t < delta}, equal to \deqn{ \left(\frac{t-\delta}{\rho-\delta}\right)^\kappa }{ ((t-delta)/(rho-delta))^kappa } for \eqn{\delta \le t < \rho}{delta <= t < rho}, and equal to 1 for \eqn{t \ge \rho}{t >= rho}. Note that here we use the symbol \eqn{\kappa}{kappa} where Diggle, Gates, and Stibbard use \eqn{\beta}{beta} since we reserve the symbol \eqn{\beta}{beta} for an intensity parameter. } \item{fiksel:}{ (Fiksel double exponential process, see Fiksel (1984)) A \bold{named} list with components \code{beta}, \code{r}, \code{hc}, \code{kappa} and \code{a}. This process has pairwise interaction function \eqn{e(t)} equal to 0 for \eqn{t < hc}, equal to \deqn{ \exp(a \exp(- \kappa t)) }{ exp(a * exp( - kappa * t)) } for \eqn{hc \le t < r}{hc <= t < r}, and equal to 1 for \eqn{t \ge r}{t >= r}. 
} \item{geyer:}{ (Geyer's saturation process. See Geyer (1999).) A \bold{named} list with components \code{beta}, \code{gamma}, \code{r}, and \code{sat}. The components \code{beta}, \code{gamma}, \code{r} are as for the Strauss model, and \code{sat} is the ``saturation'' parameter. The model is Geyer's ``saturation'' point process model, a modification of the Strauss process in which we effectively impose an upper limit (\code{sat}) on the number of neighbours which will be counted as close to a given point. Explicitly, a saturation point process with interaction radius \eqn{r}, saturation threshold \eqn{s}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{\beta \gamma^{\min(s, t(x_i,X))}}{beta gamma^min(s,t(x[i],X))} to the probability density of the point pattern, where \eqn{t(x_i,X)}{t(x[i],X)} denotes the number of ``\eqn{r}-close neighbours'' of \eqn{x_i}{x[i]} in the pattern \eqn{X}. If the saturation threshold \eqn{s} is infinite, the Geyer process reduces to a Strauss process with interaction parameter \eqn{\gamma^2}{gamma^2} rather than \eqn{\gamma}{gamma}. } \item{hardcore:}{ (Hard core process.) A \bold{named} list with components \code{beta} and \code{hc} where \code{beta} is the base intensity and \code{hc} is the hard core distance. This process has pairwise interaction function \eqn{e(t)} equal to 1 if \eqn{t > hc} and 0 if \eqn{t <= hc}. } \item{lennard:}{ (Lennard-Jones process.) A \bold{named} list with components \code{sigma} and \code{epsilon}, where \code{sigma} is the characteristic diameter and \code{epsilon} is the well depth. See \code{\link{LennardJones}} for explanation. } \item{multihard:}{ (Multitype hard core process.) 
A \bold{named} list with components \code{beta} and \code{hradii}, where \code{beta} is a vector of base intensities for each type of point, and \code{hradii} is a matrix of hard core radii between each pair of types. } \item{penttinen:}{ (Penttinen process.) A \bold{named} list with components \code{beta,gamma,r} which are respectively the ``base'' intensity, the pairwise interaction parameter, and the disc radius. Note that \code{gamma} must be less than or equal to 1. See \code{\link{Penttinen}} for explanation. (Note that there is also an algorithm for perfect simulation of the Penttinen process, \code{\link{rPenttinen}}) } \item{strauss:}{ (Strauss process.) A \bold{named} list with components \code{beta,gamma,r} which are respectively the ``base'' intensity, the pairwise interaction parameter and the interaction radius. Note that \code{gamma} must be less than or equal to 1. (Note that there is also an algorithm for perfect simulation of the Strauss process, \code{\link{rStrauss}}) } \item{straush:}{ (Strauss process with hardcore.) A \bold{named} list with entries \code{beta,gamma,r,hc} where \code{beta}, \code{gamma}, and \code{r} are as for the Strauss process, and \code{hc} is the hardcore radius. Of course \code{hc} must be less than \code{r}. } \item{sftcr:}{ (Softcore process.) A \bold{named} list with components \code{beta,sigma,kappa}. Again \code{beta} is a ``base'' intensity. The pairwise interaction between two points \eqn{u \neq v}{u != v} is \deqn{ \exp \left \{ - \left ( \frac{\sigma}{||u-v||} \right )^{2/\kappa} \right \} }{-(sigma/||u-v||)^(2/kappa)} Note that it is necessary that \eqn{0 < \kappa < 1}{0 < kappa <1}. } \item{straussm:}{ (Multitype Strauss process.) A \bold{named} list with components \itemize{ \item \code{beta}: A vector of ``base'' intensities, one for each possible type. 
\item \code{gamma}: A \bold{symmetric} matrix of interaction parameters, with \eqn{\gamma_{ij}}{gamma_ij} pertaining to the interaction between type \eqn{i} and type \eqn{j}. \item \code{radii}: A \bold{symmetric} matrix of interaction radii, with entries \eqn{r_{ij}}{r_ij} pertaining to the interaction between type \eqn{i} and type \eqn{j}. } } \item{straushm:}{ (Multitype Strauss process with hardcore.) A \bold{named} list with components \code{beta} and \code{gamma} as for \code{straussm} and \bold{two} ``radii'' components: \itemize{ \item \code{iradii}: the interaction radii \item \code{hradii}: the hardcore radii } which are both symmetric matrices of nonnegative numbers. The entries of \code{hradii} must be less than the corresponding entries of \code{iradii}. } \item{triplets:}{ (Triplets process.) A \bold{named} list with components \code{beta,gamma,r} which are respectively the ``base'' intensity, the triplet interaction parameter and the interaction radius. Note that \code{gamma} must be less than or equal to 1. } \item{lookup:}{ (Arbitrary pairwise interaction process with isotropic interaction.) A \bold{named} list with components \code{beta}, \code{r}, and \code{h}, or just with components \code{beta} and \code{h}. This model is the pairwise interaction process with an isotropic interaction given by any chosen function \eqn{H}. Each pair of points \eqn{x_i, x_j}{x[i], x[j]} in the point pattern contributes a factor \eqn{H(d(x_i, x_j))}{H(d(x[i],x[j]))} to the probability density, where \eqn{d} denotes distance and \eqn{H} is the pair interaction function. The component \code{beta} is a (positive) scalar which determines the ``base'' intensity of the process. In this implementation, \eqn{H} must be a step function. It is specified by the user in one of two ways. 
\itemize{ \item \bold{as a vector of values:} If \code{r} is present, then \code{r} is assumed to give the locations of jumps in the function \eqn{H}, while the vector \code{h} gives the corresponding values of the function. Specifically, the interaction function \eqn{H(t)} takes the value \code{h[1]} for distances \eqn{t} in the interval \code{[0, r[1])}; takes the value \code{h[i]} for distances \eqn{t} in the interval \code{[r[i-1], r[i])} where \eqn{i = 2,\ldots, n}{i = 2, ..., n}; and takes the value 1 for \eqn{t \ge r[n]}{t >= r[n]}. Here \eqn{n} denotes the length of \code{r}. The components \code{r} and \code{h} must be numeric vectors of equal length. The \code{r} values must be strictly positive, and sorted in increasing order. The entries of \code{h} must be non-negative. If any entry of \code{h} is greater than 1, then the entry \code{h[1]} must be 0 (otherwise the specified process is non-existent). Greatest efficiency is achieved if the values of \code{r} are equally spaced. [\bold{Note:} The usage of \code{r} and \code{h} has \emph{changed} from the previous usage in \pkg{spatstat} versions 1.4-7 to 1.5-1, in which ascending order was not required, and in which the first entry of \code{r} had to be 0.] \item \bold{as a stepfun object:} If \code{r} is absent, then \code{h} must be an object of class \code{"stepfun"} specifying a step function. Such objects are created by \code{\link{stepfun}}. The stepfun object \code{h} must be right-continuous (which is the default using \code{\link{stepfun}}.) The values of the step function must all be nonnegative. The values must all be less than 1 unless the function is identically zero on some initial interval \eqn{[0,r)}. The rightmost value (the value of \code{h(t)} for large \code{t}) must be equal to 1. Greatest efficiency is achieved if the jumps (the ``knots'' of the step function) are equally spaced. 
} } } For a hybrid model, the argument \code{par} should be a list, of the same length as \code{cif}, such that \code{par[[i]]} is a list of the parameters required for the interaction \code{cif[i]}. See the Examples. The optional argument \code{trend} determines the spatial trend in the model, if it has one. It should be a function or image (or a list of such, if the model is multitype) to provide the value of the trend at an arbitrary point. \describe{ \item{trend given as a function:}{A trend function may be a function of any number of arguments, but the first two must be the \eqn{x,y} coordinates of a point. Auxiliary arguments may be passed to the \code{trend} function at the time of simulation, via the \code{\dots} argument to \code{\link{rmh}}. The function \bold{must} be \bold{vectorized}. That is, it must be capable of accepting vector valued \code{x} and \code{y} arguments. Put another way, it must be capable of calculating the trend value at a number of points, simultaneously, and should return the \bold{vector} of corresponding trend values. } \item{trend given as an image:}{ An image (see \code{\link{im.object}}) provides the trend values at a grid of points in the observation window and determines the trend value at other points as the value at the nearest grid point. } } Note that the trend or trends must be \bold{non-negative}; no checking is done for this. The optional argument \code{w} specifies the window in which the pattern is to be generated. If specified, it must be in a form which can be coerced to an object of class \code{owin} by \code{\link{as.owin}}. The optional argument \code{types} specifies the possible types in a multitype point process. If the model being simulated is multitype, and \code{types} is not specified, then this vector defaults to \code{1:ntypes} where \code{ntypes} is the number of types. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. 
(2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \url{http://www.jstatsoft.org/v55/i11/} Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Fiksel, T. (1984) Estimation of parameterized pair potentials of marked and non-marked Gibbsian point processes. \emph{Elektronische Informationsverarbeitung und Kybernetik} \bold{20}, 270--278. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \section{Warnings in Respect of ``lookup''}{ For the \code{lookup} cif, the entries of the \code{r} component of \code{par} must be \emph{strictly positive} and sorted into ascending order. Note that if you specify the \code{lookup} pairwise interaction function via \code{\link{stepfun}()} the arguments \code{x} and \code{y} which are passed to \code{stepfun()} are slightly different from \code{r} and \code{h}: \code{length(y)} is equal to \code{1+length(x)}; the final entry of \code{y} must be equal to 1 --- i.e. this value is explicitly supplied by the user rather than getting tacked on internally. The step function returned by \code{stepfun()} must be right continuous (this is the default behaviour of \code{stepfun()}) otherwise an error is given. 
} \seealso{ \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \rmhInteractionsList. } \examples{ # Strauss process: mod01 <- rmhmodel(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) # The above could also be simulated using 'rStrauss' # Strauss with hardcore: mod04 <- rmhmodel(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=owin(c(0,10),c(0,5))) # Hard core: mod05 <- rmhmodel(cif="hardcore",par=list(beta=2,hc=0.3), w=square(5)) # Soft core: w <- square(10) mod07 <- rmhmodel(cif="sftcr", par=list(beta=0.8,sigma=0.1,kappa=0.5), w=w) # Area-interaction process: mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7), w=c(0,10,0,10)) # Baddeley-Geyer process: mod99 <- rmhmodel(cif="badgey",par=list(beta=0.3, gamma=c(0.2,1.8,2.4),r=c(0.035,0.07,0.14),sat=5), w=unit.square()) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250)) # specify types mod09 <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250), types=c("A", "B")) # Multitype Hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod08hard <- rmhmodel(cif="multihard", par=list(beta=beta,hradii=rhc), w=square(250), types=c("A", "B")) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) ri <- matrix(c(45,45,45,45),2,2) rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- rmhmodel(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=ri,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) # Triplets process: mod11 <- rmhmodel(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- 
seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- rmhmodel(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) # hybrid model modhy <- rmhmodel(cif=c('strauss', 'geyer'), par=list(list(beta=100,gamma=0.5,r=0.05), list(beta=1, gamma=0.7,r=0.1, sat=2)), w=square(1)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/invoke.symbolmap.Rd0000644000176200001440000000364313160710621016457 0ustar liggesusers\name{invoke.symbolmap} \alias{invoke.symbolmap} \title{ Plot Data Using Graphics Symbol Map } \description{ Apply a graphics symbol map to a vector of data values and plot the resulting symbols. } \usage{ invoke.symbolmap(map, values, x=NULL, y = NULL, \dots, add = FALSE, do.plot = TRUE, started = add && do.plot) } \arguments{ \item{map}{ Graphics symbol map (object of class \code{"symbolmap"}). } \item{values}{ Vector of data that can be mapped by the symbol map. } \item{x,y}{ Coordinate vectors for the spatial locations of the symbols to be plotted. } \item{\dots}{ Additional graphics parameters. } \item{add}{ Logical value indicating whether to add the symbols to an existing plot (\code{add=TRUE}) or to initialise a new plot (\code{add=FALSE}, the default). } \item{do.plot}{ Logical value indicating whether to actually perform the plotting. } \item{started}{ Logical value indicating whether the plot has already been initialised. } } \details{ A symbol map is an association between data values and graphical symbols. This command applies the symbol map \code{map} to the data \code{values} and plots the resulting symbols at the locations given by \code{\link{xy.coords}(x,y)}. } \value{ (Invisibly) the maximum diameter of the symbols, in user coordinate units. } \author{\adrian , \rolf and \ege. } \seealso{ \code{\link{plot.symbolmap}} to plot the graphics map itself. \code{\link{symbolmap}} to create a graphics map. 
} \examples{ g <- symbolmap(range=c(-1,1), shape=function(x) ifelse(x > 0, "circles", "squares"), size=function(x) sqrt(ifelse(x > 0, x/pi, -x))/15, bg=function(x) ifelse(x > 0, "green", "red")) plot(square(1), main="") a <- invoke.symbolmap(g, runif(10, -1, 1), runifpoint(10), add=TRUE) a } \keyword{spatial} \keyword{hplot} spatstat/man/grow.boxx.Rd0000644000176200001440000000250213160710621015110 0ustar liggesusers\name{grow.boxx} \alias{grow.boxx} \alias{grow.box3} \title{Add margins to box in any dimension} \description{ Adds a margin to a box of class boxx. } \usage{ grow.boxx(W, left, right = left) grow.box3(W, left, right = left) } \arguments{ \item{W}{ A box (object of class \code{"boxx"} or \code{"box3"}). } \item{left}{Width of margin to be added to left endpoint of box side in every dimension. A single nonnegative number, or a vector of same length as the dimension of the box to add different left margin in each dimension. } \item{right}{Width of margin to be added to right endpoint of box side in every dimension. A single nonnegative number, or a vector of same length as the dimension of the box to add different right margin in each dimension. } } \value{ Another object of the same class \code{"boxx"} or \code{"box3"} representing the window after margins are added. } \seealso{ \code{\link{grow.rectangle}}, \code{\link{boxx}}, \code{\link{box3}} } \examples{ w <- boxx(c(0,10), c(0,10), c(0,10), c(0,10)) # add a margin of size 1 on both sides in all four dimensions b12 <- grow.boxx(w, 1) # add margin of size 2 at left, and margin of size 3 at right, # in each dimension. v <- grow.boxx(w, 2, 3) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{manip} spatstat/man/plot.anylist.Rd0000644000176200001440000002052413160710621015617 0ustar liggesusers\name{plot.anylist} \alias{plot.anylist} \title{Plot a List of Things} \description{ Plots a list of things } \usage{ \method{plot}{anylist}(x, \dots, main, arrange=TRUE, nrows=NULL, ncols=NULL, main.panel=NULL, mar.panel=c(2,1,1,2), hsep=0, vsep=0, panel.begin=NULL, panel.end=NULL, panel.args=NULL, panel.begin.args=NULL, panel.end.args=NULL, plotcommand="plot", adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL, adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE) } \arguments{ \item{x}{ An object of the class \code{"anylist"}. Essentially a list of objects. } \item{\dots}{ Arguments passed to \code{\link{plot}} when generating each plot panel. } \item{main}{ Overall heading for the plot. } \item{arrange}{ Logical flag indicating whether to plot the objects side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{main.panel}{ Optional. A character string, or a vector of character strings, giving the headings for each of the objects. } \item{mar.panel}{ Size of the margins outside each plot panel. A numeric vector of length 4 giving the bottom, left, top, and right margins in that order. (Alternatively the vector may have length 1 or 2 and will be replicated to length 4). See the section on \emph{Spacing between plots}. } \item{hsep,vsep}{ Additional horizontal and vertical separation between plot panels, expressed in the same units as \code{mar.panel}. } \item{panel.begin,panel.end}{ Optional. Functions that will be executed before and after each panel is plotted. See Details. } \item{panel.args}{ Optional. 
Function that determines different plot arguments for different panels. See Details. } \item{panel.begin.args}{ Optional. List of additional arguments for \code{panel.begin} when it is a function. } \item{panel.end.args}{ Optional. List of additional arguments for \code{panel.end} when it is a function. } \item{plotcommand}{ Optional. Character string containing the name of the command that should be executed to plot each panel. } \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{ Optional. Functions (with no arguments) that will be executed to generate additional plots at the margins (left, right, top and/or bottom, respectively) of the array of plots. } \item{adorn.size}{ Relative width (as a fraction of the other panels' widths) of the margin plots. } \item{equal.scales}{ Logical value indicating whether the components should be plotted at (approximately) the same physical scale. } \item{halign,valign}{ Logical values indicating whether panels in a column should be aligned to the same \eqn{x} coordinate system (\code{halign=TRUE}) and whether panels in a row should be aligned to the same \eqn{y} coordinate system (\code{valign=TRUE}). These are applicable only if \code{equal.scales=TRUE}. } } \value{ Null. } \details{ This is the \code{plot} method for the class \code{"anylist"}. An object of class \code{"anylist"} represents a list of objects intended to be treated in the same way. This is the method for \code{plot}. In the \pkg{spatstat} package, various functions produce an object of class \code{"anylist"}, essentially a list of objects of the same kind. These objects can be plotted in a nice arrangement using \code{plot.anylist}. See the Examples. The argument \code{panel.args} determines extra graphics parameters for each panel. It should be a function that will be called as \code{panel.args(i)} where \code{i} is the panel number. Its return value should be a list of graphics parameters that can be passed to the relevant \code{plot} method. 
These parameters override any parameters specified in the \code{\dots} arguments. The arguments \code{panel.begin} and \code{panel.end} determine graphics that will be plotted before and after each panel is plotted. They may be objects of some class that can be plotted with the generic \code{plot} command. Alternatively they may be functions that will be called as \code{panel.begin(i, y, main=main.panel[i])} and \code{panel.end(i, y, add=TRUE)} where \code{i} is the panel number and \code{y = x[[i]]}. If all entries of \code{x} are pixel images, the function \code{\link{image.listof}} is called to control the plotting. The arguments \code{equal.ribbon} and \code{col} can be used to determine the colour map or maps applied. If \code{equal.scales=FALSE} (the default), then the plot panels will have equal height on the plot device (unless there is only one column of panels, in which case they will have equal width on the plot device). This means that the objects are plotted at different physical scales, by default. If \code{equal.scales=TRUE}, then the dimensions of the plot panels on the plot device will be proportional to the spatial dimensions of the corresponding components of \code{x}. This means that the objects will be plotted at \emph{approximately} equal physical scales. If these objects have very different spatial sizes, the plot command could fail (when it tries to plot the smaller objects at a tiny scale), with an error message that the figure margins are too large. 
The objects will be plotted at \emph{exactly} equal physical scales, and \emph{exactly} aligned on the device, under the following conditions: \itemize{ \item every component of \code{x} is a spatial object whose position can be shifted by \code{\link{shift}}; \item \code{panel.begin} and \code{panel.end} are either \code{NULL} or they are spatial objects whose position can be shifted by \code{\link{shift}}; \item \code{adorn.left}, \code{adorn.right}, \code{adorn.top} and \code{adorn.bottom} are all \code{NULL}. } Another special case is when every component of \code{x} is an object of class \code{"fv"} representing a function. If \code{equal.scales=TRUE} then all these functions will be plotted with the same axis scales (i.e. with the same \code{xlim} and the same \code{ylim}). } \section{Spacing between plots}{ The spacing between individual plots is controlled by the parameters \code{mar.panel}, \code{hsep} and \code{vsep}. If \code{equal.scales=FALSE}, the plot panels are logically separate plots. The margins for each panel are determined by the argument \code{mar.panel} which becomes the graphics parameter \code{mar} described in the help file for \code{\link{par}}. One unit of \code{mar} corresponds to one line of text in the margin. If \code{hsep} or \code{vsep} are present, \code{mar.panel} is augmented by \code{c(vsep, hsep, vsep, hsep)/2}. If \code{equal.scales=TRUE}, all the plot panels are drawn in the same coordinate system which represents a physical scale. The unit of measurement for \code{mar.panel[1,3]} is one-sixth of the greatest height of any object plotted in the same row of panels, and the unit for \code{mar.panel[2,4]} is one-sixth of the greatest width of any object plotted in the same column of panels. If \code{hsep} or \code{vsep} are present, they are interpreted in the same units as \code{mar.panel[2]} and \code{mar.panel[1]} respectively. 
} \seealso{ \code{\link{contour.listof}}, \code{\link{image.listof}}, \code{\link{density.splitppp}} } \section{Error messages}{ If the error message \sQuote{Figure margins too large} occurs, this generally means that one of the objects had a much smaller physical scale than the others. Ensure that \code{equal.scales=FALSE} and increase the values of \code{mar.panel}. } \examples{ trichotomy <- list(regular=cells, random=japanesepines, clustered=redwood) K <- lapply(trichotomy, Kest) K <- as.anylist(K) plot(K, main="") # list of 3D point patterns ape1 <- osteo[osteo$shortid==4, "pts", drop=TRUE] class(ape1) plot(ape1, main.panel="", mar.panel=0.1, hsep=0.7, vsep=1, cex=1.5, pch=21, bg='white') } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/discretise.Rd0000644000176200001440000000544113160710571015322 0ustar liggesusers\name{discretise} \alias{discretise} \title{ Safely Convert Point Pattern Window to Binary Mask } \description{ Given a point pattern, discretise its window by converting it to a binary pixel mask, adjusting the mask so that it still contains all the points. } \usage{ discretise(X, eps = NULL, dimyx = NULL, xy = NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}) to be converted.} \item{eps}{(optional) width and height of each pixel} \item{dimyx}{(optional) pixel array dimensions} \item{xy}{(optional) pixel coordinates} } \details{ This function modifies the point pattern \code{X} by converting its observation window \code{Window(X)} to a binary pixel image (a window of type \code{"mask"}). It ensures that no points of \code{X} are deleted by the discretisation. The window is first discretised using \code{\link{as.mask}}. It can happen that points of \code{X} that were inside the original window may fall outside the new mask. The \code{discretise} function corrects this by augmenting the mask (so that the mask includes any pixel that contains a point of the pattern). 
The arguments \code{eps}, \code{dimyx} and \code{xy} control the fineness of the pixel array. They are passed to \code{\link{as.mask}}. If \code{eps}, \code{dimyx} and \code{xy} are all absent or \code{NULL}, and if the window of \code{X} is of type \code{"mask"} to start with, then \code{discretise(X)} returns \code{X} unchanged. See \code{\link{as.mask}} for further details about the arguments \code{eps}, \code{dimyx}, and \code{xy}, and the process of converting a window to one of type \code{mask}. } \section{Error checking}{ Before doing anything, \code{discretise} checks that all the points of the pattern are actually inside the original window. This is guaranteed to be the case if the pattern was constructed using \code{\link{ppp}} or \code{\link{as.ppp}}. However anomalies are possible if the point pattern was created or manipulated inappropriately. These will cause an error. } \value{ A point pattern (object of class \code{"ppp"}), identical to \code{X}, except that its observation window has been converted to one of type \code{mask}. } \author{\adrian and \rolf } \seealso{ \code{\link{as.mask}} } \examples{ data(demopat) X <- demopat plot(X, main="original pattern") Y <- discretise(X, dimyx=50) plot(Y, main="discretise(X)") stopifnot(npoints(X) == npoints(Y)) # what happens if we just convert the window to a mask? W <- Window(X) M <- as.mask(W, dimyx=50) plot(M, main="window of X converted to mask") plot(X, add=TRUE, pch=16) plot(X[M], add=TRUE, pch=1, cex=1.5) XM <- X[M] cat(paste(npoints(X) - npoints(XM), "points of X lie outside M\n")) } \keyword{spatial} \keyword{manip} spatstat/man/crossdist.Rd0000644000176200001440000000252413160710571015200 0ustar liggesusers\name{crossdist} \alias{crossdist} \title{Pairwise distances} \description{ Computes the distances between pairs of `things' taken from two different datasets. } \usage{ crossdist(X, Y, \dots) } \arguments{ \item{X,Y}{ Two objects of the same class. 
} \item{\dots}{ Additional arguments depending on the method. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th thing in the first dataset to the \code{j}-th thing in the second dataset. } \details{ Given two datasets \code{X} and \code{Y} (representing either two point patterns or two line segment patterns) \code{crossdist} computes the Euclidean distance from each thing in the first dataset to each thing in the second dataset, and returns a matrix containing these distances. The function \code{crossdist} is generic, with methods for point patterns (objects of class \code{"ppp"}), line segment patterns (objects of class \code{"psp"}), and a default method. See the documentation for \code{\link{crossdist.ppp}}, \code{\link{crossdist.psp}} or \code{\link{crossdist.default}} for further details. } \seealso{ \code{\link{crossdist.ppp}}, \code{\link{crossdist.psp}}, \code{\link{crossdist.default}}, \code{\link{pairdist}}, \code{\link{nndist}} } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/coords.Rd0000644000176200001440000000470213160710571014454 0ustar liggesusers\name{coords} \Rdversion{1.1} \alias{coords} \alias{coords.ppp} \alias{coords.ppx} \alias{coords<-} \alias{coords<-.ppp} \alias{coords<-.ppx} \title{ Extract or Change Coordinates of a Spatial or Spatiotemporal Point Pattern } \description{ Given any kind of spatial or space-time point pattern, this function extracts the (space and/or time and/or local) coordinates of the points and returns them as a data frame. } \usage{ coords(x, ...) \method{coords}{ppp}(x, ...) \method{coords}{ppx}(x, ..., spatial = TRUE, temporal = TRUE, local=TRUE) coords(x, ...) <- value \method{coords}{ppp}(x, ...) 
<- value \method{coords}{ppx}(x, ..., spatial = TRUE, temporal = TRUE, local=TRUE) <- value } \arguments{ \item{x}{ A point pattern: either a two-dimensional point pattern (object of class \code{"ppp"}), a three-dimensional point pattern (object of class \code{"pp3"}), or a general multidimensional space-time point pattern (object of class \code{"ppx"}). } \item{\dots}{ Further arguments passed to methods. } \item{spatial,temporal,local}{ Logical values indicating whether to extract spatial, temporal and local coordinates, respectively. The default is to return all such coordinates. (Only relevant to \code{ppx} objects). } \item{value}{ New values of the coordinates. A numeric vector with one entry for each point in \code{x}, or a numeric matrix or data frame with one row for each point in \code{x}. } } \details{ The function \code{coords} extracts the coordinates from a point pattern. The function \code{coords<-} replaces the coordinates of the point pattern with new values. Both functions \code{coords} and \code{coords<-} are generic, with methods for the classes \code{"ppp"}) and \code{"ppx"}. An object of class \code{"pp3"} also inherits from \code{"ppx"} and is handled by the method for \code{"ppx"}. } \value{ \code{coords} returns a \code{data.frame} with one row for each point, containing the coordinates. \code{coords<-} returns the altered point pattern. } \author{\adrian and \rolf } \seealso{ \code{\link{ppx}}, \code{\link{pp3}}, \code{\link{ppp}}, \code{as.hyperframe.ppx}, \code{as.data.frame.ppx}. 
} \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t")) coords(X) coords(X, temporal=FALSE) coords(X) <- matrix(runif(12), ncol=3) } \keyword{spatial} \keyword{manip} spatstat/man/square.Rd0000644000176200001440000000266613160710621014466 0ustar liggesusers\name{square} \alias{square} \alias{unit.square} \title{Square Window} \description{ Creates a square window } \usage{ square(r=1, unitname=NULL) unit.square() } \arguments{ \item{r}{Numeric. The side length of the square, or a vector giving the minimum and maximum coordinate values. } \item{unitname}{ Optional. Name of unit of length. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying a window. } \details{ If \code{r} is a number, \code{square(r)} is a shortcut for creating a window object representing the square \eqn{[0,r] \times [0,r]}{[0,r] * [0,r]}. It is equivalent to the command \code{owin(c(0,r),c(0,r))}. If \code{r} is a vector of length 2, then \code{square(r)} creates the square with \code{x} and \code{y} coordinates ranging from \code{r[1]} to \code{r[2]}. \code{unit.square} creates the unit square \eqn{[0,1] \times [0,1]}{[0,1] * [0,1]}. It is equivalent to \code{square(1)} or \code{square()} or \code{owin(c(0,1),c(0,1))}. These commands are included for convenience, and to improve the readability of some code. } \seealso{ \code{\link{owin.object}}, \code{\link{owin}} } \examples{ W <- square(10) W <- square(c(-1,1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/summary.ppm.Rd0000644000176200001440000000510413160710621015444 0ustar liggesusers\name{summary.ppm} \alias{summary.ppm} \alias{print.summary.ppm} \title{Summarizing a Fitted Point Process Model} \description{ \code{summary} method for class \code{"ppm"}. 
} \usage{ \method{summary}{ppm}(object, \dots, quick=FALSE, fine=FALSE) \method{print}{summary.ppm}(x, \dots) } \arguments{ \item{object}{A fitted point process model.} \item{\dots}{Ignored.} \item{quick}{Logical flag controlling the scope of the summary.} \item{fine}{ Logical value passed to \code{\link{vcov.ppm}} determining whether to compute the quick, coarse estimate of variance (\code{fine=FALSE}, the default) or the slower, finer estimate (\code{fine=TRUE}). } \item{x}{Object of class \code{"summary.ppm"} as returned by \code{summary.ppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"ppm"}. An object of class \code{"ppm"} describes a fitted point process model. See \code{\link{ppm.object}}) for details of this class. \code{summary.ppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. (If \code{quick=TRUE} then only the information about the type of model is extracted.) \code{print.summary.ppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.ppm} is invoked implicitly when the user calls \code{summary.ppm} without assigning its value to anything. See the examples. You can also type \code{coef(summary(object))} to extract a table of the fitted coefficients of the point process model \code{object} together with standard errors and confidence limits. } \value{ \code{summary.ppm} returns an object of class \code{"summary.ppm"}, while \code{print.summary.ppm} returns \code{NULL}. 
} \examples{ # invent some data X <- rpoispp(42) # fit a model to it fit <- ppm(X ~ x, Strauss(r=0.1)) # summarize the fitted model summary(fit) # `quick' option summary(fit, quick=TRUE) # coefficients with standard errors and CI coef(summary(fit)) coef(summary(fit, fine=TRUE)) # save the full summary s <- summary(fit) # print it print(s) s # extract stuff names(s) coef(s) s$args$correction s$name s$trend$value \dontrun{ # multitype pattern data(demopat) fit <- ppm(demopat, ~marks, Poisson()) summary(fit) } # model with external covariates fitX <- ppm(X, ~Z, covariates=list(Z=function(x,y){x+y})) summary(fitX) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/rmhexpand.Rd0000644000176200001440000001273213160710621015147 0ustar liggesusers\name{rmhexpand} \alias{rmhexpand} \title{ Specify Simulation Window or Expansion Rule } \description{ Specify a spatial domain in which point process simulations will be performed. Alternatively, specify a rule which will be used to determine the simulation window. } \usage{ rmhexpand(x = NULL, ..., area = NULL, length = NULL, distance = NULL) } \arguments{ \item{x}{ Any kind of data determining the simulation window or the expansion rule. A window (object of class \code{"owin"}) specifying the simulation window, a numerical value specifying an expansion factor or expansion distance, a list containing one numerical value, an object of class \code{"rmhexpand"}, or \code{NULL}. } \item{\dots}{ Ignored. } \item{area}{ Area expansion factor. Incompatible with other arguments. } \item{length}{ Length expansion factor. Incompatible with other arguments. } \item{distance}{ Expansion distance (buffer width). Incompatible with other arguments. 
} } \details{ In the Metropolis-Hastings algorithm \code{\link{rmh}} for simulating spatial point processes, simulations are usually carried out on a spatial domain that is larger than the original window of the point process model, then subsequently clipped to the original window. The command \code{rmhexpand} can be used to specify the simulation window, or to specify a rule which will later be used to determine the simulation window from data. The arguments are all incompatible: at most one of them should be given. If the first argument \code{x} is given, it may be any of the following: \itemize{ \item a window (object of class \code{"owin"}) specifying the simulation window. \item an object of class \code{"rmhexpand"} specifying the expansion rule. \item a single numerical value, without attributes. This will be interpreted as the value of the argument \code{area}. \item either \code{c(area=v)} or \code{list(area=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{area}. \item either \code{c(length=v)} or \code{list(length=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{length}. \item either \code{c(distance=v)} or \code{list(distance=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{distance}. \item \code{NULL}, meaning that the expansion rule is not yet determined. } If one of the arguments \code{area}, \code{length} or \code{distance} is given, then the simulation window is determined from the original data window as follows. \describe{ \item{area}{ The bounding box of the original data window will be extracted, and the simulation window will be a scalar dilation of this rectangle. The argument \code{area} should be a numerical value, greater than or equal to 1. It specifies the area expansion factor, i.e. 
the ratio of the area of the simulation window to the area of the original point process window's bounding box. } \item{length}{ The bounding box of the original data window will be extracted, and the simulation window will be a scalar dilation of this rectangle. The argument \code{length} should be a numerical value, greater than or equal to 1. It specifies the length expansion factor, i.e. the ratio of the width (height) of the simulation window to the width (height) of the original point process window's bounding box. } \item{distance}{ The argument \code{distance} should be a numerical value, greater than or equal to 0. It specifies the width of a buffer region around the original data window. If the original data window is a rectangle, then this window is extended by a margin of width equal to \code{distance} around all sides of the original rectangle. The result is a rectangle. If the original data window is not a rectangle, then morphological dilation is applied using \code{\link{dilation.owin}} so that a margin or buffer of width equal to \code{distance} is created around all sides of the original window. The result is a non-rectangular window, typically of a different shape. } } } \section{Undetermined expansion}{ If \code{expand=NULL}, this is interpreted to mean that the expansion rule is \dQuote{not yet decided}. Expansion will be decided later, by the simulation algorithm \code{\link{rmh}}. If the model cannot be expanded (for example if the covariate data in the model are not available on a larger domain) then expansion will not occur. If the model can be expanded, then if the point process model has a finite interaction range \code{r}, the default is \code{rmhexpand(distance=2*r)}, and otherwise \code{rmhexpand(area=2)}. } \value{ An object of class \code{"rmhexpand"} specifying the expansion rule. There is a \code{print} method for this class. } \author{\adrian and \rolf } \seealso{ \code{\link{expand.owin}} to apply the rule to a window. 
\code{\link{will.expand}} to test whether expansion will occur. \code{\link{rmh}}, \code{\link{rmhcontrol}} for background details. } \examples{ rmhexpand() rmhexpand(2) rmhexpand(1) rmhexpand(length=1.5) rmhexpand(distance=0.1) rmhexpand(letterR) } \keyword{spatial} \keyword{datagen} spatstat/man/is.multitype.ppp.Rd0000644000176200001440000000435313160710621016425 0ustar liggesusers\name{is.multitype.ppp} \alias{is.multitype.ppp} \alias{is.multitype.lpp} \title{Test Whether A Point Pattern is Multitype} \description{ Tests whether a point pattern has ``marks'' attached to the points which classify the points into several types. } \usage{ \method{is.multitype}{ppp}(X, na.action="warn", \dots) \method{is.multitype}{lpp}(X, na.action="warn", \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) } \item{na.action}{ String indicating what to do if \code{NA} values are encountered amongst the marks. Options are \code{"warn"}, \code{"fatal"} and \code{"ignore"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a multitype point pattern. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. This function tests whether the point pattern \code{X} contains or involves marked points, \bold{and} that the marks are a factor. It is a method for the generic function \code{\link{is.multitype}}. For example, the \code{\link[spatstat.data]{amacrine}} dataset is multitype (there are two types of cells, on and off), but the \code{\link[spatstat.data]{longleaf}} dataset is \emph{not} multitype (the marks are real numbers). 
The argument \code{na.action} determines what action will be taken if the point pattern has a vector of marks but some or all of the marks are \code{NA}. Options are \code{"fatal"} to cause a fatal error; \code{"warn"} to issue a warning and then return \code{TRUE}; and \code{"ignore"} to take no action except returning \code{TRUE}. } \seealso{ \code{\link{is.multitype}}, \code{\link{is.multitype.ppm}} } \examples{ is.multitype(cells) #FALSE - no marks is.multitype(longleaf) #FALSE - real valued marks is.multitype(amacrine) #TRUE } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/superimpose.lpp.Rd0000644000176200001440000000501613160710621016323 0ustar liggesusers\name{superimpose.lpp} \alias{superimpose.lpp} \title{Superimpose Several Point Patterns on Linear Network} \description{ Superimpose any number of point patterns on the same linear network. } \usage{ \method{superimpose}{lpp}(\dots, L=NULL) } \arguments{ \item{\dots}{ Any number of arguments, each of which represents a point pattern on the same linear network. Each argument can be either an object of class \code{"lpp"}, giving both the spatial coordinates of the points and the linear network, or a \code{list(x,y)} or \code{list(x,y,seg,tp)} giving just the spatial coordinates of the points. } \item{L}{ Optional. The linear network. An object of class \code{"linnet"}. This argument is required if none of the other arguments is of class \code{"lpp"}. } } \value{ An object of class \code{"lpp"} representing the combined point pattern on the linear network. } \details{ This function is used to superimpose several point patterns on the same linear network. It is a method for the generic function \code{\link{superimpose}}. 
Each of the arguments \code{\dots} can be either a point pattern on a linear network (object of class \code{"lpp"} giving both the spatial coordinates of the points and the linear network), or a \code{list(x,y)} or \code{list(x,y,seg,tp)} giving just the spatial coordinates of the points. These arguments must represent point patterns on the \emph{same} linear network. The argument \code{L} is an alternative way to specify the linear network, and is required if none of the arguments \code{\dots} is an object of class \code{"lpp"}. The arguments \code{\dots} may be \emph{marked} patterns. The marks of each component pattern must have the same format. Numeric and character marks may be ``mixed''. If there is such mixing then the numeric marks are coerced to character in the combining process. If the mark structures are all data frames, then these data frames must have the same number of columns and identical column names. If the arguments \code{\dots} are given in the form \code{name=value}, then the \code{name}s will be used as an extra column of marks attached to the elements of the corresponding patterns. } \seealso{ \code{\link{superimpose}} } \examples{ X <- rpoislpp(5, simplenet) Y <- rpoislpp(10, simplenet) superimpose(X,Y) # not marked superimpose(A=X, B=Y) # multitype with types A and B } \author{\adrian \rolf \ege and Greg McSwiggan. } \keyword{spatial} \keyword{manip} spatstat/man/scanpp.Rd0000644000176200001440000000625613160710621014451 0ustar liggesusers\name{scanpp} \alias{scanpp} \title{Read Point Pattern From Data File} \description{ Reads a point pattern dataset from a text file. } \usage{ scanpp(filename, window, header=TRUE, dir="", factor.marks=NULL, ...) } \arguments{ \item{filename}{ String name of the file containing the coordinates of the points in the point pattern, and their marks if any. } \item{window}{ Window for the point pattern. An object of class \code{"owin"}. 
} \item{header}{ Logical flag indicating whether the first line of the file contains headings for the columns. Passed to \code{\link[utils]{read.table}}. } \item{dir}{ String containing the path name of the directory in which \code{filename} is to be found. Default is the current directory. } \item{factor.marks}{ Logical vector (or NULL) indicating whether marks are to be interpreted as factors. Defaults to \code{NULL} which means that strings will be interpreted as factors while numeric variables will not. See details. } \item{\dots}{ Ignored. } } \value{ A point pattern (an object of class \code{"ppp"}, see \code{\link{ppp.object}}). } \details{ This simple function reads a point pattern dataset from a file containing the cartesian coordinates of its points, and optionally the mark values for these points. The file identified by \code{filename} in directory \code{dir} should be a text file that can be read using \code{\link[utils]{read.table}}. Thus, each line of the file (except possibly the first line) contains data for one point in the point pattern. Data are arranged in columns. There should be either two columns (for an unmarked point pattern) or more columns (for a marked point pattern). If \code{header=FALSE} then the first two columns of data will be interpreted as the \eqn{x} and \eqn{y} coordinates of points. Remaining columns, if present, will be interpreted as containing the marks for these points. If \code{header=TRUE} then the first line of the file should contain string names for each of the columns of data. If there are columns named \code{x} and \code{y} then these will be taken as the cartesian coordinates, and any remaining columns will be taken as the marks. If there are no columns named \code{x} and \code{y} then the first and second columns will be taken as the cartesian coordinates. 
If a logical vector is provided for \code{factor.marks} the length should equal the number of mark columns (a shorter \code{factor.marks} is recycled to this length). This vector is then used to determine which mark columns should be interpreted as factors. Note: Strings will not be interpreted as factors if the corresponding entry in \code{factor.marks} is \code{FALSE}. Note that there is intentionally no default for \code{window}. The window of observation should be specified. If you really need to estimate the window, use the Ripley-Rasson estimator \code{\link{ripras}}. } \seealso{ \code{\link{ppp.object}}, \code{\link{ppp}}, \code{\link{as.ppp}}, \code{\link{ripras}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{IO} spatstat/man/transmat.Rd0000644000176200001440000000571313160710621015013 0ustar liggesusers\name{transmat} \alias{transmat} \title{ Convert Pixel Array Between Different Conventions } \description{ This function provides a simple way to convert arrays of pixel data between different display conventions. } \usage{ transmat(m, from, to) } \arguments{ \item{m}{ A matrix. } \item{from,to}{ Specifications of the spatial arrangement of the pixels. See Details. } } \details{ Pixel images are handled by many different software packages. In virtually all of these, the pixel values are stored in a matrix, and are accessed using the row and column indices of the matrix. However, different pieces of software use different conventions for mapping the matrix indices \eqn{[i,j]} to the spatial coordinates \eqn{(x,y)}. \itemize{ \item In the \emph{Cartesian} convention, the first matrix index \eqn{i} is associated with the first Cartesian coordinate \eqn{x}, and \eqn{j} is associated with \eqn{y}. This convention is used in \code{\link[graphics]{image.default}}. 
\item In the \emph{European reading order} convention, a matrix is displayed in the spatial coordinate system as it would be printed in a page of text: \eqn{i} is effectively associated with the negative \eqn{y} coordinate, and \eqn{j} is associated with \eqn{x}. This convention is used in some image file formats. \item In the \code{spatstat} convention, \eqn{i} is associated with the increasing \eqn{y} coordinate, and \eqn{j} is associated with \eqn{x}. This is also used in some image file formats. } To convert between these conventions, use the function \code{transmat}. If a matrix \code{m} contains pixel image data that is correctly displayed by software that uses the Cartesian convention, and we wish to convert it to the European reading convention, we can type \code{mm <- transmat(m, from="Cartesian", to="European")}. The transformed matrix \code{mm} will then be correctly displayed by software that uses the European convention. Each of the arguments \code{from} and \code{to} can be one of the names \code{"Cartesian"}, \code{"European"} or \code{"spatstat"} (partially matched) or it can be a list specifying another convention. For example \code{to=list(x="-i", y="-j")} specifies that rows of the output matrix are expected to be displayed as vertical columns in the plot, starting at the right side of the plot, as in the traditional Chinese, Japanese and Korean writing order. } \value{ Another matrix obtained by rearranging the entries of \code{m}. 
} \author{ \adrian \rolf and \ege } \examples{ opa <- par(mfrow=c(1,2)) # image in spatstat format Z <- bei.extra$elev plot(Z, main="plot.im", ribbon=FALSE) m <- as.matrix(Z) # convert matrix to format suitable for display by image.default Y <- transmat(m, from="spatstat", to="Cartesian") image(Y, asp=0.5, main="image.default", axes=FALSE) par(opa) } \keyword{spatial} \keyword{hplot} \keyword{manip} spatstat/man/rmpoint.Rd0000644000176200001440000002547113160710621014655 0ustar liggesusers\name{rmpoint} \alias{rmpoint} \title{Generate N Random Multitype Points} \description{ Generate a random multitype point pattern with a fixed number of points, or a fixed number of points of each type. } \usage{ rmpoint(n, f=1, fmax=NULL, win=unit.square(), types, ptypes, \dots, giveup=1000, verbose=FALSE, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of marked points to generate. Either a single number specifying the total number of points, or a vector specifying the number of points of each type. } \item{f}{ The probability density of the multitype points, usually un-normalised. Either a constant, a vector, a function \code{f(x,y,m, ...)}, a pixel image, a list of functions \code{f(x,y,...)} or a list of pixel images. } \item{fmax}{ An upper bound on the values of \code{f}. If missing, this number will be estimated. } \item{win}{ Window in which to simulate the pattern. Ignored if \code{f} is a pixel image or list of pixel images. } \item{types}{ All the possible types for the multitype pattern. } \item{ptypes}{ Optional vector of probabilities for each type. } \item{\dots}{ Arguments passed to \code{f} if it is a function. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{verbose}{ Flag indicating whether to report details of performance of the simulation algorithm. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. 
If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates random multitype point patterns consisting of a fixed number of points. Three different models are available: \describe{ \item{I. Random location and type:}{ If \code{n} is a single number and the argument \code{ptypes} is missing, then \code{n} independent, identically distributed random multitype points are generated. Their locations \code{(x[i],y[i])} and types \code{m[i]} have joint probability density proportional to \eqn{f(x,y,m)}. } \item{II. Random type, and random location given type:}{ If \code{n} is a single number and \code{ptypes} is given, then \code{n} independent, identically distributed random multitype points are generated. Their types \code{m[i]} have probability distribution \code{ptypes}. Given the types, the locations \code{(x[i],y[i])} have conditional probability density proportional to \eqn{f(x,y,m)}. } \item{III. Fixed types, and random location given type:}{ If \code{n} is a vector, then we generate \code{n[i]} independent, identically distributed random points of type \code{types[i]}. For points of type \eqn{m} the conditional probability density of location \eqn{(x,y)} is proportional to \eqn{f(x,y,m)}. } } Note that the density \code{f} is normalised in different ways in Model I and Models II and III. In Model I the normalised joint density is \eqn{g(x,y,m)=f(x,y,m)/Z} where \deqn{ Z = \sum_m \int\int \lambda(x,y,m) {\rm d}x \, {\rm d}y }{ Z = sum_[m] integral lambda(x,y,m) dx dy } while in Models II and III the normalised conditional density is \eqn{g(x,y\mid m) = f(x,y,m)/Z_m}{g(x,y|m) = f(x,y,m)/Z[m]} where \deqn{ Z_m = \int\int \lambda(x,y,m) {\rm d}x \, {\rm d}y. }{ Z[m] = integral lambda(x,y,m) dx dy. 
} In Model I, the marginal distribution of types is \eqn{p_m = Z_m/Z}{p[m] = Z[m]/Z}. The unnormalised density \code{f} may be specified in any of the following ways. \describe{ \item{single number:}{ If \code{f} is a single number, the conditional density of location given type is uniform. That is, the points of each type are uniformly distributed. In Model I, the marginal distribution of types is also uniform (all possible types have equal probability). } \item{vector:}{ If \code{f} is a numeric vector, the conditional density of location given type is uniform. That is, the points of each type are uniformly distributed. In Model I, the marginal distribution of types is proportional to the vector \code{f}. In Model II, the marginal distribution of types is \code{ptypes}, that is, the values in \code{f} are ignored. The argument \code{types} defaults to \code{names(f)}, or if that is null, \code{1:length(f)}. } \item{function:}{ If \code{f} is a function, it will be called in the form \code{f(x,y,m,\dots)} at spatial location \code{(x,y)} for points of type \code{m}. In Model I, the joint probability density of location and type is proportional to \code{f(x,y,m,\dots)}. In Models II and III, the conditional probability density of location \code{(x,y)} given type \code{m} is proportional to \code{f(x,y,m,\dots)}. The function \code{f} must work correctly with vectors \code{x}, \code{y} and \code{m}, returning a vector of function values. (Note that \code{m} will be a factor with levels \code{types}.) The value \code{fmax} must be given and must be an upper bound on the values of \code{f(x,y,m,\dots)} for all locations \code{(x, y)} inside the window \code{win} and all types \code{m}. The argument \code{types} must be given. } \item{list of functions:}{ If \code{f} is a list of functions, then the functions will be called in the form \code{f[[i]](x,y,\dots)} at spatial location \code{(x,y)} for points of type \code{types[i]}. 
In Model I, the joint probability density of location and type is proportional to \code{f[[m]](x,y,\dots)}. In Models II and III, the conditional probability density of location \code{(x,y)} given type \code{m} is proportional to \code{f[[m]](x,y,\dots)}. The function \code{f[[i]]} must work correctly with vectors \code{x} and \code{y}, returning a vector of function values. The value \code{fmax} must be given and must be an upper bound on the values of \code{f[[i]](x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. The argument \code{types} defaults to \code{names(f)}, or if that is null, \code{1:length(f)}. } \item{pixel image:}{ If \code{f} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), the unnormalised density at a location \code{(x,y)} for points of any type is equal to the pixel value of \code{f} for the pixel nearest to \code{(x,y)}. In Model I, the marginal distribution of types is uniform. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} must be given. } \item{list of pixel images:}{ If \code{f} is a list of pixel images, then the image \code{f[[i]]} determines the density values of points of type \code{types[i]}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} defaults to \code{names(f)}, or if that is null, \code{1:length(f)}. } } The implementation uses the rejection method. For Model I, \code{\link{rmpoispp}} is called repeatedly until \code{n} points have been generated. It gives up after \code{giveup} calls if there are still fewer than \code{n} points. For Model II, the types are first generated according to \code{ptypes}, then the locations of the points of each type are generated using \code{\link{rpoint}}. For Model III, the locations of the points of each type are generated using \code{\link{rpoint}}. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}} } \examples{ abc <- c("a","b","c") ##### Model I rmpoint(25, types=abc) rmpoint(25, 1, types=abc) # 25 points, equal probability for each type, uniformly distributed locations rmpoint(25, function(x,y,m) {rep(1, length(x))}, types=abc) # same as above rmpoint(25, list(function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}), types=abc) # same as above rmpoint(25, function(x,y,m) { x }, types=abc) # 25 points, equal probability for each type, # locations nonuniform with density proportional to x rmpoint(25, function(x,y,m) { ifelse(m == "a", 1, x) }, types=abc) rmpoint(25, list(function(x,y) { rep(1, length(x)) }, function(x,y) { x }, function(x,y) { x }), types=abc) # 25 points, UNEQUAL probabilities for each type, # type "a" points uniformly distributed, # type "b" and "c" points nonuniformly distributed. ##### Model II rmpoint(25, 1, types=abc, ptypes=rep(1,3)/3) rmpoint(25, 1, types=abc, ptypes=rep(1,3)) # 25 points, equal probability for each type, # uniformly distributed locations rmpoint(25, function(x,y,m) {rep(1, length(x))}, types=abc, ptypes=rep(1,3)) # same as above rmpoint(25, list(function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}), types=abc, ptypes=rep(1,3)) # same as above rmpoint(25, function(x,y,m) { x }, types=abc, ptypes=rep(1,3)) # 25 points, equal probability for each type, # locations nonuniform with density proportional to x rmpoint(25, function(x,y,m) { ifelse(m == "a", 1, x) }, types=abc, ptypes=rep(1,3)) # 25 points, EQUAL probabilities for each type, # type "a" points uniformly distributed, # type "b" and "c" points nonuniformly distributed. 
###### Model III rmpoint(c(12, 8, 4), 1, types=abc) # 12 points of type "a", # 8 points of type "b", # 4 points of type "c", # each uniformly distributed rmpoint(c(12, 8, 4), function(x,y,m) { ifelse(m=="a", 1, x)}, types=abc) rmpoint(c(12, 8, 4), list(function(x,y) { rep(1, length(x)) }, function(x,y) { x }, function(x,y) { x }), types=abc) # 12 points of type "a", uniformly distributed # 8 points of type "b", nonuniform # 4 points of type "c", nonuniform ######### ## Randomising an existing point pattern: # same numbers of points of each type, uniform random locations (Model III) rmpoint(table(marks(demopat)), 1, win=Window(demopat)) # same total number of points, distribution of types estimated from X, # uniform random locations (Model II) rmpoint(npoints(demopat), 1, types=levels(marks(demopat)), win=Window(demopat), ptypes=table(marks(demopat))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/intensity.ppm.Rd0000644000176200001440000000567413160710621016011 0ustar liggesusers\name{intensity.ppm} \alias{intensity.ppm} \title{ Intensity of Fitted Point Process Model } \description{ Computes the intensity of a fitted point process model. } \usage{ \method{intensity}{ppm}(X, \dots) } \arguments{ \item{X}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Arguments passed to \code{\link{predict.ppm}} in some cases. See Details. } } \details{ This is a method for the generic function \code{\link{intensity}} for fitted point process models (class \code{"ppm"}). The intensity of a point process model is the expected number of random points per unit area. If \code{X} is a Poisson point process model, the intensity of the process is computed exactly. The result is a numerical value if \code{X} is a stationary Poisson point process, and a pixel image if \code{X} is non-stationary. 
(In the latter case, the resolution of the pixel image is controlled by the arguments \code{\dots} which are passed to \code{\link{predict.ppm}}.) If \code{X} is another Gibbs point process model, the intensity is computed approximately using the Poisson-saddlepoint approximation (Baddeley and Nair, 2012a, 2012b, 2016; Anderssen et al, 2014). The approximation is currently available for pairwise-interaction models (Baddeley and Nair, 2012a, 2012b) and for the area-interaction model and Geyer saturation model (Baddeley and Nair, 2016). For a non-stationary Gibbs model, the pseudostationary solution (Baddeley and Nair, 2012b; Anderssen et al, 2014) is used. The result is a pixel image, whose resolution is controlled by the arguments \code{\dots} which are passed to \code{\link{predict.ppm}}. } \value{ A numeric value (if the model is stationary) or a pixel image. } \references{ Anderssen, R.S., Baddeley, A., DeHoog, F.R. and Nair, G.M. (2014) Solution of an integral equation arising in spatial point process theory. \emph{Journal of Integral Equations and Applications} \bold{26} (4) 437--453. Baddeley, A. and Nair, G. (2012a) Fast approximation of the intensity of Gibbs point processes. \emph{Electronic Journal of Statistics} \bold{6} 1155--1169. Baddeley, A. and Nair, G. (2012b) Approximating the moments of a spatial point process. \emph{Stat} \bold{1}, 1, 18--30. doi: 10.1002/sta4.5 Baddeley, A. and Nair, G. (2016) Poisson-saddlepoint approximation for spatial point processes with infinite order interaction. Submitted for publication. } \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppp}} } \examples{ fitP <- ppm(swedishpines ~ 1) intensity(fitP) fitS <- ppm(swedishpines ~ 1, Strauss(9)) intensity(fitS) fitSx <- ppm(swedishpines ~ x, Strauss(9)) lamSx <- intensity(fitSx) fitG <- ppm(swedishpines ~ 1, Geyer(9, 1)) lamG <- intensity(fitG) fitA <- ppm(swedishpines ~ 1, AreaInter(7)) lamA <- intensity(fitA) } \author{ \adrian and Gopalan Nair. 
} \keyword{spatial} \keyword{models} spatstat/man/rThomas.Rd0000644000176200001440000001521013160710621014570 0ustar liggesusers\name{rThomas} \alias{rThomas} \title{Simulate Thomas Process} \description{ Generate a random point pattern, a realisation of the Thomas cluster process. } \usage{ rThomas(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, saveLambda=FALSE, expand = 4*scale, ..., poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{scale}{ Standard deviation of random displacement (along each coordinate axis) of a point from its cluster centre. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern. } \item{expand}{ Numeric. Size of window expansion for generation of parent points. Has a sensible default. } \item{\dots}{ Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE} and to \code{\link{clusterradius}} when \code{expand} is missing. } \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. 
} } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of the (`modified') Thomas process, a special case of the Neyman-Scott process, inside the window \code{win}. In the simplest case, where \code{kappa} and \code{mu} are single numbers, the algorithm generates a uniform Poisson point process of \dQuote{parent} points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of \dQuote{offspring} points, the number of points per cluster being Poisson (\code{mu}) distributed, and their positions being isotropic Gaussian displacements from the cluster parent location. The resulting point pattern is a realisation of the classical \dQuote{stationary Thomas process} generated inside the window \code{win}. This point process has intensity \code{kappa * mu}. The algorithm can also generate spatially inhomogeneous versions of the Thomas process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007). 
For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu * f}, where \code{f} is the Gaussian probability density centred at the parent point. Equivalently we first generate, for each parent point, a Poisson (\eqn{M}) random number of offspring (where \eqn{M} is the maximum value of \code{mu}) with independent Gaussian displacements from the parent location, and then randomly thin the offspring points, with retention probability \code{mu/M}. \item Both the parent points and the offspring points can be spatially inhomogeneous, as described above. } Note that if \code{kappa} is a pixel image, its domain must be larger than the window \code{win}. This is because an offspring point inside \code{win} could have its parent point lying outside \code{win}. In order to allow this, the simulation algorithm first expands the original window \code{win} by a distance \code{expand} and generates the Poisson process of parent points on this larger window. If \code{kappa} is a pixel image, its domain must contain this larger window. The intensity of the Thomas process is \code{kappa * mu} if either \code{kappa} or \code{mu} is a single number. In the general case the intensity is an integral involving \code{kappa}, \code{mu} and \code{f}. The Thomas process with homogeneous parents (i.e. where \code{kappa} is a single number) can be fitted to data using \code{\link{kppm}}. Currently it is not possible to fit the Thomas model with inhomogeneous parents. If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}. This avoids computations that would otherwise require huge amounts of memory. 
} \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}, \code{\link{kppm}}, \code{\link{clusterfit}}. } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \examples{ #homogeneous X <- rThomas(10, 0.2, 5) #inhomogeneous Z <- as.im(function(x,y){ 5 * exp(2 * x - 1) }, owin()) Y <- rThomas(10, 0.2, Z) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/as.hyperframe.ppx.Rd0000644000176200001440000000452513160710571016540 0ustar liggesusers\name{as.hyperframe.ppx} \Rdversion{1.1} \alias{as.hyperframe.ppx} \alias{as.data.frame.ppx} \alias{as.matrix.ppx} \title{ Extract coordinates and marks of multidimensional point pattern } \description{ Given any kind of spatial or space-time point pattern, extract the coordinates and marks of the points. } \usage{ \method{as.hyperframe}{ppx}(x, ...) \method{as.data.frame}{ppx}(x, ...) \method{as.matrix}{ppx}(x, ...) } \arguments{ \item{x}{ A general multidimensional space-time point pattern (object of class \code{"ppx"}). } \item{\dots}{ Ignored. } } \details{ An object of class \code{"ppx"} (see \code{\link{ppx}}) represents a marked point pattern in multidimensional space and/or time. There may be any number of spatial coordinates, any number of temporal coordinates, and any number of mark variables. The individual marks may be atomic (numeric values, factor values, etc) or objects of any kind. 
The function \code{as.hyperframe.ppx} extracts the coordinates and the marks as a \code{"hyperframe"} (see \code{\link{hyperframe}}) with one row of data for each point in the pattern. This is a method for the generic function \code{\link{as.hyperframe}}. The function \code{as.data.frame.ppx} discards those mark variables which are not atomic values, and extracts the coordinates and the remaining marks as a \code{data.frame} with one row of data for each point in the pattern. This is a method for the generic function \code{\link{as.data.frame}}. Finally \code{as.matrix(x)} is equivalent to \code{as.matrix(as.data.frame(x))} for an object of class \code{"ppx"}. Be warned that, if there are any columns of non-numeric data (i.e. if there are mark variables that are factors), the result will be a matrix of character values. } \value{ A \code{hyperframe}, \code{data.frame} or \code{matrix} as appropriate. } \author{\adrian and \rolf } \seealso{ \code{\link{ppx}}, \code{\link{hyperframe}}, \code{\link{as.hyperframe}}. } \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t")) as.data.frame(X) val <- runif(4) E <- lapply(val, function(s) { rpoispp(s) }) hf <- hyperframe(t=val, e=as.listof(E)) Z <- ppx(data=hf, domain=c(0,1)) as.hyperframe(Z) as.data.frame(Z) } \keyword{spatial} \keyword{manip} spatstat/man/plot.scan.test.Rd0000644000176200001440000000471013160710621016035 0ustar liggesusers\name{plot.scan.test} \alias{plot.scan.test} \alias{as.im.scan.test} \title{ Plot Result of Scan Test } \description{ Computes or plots an image showing the likelihood ratio test statistic for the scan test, or the optimal circle radius. } \usage{ \method{plot}{scan.test}(x, \dots, what=c("statistic", "radius"), do.window = TRUE) \method{as.im}{scan.test}(X, \dots, what=c("statistic", "radius")) } \arguments{ \item{x,X}{ Result of a scan test. An object of class \code{"scan.test"} produced by \code{\link{scan.test}}. 
} \item{\dots}{ Arguments passed to \code{\link{plot.im}} to control the appearance of the plot. } \item{what}{ Character string indicating whether to produce an image of the (profile) likelihood ratio test statistic (\code{what="statistic"}, the default) or an image of the optimal value of circle radius (\code{what="radius"}). } \item{do.window}{ Logical value indicating whether to plot the original window of the data as well. } } \details{ These functions extract, and plot, the spatially-varying value of the likelihood ratio test statistic which forms the basis of the scan test. If the test result \code{X} was based on circles of the same radius \code{r}, then \code{as.im(X)} is a pixel image of the likelihood ratio test statistic as a function of the position of the centre of the circle. If the test result \code{X} was based on circles of several different radii \code{r}, then \code{as.im(X)} is a pixel image of the profile (maximum value over all radii \code{r}) likelihood ratio test statistic as a function of the position of the centre of the circle, and \code{as.im(X, what="radius")} is a pixel image giving for each location \eqn{u} the value of \code{r} which maximised the likelihood ratio test statistic at that location. The \code{plot} method plots the corresponding image. } \value{ The value of \code{as.im.scan.test} is a pixel image (object of class \code{"im"}). The value of \code{plot.scan.test} is \code{NULL}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{scan.test}}, \code{\link{scanLRTS}} } \examples{ if(interactive()) { a <- scan.test(redwood, seq(0.04, 0.1, by=0.01), method="poisson", nsim=19) } else { a <- scan.test(redwood, c(0.05, 0.1), method="poisson", nsim=2) } plot(a) as.im(a) plot(a, what="radius") } \keyword{htest} \keyword{spatial} spatstat/man/is.ppm.Rd0000644000176200001440000000145513160710621014367 0ustar liggesusers\name{is.ppm} \alias{is.ppm} \alias{is.lppm} \alias{is.kppm} \alias{is.slrm} \title{Test Whether An Object Is A Fitted Point Process Model} \description{ Checks whether its argument is a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \usage{ is.ppm(x) is.kppm(x) is.lppm(x) is.slrm(x) } \arguments{ \item{x}{Any object.} } \details{ These functions test whether the object \code{x} is a fitted point process model object of the specified class. The result of \code{is.ppm(x)} is \code{TRUE} if \code{x} has \code{"ppm"} amongst its classes, and otherwise \code{FALSE}. Similarly for the other functions. } \value{ A single logical value. } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/fardist.Rd0000644000176200001440000000271713160710621014617 0ustar liggesusers\name{fardist} \alias{fardist} \alias{fardist.ppp} \alias{fardist.owin} \title{ Farthest Distance to Boundary of Window } \description{ Computes the farthest distance from each pixel, or each data point, to the boundary of the window. } \usage{ fardist(X, \dots) \method{fardist}{owin}(X, \dots, squared=FALSE) \method{fardist}{ppp}(X, \dots, squared=FALSE) } \arguments{ \item{X}{ A spatial object such as a window or point pattern. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution, if required. } \item{squared}{ Logical. If \code{TRUE}, the squared distances will be returned. 
} } \details{ The function \code{fardist} is generic, with methods for the classes \code{owin} and \code{ppp}. For a window \code{W}, the command \code{fardist(W)} returns a pixel image in which the value at each pixel is the \emph{largest} distance from that pixel to the boundary of \code{W}. For a point pattern \code{X}, with window \code{W}, the command \code{fardist(X)} returns a numeric vector with one entry for each point of \code{X}, giving the largest distance from that data point to the boundary of \code{W}. } \value{ For \code{fardist.owin}, a pixel image (object of class \code{"im"}). For \code{fardist.ppp}, a numeric vector. } \examples{ fardist(cells) plot(FR <- fardist(letterR)) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/plot.texturemap.Rd0000644000176200001440000000537313160710621016337 0ustar liggesusers\name{plot.texturemap} \alias{plot.texturemap} \title{ Plot a Texture Map } \description{ Plot a representation of a texture map, similar to a plot legend. } \usage{ \method{plot}{texturemap}(x, \dots, main, xlim = NULL, ylim = NULL, vertical = FALSE, axis = TRUE, labelmap = NULL, gap = 0.25, spacing = NULL, add = FALSE) } \arguments{ \item{x}{ Texture map object (class \code{"texturemap"}). } \item{\dots}{ Additional graphics arguments passed to \code{\link{add.texture}} or \code{\link{axis.default}}. } \item{main}{ Main title for plot. } \item{xlim,ylim}{ Optional vectors of length 2 giving the \eqn{x} and \eqn{y} limits of the plot. } \item{vertical}{ Logical value indicating whether to arrange the texture boxes in a vertical column (\code{vertical=TRUE}) or a horizontal row (\code{vertical=FALSE}, the default). } \item{axis}{ Logical value indicating whether to plot an axis line joining the texture boxes. } \item{labelmap}{ Optional. A \code{function} which will be applied to the data values (the inputs of the texture map) before they are displayed on the plot. 
} \item{gap}{ Separation between texture boxes, as a fraction of the width or height of a box. } \item{spacing}{ Argument passed to \code{\link{add.texture}} controlling the density of lines in a texture. Expressed in spatial coordinate units. } \item{add}{ Logical value indicating whether to add the graphics to an existing plot (\code{add=TRUE}) or to initialise a new plot (\code{add=FALSE}, the default). } } \details{ A texture map is an association between data values and graphical textures. An object of class \code{"texturemap"} represents a texture map. Such objects are returned from the plotting function \code{\link{textureplot}}, and can be created directly by the function \code{\link{texturemap}}. This function \code{plot.texturemap} is a method for the generic \code{\link{plot}} for the class \code{"texturemap"}. It displays a sample of each of the textures in the texture map, in a separate box, annotated by the data value which is mapped to that texture. The arrangement and position of the boxes is controlled by the arguments \code{vertical}, \code{xlim}, \code{ylim} and \code{gap}. } \value{ Null. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{texturemap}}, \code{\link{textureplot}}, \code{\link{add.texture}}. } \examples{ tm <- texturemap(c("First", "Second", "Third"), 2:4, col=2:4) plot(tm, vertical=FALSE) ## abbreviate the labels plot(tm, labelmap=function(x) substr(x, 1, 2)) } \keyword{spatial} \keyword{hplot} spatstat/man/Extract.splitppp.Rd0000644000176200001440000000252213160710621016441 0ustar liggesusers\name{Extract.splitppp} \alias{[.splitppp} \alias{[<-.splitppp} \title{Extract or Replace Sub-Patterns} \description{ Extract or replace some of the sub-patterns in a split point pattern. } \usage{ \method{[}{splitppp}(x, ...) \method{[}{splitppp}(x, ...) <- value } \arguments{ \item{x}{ An object of class \code{"splitppp"}, representing a point pattern separated into a list of sub-patterns. } \item{\dots}{ Subset index. 
Any valid subset index in the usual \R sense. } \item{value}{ Replacement value for the subset. A list of point patterns. } } \value{ Another object of class \code{"splitppp"}. } \details{ These are subset methods for the class \code{"splitppp"}. The argument \code{x} should be an object of class \code{"splitppp"}, representing a point pattern that has been separated into a list of sub-patterns. It is created by \code{\link{split.ppp}}. The methods extract or replace a designated subset of the list \code{x}, and return an object of class \code{"splitppp"}. } \seealso{ \code{\link{split.ppp}}, \code{\link{plot.splitppp}}, \code{\link{summary.splitppp}} } \examples{ data(amacrine) # multitype point pattern y <- split(amacrine) y[1] y["off"] y[1] <- list(runifpoint(42, Window(amacrine))) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/dppeigen.Rd0000644000176200001440000000124313160710571014753 0ustar liggesusers\name{dppeigen} \alias{dppeigen} \title{Internal function calculating eig and index} \description{This function is mainly for internal package use and is usually not called by the user.} \usage{dppeigen(model, trunc, Wscale, stationary = FALSE)} \arguments{ \item{model}{object of class \code{"detpointprocfamily"} } \item{trunc}{numeric giving the truncation} \item{Wscale}{numeric giving the scale of the window relative to a unit box} \item{stationary}{logical indicating whether the stationarity of the model should be used (only works in dimension 2).} } \value{A list} \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/imcov.Rd0000644000176200001440000000334613160710621014277 0ustar liggesusers\name{imcov} \alias{imcov} \title{Spatial Covariance of a Pixel Image} \description{ Computes the unnormalised spatial covariance function of a pixel image. } \usage{ imcov(X, Y=X) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}. } \item{Y}{ Optional. Another pixel image. 
} } \value{ A pixel image (an object of class \code{"im"}) representing the spatial covariance function of \code{X}, or the cross-covariance of \code{X} and \code{Y}. } \details{ The (uncentred, unnormalised) \emph{spatial covariance function} of a pixel image \eqn{X} in the plane is the function \eqn{C(v)} defined for each vector \eqn{v} as \deqn{ C(v) = \int X(u)X(u-v)\, {\rm d}u }{ C(v) = integral of X(u) * X(u-v) du } where the integral is over all spatial locations \eqn{u}, and where \eqn{X(u)} denotes the pixel value at location \eqn{u}. This command computes a discretised approximation to the spatial covariance function, using the Fast Fourier Transform. The return value is another pixel image (object of class \code{"im"}) whose greyscale values are values of the spatial covariance function. If the argument \code{Y} is present, then \code{imcov(X,Y)} computes the set \emph{cross-covariance} function \eqn{C(u)} defined as \deqn{ C(v) = \int X(u)Y(u-v)\, {\rm d}u. }{ C(v) = integral of X(u) * Y(u-v) du. } Note that \code{imcov(X,Y)} is equivalent to \code{convolve.im(X,Y,reflectY=TRUE)}. } \seealso{ \code{\link{setcov}}, \code{\link{convolve.im}}, \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{erosion}} } \examples{ X <- as.im(square(1)) v <- imcov(X) plot(v) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/linearmarkconnect.Rd0000644000176200001440000000560513160710621016661 0ustar liggesusers\name{linearmarkconnect} \alias{linearmarkconnect} \title{ Mark Connection Function for Multitype Point Pattern on Linear Network } \description{ For a multitype point pattern on a linear network, estimate the mark connection function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearmarkconnect(X, i, j, r=NULL, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} will be computed. 
An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{\dots}{ Arguments passed to \code{\link{linearpcfcross}} and \code{\link{linearpcf}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{markconnect}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. 
} \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}, \code{\link{linearmarkequal}}, \code{\link{markconnect}}. } \examples{ pab <- linearmarkconnect(chicago, "assault", "burglary") \dontrun{ plot(alltypes(chicago, linearmarkconnect)) } } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/Extract.solist.Rd0000644000176200001440000000306013160710621016101 0ustar liggesusers\name{Extract.solist} \alias{[.solist} \alias{[<-.solist} \title{Extract or Replace Subset of a List of Spatial Objects} \description{ Extract or replace some entries in a list of spatial objects, or extract a designated sub-region in each object. } \usage{ \method{[}{solist}(x, i, \dots) \method{[}{solist}(x, i) <- value } \arguments{ \item{x}{ An object of class \code{"solist"} representing a list of two-dimensional spatial objects. } \item{i}{ Subset index. Any valid subset index for vectors in the usual \R sense, or a window (object of class \code{"owin"}). } \item{value}{ Replacement value for the subset. } \item{\dots}{Ignored.} } \value{ Another object of the same class as \code{x}. } \details{ These are methods for extracting and replacing subsets for the class \code{"solist"}. The argument \code{x} should be an object of class \code{"solist"} representing a list of two-dimensional spatial objects. See \code{\link{solist}}. For the subset method, the subset index \code{i} can be either a vector index (specifying some elements of the list) or a spatial window (specifying a spatial sub-region). For the replacement method, \code{i} must be a vector index: the designated elements will be replaced. 
} \seealso{ \code{\link{solist}}, \code{\link{plot.solist}}, \code{\link{summary.solist}} } \examples{ x <- solist(japanesepines, cells, redwood) x[2:3] x[square(0.5)] x[1] <- list(finpines) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{list} \keyword{manip} spatstat/man/triplet.family.Rd0000644000176200001440000000230713160710621016121 0ustar liggesusers\name{triplet.family} \alias{triplet.family} \title{Triplet Interaction Family} \description{ An object describing the family of all Gibbs point processes with interaction order equal to 3. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the interaction structure of Gibbs point processes which have infinite order of interaction, such as the triplet interaction process \cite{\link{Triplets}}. Anyway, \code{triplet.family} is an object of class \code{"isf"} containing a function \code{triplet.family$eval} for evaluating the sufficient statistics of a Gibbs point process model taking an exponential family form. } \seealso{ \code{\link{Triplets}} to create the triplet interaction process structure. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{inforder.family}}, \code{\link{ord.family}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/plot.rppm.Rd0000644000176200001440000000425413160710621015114 0ustar liggesusers\name{plot.rppm} \alias{plot.rppm} \title{ Plot a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, plot the partition tree or the fitted intensity. 
} \usage{ \method{plot}{rppm}(x, \dots, what = c("tree", "spatial"), treeplot=NULL) } \arguments{ \item{x}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{what}{ Character string (partially matched) specifying whether to plot the partition tree or the fitted intensity. } \item{\dots}{ Arguments passed to \code{\link[rpart]{plot.rpart}} and \code{\link[rpart]{text.rpart}} (if \code{what="tree"}) or passed to \code{\link{plot.im}} (if \code{what="spatial"}) controlling the appearance of the plot. } \item{treeplot}{ Optional. A function to be used to plot and label the partition tree, replacing the two functions \code{\link[rpart]{plot.rpart}} and \code{\link[rpart]{text.rpart}}. } } \details{ If \code{what="tree"} (the default), the partition tree will be plotted using \code{\link[rpart]{plot.rpart}}, and labelled using \code{\link[rpart]{text.rpart}}. If the argument \code{treeplot} is given, then plotting and labelling will be performed by \code{treeplot} instead. A good choice is the function \code{prp} in package \pkg{rpart.plot}. If \code{what="spatial"}, the predicted intensity will be computed using \code{\link{predict.rppm}}, and this intensity will be plotted as an image using \code{\link{plot.im}}. } \value{ If \code{what="tree"}, a list containing \code{x} and \code{y} coordinates of the plotted nodes of the tree. If \code{what="spatial"}, the return value of \code{\link{plot.im}}. 
} \author{ \spatstatAuthors } \seealso{ \code{\link{rppm}} } \examples{ # Murchison gold data mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) # fit <- rppm(gold ~ dfault + greenstone, data=mur) # opa <- par(mfrow=c(1,2)) plot(fit) plot(fit, what="spatial") par(opa) } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/lineardirichlet.Rd0000644000176200001440000000257613160710621016330 0ustar liggesusers\name{lineardirichlet} \alias{lineardirichlet} \title{ Dirichlet Tessellation on a Linear Network } \description{ Given a point pattern on a linear network, compute the Dirichlet (or Voronoi or Thiessen) tessellation induced by the points. } \usage{ lineardirichlet(X) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } } \details{ The Dirichlet tessellation induced by a point pattern \code{X} on a linear network \code{L} is a partition of \code{L} into subsets. The subset \code{L[i]} associated with the data point \code{X[i]} is the part of \code{L} lying closer to \code{X[i]} than to any other data point \code{X[j]}, where distance is measured by the shortest path. } \section{Missing tiles}{ If the linear network is not connected, and if one of the connected components contains no data points, then the Dirichlet tessellation is mathematically undefined inside this component. The resulting tessellation object includes a tile with label \code{NA}, which contains this component of the network. A plot of the tessellation will not show this tile. } \value{ A tessellation on a linear network (object of class \code{"lintess"}). } \author{ \adrian. 
} \seealso{ \code{\link{lintess}} } \examples{ X <- runiflpp(5, simplenet) plot(lineardirichlet(X), lwd=3) points(X) } \keyword{spatial} \keyword{manip} spatstat/man/centroid.owin.Rd0000644000176200001440000000367413160710571015754 0ustar liggesusers\name{centroid.owin} \alias{centroid.owin} \title{Centroid of a window} \description{ Computes the centroid (centre of mass) of a window } \usage{ centroid.owin(w, as.ppp = FALSE) } \arguments{ \item{w}{A window} \item{as.ppp}{Logical flag indicating whether to return the centroid as a point pattern (\code{ppp} object)} } \value{ Either a list with components \code{x, y}, or a point pattern (of class \code{ppp}) consisting of a single point, giving the coordinates of the centroid of the window \code{w}. } \details{ The centroid of the window \code{w} is computed. The centroid (``centre of mass'') is the point whose \eqn{x} and \eqn{y} coordinates are the mean values of the \eqn{x} and \eqn{y} coordinates of all points in the window. The argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) or can be given in any format acceptable to \code{\link{as.owin}()}. The calculation uses an exact analytic formula for the case of polygonal windows. Note that the centroid of a window is not necessarily inside the window, unless the window is convex. If \code{as.ppp=TRUE} and the centroid of \code{w} lies outside \code{w}, then the window of the returned point pattern will be a rectangle containing the original window (using \code{\link{as.rectangle}}. 
} \seealso{ \code{\link{owin}}, \code{\link{as.owin}} } \examples{ w <- owin(c(0,1),c(0,1)) centroid.owin(w) # returns 0.5, 0.5 data(demopat) w <- Window(demopat) # an irregular window cent <- centroid.owin(w, as.ppp = TRUE) \dontrun{ plot(cent) # plot the window and its centroid } wapprox <- as.mask(w) # pixel approximation of window \dontrun{ points(centroid.owin(wapprox)) # should be indistinguishable } \testonly{ centroid.owin(w) centroid.owin(wapprox) } } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/yardstick.Rd0000644000176200001440000000505013160710621015151 0ustar liggesusers\name{yardstick} \alias{textstring} \alias{onearrow} \alias{yardstick} \title{ Text, Arrow or Scale Bar in a Diagram } \description{ Create spatial objects that represent a text string, an arrow, or a yardstick (scale bar). } \usage{ textstring(x, y, txt = NULL, \dots) onearrow(x0, y0, x1, y1, txt = NULL, \dots) yardstick(x0, y0, x1, y1, txt = NULL, \dots) } \arguments{ \item{x,y}{ Coordinates where the text should be placed. } \item{x0,y0,x1,y1}{ Spatial coordinates of both ends of the arrow or yardstick. Alternatively \code{x0} can be a point pattern (class \code{"ppp"}) containing exactly two points, or a line segment pattern (class \code{"psp"}) consisting of exactly one line segment. } \item{txt}{ The text to be displayed beside the line segment. Either a character string or an expression. } \item{\dots}{ Additional named arguments for plotting the object. } } \details{ These commands create objects that represent components of a diagram: \itemize{ \item \code{textstring} creates an object that represents a string of text at a particular spatial location. \item \code{onearrow} creates an object that represents an arrow between two locations. \item \code{yardstick} creates an object that represents a scale bar: a line segment indicating the scale of the plot. } To display the relevant object, it should be plotted, using \code{plot}. 
See the help files for the plot methods \code{\link{plot.textstring}}, \code{\link{plot.onearrow}} and \code{\link{plot.yardstick}}. These objects are designed to be included as components in a \code{\link{layered}} object or a \code{\link{solist}}. This makes it possible to build up a diagram consisting of many spatial objects, and to annotate the diagram with arrows, text and so on, so that ultimately the entire diagram is plotted using \code{plot}. } \value{ An object of class \code{"diagramobj"} which also belongs to one of the special classes \code{"textstring"}, \code{"onearrow"} or \code{"yardstick"}. There are methods for \code{plot}, \code{print}, \code{"["} and \code{\link{shift}}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{plot.textstring}}, \code{\link{plot.onearrow}}, \code{\link{plot.yardstick}}. } \examples{ X <- rescale(swedishpines) plot(X, pch=16, main="") ys <- yardstick(as.psp(list(xmid=4, ymid=0.5, length=1, angle=0), window=Window(X)), txt="1 m") plot(ys, angle=90) } \keyword{spatial} \keyword{hplot} spatstat/man/vcov.kppm.Rd0000644000176200001440000000561513160710621015106 0ustar liggesusers\name{vcov.kppm} \alias{vcov.kppm} \title{Variance-Covariance Matrix for a Fitted Cluster Point Process Model} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a fitted cluster point process model. } \usage{ \method{vcov}{kppm}(object, ..., what=c("vcov", "corr", "fisher", "internals"), fast = NULL, rmax = NULL, eps.rmax = 0.01, verbose = TRUE) } \arguments{ \item{object}{ A fitted cluster point process model (an object of class \code{"kppm"}.) } \item{\dots}{ Ignored. } \item{what}{ Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} for the Fisher information matrix. 
} \item{fast}{ Logical specifying whether tapering (using sparse matrices from \pkg{Matrix}) should be used to speed up calculations. Warning: This is expected to underestimate the true asymptotic variances/covariances. } \item{rmax}{ Optional. The dependence range. Not usually specified by the user. Only used when \code{fast=TRUE}. } \item{eps.rmax}{ Numeric. A small positive number which is used to determine \code{rmax} from the tail behaviour of the pair correlation function when fast option (\code{fast=TRUE}) is used. Namely \code{rmax} is the smallest value of \eqn{r} at which \eqn{(g(r)-1)/(g(0)-1)} falls below \code{eps.rmax}. Only used when \code{fast=TRUE}. Ignored if \code{rmax} is provided. } \item{verbose}{ Logical value indicating whether to print progress reports during very long calculations. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical (regression) parameters in the cluster point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. The result is an \code{n * n} matrix where \code{n = length(coef(model))}. To calculate a confidence interval for a regression parameter, use \code{\link[stats]{confint}} as shown in the examples. } \value{ A square matrix. } \references{ Waagepetersen, R. (2007) Estimating functions for inhomogeneous spatial point processes with incomplete covariate data. \emph{Biometrika} \bold{95}, 351--363. } \author{ Abdollah Jalilian and Rasmus Waagepetersen. Ported to \pkg{spatstat} by \adrian and \ege. 
} \seealso{ \code{\link{kppm}}, \code{\link{vcov}}, \code{\link{vcov.ppm}} } \examples{ data(redwood) fit <- kppm(redwood ~ x + y) vcov(fit) vcov(fit, what="corr") # confidence interval confint(fit) # cross-check the confidence interval by hand: sd <- sqrt(diag(vcov(fit))) t(coef(fit) + 1.96 * outer(sd, c(lower=-1, upper=1))) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/Softcore.Rd0000644000176200001440000001052013160710571014742 0ustar liggesusers\name{Softcore} \alias{Softcore} \title{The Soft Core Point Process Model} \description{ Creates an instance of the Soft Core point process model which can then be fitted to point pattern data. } \usage{ Softcore(kappa, sigma0=NA) } \arguments{ \item{kappa}{The exponent \eqn{\kappa}{kappa} of the Soft Core interaction} \item{sigma0}{ Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}. A positive number. } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Soft Core process with exponent \eqn{\kappa}{kappa}. } \details{ The (stationary) Soft Core point process with parameters \eqn{\beta}{beta} and \eqn{\sigma}{sigma} and exponent \eqn{\kappa}{kappa} is the pairwise interaction point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each pair of points contributes a factor \deqn{ \exp \left\{ - \left( \frac{\sigma}{d} \right)^{2/\kappa} \right\} }{ exp( - (sigma/d)^(2/kappa) ) } to the density, where \eqn{d} is the distance between the two points. Thus the process has probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \exp \left\{ - \sum_{i < j} \left( \frac{\sigma}{||x_i-x_j||} \right)^{2/\kappa} \right\} }{ f(x_1,\ldots,x_n) = alpha . 
beta^n(x) exp( - sum (sigma/||x[i]-x[j]||)^(2/kappa)) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{\alpha}{alpha} is the normalising constant, and the sum on the right hand side is over all unordered pairs of points of the pattern. This model describes an ``ordered'' or ``inhibitive'' process, with the interpoint interaction decreasing smoothly with distance. The strength of interaction is controlled by the parameter \eqn{\sigma}{sigma}, a positive real number, with larger values corresponding to stronger interaction; and by the exponent \eqn{\kappa}{kappa} in the range \eqn{(0,1)}, with larger values corresponding to weaker interaction. If \eqn{\sigma = 0}{sigma = 0} the model reduces to the Poisson point process. If \eqn{\sigma > 0}{sigma > 0}, the process is well-defined only for \eqn{\kappa}{kappa} in \eqn{(0,1)}. The limit of the model as \eqn{\kappa \to 0}{kappa -> 0} is the hard core process with hard core distance \eqn{h=\sigma}{h=sigma}. The nonstationary Soft Core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Soft Core process pairwise interaction is yielded by the function \code{Softcore()}. See the examples below. The main argument is the exponent \code{kappa}. When \code{kappa} is fixed, the model becomes an exponential family with canonical parameters \eqn{\log \beta}{log(beta)} and \deqn{ \log \gamma = \frac{2}{\kappa} \log\sigma }{ log(gamma) = (2/kappa) log(sigma) } The canonical parameters are estimated by \code{\link{ppm}()}, not fixed in \code{Softcore()}. 
The optional argument \code{sigma0} can be used to improve numerical stability. If \code{sigma0} is given, it should be a positive number, and it should be a rough estimate of the parameter \eqn{\sigma}{sigma}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Ogata, Y, and Tanemura, M. (1981). Estimation of interaction potentials of spatial point patterns through the maximum likelihood procedure. \emph{Annals of the Institute of Statistical Mathematics}, B \bold{33}, 315--338. Ogata, Y, and Tanemura, M. (1984). Likelihood analysis of spatial point patterns. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 496--518. } \examples{ data(cells) ppm(cells, ~1, Softcore(kappa=0.5), correction="isotropic") # fit the stationary Soft Core process to `cells' } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/symbolmap.Rd0000644000176200001440000001213013160710621015154 0ustar liggesusers\name{symbolmap} \alias{symbolmap} \title{ Graphics Symbol Map } \description{ Create a graphics symbol map that associates data values with graphical symbols. } \usage{ symbolmap(\dots, range = NULL, inputs = NULL) } \arguments{ \item{\dots}{ Named arguments specifying the graphical parameters. See Details. } \item{range}{ Optional. Range of numbers that are mapped. A numeric vector of length 2 giving the minimum and maximum values that will be mapped. Incompatible with \code{inputs}. } \item{inputs}{ Optional. A vector containing all the data values that will be mapped to symbols. Incompatible with \code{range}. } } \details{ A graphical symbol map is an association between data values and graphical symbols. The command \code{symbolmap} creates an object of class \code{"symbolmap"} that represents a graphical symbol map. Once a symbol map has been created, it can be applied to any suitable data to generate a plot of those data. 
This makes it easy to ensure that the \emph{same} symbol map is used in two different plots. The symbol map can be plotted as a legend to the plots, and can also be plotted in its own right. The possible values of data that will be mapped are specified by \code{range} or \code{inputs}. \itemize{ \item if \code{range} is given, it should be a numeric vector of length 2 giving the minimum and maximum values of the range of numbers that will be mapped. These limits must be finite. \item if \code{inputs} is given, it should be a vector of any atomic type (e.g. numeric, character, logical, factor). This vector contains all the possible data values that will be mapped. \item If neither \code{range} nor \code{inputs} is given, it is assumed that the possible values are real numbers. } The association of data values with graphical symbols is specified by the other arguments \code{\dots} which are given in \code{name=value} form. These arguments specify the kinds of symbols that will be used, the sizes of the symbols, and graphics parameters for drawing the symbols. Each graphics parameter can be either a single value, for example \code{shape="circles"}, or a \code{function(x)} which determines the value of the graphics parameter as a function of the data \code{x}, for example \code{shape=function(x) ifelse(x > 0, "circles", "squares")}. Colourmaps (see \code{\link{colourmap}}) are also acceptable because they are functions. Currently recognised graphics parameters, and their allowed values, are: \describe{ \item{shape}{ The shape of the symbol: currently either \code{"circles"}, \code{"squares"}, \code{"arrows"} or \code{NA}. This parameter takes precedence over \code{pch}. } \item{size}{ The size of the symbol: a positive number or zero. } \item{pch}{ Graphics character code: a positive integer, or a single character. See \code{\link[graphics]{par}}. } \item{cex}{ Graphics character expansion factor. } \item{cols}{ Colour of plotting characters. 
} \item{fg,bg}{ Colour of foreground (or symbol border) and background (or symbol interior). } \item{col,lwd,lty}{ Colour, width and style of lines. } \item{etch}{ Logical. If \code{TRUE}, each symbol is surrounded by a border drawn in the opposite colour, which improves its visibility against the background. Default is \code{FALSE}. } \item{direction,headlength,headangle,arrowtype}{ Numeric parameters of arrow symbols, applicable when \code{shape="arrows"}. Here \code{direction} is the direction of the arrow in degrees anticlockwise from the \eqn{x} axis; \code{headlength} is the length of the head of the arrow in coordinate units; \code{headangle} is the angle subtended by the point of the arrow; and \code{arrowtype} is an integer code specifying which ends of the shaft have arrowheads attached (0 means no arrowheads, 1 is an arrowhead at the start of the shaft, 2 is an arrowhead at the end of the shaft, and 3 is arrowheads at both ends). } } A vector of colour values is also acceptable for the arguments \code{col,cols,fg,bg} if \code{range} is specified. } \value{ An object of class \code{"symbolmap"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{plot.symbolmap}} to plot the symbol map itself. \code{\link{invoke.symbolmap}} to apply the symbol map to some data and plot the resulting symbols. \code{\link{update.symbolmap}} to change the symbol map. 
} \examples{ g <- symbolmap(inputs=letters[1:10], pch=11:20) g1 <- symbolmap(range=c(0,100), size=function(x) x/50) g2 <- symbolmap(shape=function(x) ifelse(x > 0, "circles", "squares"), size=function(x) sqrt(ifelse(x > 0, x/pi, -x)), bg = function(x) ifelse(abs(x) < 1, "red", "black")) colmap <- colourmap(topo.colors(20), range=c(0,10)) g3 <- symbolmap(pch=21, bg=colmap, range=c(0,10)) plot(g3) } \keyword{spatial} \keyword{hplot} spatstat/man/K3est.Rd0000644000176200001440000000662513160710571014162 0ustar liggesusers\name{K3est} \Rdversion{1.1} \alias{K3est} \title{ K-function of a Three-Dimensional Point Pattern } \description{ Estimates the \eqn{K}-function from a three-dimensional point pattern. } \usage{ K3est(X, \dots, rmax = NULL, nrval = 128, correction = c("translation", "isotropic"), ratio=FALSE) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{K_3(r)}{K3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{K_3(r)}{K3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. 
} } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the three-dimensional \eqn{K} function is \deqn{ K_3(r) = \frac 1 \lambda E(N(\Phi, x, r) \mid x \in \Phi) }{ K3(r) = (1/lambda) E(N(Phi,x,r) | x in Phi) } where \eqn{\lambda}{lambda} is the intensity of the process (the expected number of points per unit volume) and \eqn{N(\Phi,x,r)}{N(Phi,x,r)} is the number of points of \eqn{\Phi}{Phi}, other than \eqn{x} itself, which fall within a distance \eqn{r} of \eqn{x}. This is the three-dimensional generalisation of Ripley's \eqn{K} function for two-dimensional point processes (Ripley, 1977). The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The distance between each pair of distinct points is computed. The empirical cumulative distribution function of these values, with appropriate edge corrections, is renormalised to give the estimate of \eqn{K_3(r)}{K3(r)}. The available edge corrections are: \describe{ \item{\code{"translation"}:}{ the Ohser translation correction estimator (Ohser, 1983; Baddeley et al, 1993) } \item{\code{"isotropic"}:}{ the three-dimensional counterpart of Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993). } } Alternatively \code{correction="all"} selects all options. } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). 
\emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{ \adrian and Rana Moyeed. } \seealso{ \code{\link{F3est}}, \code{\link{G3est}}, \code{\link{pcf3est}} } \examples{ X <- rpoispp3(42) Z <- K3est(X) if(interactive()) plot(Z) } \keyword{spatial} \keyword{nonparametric} spatstat/man/rpoislinetess.Rd0000644000176200001440000000273613160710621016067 0ustar liggesusers\name{rpoislinetess} \alias{rpoislinetess} \title{Poisson Line Tessellation} \description{ Generate a tessellation delineated by the lines of the Poisson line process } \usage{ rpoislinetess(lambda, win = owin()) } \arguments{ \item{lambda}{ Intensity of the Poisson line process. A positive number. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Currently, the window must be a rectangle. } } \details{ This algorithm generates a realisation of the uniform Poisson line process, and divides the window \code{win} into tiles separated by these lines. The argument \code{lambda} must be a positive number. It controls the intensity of the process. The expected number of lines intersecting a convex region of the plane is equal to \code{lambda} times the perimeter length of the region. The expected total length of the lines crossing a region of the plane is equal to \code{lambda * pi} times the area of the region. } \value{ A tessellation (object of class \code{"tess"}). Also has an attribute \code{"lines"} containing the realisation of the Poisson line process, as an object of class \code{"infline"}. } \author{\adrian and \rolf } \seealso{ \code{\link{rpoisline}} to generate the lines only. 
} \examples{ X <- rpoislinetess(3) plot(as.im(X), main="rpoislinetess(3)") plot(X, add=TRUE) } \keyword{spatial} \keyword{datagen} spatstat/man/simulate.dppm.Rd0000644000176200001440000001105713160710621015742 0ustar liggesusers\name{simulate.dppm} \alias{simulate.dppm} \alias{simulate.detpointprocfamily} \title{Simulation of Determinantal Point Process Model} \description{ Generates simulated realisations from a determinantal point process model. } \usage{ \method{simulate}{dppm}(object, nsim = 1, seed = NULL, \dots, W = NULL, trunc = 0.99, correction = "periodic", rbord = reach(object)) \method{simulate}{detpointprocfamily}(object, nsim = 1, seed = NULL, \dots, W = NULL, trunc = 0.99, correction = "periodic", rbord = reach(object)) } \arguments{ \item{object}{ Determinantal point process model. An object of class \code{"detpointprocfamily"} or \code{"dppm"}. } \item{nsim}{Number of simulated realisations.} \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Arguments passed on to \code{\link{rdpp}}.} \item{W}{ Object specifying the window of simulation (defaults to a unit box if nothing else is sensible -- see Details). Can be any single argument acceptable to \code{\link{as.boxx}} (e.g. an \code{"owin"}, \code{"box3"} or \code{"boxx"} object). } \item{trunc}{ Numeric value specifying how the model truncation is preformed. See Details. } \item{correction}{ Character string specifying the type of correction to use. The options are "periodic" (default) and "border". See Details. } \item{rbord}{ Numeric value specifying the extent of the border correction if this correction is used. See Details. 
} } \details{ These functions are methods for the generic function \code{\link[stats]{simulate}} for the classes \code{"detpointprocfamily"} and \code{"dppm"} of determinantal point process models. The return value is a list of \code{nsim} point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. The exact simulation of a determinantal point process model involves an infinite series, which typically has no analytical solution. In the implementation a truncation is performed. The truncation \code{trunc} can be specified either directly as a positive integer or as a fraction between 0 and 1. In the latter case the truncation is chosen such that the expected number of points in a simulation is \code{trunc} times the theoretical expected number of points in the model. The default is 0.99. The window of the returned point pattern(s) can be specified via the argument \code{W}. For a fitted model (of class \code{"dppm"}) it defaults to the observation window of the data used to fit the model. For inhomogeneous models it defaults to the window of the intensity image. Otherwise it defaults to a unit box. For non-rectangular windows simulation is done in the containing rectangle and then restricted to the window. For inhomogeneous models a stationary model is first simulated using the maximum intensity and then the result is obtained by thinning. The default is to use periodic edge correction for simulation such that opposite edges are glued together. If border correction is used then the simulation is done in an extended window. 
Edge effects are theoretically completely removed by doubling the size of the window in each spatial dimension, but for practical purposes much less extension may be sufficient. The numeric \code{rbord} determines the extend of the extra space added to the window. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). The list has class \code{"solist"}. The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \references{ Lavancier, F. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015) Determinantal point process models and statistical inference \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--977. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{rdpp}}, \code{\link[stats]{simulate}} } \examples{ model <- dppGauss(lambda=100, alpha=.05, d=2) simulate(model, 2) } \keyword{datagen} \keyword{spatial} \keyword{models} spatstat/man/Extract.fasp.Rd0000644000176200001440000000343313160710621015521 0ustar liggesusers\name{Extract.fasp} \alias{[.fasp} \title{Extract Subset of Function Array} \description{ Extract a subset of a function array (an object of class \code{"fasp"}). } \usage{ \method{[}{fasp}(x, I, J, drop=TRUE,\dots) } \arguments{ \item{x}{ A function array. An object of class \code{"fasp"}. } \item{I}{ any valid expression for a subset of the row indices of the array. } \item{J}{ any valid expression for a subset of the column indices of the array. } \item{drop}{ Logical. When the selected subset consists of only one cell of the array, if \code{drop=FALSE} the result is still returned as a \eqn{1 \times 1}{1 * 1} array of functions (class \code{"fasp"}) while if \code{drop=TRUE} it is returned as a function (class \code{"fv"}). } \item{\dots}{Ignored.} } \value{ A function array (of class \code{"fasp"}). 
Exceptionally, if the array has only one cell, and if \code{drop=TRUE}, then the result is a function value table (class \code{"fv"}). } \details{ A function array can be regarded as a matrix whose entries are functions. See \code{\link{fasp.object}} for an explanation of function arrays. This routine extracts a sub-array according to the usual conventions for matrix indexing. } \seealso{ \code{\link{fasp.object}} } \examples{ # Lansing woods data - multitype points with 6 types woods <- lansing \testonly{ # smaller dataset woods <- woods[ seq(1,npoints(woods),by=45)] } # compute 6 x 6 array of all cross-type K functions a <- alltypes(woods, "K") # extract first three marks only b <- a[1:3,1:3] \dontrun{plot(b)} # subset of array pertaining to hickories h <- a[levels(marks(woods)) == "hickory", ] \dontrun{plot(h)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/pseudoR2.Rd0000644000176200001440000000272313160710621014663 0ustar liggesusers\name{pseudoR2} \alias{pseudoR2} \alias{pseudoR2.ppm} \alias{pseudoR2.lppm} \title{ Calculate Pseudo-R-Squared for Point Process Model } \description{ Given a fitted point process model, calculate the pseudo-R-squared value, which measures the fraction of variation in the data that is explained by the model. } \usage{ pseudoR2(object, \dots) \method{pseudoR2}{ppm}(object, \dots) \method{pseudoR2}{lppm}(object, \dots) } \arguments{ \item{object}{ Fitted point process model. An object of class \code{"ppm"} or \code{"lppm"}. } \item{\dots}{ Additional arguments passed to \code{\link{deviance.ppm}} or \code{\link{deviance.lppm}}. } } \details{ The function \code{pseudoR2} is generic, with methods for fitted point process models of class \code{"ppm"} and \code{"lppm"}. 
This function computes McFadden's pseudo-R-squared
Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{ppm}} to fit Gibbs models. \code{\link{Kmodel}} for the generic functions. \code{\link{Kmodel.kppm}} for the method for cluster/Cox processes. } \examples{ fit <- ppm(swedishpines, ~1, Strauss(8)) p <- pcfmodel(fit) K <- Kmodel(fit) p(6) K(8) curve(K(x), from=0, to=15) } \references{ Baddeley, A. and Nair, G. (2012a) Fast approximation of the intensity of Gibbs point processes. \emph{Electronic Journal of Statistics} \bold{6} 1155--1169. Baddeley, A. and Nair, G. (2012b) Approximating the moments of a spatial point process. \emph{Stat} \bold{1}, 1, 18--30. doi: 10.1002/sta4.5 } \author{\adrian and Gopalan Nair. } \keyword{spatial} \keyword{models} spatstat/man/clickppp.Rd0000644000176200001440000000542413160710571014772 0ustar liggesusers\name{clickppp} \alias{clickppp} \title{Interactively Add Points} \description{ Allows the user to create a point pattern by point-and-click in the display. } \usage{ clickppp(n=NULL, win=square(1), types=NULL, \dots, add=FALSE, main=NULL, hook=NULL) } \arguments{ \item{n}{ Number of points to be added (if this is predetermined). } \item{win}{ Window in which to create the point pattern. An object of class \code{"owin"}. } \item{types}{ Vector of types, when creating a multitype point pattern. } \item{\dots}{ Optional extra arguments to be passed to \code{\link[graphics]{locator}} to control the display. } \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). } \item{main}{ Main heading for plot. } \item{hook}{For internal use only. Do not use this argument.} } \value{ A point pattern (object of class \code{"ppp"}). 
} \details{ This function allows the user to create a point pattern by interactively clicking on the screen display. First the window \code{win} is plotted on the current screen device. Then the user is prompted to point the mouse at any desired locations and click the left mouse button to add each point. Interactive input stops after \code{n} clicks (if \code{n} was given) or when the middle mouse button is pressed. The return value is a point pattern containing the locations of all the clicked points inside the original window \code{win}, provided that all of the clicked locations were inside this window. Otherwise, the window is expanded to a box large enough to contain all the points (as well as containing the original window). If the argument \code{types} is given, then a multitype point pattern will be created. The user is prompted to input the locations of points of type \code{type[i]}, for each successive index \code{i}. (If the argument \code{n} was given, there will be \code{n} points of \emph{each} type.) The return value is a multitype point pattern. This function uses the \R{} command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be passed to \code{\link[graphics]{locator}} through \code{\dots} include \code{pch} (plotting character), \code{cex} (character expansion factor) and \code{col} (colour). See \code{\link[graphics]{locator}} and \code{\link[graphics]{par}}. } \seealso{ \code{\link{identify.ppp}}, \code{\link[graphics]{locator}}, \code{\link{clickpoly}}, \code{\link{clickbox}}, \code{\link{clickdist}} } \author{Original by Dominic Schuhmacher. Adapted by \adrian and \rolf. 
} \keyword{spatial} \keyword{iplot} spatstat/man/rCauchy.Rd0000644000176200001440000001354313160710621014560 0ustar liggesusers\name{rCauchy} \alias{rCauchy} \title{Simulate Neyman-Scott Point Process with Cauchy cluster kernel} \description{ Generate a random point pattern, a simulated realisation of the Neyman-Scott process with Cauchy cluster kernel. } \usage{ rCauchy(kappa, scale, mu, win = owin(), thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL, \dots, poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{scale}{ Scale parameter for cluster kernel. Determines the size of clusters. A positive number, in the same units as the spatial coordinates. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{thresh}{ Threshold relative to the cluster kernel value at the origin (parent location) determining when the cluster kernel will be treated as zero for simulation purposes. Will be overridden by argument \code{expand} if that is given. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returns as an attribute of the point pattern. } \item{expand}{ Numeric. Size of window expansion for generation of parent points. By default determined by calling \code{\link{clusterradius}} with the numeric threshold value given in \code{thresh}. 
} \item{\dots}{ Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE} and to \code{\link{clusterradius}} when \code{expand} is missing or \code{NULL}. } \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of the Neyman-Scott process with Cauchy cluster kernel, inside the window \code{win}. The process is constructed by first generating a Poisson point process of ``parent'' points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of points, the number of points in each cluster being random with a Poisson (\code{mu}) distribution, and the points being placed independently and uniformly according to a Cauchy kernel. In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. This model can be fitted to data by the method of minimum contrast, maximum composite likelihood or Palm likelihood using \code{\link{kppm}}. The algorithm can also generate spatially inhomogeneous versions of the cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. 
\item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2006). } When the parents are homogeneous (\code{kappa} is a single number) and the offspring are inhomogeneous (\code{mu} is a function or pixel image), the model can be fitted to data using \code{\link{kppm}}. If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}. This avoids computations that would otherwise require huge amounts of memory. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rThomas}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}, \code{\link{kppm}}, \code{\link{clusterfit}}. } \examples{ # homogeneous X <- rCauchy(30, 0.01, 5) # inhomogeneous ff <- function(x,y){ exp(2 - 3 * abs(x)) } Z <- as.im(ff, W= owin()) Y <- rCauchy(50, 0.01, Z) YY <- rCauchy(ff, 0.01, 5) } \references{ Ghorbani, M. (2013) Cauchy cluster process. \emph{Metrika} \bold{76}, 697-706. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. 
Adapted for \pkg{spatstat} by \adrian } \keyword{spatial} \keyword{datagen} spatstat/man/Math.linim.Rd0000644000176200001440000000572513160710571015171 0ustar liggesusers\name{Math.linim} \alias{Math.linim} \alias{Ops.linim} \alias{Summary.linim} \alias{Complex.linim} \title{S3 Group Generic Methods for Images on a Linear Network} \description{ These are group generic methods for images of class \code{"linim"}, which allows for usual mathematical functions and operators to be applied directly to pixel images on a linear network. See Details for a list of implemented functions. } \usage{ ## S3 methods for group generics have prototypes: \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Complex(z)} \special{Summary(\dots, na.rm = FALSE)} %NAMESPACE S3method("Math", "linim") %NAMESPACE S3method("Ops", "linim") %NAMESPACE S3method("Complex", "linim") %NAMESPACE S3method("Summary", "linim") } \arguments{ \item{x, z, e1, e2}{objects of class \code{"linim"}.} \item{\dots}{further arguments passed to methods.} \item{na.rm}{logical: should missing values be removed?} } \details{ An object of class \code{"linim"} represents a pixel image on a linear network. See \code{\link{linim}}. Below is a list of mathematical functions and operators which are defined for these images. Not all functions will make sense for all types of images. For example, none of the functions in the \code{"Math"} group make sense for character-valued images. Note that the \code{"Ops"} group methods are implemented using \code{\link{eval.linim}}. 
\enumerate{ \item Group \code{"Math"}: \itemize{ \item \code{abs}, \code{sign}, \code{sqrt},\cr \code{floor}, \code{ceiling}, \code{trunc},\cr \code{round}, \code{signif} \item \code{exp}, \code{log}, \code{expm1}, \code{log1p},\cr \code{cos}, \code{sin}, \code{tan},\cr \code{cospi}, \code{sinpi}, \code{tanpi},\cr \code{acos}, \code{asin}, \code{atan} \code{cosh}, \code{sinh}, \code{tanh},\cr \code{acosh}, \code{asinh}, \code{atanh} \item \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma} \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin} } \item Group \code{"Ops"}: \itemize{ \item \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"}, \code{"^"}, \code{"\%\%"}, \code{"\%/\%"} \item \code{"&"}, \code{"|"}, \code{"!"} \item \code{"=="}, \code{"!="}, \code{"<"}, \code{"<="}, \code{">="}, \code{">"} } \item Group \code{"Summary"}: \itemize{ \item \code{all}, \code{any} \item \code{sum}, \code{prod} \item \code{min}, \code{max} \item \code{range} } \item Group \code{"Complex"}: \itemize{ \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re} } } } \seealso{ \code{\link{eval.linim}} for evaluating expressions involving images. } \examples{ fx <- function(x,y,seg,tp) { (x - y)^2 } fL <- linfun(fx, simplenet) Z <- as.linim(fL) A <- Z+2 A <- -Z A <- sqrt(Z) A <- !(Z > 0.1) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{methods} spatstat/man/density.psp.Rd0000644000176200001440000000446413160710571015450 0ustar liggesusers\name{density.psp} \alias{density.psp} \title{Kernel Smoothing of Line Segment Pattern} \description{ Compute a kernel smoothed intensity function from a line segment pattern. } \usage{ \method{density}{psp}(x, sigma, \dots, edge=TRUE, method=c("FFT", "C", "interpreted")) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}) to be smoothed. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. 
} \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} which determine the resolution of the resulting image. } \item{edge}{ Logical flag indicating whether to apply edge correction. } \item{method}{ Character string (partially matched) specifying the method of computation. Option \code{"FFT"} is the fastest, while \code{"C"} is the most accurate. } } \value{ A pixel image (object of class \code{"im"}). } \details{ This is a method for the generic function \code{\link{density}}. A kernel estimate of the intensity of the line segment pattern is computed. The result is the convolution of the isotropic Gaussian kernel, of standard deviation \code{sigma}, with the line segments. The result is computed as follows: \itemize{ \item if \code{method="FFT"}, the line segments are discretised using \code{\link{pixellate.psp}}, then the Fast Fourier Transform is used to calculate the convolution. This method is the fastest, but is slightly less accurate. \item if \code{method="C"} the exact value of the convolution at the centre of each pixel is computed analytically using \code{C} code; \item if \code{method="interpreted"}, the exact value of the convolution at the centre of each pixel is computed analytically using \code{R} code. This method is the slowest. } If \code{edge=TRUE} this result is adjusted for edge effects by dividing it by the convolution of the same Gaussian kernel with the observation window. 
} \seealso{ \code{\link{psp.object}}, \code{\link{im.object}}, \code{\link{density}} } \examples{ L <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) D <- density(L, sigma=0.03) plot(D, main="density(L)") plot(L, add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/pool.quadrattest.Rd0000644000176200001440000000530613160710621016471 0ustar liggesusers\name{pool.quadrattest} \alias{pool.quadrattest} \title{ Pool Several Quadrat Tests } \description{ Pool several quadrat tests into a single quadrat test. } \usage{ \method{pool}{quadrattest}(..., df=NULL, df.est=NULL, nsim=1999, Xname=NULL, CR=NULL) } \arguments{ \item{\dots}{ Any number of objects, each of which is a quadrat test (object of class \code{"quadrattest"}). } \item{df}{ Optional. Number of degrees of freedom of the test statistic. Relevant only for \eqn{\chi^2}{chi^2} tests. Incompatible with \code{df.est}. } \item{df.est}{ Optional. The number of fitted parameters, or the number of degrees of freedom lost by estimation of parameters. Relevant only for \eqn{\chi^2}{chi^2} tests. Incompatible with \code{df}. } \item{nsim}{ Number of simulations, for Monte Carlo test. } \item{Xname}{ Optional. Name of the original data. } \item{CR}{ Optional. Numeric value of the Cressie-Read exponent \code{CR} overriding the value used in the tests. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"quadrattest"}. An object of class \code{"quadrattest"} represents a \eqn{\chi^2}{chi^2} test or Monte Carlo test of goodness-of-fit for a point process model, based on quadrat counts. Such objects are created by the command \code{\link{quadrat.test}}. Each of the arguments \code{\dots} must be an object of class \code{"quadrattest"}. They must all be the same type of test (chi-squared test or Monte Carlo test, conditional or unconditional) and must all have the same type of alternative hypothesis. 
The test statistic of the pooled test is the Pearson \eqn{X^2} statistic taken over all cells (quadrats) of all tests. The \eqn{p} value of the pooled test is then computed using either a Monte Carlo test or a \eqn{\chi^2}{chi^2} test. For a pooled \eqn{\chi^2}{chi^2} test, the number of degrees of freedom of the combined test is computed by adding the degrees of freedom of all the tests (equivalent to assuming the tests are independent) unless it is determined by the arguments \code{df} or \code{df.est}. The resulting \eqn{p} value is computed to obtain the pooled test. For a pooled Monte Carlo test, new simulations are performed to determine the pooled Monte Carlo \eqn{p} value. } \value{ Another object of class \code{"quadrattest"}. } \seealso{ \code{\link{pool}}, \code{\link{quadrat.test}} } \examples{ Y <- split(humberside) test1 <- quadrat.test(Y[[1]]) test2 <- quadrat.test(Y[[2]]) pool(test1, test2, Xname="Humberside") } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} spatstat/man/split.im.Rd0000644000176200001440000000401413160710621014712 0ustar liggesusers\name{split.im} \alias{split.im} \title{Divide Image Into Sub-images} \description{ Divides a pixel image into several sub-images according to the value of a factor, or according to the tiles of a tessellation. } \usage{ \method{split}{im}(x, f, ..., drop = FALSE) } \arguments{ \item{x}{Pixel image (object of class \code{"im"}).} \item{f}{ Splitting criterion. Either a tessellation (object of class \code{"tess"}) or a pixel image with factor values. } \item{\dots}{Ignored.} \item{drop}{Logical value determining whether each subset should be returned as a pixel images (\code{drop=FALSE}) or as a one-dimensional vector of pixel values (\code{drop=TRUE}). } } \details{ This is a method for the generic function \code{\link{split}} for the class of pixel images. The image \code{x} will be divided into subsets determined by the data \code{f}. The result is a list of these subsets. 
The splitting criterion may be either \itemize{ \item a tessellation (object of class \code{"tess"}). Each tile of the tessellation delineates a subset of the spatial domain. \item a pixel image (object of class \code{"im"}) with factor values. The levels of the factor determine subsets of the spatial domain. } If \code{drop=FALSE} (the default), the result is a list of pixel images, each one a subset of the pixel image \code{x}, obtained by restricting the pixel domain to one of the subsets. If \code{drop=TRUE}, then the pixel values are returned as numeric vectors. } \value{ If \code{drop=FALSE}, a list of pixel images (objects of class \code{"im"}). It is also of class \code{"solist"} so that it can be plotted immediately. If \code{drop=TRUE}, a list of numeric vectors. } \seealso{ \code{\link{by.im}}, \code{\link{tess}}, \code{\link{im}} } \examples{ W <- square(1) X <- as.im(function(x,y){sqrt(x^2+y^2)}, W) Y <- dirichlet(runifpoint(12, W)) plot(split(X,Y)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/project2set.Rd0000644000176200001440000000272013160710621015421 0ustar liggesusers\name{project2set} \alias{project2set} \title{ Find Nearest Point in a Region } \description{ For each data point in a point pattern \code{X}, find the nearest location in a given spatial region \code{W}. } \usage{ project2set(X, W, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{W}{ Window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution. } } \details{ The window \code{W} is first discretised as a binary mask using \code{\link{as.mask}}. For each data point \code{X[i]} in the point pattern \code{X}, the algorithm finds the nearest pixel in \code{W}. 
The result is a point pattern \code{Y} containing these nearest points, that is, \code{Y[i]} is the nearest point in \code{W} to the point \code{X[i]}. } \value{ A point pattern (object of class \code{"ppp"}) with the same number of points as \code{X} in the window \code{W}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{project2segment}}, \code{\link{nncross}} } \examples{ He <- heather$fine[owin(c(2.8, 7.4), c(4.0, 7.8))] plot(He, main="project2set") X <- runifpoint(4, erosion(complement.owin(He), 0.2)) points(X, col="red") Y <- project2set(X, He) points(Y, col="green") arrows(X$x, X$y, Y$x, Y$y, angle=15, length=0.2) } \keyword{spatial} \keyword{math} spatstat/man/pcf.ppp.Rd0000644000176200001440000002303213160710621014522 0ustar liggesusers\name{pcf.ppp} \alias{pcf.ppp} \title{Pair Correlation Function of Point Pattern} \description{ Estimates the pair correlation function of a point pattern using kernel methods. } \usage{ \method{pcf}{ppp}(X, \dots, r = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor = c("r", "d"), var.approx = FALSE, domain=NULL, ratio=FALSE, close=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. Either a single numeric value, or a character string specifying a bandwidth selection rule recognised by \code{\link{density.default}}. If \code{bw} is missing or \code{NULL}, the default value is computed using Stoyan's rule of thumb: see Details. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Coefficient for Stoyan's bandwidth selection rule; see Details. 
} \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } \item{var.approx}{ Logical value indicating whether to compute an analytic approximation to the variance of the estimated pair correlation. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{close}{ Advanced use only. Precomputed data. See section on Advanced Use. } } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g(r)} for the Poisson process } \item{trans}{vector of values of \eqn{g(r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g(r)} estimated by Ripley isotropic correction } \item{v}{vector of approximate values of the variance of the estimate of \eqn{g(r)} } as required. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{g(r)}. The return value also has an attribute \code{"bw"} giving the smoothing bandwidth that was used. } \details{ The pair correlation function \eqn{g(r)} is a summary of the dependence between points in a spatial point process. 
The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda^2 g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda^2 * g(r) dx dy } where \eqn{\lambda}{lambda} is the intensity of the point process. For a completely random (uniform Poisson) process, \eqn{p(r) = \lambda^2 \,{\rm d}x \, {\rm d}y}{p(r) = lambda^2 dx dy} so \eqn{g(r) = 1}. Formally, the pair correlation function of a stationary point process is defined by \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. This routine computes an estimate of \eqn{g(r)} by kernel smoothing. \itemize{ \item If \code{divisor="r"} (the default), then the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \itemize{ \item If \code{correction="translate"} or \code{correction="translation"} then the translation correction is used. 
For \code{divisor="r"} the translation-corrected estimate is given in equation (15.15), page 284 of Stoyan and Stoyan (1994). \item If \code{correction="Ripley"} then Ripley's isotropic edge correction is used. For \code{divisor="r"} the isotropic-corrected estimate is given in equation (15.18), page 285 of Stoyan and Stoyan (1994). \item If \code{correction=c("translate", "Ripley")} then both estimates will be computed. } Alternatively \code{correction="all"} selects all options. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density.default}}. The default is the Epanechnikov kernel, recommended by Stoyan and Stoyan (1994, page 285). The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. Stoyan and Stoyan (1994, page 285) recommend using the Epanechnikov kernel with support \eqn{[-h,h]} chosen by the rule of thumb \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process, and \eqn{c} is a constant in the range from 0.1 to 0.2. See equation (15.16). If \code{bw} is missing or \code{NULL}, then this rule of thumb will be applied. The argument \code{stoyan} determines the value of \eqn{c}. The smoothing bandwidth that was used in the calculation is returned as an attribute of the final result. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. If it is specified, \code{r} must be a vector of increasing numbers starting from \code{r[1] = 0}, and \code{max(r)} must not exceed half the diameter of the window. If the argument \code{domain} is given, estimation will be restricted to this region. 
That is, the estimate of \eqn{g(r)} will be based on pairs of points in which the first point lies inside \code{domain} and the second point is unrestricted. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. To compute a confidence band for the true value of the pair correlation function, use \code{\link{lohboot}}. If \code{var.approx = TRUE}, the variance of the estimate of the pair correlation will also be calculated using an analytic approximation (Illian et al, 2008, page 234) which is valid for stationary point processes which are not too clustered. This calculation is not yet implemented when the argument \code{domain} is given. } \section{Advanced Use}{ To perform the same computation using several different bandwidths \code{bw}, it is efficient to use the argument \code{close}. This should be the result of \code{\link{closepairs}(X, rmax)} for a suitably large value of \code{rmax}, namely \code{rmax >= max(r) + 3 * bw}. } \references{ Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical Analysis and Modelling of Spatial Point Patterns.} Wiley. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics.} John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}}, \code{\link{density.default}}, \code{\link{bw.stoyan}}, \code{\link{bw.pcf}}, \code{\link{lohboot}}. 
} \examples{ X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } p <- pcf(X) plot(p, main="pair correlation function for X") # indicates inhibition at distances r < 0.3 pd <- pcf(X, divisor="d") # compare estimates plot(p, cbind(iso, theo) ~ r, col=c("blue", "red"), ylim.covers=0, main="", lwd=c(2,1), lty=c(1,3), legend=FALSE) plot(pd, iso ~ r, col="green", lwd=2, add=TRUE) legend("center", col=c("blue", "green"), lty=1, lwd=2, legend=c("divisor=r","divisor=d")) # calculate approximate variance and show POINTWISE confidence bands pv <- pcf(X, var.approx=TRUE) plot(pv, cbind(iso, iso+2*sqrt(v), iso-2*sqrt(v)) ~ r) } \author{ \spatstatAuthors and Martin Hazelton. } \keyword{spatial} \keyword{nonparametric} spatstat/man/diameter.linnet.Rd0000644000176200001440000000247513160710571016252 0ustar liggesusers\name{diameter.linnet} \alias{boundingradius.linnet} \alias{diameter.linnet} \title{ Diameter and Bounding Radius of a Linear Network } \description{ Compute the diameter or bounding radius of a linear network measured using the shortest path distance. } \usage{ \method{diameter}{linnet}(x) \method{boundingradius}{linnet}(x, \dots) } \arguments{ \item{x}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{Ignored.} } \details{ The diameter of a linear network (in the shortest path distance) is the maximum value of the shortest-path distance between any two points \eqn{u} and \eqn{v} on the network. The bounding radius of a linear network (in the shortest path distance) is the minimum value, over all points \eqn{u} on the network, of the maximum shortest-path distance from \eqn{u} to another point \eqn{v} on the network. The functions \code{\link{boundingradius}} and \code{\link{diameter}} are generic; the functions \code{boundingradius.linnet} and \code{diameter.linnet} are the methods for objects of class \code{linnet}. } \value{ A single numeric value. 
} \author{ \adrian } \seealso{ \code{\link{boundingradius}}, \code{\link{diameter}}, \code{\link{linnet}} } \examples{ diameter(simplenet) boundingradius(simplenet) } \keyword{spatial} \keyword{math} spatstat/man/rMaternII.Rd0000644000176200001440000000551713160710621015016 0ustar liggesusers\name{rMaternII} \alias{rMaternII} \title{Simulate Matern Model II} \description{ Generate a random point pattern, a simulated realisation of the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model II inhibition process. } \usage{ rMaternII(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, ..., nsim=1, drop=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of proposal points. A single positive number. } \item{r}{ Inhibition distance. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Alternatively a higher-dimensional box of class \code{"box3"} or \code{"boxx"}. } \item{stationary}{ Logical. Whether to start with a stationary process of proposal points (\code{stationary=TRUE}) or to generate the proposal points only inside the window (\code{stationary=FALSE}). } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Each point pattern is normally an object of class \code{"ppp"}, but may be of class \code{"pp3"} or \code{"ppx"} depending on the window. } \details{ This algorithm generates one or more realisations of \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model II inhibition process inside the window \code{win}. The process is constructed by first generating a uniform Poisson point process of ``proposal'' points with intensity \code{kappa}. 
If \code{stationary = TRUE} (the default), the proposal points are generated in a window larger than \code{win} that effectively means the proposals are stationary. If \code{stationary=FALSE} then the proposal points are only generated inside the window \code{win}. Then each proposal point is marked by an ``arrival time'', a number uniformly distributed in \eqn{[0,1]} independently of other variables. A proposal point is deleted if it lies within \code{r} units' distance of another proposal point \emph{that has an earlier arrival time}. Otherwise it is retained. The retained points constitute \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model II. The difference between \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model I and II is the italicised statement above. Model II has a higher intensity for the same parameter values. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rMaternI}} } \examples{ X <- rMaternII(20, 0.05) Y <- rMaternII(20, 0.05, stationary=FALSE) } \author{ \adrian , Ute Hahn, \rolf and \ege } \keyword{spatial} \keyword{datagen} spatstat/man/Gest.Rd0000644000176200001440000002134413160710571014066 0ustar liggesusers\name{Gest} \alias{Gest} \alias{nearest.neighbour} \title{ Nearest Neighbour Distance Function G } \description{ Estimates the nearest neighbour distance distribution function \eqn{G(r)} from a point pattern in a window of arbitrary shape. } \usage{ Gest(X, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han"), domain=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{G(r)} will be computed. An object of class \code{ppp}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{G(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. 
} \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{G(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing some or all of the following columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{G(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G(r)}, i.e. the empirical distribution of the distances from each point in the pattern \code{X} to the nearest other point of the pattern } \item{han}{the Hanisch correction estimator of \eqn{G(r)} } \item{theo}{the theoretical value of \eqn{G(r)} for a stationary Poisson process of the same estimated intensity. } } \details{ The nearest neighbour distance distribution function (also called the ``\emph{event-to-event}'' or ``\emph{inter-event}'' distribution) of a point process \eqn{X} is the cumulative distribution function \eqn{G} of the distance from a typical random point of \eqn{X} to the nearest other point of \eqn{X}. An estimate of \eqn{G} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988). 
In exploratory analyses, the estimate of \eqn{G} is a useful statistic summarising one aspect of the ``clustering'' of points. For inferential purposes, the estimate of \eqn{G} is usually compared to the true value of \eqn{G} for a completely random (Poisson) point process, which is \deqn{G(r) = 1 - e^{ - \lambda \pi r^2} }{% G(r) = 1 - exp( - lambda * pi * r^2)} where \eqn{\lambda}{lambda} is the intensity (expected number of points per unit area). Deviations between the empirical and theoretical \eqn{G} curves may suggest spatial clustering or spatial regularity. This algorithm estimates the nearest neighbour distance distribution function \eqn{G} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The estimation of \eqn{G} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The edge corrections implemented here are the border method or ``\emph{reduced sample}'' estimator, the spatial Kaplan-Meier estimator (Baddeley and Gill, 1997) and the Hanisch estimator (Hanisch, 1984). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The estimators are computed from histogram counts. This introduces a discretisation error which is controlled by the fineness of the breakpoints. 
First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G(r)}. The hazard rate is defined as the derivative \deqn{\lambda(r) = - \frac{d}{dr} \log (1 - G(r))}{% lambda(r) = - (d/dr) log(1 - G(r))} This estimate should be used with caution as \eqn{G} is not necessarily differentiable. If the argument \code{domain} is given, the estimate of \eqn{G(r)} will be based only on the nearest neighbour distances measured from points falling inside \code{domain} (although their nearest neighbours may lie outside \code{domain}). This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G}. However it is sometimes useful. It can be returned by the algorithm, by selecting \code{correction="none"}. Care should be taken not to use the uncorrected empirical \eqn{G} as if it were an unbiased estimator of \eqn{G}. To simply compute the nearest neighbour distance for each point in the pattern, use \code{\link{nndist}}. To determine which point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. 
Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263-292. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Hanisch, K.-H. (1984) Some remarks on estimators of the distribution function of nearest-neighbour distance in stationary spatial point patterns. \emph{Mathematische Operationsforschung und Statistik, series Statistics} \bold{15}, 409--412. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The function \eqn{G} does not necessarily have a density. Any valid c.d.f. may appear as the nearest neighbour distance distribution function of a stationary point process. The reduced sample estimator of \eqn{G} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{nndist}}, \code{\link{nnwhich}}, \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ data(cells) G <- Gest(cells) plot(G) # P-P style plot plot(G, cbind(km,theo) ~ theo) # the empirical G is below the Poisson G, # indicating an inhibited pattern \dontrun{ plot(G, . ~ r) plot(G, . 
~ theo) plot(G, asin(sqrt(.)) ~ asin(sqrt(theo))) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.ssf.Rd0000644000176200001440000000550413160710621014730 0ustar liggesusers\name{plot.ssf} \alias{plot.ssf} \alias{image.ssf} \alias{contour.ssf} \title{ Plot a Spatially Sampled Function } \description{ Plot a spatially sampled function object. } \usage{ \method{plot}{ssf}(x, \dots, how = c("smoothed", "nearest", "points"), style = c("image", "contour", "imagecontour"), sigma = NULL, contourargs=list()) \method{image}{ssf}(x, \dots) \method{contour}{ssf}(x, ..., main, sigma = NULL) } \arguments{ \item{x}{ Spatially sampled function (object of class \code{"ssf"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{image.default}} or \code{\link[spatstat]{plot.ppp}} to control the plot. } \item{how}{ Character string determining whether to display the function values at the data points (\code{how="points"}), a smoothed interpolation of the function (\code{how="smoothed"}), or the function value at the nearest data point (\code{how="nearest"}). } \item{style}{ Character string indicating whether to plot the smoothed function as a colour image, a contour map, or both. } \item{contourargs}{ Arguments passed to \code{\link[graphics]{contour.default}} to control the contours, if \code{style="contour"} or \code{style="imagecontour"}. } \item{sigma}{ Smoothing bandwidth for smooth interpolation. } \item{main}{ Optional main title for the plot. } } \details{ These are methods for the generic \code{\link[graphics]{plot}}, \code{\link[graphics]{image}} and \code{\link[graphics]{contour}} for the class \code{"ssf"}. An object of class \code{"ssf"} represents a function (real- or vector-valued) that has been sampled at a finite set of points. For \code{plot.ssf} there are three types of display. If \code{how="points"} the exact function values will be displayed as circles centred at the locations where they were computed. 
If \code{how="smoothed"} (the default) these values will be kernel-smoothed using \code{\link{smooth.ppp}} and displayed as a pixel image. If \code{how="nearest"} the values will be interpolated by nearest neighbour interpolation using \code{\link{nnmark}} and displayed as a pixel image. For \code{image.ssf} and \code{contour.ssf} the values are kernel-smoothed before being displayed. } \value{ \code{NULL}. } \references{ Baddeley, A. (2016) Local composite likelihood for spatial point processes. \emph{Spatial Statistics}, in press. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } \author{ \adrian. } \seealso{ \code{\link{ssf}} } \examples{ a <- ssf(cells, nndist(cells, k=1:3)) plot(a, how="points") plot(a, how="smoothed") plot(a, how="nearest") } \keyword{spatial} \keyword{hplot} spatstat/man/ranef.mppm.Rd0000644000176200001440000000321313160710621015216 0ustar liggesusers\name{ranef.mppm} \alias{ranef.mppm} \title{ Extract Random Effects from Point Process Model } \description{ Given a point process model fitted to a list of point patterns, extract the random effects of the model. A method for \code{ranef}. } \usage{ \method{ranef}{mppm}(object, \dots) } \arguments{ \item{object}{ A fitted point process model (an object of class \code{"mppm"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[nlme]{ranef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}. This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the coefficients of the random effects of the model. } \value{ A data frame, or list of data frames, as described in the help for \code{\link[nlme]{ranef.lme}}. } \references{ Baddeley, A., Rubak, E. 
and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{fixef.mppm}}, \code{\link{coef.mppm}} } \examples{ H <- hyperframe(Y = waterstriders) # Tweak data to exaggerate differences H$Y[[1]] <- rthin(H$Y[[1]], 0.3) m1 <- mppm(Y ~ id, data=H, Strauss(7)) ranef(m1) m2 <- mppm(Y ~ 1, random=~1|id, data=H, Strauss(7)) ranef(m2) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/nnwhich.Rd0000644000176200001440000001323013160710621014611 0ustar liggesusers\name{nnwhich} \alias{nnwhich} \alias{nnwhich.ppp} \alias{nnwhich.default} \title{Nearest neighbour} \description{ Finds the nearest neighbour of each point in a point pattern. } \usage{ nnwhich(X, \dots) \method{nnwhich}{ppp}(X, \dots, k=1, by=NULL, method="C") \method{nnwhich}{default}(X, Y=NULL, \dots, k=1, by=NULL, method="C") } \arguments{ \item{X,Y}{ Arguments specifying the locations of a set of points. For \code{nnwhich.ppp}, the argument \code{X} should be a point pattern (object of class \code{"ppp"}). For \code{nnwhich.default}, typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components \code{x} and \code{y}, or a matrix with two columns. } \item{\dots}{ Ignored by \code{nnwhich.ppp} and \code{nnwhich.default}. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{by}{ Optional. A factor, which separates \code{X} into groups. The algorithm will find the nearest neighbour in each group. } \item{method}{String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. 
} } \value{ Numeric vector or matrix giving, for each point, the index of its nearest neighbour (or \code{k}th nearest neighbour). If \code{k = 1} (the default), the return value is a numeric vector \code{v} giving the indices of the nearest neighbours (the nearest neighbour of the \code{i}th point is the \code{j}th point where \code{j = v[i]}). If \code{k} is a single integer, then the return value is a numeric vector giving the indices of the \code{k}th nearest neighbours. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the index of the \code{k[j]}th nearest neighbour for the \code{i}th data point. If the argument \code{by} is given, then the result is a data frame containing the indices described above, from each point of \code{X}, to the nearest point in each subset of \code{X} defined by the factor \code{by}. } \details{ For each point in the given point pattern, this function finds its nearest neighbour (the nearest other point of the pattern). By default it returns a vector giving, for each point, the index of the point's nearest neighbour. If \code{k} is specified, the algorithm finds each point's \code{k}th nearest neighbour. The function \code{nnwhich} is generic, with method for point patterns (objects of class \code{"ppp"}) and a default method which are described here, as well as a method for three-dimensional point patterns (objects of class \code{"pp3"}, described in \code{\link{nnwhich.pp3}}). The method \code{nnwhich.ppp} expects a single point pattern argument \code{X}. The default method expects that \code{X} and \code{Y} will determine the coordinates of a set of points. Typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components named \code{x} and \code{y}, or a matrix or data frame with two columns. The argument \code{k} may be a single integer, or an integer vector. 
If it is a vector, then the \eqn{k}th nearest neighbour distances are computed for each value of \eqn{k} specified in the vector. If the argument \code{by} is given, it should be a \code{factor}, of length equal to the number of points in \code{X}. This factor effectively partitions \code{X} into subsets, each subset associated with one of the levels of \code{by}. The algorithm will then find, for each point of \code{X}, the nearest neighbour \emph{in each subset}. If there are no points (if \code{x} has length zero) a numeric vector of length zero is returned. If there is only one point (if \code{x} has length 1), then the nearest neighbour is undefined, and a value of \code{NA} is returned. In general if the number of points is less than or equal to \code{k}, then a vector of \code{NA}'s is returned. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by two to three orders of magnitude and uses much less memory. To evaluate the \emph{distance} between a point and its nearest neighbour, use \code{\link{nndist}}. To find the nearest neighbours from one point pattern to another point pattern, use \code{\link{nncross}}. } \section{Nearest neighbours of each type}{ If \code{X} is a multitype point pattern and \code{by=marks(X)}, then the algorithm will find, for each point of \code{X}, the nearest neighbour of each type. See the Examples. } \section{Warnings}{ A value of \code{NA} is returned if there is only one point in the point pattern. 
} \seealso{ \code{\link{nndist}}, \code{\link{nncross}} } \examples{ data(cells) plot(cells) m <- nnwhich(cells) m2 <- nnwhich(cells, k=2) # plot nearest neighbour links b <- cells[m] arrows(cells$x, cells$y, b$x, b$y, angle=15, length=0.15, col="red") # find points which are the neighbour of their neighbour self <- (m[m] == seq(m)) # plot them A <- cells[self] B <- cells[m[self]] plot(cells) segments(A$x, A$y, B$x, B$y) # nearest neighbours of each type head(nnwhich(ants, by=marks(ants))) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/is.marked.ppp.Rd0000644000176200001440000000340413160710621015630 0ustar liggesusers\name{is.marked.ppp} \alias{is.marked.ppp} \title{Test Whether A Point Pattern is Marked} \description{ Tests whether a point pattern has ``marks'' attached to the points. } \usage{ \method{is.marked}{ppp}(X, na.action="warn", \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}) } \item{na.action}{ String indicating what to do if \code{NA} values are encountered amongst the marks. Options are \code{"warn"}, \code{"fatal"} and \code{"ignore"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a marked point pattern. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. This function tests whether the point pattern \code{X} contains or involves marked points. It is a method for the generic function \code{\link{is.marked}}. The argument \code{na.action} determines what action will be taken if the point pattern has a vector of marks but some or all of the marks are \code{NA}. 
Options are \code{"fatal"} to cause a fatal error; \code{"warn"} to issue a warning and then return \code{TRUE}; and \code{"ignore"} to take no action except returning \code{TRUE}. } \seealso{ \code{\link{is.marked}}, \code{\link{is.marked.ppm}} } \examples{ data(cells) is.marked(cells) #FALSE data(longleaf) is.marked(longleaf) #TRUE } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/print.psp.Rd0000644000176200001440000000133613160710621015114 0ustar liggesusers\name{print.psp} \alias{print.psp} \title{Print Brief Details of a Line Segment Pattern Dataset} \description{ Prints a very brief description of a line segment pattern dataset. } \usage{ \method{print}{psp}(x, \dots) } \arguments{ \item{x}{Line segment pattern (object of class \code{"psp"}).} \item{\dots}{Ignored.} } \details{ A very brief description of the line segment pattern \code{x} is printed. This is a method for the generic function \code{\link{print}}. } \seealso{ \code{\link{print}}, \code{\link{print.owin}}, \code{\link{summary.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) a } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} spatstat/man/is.stationary.Rd0000644000176200001440000000631713160710621015772 0ustar liggesusers\name{is.stationary} \alias{is.stationary} \alias{is.stationary.ppm} \alias{is.stationary.kppm} \alias{is.stationary.lppm} \alias{is.stationary.slrm} \alias{is.stationary.rmhmodel} \alias{is.stationary.dppm} \alias{is.stationary.detpointprocfamily} \alias{is.poisson} \alias{is.poisson.ppm} \alias{is.poisson.kppm} \alias{is.poisson.lppm} \alias{is.poisson.slrm} \alias{is.poisson.rmhmodel} \alias{is.poisson.interact} \title{ Recognise Stationary and Poisson Point Process Models } \description{ Given a point process model that has been fitted to data, determine whether the model is a stationary point process, and whether it is a Poisson point process. 
} \usage{ is.stationary(x) \method{is.stationary}{ppm}(x) \method{is.stationary}{kppm}(x) \method{is.stationary}{lppm}(x) \method{is.stationary}{slrm}(x) \method{is.stationary}{rmhmodel}(x) \method{is.stationary}{dppm}(x) \method{is.stationary}{detpointprocfamily}(x) is.poisson(x) \method{is.poisson}{ppm}(x) \method{is.poisson}{kppm}(x) \method{is.poisson}{lppm}(x) \method{is.poisson}{slrm}(x) \method{is.poisson}{rmhmodel}(x) \method{is.poisson}{interact}(x) } \arguments{ \item{x}{ A fitted spatial point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"} or \code{"slrm"}) or similar object. } } \details{ The argument \code{x} represents a fitted spatial point process model or a similar object. \code{is.stationary(x)} returns \code{TRUE} if \code{x} represents a stationary point process, and \code{FALSE} if not. \code{is.poisson(x)} returns \code{TRUE} if \code{x} represents a Poisson point process, and \code{FALSE} if not. The functions \code{is.stationary} and \code{is.poisson} are generic, with methods for the classes \code{"ppm"} (Gibbs point process models), \code{"kppm"} (cluster or Cox point process models), \code{"slrm"} (spatial logistic regression models) and \code{"rmhmodel"} (model specifications for the Metropolis-Hastings algorithm). Additionally \code{is.stationary} has a method for classes \code{"detpointprocfamily"} and \code{"dppm"} (both determinantal point processes) and \code{is.poisson} has a method for class \code{"interact"} (interaction structures for Gibbs models). \code{is.poisson.kppm} will return \code{FALSE}, unless the model \code{x} is degenerate: either \code{x} has zero intensity so that its realisations are empty with probability 1, or it is a log-Gaussian Cox process where the log intensity has zero variance. \code{is.poisson.slrm} will always return \code{TRUE}, by convention. } \value{ A logical value. 
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link{is.marked}} to determine whether a model is a marked point process. \code{\link{summary.ppm}} for detailed information. Model-fitting functions \code{\link{ppm}}, \code{\link{dppm}}, \code{\link{kppm}}, \code{\link{lppm}}, \code{\link{slrm}}. } \examples{ data(cells) data(redwood) fit <- ppm(cells ~ x) is.stationary(fit) is.poisson(fit) fut <- kppm(redwood ~ 1, "MatClust") is.stationary(fut) is.poisson(fut) fot <- slrm(cells ~ x) is.stationary(fot) is.poisson(fot) } \keyword{spatial} \keyword{models} spatstat/man/LennardJones.Rd0000644000176200001440000001143613160710571015547 0ustar liggesusers\name{LennardJones} \alias{LennardJones} \title{The Lennard-Jones Potential} \description{ Creates the Lennard-Jones pairwise interaction structure which can then be fitted to point pattern data. } \usage{ LennardJones(sigma0=NA) } \value{ An object of class \code{"interact"} describing the Lennard-Jones interpoint interaction structure. } \arguments{ \item{sigma0}{ Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}. A positive number. } } \details{ In a pairwise interaction point process with the Lennard-Jones pair potential (Lennard-Jones, 1924) each pair of points in the point pattern, a distance \eqn{d} apart, contributes a factor \deqn{ v(d) = \exp \left\{ - 4\epsilon \left[ \left( \frac{\sigma}{d} \right)^{12} - \left( \frac{\sigma}{d} \right)^6 \right] \right\} }{ v(d) = exp( - 4 * epsilon * ((sigma/d)^12 - (sigma/d)^6)) } to the probability density, where \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} are positive parameters to be estimated. See \bold{Examples} for a plot of this expression. This potential causes very strong inhibition between points at short range, and attraction between points at medium range. The parameter \eqn{\sigma}{sigma} is called the \emph{characteristic diameter} and controls the scale of interaction. 
The parameter \eqn{\epsilon}{epsilon} is called the \emph{well depth} and determines the strength of attraction. The potential switches from inhibition to attraction at \eqn{d=\sigma}{d=sigma}. The maximum value of the pair potential is \eqn{\exp(\epsilon)}{exp(epsilon)} occuring at distance \eqn{d = 2^{1/6} \sigma}{d = 2^(1/6) * sigma}. Interaction is usually considered to be negligible for distances \eqn{d > 2.5 \sigma \max\{1,\epsilon^{1/6}\}}{d > 2.5 * sigma * max(1, epsilon^(1/6))}. This potential is used to model interactions between uncharged molecules in statistical physics. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Lennard-Jones pairwise interaction is yielded by the function \code{LennardJones()}. See the examples below. } \section{Rescaling}{ To avoid numerical instability, the interpoint distances \code{d} are rescaled when fitting the model. Distances are rescaled by dividing by \code{sigma0}. In the formula for \eqn{v(d)} above, the interpoint distance \eqn{d} will be replaced by \code{d/sigma0}. The rescaling happens automatically by default. If the argument \code{sigma0} is missing or \code{NA} (the default), then \code{sigma0} is taken to be the minimum nearest-neighbour distance in the data point pattern (in the call to \code{\link{ppm}}). If the argument \code{sigma0} is given, it should be a positive number, and it should be a rough estimate of the parameter \eqn{\sigma}{sigma}. The ``canonical regular parameters'' estimated by \code{\link{ppm}} are \eqn{\theta_1 = 4 \epsilon (\sigma/\sigma_0)^{12}}{theta1 = 4 * epsilon * (sigma/sigma0)^12} and \eqn{\theta_2 = 4 \epsilon (\sigma/\sigma_0)^6}{theta2 = 4 * epsilon * (sigma/sigma0)^6}. 
} \section{Warnings and Errors}{ Fitting the Lennard-Jones model is extremely unstable, because of the strong dependence between the functions \eqn{d^{-12}}{d^(-12)} and \eqn{d^{-6}}{d^(-6)}. The fitting algorithm often fails to converge. Try increasing the number of iterations of the GLM fitting algorithm, by setting \code{gcontrol=list(maxit=1e3)} in the call to \code{\link{ppm}}. Errors are likely to occur if this model is fitted to a point pattern dataset which does not exhibit both short-range inhibition and medium-range attraction between points. The values of the parameters \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} may be \code{NA} (because the fitted canonical parameters have opposite sign, which usually occurs when the pattern is completely random). An absence of warnings does not mean that the fitted model is sensible. A negative value of \eqn{\epsilon}{epsilon} may be obtained (usually when the pattern is strongly clustered); this does not correspond to a valid point process model, but the software does not issue a warning. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \examples{ fit <- ppm(cells ~1, LennardJones(), rbord=0.1) fit plot(fitin(fit)) } \references{ Lennard-Jones, J.E. (1924) On the determination of molecular fields. \emph{Proc Royal Soc London A} \bold{106}, 463--477. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/closing.Rd0000644000176200001440000000463213160710571014623 0ustar liggesusers\name{closing} \alias{closing} \alias{closing.owin} \alias{closing.ppp} \alias{closing.psp} \title{Morphological Closing} \description{ Perform morphological closing of a window, a line segment pattern or a point pattern. 
} \usage{ closing(w, r, \dots) \method{closing}{owin}(w, r, \dots, polygonal=NULL) \method{closing}{ppp}(w, r, \dots, polygonal=TRUE) \method{closing}{psp}(w, r, \dots, polygonal=TRUE) } \arguments{ \item{w}{ A window (object of class \code{"owin"} or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). } \item{r}{positive number: the radius of the closing.} \item{\dots}{extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution, if a pixel approximation is used} \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the erosion (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the closed region. If \code{r=0}, the result is identical to \code{w}. } \details{ The morphological closing (Serra, 1982) of a set \eqn{W} by a distance \eqn{r > 0} is the set of all points that cannot be separated from \eqn{W} by any circle of radius \eqn{r}. That is, a point \eqn{x} belongs to the closing \eqn{W*} if it is impossible to draw any circle of radius \eqn{r} that has \eqn{x} on the inside and \eqn{W} on the outside. The closing \eqn{W*} contains the original set \eqn{W}. For a small radius \eqn{r}, the closing operation has the effect of smoothing out irregularities in the boundary of \eqn{W}. For larger radii, the closing operation smooths out concave features in the boundary. For very large radii, the closed set \eqn{W*} becomes more and more convex. The algorithm applies \code{\link{dilation}} followed by \code{\link{erosion}}. } \seealso{ \code{\link{opening}} for the opposite operation. \code{\link{dilation}}, \code{\link{erosion}} for the basic operations. \code{\link{owin}}, \code{\link{as.owin}} for information about windows. } \examples{ v <- closing(letterR, 0.25) plot(v, main="closing") plot(letterR, add=TRUE) } \references{ Serra, J. 
(1982) Image analysis and mathematical morphology. Academic Press. } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/with.ssf.Rd0000644000176200001440000000301613160710621014721 0ustar liggesusers\name{with.ssf} \alias{with.ssf} \alias{apply.ssf} \title{ Evaluate Expression in a Spatially Sampled Function } \description{ Given a spatially sampled function, evaluate an expression involving the function values. } \usage{ apply.ssf(X, \dots) \method{with}{ssf}(data, \dots) } \arguments{ \item{X, data}{ A spatially sampled function (object of class \code{"ssf"}). } \item{\dots}{ Arguments passed to \code{\link{with.default}} or \code{\link{apply}} specifying what to compute. } } \details{ An object of class \code{"ssf"} represents a function (real- or vector-valued) that has been sampled at a finite set of points. It contains a data frame which provides the function values at the sample points. In \code{with.ssf}, the expression specified by \code{\dots} will be evaluated in this dataframe. In \code{apply.ssf}, the dataframe will be subjected to the \code{\link{apply}} operator using the additional arguments \code{\dots}. If the result of evaluation is a data frame with one row for each data point, or a numeric vector with one entry for each data point, then the result will be an object of class \code{"ssf"} containing this information. Otherwise, the result will be a numeric vector. } \value{ An object of class \code{"ssf"} or a numeric vector. } \author{ \adrian. } \seealso{ \code{\link{ssf}} } \examples{ a <- ssf(cells, data.frame(d=nndist(cells), i=1:npoints(cells))) with(a, i/d) } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/dppCauchy.Rd0000644000176200001440000000254713160710571015110 0ustar liggesusers\name{dppCauchy} \alias{dppCauchy} \title{Generalized Cauchy Determinantal Point Process Model} \description{ Function generating an instance of the (generalized) Cauchy determinantal point process model. 
} \usage{dppCauchy(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details.} } \details{ The (generalized) Cauchy DPP is defined in (Lavancier, \ifelse{latex}{\out{M\o ller}}{Moller} and Rubak, 2015) The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian \rolf and \ege } \references{ Lavancier, F. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015) Determinantal point process models and statistical inference \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--977. } \examples{ m <- dppCauchy(lambda=100, alpha=.05, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppPowerExp}} } spatstat/man/solist.Rd0000644000176200001440000000606613160710621014501 0ustar liggesusers\name{solist} \alias{solist} \title{ List of Two-Dimensional Spatial Objects } \description{ Make a list of two-dimensional spatial objects. } \usage{ solist(\dots, check=TRUE, promote=TRUE, demote=FALSE) } \arguments{ \item{\dots}{ Any number of objects, each representing a two-dimensional spatial dataset. } \item{check}{ Logical value. If \code{TRUE}, check that each of the objects is a 2D spatial object. } \item{promote}{ Logical value. If \code{TRUE}, test whether all objects belong to the \emph{same} class, and if so, promote the list of objects to the appropriate class of list. 
} \item{demote}{ Logical value determining what should happen if any of the objects is not a 2D spatial object: if \code{demote=FALSE} (the default), a fatal error occurs; if \code{demote=TRUE}, a list of class \code{"anylist"} is returned. } } \details{ This command creates an object of class \code{"solist"} (spatial object list) which represents a list of two-dimensional spatial datasets. The datasets do not necessarily belong to the same class. Typically the intention is that the datasets in the list should be treated in the same way, for example, they should be plotted side-by-side. The \pkg{spatstat} package provides a plotting function, \code{\link{plot.solist}}, and many other functions for this class. In the \pkg{spatstat} package, various functions produce an object of class \code{"solist"}. For example, when a point pattern is split into several point patterns by \code{\link{split.ppp}}, or an image is split into several images by \code{\link{split.im}}, the result is of class \code{"solist"}. If \code{check=TRUE} then the code will check whether all objects in \code{\dots} belong to the classes of two-dimensional spatial objects defined in the \pkg{spatstat} package. They do not have to belong to the \emph{same} class. Set \code{check=FALSE} for efficiency, but only if you are sure that all the objects are valid. If some of the objects in \code{\dots} are not two-dimensional spatial objects, the action taken depends on the argument \code{demote}. If \code{demote=TRUE}, the result will belong to the more general class \code{"anylist"} instead of \code{"solist"}. If \code{demote=FALSE} (the default), an error occurs. If \code{promote=TRUE} then the code will check whether all the objects \code{\dots} belong to the same class. If they are all point patterns (class \code{"ppp"}), the result will also belong to the class \code{"ppplist"}. If they are all pixel images (class \code{"im"}), the result will also belong to the class \code{"imlist"}. 
Use \code{\link{as.solist}} to convert a list to a \code{"solist"}. } \value{ A list, usually belonging to the class \code{"solist"}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{as.solist}}, \code{\link{anylist}}, \code{\link{solapply}} } \examples{ solist(cells, density(cells)) solist(cells, japanesepines, redwood) } \keyword{spatial} \keyword{list} \keyword{manip} spatstat/man/pppdist.Rd0000644000176200001440000002214513160710621014643 0ustar liggesusers\name{pppdist} \alias{pppdist} \title{Distance Between Two Point Patterns} \description{ Given two point patterns, find the distance between them based on optimal point matching. } \usage{ pppdist(X, Y, type = "spa", cutoff = 1, q = 1, matching = TRUE, ccode = TRUE, auction = TRUE, precision = NULL, approximation = 10, show.rprimal = FALSE, timelag = 0) } \arguments{ \item{X,Y}{Two point patterns (objects of class \code{"ppp"}).} \item{type}{ A character string giving the type of distance to be computed. One of \code{"spa"} (default), \code{"ace"} or \code{"mat"}, indicating whether the algorithm should find the optimal matching based on \dQuote{subpattern assignment}, \dQuote{assignment only if cardinalities are equal} or \dQuote{mass transfer}. See Details. } \item{cutoff}{ The value \eqn{> 0} at which interpoint distances are cut off. } \item{q}{ The order of the average that is applied to the interpoint distances. May be \code{Inf}, in which case the maximum of the interpoint distances is taken. } \item{matching}{ Logical. Whether to return the optimal matching or only the associated distance. } \item{ccode}{ Logical. If \code{FALSE}, \R code is used which allows for higher precision, but is much slower. } \item{auction}{ Logical. By default a version of Bertsekas' auction algorithm is used to compute an optimal point matching if \code{type} is either \code{"spa"} or \code{"ace"}. 
If \code{auction} is \code{FALSE} (or \code{type} is \code{"mat"}) a specialized primal-dual algorithm is used instead. This was the standard in earlier versions of \pkg{spatstat}, but is several orders of magnitudes slower. } \item{precision}{ Index controlling accuracy of algorithm. The \code{q}-th powers of interpoint distances will be rounded to the nearest multiple of \code{10^(-precision)}. There is a sensible default which depends on \code{ccode}. } \item{approximation}{ If \code{q = Inf}, compute distance based on the optimal matching for the corresponding distance of order \code{approximation}. Can be \code{Inf}, but this makes computations extremely slow. } \item{show.rprimal}{ Logical. Whether to plot the progress of the primal-dual algorithm. If \code{TRUE}, slow primal-dual \R code is used, regardless of the arguments \code{ccode} and \code{auction}. } \item{timelag}{ Time lag, in seconds, between successive displays of the iterative solution of the restricted primal problem. } } \details{ Computes the distance between point patterns \code{X} and \code{Y} based on finding the matching between them which minimizes the average of the distances between matched points (if \code{q=1}), the maximum distance between matched points (if \code{q=Inf}), and in general the \code{q}-th order average (i.e. the \code{1/q}th power of the sum of the \code{q}th powers) of the distances between matched points. Distances between matched points are Euclidean distances cut off at the value of \code{cutoff}. The parameter \code{type} controls the behaviour of the algorithm if the cardinalities of the point patterns are different. 
For the type \code{"spa"} (subpattern assignment) the subpattern of the point pattern with the larger cardinality \eqn{n} that is closest to the point pattern with the smaller cardinality \eqn{m} is determined; then the \code{q}-th order average is taken over \eqn{n} values: the \eqn{m} distances of matched points and \eqn{n-m} "penalty distances" of value \code{cutoff} for the unmatched points. For the type \code{"ace"} (assignment only if cardinalities equal) the matching is empty and the distance returned is equal to \code{cutoff} if the cardinalities differ. For the type \code{"mat"} (mass transfer) each point pattern is assumed to have total mass \eqn{m} (= the smaller cardinality) distributed evenly among its points; the algorithm finds then the "mass transfer plan" that minimizes the \code{q}-th order weighted average of the distances, where the weights are given by the transferred mass divided by \eqn{m}. The result is a fractional matching (each match of two points has a weight in \eqn{(0,1]}) with the minimized quantity as the associated distance. The central problem to be solved is the assignment problem (for types \code{"spa"} and \code{"ace"}) or the more general transport problem (for type \code{"mat"}). Both are well-known problems in discrete optimization, see e.g. Luenberger (2003). For the assignment problem \code{pppdist} uses by default the forward/backward version of Bertsekas' auction algorithm with automated epsilon scaling; see Bertsekas (1992). The implemented version gives good overall performance and can handle point patterns with several thousand points. For the transport problem a specialized primal-dual algorithm is employed; see Luenberger (2003), Section 5.9. The C implementation used by default can handle patterns with a few hundreds of points, but should not be used with thousands of points. By setting \code{show.rprimal = TRUE}, some insight in the working of the algorithm can be gained. 
For a broader selection of optimal transport algorithms that are not restricted to spatial point patterns and allow for additional fine tuning, we recommend the \R package \pkg{transport}. For moderate and large values of \code{q} there can be numerical issues based on the fact that the \code{q}-th powers of distances are taken and some positive values enter the optimization algorithm as zeroes because they are too small in comparison with the larger values. In this case the number of zeroes introduced is given in a warning message, and it is possible then that the matching obtained is not optimal and the associated distance is only a strict upper bound of the true distance. As a general guideline (which can be very wrong in special situations) a small number of zeroes (up to about 50\% of the smaller point pattern cardinality \eqn{m}) usually still results in the right matching, and the number can even be quite a bit higher and usually still provides a highly accurate upper bound for the distance. These numerical problems can be reduced by enforcing (much slower) \R code via the argument \code{ccode = FALSE}. For \code{q = Inf} there is no fast algorithm available, which is why approximation is normally used: for finding the optimal matching, \code{q} is set to the value of \code{approximation}. The resulting distance is still given as the maximum rather than the \code{q}-th order average in the corresponding distance computation. If \code{approximation = Inf}, approximation is suppressed and a very inefficient exhaustive search for the best matching is performed. The value of \code{precision} should normally not be supplied by the user. If \code{ccode = TRUE}, this value is preset to the highest exponent of 10 that the C code still can handle (usually \eqn{9}). If \code{ccode = FALSE}, the value is preset according to \code{q} (usually \eqn{15} if \code{q} is small), which can sometimes be changed to obtain less severe warning messages. 
} \value{ Normally an object of class \code{pppmatching} that contains detailed information about the parameters used and the resulting distance. See \code{\link{pppmatching.object}} for details. If \code{matching = FALSE}, only the numerical value of the distance is returned. } \references{ Bertsekas, D.P. (1992). Auction algorithms for network flow problems: a tutorial introduction. Computational Optimization and Applications 1, 7-66. Luenberger, D.G. (2003). \emph{Linear and nonlinear programming.} Second edition. Kluwer. Schuhmacher, D. (2014). \emph{transport: optimal transport in various forms.} R package version 0.6-2 (or later) Schuhmacher, D. and Xia, A. (2008). A new metric between distributions of point processes. \emph{Advances in Applied Probability} \bold{40}, 651--672 Schuhmacher, D., Vo, B.-T. and Vo, B.-N. (2008). A consistent metric for performance evaluation of multi-object filters. \emph{IEEE Transactions on Signal Processing} \bold{56}, 3447--3457. } \author{ Dominic Schuhmacher \email{dominic.schuhmacher@mathematik.uni-goettingen.de} \cr \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{pppmatching.object}}, \code{\link{matchingdist}} } \examples{ # equal cardinalities set.seed(140627) X <- runifpoint(500) Y <- runifpoint(500) m <- pppdist(X, Y) m \dontrun{ plot(m)} # differing cardinalities X <- runifpoint(14) Y <- runifpoint(10) m1 <- pppdist(X, Y, type="spa") m2 <- pppdist(X, Y, type="ace") m3 <- pppdist(X, Y, type="mat", auction=FALSE) summary(m1) summary(m2) summary(m3) \dontrun{ m1$matrix m2$matrix m3$matrix} # q = Inf X <- runifpoint(10) Y <- runifpoint(10) mx1 <- pppdist(X, Y, q=Inf, matching=FALSE) mx2 <- pppdist(X, Y, q=Inf, matching=FALSE, ccode=FALSE, approximation=50) mx3 <- pppdist(X, Y, q=Inf, matching=FALSE, approximation=Inf) all.equal(mx1,mx2,mx3) # sometimes TRUE all.equal(mx2,mx3) # very often TRUE } \keyword{spatial} \keyword{math} spatstat/man/fitted.slrm.Rd0000644000176200001440000000222713160710621015412 
0ustar liggesusers\name{fitted.slrm} \Rdversion{1.1} \alias{fitted.slrm} \title{ Fitted Probabilities for Spatial Logistic Regression } \description{ Given a fitted Spatial Logistic Regression model, this function computes the fitted probabilities for each pixel. } \usage{ \method{fitted}{slrm}(object, ...) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[stats:fitted.values]{fitted}} for spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The algorithm computes the fitted probabilities of the presence of a random point in each pixel. } \value{ A pixel image (object of class \code{"im"}) containing the fitted probability for each pixel. } \seealso{ \code{\link{slrm}}, \code{\link[stats:fitted.values]{fitted}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) plot(fitted(fit)) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/as.function.fv.Rd0000644000176200001440000000741313160710571016026 0ustar liggesusers\name{as.function.fv} \alias{as.function.fv} \alias{as.function.rhohat} \title{ Convert Function Value Table to Function } \description{ Converts an object of class \code{"fv"} to an \R language function. } \usage{ \method{as.function}{fv}(x, ..., value=".y", extrapolate=FALSE) \method{as.function}{rhohat}(x, ..., value=".y", extrapolate=TRUE) } \arguments{ \item{x}{ Object of class \code{"fv"} or \code{"rhohat"}. } \item{\dots}{ Ignored. } \item{value}{ Optional. Character string or character vector selecting one or more of the columns of \code{x} for use as the function value. See Details. } \item{extrapolate}{ Logical, indicating whether to extrapolate the function outside the domain of \code{x}. See Details. 
} } \details{ A function value table (object of class \code{"fv"}) is a convenient way of storing and plotting several different estimates of the same function. Objects of this class are returned by many commands in \pkg{spatstat}, such as \code{\link{Kest}} which returns an estimate of Ripley's \eqn{K}-function for a point pattern dataset. Sometimes it is useful to convert the function value table to a \code{function} in the \R language. This is done by \code{as.function.fv}. It converts an object \code{x} of class \code{"fv"} to an \R function \code{f}. If \code{f <- as.function(x)} then \code{f} is an \R function that accepts a numeric argument and returns a corresponding value for the summary function by linear interpolation between the values in the table \code{x}. Argument values lying outside the range of the table yield an \code{NA} value (if \code{extrapolate=FALSE}) or the function value at the nearest endpoint of the range (if \code{extrapolate = TRUE}). To apply different rules to the left and right extremes, use \code{extrapolate=c(TRUE,FALSE)} and so on. Typically the table \code{x} contains several columns of function values corresponding to different edge corrections. Auxiliary information for the table identifies one of these columns as the \emph{recommended value}. By default, the values of the function \code{f <- as.function(x)} are taken from this column of recommended values. This default can be changed using the argument \code{value}, which can be a character string or character vector of names of columns of \code{x}. Alternatively \code{value} can be one of the abbreviations used by \code{\link{fvnames}}. If \code{value} specifies a single column of the table, then the result is a function \code{f(r)} with a single numeric argument \code{r} (with the same name as the orginal argument of the function table). 
If \code{value} specifies several columns of the table, then the result is a function \code{f(r,what)} where \code{r} is the numeric argument and \code{what} is a character string identifying the column of values to be used. The formal arguments of the resulting function are \code{f(r, what=value)}, which means that in a call to this function \code{f}, the permissible values of \code{what} are the entries of the original vector \code{value}; the default value of \code{what} is the first entry of \code{value}. The command \code{as.function.fv} is a method for the generic command \code{\link{as.function}}. } \value{ A \code{function(r)} or \code{function(r,what)} where \code{r} is the name of the original argument of the function table. } \author{ \adrian and \rolf } \seealso{ \code{\link{fv}}, \code{\link{fv.object}}, \code{\link{fvnames}}, \code{\link{plot.fv}}, \code{\link{Kest}} } \examples{ K <- Kest(cells) f <- as.function(K) f f(0.1) g <- as.function(K, value=c("iso", "trans")) g g(0.1, "trans") } \keyword{spatial} \keyword{methods} spatstat/man/unique.ppp.Rd0000644000176200001440000000274113160710621015264 0ustar liggesusers\name{unique.ppp} \alias{unique.ppp} \alias{unique.ppx} \title{Extract Unique Points from a Spatial Point Pattern} \description{ Removes any points that are identical to other points in a spatial point pattern. } \usage{ \method{unique}{ppp}(x, \dots, warn=FALSE) \method{unique}{ppx}(x, \dots, warn=FALSE) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{duplicated.ppp}} or \code{\link{duplicated.data.frame}}. } \item{warn}{ Logical. If \code{TRUE}, issue a warning message if any duplicated points were found. } } \value{ Another point pattern object. } \details{ These are methods for the generic function \code{unique} for point pattern datasets (of class \code{"ppp"}, see \code{\link{ppp.object}}, or class \code{"ppx"}). 
This function removes duplicate points in \code{x}, and returns a point pattern. Two points in a point pattern are deemed to be identical if their \eqn{x,y} coordinates are the same, \emph{and} their marks are the same (if they carry marks). This is the default rule: see \code{\link{duplicated.ppp}} for other options. } \seealso{ \code{\link{ppp.object}}, \code{\link{duplicated.ppp}}, \code{\link{multiplicity.ppp}} } \examples{ X <- ppp(c(1,1,0.5), c(2,2,1), window=square(3)) unique(X) unique(X, rule="deldir") } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/miplot.Rd0000644000176200001440000000405513160710621014464 0ustar liggesusers\name{miplot} \alias{miplot} \title{Morisita Index Plot} \description{ Displays the Morisita Index Plot of a spatial point pattern. } \usage{ miplot(X, ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link{as.ppp}}. } \item{\dots}{Optional arguments to control the appearance of the plot.} } \details{ Morisita (1959) defined an index of spatial aggregation for a spatial point pattern based on quadrat counts. The spatial domain of the point pattern is first divided into \eqn{Q} subsets (quadrats) of equal size and shape. The numbers of points falling in each quadrat are counted. Then the Morisita Index is computed as \deqn{ \mbox{MI} = Q \frac{\sum_{i=1}^Q n_i (n_i - 1)}{N(N-1)} }{ MI = Q * sum(n[i] (n[i]-1))/(N(N-1)) } where \eqn{n_i}{n[i]} is the number of points falling in the \eqn{i}-th quadrat, and \eqn{N} is the total number of points. If the pattern is completely random, \code{MI} should be approximately equal to 1. Values of \code{MI} greater than 1 suggest clustering. The \emph{Morisita Index plot} is a plot of the Morisita Index \code{MI} against the linear dimension of the quadrats. 
The point pattern dataset is divided into \eqn{2 \times 2}{2 * 2} quadrats, then \eqn{3 \times 3}{3 * 3} quadrats, etc, and the Morisita Index is computed each time. This plot is an attempt to discern different scales of dependence in the point pattern data. } \value{ None. } \references{ M. Morisita (1959) Measuring of the dispersion of individuals and analysis of the distributional patterns. Memoir of the Faculty of Science, Kyushu University, Series E: Biology. \bold{2}: 215--235. } \seealso{ \code{\link{quadratcount}} } \examples{ data(longleaf) miplot(longleaf) opa <- par(mfrow=c(2,3)) data(cells) data(japanesepines) data(redwood) plot(cells) plot(japanesepines) plot(redwood) miplot(cells) miplot(japanesepines) miplot(redwood) par(opa) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/fv.object.Rd0000644000176200001440000000301713160710621015035 0ustar liggesusers\name{fv.object} \alias{fv.object} %DoNotExport \title{Function Value Table} \description{ A class \code{"fv"} to support the convenient plotting of several estimates of the same function. } \details{ An object of this class is a convenient way of storing and plotting several different estimates of the same function. It is a data frame with extra attributes indicating the recommended way of plotting the function, and other information. There are methods for \code{print} and \code{plot} for this class. Objects of class \code{"fv"} are returned by \code{\link{Fest}}, \code{\link{Gest}},\code{\link{Jest}}, and \code{\link{Kest}} along with many other functions. } \seealso{ Objects of class \code{"fv"} are returned by \code{\link{Fest}}, \code{\link{Gest}},\code{\link{Jest}}, and \code{\link{Kest}} along with many other functions. See \code{\link{plot.fv}} for plotting an \code{"fv"} object. See \code{\link{as.function.fv}} to convert an \code{"fv"} object to an \R function. Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects. 
Use \code{\link{bind.fv}} to glue additional columns onto an existing \code{"fv"} object. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{fvnames}, \code{fvnames<-}, \code{tweak.fv.entry} and \code{rebadge.fv}. } \examples{ data(cells) K <- Kest(cells) class(K) K # prints a sensible summary plot(K) } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/affine.psp.Rd0000644000176200001440000000334013160710571015211 0ustar liggesusers\name{affine.psp} \alias{affine.psp} \title{Apply Affine Transformation To Line Segment Pattern} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a line segment pattern. } \usage{ \method{affine}{psp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X}{Line Segment pattern (object of class \code{"psp"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Arguments passed to \code{\link{affine.owin}} affecting the handling of the observation window, if it is a binary pixel mask. } } \value{ Another line segment pattern (of class \code{"psp"}) representing the result of applying the affine transformation. } \details{ The line segment pattern, and its window, are subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and are then translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. This is a method for the generic function \code{\link{affine}}. 
} \seealso{ \code{\link{affine}}, \code{\link{affine.owin}}, \code{\link{affine.ppp}}, \code{\link{affine.im}}, \code{\link{flipxy}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ oldpar <- par(mfrow=c(2,1)) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(X, main="original") # shear transformation Y <- affine(X, matrix(c(1,0,0.6,1),ncol=2)) plot(Y, main="transformed") par(oldpar) # # rescale y coordinates by factor 0.2 affine(X, diag(c(1,0.2))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/pcfdot.inhom.Rd0000644000176200001440000001144513160710621015551 0ustar liggesusers\name{pcfdot.inhom} \alias{pcfdot.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Type-i-To-Any-Type) } \description{ Estimates the inhomogeneous multitype pair correlation function (from type \eqn{i} to any type) for a multitype point pattern. } \usage{ pcfdot.inhom(X, i, lambdaI = NULL, lambdadot = NULL, ..., r = NULL, breaks = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma = NULL, varcov = NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity function of the points of type \code{i}. Either a vector giving the intensity values at the points of type \code{i}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. 
} \item{lambdadot}{ Optional. Values of the estimated intensity function of the point pattern \code{X}. A numeric vector, pixel image or \code{function(x,y)}. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdadot} is estimated by kernel smoothing. } } \details{ The inhomogeneous multitype (type \eqn{i} to any type) pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} is a summary of the dependence between different types of points in a multitype spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and another point of any type at location \eqn{y}, where \eqn{x} and \eqn{y} are separated by a distance \eqn{r}, is equal to \deqn{ p(r) = \lambda_i(x) \lambda(y) g_{i\bullet}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i](x) * lambda(y) * g[i.](r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity function of the process of points of type \eqn{i}, and where \eqn{\lambda}{lambda} is the intensity function of the points of all types. For a multitype Poisson point process, this probability is \eqn{p(r) = \lambda_i(x) \lambda(y)}{p(r) = lambda[i](x) * lambda(y)} so \eqn{g_{i\bullet}(r) = 1}{g[i.](r) = 1}. 
The command \code{pcfdot.inhom} estimates the inhomogeneous multitype pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If the arguments \code{lambdaI} and \code{lambdadot} are missing or null, they are estimated from \code{X} by kernel smoothing using a leave-one-out estimator. } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous multitype pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{i\bullet}(r)}{g[i.](r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)} estimated by Ripley isotropic correction } as required. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link{pcfdot}}, \code{\link{pcfcross.inhom}} } \examples{ data(amacrine) plot(pcfdot.inhom(amacrine, "on", stoyan=0.1), legendpos="bottom") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/Smooth.ssf.Rd0000644000176200001440000000155113160710621015221 0ustar liggesusers\name{Smooth.ssf} \alias{Smooth.ssf} \title{ Smooth a Spatially Sampled Function } \description{ Applies kernel smoothing to a spatially sampled function. } \usage{ \method{Smooth}{ssf}(X, \dots) } \arguments{ \item{X}{ Object of class \code{"ssf"}. } \item{\dots}{ Arguments passed to \code{\link[spatstat]{Smooth.ppp}} to control the smoothing. } } \details{ An object of class \code{"ssf"} represents a real-valued or vector-valued function that has been evaluated or sampled at an irregular set of points. The function values will be smoothed using a Gaussian kernel. } \value{ A pixel image or a list of pixel images. } \author{ \adrian. 
} \seealso{ \code{\link{ssf}}, \code{\link{Smooth.ppp}} } \examples{ f <- ssf(redwood, nndist(redwood)) Smooth(f, sigma=0.1) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/rsyst.Rd0000644000176200001440000000500413160710621014337 0ustar liggesusers\name{rsyst} \alias{rsyst} \title{Simulate systematic random point pattern} \description{ Generates a \dQuote{systematic random} pattern of points in a window, consisting of a grid of equally-spaced points with a random common displacement. } \usage{ rsyst(win=square(1), nx=NULL, ny=nx, \dots, dx=NULL, dy=dx, nsim=1, drop=TRUE) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of columns of grid points in the window. Incompatible with \code{dx}. } \item{ny}{Number of rows of grid points in the window. Incompatible with \code{dy}. } \item{\dots}{Ignored.} \item{dx}{Spacing of grid points in \eqn{x} direction. Incompatible with \code{nx}. } \item{dy}{Spacing of grid points in \eqn{y} direction. Incompatible with \code{ny}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates a \dQuote{systematic random} pattern of points in the window \code{win}. The pattern consists of a rectangular grid of points with a random common displacement. The grid spacing in the \eqn{x} direction is determined either by the number of columns \code{nx} or by the horizontal spacing \code{dx}. The grid spacing in the \eqn{y} direction is determined either by the number of rows \code{ny} or by the vertical spacing \code{dy}. 
The grid is then given a random displacement (the common displacement of the grid points is a uniformly distributed random vector in the tile of dimensions \code{dx, dy}). Some of the resulting grid points may lie outside the window \code{win}: if they do, they are deleted. The result is a point pattern inside the window \code{win}. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) as well as in simulating random point patterns. } \seealso{ \code{\link{rstrat}}, \code{\link{runifpoint}}, \code{\link{quadscheme}} } \examples{ X <- rsyst(nx=10) plot(X) # polygonal boundary data(letterR) X <- rsyst(letterR, 5, 10) plot(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/km.rs.Rd0000644000176200001440000000610313160710621014206 0ustar liggesusers\name{km.rs} \alias{km.rs} \title{Kaplan-Meier and Reduced Sample Estimator using Histograms} \description{ Compute the Kaplan-Meier and Reduced Sample estimators of a survival time distribution function, using histogram techniques } \usage{ km.rs(o, cc, d, breaks) } \arguments{ \item{o}{vector of observed survival times } \item{cc}{vector of censoring times } \item{d}{vector of non-censoring indicators } \item{breaks}{Vector of breakpoints to be used to form histograms. } } \value{ A list with five elements \item{rs}{Reduced-sample estimate of the survival time c.d.f. \eqn{F(t)} } \item{km}{Kaplan-Meier estimate of the survival time c.d.f. \eqn{F(t)} } \item{hazard}{corresponding Nelson-Aalen estimate of the hazard rate \eqn{\lambda(t)}{lambda(t)} } \item{r}{values of \eqn{t} for which \eqn{F(t)} is estimated } \item{breaks}{the breakpoints vector } } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the Kaplan-Meier estimator from a huge dataset. 
Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}. Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. The arguments to this function are vectors \code{o}, \code{cc}, \code{d} of observed values of \eqn{\tilde T_i}{T*[i]}, \eqn{C_i}{C[i]} and \eqn{D_i}{D[i]} respectively. The function computes histograms and forms the reduced-sample and Kaplan-Meier estimates of \eqn{F(t)} by invoking the functions \code{\link{kaplan.meier}} and \code{\link{reduced.sample}}. This is efficient if the lengths of \code{o}, \code{cc}, \code{d} (i.e. the number of observations) is large. The vectors \code{km} and \code{hazard} returned by \code{kaplan.meier} are (histogram approximations to) the Kaplan-Meier estimator of \eqn{F(t)} and its hazard rate \eqn{\lambda(t)}{lambda(t)}. Specifically, \code{km[k]} is an estimate of \code{F(breaks[k+1])}, and \code{lambda[k]} is an estimate of the average of \eqn{\lambda(t)}{lambda(t)} over the interval \code{(breaks[k],breaks[k+1])}. This approximation is exact only if the survival times are discrete and the histogram breaks are fine enough to ensure that each interval \code{(breaks[k],breaks[k+1])} contains only one possible value of the survival time. The vector \code{rs} is the reduced-sample estimator, \code{rs[k]} being the reduced sample estimate of \code{F(breaks[k+1])}. This value is exact, i.e. the use of histograms does not introduce any approximation error in the reduced-sample estimator. 
} \seealso{ \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/rshift.psp.Rd0000644000176200001440000001044113160710621015254 0ustar liggesusers\name{rshift.psp} \alias{rshift.psp} \title{Randomly Shift a Line Segment Pattern} \description{ Randomly shifts the segments in a line segment pattern. } \usage{ \method{rshift}{psp}(X, \dots, group=NULL, which=NULL) } \arguments{ \item{X}{Line segment pattern to be subjected to a random shift. An object of class \code{"psp"}. } \item{\dots}{ Arguments controlling the randomisation and the handling of edge effects. See \code{\link{rshift.ppp}}. } \item{group}{ Optional. Factor specifying a grouping of the line segments of \code{X}, or \code{NULL} indicating that all line segments belong to the same group. Each group will be shifted together, and separately from other groups. } \item{which}{ Optional. Identifies which groups of the pattern will be shifted, while other groups are not shifted. A vector of levels of \code{group}. } } \value{ A line segment pattern (object of class \code{"psp"}). } \details{ This operation randomly shifts the locations of the line segments in a line segment pattern. The function \code{rshift} is generic. This function \code{rshift.psp} is the method for line segment patterns. The line segments of \code{X} are first divided into groups, then the line segments within a group are shifted by a common random displacement vector. Different groups of line segments are shifted independently. If the argument \code{group} is present, then this determines the grouping. Otherwise, all line segments belong to a single group. The argument \code{group} should be a factor, of length equal to the number of line segments in \code{X}. Alternatively \code{group} may be \code{NULL}, which specifies that all line segments of \code{X} belong to a single group. By default, every group of line segments will be shifted. 
The argument \code{which} indicates that only some of the groups should be shifted, while other groups should be left unchanged. \code{which} must be a vector of levels of \code{group} indicating which groups are to be shifted. The displacement vector, i.e. the vector by which the data line segments are shifted, is generated at random. The \emph{default} behaviour is to generate a displacement vector at random with equal probability for all possible displacements. This means that the \eqn{x} and \eqn{y} coordinates of the displacement vector are independent random variables, uniformly distributed over the range of possible coordinates. Alternatively, the displacement vector can be generated by another random mechanism, controlled by the arguments \code{radius}, \code{width} and \code{height}. \describe{ \item{rectangular:}{ if \code{width} and \code{height} are given, then the displacement vector is uniformly distributed in a rectangle of these dimensions, centred at the origin. The maximum possible displacement in the \eqn{x} direction is \code{width/2}. The maximum possible displacement in the \eqn{y} direction is \code{height/2}. The \eqn{x} and \eqn{y} displacements are independent. (If \code{width} and \code{height} are actually equal to the dimensions of the observation window, then this is equivalent to the default.) } \item{radial:}{ if \code{radius} is given, then the displacement vector is generated by choosing a random point inside a disc of the given radius, centred at the origin, with uniform probability density over the disc. Thus the argument \code{radius} determines the maximum possible displacement distance. The argument \code{radius} is incompatible with the arguments \code{width} and \code{height}. } } The argument \code{edge} controls what happens when a shifted line segment lies partially or completely outside the window of \code{X}. 
Currently the only option is \code{"erode"} which specifies that the segments will be clipped to a smaller window. The optional argument \code{clip} specifies a smaller window to which the pattern should be restricted. } \seealso{ \code{\link{rshift}}, \code{\link{rshift.ppp}} } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) Y <- rshift(X, radius=0.1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/rstrat.Rd0000644000176200001440000000401113160710621014467 0ustar liggesusers\name{rstrat} \alias{rstrat} \title{Simulate Stratified Random Point Pattern} \description{ Generates a ``stratified random'' pattern of points in a window, by dividing the window into rectangular tiles and placing \code{k} random points independently in each tile. } \usage{ rstrat(win=square(1), nx, ny=nx, k = 1, nsim=1, drop=TRUE) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of tiles in each column. } \item{ny}{Number of tiles in each row. } \item{k}{Number of random points to generate in each tile. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates a random pattern of points in a ``stratified random'' sampling design. It can be useful for generating random spatial sampling points. The bounding rectangle of \code{win} is divided into a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles. In each tile, \code{k} random points are generated independently with a uniform distribution in that tile. 
Some of these grid points may lie outside the window \code{win}: if they do, they are deleted. The result is a point pattern inside the window \code{win}. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) as well as in simulating random point patterns. } \seealso{ \code{\link{rsyst}}, \code{\link{runifpoint}}, \code{\link{quadscheme}} } \examples{ X <- rstrat(nx=10) plot(X) # polygonal boundary data(letterR) X <- rstrat(letterR, 5, 10, k=3) plot(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/unstack.ppp.Rd0000644000176200001440000000341513160710621015425 0ustar liggesusers\name{unstack.ppp} \alias{unstack.ppp} \alias{unstack.psp} \alias{unstack.lpp} \title{ Separate Multiple Columns of Marks } \description{ Given a spatial pattern with several columns of marks, take one column at a time, and return a list of spatial patterns each having only one column of marks. } \usage{ \method{unstack}{ppp}(x, \dots) \method{unstack}{psp}(x, \dots) \method{unstack}{lpp}(x, \dots) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"lpp"}) or a spatial pattern of line segments (object of class \code{"psp"}). } \item{\dots}{ Ignored. } } \details{ The functions defined here are methods for the generic \code{\link[utils]{unstack}}. The functions expect a spatial object \code{x} which has several columns of marks; they separate the columns, and return a list of spatial objects, each having only one column of marks. If \code{x} has several columns of marks (i.e. \code{marks(x)} is a matrix, data frame or hyperframe with several columns), then \code{y <- unstack(x)} is a list of spatial objects, each of the same kind as \code{x}. The \code{j}th entry \code{y[[j]]} is equivalent to \code{x} except that it only includes the \code{j}th column of \code{marks(x)}. 
If \code{x} has no marks, or has only a single column of marks, the result is a list consisting of one entry, which is \code{x}. } \value{ A list, of class \code{"solist"}, whose entries are objects of the same type as \code{x}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[utils]{unstack}} \code{\link{unstack.msr}} See also methods for the generic \code{\link[base]{split}} such as \code{\link{split.ppp}}. } \examples{ finpines unstack(finpines) } \keyword{spatial} \keyword{manip} spatstat/man/StraussHard.Rd0000644000176200001440000001000613160710571015420 0ustar liggesusers\name{StraussHard} \alias{StraussHard} \title{The Strauss / Hard Core Point Process Model} \description{ Creates an instance of the ``Strauss/ hard core'' point process model which can then be fitted to point pattern data. } \usage{ StraussHard(r, hc=NA) } \arguments{ \item{r}{The interaction radius of the Strauss interaction} \item{hc}{The hard core distance. Optional.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the ``Strauss/hard core'' process with Strauss interaction radius \eqn{r} and hard core distance \code{hc}. } \details{ A Strauss/hard core process with interaction radius \eqn{r}, hard core distance \eqn{h < r}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is a pairwise interaction point process in which \itemize{ \item distinct points are not allowed to come closer than a distance \eqn{h} apart \item each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the probability density. } This is a hybrid of the Strauss process and the hard core process. The probability density is zero if any pair of points is closer than \eqn{h} units apart, and otherwise equals \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . 
beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} may take any positive value (unlike the case for the Strauss process). If \eqn{\gamma < 1}{gamma < 1}, the model describes an ``ordered'' or ``inhibitive'' pattern. If \eqn{\gamma > 1}{gamma > 1}, the model is ``ordered'' or ``inhibitive'' up to the distance \eqn{h}, but has an ``attraction'' between points lying at distances in the range between \eqn{h} and \eqn{r}. If \eqn{\gamma = 1}{gamma = 1}, the process reduces to a classical hard core process with hard core distance \eqn{h}. If \eqn{\gamma = 0}{gamma = 0}, the process reduces to a classical hard core process with hard core distance \eqn{r}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Strauss/hard core process pairwise interaction is yielded by the function \code{StraussHard()}. See the examples below. The canonical parameter \eqn{\log(\gamma)}{log(gamma)} is estimated by \code{\link{ppm}()}, not fixed in \code{StraussHard()}. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. 
\emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \examples{ StraussHard(r=1,hc=0.02) # prints a sensible description of itself data(cells) \dontrun{ ppm(cells, ~1, StraussHard(r=0.1, hc=0.05)) # fit the stationary Strauss/hard core process to `cells' } ppm(cells, ~ polynom(x,y,3), StraussHard(r=0.1, hc=0.05)) # fit a nonstationary Strauss/hard core process # with log-cubic polynomial trend } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/plot.mppm.Rd0000644000176200001440000000463313160710621015110 0ustar liggesusers\name{plot.mppm} \alias{plot.mppm} \title{plot a Fitted Multiple Point Process Model} \description{ Given a point process model fitted to multiple point patterns by \code{\link{mppm}}, compute spatial trend or conditional intensity surface of the model, in a form suitable for plotting, and (optionally) plot this surface. } \usage{ \method{plot}{mppm}(x, \dots, trend=TRUE, cif=FALSE, se=FALSE, how=c("image", "contour", "persp")) } \arguments{ \item{x}{ A point process model fitted to multiple point patterns, typically obtained from the model-fitting algorithm \code{\link{mppm}}. An object of class \code{"mppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} or \code{\link{plot.anylist}} controlling the plot. } \item{trend}{ Logical value indicating whether to plot the fitted trend. } \item{cif}{ Logical value indicating whether to plot the fitted conditional intensity. } \item{se}{ Logical value indicating whether to plot the standard error of the fitted trend. } \item{how}{ Single character string indicating the style of plot to be performed. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"mppm"} of point process models fitted to multiple point patterns (see \code{\link{mppm}}). 
It invokes \code{\link{subfits}} to compute the fitted model for each individual point pattern dataset, then calls \code{\link{plot.ppm}} to plot these individual models. These individual plots are displayed using \code{\link{plot.anylist}}, which generates either a series of separate plot frames or an array of plot panels on a single page. } \seealso{ \code{\link{plot.ppm}}, \code{\link{mppm}}, \code{\link{plot.listof}} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \examples{ # Synthetic data from known model n <- 9 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1)) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)))) fit <- mppm(Y ~Z + U + V, data=H) plot(fit) } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/is.im.Rd0000644000176200001440000000117313160710621014175 0ustar liggesusers\name{is.im} \alias{is.im} \title{Test Whether An Object Is A Pixel Image} \description{ Tests whether its argument is a pixel image (object of class \code{"im"}). } \usage{ is.im(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the argument \code{x} is a pixel image object of class \code{"im"}. For details of this class, see \code{\link{im.object}}. The object is determined to be an image if it inherits from class \code{"im"}. } \value{ \code{TRUE} if \code{x} is a pixel image, otherwise \code{FALSE}. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/rmhstart.Rd0000644000176200001440000000703113160710621015021 0ustar liggesusers\name{rmhstart} \alias{rmhstart} \alias{rmhstart.default} \title{Determine Initial State for Metropolis-Hastings Simulation.} \description{ Builds a description of the initial state for the Metropolis-Hastings algorithm. } \usage{ rmhstart(start, \dots) \method{rmhstart}{default}(start=NULL, \dots, n.start=NULL, x.start=NULL) } \arguments{ \item{start}{An existing description of the initial state in some format. Incompatible with the arguments listed below. } \item{\dots}{There should be no other arguments.} \item{n.start}{ Number of initial points (to be randomly generated). Incompatible with \code{x.start}. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. } } \value{ An object of class \code{"rmhstart"}, which is essentially a list of parameters describing the initial point pattern and (optionally) the initial state of the random number generator. There is a \code{print} method for this class, which prints a sensible description of the initial state. } \details{ Simulated realisations of many point process models can be generated using the Metropolis-Hastings algorithm implemented in \code{\link{rmh}}. This function \code{rmhstart} creates a full description of the initial state of the Metropolis-Hastings algorithm, \emph{including possibly the initial state of the random number generator}, for use in a subsequent call to \code{\link{rmh}}. It also checks that the initial state is valid. The initial state should be specified \bold{either} by the first argument \code{start} \bold{or} by the other arguments \code{n.start}, \code{x.start} etc. If \code{start} is a list, then it should have components named \code{n.start} or \code{x.start}, with the same interpretation as described below. 
The arguments are: \describe{ \item{n.start}{ The number of \dQuote{initial} points to be randomly (uniformly) generated in the simulation window \code{w}. Incompatible with \code{x.start}. For a multitype point process, \code{n.start} may be a vector (of length equal to the number of types) giving the number of points of each type to be generated. If expansion of the simulation window is selected (see the argument \code{expand} to \code{\link{rmhcontrol}}), then the actual number of starting points in the simulation will be \code{n.start} multiplied by the expansion factor (ratio of the areas of the expanded window and original window). For faster convergence of the Metropolis-Hastings algorithm, the value of \code{n.start} should be roughly equal to (an educated guess at) the expected number of points for the point process inside the window. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. \code{x.start} may be a point pattern (an object of class \code{ppp}), or an object which can be coerced to this class by \code{\link{as.ppp}}, or a dataset containing vectors \code{x} and \code{y}. If \code{x.start} is specified, then expansion of the simulation window (the argument \code{expand} of \code{\link{rmhcontrol}}) is not permitted. } } The parameters \code{n.start} and \code{x.start} are \emph{incompatible}. } \seealso{ \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhmodel}} } \examples{ # 30 random points a <- rmhstart(n.start=30) # a particular point pattern data(cells) b <- rmhstart(x.start=cells) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/improve.kppm.Rd0000644000176200001440000001140713160710621015606 0ustar liggesusers\name{improve.kppm} \alias{improve.kppm} \title{Improve Intensity Estimate of Fitted Cluster Point Process Model} \description{ Update the fitted intensity of a fitted cluster point process model. 
} \usage{ improve.kppm(object, type=c("quasi", "wclik1", "clik1"), rmax = NULL, eps.rmax = 0.01, dimyx = 50, maxIter = 100, tolerance = 1e-06, fast = TRUE, vcov = FALSE, fast.vcov = FALSE, verbose = FALSE, save.internals = FALSE) } \arguments{ \item{object}{ Fitted cluster point process model (object of class \code{"kppm"}). } \item{type}{ A character string indicating the method of estimation. Current options are \code{"clik1"}, \code{"wclik1"} and \code{"quasi"} for, respectively, first order composite (Poisson) likelihood, weighted first order composite likelihood and quasi-likelihood. } \item{rmax}{ Optional. The dependence range. Not usually specified by the user. } \item{eps.rmax}{ Numeric. A small positive number which is used to determine \code{rmax} from the tail behaviour of the pair correlation function. Namely \code{rmax} is the smallest value of \eqn{r} at which \eqn{(g(r)-1)/(g(0)-1)} falls below \code{eps.rmax}. Ignored if \code{rmax} is provided. } \item{dimyx}{ Pixel array dimensions. See Details. } \item{maxIter}{ Integer. Maximum number of iterations of iterative weighted least squares (Fisher scoring). } \item{tolerance}{ Numeric. Tolerance value specifying when to stop iterative weighted least squares (Fisher scoring). } \item{fast}{ Logical value indicating whether tapering should be used to make the computations faster (requires the package \pkg{Matrix}). } \item{vcov}{ Logical value indicating whether to calculate the asymptotic variance covariance/matrix. } \item{fast.vcov}{ Logical value indicating whether tapering should be used for the variance/covariance matrix to make the computations faster (requires the package \pkg{Matrix}). Caution: This is expected to underestimate the true asymptotic variances/covariances. } \item{verbose}{ A logical indicating whether the details of computations should be printed. 
} \item{save.internals}{ A logical indicating whether internal quantities should be saved in the returned object (mostly for development purposes). } } \value{ A fitted cluster point process model of class \code{"kppm"}. } \details{ This function reestimates the intensity parameters in a fitted \code{"kppm"} object. If \code{type="clik1"} estimates are based on the first order composite (Poisson) likelihood, which ignores dependence between the points. Note that \code{type="clik1"} is mainly included for testing purposes and is not recommended for the typical user; instead the more efficient \code{\link{kppm}} with \code{improve.type="none"} should be used. When \code{type="quasi"} or \code{type="wclik1"} the dependence structure between the points is incorporated in the estimation procedure by using the estimated pair correlation function in the estimating equation. In all cases the estimating equation is based on dividing the observation window into small subregions and counting the number of points in each subregion. To do this the observation window is first converted into a digital mask by \code{\link{as.mask}} where the resolution is controlled by the argument \code{dimyx}. The computational time grows with the cube of the number of subregions, so fine grids may take very long to compute (or even run out of memory). } \seealso{ \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{improve.kppm}} } \references{ Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes, \emph{Biometrics}, \bold{63}, 252-258. Guan, Y. and Shen, Y. (2010) A weighted estimating equation approach to inference for inhomogeneous spatial point processes, \emph{Biometrika}, \bold{97}, 867-880. Guan, Y., Jalilian, A. and Waagepetersen, R. (2015) Quasi-likelihood for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 677--697. 
} \examples{ # fit a Thomas process using minimum contrast estimation method # to model interaction between points of the pattern fit0 <- kppm(bei ~ elev + grad, data = bei.extra) # fit the log-linear intensity model with quasi-likelihood method fit1 <- improve.kppm(fit0, type="quasi") # compare coef(fit0) coef(fit1) } \author{Abdollah Jalilian \email{jalilian@razi.ac.ir} % \url{http://www.razi.ac.ir/ajalilian/} and Rasmus Waagepetersen \email{rw@math.aau.dk} adapted for \pkg{spatstat} by \adrian and \ege } \keyword{spatial} \keyword{fit model} spatstat/man/Kmeasure.Rd0000644000176200001440000001574313160710571014746 0ustar liggesusers\name{Kmeasure} \alias{Kmeasure} \title{Reduced Second Moment Measure} \description{ Estimates the reduced second moment measure \eqn{\kappa}{Kappa} from a point pattern in a window of arbitrary shape. } \usage{ Kmeasure(X, sigma, edge=TRUE, \dots, varcov=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{\kappa}{Kappa} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{sigma}{ Standard deviation \eqn{\sigma}{sigma} of the Gaussian smoothing kernel. Incompatible with \code{varcov}. } \item{edge}{ Logical value indicating whether an edge correction should be applied. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution. } \item{varcov}{ Variance-covariance matrix of the Gaussian smoothing kernel. Incompatible with \code{sigma}. } } \value{ A real-valued pixel image (an object of class \code{"im"}, see \code{\link{im.object}}) whose pixel values are estimates of the density of the reduced second moment measure at each location. } \details{ Given a point pattern dataset, this command computes an estimate of the reduced second moment measure \eqn{\kappa}{Kappa} of the point process. The result is a pixel image whose pixel values are estimates of the density of the reduced second moment measure. 
The reduced second moment measure \eqn{\kappa}{Kappa} can be regarded as a generalisation of the more familiar \eqn{K}-function. An estimate of \eqn{\kappa}{Kappa} derived from a spatial point pattern dataset can be useful in exploratory data analysis. Its advantage over the \eqn{K}-function is that it is also sensitive to anisotropy and directional effects. In a nutshell, the command \code{Kmeasure} computes a smoothed version of the \emph{Fry plot}. As explained under \code{\link{fryplot}}, the Fry plot is a scatterplot of the vectors joining all pairs of points in the pattern. The reduced second moment measure is (essentially) defined as the average of the Fry plot over different realisations of the point process. The command \code{Kmeasure} effectively smooths the Fry plot of a dataset to obtain an estimate of the reduced second moment measure. In formal terms, the reduced second moment measure \eqn{\kappa}{Kappa} of a stationary point process \eqn{X} is a measure defined on the two-dimensional plane such that, for a `typical' point \eqn{x} of the process, the expected number of other points \eqn{y} of the process such that the vector \eqn{y - x} lies in a region \eqn{A}, equals \eqn{\lambda \kappa(A)}{lambda * Kappa(A)}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The \eqn{K}-function is a special case. The function value \eqn{K(t)} is the value of the reduced second moment measure for the disc of radius \eqn{t} centred at the origin; that is, \eqn{K(t) = \kappa(b(0,t))}{K(t) = Kappa(b(0,t))}. The command \code{Kmeasure} computes an estimate of \eqn{\kappa}{Kappa} from a point pattern dataset \code{X}, which is assumed to be a realisation of a stationary point process, observed inside a known, bounded window. Marks are ignored. 
The algorithm approximates the point pattern and its window by binary pixel images, introduces a Gaussian smoothing kernel and uses the Fast Fourier Transform \code{\link{fft}} to form a density estimate of \eqn{\kappa}{Kappa}. The calculation corresponds to the edge correction known as the ``translation correction''. The Gaussian smoothing kernel may be specified by either of the arguments \code{sigma} or \code{varcov}. If \code{sigma} is a single number, this specifies an isotropic Gaussian kernel with standard deviation \code{sigma} on each coordinate axis. If \code{sigma} is a vector of two numbers, this specifies a Gaussian kernel with standard deviation \code{sigma[1]} on the \eqn{x} axis, standard deviation \code{sigma[2]} on the \eqn{y} axis, and zero correlation between the \eqn{x} and \eqn{y} axes. If \code{varcov} is given, this specifies the variance-covariance matrix of the Gaussian kernel. There do not seem to be any well-established rules for selecting the smoothing kernel in this context. The density estimate of \eqn{\kappa}{Kappa} is returned in the form of a real-valued pixel image. Pixel values are estimates of the normalised second moment density at the centre of the pixel. (The uniform Poisson process would have values identically equal to \eqn{1}.) The image \code{x} and \code{y} coordinates are on the same scale as vector displacements in the original point pattern window. The point \code{x=0, y=0} corresponds to the `typical point'. A peak in the image near \code{(0,0)} suggests clustering; a dip in the image near \code{(0,0)} suggests inhibition; peaks or dips at other positions suggest possible periodicity. If desired, the value of \eqn{\kappa(A)}{Kappa(A)} for a region \eqn{A} can be estimated by computing the integral of the pixel image over the domain \eqn{A}, i.e.\ summing the pixel values and multiplying by pixel area, using \code{\link{integral.im}}. 
One possible application is to compute anisotropic counterparts of the \eqn{K}-function (in which the disc of radius \eqn{t} is replaced by another shape). See Examples. } \section{Warning}{ Some writers use the term \emph{reduced second moment measure} when they mean the \eqn{K}-function. This has caused confusion. As originally defined, the reduced second moment measure is a measure, obtained by modifying the second moment measure, while the \eqn{K}-function is a function obtained by evaluating this measure for discs of increasing radius. In \pkg{spatstat}, the \eqn{K}-function is computed by \code{\link{Kest}} and the reduced second moment measure is computed by \code{Kmeasure}. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{fryplot}}, \code{\link{spatstat.options}}, \code{\link{integral.im}}, \code{\link{im.object}} } \examples{ data(cells) plot(Kmeasure(cells, 0.05)) # shows pronounced dip around origin consistent with strong inhibition data(redwood) plot(Kmeasure(redwood, 0.03), col=grey(seq(1,0,length=32))) # shows peaks at several places, reflecting clustering and ?periodicity M <- Kmeasure(cells, 0.05) # evaluate measure on a sector W <- Window(M) ang <- as.im(atan2, W) rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W) sector <- solutionset(ang > 0 & ang < 1 & rad < 0.6) integral.im(M[sector, drop=FALSE]) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/Extract.influence.ppm.Rd0000644000176200001440000000340613160710621017333 0ustar liggesusers\name{Extract.influence.ppm} \alias{[.influence.ppm} \title{Extract Subset of Influence Object} \description{ Extract a subset of an influence object, or extract the influence values at specified locations. 
} \usage{ \method{[}{influence.ppm}(x, i, ...) } \arguments{ \item{x}{ An influence object (of class \code{"influence.ppm"}) computed by \code{\link{influence.ppm}}. } \item{i}{ Subset index (passed to \code{\link{[.ppp}}). Either a spatial window (object of class \code{"owin"}) or an integer index. } \item{\dots}{ Ignored. } } \value{ Another object of class \code{"influence.ppm"}. } \details{ An object of class \code{"influence.ppm"} contains the values of the likelihood influence for a point process model, computed by \code{\link{influence.ppm}}. This is effectively a marked point pattern obtained by marking each of the original data points with its likelihood influence. This function extracts a designated subset of the influence values, either as another influence object, or as a vector of numeric values. The function \code{[.influence.ppm} is a method for \code{\link{[}} for the class \code{"influence.ppm"}. The argument \code{i} should be an index applicable to a point pattern. It may be either a spatial window (object of class \code{"owin"}) or a sequence index. The result will be another influence object (of class \code{influence.ppm}). To extract the influence values as a numeric vector, use \code{marks(as.ppp(x))}. } \seealso{ \code{\link{influence.ppm}}. } \examples{ fit <- ppm(cells, ~x) infl <- influence(fit) b <- owin(c(0.1, 0.3), c(0.2, 0.4)) infl[b] infl[1:5] marks(as.ppp(infl))[1:3] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/convexhull.Rd0000644000176200001440000000153413160710571015352 0ustar liggesusers\name{convexhull} \alias{convexhull} \title{Convex Hull} \description{ Computes the convex hull of a spatial object. } \usage{ convexhull(x) } \arguments{ \item{x}{ a window (object of class \code{"owin"}), a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), or an object that can be converted to a window by \code{\link{as.owin}}. 
} } \value{ A window (an object of class \code{"owin"}). } \details{ This function computes the convex hull of the spatial object \code{x}. } \seealso{ \code{\link{owin}}, \code{\link{convexhull.xy}}, \code{\link{is.convex}} } \examples{ data(demopat) W <- Window(demopat) plot(convexhull(W), col="lightblue", border=NA) plot(W, add=TRUE, lwd=2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/with.msr.Rd0000644000176200001440000000526113160710621014733 0ustar liggesusers\name{with.msr} \alias{with.msr} \title{Evaluate Expression Involving Components of a Measure} \description{ An expression involving the names of components of a measure is evaluated. } \usage{ \method{with}{msr}(data, expr, \dots) } \arguments{ \item{data}{ A measure (object of class \code{"msr"}). } \item{expr}{ An expression to be evaluated. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[base]{with}} for the class \code{"msr"}. The argument \code{data} should be an object of class \code{"msr"} representing a measure (a function which assigns a value to each subset of two-dimensional space). This function can be used to extract the components of the measure, or to perform more complicated manipulations of the components. The argument \code{expr} should be an un-evaluated expression in the \R language. The expression may involve any of the variable names listed below with their corresponding meanings. 
\tabular{ll}{ \code{qlocations} \tab (point pattern) all quadrature locations \cr \code{qweights} \tab (numeric) all quadrature weights \cr \code{density} \tab (numeric) density value at each quadrature point \cr \code{discrete} \tab (numeric) discrete mass at each quadrature point \cr \code{continuous} \tab (numeric) increment of continuous component \cr \code{increment} \tab (numeric) increment of measure \cr \code{is.atom} \tab (logical) whether quadrature point is an atom \cr \code{atoms} \tab (point pattern) locations of atoms \cr \code{atommass} \tab (numeric) masses of atoms } The measure is the sum of discrete and continuous components. The discrete component assigns non-zero mass to several points called atoms. The continuous component has a density which should be integrated over a region to determine the value for that region. An object of class \code{"msr"} approximates the continuous component by a sum over quadrature points. The quadrature points are chosen so that they include the atoms of the measure. In the list above, we have \code{increment = continuous + discrete}, \code{continuous = density * qweights}, \code{is.atom = (discrete > 0)}, \code{atoms = qlocations[is.atom]} and \code{atommass = discrete[is.atom]}. } \value{ The result of evaluating the expression could be an object of any kind. } \author{ \spatstatAuthors. } \seealso{ \code{\link{msr}}, \code{\link{split.msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") with(rp, atoms) with(rp, qlocations \%mark\% continuous) } \keyword{spatial} \keyword{manip} spatstat/man/rescale.im.Rd0000644000176200001440000000345513160710621015205 0ustar liggesusers\name{rescale.im} \alias{rescale.im} \title{Convert Pixel Image to Another Unit of Length} \description{ Converts a pixel image to another unit of length. 
} \usage{ \method{rescale}{im}(X, s, unitname) } \arguments{ \item{X}{Pixel image (object of class \code{"im"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{unitname}{ Optional. New name for the unit of length. See \code{\link{unitname}}. } } \value{ Another pixel image (of class \code{"im"}), containing the same pixel values, but with pixel coordinates expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates of the pixels in \code{X} will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. The result is a pixel image representing the \emph{same} data but re-expressed in a different unit. Pixel values are unchanged. This may not be what you intended! } \seealso{ \code{\link{im}}, \code{\link{rescale}}, \code{\link{unitname}}, \code{\link{eval.im}} } \examples{ # Bramble Canes data: 1 unit = 9 metres data(bramblecanes) # distance transform Z <- distmap(bramblecanes) # convert to metres # first alter the pixel values Zm <- eval.im(9 * Z) # now rescale the pixel coordinates Z <- rescale(Zm, 1/9) # or equivalently Z <- rescale(Zm) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/crossdist.ppx.Rd0000644000176200001440000000335413160710571016010 0ustar liggesusers\name{crossdist.ppx} \alias{crossdist.ppx} \title{Pairwise Distances Between Two Different Multi-Dimensional Point Patterns} \description{ Computes the distances between pairs of points taken from two different multi-dimensional point patterns. 
} \usage{ \method{crossdist}{ppx}(X, Y, \dots) } \arguments{ \item{X,Y}{ Multi-dimensional point patterns (objects of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. } \details{ Given two point patterns in multi-dimensional space, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}} for multi-dimensional point patterns (objects of class \code{"ppx"}). This function expects two multidimensional point patterns \code{X} and \code{Y}, and returns the matrix whose \code{[i,j]} entry is the distance from \code{X[i]} to \code{Y[j]}. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. } \seealso{ \code{\link{crossdist}}, \code{\link{pairdist}}, \code{\link{nndist}} } \examples{ df <- data.frame(x=runif(3),y=runif(3),z=runif(3),w=runif(3)) X <- ppx(data=df) df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5)) Y <- ppx(data=df) d <- crossdist(X, Y) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/psstG.Rd0000644000176200001440000001207013160710621014254 0ustar liggesusers\name{psstG} \Rdversion{1.1} \alias{psstG} \title{ Pseudoscore Diagnostic For Fitted Model against Saturation Alternative } \description{ Given a point process model fitted to a point pattern dataset, this function computes the pseudoscore diagnostic of goodness-of-fit for the model, against moderately clustered or moderately inhibited alternatives of saturation type. 
} \usage{ psstG(object, r = NULL, breaks = NULL, \dots, model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This function computes the pseudoscore test statistic which can be used as a diagnostic for goodness-of-fit of a fitted point process model. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. 
For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. The alternative hypothesis is a family of hybrid models obtained by combining the fitted model with the Geyer saturation process (see \code{\link{Geyer}}) with saturation parameter 1. The family of alternatives includes models that are more regular than the fitted model, and others that are more clustered than the fitted model. For any point pattern \eqn{x}, and any \eqn{r > 0}, let \eqn{S(x,r)} be the number of points in \eqn{x} whose nearest neighbour (the nearest other point in \eqn{x}) is closer than \eqn{r} units. Then the pseudoscore for the null model is \deqn{ V(r) = \sum_i \Delta S(x_i, x, r ) - \int_W \Delta S(u,x,r) \lambda(u,x) {\rm d} u }{ V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du) } where the \eqn{\Delta}{Delta} operator is \deqn{ \Delta S(u,x,r) = S(x\cup\{u\}, r) - S(x\setminus u, r) }{ Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r) } the difference between the values of \eqn{S} for the point pattern with and without the point \eqn{u}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. 
and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}. } \seealso{ Alternative functions: \code{\link{psstA}}, \code{\link{psst}}, \code{\link{Kres}}, \code{\link{Gres}}. } \examples{ X <- rStrauss(200,0.1,0.05) plot(psstG(X)) plot(psstG(X, interaction=Strauss(0.05))) } \keyword{spatial} \keyword{models} spatstat/man/intensity.ppx.Rd0000644000176200001440000000175013160710621016013 0ustar liggesusers\name{intensity.ppx} \alias{intensity.ppx} \title{Intensity of a Multidimensional Space-Time Point Pattern} \description{ Calculates the intensity of points in a multi-dimensional point pattern of class \code{"ppx"} or \code{"pp3"}. } \usage{ \method{intensity}{ppx}(X, \dots) } \arguments{ \item{X}{Point pattern of class \code{"ppx"} or \code{"pp3"}.} \item{\dots}{Ignored.} } \value{ A single number or a numeric vector. } \details{ This is a method for the generic function \code{\link{intensity}}. It computes the empirical intensity of a multi-dimensional point pattern (object of class \code{"ppx"} including \code{"pp3"}), i.e. the average density of points per unit volume. If the point pattern is multitype, the intensities of the different types are computed separately. } \author{ \adrian \rolf and \ege } \examples{ X <- osteo$pts[[1]] intensity(X) marks(X) <- factor(sample(letters[1:3], npoints(X), replace=TRUE)) intensity(X) } spatstat/man/connected.Rd0000644000176200001440000000766113160710571015134 0ustar liggesusers\name{connected} \Rdversion{1.1} \alias{connected} \alias{connected.im} \alias{connected.owin} \title{ Connected components } \description{ Finds the topologically-connected components of a spatial object, such as the connected clumps of pixels in a binary image. 
} \usage{ connected(X, \dots) \method{connected}{owin}(X, \dots, method="C") \method{connected}{im}(X, \dots, background = NA, method="C") } \arguments{ \item{X}{ A spatial object such as a pixel image (object of class \code{"im"}) or a window (object of class \code{"owin"}). } \item{background}{ Optional. Treat pixels with this value as being part of the background. } \item{method}{ String indicating the algorithm to be used. Either \code{"C"} or \code{"interpreted"}. See Details. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } } \details{ The function \code{connected} is generic, with methods for pixel images (class \code{"im"}) and windows (class \code{"owin"}) described here. There is also a method for point patterns described in \code{\link{connected.ppp}}. The functions described here compute the connected component transform (Rosenfeld and Pfalz, 1966) of a binary image or binary mask. The argument \code{X} is first converted into a pixel image with logical values. Then the algorithm identifies the connected components (topologically-connected clumps of pixels) in the foreground. Two pixels belong to the same connected component if they have the value \code{TRUE} and if they are neighbours (in the 8-connected sense). This rule is applied repeatedly until it terminates. Then each connected component contains all the pixels that can be reached by stepping from neighbour to neighbour. If \code{method="C"}, the computation is performed by a compiled C language implementation of the classical algorithm of Rosenfeld and Pfalz (1966). If \code{method="interpreted"}, the computation is performed by an \R implementation of the algorithm of Park et al (2000). The result is a factor-valued image, with levels that correspond to the connected components. The Examples show how to extract each connected component as a separate window object. } \value{ A pixel image (object of class \code{"im"}) with factor values. 
The levels of the factor correspond to the connected components. } \references{ Park, J.-M., Looney, C.G. and Chen, H.-C. (2000) Fast connected component labeling algorithm using a divide and conquer technique. Pages 373-376 in S.Y. Shin (ed) \emph{Computers and Their Applications:} Proceedings of the ISCA 15th International Conference on Computers and Their Applications, March 29-31, 2000, New Orleans, Louisiana USA. ISCA 2000, ISBN 1-880843-32-3. Rosenfeld, A. and Pfalz, J.L. (1966) Sequential operations in digital processing. \emph{Journal of the Association for Computing Machinery} \bold{13} 471-494. } \seealso{ \code{\link{connected.ppp}}, \code{\link{im.object}}, \code{\link{tess}} } \section{Warnings}{ It may be hard to distinguish different components in the default plot because the colours of nearby components may be very similar. See the Examples for a randomised colour map. The algorithm for \code{method="interpreted"} can be very slow for large images (or images where the connected components include a large number of pixels). } \examples{ d <- distmap(cells, dimyx=256) X <- levelset(d, 0.07) plot(X) Z <- connected(X) plot(Z) # or equivalently Z <- connected(d <= 0.07) # number of components nc <- length(levels(Z)) # plot with randomised colour map plot(Z, col=hsv(h=sample(seq(0,1,length=nc), nc))) # how to extract the components as a list of windows W <- tiles(tess(image=Z)) } \author{ Original \R code by Julian Burgos, University of Washington. Adapted for \pkg{spatstat} by \adrian and \rolf. } \keyword{spatial} \keyword{math} spatstat/man/pairdist.psp.Rd0000644000176200001440000000436313160710621015602 0ustar liggesusers\name{pairdist.psp} \alias{pairdist.psp} \title{Pairwise distances between line segments} \description{ Computes the matrix of distances between all pairs of line segments in a line segment pattern. 
} \usage{ \method{pairdist}{psp}(X, \dots, method="C", type="Hausdorff") } \arguments{ \item{X}{ A line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{type}{ Type of distance to be computed. Options are \code{"Hausdorff"} and \code{"separation"}. Partial matching is used. } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the line segments numbered \code{i} and \code{j}. } \details{ This function computes the distance between each pair of line segments in \code{X}, and returns the matrix of distances. This is a method for the generic function \code{\link{pairdist}} for the class \code{"psp"}. The distances between line segments are measured in one of two ways: \itemize{ \item if \code{type="Hausdorff"}, distances are computed in the Hausdorff metric. The Hausdorff distance between two line segments is the \emph{maximum} distance from any point on one of the segments to the nearest point on the other segment. \item if \code{type="separation"}, distances are computed as the \emph{minimum} distance from a point on one line segment to a point on the other line segment. For example, line segments which cross over each other have separation zero. } The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted \R code only. If \code{method="C"} (the default) then compiled \code{C} code is used, which is somewhat faster. 
} \seealso{ \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{pairdist.ppp}} } \examples{ L <- psp(runif(10), runif(10), runif(10), runif(10), owin()) D <- pairdist(L) S <- pairdist(L, type="sep") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/Extract.ppx.Rd0000644000176200001440000000417113160710621015377 0ustar liggesusers\name{Extract.ppx} \alias{[.ppx} \title{Extract Subset of Multidimensional Point Pattern} \description{ Extract a subset of a multidimensional point pattern. } \usage{ \method{[}{ppx}(x, i, drop=FALSE, ...) } \arguments{ \item{x}{ A multidimensional point pattern (object of class \code{"ppx"}). } \item{i}{ Subset index. A valid subset index in the usual \R sense, indicating which points should be retained; or a spatial domain of class \code{"boxx"} or \code{"box3"}. } \item{drop}{ Logical value indicating whether to remove unused levels of the marks, if the marks are a factor. } \item{\dots}{ Ignored. } } \value{ A multidimensional point pattern (of class \code{"ppx"}). } \details{ This function extracts a designated subset of a multidimensional point pattern. The function \code{[.ppx} is a method for \code{\link{[}} for the class \code{"ppx"}. It extracts a designated subset of a point pattern. The argument \code{i} may be either \itemize{ \item a subset index in the usual \R sense: either a numeric vector of positive indices (identifying the points to be retained), a numeric vector of negative indices (identifying the points to be deleted) or a logical vector of length equal to the number of points in the point pattern \code{x}. In the latter case, the \code{j}-th point of \code{x} will be retained if \code{i[j]=TRUE}, and deleted otherwise. \item a spatial domain of class \code{"boxx"} or \code{"box3"}. Points falling inside this region will be retained. } The argument \code{drop} determines whether to remove unused levels of a factor, if the point pattern is multitype (i.e.
the marks are a factor) or if the marks are a data frame or hyperframe in which some of the columns are factors. Use the function \code{\link{unmark}} to remove marks from a marked point pattern. } \seealso{ \code{\link{ppx}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),z=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t")) X[-2] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/applynbd.Rd0000644000176200001440000002003513160710571014771 0ustar liggesusers\name{applynbd} \alias{applynbd} \title{Apply Function to Every Neighbourhood in a Point Pattern} \description{ Visit each point in a point pattern, find the neighbouring points, and apply a given function to them. } \usage{ applynbd(X, FUN, N=NULL, R=NULL, criterion=NULL, exclude=FALSE, \dots) } \arguments{ \item{X}{ Point pattern. An object of class \code{"ppp"}, or data which can be converted into this format by \code{\link{as.ppp}}. } \item{FUN}{ Function to be applied to each neighbourhood. The arguments of \code{FUN} are described under \bold{Details}. } \item{N}{ Integer. If this argument is present, the neighbourhood of a point of \code{X} is defined to consist of the \code{N} points of \code{X} which are closest to it. } \item{R}{ Nonnegative numeric value. If this argument is present, the neighbourhood of a point of \code{X} is defined to consist of all points of \code{X} which lie within a distance \code{R} of it. } \item{criterion}{ Function. If this argument is present, the neighbourhood of a point of \code{X} is determined by evaluating this function. See under \bold{Details}. } \item{exclude}{ Logical. If \code{TRUE} then the point currently being visited is excluded from its own neighbourhood. } \item{\dots}{ extra arguments passed to the function \code{FUN}. They must be given in the form \code{name=value}. } } \value{ Similar to the result of \code{\link{apply}}. 
If each call to \code{FUN} returns a single numeric value, the result is a vector of dimension \code{npoints(X)}, the number of points in \code{X}. If each call to \code{FUN} returns a vector of the same length \code{m}, then the result is a matrix of dimensions \code{c(m,n)}; note the transposition of the indices, as usual for the family of \code{apply} functions. If the calls to \code{FUN} return vectors of different lengths, the result is a list of length \code{npoints(X)}. } \details{ This is an analogue of \code{\link{apply}} for point patterns. It visits each point in the point pattern \code{X}, determines which points of \code{X} are ``neighbours'' of the current point, applies the function \code{FUN} to this neighbourhood, and collects the values returned by \code{FUN}. The definition of ``neighbours'' depends on the arguments \code{N}, \code{R} and \code{criterion}. Also the argument \code{exclude} determines whether the current point is excluded from its own neighbourhood. \itemize{ \item If \code{N} is given, then the neighbours of the current point are the \code{N} points of \code{X} which are closest to the current point (including the current point itself unless \code{exclude=TRUE}). \item If \code{R} is given, then the neighbourhood of the current point consists of all points of \code{X} which lie closer than a distance \code{R} from the current point. \item If \code{criterion} is given, then it must be a function with two arguments \code{dist} and \code{drank} which will be vectors of equal length. The interpretation is that \code{dist[i]} will be the distance of a point from the current point, and \code{drank[i]} will be the rank of that distance (the three points closest to the current point will have rank 1, 2 and 3). This function must return a logical vector of the same length as \code{dist} and \code{drank} whose \code{i}-th entry is \code{TRUE} if the corresponding point should be included in the neighbourhood. See the examples below. 
\item If more than one of the arguments \code{N}, \code{R} and \code{criterion} is given, the neighbourhood is defined as the \emph{intersection} of the neighbourhoods specified by these arguments. For example if \code{N=3} and \code{R=5} then the neighbourhood is formed by finding the 3 nearest neighbours of current point, and retaining only those neighbours which lie closer than 5 units from the current point. } When \code{applynbd} is executed, each point of \code{X} is visited, and the following happens for each point: \itemize{ \item the neighbourhood of the current point is determined according to the chosen rule, and stored as a point pattern \code{Y}; \item the function \code{FUN} is called as: \code{FUN(Y=Y, current=current, dists=dists, dranks=dranks, \dots)} where \code{current} is the location of the current point (in a format explained below), \code{dists} is a vector of distances from the current point to each of the points in \code{Y}, \code{dranks} is a vector of the ranks of these distances with respect to the full point pattern \code{X}, and \code{\dots} are the arguments passed from the call to \code{applynbd}; \item The result of the call to \code{FUN} is stored. } The results of each call to \code{FUN} are collected and returned according to the usual rules for \code{\link{apply}} and its relatives. See the \bold{Value} section of this help file. The format of the argument \code{current} is as follows. If \code{X} is an unmarked point pattern, then \code{current} is a vector of length 2 containing the coordinates of the current point. If \code{X} is marked, then \code{current} is a point pattern containing exactly one point, so that \code{current$x} is its \eqn{x}-coordinate and \code{current$marks} is its mark value. In either case, the coordinates of the current point can be referred to as \code{current$x} and \code{current$y}. Note that \code{FUN} will be called exactly as described above, with each argument named explicitly. 
Care is required when writing the function \code{FUN} to ensure that the arguments will match up. See the Examples. See \code{\link{markstat}} for a common use of this function. To simply tabulate the marks in every \code{R}-neighbourhood, use \code{\link{marktable}}. } \seealso{ \code{\link{ppp.object}}, \code{\link{apply}}, \code{\link{markstat}}, \code{\link{marktable}} } \examples{ redwood # count the number of points within radius 0.2 of each point of X nneighbours <- applynbd(redwood, R=0.2, function(Y, ...){npoints(Y)-1}) # equivalent to: nneighbours <- applynbd(redwood, R=0.2, function(Y, ...){npoints(Y)}, exclude=TRUE) # compute the distance to the second nearest neighbour of each point secondnndist <- applynbd(redwood, N = 2, function(dists, ...){max(dists)}, exclude=TRUE) # marked point pattern trees <- longleaf \testonly{ # smaller dataset trees <- trees[seq(1, npoints(trees), by=80)] } # compute the median of the marks of all neighbours of a point # (see also 'markstat') dbh.med <- applynbd(trees, R=90, exclude=TRUE, function(Y, ...) 
{ median(marks(Y))}) # ANIMATION explaining the definition of the K function # (arguments `fullpicture' and 'rad' are passed to FUN) if(interactive()) { showoffK <- function(Y, current, dists, dranks, fullpicture,rad) { plot(fullpicture, main="") points(Y, cex=2) ux <- current[["x"]] uy <- current[["y"]] points(ux, uy, pch="+",cex=3) theta <- seq(0,2*pi,length=100) polygon(ux + rad * cos(theta), uy+rad*sin(theta)) text(ux + rad/3, uy + rad/2,npoints(Y),cex=3) if(interactive()) Sys.sleep(if(runif(1) < 0.1) 1.5 else 0.3) return(npoints(Y)) } applynbd(redwood, R=0.2, showoffK, fullpicture=redwood, rad=0.2, exclude=TRUE) # animation explaining the definition of the G function showoffG <- function(Y, current, dists, dranks, fullpicture) { plot(fullpicture, main="") points(Y, cex=2) u <- current points(u[1],u[2],pch="+",cex=3) v <- c(Y$x[1],Y$y[1]) segments(u[1],u[2],v[1],v[2],lwd=2) w <- (u + v)/2 nnd <- dists[1] text(w[1],w[2],round(nnd,3),cex=2) if(interactive()) Sys.sleep(if(runif(1) < 0.1) 1.5 else 0.3) return(nnd) } applynbd(cells, N=1, showoffG, exclude=TRUE, fullpicture=cells) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{programming} \keyword{iteration} spatstat/man/ragsAreaInter.Rd0000644000176200001440000000565213160710621015713 0ustar liggesusers\name{ragsAreaInter} \alias{ragsAreaInter} \title{ Alternating Gibbs Sampler for Area-Interaction Process } \description{ Generate a realisation of the area-interaction process using the alternating Gibbs sampler. Applies only when the interaction parameter \eqn{\eta}{eta} is greater than 1. } \usage{ ragsAreaInter(beta, eta, r, \dots, win = NULL, bmax = NULL, periodic = FALSE, ncycles = 100) } \arguments{ \item{beta}{ First order trend. A number, a pixel image (object of class \code{"im"}), or a \code{function(x,y)}. } \item{eta}{ Interaction parameter (canonical form) as described in the help for \code{\link{AreaInter}}. A number greater than 1. } \item{r}{ Disc radius in the model. A positive number.
} \item{\dots}{ Additional arguments for \code{beta} if it is a function. } \item{win}{ Simulation window. An object of class \code{"owin"}. (Ignored if \code{beta} is a pixel image.) } \item{bmax}{ Optional. The maximum possible value of \code{beta}, or a number larger than this. } \item{periodic}{ Logical value indicating whether to treat opposite sides of the simulation window as being the same, so that points close to one side may interact with points close to the opposite side. Feasible only when the window is a rectangle. } \item{ncycles}{ Number of cycles of the alternating Gibbs sampler to be performed. } } \details{ This function generates a simulated realisation of the area-interaction process (see \code{\link{AreaInter}}) using the alternating Gibbs sampler (see \code{\link{rags}}). It exploits a mathematical relationship between the (unmarked) area-interaction process and the two-type hard core process (Baddeley and Van Lieshout, 1995; Widom and Rowlinson, 1970). This relationship only holds when the interaction parameter \code{eta} is greater than 1 so that the area-interaction process is clustered. The parameters \code{beta,eta} are the canonical parameters described in the help for \code{\link{AreaInter}}. The first order trend \code{beta} may be a constant, a function, or a pixel image. The simulation window is determined by \code{beta} if it is a pixel image, and otherwise by the argument \code{win} (the default is the unit square). } \value{ A point pattern (object of class \code{"ppp"}). } \references{ Baddeley, A.J. and Van Lieshout, M.N.M. (1995). Area-interaction point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{47} (1995) 601--619. Widom, B. and Rowlinson, J.S. (1970). New model for the study of liquid-vapor phase transitions. \emph{The Journal of Chemical Physics} \bold{52} (1970) 1670--1684. } \author{ \adrian. 
} \seealso{ \code{\link{rags}}, \code{\link{ragsMultiHard}}, \code{\link{AreaInter}} } \examples{ plot(ragsAreaInter(100, 2, 0.07, ncycles=15)) } \keyword{spatial} \keyword{datagen} spatstat/man/setcov.Rd0000644000176200001440000000361313160710621014462 0ustar liggesusers\name{setcov} \alias{setcov} \title{Set Covariance of a Window} \description{ Computes the set covariance function of a window. } \usage{ setcov(W, V=W, \dots) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). } \item{V}{ Optional. Another window. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to control the pixel resolution. } } \value{ A pixel image (an object of class \code{"im"}) representing the set covariance function of \code{W}, or the cross-covariance of \code{W} and \code{V}. } \details{ The set covariance function of a region \eqn{W} in the plane is the function \eqn{C(v)} defined for each vector \eqn{v} as the area of the intersection between \eqn{W} and \eqn{W+v}, where \eqn{W+v} is the set obtained by shifting (translating) \eqn{W} by \eqn{v}. We may interpret \eqn{C(v)} as the area of the set of all points \eqn{x} in \eqn{W} such that \eqn{x+v} also lies in \eqn{W}. This command computes a discretised approximation to the set covariance function of any plane region \eqn{W} represented as a window object (of class \code{"owin"}, see \code{\link{owin.object}}). The return value is a pixel image (object of class \code{"im"}) whose greyscale values are values of the set covariance function. The set covariance is computed using the Fast Fourier Transform, unless \code{W} is a rectangle, when an exact formula is used. If the argument \code{V} is present, then \code{setcov(W,V)} computes the set \emph{cross-covariance} function \eqn{C(x)} defined for each vector \eqn{x} as the area of the intersection between \eqn{W} and \eqn{V+x}.
} \seealso{ \code{\link{imcov}}, \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{erosion}} } \examples{ w <- owin(c(0,1),c(0,1)) v <- setcov(w) plot(v) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/methods.box3.Rd0000644000176200001440000000254113160710621015473 0ustar liggesusers\name{methods.box3} \Rdversion{1.1} \alias{methods.box3} %DoNotExport \alias{print.box3} \alias{unitname.box3} \alias{unitname<-.box3} \title{ Methods for Three-Dimensional Box } \description{ Methods for class \code{"box3"}. } \usage{ \method{print}{box3}(x, ...) \method{unitname}{box3}(x) \method{unitname}{box3}(x) <- value } \arguments{ \item{x}{ Object of class \code{"box3"} representing a three-dimensional box. } \item{\dots}{ Other arguments passed to \code{print.default}. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}} and \code{\link{unitname}} for the class \code{"box3"} of three-dimensional boxes. The \code{print} method prints a description of the box, while the \code{unitname} method extracts the name of the unit of length in which the box coordinates are expressed. } \value{ For \code{print.box3} the value is \code{NULL}. For \code{unitname.box3} an object of class \code{"units"}. } \author{\adrian and \rolf } \seealso{ \code{\link{box3}}, \code{\link{print}}, \code{\link{unitname}} } \examples{ X <- box3(c(0,10),c(0,10),c(0,5), unitname=c("metre", "metres")) X unitname(X) # Northern European usage unitname(X) <- "meter" } \keyword{spatial} \keyword{methods} spatstat/man/by.ppp.Rd0000644000176200001440000000522713160710571014376 0ustar liggesusers\name{by.ppp} \alias{by.ppp} \title{Apply a Function to a Point Pattern Broken Down by Factor} \description{ Splits a point pattern into sub-patterns, and applies the function to each sub-pattern. } \usage{ \method{by}{ppp}(data, INDICES=marks(data), FUN, ...) 
} \arguments{ \item{data}{Point pattern (object of class \code{"ppp"}).} \item{INDICES}{Grouping variable. Either a factor, a pixel image with factor values, or a tessellation.} \item{FUN}{Function to be applied to subsets of \code{data}.} \item{\dots}{Additional arguments to \code{FUN}.} } \details{ This is a method for the generic function \code{\link{by}} for point patterns (class \code{"ppp"}). The point pattern \code{data} is first divided into subsets according to \code{INDICES}. Then the function \code{FUN} is applied to each subset. The results of each computation are returned in a list. The argument \code{INDICES} may be \itemize{ \item a factor, of length equal to the number of points in \code{data}. The levels of \code{INDICES} determine the destination of each point in \code{data}. The \code{i}th point of \code{data} will be placed in the sub-pattern \code{split.ppp(data)$l} where \code{l = INDICES[i]}. \item a pixel image (object of class \code{"im"}) with factor values. The pixel value of \code{INDICES} at each point of \code{data} will be used as the classifying variable. \item a tessellation (object of class \code{"tess"}). Each point of \code{data} will be classified according to the tile of the tessellation into which it falls. } If \code{INDICES} is missing, then \code{data} must be a multitype point pattern (a marked point pattern whose marks vector is a factor). Then the effect is that the points of each type are separated into different point patterns. } \value{ A list (also of class \code{"anylist"} or \code{"solist"} as appropriate) containing the results returned from \code{FUN} for each of the subpatterns. } \seealso{ \code{\link{ppp}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}}, \code{\link{tess}}, \code{\link{im}}.
} \examples{ # multitype point pattern, broken down by type data(amacrine) by(amacrine, FUN=density) by(amacrine, FUN=function(x) { min(nndist(x)) } ) # how to pass additional arguments to FUN by(amacrine, FUN=clarkevans, correction=c("Donnelly","cdf")) # point pattern broken down by tessellation data(swedishpines) tes <- quadrats(swedishpines, 5, 5) B <- by(swedishpines, tes, clarkevans, correction="Donnelly") unlist(lapply(B, as.numeric)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/Jinhom.Rd0000644000176200001440000001502313160710571014405 0ustar liggesusers\name{Jinhom} \alias{Jinhom} \title{ Inhomogeneous J-function } \description{ Estimates the inhomogeneous \eqn{J} function of a non-stationary point pattern. } \usage{ Jinhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, update = TRUE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{J} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. 
} \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{J} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"} or \code{"kppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without fitting it to \code{X}. } } \details{ This command computes estimates of the inhomogeneous \eqn{J}-function (Van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the \eqn{J} function for homogeneous point patterns computed by \code{\link{Jest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{J} function is computed as \eqn{Jinhom(r) = (1 - Ginhom(r))/(1-Finhom(r))} where \eqn{Ginhom, Finhom} are the inhomogeneous \eqn{G} and \eqn{F} functions computed using the border correction (equations (7) and (6) respectively in Van Lieshout, 2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"} or \code{"kppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.)
} \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments.
} \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. } \seealso{ \code{\link{Ginhom}}, \code{\link{Finhom}}, \code{\link{Jest}} } \examples{ \dontrun{ plot(Jinhom(swedishpines, sigma=bw.diggle, adjust=2)) } plot(Jinhom(swedishpines, sigma=10)) } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege. } \keyword{spatial} \keyword{nonparametric} spatstat/man/lgcp.estpcf.Rd0000644000176200001440000002045313160710621015370 0ustar liggesusers\name{lgcp.estpcf} \alias{lgcp.estpcf} \title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast} \description{ Fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ lgcp.estpcf(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the log-Gaussian Cox process model. } \item{covmodel}{ Specification of the covariance model for the log-Gaussian field. See Details. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. 
} \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits a log-Gaussian Cox point process (LGCP) model to a point pattern dataset by the Method of Minimum Contrast, using the estimated pair correlation function of the point pattern. The shape of the covariance of the LGCP must be specified: the default is the exponential covariance function, but other covariance models can be selected. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits a log-Gaussian Cox point process (LGCP) model to \code{X}, by finding the parameters of the LGCP model which give the closest match between the theoretical pair correlation function of the LGCP model and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model fitted is a stationary, isotropic log-Gaussian Cox process (\ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen, 2003, pp. 72-76). 
To define this process we start with a stationary Gaussian random field \eqn{Z} in the two-dimensional plane, with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}. Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process. The theoretical pair correlation function of the LGCP is \deqn{ g(r) = \exp(C(r)) }{ g(r) = exp(C(r)) } The intensity of the LGCP is \deqn{ \lambda = \exp(\mu + \frac{C(0)}{2}). }{ lambda= exp(mu + C(0)/2). } The covariance function \eqn{C(r)} takes the form \deqn{ C(r) = \sigma^2 c(r/\alpha) }{ C(r) = sigma^2 * c(r/alpha) } where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters controlling the strength and the scale of autocorrelation, respectively, and \eqn{c(r)} is a known covariance function determining the shape of the covariance. The strength and scale parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} will be estimated by the algorithm. The template covariance function \eqn{c(r)} must be specified as explained below. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. The template covariance function \eqn{c(r)} is specified using the argument \code{covmodel}. This should be of the form \code{list(model="modelname", \dots)} where \code{modelname} is a string identifying the template model as explained below, and \code{\dots} are optional arguments of the form \code{tag=value} giving the values of parameters controlling the \emph{shape} of the template model. The default is the exponential covariance \eqn{c(r) = e^{-r}}{c(r) = e^(-r)} so that the scaled covariance is \deqn{ C(r) = \sigma^2 e^{-r/\alpha}. }{ C(r) = sigma^2 * exp(-r/alpha).
} To determine the template model, the string \code{"modelname"} will be prefixed by \code{"RM"} and the code will search for a function of this name in the \pkg{RandomFields} package. For a list of available models see \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package. For example the Matern covariance with exponent \eqn{\nu=0.3}{nu = 0.3} is specified by \code{covmodel=list(model="matern", nu=0.3)} corresponding to the function \code{RMmatern} in the \pkg{RandomFields} package. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \ifelse{latex}{\out{M\o ller}}{Moller}, J., Syversveen, A. 
and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian with modifications by Shen Guochun and Rasmus Waagepetersen \email{rw@math.auc.dk} and \ege. } \seealso{ \code{\link{lgcp.estK}} for alternative method of fitting LGCP. \code{\link{matclust.estpcf}}, \code{\link{thomas.estpcf}} for other models. \code{\link{mincontrast}} for the generic minimum contrast fitting algorithm, including important parameters that affect the accuracy of the fit. \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package, for covariance function models. \code{\link{pcf}} for the pair correlation function. } \examples{ data(redwood) u <- lgcp.estpcf(redwood, c(var=1, scale=0.1)) u plot(u) if(require(RandomFields)) { lgcp.estpcf(redwood, covmodel=list(model="matern", nu=0.3)) } } \keyword{spatial} \keyword{models} spatstat/man/runifpointOnLines.Rd0000644000176200001440000000347713160710621016654 0ustar liggesusers\name{runifpointOnLines} \alias{runifpointOnLines} \title{Generate N Uniform Random Points On Line Segments} \description{ Given a line segment pattern, generate a random point pattern consisting of \code{n} points uniformly distributed on the line segments. } \usage{ runifpointOnLines(n, L, nsim=1) } \arguments{ \item{n}{Number of points to generate.} \item{L}{Line segment pattern (object of class \code{"psp"}) on which the points should lie. } \item{nsim}{Number of simulated realisations to be generated.} } \details{ This command generates a point pattern consisting of \code{n} independent random points, each point uniformly distributed on the line segment pattern. 
This means that, for each random point, \itemize{ \item the probability of falling on a particular segment is proportional to the length of the segment; and \item given that the point falls on a particular segment, it has uniform probability density along that segment. } If \code{n} is a single integer, the result is an unmarked point pattern containing \code{n} points. If \code{n} is a vector of integers, the result is a marked point pattern, with \code{m} different types of points, where \code{m = length(n)}, in which there are \code{n[j]} points of type \code{j}. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}) with the same window as \code{L}. If \code{nsim > 1}, a list of point patterns. } \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{pointsOnLines}}, \code{\link{runifpoint}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) Y <- runifpointOnLines(20, X) plot(X, main="") plot(Y, add=TRUE) Z <- runifpointOnLines(c(5,5), X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/model.depends.Rd0000644000176200001440000000672013160710621015702 0ustar liggesusers\name{model.depends} \alias{model.depends} \alias{model.is.additive} \alias{model.covariates} \alias{has.offset.term} \alias{has.offset} \title{ Identify Covariates Involved in each Model Term } \description{ Given a fitted model (of any kind), identify which of the covariates is involved in each term of the model. } \usage{ model.depends(object) model.is.additive(object) model.covariates(object, fitted=TRUE, offset=TRUE) has.offset.term(object) has.offset(object) } \arguments{ \item{object}{ A fitted model of any kind. } \item{fitted,offset}{ Logical values determining which type of covariates to include. } } \details{ The \code{object} can be a fitted model of any kind, including models of the classes \code{\link{lm}}, \code{\link{glm}} and \code{\link{ppm}}. 
To be precise, \code{object} must belong to a class for which there are methods for \code{\link{formula}}, \code{\link{terms}} and \code{\link{model.matrix}}. The command \code{model.depends} determines the relationship between the original covariates (the data supplied when \code{object} was fitted) and the canonical covariates (the columns of the design matrix). It returns a logical matrix, with one row for each canonical covariate, and one column for each of the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the \code{i}th canonical covariate depends on the \code{j}th original covariate. If the model formula of \code{object} includes offset terms (see \code{\link{offset}}), then the return value of \code{model.depends} also has an attribute \code{"offset"}. This is a logical value or matrix with one row for each offset term and one column for each of the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the \code{i}th offset term depends on the \code{j}th original covariate. The command \code{model.covariates} returns a character vector containing the names of all (original) covariates that were actually used to fit the model. By default, this includes all covariates that appear in the model formula, including offset terms as well as canonical covariate terms. To omit the offset terms, set \code{offset=FALSE}. To omit the canonical covariate terms, set \code{fitted=FALSE}. The command \code{model.is.additive} determines whether the model is additive, in the sense that there is no canonical covariate that depends on two or more original covariates. It returns a logical value. The command \code{has.offset.term} is a faster way to determine whether the model \emph{formula} includes an \code{offset} term. The functions \code{model.depends} and \code{has.offset.term} only detect \code{offset} terms which are present in the model formula. 
They do not detect numerical offsets in the model object, that were inserted using the \code{offset} argument in \code{lm}, \code{glm} etc. To detect the presence of offsets of both kinds, use \code{has.offset}. } \value{ A logical value or matrix. } \seealso{ \code{\link{ppm}}, \code{\link{model.matrix}} } \examples{ x <- 1:10 y <- 3*x + 2 z <- rep(c(-1,1), 5) fit <- lm(y ~ poly(x,2) + sin(z)) model.depends(fit) model.covariates(fit) model.is.additive(fit) fitoff1 <- lm(y ~ x + offset(z)) fitoff2 <- lm(y ~ x, offset=z) has.offset.term(fitoff1) has.offset(fitoff1) has.offset.term(fitoff2) has.offset(fitoff2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/markcrosscorr.Rd0000644000176200001440000000730113160710621016047 0ustar liggesusers\name{markcrosscorr} \alias{markcrosscorr} \title{ Mark Cross-Correlation Function } \description{ Given a spatial point pattern with several columns of marks, this function computes the mark correlation function between each pair of columns of marks. } \usage{ markcrosscorr(X, r = NULL, correction = c("isotropic", "Ripley", "translate"), method = "density", \dots, normalise = TRUE, Xname = NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. 
} \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{Xname}{ Optional character string name for the dataset \code{X}. } } \details{ First, all columns of marks are converted to numerical values. A factor with \eqn{m} possible levels is converted to \eqn{m} columns of dummy (indicator) values. Next, each pair of columns is considered, and the mark cross-correlation is defined as \deqn{ k_{mm}(r) = \frac{E_{0u}[M_i(0) M_j(u)]}{E[M_i,M_j]} }{ k[mm](r) = E[0u](M(i,0) * M(j,u))/E(Mi * Mj) } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}. On the numerator, \eqn{M_i(0)}{M(i,0)} and \eqn{M_j(u)}{M(j,u)} are the marks attached to locations \eqn{0} and \eqn{u} respectively in the \eqn{i}th and \eqn{j}th columns of marks respectively. On the denominator, \eqn{M_i}{Mi} and \eqn{M_j}{Mj} are independent random values drawn from the \eqn{i}th and \eqn{j}th columns of marks, respectively, and \eqn{E} is the usual expectation. Note that \eqn{k_{mm}(r)}{k[mm](r)} is not a ``correlation'' in the usual statistical sense. It can take any nonnegative real value. The value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{k_{mm}(r) \equiv 1}{k[mm](r) = 1}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern. The cross-correlations are estimated in the same manner as for \code{\link{markcorr}}. 
} \value{ A function array (object of class \code{"fasp"}) containing the mark cross-correlation functions for each possible pair of columns of marks. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{markcorr}} } \examples{ # The dataset 'betacells' has two columns of marks: # 'type' (factor) # 'area' (numeric) if(interactive()) plot(betacells) plot(markcrosscorr(betacells)) } \keyword{spatial} \keyword{nonparametric} spatstat/man/Frame.Rd0000644000176200001440000000305113160710571014211 0ustar liggesusers\name{Frame} \alias{Frame} \alias{Frame<-} \alias{Frame.default} \alias{Frame<-.owin} \alias{Frame<-.ppp} \alias{Frame<-.im} \title{ Extract or Change the Containing Rectangle of a Spatial Object } \description{ Given a spatial object (such as a point pattern or pixel image) in two dimensions, these functions extract or change the containing rectangle inside which the object is defined. } \usage{ Frame(X) \method{Frame}{default}(X) Frame(X) <- value \method{Frame}{owin}(X) <- value \method{Frame}{ppp}(X) <- value \method{Frame}{im}(X) <- value } \arguments{ \item{X}{ A spatial object such as a point pattern, line segment pattern or pixel image. } \item{value}{ A rectangular window (object of class \code{"owin"} of type \code{"rectangle"}) to be used as the new containing rectangle for \code{X}. } } \details{ The functions \code{Frame} and \code{Frame<-} are generic. \code{Frame(X)} extracts the rectangle inside which \code{X} is defined. \code{Frame(X) <- R} changes the rectangle inside which \code{X} is defined to the new rectangle \code{R}. } \value{ The result of \code{Frame} is a rectangular window (object of class \code{"owin"} of type \code{"rectangle"}). The result of \code{Frame<-} is the updated object \code{X}, of the same class as \code{X}. 
} \author{\adrian \rolf and \ege } \seealso{ \code{\link{Window}} } \examples{ Frame(cells) X <- demopat Frame(X) Frame(X) <- owin(c(0, 11000), c(400, 8000)) } \keyword{spatial} \keyword{manip} spatstat/man/dppapproxkernel.Rd0000644000176200001440000000120613160710571016375 0ustar liggesusers\name{dppapproxkernel} \alias{dppapproxkernel} \title{Approximate Determinantal Point Process Kernel} \description{ Returns an approximation to the kernel of a determinantal point process, as a function of one argument \eqn{x}. } \usage{dppapproxkernel(model, trunc = 0.99, W = NULL)} \arguments{ \item{model}{Object of class \code{"detpointprocfamily"}.} \item{trunc}{Numeric specifying how the model truncation is performed. See Details section of \code{\link{simulate.detpointprocfamily}}. } \item{W}{Optional window -- undocumented at the moment.} } \value{A function} \author{ \adrian \rolf and \ege } spatstat/man/pixellate.psp.Rd0000644000176200001440000000467213160710621015755 0ustar liggesusers\name{pixellate.psp} \alias{pixellate.psp} \title{ Convert Line Segment Pattern to Pixel Image } \description{ Converts a line segment pattern to a pixel image by measuring the length or number of lines intersecting each pixel. } \usage{ \method{pixellate}{psp}(x, W=NULL, ..., weights = NULL, what=c("length", "number")) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}). } \item{W}{ Optional window (object of class \code{"owin"}) determining the pixel resolution. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{weights}{ Optional vector of weights associated with each line segment. } \item{what}{ String (partially matched) indicating whether to compute the total length of intersection (\code{what="length"}, the default) or the total number of segments intersecting each pixel (\code{what="number"}). 
} } \details{ This function converts a line segment pattern to a pixel image by computing, for each pixel, the total length of intersection between the pixel and the line segments. Alternatively it can count the number of line segments intersecting each pixel. This is a method for the generic function \code{\link{pixellate}} for the class of line segment patterns. The pixel raster is determined by \code{W} and the optional arguments \code{\dots}. If \code{W} is missing or \code{NULL}, it defaults to the window containing \code{x}. Then \code{W} is converted to a binary pixel mask using \code{\link{as.mask}}. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. If \code{weights} are given, then the length of the intersection between line segment \code{i} and pixel \code{j} is multiplied by \code{weights[i]} before the lengths are summed for each pixel. } \value{ A pixel image (object of class \code{"im"}) with numeric values. } \seealso{ \code{\link{pixellate}}, \code{\link{as.mask}}, \code{\link{as.mask.psp}}. Use \code{\link{as.mask.psp}} if you only want to know which pixels are intersected by lines. } \examples{ X <- psp(runif(10),runif(10), runif(10), runif(10), window=owin()) plot(pixellate(X)) plot(X, add=TRUE) sum(lengths.psp(X)) sum(pixellate(X)) plot(pixellate(X, what="n")) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/methods.ssf.Rd0000644000176200001440000000524713160710621015421 0ustar liggesusers\name{methods.ssf} \alias{methods.ssf} %DoNotExport \alias{marks.ssf} \alias{marks<-.ssf} \alias{unmark.ssf} \alias{as.im.ssf} \alias{as.function.ssf} \alias{as.ppp.ssf} \alias{print.ssf} \alias{range.ssf} \alias{min.ssf} \alias{max.ssf} \alias{integral.ssf} \title{Methods for Spatially Sampled Functions} \description{ Methods for various generic commands, for the class \code{"ssf"} of spatially sampled functions. 
} \usage{ \method{marks}{ssf}(x, \dots) \method{marks}{ssf}(x, \dots) <- value \method{unmark}{ssf}(X) \method{as.im}{ssf}(X, \dots) \method{as.function}{ssf}(x, \dots) \method{as.ppp}{ssf}(X, \dots) \method{print}{ssf}(x, \dots, brief=FALSE) \method{range}{ssf}(x, \dots) \method{min}{ssf}(x, \dots) \method{max}{ssf}(x, \dots) \method{integral}{ssf}(f, domain=NULL, ..., weights=attr(f, "weights")) } \arguments{ \item{x,X,f}{ A spatially sampled function (object of class \code{"ssf"}). } \item{\dots}{Arguments passed to the default method.} \item{brief}{Logical value controlling the amount of detail printed.} \item{value}{Matrix of replacement values for the function.} \item{domain}{Optional. Domain of integration. An object of class\code{"owin"}. } \item{weights}{ Optional. Numeric vector of weights associated with the sample points. } } \value{ \code{marks} returns a matrix. \code{marks(x) <- value} returns an object of class \code{"ssf"}. \code{as.owin} returns a window (object of class \code{"owin"}). \code{as.ppp} and \code{unmark} return a point pattern (object of class \code{"ppp"}). \code{as.function} returns a \code{function(x,y)} of class \code{"funxy"}. \code{print} returns \code{NULL}. \code{range} returns a numeric vector of length 2. \code{min} and \code{max} return a single numeric value. \code{integral} returns a numeric value (if \code{x} had numeric values) or a numeric vector (if \code{x} had vector values). } \details{ An object of class \code{"ssf"} represents a function (real- or vector-valued) that has been sampled at a finite set of points. The commands documented here are methods for this class, for the generic commands \code{\link[spatstat]{marks}}, \code{\link[spatstat]{marks<-}}, \code{\link[spatstat]{unmark}}, \code{\link[spatstat]{as.im}}, \code{\link{as.function}}, \code{\link[spatstat]{as.ppp}}, \code{\link{print}}, \code{\link{range}}, \code{\link{min}}, \code{\link{max}} and \code{\link[spatstat]{integral}}. 
} \seealso{ \code{\link{ssf}} } \examples{ X <- cells[1:4] f <- ssf(X, nndist(X, k=1:3)) f marks(f) as.ppp(f) as.im(f) } \author{Adrian Baddeley} \keyword{spatial} \keyword{methods} spatstat/man/studpermu.test.Rd0000644000176200001440000001004313160710621016160 0ustar liggesusers\name{studpermu.test} \alias{studpermu.test} \title{ Studentised Permutation Test } \description{ Perform a studentised permutation test for a difference between groups of point patterns. } \usage{ studpermu.test(X, formula, summaryfunction = Kest, \dots, rinterval = NULL, nperm = 999, use.Tbar = FALSE, minpoints = 20, rsteps = 128, r = NULL, arguments.in.data = FALSE) } \arguments{ \item{X}{ Data. Either a \code{hyperframe} or a list of lists of point patterns. } \item{formula}{ Formula describing the grouping, when \code{X} is a hyperframe. The left side of the formula identifies which column of \code{X} contains the point patterns. The right side identifies the grouping factor. If the formula is missing, the grouping variable is taken to be the first column of \code{X} that contains a factor, and the point patterns are taken from the first column that contains point patterns. } \item{summaryfunction}{ Summary function applicable to point patterns. } \item{\dots}{ Additional arguments passed to \code{summaryfunction}. } \item{rinterval}{ Interval of distance values \eqn{r} over which the summary function should be evaluated and over which the test statistic will be integrated. If \code{NULL}, the default range of the summary statistic is used (taking the intersection of these ranges over all patterns). } \item{nperm}{ Number of random permutations for the test. } \item{use.Tbar}{ Logical value indicating choice of test statistic. If \code{TRUE}, use the alternative test statistic, which is appropriate for summary functions with roughly constant variance, such as \eqn{K(r)/r} or \eqn{L(r)}. 
} \item{minpoints}{ Minimum permissible number of points in a point pattern for inclusion in the test calculation. } \item{rsteps}{ Number of discretisation steps in the \code{rinterval}. } \item{r}{ Optional vector of distance values as the argument for \code{summaryfunction}. Should not usually be given. There is a sensible default. } \item{arguments.in.data}{ Logical. If \code{TRUE}, individual extra arguments to \code{summaryfunction} will be taken from \code{X} (which must be a hyperframe). This assumes that the first argument of \code{summaryfunction} is the point pattern dataset. } } \details{ This function performs the studentized permutation test of Hahn (2012) for a difference between groups of point patterns. The first argument \code{X} should be either \describe{ \item{a list of lists of point patterns.}{ Each element of \code{X} will be interpreted as a group of point patterns, assumed to be replicates of the same point process. } \item{a hyperframe:}{ One column of the hyperframe should contain point patterns, and another column should contain a factor indicating the grouping. The argument \code{formula} should be a formula in the \R language specifying the grouping: it should be of the form \code{P ~ G} where \code{P} is the name of the column of point patterns, and \code{G} is the name of the factor. } } A group needs to contain at least two point patterns with at least \code{minpoints} points in each pattern. The function returns an object of class \code{"htest"} and \code{"studpermutest"} that can be printed and plotted. The printout shows the test result and \eqn{p}-value. The plot shows the summary functions for the groups (and the group means if requested). } \value{ Object of class \code{"studpermutest"}. } \references{ Hahn, U. (2012) A studentized permutation test for the comparison of spatial point patterns. \emph{Journal of the American Statistical Association} \bold{107} (498), 754--764. } \author{ Ute Hahn. 
Modified for \code{spatstat} by \adrian \rolf and \ege } \examples{ np <- if(interactive()) 99 else 19 testpyramidal <- studpermu.test(pyramidal, Neurons ~ group, nperm=np) testpyramidal } \keyword{spatial} \keyword{htest} spatstat/man/Kmulti.Rd0000644000176200001440000001700513160710571014430 0ustar liggesusers\name{Kmulti} \alias{Kmulti} \title{ Marked K-Function } \description{ For a marked point pattern, estimate the multitype \eqn{K} function which counts the expected number of points of subset \eqn{J} within a given distance from a typical point in subset \code{I}. } \usage{ Kmulti(X, I, J, r=NULL, breaks=NULL, correction, \dots, ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. See Details. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. See Details. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. 
} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{IJ}(r)}{KIJ(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ The function \code{Kmulti} generalises \code{\link{Kest}} (for unmarked point patterns) and \code{\link{Kdot}} and \code{\link{Kcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. The multitype \eqn{K} function is defined so that \eqn{\lambda_J K_{IJ}(r)}{lambda[J] KIJ(r)} equals the expected number of additional random points of \eqn{X_J}{X[J]} within a distance \eqn{r} of a typical point of \eqn{X_I}{X[I]}. Here \eqn{\lambda_J}{lambda[J]} is the intensity of \eqn{X_J}{X[J]} i.e. the expected number of points of \eqn{X_J}{X[J]} per unit area. The function \eqn{K_{IJ}}{KIJ} is determined by the second order moment properties of \eqn{X}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. 
They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is currently implemented only for rectangular and polygonal windows. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } } The pair correlation function \code{\link{pcf}} can also be applied to the result of \code{Kmulti}. 
} \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The function \eqn{K_{IJ}}{KIJ} is not necessarily differentiable. The border correction (reduced sample) estimator of \eqn{K_{IJ}}{KIJ} used here is pointwise approximately unbiased, but need not be a nondecreasing function of \eqn{r}, while the true \eqn{K_{IJ}}{KIJ} must be nondecreasing. } \seealso{ \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{pcf}} } \examples{ # Longleaf Pine data: marks represent diameter trees <- longleaf \testonly{ trees <- trees[seq(1,npoints(trees), by=50), ] } K <- Kmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(K) # functions determining subsets f1 <- function(X) { marks(X) <= 15 } f2 <- function(X) { marks(X) >= 15 } K <- Kmulti(trees, f1, f2) \testonly{ rm(trees) } } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{nonparametric} spatstat/man/Gres.Rd0000644000176200001440000000544513160710571014070 0ustar liggesusers\name{Gres} \Rdversion{1.1} \alias{Gres} \title{ Residual G Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the residual \eqn{G} function, which serves as a diagnostic for goodness-of-fit of the model. } \usage{ Gres(object, ...) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}), or the value returned by a previous call to \code{\link{Gcom}}. } \item{\dots}{ Arguments passed to \code{\link{Gcom}}. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes a residual version of the \eqn{G} function of the dataset, which should be approximately zero if the model is a good fit to the data. In normal use, \code{object} is a fitted point process model or a point pattern. Then \code{Gres} first calls \code{\link{Gcom}} to compute both the nonparametric estimate of the \eqn{G} function and its model compensator. Then \code{Gres} computes the difference between them, which is the residual \eqn{G}-function. Alternatively, \code{object} may be a function value table (object of class \code{"fv"}) that was returned by a previous call to \code{\link{Gcom}}. Then \code{Gres} computes the residual from this object. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. 
} \author{ \adrian \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}. } \seealso{ Related functions: \code{\link{Gcom}}, \code{\link{Gest}}. Alternative functions: \code{\link{Kres}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{psst}}. Model-fitting: \code{\link{ppm}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson G0 <- Gres(fit0) plot(G0) # Hanisch correction estimate plot(G0, hres ~ r) # uniform Poisson is clearly not correct fit1 <- ppm(cells, ~1, Strauss(0.08)) plot(Gres(fit1), hres ~ r) # fit looks approximately OK; try adjusting interaction distance plot(Gres(cells, interaction=Strauss(0.12))) # How to make envelopes \dontrun{ E <- envelope(fit1, Gres, model=fit1, nsim=39) plot(E) } # For computational efficiency Gc <- Gcom(fit1) G1 <- Gres(Gc) } \keyword{spatial} \keyword{models} spatstat/man/runifpoint.Rd0000644000176200001440000000636213160710621015360 0ustar liggesusers\name{runifpoint} \alias{runifpoint} \title{Generate N Uniform Random Points} \description{ Generate a random point pattern containing \eqn{n} independent uniform random points. } \usage{ runifpoint(n, win=owin(c(0,1),c(0,1)), giveup=1000, warn=TRUE, \dots, nsim=1, drop=TRUE, ex=NULL) } \arguments{ \item{n}{ Number of points. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{warn}{ Logical. Whether to issue a warning if \code{n} is very large. See Details. } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{ex}{ Optional. A point pattern to use as the example. 
If \code{ex} is given and \code{n} and \code{win} are missing, then \code{n} and \code{win} will be calculated from the point pattern \code{ex}. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates \code{n} independent random points, uniformly distributed in the window \code{win}. (For nonuniform distributions, see \code{\link{rpoint}}.) The algorithm depends on the type of window, as follows: \itemize{ \item If \code{win} is a rectangle then \eqn{n} independent random points, uniformly distributed in the rectangle, are generated by assigning uniform random values to their cartesian coordinates. \item If \code{win} is a binary image mask, then a random sequence of pixels is selected (using \code{\link{sample}}) with equal probabilities. Then for each pixel in the sequence we generate a uniformly distributed random point in that pixel. \item If \code{win} is a polygonal window, the algorithm uses the rejection method. It finds a rectangle enclosing the window, generates points in this rectangle, and tests whether they fall in the desired window. It gives up when \code{giveup * n} tests have been performed without yielding \code{n} successes. } The algorithm for binary image masks is faster than the rejection method but involves discretisation. If \code{warn=TRUE}, then a warning will be issued if \code{n} is very large. The threshold is \code{\link{spatstat.options}("huge.npoints")}. This warning has no consequences, but it helps to trap a number of common errors. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{rpoispp}}, \code{\link{rpoint}} } \examples{ # 100 random points in the unit square pp <- runifpoint(100) # irregular window data(letterR) # polygonal pp <- runifpoint(100, letterR) # binary image mask pp <- runifpoint(100, as.mask(letterR)) ## # randomising an existing point pattern runifpoint(npoints(cells), win=Window(cells)) runifpoint(ex=cells) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/affine.Rd0000644000176200001440000000224413160710571014412 0ustar liggesusers\name{affine} \alias{affine} \title{Apply Affine Transformation} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a plane geometrical object, such as a point pattern or a window. } \usage{ affine(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), a window (object of class \code{"owin"}) or a pixel image (object of class \code{"im"}). } \item{\dots}{Arguments determining the affine transformation.} } \value{ Another object of the same type, representing the result of applying the affine transformation. } \details{ This is generic. Methods are provided for point patterns (\code{\link{affine.ppp}}) and windows (\code{\link{affine.owin}}). } \seealso{ \code{\link{affine.ppp}}, \code{\link{affine.psp}}, \code{\link{affine.owin}}, \code{\link{affine.im}}, \code{\link{flipxy}}, \code{\link{reflect}}, \code{\link{rotate}}, \code{\link{shift}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/ppx.Rd0000644000176200001440000000621313160710621013765 0ustar liggesusers\name{ppx} \Rdversion{1.1} \alias{ppx} \title{ Multidimensional Space-Time Point Pattern } \description{ Creates a multidimensional space-time point pattern with any kind of coordinates and marks. 
} \usage{ ppx(data, domain=NULL, coord.type=NULL, simplify=FALSE) } \arguments{ \item{data}{ The coordinates and marks of the points. A \code{data.frame} or \code{hyperframe}. } \item{domain}{ Optional. The space-time domain containing the points. An object in some appropriate format, or \code{NULL}. } \item{coord.type}{ Character vector specifying how each column of \code{data} should be interpreted: as a spatial coordinate, a temporal coordinate, a local coordinate or a mark. Entries are partially matched to the values \code{"spatial"}, \code{"temporal"}, \code{"local"} and \code{"mark"}. } \item{simplify}{ Logical value indicating whether to simplify the result in special cases. If \code{simplify=TRUE}, a two-dimensional point pattern will be returned as an object of class \code{"ppp"}, and a three-dimensional point pattern will be returned as an object of class \code{"pp3"}. If \code{simplify=FALSE} (the default) then the result is always an object of class \code{"ppx"}. } } \details{ An object of class \code{"ppx"} represents a marked point pattern in multidimensional space and/or time. There may be any number of spatial coordinates, any number of temporal coordinates, any number of local coordinates, and any number of mark variables. The individual marks may be atomic (numeric values, factor values, etc) or objects of any kind. The argument \code{data} should contain the coordinates and marks of the points. It should be a \code{data.frame} or more generally a \code{hyperframe} (see \code{\link{hyperframe}}) with one row of data for each point. Each column of \code{data} is either a spatial coordinate, a temporal coordinate, a local coordinate, or a mark variable. The argument \code{coord.type} determines how each column is interpreted. It should be a character vector, of length equal to the number of columns of \code{data}. It should contain strings that partially match the values \code{"spatial"}, \code{"temporal"}, \code{"local"} and \code{"mark"}. 
(The first letters will be sufficient.) By default (if \code{coord.type} is missing or \code{NULL}), columns of numerical data are assumed to represent spatial coordinates, while other columns are assumed to be marks. } \value{ Usually an object of class \code{"ppx"}. If \code{simplify=TRUE} the result may be an object of class \code{"ppp"} or \code{"pp3"}. } \author{\adrian and \rolf } \seealso{ \code{\link{pp3}}, \code{\link{print.ppx}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4), age=rep(c("old", "new"), 2), size=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t","m","m")) X val <- 20 * runif(4) E <- lapply(val, function(s) { rpoispp(s) }) hf <- hyperframe(t=val, e=as.listof(E)) Z <- ppx(data=hf, domain=c(0,1)) Z } \keyword{spatial} \keyword{datagen} spatstat/man/nncross.lpp.Rd0000644000176200001440000001042413160710621015434 0ustar liggesusers\name{nncross.lpp} \alias{nncross.lpp} \title{Nearest Neighbours on a Linear Network} \description{ Given two point patterns \code{X} and \code{Y} on a linear network, finds the nearest neighbour in \code{Y} of each point of \code{X} using the shortest path in the network. } \usage{ \method{nncross}{lpp}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, k = 1, method="C") } \arguments{ \item{X,Y}{ Point patterns on a linear network (objects of class \code{"lpp"}). They must lie on the \emph{same} linear network. } \item{iX, iY}{ Optional identifiers, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{\dots}{Ignored.} \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour, for each value of \code{k}. } \item{method}{ Internal use only. 
} } \details{ Given two point patterns \code{X} and \code{Y} on the same linear network, this function finds, for each point of \code{X}, the nearest point of \code{Y}, measuring distance by the shortest path in the network. The distance between these points is also computed. The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. Note that this function is not symmetric in \code{X} and \code{Y}. To find the nearest neighbour in \code{X} of each point in \code{Y}, use \code{nncross(Y,X)}. The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. The \code{k}th nearest neighbour may be undefined, for example if there are fewer than \code{k+1} points in the dataset, or if the linear network is not connected. In this case, the \code{k}th nearest neighbour distance is infinite. 
} \value{ By default (if \code{what=c("dist", "which")} and \code{k=1}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"}, a vector of nearest neighbour distances. If \code{what="which"}, a vector of nearest neighbour indices. If \code{k} is a vector of integers, the result is a matrix with one row for each point in \code{X}, giving the distances and/or indices of the \code{k}th nearest neighbours in \code{Y}. } \seealso{ \code{\link{nndist.lpp}} for nearest neighbour distances in a single point pattern. \code{\link{nnwhich.lpp}} to identify which points are nearest neighbours in a single point pattern. } \examples{ # two different point patterns X <- runiflpp(3, simplenet) Y <- runiflpp(5, simplenet) nn <- nncross(X,Y) nn plot(simplenet, main="nncross") plot(X, add=TRUE, cols="red") plot(Y, add=TRUE, cols="blue", pch=16) XX <- as.ppp(X) YY <- as.ppp(Y) i <- nn$which arrows(XX$x, XX$y, YY[i]$x, YY[i]$y, length=0.15) # nearest and second-nearest neighbours nncross(X, Y, k=1:2) # two patterns with some points in common X <- Y[1:2] iX <- 1:2 iY <- 1:5 nncross(X,Y, iX, iY) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/methods.linnet.Rd0000644000176200001440000001015713160710621016113 0ustar liggesusers\name{methods.linnet} \alias{methods.linnet} %DoNotExport \Rdversion{1.1} \alias{as.linnet} \alias{as.linnet.linnet} \alias{as.owin.linnet} \alias{as.psp.linnet} \alias{nsegments.linnet} \alias{nvertices.linnet} \alias{pixellate.linnet} \alias{print.linnet} \alias{summary.linnet} \alias{unitname.linnet} \alias{unitname<-.linnet} \alias{vertexdegree} \alias{vertices.linnet} \alias{volume.linnet} \alias{Window.linnet} \title{ Methods for Linear Networks } \description{ These are methods for the class \code{"linnet"} of linear networks. 
} \usage{ as.linnet(X, \dots) \method{as.linnet}{linnet}(X, \dots, sparse) \method{as.owin}{linnet}(W, \dots) \method{as.psp}{linnet}(x, \dots, fatal=TRUE) \method{nsegments}{linnet}(x) \method{nvertices}{linnet}(x, \dots) \method{pixellate}{linnet}(x, \dots) \method{print}{linnet}(x, \dots) \method{summary}{linnet}(object, \dots) \method{unitname}{linnet}(x) \method{unitname}{linnet}(x) <- value vertexdegree(x) \method{vertices}{linnet}(w) \method{volume}{linnet}(x) \method{Window}{linnet}(X, \dots) } \arguments{ \item{x,X,object,w,W}{ An object of class \code{"linnet"} representing a linear network. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ A valid name for the unit of length for \code{x}. See \code{\link{unitname}}. } \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}). } \item{sparse}{ Logical value indicating whether to use a sparse matrix representation, as explained in \code{\link{linnet}}. Default is to keep the same representation as in \code{X}. } } \details{ The function \code{as.linnet} is generic. It converts data from some other format into an object of class \code{"linnet"}. The method \code{as.linnet.lpp} extracts the linear network information from an \code{lpp} object. The other functions are methods for the generic commands \code{\link{as.owin}}, \code{\link{as.psp}}, \code{\link{nsegments}}, \code{\link{nvertices}}, \code{\link{pixellate}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}}, \code{\link{unitname<-}}, \code{\link{vertices}}, \code{\link{volume}} and \code{\link{Window}} for the class \code{"linnet"}. The methods \code{as.owin.linnet} and \code{Window.linnet} extract the window containing the linear network, and return it as an object of class \code{"owin"}. 
The method \code{as.psp.linnet} extracts the lines of the linear network as a line segment pattern (object of class \code{"psp"}) while \code{nsegments.linnet} simply counts the number of line segments. The method \code{vertices.linnet} extracts the vertices (nodes) of the linear network and \code{nvertices.linnet} simply counts the vertices. The function \code{vertexdegree} calculates the topological degree of each vertex (the number of lines emanating from that vertex) and returns these values as an integer vector. The method \code{pixellate.linnet} applies \code{\link{as.psp.linnet}} to convert the network to a collection of line segments, then invokes \code{\link{pixellate.psp}}. } \value{ For \code{as.linnet} the value is an object of class \code{"linnet"}. For other functions, see the help file for the corresponding generic function. } \author{ \adrian } \seealso{ \code{\link{linnet}}. Generic functions: \code{\link{as.owin}}, \code{\link{as.psp}}, \code{\link{nsegments}}, \code{\link{nvertices}}, \code{\link{pixellate}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}}, \code{\link{unitname<-}}, \code{\link{vertices}}, \code{\link{volume}} and \code{\link{Window}}. Special tools: \code{\link{thinNetwork}}, \code{\link{insertVertices}}, \code{\link{connected.linnet}}. \code{\link{lixellate}} for dividing segments into shorter segments. } \examples{ simplenet summary(simplenet) nsegments(simplenet) nvertices(simplenet) volume(simplenet) unitname(simplenet) <- c("cubit", "cubits") Window(simplenet) } \keyword{spatial} \keyword{methods} spatstat/man/predict.dppm.Rd0000644000176200001440000000275313160710621015554 0ustar liggesusers\name{predict.dppm} \alias{predict.dppm} \alias{fitted.dppm} \title{Prediction from a Fitted Determinantal Point Process Model} \description{ Given a fitted determinantal point process model, these functions compute the fitted intensity. } \usage{ \method{fitted}{dppm}(object, ...) \method{predict}{dppm}(object, ...) 
} \arguments{ \item{object}{ Fitted determinantal point process model. An object of class \code{"dppm"}. } \item{\dots}{ Arguments passed to \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } } \details{ These functions are methods for the generic functions \code{\link{fitted}} and \code{\link{predict}}. The argument \code{object} should be a determinantal point process model (object of class \code{"dppm"}) obtained using the function \code{\link{dppm}}. The \emph{intensity} of the fitted model is computed, using \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } \value{ The value of \code{fitted.dppm} is a numeric vector giving the fitted values at the quadrature points. The value of \code{predict.dppm} is usually a pixel image (object of class \code{"im"}), but see \code{\link{predict.ppm}} for details. } \seealso{ \code{\link{dppm}}, \code{\link{plot.dppm}}, \code{\link{fitted.ppm}}, \code{\link{predict.ppm}} } \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss()) predict(fit) } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/Linhom.Rd0000644000176200001440000000545213160710571014414 0ustar liggesusers\name{Linhom} \alias{Linhom} \title{L-function} \description{ Calculates an estimate of the inhomogeneous version of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) for a spatial point pattern. } \usage{ Linhom(...) } \arguments{ \item{\dots}{ Arguments passed to \code{\link{Kinhom}} to estimate the inhomogeneous K-function. } } \details{ This command computes an estimate of the inhomogeneous version of the \eqn{L}-function for a spatial point pattern The original \eqn{L}-function is a transformation (proposed by Besag) of Ripley's \eqn{K}-function, \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)} where \eqn{K(r)} is the Ripley \eqn{K}-function of a spatially homogeneous point pattern, estimated by \code{\link{Kest}}. 
The inhomogeneous \eqn{L}-function is the corresponding transformation of the inhomogeneous \eqn{K}-function, estimated by \code{\link{Kinhom}}. It is appropriate when the point pattern clearly does not have a homogeneous intensity of points. It was proposed by Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2000). The command \code{Linhom} first calls \code{\link{Kinhom}} to compute the estimate of the inhomogeneous K-function, and then applies the square root transformation. For a Poisson point pattern (homogeneous or inhomogeneous), the theoretical value of the inhomogeneous \eqn{L}-function is \eqn{L(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L} has been estimated } \item{theo}{the theoretical value \eqn{L(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L(r)} obtained by the edge corrections named. } \references{ Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. 
} \seealso{ \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Kinhom}}, \code{\link{pcf}} } \examples{ data(japanesepines) X <- japanesepines L <- Linhom(X, sigma=0.1) plot(L, main="Inhomogeneous L function for Japanese Pines") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/summary.solist.Rd0000644000176200001440000000155513160710621016173 0ustar liggesusers\name{summary.solist} \alias{summary.solist} \title{Summary of a List of Spatial Objects} \description{ Prints a useful summary of each entry in a list of two-dimensional spatial objects. } \usage{ \method{summary}{solist}(object, \dots) } \arguments{ \item{object}{ An object of class \code{"solist"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{summary}}. An object of the class \code{"solist"} is effectively a list of two-dimensional spatial datasets. See \code{\link{solist}}. This function extracts a useful summary of each of the datasets. } \seealso{ \code{\link{solist}}, \code{\link{summary}}, \code{\link{plot.solist}} } \examples{ x <- solist(cells, japanesepines, redwood) summary(x) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{methods} spatstat/man/Lcross.inhom.Rd0000644000176200001440000000776413160710571015554 0ustar liggesusers\name{Lcross.inhom} \alias{Lcross.inhom} \title{ Inhomogeneous Cross Type L Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross-type \eqn{L} function. } \usage{ Lcross.inhom(X, i, j, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. 
A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{\dots}{ Other arguments passed to \code{\link{Kcross.inhom}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{L_{ij}(r)}{Lij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{L_{ij}(r)}{Lij(r)} for a marked Poisson process, identically equal to \code{r} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{ij}(r)}{Lij(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Lcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Linhom}}. All the arguments are passed to \code{\link{Kcross.inhom}}, which estimates the inhomogeneous multitype K function \eqn{K_{ij}(r)}{Kij(r)} for the point pattern. The resulting values are then transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \references{ \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. 
} \seealso{ \code{\link{Lcross}}, \code{\link{Linhom}}, \code{\link{Kcross.inhom}} } \examples{ # Lansing Woods data woods <- lansing \testonly{woods <- woods[seq(1,npoints(woods), by=10)]} ma <- split(woods)$maple wh <- split(woods)$whiteoak # method (1): estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdaW <- density.ppp(wh, sigma=0.15, at="points") L <- Lcross.inhom(woods, "whiteoak", "maple", lambdaW, lambdaM) # method (2): fit parametric intensity model fit <- ppm(woods ~marks * polynom(x,y,2)) # evaluate fitted intensities at data points # (these are the intensities of the sub-processes of each type) inten <- fitted(fit, dataonly=TRUE) # split according to types of points lambda <- split(inten, marks(woods)) L <- Lcross.inhom(woods, "whiteoak", "maple", lambda$whiteoak, lambda$maple) # synthetic example: type A points have intensity 50, # type B points have intensity 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) L <- Lcross.inhom(X, "A", "B", lambdaI=as.im(50, Window(X)), lambdaJ=lamB) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/erosion.Rd0000644000176200001440000000626613160710621014644 0ustar liggesusers\name{erosion} \alias{erosion} \alias{erosion.owin} \alias{erosion.ppp} \alias{erosion.psp} \title{Morphological Erosion by a Disc} \description{ Perform morphological erosion of a window, a line segment pattern or a point pattern by a disc. } \usage{ erosion(w, r, \dots) \method{erosion}{owin}(w, r, shrink.frame=TRUE, \dots, strict=FALSE, polygonal=NULL) \method{erosion}{ppp}(w, r,\dots) \method{erosion}{psp}(w, r,\dots) } \arguments{ \item{w}{ A window (object of class \code{"owin"} or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). 
} \item{r}{positive number: the radius of erosion.} \item{shrink.frame}{logical: if \code{TRUE}, erode the bounding rectangle as well.} \item{\dots}{extra arguments to \code{\link{as.mask}} controlling the pixel resolution, if pixel approximation is used.} \item{strict}{Logical flag determining the fate of boundary pixels, if pixel approximation is used. See details.} \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the erosion (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the eroded region (or \code{NULL} if this region is empty). If \code{r=0}, the result is identical to \code{w}. } \details{ The morphological erosion of a set \eqn{W} by a distance \eqn{r > 0} is the subset consisting of all points \eqn{x \in W}{x in W} such that the distance from \eqn{x} to the boundary of \eqn{W} is greater than or equal to \eqn{r}. In other words it is the result of trimming a margin of width \eqn{r} off the set \eqn{W}. If \code{polygonal=TRUE} then a polygonal approximation to the erosion is computed. If \code{polygonal=FALSE} then a pixel approximation to the erosion is computed from the distance map of \code{w}. The arguments \code{"\dots"} are passed to \code{\link{as.mask}} to control the pixel resolution. The erosion consists of all pixels whose distance from the boundary of \code{w} is strictly greater than \code{r} (if \code{strict=TRUE}) or is greater than or equal to \code{r} (if \code{strict=FALSE}). When \code{w} is a window, the default (when \code{polygonal=NULL}) is to compute a polygonal approximation if \code{w} is a rectangle or polygonal window, and to compute a pixel approximation if \code{w} is a window of type \code{"mask"}. If \code{shrink.frame} is false, the resulting window is given the same outer, bounding rectangle as the original window \code{w}. 
If \code{shrink.frame} is true, the original bounding rectangle is also eroded by the same distance \code{r}. To simply compute the area of the eroded window, use \code{\link{eroded.areas}}. } \seealso{ \code{\link{dilation}} for the opposite operation. \code{\link{erosionAny}} for morphological erosion using any shape. \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{eroded.areas}} } \examples{ plot(letterR, main="erosion(letterR, 0.2)") plot(erosion(letterR, 0.2), add=TRUE, col="red") } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/as.data.frame.ppp.Rd0000644000176200001440000000201513160710571016360 0ustar liggesusers\name{as.data.frame.ppp} \alias{as.data.frame.ppp} \title{Coerce Point Pattern to a Data Frame} \description{ Extracts the coordinates of the points in a point pattern, and their marks if any, and returns them in a data frame. } \usage{ \method{as.data.frame}{ppp}(x, row.names = NULL, ...) } \arguments{ \item{x}{Point pattern (object of class \code{"ppp"}).} \item{row.names}{Optional character vector of row names.} \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class \code{"ppp"} of point patterns. It extracts the coordinates of the points in the point pattern, and returns them as columns named \code{x} and \code{y} in a data frame. If the points were marked, the marks are returned as a column named \code{marks} with the same type as in the point pattern dataset. } \value{ A data frame. } \examples{ data(amacrine) df <- as.data.frame(amacrine) df[1:5,] } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/thinNetwork.Rd0000644000176200001440000000515513160710621015476 0ustar liggesusers\name{thinNetwork} \alias{thinNetwork} \title{ Remove Vertices or Segments from a Linear Network } \description{ Delete some vertices and/or segments from a linear network or related object. 
} \usage{ thinNetwork(X, retainvertices, retainedges) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}), or a point pattern on a linear network (object of class \code{"lpp"}). } \item{retainvertices}{ Optional. Subset index specifying which vertices should be retained (not deleted). } \item{retainedges}{ Optional. Subset index specifying which edges (segments) should be retained (not deleted). } } \details{ This function deletes some of the vertices and edges (segments) in the linear network. The arguments \code{retainvertices} and \code{retainedges} can be any kind of subset index: a vector of positive integers specifying which vertices/edges should be retained; a vector of negative integers specifying which vertices/edges should be deleted; or a logical vector specifying whether each vertex/edge should be retained (\code{TRUE}) or deleted (\code{FALSE}). Vertices are indexed in the same sequence as in \code{vertices(as.linnet(X))}. Segments are indexed in the same sequence as in \code{as.psp(as.linnet(X))}. The argument \code{retainedges} has higher precedence than \code{retainvertices} in the sense that: \itemize{ \item If \code{retainedges} is given, then any vertex which is an endpoint of a retained edge will also be retained. \item If \code{retainvertices} is given and \code{retainedges} is \bold{missing}, then any segment joining two retained vertices will also be retained. \item Thus, when both \code{retainvertices} and \code{retainedges} are given, it is possible that more vertices will be retained than those specified by \code{retainvertices}. } After the network has been altered, other consequential changes will occur, including renumbering of the segments and vertices. If \code{X} is a point pattern on a linear network, then data points will be deleted if they lie on a deleted edge. } \value{ An object of the same kind as \code{X}. } \author{ \adrian and Suman Rakshit. 
} \seealso{ \code{\link{linnet}} to make a network; \code{\link{connected.linnet}} to extract connected components. } \examples{ L <- simplenet plot(L, main="thinNetwork(L, retainedges=c(-3, -5))") text(midpoints.psp(as.psp(L)), labels=1:nsegments(L), pos=3) Lsub <- thinNetwork(L, retainedges=c(-3, -5)) plot(Lsub, add=TRUE, col="blue", lwd=2) } \keyword{spatial} \keyword{manip} spatstat/man/nnclean.Rd0000644000176200001440000001020213160710621014565 0ustar liggesusers\name{nnclean} \alias{nnclean} \alias{nnclean.ppp} \alias{nnclean.pp3} \title{ Nearest Neighbour Clutter Removal } \description{ Detect features in a 2D or 3D spatial point pattern using nearest neighbour clutter removal. } \usage{ nnclean(X, k, ...) \method{nnclean}{ppp}(X, k, ..., edge.correct = FALSE, wrap = 0.1, convergence = 0.001, plothist = FALSE, verbose = TRUE, maxit = 50) \method{nnclean}{pp3}(X, k, ..., convergence = 0.001, plothist = FALSE, verbose = TRUE, maxit = 50) } \arguments{ \item{X}{ A two-dimensional spatial point pattern (object of class \code{"ppp"}) or a three-dimensional point pattern (object of class \code{"pp3"}). } \item{k}{ Degree of neighbour: \code{k=1} means nearest neighbour, \code{k=2} means second nearest, etc. } \item{\dots}{ Arguments passed to \code{\link{hist.default}} to control the appearance of the histogram, if \code{plothist=TRUE}. } \item{edge.correct}{ Logical flag specifying whether periodic edge correction should be performed (only implemented in 2 dimensions). } \item{wrap}{ Numeric value specifying the relative size of the margin in which data will be replicated for the periodic edge correction (if \code{edge.correct=TRUE}). A fraction of window width and window height. } \item{convergence}{ Relative tolerance threshold for testing convergence of EM algorithm. } \item{maxit}{ Maximum number of iterations for EM algorithm. 
} \item{plothist}{ Logical flag specifying whether to plot a diagnostic histogram of the nearest neighbour distances and the fitted distribution. } \item{verbose}{ Logical flag specifying whether to print progress reports. } } \details{ Byers and Raftery (1998) developed a technique for recognising features in a spatial point pattern in the presence of random clutter. For each point in the pattern, the distance to the \eqn{k}th nearest neighbour is computed. Then the E-M algorithm is used to fit a mixture distribution to the \eqn{k}th nearest neighbour distances. The mixture components represent the feature and the clutter. The mixture model can be used to classify each point as belonging to one or other component. The function \code{nnclean} is generic, with methods for two-dimensional point patterns (class \code{"ppp"}) and three-dimensional point patterns (class \code{"pp3"}) currently implemented. The result is a point pattern (2D or 3D) with two additional columns of marks: \describe{ \item{class}{ A factor, with levels \code{"noise"} and \code{"feature"}, indicating the maximum likelihood classification of each point. } \item{prob}{ Numeric vector giving the estimated probabilities that each point belongs to a feature. } } The object also has extra information stored in attributes: \code{"theta"} contains the fitted parameters of the mixture model, \code{"info"} contains information about the fitting procedure, and \code{"hist"} contains the histogram structure returned from \code{\link{hist.default}} if \code{plothist = TRUE}. } \value{ An object of the same kind as \code{X}, obtained by attaching marks to the points of \code{X}. The object also has attributes, as described under Details. } \references{ Byers, S. and Raftery, A.E. (1998) Nearest-neighbour clutter removal for estimating features in spatial point processes. \emph{Journal of the American Statistical Association} \bold{93}, 577--584. } \author{ Original by Simon Byers and Adrian Raftery. 
Adapted for \pkg{spatstat} by \adrian. } \seealso{ \code{\link{nndist}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}} } \examples{ data(shapley) X <- nnclean(shapley, k=17, plothist=TRUE) plot(X, which.marks=1, chars=c(".", "+"), cols=1:2) plot(X, which.marks=2, cols=function(x)hsv(0.2+0.8*(1-x),1,1)) Y <- split(X, un=TRUE) plot(Y, chars="+", cex=0.5) marks(X) <- marks(X)$prob plot(cut(X, breaks=3), chars=c(".", "+", "+"), cols=1:3) } \keyword{spatial} \keyword{classif} spatstat/man/mppm.Rd0000644000176200001440000002467213160710621014140 0ustar liggesusers\name{mppm} \alias{mppm} \title{Fit Point Process Model to Several Point Patterns} \description{ Fits a Gibbs point process model to several point patterns simultaneously. } \usage{ mppm(formula, data, interaction=Poisson(), ..., iformula=NULL, random=NULL, use.gam = FALSE, reltol.pql=1e-3, gcontrol=list()) } \arguments{ \item{formula}{ A formula describing the systematic part of the model. Variables in the formula are names of columns in \code{data}. } \item{data}{ A hyperframe (object of class \code{"hyperframe"}, see \code{\link{hyperframe}}) containing the point pattern responses and the explanatory variables. } \item{interaction}{ Interpoint interaction(s) appearing in the model. Either an object of class \code{"interact"} describing the point process interaction structure, or a hyperframe (with the same number of rows as \code{data}) whose entries are objects of class \code{"interact"}. } \item{\dots}{Arguments passed to \code{\link{ppm}} controlling the fitting procedure. } \item{iformula}{ Optional. A formula (with no left hand side) describing the interaction to be applied to each case. Each variable name in the formula should either be the name of a column in the hyperframe \code{interaction}, or the name of a column in the hyperframe \code{data} that is a vector or factor. } \item{random}{ Optional. A formula (with no left hand side) describing a random effect. 
Variable names in the formula may be any of the column names of \code{data} and \code{interaction}. The formula must be recognisable to \code{\link{lme}}. } \item{use.gam}{Logical flag indicating whether to fit the model using \code{\link[mgcv]{gam}} or \code{\link[stats]{glm}}. } \item{reltol.pql}{ Relative tolerance for successive steps in the penalised quasi-likelihood algorithm, used when the model includes random effects. The algorithm terminates when the root mean square of the relative change in coefficients is less than \code{reltol.pql}. } \item{gcontrol}{ List of arguments to control the fitting algorithm. Arguments are passed to \code{\link[stats]{glm.control}} or \code{\link[mgcv]{gam.control}} or \code{\link[nlme]{lmeControl}} depending on the kind of model being fitted. If the model has random effects, the arguments are passed to \code{\link[nlme]{lmeControl}}. Otherwise, if \code{use.gam=TRUE} the arguments are passed to \code{\link[mgcv]{gam.control}}, and if \code{use.gam=FALSE} (the default) they are passed to \code{\link[stats]{glm.control}}. } } \details{ This function fits a common point process model to a dataset containing several different point patterns. It extends the capabilities of the function \code{\link{ppm}} to deal with data such as \itemize{ \item replicated observations of spatial point patterns \item two groups of spatial point patterns \item a designed experiment in which the response from each unit is a point pattern. } The syntax of this function is similar to that of standard \R model-fitting functions like \code{\link{lm}} and \code{\link{glm}}. The first argument \code{formula} is an \R formula describing the systematic part of the model. The second argument \code{data} contains the responses and the explanatory variables. Other arguments determine the stochastic structure of the model. Schematically, the data are regarded as the results of a designed experiment involving \eqn{n} experimental units. 
Each unit has a \sQuote{response}, and optionally some \sQuote{explanatory variables} (covariates) describing the experimental conditions for that unit. In this context, \emph{the response from each unit is a point pattern}. The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a spatial covariate. A \sQuote{spatial} covariate is a quantity that depends on spatial location, for example, the soil acidity or altitude at each location. For the purposes of \code{mppm}, a spatial covariate must be stored as a pixel image (object of class \code{"im"}) which gives the values of the covariate at a fine grid of locations. The argument \code{data} is a hyperframe (a generalisation of a data frame, see \code{\link{hyperframe}}). This is like a data frame except that the entries can be objects of any class. The hyperframe has one row for each experimental unit, and one column for each variable (response or explanatory variable). The \code{formula} should be an \R formula. The left hand side of \code{formula} determines the \sQuote{response} variable. This should be a single name, which should correspond to a column in \code{data}. The right hand side of \code{formula} determines the spatial trend of the model. It specifies the linear predictor, and effectively represents the \bold{logarithm} of the spatial trend. Variables in the formula must be the names of columns of \code{data}, or one of the reserved names \describe{ \item{x,y}{Cartesian coordinates of location} \item{marks}{Mark attached to point} \item{id}{which is a factor representing the serial number (\eqn{1} to \eqn{n}) of the point pattern, i.e. the row number in the data hyperframe. } } The column of responses in \code{data} must consist of point patterns (objects of class \code{"ppp"}). The individual point pattern responses can be defined in different spatial windows. 
If some of the point patterns are marked, then they must all be marked, and must have the same type of marks. The scope of models that can be fitted to each pattern is the same as the scope of \code{\link{ppm}}, that is, Gibbs point processes with interaction terms that belong to a specified list, including for example the Poisson process, Strauss process, Geyer's saturation model, and piecewise constant pairwise interaction models. Additionally, it is possible to include random effects as explained in the section on Random Effects below. The stochastic part of the model is determined by the arguments \code{interaction} and (optionally) \code{iformula}. \itemize{ \item In the simplest case, \code{interaction} is an object of class \code{"interact"}, determining the interpoint interaction structure of the point process model, for all experimental units. \item Alternatively, \code{interaction} may be a hyperframe, whose entries are objects of class \code{"interact"}. It should have the same number of rows as \code{data}. \itemize{ \item If \code{interaction} consists of only one column, then the entry in row \code{i} is taken to be the interpoint interaction for the \code{i}th experimental unit (corresponding to the \code{i}th row of \code{data}). \item If \code{interaction} has more than one column, then the argument \code{iformula} is also required. Each row of \code{interaction} determines several interpoint interaction structures that might be applied to the corresponding row of \code{data}. The choice of interaction is determined by \code{iformula}; this should be an \R formula, without a left hand side. For example if \code{interaction} has two columns called \code{A} and \code{B} then \code{iformula = ~B} indicates that the interpoint interactions are taken from the second column. } } Variables in \code{iformula} typically refer to column names of \code{interaction}. 
They can also be names of columns in \code{data}, but only for columns of numeric, logical or factor values. For example \code{iformula = ~B * group} (where \code{group} is a column of \code{data} that contains a factor) causes the model with interpoint interaction \code{B} to be fitted with different interaction parameters for each level of \code{group}. } \section{Random Effects}{ It is also possible to include random effects in the trend term. The argument \code{random} is a formula, with no left-hand side, that specifies the structure of the random effects. The formula should be recognisable to \code{\link{lme}} (see the description of the argument \code{random} for \code{\link{lme}}). The names in the formula \code{random} may be any of the covariates supplied by \code{data}. Additionally the formula may involve the name \code{id}, which is a factor representing the serial number (\eqn{1} to \eqn{n}) of the point pattern in the list \code{X}. } \value{ An object of class \code{"mppm"} representing the fitted model. There are methods for \code{print}, \code{summary}, \code{coef}, \code{AIC}, \code{anova}, \code{fitted}, \code{fixef}, \code{logLik}, \code{plot}, \code{predict}, \code{ranef}, \code{residuals}, \code{summary}, \code{terms} and \code{vcov} for this class. The default methods for \code{\link[stats]{update}} and \code{\link[stats]{formula}} also work on this class. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S., Bell, M. and Turner, R. Analysis of a designed experiment where the response is a spatial point pattern. In preparation. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. Bell, M. and Grunwald, G. 
(2004) Mixed models for the analysis of replicated spatial point patterns. \emph{Biostatistics} \bold{5}, 633--648. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{ppm}}, \code{\link{print.mppm}}, \code{\link{summary.mppm}}, \code{\link{coef.mppm}}, } \examples{ # Waterstriders data H <- hyperframe(Y = waterstriders) mppm(Y ~ 1, data=H) mppm(Y ~ 1, data=H, Strauss(7)) mppm(Y ~ id, data=H) mppm(Y ~ x, data=H) # Synthetic data from known model n <- 10 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1), M=factor(letters[1 + (1:n) \%\% 3])) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)))) fit <- mppm(Y ~Z + U + V, data=H) } \keyword{spatial} \keyword{models} spatstat/man/convexhull.xy.Rd0000644000176200001440000000236313160710571016012 0ustar liggesusers\name{convexhull.xy} \alias{convexhull.xy} \title{Convex Hull of Points} \description{ Computes the convex hull of a set of points in two dimensions. } \usage{ convexhull.xy(x, y=NULL) } \arguments{ \item{x}{ vector of \code{x} coordinates of observed points, or a 2-column matrix giving \code{x,y} coordinates, or a list with components \code{x,y} giving coordinates (such as a point pattern object of class \code{"ppp"}.) } \item{y}{(optional) vector of \code{y} coordinates of observed points, if \code{x} is a vector.} } \value{ A window (an object of class \code{"owin"}). } \details{ Given an observed pattern of points with coordinates given by \code{x} and \code{y}, this function computes the convex hull of the points, and returns it as a window. 
} \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{convexhull}}, \code{\link{bounding.box.xy}}, \code{\link{ripras}} } \examples{ x <- runif(30) y <- runif(30) w <- convexhull.xy(x,y) plot(owin(), main="convexhull.xy(x,y)", lty=2) plot(w, add=TRUE) points(x,y) X <- rpoispp(30) plot(X, main="convexhull.xy(X)") plot(convexhull.xy(X), add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/pairdist.ppx.Rd0000644000176200001440000000243013160710621015600 0ustar liggesusers\name{pairdist.ppx} \alias{pairdist.ppx} \title{Pairwise Distances in Any Dimensions} \description{ Computes the matrix of distances between all pairs of points in a multi-dimensional point pattern. } \usage{ \method{pairdist}{ppx}(X, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ This is a method for the generic function \code{pairdist}. Given a multi-dimensional point pattern \code{X} (an object of class \code{"ppx"}), this function computes the Euclidean distances between all pairs of points in \code{X}, and returns the matrix of distances. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. 
} \seealso{ \code{\link{pairdist}}, \code{\link{crossdist}}, \code{\link{nndist}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),z=runif(4),w=runif(4)) X <- ppx(data=df) pairdist(X) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/cauchy.estpcf.Rd0000644000176200001440000001354213160710571015724 0ustar liggesusers\name{cauchy.estpcf} \alias{cauchy.estpcf} \title{Fit the Neyman-Scott cluster process with Cauchy kernel} \description{ Fits the Neyman-Scott Cluster point process with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. } \usage{ cauchy.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs = list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Neyman-Scott cluster point process model with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. 
} \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Neyman-Scott cluster point process with Cauchy kernel to \code{X}, by finding the parameters of the Cauchy model which give the closest match between the theoretical pair correlation function of the Cauchy process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{\kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{\mu}, and the locations of the offspring points of one parent follow a common distribution described in Jalilian et al (2013). If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{\lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{\lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{\lambda} cannot be estimated, and the parameter \eqn{\mu}{\mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rCauchy}}. 
For computational reasons, the optimisation procedure internally uses the parameter \code{eta2}, which is equivalent to \code{4 * scale^2} where \code{scale} is the scale parameter for the model as used in \code{\link{rCauchy}}. Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Ghorbani, M. (2012) Cauchy cluster process. \emph{Metrika}, to appear. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. 
Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{cauchy.estK}}, \code{\link{lgcp.estpcf}}, \code{\link{thomas.estpcf}}, \code{\link{vargamma.estpcf}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{pcfmodel}}. \code{\link{rCauchy}} to simulate the model. } \examples{ u <- cauchy.estpcf(redwood) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat/man/eval.fv.Rd0000644000176200001440000001202313160710621014513 0ustar liggesusers\name{eval.fv} \alias{eval.fv} \title{Evaluate Expression Involving Functions} \description{ Evaluates any expression involving one or more function value (fv) objects, and returns another object of the same kind. } \usage{ eval.fv(expr, envir, dotonly=TRUE, equiv=NULL, relabel=TRUE) } \arguments{ \item{expr}{An expression.} \item{envir}{ Optional. The environment in which to evaluate the expression, or a named list containing \code{"fv"} objects to be used in the expression. } \item{dotonly}{Logical. See Details.} \item{equiv}{Mapping between column names of different objects that are deemed to be equivalent. See Details.} \item{relabel}{ Logical value indicating whether to compute appropriate labels for the resulting function. This should normally be \code{TRUE} (the default). See Details. } } \details{ This is a wrapper to make it easier to perform pointwise calculations with the summary functions used in spatial statistics. An object of class \code{"fv"} is essentially a data frame containing several different statistical estimates of the same function. Such objects are returned by \code{\link{Kest}} and its relatives. For example, suppose \code{X} is an object of class \code{"fv"} containing several different estimates of the Ripley's K function \eqn{K(r)}, evaluated at a sequence of values of \eqn{r}. Then \code{eval.fv(X+3)} effectively adds 3 to each function estimate in \code{X}, and returns the resulting object. 
Suppose \code{X} and \code{Y} are two objects of class \code{"fv"} which are compatible (in particular they have the same vector of \eqn{r} values). Then \code{eval.fv(X + Y)} will add the corresponding function values in \code{X} and \code{Y}, and return the resulting function. In general, \code{expr} can be any expression involving (a) the \emph{names} of objects of class \code{"fv"}, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.fv} determines which of the \emph{variable names} in the expression \code{expr} refer to objects of class \code{"fv"}. Each such name is replaced by a vector containing the function values. The expression is then evaluated. The result should be a vector; it is taken as the new vector of function values. The expression \code{expr} must be vectorised. There must be at least one object of class \code{"fv"} in the expression. If the objects are not compatible, they will be made compatible by \code{\link{harmonise.fv}}. If \code{dotonly=TRUE} (the default), the expression will be evaluated only for those columns of an \code{"fv"} object that contain values of the function itself (rather than values of the derivative of the function, the hazard rate, etc). If \code{dotonly=FALSE}, the expression will be evaluated for all columns. For example the result of \code{\link{Fest}} includes several columns containing estimates of the empty space function \eqn{F(r)}, but also includes an estimate of the \emph{hazard} \eqn{h(r)} of \eqn{F(r)}. Transformations that are valid for \eqn{F} may not be valid for \eqn{h}. Accordingly, \eqn{h} would normally be omitted from the calculation. The columns of an object \code{x} that represent the function itself are identified by its \dQuote{dot} names, \code{fvnames(x, ".")}. They are the columns normally plotted by \code{\link{plot.fv}} and identified by the symbol \code{"."} in plot formulas in \code{\link{plot.fv}}. 
The argument \code{equiv} can be used to specify that two different column names in different function objects are mathematically equivalent or cognate. It should be a list of \code{name=value} pairs, or a named vector of character strings, indicating the pairing of equivalent names. (Without this argument, these columns would be discarded.) See the Examples. The argument \code{relabel} should normally be \code{TRUE} (the default). It determines whether to compute appropriate mathematical labels and descriptions for the resulting function object (used when the object is printed or plotted). If \code{relabel=FALSE} then this does not occur, and the mathematical labels and descriptions in the result are taken from the function object that appears first in the expression. This reduces computation time slightly (for advanced use only). } \value{ Another object of class \code{"fv"}. } \seealso{ \code{\link{fv.object}}, \code{\link{Kest}} } \examples{ # manipulating the K function X <- rpoispp(42) Ks <- Kest(X) eval.fv(Ks + 3) Ls <- eval.fv(sqrt(Ks/pi)) # manipulating two K functions Y <- rpoispp(20) Kr <- Kest(Y) Kdif <- eval.fv(Ks - Kr) Z <- eval.fv(sqrt(Ks/pi) - sqrt(Kr/pi)) ## Use of 'envir' U <- eval.fv(sqrt(K), list(K=Kest(cells))) ## Use of 'equiv' Fc <- Fest(cells) Gc <- Gest(cells) # Hanisch and Chiu-Stoyan estimators are cognate Dc <- eval.fv(Fc - Gc, equiv=list(cs="han")) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/Finhom.Rd0000644000176200001440000001451513160710571014406 0ustar liggesusers\name{Finhom} \alias{Finhom} \title{ Inhomogeneous Empty Space Function } \description{ Estimates the inhomogeneous empty space function of a non-stationary point pattern. 
} \usage{ Finhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, ratio = FALSE, update = TRUE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{F} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{F} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"} or \code{"kppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. 
If \code{update=FALSE}, the fitted intensity of the model will be computed without fitting it to \code{X}. } } \details{ This command computes estimates of the inhomogeneous \eqn{F}-function (van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the empty space function \eqn{F} for homogeneous point patterns computed by \code{\link{Fest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{F} function is computed using the border correction, equation (6) in Van Lieshout (2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"} or \code{"kppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. 
If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. } \seealso{ \code{\link{Ginhom}}, \code{\link{Jinhom}}, \code{\link{Fest}} } \examples{ \dontrun{ plot(Finhom(swedishpines, sigma=bw.diggle, adjust=2)) } plot(Finhom(swedishpines, sigma=10)) } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege. 
} \keyword{spatial} \keyword{nonparametric} spatstat/man/rex.Rd0000644000176200001440000000523313160710621013755 0ustar liggesusers\name{rex} \alias{rex} \title{ Richardson Extrapolation } \description{ Performs Richardson Extrapolation on a sequence of approximate values. } \usage{ rex(x, r = 2, k = 1, recursive = FALSE) } \arguments{ \item{x}{ A numeric vector or matrix, whose columns are successive estimates or approximations to a vector of parameters. } \item{r}{ A number greater than 1. The ratio of successive step sizes. See Details. } \item{k}{ Integer. The order of convergence assumed. See Details. } \item{recursive}{ Logical value indicating whether to perform one step of Richardson extrapolation (\code{recursive=FALSE}, the default) or repeat the extrapolation procedure until a best estimate is obtained (\code{recursive=TRUE}). } } \details{ Richardson extrapolation is a general technique for improving numerical approximations, often used in numerical integration (Brezinski and Zaglia, 1991). It can also be used to improve parameter estimates in statistical models (Baddeley and Turner, 2014). The successive columns of \code{x} are assumed to have been obtained using approximations with step sizes \eqn{a, a/r, a/r^2, \ldots}{a, a/r, a/r^2, ...} where \eqn{a} is the initial step size (which does not need to be specified). Estimates based on a step size \eqn{s} are assumed to have an error of order \eqn{s^k}. Thus, the default values \code{r=2} and \code{k=1} imply that the errors in the second column of \code{x} should be roughly \eqn{(1/r)^k = 1/2} as large as the errors in the first column, and so on. } \value{ A matrix whose columns contain a sequence of improved estimates. } \references{ Baddeley, A. and Turner, R. (2014) Bias correction for parameter estimates of spatial point process models. \emph{Journal of Statistical Computation and Simulation} \bold{84}, 1621--1643. DOI: 10.1080/00949655.2012.755976 Brezinski, C. and Zaglia, M.R. 
(1991) \emph{Extrapolation Methods. Theory and Practice}. North-Holland. } \author{ \adrian and \rolf. } \seealso{ \code{\link{bc}} } \examples{ # integrals of sin(x) and cos(x) from 0 to pi # correct answers: 2, 0 est <- function(nsteps) { xx <- seq(0, pi, length=nsteps) ans <- pi * c(mean(sin(xx)), mean(cos(xx))) names(ans) <- c("sin", "cos") ans } X <- cbind(est(10), est(20), est(40)) X rex(X) rex(X, recursive=TRUE) # fitted Gibbs point process model fit0 <- ppm(cells ~ 1, Strauss(0.07), nd=16) fit1 <- update(fit0, nd=32) fit2 <- update(fit0, nd=64) co <- cbind(coef(fit0), coef(fit1), coef(fit2)) co rex(co, k=2, recursive=TRUE) } \keyword{math} \keyword{optimize} spatstat/man/objsurf.Rd0000644000176200001440000000533113160710621014630 0ustar liggesusers\name{objsurf} \alias{objsurf} \alias{objsurf.dppm} \alias{objsurf.kppm} \alias{objsurf.minconfit} \title{ Objective Function Surface } \description{ For a model that was fitted by optimisation, compute the values of the objective function in a neighbourhood of the optimal value. } \usage{ objsurf(x, \dots) \method{objsurf}{dppm}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE) \method{objsurf}{kppm}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE) \method{objsurf}{minconfit}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE) } \arguments{ \item{x}{ Some kind of model that was fitted by finding the optimal value of an objective function. An object of class \code{"dppm"}, \code{"kppm"} or \code{"minconfit"}. } \item{\dots}{ Extra arguments are usually ignored. } \item{ngrid}{ Number of grid points to evaluate along each axis. Either a single integer, or a pair of integers. For example \code{ngrid=32} would mean a \code{32 * 32} grid. } \item{ratio}{ Number greater than 1 determining the range of parameter values to be considered. If the optimal parameter value is \code{opt} then the objective function will be evaluated for values between \code{opt/ratio} and \code{opt * ratio}. 
} \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The object \code{x} should be some kind of model that was fitted by maximising or minimising the value of an objective function. The objective function will be evaluated on a grid of values of the model parameters. Currently the following types of objects are accepted: \itemize{ \item an object of class \code{"dppm"} representing a determinantal point process. See \code{\link{dppm}}. \item an object of class \code{"kppm"} representing a cluster point process or Cox point process. See \code{\link{kppm}}. \item an object of class \code{"minconfit"} representing a minimum-contrast fit between a summary function and its theoretical counterpart. See \code{\link{mincontrast}}. } The result is an object of class \code{"objsurf"} which can be printed and plotted: see \code{\link{methods.objsurf}}. } \value{ An object of class \code{"objsurf"} which can be printed and plotted. Essentially a list containing entries \code{x}, \code{y}, \code{z} giving the parameter values and objective function values. } \author{ \adrian and \ege. } \seealso{ \code{\link{methods.objsurf}}, \code{\link{kppm}}, \code{\link{mincontrast}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") os <- objsurf(fit) if(interactive()) { plot(os) contour(os, add=TRUE) persp(os) } } \keyword{spatial} \keyword{models} spatstat/man/Jmulti.Rd0000644000176200001440000001364313160710571014433 0ustar liggesusers\name{Jmulti} \alias{Jmulti} \title{ Marked J Function } \description{ For a marked point pattern, estimate the multitype \eqn{J} function summarising dependence between the points in subset \eqn{I} and those in subset \eqn{J}. } \usage{ Jmulti(X, I, J, eps=NULL, r=NULL, breaks=NULL, \dots, disjoint=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype distance distribution function \eqn{J_{IJ}(r)}{J[IJ](r)} will be computed. It must be a marked point pattern. 
See under Details. } \item{I}{Subset of points of \code{X} from which distances are measured. See Details. } \item{J}{Subset of points in \code{X} to which distances are measured. See Details. } \item{eps}{A positive number. The pixel resolution of the discrete approximation to Euclidean distance (see \code{\link{Jest}}). There is a sensible default. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are disjoint. If missing, this value will be computed by inspecting the vectors \code{I} and \code{J}. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{IJ}(r)}{J[IJ](r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{un}{the uncorrected estimate of \eqn{J_{IJ}(r)}{J[IJ](r)}, formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{IJ}(r)}{1 - G[IJ](r)} and \eqn{1 - F_{J}(r)}{1 - F[J](r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. 
} \item{theo}{the theoretical value of \eqn{J_{IJ}(r)}{J[IJ](r)} for a marked Poisson process with the same estimated intensity, namely 1. } } \details{ The function \code{Jmulti} generalises \code{\link{Jest}} (for unmarked point patterns) and \code{\link{Jdot}} and \code{\link{Jcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. Define \deqn{J_{IJ}(r) = \frac{1 - G_{IJ}(r)}{1 - F_J(r)}}{ J[IJ](r) = (1 - G[IJ](r))/(1 - F[J](r))} where \eqn{F_J(r)}{F[J](r)} is the cumulative distribution function of the distance from a fixed location to the nearest point of \eqn{X_J}{X[J]}, and \eqn{G_{IJ}(r)}{G[IJ](r)} is the distribution function of the distance from a typical point of \eqn{X_I}{X[I]} to the nearest distinct point of \eqn{X_J}{X[J]}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. It is assumed that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}.
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \seealso{ \code{\link{Jcross}}, \code{\link{Jdot}}, \code{\link{Jest}} } \examples{ trees <- longleaf # Longleaf Pine data: marks represent diameter \testonly{ trees <- trees[seq(1,npoints(trees), by=50)] } Jm <- Jmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(Jm) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/Lcross.Rd0000644000176200001440000000561013160710571014427 0ustar liggesusers\name{Lcross} \alias{Lcross} \title{Multitype L-function (cross-type)} \description{ Calculates an estimate of the cross-type L-function for a multitype point pattern. } \usage{ Lcross(X, i, j, ..., from, to) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. 
A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{\dots}{ Arguments passed to \code{\link{Kcross}}. } \item{from,to}{ An alternative way to specify \code{i} and \code{j} respectively. } } \details{ The cross-type L-function is a transformation of the cross-type K-function, \deqn{L_{ij}(r) = \sqrt{\frac{K_{ij}(r)}{\pi}}}{Lij(r) = sqrt(Kij(r)/pi)} where \eqn{K_{ij}(r)}{Kij(r)} is the cross-type K-function from type \code{i} to type \code{j}. See \code{\link{Kcross}} for information about the cross-type K-function. The command \code{Lcross} first calls \code{\link{Kcross}} to compute the estimate of the cross-type K-function, and then applies the square root transformation. For a marked point pattern in which the points of type \code{i} are independent of the points of type \code{j}, the theoretical value of the L-function is \eqn{L_{ij}(r) = r}{Lij(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L_{ij}}{Lij} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L_{ij}}{Lij} has been estimated } \item{theo}{the theoretical value \eqn{L_{ij}(r) = r}{Lij(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{ij}}{Lij} obtained by the edge corrections named. 
} \seealso{ \code{\link{Kcross}}, \code{\link{Ldot}}, \code{\link{Lest}} } \examples{ data(amacrine) L <- Lcross(amacrine, "off", "on") plot(L) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/hopskel.Rd0000644000176200001440000000631513160710621014626 0ustar liggesusers\name{hopskel} \alias{hopskel} \alias{hopskel.test} \title{Hopkins-Skellam Test} \description{ Perform the Hopkins-Skellam test of Complete Spatial Randomness, or simply calculate the test statistic. } \usage{ hopskel(X) hopskel.test(X, \dots, alternative=c("two.sided", "less", "greater", "clustered", "regular"), method=c("asymptotic", "MonteCarlo"), nsim=999) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{alternative}{ String indicating the type of alternative for the hypothesis test. Partially matched. } \item{method}{ Method of performing the test. Partially matched. } \item{nsim}{ Number of Monte Carlo simulations to perform, if a Monte Carlo p-value is required. } \item{\dots}{Ignored.} } \details{ Hopkins and Skellam (1954) proposed a test of Complete Spatial Randomness based on comparing nearest-neighbour distances with point-event distances. If the point pattern \code{X} contains \code{n} points, we first compute the nearest-neighbour distances \eqn{P_1, \ldots, P_n}{P[1], ..., P[n]} so that \eqn{P_i}{P[i]} is the distance from the \eqn{i}th data point to the nearest other data point. Then we generate another completely random pattern \code{U} with the same number \code{n} of points, and compute for each point of \code{U} the distance to the nearest point of \code{X}, giving distances \eqn{I_1, \ldots, I_n}{I[1], ..., I[n]}. The test statistic is \deqn{ A = \frac{\sum_i P_i^2}{\sum_i I_i^2} }{ A = (sum[i] P[i]^2) / (sum[i] I[i]^2) } The null distribution of \eqn{A} is roughly an \eqn{F} distribution with shape parameters \eqn{(2n,2n)}. 
(This is equivalent to using the test statistic \eqn{H=A/(1+A)} and referring \eqn{H} to the Beta distribution with parameters \eqn{(n,n)}). The function \code{hopskel} calculates the Hopkins-Skellam test statistic \eqn{A}, and returns its numeric value. This can be used as a simple summary of spatial pattern: the value \eqn{A=1} is consistent with Complete Spatial Randomness, while values \eqn{A < 1} are consistent with spatial clustering, and values \eqn{A > 1} are consistent with spatial regularity. The function \code{hopskel.test} performs the test. If \code{method="asymptotic"} (the default), the test statistic \eqn{A} is referred to the \eqn{F} distribution. If \code{method="MonteCarlo"}, a Monte Carlo test is performed using \code{nsim} simulated point patterns. } \value{ The value of \code{hopskel} is a single number. The value of \code{hopskel.test} is an object of class \code{"htest"} representing the outcome of the test. It can be printed. } \references{ Hopkins, B. and Skellam, J.G. (1954) A new method of determining the type of distribution of plant individuals. \emph{Annals of Botany} \bold{18}, 213--227.
} \seealso{ \code{\link{clarkevans}}, \code{\link{clarkevans.test}}, \code{\link{nndist}}, \code{\link{nncross}} } \examples{ hopskel(redwood) hopskel(redwood) hopskel.test(redwood, alternative="clustered") } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{nonparametric} \keyword{htest} spatstat/man/reduced.sample.Rd0000644000176200001440000000640013160710621016047 0ustar liggesusers\name{reduced.sample} \alias{reduced.sample} \title{Reduced Sample Estimator using Histogram Data} \description{ Compute the Reduced Sample estimator of a survival time distribution function, from histogram data } \usage{ reduced.sample(nco, cen, ncc, show=FALSE, uppercen=0) } \arguments{ \item{nco}{vector of counts giving the histogram of uncensored observations (those survival times that are less than or equal to the censoring time) } \item{cen}{vector of counts giving the histogram of censoring times } \item{ncc}{vector of counts giving the histogram of censoring times for the uncensored observations only } \item{uppercen}{ number of censoring times greater than the rightmost histogram breakpoint (if there are any) } \item{show}{Logical value controlling the amount of detail returned by the function value (see below) } } \value{ If \code{show = FALSE}, a numeric vector giving the values of the reduced sample estimator. If \code{show=TRUE}, a list with three components which are vectors of equal length, \item{rs}{Reduced sample estimate of the survival time c.d.f. \eqn{F(t)} } \item{numerator}{numerator of the reduced sample estimator } \item{denominator}{denominator of the reduced sample estimator } } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the reduced sample estimator from a huge dataset. Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. 
Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}. Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. If the number of observations \eqn{M} is large, it is efficient to use histograms. Form the histogram \code{cen} of all censoring times \eqn{C_i}{C[i]}. That is, \code{obs[k]} counts the number of values \eqn{C_i}{C[i]} in the interval \code{(breaks[k],breaks[k+1]]} for \eqn{k > 1} and \code{[breaks[1],breaks[2]]} for \eqn{k = 1}. Also form the histogram \code{nco} of all uncensored times, i.e. those \eqn{\tilde T_i}{T*[i]} such that \eqn{D_i=1}{D[i]=1}, and the histogram of all censoring times for which the survival time is uncensored, i.e. those \eqn{C_i}{C[i]} such that \eqn{D_i=1}{D[i]=1}. These three histograms are the arguments passed to \code{kaplan.meier}. The return value \code{rs} is the reduced-sample estimator of the distribution function \eqn{F(t)}. Specifically, \code{rs[k]} is the reduced sample estimate of \code{F(breaks[k+1])}. The value is exact, i.e. the use of histograms does not introduce any approximation error. Note that, for the results to be valid, either the histogram breaks must span the censoring times, or the number of censoring times that do not fall in a histogram cell must have been counted in \code{uppercen}. } \seealso{ \code{\link{kaplan.meier}}, \code{\link{km.rs}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/quasirandom.Rd0000644000176200001440000000621013160710621015476 0ustar liggesusers\name{quasirandom} \alias{quasirandom} %DoNotExport \alias{vdCorput} \alias{Halton} \alias{Hammersley} \title{ Quasirandom Patterns } \description{ Generates quasirandom sequences of numbers and quasirandom spatial patterns of points in any dimension. 
} \usage{ vdCorput(n, base) Halton(n, bases = c(2, 3), raw = FALSE, simplify = TRUE) Hammersley(n, bases = 2, raw = FALSE, simplify = TRUE) } \arguments{ \item{n}{ Number of points to generate. } \item{base}{ A prime number giving the base of the sequence. } \item{bases}{ Vector of prime numbers giving the bases of the sequences for each coordinate axis. } \item{raw}{ Logical value indicating whether to return the coordinates as a matrix (\code{raw=TRUE}) or as a spatial point pattern (\code{raw=FALSE}, the default). } \item{simplify}{ Argument passed to \code{\link{ppx}} indicating whether point patterns of dimension 2 or 3 should be returned as objects of class \code{"ppp"} or \code{"pp3"} respectively (\code{simplify=TRUE}, the default) or as objects of class \code{"ppx"} (\code{simplify=FALSE}). } } \details{ The function \code{vdCorput} generates the quasirandom sequence of Van der Corput (1935) of length \code{n} with the given \code{base}. These are numbers between 0 and 1 which are in some sense uniformly distributed over the interval. The function \code{Halton} generates the Halton quasirandom sequence of points in \code{d}-dimensional space, where \code{d = length(bases)}. The values of the \eqn{i}-th coordinate of the points are generated using the van der Corput sequence with base equal to \code{bases[i]}. The function \code{Hammersley} generates the Hammersley set of points in \code{d+1}-dimensional space, where \code{d = length(bases)}. The first \code{d} coordinates of the points are generated using the van der Corput sequence with base equal to \code{bases[i]}. The \code{d+1}-th coordinate is the sequence \code{1/n, 2/n, ..., 1}. If \code{raw=FALSE} (the default) then the Halton and Hammersley sets are interpreted as spatial point patterns of the appropriate dimension. 
They are returned as objects of class \code{"ppx"} (multidimensional point patterns) unless \code{simplify=TRUE} and \code{d=2} or \code{d=3} when they are returned as objects of class \code{"ppp"} or \code{"pp3"}. If \code{raw=TRUE}, the coordinates are returned as a matrix with \code{n} rows and \code{D} columns where \code{D} is the spatial dimension. } \value{ For \code{vdCorput}, a numeric vector. For \code{Halton} and \code{Hammersley}, an object of class \code{"ppp"}, \code{"pp3"} or \code{"ppx"}; or if \code{raw=TRUE}, a numeric matrix. } \references{ Van der Corput, J. G. (1935) Verteilungsfunktionen. \emph{Proc. Ned. Akad. v. Wetensch.} \bold{38}: 813--821. Kuipers, L. and Niederreiter, H. (2005) \emph{Uniform distribution of sequences}, Dover Publications. } \seealso{ \code{\link{rQuasi}} } \examples{ vdCorput(10, 2) plot(Halton(256, c(2,3))) plot(Hammersley(256, 3)) } \author{\adrian , \rolf and \ege. } \keyword{spatial} \keyword{datagen} spatstat/man/pixellate.owin.Rd0000644000176200001440000000425613160710621016125 0ustar liggesusers\name{pixellate.owin} \Rdversion{1.1} \alias{pixellate.owin} \title{ Convert Window to Pixel Image } \description{ Convert a window to a pixel image by measuring the area of intersection between the window and each pixel in a raster. } \usage{ \method{pixellate}{owin}(x, W = NULL, ...) } \arguments{ \item{x}{ Window (object of class \code{"owin"}) to be converted. } \item{W}{ Optional. Window determining the pixel raster on which the conversion should occur. } \item{\dots}{ Optional. Extra arguments passed to \code{\link{as.mask}} to determine the pixel raster. } } \details{ This is a method for the generic function \code{pixellate}. It converts a window \code{x} into a pixel image, by measuring the \emph{amount} of \code{x} that is inside each pixel. (The related function \code{\link{as.im}} also converts \code{x} into a pixel image, but records only the presence or absence of \code{x} in each pixel.) 
The pixel raster for the conversion is determined by the argument \code{W} and the extra arguments \code{\dots}. \itemize{ \item If \code{W} is given, and it is a binary mask (a window of type \code{"mask"}) then it determines the pixel raster. \item If \code{W} is given, but it is not a binary mask (it is a window of another type) then it will be converted to a binary mask using \code{as.mask(W, \dots)}. \item If \code{W} is not given, it defaults to \code{as.mask(as.rectangle(x), \dots)} } In the second and third cases it would be common to use the argument \code{dimyx} to control the number of pixels. See the Examples. The algorithm then computes the area of intersection of each pixel with the window. The result is a pixel image with pixel entries equal to these intersection areas. } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{pixellate.ppp}}, \code{\link{pixellate}}, \code{\link{as.im}} } \examples{ data(letterR) plot(pixellate(letterR, dimyx=15)) W <- grow.rectangle(as.rectangle(letterR), 0.2) plot(pixellate(letterR, W, dimyx=15)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/pixellate.ppp.Rd0000644000176200001440000001055113160710621015743 0ustar liggesusers\name{pixellate.ppp} \alias{pixellate.ppp} \alias{as.im.ppp} \title{Convert Point Pattern to Pixel Image} \description{ Converts a point pattern to a pixel image. The value in each pixel is the number of points falling in that pixel, and is typically either 0 or 1. } \usage{ \method{pixellate}{ppp}(x, W=NULL, \dots, weights = NULL, padzero=FALSE, fractional=FALSE, preserve=FALSE) \method{as.im}{ppp}(X, \dots) } \arguments{ \item{x,X}{Point pattern (object of class \code{"ppp"}).} \item{\dots}{Arguments passed to \code{\link{as.mask}} to determine the pixel resolution} \item{W}{Optional window mask (object of class \code{"owin"}) determining the pixel raster. 
} \item{weights}{Optional vector of weights associated with the points.} \item{padzero}{ Logical value indicating whether to set pixel values to zero outside the window. } \item{fractional,preserve}{ Logical values determining the type of discretisation. See Details. } } \details{ The functions \code{pixellate.ppp} and \code{as.im.ppp} convert a spatial point pattern \code{x} into a pixel image, by counting the number of points (or the total weight of points) falling in each pixel. Calling \code{as.im.ppp} is equivalent to calling \code{pixellate.ppp} with its default arguments. Note that \code{pixellate.ppp} is more general than \code{as.im.ppp} (it has additional arguments for greater flexibility). The functions \code{as.im.ppp} and \code{pixellate.ppp} are methods for the generic functions \code{\link{as.im}} and \code{\link{pixellate}} respectively, for the class of point patterns. The pixel raster (in which points are counted) is determined by the argument \code{W} if it is present (for \code{pixellate.ppp} only). In this case \code{W} should be a binary mask (a window object of class \code{"owin"} with type \code{"mask"}). Otherwise the pixel raster is determined by extracting the window containing \code{x} and converting it to a binary pixel mask using \code{\link{as.mask}}. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. If \code{weights} is \code{NULL}, then for each pixel in the mask, the algorithm counts how many points in \code{x} fall in the pixel. This count is usually either 0 (for a pixel with no data points in it) or 1 (for a pixel containing one data point) but may be greater than 1. The result is an image with these counts as its pixel values. If \code{weights} is given, it should be a numeric vector of the same length as the number of points in \code{x}. For each pixel, the algorithm finds the total weight associated with points in \code{x} that fall in the given pixel. 
The result is an image with these total weights as its pixel values. By default (if \code{padzero=FALSE}) the resulting pixel image has the same spatial domain as the window of the point pattern \code{x}. If \code{padzero=TRUE} then the resulting pixel image has a rectangular domain; pixels outside the original window are assigned the value zero. The discretisation procedure is controlled by the arguments \code{fractional} and \code{preserve}. \itemize{ \item The argument \code{fractional} specifies how data points are mapped to pixels. If \code{fractional=FALSE} (the default), each data point is allocated to the nearest pixel centre. If \code{fractional=TRUE}, each data point is allocated with fractional weight to four pixel centres (the corners of a rectangle containing the data point). \item The argument \code{preserve} specifies what to do with pixels lying near the boundary of the window, if the window is not a rectangle. If \code{preserve=FALSE} (the default), any contributions that are attributed to pixel centres lying outside the window are reset to zero. If \code{preserve=TRUE}, any such contributions are shifted to the nearest pixel lying inside the window, so that the total mass is preserved. } } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{pixellate}}, \code{\link{im}}, \code{\link{as.im}}, \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}. } \examples{ data(humberside) plot(pixellate(humberside)) plot(pixellate(humberside, fractional=TRUE)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/dirichlet.Rd0000644000176200001440000000313013160710571015124 0ustar liggesusers\name{dirichlet} \alias{dirichlet} \title{Dirichlet Tessellation of Point Pattern} \description{ Computes the Dirichlet tessellation of a spatial point pattern. Also known as the Voronoi or Thiessen tessellation.
} \usage{ dirichlet(X) } \arguments{ \item{X}{Spatial point pattern (object of class \code{"ppp"}).} } \details{ In a spatial point pattern \code{X}, the Dirichlet tile associated with a particular point \code{X[i]} is the region of space that is closer to \code{X[i]} than to any other point in \code{X}. The Dirichlet tiles divide the two-dimensional plane into disjoint regions, forming a tessellation. The Dirichlet tessellation is also known as the Voronoi or Thiessen tessellation. This function computes the Dirichlet tessellation (within the original window of \code{X}) using the function \code{\link[deldir]{deldir}} in the package \pkg{deldir}. To ensure that there is a one-to-one correspondence between the points of \code{X} and the tiles of \code{dirichlet(X)}, duplicated points in \code{X} should first be removed by \code{X <- unique(X, rule="deldir")}. The tiles of the tessellation will be computed as polygons if the original window is a rectangle or a polygon. Otherwise the tiles will be computed as binary masks. } \value{ A tessellation (object of class \code{"tess"}). } \seealso{ \code{\link{tess}}, \code{\link{delaunay}}, \code{\link{ppp}}, \code{\link{dirichletVertices}} } \examples{ X <- runifpoint(42) plot(dirichlet(X)) plot(X, add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/Extract.owin.Rd0000644000176200001440000000217613160710621015547 0ustar liggesusers\name{Extract.owin} \alias{[.owin} \title{Extract Subset of Window} \description{ Extract a subset of a window. } \usage{ \method{[}{owin}(x, i, \dots) } \arguments{ \item{x}{ A spatial window (object of class \code{"owin"}). } \item{i}{ Object defining the subregion. Either a spatial window, or a pixel image with logical values. } \item{\dots}{Ignored.} } \value{ Another spatial window (object of class \code{"owin"}). 
} \details{ This function computes the intersection between the window \code{x} and the domain specified by \code{i}, using \code{\link{intersect.owin}}. This function is a method for the subset operator \code{"["} for spatial windows (objects of class \code{"owin"}). It is provided mainly for completeness. The index \code{i} may be either a window, or a pixel image with logical values (the \code{TRUE} values of the image specify the spatial domain). } \seealso{ \code{\link{intersect.owin}} } \examples{ W <- owin(c(2.5, 3.2), c(1.4, 2.9)) plot(letterR) plot(letterR[W], add=TRUE, col="red") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/summary.ppp.Rd0000644000176200001440000000306713160710621015455 0ustar liggesusers\name{summary.ppp} \alias{summary.ppp} \title{Summary of a Point Pattern Dataset} \description{ Prints a useful summary of a point pattern dataset. } \usage{ \method{summary}{ppp}(object, \dots, checkdup=TRUE) } \arguments{ \item{object}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Ignored. } \item{checkdup}{ Logical value indicating whether to check for the presence of duplicate points. } } \details{ A useful summary of the point pattern \code{object} is printed. This is a method for the generic function \code{\link{summary}}. If \code{checkdup=TRUE}, the pattern will be checked for the presence of duplicate points, using \code{\link{duplicated.ppp}}. This can be time-consuming if the pattern contains many points, so the checking can be disabled by setting \code{checkdup=FALSE}. If the point pattern was generated by simulation using \code{\link{rmh}}, the parameters of the algorithm are printed. 
} \seealso{ \code{\link{summary}}, \code{\link{summary.owin}}, \code{\link{print.ppp}} } \examples{ summary(cells) # plain vanilla point pattern # multitype point pattern woods <- lansing \testonly{woods <- woods[seq(1, npoints(woods), length=40)]} summary(woods) # tabulates frequencies of each mark # numeric marks trees <- longleaf \testonly{trees <- trees[seq(1, npoints(trees), length=40)]} summary(trees) # prints summary.default(marks(trees)) # weird polygonal window summary(demopat) # describes it } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} spatstat/man/project2segment.Rd0000644000176200001440000000441413160710621016272 0ustar liggesusers\name{project2segment} \alias{project2segment} \title{Move Point To Nearest Line} \description{ Given a point pattern and a line segment pattern, this function moves each point to the closest location on a line segment. } \usage{ project2segment(X, Y) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{Y}{A line segment pattern (object of class \code{"psp"}).} } \details{ For each point \code{x} in the point pattern \code{X}, this function finds the closest line segment \code{y} in the line segment pattern \code{Y}. It then `projects' the point \code{x} onto the line segment \code{y} by finding the position \code{z} along \code{y} which is closest to \code{x}. This position \code{z} is returned, along with supplementary information. } \value{ A list with the following components. Each component has length equal to the number of points in \code{X}, and its entries correspond to the points of \code{X}. \item{Xproj }{ Point pattern (object of class \code{"ppp"} containing the projected points. } \item{mapXY }{ Integer vector identifying the nearest segment to each point. } \item{d}{ Numeric vector of distances from each point of \code{X} to the corresponding projected point. 
} \item{tp}{ Numeric vector giving the scaled parametric coordinate \eqn{0 \le t_p \le 1}{0 <= tp <= 1} of the position of the projected point along the segment. } For example suppose \code{mapXY[2] = 5} and \code{tp[2] = 0.33}. Then \code{Y[5]} is the line segment lying closest to \code{X[2]}. The projection of the point \code{X[2]} onto the segment \code{Y[5]} is the point \code{Xproj[2]}, which lies one-third of the way between the first and second endpoints of the line segment \code{Y[5]}. } \author{ \adrian and \rolf } \seealso{ \code{\link{nearestsegment}} for a faster way to determine which segment is closest to each point. } \examples{ X <- rstrat(square(1), 5) Y <- as.psp(matrix(runif(20), 5, 4), window=owin()) plot(Y, lwd=3, col="green") plot(X, add=TRUE, col="red", pch=16) v <- project2segment(X,Y) Xproj <- v$Xproj plot(Xproj, add=TRUE, pch=16) arrows(X$x, X$y, Xproj$x, Xproj$y, angle=10, length=0.15, col="red") } \keyword{spatial} \keyword{math} spatstat/man/pcfinhom.Rd0000644000176200001440000001576413160710621014774 0ustar liggesusers\name{pcfinhom} \alias{pcfinhom} \title{ Inhomogeneous Pair Correlation Function } \description{ Estimates the inhomogeneous pair correlation function of a point pattern using kernel methods. } \usage{ pcfinhom(X, lambda = NULL, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), divisor = c("r", "d"), renormalise = TRUE, normpower=1, update = TRUE, leaveoneout = TRUE, reciplambda = NULL, sigma = NULL, varcov = NULL, close=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{lambda}{ Optional. Values of the estimated intensity function. 
Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. Either a single numeric value, or a character string specifying a bandwidth selection rule recognised by \code{\link{density.default}}. If \code{bw} is missing or \code{NULL}, the default value is computed using Stoyan's rule of thumb: see \code{\link{bw.stoyan}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Coefficient for Stoyan's bandwidth selection rule; see \code{\link{bw.stoyan}}. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See \code{\link{pcf.ppp}}. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. 
} \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{reciplambda}{ Alternative to \code{lambda}. Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda} of the intensity function. Either a vector giving the reciprocal intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the reciprocal intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the reciprocal intensity value at any location. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{close}{ Advanced use only. Precomputed data. See section on Advanced Use. } } \details{ The inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)} is a summary of the dependence between points in a spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda(x) \lambda(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda(x) * lambda(y) * g(r) dx dy } where \eqn{\lambda}{lambda} is the intensity function of the point process. For a Poisson point process with intensity function \eqn{\lambda}{lambda}, this probability is \eqn{p(r) = \lambda(x) \lambda(y)}{p(r) = lambda(x) * lambda(y)} so \eqn{g_{\rm inhom}(r) = 1}{ginhom(r) = 1}. The inhomogeneous pair correlation function is related to the inhomogeneous \eqn{K} function through \deqn{ g_{\rm inhom}(r) = \frac{K'_{\rm inhom}(r)}{2\pi r} }{ ginhom(r) = Kinhom'(r)/ ( 2 * pi * r) } where \eqn{K'_{\rm inhom}(r)}{Kinhom'(r)} is the derivative of \eqn{K_{\rm inhom}(r)}{Kinhom(r)}, the inhomogeneous \eqn{K} function. 
See \code{\link{Kinhom}} for information about \eqn{K_{\rm inhom}(r)}{Kinhom(r)}. The command \code{pcfinhom} estimates the inhomogeneous pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If \code{renormalise=TRUE} (the default), then the estimates are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{area}(W)/\sum (1/\lambda(x_i)). }{ c = area(W)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. The default value of \code{normpower} is 1 but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W). } } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{\rm inhom}(r)}{ginhom(r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)} estimated by Ripley isotropic correction } as required. } \section{Advanced Use}{ To perform the same computation using several different bandwidths \code{bw}, it is efficient to use the argument \code{close}. This should be the result of \code{\link{closepairs}(X, rmax)} for a suitably large value of \code{rmax}, namely \code{rmax >= max(r) + 3 * bw}. 
} \seealso{ \code{\link{pcf}}, \code{\link{pcf.ppp}}, \code{\link{bw.stoyan}}, \code{\link{bw.pcf}}, \code{\link{Kinhom}} } \examples{ data(residualspaper) X <- residualspaper$Fig4b plot(pcfinhom(X, stoyan=0.2, sigma=0.1)) fit <- ppm(X, ~polynom(x,y,2)) plot(pcfinhom(X, lambda=fit, normpower=2)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/im.object.Rd0000644000176200001440000000714613160710621015036 0ustar liggesusers\name{im.object} \alias{im.object} %DoNotExport \title{Class of Images} \description{ A class \code{"im"} to represent a two-dimensional pixel image. } \details{ An object of this class represents a two-dimensional pixel image. It specifies \itemize{ \item the dimensions of the rectangular array of pixels \item \eqn{x} and \eqn{y} coordinates for the pixels \item a numeric value (``grey value'') at each pixel } If \code{X} is an object of type \code{im}, it contains the following elements: \tabular{ll}{ \code{v} \tab matrix of values \cr \code{dim} \tab dimensions of matrix \code{v} \cr \code{xrange} \tab range of \eqn{x} coordinates of image window \cr \code{yrange} \tab range of \eqn{y} coordinates of image window \cr \code{xstep} \tab width of one pixel \cr \code{ystep} \tab height of one pixel \cr \code{xcol} \tab vector of \eqn{x} coordinates of centres of pixels \cr \code{yrow} \tab vector of \eqn{y} coordinates of centres of pixels } Users are strongly advised not to manipulate these entries directly. Objects of class \code{"im"} may be created by the functions \code{\link{im}} and \code{\link{as.im}}. Image objects are also returned by various functions including \code{\link{distmap}}, \code{\link{Kmeasure}}, \code{\link{setcov}}, \code{\link{eval.im}} and \code{\link{cut.im}}. Image objects may be displayed using the methods \code{\link{plot.im}}, \code{image.im}, \code{\link{persp.im}} and \code{contour.im}. 
} There are also methods \code{\link{print.im}} for printing information about an image, \code{\link{summary.im}} for summarising an image, \code{\link{mean.im}} for calculating the average pixel value, \code{\link{hist.im}} for plotting a histogram of pixel values, \code{\link{quantile.im}} for calculating quantiles of pixel values, and \code{\link{cut.im}} for dividing the range of pixel values into categories. Pixel values in an image may be extracted using the subset operator \code{\link{[.im}}. To extract all pixel values from an image object, use \code{\link{as.matrix.im}}. The levels of a factor-valued image can be extracted and changed with \code{levels} and \code{levels<-}. Calculations involving one or more images (for example, squaring all the pixel values in an image, converting numbers to factor levels, or subtracting one image from another) can often be done easily using \code{\link{eval.im}}. To find all pixels satisfying a certain constraint, use \code{\link{solutionset}}. Note carefully that the entry \code{v[i,j]} gives the pixel value at the location \code{(xcol[j],yrow[i])}. That is, the \bold{row} index of the matrix \code{v} corresponds to increasing \bold{y} coordinate, while the column index of \code{v} corresponds to increasing \bold{x} coordinate. Thus \code{yrow} has one entry for each row of \code{v} and \code{xcol} has one entry for each column of \code{v}. Under the usual convention in \R, a correct display of the image would be obtained by transposing the matrix, e.g. \code{image.default(xcol, yrow, t(v))}, if you wanted to do it by hand. } \seealso{ \code{\link{im}}, \code{\link{as.im}}, \code{\link{plot.im}}, \code{\link{persp.im}}, \code{\link{eval.im}}, \code{\link{[.im}} } \section{Warnings}{ The internal representation of images is likely to change in future releases of \pkg{spatstat}. Do not address the entries in an image directly. To extract all pixel values from an image object, use \code{\link{as.matrix.im}}. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/disc.Rd0000644000176200001440000000452013160710571014103 0ustar liggesusers\name{disc} \alias{disc} \title{Circular Window} \description{ Creates a circular window } \usage{ disc(radius=1, centre=c(0,0), \dots, mask=FALSE, npoly=128, delta=NULL) } \arguments{ \item{radius}{Radius of the circle.} \item{centre}{The centre of the circle.} \item{mask}{Logical flag controlling the type of approximation to a perfect circle. See Details. } \item{npoly}{Number of edges of the polygonal approximation, if \code{mask=FALSE}. Incompatible with \code{delta}. } \item{delta}{ Tolerance of polygonal approximation: the length of arc that will be replaced by one edge of the polygon. Incompatible with \code{npoly}. } \item{\dots}{Arguments passed to \code{as.mask} determining the pixel resolution, if \code{mask=TRUE}. } } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying a window. } \details{ This command creates a window object representing a disc, with the given radius and centre. By default, the circle is approximated by a polygon with \code{npoly} edges. If \code{mask=TRUE}, then the disc is approximated by a binary pixel mask. The resolution of the mask is controlled by the arguments \code{\dots} which are passed to \code{\link{as.mask}}. The argument \code{radius} must be a single positive number. The argument \code{centre} specifies the disc centre: it can be either a numeric vector of length 2 giving the coordinates, or a \code{list(x,y)} giving the coordinates of exactly one point, or a point pattern (object of class \code{"ppp"}) containing exactly one point. } \seealso{ \code{\link{ellipse}}, \code{\link{discs}}, \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.mask}} } \note{This function can also be used to generate regular polygons, by setting \code{npoly} to a small integer value. 
For example \code{npoly=5} generates a pentagon and \code{npoly=13} a triskaidecagon. } \examples{ # unit disc W <- disc() # disc of radius 3 centred at x=10, y=5 W <- disc(3, c(10,5)) # plot(disc()) plot(disc(mask=TRUE)) # nice smooth circle plot(disc(npoly=256)) # how to control the resolution of the mask plot(disc(mask=TRUE, dimyx=256)) # check accuracy of approximation area(disc())/pi area(disc(mask=TRUE))/pi } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/affine.ppp.Rd0000644000176200001440000000310313160710571015203 0ustar liggesusers\name{affine.ppp} \alias{affine.ppp} \title{Apply Affine Transformation To Point Pattern} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a point pattern. } \usage{ \method{affine}{ppp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Arguments passed to \code{\link{affine.owin}} affecting the handling of the observation window, if it is a binary pixel mask. } } \value{ Another point pattern (of class \code{"ppp"}) representing the result of applying the affine transformation. } \details{ The point pattern, and its window, are subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and are then translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. This is a method for the generic function \code{\link{affine}}. 
} \seealso{ \code{\link{affine}}, \code{\link{affine.owin}}, \code{\link{affine.psp}}, \code{\link{affine.im}}, \code{\link{flipxy}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(cells) # shear transformation X <- affine(cells, matrix(c(1,0,0.6,1),ncol=2)) \dontrun{ plot(X) # rescale y coordinates by factor 1.3 plot(affine(cells, diag(c(1,1.3)))) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/Kest.fft.Rd0000644000176200001440000000615013160710571014646 0ustar liggesusers\name{Kest.fft} \alias{Kest.fft} \title{K-function using FFT} \description{ Estimates the reduced second moment function \eqn{K(r)} from a point pattern in a window of arbitrary shape, using the Fast Fourier Transform. } \usage{ Kest.fft(X, sigma, r=NULL, \dots, breaks=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{sigma}{ Standard deviation of the isotropic Gaussian smoothing kernel. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the spatial resolution for the FFT calculation. } \item{breaks}{ This argument is for internal use only. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{border}{the estimates of \eqn{K(r)} for these values of \eqn{r} } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} for a stationary Poisson process } } \details{ This is an alternative to the function \code{\link{Kest}} for estimating the \eqn{K} function. It may be useful for very large patterns of points. 
Whereas \code{\link{Kest}} computes the distance between each pair of points analytically, this function discretises the point pattern onto a rectangular pixel raster and applies Fast Fourier Transform techniques to estimate \eqn{K(t)}. The hard work is done by the function \code{\link{Kmeasure}}. The result is an approximation whose accuracy depends on the resolution of the pixel raster. The resolution is controlled by the arguments \code{\dots}, or by setting the parameter \code{npixel} in \code{\link{spatstat.options}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Kmeasure}}, \code{\link{spatstat.options}} } \examples{ pp <- runifpoint(10000) \testonly{ op <- spatstat.options(npixel=125) } Kpp <- Kest.fft(pp, 0.01) plot(Kpp) \testonly{spatstat.options(op)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat/man/effectfun.Rd0000644000176200001440000000736713160710571015142 0ustar liggesusers\name{effectfun} \alias{effectfun} \title{Compute Fitted Effect of a Spatial Covariate in a Point Process Model} \description{ Compute the trend or intensity of a fitted point process model as a function of one of its covariates. 
} \usage{ effectfun(model, covname, \dots, se.fit=FALSE) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}, \code{"rppm"} or \code{"profilepl"}). } \item{covname}{ The name of the covariate. A character string. (Needed only if the model has more than one covariate.) } \item{\dots}{ The fixed values of other covariates (in the form \code{name=value}) if required. } \item{se.fit}{ Logical. If \code{TRUE}, asymptotic standard errors of the estimates will be computed, together with a 95\% confidence interval. } } \details{ The object \code{model} should be an object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}, \code{"rppm"} or \code{"profilepl"} representing a point process model fitted to point pattern data. The model's trend formula should involve a spatial covariate named \code{covname}. This could be \code{"x"} or \code{"y"} representing one of the Cartesian coordinates. More commonly the covariate is another, external variable that was supplied when fitting the model. The command \code{effectfun} computes the fitted trend of the point process \code{model} as a function of the covariate named \code{covname}. The return value can be plotted immediately, giving a plot of the fitted trend against the value of the covariate. If the model also involves covariates other than \code{covname}, then these covariates will be held fixed. Values for these other covariates must be provided as arguments to \code{effectfun} in the form \code{name=value}. If \code{se.fit=TRUE}, the algorithm also calculates the asymptotic standard error of the fitted trend, and a (pointwise) asymptotic 95\% confidence interval for the true trend. This command is just a wrapper for the prediction method \code{\link{predict.ppm}}. For more complicated computations about the fitted intensity, use \code{\link{predict.ppm}}. 
} \section{Trend and intensity}{ For a Poisson point process model, the trend is the same as the intensity of the point process. For a more general Gibbs model, the trend is the first order potential in the model (the first order term in the Gibbs representation). In Poisson or Gibbs models fitted by \code{\link{ppm}}, the trend is the only part of the model that depends on the covariates. } \section{Determinantal point process models with fixed intensity}{ The function \code{\link{dppm}} which fits a determinantal point process model allows the user to specify the intensity \code{lambda}. In such cases the effect function is undefined, and \code{effectfun} stops with an error message. } \value{ A data frame containing a column of values of the covariate and a column of values of the fitted trend. If \code{se.fit=TRUE}, there are 3 additional columns containing the standard error and the upper and lower limits of a confidence interval. If the covariate named \code{covname} is numeric (rather than a factor or logical variable), the return value is also of class \code{"fv"} so that it can be plotted immediately. } \seealso{ \code{\link{ppm}}, \code{\link{predict.ppm}}, \code{\link{fv.object}} } \examples{ X <- copper$SouthPoints D <- distfun(copper$SouthLines) fit <- ppm(X ~ polynom(D, 5)) effectfun(fit) plot(effectfun(fit, se.fit=TRUE)) fitx <- ppm(X ~ x + polynom(D, 5)) plot(effectfun(fitx, "D", x=20)) } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{models} spatstat/man/methods.ppx.Rd0000644000176200001440000000263513160710621015433 0ustar liggesusers\name{methods.ppx} \Rdversion{1.1} \alias{methods.ppx} %DoNotExport \alias{print.ppx} \alias{plot.ppx} \alias{unitname.ppx} \alias{unitname<-.ppx} \title{ Methods for Multidimensional Space-Time Point Patterns } \description{ Methods for printing and plotting a general multidimensional space-time point pattern. } \usage{ \method{print}{ppx}(x, ...) \method{plot}{ppx}(x, ...) 
\method{unitname}{ppx}(x) \method{unitname}{ppx}(x) <- value } \arguments{ \item{x}{ Multidimensional point pattern (object of class \code{"ppx"}). } \item{\dots}{ Additional arguments passed to plot methods. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{plot}}, \code{\link{unitname}} and \code{\link{unitname<-}} for the class \code{"ppx"} of multidimensional point patterns. The \code{print} method prints a description of the point pattern and its spatial domain. The \code{unitname} method extracts the name of the unit of length in which the point coordinates are expressed. The \code{unitname<-} method assigns the name of the unit of length. } \value{ For \code{print.ppx} the value is \code{NULL}. For \code{unitname.ppx} an object of class \code{"units"}. } \author{\adrian and \rolf } \seealso{ \code{\link{ppx}}, \code{\link{unitname}} } \keyword{spatial} spatstat/man/as.linim.Rd0000644000176200001440000000575113160710571014702 0ustar liggesusers\name{as.linim} \alias{as.linim} \alias{as.linim.linim} \alias{as.linim.linfun} \alias{as.linim.default} \title{Convert to Pixel Image on Linear Network} \description{ Converts various kinds of data to a pixel image on a linear network. } \usage{ as.linim(X, \dots) \method{as.linim}{linim}(X, \dots) \method{as.linim}{default}(X, L, \dots, eps = NULL, dimyx = NULL, xy = NULL, delta=NULL) \method{as.linim}{linfun}(X, L=domain(X), \dots, eps = NULL, dimyx = NULL, xy = NULL, delta=NULL) } \arguments{ \item{X}{ Data to be converted to a pixel image on a linear network. } \item{L}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{Additional arguments passed to \code{X} when \code{X} is a function. } \item{eps,dimyx,xy}{ Optional arguments passed to \code{\link{as.mask}} to control the pixel resolution. } \item{delta}{ Optional. 
Numeric value giving the approximate distance (in coordinate units) between successive sample points along each segment of the network. } } \details{ This function converts the data \code{X} into a pixel image on a linear network, an object of class \code{"linim"} (see \code{\link{linim}}). The argument \code{X} may be any of the following: \itemize{ \item a function on a linear network, an object of class \code{"linfun"}. \item a pixel image on a linear network, an object of class \code{"linim"}. \item a pixel image, an object of class \code{"im"}. \item any type of data acceptable to \code{\link{as.im}}, such as a function, numeric value, or window. } First \code{X} is converted to a pixel image object \code{Y} (object of class \code{"im"}). The conversion is performed by \code{\link{as.im}}. The arguments \code{eps}, \code{dimyx} and \code{xy} determine the pixel resolution. Next \code{Y} is converted to a pixel image on a linear network using \code{\link{linim}}. The argument \code{L} determines the linear network. If \code{L} is missing or \code{NULL}, then \code{X} should be an object of class \code{"linim"}, and \code{L} defaults to the linear network on which \code{X} is defined. In addition to converting the function to a pixel image, the algorithm also generates a fine grid of sample points evenly spaced along each segment of the network (with spacing at most \code{delta} coordinate units). The function values at these sample points are stored in the resulting object as a data frame (the argument \code{df} of \code{\link{linim}}). This mechanism allows greater accuracy for some calculations (such as \code{\link{integral.linim}}). } \value{ An image object on a linear network; an object of class \code{"linim"}. 
} \seealso{ \code{\link{as.im}} } \examples{ f <- function(x,y){ x + y } plot(as.linim(f, simplenet)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/rPoissonCluster.Rd0000644000176200001440000001121213160710621016327 0ustar liggesusers\name{rPoissonCluster} \alias{rPoissonCluster} \title{Simulate Poisson Cluster Process} \description{ Generate a random point pattern, a realisation of the general Poisson cluster process. } \usage{ rPoissonCluster(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), \dots, lmax=NULL, nsim=1, drop=TRUE, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{expand}{ Size of the expansion of the simulation window for generating parent points. A single non-negative number. } \item{rcluster}{ A function which generates random clusters. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{rcluster} } \item{lmax}{ Optional. Upper bound on the values of \code{kappa} when \code{kappa} is a function or pixel image. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of the point pattern: see Details. } \details{ This algorithm generates a realisation of the general Poisson cluster process, with the cluster mechanism given by the function \code{rcluster}. 
First, the algorithm generates a Poisson point process of ``parent'' points with intensity \code{kappa} in an expanded window as explained below. Here \code{kappa} may be a single positive number, a function \code{kappa(x, y)}, or a pixel image object of class \code{"im"} (see \code{\link{im.object}}). See \code{\link{rpoispp}} for details. Second, each parent point is replaced by a random cluster of points, created by calling the function \code{rcluster}. These clusters are combined together to yield a single point pattern, and the restriction of this pattern to the window \code{win} is then returned as the result of \code{rPoissonCluster}. The expanded window consists of \code{\link{as.rectangle}(win)} extended by the amount \code{expand} in each direction. The size of the expansion is saved in the attribute \code{"expand"} and may be extracted by \code{attr(X, "expand")} where \code{X} is the generated point pattern. The function \code{rcluster} should expect to be called as \code{rcluster(xp[i],yp[i],\dots)} for each parent point at a location \code{(xp[i],yp[i])}. The return value of \code{rcluster} should be a list with elements \code{x,y} which are vectors of equal length giving the absolute \eqn{x} and \eqn{y} coordinates of the points in the cluster. If the return value of \code{rcluster} is a point pattern (object of class \code{"ppp"}) then it may have marks. The result of \code{rPoissonCluster} will then be a marked point pattern. If required, the intermediate stages of the simulation (the parents and the individual clusters) can also be extracted from the return value of \code{rPoissonCluster} through the attributes \code{"parents"} and \code{"parentid"}. The attribute \code{"parents"} is the point pattern of parent points. The attribute \code{"parentid"} is an integer vector specifying the parent for each of the points in the simulated pattern. (If these data are not required, it is more efficient to set \code{saveparents=FALSE}.) 
} \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rThomas}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}. } \examples{ # each cluster consist of 10 points in a disc of radius 0.2 nclust <- function(x0, y0, radius, n) { return(runifdisc(n, radius, centre=c(x0, y0))) } plot(rPoissonCluster(10, 0.2, nclust, radius=0.2, n=5)) # multitype Neyman-Scott process (each cluster is a multitype process) nclust2 <- function(x0, y0, radius, n, types=c("a", "b")) { X <- runifdisc(n, radius, centre=c(x0, y0)) M <- sample(types, n, replace=TRUE) marks(X) <- M return(X) } plot(rPoissonCluster(15,0.1,nclust2, radius=0.1, n=5)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/relevel.im.Rd0000644000176200001440000000256013160710621015221 0ustar liggesusers\name{relevel.im} \alias{relevel.im} \alias{relevel.ppp} \alias{relevel.ppx} \title{ Reorder Levels of a Factor-Valued Image or Pattern } \description{ For a pixel image with factor values, or a point pattern with factor-valued marks, the levels of the factor are re-ordered so that the level \code{ref} is first and the others are moved down. } \usage{ \method{relevel}{im}(x, ref, \dots) \method{relevel}{ppp}(x, ref, \dots) \method{relevel}{ppx}(x, ref, \dots) } \arguments{ \item{x}{ A pixel image (object of class \code{"im"}) with factor values, or a point pattern (object of class \code{"ppp"}, \code{"ppx"}, \code{"lpp"} or \code{"pp3"}) with factor-valued marks. } \item{ref}{ The reference level. } \item{\dots}{ Ignored. } } \details{ These functions are methods for the generic \code{\link[stats]{relevel}}. If \code{x} is a pixel image (object of class \code{"im"}) with factor values, or a point pattern (object of class \code{"ppp"}, \code{"ppx"}, \code{"lpp"} or \code{"pp3"}) with factor-valued marks, the levels of the factor are changed so that the level specified by \code{ref} comes first. 
} \value{ Object of the same kind as \code{x}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{mergeLevels}} } \examples{ amacrine relevel(amacrine, "on") } \keyword{manip} \keyword{spatial} spatstat/man/vcov.mppm.Rd0000644000176200001440000000562413160710621015110 0ustar liggesusers\name{vcov.mppm} \alias{vcov.mppm} \title{Calculate Variance-Covariance Matrix for Fitted Multiple Point Process Model} \description{ Given a fitted multiple point process model, calculate the variance-covariance matrix of the parameter estimates. } \usage{ \method{vcov}{mppm}(object, ..., what="vcov", err="fatal") } \arguments{ \item{object}{ A multiple point process model (object of class \code{"mppm"}). } \item{\dots}{ Arguments recognised by \code{\link{vcov.ppm}}. } \item{what}{ Character string indicating which quantity should be calculated. Options include \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} for the Fisher information matrix. } \item{err}{ Character string indicating what action to take if an error occurs. Either \code{"fatal"}, \code{"warn"} or \code{"null"}. } } \details{ This is a method for the generic function \code{\link{vcov}}. The argument \code{object} should be a fitted multiple point process model (object of class \code{"mppm"}) generated by \code{\link{mppm}}. The variance-covariance matrix of the parameter estimates is computed using asymptotic theory for maximum likelihood (for Poisson processes) or estimating equations (for other Gibbs models). If \code{what="vcov"} (the default), the variance-covariance matrix is returned. If \code{what="corr"}, the variance-covariance matrix is normalised to yield a correlation matrix, and this is returned. If \code{what="fisher"}, the Fisher information matrix is returned instead. In all three cases, the rows and columns of the matrix correspond to the parameters (coefficients) in the same order as in \code{coef(model)}. 
If errors or numerical problems occur, the argument \code{err} determines what will happen. If \code{err="fatal"} an error will occur. If \code{err="warn"} a warning will be issued and \code{NA} will be returned. If \code{err="null"}, no warning is issued, but \code{NULL} is returned. } \value{ A numeric matrix (or \code{NA} or \code{NULL}). } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \seealso{ \code{\link{vcov}}, \code{\link{vcov.ppm}}, \code{\link{mppm}} } \examples{ fit <- mppm(Wat ~x, data=hyperframe(Wat=waterstriders)) vcov(fit) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/Geyer.Rd0000644000176200001440000001061613160710571014237 0ustar liggesusers\name{Geyer} \alias{Geyer} \title{Geyer's Saturation Point Process Model} \description{ Creates an instance of Geyer's saturation point process model which can then be fitted to point pattern data. } \usage{ Geyer(r,sat) } \arguments{ \item{r}{Interaction radius. A positive real number.} \item{sat}{Saturation threshold. A non-negative real number.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of Geyer's saturation point process with interaction radius \eqn{r} and saturation threshold \code{sat}. 
} \details{ Geyer (1999) introduced the \dQuote{saturation process}, a modification of the Strauss process (see \code{\link{Strauss}}) in which the total contribution to the potential from each point (from its pairwise interaction with all other points) is trimmed to a maximum value \eqn{s}. The interaction structure of this model is implemented in the function \code{\link{Geyer}()}. The saturation point process with interaction radius \eqn{r}, saturation threshold \eqn{s}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma^{\min(s, t(x_i, X))} }{ beta gamma^min(s, t(x[i],X)) } to the probability density of the point pattern, where \eqn{t(x_i, X)}{t(x[i],X)} denotes the number of \sQuote{close neighbours} of \eqn{x_i}{x[i]} in the pattern \eqn{X}. A close neighbour of \eqn{x_i}{x[i]} is a point \eqn{x_j}{x[j]} with \eqn{j \neq i}{j != i} such that the distance between \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]} is less than or equal to \eqn{r}. If the saturation threshold \eqn{s} is set to infinity, this model reduces to the Strauss process (see \code{\link{Strauss}}) with interaction parameter \eqn{\gamma^2}{gamma^2}. If \eqn{s = 0}, the model reduces to the Poisson point process. If \eqn{s} is a finite positive number, then the interaction parameter \eqn{\gamma}{gamma} may take any positive value (unlike the case of the Strauss process), with values \eqn{\gamma < 1}{gamma < 1} describing an \sQuote{ordered} or \sQuote{inhibitive} pattern, and values \eqn{\gamma > 1}{gamma > 1} describing a \sQuote{clustered} or \sQuote{attractive} pattern. The nonstationary saturation process is similar except that the value \eqn{\beta}{beta} is replaced by a function \eqn{\beta(x_i)}{beta(x[i])} of location. 
The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the saturation process interaction is yielded by \code{Geyer(r, sat)} where the arguments \code{r} and \code{sat} specify the Strauss interaction radius \eqn{r} and the saturation threshold \eqn{s}, respectively. See the examples below. Note the only arguments are the interaction radius \code{r} and the saturation threshold \code{sat}. When \code{r} and \code{sat} are fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Geyer()}. } \section{Zero saturation}{ The value \code{sat=0} is permitted by \code{Geyer}, but this is not very useful. For technical reasons, when \code{\link{ppm}} fits a Geyer model with \code{sat=0}, the default behaviour is to return an \dQuote{invalid} fitted model in which the estimate of \eqn{\gamma}{gamma} is \code{NA}. In order to get a Poisson process model returned when \code{sat=0}, you would need to set \code{emend=TRUE} in the call to \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}, \code{\link{SatPiece}} } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. 
} \examples{ ppm(cells, ~1, Geyer(r=0.07, sat=2)) # fit the stationary saturation process to `cells' } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/methods.layered.Rd0000644000176200001440000000411213160710621016241 0ustar liggesusers\name{methods.layered} \Rdversion{1.1} \alias{methods.layered} %DoNotExport \alias{shift.layered} \alias{reflect.layered} \alias{flipxy.layered} \alias{rotate.layered} \alias{affine.layered} \alias{rescale.layered} \alias{scalardilate.layered} \title{ Methods for Layered Objects } \description{ Methods for geometrical transformations of layered objects (class \code{"layered"}). } \usage{ \method{shift}{layered}(X, vec=c(0,0), ...) \method{rotate}{layered}(X, ..., centre=NULL) \method{affine}{layered}(X, ...) \method{reflect}{layered}(X) \method{flipxy}{layered}(X) \method{rescale}{layered}(X, s, unitname) \method{scalardilate}{layered}(X, ...) } \arguments{ \item{X}{ Object of class \code{"layered"}. } \item{\dots}{ Arguments passed to the relevant methods when applying the operation to each layer of \code{X}. } \item{s}{ Rescaling factor passed to the relevant method for \code{\link{rescale}}. May be missing. } \item{vec}{ Shift vector (numeric vector of length 2). } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } \item{unitname}{ Optional. New name for the unit of length. A value acceptable to the function \code{\link{unitname<-}} } } \details{ These are methods for the generic functions \code{\link{shift}}, \code{\link{rotate}}, \code{\link{reflect}}, \code{\link{affine}}, \code{\link{rescale}}, \code{\link{scalardilate}} and \code{\link{flipxy}} for the class of layered objects. A layered object represents data that should be plotted in successive layers, for example, a background and a foreground. See \code{\link{layered}}. 
} \value{ Another object of class \code{"layered"}. } \author{\adrian and \rolf } \seealso{ \code{\link{layered}} } \examples{ L <- layered(letterR, runifpoint(20, letterR)) plot(L) plot(rotate(L, pi/4)) } \keyword{spatial} \keyword{methods} spatstat/man/vargamma.estpcf.Rd0000644000176200001440000001521613160710621016237 0ustar liggesusers\name{vargamma.estpcf} \alias{vargamma.estpcf} \title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel} \description{ Fits the Neyman-Scott cluster point process, with Variance Gamma kernel, to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. } \usage{ vargamma.estpcf(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs = list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{nu}{ Numerical value controlling the shape of the tail of the clusters. A number greater than \code{-1/2}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Neyman-Scott Cluster point process model with Variance Gamma kernel (Jalilian et al, 2013) to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. 
The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Neyman-Scott Cluster point process with Variance Gamma kernel to \code{X}, by finding the parameters of the model which give the closest match between the theoretical pair correlation function of the model and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Neyman-Scott cluster point process with Variance Gamma kernel is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent have a common distribution described in Jalilian et al (2013). The shape of the kernel is determined by the dimensionless index \code{nu}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). In previous versions of spatstat instead of specifying \code{nu} (called \code{nu.ker} at that time) the user could specify \code{nu.pcf} which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1} appearing in equation (13), page 127 of Jalilian et al (2013). 
These are related by \code{nu.pcf = 2 * nu.ker + 1} and \code{nu.ker = (nu.pcf - 1)/2}. This syntax is still supported but not recommended for consistency across the package. In that case exactly one of \code{nu.ker} or \code{nu.pcf} must be specified. If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rVarGamma}}. The parameter \code{eta} appearing in \code{startpar} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}. Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. 
It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{vargamma.estK}}, \code{\link{lgcp.estpcf}}, \code{\link{thomas.estpcf}}, \code{\link{cauchy.estpcf}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{pcfmodel}}. \code{\link{rVarGamma}} to simulate the model. } \examples{ u <- vargamma.estpcf(redwood) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat/man/texturemap.Rd0000644000176200001440000000323613160710621015356 0ustar liggesusers\name{texturemap} \alias{texturemap} \title{ Texture Map } \description{ Create a map that associates data values with graphical textures. } \usage{ texturemap(inputs, textures, ...) } \arguments{ \item{inputs}{ A vector containing all the data values that will be mapped to textures. } \item{textures}{ Optional. A vector of integer codes specifying the textures to which the \code{inputs} will be mapped. } \item{\dots}{ Other graphics parameters such as \code{col}, \code{lwd}, \code{lty}. } } \details{ A texture map is an association between data values and graphical textures. The command \code{texturemap} creates an object of class \code{"texturemap"} that represents a texture map. 
Once a texture map has been created, it can be applied to any suitable data to generate a texture plot of those data using \code{\link{textureplot}}. This makes it easy to ensure that the \emph{same} texture map is used in two different plots. The texture map can also be plotted in its own right. The argument \code{inputs} should be a vector containing all the possible data values (such as the levels of a factor) that are to be mapped. The \code{textures} should be integer values between 1 and 8, representing the eight possible textures described in the help for \code{\link{add.texture}}. The default is \code{textures = 1:n} where \code{n} is the length of \code{inputs}. } \value{ An object of class \code{"texturemap"} representing the texture map. } \author{ \spatstatAuthors. } \seealso{ \code{\link{textureplot}} } \examples{ texturemap(letters[1:4], 2:5, col=1:4, lwd=2) } \keyword{spatial} \keyword{hplot} spatstat/man/deltametric.Rd0000644000176200001440000000565113160710571015464 0ustar liggesusers\name{deltametric} \Rdversion{1.1} \alias{deltametric} \title{ Delta Metric } \description{ Computes the discrepancy between two sets \eqn{A} and \eqn{B} according to Baddeley's delta-metric. } \usage{ deltametric(A, B, p = 2, c = Inf, ...) } \arguments{ \item{A,B}{ The two sets which will be compared. Windows (objects of class \code{"owin"}), point patterns (objects of class \code{"ppp"}) or line segment patterns (objects of class \code{"psp"}). } \item{p}{ Index of the \eqn{L^p} metric. Either a positive numeric value, or \code{Inf}. } \item{c}{ Distance threshold. Either a positive numeric value, or \code{Inf}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution of the distance maps computed by \code{\link{distmap}}. 
} } \details{ Baddeley (1992a, 1992b) defined a distance between two sets \eqn{A} and \eqn{B} contained in a space \eqn{W} by \deqn{ \Delta(A,B) = \left[ \frac 1 {|W|} \int_W \left| \min(c, d(x,A)) - \min(c, d(x,B)) \right|^p \, {\rm d}x \right]^{1/p} }{ \Delta(A,B) = [ (1/|W|) * integral of |min(c, d(x,A))-min(c, d(x,B))|^p dx ]^(1/p) } where \eqn{c \ge 0}{c \ge 0} is a distance threshold parameter, \eqn{0 < p \le \infty}{0 < p \le Inf} is the exponent parameter, and \eqn{d(x,A)} denotes the shortest distance from a point \eqn{x} to the set \eqn{A}. Also \code{|W|} denotes the area or volume of the containing space \eqn{W}. This is defined so that it is a \emph{metric}, i.e. \itemize{ \item \eqn{\Delta(A,B)=0}{\Delta(A,B)=0} if and only if \eqn{A=B} \item \eqn{\Delta(A,B)=\Delta(B,A)}{\Delta(A,B)=\Delta(B,A)} \item \eqn{\Delta(A,C) \le \Delta(A,B) + \Delta(B,C)}{\Delta(A,C) \le \Delta(A,B) + \Delta(B,C)} } It is topologically equivalent to the Hausdorff metric (Baddeley, 1992a) but has better stability properties in practical applications (Baddeley, 1992b). If \eqn{p=\infty}{p=Inf} and \eqn{c=\infty}{c=Inf} the Delta metric is equal to the Hausdorff metric. The algorithm uses \code{\link{distmap}} to compute the distance maps \eqn{d(x,A)} and \eqn{d(x,B)}, then approximates the integral numerically. The accuracy of the computation depends on the pixel resolution which is controlled through the extra arguments \code{\dots} passed to \code{\link{as.mask}}. } \value{ A numeric value. } \references{ Baddeley, A.J. (1992a) Errors in binary images and an \eqn{L^p} version of the Hausdorff metric. \emph{Nieuw Archief voor Wiskunde} \bold{10}, 157--183. Baddeley, A.J. (1992b) An error metric for binary images. In W. Foerstner and S. Ruwiedel (eds) \emph{Robust Computer Vision}. Karlsruhe: Wichmann. Pages 59--78. 
} \author{ \adrian and \rolf } \seealso{ \code{\link{distmap}} } \examples{ X <- runifpoint(20) Y <- runifpoint(10) deltametric(X, Y, p=1,c=0.1) } \keyword{spatial} \keyword{math} spatstat/man/collapse.fv.Rd0000644000176200001440000000557213160710571015405 0ustar liggesusers\name{collapse.fv} \alias{collapse.fv} \alias{collapse.anylist} \title{ Collapse Several Function Tables into One } \description{ Combines several function tables (objects of class \code{"fv"}) into a single function table, merging columns that are identical and relabelling columns that are different. } \usage{ \method{collapse}{fv}(object, \dots, same = NULL, different = NULL) \method{collapse}{anylist}(object, \dots, same = NULL, different = NULL) } \arguments{ \item{object}{ An object of class \code{"fv"}, or a list of such objects. } \item{\dots}{ Additional objects of class \code{"fv"}. } \item{same}{ Character string or character vector specifying a column or columns, present in each \code{"fv"} object, that are identical in each object. This column or columns will be included only once. } \item{different}{ Character string or character vector specifying a column or columns, present in each \code{"fv"} object, that contain different values in each object. Each of these columns of data will be included, with labels that distinguish them from each other. } } \details{ This is a method for the generic function \code{\link[nlme]{collapse}}. It combines the data in several function tables (objects of class \code{"fv"}, see \code{\link{fv.object}}) to make a single function table. It is essentially a smart wrapper for \code{\link{cbind.fv}}. A typical application is to calculate the same summary statistic (such as the \eqn{K} function) for different point patterns, and then to use \code{collapse.fv} to combine the results into a single object that can easily be plotted. See the Examples. 
The arguments \code{object} and \code{\dots} should be function tables (objects of class \code{"fv"}, see \code{\link{fv.object}}) that are compatible in the sense that they have the same values of the function argument. The argument \code{same} identifies any columns that are present in each function table, and which are known to contain exactly the same values in each table. This column or columns will be included only once in the result. The argument \code{different} identifies any columns that are present in each function table, and which contain different numerical values in each table. Each of these columns will be included, with labels to distinguish them. Columns that are not named in \code{same} or \code{different} will not be included. } \value{ Object of class \code{"fv"}. } \seealso{ \code{\link{fv.object}}, \code{\link{cbind.fv}} } \examples{ # generate simulated data X <- replicate(3, rpoispp(100), simplify=FALSE) names(X) <- paste("Simulation", 1:3) # compute K function estimates Klist <- anylapply(X, Kest) # collapse K <- collapse(Klist, same="theo", different="iso") K } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/mergeLevels.Rd0000644000176200001440000000376013160710621015434 0ustar liggesusers\name{mergeLevels} \alias{mergeLevels} \title{ Merge Levels of a Factor } \description{ Specified levels of the factor will be merged into a single level. } \usage{ mergeLevels(.f, \dots) } \arguments{ \item{.f}{ A factor (or a factor-valued pixel image or a point pattern with factor-valued marks). } \item{\dots}{ List of \code{name=value} pairs, where \code{name} is the new merged level, and \code{value} is the vector of old levels that will be merged. } } \details{ This utility function takes a factor \code{.f} and merges specified levels of the factor. 
The grouping is specified by the arguments \code{\dots} which must each be given in the form \code{new=old}, where \code{new} is the name for the new merged level, and \code{old} is a character vector containing the old levels that are to be merged. The result is a new factor (or factor-valued object), in which the levels listed in \code{old} have been replaced by a single level \code{new}. An argument of the form \code{name=character(0)} or \code{name=NULL} is interpreted to mean that all other levels of the old factor should be mapped to \code{name}. } \value{ Another factor of the same length as \code{.f} (or object of the same kind as \code{.f}). } \section{Tips for manipulating factor levels}{ To remove unused levels from a factor \code{f}, just type \code{f <- factor(f)}. To change the ordering of levels in a factor, use \code{\link[base]{factor}(f, levels=l)} or \code{\link[stats]{relevel}(f, ref)}. } \seealso{ \code{\link[base]{factor}}, \code{\link[stats]{relevel}} } \author{ \adrian \rolf and \ege } \examples{ likert <- c("Strongly Agree", "Agree", "Neutral", "Disagree", "Strongly Disagree") answers <- factor(sample(likert, 15, replace=TRUE), levels=likert) answers mergeLevels(answers, Positive=c("Strongly Agree", "Agree"), Negative=c("Strongly Disagree", "Disagree")) } \keyword{manip} \keyword{spatial} spatstat/man/deletebranch.Rd0000644000176200001440000000463713160710571015612 0ustar liggesusers\name{deletebranch} \alias{deletebranch} \alias{deletebranch.linnet} \alias{deletebranch.lpp} \alias{extractbranch} \alias{extractbranch.linnet} \alias{extractbranch.lpp} \title{ Delete or Extract a Branch of a Tree } \description{ Deletes or extracts a given branch of a tree. 
} \usage{ deletebranch(X, \dots) \method{deletebranch}{linnet}(X, code, labels, \dots) \method{deletebranch}{lpp}(X, code, labels, \dots) extractbranch(X, \dots) \method{extractbranch}{linnet}(X, code, labels, \dots, which=NULL) \method{extractbranch}{lpp}(X, code, labels, \dots, which=NULL) } \arguments{ \item{X}{ Linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \item{code}{ Character string. Label of the branch to be deleted or extracted. } \item{labels}{ Vector of character strings. Branch labels for the vertices of the network, usually obtained from \code{\link{treebranchlabels}}. } \item{\dots}{Arguments passed to methods.} \item{which}{ Logical vector indicating which vertices of the network should be extracted. Overrides \code{code} and \code{labels}. } } \details{ The linear network \code{L <- X} or \code{L <- as.linnet(X)} must be a tree, that is, it has no loops. The argument \code{labels} should be a character vector giving tree branch labels for each vertex of the network. It is usually obtained by calling \code{\link{treebranchlabels}}. The branch designated by the string \code{code} will be deleted or extracted. The return value is the result of deleting or extracting this branch from \code{X} along with any data associated with this branch (such as points or marks). } \value{ Another object of the same type as \code{X} obtained by deleting or extracting the specified branch. 
} \author{ \spatstatAuthors } \seealso{ \code{\link{treebranchlabels}}, \code{\link{branchlabelfun}}, \code{\link{linnet}} } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) plot(L, main="") # compute branch labels tb <- treebranchlabels(L, 1) tbc <- paste0("[", tb, "]") text(vertices(L), labels=tbc, cex=2) # delete branch B LminusB <- deletebranch(L, "b", tb) plot(LminusB, add=TRUE, col="green") # extract branch B LB <- extractbranch(L, "b", tb) plot(LB, add=TRUE, col="red") } \keyword{spatial} \keyword{manip} spatstat/man/nsegments.Rd0000644000176200001440000000133113160710621015155 0ustar liggesusers\name{nsegments} \alias{nsegments} \alias{nsegments.psp} \title{ Number of Line Segments in a Line Segment Pattern } \description{ Returns the number of line segments in a line segment pattern. } \usage{ nsegments(x) \method{nsegments}{psp}(x) } \arguments{ \item{x}{ A line segment pattern, i.e. an object of class \code{psp}, or an object containing a linear network. } } \details{ This function is generic, with methods for classes \code{psp}, \code{linnet} and \code{lpp}. } \value{ Integer. } \author{ \spatstatAuthors } \seealso{ \code{\link{npoints}()}, \code{\link{psp.object}()} } \examples{ nsegments(copper$Lines) nsegments(copper$SouthLines) } \keyword{spatial} \keyword{manip} spatstat/man/pairorient.Rd0000644000176200001440000000716113160710621015335 0ustar liggesusers\name{pairorient} \alias{pairorient} \title{ Point Pair Orientation Distribution } \description{ Computes the distribution of the orientation of vectors joining pairs of points at a particular range of distances. } \usage{ pairorient(X, r1, r2, ..., cumulative=FALSE, correction, ratio = FALSE, unit=c("degree", "radian"), domain=NULL) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{r1,r2}{ Minimum and maximum values of distance to be considered. 
} \item{\dots}{ Arguments passed to \code{\link{circdensity}} to control the kernel smoothing, if \code{cumulative=FALSE}. } \item{cumulative}{ Logical value specifying whether to estimate the probability density (\code{cumulative=FALSE}, the default) or the cumulative distribution function (\code{cumulative=TRUE}). } \item{correction}{ Character vector specifying edge correction or corrections. Options are \code{"none"}, \code{"isotropic"}, \code{"translate"}, \code{"good"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{unit}{ Unit in which the angles should be expressed. Either \code{"degree"} or \code{"radian"}. } \item{domain}{ Optional window. The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. } } \details{ This algorithm considers all pairs of points in the pattern \code{X} that lie more than \code{r1} and less than \code{r2} units apart. The \emph{direction} of the arrow joining the points is measured, as an angle in degrees or radians, anticlockwise from the \eqn{x} axis. If \code{cumulative=FALSE} (the default), a kernel estimate of the probability density of the orientations is calculated using \code{\link{circdensity}}. If \code{cumulative=TRUE}, then the cumulative distribution function of these directions is calculated. This is the function \eqn{O_{r1,r2}(\phi)}{O[r1,r2](phi)} defined in Stoyan and Stoyan (1994), equation (14.53), page 271. In either case the result can be plotted as a rose diagram by \code{\link{rose}}, or as a function plot by \code{\link{plot.fv}}. The algorithm gives each observed direction a weight, determined by an edge correction, to adjust for the fact that some interpoint distances are more likely to be observed than others. 
The choice of edge correction or corrections is determined by the argument \code{correction}. It is also possible to calculate an estimate of the probability density from the cumulative distribution function, by numerical differentiation. Use \code{\link{deriv.fv}} with the argument \code{Dperiodic=TRUE}. } \value{ A function value table (object of class \code{"fv"}) containing the estimates of the probability density or the cumulative distribution function of angles, in degrees (if \code{unit="degree"}) or radians (if \code{unit="radian"}). } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Ksector}}, \code{\link{nnorient}} } \examples{ rose(pairorient(redwood, 0.05, 0.15, sigma=8), col="grey") plot(CDF <- pairorient(redwood, 0.05, 0.15, cumulative=TRUE)) plot(f <- deriv(CDF, spar=0.6, Dperiodic=TRUE)) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{nonparametric} spatstat/man/logLik.kppm.Rd0000644000176200001440000000541013160710621015343 0ustar liggesusers\name{logLik.kppm} \alias{logLik.kppm} \alias{AIC.kppm} \alias{extractAIC.kppm} \alias{nobs.kppm} \title{Log Likelihood and AIC for Fitted Cox or Cluster Point Process Model} \description{ Extracts the log Palm likelihood, deviance, and AIC of a fitted Cox or cluster point process model. } \usage{ \method{logLik}{kppm}(object, ...) \method{AIC}{kppm}(object, \dots, k=2) \method{extractAIC}{kppm}(fit, scale=0, k=2, \dots) \method{nobs}{kppm}(object, ...) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"kppm"}. } \item{\dots}{Ignored.} \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } } \details{ These functions are methods for the generic commands \code{\link{logLik}}, \code{\link{extractAIC}} and \code{\link{nobs}} for the class \code{"kppm"}. 
An object of class \code{"kppm"} represents a fitted Cox or cluster point process model. It is obtained from the model-fitting function \code{\link{kppm}}. These methods apply only when the model was fitted by maximising the Palm likelihood (Tanaka et al, 2008) by calling \code{\link{kppm}} with the argument \code{method="palm"}. The method \code{logLik.kppm} computes the maximised value of the log Palm likelihood for the fitted model \code{object}. The methods \code{AIC.kppm} and \code{extractAIC.kppm} compute the Akaike Information Criterion AIC for the fitted model based on the Palm likelihood (Tanaka et al, 2008) \deqn{ AIC = -2 \log(PL) + k \times \mbox{edf} }{ AIC = -2 * log(PL) + k * edf } where \eqn{PL} is the maximised Palm likelihood of the fitted model, and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom of the model. The method \code{nobs.kppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link{step}} uses these methods. } \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value. \code{nobs} returns an integer value. } \references{ Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. 
} \seealso{ \code{\link{kppm}}, \code{\link{logLik.ppm}} } \author{\adrian \rolf and \ege } \examples{ fit <- kppm(redwood ~ x, "Thomas", method="palm") nobs(fit) logLik(fit) extractAIC(fit) AIC(fit) step(fit) } \keyword{spatial} \keyword{models} spatstat/man/MultiHard.Rd0000644000176200001440000000555013160710571015056 0ustar liggesusers\name{MultiHard} \alias{MultiHard} \title{The Multitype Hard Core Point Process Model} \description{ Creates an instance of the multitype hard core point process model which can then be fitted to point pattern data. } \usage{ MultiHard(hradii, types=NULL) } \arguments{ \item{hradii}{Matrix of hard core radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype hard core process with hard core radii \eqn{hradii[i,j]}. } \details{ This is a multitype version of the hard core process. A pair of points of types \eqn{i} and \eqn{j} must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{hradii}. The matrix \code{hradii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no distance constraint should be applied for this combination of types. Note that only the hardcore radii are specified in \code{MultiHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiHard()}. 
} \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Strauss}}. See \code{\link{ragsMultiHard}} and \code{\link{rmh}} for simulation. } \examples{ h <- matrix(c(1,2,2,1), nrow=2,ncol=2) # prints a sensible description of itself MultiHard(h) # Fit the stationary multitype hardcore process to `amacrine' # with hard core operating only between cells of the same type. h <- 0.02 * matrix(c(1, NA, NA, 1), nrow=2,ncol=2) ppm(amacrine ~1, MultiHard(h)) } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype hard core model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiHard(types=NULL, hradii)}. The new code attempts to handle the old syntax as well. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat/man/tess.Rd0000644000176200001440000001262213160710621014135 0ustar liggesusers\name{tess} \alias{tess} \title{Create a Tessellation} \description{ Creates an object of class \code{"tess"} representing a tessellation of a spatial region. } \usage{ tess(..., xgrid = NULL, ygrid = NULL, tiles = NULL, image = NULL, window=NULL, marks=NULL, keepempty=FALSE, unitname=NULL, check=TRUE) } \arguments{ \item{\dots}{Ignored.} \item{xgrid,ygrid}{Cartesian coordinates of vertical and horizontal lines determining a grid of rectangles. Incompatible with other arguments. } \item{tiles}{List of tiles in the tessellation. A list, each of whose elements is a window (object of class \code{"owin"}). Incompatible with other arguments. 
} \item{image}{ Pixel image which specifies the tessellation. Incompatible with other arguments. } \item{window}{ Optional. The spatial region which is tessellated (i.e. the union of all the tiles). An object of class \code{"owin"}. } \item{marks}{ Optional vector or data frame of marks associated with the tiles. } \item{keepempty}{ Logical flag indicating whether empty tiles should be retained or deleted. } \item{unitname}{ Optional. Name of unit of length. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. If this argument is missing or \code{NULL}, information about the unitname will be extracted from the other arguments. If this argument is given, it overrides any other information about the unitname. } \item{check}{ Logical value indicating whether to check the validity of the input data. It is strongly recommended to use the default value \code{check=TRUE}. } } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. This command creates an object of class \code{"tess"} that represents a tessellation. Three types of tessellation are supported: \describe{ \item{rectangular:}{ tiles are rectangles, with sides parallel to the \code{x} and \code{y} axes. They may or may not have equal size and shape. The arguments \code{xgrid} and \code{ygrid} determine the positions of the vertical and horizontal grid lines, respectively. (See \code{\link{quadrats}} for another way to do this.) } \item{tile list:}{ tiles are arbitrary spatial regions. The argument \code{tiles} is a list of these tiles, which are objects of class \code{"owin"}. } \item{pixel image:}{ Tiles are subsets of a fine grid of pixels. The argument \code{image} is a pixel image (object of class \code{"im"}) with factor values. Each level of the factor represents a different tile of the tessellation. 
The pixels that have a particular value of the factor constitute a tile. } } The optional argument \code{window} specifies the spatial region formed by the union of all the tiles. In other words it specifies the spatial region that is divided into tiles by the tessellation. If this argument is missing or \code{NULL}, it will be determined by computing the set union of all the tiles. This is a time-consuming computation. For efficiency it is advisable to specify the window. Note that the validity of the window will not be checked. Empty tiles may occur, either because one of the entries in the list \code{tiles} is an empty window, or because one of the levels of the factor-valued pixel image \code{image} does not occur in the pixel data. When \code{keepempty=TRUE}, empty tiles are permitted. When \code{keepempty=FALSE} (the default), tiles are not allowed to be empty, and any empty tiles will be removed from the tessellation. There are methods for \code{print}, \code{plot}, \code{[} and \code{[<-} for tessellations. Use \code{\link{tiles}} to extract the list of tiles in a tessellation, \code{\link{tilenames}} to extract the names of the tiles, and \code{\link{tile.areas}} to compute their areas. The tiles may have marks, which can be extracted by \code{\link{marks.tess}} and changed by \code{\link{marks<-.tess}}. Tessellations can be used to classify the points of a point pattern, in \code{\link{split.ppp}}, \code{\link{cut.ppp}} and \code{\link{by.ppp}}. To construct particular tessellations, see \code{\link{quadrats}}, \code{\link{hextess}}, \code{\link{dirichlet}}, \code{\link{delaunay}} and \code{\link{rpoislinetess}}. } \value{ An object of class \code{"tess"} representing the tessellation. 
} \seealso{ \code{\link{marks.tess}}, \code{\link{plot.tess}}, \code{\link{[.tess}}, \code{\link{as.tess}}, \code{\link{tiles}}, \code{\link{intersect.tess}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}}, \code{\link{by.ppp}}, \code{\link{bdist.tiles}}, \code{\link{tile.areas}}. To construct particular tessellations, see \code{\link{quadrats}}, \code{\link{hextess}}, \code{\link{dirichlet}}, \code{\link{delaunay}} and \code{\link{rpoislinetess}}. To divide space into pieces containing equal amounts of stuff, use \code{\link{quantess}}. } \examples{ A <- tess(xgrid=0:4,ygrid=0:4) A B <- A[c(1, 2, 5, 7, 9)] B v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin()) levels(v) <- letters[seq(length(levels(v)))] E <- tess(image=v) E } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{datagen} spatstat/man/crossdist.psp.Rd0000644000176200001440000000474113160710571016004 0ustar liggesusers\name{crossdist.psp} \alias{crossdist.psp} \title{Pairwise distances between two different line segment patterns} \description{ Computes the distances between all pairs of line segments taken from two different line segment patterns. } \usage{ \method{crossdist}{psp}(X, Y, \dots, method="C", type="Hausdorff") } \arguments{ \item{X,Y}{ Line segment patterns (objects of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{method}{String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{type}{ Type of distance to be computed. Options are \code{"Hausdorff"} and \code{"separation"}. Partial matching is used. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th line segment in \code{X} to the \code{j}-th line segment in \code{Y}. } \details{ This is a method for the generic function \code{\link{crossdist}}. 
Given two line segment patterns, this function computes the distance from each line segment in the first pattern to each line segment in the second pattern, and returns a matrix containing these distances. The distances between line segments are measured in one of two ways: \itemize{ \item if \code{type="Hausdorff"}, distances are computed in the Hausdorff metric. The Hausdorff distance between two line segments is the \emph{maximum} distance from any point on one of the segments to the nearest point on the other segment. \item if \code{type="separation"}, distances are computed as the \emph{minimum} distance from a point on one line segment to a point on the other line segment. For example, line segments which cross over each other have separation zero. } The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted \R code only. If \code{method="C"} (the default) then compiled \code{C} code is used. The \code{C} code is several times faster. } \seealso{ \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ L1 <- psp(runif(5), runif(5), runif(5), runif(5), owin()) L2 <- psp(runif(10), runif(10), runif(10), runif(10), owin()) D <- crossdist(L1, L2) #result is a 5 x 10 matrix S <- crossdist(L1, L2, type="sep") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/rmhmodel.ppm.Rd0000644000176200001440000001102013160710621015550 0ustar liggesusers\name{rmhmodel.ppm} \alias{rmhmodel.ppm} \title{Interpret Fitted Model for Metropolis-Hastings Simulation.} \description{ Converts a fitted point process model into a format that can be used to simulate the model by the Metropolis-Hastings algorithm. } \usage{ \method{rmhmodel}{ppm}(model, w, ..., verbose=TRUE, project=TRUE, control=rmhcontrol(), new.coef=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). 
} \item{w}{ Optional. Window in which the simulations should be generated. } \item{\dots}{Ignored.} \item{verbose}{ Logical flag indicating whether to print progress reports while the model is being converted. } \item{project}{Logical flag indicating what to do if the fitted model does not correspond to a valid point process. See Details.} \item{control}{ Parameters determining the iterative behaviour of the simulation algorithm. Passed to \code{\link{rmhcontrol}}. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(model)}. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.ppm} is the method for the class \code{"ppm"} of fitted point process models. The argument \code{model} should be a fitted point process model (object of class \code{"ppm"}) typically obtained from the model-fitting function \code{\link{ppm}}. This will be converted into an object of class \code{"rmhmodel"}. The optional argument \code{w} specifies the window in which the pattern is to be generated. If specified, it must be in a form which can be coerced to an object of class \code{owin} by \code{\link{as.owin}}. Not all fitted point process models obtained from \code{\link{ppm}} can be simulated. We have not yet implemented simulation code for the \code{\link{LennardJones}} and \code{\link{OrdThresh}} models. It is also possible that a fitted point process model obtained from \code{\link{ppm}} may not correspond to a valid point process. 
For example a fitted model with the \code{\link{Strauss}} interpoint interaction may have any value of the interaction parameter \eqn{\gamma}{gamma}; however the Strauss process is not well-defined for \eqn{\gamma > 1}{gamma > 1} (Kelly and Ripley, 1976). The argument \code{project} determines what to do in such cases. If \code{project=FALSE}, a fatal error will occur. If \code{project=TRUE}, the fitted model parameters will be adjusted to the nearest values which do correspond to a valid point process. For example a Strauss process with \eqn{\gamma > 1}{gamma > 1} will be projected to a Strauss process with \eqn{\gamma = 1}{gamma = 1}, equivalent to a Poisson process. } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. } \seealso{ \code{\link{rmhmodel}}, \code{\link{rmhmodel.list}}, \code{\link{rmhmodel.default}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \rmhInteractionsList. 
} \examples{ fit1 <- ppm(cells ~1, Strauss(0.07)) mod1 <- rmhmodel(fit1) fit2 <- ppm(cells ~x, Geyer(0.07, 2)) mod2 <- rmhmodel(fit2) fit3 <- ppm(cells ~x, Hardcore(0.07)) mod3 <- rmhmodel(fit3) # Then rmh(mod1), etc } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/Triplets.Rd0000644000176200001440000000632413160710571014773 0ustar liggesusers\name{Triplets} \alias{Triplets} \title{The Triplet Point Process Model} \description{ Creates an instance of Geyer's triplet interaction point process model which can then be fitted to point pattern data. } \usage{ Triplets(r) } \arguments{ \item{r}{The interaction radius of the Triplets process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Triplets process with interaction radius \eqn{r}. } \details{ The (stationary) Geyer triplet process (Geyer, 1999) with interaction radius \eqn{r} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each triplet of close points contributes a factor \eqn{\gamma}{gamma} to the density. A triplet of close points is a group of 3 points, each pair of which is closer than \eqn{r} units apart. Thus the probability density is \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of unordered triples of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} so that this model describes an ``ordered'' or ``inhibitive'' pattern. 
The nonstationary Triplets process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Triplets process pairwise interaction is yielded by the function \code{Triplets()}. See the examples below. Note the only argument is the interaction radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Triplets()}. } \seealso{ \code{\link{ppm}}, \code{\link{triplet.family}}, \code{\link{ppm.object}} } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \examples{ Triplets(r=0.1) # prints a sensible description of itself \dontrun{ ppm(cells, ~1, Triplets(r=0.2)) # fit the stationary Triplets process to `cells' } ppm(cells, ~polynom(x,y,3), Triplets(r=0.2)) # fit a nonstationary Triplets process with log-cubic polynomial trend } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/Extract.im.Rd0000644000176200001440000001731213160710621015176 0ustar liggesusers\name{Extract.im} \alias{[.im} \title{Extract Subset of Image} \description{ Extract a subset or subregion of a pixel image. } \usage{ \method{[}{im}(x, i, j, \dots, drop=TRUE, tight=FALSE, raster=NULL, rescue=is.owin(i)) } \arguments{ \item{x}{ A two-dimensional pixel image. An object of class \code{"im"}. 
} \item{i}{ Object defining the subregion or subset to be extracted. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or a linear network (object of class \code{"linnet"}) or a point pattern (an object of class \code{"ppp"}), or any type of index that applies to a matrix, or something that can be converted to a point pattern by \code{\link{as.ppp}} (using the window of \code{x}). } \item{j}{ An integer or logical vector serving as the column index if matrix indexing is being used. Ignored if \code{i} is a spatial object. } \item{\dots}{Ignored.} \item{drop}{ Logical value. Locations in \code{w} that lie outside the spatial domain of the image \code{x} return a pixel value of \code{NA} if \code{drop=FALSE}, and are omitted if \code{drop=TRUE}. } \item{tight}{ Logical value. If \code{tight=TRUE}, and if the result of the subset operation is an image, the image will be trimmed to the smallest possible rectangle. } \item{raster}{ Optional. An object of class \code{"owin"} or \code{"im"} determining a pixel grid. } \item{rescue}{ Logical value indicating whether rectangular blocks of data should always be returned as pixel images. } } \value{ Either a pixel image or a vector of pixel values. See Details. } \details{ This function extracts a subset of the pixel values in a pixel image. (To reassign the pixel values, see \code{\link{[<-.im}}). The image \code{x} must be an object of class \code{"im"} representing a pixel image defined inside a rectangle in two-dimensional space (see \code{\link{im.object}}). 
The subset to be extracted is determined by the arguments \code{i,j} according to the following rules (which are checked in this order): \enumerate{ \item \code{i} is a spatial object such as a window, a pixel image with logical values, a linear network, or a point pattern; or \item \code{i,j} are indices for the matrix \code{as.matrix(x)}; or \item \code{i} can be converted to a point pattern by \code{\link{as.ppp}(i, W=Window(x))}, and \code{i} is not a matrix. } If \code{i} is a spatial window (an object of class \code{"owin"}), the values of the image inside this window are extracted (after first clipping the window to the spatial domain of the image if necessary). If \code{i} is a linear network (object of class \code{"linnet"}), the values of the image on this network are extracted. If \code{i} is a pixel image with logical values, it is interpreted as a spatial window (with \code{TRUE} values inside the window and \code{FALSE} outside). If \code{i} is a point pattern (an object of class \code{"ppp"}), then the values of the pixel image at the points of this pattern are extracted. This is a simple way to read the pixel values at a given spatial location. At locations outside the spatial domain of the image, the pixel value is undefined, and is taken to be \code{NA}. The logical argument \code{drop} determines whether such \code{NA} values will be returned or omitted. It also influences the format of the return value. If \code{i} is a point pattern (or something that can be converted to a point pattern), then \code{X[i, drop=FALSE]} is a numeric vector containing the pixel values at each of the points of the pattern. Its length is equal to the number of points in the pattern \code{i}. It may contain \code{NA}s corresponding to points which lie outside the spatial domain of the image \code{x}. By contrast, \code{X[i]} or \code{X[i, drop=TRUE]} contains only those pixel values which are not \code{NA}. It may be shorter. 
If \code{i} is a spatial window then \code{X[i, drop=FALSE]} is another pixel image of the same dimensions as \code{X} obtained by setting all pixels outside the window \code{i} to have value \code{NA}. When the result is displayed by \code{\link{plot.im}} the effect is that the pixel image \code{x} is clipped to the window \code{i}. If \code{i} is a linear network (object of class \code{"linnet"}) then \code{X[i, drop=FALSE]} is another pixel image of the same dimensions as \code{X} obtained by restricting the pixel image \code{X} to the linear network. The result also belongs to the class \code{"linim"} (pixel image on a linear network). If \code{i} is a spatial window then \code{X[i, drop=TRUE]} is either: \itemize{ \item a numeric vector containing the pixel values for all pixels that lie inside the window \code{i}. This happens if \code{i} is \emph{not} a rectangle (i.e. \code{i$type != "rectangle"}) or if \code{rescue=FALSE}. \item a pixel image. This happens only if \code{i} is a rectangle (\code{i$type = "rectangle"}) and \code{rescue=TRUE} (the default). } If the optional argument \code{raster} is given, then it should be a binary image mask or a pixel image. Then \code{x} will first be converted to an image defined on the pixel grid implied by \code{raster}, before the subset operation is carried out. In particular, \code{x[i, raster=i, drop=FALSE]} will return an image defined on the same pixel array as the object \code{i}. If \code{i} does not satisfy any of the conditions above, then the algorithm attempts to interpret \code{i} and \code{j} as indices for the matrix \code{as.matrix(x)}. Either \code{i} or \code{j} may be missing or blank. The result is usually a vector or matrix of pixel values. Exceptionally the result is a pixel image if \code{i,j} determines a rectangular subset of the pixel grid, and if the user specifies \code{rescue=TRUE}. 
Finally, if none of the above conditions is met, the object \code{i} may also be a data frame or list of \code{x,y} coordinates which will be converted to a point pattern, taking the observation window to be \code{Window(x)}. Then the pixel values at these points will be extracted as a vector. } \section{Warnings}{ If you have a 2-column matrix containing the \eqn{x,y} coordinates of point locations, then to prevent this being interpreted as an array index, you should convert it to a \code{data.frame} or to a point pattern. If \code{W} is a window or a pixel image, then \code{x[W, drop=FALSE]} will return an image defined on the same pixel array as the original image \code{x}. If you want to obtain an image whose pixel dimensions agree with those of \code{W}, use the \code{raster} argument, \code{x[W, raster=W, drop=FALSE]}. } \seealso{ \code{\link{im.object}}, \code{\link{[<-.im}}, \code{\link{ppp.object}}, \code{\link{as.ppp}}, \code{\link{owin.object}}, \code{\link{plot.im}} } \examples{ # make up an image X <- setcov(unit.square()) plot(X) # a rectangular subset W <- owin(c(0,0.5),c(0.2,0.8)) Y <- X[W] plot(Y) # a polygonal subset R <- affine(letterR, diag(c(1,1)/2), c(-2,-0.7)) plot(X[R, drop=FALSE]) plot(X[R, drop=FALSE, tight=TRUE]) # a point pattern P <- rpoispp(20) Y <- X[P] # look up a specified location X[list(x=0.1,y=0.2)] # 10 x 10 pixel array X <- as.im(function(x,y) { x + y }, owin(c(-1,1),c(-1,1)), dimyx=10) # 100 x 100 W <- as.mask(disc(1, c(0,0)), dimyx=100) # 10 x 10 raster X[W,drop=FALSE] # 100 x 100 raster X[W, raster=W, drop=FALSE] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/compatible.im.Rd0000644000176200001440000000212413160710571015702 0ustar liggesusers\name{compatible.im} \alias{compatible.im} \title{Test Whether Pixel Images Are Compatible} \description{ Tests whether two or more pixel image objects have compatible dimensions. 
} \usage{ \method{compatible}{im}(A, B, \dots, tol=1e-6) } \arguments{ \item{A,B,\dots}{Two or more pixel images (objects of class \code{"im"}).} \item{tol}{Tolerance factor} } \details{ This function tests whether the pixel images \code{A} and \code{B} (and any additional images \code{\dots}) have compatible pixel dimensions. They are compatible if they have the same number of rows and columns, the same physical pixel dimensions, and occupy the same rectangle in the plane. The argument \code{tol} specifies the maximum tolerated error in the pixel coordinates, expressed as a fraction of the dimensions of a single pixel. } \value{ Logical value: \code{TRUE} if the images are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{eval.im}}, \code{\link{harmonise.im}}, \code{\link{commonGrid}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/psp.object.Rd0000644000176200001440000000616713160710621015235 0ustar liggesusers\name{psp.object} \alias{psp.object} %DoNotExport \title{Class of Line Segment Patterns} \description{ A class \code{"psp"} to represent a spatial pattern of line segments in the plane. Includes information about the window in which the pattern was observed. Optionally includes marks. } \details{ An object of this class represents a two-dimensional pattern of line segments. It specifies \itemize{ \item the locations of the line segments (both endpoints) \item the window in which the pattern was observed \item optionally, a ``mark'' attached to each line segment (extra information such as a type label). 
} If \code{X} is an object of type \code{psp}, it contains the following elements: \tabular{ll}{ \code{ends} \tab data frame with entries \code{x0, y0, x1, y1} \cr \tab giving coordinates of segment endpoints \cr \code{window} \tab window of observation \cr \tab (an object of class \code{\link{owin}}) \cr \code{n} \tab number of line segments \cr \code{marks} \tab optional vector or data frame of marks \cr \code{markformat} \tab character string specifying the format of the \cr \tab marks; \dQuote{none}, \dQuote{vector}, or \dQuote{dataframe} } Users are strongly advised not to manipulate these entries directly. Objects of class \code{"psp"} may be created by the function \code{\link{psp}} and converted from other types of data by the function \code{\link{as.psp}}. Note that you must always specify the window of observation; there is intentionally no default action of ``guessing'' the window dimensions from the line segments alone. Subsets of a line segment pattern may be obtained by the functions \code{\link{[.psp}} and \code{\link{clip.psp}}. Line segment pattern objects can be plotted just by typing \code{plot(X)} which invokes the \code{plot} method for line segment pattern objects, \code{\link{plot.psp}}. See \code{\link{plot.psp}} for further information. There are also methods for \code{summary} and \code{print} for line segment patterns. Use \code{summary(X)} to see a useful description of the data. Utilities for line segment patterns include \code{\link{midpoints.psp}} (to compute the midpoints of each segment), \code{\link{lengths.psp}}, (to compute the length of each segment), \code{\link{angles.psp}}, (to compute the angle of orientation of each segment), and \code{\link{distmap.psp}} to compute the distance map of a line segment pattern. 
} \seealso{ \code{\link{psp}}, \code{\link{as.psp}}, \code{\link{[.psp}} } \examples{ # creating a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) # converting from other formats a <- as.psp(matrix(runif(80), ncol=4), window=owin()) a <- as.psp(data.frame(x0=runif(20), y0=runif(20), x1=runif(20), y1=runif(20)), window=owin()) # clipping w <- owin(c(0.1,0.7), c(0.2, 0.8)) b <- clip.psp(a, w) b <- a[w] # the last two lines are equivalent. } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/spatstat-internal.Rd0000644000176200001440000014122613165062035016643 0ustar liggesusers\name{spatstat-internal} \title{Internal spatstat functions} \alias{[.pp3} \alias{[.localpcfmatrix} \alias{[.rat} \alias{[.splitppx} \alias{[.diagramobj} \alias{[<-.splitppx} \alias{acedist.show} \alias{acedist.noshow} \alias{active.interactions} \alias{adaptcoef} \alias{adjust.ratfv} \alias{affinexy} \alias{affinexypolygon} \alias{ang2rad} \alias{anycrossing.psp} \alias{ApplyConnected} \alias{applytolayers} \alias{applyPolyclipArgs} \alias{areadelta2} \alias{areaGain.diri} \alias{areaGain.grid} \alias{areaLoss.diri} \alias{areaLoss.grid} \alias{assemble.plot.objects} \alias{AsymmDistance.psp} \alias{as.breakpts} \alias{as.character.units} \alias{as.data.frame.bw.optim} \alias{as.data.frame.fv} \alias{as.double.im} \alias{as.linfun.linfun} \alias{as.list.hyperframe} \alias{as.listof} \alias{as.owin.lintess} \alias{as.units} \alias{augment.msr} \alias{BartCalc} \alias{bbEngine} \alias{bermantestEngine} \alias{bdry.mask} \alias{bind.ratfv} \alias{blankcoefnames} \alias{bounding.box3} \alias{break.holes} \alias{breakpts} \alias{breakpts.from.r} \alias{bt.frame} \alias{bw.optim} \alias{calc.DR} \alias{calc.NNIR} \alias{calc.SAVE} \alias{calc.SIR} \alias{calc.TSE} \alias{cannot.update} \alias{cartesian} \alias{cellmiddles} \alias{censtimeCDFest} \alias{change.default.expand} \alias{checkbigmatrix} \alias{checkfields} \alias{checksolve} 
\alias{check.finespacing} \alias{check.hist.lengths} \alias{check.mat.mul} \alias{check.testfun} \alias{circticks} \alias{clarkevansCalc} \alias{clip.psp} \alias{cliprect.psp} \alias{clippoly.psp} \alias{closethresh} \alias{coef.summary.kppm} \alias{coef.summary.ppm} \alias{coef.vblogit} \alias{coerce.marks.numeric} \alias{compatible.rat} \alias{compileCDF} \alias{conform.ratfv} \alias{crosspairquad} \alias{cobble.xy} \alias{codetime} \alias{col.args.to.grey} \alias{colouroutputs} \alias{colouroutputs<-} \alias{commonPolyclipArgs} \alias{conform.imagelist} \alias{countingweights} \alias{CressieReadStatistic} \alias{CressieReadSymbol} \alias{CressieReadName} \alias{CVforPCF} \alias{damaged.ppm} \alias{data.mppm} \alias{datagen.runifpointOnLines} \alias{datagen.runifpoisppOnLines} \alias{datagen.rpoisppOnLines} \alias{default.clipwindow} \alias{default.linnet.tolerance} \alias{default.n.tiling} \alias{default.ntile} \alias{deltasuffstat} \alias{Deviation} \alias{dflt.redraw} \alias{densitycrossEngine} \alias{densitypointsEngine} \alias{diagnose.ppm.engine} \alias{diagramobj} \alias{digestCovariates} \alias{digital.volume} \alias{dilate.owin} \alias{dim.fasp} \alias{dim.hyperframe} \alias{dim.im} \alias{dim.msr} \alias{dim.owin} \alias{dimnames.fasp} \alias{dimnames<-.fasp} \alias{dimnames.msr} \alias{distributecbind} \alias{dist2dpath} \alias{do.as.im} \alias{do.call.plotfun} \alias{do.istat} \alias{doMultiStraussHard} \alias{dppmFixAlgorithm} \alias{dppmFixIntensity} \alias{emptywindow} \alias{envelopeEngine} \alias{envelopeProgressData} \alias{envelopeTest} \alias{envelope.hasenvelope} \alias{envelope.matrix} \alias{equalpairs} \alias{equalpairs.quad} \alias{equals.quad} \alias{equalsfun.quad} \alias{erodemask} \alias{erode.owin} \alias{evalCovar} \alias{evalCovar.ppm} \alias{evalCovar.lppm} \alias{evalCovariate} \alias{evalInteraction} \alias{evalInterEngine} \alias{evalPairPotential} \alias{evalSparse3Dentrywise} \alias{evaluate2Dkernel} \alias{even.breaks.owin} 
\alias{exactdt} \alias{exactPdt} \alias{existsSpatstatVariable} \alias{expandSpecialLists} \alias{expandwinPerfect} \alias{ExpSmoothLog} \alias{extractAIC.slrm} \alias{extractAtomicQtests} \alias{fakemaintitle} \alias{family.vblogit} \alias{f3engine} \alias{f3Cengine} \alias{fasp} \alias{FDMKERNEL} \alias{fft2D} \alias{fftwAvailable} \alias{fill.coefs} \alias{findbestlegendpos} \alias{findCovariate} \alias{findcbind} \alias{fii} \alias{fillNA} \alias{flatfname} \alias{flipxypolygon} \alias{forbid.logi} \alias{format.numberwithunit} \alias{FormatFaspFormulae} \alias{framebottomleft} \alias{fvexprmap} \alias{fvlabels} \alias{fvlabels<-} \alias{fvlabelmap} \alias{fvlegend} \alias{g3engine} \alias{g3Cengine} \alias{getdataname} \alias{getfields} \alias{getglmdata} \alias{getglmfit} \alias{getglmsubset} \alias{getlambda.lpp} \alias{getlastshift} \alias{getppmdatasubset} \alias{getppmOriginalCovariates} \alias{getRandomFieldsModelGen} \alias{getSpatstatVariable} \alias{getSumFun} \alias{geyercounts} \alias{geyerdelta2} \alias{GLMpredict} \alias{good.correction.K} %\alias{gridadjacencymatrix} %DoNotExport \alias{gridindex} \alias{grid1index} \alias{grokIndexVector} \alias{grow.mask} \alias{hackglmmPQL} \alias{hasenvelope} \alias{HermiteCoefs} \alias{handle.r.b.args} \alias{handle.rshift.args} \alias{head.hyperframe} \alias{hierarchicalordering} \alias{hiermat} \alias{ho.engine} \alias{hsvNA} \alias{IdenticalRows} \alias{idorempty} \alias{illegal.iformula} \alias{implemented.for.K} \alias{impliedpresence} \alias{impliedcoefficients} \alias{inpoint} \alias{instantiate.interact} \alias{interactionfamilyname} \alias{intermaker} \alias{intX.owin} \alias{intX.xypolygon} \alias{intY.owin} \alias{intY.xypolygon} \alias{invokeColourmapRule} \alias{is.atomicQtest} \alias{is.cadlag} \alias{is.col.argname} \alias{is.data} \alias{is.expandable} \alias{is.expandable.ppm} \alias{is.expandable.rmhmodel} \alias{is.fv} \alias{is.hyperframe} \alias{is.infline} \alias{is.interact} 
\alias{is.marked.default} \alias{is.marked.msr} \alias{is.marked.psp} \alias{is.marked.quad} \alias{is.mppm} \alias{is.multitype.msr} \alias{is.multitype.quad} \alias{is.multitype.default} \alias{is.poisson.mppm} \alias{is.pp3} \alias{is.ppx} \alias{is.psp} \alias{is.scov} \alias{is.sob} \alias{is.tess} \alias{k3engine} \alias{Kborder.engine} \alias{Knone.engine} \alias{Krect.engine} \alias{Kount} \alias{Kwtsum} \alias{Kpcf.kppm} \alias{killinteraction} \alias{km.rs.opt} \alias{kppmComLik} \alias{kppmMinCon} \alias{kppmPalmLik} \alias{kraever} \alias{kraeverRandomFields} \alias{labels.ppm} \alias{ldtEngine} \alias{levels.im} \alias{levels<-.im} \alias{levelsAsFactor} \alias{linearKengine} \alias{linearKmulti} \alias{linearKmulti.inhom} \alias{linearKmultiEngine} \alias{linearpcfengine} \alias{linearpcfmulti} \alias{linearpcfmulti.inhom} \alias{linearPCFmultiEngine} \alias{listof} \alias{localKengine} \alias{localpcfengine} \alias{localpcfmatrix} \alias{local2lpp} \alias{logicalIndex} \alias{logi.dummy} \alias{logi.engine} \alias{logLik.vblogit} \alias{makeLinnetTolerance} \alias{maskLaslett} \alias{match2DkernelName} \alias{parbreak} \alias{plan.legend.layout} \alias{PDEdensityLPP} \alias{PoisSaddle} \alias{PoisSaddleArea} \alias{PoisSaddleGeyer} \alias{PoisSaddlePairwise} \alias{polyLaslett} \alias{polytileareaEngine} \alias{positiveIndex} \alias{PPMmodelmatrix} \alias{putSpatstatVariable} \alias{lookup.im} \alias{lookup2DkernelInfo} \alias{majorminorversion} \alias{make.even.breaks} \alias{makefvlabel} \alias{makeunits} \alias{markappend} \alias{markcbind} \alias{markformat} \alias{markformat.ppp} \alias{markformat.ppx} \alias{markformat.psp} \alias{markformat.default} \alias{mark.scale.default} \alias{markspace.integral} \alias{marks.default} \alias{marks.quad} \alias{\%mapp\%} %DoNotExport %NAMESPACE export("%mapp%") \alias{markappendop} \alias{marksubset} \alias{markreplicateop} \alias{\%mrep\%} %DoNotExport %NAMESPACE export("%mrep%") \alias{marksubsetop} 
\alias{\%msub\%} %DoNotExport %NAMESPACE export("%msub%") \alias{mask2df} \alias{match.kernel} \alias{maxflow} \alias{mctestSigtraceEngine} \alias{meanlistfv} \alias{meanX.owin} \alias{meanY.owin} \alias{model.se.image} \alias{modelFrameGam} \alias{mpl.engine} \alias{mpl.get.covariates} \alias{mpl.prepare} \alias{mpl.usable} \alias{MultiPair.checkmatrix} \alias{multiply.only.finite.entries} \alias{multiplicityNumeric} \alias{na.handle.im} \alias{names.hyperframe} \alias{names<-.fv} \alias{names<-.hyperframe} \alias{nearest.pixel} \alias{nearest.valid.pixel} \alias{newformula} \alias{newstyle.coeff.handling} \alias{nncleanEngine} \alias{nndcumfun} \alias{no.trend.ppm} \alias{n.quad} \alias{numberwithunit} \alias{numeric.columns} \alias{objsurfEngine} \alias{onecolumn} \alias{optimStatus} \alias{outdated.interact} \alias{oversize.quad} \alias{owinpolycheck} \alias{owinpoly2mask} \alias{owin2polypath} \alias{pairs.listof} \alias{pairs.solist} \alias{param.quad} \alias{partialModelMatrix} \alias{pcf3engine} \alias{pcfmulti.inhom} \alias{pickoption} \alias{plotEachLayer} \alias{ploterodewin} \alias{ploterodeimage} \alias{plot3Dpoints} \alias{plotPolygonBdry} \alias{plot.addvar} \alias{plot.barplotdata} \alias{plot.bw.frac} \alias{plot.bw.optim} \alias{plot.localpcfmatrix} \alias{plot.lurk} \alias{plot.minconfit} \alias{plot.parres} \alias{plot.plotpairsim} \alias{plot.pppmatching} \alias{plot.profilepl} \alias{plot.qqppm} \alias{plot.spatialcdf} \alias{plot.studpermutest} \alias{ppllengine} \alias{ppm.default} \alias{ppmCovariates} \alias{ppmDerivatives} \alias{ppmInfluenceEngine} \alias{pppdist.mat} \alias{pppdist.prohorov} \alias{ppsubset} \alias{predict.vblogit} \alias{prefixfv} \alias{prepareTitle} \alias{printStatus} \alias{print.addvar} \alias{print.anylist} \alias{print.autoexec} \alias{print.bt.frame} \alias{print.bugtable} \alias{print.bw.frac} \alias{print.bw.optim} \alias{print.colourmap} \alias{print.diagppm} \alias{print.distfun} 
\alias{print.detpointprocfamily} \alias{print.detpointprocfamilyfun} \alias{print.envelope} \alias{print.ewcdf} \alias{print.fasp} \alias{print.fv} \alias{print.fvfun} \alias{print.funxy} \alias{print.hasenvelope} \alias{print.hierarchicalordering} \alias{print.hyperframe} \alias{print.indicfun} \alias{print.influence.ppm} \alias{print.interact} \alias{print.intermaker} \alias{print.isf} \alias{print.laslett} \alias{print.layered} \alias{print.leverage.ppm} \alias{print.lintess} \alias{print.localpcfmatrix} \alias{print.lut} \alias{print.minconfit} \alias{print.mppm} \alias{print.msr} \alias{print.nnfun} \alias{print.numberwithunit} \alias{print.onearrow} \alias{print.parres} \alias{print.plotpairsim} \alias{print.plotppm} \alias{print.pppmatching} \alias{print.profilepl} \alias{print.quadrattest} \alias{print.qqppm} \alias{print.rat} \alias{print.rmhcontrol} \alias{print.rmhexpand} \alias{print.rmhmodel} \alias{print.rmhstart} \alias{print.rmhInfoList} \alias{print.rppm} \alias{print.splitppp} \alias{print.simplepanel} \alias{print.Smoothfun} \alias{print.solist} \alias{print.splitppx} \alias{print.summary.hyperframe} \alias{print.summary.listof} \alias{print.summary.linim} \alias{print.summary.linnet} \alias{print.summary.lintess} \alias{print.summary.logiquad} \alias{print.summary.lut} \alias{print.summary.mppm} \alias{print.summary.owin} \alias{print.summary.ppp} \alias{print.summary.psp} \alias{print.summary.rmhexpand} \alias{print.summary.solist} \alias{print.summary.splitppp} \alias{print.summary.splitppx} \alias{print.summary.units} \alias{print.symbolmap} \alias{print.textstring} \alias{print.texturemap} \alias{print.tess} \alias{print.timed} \alias{print.vblogit} \alias{print.yardstick} \alias{project3Dhom} \alias{putlastshift} \alias{quad} \alias{quad.mppm} \alias{quadBlockSizes} \alias{RandomFieldsSafe} \alias{ratfv} \alias{recognise.spatstat.type} \alias{rectquadrat.breaks} \alias{rectquadrat.countEngine} \alias{reduceformula} \alias{reheat} 
\alias{RelevantDeviation} \alias{repair.image.xycoords} \alias{replacementIndex} \alias{representativeRows} \alias{resolveEinfo} \alias{resolve.vargamma.shape} \alias{rgbNA} \alias{rhohatEngine} \alias{rhohatCalc} \alias{rMaternInhibition} \alias{RmhExpandRule} \alias{rmhsnoop} \alias{rocData} \alias{rocModel} \alias{roseContinuous} \alias{ruletextline} \alias{quadrat.testEngine} \alias{quadscheme.replicated} \alias{quadscheme.spatial} \alias{pointgrid} \alias{rastersample} \alias{rasterx.mask} \alias{rastery.mask} \alias{rasterxy.mask} \alias{rasterx.im} \alias{rastery.im} \alias{rasterxy.im} \alias{rebadge.fv} \alias{rebadge.as.crossfun} \alias{rebadge.as.dotfun} \alias{rebound} \alias{rebound.im} \alias{rebound.ppp} \alias{rebound.psp} \alias{rebound.owin} \alias{reconcile.fv} \alias{rename.fv} \alias{repair.old.factor.image} \alias{reincarnate.interact} \alias{resid4plot} \alias{resid1plot} \alias{resid1panel} \alias{resolve.2D.kernel} \alias{restrict.mask} \alias{reversePolyclipArgs} \alias{rmax.Rigid} \alias{rmax.rule} \alias{rotxy} \alias{rotxypolygon} \alias{row.names.hyperframe} \alias{row.names<-.hyperframe} \alias{runifpoispp} \alias{runifpoisppOnLines} \alias{runifrect} \alias{rmhResolveControl} \alias{rmhResolveExpansion} \alias{rmhResolveTypes} \alias{rmhSnoopEnv} \alias{rmhcontrol.rmhcontrol} \alias{rmhcontrol.list} \alias{rmhEngine} \alias{rmhmodel.rmhmodel} \alias{rmhstart.rmhstart} \alias{rmhstart.list} \alias{rmpoint.I.allim} \alias{rpoint.multi} \alias{safedeldir} \alias{safelookup} \alias{scalardilate.breakpts} \alias{scalardilate.diagramobj} \alias{scalardilate.msr} \alias{scanmeasure} \alias{scanmeasure.ppp} \alias{scanmeasure.im} \alias{scanBinomLRTS} \alias{scanPoisLRTS} \alias{second.moment.calc} \alias{second.moment.engine} \alias{sewpcf} \alias{sewsmod} \alias{shift.diagramobj} \alias{shift.influence.ppm} \alias{shift.leverage.ppm} \alias{shift.msr} \alias{shift.quadratcount} \alias{shift.quadrattest} \alias{shiftxy} 
\alias{shiftxypolygon} \alias{signalStatus} \alias{simulate.profilepl} \alias{simulrecipe} \alias{slr.prepare} \alias{slrAssemblePixelData} \alias{Smooth.solist} \alias{smoothcrossEngine} \alias{smoothpointsEngine} \alias{sort.im} \alias{sortalongsegment} \alias{spatstat.xy.coords} \alias{spatstatClusterModelInfo} \alias{spatstatDPPModelInfo} \alias{spatstatRmhInfo} \alias{spatialCDFframe} \alias{spatialCDFtest} \alias{splitHybridInteraction} \alias{sp.foundclass} \alias{sp.foundclasses} \alias{sphere.volume} \alias{store.versionstring.spatstat} \alias{str.hyperframe} \alias{strausscounts} \alias{suffloc} \alias{suffstat.generic} \alias{suffstat.poisson} \alias{summarise.trend} \alias{summary.envelope} \alias{summary.funxy} \alias{summary.hyperframe} \alias{summary.lintess} \alias{summary.logiquad} \alias{summary.lut} \alias{summary.mppm} \alias{summary.profilepl} \alias{summary.pppmatching} \alias{summary.ppx} \alias{summary.splitppx} \alias{summary.rmhexpand} \alias{summary.vblogit} \alias{sumsymouter} \alias{superimposeMarks} \alias{symbolmaptype} \alias{tail.hyperframe} \alias{tensor1x1} \alias{test.crossing.psp} \alias{test.selfcrossing.psp} \alias{thinjump} \alias{tilecentroids} \alias{trianglediameters} \alias{trim.mask} \alias{tweak.fv.entry} \alias{tweak.ratfv.entry} \alias{tweak.coefs} \alias{twostage.test} \alias{\%unit\%} %DoNotExport %NAMESPACE export("%unit%") \alias{unitname.default} \alias{unitname<-.default} \alias{unstack.solist} \alias{unstack.layered} \alias{unstackFilter} \alias{update.im} \alias{update.ippm} \alias{update.rmhstart} \alias{validradius} \alias{validate2Dkernel} \alias{validate.angles} \alias{validate.lpp.coords} \alias{validate.mask} \alias{validate.quad} \alias{validate.weights} \alias{vanilla.fv} \alias{varcountEngine} %\alias{vblogit} %DoNotExport %\alias{vblogit.fmla} %DoNotExport \alias{versionstring.interact} \alias{versionstring.ppm} \alias{versionstring.spatstat} \alias{verifyclass} \alias{vnnFind} \alias{Window.lintess} 
\alias{Window<-.linnet} \alias{Window<-.lpp} \alias{warn.once} \alias{waxlyrical} \alias{windows.mppm} \alias{w.quad} \alias{x.quad} \alias{y.quad} \alias{xy.grid} \alias{X2testEngine} \alias{xtfrm.im} \alias{xypolygon2psp} \alias{xypolyselfint} %%%% sparse 3D arrays \alias{sparse3Darray} \alias{as.sparse3Darray} \alias{dim.sparse3Darray} \alias{dim<-.sparse3Darray} \alias{dimnames.sparse3Darray} \alias{dimnames<-.sparse3Darray} \alias{print.sparse3Darray} \alias{aperm.sparse3Darray} \alias{as.array.sparse3Darray} \alias{[.sparse3Darray} \alias{[<-.sparse3Darray} \alias{anyNA.sparse3Darray} \alias{RelevantZero} \alias{RelevantEmpty} \alias{isRelevantZero} \alias{unionOfSparseIndices} \alias{Math.sparse3Darray} \alias{Ops.sparse3Darray} \alias{Summary.sparse3Darray} \alias{inside3Darray} \alias{SparseEntries} \alias{SparseIndices} \alias{EntriesToSparse} \alias{mapSparseEntries} \alias{applySparseEntries} \alias{sumsymouterSparse} \alias{tenseur} \alias{marginSums} \alias{rbindCompatibleDataFrames} \alias{bind.sparse3Darray} %%%% \alias{spatstatDiagnostic} %% \alias{as.ppplist} \alias{as.imlist} \alias{pointsAlongNetwork} \alias{expandSparse} \alias{allElementsIdentical} \alias{resampleNetworkDataFrame} \alias{sparseVectorCumul} %% \alias{as.ppm.lppm} \alias{as.ppm.rppm} \alias{predict.profilepl} %%%%%%% \description{ Internal spatstat functions. 
} \usage{ \method{[}{splitppx}(x, \dots) \method{[}{splitppx}(x, \dots) <- value \method{[}{diagramobj}(x, \dots) \method{[}{rat}(x, \dots) acedist.show(X, Y, n, d, timelag) acedist.noshow(X, Y, n, d) active.interactions(object) adaptcoef(new.coef, fitcoef, drop) adjust.ratfv(f, columns, numfactor, denfactor) affinexy(X, mat, vec, invert) affinexypolygon(p, mat, vec, detmat) ang2rad(ang, unit, start, clockwise) anycrossing.psp(A,B) ApplyConnected(X, Engine, r, \dots, rule, auxdata) applytolayers(L, FUN, \dots) applyPolyclipArgs(x, p) areadelta2(X, r, \dots, sparseOK) areaGain.diri(u, X, r, \dots, W, verbose) areaGain.grid(u, X, r, \dots, W, ngrid) areaLoss.diri(X, r, \dots, W, subset) areaLoss.grid(X, r, \dots, W, subset, method = c("count", "distmap"), ngrid = spatstat.options("ngrid.disc"), exact = FALSE) assemble.plot.objects(xlim, ylim, \dots, lines, polygon) AsymmDistance.psp(X, Y, metric, method) as.breakpts(\dots) \method{as.character}{units}(x, \dots) \method{as.data.frame}{bw.optim}(x, \dots) \method{as.data.frame}{fv}(x, \dots) \method{as.double}{im}(x, \dots) \method{as.linfun}{linfun}(X, \dots) \method{as.list}{hyperframe}(x, \dots) as.listof(x) \method{as.owin}{lintess}(W, \dots) as.units(s) augment.msr(x, \dots, sigma) BartCalc(fY, fK) bbEngine(\dots) bermantestEngine(model, covariate, which, alternative, \dots, modelname, covname, dataname) bdry.mask(W) bind.ratfv(x, numerator, denominator, labl, desc, preferred, ratio, quotient) blankcoefnames(x) bounding.box3(\dots) break.holes(x, splitby, depth, maxdepth) breakpts(val, maxi, even = FALSE, npos = NULL, step = NULL) breakpts.from.r(r) bt.frame(Q, trend, interaction, \dots, covariates, correction, rbord, use.gam, allcovar) bw.optim(cv, h, iopt, \dots, cvname, hname, criterion, unitname) calc.DR(COV, z, Dim) calc.NNIR(COV, z, pos, Dim) calc.SAVE(COV, z, Dim) calc.SIR(COV, z) calc.TSE(COV, z, pos, Dim1, Dim2) cannot.update(\dots) cartesian(pp, markset, fac = TRUE) cellmiddles(W, nx, ny, npix, 
distances) censtimeCDFest(o, cc, d, breaks, \dots, KM, RS, HAN, RAW, han.denom, tt, pmax) change.default.expand(x, newdefault) checkbigmatrix(n, m, fatal, silent) checkfields(X,L) checksolve(M, action, descrip, target) check.finespacing(r, eps, win, rmaxdefault, context, action, rname) check.hist.lengths(hist,breaks) check.mat.mul(A, B, Acols, Brows, fatal) check.testfun(f, f1, X) circticks(R, at, unit, start, clockwise, labels) clarkevansCalc(X, correction, clipregion, working) clip.psp(x, window, check, fragments) cliprect.psp(x, window, fragments) clippoly.psp(s, window, fragments) closethresh(X,R,S,twice,\dots) \method{coef}{summary.kppm}(object, \dots) \method{coef}{summary.ppm}(object, \dots) \method{coef}{vblogit}(object, \dots) coerce.marks.numeric(X, warn) \method{compatible}{rat}(A, B, \dots) compileCDF(D, B, r, \dots, han.denom, check) conform.ratfv(x) crosspairquad(Q,rmax,what) cobble.xy(x, y, f, fatal, \dots) codetime(x, hms, what) col.args.to.grey(x, \dots) colouroutputs(x) colouroutputs(x) <- value commonPolyclipArgs(\dots, p) conform.imagelist(X, Zlist) countingweights(id, areas, check = TRUE) CressieReadStatistic(OBS,EXP,lambda) CressieReadSymbol(lambda) CressieReadName(lambda) CVforPCF(bw, stuff) damaged.ppm(object) data.mppm(x) datagen.runifpointOnLines(n, L) datagen.runifpoisppOnLines(lambda, L) datagen.rpoisppOnLines(lambda, L, lmax, \dots, check) default.clipwindow(object, epsilon) default.linnet.tolerance(L) default.n.tiling(X, nd, ntile, npix, eps, random, quasi, verbose) default.ntile(X) deltasuffstat(model, \dots, restrict, dataonly, force, quadsub, sparseOK) Deviation(x, ref, leaveout, n, xi) dflt.redraw(button, name, env) densitycrossEngine(Xdata, Xquery, sigma, \dots, weights, edge, varcov, diggle, sorted) densitypointsEngine(x, sigma, \dots, kernel, scalekernel, weights, edge, varcov, leaveoneout, diggle, sorted, spill, cutoff) diagnose.ppm.engine(object, \dots, type, typename, opt, sigma, rbord, compute.sd, compute.cts, envelope, 
nsim, nrank, rv, oldstyle, splineargs, verbose) diagramobj(X, \dots) digestCovariates(\dots, W) digital.volume(range, nval, vside) dilate.owin(\dots) \method{dim}{fasp}(x) \method{dim}{hyperframe}(x) \method{dim}{im}(x) \method{dim}{msr}(x) \method{dim}{owin}(x) \method{dimnames}{fasp}(x) \method{dimnames}{fasp}(x) <- value \method{dimnames}{msr}(x) distributecbind(x) dist2dpath(dist, method="C") do.as.im(x, action, \dots, W, eps, dimyx, xy, na.replace) do.call.plotfun(fun, arglist, \dots) do.istat(panel) doMultiStraussHard(iradii, hradii, types) dppmFixIntensity(DPP, lambda, po) dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar) emptywindow(w) envelopeEngine(X, fun, simul, nsim, nrank, \dots, funargs, funYargs, verbose, clipdata, transform, global, ginterval, use.theory, alternative, scale, clamp, savefuns, savepatterns, saveresultof, weights, nsim2, VARIANCE, nSD, Yname, maxnerr, internal, cl, envir.user, expected.arg, do.pwrong, foreignclass, collectrubbish) envelopeProgressData(X, fun, \dots, exponent, alternative, leaveout, scale, clamp, normalize, deflate, rmin, save.envelope, savefuns, savepatterns) envelopeTest(X, \dots, exponent, alternative, rinterval, leaveout, scale, clamp, tie.rule, interpolate, save.interpolant, save.envelope, savefuns, savepatterns, Xname, verbose) \method{envelope}{hasenvelope}(Y, \dots, Yname) \method{envelope}{matrix}(Y, \dots, rvals, observed, theory, funX, nsim, nsim2, jsim, jsim.mean, type, alternative, scale, clamp, csr, use.theory, nrank, ginterval, nSD, savefuns, check, Yname, do.pwrong, weights, precomputed) equalpairs(U, X, marked=FALSE) equalpairs.quad(Q) equals.quad(Q) equalsfun.quad(Q) erodemask(w,r,strict) erode.owin(\dots) evalCovar(model, covariate, \dots) \method{evalCovar}{ppm}(model, covariate, \dots, lambdatype, dimyx, eps, interpolate, jitter, modelname, covname, dataname, subset) \method{evalCovar}{lppm}(model, covariate, \dots, lambdatype, eps, nd, interpolate, jitter, modelname, covname, 
dataname, subset) evalCovariate(covariate, locations) evalInteraction(X,P,E,interaction,correction,\dots,precomputed,savecomputed) evalInterEngine(X,P,E,interaction,correction,\dots, Reach,precomputed,savecomputed) evalPairPotential(X,P,E,pairpot,potpars,R) evalSparse3Dentrywise(expr, envir) evaluate2Dkernel(kernel, x, y, sigma, varcov, \dots, scalekernel) even.breaks.owin(w) exactdt(X, \dots) exactPdt(w) existsSpatstatVariable(name) expandSpecialLists(x, special) expandwinPerfect(W, expand, amount) ExpSmoothLog(X, \dots, at, weights) \method{extractAIC}{slrm}(fit, scale = 0, k = 2, \dots) extractAtomicQtests(x) fakemaintitle(bb, main, \dots) \method{family}{vblogit}(object, \dots) f3engine(x, y, z, box, vside, range, nval, correction) f3Cengine(x, y, z, box, vside, rmax, nrval) fasp(fns, which, formulae, dataname, title, rowNames, colNames, checkfv) FDMKERNEL(lppobj, sigma, dtt, weights, iterMax, sparse, dtx) fft2D(z, inverse, west) fftwAvailable() fill.coefs(coefs, required) findbestlegendpos(\dots) findCovariate(covname, scope, scopename=NULL) findcbind(root, depth, maxdepth) fii(interaction, coefs, Vnames, IsOffset) fillNA(x, value) flatfname(x) flipxypolygon(p) forbid.logi(object) \method{format}{numberwithunit}(x, \dots, collapse, modifier) FormatFaspFormulae(f, argname) framebottomleft(w) fvexprmap(x) fvlabels(x, expand=FALSE) fvlabels(x) <- value fvlabelmap(x, dot=TRUE) fvlegend(object, elang) g3engine(x, y, z, box, rmax, nrval, correction) g3Cengine(x, y, z, box, rmax, nrval) getdataname(defaultvalue, \dots, dataname) getfields(X, L, fatal = TRUE) getglmdata(object, drop=FALSE) getglmfit(object) getglmsubset(object) getlambda.lpp(lambda, X, subset, \dots, update, leaveoneout, loo.given, lambdaname) getlastshift(X) getppmdatasubset(object) getppmOriginalCovariates(object) getRandomFieldsModelGen(model) getSpatstatVariable(name) getSumFun(abbreviation, classname, ismarked, fatal) geyercounts(U,X,r,sat,Xcounts,EqualPairs) geyerdelta2(X,r,sat,\dots,sparseOK) 
GLMpredict(fit, data, coefs, changecoef, type) good.correction.K(X) %gridadjacencymatrix(dims) gridindex(x, y, xrange, yrange, nx, ny) grid1index(x, xrange, nx) grokIndexVector(ind, len, nama) grow.mask(M, xmargin=0, ymargin=xmargin) hackglmmPQL(fixed, random, family, data, correlation, weights, control, niter, verbose, subset, \dots, reltol) hasenvelope(X, E) HermiteCoefs(order) handle.r.b.args(r = NULL, breaks = NULL, window, pixeps = NULL, rmaxdefault) handle.rshift.args(W, \dots, radius, width, height, edge, clip, edgedefault) \method{head}{hyperframe}(x,n,\dots) hierarchicalordering(i, s) hiermat(x, h) ho.engine(model, \dots, nsim, nrmh, start, control, verb) hsvNA(h, s, v, alpha) IdenticalRows(i,j,a,b) idorempty(w, r, caller) illegal.iformula(ifmla, itags, dfvarnames) implemented.for.K(correction, windowtype, explicit) impliedpresence(tags, formula, df, extranames=character(0)) impliedcoefficients(object, tag) inpoint(W) instantiate.interact(x, par) interactionfamilyname(x) intermaker(f, blank) intX.owin(w) intX.xypolygon(polly) intY.owin(w) intY.xypolygon(polly) invokeColourmapRule(colfun, x, \dots, zlim, colargs) is.atomicQtest(x) is.cadlag(s) is.col.argname(x) is.data(Q) is.expandable(x) \method{is.expandable}{ppm}(x) \method{is.expandable}{rmhmodel}(x) is.fv(x) is.hyperframe(x) is.infline(x) is.interact(x) \method{is.marked}{default}(\dots) \method{is.marked}{msr}(X, \dots) \method{is.marked}{psp}(X, \dots) \method{is.marked}{quad}(X, na.action="warn", \dots) is.mppm(x) \method{is.multitype}{default}(X, \dots) \method{is.multitype}{msr}(X, \dots) \method{is.multitype}{quad}(X, na.action="warn", \dots) \method{is.poisson}{mppm}(x) is.pp3(x) is.ppx(x) is.psp(x) is.scov(x) is.sob(x) is.tess(x) k3engine(x, y, z, box, rmax, nrval, correction) Kborder.engine(X, rmax, nr, correction, weights, ratio) Knone.engine(X, rmax, nr, weights, ratio) Krect.engine(X, rmax, nr, correction, weights, ratio, fname) Kount(dIJ, bI, b, breaks) Kwtsum(dIJ, bI, wIJ, b, w, breaks) 
Kpcf.kppm(model, what) killinteraction(model) km.rs.opt(o, cc, d, breaks, KM, RS) kppmComLik(X, Xname, po, clusters, control, weightfun, rmax, algorithm, DPP, \dots) kppmMinCon(X, Xname, po, clusters, control, statistic, statargs, algorithm, DPP, \dots) kppmPalmLik(X, Xname, po, clusters, control, weightfun, rmax, algorithm, DPP, \dots) kraever(package, fatal) kraeverRandomFields() \method{labels}{ppm}(object, \dots) ldtEngine(nv, ns, from, to, seglen, huge, coUXord, vnndist, vnnwhich, vnnlab) \method{levels}{im}(x) \method{levels}{im}(x) <- value levelsAsFactor(x) linearKengine(X, \dots, r, reweight, denom, correction, ratio, showworking) linearKmulti(X, I, J, r, \dots, correction) linearKmulti.inhom(X, I, J, lambdaI, lambdaJ, r, \dots, correction, normalise) linearpcfengine(X, \dots, r, reweight, denom, correction, ratio) linearpcfmulti(X, I, J, r, \dots, correction) linearpcfmulti.inhom(X, I, J, lambdaI, lambdaJ, r, \dots, correction, normalise) linearKmultiEngine(X, I, J, \dots, r, reweight, denom, correction, showworking) linearPCFmultiEngine(X, I, J, \dots, r, reweight, denom, correction, showworking) listof(\dots) localKengine(X, \dots, wantL, lambda, correction, verbose, rvalue) localpcfengine(X, \dots, delta, rmax, nr, stoyan, lambda) localpcfmatrix(X, i, \dots, lambda, delta, rmax, nr, stoyan) local2lpp(L, seg, tp, X, df.only) logicalIndex(i, nama, len) logi.dummy(X, dummytype, nd, mark.repeat, \dots) logi.engine(Q, trend, interaction, \dots, covariates, subsetexpr, clipwin, correction, rbord, covfunargs, allcovar, vnamebase, vnameprefix, justQ, savecomputed, precomputed, VB) \method{logLik}{vblogit}(object, \dots) makeLinnetTolerance maskLaslett(X, \dots, eps, dimyx, xy, oldX, verbose, plotit) match2DkernelName(kernel) parbreak(terse) plan.legend.layout(B, \dots, side, sep, size, sep.frac, size.frac, started, map) PDEdensityLPP(x, sigma, \dots, weights, dx, dt, fun, finespacing, finedata) PoisSaddle(beta, fi) PoisSaddleArea(beta, fi) 
PoisSaddleGeyer(beta, fi) PoisSaddlePairwise(beta, fi) polyLaslett(X, \dots, oldX, verbose, plotit) polytileareaEngine(P, xrange, yrange, nx, ny) positiveIndex(i, nama, len) PPMmodelmatrix(object, data, \dots, Q, keepNA, irregular) \method{print}{localpcfmatrix}(x, \dots) \method{plot}{localpcfmatrix}(x, \dots) putSpatstatVariable(name, value) \method{[}{localpcfmatrix}(x, i, \dots) \method{[}{pp3}(x, i, drop, \dots) lookup.im(Z, x, y, naok, strict) lookup2DkernelInfo(kernel) majorminorversion(v) make.even.breaks(bmax, npos, bstep) makefvlabel(op, accent, fname, sub, argname) makeunits(sing, plur, mul) markappend(\dots) markcbind(\dots) markformat(x) \method{markformat}{ppp}(x) \method{markformat}{ppx}(x) \method{markformat}{psp}(x) \method{markformat}{default}(x) mark.scale.default(marx, w, \dots, markscale, maxsize, meansize, characters) markspace.integral(X) \method{marks}{default}(x, \dots) \method{marks}{quad}(x, dfok=FALSE, \dots) markappendop(x, y) x \%mapp\% y marksubset(x, index, format) marksubsetop(x, i) x \%msub\% i markreplicateop(x, n) x \%mrep\% n mask2df(w) match.kernel(kernel) maxflow(costm) mctestSigtraceEngine(R, devdata, devsim, \dots, interpolate, confint, alpha, exponent, unitname) meanlistfv(z, \dots) meanX.owin(w) meanY.owin(w) model.se.image(fit, W, \dots, what) modelFrameGam(formula, \dots) mpl.engine(Q, trend, interaction, \dots, covariates, subsetexpr, clipwin, covfunargs, correction, rbord, use.gam, gcontrol, GLM, GLMfamily, GLMcontrol, famille, forcefit, nd, eps, allcovar, callstring, precomputed, savecomputed, preponly, rename.intercept, justQ, weightfactor) mpl.get.covariates(covariates, locations, type, covfunargs, need.deriv) mpl.prepare(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, Pname, callstring, \dots, subsetexpr, covfunargs, allcovar, precomputed, savecomputed, vnamebase, vnameprefix, warn.illegal, warn.unidentifiable, weightfactor, skip.border) mpl.usable(x) MultiPair.checkmatrix(mat, 
n, matname, naok, zerook, asymmok) multiplicityNumeric(x) multiply.only.finite.entries(x, a) na.handle.im(X, na.replace) \method{names}{fv}(x) <- value \method{names}{hyperframe}(x) \method{names}{hyperframe}(x) <- value nearest.pixel(x, y, Z) nearest.valid.pixel(x, y, Z) newformula(old, change, eold, enew) newstyle.coeff.handling(object) nncleanEngine(kthNND, k, d, \dots, tol, maxit, plothist, lineargs, verbose, Xname) nndcumfun(X, \dots, r) no.trend.ppm(x) n.quad(Q) numberwithunit(x, u) numeric.columns(M, logical, others) objsurfEngine(objfun, optpar, objargs, \dots, dotargs, objname, ngrid, ratio, verbose) onecolumn(m) optimStatus(x, call) printStatus(x, errors.only) signalStatus(x, errors.only) outdated.interact(object) oversize.quad(Q, \dots, nU, nX, p) owinpolycheck(W, verbose=TRUE) owinpoly2mask(w, rasta, check=TRUE) owin2polypath(w) \method{pairs}{listof}(\dots, plot=TRUE) \method{pairs}{solist}(\dots, plot=TRUE) param.quad(Q) partialModelMatrix(X,D,model,callstring,\dots) pcf3engine(x, y, z, box, rmax, nrval, correction, delta) pcfmulti.inhom(X, I, J, lambdaI = NULL, lambdaJ = NULL, \dots, r = NULL, breaks = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), sigma = NULL, varcov = NULL, Iname = "points satisfying condition I", Jname = "points satisfying condition J") pickoption(what="option", key, keymap, \dots, exact=FALSE, list.on.err=TRUE, die=TRUE, multi=FALSE, allow.all=TRUE) plotEachLayer(x, \dots, main, plotargs, add, show.all, do.plot) ploterodewin(W1, W2, col.edge, col.inside, do.plot, \dots) ploterodeimage(W, Z, \dots, Wcol, rangeZ, colsZ, do.plot) plot3Dpoints(xyz, eye, org, \dots, type, xlim, ylim, zlim, add, box, main, cex, box.back, box.front) plotPolygonBdry(x, \dots) \method{plot}{addvar}(x, \dots, do.points=FALSE) \method{plot}{barplotdata}(x, \dots) \method{plot}{bw.frac}(x, \dots) \method{plot}{bw.optim}(x, \dots, showopt, optargs) \method{plot}{lurk}(x, \dots, shade) 
\method{plot}{minconfit}(x, \dots) \method{plot}{parres}(x, \dots) \method{plot}{pppmatching}(x, addmatch = NULL, main = NULL, \dots) \method{plot}{plotpairsim}(x, \dots) \method{plot}{profilepl}(x, \dots, add, main, tag, coeff, xvariable, col, lty, lwd, col.opt, lty.opt, lwd.opt) \method{plot}{qqppm}(x, \dots, limits=TRUE, monochrome=spatstat.options('monochrome'), limcol=if(monochrome) "black" else "red") \method{plot}{spatialcdf}(x, \dots, xlab, ylab) \method{plot}{studpermutest}(x, fmla, \dots, lty = NULL, col = NULL, lwd = NULL, lty.theo = NULL, col.theo = NULL, lwd.theo = NULL, lwd.mean = if (meanonly) 1 else NULL, lty.mean = lty, col.mean = col, separately = FALSE, meanonly = FALSE, main = if (meanonly) "group means" else NULL, xlim = NULL, ylim = NULL, ylab = NULL, legend = !add, legendpos = "topleft", lbox = FALSE, add = FALSE) ppllengine(X, Y, action="project", check=FALSE) \method{ppm}{default}(Q, trend, interaction, \dots, covariates, data, covfunargs, subset, clipwin, correction, rbord, use.gam, method, forcefit, emend, project, prior.mean, prior.var, nd, eps, gcontrol, nsim, nrmh, start, control, verb, callstring) ppmCovariates(model) ppmDerivatives(fit, what, Dcovfun, loc, covfunargs) ppmInfluenceEngine(fit, what, \dots, iScore, iHessian, iArgs, drop, method, precomputed, sparseOK, fitname, multitypeOK, entrywise, matrix.action, geomsmooth) pppdist.mat(X, Y, cutoff = 1, q = 1, matching = TRUE, precision = 9, approximation = 10) pppdist.prohorov(X, Y, n, dfix, type, cutoff, matching, ccode, auction, precision, approximation) ppsubset(X, I, Iname, fatal) \method{predict}{vblogit}(object, newdata, type, se.fit, dispersion, terms, na.action, \dots) prefixfv(x, tagprefix, descprefix, lablprefix, whichtags) prepareTitle(main) \method{print}{addvar}(x, \dots) \method{print}{anylist}(x, \dots) \method{print}{autoexec}(x, \dots) \method{print}{bt.frame}(x, \dots) \method{print}{bugtable}(x, \dots) \method{print}{bw.frac}(x, \dots) \method{print}{bw.optim}(x, 
\dots) \method{print}{colourmap}(x, \dots) \method{print}{diagppm}(x, \dots) \method{print}{distfun}(x, \dots) \method{print}{detpointprocfamily}(x, \dots) \method{print}{detpointprocfamilyfun}(x, \dots) \method{print}{envelope}(x, \dots) \method{print}{ewcdf}(x, digits, \dots) \method{print}{fasp}(x, \dots) \method{print}{funxy}(x, \dots) \method{print}{fv}(x, \dots, tight) \method{print}{fvfun}(x, \dots) \method{print}{hasenvelope}(x, \dots) \method{print}{hierarchicalordering}(x, \dots) \method{print}{hyperframe}(x, \dots) \method{print}{indicfun}(x, \dots) \method{print}{influence.ppm}(x, \dots) \method{print}{interact}(x, \dots, family, brief, banner) \method{print}{intermaker}(x, \dots) \method{print}{isf}(x, \dots) \method{print}{laslett}(x, \dots) \method{print}{layered}(x, \dots) \method{print}{leverage.ppm}(x, \dots) \method{print}{lintess}(x, \dots) \method{print}{lut}(x, \dots) \method{print}{minconfit}(x, \dots) \method{print}{mppm}(x, \dots) \method{print}{msr}(x, \dots) \method{print}{nnfun}(x, \dots) \method{print}{numberwithunit}(x, \dots) \method{print}{onearrow}(x, \dots) \method{print}{parres}(x, \dots) \method{print}{plotppm}(x, \dots) \method{print}{plotpairsim}(x, \dots) \method{print}{pppmatching}(x, \dots) \method{print}{profilepl}(x, \dots) \method{print}{quadrattest}(x, \dots) \method{print}{qqppm}(x, \dots) \method{print}{rat}(x, \dots) \method{print}{rmhcontrol}(x, \dots) \method{print}{rmhexpand}(x, \dots, prefix=TRUE) \method{print}{rmhmodel}(x, \dots) \method{print}{rmhstart}(x, \dots) \method{print}{rmhInfoList}(x, \dots) \method{print}{rppm}(x, \dots) \method{print}{simplepanel}(x, \dots) \method{print}{Smoothfun}(x, \dots) \method{print}{solist}(x, \dots) \method{print}{splitppp}(x, \dots) \method{print}{splitppx}(x, \dots) \method{print}{summary.hyperframe}(x, \dots) \method{print}{summary.linim}(x, \dots) \method{print}{summary.linnet}(x, \dots) \method{print}{summary.lintess}(x, \dots) \method{print}{summary.listof}(x, \dots) 
\method{print}{summary.logiquad}(x, \dots, dp=3) \method{print}{summary.lut}(x, \dots) \method{print}{summary.mppm}(x, \dots, brief) \method{print}{summary.owin}(x, \dots) \method{print}{summary.ppp}(x, \dots, dp) \method{print}{summary.psp}(x, \dots) \method{print}{summary.rmhexpand}(x, \dots) \method{print}{summary.splitppp}(x, \dots) \method{print}{summary.solist}(x, \dots) \method{print}{summary.splitppx}(x, \dots) \method{print}{summary.units}(x, \dots) \method{print}{symbolmap}(x, \dots) \method{print}{textstring}(x, \dots) \method{print}{texturemap}(x, \dots) \method{print}{tess}(x, \dots, brief=FALSE) \method{print}{timed}(x, \dots) \method{print}{vblogit}(x, \dots) \method{print}{yardstick}(x, \dots) project3Dhom(xyz, eye, org, vert) putlastshift(X, vec) quad(data, dummy, w, param) quad.mppm(x) quadBlockSizes(nX, nD, p, nMAX, announce) RandomFieldsSafe() ratfv(df, numer, denom, \dots, ratio) recognise.spatstat.type(x) rectquadrat.breaks(xr, yr, nx = 5, ny = nx, xbreaks = NULL, ybreaks = NULL) rectquadrat.countEngine(x, y, xbreaks, ybreaks, weights) reduceformula(fmla, deletevar, verbose) reheat(model, invtemp) RelevantDeviation(x, alternative, clamp, scaling) repair.image.xycoords(x) replacementIndex(ii, stuff) representativeRows(x) resolveEinfo(x, what, fallback, warn, atomic) resolve.vargamma.shape(\dots, nu.ker, nu.pcf, default = FALSE) rgbNA(red, green, blue, alpha, maxColorValue) rhohatEngine(model, covariate, reference, volume, \dots, subset, weights, method, horvitz, smoother, resolution, n, bw, adjust, from, to, bwref, covname, covunits, confidence, modelcall, callstring) rhohatCalc(ZX, Zvalues, lambda, denom, \dots, weights, lambdaX, method, horvitz, smoother, n, bw, adjust, from, to, bwref, covname, confidence, covunits, modelcall, callstring, savestuff) rMaternInhibition(type, kappa, r, win, stationary, \dots, nsim, drop) RmhExpandRule(nama) rocData(covariate, nullmodel, \dots, high) rocModel(lambda, nullmodel, \dots, high) rmhsnoop(\dots, Wsim, 
Wclip, R, xcoords, ycoords, mlevels, mcodes, irep, itype, proptype, proplocn, propmark, propindx, numerator, denominator) roseContinuous(ang, rad, unit, \dots, start, clockwise, main, labels, at, do.plot) ruletextline(ch, n, terse) quadrat.testEngine(X, nx, ny, alternative, method, conditional, CR, \dots, nsim, Xcount, xbreaks, ybreaks, tess, fit, Xname, fitname) quadscheme.replicated(data, dummy, method, \dots) quadscheme.spatial(data, dummy, method, \dots) pointgrid(W, ngrid) rastersample(X, Y) rasterx.mask(w, drop) rastery.mask(w, drop) rasterxy.mask(w, drop) rasterx.im(x) rastery.im(x) rasterxy.im(x, drop) rebadge.fv(x, new.ylab, new.fname, tags, new.desc, new.labl, new.yexp, new.dotnames, new.preferred, new.formula, new.tags) rebadge.as.crossfun(x, main, sub, i, j) rebadge.as.dotfun(x, main, sub, i) rebound(x, rect) \method{rebound}{im}(x, rect) \method{rebound}{ppp}(x, rect) \method{rebound}{psp}(x, rect) \method{rebound}{owin}(x, rect) reconcile.fv(\dots) rename.fv(x, fname, ylab, yexp) repair.old.factor.image(x) reincarnate.interact(object) resid4plot(RES, plot.neg, plot.smooth, spacing, outer, srange, monochrome, main, xlab, ylab, rlab, col.neg, col.smooth, \dots) resid1plot(RES, opt, plot.neg, plot.smooth, srange, monochrome, main, add, show.all, do.plot, col.neg, col.smooth, \dots) resid1panel(observedX, observedV, theoreticalX, theoreticalV, theoreticalSD, xlab,ylab, \dots, do.plot) resolve.2D.kernel(\dots, sigma, varcov, x, mindist, adjust, bwfun, allow.zero) restrict.mask(M, W) reversePolyclipArgs(x, p) rmax.Rigid(X, g) rmax.rule(fun, W, lambda) rotxy(X, angle = pi/2) rotxypolygon(p, angle = pi/2) rmhResolveControl(control, model) rmhResolveExpansion(win, control, imagelist, itype) rmhResolveTypes(model, start, control) rmhSnoopEnv(Xinit, Wclip, R) \method{rmhcontrol}{rmhcontrol}(\dots) \method{rmhcontrol}{list}(\dots) rmhEngine(InfoList, \dots, verbose, kitchensink, preponly, snoop, overrideXstart, overrideclip) \method{rmhmodel}{rmhmodel}(model, 
\dots) \method{rmhstart}{rmhstart}(start, \dots) \method{rmhstart}{list}(start, \dots) rmpoint.I.allim(n, f, types) \method{row.names}{hyperframe}(x) \method{row.names}{hyperframe}(x) <- value rpoint.multi(n, f, fmax, marks, win, giveup, verbose, warn, nsim, drop) runifpoispp(lambda, win, \dots, nsim, drop) runifpoisppOnLines(lambda, L, nsim) runifrect(n, win, nsim, drop) safedeldir(X) safelookup(Z, x, factor, warn) \method{scalardilate}{breakpts}(X, f, \dots) \method{scalardilate}{diagramobj}(X, f, \dots) \method{scalardilate}{msr}(X, f, \dots) scanmeasure(X, \dots) \method{scanmeasure}{ppp}(X, r, \dots, method) \method{scanmeasure}{im}(X, r, \dots) scanPoisLRTS(nZ, nG, muZ, muG, alternative) scanBinomLRTS(nZ, nG, muZ, muG, alternative) second.moment.calc(x, sigma, edge, what, \dots, varcov=NULL, expand=FALSE, debug=FALSE) second.moment.engine(x, sigma, edge, what, \dots, kernel, obswin, varcov, npts, debug) sewpcf(d, w, denargs, lambda2area, divisor) sewsmod(d, ff, wt, Ef, rvals, method="smrep", \dots, nwtsteps=500) \method{shift}{diagramobj}(X, \dots) \method{shift}{influence.ppm}(X, \dots) \method{shift}{leverage.ppm}(X, \dots) \method{shift}{msr}(X, \dots) \method{shift}{quadratcount}(X, \dots) \method{shift}{quadrattest}(X, \dots) shiftxy(X, vec = c(0, 0)) shiftxypolygon(p, vec = c(0, 0)) \method{simulate}{profilepl}(object, \dots) simulrecipe(type, expr, envir, csr, pois, constraints) slr.prepare(CallInfo, envir, data, dataAtPoints, splitby, clip) slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, pixelarea) \method{Smooth}{solist}(X, \dots) smoothcrossEngine(Xdata, Xquery, values, sigma, \dots, weights, varcov, sorted) smoothpointsEngine(x, values, sigma, \dots, weights, varcov, leaveoneout, sorted, cutoff) \method{sort}{im}(x, \dots) sortalongsegment(df) spatstat.xy.coords(x, y) spatstatClusterModelInfo(name, onlyPCP) spatstatDPPModelInfo(model) spatstatRmhInfo(cifname) spatialCDFframe(model, covariate, \dots) spatialCDFtest(model, covariate, test, 
\dots, dimyx, eps, interpolate, jitter, nsim, verbose, modelname, covname, dataname) sphere.volume(range, nval = 10) splitHybridInteraction(coeffs, inte) sp.foundclass(cname, inlist, formalname, argsgiven) sp.foundclasses(cnames, inlist, formalname, argsgiven) store.versionstring.spatstat() \method{str}{hyperframe}(object, \dots) strausscounts(U,X,r,EqualPairs) suffloc(object) suffstat.generic(model, X, callstring) suffstat.poisson(model, X, callstring) summarise.trend(trend, w, a) \method{summary}{envelope}(object,\dots) \method{summary}{funxy}(object,\dots) \method{summary}{hyperframe}(object, \dots, brief=FALSE) \method{summary}{lintess}(object, \dots) \method{summary}{logiquad}(object, \dots, checkdup=FALSE) \method{summary}{lut}(object, \dots) \method{summary}{mppm}(object, \dots, brief=FALSE) \method{summary}{profilepl}(object, \dots) \method{summary}{pppmatching}(object, \dots) \method{summary}{ppx}(object, \dots) \method{summary}{rmhexpand}(object, \dots) \method{summary}{splitppx}(object, \dots) \method{summary}{vblogit}(object, \dots) sumsymouter(x, w) superimposeMarks(arglist, nobj) symbolmaptype(x) \method{tail}{hyperframe}(x,n,\dots) tensor1x1(A,B) test.crossing.psp(A,B) test.selfcrossing.psp(A) thinjump(n, p) tilecentroids(W, nx, ny) trianglediameters(iedge, jedge, edgelength, \dots, nvert, dmax, check) trim.mask(M, R, tolerant) tweak.fv.entry(x, current.tag, new.labl, new.desc, new.tag) tweak.ratfv.entry(x, \dots) tweak.coefs(model, new.coef) twostage.test(X, \dots, exponent, nsim, nsimsub, alternative, reuse, leaveout, interpolate, savefuns, savepatterns, verbose, testblurb) x \%unit\% u \method{unitname}{default}(x) \method{unitname}{default}(x) <- value \method{unstack}{solist}(x, \dots) \method{unstack}{layered}(x, \dots) unstackFilter(x) \method{update}{im}(object, \dots) \method{update}{ippm}(object, \dots, envir) \method{update}{rmhstart}(object, \dots) validradius(r, caller) validate2Dkernel(kernel, fatal) validate.angles(angles, unit, guess) 
validate.lpp.coords(X, fatal, context) validate.mask(w, fatal=TRUE) validate.quad(Q, fatal, repair, announce) validate.weights(x, recip, how, allowzero, allowinf) vanilla.fv(x) varcountEngine(g, B, lambdaB, f) %vblogit(y, X, offset, eps, m0, S0, S0i, xi0, verb, maxiter, \dots) %vblogit.fmla(formula, offset, data, subset, weights, verbose, epsilon, \dots) versionstring.interact(object) versionstring.ppm(object) versionstring.spatstat() verifyclass(X, C, N = deparse(substitute(X)), fatal = TRUE) vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax) \method{Window}{lintess}(X, \dots) \method{Window}{linnet}(X, \dots, check=TRUE) <- value \method{Window}{lpp}(X, \dots, check=TRUE) <- value warn.once(key, \dots) waxlyrical(type, terse) windows.mppm(x) w.quad(Q) x.quad(Q) y.quad(Q) xy.grid(xr, yr, nx, ny, dx, dy) X2testEngine(OBS, EXP, \dots, method, CR, df, nsim, conditional, alternative, testname, dataname) \method{xtfrm}{im}(x) xypolyselfint(p, eps, proper, yesorno, checkinternal) xypolygon2psp(p, w, check) %%% sparse 3D arrays sparse3Darray(i,j,k,x,dims,dimnames,strict,nonzero) as.sparse3Darray(x, \dots) \method{dim}{sparse3Darray}(x) \method{dim}{sparse3Darray}(x) <- value \method{dimnames}{sparse3Darray}(x) \method{dimnames}{sparse3Darray}(x) <- value \method{print}{sparse3Darray}(x, \dots) \method{aperm}{sparse3Darray}(a, perm, resize, \dots) \method{as.array}{sparse3Darray}(x, \dots) \method{[}{sparse3Darray}(x, i, j, k, drop, \dots) \method{[}{sparse3Darray}(x, i, j, k, \dots) <- value \method{anyNA}{sparse3Darray}(x, recursive) RelevantZero(x) RelevantEmpty(x) isRelevantZero(x) unionOfSparseIndices(A,B) \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Summary(\dots, na.rm = FALSE)} %NAMESPACE S3method("Math", "sparse3Darray") %NAMESPACE S3method("Ops", "sparse3Darray") %NAMESPACE S3method("Summary", "sparse3Darray") inside3Darray(d, i, j, k) SparseEntries(x) SparseIndices(x) EntriesToSparse(df, dims) mapSparseEntries(x, margin, values, conform, 
across) applySparseEntries(x, f, \dots) sumsymouterSparse(x, w, dbg) tenseur(A, B, alongA, alongB) marginSums(X, MARGIN) rbindCompatibleDataFrames(x) bind.sparse3Darray(A, B, along) %% spatstatDiagnostic(msg) %% as.ppplist(x, check) as.imlist(x, check) pointsAlongNetwork(L, delta) expandSparse(x, n, across) allElementsIdentical(x, entry) resampleNetworkDataFrame(df, template) sparseVectorCumul(x, i, length) %% \method{as.ppm}{lppm}(object) \method{as.ppm}{rppm}(object) \method{predict}{profilepl}(object, \dots) %%%%%%% } \details{ These internal \pkg{spatstat} functions are not usually called directly by the user. Their names and capabilities may change without warning from one version of \pkg{spatstat} to the next. } \keyword{internal} spatstat/man/methods.lpp.Rd0000644000176200001440000000473213160710621015417 0ustar liggesusers\name{methods.lpp} \alias{methods.lpp} %DoNotExport \Rdversion{1.1} \alias{as.ppp.lpp} \alias{as.psp.lpp} \alias{marks<-.lpp} \alias{nsegments.lpp} \alias{print.lpp} \alias{print.summary.lpp} \alias{summary.lpp} \alias{unitname.lpp} \alias{unitname<-.lpp} \alias{unmark.lpp} \title{ Methods for Point Patterns on a Linear Network } \description{ These are methods specifically for the class \code{"lpp"} of point patterns on linear networks. } \usage{ \method{as.ppp}{lpp}(X, ..., fatal=TRUE) \method{as.psp}{lpp}(x, ..., fatal=TRUE) \method{marks}{lpp}(x, ...) <- value \method{nsegments}{lpp}(x) \method{print}{lpp}(x, ...) \method{print}{summary.lpp}(x, ...) \method{summary}{lpp}(object, ...) \method{unitname}{lpp}(x) \method{unitname}{lpp}(x) <- value \method{unmark}{lpp}(X) } \arguments{ \item{x,X,object}{ An object of class \code{"lpp"} representing a point pattern on a linear network. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ Replacement value for the \code{marks} or \code{unitname} of \code{x}. See Details. 
} \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}). } } \details{ These are methods for the generic functions \code{\link{as.ppp}}, \code{\link{as.psp}}, \code{\link{marks<-}}, \code{\link{nsegments}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}}, \code{\link{unitname<-}} and \code{\link{unmark}} for objects of the class \code{"lpp"}. For \code{"marks<-.lpp"} the replacement \code{value} should be either \code{NULL}, or a vector of length equal to the number of points in \code{x}, or a data frame with one row for each point in \code{x}. For \code{"unitname<-.lpp"} the replacement \code{value} should be a valid name for the unit of length, as described in \code{\link{unitname}}. } \section{Other methods}{ An object of class \code{"lpp"} also inherits the class \code{"ppx"} for which many other methods are available. See \code{\link[spatstat:methods.ppx]{methods.ppx}}. } \value{ See the documentation on the corresponding generic function. } \author{ \adrian } \seealso{ \code{\link{lpp}}, \code{\link{intensity.lpp}}, \code{\link[spatstat:methods.ppx]{methods.ppx}} } \examples{ X <- runiflpp(10, simplenet) X as.ppp(X) summary(X) unitname(X) <- c("furlong", "furlongs") } \keyword{spatial} \keyword{methods} spatstat/man/solutionset.Rd0000644000176200001440000000523313160710621015547 0ustar liggesusers\name{solutionset} \alias{solutionset} \title{Evaluate Logical Expression Involving Pixel Images and Return Region Where Expression is True} \description{ Given a logical expression involving one or more pixel images, find all pixels where the expression is true, and assemble these pixels into a window. } \usage{ solutionset(\dots, envir) } \arguments{ \item{\dots}{An expression in the \R language, involving one or more pixel images.} \item{envir}{Optional. 
The environment in which to evaluate the expression.} } \details{ Given a logical expression involving one or more pixel images, this function will find all pixels where the expression is true, and assemble these pixels into a spatial window. Pixel images in \code{spatstat} are represented by objects of class \code{"im"} (see \code{\link{im.object}}). These are essentially matrices of pixel values, with extra attributes recording the pixel dimensions, etc. Suppose \code{X} is a pixel image. Then \code{solutionset(abs(X) > 3)} will find all the pixels in \code{X} for which the pixel value is greater than 3 in absolute value, and return a window containing all these pixels. If \code{X} and \code{Y} are two pixel images, \code{solutionset(X > Y)} will find all pixels for which the pixel value of \code{X} is greater than the corresponding pixel value of \code{Y}, and return a window containing these pixels. In general, \code{\dots} can be any logical expression involving pixel images. The code first tries to evaluate the expression using \code{\link{eval.im}}. This is successful if the expression involves only (a) the \emph{names} of pixel images, (b) scalar constants, and (c) functions which are vectorised. There must be at least one pixel image in the expression. The expression \code{expr} must be vectorised. See the Examples. If this is unsuccessful, the code then tries to evaluate the expression using pixel arithmetic. This is successful if all the arithmetic operations in the expression are listed in \code{\link{Math.im}}. } \value{ A spatial window (object of class \code{"owin"}, see \code{\link{owin.object}}). 
} \seealso{ \code{\link{im.object}}, \code{\link{owin.object}}, \code{\link{eval.im}}, \code{\link{levelset}} } \examples{ # test images X <- as.im(function(x,y) { x^2 - y^2 }, unit.square()) Y <- as.im(function(x,y) { 3 * x + y - 1}, unit.square()) W <- solutionset(abs(X) > 0.1) W <- solutionset(X > Y) W <- solutionset(X + Y >= 1) area(solutionset(X < Y)) solutionset(density(cells) > 20) } \author{\adrian and \rolf } \keyword{spatial} \keyword{programming} \keyword{manip} spatstat/man/expand.owin.Rd0000644000176200001440000000232313160710621015406 0ustar liggesusers\name{expand.owin} \alias{expand.owin} \title{Apply Expansion Rule} \description{ Applies an expansion rule to a window. } \usage{ expand.owin(W, \dots) } \arguments{ \item{W}{A window.} \item{\dots}{ Arguments passed to \code{\link{rmhexpand}} to determine an expansion rule. } } \value{ A window (object of class \code{"owin"}). } \details{ The argument \code{W} should be a window (an object of class \code{"owin"}). This command applies the expansion rule specified by the arguments \code{\dots} to the window \code{W}, yielding another window. The arguments \code{\dots} are passed to \code{\link{rmhexpand}} to determine the expansion rule. For other transformations of the scale, location and orientation of a window, see \code{\link{shift}}, \code{\link{affine}} and \code{\link{rotate}}. } \seealso{ \code{\link{rmhexpand}} about expansion rules. \code{\link{shift}}, \code{\link{rotate}}, \code{\link{affine}} for other types of manipulation. 
} \examples{ expand.owin(square(1), 9) expand.owin(square(1), distance=0.5) expand.owin(letterR, length=2) expand.owin(letterR, distance=0.1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/clusterfit.Rd0000644000176200001440000001160013160710571015342 0ustar liggesusers\name{clusterfit} \alias{clusterfit} \title{Fit Cluster or Cox Point Process Model via Minimum Contrast} \description{ Fit a homogeneous or inhomogeneous cluster process or Cox point process model to a point pattern by the Method of Minimum Contrast. } \usage{ clusterfit(X, clusters, lambda = NULL, startpar = NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, \dots, statistic = NULL, statargs = NULL, algorithm="Nelder-Mead") } \arguments{ \item{X}{ Data to which the cluster or Cox model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{clusters}{ Character string determining the cluster or Cox model. Partially matched. Options are \code{"Thomas"}, \code{"MatClust"}, \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. Either a single numeric specifying a constant intensity, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{startpar}{ Vector of initial values of the parameters of the point process model. If \code{X} is a point pattern sensible defaults are used. Otherwise rather arbitrary values are used. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Additional arguments passed to \code{\link{mincontrast}}. } \item{statistic}{ Optional. 
Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{algorithm}{ Character string determining the mathematical optimisation algorithm to be used by \code{\link[stats]{optim}}. See the argument \code{method} of \code{\link[stats]{optim}}. } } \details{ This function fits the clustering parameters of a cluster or Cox point process model by the Method of Minimum Contrast, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. If \code{statistic="pcf"} (or \code{X} appears to be an estimated pair correlation function) then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function. If \code{X} is a point pattern of class \code{"ppp"} an estimate of the summary statistic specified by \code{statistic} (defaults to \code{"K"}) is first computed before minimum contrast estimation is carried out as described above. In this case the argument \code{statargs} can be used for controlling the summary statistic estimation. The precise algorithm for computing the summary statistic depends on whether the intensity specification (\code{lambda}) is: \describe{ \item{homogeneous:}{ If \code{lambda} is \code{NULL} or a single numeric the pattern is considered homogeneous and either \code{\link{Kest}} or \code{\link{pcf}} is invoked. In this case \code{lambda} is \bold{not} used for anything when estimating the summary statistic. } \item{inhomogeneous:}{ If \code{lambda} is a pixel image (object of class \code{"im"}), a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} the pattern is considered inhomogeneous. In this case either \code{\link{Kinhom}} or \code{\link{pcfinhom}} is invoked with \code{lambda} as an argument. 
} } After the clustering parameters of the model have been estimated by minimum contrast \code{lambda} (if non-null) is used to compute the additional model parameter \eqn{\mu}{\mu}. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. See \code{\link{mincontrast}}. } \references{ Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007). An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63} (2007) 252--258. } \author{\adrian , \rolf and \ege } \seealso{ \code{\link{kppm}} } \examples{ fit <- clusterfit(redwood, "Thomas") fit if(interactive()){ plot(fit) } } \keyword{spatial} \keyword{models} spatstat/man/discpartarea.Rd0000644000176200001440000000373713160710571015634 0ustar liggesusers\name{discpartarea} \Rdversion{1.1} \alias{discpartarea} \title{ Area of Part of Disc } \description{ Compute area of intersection between a disc and a window } \usage{ discpartarea(X, r, W=as.owin(X)) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}) specifying the centres of the discs. Alternatively, \code{X} may be in any format acceptable to \code{\link{as.ppp}}. } \item{r}{ Matrix, vector or numeric value specifying the radii of the discs. } \item{W}{ Window (object of class \code{"owin"}) with which the discs should be intersected. } } \details{ This algorithm computes the exact area of the intersection between a window \code{W} and a disc (or each of several discs). The centres of the discs are specified by the point pattern \code{X}, and their radii are specified by \code{r}. 
If \code{r} is a single numeric value, then the algorithm computes the area of intersection between \code{W} and the disc of radius \code{r} centred at each point of \code{X}, and returns a one-column matrix containing one entry for each point of \code{X}. If \code{r} is a vector of length \code{m}, then the algorithm returns an \code{n * m} matrix in which the entry on row \code{i}, column \code{j} is the area of the intersection between \code{W} and the disc centred at \code{X[i]} with radius \code{r[j]}. If \code{r} is a matrix, it should have one row for each point in \code{X}. The algorithm returns a matrix in which the entry on row \code{i}, column \code{j} is the area of the intersection between \code{W} and the disc centred at \code{X[i]} with radius \code{r[i,j]}. Areas are computed by analytic geometry. } \value{ Numeric matrix, with one row for each point of \code{X}. } \seealso{ \code{\link{owin}}, \code{\link{disc}} } \examples{ data(letterR) X <- runifpoint(3, letterR) discpartarea(X, 0.2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/linearpcf.Rd0000644000176200001440000000625513160710621015127 0ustar liggesusers\name{linearpcf} \alias{linearpcf} \title{ Linear Pair Correlation Function } \description{ Computes an estimate of the linear pair correlation function for a point pattern on a linear network. } \usage{ linearpcf(X, r=NULL, ..., correction="Ang", ratio=FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{density.default}} to control the smoothing. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each estimate will also be saved, for use in analysing replicated point patterns. 
} } \details{ This command computes the linear pair correlation function from point pattern data on a linear network. The pair correlation function is estimated from the shortest-path distances between each pair of data points, using the fixed-bandwidth kernel smoother \code{\link{density.default}}, with a bias correction at each end of the interval of \eqn{r} values. To switch off the bias correction, set \code{endcorrect=FALSE}. The bandwidth for smoothing the pairwise distances is determined by arguments \code{\dots} passed to \code{\link{density.default}}, mainly the arguments \code{bw} and \code{adjust}. The default is to choose the bandwidth by Silverman's rule of thumb \code{bw="nrd0"} explained in \code{\link{density.default}}. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. The result is an estimate of the first derivative of the network \eqn{K} function defined by Okabe and Yamada (2001). If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). The result is an estimate of the pair correlation function in the linear network. } \value{ Function value table (object of class \code{"fv"}). If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{g(r)}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. 
\emph{Geographical Analysis} \bold{33}, 271-290. } \seealso{ \code{\link{linearK}}, \code{\link{linearpcfinhom}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) linearpcf(X) linearpcf(X, correction="none") } \keyword{spatial} \keyword{nonparametric} spatstat/man/relrisk.ppp.Rd0000644000176200001440000002154613160710621015435 0ustar liggesusers\name{relrisk.ppp} \alias{relrisk.ppp} \title{ Nonparametric Estimate of Spatially-Varying Relative Risk } \description{ Given a multitype point pattern, this function estimates the spatially-varying probability of each type of point, or the ratios of such probabilities, using kernel smoothing. The default smoothing bandwidth is selected by cross-validation. } \usage{ \method{relrisk}{ppp}(X, sigma = NULL, ..., varcov = NULL, at = "pixels", relative=FALSE, se=FALSE, casecontrol=TRUE, control=1, case) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} which has factor valued marks). } \item{sigma}{ Optional. The numeric value of the smoothing bandwidth (the standard deviation of isotropic Gaussian smoothing kernel). Alternatively \code{sigma} may be a function which can be used to select a different bandwidth for each type of point. See Details. } \item{\dots}{ Arguments passed to \code{\link{bw.relrisk}} to select the bandwidth, or passed to \code{\link{density.ppp}} to control the pixel resolution. } \item{varcov}{ Optional. Variance-covariance matrix of anisotropic Gaussian smoothing kernel. Incompatible with \code{sigma}. } \item{at}{ String specifying whether to compute the probability values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{relative}{ Logical. If \code{FALSE} (the default) the algorithm computes the probabilities of each type of point. If \code{TRUE}, it computes the \emph{relative risk}, the ratio of probabilities of each type relative to the probability of a control. 
} \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{casecontrol}{ Logical. Whether to treat a bivariate point pattern as consisting of cases and controls, and return only the probability or relative risk of a case. Ignored if there are more than 2 types of points. See Details. } \item{control}{ Integer, or character string, identifying which mark value corresponds to a control. } \item{case}{ Integer, or character string, identifying which mark value corresponds to a case (rather than a control) in a bivariate point pattern. This is an alternative to the argument \code{control} in a bivariate point pattern. Ignored if there are more than 2 types of points. } } \details{ The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. This function \code{relrisk.ppp} is the method for point pattern datasets. It computes \emph{nonparametric} estimates of relative risk by kernel smoothing. If \code{X} is a bivariate point pattern (a multitype point pattern consisting of two types of points) then by default, the points of the first type (the first level of \code{marks(X)}) are treated as controls or non-events, and points of the second type are treated as cases or events. Then by default this command computes the spatially-varying \emph{probability} of a case, i.e. the probability \eqn{p(u)} that a point at spatial location \eqn{u} will be a case. If \code{relative=TRUE}, it computes the spatially-varying \emph{relative risk} of a case relative to a control, \eqn{r(u) = p(u)/(1- p(u))}. If \code{X} is a multitype point pattern with \eqn{m > 2} types, or if \code{X} is a bivariate point pattern and \code{casecontrol=FALSE}, then by default this command computes, for each type \eqn{j}, a nonparametric estimate of the spatially-varying \emph{probability} of an event of type \eqn{j}. 
This is the probability \eqn{p_j(u)}{p[j](u)} that a point at spatial
location \eqn{u} will belong to type \eqn{j}.
If \code{relative=TRUE}, the command computes the
\emph{relative risk} of an event of type \eqn{j}
relative to a control,
\eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)},
where events of type \eqn{k} are treated as controls.
The argument \code{control} determines which type \eqn{k}
is treated as a control.

If \code{at = "pixels"} the calculation is performed for
every spatial location \eqn{u} on a fine pixel grid, and the result
is a pixel image representing the function \eqn{p(u)}
or a list of pixel images representing the functions
\eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)}
for \eqn{j = 1,\ldots,m}{j = 1,...,m}.
An infinite value of relative risk (arising because the
probability of a control is zero) will be returned as \code{NA}.

If \code{at = "points"} the calculation is performed only at the
data points \eqn{x_i}{x[i]}. By default
the result is a vector of values
\eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case
at each data point, or a matrix of values
\eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of
each possible type \eqn{j} at each data point.
If \code{relative=TRUE} then the relative risks
\eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are
returned.
An infinite value of relative risk (arising because the
probability of a control is zero) will be returned as \code{Inf}.

Estimation is performed by a simple Nadaraya-Watson type kernel
smoother (Diggle, 2003).
The smoothing bandwidth can be specified in any of the following ways:
\itemize{
\item \code{sigma} is a single numeric value, giving the standard
deviation of the isotropic Gaussian kernel.
\item \code{sigma} is a numeric vector of length 2, giving the
standard deviations in the \eqn{x} and \eqn{y} directions of
a Gaussian kernel.
\item \code{varcov} is a 2 by 2 matrix giving the
variance-covariance matrix of the Gaussian kernel.
\item \code{sigma} is a \code{function} which selects the bandwidth. Bandwidth selection will be applied \bold{separately to each type of point}. An example of such a function is \code{\link{bw.diggle}}. \item \code{sigma} and \code{varcov} are both missing or null. Then a \bold{common} smoothing bandwidth \code{sigma} will be selected by cross-validation using \code{\link{bw.relrisk}}. } If \code{se=TRUE} then standard errors will also be computed, based on asymptotic theory, \emph{assuming a Poisson process}. } \value{ If \code{se=FALSE} (the default), the format is described below. If \code{se=TRUE}, the result is a list of two entries, \code{estimate} and \code{SE}, each having the format described below. If \code{X} consists of only two types of points, and if \code{casecontrol=TRUE}, the result is a pixel image (if \code{at="pixels"}) or a vector (if \code{at="points"}). The pixel values or vector values are the probabilities of a case if \code{relative=FALSE}, or the relative risk of a case (probability of a case divided by the probability of a control) if \code{relative=TRUE}. If \code{X} consists of more than two types of points, or if \code{casecontrol=FALSE}, the result is: \itemize{ \item (if \code{at="pixels"}) a list of pixel images, with one image for each possible type of point. The result also belongs to the class \code{"solist"} so that it can be printed and plotted. \item (if \code{at="points"}) a matrix of probabilities, with rows corresponding to data points \eqn{x_i}{x[i]}, and columns corresponding to types \eqn{j}. } The pixel values or matrix entries are the probabilities of each type of point if \code{relative=FALSE}, or the relative risk of each type (probability of each type divided by the probability of a control) if \code{relative=TRUE}. If \code{relative=FALSE}, the resulting values always lie between 0 and 1. If \code{relative=TRUE}, the results are either non-negative numbers, or the values \code{Inf} or \code{NA}. 
} \seealso{ There is another method \code{\link{relrisk.ppm}} for point process models which computes \emph{parametric} estimates of relative risk, using the fitted model. See also \code{\link{bw.relrisk}}, \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}, \code{\link{eval.im}} } \examples{ p.oak <- relrisk(urkiola, 20) if(interactive()) { plot(p.oak, main="proportion of oak") plot(eval.im(p.oak > 0.3), main="More than 30 percent oak") plot(split(lansing), main="Lansing Woods") p.lan <- relrisk(lansing, 0.05, se=TRUE) plot(p.lan$estimate, main="Lansing Woods species probability") plot(p.lan$SE, main="Lansing Woods standard error") wh <- im.apply(p.lan$estimate, which.max) types <- levels(marks(lansing)) wh <- eval.im(types[wh]) plot(wh, main="Most common species") } } \references{ Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/measureVariation.Rd0000644000176200001440000000416713160710621016502 0ustar liggesusers\name{measureVariation} \alias{measureVariation} \alias{measurePositive} \alias{measureNegative} \alias{totalVariation} \title{ Positive and Negative Parts, and Variation, of a Measure } \description{ Given a measure \code{A} (object of class \code{"msr"}) these functions find the positive part, negative part and variation of \code{A}. } \usage{ measurePositive(x) measureNegative(x) measureVariation(x) totalVariation(x) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } } \details{ The functions \code{measurePositive} and \code{measureNegative} return the positive and negative parts of the measure, and \code{measureVariation} returns the variation (sum of positive and negative parts). The function \code{totalVariation} returns the total variation norm. 
If \eqn{\mu} is a signed measure, it can be represented as
\deqn{\mu = \mu_{+} - \mu_{-}}{\mu = \mu[+] - \mu[-]}
where \eqn{\mu_{+}}{\mu[+]} and \eqn{\mu_{-}}{\mu[-]}
are \emph{nonnegative} measures called the positive and negative
parts of \eqn{\mu}.
In a nutshell, the positive part of \eqn{\mu}
consists of all positive contributions or increments,
and the negative part consists of all negative contributions
multiplied by \code{-1}.

The variation \eqn{|\mu|} is defined by
\deqn{|\mu| = \mu_{+} + \mu_{-}}{|\mu| = \mu[+] + \mu[-]}
and is also a nonnegative measure.

The total variation norm is the integral of the variation.
}
\value{
The result of \code{measurePositive}, \code{measureNegative}
and \code{measureVariation} is another measure (object of class
\code{"msr"}) on the same spatial domain.
The result of \code{totalVariation} is a non-negative number.
}
\references{
Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand.
}
\author{
\adrian.
}
\seealso{
\code{\link{msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}
}
\examples{
X <- rpoispp(function(x,y) { exp(3+3*x) })
fit <- ppm(X, ~x+y)
rp <- residuals(fit, type="pearson")
measurePositive(rp)
measureNegative(rp)
measureVariation(rp)
# total variation norm
totalVariation(rp)
}
\keyword{spatial}
\keyword{math}
spatstat/man/diameter.owin.Rd0000644000176200001440000000206413160710571015727 0ustar liggesusers\name{diameter.owin}
\alias{diameter.owin}
\title{Diameter of a Window}
\description{
Computes the diameter of a window.
}
\usage{
\method{diameter}{owin}(x)
}
\arguments{
\item{x}{
A window whose diameter will be computed.
}
}
\value{
The numerical value of the diameter of the window.
}
\details{
This function computes the diameter of a window
of arbitrary shape,
i.e. the maximum distance between any two points
in the window.
The argument \code{x} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) or can be given in any format acceptable to \code{\link{as.owin}()}. The function \code{diameter} is generic. This function is the method for the class \code{"owin"}. } \seealso{ \code{\link{area.owin}}, \code{\link{perimeter}}, \code{\link{edges}}, \code{\link{owin}}, \code{\link{as.owin}} } \examples{ w <- owin(c(0,1),c(0,1)) diameter(w) # returns sqrt(2) data(letterR) diameter(letterR) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/print.ppp.Rd0000644000176200001440000000154713160710621015115 0ustar liggesusers\name{print.ppp} \alias{print.ppp} \title{Print Brief Details of a Point Pattern Dataset} \description{ Prints a very brief description of a point pattern dataset. } \usage{ \method{print}{ppp}(x, \dots) } \arguments{ \item{x}{Point pattern (object of class \code{"ppp"}).} \item{\dots}{Ignored.} } \details{ A very brief description of the point pattern \code{x} is printed. This is a method for the generic function \code{\link{print}}. } \seealso{ \code{\link{print}}, \code{\link{print.owin}}, \code{\link{summary.ppp}} } \examples{ data(cells) # plain vanilla point pattern cells data(lansing) # multitype point pattern lansing data(longleaf) # numeric marks longleaf data(demopat) # weird polygonal window demopat } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} spatstat/man/corners.Rd0000644000176200001440000000173113160710571014635 0ustar liggesusers\name{corners} \alias{corners} \title{Corners of a rectangle} \description{ Returns the four corners of a rectangle } \usage{ corners(window) } \arguments{ \item{window}{A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. 
} } \value{ A list with two components \code{x} and \code{y}, which are numeric vectors of length 4 giving the coordinates of the four corner points of the (bounding rectangle of the) window. } \details{ This trivial function is occasionally convenient. If \code{window} is of type \code{"rectangle"} this returns the four corners of the window itself; otherwise, it returns the corners of the bounding rectangle of the window. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}} } \examples{ w <- unit.square() corners(w) # returns list(x=c(0,1,0,1),y=c(0,0,1,1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/plot.solist.Rd0000644000176200001440000002025313160710621015450 0ustar liggesusers\name{plot.solist} \alias{plot.solist} \title{Plot a List of Spatial Objects} \description{ Plots a list of two-dimensional spatial objects. } \usage{ \method{plot}{solist}(x, \dots, main, arrange=TRUE, nrows=NULL, ncols=NULL, main.panel=NULL, mar.panel=c(2,1,1,2), hsep=0, vsep=0, panel.begin=NULL, panel.end=NULL, panel.args=NULL, panel.begin.args=NULL, panel.end.args=NULL, plotcommand="plot", adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL, adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE) } \arguments{ \item{x}{ An object of the class \code{"solist"}, essentially a list of two-dimensional spatial datasets. } \item{\dots}{ Arguments passed to \code{\link{plot}} when generating each plot panel. } \item{main}{ Overall heading for the plot. } \item{arrange}{ Logical flag indicating whether to plot the objects side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{main.panel}{ Optional. 
A character string, or a vector of character strings, giving the headings for each of the objects. } \item{mar.panel}{ Size of the margins outside each plot panel. A numeric vector of length 4 giving the bottom, left, top, and right margins in that order. (Alternatively the vector may have length 1 or 2 and will be replicated to length 4). See the section on \emph{Spacing between plots}. } \item{hsep,vsep}{ Additional horizontal and vertical separation between plot panels, expressed in the same units as \code{mar.panel}. } \item{panel.begin,panel.end}{ Optional. Functions that will be executed before and after each panel is plotted. See Details. } \item{panel.args}{ Optional. Function that determines different plot arguments for different panels. See Details. } \item{panel.begin.args}{ Optional. List of additional arguments for \code{panel.begin} when it is a function. } \item{panel.end.args}{ Optional. List of additional arguments for \code{panel.end} when it is a function. } \item{plotcommand}{ Optional. Character string containing the name of the command that should be executed to plot each panel. } \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{ Optional. Functions (with no arguments) that will be executed to generate additional plots at the margins (left, right, top and/or bottom, respectively) of the array of plots. } \item{adorn.size}{ Relative width (as a fraction of the other panels' widths) of the margin plots. } \item{equal.scales}{ Logical value indicating whether the components should be plotted at (approximately) the same physical scale. } \item{halign,valign}{ Logical values indicating whether panels in a column should be aligned to the same \eqn{x} coordinate system (\code{halign=TRUE}) and whether panels in a row should be aligned to the same \eqn{y} coordinate system (\code{valign=TRUE}). These are applicable only if \code{equal.scales=TRUE}. } } \value{ Null. } \details{ This is the \code{plot} method for the class \code{"solist"}. 
An object of class \code{"solist"} represents a list of two-dimensional spatial datasets. This is the \code{plot} method for such objects. In the \pkg{spatstat} package, various functions produce an object of class \code{"solist"}. These objects can be plotted in a nice arrangement using \code{plot.solist}. See the Examples. The argument \code{panel.args} determines extra graphics parameters for each panel. It should be a function that will be called as \code{panel.args(i)} where \code{i} is the panel number. Its return value should be a list of graphics parameters that can be passed to the relevant \code{plot} method. These parameters override any parameters specified in the \code{\dots} arguments. The arguments \code{panel.begin} and \code{panel.end} determine graphics that will be plotted before and after each panel is plotted. They may be objects of some class that can be plotted with the generic \code{plot} command. Alternatively they may be functions that will be called as \code{panel.begin(i, y, main=main.panel[i])} and \code{panel.end(i, y, add=TRUE)} where \code{i} is the panel number and \code{y = x[[i]]}. If all entries of \code{x} are pixel images, the function \code{\link{image.listof}} is called to control the plotting. The arguments \code{equal.ribbon} and \code{col} can be used to determine the colour map or maps applied. If \code{equal.scales=FALSE} (the default), then the plot panels will have equal height on the plot device (unless there is only one column of panels, in which case they will have equal width on the plot device). This means that the objects are plotted at different physical scales, by default. If \code{equal.scales=TRUE}, then the dimensions of the plot panels on the plot device will be proportional to the spatial dimensions of the corresponding components of \code{x}. This means that the objects will be plotted at \emph{approximately} equal physical scales. 
If these objects have very different spatial sizes, the plot command could fail (when it tries to plot the smaller objects at a tiny scale), with an error message that the figure margins are too large. The objects will be plotted at \emph{exactly} equal physical scales, and \emph{exactly} aligned on the device, under the following conditions: \itemize{ \item every component of \code{x} is a spatial object whose position can be shifted by \code{\link{shift}}; \item \code{panel.begin} and \code{panel.end} are either \code{NULL} or they are spatial objects whose position can be shifted by \code{\link{shift}}; \item \code{adorn.left}, \code{adorn.right}, \code{adorn.top} and \code{adorn.bottom} are all \code{NULL}. } Another special case is when every component of \code{x} is an object of class \code{"fv"} representing a function. If \code{equal.scales=TRUE} then all these functions will be plotted with the same axis scales (i.e. with the same \code{xlim} and the same \code{ylim}). } \section{Spacing between plots}{ The spacing between individual plots is controlled by the parameters \code{mar.panel}, \code{hsep} and \code{vsep}. If \code{equal.scales=FALSE}, the plot panels are logically separate plots. The margins for each panel are determined by the argument \code{mar.panel} which becomes the graphics parameter \code{mar} described in the help file for \code{\link{par}}. One unit of \code{mar} corresponds to one line of text in the margin. If \code{hsep} or \code{vsep} are present, \code{mar.panel} is augmented by \code{c(vsep, hsep, vsep, hsep)/2}. If \code{equal.scales=TRUE}, all the plot panels are drawn in the same coordinate system which represents a physical scale. The unit of measurement for \code{mar.panel[1,3]} is one-sixth of the greatest height of any object plotted in the same row of panels, and the unit for \code{mar.panel[2,4]} is one-sixth of the greatest width of any object plotted in the same column of panels. 
If \code{hsep} or \code{vsep} are present, they are interpreted in the same units as \code{mar.panel[2]} and \code{mar.panel[1]} respectively. } \seealso{ \code{\link{plot.anylist}}, \code{\link{contour.listof}}, \code{\link{image.listof}}, \code{\link{density.splitppp}} } \section{Error messages}{ If the error message \sQuote{Figure margins too large} occurs, this generally means that one of the objects had a much smaller physical scale than the others. Ensure that \code{equal.scales=FALSE} and increase the values of \code{mar.panel}. } \examples{ # Intensity estimate of multitype point pattern plot(D <- density(split(amacrine))) plot(D, main="", equal.ribbon=TRUE, panel.end=function(i,y,...){contour(y, ...)}) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/lineardisc.Rd0000644000176200001440000000643013160710621015274 0ustar liggesusers\name{lineardisc} \alias{lineardisc} \alias{countends} \title{ Compute Disc of Given Radius in Linear Network } \description{ Computes the \sQuote{disc} of given radius and centre in a linear network. } \usage{ lineardisc(L, x = locator(1), r, plotit = TRUE, cols=c("blue", "red","green")) countends(L, x = locator(1), r, toler=NULL) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{x}{ Location of centre of disc. Either a point pattern (object of class \code{"ppp"}) containing exactly 1 point, or a numeric vector of length 2. } \item{r}{ Radius of disc. } \item{plotit}{ Logical. Whether to plot the disc. } \item{cols}{ Colours for plotting the disc. A numeric or character vector of length 3 specifying the colours of the disc centre, disc lines and disc endpoints respectively. } \item{toler}{ Optional. Distance threshold for \code{countends}. See Details. There is a sensible default. 
}
}
\details{
The \sQuote{disc} \eqn{B(x,r)} of centre \eqn{x} and radius \eqn{r}
in a linear network \eqn{L} is the set of all points
\eqn{u} in \eqn{L} such that the shortest path distance from \eqn{x}
to \eqn{u} is less than or equal to \eqn{r}.
This is a union of line segments contained in \eqn{L}.

The \emph{relative boundary} of the disc \eqn{B(x,r)}
is the set of points \eqn{v} such that the shortest path distance from
\eqn{x} to \eqn{v} is \emph{equal} to \eqn{r}.

The function \code{lineardisc} computes the
disc of radius \eqn{r} and its relative boundary,
optionally plots them, and returns them.

The faster function \code{countends} simply counts the number of points
in the relative boundary.

The optional threshold \code{toler} is used to suppress numerical
errors in \code{countends}.
If the distance from \eqn{x} to a network vertex \eqn{v}
is between \code{r-toler} and \code{r+toler}, the vertex
will be treated as lying on the relative boundary.
}
\value{
The value of \code{lineardisc} is a list with two entries:
\item{lines }{Line segment pattern (object of class \code{"psp"})
representing the interior disc}
\item{endpoints}{Point pattern (object of class \code{"ppp"})
representing the relative boundary of the disc.
}
The value of \code{countends} is an integer giving the number of
points in the relative boundary.
}
\author{
Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian
}
\seealso{
\code{\link{linnet}}
}
\references{
Ang, Q.W. (2010)
\emph{Statistical methodology for events on a network}.
Master's thesis, School of Mathematics and Statistics, University of
Western Australia.

Ang, Q.W., Baddeley, A. and Nair, G. (2012)
Geometrically corrected second-order analysis of
events on a linear network, with applications to
ecology and criminology.
\emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
}
\examples{
# letter 'A'
v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7))
edg <- cbind(1:4, 2:5)
edg <- rbind(edg, c(2,4))
letterA <- linnet(v, edges=edg)

lineardisc(letterA, c(0,3), 1.6)
# count the endpoints
countends(letterA, c(0,3), 1.6)
# cross-check (slower)
en <- lineardisc(letterA, c(0,3), 1.6, plotit=FALSE)$endpoints
npoints(en)
}
\keyword{spatial}
spatstat/man/Ldot.Rd0000644000176200001440000000510513160710571014063 0ustar liggesusers\name{Ldot}
\alias{Ldot}
\title{Multitype L-function (i-to-any)}
\description{
Calculates an estimate of the multitype L-function
(from type \code{i} to any type)
for a multitype point pattern.
}
\usage{
Ldot(X, i, ..., from)
}
\arguments{
\item{X}{The observed point pattern,
from which an estimate of the dot-type \eqn{L} function
\eqn{L_{i\bullet}(r)}{Li.(r)} will be computed.
It must be a multitype point pattern (a marked point pattern
whose marks are a factor).
See under Details.
}
\item{i}{The type (mark value)
of the points in \code{X} from which distances are measured.
A character string (or something that will be converted to a
character string).
Defaults to the first level of \code{marks(X)}.
}
\item{\dots}{
Arguments passed to \code{\link{Kdot}}.
}
\item{from}{An alternative way to specify \code{i}.}
}
\details{
This command computes
\deqn{L_{i\bullet}(r) = \sqrt{\frac{K_{i\bullet}(r)}{\pi}}}{Li.(r) = sqrt(Ki.(r)/pi)}
where
The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L_{i\bullet}}{Li.} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L_{i\bullet}}{Li.} has been estimated } \item{theo}{the theoretical value \eqn{L_{i\bullet}(r) = r}{Li.(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{i\bullet}}{Li.} obtained by the edge corrections named. } \seealso{ \code{\link{Kdot}}, \code{\link{Lcross}}, \code{\link{Lest}} } \examples{ data(amacrine) L <- Ldot(amacrine, "off") plot(L) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/is.convex.Rd0000644000176200001440000000201213160710621015063 0ustar liggesusers\name{is.convex} \alias{is.convex} \title{Test Whether a Window is Convex} \description{ Determines whether a window is convex. } \usage{ is.convex(x) } \arguments{ \item{x}{ Window (object of class \code{"owin"}). } } \value{ Logical value, equal to \code{TRUE} if \code{x} is convex. } \details{ If \code{x} is a rectangle, the result is TRUE. If \code{x} is polygonal, the result is TRUE if \code{x} consists of a single polygon and this polygon is equal to the minimal convex hull of its vertices computed by \code{\link[grDevices]{chull}}. If \code{x} is a mask, the algorithm first extracts all boundary pixels of \code{x} using \code{\link{vertices}}. Then it computes the (polygonal) convex hull \eqn{K} of the boundary pixels. The result is TRUE if every boundary pixel lies within one pixel diameter of an edge of \eqn{K}. 
} \seealso{ \code{\link{owin}}, \code{\link{convexhull.xy}}, \code{\link{vertices}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/DiggleGatesStibbard.Rd0000644000176200001440000000504413160710571017015 0ustar liggesusers\name{DiggleGatesStibbard} \alias{DiggleGatesStibbard} \title{Diggle-Gates-Stibbard Point Process Model} \description{ Creates an instance of the Diggle-Gates-Stibbard point process model which can then be fitted to point pattern data. } \usage{ DiggleGatesStibbard(rho) } \arguments{ \item{rho}{Interaction range} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Diggle-Gates-Stibbard process with interaction range \code{rho}. } \details{ Diggle, Gates and Stibbard (1987) proposed a pairwise interaction point process in which each pair of points separated by a distance \eqn{d} contributes a factor \eqn{e(d)} to the probability density, where \deqn{ e(d) = \sin^2\left(\frac{\pi d}{2\rho}\right) }{ e(d) = sin^2((\pi * d)/(2 * \rho)) } for \eqn{d < \rho}{d < \rho}, and \eqn{e(d)} is equal to 1 for \eqn{d \ge \rho}{d \ge \rho}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Diggle-Gates-Stibbard pairwise interaction is yielded by the function \code{DiggleGatesStibbard()}. See the examples below. Note that this model does not have any regular parameters (as explained in the section on Interaction Parameters in the help file for \code{\link{ppm}}). The parameter \eqn{\rho} is not estimated by \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{DiggleGratton}}, \code{\link{rDGS}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. 
\emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. } \examples{ DiggleGatesStibbard(0.02) # prints a sensible description of itself \dontrun{ ppm(cells ~1, DiggleGatesStibbard(0.05)) # fit the stationary D-G-S process to `cells' } ppm(cells ~ polynom(x,y,3), DiggleGatesStibbard(0.05)) # fit a nonstationary D-G-S process # with log-cubic polynomial trend } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/crossdist.pp3.Rd0000644000176200001440000000406513160710571015703 0ustar liggesusers\name{crossdist.pp3} \alias{crossdist.pp3} \title{Pairwise distances between two different three-dimensional point patterns} \description{ Computes the distances between pairs of points taken from two different three-dimensional point patterns. } \usage{ \method{crossdist}{pp3}(X, Y, \dots, periodic=FALSE, squared=FALSE) } \arguments{ \item{X,Y}{ Point patterns in three dimensions (objects of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. } \details{ Given two point patterns in three-dimensional space, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}} for three-dimensional point patterns (objects of class \code{"pp3"}). 
This function expects two point patterns \code{X} and \code{Y}, and returns the matrix whose \code{[i,j]} entry is the distance from \code{X[i]} to \code{Y[j]}. Alternatively if \code{periodic=TRUE}, then provided the windows containing \code{X} and \code{Y} are identical and are rectangular, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite edges of the rectangle are regarded as equivalent. This is meaningless if the window is not a rectangle. } \seealso{ \code{\link{crossdist}}, \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{G3est}} } \examples{ X <- runifpoint3(20) Y <- runifpoint3(30) d <- crossdist(X, Y) d <- crossdist(X, Y, periodic=TRUE) } \author{ \adrian based on code for two dimensions by Pavel Grabarnik. } \keyword{spatial} \keyword{math} spatstat/man/regularpolygon.Rd0000644000176200001440000000350113160710621016224 0ustar liggesusers\name{regularpolygon} \alias{regularpolygon} \alias{hexagon} \title{ Create A Regular Polygon } \description{ Create a window object representing a regular (equal-sided) polygon. } \usage{ regularpolygon(n, edge = 1, centre = c(0, 0), \dots, align = c("bottom", "top", "left", "right", "no")) hexagon(edge = 1, centre = c(0,0), \dots, align = c("bottom", "top", "left", "right", "no")) } \arguments{ \item{n}{ Number of edges in the polygon. } \item{edge}{ Length of each edge in the polygon. A single positive number. } \item{centre}{ Coordinates of the centre of the polygon. A numeric vector of length 2, or a \code{list(x,y)} giving the coordinates of exactly one point, or a point pattern (object of class \code{"ppp"}) containing exactly one point. } \item{align}{ Character string specifying whether to align one of the edges with a vertical or horizontal boundary. } \item{\dots}{ Ignored. } } \details{ The function \code{regularpolygon} creates a regular (equal-sided) polygon with \code{n} sides, centred at \code{centre}, with sides of equal length \code{edge}. 
The function \code{hexagon} is the special case \code{n=6}. The orientation of the polygon is determined by the argument \code{align}. If \code{align="no"}, one vertex of the polygon is placed on the \eqn{x}-axis. Otherwise, an edge of the polygon is aligned with one side of the frame, specified by the value of \code{align}. } \value{ A window (object of class \code{"owin"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{disc}}, \code{\link{ellipse}}, \code{\link{owin}}. \code{\link{hextess}} for hexagonal tessellations. } \examples{ plot(hexagon()) plot(regularpolygon(7)) plot(regularpolygon(7, align="left")) } \keyword{spatial} \keyword{datagen} spatstat/man/logLik.dppm.Rd0000644000176200001440000000554513160710621015345 0ustar liggesusers\name{logLik.dppm} \alias{logLik.dppm} \alias{AIC.dppm} \alias{extractAIC.dppm} \alias{nobs.dppm} \title{Log Likelihood and AIC for Fitted Determinantal Point Process Model} \description{ Extracts the log Palm likelihood, deviance, and AIC of a fitted determinantal point process model. } \usage{ \method{logLik}{dppm}(object, ...) \method{AIC}{dppm}(object, \dots, k=2) \method{extractAIC}{dppm}(fit, scale=0, k=2, \dots) \method{nobs}{dppm}(object, ...) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"dppm"}. } \item{\dots}{Ignored.} \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } } \details{ These functions are methods for the generic commands \code{\link{logLik}}, \code{\link{extractAIC}} and \code{\link{nobs}} for the class \code{"dppm"}. An object of class \code{"dppm"} represents a fitted Cox or cluster point process model. It is obtained from the model-fitting function \code{\link{dppm}}. These methods apply only when the model was fitted by maximising the Palm likelihood (Tanaka et al, 2008) by calling \code{\link{dppm}} with the argument \code{method="palm"}. 
The method \code{logLik.dppm} computes the maximised value of the log Palm likelihood for the fitted model \code{object}. The methods \code{AIC.dppm} and \code{extractAIC.dppm} compute the Akaike Information Criterion AIC for the fitted model based on the Palm likelihood (Tanaka et al, 2008) \deqn{ AIC = -2 \log(PL) + k \times \mbox{edf} }{ AIC = -2 * log(PL) + k * edf } where \eqn{PL} is the maximised Palm likelihood of the fitted model, and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom of the model. The method \code{nobs.dppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link{step}} uses these methods, but it does not work for determinantal models yet due to a missing implementation of \code{update.dppm}. } \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value. \code{nobs} returns an integer value. } \references{ Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. } \seealso{ \code{\link{dppm}}, \code{\link{logLik.ppm}} } \author{\adrian \rolf and \ege } \examples{ fit <- dppm(swedishpines ~ x, dppGauss(), method="palm") nobs(fit) logLik(fit) extractAIC(fit) AIC(fit) } \keyword{spatial} \keyword{models} spatstat/man/bw.smoothppp.Rd0000644000176200001440000000562013160710571015623 0ustar liggesusers\name{bw.smoothppp} \alias{bw.smoothppp} \title{ Cross Validated Bandwidth Selection for Spatial Smoothing } \description{ Uses least-squares cross-validation to select a smoothing bandwidth for spatial smoothing of marks. 
} \usage{ bw.smoothppp(X, nh = spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) } \arguments{ \item{X}{ A marked point pattern with numeric marks. } \item{nh}{ Number of trial values of smoothing bandwith \code{sigma} to consider. The default is 32. } \item{hmin, hmax}{ Optional. Numeric values. Range of trial values of smoothing bandwith \code{sigma} to consider. There is a sensible default. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth for the nonparametric smoothing of mark values using \code{\link{Smooth.ppp}}. The argument \code{X} must be a marked point pattern with a vector or data frame of marks. All mark values must be numeric. The bandwidth is selected by least-squares cross-validation. Let \eqn{y_i}{y[i]} be the mark value at the \eqn{i}th data point. For a particular choice of smoothing bandwidth, let \eqn{\hat y_i}{y*[i]} be the smoothed value at the \eqn{i}th data point. Then the bandwidth is chosen to minimise the squared error of the smoothed values \eqn{\sum_i (y_i - \hat y_i)^2}{sum (y[i] - y*[i])^2}. The result of \code{bw.smoothppp} is a numerical value giving the selected bandwidth \code{sigma}. The result also belongs to the class \code{"bw.optim"} allowing it to be printed and plotted. The plot shows the cross-validation criterion as a function of bandwidth. The range of values for the smoothing bandwidth \code{sigma} is set by the arguments \code{hmin, hmax}. There is a sensible default, based on the nearest neighbour distances. If the optimal bandwidth is achieved at an endpoint of the interval \code{[hmin, hmax]}, the algorithm will issue a warning (unless \code{warn=FALSE}). If this occurs, then it is probably advisable to expand the interval by changing the arguments \code{hmin, hmax}. 
Computation time depends on the number \code{nh} of trial values considered, and also on the range \code{[hmin, hmax]} of values considered, because larger values of \code{sigma} require calculations involving more pairs of data points. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{Smooth.ppp}} } \examples{ data(longleaf) \testonly{op <- spatstat.options(n.bandwidth=8)} b <- bw.smoothppp(longleaf) b plot(b) \testonly{spatstat.options(op)} } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/rlinegrid.Rd0000644000176200001440000000155713160710621015143 0ustar liggesusers\name{rlinegrid} \alias{rlinegrid} \title{Generate grid of parallel lines with random displacement} \description{ Generates a grid of parallel lines, equally spaced, inside the specified window. } \usage{ rlinegrid(angle = 45, spacing = 0.1, win = owin()) } \arguments{ \item{angle}{Common orientation of the lines, in degrees anticlockwise from the x axis. } \item{spacing}{Spacing between successive lines.} \item{win}{Window in which to generate the lines. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } } \details{ The grid is randomly displaced from the origin. } \value{ A line segment pattern (object of class \code{"psp"}). } \seealso{ \code{\link{psp}}, \code{\link{rpoisline}} } \examples{ plot(rlinegrid(30, 0.05)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/spatstat.options.Rd0000644000176200001440000003740413160710621016521 0ustar liggesusers\name{spatstat.options} \alias{spatstat.options} \alias{reset.spatstat.options} \title{Internal Options in Spatstat Package} \description{ Allows the user to examine and reset the values of global parameters which control actions in the \pkg{spatstat} package. } \usage{ spatstat.options(...) 
reset.spatstat.options() } \arguments{ \item{\dots}{ Either empty, or a succession of parameter names in quotes, or a succession of \code{name=value} pairs. See below for the parameter names. } } \value{ Either a list of parameters and their values, or a single value. See Details. } \details{ The function \code{spatstat.options} allows the user to examine and reset the values of global parameters which control actions in the \pkg{spatstat} package. It is analogous to the system function \code{\link[base]{options}}. The function \code{reset.spatstat.options} resets all the global parameters in \pkg{spatstat} to their original, default values. The global parameters of interest to the user are: \describe{ \item{checkpolygons}{ Logical flag indicating whether the functions \code{\link[spatstat]{owin}} and \code{\link[spatstat]{as.owin}} should apply very strict checks on the validity of polygon data. These strict checks are no longer necessary, and the default is \code{checkpolygons=FALSE}. See also \code{fixpolygons} below. } \item{checksegments}{ Logical flag indicating whether the functions \code{\link[spatstat]{psp}} and \code{\link[spatstat]{as.psp}} should check the validity of line segment data (in particular, checking that the endpoints of the line segments are inside the specified window). It is advisable to leave this flag set to \code{TRUE}. } \item{eroded.intensity}{ Logical flag affecting the behaviour of the score and pseudo-score residual functions \code{\link[spatstat]{Gcom}}, \code{\link[spatstat]{Gres}} \code{\link[spatstat]{Kcom}}, \code{\link[spatstat]{Kres}}, \code{\link[spatstat]{psstA}}, \code{\link[spatstat]{psstG}}, \code{\link[spatstat]{psst}}. The flag indicates whether to compute intensity estimates on an eroded window (\code{eroded.intensity=TRUE}) or on the original data window (\code{eroded.intensity=FALSE}, the default). 
} \item{expand}{ The default expansion factor (area inflation factor) for expansion of the simulation window in \code{\link[spatstat]{rmh}} (see \code{\link[spatstat]{rmhcontrol}}). Initialised to \code{2}. } \item{expand.polynom}{ Logical. Whether expressions involving \code{\link[spatstat]{polynom}} in a model formula should be expanded, so that \code{polynom(x,2)} is replaced by \code{x + I(x^2)} and so on. Initialised to \code{TRUE}. } \item{fastpois}{ Logical. Whether to use a fast algorithm (introduced in \pkg{spatstat 1.42-3}) for simulating the Poisson point process in \code{\link[spatstat]{rpoispp}} when the argument \code{lambda} is a pixel image. Initialised to \code{TRUE}. Should be set to \code{FALSE} if needed to guarantee repeatability of results computed using earlier versions of \pkg{spatstat}. } \item{fastthin}{ Logical. Whether to use a fast C language algorithm (introduced in \pkg{spatstat 1.42-3}) for random thinning in \code{\link[spatstat]{rthin}} when the argument \code{P} is a single number. Initialised to \code{TRUE}. Should be set to \code{FALSE} if needed to guarantee repeatability of results computed using earlier versions of \pkg{spatstat}. } \item{fastK.lgcp}{ Logical. Whether to use fast or slow algorithm to compute the (theoretical) \eqn{K}-function of a log-Gaussian Cox process for use in \code{\link[spatstat]{lgcp.estK}} or \code{\link[spatstat]{Kmodel}}. The slow algorithm uses accurate numerical integration; the fast algorithm uses Simpson's Rule for numerical integration, and is about two orders of magnitude faster. Initialised to \code{FALSE}. } \item{fixpolygons}{ Logical flag indicating whether the functions \code{\link[spatstat]{owin}} and \code{\link[spatstat]{as.owin}} should repair errors in polygon data. For example, self-intersecting polygons and overlapping polygons will be repaired. The default is \code{fixpolygons=TRUE}. 
} \item{fftw}{ Logical value indicating whether the two-dimensional Fast Fourier Transform should be computed using the package \pkg{fftwtools}, instead of the \code{fft} function in the \pkg{stats} package. This affects the speed of \code{\link[spatstat]{density.ppp}}, \code{\link[spatstat]{density.psp}}, \code{\link[spatstat]{blur}} \code{\link[spatstat]{setcov}} and \code{\link[spatstat]{Smooth.ppp}}. } \item{gpclib}{ Defunct. This parameter was used to permit or forbid the use of the package \pkg{gpclib}, because of its restricted software licence. This package is no longer needed. } \item{huge.npoints}{ The maximum value of \code{n} for which \code{runif(n)} will not generate an error (possible errors include failure to allocate sufficient memory, and integer overflow of \code{n}). An attempt to generate more than this number of random points triggers a warning from \code{\link[spatstat]{runifpoint}} and other functions. Defaults to \code{1e6}. } \item{image.colfun}{ Function determining the default colour map for \code{\link[spatstat]{plot.im}}. When called with one integer argument \code{n}, this function should return a character vector of length \code{n} specifying \code{n} different colours. } \item{Kcom.remove.zeroes}{ Logical value, determining whether the algorithm in \code{\link[spatstat]{Kcom}} and \code{\link[spatstat]{Kres}} removes or retains the contributions to the function from pairs of points that are identical. If these are retained then the function has a jump at \eqn{r=0}. Initialised to \code{TRUE}. } \item{maxedgewt}{ Edge correction weights will be trimmed so as not to exceed this value. This applies to the weights computed by \code{\link[spatstat]{edge.Trans}} or \code{\link[spatstat]{edge.Ripley}} and used in \code{\link[spatstat]{Kest}} and its relatives. } \item{maxmatrix}{ The maximum permitted size (rows times columns) of matrices generated by \pkg{spatstat}'s internal code. 
Used by \code{\link[spatstat]{ppm}} and \code{\link[spatstat]{predict.ppm}} (for example) to decide when to split a large calculation into blocks. Defaults to \code{2^24=16777216}. } \item{monochrome}{ Logical flag indicating whether graphics should be plotted in grey scale (\code{monochrome=TRUE}) or in colour (\code{monochrome=FALSE}, the default). } \item{n.bandwidth}{ Integer. Number of trial values of smoothing bandwidth to use for cross-validation in \code{\link[spatstat]{bw.relrisk}} and similar functions. } \item{ndummy.min}{ The minimum number of dummy points in a quadrature scheme created by \code{\link[spatstat]{default.dummy}}. Either an integer or a pair of integers giving the minimum number of dummy points in the \code{x} and \code{y} directions respectively. } \item{ngrid.disc}{ Number of points in the square grid used to compute a discrete approximation to the areas of discs in \code{\link[spatstat]{areaLoss}} and \code{\link[spatstat]{areaGain}} when exact calculation is not available. A single integer. } \item{npixel}{ Default number of pixels in a binary mask or pixel image. Either an integer, or a pair of integers, giving the number of pixels in the \code{x} and \code{y} directions respectively. } \item{nvoxel}{ Default number of voxels in a 3D image, typically for calculating the distance transform in \code{\link[spatstat]{F3est}}. Initialised to 4 megavoxels: \code{nvoxel = 2^22 = 4194304}. } \item{par.binary}{ List of arguments to be passed to the function \code{\link[graphics]{image}} when displaying a binary image mask (in \code{\link[spatstat]{plot.owin}} or \code{\link[spatstat]{plot.ppp}}). Typically used to reset the colours of foreground and background. } \item{par.contour}{ List of arguments controlling contour plots of pixel images by \code{\link[spatstat]{contour.im}}. } \item{par.fv}{ List of arguments controlling the plotting of functions by \code{\link[spatstat]{plot.fv}} and its relatives. 
} \item{par.persp}{ List of arguments to be passed to the function \code{\link[graphics]{persp}} when displaying a real-valued image, such as the fitted surfaces in \code{\link[spatstat]{plot.ppm}}. } \item{par.points}{ List of arguments controlling the plotting of point patterns by \code{\link[spatstat]{plot.ppp}}. } \item{par.pp3}{ List of arguments controlling the plotting of three-dimensional point patterns by \code{\link[spatstat]{plot.pp3}}. } \item{print.ppm.SE}{ Default rule used by \code{\link[spatstat]{print.ppm}} to decide whether to calculate and print standard errors of the estimated coefficients of the model. One of the strings \code{"always"}, \code{"never"} or \code{"poisson"} (the latter indicating that standard errors will be calculated only for Poisson models). The default is \code{"poisson"} because the calculation for non-Poisson models can take a long time. } \item{progress}{ Character string determining the style of progress reports printed by \code{\link[spatstat]{progressreport}}. Either \code{"tty"}, \code{"tk"} or \code{"txtbar"}. For explanation of these options, see \code{\link[spatstat]{progressreport}}. } \item{project.fast}{ Logical. If \code{TRUE}, the algorithm of \code{\link[spatstat]{project.ppm}} will be accelerated using a shorcut. Initialised to \code{FALSE}. } \item{psstA.ngrid}{ Single integer, controlling the accuracy of the discrete approximation of areas computed in the function \code{\link[spatstat]{psstA}}. The area of a disc is approximated by counting points on an \eqn{n \times n}{n * n} grid. Initialised to 32. } \item{psstA.nr}{ Single integer, determining the number of distances \eqn{r} at which the function \code{\link[spatstat]{psstA}} will be evaluated (in the default case where argument \code{r} is absent). Initialised to 30. 
} \item{psstG.remove.zeroes}{ Logical value, determining whether the algorithm in \code{\link[spatstat]{psstG}} removes or retains the contributions to the function from pairs of points that are identical. If these are retained then the function has a jump at \eqn{r=0}. Initialised to \code{TRUE}. } \item{rmh.p, rmh.q, rmh.nrep}{ New default values for the parameters \code{p}, \code{q} and \code{nrep} in the Metropolis-Hastings simulation algorithm. These override the defaults in \code{\link[spatstat]{rmhcontrol.default}}. } \item{scalable}{ Logical flag indicating whether the new code in \code{rmh.default} which makes the results scalable (invariant to change of units) should be used. In order to recover former behaviour (so that previous results can be reproduced) set this option equal to \code{FALSE}. See the \dQuote{Warning} section in the help for \code{\link[spatstat]{rmh}()} for more detail. } \item{terse}{ Integer between 0 and 4. The level of terseness (brevity) in printed output from many functions in \pkg{spatstat}. Higher values mean shorter output. A rough guide is the following: \tabular{ll}{ 0 \tab Full output\cr 1 \tab Avoid wasteful output \cr 2 \tab Remove space between paragraphs\cr 3 \tab Suppress extras such as standard errors \cr 4 \tab Compress text, suppress internal warnings } The value of \code{terse} is initialised to 0. } \item{transparent}{ Logical value indicating whether default colour maps are allowed to include semi-transparent colours, where possible. Default is \code{TRUE}. Currently this only affects \code{\link[spatstat]{plot.ppp}}. } \item{units.paren}{ The kind of parenthesis which encloses the text that explains a \code{unitname}. This text is seen in the text output of functions like \code{\link[spatstat]{print.ppp}} and in the graphics generated by \code{\link[spatstat]{plot.fv}}. The value should be one of the character strings \code{'('}, \code{'['}, \code{'{'} or \code{''}. The default is \code{'('}. 
} } If no arguments are given, the current values of all parameters are returned, in a list. If one parameter name is given, the current value of this parameter is returned (\bold{not} in a list, just the value). If several parameter names are given, the current values of these parameters are returned, in a list. If \code{name=value} pairs are given, the named parameters are reset to the given values, and the \bold{previous} values of these parameters are returned, in a list. } \section{Internal parameters}{ The following parameters may also be specified to \code{spatstat.options} but are intended for software development or testing purposes. \describe{ \item{closepairs.newcode}{ Logical. Whether to use new version of the code for \code{\link[spatstat]{closepairs}}. Initialised to \code{TRUE}. } \item{crossing.psp.useCall}{ Logical. Whether to use new version of the code for \code{\link[spatstat]{crossing.psp}}. Initialised to \code{TRUE}. } \item{crosspairs.newcode}{ Logical. Whether to use new version of the code for \code{\link[spatstat]{crosspairs}}. Initialised to \code{TRUE}. } \item{densityC}{ Logical. Indicates whether to use accelerated C code (\code{densityC=TRUE}) or interpreted R code (\code{densityC=FALSE}) to evaluate \code{density.ppp(X, at="points")}. Initialised to \code{TRUE}. } \item{exactdt.checks.data}{ Logical. Do not change this value, unless you are \adrian. } \item{fasteval}{ One of the strings \code{'off'}, \code{'on'} or \code{'test'} determining whether to use accelerated C code to evaluate the conditional intensity of a Gibbs model. Initialised to \code{'on'}. } \item{old.morpho.psp}{ Logical. Whether to use old R code for morphological operations. Initialise to \code{FALSE}. } \item{selfcrossing.psp.useCall}{ Logical. Whether to use new version of the code for \code{\link[spatstat]{selfcrossing.psp}}. Initialised to \code{TRUE}. } \item{use.Krect}{ Logical. Whether to use new code for the K-function in a rectangular window. 
Initialised to \code{TRUE}. } } } \seealso{ \code{\link[base]{options}} } \examples{ # save current values oldopt <- spatstat.options() spatstat.options("npixel") spatstat.options(npixel=150) spatstat.options(npixel=c(100,200)) spatstat.options(par.binary=list(col=grey(c(0.5,1)))) spatstat.options(par.persp=list(theta=-30,phi=40,d=4)) # see help(persp.default) for other options # revert spatstat.options(oldopt) } \author{\adrian and \rolf } \keyword{spatial} spatstat/man/ppmInfluence.Rd0000644000176200001440000000544513160710621015611 0ustar liggesusers\name{ppmInfluence} \alias{ppmInfluence} \title{ Leverage and Influence Measures for Spatial Point Process Model } \description{ Calculates all the leverage and influence measures described in \code{\link{influence.ppm}}, \code{\link{leverage.ppm}} and \code{\link{dfbetas.ppm}}. } \usage{ ppmInfluence(fit, what = c("leverage", "influence", "dfbetas"), \dots, iScore = NULL, iHessian = NULL, iArgs = NULL, drop = FALSE, fitname = NULL) } \arguments{ \item{fit}{ A fitted point process model of class \code{"ppm"}. } \item{what}{ Character vector specifying which quantities are to be calculated. Default is to calculate all quantities. } \item{\dots}{ Ignored. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{fitname}{ Optional character string name for the fitted model \code{fit}. } } \details{ This function calculates all the leverage and influence measures described in \code{\link{influence.ppm}}, \code{\link{leverage.ppm}} and \code{\link{dfbetas.ppm}}. 
When analysing large datasets, the user can call \code{ppmInfluence} to perform the calculations efficiently, then extract the leverage and influence values as desired. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. } \value{ A list containing the leverage and influence measures specified by \code{what}. } \author{ \adrian } \seealso{ \code{\link{leverage.ppm}}, \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~ x+y) fI <- ppmInfluence(fit) fI$influence fI$leverage fI$dfbetas } \keyword{spatial} \keyword{models} spatstat/man/marks.Rd0000644000176200001440000000652013160710621014274 0ustar liggesusers\name{marks} \alias{marks} \alias{marks.ppp} \alias{marks.ppx} \alias{marks<-} \alias{marks<-.ppp} \alias{marks<-.ppx} \alias{setmarks} \alias{\%mark\%} %DoNotExport %NAMESPACE export("%mark%") \title{Marks of a Point Pattern} \description{ Extract or change the marks attached to a point pattern dataset. 
} \usage{ marks(x, \dots) \method{marks}{ppp}(x, \dots, dfok=TRUE, drop=TRUE) \method{marks}{ppx}(x, \dots, drop=TRUE) marks(x, \dots) <- value \method{marks}{ppp}(x, \dots, dfok=TRUE, drop=TRUE) <- value \method{marks}{ppx}(x, \dots) <- value setmarks(x, value) x \%mark\% value } \arguments{ \item{x}{ Point pattern dataset (object of class \code{"ppp"} or \code{"ppx"}). } \item{\dots}{ Ignored. } \item{dfok}{ Logical. If \code{FALSE}, data frames of marks are not permitted and will generate an error. } \item{drop}{ Logical. If \code{TRUE}, a data frame consisting of a single column of marks will be converted to a vector or factor. } \item{value}{ Replacement value. A vector, data frame or hyperframe of mark values, or \code{NULL}. } } \value{ For \code{marks(x)}, the result is a vector, factor, data frame or hyperframe, containing the mark values attached to the points of \code{x}. For \code{marks(x) <- value}, the result is the updated point pattern \code{x} (with the side-effect that the dataset \code{x} is updated in the current environment). For \code{setmarks(x,value)} and \code{x \%mark\% value}, the return value is the point pattern obtained by replacing the marks of \code{x} by \code{value}. } \details{ These functions extract or change the marks attached to the points of the point pattern \code{x}. The expression \code{marks(x)} extracts the marks of \code{x}. The assignment \code{marks(x) <- value} assigns new marks to the dataset \code{x}, and updates the dataset \code{x} in the current environment. The expression \code{setmarks(x,value)} or equivalently \code{x \%mark\% value} returns a point pattern obtained by replacing the marks of \code{x} by \code{value}, but does not change the dataset \code{x} itself. For point patterns in two-dimensional space (objects of class \code{"ppp"}) the marks can be a vector, a factor, or a data frame. For general point patterns (objects of class "ppx") the marks can be a vector, a factor, a data frame or a hyperframe. 
For the assignment \code{marks(x) <- value}, the \code{value} should be a vector or factor of length equal to the number of points in \code{x}, or a data frame or hyperframe with as many rows as there are points in \code{x}. If \code{value} is a single value, or a data frame or hyperframe with one row, then it will be replicated so that the same marks will be attached to each point. To remove marks, use \code{marks(x) <- NULL} or \code{\link{unmark}(x)}. Use \code{\link{ppp}} or \code{\link{ppx}} to create point patterns in more general situations. } \seealso{ \code{\link{ppp.object}}, \code{\link{ppx}}, \code{\link{unmark}}, \code{\link{hyperframe}} } \examples{ X <- amacrine # extract marks m <- marks(X) # recode the mark values "off", "on" as 0, 1 marks(X) <- as.integer(m == "on") } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/stienen.Rd0000644000176200001440000000407513160710621014627 0ustar liggesusers\name{stienen} \alias{stienen} \alias{stienenSet} \title{ Stienen Diagram } \description{ Draw the Stienen diagram of a point pattern, or compute the region covered by the Stienen diagram. } \usage{ stienen(X, \dots, bg = "grey", border = list(bg = NULL)) stienenSet(X, edge=TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{plot.ppp}} to control the plot. } \item{bg}{ Fill colour for circles. } \item{border}{ Either a list of arguments passed to \code{\link{plot.ppp}} to control the display of circles at the border of the diagram, or the value \code{FALSE} indicating that the border circles should not be plotted. } \item{edge}{ Logical value indicating whether to include the circles at the border of the diagram. } } \details{ The Stienen diagram of a point pattern (Stienen, 1982) is formed by drawing a circle around each point of the pattern, with diameter equal to the nearest-neighbour distance for that point. These circles do not overlap. 
If two points are nearest neighbours of each other, then the corresponding circles touch. \code{stienenSet(X)} computes the union of these circles and returns it as a window (object of class \code{"owin"}). \code{stienen(X)} generates a plot of the Stienen diagram of the point pattern \code{X}. By default, circles are shaded in grey if they lie inside the window of \code{X}, and are not shaded otherwise. } \value{ The plotting function \code{stienen} returns \code{NULL}. The return value of \code{stienenSet} is a window (object of class \code{"owin"}). } \references{ Stienen, H. (1982) \emph{Die Vergroeberung von Karbiden in reinen Eisen-Kohlenstoff Staehlen}. Dissertation, RWTH Aachen. } \seealso{ \code{\link{nndist}}, \code{\link{plot.ppp}} } \examples{ Y <- stienenSet(cells) stienen(redwood) stienen(redwood, border=list(bg=NULL, lwd=2, cols="red")) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{math} \keyword{manip} spatstat/man/rmpoispp.Rd0000644000176200001440000001743113160710621015033 0ustar liggesusers\name{rmpoispp} \alias{rmpoispp} \title{Generate Multitype Poisson Point Pattern} \description{ Generate a random point pattern, a realisation of the (homogeneous or inhomogeneous) multitype Poisson process. } \usage{ rmpoispp(lambda, lmax=NULL, win, types, \dots, nsim=1, drop=TRUE, warnwin=!missing(win)) } \arguments{ \item{lambda}{ Intensity of the multitype Poisson process. Either a single positive number, a vector, a \code{function(x,y,m, \dots)}, a pixel image, a list of functions \code{function(x,y, \dots)}, or a list of pixel images. } \item{lmax}{ An upper bound for the value of \code{lambda}. May be omitted } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Ignored if \code{lambda} is a pixel image or list of images. } \item{types}{ All the possible types for the multitype pattern. } \item{\dots}{ Arguments passed to \code{lambda} if it is a function. 
} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{warnwin}{ Logical value specifying whether to issue a warning when \code{win} is ignored. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Each point pattern is multitype (it carries a vector of marks which is a factor). } \details{ This function generates a realisation of the marked Poisson point process with intensity \code{lambda}. Note that the intensity function \eqn{\lambda(x,y,m)}{lambda(x,y,m)} is the average number of points \bold{of type m} per unit area near the location \eqn{(x,y)}. Thus a marked point process with a constant intensity of 10 and three possible types will have an average of 30 points per unit area, with 10 points of each type on average. The intensity function may be specified in any of the following ways. \describe{ \item{single number:}{ If \code{lambda} is a single number, then this algorithm generates a realisation of the uniform marked Poisson process inside the window \code{win} with intensity \code{lambda} for each type. The total intensity of points of all types is \code{lambda * length(types)}. The argument \code{types} must be given and determines the possible types in the multitype pattern. } \item{vector:}{ If \code{lambda} is a numeric vector, then this algorithm generates a realisation of the stationary marked Poisson process inside the window \code{win} with intensity \code{lambda[i]} for points of type \code{types[i]}. The total intensity of points of all types is \code{sum(lambda)}. The argument \code{types} defaults to \code{names(lambda)}, or if that is null, \code{1:length(lambda)}. 
} \item{function:}{ If \code{lambda} is a function, the process has intensity \code{lambda(x,y,m,\dots)} at spatial location \code{(x,y)} for points of type \code{m}. The function \code{lambda} must work correctly with vectors \code{x}, \code{y} and \code{m}, returning a vector of function values. (Note that \code{m} will be a factor with levels equal to \code{types}.) The value \code{lmax}, if present, must be an upper bound on the values of \code{lambda(x,y,m,\dots)} for all locations \code{(x, y)} inside the window \code{win} and all types \code{m}. The argument \code{types} must be given. } \item{list of functions:}{ If \code{lambda} is a list of functions, the process has intensity \code{lambda[[i]](x,y,\dots)} at spatial location \code{(x,y)} for points of type \code{types[i]}. The function \code{lambda[[i]]} must work correctly with vectors \code{x} and \code{y}, returning a vector of function values. The value \code{lmax}, if given, must be an upper bound on the values of \code{lambda(x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. The argument \code{types} defaults to \code{names(lambda)}, or if that is null, \code{1:length(lambda)}. } \item{pixel image:}{ If \code{lambda} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), the intensity at a location \code{(x,y)} for points of any type is equal to the pixel value of \code{lambda} for the pixel nearest to \code{(x,y)}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} must be given. } \item{list of pixel images:}{ If \code{lambda} is a list of pixel images, then the image \code{lambda[[i]]} determines the intensity of points of type \code{types[i]}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} defaults to \code{names(lambda)}, or if that is null, \code{1:length(lambda)}. 
} } If \code{lmax} is missing, an approximate upper bound will be calculated. To generate an inhomogeneous Poisson process the algorithm uses ``thinning'': it first generates a uniform Poisson process of intensity \code{lmax} for points of each type \code{m}, then randomly deletes or retains each point independently, with retention probability \eqn{p(x,y,m) = \lambda(x,y,m)/\mbox{lmax}}{p(x,y,m) = lambda(x,y,m)/lmax}. } \seealso{ \code{\link{rpoispp}} for unmarked Poisson point process; \code{\link{rmpoint}} for a fixed number of random marked points; \code{\link{ppp.object}}, \code{\link{owin.object}}. } \examples{ # uniform bivariate Poisson process with total intensity 100 in unit square pp <- rmpoispp(50, types=c("a","b")) # stationary bivariate Poisson process with intensity A = 30, B = 70 pp <- rmpoispp(c(30,70), types=c("A","B")) pp <- rmpoispp(c(30,70)) # works in any window data(letterR) pp <- rmpoispp(c(30,70), win=letterR, types=c("A","B")) # inhomogeneous lambda(x,y,m) # note argument 'm' is a factor lam <- function(x,y,m) { 50 * (x^2 + y^3) * ifelse(m=="A", 2, 1)} pp <- rmpoispp(lam, win=letterR, types=c("A","B")) # extra arguments lam <- function(x,y,m,scal) { scal * (x^2 + y^3) * ifelse(m=="A", 2, 1)} pp <- rmpoispp(lam, win=letterR, types=c("A","B"), scal=50) # list of functions lambda[[i]](x,y) lams <- list(function(x,y){50 * x^2}, function(x,y){20 * abs(y)}) pp <- rmpoispp(lams, win=letterR, types=c("A","B")) pp <- rmpoispp(lams, win=letterR) # functions with extra arguments lams <- list(function(x,y,scal){5 * scal * x^2}, function(x,y, scal){2 * scal * abs(y)}) pp <- rmpoispp(lams, win=letterR, types=c("A","B"), scal=10) pp <- rmpoispp(lams, win=letterR, scal=10) # florid example lams <- list(function(x,y){ 100*exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend , function(x,y){ 100*exp(-0.6*x+0.5*y) } # log linear trend ) X <- rmpoispp(lams, win=unit.square(), types=c("on", "off")) # pixel image Z <- as.im(function(x,y){30 * (x^2 
+ y^3)}, letterR) pp <- rmpoispp(Z, types=c("A","B")) # list of pixel images ZZ <- list( as.im(function(x,y){20 * (x^2 + y^3)}, letterR), as.im(function(x,y){40 * (x^3 + y^2)}, letterR)) pp <- rmpoispp(ZZ, types=c("A","B")) pp <- rmpoispp(ZZ) # randomising an existing point pattern rmpoispp(intensity(amacrine), win=Window(amacrine)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/linearpcfinhom.Rd0000644000176200001440000001213413160710621016153 0ustar liggesusers\name{linearpcfinhom} \alias{linearpcfinhom} \title{ Inhomogeneous Linear Pair Correlation Function } \description{ Computes an estimate of the inhomogeneous linear pair correlation function for a point pattern on a linear network. } \usage{ linearpcfinhom(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update = TRUE, leaveoneout = TRUE, ratio = FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{lambda}{ Intensity values for the point pattern. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{density.default}} to control the smoothing. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the data points, raised to \code{normpower}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See explanation in \code{\link{linearKinhom}}. 
} \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"lppm"} or \code{"ppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.lppm}} or \code{\link{update.ppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{fitted.lppm}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity, when \code{lambda} is a fitted model. Supported only when \code{update=TRUE}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the inhomogeneous version of the linear pair correlation function from point pattern data on a linear network. If \code{lambda = NULL} the result is equivalent to the homogeneous pair correlation function \code{\link{linearpcf}}. If \code{lambda} is given, then it is expected to provide estimated values of the intensity of the point process at each point of \code{X}. The argument \code{lambda} may be a numeric vector (of length equal to the number of points in \code{X}), or a \code{function(x,y)} that will be evaluated at the points of \code{X} to yield numeric values, or a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). If \code{lambda} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. 
If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). The bandwidth for smoothing the pairwise distances is determined by arguments \code{\dots} passed to \code{\link{density.default}}, mainly the arguments \code{bw} and \code{adjust}. The default is to choose the bandwidth by Silverman's rule of thumb \code{bw="nrd0"} explained in \code{\link{density.default}}. } \value{ Function value table (object of class \code{"fv"}). If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{g(r)}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271--290. } \seealso{ \code{\link{linearpcf}}, \code{\link{linearKinhom}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) fit <- lppm(X ~x) K <- linearpcfinhom(X, lambda=fit) plot(K) } \keyword{spatial} \keyword{nonparametric} spatstat/man/simplify.owin.Rd0000644000176200001440000000256613160710621015774 0ustar liggesusers\name{simplify.owin} \Rdversion{1.1} \alias{simplify.owin} \title{ Approximate a Polygon by a Simpler Polygon } \description{ Given a polygonal window, this function finds a simpler polygon that approximates it. } \usage{ simplify.owin(W, dmin) } \arguments{ \item{W}{ The polygon which is to be simplified. An object of class \code{"owin"}. } \item{dmin}{ Numeric value. 
The smallest permissible length of an edge. } } \details{ This function simplifies a polygon \code{W} by recursively deleting the shortest edge of \code{W} until all remaining edges are longer than the specified minimum length \code{dmin}, or until there are only three edges left. The argument \code{W} must be a window (object of class \code{"owin"}). It should be of type \code{"polygonal"}. If \code{W} is a rectangle, it is returned without alteration. The simplification algorithm is not yet implemented for binary masks. If \code{W} is a mask, an error is generated. } \value{ Another window (object of class \code{"owin"}) of type \code{"polygonal"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{owin}} } \examples{ plot(letterR, col="red") plot(simplify.owin(letterR, 0.3), col="blue", add=TRUE) W <- Window(chorley) plot(W) WS <- simplify.owin(W, 2) plot(WS, add=TRUE, border="green") points(vertices(WS)) } \keyword{spatial} \keyword{math} spatstat/man/moribund.Rd0000644000176200001440000000232513160710621014775 0ustar liggesusers\name{moribund} \alias{kstest} \alias{kstest.ppp} \alias{kstest.ppm} \alias{kstest.lpp} \alias{kstest.lppm} \alias{kstest.slrm} \alias{plot.kstest} \alias{bermantest} \alias{bermantest.ppm} \alias{bermantest.ppp} \alias{bermantest.lppm} \alias{bermantest.lpp} \title{Outdated Functions} \description{ These outdated functions are retained only for compatibility; they will soon be marked as Deprecated. } \usage{ kstest(\dots) kstest.ppp(\dots) kstest.ppm(\dots) kstest.lpp(\dots) kstest.lppm(\dots) kstest.slrm(\dots) \method{plot}{kstest}(x, \dots) bermantest(\dots) bermantest.ppp(\dots) bermantest.ppm(\dots) bermantest.lpp(\dots) bermantest.lppm(\dots) } \arguments{ \item{x}{ An object of class \code{"kstest"} or \code{"cdftest"}. } \item{\dots}{ Arguments passed to other functions. } } \details{ These functions will be Deprecated in future releases of \pkg{spatstat}. 
The \code{kstest} functions have been superseded by \code{\link{cdf.test}}. The \code{bermantest} functions have been superseded by \code{\link{berman.test}}. } \author{\adrian , \rolf and \ege. } \seealso{ \code{\link{cdf.test}}, \code{\link{berman.test}}, \code{\link{plot.cdftest}} } \keyword{spatial} spatstat/man/alltypes.Rd0000644000176200001440000002242513160710571015022 0ustar liggesusers\name{alltypes} \alias{alltypes} \title{Calculate Summary Statistic for All Types in a Multitype Point Pattern} \description{ Given a marked point pattern, this computes the estimates of a selected summary function (\eqn{F},\eqn{G}, \eqn{J}, \eqn{K} etc) of the pattern, for all possible combinations of marks, and returns these functions in an array. } \usage{ alltypes(X, fun="K", \dots, dataname=NULL,verb=FALSE,envelope=FALSE,reuse=TRUE) } \arguments{ \item{X}{The observed point pattern, for which summary function estimates are required. An object of class \code{"ppp"} or \code{"lpp"}. } \item{fun}{The summary function. Either an \R function, or a character string indicating the summary function required. Options for strings are \code{"F"}, \code{"G"}, \code{"J"}, \code{"K"}, \code{"L"}, \code{"pcf"}, \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"}, \code{"Lcross"}, \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"}, \code{"Ldot"}. } \item{\dots}{ Arguments passed to the summary function (and to the function \code{\link{envelope}} if appropriate) } \item{dataname}{Character string giving an optional (alternative) name to the point pattern, different from what is given in the call. This name, if supplied, may be used by \code{\link{plot.fasp}()} in forming the title of the plot. If not supplied it defaults to the parsing of the argument supplied as \code{X} in the call. } \item{verb}{ Logical value. If \code{verb} is true then terse ``progress reports'' (just the values of the mark indices) are printed out when the calculations for that combination of marks are completed. 
} \item{envelope}{ Logical value. If \code{envelope} is true, then simulation envelopes of the summary function will also be computed. See Details. } \item{reuse}{ Logical value indicating whether the envelopes in each panel should be based on the same set of simulated patterns (\code{reuse=TRUE}) or on different, independent sets of simulated patterns (\code{reuse=FALSE}). } } \details{ This routine is a convenient way to analyse the dependence between types in a multitype point pattern. It computes the estimates of a selected summary function of the pattern, for all possible combinations of marks. It returns these functions in an array (an object of class \code{"fasp"}) amenable to plotting by \code{\link{plot.fasp}()}. The argument \code{fun} specifies the summary function that will be evaluated for each type of point, or for each pair of types. It may be either an \R function or a character string. Suppose that the points have possible types \eqn{1,2,\ldots,m} and let \eqn{X_i}{X[i]} denote the pattern of points of type \eqn{i} only. If \code{fun="F"} then this routine calculates, for each possible type \eqn{i}, an estimate of the Empty Space Function \eqn{F_i(r)}{F[i](r)} of \eqn{X_i}{X[i]}. See \code{\link{Fest}} for explanation of the empty space function. The estimate is computed by applying \code{\link{Fest}} to \eqn{X_i}{X[i]} with the optional arguments \code{\dots}. If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"}, the routine calculates, for each pair of types \eqn{(i,j)}, an estimate of the ``\code{i}-to-\code{j}'' cross-type function \eqn{G_{ij}(r)}{G[i,j](r)}, \eqn{J_{ij}(r)}{J[i,j](r)}, \eqn{K_{ij}(r)}{K[i,j](r)} or \eqn{L_{ij}(r)}{L[i,j](r)} respectively describing the dependence between \eqn{X_i}{X[i]} and \eqn{X_j}{X[j]}. See \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} or \code{\link{Lcross}} respectively for explanation of these functions. 
The estimate is computed by applying the relevant function (\code{\link{Gcross}} etc) to \code{X} using each possible value of the arguments \code{i,j}, together with the optional arguments \code{\dots}. If \code{fun} is \code{"pcf"} the routine calculates the cross-type pair correlation function \code{\link{pcfcross}} between each pair of types. If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"}, the routine calculates, for each type \eqn{i}, an estimate of the ``\code{i}-to-any'' dot-type function \eqn{G_{i\bullet}(r)}{G[i.](r)}, \eqn{J_{i\bullet}(r)}{J[i.](r)} or \eqn{K_{i\bullet}(r)}{K[i.](r)} or \eqn{L_{i\bullet}(r)}{L[i.](r)} respectively describing the dependence between \eqn{X_i}{X[i]} and \eqn{X}{X}. See \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or \code{\link{Ldot}} respectively for explanation of these functions. The estimate is computed by applying the relevant function (\code{\link{Gdot}} etc) to \code{X} using each possible value of the argument \code{i}, together with the optional arguments \code{\dots}. The letters \code{"G"}, \code{"J"}, \code{"K"} and \code{"L"} are interpreted as abbreviations for \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} and \code{\link{Lcross}} respectively, assuming the point pattern is marked. If the point pattern is unmarked, the appropriate function \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} is invoked instead. If \code{envelope=TRUE}, then as well as computing the value of the summary function for each combination of types, the algorithm also computes simulation envelopes of the summary function for each combination of types. The arguments \code{\dots} are passed to the function \code{\link{envelope}} to control the number of simulations, the random process generating the simulations, the construction of envelopes, and so on. } \value{ A function array (an object of class \code{"fasp"}, see \code{\link{fasp.object}}). 
This can be plotted using \code{\link{plot.fasp}}. If the pattern is not marked, the resulting ``array'' has dimensions \eqn{1 \times 1}{1 x 1}. Otherwise the following is true: If \code{fun="F"}, the function array has dimensions \eqn{m \times 1}{m * 1} where \eqn{m} is the number of different marks in the point pattern. The entry at position \code{[i,1]} in this array is the result of applying \code{\link{Fest}} to the points of type \code{i} only. If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"}, the function array again has dimensions \eqn{m \times 1}{m * 1}. The entry at position \code{[i,1]} in this array is the result of \code{Gdot(X, i)}, \code{Jdot(X, i)}, \code{Kdot(X, i)} or \code{Ldot(X, i)} respectively. If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"} (or their abbreviations \code{"G"}, \code{"J"}, \code{"K"} or \code{"L"}), the function array has dimensions \eqn{m \times m}{m * m}. The \code{[i,j]} entry of the function array (for \eqn{i \neq j}{i != j}) is the result of applying the function \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} or \code{\link{Lcross}} to the pair of types \code{(i,j)}. The diagonal \code{[i,i]} entry of the function array is the result of applying the univariate function \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} to the points of type \code{i} only. If \code{envelope=FALSE}, then each function entry \code{fns[[i]]} retains the format of the output of the relevant estimating routine \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or \code{\link{Ldot}}. The default formulae for plotting these functions are \code{cbind(km,theo) ~ r} for F, G, and J functions, and \code{cbind(trans,theo) ~ r} for K and L functions. 
If \code{envelope=TRUE}, then each function entry \code{fns[[i]]} has the same format as the output of the \code{\link{envelope}} command. } \note{ Sizeable amounts of memory may be needed during the calculation. } \seealso{ \code{\link{plot.fasp}}, \code{\link{fasp.object}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}}, \code{\link{envelope}}. } \examples{ # bramblecanes (3 marks). bram <- bramblecanes \testonly{ bram <- bram[c(seq(1, 744, by=20), seq(745, 823, by=4))] } bF <- alltypes(bram,"F",verb=TRUE) plot(bF) if(interactive()) { plot(alltypes(bram,"G")) plot(alltypes(bram,"Gdot")) } # Swedishpines (unmarked). swed <- swedishpines \testonly{ swed <- swed[1:25] } plot(alltypes(swed,"K")) plot(alltypes(amacrine, "pcf"), ylim=c(0,1.3)) # A setting where you might REALLY want to use dataname: \dontrun{ xxx <- alltypes(ppp(Melvin$x,Melvin$y, window=as.owin(c(5,20,15,50)),marks=clyde), fun="F",verb=TRUE,dataname="Melvin") } # envelopes bKE <- alltypes(bram,"K",envelope=TRUE,nsim=19) \dontrun{ bFE <- alltypes(bram,"F",envelope=TRUE,nsim=19,global=TRUE) } # extract one entry as.fv(bKE[1,1]) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat/man/persp.im.Rd0000644000176200001440000001341013160710621014710 0ustar liggesusers\name{persp.im} \alias{persp.im} \title{Perspective Plot of Pixel Image} \description{ Displays a perspective plot of a pixel image. } \usage{ \method{persp}{im}(x, \dots, colmap=NULL, colin=x, apron=FALSE, visible=FALSE) } \arguments{ \item{x}{ The pixel image to be plotted as a surface. An object of class \code{"im"} (see \code{\link{im.object}}). } \item{\dots}{ Extra arguments passed to \code{\link{persp.default}} to control the display. } \item{colmap}{ Optional data controlling the colour map. See Details. 
} \item{colin}{ Optional. Colour input. Another pixel image (of the same dimensions as \code{x}) containing the values that will be mapped to colours. } \item{apron}{ Logical. If \code{TRUE}, a grey apron is placed around the sides of the perspective plot. } \item{visible}{ Logical value indicating whether to compute which pixels of \code{x} are visible in the perspective view. See Details. } } \value{ (invisibly) the 3D transformation matrix returned by \code{\link{persp.default}}, together with an attribute \code{"expand"} which gives the relative scale of the \eqn{z} coordinate. If argument \code{visible=TRUE} was given, the return value also has an attribute \code{"visible"} which is a pixel image, compatible with \code{x}, with logical values which are \emph{TRUE} when the corresponding pixel is visible in the perspective view, and \code{FALSE} when it is obscured. } \details{ This is the \code{persp} method for the class \code{"im"}. The pixel image \code{x} must have real or integer values. These values are treated as heights of a surface, and the surface is displayed as a perspective plot on the current plot device, using equal scales on the \code{x} and \code{y} axes. The optional argument \code{colmap} gives an easy way to display different altitudes in different colours (if this is what you want). \itemize{ \item If \code{colmap} is a colour map (object of class \code{"colourmap"}, created by the function \code{\link{colourmap}}) then this colour map will be used to associate altitudes with colours. \item If \code{colmap} is a character vector, then the range of altitudes in the perspective plot will be divided into \code{length(colmap)} intervals, and those parts of the surface which lie in a particular altitude range will be assigned the corresponding colour from \code{colmap}. 
\item If \code{colmap} is a function in the \R language of the form \code{function(n, ...)}, this function will be called with an appropriate value of \code{n} to generate a character vector of \code{n} colours. Examples of such functions are \code{\link[grDevices]{heat.colors}}, \code{\link[grDevices]{terrain.colors}}, \code{\link[grDevices]{topo.colors}} and \code{\link[grDevices]{cm.colors}}. \item If \code{colmap} is a function in the \R language of the form \code{function(range, ...)} then it will be called with \code{range} equal to the range of altitudes, to determine the colour values or colour map. Examples of such functions are \code{\link{beachcolours}} and \code{\link{beachcolourmap}}. \item If \code{colmap} is a list with entries \code{breaks} and \code{col}, then \code{colmap$breaks} determines the breakpoints of the altitude intervals, and \code{colmap$col} provides the corresponding colours. } Alternatively, if the argument \code{colin} (\emph{colour input}) is present, then the colour map \code{colmap} will be applied to the pixel values of \code{colin} instead of the pixel values of \code{x}. The result is a perspective view of a surface with heights determined by \code{x} and colours determined by \code{colin} (mapped by \code{colmap}). If \code{apron=TRUE}, vertical surface is drawn around the boundary of the perspective plot, so that the terrain appears to have been cut out of a solid material. If colour data were supplied, then the apron is coloured light grey. Graphical parameters controlling the perspective plot are passed through the \code{...} arguments directly to the function \code{\link{persp.default}}. See the examples in \code{\link{persp.default}} or in \code{demo(persp)}. The vertical scale is controlled by the argument \code{expand}: setting \code{expand=1} will interpret the pixel values as being in the same units as the spatial coordinates \eqn{x} and \eqn{y} and represent them at the same scale. 
If \code{visible=TRUE}, the algorithm also computes whether each pixel in \code{x} is visible in the perspective view. In order to be visible, a pixel must not be obscured by another pixel which lies in front of it (as seen from the viewing direction), and the three-dimensional vector normal to the surface must be pointing toward the viewer. The return value of \code{persp.im} then has an attribute \code{"visible"} which is a pixel image, compatible with \code{x}, with pixel value equal to \code{TRUE} if the corresponding pixel in \code{x} is visible, and \code{FALSE} if it is not visible. } \seealso{ \code{\link{perspPoints}}, \code{\link{perspLines}} for drawing additional points or lines on the surface. \code{\link{im.object}}, \code{\link{plot.im}}, \code{\link{contour.im}} } \examples{ # an image Z <- setcov(owin()) persp(Z, colmap=terrain.colors(128)) co <- colourmap(range=c(0,1), col=rainbow(128)) persp(Z, colmap=co, axes=FALSE, shade=0.3) ## Terrain elevation persp(bei.extra$elev, colmap=terrain.colors(128), apron=TRUE, theta=-30, phi=20, zlab="Elevation", main="", ticktype="detailed", expand=6) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/closetriples.Rd0000644000176200001440000000160513160710571015672 0ustar liggesusers\name{closetriples} \alias{closetriples} \title{ Close Triples of Points } \description{ Low-level function to find all close triples of points. } \usage{ closetriples(X, rmax) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"pp3"}). } \item{rmax}{ Maximum distance between each pair of points in a triple. } } \details{ This low-level function finds all triples of points in a point pattern in which each pair lies closer than \code{rmax}. } \value{ A data frame with columns \code{i,j,k} giving the indices of the points in each triple, and a column \code{diam} giving the diameter (maximum pairwise distance) in the triple. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{closepairs}}, \code{\link{Tstat}}. } \examples{ closetriples(redwoodfull, 0.02) closetriples(redwoodfull, 0.005) } \keyword{spatial} \keyword{math} spatstat/man/parres.Rd0000644000176200001440000001722513160710621014457 0ustar liggesusers\name{parres} \alias{parres} \title{ Partial Residuals for Point Process Model } \description{ Computes the smoothed partial residuals, a diagnostic for transformation of a covariate in a Poisson point process model. } \usage{ parres(model, covariate, ..., smooth.effect=FALSE, subregion=NULL, bw = "nrd0", adjust=1, from = NULL, to = NULL, n = 512, bw.input = c("points", "quad"), bw.restrict=FALSE, covname) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{covariate}{ The covariate of interest. Either a character string matching the name of one of the canonical covariates in the model, or one of the names \code{"x"} or \code{"y"} referring to the Cartesian coordinates, or one of the names of the covariates given when \code{model} was fitted, or a pixel image (object of class \code{"im"}) or \code{function(x,y)} supplying the values of a covariate at any location. } \item{smooth.effect}{ Logical. Determines the choice of algorithm. See Details. } \item{subregion}{ Optional. A window (object of class \code{"owin"}) specifying a subset of the spatial domain of the data. The calculation will be confined to the data in this subregion. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link[stats]{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link[stats]{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link[stats]{density.default}} to control the number and range of values at which the function will be estimated. } \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}. 
} \item{bw.input}{ Character string specifying the input data used for automatic bandwidth selection. } \item{bw.restrict}{ Logical value, specifying whether bandwidth selection is performed using data from the entire spatial domain or from the \code{subregion}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } } \details{ This command computes the smoothed partial residual diagnostic (Baddeley, Chang, Song and Turner, 2012) for the transformation of a covariate in a Poisson point process model. The argument \code{model} must be a fitted Poisson point process model. The diagnostic works in two different ways: \describe{ \item{Canonical covariate:}{ The argument \code{covariate} may be a character string which is the name of one of the \emph{canonical covariates} in the model. The canonical covariates are the functions \eqn{Z_j}{Z[j]} that appear in the expression for the Poisson point process intensity \deqn{ \lambda(u) = \exp(\beta_1 Z_1(u) + \ldots + \beta_p Z_p(u)) }{ lambda(u) = exp(beta[1] * Z[1](u) + \ldots + \beta[p] * Z[p](u)) } at spatial location \eqn{u}. Type \code{names(coef(model))} to see the names of the canonical covariates in \code{model}. If the selected covariate is \eqn{Z_j}{Z[j]}, then the diagnostic plot concerns the model term \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)}. The plot shows a smooth estimate of a function \eqn{h(z)} that should replace this linear term, that is, \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)} should be replaced by \eqn{h(Z_j(u))}{h(Z[j](u))}. The linear function is also plotted as a dotted line. } \item{New covariate:}{ If the argument \code{covariate} is a pixel image (object of class \code{"im"}) or a \code{function(x,y)}, it is assumed to provide the values of a covariate that is not present in the model. Alternatively \code{covariate} can be the name of a covariate that was supplied when the model was fitted (i.e. 
in the call to \code{\link{ppm}}) but which does not feature in the model formula. In either case we speak of a new covariate \eqn{Z(u)}. If the fitted model intensity is \eqn{\lambda(u)}{lambda(u)} then we consider modifying this to \eqn{\lambda(u) \exp(h(Z(u)))}{lambda(u) * exp(h(Z(u)))} where \eqn{h(z)} is some function. The diagnostic plot shows an estimate of \eqn{h(z)}. \bold{Warning: in this case the diagnostic is not theoretically justified. This option is provided for research purposes.} } } Alternatively \code{covariate} can be one of the character strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. The behaviour here depends on whether the coordinate was one of the canonical covariates in the model. If there is more than one canonical covariate in the model that depends on the specified \code{covariate}, then the covariate effect is computed using all these canonical covariates. For example in a log-quadratic model which includes the terms \code{x} and \code{I(x^2)}, the quadratic effect involving both these terms will be computed. There are two choices for the algorithm. If \code{smooth.effect=TRUE}, the fitted covariate effect (according to \code{model}) is added to the point process residuals, then smoothing is applied to these values. If \code{smooth.effect=FALSE}, the point process residuals are smoothed first, and then the fitted covariate effect is added to the result. The smoothing bandwidth is controlled by the arguments \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}. If \code{bw} is a numeric value, then the bandwidth is taken to be \code{adjust * bw}. If \code{bw} is a string representing a bandwidth selection rule (recognised by \code{\link[stats]{density.default}}) then the bandwidth is selected by this rule. The data used for automatic bandwidth selection are specified by \code{bw.input} and \code{bw.restrict}. 
If \code{bw.input="points"} (the default) then bandwidth selection is based on the covariate values at the points of the original point pattern dataset to which the model was fitted. If \code{bw.input="quad"} then bandwidth selection is based on the covariate values at every quadrature point used to fit the model. If \code{bw.restrict=TRUE} then the bandwidth selection is performed using only data from inside the \code{subregion}. } \section{Slow computation}{ In a large dataset, computation can be very slow if the default settings are used, because the smoothing bandwidth is selected automatically. To avoid this, specify a numerical value for the bandwidth \code{bw}. One strategy is to use a coarser subset of the data to select \code{bw} automatically. The selected bandwidth can be read off the print output for \code{parres}. } \value{ A function value table (object of class \code{"fv"}) containing the values of the smoothed partial residual, the estimated variance, and the fitted effect of the covariate. Also belongs to the class \code{"parres"} which has methods for \code{print} and \code{plot}. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2013) Residual diagnostics for covariate effects in spatial point process models. \emph{Journal of Computational and Graphical Statistics}, \bold{22}, 886--905. } \author{ \adrian , \rolf , Ya-Mei Chang and Yong Song. 
} \seealso{ \code{\link{addvar}}, \code{\link{rhohat}}, \code{\link{rho2hat}} } \examples{ X <- rpoispp(function(x,y){exp(3+x+2*x^2)}) model <- ppm(X, ~x+y) tra <- parres(model, "x") plot(tra) plot(parres(model, "x", subregion=square(0.5))) model2 <- ppm(X, ~x+I(x^2)+y) plot(parres(model2, "x")) Z <- setcov(owin()) plot(parres(model2, Z)) } \keyword{spatial} \keyword{models} spatstat/man/plot.textstring.Rd0000644000176200001440000000215613160710621016350 0ustar liggesusers\name{plot.textstring} \alias{plot.textstring} \title{Plot a Text String} \description{Plots an object of class \code{"textstring"}.} \usage{ \method{plot}{textstring}(x, \dots, do.plot = TRUE) } \arguments{ \item{x}{ Object of class \code{"textstring"} to be plotted. This object is created by the command \code{\link{textstring}}. } \item{\dots}{ Additional graphics arguments passed to \code{\link[graphics]{text}} to control the plotting of text. } \item{do.plot}{ Logical value indicating whether to actually plot the text. } } \details{ The argument \code{x} should be an object of class \code{"textstring"} created by the command \code{\link{textstring}}. This function displays the text using \code{\link[graphics]{text}}. } \value{ A window (class \code{"owin"}) enclosing the plotted graphics. } \examples{ W <- Window(humberside) te <- textstring(centroid.owin(W), txt="Humberside", cex=2.5) plot(layered(W, te), main="") } \author{\adrian \rolf and \ege } \seealso{ \code{\link{onearrow}}, \code{\link{yardstick}} } \keyword{spatial} \keyword{hplot} spatstat/man/compileK.Rd0000644000176200001440000001017313160710571014725 0ustar liggesusers\name{compileK} \alias{compileK} \alias{compilepcf} \title{ Generic Calculation of K Function and Pair Correlation Function } \description{ Low-level functions which calculate the estimated \eqn{K} function and estimated pair correlation function (or any similar functions) from a matrix of pairwise distances and optional weights. 
} \usage{ compileK(D, r, weights = NULL, denom = 1, check = TRUE, ratio = FALSE, fname = "K") compilepcf(D, r, weights = NULL, denom = 1, check = TRUE, endcorrect = TRUE, ratio=FALSE, \dots, fname = "g") } \arguments{ \item{D}{ A square matrix giving the distances between all pairs of points. } \item{r}{ An equally spaced, finely spaced sequence of distance values. } \item{weights}{ Optional numerical weights for the pairwise distances. A numeric matrix with the same dimensions as \code{D}. If absent, the weights are taken to equal 1. } \item{denom}{ Denominator for the estimator. A single number, or a numeric vector with the same length as \code{r}. See Details. } \item{check}{ Logical value specifying whether to check that \code{D} is a valid matrix of pairwise distances. } \item{ratio}{ Logical value indicating whether to store ratio information. See Details. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{density.default}} controlling the kernel smoothing. } \item{endcorrect}{ Logical value indicating whether to apply End Correction of the pair correlation estimate at \code{r=0}. } \item{fname}{ Character string giving the name of the function being estimated. } } \details{ These low-level functions construct estimates of the \eqn{K} function or pair correlation function, or any similar functions, given only the matrix of pairwise distances and optional weights associated with these distances. These functions are useful for code development and for teaching, because they perform a common task, and do the housekeeping required to make an object of class \code{"fv"} that represents the estimated function. However, they are not very efficient. 
\code{compileK} calculates the weighted estimate
of the \eqn{K} function,
\deqn{
\hat K(r) = (1/v(r)) \sum_i \sum_j 1\{ d_{ij} \le r\} w_{ij}
}{
K(r) = (1/v(r)) \sum[i] \sum[j] 1(d[i,j] \le r) w[i,j]
}
and \code{compilepcf} calculates the weighted estimate of the
pair correlation function,
\deqn{
\hat g(r) = (1/v(r)) \sum_i \sum_j \kappa( d_{ij} - r ) w_{ij}
}{
g(r) = (1/v(r)) \sum[i] \sum[j] kappa( d[i,j] - r) w[i,j]
}
where \eqn{d_{ij}}{d[i,j]} is the distance between spatial points
\eqn{i} and \eqn{j}, with corresponding weight \eqn{w_{ij}}{w[i,j]},
and \eqn{v(r)} is a specified denominator. Here \eqn{\kappa}{kappa}
is a fixed-bandwidth smoothing kernel.

For a point pattern in two dimensions, the usual denominator \eqn{v(r)}
is constant for the \eqn{K} function, and proportional to \eqn{r}
for the pair correlation function. See the Examples.

The result is an object of class \code{"fv"} representing the
estimated function. This object has only one column of function
values. Additional columns (such as a column giving the
theoretical value) must be added by the user, with the aid of
\code{\link{bind.fv}}.

If \code{ratio=TRUE}, the result also belongs to class \code{"rat"}
and has attributes containing the numerator and denominator
of the function estimate. This allows function estimates from
several datasets to be pooled using \code{\link{pool}}.
}
\value{
An object of class \code{"fv"} representing the estimated function.
}
\author{
\adrian
}
\seealso{
\code{\link{Kest}},
\code{\link{pcf}}
for definitions of the \eqn{K} function and pair correlation function.

\code{\link{bind.fv}} to add more columns.
} \examples{ X <- japanesepines D <- pairdist(X) Wt <- edge.Ripley(X, D) lambda <- intensity(X) a <- (npoints(X)-1) * lambda r <- seq(0, 0.25, by=0.01) K <- compileK(D=D, r=r, weights=Wt, denom=a) g <- compilepcf(D=D, r=r, weights=Wt, denom= a * 2 * pi * r) } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.onearrow.Rd0000644000176200001440000000476013160710621015774 0ustar liggesusers\name{plot.onearrow} \alias{plot.onearrow} \title{Plot an Arrow} \description{Plots an object of class \code{"onearrow"}.} \usage{ \method{plot}{onearrow}(x, \dots, add = FALSE, main = "", retract = 0.05, headfraction = 0.25, headangle = 12, headnick = 0.1, col.head = NA, lwd.head = lwd, lwd = 1, col = 1, zap = FALSE, zapfraction = 0.07, pch = 1, cex = 1, do.plot = TRUE, do.points = FALSE, show.all = !add) } \arguments{ \item{x}{ Object of class \code{"onearrow"} to be plotted. This object is created by the command \code{\link{onearrow}}. } \item{\dots}{ Additional graphics arguments passed to \code{\link[graphics]{segments}} to control the appearance of the line. } \item{add}{Logical value indicating whether to add graphics to the existing plot (\code{add=TRUE}) or to start a new plot (\code{add=FALSE}). } \item{main}{Main title for the plot.} \item{retract}{ Fraction of length of arrow to remove at each end. } \item{headfraction}{ Length of arrow head as a fraction of overall length of arrow. } \item{headangle}{ Angle (in degrees) between the outer edge of the arrow head and the shaft of the arrow. } \item{headnick}{ Size of the nick in the trailing edge of the arrow head as a fraction of length of arrow head. } \item{col.head,lwd.head}{ Colour and line style of the filled arrow head. } \item{col,lwd}{ Colour and line style of the arrow shaft. } \item{zap}{ Logical value indicating whether the arrow should include a Z-shaped (lightning-bolt) feature in the middle of the shaft. } \item{zapfraction}{ Size of Z-shaped deviation as a fraction of total arrow length. 
} \item{pch,cex}{ Plot character and character size for the two end points of the arrow, if \code{do.points=TRUE}. } \item{do.plot}{ Logical. Whether to actually perform the plot. } \item{do.points}{ Logical. Whether to display the two end points of the arrow as well. } \item{show.all}{ Internal use only. } } \details{ The argument \code{x} should be an object of class \code{"onearrow"} created by the command \code{\link{onearrow}}. } \value{ A window (class \code{"owin"}) enclosing the plotted graphics. } \examples{ oa <- onearrow(cells[c(1, 42)]) plot(oa) plot(oa, zap=TRUE, do.points=TRUE, col.head="pink", col="red") } \author{\adrian \rolf and \ege } \seealso{ \code{\link{onearrow}}, \code{\link{yardstick}} } \keyword{spatial} \keyword{hplot} spatstat/man/as.tess.Rd0000644000176200001440000000476013160710571014547 0ustar liggesusers\name{as.tess} \alias{as.tess} \alias{as.tess.tess} \alias{as.tess.im} \alias{as.tess.owin} \alias{as.tess.quadratcount} \alias{as.tess.quadrattest} \alias{as.tess.list} \title{Convert Data To Tessellation} \description{ Converts data specifying a tessellation, in any of several formats, into an object of class \code{"tess"}. } \usage{ as.tess(X) \method{as.tess}{tess}(X) \method{as.tess}{im}(X) \method{as.tess}{owin}(X) \method{as.tess}{quadratcount}(X) \method{as.tess}{quadrattest}(X) \method{as.tess}{list}(X) } \arguments{ \item{X}{Data to be converted to a tessellation.} } \value{ An object of class \code{"tess"} specifying a tessellation. } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. This command creates an object of class \code{"tess"} that represents a tessellation. This function converts data in any of several formats into an object of class \code{"tess"} for use by the \pkg{spatstat} package. The argument \code{X} may be \itemize{ \item an object of class \code{"tess"}. 
The object will be stripped of any extraneous attributes and returned. \item a pixel image (object of class \code{"im"}) with pixel values that are logical or factor values. Each level of the factor will determine a tile of the tessellation. \item a window (object of class \code{"owin"}). The result will be a tessellation consisting of a single tile. \item a set of quadrat counts (object of class \code{"quadratcount"}) returned by the command \code{\link{quadratcount}}. The quadrats used to generate the counts will be extracted and returned as a tessellation. \item a quadrat test (object of class \code{"quadrattest"}) returned by the command \code{\link{quadrat.test}}. The quadrats used to perform the test will be extracted and returned as a tessellation. \item a list of windows (objects of class \code{"owin"}) giving the tiles of the tessellation. } The function \code{as.tess} is generic, with methods for various classes, as listed above. } \seealso{ \code{\link{tess}} } \examples{ # pixel image v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin()) levels(v) <- letters[seq(length(levels(v)))] as.tess(v) # quadrat counts data(nztrees) qNZ <- quadratcount(nztrees, nx=4, ny=3) as.tess(qNZ) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/clickdist.Rd0000644000176200001440000000170113160710571015130 0ustar liggesusers\name{clickdist} \alias{clickdist} \title{Interactively Measure Distance} \description{ Measures the distance between two points which the user has clicked on. } \usage{ clickdist() } \value{ A single nonnegative number. } \details{ This function allows the user to measure the distance between two spatial locations, interactively, by clicking on the screen display. When \code{clickdist()} is called, the user is expected to click two points in the current graphics device. The distance between these points will be returned. 
This function uses the \R{} command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. } \seealso{ \code{\link[graphics]{locator}}, \code{\link{clickppp}}, \code{\link{clicklpp}}, \code{\link{clickpoly}}, \code{\link{clickbox}} } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{iplot} spatstat/man/Ksector.Rd0000644000176200001440000000556013160710571014600 0ustar liggesusers\name{Ksector} \alias{Ksector} \title{Sector K-function} \description{ A directional counterpart of Ripley's \eqn{K} function, in which pairs of points are counted only when the vector joining the pair happens to lie in a particular range of angles. } \usage{ Ksector(X, begin = 0, end = 360, \dots, units = c("degrees", "radians"), r = NULL, breaks = NULL, correction = c("border", "isotropic", "Ripley", "translate"), domain=NULL, ratio = FALSE, verbose=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{begin,end}{ Numeric values giving the range of angles inside which points will be counted. Angles are measured in degrees (if \code{units="degrees"}, the default) or radians (if \code{units="radians"}) anti-clockwise from the positive \eqn{x}-axis. } \item{\dots}{Ignored.} \item{units}{ Units in which the angles \code{begin} and \code{end} are expressed. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. 
A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"}, \code{"good"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional window. The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{verbose}{ Logical value indicating whether to print progress reports and warnings. } } \details{ This is a directional counterpart of Ripley's \eqn{K} function (see \code{\link{Kest}}) in which, instead of counting all pairs of points within a specified distance \eqn{r}, we count only the pairs \eqn{(x_i, x_j)}{x[i], x[j]} for which the vector \eqn{x_j - x_i}{x[j] - x[i]} falls in a particular range of angles. This can be used to evaluate evidence for anisotropy in the point pattern \code{X}. } \value{ An object of class \code{"fv"} containing the estimated function. } \seealso{ \code{\link{Kest}} } \examples{ K <- Ksector(swedishpines, 0, 90) plot(K) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{nonparametric} spatstat/man/parameters.Rd0000644000176200001440000000266213160710621015325 0ustar liggesusers\name{parameters} \alias{parameters} \alias{parameters.dppm} \alias{parameters.kppm} \alias{parameters.ppm} \alias{parameters.profilepl} \alias{parameters.interact} \alias{parameters.fii} \title{ Extract Model Parameters in Understandable Form } \description{ Given a fitted model of some kind, this function extracts all the parameters needed to specify the model, and returns them as a list. 
} \usage{ parameters(model, \dots) \method{parameters}{dppm}(model, \dots) \method{parameters}{kppm}(model, \dots) \method{parameters}{ppm}(model, \dots) \method{parameters}{profilepl}(model, \dots) \method{parameters}{fii}(model, \dots) \method{parameters}{interact}(model, \dots) } \arguments{ \item{model}{ A fitted model of some kind. } \item{\dots}{ Arguments passed to methods. } } \details{ The argument \code{model} should be a fitted model of some kind. This function extracts all the parameters that would be needed to specify the model, and returns them as a list. The function \code{parameters} is generic, with methods for class \code{"ppm"}, \code{"kppm"}, \code{"dppm"} and \code{"profilepl"} and other classes. } \value{ A named list, whose format depends on the fitted model. } \author{ \spatstatAuthors } \seealso{ \code{\link{coef}} } \examples{ fit1 <- ppm(cells ~ x, Strauss(0.1)) parameters(fit1) fit2 <- kppm(redwood ~ x, "Thomas") parameters(fit2) } \keyword{spatial} \keyword{models} spatstat/man/summary.psp.Rd0000644000176200001440000000136613160710621015460 0ustar liggesusers\name{summary.psp} \alias{summary.psp} \title{Summary of a Line Segment Pattern Dataset} \description{ Prints a useful summary of a line segment pattern dataset. } \usage{ \method{summary}{psp}(object, \dots) } \arguments{ \item{object}{Line segment pattern (object of class \code{"psp"}).} \item{\dots}{Ignored.} } \details{ A useful summary of the line segment pattern \code{object} is printed. This is a method for the generic function \code{\link{summary}}. 
} \seealso{ \code{\link{summary}}, \code{\link{summary.owin}}, \code{\link{print.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) summary(a) # describes it } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/is.connected.Rd0000644000176200001440000000304113160710621015526 0ustar liggesusers\name{is.connected} \Rdversion{1.1} \alias{is.connected} \alias{is.connected.default} \alias{is.connected.linnet} \title{ Determine Whether an Object is Connected } \description{ Determine whether an object is topologically connected. } \usage{ is.connected(X, \dots) \method{is.connected}{default}(X, \dots) \method{is.connected}{linnet}(X, \dots) } \arguments{ \item{X}{ A spatial object such as a pixel image (object of class \code{"im"}), a window (object of class \code{"owin"}) or a linear network (object of class \code{"linnet"}). } \item{\dots}{ Arguments passed to \code{\link{connected}} to determine the connected components. } } \details{ The command \code{is.connected(X)} returns \code{TRUE} if the object \code{X} consists of a single, topologically-connected piece, and returns \code{FALSE} if \code{X} consists of several pieces which are not joined together. The function \code{is.connected} is generic. The default method \code{is.connected.default} works for many classes of objects, including windows (class \code{"owin"}) and images (class \code{"im"}). There is a method for linear networks, \code{is.connected.linnet}, described here, and a method for point patterns described in \code{\link{is.connected.ppp}}. } \value{ A logical value. } \seealso{ \code{\link{connected}}, \code{\link{is.connected.ppp}}. } \examples{ d <- distmap(cells, dimyx=256) X <- levelset(d, 0.07) plot(X) is.connected(X) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{math} spatstat/man/rmh.default.Rd0000644000176200001440000006572313160710621015402 0ustar liggesusers\name{rmh.default} \alias{rmh.default} \title{Simulate Point Process Models using the Metropolis-Hastings Algorithm.} \description{ Generates a random point pattern, simulated from a chosen point process model, using the Metropolis-Hastings algorithm. } \usage{ \method{rmh}{default}(model, start=NULL, control=default.rmhcontrol(model), \dots, nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, snoop=FALSE) } \arguments{ \item{model}{Data specifying the point process model that is to be simulated. } \item{start}{Data determining the initial state of the algorithm. } \item{control}{Data controlling the iterative behaviour and termination of the algorithm. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}} or to trend functions in \code{model}. } \item{nsim}{ Number of simulated point patterns that should be generated. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } \item{saveinfo}{ Logical value indicating whether to save auxiliary information. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{snoop}{ Logical. If \code{TRUE}, activate the visual debugger. } } \value{ A point pattern (an object of class \code{"ppp"}, see \code{\link{ppp.object}}) or a list of point patterns. The returned value has an attribute \code{info} containing modified versions of the arguments \code{model}, \code{start}, and \code{control} which together specify the exact simulation procedure. The \code{info} attribute can be printed (and is printed automatically by \code{\link{summary.ppp}}). For computational efficiency, the \code{info} attribute can be omitted by setting \code{saveinfo=FALSE}. 
The value of \code{\link[base:Random]{.Random.seed}} at the start of the simulations is also saved and returned as an attribute \code{seed}. If the argument \code{track=TRUE} was given (see \code{\link{rmhcontrol}}), the transition history of the algorithm is saved, and returned as an attribute \code{history}. The transition history is a data frame containing a factor \code{proposaltype} identifying the proposal type (Birth, Death or Shift) and a logical vector \code{accepted} indicating whether the proposal was accepted. The data frame also has columns \code{numerator}, \code{denominator} which give the numerator and denominator of the Hastings ratio for the proposal. If the argument \code{nsave} was given (see \code{\link{rmhcontrol}}), the return value has an attribute \code{saved} which is a list of point patterns, containing the intermediate states of the algorithm. } \details{ This function generates simulated realisations from any of a range of spatial point processes, using the Metropolis-Hastings algorithm. It is the default method for the generic function \code{\link{rmh}}. This function executes a Metropolis-Hastings algorithm with birth, death and shift proposals as described in Geyer and \ifelse{latex}{\out{M\o ller}}{Moller} (1994). The argument \code{model} specifies the point process model to be simulated. It is either a list, or an object of class \code{"rmhmodel"}, with the following components: \describe{ \item{cif}{A character string specifying the choice of interpoint interaction for the point process. } \item{par}{ Parameter values for the conditional intensity function. } \item{w}{ (Optional) window in which the pattern is to be generated. An object of class \code{"owin"}, or data acceptable to \code{\link{as.owin}}. } \item{trend}{ Data specifying the spatial trend in the model, if it has a trend. This may be a function, a pixel image (of class \code{"im"}), (or a list of functions or images if the model is multitype). 
If the trend is a function or functions, any auxiliary arguments \code{...} to \code{rmh.default} will be passed to these functions, which should be of the form \code{function(x, y, ...)}. } \item{types}{ List of possible types, for a multitype point process. } } For full details of these parameters, see \code{\link{rmhmodel.default}}. The argument \code{start} determines the initial state of the Metropolis-Hastings algorithm. It is either \code{NULL}, or an object of class \code{"rmhstart"}, or a list with the following components: \describe{ \item{n.start}{ Number of points in the initial point pattern. A single integer, or a vector of integers giving the numbers of points of each type in a multitype point pattern. Incompatible with \code{x.start}. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. \code{x.start} may be a point pattern (an object of class \code{"ppp"}), or data which can be coerced to this class by \code{\link{as.ppp}}, or an object with components \code{x} and \code{y}, or a two-column matrix. In the last two cases, the window for the pattern is determined by \code{model$w}. In the first two cases, if \code{model$w} is also present, then the final simulated pattern will be clipped to the window \code{model$w}. } } For full details of these parameters, see \code{\link{rmhstart}}. The third argument \code{control} controls the simulation procedure (including \emph{conditional simulation}), iterative behaviour, and termination of the Metropolis-Hastings algorithm. It is either \code{NULL}, or a list, or an object of class \code{"rmhcontrol"}, with components: \describe{ \item{p}{The probability of proposing a ``shift'' (as opposed to a birth or death) in the Metropolis-Hastings algorithm. } \item{q}{The conditional probability of proposing a death (rather than a birth) given that birth/death has been chosen over shift. 
} \item{nrep}{The number of repetitions or iterations to be made by the Metropolis-Hastings algorithm. It should be large. } \item{expand}{ Either a numerical expansion factor, or a window (object of class \code{"owin"}). Indicates that the process is to be simulated on a larger domain than the original data window \code{w}, then clipped to \code{w} when the algorithm has finished. The default is to expand the simulation window if the model is stationary and non-Poisson (i.e. it has no trend and the interaction is not Poisson) and not to expand in all other cases. If the model has a trend, then in order for expansion to be feasible, the trend must be given either as a function, or an image whose bounding box is large enough to contain the expanded window. } \item{periodic}{A logical scalar; if \code{periodic} is \code{TRUE} we simulate a process on the torus formed by identifying opposite edges of a rectangular window. } \item{ptypes}{A vector of probabilities (summing to 1) to be used in assigning a random type to a new point. } \item{fixall}{A logical scalar specifying whether to condition on the number of points of each type. } \item{nverb}{An integer specifying how often ``progress reports'' (which consist simply of the number of repetitions completed) should be printed out. If nverb is left at 0, the default, the simulation proceeds silently. } \item{x.cond}{If this argument is present, then \emph{conditional simulation} will be performed, and \code{x.cond} specifies the conditioning points and the type of conditioning. } \item{nsave,nburn}{ If these values are specified, then intermediate states of the simulation algorithm will be saved every \code{nsave} iterations, after an initial burn-in period of \code{nburn} iterations. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations. } } For full details of these parameters, see \code{\link{rmhcontrol}}. 
The control parameters can also be given in the \code{\dots} arguments.
}
\section{Conditional Simulation}{
There are several kinds of conditional simulation.
\itemize{
\item
Simulation \emph{conditional upon the number of points},
that is, holding the number of points fixed.
To do this, set \code{control$p} (the probability of a shift)
equal to 1.
The number of points is then determined by the starting state, which
may be specified either by setting \code{start$n.start} to be a
scalar, or by setting the initial pattern \code{start$x.start}.
\item
In the case of multitype processes, it is possible to simulate the
model \emph{conditionally upon the number of points of each type},
i.e. holding the number of points of each type
to be fixed. To do this, set \code{control$p} equal to 1
and \code{control$fixall} to be \code{TRUE}.
The number of points is then determined by the starting state, which
may be specified either by setting \code{start$n.start} to be an
integer vector, or by setting the initial pattern \code{start$x.start}.
\item
Simulation
\emph{conditional on the configuration observed in a sub-window},
that is, requiring that, inside a specified sub-window \eqn{V},
the simulated pattern should agree with a specified point pattern
\eqn{y}. To do this, set \code{control$x.cond} to equal the
specified point pattern \eqn{y}, making sure that it is an object of class
\code{"ppp"} and that the window \code{Window(control$x.cond)}
is the conditioning window \eqn{V}.
\item
Simulation \emph{conditional on the presence of specified points},
that is, requiring that the simulated pattern should include a
specified set of points. This is simulation from the Palm
distribution of the point process given a pattern \eqn{y}.
To do this, set \code{control$x.cond} to be a
\code{data.frame} containing the coordinates (and marks,
if appropriate) of the specified points.
}
For further information, see \code{\link{rmhcontrol}}.
Note that, when we simulate conditionally on the number of points, or conditionally on the number of points of each type, no expansion of the window is possible. } \section{Visual Debugger}{ If \code{snoop = TRUE}, an interactive debugger is activated. On the current plot device, the debugger displays the current state of the Metropolis-Hastings algorithm together with the proposed transition to the next state. Clicking on this graphical display (using the left mouse button) will re-centre the display at the clicked location. Surrounding this graphical display is an array of boxes representing different actions. Clicking on one of the action boxes (using the left mouse button) will cause the action to be performed. Debugger actions include: \itemize{ \item Zooming in or out \item Panning (shifting the field of view) left, right, up or down \item Jumping to the next iteration \item Skipping 10, 100, 1000, 10000 or 100000 iterations \item Jumping to the next Birth proposal (etc) \item Changing the fate of the proposal (i.e. changing whether the proposal is accepted or rejected) \item Dumping the current state and proposal to a file \item Printing detailed information at the terminal \item Exiting the debugger (so that the simulation algorithm continues without further interruption). } Right-clicking the mouse will also cause the debugger to exit. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283 -- 322. Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. 
Geyer, C.J. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \section{Warnings}{ There is never a guarantee that the Metropolis-Hastings algorithm has converged to its limiting distribution. If \code{start$x.start} is specified then \code{expand} is set equal to 1 and simulation takes place in \code{Window(x.start)}. Any specified value for \code{expand} is simply ignored. The presence of both a component \code{w} of \code{model} and a non-null value for \code{Window(x.start)} makes sense ONLY if \code{w} is contained in \code{Window(x.start)}. For multitype processes make sure that, even if there is to be no trend corresponding to a particular type, there is still a component (a NULL component) for that type, in the list. } \seealso{ \code{\link{rmh}}, \code{\link{rmh.ppm}}, \code{\link{rStrauss}}, \code{\link{ppp}}, \code{\link{ppm}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Triplets}} } \section{Other models}{ In theory, any finite point process model can be simulated using the Metropolis-Hastings algorithm, provided the conditional intensity is uniformly bounded. 
In practice, the list of point process models that can be simulated using
\code{rmh.default} is limited to those that have been implemented
in the package's internal C code. More options will be added in the future.

Note that the \code{lookup} conditional intensity function
permits the simulation (in theory, to any desired degree
of approximation) of any pairwise interaction process for
which the interaction depends only on the distance between
the pair of points.
}
\section{Reproducible simulations}{
If the user wants the simulation to be exactly reproducible
(e.g. for a figure in a journal article, where it is useful to
have the figure consistent from draft to draft) then the state of the
random number generator should be set before calling \code{rmh.default}.
This can be done either by calling \code{\link[base:Random]{set.seed}}
or by assigning a value to
\code{\link[base:Random]{.Random.seed}}.
In the examples below, we use \code{\link[base:Random]{set.seed}}.

If a simulation has been performed and the user now wants to
repeat it exactly, the random seed should be extracted from the
simulated point pattern \code{X} by \code{seed <- attr(X, "seed")},
then assigned to the system random number state by
\code{.Random.seed <- seed} before calling \code{rmh.default}.
}
\examples{
if(interactive()) {
nr <- 1e5
nv <- 5000
ns <- 200
} else {
nr <- 10
nv <- 5
ns <- 20
oldopt <- spatstat.options()
spatstat.options(expand=1.1)
}
set.seed(961018)

# Strauss process.
mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X1.strauss <- rmh(model=mod01,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X1.strauss) # Strauss process, conditioning on n = 42: X2.strauss <- rmh(model=mod01,start=list(n.start=42), control=list(p=1,nrep=nr,nverb=nv)) # Tracking algorithm progress: X <- rmh(model=mod01,start=list(n.start=ns), control=list(nrep=nr, nsave=nr/5, nburn=nr/2, track=TRUE)) History <- attr(X, "history") Saved <- attr(X, "saved") head(History) plot(Saved) # Hard core process: mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10)) X3.hardcore <- rmh(model=mod02,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X3.hardcore) # Strauss process equal to pure hardcore: mod02s <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10)) X3.strauss <- rmh(model=mod02s,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Strauss process in a polygonal window. x <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42) y <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33) mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07), w=owin(poly=list(x=x,y=y))) X4.strauss <- rmh(model=mod03,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X4.strauss) # Strauss process in a polygonal window, conditioning on n = 80. X5.strauss <- rmh(model=mod03,start=list(n.start=ns), control=list(p=1,nrep=nr,nverb=nv)) # Strauss process, starting off from X4.strauss, but with the # polygonal window replaced by a rectangular one. At the end, # the generated pattern is clipped to the original polygonal window. 
xxx <- X4.strauss Window(xxx) <- as.owin(c(0,1,0,1)) X6.strauss <- rmh(model=mod03,start=list(x.start=xxx), control=list(nrep=nr,nverb=nv)) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=c(0,10,0,10)) X1.straush <- rmh(model=mod04,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Another Strauss with hardcore (with a perhaps surprising result): mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5), w=c(0,250,0,250)) X2.straush <- rmh(model=mod05,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Pure hardcore (identical to X3.strauss). mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7), w=c(0,10,0,10)) X3.straush <- rmh(model=mod06,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Soft core: w <- c(0,10,0,10) mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5), w=c(0,10,0,10)) X.sftcr <- rmh(model=mod07,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.sftcr) # Area-interaction process: mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7), w=c(0,10,0,10)) X.area <- rmh(model=mod42,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.area) # Triplets process modtrip <- list(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X.triplets <- rmh(model=modtrip, start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.triplets) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r), w=c(0,250,0,250)) X1.straussm <- rmh(model=mod08,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) if(interactive()) plot(X1.straussm) # Multitype Strauss conditioning upon the total number # of points being 80: X2.straussm <- rmh(model=mod08,start=list(n.start=ns), control=list(p=1,ptypes=c(0.75,0.25),nrep=nr, nverb=nv)) 
# Conditioning upon the number of points of type 1 being 60 # and the number of points of type 2 being 20: X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)), control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) # Multitype Strauss hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod09 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250)) X.straushm <- rmh(model=mod09,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) X1.straushm.trend <- rmh(model=mod10,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) if(interactive()) plot(X1.straushm.trend) # Multitype Strauss hardcore with trends for each type, given as images: bigwin <- square(250) i1 <- as.im(tr3, bigwin) i2 <- as.im(tr4, bigwin) mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=bigwin, trend=list(i1,i2)) X2.straushm.trend <- rmh(model=mod11,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),expand=1, nrep=nr,nverb=nv)) # Diggle, Gates, and Stibbard: mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1)) X.dgs <- rmh(model=mod12,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.dgs) # Diggle-Gratton: mod13 <- list(cif="diggra", par=list(beta=1800,kappa=3,delta=0.02,rho=0.04), w=square(1)) X.diggra <- rmh(model=mod13,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.diggra) # Fiksel: modFik <- list(cif="fiksel", par=list(beta=180,r=0.15,hc=0.07,kappa=2,a= -1.0), w=square(1)) X.fiksel <- 
rmh(model=modFik,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.fiksel) # Geyer: mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=c(0,10,0,10)) X1.geyer <- rmh(model=mod14,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X1.geyer) # Geyer; same as a Strauss process with parameters # (beta=2.25,gamma=0.16,r=0.7): mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000), w=c(0,10,0,10)) X2.geyer <- rmh(model=mod15,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3)) data(redwood) X3.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(periodic=TRUE,nrep=nr,nverb=nv)) # Geyer, starting from the redwood data set, simulating # on a torus, and conditioning on n: X4.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(p=1,periodic=TRUE,nrep=nr,nverb=nv)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. 
h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) X.lookup <- rmh(model=mod17,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.lookup) # Strauss with trend tr <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } beta <- 0.3 gmma <- 0.5 r <- 45 modStr <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r), w=square(250), trend=tr) X1.strauss.trend <- rmh(model=modStr,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Baddeley-Geyer r <- seq(0,0.2,length=8)[-1] gmma <- c(0.5,0.6,0.7,0.8,0.7,0.6,0.5) mod18 <- list(cif="badgey",par=list(beta=4000, gamma=gmma,r=r,sat=5), w=square(1)) X1.badgey <- rmh(model=mod18,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) mod19 <- list(cif="badgey", par=list(beta=4000, gamma=gmma,r=r,sat=1e4), w=square(1)) set.seed(1329) X2.badgey <- rmh(model=mod19,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Check: h <- ((prod(gmma)/cumprod(c(1,gmma)))[-8])^2 hs <- stepfun(r,c(h,1)) mod20 <- list(cif="lookup",par=list(beta=4000,h=hs),w=square(1)) set.seed(1329) X.check <- rmh(model=mod20,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # X2.badgey and X.check will be identical. mod21 <- list(cif="badgey",par=list(beta=300,gamma=c(1,0.4,1), r=c(0.035,0.07,0.14),sat=5), w=square(1)) X3.badgey <- rmh(model=mod21,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Same result as Geyer model with beta=300, gamma=0.4, r=0.07, # sat = 5 (if seeds and control parameters are the same) # Or more simply: mod22 <- list(cif="badgey", par=list(beta=300,gamma=0.4,r=0.07, sat=5), w=square(1)) X4.badgey <- rmh(model=mod22,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Same again --- i.e. the BadGey model includes the Geyer model. # Illustrating scalability. 
\dontrun{ M1 <- rmhmodel(cif="strauss",par=list(beta=60,gamma=0.5,r=0.04),w=owin()) set.seed(496) X1 <- rmh(model=M1,start=list(n.start=300)) M2 <- rmhmodel(cif="strauss",par=list(beta=0.6,gamma=0.5,r=0.4), w=owin(c(0,10),c(0,10))) set.seed(496) X2 <- rmh(model=M2,start=list(n.start=300)) chk <- affine(X1,mat=diag(c(10,10))) all.equal(chk,X2,check.attributes=FALSE) # Under the default spatstat options the foregoing all.equal() # will yield TRUE. Setting spatstat.options(scalable=FALSE) and # re-running the code will reveal differences between X1 and X2. } if(!interactive()) spatstat.options(oldopt) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/pp3.Rd0000644000176200001440000000212113160710621013652 0ustar liggesusers\name{pp3} \Rdversion{1.1} \alias{pp3} \title{ Three Dimensional Point Pattern } \description{ Create a three-dimensional point pattern } \usage{ pp3(x, y, z, ...) } \arguments{ \item{x,y,z}{ Numeric vectors of equal length, containing Cartesian coordinates of points in three-dimensional space. } \item{\dots}{ Arguments passed to \code{\link{as.box3}} to determine the three-dimensional box in which the points have been observed. } } \details{ An object of class \code{"pp3"} represents a pattern of points in three-dimensional space. The points are assumed to have been observed by exhaustively inspecting a three-dimensional rectangular box. The boundaries of the box are included as part of the dataset. } \value{ Object of class \code{"pp3"} representing a three dimensional point pattern. Also belongs to class \code{"ppx"}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{box3}}, \code{\link{print.pp3}}, \code{\link{ppx}} } \examples{ X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1))) } \keyword{spatial} \keyword{datagen} spatstat/man/Extract.leverage.ppm.Rd0000644000176200001440000000446213160710621017160 0ustar liggesusers\name{Extract.leverage.ppm} \alias{[.leverage.ppm} \title{Extract Subset of Leverage Object} \description{ Extract a subset of a leverage map, or extract the leverage values at specified locations. } \usage{ \method{[}{leverage.ppm}(x, i, \dots, update=TRUE) } \arguments{ \item{x}{ A leverage object (of class \code{"leverage.ppm"}) computed by \code{\link{leverage.ppm}}. } \item{i}{ Subset index (passed to \code{\link{[.im}}). Either a spatial window (object of class \code{"owin"}) or a spatial point pattern (object of class \code{"ppp"}). } \item{\dots}{ Further arguments passed to \code{\link{[.im}}, especially the argument \code{drop}. } \item{update}{ Logical value indicating whether to update the internally-stored value of the mean leverage, by averaging over the specified subset. } } \value{ Another object of class \code{"leverage.ppm"}, or a vector of numeric values of leverage. } \details{ An object of class \code{"leverage.ppm"} contains the values of the leverage function for a point process model, computed by \code{\link{leverage.ppm}}. This function extracts a designated subset of the leverage values, either as another leverage object, or as a vector of numeric values. The function \code{[.leverage.ppm} is a method for \code{\link{[}} for the class \code{"leverage.ppm"}. The argument \code{i} should be either \itemize{ \item a spatial window (object of class \code{"owin"}) determining a region where the leverage map is required. The result will typically be another leverage map (object of class \code{leverage.ppm}). \item a spatial point pattern (object of class \code{"ppp"}) specifying locations at which the leverage values are required. 
The result will be a numeric vector. } The subset operator for images, \code{\link{[.im}}, is applied to the leverage map. If this yields a pixel image, then the result of \code{\link{[.leverage.ppm}} is another leverage object. Otherwise, a vector containing the numeric values of leverage is returned. } \seealso{ \code{\link{leverage.ppm}}. } \examples{ fit <- ppm(cells ~x) lev <- leverage(fit) b <- owin(c(0.1, 0.3), c(0.2, 0.4)) lev[b] lev[cells] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/localK.Rd0000644000176200001440000001105413160710621014362 0ustar liggesusers\name{localK} \alias{localK} \alias{localL} \title{Neighbourhood density function} \description{ Computes the neighbourhood density function, a local version of the \eqn{K}-function or \eqn{L}-function, defined by Getis and Franklin (1987). } \usage{ localK(X, ..., correction = "Ripley", verbose = TRUE, rvalue=NULL) localL(X, ..., correction = "Ripley", verbose = TRUE, rvalue=NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{\dots}{Ignored.} \item{correction}{String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"translation"}, \code{"Ripley"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } } \details{ The command \code{localL} computes the \emph{neighbourhood density function}, a local version of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) that was proposed by Getis and Franklin (1987). The command \code{localK} computes the corresponding local analogue of the K-function. 
Given a spatial point pattern \code{X}, the neighbourhood density function \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ L_i(r) = \sqrt{\frac a {(n-1) \pi} \sum_j e_{ij}} }{ L[i](r) = sqrt( (a/((n-1)* pi)) * sum[j] e[i,j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one of the summands that contributes to the global estimate of the L function. By default, the function \eqn{L_i(r)}{L[i](r)} or \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. Inhomogeneous counterparts of \code{localK} and \code{localL} are computed by \code{localKinhom} and \code{localLinhom}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \references{ Getis, A. and Franklin, J. (1987) Second-order neighbourhood analysis of mapped point patterns. \emph{Ecology} \bold{68}, 473--477. } \seealso{ \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{localKinhom}}, \code{\link{localLinhom}}. } \examples{ data(ponderosa) X <- ponderosa # compute all the local L functions L <- localL(X) # plot all the local L functions against r plot(L, main="local L functions for ponderosa", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 12 metres L12 <- localL(X, rvalue=12) # Spatially interpolate the values of L12 # Compare Figure 5(b) of Getis and Franklin (1987) X12 <- X \%mark\% L12 Z <- Smooth(X12, sigma=5, dimyx=128) plot(Z, col=topo.colors(128), main="smoothed neighbourhood density") contour(Z, add=TRUE) points(X, pch=16, cex=0.5) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/rVarGamma.Rd0000644000176200001440000001473513160710621015043 0ustar liggesusers\name{rVarGamma} \alias{rVarGamma} \title{Simulate Neyman-Scott Point Process with Variance Gamma cluster kernel} \description{ Generate a random point pattern, a simulated realisation of the Neyman-Scott process with Variance Gamma (Bessel) cluster kernel. 
} \usage{ rVarGamma(kappa, nu, scale, mu, win = owin(), thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL, ..., poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{nu}{ Shape parameter for the cluster kernel. A number greater than -1. } \item{scale}{ Scale parameter for cluster kernel. Determines the size of clusters. A positive number in the same units as the spatial coordinates. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{thresh}{ Threshold relative to the cluster kernel value at the origin (parent location) determining when the cluster kernel will be treated as zero for simulation purposes. Will be overridden by argument \code{expand} if that is given. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern. } \item{expand}{ Numeric. Size of window expansion for generation of parent points. By default determined by calling \code{\link{clusterradius}} with the numeric threshold value given in \code{thresh}. } \item{\dots}{ Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE} and to \code{\link{clusterradius}} when \code{expand} is missing or \code{NULL}. } \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. 
} \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of the Neyman-Scott process with Variance Gamma (Bessel) cluster kernel, inside the window \code{win}. The process is constructed by first generating a Poisson point process of ``parent'' points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of points, the number of points in each cluster being random with a Poisson (\code{mu}) distribution, and the points being placed independently and uniformly according to a Variance Gamma kernel. The shape of the kernel is determined by the dimensionless index \code{nu}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). The scale of the kernel is determined by the argument \code{scale}, which is the parameter \eqn{\eta}{eta} appearing in equations (12) and (13) of Jalilian et al (2013). It is expressed in units of length (the same as the unit of length for the window \code{win}). In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. This model can be fitted to data by the method of minimum contrast, maximum composite likelihood or Palm likelihood using \code{\link{kppm}}. The algorithm can also generate spatially inhomogeneous versions of the cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. 
If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2006). } When the parents are homogeneous (\code{kappa} is a single number) and the offspring are inhomogeneous (\code{mu} is a function or pixel image), the model can be fitted to data using \code{\link{kppm}}, or using \code{\link{vargamma.estK}} or \code{\link{vargamma.estpcf}} applied to the inhomogeneous \eqn{K} function. If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}. This avoids computations that would otherwise require huge amounts of memory. } \seealso{ \code{\link{rpoispp}}, \code{\link{rNeymanScott}}, \code{\link{kppm}}. \code{\link{vargamma.estK}}, \code{\link{vargamma.estpcf}}. } \examples{ # homogeneous X <- rVarGamma(30, 2, 0.02, 5) # inhomogeneous ff <- function(x,y){ exp(2 - 3 * abs(x)) } Z <- as.im(ff, W= owin()) Y <- rVarGamma(30, 2, 0.02, Z) YY <- rVarGamma(ff, 2, 0.02, 3) } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. 
Adapted for \pkg{spatstat} by \adrian } \keyword{spatial} \keyword{datagen} spatstat/man/headtail.Rd0000644000176200001440000000354213160710621014733 0ustar liggesusers\name{headtail} \alias{head.ppp} \alias{head.ppx} \alias{head.psp} \alias{head.tess} \alias{tail.ppp} \alias{tail.ppx} \alias{tail.psp} \alias{tail.tess} \title{ First or Last Part of a Spatial Pattern } \description{ Returns the first few elements (\code{head}) or the last few elements (\code{tail}) of a spatial pattern. } \usage{ \method{head}{ppp}(x, n = 6L, \dots) \method{head}{ppx}(x, n = 6L, \dots) \method{head}{psp}(x, n = 6L, \dots) \method{head}{tess}(x, n = 6L, \dots) \method{tail}{ppp}(x, n = 6L, \dots) \method{tail}{ppx}(x, n = 6L, \dots) \method{tail}{psp}(x, n = 6L, \dots) \method{tail}{tess}(x, n = 6L, \dots) } \arguments{ \item{x}{ A spatial pattern of geometrical figures, such as a spatial pattern of points (an object of class \code{"ppp"}, \code{"pp3"}, \code{"ppx"} or \code{"lpp"}) or a spatial pattern of line segments (an object of class \code{"psp"}) or a tessellation (object of class \code{"tess"}). } \item{n}{ Integer. The number of elements of the pattern that should be extracted. } \item{\dots}{ Ignored. } } \details{ These are methods for the generic functions \code{\link[utils]{head}} and \code{\link[utils]{tail}}. They extract the first or last \code{n} elements from \code{x} and return them as an object of the same kind as \code{x}. To inspect the spatial coordinates themselves, use \code{\link[utils]{View}(x)} or \code{head(as.data.frame(x))}. } \value{ An object of the same class as \code{x}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[utils]{View}}, \code{\link[utils]{edit}}. 
Conversion to data frame: \code{\link{as.data.frame.ppp}}, \code{\link{as.data.frame.ppx}}, \code{\link{as.data.frame.psp}} } \examples{ head(cells) tail(as.psp(spiders), 10) head(dirichlet(cells), 4) } \keyword{spatial} \keyword{manip} spatstat/man/round.ppp.Rd0000644000176200001440000000204313160710621015100 0ustar liggesusers\name{round.ppp} \alias{round.ppp} \alias{round.pp3} \alias{round.ppx} \title{ Apply Numerical Rounding to Spatial Coordinates } \description{ Apply numerical rounding to the spatial coordinates of a point pattern. } \usage{ \method{round}{ppp}(x, digits = 0) \method{round}{pp3}(x, digits = 0) \method{round}{ppx}(x, digits = 0) } \arguments{ \item{x}{ A spatial point pattern in any dimension (object of class \code{"ppp"}, \code{"pp3"} or \code{"ppx"}). } \item{digits}{ integer indicating the number of decimal places. } } \details{ These functions are methods for the generic function \code{\link[base]{round}}. They apply numerical rounding to the spatial coordinates of the point pattern \code{x}. } \value{ A point pattern object, of the same class as \code{x}. } \author{ \adrian and \rolf } \seealso{ \code{\link{rounding}} to determine whether numbers have been rounded. \code{\link[base]{round}} in the Base package. } \examples{ round(cells, 1) } \keyword{spatial} \keyword{manip} spatstat/man/pool.fv.Rd0000644000176200001440000000317213160710621014542 0ustar liggesusers\name{pool.fv} \alias{pool.fv} \title{Pool Several Functions} \description{ Combine several summary functions into a single function. } \usage{ \method{pool}{fv}(..., weights=NULL, relabel=TRUE, variance=TRUE) } \arguments{ \item{\dots}{ Objects of class \code{"fv"}. } \item{weights}{ Optional numeric vector of weights for the functions. } \item{relabel}{ Logical value indicating whether the columns of the resulting function should be labelled to show that they were obtained by pooling. 
} \item{variance}{ Logical value indicating whether to compute the sample variance and related terms. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"fv"} of summary functions. It is used to combine several estimates of the same function into a single function. Each of the arguments \code{\dots} must be an object of class \code{"fv"}. They must be compatible, in that they are estimates of the same function, and were computed using the same options. The sample mean and sample variance of the corresponding estimates will be computed. } \value{ An object of class \code{"fv"}. } \seealso{ \code{\link{pool}}, \code{\link{pool.anylist}}, \code{\link{pool.rat}} } \examples{ K <- lapply(waterstriders, Kest, correction="iso") Kall <- pool(K[[1]], K[[2]], K[[3]]) Kall <- pool(as.anylist(K)) plot(Kall, cbind(pooliso, pooltheo) ~ r, shade=c("loiso", "hiiso"), main="Pooled K function of waterstriders") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/as.linnet.linim.Rd0000644000176200001440000000316113160710571016163 0ustar liggesusers\name{as.linnet.linim} \alias{as.linnet.lpp} \alias{as.linnet.linim} \alias{as.linnet.linfun} \alias{as.linnet.lintess} \title{ Extract Linear Network from Data on a Linear Network } \description{ Given some kind of data on a linear network, the command \code{as.linnet} extracts the linear network itself. } \usage{ \method{as.linnet}{linim}(X, \dots) \method{as.linnet}{linfun}(X, \dots) \method{as.linnet}{lintess}(X, \dots) \method{as.linnet}{lpp}(X, \dots, fatal=TRUE, sparse) } \arguments{ \item{X}{ Data on a linear network. A point pattern (class \code{"lpp"}), pixel image (class \code{"linim"}), function (class \code{"linfun"}) or tessellation (class \code{"lintess"}) on a linear network. } \item{\dots}{ Ignored. 
} \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}). } \item{sparse}{ Logical value indicating whether to use a sparse matrix representation, as explained in \code{\link{linnet}}. Default is to keep the same representation as in \code{X}. } } \details{ These are methods for the generic \code{\link{as.linnet}} for various classes. The network on which the data are defined is extracted. } \value{ A linear network (object of class \code{"linnet"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{linnet}}, \code{\link{methods.linnet}}. } \examples{ # make some data xcoord <- linfun(function(x,y,seg,tp) { x }, simplenet) as.linnet(xcoord) X <- as.linim(xcoord) as.linnet(X) } \keyword{spatial} \keyword{manip} spatstat/man/pixellate.Rd0000644000176200001440000000323613160710621015147 0ustar liggesusers\name{pixellate} \Rdversion{1.1} \alias{pixellate} \title{ Convert Spatial Object to Pixel Image } \description{ Convert a spatial object to a pixel image by measuring the amount of stuff in each pixel. } \usage{ pixellate(x, ...) } \arguments{ \item{x}{ Spatial object to be converted. A point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}), a line segment pattern (object of class \code{"psp"}), or some other suitable data. } \item{\dots}{ Arguments passed to methods. } } \details{ The function \code{pixellate} converts a geometrical object \code{x} into a pixel image, by measuring the \emph{amount} of \code{x} that is inside each pixel. If \code{x} is a point pattern, \code{pixellate(x)} counts the number of points of \code{x} falling in each pixel. If \code{x} is a window, \code{pixellate(x)} measures the area of intersection of each pixel with the window. 
The function \code{pixellate} is generic, with methods for point patterns (\code{\link{pixellate.ppp}}), windows (\code{\link{pixellate.owin}}), and line segment patterns (\code{\link{pixellate.psp}}), See the separate documentation for these methods. The related function \code{\link{as.im}} also converts \code{x} into a pixel image, but typically measures only the presence or absence of \code{x} inside each pixel. } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{pixellate.ppp}}, \code{\link{pixellate.owin}}, \code{\link{pixellate.psp}}, \code{\link{pixellate.linnet}}, \code{\link{as.im}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/layerplotargs.Rd0000644000176200001440000000331613160710621016047 0ustar liggesusers\name{layerplotargs} \alias{layerplotargs} \alias{layerplotargs<-} \title{ Extract or Replace the Plot Arguments of a Layered Object } \description{ Extracts or replaces the plot arguments of a layered object. } \usage{ layerplotargs(L) layerplotargs(L) <- value } \arguments{ \item{L}{ An object of class \code{"layered"} created by the function \code{\link{layered}}. } \item{value}{ Replacement value. A list, with the same length as \code{L}, whose elements are lists of plot arguments. } } \details{ These commands extract or replace the \code{plotargs} in a layered object. See \code{\link{layered}}. The replacement \code{value} should normally have the same length as the current value. However, it can also be a list with \emph{one} element which is a list of parameters. This will be replicated to the required length. For the assignment function \code{layerplotargs<-}, the argument \code{L} can be any spatial object; it will be converted to a \code{layered} object with a single layer. } \value{ \code{layerplotargs} returns a list of lists of plot arguments. \code{"layerplotargs<-"} returns the updated object of class \code{"layered"}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{layered}}, \code{\link{methods.layered}}, \code{\link{[.layered}}. } \examples{ W <- square(2) L <- layered(W=W, X=cells) ## The following are equivalent layerplotargs(L) <- list(list(), list(pch=16)) layerplotargs(L)[[2]] <- list(pch=16) layerplotargs(L)$X <- list(pch=16) ## The following are equivalent layerplotargs(L) <- list(list(cex=2), list(cex=2)) layerplotargs(L) <- list(list(cex=2)) } \keyword{spatial} \keyword{hplot} spatstat/man/delaunay.Rd0000644000176200001440000000237213160710571014766 0ustar liggesusers\name{delaunay} \alias{delaunay} \title{Delaunay Triangulation of Point Pattern} \description{ Computes the Delaunay triangulation of a spatial point pattern. } \usage{ delaunay(X) } \arguments{ \item{X}{Spatial point pattern (object of class \code{"ppp"}).} } \details{ The Delaunay triangulation of a spatial point pattern \code{X} is defined as follows. First the Dirichlet/Voronoi tessellation of \code{X} is computed; see \code{\link{dirichlet}}. Then two points of \code{X} are defined to be Delaunay neighbours if their Dirichlet/Voronoi tiles share a common boundary. Every pair of Delaunay neighbours is joined by a straight line. The result is a tessellation, consisting of disjoint triangles. The union of these triangles is the convex hull of \code{X}. } \value{ A tessellation (object of class \code{"tess"}). The window of the tessellation is the convex hull of \code{X}, not the original window of \code{X}.
} \seealso{ \code{\link{tess}}, \code{\link{dirichlet}}, \code{\link{convexhull.xy}}, \code{\link{ppp}}, \code{\link{delaunayDistance}}, \code{\link{delaunayNetwork}} } \examples{ X <- runifpoint(42) plot(delaunay(X)) plot(X, add=TRUE) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/eroded.areas.Rd0000644000176200001440000000314713160710621015515 0ustar liggesusers\name{eroded.areas} \alias{eroded.areas} \title{Areas of Morphological Erosions} \description{ Computes the areas of successive morphological erosions of a window. } \usage{ eroded.areas(w, r, subset=NULL) } \arguments{ \item{w}{A window.} \item{r}{Numeric vector of radii at which erosions will be performed.} \item{subset}{ Optional window inside which the areas should be computed. } } \value{ Numeric vector, of the same length as \code{r}, giving the areas of the successive erosions. } \details{ This function computes the areas of the erosions of the window \code{w} by each of the radii \code{r[i]}. The morphological erosion of a set \eqn{W} by a distance \eqn{r > 0} is the subset consisting of all points \eqn{x \in W}{x in W} such that the distance from \eqn{x} to the boundary of \eqn{W} is greater than or equal to \eqn{r}. In other words it is the result of trimming a margin of width \eqn{r} off the set \eqn{W}. The argument \code{r} should be a vector of positive numbers. The argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) or can be given in any format acceptable to \code{\link{as.owin}()}. Unless \code{w} is a rectangle, the computation is performed using a pixel raster approximation. To compute the eroded window itself, use \code{\link{erosion}}. 
} \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{erosion}} } \examples{ w <- owin(c(0,1),c(0,1)) a <- eroded.areas(w, seq(0.01,0.49,by=0.01)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/plot.envelope.Rd0000644000176200001440000000273113160710621015751 0ustar liggesusers\name{plot.envelope} \alias{plot.envelope} \title{Plot a Simulation Envelope} \description{ Plot method for the class \code{"envelope"}. } \usage{ \method{plot}{envelope}(x, \dots, main) } \arguments{ \item{x}{ An object of class \code{"envelope"}, containing the variables to be plotted or variables from which the plotting coordinates can be computed. } \item{main}{Main title for plot.} \item{\dots}{ Extra arguments passed to \code{\link{plot.fv}}. } } \value{ Either \code{NULL}, or a data frame giving the meaning of the different line types and colours. } \details{ This is the \code{plot} method for the class \code{"envelope"} of simulation envelopes. Objects of this class are created by the command \code{\link{envelope}}. This plot method is currently identical to \code{\link{plot.fv}}. Its default behaviour is to shade the region between the upper and lower envelopes in a light grey colour. To suppress the shading and plot the upper and lower envelopes as curves, set \code{shade=NULL}. To change the colour of the shading, use the argument \code{shadecol} which is passed to \code{\link{plot.fv}}. See \code{\link{plot.fv}} for further information on how to control the plot. 
} \examples{ data(cells) E <- envelope(cells, Kest, nsim=19) plot(E) plot(E, sqrt(./pi) ~ r) } \seealso{ \code{\link{envelope}}, \code{\link{plot.fv}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/intensity.quadratcount.Rd0000644000176200001440000000424213160710621017715 0ustar liggesusers\name{intensity.quadratcount} \alias{intensity.quadratcount} \title{ Intensity Estimates Using Quadrat Counts } \description{ Uses quadrat count data to estimate the intensity of a point pattern in each tile of a tessellation, assuming the intensity is constant in each tile. } \usage{ \method{intensity}{quadratcount}(X, ..., image=FALSE) } \arguments{ \item{X}{ An object of class \code{"quadratcount"}. } \item{image}{ Logical value specifying whether to return a table of estimated intensities (the default) or a pixel image of the estimated intensity (\code{image=TRUE}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the resolution of the pixel image, if \code{image=TRUE}. } } \details{ This is a method for the generic function \code{\link{intensity}}. It computes an estimate of the intensity of a point pattern from its quadrat counts. The argument \code{X} should be an object of class \code{"quadratcount"}. It would have been obtained by applying the function \code{\link{quadratcount}} to a point pattern (object of class \code{"ppp"}). It contains the counts of the numbers of points of the point pattern falling in each tile of a tessellation. Using this information, \code{intensity.quadratcount} divides the quadrat counts by the tile areas, yielding the average density of points per unit area in each tile of the tessellation. If \code{image=FALSE} (the default), these intensity values are returned in a contingency table. Cells of the contingency table correspond to tiles of the tessellation. If \code{image=TRUE}, the estimated intensity function is returned as a pixel image. 
For each pixel, the pixel value is the estimated intensity in the tile which contains that pixel. } \value{ If \code{image=FALSE} (the default), a contingency table. If \code{image=TRUE}, a pixel image (object of class \code{"im"}). } \seealso{ \code{\link{intensity}}, \code{\link{quadratcount}} } \examples{ qa <- quadratcount(swedishpines, 4,3) qa intensity(qa) plot(intensity(qa, image=TRUE)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/Extract.linim.Rd0000644000176200001440000000402113160710621015672 0ustar liggesusers\name{Extract.linim} \alias{[.linim} \title{Extract Subset of Pixel Image on Linear Network} \description{ Extract a subset of a pixel image on a linear network. } \usage{ \method{[}{linim}(x, i, \dots, drop=TRUE) } \arguments{ \item{x}{ A pixel image on a linear network (object of class \code{"linim"}). } \item{i}{ Spatial window defining the subregion. Either a spatial window (an object of class \code{"owin"}), or a logical-valued pixel image, or any type of index that applies to a matrix, or a point pattern (an object of class \code{"lpp"} or \code{"ppp"}), or something that can be converted to a point pattern by \code{\link{as.lpp}} (using the network on which \code{x} is defined). } \item{\dots}{Additional arguments passed to \code{[.im}.} \item{drop}{Logical value indicating whether \code{NA} values should be omitted from the result.} } \value{ Another pixel image on a linear network (object of class \code{"linim"}) or a vector of pixel values. } \details{ This function is a method for the subset operator \code{"["} for pixel images on linear networks (objects of class \code{"linim"}). The pixel image \code{x} will be restricted to the domain specified by \code{i}. Pixels outside the domain of \code{x} are assigned the value \code{NA}; if \code{drop=TRUE} (the default) such \code{NA} values are deleted from the result; if \code{drop=FALSE}, then \code{NA} values are retained. 
If \code{i} is a window (or a logical-valued pixel image) then \code{x[i]} is another pixel image of class \code{"linim"}, representing the restriction of \code{x} to the spatial domain specified by \code{i}. If \code{i} is a point pattern, then \code{x[i]} is the vector of pixel values of \code{x} at the locations specified by \code{i}. } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y){x}, W=M) Y <- linim(simplenet, Z) X <- runiflpp(4, simplenet) Y[X] Y[square(c(0.3, 0.6))] } \author{ \adrian } \keyword{spatial} \keyword{manip} spatstat/man/harmonise.msr.Rd0000644000176200001440000000173313160710621015745 0ustar liggesusers\name{harmonise.msr} \alias{harmonise.msr} \title{Make Measures Compatible} \description{ Convert several measures to a common quadrature scheme } \usage{ \method{harmonise}{msr}(\dots) } \arguments{ \item{\dots}{ Any number of measures (objects of class \code{"msr"}). } } \details{ This function makes any number of measures compatible, by converting them all to a common quadrature scheme. The command \code{\link{harmonise}} is generic. This is the method for objects of class \code{"msr"}. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are measures. } \author{ \spatstatAuthors. } \examples{ fit1 <- ppm(cells ~ x) fit2 <- ppm(rpoispp(ex=cells) ~ x) m1 <- residuals(fit1) m2 <- residuals(fit2) harmonise(m1, m2) s1 <- residuals(fit1, type="score") s2 <- residuals(fit2, type="score") harmonise(s1, s2) } \seealso{ \code{\link{harmonise}}, \code{\link{msr}} } \keyword{spatial} \keyword{manip} spatstat/man/nncross.pp3.Rd0000644000176200001440000001456213160710621015352 0ustar liggesusers\name{nncross.pp3} \alias{nncross.pp3} \title{Nearest Neighbours Between Two Patterns in 3D} \description{ Given two point patterns \code{X} and \code{Y} in three dimensions, finds the nearest neighbour in \code{Y} of each point of \code{X}. 
} \usage{ \method{nncross}{pp3}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, k = 1, sortby=c("range", "var", "x", "y", "z"), is.sorted.X = FALSE, is.sorted.Y = FALSE) } \arguments{ \item{X,Y}{Point patterns in three dimensions (objects of class \code{"pp3"}).} \item{iX, iY}{Optional identifiers, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{sortby}{ Determines which coordinate to use to sort the point patterns. See Details. } \item{is.sorted.X, is.sorted.Y}{ Logical values attesting whether the point patterns \code{X} and \code{Y} have been sorted. See Details. } \item{\dots}{Ignored.} } \details{ Given two point patterns \code{X} and \code{Y} in three dimensions, this function finds, for each point of \code{X}, the nearest point of \code{Y}. The distance between these points is also computed. If the argument \code{k} is specified, then the \code{k}-th nearest neighbours will be found. The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. The argument \code{k} may be an integer or an integer vector. If it is a single integer, then the \code{k}-th nearest neighbours are computed. 
If it is a vector, then the \code{k[i]}-th nearest neighbours are computed for each entry \code{k[i]}. For example, setting \code{k=1:3} will compute the nearest, second-nearest and third-nearest neighbours. The result is a data frame. Note that this function is not symmetric in \code{X} and \code{Y}. To find the nearest neighbour in \code{X} of each point in \code{Y}, use \code{nncross(Y,X)}. The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. } \section{Sorting data and pre-sorted data}{ Read this section if you care about the speed of computation. For efficiency, the algorithm sorts both the point patterns \code{X} and \code{Y} into increasing order of the \eqn{x} coordinate, or both into increasing order of the \eqn{y} coordinate, or both into increasing order of the \eqn{z} coordinate. Sorting is only an intermediate step; it does not affect the output, which is always given in the same order as the original data. By default (if \code{sortby="range"}), the sorting will occur on the coordinate that has the largest range of values (according to the frame of the enclosing window of \code{Y}). If \code{sortby = "var"}, sorting will occur on the coordinate that has the greater variance (in the pattern \code{Y}). Setting \code{sortby="x"} or \code{sortby = "y"} or \code{sortby = "z"} will specify that sorting should occur on the \eqn{x}, \eqn{y} or \eqn{z} coordinate, respectively.
If the point pattern \code{X} is already sorted, then the corresponding argument \code{is.sorted.X} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate is sorted. Similarly if \code{Y} is already sorted, then \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate is sorted. If both \code{X} and \code{Y} are sorted \emph{on the same coordinate axis} then both \code{is.sorted.X} and \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate is sorted. } \value{ A data frame, or a vector if the data frame would contain only one column. By default (if \code{what=c("dist", "which")} and \code{k=1}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"} and \code{k=1}, a vector of nearest neighbour distances. If \code{what="which"} and \code{k=1}, a vector of nearest neighbour indices. If \code{k} is specified, the result is a data frame with columns containing the \code{k}-th nearest neighbour distances and/or nearest neighbour indices. } \seealso{ \code{\link{nndist}} for nearest neighbour distances in a single point pattern. 
} \examples{ # two different point patterns X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1))) Y <- pp3(runif(20), runif(20), runif(20), box3(c(0,1))) N <- nncross(X,Y)$which N <- nncross(X,Y, what="which") #faster # note that length(N) = 10 # k-nearest neighbours N3 <- nncross(X, Y, k=1:3) # two patterns with some points in common Z <- pp3(runif(20), runif(20), runif(20), box3(c(0,1))) X <- Z[1:15] Y <- Z[10:20] iX <- 1:15 iY <- 10:20 N <- nncross(X,Y, iX, iY, what="which") } \author{ \adrian , \rolf , and Jens Oehlschlaegel } \keyword{spatial} \keyword{math} spatstat/man/diameter.boxx.Rd0000644000176200001440000000346113160710571015735 0ustar liggesusers\name{diameter.boxx} \Rdversion{1.1} \alias{diameter.boxx} \alias{volume.boxx} \alias{shortside.boxx} \alias{sidelengths.boxx} \alias{eroded.volumes.boxx} \title{ Geometrical Calculations for Multi-Dimensional Box } \description{ Calculates the volume, diameter, shortest side, side lengths, or eroded volume of a multi-dimensional box. } \usage{ \method{diameter}{boxx}(x) \method{volume}{boxx}(x) \method{shortside}{boxx}(x) \method{sidelengths}{boxx}(x) \method{eroded.volumes}{boxx}(x, r) } \arguments{ \item{x}{ Multi-dimensional box (object of class \code{"boxx"}). } \item{r}{ Numeric value or vector of numeric values for which eroded volumes should be calculated. } } \details{ \code{diameter.boxx}, \code{volume.boxx} and \code{shortside.boxx} compute the diameter, volume and shortest side length of the box. \code{sidelengths.boxx} returns the lengths of each side of the box. \code{eroded.volumes.boxx} computes, for each entry \code{r[i]}, the volume of the smaller box obtained by removing a slab of thickness \code{r[i]} from each face of the box. This smaller box is the subset consisting of points that lie at least \code{r[i]} units away from the boundary of the box. } \value{ For \code{diameter.boxx}, \code{shortside.boxx} and \code{volume.boxx}, a single numeric value. 
For \code{sidelengths.boxx}, a numeric vector of length equal to the number of spatial dimensions. For \code{eroded.volumes.boxx}, a numeric vector of the same length as \code{r}. } \author{\adrian and \rolf } \seealso{ \code{\link{boxx}} } \examples{ X <- boxx(c(0,10),c(0,10),c(0,5),c(0,2)) diameter(X) volume(X) shortside(X) sidelengths(X) hd <- shortside(X)/2 eroded.volumes(X, seq(0,hd, length=10)) } \keyword{spatial} \keyword{math} spatstat/man/split.ppx.Rd0000644000176200001440000000734313160710621015124 0ustar liggesusers\name{split.ppx} \alias{split.ppx} \title{Divide Multidimensional Point Pattern into Sub-patterns} \description{ Divides a multidimensional point pattern into several sub-patterns, according to their marks, or according to any user-specified grouping. } \usage{ \method{split}{ppx}(x, f = marks(x), drop=FALSE, un=NULL, \dots) } \arguments{ \item{x}{ A multi-dimensional point pattern. An object of class \code{"ppx"}. } \item{f}{ Data determining the grouping. Either a factor, or the name of one of the columns of marks. } \item{drop}{ Logical. Determines whether empty groups will be deleted. } \item{un}{ Logical. Determines whether the resulting subpatterns will be unmarked (i.e. whether marks will be removed from the points in each subpattern). } \item{\dots}{ Other arguments are ignored. } } \value{ A list of point patterns. The components of the list are named by the levels of \code{f}. The list also has the class \code{"splitppx"} and \code{"anylist"}. } \details{ The generic command \code{\link[base]{split}} allows a dataset to be separated into subsets according to the value of a grouping variable. The function \code{split.ppx} is a method for the generic \code{\link[base]{split}} for the class \code{"ppx"} of multidimensional point patterns. It divides up the points of the point pattern \code{x} into several sub-patterns according to the values of \code{f}. The result is a list of point patterns. 
The argument \code{f} may be \itemize{ \item a factor, of length equal to the number of points in \code{x}. The levels of \code{f} determine the destination of each point in \code{x}. The \code{i}th point of \code{x} will be placed in the sub-pattern \code{split.ppx(x)$l} where \code{l = f[i]}. \item a character string, matching the name of one of the columns of marks, if \code{marks(x)} is a data frame. This column should be a factor. } If \code{f} is missing, then it will be determined by the marks of the point pattern. The pattern \code{x} can be either \itemize{ \item a multitype point pattern (a marked point pattern whose marks vector is a factor). Then \code{f} is taken to be the marks vector. The effect is that the points of each type are separated into different point patterns. \item a marked point pattern with a data frame or hyperframe of marks, containing at least one column that is a factor. The first such column will be used to determine the splitting factor \code{f}. } Some of the sub-patterns created by the split may be empty. If \code{drop=TRUE}, then empty sub-patterns will be deleted from the list. If \code{drop=FALSE} then they are retained. The argument \code{un} determines how to handle marks in the case where \code{x} is a marked point pattern. If \code{un=TRUE} then the marks of the points will be discarded when they are split into groups, while if \code{un=FALSE} then the marks will be retained. If \code{f} and \code{un} are both missing, then the default is \code{un=TRUE} for multitype point patterns and \code{un=FALSE} for marked point patterns with a data frame of marks. The result of \code{split.ppx} has class \code{"splitppx"} and \code{"anylist"}. There are methods for \code{print}, \code{summary} and \code{plot}. 
} \seealso{ \code{\link{ppx}}, \code{\link{plot.anylist}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4), age=rep(c("old", "new"), 2), size=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t","m","m")) X split(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/branchlabelfun.Rd0000644000176200001440000000302013160710571016121 0ustar liggesusers\name{branchlabelfun} \alias{branchlabelfun} \title{ Tree Branch Membership Labelling Function } \description{ Creates a function which returns the tree branch membership label for any location on a linear network. } \usage{ branchlabelfun(L, root = 1) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). The network must have no loops. } \item{root}{ Root of the tree. An integer index identifying which point in \code{vertices(L)} is the root of the tree. } } \details{ The linear network \code{L} must be an acyclic graph (i.e. must not contain any loops) so that it can be interpreted as a tree. The result of \code{f <- branchlabelfun(L, root)} is a function \code{f} which gives, for each location on the linear network \code{L}, the tree branch label at that location. Tree branch labels are explained in \code{\link{treebranchlabels}}. The result \code{f} also belongs to the class \code{"linfun"}. It can be called using several different kinds of data, as explained in the help for \code{\link{linfun}}. The values of the function are character strings. } \value{ A function (of class \code{"linfun"}). 
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link{treebranchlabels}}, \code{\link{linfun}} } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) # make function f <- branchlabelfun(L, 1) plot(f) X <- runiflpp(5, L) f(X) } \keyword{spatial} \keyword{math} spatstat/man/methods.units.Rd0000644000176200001440000000416713160710621015770 0ustar liggesusers\name{methods.units} \Rdversion{1.1} \alias{methods.units} %DoNotExport \alias{print.units} \alias{summary.units} \alias{rescale.units} \alias{compatible.units} \title{ Methods for Units } \description{ Methods for class \code{"units"}. } \usage{ \method{print}{units}(x, ...) \method{summary}{units}(object, ...) \method{rescale}{units}(X, s, unitname) \method{compatible}{units}(A,B, ..., coerce=TRUE) } \arguments{ \item{x,X,A,B,object}{ Objects of class \code{"units"} representing units of length. } \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{\dots}{ Other arguments. For \code{print.units} these arguments are passed to \code{print.default}. For \code{summary.units} they are ignored. For \code{compatible.units} these arguments are other objects of class \code{"units"}. } \item{coerce}{ Logical. If \code{TRUE}, a null unit of length is compatible with any non-null unit. } \item{unitname}{Optional new name for the unit. If present, this overrides the rescaling operation and simply substitutes the new name for the old one.} } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{summary}}, \code{\link{rescale}} and \code{\link{compatible}} for the class \code{"units"}. An object of class \code{"units"} represents a unit of length. The \code{print} method prints a description of the unit of length, and the \code{summary} method gives a more detailed description. The \code{rescale} method changes the unit of length by rescaling it. 
The \code{compatible} method tests whether two or more units of length are compatible. } \value{ For \code{print.units} the value is \code{NULL}. For \code{summary.units} the value is an object of class \code{summary.units} (with its own print method). For \code{rescale.units} the value is another object of class \code{"units"}. For \code{compatible.units} the result is logical. } \author{\adrian and \rolf } \seealso{ \code{\link{box3}}, \code{\link{print}}, \code{\link{unitname}} } \keyword{spatial} \keyword{methods} spatstat/man/panel.contour.Rd0000644000176200001440000000420113160710621015740 0ustar liggesusers\name{panel.contour} \alias{panel.contour} \alias{panel.image} \alias{panel.histogram} \title{ Panel Plots using Colour Image or Contour Lines } \description{ These functions can be passed to \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}} to determine what kind of plotting is done in each panel of a multi-panel graphical display. } \usage{ panel.contour(x, y, ..., sigma = NULL) panel.image(x, y, ..., sigma = NULL) panel.histogram(x, ...) } \arguments{ \item{x,y}{ Coordinates of points in a scatterplot. } \item{\dots}{ Extra graphics arguments, passed to \code{\link{contour.im}}, \code{\link{plot.im}} or \code{\link[graphics]{rect}}, respectively, to control the appearance of the panel. } \item{sigma}{ Bandwidth of kernel smoother, on a scale where \eqn{x} and \eqn{y} range between 0 and 1. } } \details{ These functions can serve as one of the arguments \code{panel}, \code{lower.panel}, \code{upper.panel}, \code{diag.panel} passed to graphics commands like \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}}, to determine what kind of plotting is done in each panel of a multi-panel graphical display. In particular they work with \code{\link{pairs.im}}. The functions \code{panel.contour} and \code{panel.image} are suitable for the off-diagonal plots which involve two datasets \code{x} and \code{y}.
They first rescale \code{x} and \code{y} to the unit square, then apply kernel smoothing with bandwidth \code{sigma} using \code{\link{density.ppp}}. Then \code{panel.contour} draws a contour plot while \code{panel.image} draws a colour image. The function \code{panel.histogram} is suitable for the diagonal plots which involve a single dataset \code{x}. It displays a histogram of the data. } \value{ Null. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{pairs.im}}, \code{\link{pairs.default}}, \code{\link{panel.smooth}} } \examples{ with(bei.extra, pairs(grad, elev, panel = panel.contour, diag.panel = panel.histogram)) } \keyword{spatial} \keyword{hplot} spatstat/man/rpoint.Rd0000644000176200001440000001003613160710621014467 0ustar liggesusers\name{rpoint} \alias{rpoint} \title{Generate N Random Points} \description{ Generate a random point pattern containing \eqn{n} independent, identically distributed random points with any specified distribution. } \usage{ rpoint(n, f, fmax=NULL, win=unit.square(), \dots, giveup=1000, verbose=FALSE, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of points to generate. } \item{f}{ The probability density of the points, possibly un-normalised. Either a constant, a function \code{f(x,y,...)}, or a pixel image object. } \item{fmax}{ An upper bound on the values of \code{f}. If missing, this number will be estimated. } \item{win}{ Window in which to simulate the pattern. Ignored if \code{f} is a pixel image. } \item{\dots}{ Arguments passed to the function \code{f}. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{verbose}{ Flag indicating whether to report details of performance of the simulation algorithm. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. 
} } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates \code{n} independent, identically distributed random points with common probability density proportional to \code{f}. The argument \code{f} may be \describe{ \item{a numerical constant:}{ uniformly distributed random points will be generated. } \item{a function:}{random points will be generated in the window \code{win} with probability density proportional to \code{f(x,y,...)} where \code{x} and \code{y} are the cartesian coordinates. The function \code{f} must accept two \emph{vectors} of coordinates \code{x,y} and return the corresponding vector of function values. Additional arguments \code{...} of any kind may be passed to the function. } \item{a pixel image:}{if \code{f} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}) then random points will be generated in the window of this pixel image, with probability density proportional to the pixel values of \code{f}. } } The algorithm is as follows: \itemize{ \item If \code{f} is a constant, we invoke \code{\link{runifpoint}}. \item If \code{f} is a function, then we use the rejection method. Proposal points are generated from the uniform distribution. A proposal point \eqn{(x,y)} is accepted with probability \code{f(x,y,...)/fmax} and otherwise rejected. The algorithm continues until \code{n} points have been accepted. It gives up after \code{giveup * n} proposals if there are still fewer than \code{n} points. \item If \code{f} is a pixel image, then a random sequence of pixels is selected (using \code{\link{sample}}) with probabilities proportional to the pixel values of \code{f}. Then for each pixel in the sequence we generate a uniformly distributed random point in that pixel. } The algorithm for pixel images is more efficient than that for functions. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{runifpoint}} } \examples{ # 100 uniform random points in the unit square X <- rpoint(100) # 100 random points with probability density proportional to x^2 + y^2 X <- rpoint(100, function(x,y) { x^2 + y^2}, 1) # `fmax' may be omitted X <- rpoint(100, function(x,y) { x^2 + y^2}) # irregular window data(letterR) X <- rpoint(100, function(x,y) { x^2 + y^2}, win=letterR) # make a pixel image Z <- setcov(letterR) # 100 points with density proportional to pixel values X <- rpoint(100, Z) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/is.rectangle.Rd0000644000176200001440000000152113160710621015531 0ustar liggesusers\name{is.rectangle} \alias{is.rectangle} \alias{is.polygonal} \alias{is.mask} \title{Determine Type of Window} \description{ Determine whether a window is a rectangle, a polygonal region, or a binary mask. } \usage{ is.rectangle(w) is.polygonal(w) is.mask(w) } \arguments{ \item{w}{ Window to be inspected. An object of class \code{"owin"}. } } \value{ Logical value, equal to \code{TRUE} if \code{w} is a window of the specified type. } \details{ These simple functions determine whether a window \code{w} (object of class \code{"owin"}) is a rectangle (\code{is.rectangle(w) = TRUE}), a domain with polygonal boundary (\code{is.polygonal(w) = TRUE}), or a binary pixel mask (\code{is.mask(w) = TRUE}). } \seealso{ \code{\link{owin}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/with.hyperframe.Rd0000644000176200001440000000504713160710621016276 0ustar liggesusers\name{with.hyperframe} \alias{with.hyperframe} \title{Evaluate an Expression in Each Row of a Hyperframe} \description{ An expression, involving the names of columns in a hyperframe, is evaluated separately for each row of the hyperframe. 
} \usage{ \method{with}{hyperframe}(data, expr, ..., simplify = TRUE, ee = NULL, enclos=NULL) } \arguments{ \item{data}{A hyperframe (object of class \code{"hyperframe"}) containing data. } \item{expr}{An \R language expression to be evaluated.} \item{\dots}{Ignored.} \item{simplify}{ Logical. If \code{TRUE}, the return value will be simplified to a vector whenever possible. } \item{ee}{ Alternative form of \code{expr}, as an object of class \code{"expression"}. } \item{enclos}{ An environment in which to search for objects that are not found in the hyperframe. Defaults to \code{\link{parent.frame}()}. } } \details{ This function evaluates the expression \code{expr} in each row of the hyperframe \code{data}. It is a method for the generic function \code{\link{with}}. The argument \code{expr} should be an \R language expression in which each variable name is either the name of a column in the hyperframe \code{data}, or the name of an object in the parent frame (the environment in which \code{with} was called.) The argument \code{ee} can be used as an alternative to \code{expr} and should be an expression object (of class \code{"expression"}). For each row of \code{data}, the expression will be evaluated so that variables which are column names of \code{data} are interpreted as the entries for those columns in the current row. For example, if a hyperframe \code{h} has columns called \code{A} and \code{B}, then \code{with(h, A != B)} inspects each row of \code{data} in turn, tests whether the entries in columns \code{A} and \code{B} are unequal, and returns the \eqn{n} logical values. } \value{ Normally a list of length \eqn{n} (where \eqn{n} is the number of rows) containing the results of evaluating the expression for each row. If \code{simplify=TRUE} and each result is a single atomic value, then the result is a vector or factor containing the same values. 
} \author{\adrian and \rolf } \seealso{ \code{\link{hyperframe}}, \code{\link{plot.hyperframe}} } \examples{ # generate Poisson point patterns with intensities 10 to 100 H <- hyperframe(L=seq(10,100, by=10)) X <- with(H, rpoispp(L)) } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/split.hyperframe.Rd0000644000176200001440000000365613160710621016462 0ustar liggesusers\name{split.hyperframe} \alias{split.hyperframe} \alias{split<-.hyperframe} \title{ Divide Hyperframe Into Subsets and Reassemble } \description{ \code{split} divides the data \code{x} into subsets defined by \code{f}. The replacement form replaces values corresponding to such a division. } \usage{ \method{split}{hyperframe}(x, f, drop = FALSE, ...) \method{split}{hyperframe}(x, f, drop = FALSE, ...) <- value } \arguments{ \item{x}{ Hyperframe (object of class \code{"hyperframe"}). } \item{f}{ a \code{factor} in the sense that \code{as.factor(f)} defines the grouping, or a list of such factors in which case their interaction is used for the grouping. } \item{drop}{ logical value, indicating whether levels that do not occur should be dropped from the result. } \item{value}{ a list of hyperframes which arose (or could have arisen) from the command \code{split(x,f,drop=drop)}. } \item{\dots}{ Ignored. } } \details{ These are methods for the generic functions \code{\link{split}} and \code{\link{split<-}} for hyperframes (objects of class \code{"hyperframe"}). A hyperframe is like a data frame, except that its entries can be objects of any kind. The behaviour of these methods is analogous to the corresponding methods for data frames. } \value{ The value returned from \code{split.hyperframe} is a list of hyperframe containing the values for the groups. The components of the list are named by the levels of \code{f} (after converting to a factor, or if already a factor and \code{drop = TRUE}, dropping unused levels). 
The replacement method \code{split<-.hyperframe} returns a new hyperframe \code{x} for which \code{split(x,f)} equals \code{value}. } \author{\adrian , \rolf and \ege } \seealso{ \code{\link{hyperframe}}, \code{\link{[.hyperframe}} } \examples{ split(pyramidal, pyramidal$group) } \keyword{spatial} \keyword{manip} spatstat/man/fvnames.Rd0000644000176200001440000000505213160710621014615 0ustar liggesusers\name{fvnames} \alias{fvnames} \alias{fvnames<-} \title{ Abbreviations for Groups of Columns in Function Value Table } \description{ Groups of columns in a function value table (object of class \code{"fv"}) identified by standard abbreviations. } \usage{ fvnames(X, a = ".") fvnames(X, a = ".") <- value } \arguments{ \item{X}{ Function value table (object of class \code{"fv"}). See \code{\link{fv.object}}. } \item{a}{ One of the standard abbreviations listed below. } \item{value}{ Character vector containing names of columns of \code{X}. } } \details{ An object of class \code{"fv"} represents a table of values of a function, usually a summary function for spatial data such as the \eqn{K}-function, for which several different statistical estimators may be available. The different estimates are stored as columns of the table. Auxiliary information carried in the object \code{X} specifies some columns or groups of columns of this table that should be used for particular purposes. For convenience these groups can be referred to by standard abbreviations which are recognised by various functions in the \pkg{spatstat} package, such as \code{\link{plot.fv}}. 
These abbreviations are: \tabular{ll}{ \code{".x"} \tab the function argument \cr \code{".y"} \tab the recommended value of the function \cr \code{"."} \tab all function values to be plotted by default \cr \tab (in order of plotting) \cr \code{".s"} \tab the upper and lower limits of shading \cr \tab (for envelopes and confidence intervals)\cr \code{".a"} \tab all function values } The command \code{fvnames(X, a)} expands the abbreviation \code{a} and returns a character vector containing the names of the columns. The assignment \code{fvnames(X, a) <- value} changes the definition of the abbreviation \code{a} to the character vector \code{value}. It does not change the labels of any columns. Note that \code{fvnames(x, ".")} lists the columns of values that will be plotted by default, in the order that they would be plotted, not in order of the column position. The order in which curves are plotted affects the colours and line styles associated with the curves. } \value{ For \code{fvnames}, a character vector. For \code{fvnames<-}, the updated object. } \author{\adrian and \rolf } \seealso{ \code{\link{fv.object}}, \code{\link{plot.fv}} } \examples{ K <- Kest(cells) fvnames(K, ".y") fvnames(K, ".y") <- "trans" } \keyword{spatial} \keyword{manip} spatstat/man/maxnndist.Rd0000644000176200001440000000257113160710621015166 0ustar liggesusers\name{maxnndist} \alias{maxnndist} \alias{minnndist} \title{ Compute Minimum or Maximum Nearest-Neighbour Distance } \description{ A faster way to compute the minimum or maximum nearest-neighbour distance in a point pattern. } \usage{ minnndist(X, positive=FALSE) maxnndist(X, positive=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{positive}{ Logical. If \code{FALSE} (the default), compute the usual nearest-neighbour distance. If \code{TRUE}, ignore coincident points, so that the nearest neighbour distance for each point is greater than zero. 
} } \details{ These functions find the minimum and maximum values of nearest-neighbour distances in the point pattern \code{X}. \code{minnndist(X)} and \code{maxnndist(X)} are equivalent to, but faster than, \code{min(nndist(X))} and \code{max(nndist(X))} respectively. The value is \code{NA} if \code{npoints(X) < 2}. } \value{ A single numeric value (possibly \code{NA}). } \seealso{ \code{\link{nndist}} } \examples{ min(nndist(swedishpines)) minnndist(swedishpines) max(nndist(swedishpines)) maxnndist(swedishpines) minnndist(lansing, positive=TRUE) if(interactive()) { X <- rpoispp(1e6) system.time(min(nndist(X))) system.time(minnndist(X)) } } \author{ \adrian , \rolf and \ege. } \keyword{spatial} \keyword{math} spatstat/man/as.linnet.psp.Rd0000644000176200001440000000367013160710571015662 0ustar liggesusers\name{as.linnet.psp} \alias{as.linnet.psp} \title{ Convert Line Segment Pattern to Linear Network } \description{ Converts a line segment pattern to a linear network. } \usage{ \method{as.linnet}{psp}(X, \dots, eps, sparse=FALSE) } \arguments{ \item{X}{ Line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{eps}{ Optional. Distance threshold. If two segment endpoints are closer than \code{eps} units apart, they will be treated as the same point, and will become a single vertex in the linear network. } \item{sparse}{ Logical value indicating whether to use a sparse matrix representation, as explained in \code{\link{linnet}}. } } \details{ This command converts any collection of line segments into a linear network by guessing the connectivity of the network, using the distance threshold \code{eps}. If any segments in \code{X} cross over each other, they are first cut into pieces using \code{\link{selfcut.psp}}. Then any pair of segment endpoints lying closer than \code{eps} units apart, is treated as a single vertex. The linear network is then constructed using \code{\link{linnet}}. 
It would be wise to check the result by plotting the degree of each vertex, as shown in the Examples. If \code{X} has marks, then these are stored in the resulting linear network \code{Y <- as.linnet(X)}, and can be extracted as \code{marks(as.psp(Y))} or \code{marks(Y$lines)}. } \value{ A linear network (object of class \code{"linnet"}). } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{linnet}}, \code{\link{selfcut.psp}}, \code{\link{methods.linnet}}. } \examples{ # make some data A <- psp(0.09, 0.55, 0.79, 0.80, window=owin()) B <- superimpose(A, as.psp(simplenet)) # convert to a linear network D <- as.linnet(B) # check validity D plot(D) text(vertices(D), labels=vertexdegree(D)) } \keyword{spatial} \keyword{manip} spatstat/man/Extract.linnet.Rd0000644000176200001440000000351313160710621016060 0ustar liggesusers\name{Extract.linnet} \alias{[.linnet} \title{Extract Subset of Linear Network} \description{ Extract a subset of a linear network. } \usage{ \method{[}{linnet}(x, i, \dots, snip=TRUE) } \arguments{ \item{x}{ A linear network (object of class \code{"linnet"}). } \item{i}{ Spatial window defining the subregion. An object of class \code{"owin"}. } \item{snip}{ Logical. If \code{TRUE} (the default), segments of \code{x} which cross the boundary of \code{i} will be cut by the boundary. If \code{FALSE}, these segments will be deleted. } \item{\dots}{Ignored.} } \value{ Another linear network (object of class \code{"linnet"}). } \details{ This function computes the intersection between the linear network \code{x} and the domain specified by \code{i}. This function is a method for the subset operator \code{"["} for linear networks (objects of class \code{"linnet"}). It is provided mainly for completeness. The index \code{i} should be a window. The argument \code{snip} specifies what to do with segments of \code{x} which cross the boundary of \code{i}. If \code{snip=FALSE}, such segments are simply deleted. 
If \code{snip=TRUE} (the default), such segments are cut into pieces by the boundary of \code{i}, and those pieces which lie inside the window \code{i} are included in the resulting network. } \examples{ p <- par(mfrow=c(1,2), mar=0.2+c(0,0,1,0)) B <- owin(c(0.1,0.7),c(0.19,0.6)) plot(simplenet, main="x[w, snip=TRUE]") plot(simplenet[B], add=TRUE, col="green", lwd=3) plot(B, add=TRUE, border="red", lty=3) plot(simplenet, main="x[w, snip=FALSE]") plot(simplenet[B, snip=FALSE], add=TRUE, col="green", lwd=3) plot(B, add=TRUE, border="red", lty=3) par(p) } \author{ \adrian, \rolf, \ege and Suman Rakshit. } \keyword{spatial} \keyword{manip} spatstat/man/Gcross.Rd0000644000176200001440000002210313160710571014416 0ustar liggesusers\name{Gcross} \alias{Gcross} \title{ Multitype Nearest Neighbour Distance Function (i-to-j) } \description{ For a multitype point pattern, estimate the distribution of the distance from a point of type \eqn{i} to the nearest point of type \eqn{j}. } \usage{ Gcross(X, i, j, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type distance distribution function \eqn{G_{ij}(r)}{Gij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. There is a sensible default. 
First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{ Ignored. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{ij}(r)}{Gij(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{ij}(r)}{Gij(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{ij}(r)}{Gij(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest point of type \eqn{j} } \item{theo}{the theoretical value of \eqn{G_{ij}(r)}{Gij(r)} for a marked Poisson process with the same estimated intensity (see below). } } \details{ This function \code{Gcross} and its companions \code{\link{Gdot}} and \code{\link{Gmulti}} are generalisations of the function \code{\link{Gest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. 
The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). The ``cross-type'' (type \eqn{i} to type \eqn{j}) nearest neighbour distance distribution function of a multitype point process is the cumulative distribution function \eqn{G_{ij}(r)}{Gij(r)} of the distance from a typical random point of the process with type \eqn{i} to the nearest point of type \eqn{j}. An estimate of \eqn{G_{ij}(r)}{Gij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{G_{ij}(r)}{Gij(r)} would equal \eqn{F_j(r)}{Fj(r)}, the empty space function of the type \eqn{j} points. For a multitype Poisson point process where the type \eqn{i} points have intensity \eqn{\lambda_i}{lambda[i]}, we have \deqn{G_{ij}(r) = 1 - e^{ - \lambda_j \pi r^2} }{% Gij(r) = 1 - exp( - lambda[j] * pi * r^2)} Deviations between the empirical and theoretical \eqn{G_{ij}}{Gij} curves may suggest dependence between the points of types \eqn{i} and \eqn{j}. This algorithm estimates the distribution function \eqn{G_{ij}(r)}{Gij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. 
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{ij}(r)}{Gij(r)}. This estimate should be used with caution as \eqn{G_{ij}(r)}{Gij(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{ij}}{Gij}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{ij}}{Gij} as if it were an unbiased estimator of \eqn{G_{ij}}{Gij}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. 
\emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. The function \eqn{G_{ij}}{Gij} does not necessarily have a density. The reduced sample estimator of \eqn{G_{ij}}{Gij} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{ij}}{Gij} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gdot}}, \code{\link{Gest}}, \code{\link{Gmulti}} } \examples{ # amacrine cells data G01 <- Gcross(amacrine) # equivalent to: \dontrun{ G01 <- Gcross(amacrine, "off", "on") } plot(G01) # empty space function of `on' points \dontrun{ F1 <- Fest(split(amacrine)$on, r = G01$r) lines(F1$r, F1$km, lty=3) } # synthetic example pp <- runifpoispp(30) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) G <- Gcross(pp, "0", "1") # note: "0" not 0 } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.laslett.Rd0000644000176200001440000000324213160710621015602 0ustar liggesusers\name{plot.laslett} \alias{plot.laslett} \title{ Plot Laslett Transform } \description{ Plot the result of Laslett's Transform. 
} \usage{ \method{plot}{laslett}(x, \dots, Xpars = list(box = TRUE, col = "grey"), pointpars = list(pch = 3, cols = "blue"), rectpars = list(lty = 3, border = "green")) } \arguments{ \item{x}{ Object of class \code{"laslett"} produced by \code{\link{laslett}} representing the result of Laslett's transform. } \item{\dots}{ Additional plot arguments passed to \code{\link{plot.solist}}. } \item{Xpars}{ A list of plot arguments passed to \code{\link{plot.owin}} or \code{\link{plot.im}} to display the original region \code{X} before transformation. } \item{pointpars}{ A list of plot arguments passed to \code{\link{plot.ppp}} to display the tangent points. } \item{rectpars}{ A list of plot arguments passed to \code{\link{plot.owin}} to display the maximal rectangle. } } \details{ This is the \code{plot} method for the class \code{"laslett"}. The function \code{\link{laslett}} applies Laslett's Transform to a spatial region \code{X} and returns an object of class \code{"laslett"} representing the result of the transformation. The result is plotted by this method. The plot function \code{\link{plot.solist}} is used to align the before-and-after pictures. See \code{\link{plot.solist}} for further options to control the plot. } \value{ None. } \author{ Kassel Hingee and \adrian. } \seealso{ \code{\link{laslett}} } \examples{ b <- laslett(heather$coarse, plotit=FALSE) plot(b, main="Heather Data") } \keyword{spatial} \keyword{hplot} spatstat/man/rLGCP.Rd0000644000176200001440000001172213160710621014066 0ustar liggesusers\name{rLGCP} \alias{rLGCP} \title{Simulate Log-Gaussian Cox Process} \description{ Generate a random point pattern, a realisation of the log-Gaussian Cox process. } \usage{ rLGCP(model="exp", mu = 0, param = NULL, \dots, win=NULL, saveLambda=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{model}{ character string: the short name of a covariance model for the Gaussian random field. 
After adding the prefix \code{"RM"}, the code will search for a function of this name in the \pkg{RandomFields} package. } \item{mu}{ mean function of the Gaussian random field. Either a single number, a \code{function(x,y, ...)} or a pixel image (object of class \code{"im"}). } \item{param}{ List of parameters for the covariance. Standard arguments are \code{var} and \code{scale}. } \item{\dots}{ Additional parameters for the covariance, or arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"}. } \item{saveLambda}{ Logical. If \code{TRUE} (the default) then the simulated random intensity will also be saved, and returned as an attribute of the point pattern. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (object of class \code{"ppp"}) or a list of point patterns. Additionally, the simulated intensity function for each point pattern is returned as an attribute \code{"Lambda"} of the point pattern, if \code{saveLambda=TRUE}. } \details{ This function generates a realisation of a log-Gaussian Cox process (LGCP). This is a Cox point process in which the logarithm of the random intensity is a Gaussian random field with mean function \eqn{\mu} and covariance function \eqn{c(r)}. Conditional on the random intensity, the point process is a Poisson process with this intensity. The string \code{model} specifies the covariance function of the Gaussian random field, and the parameters of the covariance are determined by \code{param} and \code{\dots}. To determine the covariance model, the string \code{model} is prefixed by \code{"RM"}, and a function of this name is sought in the \pkg{RandomFields} package. 
For a list of available models see \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package. For example the \ifelse{latex}{\out{Mat\'ern}}{Matern} covariance is specified by \code{model="matern"}, corresponding to the function \code{RMmatern} in the \pkg{RandomFields} package. Standard variance parameters (for all functions beginning with \code{"RM"} in the \pkg{RandomFields} package) are \code{var} for the variance at distance zero, and \code{scale} for the scale parameter. Other parameters are specified in the help files for the individual functions beginning with \code{"RM"}. For example the help file for \code{RMmatern} states that \code{nu} is a parameter for this model. This algorithm uses the function \code{\link[RandomFields]{RFsimulate}} in the \pkg{RandomFields} package to generate values of a Gaussian random field, with the specified mean function \code{mu} and the covariance specified by the arguments \code{model} and \code{param}, on the points of a regular grid. The exponential of this random field is taken as the intensity of a Poisson point process, and a realisation of the Poisson process is then generated by the function \code{\link{rpoispp}} in the \pkg{spatstat} package. If the simulation window \code{win} is missing or \code{NULL}, then it defaults to \code{Window(mu)} if \code{mu} is a pixel image, and it defaults to the unit square otherwise. The LGCP model can be fitted to data using \code{\link{kppm}}. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rGaussPoisson}}, \code{\link{rNeymanScott}}, \code{\link{lgcp.estK}}, \code{\link{kppm}} } \references{ \ifelse{latex}{\out{M\o ller}}{Moller}, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. 
} \examples{ if(require(RandomFields)) { # homogeneous LGCP with exponential covariance function X <- rLGCP("exp", 3, var=0.2, scale=.1) # inhomogeneous LGCP with Gaussian covariance function m <- as.im(function(x, y){5 - 1.5 * (x - 0.5)^2 + 2 * (y - 0.5)^2}, W=owin()) X <- rLGCP("gauss", m, var=0.15, scale =0.5) plot(attr(X, "Lambda")) points(X) # inhomogeneous LGCP with Matern covariance function X <- rLGCP("matern", function(x, y){ 1 - 0.4 * x}, var=2, scale=0.7, nu=0.5, win = owin(c(0, 10), c(0, 10))) plot(X) } } \author{Abdollah Jalilian and Rasmus Waagepetersen. Modified by \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat/man/hybrid.family.Rd0000644000176200001440000000167113160710621015722 0ustar liggesusers\name{hybrid.family} \alias{hybrid.family} \title{ Hybrid Interaction Family } \description{ An object describing the family of all hybrid interactions. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the family of all hybrid point process models. If you need to create a specific hybrid interaction model for use in modelling, use the function \code{\link{Hybrid}}. Anyway, \code{hybrid.family} is an object of class \code{"isf"} containing a function \code{hybrid.family$eval} for evaluating the sufficient statistics of any hybrid interaction point process model. } \seealso{ Use \code{\link{Hybrid}} to make hybrid interactions. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/rmhmodel.list.Rd0000644000176200001440000001127613160710621015744 0ustar liggesusers\name{rmhmodel.list} \alias{rmhmodel.list} \title{Define Point Process Model for Metropolis-Hastings Simulation.} \description{ Given a list of parameters, builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ \method{rmhmodel}{list}(model, ...) } \arguments{ \item{model}{A list of parameters. See Details.} \item{\dots}{ Optional list of additional named parameters. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a validated list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.list} is the method for lists. The argument \code{model} should be a named list of parameters of the form \code{list(cif, par, w, trend, types)} where \code{cif} and \code{par} are required and the others are optional. For details about these components, see \code{\link{rmhmodel.default}}. The subsequent arguments \code{\dots} (if any) may also have these names, and they will take precedence over elements of the list \code{model}. } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. 
Biometrika \bold{74}, 763 -- 770. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{rmhmodel}}, \code{\link{rmhmodel.default}}, \code{\link{rmhmodel.ppm}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \code{\link{Strauss}}, \code{\link{Softcore}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{DiggleGratton}}, \code{\link{PairPiece}} } \examples{ # Strauss process: mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) mod01 <- rmhmodel(mod01) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=owin(c(0,10),c(0,5))) mod04 <- rmhmodel(mod04) # Soft core: w <- square(10) mod07 <- list(cif="sftcr", par=list(beta=0.8,sigma=0.1,kappa=0.5), w=w) mod07 <- rmhmodel(mod07) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250)) mod08 <- rmhmodel(mod08) # specify types mod09 <- rmhmodel(list(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250), types=c("A", "B"))) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) ri <- matrix(c(45,45,45,45),2,2) rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=ri,hradii=rhc),w=c(0,250,0,250), 
trend=list(tr3,tr4)) mod10 <- rmhmodel(mod10) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) mod17 <- rmhmodel(mod17) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/integral.linim.Rd0000644000176200001440000000217113160710621016071 0ustar liggesusers\name{integral.linim} \alias{integral.linim} \alias{integral.linfun} \title{ Integral on a Linear Network } \description{ Computes the integral (total value) of a function or pixel image over a linear network. } \usage{ \method{integral}{linim}(f, domain=NULL, ...) \method{integral}{linfun}(f, domain=NULL, ..., delta) } \arguments{ \item{f}{ A pixel image on a linear network (class \code{"linim"}) or a function on a linear network (class \code{"linfun"}). } \item{domain}{ Optional window specifying the domain of integration. } \item{\dots}{ Ignored. } \item{delta}{ Optional. The step length (in coordinate units) for computing the approximate integral. A single positive number. } } \details{ The integral (total value of the function over the network) is calculated. } \value{ A numeric value. } \seealso{ \code{\link{linim}}, \code{\link{integral.im}} } \examples{ # make some data xcoord <- linfun(function(x,y,seg,tp) { x }, simplenet) integral(xcoord) X <- as.linim(xcoord) integral(X) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/intersect.tess.Rd0000644000176200001440000000425513160710621016137 0ustar liggesusers\name{intersect.tess} \alias{intersect.tess} \title{Intersection of Two Tessellations} \description{ Yields the intersection of two tessellations, or the intersection of a tessellation with a window. 
} \usage{ intersect.tess(X, Y, \dots, keepmarks=FALSE) } \arguments{ \item{X,Y}{Two tessellations (objects of class \code{"tess"}), or windows (objects of class \code{"owin"}), or other data that can be converted to tessellations by \code{\link{as.tess}}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to control the discretisation, if required. } \item{keepmarks}{ Logical value. If \code{TRUE}, the marks attached to the tiles of \code{X} and \code{Y} will be retained as marks of the intersection tiles. } } \value{ A tessellation (object of class \code{"tess"}). } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. If \code{X} and \code{Y} are not tessellations, they are first converted into tessellations by \code{\link{as.tess}}. The function \code{intersect.tess} then computes the intersection between the two tessellations. This is another tessellation, each of whose tiles is the intersection of a tile from \code{X} and a tile from \code{Y}. One possible use of this function is to slice a window \code{W} into subwindows determined by a tessellation. See the Examples.
} \author{\adrian and \rolf } \seealso{ \code{\link{tess}}, \code{\link{as.tess}}, \code{\link{intersect.owin}} } \examples{ opa <- par(mfrow=c(1,3)) # polygon data(letterR) plot(letterR) # tessellation of rectangles X <- tess(xgrid=seq(2, 4, length=10), ygrid=seq(0, 3.5, length=8)) plot(X) plot(intersect.tess(X, letterR)) A <- runifpoint(10) B <- runifpoint(10) plot(DA <- dirichlet(A)) plot(DB <- dirichlet(B)) plot(intersect.tess(DA, DB)) par(opa) marks(DA) <- 1:10 marks(DB) <- 1:10 plot(Z <- intersect.tess(DA,DB, keepmarks=TRUE)) mZ <- marks(Z) tZ <- tiles(Z) for(i in which(mZ[,1] == 3)) plot(tZ[[i]], add=TRUE, col="pink") } \keyword{spatial} \keyword{math} spatstat/man/areaGain.Rd0000644000176200001440000000443313160710571014673 0ustar liggesusers\name{areaGain} \alias{areaGain} \title{Difference of Disc Areas} \description{ Computes the area of that part of a disc that is not covered by other discs. } \usage{ areaGain(u, X, r, ..., W=as.owin(X), exact=FALSE, ngrid=spatstat.options("ngrid.disc")) } \arguments{ \item{u}{ Coordinates of the centre of the disc of interest. A vector of length 2. Alternatively, a point pattern (object of class \code{"ppp"}). } \item{X}{ Locations of the centres of other discs. A point pattern (object of class \code{"ppp"}). } \item{r}{ Disc radius, or vector of disc radii. } \item{\dots}{Ignored.} \item{W}{ Window (object of class \code{"owin"}) in which the area should be computed. } \item{exact}{ Choice of algorithm. If \code{exact=TRUE}, areas are computed exactly using analytic geometry. If \code{exact=FALSE} then a faster algorithm is used to compute a discrete approximation to the areas. } \item{ngrid}{ Integer. Number of points in the square grid used to compute the discrete approximation, when \code{exact=FALSE}. } } \value{ A matrix with one row for each point in \code{u} and one column for each value in \code{r}. 
} \details{ This function computes the area of that part of the disc of radius \code{r} centred at the location \code{u} that is \emph{not} covered by any of the discs of radius \code{r} centred at the points of the pattern \code{X}. This area is important in some calculations related to the area-interaction model \code{\link{AreaInter}}. If \code{u} is a point pattern and \code{r} is a vector, the result is a matrix, with one row for each point in \code{u} and one column for each entry of \code{r}. The \code{[i,j]} entry in the matrix is the area of that part of the disc of radius \code{r[j]} centred at the location \code{u[i]} that is \emph{not} covered by any of the discs of radius \code{r[j]} centred at the points of the pattern \code{X}. If \code{W} is not \code{NULL}, then the areas are computed only inside the window \code{W}. } \seealso{ \code{\link{AreaInter}}, \code{\link{areaLoss}} } \examples{ data(cells) u <- c(0.5,0.5) areaGain(u, cells, 0.1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/model.images.Rd0000644000176200001440000001130713160710621015522 0ustar liggesusers\name{model.images} \alias{model.images} \alias{model.images.ppm} \alias{model.images.dppm} \alias{model.images.kppm} \alias{model.images.lppm} \alias{model.images.slrm} \title{Compute Images of Constructed Covariates} \description{ For a point process model fitted to spatial point pattern data, this function computes pixel images of the covariates in the design matrix. } \usage{ model.images(object, ...) \method{model.images}{ppm}(object, W = as.owin(object), ...) \method{model.images}{kppm}(object, W = as.owin(object), ...) \method{model.images}{dppm}(object, W = as.owin(object), ...) \method{model.images}{lppm}(object, L = as.linnet(object), ...) \method{model.images}{slrm}(object, ...) } \arguments{ \item{object}{ The fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"} or \code{"slrm"} or \code{"dppm"}. 
} \item{W}{ A window (object of class \code{"owin"}) in which the images should be computed. Defaults to the window in which the model was fitted. } \item{L}{ A linear network (object of class \code{"linnet"}) in which the images should be computed. Defaults to the network in which the model was fitted. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } } \details{ This command is similar to \code{\link{model.matrix.ppm}} except that it computes pixel images of the covariates, instead of computing the covariate values at certain points only. The \code{object} must be a fitted spatial point process model object of class \code{"ppm"} (produced by the model-fitting function \code{\link{ppm}}) or class \code{"kppm"} (produced by the fitting function \code{\link{kppm}}) or class \code{"dppm"} (produced by the fitting function \code{\link{dppm}}) or class \code{"lppm"} (produced by \code{\link{lppm}}) or class \code{"slrm"} (produced by \code{\link{slrm}}). The spatial covariates required by the model-fitting procedure are computed at every pixel location in the window \code{W}. For \code{lppm} objects, the covariates are computed at every location on the network \code{L}. For \code{slrm} objects, the covariates are computed on the pixels that were used to fit the model. Note that the spatial covariates computed here are not the original covariates that were supplied when fitting the model. Rather, they are the covariates that actually appear in the loglinear representation of the (conditional) intensity and in the columns of the design matrix. For example, they might include dummy or indicator variables for different levels of a factor, depending on the contrasts that are in force. The pixel resolution is determined by \code{W} if \code{W} is a mask (that is \code{W$type = "mask"}). Otherwise, the pixel resolution is determined by \code{\link{spatstat.options}}. 
The format of the result depends on whether the original point pattern data were marked or unmarked. \itemize{ \item If the original dataset was unmarked, the result is a named list of pixel images (objects of class \code{"im"}) containing the values of the spatial covariates. The names of the list elements are the names of the covariates determined by \code{\link{model.matrix.lm}}. The result is also of class \code{"solist"} so that it can be plotted immediately. \item If the original dataset was a multitype point pattern, the result is a \code{\link{hyperframe}} with one column for each possible type of points. Each column is a named list of pixel images (objects of class \code{"im"}) containing the values of the spatial covariates. The row names of the hyperframe are the names of the covariates determined by \code{\link{model.matrix.lm}}. } } \value{ A list (of class \code{"solist"}) or array (of class \code{"hyperframe"}) containing pixel images (objects of class \code{"im"}). For \code{model.images.lppm}, the images are also of class \code{"linim"}. 
} \author{\adrian \rolf and \ege } \seealso{ \code{\link{model.matrix.ppm}}, \code{\link[stats]{model.matrix}}, \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{lppm}}, \code{\link{dppm}}, \code{\link{kppm}}, \code{\link{slrm}}, \code{\link{im}}, \code{\link{im.object}}, \code{\link{plot.solist}}, \code{\link{spatstat.options}} } \examples{ fit <- ppm(cells ~ x) model.images(fit) B <- owin(c(0.2, 0.4), c(0.3, 0.8)) model.images(fit, B) fit2 <- ppm(cells ~ cut(x,3)) model.images(fit2) fit3 <- slrm(japanesepines ~ x) model.images(fit3) fit4 <- ppm(amacrine ~ marks + x) model.images(fit4) } \keyword{spatial} \keyword{models} spatstat/man/clusterkernel.Rd0000644000176200001440000000231713160710571016045 0ustar liggesusers\name{clusterkernel} \alias{clusterkernel} \alias{clusterkernel.character} \alias{clusterkernel.kppm} \title{ Extract Cluster Offspring Kernel } \description{ Given a cluster point process model, this command returns the probability density of the cluster offspring. } \usage{ clusterkernel(model, \dots) \method{clusterkernel}{kppm}(model, \dots) \method{clusterkernel}{character}(model, \dots) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster or Cox model (object of class \code{"kppm"}), or a character string specifying the type of cluster model. } \item{\dots}{ Parameter values for the model, when \code{model} is a character string. } } \details{ Given a specification of a cluster point process model, this command returns a \code{function(x,y)} giving the two-dimensional probability density of the cluster offspring points assuming a cluster parent located at the origin. } \value{ A function in the \R\ language with arguments \code{x,y,\dots}. 
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link{clusterfield}}, \code{\link{kppm}} } \examples{ fit <- kppm(redwood ~ x, "MatClust") f <- clusterkernel(fit) f(0.1, 0.2) } \keyword{spatial} spatstat/man/plot.bermantest.Rd0000644000176200001440000000551413160710621016302 0ustar liggesusers\name{plot.bermantest} \alias{plot.bermantest} \title{Plot Result of Berman Test} \description{ Plot the result of Berman's test of goodness-of-fit } \usage{ \method{plot}{bermantest}(x, ..., lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2) } \arguments{ \item{x}{ Object to be plotted. An object of class \code{"bermantest"} produced by \code{\link{berman.test}}. } \item{\dots}{ extra arguments that will be passed to the plotting function \code{\link{plot.ecdf}}. } \item{col,lwd,lty}{ The width, colour and type of lines used to plot the empirical distribution curve. } \item{col0,lwd0,lty0}{ The width, colour and type of lines used to plot the predicted (null) distribution curve. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"bermantest"}. An object of this class represents the outcome of Berman's test of goodness-of-fit of a spatial Poisson point process model, computed by \code{\link{berman.test}}. For the \emph{Z1} test (i.e. if \code{x} was computed using \code{berman.test( ,which="Z1")}), the plot displays the two cumulative distribution functions that are compared by the test: namely the empirical cumulative distribution function of the covariate at the data points, \eqn{\hat F}{Fhat}, and the predicted cumulative distribution function of the covariate under the model, \eqn{F_0}{F0}, both plotted against the value of the covariate. Two vertical lines show the mean values of these two distributions. If the model is correct, the two curves should be close; the test is based on comparing the two vertical lines. For the \emph{Z2} test (i.e. 
if \code{x} was computed using \code{berman.test( ,which="Z2")}), the plot displays the empirical cumulative distribution function of the values \eqn{U_i = F_0(Y_i)}{U[i] = F0(Y[i])} where \eqn{Y_i}{Y[i]} is the value of the covariate at the \eqn{i}-th data point. The diagonal line with equation \eqn{y=x} is also shown. Two vertical lines show the mean of the values \eqn{U_i}{U[i]} and the value \eqn{1/2}. If the model is correct, the two curves should be close. The test is based on comparing the two vertical lines. } \seealso{ \code{\link{berman.test}} } \examples{ # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(-x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X, ~1) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model k <- berman.test(fit0, xcoord, "Z1") # plot result of test plot(k, col="red", col0="green") # Z2 test k2 <- berman.test(fit0, xcoord, "Z2") plot(k2, col="red", col0="green") } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/solapply.Rd0000644000176200001440000000325213160710621015021 0ustar liggesusers\name{solapply} \alias{solapply} \alias{anylapply} \title{ Apply a Function Over a List and Obtain a List of Objects } \description{ Applies the function \code{FUN} to each element of the list \code{X}, and returns the result as a list of class \code{"solist"} or \code{"anylist"} as appropriate. } \usage{ anylapply(X, FUN, \dots) solapply(X, FUN, \dots, check = TRUE, promote = TRUE, demote = FALSE) } \arguments{ \item{X}{A list.} \item{FUN}{ Function to be applied to each element of \code{X}. } \item{\dots}{ Additional arguments to \code{FUN}. } \item{check,promote,demote}{ Arguments passed to \code{\link{solist}} which determine how to handle different classes of objects. } } \details{ These convenience functions are similar to \code{\link[base]{lapply}} except that they return a list of class \code{"solist"} or \code{"anylist"}. 
In both functions, the result is computed by \code{lapply(X, FUN, \dots)}. In \code{anylapply} the result is converted to a list of class \code{"anylist"} and returned. In \code{solapply} the result is converted to a list of class \code{"solist"} \bold{if possible}, using \code{\link{as.solist}}. If this is not possible, then the behaviour depends on the argument \code{demote}. If \code{demote=TRUE} the result will be returned as a list of class \code{"anylist"}. If \code{demote=FALSE} (the default), an error occurs. } \value{ A list, usually of class \code{"solist"}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{solist}}, \code{\link{anylist}}. } \examples{ solapply(waterstriders, density) } \keyword{spatial} \keyword{list} \keyword{manip} spatstat/man/rMosaicSet.Rd0000644000176200001440000000261313160710621015227 0ustar liggesusers\name{rMosaicSet} \alias{rMosaicSet} \title{Mosaic Random Set} \description{ Generate a random set by taking a random selection of tiles of a given tessellation. } \usage{ rMosaicSet(X, p=0.5) } \arguments{ \item{X}{ A tessellation (object of class \code{"tess"}). } \item{p}{ Probability of including a given tile. A number strictly between 0 and 1. } } \details{ Given a tessellation \code{X}, this function randomly selects some of the tiles of \code{X}, including each tile with probability \eqn{p} independently of the other tiles. The selected tiles are then combined to form a set in the plane. One application of this is Switzer's (1965) example of a random set which has a Markov property. It is constructed by generating \code{X} according to a Poisson line tessellation (see \code{\link{rpoislinetess}}). } \value{ A window (object of class \code{"owin"}). } \references{ Switzer, P. A random set process in the plane with a Markovian property. \emph{Annals of Mathematical Statistics} \bold{36} (1965) 1859--1863. 
} \author{\adrian and \rolf } \seealso{ \code{\link{rpoislinetess}}, \code{\link{rMosaicField}} } \examples{ # Switzer's random set X <- rpoislinetess(3) plot(rMosaicSet(X, 0.5), col="green", border=NA) # another example plot(rMosaicSet(dirichlet(runifpoint(30)), 0.4)) } \keyword{spatial} \keyword{datagen} spatstat/man/Extract.tess.Rd0000644000176200001440000000444213160710621015547 0ustar liggesusers\name{Extract.tess} \alias{[.tess} \alias{[<-.tess} \title{Extract or Replace Subset of Tessellation} \description{ Extract, change or delete a subset of the tiles of a tessellation, to make a new tessellation. } \usage{ \method{[}{tess}(x, i, \dots) \method{[}{tess}(x, i, \dots) <- value } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} \item{i}{ Subset index for the tiles of the tessellation. Alternatively a window (object of class \code{"owin"}). } \item{\dots}{ One argument that specifies the subset to be extracted or changed. Any valid format for the subset index in a list. } \item{value}{ Replacement value for the selected tiles of the tessellation. A list of windows (objects of class \code{"owin"}) or \code{NULL}. } } \details{ A tessellation (object of class \code{"tess"}, see \code{\link{tess}}) is effectively a list of tiles (spatial regions) that cover a spatial region. The subset operator \code{[.tess} extracts some of these tiles and forms a new tessellation, which of course covers a smaller region than the original. For \code{[.tess} only, the subset index can also be a window (object of class \code{"owin"}). The tessellation \code{x} is then intersected with the window. The replacement operator changes the selected tiles. The replacement \code{value} may be either \code{NULL} (which causes the selected tiles to be removed from \code{x}) or a list of the same length as the selected subset. 
The entries of \code{value} may be windows (objects of class \code{"owin"}) or \code{NULL} to indicate that the corresponding tile should be deleted. Generally it does not make sense to replace a tile in a tessellation with a completely different tile, because the tiles are expected to fit together. However this facility is sometimes useful for making small adjustments to polygonal tiles. } \value{ A tessellation (object of class \code{"tess"}). } \seealso{ \code{\link{tess}}, \code{\link{tiles}}, \code{\link{intersect.tess}}. } \examples{ \testonly{op <- spatstat.options(npixel=10)} A <- tess(xgrid=0:4, ygrid=0:3) B <- A[c(1, 3, 7)] E <- A[-1] A[c(2, 5, 11)] <- NULL \testonly{spatstat.options(op)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/as.mask.psp.Rd0000644000176200001440000000301413160710571015314 0ustar liggesusers\name{as.mask.psp} \alias{as.mask.psp} \title{ Convert Line Segment Pattern to Binary Pixel Mask } \description{ Converts a line segment pattern to a binary pixel mask by determining which pixels intersect the lines. } \usage{ as.mask.psp(x, W=NULL, ...) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}). } \item{W}{ Optional window (object of class \code{"owin"}) determining the pixel raster. } \item{\dots}{ Optional extra arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } } \details{ This function converts a line segment pattern to a binary pixel mask by determining which pixels intersect the lines. The pixel raster is determined by \code{W} and the optional arguments \code{\dots}. If \code{W} is missing or \code{NULL}, it defaults to the window containing \code{x}. Then \code{W} is converted to a binary pixel mask using \code{\link{as.mask}}. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. } \value{ A window (object of class \code{"owin"}) which is a binary pixel mask (type \code{"mask"}). 
} \seealso{ \code{\link{pixellate.psp}}, \code{\link{as.mask}}. Use \code{\link{pixellate.psp}} if you want to measure the length of line in each pixel. } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(as.mask.psp(X)) plot(X, add=TRUE, col="red") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/rMosaicField.Rd0000644000176200001440000000262013160710621015515 0ustar liggesusers\name{rMosaicField} \alias{rMosaicField} \title{Mosaic Random Field} \description{ Generate a realisation of a random field which is piecewise constant on the tiles of a given tessellation. } \usage{ rMosaicField(X, rgen = function(n) { sample(0:1, n, replace = TRUE)}, ..., rgenargs=NULL) } \arguments{ \item{X}{ A tessellation (object of class \code{"tess"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the pixel resolution. } \item{rgen}{ Function that generates random values for the tiles of the tessellation. } \item{rgenargs}{ List containing extra arguments that should be passed to \code{rgen} (typically specifying parameters of the distribution of the values). } } \details{ This function generates a realisation of a random field which is piecewise constant on the tiles of the given tessellation \code{X}. The values in each tile are independent and identically distributed. } \value{ A pixel image (object of class \code{"im"}). 
} \author{\adrian and \rolf } \seealso{ \code{\link{rpoislinetess}}, \code{\link{rMosaicSet}} } \examples{ X <- rpoislinetess(3) plot(rMosaicField(X, runif)) plot(rMosaicField(X, runif, dimyx=256)) plot(rMosaicField(X, rnorm, rgenargs=list(mean=10, sd=2))) plot(rMosaicField(dirichlet(runifpoint(30)), rnorm)) } \keyword{spatial} \keyword{datagen} spatstat/man/leverage.ppm.Rd0000644000176200001440000000706713160710621015553 0ustar liggesusers\name{leverage.ppm} \alias{leverage} \alias{leverage.ppm} \title{ Leverage Measure for Spatial Point Process Model } \description{ Computes the leverage measure for a fitted spatial point process model. } \usage{ leverage(model, ...) \method{leverage}{ppm}(model, ..., drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ The function \code{leverage} is generic, and \code{leverage.ppm} is the method for objects of class \code{"ppm"}. Given a fitted spatial point process model \code{model}, the function \code{leverage.ppm} computes the leverage of the model, described in Baddeley, Chang and Song (2013). The leverage of a spatial point process model is a function of spatial location, and is typically displayed as a colour pixel image. The leverage value \eqn{h(u)} at a spatial location \eqn{u} represents the change in the fitted trend of the fitted point process model that would have occurred if a data point were to have occurred at the location \eqn{u}. 
A relatively large value of \eqn{h()} indicates a part of the space where the data have a \emph{potentially} strong effect on the fitted model (specifically, a strong effect on the intensity or trend of the fitted model) due to the values of the covariates. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the leverage calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. The result of \code{leverage.ppm} is an object of class \code{"leverage.ppm"}. It can be plotted (by \code{\link{plot.leverage.ppm}}) or converted to a pixel image by \code{as.im} (see \code{\link{as.im.leverage.ppm}}). } \value{ An object of class \code{"leverage.ppm"} that can be plotted (by \code{\link{plot.leverage.ppm}}). There are also methods for \code{persp}, \code{print}, \code{[}, \code{as.im}, \code{as.function} and \code{as.owin}. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{ppmInfluence}}, \code{\link{plot.leverage.ppm}} \code{\link{as.function.leverage.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) plot(leverage(fit)) } \keyword{spatial} \keyword{models} spatstat/man/dppspecdenrange.Rd0000644000176200001440000000111113160710571016314 0ustar liggesusers\name{dppspecdenrange} \alias{dppspecdenrange} \title{Range of Spectral Density of a Determinantal Point Process Model} \description{ Computes the range of the spectral density of a determinantal point process model. } \usage{dppspecdenrange(model)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} } \value{Numeric value (possibly \code{Inf}).} \author{ \adrian \rolf and \ege } \examples{ m <- dppBessel(lambda=100, alpha=0.05, sigma=1, d=2) dppspecdenrange(m) } \seealso{ \code{\link{dppspecden}} } \keyword{spatial} \keyword{models} spatstat/man/Strauss.Rd0000644000176200001440000000600013160710571014620 0ustar liggesusers\name{Strauss} \alias{Strauss} \title{The Strauss Point Process Model} \description{ Creates an instance of the Strauss point process model which can then be fitted to point pattern data. } \usage{ Strauss(r) } \arguments{ \item{r}{The interaction radius of the Strauss process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Strauss process with interaction radius \eqn{r}. } \details{ The (stationary) Strauss process with interaction radius \eqn{r} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the pairwise interaction point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the density. Thus the probability density is \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . 
beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} so that this model describes an ``ordered'' or ``inhibitive'' pattern. The nonstationary Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Strauss process pairwise interaction is yielded by the function \code{Strauss()}. See the examples below. Note the only argument is the interaction radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Strauss()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. 
} \examples{ Strauss(r=0.1) # prints a sensible description of itself data(cells) \dontrun{ ppm(cells, ~1, Strauss(r=0.07)) # fit the stationary Strauss process to `cells' } ppm(cells, ~polynom(x,y,3), Strauss(r=0.07)) # fit a nonstationary Strauss process with log-cubic polynomial trend } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/Hest.Rd0000644000176200001440000001321613160710571014066 0ustar liggesusers\name{Hest} \alias{Hest} \title{Spherical Contact Distribution Function} \description{ Estimates the spherical contact distribution function of a random set. } \usage{ Hest(X, r=NULL, breaks=NULL, ..., W, correction=c("km", "rs", "han"), conditional=TRUE) } \arguments{ \item{X}{The observed random set. An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"}. Alternatively a pixel image (class \code{"im"}) with logical values. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{H(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the discretisation. } \item{W}{ Optional. A window (object of class \code{"owin"}) to be taken as the window of observation. The contact distribution function will be estimated from values of the contact distance inside \code{W}. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{H(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"han"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{conditional}{ Logical value indicating whether to compute the conditional or unconditional distribution. See Details. 
} } \details{ The spherical contact distribution function of a stationary random set \eqn{X} is the cumulative distribution function \eqn{H} of the distance from a fixed point in space to the nearest point of \eqn{X}, given that the point lies outside \eqn{X}. That is, \eqn{H(r)} equals the probability that \code{X} lies closer than \eqn{r} units away from the fixed point \eqn{x}, given that \code{X} does not cover \eqn{x}. Let \eqn{D = d(x,X)} be the shortest distance from an arbitrary point \eqn{x} to the set \code{X}. Then the spherical contact distribution function is \deqn{H(r) = P(D \le r \mid D > 0)}{H(r) = P(D <= r | D > 0)} For a point process, the spherical contact distribution function is the same as the empty space function \eqn{F} discussed in \code{\link{Fest}}. The argument \code{X} may be a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}) or a window (object of class \code{"owin"}). It is assumed to be a realisation of a stationary random set. The algorithm first calls \code{\link{distmap}} to compute the distance transform of \code{X}, then computes the Kaplan-Meier and reduced-sample estimates of the cumulative distribution following Hansen et al (1999). If \code{conditional=TRUE} (the default) the algorithm returns an estimate of the spherical contact function \eqn{H(r)} as defined above. If \code{conditional=FALSE}, it instead returns an estimate of the cumulative distribution function \eqn{H^\ast(r) = P(D \le r)}{H*(r) = P(D <= r)} which includes a jump at \eqn{r=0} if \code{X} has nonzero area. Accuracy depends on the pixel resolution, which is controlled by the arguments \code{eps}, \code{dimyx} and \code{xy} passed to \code{\link{as.mask}}. For example, use \code{eps=0.1} to specify square pixels of side 0.1 units, and \code{dimyx=256} to specify a 256 by 256 grid of pixels. 
} \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing up to six columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{H(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{H(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{H(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{H(r)} by the spatial Kaplan-Meier method } \item{han}{the spatial Hanisch-Chiu-Stoyan estimator of \eqn{H(r)} } \item{raw}{the uncorrected estimate of \eqn{H(r)}, i.e. the empirical distribution of the distance from a fixed point in the window to the nearest point of \code{X} } } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Hansen, M.B., Baddeley, A.J. and Gill, R.D. First contact distributions for spatial patterns: regularity and estimation. \emph{Advances in Applied Probability} \bold{31} (1999) 15-33. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. 
} \seealso{\code{\link{Fest}}} \examples{ X <- runifpoint(42) H <- Hest(X) Y <- rpoisline(10) H <- Hest(Y) H <- Hest(Y, dimyx=256) X <- heather$coarse plot(Hest(X)) H <- Hest(X, conditional=FALSE) P <- owin(poly=list(x=c(5.3, 8.5, 8.3, 3.7, 1.3, 3.7), y=c(9.7, 10.0, 13.6, 14.4, 10.7, 7.2))) plot(X) plot(P, add=TRUE, col="red") H <- Hest(X, W=P) Z <- as.im(FALSE, Frame(X)) Z[X] <- TRUE Z <- Z[P, drop=FALSE] plot(Z) H <- Hest(Z) } \author{ \spatstatAuthors with contributions from Kassel Hingee. } \keyword{spatial} \keyword{nonparametric} spatstat/man/spatialcdf.Rd0000644000176200001440000000647313160710621015300 0ustar liggesusers\name{spatialcdf} \alias{spatialcdf} \title{ Spatial Cumulative Distribution Function } \description{ Compute the spatial cumulative distribution function of a spatial covariate, optionally using spatially-varying weights. } \usage{ spatialcdf(Z, weights = NULL, normalise = FALSE, ..., W = NULL, Zname = NULL) } \arguments{ \item{Z}{ Spatial covariate. A pixel image or a \code{function(x,y,...)} } \item{weights}{ Spatial weighting for different locations. A pixel image, a \code{function(x,y,...)}, a window, a constant value, or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}). } \item{normalise}{ Logical. Whether the weights should be normalised so that they sum to 1. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution, or extra arguments passed to \code{Z} if it is a function. } \item{W}{ Optional window (object of class \code{"owin"}) defining the spatial domain. } \item{Zname}{ Optional character string for the name of the covariate \code{Z} used in plots. } } \details{ If \code{weights} is missing or \code{NULL}, it defaults to 1. The values of the covariate \code{Z} are computed on a grid of pixels. The weighted cumulative distribution function of \code{Z} values is computed, taking each value with weight equal to the pixel area. 
The resulting function \eqn{F} is such that \eqn{F(t)} is the area of the region of space where \eqn{Z \le t}{Z <= t}. If \code{weights} is a pixel image or a function, then the values of \code{weights} and of the covariate \code{Z} are computed on a grid of pixels. The \code{weights} are multiplied by the pixel area. Then the weighted empirical cumulative distribution function of \code{Z} values is computed using \code{\link{ewcdf}}. The resulting function \eqn{F} is such that \eqn{F(t)} is the total weight (or weighted area) of the region of space where \eqn{Z \le t}{Z <= t}. If \code{weights} is a fitted point process model, then it should be a Poisson process. The fitted intensity of the model, and the value of the covariate \code{Z}, are evaluated at the quadrature points used to fit the model. The \code{weights} are multiplied by the weights of the quadrature points. Then the weighted empirical cumulative distribution of \code{Z} values is computed using \code{\link{ewcdf}}. The resulting function \eqn{F} is such that \eqn{F(t)} is the expected number of points in the point process that will fall in the region of space where \eqn{Z \le t}{Z <= t}. If \code{normalise=TRUE}, the function is normalised so that its maximum value equals 1, so that it gives the cumulative \emph{fraction} of weight or cumulative fraction of points. The result can be printed, plotted, and used as a function. } \value{ A cumulative distribution function object belonging to the classes \code{"spatialcdf"}, \code{"ewcdf"}, \code{"ecdf"} and \code{"stepfun"}. 
} \author{ \adrian , \rolf and \ege } \seealso{ \code{\link{ewcdf}}, \code{\link{cdf.test}} } \examples{ with(bei.extra, { plot(spatialcdf(grad)) fit <- ppm(bei ~ elev) plot(spatialcdf(grad, predict(fit))) plot(A <- spatialcdf(grad, fit)) A(0.1) }) } \keyword{spatial} \keyword{nonparametric} spatstat/man/dclf.progress.Rd0000644000176200001440000001340613160710571015737 0ustar liggesusers\name{dclf.progress} \alias{dclf.progress} \alias{mad.progress} \alias{mctest.progress} \title{ Progress Plot of Test of Spatial Pattern } \description{ Generates a progress plot (envelope representation) of the Diggle-Cressie-Loosmore-Ford test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.progress(X, \dots) mad.progress(X, \dots) mctest.progress(X, fun = Lest, \dots, exponent = 1, nrank = 1, interpolate = FALSE, alpha, rmin=0) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{\dots}{ Arguments passed to \code{mctest.progress} or to \code{\link{envelope}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{alternative} to specify one-sided or two-sided envelopes, and \code{verbose=FALSE} to turn off the messages. } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{nrank}{ Integer. The rank of the critical value of the Monte Carlo test, amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will become the critical values for the test. } \item{interpolate}{ Logical value indicating how to compute the critical value. 
If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, and the critical value is the largest simulated value of the test statistic (if \code{nrank=1}) or the \code{nrank}-th largest (if \code{nrank} is another number). If \code{interpolate=TRUE}, kernel density estimation is applied to the simulated values, and the critical value is the upper \code{alpha} quantile of this estimated distribution. } \item{alpha}{ Optional. The significance level of the test. Equivalent to \code{nrank/(nsim+1)} where \code{nsim} is the number of simulations. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } } \details{ The Diggle-Cressie-Loosmore-Ford test and the Maximum Absolute Deviation test for a spatial point pattern are described in \code{\link{dclf.test}}. These tests depend on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{progress plot} or \emph{envelope representation} of the test (Baddeley et al, 2014) is a plot of the test statistic (and the corresponding critical value) against the length of the interval \code{rinterval}. The command \code{dclf.progress} performs \code{\link{dclf.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting values of the test statistic, and the corresponding critical values of the test, as a function of \eqn{R}. Similarly \code{mad.progress} performs \code{\link{mad.test}} using all possible intervals and returns the test statistic and critical value. More generally, \code{mctest.progress} performs a test based on the \eqn{L^p} discrepancy between the curves. The deviation between two curves is measured by the \eqn{p}th root of the integral of the \eqn{p}th power of the absolute value of the difference between the two curves. The exponent \eqn{p} is given by the argument \code{exponent}. 
The case \code{exponent=2} is the Cressie-Loosmore-Ford test, while \code{exponent=Inf} is the MAD test. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The result of each command is an object of class \code{"fv"} that can be plotted to obtain the progress plot. The display shows the test statistic (solid black line) and the Monte Carlo acceptance region (grey shading). The significance level for the Monte Carlo test is \code{nrank/(nsim+1)}. Note that \code{nsim} defaults to 99, so if the values of \code{nrank} and \code{nsim} are not given, the default is a test with significance level 0.01. If \code{X} is an envelope object, then some of the data stored in \code{X} may be re-used: \itemize{ \item If \code{X} is an envelope object containing simulated functions, and \code{fun=NULL}, then the code will re-use the simulated functions stored in \code{X}. \item If \code{X} is an envelope object containing simulated point patterns, then \code{fun} will be applied to the stored point patterns to obtain the simulated functions. If \code{fun} is not specified, it defaults to \code{\link{Lest}}. \item Otherwise, new simulations will be performed, and \code{fun} defaults to \code{\link{Lest}}. } } \value{ An object of class \code{"fv"} that can be plotted to obtain the progress plot. } \references{ Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. } \author{ \adrian , Andrew Hardegen, Tom Lawrence, Gopal Nair and Robin Milne. } \seealso{ \code{\link{dclf.test}} and \code{\link{mad.test}} for the tests. See \code{\link{plot.fv}} for information on plotting objects of class \code{"fv"}. 
} \examples{ plot(dclf.progress(cells, nsim=19)) } \keyword{spatial} \keyword{htest} spatstat/man/envelope.lpp.Rd0000644000176200001440000002240313160710621015564 0ustar liggesusers\name{envelope.lpp} \alias{envelope.lpp} \alias{envelope.lppm} \title{ Envelope for Point Patterns on Linear Network } \description{ Enables envelopes to be computed for point patterns on a linear network. } \usage{ \method{envelope}{lpp}(Y, fun=linearK, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL,use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{lppm}(Y, fun=linearK, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL,use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ A point pattern on a linear network (object of class \code{"lpp"}) or a fitted point process model on a linear network (object of class \code{"lppm"}). } \item{fun}{ Function that is to be computed for each simulated pattern. } \item{nsim}{ Number of simulations to perform. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. 
Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{fix.n}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points as the original data pattern. } \item{fix.marks}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points \emph{and} the same marks as the original data pattern. In a multitype point pattern this means that the simulated patterns will have the same number of points \emph{of each type} as the original data. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. Applicable only when \code{global=TRUE}. 
} \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{side="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{side="less"}) or a one-sided test with an upper critical boundary (\code{side="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. 
} \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \details{ This is a method for the generic function \code{\link{envelope}} applicable to point patterns on a linear network. The argument \code{Y} can be either a point pattern on a linear network, or a fitted point process model on a linear network. The function \code{fun} will be evaluated for the data and also for \code{nsim} simulated point patterns on the same linear network. The upper and lower envelopes of these evaluated functions will be computed as described in \code{\link{envelope}}. The type of simulation is determined as follows. \itemize{ \item if \code{Y} is a point pattern (object of class \code{"lpp"}) and \code{simulate} is missing or \code{NULL}, then random point patterns will be generated according to a Poisson point process on the linear network on which \code{Y} is defined, with intensity estimated from \code{Y}. \item if \code{Y} is a fitted point process model (object of class \code{"lppm"}) and \code{simulate} is missing or \code{NULL}, then random point patterns will be generated by simulating from the fitted model. \item If \code{simulate} is present, it should be an expression that can be evaluated to yield random point patterns on the same linear network as \code{Y}. 
} The function \code{fun} should accept as its first argument a point pattern on a linear network (object of class \code{"lpp"}) and should have another argument called \code{r} or a \code{\dots} argument. } \value{ Function value table (object of class \code{"fv"}) with additional information, as described in \code{\link{envelope}}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{envelope}}, \code{\link{linearK}} } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. } \examples{ if(interactive()) { ns <- 39 np <- 40 } else { ns <- np <- 3 } X <- runiflpp(np, simplenet) # uniform Poisson: random numbers of points envelope(X, nsim=ns) # uniform Poisson: conditional on observed number of points envelope(X, fix.n=TRUE, nsim=ns) # nonuniform Poisson fit <- lppm(X ~x) envelope(fit, nsim=ns) #multitype marks(X) <- sample(letters[1:2], np, replace=TRUE) envelope(X, nsim=ns) } \keyword{spatial} spatstat/man/marks.psp.Rd0000644000176200001440000000445113160710621015076 0ustar liggesusers\name{marks.psp} \alias{marks.psp} \alias{marks<-.psp} \title{Marks of a Line Segment Pattern} \description{ Extract or change the marks attached to a line segment pattern. } \usage{ \method{marks}{psp}(x, \dots, dfok=TRUE) \method{marks}{psp}(x, \dots) <- value } \arguments{ \item{x}{ Line segment pattern dataset (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{dfok}{ Logical. 
If \code{FALSE}, data frames of marks are not permitted and will generate an error. } \item{value}{ Vector or data frame of mark values, or \code{NULL}. } } \value{ For \code{marks(x)}, the result is a vector, factor or data frame, containing the mark values attached to the line segments of \code{x}. If there are no marks, the result is \code{NULL}. For \code{marks(x) <- value}, the result is the updated line segment pattern \code{x} (with the side-effect that the dataset \code{x} is updated in the current environment). } \details{ These functions extract or change the marks attached to each of the line segments in the pattern \code{x}. They are methods for the generic functions \code{\link{marks}} and \code{\link{marks<-}} for the class \code{"psp"} of line segment patterns. The expression \code{marks(x)} extracts the marks of \code{x}. The assignment \code{marks(x) <- value} assigns new marks to the dataset \code{x}, and updates the dataset \code{x} in the current environment. The marks can be a vector, a factor, or a data frame. For the assignment \code{marks(x) <- value}, the \code{value} should be a vector or factor of length equal to the number of segments in \code{x}, or a data frame with as many rows as there are segments in \code{x}. If \code{value} is a single value, or a data frame with one row, then it will be replicated so that the same marks will be attached to each segment. To remove marks, use \code{marks(x) <- NULL} or \code{unmark(x)}. } \seealso{ \code{\link{psp.object}}, \code{\link{marks}}, \code{\link{marks<-}} } \examples{ m <- data.frame(A=1:10, B=letters[1:10]) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m) marks(X) marks(X)[,2] marks(X) <- 42 marks(X) <- NULL } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{manip} spatstat/man/rlpp.Rd0000644000176200001440000000450413160710621014134 0ustar liggesusers\name{rlpp} \alias{rlpp} \title{ Random Points on a Linear Network } \description{ Generates \eqn{n} independent random points on a linear network with a specified probability density. } \usage{ rlpp(n, f, \dots, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of random points to generate. A nonnegative integer giving the number of points, or an integer vector giving the numbers of points of each type. } \item{f}{ Probability density (not necessarily normalised). A pixel image on a linear network (object of class \code{"linim"}) or a function on a linear network (object of class \code{"linfun"}). Alternatively, \code{f} can be a list of functions or pixel images, giving the densities of points of each type. } \item{\dots}{ Additional arguments passed to \code{f} if it is a function or a list of functions. } \item{nsim}{Number of simulated realisations to generate.} \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), the result is a point pattern. If \code{drop=FALSE}, the result is a list with one entry which is a point pattern. } } \details{ The linear network \code{L}, on which the points will be generated, is determined by the argument \code{f}. If \code{f} is a function, it is converted to a pixel image on the linear network, using any additional function arguments \code{\dots}. If \code{n} is a single integer and \code{f} is a function or pixel image, then independent random points are generated on \code{L} with probability density proportional to \code{f}. If \code{n} is an integer vector and \code{f} is a list of functions or pixel images, where \code{n} and \code{f} have the same length, then independent random points of several types are generated on \code{L}, with \code{n[i]} points of type \code{i} having probability density proportional to \code{f[[i]]}. 
} \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. Otherwise, a list of such point patterns. } \author{ \adrian } \seealso{ \code{\link{runiflpp}} } \examples{ g <- function(x, y, seg, tp) { exp(x + 3*y) } f <- linfun(g, simplenet) rlpp(20, f) plot(rlpp(20, f, nsim=3)) } \keyword{spatial} \keyword{datagen} spatstat/man/runifpointx.Rd0000644000176200001440000000245713160710621015551 0ustar liggesusers\name{runifpointx} \alias{runifpointx} \title{ Generate N Uniform Random Points in Any Dimensions } \description{ Generate a random point pattern containing \code{n} independent, uniform random points in any number of spatial dimensions. } \usage{ runifpointx(n, domain, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of points to be generated. } \item{domain}{ Multi-dimensional box in which the process should be generated. An object of class \code{"boxx"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern (an object of class \code{"ppx"}). If \code{nsim > 1} or \code{drop=FALSE}, a list of such point patterns. } \details{ This function generates a pattern of \code{n} independent random points, uniformly distributed in the multi-dimensional box \code{domain}. 
} \seealso{ \code{\link{rpoisppx}}, \code{\link{ppx}}, \code{\link{boxx}} } \examples{ w <- boxx(x=c(0,1), y=c(0,1), z=c(0,1), t=c(0,3)) X <- runifpointx(50, w) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/ppm.ppp.Rd0000644000176200001440000010643313165060312014555 0ustar liggesusers\name{ppm.ppp} \alias{ppm.ppp} \alias{ppm.quad} \concept{point process model} \concept{Poisson point process} \concept{Gibbs point process} \title{ Fit Point Process Model to Point Pattern Data } \description{ Fits a point process model to an observed point pattern. } \usage{ \method{ppm}{ppp}(Q, trend=~1, interaction=Poisson(), \dots, covariates=data, data=NULL, covfunargs = list(), subset, clipwin, correction="border", rbord=reach(interaction), use.gam=FALSE, method="mpl", forcefit=FALSE, emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL) \method{ppm}{quad}(Q, trend=~1, interaction=Poisson(), \dots, covariates=data, data=NULL, covfunargs = list(), subset, clipwin, correction="border", rbord=reach(interaction), use.gam=FALSE, method="mpl", forcefit=FALSE, emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL) } \arguments{ \item{Q}{ A data point pattern (of class \code{"ppp"}) to which the model will be fitted, or a quadrature scheme (of class \code{"quad"}) containing this pattern. } \item{trend}{ An \R formula object specifying the spatial trend to be fitted. The default formula, \code{~1}, indicates the model is stationary and no trend is to be fitted. 
} \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or a function that makes such an object, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{\dots}{Ignored.} \item{data,covariates}{ The values of any spatial covariates (other than the Cartesian coordinates) required by the model. Either a data frame, or a list whose entries are images, functions, windows, tessellations or single numbers. See Details. } \item{subset}{ Optional. An expression (which may involve the names of the Cartesian coordinates \code{x} and \code{y} and the names of entries in \code{data}) defining a subset of the spatial domain, to which the likelihood or pseudolikelihood should be restricted. See Details. The result of evaluating the expression should be either a logical vector, or a window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } \item{clipwin}{ Optional. A spatial window (object of class \code{"owin"}) to which data will be restricted, before model-fitting is performed. See Details. } \item{covfunargs}{ A named list containing the values of any additional arguments required by covariate functions. } \item{correction}{ The name of the edge correction to be used. The default is \code{"border"} indicating the border correction. Other possibilities may include \code{"Ripley"}, \code{"isotropic"}, \code{"periodic"}, \code{"translate"} and \code{"none"}, depending on the \code{interaction}. } \item{rbord}{ If \code{correction = "border"} this argument specifies the distance by which the window should be eroded for the border correction. } \item{use.gam}{ Logical flag; if \code{TRUE} then computations are performed using \code{gam} instead of \code{\link{glm}}. } \item{method}{ The method used to fit the model. 
Options are \code{"mpl"} for the method of Maximum PseudoLikelihood, \code{"logi"} for the Logistic Likelihood method, \code{"VBlogi"} for the Variational Bayes Logistic Likelihood method, and \code{"ho"} for the Huang-Ogata approximate maximum likelihood method. } \item{forcefit}{ Logical flag for internal use. If \code{forcefit=FALSE}, some trivial models will be fitted by a shortcut. If \code{forcefit=TRUE}, the generic fitting method will always be used. } \item{emend,project}{ (These are equivalent: \code{project} is an older name for \code{emend}.) Logical value. Setting \code{emend=TRUE} will ensure that the fitted model is always a valid point process by applying \code{\link{emend.ppm}}. } \item{prior.mean}{ Optional vector of prior means for canonical parameters (for \code{method="VBlogi"}). See Details. } \item{prior.var}{ Optional prior variance covariance matrix for canonical parameters (for \code{method="VBlogi"}). See Details. } \item{nd}{ Optional. Integer or pair of integers. The dimension of the grid of dummy points (\code{nd * nd} or \code{nd[1] * nd[2]}) used to evaluate the integral in the pseudolikelihood. Incompatible with \code{eps}. } \item{eps}{ Optional. A positive number, or a vector of two positive numbers, giving the horizontal and vertical spacing, respectively, of the grid of dummy points. Incompatible with \code{nd}. } \item{gcontrol}{ Optional. List of parameters passed to \code{\link{glm.control}} (or passed to \code{\link{gam.control}} if \code{use.gam=TRUE}) controlling the model-fitting algorithm. 
} \item{nsim}{ Number of simulated realisations to generate (for \code{method="ho"}) } \item{nrmh}{ Number of Metropolis-Hastings iterations for each simulated realisation (for \code{method="ho"}) } \item{start,control}{ Arguments passed to \code{\link{rmh}} controlling the behaviour of the Metropolis-Hastings algorithm (for \code{method="ho"}) } \item{verb}{ Logical flag indicating whether to print progress reports (for \code{method="ho"}) } \item{callstring}{ Internal use only. } } \value{ An object of class \code{"ppm"} describing a fitted point process model. See \code{\link{ppm.object}} for details of the format of this object and methods available for manipulating it. } \details{ \bold{NOTE:} This help page describes the \bold{old syntax} of the function \code{ppm}, described in many older documents. This old syntax is still supported. However, if you are learning about \code{ppm} for the first time, we recommend you use the \bold{new syntax} described in the help file for \code{\link{ppm}}. This function fits a point process model to an observed point pattern. The model may include spatial trend, interpoint interaction, and dependence on covariates. \describe{ \item{basic use:}{ In basic use, \code{Q} is a point pattern dataset (an object of class \code{"ppp"}) to which we wish to fit a model. The syntax of \code{ppm()} is closely analogous to the \R functions \code{\link{glm}} and \code{gam}. The analogy is: \tabular{ll}{ \bold{glm} \tab \bold{ppm} \cr \code{formula} \tab \code{trend} \cr \code{family} \tab \code{interaction} } The point process model to be fitted is specified by the arguments \code{trend} and \code{interaction} which are respectively analogous to the \code{formula} and \code{family} arguments of glm(). Systematic effects (spatial trend and/or dependence on spatial covariates) are specified by the argument \code{trend}. 
This is an \R formula object, which may be expressed in terms of the Cartesian coordinates \code{x}, \code{y}, the marks \code{marks}, or the variables in \code{covariates} (if supplied), or both. It specifies the \bold{logarithm} of the first order potential of the process. The formula should not use any names beginning with \code{.mpl} as these are reserved for internal use. If \code{trend} is absent or equal to the default, \code{~1}, then the model to be fitted is stationary (or at least, its first order potential is constant). The symbol \code{.} in the trend expression stands for all the covariates supplied in the argument \code{data}. For example the formula \code{~ .} indicates an additive model with a main effect for each covariate in \code{data}. Stochastic interactions between random points of the point process are defined by the argument \code{interaction}. This is an object of class \code{"interact"} which is initialised in a very similar way to the usage of family objects in \code{\link{glm}} and \code{gam}. The models currently available are: \GibbsInteractionsList. See the examples below. It is also possible to combine several interactions using \code{\link{Hybrid}}. If \code{interaction} is missing or \code{NULL}, then the model to be fitted has no interpoint interactions, that is, it is a Poisson process (stationary or nonstationary according to \code{trend}). In this case the methods of maximum pseudolikelihood and maximum logistic likelihood coincide with maximum likelihood. The fitted point process model returned by this function can be printed (by the print method \code{\link{print.ppm}}) to inspect the fitted parameter values. If a nonparametric spatial trend was fitted, this can be extracted using the predict method \code{\link{predict.ppm}}. 
} \item{Models with covariates:}{ To fit a model involving spatial covariates other than the Cartesian coordinates \eqn{x} and \eqn{y}, the values of the covariates should be supplied in the argument \code{covariates}. Note that it is not sufficient to have observed the covariate only at the points of the data point pattern; the covariate must also have been observed at other locations in the window. Typically the argument \code{covariates} is a list, with names corresponding to variables in the \code{trend} formula. Each entry in the list is either \describe{ \item{a pixel image,}{ giving the values of a spatial covariate at a fine grid of locations. It should be an object of class \code{"im"}, see \code{\link{im.object}}. } \item{a function,}{ which can be evaluated at any location \code{(x,y)} to obtain the value of the spatial covariate. It should be a \code{function(x, y)} or \code{function(x, y, ...)} in the \R language. The first two arguments of the function should be the Cartesian coordinates \eqn{x} and \eqn{y}. The function may have additional arguments; if the function does not have default values for these additional arguments, then the user must supply values for them, in \code{covfunargs}. See the Examples. } \item{a window,}{ interpreted as a logical variable which is \code{TRUE} inside the window and \code{FALSE} outside it. This should be an object of class \code{"owin"}. } \item{a tessellation,}{ interpreted as a factor covariate. For each spatial location, the factor value indicates which tile of the tessellation it belongs to. This should be an object of class \code{"tess"}. } \item{a single number,}{indicating a covariate that is constant in this dataset. } } The software will look up the values of each covariate at the required locations (quadrature points). Note that, for covariate functions, only the \emph{name} of the function appears in the trend formula. A covariate function is treated as if it were a single variable. 
The function arguments do not appear in the trend formula. See the Examples. If \code{covariates} is a list, the list entries should have names corresponding to the names of covariates in the model formula \code{trend}. The variable names \code{x}, \code{y} and \code{marks} are reserved for the Cartesian coordinates and the mark values, and these should not be used for variables in \code{covariates}. If \code{covariates} is a data frame, \code{Q} must be a quadrature scheme (see under Quadrature Schemes below). Then \code{covariates} must have as many rows as there are points in \code{Q}. The \eqn{i}th row of \code{covariates} should contain the values of spatial variables which have been observed at the \eqn{i}th point of \code{Q}. } \item{Quadrature schemes:}{ In advanced use, \code{Q} may be a `quadrature scheme'. This was originally just a technicality but it has turned out to have practical uses, as we explain below. Quadrature schemes are required for our implementation of the method of maximum pseudolikelihood. The definition of the pseudolikelihood involves an integral over the spatial window containing the data. In practice this integral must be approximated by a finite sum over a set of quadrature points. We use the technique of Baddeley and Turner (2000), a generalisation of the Berman-Turner (1992) device. In this technique the quadrature points for the numerical approximation include all the data points (points of the observed point pattern) as well as additional `dummy' points. Quadrature schemes are also required for the method of maximum logistic likelihood, which combines the data points with additional `dummy' points. A quadrature scheme is an object of class \code{"quad"} (see \code{\link{quad.object}}) which specifies both the data point pattern and the dummy points for the quadrature scheme, as well as the quadrature weights associated with these points. 
If \code{Q} is simply a point pattern (of class \code{"ppp"}, see \code{\link{ppp.object}}) then it is interpreted as specifying the data points only; a set of dummy points specified by \code{\link{default.dummy}()} is added, and the default weighting rule is invoked to compute the quadrature weights. Finer quadrature schemes (i.e. those with more dummy points) generally yield a better approximation, at the expense of higher computational load. An easy way to fit models using a finer quadrature scheme is to let \code{Q} be the original point pattern data, and use the argument \code{nd} to determine the number of dummy points in the quadrature scheme. Complete control over the quadrature scheme is possible. See \code{\link{quadscheme}} for an overview. Use \code{quadscheme(X, D, method="dirichlet")} to compute quadrature weights based on the Dirichlet tessellation, or \code{quadscheme(X, D, method="grid")} to compute quadrature weights by counting points in grid squares, where \code{X} and \code{D} are the patterns of data points and dummy points respectively. Alternatively use \code{\link{pixelquad}} to make a quadrature scheme with a dummy point at every pixel in a pixel image. A practical advantage of quadrature schemes arises when we want to fit a model involving covariates (e.g. soil pH). Suppose we have only been able to observe the covariates at a small number of locations. Suppose \code{cov.dat} is a data frame containing the values of the covariates at the data points (i.e.\ \code{cov.dat[i,]} contains the observations for the \code{i}th data point) and \code{cov.dum} is another data frame (with the same columns as \code{cov.dat}) containing the covariate values at another set of points whose locations are given by the point pattern \code{Y}. Then setting \code{Q = quadscheme(X,Y)} combines the data points and dummy points into a quadrature scheme, and \code{covariates = rbind(cov.dat, cov.dum)} combines the covariate data frames. 
We can then fit the model by calling \code{ppm(Q, ..., covariates)}. } \item{Model-fitting technique:}{ There are several choices for the technique used to fit the model. \describe{ \item{method="mpl"}{ (the default): the model will be fitted by maximising the pseudolikelihood (Besag, 1975) using the Berman-Turner computational approximation (Berman and Turner, 1992; Baddeley and Turner, 2000). Maximum pseudolikelihood is equivalent to maximum likelihood if the model is a Poisson process. Maximum pseudolikelihood is biased if the interpoint interaction is very strong, unless there is a large number of dummy points. The default settings for \code{method='mpl'} specify a moderately large number of dummy points, striking a compromise between speed and accuracy. } \item{method="logi":}{ the model will be fitted by maximising the logistic likelihood (Baddeley et al, 2014). This technique is roughly equivalent in speed to maximum pseudolikelihood, but is believed to be less biased. Because it is less biased, the default settings for \code{method='logi'} specify a relatively small number of dummy points, so that this method is the fastest, in practice. } \item{method="VBlogi":}{ the model will be fitted in a Bayesian setup by maximising the posterior probability density for the canonical model parameters. This uses the variational Bayes approximation to the posterior derived from the logistic likelihood as described in Rajala (2014). The prior is assumed to be multivariate Gaussian with mean vector \code{prior.mean} and variance-covariance matrix \code{prior.var}. } \item{method="ho":}{ the model will be fitted by applying the approximate maximum likelihood method of Huang and Ogata (1999). See below. The Huang-Ogata method is slower than the other options, but has better statistical properties. } } Note that \code{method='logi'}, \code{method='VBlogi'} and \code{method='ho'} involve randomisation, so that the results are subject to random variation. 
} \item{Huang-Ogata method:}{ If \code{method="ho"} then the model will be fitted using the Huang-Ogata (1999) approximate maximum likelihood method. First the model is fitted by maximum pseudolikelihood as described above, yielding an initial estimate of the parameter vector \eqn{\theta_0}{theta0}. From this initial model, \code{nsim} simulated realisations are generated. The score and Fisher information of the model at \eqn{\theta=\theta_0}{theta=theta0} are estimated from the simulated realisations. Then one step of the Fisher scoring algorithm is taken, yielding an updated estimate \eqn{\theta_1}{theta1}. The corresponding model is returned. Simulated realisations are generated using \code{\link{rmh}}. The iterative behaviour of the Metropolis-Hastings algorithm is controlled by the arguments \code{start} and \code{control} which are passed to \code{\link{rmh}}. As a shortcut, the argument \code{nrmh} determines the number of Metropolis-Hastings iterations run to produce one simulated realisation (if \code{control} is absent). Also if \code{start} is absent or equal to \code{NULL}, it defaults to \code{list(n.start=N)} where \code{N} is the number of points in the data point pattern. } \item{Edge correction}{ Edge correction should be applied to the sufficient statistics of the model, to reduce bias. The argument \code{correction} is the name of an edge correction method. The default \code{correction="border"} specifies the border correction, in which the quadrature window (the domain of integration of the pseudolikelihood) is obtained by trimming off a margin of width \code{rbord} from the observation window of the data pattern. Not all edge corrections are implemented (or implementable) for arbitrary windows. 
Other options depend on the argument \code{interaction}, but these generally include \code{correction="periodic"} (the periodic or toroidal edge correction in which opposite edges of a rectangular window are identified) and \code{correction="translate"} (the translation correction, see Baddeley 1998 and Baddeley and Turner 2000). For pairwise interaction models there is also Ripley's isotropic correction, identified by \code{correction="isotropic"} or \code{"Ripley"}. } \item{Subsetting}{ The arguments \code{subset} and \code{clipwin} specify that the model should be fitted to a restricted subset of the available data. These arguments are equivalent for Poisson point process models, but different for Gibbs models. If \code{clipwin} is specified, then all the available data will be restricted to this spatial region, and data outside this region will be discarded, before the model is fitted. If \code{subset} is specified, then no data are deleted, but the domain of integration of the likelihood or pseudolikelihood is restricted to the \code{subset}. For Poisson models, these two arguments have the same effect; but for a Gibbs model, interactions between points inside and outside the \code{subset} are taken into account, while interactions between points inside and outside the \code{clipwin} are ignored. } } } \section{Interaction parameters}{ Apart from the Poisson model, every point process model fitted by \code{ppm} has parameters that determine the strength and range of \sQuote{interaction} or dependence between points. These parameters are of two types: \describe{ \item{regular parameters:}{ A parameter \eqn{\phi}{phi} is called \emph{regular} if the log likelihood is a linear function of \eqn{\theta}{theta} where \eqn{\theta = \theta(\phi)}{theta = theta(phi)} is some transformation of \eqn{\phi}{phi}. [Then \eqn{\theta}{theta} is called the canonical parameter.] } \item{irregular parameters:}{ Other parameters are called \emph{irregular}. 
} } Typically, regular parameters determine the \sQuote{strength} of the interaction, while irregular parameters determine the \sQuote{range} of the interaction. For example, the Strauss process has a regular parameter \eqn{\gamma}{gamma} controlling the strength of interpoint inhibition, and an irregular parameter \eqn{r} determining the range of interaction. The \code{ppm} command is only designed to estimate regular parameters of the interaction. It requires the values of any irregular parameters of the interaction to be fixed. For example, to fit a Strauss process model to the \code{cells} dataset, you could type \code{ppm(cells, ~1, Strauss(r=0.07))}. Note that the value of the irregular parameter \code{r} must be given. The result of this command will be a fitted model in which the regular parameter \eqn{\gamma}{gamma} has been estimated. To determine the irregular parameters, there are several practical techniques, but no general statistical theory available. Useful techniques include maximum profile pseudolikelihood, which is implemented in the command \code{\link{profilepl}}, and Newton-Raphson maximisation, implemented in the experimental command \code{\link{ippm}}. Some irregular parameters can be estimated directly from data: the hard-core radius in the model \code{\link{Hardcore}} and the matrix of hard-core radii in \code{\link{MultiHard}} can be estimated easily from data. In these cases, \code{ppm} allows the user to specify the interaction without giving the value of the irregular parameter. The user can give the hard core interaction as \code{interaction=Hardcore()} or even \code{interaction=Hardcore}, and the hard core radius will then be estimated from the data. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. 
\emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. Besag, J. Statistical analysis of non-lattice data. \emph{The Statistician} \bold{24} (1975) 179-195. Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and Tanemura, M. On parameter estimation for pairwise interaction processes. \emph{International Statistical Review} \bold{62} (1994) 99-117. Huang, F. and Ogata, Y. Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8} (1999) 510-530. Jensen, J.L. and Moeller, M. Pseudolikelihood for exponential family models of spatial point processes. \emph{Annals of Applied Probability} \bold{1} (1991) 445--461. Jensen, J.L. and Kuensch, H.R. On asymptotic normality of pseudo likelihood estimates for pairwise interaction processes, \emph{Annals of the Institute of Statistical Mathematics} \bold{46} (1994) 475-486. Rajala T. (2014) \emph{A note on Bayesian logistic regression for spatial exponential family Gibbs point processes}, Preprint on ArXiv.org. \url{http://arxiv.org/abs/1411.0539} } \seealso{ \code{\link{ppm.object}} for details of how to print, plot and manipulate a fitted model. \code{\link{ppp}} and \code{\link{quadscheme}} for constructing data. Interactions: \GibbsInteractionsList. See \code{\link{profilepl}} for advice on fitting nuisance parameters in the interaction, and \code{\link{ippm}} for irregular parameters in the trend. See \code{\link{valid.ppm}} and \code{\link{emend.ppm}} for ensuring the fitted model is a valid point process. } \section{Error and Warning Messages}{ Some common error messages and warning messages are listed below, with explanations. 
\describe{ \item{\dQuote{System is computationally singular}}{ The Fisher information matrix of the fitted model has a determinant close to zero, so that the matrix cannot be inverted, and the software cannot calculate standard errors or confidence intervals. This error is usually reported when the model is printed, because the \code{print} method calculates standard errors for the fitted parameters. Singularity usually occurs because the spatial coordinates in the original data were very large numbers (e.g. expressed in metres) so that the fitted coefficients were very small numbers. The simple remedy is to \bold{rescale the data}, for example, to convert from metres to kilometres by \code{X <- \link{rescale}(X, 1000)}, then re-fit the model. Singularity can also occur if the covariate values are very large numbers, or if the covariates are approximately collinear. } \item{\dQuote{Covariate values were NA or undefined at X\% (M out of N) of the quadrature points}}{ The covariate data (typically a pixel image) did not provide values of the covariate at some of the spatial locations in the observation window of the point pattern. This means that the spatial domain of the pixel image does not completely cover the observation window of the point pattern. If the percentage is small, this warning can be ignored - typically it happens because of rounding effects which cause the pixel image to be one-pixel-width narrower than the observation window. However if more than a few percent of covariate values are undefined, it would be prudent to check that the pixel images are correct, and are correctly registered in their spatial relation to the observation window. } \item{\dQuote{Model is unidentifiable}}{ It is not possible to estimate all the model parameters from this dataset. The error message gives a further explanation, such as \dQuote{data pattern is empty}. Choose a simpler model, or check the data. 
} \item{\dQuote{N data points are illegal (zero conditional intensity)}}{ In a Gibbs model (i.e. with interaction between points), the conditional intensity may be zero at some spatial locations, indicating that the model forbids the presence of a point at these locations. However if the conditional intensity is zero \emph{at a data point}, this means that the model is inconsistent with the data. Modify the interaction parameters so that the data point is not illegal (e.g. reduce the value of the hard core radius) or choose a different interaction. } } } \section{Warnings}{ The implementation of the Huang-Ogata method is experimental; several bugs were fixed in \pkg{spatstat} 1.19-0. See the comments above about the possible inefficiency and bias of the maximum pseudolikelihood estimator. The accuracy of the Berman-Turner approximation to the pseudolikelihood depends on the number of dummy points used in the quadrature scheme. The number of dummy points should at least equal the number of data points. The parameter values of the fitted model do not necessarily determine a valid point process. Some of the point process models are only defined when the parameter values lie in a certain subset. For example the Strauss process only exists when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to \eqn{1}, corresponding to a value of \code{ppm()$theta[2]} less than or equal to \code{0}. By default (if \code{emend=FALSE}) the algorithm maximises the pseudolikelihood without constraining the parameters, and does not apply any checks for sanity after fitting the model. This is because the fitted parameter value could be useful information for data analysis. To constrain the parameters to ensure that the model is a valid point process, set \code{emend=TRUE}. See also the functions \code{\link{valid.ppm}} and \code{\link{emend.ppm}}. 
The \code{trend} formula should not use any variable names beginning with the prefixes \code{.mpl} or \code{Interaction} as these names are reserved for internal use. The data frame \code{covariates} should have as many rows as there are points in \code{Q}. It should not contain variables called \code{x}, \code{y} or \code{marks} as these names are reserved for the Cartesian coordinates and the marks. If the model formula involves one of the functions \code{poly()}, \code{bs()} or \code{ns()} (e.g. applied to spatial coordinates \code{x} and \code{y}), the fitted coefficients can be misleading. The resulting fit is not to the raw spatial variates (\code{x}, \code{x^2}, \code{x*y}, etc.) but to a transformation of these variates. The transformation is implemented by \code{poly()} in order to achieve better numerical stability. However the resulting coefficients are appropriate for use with the transformed variates, not with the raw variates. This affects the interpretation of the constant term in the fitted model, \code{logbeta}. Conventionally, \eqn{\beta}{beta} is the background intensity, i.e. the value taken by the conditional intensity function when all predictors (including spatial or ``trend'' predictors) are set equal to \eqn{0}. However the coefficient actually produced is the value that the log conditional intensity takes when all the predictors, including the \emph{transformed} spatial predictors, are set equal to \code{0}, which is not the same thing. Worse still, the result of \code{\link{predict.ppm}} can be completely wrong if the trend formula contains one of the functions \code{poly()}, \code{bs()} or \code{ns()}. This is a weakness of the underlying function \code{\link{predict.glm}}. If you wish to fit a polynomial trend, we offer an alternative to \code{\link{poly}()}, namely \code{polynom()}, which avoids the difficulty induced by transformations. It is completely analogous to \code{poly} except that it does not orthonormalise. 
The resulting coefficient estimates then have their natural interpretation and can be predicted correctly. Numerical stability may be compromised. Values of the maximised pseudolikelihood are not comparable if they have been obtained with different values of \code{rbord}. } \examples{ # fit the stationary Poisson process # to point pattern 'nztrees' ppm(nztrees) ppm(nztrees ~ 1) \dontrun{ Q <- quadscheme(nztrees) ppm(Q) # equivalent. } \dontrun{ ppm(nztrees, nd=128) } \testonly{ ppm(nztrees, nd=16) } fit1 <- ppm(nztrees, ~ x) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx) # where x,y are the Cartesian coordinates # and a,b are parameters to be estimated fit1 coef(fit1) coef(summary(fit1)) \dontrun{ ppm(nztrees, ~ polynom(x,2)) } \testonly{ ppm(nztrees, ~ polynom(x,2), nd=16) } # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx + cx^2) \dontrun{ library(splines) ppm(nztrees, ~ bs(x,df=3)) } # WARNING: do not use predict.ppm() on this result # Fits the nonstationary Poisson process # with intensity function lambda(x,y) = exp(B(x)) # where B is a B-spline with df = 3 \dontrun{ ppm(nztrees, ~1, Strauss(r=10), rbord=10) } \testonly{ ppm(nztrees, ~1, Strauss(r=10), rbord=10, nd=16) } # Fit the stationary Strauss process with interaction range r=10 # using the border method with margin rbord=10 \dontrun{ ppm(nztrees, ~ x, Strauss(13), correction="periodic") } \testonly{ ppm(nztrees, ~ x, Strauss(13), correction="periodic", nd=16) } # Fit the nonstationary Strauss process with interaction range r=13 # and exp(first order potential) = activity = beta(x,y) = exp(a+bx) # using the periodic correction. 
# Compare Maximum Pseudolikelihood, Huang-Ogata and VB fits: \dontrun{ppm(swedishpines, ~1, Strauss(9))} \dontrun{ppm(swedishpines, ~1, Strauss(9), method="ho")} \testonly{ppm(swedishpines, ~1, Strauss(9), method="ho", nd=16, nsim=8)} ppm(swedishpines, ~1, Strauss(9), method="VBlogi") # COVARIATES # X <- rpoispp(42) weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) } # # (a) covariate values as function ppm(X, ~ y + Z, covariates=list(Z=weirdfunction)) # # (b) covariate values in pixel image Zimage <- as.im(weirdfunction, unit.square()) ppm(X, ~ y + Z, covariates=list(Z=Zimage)) # # (c) covariate values in data frame Q <- quadscheme(X) xQ <- x.quad(Q) yQ <- y.quad(Q) Zvalues <- weirdfunction(xQ,yQ) ppm(Q, ~ y + Z, covariates=data.frame(Z=Zvalues)) # Note Q not X # COVARIATE FUNCTION WITH EXTRA ARGUMENTS # f <- function(x,y,a){ y - a } ppm(X, ~x + f, covariates=list(f=f), covfunargs=list(a=1/2)) # COVARIATE: inside/outside window b <- owin(c(0.1, 0.6), c(0.1, 0.9)) ppm(X, ~w, covariates=list(w=b)) ## MULTITYPE POINT PROCESSES ### # fit stationary marked Poisson process # with different intensity for each species \dontrun{ppm(lansing, ~ marks, Poisson())} \testonly{ ama <- amacrine[square(0.7)] a <- ppm(ama, ~ marks, Poisson(), nd=16) } # fit nonstationary marked Poisson process # with different log-cubic trend for each species \dontrun{ppm(lansing, ~ marks * polynom(x,y,3), Poisson())} \testonly{b <- ppm(ama, ~ marks * polynom(x,y,2), Poisson(), nd=16)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/funxy.Rd0000644000176200001440000000340513160710621014327 0ustar liggesusers\name{funxy} \Rdversion{1.1} \alias{funxy} \title{ Spatial Function Class } \description{ A simple class of functions of spatial location } \usage{ funxy(f, W) } \arguments{ \item{f}{ A \code{function} in the \R language with arguments \code{x,y} (at least) } \item{W}{ Window (object of class \code{"owin"}) inside which the function is well-defined. 
} } \details{ This creates an object of class \code{"funxy"}. This is a simple mechanism for handling a function of spatial location \eqn{f(x,y)} to make it easier to display and manipulate. \code{f} should be a \code{function} in the \R language. The first two arguments of \code{f} must be named \code{x} and \code{y} respectively. \code{W} should be a window (object of class \code{"owin"}) inside which the function \code{f} is well-defined. The function \code{f} should be vectorised: that is, if \code{x} and \code{y} are numeric vectors of the same length \code{n}, then \code{v <- f(x,y)} should be a vector of length \code{n}. The resulting function \code{g <- funxy(f, W)} has the same formal arguments as \code{f}. It accepts numeric vectors \code{x,y} as described above, but if \code{y} is missing, then \code{x} may be a point pattern (object of class \code{"ppp"} or \code{"lpp"}) from which the coordinates should be extracted. } \value{ A \code{function} with the same arguments as \code{f}, which also belongs to the class \code{"funxy"}. This class has methods for \code{print}, \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{plot.funxy}} } \examples{ f <- function(x,y) { x^2 + y^2 - 1} g <- funxy(f, square(2)) g(0.2, 0.3) g g(cells[1:4]) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/reload.or.compute.Rd0000644000176200001440000000404113160710621016513 0ustar liggesusers\name{reload.or.compute} \alias{reload.or.compute} \title{ Compute Unless Previously Saved } \description{ If the designated file does not yet exist, evaluate the expression and save the results in the file. If the file already exists, re-load the results from the file. } \usage{ reload.or.compute(filename, expr, objects = NULL, destination = parent.frame()) } \arguments{ \item{filename}{ Name of data file. A character string. } \item{expr}{ \R language expression to be evaluated. 
} \item{objects}{ Optional character vector of names of objects to be saved in \code{filename} after evaluating \code{expr}, or names of objects that should be present in \code{filename} when loaded. } \item{destination}{ Environment in which the resulting objects should be assigned. } } \details{ This facility is useful for saving, and later re-loading, the results of time-consuming computations. It would typically be used in an \R script file or an \code{\link[utils]{Sweave}} document. If the file called \code{filename} does not yet exist, then \code{expr} will be evaluated and the results will be saved in \code{filename}. The optional argument \code{objects} specifies which results should be saved to the file: the default is to save all objects that were created by evaluating the expression. If the file called \code{filename} already exists, then it will be loaded. The optional argument \code{objects} specifies the names of objects that should be present in the file; a warning is issued if any of them are missing. The resulting objects can be assigned into any desired \code{destination}. The default behaviour is equivalent to evaluating \code{expr} in the current environment. } \value{ Character vector (invisible) giving the names of the objects computed or loaded. } \examples{ \dontrun{ if(FALSE) { reload.or.compute("mydata.rda", { x <- very.long.computation() y <- 42 }) } } } \author{\adrian and \rolf } \keyword{utilities} spatstat/man/divide.linnet.Rd0000644000176200001440000000212113160710571015710 0ustar liggesusers\name{divide.linnet} \alias{divide.linnet} \title{ Divide Linear Network at Cut Points } \description{ Make a tessellation of a linear network by dividing it into pieces demarcated by the points of a point pattern. } \usage{ divide.linnet(X) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). 
} } \details{ The points \code{X} are interpreted as dividing the linear network \code{L=as.linnet(X)} into separate pieces. Two locations on \code{L} belong to the same piece if and only if they can be joined by a path in \code{L} that does not cross any of the points of \code{X}. The result is a tessellation of the network (object of class \code{"lintess"}) representing the division of \code{L} into pieces. } \value{ A tessellation on a linear network (object of class \code{"lintess"}). } \author{ \spatstatAuthors and Greg McSwiggan. } \seealso{ \code{\link{linnet}}, \code{\link{lintess}}. } \examples{ X <- runiflpp(5, simplenet) plot(divide.linnet(X)) plot(X, add=TRUE, pch=16) } \keyword{spatial} \keyword{manip} spatstat/man/Fest.Rd0000644000176200001440000002703213160710571014065 0ustar liggesusers\name{Fest} \alias{Fest} \alias{Fhazard} \title{Estimate the Empty Space Function or its Hazard Rate} \description{ Estimates the empty space function \eqn{F(r)} or its hazard rate \eqn{h(r)} from a point pattern in a window of arbitrary shape. } \usage{ Fest(X, \dots, eps, r=NULL, breaks=NULL, correction=c("rs", "km", "cs"), domain=NULL) Fhazard(X, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{F(r)} will be computed. An object of class \code{ppp}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{ Extra arguments, passed from \code{Fhazard} to \code{Fest}. Extra arguments to \code{Fest} are ignored. } \item{eps}{Optional. A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{F(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. 
The edge correction(s) to be used to estimate \eqn{F(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. The result of \code{Fest} is essentially a data frame containing up to seven columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{F(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{F(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{F(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{F(r)} by the spatial Kaplan-Meier method } \item{cs}{the Chiu-Stoyan estimator of \eqn{F(r)} } \item{raw}{the uncorrected estimate of \eqn{F(r)}, i.e. the empirical distribution of the distance from a random point in the window to the nearest point of the data pattern \code{X} } \item{theo}{the theoretical value of \eqn{F(r)} for a stationary Poisson process of the same estimated intensity. } The result of \code{Fhazard} contains only three columns \item{r}{the values of the argument \eqn{r} at which the hazard rate \eqn{h(r)} has been estimated } \item{hazard}{the spatial Kaplan-Meier estimate of the hazard rate \eqn{h(r)}} \item{theo}{ the theoretical value of \eqn{h(r)} for a stationary Poisson process of the same estimated intensity. } } \details{ \code{Fest} computes an estimate of the empty space function \eqn{F(r)}, and \code{Fhazard} computes an estimate of its hazard rate \eqn{h(r)}. 
The empty space function (also called the ``\emph{spherical contact distribution}'' or the ``\emph{point-to-nearest-event}'' distribution) of a stationary point process \eqn{X} is the cumulative distribution function \eqn{F} of the distance from a fixed point in space to the nearest point of \eqn{X}. An estimate of \eqn{F} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988). In exploratory analyses, the estimate of \eqn{F} is a useful statistic summarising the sizes of gaps in the pattern. For inferential purposes, the estimate of \eqn{F} is usually compared to the true value of \eqn{F} for a completely random (Poisson) point process, which is \deqn{F(r) = 1 - e^{ - \lambda \pi r^2}}{% F(r) = 1 - exp( - \lambda * \pi * r^2) % } where \eqn{\lambda}{\lambda} is the intensity (expected number of points per unit area). Deviations between the empirical and theoretical \eqn{F} curves may suggest spatial clustering or spatial regularity. This algorithm estimates the empty space function \eqn{F} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}}. The algorithm uses two discrete approximations which are controlled by the parameter \code{eps} and by the spacing of values of \code{r} respectively. (See below for details.) First-time users are strongly advised not to specify these arguments. The estimation of \eqn{F} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. 
An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The edge corrections implemented here are the border method or "\emph{reduced sample}" estimator, the spatial Kaplan-Meier estimator (Baddeley and Gill, 1997) and the Chiu-Stoyan estimator (Chiu and Stoyan, 1998). Our implementation makes essential use of the distance transform algorithm of image processing (Borgefors, 1986). A fine grid of pixels is created in the observation window. The Euclidean distance between two pixels is approximated by the length of the shortest path joining them in the grid, where a path is a sequence of steps between adjacent pixels, and horizontal, vertical and diagonal steps have length \eqn{1}, \eqn{1} and \eqn{\sqrt 2}{sqrt(2)} respectively in pixel units. If the pixel grid is sufficiently fine then this is an accurate approximation. The parameter \code{eps} is the pixel width of the rectangular raster used to compute the distance transform (see below). It must not be too large: the absolute error in distance values due to discretisation is bounded by \code{eps}. If \code{eps} is not specified, the function checks whether the window \code{Window(X)} contains pixel raster information. If so, then \code{eps} is set equal to the pixel width of the raster; otherwise, \code{eps} defaults to 1/100 of the width of the observation window. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{F(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The estimators are computed from histogram counts. This introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. 
Furthermore, the spacing of successive \code{r} values must be very fine (ideally not greater than \code{eps/4}). The algorithm also returns an estimate of the hazard rate function, \eqn{h(r)} of \eqn{F(r)}. The hazard rate is defined by \deqn{h(r) = - \frac{d}{dr} \log(1 - F(r))}{% h(r) = - (d/dr) log(1 - F(r)) % } The hazard rate of \eqn{F} has been proposed as a useful exploratory statistic (Baddeley and Gill, 1994). The estimate of \eqn{h(r)} given here is a discrete approximation to the hazard rate of the Kaplan-Meier estimator of \eqn{F}. Note that \eqn{F} is absolutely continuous (for any stationary point process \eqn{X}), so the hazard function always exists (Baddeley and Gill, 1997). If the argument \code{domain} is given, the estimate of \eqn{F(r)} will be based only on the empty space distances measured from locations inside \code{domain} (although their nearest data points may lie outside \code{domain}). This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The naive empirical distribution of distances from each location in the window to the nearest point of the data pattern, is a biased estimate of \eqn{F}. However this is also returned by the algorithm (if \code{correction="none"}), as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{F} as if it were an unbiased estimator of \eqn{F}. } \note{ Sizeable amounts of memory may be needed during the calculation. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. 
Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263-292. Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344-371. Chiu, S.N. and Stoyan, D. (1998) Estimators of distance distributions for spatial patterns. \emph{Statistica Neerlandica} \bold{52}, 239--246. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The reduced sample (border method) estimator of \eqn{F} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{F} is always nondecreasing but its maximum value may be less than \eqn{1}. The estimate of hazard rate \eqn{h(r)} returned by the algorithm is an approximately unbiased estimate for the integral of \eqn{h()} over the corresponding histogram cell. It may exhibit oscillations due to discretisation effects. We recommend modest smoothing, such as kernel smoothing with kernel width equal to the width of a histogram cell, using \code{\link{Smooth.fv}}. } \seealso{ \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ Fc <- Fest(cells, 0.01) # Tip: don't use F for the left hand side! 
# That's an abbreviation for FALSE plot(Fc) # P-P style plot plot(Fc, cbind(km, theo) ~ theo) # The empirical F is above the Poisson F # indicating an inhibited pattern \dontrun{ plot(Fc, . ~ theo) plot(Fc, asin(sqrt(.)) ~ asin(sqrt(theo))) } \testonly{ Fh <- Fhazard(cells) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/connected.lpp.Rd0000644000176200001440000000505513160710571015721 0ustar liggesusers\name{connected.lpp} \alias{connected.lpp} \title{ Connected Components of a Point Pattern on a Linear Network } \description{ Finds the topologically-connected components of a point pattern on a linear network, when all pairs of points closer than a threshold distance are joined. } \usage{ \method{connected}{lpp}(X, R=Inf, \dots, dismantle=TRUE) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{R}{ Threshold distance. Pairs of points will be joined together if they are closer than \code{R} units apart, measured by the shortest path in the network. The default \code{R=Inf} implies that points will be joined together if they are mutually connected by any path in the network. } \item{dismantle}{ Logical. If \code{TRUE} (the default), the network itself will be divided into its path-connected components using \code{\link{connected.linnet}}. } \item{\dots}{ Ignored. } } \details{ The function \code{connected} is generic. This is the method for point patterns on a linear network (objects of class \code{"lpp"}). It divides the point pattern \code{X} into one or more groups of points. If \code{R=Inf} (the default), then \code{X} is divided into groups such that any pair of points in the same group can be joined by a path in the network. If \code{R} is a finite number, then two points of \code{X} are declared to be \emph{R-close} if they lie closer than \code{R} units apart, measured by the length of the shortest path in the network. 
Two points are \emph{R-connected} if they can be reached by a series of steps between R-close pairs of points of \code{X}. Then \code{X} is divided into groups such that any pair of points in the same group is R-connected. If \code{dismantle=TRUE} (the default) the algorithm first checks whether the network is connected (i.e. whether any pair of vertices can be joined by a path in the network), and if not, the network is decomposed into its connected components. } \value{ A point pattern (of class \code{"lpp"}) with marks indicating the grouping, or a list of such point patterns. } \author{ \adrian. } \seealso{ \code{\link{thinNetwork}} } \examples{ # remove some edges from a network to make it disconnected plot(simplenet, col="grey", main="", lty=2) A <- thinNetwork(simplenet, retainedges=-c(3,5)) plot(A, add=TRUE, lwd=2) X <- runiflpp(10, A) # find the connected components cX <- connected(X) plot(cX[[1]], add=TRUE, col="blue", lwd=2) } \keyword{spatial} \keyword{manip} spatstat/man/as.owin.Rd0000644000176200001440000001760413160710571014546 0ustar liggesusers\name{as.owin} \alias{as.owin} \alias{as.owin.owin} \alias{as.owin.ppp} \alias{as.owin.ppm} \alias{as.owin.kppm} \alias{as.owin.dppm} \alias{as.owin.lpp} \alias{as.owin.lppm} \alias{as.owin.msr} \alias{as.owin.psp} \alias{as.owin.quad} \alias{as.owin.quadratcount} \alias{as.owin.quadrattest} \alias{as.owin.tess} \alias{as.owin.im} \alias{as.owin.layered} \alias{as.owin.data.frame} \alias{as.owin.distfun} \alias{as.owin.nnfun} \alias{as.owin.funxy} \alias{as.owin.boxx} \alias{as.owin.rmhmodel} \alias{as.owin.leverage.ppm} \alias{as.owin.influence.ppm} \alias{as.owin.default} \title{Convert Data To Class owin} \description{ Converts data specifying an observation window in any of several formats, into an object of class \code{"owin"}. 
} \usage{ as.owin(W, \dots, fatal=TRUE) \method{as.owin}{owin}(W, \dots, fatal=TRUE) \method{as.owin}{ppp}(W, \dots, fatal=TRUE) \method{as.owin}{ppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{kppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{dppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{lpp}(W, \dots, fatal=TRUE) \method{as.owin}{lppm}(W, \dots, fatal=TRUE) \method{as.owin}{msr}(W, \dots, fatal=TRUE) \method{as.owin}{psp}(W, \dots, fatal=TRUE) \method{as.owin}{quad}(W, \dots, fatal=TRUE) \method{as.owin}{quadratcount}(W, \dots, fatal=TRUE) \method{as.owin}{quadrattest}(W, \dots, fatal=TRUE) \method{as.owin}{tess}(W, \dots, fatal=TRUE) \method{as.owin}{im}(W, \dots, fatal=TRUE) \method{as.owin}{layered}(W, \dots, fatal=TRUE) \method{as.owin}{data.frame}(W, \dots, step, fatal=TRUE) \method{as.owin}{distfun}(W, \dots, fatal=TRUE) \method{as.owin}{nnfun}(W, \dots, fatal=TRUE) \method{as.owin}{funxy}(W, \dots, fatal=TRUE) \method{as.owin}{boxx}(W, \dots, fatal=TRUE) \method{as.owin}{rmhmodel}(W, \dots, fatal=FALSE) \method{as.owin}{leverage.ppm}(W, \dots, fatal=TRUE) \method{as.owin}{influence.ppm}(W, \dots, fatal=TRUE) \method{as.owin}{default}(W, \dots, fatal=TRUE) } \arguments{ \item{W}{Data specifying an observation window, in any of several formats described under \emph{Details} below.} \item{fatal}{Logical flag determining what to do if the data cannot be converted to an observation window. See Details. } \item{\dots}{Ignored.} \item{from}{Character string. See Details.} \item{step}{ Optional. A single number, or numeric vector of length 2, giving the grid step lengths in the \eqn{x} and \eqn{y} directions. } } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying an observation window. } \details{ The class \code{"owin"} is a way of specifying the observation window for a point pattern. See \code{\link{owin.object}} for an overview. 
This function converts data in any of several formats into an object of class \code{"owin"} for use by the \pkg{spatstat} package. The function \code{as.owin} is generic, with methods for different classes of objects, and a default method. The argument \code{W} may be \itemize{ \item an object of class \code{"owin"} \item a structure with entries \code{xrange}, \code{yrange} specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a four-element vector (interpreted as \code{(xmin, xmax, ymin, ymax)}) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a structure with entries \code{xl}, \code{xu}, \code{yl}, \code{yu} specifying the \eqn{x} and \eqn{y} dimensions of a rectangle as \code{(xmin, xmax) = (xl, xu)} and \code{(ymin, ymax) = (yl, yu)}. This will accept objects of class \code{spp} used in the Venables and Ripley \pkg{spatial} library. \item an object of class \code{"ppp"} representing a point pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"psp"} representing a line segment pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"tess"} representing a tessellation. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"quad"} representing a quadrature scheme. In this case, the window of the \code{data} component will be extracted. \item an object of class \code{"im"} representing a pixel image. In this case, a window of type \code{"mask"} will be returned, with the same pixel raster coordinates as the image. An image pixel value of \code{NA}, signifying that the pixel lies outside the window, is transformed into the logical value \code{FALSE}, which is the corresponding convention for window masks. \item an object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"} representing a fitted point process model. 
In this case, if \code{from="points"} (the default), \code{as.owin} extracts the original point pattern data to which the model was fitted, and returns the observation window of this point pattern. If \code{from="covariates"} then \code{as.owin} extracts the covariate images to which the model was fitted, and returns a binary mask window that specifies the pixel locations. \item an object of class \code{"lpp"} representing a point pattern on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item an object of class \code{"lppm"} representing a fitted point process model on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item A \code{data.frame} with exactly three columns. Each row of the data frame corresponds to one pixel. Each row contains the \eqn{x} and \eqn{y} coordinates of a pixel, and a logical value indicating whether the pixel lies inside the window. \item A \code{data.frame} with exactly two columns. Each row of the data frame contains the \eqn{x} and \eqn{y} coordinates of a pixel that lies inside the window. \item an object of class \code{"distfun"}, \code{"nnfun"} or \code{"funxy"} representing a function of spatial location, defined on a spatial domain. The spatial domain of the function will be extracted. \item an object of class \code{"rmhmodel"} representing a point process model that can be simulated using \code{\link{rmh}}. The window (spatial domain) of the model will be extracted. The window may be \code{NULL} in some circumstances (indicating that the simulation window has not yet been determined). This is not treated as an error, because the argument \code{fatal} defaults to \code{FALSE} for this method. \item an object of class \code{"layered"} representing a list of spatial objects. See \code{\link{layered}}. 
In this case, \code{as.owin} will be applied to each of the objects in the list, and the union of these windows will be returned. } If the argument \code{W} is not in one of these formats and cannot be converted to a window, then an error will be generated (if \code{fatal=TRUE}) or a value of \code{NULL} will be returned (if \code{fatal=FALSE}). When \code{W} is a data frame, the argument \code{step} can be used to specify the pixel grid spacing; otherwise, the spacing will be guessed from the data. } \seealso{ \code{\link{owin.object}}, \code{\link{owin}} } \examples{ w <- as.owin(c(0,1,0,1)) w <- as.owin(list(xrange=c(0,5),yrange=c(0,10))) # point pattern data(demopat) w <- as.owin(demopat) # image Z <- as.im(function(x,y) { x + 3}, unit.square()) w <- as.owin(Z) # Venables & Ripley 'spatial' package require(spatial) towns <- ppinit("towns.dat") w <- as.owin(towns) detach(package:spatial) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/matclust.estK.Rd0000644000176200001440000001452413160710621015723 0ustar liggesusers\name{matclust.estK} \alias{matclust.estK} \title{Fit the Matern Cluster Point Process by Minimum Contrast} \description{ Fits the Matern Cluster point process to a point pattern dataset by the Method of Minimum Contrast. } \usage{ matclust.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the Matern Cluster model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Matern Cluster process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. 
} } \details{ This algorithm fits the Matern Cluster point process model to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Matern Cluster point process to \code{X}, by finding the parameters of the Matern Cluster model which give the closest match between the theoretical \eqn{K} function of the Matern Cluster process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Matern Cluster point process is described in \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2003, p. 62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and uniformly distributed inside a circle of radius \eqn{R}{R} centred on the parent point, where \eqn{R}{R} is equal to the parameter \code{scale}. The named vector of starting values can use either \code{R} or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. 
The theoretical \eqn{K}-function of the Matern Cluster process is \deqn{ K(r) = \pi r^2 + \frac 1 \kappa h(\frac{r}{2R}) }{ K(r) = pi r^2 + h(r/(2*R))/kappa } where the radius R is the parameter \code{scale} and \deqn{ h(z) = 2 + \frac 1 \pi [ ( 8 z^2 - 4 ) \mbox{arccos}(z) - 2 \mbox{arcsin}(z) + 4 z \sqrt{(1 - z^2)^3} - 6 z \sqrt{1 - z^2} ] }{ h(z) = 2 + (1/pi) * ((8 * z^2 - 4) * arccos(z) - 2 * arcsin(z) + 4 * z * sqrt((1 - z^2)^3) - 6 * z * sqrt(1 - z^2)) } for \eqn{z <= 1}, and \eqn{h(z) = 1} for \eqn{z > 1}. The theoretical intensity of the Matern Cluster process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{R}{R}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Matern Cluster process can be simulated, using \code{\link{rMatClust}}. Homogeneous or inhomogeneous Matern Cluster models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. 
For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Rasmus Waagepetersen \email{rw@math.auc.dk} Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{rMatClust}} to simulate the fitted model. } \examples{ data(redwood) u <- matclust.estK(redwood, c(kappa=10, scale=0.1)) u plot(u) } \keyword{spatial} \keyword{models} spatstat/man/rpoisline.Rd0000644000176200001440000000276013160710621015165 0ustar liggesusers\name{rpoisline} \alias{rpoisline} \title{Generate Poisson Random Line Process} \description{ Generate a random pattern of line segments obtained from the Poisson line process. } \usage{ rpoisline(lambda, win=owin()) } \arguments{ \item{lambda}{ Intensity of the Poisson line process. A positive number. } \item{win}{ Window in which to simulate the pattern. 
An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } } \value{ A line segment pattern (an object of class \code{"psp"}). The result also has an attribute called \code{"lines"} (an object of class \code{"infline"} specifying the original infinite random lines) and an attribute \code{"linemap"} (an integer vector mapping the line segments to their parent lines). } \details{ This algorithm generates a realisation of the uniform Poisson line process, and clips it to the window \code{win}. The argument \code{lambda} must be a positive number. It controls the intensity of the process. The expected number of lines intersecting a convex region of the plane is equal to \code{lambda} times the perimeter length of the region. The expected total length of the lines crossing a region of the plane is equal to \code{lambda * pi} times the area of the region. } \seealso{ \code{\link{psp}} } \examples{ # uniform Poisson line process with intensity 10, # clipped to the unit square rpoisline(10) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/commonGrid.Rd0000644000176200001440000000342513160710571015262 0ustar liggesusers\name{commonGrid} \alias{commonGrid} \title{Determine A Common Spatial Domain And Pixel Resolution} \description{ Determine a common spatial domain and pixel resolution for several spatial objects such as images, masks, windows and point patterns. } \usage{ commonGrid(\dots) } \arguments{ \item{\dots}{ Any number of pixel images (objects of class \code{"im"}), binary masks (objects of class \code{"owin"} of type \code{"mask"}) or data which can be converted to binary masks by \code{\link{as.mask}}. } } \details{ This function determines a common spatial resolution and spatial domain for several spatial objects. The arguments \code{\dots} may be pixel images, binary masks, or other spatial objects acceptable to \code{\link{as.mask}}. 
The common pixel grid is determined by inspecting all the pixel images and binary masks in the argument list, finding the pixel grid with the highest spatial resolution, and extending this pixel grid to cover the bounding box of all the spatial objects. The return value is a binary mask \code{M}, representing the bounding box at the chosen pixel resolution. Use \code{\link{as.im}(X, W=M)} to convert a pixel image \code{X} to this new pixel resolution. Use \code{\link{as.mask}(W, xy=M)} to convert a window \code{W} to a binary mask at this new pixel resolution. See the Examples. } \value{ A binary mask (object of class \code{"owin"} and type \code{"mask"}). } \author{\adrian and \rolf } \examples{ A <- setcov(square(1)) G <- density(runifpoint(42), dimyx=16) H <- commonGrid(A, letterR, G) newR <- as.mask(letterR, xy=H) newG <- as.im(G, W=H) } \seealso{ \code{\link{harmonise.im}}, \code{\link{compatible.im}}, \code{\link{as.im}} } \keyword{spatial} \keyword{manip} spatstat/man/connected.linnet.Rd0000644000176200001440000000323713160710571016417 0ustar liggesusers\name{connected.linnet} \alias{connected.linnet} \title{ Connected Components of a Linear Network } \description{ Find the topologically-connected components of a linear network. } \usage{ \method{connected}{linnet}(X, \dots, what = c("labels", "components")) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}). } \item{\dots}{ Ignored. } \item{what}{ Character string specifying the kind of result. } } \details{ The function \code{connected} is generic. This is the method for linear networks (objects of class \code{"linnet"}). Two vertices of the network are connected if they are joined by a path in the network. This function divides the network into subsets, such that all points in a subset are connected to each other. If \code{what="labels"} the return value is a factor with one entry for each vertex of \code{X}, identifying which connected component the vertex belongs to. 
If \code{what="components"} the return value is a list of linear networks, which are the connected components of \code{X}. } \value{ If \code{what="labels"}, a factor. If \code{what="components"}, a list of linear networks. } \author{ \adrian and Suman Rakshit. } \seealso{ \code{\link{thinNetwork}} } \examples{ # remove some edges from a network to make it disconnected plot(simplenet, col="grey", main="", lty=2) A <- thinNetwork(simplenet, retainedges=-c(3,5)) plot(A, add=TRUE, lwd=2) # find the connected components connected(A) cA <- connected(A, what="components") plot(cA[[1]], add=TRUE, col="green", lwd=2) plot(cA[[2]], add=TRUE, col="blue", lwd=2) } \keyword{spatial} \keyword{manip} spatstat/man/rescale.owin.Rd0000644000176200001440000000364713160710621015557 0ustar liggesusers\name{rescale.owin} \alias{rescale.owin} \title{Convert Window to Another Unit of Length} \description{ Converts a window to another unit of length. } \usage{ \method{rescale}{owin}(X, s, unitname) } \arguments{ \item{X}{Window (object of class \code{"owin"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{unitname}{ Optional. New name for the unit of length. See \code{\link{unitname}}. } } \value{ Another window object (of class \code{"owin"}) representing the same window, but expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates in the window \code{X} (and its window) will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). The result is a window representing the \emph{same} region of space, but re-expressed in a different unit. 
If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. } \section{Note}{ The result of this operation is equivalent to the original window. If you want to actually change the coordinates by a linear transformation, producing a window that is larger or smaller than the original one, use \code{\link{affine}}. } \seealso{ \code{\link{unitname}}, \code{\link{rescale}}, \code{\link{rescale.owin}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(swedishpines) W <- Window(swedishpines) W # coordinates are in decimetres (0.1 metre) # convert to metres: rescale(W, 10) # or equivalently rescale(W) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/bw.ppl.Rd0000644000176200001440000000560013160710571014363 0ustar liggesusers\name{bw.ppl} \alias{bw.ppl} \title{ Likelihood Cross Validation Bandwidth Selection for Kernel Density } \description{ Uses likelihood cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.ppl(X, \dots, srange=NULL, ns=16, sigma=NULL, weights=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{srange}{ Optional numeric vector of length 2 giving the range of values of bandwidth to be searched. } \item{ns}{ Optional integer giving the number of values of bandwidth to search. } \item{sigma}{ Optional. Vector of values of the bandwidth to be searched. Overrides the values of \code{ns} and \code{srange}. } \item{weights}{ Optional. Numeric vector of weights for the points of \code{X}. Argument passed to \code{\link{density.ppp}}. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. 
The bandwidth \eqn{\sigma}{\sigma} is chosen to maximise the point process likelihood cross-validation criterion \deqn{ \mbox{LCV}(\sigma) = \sum_i \log\hat\lambda_{-i}(x_i) - \int_W \hat\lambda(u) \, {\rm d}u }{ LCV(\sigma) = sum[i] log(\lambda[-i](x[i])) - integral[W] \lambda(u) du } where the sum is taken over all the data points \eqn{x_i}{x[i]}, where \eqn{\hat\lambda_{-i}(x_i)}{\lambda[-i](x_i)} is the leave-one-out kernel-smoothing estimate of the intensity at \eqn{x_i}{x[i]} with smoothing bandwidth \eqn{\sigma}{\sigma}, and \eqn{\hat\lambda(u)}{\lambda(u)} is the kernel-smoothing estimate of the intensity at a spatial location \eqn{u} with smoothing bandwidth \eqn{\sigma}{\sigma}. See Loader (1999, Section 5.3). The value of \eqn{\mbox{LCV}(\sigma)}{LCV(\sigma)} is computed directly, using \code{\link{density.ppp}}, for \code{ns} different values of \eqn{\sigma}{\sigma} between \code{srange[1]} and \code{srange[2]}. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) cross-validation criterion as a function of \code{sigma}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.scott}} } \examples{ \donttest{ b <- bw.ppl(redwood) plot(b, main="Likelihood cross validation for redwoods") plot(density(redwood, b)) } \testonly{ b <- bw.ppl(redwood, srange=c(0.03, 0.07), ns=2) } } \references{ Loader, C. (1999) \emph{Local Regression and Likelihood}. Springer, New York. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/dppBessel.Rd0000644000176200001440000000174713160710571015112 0ustar liggesusers\name{dppBessel} \alias{dppBessel} \title{Bessel Type Determinantal Point Process Model} \description{ Function generating an instance of the Bessel-type determinantal point process model. } \usage{dppBessel(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the model parameters. See Details. } } \details{ The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{sigma} as a non-negative numeric \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ Frederic Lavancier and Christophe Biscio. Modified by \ege , \adrian and \rolf } \examples{ m <- dppBessel(lambda=100, alpha=.05, sigma=0, d=2) } \seealso{ \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppPowerExp}} } spatstat/man/hierpair.family.Rd0000644000176200001440000000172413160710621016243 0ustar liggesusers\name{hierpair.family} \alias{hierpair.family} \title{Hierarchical Pairwise Interaction Process Family} \description{ An object describing the family of all hierarchical pairwise interaction Gibbs point processes. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the hierarchical pairwise interaction family of point process models. Anyway, \code{hierpair.family} is an object of class \code{"isf"} containing a function \code{hierpair.family$eval} for evaluating the sufficient statistics of any hierarchical pairwise interaction point process model taking an exponential family form. 
} \seealso{ Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. Hierarchical Strauss interaction: \code{\link{HierStrauss}}. } \author{\adrian \rolf and \ege. } \keyword{spatial} \keyword{models} spatstat/man/dilation.Rd0000644000176200001440000000541713160710571014772 0ustar liggesusers\name{dilation} \alias{dilation} \alias{dilation.owin} \alias{dilation.ppp} \alias{dilation.psp} \title{Morphological Dilation} \description{ Perform morphological dilation of a window, a line segment pattern or a point pattern } \usage{ dilation(w, r, \dots) \method{dilation}{owin}(w, r, \dots, polygonal=NULL, tight=TRUE) \method{dilation}{ppp}(w, r, \dots, polygonal=TRUE, tight=TRUE) \method{dilation}{psp}(w, r, \dots, polygonal=TRUE, tight=TRUE) } \arguments{ \item{w}{ A window (object of class \code{"owin"} or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). } \item{r}{positive number: the radius of dilation.} \item{\dots}{extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution, if the pixel approximation is used; or passed to \code{\link{disc}} if the polygonal approximation is used. } \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the dilation (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } \item{tight}{ Logical flag indicating whether the bounding frame of the window should be taken as the smallest rectangle enclosing the dilated region (\code{tight=TRUE}), or should be the dilation of the bounding frame of \code{w} (\code{tight=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the dilated region. If \code{r=0}, the result is identical to \code{w}. 
} \details{ The morphological dilation of a set \eqn{W} by a distance \eqn{r > 0} is the set consisting of all points lying at most \eqn{r} units away from \eqn{W}. Effectively, dilation adds a margin of width \eqn{r} onto the set \eqn{W}. If \code{polygonal=TRUE} then a polygonal approximation to the dilation is computed. If \code{polygonal=FALSE} then a pixel approximation to the dilation is computed from the distance map of \code{w}. The arguments \code{"\dots"} are passed to \code{\link{as.mask}} to control the pixel resolution. When \code{w} is a window, the default (when \code{polygonal=NULL}) is to compute a polygonal approximation if \code{w} is a rectangle or polygonal window, and to compute a pixel approximation if \code{w} is a window of type \code{"mask"}. } \seealso{ \code{\link{erosion}} for the opposite operation. \code{\link{dilationAny}} for morphological dilation using any shape. \code{\link{owin}}, \code{\link{as.owin}} } \examples{ plot(dilation(letterR, 0.2)) plot(letterR, add=TRUE, lwd=2, border="red") X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(dilation(X, 0.1)) plot(X, add=TRUE, col="red") } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/tile.lengths.Rd0000644000176200001440000000171413160710621015557 0ustar liggesusers\name{tile.lengths} \alias{tile.lengths} \title{Compute Lengths of Tiles in a Tessellation on a Network} \description{ Computes the length of each tile in a tessellation on a linear network. } \usage{ tile.lengths(x) } \arguments{ \item{x}{A tessellation on a linear network (object of class \code{"lintess"}).} } \details{ A tessellation on a linear network \code{L} is a partition of the network into non-overlapping pieces (tiles). Each tile consists of one or more line segments which are subsets of the line segments making up the network. A tile can consist of several disjoint pieces. 
This command computes the length of each of the tiles that make up the tessellation \code{x}. The result is a numeric vector. } \value{ A numeric vector. } \seealso{ \code{\link{lintess}} } \examples{ X <- runiflpp(5, simplenet) A <- lineardirichlet(X) plot(A) tile.lengths(A) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/tiles.empty.Rd0000644000176200001440000000247513160710621015441 0ustar liggesusers\name{tiles.empty} \alias{tiles.empty} \title{Check For Empty Tiles in a Tessellation} \description{ Checks whether each tile in a tessellation is empty or non-empty. } \usage{ tiles.empty(x) } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. It is possible for some tiles of a tessellation to be empty. For example, this can happen when the tessellation \code{x} is obtained by restricting another tessellation \code{y} to a smaller spatial domain \code{w}. The function \code{tiles.empty} checks whether each tile is empty or non-empty. The result is a logical vector, with entries equal to \code{TRUE} when the corresponding tile is empty. Results are given in the same order as the tiles would be listed by \code{tiles(x)}. } \value{ A logical vector. 
} \seealso{ \code{\link{tess}}, \code{\link{tiles}}, \code{\link{tilenames}}, \code{\link{tile.areas}} } \examples{ A <- tess(xgrid=0:2,ygrid=0:2) tiles.empty(A) v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin()) E <- tess(image=v) tiles.empty(E) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/Math.imlist.Rd0000644000176200001440000000612513160710571015355 0ustar liggesusers\name{Math.imlist} \alias{Math.imlist} \alias{Ops.imlist} \alias{Complex.imlist} \alias{Summary.imlist} \title{S3 Group Generic methods for List of Images} \description{ These are group generic methods for the class \code{"imlist"} of lists of images. These methods allows the usual mathematical functions and operators to be applied directly to lists of images. See Details for a list of implemented functions. } \usage{ ## S3 methods for group generics have prototypes: \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Complex(z)} \special{Summary(\dots, na.rm = TRUE)} %NAMESPACE S3method("Math", "imlist") %NAMESPACE S3method("Ops", "imlist") %NAMESPACE S3method("Complex", "imlist") %NAMESPACE S3method("Summary", "imlist") } \arguments{ \item{x, z, e1, e2}{objects of class \code{"imlist"}.} \item{\dots}{further arguments passed to methods.} \item{na.rm}{logical: should missing values be removed?} } \details{ Below is a list of mathematical functions and operators which are defined for lists of images. Not all functions will make sense for all types of images. For example, none of the functions in the \code{"Math"} group make sense for character-valued images. Note that the \code{"Ops"} group methods are implemented using \code{\link{eval.im}}, which tries to harmonise images via \code{\link{harmonise.im}} if they aren't compatible to begin with. 
\enumerate{ \item Group \code{"Math"}: \itemize{ \item \code{abs}, \code{sign}, \code{sqrt},\cr \code{floor}, \code{ceiling}, \code{trunc},\cr \code{round}, \code{signif} \item \code{exp}, \code{log}, \code{expm1}, \code{log1p},\cr \code{cos}, \code{sin}, \code{tan},\cr \code{cospi}, \code{sinpi}, \code{tanpi},\cr \code{acos}, \code{asin}, \code{atan} \code{cosh}, \code{sinh}, \code{tanh},\cr \code{acosh}, \code{asinh}, \code{atanh} \item \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma} \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin} } \item Group \code{"Ops"}: \itemize{ \item \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"}, \code{"^"}, \code{"\%\%"}, \code{"\%/\%"} \item \code{"&"}, \code{"|"}, \code{"!"} \item \code{"=="}, \code{"!="}, \code{"<"}, \code{"<="}, \code{">="}, \code{">"} } \item Group \code{"Summary"}: \itemize{ \item \code{all}, \code{any} \item \code{sum}, \code{prod} \item \code{min}, \code{max} \item \code{range} } \item Group \code{"Complex"}: \itemize{ \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re} } } } \value{ The result of \code{"Math"}, \code{"Ops"} and \code{"Complex"} group operations is another list of images. The result of \code{"Summary"} group operations is a numeric vector of length 1 or 2. } \seealso{ \code{\link{Math.im}} or \code{\link{eval.im}} for evaluating expressions involving images. } \examples{ a <- Smooth(finpines, 2) log(a)/2 - sqrt(a) range(a) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/ripras.Rd0000644000176200001440000000612413160710621014457 0ustar liggesusers\name{ripras} \alias{ripras} \title{Estimate window from points alone} \description{ Given an observed pattern of points, computes the Ripley-Rasson estimate of the spatial domain from which they came. 
} \usage{ ripras(x, y=NULL, shape="convex", f) } \arguments{ \item{x}{ vector of \code{x} coordinates of observed points, or a 2-column matrix giving \code{x,y} coordinates, or a list with components \code{x,y} giving coordinates (such as a point pattern object of class \code{"ppp"}.) } \item{y}{(optional) vector of \code{y} coordinates of observed points, if \code{x} is a vector.} \item{shape}{String indicating the type of window to be estimated: either \code{"convex"} or \code{"rectangle"}. } \item{f}{ (optional) scaling factor. See Details. } } \value{ A window (an object of class \code{"owin"}). } \details{ Given an observed pattern of points with coordinates given by \code{x} and \code{y}, this function computes an estimate due to Ripley and Rasson (1977) of the spatial domain from which the points came. The points are assumed to have been generated independently and uniformly distributed inside an unknown domain \eqn{D}. If \code{shape="convex"} (the default), the domain \eqn{D} is assumed to be a convex set. The maximum likelihood estimate of \eqn{D} is the convex hull of the points (computed by \code{\link{convexhull.xy}}). Analogously to the problems of estimating the endpoint of a uniform distribution, the MLE is not optimal. Ripley and Rasson's estimator is a rescaled copy of the convex hull, centred at the centroid of the convex hull. The scaling factor is \eqn{1/sqrt(1 - m/n)}{1/\sqrt{1 - \frac m n}} where \eqn{n} is the number of data points and \eqn{m} the number of vertices of the convex hull. The scaling factor may be overridden using the argument \code{f}. If \code{shape="rectangle"}, the domain \eqn{D} is assumed to be a rectangle with sides parallel to the coordinate axes. The maximum likelihood estimate of \eqn{D} is the bounding box of the points (computed by \code{\link{bounding.box.xy}}). 
The Ripley-Rasson estimator is a rescaled copy of the bounding box, with scaling factor \eqn{(n+1)/(n-1)} where \eqn{n} is the number of data points, centred at the centroid of the bounding box. The scaling factor may be overridden using the argument \code{f}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{bounding.box.xy}}, \code{\link{convexhull.xy}} } \examples{ x <- runif(30) y <- runif(30) w <- ripras(x,y) plot(owin(), main="ripras(x,y)") plot(w, add=TRUE) points(x,y) X <- rpoispp(15) plot(X, main="ripras(X)") plot(ripras(X), add=TRUE) # two points insufficient ripras(c(0,1),c(0,0)) # triangle ripras(c(0,1,0.5), c(0,0,1)) # three collinear points ripras(c(0,0,0), c(0,1,2)) } \references{ Ripley, B.D. and Rasson, J.-P. (1977) Finding the edge of a Poisson forest. \emph{Journal of Applied Probability}, \bold{14}, 483 -- 491. } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/print.ppm.Rd0000644000176200001440000000303013160710621015102 0ustar liggesusers\name{print.ppm} \alias{print.ppm} \title{Print a Fitted Point Process Model} \description{ Default \code{print} method for a fitted point process model. } \usage{ \method{print}{ppm}(x,\dots, what=c("all", "model", "trend", "interaction", "se", "errors")) } \arguments{ \item{x}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"}. } \item{what}{ Character vector (partially-matched) indicating what information should be printed. } \item{\dots}{Ignored.} } \value{ none. } \details{ This is the \code{print} method for the class \code{"ppm"}. It prints information about the fitted model in a sensible format. The argument \code{what} makes it possible to print only some of the information. If \code{what} is missing, then by default, standard errors for the estimated coefficients of the model will be printed only if the model is a Poisson point process. 
To print the standard errors for a non-Poisson model, call \code{print.ppm} with the argument \code{what} given explicitly, or reset the default rule by typing \code{spatstat.options(print.ppm.SE="always")}. } \seealso{ \code{\link{ppm.object}} for details of the class \code{"ppm"}. \code{\link{ppm}} for generating these objects. \code{\link{plot.ppm}}, \code{\link{predict.ppm}} } \examples{ \dontrun{ m <- ppm(cells, ~1, Strauss(0.05)) m } } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} \keyword{models} spatstat/man/nnwhich.lpp.Rd0000644000176200001440000000346713160710621015416 0ustar liggesusers\name{nnwhich.lpp} \alias{nnwhich.lpp} \title{ Identify Nearest Neighbours on a Linear Network } \description{ Given a pattern of points on a linear network, identify the nearest neighbour for each point, measured by the shortest path in the network. } \usage{ \method{nnwhich}{lpp}(X, ..., k=1, method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{k}{ Integer, or integer vector. The algorithm will find the \code{k}th nearest neighbour. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function finds the nearest neighbour of each point (i.e. for each point it identifies the nearest other point) measuring distance by the shortest path in the network. If \code{method="C"} the task is performed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. The \R code is much slower, but is provided for checking purposes. The result is \code{NA} if the \code{k}th nearest neighbour does not exist. This can occur if there are fewer than \code{k+1} points in the dataset, or if the linear network is not connected. 
} \value{ An integer vector, of length equal to the number of points in \code{X}, identifying the nearest neighbour of each point. If \code{nnwhich(X)[2] = 4} then the nearest neighbour of point 2 is point 4. Alternatively a matrix with one row for each point in \code{X} and one column for each entry of \code{k}. } \author{ \adrian } \seealso{ \code{\link{lpp}} } \examples{ X <- runiflpp(10, simplenet) nnwhich(X) nnwhich(X, k=2) } \keyword{spatial} spatstat/man/bc.ppm.Rd0000644000176200001440000000363513160710571014346 0ustar liggesusers\name{bc.ppm} \alias{bc} \alias{bc.ppm} \title{ Bias Correction for Fitted Model } \description{ Applies a first-order bias correction to a fitted model. } \usage{ bc(fit, \dots) \method{bc}{ppm}(fit, \dots, nfine = 256) } \arguments{ \item{fit}{ A fitted point process model (object of class \code{"ppm"}) or a model of some other class. } \item{\dots}{ Additional arguments are currently ignored. } \item{nfine}{ Grid dimensions for fine grid of locations. An integer, or a pair of integers. See Details. } } \details{ This command applies the first order Newton-Raphson bias correction method of Baddeley and Turner (2014, sec 4.2) to a fitted model. The function \code{bc} is generic, with a method for fitted point process models of class \code{"ppm"}. A fine grid of locations, of dimensions \code{nfine * nfine} or \code{nfine[2] * nfine[1]}, is created over the original window of the data, and the intensity or conditional intensity of the fitted model is calculated on this grid. The result is used to update the fitted model parameters once by a Newton-Raphson update. This is only useful if the quadrature points used to fit the original model \code{fit} are coarser than the grid of points specified by \code{nfine}. } \value{ A numeric vector, of the same length as \code{coef(fit)}, giving updated values for the fitted model coefficients. } \references{ Baddeley, A. and Turner, R. 
(2014) Bias correction for parameter estimates of spatial point process models. \emph{Journal of Statistical Computation and Simulation} \bold{84}, 1621--1643. DOI: 10.1080/00949655.2012.755976 } \author{ \adrian and \rolf. } \seealso{ \code{\link{rex}} } \examples{ fit <- ppm(cells ~ x, Strauss(0.07)) coef(fit) if(!interactive()) { bc(fit, nfine=64) } else { bc(fit) } } \keyword{spatial} \keyword{models} \keyword{math} \keyword{optimize} spatstat/man/methods.kppm.Rd0000644000176200001440000000310013160710621015557 0ustar liggesusers\name{methods.kppm} \alias{methods.kppm} %DoNotExport \alias{coef.kppm} \alias{formula.kppm} \alias{print.kppm} \alias{terms.kppm} \alias{labels.kppm} \title{ Methods for Cluster Point Process Models } \description{ These are methods for the class \code{"kppm"}. } \usage{ \method{coef}{kppm}(object, \dots) \method{formula}{kppm}(x, \dots) \method{print}{kppm}(x, ...) \method{terms}{kppm}(x, \dots) \method{labels}{kppm}(object, \dots) } \arguments{ \item{x,object}{ An object of class \code{"kppm"}, representing a fitted cluster point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{coef}}, \code{\link{formula}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"kppm"}. An object of class \code{"kppm"} represents a fitted cluster point process model. It is obtained from \code{\link{kppm}}. The method \code{coef.kppm} returns the vector of \emph{regression coefficients} of the fitted model. It does not return the clustering parameters. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{update.kppm}}, \code{\link{vcov.kppm}}, \code{\link{as.ppm.kppm}}. 
} \examples{ data(redwood) fit <- kppm(redwood ~ x, "MatClust") coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat/man/pool.envelope.Rd0000644000176200001440000000544413160710621015750 0ustar liggesusers\name{pool.envelope} \alias{pool.envelope} \title{ Pool Data from Several Envelopes } \description{ Pool the simulation data from several simulation envelopes (objects of class \code{"envelope"}) and compute a new envelope. } \usage{ \method{pool}{envelope}(..., savefuns=FALSE, savepatterns=FALSE) } \arguments{ \item{\dots}{ Objects of class \code{"envelope"}. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"envelope"} of simulation envelopes. It is used to combine the simulation data from several simulation envelopes and to compute an envelope based on the combined data. Each of the arguments \code{\dots} must be an object of class \code{"envelope"}. These envelopes must be compatible, in that they are envelopes for the same function, and were computed using the same options. \itemize{ \item In normal use, each envelope object will have been created by running the command \code{\link{envelope}} with the argument \code{savefuns=TRUE}. This ensures that each object contains the simulated data (summary function values for the simulated point patterns) that were used to construct the envelope. The simulated data are extracted from each object and combined. A new envelope is computed from the combined set of simulations. \item Alternatively, if each envelope object was created by running \code{\link{envelope}} with \code{VARIANCE=TRUE}, then the saved functions are not required. The sample means and sample variances from each envelope will be pooled. 
A new envelope is computed from the pooled mean and variance. } Warnings or errors will be issued if the envelope objects \code{\dots} appear to be incompatible. Apart from these basic checks, the code is not smart enough to decide whether it is sensible to pool the data. To modify the envelope parameters or the type of envelope that is computed, first pool the envelope data using \code{pool.envelope}, then use \code{\link{envelope.envelope}} to modify the envelope parameters. } \value{ An object of class \code{"envelope"}. } \seealso{ \code{\link{envelope}}, \code{\link{envelope.envelope}}, \code{\link{pool}}, \code{\link{pool.fasp}} } \examples{ E1 <- envelope(cells, Kest, nsim=10, savefuns=TRUE) E2 <- envelope(cells, Kest, nsim=20, savefuns=TRUE) pool(E1, E2) V1 <- envelope(E1, VARIANCE=TRUE) V2 <- envelope(E2, VARIANCE=TRUE) pool(V1, V2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/rescale.Rd0000644000176200001440000000510713160710621014575 0ustar liggesusers\name{rescale} \alias{rescale} \title{Convert dataset to another unit of length} \description{ Converts between different units of length in a spatial dataset, such as a point pattern or a window. } \usage{ rescale(X, s, unitname) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{unitname}{ Optional. New name for the unit of length. See \code{\link{unitname}}. } } \value{ Another object of the same type, representing the same data, but expressed in the new units. } \details{ This is generic. Methods are provided for many spatial objects. The spatial coordinates in the dataset \code{X} will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. 
The name of the unit of length will also be adjusted. The result is an object of the same type, representing the same data, but expressed in the new units. For example if \code{X} is a dataset giving coordinates in metres, then \code{rescale(X,1000)} will take the new unit of length to be 1000 metres. To do this, it will divide the old coordinate values by 1000 to obtain coordinates expressed in kilometres, and change the name of the unit of length from \code{"metres"} to \code{"1000 metres"}. If \code{unitname} is given, it will be taken as the new name of the unit of length. It should be a valid name for the unit of length, as described in the help for \code{\link{unitname}}. For example if \code{X} is a dataset giving coordinates in metres, \code{rescale(X, 1000, "km")} will divide the coordinate values by 1000 to obtain coordinates in kilometres, and the unit name will be changed to \code{"km"}. } \section{Note}{ The result of this operation is equivalent to the original dataset. If you want to actually change the coordinates by a linear transformation, producing a dataset that is not equivalent to the original one, use \code{\link{affine}}. } \seealso{ Available methods: \code{\link{rescale.im}}, \code{\link{rescale.layered}}, \code{\link{rescale.linnet}}, \code{\link{rescale.lpp}}, \code{\link{rescale.owin}}, \code{\link{rescale.ppp}}, \code{\link{rescale.psp}} and \code{\link{rescale.units}}. Other generics: \code{\link{unitname}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/coef.mppm.Rd0000644000176200001440000000643713160710621015052 0ustar liggesusers\name{coef.mppm} \alias{coef.mppm} \title{ Coefficients of Point Process Model Fitted to Multiple Point Patterns } \description{ Given a point process model fitted to a list of point patterns, extract the coefficients of the fitted model. A method for \code{coef}. 
} \usage{ \method{coef}{mppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"mppm"}) } \item{\dots}{ Ignored. } } \value{ Either a vector containing the fitted coefficients, or a data frame containing the fitted coefficients for each point pattern. } \details{ This function is a method for the generic function \code{\link{coef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}. This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the vector of coefficients of the fitted model. This is the estimate of the parameter vector \eqn{\theta}{\theta} such that the conditional intensity of the model is of the form \deqn{ \lambda(u,x) = \exp(\theta S(u,x)) }{ \lambda(u,x) = \exp(\theta . S(u,x)) } where \eqn{S(u,x)} is a (vector-valued) statistic. For example, if the model \code{object} is the uniform Poisson process, then \code{coef(object)} will yield a single value (named \code{"(Intercept)"}) which is the logarithm of the fitted intensity of the Poisson process. If the fitted model includes random effects (i.e. if the argument \code{random} was specified in the call to \code{\link{mppm}}), then the fitted coefficients are different for each point pattern in the original data, so \code{coef(object)} is a data frame with one row for each point pattern, and one column for each parameter. Use \code{\link{fixef.mppm}} to extract the vector of fixed effect coefficients, and \code{\link{ranef.mppm}} to extract the random effect coefficients at each level. Use \code{\link{print.mppm}} to print a more useful description of the fitted model. } \seealso{ \code{\link{fixef.mppm}} and \code{\link{ranef.mppm}} for the fixed and random effect coefficients in a model that includes random effects. 
\code{\link{print.mppm}}, \code{\link{mppm}} } \examples{ H <- hyperframe(X=waterstriders) fit.Poisson <- mppm(X ~ 1, H) coef(fit.Poisson) # The single entry "(Intercept)" # is the log of the fitted intensity of the Poisson process fit.Strauss <- mppm(X~1, H, Strauss(7)) coef(fit.Strauss) # The two entries "(Intercept)" and "Interaction" # are respectively log(beta) and log(gamma) # in the usual notation for Strauss(beta, gamma, r) # Tweak data to exaggerate differences H$X[[1]] <- rthin(H$X[[1]], 0.3) # Model with random effects fitran <- mppm(X ~ 1, H, random=~1|id) coef(fitran) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/fitted.mppm.Rd0000644000176200001440000000530413160710621015405 0ustar liggesusers\name{fitted.mppm} \alias{fitted.mppm} \title{Fitted Conditional Intensity for Multiple Point Process Model} \description{ Given a point process model fitted to multiple point patterns, compute the fitted conditional intensity of the model at the points of each data pattern, or at the points of the quadrature schemes used to fit the model. } \usage{ \method{fitted}{mppm}(object, ..., type = "lambda", dataonly = FALSE) } \arguments{ \item{object}{ The fitted model. An object of class \code{"mppm"} obtained from \code{\link{mppm}}. } \item{\dots}{Ignored.} \item{type}{ Type of fitted values: either \code{"trend"} for the spatial trend, or \code{"lambda"} or \code{"cif"} for the conditional intensity. } \item{dataonly}{ If \code{TRUE}, fitted values are computed only for the points of the data point patterns. If \code{FALSE}, fitted values are computed for the points of the quadrature schemes used to fit the model. 
} } \details{ This function evaluates the conditional intensity \eqn{\hat\lambda(u,x)}{lambdahat(u,x)} or spatial trend \eqn{\hat{b(u)}}{bhat(u)} of the fitted point process model for certain locations \eqn{u}, for each of the original point patterns \eqn{x} to which the model was fitted. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated, are the points of the quadrature schemes used to fit the model in \code{\link{mppm}}. They include the data points (the points of the original point pattern datasets) and other ``dummy'' points in the window of observation. Use \code{\link{predict.mppm}} to compute the fitted conditional intensity at other locations or with other values of the explanatory variables. } \value{ A list of vectors (one for each row of the original hyperframe, i.e. one vector for each of the original point patterns) containing the values of the fitted conditional intensity or (if \code{type="trend"}) the fitted spatial trend. Entries in these vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{\link{quad.mppm}(object)}. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \examples{ model <- mppm(Bugs ~ x, data=hyperframe(Bugs=waterstriders), interaction=Strauss(7)) cifs <- fitted(model) } \seealso{ \code{\link{mppm}}, \code{\link{predict.mppm}} } \keyword{spatial} \keyword{models} spatstat/man/update.interact.Rd0000644000176200001440000000227713160710621016256 0ustar liggesusers\name{update.interact} \alias{update.interact} \title{ Update an Interpoint Interaction } \description{ This command updates the \code{object} using the arguments given. 
} \usage{ \method{update}{interact}(object, \dots) } \arguments{ \item{object}{ Interpoint interaction (object of class \code{"interact"}). } \item{\dots}{ Additional or replacement values of parameters of \code{object}. } } \details{ This is a method for the generic function \code{\link[stats]{update}} for the class \code{"interact"} of interpoint interactions. It updates the \code{object} using the parameters given in the extra arguments \code{\dots}. The extra arguments must be given in the form \code{name=value} and must be recognisable to the interaction object. They override any parameters of the same name in \code{object}. } \value{ Another object of class \code{"interact"}, equivalent to \code{object} except for changes in parameter values. } \author{ \spatstatAuthors. } \seealso{ \code{\link{update.ppm}} } \examples{ Str <- Strauss(r=1) Str update(Str, r=2) M <- MultiStrauss(radii=matrix(1,2,2)) update(M, types=c("on", "off")) } \keyword{spatial} \keyword{models} spatstat/man/circdensity.Rd0000644000176200001440000000265113160710571015504 0ustar liggesusers\name{circdensity} \alias{circdensity} \title{ Density Estimation for Circular Data } \description{ Computes a kernel smoothed estimate of the probability density for angular data. } \usage{ circdensity(x, sigma = "nrd0", \dots, bw = NULL, weights=NULL, unit = c("degree", "radian")) } \arguments{ \item{x}{ Numeric vector, containing angular data. } \item{sigma}{ Smoothing bandwidth, or bandwidth selection rule, passed to \code{\link[stats]{density.default}}. } \item{bw}{Alternative to \code{sigma} for consistency with other functions.} \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}, such as \code{kernel} and \code{weights}. } \item{weights}{ Optional numeric vector of weights for the data in \code{x}. } \item{unit}{ The unit of angle in which \code{x} is expressed. 
} } \details{ The angular values \code{x} are smoothed using (by default) the wrapped Gaussian kernel with standard deviation \code{sigma}. } \value{ An object of class \code{"density"} (produced by \code{\link[stats]{density.default}}) which can be plotted by \code{plot} or by \code{\link{rose}}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link[stats]{density.default}}, \code{\link{rose}}. } \examples{ ang <- runif(1000, max=360) rose(circdensity(ang, 12)) } \keyword{nonparametric} \keyword{smooth} spatstat/man/runifpoint3.Rd0000644000176200001440000000233513160710621015437 0ustar liggesusers\name{runifpoint3} \alias{runifpoint3} \title{ Generate N Uniform Random Points in Three Dimensions } \description{ Generate a random point pattern containing \code{n} independent, uniform random points in three dimensions. } \usage{ runifpoint3(n, domain = box3(), nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of points to be generated. } \item{domain}{ Three-dimensional box in which the process should be generated. An object of class \code{"box3"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern in three dimensions (an object of class \code{"pp3"}). If \code{nsim > 1}, a list of such point patterns. } \details{ This function generates \code{n} independent random points, uniformly distributed in the three-dimensional box \code{domain}. 
} \seealso{ \code{\link{rpoispp3}}, \code{\link{pp3}}, \code{\link{box3}} } \examples{ X <- runifpoint3(50) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/linearKcross.Rd0000644000176200001440000000563313160710621015622 0ustar liggesusers\name{linearKcross} \alias{linearKcross} \title{ Multitype K Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ linearKcross(X, i, j, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{Ignored.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kcross}} for a point pattern on a linear network (object of class \code{"lpp"}). 
The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The arguments \code{i} and \code{j} are interpreted as levels of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link{linearK}}. } \examples{ data(chicago) K <- linearKcross(chicago, "assault", "robbery") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/whichhalfplane.Rd0000644000176200001440000000202113160710621016124 0ustar liggesusers\name{whichhalfplane} \alias{whichhalfplane} \title{ Test Which Side of Infinite Line a Point Falls On } \description{ Given an infinite line and a spatial point location, determine which side of the line the point falls on. } \usage{ whichhalfplane(L, x, y = NULL) } \arguments{ \item{L}{ Object of class \code{"infline"} specifying one or more infinite straight lines in two dimensions. } \item{x,y}{ Arguments acceptable to \code{\link[grDevices]{xy.coords}} specifying the locations of the points. } } \details{ An infinite line \eqn{L} divides the two-dimensional plane into two half-planes. 
This function returns a matrix \code{M} of logical values in which \code{M[i,j] = TRUE} if the \code{j}th spatial point lies below or to the left of the \code{i}th line. } \value{ A logical matrix. } \author{ \adrian. } \seealso{ \code{\link{infline}} } \examples{ L <- infline(p=runif(3), theta=runif(3, max=2*pi)) X <- runifpoint(4) whichhalfplane(L, X) } \keyword{spatial} \keyword{manip} spatstat/man/plot.dppm.Rd0000644000176200001440000000351013160710621015070 0ustar liggesusers\name{plot.dppm} \alias{plot.dppm} \title{Plot a fitted determinantal point process} \description{ Plots a fitted determinantal point process model, displaying the fitted intensity and the fitted summary function. } \usage{ \method{plot}{dppm}(x, ..., what=c("intensity", "statistic")) } \arguments{ \item{x}{ Fitted determinantal point process model. An object of class \code{"dppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} and \code{\link{plot.fv}} to control the plot. } \item{what}{ Character vector determining what will be plotted. } } \details{ This is a method for the generic function \code{\link{plot}} for the class \code{"dppm"} of fitted determinantal point process models. The argument \code{x} should be a determinantal point process model (object of class \code{"dppm"}) obtained using the function \code{\link{dppm}}. The choice of plots (and the order in which they are displayed) is controlled by the argument \code{what}. The options (partially matched) are \code{"intensity"} and \code{"statistic"}. This command is capable of producing two different plots: \describe{ \item{what="intensity"}{specifies the fitted intensity of the model, which is plotted using \code{\link{plot.ppm}}. By default this plot is not produced for stationary models.} \item{what="statistic"}{specifies the empirical and fitted summary statistics, which are plotted using \code{\link{plot.fv}}. 
This is only meaningful if the model has been fitted using the Method of Minimum Contrast, and it is turned off otherwise.} } } \value{ Null. } \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss()) plot(fit) } \seealso{ \code{\link{dppm}}, \code{\link{plot.ppm}}, } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/dg.progress.Rd0000644000176200001440000001472513160710571015426 0ustar liggesusers\name{dg.progress} \alias{dg.progress} \title{ Progress Plot of Dao-Genton Test of Spatial Pattern } \description{ Generates a progress plot (envelope representation) of the Dao-Genton test for a spatial point pattern. } \usage{ dg.progress(X, fun = Lest, \dots, exponent = 2, nsim = 19, nsimsub = nsim - 1, nrank = 1, alpha, leaveout=1, interpolate = FALSE, rmin=0, savefuns = FALSE, savepatterns = FALSE, verbose=TRUE) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. Useful arguments include \code{alternative} to specify one-sided or two-sided envelopes. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{nrank}{ Integer. The rank of the critical value of the Monte Carlo test, amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will become the critical values for the test. } \item{alpha}{ Optional. 
The significance level of the test. Equivalent to \code{nrank/(nsim+1)} where \code{nsim} is the number of simulations. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating how to compute the critical value. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, and the critical value is the largest simulated value of the test statistic (if \code{nrank=1}) or the \code{nrank}-th largest (if \code{nrank} is another number). If \code{interpolate=TRUE}, kernel density estimation is applied to the simulated values, and the critical value is the upper \code{alpha} quantile of this estimated distribution. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } \item{savefuns}{ Logical value indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical value indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The Dao and Genton (2014) test for a spatial point pattern is described in \code{\link{dg.test}}. This test depends on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{progress plot} or \emph{envelope representation} of the test (Baddeley et al, 2014) is a plot of the test statistic (and the corresponding critical value) against the length of the interval \code{rinterval}. The command \code{dg.progress} effectively performs \code{\link{dg.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting values of the test statistic, and the corresponding critical values of the test, as a function of \eqn{R}. 
The result is an object of class \code{"fv"} that can be plotted to obtain the progress plot. The display shows the test statistic (solid black line) and the test acceptance region (grey shading). If \code{X} is an envelope object, then some of the data stored in \code{X} may be re-used: \itemize{ \item If \code{X} is an envelope object containing simulated functions, and \code{fun=NULL}, then the code will re-use the simulated functions stored in \code{X}. \item If \code{X} is an envelope object containing simulated point patterns, then \code{fun} will be applied to the stored point patterns to obtain the simulated functions. If \code{fun} is not specified, it defaults to \code{\link{Lest}}. \item Otherwise, new simulations will be performed, and \code{fun} defaults to \code{\link{Lest}}. } If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ An object of class \code{"fv"} that can be plotted to obtain the progress plot. } \references{ Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. 
(2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Computational and Graphical Statistics} \bold{23}, 497--517. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{dg.test}}, \code{\link{dclf.progress}} } \examples{ ns <- if(interactive()) 19 else 5 plot(dg.progress(cells, nsim=ns)) } \keyword{spatial} \keyword{htest} spatstat/man/rStrauss.Rd0000644000176200001440000001202713160710621015004 0ustar liggesusers\name{rStrauss} \alias{rStrauss} \title{Perfect Simulation of the Strauss Process} \description{ Generate a random pattern of points, a simulated realisation of the Strauss process, using a perfect simulation algorithm. } \usage{ rStrauss(beta, gamma = 1, R = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ interaction parameter (a number between 0 and 1, inclusive). } \item{R}{ interaction radius (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. 
} } \details{ This function generates a realisation of the Strauss point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Strauss process (Strauss, 1975; Kelly and Ripley, 1976) is a model for spatial inhibition, ranging from a strong `hard core' inhibition to a completely random pattern according to the value of \code{gamma}. The Strauss process with interaction radius \eqn{R} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the pairwise interaction point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{R} units apart, and \eqn{\alpha}{alpha} is the normalising constant. Intuitively, each point of the pattern contributes a factor \eqn{\beta}{beta} to the probability density, and each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the density. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} in order that the process be well-defined (Kelly and Ripley, 1976). This model describes an ``ordered'' or ``inhibitive'' pattern. If \eqn{\gamma=1}{gamma=1} it reduces to a Poisson process (complete spatial randomness) with intensity \eqn{\beta}{beta}. If \eqn{\gamma=0}{gamma=0} it is called a ``hard core process'' with hard core radius \eqn{R/2}, since no pair of points is permitted to lie closer than \eqn{R} units apart. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003). 
This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \author{ Kasper Klitgaard Berthelsen, adapted for \pkg{spatstat} by \adrian } \examples{ X <- rStrauss(0.05,0.2,1.5,square(141.4)) Z <- rStrauss(100,0.7,0.05) } \seealso{ \code{\link{rmh}}, \code{\link{Strauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}, \code{\link{rPenttinen}}. 
} \keyword{spatial} \keyword{datagen} spatstat/man/is.owin.Rd0000644000176200001440000000127113160710621014543 0ustar liggesusers\name{is.owin} \alias{is.owin} \title{Test Whether An Object Is A Window} \description{ Checks whether its argument is a window (object of class \code{"owin"}). } \usage{ is.owin(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the object \code{x} is a window object of class \code{"owin"}. See \code{\link{owin.object}} for details of this class. The result is determined to be \code{TRUE} if \code{x} inherits from \code{"owin"}, i.e. if \code{x} has \code{"owin"} amongst its classes. } \value{ \code{TRUE} if \code{x} is a window, otherwise \code{FALSE}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/dummy.ppm.Rd0000644000176200001440000000431313160710571015107 0ustar liggesusers\name{dummy.ppm} \alias{dummy.ppm} \title{Extract Dummy Points Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the `dummy points' of the quadrature scheme used to fit the model. } \usage{ dummy.ppm(object, drop=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). } \item{drop}{ Logical value determining whether to delete dummy points that were not used to fit the model. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. 
See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. This function \code{dummy.ppm} extracts the dummy points of the quadrature scheme. A typical use of this function would be to count the number of dummy points, to gauge the accuracy of the approximation to the exact pseudolikelihood. It may happen that some dummy points are not actually used in fitting the model (typically because the value of a covariate is \code{NA} at these points). The argument \code{drop} specifies whether these unused dummy points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- dummy.ppm(fit) npoints(X) # this is the number of dummy points in the quadrature scheme } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} \keyword{models} spatstat/man/rpoisppOnLines.Rd0000644000176200001440000000713013160710621016141 0ustar liggesusers\name{rpoisppOnLines} \alias{rpoisppOnLines} \title{Generate Poisson Point Pattern on Line Segments} \description{ Given a line segment pattern, generate a Poisson random point pattern on the line segments. } \usage{ rpoisppOnLines(lambda, L, lmax = NULL, ..., nsim=1) } \arguments{ \item{lambda}{Intensity of the Poisson process. A single number, a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or a vector of numbers, a list of functions, or a list of images. } \item{L}{Line segment pattern (object of class \code{"psp"}) on which the points should be generated. } \item{lmax}{ Optional upper bound (for increased computational efficiency). A known upper bound for the values of \code{lambda}, if \code{lambda} is a function or a pixel image. 
That is, \code{lmax} should be a number which is known to be greater than or equal to all values of \code{lambda}. } \item{\dots}{Additional arguments passed to \code{lambda} if it is a function. } \item{nsim}{Number of simulated realisations to be generated.} } \details{ This command generates a Poisson point process on the one-dimensional system of line segments in \code{L}. The result is a point pattern consisting of points lying on the line segments in \code{L}. The number of random points falling on any given line segment follows a Poisson distribution. The patterns of points on different segments are independent. The intensity \code{lambda} is the expected number of points per unit \bold{length} of line segment. It may be constant, or it may depend on spatial location. In order to generate an unmarked Poisson process, the argument \code{lambda} may be a single number, or a \code{function(x,y)}, or a pixel image (object of class \code{"im"}). In order to generate a \emph{marked} Poisson process, \code{lambda} may be a numeric vector, a list of functions, or a list of images, each entry giving the intensity for a different mark value. If \code{lambda} is not numeric, then the (Lewis-Shedler) rejection method is used. The rejection method requires knowledge of \code{lmax}, the maximum possible value of \code{lambda}. This should be either a single number, or a numeric vector of the same length as \code{lambda}. If \code{lmax} is not given, it will be computed approximately, by sampling many values of \code{lambda}. If \code{lmax} is given, then it \bold{must} be larger than any possible value of \code{lambda}, otherwise the results of the algorithm will be incorrect. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}) in the same window as \code{L}. If \code{nsim > 1}, a list of such point patterns. 
} \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{runifpointOnLines}}, \code{\link{rpoispp}} } \examples{ live <- interactive() L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) if(live) plot(L, main="") # uniform intensity Y <- rpoisppOnLines(4, L) if(live) plot(Y, add=TRUE, pch="+") # uniform MARKED process with types 'a' and 'b' Y <- rpoisppOnLines(c(a=4, b=5), L) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } # intensity is a function Y <- rpoisppOnLines(function(x,y){ 10 * x^2}, L, 10) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } # intensity is an image Z <- as.im(function(x,y){10 * sqrt(x+y)}, unit.square()) Y <- rpoisppOnLines(Z, L, 15) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/dppparbounds.Rd0000644000176200001440000000145013160710571015661 0ustar liggesusers\name{dppparbounds} \alias{dppparbounds} \title{Parameter Bound for a Determinantal Point Process Model} \description{ Returns the lower and upper bound for a specific parameter of a determinantal point process model when all other parameters are fixed. } \usage{dppparbounds(model, name, \dots)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} \item{name}{name of the parameter for which the bound should be computed.} \item{\dots}{ Additional arguments passed to the \code{parbounds} function of the given model } } \value{A \code{data.frame} containing lower and upper bounds.} \author{ \adrian \rolf and \ege } \examples{ model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) dppparbounds(model, "lambda") } \keyword{spatial} \keyword{models} spatstat/man/tweak.colourmap.Rd0000644000176200001440000000274213160710621016274 0ustar liggesusers\name{tweak.colourmap} \alias{tweak.colourmap} \title{ Change Colour Values in a Colour Map } \description{ Assign new colour values to some of the entries in a colour map. 
} \usage{ tweak.colourmap(m, col, ..., inputs=NULL, range=NULL) } \arguments{ \item{m}{ A colour map (object of class \code{"colourmap"}). } \item{inputs}{ Input values to the colour map, to be assigned new colours. Incompatible with \code{range}. } \item{range}{ Numeric vector of length 2 specifying a range of numerical values which should be assigned a new colour. Incompatible with \code{inputs}. } \item{col}{ Replacement colours for the specified \code{inputs} or the specified \code{range} of values. } \item{\dots}{Other arguments are ignored.} } \details{ This function changes the colour map \code{m} by assigning new colours to each of the input values specified by \code{inputs}, or by assigning a single new colour to the range of input values specified by \code{range}. The modified colour map is returned. } \value{ Another colour map (object of class \code{"colourmap"}). } \seealso{ \code{\link{colourmap}}, \code{\link{interp.colourmap}}, \code{\link[spatstat:colourtools]{colourtools}}. } \examples{ co <- colourmap(rainbow(32), range=c(0,1)) plot(tweak.colourmap(co, inputs=c(0.5, 0.6), "white")) plot(tweak.colourmap(co, range=c(0.5,0.6), "white")) } \author{\adrian and \rolf } \keyword{spatial} \keyword{color} spatstat/man/diameter.Rd0000644000176200001440000000176413160710571014762 0ustar liggesusers\name{diameter} \alias{diameter} \title{Diameter of an Object} \description{ Computes the diameter of an object such as a two-dimensional window or three-dimensional box. } \usage{ diameter(x) } \arguments{ \item{x}{ A window or other object whose diameter will be computed. } } \value{ The numerical value of the diameter of the object. } \details{ This function computes the diameter of an object such as a two-dimensional window or a three-dimensional box. The diameter is the maximum distance between any two points in the object. 
The function \code{diameter} is generic, with methods for the class \code{"owin"} (two-dimensional windows), \code{"box3"} (three-dimensional boxes), \code{"boxx"} (multi-dimensional boxes) and \code{"linnet"} (linear networks). } \seealso{ \code{\link{diameter.owin}}, \code{\link{diameter.box3}}, \code{\link{diameter.boxx}}, \code{\link{diameter.linnet}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/plot.psp.Rd0000644000176200001440000000724613160710621014744 0ustar liggesusers\name{plot.psp} \alias{plot.psp} \title{plot a Spatial Line Segment Pattern} \description{ Plot a two-dimensional line segment pattern } \usage{ \method{plot}{psp}(x, \dots, main, add=FALSE, show.all=!add, show.window=show.all, which.marks=1, ribbon=show.all, ribsep=0.15, ribwid=0.05, ribn=1024, do.plot=TRUE) } \arguments{ \item{x}{ The line segment pattern to be plotted. An object of class \code{"psp"}, or data which can be converted into this format by \code{\link{as.psp}()}. } \item{\dots}{ extra arguments that will be passed to the plotting functions \code{\link{segments}} (to plot the segments) and \code{\link{plot.owin}} (to plot the observation window). } \item{main}{ Character string giving a title for the plot. } \item{add}{ Logical. If \code{TRUE}, the current plot is not erased; the segments are plotted on top of the current plot, and the window is not plotted (by default). } \item{show.all}{ Logical value specifying whether to plot everything including the window, main title, and colour ribbon. } \item{show.window}{ Logical value specifying whether to plot the window. } \item{which.marks}{ Index determining which column of marks to use, if the marks of \code{x} are a data frame. A character string or an integer. Defaults to \code{1} indicating the first column of marks. } \item{ribbon}{ Logical flag indicating whether to display a ribbon showing the colour map (in which mark values are associated with colours). 
} \item{ribsep}{ Factor controlling the space between the ribbon and the image. } \item{ribwid}{ Factor controlling the width of the ribbon. } \item{ribn}{ Number of different values to display in the ribbon. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. } } \value{ (Invisibly) a colour map object specifying the association between marks and colours, if any. The return value also has an attribute \code{"bbox"} giving a bounding box for the plot. } \details{ This is the \code{plot} method for line segment pattern datasets (of class \code{"psp"}, see \code{\link{psp.object}}). It plots both the observation window \code{Window(x)} and the line segments themselves. Plotting of the window \code{Window(x)} is performed by \code{\link{plot.owin}}. This plot may be modified through the \code{...} arguments. Plotting of the segments themselves is performed by the standard R function \code{\link{segments}}. Its plotting behaviour may also be modified through the \code{...} arguments. For a \emph{marked} line segment pattern (i.e. if \code{marks(x)} is not \code{NULL}) the line segments are plotted in colours determined by the mark values. If \code{marks(x)} is a data frame, the default is to use the first column of \code{marks(x)} to determine the colours. To specify another column, use the argument \code{which.marks}. The colour map (associating mark values with colours) will be displayed as a vertical colour ribbon to the right of the plot, if \code{ribbon=TRUE}. 
} \seealso{ \code{\link{psp.object}}, \code{\link{plot}}, \code{\link{par}}, \code{\link{plot.owin}}, \code{\link{text.psp}}, \code{\link{symbols}} } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) plot(X) plot(X, lwd=3) lettuce <- sample(letters[1:4], 20, replace=TRUE) marks(X) <- data.frame(A=1:20, B=factor(lettuce)) plot(X) plot(X, which.marks="B") } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/ragsMultiHard.Rd0000644000176200001440000000543213160710621015726 0ustar liggesusers\name{ragsMultiHard} \alias{ragsMultiHard} \title{ Alternating Gibbs Sampler for Multitype Hard Core Process } \description{ Generate a realisation of the multitype hard core point process using the alternating Gibbs sampler. } \usage{ ragsMultiHard(beta, hradii, \dots, types=NULL, bmax = NULL, periodic=FALSE, ncycles = 100) } \arguments{ \item{beta}{ First order trend. A numeric vector, a pixel image, a function, a list of functions, or a list of pixel images. } \item{hradii}{ Matrix of hard core radii between each pair of types. Diagonal entries should be \code{0} or \code{NA}. } \item{types}{ Vector of all possible types for the multitype point pattern. } \item{\dots}{ Arguments passed to \code{\link{rmpoispp}} when generating random points. } \item{bmax}{ Optional upper bound on \code{beta}. } \item{periodic}{ Logical value indicating whether to measure distances in the periodic sense, so that opposite sides of the (rectangular) window are treated as identical. } \item{ncycles}{ Number of cycles of the sampler to be performed. } } \details{ The Alternating Gibbs Sampler for a multitype point process is an iterative simulation procedure. Each step of the sampler updates the pattern of points of a particular type \code{i}, by drawing a realisation from the conditional distribution of points of type \code{i} given the points of all other types. 
Successive steps of the sampler update the points of type 1, then type 2, type 3, and so on. This is an experimental implementation which currently works only for multitype hard core processes (see \code{\link{MultiHard}}) in which there is no interaction between points of the same type, and for the area-interaction process (see \code{\link{ragsAreaInter}}). The argument \code{beta} gives the first order trend for each possible type of point. It may be a single number, a numeric vector, a \code{function(x,y)}, a pixel image, a list of functions, a \code{function(x,y,m)}, or a list of pixel images. The argument \code{hradii} is the matrix of hard core radii between each pair of possible types of points. Two points of types \code{i} and \code{j} respectively are forbidden to lie closer than a distance \code{hradii[i,j]} apart. The diagonal of this matrix must contain \code{NA} or \code{0} values, indicating that there is no hard core constraint applying between points of the same type. } \value{ A point pattern (object of class \code{"ppp"}). } \author{ \adrian } \seealso{ \code{\link{rags}}, \code{\link{ragsAreaInter}} } \examples{ b <- c(30,20) h <- 0.05 * matrix(c(0,1,1,0), 2, 2) ragsMultiHard(b, h, ncycles=10) ragsMultiHard(b, h, ncycles=5, periodic=TRUE) } \keyword{spatial} \keyword{datagen} spatstat/man/Kinhom.Rd0000644000176200001440000003550513160710571014415 0ustar liggesusers\name{Kinhom} \alias{Kinhom} \title{Inhomogeneous K-function} \description{ Estimates the inhomogeneous \eqn{K} function of a non-stationary point pattern. 
} \usage{ Kinhom(X, lambda=NULL, \dots, r = NULL, breaks = NULL, correction=c("border", "bord.modif", "isotropic", "translate"), renormalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, nlarge = 1000, lambda2=NULL, reciplambda=NULL, reciplambda2=NULL, diagonal=TRUE, sigma=NULL, varcov=NULL, ratio=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{K} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). 
If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{nlarge}{ Optional. Efficiency threshold. If the number of points exceeds \code{nlarge}, then only the border correction will be computed, using a fast algorithm. } \item{lambda2}{ Advanced use only. Matrix containing estimates of the products \eqn{\lambda(x_i)\lambda(x_j)}{lambda(x[i]) * lambda(x[j])} of the intensities at each pair of data points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. } \item{reciplambda}{ Alternative to \code{lambda}. Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda} of the intensity function. Either a vector giving the reciprocal intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the reciprocal intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the reciprocal intensity value at any location. } \item{reciplambda2}{ Advanced use only. Alternative to \code{lambda2}. A matrix giving values of the estimated \emph{reciprocal products} \eqn{1/\lambda(x_i)\lambda(x_j)}{1/(lambda(x[i]) * lambda(x[j]))} of the intensities at each pair of data points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. } \item{diagonal}{ Do not use this argument. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. 
} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing at least the following columns, \item{r}{the vector of values of the argument \eqn{r} at which \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} has been estimated } \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2}, the theoretical value of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} for an inhomogeneous Poisson process } and containing additional columns according to the choice specified in the \code{correction} argument. The additional columns are named \code{border}, \code{trans} and \code{iso} and give the estimated values of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} using the border correction, translation correction, and Ripley isotropic correction, respectively. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)}. } \details{ This computes a generalisation of the \eqn{K} function for inhomogeneous point patterns, proposed by Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2000). The ``ordinary'' \eqn{K} function (variously known as the reduced second order moment function and Ripley's \eqn{K} function), is described under \code{\link{Kest}}. It is defined only for stationary point processes. The inhomogeneous \eqn{K} function \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} is a direct generalisation to nonstationary point processes. Suppose \eqn{x} is a point process with non-constant intensity \eqn{\lambda(u)}{lambda(u)} at each location \eqn{u}. 
Define \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} to be the expected value, given that \eqn{u} is a point of \eqn{x}, of the sum of all terms \eqn{1/\lambda(x_j)}{1/lambda(x[j])} over all points \eqn{x_j}{x[j]} in the process separated from \eqn{u} by a distance less than \eqn{r}. This reduces to the ordinary \eqn{K} function if \eqn{\lambda()}{lambda()} is constant. If \eqn{x} is an inhomogeneous Poisson process with intensity function \eqn{\lambda(u)}{lambda(u)}, then \eqn{K_{\mbox{\scriptsize\rm inhom}}(r) = \pi r^2}{Kinhom(r) = pi * r^2}. Given a point pattern dataset, the inhomogeneous \eqn{K} function can be estimated essentially by summing the values \eqn{1/(\lambda(x_i)\lambda(x_j))}{1/(lambda(x[i]) * lambda(x[j]))} for all pairs of points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance less than \eqn{r}. This allows us to inspect a point pattern for evidence of interpoint interactions after allowing for spatial inhomogeneity of the pattern. Values \eqn{K_{\mbox{\scriptsize\rm inhom}}(r) > \pi r^2}{Kinhom(r) > pi * r^2} are suggestive of clustering. The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda}. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. 
} } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Edge corrections are used to correct bias in the estimation of \eqn{K_{\mbox{\scriptsize\rm inhom}}}{Kinhom}. 
Each edge-corrected estimate of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} is of the form \deqn{ \widehat K_{\mbox{\scriptsize\rm inhom}}(r) = (1/A) \sum_i \sum_j \frac{1\{d_{ij} \le r\} e(x_i,x_j,r)}{\lambda(x_i)\lambda(x_j)} }{ K^inhom(r)= (1/A) sum[i] sum[j] 1(d[i,j] <= r) * e(x[i],x[j],r)/(lambda(x[i]) * lambda(x[j])) } where \code{A} is a constant denominator, \eqn{d_{ij}}{d[i,j]} is the distance between points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r)}{e(x[i],x[j],r)} is an edge correction factor. For the `border' correction, \deqn{ e(x_i,x_j,r) = \frac{1(b_i > r)}{\sum_j 1(b_j > r)/\lambda(x_j)} }{ 1(b[i] > r)/(sum[j] 1(b[j] > r)/lambda(x[j])) } where \eqn{b_i}{b[i]} is the distance from \eqn{x_i}{x[i]} to the boundary of the window. For the `modified border' correction, \deqn{ e(x_i,x_j,r) = \frac{1(b_i > r)}{\mbox{area}(W \ominus r)} }{ 1(b[i] > r)/area(W [-] r) } where \eqn{W \ominus r}{W [-] r} is the eroded window obtained by trimming a margin of width \eqn{r} from the border of the original window. For the `translation' correction, \deqn{ e(x_i,x_j,r) = \frac 1 {\mbox{area}(W \cap (W + (x_j - x_i)))} }{ 1/area(W intersect (W + x[j]-x[i])) } and for the `isotropic' correction, \deqn{ e(x_i,x_j,r) = \frac 1 {\mbox{area}(W) g(x_i,x_j)} }{ 1/(area(W) g(x[i],x[j])) } where \eqn{g(x_i,x_j)}{g(x[i],x[j])} is the fraction of the circumference of the circle with centre \eqn{x_i}{x[i]} and radius \eqn{||x_i - x_j||}{||x[i]-x[j]||} which lies inside the window. If \code{renormalise=TRUE} (the default), then the estimates described above are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{area}(W)/\sum (1/\lambda(x_i)). }{ c = area(W)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. 
The default value of \code{normpower} is 1 (for consistency with previous versions of \pkg{spatstat}) but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W). } If the point pattern \code{X} contains more than about 1000 points, the isotropic and translation edge corrections can be computationally prohibitive. The computations for the border method are much faster, and are statistically efficient when there are large numbers of points. Accordingly, if the number of points in \code{X} exceeds the threshold \code{nlarge}, then only the border correction will be computed. Setting \code{nlarge=Inf} or \code{correction="best"} will prevent this from happening. Setting \code{nlarge=0} is equivalent to selecting only the border correction with \code{correction="border"}. The pair correlation function can also be applied to the result of \code{Kinhom}; see \code{\link{pcf}}. } \references{ Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. 
} \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ # inhomogeneous pattern of maples X <- unmark(split(lansing)$maple) \testonly{ sub <- sample(c(TRUE,FALSE), npoints(X), replace=TRUE, prob=c(0.1,0.9)) X <- X[sub] } # (1) intensity function estimated by model-fitting # Fit spatial trend: polynomial in x and y coordinates fit <- ppm(X, ~ polynom(x,y,2), Poisson()) # (a) predict intensity values at points themselves, # obtaining a vector of lambda values lambda <- predict(fit, locations=X, type="trend") # inhomogeneous K function Ki <- Kinhom(X, lambda) plot(Ki) # (b) predict intensity at all locations, # obtaining a pixel image lambda <- predict(fit, type="trend") Ki <- Kinhom(X, lambda) plot(Ki) # (2) intensity function estimated by heavy smoothing Ki <- Kinhom(X, sigma=0.1) plot(Ki) # (3) simulated data: known intensity function lamfun <- function(x,y) { 50 + 100 * x } # inhomogeneous Poisson process Y <- rpoispp(lamfun, 150, owin()) # inhomogeneous K function Ki <- Kinhom(Y, lamfun) plot(Ki) # How to make simulation envelopes: # Example shows method (2) \dontrun{ smo <- density.ppp(X, sigma=0.1) Ken <- envelope(X, Kinhom, nsim=99, simulate=expression(rpoispp(smo)), sigma=0.1, correction="trans") plot(Ken) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/nndensity.Rd0000644000176200001440000000550113160710621015170 0ustar liggesusers\name{nndensity.ppp} \alias{nndensity} \alias{nndensity.ppp} \title{ Estimate Intensity of Point Pattern Using Nearest Neighbour Distances } \description{ Estimates the intensity of a point pattern using the distance from each spatial location to the \code{k}th nearest data point. } \usage{ nndensity(x, ...) \method{nndensity}{ppp}(x, k, ..., verbose = TRUE) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}) or some other spatial object. } \item{k}{ Integer. The distance to the \code{k}th nearest data point will be computed. There is a sensible default. 
} \item{\dots}{ Arguments passed to \code{\link{nnmap}} and \code{\link{as.mask}} controlling the pixel resolution. } \item{verbose}{ Logical. If \code{TRUE}, print the value of \code{k} when it is automatically selected. If \code{FALSE}, remain silent. } } \details{ This function computes a quick estimate of the intensity of the point process that generated the point pattern \code{x}. For each spatial location \eqn{s}, let \eqn{d(s)} be the distance from \eqn{s} to the \eqn{k}-th nearest point in the dataset \code{x}. If the data came from a homogeneous Poisson process with intensity \eqn{\lambda}{lambda}, then \eqn{\pi d(s)^2}{pi * d(s)^2} would follow a negative exponential distribution with mean \eqn{1/\lambda}{1/lambda}, and the maximum likelihood estimate of \eqn{\lambda}{lambda} would be \eqn{1/(\pi d(s)^2)}{1/(pi * d(s)^2)}. This is the estimate computed by \code{nndensity}, apart from an edge effect correction. This estimator of intensity is relatively fast to compute, and is spatially adaptive (so that it can handle wide variation in the intensity function). However, it implicitly assumes the points are independent, so it does not perform well if the pattern is strongly clustered or strongly inhibited. The value of \code{k} should be greater than 1 in order to avoid infinite peaks in the intensity estimate around each data point. The default value of \code{k} is the square root of the number of points in \code{x}, which seems to work well in many cases. The window of \code{x} is digitised using \code{\link{as.mask}} and the values \eqn{d(s)} are computed using \code{\link{nnmap}}. To control the pixel resolution, see \code{\link{as.mask}}. } \value{ A pixel image (object of class \code{"im"}) giving the estimated intensity of the point process at each spatial location. Pixel values are intensities (number of points per unit area). } \references{ Cressie, N.A.C. (1993) \emph{Statistics for Spatial Data}, revised edition. John Wiley and Sons. 
} \seealso{ \code{\link{density.ppp}}, \code{\link{intensity}} for alternative estimates of point process intensity. } \examples{ plot(nndensity(swedishpines)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/chop.tess.Rd0000644000176200001440000000304513160710571015070 0ustar liggesusers\name{chop.tess} \alias{chop.tess} \title{Subdivide a Window or Tessellation using a Set of Lines} \description{ Divide a given window into tiles delineated by a set of infinite straight lines, obtaining a tessellation of the window. Alternatively, given a tessellation, divide each tile of the tessellation into sub-tiles delineated by the lines. } \usage{ chop.tess(X, L) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or tessellation (object of class \code{"tess"}) to be subdivided by lines. } \item{L}{ A set of infinite straight lines (object of class \code{"infline"}) } } \details{ The argument \code{L} should be a set of infinite straight lines in the plane (stored in an object \code{L} of class \code{"infline"} created by the function \code{\link{infline}}). If \code{X} is a window, then it is divided into tiles delineated by the lines in \code{L}. If \code{X} is a tessellation, then each tile of \code{X} is subdivided into sub-tiles delineated by the lines in \code{L}. The result is a tessellation. } \section{Warning}{ If \code{X} is a non-convex window, or a tessellation containing non-convex tiles, then \code{chop.tess(X,L)} may contain a tile which consists of several unconnected pieces. } \value{ A tessellation (object of class \code{"tess"}). 
} \author{\adrian and \rolf } \seealso{ \code{\link{infline}}, \code{\link{clip.infline}} } \examples{ L <- infline(p=1:3, theta=pi/4) W <- square(4) chop.tess(W, L) } \keyword{spatial} \keyword{math} spatstat/man/nnfun.Rd0000644000176200001440000000512413160710621014302 0ustar liggesusers\name{nnfun} \Rdversion{1.1} \alias{nnfun} \alias{nnfun.ppp} \alias{nnfun.psp} \title{ Nearest Neighbour Index Map as a Function } \description{ Compute the nearest neighbour index map of an object, and return it as a function. } \usage{ nnfun(X, ...) \method{nnfun}{ppp}(X, ..., k=1) \method{nnfun}{psp}(X, ...) } \arguments{ \item{X}{ Any suitable dataset representing a two-dimensional collection of objects, such as a point pattern (object of class \code{"ppp"}) or a line segment pattern (object of class \code{"psp"}). } \item{k}{ A single integer. The \code{k}th nearest neighbour will be found. } \item{\dots}{ Extra arguments are ignored. } } \details{ For a collection \eqn{X} of two dimensional objects (such as a point pattern or a line segment pattern), the \dQuote{nearest neighbour index function} of \eqn{X} is the mathematical function \eqn{f} such that, for any two-dimensional spatial location \eqn{(x,y)}, the function value \code{f(x,y)} is the index \eqn{i} identifying the closest member of \eqn{X}. That is, if \eqn{i = f(x,y)} then \eqn{X[i]} is the closest member of the collection \eqn{X} to the location \eqn{(x,y)}. The command \code{f <- nnfun(X)} returns a \emph{function} in the \R language, with arguments \code{x,y}, that represents the nearest neighbour index function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the indices of the nearest neighbours to these locations. If the argument \code{k} is specified then the \code{k}-th nearest neighbour will be found. 
The result of \code{f <- nnfun(X)} also belongs to the class \code{"funxy"} and to the special class \code{"nnfun"}. It can be printed and plotted immediately as shown in the Examples. A \code{nnfun} object can be converted to a pixel image using \code{\link{as.im}}. } \value{ A \code{function} with arguments \code{x,y}. The function also belongs to the class \code{"nnfun"} which has a method for \code{print}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{distfun}}, \code{\link{plot.funxy}} } \examples{ f <- nnfun(cells) f plot(f) f(0.2, 0.3) g <- nnfun(cells, k=2) g(0.2, 0.3) L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) h <- nnfun(L) h(0.2, 0.3) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/shift.psp.Rd0000644000176200001440000000362613160710621015101 0ustar liggesusers\name{shift.psp} \alias{shift.psp} \title{Apply Vector Translation To Line Segment Pattern} \description{ Applies a vector shift to a line segment pattern. } \usage{ \method{shift}{psp}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Line Segment pattern (object of class \code{"psp"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \value{ Another line segment pattern (of class \code{"psp"}) representing the result of applying the vector shift. } \details{ The line segment pattern, and its window, are translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. If \code{origin} is given, then it should be one of the character strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}. 
The argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the origin. If \code{origin="centroid"} then the centroid of the window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the window will be shifted to the origin. If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the window will be shifted to the origin. } \seealso{ \code{\link{shift}}, \code{\link{shift.owin}}, \code{\link{shift.ppp}}, \code{\link{periodify}}, \code{\link{rotate}}, \code{\link{affine}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(X, col="red") Y <- shift(X, c(0.05,0.05)) plot(Y, add=TRUE, col="blue") shift(Y, origin="mid") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/linearKdot.Rd0000644000176200001440000000524613160710621015257 0ustar liggesusers\name{linearKdot} \alias{linearKdot} \title{ Multitype K Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype \eqn{K} function which counts the expected number of points (of any type) within a given distance of a point of type \eqn{i}. } \usage{ linearKdot(X, i, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot type \eqn{K} function \eqn{K_{i\bullet}(r)}{K[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated. There is a sensible default. 
First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{Ignored.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kdot}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{Kdot}}, \code{\link{linearKcross}}, \code{\link{linearK}}. } \examples{ data(chicago) K <- linearKdot(chicago, "assault") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/residuals.kppm.Rd0000644000176200001440000000213113160710621016112 0ustar liggesusers\name{residuals.kppm} \alias{residuals.kppm} \title{ Residuals for Fitted Cox or Cluster Point Process Model } \description{ Given a Cox or cluster point process model fitted to a point pattern, compute residuals. 
} \usage{ \method{residuals}{kppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"kppm"}) for which residuals should be calculated. } \item{\dots}{ Arguments passed to \code{\link{residuals.ppm}}. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function extracts the intensity component of the model using \code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}} to compute the residuals. Use \code{\link{plot.msr}} to plot the residuals directly. } \seealso{ \code{\link{msr}}, \code{\link{kppm}} } \examples{ fit <- kppm(redwood ~ x, "Thomas") rr <- residuals(fit) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/prune.rppm.Rd0000644000176200001440000000255013160710621015264 0ustar liggesusers\name{prune.rppm} \alias{prune.rppm} \title{ Prune a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, apply pruning to reduce the complexity of the partition tree. } \usage{ \method{prune}{rppm}(tree, \dots) } \arguments{ \item{tree}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{\dots}{ Arguments passed to \code{\link[rpart]{prune.rpart}} to control the pruning procedure. } } \details{ This is a method for the generic function \code{\link[rpart]{prune}} for the class \code{"rppm"}. An object of this class is a point process model, fitted to point pattern data by recursive partitioning, by the function \code{\link{rppm}}. The recursive partition tree will be pruned using \code{\link[rpart]{prune.rpart}}. The result is another object of class \code{"rppm"}. } \value{ Object of class \code{"rppm"}. 
} \author{ \spatstatAuthors } \seealso{ \code{\link{rppm}}, \code{\link{plot.rppm}}, \code{\link{predict.rppm}}. } \examples{ # Murchison gold data mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) fit <- rppm(gold ~ dfault + greenstone, data=mur) fit prune(fit, cp=0.1) } \keyword{spatial} \keyword{models} spatstat/man/is.marked.Rd0000644000176200001440000000251213160710621015031 0ustar liggesusers\name{is.marked} \alias{is.marked} \title{Test Whether Marks Are Present} \description{ Generic function to test whether a given object (usually a point pattern or something related to a point pattern) has ``marks'' attached to the points. } \usage{ is.marked(X, \dots) } \arguments{ \item{X}{ Object to be inspected } \item{\dots}{ Other arguments. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is marked. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. Other objects related to point patterns, such as point process models, may involve marked points. This function tests whether the object \code{X} contains or involves marked points. It is generic; methods are provided for point patterns (objects of class \code{"ppp"}) and point process models (objects of class \code{"ppm"}). } \seealso{ \code{\link{is.marked.ppp}}, \code{\link{is.marked.ppm}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/print.im.Rd0000644000176200001440000000120513160710621014712 0ustar liggesusers\name{print.im} \alias{print.im} \title{Print Brief Details of an Image} \description{ Prints a very brief description of a pixel image object. 
} \usage{ \method{print}{im}(x, \dots) } \arguments{ \item{x}{Pixel image (object of class \code{"im"}).} \item{\dots}{Ignored.} } \details{ A very brief description of the pixel image \code{x} is printed. This is a method for the generic function \code{\link{print}}. } \seealso{ \code{\link{print}}, \code{\link{im.object}}, \code{\link{summary.im}} } \examples{ data(letterR) U <- as.im(letterR) U } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} spatstat/man/plot.slrm.Rd0000644000176200001440000000234013160710621015105 0ustar liggesusers\name{plot.slrm} \Rdversion{1.1} \alias{plot.slrm} \title{ Plot a Fitted Spatial Logistic Regression } \description{ Plots a fitted Spatial Logistic Regression model. } \usage{ \method{plot}{slrm}(x, ..., type = "intensity") } \arguments{ \item{x}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Extra arguments passed to \code{\link{plot.im}} to control the appearance of the plot. } \item{type}{ Character string (partially) matching one of \code{"probabilities"}, \code{"intensity"} or \code{"link"}. } } \details{ This is a method for \code{\link{plot}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). This function plots the result of \code{\link{predict.slrm}}. } \value{ None. 
} \seealso{ \code{\link{slrm}}, \code{\link{predict.slrm}}, \code{\link{plot.im}} } \examples{ data(copper) X <- copper$SouthPoints Y <- copper$SouthLines Z <- distmap(Y) fit <- slrm(X ~ Z) plot(fit) plot(fit, type="link") } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/predict.lppm.Rd0000644000176200001440000000651413160710621015563 0ustar liggesusers\name{predict.lppm} \alias{predict.lppm} \title{ Predict Point Process Model on Linear Network } \description{ Given a fitted point process model on a linear network, compute the fitted intensity or conditional intensity of the model. } \usage{ \method{predict}{lppm}(object, ..., type = "trend", locations = NULL, new.coef=NULL) } \arguments{ \item{object}{ The fitted model. An object of class \code{"lppm"}, see \code{\link{lppm}}. } \item{type}{ Type of values to be computed. Either \code{"trend"}, \code{"cif"} or \code{"se"}. } \item{locations}{ Optional. Locations at which predictions should be computed. Either a data frame with two columns of coordinates, or a binary image mask. } \item{new.coef}{ Optional. Numeric vector of model coefficients, to be used instead of the fitted coefficients \code{coef(object)} when calculating the prediction. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to determine the pixel resolution (if \code{locations} is missing). } } \details{ This function computes the fitted poin process intensity, fitted conditional intensity, or standard error of the fitted intensity, for a point process model on a linear network. It is a method for the generic \code{\link[stats]{predict}} for the class \code{"lppm"}. The argument \code{object} should be an object of class \code{"lppm"} (produced by \code{\link{lppm}}) representing a point process model on a linear network. Predicted values are computed at the locations given by the argument \code{locations}. 
If this argument is missing, then predicted values are computed at a fine grid of points on the linear network. \itemize{ \item If \code{locations} is missing or \code{NULL} (the default), the return value is a pixel image (object of class \code{"linim"} which inherits class \code{"im"}) corresponding to a discretisation of the linear network, with numeric pixel values giving the predicted values at each location on the linear network. \item If \code{locations} is a data frame, the result is a numeric vector of predicted values at the locations specified by the data frame. \item If \code{locations} is a binary mask, the result is a pixel image with predicted values computed at the pixels of the mask. } } \value{ A pixel image (object of class \code{"linim"} which inherits class \code{"im"}) or a numeric vector, depending on the argument \code{locations}. See Details. } \author{ \adrian } \seealso{ \code{\link{lpp}}, \code{\link{linim}} } \examples{ X <- runiflpp(12, simplenet) fit <- lppm(X ~ x) v <- predict(fit, type="trend") plot(v) } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. 
} \keyword{spatial} \keyword{models} spatstat/man/Smoothfun.ppp.Rd0000644000176200001440000000372713160710621015745 0ustar liggesusers\name{Smoothfun.ppp} \alias{Smoothfun} \alias{Smoothfun.ppp} \title{ Smooth Interpolation of Marks as a Spatial Function } \description{ Perform spatial smoothing of numeric values observed at a set of irregular locations, and return the result as a function of spatial location. } \usage{ Smoothfun(X, \dots) \method{Smoothfun}{ppp}(X, sigma = NULL, \dots, weights = NULL, edge = TRUE, diggle = FALSE) } \arguments{ \item{X}{ Marked point pattern (object of class \code{"ppp"}). } \item{sigma}{ Smoothing bandwidth, or bandwidth selection function, passed to \code{\link{Smooth.ppp}}. } \item{\dots}{ Additional arguments passed to \code{\link{Smooth.ppp}}. } \item{weights}{ Optional vector of weights associated with the points of \code{X}. } \item{edge,diggle}{ Logical arguments controlling the edge correction. Arguments passed to \code{\link{Smooth.ppp}}. } } \details{ The commands \code{Smoothfun} and \code{\link{Smooth}} both perform kernel-smoothed spatial interpolation of numeric values observed at irregular spatial locations. The difference is that \code{\link{Smooth}} returns a pixel image, containing the interpolated values at a grid of locations, while \code{Smoothfun} returns a \code{function(x,y)} which can be used to compute the interpolated value at \emph{any} spatial location. For purposes such as model-fitting it is more accurate to use \code{Smoothfun} to interpolate data. } \value{ A \code{function} with arguments \code{x,y}. The function also belongs to the class \code{"Smoothfun"} which has methods for \code{print} and \code{\link{as.im}}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{Smooth}} } \examples{ f <- Smoothfun(longleaf) f f(120, 80) plot(f) } \author{\adrian , \rolf and \ege. 
} \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/simulate.kppm.Rd0000644000176200001440000000514713160710621015754 0ustar liggesusers\name{simulate.kppm} \alias{simulate.kppm} \title{Simulate a Fitted Cluster Point Process Model} \description{ Generates simulated realisations from a fitted cluster point process model. } \usage{ \method{simulate}{kppm}(object, nsim = 1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, retry=10, drop=FALSE) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{nsim}{ Number of simulated realisations. } \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Ignored.} \item{window}{ Optional. Window (object of class \code{"owin"}) in which the model should be simulated. } \item{covariates}{ Optional. A named list containing new values for the covariates in the model. } \item{verbose}{ Logical. Whether to print progress reports (when \code{nsim > 1}). } \item{retry}{ Number of times to repeat the simulation if it fails (e.g. because of insufficient memory). } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"kppm"} of fitted cluster point process models. Simulations are performed by \code{\link{rThomas}}, \code{\link{rMatClust}} or \code{\link{rLGCP}} depending on the model. The return value is a list of point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). 
It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \examples{ data(redwood) fit <- kppm(redwood, ~1, "Thomas") simulate(fit, 2) } \seealso{ \code{\link{kppm}}, \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rLGCP}}, \code{\link{simulate.ppm}}, \code{\link[stats]{simulate}} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/unstack.msr.Rd0000644000176200001440000000223613160710621015427 0ustar liggesusers\name{unstack.msr} \alias{unstack.msr} \title{ Separate a Vector Measure into its Scalar Components } \description{ Converts a vector-valued measure into a list of scalar-valued measures. } \usage{ \method{unstack}{msr}(x, \dots) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic \code{\link[utils]{unstack}} for the class \code{"msr"} of measures. If \code{x} is a vector-valued measure, then \code{y <- unstack(x)} is a list of scalar-valued measures defined by the components of \code{x}. The \code{j}th entry of the list, \code{y[[j]]}, is equivalent to the \code{j}th component of the vector measure \code{x}. If \code{x} is a scalar-valued measure, then the result is a list consisting of one entry, which is \code{x}. } \value{ A list of measures, of class \code{"solist"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[utils]{unstack}} \code{\link{unstack.ppp}} \code{\link{split.msr}}. 
} \examples{ fit <- ppm(cells ~ x) m <- residuals(fit, type="score") m unstack(m) } \keyword{spatial} \keyword{manip} spatstat/man/covering.Rd0000644000176200001440000000267713160710571015010 0ustar liggesusers\name{covering} \alias{covering} \title{Cover Region with Discs} \description{ Given a spatial region, this function finds an efficient covering of the region using discs of a chosen radius. } \usage{ covering(W, r, \dots, giveup=1000) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). } \item{r}{positive number: the radius of the covering discs.} \item{\dots}{ extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution for the calculations. } \item{giveup}{ Maximum number of attempts to place additional discs. } } \value{ A point pattern (object of class \code{"ppp"}) giving the centres of the discs. } \details{ This function finds an efficient covering of the window \code{W} using discs of the given radius \code{r}. The result is a point pattern giving the centres of the discs. The algorithm tries to use as few discs as possible, but is not guaranteed to find the minimal number of discs. It begins by placing a hexagonal grid of points inside \code{W}, then adds further points until every location inside \code{W} lies no more than \code{r} units away from one of the points. } \examples{ rr <- 0.5 X <- covering(letterR, rr) plot(grow.rectangle(Frame(X), rr), type="n", main="") plot(X, pch=16, add=TRUE, col="red") plot(letterR, add=TRUE, lwd=3) plot(X \%mark\% (2*rr), add=TRUE, markscale=1) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/envelopeArray.Rd0000644000176200001440000000517413160710621015777 0ustar liggesusers\name{envelopeArray} \alias{envelopeArray} \title{ Array of Simulation Envelopes of Summary Function } \description{ Compute an array of simulation envelopes using a summary function that returns an array of curves. 
} \usage{ envelopeArray(X, fun, \dots, dataname = NULL, verb = FALSE, reuse = TRUE) } \arguments{ \item{X}{ Object containing point pattern data. A point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"lppm"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. The result of \code{fun} should be a function array (object of class \code{"fasp"}). } \item{\dots}{ Arguments passed to \code{\link{envelope}} to control the simulations, or passed to \code{fun} when evaluating the function. } \item{dataname}{ Optional character string name for the data. } \item{verb}{ Logical value indicating whether to print progress reports. } \item{reuse}{ Logical value indicating whether the envelopes in each panel should be based on the same set of simulated patterns (\code{reuse=TRUE}, the default) or on different, independent sets of simulated patterns (\code{reuse=FALSE}). } } \details{ This command is the counterpart of \code{\link{envelope}} when the function \code{fun} that is evaluated on each simulated point pattern will return an object of class \code{"fasp"} representing an array of summary functions. Simulated point patterns are generated according to the rules described for \code{\link{envelope}}. In brief, if \code{X} is a point pattern, the algorithm generates simulated point patterns of the same kind, according to complete spatial randomness. If \code{X} is a fitted model, the algorithm generates simulated point patterns according to this model. For each simulated point pattern \code{Y}, the function \code{fun} is invoked. The result \code{Z <- fun(Y, ...)} should be an object of class \code{"fasp"} representing an array of summary functions. The dimensions of the array \code{Z} should be the same for each simulated pattern \code{Y}. 
This algorithm finds the simulation envelope of the summary functions in each cell of the array. } \value{ An object of class \code{"fasp"} representing an array of envelopes. } \author{ \spatstatAuthors. } \seealso{ \code{\link{envelope}}, \code{\link{alltypes}}. } \examples{ A <- envelopeArray(finpines, markcrosscorr, nsim=9) plot(A) } \keyword{spatial} \keyword{nonparametric} \keyword{iteration} spatstat/man/plot.linnet.Rd0000644000176200001440000000232413160710621015423 0ustar liggesusers\name{plot.linnet} \alias{plot.linnet} \title{ Plot a linear network } \description{ Plots a linear network } \usage{ \method{plot}{linnet}(x, ..., main=NULL, add=FALSE, vertices=FALSE, window=FALSE, do.plot=TRUE) } \arguments{ \item{x}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{ Arguments passed to \code{\link{plot.psp}} controlling the plot. } \item{main}{ Main title for plot. Use \code{main=""} to suppress it. } \item{add}{ Logical. If code{TRUE}, superimpose the graphics over the current plot. If \code{FALSE}, generate a new plot. } \item{vertices}{ Logical. Whether to plot the vertices as well. } \item{window}{ Logical. Whether to plot the window containing the linear network. } \item{do.plot}{ Logical. Whether to actually perform the plot. } } \details{ This is the plot method for class \code{"linnet"}. } \value{ An (invisible) object of class \code{"owin"} giving the bounding box of the network. 
} \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{linnet}} } \examples{ plot(simplenet) } \keyword{spatial} spatstat/man/perspPoints.Rd0000644000176200001440000000457413160710621015514 0ustar liggesusers\name{perspPoints} \alias{perspPoints} \alias{perspSegments} \alias{perspLines} \alias{perspContour} \title{ Draw Points or Lines on a Surface Viewed in Perspective } \description{ After a surface has been plotted in a perspective view using \code{\link{persp.im}}, these functions can be used to draw points or lines on the surface. } \usage{ perspPoints(x, y=NULL, \dots, Z, M) perspLines(x, y = NULL, \dots, Z, M) perspSegments(x0, y0 = NULL, x1 = NULL, y1 = NULL, \dots, Z, M) perspContour(Z, M, \dots, nlevels=10, levels=pretty(range(Z), nlevels)) } \arguments{ \item{x,y}{ Spatial coordinates, acceptable to \code{\link[grDevices]{xy.coords}}, for the points or lines on the horizontal plane. } \item{Z}{ Pixel image (object of class \code{"im"}) specifying the surface heights. } \item{M}{ Projection matrix returned from \code{\link{persp.im}} when \code{Z} was plotted. } \item{\dots}{ Graphical arguments passed to \code{\link[graphics]{points}}, \code{\link[graphics]{lines}} or \code{\link[graphics]{segments}} to control the drawing. } \item{x0,y0,x1,y1}{ Spatial coordinates of the line segments, on the horizontal plane. Alternatively \code{x0} can be a line segment pattern (object of class \code{"psp"}) and \code{y0,x1,y1} can be \code{NULL}. } \item{nlevels}{Number of contour levels} \item{levels}{Vector of heights of contours.} } \details{ After a surface has been plotted in a perspective view, these functions can be used to draw points or lines on the surface. The user should already have called \code{\link{persp.im}} in the form \code{M <- persp(Z, visible=TRUE, ...)} to display the perspective view of the surface \code{Z}. Only points and lines which are visible from the viewer's standpoint will be drawn. 
} \value{ Same as the return value from \code{\link[graphics]{points}} or \code{\link[graphics]{segments}}. } \seealso{ \code{\link{persp.im}} } \examples{ M <- persp(bei.extra$elev, colmap=terrain.colors(128), apron=TRUE, theta=-30, phi=20, zlab="Elevation", main="", expand=6, visible=TRUE, shade=0.3) perspContour(bei.extra$elev, M=M, col="pink", nlevels=12) perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3, col="chartreuse") } \author{ \spatstatAuthors } \keyword{spatial} \keyword{hplot} spatstat/man/add.texture.Rd0000644000176200001440000000313713160710571015413 0ustar liggesusers\name{add.texture} \alias{add.texture} \title{ Fill Plot With Texture } \description{ Draws a simple texture inside a region on the plot. } \usage{ add.texture(W, texture = 4, spacing = NULL, ...) } \arguments{ \item{W}{ Window (object of class \code{"owin"}) inside which the texture should be drawn. } \item{texture}{ Integer from 1 to 8 identifying the type of texture. See Details. } \item{spacing}{ Spacing between elements of the texture, in units of the current plot. } \item{\dots}{ Further arguments controlling the plot colour, line width etc. } } \details{ The chosen texture, confined to the window \code{W}, will be added to the current plot. The available textures are: \describe{ \item{texture=1:}{ Small crosses arranged in a square grid. } \item{texture=2:}{ Parallel vertical lines. } \item{texture=3:}{ Parallel horizontal lines. } \item{texture=4:}{ Parallel diagonal lines at 45 degrees from the horizontal. } \item{texture=5:}{ Parallel diagonal lines at 135 degrees from the horizontal. } \item{texture=6:}{ Grid of horizontal and vertical lines. } \item{texture=7:}{ Grid of diagonal lines at 45 and 135 degrees from the horizontal. } \item{texture=8:}{ Grid of hexagons. } } } \author{ \adrian and \rolf } \seealso{ \code{\link{owin}}, \code{\link{plot.owin}}, \code{\link{textureplot}}, \code{\link{texturemap}}. 
} \examples{ W <- Window(chorley) plot(W, main="") add.texture(W, 7) } \keyword{spatial} \keyword{hplot} spatstat/man/rtemper.Rd0000644000176200001440000000503313160710621014633 0ustar liggesusers\name{rtemper} \alias{rtemper} \title{ Simulated Annealing or Simulated Tempering for Gibbs Point Processes } \description{ Performs simulated annealing or simulated tempering for a Gibbs point process model using a specified annealing schedule. } \usage{ rtemper(model, invtemp, nrep, \dots, start = NULL, verbose = FALSE) } \arguments{ \item{model}{ A Gibbs point process model: a fitted Gibbs point process model (object of class \code{"ppm"}), or any data acceptable to \code{\link{rmhmodel}}. } \item{invtemp}{ A numeric vector of positive numbers. The sequence of values of inverse temperature that will be used. } \item{nrep}{ An integer vector of the same length as \code{invtemp}. The value \code{nrep[i]} specifies the number of steps of the Metropolis-Hastings algorithm that will be performed at inverse temperature \code{invtemp[i]}. } \item{start}{ Initial starting state for the simulation. Any data acceptable to \code{\link{rmhstart}}. } \item{\dots}{ Additional arguments passed to \code{\link{rmh.default}}. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The Metropolis-Hastings simulation algorithm \code{\link{rmh}} is run for \code{nrep[1]} steps at inverse temperature \code{invtemp[1]}, then for \code{nrep[2]} steps at inverse temperature \code{invtemp[2]}, and so on. Setting the inverse temperature to a value \eqn{\alpha}{alpha} means that the probability density of the Gibbs model, \eqn{f(x)}, is replaced by \eqn{g(x) = C\, f(x)^\alpha}{g(x) = C f(x)^alpha} where \eqn{C} is a normalising constant depending on \eqn{\alpha}{alpha}. Larger values of \eqn{\alpha}{alpha} exaggerate the high and low values of probability density, while smaller values of \eqn{\alpha}{alpha} flatten out the probability density. 
For example if the original \code{model} is a Strauss process, the modified model is close to a hard core process for large values of inverse temperature, and close to a Poisson process for small values of inverse temperature. } \value{ A point pattern (object of class \code{"ppp"}). } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{rmh.default}}, \code{\link{rmh}}. } \examples{ stra <- rmhmodel(cif="strauss", par=list(beta=2,gamma=0.2,r=0.7), w=square(10)) nr <- if(interactive()) 1e5 else 1e4 Y <- rtemper(stra, c(1, 2, 4, 8), nr * (1:4), verbose=TRUE) } \keyword{spatial} \keyword{datagen} spatstat/man/markconnect.Rd0000644000176200001440000001421513160710621015463 0ustar liggesusers\name{markconnect} \alias{markconnect} \title{ Mark Connection Function } \description{ Estimate the marked connection function of a multitype point pattern. } \usage{ markconnect(X, i, j, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. 
} \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{ If \code{TRUE}, normalise the pair connection function by dividing it by \eqn{p_i p_j}{p[i]*p[j]}, the estimated probability that randomly-selected points will have marks \eqn{i} and \eqn{j}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark connection function \eqn{p_{ij}(r)}{p[i,j](r)} has been estimated } \item{theo}{the theoretical value of \eqn{p_{ij}(r)}{p[i,j](r)} when the marks attached to different points are independent } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{p_{ij}(r)}{p[i,j](r)} obtained by the edge corrections named. } \details{ The mark connection function \eqn{p_{ij}(r)}{p[i,j](r)} of a multitype point process \eqn{X} is a measure of the dependence between the types of two points of the process a distance \eqn{r} apart. Informally \eqn{p_{ij}(r)}{p[i,j](r)} is defined as the conditional probability, given that there is a point of the process at a location \eqn{u} and another point of the process at a location \eqn{v} separated by a distance \eqn{||u-v|| = r}, that the first point is of type \eqn{i} and the second point is of type \eqn{j}. See Stoyan and Stoyan (1994). If the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{p_{ij}(r) \equiv p_i p_j}{p[i,j](r) = p[i]p[j]} where \eqn{p_i}{p[i]} denotes the probability that a point is of type \eqn{i}. Values larger than this, \eqn{p_{ij}(r) > p_i p_j}{p[i,j](r) > p[i]p[j]}, indicate positive association between the two types, while smaller values indicate negative association. 
The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a multitype point pattern (a marked point pattern with factor-valued marks). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[i,j](r)} is estimated. There is a sensible default. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } } Note that the estimator assumes the process is stationary (spatially homogeneous). The mark connection function is estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \seealso{ Multitype pair correlation \code{\link{pcfcross}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}}. Use \code{\link{alltypes}} to compute the mark connection functions between all pairs of types. Mark correlation \code{\link{markcorr}} and mark variogram \code{\link{markvario}} for numeric-valued marks. } \examples{ # Hughes' amacrine data # Cells marked as 'on'/'off' data(amacrine) M <- markconnect(amacrine, "on", "off") plot(M) # Compute for all pairs of types at once plot(alltypes(amacrine, markconnect)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/fryplot.Rd0000644000176200001440000001315313160710621014656 0ustar liggesusers\name{fryplot} \alias{fryplot} \alias{frypoints} \title{Fry Plot of Point Pattern} \description{ Displays the Fry plot (Patterson plot) of a spatial point pattern. } \usage{ fryplot(X, ..., width=NULL, from=NULL, to=NULL, axes=FALSE) frypoints(X, from=NULL, to=NULL, dmax=Inf) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link{as.ppp}}. } \item{\dots}{Optional arguments to control the appearance of the plot.} \item{width}{Optional parameter indicating the width of a box for a zoomed-in view of the Fry plot near the origin.} \item{from,to}{ Optional. Subset indices specifying which points of \code{X} will be considered when forming the vectors (drawn from each point of \code{from}, to each point of \code{to}.) } \item{axes}{ Logical value indicating whether to draw axes, crossing at the origin. } \item{dmax}{ Maximum distance between points. Pairs at greater distances do not contribute to the result. The default means there is no maximum distance. } } \details{ The function \code{fryplot} generates a Fry plot (or Patterson plot); \code{frypoints} returns the points of the Fry plot as a point pattern dataset. 
Fry (1979) and Hanna and Fry (1979) introduced a manual graphical method for investigating features of a spatial point pattern of mineral deposits. A transparent sheet, marked with an origin or centre point, is placed over the point pattern. The transparent sheet is shifted so that the origin lies over one of the data points, and the positions of all the \emph{other} data points are copied onto the transparent sheet. This procedure is repeated for each data point in turn. The resulting plot (the Fry plot) is a pattern of \eqn{n(n-1)} points, where \eqn{n} is the original number of data points. This procedure was previously proposed by Patterson (1934, 1935) for studying inter-atomic distances in crystals, and is also known as a Patterson plot. The function \code{fryplot} generates the Fry/Patterson plot. Standard graphical parameters such as \code{main}, \code{pch}, \code{lwd}, \code{col}, \code{bg}, \code{cex} can be used to control the appearance of the plot. To zoom in (to view only a subset of the Fry plot at higher magnification), use the argument \code{width} to specify the width of a rectangular field of view centred at the origin, or the standard graphical arguments \code{xlim} and \code{ylim} to specify another rectangular field of view. (The actual field of view may be slightly larger, depending on the graphics device.) The function \code{frypoints} returns the points of the Fry plot as a point pattern object. There may be a large number of points in this pattern, so this function should be used only if further analysis of the Fry plot is required. Fry plots are particularly useful for recognising anisotropy in regular point patterns. A void around the origin in the Fry plot suggests regularity (inhibition between points) and the shape of the void gives a clue to anisotropy in the pattern. Fry plots are also useful for detecting periodicity or rounding of the spatial coordinates. 
In mathematical terms, the Fry plot of a point pattern \code{X} is simply a plot of the vectors \code{X[i] - X[j]} connecting all pairs of distinct points in \code{X}. The Fry plot is related to the \eqn{K} function (see \code{\link{Kest}}) and the reduced second moment measure (see \code{\link{Kmeasure}}). For example, the number of points in the Fry plot lying within a circle of given radius is an unnormalised and uncorrected version of the \eqn{K} function. The Fry plot has a similar appearance to the plot of the reduced second moment measure \code{\link{Kmeasure}} when the smoothing parameter \code{sigma} is very small. The Fry plot does not adjust for the effect of the size and shape of the sampling window. The density of points in the Fry plot tapers off near the edges of the plot. This is an edge effect, a consequence of the bounded sampling window. In geological applications this is usually not important, because interest is focused on the behaviour near the origin where edge effects can be ignored. To correct for the edge effect, use \code{\link{Kmeasure}} or \code{\link{Kest}} or its relatives. } \value{ \code{fryplot} returns \code{NULL}. \code{frypoints} returns a point pattern (object of class \code{"ppp"}). } \references{ Fry, N. (1979) Random point distributions and strain measurement in rocks. \emph{Tectonophysics} \bold{60}, 89--105. Hanna, S.S. and Fry, N. (1979) A comparison of methods of strain determination in rocks from southwest Dyfed (Pembrokeshire) and adjacent areas. \emph{Journal of Structural Geology} \bold{1}, 155--162. Patterson, A.L. (1934) A Fourier series method for the determination of the component of inter-atomic distances in crystals. \emph{Physics Reviews} \bold{46}, 372--376. Patterson, A.L. (1935) A direct method for the determination of the components of inter-atomic distances in crystals. \emph{Zeitschrift fuer Krystallographie} \bold{90}, 517--554. 
} \seealso{ \code{\link{Kmeasure}}, \code{\link{Kest}} } \examples{ ## unmarked data fryplot(cells) Y <- frypoints(cells) ## numerical marks fryplot(longleaf, width=4, axes=TRUE) ## multitype points fryplot(amacrine, width=0.2, from=(marks(amacrine) == "on"), chars=c(3,16), cols=2:3, main="Fry plot centred at an On-cell") points(0,0) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/rmhmodel.Rd0000644000176200001440000000570013160710621014765 0ustar liggesusers\name{rmhmodel} \alias{rmhmodel} \title{Define Point Process Model for Metropolis-Hastings Simulation.} \description{ Builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ rmhmodel(...) } \arguments{ \item{\dots}{Arguments specifying the point process model in some format. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ Simulated realisations of many point process models can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. The algorithm requires the model to be specified in a particular format: an object of class \code{"rmhmodel"}. The function \code{\link{rmhmodel}} takes a description of a point process model in some other format, and converts it into an object of class \code{"rmhmodel"}. It also checks that the parameters of the model are valid. The function \code{\link{rmhmodel}} is generic, with methods for \describe{ \item{fitted point process models:}{ an object of class \code{"ppm"}, obtained by a call to the model-fitting function \code{\link{ppm}}. See \code{\link{rmhmodel.ppm}}. } \item{lists:}{ a list of parameter values in a certain format. See \code{\link{rmhmodel.list}}. } \item{default:}{ parameter values specified as separate arguments to \code{\dots}. See \code{\link{rmhmodel.default}}. 
} } } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763 -- 770. Geyer, C.J. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{rmhmodel.ppm}}, \code{\link{rmhmodel.default}}, \code{\link{rmhmodel.list}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \code{\link{Strauss}}, \code{\link{Softcore}}, \code{\link{StraussHard}}, \code{\link{Triplets}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{DiggleGratton}}, \code{\link{PairPiece}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/fv.Rd0000644000176200001440000001627613160710621013603 0ustar liggesusers\name{fv} \alias{fv} \title{ Create a Function Value Table } \description{ Advanced Use Only. This low-level function creates an object of class \code{"fv"} from raw numerical data. } \usage{ fv(x, argu = "r", ylab = NULL, valu, fmla = NULL, alim = NULL, labl = names(x), desc = NULL, unitname = NULL, fname = NULL, yexp = ylab) } \arguments{ \item{x}{ A data frame with at least 2 columns containing the values of the function argument and the corresponding values of (one or more versions of) the function. } \item{argu}{ String. The name of the column of \code{x} that contains the values of the function argument. 
} \item{ylab}{ Either \code{NULL}, or an \R language expression representing the mathematical name of the function. See Details. } \item{valu}{ String. The name of the column of \code{x} that should be taken as containing the function values, in cases where a single column is required. } \item{fmla}{ Either \code{NULL}, or a \code{formula} specifying the default plotting behaviour. See Details. } \item{alim}{ Optional. The default range of values of the function argument for which the function will be plotted. Numeric vector of length 2. } \item{labl}{ Optional. Plot labels for the columns of \code{x}. A vector of strings, with one entry for each column of \code{x}. } \item{desc}{ Optional. Descriptions of the columns of \code{x}. A vector of strings, with one entry for each column of \code{x}. } \item{unitname}{ Optional. Name of the unit (usually a unit of length) in which the function argument is expressed. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } \item{fname}{ Optional. The name of the function itself. A character string. } \item{yexp}{ Optional. Alternative form of \code{ylab} more suitable for annotating an axis of the plot. See Details. } } \details{ This documentation is provided for experienced programmers who want to modify the internal behaviour of \pkg{spatstat}. Other users please see \code{\link{fv.object}}. The low-level function \code{fv} is used to create an object of class \code{"fv"} from raw numerical data. The data frame \code{x} contains the numerical data. It should have one column (typically but not necessarily named \code{"r"}) giving the values of the function argument for which the function has been evaluated; and at least one other column, containing the corresponding values of the function. Typically there is more than one column of function values. 
These columns typically give the values of different versions or estimates of the same function, for example, different estimates of the \eqn{K} function obtained using different edge corrections. However they may also contain the values of related functions such as the derivative or hazard rate. \code{argu} specifies the name of the column of \code{x} that contains the values of the function argument (typically \code{argu="r"} but this is not compulsory). \code{valu} specifies the name of another column that contains the \sQuote{recommended} estimate of the function. It will be used to provide function values in those situations where a single column of data is required. For example, \code{\link{envelope}} computes its simulation envelopes using the recommended value of the summary function. \code{fmla} specifies the default plotting behaviour. It should be a formula, or a string that can be converted to a formula. Variables in the formula are names of columns of \code{x}. See \code{\link{plot.fv}} for the interpretation of this formula. \code{alim} specifies the recommended range of the function argument. This is used in situations where statistical theory or statistical practice indicates that the computed estimates of the function are not trustworthy outside a certain range of values of the function argument. By default, \code{\link{plot.fv}} will restrict the plot to this range. \code{fname} is a string giving the name of the function itself. For example, the \eqn{K} function would have \code{fname="K"}. \code{ylab} is a mathematical expression for the function value, used when labelling an axis of the plot, or when printing a description of the function. It should be an \R language object. For example the \eqn{K} function's mathematical name \eqn{K(r)} is rendered by \code{ylab=quote(K(r))}. If \code{yexp} is present, then \code{ylab} will be used only for printing, and \code{yexp} will be used for annotating axes in a plot. 
(Otherwise \code{yexp} defaults to \code{ylab}). For example the cross-type \eqn{K} function \eqn{K_{1,2}(r)}{K[1,2](r)} is rendered by something like \code{ylab=quote(Kcross[1,2](r))} and \code{yexp=quote(Kcross[list(1,2)](r))} to get the most satisfactory behaviour. (A useful tip: use \code{\link{substitute}} instead of \code{\link{quote}} to insert values of variables into an expression, e.g. \code{substitute(Kcross[i,j](r), list(i=42,j=97))} yields the same as \code{quote(Kcross[42, 97](r))}.) \code{labl} is a character vector specifying plot labels for each column of \code{x}. These labels will appear on the plot axes (in non-default plots), legends and printed output. Entries in \code{labl} may contain the string \code{"\%s"} which will be replaced by \code{fname}. For example the border-corrected estimate of the \eqn{K} function has label \code{"\%s[bord](r)"} which becomes \code{"K[bord](r)"}. \code{desc} is a character vector containing intelligible explanations of each column of \code{x}. Entries in \code{desc} may contain the string \code{"\%s"} which will be replaced by \code{ylab}. For example the border correction estimate of the \eqn{K} function has description \code{"border correction estimate of \%s"}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}. } \seealso{ See \code{\link{plot.fv}} for plotting an \code{"fv"} object. See \code{\link{as.function.fv}} to convert an \code{"fv"} object to an \R function. Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects. Use \code{\link{bind.fv}} to glue additional columns onto an existing \code{"fv"} object. Use \code{\link{range.fv}} to compute the range of \eqn{y} values for a function, and \code{\link{with.fv}} for more complicated calculations. The functions \code{fvnames}, \code{fvnames<-} allow the user to use standard abbreviations to refer to columns of an \code{"fv"} object. 
\emph{Undocumented} functions for modifying an \code{"fv"} object include \code{tweak.fv.entry} and \code{rebadge.fv}. } \author{\adrian and \rolf } \examples{ df <- data.frame(r=seq(0,5,by=0.1)) df <- transform(df, a=pi*r^2, b=3*r^2) X <- fv(df, "r", quote(A(r)), "a", cbind(a, b) ~ r, alim=c(0,4), labl=c("r", "\%s[true](r)", "\%s[approx](r)"), desc=c("radius of circle", "true area \%s", "rough area \%s"), fname="A") X } \keyword{spatial} \keyword{classes} spatstat/man/rknn.Rd0000644000176200001440000000365113160710621014131 0ustar liggesusers\name{rknn} \alias{dknn} \alias{pknn} \alias{qknn} \alias{rknn} \title{ Theoretical Distribution of Nearest Neighbour Distance } \description{ Density, distribution function, quantile function and random generation for the random distance to the \eqn{k}th nearest neighbour in a Poisson point process in \eqn{d} dimensions. } \usage{ dknn(x, k = 1, d = 2, lambda = 1) pknn(q, k = 1, d = 2, lambda = 1) qknn(p, k = 1, d = 2, lambda = 1) rknn(n, k = 1, d = 2, lambda = 1) } \arguments{ \item{x,q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations to be generated.} \item{k}{order of neighbour.} \item{d}{dimension of space.} \item{lambda}{intensity of Poisson point process.} } \details{ In a Poisson point process in \eqn{d}-dimensional space, let the random variable \eqn{R} be the distance from a fixed point to the \eqn{k}-th nearest random point, or the distance from a random point to the \eqn{k}-th nearest other random point. Then \eqn{R^d} has a Gamma distribution with shape parameter \eqn{k} and rate \eqn{\lambda * \alpha}{lambda * alpha} where \eqn{\alpha}{alpha} is a constant (equal to the volume of the unit ball in \eqn{d}-dimensional space). See e.g. Cressie (1991, page 61). These functions support calculation and simulation for the distribution of \eqn{R}. 
} \value{ A numeric vector: \code{dknn} returns the probability density, \code{pknn} returns cumulative probabilities (distribution function), \code{qknn} returns quantiles, and \code{rknn} generates random deviates. } \references{ Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. } \author{\adrian and \rolf } \examples{ x <- seq(0, 5, length=20) densities <- dknn(x, k=3, d=2) cdfvalues <- pknn(x, k=3, d=2) randomvalues <- rknn(100, k=3, d=2) deciles <- qknn((1:9)/10, k=3, d=2) } \keyword{spatial} \keyword{distribution} spatstat/man/Concom.Rd0000644000176200001440000001337213160710571014404 0ustar liggesusers\name{Concom} \alias{Concom} \title{The Connected Component Process Model} \description{ Creates an instance of the Connected Component point process model which can then be fitted to point pattern data. } \usage{ Concom(r) } \arguments{ \item{r}{Threshold distance} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the connected component process with disc radius \eqn{r}. } \details{ This function defines the interpoint interaction structure of a point process called the connected component process. It can be used to fit this model to point pattern data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the connected component interaction is yielded by the function \code{Concom()}. See the examples below. In \bold{standard form}, the connected component process (Baddeley and \ifelse{latex}{\out{M\o ller}}{Moller}, 1989) with disc radius \eqn{r}, intensity parameter \eqn{\kappa}{\kappa} and interaction parameter \eqn{\gamma}{\gamma} is a point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \kappa^{n(x)} \gamma^{-C(x)} }{ f(x[1],\ldots,x[n]) = \alpha . \kappa^n(x) . 
\gamma^(-C(x)) } for a point pattern \eqn{x}, where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{C(x)} is defined below. Here \eqn{\alpha}{\alpha} is a normalising constant. To define the term \code{C(x)}, suppose that we construct a planar graph by drawing an edge between each pair of points \eqn{x_i,x_j}{x[i],x[j]} which are less than \eqn{r} units apart. Two points belong to the same connected component of this graph if they are joined by a path in the graph. Then \eqn{C(x)} is the number of connected components of the graph. The interaction parameter \eqn{\gamma}{\gamma} can be any positive number. If \eqn{\gamma = 1}{\gamma = 1} then the model reduces to a Poisson process with intensity \eqn{\kappa}{\kappa}. If \eqn{\gamma < 1}{\gamma < 1} then the process is regular, while if \eqn{\gamma > 1}{\gamma > 1} the process is clustered. Thus, a connected-component interaction process can be used to model either clustered or regular point patterns. In \pkg{spatstat}, the model is parametrised in a different form, which is easier to interpret. In \bold{canonical form}, the probability density is rewritten as \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{-U(x)} }{ f(x_1,\ldots,x_n) = \alpha . \beta^n(x) \gamma^(-U(x)) } where \eqn{\beta}{\beta} is the new intensity parameter and \eqn{U(x) = C(x) - n(x)} is the interaction potential. In this formulation, each isolated point of the pattern contributes a factor \eqn{\beta}{\beta} to the probability density (so the first order trend is \eqn{\beta}{\beta}). The quantity \eqn{U(x)} is a true interaction potential, in the sense that \eqn{U(x) = 0} if the point pattern \eqn{x} does not contain any points that lie close together. When a new point \eqn{u} is added to an existing point pattern \eqn{x}, the rescaled potential \eqn{-U(x)} increases by zero or a positive integer. 
The increase is zero if \eqn{u} is not close to any point of \eqn{x}. The increase is a positive integer \eqn{k} if there are \eqn{k} different connected components of \eqn{x} that lie close to \eqn{u}. Addition of the point \eqn{u} contributes a factor \eqn{\beta \gamma^\delta}{\beta * \gamma^\delta} to the probability density, where \eqn{\delta}{\delta} is the increase in potential. If desired, the original parameter \eqn{\kappa}{\kappa} can be recovered from the canonical parameter by \eqn{\kappa = \beta\gamma}{\kappa = \beta * \gamma}. The \emph{nonstationary} connected component process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{\beta(x[i])} of location, rather than a constant beta. Note the only argument of \code{Concom()} is the threshold distance \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(\beta)} and \eqn{\log(\gamma)}{log(\gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Concom()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \section{Edge correction}{ The interaction distance of this process is infinite. There are no well-established procedures for edge correction for fitting such models, and accordingly the model-fitting function \code{\link{ppm}} will give an error message saying that the user must specify an edge correction. A reasonable solution is to use the border correction at the same distance \code{r}, as shown in the Examples. } \examples{ # prints a sensible description of itself Concom(r=0.1) # Fit the stationary connected component process to redwood data ppm(redwood, ~1, Concom(r=0.07), rbord=0.07) # Fit the stationary connected component process to `cells' data ppm(cells, ~1, Concom(r=0.06), rbord=0.06) # gamma=0 indicates hard core process. 
# Fit a nonstationary connected component model # with log-cubic polynomial trend \dontrun{ ppm(swedishpines, ~polynom(x/10,y/10,3), Concom(r=7), rbord=7) } } \references{ Baddeley, A.J. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (1989) Nearest-neighbour Markov point processes and random sets. \emph{International Statistical Review} \bold{57}, 89--121. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/gridweights.Rd0000644000176200001440000000436213160710621015501 0ustar liggesusers\name{gridweights} \alias{gridweights} \title{Compute Quadrature Weights Based on Grid Counts} \description{ Computes quadrature weights for a given set of points, using the ``counting weights'' for a grid of rectangular tiles. } \usage{ gridweights(X, ntile, \dots, window=NULL, verbose=FALSE, npix=NULL, areas=NULL) } \arguments{ \item{X}{Data defining a point pattern.} \item{ntile}{Number of tiles in each row and column of the rectangular grid. An integer vector of length 1 or 2. } \item{\dots}{Ignored.} \item{window}{Default window for the point pattern.} \item{verbose}{Logical flag. If \code{TRUE}, information will be printed about the computation of the grid weights. } \item{npix}{Dimensions of pixel grid to use when computing a digital approximation to the tile areas. } \item{areas}{Vector of areas of the tiles, if they are already known.} } \value{ Vector of nonnegative weights for each point in \code{X}. } \details{ This function computes a set of quadrature weights for a given pattern of points (typically comprising both ``data'' and ``dummy'' points). See \code{\link{quad.object}} for an explanation of quadrature weights and quadrature schemes. The weights are computed by the ``counting weights'' rule based on a regular grid of rectangular tiles. First \code{X} and (optionally) \code{window} are converted into a point pattern object. 
Then the bounding rectangle of the window of the point pattern is divided into a regular \code{ntile[1] * ntile[2]} grid of rectangular tiles. The weight attached to a point of \code{X} is the area of the tile in which it lies, divided by the number of points of \code{X} lying in that tile. For non-rectangular windows the tile areas are currently calculated by approximating the window as a binary mask. The accuracy of this approximation is controlled by \code{npix}, which becomes the argument \code{dimyx} of \code{\link{as.mask}}. } \seealso{ \code{\link{quad.object}}, \code{\link{dirichletWeights}} } \examples{ Q <- quadscheme(runifpoispp(10)) X <- as.ppp(Q) # data and dummy points together w <- gridweights(X, 10) w <- gridweights(X, c(10, 10)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/predict.rppm.Rd0000644000176200001440000000511413160710621015564 0ustar liggesusers\name{predict.rppm} \alias{fitted.rppm} \alias{predict.rppm} \title{ Make Predictions From a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, compute the predicted intensity of the model. } \usage{ \method{predict}{rppm}(object, \dots) \method{fitted}{rppm}(object, \dots) } \arguments{ \item{object}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{\dots}{ Optional arguments passed to \code{\link{predict.ppm}} to specify the locations where prediction is required. (Ignored by \code{fitted.rppm}) } } \details{ These functions are methods for the generic functions \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}. They compute the fitted intensity of a point process model. The argument \code{object} should be a fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. 
The \code{fitted} method computes the fitted intensity at the original data points, yielding a numeric vector with one entry for each data point. The \code{predict} method computes the fitted intensity at any locations. By default, predictions are calculated at a regular grid of spatial locations, and the result is a pixel image giving the predicted intensity values at these locations. Alternatively, predictions can be performed at other locations, or a finer grid of locations, or only at certain specified locations, using additional arguments \code{\dots} which will be interpreted by \code{\link{predict.ppm}}. Common arguments are \code{ngrid} to increase the grid resolution, \code{window} to specify the prediction region, and \code{locations} to specify the exact locations of predictions. See \code{\link{predict.ppm}} for details of these arguments. Predictions are computed by evaluating the explanatory covariates at each desired location, and applying the recursive partitioning rule to each set of covariate values. } \value{ The result of \code{fitted.rppm} is a numeric vector. The result of \code{predict.rppm} is a pixel image, a list of pixel images, or a numeric vector. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rppm}}, \code{\link{plot.rppm}} } \examples{ fit <- rppm(unmark(gorillas) ~ vegetation, data=gorillas.extra) plot(predict(fit)) lambdaX <- fitted(fit) lambdaX[1:5] # Mondriaan pictures plot(predict(rppm(redwoodfull ~ x + y))) points(redwoodfull) } \keyword{spatial} \keyword{models} spatstat/man/Gdot.Rd0000644000176200001440000002132313160710571014056 0ustar liggesusers\name{Gdot} \alias{Gdot} \title{ Multitype Nearest Neighbour Distance Function (i-to-any) } \description{ For a multitype point pattern, estimate the distribution of the distance from a point of type \eqn{i} to the nearest other point of any type. 
} \usage{ Gdot(X, i, r=NULL, breaks=NULL, \dots, correction=c("km", "rs", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the distance distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{i\bullet}(r)}{Gi.(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{i\bullet}(r)}{Gi.(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)}, i.e. 
the empirical distribution of the distances from each point of type \eqn{i} to the nearest other point of any type. } \item{theo}{the theoretical value of \eqn{G_{i\bullet}(r)}{Gi.(r)} for a marked Poisson process with the same estimated intensity (see below). } } \details{ This function \code{Gdot} and its companions \code{\link{Gcross}} and \code{\link{Gmulti}} are generalisations of the function \code{\link{Gest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level.) The ``dot-type'' (type \eqn{i} to any type) nearest neighbour distance distribution function of a multitype point process is the cumulative distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} of the distance from a typical random point of the process with type \eqn{i} to the nearest other point of the process, regardless of type. An estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the type \eqn{i} points were independent of all other points, then \eqn{G_{i\bullet}(r)}{Gi.(r)} would equal \eqn{G_{ii}(r)}{Gii(r)}, the nearest neighbour distance distribution function of the type \eqn{i} points alone. 
For a multitype Poisson point process with total intensity \eqn{\lambda}{lambda}, we have \deqn{G_{i\bullet}(r) = 1 - e^{ - \lambda \pi r^2} }{% Gi.(r) = 1 - exp( - lambda * pi * r^2)} Deviations between the empirical and theoretical \eqn{G_{i\bullet}}{Gi.} curves may suggest dependence of the type \eqn{i} points on the other points. This algorithm estimates the distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{i\bullet}(r)}{Gi.(r)}. This estimate should be used with caution as \eqn{G_{i\bullet}(r)}{Gi.(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{i\bullet}}{Gi.}. 
However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{i\bullet}}{Gi.} as if it were an unbiased estimator of \eqn{G_{i\bullet}}{Gi.}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. The function \eqn{G_{i\bullet}}{Gi.} does not necessarily have a density. The reduced sample estimator of \eqn{G_{i\bullet}}{Gi.} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}}{Gi.} is always nondecreasing but its maximum value may be less than \eqn{1}. 
} \seealso{ \code{\link{Gcross}}, \code{\link{Gest}}, \code{\link{Gmulti}} } \examples{ # amacrine cells data G0. <- Gdot(amacrine, "off") plot(G0.) # synthetic example pp <- runifpoispp(30) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) G <- Gdot(pp, "0") G <- Gdot(pp, 0) # equivalent } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/as.interact.Rd0000644000176200001440000000364113160710571015377 0ustar liggesusers\name{as.interact} \alias{as.interact} \alias{as.interact.fii} \alias{as.interact.interact} \alias{as.interact.ppm} \title{Extract Interaction Structure} \description{ Extracts the interpoint interaction structure from a point pattern model. } \usage{ as.interact(object) \method{as.interact}{fii}(object) \method{as.interact}{interact}(object) \method{as.interact}{ppm}(object) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"}) or an interpoint interaction structure (object of class \code{"interact"}). } } \details{ The function \code{as.interact} extracts the interpoint interaction structure from a suitable object. An object of class \code{"interact"} describes an interpoint interaction structure, before it has been fitted to point pattern data. The irregular parameters of the interaction (such as the interaction range) are fixed, but the regular parameters (such as interaction strength) are undetermined. Objects of this class are created by the functions \code{\link{Poisson}}, \code{\link{Strauss}} and so on. The main use of such objects is in a call to \code{\link{ppm}}. The function \code{as.interact} is generic, with methods for the classes \code{"ppm"}, \code{"fii"} and \code{"interact"}. The result is an object of class \code{"interact"} which can be printed. } \section{Note on parameters}{ This function does \bold{not} extract the fitted coefficients of the interaction. 
To extract the fitted interaction including the fitted coefficients, use \code{\link{fitin}}. } \value{ An object of class \code{"interact"} representing the interpoint interaction. This object can be printed and plotted. } \author{\adrian and \rolf } \seealso{ \code{\link{fitin}}, \code{\link{ppm}}. } \examples{ data(cells) model <- ppm(cells, ~1, Strauss(0.07)) f <- as.interact(model) f } \keyword{spatial} \keyword{models} spatstat/man/plot.quad.Rd0000644000176200001440000000427513160710621015073 0ustar liggesusers\name{plot.quad} \alias{plot.quad} \title{Plot a Spatial Quadrature Scheme} \description{ Plot a two-dimensional spatial quadrature scheme. } \usage{ \method{plot}{quad}(x, ..., main, add=FALSE, dum=list(), tiles=FALSE) } \arguments{ \item{x}{ The spatial quadrature scheme to be plotted. An object of class \code{"quad"}. } \item{\dots}{ extra arguments controlling the plotting of the data points of the quadrature scheme. } \item{main}{ text to be displayed as a title above the plot. } \item{add}{ Logical value indicating whether the graphics should be added to the current plot if there is one (\code{add=TRUE}) or whether a new plot should be initialised (\code{add=FALSE}, the default). } \item{dum}{ list of extra arguments controlling the plotting of the dummy points of the quadrature scheme. See below. } \item{tiles}{ Logical value indicating whether to display the tiles used to compute the quadrature weights. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for quadrature schemes (objects of class \code{"quad"}, see \code{\link{quad.object}}). First the data points of the quadrature scheme are plotted (in their observation window) using \code{\link{plot.ppp}} with any arguments specified in \code{...} Then the dummy points of the quadrature scheme are plotted using \code{\link{plot.ppp}} with any arguments specified in \code{dum}. By default the dummy points are superimposed onto the plot of data points. 
This can be overridden by including the argument \code{add=FALSE} in the list \code{dum} as shown in the examples. In this case the data and dummy point patterns are plotted separately. See \code{\link[graphics]{par}} and \code{\link{plot.ppp}} for other possible arguments controlling the plots. } \seealso{ \code{\link{quad.object}}, \code{\link{plot.ppp}}, \code{\link[graphics]{par}} } \examples{ data(nztrees) Q <- quadscheme(nztrees) plot(Q, main="NZ trees: quadrature scheme") oldpar <- par(mfrow=c(2,1)) plot(Q, main="NZ trees", dum=list(add=FALSE)) par(oldpar) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/print.quad.Rd0000644000176200001440000000150013160710621015235 0ustar liggesusers\name{print.quad} \alias{print.quad} \title{Print a Quadrature Scheme} \description{ \code{print} method for a quadrature scheme. } \usage{ \method{print}{quad}(x,\dots) } \arguments{ \item{x}{ A quadrature scheme object, typically obtained from \code{\link{quadscheme}}. An object of class \code{"quad"}. } \item{\dots}{Ignored.} } \value{ none. } \details{ This is the \code{print} method for the class \code{"quad"}. It prints simple information about the quadrature scheme. See \code{\link{quad.object}} for details of the class \code{"quad"}. } \seealso{ \code{\link{quadscheme}}, \code{\link{quad.object}}, \code{\link{plot.quad}}, \code{\link{summary.quad}} } \examples{ data(cells) Q <- quadscheme(cells) Q } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} spatstat/man/endpoints.psp.Rd0000644000176200001440000000461113160710621015762 0ustar liggesusers\name{endpoints.psp} \alias{endpoints.psp} \title{Endpoints of Line Segment Pattern} \description{ Extracts the endpoints of each line segment in a line segment pattern. } \usage{ endpoints.psp(x, which="both") } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{which}{ String specifying which endpoint or endpoints should be returned. See Details. 
} } \value{ Point pattern (object of class \code{"ppp"}). } \details{ This function extracts one endpoint, or both endpoints, from each of the line segments in \code{x}, and returns these points as a point pattern object. The argument \code{which} determines which endpoint or endpoints of each line segment should be returned: \describe{ \item{\code{which="both"}}{ (the default): both endpoints of each line segment are returned. The result is a point pattern with twice as many points as there are line segments in \code{x}. } \item{\code{which="first"}}{ select the first endpoint of each line segment (returns the points with coordinates \code{x$ends$x0, x$ends$y0}). } \item{\code{which="second"}}{ select the second endpoint of each line segment (returns the points with coordinates \code{x$ends$x1, x$ends$y1}). } \item{\code{which="left"}}{ select the left-most endpoint (the endpoint with the smaller \eqn{x} coordinate) of each line segment. } \item{\code{which="right"}}{ select the right-most endpoint (the endpoint with the greater \eqn{x} coordinate) of each line segment. } \item{\code{which="lower"}}{ select the lower endpoint (the endpoint with the smaller \eqn{y} coordinate) of each line segment. } \item{\code{which="upper"}}{ select the upper endpoint (the endpoint with the greater \eqn{y} coordinate) of each line segment. } } The result is a point pattern. It also has an attribute \code{"id"} which is an integer vector identifying the segment which contributed each point. 
} \seealso{ \code{\link{psp.object}}, \code{\link{ppp.object}}, \code{\link{midpoints.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(a) b <- endpoints.psp(a, "left") plot(b, add=TRUE) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/nndist.Rd0000644000176200001440000001475113160710621014463 0ustar liggesusers\name{nndist} \alias{nndist} \alias{nndist.ppp} \alias{nndist.default} \title{Nearest neighbour distances} \description{ Computes the distance from each point to its nearest neighbour in a point pattern. Alternatively computes the distance to the second nearest neighbour, or third nearest, etc. } \usage{ nndist(X, \dots) \method{nndist}{ppp}(X, \dots, k=1, by=NULL, method="C") \method{nndist}{default}(X, Y=NULL, \dots, k=1, by=NULL, method="C") } \arguments{ \item{X,Y}{ Arguments specifying the locations of a set of points. For \code{nndist.ppp}, the argument \code{X} should be a point pattern (object of class \code{"ppp"}). For \code{nndist.default}, typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components \code{x} and \code{y}, or a matrix with two columns. } \item{\dots}{ Ignored by \code{nndist.ppp} and \code{nndist.default}. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{by}{ Optional. A factor, which separates \code{X} into groups. The algorithm will compute the distance to the nearest point in each group. } \item{method}{String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each point. If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th data point. 
If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th data point. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th data point. If the argument \code{by} is given, then the result is a data frame containing the distances described above, from each point of \code{X}, to the nearest point in each subset of \code{X} defined by the factor \code{by}. } \details{ This function computes the Euclidean distance from each point in a point pattern to its nearest neighbour (the nearest other point of the pattern). If \code{k} is specified, it computes the distance to the \code{k}th nearest neighbour. The function \code{nndist} is generic, with a method for point patterns (objects of class \code{"ppp"}), and a default method for coordinate vectors. There is also a method for line segment patterns, \code{\link{nndist.psp}}. The method for point patterns expects a single point pattern argument \code{X} and returns the vector of its nearest neighbour distances. The default method expects that \code{X} and \code{Y} will determine the coordinates of a set of points. Typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components named \code{x} and \code{y}, or a matrix or data frame with two columns. The argument \code{k} may be a single integer, or an integer vector. If it is a vector, then the \eqn{k}th nearest neighbour distances are computed for each value of \eqn{k} specified in the vector. If the argument \code{by} is given, it should be a \code{factor}, of length equal to the number of points in \code{X}. This factor effectively partitions \code{X} into subsets, each subset associated with one of the levels of \code{X}. 
The algorithm will then compute, for each point of \code{X}, the distance to the nearest neighbour \emph{in each subset}. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by two to three orders of magnitude and uses much less memory. If there is only one point (if \code{x} has length 1), then a nearest neighbour distance of \code{Inf} is returned. If there are no points (if \code{x} has length zero) a numeric vector of length zero is returned. To identify \emph{which} point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. To use the nearest neighbour distances for statistical inference, it is often advisable to use the edge-corrected empirical distribution, computed by \code{\link{Gest}}. To find the nearest neighbour distances from one point pattern to another point pattern, use \code{\link{nncross}}. } \section{Nearest neighbours of each type}{ If \code{X} is a multitype point pattern and \code{by=marks(X)}, then the algorithm will compute, for each point of \code{X}, the distance to the nearest neighbour of each type. See the Examples. To find the minimum distance from \emph{any} point of type \code{i} to the nearest point of type \code{j}, for all combinations of \code{i} and \code{j}, use the \R function \code{\link[stats]{aggregate}} as suggested in the Examples. } \section{Warnings}{ An infinite or \code{NA} value is returned if the distance is not defined (e.g. if there is only one point in the point pattern). } \seealso{ \code{\link{nndist.psp}}, \code{\link{pairdist}}, \code{\link{Gest}}, \code{\link{nnwhich}}, \code{\link{nncross}}. 
} \examples{ data(cells) # nearest neighbours d <- nndist(cells) # second nearest neighbours d2 <- nndist(cells, k=2) # first, second and third nearest d1to3 <- nndist(cells, k=1:3) x <- runif(100) y <- runif(100) d <- nndist(x, y) # Stienen diagram plot(cells \%mark\% (nndist(cells)/2), markscale=1) # distance to nearest neighbour of each type nnda <- nndist(ants, by=marks(ants)) head(nnda) # For nest number 1, the nearest Cataglyphis nest is 87.32125 units away # Use of 'aggregate': # minimum distance between each pair of types aggregate(nnda, by=list(from=marks(ants)), min) # Always a symmetric matrix # mean nearest neighbour distances aggregate(nnda, by=list(from=marks(ants)), mean) # The mean distance from a Messor nest to # the nearest Cataglyphis nest is 59.02549 units } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian. } \keyword{spatial} \keyword{math} spatstat/man/rose.Rd0000644000176200001440000001261213160710621014126 0ustar liggesusers\name{rose} \alias{rose} \alias{rose.default} \alias{rose.histogram} \alias{rose.density} \alias{rose.fv} \title{Rose Diagram} \description{ Plots a rose diagram (rose of directions), the analogue of a histogram or density plot for angular data. } \usage{ rose(x, \dots) \method{rose}{default}(x, breaks = NULL, \dots, weights=NULL, nclass = NULL, unit = c("degree", "radian"), start=0, clockwise=FALSE, main) \method{rose}{histogram}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) \method{rose}{density}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) \method{rose}{fv}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) } \arguments{ \item{x}{ Data to be plotted. 
A numeric vector containing angles, or a \code{histogram} object containing a histogram of angular values, or a \code{density} object containing a smooth density estimate for angular data, or an \code{fv} object giving a function of an angular argument. } \item{breaks, nclass}{ Arguments passed to \code{\link[graphics]{hist}} to determine the histogram breakpoints. } \item{\dots}{ Additional arguments passed to \code{\link[graphics]{polygon}} controlling the appearance of the plot (or passed from \code{rose.default} to \code{\link[graphics]{hist}} to control the calculation of the histogram). } \item{unit}{ The unit in which the angles are expressed. } \item{start}{ The starting direction for measurement of angles, that is, the spatial direction which corresponds to a measured angle of zero. Either a character string giving a compass direction (\code{"N"} for north, \code{"S"} for south, \code{"E"} for east, or \code{"W"} for west) or a number giving the angle from the the horizontal (East) axis to the starting direction. For example, if \code{unit="degree"} and \code{clockwise=FALSE}, then \code{start=90} and \code{start="N"} are equivalent. The default is to measure angles anti-clockwise from the horizontal axis (East direction). } \item{clockwise}{ Logical value indicating whether angles increase in the clockwise direction (\code{clockwise=TRUE}) or anti-clockwise, counter-clockwise direction (\code{clockwise=FALSE}, the default). } \item{weights}{ Optional vector of numeric weights associated with \code{x}. } \item{main}{ Optional main title for the plot. } \item{labels}{ Either a logical value indicating whether to plot labels next to the tick marks, or a vector of labels for the tick marks. } \item{at}{ Optional vector of angles at which tick marks should be plotted. Set \code{at=numeric(0)} to suppress tick marks. } \item{do.plot}{ Logical value indicating whether to really perform the plot. 
} } \details{ A rose diagram or rose of directions is the analogue of a histogram or bar chart for data which represent angles in two dimensions. The bars of the bar chart are replaced by circular sectors in the rose diagram. The function \code{rose} is generic, with a default method for numeric data, and methods for histograms and function tables. If \code{x} is a numeric vector, it must contain angular values in the range 0 to 360 (if \code{unit="degree"}) or in the range 0 to \code{2 * pi} (if \code{unit="radian"}). A histogram of the data will first be computed using \code{\link[graphics]{hist}}. Then the rose diagram of this histogram will be plotted by \code{rose.histogram}. If \code{x} is an object of class \code{"histogram"} produced by the function \code{\link[graphics]{hist}}, representing the histogram of angular data, then the rose diagram of the densities (rather than the counts) in this histogram object will be plotted. If \code{x} is an object of class \code{"density"} produced by \code{\link{circdensity}} or \code{\link[stats]{density.default}}, representing a kernel smoothed density estimate of angular data, then the rose diagram of the density estimate will be plotted. If \code{x} is a function value table (object of class \code{"fv"}) then the argument of the function will be interpreted as an angle, and the value of the function will be interpreted as the radius. By default, angles are interpreted using the mathematical convention where the zero angle is the horizontal \eqn{x} axis, and angles increase anti-clockwise. Other conventions can be specified using the arguments \code{start} and \code{clockwise}. Standard compass directions are obtained by setting \code{unit="degree"}, \code{start="N"} and \code{clockwise=TRUE}. } \value{A window (class \code{"owin"}) containing the plotted region.} \author{\adrian \rolf and \ege } \seealso{ \code{\link{fv}}, \code{\link[graphics]{hist}}, \code{\link{circdensity}}, \code{\link[stats]{density.default}}. 
} \examples{ ang <- runif(1000, max=360) rose(ang, col="grey") rose(ang, col="grey", start="N", clockwise=TRUE) } \keyword{spatial} \keyword{hplot} spatstat/man/quantile.density.Rd0000644000176200001440000000451113160710621016455 0ustar liggesusers\name{quantile.density} \alias{quantile.density} \title{ Quantiles of a Density Estimate } \description{ Given a kernel estimate of a probability density, compute quantiles. } \usage{ \method{quantile}{density}(x, probs = seq(0, 1, 0.25), names = TRUE, \dots, warn = TRUE) } \arguments{ \item{x}{ Object of class \code{"density"} computed by a method for \code{\link[stats]{density}} } \item{probs}{ Numeric vector of probabilities for which the quantiles are required. } \item{names}{ Logical value indicating whether to attach names (based on \code{probs}) to the result. } \item{\dots}{ Ignored. } \item{warn}{ Logical value indicating whether to issue a warning if the density estimate \code{x} had to be renormalised because it was computed in a restricted interval. } } \details{ This function calculates quantiles of the probability distribution whose probability density has been estimated and stored in the object \code{x}. The object \code{x} must belong to the class \code{"density"}, and would typically have been obtained from a call to the function \code{\link[stats]{density}}. The probability density is first normalised so that the total probability is equal to 1. A warning is issued if the density estimate was restricted to an interval (i.e. if \code{x} was created by a call to \code{\link[stats]{density}} which included either of the arguments \code{from} and \code{to}). Next, the density estimate is numerically integrated to obtain an estimate of the cumulative distribution function \eqn{F(x)}. Then for each desired probability \eqn{p}, the algorithm finds the corresponding quantile \eqn{q}. 
The quantile \eqn{q} corresponding to probability \eqn{p} satisfies \eqn{F(q) = p} up to the resolution of the grid of values contained in \code{x}. The quantile is computed from the right, that is, \eqn{q} is the smallest available value of \eqn{x} such that \eqn{F(x) \ge p}{F(x) >= p}. } \value{ A numeric vector containing the quantiles. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link[stats]{quantile}}, \code{\link{quantile.ewcdf}}, \code{\link{quantile.im}}, \code{\link{CDF}}. } \examples{ dd <- density(runif(10)) quantile(dd) } \keyword{methods} \keyword{univar} \keyword{nonparametric} spatstat/man/rotmean.Rd0000644000176200001440000000507013160710621014623 0ustar liggesusers\name{rotmean} \alias{rotmean} \title{ Rotational Average of a Pixel Image } \description{ Compute the average pixel value over all rotations of the image about the origin, as a function of distance from the origin. } \usage{ rotmean(X, ..., origin, padzero=TRUE, Xname, result=c("fv", "im")) } \arguments{ \item{X}{ A pixel image. } \item{\dots}{ Ignored. } \item{origin}{ Optional. Origin about which the rotations should be performed. Either a numeric vector or a character string as described in the help for \code{\link{shift.owin}}. } \item{padzero}{ Logical. If \code{TRUE} (the default), the value of \code{X} is assumed to be zero outside the window of \code{X}. If \code{FALSE}, the value of \code{X} is taken to be undefined outside the window of \code{X}. } \item{Xname}{ Optional name for \code{X} to be used in the function labels. } \item{result}{ Character string specifying the kind of result required: either a function object or a pixel image. } } \details{ This command computes, for each possible distance \eqn{r}, the average pixel value of the pixels lying at distance \eqn{r} from the origin. Kernel smoothing is used to obtain a smooth function of \eqn{r}. 
If \code{result="fv"} (the default) the result is a function object of class \code{"fv"} giving the mean pixel value of \code{X} as a function of distance from the origin. If \code{result="im"} the result is a pixel image, with the same dimensions as \code{X}, giving the mean value of \code{X} over all pixels lying at the same distance from the origin as the current pixel. If \code{padzero=TRUE} (the default), the value of \code{X} is assumed to be zero outside the window of \code{X}. The rotational mean at a given distance \eqn{r} is the average value of the image \code{X} over the \emph{entire} circle of radius \eqn{r}, including zero values outside the window if the circle lies partly outside the window. If \code{padzero=FALSE}, the value of \code{X} is taken to be undefined outside the window of \code{X}. The rotational mean is the average of the \code{X} values over the \emph{subset} of the circle of radius \eqn{r} that lies entirely inside the window. } \value{ An object of class \code{"fv"} or \code{"im"}. } \author{\adrian \rolf and \ege } \examples{ if(interactive()) { Z <- setcov(square(1)) plot(rotmean(Z)) plot(rotmean(Z, result="im")) } else { Z <- setcov(square(1), dimyx=32) f <- rotmean(Z) } } \keyword{spatial} \keyword{math} spatstat/man/Kcross.inhom.Rd0000644000176200001440000003134113160710571015537 0ustar liggesusers\name{Kcross.inhom} \alias{Kcross.inhom} \title{ Inhomogeneous Cross K Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross \eqn{K} function, which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}, adjusted for spatially varying intensity. 
} \usage{ Kcross.inhom(X, i, j, lambdaI=NULL, lambdaJ=NULL, \dots, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIJ=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the the estimated intensity of the sub-process of points of type \code{j}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{j} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{r}{ Optional. 
Numeric vector giving the values of the argument \eqn{r} at which the cross K function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for advanced use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"} ,\code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{ Ignored. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdaJ} if they are omitted. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdaJ} if they are omitted. Incompatible with \code{sigma}. } \item{lambdaIJ}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdaJ} for each pair of points of types \code{i} and \code{j} respectively. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdaJ}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdaJ} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). 
If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{ij}(r)}{Kij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{ij}(r)}{Kij(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{ij}(r)}{Kij(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Kcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Kinhom}}. The inhomogeneous cross-type \eqn{K} function is described by \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2003, pages 48-49 and 51-53). Briefly, given a multitype point process, suppose the sub-process of points of type \eqn{j} has intensity function \eqn{\lambda_j(u)}{lambda[j](u)} at spatial locations \eqn{u}. Suppose we place a mass of \eqn{1/\lambda_j(\zeta)}{1/lambda[j](z)} at each point \eqn{\zeta}{z} of type \eqn{j}. Then the expected total mass per unit area is 1. The inhomogeneous ``cross-type'' \eqn{K} function \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} equals the expected total mass within a radius \eqn{r} of a point of the process of type \eqn{i}. 
If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{ij}}{Kij} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} suggest dependence between the points of types \eqn{i} and \eqn{j}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. 
} } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly \code{lambdaJ} should contain estimated values of the intensity of the sub-process of points of type \code{j}. It may be either a pixel image, a function, a numeric vector, or omitted. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdaJ} will be ignored. The optional argument \code{lambdaIJ} is for advanced use only. It is a matrix containing estimated values of the products of these two intensities for each pair of data points of types \code{i} and \code{j} respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must exceed the radius of the largest disc contained in the window. The argument \code{correction} chooses the edge correction as explained e.g. in \code{\link{Kest}}. The pair correlation function can also be applied to the result of \code{Kcross.inhom}; see \code{\link{pcf}}. } \references{ Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. 
Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton, 2003. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Kcross}}, \code{\link{Kinhom}}, \code{\link{Kdot.inhom}}, \code{\link{Kmulti.inhom}}, \code{\link{pcf}} } \examples{ # Lansing Woods data woods <- lansing \testonly{woods <- woods[seq(1,npoints(woods), by=10)]} ma <- split(woods)$maple wh <- split(woods)$whiteoak # method (1): estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdaW <- density.ppp(wh, sigma=0.15, at="points") K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaW, lambdaM) # method (2): leave-one-out K <- Kcross.inhom(woods, "whiteoak", "maple", sigma=0.15) # method (3): fit parametric intensity model fit <- ppm(woods ~marks * polynom(x,y,2)) # alternative (a): use fitted model as 'lambda' argument K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaI=fit, lambdaJ=fit, update=FALSE) K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaX=fit, update=FALSE) # alternative (b): evaluate fitted intensities at data points # (these are the intensities of the sub-processes of each type) inten <- fitted(fit, dataonly=TRUE) # split according to types of points lambda <- split(inten, marks(woods)) K <- Kcross.inhom(woods, "whiteoak", "maple", lambda$whiteoak, lambda$maple) # synthetic example: type A points have intensity 50, # type B points have intensity 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) K <- Kcross.inhom(X, "A", "B", lambdaI=as.im(50, Window(X)), lambdaJ=lamB) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{nonparametric} spatstat/man/envelope.envelope.Rd0000644000176200001440000000656113160710621016615 0ustar liggesusers\name{envelope.envelope} \alias{envelope.envelope} \title{ Recompute Envelopes } \description{ Given a simulation envelope (object of class \code{"envelope"}), compute another envelope from the same simulation data using different parameters. } \usage{ \method{envelope}{envelope}(Y, fun = NULL, ..., transform=NULL, global=FALSE, VARIANCE=FALSE) } \arguments{ \item{Y}{ A simulation envelope (object of class \code{"envelope"}). } \item{fun}{ Optional. Summary function to be applied to the simulated point patterns. } \item{\dots,transform,global,VARIANCE}{ Parameters controlling the type of envelope that is re-computed. See \code{\link{envelope}}. } } \details{ This function can be used to re-compute a simulation envelope from previously simulated data, using different parameter settings for the envelope: for example, a different significance level, or a global envelope instead of a pointwise envelope. The function \code{\link{envelope}} is generic. This is the method for the class \code{"envelope"}. The argument \code{Y} should be a simulation envelope (object of class \code{"envelope"}) produced by any of the methods for \code{\link{envelope}}. Additionally, \code{Y} must contain either \itemize{ \item the simulated point patterns that were used to create the original envelope (so \code{Y} should have been created by calling \code{\link{envelope}} with \code{savepatterns=TRUE}); \item the summary functions of the simulated point patterns that were used to create the original envelope (so \code{Y} should have been created by calling \code{\link{envelope}} with \code{savefuns=TRUE}). } If the argument \code{fun} is given, it should be a summary function that can be applied to the simulated point patterns that were used to create \code{Y}. 
The envelope of the summary function \code{fun} for these point patterns will be computed using the parameters specified in \code{\dots}. If \code{fun} is not given, then: \itemize{ \item If \code{Y} contains the summary functions that were used to compute the original envelope, then the new envelope will be computed from these original summary functions. \item Otherwise, if \code{Y} contains the simulated point patterns, then the \eqn{K} function \code{\link{Kest}} will be applied to each of these simulated point patterns, and the new envelope will be based on the \eqn{K} functions. } The new envelope will be computed using the parameters specified in \code{\dots}. See \code{\link{envelope}} for a full list of envelope parameters. Frequently-used parameters include \code{nrank} and \code{nsim} (to change the number of simulations used and the significance level of the envelope), \code{global} (to change from pointwise to global envelopes) and \code{VARIANCE} (to compute the envelopes from the sample moments instead of the ranks). } \value{ An envelope (object of class \code{"envelope"}). } \seealso{ \code{\link{envelope}} } \examples{ E <- envelope(cells, Kest, nsim=19, savefuns=TRUE, savepatterns=TRUE) E2 <- envelope(E, nrank=2) Eg <- envelope(E, global=TRUE) EG <- envelope(E, Gest) EL <- envelope(E, transform=expression(sqrt(./pi))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/dg.envelope.Rd0000644000176200001440000000774613160710571015374 0ustar liggesusers\name{dg.envelope} \alias{dg.envelope} \title{ Global Envelopes for Dao-Genton Test } \description{ Computes the global envelopes corresponding to the Dao-Genton test of goodness-of-fit. 
} \usage{ dg.envelope(X, \dots, nsim = 19, nsimsub=nsim-1, nrank = 1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{verbose=FALSE} to turn off the messages. } \item{nsim}{ Number of simulated patterns to be generated in the primary experiment. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{alternative="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{alternative="less"}) or a one-sided test with an upper critical boundary (\code{alternative="greater"}). } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). 
} \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value determining whether to print progress reports. } } \details{ Computes global simulation envelopes corresponding to the Dao-Genton (2014) adjusted Monte Carlo goodness-of-fit test. The envelopes are described in Baddeley et al (2015). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. } \value{ An object of class \code{"fv"}. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Computational and Graphical Statistics} \bold{23}, 497--517. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{dg.test}}, \code{\link{mad.test}}, \code{\link{envelope}} } \examples{ ns <- if(interactive()) 19 else 4 E <- dg.envelope(swedishpines, Lest, nsim=ns) E plot(E) Eo <- dg.envelope(swedishpines, Lest, alternative="less", nsim=ns) Ei <- dg.envelope(swedishpines, Lest, interpolate=TRUE, nsim=ns) } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/plot.lpp.Rd0000644000176200001440000000617613160710621014736 0ustar liggesusers\name{plot.lpp} \alias{plot.lpp} \title{ Plot Point Pattern on Linear Network } \description{ Plots a point pattern on a linear network. Plot method for the class \code{"lpp"} of point patterns on a linear network. 
} \usage{ \method{plot}{lpp}(x, \dots, main, add = FALSE, use.marks=TRUE, which.marks=NULL, show.all = !add, show.window=FALSE, show.network=TRUE, do.plot = TRUE, multiplot=TRUE) } \arguments{ \item{x}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Additional arguments passed to \code{\link{plot.linnet}} or \code{\link{plot.ppp}}. } \item{main}{ Main title for plot. } \item{add}{ Logical value indicating whether the plot is to be added to the existing plot (\code{add=TRUE}) or whether a new plot should be initialised (\code{add=FALSE}, the default). } \item{use.marks}{ logical flag; if \code{TRUE}, plot points using a different plotting symbol for each mark; if \code{FALSE}, only the locations of the points will be plotted, using \code{\link{points}()}. } \item{which.marks}{ Index determining which column of marks to use, if the marks of \code{x} are a data frame. A character or integer vector identifying one or more columns of marks. If \code{add=FALSE} then the default is to plot all columns of marks, in a series of separate plots. If \code{add=TRUE} then only one column of marks can be plotted, and the default is \code{which.marks=1} indicating the first column of marks. } \item{show.all}{ Logical value indicating whether to plot everything including the main title and the window containing the network. } \item{show.window}{ Logical value indicating whether to plot the window containing the network. Overrides \code{show.all}. } \item{show.network}{ Logical value indicating whether to plot the network. } \item{do.plot}{ Logical value determining whether to actually perform the plotting. } \item{multiplot}{ Logical value giving permission to display multiple plots. } } \details{ The linear network is plotted by \code{\link{plot.linnet}}, then the points are plotted by \code{\link{plot.ppp}}. 
Commonly-used arguments include: \itemize{ \item \code{col} and \code{lwd} for the colour and width of lines in the linear network \item \code{cols} for the colour or colours of the points \item \code{chars} for the plot characters representing different types of points \item \code{legend} and \code{leg.side} to control the graphics legend } Note that the linear network will be plotted even when \code{add=TRUE}, unless \code{show.network=FALSE}. } \value{ (Invisible) object of class \code{"symbolmap"} giving the correspondence between mark values and plotting characters. } \seealso{ \code{\link{lpp}}. See \code{\link{plot.ppp}} for options for representing the points. See also \code{\link{points.lpp}}, \code{\link{text.lpp}}. } \examples{ plot(chicago, cols=1:6) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat/man/formula.fv.Rd0000644000176200001440000000360713160710621015241 0ustar liggesusers\name{formula.fv} \alias{formula.fv} \alias{formula<-} \alias{formula<-.fv} \title{ Extract or Change the Plot Formula for a Function Value Table } \description{ Extract or change the default plotting formula for an object of class \code{"fv"} (function value table). } \usage{ \method{formula}{fv}(x, \dots) formula(x, \dots) <- value \method{formula}{fv}(x, \dots) <- value } \arguments{ \item{x}{ An object of class \code{"fv"}, containing the values of several estimates of a function. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ New value of the formula. Either a \code{formula} or a character string. } } \details{ A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) is a convenient way of storing and plotting several different estimates of the same function. The default behaviour of \code{plot(x)} for a function value table \code{x} is determined by a formula associated with \code{x} called its \emph{plot formula}. See \code{\link{plot.fv}} for explanation about these formulae. 
The function \code{formula.fv} is a method for the generic command \code{\link{formula}}. It extracts the plot formula associated with the object. The function \code{formula<-} is generic. It changes the formula associated with an object. The function \code{formula<-.fv} is the method for \code{formula<-} for the class \code{"fv"}. It changes the plot formula associated with the object. } \value{ The result of \code{formula.fv} is a character string containing the plot formula. The result of \code{formula<-.fv} is a new object of class \code{"fv"}. } \author{ \adrian and \rolf } \seealso{ \code{\link{fv}}, \code{\link{plot.fv}}, \code{\link[stats]{formula}}. } \examples{ K <- Kest(cells) formula(K) formula(K) <- (iso ~ r) } \keyword{spatial} \keyword{methods} spatstat/man/PPversion.Rd0000644000176200001440000000555213160710571015114 0ustar liggesusers\name{PPversion} \alias{PPversion} \alias{QQversion} \title{ Transform a Function into its P-P or Q-Q Version } \description{ Given a function object \code{f} containing both the estimated and theoretical versions of a summary function, these operations combine the estimated and theoretical functions into a new function. When plotted, the new function gives either the P-P plot or Q-Q plot of the original \code{f}. } \usage{ PPversion(f, theo = "theo", columns = ".") QQversion(f, theo = "theo", columns = ".") } \arguments{ \item{f}{ The function to be transformed. An object of class \code{"fv"}. } \item{theo}{ The name of the column of \code{f} that should be treated as the theoretical value of the function. } \item{columns}{ Character vector, specifying the columns of \code{f} to which the transformation will be applied. Either a vector of names of columns of \code{f}, or one of the abbreviations recognised by \code{\link{fvnames}}. 
} } \details{ The argument \code{f} should be an object of class \code{"fv"}, containing both empirical estimates \eqn{\widehat f(r)}{fhat(r)} and a theoretical value \eqn{f_0(r)}{f0(r)} for a summary function. The \emph{P--P version} of \code{f} is the function \eqn{g(x) = \widehat f (f_0^{-1}(x))}{g(x) = fhat(f0^(-1)(x))} where \eqn{f_0^{-1}}{f0^(-1)} is the inverse function of \eqn{f_0}{f0}. A plot of \eqn{g(x)} against \eqn{x} is equivalent to a plot of \eqn{\widehat f(r)}{fhat(r)} against \eqn{f_0(r)}{f0(r)} for all \eqn{r}. If \code{f} is a cumulative distribution function (such as the result of \code{\link{Fest}} or \code{\link{Gest}}) then this is a P--P plot, a plot of the observed versus theoretical probabilities for the distribution. The diagonal line \eqn{y=x} corresponds to perfect agreement between observed and theoretical distribution. The \emph{Q--Q version} of \code{f} is the function \eqn{h(x) = f_0^{-1}(\widehat f(x))}{f0^(-1)(fhat(x))}. If \code{f} is a cumulative distribution function, a plot of \eqn{h(x)} against \eqn{x} is a Q--Q plot, a plot of the observed versus theoretical quantiles of the distribution. The diagonal line \eqn{y=x} corresponds to perfect agreement between observed and theoretical distribution. Another straight line corresponds to the situation where the observed variable is a linear transformation of the theoretical variable. For a point pattern \code{X}, the Q--Q version of \code{Kest(X)} is essentially equivalent to \code{Lest(X)}. } \value{ Another object of class \code{"fv"}. } \author{ Tom Lawrence and Adrian Baddeley. Implemented by \spatstatAuthors. 
} \seealso{ \code{\link{plot.fv}} } \examples{ opa <- par(mar=0.1+c(5,5,4,2)) G <- Gest(redwoodfull) plot(PPversion(G)) plot(QQversion(G)) par(opa) } \keyword{spatial} \keyword{nonparametric} \keyword{manip} spatstat/man/subset.hyperframe.Rd0000644000176200001440000000454513160710621016632 0ustar liggesusers\name{subset.hyperframe} \alias{subset.hyperframe} \title{ Subset of Hyperframe Satisfying A Condition } \description{ Given a hyperframe, return the subset specified by imposing a condition on each row, and optionally by choosing only some of the columns. } \usage{ \method{subset}{hyperframe}(x, subset, select, \dots) } \arguments{ \item{x}{ A hyperframe pattern (object of class \code{"hyperframe"}. } \item{subset}{ Logical expression indicating which points are to be kept. The expression may involve the names of columns of \code{x} and will be evaluated by \code{\link{with.hyperframe}}. } \item{select}{ Expression indicating which columns of marks should be kept. } \item{\dots}{ Arguments passed to \code{\link{[.hyperframe}} such as \code{drop} and \code{strip}. } } \details{ This is a method for the generic function \code{\link{subset}}. It extracts the subset of rows of \code{x} that satisfy the logical expression \code{subset}, and retains only the columns of \code{x} that are specified by the expression \code{select}. The result is always a hyperframe. The argument \code{subset} determines the subset of rows that will be extracted. It should be a logical expression. It may involve the names of columns of \code{x}. The default is to keep all points. The argument \code{select} determines which columns of \code{x} will be retained. It should be an expression involving the names of columns (which will be interpreted as integers representing the positions of these columns). For example if there are columns named \code{A} to \code{Z}, then \code{select=D:F} is a valid expression and means that columns \code{D}, \code{E} and \code{F} will be retained. 
Similarly \code{select=-(A:C)} is valid and means that columns \code{A} to \code{C} will be deleted. The default is to retain all columns. Setting \code{subset=FALSE} will remove all the rows. Setting \code{select=FALSE} will remove all the columns. The result is always a hyperframe. } \value{ A hyperframe. } \author{\adrian , \rolf and \ege } \seealso{ \code{\link[base]{subset}}, \code{\link{[.hyperframe}} } \examples{ a <- subset(flu, virustype=="wt") aa <- subset(flu, minnndist(pattern) > 10) aaa <- subset(flu, virustype=="wt", select = -pattern) } \keyword{spatial} \keyword{manip} spatstat/man/increment.fv.Rd0000644000176200001440000000166513160710621015562 0ustar liggesusers\name{increment.fv} \alias{increment.fv} \title{ Increments of a Function } \description{ Compute the change in the value of a function \code{f} when the function argument increases by \code{delta}. } \usage{ increment.fv(f, delta) } \arguments{ \item{f}{ Object of class \code{"fv"} representing a function. } \item{delta}{ Numeric. The increase in the value of the function argument. } } \details{ This command computes the new function \deqn{g(x) = f(x+h) - f(x-h)} where \code{h = delta/2}. The value of \eqn{g(x)} is the change in the value of \eqn{f} over an interval of length \code{delta} centred at \eqn{x}. } \value{ Another object of class \code{"fv"} compatible with \code{X}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{fv.object}}, \code{\link{deriv.fv}} } \examples{ plot(increment.fv(Kest(cells), 0.05)) } \keyword{spatial} \keyword{math} \keyword{nonparametric} spatstat/man/roc.Rd0000644000176200001440000000565113160710621013746 0ustar liggesusers\name{roc} \alias{roc} \alias{roc.ppp} \alias{roc.lpp} \alias{roc.ppm} \alias{roc.kppm} \alias{roc.lppm} \title{ Receiver Operating Characteristic } \description{ Computes the Receiver Operating Characteristic curve for a point pattern or a fitted point process model. 
} \usage{ roc(X, \dots) \method{roc}{ppp}(X, covariate, \dots, high = TRUE) \method{roc}{ppm}(X, \dots) \method{roc}{kppm}(X, \dots) \method{roc}{lpp}(X, covariate, \dots, high = TRUE) \method{roc}{lppm}(X, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). } \item{covariate}{ Spatial covariate. Either a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution for calculations. } \item{high}{ Logical value indicating whether the threshold operation should favour high or low values of the covariate. } } \details{ This command computes the Receiver Operating Characteristic curve. The area under the ROC is computed by \code{\link{auc}}. For a point pattern \code{X} and a covariate \code{Z}, the ROC is a plot showing the ability of the covariate to separate the spatial domain into areas of high and low density of points. For each possible threshold \eqn{z}, the algorithm calculates the fraction \eqn{a(z)} of area in the study region where the covariate takes a value greater than \eqn{z}, and the fraction \eqn{b(z)} of data points for which the covariate value is greater than \eqn{z}. The ROC is a plot of \eqn{b(z)} against \eqn{a(z)} for all thresholds \eqn{z}. For a fitted point process model, the ROC shows the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. The ROC is \bold{not} a diagnostic for the goodness-of-fit of the model (Lobo et al, 2007). } \value{ Function value table (object of class \code{"fv"}) which can be plotted to show the ROC curve. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. 
(2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{auc}} } \examples{ plot(roc(swedishpines, "x")) fit <- ppm(swedishpines ~ x+y) plot(roc(fit)) } \keyword{spatial} spatstat/man/dilated.areas.Rd0000644000176200001440000000424713160710571015667 0ustar liggesusers\name{dilated.areas} \Rdversion{1.1} \alias{dilated.areas} \title{ Areas of Morphological Dilations } \description{ Computes the areas of successive morphological dilations. } \usage{ dilated.areas(X, r, W=as.owin(X), ..., constrained=TRUE, exact = FALSE) } \arguments{ \item{X}{ Object to be dilated. A point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), or a window (object of class \code{"owin"}). } \item{r}{ Numeric vector of radii for the dilations. } \item{W}{ Window (object of class \code{"owin"}) inside which the areas will be computed, if \code{constrained=TRUE}. } \item{\dots}{Ignored.} \item{constrained}{ Logical flag indicating whether areas should be restricted to the window \code{W}. } \item{exact}{ Logical flag indicating whether areas should be computed using analytic geometry (which is slower but more accurate). Currently available only when \code{X} is a point pattern. } } \details{ This function computes the areas of the dilations of \code{X} by each of the radii \code{r[i]}. Areas may also be computed inside a specified window \code{W}. 
The morphological dilation of a set \eqn{X} by a distance \eqn{r > 0} is the subset consisting of all points \eqn{x}{x} such that the distance from \eqn{x} to \eqn{X} is less than or equal to \eqn{r}. When \code{X} is a point pattern, the dilation by a distance \eqn{r} is the union of discs of radius \eqn{r} centred at the points of \code{X}. The argument \code{r} should be a vector of nonnegative numbers. If \code{exact=TRUE} and if \code{X} is a point pattern, then the areas are computed using analytic geometry, which is slower but much more accurate. Otherwise the computation is performed using \code{\link{distmap}}. To compute the dilated object itself, use \code{\link{dilation}}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{dilation}}, \code{\link{eroded.areas}} } \examples{ X <- runifpoint(10) a <- dilated.areas(X, c(0.1,0.2), W=square(1), exact=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/as.lpp.Rd0000644000176200001440000000534213160710571014361 0ustar liggesusers\name{as.lpp} \Rdversion{1.1} \alias{as.lpp} \title{ Convert Data to a Point Pattern on a Linear Network } \description{ Convert various kinds of data to a point pattern on a linear network. } \usage{ as.lpp(x=NULL, y=NULL, seg=NULL, tp=NULL, \dots, marks=NULL, L=NULL, check=FALSE, sparse) } \arguments{ \item{x,y}{ Vectors of cartesian coordinates, or any data acceptable to \code{\link[grDevices]{xy.coords}}. Alternatively \code{x} can be a point pattern on a linear network (object of class \code{"lpp"}) or a planar point pattern (object of class \code{"ppp"}). } \item{seg,tp}{ Optional local coordinates. Vectors of the same length as \code{x,y}. See Details. } \item{\dots}{Ignored.} \item{marks}{ Optional marks for the point pattern. A vector or factor with one entry for each point, or a data frame or hyperframe with one row for each point. } \item{L}{ Linear network (object of class \code{"linnet"}) on which the points lie. 
} \item{check}{ Logical. Whether to check the validity of the spatial coordinates. } \item{sparse}{ Optional logical value indicating whether to store the linear network data in a sparse matrix representation or not. See \code{\link{linnet}}. } } \details{ This function converts data in various formats into a point pattern on a linear network (object of class \code{"lpp"}). The possible formats are: \itemize{ \item \code{x} is already a point pattern on a linear network (object of class \code{"lpp"}). Then \code{x} is returned unchanged. \item \code{x} is a planar point pattern (object of class \code{"ppp"}). Then \code{x} is converted to a point pattern on the linear network \code{L} using \code{\link{lpp}}. \item \code{x,y,seg,tp} are vectors of equal length. These specify that the \code{i}th point has Cartesian coordinates \code{(x[i],y[i])}, and lies on segment number \code{seg[i]} of the network \code{L}, at a fractional position \code{tp[i]} along that segment (with \code{tp=0} representing one endpoint and \code{tp=1} the other endpoint of the segment). \item \code{x,y} are missing and \code{seg,tp} are vectors of equal length as described above. \item \code{seg,tp} are \code{NULL}, and \code{x,y} are data in a format acceptable to \code{\link[grDevices]{xy.coords}} specifying the Cartesian coordinates. } } \value{ A point pattern on a linear network (object of class \code{"lpp"}). } \seealso{ \code{\link{lpp}}. } \examples{ A <- as.psp(simplenet) X <- runifpointOnLines(10, A) is.ppp(X) Y <- as.lpp(X, L=simplenet) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/HierStrauss.Rd0000644000176200001440000001071713160710571015442 0ustar liggesusers\name{HierStrauss} \alias{HierStrauss} \title{The Hierarchical Strauss Point Process Model} \description{ Creates an instance of the hierarchical Strauss point process model which can then be fitted to point pattern data. 
} \usage{ HierStrauss(radii, types=NULL, archy=NULL) } \arguments{ \item{radii}{Matrix of interaction radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical Strauss process with interaction radii \eqn{radii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. The hierarchical version of the (stationary) Strauss process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density \bold{provided} \eqn{i \le j}{i <= j}. The nonstationary hierarchical Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. 
The appropriate description of the hierarchical Strauss process pairwise interaction is yielded by the function \code{HierStrauss()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrix \code{radii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii are specified in \code{HierStrauss}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierStrauss()}. } \seealso{ \code{\link{MultiStrauss}} for the corresponding symmetrical interaction. \code{\link{HierHard}}, \code{\link{HierStraussHard}}. } \examples{ r <- matrix(10 * c(3,4,4,3), nrow=2,ncol=2) HierStrauss(r) # prints a sensible description of itself ppm(ants ~1, HierStrauss(r, , c("Messor", "Cataglyphis"))) # fit the stationary hierarchical Strauss process to ants data } \author{\adrian , \rolf and \ege. } \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240. 
\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. } \keyword{spatial} \keyword{models} spatstat/man/fitted.ppm.Rd0000644000176200001440000001140513160710621015227 0ustar liggesusers\name{fitted.ppm} \alias{fitted.ppm} \title{ Fitted Conditional Intensity for Point Process Model } \description{ Given a point process model fitted to a point pattern, compute the fitted conditional intensity or fitted trend of the model at the points of the pattern, or at the points of the quadrature scheme used to fit the model. } \usage{ \method{fitted}{ppm}(object, \dots, type="lambda", dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE, drop=FALSE, check=TRUE, repair=TRUE, dropcoef=FALSE) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) } \item{\dots}{ Ignored. } \item{type}{ String (partially matched) indicating whether the fitted value is the conditional intensity (\code{"lambda"} or \code{"cif"}) or the first order trend (\code{"trend"}) or the logarithm of conditional intensity (\code{"link"}). } \item{dataonly}{ Logical. If \code{TRUE}, then values will only be computed at the points of the data point pattern. If \code{FALSE}, then values will be computed at all the points of the quadrature scheme used to fit the model, including the points of the data point pattern. } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{leaveoneout}{ Logical. If \code{TRUE} the fitted value at each data point will be computed using a leave-one-out method. See Details. } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. 
If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } \item{dropcoef}{ Internal use only. } } \value{ A vector containing the values of the fitted conditional intensity, fitted spatial trend, or logarithm of the fitted conditional intensity. Entries in this vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{union.quad(quad.ppm(object))}. } \details{ The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the model-fitting algorithm \code{\link{ppm}}). This function evaluates the conditional intensity \eqn{\hat\lambda(u, x)}{lambdahat(u,x)} or spatial trend \eqn{\hat b(u)}{bhat(u)} of the fitted point process model for certain locations \eqn{u}, where \code{x} is the original point pattern dataset to which the model was fitted. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated, are the points of the quadrature scheme used to fit the model in \code{\link{ppm}}. They include the data points (the points of the original point pattern dataset \code{x}) and other ``dummy'' points in the window of observation. If \code{leaveoneout=TRUE}, fitted values will be computed for the data points only, using a \sQuote{leave-one-out} rule: the fitted value at \code{X[i]} is effectively computed by deleting this point from the data and re-fitting the model to the reduced pattern \code{X[-i]}, then predicting the value at \code{X[i]}. (Instead of literally performing this calculation, we apply a Taylor approximation using the influence function computed in \code{\link{dfbetas.ppm}}. 
The argument \code{drop} is explained in \code{\link{quad.ppm}}. Use \code{\link{predict.ppm}} to compute the fitted conditional intensity at other locations or with other values of the explanatory variables. } \references{ Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005). Residual analysis for spatial point processes (with discussion). \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. } \seealso{ \code{\link{ppm.object}}, \code{\link{ppm}}, \code{\link{predict.ppm}} } \examples{ str <- ppm(cells ~x, Strauss(r=0.1)) lambda <- fitted(str) # extract quadrature points in corresponding order quadpoints <- union.quad(quad.ppm(str)) # plot conditional intensity values # as circles centred on the quadrature points quadmarked <- setmarks(quadpoints, lambda) plot(quadmarked) if(!interactive()) str <- ppm(cells ~ x) lambdaX <- fitted(str, leaveoneout=TRUE) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/rMatClust.Rd0000644000176200001440000001600513160710621015074 0ustar liggesusers\name{rMatClust} \alias{rMatClust} \title{Simulate Matern Cluster Process} \description{ Generate a random point pattern, a simulated realisation of the \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster Process. } \usage{ rMatClust(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, saveLambda=FALSE, expand = scale, ..., poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{scale}{ Radius parameter of the clusters. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. 
} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern. } \item{expand}{Numeric. Size of window expansion for generation of parent points. Defaults to \code{scale} which is the cluster radius. } \item{\dots}{Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE}. } \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of \ifelse{latex}{\out{Mat\'ern}}{Matern}'s cluster process, a special case of the Neyman-Scott process, inside the window \code{win}. In the simplest case, where \code{kappa} and \code{mu} are single numbers, the algorithm generates a uniform Poisson point process of \dQuote{parent} points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of \dQuote{offspring} points, the number of points per cluster being Poisson (\code{mu}) distributed, and their positions being placed independently and uniformly inside a disc of radius \code{scale} centred on the parent point.
The resulting point pattern is a realisation of the classical \dQuote{stationary Matern cluster process} generated inside the window \code{win}. This point process has intensity \code{kappa * mu}. The algorithm can also generate spatially inhomogeneous versions of the \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007). For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu/(pi * scale^2)} inside the disc of radius \code{scale} centred on the parent point, and zero intensity outside this disc. Equivalently we first generate, for each parent point, a Poisson (\eqn{M}) random number of offspring (where \eqn{M} is the maximum value of \code{mu}) placed independently and uniformly in the disc of radius \code{scale} centred on the parent location, and then randomly thin the offspring points, with retention probability \code{mu/M}. \item Both the parent points and the offspring points can be inhomogeneous, as described above. } Note that if \code{kappa} is a pixel image, its domain must be larger than the window \code{win}. This is because an offspring point inside \code{win} could have its parent point lying outside \code{win}. In order to allow this, the simulation algorithm first expands the original window \code{win} by a distance \code{expand} and generates the Poisson process of parent points on this larger window. 
If \code{kappa} is a pixel image, its domain must contain this larger window. The intensity of the \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process is \code{kappa * mu} if either \code{kappa} or \code{mu} is a single number. In the general case the intensity is an integral involving \code{kappa}, \code{mu} and \code{scale}. The \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process model with homogeneous parents (i.e. where \code{kappa} is a single number) can be fitted to data using \code{\link{kppm}}. Currently it is not possible to fit the \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process model with inhomogeneous parents. If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}. This avoids computations that would otherwise require huge amounts of memory. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}, \code{\link{kppm}}, \code{\link{clusterfit}}. } \examples{ # homogeneous X <- rMatClust(10, 0.05, 4) # inhomogeneous ff <- function(x,y){ 4 * exp(2 * abs(x) - 1) } Z <- as.im(ff, owin()) Y <- rMatClust(10, 0.05, Z) YY <- rMatClust(ff, 0.05, 3) } \references{ \ifelse{latex}{\out{Mat\'ern}}{Matern}, B. (1960) \emph{Spatial Variation}. Meddelanden \ifelse{latex}{\out{fr\r{a}n}}{fraan} Statens Skogsforskningsinstitut, volume 59, number 5. Statens Skogsforskningsinstitut, Sweden. \ifelse{latex}{\out{Mat\'ern}}{Matern}, B. (1986) \emph{Spatial Variation}. Lecture Notes in Statistics 36, Springer-Verlag, New York. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/methods.boxx.Rd0000644000176200001440000000254713160710621015606 0ustar liggesusers\name{methods.boxx} \Rdversion{1.1} \alias{methods.boxx} %DoNotExport \alias{print.boxx} \alias{unitname.boxx} \alias{unitname<-.boxx} \title{ Methods for Multi-Dimensional Box } \description{ Methods for class \code{"boxx"}. } \usage{ \method{print}{boxx}(x, ...) \method{unitname}{boxx}(x) \method{unitname}{boxx}(x) <- value } \arguments{ \item{x}{ Object of class \code{"boxx"} representing a multi-dimensional box. } \item{\dots}{ Other arguments passed to \code{print.default}. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}} and \code{\link{unitname}} for the class \code{"boxx"} of multi-dimensional boxes. The \code{print} method prints a description of the box, while the \code{unitname} method extracts the name of the unit of length in which the box coordinates are expressed. } \value{ For \code{print.boxx} the value is \code{NULL}. For \code{unitname.boxx} an object of class \code{"units"}. } \author{\adrian and \rolf } \seealso{ \code{\link{boxx}}, \code{\link{print}}, \code{\link{unitname}} } \examples{ X <- boxx(c(0,10),c(0,10),c(0,5),c(0,1), unitname=c("metre", "metres")) X unitname(X) # Northern European usage unitname(X) <- "meter" } \keyword{spatial} \keyword{methods} spatstat/man/scalardilate.Rd0000644000176200001440000000463413160710621015613 0ustar liggesusers\name{scalardilate} \alias{scalardilate} \alias{scalardilate.im} \alias{scalardilate.owin} \alias{scalardilate.ppp} \alias{scalardilate.psp} \alias{scalardilate.default} \title{Apply Scalar Dilation} \description{ Applies scalar dilation to a plane geometrical object, such as a point pattern or a window, relative to a specified origin. 
} \usage{ scalardilate(X, f, \dots) \method{scalardilate}{im}(X, f, \dots, origin=NULL) \method{scalardilate}{owin}(X, f, \dots, origin=NULL) \method{scalardilate}{ppp}(X, f, \dots, origin=NULL) \method{scalardilate}{psp}(X, f, \dots, origin=NULL) \method{scalardilate}{default}(X, f, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}), a pixel image (class \code{"im"}) and so on. } \item{f}{ Scalar dilation factor. A finite number greater than zero. } \item{\dots}{Ignored by the methods.} \item{origin}{ Origin for the scalar dilation. Either a vector of 2 numbers, or one of the character strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"} (partially matched). } } \value{ Another object of the same type, representing the result of applying the scalar dilation. } \details{ This command performs scalar dilation of the object \code{X} by the factor \code{f} relative to the origin specified by \code{origin}. The function \code{scalardilate} is generic, with methods for windows (class \code{"owin"}), point patterns (class \code{"ppp"}), pixel images (class \code{"im"}), line segment patterns (class \code{"psp"}) and a default method. If the argument \code{origin} is not given, then every spatial coordinate is multiplied by the factor \code{f}. If \code{origin} is given, then scalar dilation is performed relative to the specified origin. Effectively, \code{X} is shifted so that \code{origin} is moved to \code{c(0,0)}, then scalar dilation is performed, then the result is shifted so that \code{c(0,0)} is moved to \code{origin}. This command is a special case of an affine transformation: see \code{\link{affine}}. 
} \seealso{ \code{\link{affine}}, \code{\link{shift}} } \examples{ plot(letterR) plot(scalardilate(letterR, 0.7, origin="bot"), col="red", add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/Iest.Rd0000644000176200001440000001234713160710571014073 0ustar liggesusers\name{Iest} \alias{Iest} \title{Estimate the I-function} \description{ Estimates the summary function \eqn{I(r)} for a multitype point pattern. } \usage{ Iest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{I(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{ the resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector of values for the argument \eqn{r} at which \eqn{I(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \code{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. Vector of character strings specifying the edge correction(s) to be used by \code{\link{Jest}}. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
Essentially a data frame containing \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{I} has been estimated} \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{I(r)} computed from the border-corrected estimates of \eqn{J} functions} \item{km}{the spatial Kaplan-Meier estimator of \eqn{I(r)} computed from the Kaplan-Meier estimates of \eqn{J} functions} \item{han}{the Hanisch-style estimator of \eqn{I(r)} computed from the Hanisch-style estimates of \eqn{J} functions} \item{un}{the uncorrected estimate of \eqn{I(r)} computed from the uncorrected estimates of \eqn{J} } \item{theo}{the theoretical value of \eqn{I(r)} for a stationary Poisson process: identically equal to \eqn{0} } } \note{ Sizeable amounts of memory may be needed during the calculation. } \details{ The \eqn{I} function summarises the dependence between types in a multitype point process (Van Lieshout and Baddeley, 1999) It is based on the concept of the \eqn{J} function for an unmarked point process (Van Lieshout and Baddeley, 1996). See \code{\link{Jest}} for information about the \eqn{J} function. The \eqn{I} function is defined as \deqn{ % I(r) = \sum_{i=1}^m p_i J_{ii}(r) % - J_{\bullet\bullet}(r)}{ % I(r) = (sum p[i] Jii(r)) - J(r) } where \eqn{J_{\bullet\bullet}}{J} is the \eqn{J} function for the entire point process ignoring the marks, while \eqn{J_{ii}}{Jii} is the \eqn{J} function for the process consisting of points of type \eqn{i} only, and \eqn{p_i}{p[i]} is the proportion of points which are of type \eqn{i}. The \eqn{I} function is designed to measure dependence between points of different types, even if the points are not Poisson. Let \eqn{X} be a stationary multitype point process, and write \eqn{X_i}{X[i]} for the process of points of type \eqn{i}. If the processes \eqn{X_i}{X[i]} are independent of each other, then the \eqn{I}-function is identically equal to \eqn{0}. 
Deviations \eqn{I(r) < 0} or \eqn{I(r) > 0} typically indicate negative and positive association, respectively, between types. See Van Lieshout and Baddeley (1999) for further information. An estimate of \eqn{I} derived from a multitype spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern. The estimate of \eqn{I(r)} is compared against the constant function \eqn{0}. Deviations \eqn{I(r) < 0} or \eqn{I(r) > 0} may suggest negative and positive association, respectively. This algorithm estimates the \eqn{I}-function from the multitype point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial marked point process in the plane, observed through a bounded window. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. It must be a multitype point pattern (it must have a \code{marks} vector which is a \code{factor}). The function \code{\link{Jest}} is called to compute estimates of the \eqn{J} functions in the formula above. In fact three different estimates are computed using different edge corrections. See \code{\link{Jest}} for information. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \seealso{ \code{\link{Jest}} } \examples{ data(amacrine) Ic <- Iest(amacrine) plot(Ic, main="Amacrine Cells data") # values are below I= 0, suggesting negative association # between 'on' and 'off' cells.
} \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.plotppm.Rd0000644000176200001440000000634413160710621015633 0ustar liggesusers\name{plot.plotppm} \alias{plot.plotppm} \title{Plot a plotppm Object Created by plot.ppm} \description{ The function plot.ppm produces objects which specify plots of fitted point process models. The function plot.plotppm carries out the actual plotting of these objects. } \usage{ \method{plot}{plotppm}(x, data = NULL, trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(), how = c("persp", "image", "contour"), \dots, pppargs) } \arguments{ \item{x}{ An object of class \code{plotppm} produced by \code{\link{plot.ppm}()} }. \item{data}{ The point pattern (an object of class \code{ppp}) to which the point process model was fitted (by \code{\link{ppm}}). } \item{trend}{ Logical scalar; should the trend component of the fitted model be plotted? } \item{cif}{ Logical scalar; should the complete conditional intensity of the fitted model be plotted? } \item{se}{ Logical scalar; should the estimated standard error of the fitted intensity be plotted? } \item{pause}{ Logical scalar indicating whether to pause with a prompt after each plot. Set \code{pause=FALSE} if plotting to a file. } \item{how}{ Character string or character vector indicating the style or styles of plots to be performed. } \item{\dots}{ Extra arguments to the plotting functions \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}. } \item{pppargs}{ List of extra arguments passed to \code{\link{plot.ppp}} when displaying the original point pattern data. } } \details{ If argument \code{data} is supplied then the point pattern will be superimposed on the image and contour plots. Sometimes a fitted model does not have a trend component, or the trend component may constitute all of the conditional intensity (if the model is Poisson). 
In such cases the object \code{x} will not contain a trend component, or will contain only a trend component. This will also be the case if one of the arguments \code{trend} and \code{cif} was set equal to \code{FALSE} in the call to \code{plot.ppm()} which produced \code{x}. If this is so then only the item which is present will be plotted. Explicitly setting \code{trend=TRUE}, or \code{cif=TRUE}, respectively, will then give an error. } \value{ None. } \section{Warning}{ Arguments which are passed to \code{persp}, \code{image}, and \code{contour} via the \dots argument get passed to any of the other functions listed in the \code{how} argument, and won't be recognized by them. This leads to a lot of annoying but harmless warning messages. Arguments to \code{persp} may be supplied via \code{\link{spatstat.options}()} which alleviates the warning messages in this instance. } \author{\adrian and \rolf } \seealso{ \code{\link{plot.ppm}()} } \examples{ \dontrun{ m <- ppm(cells ~ 1, Strauss(0.05)) mpic <- plot(m) # Perspective plot only, with altered parameters: plot(mpic,how="persp", theta=-30,phi=40,d=4) # All plots, with altered parameters for perspective plot: op <- spatstat.options(par.persp=list(theta=-30,phi=40,d=4)) plot(mpic) # Revert spatstat.options(op) } } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/sumouter.Rd0000644000176200001440000000464713160710621015052 0ustar liggesusers\name{sumouter} \alias{sumouter} \alias{quadform} \alias{bilinearform} \title{Compute Quadratic Forms} \description{ Calculates certain quadratic forms of matrices. } \usage{ sumouter(x, w=NULL, y=x) quadform(x, v) bilinearform(x, v, y) } \arguments{ \item{x,y}{A matrix, whose rows are the vectors in the quadratic form.} \item{w}{Optional vector of weights} \item{v}{Matrix determining the quadratic form} } \value{ A vector or matrix. } \details{ The matrices \code{x} and \code{y} will be interpreted as collections of row vectors. 
They must have the same number of rows. The command \code{sumouter} computes the sum of the outer products of corresponding row vectors, weighted by the entries of \code{w}: \deqn{ M = \sum_i w_i x_i y_i^\top }{ M = sum[i] (w[i] * outer(x[i,], y[i,])) } where the sum is over all rows of \code{x} (after removing any rows containing \code{NA} or other non-finite values). If \code{w} is missing, the weights will be taken as 1. The result is a \eqn{p \times q}{p * q} matrix where \code{p = ncol(x)} and \code{q = ncol(y)}. The command \code{quadform} evaluates the quadratic form, defined by the matrix \code{v}, for each of the row vectors of \code{x}: \deqn{ y_i = x_i V x_i^\top }{ y[i] = x[i,] \%*\% v \%*\% t(x[i,]) } The result \code{y} is a numeric vector of length \code{n} where \code{n = nrow(x)}. If \code{x[i,]} contains \code{NA} or other non-finite values, then \code{y[i] = NA}. The command \code{bilinearform} evaluates the more general bilinear form defined by the matrix \code{v}. Here \code{x} and \code{y} must be matrices of the same dimensions. For each row vector of \code{x} and corresponding row vector of \code{y}, the bilinear form is \deqn{ z_i = x_i V y_i^\top }{ z[i] = x[i,] \%*\% v \%*\% t(y[i,]) } The result \code{z} is a numeric vector of length \code{n} where \code{n = nrow(x)}. If \code{x[i,]} or \code{y[i,]} contains \code{NA} or other non-finite values, then \code{z[i] = NA}. 
} \examples{ x <- matrix(1:12, 4, 3) dimnames(x) <- list(c("Wilma", "Fred", "Barney", "Betty"), letters[1:3]) x sumouter(x) w <- 4:1 sumouter(x, w) v <- matrix(1, 3, 3) quadform(x, v) # should be the same as quadform(x, v) bilinearform(x, v, x) # See what happens with NA's x[3,2] <- NA sumouter(x, w) quadform(x, v) } \author{\adrian and \rolf } \keyword{array} spatstat/man/linearpcfcross.Rd0000644000176200001440000000567313160710621016204 0ustar liggesusers\name{linearpcfcross} \alias{linearpcfcross} \title{ Multitype Pair Correlation Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype pair correlation function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearpcfcross(X, i, j, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{ij}(r)}{g[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. 
} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfcross}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfdot}}, \code{\link{linearpcf}}, \code{\link{pcfcross}}. } \examples{ data(chicago) g <- linearpcfcross(chicago, "assault") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/pcf.Rd0000644000176200001440000000714113160710621013727 0ustar liggesusers\name{pcf} \alias{pcf} \title{Pair Correlation Function} \description{ Estimate the pair correlation function. } \usage{ pcf(X, \dots) } \arguments{ \item{X}{ Either the observed data point pattern, or an estimate of its \eqn{K} function, or an array of multitype \eqn{K} functions (see Details). } \item{\dots}{ Other arguments passed to the appropriate method. 
} } \value{ Either a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) representing a pair correlation function, or a function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) representing an array of pair correlation functions. } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} either directly from a point pattern, or indirectly from an estimate of \eqn{K(r)} or one of its variants. This function is generic, with methods for the classes \code{"ppp"}, \code{"fv"} and \code{"fasp"}. If \code{X} is a point pattern (object of class \code{"ppp"}) then the pair correlation function is estimated using a traditional kernel smoothing method (Stoyan and Stoyan, 1994). See \code{\link{pcf.ppp}} for details. If \code{X} is a function value table (object of class \code{"fv"}), then it is assumed to contain estimates of the \eqn{K} function or one of its variants (typically obtained from \code{\link{Kest}} or \code{\link{Kinhom}}). This routine computes an estimate of \eqn{g(r)} using smoothing splines to approximate the derivative. 
See \code{\link{pcf.fv}} for details. If \code{X} is a function value array (object of class \code{"fasp"}), then it is assumed to contain estimates of several \eqn{K} functions (typically obtained from \code{\link{Kmulti}} or \code{\link{alltypes}}). This routine computes an estimate of \eqn{g(r)} for each cell in the array, using smoothing splines to approximate the derivatives. See \code{\link{pcf.fasp}} for details. } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcf.fv}}, \code{\link{pcf.fasp}}, \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}} } \examples{ # ppp object X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } p <- pcf(X) plot(p) # fv object K <- Kest(X) p2 <- pcf(K, spar=0.8, method="b") plot(p2) # multitype pattern; fasp object amaK <- alltypes(amacrine, "K") amap <- pcf(amaK, spar=1, method="b") plot(amap) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat/man/bits.test.Rd0000644000176200001440000001163613160710571015106 0ustar liggesusers\name{bits.test} \alias{bits.test} \title{ Balanced Independent Two-Stage Monte Carlo Test } \description{ Performs a Balanced Independent Two-Stage Monte Carlo test of goodness-of-fit for spatial pattern. } \usage{ bits.test(X, \dots, exponent = 2, nsim=19, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{dclf.test}} or \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. 
Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{use.theory} described under Details. } \item{exponent}{ Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. } \item{nsim}{ Number of replicates in each stage of the test. A total of \code{nsim * (nsim + 1)} simulated point patterns will be generated, and the \eqn{p}-value will be a multiple of \code{1/(nsim+1)}. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ Performs the Balanced Independent Two-Stage Monte Carlo test proposed by Baddeley et al (2017), an improvement of the Dao-Genton (2014) test. If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. 
The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ A hypothesis test (object of class \code{"htest"} which can be printed to show the outcome of the test. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis}, in press. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. 
} \seealso{ \code{\link{dg.test}}, \code{\link{dclf.test}}, \code{\link{mad.test}} } \examples{ ns <- if(interactive()) 19 else 4 bits.test(cells, nsim=ns) bits.test(cells, alternative="less", nsim=ns) bits.test(cells, nsim=ns, interpolate=TRUE) } \keyword{spatial} \keyword{htest} spatstat/man/nnwhich.ppx.Rd0000644000176200001440000000541713160710621015427 0ustar liggesusers\name{nnwhich.ppx} \alias{nnwhich.ppx} \title{Nearest Neighbours in Any Dimensions} \description{ Finds the nearest neighbour of each point in a multi-dimensional point pattern. } \usage{ \method{nnwhich}{ppx}(X, \dots, k=1) } \arguments{ \item{X}{ Multi-dimensional point pattern (object of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } } \value{ Numeric vector or matrix giving, for each point, the index of its nearest neighbour (or \code{k}th nearest neighbour). If \code{k = 1} (the default), the return value is a numeric vector \code{v} giving the indices of the nearest neighbours (the nearest neighbout of the \code{i}th point is the \code{j}th point where \code{j = v[i]}). If \code{k} is a single integer, then the return value is a numeric vector giving the indices of the \code{k}th nearest neighbours. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the index of the \code{k[j]}th nearest neighbour for the \code{i}th data point. } \details{ For each point in the given multi-dimensional point pattern, this function finds its nearest neighbour (the nearest other point of the pattern). By default it returns a vector giving, for each point, the index of the point's nearest neighbour. If \code{k} is specified, the algorithm finds each point's \code{k}th nearest neighbour. The function \code{nnwhich} is generic. 
This is the method for the class \code{"ppx"}. If there are no points in the pattern, a numeric vector of length zero is returned. If there is only one point, then the nearest neighbour is undefined, and a value of \code{NA} is returned. In general if the number of points is less than or equal to \code{k}, then a vector of \code{NA}'s is returned. To evaluate the \emph{distance} between a point and its nearest neighbour, use \code{\link{nndist}}. To find the nearest neighbours from one point pattern to another point pattern, use \code{\link{nncross}}. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. } \section{Warnings}{ A value of \code{NA} is returned if there is only one point in the point pattern. } \seealso{ \code{\link{nnwhich}}, \code{\link{nndist}}, \code{\link{nncross}} } \examples{ df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5)) X <- ppx(data=df) m <- nnwhich(X) m2 <- nnwhich(X, k=2) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/bdist.pixels.Rd0000644000176200001440000000553513160710571015600 0ustar liggesusers\name{bdist.pixels} \alias{bdist.pixels} \title{Distance to Boundary of Window} \description{ Computes the distances from each pixel in a window to the boundary of the window. } \usage{ bdist.pixels(w, \dots, style="image", method=c("C", "interpreted")) } \arguments{ \item{w}{A window (object of class \code{"owin"}).} \item{\dots}{Arguments passed to \code{\link{as.mask}} to determine the pixel resolution.} \item{style}{Character string determining the format of the output: either \code{"matrix"}, \code{"coords"} or \code{"image"}. } \item{method}{Choice of algorithm to use when \code{w} is polygonal.} } \value{ If \code{style="image"}, a pixel image (object of class \code{"im"}) containing the distances from each pixel in the image raster to the boundary of the window. 
If \code{style="matrix"}, a matrix giving the distances from each pixel in the image raster to the boundary of the window. Rows of this matrix correspond to the \eqn{y} coordinate and columns to the \eqn{x} coordinate. If \code{style="coords"}, a list with three components \code{x,y,z}, where \code{x,y} are vectors of length \eqn{m,n} giving the \eqn{x} and \eqn{y} coordinates respectively, and \code{z} is an \eqn{m \times n}{m x n} matrix such that \code{z[i,j]} is the distance from \code{(x[i],y[j])} to the boundary of the window. Rows of this matrix correspond to the \eqn{x} coordinate and columns to the \eqn{y} coordinate. This result can be plotted with \code{persp}, \code{image} or \code{contour}. } \details{ This function computes, for each pixel \eqn{u} in the window \code{w}, the shortest distance \eqn{d(u, W^c)}{dist(u, W')} from \eqn{u} to the boundary of \eqn{W}. If the window is a binary mask then the distance from each pixel to the boundary is computed using the distance transform algorithm \code{\link{distmap.owin}}. The result is equivalent to \code{distmap(W, invert=TRUE)}. If the window is a rectangle or a polygonal region, the grid of pixels is determined by the arguments \code{"\dots"} passed to \code{\link{as.mask}}. The distance from each pixel to the boundary is calculated exactly, using analytic geometry. This is slower but more accurate than in the case of a binary mask. For software testing purposes, there are two implementations available when \code{w} is a polygon: the default is \code{method="C"} which is much faster than \code{method="interpreted"}. } \seealso{ \code{\link{owin.object}}, \code{\link{erosion}}, \code{\link{bdist.points}}, \code{\link{bdist.tiles}}, \code{\link{distmap.owin}}. 
} \examples{ u <- owin(c(0,1),c(0,1)) d <- bdist.pixels(u, eps=0.01) image(d) d <- bdist.pixels(u, eps=0.01, style="matrix") mean(d >= 0.1) # value is approx (1 - 2 * 0.1)^2 = 0.64 } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/simulate.mppm.Rd0000644000176200001440000000354113160710621015752 0ustar liggesusers\name{simulate.mppm} \alias{simulate.mppm} \title{Simulate a Point Process Model Fitted to Several Point Patterns} \description{ Generates simulated realisations from a point process model that was fitted to several point patterns. } \usage{ \method{simulate}{mppm}(object, nsim=1, \dots, verbose=TRUE) } \arguments{ \item{object}{ Point process model fitted to several point patterns. An object of class \code{"mppm"}. } \item{nsim}{ Number of simulated realisations (of each original pattern). } \item{\dots}{ Further arguments passed to \code{\link{simulate.ppm}} to control the simulation. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"mppm"} of fitted point process models for replicated point pattern data. The result is a hyperframe with \code{n} rows and \code{nsim} columns, where \code{n} is the number of original point pattern datasets to which the model was fitted. Each column of the hyperframe contains a simulated version of the original data. For each of the original point pattern datasets, the fitted model for this dataset is extracted using \code{\link{subfits}}, then \code{nsim} simulated realisations of this model are generated using \code{\link{simulate.ppm}}, and these are stored in the corresponding row of the output. } \value{ A hyperframe. 
} \examples{ H <- hyperframe(Bugs=waterstriders) fit <- mppm(Bugs ~ id, H) y <- simulate(fit, nsim=2) y plot(y[1,,drop=TRUE], main="Simulations for Waterstriders pattern 1") plot(y[,1,drop=TRUE], main="Simulation 1 for each Waterstriders pattern") } \seealso{ \code{\link{mppm}}, \code{\link{simulate.ppm}}. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat/man/im.Rd0000644000176200001440000001165713160710621013573 0ustar liggesusers\name{im} \alias{im} \title{Create a Pixel Image Object} \description{ Creates an object of class \code{"im"} representing a two-dimensional pixel image. } \usage{ im(mat, xcol=seq_len(ncol(mat)), yrow=seq_len(nrow(mat)), xrange=NULL, yrange=NULL, unitname=NULL) } \arguments{ \item{mat}{ matrix or vector containing the pixel values of the image. } \item{xcol}{ vector of \eqn{x} coordinates for the pixel grid } \item{yrow}{ vector of \eqn{y} coordinates for the pixel grid } \item{xrange,yrange}{ Optional. Vectors of length 2 giving the \eqn{x} and \eqn{y} limits of the enclosing rectangle. (Ignored if \code{xcol}, \code{yrow} are present.) } \item{unitname}{ Optional. Name of unit of length. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } } \details{ This function creates an object of class \code{"im"} representing a \sQuote{pixel image} or two-dimensional array of values. The pixel grid is rectangular and occupies a rectangular window in the spatial coordinate system. The pixel values are \emph{scalars}: they can be real numbers, integers, complex numbers, single characters or strings, logical values, or categorical values. A pixel's value can also be \code{NA}, meaning that no value is defined at that location, and effectively that pixel is \sQuote{outside} the window. 
Although the pixel values must be scalar, photographic colour images (i.e., with red, green, and blue brightness channels) can be represented as character-valued images in \pkg{spatstat}, using \R's standard encoding of colours as character strings. The matrix \code{mat} contains the \sQuote{greyscale} values for a rectangular grid of pixels. Note carefully that the entry \code{mat[i,j]} gives the pixel value at the location \code{(xcol[j],yrow[i])}. That is, the \bold{row} index of the matrix \code{mat} corresponds to increasing \bold{y} coordinate, while the column index of \code{mat} corresponds to increasing \bold{x} coordinate. Thus \code{yrow} has one entry for each row of \code{mat} and \code{xcol} has one entry for each column of \code{mat}. Under the usual convention in \R, a correct display of the image would be obtained by transposing the matrix, e.g. \code{image.default(xcol, yrow, t(mat))}, if you wanted to do it by hand. The entries of \code{mat} may be numeric (real or integer), complex, logical, character, or factor values. If \code{mat} is not a matrix, it will be converted into a matrix with \code{nrow(mat) = length(yrow)} and \code{ncol(mat) = length(xcol)}. To make a factor-valued image, note that \R has a quirky way of handling matrices with factor-valued entries. The command \code{\link{matrix}} cannot be used directly, because it destroys factor information. To make a factor-valued image, do one of the following: \itemize{ \item Create a \code{factor} containing the pixel values, say \code{mat <- factor(.....)}, and then assign matrix dimensions to it by \code{dim(mat) <- c(nr, nc)} where \code{nr, nc} are the numbers of rows and columns. The resulting object \code{mat} is both a factor and a vector. \item Supply \code{mat} as a one-dimensional factor and specify the arguments \code{xcol} and \code{yrow} to determine the dimensions of the image. 
\item Use the functions \code{\link{cut.im}} or \code{\link{eval.im}} to make factor-valued images from other images.
} \usage{ \method{identify}{ppp}(x, \dots) \method{identify}{lpp}(x, \dots) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{identify.default}}. } } \value{ If \code{x} is unmarked, the result is a vector containing the serial numbers of the points in the pattern \code{x} that were identified. If \code{x} is marked, the result is a 2-column matrix, the first column containing the serial numbers and the second containing the marks for these points. } \details{ This is a method for the generic function \code{\link[graphics]{identify}} for point pattern objects. The point pattern \code{x} should first be plotted using \code{\link{plot.ppp}} or \code{\link{plot.lpp}} as appropriate. Then \code{identify(x)} reads the position of the graphics pointer each time the left mouse button is pressed. It then finds the point of the pattern \code{x} closest to the mouse position. If this closest point is sufficiently close to the mouse pointer, its index (and its mark if any) will be returned as part of the value of the call. Each time a point of the pattern is identified, text will be displayed next to the point, showing its serial number (if \code{x} is unmarked) or its mark value (if \code{x} is marked). } \seealso{ \code{\link[graphics]{identify}}, \code{\link{clickppp}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{iplot} spatstat/man/vcov.slrm.Rd0000644000176200001440000000660713160710621015116 0ustar liggesusers\name{vcov.slrm} \alias{vcov.slrm} \title{Variance-Covariance Matrix for a Fitted Spatial Logistic Regression} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a point process model that was fitted by spatial logistic regression. 
} \usage{ \method{vcov}{slrm}(object, \dots, what=c("vcov", "corr", "fisher", "Fisher")) } \arguments{ \item{object}{A fitted point process model of class \code{"slrm"}.} \item{\dots}{Ignored.} \item{what}{Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical parameters in the point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. \code{object} should be an object of class \code{"slrm"}, typically produced by \code{\link{slrm}}. It represents a Poisson point process model fitted by spatial logistic regression. The canonical parameters of the fitted model \code{object} are the quantities returned by \code{coef.slrm(object)}. The function \code{vcov} calculates the variance-covariance matrix for these parameters. The argument \code{what} provides three options: \describe{ \item{\code{what="vcov"}}{ return the variance-covariance matrix of the parameter estimates } \item{\code{what="corr"}}{ return the correlation matrix of the parameter estimates } \item{\code{what="fisher"}}{ return the observed Fisher information matrix. } } In all three cases, the result is a square matrix. The rows and columns of the matrix correspond to the canonical parameters given by \code{\link{coef.slrm}(object)}. The row and column names of the matrix are also identical to the names in \code{\link{coef.slrm}(object)}. Note that standard errors and 95\% confidence intervals for the coefficients can also be obtained using \code{confint(object)} or \code{coef(summary(object))}. Standard errors for the fitted intensity can be obtained using \code{\link{predict.slrm}}. 
} \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix was either too large or too small for reliable numerical calculation. This can occur because of numerical overflow or collinearity in the covariates. } \value{ A square matrix. } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x + y) vcov(fit) vcov(fit, what="corr") vcov(fit, what="f") } \author{ \adrian and \rolf . } \seealso{ \code{\link{vcov}} for the generic, \code{\link{slrm}} for information about fitted models, \code{\link{predict.slrm}} for other kinds of calculation about the model, \code{\link[stats]{confint}} for confidence intervals. } \references{ Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. {doi: 10.1214/10-EJS581} } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/PairPiece.Rd0000644000176200001440000000746213160710571015032 0ustar liggesusers\name{PairPiece} \alias{PairPiece} \title{The Piecewise Constant Pairwise Interaction Point Process Model} \description{ Creates an instance of a pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ PairPiece(r) } \arguments{ \item{r}{vector of jump points for the potential function} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. The process is a pairwise interaction process, whose interaction potential is piecewise constant, with jumps at the distances given in the vector \eqn{r}. 
} \details{ A pairwise interaction point process in a bounded region is a stochastic point process with probability density of the form \deqn{ f(x_1,\ldots,x_n) = \alpha \prod_i b(x_i) \prod_{i < j} h(x_i, x_j) }{ f(x_1,\ldots,x_n) = alpha . product { b(x[i]) } product { h(x_i, x_j) } } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern. The first product on the right hand side is over all points of the pattern; the second product is over all unordered pairs of points of the pattern. Thus each point \eqn{x_i}{x[i]} of the pattern contributes a factor \eqn{b(x_i)}{b(x[i])} to the probability density, and each pair of points \eqn{x_i, x_j}{x[i], x[j]} contributes a factor \eqn{h(x_i,x_j)}{h(x[i], x[j])} to the density. The pairwise interaction term \eqn{h(u, v)} is called \emph{piecewise constant} if it depends only on the distance between \eqn{u} and \eqn{v}, say \eqn{h(u,v) = H(||u-v||)}, and \eqn{H} is a piecewise constant function (a function which is constant except for jumps at a finite number of places). The use of piecewise constant interaction terms was first suggested by Takacs (1986). The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant pairwise interaction is yielded by the function \code{PairPiece()}. See the examples below. The entries of \code{r} must be strictly increasing, positive numbers. They are interpreted as the points of discontinuity of \eqn{H}. It is assumed that \eqn{H(s) =1} for all \eqn{s > r_{max}}{s > rmax} where \eqn{r_{max}}{rmax} is the maximum value in \code{r}. Thus the model has as many regular parameters (see \code{\link{ppm}}) as there are entries in \code{r}. 
The \eqn{i}-th regular parameter \eqn{\theta_i}{theta[i]} is the logarithm of the value of the interaction function \eqn{H} on the interval \eqn{[r_{i-1},r_i)}{[r[i-1],r[i])}. If \code{r} is a single number, this model is similar to the Strauss process, see \code{\link{Strauss}}. The difference is that in \code{PairPiece} the interaction function is continuous on the right, while in \code{\link{Strauss}} it is continuous on the left. The analogue of this model for multitype point processes has not yet been implemented. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}} \code{\link{rmh.ppm}} } \examples{ PairPiece(c(0.1,0.2)) # prints a sensible description of itself data(cells) \dontrun{ ppm(cells, ~1, PairPiece(r = c(0.05, 0.1, 0.2))) # fit a stationary piecewise constant pairwise interaction process } ppm(cells, ~polynom(x,y,3), PairPiece(c(0.05, 0.1))) # nonstationary process with log-cubic polynomial trend } \references{ Takacs, R. (1986) Estimator for the pair potential of a Gibbsian point process. \emph{Statistics} \bold{17}, 429--433. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/bw.relrisk.Rd0000644000176200001440000000700313160710571015242 0ustar liggesusers\name{bw.relrisk} \alias{bw.relrisk} \title{ Cross Validated Bandwidth Selection for Relative Risk Estimation } \description{ Uses cross-validation to select a smoothing bandwidth for the estimation of relative risk. } \usage{ bw.relrisk(X, method = "likelihood", nh = spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} which has factor valued marks). } \item{method}{ Character string determining the cross-validation method. Current options are \code{"likelihood"}, \code{"leastsquares"} or \code{"weightedleastsquares"}. } \item{nh}{ Number of trial values of smoothing bandwith \code{sigma} to consider. The default is 32. 
} \item{hmin, hmax}{ Optional. Numeric values. Range of trial values of smoothing bandwith \code{sigma} to consider. There is a sensible default. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth for the nonparametric estimation of relative risk using \code{\link{relrisk}}. Consider the indicators \eqn{y_{ij}}{y[i,j]} which equal \eqn{1} when data point \eqn{x_i}{x[i]} belongs to type \eqn{j}, and equal \eqn{0} otherwise. For a particular value of smoothing bandwidth, let \eqn{\hat p_j(u)}{p*[j](u)} be the estimated probabilities that a point at location \eqn{u} will belong to type \eqn{j}. Then the bandwidth is chosen to minimise either the likelihood, the squared error, or the approximately standardised squared error, of the indicators \eqn{y_{ij}}{y[i,j]} relative to the fitted values \eqn{\hat p_j(x_i)}{p*[j](x[i])}. See Diggle (2003). The result is a numerical value giving the selected bandwidth \code{sigma}. The result also belongs to the class \code{"bw.optim"} allowing it to be printed and plotted. The plot shows the cross-validation criterion as a function of bandwidth. The range of values for the smoothing bandwidth \code{sigma} is set by the arguments \code{hmin, hmax}. There is a sensible default, based on multiples of Stoyan's rule of thumb \code{\link{bw.stoyan}}. If the optimal bandwidth is achieved at an endpoint of the interval \code{[hmin, hmax]}, the algorithm will issue a warning (unless \code{warn=FALSE}). If this occurs, then it is probably advisable to expand the interval by changing the arguments \code{hmin, hmax}. Computation time depends on the number \code{nh} of trial values considered, and also on the range \code{[hmin, hmax]} of values considered, because larger values of \code{sigma} require calculations involving more pairs of data points. 
} \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{relrisk}}, \code{\link{bw.stoyan}} } \examples{ data(urkiola) \testonly{op <- spatstat.options(n.bandwidth=8)} b <- bw.relrisk(urkiola) b plot(b) b <- bw.relrisk(urkiola, hmax=20) plot(b) \testonly{spatstat.options(op)} } \references{ Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Kelsall, J.E. and Diggle, P.J. (1995) Kernel estimation of relative risk. \emph{Bernoulli} \bold{1}, 3--16. } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/HierStraussHard.Rd0000644000176200001440000001172513160710571016241 0ustar liggesusers\name{HierStraussHard} \alias{HierStraussHard} \title{The Hierarchical Strauss Hard Core Point Process Model} \description{ Creates an instance of the hierarchical Strauss-hard core point process model which can then be fitted to point pattern data. } \usage{ HierStraussHard(iradii, hradii=NULL, types=NULL, archy=NULL) } \arguments{ \item{iradii}{Matrix of interaction radii} \item{hradii}{Optional matrix of hard core distances} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical Strauss-hard core process with interaction radii \eqn{iradii[i,j]} and hard core distances \eqn{hradii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). 
It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. The hierarchical version of the (stationary) Strauss hard core process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]}, hard core distances \eqn{h_{ij}}{h[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density \bold{provided} \eqn{i \le j}{i <= j}. If any pair of points of types \eqn{i} and \eqn{j} lies closer than \eqn{h_{ij}}{h[i,j]} units apart, the configuration of points is impossible (probability density zero). The nonstationary hierarchical Strauss hard core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical Strauss hard core process pairwise interaction is yielded by the function \code{HierStraussHard()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierStraussHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. 
The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrices \code{iradii} and \code{hradii} must be square, with entries which are either positive numbers or zero or \code{NA}. A value of zero or \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii and hard core distances are specified in \code{HierStraussHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierStraussHard()}. } \seealso{ \code{\link{MultiStraussHard}} for the corresponding symmetrical interaction. \code{\link{HierHard}}, \code{\link{HierStrauss}}. } \examples{ r <- matrix(c(30, NA, 40, 30), nrow=2,ncol=2) h <- matrix(c(4, NA, 10, 15), 2, 2) HierStraussHard(r, h) # prints a sensible description of itself ppm(ants ~1, HierStraussHard(r, h)) # fit the stationary hierarchical Strauss-hard core process to ants data } \author{\adrian , \rolf and \ege. } \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240. \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. 
} \keyword{spatial} \keyword{models} spatstat/man/cut.im.Rd0000644000176200001440000000311413160710571014356 0ustar liggesusers\name{cut.im} \alias{cut.im} \title{Convert Pixel Image from Numeric to Factor} \description{ Transform the values of a pixel image from numeric values into a factor. } \usage{ \method{cut}{im}(x, \dots) } \arguments{ \item{x}{ A pixel image. An object of class \code{"im"}. } \item{\dots}{ Arguments passed to \code{\link{cut.default}}. They determine the breakpoints for the mapping from numerical values to factor values. See \code{\link{cut.default}}. } } \value{ A pixel image (object of class \code{"im"}) with pixel values that are a factor. See \code{\link{im.object}}. } \details{ This simple function applies the generic \code{\link{cut}} operation to the pixel values of the image \code{x}. The range of pixel values is divided into several intervals, and each interval is associated with a level of a factor. The result is another pixel image, with the same window and pixel grid as \code{x}, but with the numeric value of each pixel discretised by replacing it by the factor level. This function is a convenient way to inspect an image and to obtain summary statistics. See the examples. To select a subset of an image, use the subset operator \code{\link{[.im}} instead. } \seealso{ \code{\link{cut}}, \code{\link{im.object}} } \examples{ # artificial image data Z <- setcov(square(1)) Y <- cut(Z, 3) Y <- cut(Z, breaks=seq(0,1,length=5)) # cut at the quartiles # (divides the image into 4 equal areas) Y <- cut(Z, quantile(Z)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/rmhcontrol.Rd0000644000176200001440000003314513160710621015351 0ustar liggesusers\name{rmhcontrol} \alias{rmhcontrol} \alias{rmhcontrol.default} \title{Set Control Parameters for Metropolis-Hastings Algorithm.} \description{ Sets up a list of parameters controlling the iterative behaviour of the Metropolis-Hastings algorithm. 
} \usage{ rmhcontrol(\dots) \method{rmhcontrol}{default}(\dots, p=0.9, q=0.5, nrep=5e5, expand=NULL, periodic=NULL, ptypes=NULL, x.cond=NULL, fixall=FALSE, nverb=0, nsave=NULL, nburn=nsave, track=FALSE, pstage=c("block", "start")) } \arguments{ \item{\dots}{Arguments passed to methods.} \item{p}{Probability of proposing a shift (as against a birth/death).} \item{q}{Conditional probability of proposing a death given that a birth or death will be proposed.} \item{nrep}{Total number of steps (proposals) of Metropolis-Hastings algorithm that should be run.} \item{expand}{ Simulation window or expansion rule. Either a window (object of class \code{"owin"}) or a numerical expansion factor, specifying that simulations are to be performed in a domain other than the original data window, then clipped to the original data window. This argument is passed to \code{\link{rmhexpand}}. A numerical expansion factor can be in several formats: see \code{\link{rmhexpand}}. } \item{periodic}{ Logical value (or \code{NULL}) indicating whether to simulate ``periodically'', i.e. identifying opposite edges of the rectangular simulation window. A \code{NULL} value means ``undecided.'' } \item{ptypes}{For multitype point processes, the distribution of the mark attached to a new random point (when a birth is proposed)} \item{x.cond}{Conditioning points for conditional simulation.} \item{fixall}{(Logical) for multitype point processes, whether to fix the number of points of each type.} \item{nverb}{Progress reports will be printed every \code{nverb} iterations} \item{nsave,nburn}{ If these values are specified, then intermediate states of the simulation algorithm will be saved every \code{nsave} iterations, after an initial burn-in period of \code{nburn} iterations. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations. } \item{pstage}{ Character string specifying when to generate proposal points. Either \code{"start"} or \code{"block"}. 
} } \value{ An object of class \code{"rmhcontrol"}, which is essentially a list of parameter values for the algorithm. There is a \code{print} method for this class, which prints a sensible description of the parameters chosen. } \details{ The Metropolis-Hastings algorithm, implemented as \code{\link{rmh}}, generates simulated realisations of point process models. The function \code{rmhcontrol} sets up a list of parameters which control the iterative behaviour and termination of the Metropolis-Hastings algorithm, for use in a subsequent call to \code{\link{rmh}}. It also checks that the parameters are valid. (A separate function \code{\link{rmhstart}} determines the initial state of the algorithm, and \code{\link{rmhmodel}} determines the model to be simulated.) The parameters are as follows: \describe{ \item{p}{The probability of proposing a ``shift'' (as opposed to a birth or death) in the Metropolis-Hastings algorithm. If \eqn{p = 1} then the algorithm only alters existing points, so the number of points never changes, i.e. we are simulating conditionally upon the number of points. The number of points is determined by the initial state (specified by \code{\link{rmhstart}}). If \eqn{p=1} and \code{fixall=TRUE} and the model is a multitype point process model, then the algorithm only shifts the locations of existing points and does not alter their marks (types). This is equivalent to simulating conditionally upon the number of points of each type. These numbers are again specified by the initial state. If \eqn{p = 1} then no expansion of the simulation window is allowed (see \code{expand} below). The default value of \code{p} can be changed by setting the parameter \code{rmh.p} in \code{\link{spatstat.options}}. } \item{q}{The conditional probability of proposing a death (rather than a birth) given that a shift is not proposed. This is of course ignored if \code{p} is equal to 1. 
The default value of \code{q} can be changed by setting the parameter \code{rmh.q} in \code{\link{spatstat.options}}. } \item{nrep}{The number of repetitions or iterations to be made by the Metropolis-Hastings algorithm. It should be large. The default value of \code{nrep} can be changed by setting the parameter \code{rmh.nrep} in \code{\link{spatstat.options}}. } \item{expand}{ Either a number or a window (object of class \code{"owin"}). Indicates that the process is to be simulated on a domain other than the original data window \code{w}, then clipped to \code{w} when the algorithm has finished. This would often be done in order to approximate the simulation of a stationary process (Geyer, 1999) or more generally a process existing in the whole plane, rather than just in the window \code{w}. If \code{expand} is a window object, it is taken as the larger domain in which simulation is performed. If \code{expand} is numeric, it is interpreted as an expansion factor or expansion distance for determining the simulation domain from the data window. It should be a \emph{named} scalar, such as \code{expand=c(area=2)}, \code{expand=c(distance=0.1)}, \code{expand=c(length=1.2)}. See \code{\link{rmhexpand}()} for more details. If the name is omitted, it defaults to \code{area}. Expansion is not permitted if the number of points has been fixed by setting \code{p = 1} or if the starting configuration has been specified via the argument \code{x.start} in \code{\link{rmhstart}}. If \code{expand} is \code{NULL}, this is interpreted to mean \dQuote{not yet decided}. An expansion rule will be determined at a later stage, using appropriate defaults. See \code{\link{rmhexpand}}. } \item{periodic}{A logical value (or \code{NULL}) determining whether to simulate \dQuote{periodically}. If \code{periodic} is \code{TRUE}, and if the simulation window is a rectangle, then the simulation algorithm effectively identifies opposite edges of the rectangle. 
Points near the right-hand edge of the rectangle are deemed to be close to points near the left-hand edge. Periodic simulation usually gives a better approximation to a stationary point process. For periodic simulation, the simulation window must be a rectangle. (The simulation window is determined by \code{expand} as described above.) The value \code{NULL} means \sQuote{undecided}. The decision is postponed until \code{\link{rmh}} is called. Depending on the point process model to be simulated, \code{rmh} will then set \code{periodic=TRUE} if the simulation window is expanded \emph{and} the expanded simulation window is rectangular; otherwise \code{periodic=FALSE}. Note that \code{periodic=TRUE} is only permitted when the simulation window (i.e. the expanded window) is rectangular. } \item{ptypes}{A vector of probabilities (summing to 1) to be used in assigning a random type to a new point. Defaults to a vector each of whose entries is \eqn{1/nt} where \eqn{nt} is the number of types for the process. Convergence of the simulation algorithm should be improved if \code{ptypes} is close to the relative frequencies of the types which will result from the simulation. } \item{x.cond}{ If this argument is given, then \emph{conditional simulation} will be performed, and \code{x.cond} specifies the location of the fixed points as well as the type of conditioning. It should be either a point pattern (object of class \code{"ppp"}) or a \code{list(x,y)} or a \code{data.frame}. See the section on Conditional Simulation. } \item{fixall}{A logical scalar specifying whether to condition on the number of points of each type. Meaningful only if a marked process is being simulated, and if \eqn{p = 1}. A warning message is given if \code{fixall} is set equal to \code{TRUE} when it is not meaningful. } \item{nverb}{An integer specifying how often ``progress reports'' (which consist simply of the number of repetitions completed) should be printed out. 
If \code{nverb} is left at 0 (the default), the simulation proceeds silently.
} \item{conditioning on the realisation in a subwindow:}{ We require that the point process \eqn{X} should, within a specified sub-window \eqn{V}, coincide with a specified point pattern \eqn{y}. We simulate from the conditional distribution of \eqn{X} given \eqn{X \cap V = y}{(X intersect V) = y}. } \item{Palm conditioning:}{ We require that the point process \eqn{X} include a specified list of points \eqn{y}. We simulate from the point process with probability density \eqn{g(x) = c f(x \cup y)}{g(x) = c * f(x union y)} where \eqn{f} is the probability density of the original process \eqn{X}, and \eqn{c} is a normalising constant. } } To achieve each of these types of conditioning we do as follows: \describe{ \item{conditioning on the total number of points:}{ Set \code{p=1}. The number of points is determined by the initial state of the simulation: see \code{\link{rmhstart}}. } \item{conditioning on the number of points of each type:}{ Set \code{p=1} and \code{fixall=TRUE}. The number of points of each type is determined by the initial state of the simulation: see \code{\link{rmhstart}}. } \item{conditioning on the realisation in a subwindow:}{ Set \code{x.cond} to be a point pattern (object of class \code{"ppp"}). Its window \code{V=Window(x.cond)} becomes the conditioning subwindow \eqn{V}. } \item{Palm conditioning:}{ Set \code{x.cond} to be a \code{list(x,y)} or \code{data.frame} with two columns containing the coordinates of the points, or a \code{list(x,y,marks)} or \code{data.frame} with three columns containing the coordinates and marks of the points. } } The arguments \code{x.cond}, \code{p} and \code{fixall} can be combined. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. 
} \seealso{ \code{\link{rmh}}, \code{\link{rmhmodel}}, \code{\link{rmhstart}}, \code{\link{rmhexpand}}, \code{\link{spatstat.options}} } \examples{ # parameters given as named arguments c1 <- rmhcontrol(p=0.3,periodic=TRUE,nrep=1e6,nverb=1e5) # parameters given as a list liz <- list(p=0.9, nrep=1e4) c2 <- rmhcontrol(liz) # parameters given in rmhcontrol object c3 <- rmhcontrol(c1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/pairdist.pp3.Rd0000644000176200001440000000365113160710621015501 0ustar liggesusers\name{pairdist.pp3} \alias{pairdist.pp3} \title{Pairwise distances in Three Dimensions} \description{ Computes the matrix of distances between all pairs of points in a three-dimensional point pattern. } \usage{ \method{pairdist}{pp3}(X, \dots, periodic=FALSE, squared=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ This is a method for the generic function \code{pairdist}. Given a three-dimensional point pattern \code{X} (an object of class \code{"pp3"}), this function computes the Euclidean distances between all pairs of points in \code{X}, and returns the matrix of distances. Alternatively if \code{periodic=TRUE} and the window containing \code{X} is a box, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite faces of the box are regarded as equivalent. This is meaningless if the window is not a box. If \code{squared=TRUE} then the \emph{squared} Euclidean distances \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}. 
The squared distances are faster to calculate, and are sufficient for many purposes (such as finding the nearest neighbour of a point). } \seealso{ \code{\link{pairdist}}, \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{K3est}} } \examples{ X <- runifpoint3(20) d <- pairdist(X) d <- pairdist(X, periodic=TRUE) d <- pairdist(X, squared=TRUE) } \author{ \adrian based on two-dimensional code by Pavel Grabarnik. } \keyword{spatial} \keyword{math} spatstat/man/convexify.Rd0000644000176200001440000000334213160710571015174 0ustar liggesusers\name{convexify} \alias{convexify} \title{ Weil's Convexifying Operation } \description{ Converts the window \code{W} into a convex set by rearranging the edges, preserving spatial orientation of each edge. } \usage{ convexify(W, eps) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). } \item{eps}{ Optional. Minimum edge length of polygonal approximation, if \code{W} is not a polygon. } } \details{ Weil (1995) defined a convexification operation for windows \eqn{W} that belong to the convex ring (that is, for any \eqn{W} which is a finite union of convex sets). Note that this is \bold{not} the same as the convex hull. The convexified set \eqn{f(W)} has the same total boundary length as \eqn{W} and the same distribution of orientations of the boundary. If \eqn{W} is a polygonal set, then the convexification \eqn{f(W)} is obtained by rearranging all the edges of \eqn{W} in order of their spatial orientation. The argument \code{W} must be a window. If it is not already a polygonal window, it is first converted to one, using \code{\link{simplify.owin}}. The edges are sorted in increasing order of angular orientation and reassembled into a convex polygon. } \value{ A window (object of class \code{"owin"}). } \references{ Weil, W. (1995) The estimation of mean particle shape and mean particle number in overlapping particle systems in the plane. \emph{Advances in Applied Probability} \bold{27}, 102--119. 
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link{convexhull}} for the convex hull of a window. } \examples{ opa <- par(mfrow=c(1,2)) plot(letterR) plot(convexify(letterR)) par(opa) } \keyword{spatial} \keyword{utilities} spatstat/man/bw.stoyan.Rd0000644000176200001440000000336413160710571015112 0ustar liggesusers\name{bw.stoyan} \alias{bw.stoyan} \title{ Stoyan's Rule of Thumb for Bandwidth Selection } \description{ Computes a rough estimate of the appropriate bandwidth for kernel smoothing estimators of the pair correlation function and other quantities. } \usage{ bw.stoyan(X, co=0.15) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{co}{ Coefficient appearing in the rule of thumb. See Details. } } \details{ Estimation of the pair correlation function and other quantities by smoothing methods requires a choice of the smoothing bandwidth. Stoyan and Stoyan (1995, equation (15.16), page 285) proposed a rule of thumb for choosing the smoothing bandwidth. For the Epanechnikov kernel, the rule of thumb is to set the kernel's half-width \eqn{h} to \eqn{0.15/\sqrt{\lambda}}{0.15/sqrt(\lambda)} where \eqn{\lambda}{\lambda} is the estimated intensity of the point pattern, typically computed as the number of points of \code{X} divided by the area of the window containing \code{X}. For a general kernel, the corresponding rule is to set the standard deviation of the kernel to \eqn{\sigma = 0.15/\sqrt{5\lambda}}{\sigma = 0.15/sqrt(5 * \lambda)}. The coefficient \eqn{0.15} can be tweaked using the argument \code{co}. } \value{ A numerical value giving the selected bandwidth (the standard deviation of the smoothing kernel). } \seealso{ \code{\link{pcf}}, \code{\link{bw.relrisk}} } \examples{ data(shapley) bw.stoyan(shapley) } \references{ Stoyan, D. and Stoyan, H. (1995) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/summary.splitppp.Rd0000644000176200001440000000176513160710621016534 0ustar liggesusers\name{summary.splitppp} \alias{summary.splitppp} \title{Summary of a Split Point Pattern} \description{ Prints a useful summary of a split point pattern. } \usage{ \method{summary}{splitppp}(object, \dots) } \arguments{ \item{object}{ Split point pattern (object of class \code{"splitppp"}, effectively a list of point patterns, usually created by \code{\link{split.ppp}}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{summary}}. An object of the class \code{"splitppp"} is effectively a list of point patterns (objects of class \code{"ppp"}) representing different sub-patterns of an original point pattern. This function extracts a useful summary of each of the sub-patterns. } \seealso{ \code{\link{summary}}, \code{\link{split}}, \code{\link{split.ppp}} } \examples{ data(amacrine) # multitype point pattern summary(split(amacrine)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/mean.im.Rd0000644000176200001440000000446613160710621014512 0ustar liggesusers\name{mean.im} %DontDeclareMethods \alias{mean.im} \alias{median.im} \title{Mean and Median of Pixel Values in an Image} \description{ Calculates the mean or median of the pixel values in a pixel image. } %NAMESPACE S3method("mean", "im") %NAMESPACE S3method("median", "im") \usage{ ## S3 method for class 'im' ## mean(x, trim=0, na.rm=TRUE, ...) ## S3 method for class 'im' ## median(x, na.rm=TRUE) [R < 3.4.0] ## median(x, na.rm=TRUE, ...) [R >= 3.4.0] } \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{na.rm}{ Logical value indicating whether \code{NA} values should be stripped before the computation proceeds. } \item{trim}{ The fraction (0 to 0.5) of pixel values to be trimmed from each end of their range, before the mean is computed. 
} \item{\dots}{ Ignored. } } \details{ These functions calculate the mean and median of the pixel values in the image \code{x}. An object of class \code{"im"} describes a pixel image. See \code{\link{im.object}}) for details of this class. The function \code{mean.im} is a method for the generic function \code{\link[base]{mean}} for the class \code{"im"}. Similarly \code{median.im} is a method for the generic \code{\link[stats]{median}}. If the image \code{x} is logical-valued, the mean value of \code{x} is the fraction of pixels that have the value \code{TRUE}. The median is not defined. If the image \code{x} is factor-valued, then the mean of \code{x} is the mean of the integer codes of the pixel values. The median is are not defined. Other mathematical operations on images are supported by \code{\link{Math.im}}, \code{\link{Summary.im}} and \code{\link{Complex.im}}. Other information about an image can be obtained using \code{\link{summary.im}} or \code{\link{quantile.im}}. } \value{ A single number. } \seealso{ \code{\link{Math.im}} for other operations. Generics and default methods: \code{\link[base]{mean}}, \code{\link[stats]{median}}. \code{\link{quantile.im}}, \code{\link{anyNA.im}}, \code{\link{im.object}}, \code{\link{summary.im}}. } \examples{ X <- as.im(function(x,y) {x^2}, unit.square()) mean(X) median(X) mean(X, trim=0.05) } \author{ \spatstatAuthors and Kassel Hingee. } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/layout.boxes.Rd0000644000176200001440000000255513160710621015617 0ustar liggesusers\name{layout.boxes} \alias{layout.boxes} \title{ Generate a Row or Column Arrangement of Rectangles. } \description{ A simple utility to generate a row or column of boxes (rectangles) for use in point-and-click panels. } \usage{ layout.boxes(B, n, horizontal = FALSE, aspect = 0.5, usefrac = 0.9) } \arguments{ \item{B}{ Bounding rectangle for the boxes. An object of class \code{"owin"}. } \item{n}{ Integer. The number of boxes. 
} \item{horizontal}{ Logical. If \code{TRUE}, arrange the boxes in a horizontal row. If \code{FALSE} (the default), arrange them in a vertical column. } \item{aspect}{ Aspect ratio (height/width) of each box. } \item{usefrac}{ Number between 0 and 1. The fraction of height or width of \code{B} that should be occupied by boxes. } } \details{ This simple utility generates a list of boxes (rectangles) inside the bounding box \code{B} arranged in a regular row or column. It is useful for generating the positions of the panel buttons in the function \code{\link{simplepanel}}. } \value{ A list of rectangles. } \examples{ B <- owin(c(0,10),c(0,1)) boxes <- layout.boxes(B, 5, horizontal=TRUE) plot(B, main="", col="blue") niets <- lapply(boxes, plot, add=TRUE, col="grey") } \author{\adrian and \rolf } \seealso{ \code{\link{simplepanel}} } \keyword{utilities} spatstat/man/foo.Rd0000644000176200001440000000241513160710621013741 0ustar liggesusers\name{foo} \alias{foo} \alias{plot.foo} \title{ Foo is Not a Real Name } \description{ The name \code{foo} is not a real name: it is a place holder, used to represent the name of any desired thing. The functions defined here simply print an explanation of the placeholder name \code{foo}. } \usage{ foo() \method{plot}{foo}(x, \dots) } \arguments{ \item{x}{Ignored.} \item{\dots}{Ignored.} } \details{ The name \code{foo} is used by computer scientists as a \emph{place holder}, to represent the name of any desired object or function. It is not the name of an actual object or function; it serves only as an example, to explain a concept. However, many users misinterpret this convention, and actually type the command \code{foo} or \code{foo()}. Then they email the package author to inform them that \code{foo} is not defined. To avoid this correspondence, we have now defined an object called \code{foo}. The function \code{foo()} prints a message explaining that \code{foo} is not really the name of a variable. 
The function can be executed simply by typing \code{foo} without parentheses. } \value{ Null. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{beginner}} } \examples{ foo } \keyword{documentation} spatstat/man/levelset.Rd0000644000176200001440000000403213160710621014776 0ustar liggesusers\name{levelset} \alias{levelset} \title{Level Set of a Pixel Image} \description{ Given a pixel image, find all pixels which have values less than a specified threshold value (or greater than a threshold, etc), and assemble these pixels into a window. } \usage{ levelset(X, thresh, compare="<=") } \arguments{ \item{X}{A pixel image (object of class "im")}. \item{thresh}{Threshold value. A single number or value compatible with the pixel values in \code{X}}. \item{compare}{Character string specifying one of the comparison operators \code{"<", ">", "==", "<=", ">=", "!="}. } } \details{ If \code{X} is a pixel image with numeric values, then \code{levelset(X, thresh)} finds the region of space where the pixel values are less than or equal to the threshold value \code{thresh}. This region is returned as a spatial window. The argument \code{compare} specifies how the pixel values should be compared with the threshold value. Instead of requiring pixel values to be less than or equal to \code{thresh}, you can specify that they must be less than (\code{<}), greater than (\code{>}), equal to (\code{==}), greater than or equal to (\code{>=}), or not equal to (\code{!=}) the threshold value \code{thresh}. If \code{X} has non-numeric pixel values (for example, logical or factor values) it is advisable to use only the comparisons \code{==} and \code{!=}, unless you really know what you are doing. For more complicated logical comparisons, see \code{\link{solutionset}}. } \value{ A spatial window (object of class \code{"owin"}, see \code{\link{owin.object}}) containing the pixels satisfying the constraint. 
} \seealso{ \code{\link{im.object}}, \code{\link{as.owin}}, \code{\link{solutionset}}. } \examples{ # test image X <- as.im(function(x,y) { x^2 - y^2 }, unit.square()) W <- levelset(X, 0.2) W <- levelset(X, -0.3, ">") # compute area of level set area(levelset(X, 0.1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{programming} \keyword{manip} spatstat/man/quantile.im.Rd0000644000176200001440000000203313160710621015400 0ustar liggesusers\name{quantile.im} \alias{quantile.im} \title{Sample Quantiles of Pixel Image} \description{ Compute the sample quantiles of the pixel values of a given pixel image. } \usage{ \method{quantile}{im}(x, \dots) } \arguments{ \item{x}{ A pixel image. An object of class \code{"im"}. } \item{\dots}{ Optional arguments passed to \code{\link{quantile.default}}. They determine the probabilities for which quantiles should be computed. See \code{\link{quantile.default}}. } } \value{ A vector of quantiles. } \details{ This simple function applies the generic \code{\link{quantile}} operation to the pixel values of the image \code{x}. This function is a convenient way to inspect an image and to obtain summary statistics. See the examples. } \seealso{ \code{\link{quantile}}, \code{\link{cut.im}}, \code{\link{im.object}} } \examples{ # artificial image data Z <- setcov(square(1)) # find the quartiles quantile(Z) # find the deciles quantile(Z, probs=(0:10)/10) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/quantess.Rd0000644000176200001440000000642313160710621015024 0ustar liggesusers\name{quantess} \alias{quantess} \alias{quantess.owin} \alias{quantess.ppp} \alias{quantess.im} \title{Quantile Tessellation} \description{ Divide space into tiles which contain equal amounts of stuff. 
} \usage{ quantess(M, Z, n, \dots) \method{quantess}{owin}(M, Z, n, \dots, type=2) \method{quantess}{ppp}(M, Z, n, \dots, type=2) \method{quantess}{im}(M, Z, n, \dots, type=2) } \arguments{ \item{M}{ A spatial object (such as a window, point pattern or pixel image) determining the weight or amount of stuff at each location. } \item{Z}{ A spatial covariate (a pixel image or a \code{function(x,y)}) or one of the strings \code{"x"} or \code{"y"} indicating the \eqn{x} or \eqn{y} coordinate. The range of values of \code{Z} will be broken into \code{n} bands containing equal amounts of stuff. } \item{n}{ Number of bands. A positive integer. } \item{type}{ Integer specifying the rule for calculating quantiles. Passed to \code{\link[stats]{quantile.default}}. } \item{\dots}{ Additional arguments passed to \code{\link{quadrats}} or \code{\link{tess}} defining another tessellation which should be intersected with the quantile tessellation. } } \details{ A \emph{quantile tessellation} is a division of space into pieces which contain equal amounts of stuff. The function \code{quantess} computes a quantile tessellation and returns the tessellation itself. The function \code{quantess} is generic, with methods for windows (class \code{"owin"}), point patterns (\code{"ppp"}) and pixel images (\code{"im"}). The first argument \code{M} (for mass) specifies the spatial distribution of stuff that is to be divided. If \code{M} is a window, the \emph{area} of the window is to be divided into \code{n} equal pieces. If \code{M} is a point pattern, the \emph{number of points} in the pattern is to be divided into \code{n} equal parts, as far as possible. If \code{M} is a pixel image, the pixel values are interpreted as weights, and the \emph{total weight} is to be divided into \code{n} equal parts. The second argument \code{Z} is a spatial covariate. The range of values of \code{Z} will be divided into \code{n} bands, each containing the same total weight. 
That is, we determine the quantiles of \code{Z} with weights given by \code{M}. For convenience, additional arguments \code{\dots} can be given, to further subdivide the tiles of the tessellation. The result of \code{quantess} is a tessellation of \code{as.owin(M)} determined by the quantiles of \code{Z}. } \value{ A tessellation (object of class \code{"tess"}). } \author{ Original idea by Ute Hahn. Implemented in \code{spatstat} by \adrian \rolf and \ege . } \seealso{ \code{\link{tess}}, \code{\link{quadrats}}, \code{\link{quantile}}, \code{\link{tilenames}} } \examples{ plot(quantess(letterR, "x", 5)) plot(quantess(bronzefilter, "x", 6)) points(unmark(bronzefilter)) opa <- par(mar=c(0,0,2,5)) A <- quantess(Window(bei), bei.extra$elev, 4) plot(A, ribargs=list(las=1)) B <- quantess(bei, bei.extra$elev, 4) tilenames(B) <- paste(spatstat.utils::ordinal(1:4), "quartile") plot(B, ribargs=list(las=1)) points(bei, pch=".", cex=2, col="white") par(opa) } \keyword{spatial} \keyword{manip} spatstat/man/textureplot.Rd0000644000176200001440000000666413160710621015567 0ustar liggesusers\name{textureplot} \alias{textureplot} \title{ Plot Image or Tessellation Using Texture Fill } \description{ For a factor-valued pixel image, this command plots each level of the factor using a different texture. For a tessellation, each tile is plotted using a different texture. } \usage{ textureplot(x, \dots, main, add=FALSE, clipwin=NULL, do.plot = TRUE, border=NULL, col = NULL, lwd = NULL, lty = NULL, spacing = NULL, textures=1:8, legend=TRUE, leg.side=c("right", "left", "bottom", "top"), legsep=0.1, legwid=0.2) } \arguments{ \item{x}{ A tessellation (object of class \code{"tess"} or something acceptable to \code{\link{as.tess}}) with at most 8 tiles, or a pixel image (object of class \code{"im"} or something acceptable to \code{\link{as.im}}) whose pixel values are a \code{factor} with at most 8 levels. } \item{\dots}{ Other arguments passed to \code{\link{add.texture}}. 
} \item{main}{ Character string giving a main title for the plot. } \item{add}{ Logical value indicating whether to draw on the current plot (\code{add=TRUE}) or to initialise a new plot (\code{add=FALSE}). } \item{clipwin}{ Optional. A window (object of class \code{"owin"}). Only this subset of the image will be displayed. } \item{do.plot}{ Logical. Whether to actually do the plot. } \item{border}{ Colour for drawing the boundaries between the different regions. The default (\code{border=NULL}) means to use \code{par("fg")}. Use \code{border=NA} to omit borders. } \item{col}{ Numeric value or vector giving the colour or colours in which the textures should be plotted. } \item{lwd}{ Numeric value or vector giving the line width or widths to be used. } \item{lty}{ Numeric value or vector giving the line type or types to be used. } \item{spacing}{ Numeric value or vector giving the \code{spacing} parameter for the textures. } \item{textures}{ Textures to be used for each level. Either a texture map (object of class \code{"texturemap"}) or a vector of integer codes (to be interpreted by \code{\link{add.texture}}). } \item{legend}{ Logical. Whether to display an explanatory legend. } \item{leg.side}{Position of legend relative to main plot.} \item{legsep}{ Separation between legend and main plot, as a fraction of the shortest side length of the main plot. } \item{legwid}{ Width (if vertical) or height (if horizontal) of the legend as a fraction of the shortest side length of the main plot. } } \details{ If \code{x} is a tessellation, then each tile of the tessellation is plotted and filled with a texture using \link{add.texture}. If \code{x} is a factor-valued pixel image, then for each level of the factor, the algorithm finds the region where the image takes this value, and fills the region with a texture using \code{\link{add.texture}}. } \value{ (Invisible) A texture map (object of class \code{"texturemap"}) associating a texture with each level of the factor. 
} \author{ \spatstatAuthors. } \seealso{ \code{\link{im}}, \code{\link{plot.im}}, \code{\link{add.texture}}. } \examples{ nd <- if(interactive()) 128 else 32 Z <- setcov(owin(), dimyx=nd) Zcut <- cut(Z, 3, labels=c("Lo", "Med", "Hi")) textureplot(Zcut) textureplot(dirichlet(runifpoint(6))) } \keyword{spatial} \keyword{hplot} spatstat/man/nestsplit.Rd0000644000176200001440000000450713160710621015207 0ustar liggesusers\name{nestsplit} \alias{nestsplit} \title{ Nested Split } \description{ Applies two splitting operations to a point pattern, producing a list of lists of patterns. } \usage{ nestsplit(X, \dots) } \arguments{ \item{X}{ Point pattern to be split. Object of class \code{"ppp"}. } \item{\dots}{ Data determining the splitting factors or splitting regions. See Details. } } \details{ This function splits the point pattern \code{X} into several sub-patterns using \code{\link{split.ppp}}, then splits each of the sub-patterns into sub-sub-patterns using \code{\link{split.ppp}} again. The result is a hyperframe containing the sub-sub-patterns and two factors indicating the grouping. The arguments \code{\dots} determine the two splitting factors or splitting regions. Each argument may be: \itemize{ \item a factor (of length equal to the number of points in \code{X}) \item the name of a column of marks of \code{X} (provided this column contains factor values) \item a tessellation (class \code{"tess"}) \item a pixel image (class \code{"im"}) with factor values \item a window (class \code{"owin"}) \item identified by name (in the form \code{name=value}) as one of the formal arguments of \code{\link{quadrats}} or \code{\link{tess}} } The arguments will be processed to yield a list of two splitting factors/tessellations. The splits will be applied to \code{X} consecutively to produce the sub-sub-patterns. } \value{ A hyperframe with three columns. The first column contains the sub-sub-patterns. 
The second and third columns are factors which identify the grouping according to the two splitting factors. } \author{ Original idea by Ute Hahn. Code by \adrian \rolf and \ege } \seealso{ \code{\link{split.ppp}}, \code{\link{quantess}} } \examples{ # factor and tessellation Nft <- nestsplit(amacrine, marks(amacrine), quadrats(amacrine, 3, 1)) Ntf <- nestsplit(amacrine, quadrats(amacrine, 3, 1), marks(amacrine)) Ntf # two factors big <- with(marks(betacells), area > 300) Nff <- nestsplit(betacells, "type", factor(big)) # two tessellations Tx <- quantess(redwood, "x", 4) Td <- dirichlet(runifpoint(5, Window(redwood))) Ntt <- nestsplit(redwood, Td, Tx) Ntt2 <- nestsplit(redwood, Td, ny=3) } \keyword{spatial} \keyword{manip} spatstat/man/ord.family.Rd0000644000176200001440000000321613160710621015222 0ustar liggesusers\name{ord.family} \alias{ord.family} \title{Ord Interaction Process Family} \description{ An object describing the family of all Ord interaction point processes } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the family of point process models introduced by Ord (1977). If you need to create a specific Ord-type model for use in analysis, use the function \code{\link{OrdThresh}} or \code{\link{Ord}}. Anyway, \code{ord.family} is an object of class \code{"isf"} containing a function \code{ord.family$eval} for evaluating the sufficient statistics of any Ord type point process model taking an exponential family form. } \seealso{ \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{Poisson}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}}, \code{\link{Geyer}}, \code{\link{SatPiece}}, \code{\link{Saturated}}, \code{\link{Ord}}, \code{\link{OrdThresh}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. 
\emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/linearKinhom.Rd0000644000176200001440000001326213160710621015600 0ustar liggesusers\name{linearKinhom} \alias{linearKinhom} \title{ Inhomogeneous Linear K Function } \description{ Computes an estimate of the inhomogeneous linear \eqn{K} function for a point pattern on a linear network. } \usage{ linearKinhom(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, ratio=FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{lambda}{ Intensity values for the point pattern. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Ignored. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the data points, raised to \code{normpower}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"lppm"} or \code{"ppm"}). 
If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.lppm}} or \code{\link{update.ppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{fitted.lppm}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity, when \code{lambda} is a fitted model. Supported only when \code{update=TRUE}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the inhomogeneous version of the linear \eqn{K} function from point pattern data on a linear network. If \code{lambda = NULL} the result is equivalent to the homogeneous \eqn{K} function \code{\link{linearK}}. If \code{lambda} is given, then it is expected to provide estimated values of the intensity of the point process at each point of \code{X}. The argument \code{lambda} may be a numeric vector (of length equal to the number of points in \code{X}), or a \code{function(x,y)} that will be evaluated at the points of \code{X} to yield numeric values, or a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). If \code{lambda} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). 
Each estimate is initially computed as \deqn{ \widehat K_{\rm inhom}(r) = \frac{1}{\mbox{length}(L)} \sum_i \sum_j \frac{1\{d_{ij} \le r\} e(x_i,x_j)}{\lambda(x_i)\lambda(x_j)} }{ K^inhom(r)= (1/length(L)) sum[i] sum[j] 1(d[i,j] <= r) * e(x[i],x[j])/(lambda(x[i]) * lambda(x[j])) } where \code{L} is the linear network, \eqn{d_{ij}}{d[i,j]} is the distance between points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j)}{e(x[i],x[j])} is a weight. If \code{correction="none"} then this weight is equal to 1, while if \code{correction="Ang"} the weight is \eqn{e(x_i,x_j,r) = 1/m(x_i, d_{ij})}{e(x[i],x[j],r) = 1/m(x[i],d[i,j])} where \eqn{m(u,t)} is the number of locations on the network that lie exactly \eqn{t} units distant from location \eqn{u} by the shortest path. If \code{normalise=TRUE} (the default), then the estimates described above are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{length}(L)/\sum (1/\lambda(x_i)). }{ c = length(L)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. The default value of \code{normpower} is 1 (for consistency with previous versions of \pkg{spatstat}) but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W). } } \value{ Function value table (object of class \code{"fv"}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. 
} \seealso{ \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) fit <- lppm(X ~x) K <- linearKinhom(X, lambda=fit) plot(K) } \keyword{spatial} \keyword{nonparametric} spatstat/man/cdf.test.Rd0000644000176200001440000002624513160710571014703 0ustar liggesusers\name{cdf.test} \alias{cdf.test} \alias{cdf.test.ppm} \alias{cdf.test.lppm} \alias{cdf.test.lpp} \alias{cdf.test.ppp} \alias{cdf.test.slrm} \title{Spatial Distribution Test for Point Pattern or Point Process Model} \description{ Performs a test of goodness-of-fit of a point process model. The observed and predicted distributions of the values of a spatial covariate are compared using either the Kolmogorov-Smirnov test, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or Anderson-Darling test. For non-Poisson models, a Monte Carlo test is used. } \usage{ cdf.test(...) \method{cdf.test}{ppp}(X, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE) \method{cdf.test}{ppm}(model, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) \method{cdf.test}{lpp}(X, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE) \method{cdf.test}{lppm}(model, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) \method{cdf.test}{slrm}(model, covariate, test=c("ks", "cvm", "ad"), \dots, modelname=NULL, covname=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{model}{ A fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) or fitted spatial logistic regression (object of class \code{"slrm"}). } \item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image (object of class \code{"im"}), a list of pixel images, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. 
} \item{test}{ Character string identifying the test to be performed: \code{"ks"} for Kolmogorov-Smirnov test, \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or \code{"ad"} for Anderson-Darling test. } \item{\dots}{ Arguments passed to \code{\link[stats]{ks.test}} (from the \pkg{stats} package) or \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}} (from the \pkg{goftest} package) to control the test. } \item{interpolate}{ Logical flag indicating whether to interpolate pixel images. If \code{interpolate=TRUE}, the value of the covariate at each point of \code{X} will be approximated by interpolating the nearby pixel values. If \code{interpolate=FALSE}, the nearest pixel value will be used. } \item{jitter}{ Logical flag. If \code{jitter=TRUE}, values of the covariate will be slightly perturbed at random, to avoid tied values in the test. } \item{modelname,covname}{ Character strings giving alternative names for \code{model} and \code{covariate} to be used in labelling plot axes. } \item{nsim}{ Number of simulated realisations from the \code{model} to be used for the Monte Carlo test, when \code{model} is not a Poisson process. } \item{verbose}{ Logical value indicating whether to print progress reports when performing a Monte Carlo test. } } \details{ These functions perform a goodness-of-fit test of a Poisson or Gibbs point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov test, the \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or the Anderson-Darling test. For Gibbs models, a Monte Carlo test is performed using these test statistics. 
The function \code{cdf.test} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}), point process models (\code{"ppm"} or \code{"lppm"}) and spatial logistic regression models (\code{"slrm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"}), then \code{cdf.test(X, \dots)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. For a multitype point pattern, the uniform intensity is assumed to depend on the type of point (sometimes called Complete Spatial Randomness and Independence, CSRI). \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. \item If \code{model} is a fitted spatial logistic regression (object of class \code{"slrm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model, using a classical goodness-of-fit test. Thus, you must nominate a spatial covariate for this test. If \code{X} is a point pattern that does not have marks, the argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"} containing the values of a spatial function, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. 
If \code{X} is a multitype point pattern, the argument \code{covariate} can be either a \code{function(x,y,marks)}, or a pixel image, or a list of pixel images corresponding to each possible mark value, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link{ewcdf}}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. A goodness-of-fit test of the uniform distribution is applied to these numbers using \code{stats::\link[stats]{ks.test}}, \code{goftest::\link[goftest]{cvm.test}} or \code{goftest::\link[goftest]{ad.test}}. This test was apparently first described (in the context of spatial data, and using Kolmogorov-Smirnov) by Berman (1986). See also Baddeley et al (2005). If \code{model} is not a Poisson process, then a Monte Carlo test is performed, by generating \code{nsim} point patterns which are simulated realisations of the \code{model}, re-fitting the model to each simulated point pattern, and calculating the test statistic for each fitted model. The Monte Carlo \eqn{p} value is determined by comparing the simulated values of the test statistic with the value for the original data. The return value is an object of class \code{"htest"} containing the results of the hypothesis test.
The print method for this class gives an informative summary of the test outcome. The return value also belongs to the class \code{"cdftest"} for which there is a plot method \code{\link{plot.cdftest}}. The plot method displays the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, plotted against the value of the covariate. The argument \code{jitter} controls whether covariate values are randomly perturbed, in order to avoid ties. If the original data contains any ties in the covariate (i.e. points with equal values of the covariate), and if \code{jitter=FALSE}, then the Kolmogorov-Smirnov test implemented in \code{\link[stats]{ks.test}} will issue a warning that it cannot calculate the exact \eqn{p}-value. To avoid this, if \code{jitter=TRUE} each value of the covariate will be perturbed by adding a small random value. The perturbations are normally distributed with standard deviation equal to one hundredth of the range of values of the covariate. This prevents ties, and the \eqn{p}-value is still correct. There is a very slight loss of power. } \value{ An object of class \code{"htest"} containing the results of the test. See \code{\link[stats]{ks.test}} for details. The return value can be printed to give an informative summary of the test. The value also belongs to the class \code{"cdftest"} for which there is a plot method. } \section{Warning}{ The outcome of the test involves a small amount of random variability, because (by default) the coordinates are randomly perturbed to avoid tied values. Hence, if \code{cdf.test} is executed twice, the \eqn{p}-values will not be exactly the same. To avoid this behaviour, set \code{jitter=FALSE}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{plot.cdftest}}, \code{\link{quadrat.test}}, \code{\link{berman.test}}, \code{\link[stats]{ks.test}}, \code{\link[goftest]{cvm.test}}, \code{\link[goftest]{ad.test}}, \code{\link{ppm}} } \references{ Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \examples{ op <- options(useFancyQuotes=FALSE) # test of CSR using x coordinate cdf.test(nztrees, "x") cdf.test(nztrees, "x", "cvm") cdf.test(nztrees, "x", "ad") # test of CSR using a function of x and y fun <- function(x,y){2* x + y} cdf.test(nztrees, fun) # test of CSR using an image covariate funimage <- as.im(fun, W=Window(nztrees)) cdf.test(nztrees, funimage) # fit inhomogeneous Poisson model and test model <- ppm(nztrees ~x) cdf.test(model, "x") if(interactive()) { # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X ~1) # fit correct nonuniform Poisson process fit1 <- ppm(X ~x) # test wrong model cdf.test(fit0, "x") # test right model cdf.test(fit1, "x") } # multitype point pattern cdf.test(amacrine, "x") yimage <- as.im(function(x,y){y}, W=Window(amacrine)) cdf.test(ppm(amacrine ~marks+y), yimage) options(op) } \keyword{htest} \keyword{spatial} spatstat/man/complement.owin.Rd0000644000176200001440000000400313160710571016273 0ustar liggesusers\name{complement.owin} \alias{complement.owin} \title{Take Complement of a Window} \description{ Take the set complement of a window, within its enclosing rectangle or in a larger rectangle. 
} \usage{ complement.owin(w, frame=as.rectangle(w)) } \arguments{ \item{w}{ an object of class \code{"owin"} describing a window of observation for a point pattern. } \item{frame}{ Optional. The enclosing rectangle, with respect to which the set complement is taken. } } \value{ Another object of class \code{"owin"} representing the complement of the window, i.e. the inside of the window becomes the outside. } \details{ This yields a window object (of class \code{"owin"}, see \code{\link{owin.object}}) representing the set complement of \code{w} with respect to the rectangle \code{frame}. By default, \code{frame} is the enclosing box of \code{w} (originally specified by the arguments \code{xrange} and \code{yrange} given to \code{\link{owin}} when \code{w} was created). If \code{frame} is specified, it must be a rectangle (an object of class \code{"owin"} whose type is \code{"rectangle"}) and it must be larger than the enclosing box of \code{w}. This rectangle becomes the enclosing box for the resulting window. If \code{w} is a rectangle, then \code{frame} must be specified. Otherwise an error will occur (since the complement of \code{w} in itself is empty). For rectangular and polygonal windows, the complement is computed by reversing the sign of each boundary polygon, while for binary masks it is computed by negating the pixel values. } \seealso{ \code{\link{owin}}, \code{\link{owin.object}} } \examples{ # rectangular a <- owin(c(0,1),c(0,1)) b <- owin(c(-1,2),c(-1,2)) bmina <- complement.owin(a, frame=b) # polygonal data(demopat) w <- Window(demopat) outside <- complement.owin(w) # mask w <- as.mask(Window(demopat)) outside <- complement.owin(w) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/plot.pp3.Rd0000644000176200001440000000516413160710621014641 0ustar liggesusers\name{plot.pp3} \Rdversion{1.1} \alias{plot.pp3} \title{ Plot a Three-Dimensional Point Pattern } \description{ Plots a three-dimensional point pattern. 
} \usage{ \method{plot}{pp3}(x, ..., eye=NULL, org=NULL, theta=25, phi=15, type=c("p", "n", "h"), box.back=list(col="pink"), box.front=list(col="blue", lwd=2)) } \arguments{ \item{x}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{points}} controlling the appearance of the points. } \item{eye}{ Optional. Eye position. A numeric vector of length 3 giving the location from which the scene is viewed. } \item{org}{ Optional. Origin (centre) of the view. A numeric vector of length 3 which will be at the centre of the view. } \item{theta,phi}{ Optional angular coordinates (in degrees) specifying the direction from which the scene is viewed: \code{theta} is the azimuth and \code{phi} is the colatitude. Ignored if \code{eye} is given. } \item{type}{ Type of plot: \code{type="p"} for points, \code{type="h"} for points on vertical lines, \code{type="n"} for box only. } \item{box.front,box.back}{ How to plot the three-dimensional box that contains the points. A list of graphical arguments passed to \code{\link[graphics]{segments}}, or a logical value indicating whether or not to plot the relevant part of the box. See Details. } } \details{ This is the plot method for objects of class \code{"pp3"}. It generates a two-dimensional plot of the point pattern \code{x} and its containing box as if they had been viewed from the location specified by \code{eye} (or from the direction specified by \code{theta} and \code{phi}). The edges of the box at the \sQuote{back} of the scene (as viewed from the eye position) are plotted first. Then the points are added. Finally the remaining \sQuote{front} edges are plotted. The arguments \code{box.back} and \code{box.front} specify graphical parameters for drawing the back and front edges, respectively. Alternatively \code{box.back=FALSE} specifies that the back edges shall not be drawn. 
Note that default values of arguments to \code{plot.pp3} can be set by \code{\link{spatstat.options}("par.pp3")}. } \value{Null.} \author{ \spatstatAuthors. } \seealso{ \code{\link{pp3}}, \code{\link{spatstat.options}}. } \examples{ X <- osteo$pts[[1]] plot(X, main="Osteocyte lacunae, animal 1, brick 1", cex=1.5, pch=16) plot(X, main="", box.back=list(lty=3)) } \keyword{spatial} \keyword{hplot} spatstat/man/trim.rectangle.Rd0000644000176200001440000000252313160710621016074 0ustar liggesusers\name{trim.rectangle} \alias{trim.rectangle} \title{Cut margins from rectangle} \description{ Trims a margin from a rectangle. } \usage{ trim.rectangle(W, xmargin=0, ymargin=xmargin) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). Must be of type \code{"rectangle"}. } \item{xmargin}{Width of horizontal margin to be trimmed. A single nonnegative number, or a vector of length 2 indicating margins of unequal width at left and right. } \item{ymargin}{Height of vertical margin to be trimmed. A single nonnegative number, or a vector of length 2 indicating margins of unequal width at bottom and top. } } \value{ Another object of class \code{"owin"} representing the window after margins are trimmed. } \details{ This is a simple convenience function to trim off a margin of specified width and height from each side of a rectangular window. Unequal margins can also be trimmed. } \seealso{ \code{\link{grow.rectangle}}, \code{\link{erosion}}, \code{\link{owin.object}} } \examples{ w <- square(10) # trim a margin of width 1 from all four sides square9 <- trim.rectangle(w, 1) # trim margin of width 3 from the right side # and margin of height 4 from top edge. 
v <- trim.rectangle(w, c(0,3), c(0,4)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/quadrat.test.mppm.Rd0000644000176200001440000000752413160710621016553 0ustar liggesusers\name{quadrat.test.mppm} \alias{quadrat.test.mppm} \title{Chi-Squared Test for Multiple Point Process Model Based on Quadrat Counts} \description{ Performs a chi-squared goodness-of-fit test of a Poisson point process model fitted to multiple point patterns. } \usage{ \method{quadrat.test}{mppm}(X, ...) } \arguments{ \item{X}{ An object of class \code{"mppm"} representing a point process model fitted to multiple point patterns. It should be a Poisson model. } \item{\dots}{ Arguments passed to \code{\link{quadrat.test.ppm}} which determine the size of the quadrats. } } \details{ This function performs a \eqn{\chi^2}{chi^2} test of goodness-of-fit for a Poisson point process model, based on quadrat counts. It can also be used to perform a test of Complete Spatial Randomness for a list of point patterns. The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), point process models (class \code{"ppm"}) and multiple point process models (class \code{"mppm"}). For this function, the argument \code{X} should be a multiple point process model (object of class \code{"mppm"}) obtained by fitting a point process model to a list of point patterns using the function \code{\link{mppm}}. To perform the test, the data point patterns are extracted from \code{X}. For each point pattern \itemize{ \item the window of observation is divided into rectangular tiles, and the number of data points in each tile is counted, as described in \code{\link{quadratcount}}. \item The expected number of points in each quadrat is calculated, as determined by the fitted model. } Then we perform a single \eqn{\chi^2}{chi^2} test of goodness-of-fit based on these observed and expected counts. 
} \section{Testing Complete Spatial Randomness}{ If the intention is to test Complete Spatial Randomness (CSR) there are two options: \itemize{ \item CSR with the same intensity of points in each point pattern; \item CSR with a different, unrelated intensity of points in each point pattern. } In the first case, suppose \code{P} is a list of point patterns we want to test. Then fit the multiple model \code{fit1 <- mppm(P, ~1)} which signifies a Poisson point process model with a constant intensity. Then apply \code{quadrat.test(fit1)}. In the second case, fit the model \code{fit2 <- mppm(P, ~id)} which signifies a Poisson point process with a different constant intensity for each point pattern. Then apply \code{quadrat.test(fit2)}. } \value{ An object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The \eqn{p}-value of the test is stored in the component \code{p.value}. The return value also belongs to the special class \code{"quadrat.test"}. Plotting the object will display, for each window, the position of the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. The return value also has an attribute \code{"components"} which is a list containing the results of \eqn{\chi^2}{chi^2} tests of goodness-of-fit for each individual point pattern. } \seealso{ \code{\link{mppm}}, \code{\link{quadrat.test}} } \examples{ H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. 
Implemented by \adrian \rolf and \ege } \keyword{spatial} \keyword{htest} spatstat/man/as.fv.Rd0000644000176200001440000000613313160710571014200 0ustar liggesusers\name{as.fv} \alias{as.fv} \alias{as.fv.fv} \alias{as.fv.fasp} \alias{as.fv.data.frame} \alias{as.fv.matrix} \alias{as.fv.minconfit} \alias{as.fv.dppm} \alias{as.fv.kppm} \alias{as.fv.bw.optim} \title{Convert Data To Class fv} \description{ Converts data into a function table (an object of class \code{"fv"}). } \usage{ as.fv(x) \method{as.fv}{fv}(x) \method{as.fv}{data.frame}(x) \method{as.fv}{matrix}(x) \method{as.fv}{fasp}(x) \method{as.fv}{minconfit}(x) \method{as.fv}{dppm}(x) \method{as.fv}{kppm}(x) \method{as.fv}{bw.optim}(x) } \arguments{ \item{x}{Data which will be converted into a function table} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This command converts data \code{x}, that could be interpreted as the values of a function, into a function value table (object of the class \code{"fv"} as described in \code{\link{fv.object}}). This object can then be plotted easily using \code{\link{plot.fv}}. The dataset \code{x} may be any of the following: \itemize{ \item an object of class \code{"fv"}; \item a matrix or data frame with at least two columns; \item an object of class \code{"fasp"}, representing an array of \code{"fv"} objects. \item an object of class \code{"minconfit"}, giving the results of a minimum contrast fit by the command \code{\link{mincontrast}}. \item an object of class \code{"kppm"}, representing a fitted Cox or cluster point process model, obtained from the model-fitting command \code{\link{kppm}}; \item an object of class \code{"dppm"}, representing a fitted determinantal point process model, obtained from the model-fitting command \code{\link{dppm}}; \item an object of class \code{"bw.optim"}, representing an optimal choice of smoothing bandwidth by a cross-validation method, obtained from commands like \code{\link{bw.diggle}}. 
} The function \code{as.fv} is generic, with methods for each of the classes listed above. The behaviour is as follows: \itemize{ \item If \code{x} is an object of class \code{"fv"}, it is returned unchanged. \item If \code{x} is a matrix or data frame, the first column is interpreted as the function argument, and subsequent columns are interpreted as values of the function computed by different methods. \item If \code{x} is an object of class \code{"fasp"} representing an array of \code{"fv"} objects, these are combined into a single \code{"fv"} object. \item If \code{x} is an object of class \code{"minconfit"}, or an object of class \code{"kppm"} or \code{"dppm"}, the result is a function table containing the observed summary function and the best fit summary function. \item If \code{x} is an object of class \code{"bw.optim"}, the result is a function table of the optimisation criterion as a function of the smoothing bandwidth. } } \examples{ r <- seq(0, 1, length=101) x <- data.frame(r=r, y=r^2) as.fv(x) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/varblock.Rd0000644000176200001440000001050213160710621014755 0ustar liggesusers\name{varblock} \alias{varblock} \title{ Estimate Variance of Summary Statistic by Subdivision } \description{ This command estimates the variance of any summary statistic (such as the \eqn{K}-function) by spatial subdivision of a single point pattern dataset. } \usage{ varblock(X, fun = Kest, blocks = quadrats(X, nx = nx, ny = ny), \dots, nx = 3, ny = nx, confidence=0.95) } \arguments{ \item{X}{ Point pattern dataset (object of class \code{"ppp"}). } \item{fun}{ Function that computes the summary statistic. } \item{blocks}{ Optional. A tessellation that specifies the division of the space into blocks. } \item{\dots}{ Arguments passed to \code{fun}. } \item{nx,ny}{ Optional. Number of rectangular blocks in the \eqn{x} and \eqn{y} directions. Incompatible with \code{blocks}. 
} \item{confidence}{ Confidence level, as a fraction between 0 and 1. } } \details{ This command computes an estimate of the variance of the summary statistic \code{fun(X)} from a single point pattern dataset \code{X} using a subdivision method. It can be used to plot \bold{confidence intervals} for the true value of a summary function such as the \eqn{K}-function. The window containing \code{X} is divided into pieces by an \code{nx * ny} array of rectangles (or is divided into pieces of more general shape, according to the argument \code{blocks} if it is present). The summary statistic \code{fun} is applied to each of the corresponding sub-patterns of \code{X} as described below. Then the pointwise sample mean, sample variance and sample standard deviation of these summary statistics are computed. Then pointwise confidence intervals are computed, for the specified level of confidence, defaulting to 95 percent. The variance is estimated by equation (4.21) of Diggle (2003, page 52). This assumes that the point pattern \code{X} is stationary. For further details see Diggle (2003, pp 52--53). The estimate of the summary statistic from each block is computed as follows. For most functions \code{fun}, the estimate from block \code{B} is computed by finding the subset of \code{X} consisting of points that fall inside \code{B}, and applying \code{fun} to these points, by calling \code{fun(X[B])}. However if \code{fun} is the \eqn{K}-function \code{\link{Kest}}, or any function which has an argument called \code{domain}, the estimate for each block \code{B} is computed by calling \code{fun(X, domain=B)}. In the case of the \eqn{K}-function this means that the estimate from block \code{B} is computed by counting pairs of points in which the \emph{first} point lies in \code{B}, while the second point may lie anywhere. } \section{Errors}{ If the blocks are too small, there may be insufficient data in some blocks, and the function \code{fun} may report an error. 
If this happens, you need to take larger blocks. An error message about incompatibility may occur. The different function estimates may be incompatible in some cases, for example, because they use different default edge corrections (typically because the tiles of the tessellation are not the same kind of geometric object as the window of \code{X}, or because the default edge correction depends on the number of points). To prevent this, specify the choice of edge correction, in the \code{correction} argument to \code{fun}, if it has one. An alternative to \code{varblock} is Loh's mark bootstrap \code{\link{lohboot}}. } \value{ A function value table (object of class \code{"fv"}) that contains the result of \code{fun(X)} as well as the sample mean, sample variance and sample standard deviation of the block estimates, together with the upper and lower two-standard-deviation confidence limits. } \references{ Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{ \adrian and \rolf } \seealso{ \code{\link{tess}}, \code{\link{quadrats}} for basic manipulation. \code{\link{lohboot}} for an alternative bootstrap technique. } \examples{ v <- varblock(amacrine, Kest, nx=4, ny=2) v <- varblock(amacrine, Kcross, nx=4, ny=2) if(interactive()) plot(v, iso ~ r, shade=c("hiiso", "loiso")) } \keyword{nonparametric} \keyword{spatial} spatstat/man/reflect.Rd0000644000176200001440000000223413160710621014601 0ustar liggesusers\name{reflect} \alias{reflect} \alias{reflect.im} \alias{reflect.default} \title{Reflect In Origin} \description{ Reflects a geometrical object through the origin. } \usage{ reflect(X) \method{reflect}{im}(X) \method{reflect}{default}(X) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} } \value{ Another object of the same type, representing the result of reflection. 
} \details{ The object \code{X} is reflected through the origin. That is, each point in \code{X} with coordinates \eqn{(x,y)} is mapped to the position \eqn{(-x, -y)}. This is equivalent to applying the affine transformation with matrix \code{diag(c(-1,-1))}. It is also equivalent to rotation about the origin by 180 degrees. The command \code{reflect} is generic, with a method for pixel images and a default method. } \seealso{ \code{\link{affine}}, \code{\link{flipxy}} } \examples{ plot(reflect(as.im(letterR))) plot(reflect(letterR), add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/shift.ppp.Rd0000644000176200001440000000361113160710621015070 0ustar liggesusers\name{shift.ppp} \alias{shift.ppp} \title{Apply Vector Translation To Point Pattern} \description{ Applies a vector shift to a point pattern. } \usage{ \method{shift}{ppp}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \value{ Another point pattern (of class \code{"ppp"}) representing the result of applying the vector shift. } \details{ The point pattern, and its window, are translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. If \code{origin} is given, then it should be one of the character strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}. The argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the origin. If \code{origin="centroid"} then the centroid of the window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the window will be shifted to the origin. 
If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the window will be shifted to the origin. } \seealso{ \code{\link{shift}}, \code{\link{shift.owin}}, \code{\link{periodify}}, \code{\link{rotate}}, \code{\link{affine}} } \examples{ data(cells) X <- shift(cells, c(2,3)) \dontrun{ plot(X) # no discernible difference except coordinates are different } plot(cells, pch=16) plot(shift(cells, c(0.03,0.03)), add=TRUE) shift(cells, origin="mid") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/exactMPLEstrauss.Rd0000644000176200001440000001025513160710621016366 0ustar liggesusers\name{exactMPLEstrauss} \alias{exactMPLEstrauss} \title{ Exact Maximum Pseudolikelihood Estimate for Stationary Strauss Process } \description{ Computes, to very high accuracy, the Maximum Pseudolikelihood Estimates of the parameters of a stationary Strauss point process. } \usage{ exactMPLEstrauss(X, R, ngrid = 2048, plotit = FALSE, project=TRUE) } \arguments{ \item{X}{ Data to which the Strauss process will be fitted. A point pattern dataset (object of class \code{"ppp"}). } \item{R}{ Interaction radius of the Strauss process. A non-negative number. } \item{ngrid}{ Grid size for calculation of integrals. An integer, giving the number of grid points in the \eqn{x} and \eqn{y} directions. } \item{plotit}{ Logical. If \code{TRUE}, the log pseudolikelihood is plotted on the current device. } \item{project}{ Logical. If \code{TRUE} (the default), the parameter \eqn{\gamma}{gamma} is constrained to lie in the interval \eqn{[0,1]}. If \code{FALSE}, this constraint is not applied. } } \details{ This function is intended mainly for technical investigation of algorithm performance. Its practical use is quite limited. It fits the stationary Strauss point process model to the point pattern dataset \code{X} by maximum pseudolikelihood (with the border edge correction) using an algorithm with very high accuracy. 
This algorithm is more accurate than the \emph{default} behaviour of the model-fitting function \code{\link{ppm}} because the discretisation is much finer. Ripley (1988) and Baddeley and Turner (2000) derived the log pseudolikelihood for the stationary Strauss process, and eliminated the parameter \eqn{\beta}{beta}, obtaining an exact formula for the partial log pseudolikelihood as a function of the interaction parameter \eqn{\gamma}{gamma} only. The algorithm evaluates this expression to a high degree of accuracy, using numerical integration on a \code{ngrid * ngrid} lattice, uses \code{\link[stats]{optim}} to maximise the log pseudolikelihood with respect to \eqn{\gamma}{gamma}, and finally recovers \eqn{\beta}{beta}. The result is a vector of length 2, containing the fitted coefficients \eqn{\log\beta}{log(beta)} and \eqn{\log\gamma}{log(gamma)}. These values correspond to the entries that would be obtained with \code{coef(ppm(X, ~1, Strauss(R)))}. The fitted coefficients are typically accurate to within \eqn{10^{-6}}{10^(-6)} as shown in Baddeley and Turner (2013). Note however that (by default) \code{exactMPLEstrauss} constrains the parameter \eqn{\gamma}{gamma} to lie in the interval \eqn{[0,1]} in which the point process is well defined (Kelly and Ripley, 1976) whereas \code{\link{ppm}} does not constrain the value of \eqn{\gamma}{gamma} (by default). This behaviour is controlled by the argument \code{project} to \code{\link{ppm}} and \code{exactMPLEstrauss}. The default for \code{\link{ppm}} is \code{project=FALSE}, while the default for \code{exactMPLEstrauss} is \code{project=TRUE}. } \value{ Vector of length 2. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Baddeley, A. and Turner, R. (2013) Bias correction for parameter estimates of spatial point process models. 
\emph{Journal of Statistical Computation and Simulation} \bold{2012}. {doi: 10.1080/00949655.2012.755976} Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. Ripley, B.D. (1988) \emph{Statistical inference for spatial processes}. Cambridge University Press. } \author{ \adrian and \rolf } \seealso{ \code{\link{ppm}} } \examples{ \testonly{ exactMPLEstrauss(cells, 0.1, ngrid=128) exactMPLEstrauss(cells, 0.1, ngrid=128, project=FALSE) } if(interactive()) { exactMPLEstrauss(cells, 0.1) coef(ppm(cells, ~1, Strauss(0.1))) coef(ppm(cells, ~1, Strauss(0.1), nd=128)) exactMPLEstrauss(redwood, 0.04) exactMPLEstrauss(redwood, 0.04, project=FALSE) coef(ppm(redwood, ~1, Strauss(0.04))) } } \keyword{spatial} \keyword{models} spatstat/man/dppm.Rd0000644000176200001440000002654213160710571014131 0ustar liggesusers\name{dppm} \alias{dppm} \concept{point process model} \concept{determinantal point process} \title{Fit Determinantal Point Process Model} \description{ Fit a determinantal point process model to a point pattern. } \usage{ dppm(formula, family, data=NULL, ..., startpar = NULL, method = c("mincon", "clik2", "palm"), weightfun=NULL, control=list(), algorithm="Nelder-Mead", statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) } \arguments{ \item{formula}{ A \code{formula} in the \R language specifying the data (on the left side) and the form of the model to be fitted (on the right side). For a stationary model it suffices to provide a point pattern without a formula. See Details. } \item{family}{ Information specifying the family of point processes to be used in the model. Typically one of the family functions \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}} or \code{\link{dppPowerExp}}. Alternatively a character string giving the name of a family function, or the result of calling one of the family functions. See Details. 
} \item{data}{ The values of spatial covariates (other than the Cartesian coordinates) required by the model. A named list of pixel images, functions, windows, tessellations or numeric constants. } \item{\dots}{ Additional arguments. See Details. } \item{startpar}{ Named vector of starting parameter values for the optimization. } \item{method}{ The fitting method. Either \code{"mincon"} for minimum contrast, \code{"clik2"} for second order composite likelihood, or \code{"palm"} for Palm likelihood. Partially matched. } \item{weightfun}{ Optional weighting function \eqn{w} in the composite likelihood or Palm likelihood. A \code{function} in the \R language. See Details. } \item{control}{ List of control parameters passed to the optimization function \code{\link[stats]{optim}}. } \item{algorithm}{ Character string determining the mathematical optimisation algorithm to be used by \code{\link[stats]{optim}}. See the argument \code{method} of \code{\link[stats]{optim}}. } \item{statistic}{ Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{rmax}{ Maximum value of interpoint distance to use in the composite likelihood. } \item{covfunargs,use.gam,nd,eps}{ Arguments passed to \code{\link{ppm}} when fitting the intensity. } } \details{ This function fits a determinantal point process model to a point pattern dataset as described in Lavancier et al. (2015). The model to be fitted is specified by the arguments \code{formula} and \code{family}. The argument \code{formula} should normally be a \code{formula} in the \R language. The left hand side of the formula specifies the point pattern dataset to which the model should be fitted. This should be a single argument which may be a point pattern (object of class \code{"ppp"}) or a quadrature scheme (object of class \code{"quad"}). 
The right hand side of the formula is called the \code{trend} and specifies the form of the \emph{logarithm of the intensity} of the process. Alternatively the argument \code{formula} may be a point pattern or quadrature scheme, and the trend formula is taken to be \code{~1}. The argument \code{family} specifies the family of point processes to be used in the model. It is typically one of the family functions \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}} or \code{\link{dppPowerExp}}. Alternatively it may be a character string giving the name of a family function, or the result of calling one of the family functions. A family function belongs to class \code{"detpointprocfamilyfun"}. The result of calling a family function is a point process family, which belongs to class \code{"detpointprocfamily"}. The algorithm first estimates the intensity function of the point process using \code{\link{ppm}}. If the trend formula is \code{~1} (the default if a point pattern or quadrature scheme is given rather than a \code{"formula"}) then the model is \emph{homogeneous}. The algorithm begins by estimating the intensity as the number of points divided by the area of the window. Otherwise, the model is \emph{inhomogeneous}. The algorithm begins by fitting a Poisson process with log intensity of the form specified by the formula \code{trend}. (See \code{\link{ppm}} for further explanation). The interaction parameters of the model are then fitted either by minimum contrast estimation, or by maximum composite likelihood. \describe{ \item{Minimum contrast:}{ If \code{method = "mincon"} (the default) interaction parameters of the model will be fitted by minimum contrast estimation, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. 
For a homogeneous model (\code{ trend = ~1 }) the empirical \eqn{K}-function of the data is computed using \code{\link{Kest}}, and the interaction parameters of the model are estimated by the method of minimum contrast. For an inhomogeneous model, the inhomogeneous \eqn{K} function is estimated by \code{\link{Kinhom}} using the fitted intensity. Then the interaction parameters of the model are estimated by the method of minimum contrast using the inhomogeneous \eqn{K} function. This two-step estimation procedure is heavily inspired by Waagepetersen (2007). If \code{statistic="pcf"} then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function \code{\link{pcf}} for homogeneous models and the inhomogeneous pair correlation function \code{\link{pcfinhom}} for inhomogeneous models. In this case, the smoothing parameters of the pair correlation can be controlled using the argument \code{statargs}, as shown in the Examples. Additional arguments \code{\dots} will be passed to \code{\link{mincontrast}} to control the minimum contrast fitting algorithm. } \item{Composite likelihood:}{ If \code{method = "clik2"} the interaction parameters of the model will be fitted by maximising the second-order composite likelihood (Guan, 2006). The log composite likelihood is \deqn{ \sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta) - \left( \sum_{i,j} w(d_{ij}) \right) \log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv }{ \sum[i,j] w(d[i,j]) log(\rho(d[i,j]; \theta)) - (\sum[i,j] w(d[i,j])) log(integral[D,D] w(||u-v||) \rho(||u-v||; \theta) du dv) } where the sums are taken over all pairs of data points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance \eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||} less than \code{rmax}, and the double integral is taken over all pairs of locations \eqn{u,v} in the spatial window of the data. 
Here \eqn{\rho(d;\theta)}{\rho(d;\theta)} is the pair correlation function of the model with cluster parameters \eqn{\theta}{\theta}. The function \eqn{w} in the composite likelihood is a weighting function and may be chosen arbitrarily. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then the default is a threshold weight function, \eqn{w(d) = 1(d \le R)}{w(d) = 1(d \le R)}, where \eqn{R} is \code{rmax/2}. } \item{Palm likelihood:}{ If \code{method = "palm"} the interaction parameters of the model will be fitted by maximising the Palm loglikelihood (Tanaka et al, 2008) \deqn{ \sum_{i,j} w(x_i, x_j) \log \lambda_P(x_j \mid x_i; \theta) - \int_D w(x_i, u) \lambda_P(u \mid x_i; \theta) {\rm d} u }{ \sum[i,j] w(x[i], x[j]) log(\lambda[P](x[j] | x[i]; \theta)) - integral[D] w(x[i], u) \lambda[P](u | x[i]; \theta) du } with the same notation as above. Here \eqn{\lambda_P(u|v;\theta)}{\lambda[P](u|v;\theta)} is the Palm intensity of the model at location \eqn{u} given there is a point at \eqn{v}. } } In all three methods, the optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. The behaviour of this algorithm can be modified using the argument \code{control}. Useful control arguments include \code{trace}, \code{maxit} and \code{abstol} (documented in the help for \code{\link[stats]{optim}}). Finally, it is also possible to fix any parameters desired before the optimisation by specifying them as \code{name=value} in the call to the family function. See Examples. } \value{ An object of class \code{"dppm"} representing the fitted model. There are methods for printing, plotting, predicting and simulating objects of this class. } \seealso{ methods for \code{dppm} objects: \code{\link{plot.dppm}}, \code{\link{fitted.dppm}}, \code{\link{predict.dppm}}, \code{\link{simulate.dppm}}, \code{\link{methods.dppm}}, \code{\link{as.ppm.dppm}}, \code{\link{Kmodel.dppm}}, \code{\link{pcfmodel.dppm}}. 
Minimum contrast fitting algorithm: \code{\link{mincontrast}}. Determinantal point process models: \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}}, \code{\link{dppPowerExp}}. Summary statistics: \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}}. See also \code{\link{ppm}} } \references{ Lavancier, F. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015) Determinantal point process models and statistical inference \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \examples{ jpines <- residualspaper$Fig1 \testonly{ # smaller dataset for testing jpines <- jpines[c(TRUE,FALSE)] } dppm(jpines ~ 1, dppGauss) dppm(jpines ~ 1, dppGauss, method="c") dppm(jpines ~ 1, dppGauss, method="p") # Fixing the intensity to lambda=2 rather than the Poisson MLE 2.04: dppm(jpines ~ 1, dppGauss(lambda=2)) if(interactive()) { # The following is quite slow (using K-function) dppm(jpines ~ x, dppMatern) } # much faster using pair correlation function dppm(jpines ~ x, dppMatern, statistic="pcf", statargs=list(stoyan=0.2)) # Fixing the Matern shape parameter to nu=2 rather than estimating it: dppm(jpines ~ x, dppMatern(nu=2)) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{models} spatstat/man/delaunayDistance.Rd0000644000176200001440000000254213160710571016440 0ustar liggesusers\name{delaunayDistance} \alias{delaunayDistance} \title{Distance on Delaunay Triangulation} \description{ Computes the graph distance in the Delaunay triangulation of a point pattern. } \usage{ delaunayDistance(X) } \arguments{ \item{X}{Spatial point pattern (object of class \code{"ppp"}).} } \details{ The Delaunay triangulation of a spatial point pattern \code{X} is defined as follows. First the Dirichlet/Voronoi tessellation of \code{X} is computed; see \code{\link{dirichlet}}. Then two points of \code{X} are defined to be Delaunay neighbours if their Dirichlet/Voronoi tiles share a common boundary. Every pair of Delaunay neighbours is joined by a straight line. The \emph{graph distance} in the Delaunay triangulation between two points \code{X[i]} and \code{X[j]} is the minimum number of edges of the Delaunay triangulation that must be traversed to go from \code{X[i]} to \code{X[j]}. This command returns a matrix \code{D} such that \code{D[i,j]} is the graph distance between \code{X[i]} and \code{X[j]}. } \value{ A symmetric square matrix with integer entries. } \seealso{ \code{\link{delaunay}}, \code{\link{delaunayNetwork}} } \examples{ X <- runifpoint(20) M <- delaunayDistance(X) plot(delaunay(X), lty=3) text(X, labels=M[1, ], cex=2) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/GmultiInhom.Rd0000644000176200001440000000556513160710571015427 0ustar liggesusers\name{GmultiInhom} \alias{GmultiInhom} \title{ Inhomogeneous Marked G-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{G} function, effectively the cumulative distribution function of the distance from a point in subset \eqn{I} to the nearest point in subset \eqn{J}, adjusted for spatially varying intensity. 
} \usage{ GmultiInhom(X, I, J, lambda = NULL, lambdaI = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}. } \item{I}{ A subset index specifying the subset of points \emph{from} which distances are measured. Any kind of subset index acceptable to \code{\link{[.ppp}}. } \item{J}{ A subset index specifying the subset of points \emph{to} which distances are measured. Any kind of subset index acceptable to \code{\link{[.ppp}}. } \item{lambda}{ Intensity estimates for each point of \code{X}. A numeric vector of length equal to \code{npoints(X)}. Incompatible with \code{lambdaI,lambdaJ}. } \item{lambdaI}{ Intensity estimates for each point of \code{X[I]}. A numeric vector of length equal to \code{npoints(X[I])}. Incompatible with \code{lambda}. } \item{lambdaJ}{ Intensity estimates for each point of \code{X[J]}. A numeric vector of length equal to \code{npoints(X[J])}. Incompatible with \code{lambda}. } \item{lambdamin}{ A lower bound for the intensity, or at least a lower bound for the values in \code{lambdaJ} or \code{lambda[J]}. } \item{\dots}{ Ignored. } \item{r}{ Vector of distance values at which the inhomogeneous \eqn{G} function should be estimated. There is a sensible default. } \item{ReferenceMeasureMarkSetI}{ Optional. The total measure of the mark set. A positive number. } \item{ratio}{ Logical value indicating whether to save ratio information. } } \details{ See Cronie and Van Lieshout (2015). } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{G} function. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Ottmar Cronie and Marie-Colette van Lieshout. Rewritten for \pkg{spatstat} by \adrian. 
} \seealso{ \code{\link{Ginhom}}, \code{\link{Gmulti}} } \examples{ X <- amacrine I <- (marks(X) == "on") J <- (marks(X) == "off") mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 plot(GmultiInhom(X, I, J, lambda=lam, lambdamin=lmin)) } \keyword{spatial} \keyword{nonparametric} spatstat/man/rGaussPoisson.Rd0000644000176200001440000000427613160710621016004 0ustar liggesusers\name{rGaussPoisson} \alias{rGaussPoisson} \title{Simulate Gauss-Poisson Process} \description{ Generate a random point pattern, a simulated realisation of the Gauss-Poisson Process. } \usage{ rGaussPoisson(kappa, r, p2, win = owin(c(0,1),c(0,1)), \dots, nsim=1, drop=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{r}{ Diameter of each cluster that consists of exactly 2 points. } \item{p2}{ Probability that a cluster contains exactly 2 points. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of the point pattern. See \code{\link{rNeymanScott}}. } \details{ This algorithm generates a realisation of the Gauss-Poisson point process inside the window \code{win}. The process is constructed by first generating a Poisson point process of parent points with intensity \code{kappa}. 
Then each parent point is either retained (with probability \code{1 - p2}) or replaced by a pair of points at a fixed distance \code{r} apart (with probability \code{p2}). In the case of clusters of 2 points, the line joining the two points has uniform random orientation. In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rNeymanScott}} } \examples{ pp <- rGaussPoisson(30, 0.07, 0.5) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/gridcentres.Rd0000644000176200001440000000355513160710621015475 0ustar liggesusers\name{gridcentres} \alias{gridcentres} \alias{gridcenters} \title{Rectangular grid of points} \description{ Generates a rectangular grid of points in a window } \usage{ gridcentres(window, nx, ny) } \arguments{ \item{window}{A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of points in each row of the rectangular grid. } \item{ny}{Number of points in each column of the rectangular grid. } } \value{ A list with two components \code{x} and \code{y}, which are numeric vectors giving the coordinates of the points of the rectangular grid. } \details{ This function creates a rectangular grid of points in the window. The bounding rectangle of the \code{window} is divided into a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles. The function returns the \eqn{x,y} coordinates of the centres of these tiles. Note that some of these grid points may lie outside the window, if \code{window} is not of type \code{"rectangle"}. The function \code{\link{inside.owin}} can be used to select those grid points which do lie inside the window. See the examples. 
This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) and for other miscellaneous purposes. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{inside.owin}}, \code{\link{stratrand}} } \examples{ w <- unit.square() xy <- gridcentres(w, 10,15) \dontrun{ plot(w) points(xy) } bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2), y=c(0.1,0.1,0.5,0.7,0.3)) w <- owin(c(0,1), c(0,1), poly=bdry) xy <- gridcentres(w, 30, 30) ok <- inside.owin(xy$x, xy$y, w) \dontrun{ plot(w) points(xy$x[ok], xy$y[ok]) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/slrm.Rd0000644000176200001440000001521113160710621014131 0ustar liggesusers\name{slrm} \alias{slrm} \title{Spatial Logistic Regression} \description{ Fits a spatial logistic regression model to a spatial point pattern. } \usage{ slrm(formula, ..., data = NULL, offset = TRUE, link = "logit", dataAtPoints=NULL, splitby=NULL) } \arguments{ \item{formula}{The model formula. See Details.} \item{\dots}{ Optional arguments passed to \code{\link{pixellate}} determining the pixel resolution for the discretisation of the point pattern. } \item{data}{ Optional. A list containing data required in the formula. The names of entries in the list should correspond to variable names in the formula. The entries should be point patterns, pixel images or windows. } \item{offset}{ Logical flag indicating whether the model formula should be augmented by an offset equal to the logarithm of the pixel area. } \item{link}{The link function for the regression model. A character string, specifying a link function for binary regression. } \item{dataAtPoints}{Optional. Exact values of the covariates at the data points. A data frame, with column names corresponding to variables in the \code{formula}, with one row for each point in the point pattern dataset. } \item{splitby}{ Optional. Character string identifying a window. 
The window will be used to split pixels into sub-pixels. } } \details{ This function fits a Spatial Logistic Regression model (Tukey, 1972; Agterberg, 1974) to a spatial point pattern dataset. The logistic function may be replaced by another link function. The \code{formula} specifies the form of the model to be fitted, and the data to which it should be fitted. The \code{formula} must be an \R formula with a left and right hand side. The left hand side of the \code{formula} is the name of the point pattern dataset, an object of class \code{"ppp"}. The right hand side of the \code{formula} is an expression, in the usual \R formula syntax, representing the functional form of the linear predictor for the model. Each variable name that appears in the formula may be \itemize{ \item one of the reserved names \code{x} and \code{y}, referring to the Cartesian coordinates; \item the name of an entry in the list \code{data}, if this argument is given; \item the name of an object in the parent environment, that is, in the environment where the call to \code{slrm} was issued. } Each object appearing on the right hand side of the formula may be \itemize{ \item a pixel image (object of class \code{"im"}) containing the values of a covariate; \item a window (object of class \code{"owin"}), which will be interpreted as a logical covariate which is \code{TRUE} inside the window and \code{FALSE} outside it; \item a \code{function} in the \R language, with arguments \code{x,y}, which can be evaluated at any location to obtain the values of a covariate. } See the Examples below. The fitting algorithm discretises the point pattern onto a pixel grid. The value in each pixel is 1 if there are any points of the point pattern in the pixel, and 0 if there are no points in the pixel. 
The dimensions of the pixel grid will be determined as follows: \itemize{ \item The pixel grid will be determined by the extra arguments \code{\dots} if they are specified (for example the argument \code{dimyx} can be used to specify the number of pixels). \item Otherwise, if the right hand side of the \code{formula} includes the names of any pixel images containing covariate values, these images will determine the pixel grid for the discretisation. The covariate image with the finest grid (the smallest pixels) will be used. \item Otherwise, the default pixel grid size is given by \code{spatstat.options("npixel")}. } If \code{link="logit"} (the default), the algorithm fits a Spatial Logistic Regression model. This model states that the probability \eqn{p} that a given pixel contains a data point, is related to the covariates through \deqn{\log\frac{p}{1-p} = \eta}{log(p/(1-p)) = eta} where \eqn{\eta}{eta} is the linear predictor of the model (a linear combination of the covariates, whose form is specified by the \code{formula}). If \code{link="cloglog"} then the algorithm fits a model stating that \deqn{\log(-\log(1-p)) = \eta}{log(-log(1-p)) = eta}. If \code{offset=TRUE} (the default), the model formula will be augmented by adding an offset term equal to the logarithm of the pixel area. This ensures that the fitted parameters are approximately independent of pixel size. If \code{offset=FALSE}, the offset is not included, and the traditional form of Spatial Logistic Regression is fitted. } \value{ An object of class \code{"slrm"} representing the fitted model. There are many methods for this class, including methods for \code{print}, \code{fitted}, \code{predict}, \code{anova}, \code{coef}, \code{logLik}, \code{terms}, \code{update}, \code{formula} and \code{vcov}. Automated stepwise model selection is possible using \code{\link{step}}. Confidence intervals for the parameters can be computed using \code{\link[stats]{confint}}. 
} \seealso{ \code{\link{anova.slrm}}, \code{\link{coef.slrm}}, \code{\link{fitted.slrm}}, \code{\link{logLik.slrm}}, \code{\link{plot.slrm}}, \code{\link{predict.slrm}}, \code{\link{vcov.slrm}} } \references{ Agterberg, F.P. (1974) Automatic contouring of geological maps to detect target areas for mineral exploration. \emph{Journal of the International Association for Mathematical Geology} \bold{6}, 373--395. Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. {doi: 10.1214/10-EJS581} Tukey, J.W. (1972) Discussion of paper by F.P. Agterberg and S.C. Robinson. \emph{Bulletin of the International Statistical Institute} \bold{44} (1) p. 596. Proceedings, 38th Congress, International Statistical Institute. } \examples{ X <- copper$SouthPoints slrm(X ~ 1) slrm(X ~ x+y) slrm(X ~ x+y, link="cloglog") # specify a grid of 2-km-square pixels slrm(X ~ 1, eps=2) Y <- copper$SouthLines Z <- distmap(Y) slrm(X ~ Z) slrm(X ~ Z, dataAtPoints=list(Z=nncross(X,Y,what="dist"))) dat <- list(A=X, V=Z) slrm(A ~ V, data=dat) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} spatstat/man/concatxy.Rd0000644000176200001440000000225513160710571015014 0ustar liggesusers\name{concatxy} \alias{concatxy} \title{Concatenate x,y Coordinate Vectors} \description{ Concatenate any number of pairs of \code{x} and \code{y} coordinate vectors. } \usage{ concatxy(\dots) } \arguments{ \item{\dots}{ Any number of arguments, each of which is a structure containing elements \code{x} and \code{y}. } } \value{ A list with two components \code{x} and \code{y}, which are the concatenations of all the corresponding \code{x} and \code{y} vectors in the argument list. 
} \details{ This function can be used to superimpose two or more point patterns of unmarked points (but see also \code{\link{superimpose}} which is recommended). It assumes that each of the arguments in \code{\dots} is a structure containing (at least) the elements \code{x} and \code{y}. It concatenates all the \code{x} elements into a vector \code{x}, and similarly for \code{y}, and returns these concatenated vectors. } \seealso{ \code{\link{superimpose}}, \code{\link{quadscheme}} } \examples{ dat <- runifrect(30) xy <- list(x=runif(10),y=runif(10)) new <- concatxy(dat, xy) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/is.multitype.ppm.Rd0000644000176200001440000000455613160710621016427 0ustar liggesusers\name{is.multitype.ppm} \alias{is.multitype.ppm} \alias{is.multitype.lppm} \title{Test Whether A Point Process Model is Multitype} \description{ Tests whether a fitted point process model involves ``marks'' attached to the points that classify the points into several types. } \usage{ \method{is.multitype}{ppm}(X, \dots) \method{is.multitype}{lppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model (object of class \code{"ppm"}) usually obtained from \code{\link{ppm}}. Alternatively a model of class \code{"lppm"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a multitype point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. 
The argument \code{X} is a fitted point process model (an object of class \code{"ppm"}) typically obtained by fitting a model to point pattern data using \code{\link{ppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a multitype point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. whether the fitted model ignores the marks in the data). Currently we have not implemented a test for this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. } \seealso{ \code{\link{is.multitype}}, \code{\link{is.multitype.ppp}} } \examples{ X <- lansing # Multitype point pattern --- trees marked by species \testonly{ # Smaller dataset X <- amacrine } fit1 <- ppm(X, ~ marks, Poisson()) is.multitype(fit1) # TRUE fit2 <- ppm(X, ~ 1, Poisson()) is.multitype(fit2) # TRUE # Unmarked point pattern fit3 <- ppm(cells, ~ 1, Poisson()) is.multitype(fit3) # FALSE } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/fourierbasis.Rd0000644000176200001440000000343013160710621015651 0ustar liggesusers\name{fourierbasis} \alias{fourierbasis} \title{Fourier Basis Functions} \description{Evaluates the Fourier basis functions on a \eqn{d}-dimensional box with \eqn{d}-dimensional frequencies \eqn{k_i} at the \eqn{d}-dimensional coordinates \eqn{x_j}. } \usage{ fourierbasis(x, k, win = boxx(rep(list(0:1), ncol(k)))) } \arguments{ \item{x}{ Coordinates. A \code{data.frame} or matrix with \eqn{m} rows and \eqn{d} columns giving the \eqn{d}-dimensional coordinates. } \item{k}{Frequencies. A \code{data.frame} or matrix with \eqn{n} rows and \eqn{d} columns giving the frequencies of the Fourier-functions. 
} \item{win}{ window (of class \code{"owin"}, \code{"box3"} or \code{"boxx"}) giving the \eqn{d}-dimensional box domain of the Fourier functions. } } \details{ The result is an \eqn{n} by \eqn{m} matrix where the \eqn{(i,j)}'th entry is the \eqn{d}-dimensional Fourier basis function with frequency \eqn{k_i} evaluated at the point \eqn{x_j}, i.e., \deqn{ \frac{1}{|W|} \exp(2\pi i /|W|) }{ 1/|W| * exp(2*pi*i*k_i*x_j/|W|) } where \eqn{<\cdot,\cdot>}{*} is the \eqn{d}-dimensional inner product and \eqn{|W|} is the volume of the domain (window/box). Note that the algorithm does not check whether the coordinates given in \code{x} are contained in the given box. Actually the box is only used to determine the volume of the domain for normalization. } \value{An \code{n} by \code{m} matrix of complex values.} \author{ \adrian \rolf and \ege } \examples{ ## 27 rows of three dimensional Fourier frequencies: k <- expand.grid(-1:1,-1:1, -1:1) ## Two random points in the three dimensional unit box: x <- rbind(runif(3),runif(3)) ## 27 by 2 resulting matrix: v <- fourierbasis(x, k) head(v) } spatstat/man/vcov.ppm.Rd0000644000176200001440000002144013160710621014725 0ustar liggesusers\name{vcov.ppm} \alias{vcov.ppm} \title{Variance-Covariance Matrix for a Fitted Point Process Model} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a fitted point process model. } \usage{ \method{vcov}{ppm}(object, \dots, what = "vcov", verbose = TRUE, fine=FALSE, gam.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), hessian=FALSE) } \arguments{ \item{object}{A fitted point process model (an object of class \code{"ppm"}.)} \item{\dots}{Ignored.} \item{what}{Character string (partially-matched) that specifies what matrix is returned. 
Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix. } \item{fine}{ Logical value indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}). } \item{verbose}{Logical. If \code{TRUE}, a message will be printed if various minor problems are encountered. } \item{gam.action}{String indicating what to do if \code{object} was fitted by \code{gam}. } \item{matrix.action}{String indicating what to do if the matrix is ill-conditioned (so that its inverse cannot be calculated). } \item{logi.action}{String indicating what to do if \code{object} was fitted via the logistic regression approximation using a non-standard dummy point process. } \item{hessian}{ Logical. Use the negative Hessian matrix of the log pseudolikelihood instead of the Fisher information. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical parameters in the point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. \code{object} should be an object of class \code{"ppm"}, typically produced by \code{\link{ppm}}. The canonical parameters of the fitted model \code{object} are the quantities returned by \code{coef.ppm(object)}. The function \code{vcov} calculates the variance-covariance matrix for these parameters. The argument \code{what} provides three options: \describe{ \item{\code{what="vcov"}}{ return the variance-covariance matrix of the parameter estimates } \item{\code{what="corr"}}{ return the correlation matrix of the parameter estimates } \item{\code{what="fisher"}}{ return the observed Fisher information matrix. } } In all three cases, the result is a square matrix. The rows and columns of the matrix correspond to the canonical parameters given by \code{\link{coef.ppm}(object)}. 
The row and column names of the matrix are also identical to the names in \code{\link{coef.ppm}(object)}. For models fitted by the Berman-Turner approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the maximum pseudolikelihood (using the default \code{method="mpl"} in the call to \code{\link{ppm}}), the implementation works as follows. \itemize{ \item If the fitted model \code{object} is a Poisson process, the calculations are based on standard asymptotic theory for the maximum likelihood estimator (Kutoyants, 1998). The observed Fisher information matrix of the fitted model \code{object} is first computed, by summing over the Berman-Turner quadrature points in the fitted model. The asymptotic variance-covariance matrix is calculated as the inverse of the observed Fisher information. The correlation matrix is then obtained by normalising. \item If the fitted model is not a Poisson process (i.e. it is some other Gibbs point process) then the calculations are based on Coeurjolly and Rubak (2012). A consistent estimator of the variance-covariance matrix is computed by summing terms over all pairs of data points. If required, the Fisher information is calculated as the inverse of the variance-covariance matrix. } For models fitted by the Huang-Ogata method (\code{method="ho"} in the call to \code{\link{ppm}}), the implementation uses the Monte Carlo estimate of the Fisher information matrix that was computed when the original model was fitted. For models fitted by the logistic regression approximation to the maximum pseudolikelihood (\code{method="logi"} in the call to \code{\link{ppm}}), calculations are based on (Baddeley et al., 2013). A consistent estimator of the variance-covariance matrix is computed by summing terms over all pairs of data points. If required, the Fisher information is calculated as the inverse of the variance-covariance matrix. 
In this case the calculations depend on the type of dummy pattern used, and currently only the types \code{"stratrand"}, \code{"binomial"} and \code{"poisson"} as generated by \code{\link{quadscheme.logi}} are implemented. For other types the behavior depends on the argument \code{logi.action}. If \code{logi.action="fatal"} an error is produced. Otherwise, for types \code{"grid"} and \code{"transgrid"} the formulas for \code{"stratrand"} are used which in many cases should be conservative. For an arbitrary user specified dummy pattern (type \code{"given"}) the formulas for \code{"poisson"} are used which in many cases should be conservative. If \code{logi.action="warn"} a warning is issued otherwise the calculation proceeds without a warning. The argument \code{verbose} makes it possible to suppress some diagnostic messages. The asymptotic theory is not correct if the model was fitted using \code{gam} (by calling \code{\link{ppm}} with \code{use.gam=TRUE}). The argument \code{gam.action} determines what to do in this case. If \code{gam.action="fatal"}, an error is generated. If \code{gam.action="warn"}, a warning is issued and the calculation proceeds using the incorrect theory for the parametric case, which is probably a reasonable approximation in many applications. If \code{gam.action="silent"}, the calculation proceeds without a warning. If \code{hessian=TRUE} then the negative Hessian (second derivative) matrix of the log pseudolikelihood, and its inverse, will be computed. For non-Poisson models, this is not a valid estimate of variance, but is useful for other calculations. Note that standard errors and 95\% confidence intervals for the coefficients can also be obtained using \code{confint(object)} or \code{coef(summary(object))}. 
} \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix was either too large or too small for reliable numerical calculation. If this message occurs, try repeating the calculation using \code{fine=TRUE}. Singularity can occur because of numerical overflow or collinearity in the covariates. To check this, rescale the coordinates of the data points and refit the model. See the Examples. In a Gibbs model, a singular matrix may also occur if the fitted model is a hard core process: this is a feature of the variance estimator. } \value{ A square matrix. } \examples{ X <- rpoispp(42) fit <- ppm(X, ~ x + y) vcov(fit) vcov(fit, what="Fish") # example of singular system m <- ppm(demopat ~polynom(x,y,2)) \dontrun{ try(v <- vcov(m)) } # rescale x, y coordinates to range [0,1] x [0,1] approximately demopatScale <- rescale(demopat, 10000) m <- ppm(demopatScale ~ polynom(x,y,2)) v <- vcov(m) # Gibbs example fitS <- ppm(swedishpines ~1, Strauss(9)) coef(fitS) sqrt(diag(vcov(fitS))) } \author{ Original code for Poisson point process was written by \adrian and \rolf . New code for stationary Gibbs point processes was generously contributed by \ege and Jean-Francois Coeurjolly. New code for generic Gibbs process written by \adrian. New code for logistic method contributed by \ege. } \seealso{ \code{\link{vcov}} for the generic, \code{\link{ppm}} for information about fitted models, \code{\link[stats]{confint}} for confidence intervals. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Coeurjolly, J.-F. and Rubak, E. (2013) Fast covariance estimation for innovations computed from a spatial Gibbs point process. Scandinavian Journal of Statistics \bold{40} 669--684. Kutoyants, Y.A. 
(1998) \bold{Statistical Inference for Spatial Poisson Processes}, Lecture Notes in Statistics 134. New York: Springer 1998. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/kppm.Rd0000644000176200001440000003653213160710621014134 0ustar liggesusers\name{kppm} \alias{kppm} \alias{kppm.formula} \alias{kppm.ppp} \alias{kppm.quad} \concept{point process model} \concept{Cox point process} \concept{cluster process} \concept{Neyman-Scott cluster process} \title{Fit Cluster or Cox Point Process Model} \description{ Fit a homogeneous or inhomogeneous cluster process or Cox point process model to a point pattern. } \usage{ kppm(X, \dots) \method{kppm}{formula}(X, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), \dots, data=NULL) \method{kppm}{ppp}(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data = NULL, ..., covariates=data, subset, method = c("mincon", "clik2", "palm"), improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), algorithm="Nelder-Mead", statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) \method{kppm}{quad}(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data = NULL, ..., covariates=data, subset, method = c("mincon", "clik2", "palm"), improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), algorithm="Nelder-Mead", statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) } \arguments{ \item{X}{ A point pattern dataset (object of class \code{"ppp"} or \code{"quad"}) to which the model should be fitted, or a \code{formula} in the \R language defining the model. See Details. } \item{trend}{ An \R formula, with no left hand side, specifying the form of the log intensity. } \item{clusters}{ Character string determining the cluster model. Partially matched. 
Options are \code{"Thomas"}, \code{"MatClust"}, \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}. } \item{data,covariates}{ The values of spatial covariates (other than the Cartesian coordinates) required by the model. A named list of pixel images, functions, windows, tessellations or numeric constants. } \item{\dots}{ Additional arguments. See Details. } \item{subset}{ Optional. A subset of the spatial domain, to which the model-fitting should be restricted. A window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}), or an expression (possibly involving the names of entries in \code{data}) which can be evaluated to yield a window or pixel image. } \item{method}{ The fitting method. Either \code{"mincon"} for minimum contrast, \code{"clik2"} for second order composite likelihood, or \code{"palm"} for Palm likelihood. Partially matched. } \item{improve.type}{ Method for updating the initial estimate of the trend. Initially the trend is estimated as if the process is an inhomogeneous Poisson process. The default, \code{improve.type = "none"}, is to use this initial estimate. Otherwise, the trend estimate is updated by \code{\link{improve.kppm}}, using information about the pair correlation function. Options are \code{"clik1"} (first order composite likelihood, essentially equivalent to \code{"none"}), \code{"wclik1"} (weighted first order composite likelihood) and \code{"quasi"} (quasi likelihood). } \item{improve.args}{ Additional arguments passed to \code{\link{improve.kppm}} when \code{improve.type != "none"}. See Details. } \item{weightfun}{ Optional weighting function \eqn{w} in the composite likelihood or Palm likelihood. A \code{function} in the \R language. See Details. } \item{control}{ List of control parameters passed to the optimization function \code{\link[stats]{optim}}. } \item{algorithm}{ Character string determining the mathematical optimisation algorithm to be used by \code{\link[stats]{optim}}. 
See the argument \code{method} of \code{\link[stats]{optim}}. } \item{statistic}{ Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{rmax}{ Maximum value of interpoint distance to use in the composite likelihood. } \item{covfunargs,use.gam,nd,eps}{ Arguments passed to \code{\link{ppm}} when fitting the intensity. } } \details{ This function fits a clustered point process model to the point pattern dataset \code{X}. The model may be either a \emph{Neyman-Scott cluster process} or another \emph{Cox process}. The type of model is determined by the argument \code{clusters}. Currently the options are \code{clusters="Thomas"} for the Thomas process, \code{clusters="MatClust"} for the Matern cluster process, \code{clusters="Cauchy"} for the Neyman-Scott cluster process with Cauchy kernel, \code{clusters="VarGamma"} for the Neyman-Scott cluster process with Variance Gamma kernel (requires an additional argument \code{nu} to be passed through the dots; see \code{\link{rVarGamma}} for details), and \code{clusters="LGCP"} for the log-Gaussian Cox process (may require additional arguments passed through \code{\dots}; see \code{\link{rLGCP}} for details on argument names). The first four models are Neyman-Scott cluster processes. The algorithm first estimates the intensity function of the point process using \code{\link{ppm}}. The argument \code{X} may be a point pattern (object of class \code{"ppp"}) or a quadrature scheme (object of class \code{"quad"}). The intensity is specified by the \code{trend} argument. If the trend formula is \code{~1} (the default) then the model is \emph{homogeneous}. The algorithm begins by estimating the intensity as the number of points divided by the area of the window. Otherwise, the model is \emph{inhomogeneous}. 
The algorithm begins by fitting a Poisson process with log intensity of the form specified by the formula \code{trend}. (See \code{\link{ppm}} for further explanation). The argument \code{X} may also be a \code{formula} in the \R language. The right hand side of the formula gives the \code{trend} as described above. The left hand side of the formula gives the point pattern dataset to which the model should be fitted. If \code{improve.type="none"} this is the final estimate of the intensity. Otherwise, the intensity estimate is updated, as explained in \code{\link{improve.kppm}}. Additional arguments to \code{\link{improve.kppm}} are passed as a named list in \code{improve.args}. The clustering parameters of the model are then fitted either by minimum contrast estimation, or by maximum composite likelihood. \describe{ \item{Minimum contrast:}{ If \code{method = "mincon"} (the default) clustering parameters of the model will be fitted by minimum contrast estimation, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. For a homogeneous model (\code{ trend = ~1 }) the empirical \eqn{K}-function of the data is computed using \code{\link{Kest}}, and the parameters of the cluster model are estimated by the method of minimum contrast. For an inhomogeneous model, the inhomogeneous \eqn{K} function is estimated by \code{\link{Kinhom}} using the fitted intensity. Then the parameters of the cluster model are estimated by the method of minimum contrast using the inhomogeneous \eqn{K} function. This two-step estimation procedure is due to Waagepetersen (2007). If \code{statistic="pcf"} then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function \code{\link{pcf}} for homogeneous models and the inhomogeneous pair correlation function \code{\link{pcfinhom}} for inhomogeneous models. 
In this case, the smoothing parameters of the pair correlation can be controlled using the argument \code{statargs}, as shown in the Examples. Additional arguments \code{\dots} will be passed to \code{\link{mincontrast}} to control the minimum contrast fitting algorithm. } \item{Composite likelihood:}{ If \code{method = "clik2"} the clustering parameters of the model will be fitted by maximising the second-order composite likelihood (Guan, 2006). The log composite likelihood is \deqn{ \sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta) - \left( \sum_{i,j} w(d_{ij}) \right) \log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv }{ sum[i,j] w(d[i,j]) log(rho(d[i,j]; theta)) - (sum[i,j] w(d[i,j])) log(integral[D,D] w(||u-v||) rho(||u-v||; theta) du dv) } where the sums are taken over all pairs of data points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance \eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||} less than \code{rmax}, and the double integral is taken over all pairs of locations \eqn{u,v} in the spatial window of the data. Here \eqn{\rho(d;\theta)}{rho(d;theta)} is the pair correlation function of the model with cluster parameters \eqn{\theta}{theta}. The function \eqn{w} in the composite likelihood is a weighting function and may be chosen arbitrarily. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then the default is a threshold weight function, \eqn{w(d) = 1(d \le R)}{w(d) = 1(d <= R)}, where \eqn{R} is \code{rmax/2}. } \item{Palm likelihood:}{ If \code{method = "palm"} the clustering parameters of the model will be fitted by maximising the Palm loglikelihood (Tanaka et al, 2008) \deqn{ \sum_{i,j} w(x_i, x_j) \log \lambda_P(x_j \mid x_i; \theta) - \int_D w(x_i, u) \lambda_P(u \mid x_i; \theta) {\rm d} u }{ sum[i,j] w(x[i], x[j]) log(lambdaP(x[j] | x[i]; theta) - integral[D] w(x[i], u) lambdaP(u | x[i]; theta) du } with the same notation as above. 
Here \eqn{\lambda_P(u|v;\theta}{lambdaP(u|v;theta)} is the Palm intensity of the model at location \eqn{u} given there is a point at \eqn{v}. } } In all three methods, the optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. The behaviour of this algorithm can be modified using the argument \code{control}. Useful control arguments include \code{trace}, \code{maxit} and \code{abstol} (documented in the help for \code{\link[stats]{optim}}). Fitting the LGCP model requires the \pkg{RandomFields} package, except in the default case where the exponential covariance is assumed. } \section{Log-Gaussian Cox Models}{ To fit a log-Gaussian Cox model with non-exponential covariance, specify \code{clusters="LGCP"} and use additional arguments to specify the covariance structure. These additional arguments can be given individually in the call to \code{kppm}, or they can be collected together in a list called \code{covmodel}. For example a Matern model with parameter \eqn{\nu=0.5} could be specified either by \code{kppm(X, clusters="LGCP", model="matern", nu=0.5)} or by \code{kppm(X, clusters="LGCP", covmodel=list(model="matern", nu=0.5))}. The argument \code{model} specifies the type of covariance model: the default is \code{model="exp"} for an exponential covariance. Alternatives include \code{"matern"}, \code{"cauchy"} and \code{"spheric"}. Model names correspond to functions beginning with \code{RM} in the \pkg{RandomFields} package: for example \code{model="matern"} corresponds to the function \code{RMmatern} in the \pkg{RandomFields} package. Additional arguments are passed to the relevant function in the \pkg{RandomFields} package: for example if \code{model="matern"} then the additional argument \code{nu} is required, and is passed to the function \code{RMmatern} in the \pkg{RandomFields} package. 
Note that it is not possible to use \emph{anisotropic} covariance models because the \code{kppm} technique assumes the pair correlation function is isotropic. } \value{ An object of class \code{"kppm"} representing the fitted model. There are methods for printing, plotting, predicting, simulating and updating objects of this class. } \section{Error and warning messages}{ See \code{\link{ppm.ppp}} for a list of common error messages and warnings originating from the first stage of model-fitting. } \seealso{ Methods for \code{kppm} objects: \code{\link{plot.kppm}}, \code{\link{fitted.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{update.kppm}}, \code{\link{vcov.kppm}}, \code{\link[spatstat:methods.kppm]{methods.kppm}}, \code{\link{as.ppm.kppm}}, \code{\link{Kmodel.kppm}}, \code{\link{pcfmodel.kppm}}. Minimum contrast fitting algorithm: \code{\link{mincontrast}}. Alternative fitting algorithms: \code{\link{thomas.estK}}, \code{\link{matclust.estK}}, \code{\link{lgcp.estK}}, \code{\link{cauchy.estK}}, \code{\link{vargamma.estK}}, \code{\link{thomas.estpcf}}, \code{\link{matclust.estpcf}}, \code{\link{lgcp.estpcf}}, \code{\link{cauchy.estpcf}}, \code{\link{vargamma.estpcf}}, Summary statistics: \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}}. See also \code{\link{ppm}} } \references{ Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Jalilian, A., Guan, Y. and Waagepetersen, R. (2012) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. 
\emph{Biometrics} \bold{63}, 252--258. } \examples{ # method for point patterns kppm(redwood, ~1, "Thomas") # method for formulas kppm(redwood ~ 1, "Thomas") kppm(redwood ~ 1, "Thomas", method="c") kppm(redwood ~ 1, "Thomas", method="p") kppm(redwood ~ x, "MatClust") kppm(redwood ~ x, "MatClust", statistic="pcf", statargs=list(stoyan=0.2)) kppm(redwood ~ x, cluster="Cauchy", statistic="K") kppm(redwood, cluster="VarGamma", nu = 0.5, statistic="pcf") # LGCP models kppm(redwood ~ 1, "LGCP", statistic="pcf") if(require("RandomFields")) { kppm(redwood ~ x, "LGCP", statistic="pcf", model="matern", nu=0.3, control=list(maxit=10)) } # fit with composite likelihood method kppm(redwood ~ x, "VarGamma", method="clik2", nu.ker=-3/8) # fit intensity with quasi-likelihood method kppm(redwood ~ x, "Thomas", improve.type = "quasi") } \author{ \spatstatAuthors, with contributions from Abdollah Jalilian and Rasmus Waagepetersen. } \keyword{spatial} \keyword{models} spatstat/man/is.connected.ppp.Rd0000644000176200001440000000255413160710621016334 0ustar liggesusers\name{is.connected.ppp} \Rdversion{1.1} \alias{is.connected.ppp} \title{ Determine Whether a Point Pattern is Connected } \description{ Determine whether a point pattern is topologically connected when all pairs of points closer than a threshold distance are joined. } \usage{ \method{is.connected}{ppp}(X, R, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{R}{ Threshold distance. Pairs of points closer than \code{R} units apart will be joined together. } \item{\dots}{ Ignored. } } \details{ The function \code{is.connected} is generic. This is the method for point patterns (objects of class \code{"ppp"}). The point pattern \code{X} is first converted into an abstract graph by joining every pair of points that lie closer than \code{R} units apart. Then the algorithm determines whether this graph is connected. 
That is, the result of \code{is.connected(X)} is \code{TRUE} if any point in \code{X} can be reached from any other point, by a series of steps between points of \code{X}, each step being shorter than \code{R} units in length. } \value{ A logical value. } \seealso{ \code{\link{is.connected}}, \code{\link{connected.ppp}}. } \examples{ is.connected(redwoodfull, 0.1) is.connected(redwoodfull, 0.2) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/transect.im.Rd0000644000176200001440000000351613160710621015410 0ustar liggesusers\name{transect.im} \alias{transect.im} \title{ Pixel Values Along a Transect } \description{ Extract the pixel values of a pixel image at each point along a linear transect. } \usage{ transect.im(X, ..., from="bottomleft", to="topright", click=FALSE, add=FALSE) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}). } \item{\dots}{ Ignored. } \item{from,to}{ Optional. Start point and end point of the transect. Pairs of \eqn{(x,y)} coordinates in a format acceptable to \code{\link{xy.coords}}, or keywords \code{"bottom"}, \code{"left"}, \code{"top"}, \code{"right"}, \code{"bottomleft"} etc. } \item{click}{ Optional. Logical value. If \code{TRUE}, the linear transect is determined interactively by the user, who clicks two points on the current plot. } \item{add}{ Logical. If \code{click=TRUE}, this argument determines whether to perform interactive tasks on the current plot (\code{add=TRUE}) or to start by plotting \code{X} (\code{add=FALSE}). } } \details{ The pixel values of the image \code{X} along a line segment will be extracted. The result is a function table (\code{"fv"} object) which can be plotted directly. If \code{click=TRUE}, then the user is prompted to click two points on the plot of \code{X}. These endpoints define the transect. Otherwise, the transect is defined by the endpoints \code{from} and \code{to}. The default is a diagonal transect from bottom left to top right of the frame. 
} \value{ An object of class \code{"fv"} which can be plotted. } \author{ \adrian and \rolf } \seealso{ \code{\link{im}} } \examples{ Z <- density(redwood) plot(transect.im(Z)) \dontrun{ if(FALSE) { plot(transect.im(Z, click=TRUE)) } } } \keyword{spatial} \keyword{manip} \keyword{iplot} spatstat/man/rDGS.Rd0000644000176200001440000000746513160710621013767 0ustar liggesusers\name{rDGS} \alias{rDGS} \title{Perfect Simulation of the Diggle-Gates-Stibbard Process} \description{ Generate a random pattern of points, a simulated realisation of the Diggle-Gates-Stibbard process, using a perfect simulation algorithm. } \usage{ rDGS(beta, rho, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{rho}{ interaction range (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Diggle-Gates-Stibbard point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. 
Diggle, Gates and Stibbard (1987) proposed a pairwise interaction point process in which each pair of points separated by a distance \eqn{d} contributes a factor \eqn{e(d)} to the probability density, where \deqn{ e(d) = \sin^2\left(\frac{\pi d}{2\rho}\right) }{ e(d) = sin^2((pi * d)/(2 * rho)) } for \eqn{d < \rho}{d < rho}, and \eqn{e(d)} is equal to 1 for \eqn{d \ge \rho}{d >= rho}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). 
\emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ \adrian, based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rDGS(50, 0.05) } \seealso{ \code{\link{rmh}}, \code{\link{DiggleGatesStibbard}}. \code{\link{rStrauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat/man/closepairs.pp3.Rd0000644000176200001440000001013313160710571016023 0ustar liggesusers\name{closepairs.pp3} \alias{closepairs.pp3} \alias{crosspairs.pp3} \title{ Close Pairs of Points in 3 Dimensions } \description{ Low-level functions to find all close pairs of points in three-dimensional point patterns. } \usage{ \method{closepairs}{pp3}(X, rmax, twice=TRUE, what=c("all", "indices"), distinct=TRUE, neat=TRUE, \dots) \method{crosspairs}{pp3}(X, Y, rmax, what=c("all", "indices"), \dots) } \arguments{ \item{X,Y}{ Point patterns in three dimensions (objects of class \code{"pp3"}). } \item{rmax}{ Maximum distance between pairs of points to be counted as close pairs. } \item{twice}{ Logical value indicating whether all ordered pairs of close points should be returned. If \code{twice=TRUE}, each pair will appear twice in the output, as \code{(i,j)} and again as \code{(j,i)}. If \code{twice=FALSE}, then each pair will appear only once, as the pair \code{(i,j)} such that \code{i < j}. } \item{what}{ String specifying the data to be returned for each close pair of points. If \code{what="all"} (the default) then the returned information includes the indices \code{i,j} of each pair, their \code{x,y,z} coordinates, and the distance between them. If \code{what="indices"} then only the indices \code{i,j} are returned. 
} \item{distinct}{ Logical value indicating whether to return only the pairs of points with different indices \code{i} and \code{j} (\code{distinct=TRUE}, the default) or to also include the pairs where \code{i=j} (\code{distinct=FALSE}). } \item{neat}{ Logical value indicating whether to ensure that \code{i < j} in each output pair, when \code{twice=FALSE}. } \item{\dots}{Ignored.} } \details{ These are the efficient low-level functions used by \pkg{spatstat} to find all close pairs of points in a three-dimensional point pattern or all close pairs between two point patterns in three dimensions. \code{closepairs(X,rmax)} identifies all pairs of neighbours in the pattern \code{X} and returns them. The result is a list with the following components: \describe{ \item{i}{Integer vector of indices of the first point in each pair.} \item{j}{Integer vector of indices of the second point in each pair.} \item{xi,yi,zi}{Coordinates of the first point in each pair.} \item{xj,yj,zj}{Coordinates of the second point in each pair.} \item{dx}{Equal to \code{xj-xi}} \item{dy}{Equal to \code{yj-yi}} \item{dz}{Equal to \code{zj-zi}} \item{d}{Euclidean distance between each pair of points.} } If \code{what="indices"} then only the components \code{i} and \code{j} are returned. This is slightly faster. \code{crosspairs(X,rmax)} identifies all pairs of neighbours \code{(X[i], Y[j])} between the patterns \code{X} and \code{Y}, and returns them. The result is a list with the same format as for \code{closepairs}. } \section{Warning about accuracy}{ The results of these functions may not agree exactly with the correct answer (as calculated by a human) and may not be consistent between different computers and different installations of \R. The discrepancies arise in marginal cases where the interpoint distance is equal to, or very close to, the threshold \code{rmax}. 
Floating-point numbers in a computer are not mathematical Real Numbers: they are approximations using finite-precision binary arithmetic. The approximation is accurate to a tolerance of about \code{.Machine$double.eps}. If the true interpoint distance \eqn{d} and the threshold \code{rmax} are equal, or if their difference is no more than \code{.Machine$double.eps}, the result may be incorrect. } \value{ A list with components \code{i} and \code{j}, and possibly other components as described under Details. } \author{\adrian , \rolf and \ege. } \seealso{ \code{\link{closepairs}} } \examples{ X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1))) Y <- pp3(runif(10), runif(10), runif(10), box3(c(0,1))) a <- closepairs(X, 0.1) b <- crosspairs(X, Y, 0.1) } \keyword{spatial} \keyword{math} spatstat/man/clip.infline.Rd0000644000176200001440000000226013160710571015532 0ustar liggesusers\name{clip.infline} \alias{clip.infline} \title{Intersect Infinite Straight Lines with a Window} \description{ Take the intersection between a set of infinite straight lines and a window, yielding a set of line segments. } \usage{ clip.infline(L, win) } \arguments{ \item{L}{ Object of class \code{"infline"} specifying a set of infinite straight lines in the plane. } \item{win}{ Window (object of class \code{"owin"}). } } \details{ This function computes the intersection between a set of infinite straight lines in the plane (stored in an object \code{L} of class \code{"infline"} created by the function \code{\link{infline}}) and a window \code{win}. The result is a pattern of line segments. Each line segment carries a mark indicating which line it belongs to. } \value{ A line segment pattern (object of class \code{"psp"}) with a single column of marks. } \author{ \adrian and \rolf. } \seealso{ \code{\link{infline}},\code{\link{psp}}. To divide a window into pieces using infinite lines, use \code{\link{chop.tess}}. 
} \examples{ L <- infline(p=1:3, theta=pi/4) W <- square(4) clip.infline(L, W) } \keyword{spatial} \keyword{math} spatstat/man/methods.slrm.Rd0000644000176200001440000000331113160710621015571 0ustar liggesusers\name{methods.slrm} \alias{methods.slrm} %DoNotExport \alias{formula.slrm} \alias{update.slrm} \alias{print.slrm} \alias{terms.slrm} \alias{labels.slrm} \title{ Methods for Spatial Logistic Regression Models } \description{ These are methods for the class \code{"slrm"}. } \usage{ \method{formula}{slrm}(x, \dots) \method{print}{slrm}(x, ...) \method{terms}{slrm}(x, \dots) \method{labels}{slrm}(object, \dots) \method{update}{slrm}(object, ..., evaluate = TRUE, env = parent.frame()) } \arguments{ \item{x,object}{ An object of class \code{"slrm"}, representing a fitted spatial logistic regression model. } \item{\dots}{ Arguments passed to other methods. } \item{evaluate}{ Logical value. If \code{TRUE}, evaluate the updated call to \code{slrm}, so that the model is refitted; if \code{FALSE}, simply return the updated call. } \item{env}{ Optional environment in which the model should be updated. } } \details{ These functions are methods for the generic commands \code{\link{formula}}, \code{\link{update}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"slrm"}. An object of class \code{"slrm"} represents a fitted spatial logistic regression model. It is obtained from \code{\link{slrm}}. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{slrm}}, \code{\link{plot.slrm}}, \code{\link{predict.slrm}}, \code{\link{simulate.slrm}}, \code{\link{vcov.slrm}}, \code{\link{coef.slrm}}. 
} \examples{ data(redwood) fit <- slrm(redwood ~ x) coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat/man/marktable.Rd0000644000176200001440000000541713160710621015125 0ustar liggesusers\name{marktable} \alias{marktable} \title{Tabulate Marks in Neighbourhood of Every Point in a Point Pattern} \description{ Visit each point in a point pattern, find the neighbouring points, and compile a frequency table of the marks of these neighbour points. } \usage{ marktable(X, R, N, exclude=TRUE, collapse=FALSE) } \arguments{ \item{X}{ A marked point pattern. An object of class \code{"ppp"}. } \item{R}{ Neighbourhood radius. Incompatible with \code{N}. } \item{N}{ Number of neighbours of each point. Incompatible with \code{R}. } \item{exclude}{ Logical. If \code{exclude=TRUE}, the neighbours of a point do not include the point itself. If \code{exclude=FALSE}, a point belongs to its own neighbourhood. } \item{collapse}{ Logical. If \code{collapse=FALSE} (the default) the results for each point are returned as separate rows of a table. If \code{collapse=TRUE}, the results are aggregated according to the type of point. } } \value{ A contingency table (object of class \code{"table"}). If \code{collapse=FALSE}, the table has one row for each point in \code{X}, and one column for each possible mark value. If \code{collapse=TRUE}, the table has one row and one column for each possible mark value. } \details{ This algorithm visits each point in the point pattern \code{X}, inspects all the neighbouring points within a radius \code{R} of the current point (or the \code{N} nearest neighbours of the current point), and compiles a frequency table of the marks attached to the neighbours. The dataset \code{X} must be a multitype point pattern, that is, \code{marks(X)} must be a \code{factor}. 
If \code{collapse=FALSE} (the default), the result is a two-dimensional contingency table with one row for each point in the pattern, and one column for each possible mark value. The \code{[i,j]} entry in the table gives the number of neighbours of point \code{i} that have mark \code{j}. If \code{collapse=TRUE}, this contingency table is aggregated according to the type of point, so that the result is a contingency table with one row and one column for each possible mark value. The \code{[i,j]} entry in the table gives the number of neighbours of a point with mark \code{i} that have mark \code{j}. To perform more complicated calculations on the neighbours of every point, use \code{\link{markstat}} or \code{\link{applynbd}}. } \seealso{ \code{\link{markstat}}, \code{\link{applynbd}}, \code{\link{Kcross}}, \code{\link{ppp.object}}, \code{\link{table}} } \examples{ head(marktable(amacrine, 0.1)) head(marktable(amacrine, 0.1, exclude=FALSE)) marktable(amacrine, N=1, collapse=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{programming} spatstat/man/harmonise.im.Rd0000644000176200001440000000354113160710621015550 0ustar liggesusers\name{harmonise.im} \alias{harmonise.im} \alias{harmonize.im} \title{Make Pixel Images Compatible} \description{ Convert several pixel images to a common pixel raster. } \usage{ \method{harmonise}{im}(\dots) \method{harmonize}{im}(\dots) } \arguments{ \item{\dots}{ Any number of pixel images (objects of class \code{"im"}) or data which can be converted to pixel images by \code{\link{as.im}}. } } \details{ This function makes any number of pixel images compatible, by converting them all to a common pixel grid. The command \code{\link{harmonise}} is generic. This is the method for objects of class \code{"im"}. At least one of the arguments \code{\dots} must be a pixel image. Some arguments may be windows (objects of class \code{"owin"}), functions (\code{function(x,y)}) or numerical constants. 
These will be converted to images using \code{\link{as.im}}. The common pixel grid is determined by inspecting all the pixel images in the argument list, computing the bounding box of all the images, then finding the image with the highest spatial resolution, and extending its pixel grid to cover the bounding box. The return value is a list with entries corresponding to the input arguments. If the arguments were named (\code{name=value}) then the return value also carries these names. If you just want to determine the appropriate pixel resolution, without converting the images, use \code{\link{commonGrid}}. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are pixel images. } \author{\adrian and \rolf } \examples{ A <- setcov(square(1)) B <- function(x,y) { x } G <- density(runifpoint(42)) harmonise(X=A, Y=B, Z=G) } \seealso{ \code{\link{commonGrid}}, \code{\link{compatible.im}}, \code{\link{as.im}} } \keyword{spatial} \keyword{manip} spatstat/man/subfits.Rd0000644000176200001440000000512713160710621014640 0ustar liggesusers\name{subfits} \alias{subfits} \alias{subfits.new} \alias{subfits.old} \title{Extract List of Individual Point Process Models} \description{ Takes a Gibbs point process model that has been fitted to several point patterns simultaneously, and produces a list of fitted point process models for the individual point patterns. } \usage{ subfits(object, what="models", verbose=FALSE) subfits.old(object, what="models", verbose=FALSE) subfits.new(object, what="models", verbose=FALSE) } \arguments{ \item{object}{ An object of class \code{"mppm"} representing a point process model fitted to several point patterns. } \item{what}{ What should be returned. Either \code{"models"} to return the fitted models, or \code{"interactions"} to return the fitted interactions only. } \item{verbose}{ Logical flag indicating whether to print progress reports. 
} } \details{ \code{object} is assumed to have been generated by \code{\link{mppm}}. It represents a point process model that has been fitted to a list of several point patterns, with covariate data. For each of the \emph{individual} point pattern datasets, this function derives the corresponding fitted model for that dataset only (i.e. a point process model for the \eqn{i}th point pattern, that is consistent with \code{object}). If \code{what="models"}, the result is a list of point process models (a list of objects of class \code{"ppm"}), one model for each point pattern dataset in the original fit. If \code{what="interactions"}, the result is a list of fitted interpoint interactions (a list of objects of class \code{"fii"}). Two different algorithms are provided, as \code{subfits.old} and \code{subfits.new}. Currently \code{subfits} is the same as the old algorithm \code{subfits.old} because the newer algorithm is too memory-hungry. } \value{ A list of point process models (a list of objects of class \code{"ppm"}) or a list of fitted interpoint interactions (a list of objects of class \code{"fii"}). } \examples{ H <- hyperframe(Wat=waterstriders) fit <- mppm(Wat~x, data=H) subfits(fit) H$Wat[[3]] <- rthin(H$Wat[[3]], 0.1) fit2 <- mppm(Wat~x, data=H, random=~1|id) subfits(fit2) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{mppm}}, \code{\link{ppm}} } \keyword{spatial} \keyword{models} spatstat/man/rPenttinen.Rd0000644000176200001440000001037213160710621015305 0ustar liggesusers\name{rPenttinen} \alias{rPenttinen} \title{Perfect Simulation of the Penttinen Process} \description{ Generate a random pattern of points, a simulated realisation of the Penttinen process, using a perfect simulation algorithm. 
} \usage{ rPenttinen(beta, gamma=1, R, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ Interaction strength parameter (a number between 0 and 1). } \item{R}{ disc radius (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Penttinen point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. Penttinen (1984, Example 2.1, page 18), citing Cormack (1979), described the pairwise interaction point process with interaction factor \deqn{ h(d) = e^{\theta A(d)} = \gamma^{A(d)} }{ h(d) = exp(theta * A(d)) = gamma^(A(d)) } between each pair of points separated by a distance $d$. Here \eqn{A(d)} is the area of intersection between two discs of radius \eqn{R} separated by a distance \eqn{d}, normalised so that \eqn{A(0) = 1}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003). 
This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Cormack, R.M. (1979) Spatial aspects of competition between individuals. Pages 151--212 in \emph{Spatial and Temporal Analysis in Ecology}, eds. R.M. Cormack and J.K. Ord, International Co-operative Publishing House, Fairland, MD, USA. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. Penttinen, A. (1984) \emph{Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method.} \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla} Studies in Computer Science, Economics and Statistics \bold{7}, University of \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}, Finland. } \author{ \adrian, based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rPenttinen(50, 0.5, 0.02) } \seealso{ \code{\link{rmh}}, \code{\link{Penttinen}}. 
\code{\link{rStrauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}. } \keyword{spatial} \keyword{datagen} spatstat/man/as.ppp.Rd0000644000176200001440000001142413160710571014363 0ustar liggesusers\name{as.ppp} \alias{as.ppp} \alias{as.ppp.ppp} \alias{as.ppp.psp} \alias{as.ppp.quad} \alias{as.ppp.matrix} \alias{as.ppp.data.frame} \alias{as.ppp.influence.ppm} \alias{as.ppp.default} \title{Convert Data To Class ppp} \description{ Tries to coerce any reasonable kind of data to a spatial point pattern (an object of class \code{"ppp"}) for use by the \pkg{spatstat} package. } \usage{ as.ppp(X, \dots, fatal=TRUE) \method{as.ppp}{ppp}(X, \dots, fatal=TRUE) \method{as.ppp}{psp}(X, \dots, fatal=TRUE) \method{as.ppp}{quad}(X, \dots, fatal=TRUE) \method{as.ppp}{matrix}(X, W=NULL, \dots, fatal=TRUE) \method{as.ppp}{data.frame}(X, W=NULL, \dots, fatal=TRUE) \method{as.ppp}{influence.ppm}(X, \dots) \method{as.ppp}{default}(X, W=NULL, \dots, fatal=TRUE) } \arguments{ \item{X}{Data which will be converted into a point pattern} \item{W}{ Data which define a window for the pattern, when \code{X} does not contain a window. (Ignored if \code{X} contains window information.) } \item{\dots}{Ignored.} \item{fatal}{ Logical value specifying what to do if the data cannot be converted. See Details. } } \value{ An object of class \code{"ppp"} (see \code{\link{ppp.object}}) describing the point pattern and its window of observation. The value \code{NULL} may also be returned; see Details. } \details{ Converts the dataset \code{X} to a point pattern (an object of class \code{"ppp"}; see \code{\link{ppp.object}} for an overview). This function is normally used to convert an existing point pattern dataset, stored in another format, to the \code{"ppp"} format. To create a new point pattern from raw data such as \eqn{x,y} coordinates, it is normally easier to use the creator function \code{\link{ppp}}. 
The function \code{as.ppp} is generic, with methods for the classes \code{"ppp"}, \code{"psp"}, \code{"quad"}, \code{"matrix"}, \code{"data.frame"} and a default method. The dataset \code{X} may be: \itemize{ \item an object of class \code{"ppp"} \item an object of class \code{"psp"} \item a point pattern object created by the \pkg{spatial} library \item an object of class \code{"quad"} representing a quadrature scheme (see \code{\link{quad.object}}) \item a matrix or data frame with at least two columns \item a structure with entries \code{x}, \code{y} which are numeric vectors of equal length \item a numeric vector of length 2, interpreted as the coordinates of a single point. } In the last three cases, we need the second argument \code{W} which is converted to a window object by the function \code{\link{as.owin}}. In the first four cases, \code{W} will be ignored. If \code{X} is a line segment pattern (an object of class \code{psp}) the point pattern returned consists of the endpoints of the segments. If \code{X} is marked then the point pattern returned will also be marked, the mark associated with a point being the mark of the segment of which that point was an endpoint. If \code{X} is a matrix or data frame, the first and second columns will be interpreted as the \eqn{x} and \eqn{y} coordinates respectively. Any additional columns will be interpreted as marks. The argument \code{fatal} indicates what to do when \code{W} is missing and \code{X} contains no information about the window. If \code{fatal=TRUE}, a fatal error will be generated; if \code{fatal=FALSE}, the value \code{NULL} is returned. 
In the \pkg{spatial} library, a point pattern is represented in either of the following formats: \itemize{ \item (in \pkg{spatial} versions 1 to 6) a structure with entries \code{x}, \code{y} \code{xl}, \code{xu}, \code{yl}, \code{yu} \item (in \pkg{spatial} version 7) a structure with entries \code{x}, \code{y} and \code{area}, where \code{area} is a structure with entries \code{xl}, \code{xu}, \code{yl}, \code{yu} } where \code{x} and \code{y} are vectors of equal length giving the point coordinates, and \code{xl}, \code{xu}, \code{yl}, \code{yu} are numbers giving the dimensions of a rectangular window. Point pattern datasets can also be created by the function \code{\link{ppp}}. } \seealso{ \code{\link{ppp}}, \code{\link{ppp.object}}, \code{\link{as.owin}}, \code{\link{owin.object}} } \examples{ xy <- matrix(runif(40), ncol=2) pp <- as.ppp(xy, c(0,1,0,1)) # Venables-Ripley format # check for 'spatial' package spatialpath <- system.file(package="spatial") if(nchar(spatialpath) > 0) { require(spatial) towns <- ppinit("towns.dat") pp <- as.ppp(towns) # converted to our format detach(package:spatial) } xyzt <- matrix(runif(40), ncol=4) Z <- as.ppp(xyzt, square(1)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/gauss.hermite.Rd0000644000176200001440000000322213160710621015731 0ustar liggesusers\name{gauss.hermite} \alias{gauss.hermite} \title{ Gauss-Hermite Quadrature Approximation to Expectation for Normal Distribution } \description{ Calculates an approximation to the expected value of any function of a normally-distributed random variable, using Gauss-Hermite quadrature. } \usage{ gauss.hermite(f, mu = 0, sd = 1, ..., order = 5) } \arguments{ \item{f}{ The function whose moment should be approximated. } \item{mu}{ Mean of the normal distribution. } \item{sd}{ Standard deviation of the normal distribution. } \item{\dots}{ Additional arguments passed to \code{f}. 
} \item{order}{ Number of quadrature points in the Gauss-Hermite quadrature approximation. A small positive integer. } } \details{ This algorithm calculates the approximate expected value of \code{f(Z)} when \code{Z} is a normally-distributed random variable with mean \code{mu} and standard deviation \code{sd}. The expected value is an integral with respect to the Gaussian density; this integral is approximated using Gauss-Hermite quadrature. The argument \code{f} should be a function in the \R language whose first argument is the variable \code{Z}. Additional arguments may be passed through \code{\dots}. The value returned by \code{f} may be a single numeric value, a vector, or a matrix. The values returned by \code{f} for different values of \code{Z} must have compatible dimensions. The result is a weighted average of several values of \code{f}. } \value{ Numeric value, vector or matrix. } \author{\adrian , \rolf and \ege. } \examples{ gauss.hermite(function(x) x^2, 3, 1) } \keyword{math} spatstat/man/plot.imlist.Rd0000644000176200001440000000524413160710621015437 0ustar liggesusers\name{plot.imlist} \alias{plot.imlist} \alias{image.imlist} \alias{image.listof} \title{Plot a List of Images} \description{ Plots an array of pixel images. } \usage{ \method{plot}{imlist}(x, \dots, plotcommand="image", equal.ribbon=FALSE, ribmar=NULL) \method{image}{imlist}(x, \dots, equal.ribbon=FALSE, ribmar=NULL) \method{image}{listof}(x, \dots, equal.ribbon=FALSE, ribmar=NULL) } \arguments{ \item{x}{ An object of the class \code{"imlist"} representing a list of pixel images. Alternatively \code{x} may belong to the outdated class \code{"listof"}. } \item{\dots}{ Arguments passed to \code{\link{plot.solist}} to control the spatial arrangement of panels, and arguments passed to \code{\link{plot.im}} to control the display of each panel. } \item{equal.ribbon}{ Logical. If \code{TRUE}, the colour maps of all the images will be the same. 
If \code{FALSE}, the colour map of each image is adjusted to the range of values of that image. } \item{ribmar}{ Numeric vector of length 4 specifying the margins around the colour ribbon, if \code{equal.ribbon=TRUE}. Entries in the vector give the margin at the bottom, left, top, and right respectively, as a multiple of the height of a line of text. } \item{plotcommand}{ Character string giving the name of a function to be used to display each image. Recognised by \code{plot.imlist} only. } } \value{ Null. } \details{ These are methods for the generic plot commands \code{plot} and \code{image} for the class \code{"imlist"}. They are currently identical. An object of class \code{"imlist"} represents a list of pixel images. (The outdated class \code{"listof"} is also handled.) Each entry in the list \code{x} will be displayed as a pixel image, in an array of panels laid out on the same graphics display, using \code{\link{plot.solist}}. Individual panels are plotted by \code{\link{plot.im}}. If \code{equal.ribbon=FALSE} (the default), the images are rendered using different colour maps, which are displayed as colour ribbons beside each image. If \code{equal.ribbon=TRUE}, the images are rendered using the same colour map, and a single colour ribbon will be displayed at the right side of the array. The colour maps and the placement of the colour ribbons are controlled by arguments \code{\dots} passed to \code{\link{plot.im}}. } \seealso{ \code{\link{plot.solist}}, \code{\link{plot.im}} } \examples{ D <- density(split(amacrine)) image(D, equal.ribbon=TRUE, main="", col.ticks="red", col.axis="red") } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/lohboot.Rd0000644000176200001440000001644413164370433014642 0ustar liggesusers\name{lohboot} \alias{lohboot} \title{Bootstrap Confidence Bands for Summary Function} \description{ Computes a bootstrap confidence band for a summary function of a point process. 
} \usage{ lohboot(X, fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom"), \dots, block=FALSE, global=FALSE, basicboot=FALSE, Vcorrection=FALSE, confidence=0.95, nx = 4, ny = nx, nsim=200, type=7) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{fun}{ Name of the summary function for which confidence intervals are desired: one of the strings \code{"pcf"}, \code{"Kest"}, \code{"Lest"}, \code{"pcfinhom"}, \code{"Kinhom"} or \code{"Linhom"}. Alternatively, the function itself; it must be one of the functions listed here. } \item{\dots}{ Arguments passed to the corresponding local version of the summary function (see Details). } \item{block}{ Logical value indicating whether to use Loh's block bootstrap as originally proposed. Default is \code{FALSE} for consistency with older code. See Details. } \item{global}{ Logical. If \code{FALSE} (the default), pointwise confidence intervals are constructed. If \code{TRUE}, a global (simultaneous) confidence band is constructed. } \item{basicboot}{ Logical value indicating whether to use the so-called basic bootstrap confidence interval. See Details. } \item{Vcorrection}{ Logical value indicating whether to use a variance correction when \code{fun="Kest"} or \code{fun="Kinhom"}. See Details. } \item{confidence}{ Confidence level, as a fraction between 0 and 1. } \item{nx,ny}{ Integers. If \code{block=TRUE}, divide the window into \code{nx*ny} rectangles. } \item{nsim}{ Number of bootstrap simulations. } \item{type}{ Integer. Type of quantiles. Argument passed to \code{\link[stats]{quantile.default}} controlling the way the quantiles are calculated. } } \value{ A function value table (object of class \code{"fv"}) containing columns giving the estimate of the summary function, the upper and lower limits of the bootstrap confidence interval, and the theoretical value of the summary function for a Poisson process. 
} \details{ This algorithm computes confidence bands for the true value of the summary function \code{fun} using the bootstrap method of Loh (2008) and a modification described in Baddeley, Rubak, Turner (2015). If \code{fun="pcf"}, for example, the algorithm computes a pointwise \code{(100 * confidence)}\% confidence interval for the true value of the pair correlation function for the point process, normally estimated by \code{\link{pcf}}. It starts by computing the array of \emph{local} pair correlation functions, \code{\link{localpcf}}, of the data pattern \code{X}. This array consists of the contributions to the estimate of the pair correlation function from each data point. If \code{block=FALSE}, these contributions are resampled \code{nsim} times with replacement as described in Baddeley, Rubak, Turner (2015); from each resampled dataset the total contribution is computed, yielding \code{nsim} random pair correlation functions. If \code{block=TRUE}, the (bounding box of the) window is divided into \eqn{nx * ny} rectangles (blocks). The average contribution of a block is obtained by averaging the contribution of each point included in the block. Then, the average contributions on each block are resampled \code{nsim} times with replacement as described in Loh (2008) and Loh (2010); from each resampled dataset the total contribution is computed, yielding \code{nsim} random pair correlation functions. Notice that for non-rectangular windows any blocks not fully contained in the window are discarded before doing the resampling, so the effective number of blocks may be substantially smaller than \eqn{nx * ny} in this case. The pointwise \code{alpha/2} and \code{1 - alpha/2} quantiles of these functions are computed, where \code{alpha = 1 - confidence}. The average of the local functions is also computed as an estimate of the pair correlation function. There are several ways to define a bootstrap confidence interval. 
If \code{basicboot=TRUE}, the so-called basic confidence bootstrap interval is used as described in Loh (2008). It has been noticed in Loh (2010) that when the intensity of the point process is unknown, the bootstrap error estimate is larger than it should be. When the \eqn{K} function is used, an adjustment procedure has been proposed in Loh (2010) that is used if \code{Vcorrection=TRUE}. In this case, the basic confidence bootstrap interval is implicitly used. To control the estimation algorithm, use the arguments \code{\dots}, which are passed to the local version of the summary function, as shown below: \tabular{ll}{ \bold{fun} \tab \bold{local version} \cr \code{\link{pcf}} \tab \code{\link{localpcf}} \cr \code{\link{Kest}} \tab \code{\link{localK}} \cr \code{\link{Lest}} \tab \code{\link{localK}} \cr \code{\link{pcfinhom}} \tab \code{\link{localpcfinhom}} \cr \code{\link{Kinhom}} \tab \code{\link{localKinhom}} \cr \code{\link{Linhom}} \tab \code{\link{localKinhom}} } For \code{fun="Lest"}, the calculations are first performed as if \code{fun="Kest"}, and then the square-root transformation is applied to obtain the \eqn{L}-function. Note that the confidence bands computed by \code{lohboot(fun="pcf")} may not contain the estimate of the pair correlation function computed by \code{\link{pcf}}, because of differences between the algorithm parameters (such as the choice of edge correction) in \code{\link{localpcf}} and \code{\link{pcf}}. If you are using \code{lohboot}, the appropriate point estimate of the pair correlation itself is the pointwise mean of the local estimates, which is provided in the result of \code{lohboot} and is shown in the default plot. 
If the confidence bands seem unbelievably narrow, this may occur because the point pattern has a hard core (the true pair correlation function is zero for certain values of distance) or because of an optical illusion when the function is steeply sloping (remember the width of the confidence bands should be measured \emph{vertically}). An alternative to \code{lohboot} is \code{\link{varblock}}. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. Loh, J.M. (2008) A valid and fast spatial bootstrap for correlation functions. \emph{The Astrophysical Journal}, \bold{681}, 726--734. Loh, J.M. (2010) Bootstrapping an inhomogeneous point process. \emph{Journal of Statistical Planning and Inference}, \bold{140}, 734--749. } \seealso{ Summary functions \code{\link{Kest}}, \code{\link{pcf}}, \code{\link{Kinhom}}, \code{\link{pcfinhom}}, \code{\link{localK}}, \code{\link{localpcf}}, \code{\link{localKinhom}}, \code{\link{localpcfinhom}}. See \code{\link{varblock}} for an alternative bootstrap technique. } \examples{ p <- lohboot(simdat, stoyan=0.5) plot(p) } \author{ \spatstatAuthors and Christophe Biscio. } \keyword{spatial} \keyword{nonparametric} spatstat/man/range.fv.Rd0000644000176200001440000000236013160710621014663 0ustar liggesusers\name{range.fv} \alias{range.fv} \alias{max.fv} \alias{min.fv} \title{ Range of Function Values } \description{ Compute the range, maximum, or minimum of the function values in a summary function. } \usage{ \method{range}{fv}(\dots, na.rm = TRUE, finite = na.rm) \method{max}{fv}(\dots, na.rm = TRUE, finite = na.rm) \method{min}{fv}(\dots, na.rm = TRUE, finite = na.rm) } \arguments{ \item{\dots}{ One or more function value tables (objects of class \code{"fv"} representing summary functions) or other data. } \item{na.rm}{ Logical. Whether to ignore \code{NA} values. } \item{finite}{ Logical. 
Whether to ignore values that are infinite, \code{NaN} or \code{NA}. } } \details{ These are methods for the generic \code{\link[base]{range}}, \code{\link[base]{max}} and \code{\link[base]{min}}. They compute the range, maximum, and minimum of the \emph{function} values that would be plotted on the \eqn{y} axis by default. For more complicated calculations, use \code{\link{with.fv}}. } \value{ Numeric vector of length 2. } \seealso{ \code{\link{with.fv}} } \examples{ G <- Gest(cells) range(G) max(G) min(G) } \author{ \adrian , \rolf and \ege. } \keyword{spatial} \keyword{math} spatstat/man/psp.Rd0000644000176200001440000000712513160710621013763 0ustar liggesusers\name{psp} \alias{psp} \title{Create a Line Segment Pattern} \description{ Creates an object of class \code{"psp"} representing a line segment pattern in the two-dimensional plane. } \usage{ psp(x0,y0, x1, y1, window, marks=NULL, check=spatstat.options("checksegments")) } \arguments{ \item{x0}{Vector of \eqn{x} coordinates of first endpoint of each segment} \item{y0}{Vector of \eqn{y} coordinates of first endpoint of each segment} \item{x1}{Vector of \eqn{x} coordinates of second endpoint of each segment} \item{y1}{Vector of \eqn{y} coordinates of second endpoint of each segment} \item{window}{window of observation, an object of class \code{"owin"}} \item{marks}{(optional) vector or data frame of mark values} \item{check}{Logical value indicating whether to check that the line segments lie inside the window.} } \value{ An object of class \code{"psp"} describing a line segment pattern in the two-dimensional plane (see \code{\link{psp.object}}). } \details{ In the \pkg{spatstat} library, a spatial pattern of line segments is described by an object of class \code{"psp"}. This function creates such objects. The vectors \code{x0}, \code{y0}, \code{x1} and \code{y1} must be numeric vectors of equal length. They are interpreted as the cartesian coordinates of the endpoints of the line segments. 
A line segment pattern is assumed to have been observed within a specific region of the plane called the observation window. An object of class \code{"psp"} representing a line segment pattern contains information specifying the observation window. This window must always be specified when creating a line segment pattern dataset; there is intentionally no default action of ``guessing'' the window dimensions from the data points alone. The argument \code{window} must be an object of class \code{"owin"}. It is a full description of the window geometry, and could have been obtained from \code{\link{owin}} or \code{\link{as.owin}}, or by just extracting the observation window of another dataset, or by manipulating such windows. See \code{\link{owin}} or the Examples below. The optional argument \code{marks} is given if the line segment pattern is marked, i.e. if each line segment carries additional information. For example, line segments which are classified into two or more different types, or colours, may be regarded as having a mark which identifies which colour they are. The object \code{marks} must be a vector of the same length as \code{x0}, or a data frame with number of rows equal to the length of \code{x0}. The interpretation is that \code{marks[i]} or \code{marks[i,]} is the mark attached to the \eqn{i}th line segment. If the marks are real numbers then \code{marks} should be a numeric vector, while if the marks take only a finite number of possible values (e.g. colours or types) then \code{marks} should be a \code{factor}. See \code{\link{psp.object}} for a description of the class \code{"psp"}. Users would normally invoke \code{psp} to create a line segment pattern, and the function \code{\link{as.psp}} to convert data in another format into a line segment pattern. 
} \seealso{ \code{\link{psp.object}}, \code{\link{as.psp}}, \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{marks.psp}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) m <- data.frame(A=1:10, B=letters[1:10]) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{datagen} spatstat/man/linearKcross.inhom.Rd0000644000176200001440000001077513160710621016736 0ustar liggesusers\name{linearKcross.inhom} \alias{linearKcross.inhom} \title{ Inhomogeneous multitype K Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ linearKcross.inhom(X, i, j, lambdaI, lambdaJ, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdaJ}{ Intensity values for the points of type \code{j}. 
Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{lambdaI} and \code{lambdaJ} if they are functions. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kcross.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. If \code{lambdaI} or \code{lambdaJ} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. 
(to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The arguments \code{i} and \code{j} are interpreted as levels of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link{linearK}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lamJ <- function(x,y,const=lam[["robbery"]]){ rep(const, length(x)) } K <- linearKcross.inhom(chicago, "assault", "robbery", lamI, lamJ) \dontrun{ fit <- lppm(chicago, ~marks + x) linearKcross.inhom(chicago, "assault", "robbery", fit, fit) } } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/pixelquad.Rd0000644000176200001440000000557513160710621015164 0ustar liggesusers\name{pixelquad} \alias{pixelquad} \title{Quadrature Scheme Based on Pixel Grid} \description{ Makes a quadrature scheme with a dummy point at every pixel of a pixel image. } \usage{ pixelquad(X, W = as.owin(X)) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}) containing the data points for the quadrature scheme. } \item{W}{ Specifies the pixel grid. A pixel image (object of class \code{"im"}), a window (object of class \code{"owin"}), or anything that can be converted to a window by \code{\link{as.owin}}. } } \value{ An object of class \code{"quad"} describing the quadrature scheme (data points, dummy points, and quadrature weights) suitable as the argument \code{Q} of the function \code{\link{ppm}()} for fitting a point process model. The quadrature scheme can be inspected using the \code{print} and \code{plot} methods for objects of class \code{"quad"}. } \details{ This is a method for producing a quadrature scheme for use by \code{\link{ppm}}. 
It is an alternative to \code{\link{quadscheme}}. The function \code{\link{ppm}} fits a point process model to an observed point pattern using the Berman-Turner quadrature approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the pseudolikelihood of the model. It requires a quadrature scheme consisting of the original data point pattern, an additional pattern of dummy points, and a vector of quadrature weights for all these points. Such quadrature schemes are represented by objects of class \code{"quad"}. See \code{\link{quad.object}} for a description of this class. Given a grid of pixels, this function creates a quadrature scheme in which there is one dummy point at the centre of each pixel. The counting weights are used (the weight attached to each quadrature point is 1 divided by the number of quadrature points falling in the same pixel). The argument \code{X} specifies the locations of the data points for the quadrature scheme. Typically this would be a point pattern dataset. The argument \code{W} specifies the grid of pixels for the dummy points of the quadrature scheme. It should be a pixel image (object of class \code{"im"}), a window (object of class \code{"owin"}), or anything that can be converted to a window by \code{\link{as.owin}}. If \code{W} is a pixel image or a binary mask (a window of type \code{"mask"}) then the pixel grid of \code{W} will be used. If \code{W} is a rectangular or polygonal window, then it will first be converted to a binary mask using \code{\link{as.mask}} at the default pixel resolution. 
} \examples{ W <- owin(c(0,1),c(0,1)) X <- runifpoint(42, W) W <- as.mask(W,dimyx=128) pixelquad(X,W) } \seealso{ \code{\link{quadscheme}}, \code{\link{quad.object}}, \code{\link{ppm}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/requireversion.Rd0000644000176200001440000000173213160710621016241 0ustar liggesusers\name{requireversion} \alias{requireversion} \title{ Require a Specific Version of a Package } \description{ Checks that the version number of a specified package is greater than or equal to the specified version number. For use in stand-alone \R scripts. } \usage{ requireversion(pkg, ver) } \arguments{ \item{pkg}{ Package name. } \item{ver}{ Character string containing version number. } } \details{ This function checks whether the installed version of the package \code{pkg} is greater than or equal to \code{ver}. It is useful in stand-alone \R scripts, which often require a particular version of a package in order to work correctly. \bold{This function should not be used inside a package}: for that purpose, the dependence on packages and versions should be specified in the package description file. } \value{ Null. } \author{ \adrian } \examples{ \dontrun{ requireversion(spatstat, "1.42-0") } } \keyword{environment} spatstat/man/plot.owin.Rd0000644000176200001440000001654113160710621015114 0ustar liggesusers\name{plot.owin} \alias{plot.owin} \title{Plot a Spatial Window} \description{ Plot a two-dimensional window of observation for a spatial point pattern } \usage{ \method{plot}{owin}(x, main, add=FALSE, \dots, box, edge=0.04, type=c("w","n"), show.all=!add, hatch=FALSE, hatchargs=list(), invert=FALSE, do.plot=TRUE, claim.title.space=FALSE) } \arguments{ \item{x}{ The window to be plotted. An object of class \code{\link{owin}}, or data which can be converted into this format by \code{\link{as.owin}()}. } \item{main}{ text to be displayed as a title above the plot. 
} \item{add}{ logical flag: if \code{TRUE}, draw the window in the current plot; if \code{FALSE}, generate a new plot. } \item{\dots}{ extra arguments controlling the appearance of the plot. These arguments are passed to \code{\link[graphics]{polygon}} if \code{x} is a polygonal or rectangular window, or passed to \code{\link[graphics]{image.default}} if \code{x} is a binary mask. See Details. } \item{box}{ logical flag; if \code{TRUE}, plot the enclosing rectangular box } \item{edge}{ nonnegative number; the plotting region will have coordinate limits that are \code{1 + edge} times as large as the limits of the rectangular box that encloses the pattern. } \item{type}{ Type of plot: either \code{"w"} or \code{"n"}. If \code{type="w"} (the default), the window is plotted. If \code{type="n"} and \code{add=TRUE}, a new plot is initialised and the coordinate system is established, but nothing is drawn. } \item{show.all}{ Logical value indicating whether to plot everything including the main title. } \item{hatch}{ logical flag; if \code{TRUE}, the interior of the window will be shaded by texture, such as a grid of parallel lines. } \item{hatchargs}{ List of arguments passed to \code{\link{add.texture}} to control the texture shading when \code{hatch=TRUE}. } \item{invert}{ logical flag; when the window is a binary pixel mask, the mask colours will be inverted if \code{invert=TRUE}. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. } \item{claim.title.space}{ Logical value indicating whether extra space for the main title should be allocated when declaring the plot dimensions. Should be set to \code{FALSE} under normal conditions. } } \value{ none. } \details{ This is the \code{plot} method for the class \code{\link{owin}}. The action is to plot the boundary of the window on the current plot device, using equal scales on the \code{x} and \code{y} axes. 
If the window \code{x} is of type \code{"rectangle"} or \code{"polygonal"}, the boundary of the window is plotted as a polygon or series of polygons. If \code{x} is of type \code{"mask"} the discrete raster approximation of the window is displayed as a binary image (white inside the window, black outside). Graphical parameters controlling the display (e.g. setting the colours) may be passed directly via the \code{...} arguments, or indirectly reset using \code{\link{spatstat.options}}. When \code{x} is of type \code{"rectangle"} or \code{"polygonal"}, it is plotted by the \R function \code{\link[graphics]{polygon}}. To control the appearance (colour, fill density, line density etc) of the polygon plot, determine the required argument of \code{\link[graphics]{polygon}} and pass it through \code{...} For example, to paint the interior of the polygon in red, use the argument \code{col="red"}. To draw the polygon edges in green, use \code{border="green"}. To suppress the drawing of polygon edges, use \code{border=NA}. When \code{x} is of type \code{"mask"}, it is plotted by \code{\link[graphics]{image.default}}. The appearance of the image plot can be controlled by passing arguments to \code{\link[graphics]{image.default}} through \code{...}. The default appearance can also be changed by setting the parameter \code{par.binary} of \code{\link{spatstat.options}}. To zoom in (to view only a subset of the window at higher magnification), use the graphical arguments \code{xlim} and \code{ylim} to specify the desired rectangular field of view. (The actual field of view may be larger, depending on the graphics device). } \section{Notes on Filled Polygons with Holes}{ The function \code{\link[graphics]{polygon}} can only handle polygons without holes. To plot polygons with holes in a solid colour, we have implemented two workarounds. 
\describe{ \item{polypath function:}{ The first workaround uses the relatively new function \code{\link[graphics]{polypath}} which \emph{does} have the capability to handle polygons with holes. However, not all graphics devices support \code{\link[graphics]{polypath}}. The older devices \code{\link{xfig}} and \code{\link{pictex}} do not support \code{\link[graphics]{polypath}}. On a Windows system, the default graphics device #ifdef windows \code{\link{windows}} #endif #ifndef windows \code{windows} #endif supports \code{\link[graphics]{polypath}}. #ifdef unix On a Linux system, the default graphics device \code{X11(type="Xlib")} does \emph{not} support \code{\link[graphics]{polypath}} but \code{X11(type="cairo")} does support it. See \code{\link{X11}} and the section on Cairo below. #endif } \item{polygon decomposition:}{ The other workaround involves decomposing the polygonal window into pieces which do not have holes. This code is experimental but works in all our test cases. If this code fails, a warning will be issued, and the filled colours will not be plotted. } } } #ifdef unix \section{Cairo graphics on a Linux system}{ Linux systems support the graphics device \code{X11(type="cairo")} (see \code{\link{X11}}) provided the external library \pkg{cairo} is installed on the computer. See \code{www.cairographics.org} for instructions on obtaining and installing \pkg{cairo}. After having installed \pkg{cairo} one needs to re-install \R from source so that it has \pkg{cairo} capabilities. To check whether your current installation of R has \pkg{cairo} capabilities, type (in \R) \code{capabilities()["cairo"]}. The default type for \code{\link{X11}} is controlled by \code{\link[grDevices]{X11.options}}. You may find it convenient to make \pkg{cairo} the default, e.g. via your \code{.Rprofile}. The magic incantation to put into \code{.Rprofile} is \preformatted{ setHook(packageEvent("graphics", "onLoad"), function(...) 
grDevices::X11.options(type="cairo")) } } #endif \seealso{ \code{\link{owin.object}}, \code{\link{plot.ppp}}, \code{\link[graphics]{polygon}}, \code{\link[graphics]{image.default}}, \code{\link{spatstat.options}} } \examples{ # rectangular window plot(Window(nztrees)) abline(v=148, lty=2) # polygonal window w <- Window(demopat) plot(w) plot(w, col="red", border="green", lwd=2) plot(w, hatch=TRUE, lwd=2) # binary mask we <- as.mask(w) plot(we) op <- spatstat.options(par.binary=list(col=grey(c(0.5,1)))) plot(we) spatstat.options(op) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/Ops.msr.Rd0000644000176200001440000000300513160710571014517 0ustar liggesusers\name{Ops.msr} \alias{Ops.msr} \title{Arithmetic Operations on Measures} \description{ These group generic methods for the class \code{"msr"} allow the arithmetic operators \code{+}, \code{-}, \code{*} and \code{/} to be applied directly to measures. } \usage{ ## S3 methods for group generics have prototypes: \special{Ops(e1, e2)} %NAMESPACE S3method("Ops", "msr") } \arguments{ \item{e1, e2}{objects of class \code{"msr"}.} } \details{ Arithmetic operators on a measure \code{A} are only defined in some cases. The arithmetic operator is effectively applied to the value of \code{A(W)} for every spatial domain \code{W}. If the result is a measure, then this operation is valid. If \code{A} is a measure (object of class \code{"msr"}) then the operations \code{-A} and \code{+A} are defined. If \code{A} and \code{B} are measures with the same dimension (i.e. both are scalar-valued, or both are \code{k}-dimensional vector-valued) then \code{A + B} and \code{A - B} are defined. If \code{A} is a measure and \code{z} is a numeric value, then \code{A * z} and \code{A / z} are defined, and \code{z * A} is defined. } \value{ Another measure (object of class \code{"msr"}). 
} \seealso{ \code{\link{with.msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp -rp 2 * rp rp /2 rp - rp rr <- residuals(fit, type="raw") rp - rr } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/plot.ppp.Rd0000644000176200001440000003374513160710621014744 0ustar liggesusers\name{plot.ppp} \alias{plot.ppp} \title{plot a Spatial Point Pattern} \description{ Plot a two-dimensional spatial point pattern } \usage{ \method{plot}{ppp}(x, main, \dots, clipwin=NULL, chars=NULL, cols=NULL, use.marks=TRUE, which.marks=NULL, add=FALSE, type=c("p","n"), legend=TRUE, leg.side=c("left", "bottom", "top", "right"), leg.args=list(), symap=NULL, maxsize=NULL, meansize=NULL, markscale=NULL, zap=0.01, show.window=show.all, show.all=!add, do.plot=TRUE, multiplot=TRUE) } \arguments{ \item{x}{ The spatial point pattern to be plotted. An object of class \code{"ppp"}, or data which can be converted into this format by \code{\link{as.ppp}()}. } \item{main}{ text to be displayed as a title above the plot. } \item{\dots}{ extra arguments that will be passed to the plotting functions \code{\link{plot.default}}, \code{\link{points}} and/or \code{\link{symbols}}. } \item{clipwin}{ Optional. A window (object of class \code{"owin"}). Only this subset of the image will be displayed. } \item{chars}{ plotting character(s) used to plot points. } \item{cols}{ the colour(s) used to plot points. } \item{use.marks}{ logical flag; if \code{TRUE}, plot points using a different plotting symbol for each mark; if \code{FALSE}, only the locations of the points will be plotted, using \code{\link{points}()}. } \item{which.marks}{ Index determining which column of marks to use, if the marks of \code{x} are a data frame. A character or integer vector identifying one or more columns of marks. If \code{add=FALSE} then the default is to plot all columns of marks, in a series of separate plots. 
If \code{add=TRUE} then only one column of marks can be plotted, and the default is \code{which.marks=1} indicating the first column of marks. } \item{add}{ logical flag; if \code{TRUE}, just the points are plotted, over the existing plot. A new plot is not created, and the window is not plotted. } \item{type}{ Type of plot: either \code{"p"} or \code{"n"}. If \code{type="p"} (the default), both the points and the observation window are plotted. If \code{type="n"}, only the window is plotted. } \item{legend}{ Logical value indicating whether to add a legend showing the mapping between mark values and graphical symbols (for a marked point pattern). } \item{leg.side}{ Position of legend relative to main plot. } \item{leg.args}{ List of additional arguments passed to \code{\link{plot.symbolmap}} or \code{\link{symbolmap}} to control the legend. In addition to arguments documented under \code{\link{plot.symbolmap}}, and graphical arguments recognised by \code{\link{symbolmap}}, the list may also include the argument \code{sep} giving the separation between the main plot and the legend, or \code{sep.frac} giving the separation as a fraction of the relevant dimension (width or height) of the main plot. } \item{symap}{ Optional. The graphical symbol map to be applied to the marks. An object of class \code{"symbolmap"}; see \code{\link{symbolmap}}. } \item{maxsize}{ \emph{Maximum} physical size of the circles/squares plotted when \code{x} is a marked point pattern with numerical marks. Incompatible with \code{meansize} and \code{markscale}. Ignored if \code{symap} is given. } \item{meansize}{ \emph{Average} physical size of the circles/squares plotted when \code{x} is a marked point pattern with numerical marks. Incompatible with \code{maxsize} and \code{markscale}. Ignored if \code{symap} is given. } \item{markscale}{ physical scale factor determining the sizes of the circles/squares plotted when \code{x} is a marked point pattern with numerical marks. 
Mark value will be multiplied by \code{markscale} to determine physical size. Incompatible with \code{maxsize} and \code{meansize}. Ignored if \code{symap} is given. } \item{zap}{ Fraction between 0 and 1. When \code{x} is a marked point pattern with numerical marks, \code{zap} is the smallest mark value (expressed as a fraction of the maximum possible mark) that will be plotted. Any points which have marks smaller in absolute value than \code{zap * max(abs(marks(x)))} will not be plotted. } \item{show.window}{ Logical value indicating whether to plot the observation window of \code{x}. } \item{show.all}{ Logical value indicating whether to plot everything including the main title and the observation window of \code{x}. } \item{do.plot}{ Logical value determining whether to actually perform the plotting. } \item{multiplot}{ Logical value giving permission to display multiple plots. } } \value{ (Invisible) object of class \code{"symbolmap"} giving the correspondence between mark values and plotting characters. } \details{ This is the \code{plot} method for point pattern datasets (of class \code{"ppp"}, see \code{\link{ppp.object}}). First the observation window \code{Window(x)} is plotted (if \code{show.window=TRUE}). Then the points themselves are plotted, in a fashion that depends on their marks, as follows. \describe{ \item{unmarked point pattern:}{ If the point pattern does not have marks, or if \code{use.marks = FALSE}, then the locations of all points will be plotted using a single plot character } \item{multitype point pattern:}{ If \code{x$marks} is a factor, then each level of the factor is represented by a different plot character. 
} \item{continuous marks:}{ If \code{x$marks} is a numeric vector, the marks are rescaled to the unit interval and each point is represented by a circle with \emph{diameter} proportional to the rescaled mark (if the value is positive) or a square with \emph{side length} proportional to the absolute value of the rescaled mark (if the value is negative). } \item{other kinds of marks:}{ If \code{x$marks} is neither numeric nor a factor, then each possible mark will be represented by a different plotting character. The default is to represent the \eqn{i}th smallest mark value by \code{points(..., pch=i)}. } } If there are several columns of marks, and if \code{which.marks} is missing or \code{NULL}, then \itemize{ \item if \code{add=FALSE} and \code{multiplot=TRUE} the default is to plot all columns of marks, in a series of separate plots, placed side-by-side. The plotting is coordinated by \code{\link{plot.listof}}, which calls \code{plot.ppp} to make each of the individual plots. \item Otherwise, only one column of marks can be plotted, and the default is \code{which.marks=1} indicating the first column of marks. } Plotting of the window \code{Window(x)} is performed by \code{\link{plot.owin}}. This plot may be modified through the \code{...} arguments. In particular the extra argument \code{border} determines the colour of the window, if the window is not a binary mask. Plotting of the points themselves is performed by the function \code{\link{points}}, except for the case of continuous marks, where it is performed by \code{\link{symbols}}. Their plotting behaviour may be modified through the \code{...} arguments. The argument \code{chars} determines the plotting character or characters used to display the points (in all cases except for the case of continuous marks). For an unmarked point pattern, this should be a single integer or character determining a plotting character (see \code{par("pch")}). 
For a multitype point pattern, \code{chars} should be a vector of integers or characters, of the same length as \code{levels(x$marks)}, and then the \eqn{i}th level or type will be plotted using character \code{chars[i]}. If \code{chars} is absent, but there is an extra argument \code{pch}, then this will determine the plotting character for all points. The argument \code{cols} determines the colour or colours used to display the points. For an unmarked point pattern, \code{cols} should be a character string determining a colour. For a multitype point pattern, \code{cols} should be a character vector, of the same length as \code{levels(marks(x))}: that is, there is one colour for each possible mark value. The \eqn{i}th level or type will be plotted using colour \code{cols[i]}. For a point pattern with continuous marks, \code{cols} can be either a character string or a character vector specifying colour values: the range of mark values will be mapped to the specified colours. If \code{cols} is absent, the colours used to plot the points may be determined by the extra argument \code{fg} (for multitype point patterns) or the extra argument \code{col} (for all other cases). Note that specifying \code{col} will also apply this colour to the window itself. The default colour for the points is a semi-transparent grey, if this is supported by the plot device. This behaviour can be suppressed (so that the default colour is non-transparent) by setting \code{spatstat.options(transparent=FALSE)}. The arguments \code{maxsize}, \code{meansize} and \code{markscale} are incompatible. They control the physical size of the circles and squares which represent the marks in a point pattern with continuous marks. The size of a circle is defined as its \emph{diameter}; the size of a square is its side length. 
If \code{markscale} is given, then a mark value of \code{m} is plotted as a circle of diameter \code{m * markscale} (if \code{m} is positive) or a square of side \code{abs(m) * markscale} (if \code{m} is negative). If \code{maxsize} is given, then the largest mark in absolute value, \code{mmax=max(abs(marks(x)))}, will be scaled to have physical size \code{maxsize}. If \code{meansize} is given, then the average absolute mark value, \code{mmean=mean(abs(marks(x)))}, will be scaled to have physical size \code{meansize}. The user can set the default values of these plotting parameters using \code{\link{spatstat.options}("par.points")}. To zoom in (to view only a subset of the point pattern at higher magnification), use the graphical arguments \code{xlim} and \code{ylim} to specify the rectangular field of view. The value returned by this plot function is an object of class \code{"symbolmap"} representing the mapping from mark values to graphical symbols. See \code{\link{symbolmap}}. It can be used to make a suitable legend, or to ensure that two plots use the same graphics map. } \section{Removing White Space Around The Plot}{ A frequently-asked question is: How do I remove the white space around the plot? Currently \code{plot.ppp} uses the base graphics system of \R, so the space around the plot is controlled by parameters to \code{\link{par}}. To reduce the white space, change the parameter \code{mar}. Typically, \code{par(mar=rep(0.5, 4))} is adequate, if there are no annotations or titles outside the window. 
} \seealso{ \code{\link{iplot}}, \code{\link{ppp.object}}, \code{\link{plot}}, \code{\link{par}}, \code{\link{points}}, \code{\link{text.ppp}}, \code{\link{plot.owin}}, \code{\link{symbols}} } \examples{ plot(cells) plot(cells, pch=16) # make the plotting symbols larger (for publication at reduced scale) plot(cells, cex=2) # set it in spatstat.options oldopt <- spatstat.options(par.points=list(cex=2)) plot(cells) spatstat.options(oldopt) # multitype plot(lansing) # marked by a real number plot(longleaf) # just plot the points plot(longleaf, use.marks=FALSE) plot(unmark(longleaf)) # equivalent # point pattern with multiple marks plot(finpines) plot(finpines, which.marks="height") # controlling COLOURS of points plot(cells, cols="blue") plot(lansing, cols=c("black", "yellow", "green", "blue","red","pink")) plot(longleaf, fg="blue") # make window purple plot(lansing, border="purple") # make everything purple plot(lansing, border="purple", cols="purple", col.main="purple", leg.args=list(col.axis="purple")) # controlling PLOT CHARACTERS for multitype pattern plot(lansing, chars = 11:16) plot(lansing, chars = c("o","h","m",".","o","o")) ## multitype pattern mapped to symbols plot(amacrine, shape=c("circles", "squares"), size=0.04) plot(amacrine, shape="arrows", direction=c(0,90), size=0.07) ## plot trees as trees! 
plot(lansing, shape="arrows", direction=90, cols=1:6) # controlling MARK SCALE for pattern with numeric marks plot(longleaf, markscale=0.1) plot(longleaf, maxsize=5) plot(longleaf, meansize=2) # draw circles of diameter equal to nearest neighbour distance plot(cells \%mark\% nndist(cells), markscale=1, legend=FALSE) # inspecting the symbol map v <- plot(amacrine) v ## variable colours ('cols' not 'col') plot(longleaf, cols=function(x) ifelse(x < 30, "red", "black")) ## re-using the same mark scale a <- plot(longleaf) juveniles <- longleaf[marks(longleaf) < 30] plot(juveniles, symap=a) ## numerical marks mapped to symbols of fixed size with variable colour ra <- range(marks(longleaf)) colmap <- colourmap(terrain.colors(20), range=ra) ## filled plot characters are the codes 21-25 ## fill colour is indicated by 'bg' sy <- symbolmap(pch=21, bg=colmap, range=ra) plot(longleaf, symap=sy) ## or more compactly.. plot(longleaf, bg=terrain.colors(20), pch=21, cex=1) ## clipping plot(humberside) B <- owin(c(4810, 5190), c(4180, 4430)) plot(B, add=TRUE, border="red") plot(humberside, clipwin=B, main="Humberside (clipped)") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat/man/reach.dppm.Rd0000644000176200001440000000215113160710621015174 0ustar liggesusers\name{reach.dppm} \alias{reach.dppm} \alias{reach.detpointprocfamily} \title{Range of Interaction for a Determinantal Point Process Model} \description{ Returns the range of interaction for a determinantal point process model. } \usage{ \method{reach}{dppm}(x, \dots) \method{reach}{detpointprocfamily}(x, \dots) } \arguments{ \item{x}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.} \item{\dots}{Additional arguments passed to the range function of the given model. } } \details{ The range of interaction for a determinantal point process model may be defined as the smallest number \eqn{R} such that \eqn{g(r)=1} for all \eqn{r\ge R}{r>=R}, where \eqn{g} is the pair correlation function. 
For many models the range is infinite, but one may instead use a value where the pair correlation function is sufficiently close to 1. For example in the Matern model this defaults to finding \eqn{R} such that \eqn{g(R)=0.99}. } \value{Numeric} \author{ \adrian \rolf and \ege } \examples{ reach(dppMatern(lambda=100, alpha=.01, nu=1, d=2)) } \keyword{spatial} \keyword{models} spatstat/man/Extract.layered.Rd0000644000176200001440000000472113160710621016216 0ustar liggesusers\name{Extract.layered} \alias{[.layered} \alias{[<-.layered} \alias{[[<-.layered} \title{Extract or Replace Subset of a Layered Object} \description{ Extract or replace some or all of the layers of a layered object, or extract a spatial subset of each layer. } \usage{ \method{[}{layered}(x, i, j, drop=FALSE, ...) \method{[}{layered}(x, i) <- value \method{[[}{layered}(x, i) <- value } \arguments{ \item{x}{ A layered object (class \code{"layered"}). } \item{i}{ Subset index for the list of layers. A logical vector, integer vector or character vector specifying which layers are to be extracted or replaced. } \item{j}{ Subset index to be applied to the data in each layer. Typically a spatial window (class \code{"owin"}). } \item{drop}{ Logical. If \code{i} specifies only a single layer and \code{drop=TRUE}, then the contents of this layer will be returned. } \item{\dots}{ Additional arguments, passed to other subset methods if the subset index is a window. } \item{value}{List of objects which shall replace the designated subset, or an object which shall replace the designated element. } } \value{ Usually an object of class \code{"layered"}. } \details{ A layered object represents data that should be plotted in successive layers, for example, a background and a foreground. See \code{\link{layered}}. The function \code{[.layered} extracts a designated subset of a layered object. It is a method for \code{\link{[}} for the class \code{"layered"}. 
The functions \code{[<-.layered} and \code{[[<-.layered} replace a designated subset or designated entry of the object by new values. They are methods for \code{\link{[<-}} and \code{\link{[[<-}} for the \code{"layered"} class. The index \code{i} specifies which layers will be retained. It should be a valid subset index for the list of layers. The index \code{j} will be applied to each layer. It is typically a spatial window (class \code{"owin"}) so that each of the layers will be restricted to the same spatial region. Alternatively \code{j} may be any subset index which is permissible for the \code{"["} method for each of the layers. } \seealso{ \code{\link{layered}} } \examples{ D <- distmap(cells) L <- layered(D, cells, plotargs=list(list(ribbon=FALSE), list(pch=16))) L[-2] L[, square(0.5)] L[[3]] <- japanesepines L } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/plot.tess.Rd0000644000176200001440000000415713160710621015116 0ustar liggesusers\name{plot.tess} \alias{plot.tess} \title{Plot a tessellation} \description{ Plots a tessellation. } \usage{ \method{plot}{tess}(x, ..., main, add=FALSE, show.all=!add, col=NULL, do.plot=TRUE, do.labels=FALSE, labels=tilenames(x), labelargs=list()) } \arguments{ \item{x}{Tessellation (object of class \code{"tess"}) to be plotted.} \item{\dots}{Arguments controlling the appearance of the plot.} \item{main}{Heading for the plot. A character string.} \item{add}{Logical. Determines whether the tessellation plot is added to the existing plot. } \item{show.all}{ Logical value indicating whether to plot everything including the main title and the observation window of \code{x}. } \item{col}{ Colour of the tile boundaries. A character string. Ignored for pixel tessellations. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. } \item{do.labels}{ Logical value indicating whether to show a text label for each tile of the tessellation. 
} \item{labels}{Character vector of labels for the tiles.} \item{labelargs}{ List of arguments passed to \code{\link{text.default}} to control display of the text labels. } } \details{ This is a method for the generic \code{\link{plot}} function for the class \code{"tess"} of tessellations (see \code{\link{tess}}). The arguments \code{\dots} control the appearance of the plot. They are passed to \code{\link{segments}}, \code{\link{plot.owin}} or \code{\link{plot.im}}, depending on the type of tessellation. } \value{ (Invisible) window of class \code{"owin"} specifying a bounding box for the plot (including a colour ribbon if plotted). } \seealso{ \code{\link{tess}} } \examples{ A <- tess(xgrid=0:4,ygrid=0:4) plot(A, col="blue", lwd=2, lty=2) B <- A[c(1, 2, 5, 7, 9)] plot(B, hatch=TRUE) v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin()) levels(v) <- letters[seq(length(levels(v)))] E <- tess(image=v) plot(E) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/pcfcross.Rd0000644000176200001440000001455613160710621015011 0ustar liggesusers\name{pcfcross} \alias{pcfcross} \title{Multitype pair correlation function (cross-type)} \description{ Calculates an estimate of the cross-type pair correlation function for a multitype point pattern. } \usage{ pcfcross(X, i, j, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("isotropic", "Ripley", "translate"), divisor = c("r", "d")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. 
} \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule; see Details. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } } \details{ The cross-type pair correlation function is a generalisation of the pair correlation function \code{\link{pcf}} to multitype point patterns. For two locations \eqn{x} and \eqn{y} separated by a distance \eqn{r}, the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and a point of type \eqn{j} at location \eqn{y} is \deqn{ p(r) = \lambda_i \lambda_j g_{i,j}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i] * lambda[j] * g[i,j](r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity of the points of type \eqn{i}. For a completely random Poisson marked point process, \eqn{p(r) = \lambda_i \lambda_j}{p(r) = lambda[i] * lambda[j]} so \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}. Indeed for any marked point pattern in which the points of type \code{i} are independent of the points of type \code{j}, the theoretical value of the cross-type pair correlation is \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}. 
For a stationary multitype point process, the cross-type pair correlation function between marks \eqn{i} and \eqn{j} is formally defined as \deqn{ g_{i,j}(r) = \frac{K_{i,j}^\prime(r)}{2\pi r} }{ g(r) = K[i,j]'(r)/ ( 2 * pi * r) } where \eqn{K_{i,j}^\prime}{K[i,j]'(r)} is the derivative of the cross-type \eqn{K} function \eqn{K_{i,j}(r)}{K[i,j](r)} of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. The command \code{pcfcross} computes a kernel estimate of the cross-type pair correlation function between marks \eqn{i} and \eqn{j}. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. 
If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285) applied to the points of type \code{j}. That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process of type \code{j}, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. The companion function \code{\link{pcfdot}} computes the corresponding analogue of \code{\link{Kdot}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{g_{i,j}}{g[i,j]} has been estimated } \item{theo}{the theoretical value \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1} for independent marks. } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{g_{i,j}}{g[i,j]} obtained by the edge corrections named. } \seealso{ Mark connection function \code{\link{markconnect}}. Multitype pair correlation \code{\link{pcfdot}}, \code{\link{pcfmulti}}. Pair correlation \code{\link{pcf}},\code{\link{pcf.ppp}}. \code{\link{Kcross}} } \examples{ data(amacrine) p <- pcfcross(amacrine, "off", "on") p <- pcfcross(amacrine, "off", "on", stoyan=0.1) plot(p) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/as.function.im.Rd0000644000176200001440000000156113160710571016016 0ustar liggesusers\name{as.function.im} \alias{as.function.im} \title{ Convert Pixel Image to Function of Coordinates } \description{ Converts a pixel image to a function of the \eqn{x} and \eqn{y} coordinates. } \usage{ \method{as.function}{im}(x, ...) } \arguments{ \item{x}{ Pixel image (object of class \code{"im"}). 
} \item{\dots}{ Ignored. } } \details{ This command converts a pixel image (object of class \code{"im"}) to a \code{function(x,y)} where the arguments \code{x} and \code{y} are (vectors of) spatial coordinates. This function returns the pixel values at the specified locations. } \value{ A function in the \R language, also belonging to the class \code{"funxy"}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{[.im}} } \examples{ d <- density(cells) f <- as.function(d) f(0.1, 0.3) } \keyword{spatial} \keyword{manip} spatstat/man/laslett.Rd0000644000176200001440000001355013160710621014630 0ustar liggesusers\name{laslett} \alias{laslett} \title{ Laslett's Transform } \description{ Apply Laslett's Transform to a spatial region, returning the original and transformed regions, and the original and transformed positions of the lower tangent points. This is a diagnostic for the Boolean model. } \usage{ laslett(X, \dots, verbose = FALSE, plotit = TRUE, discretise = FALSE, type=c("lower", "upper", "left", "right")) } \arguments{ \item{X}{ Spatial region to be transformed. A window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } \item{\dots}{ Graphics arguments to control the plot (passed to \code{\link{plot.laslett}} when \code{plotit=TRUE}) or arguments determining the pixel resolution (passed to \code{\link{as.mask}}). } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{plotit}{ Logical value indicating whether to plot the result. } \item{discretise}{ Logical value indicating whether polygonal windows should first be converted to pixel masks before the Laslett transform is computed. This should be set to \code{TRUE} for very complicated polygons. } \item{type}{ Type of tangent points to be detected. This also determines the direction of contraction in the set transformation. Default is \code{type="lower"}. 
} } \details{ This function finds the lower tangent points of the spatial region \code{X}, then applies Laslett's Transform to the space, and records the transformed positions of the lower tangent points. Laslett's transform is a diagnostic for the Boolean Model. A test of the Boolean model can be performed by applying a test of CSR to the transformed tangent points. See the Examples. The rationale is that, if the region \code{X} was generated by a Boolean model with convex grains, then the lower tangent points of \code{X}, when subjected to Laslett's transform, become a Poisson point process (Cressie, 1993, section 9.3.5; Molchanov, 1997; Barbour and Schmidt, 2001). Intuitively, Laslett's transform is a way to account for the fact that tangent points of \code{X} cannot occur \emph{inside} \code{X}. It treats the interior of \code{X} as empty space, and collapses this empty space so that only the \emph{exterior} of \code{X} remains. In this collapsed space, the tangent points are completely random. Formally, Laslett's transform is a random (i.e. data-dependent) spatial transformation which maps each spatial location \eqn{(x,y)} to a new location \eqn{(x',y)} at the same height \eqn{y}. The transformation is defined so that \eqn{x'} is the total \emph{uncovered} length of the line segment from \eqn{(0,y)} to \eqn{(x,y)}, that is, the total length of the parts of this segment that fall outside the region \code{X}. In more colourful terms, suppose we use an abacus to display a pixellated version of \code{X}. Each wire of the abacus represents one horizontal line in the pixel image. Each pixel lying \emph{outside} the region \code{X} is represented by a bead of the abacus; pixels \emph{inside} \code{X} are represented by the absence of a bead. Next we find any beads which are lower tangent points of \code{X}, and paint them green. Then Laslett's Transform is applied by pushing all beads to the left, as far as possible. 
The final locations of all the beads provide a new spatial region, inside which is the point pattern of tangent points (marked by the green-painted beads). If \code{plotit=TRUE} (the default), a before-and-after plot is generated, showing the region \code{X} and the tangent points before and after the transformation. This plot can also be generated by calling \code{plot(a)} where \code{a} is the object returned by the function \code{laslett}. If the argument \code{type} is given, then this determines the type of tangents that will be detected, and also the direction of contraction in Laslett's transform. The computation is performed by first rotating \code{X}, applying Laslett's transform for lower tangent points, then rotating back. There are separate algorithms for polygonal windows and pixellated windows (binary masks). The polygonal algorithm may be slow for very complicated polygons. If this happens, setting \code{discretise=TRUE} will convert the polygonal window to a binary mask and invoke the pixel raster algorithm. } \value{ A list, which also belongs to the class \code{"laslett"} so that it can immediately be printed and plotted. The list elements are: \describe{ \item{oldX:}{the original dataset \code{X};} \item{TanOld:}{a point pattern, whose window is \code{Frame(X)}, containing the lower tangent points of \code{X};} \item{TanNew:}{a point pattern, whose window is the Laslett transform of \code{Frame(X)}, and which contains the Laslett-transformed positions of the tangent points;} \item{Rect:}{a rectangular window, which is the largest rectangle lying inside the transformed set;} \item{df:}{a data frame giving the locations of the tangent points before and after transformation. } \item{type:}{character string specifying the type of tangents.} } } \references{ Barbour, A.D. and Schmidt, V. (2001) On Laslett's Transform for the Boolean Model. \emph{Advances in Applied Probability} \bold{33}(1), 1--5. Cressie, N.A.C. 
(1993) \emph{Statistics for spatial data}, second edition. John Wiley and Sons. Molchanov, I. (1997) \emph{Statistics of the Boolean Model for Practitioners and Mathematicians}. Wiley. } \author{ Kassel Hingee and \adrian. } \seealso{ \code{\link{plot.laslett}} } \examples{ a <- laslett(heather$coarse) with(a, clarkevans.test(TanNew[Rect], correction="D", nsim=39)) X <- discs(runifpoint(15) \%mark\% 0.2, npoly=16) b <- laslett(X) } \keyword{spatial} \keyword{manip} spatstat/man/kernel.factor.Rd0000644000176200001440000000265213160710621015716 0ustar liggesusers\name{kernel.factor} \alias{kernel.factor} \title{Scale factor for density kernel} \description{ Returns a scale factor for the kernels used in density estimation for numerical data. } \usage{ kernel.factor(kernel = "gaussian") } \arguments{ \item{kernel}{ String name of the kernel. Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } } \details{ Kernel estimation of a probability density in one dimension is performed by \code{\link[stats]{density.default}} using a kernel function selected from the list above. This function computes a scale constant for the kernel. For the Gaussian kernel, this constant is equal to 1. Otherwise, the constant \eqn{c} is such that the kernel with standard deviation \eqn{1} is supported on the interval \eqn{[-c,c]}. For more information about these kernels, see \code{\link[stats]{density.default}}. } \value{ A single number. 
} \seealso{ \code{\link[stats]{density.default}}, \code{\link{dkernel}}, \code{\link{kernel.moment}}, \code{\link{kernel.squint}} } \examples{ kernel.factor("rect") # bandwidth for Epanechnikov kernel with half-width h=1 h <- 1 bw <- h/kernel.factor("epa") } \author{\adrian and Martin Hazelton } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat/man/scan.test.Rd0000644000176200001440000001260113160710621015062 0ustar liggesusers\name{scan.test} \alias{scan.test} \title{ Spatial Scan Test } \description{ Performs the Spatial Scan Test for clustering in a spatial point pattern, or for clustering of one type of point in a bivariate spatial point pattern. } \usage{ scan.test(X, r, ..., method = c("poisson", "binomial"), nsim = 19, baseline = NULL, case = 2, alternative = c("greater", "less", "two.sided"), verbose = TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Radius of circle to use. A single number or a numeric vector. } \item{\dots}{ Optional. Arguments passed to \code{\link{as.mask}} to determine the spatial resolution of the computations. } \item{method}{ Either \code{"poisson"} or \code{"binomial"} specifying the type of likelihood. } \item{nsim}{ Number of simulations for computing Monte Carlo p-value. } \item{baseline}{ Baseline for the Poisson intensity, if \code{method="poisson"}. A pixel image or a function. } \item{case}{ Which type of point should be interpreted as a case, if \code{method="binomial"}. Integer or character string. } \item{alternative}{ Alternative hypothesis: \code{"greater"} if the alternative postulates that the mean number of points inside the circle will be greater than expected under the null. } \item{verbose}{ Logical. Whether to print progress reports. } } \details{ The spatial scan test (Kulldorff, 1997) is applied to the point pattern \code{X}. 
In a nutshell, \itemize{ \item If \code{method="poisson"} then a significant result would mean that there is a circle of radius \code{r}, located somewhere in the spatial domain of the data, which contains a significantly higher than expected number of points of \code{X}. That is, the pattern \code{X} exhibits spatial clustering. \item If \code{method="binomial"} then \code{X} must be a bivariate (two-type) point pattern. By default, the first type of point is interpreted as a control (non-event) and the second type of point as a case (event). A significant result would mean that there is a circle of radius \code{r} which contains a significantly higher than expected number of cases. That is, the cases are clustered together, conditional on the locations of all points. } Following is a more detailed explanation. \itemize{ \item If \code{method="poisson"} then the scan test based on Poisson likelihood is performed (Kulldorff, 1997). The dataset \code{X} is treated as an unmarked point pattern. By default (if \code{baseline} is not specified) the null hypothesis is complete spatial randomness CSR (i.e. a uniform Poisson process). The alternative hypothesis is a Poisson process with one intensity \eqn{\beta_1}{beta1} inside some circle of radius \code{r} and another intensity \eqn{\beta_0}{beta0} outside the circle. If \code{baseline} is given, then it should be a pixel image or a \code{function(x,y)}. The null hypothesis is an inhomogeneous Poisson process with intensity proportional to \code{baseline}. The alternative hypothesis is an inhomogeneous Poisson process with intensity \code{beta1 * baseline} inside some circle of radius \code{r}, and \code{beta0 * baseline} outside the circle. \item If \code{method="binomial"} then the scan test based on binomial likelihood is performed (Kulldorff, 1997). The dataset \code{X} must be a bivariate point pattern, i.e. a multitype point pattern with two types. 
The null hypothesis is that all permutations of the type labels are equally likely. The alternative hypothesis is that some circle of radius \code{r} has a higher proportion of points of the second type, than expected under the null hypothesis. } The result of \code{scan.test} is a hypothesis test (object of class \code{"htest"}) which can be plotted to report the results. The component \code{p.value} contains the \eqn{p}-value. The result of \code{scan.test} can also be plotted (using the plot method for the class \code{"scan.test"}). The plot is a pixel image of the Likelihood Ratio Test Statistic (2 times the log likelihood ratio) as a function of the location of the centre of the circle. This pixel image can be extracted from the object using \code{\link{as.im.scan.test}}. The Likelihood Ratio Test Statistic is computed by \code{\link{scanLRTS}}. } \value{ An object of class \code{"htest"} (hypothesis test) which also belongs to the class \code{"scan.test"}. Printing this object gives the result of the test. Plotting this object displays the Likelihood Ratio Test Statistic as a function of the location of the centre of the circle. } \references{ Kulldorff, M. (1997) A spatial scan statistic. \emph{Communications in Statistics --- Theory and Methods} \bold{26}, 1481--1496. 
} \author{\adrian and \rolf } \seealso{ \code{\link{plot.scan.test}}, \code{\link{as.im.scan.test}}, \code{\link{relrisk}}, \code{\link{scanLRTS}} } \examples{ nsim <- if(interactive()) 19 else 2 rr <- if(interactive()) seq(0.5, 1, by=0.1) else c(0.5, 1) scan.test(redwood, 0.1 * rr, method="poisson", nsim=nsim) scan.test(chorley, rr, method="binomial", case="larynx", nsim=nsim) } \keyword{htest} \keyword{spatial} spatstat/man/closepairs.Rd0000644000176200001440000001302013160710571015320 0ustar liggesusers\name{closepairs} \alias{closepairs} \alias{crosspairs} \alias{closepairs.ppp} \alias{crosspairs.ppp} \alias{closepaircounts} \alias{crosspaircounts} \title{ Close Pairs of Points } \description{ Low-level functions to find all close pairs of points. } \usage{ closepaircounts(X, r) crosspaircounts(X, Y, r) closepairs(X, rmax, \dots) \method{closepairs}{ppp}(X, rmax, twice=TRUE, what=c("all","indices","ijd"), distinct=TRUE, neat=TRUE, \dots) crosspairs(X, Y, rmax, \dots) \method{crosspairs}{ppp}(X, Y, rmax, what=c("all", "indices", "ijd"), \dots) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{r,rmax}{ Maximum distance between pairs of points to be counted as close pairs. } \item{twice}{ Logical value indicating whether all ordered pairs of close points should be returned. If \code{twice=TRUE} (the default), each pair will appear twice in the output, as \code{(i,j)} and again as \code{(j,i)}. If \code{twice=FALSE}, then each pair will appear only once, as the pair \code{(i,j)} with \code{i < j}. } \item{what}{ String specifying the data to be returned for each close pair of points. If \code{what="all"} (the default) then the returned information includes the indices \code{i,j} of each pair, their \code{x,y} coordinates, and the distance between them. If \code{what="indices"} then only the indices \code{i,j} are returned. If \code{what="ijd"} then the indices \code{i,j} and the distance \code{d} are returned. 
} \item{distinct}{ Logical value indicating whether to return only the pairs of points with different indices \code{i} and \code{j} (\code{distinct=TRUE}, the default) or to also include the pairs where \code{i=j} (\code{distinct=FALSE}). } \item{neat}{ Logical value indicating whether to ensure that \code{i < j} in each output pair, when \code{twice=FALSE}. } \item{\dots}{Extra arguments, ignored by methods.} } \details{ These are the efficient low-level functions used by \pkg{spatstat} to find all close pairs of points in a point pattern or all close pairs between two point patterns. \code{closepaircounts(X,r)} counts the number of neighbours for each point in the pattern \code{X}. That is, for each point \code{X[i]}, it counts the number of other points \code{X[j]} with \code{j != i} such that \code{d(X[i],X[j]) <= r} where \code{d} denotes Euclidean distance. The result is an integer vector \code{v} such that \code{v[i]} is the number of neighbours of \code{X[i]}. \code{crosspaircounts(X,Y,r)} counts, for each point in the pattern \code{X}, the number of neighbours in the pattern \code{Y}. That is, for each point \code{X[i]}, it counts the number of points \code{Y[j]} such that \code{d(X[i],X[j]) <= r}. The result is an integer vector \code{v} such that \code{v[i]} is the number of neighbours of \code{X[i]} in the pattern \code{Y}. \code{closepairs(X,rmax)} identifies all pairs of distinct neighbours in the pattern \code{X} and returns them. 
The result is a list with the following components: \describe{ \item{i}{Integer vector of indices of the first point in each pair.} \item{j}{Integer vector of indices of the second point in each pair.} \item{xi,yi}{Coordinates of the first point in each pair.} \item{xj,yj}{Coordinates of the second point in each pair.} \item{dx}{Equal to \code{xj-xi}} \item{dy}{Equal to \code{yj-yi}} \item{d}{Euclidean distance between each pair of points.} } If \code{what="indices"} then only the components \code{i} and \code{j} are returned. This is slightly faster and more efficient with use of memory. \code{crosspairs(X,rmax)} identifies all pairs of neighbours \code{(X[i], Y[j])} between the patterns \code{X} and \code{Y}, and returns them. The result is a list with the same format as for \code{closepairs}. } \section{Warning about accuracy}{ The results of these functions may not agree exactly with the correct answer (as calculated by a human) and may not be consistent between different computers and different installations of \R. The discrepancies arise in marginal cases where the interpoint distance is equal to, or very close to, the threshold \code{rmax}. Floating-point numbers in a computer are not mathematical Real Numbers: they are approximations using finite-precision binary arithmetic. The approximation is accurate to a tolerance of about \code{.Machine$double.eps}. If the true interpoint distance \eqn{d} and the threshold \code{rmax} are equal, or if their difference is no more than \code{.Machine$double.eps}, the result may be incorrect. } \value{ For \code{closepaircounts} and \code{crosspaircounts}, an integer vector of length equal to the number of points in \code{X}. For \code{closepairs} and \code{crosspairs}, a list with components \code{i} and \code{j}, and possibly other components as described under Details. } \author{\adrian and \rolf } \seealso{ \code{\link{closepairs.pp3}} for the corresponding functions for 3D point patterns. 
\code{\link{Kest}}, \code{\link{Kcross}}, \code{\link{nndist}}, \code{\link{nncross}}, \code{\link{applynbd}}, \code{\link{markstat}} for functions which use these capabilities. } \examples{ a <- closepaircounts(cells, 0.1) sum(a) Y <- split(amacrine) b <- crosspaircounts(Y$on, Y$off, 0.1) d <- closepairs(cells, 0.1) e <- crosspairs(Y$on, Y$off, 0.1) } \keyword{spatial} \keyword{math} spatstat/man/sdrPredict.Rd0000644000176200001440000000236413160710621015264 0ustar liggesusers\name{sdrPredict} \alias{sdrPredict} \title{ Compute Predictors from Sufficient Dimension Reduction } \description{ Given the result of a Sufficient Dimension Reduction method, compute the new predictors. } \usage{ sdrPredict(covariates, B) } \arguments{ \item{covariates}{ A list of pixel images (objects of class \code{"im"}). } \item{B}{ Either a matrix of coefficients for the covariates, or the result of a call to \code{\link{sdr}}. } } \details{ This function assumes that \code{\link{sdr}} has already been used to find a minimal set of predictors based on the \code{covariates}. The argument \code{B} should be either the result of \code{\link{sdr}} or the coefficient matrix returned as one of the results of \code{\link{sdr}}. The columns of this matrix define linear combinations of the \code{covariates}. This function evaluates those linear combinations, and returns a list of pixel images containing the new predictors. } \value{ A list of pixel images (objects of class \code{"im"}) with one entry for each column of \code{B}. } \author{ \adrian } \seealso{ \code{\link{sdr}} } \examples{ A <- sdr(bei, bei.extra) Y <- sdrPredict(bei.extra, A) Y } \keyword{spatial} \keyword{nonparametric} spatstat/man/volume.Rd0000644000176200001440000000176413160710621014473 0ustar liggesusers\name{volume} \alias{volume} \title{Volume of an Object} \description{ Computes the volume of a spatial object such as a three-dimensional box. 
} \usage{ volume(x) } \arguments{ \item{x}{ An object whose volume will be computed. } } \value{ The numerical value of the volume of the object. } \details{ This function computes the volume of an object such as a three-dimensional box. The function \code{volume} is generic, with methods for the classes \code{"box3"} (three-dimensional boxes) and \code{"boxx"} (multi-dimensional boxes). There is also a method for the class \code{"owin"} (two-dimensional windows), which is identical to \code{\link{area.owin}}, and a method for the class \code{"linnet"} of linear networks, which returns the length of the network. } \seealso{ \code{\link{area.owin}}, \code{\link{volume.box3}}, \code{\link{volume.boxx}}, \code{\link{volume.linnet}} } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/by.im.Rd0000644000176200001440000000325713160710571014205 0ustar liggesusers\name{by.im} \alias{by.im} \title{Apply Function to Image Broken Down by Factor} \description{ Splits a pixel image into sub-images and applies a function to each sub-image. } \usage{ \method{by}{im}(data, INDICES, FUN, ...) } \arguments{ \item{data}{A pixel image (object of class \code{"im"}).} \item{INDICES}{Grouping variable. Either a tessellation (object of class \code{"tess"}) or a factor-valued pixel image. } \item{FUN}{Function to be applied to each sub-image of \code{data}.} \item{\dots}{Extra arguments passed to \code{FUN}.} } \details{ This is a method for the generic function \code{\link{by}} for pixel images (class \code{"im"}). The pixel image \code{data} is first divided into sub-images according to \code{INDICES}. Then the function \code{FUN} is applied to each subset. The results of each computation are returned in a list. The grouping variable \code{INDICES} may be either \itemize{ \item a tessellation (object of class \code{"tess"}). Each tile of the tessellation delineates a subset of the spatial domain. 
\item a pixel image (object of class \code{"im"}) with factor values. The levels of the factor determine subsets of the spatial domain. } } \value{ A list containing the results of each evaluation of \code{FUN}. } \seealso{ \code{\link{split.im}}, \code{\link{tess}}, \code{\link{im}} } \examples{ W <- square(1) X <- as.im(function(x,y){sqrt(x^2+y^2)}, W) Y <- dirichlet(runifpoint(12, W)) # mean pixel value in each subset unlist(by(X, Y, mean)) # trimmed mean unlist(by(X, Y, mean, trim=0.05)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/Ginhom.Rd0000644000176200001440000001515213160710571014405 0ustar liggesusers\name{Ginhom} \alias{Ginhom} \title{ Inhomogeneous Nearest Neighbour Function } \description{ Estimates the inhomogeneous nearest neighbour function \eqn{G} of a non-stationary point pattern. } \usage{ Ginhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, ratio = FALSE, update = TRUE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{G} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. 
} \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{G} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"} or \code{"kppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without fitting it to \code{X}. } } \details{ This command computes estimates of the inhomogeneous \eqn{G}-function (van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the nearest-neighbour distance distribution function \eqn{G} for homogeneous point patterns computed by \code{\link{Gest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{G} function is computed using the border correction, equation (7) in Van Lieshout (2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. 
} \item{a fitted point process model}{ (object of class \code{"ppm"} or \code{"kppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. 
The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. } \seealso{ \code{\link{Finhom}}, \code{\link{Jinhom}}, \code{\link{Gest}} } \examples{ \dontrun{ plot(Ginhom(swedishpines, sigma=bw.diggle, adjust=2)) } plot(Ginhom(swedishpines, sigma=10)) } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege. } \keyword{spatial} \keyword{nonparametric} spatstat/man/psstA.Rd0000644000176200001440000001533013160710621014250 0ustar liggesusers\name{psstA} \Rdversion{1.1} \alias{psstA} \title{ Pseudoscore Diagnostic For Fitted Model against Area-Interaction Alternative } \description{ Given a point process model fitted to a point pattern dataset, this function computes the pseudoscore diagnostic of goodness-of-fit for the model, against moderately clustered or moderately inhibited alternatives of area-interaction type. } \usage{ psstA(object, r = NULL, breaks = NULL, \dots, model = NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), ppmcorrection = "border", correction = "all", truecoef = NULL, hi.res = NULL, nr=spatstat.options("psstA.nr"), ngrid=spatstat.options("psstA.ngrid")) } \arguments{ \item{object}{ Object to be analysed. 
Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{ Extra arguments passed to \code{\link{quadscheme}} to determine the quadrature scheme, if \code{object} is a point pattern. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{ppmcorrection}{ Optional. Character string specifying the edge correction for the pseudolikelihood to be used in fitting the point process model. Passed to \code{\link{ppm}}. } \item{correction}{ Optional. Character string specifying which diagnostic quantities will be computed. Options are \code{"all"} and \code{"best"}. The default is to compute all diagnostic quantities. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. 
} \item{nr}{ Optional. Number of \code{r} values to be used if \code{r} is not specified. } \item{ngrid}{ Integer. Number of points in the square grid used to compute the approximate area. } } \details{ This function computes the pseudoscore test statistic which can be used as a diagnostic for goodness-of-fit of a fitted point process model. Let \eqn{x} be a point pattern dataset consisting of points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. The alternative hypothesis is a family of hybrid models obtained by combining the fitted model with the area-interaction process (see \code{\link{AreaInter}}). The family of alternatives includes models that are slightly more regular than the fitted model, and others that are slightly more clustered than the fitted model. The pseudoscore, evaluated at the null model, is \deqn{ V(r) = \sum_i A(x_i, x, r) - \int_W A(u,x, r) \lambda(u,x) {\rm d} u }{ V(r) = sum( A(x[i], x, r)) - integral( A(u,x,r) lambda(u,x) du) } where \deqn{ A(u,x,r) = B(x\cup\{u\},r) - B(x\setminus u, r) }{ A(u,x,r) = B(x union u, r) - B(x setminus u, r) } where \eqn{B(x,r)} is the area of the union of the discs of radius \eqn{r} centred at the points of \eqn{x} (i.e. \eqn{B(x,r)} is the area of the dilation of \eqn{x} by a distance \eqn{r}). Thus \eqn{A(u,x,r)} is the \emph{unclaimed area} associated with \eqn{u}, that is, the area of that part of the disc of radius \eqn{r} centred at the point \eqn{u} that is not covered by any of the discs of radius \eqn{r} centred at points of \eqn{x}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). 
Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \section{Warning}{ This computation can take a \bold{very long time}. To shorten the computation time, choose smaller values of the arguments \code{nr} and \code{ngrid}, or reduce the values of their defaults \code{spatstat.options("psstA.nr")} and \code{spatstat.options("psstA.ngrid")}. Computation time is roughly proportional to \code{nr * npoints * ngrid^2} where \code{npoints} is the number of points in the point pattern. } \references{ Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}. } \seealso{ Alternative functions: \code{\link{psstG}}, \code{\link{psst}}, \code{\link{Gres}}, \code{\link{Kres}}. Point process models: \code{\link{ppm}}. 
Options: \code{\link{spatstat.options}} } \examples{ pso <- spatstat.options(psstA.ngrid=16,psstA.nr=10) X <- rStrauss(200,0.1,0.05) plot(psstA(X)) plot(psstA(X, interaction=Strauss(0.05))) spatstat.options(pso) } \keyword{spatial} \keyword{models} spatstat/man/owin.object.Rd0000644000176200001440000000706713160710621015407 0ustar liggesusers\name{owin.object} \alias{owin.object} %DoNotExport \title{Class owin} \description{ A class \code{owin} to define the ``observation window'' of a point pattern } \details{ In the \pkg{spatstat} library, a point pattern dataset must include information about the window or region in which the pattern was observed. A window is described by an object of class \code{"owin"}. Windows of arbitrary shape are supported. An object of class \code{"owin"} has one of three types: \tabular{ll}{ \code{"rectangle"}: \tab a rectangle in the two-dimensional plane with edges parallel to the axes \cr \code{"polygonal"}: \tab a region whose boundary is a polygon or several polygons. The region may have holes and may consist of several disconnected pieces. \cr \code{"mask"}: \tab a binary image (a logical matrix) set to \code{TRUE} for pixels inside the window and \code{FALSE} outside the window. } Objects of class \code{"owin"} may be created by the function \code{\link{owin}} and converted from other types of data by the function \code{\link{as.owin}}. They may be manipulated by the functions \code{\link{as.rectangle}}, \code{\link{as.mask}}, \code{\link{complement.owin}}, \code{\link{rotate}}, \code{\link{shift}}, \code{\link{affine}}, \code{\link{erosion}}, \code{\link{dilation}}, \code{\link{opening}} and \code{\link{closing}}. Geometrical calculations available for windows include \code{\link{area.owin}}, \code{\link{perimeter}}, \code{\link{diameter.owin}}, \code{\link{boundingbox}}, \code{\link{eroded.areas}}, \code{\link{bdist.points}}, \code{\link{bdist.pixels}}, and \code{even.breaks.owin}. 
The mapping between continuous coordinates and pixel raster indices is facilitated by the functions \code{\link{raster.x}}, \code{\link{raster.y}} and \code{\link{nearest.raster.point}}. There is a \code{plot} method for window objects, \code{\link{plot.owin}}. This may be useful if you wish to plot a point pattern's window without the points for graphical purposes. There are also methods for \code{summary} and \code{print}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{as.rectangle}}, \code{\link{as.mask}}, \code{\link{summary.owin}}, \code{\link{print.owin}}, \code{\link{complement.owin}}, \code{\link{erosion}}, \code{\link{dilation}}, \code{\link{opening}}, \code{\link{closing}}, \code{\link{affine.owin}}, \code{\link{shift.owin}}, \code{\link{rotate.owin}}, \code{\link{raster.x}}, \code{\link{raster.y}}, \code{\link{nearest.raster.point}}, \code{\link{plot.owin}}, \code{\link{area.owin}}, \code{\link{boundingbox}}, \code{\link{diameter}}, \code{\link{eroded.areas}}, \code{\link{bdist.points}}, \code{\link{bdist.pixels}} } \section{Warnings}{ In a window of type \code{"mask"}, the row index corresponds to increasing \eqn{y} coordinate, and the column index corresponds to increasing \eqn{x} coordinate. 
} \examples{ w <- owin() w <- owin(c(0,1), c(0,1)) # the unit square w <- owin(c(0,1), c(0,2)) \dontrun{ if(FALSE) { plot(w) # plots edges of a box 1 unit x 2 units v <- locator() # click on points in the plot window # to be the vertices of a polygon # traversed in anticlockwise order u <- owin(c(0,1), c(0,2), poly=v) plot(u) # plots polygonal boundary using polygon() plot(as.mask(u, eps=0.02)) # plots discrete pixel approximation to polygon } } } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/plot.listof.Rd0000644000176200001440000002104213160710621015430 0ustar liggesusers\name{plot.listof} \alias{plot.listof} \title{Plot a List of Things} \description{ Plots a list of things } \usage{ \method{plot}{listof}(x, \dots, main, arrange=TRUE, nrows=NULL, ncols=NULL, main.panel=NULL, mar.panel=c(2,1,1,2), hsep=0, vsep=0, panel.begin=NULL, panel.end=NULL, panel.args=NULL, panel.begin.args=NULL, panel.end.args=NULL, plotcommand="plot", adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL, adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE) } \arguments{ \item{x}{ An object of the class \code{"listof"}. Essentially a list of objects. } \item{\dots}{ Arguments passed to \code{\link{plot}} when generating each plot panel. } \item{main}{ Overall heading for the plot. } \item{arrange}{ Logical flag indicating whether to plot the objects side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{main.panel}{ Optional. A character string, or a vector of character strings, giving the headings for each of the objects. } \item{mar.panel}{ Size of the margins outside each plot panel. A numeric vector of length 4 giving the bottom, left, top, and right margins in that order. 
(Alternatively the vector may have length 1 or 2 and will be replicated to length 4). See the section on \emph{Spacing between plots}. } \item{hsep,vsep}{ Additional horizontal and vertical separation between plot panels, expressed in the same units as \code{mar.panel}. } \item{panel.begin,panel.end}{ Optional. Functions that will be executed before and after each panel is plotted. See Details. } \item{panel.args}{ Optional. Function that determines different plot arguments for different panels. See Details. } \item{panel.begin.args}{ Optional. List of additional arguments for \code{panel.begin} when it is a function. } \item{panel.end.args}{ Optional. List of additional arguments for \code{panel.end} when it is a function. } \item{plotcommand}{ Optional. Character string containing the name of the command that should be executed to plot each panel. } \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{ Optional. Functions (with no arguments) that will be executed to generate additional plots at the margins (left, right, top and/or bottom, respectively) of the array of plots. } \item{adorn.size}{ Relative width (as a fraction of the other panels' widths) of the margin plots. } \item{equal.scales}{ Logical value indicating whether the components should be plotted at (approximately) the same physical scale. } \item{halign,valign}{ Logical values indicating whether panels in a column should be aligned to the same \eqn{x} coordinate system (\code{halign=TRUE}) and whether panels in a row should be aligned to the same \eqn{y} coordinate system (\code{valign=TRUE}). These are applicable only if \code{equal.scales=TRUE}. } } \value{ Null. } \details{ This is the \code{plot} method for the class \code{"listof"}. An object of class \code{"listof"} (defined in the base R package) represents a list of objects, all belonging to a common class. 
The base R package defines a method for printing these objects, \code{\link[base]{print.listof}}, but does not define a method for \code{plot}. So here we have provided a method for \code{plot}. In the \pkg{spatstat} package, various functions produce an object of class \code{"listof"}, essentially a list of spatial objects of the same kind. These objects can be plotted in a nice arrangement using \code{plot.listof}. See the Examples. The argument \code{panel.args} determines extra graphics parameters for each panel. It should be a function that will be called as \code{panel.args(i)} where \code{i} is the panel number. Its return value should be a list of graphics parameters that can be passed to the relevant \code{plot} method. These parameters override any parameters specified in the \code{\dots} arguments. The arguments \code{panel.begin} and \code{panel.end} determine graphics that will be plotted before and after each panel is plotted. They may be objects of some class that can be plotted with the generic \code{plot} command. Alternatively they may be functions that will be called as \code{panel.begin(i, y, main=main.panel[i])} and \code{panel.end(i, y, add=TRUE)} where \code{i} is the panel number and \code{y = x[[i]]}. If all entries of \code{x} are pixel images, the function \code{\link{image.listof}} is called to control the plotting. The arguments \code{equal.ribbon} and \code{col} can be used to determine the colour map or maps applied. If \code{equal.scales=FALSE} (the default), then the plot panels will have equal height on the plot device (unless there is only one column of panels, in which case they will have equal width on the plot device). This means that the objects are plotted at different physical scales, by default. If \code{equal.scales=TRUE}, then the dimensions of the plot panels on the plot device will be proportional to the spatial dimensions of the corresponding components of \code{x}. 
This means that the objects will be plotted at \emph{approximately} equal physical scales. If these objects have very different spatial sizes, the plot command could fail (when it tries to plot the smaller objects at a tiny scale), with an error message that the figure margins are too large. The objects will be plotted at \emph{exactly} equal physical scales, and \emph{exactly} aligned on the device, under the following conditions: \itemize{ \item every component of \code{x} is a spatial object whose position can be shifted by \code{\link{shift}}; \item \code{panel.begin} and \code{panel.end} are either \code{NULL} or they are spatial objects whose position can be shifted by \code{\link{shift}}; \item \code{adorn.left}, \code{adorn.right}, \code{adorn.top} and \code{adorn.bottom} are all \code{NULL}. } Another special case is when every component of \code{x} is an object of class \code{"fv"} representing a function. If \code{equal.scales=TRUE} then all these functions will be plotted with the same axis scales (i.e. with the same \code{xlim} and the same \code{ylim}). } \section{Spacing between plots}{ The spacing between individual plots is controlled by the parameters \code{mar.panel}, \code{hsep} and \code{vsep}. If \code{equal.scales=FALSE}, the plot panels are logically separate plots. The margins for each panel are determined by the argument \code{mar.panel} which becomes the graphics parameter \code{mar} described in the help file for \code{\link{par}}. One unit of \code{mar} corresponds to one line of text in the margin. If \code{hsep} or \code{vsep} are present, \code{mar.panel} is augmented by \code{c(vsep, hsep, vsep, hsep)/2}. If \code{equal.scales=TRUE}, all the plot panels are drawn in the same coordinate system which represents a physical scale. 
The unit of measurement for \code{mar.panel[1,3]} is one-sixth of the greatest height of any object plotted in the same row of panels, and the unit for \code{mar.panel[2,4]} is one-sixth of the greatest width of any object plotted in the same column of panels. If \code{hsep} or \code{vsep} are present, they are interpreted in the same units as \code{mar.panel[2]} and \code{mar.panel[1]} respectively. } \seealso{ \code{\link[base]{print.listof}}, \code{\link{contour.listof}}, \code{\link{image.listof}}, \code{\link{density.splitppp}} } \section{Error messages}{ If the error message \sQuote{Figure margins too large} occurs, this generally means that one of the objects had a much smaller physical scale than the others. Ensure that \code{equal.scales=FALSE} and increase the values of \code{mar.panel}. } \examples{ # Intensity estimate of multitype point pattern plot(D <- density(split(amacrine))) plot(D, main="", equal.ribbon=TRUE, panel.end=function(i,y,...){contour(y, ...)}) # list of 3D point patterns ape1 <- osteo[osteo$shortid==4, "pts", drop=TRUE] class(ape1) plot(ape1, main.panel="", mar.panel=0.1, hsep=0.7, vsep=1, cex=1.5, pch=21, bg='white') } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/multiplicity.ppp.Rd0000644000176200001440000000403413160710621016503 0ustar liggesusers\name{multiplicity.ppp} \alias{multiplicity} \alias{multiplicity.default} \alias{multiplicity.data.frame} \alias{multiplicity.ppp} \alias{multiplicity.ppx} \title{Count Multiplicity of Duplicate Points} \description{ Counts the number of duplicates for each point in a spatial point pattern. } \usage{ multiplicity(x) \method{multiplicity}{ppp}(x) \method{multiplicity}{ppx}(x) \method{multiplicity}{data.frame}(x) \method{multiplicity}{default}(x) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"ppx"}) or a vector, matrix or data frame. 
} } \value{ A vector of integers (multiplicities) of length equal to the number of points in \code{x}. } \details{ Two points in a point pattern are deemed to be identical if their \eqn{x,y} coordinates are the same, and their marks are also the same (if they carry marks). The Examples section illustrates how it is possible for a point pattern to contain a pair of identical points. For each point in \code{x}, the function \code{multiplicity} counts how many points are identical to it, and returns the vector of counts. The argument \code{x} can also be a vector, a matrix or a data frame. When \code{x} is a vector, \code{m <- multiplicity(x)} is a vector of the same length as \code{x}, and \code{m[i]} is the number of elements of \code{x} that are identical to \code{x[i]}. When \code{x} is a matrix or data frame, \code{m <- multiplicity(x)} is a vector of length equal to the number of rows of \code{x}, and \code{m[i]} is the number of rows of \code{x} that are identical to the \code{i}th row. } \seealso{ \code{\link{ppp.object}}, \code{\link{duplicated.ppp}}, \code{\link{unique.ppp}} } \examples{ X <- ppp(c(1,1,0.5,1), c(2,2,1,2), window=square(3), check=FALSE) m <- multiplicity(X) # unique points in X, marked by their multiplicity first <- !duplicated(X) Y <- X[first] \%mark\% m[first] } \author{\adrian , \rolf and Sebastian Meyer. 
} \keyword{spatial} \keyword{utilities} spatstat/man/macros/0000755000176200001440000000000013115225157014156 5ustar liggesusersspatstat/man/macros/defns.Rd0000644000176200001440000000414513115225157015550 0ustar liggesusers%% macro definitions for spatstat man pages \newcommand{\adrian}{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}} \newcommand{\rolf}{Rolf Turner \email{r.turner@auckland.ac.nz}} \newcommand{\ege}{Ege Rubak \email{rubak@math.aau.dk}} \newcommand{\spatstatAuthors}{\adrian, \rolf and \ege} % Names with accents \newcommand{\Bogsted}{\ifelse{latex}{\out{B\o gsted}}{Bogsted}} \newcommand{\Cramer}{\ifelse{latex}{\out{Cram\'er}}{Cramer}} \newcommand{\Hogmander}{\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}} \newcommand{\Jyvaskyla}{\ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}} \newcommand{\Matern}{\ifelse{latex}{\out{Mat\'ern}}{Matern}} \newcommand{\Moller}{\ifelse{latex}{\out{M\o ller}}{Moller}} \newcommand{\Oehlschlaegel}{\ifelse{latex}{\out{Oehlschl\"{a}gel}}{Oehlschlaegel}} \newcommand{\Prokesova}{\ifelse{latex}{\out{Proke\u{s}ov{\'{a}}}}{Prokesova}} \newcommand{\Sarkka}{\ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}} %% List of all Gibbs interactions \newcommand{\GibbsInteractionsList}{\code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{Concom}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Saturated}}, \code{\link{SatPiece}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Triplets}}} %% List of interactions recognised by RMH code \newcommand{\rmhInteractionsList}{\code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, 
\code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Triplets}}} spatstat/man/Kmark.Rd0000644000176200001440000001422613160710571014232 0ustar liggesusers\name{Kmark} \alias{Kmark} \alias{markcorrint} \title{Mark-Weighted K Function} \description{ Estimates the mark-weighted \eqn{K} function of a marked point pattern. } \usage{ Kmark(X, f = NULL, r = NULL, correction = c("isotropic", "Ripley", "translate"), ..., f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL) markcorrint(X, f = NULL, r = NULL, correction = c("isotropic", "Ripley", "translate"), ..., f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{f}{Optional. Test function \eqn{f} used in the definition of the mark correlation function. An \R function with at least two arguments. There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{ Ignored. } \item{f1}{ An alternative to \code{f}. If this argument is given, then \eqn{f} is assumed to take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. 
} \item{returnL}{ Compute the analogue of the K-function if \code{returnL=FALSE} or the analogue of the L-function if \code{returnL=TRUE}. } \item{fargs}{ Optional. A list of extra arguments to be passed to the function \code{f} or \code{f1}. } } \details{ The functions \code{Kmark} and \code{markcorrint} are identical. (Eventually \code{markcorrint} will be deprecated.) The \emph{mark-weighted \eqn{K} function} \eqn{K_f(r)}{K[f](r)} of a marked point process (Penttinen et al, 1992) is a generalisation of Ripley's \eqn{K} function, in which the contribution from each pair of points is weighted by a function of their marks. If the marks of the two points are \eqn{m_1, m_2}{m1, m2} then the weight is proportional to \eqn{f(m_1, m_2)}{f(m1, m2)} where \eqn{f} is a specified \emph{test function}. The mark-weighted \eqn{K} function is defined so that \deqn{ \lambda K_f(r) = \frac{C_f(r)}{E[ f(M_1, M_2) ]} }{ lambda * K_f(r) = C[f](r)/E[f(M1, M2)] } where \deqn{ C_f(r) = E \left[ \sum_{x \in X} f(m(u), m(x)) 1{0 < ||u - x|| \le r} \; \big| \; u \in X \right] }{ C[f](r) = E[ sum[x in X] f(m(u), m(x)) 1(0 < d(u,x) <= r) | u in X] } for any spatial location \eqn{u} taken to be a typical point of the point process \eqn{X}. Here \eqn{||u-x||}{d(u,x)} is the euclidean distance between \eqn{u} and \eqn{x}, so that the sum is taken over all random points \eqn{x} that lie within a distance \eqn{r} of the point \eqn{u}. The function \eqn{C_f(r)}{C[f](r)} is the \emph{unnormalised} mark-weighted \eqn{K} function. To obtain \eqn{K_f(r)}{K[f](r)} we standardise \eqn{C_f(r)}{C[f](r)} by dividing by \eqn{E[f(M_1,M_2)]}{E[f(M1,M2)]}, the expected value of \eqn{f(M_1,M_2)}{f(M1,M2)} when \eqn{M_1}{M1} and \eqn{M_2}{M2} are independent random marks with the same distribution as the marks in the point process. Under the hypothesis of random labelling, the mark-weighted \eqn{K} function is equal to Ripley's \eqn{K} function, \eqn{K_f(r) = K(r)}{K[f](r) = K(r)}. 
The mark-weighted \eqn{K} function is sometimes called the \emph{mark correlation integral} because it is related to the mark correlation function \eqn{k_f(r)}{k[f](r)} and the pair correlation function \eqn{g(r)} by \deqn{ K_f(r) = 2 \pi \int_0^r s k_f(s) \, g(s) \, {\rm d}s }{ K[f](r) = 2 * pi * integral[0,r] (s * k[f](s) * g(s) ) ds } See \code{\link{markcorr}} for a definition of the mark correlation function. Given a marked point pattern \code{X}, this command computes edge-corrected estimates of the mark-weighted \eqn{K} function. If \code{returnL=FALSE} then the estimated function \eqn{K_f(r)}{K[f](r)} is returned; otherwise the function \deqn{ L_f(r) = \sqrt{K_f(r)/\pi} }{ L[f](r) = sqrt(K[f](r)/pi) } is returned. } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark correlation integral \eqn{K_f(r)}{K[f](r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_f(r)}{K[f](r)} when the marks attached to different points are independent, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the mark-weighted \eqn{K} function \eqn{K_f(r)}{K[f](r)} obtained by the edge corrections named (if \code{returnL=FALSE}). } \references{ Penttinen, A., Stoyan, D. and Henttonen, H. M. (1992) Marked point processes in forest statistics. \emph{Forest Science} \bold{38} (1992) 806-824. Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical analysis and modelling of spatial point patterns}. Chichester: John Wiley. } \seealso{ \code{\link{markcorr}} to estimate the mark correlation function. 
} \examples{ # CONTINUOUS-VALUED MARKS: # (1) Spruces # marks represent tree diameter # mark correlation function ms <- Kmark(spruces) plot(ms) # (2) simulated data with independent marks X <- rpoispp(100) X <- X \%mark\% runif(npoints(X)) Xc <- Kmark(X) plot(Xc) # MULTITYPE DATA: # Hughes' amacrine data # Cells marked as 'on'/'off' M <- Kmark(amacrine, function(m1,m2) {m1==m2}, correction="translate") plot(M) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/dirichletWeights.Rd0000644000176200001440000000356613160710571016474 0ustar liggesusers\name{dirichletWeights} \alias{dirichletWeights} \title{Compute Quadrature Weights Based on Dirichlet Tessellation} \description{ Computes quadrature weights for a given set of points, using the areas of tiles in the Dirichlet tessellation. } \usage{ dirichletWeights(X, window=NULL, exact=TRUE, \dots) } \arguments{ \item{X}{Data defining a point pattern.} \item{window}{Default window for the point pattern.} \item{exact}{Logical value. If \code{TRUE}, compute exact areas using the package \code{deldir}. If \code{FALSE}, compute approximate areas using a pixel raster. } \item{\dots}{ Ignored. } } \value{ Vector of nonnegative weights for each point in \code{X}. } \details{ This function computes a set of quadrature weights for a given pattern of points (typically comprising both ``data'' and ``dummy'' points). See \code{\link{quad.object}} for an explanation of quadrature weights and quadrature schemes. The weights are computed using the Dirichlet tessellation. First \code{X} and (optionally) \code{window} are converted into a point pattern object. Then the Dirichlet tessellation of the points of \code{X} is computed. The weight attached to a point of \code{X} is the area of its Dirichlet tile (inside the window \code{Window(X)}). If \code{exact=TRUE} the Dirichlet tessellation is computed exactly by the Lee-Schachter algorithm using the package \code{deldir}. 
Otherwise a pixel raster approximation is constructed and the areas are approximations to the true weights. In all cases the sum of the weights is equal to the area of the window. } \seealso{ \code{\link{quad.object}}, \code{\link{gridweights}} } \examples{ Q <- quadscheme(runifpoispp(10)) X <- as.ppp(Q) # data and dummy points together w <- dirichletWeights(X, exact=FALSE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/dmixpois.Rd0000644000176200001440000000512313160710571015015 0ustar liggesusers\name{dmixpois} \alias{dmixpois} \alias{pmixpois} \alias{qmixpois} \alias{rmixpois} \title{ Mixed Poisson Distribution } \description{ Density, distribution function, quantile function and random generation for a mixture of Poisson distributions. } \usage{ dmixpois(x, mu, sd, invlink = exp, GHorder = 5) pmixpois(q, mu, sd, invlink = exp, lower.tail = TRUE, GHorder = 5) qmixpois(p, mu, sd, invlink = exp, lower.tail = TRUE, GHorder = 5) rmixpois(n, mu, sd, invlink = exp) } \arguments{ \item{x}{vector of (non-negative integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of random values to return.} \item{mu}{ Mean of the linear predictor. A single numeric value. } \item{sd}{ Standard deviation of the linear predictor. A single numeric value. } \item{invlink}{ Inverse link function. A function in the \R language, used to transform the linear predictor into the parameter \code{lambda} of the Poisson distribution. } \item{lower.tail}{ Logical. If \code{TRUE} (the default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}. } \item{GHorder}{ Number of quadrature points in the Gauss-Hermite quadrature approximation. A small positive integer. } } \details{ These functions are analogous to \code{\link{dpois}} \code{\link{ppois}}, \code{\link{qpois}} and \code{\link{rpois}} except that they apply to a mixture of Poisson distributions. 
In effect, the Poisson mean parameter \code{lambda} is randomised by setting \code{lambda = invlink(Z)} where \code{Z} has a Gaussian \eqn{N(\mu,\sigma^2)}{N(mu, sigma^2)} distribution. The default is \code{invlink=exp} which means that \code{lambda} is lognormal. Set \code{invlink=I} to assume that \code{lambda} is approximately Normal. For \code{dmixpois}, \code{pmixpois} and \code{qmixpois}, the probability distribution is approximated using Gauss-Hermite quadrature. For \code{rmixpois}, the deviates are simulated exactly. } \value{ Numeric vector: \code{dmixpois} gives probability masses, \code{pmixpois} gives cumulative probabilities, \code{qmixpois} gives (non-negative integer) quantiles, and \code{rmixpois} generates (non-negative integer) random deviates. } \seealso{ \code{\link{dpois}}, \code{\link{gauss.hermite}}. } \examples{ dmixpois(7, 10, 1, invlink = I) dpois(7, 10) pmixpois(7, log(10), 0.2) ppois(7, 10) qmixpois(0.95, log(10), 0.2) qpois(0.95, 10) x <- rmixpois(100, log(10), log(1.2)) mean(x) var(x) } \author{\adrian, \rolf and \ege } \keyword{distribution} spatstat/man/Kcom.Rd0000644000176200001440000002223313160710571014053 0ustar liggesusers\name{Kcom} \Rdversion{1.1} \alias{Kcom} \title{ Model Compensator of K Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the \emph{compensator} of the \eqn{K} function based on the fitted model (as well as the usual nonparametric estimates of \eqn{K} based on the data alone). Comparison between the nonparametric and model-compensated \eqn{K} functions serves as a diagnostic for the model. } \usage{ Kcom(object, r = NULL, breaks = NULL, ..., correction = c("border", "isotropic", "translate"), conditional = !is.poisson(object), restrict = FALSE, model = NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), compute.var = TRUE, truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. 
Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{K(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ This argument is for advanced use only. } \item{\dots}{ Ignored. } \item{correction}{ Optional vector of character strings specifying the edge correction(s) to be used. See \code{\link{Kest}} for options. } \item{conditional}{ Optional. Logical value indicating whether to compute the estimates for the conditional case. See Details. } \item{restrict}{ Logical value indicating whether to compute the restriction estimator (\code{restrict=TRUE}) or the reweighting estimator (\code{restrict=FALSE}, the default). Applies only if \code{conditional=TRUE}. See Details. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{compute.var}{ Logical value indicating whether to compute the Poincare variance bound for the residual \eqn{K} function (calculation is only implemented for the isotropic correction). } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. 
The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes an estimate of the \eqn{K} function of the dataset, together with a \emph{model compensator} of the \eqn{K} function, which should be approximately equal if the model is a good fit to the data. The first argument, \code{object}, is usually a fitted point process model (object of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a point pattern (object of class \code{"ppp"}). In that case, a point process model will be fitted to it, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. The algorithm first extracts the original point pattern dataset (to which the model was fitted) and computes the standard nonparametric estimates of the \eqn{K} function. It then also computes the \emph{model compensator} of the \eqn{K} function. The different function estimates are returned as columns in a data frame (of class \code{"fv"}). The argument \code{correction} determines the edge correction(s) to be applied. See \code{\link{Kest}} for explanation of the principle of edge corrections. 
The following table gives the options for the \code{correction} argument, and the corresponding column names in the result: \tabular{llll}{ \code{correction} \tab \bold{description of correction} \tab \bold{nonparametric} \tab \bold{compensator} \cr \code{"isotropic"} \tab Ripley isotropic correction \tab \code{iso} \tab \code{icom} \cr \code{"translate"} \tab Ohser-Stoyan translation correction \tab \code{trans} \tab \code{tcom} \cr \code{"border"} \tab border correction \tab \code{border} \tab \code{bcom} } The nonparametric estimates can all be expressed in the form \deqn{ \hat K(r) = \sum_i \sum_{j < i} e(x_i,x_j,r,x) I\{ d(x_i,x_j) \le r \} }{ K(r) = sum[i] sum[j < i] e(x[i], x[j], r, x) I( d(x[i],x[j]) <= r ) } where \eqn{x_i}{x[i]} is the \eqn{i}-th data point, \eqn{d(x_i,x_j)}{d(x[i],x[j])} is the distance between \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r,x)}{e(x[i],x[j],r,x)} is a term that serves to correct edge effects and to re-normalise the sum. The corresponding model compensator is \deqn{ {\bf C} \, \tilde K(r) = \int_W \lambda(u,x) \sum_j e(u,x_j,r,x \cup u) I\{ d(u,x_j) \le r\} }{ C K(r) = integral[u] lambda(u,x) sum[j] e(u, x[j], r, x+u) I( d(u,x[j]) <= r ) } where the integral is over all locations \eqn{u} in the observation window, \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity of the model at the location \eqn{u}, and \eqn{x \cup u}{x+u} denotes the data point pattern \eqn{x} augmented by adding the extra point \eqn{u}. If the fitted model is a Poisson point process, then the formulae above are exactly what is computed. If the fitted model is not Poisson, the formulae above are modified slightly to handle edge effects. The modification is determined by the arguments \code{conditional} and \code{restrict}. The value of \code{conditional} defaults to \code{FALSE} for Poisson models and \code{TRUE} for non-Poisson models. If \code{conditional=FALSE} then the formulae above are not modified. 
If \code{conditional=TRUE}, then the algorithm calculates the \emph{restriction estimator} if \code{restrict=TRUE}, and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}. See Appendix D of Baddeley, Rubak and \ifelse{latex}{\out{M\o ller}}{Moller} (2011). Thus, by default, the reweighting estimator is computed for non-Poisson models. The nonparametric estimates of \eqn{K(r)} are approximately unbiased estimates of the \eqn{K}-function, assuming the point process is stationary. The model compensators are unbiased estimates \emph{of the mean values of the corresponding nonparametric estimates}, assuming the model is true. Thus, if the model is a good fit, the mean value of the difference between the nonparametric estimates and model compensators is approximately zero. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}. } \seealso{ Related functions: \code{\link{Kres}}, \code{\link{Kest}}. Alternative functions: \code{\link{Gcom}}, \code{\link{psstG}}, \code{\link{psstA}}, \code{\link{psst}}. Point process models: \code{\link{ppm}}. 
} \examples{ fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{fit0 <- ppm(cells, ~1, nd=16)} if(interactive()) { plot(Kcom(fit0)) # compare the isotropic-correction estimates plot(Kcom(fit0), cbind(iso, icom) ~ r) # uniform Poisson is clearly not correct } fit1 <- ppm(cells, ~1, Strauss(0.08)) \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)} K1 <- Kcom(fit1) K1 if(interactive()) { plot(K1) plot(K1, cbind(iso, icom) ~ r) plot(K1, cbind(trans, tcom) ~ r) # how to plot the difference between nonparametric estimates and compensators plot(K1, iso - icom ~ r) # fit looks approximately OK; try adjusting interaction distance } fit2 <- ppm(cells, ~1, Strauss(0.12)) \testonly{fit2 <- ppm(cells, ~1, Strauss(0.12), nd=16)} K2 <- Kcom(fit2) if(interactive()) { plot(K2) plot(K2, cbind(iso, icom) ~ r) plot(K2, iso - icom ~ r) } } \keyword{spatial} \keyword{models} spatstat/man/methods.lppm.Rd0000644000176200001440000000421613160710621015571 0ustar liggesusers\name{methods.lppm} \alias{methods.lppm} %DoNotExport \alias{coef.lppm} \alias{emend.lppm} \alias{extractAIC.lppm} \alias{formula.lppm} \alias{logLik.lppm} \alias{deviance.lppm} \alias{nobs.lppm} \alias{print.lppm} \alias{summary.lppm} \alias{terms.lppm} \alias{update.lppm} \alias{valid.lppm} \alias{vcov.lppm} \alias{as.linnet.lppm} \title{ Methods for Fitted Point Process Models on a Linear Network } \description{ These are methods for the class \code{"lppm"} of fitted point process models on a linear network. } \usage{ \method{coef}{lppm}(object, ...) \method{emend}{lppm}(object, \dots) \method{extractAIC}{lppm}(fit, ...) \method{formula}{lppm}(x, ...) \method{logLik}{lppm}(object, ...) \method{deviance}{lppm}(object, ...) \method{nobs}{lppm}(object, ...) \method{print}{lppm}(x, ...) \method{summary}{lppm}(object, ...) \method{terms}{lppm}(x, ...) \method{update}{lppm}(object, ...) \method{valid}{lppm}(object, ...) \method{vcov}{lppm}(object, ...) \method{as.linnet}{lppm}(X, ...) 
} \arguments{ \item{object,fit,x,X}{ An object of class \code{"lppm"} representing a fitted point process model on a linear network. } \item{\dots}{ Arguments passed to other methods, usually the method for the class \code{"ppm"}. } } \details{ These are methods for the generic commands \code{\link[stats]{coef}}, \code{\link{emend}}, \code{\link[stats]{extractAIC}}, \code{\link[stats]{formula}}, \code{\link[stats]{logLik}}, \code{\link[stats]{deviance}}, \code{\link[stats]{nobs}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[stats]{terms}}, \code{\link[stats]{update}}, \code{\link{valid}} and \code{\link[stats]{vcov}} for the class \code{"lppm"}. } \value{ See the default methods. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{lppm}}, \code{\link{plot.lppm}}. } \examples{ X <- runiflpp(15, simplenet) fit <- lppm(X ~ x) print(fit) coef(fit) formula(fit) terms(fit) logLik(fit) deviance(fit) nobs(fit) extractAIC(fit) update(fit, ~1) valid(fit) vcov(fit) } \keyword{spatial} \keyword{models} spatstat/man/ppp.object.Rd0000644000176200001440000000771513160710621015232 0ustar liggesusers\name{ppp.object} \alias{ppp.object} %DoNotExport \title{Class of Point Patterns} \description{ A class \code{"ppp"} to represent a two-dimensional point pattern. Includes information about the window in which the pattern was observed. Optionally includes marks. } \details{ This class represents a two-dimensional point pattern dataset. It specifies \itemize{ \item the locations of the points \item the window in which the pattern was observed \item optionally, ``marks'' attached to each point (extra information such as a type label). 
} If \code{X} is an object of type \code{ppp}, it contains the following elements: \tabular{ll}{ \code{x} \tab vector of \eqn{x} coordinates of data points \cr \code{y} \tab vector of \eqn{y} coordinates of data points \cr \code{n} \tab number of points \cr \code{window} \tab window of observation \cr \tab (an object of class \code{\link{owin}}) \cr \code{marks} \tab optional vector or data frame of marks } Users are strongly advised not to manipulate these entries directly. Objects of class \code{"ppp"} may be created by the function \code{\link{ppp}} and converted from other types of data by the function \code{\link{as.ppp}}. Note that you must always specify the window of observation; there is intentionally no default action of ``guessing'' the window dimensions from the data points alone. Standard point pattern datasets provided with the package include \code{\link[spatstat.data]{amacrine}}, \code{\link[spatstat.data]{betacells}}, \code{\link[spatstat.data]{bramblecanes}}, \code{\link[spatstat.data]{cells}}, \code{\link[spatstat.data]{demopat}}, \code{\link[spatstat.data]{ganglia}}, \code{\link[spatstat.data]{lansing}}, \code{\link[spatstat.data]{longleaf}}, \code{\link[spatstat.data]{nztrees}}, \code{\link[spatstat.data]{redwood}}, \code{\link[spatstat.data]{simdat}} and \code{\link[spatstat.data]{swedishpines}}. Point patterns may be scanned from your own data files by \code{\link{scanpp}} or by using \code{\link{read.table}} and \code{\link{as.ppp}}. They may be manipulated by the functions \code{\link{[.ppp}} and \code{\link{superimpose}}. Point pattern objects can be plotted just by typing \code{plot(X)} which invokes the \code{plot} method for point pattern objects, \code{\link{plot.ppp}}. See \code{\link{plot.ppp}} for further information. There are also methods for \code{summary} and \code{print} for point patterns. Use \code{summary(X)} to see a useful description of the data. 
Patterns may be generated at random by \code{\link{runifpoint}}, \code{\link{rpoispp}}, \code{\link{rMaternI}}, \code{\link{rMaternII}}, \code{\link{rSSI}}, \code{\link{rNeymanScott}}, \code{\link{rMatClust}}, and \code{\link{rThomas}}. Most functions which are intended to operate on a window (of class \code{\link{owin}}) will, if presented with a \code{\link{ppp}} object instead, automatically extract the window information from the point pattern. } \seealso{ \code{\link{owin}}, \code{\link{ppp}}, \code{\link{as.ppp}}, \code{\link{[.ppp}} } \section{Warnings}{ The internal representation of marks is likely to change in the next release of this package. } \examples{ x <- runif(100) y <- runif(100) X <- ppp(x, y, c(0,1),c(0,1)) X \dontrun{plot(X)} mar <- sample(1:3, 100, replace=TRUE) mm <- ppp(x, y, c(0,1), c(0,1), marks=mar) \dontrun{plot(mm)} # points with mark equal to 2 ss <- mm[ mm$marks == 2 , ] \dontrun{plot(ss)} # left half of pattern 'mm' lu <- owin(c(0,0.5),c(0,1)) mmleft <- mm[ , lu] \dontrun{plot(mmleft)} \dontrun{ if(FALSE) { # input data from file qq <- scanpp("my.table", unit.square()) # interactively build a point pattern plot(unit.square()) X <- as.ppp(locator(10), unit.square()) plot(X) } } } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/methods.linfun.Rd0000644000176200001440000000473413160710621016121 0ustar liggesusers\name{methods.linfun} \Rdversion{1.1} \alias{methods.linfun} %DoNotExport \alias{print.linfun} \alias{summary.linfun} \alias{plot.linfun} \alias{as.data.frame.linfun} \alias{as.owin.linfun} \alias{as.function.linfun} \title{ Methods for Functions on Linear Network } \description{ Methods for the class \code{"linfun"} of functions on a linear network. 
} \usage{ \method{print}{linfun}(x, \dots) \method{summary}{linfun}(object, \dots) \method{plot}{linfun}(x, \dots, L=NULL, main) \method{as.data.frame}{linfun}(x, \dots) \method{as.owin}{linfun}(W, \dots) \method{as.function}{linfun}(x, \dots) } \arguments{ \item{x,object,W}{ A function on a linear network (object of class \code{"linfun"}). } \item{L}{A linear network} \item{\dots}{ Extra arguments passed to \code{\link{as.linim}}, \code{\link{plot.linim}}, \code{\link{plot.im}} or \code{\link{print.default}}, or arguments passed to \code{x} if it is a function. } \item{main}{Main title for plot.} } \details{ These are methods for the generic functions \code{\link{plot}}, \code{\link{print}}, \code{\link{summary}} \code{\link{as.data.frame}} and \code{\link{as.function}}, and for the \pkg{spatstat} generic function \code{\link{as.owin}}. An object of class \code{"linfun"} represents a mathematical function that could be evaluated at any location on a linear network. It is essentially an \R \code{function} with some extra attributes. The method \code{as.owin.linfun} extracts the two-dimensional spatial window containing the linear network. The method \code{plot.linfun} first converts the function to a pixel image using \code{\link{as.linim.linfun}}, then plots the image using \code{\link{plot.linim}}. Note that a \code{linfun} function may have additional arguments, other than those which specify the location on the network (see \code{\link{linfun}}). These additional arguments may be passed to \code{plot.linfun}. } \value{ For \code{print.linfun} and \code{summary.linfun} the result is \code{NULL}. For \code{plot.linfun} the result is the same as for \code{\link{plot.linim}}. For the conversion methods, the result is an object of the required type: \code{as.owin.linfun} returns an object of class \code{"owin"}, and so on. 
} \examples{ X <- runiflpp(3, simplenet) f <- nnfun(X) f plot(f) as.function(f) as.owin(f) head(as.data.frame(f)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/lurking.Rd0000644000176200001440000002567113160710621014642 0ustar liggesusers\name{lurking} \alias{lurking} \title{Lurking variable plot} \description{ Plot spatial point process residuals against a covariate } \usage{ lurking(object, covariate, type="eem", cumulative=TRUE, clipwindow=default.clipwindow(object), rv, plot.sd, envelope=FALSE, nsim=39, nrank=1, plot.it=TRUE, typename, covname, oldstyle=FALSE, check=TRUE, \dots, splineargs=list(spar=0.5), verbose=TRUE) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which diagnostics should be produced. This object is usually obtained from \code{\link{ppm}}. Alternatively, \code{object} may be a point pattern (object of class \code{"ppp"}). } \item{covariate}{ The covariate against which residuals should be plotted. Either a numeric vector, a pixel image, or an \code{expression}. See \emph{Details} below. } \item{type}{ String indicating the type of residuals or weights to be computed. Choices include \code{"eem"}, \code{"raw"}, \code{"inverse"} and \code{"pearson"}. See \code{\link{diagnose.ppm}} for all possible choices. } \item{cumulative}{ Logical flag indicating whether to plot a cumulative sum of marks (\code{cumulative=TRUE}) or the derivative of this sum, a marginal density of the smoothed residual field (\code{cumulative=FALSE}). } \item{clipwindow}{ If not \code{NULL} this argument indicates that residuals shall only be computed inside a subregion of the window containing the original point pattern data. Then \code{clipwindow} should be a window object of class \code{"owin"}. } \item{rv}{ Usually absent. If this argument is present, the point process residuals will not be calculated from the fitted model \code{object}, but will instead be taken directly from \code{rv}. 
} \item{plot.sd}{ Logical value indicating whether error bounds should be added to plot. The default is \code{TRUE} for Poisson models and \code{FALSE} for non-Poisson models. See Details. } \item{envelope}{ Logical value indicating whether to compute simulation envelopes for the plot. Alternatively \code{envelope} may be a list of point patterns to use for computing the simulation envelopes, or an object of class \code{"envelope"} containing simulated point patterns. } \item{nsim}{ Number of simulated point patterns to be generated to produce the simulation envelope, if \code{envelope=TRUE}. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, only the computed coordinates for the plots are returned. See \emph{Value}. } \item{typename}{ Usually absent. If this argument is present, it should be a string, and will be used (in the axis labels of plots) to describe the type of residuals. } \item{covname}{ A string name for the covariate, to be used in axis labels of plots. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{check}{ Logical flag indicating whether the integrity of the data structure in \code{object} should be checked. } \item{\dots}{ Arguments passed to \code{\link{plot.default}} and \code{\link{lines}} to control the plot behaviour. } \item{splineargs}{ A list of arguments passed to \code{smooth.spline} for the estimation of the derivatives in the case \code{cumulative=FALSE}. } \item{verbose}{ Logical value indicating whether to print progress reports during Monte Carlo simulation. 
} } \value{ A list containing two dataframes \code{empirical} and \code{theoretical}. The first dataframe \code{empirical} contains columns \code{covariate} and \code{value} giving the coordinates of the lurking variable plot. The second dataframe \code{theoretical} contains columns \code{covariate}, \code{mean} and \code{sd} giving the coordinates of the plot of the theoretical mean and standard deviation. The return value belongs to the class \code{"lurk"} for which there is a plot method. } \details{ This function generates a `lurking variable' plot for a fitted point process model. Residuals from the model represented by \code{object} are plotted against the covariate specified by \code{covariate}. This plot can be used to reveal departures from the fitted model, in particular, to reveal that the point pattern depends on the covariate. First the residuals from the fitted model (Baddeley et al, 2004) are computed at each quadrature point, or alternatively the `exponential energy marks' (Stoyan and Grabarnik, 1991) are computed at each data point. The argument \code{type} selects the type of residual or weight. See \code{\link{diagnose.ppm}} for options and explanation. A lurking variable plot for point processes (Baddeley et al, 2004) displays either the cumulative sum of residuals/weights (if \code{cumulative = TRUE}) or a kernel-weighted average of the residuals/weights (if \code{cumulative = FALSE}) plotted against the covariate. The empirical plot (solid lines) is shown together with its expected value assuming the model is true (dashed lines) and optionally also the pointwise two-standard-deviation limits (grey shading). To be more precise, let \eqn{Z(u)} denote the value of the covariate at a spatial location \eqn{u}. 
\itemize{ \item If \code{cumulative=TRUE} then we plot \eqn{H(z)} against \eqn{z}, where \eqn{H(z)} is the sum of the residuals over all quadrature points where the covariate takes a value less than or equal to \eqn{z}, or the sum of the exponential energy weights over all data points where the covariate takes a value less than or equal to \eqn{z}. \item If \code{cumulative=FALSE} then we plot \eqn{h(z)} against \eqn{z}, where \eqn{h(z)} is the derivative of \eqn{H(z)}, computed approximately by spline smoothing. } For the point process residuals \eqn{E(H(z)) = 0}, while for the exponential energy weights \eqn{E(H(z)) = } area of the subset of the window satisfying \eqn{Z(u) \le z}{Z(u) <= z}. If the empirical and theoretical curves deviate substantially from one another, the interpretation is that the fitted model does not correctly account for dependence on the covariate. The correct form (of the spatial trend part of the model) may be suggested by the shape of the plot. If \code{plot.sd = TRUE}, then superimposed on the lurking variable plot are the pointwise two-standard-deviation error limits for \eqn{H(z)} calculated for the inhomogeneous Poisson process. The default is \code{plot.sd = TRUE} for Poisson models and \code{plot.sd = FALSE} for non-Poisson models. By default, the two-standard-deviation limits are calculated from the exact formula for the asymptotic variance of the residuals under the asymptotic normal approximation, equation (37) of Baddeley et al (2006). However, for compatibility with the original paper of Baddeley et al (2005), if \code{oldstyle=TRUE}, the two-standard-deviation limits are calculated using the innovation variance, an over-estimate of the true variance of the residuals. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}) typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. 
The argument \code{covariate} is either a numeric vector, a pixel image, or an R language expression. If it is a numeric vector, it is assumed to contain the values of the covariate for each of the quadrature points in the fitted model. The quadrature points can be extracted by \code{\link{quad.ppm}(object)}. If \code{covariate} is a pixel image, it is assumed to contain the values of the covariate at each location in the window. The values of this image at the quadrature points will be extracted. Alternatively, if \code{covariate} is an \code{expression}, it will be evaluated in the same environment as the model formula used in fitting the model \code{object}. It must yield a vector of the same length as the number of quadrature points. The expression may contain the terms \code{x} and \code{y} representing the cartesian coordinates, and may also contain other variables that were available when the model was fitted. Certain variable names are reserved words; see \code{\link{ppm}}. Note that lurking variable plots for the \eqn{x} and \eqn{y} coordinates are also generated by \code{\link{diagnose.ppm}}, amongst other types of diagnostic plots. This function is more general in that it enables the user to plot the residuals against any chosen covariate that may have been present. For advanced use, even the values of the residuals/weights can be altered. If the argument \code{rv} is present, the residuals will not be calculated from the fitted model \code{object} but will instead be taken directly from the object \code{rv}. If \code{type = "eem"} then \code{rv} should be similar to the return value of \code{\link{eem}}, namely, a numeric vector with length equal to the number of data points in the original point pattern. Otherwise, \code{rv} should be similar to the return value of \code{\link{residuals.ppm}}, that is, \code{rv} should be an object of class \code{"msr"} (see \code{\link{msr}}) representing a signed measure. 
} \seealso{ \code{\link{residuals.ppm}}, \code{\link{diagnose.ppm}}, \code{\link{qqplot.ppm}}, \code{\link{eem}}, \code{\link{ppm}} } \references{ Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Pakes, A.G. (2006) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten}, 151:95--100. } \examples{ data(nztrees) lurking(nztrees, expression(x)) fit <- ppm(nztrees, ~x, Poisson()) lurking(fit, expression(x)) lurking(fit, expression(x), cumulative=FALSE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat/man/linequad.Rd0000644000176200001440000000350113160710621014755 0ustar liggesusers\name{linequad} \alias{linequad} \title{ Quadrature Scheme on a Linear Network } \description{ Generates a quadrature scheme (an object of class \code{"quad"}) on a linear network. } \usage{ linequad(X, Y, \dots, eps = NULL, nd = 1000, random = FALSE) } \arguments{ \item{X}{ Data points. An object of class \code{"lpp"} or \code{"ppp"}. } \item{Y}{ Line segments on which the points of \code{X} lie. An object of class \code{"psp"}. Required only when \code{X} is a \code{"ppp"} object. } \item{\dots}{ Ignored. } \item{eps}{ Optional. Spacing between successive dummy points along each segment. } \item{nd}{ Optional. Total number of dummy points to be generated. } \item{random}{ Logical value indicating whether the sequence of dummy points should start at a randomly-chosen position along each segment. 
} } \details{ This command generates a quadrature scheme (object of class \code{"quad"}) from a pattern of points on a linear network. Normally the user does not need to call \code{linequad} explicitly. It is invoked by \pkg{spatstat} functions when needed. A quadrature scheme is required by \code{\link{lppm}} in order to fit point process models to point pattern data on a linear network. A quadrature scheme is also used by \code{\link{rhohat.lpp}} and other functions. In order to create the quadrature scheme, dummy points are placed along each line segment of the network. The dummy points are evenly-spaced with spacing \code{eps}. The default is \code{eps = totlen/nd} where \code{totlen} is the total length of all line segments in the network. } \value{ A quadrature scheme (object of class \code{"quad"}). } \author{ \adrian, Greg McSwiggan and Suman Rakshit. } \seealso{ \code{\link{lppm}} } \keyword{datagen} \keyword{spatial} spatstat/man/compareFit.Rd0000644000176200001440000001013513160710571015251 0ustar liggesusers\name{compareFit} \alias{compareFit} \title{ Residual Diagnostics for Multiple Fitted Models } \description{ Compares several fitted point process models using the same residual diagnostic. } \usage{ compareFit(object, Fun, r = NULL, breaks = NULL, ..., trend = ~1, interaction = Poisson(), rbord = NULL, modelnames = NULL, same = NULL, different = NULL) } \arguments{ \item{object}{ Object or objects to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), or a list of these objects. } \item{Fun}{ Diagnostic function to be computed for each model. One of the functions \code{Kcom}, \code{Kres}, \code{Gcom}, \code{Gres}, \code{psst}, \code{psstA} or \code{psstG} or a string containing one of these names. } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. 
} \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Extra arguments passed to \code{Fun}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern or list of point patterns. See \code{\link{ppm}} for details. Each of these arguments can be a list, specifying different \code{trend}, \code{interaction} and/or \code{rbord} values to be used to generate different fitted models. } \item{modelnames}{ Character vector. Short descriptive names for the different models. } \item{same,different}{ Character strings or character vectors passed to \code{\link{collapse.fv}} to determine the format of the output. } } \details{ This is a convenient way to collect diagnostic information for several different point process models fitted to the same point pattern dataset, or for point process models of the same form fitted to several different datasets, etc. The first argument, \code{object}, is usually a list of fitted point process models (objects of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a list of point patterns (objects of class \code{"ppp"}). In that case, point process models will be fitted to each of the point pattern datasets, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. Alternatively \code{object} can be a single point pattern (object of class \code{"ppp"}) and one or more of the arguments \code{trend}, \code{interaction} or \code{rbord} can be a list. In this case, point process models will be fitted to the same point pattern dataset, using each of the model specifications listed. 
The diagnostic function \code{Fun} will be applied to each of the point process models. The results will be collected into a single function value table. The \code{modelnames} are used to label the results from each fitted model. } \value{ Function value table (object of class \code{"fv"}). } \author{ \ege, \adrian and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}. } \seealso{ \code{\link{ppm}}, \code{\link{Kcom}}, \code{\link{Kres}}, \code{\link{Gcom}}, \code{\link{Gres}}, \code{\link{psst}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{collapse.fv}} } \examples{ nd <- 40 \testonly{ nd <- 10 } ilist <- list(Poisson(), Geyer(7, 2), Strauss(7)) iname <- c("Poisson", "Geyer", "Strauss") \testonly{ ilist <- ilist[c(1,3)] iname <- iname[c(1,3)] } K <- compareFit(swedishpines, Kcom, interaction=ilist, rbord=9, correction="translate", same="trans", different="tcom", modelnames=iname, nd=nd) K } \keyword{spatial} \keyword{models} spatstat/man/as.box3.Rd0000644000176200001440000000223413160710571014436 0ustar liggesusers\name{as.box3} \Rdversion{1.1} \alias{as.box3} \title{ Convert Data to Three-Dimensional Box } \description{ Interprets data as the dimensions of a three-dimensional box. } \usage{ as.box3(...) } \arguments{ \item{\dots}{ Data that can be interpreted as giving the dimensions of a three-dimensional box. See Details. } } \details{ This function converts data in various formats to an object of class \code{"box3"} representing a three-dimensional box (see \code{\link{box3}}). The arguments \code{\dots} may be \itemize{ \item an object of class \code{"box3"} \item arguments acceptable to \code{box3} \item a numeric vector of length 6, interpreted as \code{c(xrange[1],xrange[2],yrange[1],yrange[2],zrange[1],zrange[2])} \item an object of class \code{"pp3"} representing a three-dimensional point pattern contained in a box. } } \value{ Object of class \code{"box3"}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{box3}}, \code{\link{pp3}} } \examples{ X <- c(0,10,0,10,0,5) as.box3(X) X <- pp3(runif(42),runif(42),runif(42), box3(c(0,1))) as.box3(X) } \keyword{spatial} \keyword{manip} spatstat/man/opening.Rd0000644000176200001440000000467213160710621014624 0ustar liggesusers\name{opening} \alias{opening} \alias{opening.owin} \alias{opening.psp} \alias{opening.ppp} \title{Morphological Opening} \description{ Perform morphological opening of a window, a line segment pattern or a point pattern. } \usage{ opening(w, r, \dots) \method{opening}{owin}(w, r, \dots, polygonal=NULL) \method{opening}{ppp}(w, r, \dots) \method{opening}{psp}(w, r, \dots) } \arguments{ \item{w}{ A window (object of class \code{"owin"} or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). } \item{r}{positive number: the radius of the opening.} \item{\dots}{ extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution, if a pixel approximation is used } \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the erosion (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the opened region. If \code{r=0}, the result is identical to \code{w}. } \details{ The morphological opening (Serra, 1982) of a set \eqn{W} by a distance \eqn{r > 0} is the subset of points in \eqn{W} that can be separated from the boundary of \eqn{W} by a circle of radius \eqn{r}. That is, a point \eqn{x} belongs to the opening if it is possible to draw a circle of radius \eqn{r} (not necessarily centred on \eqn{x}) that has \eqn{x} on the inside and the boundary of \eqn{W} on the outside. The opened set is a subset of \code{W}. For a small radius \eqn{r}, the opening operation has the effect of smoothing out irregularities in the boundary of \eqn{W}. 
For larger radii, the opening operation removes promontories in the boundary. For very large radii, the opened set is empty. The algorithm applies \code{\link{erosion}} followed by \code{\link{dilation}}. } \seealso{ \code{\link{closing}} for the opposite operation. \code{\link{dilation}}, \code{\link{erosion}} for the basic operations. \code{\link{owin}}, \code{\link{as.owin}} for information about windows. } \examples{ v <- opening(letterR, 0.3) plot(letterR, type="n", main="opening") plot(v, add=TRUE, col="grey") plot(letterR, add=TRUE) } \references{ Serra, J. (1982) Image analysis and mathematical morphology. Academic Press. } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/psst.Rd0000644000176200001440000001223613160710621014151 0ustar liggesusers\name{psst} \alias{psst} \title{ Pseudoscore Diagnostic For Fitted Model against General Alternative } \description{ Given a point process model fitted to a point pattern dataset, and any choice of functional summary statistic, this function computes the pseudoscore test statistic of goodness-of-fit for the model. } \usage{ psst(object, fun, r = NULL, breaks = NULL, ..., model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), truecoef=NULL, hi.res=NULL, funargs = list(correction="best"), verbose=TRUE) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{fun}{ Summary function to be applied to each point pattern. } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{S(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{model}{ Optional. 
A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } \item{funargs}{ List of additional arguments to be passed to \code{fun}. } \item{verbose}{ Logical value determining whether to print progress reports during the computation. } } \details{ Let \eqn{x} be a point pattern dataset consisting of points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. Given a functional summary statistic \eqn{S}, consider a family of alternative models obtained by exponential tilting of the null model by \eqn{S}. 
The pseudoscore for the null model is \deqn{ V(r) = \sum_i \Delta S(x_i, x, r) - \int_W \Delta S(u,x, r) \lambda(u,x) {\rm d} u }{ V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du) } where the \eqn{\Delta}{Delta} operator is \deqn{ \Delta S(u,x, r) = S(x\cup\{u\}, r) - S(x\setminus u, r) }{ Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r) } the difference between the values of \eqn{S} for the point pattern with and without the point \eqn{u}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. This algorithm computes \eqn{V(r)} by direct evaluation of the sum and integral. It is computationally intensive, but it is available for any summary statistic \eqn{S(r)}. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}. } \seealso{ Special cases: \code{\link{psstA}}, \code{\link{psstG}}. Alternative functions: \code{\link{Kres}}, \code{\link{Gres}}. 
} \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{fit0 <- ppm(cells, ~1, nd=8)} G0 <- psst(fit0, Gest) G0 if(interactive()) plot(G0) } \keyword{spatial} \keyword{models} spatstat/man/nnmap.Rd0000644000176200001440000001125613160710621014272 0ustar liggesusers\name{nnmap} \alias{nnmap} \title{ K-th Nearest Point Map } \description{ Given a point pattern, this function constructs pixel images giving the distance from each pixel to its \eqn{k}-th nearest neighbour in the point pattern, and the index of the \eqn{k}-th nearest neighbour. } \usage{ nnmap(X, k = 1, what = c("dist", "which"), \dots, W = as.owin(X), is.sorted.X = FALSE, sortby = c("range", "var", "x", "y")) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{k}{ Integer, or integer vector. The algorithm will find the \code{k}th nearest neighbour. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the index of the nearest neighbour (\code{"which"}), or both. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution of the result. } \item{W}{ Window (object of class \code{"owin"}) specifying the spatial domain in which the distances will be computed. Defaults to the window of \code{X}. } \item{is.sorted.X}{ Logical value attesting whether the point pattern \code{X} has been sorted. See Details. } \item{sortby}{ Determines which coordinate to use to sort the point pattern. See Details. } } \details{ Given a point pattern \code{X}, this function constructs two pixel images: \itemize{ \item a distance map giving, for each pixel, the distance to the nearest point of \code{X}; \item a nearest neighbour map giving, for each pixel, the identifier of the nearest point of \code{X}. } If the argument \code{k} is specified, then the \code{k}-th nearest neighbours will be found. If \code{what="dist"} then only the distance map is returned. 
If \code{what="which"} then only the nearest neighbour map is returned. The argument \code{k} may be an integer or an integer vector. If it is a single integer, then the \code{k}-th nearest neighbours are computed. If it is a vector, then the \code{k[i]}-th nearest neighbours are computed for each entry \code{k[i]}. For example, setting \code{k=1:3} will compute the nearest, second-nearest and third-nearest neighbours. } \section{Sorting data and pre-sorted data}{ Read this section if you care about the speed of computation. For efficiency, the algorithm sorts the point pattern \code{X} into increasing order of the \eqn{x} coordinate or increasing order of the \eqn{y} coordinate. Sorting is only an intermediate step; it does not affect the output, which is always given in the same order as the original data. By default (if \code{sortby="range"}), the sorting will occur on the coordinate that has the larger range of values (according to the frame of the enclosing window of \code{X}). If \code{sortby = "var"}, sorting will occur on the coordinate that has the greater variance (in the pattern \code{X}). Setting \code{sortby="x"} or \code{sortby = "y"} will specify that sorting should occur on the \eqn{x} or \eqn{y} coordinate, respectively. If the point pattern \code{X} is already sorted, then the argument \code{is.sorted.X} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. } \section{Warning About Ties}{ Ties are possible: there may be two data points which lie exactly the same distance away from a particular pixel. This affects the results from \code{nnmap(what="which")}. The handling of ties is not well-defined: it is not consistent between different computers and different installations of \R. If there are ties, then different calls to \code{nnmap(what="which")} may give inconsistent results. 
For example, you may get a different answer from \code{nnmap(what="which",k=1)} and \code{nnmap(what="which", k=1:2)[[1]]}. } \value{ A pixel image, or a list of pixel images. By default (if \code{what=c("dist", "which")}), the result is a list with two components \code{dist} and \code{which} containing the distance map and the nearest neighbour map. If \code{what="dist"} then the result is a real-valued pixel image containing the distance map. If \code{what="which"} then the result is an integer-valued pixel image containing the nearest neighbour map. If \code{k} is a vector of several integers, then the result is similar except that each pixel image is replaced by a list of pixel images, one for each entry of \code{k}. } \seealso{ \code{\link{distmap}} } \examples{ plot(nnmap(cells, 2, what="which")) } \author{ \adrian , \rolf , and Jens Oehlschlaegel } \keyword{spatial} \keyword{math} spatstat/man/nvertices.Rd0000644000176200001440000000160113160710621015154 0ustar liggesusers\name{nvertices} \alias{nvertices} \alias{nvertices.owin} \alias{nvertices.default} \title{ Count Number of Vertices } \description{ Count the number of vertices in an object for which vertices are well-defined. } \usage{ nvertices(x, \dots) \method{nvertices}{owin}(x, \dots) \method{nvertices}{default}(x, \dots) } \arguments{ \item{x}{ A window (object of class \code{"owin"}), or some other object which has vertices. } \item{\dots}{ Currently ignored. } } \details{ This function counts the number of vertices of \code{x} as they would be returned by \code{\link{vertices}(x)}. It is more efficient than executing \code{npoints(vertices(x))}. } \value{ A single integer. } \author{ \spatstatAuthors and Suman Rakshit. 
} \seealso{ \code{\link{vertices}} } \examples{ nvertices(square(2)) nvertices(letterR) } \keyword{spatial} \keyword{manip} spatstat/man/nearestsegment.Rd0000644000176200001440000000305713160710621016205 0ustar liggesusers\name{nearestsegment} \alias{nearestsegment} \title{Find Line Segment Nearest to Each Point} \description{ Given a point pattern and a line segment pattern, this function finds the nearest line segment for each point. } \usage{ nearestsegment(X, Y) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{Y}{A line segment pattern (object of class \code{"psp"}).} } \details{ The distance between a point \code{x} and a straight line segment \code{y} is defined to be the shortest Euclidean distance between \code{x} and any location on \code{y}. This algorithm first calculates the distance from each point of \code{X} to each segment of \code{Y}. Then it determines, for each point \code{x} in \code{X}, which segment of \code{Y} is closest. The index of this segment is returned. } \value{ Integer vector \code{v} (of length equal to the number of points in \code{X}) identifying the nearest segment to each point. If \code{v[i] = j}, then \code{Y[j]} is the line segment lying closest to \code{X[i]}. } \author{ \adrian and \rolf } \seealso{ \code{\link{project2segment}} to project each point of \code{X} to a point lying on one of the line segments. Use \code{\link{distmap.psp}} to identify the nearest line segment for each pixel in a grid. 
} \examples{ X <- runifpoint(3) Y <- as.psp(matrix(runif(20), 5, 4), window=owin()) v <- nearestsegment(X,Y) plot(Y) plot(X, add=TRUE) plot(X[1], add=TRUE, col="red") plot(Y[v[1]], add=TRUE, lwd=2, col="red") } \keyword{spatial} \keyword{math} spatstat/man/dclf.sigtrace.Rd0000644000176200001440000001364313160710571015677 0ustar liggesusers\name{dclf.sigtrace} \alias{dclf.sigtrace} \alias{mad.sigtrace} \alias{mctest.sigtrace} \title{ Significance Trace of Cressie-Loosmore-Ford or Maximum Absolute Deviation Test } \description{ Generates a Significance Trace of the Diggle(1986)/ Cressie (1991)/ Loosmore and Ford (2006) test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.sigtrace(X, \dots) mad.sigtrace(X, \dots) mctest.sigtrace(X, fun=Lest, \dots, exponent=1, interpolate=FALSE, alpha=0.05, confint=TRUE, rmin=0) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{\dots}{ Arguments passed to \code{\link{envelope}} or \code{\link{mctest.progress}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{alternative} to specify a one-sided test, and \code{verbose=FALSE} to turn off the messages. } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{interpolate}{ Logical value specifying whether to calculate the \eqn{p}-value by interpolation. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, yielding a \eqn{p}-value of the form \eqn{(k+1)/(n+1)} where \eqn{n} is the number of simulations and \eqn{k} is the number of simulated values which are more extreme than the observed value. 
If \code{interpolate=TRUE}, the \eqn{p}-value is calculated by applying kernel density estimation to the simulated values, and computing the tail probability for this estimated distribution. } \item{alpha}{ Significance level to be plotted (this has no effect on the calculation but is simply plotted as a reference value). } \item{confint}{ Logical value indicating whether to compute a confidence interval for the \sQuote{true} \eqn{p}-value. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } } \details{ The Diggle (1986)/ Cressie (1991)/Loosmore and Ford (2006) test and the Maximum Absolute Deviation test for a spatial point pattern are described in \code{\link{dclf.test}}. These tests depend on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{significance trace} (Bowman and Azzalini, 1997; Baddeley et al, 2014, 2015) of the test is a plot of the \eqn{p}-value obtained from the test against the length of the interval \code{rinterval}. The command \code{dclf.sigtrace} performs \code{\link{dclf.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting \eqn{p}-values as a function of \eqn{R}. Similarly \code{mad.sigtrace} performs \code{\link{mad.test}} using all possible intervals and returns the \eqn{p}-values. More generally, \code{mctest.sigtrace} performs a test based on the \eqn{L^p} discrepancy between the curves. The deviation between two curves is measured by the \eqn{p}th root of the integral of the \eqn{p}th power of the absolute value of the difference between the two curves. The exponent \eqn{p} is given by the argument \code{exponent}. The case \code{exponent=2} is the Cressie-Loosmore-Ford test, while \code{exponent=Inf} is the MAD test. 
If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The result of each command is an object of class \code{"fv"} that can be plotted to obtain the significance trace. The plot shows the Monte Carlo \eqn{p}-value (solid black line), the critical value \code{0.05} (dashed red line), and a pointwise 95\% confidence band (grey shading) for the \sQuote{true} (Neyman-Pearson) \eqn{p}-value. The confidence band is based on the Agresti-Coull (1998) confidence interval for a binomial proportion (when \code{interpolate=FALSE}) or the delta method and normal approximation (when \code{interpolate=TRUE}). If \code{X} is an envelope object and \code{fun=NULL} then the code will re-use the simulated functions stored in \code{X}. } \value{ An object of class \code{"fv"} that can be plotted to obtain the significance trace. } \references{ Agresti, A. and Coull, B.A. (1998) Approximate is better than \dQuote{Exact} for interval estimation of binomial proportions. \emph{American Statistician} \bold{52}, 119--126. Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. Bowman, A.W. and Azzalini, A. (1997) \emph{Applied smoothing techniques for data analysis: the kernel approach with S-Plus illustrations}. Oxford University Press, Oxford. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{dclf.test}} for the tests; \code{\link{dclf.progress}} for progress plots. 
See \code{\link{plot.fv}} for information on plotting objects of class \code{"fv"}. See also \code{\link{dg.sigtrace}}. } \examples{ plot(dclf.sigtrace(cells, Lest, nsim=19)) } \keyword{spatial} \keyword{htest} spatstat/man/linearmarkequal.Rd0000644000176200001440000000421013160710621016326 0ustar liggesusers\name{linearmarkequal} \alias{linearmarkequal} \title{ Mark Connection Function for Multitype Point Pattern on Linear Network } \description{ For a multitype point pattern on a linear network, estimate the mark connection function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearmarkequal(X, r=NULL, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{\dots}{ Arguments passed to \code{\link{linearpcfcross}} and \code{\link{linearpcf}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is the mark equality function for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. 
\emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}, \code{\link{linearmarkconnect}}, \code{\link{markconnect}}. } \examples{ if(interactive()) { X <- chicago } else { X <- runiflpp(20, simplenet) \%mark\% sample(c("A","B"), 20, replace=TRUE) } p <- linearmarkequal(X) } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/is.multitype.Rd0000644000176200001440000000325713160710621015631 0ustar liggesusers\name{is.multitype} \alias{is.multitype} \title{Test whether Object is Multitype} \description{ Generic function to test whether a given object (usually a point pattern or something related to a point pattern) has ``marks'' attached to the points which classify the points into several types. } \usage{ is.multitype(X, \dots) } \arguments{ \item{X}{ Object to be inspected } \item{\dots}{ Other arguments. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is multitype. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. Other objects related to point patterns, such as point process models, may involve marked points. This function tests whether the object \code{X} contains or involves marked points, \bold{and} that the marks are a factor. For example, the \code{\link[spatstat.data]{amacrine}} dataset is multitype (there are two types of cells, on and off), but the \code{\link[spatstat.data]{longleaf}} dataset is \emph{not} multitype (the marks are real numbers). 
This function is generic; methods are provided for point patterns (objects of class \code{"ppp"}) and point process models (objects of class \code{"ppm"}). } \seealso{ \code{\link{is.multitype.ppp}}, \code{\link{is.multitype.ppm}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/linfun.Rd0000644000176200001440000000405613160710621014454 0ustar liggesusers\name{linfun} \Rdversion{1.1} \alias{linfun} \title{ Function on a Linear Network } \description{ Create a function on a linear network. } \usage{ linfun(f, L) } \arguments{ \item{f}{ A \code{function} in the \R language. } \item{L}{ A linear network (object of class \code{"linnet"}) on which \code{f} is defined. } } \details{ This creates an object of class \code{"linfun"}. This is a simple mechanism for handling a function defined on a linear network, to make it easier to display and manipulate. \code{f} should be a \code{function} in the \R language, with formal arguments \code{f(x,y,seg,tp)} or \code{f(x,y,seg,tp, \dots)} where \code{x,y} are Cartesian coordinates of locations on the linear network, \code{seg, tp} are the local coordinates, and \code{\dots} are optional additional arguments. The function \code{f} should be vectorised: that is, if \code{x,y,seg,tp} are numeric vectors of the same length \code{n}, then \code{v <- f(x,y,seg,tp)} should be a vector of length \code{n}. \code{L} should be a linear network (object of class \code{"linnet"}) inside which the function \code{f} is well-defined. The result is a function \code{g} in the \R language which belongs to the special class \code{"linfun"}. This function can be called as \code{g(X)} where \code{X} is an \code{"lpp"} object, or called as \code{g(x,y)} or \code{g(x,y,seg,tp)} where \code{x,y,seg,tp} are coordinates. There are several methods for this class including \code{print}, \code{plot} and \code{\link{as.linim}}. } \value{ A function in the \R\ language. 
It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{methods.linfun}} for methods applicable to \code{"linfun"} objects. \code{\link{distfun.lpp}}, \code{\link{nnfun.lpp}}. } \examples{ f <- linfun(function(x,y,seg,tp) { x+y }, simplenet) plot(f) X <- runiflpp(3, simplenet) plot(X, add=TRUE, cex=2) f(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/as.psp.Rd0000644000176200001440000001467613160710571014402 0ustar liggesusers\name{as.psp} \alias{as.psp} \alias{as.psp.psp} \alias{as.psp.data.frame} \alias{as.psp.matrix} \alias{as.psp.default} \title{Convert Data To Class psp} \description{ Tries to coerce any reasonable kind of data object to a line segment pattern (an object of class \code{"psp"}) for use by the \pkg{spatstat} package. } \usage{ as.psp(x, \dots, from=NULL, to=NULL) \method{as.psp}{psp}(x, \dots, check=FALSE, fatal=TRUE) \method{as.psp}{data.frame}(x, \dots, window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) \method{as.psp}{matrix}(x, \dots, window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) \method{as.psp}{default}(x, \dots, window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) } \arguments{ \item{x}{Data which will be converted into a line segment pattern} \item{window}{Data which define a window for the pattern.} \item{\dots}{Ignored.} \item{marks}{(Optional) vector or data frame of marks for the pattern} \item{check}{ Logical value indicating whether to check the validity of the data, e.g. to check that the line segments lie inside the window. } \item{fatal}{Logical value. See Details.} \item{from,to}{Point patterns (object of class \code{"ppp"}) containing the first and second endpoints (respectively) of each segment. Incompatible with \code{x}. 
} } \value{ An object of class \code{"psp"} (see \code{\link{psp.object}}) describing the line segment pattern and its window of observation. The value \code{NULL} may also be returned; see Details. } \details{ Converts the dataset \code{x} to a line segment pattern (an object of class \code{"psp"}; see \code{\link{psp.object}} for an overview). This function is normally used to convert an existing line segment pattern dataset, stored in another format, to the \code{"psp"} format. To create a new point pattern from raw data such as \eqn{x,y} coordinates, it is normally easier to use the creator function \code{\link{psp}}. The dataset \code{x} may be: \itemize{ \item an object of class \code{"psp"} \item a data frame with at least 4 columns \item a structure (list) with elements named \code{x0, y0, x1, y1} or elements named \code{xmid, ymid, length, angle} and possibly a fifth element named \code{marks} } If \code{x} is a data frame the interpretation of its columns is as follows: \itemize{ \item If there are columns named \code{x0, y0, x1, y1} then these will be interpreted as the coordinates of the endpoints of the segments and used to form the \code{ends} component of the \code{psp} object to be returned. \item If there are columns named \code{xmid, ymid, length, angle} then these will be interpreted as the coordinates of the segment midpoints, the lengths of the segments, and the orientations of the segments in radians and used to form the \code{ends} component of the \code{psp} object to be returned. \item If there is a column named \code{marks} then this will be interpreted as the marks of the pattern provided that the argument \code{marks} of this function is \code{NULL}. If argument \code{marks} is not \code{NULL} then the value of this argument is taken to be the marks of the pattern and the column named \code{marks} is ignored (with a warning). In either case the column named marks is deleted and omitted from further consideration. 
\item If there is no column named \code{marks} and if the \code{marks} argument of this function is \code{NULL}, and if after interpreting 4 columns of \code{x} as determining the \code{ends} component of the \code{psp} object to be returned, there remain other columns of \code{x}, then these remaining columns will be taken to form a data frame of marks for the \code{psp} object to be returned. } If \code{x} is a structure (list) with elements named \code{x0, y0, x1, y1, marks} or \code{xmid, ymid, length, angle, marks}, then the element named \code{marks} will be interpreted as the marks of the pattern provided that the argument \code{marks} of this function is \code{NULL}. If this argument is non-\code{NULL} then it is interpreted as the marks of the pattern and the element \code{marks} of \code{x} is ignored --- with a warning. Alternatively, you may specify two point patterns \code{from} and \code{to} containing the first and second endpoints of the line segments. The argument \code{window} is converted to a window object by the function \code{\link{as.owin}}. The argument \code{fatal} indicates what to do when the data cannot be converted to a line segment pattern. If \code{fatal=TRUE}, a fatal error will be generated; if \code{fatal=FALSE}, the value \code{NULL} is returned. The function \code{as.psp} is generic, with methods for the classes \code{"psp"}, \code{"data.frame"}, \code{"matrix"} and a default method. Line segment pattern datasets can also be created by the function \code{\link{psp}}. } \section{Warnings}{ If only a proper subset of the names \code{x0,y0,x1,y1} or \code{xmid,ymid,length,angle} appear amongst the names of the columns of \code{x} where \code{x} is a data frame, then these special names are ignored. For example if the names of the columns were \code{xmid,ymid,length,degrees}, then these columns would be interpreted as if they represented \code{x0,y0,x1,y1} in that order. 
Whether it gets used or not, column named \code{marks} is \emph{always} removed from \code{x} before any attempt to form the \code{ends} component of the \code{psp} object that is returned. } \seealso{ \code{\link{psp}}, \code{\link{psp.object}}, \code{\link{as.owin}}, \code{\link{owin.object}}. See \code{\link{edges}} for extracting the edges of a polygonal window as a \code{"psp"} object. } \examples{ mat <- matrix(runif(40), ncol=4) mx <- data.frame(v1=sample(1:4,10,TRUE), v2=factor(sample(letters[1:4],10,TRUE),levels=letters[1:4])) a <- as.psp(mat, window=owin(),marks=mx) mat <- cbind(as.data.frame(mat),mx) b <- as.psp(mat, window=owin()) # a and b are identical. stuff <- list(xmid=runif(10), ymid=runif(10), length=rep(0.1, 10), angle=runif(10, 0, 2 * pi)) a <- as.psp(stuff, window=owin()) b <- as.psp(from=runifpoint(10), to=runifpoint(10)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/distfun.lpp.Rd0000644000176200001440000000500613160710571015427 0ustar liggesusers\name{distfun.lpp} \Rdversion{1.1} \alias{distfun.lpp} \title{ Distance Map on Linear Network } \description{ Compute the distance function of a point pattern on a linear network. } \usage{ \method{distfun}{lpp}(X, ..., k=1) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{k}{ An integer. The distance to the \code{k}th nearest point will be computed. } \item{\dots}{ Extra arguments are ignored. } } \details{ On a linear network \eqn{L}, the \dQuote{geodesic distance function} of a set of points \eqn{A} in \eqn{L} is the mathematical function \eqn{f} such that, for any location \eqn{s} on \eqn{L}, the function value \code{f(s)} is the shortest-path distance from \eqn{s} to \eqn{A}. The command \code{distfun.lpp} is a method for the generic command \code{\link{distfun}} for the class \code{"lpp"} of point patterns on a linear network. 
If \code{X} is a point pattern on a linear network, \code{f <- distfun(X)} returns a \emph{function} in the \R language that represents the distance function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the values of the distance function at these locations. More efficiently \code{f} can be called in the form \code{v <- f(x, y, seg, tp)} where \code{seg} and \code{tp} are the local coordinates on the network. It can also be called as \code{v <- f(x)} where \code{x} is a point pattern on the same linear network. The function \code{f} obtained from \code{f <- distfun(X)} also belongs to the class \code{"linfun"}. It can be printed and plotted immediately as shown in the Examples. It can be converted to a pixel image using \code{\link{as.linim}}. } \value{ A \code{function} with arguments \code{x,y} and optional arguments \code{seg,tp}. It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{linfun}}, \code{\link{methods.linfun}}. To identify \emph{which} point is the nearest neighbour, see \code{\link{nnfun.lpp}}. } \examples{ data(letterR) X <- runiflpp(3, simplenet) f <- distfun(X) f plot(f) # using a distfun as a covariate in a point process model: Y <- runiflpp(4, simplenet) fit <- lppm(Y ~D, covariates=list(D=f)) f(Y) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/DiggleGratton.Rd0000644000176200001440000000544513160710571015722 0ustar liggesusers\name{DiggleGratton} \alias{DiggleGratton} \title{Diggle-Gratton model} \description{ Creates an instance of the Diggle-Gratton pairwise interaction point process model, which can then be fitted to point pattern data. 
} \usage{ DiggleGratton(delta=NA, rho) } \arguments{ \item{delta}{lower threshold \eqn{\delta}{\delta}} \item{rho}{upper threshold \eqn{\rho}{\rho}} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Diggle and Gratton (1984, pages 208-210) introduced the pairwise interaction point process with pair potential \eqn{h(t)} of the form \deqn{ h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa \quad\quad \mbox{ if } \delta \le t \le \rho }{ h(t) = ((t - \delta)/(\rho - \delta))^\kappa, { } \delta \le t \le \rho } with \eqn{h(t) = 0} for \eqn{t < \delta}{t < \delta} and \eqn{h(t) = 1} for \eqn{t > \rho}{t > \rho}. Here \eqn{\delta}{\delta}, \eqn{\rho}{\rho} and \eqn{\kappa}{\kappa} are parameters. Note that we use the symbol \eqn{\kappa}{\kappa} where Diggle and Gratton (1984) and Diggle, Gates and Stibbard (1987) use \eqn{\beta}{\beta}, since in \pkg{spatstat} we reserve the symbol \eqn{\beta}{\beta} for an intensity parameter. The parameters must all be nonnegative, and must satisfy \eqn{\delta \le \rho}{\delta \le \rho}. The potential is inhibitory, i.e.\ this model is only appropriate for regular point patterns. The strength of inhibition increases with \eqn{\kappa}{\kappa}. For \eqn{\kappa=0}{\kappa=0} the model is a hard core process with hard core radius \eqn{\delta}{\delta}. For \eqn{\kappa=\infty}{\kappa=Inf} the model is a hard core process with hard core radius \eqn{\rho}{\rho}. The irregular parameters \eqn{\delta, \rho}{\delta, \rho} must be given in the call to \code{DiggleGratton}, while the regular parameter \eqn{\kappa}{\kappa} will be estimated. If the lower threshold \code{delta} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{delta} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. 
} \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{Pairwise}} } \examples{ ppm(cells ~1, DiggleGratton(0.05, 0.1)) } \references{ Diggle, P.J., Gates, D.J. and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763 -- 770. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/angles.psp.Rd0000644000176200001440000000274613160710571015243 0ustar liggesusers\name{angles.psp} \alias{angles.psp} \title{Orientation Angles of Line Segments} \description{ Computes the orientation angle of each line segment in a line segment pattern. } \usage{ angles.psp(x, directed=FALSE) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{directed}{ Logical flag. See details. } } \value{ Numeric vector. } \details{ For each line segment, the angle of inclination to the \eqn{x}-axis (in radians) is computed, and the angles are returned as a numeric vector. If \code{directed=TRUE}, the directed angle of orientation is computed. The angle respects the sense of direction from \code{(x0,y0)} to \code{(x1,y1)}. The values returned are angles in the full range from \eqn{-\pi}{-\pi} to \eqn{\pi}{\pi}. The angle is computed as \code{atan2(y1-y0,x1-x0)}. See \code{\link{atan2}}. If \code{directed=FALSE}, the undirected angle of orientation is computed. Angles differing by \eqn{\pi} are regarded as equivalent. The values returned are angles in the range from \eqn{0} to \eqn{\pi}{\pi}. These angles are computed by first computing the directed angle, then adding \eqn{\pi}{\pi} to any negative angles. 
} \seealso{ \code{\link{summary.psp}}, \code{\link{midpoints.psp}}, \code{\link{lengths.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- angles.psp(a) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/hist.funxy.Rd0000644000176200001440000000375113160710621015301 0ustar liggesusers\name{hist.funxy} \alias{hist.funxy} \title{Histogram of Values of a Spatial Function} \description{ Computes and displays a histogram of the values of a spatial function of class \code{"funxy"}. } \usage{ \method{hist}{funxy}(x, \dots, xname) } \arguments{ \item{x}{A spatial function (object of class \code{"funxy"}).} \item{\dots}{ Arguments passed to \code{\link{as.im}} or \code{\link{hist.im}}. } \item{xname}{ Optional. Character string to be used as the name of the dataset \code{x}. } } \details{ This function computes and (by default) displays a histogram of the values of the function \code{x}. An object of class \code{"funxy"} describes a function of spatial location. It is a \code{function(x,y,..)} in the \R language, with additional attributes. The function \code{hist.funxy} is a method for the generic function \code{\link{hist}} for the class \code{"funxy"}. The function is first converted to a pixel image using \code{\link{as.im}}, then \code{\link{hist.im}} is called to produce the histogram. Any arguments in \code{...} are passed to \code{\link{as.im}} to determine the pixel resolution, or to \code{\link{hist.im}} to determine the histogram breaks and to control or suppress plotting. Useful arguments include \code{W} for the spatial domain, \code{eps,dimyx} for pixel resolution, \code{main} for the main title. } \value{ An object of class \code{"histogram"} as returned by \code{\link[graphics:hist]{hist.default}}. This object can be plotted. } \seealso{ \code{\link{spatialcdf}} for the cumulative distribution function of an image or function. \code{\link{hist}}, \code{\link{hist.default}}. 
For other statistical graphics such as Q-Q plots, use \code{as.im(X)[]} to extract the pixel values of image \code{X}, and apply the usual statistical graphics commands. } \examples{ f <- funxy(function(x,y) {x^2}, unit.square()) hist(f) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/bugfixes.Rd0000644000176200001440000000411113160710571014771 0ustar liggesusers\name{bugfixes} \alias{bugfixes} \title{ List Recent Bug Fixes } \description{ List all bug fixes in a package, starting from a certain date or version of the package. Fixes are sorted alphabetically by the name of the affected function. The default is to list bug fixes in the latest version of the \pkg{spatstat} package. } \usage{ bugfixes(sinceversion = NULL, sincedate = NULL, package = "spatstat", show = TRUE) } \arguments{ \item{sinceversion}{ Earliest version of \code{package} for which bugs should be listed. The default is the current installed version. } \item{sincedate}{ Earliest release date of \code{package} for which bugs should be listed. A character string or a date-time object. } \item{package}{ Character string. The name of the package for which bugs are to be listed. } \item{show}{ Logical value indicating whether to display the bug table on the terminal. } } \details{ Bug reports are extracted from the NEWS file of the specified \code{package}. Only those after a specified date, or after a specified version of the package, are retained. The bug reports are then sorted alphabetically, so that all bugs affecting a particular function are listed consecutively. Finally the table of bug reports is displayed (if \code{show=TRUE}) and returned invisibly. The argument \code{sinceversion} should be a character string like \code{"1.2-3"}. The default is the current installed version of the package. The argument \code{sincedate} should be a character string like \code{"2015-05-27"}, or a date-time object. 
Typing \code{bugfixes} without parentheses will display a table of all bug fixes in the current installed version of \pkg{spatstat}. } \value{ A data frame, belonging to the class \code{"bugtable"}, which has its own print method. } \author{ \adrian. } \seealso{ \code{\link{latest.news}}, \code{\link[utils]{news}}. } \examples{ # show all bugs reported after publication of the spatstat book if(interactive()) bugfixes("1.42-0") } \keyword{documentation} spatstat/man/bw.pcf.Rd0000644000176200001440000001025113160710571014336 0ustar liggesusers\name{bw.pcf} \alias{bw.pcf} \title{ Cross Validated Bandwidth Selection for Pair Correlation Function } \description{ Uses composite likelihood or generalized least squares cross-validation to select a smoothing bandwidth for the kernel estimation of pair correlation function. } \usage{ bw.pcf(X, rmax=NULL, lambda=NULL, divisor="r", kernel="epanechnikov", nr=10000, bias.correct=TRUE, cv.method=c("compLik", "leastSQ"), simple=TRUE, srange=NULL, \dots, verbose=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{rmax}{ Numeric. Maximum value of the spatial lag distance \eqn{r} for which \eqn{g(r)} should be evaluated. } \item{lambda}{ Optional. Values of the estimated intensity function. A vector giving the intensity values at the points of the pattern \code{X}. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See \code{pcf.ppp}. } \item{kernel}{ Choice of smoothing kernel, passed to \code{density}; see \code{\link{pcf}} and \code{\link{pcfinhom}}. } \item{nr}{ Integer. Number of subintervals for discretization of [0, rmax] to use in computing numerical integrals. } \item{bias.correct}{ Logical. Whether to use bias corrected version of the kernel estimate. See Details. } \item{cv.method}{ Choice of cross validation method: either \code{"compLik"} or \code{"leastSQ"} (partially matched). } \item{simple}{ Logical. 
Whether to use simple removal of spatial lag distances. See Details. } \item{srange}{ Optional. Numeric vector of length 2 giving the range of bandwidth values that should be searched to find the optimum bandwidth. } \item{\dots}{ Other arguments, passed to \code{\link{pcf}} or \code{\link{pcfinhom}}. } \item{verbose}{ Logical value indicating whether to print progress reports during the optimization procedure. } } \details{ This function selects an appropriate bandwidth \code{bw} for the kernel estimator of the pair correlation function of a point process intensity computed by \code{\link{pcf.ppp}} (homogeneous case) or \code{\link{pcfinhom}} (inhomogeneous case). With \code{cv.method="leastSQ"}, the bandwidth \eqn{h} is chosen to minimise an unbiased estimate of the integrated mean-square error criterion \eqn{M(h)} defined in equation (4) in Guan (2007a). Up to normalising constants, this criterion is of the form \deqn{ M(b) = \int_{0}^{r_{\max}} \hat{g}^2(r;b) \, r \, dr - \sum_{u,v} \hat{g}_{-(u,v)}(\|u-v\|;b) }{ M(b) = \int_{0}^{rmax} \hat{g}^2(r;b) r dr - \sum_{u,v} \hat{g}_{-(u,v)}(||u-v||;b) } where the sum is over pairs of distinct data points \eqn{u,v} and \eqn{\hat{g}_{-(u,v)}}{ghat[-(u,v)]} denotes the leave-two-out estimate of the pair correlation function; see Guan (2007a), equation (4), for the exact definition. With \code{cv.method="compLik"}, the bandwidth \eqn{h} is chosen to maximise a likelihood cross-validation criterion \eqn{CV(h)} defined in equation (6) of Guan (2007b). The result is a numerical value giving the selected bandwidth. } \section{Definition of bandwidth}{ The bandwidth \code{bw} returned by \code{bw.pcf} corresponds to the standard deviation of the smoothing kernel. As mentioned in the documentation of \code{\link{density.default}} and \code{\link{pcf.ppp}}, this differs from the scale parameter \code{h} of the smoothing kernel which is often considered in the literature as the bandwidth of the kernel function. For example for the Epanechnikov kernel, \code{bw=h/sqrt(5)}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}} } \examples{ b <- bw.pcf(redwood) plot(pcf(redwood, bw=b)) } \references{ Guan, Y. (2007a). 
A composite likelihood cross-validation approach in selecting bandwidth for the estimation of the pair correlation function. \emph{Scandinavian Journal of Statistics}, \bold{34}(2), 336--346. Guan, Y. (2007b). A least-squares cross-validation bandwidth selection approach in pair correlation function estimations. \emph{Statistics & Probability Letters}, \bold{77}(18), 1722--1729. } \author{ Rasmus Waagepetersen and Abdollah Jalilian. Adapted for \pkg{spatstat} by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/tilenames.Rd0000644000176200001440000000150313160710621015134 0ustar liggesusers\name{tilenames} \alias{tilenames} \alias{tilenames<-} \title{Names of Tiles in a Tessellation} \description{ Extract or Change the Names of the Tiles in a Tessellation. } \usage{ tilenames(x) tilenames(x) <- value } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} \item{value}{Character vector giving new names for the tiles.} } \details{ These functions extract or change the names of the tiles that make up the tessellation \code{x}. If the tessellation is a regular grid, the tile names cannot be changed. } \value{ \code{tilenames} returns a character vector. } \seealso{ \code{\link{tess}}, \code{\link{tiles}} } \examples{ D <- dirichlet(runifpoint(10)) tilenames(D) tilenames(D) <- paste("Cell", 1:10) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/dummify.Rd0000644000176200001440000000342713160710571014640 0ustar liggesusers\name{dummify} \alias{dummify} \title{ Convert Data to Numeric Values by Constructing Dummy Variables } \description{ Converts data of any kind to numeric values. A factor is expanded to a set of dummy variables. } \usage{ dummify(x) } \arguments{ \item{x}{ Vector, factor, matrix or data frame to be converted. 
} } \details{ This function converts data (such as a factor) to numeric values in order that the user may calculate, for example, the mean, variance, covariance and correlation of the data. If \code{x} is a numeric vector or integer vector, it is returned unchanged. If \code{x} is a logical vector, it is converted to a 0-1 matrix with 2 columns. The first column contains a 1 if the logical value is \code{FALSE}, and the second column contains a 1 if the logical value is \code{TRUE}. If \code{x} is a complex vector, it is converted to a matrix with 2 columns, containing the real and imaginary parts. If \code{x} is a factor, the result is a matrix of 0-1 dummy variables. The matrix has one column for each possible level of the factor. The \code{(i,j)} entry is equal to 1 when the \code{i}th factor value equals the \code{j}th level, and is equal to 0 otherwise. If \code{x} is a matrix or data frame, the appropriate conversion is applied to each column of \code{x}. Note that, unlike \code{\link[stats]{model.matrix}}, this command converts a factor into a full set of dummy variables (one column for each level of the factor). } \value{ A numeric matrix. } \author{ \adrian } \examples{ chara <- sample(letters[1:3], 8, replace=TRUE) logi <- (runif(8) < 0.3) comp <- round(4*runif(8) + 3*runif(8) * 1i, 1) nume <- 8:1 + 0.1 df <- data.frame(nume, chara, logi, comp) df dummify(df) } \keyword{math} spatstat/man/data.ppm.Rd0000644000176200001440000000223413160710571014665 0ustar liggesusers\name{data.ppm} \alias{data.ppm} \title{Extract Original Data from a Fitted Point Process Model} \description{ Given a fitted point process model, this function extracts the original point pattern dataset to which the model was fitted. } \usage{ data.ppm(object) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). } } \value{ A point pattern (object of class \code{"ppp"}). 
} \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The object contains complete information about the original data point pattern to which the model was fitted. This function extracts the original data pattern. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- data.ppm(fit) # 'X' is identical to 'cells' } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/edit.hyperframe.Rd0000644000176200001440000000223513160710571016250 0ustar liggesusers\name{edit.hyperframe} \alias{edit.hyperframe} \title{ Invoke Text Editor on Hyperframe } \description{ Invokes a text editor allowing the user to inspect and change entries in a hyperframe. } \usage{ \method{edit}{hyperframe}(name, \dots) } \arguments{ \item{name}{ A hyperframe (object of class \code{"hyperframe"}). } \item{\dots}{ Other arguments passed to \code{\link[utils]{edit.data.frame}}. } } \details{ The function \code{\link[utils]{edit}} is generic. This function is the methods for objects of class \code{"hyperframe"}. The hyperframe \code{name} is converted to a data frame or array, and the text editor is invoked. The user can change entries in the columns of data, and create new columns of data. Only the columns of atomic data (numbers, characters, factor values etc) can be edited. Note that the original object \code{name} is not changed; the function returns the edited dataset. } \value{ Another hyperframe. 
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link[utils]{edit.data.frame}}, \code{\link{edit.ppp}} } \examples{ if(interactive()) Z <- edit(flu) } \keyword{spatial} \keyword{manip} spatstat/man/crossing.psp.Rd0000644000176200001440000000425213160710571015613 0ustar liggesusers\name{crossing.psp} \alias{crossing.psp} \title{Crossing Points of Two Line Segment Patterns} \description{ Finds any crossing points between two line segment patterns. } \usage{ crossing.psp(A,B,fatal=TRUE,details=FALSE) } \arguments{ \item{A,B}{ Line segment patterns (objects of class \code{"psp"}). } \item{details}{ Logical value indicating whether to return additional information. See below. } \item{fatal}{ Logical value indicating what to do if the windows of \code{A} and \code{B} do not overlap. See Details. } } \value{ Point pattern (object of class \code{"ppp"}). } \details{ This function finds any crossing points between the line segment patterns \code{A} and \code{B}. A crossing point occurs whenever one of the line segments in \code{A} intersects one of the line segments in \code{B}, at a nonzero angle of intersection. The result is a point pattern consisting of all the intersection points. If \code{details=TRUE}, additional information is computed, specifying where each intersection point came from. The resulting point pattern has a data frame of marks, with columns named \code{iA, jB, tA, tB}. The marks \code{iA} and \code{jB} are the indices of the line segments in \code{A} and \code{B}, respectively, which produced each intersection point. The marks \code{tA} and \code{tB} are numbers between 0 and 1 specifying the position of the intersection point along the original segments. If the windows \code{Window(A)} and \code{Window(B)} do not overlap, then an error will be reported if \code{fatal=TRUE}, while if \code{fatal=FALSE} an error will not occur and the result will be \code{NULL}. 
} \seealso{ \code{\link{selfcrossing.psp}}, \code{\link{psp.object}}, \code{\link{ppp.object}}. } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(a, col="green", main="crossing.psp") plot(b, add=TRUE, col="blue") P <- crossing.psp(a,b) plot(P, add=TRUE, col="red") as.data.frame(crossing.psp(a,b,details=TRUE)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/discs.Rd0000644000176200001440000000641313160710571014271 0ustar liggesusers\name{discs} \alias{discs} \title{ Union of Discs } \description{ Make a spatial region composed of discs with given centres and radii. } \usage{ discs(centres, radii = marks(centres)/2, \dots, separate = FALSE, mask = FALSE, trim = TRUE, delta = NULL, npoly=NULL) } \arguments{ \item{centres}{ Point pattern giving the locations of centres for the discs. } \item{radii}{ Vector of radii for each disc, or a single number giving a common radius. (Notice that the default assumes that the marks of \code{X} are \emph{diameters}.) } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to determine the pixel resolution, if \code{mask=TRUE}. } \item{separate}{ Logical. If \code{TRUE}, the result is a list containing each disc as a separate entry. If \code{FALSE} (the default), the result is a window obtained by forming the union of the discs. } \item{mask}{ Logical. If \code{TRUE}, the result is a binary mask window. If \code{FALSE}, the result is a polygonal window. Applies only when \code{separate=FALSE}. } \item{trim}{ Logical value indicating whether to restrict the result to the original window of the \code{centres}. Applies only when \code{separate=FALSE}. } \item{delta}{ Argument passed to \code{\link{disc}} to determine the tolerance for the polygonal approximation of each disc. Applies only when \code{mask=FALSE}. Incompatible with \code{npoly}. 
} \item{npoly}{ Argument passed to \code{\link{disc}} to determine the number of edges in the polygonal approximation of each disc. Applies only when \code{mask=FALSE}. Incompatible with \code{delta}. } } \details{ This command is typically applied to a marked point pattern dataset \code{X} in which the marks represent the sizes of objects. The result is a spatial region representing the space occupied by the objects. If the marks of \code{X} represent the diameters of circular objects, then the result of \code{discs(X)} is a spatial region constructed by taking discs, of the specified diameters, centred at the points of \code{X}, and forming the union of these discs. If the marks of \code{X} represent the areas of objects, one could take \code{discs(X, sqrt(marks(X)/pi))} to produce discs of equivalent area. A fast algorithm is used to compute the result as a binary mask, when \code{mask=TRUE}. This option is recommended unless polygons are really necessary. If \code{mask=FALSE}, the discs will be constructed as polygons by the function \code{\link{disc}}. To avoid computational problems, by default, the discs will all be constructed using the same physical tolerance value \code{delta} passed to \code{\link{disc}}. The default is such that the smallest disc will be approximated by a 16-sided polygon. (The argument \code{npoly} should not normally be used, to avoid computational problems arising with small radii.) } \value{ If \code{separate=FALSE}, a window (object of class \code{"owin"}). If \code{separate=TRUE}, a list of windows. } \author{ \spatstatAuthors. } \seealso{ \code{\link{disc}}, \code{\link{union.owin}} } \examples{ plot(discs(anemones, mask=TRUE, eps=0.5)) } \keyword{spatial} \keyword{datagen} spatstat/man/rat.Rd0000644000176200001440000000335613160710621013751 0ustar liggesusers\name{rat} \alias{rat} \title{ Ratio object } \description{ Stores the numerator, denominator, and value of a ratio as a single object. 
} \usage{ rat(ratio, numerator, denominator, check = TRUE) } \arguments{ \item{ratio,numerator,denominator}{ Three objects belonging to the same class. } \item{check}{ Logical. Whether to check that the objects are \code{\link{compatible}}. } } \details{ The class \code{"rat"} is a simple mechanism for keeping track of the numerator and denominator when calculating a ratio. Its main purpose is simply to signal that the object is a ratio. The function \code{rat} creates an object of class \code{"rat"} given the numerator, the denominator and the ratio. No calculation is performed; the three objects are simply stored together. The arguments \code{ratio}, \code{numerator}, \code{denominator} can be objects of any kind. They should belong to the same class. It is assumed that the relationship \deqn{ \mbox{ratio} = \frac{\mbox{numerator}}{\mbox{denominator}} }{ ratio = numerator/denominator } holds in some version of arithmetic. However, no calculation is performed. By default the algorithm checks whether the three arguments \code{ratio}, \code{numerator}, \code{denominator} are compatible objects, according to \code{\link{compatible}}. The result is equivalent to \code{ratio} except for the addition of extra information. } \value{ An object equivalent to the object \code{ratio} except that it also belongs to the class \code{"rat"} and has additional attributes \code{numerator} and \code{denominator}. } \author{\adrian and \rolf. } \seealso{ \code{\link{compatible}}, \code{\link{pool}} } \keyword{spatial} \keyword{manip} spatstat/man/formula.ppm.Rd0000644000176200001440000000326213160710621015417 0ustar liggesusers\name{formula.ppm} \alias{formula.ppm} \alias{terms.ppm} \title{ Model Formulae for Gibbs Point Process Models } \description{ Extract the trend formula, or the terms in the trend formula, in a fitted Gibbs point process model. 
} \usage{ \method{formula}{ppm}(x, \dots) \method{terms}{ppm}(x, \dots) } \arguments{ \item{x}{ An object of class \code{"ppm"}, representing a fitted point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{formula}} and \code{\link{terms}} for the class \code{"ppm"}. An object of class \code{"ppm"} represents a fitted Poisson or Gibbs point process model. It is obtained from the model-fitting function \code{\link{ppm}}. The method \code{formula.ppm} extracts the trend formula from the fitted model \code{x} (the formula originally specified as the argument \code{trend} to \code{\link{ppm}}). The method \code{terms.ppm} extracts the individual terms in the trend formula. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{ppm}}, \code{\link{as.owin}}, \code{\link{coef.ppm}}, \code{\link{extractAIC.ppm}}, \code{\link{fitted.ppm}}, \code{\link{logLik.ppm}}, \code{\link{model.frame.ppm}}, \code{\link{model.matrix.ppm}}, \code{\link{plot.ppm}}, \code{\link{predict.ppm}}, \code{\link{residuals.ppm}}, \code{\link{simulate.ppm}}, \code{\link{summary.ppm}}, \code{\link{update.ppm}}, \code{\link{vcov.ppm}}. } \examples{ data(cells) fit <- ppm(cells, ~x) formula(fit) terms(fit) } \keyword{spatial} \keyword{methods} spatstat/man/Extract.msr.Rd0000644000176200001440000000230013160710621015361 0ustar liggesusers\name{Extract.msr} \alias{[.msr} \title{Extract Subset of Signed or Vector Measure} \description{ Extract a subset of a signed measure or vector-valued measure. } \usage{ \method{[}{msr}(x, i, j, \dots) } \arguments{ \item{x}{ A signed or vector measure. An object of class \code{"msr"} (see \code{\link{msr}}). } \item{i}{ Object defining the subregion or subset to be extracted. 
Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or any type of index that applies to a matrix. } \item{j}{ Subset index selecting the vector coordinates to be extracted, if \code{x} is a vector-valued measure. } \item{\dots}{Ignored.} } \value{ An object of class \code{"msr"}. } \details{ This operator extracts a subset of the data which determines the signed measure or vector-valued measure \code{x}. The result is another measure. } \seealso{ \code{\link{msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") rp[square(0.5)] rs[ , 2:3] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/density.splitppp.Rd0000644000176200001440000000454513160710571016521 0ustar liggesusers\name{density.splitppp} \alias{density.splitppp} \alias{density.ppplist} \title{Kernel Smoothed Intensity of Split Point Pattern} \description{ Compute a kernel smoothed intensity function for each of the components of a split point pattern, or each of the point patterns in a list. } \usage{ \method{density}{splitppp}(x, \dots, se=FALSE) \method{density}{ppplist}(x, \dots, se=FALSE) } \arguments{ \item{x}{ Split point pattern (object of class \code{"splitppp"} created by \code{\link{split.ppp}}) to be smoothed. Alternatively a list of point patterns, of class \code{"ppplist"}. } \item{\dots}{ Arguments passed to \code{\link{density.ppp}} to control the smoothing, pixel resolution, edge correction etc. } \item{se}{ Logical value indicating whether to compute standard errors as well. } } \value{ A list of pixel images (objects of class \code{"im"}) which can be plotted or printed; or a list of numeric vectors giving the values at specified points. If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. 
} \details{ This is a method for the generic function \code{density}. The argument \code{x} should be a list of point patterns, and should belong to one of the classes \code{"ppplist"} or \code{"splitppp"}. Typically \code{x} is obtained by applying the function \code{\link{split.ppp}} to a point pattern \code{y} by calling \code{split(y)}. This splits the points of \code{y} into several sub-patterns. A kernel estimate of the intensity function of each of the point patterns is computed using \code{\link{density.ppp}}. The return value is usually a list, each of whose entries is a pixel image (object of class \code{"im"}). The return value also belongs to the class \code{"solist"} and can be plotted or printed. If the argument \code{at="points"} is given, the result is a list of numeric vectors giving the intensity values at the data points. If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. } \seealso{ \code{\link{ppp.object}}, \code{\link{im.object}} } \examples{ Z <- density(split(amacrine), 0.05) plot(Z) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/simplepanel.Rd0000644000176200001440000002006313160710621015466 0ustar liggesusers\name{simplepanel} \alias{simplepanel} \alias{grow.simplepanel} \title{Simple Point-and-Click Interface Panels} \description{ These functions enable the user to create a simple, robust, point-and-click interface to any \R code. } \usage{ simplepanel(title, B, boxes, clicks, redraws=NULL, exit = NULL, env) grow.simplepanel(P, side = c("right", "left", "top", "bottom"), len = NULL, new.clicks, new.redraws=NULL, \dots, aspect) } \arguments{ \item{title}{ Character string giving the title of the interface panel. } \item{B}{ Bounding box of the panel coordinates. 
A rectangular window (object of class \code{"owin"}) } \item{boxes}{ A list of rectangular windows (objects of class \code{"owin"}) specifying the placement of the buttons and other interactive components of the panel. } \item{clicks}{ A list of \R functions, of the same length as \code{boxes}, specifying the operations to be performed when each button is clicked. Entries can also be \code{NULL} indicating that no action should occur. See Details. } \item{redraws}{ Optional list of \R functions, of the same length as \code{boxes}, specifying how to redraw each button. Entries can also be \code{NULL} indicating a simple default. See Details. } \item{exit}{ An \R function specifying actions to be taken when the interactive panel terminates. } \item{env}{ An \code{environment} that will be passed as an argument to all the functions in \code{clicks}, \code{redraws} and \code{exit}. } \item{P}{ An existing interaction panel (object of class \code{"simplepanel"}). } \item{side}{ Character string identifying which side of the panel \code{P} should be grown to accommodate the new buttons. } \item{len}{ Optional. Thickness of the new panel area that should be grown to accommodate the new buttons. A single number in the same units as the coordinate system of \code{P}. } \item{new.clicks}{ List of \R functions defining the operations to be performed when each of the new buttons is clicked. } \item{new.redraws}{ Optional. List of \R functions, of the same length as \code{new.clicks}, defining how to redraw each of the new buttons. } \item{\dots}{ Arguments passed to \code{\link{layout.boxes}} to determine the layout of the new buttons. } \item{aspect}{ Optional. Aspect ratio (height/width) of the new buttons. } } \details{ These functions enable the user to create a simple, robust, point-and-click interface to any \R code. The functions \code{simplepanel} and \code{grow.simplepanel} create an object of class \code{"simplepanel"}. 
Such an object defines the graphics to be displayed and the actions to be performed when the user interacts with the panel. The panel is activated by calling \code{\link{run.simplepanel}}. The function \code{simplepanel} creates a panel object from basic data. The function \code{grow.simplepanel} modifies an existing panel object \code{P} by growing an additional row or column of buttons. For \code{simplepanel}, \itemize{ \item The spatial layout of the panel is determined by the rectangles \code{B} and \code{boxes}. \item The argument \code{clicks} must be a list of functions specifying the action to be taken when each button is clicked (or \code{NULL} to indicate that no action should be taken). The list entries should have names (but there are sensible defaults). Each function should be of the form \code{function(env, xy)} where \code{env} is an \code{environment} that may contain shared data, and \code{xy} gives the coordinates of the mouse click, in the format \code{list(x, y)}. The function returns \code{TRUE} if the panel should continue running, and \code{FALSE} if the panel should terminate. \item The argument \code{redraws}, if given, must be a list of functions specifying the action to be taken when each button is to be redrawn. Each function should be of the form \code{function(button, name, env)} where \code{button} is a rectangle specifying the location of the button in the current coordinate system; \code{name} is a character string giving the name of the button; and \code{env} is the \code{environment} that may contain shared data. The function returns \code{TRUE} if the panel should continue running, and \code{FALSE} if the panel should terminate. If \code{redraws} is not given (or if one of the entries in \code{redraws} is \code{NULL}), the default action is to draw a pink rectangle showing the button position, draw the name of the button in the middle of this rectangle, and return \code{TRUE}. 
\item The argument \code{exit}, if given, must be a function specifying the action to be taken when the panel terminates. (Termination occurs when one of the \code{clicks} functions returns \code{FALSE}). The \code{exit} function should be of the form \code{function(env)} where \code{env} is the \code{environment} that may contain shared data. Its return value will be used as the return value of \code{\link{run.simplepanel}}. \item The argument \code{env} should be an \R environment. The panel buttons will have access to this environment, and will be able to read and write data in it. This mechanism is used to exchange data between the panel and other \R code. } For \code{grow.simplepanel}, \itemize{ \item the spatial layout of the new boxes is determined by the arguments \code{side}, \code{len}, \code{aspect} and by the additional \code{\dots} arguments passed to \code{\link{layout.boxes}}. \item the argument \code{new.clicks} should have the same format as \code{clicks}. It implicitly specifies the number of new buttons to be added, and the actions to be performed when they are clicked. \item the optional argument \code{new.redraws}, if given, should have the same format as \code{redraws}. It specifies the actions to be performed when the new buttons are clicked. } } \value{ An object of class \code{"simplepanel"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{run.simplepanel}}, \code{\link{layout.boxes}} } \examples{ # make boxes (alternatively use layout.boxes()) Bminus <- square(1) Bvalue <- shift(Bminus, c(1.2, 0)) Bplus <- shift(Bvalue, c(1.2, 0)) Bdone <- shift(Bplus, c(1.2, 0)) myboxes <- list(Bminus, Bvalue, Bplus, Bdone) myB <- do.call(boundingbox,myboxes) # make environment containing an integer count myenv <- new.env() assign("answer", 0, envir=myenv) # what to do when finished: return the count. 
arguments of the form \code{tag=value} specifying the parameter values to set.
} } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/rotate.ppp.Rd0000644000176200001440000000241313160710621015250 0ustar liggesusers\name{rotate.ppp} \alias{rotate.ppp} \title{Rotate a Point Pattern} \description{ Rotates a point pattern } \usage{ \method{rotate}{ppp}(X, angle=pi/2, \dots, centre=NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{angle}{Angle of rotation.} \item{\dots}{ Arguments passed to \code{\link{rotate.owin}} affecting the handling of the observation window, if it is a binary pixel mask. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } } \value{ Another object of class \code{"ppp"} representing the rotated point pattern. } \details{ The points of the pattern, and the window of observation, are rotated about the origin by the angle specified. Angles are measured in radians, anticlockwise. The default is to rotate the pattern 90 degrees anticlockwise. If the points carry marks, these are preserved. } \seealso{ \code{\link{ppp.object}}, \code{\link{rotate.owin}} } \examples{ data(cells) X <- rotate(cells, pi/3) \dontrun{ plot(X) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/sessionLibs.Rd0000644000176200001440000000150713160710621015454 0ustar liggesusers\name{sessionLibs} \alias{sessionLibs} \title{ Print Names and Version Numbers of Libraries Loaded } \description{ Prints the names and version numbers of libraries currently loaded by the user. } \usage{ sessionLibs() } \details{ This function prints a list of the libraries loaded by the user in the current session, giving just their name and version number. It obtains this information from \code{\link[utils]{sessionInfo}}. 
This function is not needed in an interactive \R session because the package startup messages will usually provide this information. Its main use is in an \code{\link{Sweave}} script, where it is needed because the package startup messages are not printed. } \value{ Null. } \author{ \adrian and \rolf } \seealso{ \code{\link[utils]{sessionInfo}} } \keyword{data} spatstat/man/rthin.Rd0000644000176200001440000000651513160710621014307 0ustar liggesusers\name{rthin} \alias{rthin} \title{Random Thinning} \description{ Applies independent random thinning to a point pattern. } \usage{ rthin(X, P, \dots, nsim=1, drop=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}) that will be thinned. } \item{P}{ Data giving the retention probabilities, i.e. the probability that each point in \code{X} will be retained. Either a single number, or a vector of numbers, or a \code{function(x,y)} in the \R language, or a function object (class \code{"funxy"} or \code{"linfun"}), or a pixel image (object of class \code{"im"} or \code{"linim"}). } \item{\dots}{ Additional arguments passed to \code{P}, if it is a function. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ In a random thinning operation, each point of the pattern \code{X} is randomly either deleted or retained (i.e. not deleted). The result is a point pattern, consisting of those points of \code{X} that were retained. Independent random thinning means that the retention/deletion of each point is independent of other points. The argument \code{P} determines the probability of \bold{retaining} each point. 
It may be \describe{ \item{a single number,}{so that each point will be retained with the same probability \code{P}; } \item{a vector of numbers,}{so that the \code{i}th point of \code{X} will be retained with probability \code{P[i]}; } \item{a function \code{P(x,y)},}{so that a point at a location \code{(x,y)} will be retained with probability \code{P(x,y)}; } \item{an object of class \code{"funxy"} or \code{"linfun"},}{so that points in the pattern \code{X} will be retained with probabilities \code{P(X)}; } \item{a pixel image,}{containing values of the retention probability for all locations in a region encompassing the point pattern. } } If \code{P} is a function \code{P(x,y)}, it should be \sQuote{vectorised}, that is, it should accept vector arguments \code{x,y} and should yield a numeric vector of the same length. The function may have extra arguments which are passed through the \code{\dots} argument. } \section{Reproducibility}{ The algorithm for random thinning was changed in \pkg{spatstat} version \code{1.42-3}. Set \code{spatstat.options(fastthin=FALSE)} to use the previous, slower algorithm, if it is desired to reproduce results obtained with earlier versions. } \examples{ plot(redwood, main="thinning") # delete 20\% of points Y <- rthin(redwood, 0.8) points(Y, col="green", cex=1.4) # function f <- function(x,y) { ifelse(x < 0.4, 1, 0.5) } Y <- rthin(redwood, f) # pixel image Z <- as.im(f, Window(redwood)) Y <- rthin(redwood, Z) # pattern on a linear network A <- runiflpp(30, simplenet) B <- rthin(A, 0.2) g <- function(x,y,seg,tp) { ifelse(y < 0.4, 1, 0.5) } B <- rthin(A, linfun(g, simplenet)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat/man/istat.Rd0000644000176200001440000000304113160710621014276 0ustar liggesusers\name{istat} \alias{istat} \title{Point and Click Interface for Exploratory Analysis of Point Pattern} \description{ Compute various summary functions for a point pattern using a point-and-click interface. 
The selection of functions includes \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{pcf}}, \code{\link{Fest}}, \code{\link{Gest}} and \code{\link{Jest}}.
The object can be used to specify the domain of a multi-dimensional point pattern (see \code{\link{ppx}}) and in various geometrical calculations (see \code{\link{volume.boxx}}, \code{\link{diameter.boxx}}, \code{\link{eroded.volumes}}). The optional argument \code{unitname} specifies the name of the unit of length. See \code{\link{unitname}} for valid formats. } \value{ An object of class \code{"boxx"}. There is a print method for this class. } \author{ \spatstatAuthors. } \seealso{ \code{\link{ppx}}, \code{\link{volume.boxx}}, \code{\link{diameter.boxx}}, \code{\link{eroded.volumes.boxx}}. } \examples{ boxx(c(0,10),c(0,10),c(0,5),c(0,1), unitname=c("metre","metres")) } \keyword{spatial} \keyword{datagen} spatstat/man/Smooth.Rd0000644000176200001440000000147113160710621014430 0ustar liggesusers\name{Smooth} \alias{Smooth} \title{Spatial smoothing of data} \description{ Generic function to perform spatial smoothing of spatial data. } \usage{ Smooth(X, ...) } \arguments{ \item{X}{Some kind of spatial data} \item{\dots}{Arguments passed to methods.} } \details{ This generic function calls an appropriate method to perform spatial smoothing on the spatial dataset \code{X}. Methods for this function include \itemize{ \item \code{\link{Smooth.ppp}} for point patterns \item \code{\link{Smooth.msr}} for measures \item \code{\link{Smooth.fv}} for function value tables } } \seealso{ \code{\link{Smooth.ppp}}, \code{\link{Smooth.im}}, \code{\link{Smooth.msr}}, \code{\link{Smooth.fv}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/cut.lpp.Rd0000644000176200001440000001001513160710571014542 0ustar liggesusers\name{cut.lpp} \alias{cut.lpp} \title{Classify Points in a Point Pattern on a Network} \description{ For a point pattern on a linear network, classify the points into distinct types according to the numerical marks in the pattern, or according to another variable. } \usage{ \method{cut}{lpp}(x, z=marks(x), ...) 
a factor (of length equal to the number of points in \code{x}) determining the classification of each point in \code{x}. Levels of the factor determine the classification. \item a numeric vector (of length equal to the number of points in \code{x}). The range of values of \code{z} will be divided into bands (the number of bands is determined by \code{\dots}) and \code{z} will be converted to a factor using \code{\link{cut.default}}.
Each point of \code{x} will be classified according to the tile of the tessellation into which it falls. \item a character string, giving the name of one of the columns of \code{marks(x)}, if this is a data frame. \item a character string identifying one of the coordinates: the spatial coordinates \code{"x"}, \code{"y"} or the segment identifier \code{"seg"} or the fractional coordinate along the segment, \code{"tp"}. } The default is to take \code{z} to be the vector of marks in \code{x} (or the first column in the data frame of marks of \code{x}, if it is a data frame). If the marks are numeric, then the range of values of the numerical marks is divided into several intervals, and each interval is associated with a level of a factor. The result is a marked point pattern, on the same linear network, with the same point locations as \code{x}, but with the numeric mark of each point discretised by replacing it by the factor level. This is a convenient way to transform a marked point pattern which has numeric marks into a multitype point pattern, for example to plot it or analyse it. See the examples. To select some points from \code{x}, use the subset operators \code{\link{[.lpp}} or \code{\link{subset.lpp}} instead. } \seealso{ \code{\link{cut}}, \code{\link{lpp}}, \code{\link{lintess}}, \code{\link{linfun}}, \code{\link{linim}} } \examples{ X <- runiflpp(20, simplenet) f <- linfun(function(x,y,seg,tp) { x }, simplenet) plot(cut(X, f, breaks=4)) plot(cut(X, "x", breaks=4)) plot(cut(X, "seg")) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/as.solist.Rd0000644000176200001440000000176013160710571015103 0ustar liggesusers\name{as.solist} \alias{as.solist} \title{ Convert List of Two-Dimensional Spatial Objects } \description{ Given a list of two-dimensional spatial objects, convert it to the class \code{"solist"}. 
} \usage{ as.solist(x, \dots) } \arguments{ \item{x}{ A list of objects, each representing a two-dimensional spatial dataset. } \item{\dots}{ Additional arguments passed to \code{\link{solist}}. } } \details{ This command makes the list \code{x} into an object of class \code{"solist"} (spatial object list). See \code{\link{solist}} for details. The entries in the list \code{x} should be two-dimensional spatial datasets (not necessarily of the same class). } \value{ A list, usually of class \code{"solist"}. } \seealso{ \code{\link{solist}}, \code{\link{as.anylist}}, \code{\link{solapply}}. } \examples{ x <- list(cells, density(cells)) y <- as.solist(x) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{list} \keyword{manip} spatstat/man/methods.funxy.Rd0000644000176200001440000000336613160710621015777 0ustar liggesusers\name{methods.funxy} \alias{methods.funxy} %DoNotExport \alias{contour.funxy} \alias{persp.funxy} \alias{plot.funxy} \Rdversion{1.1} \title{ Methods for Spatial Functions } \description{ Methods for objects of the class \code{"funxy"}. } \usage{ \method{contour}{funxy}(x, \dots) \method{persp}{funxy}(x, \dots) \method{plot}{funxy}(x, \dots) } \arguments{ \item{x}{ Object of class \code{"funxy"} representing a function of \eqn{x,y} coordinates. } \item{\dots}{ Named arguments controlling the plot. See Details. } } \details{ These are methods for the generic functions \code{\link{plot}}, \code{\link{contour}} and \code{\link{persp}} for the class \code{"funxy"} of spatial functions. Objects of class \code{"funxy"} are created, for example, by the commands \code{\link{distfun}} and \code{\link{funxy}}. The \code{plot}, \code{contour} and \code{persp} methods first convert \code{x} to a pixel image object using \code{\link{as.im}}, then display it using \code{\link{plot.im}}, \code{\link{contour.im}} or \code{\link{persp.im}}. 
Additional arguments \code{\dots} are either passed to \code{\link{as.im.function}} to control the spatial resolution of the pixel image, or passed to \code{\link{contour.im}}, \code{\link{persp.im}} or \code{\link{plot.im}} to control the appearance of the plot. } \value{ \code{NULL}. } \author{\adrian and \rolf } \seealso{ \code{\link{funxy}}, \code{\link{distfun}}, \code{\link{as.im}}, \code{\link{plot.im}}, \code{\link{persp.im}}, \code{\link{contour.im}}, \code{\link{spatstat.options}} } \examples{ data(letterR) f <- distfun(letterR) contour(f) contour(f, W=owin(c(1,5),c(-1,4)), eps=0.1) } \keyword{spatial} \keyword{methods} spatstat/man/plot.symbolmap.Rd0000644000176200001440000000456513160710621016146 0ustar liggesusers\name{plot.symbolmap} \alias{plot.symbolmap} \title{ Plot a Graphics Symbol Map } \description{ Plot a representation of a graphics symbol map, similar to a plot legend. } \usage{ \method{plot}{symbolmap}(x, \dots, main, xlim = NULL, ylim = NULL, vertical = FALSE, side = c("bottom", "left", "top", "right"), annotate = TRUE, labelmap = NULL, add = FALSE, nsymbols = NULL) } \arguments{ \item{x}{ Graphics symbol map (object of class \code{"symbolmap"}). } \item{\dots}{ Additional graphics arguments passed to \code{\link{points}}, \code{\link{symbols}} or \code{\link{axis}}. } \item{main}{ Main title for the plot. A character string. } \item{xlim,ylim}{ Coordinate limits for the plot. Numeric vectors of length 2. } \item{vertical}{ Logical. Whether to plot the symbol map in a vertical orientation. } \item{side}{ Character string specifying the position of the text that annotates the symbols. } \item{annotate}{ Logical. Whether to annotate the symbols with labels. } \item{labelmap}{ Transformation of the labels. A function or a scale factor which will be applied to the data values corresponding to the plotted symbols. } \item{add}{ Logical value indicating whether to add the plot to the current plot (\code{add=TRUE}) or to initialise a new plot. 
} \item{nsymbols}{ Optional. The number of symbols that should be displayed. (This may not be exactly obeyed.) } } \details{ A graphics symbol map is an association between data values and graphical symbols. This command plots the graphics symbol map itself, in the style of a plot legend. } \value{ None. } \author{ \spatstatAuthors. } \seealso{ \code{\link{symbolmap}} to create a symbol map. \code{\link{invoke.symbolmap}} to apply the symbol map to some data and plot the resulting symbols. } \examples{ g <- symbolmap(inputs=letters[1:10], pch=11:20) plot(g) g2 <- symbolmap(range=c(-1,1), shape=function(x) ifelse(x > 0, "circles", "squares"), size=function(x) sqrt(ifelse(x > 0, x/pi, -x)), bg = function(x) ifelse(abs(x) < 1, "red", "black")) plot(g2, vertical=TRUE, side="left", col.axis="blue", cex.axis=2) } \keyword{spatial} \keyword{hplot} spatstat/man/scanLRTS.Rd0000644000176200001440000001167213160710621014614 0ustar liggesusers\name{scanLRTS} \alias{scanLRTS} \title{ Likelihood Ratio Test Statistic for Scan Test } \description{ Calculate the Likelihood Ratio Test Statistic for the Scan Test, at each spatial location. } \usage{ scanLRTS(X, r, \dots, method = c("poisson", "binomial"), baseline = NULL, case = 2, alternative = c("greater", "less", "two.sided"), saveopt = FALSE, Xmask = NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Radius of circle to use. A single number or a numeric vector. } \item{\dots}{ Optional. Arguments passed to \code{\link{as.mask}} to determine the spatial resolution of the computations. } \item{method}{ Either \code{"poisson"} or \code{"binomial"} specifying the type of likelihood. } \item{baseline}{ Baseline for the Poisson intensity, if \code{method="poisson"}. A pixel image or a function. } \item{case}{ Which type of point should be interpreted as a case, if \code{method="binomial"}. Integer or character string. 
Logical value indicating whether to save the optimal value of \code{r} at each location.
The alternative hypothesis is that the circle of radius \code{r} centred at \eqn{u} has a higher proportion of points of the second type, than expected under the null hypothesis. } If \code{r} is a vector of more than one value for the radius, then the calculations described above are performed for every value of \code{r}. Then the maximum over \code{r} is taken for each spatial location \eqn{u}. The resulting pixel value of \code{scanLRTS} at a location \eqn{u} is the profile maximum of the Likelihood Ratio Test Statistic, that is, the maximum of the Likelihood Ratio Test Statistic for circles of all radii, centred at the same location \eqn{u}. If you have already performed a scan test using \code{\link{scan.test}}, the Likelihood Ratio Test Statistic can be extracted from the test result using the function \code{\link{as.im.scan.test}}. } \section{Warning: window size}{ Note that the result of \code{scanLRTS} is a pixel image on a larger window than the original window of \code{X}. The expanded window contains the centre of any circle of radius \code{r} that has nonempty intersection with the original window. } \value{ A pixel image (object of class \code{"im"}) whose pixel values are the values of the (profile) Likelihood Ratio Test Statistic at each spatial location. } \references{ Kulldorff, M. (1997) A spatial scan statistic. \emph{Communications in Statistics --- Theory and Methods} \bold{26}, 1481--1496. 
} \author{\adrian and \rolf } \seealso{ \code{\link{scan.test}}, \code{\link{as.im.scan.test}} } \examples{ plot(scanLRTS(redwood, 0.1, method="poisson")) sc <- scanLRTS(chorley, 1, method="binomial", case="larynx") plot(sc) scanstatchorley <- max(sc) } \keyword{htest} \keyword{spatial} spatstat/man/eval.fasp.Rd0000644000176200001440000000570613160710621015043 0ustar liggesusers\name{eval.fasp} \alias{eval.fasp} \title{Evaluate Expression Involving Function Arrays} \description{ Evaluates any expression involving one or more function arrays (\code{fasp} objects) and returns another function array. } \usage{ eval.fasp(expr, envir, dotonly=TRUE) } \arguments{ \item{expr}{ An expression involving the names of objects of class \code{"fasp"}. } \item{envir}{ Optional. The environment in which to evaluate the expression, or a named list containing \code{"fasp"} objects to be used in the expression. } \item{dotonly}{Logical. Passed to \code{\link{eval.fv}}.} } \details{ This is a wrapper to make it easier to perform pointwise calculations with the arrays of summary functions used in spatial statistics. A function array (object of class \code{"fasp"}) can be regarded as a matrix whose entries are functions. Objects of this kind are returned by the command \code{\link{alltypes}}. Suppose \code{X} is an object of class \code{"fasp"}. Then \code{eval.fasp(X+3)} effectively adds 3 to the value of every function in the array \code{X}, and returns the resulting object. Suppose \code{X} and \code{Y} are two objects of class \code{"fasp"} which are compatible (for example the arrays must have the same dimensions). Then \code{eval.fasp(X + Y)} will add the corresponding functions in each cell of the arrays \code{X} and \code{Y}, and return the resulting array of functions. Suppose \code{X} is an object of class \code{"fasp"} and \code{f} is an object of class \code{"fv"}. 
Then \code{eval.fasp(X + f)} will add the function \code{f} to the functions in each cell of the array \code{X}, and return the resulting array of functions. In general, \code{expr} can be any expression involving (a) the \emph{names} of objects of class \code{"fasp"} or \code{"fv"}, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.fasp} determines which of the \emph{variable names} in the expression \code{expr} refer to objects of class \code{"fasp"}. The expression is then evaluated for each cell of the array using \code{\link{eval.fv}}. The expression \code{expr} must be vectorised. There must be at least one object of class \code{"fasp"} in the expression. All such objects must be compatible. } \value{ Another object of class \code{"fasp"}. } \seealso{ \code{\link{fasp.object}}, \code{\link{Kest}} } \examples{ # manipulating the K function K <- alltypes(amacrine, "K") # expressions involving a fasp object eval.fasp(K + 3) L <- eval.fasp(sqrt(K/pi)) # expression involving two fasp objects D <- eval.fasp(K - L) # subtracting the unmarked K function from the cross-type K functions K0 <- Kest(unmark(amacrine)) DK <- eval.fasp(K - K0) ## Use of 'envir' S <- eval.fasp(1-G, list(G=alltypes(amacrine, "G"))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/zapsmall.im.Rd0000644000176200001440000000136213160710621015405 0ustar liggesusers\name{zapsmall.im} \alias{zapsmall.im} \title{Rounding of Pixel Values} \description{ Modifies a pixel image, identifying those pixels that have values very close to zero, and replacing the value by zero. } \usage{ zapsmall.im(x, digits) } \arguments{ \item{x}{Pixel image (object of class \code{"im"}).} \item{digits}{ Argument passed to \code{\link{zapsmall}} indicating the precision to be used. } } \details{ The function \code{\link{zapsmall}} is applied to each pixel value of the image \code{x}. } \value{ Another pixel image. 
} \seealso{ \code{\link{zapsmall}} } \examples{ data(cells) D <- density(cells) zapsmall.im(D) } \author{\ege and \adrian } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/deriv.fv.Rd0000644000176200001440000000730613160710571014711 0ustar liggesusers\name{deriv.fv} \alias{deriv.fv} \title{ Calculate Derivative of Function Values } \description{ Applies numerical differentiation to the values in selected columns of a function value table. } \usage{ \method{deriv}{fv}(expr, which = "*", ..., method=c("spline", "numeric"), kinks=NULL, periodic=FALSE, Dperiodic=periodic) } \arguments{ \item{expr}{ Function values to be differentiated. A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). } \item{which}{ Character vector identifying which columns of the table should be differentiated. Either a vector containing names of columns, or one of the wildcard strings \code{"*"} or \code{"."} explained below. } \item{\dots}{ Extra arguments passed to \code{\link[stats]{smooth.spline}} to control the differentiation algorithm, if \code{method="spline"}. } \item{method}{ Differentiation method. A character string, partially matched to either \code{"spline"} or \code{"numeric"}. } \item{kinks}{ Optional vector of \eqn{x} values where the derivative is allowed to be discontinuous. } \item{periodic}{ Logical value indicating whether the function \code{expr} is periodic. } \item{Dperiodic}{ Logical value indicating whether the resulting derivative should be a periodic function. } } \details{ This command performs numerical differentiation on the function values in a function value table (object of class \code{"fv"}). The differentiation is performed either by \code{\link[stats]{smooth.spline}} or by a naive numerical difference algorithm. The command \code{\link{deriv}} is generic. This is the method for objects of class \code{"fv"}. 
Differentiation is applied to every column (or to each of the selected columns) of function values in turn, using the function argument as the \eqn{x} coordinate and the selected column as the \eqn{y} coordinate. The original function values are then replaced by the corresponding derivatives. The optional argument \code{which} specifies which of the columns of function values in \code{expr} will be differentiated. The default (indicated by the wildcard \code{which="*"}) is to differentiate all function values, i.e.\ all columns except the function argument. Alternatively \code{which="."} designates the subset of function values that are displayed in the default plot. Alternatively \code{which} can be a character vector containing the names of columns of \code{expr}. If the argument \code{kinks} is given, it should be a numeric vector giving the discontinuity points of the function: the value or values of the function argument at which the function is not differentiable. Differentiation will be performed separately on intervals between the discontinuity points. If \code{periodic=TRUE} then the function \code{expr} is taken to be periodic, with period equal to the range of the function argument in \code{expr}. The resulting derivative is periodic. If \code{periodic=FALSE} but \code{Dperiodic=TRUE}, then the \emph{derivative} is assumed to be periodic. This would be appropriate if \code{expr} is the cumulative distribution function of an angular variable, for example. } \value{ Another function value table (object of class \code{"fv"}) of the same format. 
} \author{\adrian and \rolf } \seealso{ \code{\link{with.fv}}, \code{\link{fv.object}}, \code{\link[stats]{smooth.spline}} } \examples{ G <- Gest(cells) plot(deriv(G, which=".", spar=0.5)) A <- pairorient(redwood, 0.05, 0.15) DA <- deriv(A, spar=0.6, Dperiodic=TRUE) } \keyword{spatial} \keyword{math} \keyword{nonparametric} spatstat/man/residuals.ppm.Rd0000644000176200001440000002020113160710621015735 0ustar liggesusers\name{residuals.ppm} \alias{residuals.ppm} \title{ Residuals for Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{ppm}(object, type="raw", \dots, check=TRUE, drop=FALSE, fittedvalues=NULL, new.coef=NULL, dropcoef=FALSE, quad=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which residuals should be calculated. } \item{type}{ String indicating the type of residuals to be calculated. Current options are \code{"raw"}, \code{"inverse"}, \code{"pearson"} and \code{"score"}. A partial match is adequate. } \item{\dots}{ Ignored. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. See \code{\link{quad.ppm}} for explanation. } \item{fittedvalues}{ Vector of fitted values for the conditional intensity at the quadrature points, from which the residuals will be computed. For expert use only. } \item{new.coef}{ Optional. Numeric vector of coefficients for the model, replacing \code{coef(object)}. See the section on Modified Residuals below. } \item{dropcoef}{ Internal use only. } \item{quad}{ Optional. Data specifying how to re-fit the model. 
A list of arguments passed to \code{\link{quadscheme}}. See the section on Modified Residuals below. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function computes several kinds of residuals for the fit of a point process model to a spatial point pattern dataset (Baddeley et al, 2005). Use \code{\link{plot.msr}} to plot the residuals directly, or \code{\link{diagnose.ppm}} to produce diagnostic plots based on these residuals. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. This fitted model object contains complete information about the original data pattern. Residuals are attached both to the data points and to some other points in the window of observation (namely, to the dummy points of the quadrature scheme used to fit the model). If the fitted model is correct, then the sum of the residuals over all (data and dummy) points in a spatial region \eqn{B} has mean zero. For further explanation, see Baddeley et al (2005). The type of residual is chosen by the argument \code{type}. Current options are \describe{ \item{\code{"raw"}:}{ the raw residuals \deqn{ r_j = z_j - w_j \lambda_j }{ r[j] = z[j] - w[j] lambda[j] } at the quadrature points \eqn{u_j}{u[j]}, where \eqn{z_j}{z[j]} is the indicator equal to 1 if \eqn{u_j}{u[j]} is a data point and 0 if \eqn{u_j}{u[j]} is a dummy point; \eqn{w_j}{w[j]} is the quadrature weight attached to \eqn{u_j}{u[j]}; and \deqn{\lambda_j = \hat\lambda(u_j,x)}{lambda[j] = lambda(u[j],x)} is the conditional intensity of the fitted model at \eqn{u_j}{u[j]}. These are the spatial analogue of the martingale residuals of a one-dimensional counting process. 
} \item{\code{"inverse"}:}{ the `inverse-lambda' residuals (Baddeley et al, 2005) \deqn{ r^{(I)}_j = \frac{r_j}{\lambda_j} = \frac{z_j}{\lambda_j} - w_j }{ rI[j] = r[j]/lambda[j] = z[j]/lambda[j] - w[j] } obtained by dividing the raw residuals by the fitted conditional intensity. These are a counterpart of the exponential energy marks (see \code{\link{eem}}). } \item{\code{"pearson"}:}{ the Pearson residuals (Baddeley et al, 2005) \deqn{ r^{(P)}_j = \frac{r_j}{\sqrt{\lambda_j}} = \frac{z_j}{\sqrt{\lambda_j}} - w_j \sqrt{\lambda_j} }{ rP[j] = r[j]/sqrt(lambda[j]) = z[j]/sqrt(lambda[j]) - w[j] sqrt(lambda[j]) } obtained by dividing the raw residuals by the square root of the fitted conditional intensity. The Pearson residuals are standardised, in the sense that if the model (true and fitted) is Poisson, then the sum of the Pearson residuals in a spatial region \eqn{B} has variance equal to the area of \eqn{B}. } \item{\code{"score"}:}{ the score residuals (Baddeley et al, 2005) \deqn{ r_j = (z_j - w_j \lambda_j) x_j }{ r[j] = (z[j] - w[j] lambda[j]) * x[j,] } obtained by multiplying the raw residuals \eqn{r_j}{r[j]} by the covariates \eqn{x_j}{x[j,]} for quadrature point \eqn{j}. The score residuals always sum to zero. } } The result of \code{residuals.ppm} is a measure (object of class \code{"msr"}). Use \code{\link{plot.msr}} to plot the residuals directly, or \code{\link{diagnose.ppm}} to produce diagnostic plots based on these residuals. Use \code{\link{integral.msr}} to compute the total residual. By default, the window of the measure is the same as the original window of the data. If \code{drop=TRUE} then the window is the domain of integration of the pseudolikelihood or composite likelihood. This only matters when the model \code{object} was fitted using the border correction: in that case, if \code{drop=TRUE} the window of the residuals is the erosion of the original data window by the border correction distance \code{rbord}. 
} \section{Modified Residuals}{ Sometimes we want to modify the calculation of residuals by using different values for the model parameters. This capability is provided by the arguments \code{new.coef} and \code{quad}. If \code{new.coef} is given, then the residuals will be computed by taking the model parameters to be \code{new.coef}. This should be a numeric vector of the same length as the vector of fitted model parameters \code{coef(object)}. If \code{new.coef} is missing and \code{quad} is given, then the model parameters will be determined by re-fitting the model using a new quadrature scheme specified by \code{quad}. Residuals will be computed for the original model \code{object} using these new parameter values. The argument \code{quad} should normally be a list of arguments in \code{name=value} format that will be passed to \code{\link{quadscheme}} (together with the original data points) to determine the new quadrature scheme. It may also be a quadrature scheme (object of class \code{"quad"}) to which the model should be fitted, or a point pattern (object of class \code{"ppp"}) specifying the \emph{dummy points} in a new quadrature scheme. } \references{ Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. 
} \seealso{ \code{\link{msr}}, \code{\link{diagnose.ppm}}, \code{\link{ppm.object}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells, ~x, Strauss(r=0.15)) # Pearson residuals rp <- residuals(fit, type="pe") rp # simulated data X <- rStrauss(100,0.7,0.05) # fit Strauss model fit <- ppm(X, ~1, Strauss(0.05)) res.fit <- residuals(fit) # check that total residual is 0 integral.msr(residuals(fit, drop=TRUE)) # true model parameters truecoef <- c(log(100), log(0.7)) res.true <- residuals(fit, new.coef=truecoef) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/whist.Rd0000644000176200001440000000431113160710621014311 0ustar liggesusers\name{whist} \alias{whist} \title{ Weighted Histogram } \description{ Computes the weighted histogram of a set of observations with a given set of weights. } \usage{ whist(x, breaks, weights = NULL) } \arguments{ \item{x}{ Numeric vector of observed values. } \item{breaks}{ Vector of breakpoints for the histogram. } \item{weights}{ Numeric vector of weights for the observed values. } } \details{ This low-level function computes (but does not plot) the weighted histogram of a vector of observations \code{x} using a given vector of \code{weights}. The arguments \code{x} and \code{weights} should be numeric vectors of equal length. They may include \code{NA} or infinite values. The argument \code{breaks} should be a numeric vector whose entries are strictly increasing. These values define the boundaries between the successive histogram cells. The breaks \emph{do not} have to span the range of the observations. There are \code{N-1} histogram cells, where \code{N = length(breaks)}. An observation \code{x[i]} falls in the \code{j}th cell if \code{breaks[j] <= x[i] < breaks[j+1]} (for \code{j < N-1}) or \code{breaks[j] <= x[i] <= breaks[j+1]} (for \code{j = N-1}). 
The weighted histogram value \code{h[j]} for the \code{j}th cell is the sum of \code{weights[i]} for all observations \code{x[i]} that fall in the cell. Note that, in contrast to the function \code{\link{hist}}, the function \code{whist} does not require the breakpoints to span the range of the observations \code{x}. Values of \code{x} that fall outside the range of \code{breaks} are handled separately; their total weight is returned as an attribute of the histogram. } \value{ A numeric vector of length \code{N-1} containing the histogram values, where \code{N = length(breaks)}. The return value also has attributes \code{"low"} and \code{"high"} giving the total weight of all observations that are less than the lowest breakpoint, or greater than the highest breakpoint, respectively. } \examples{ x <- rnorm(100) b <- seq(-1,1,length=21) w <- runif(100) whist(x,b,w) } \author{\adrian and \rolf with thanks to Peter Dalgaard. } \keyword{arith} spatstat/man/rNeymanScott.Rd0000644000176200001440000002245413160710621015611 0ustar liggesusers\name{rNeymanScott} \alias{rNeymanScott} \title{Simulate Neyman-Scott Process} \description{ Generate a random point pattern, a realisation of the Neyman-Scott cluster process. } \usage{ rNeymanScott(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), \dots, lmax=NULL, nsim=1, drop=TRUE, nonempty=TRUE, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{expand}{ Size of the expansion of the simulation window for generating parent points. A single non-negative number. } \item{rcluster}{ A function which generates random clusters, or other data specifying the random cluster mechanism. See Details. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{rcluster}. } \item{lmax}{ Optional. 
Upper bound on the values of \code{kappa} when \code{kappa} is a function or pixel image. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{nonempty}{ Logical. If \code{TRUE} (the default), a more efficient algorithm is used, in which parents are generated conditionally on having at least one offspring point. If \code{FALSE}, parents are generated even if they have no offspring. Both choices are valid; the default is recommended unless you need to simulate all the parent points for some other purpose. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern: see Details. } \details{ This algorithm generates a realisation of the general Neyman-Scott process, with the cluster mechanism given by the function \code{rcluster}. First, the algorithm generates a Poisson point process of \dQuote{parent} points with intensity \code{kappa} in an expanded window as explained below. Here \code{kappa} may be a single positive number, a function \code{kappa(x,y)}, or a pixel image object of class \code{"im"} (see \code{\link{im.object}}). See \code{\link{rpoispp}} for details. Second, each parent point is replaced by a random cluster of points. These clusters are combined together to yield a single point pattern, and the restriction of this pattern to the window \code{win} is then returned as the result of \code{rNeymanScott}. The expanded window consists of \code{\link{as.rectangle}(win)} extended by the amount \code{expand} in each direction. 
The size of the expansion is saved in the attribute \code{"expand"} and may be extracted by \code{attr(X, "expand")} where \code{X} is the generated point pattern. The argument \code{rcluster} specifies the cluster mechanism. It may be either: \itemize{ \item A \code{function} which will be called to generate each random cluster (the offspring points of each parent point). The function should expect to be called in the form \code{rcluster(x0,y0,\dots)} for a parent point at a location \code{(x0,y0)}. The return value of \code{rcluster} should specify the coordinates of the points in the cluster; it may be a list containing elements \code{x,y}, or a point pattern (object of class \code{"ppp"}). If it is a marked point pattern then the result of \code{rNeymanScott} will be a marked point pattern. \item A \code{list(mu, f)} where \code{mu} specifies the mean number of offspring points in each cluster, and \code{f} generates the random displacements (vectors pointing from the parent to the offspring). In this case, the number of offspring in a cluster is assumed to have a Poisson distribution, implying that the Neyman-Scott process is also a Cox process. The first element \code{mu} should be either a single nonnegative number (interpreted as the mean of the Poisson distribution of cluster size) or a pixel image or a \code{function(x,y)} giving a spatially varying mean cluster size (interpreted in the sense of Waagepetersen, 2007). The second element \code{f} should be a function that will be called once in the form \code{f(n)} to generate \code{n} independent and identically distributed displacement vectors (i.e. as if there were a cluster of size \code{n} with a parent at the origin \code{(0,0)}). The function should return a point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link[grDevices]{xy.coords}} that specifies the coordinates of \code{n} points. 
} If required, the intermediate stages of the simulation (the parents and the individual clusters) can also be extracted from the return value of \code{rNeymanScott} through the attributes \code{"parents"} and \code{"parentid"}. The attribute \code{"parents"} is the point pattern of parent points. The attribute \code{"parentid"} is an integer vector specifying the parent for each of the points in the simulated pattern. Neyman-Scott models where \code{kappa} is a single number and \code{rcluster = list(mu,f)} can be fitted to data using the function \code{\link{kppm}}. } \section{Inhomogeneous Neyman-Scott Processes}{ There are several different ways of specifying a spatially inhomogeneous Neyman-Scott process: \itemize{ \item The point process of parent points can be inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process according to which the parent points are generated. \item The number of points in a typical cluster can be spatially varying. If the argument \code{rcluster} is a list of two elements \code{mu, f} and the first entry \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then \code{mu} is interpreted as the reference intensity for offspring points, in the sense of Waagepetersen (2007). For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu(x, y) * g(x-x0, y-y0)} where \code{g} is the probability density of the offspring displacements generated by the function \code{f}. Equivalently, clusters are first generated with a constant expected number of points per cluster: the constant is \code{mumax}, the maximum of \code{mu}. Then the offspring are randomly \emph{thinned} (see \code{\link{rthin}}) with spatially-varying retention probabilities given by \code{mu/mumax}. 
\item The entire mechanism for generating a cluster can be dependent on the location of the parent point. If the argument \code{rcluster} is a function, then the cluster associated with a parent point at location \code{(x0,y0)} will be generated by calling \code{rcluster(x0, y0, \dots)}. The behaviour of this function could depend on the location \code{(x0,y0)} in any fashion. } Note that if \code{kappa} is an image, the spatial domain covered by this image must be large enough to include the \emph{expanded} window in which the parent points are to be generated. This requirement means that \code{win} must be small enough so that the expansion of \code{as.rectangle(win)} is contained in the spatial domain of \code{kappa}. As a result, one may wind up having to simulate the process in a window smaller than what is really desired. In the first two cases, the intensity of the Neyman-Scott process is equal to \code{kappa * mu} if at least one of \code{kappa} or \code{mu} is a single number, and is otherwise equal to an integral involving \code{kappa}, \code{mu} and \code{f}. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rGaussPoisson}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}} } \examples{ # each cluster consists of 5 points in a disc of radius 0.2 nclust <- function(x0, y0, radius, n) { return(runifdisc(n, radius, centre=c(x0, y0))) } plot(rNeymanScott(10, 0.2, nclust, radius=0.2, n=5)) # multitype Neyman-Scott process (each cluster is a multitype process) nclust2 <- function(x0, y0, radius, n, types=c("a", "b")) { X <- runifdisc(n, radius, centre=c(x0, y0)) M <- sample(types, n, replace=TRUE) marks(X) <- M return(X) } plot(rNeymanScott(15,0.1,nclust2, radius=0.1, n=5)) } \references{ Neyman, J. and Scott, E.L. (1958) A statistical approach to problems of cosmology. \emph{Journal of the Royal Statistical Society, Series B} \bold{20}, 1--43. Waagepetersen, R. 
(2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/superimpose.Rd0000644000176200001440000001667513160710621015546 0ustar liggesusers\name{superimpose} \alias{superimpose} \alias{superimpose.ppp} \alias{superimpose.splitppp} \alias{superimpose.ppplist} \alias{superimpose.psp} \alias{superimpose.default} \title{Superimpose Several Geometric Patterns} \description{ Superimpose any number of point patterns or line segment patterns. } \usage{ superimpose(\dots) \method{superimpose}{ppp}(\dots, W=NULL, check=TRUE) \method{superimpose}{psp}(\dots, W=NULL, check=TRUE) \method{superimpose}{splitppp}(\dots, W=NULL, check=TRUE) \method{superimpose}{ppplist}(\dots, W=NULL, check=TRUE) \method{superimpose}{default}(\dots) } \arguments{ \item{\dots}{ Any number of arguments, each of which represents either a point pattern or a line segment pattern or a list of point patterns. } \item{W}{ Optional. Data determining the window for the resulting pattern. Either a window (object of class \code{"owin"}, or something acceptable to \code{\link{as.owin}}), or a function which returns a window, or one of the strings \code{"convex"}, \code{"rectangle"}, \code{"bbox"} or \code{"none"}. } \item{check}{ Logical value (passed to \code{\link{ppp}} or \code{\link{psp}} as appropriate) determining whether to check the geometrical validity of the resulting pattern. } } \value{ For \code{superimpose.ppp}, a point pattern (object of class \code{"ppp"}). For \code{superimpose.default}, either a point pattern (object of class \code{"ppp"}) or a \code{list(x,y)}. For \code{superimpose.psp}, a line segment pattern (object of class \code{"psp"}). } \details{ This function is used to superimpose several geometric patterns of the same kind, producing a single pattern of the same kind. 
The function \code{superimpose} is generic, with methods for the class \code{ppp} of point patterns, the class \code{psp} of line segment patterns, and a default method. There is also a method for \code{lpp}, described separately in \code{\link{superimpose.lpp}}. The dispatch to a method is initially determined by the class of the \emph{first} argument in \code{\dots}. \itemize{ \item \code{default}: If the first argument is \emph{not} an object of class \code{ppp} or \code{psp}, then the default method \code{superimpose.default} is executed. This checks the class of all arguments, and dispatches to the appropriate method. Arguments of class \code{ppplist} can be handled. \item \code{ppp}: If the first \code{\dots} argument is an object of class \code{ppp} then the method \code{superimpose.ppp} is executed. All arguments in \code{\dots} must be either \code{ppp} objects or lists with components \code{x} and \code{y}. The result will be an object of class \code{ppp}. \item psp: If the first \code{\dots} argument is an object of class \code{psp} then the \code{psp} method is dispatched and all \code{\dots} arguments must be \code{psp} objects. The result is a \code{psp} object. } The patterns are \emph{not} required to have the same window of observation. The window for the superimposed pattern is controlled by the argument \code{W}. \itemize{ \item If \code{W} is a window (object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}) then this determines the window for the superimposed pattern. \item If \code{W} is \code{NULL}, or the character string \code{"none"}, then windows are extracted from the geometric patterns, as follows. For \code{superimpose.psp}, all arguments \code{\dots} are line segment patterns (objects of class \code{"psp"}); their observation windows are extracted; the union of these windows is computed; and this union is taken to be the window for the superimposed pattern. 
For \code{superimpose.ppp} and \code{superimpose.default}, the arguments \code{\dots} are inspected, and any arguments which are point patterns (objects of class \code{"ppp"}) are selected; their observation windows are extracted, and the union of these windows is taken to be the window for the superimposed point pattern. For \code{superimpose.default} if none of the arguments is of class \code{"ppp"} then no window is computed and the result of \code{superimpose} is a \code{list(x,y)}. \item If \code{W} is one of the strings \code{"convex"}, \code{"rectangle"} or \code{"bbox"} then a window for the superimposed pattern is computed from the coordinates of the points or the line segments as follows. \describe{ \item{\code{"bbox"}:}{the bounding box of the points or line segments (see \code{\link{bounding.box.xy}});} \item{\code{"convex"}:}{the Ripley-Rasson estimator of a convex window (see \code{\link{ripras}});} \item{\code{"rectangle"}:}{the Ripley-Rasson estimator of a rectangular window (using \code{\link{ripras}} with argument \code{shape="rectangle"}).} } \item If \code{W} is a function, then this function is used to compute a window for the superimposed pattern from the coordinates of the points or the line segments. The function should accept input of the form \code{list(x,y)} and is expected to return an object of class \code{"owin"}. Examples of such functions are \code{\link{ripras}} and \code{\link{bounding.box.xy}}. } The arguments \code{\dots} may be \emph{marked} patterns. The marks of each component pattern must have the same format. Numeric and character marks may be ``mixed''. If there is such mixing then the numeric marks are coerced to character in the combining process. If the mark structures are all data frames, then these data frames must have the same number of columns and identical column names. 
If the arguments \code{\dots} are given in the form \code{name=value}, then the \code{name}s will be used as an extra column of marks attached to the elements of the corresponding patterns. } \seealso{ \code{\link{superimpose.lpp}}, \code{\link{concatxy}}, \code{\link{quadscheme}}. } \examples{ # superimposing point patterns p1 <- runifrect(30) p2 <- runifrect(42) s1 <- superimpose(p1,p2) # Unmarked pattern. p3 <- list(x=rnorm(20),y=rnorm(20)) s2 <- superimpose(p3,p2,p1) # Default method gets called. s2a <- superimpose(p1,p2,p3) # Same as s2 except for order of points. s3 <- superimpose(clyde=p1,irving=p2) # Marked pattern; marks a factor # with levels "clyde" and "irving"; # warning given. marks(p1) <- factor(sample(LETTERS[1:3],30,TRUE)) marks(p2) <- factor(sample(LETTERS[1:3],42,TRUE)) s5 <- superimpose(clyde=p1,irving=p2) # Marked pattern with extra column marks(p2) <- data.frame(a=marks(p2),b=runif(42)) s6 <- try(superimpose(p1,p2)) # Gives an error. marks(p1) <- data.frame(a=marks(p1),b=1:30) s7 <- superimpose(p1,p2) # O.K. # how to make a 2-type point pattern with types "a" and "b" u <- superimpose(a = rpoispp(10), b = rpoispp(20)) # how to make a 2-type point pattern with types 1 and 2 u <- superimpose("1" = rpoispp(10), "2" = rpoispp(20)) # superimposing line segment patterns X <- rpoisline(10) Y <- as.psp(matrix(runif(40), 10, 4), window=owin()) Z <- superimpose(X, Y) # being unreasonable \dontrun{ if(FALSE) { crud <- try(superimpose(p1,p2,X,Y)) # Gives an error, of course! } } } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/Extract.quad.Rd0000644000176200001440000000221413160710621015516 0ustar liggesusers\name{Extract.quad} \alias{[.quad} \title{Subset of Quadrature Scheme} \description{ Extract a subset of a quadrature scheme. } \usage{ \method{[}{quad}(x, ...) } \arguments{ \item{x}{ A quadrature scheme (object of class \code{"quad"}). } \item{\dots}{ Arguments passed to \code{\link{[.ppp}} to determine the subset. 
} } \value{ A quadrature scheme (object of class \code{"quad"}). } \details{ This function extracts a designated subset of a quadrature scheme. The function \code{[.quad} is a method for \code{\link{[}} for the class \code{"quad"}. It extracts a designated subset of a quadrature scheme. The subset to be extracted is determined by the arguments \code{\dots} which are interpreted by \code{\link{[.ppp}}. Thus it is possible to take the subset consisting of all quadrature points that lie inside a given region, or a subset of quadrature points identified by numeric indices. } \seealso{ \code{\link{quad.object}}, \code{\link{[.ppp}}. } \examples{ Q <- quadscheme(nztrees) W <- owin(c(0,148),c(0,95)) # a subwindow Q[W] } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/flipxy.Rd0000644000176200001440000000214513160710621014471 0ustar liggesusers\name{flipxy} \alias{flipxy} \alias{flipxy.owin} \alias{flipxy.ppp} \alias{flipxy.psp} \alias{flipxy.im} \title{Exchange X and Y Coordinates} \description{ Exchanges the \eqn{x} and \eqn{y} coordinates in a spatial dataset. } \usage{ flipxy(X) \method{flipxy}{owin}(X) \method{flipxy}{ppp}(X) \method{flipxy}{psp}(X) \method{flipxy}{im}(X) } \arguments{ \item{X}{Spatial dataset. An object of class \code{"owin"}, \code{"ppp"}, \code{"psp"} or \code{"im"}. } } \value{ Another object of the same type, representing the result of swapping the \eqn{x} and \eqn{y} coordinates. } \details{ This function swaps the \eqn{x} and \eqn{y} coordinates of a spatial dataset. This could also be performed using the command \code{\link{affine}}, but \code{flipxy} is faster. The function \code{\link{flipxy}} is generic, with methods for the classes of objects listed above. 
} \seealso{ \code{\link{affine}}, \code{\link{reflect}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(cells) X <- flipxy(cells) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/linim.Rd0000644000176200001440000000601013160710621014261 0ustar liggesusers\name{linim} \alias{linim} \title{ Create Pixel Image on Linear Network } \description{ Creates an object of class \code{"linim"} that represents a pixel image on a linear network. } \usage{ linim(L, Z, \dots, restrict=TRUE, df=NULL) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{Z}{ Pixel image (object of class \code{"im"}). } \item{\dots}{Ignored.} \item{restrict}{ Advanced use only. Logical value indicating whether to ensure that all pixels in \code{Z} which do not lie on the network \code{L} have pixel value \code{NA}. This condition must be satisfied, but if you set \code{restrict=FALSE} it will not be checked, and the code will run faster. } \item{df}{ Advanced use only. Data frame giving full details of the mapping between the pixels of \code{Z} and the lines of \code{L}. See Details. } } \details{ This command creates an object of class \code{"linim"} that represents a pixel image defined on a linear network. Typically such objects are used to represent the result of smoothing or model-fitting on the network. Most users will not need to call \code{linim} directly. The argument \code{L} is a linear network (object of class \code{"linnet"}). It gives the exact spatial locations of the line segments of the network, and their connectivity. The argument \code{Z} is a pixel image object of class \code{"im"} that gives a pixellated approximation of the function values. For increased efficiency, advanced users may specify the optional argument \code{df}. This is a data frame giving the precomputed mapping between the pixels of \code{Z} and the line segments of \code{L}. 
It should have columns named \code{xc, yc} containing the coordinates of the pixel centres, \code{x,y} containing the projections of these pixel centres onto the linear network, \code{mapXY} identifying the line segment on which each projected point lies, and \code{tp} giving the parametric position of \code{(x,y)} along the segment. } \value{ Object of class \code{"linim"} that also inherits the class \code{"im"}. There is a special method for plotting this class. } \author{ \adrian } \seealso{ \code{\link{plot.linim}}, \code{\link{linnet}}, \code{\link{eval.linim}}, \code{\link{Math.linim}}, \code{\link{im}}. } \examples{ Z <- as.im(function(x,y) {x-y}, Frame(simplenet)) X <- linim(simplenet, Z) X } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. } \keyword{spatial} spatstat/man/stratrand.Rd0000644000176200001440000000403113160710621015154 0ustar liggesusers\name{stratrand} \alias{stratrand} \title{Stratified random point pattern} \description{ Generates a \dQuote{stratified random} pattern of points in a window, by dividing the window into rectangular tiles and placing \code{k} random points in each tile. } \usage{ stratrand(window, nx, ny, k = 1) } \arguments{ \item{window}{A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of tiles in each row. } \item{ny}{Number of tiles in each column. } \item{k}{Number of random points to generate in each tile. 
} } \value{ A list with two components \code{x} and \code{y}, which are numeric vectors giving the coordinates of the random points. } \details{ The bounding rectangle of \code{window} is divided into a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles. In each tile, \code{k} random points are generated independently with a uniform distribution in that tile. Note that some of these grid points may lie outside the window, if \code{window} is not of type \code{"rectangle"}. The function \code{\link{inside.owin}} can be used to select those grid points which do lie inside the window. See the examples. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) as well as in simulating random point patterns. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{inside.owin}}, \code{\link{gridcentres}} } \examples{ w <- unit.square() xy <- stratrand(w, 10, 10) \dontrun{ plot(w) points(xy) } # polygonal boundary bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2), y=c(0.1,0.1,0.5,0.7,0.3)) w <- owin(c(0,1), c(0,1), poly=bdry) xy <- stratrand(w, 10, 10, 3) \dontrun{ plot(w) points(xy) } # determine which grid points are inside polygon ok <- inside.owin(xy$x, xy$y, w) \dontrun{ plot(w) points(xy$x[ok], xy$y[ok]) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/linnet.Rd0000644000176200001440000000620713160710621014452 0ustar liggesusers\name{linnet} \alias{linnet} \title{ Create a Linear Network } \description{ Creates an object of class \code{"linnet"} representing a network of line segments. } \usage{ linnet(vertices, m, edges, sparse=FALSE, warn=TRUE) } \arguments{ \item{vertices}{ Point pattern (object of class \code{"ppp"}) specifying the vertices of the network. } \item{m}{ Adjacency matrix. A matrix or sparse matrix of logical values equal to \code{TRUE} when the corresponding vertices are joined by a line. (Specify either \code{m} or \code{edges}.) 
} \item{edges}{ Edge list. A two-column matrix of integers, specifying all pairs of vertices that should be joined by an edge. (Specify either \code{m} or \code{edges}.) } \item{sparse}{ Optional. Logical value indicating whether to use a sparse matrix representation of the network. See Details. } \item{warn}{ Logical value indicating whether to issue a warning if the resulting network is not connected. } } \details{ An object of class \code{"linnet"} represents a network of straight line segments in two dimensions. The function \code{linnet} creates such an object from the minimal information: the spatial location of each vertex (endpoint, crossing point or meeting point of lines) and information about which vertices are joined by an edge. If \code{sparse=FALSE} (the default), the algorithm will compute and store various properties of the network, including the adjacency matrix \code{m} and a matrix giving the shortest-path distances between each pair of vertices in the network. This is more efficient for small datasets. However it can require large amounts of memory and can take a long time to execute. If \code{sparse=TRUE}, then the shortest-path distances will not be computed, and the network adjacency matrix \code{m} will be stored as a sparse matrix. This saves a lot of time and memory when creating the linear network. If the argument \code{edges} is given, then it will also determine the \emph{ordering} of the line segments when they are stored or extracted. For example, \code{edges[i,]} corresponds to \code{as.psp(L)[i]}. } \value{ Object of class \code{"linnet"} representing the linear network. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link[spatstat.data]{simplenet}} for an example of a linear network. \code{\link[spatstat:methods.linnet]{methods.linnet}} for methods applicable to \code{linnet} objects. 
Special tools: \code{\link{thinNetwork}}, \code{\link{insertVertices}}, \code{\link{connected.linnet}}, \code{\link{lixellate}}. \code{\link{delaunayNetwork}} for the Delaunay triangulation as a network. \code{\link{ppp}}, \code{\link{psp}}. } \examples{ # letter 'A' specified by adjacency matrix v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7)) m <- matrix(FALSE, 5,5) for(i in 1:4) m[i,i+1] <- TRUE m[2,4] <- TRUE m <- m | t(m) letterA <- linnet(v, m) plot(letterA) # letter 'A' specified by edge list edg <- cbind(1:4, 2:5) edg <- rbind(edg, c(2,4)) letterA <- linnet(v, edges=edg) } \keyword{spatial} spatstat/man/harmonise.owin.Rd0000644000176200001440000000367713160710621016131 0ustar liggesusers\name{harmonise.owin} \alias{harmonise.owin} \alias{harmonize.owin} \title{Make Windows Compatible} \description{ Convert several windows to a common pixel raster. } \usage{ \method{harmonise}{owin}(\dots) \method{harmonize}{owin}(\dots) } \arguments{ \item{\dots}{ Any number of windows (objects of class \code{"owin"}) or data which can be converted to windows by \code{\link{as.owin}}. } } \details{ This function makes any number of windows compatible, by converting them all to a common pixel grid. This only has an effect if one of the windows is a binary mask. If all the windows are rectangular or polygonal, they are returned unchanged. The command \code{\link{harmonise}} is generic. This is the method for objects of class \code{"owin"}. Each argument must be a window (object of class \code{"owin"}), or data that can be converted to a window by \code{\link{as.owin}}. The common pixel grid is determined by inspecting all the windows in the argument list, computing the bounding box of all the windows, then finding the binary mask with the finest spatial resolution, and extending its pixel grid to cover the bounding box. The return value is a list with entries corresponding to the input arguments. 
If the arguments were named (\code{name=value}) then the return value also carries these names. If you just want to determine the appropriate pixel resolution, without converting the windows, use \code{\link{commonGrid}}. } \value{ A list of windows, of length equal to the number of arguments \code{\dots}. The list belongs to the class \code{"solist"}. } \author{\adrian and \rolf } \examples{ harmonise(X=letterR, Y=grow.rectangle(Frame(letterR), 0.2), Z=as.mask(letterR, eps=0.1), V=as.mask(letterR, eps=0.07)) } \seealso{ \code{\link{commonGrid}}, \code{\link{harmonise.im}}, \code{\link{as.owin}} } \keyword{spatial} \keyword{manip} spatstat/man/is.empty.Rd0000644000176200001440000000224213160710621014724 0ustar liggesusers\name{is.empty} \alias{is.empty} \alias{is.empty.owin} \alias{is.empty.ppp} \alias{is.empty.psp} \alias{is.empty.default} \title{Test Whether An Object Is Empty} \description{ Checks whether the argument is an empty window, an empty point pattern, etc. } \usage{ is.empty(x) \method{is.empty}{owin}(x) \method{is.empty}{ppp}(x) \method{is.empty}{psp}(x) \method{is.empty}{default}(x) } \arguments{ \item{x}{ A window (object of class \code{"owin"}), a point pattern (object of class \code{"ppp"}), or a line segment pattern (object of class \code{"psp"}). } } \details{ This function tests whether the object \code{x} represents an empty spatial object, such as an empty window, a point pattern with zero points, or a line segment pattern with zero line segments. An empty window can be obtained as the output of \code{\link{intersect.owin}}, \code{\link{erosion}}, \code{\link{opening}}, \code{\link{complement.owin}} and some other operations. An empty point pattern or line segment pattern can be obtained as the result of simulation. } \value{ Logical value. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/weighted.median.Rd0000644000176200001440000000260613160710621016214 0ustar liggesusers\name{weighted.median} \alias{weighted.median} \alias{weighted.quantile} \alias{weighted.var} \title{ Weighted Median, Quantiles or Variance } \description{ Compute the median, quantiles or variance of a set of numbers which have weights associated with them. } \usage{ weighted.median(x, w, na.rm = TRUE) weighted.quantile(x, w, probs=seq(0,1,0.25), na.rm = TRUE) weighted.var(x, w, na.rm = TRUE) } \arguments{ \item{x}{ Data values. A vector of numeric values, for which the median or quantiles are required. } \item{w}{ Weights. A vector of nonnegative numbers, of the same length as \code{x}. } \item{probs}{ Probabilities for which the quantiles should be computed. A numeric vector of values between 0 and 1. } \item{na.rm}{ Logical. Whether to ignore \code{NA} values. } } \details{ The \code{i}th observation \code{x[i]} is treated as having a weight proportional to \code{w[i]}. The weighted median is a value \code{m} such that the total weight of data to the left of \code{m} is equal to half the total weight. If there is no such value, linear interpolation is performed. } \value{ A numeric value or vector. } \author{ \adrian. } \seealso{ \code{\link[stats]{quantile}}, \code{\link[stats]{median}}. } \examples{ x <- 1:20 w <- runif(20) weighted.median(x, w) weighted.quantile(x, w) weighted.var(x, w) } \keyword{math} spatstat/man/rounding.Rd0000644000176200001440000000417413160710621015007 0ustar liggesusers\name{rounding} \alias{rounding} \alias{rounding.default} \alias{rounding.ppp} \alias{rounding.pp3} \alias{rounding.ppx} \title{ Detect Numerical Rounding } \description{ Given a numeric vector, or an object containing numeric spatial coordinates, determine whether the values have been rounded to a certain number of decimal places. 
} \usage{ rounding(x) \method{rounding}{default}(x) \method{rounding}{ppp}(x) \method{rounding}{pp3}(x) \method{rounding}{ppx}(x) } \arguments{ \item{x}{ A numeric vector, or an object containing numeric spatial coordinates. } } \details{ For a numeric vector \code{x}, this function determines whether the values have been rounded to a certain number of decimal places. \itemize{ \item If the entries of \code{x} are not all integers, then \code{rounding(x)} returns the smallest number of digits \code{d} after the decimal point such that \code{\link[base]{round}(x, digits=d)} is identical to \code{x}. For example if \code{rounding(x) = 2} then the entries of \code{x} are rounded to 2 decimal places, and are multiples of 0.01. \item If all the entries of \code{x} are integers, then \code{rounding(x)} returns \code{-d}, where \code{d} is the smallest number of digits \emph{before} the decimal point such that \code{\link[base]{round}(x, digits=-d)} is identical to \code{x}. For example if \code{rounding(x) = -3} then the entries of \code{x} are multiples of 1000. If \code{rounding(x) = 0} then the entries of \code{x} are integers but not multiples of 10. \item If all entries of \code{x} are equal to 0, the rounding is not determined, and a value of \code{NULL} is returned. } For a point pattern (object of class \code{"ppp"}) or similar object \code{x} containing numeric spatial coordinates, this procedure is applied to the spatial coordinates. } \value{ An integer. } \author{ \adrian and \rolf } \seealso{ \code{\link{round.ppp}} } \examples{ rounding(c(0.1, 0.3, 1.2)) rounding(c(1940, 1880, 2010)) rounding(0) rounding(cells) } \keyword{spatial} \keyword{math} spatstat/man/clusterset.Rd0000644000176200001440000001131013160710571015351 0ustar liggesusers\name{clusterset} \alias{clusterset} \title{ Allard-Fraley Estimator of Cluster Feature } \description{ Detect high-density features in a spatial point pattern using the (unrestricted) Allard-Fraley estimator. 
} \usage{ clusterset(X, what=c("marks", "domain"), \dots, verbose=TRUE, fast=FALSE, exact=!fast) } \arguments{ \item{X}{ A two-dimensional spatial point pattern (object of class \code{"ppp"}). } \item{what}{ Character string or character vector specifying the type of result. See Details. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{fast}{ Logical. If \code{FALSE} (the default), the Dirichlet tile areas will be computed exactly using polygonal geometry, so that the optimal choice of tiles will be computed exactly. If \code{TRUE}, the Dirichlet tile areas will be approximated using pixel counting, so the optimal choice will be approximate. } \item{exact}{ Logical. If \code{TRUE}, the Allard-Fraley estimator of the domain will be computed exactly using polygonal geometry. If \code{FALSE}, the Allard-Fraley estimator of the domain will be approximated by a binary pixel mask. The default is \code{exact=!fast}, so that the estimator is computed exactly unless \code{fast=TRUE}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to control the pixel resolution if \code{exact=FALSE}. } } \details{ Allard and Fraley (1997) developed a technique for recognising features of high density in a spatial point pattern in the presence of random clutter. This algorithm computes the \emph{unrestricted} Allard-Fraley estimator. The Dirichlet (Voronoi) tessellation of the point pattern \code{X} is computed. The smallest \code{m} Dirichlet cells are selected, where the number \code{m} is determined by a maximum likelihood criterion. \itemize{ \item If \code{fast=FALSE} (the default), the areas of the tiles of the Dirichlet tessellation will be computed exactly using polygonal geometry. This ensures that the optimal selection of tiles is computed exactly. \item If \code{fast=TRUE}, the Dirichlet tile areas will be approximated by counting pixels. This is faster, and is usually correct (depending on the pixel resolution, which is controlled by the arguments \code{\dots}). 
} The type of result depends on the character vector \code{what}. \itemize{ \item If \code{what="marks"} the result is the point pattern \code{X} with a vector of marks labelling each point with a value \code{yes} or \code{no} depending on whether the corresponding Dirichlet cell is selected by the Allard-Fraley estimator. In other words each point of \code{X} is labelled as either a cluster point or a non-cluster point. \item If \code{what="domain"}, the result is the Allard-Fraley estimator of the cluster feature set, which is the union of all the selected Dirichlet cells, represented as a window (object of class \code{"owin"}). \item If \code{what=c("marks", "domain")} the result is a list containing both of the results described above. } Computation of the Allard-Fraley set estimator depends on the argument \code{exact}. \itemize{ \item If \code{exact=TRUE} (the default), the Allard-Fraley set estimator will be computed exactly using polygonal geometry. The result is a polygonal window. \item If \code{exact=FALSE}, the Allard-Fraley set estimator will be approximated by a binary pixel mask. This is faster than the exact computation. The result is a binary mask. } } \value{ If \code{what="marks"}, a multitype point pattern (object of class \code{"ppp"}). If \code{what="domain"}, a window (object of class \code{"owin"}). If \code{what=c("marks", "domain")} (the default), a list consisting of a multitype point pattern and a window. } \references{ Allard, D. and Fraley, C. (1997) Nonparametric maximum likelihood estimation of features in spatial point processes using Voronoi tessellation. \emph{Journal of the American Statistical Association} \bold{92}, 1485--1493. 
} \author{ \adrian and \rolf } \seealso{ \code{\link{nnclean}}, \code{\link{sharpen}} } \examples{ opa <- par(mfrow=c(1,2)) W <- grow.rectangle(as.rectangle(letterR), 1) X <- superimpose(runifpoint(300, letterR), runifpoint(50, W), W=W) plot(W, main="clusterset(X, 'm')") plot(clusterset(X, "marks", fast=TRUE), add=TRUE, chars=c(1, 3), cols=1:2) plot(letterR, add=TRUE) plot(W, main="clusterset(X, 'd')") plot(clusterset(X, "domain", exact=FALSE), add=TRUE) plot(letterR, add=TRUE) par(opa) } \keyword{spatial} \keyword{classif} spatstat/man/edit.ppp.Rd0000644000176200001440000000312113160710571014700 0ustar liggesusers\name{edit.ppp} \alias{edit.ppp} \alias{edit.psp} \alias{edit.im} \title{ Invoke Text Editor on Spatial Data } \description{ Invokes a text editor allowing the user to inspect and change entries in a spatial dataset. } \usage{ \method{edit}{ppp}(name, \dots) \method{edit}{psp}(name, \dots) \method{edit}{im}(name, \dots) } \arguments{ \item{name}{ A spatial dataset (object of class \code{"ppp"}, \code{"psp"} or \code{"im"}). } \item{\dots}{ Other arguments passed to \code{\link[utils]{edit.data.frame}}. } } \details{ The function \code{\link[utils]{edit}} is generic. These functions are methods for spatial objects of class \code{"ppp"}, \code{"psp"} and \code{"im"}. The spatial dataset \code{name} is converted to a data frame or array, and the text editor is invoked. The user can change the values of spatial coordinates or marks of the points in a point pattern, or the coordinates or marks of the segments in a segment pattern, or the pixel values in an image. The names of the columns of marks can also be edited. If \code{name} is a pixel image, it is converted to a matrix and displayed in the same spatial orientation as if the image had been plotted. Note that the original object \code{name} is not changed; the function returns the edited dataset. } \value{ Object of the same kind as \code{name} containing the edited data. 
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link[utils]{edit.data.frame}}, \code{\link{edit.hyperframe}} } \examples{ if(interactive()) Z <- edit(cells) } \keyword{spatial} \keyword{manip} spatstat/man/text.ppp.Rd0000644000176200001440000000321613160710621014740 0ustar liggesusers\name{text.ppp} \alias{text.ppp} \alias{text.lpp} \alias{text.psp} \title{ Add Text Labels to Spatial Pattern } \description{ Plots a text label at the location of each point in a spatial point pattern, or each object in a spatial pattern of objects. } \usage{ \method{text}{ppp}(x, \dots) \method{text}{lpp}(x, \dots) \method{text}{psp}(x, \dots) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"}), a point pattern on a linear network (class \code{"lpp"}) or a spatial pattern of line segments (class \code{"psp"}). } \item{\dots}{ Additional arguments passed to \code{\link[graphics]{text.default}}. } } \details{ These functions are methods for the generic \code{\link{text}}. A text label is added to the existing plot, at the location of each point in the point pattern \code{x}, or near the location of the midpoint of each segment in the segment pattern \code{x}. Additional arguments \code{\dots} are passed to \code{\link[graphics]{text.default}} and may be used to control the placement of the labels relative to the point locations, and the size and colour of the labels. By default, the labels are the serial numbers 1 to \code{n}, where \code{n} is the number of points or segments in \code{x}. This can be changed by specifying the argument \code{labels}, which should be a vector of length \code{n}. } \value{ Null. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link[graphics]{text.default}} } \examples{ plot(cells) text(cells, pos=2) plot(Frame(cells)) text(cells, cex=1.5) S <- as.psp(simplenet) plot(S) text(S) } \keyword{spatial} \keyword{hplot} spatstat/man/pcf3est.Rd0000644000176200001440000001021613160710621014523 0ustar liggesusers\name{pcf3est} \Rdversion{1.1} \alias{pcf3est} \title{ Pair Correlation Function of a Three-Dimensional Point Pattern } \description{ Estimates the pair correlation function from a three-dimensional point pattern. } \usage{ pcf3est(X, ..., rmax = NULL, nrval = 128, correction = c("translation", "isotropic"), delta=NULL, adjust=1, biascorrect=TRUE) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{g_3(r)}{g3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{g_3(r)}{g3(r)} will be estimated. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{delta}{ Optional. Half-width of the Epanechnikov smoothing kernel. } \item{adjust}{ Optional. Adjustment factor for the default value of \code{delta}. } \item{biascorrect}{ Logical value. Whether to correct for underestimation due to truncation of the kernel near \eqn{r=0}. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the pair correlation function is \deqn{ g_3(r) = \frac{K_3'(r)}{4\pi r^2} }{ g3(r) = K3'(r)/(4 * pi * r^2) } where \eqn{K_3'}{K3'} is the derivative of the three-dimensional \eqn{K}-function (see \code{\link{K3est}}). The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The distance between each pair of distinct points is computed. 
Kernel smoothing is applied to these distance values (weighted by an edge correction factor) and the result is renormalised to give the estimate of \eqn{g_3(r)}{g3(r)}. The available edge corrections are: \describe{ \item{\code{"translation"}:}{ the Ohser translation correction estimator (Ohser, 1983; Baddeley et al, 1993) } \item{\code{"isotropic"}:}{ the three-dimensional counterpart of Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993). } } Kernel smoothing is performed using the Epanechnikov kernel with half-width \code{delta}. If \code{delta} is missing, the default is to use the rule-of-thumb \eqn{\delta = 0.26/\lambda^{1/3}}{delta = 0.26/lambda^(1/3)} where \eqn{\lambda = n/v}{lambda = n/v} is the estimated intensity, computed from the number \eqn{n} of data points and the volume \eqn{v} of the enclosing box. This default value of \code{delta} is multiplied by the factor \code{adjust}. The smoothing estimate of the pair correlation \eqn{g_3(r)}{g3(r)} is typically an underestimate when \eqn{r} is small, due to truncation of the kernel at \eqn{r=0}. If \code{biascorrect=TRUE}, the smoothed estimate is approximately adjusted for this bias. This is advisable whenever the dataset contains a sufficiently large number of points. } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. Additionally the value of \code{delta} is returned as an attribute of this object. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). 
\emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{ \adrian and Rana Moyeed. } \seealso{ \code{\link{K3est}}, \code{\link{pcf}} } \examples{ X <- rpoispp3(250) Z <- pcf3est(X) Zbias <- pcf3est(X, biascorrect=FALSE) if(interactive()) { opa <- par(mfrow=c(1,2)) plot(Z, ylim.covers=c(0, 1.2)) plot(Zbias, ylim.covers=c(0, 1.2)) par(opa) } attr(Z, "delta") } \keyword{spatial} \keyword{nonparametric} spatstat/man/affine.lpp.Rd0000644000176200001440000000507713160710571015213 0ustar liggesusers\name{affine.lpp} \alias{affine.lpp} \alias{shift.lpp} \alias{rotate.lpp} \alias{rescale.lpp} \alias{scalardilate.lpp} \title{Apply Geometrical Transformations to Point Pattern on a Linear Network} \description{ Apply geometrical transformations to a point pattern on a linear network. } \usage{ \method{affine}{lpp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) \method{shift}{lpp}(X, vec=c(0,0), \dots, origin=NULL) \method{rotate}{lpp}(X, angle=pi/2, \dots, centre=NULL) \method{scalardilate}{lpp}(X, f, \dots) \method{rescale}{lpp}(X, s, unitname) } \arguments{ \item{X}{Point pattern on a linear network (object of class \code{"lpp"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{angle}{Rotation angle in radians.} \item{f}{Scalar dilation factor.} \item{s}{ Unit conversion factor: the new units are \code{s} times the old units. } \item{\dots}{ Arguments passed to other methods. } \item{origin}{ Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } \item{unitname}{ Optional. New name for the unit of length. 
A value acceptable to the function \code{\link{unitname<-}} } } \value{ Another point pattern on a linear network (object of class \code{"lpp"}) representing the result of applying the geometrical transformation. } \details{ These functions are methods for the generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{rescale}} and \code{\link{scalardilate}} applicable to objects of class \code{"lpp"}. All of these functions perform geometrical transformations on the object \code{X}, except for \code{rescale}, which simply rescales the units of length. } \seealso{ \code{\link{lpp}}. Generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{rescale}}. } \examples{ X <- rpoislpp(2, simplenet) U <- rotate(X, pi) stretch <- diag(c(2,3)) Y <- affine(X, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(X, mat=shear, vec=c(0, 1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/eval.linim.Rd0000644000176200001440000000546213160710621015221 0ustar liggesusers\name{eval.linim} \alias{eval.linim} \title{Evaluate Expression Involving Pixel Images on Linear Network} \description{ Evaluates any expression involving one or more pixel images on a linear network, and returns a pixel image on the same linear network. } \usage{ eval.linim(expr, envir, harmonize=TRUE) } \arguments{ \item{expr}{An expression in the \R language, involving the names of objects of class \code{"linim"}.} \item{envir}{Optional. The environment in which to evaluate the expression.} \item{harmonize}{ Logical. Whether to resolve inconsistencies between the pixel grids. } } \details{ This function a wrapper to make it easier to perform pixel-by-pixel calculations. It is one of several functions whose names begin with \code{eval} which work on objects of different types. 
This particular function is designed to work with objects of class \code{"linim"} which represent pixel images on a linear network. Suppose \code{X} is a pixel image on a linear network (object of class \code{"linim"}). Then \code{eval.linim(X+3)} will add 3 to the value of every pixel in \code{X}, and return the resulting pixel image on the same linear network. Suppose \code{X} and \code{Y} are two pixel images on the same linear network, with compatible pixel dimensions. Then \code{eval.linim(X + Y)} will add the corresponding pixel values in \code{X} and \code{Y}, and return the resulting pixel image on the same linear network. In general, \code{expr} can be any expression in the R language involving (a) the \emph{names} of pixel images, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.linim} determines which of the \emph{variable names} in the expression \code{expr} refer to pixel images. Each such name is replaced by a matrix containing the pixel values. The expression is then evaluated. The result should be a matrix; it is taken as the matrix of pixel values. The expression \code{expr} must be vectorised. There must be at least one linear pixel image in the expression. All images must have compatible dimensions. If \code{harmonize=TRUE}, images that have incompatible dimensions will be resampled so that they are compatible. If \code{harmonize=FALSE}, images that are incompatible will cause an error. } \value{ An image object of class \code{"linim"}. 
} \seealso{ \code{\link{eval.im}}, \code{\link{linim}} } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y) {x-y}, W=M) X <- linim(simplenet, Z) X Y <- linfun(function(x,y,seg,tp){y^2+x}, simplenet) Y <- as.linim(Y) eval.linim(X + 3) eval.linim(X - Y) eval.linim(abs(X - Y)) Z <- eval.linim(sin(X * pi) + Y) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/lgcp.estK.Rd0000644000176200001440000002154713160710621015017 0ustar liggesusers\name{lgcp.estK} \alias{lgcp.estK} \title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast} \description{ Fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast. } \usage{ lgcp.estK(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the log-Gaussian Cox process model. } \item{covmodel}{ Specification of the covariance model for the log-Gaussian field. See Details. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits a log-Gaussian Cox point process (LGCP) model to a point pattern dataset by the Method of Minimum Contrast, using the K function of the point pattern. The shape of the covariance of the LGCP must be specified: the default is the exponential covariance function, but other covariance models can be selected. 
The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits a log-Gaussian Cox point process (LGCP) model to \code{X}, by finding the parameters of the LGCP model which give the closest match between the theoretical \eqn{K} function of the LGCP model and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model fitted is a stationary, isotropic log-Gaussian Cox process (\ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen, 2003, pp. 72-76). To define this process we start with a stationary Gaussian random field \eqn{Z} in the two-dimensional plane, with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}. Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process. The \eqn{K}-function of the LGCP is \deqn{ K(r) = \int_0^r 2\pi s \exp(C(s)) \, {\rm d}s. }{ K(r) = integral from 0 to r of (2 * pi * s * exp(C(s))) ds. } The intensity of the LGCP is \deqn{ \lambda = \exp(\mu + \frac{C(0)}{2}). }{ lambda= exp(mu + C(0)/2). 
} The covariance function \eqn{C(r)} is parametrised in the form \deqn{ C(r) = \sigma^2 c(r/\alpha) }{ C(r) = sigma^2 * c(r/alpha) } where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters controlling the strength and the scale of autocorrelation, respectively, and \eqn{c(r)} is a known covariance function determining the shape of the covariance. The strength and scale parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} will be estimated by the algorithm as the values \code{var} and \code{scale} respectively. The template covariance function \eqn{c(r)} must be specified as explained below. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. The template covariance function \eqn{c(r)} is specified using the argument \code{covmodel}. This should be of the form \code{list(model="modelname", \dots)} where \code{modelname} is a string identifying the template model as explained below, and \code{\dots} are optional arguments of the form \code{tag=value} giving the values of parameters controlling the \emph{shape} of the template model. The default is the exponential covariance \eqn{c(r) = e^{-r}}{c(r) = e^(-r)} so that the scaled covariance is \deqn{ C(r) = \sigma^2 e^{-r/\alpha}. }{ C(r) = sigma^2 * exp(-r/alpha). } To determine the template model, the string \code{"modelname"} will be prefixed by \code{"RM"} and the code will search for a function of this name in the \pkg{RandomFields} package. For a list of available models see \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package. For example the Matern covariance with exponent \eqn{\nu=0.3}{nu = 0.3} is specified by \code{covmodel=list(model="matern", nu=0.3)} corresponding to the function \code{RMmatern} in the \pkg{RandomFields} package. 
If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \note{ This function is considerably slower than \code{\link{lgcp.estpcf}} because of the computation time required for the integral in the \eqn{K}-function. Computation can be accelerated, at the cost of less accurate results, by setting \code{spatstat.options(fastK.lgcp=TRUE)}. } \references{ \ifelse{latex}{\out{M\o ller}}{Moller}, J, Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). 
Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ Rasmus Waagepetersen \email{rw@math.auc.dk}. Adapted for \pkg{spatstat} by \adrian Further modifications by Rasmus Waagepetersen and Shen Guochun, and by \ege. } \seealso{ \code{\link{lgcp.estpcf}} for alternative method of fitting LGCP. \code{\link{matclust.estK}}, \code{\link{thomas.estK}} for other models. \code{\link{mincontrast}} for the generic minimum contrast fitting algorithm, including important parameters that affect the accuracy of the fit. \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package, for covariance function models. \code{\link{Kest}} for the \eqn{K} function. } \examples{ if(interactive()) { u <- lgcp.estK(redwood) } else { # slightly faster - better starting point u <- lgcp.estK(redwood, c(var=1, scale=0.1)) } u plot(u) \testonly{ if(require(RandomFields)) { K <- Kest(redwood, r=seq(0, 0.1, length=9)) op <- spatstat.options(fastK.lgcp=TRUE) lgcp.estK(K, covmodel=list(model="matern", nu=0.3), control=list(maxit=2)) spatstat.options(op) } } if(FALSE) { ## takes several minutes! lgcp.estK(redwood, covmodel=list(model="matern", nu=0.3)) } } \keyword{spatial} \keyword{models} spatstat/man/will.expand.Rd0000644000176200001440000000171413160710621015404 0ustar liggesusers\name{will.expand} \alias{will.expand} \title{ Test Expansion Rule } \description{ Determines whether an expansion rule will actually expand the window or not. } \usage{ will.expand(x) } \arguments{ \item{x}{ Expansion rule. An object of class \code{"rmhexpand"}. } } \details{ An object of class \code{"rmhexpand"} describes a rule for expanding a simulation window. See \code{\link{rmhexpand}} for details. One possible expansion rule is to do nothing, i.e. not to expand the window. 
This command inspects the expansion rule \code{x} and determines whether it will or will not actually expand the window. It returns \code{TRUE} if the window will be expanded. } \value{ Logical value. } \author{\adrian and \rolf } \seealso{ \code{\link{rmhexpand}}, \code{\link{expand.owin}} } \examples{ x <- rmhexpand(distance=0.2) y <- rmhexpand(area=1) will.expand(x) will.expand(y) } \keyword{spatial} \keyword{manip} spatstat/man/rQuasi.Rd0000644000176200001440000000233313160710621014421 0ustar liggesusers\name{rQuasi} \alias{rQuasi} \title{ Generate Quasirandom Point Pattern in Given Window } \description{ Generates a quasirandom pattern of points in any two-dimensional window. } \usage{ rQuasi(n, W, type = c("Halton", "Hammersley"), ...) } \arguments{ \item{n}{ Maximum number of points to be generated. } \item{W}{ Window (object of class \code{"owin"}) in which to generate the points. } \item{type}{ String identifying the quasirandom generator. } \item{\dots}{ Arguments passed to the quasirandom generator. } } \details{ This function generates a quasirandom point pattern, using the quasirandom sequence generator \code{\link{Halton}} or \code{\link{Hammersley}} as specified. If \code{W} is a rectangle, exactly \code{n} points will be generated. If \code{W} is not a rectangle, \code{n} points will be generated in the containing rectangle \code{as.rectangle(W)}, and only the points lying inside \code{W} will be retained. } \value{ Point pattern (object of class \code{"ppp"}) inside the window \code{W}. } \seealso{ \code{\link{Halton}} } \examples{ plot(rQuasi(256, letterR)) } \author{\adrian , \rolf and \ege. } \keyword{spatial} \keyword{datagen} spatstat/man/Replace.im.Rd0000644000176200001440000000721013160710621015133 0ustar liggesusers\name{Replace.im} \alias{[<-.im} \title{Reset Values in Subset of Image} \description{ Reset the values in a subset of a pixel image. 
} \usage{ \method{[}{im}(x, i, j) <- value } \arguments{ \item{x}{ A two-dimensional pixel image. An object of class \code{"im"}. } \item{i}{ Object defining the subregion or subset to be replaced. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or a point pattern (an object of class \code{"ppp"}), or any type of index that applies to a matrix, or something that can be converted to a point pattern by \code{\link{as.ppp}} (using the window of \code{x}). } \item{j}{ An integer or logical vector serving as the column index if matrix indexing is being used. Ignored if \code{i} is appropriate to some sort of replacement \emph{other than} matrix indexing. } \item{value}{ Vector, matrix, factor or pixel image containing the replacement values. Short vectors will be recycled. } } \value{ The image \code{x} with the values replaced. } \details{ This function changes some of the pixel values in a pixel image. The image \code{x} must be an object of class \code{"im"} representing a pixel image defined inside a rectangle in two-dimensional space (see \code{\link{im.object}}). The subset to be changed is determined by the arguments \code{i,j} according to the following rules (which are checked in this order): \enumerate{ \item \code{i} is a spatial object such as a window, a pixel image with logical values, or a point pattern; or \item \code{i,j} are indices for the matrix \code{as.matrix(x)}; or \item \code{i} can be converted to a point pattern by \code{\link{as.ppp}(i, W=Window(x))}, and \code{i} is not a matrix. } If \code{i} is a spatial window (an object of class \code{"owin"}), the values of the image inside this window are changed. If \code{i} is a point pattern (an object of class \code{"ppp"}), then the values of the pixel image at the points of this pattern are changed. 
If \code{i} does not satisfy any of the conditions above, then the algorithm tries to interpret \code{i,j} as indices for the matrix \code{as.matrix(x)}. Either \code{i} or \code{j} may be missing or blank. If none of the conditions above are met, and if \code{i} is not a matrix, then \code{i} is converted into a point pattern by \code{\link{as.ppp}(i, W=Window(x))}. Again the values of the pixel image at the points of this pattern are changed. } \section{Warning}{ If you have a 2-column matrix containing the \eqn{x,y} coordinates of point locations, then to prevent this being interpreted as an array index, you should convert it to a \code{data.frame} or to a point pattern. } \seealso{ \code{\link{im.object}}, \code{\link{[.im}}, \code{\link{[}}, \code{\link{ppp.object}}, \code{\link{as.ppp}}, \code{\link{owin.object}} } \examples{ # make up an image X <- setcov(unit.square()) plot(X) # a rectangular subset W <- owin(c(0,0.5),c(0.2,0.8)) X[W] <- 2 plot(X) # a polygonal subset data(letterR) R <- affine(letterR, diag(c(1,1)/2), c(-2,-0.7)) X[R] <- 3 plot(X) # a point pattern P <- rpoispp(20) X[P] <- 10 plot(X) # change pixel value at a specific location X[list(x=0.1,y=0.2)] <- 7 # matrix indexing --- single vector index X[1:2570] <- 10 plot(X) # matrix indexing using double indices X[1:257,1:10] <- 5 plot(X) # matrix indexing using a matrix of indices X[cbind(1:257,1:257)] <- 10 X[cbind(257:1,1:257)] <- 10 plot(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/clickbox.Rd0000644000176200001440000000246113160710571014761 0ustar liggesusers\name{clickbox} \alias{clickbox} \title{Interactively Define a Rectangle} \description{ Allows the user to specify a rectangle by point-and-click in the display. } \usage{ clickbox(add=TRUE, \dots) } \arguments{ \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). 
} \item{\dots}{ Graphics arguments passed to \code{\link[graphics]{polygon}} to plot the box. } } \value{ A window (object of class \code{"owin"}) representing the selected rectangle. } \details{ This function allows the user to create a rectangular window by interactively clicking on the screen display. The user is prompted to point the mouse at any desired locations for two corners of the rectangle, and click the left mouse button to add each point. The return value is a window (object of class \code{"owin"}) representing the rectangle. This function uses the \R command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. } \seealso{ \code{\link{clickpoly}}, \code{\link{clickppp}}, \code{\link{clickdist}}, \code{\link[graphics]{locator}} } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{iplot} spatstat/man/border.Rd0000644000176200001440000000342513160710571014441 0ustar liggesusers\name{border} \alias{border} \title{Border Region of a Window} \description{ Computes the border region of a window, that is, the region lying within a specified distance of the boundary of a window. } \usage{ border(w, r, outside=FALSE, ...) } \arguments{ \item{w}{A window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. } \item{r}{Numerical value.} \item{outside}{Logical value determining whether to compute the border outside or inside \code{w}.} \item{\dots}{ Optional arguments passed to \code{\link{erosion}} (if \code{outside=FALSE}) or to \code{\link{dilation}} (if \code{outside=TRUE}). } } \value{ A window (object of class \code{"owin"}). } \details{ By default (if \code{outside=FALSE}), the border region is the subset of \code{w} lying within a distance \code{r} of the boundary of \code{w}. 
It is computed by eroding \code{w} by the distance \code{r} (using \code{\link{erosion}}) and subtracting this eroded window from the original window \code{w}. If \code{outside=TRUE}, the border region is the set of locations outside \code{w} lying within a distance \code{r} of \code{w}. It is computed by dilating \code{w} by the distance \code{r} (using \code{\link{dilation}}) and subtracting the original window \code{w} from the dilated window. } \author{\adrian and \rolf } \seealso{ \code{\link{erosion}}, \code{\link{dilation}} } \examples{ # rectangle u <- unit.square() border(u, 0.1) border(u, 0.1, outside=TRUE) # polygon \testonly{opa <- spatstat.options(npixel=32)} data(letterR) plot(letterR) plot(border(letterR, 0.1), add=TRUE) plot(border(letterR, 0.1, outside=TRUE), add=TRUE) \testonly{spatstat.options(opa)} } \keyword{spatial} \keyword{math} spatstat/man/bw.scott.Rd0000644000176200001440000000256513160710571014733 0ustar liggesusers\name{bw.scott} \alias{bw.scott} \title{ Scott's Rule for Bandwidth Selection for Kernel Density } \description{ Use Scott's rule of thumb to determine the smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.scott(X) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } } \details{ This function selects a bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{\sigma} is computed by the rule of thumb of Scott (1992, page 152). It is very fast to compute. This rule is designed for density estimation, and typically produces a larger bandwidth than \code{\link{bw.diggle}}. It is useful for estimating gradual trend. } \value{ A numerical vector of two elements giving the selected bandwidths in the \code{x} and \code{y} directions. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{bw.frac}}. 
} \examples{ data(lansing) attach(split(lansing)) b <- bw.scott(hickory) b \donttest{ plot(density(hickory, b)) } } \references{ Scott, D.W. (1992) \emph{Multivariate Density Estimation. Theory, Practice and Visualization}. New York: Wiley. } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/is.lpp.Rd0000644000176200001440000000107113160710621014360 0ustar liggesusers\name{is.lpp} \alias{is.lpp} \title{Test Whether An Object Is A Point Pattern on a Linear Network} \description{ Checks whether its argument is a point pattern on a linear network (object of class \code{"lpp"}). } \usage{ is.lpp(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the object \code{x} is a point pattern object of class \code{"lpp"}. } \value{ \code{TRUE} if \code{x} is a point pattern of class \code{"lpp"}, otherwise \code{FALSE}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/residuals.dppm.Rd0000644000176200001440000000215013160710621016104 0ustar liggesusers\name{residuals.dppm} \alias{residuals.dppm} \title{ Residuals for Fitted Determinantal Point Process Model } \description{ Given a determinantal point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{dppm}(object, \dots) } \arguments{ \item{object}{ The fitted determinantal point process model (an object of class \code{"dppm"}) for which residuals should be calculated. } \item{\dots}{ Arguments passed to \code{\link{residuals.ppm}}. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function extracts the intensity component of the model using \code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}} to compute the residuals. Use \code{\link{plot.msr}} to plot the residuals directly. 
} \seealso{ \code{\link{msr}}, \code{\link{dppm}} } \examples{ fit <- dppm(swedishpines ~ x, dppGauss()) rr <- residuals(fit) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/methods.rhohat.Rd0000644000176200001440000000651613160710621016113 0ustar liggesusers\name{methods.rhohat} \alias{methods.rhohat} %DoNotExport \alias{print.rhohat} \alias{plot.rhohat} \alias{predict.rhohat} \alias{simulate.rhohat} \title{ Methods for Intensity Functions of Spatial Covariate } \description{ These are methods for the class \code{"rhohat"}. } \usage{ \method{print}{rhohat}(x, ...) \method{plot}{rhohat}(x, ..., do.rug=TRUE) \method{predict}{rhohat}(object, ..., relative=FALSE, what=c("rho", "lo", "hi", "se")) \method{simulate}{rhohat}(object, nsim=1, ..., drop=TRUE) } \arguments{ \item{x,object}{ An object of class \code{"rhohat"} representing a smoothed estimate of the intensity function of a point process. } \item{\dots}{ Arguments passed to other methods. } \item{do.rug}{ Logical value indicating whether to plot the observed values of the covariate as a rug plot along the horizontal axis. } \item{relative}{ Logical value indicating whether to compute the estimated point process intensity (\code{relative=FALSE}) or the relative risk (\code{relative=TRUE}) in the case of a relative risk estimate. } \item{nsim}{ Number of simulations to be generated. } \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), a point pattern is returned. If \code{drop=FALSE}, a list of length 1 containing a point pattern is returned. } \item{what}{ Optional character string (partially matched) specifying which value should be calculated: either the function estimate (\code{what="rho"}, the default), the lower or upper end of the confidence interval (\code{what="lo"} or \code{what="hi"}) or the standard error (\code{what="se"}). 
} } \details{ These functions are methods for the generic commands \code{\link{print}}, \code{\link[graphics]{plot}}, \code{\link[stats]{predict}} and \code{\link[stats]{simulate}} for the class \code{"rhohat"}. An object of class \code{"rhohat"} is an estimate of the intensity of a point process, as a function of a given spatial covariate. See \code{\link{rhohat}}. The method \code{plot.rhohat} displays the estimated function \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally adds a \code{\link{rug}} plot of the observed values of the covariate. The method \code{predict.rhohat} computes a pixel image of the intensity \eqn{\rho(Z(u))}{rho(Z(u))} at each spatial location \eqn{u}, where \eqn{Z} is the spatial covariate. The method \code{simulate.rhohat} invokes \code{predict.rhohat} to determine the predicted intensity, and then simulates a Poisson point process with this intensity. } \value{ For \code{predict.rhohat} the value is a pixel image (object of class \code{"im"} or \code{"linim"}). For \code{simulate.rhohat} the value is a point pattern (object of class \code{"ppp"} or \code{"lpp"}). For other functions, the value is \code{NULL}. } \author{ \adrian } \seealso{ \code{\link{rhohat}} } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) rho <- rhohat(X, function(x,y){x}) rho plot(rho) Y <- predict(rho) plot(Y) plot(simulate(rho), add=TRUE) # fit <- ppm(X, ~x) rho <- rhohat(fit, "y") opa <- par(mfrow=c(1,2)) plot(predict(rho)) plot(predict(rho, relative=TRUE)) par(opa) plot(predict(rho, what="se")) } \keyword{spatial} \keyword{methods} spatstat/man/clarkevans.Rd0000644000176200001440000001061513160710571015314 0ustar liggesusers\name{clarkevans} \alias{clarkevans} \title{Clark and Evans Aggregation Index} \description{ Computes the Clark and Evans aggregation index \eqn{R} for a spatial point pattern. 
} \usage{ clarkevans(X, correction=c("none", "Donnelly", "cdf"), clipregion=NULL) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{correction}{ Character vector. The type of edge correction(s) to be applied. } \item{clipregion}{ Clipping region for the guard area correction. A window (object of class \code{"owin"}). See Details. } } \details{ The Clark and Evans (1954) aggregation index \eqn{R} is a crude measure of clustering or ordering of a point pattern. It is the ratio of the observed mean nearest neighbour distance in the pattern to that expected for a Poisson point process of the same intensity. A value \eqn{R>1} suggests ordering, while \eqn{R<1} suggests clustering. Without correction for edge effects, the value of \code{R} will be positively biased. Edge effects arise because, for a point of \code{X} close to the edge of the window, the true nearest neighbour may actually lie outside the window. Hence observed nearest neighbour distances tend to be larger than the true nearest neighbour distances. The argument \code{correction} specifies an edge correction or several edge corrections to be applied. It is a character vector containing one or more of the options \code{"none"}, \code{"Donnelly"}, \code{"guard"} and \code{"cdf"} (which are recognised by partial matching). These edge corrections are: \describe{ \item{"none":}{ No edge correction is applied. } \item{"Donnelly":}{ Edge correction of Donnelly (1978), available for rectangular windows only. The theoretical expected value of mean nearest neighbour distance under a Poisson process is adjusted for edge effects by the edge correction of Donnelly (1978). The value of \eqn{R} is the ratio of the observed mean nearest neighbour distance to this adjusted theoretical mean. } \item{"guard":}{ Guard region or buffer area method. 
The observed mean nearest neighbour distance for the point pattern \code{X} is re-defined by averaging only over those points of \code{X} that fall inside the sub-window \code{clipregion}. } \item{"cdf":}{ Cumulative Distribution Function method. The nearest neighbour distance distribution function \eqn{G(r)} of the stationary point process is estimated by \code{\link{Gest}} using the Kaplan-Meier type edge correction. Then the mean of the distribution is calculated from the cdf. } } Alternatively \code{correction="all"} selects all options. If the argument \code{clipregion} is given, then the selected edge corrections will be assumed to include \code{correction="guard"}. To perform a test based on the Clark-Evans index, see \code{\link{clarkevans.test}}. } \value{ A numeric value, or a numeric vector with named components \item{naive}{\eqn{R} without edge correction} \item{Donnelly}{\eqn{R} using Donnelly edge correction} \item{guard}{\eqn{R} using guard region} \item{cdf}{\eqn{R} using cdf method} (as selected by \code{correction}). The value of the \code{Donnelly} component will be \code{NA} if the window of \code{X} is not a rectangle. } \references{ Clark, P.J. and Evans, F.C. (1954) Distance to nearest neighbour as a measure of spatial relationships in populations \emph{Ecology} \bold{35}, 445--453. Donnelly, K. (1978) Simulations to determine the variance and edge-effect of total nearest neighbour distance. In I. Hodder (ed.) \emph{Simulation studies in archaeology}, Cambridge/New York: Cambridge University Press, pp 91--95. 
} \author{ John Rudge \email{rudge@esc.cam.ac.uk} with modifications by \adrian } \seealso{ \code{\link{clarkevans.test}}, \code{\link{hopskel}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ # Example of a clustered pattern clarkevans(redwood) # Example of an ordered pattern clarkevans(cells) # Random pattern X <- rpoispp(100) clarkevans(X) # How to specify a clipping region clip1 <- owin(c(0.1,0.9),c(0.1,0.9)) clip2 <- erosion(Window(cells), 0.1) clarkevans(cells, clipregion=clip1) clarkevans(cells, clipregion=clip2) } \keyword{spatial} \keyword{nonparametric} spatstat/man/as.data.frame.im.Rd0000644000176200001440000000164413160710571016175 0ustar liggesusers\name{as.data.frame.im} \alias{as.data.frame.im} \title{Convert Pixel Image to Data Frame} \description{ Convert a pixel image to a data frame } \usage{ \method{as.data.frame}{im}(x, ...) } \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{\dots}{Further arguments passed to \code{\link[base:as.data.frame]{as.data.frame.default}} to determine the row names and other features.} } \details{ This function takes the pixel image \code{x} and returns a data frame with three columns containing the pixel coordinates and the pixel values. The data frame entries are automatically sorted in increasing order of the \code{x} coordinate (and in increasing order of \code{y} within \code{x}). } \value{ A data frame. } \examples{ # artificial image Z <- setcov(square(1)) Y <- as.data.frame(Z) head(Y) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/summary.owin.Rd0000644000176200001440000000146313160710621015630 0ustar liggesusers\name{summary.owin} \alias{summary.owin} \title{Summary of a Spatial Window} \description{ Prints a useful description of a window object. 
} \usage{ \method{summary}{owin}(object, \dots) } \arguments{ \item{object}{Window (object of class \code{"owin"}).} \item{\dots}{Ignored.} } \details{ A useful description of the window \code{object} is printed. This is a method for the generic function \code{\link{summary}}. } \seealso{ \code{\link{summary}}, \code{\link{summary.ppp}}, \code{\link{print.owin}} } \examples{ summary(owin()) # the unit square data(demopat) W <- Window(demopat) # weird polygonal window summary(W) # describes it summary(as.mask(W)) # demonstrates current pixel resolution } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/relrisk.Rd0000644000176200001440000000253313160710621014632 0ustar liggesusers\name{relrisk} \alias{relrisk} \title{ Estimate of Spatially-Varying Relative Risk } \description{ Generic command to estimate the spatially-varying probability of each type of point, or the ratios of such probabilities. } \usage{ relrisk(X, \dots) } \arguments{ \item{X}{ Either a point pattern (class \code{"ppp"}) or a fitted point process model (class \code{"ppm"}) from which the probabilities will be estimated. } \item{\dots}{ Additional arguments appropriate to the method. } } \details{ In a point pattern containing several different types of points, we may be interested in the spatially-varying probability of each possible type, or the relative risks which are the ratios of such probabilities. The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. The function \code{\link{relrisk.ppp}} is the method for point pattern datasets. It computes \emph{nonparametric} estimates of relative risk by kernel smoothing. The function \code{\link{relrisk.ppm}} is the method for fitted point process models (class \code{"ppm"}). It computes \emph{parametric} estimates of relative risk, using the fitted model. } \seealso{ \code{\link{relrisk.ppp}}, \code{\link{relrisk.ppm}}. 
\author{\adrian \rolf and \ege } \keyword{spatial} spatstat/man/affine.tess.Rd0000644000176200001440000000550613160710571015373 0ustar liggesusers\name{affine.tess} \alias{reflect.tess} \alias{shift.tess} \alias{rotate.tess} \alias{scalardilate.tess} \alias{affine.tess} \title{Apply Geometrical Transformation To Tessellation} \description{ Apply various geometrical transformations of the plane to each tile in a tessellation. } \usage{ \method{reflect}{tess}(X) \method{shift}{tess}(X, \dots) \method{rotate}{tess}(X, angle=pi/2, \dots, centre=NULL) \method{scalardilate}{tess}(X, f, \dots) \method{affine}{tess}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X}{Tessellation (object of class \code{"tess"}).} \item{angle}{ Rotation angle in radians (positive values represent anticlockwise rotations). } \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{f}{Positive number giving scale factor.} \item{\dots}{Arguments passed to other methods.} \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } } \value{ Another tessellation (of class \code{"tess"}) representing the result of applying the geometrical transformation. } \details{ These are methods for the generic functions \code{\link{reflect}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{affine}} for tessellations (objects of class \code{"tess"}). The individual tiles of the tessellation, and the window containing the tessellation, are all subjected to the same geometrical transformation. The transformations are performed by the corresponding method for windows (class \code{"owin"}) or images (class \code{"im"}) depending on the type of tessellation. 
If the argument \code{origin} is used in \code{shift.tess} it is interpreted as applying to the window containing the tessellation. Then all tiles are shifted by the same vector. } \seealso{ Generic functions \code{\link{reflect}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{affine}}. Methods for windows: \code{\link{reflect.default}}, \code{\link{shift.owin}}, \code{\link{rotate.owin}}, \code{\link{scalardilate.owin}}, \code{\link{affine.owin}}. Methods for images: \code{\link{reflect.im}}, \code{\link{shift.im}}, \code{\link{rotate.im}}, \code{\link{scalardilate.im}}, \code{\link{affine.im}}. } \examples{ live <- interactive() if(live) { H <- hextess(letterR, 0.2) plot(H) plot(reflect(H)) plot(rotate(H, pi/3)) } else H <- hextess(letterR, 0.6) # shear transformation shear <- matrix(c(1,0,0.6,1),2,2) sH <- affine(H, shear) if(live) plot(sH) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/as.hyperframe.Rd0000644000176200001440000000503313160710571015725 0ustar liggesusers\name{as.hyperframe} \Rdversion{1.1} \alias{as.hyperframe} \alias{as.hyperframe.default} \alias{as.hyperframe.data.frame} \alias{as.hyperframe.hyperframe} \alias{as.hyperframe.listof} \alias{as.hyperframe.anylist} \title{ Convert Data to Hyperframe } \description{ Converts data from any suitable format into a hyperframe. } \usage{ as.hyperframe(x, \dots) \method{as.hyperframe}{default}(x, \dots) \method{as.hyperframe}{data.frame}(x, \dots, stringsAsFactors=FALSE) \method{as.hyperframe}{hyperframe}(x, \dots) \method{as.hyperframe}{listof}(x, \dots) \method{as.hyperframe}{anylist}(x, \dots) } \arguments{ \item{x}{ Data in some other format. } \item{\dots}{ Optional arguments passed to \code{\link{hyperframe}}. } \item{stringsAsFactors}{ Logical. If \code{TRUE}, any column of the data frame \code{x} that contains character strings will be converted to a \code{factor}. If \code{FALSE}, no such conversion will occur. 
} } \details{ A hyperframe is like a data frame, except that its entries can be objects of any kind. The generic function \code{as.hyperframe} converts any suitable kind of data into a hyperframe. There are methods for the classes \code{data.frame}, \code{listof}, \code{anylist} and a default method, all of which convert data that is like a hyperframe into a hyperframe object. (The method for the class \code{listof} and \code{anylist} converts a list of objects, of arbitrary type, into a hyperframe with one column.) These methods do not discard any information. There are also methods for other classes (see \code{\link{as.hyperframe.ppx}}) which extract the coordinates from a spatial dataset. These methods do discard some information. } \section{Conversion of Strings to Factors}{ Note that \code{as.hyperframe.default} will convert a character vector to a factor. It behaves like \code{\link{as.data.frame}}. However \code{as.hyperframe.data.frame} does not convert strings to factors; it respects the structure of the data frame \code{x}. The behaviour can be changed using the argument \code{stringsAsFactors}. } \value{ An object of class \code{"hyperframe"} created by \code{\link{hyperframe}}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{hyperframe}}, \code{\link{as.hyperframe.ppx}} } \examples{ df <- data.frame(x=runif(4),y=letters[1:4]) as.hyperframe(df) sims <- list() for(i in 1:3) sims[[i]] <- rpoispp(42) as.hyperframe(as.listof(sims)) as.hyperframe(as.solist(sims)) } \keyword{spatial} \keyword{manip} spatstat/man/rhohat.Rd0000644000176200001440000002673713160710621014460 0ustar liggesusers\name{rhohat} \alias{rhohat} \alias{rhohat.ppp} \alias{rhohat.quad} \alias{rhohat.ppm} \alias{rhohat.lpp} \alias{rhohat.lppm} \concept{Resource Selection Function} \concept{Prospectivity} \title{ Smoothing Estimate of Intensity as Function of a Covariate } \description{ Computes a smoothing estimate of the intensity of a point process, as a function of a (continuous) spatial covariate. } \usage{ rhohat(object, covariate, ...) \method{rhohat}{ppp}(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local"), subset=NULL, dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95) \method{rhohat}{quad}(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local"), subset=NULL, dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95) \method{rhohat}{ppm}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local"), subset=NULL, dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95) \method{rhohat}{lpp}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local"), subset=NULL, nd=1000, eps=NULL, random=TRUE, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95) 
\method{rhohat}{lppm}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local"), subset=NULL, nd=1000, eps=NULL, random=TRUE, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{covariate}{ Either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location. Alternatively one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{weights}{ Optional weights attached to the data points. Either a numeric vector of weights for each data point, or a pixel image (object of class \code{"im"}) or a \code{function(x,y)} providing the weights. } \item{baseline}{ Optional baseline for intensity function. A \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the baseline at any location. } \item{method}{ Character string determining the smoothing method. See Details. } \item{horvitz}{ Logical value indicating whether to use Horvitz-Thompson weights. See Details. } \item{smoother}{ Character string determining the smoothing algorithm. See Details. } \item{subset}{ Optional. A spatial window (object of class \code{"owin"}) specifying a subset of the data, from which the estimate should be calculated. } \item{dimyx,eps,nd,random}{ Arguments controlling the pixel resolution at which the covariate will be evaluated. See Details. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link{density.default}}). 
} \item{n, from, to}{ Arguments passed to \code{\link{density.default}} to control the number and range of values at which the function will be estimated. } \item{bwref}{ Optional. An alternative value of \code{bw} to use when smoothing the reference density (the density of the covariate values observed at all locations in the window). } \item{\dots}{ Additional arguments passed to \code{\link{density.default}} or \code{\link[locfit]{locfit}}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{confidence}{ Confidence level for confidence intervals. A number between 0 and 1. } } \details{ This command estimates the relationship between point process intensity and a given spatial covariate. Such a relationship is sometimes called a \emph{resource selection function} (if the points are organisms and the covariate is a descriptor of habitat) or a \emph{prospectivity index} (if the points are mineral deposits and the covariate is a geological variable). This command uses a nonparametric smoothing method which does not assume a particular form for the relationship. If \code{object} is a point pattern, and \code{baseline} is missing or null, this command assumes that \code{object} is a realisation of a Poisson point process with intensity function \eqn{\lambda(u)}{lambda(u)} of the form \deqn{\lambda(u) = \rho(Z(u))}{lambda(u) = rho(Z(u))} where \eqn{Z} is the spatial covariate function given by \code{covariate}, and \eqn{\rho(z)}{rho(z)} is a function to be estimated. This command computes estimators of \eqn{\rho(z)}{rho(z)} proposed by Baddeley and Turner (2005) and Baddeley et al (2012). The covariate \eqn{Z} must have continuous values. If \code{object} is a point pattern, and \code{baseline} is given, then the intensity function is assumed to be \deqn{\lambda(u) = \rho(Z(u)) B(u)}{lambda(u) = rho(Z(u)) * B(u)} where \eqn{B(u)} is the baseline intensity at location \eqn{u}. 
A smoothing estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z(u)) \kappa(u) }{ lambda(u) = rho(Z(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}. A smoothing estimator of \eqn{\rho(z)}{rho(z)} is computed. The estimation procedure is determined by the character strings \code{method} and \code{smoother} and the argument \code{horvitz}. The estimation procedure involves computing several density estimates and combining them. The algorithm used to compute density estimates is determined by \code{smoother}: \itemize{ \item If \code{smoother="kernel"}, the smoothing procedure is based on fixed-bandwidth kernel density estimation, performed by \code{\link{density.default}}. \item If \code{smoother="local"}, the smoothing procedure is based on local likelihood density estimation, performed by \code{\link[locfit]{locfit}}. } The \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z)}{rho(z)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z)}{rho(z)} is estimated by the ratio of two density estimates. The numerator is a (rescaled) density estimate obtained by smoothing the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{Z}. \item If \code{method="reweight"}, then \eqn{\rho(z)}{rho(z)} is estimated by applying density estimation to the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{Z}. 
\item If \code{method="transform"}, the smoothing method is variable-bandwidth kernel smoothing, implemented by applying the Probability Integral Transform to the covariate values, yielding values in the range 0 to 1, then applying edge-corrected density estimation on the interval \eqn{[0,1]}, and back-transforming. } If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. The covariate will be evaluated on a fine grid of locations, with spatial resolution controlled by the arguments \code{dimyx,eps,nd,random}. In two dimensions (i.e. if \code{object} is of class \code{"ppp"}, \code{"ppm"} or \code{"quad"}) the arguments \code{dimyx, eps} are passed to \code{\link{as.mask}} to control the pixel resolution. On a linear network (i.e. if \code{object} is of class \code{"lpp"}) the argument \code{nd} specifies the total number of test locations on the linear network, \code{eps} specifies the linear separation between test locations, and \code{random} specifies whether the test locations have a randomised starting position. If the argument \code{weights} is present, then the contribution from each data point \code{X[i]} to the estimate of \eqn{\rho}{rho} is multiplied by \code{weights[i]}. If the argument \code{subset} is present, then the calculations are performed using only the data inside this spatial region. } \value{ A function value table (object of class \code{"fv"}) containing the estimated values of \eqn{\rho}{rho} for a sequence of values of \eqn{Z}. Also belongs to the class \code{"rhohat"} which has special methods for \code{print}, \code{plot} and \code{predict}. } \section{Categorical and discrete covariates}{ This technique assumes that the covariate has continuous values. 
It is not applicable to covariates with categorical (factor) values or discrete values such as small integers. For a categorical covariate, use \code{\link{intensity.quadratcount}} applied to the result of \code{\link{quadratcount}(X, tess=covariate)}. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. Baddeley, A. and Turner, R. (2005) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. } \author{ \adrian, Ya-Mei Chang, Yong Song, and \rolf. } \seealso{ \code{\link{rho2hat}}, \code{\link{methods.rhohat}}, \code{\link{parres}}. See \code{\link{ppm}} for a parametric method for the same problem. } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) rho <- rhohat(X, "x") rho <- rhohat(X, function(x,y){x}) plot(rho) curve(exp(3+3*x), lty=3, col=2, add=TRUE) rhoB <- rhohat(X, "x", method="reweight") rhoC <- rhohat(X, "x", method="transform") \testonly{rh <- rhohat(X, "x", dimyx=32)} fit <- ppm(X, ~x) rr <- rhohat(fit, "y") # linear network Y <- runiflpp(30, simplenet) rhoY <- rhohat(Y, "y") } \keyword{spatial} \keyword{models} spatstat/man/AreaInter.Rd0000644000176200001440000001737713160710571015051 0ustar liggesusers\name{AreaInter} \alias{AreaInter} \title{The Area Interaction Point Process Model} \description{ Creates an instance of the Area Interaction point process model (Widom-Rowlinson penetrable spheres model) which can then be fitted to point pattern data. 
} \usage{ AreaInter(r) } \arguments{ \item{r}{The radius of the discs in the area interaction process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the area-interaction process with disc radius \eqn{r}. } \details{ This function defines the interpoint interaction structure of a point process called the Widom-Rowlinson penetrable sphere model or area-interaction process. It can be used to fit this model to point pattern data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the area interaction structure is yielded by the function \code{AreaInter()}. See the examples below. In \bold{standard form}, the area-interaction process (Widom and Rowlinson, 1970; Baddeley and Van Lieshout, 1995) with disc radius \eqn{r}, intensity parameter \eqn{\kappa}{\kappa} and interaction parameter \eqn{\gamma}{\gamma} is a point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \kappa^{n(x)} \gamma^{-A(x)} }{ f(x[1],\ldots,x[n]) = \alpha . \kappa^n(x) . \gamma^(-A(x)) } for a point pattern \eqn{x}, where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{A(x)} is the area of the region formed by the union of discs of radius \eqn{r} centred at the points \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]}. Here \eqn{\alpha}{\alpha} is a normalising constant. The interaction parameter \eqn{\gamma}{\gamma} can be any positive number. If \eqn{\gamma = 1}{\gamma = 1} then the model reduces to a Poisson process with intensity \eqn{\kappa}{\kappa}. If \eqn{\gamma < 1}{\gamma < 1} then the process is regular, while if \eqn{\gamma > 1}{\gamma > 1} the process is clustered. 
Thus, an area interaction process can be used to model either clustered or regular point patterns. Two points interact if the distance between them is less than \eqn{2r}{2 * r}. The standard form of the model, shown above, is a little complicated to interpret in practical applications. For example, each isolated point of the pattern \eqn{x} contributes a factor \eqn{\kappa \gamma^{-\pi r^2}}{\kappa * \gamma^(-\pi * r^2)} to the probability density. In \pkg{spatstat}, the model is parametrised in a different form, which is easier to interpret. In \bold{canonical scale-free form}, the probability density is rewritten as \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \eta^{-C(x)} }{ f(x_1,\ldots,x_n) = \alpha . \beta^n(x) \eta^(-C(x)) } where \eqn{\beta}{\beta} is the new intensity parameter, \eqn{\eta}{\eta} is the new interaction parameter, and \eqn{C(x) = B(x) - n(x)} is the interaction potential. Here \deqn{ B(x) = \frac{A(x)}{\pi r^2} }{ B(x) = A(x)/(\pi * r^2) } is the normalised area (so that the discs have unit area). In this formulation, each isolated point of the pattern contributes a factor \eqn{\beta}{\beta} to the probability density (so the first order trend is \eqn{\beta}{\beta}). The quantity \eqn{C(x)} is a true interaction potential, in the sense that \eqn{C(x) = 0} if the point pattern \eqn{x} does not contain any points that lie close together (closer than \eqn{2r}{2*r} units apart). When a new point \eqn{u} is added to an existing point pattern \eqn{x}, the rescaled potential \eqn{-C(x)} increases by a value between 0 and 1. The increase is zero if \eqn{u} is not close to any point of \eqn{x}. The increase is 1 if the disc of radius \eqn{r} centred at \eqn{u} is completely contained in the union of discs of radius \eqn{r} centred at the data points \eqn{x_i}{x[i]}. Thus, the increase in potential is a measure of how close the new point \eqn{u} is to the existing pattern \eqn{x}. 
Addition of the point \eqn{u} contributes a factor \eqn{\beta \eta^\delta}{\beta * \eta^\delta} to the probability density, where \eqn{\delta}{\delta} is the increase in potential. The old parameters \eqn{\kappa,\gamma}{\kappa,\gamma} of the standard form are related to the new parameters \eqn{\beta,\eta}{\beta,\eta} of the canonical scale-free form, by \deqn{ \beta = \kappa \gamma^{-\pi r^2} = \kappa /\eta }{ \beta = \kappa * \gamma^(-\pi * r^2)= \kappa / \eta } and \deqn{ \eta = \gamma^{\pi r^2} }{ \eta = \gamma^(\pi * r^2) } provided \eqn{\gamma}{\gamma} and \eqn{\kappa}{\kappa} are positive and finite. In the canonical scale-free form, the parameter \eqn{\eta}{\eta} can take any nonnegative value. The value \eqn{\eta = 1}{\eta = 1} again corresponds to a Poisson process, with intensity \eqn{\beta}{\beta}. If \eqn{\eta < 1}{\eta < 1} then the process is regular, while if \eqn{\eta > 1}{\eta > 1} the process is clustered. The value \eqn{\eta = 0}{\eta = 0} corresponds to a hard core process with hard core radius \eqn{r} (interaction distance \eqn{2r}). The \emph{nonstationary} area interaction process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{\beta(x[i])} of location, rather than a constant beta. Note the only argument of \code{AreaInter()} is the disc radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(\beta)} and \eqn{\log(\eta)}{log(\eta)} are estimated by \code{\link{ppm}()}, not fixed in \code{AreaInter()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} \code{\link{ragsAreaInter}} and \code{\link{rmh}} for simulation of area-interaction models. } \section{Warnings}{ The interaction distance of this process is equal to \code{2 * r}. Two discs of radius \code{r} overlap if their centres are closer than \code{2 * r} units apart. 
The estimate of the interaction parameter \eqn{\eta}{\eta} is unreliable if the interaction radius \code{r} is too small or too large. In these situations the model is approximately Poisson so that \eqn{\eta}{\eta} is unidentifiable. As a rule of thumb, one can inspect the empty space function of the data, computed by \code{\link{Fest}}. The value \eqn{F(r)} of the empty space function at the interaction radius \code{r} should be between 0.2 and 0.8. } \examples{ \testonly{op <- spatstat.options(ngrid.disc=8)} # prints a sensible description of itself AreaInter(r=0.1) # Note the reach is twice the radius reach(AreaInter(r=1)) # Fit the stationary area interaction process to Swedish Pines data data(swedishpines) ppm(swedishpines, ~1, AreaInter(r=7)) # Fit the stationary area interaction process to `cells' data(cells) ppm(cells, ~1, AreaInter(r=0.06)) # eta=0 indicates hard core process. # Fit a nonstationary area interaction with log-cubic polynomial trend \dontrun{ ppm(swedishpines, ~polynom(x/10,y/10,3), AreaInter(r=7)) } \testonly{spatstat.options(op)} } \references{ Baddeley, A.J. and Van Lieshout, M.N.M. (1995). Area-interaction point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{47} (1995) 601--619. Widom, B. and Rowlinson, J.S. (1970). New model for the study of liquid-vapor phase transitions. \emph{The Journal of Chemical Physics} \bold{52} (1970) 1670--1684. } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/plot.lintess.Rd0000644000176200001440000000400413160710621015610 0ustar liggesusers\name{plot.lintess} \alias{plot.lintess} \title{ Plot a Tessellation on a Linear Network } \description{ Plot a tessellation or division of a linear network into tiles. } \usage{ \method{plot}{lintess}(x, \dots, main, add = FALSE, style = c("segments", "image"), col = NULL) } \arguments{ \item{x}{ Tessellation on a linear network (object of class \code{"lintess"}). 
} \item{\dots}{ Arguments passed to \code{\link[graphics]{segments}} (if \code{style="segments"}) or to \code{\link{plot.im}} (if \code{style="image"}) to control the plot. } \item{main}{ Optional main title for the plot. } \item{add}{ Logical value indicating whether the plot is to be added to an existing plot. } \item{style}{ Character string (partially matched) indicating whether to plot the tiles of the tessellation using \code{\link[graphics]{segments}} or to convert the tessellation to a pixel image and use \code{\link{plot.im}}. } \item{col}{ Vector of colours, or colour map, determining the colours used to plot the different tiles of the tessellation. } } \details{ A tessellation on a linear network \code{L} is a partition of the network into non-overlapping pieces (tiles). Each tile consists of one or more line segments which are subsets of the line segments making up the network. A tile can consist of several disjoint pieces. This function plots the tessellation on the current device. It is a method for the generic \code{plot}. If \code{style="segments"}, each tile is plotted using \code{\link[graphics]{segments}}. Colours distinguish the different tiles. If \code{style="image"}, the tessellation is converted to a pixel image, and plotted using \code{\link{plot.im}}. } \value{ (Invisible) colour map. } \author{ \adrian } \seealso{ \code{\link{lintess}} } \examples{ X <- runiflpp(7, simplenet) Z <- divide.linnet(X) plot(Z, main="tessellation on network") points(as.ppp(X)) } \keyword{spatial} \keyword{hplot} spatstat/man/plot.yardstick.Rd0000644000176200001440000000531613160710621016133 0ustar liggesusers\name{plot.yardstick} \alias{plot.yardstick} \title{ Plot a Yardstick or Scale Bar } \description{ Plots an object of class \code{"yardstick"}. 
} \usage{ \method{plot}{yardstick}(x, \dots, angle = 20, frac = 1/8, split = FALSE, shrink = 1/4, pos = NULL, txt.args=list(), txt.shift=c(0,0), do.plot = TRUE) } \arguments{ \item{x}{ Object of class \code{"yardstick"} to be plotted. This object is created by the command \code{\link{yardstick}}. } \item{\dots}{ Additional graphics arguments passed to \code{\link[graphics]{segments}} to control the appearance of the line. } \item{angle}{ Angle between the arrows and the line segment, in degrees. } \item{frac}{ Length of arrow as a fraction of total length of the line segment. } \item{split}{ Logical. If \code{TRUE}, then the line will be broken in the middle, and the text will be placed in this gap. If \code{FALSE}, the line will be unbroken, and the text will be placed beside the line. } \item{shrink}{ Fraction of total length to be removed from the middle of the line segment, if \code{split=TRUE}. } \item{pos}{ Integer (passed to \code{\link[graphics]{text}}) determining the position of the annotation text relative to the line segment, if \code{split=FALSE}. Values of 1, 2, 3 and 4 indicate positions below, to the left of, above and to the right of the line, respectively. } \item{txt.args}{ Optional list of additional arguments passed to \code{\link[graphics]{text}} controlling the appearance of the text. Examples include \code{adj}, \code{srt}, \code{col}, \code{cex}, \code{font}. } \item{txt.shift}{ Optional numeric vector of length 2 specifying displacement of the text position relative to the centre of the yardstick. } \item{do.plot}{ Logical. Whether to actually perform the plot (\code{do.plot=TRUE}). } } \details{ A yardstick or scale bar is a line segment, drawn on any spatial graphics display, indicating the scale of the plot. The argument \code{x} should be an object of class \code{"yardstick"} created by the command \code{\link{yardstick}}. } \value{ A window (class \code{"owin"}) enclosing the plotted graphics. 
} \examples{ plot(owin(), main="Yardsticks") ys <- yardstick(as.psp(list(xmid=0.5, ymid=0.1, length=0.4, angle=0), window=owin(c(0.2, 0.8), c(0, 0.2))), txt="1 km") plot(ys) ys <- shift(ys, c(0, 0.3)) plot(ys, angle=90, frac=0.08) ys <- shift(ys, c(0, 0.3)) plot(ys, split=TRUE) } \author{\adrian \rolf and \ege } \seealso{ \code{\link{yardstick}} } \keyword{spatial} \keyword{hplot} spatstat/man/ippm.Rd0000644000176200001440000001331013162665150014127 0ustar liggesusers\name{ippm} \alias{ippm} \title{ Fit Point Process Model Involving Irregular Trend Parameters } \description{ Experimental extension to \code{ppm} which finds optimal values of the irregular trend parameters in a point process model. } \usage{ ippm(Q, \dots, iScore=NULL, start=list(), covfunargs=start, nlm.args=list(stepmax=1/2), silent=FALSE, warn.unused=TRUE) } \arguments{ \item{Q,\dots}{ Arguments passed to \code{\link{ppm}} to fit the point process model. } \item{iScore}{ Optional. A named list of \R functions that compute the partial derivatives of the logarithm of the trend, with respect to each irregular parameter. See Details. } \item{start}{ Named list containing initial values of the irregular parameters over which to optimise. } \item{covfunargs}{ Argument passed to \code{\link{ppm}}. A named list containing values for \emph{all} irregular parameters required by the covariates in the model. Must include all the parameters named in \code{start}. } \item{nlm.args}{ Optional list of arguments passed to \code{\link[stats]{nlm}} to control the optimization algorithm. } \item{silent}{ Logical. Whether to print warnings if the optimization algorithm fails to converge. } \item{warn.unused}{ Logical. Whether to print a warning if some of the parameters in \code{start} are not used in the model. } } \details{ This function is an experimental extension to the point process model fitting command \code{\link{ppm}}. 
The extension allows the trend of the model to include irregular parameters, which will be maximised by a Newton-type iterative method, using \code{\link[stats]{nlm}}. For the sake of explanation, consider a Poisson point process with intensity function \eqn{\lambda(u)}{\lambda(u)} at location \eqn{u}. Assume that \deqn{ \lambda(u) = \exp(\alpha + \beta Z(u)) \, f(u, \gamma) }{ \lambda(u) = exp(\alpha + \beta * Z(u)) * f(u, \gamma) } where \eqn{\alpha,\beta,\gamma} are parameters to be estimated, \eqn{Z(u)} is a spatial covariate function, and \eqn{f} is some known function. Then the parameters \eqn{\alpha,\beta} are called \emph{regular} because they appear in a loglinear form; the parameter \eqn{\gamma} is called \emph{irregular}. To fit this model using \code{ippm}, we specify the intensity using the \code{trend} formula in the same way as usual for \code{\link{ppm}}. The trend formula is a representation of the log intensity. In the above example the log intensity is \deqn{ \log\lambda(u) = \alpha + \beta Z(u) + \log f(u, \gamma) }{ log(\lambda(u)) = \alpha + \beta * Z(u) + log(f(u, \gamma)) } So the model above would be encoded with the trend formula \code{~Z + offset(log(f))}. Note that the irregular part of the model is an \emph{offset} term, which means that it is included in the log trend as it is, without being multiplied by another regular parameter. The optimisation runs faster if we specify the derivative of \eqn{\log f(u,\gamma)}{log(f(u,\gamma))} with respect to \eqn{\gamma}. We call this the \emph{irregular score}. To specify this, the user must write an \R function that computes the irregular score for any value of \eqn{\gamma} at any location \code{(x,y)}. 
Thus, to code such a problem, \enumerate{ \item The argument \code{trend} should define the log intensity, with the irregular part as an offset; \item The argument \code{start} should be a list containing initial values of each of the irregular parameters; \item The argument \code{iScore}, if provided, must be a list (with one entry for each entry of \code{start}) of functions with arguments \code{x,y,\dots}, that evaluate the partial derivatives of \eqn{\log f(u,\gamma)}{log(f(u,gamma))} with respect to each irregular parameter. } The coded example below illustrates the model with two irregular parameters \eqn{\gamma,\delta}{gamma,delta} and irregular term \deqn{ f((x,y), (\gamma, \delta)) = 1 + \exp(\gamma - \delta x^3) }{ f((x,y), (\gamma, \delta)) = 1 + \exp(\gamma - \delta * x^3) } Arguments \code{\dots} passed to \code{\link{ppm}} may also include \code{interaction}. In this case the model is not a Poisson point process but a more general Gibbs point process; the trend formula \code{trend} determines the first-order trend of the model (the first order component of the conditional intensity), not the intensity. } \value{ A fitted point process model (object of class \code{"ppm"}). 
} \author{\spatstatAuthors.} \seealso{ \code{\link{ppm}}, \code{\link{profilepl}} } \examples{ nd <- 32 \testonly{nd <- 10} gamma0 <- 3 delta0 <- 5 POW <- 3 # Terms in intensity Z <- function(x,y) { -2*y } f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) } # True intensity lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) } # Simulate realisation lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0)) set.seed(42) X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0) # Partial derivatives of log f DlogfDgamma <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) topbit/(1 + topbit) } DlogfDdelta <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) - (x^POW) * topbit/(1 + topbit) } # irregular score Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta) # fit model ippm(X ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), nlm.args=list(stepmax=1), nd=nd) } \keyword{spatial} \keyword{models} spatstat/man/shift.owin.Rd0000644000176200001440000000346213160710621015251 0ustar liggesusers\name{shift.owin} \alias{shift.owin} \title{Apply Vector Translation To Window} \description{ Applies a vector shift to a window } \usage{ \method{shift}{owin}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Window (object of class \code{"owin"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \value{ Another window (of class \code{"owin"}) representing the result of applying the vector shift. } \details{ The window is translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. 
If \code{origin} is given, then it should be one of the character strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}. The argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the origin. If \code{origin="centroid"} then the centroid of the window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the window will be shifted to the origin. If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the window will be shifted to the origin. } \seealso{ \code{\link{shift}}, \code{\link{shift.ppp}}, \code{\link{periodify}}, \code{\link{rotate}}, \code{\link{affine}}, \code{\link{centroid.owin}} } \examples{ W <- owin(c(0,1),c(0,1)) X <- shift(W, c(2,3)) \dontrun{ plot(W) # no discernible difference except coordinates are different } shift(W, origin="mid") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/dppGauss.Rd0000644000176200001440000000224713160710571014753 0ustar liggesusers\name{dppGauss} \alias{dppGauss} \title{Gaussian Determinantal Point Process Model} \description{ Function generating an instance of the Gaussian determinantal point process model. } \usage{dppGauss(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details.} } \details{ The Gaussian DPP is defined in (Lavancier, \ifelse{latex}{\out{M\o ller}}{Moller} and Rubak, 2015) The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian \rolf and \ege } \references{ Lavancier, F. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. 
(2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. } \examples{ m <- dppGauss(lambda=100, alpha=.05, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppCauchy}}, \code{\link{dppMatern}}, \code{\link{dppPowerExp}} } spatstat/man/dppspecden.Rd0000644000176200001440000000110413160710571015301 0ustar liggesusers\name{dppspecden} \alias{dppspecden} \title{Extract Spectral Density from Determinantal Point Process Model Object} \description{ Returns the spectral density of a determinantal point process model as a function of one argument \code{x}. } \usage{dppspecden(model)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} } \value{A function} \author{ \adrian \rolf and \ege } \examples{ model <- dppMatern(lambda = 100, alpha=.01, nu=1, d=2) dppspecden(model) } \seealso{ \code{\link{dppspecdenrange}} } \keyword{spatial} \keyword{models} spatstat/man/compatible.fasp.Rd0000644000176200001440000000212313160710571016225 0ustar liggesusers\name{compatible.fasp} \alias{compatible.fasp} \title{Test Whether Function Arrays Are Compatible} \description{ Tests whether two or more function arrays (class \code{"fasp"}) are compatible. } \usage{ \method{compatible}{fasp}(A, B, \dots) } \arguments{ \item{A,B,\dots}{Two or more function arrays (object of class \code{"fasp"}).} } \details{ An object of class \code{"fasp"} can be regarded as an array of functions. Such objects are returned by the command \code{\link{alltypes}}. This command tests whether such objects are compatible (so that, for example, they could be added or subtracted). It is a method for the generic command \code{\link{compatible}}. The function arrays are compatible if the arrays have the same dimensions, and the corresponding elements in each cell of the array are compatible as defined by \code{\link{compatible.fv}}. 
} \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{eval.fasp}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/dim.detpointprocfamily.Rd0000644000176200001440000000073113160710571017645 0ustar liggesusers\name{dim.detpointprocfamily} \alias{dim.detpointprocfamily} \title{Dimension of Determinantal Point Process Model} \description{Extracts the dimension of a determinantal point process model.} \usage{ \method{dim}{detpointprocfamily}(x) } \arguments{ \item{x}{object of class \code{"detpointprocfamily"}.} } \value{A numeric (or NULL if the dimension of the model is unspecified).} \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/Penttinen.Rd0000644000176200001440000000520013160710571015121 0ustar liggesusers\name{Penttinen} \alias{Penttinen} \title{Penttinen Interaction} \description{ Creates an instance of the Penttinen pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ Penttinen(r) } \arguments{ \item{r}{circle radius} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Penttinen (1984, Example 2.1, page 18), citing Cormack (1979), described the pairwise interaction point process with interaction factor \deqn{ h(d) = e^{\theta A(d)} = \gamma^{A(d)} }{ h(d) = exp(theta * A(d)) = gamma^(A(d)) } between each pair of points separated by a distance $d$. Here \eqn{A(d)} is the area of intersection between two discs of radius \eqn{r} separated by a distance \eqn{d}, normalised so that \eqn{A(0) = 1}. The scale of interaction is controlled by the disc radius \eqn{r}: two points interact if they are closer than \eqn{2 r}{2 * r} apart. 
The strength of interaction is controlled by the canonical parameter \eqn{\theta}{theta}, which must be less than or equal to zero, or equivalently by the parameter \eqn{\gamma = e^\theta}{gamma = exp(theta)}, which must lie between 0 and 1. The potential is inhibitory, i.e.\ this model is only appropriate for regular point patterns. For \eqn{\gamma=0}{gamma=0} the model is a hard core process with hard core diameter \eqn{2 r}{2 * r}. For \eqn{\gamma=1}{gamma=1} the model is a Poisson process. The irregular parameter \eqn{r} must be given in the call to \code{Penttinen}, while the regular parameter \eqn{\theta}{theta} will be estimated. This model can be considered as a pairwise approximation to the area-interaction model \code{\link{AreaInter}}. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{Pairwise}}, \code{\link{AreaInter}}. } \examples{ fit <- ppm(cells ~ 1, Penttinen(0.07)) fit reach(fit) # interaction range is circle DIAMETER } \references{ Cormack, R.M. (1979) Spatial aspects of competition between individuals. Pages 151--212 in \emph{Spatial and Temporal Analysis in Ecology}, eds. R.M. Cormack and J.K. Ord, International Co-operative Publishing House, Fairland, MD, USA. Penttinen, A. (1984) \emph{Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method.} \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla} Studies in Computer Science, Economics and Statistics \bold{7}, University of \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}, Finland. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/lixellate.Rd0000644000176200001440000000467613160710621015154 0ustar liggesusers\name{lixellate} \alias{lixellate} \title{ Subdivide Segments of a Network } \description{ Each line segment of a linear network will be divided into several shorter segments (line elements or lixels). 
} \usage{ lixellate(X, \dots, nsplit, eps, sparse = TRUE) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}) or a point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Ignored. } \item{nsplit}{ Number of pieces into which \emph{each} line segment of \code{X} should be divided. Either a single integer, or an integer vector with one entry for each line segment in \code{X}. Incompatible with \code{eps}. } \item{eps}{ Maximum length of the resulting pieces of line segment. A single numeric value. Incompatible with \code{nsplit}. } \item{sparse}{ Optional. Logical value specifying whether the resulting linear network should be represented using a sparse matrix. If \code{sparse=NULL}, then the representation will be the same as in \code{X}. } } \details{ Each line segment in \code{X} will be subdivided into equal pieces. The result is an object of the same kind as \code{X}, representing the same data as \code{X} except that the segments have been subdivided. Splitting is controlled by the arguments \code{nsplit} and \code{eps}, exactly one of which should be given. If \code{nsplit} is given, it specifies the number of pieces into which \emph{each} line segment of \code{X} should be divided. It should be either a single integer, or an integer vector of length equal to the number of line segments in \code{X}. If \code{eps} is given, it specifies the maximum length of any resulting piece of line segment. It is strongly advisable to use \code{sparse=TRUE} (the default) to limit the computation time. If \code{X} is a point pattern (class \code{"lpp"}) then the spatial coordinates and marks of each data point are unchanged, but the local coordinates will change, because they are adjusted to map them to the new subdivided network. } \value{ Object of the same kind as \code{X}. } \author{ Greg McSwiggan, \spatstatAuthors. } \seealso{ \code{\link{linnet}}, \code{\link{lpp}}. 
} \examples{ A <- lixellate(simplenet, nsplit=4) plot(A, main="lixellate(simplenet, nsplit=4)") points(vertices(A), pch=16) spiders lixellate(spiders, nsplit=3) } \keyword{spatial} \keyword{manip} spatstat/man/run.simplepanel.Rd0000644000176200001440000001173613160710621016300 0ustar liggesusers\name{run.simplepanel} \alias{clear.simplepanel} \alias{redraw.simplepanel} \alias{run.simplepanel} \title{ Run Point-and-Click Interface } \description{ Execute various operations in a simple point-and-click user interface. } \usage{ run.simplepanel(P, popup=TRUE, verbose = FALSE) clear.simplepanel(P) redraw.simplepanel(P, verbose = FALSE) } \arguments{ \item{P}{ An interaction panel (object of class \code{"simplepanel"}, created by \code{\link{simplepanel}} or \code{\link{grow.simplepanel}}). } \item{popup}{ Logical. If \code{popup=TRUE} (the default), the panel will be displayed in a new popup window. If \code{popup=FALSE}, the panel will be displayed on the current graphics window if it already exists, and on a new window otherwise. } \item{verbose}{ Logical. If \code{TRUE}, debugging information will be printed. } } \details{ These commands enable the user to run a simple, robust, point-and-click interface to any \R code. The interface is implemented using only the basic graphics package in \R. The argument \code{P} is an object of class \code{"simplepanel"}, created by \code{\link{simplepanel}} or \code{\link{grow.simplepanel}}, which specifies the graphics to be displayed and the actions to be performed when the user interacts with the panel. The command \code{run.simplepanel(P)} activates the panel: the display is initialised and the graphics system waits for the user to click the panel. While the panel is active, the user can only interact with the panel; the \R command line interface and the \R GUI cannot be used. When the panel terminates (typically because the user clicked a button labelled Exit), control returns to the \R command line interface and the \R GUI. 
The command \code{clear.simplepanel(P)} clears all the display elements in the panel, resulting in a blank display except for the title of the panel. The command \code{redraw.simplepanel(P)} redraws all the buttons of the panel, according to the \code{redraw} functions contained in the panel. If \code{popup=TRUE} (the default), \code{run.simplepanel} begins by calling \code{\link[grDevices]{dev.new}} so that a new popup window is created; this window is closed using \code{\link[grDevices]{dev.off}} when \code{run.simplepanel} terminates. If \code{popup=FALSE}, the panel will be displayed on the current graphics window if it already exists, and on a new window otherwise; this window is not closed when \code{run.simplepanel} terminates. For more sophisticated control of the graphics focus (for example, to use the panel to control the display on another window), initialise the graphics devices yourself using \code{\link[grDevices]{dev.new}} or similar commands; save these devices in the shared environment \code{env} of the panel \code{P}; and write the click/redraw functions of \code{P} in such a way that they access these devices using \code{\link[grDevices]{dev.set}}. Then use \code{run.simplepanel} with \code{popup=FALSE}. } \value{ The return value of \code{run.simplepanel(P)} is the value returned by the \code{exit} function of \code{P}. See \code{\link{simplepanel}}. The functions \code{clear.simplepanel} and \code{redraw.simplepanel} return \code{NULL}. } \author{\adrian and \rolf } \seealso{ \code{\link{simplepanel}} } \examples{ if(interactive()) { # make boxes (alternatively use layout.boxes()) Bminus <- square(1) Bvalue <- shift(Bminus, c(1.2, 0)) Bplus <- shift(Bvalue, c(1.2, 0)) Bdone <- shift(Bplus, c(1.2, 0)) myboxes <- list(Bminus, Bvalue, Bplus, Bdone) myB <- do.call(boundingbox,myboxes) # make environment containing an integer count myenv <- new.env() assign("answer", 0, envir=myenv) # what to do when finished: return the count. 
myexit <- function(e) { return(get("answer", envir=e)) } # button clicks # decrement the count Cminus <- function(e, xy) { ans <- get("answer", envir=e) assign("answer", ans - 1, envir=e) return(TRUE) } # display the count (clicking does nothing) Cvalue <- function(...) { TRUE } # increment the count Cplus <- function(e, xy) { ans <- get("answer", envir=e) assign("answer", ans + 1, envir=e) return(TRUE) } # quit button Cdone <- function(e, xy) { return(FALSE) } myclicks <- list("-"=Cminus, value=Cvalue, "+"=Cplus, done=Cdone) # redraw the button that displays the current value of the count Rvalue <- function(button, nam, e) { plot(button, add=TRUE) ans <- get("answer", envir=e) text(centroid.owin(button), labels=ans) return(TRUE) } # make the panel P <- simplepanel("Counter", B=myB, boxes=myboxes, clicks=myclicks, redraws = list(NULL, Rvalue, NULL, NULL), exit=myexit, env=myenv) P run.simplepanel(P) } } \keyword{iplot} \keyword{utilities} spatstat/man/plot.im.Rd0000644000176200001440000004035013160710621014540 0ustar liggesusers\name{plot.im} \alias{plot.im} \alias{image.im} \title{Plot a Pixel Image} \description{ Plot a pixel image. } \usage{ \method{plot}{im}(x, \dots, main, add=FALSE, clipwin=NULL, col=NULL, valuesAreColours=NULL, log=FALSE, ribbon=show.all, show.all=!add, ribside=c("right", "left", "bottom", "top"), ribsep=0.15, ribwid=0.05, ribn=1024, ribscale=1, ribargs=list(), colargs=list(), useRaster=NULL, workaround=FALSE, do.plot=TRUE) \method{image}{im}(x, \dots, main, add=FALSE, clipwin=NULL, col=NULL, valuesAreColours=NULL, log=FALSE, ribbon=show.all, show.all=!add, ribside=c("right", "left", "bottom", "top"), ribsep=0.15, ribwid=0.05, ribn=1024, ribscale=1, ribargs=list(), colargs=list(), useRaster=NULL, workaround=FALSE, do.plot=TRUE) } \arguments{ \item{x}{ The pixel image to be plotted. An object of class \code{"im"} (see \code{\link{im.object}}). } \item{\dots}{ Extra arguments passed to \code{\link[graphics]{image.default}} to control the plot. 
See Details. } \item{main}{Main title for the plot.} \item{add}{ Logical value indicating whether to superimpose the image on the existing plot (\code{add=TRUE}) or to initialise a new plot (\code{add=FALSE}, the default). } \item{clipwin}{ Optional. A window (object of class \code{"owin"}). Only this subset of the image will be displayed. } \item{col}{ Colours for displaying the pixel values. Either a character vector of colour values, an object of class \code{\link{colourmap}}, or a \code{function} as described under Details. } \item{valuesAreColours}{ Logical value. If \code{TRUE}, the pixel values of \code{x} are to be interpreted as colour values. } \item{log}{ Logical value. If \code{TRUE}, the colour map will be evenly-spaced on a logarithmic scale. } \item{ribbon}{ Logical flag indicating whether to display a ribbon showing the colour map. Default is \code{TRUE} for new plots and \code{FALSE} for added plots. } \item{show.all}{ Logical value indicating whether to display all plot elements including the main title and colour ribbon. Default is \code{TRUE} for new plots and \code{FALSE} for added plots. } \item{ribside}{ Character string indicating where to display the ribbon relative to the main image. } \item{ribsep}{ Factor controlling the space between the ribbon and the image. } \item{ribwid}{ Factor controlling the width of the ribbon. } \item{ribn}{ Number of different values to display in the ribbon. } \item{ribscale}{ Rescaling factor for tick marks. The values on the numerical scale printed beside the ribbon will be multiplied by this rescaling factor. } \item{ribargs}{ List of additional arguments passed to \code{\link[graphics]{image.default}}, \code{\link[graphics]{axis}} and \code{\link[grDevices]{axisTicks}} to control the display of the ribbon and its scale axis. These may override the \code{\dots} arguments. } \item{colargs}{ List of additional arguments passed to \code{col} if it is a function. 
} \item{useRaster}{ Logical value, passed to \code{\link[graphics]{image.default}}. Images are plotted using a bitmap raster if \code{useRaster=TRUE} or by drawing polygons if \code{useRaster=FALSE}. Bitmap raster display tends to produce better results, but is not supported on all graphics devices. The default is to use bitmap raster display if it is supported. } \item{workaround}{ Logical value, specifying whether to use a workaround to avoid a bug which occurs with some device drivers in \R, in which the image has the wrong spatial orientation. See the section on \bold{Image is Displayed in Wrong Spatial Orientation} below. } \item{do.plot}{ Logical value indicating whether to actually plot the image and colour ribbon. Setting \code{do.plot=FALSE} will simply return the colour map and the bounding box that were chosen for the plot. } } \value{ The colour map used. An object of class \code{"colourmap"}. Also has an attribute \code{"bbox"} giving a bounding box for the colour image (including the ribbon if present). } \details{ This is the \code{plot} method for the class \code{"im"}. [It is also the \code{image} method for \code{"im"}.] The pixel image \code{x} is displayed on the current plot device, using equal scales on the \code{x} and \code{y} axes. If \code{ribbon=TRUE}, a legend will be plotted. The legend consists of a colour ribbon and an axis with tick-marks, showing the correspondence between the pixel values and the colour map. Arguments \code{ribside, ribsep, ribwid} control the placement of the colour ribbon. By default, the ribbon is placed at the right of the main image. This can be changed using the argument \code{ribside}. The width of the ribbon is \code{ribwid} times the size of the pixel image, where `size' means the larger of the width and the height. The distance separating the ribbon and the image is \code{ribsep} times the size of the pixel image. 
The ribbon contains the colours representing \code{ribn} different numerical values, evenly spaced between the minimum and maximum pixel values in the image \code{x}, rendered according to the chosen colour map. The argument \code{ribargs} controls the annotation of the colour ribbon. It is a list of arguments to be passed to \code{\link[graphics]{image.default}}, \code{\link[graphics]{axis}} and \code{\link[grDevices]{axisTicks}}. To plot the colour ribbon without the axis and tick-marks, use \code{ribargs=list(axes=FALSE)}. To ensure that the numerals or symbols printed next to the colour map are oriented horizontally, use \code{ribargs=list(las=1)}. To control the number of tick-marks, use \code{ribargs=list(nint=N)} where \code{N} is the desired number of intervals (so there will be \code{N+1} tickmarks, subject to the vagaries of \R internal code). The argument \code{ribscale} is used to rescale the numerals printed next to the colour map. Normally the pixel values are displayed using the colours given in the argument \code{col}. This may be either \itemize{ \item an explicit colour map (an object of class \code{"colourmap"}, created by the command \code{\link{colourmap}}). This is the best way to ensure that when we plot different images, the colour maps are consistent. \item a character vector or integer vector that specifies a set of colours. The colour mapping will be stretched to match the range of pixel values in the image \code{x}. The mapping of pixel values to colours is determined as follows. \describe{ \item{logical-valued images:}{the values \code{FALSE} and \code{TRUE} are mapped to the colours \code{col[1]} and \code{col[2]} respectively. The vector \code{col} should have length 2. } \item{factor-valued images:}{the factor levels \code{levels(x)} are mapped to the entries of \code{col} in order. The vector \code{col} should have the same length as \code{levels(x)}. 
} \item{numeric-valued images:}{ By default, the range of pixel values in \code{x} is divided into \code{n = length(col)} equal subintervals, which are mapped to the colours in \code{col}. (If \code{col} was not specified, it defaults to a vector of 255 colours.) Alternatively if the argument \code{zlim} is given, it should be a vector of length 2 specifying an interval of real numbers. This interval will be used instead of the range of pixel values. The interval from \code{zlim[1]} to \code{zlim[2]} will be mapped to the colours in \code{col}. This facility enables the user to plot several images using a consistent colour map. Alternatively if the argument \code{breaks} is given, then this specifies the endpoints of the subintervals that are mapped to each colour. This is incompatible with \code{zlim}. The arguments \code{col} and \code{zlim} or \code{breaks} are then passed to the function \code{\link{image.default}}. For examples of the use of these arguments, see \code{\link{image.default}}. } } \item { a \code{function} in the \R language with an argument named \code{range} or \code{inputs}. If \code{col} is a function with an argument named \code{range}, and if the pixel values of \code{x} are numeric values, then the colour values will be determined by evaluating \code{col(range=range(x))}. The result of this evaluation should be a character vector containing colour values, or a \code{"colourmap"} object. Examples of such functions are \code{\link{beachcolours}} and \code{\link{beachcolourmap}}. If \code{col} is a function with an argument named \code{inputs}, and if the pixel values of \code{x} are discrete values (integer, logical, factor or character), then the colour values will be determined by evaluating \code{col(inputs=p)} where \code{p} is the set of possible pixel values. The result should be a character vector containing colour values, or a \code{"colourmap"} object. } \item{ a \code{function} in the \R language with first argument named \code{n}. 
The colour values will be determined by evaluating \code{col(n)} where \code{n} is the number of distinct pixel values, up to a maximum of 128. The result of this evaluation should be a character vector containing color values. Examples of such functions are \code{\link[grDevices]{heat.colors}}, \code{\link[grDevices]{terrain.colors}}, \code{\link[grDevices]{topo.colors}} and \code{\link[grDevices]{cm.colors}}. } } If \code{spatstat.options("monochrome")} has been set to \code{TRUE} then \bold{all colours will be converted to grey scale values}. Other graphical parameters controlling the display of both the pixel image and the ribbon can be passed through the \code{...} arguments to the function \code{\link[graphics]{image.default}}. A parameter is handled only if it is one of the following: \itemize{ \item a formal argument of \code{\link[graphics]{image.default}} that is operative when \code{add=TRUE}. \item one of the parameters \code{"main", "asp", "sub", "axes", "ann", "cex", "font", "cex.axis", "cex.lab", "cex.main", "cex.sub", "col.axis", "col.lab", "col.main", "col.sub", "font.axis", "font.lab", "font.main", "font.sub"} described in \code{\link[graphics]{par}}. \item the argument \code{box}, a logical value specifying whether a box should be drawn. } Images are plotted using a bitmap raster if \code{useRaster=TRUE} or by drawing polygons if \code{useRaster=FALSE}. Bitmap raster display (performed by \code{\link[graphics]{rasterImage}}) tends to produce better results, but is not supported on all graphics devices. The default is to use bitmap raster display if it is supported according to \code{\link[grDevices]{dev.capabilities}}. Alternatively, the pixel values could be directly interpretable as colour values in \R. That is, the pixel values could be character strings that represent colours, or values of a factor whose levels are character strings representing colours. 
\itemize{ \item If \code{valuesAreColours=TRUE}, then the pixel values will be interpreted as colour values and displayed using these colours. \item If \code{valuesAreColours=FALSE}, then the pixel values will \emph{not} be interpreted as colour values, even if they could be. \item If \code{valuesAreColours=NULL}, the algorithm will guess what it should do. If the argument \code{col} is given, the pixel values will \emph{not} be interpreted as colour values. Otherwise, if all the pixel values are strings that represent colours, then they will be interpreted and displayed as colours. } If pixel values are interpreted as colours, the arguments \code{col} and \code{ribbon} will be ignored, and a ribbon will not be plotted. } \section{Complex-valued images}{ If the pixel values in \code{x} are complex numbers, they will be converted into four images containing the real and imaginary parts and the modulus and argument, and plotted side-by-side using \code{\link{plot.imlist}}. } \section{Monochrome colours}{ If \code{spatstat.options("monochrome")} has been set to \code{TRUE}, then \bold{the image will be plotted in greyscale}. The colours are converted to grey scale values using \code{\link{to.grey}}. The choice of colour map still has an effect, since it determines the final grey scale values. Monochrome display can also be achieved by setting the graphics device parameter \code{colormodel="grey"} when starting a new graphics device, or in a call to \code{\link{ps.options}} or \code{\link{pdf.options}}. } \section{Image Rendering Errors and Problems}{ The help for \code{\link[graphics]{image.default}} and \code{\link[graphics]{rasterImage}} explains that errors may occur, or images may be rendered incorrectly, on some devices, depending on the availability of colours and other device-specific constraints. If the image is not displayed at all, try setting \code{useRaster=FALSE} in the call to \code{plot.im}. 
If the ribbon colours are not displayed, set \code{ribargs=list(useRaster=FALSE)}. Errors may occur on some graphics devices if the image is very large. If this happens, try setting \code{useRaster=FALSE} in the call to \code{plot.im}. The error message \code{useRaster=TRUE can only be used with a regular grid} means that the \eqn{x} and \eqn{y} coordinates of the pixels in the image are not perfectly equally spaced, due to numerical rounding. This occurs with some images created by earlier versions of \pkg{spatstat}. To repair the coordinates in an image \code{X}, type \code{X <- as.im(X)}. } \section{Image is Displayed in Wrong Spatial Orientation}{ If the image is displayed in the wrong spatial orientation, and you created the image data directly, please check that you understand the \pkg{spatstat} convention for the spatial orientation of pixel images. The row index of the matrix of pixel values corresponds to the increasing \eqn{y} coordinate; the column index of the matrix corresponds to the increasing \eqn{x} coordinate (Baddeley, Rubak and Turner, 2015, section 3.6.3, pages 66--67). Images can be displayed in the wrong spatial orientation on some devices, due to a bug in the device driver. This occurs only when the plot coordinates are \emph{reversed}, that is, when the plot was initialised with coordinate limits \code{xlim, ylim} such that \code{xlim[1] > xlim[2]} or \code{ylim[1] > ylim[2]} or both. This bug is reported to occur only when \code{useRaster=TRUE}. To fix this, try setting \code{workaround=TRUE}, or if that is unsuccessful, \code{useRaster=FALSE}. 
} \seealso{ \code{\link{im.object}}, \code{\link{colourmap}}, \code{\link{contour.im}}, \code{\link{persp.im}}, \code{\link{hist.im}}, \code{\link[graphics]{image.default}}, \code{\link{spatstat.options}} } \examples{ # an image Z <- setcov(owin()) plot(Z) plot(Z, ribside="bottom") # stretchable colour map plot(Z, col=terrain.colors(128), axes=FALSE) # fixed colour map tc <- colourmap(rainbow(128), breaks=seq(-1,2,length=129)) plot(Z, col=tc) # colour map function, with argument 'range' plot(Z, col=beachcolours, colargs=list(sealevel=0.5)) # tweaking the plot plot(Z, main="La vie en bleu", col.main="blue", cex.main=1.5, box=FALSE, ribargs=list(col.axis="blue", col.ticks="blue", cex.axis=0.75)) # log scale V <- eval.im(exp(exp(Z+2))/1e4) plot(V, log=TRUE, main="Log scale") # it's complex Y <- exp(Z + V * 1i) plot(Y) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat/man/cdf.test.mppm.Rd0000644000176200001440000001771613160710621015652 0ustar liggesusers\name{cdf.test.mppm} \alias{cdf.test.mppm} \title{Spatial Distribution Test for Multiple Point Process Model} \description{ Performs a spatial distribution test of a point process model fitted to multiple spatial point patterns. The test compares the observed and predicted distributions of the values of a spatial covariate, using either the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test of goodness-of-fit. } \usage{ \method{cdf.test}{mppm}(model, covariate, test=c("ks", "cvm", "ad"), ..., nsim=19, verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE) } \arguments{ \item{model}{ An object of class \code{"mppm"} representing a point process model fitted to multiple spatial point patterns. } \item{covariate}{ The spatial covariate on which the test will be based. 
A function, a pixel image, a list of functions, a list of pixel images, a hyperframe, a character string containing the name of one of the covariates in \code{model}, or one of the strings \code{"x"} or \code{"y"}. } \item{test}{ Character string identifying the test to be performed: \code{"ks"} for Kolmogorov-Smirnov test, \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or \code{"ad"} for Anderson-Darling test. } \item{\dots}{ Arguments passed to \code{\link{cdf.test}} to control the test. } \item{nsim}{ Number of simulated realisations which should be generated, if a Monte Carlo test is required. } \item{verbose}{Logical flag indicating whether to print progress reports. } \item{interpolate}{ Logical flag indicating whether to interpolate between pixel values when \code{covariate} is a pixel image. See \emph{Details}. } \item{fast}{ Logical flag. If \code{TRUE}, values of the covariate are only sampled at the original quadrature points used to fit the model. If \code{FALSE}, values of the covariate are sampled at all pixels, which can be slower by three orders of magnitude. } \item{jitter}{ Logical flag. If \code{TRUE}, observed values of the covariate are perturbed by adding small random values, to avoid tied observations. } } \details{ This function is a method for the generic function \code{\link{cdf.test}} for the class \code{mppm}. This function performs a goodness-of-fit test of a point process model that has been fitted to multiple point patterns. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test of goodness-of-fit. 
These are exact tests if the model is Poisson; otherwise, for a Gibbs model, a Monte Carlo p-value is computed by generating simulated realisations of the model and applying the selected goodness-of-fit test to each simulation. The argument \code{model} should be a fitted point process model fitted to multiple point patterns (object of class \code{"mppm"}). The argument \code{covariate} contains the values of a spatial function. It can be \itemize{ \item a \code{function(x,y)} \item a pixel image (object of class \code{"im"}) \item a list of \code{function(x,y)}, one for each point pattern \item a list of pixel images, one for each point pattern \item a hyperframe (see \code{\link{hyperframe}}) of which the first column will be taken as containing the covariate \item a character string giving the name of one of the covariates in \code{model} \item one of the character strings \code{"x"} or \code{"y"}, indicating the spatial coordinates. } If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link{ewcdf}}. 
The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. A goodness-of-fit test of the uniform distribution is applied to these numbers using \code{\link[stats]{ks.test}}, \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}}. The argument \code{interpolate} determines how pixel values will be handled when \code{covariate} is a pixel image. The value of the covariate at a data point is obtained by looking up the value of the nearest pixel if \code{interpolate=FALSE}, or by linearly interpolating between the values of the four nearest pixels if \code{interpolate=TRUE}. Linear interpolation is slower, but is sometimes necessary to avoid tied values of the covariate arising when the pixel grid is coarse. If \code{model} is a Poisson point process, then the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises and Anderson-Darling tests are theoretically exact. This test was apparently first described (in the context of spatial data, and for Kolmogorov-Smirnov) by Berman (1986). See also Baddeley et al (2005). If \code{model} is not a Poisson point process, then the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises and Anderson-Darling tests are biased. Instead they are used as the basis of a Monte Carlo test. First \code{nsim} simulated realisations of the model will be generated. Each simulated realisation consists of a list of simulated point patterns, one for each of the original data patterns. This can take a very long time. The model is then re-fitted to each simulation, and the refitted model is subjected to the goodness-of-fit test described above. A Monte Carlo p-value is then computed by comparing the p-value of the original test with the p-values obtained from the simulations. 
} \value{ An object of class \code{"cdftest"} and \code{"htest"} containing the results of the test. See \code{\link{cdf.test}} for details. } \seealso{ \code{\link{cdf.test}}, \code{\link{quadrat.test}}, \code{\link{mppm}} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \examples{ # three i.i.d. realisations of nonuniform Poisson process lambda <- as.im(function(x,y) { 300 * exp(x) }, square(1)) dat <- hyperframe(X=list(rpoispp(lambda), rpoispp(lambda), rpoispp(lambda))) # fit uniform Poisson process fit0 <- mppm(X~1, dat) # fit correct nonuniform Poisson process fit1 <- mppm(X~x, dat) # test wrong model cdf.test(fit0, "x") # test right model cdf.test(fit1, "x") } \keyword{htest} \keyword{spatial} spatstat/man/matrixpower.Rd0000644000176200001440000000315113160710621015535 0ustar liggesusers\name{matrixpower} \alias{matrixpower} \alias{matrixsqrt} \alias{matrixinvsqrt} \title{ Power of a Matrix } \description{ Evaluate a specified power of a matrix. } \usage{ matrixpower(x, power, complexOK = TRUE) matrixsqrt(x, complexOK = TRUE) matrixinvsqrt(x, complexOK = TRUE) } \arguments{ \item{x}{ A square matrix containing numeric or complex values. } \item{power}{ A numeric value giving the power (exponent) to which \code{x} should be raised. } \item{complexOK}{ Logical value indicating whether the result is allowed to be complex. 
} } \details{ These functions raise the matrix \code{x} to the desired power: \code{matrixsqrt} takes the square root, \code{matrixinvsqrt} takes the inverse square root, and \code{matrixpower} takes the specified power of \code{x}. Up to numerical error, \code{matrixpower(x, 2)} should be equivalent to \code{x \%*\% x}, and \code{matrixpower(x, -1)} should be equivalent to \code{solve(x)}, the inverse of \code{x}. The square root \code{y <- matrixsqrt(x)} should satisfy \code{y \%*\% y = x}. The inverse square root \code{z <- matrixinvsqrt(x)} should satisfy \code{z \%*\% z = solve(x)}. Computations are performed using the eigen decomposition (\code{\link{eigen}}). } \value{ A matrix of the same size as \code{x} containing numeric or complex values. } \author{ \adrian. } \seealso{ \code{\link[base]{eigen}}, \code{\link[base]{svd}} } \examples{ x <- matrix(c(10,2,2,1), 2, 2) y <- matrixsqrt(x) y y \%*\% y z <- matrixinvsqrt(x) z \%*\% y matrixpower(x, 0.1) } \keyword{algebra} \keyword{array} spatstat/man/rshift.ppp.Rd0000644000176200001440000001606113160710621015255 0ustar liggesusers\name{rshift.ppp} \alias{rshift.ppp} \title{Randomly Shift a Point Pattern} \description{ Randomly shifts the points of a point pattern. } \usage{ \method{rshift}{ppp}(X, \dots, which=NULL, group) } \arguments{ \item{X}{Point pattern to be subjected to a random shift. An object of class \code{"ppp"} } \item{\dots}{ Arguments that determine the random shift. See Details. } \item{group}{ Optional. Factor specifying a grouping of the points of \code{X}, or \code{NULL} indicating that all points belong to the same group. Each group will be shifted together, and separately from other groups. By default, points in a marked point pattern are grouped according to their mark values, while points in an unmarked point pattern are treated as a single group. } \item{which}{ Optional. Identifies which groups of the pattern will be shifted, while other groups are not shifted. 
A vector of levels of \code{group}. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ This operation randomly shifts the locations of the points in a point pattern. The function \code{rshift} is generic. This function \code{rshift.ppp} is the method for point patterns. The most common use of this function is to shift the points in a multitype point pattern. By default, points of the same type are shifted in parallel (i.e. points of a common type are shifted by a common displacement vector), and independently of other types. This is useful for testing the hypothesis of independence of types (the null hypothesis that the sub-patterns of points of each type are independent point processes). In general the points of \code{X} are divided into groups, then the points within a group are shifted by a common random displacement vector. Different groups of points are shifted independently. The grouping is determined as follows: \itemize{ \item If the argument \code{group} is present, then this determines the grouping. \item Otherwise, if \code{X} is a multitype point pattern, the marks determine the grouping. \item Otherwise, all points belong to a single group. } The argument \code{group} should be a factor, of length equal to the number of points in \code{X}. Alternatively \code{group} may be \code{NULL}, which specifies that all points of \code{X} belong to a single group. By default, every group of points will be shifted. The argument \code{which} indicates that only some of the groups should be shifted, while other groups should be left unchanged. \code{which} must be a vector of levels of \code{group} (for example, a vector of types in a multitype pattern) indicating which groups are to be shifted. The displacement vector, i.e. the vector by which the data points are shifted, is generated at random. Parameters that control the randomisation and the handling of edge effects are passed through the \code{\dots} argument. 
They are \describe{ \item{radius,width,height}{ Parameters of the random shift vector. } \item{edge}{ String indicating how to deal with edges of the pattern. Options are \code{"torus"}, \code{"erode"} and \code{"none"}. } \item{clip}{ Optional. Window to which the final point pattern should be clipped. } } If the window is a rectangle, the \emph{default} behaviour is to generate a displacement vector at random with equal probability for all possible displacements. This means that the \eqn{x} and \eqn{y} coordinates of the displacement vector are independent random variables, uniformly distributed over the range of possible coordinates. Alternatively, the displacement vector can be generated by another random mechanism, controlled by the arguments \code{radius}, \code{width} and \code{height}. \describe{ \item{rectangular:}{ if \code{width} and \code{height} are given, then the displacement vector is uniformly distributed in a rectangle of these dimensions, centred at the origin. The maximum possible displacement in the \eqn{x} direction is \code{width/2}. The maximum possible displacement in the \eqn{y} direction is \code{height/2}. The \eqn{x} and \eqn{y} displacements are independent. (If \code{width} and \code{height} are actually equal to the dimensions of the observation window, then this is equivalent to the default.) } \item{radial:}{ if \code{radius} is given, then the displacement vector is generated by choosing a random point inside a disc of the given radius, centred at the origin, with uniform probability density over the disc. Thus the argument \code{radius} determines the maximum possible displacement distance. The argument \code{radius} is incompatible with the arguments \code{width} and \code{height}. } } The argument \code{edge} controls what happens when a shifted point lies outside the window of \code{X}. Options are: \describe{ \item{"none":}{ Points shifted outside the window of \code{X} simply disappear. 
} \item{"torus":}{ Toroidal or periodic boundary. Treat opposite edges of the window as identical, so that a point which disappears off the right-hand edge will re-appear at the left-hand edge. This is called a ``toroidal shift'' because it makes the rectangle topologically equivalent to the surface of a torus (doughnut). The window must be a rectangle. Toroidal shifts are undefined if the window is non-rectangular. } \item{"erode":}{ Clip the point pattern to a smaller window. If the random displacements are generated by a radial mechanism (see above), then the window of \code{X} is eroded by a distance equal to the value of the argument \code{radius}, using \code{\link{erosion}}. If the random displacements are generated by a rectangular mechanism, then the window of \code{X} is (if it is not rectangular) eroded by a distance \code{max(height,width)} using \code{\link{erosion}}; or (if it is rectangular) trimmed by a margin of width \code{width} at the left and right sides and trimmed by a margin of height \code{height} at the top and bottom. The rationale for this is that the clipping window is the largest window for which edge effects can be ignored. } } The optional argument \code{clip} specifies a smaller window to which the pattern should be restricted. 
} \seealso{ \code{\link{rshift}}, \code{\link{rshift.psp}} } \examples{ data(amacrine) # random toroidal shift # shift "on" and "off" points separately X <- rshift(amacrine) # shift "on" points and leave "off" points fixed X <- rshift(amacrine, which="on") # shift all points simultaneously X <- rshift(amacrine, group=NULL) # maximum displacement distance 0.1 units X <- rshift(amacrine, radius=0.1) # shift with erosion X <- rshift(amacrine, radius=0.1, edge="erode") } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/reach.Rd0000644000176200001440000001125013160710621014235 0ustar liggesusers\name{reach} \alias{reach} \alias{reach.ppm} \alias{reach.interact} \alias{reach.fii} \alias{reach.rmhmodel} \title{Interaction Distance of a Point Process} \description{ Computes the interaction distance of a point process. } \usage{ reach(x, \dots) \method{reach}{ppm}(x, \dots, epsilon=0) \method{reach}{interact}(x, \dots) \method{reach}{rmhmodel}(x, \dots) \method{reach}{fii}(x, \dots, epsilon) } \arguments{ \item{x}{Either a fitted point process model (object of class \code{"ppm"}), an interpoint interaction (object of class \code{"interact"}), a fitted interpoint interaction (object of class \code{"fii"}) or a point process model for simulation (object of class \code{"rmhmodel"}). } \item{epsilon}{ Numerical threshold below which interaction is treated as zero. See details. } \item{\dots}{ Other arguments are ignored. } } \value{ The interaction distance, or \code{NA} if this cannot be computed from the information given. } \details{ The `interaction distance' or `interaction range' of a point process model is the smallest distance \eqn{D} such that any two points in the process which are separated by a distance greater than \eqn{D} do not interact with each other. 
For example, the interaction range of a Strauss process (see \code{\link{Strauss}}) with parameters \eqn{\beta,\gamma,r}{beta,gamma,r} is equal to \eqn{r}, unless \eqn{\gamma=1}{gamma=1} in which case the model is Poisson and the interaction range is \eqn{0}. The interaction range of a Poisson process is zero. The interaction range of the Ord threshold process (see \code{\link{OrdThresh}}) is infinite, since two points \emph{may} interact at any distance apart. The function \code{reach(x)} is generic, with methods for the case where \code{x} is \itemize{ \item a fitted point process model (object of class \code{"ppm"}, usually obtained from the model-fitting function \code{\link{ppm}}); \item an interpoint interaction structure (object of class \code{"interact"}), created by one of the functions \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Softcore}}, \code{\link{DiggleGratton}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Geyer}}, \code{\link{LennardJones}}, \code{\link{Saturated}}, \code{\link{OrdThresh}} or \code{\link{Ord}}; \item a fitted interpoint interaction (object of class \code{"fii"}) extracted from a fitted point process model by the command \code{\link{fitin}}; \item a point process model for simulation (object of class \code{"rmhmodel"}), usually obtained from \code{\link{rmhmodel}}. } When \code{x} is an \code{"interact"} object, \code{reach(x)} returns the maximum possible interaction range for any point process model with interaction structure given by \code{x}. For example, \code{reach(Strauss(0.2))} returns \code{0.2}. When \code{x} is a \code{"ppm"} object, \code{reach(x)} returns the interaction range for the point process model represented by \code{x}. 
For example, a fitted Strauss process model with parameters \code{beta,gamma,r} will return either \code{0} or \code{r}, depending on whether the fitted interaction parameter \code{gamma} is equal or not equal to 1. For some point process models, such as the soft core process (see \code{\link{Softcore}}), the interaction distance is infinite, because the interaction terms are positive for all pairs of points. A practical solution is to compute the distance at which the interaction contribution from a pair of points falls below a threshold \code{epsilon}, on the scale of the log conditional intensity. This is done by setting the argument \code{epsilon} to a positive value. } \seealso{ \code{\link{ppm}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Softcore}}, \code{\link{DiggleGratton}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Geyer}}, \code{\link{LennardJones}}, \code{\link{Saturated}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{rmhmodel}} } \examples{ reach(Poisson()) # returns 0 reach(Strauss(r=7)) # returns 7 fit <- ppm(swedishpines ~ 1, Strauss(r=7)) reach(fit) # returns 7 reach(OrdThresh(42)) # returns Inf reach(MultiStrauss(matrix(c(1,3,3,1),2,2))) # returns 3 } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/integral.im.Rd0000644000176200001440000000413313160710621015366 0ustar liggesusers\name{integral.im} \alias{integral} \alias{integral.im} \title{ Integral of a Pixel Image } \description{ Computes the integral of a pixel image. } \usage{ integral(f, domain=NULL, \dots) \method{integral}{im}(f, domain=NULL, \dots) } \arguments{ \item{f}{ A pixel image (object of class \code{"im"}) with pixel values that can be treated as numeric or complex values. } \item{domain}{ Optional. Window specifying the domain of integration. Alternatively a tessellation. } \item{\dots}{ Ignored. 
} } \details{ The function \code{integral} is generic, with methods for \code{"im"}, \code{"msr"}, \code{"linim"} and \code{"linfun"}. The method \code{integral.im} treats the pixel image \code{f} as a function of the spatial coordinates, and computes its integral. The integral is calculated by summing the pixel values and multiplying by the area of one pixel. The pixel values of \code{f} may be numeric, integer, logical or complex. They cannot be factor or character values. The logical values \code{TRUE} and \code{FALSE} are converted to \code{1} and \code{0} respectively, so that the integral of a logical image is the total area of the \code{TRUE} pixels, in the same units as \code{unitname(x)}. If \code{domain} is a window (class \code{"owin"}) then the integration will be restricted to this window. If \code{domain} is a tessellation (class \code{"tess"}) then the integral of \code{f} in each tile of \code{domain} will be computed. } \value{ A single numeric or complex value (or a vector of such values if \code{domain} is a tessellation). } \seealso{ \code{\link{eval.im}}, \code{\link{[.im}} } \examples{ # approximate integral of f(x,y) dx dy f <- function(x,y){3*x^2 + 2*y} Z <- as.im(f, square(1)) integral.im(Z) # correct answer is 2 D <- density(cells) integral.im(D) # should be approximately equal to number of points = 42 # integrate over the subset [0.1,0.9] x [0.2,0.8] W <- owin(c(0.1,0.9), c(0.2,0.8)) integral.im(D, W) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/beachcolours.Rd0000644000176200001440000000546313160710571015641 0ustar liggesusers\name{beachcolours} \alias{beachcolours} \alias{beachcolourmap} \title{ Create Colour Scheme for a Range of Numbers } \description{ Given a range of numerical values, this command creates a colour scheme that would be appropriate if the numbers were altitudes (elevation above or below sea level). 
} \usage{ beachcolours(range, sealevel = 0, monochrome = FALSE, ncolours = if (monochrome) 16 else 64, nbeach = 1) beachcolourmap(range, ...) } \arguments{ \item{range}{ Range of numerical values to be mapped. A numeric vector of length 2. } \item{sealevel}{ Value that should be treated as zero. A single number, lying between \code{range[1]} and \code{range[2]}. } \item{monochrome}{ Logical. If \code{TRUE} then a greyscale colour map is constructed. } \item{ncolours}{ Number of distinct colours to use. } \item{nbeach}{ Number of colours that will be yellow. } \item{\dots}{Arguments passed to \code{beachcolours}.} } \details{ Given a range of numerical values, these commands create a colour scheme that would be appropriate if the numbers were altitudes (elevation above or below sea level). Numerical values close to zero are portrayed in green (representing the waterline). Negative values are blue (representing water) and positive values are yellow to red (representing land). At least, these are the colours of land and sea in Western Australia. This colour scheme was proposed by Baddeley et al (2005). The function \code{beachcolours} returns these colours as a character vector, while \code{beachcolourmap} returns a colourmap object. The argument \code{range} should be a numeric vector of length 2 giving a range of numerical values. The argument \code{sealevel} specifies the height value that will be treated as zero, and mapped to the colour green. A vector of \code{ncolours} colours will be created, of which \code{nbeach} colours will be green. The argument \code{monochrome} is included for convenience when preparing publications. If \code{monochrome=TRUE} the colour map will be a simple grey scale containing \code{ncolours} shades from black to white. } \value{ For \code{beachcolours}, a character vector of length \code{ncolours} specifying colour values. For \code{beachcolourmap}, a colour map (object of class \code{"colourmap"}). 
} \references{ Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. } \seealso{ \code{\link{colourmap}}, \code{\link[spatstat:colourtools]{colourtools}}. } \examples{ plot(beachcolourmap(c(-2,2))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{color} spatstat/man/OrdThresh.Rd0000644000176200001440000000336313160710571015067 0ustar liggesusers\name{OrdThresh} \alias{OrdThresh} \title{Ord's Interaction model} \description{ Creates an instance of Ord's point process model which can then be fitted to point pattern data. } \usage{ OrdThresh(r) } \arguments{ \item{r}{Positive number giving the threshold value for Ord's model.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Ord's point process model (Ord, 1977) is a Gibbs point process of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the tile associated with \eqn{x_i}{x[i]} in the Dirichlet tessellation of \eqn{x}. The function \eqn{g} is simply \eqn{g(a) = 1} if \eqn{a \ge r}{a >= r} and \eqn{g(a) = \gamma < 1}{g(a) = gamma < 1} if \eqn{a < r}{a < r}, where \eqn{r} is called the threshold value. This function creates an instance of Ord's model with a given value of \eqn{r}. It can then be fitted to point process data using \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. 
Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/crossdist.ppp.Rd0000644000176200001440000000474213160710571016002 0ustar liggesusers\name{crossdist.ppp} \alias{crossdist.ppp} \title{Pairwise distances between two different point patterns} \description{ Computes the distances between pairs of points taken from two different point patterns. } \usage{ \method{crossdist}{ppp}(X, Y, \dots, periodic=FALSE, method="C", squared=FALSE) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. } \details{ Given two point patterns, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}} for point patterns (objects of class \code{"ppp"}). This function expects two point patterns \code{X} and \code{Y}, and returns the matrix whose \code{[i,j]} entry is the distance from \code{X[i]} to \code{Y[j]}. Alternatively if \code{periodic=TRUE}, then provided the windows containing \code{X} and \code{Y} are identical and are rectangular, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite edges of the rectangle are regarded as equivalent. This is meaningless if the window is not a rectangle. 
The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by a factor of 4. } \seealso{ \code{\link{crossdist}}, \code{\link{crossdist.default}}, \code{\link{crossdist.psp}}, \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ data(cells) d <- crossdist(cells, runifpoint(6)) d <- crossdist(cells, runifpoint(6), periodic=TRUE) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/anyNA.im.Rd0000644000176200001440000000167313160710571014601 0ustar liggesusers\name{anyNA.im} \alias{anyNA.im} \title{ Check Whether Image Contains NA Values } \description{ Checks whether any pixel values in a pixel image are \code{NA} (meaning that the pixel lies outside the domain of definition of the image). } \usage{ \method{anyNA}{im}(x, recursive = FALSE) } \arguments{ \item{x}{ A pixel image (object of class \code{"im"}). } \item{recursive}{ Ignored. } } \details{ The function \code{\link{anyNA}} is generic: \code{anyNA(x)} is a faster alternative to \code{any(is.na(x))}. This function \code{anyNA.im} is a method for the generic \code{anyNA} defined for pixel images. It returns the value \code{TRUE} if any of the pixel values in \code{x} are \code{NA}, and and otherwise returns \code{FALSE}. } \value{ A single logical value. } \author{ \spatstatAuthors. } \seealso{ \code{\link{im.object}} } \examples{ anyNA(as.im(letterR)) } \keyword{spatial} \keyword{methods} spatstat/man/Hybrid.Rd0000644000176200001440000000634413160710571014410 0ustar liggesusers\name{Hybrid} \alias{Hybrid} \title{ Hybrid Interaction Point Process Model } \description{ Creates an instance of a hybrid point process model which can then be fitted to point pattern data. } \usage{ Hybrid(...) 
} \arguments{ \item{\dots}{ Two or more interactions (objects of class \code{"interact"}) or objects which can be converted to interactions. See Details. } } \details{ A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2013) is a point process model created by combining two or more point process models, or an interpoint interaction created by combining two or more interpoint interactions. The \emph{hybrid} of two point processes, with probability densities \eqn{f(x)} and \eqn{g(x)} respectively, is the point process with probability density \deqn{h(x) = c \, f(x) \, g(x)}{h(x) = c * f(x) * g(x)} where \eqn{c} is a normalising constant. Equivalently, the hybrid of two point processes with conditional intensities \eqn{\lambda(u,x)}{lambda(u,x)} and \eqn{\kappa(u,x)}{kappa(u,x)} is the point process with conditional intensity \deqn{ \phi(u,x) = \lambda(u,x) \, \kappa(u,x). }{ phi(u,x) = lambda(u,x) * kappa(u,x). } The hybrid of \eqn{m > 3} point processes is defined in a similar way. The function \code{\link{ppm}}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of a hybrid interaction is yielded by the function \code{Hybrid()}. The arguments \code{\dots} will be interpreted as interpoint interactions (objects of class \code{"interact"}) and the result will be the hybrid of these interactions. Each argument must either be an interpoint interaction (object of class \code{"interact"}), or a point process model (object of class \code{"ppm"}) from which the interpoint interaction will be extracted. The arguments \code{\dots} may also be given in the form \code{name=value}. This is purely cosmetic: it can be used to attach simple mnemonic names to the component interactions, and makes the printed output from \code{\link{print.ppm}} neater. 
} \value{ An object of class \code{"interact"} describing an interpoint interaction structure. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \url{http://www.jstatsoft.org/v55/i11/} } \seealso{ \code{\link{ppm}} } \examples{ Hybrid(Strauss(0.1), Geyer(0.2, 3)) Hybrid(Ha=Hardcore(0.05), St=Strauss(0.1), Ge=Geyer(0.2, 3)) fit <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2))) fit ctr <- rmhcontrol(nrep=5e4, expand=1) plot(simulate(fit, control=ctr)) # hybrid components can be models (including hybrid models) Hybrid(fit, S=Softcore(0.5)) # plot.fii only works if every component is a pairwise interaction data(swedishpines) fit2 <- ppm(swedishpines, ~1, Hybrid(DG=DiggleGratton(2,10), S=Strauss(5))) plot(fitin(fit2)) plot(fitin(fit2), separate=TRUE, mar.panel=rep(4,4)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/plot.quadratcount.Rd0000644000176200001440000000441013160710621016642 0ustar liggesusers\name{plot.quadratcount} \alias{plot.quadratcount} \title{ Plot Quadrat Counts } \description{ Given a table of quadrat counts for a spatial point pattern, plot the quadrats which were used, and display the quadrat count as text in the centre of each quadrat. } \usage{ \method{plot}{quadratcount}(x, \dots, add = FALSE, entries = as.vector(t(as.table(x))), dx = 0, dy = 0, show.tiles = TRUE, textargs = list()) } \arguments{ \item{x}{ Object of class \code{"quadratcount"} produced by the function \code{\link{quadratcount}}. } \item{\dots}{ Additional arguments passed to \code{\link{plot.tess}} to plot the quadrats. } \item{add}{ Logical. Whether to add the graphics to an existing plot. } \item{entries}{ Vector of numbers to be plotted in each quadrat. The default is to plot the quadrat counts. } \item{dx,dy}{ Horizontal and vertical displacement of text relative to centroid of quadrat. 
} \item{show.tiles}{ Logical value indicating whether to plot the quadrats. } \item{textargs}{ List containing extra arguments passed to \code{\link[graphics]{text.default}} to control the annotation. } } \details{ This is the plot method for the objects of class \code{"quadratcount"} that are produced by the function \code{\link{quadratcount}}. Given a spatial point pattern, \code{\link{quadratcount}} divides the observation window into disjoint tiles or quadrats, counts the number of points in each quadrat, and stores the result as a contingency table which also belongs to the class \code{"quadratcount"}. First the quadrats are plotted (provided \code{show.tiles=TRUE}, the default). This display can be controlled by passing additional arguments \code{\dots} to \code{\link{plot.tess}}. Then the quadrat counts are printed using \code{\link[graphics]{text.default}}. This display can be controlled using the arguments \code{dx,dy} and \code{textargs}. } \value{ Null. } \seealso{ \code{\link{quadratcount}}, \code{\link{plot.tess}}, \code{\link[graphics]{text.default}}, \code{\link{plot.quadrattest}}. } \examples{ plot(quadratcount(swedishpines, 5)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/padimage.Rd0000644000176200001440000000320513160710621014723 0ustar liggesusers\name{padimage} \alias{padimage} \title{ Pad the Border of a Pixel Image } \description{ Fills the border of a pixel image with a given value or values, or extends a pixel image to fill a larger window. } \usage{ padimage(X, value=NA, n=1, W=NULL) } \arguments{ \item{X}{ Pixel image (object of class \code{"im"}). } \item{value}{ Single value to be placed around the border of \code{X}. } \item{n}{ Width of border, in pixels. See Details. } \item{W}{ Window for the resulting image. Incompatible with \code{n}. } } \details{ The image \code{X} will be expanded by a margin of \code{n} pixels, or extended to fill the window \code{W}, with new pixel values set to \code{value}. 
The argument \code{value} should be a single value (a vector of length 1), normally a value of the same type as the pixel values of \code{X}. It may be \code{NA}. Alternatively if \code{X} is a factor-valued image, \code{value} can be one of the levels of \code{X}. If \code{n} is given, it may be a single number, specifying the width of the border in pixels. Alternatively it may be a vector of length 2 or 4. It will be replicated to length 4, and these numbers will be interpreted as the border widths for the (left, right, top, bottom) margins respectively. Alternatively if \code{W} is given, the image will be extended to the window \code{W}. } \value{ Another object of class \code{"im"}, of the same type as \code{X}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{im}} } \examples{ Z <- setcov(owin()) plot(padimage(Z, 1, 10)) } \keyword{spatial} \keyword{manip} spatstat/man/affine.im.Rd0000644000176200001440000000272713160710571015024 0ustar liggesusers\name{affine.im} \alias{affine.im} \title{Apply Affine Transformation To Pixel Image} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a pixel image. } \usage{ \method{affine}{im}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X}{Pixel image (object of class \code{"im"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} controlling the pixel resolution of the transformed image. } } \value{ Another pixel image (of class \code{"im"}) representing the result of applying the affine transformation. } \details{ The image is subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and then the result is translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. 
This is a method for the generic function \code{\link{affine}}. } \seealso{ \code{\link{affine}}, \code{\link{affine.ppp}}, \code{\link{affine.psp}}, \code{\link{affine.owin}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ X <- setcov(owin()) stretch <- diag(c(2,3)) Y <- affine(X, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(X, mat=shear) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/localpcf.Rd0000644000176200001440000001272613160710621014747 0ustar liggesusers\name{localpcf} \alias{localpcf} \alias{localpcfinhom} \title{Local pair correlation function} \description{ Computes individual contributions to the pair correlation function from each data point. } \usage{ localpcf(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15) localpcfinhom(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, sigma=NULL, varcov=NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{delta}{ Smoothing bandwidth for pair correlation. The halfwidth of the Epanechnikov kernel. } \item{rmax}{ Optional. Maximum value of distance \eqn{r} for which pair correlation values \eqn{g(r)} should be computed. } \item{nr}{ Optional. Number of values of distance \eqn{r} for which pair correlation \eqn{g(r)} should be computed. } \item{stoyan}{ Optional. The value of the constant \eqn{c} in Stoyan's rule of thumb for selecting the smoothing bandwidth \code{delta}. } \item{lambda}{ Optional. Values of the estimated intensity function, for the inhomogeneous pair correlation. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. 
} \item{sigma,varcov,\dots}{ These arguments are ignored by \code{localpcf} but are passed by \code{localpcfinhom} (when \code{lambda=NULL}) to the function \code{\link{density.ppp}} to control the kernel smoothing estimation of \code{lambda}. } } \details{ \code{localpcf} computes the contribution, from each individual data point in a point pattern \code{X}, to the empirical pair correlation function of \code{X}. These contributions are sometimes known as LISA (local indicator of spatial association) functions based on pair correlation. \code{localpcfinhom} computes the corresponding contribution to the \emph{inhomogeneous} empirical pair correlation function of \code{X}. Given a spatial point pattern \code{X}, the local pcf \eqn{g_i(r)}{g[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ g_i(r) = \frac a {2 \pi n} \sum_j k(d_{i,j} - r) }{ g[i](r) = (a/(2 * pi * n)) * sum[j] k(d[i,j] - r) } where the sum is over all points \eqn{j \neq i}{j != i}, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{d_{ij}}{d[i,j]} is the distance between points \code{i} and \code{j}. Here \code{k} is the Epanechnikov kernel, \deqn{ k(t) = \frac 3 { 4\delta} \max(0, 1 - \frac{t^2}{\delta^2}). }{ k(t) = (3/(4*delta)) * max(0, 1 - t^2/delta^2). } Edge correction is performed using the border method (for the sake of computational efficiency): the estimate \eqn{g_i(r)}{g[i](r)} is set to \code{NA} if \eqn{r > b_i}{r > b[i]}, where \eqn{b_i}{b[i]} is the distance from point \eqn{i} to the boundary of the observation window. The smoothing bandwidth \eqn{\delta}{delta} may be specified. If not, it is chosen by Stoyan's rule of thumb \eqn{\delta = c/\sqrt{\hat\lambda}}{delta = c/sqrt(lambda)} where \eqn{\hat\lambda = n/a}{lambda = n/a} is the estimated intensity and \eqn{c} is a constant, usually taken to be 0.15. The value of \eqn{c} is controlled by the argument \code{stoyan}. 
For \code{localpcfinhom}, the optional argument \code{lambda} specifies the values of the estimated intensity function. If \code{lambda} is given, it should be either a numeric vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If \code{lambda} is not given, then it will be estimated using a leave-one-out kernel density smoother as described in \code{\link{pcfinhom}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the local pair correlation functions have been estimated } \item{theo}{the theoretical value \eqn{g(r) = 1}{g(r) = 1} for a stationary Poisson process } together with columns containing the values of the local pair correlation function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. 
} \seealso{ \code{\link{localK}}, \code{\link{localKinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}} } \examples{ data(ponderosa) X <- ponderosa g <- localpcf(X, stoyan=0.5) colo <- c(rep("grey", npoints(X)), "blue") a <- plot(g, main=c("local pair correlation functions", "Ponderosa pines"), legend=FALSE, col=colo, lty=1) # plot only the local pair correlation function for point number 7 plot(g, est007 ~ r) gi <- localpcfinhom(X, stoyan=0.5) a <- plot(gi, main=c("inhomogeneous local pair correlation functions", "Ponderosa pines"), legend=FALSE, col=colo, lty=1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/tileindex.Rd0000644000176200001440000000250413160710621015142 0ustar liggesusers\name{tileindex} \alias{tileindex} \title{ Determine Which Tile Contains Each Given Point } \description{ Given a tessellation and a list of spatial points, determine which tile of the tessellation contains each of the given points. } \usage{ tileindex(x, y, Z) } \arguments{ \item{x,y}{ Spatial coordinates. Numeric vectors of equal length. } \item{Z}{ A tessellation (object of class \code{"tess"}). } } \details{ This function determines which tile of the tessellation \code{Z} contains each of the spatial points with coordinates \code{(x[i],y[i])}. The result is a factor, of the same length as \code{x} and \code{y}, indicating which tile contains each point. The levels of the factor are the names of the tiles of \code{Z}. Values are \code{NA} if the corresponding point lies outside the tessellation. } \value{ A factor, of the same length as \code{x} and \code{y}, whose levels are the names of the tiles of \code{Z}. } \author{ \spatstatAuthors } \seealso{ \code{\link{cut.ppp}} and \code{\link{split.ppp}} to divide up the points of a point pattern according to a tessellation. \code{\link{as.function.tess}} to create a function whose value is the tile index. 
} \examples{ X <- runifpoint(7) V <- dirichlet(X) tileindex(0.1, 0.4, V) } \keyword{spatial} \keyword{manip} spatstat/man/bdist.points.Rd0000644000176200001440000000241113160710571015576 0ustar liggesusers\name{bdist.points} \alias{bdist.points} \title{Distance to Boundary of Window} \description{ Computes the distances from each point of a point pattern to the boundary of the window. } \usage{ bdist.points(X) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} } \value{ A numeric vector, giving the distances from each point of the pattern to the boundary of the window. } \details{ This function computes, for each point \eqn{x_i}{x[i]} in the point pattern \code{X}, the shortest distance \eqn{d(x_i, W^c)}{dist(x[i], W')} from \eqn{x_i}{x[i]} to the boundary of the window \eqn{W} of observation. If the window \code{Window(X)} is of type \code{"rectangle"} or \code{"polygonal"}, then these distances are computed by analytic geometry and are exact, up to rounding errors. If the window is of type \code{"mask"} then the distances are computed using the real-valued distance transform, which is an approximation with maximum error equal to the width of one pixel in the mask. } \seealso{ \code{\link{bdist.pixels}}, \code{\link{bdist.tiles}}, \code{\link{ppp.object}}, \code{\link{erosion}} } \examples{ data(cells) d <- bdist.points(cells) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/rnoise.Rd0000644000176200001440000000342213160710621014454 0ustar liggesusers\name{rnoise} \alias{rnoise} \title{ Random Pixel Noise } \description{ Generate a pixel image whose pixel values are random numbers following a specified probability distribution. } \usage{ rnoise(rgen = runif, w = square(1), \dots) } \arguments{ \item{rgen}{ Random generator for the pixel values. A function in the \R language. } \item{w}{ Window (region or pixel raster) in which to generate the image. Any data acceptable to \code{\link{as.mask}}. 
} \item{\dots}{ Arguments, matched by name, to be passed to \code{rgen} to specify the parameters of the probability distribution, or passed to \code{\link{as.mask}} to control the pixel resolution. } } \details{ The argument \code{w} could be a window (class \code{"owin"}), a pixel image (class \code{"im"}) or other data. It is first converted to a binary mask by \code{\link{as.mask}} using any relevant arguments in \code{\dots}. Then each pixel inside the window (i.e. with logical value \code{TRUE} in the mask) is assigned a random numerical value by calling the function \code{rgen}. The function \code{rgen} would typically be one of the standard random variable generators like \code{\link{runif}} (uniformly distributed random values) or \code{\link{rnorm}} (Gaussian random values). Its first argument \code{n} is the number of values to be generated. Other arguments to \code{rgen} must be matched by name. } \value{ A pixel image (object of class \code{"im"}). } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{as.mask}}, \code{\link{as.im}}, \code{\link[stats]{Distributions}}. } \examples{ plot(rnoise(), main="Uniform noise") plot(rnoise(rnorm, dimyx=32, mean=2, sd=1), main="White noise") } \keyword{spatial} \keyword{datagen} spatstat/man/simulate.lppm.Rd0000644000176200001440000000403313160710621015746 0ustar liggesusers\name{simulate.lppm} \alias{simulate.lppm} \title{Simulate a Fitted Point Process Model on a Linear Network} \description{ Generates simulated realisations from a fitted Poisson point process model on a linear network. } \usage{ \method{simulate}{lppm}(object, nsim=1, ..., new.coef=NULL, progress=(nsim > 1), drop=FALSE) } \arguments{ \item{object}{ Fitted point process model on a linear network. An object of class \code{"lppm"}. } \item{nsim}{ Number of simulated realisations. } \item{progress}{ Logical flag indicating whether to print progress reports for the sequence of simulations. 
} \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(object)}. } \item{\dots}{ Arguments passed to \code{\link{predict.lppm}} to determine the spatial resolution of the image of the fitted intensity used in the simulation. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"lppm"} of fitted point process models on a linear network. Only Poisson process models are supported so far. Simulations are performed by \code{\link{rpoislpp}}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"lpp"}) on the same linear network as the original data used to fit the model. The result also belongs to the class \code{"solist"}, so that it can be plotted, and the class \code{"timed"}, so that the total computation time is recorded. } \examples{ fit <- lppm(unmark(chicago) ~ y) simulate(fit)[[1]] } \seealso{ \code{\link{lppm}}, \code{\link{rpoislpp}}, \code{\link[stats]{simulate}} } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/model.matrix.ppm.Rd0000644000176200001440000001042213160710621016351 0ustar liggesusers\name{model.matrix.ppm} \alias{model.matrix.ppm} \alias{model.matrix.kppm} \alias{model.matrix.dppm} \alias{model.matrix.lppm} \alias{model.matrix.ippm} \title{Extract Design Matrix from Point Process Model} \description{ Given a point process model that has been fitted to spatial point pattern data, this function extracts the design matrix of the model. 
} \usage{ \method{model.matrix}{ppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{kppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{dppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{lppm}(object, data=model.frame(object, na.action=NULL), \dots, keepNA=TRUE) \method{model.matrix}{ippm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE, irregular=FALSE) } \arguments{ \item{object}{ The fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"ippm"} or \code{"lppm"}. } \item{data}{ A model frame, containing the data required for the Berman-Turner device. } \item{Q}{ A point pattern (class \code{"ppp"}) or quadrature scheme (class \code{"quad"}) specifying new locations where the covariates should be computed. } \item{keepNA}{ Logical. Determines whether rows containing NA values will be deleted or retained. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } \item{irregular}{ Logical value indicating whether to include the irregular score components. } } \details{ These commands are methods for the generic function \code{\link{model.matrix}}. They extract the design matrix of a spatial point process model (class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"lppm"}). More precisely, this command extracts the design matrix of the generalised linear model associated with a spatial point process model. The \code{object} must be a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"lppm"}) fitted to spatial point pattern data. Such objects are produced by the model-fitting functions \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}} and \code{\link{lppm}}. 
The methods \code{model.matrix.ppm}, \code{model.matrix.kppm}, \code{model.matrix.dppm} and \code{model.matrix.lppm} extract the model matrix for the GLM. The result is a matrix, with one row for every quadrature point in the fitting procedure, and one column for every constructed covariate in the design matrix. If there are \code{NA} values in the covariates, the argument \code{keepNA} determines whether to retain or delete the corresponding rows of the model matrix. The default \code{keepNA=TRUE} is to retain them. Note that this differs from the default behaviour of many other methods for \code{model.matrix}, which typically delete rows containing \code{NA}. The quadrature points themselves can be extracted using \code{\link{quad.ppm}}. } \value{ A matrix. Columns of the matrix are canonical covariates in the model. Rows of the matrix correspond to quadrature points in the fitting procedure (provided \code{keepNA=TRUE}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{model.matrix}}, \code{\link{model.images}}, \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}}, \code{\link{lppm}}, \code{\link{ippm}}, \code{\link{ppm.object}}, \code{\link{quad.ppm}}, \code{\link{residuals.ppm}} } \examples{ fit <- ppm(cells ~ x) head(model.matrix(fit)) model.matrix(fit, Q=runifpoint(5)) kfit <- kppm(redwood ~ x, "Thomas") m <- model.matrix(kfit) } \keyword{spatial} \keyword{models} spatstat/man/timed.Rd0000644000176200001440000000561713160710621014267 0ustar liggesusers\name{timed} \alias{timed} \title{ Record the Computation Time } \description{ Saves the result of a calculation as an object of class \code{"timed"} which includes information about the time taken to compute the result. The computation time is printed when the object is printed. } \usage{ timed(x, ..., starttime = NULL, timetaken = NULL) } \arguments{ \item{x}{ An expression to be evaluated, or an object that has already been evaluated. 
} \item{starttime}{ The time at which the computation is defined to have started. The default is the current time. Ignored if \code{timetaken} is given. } \item{timetaken}{ The length of time taken to perform the computation. The default is the time taken to evaluate \code{x}. } \item{\dots}{ Ignored. } } \details{ This is a simple mechanism for recording how long it takes to perform complicated calculations (usually for the purposes of reporting in a publication). If \code{x} is an expression to be evaluated, \code{timed(x)} evaluates the expression and measures the time taken to evaluate it. The result is saved as an object of the class \code{"timed"}. Printing this object displays the computation time. If \code{x} is an object which has already been computed, then the time taken to compute the object can be specified either directly by the argument \code{timetaken}, or indirectly by the argument \code{starttime}. \itemize{ \item \code{timetaken} is the duration of time taken to perform the computation. It should be the difference of two clock times returned by \code{\link{proc.time}}. Typically the user sets \code{begin <- proc.time()} before commencing the calculations, then \code{end <- proc.time()} after completing the calculations, and then sets \code{timetaken <- end - begin}. \item \code{starttime} is the clock time at which the computation started. It should be a value that was returned by \code{\link{proc.time}} at some earlier time when the calculations commenced. When \code{timed} is called, the computation time will be taken as the difference between the current clock time and \code{starttime}. Typically the user sets \code{begin <- proc.time()} before commencing the calculations, and when the calculations are completed, the user calls \code{result <- timed(result, starttime=begin)}. 
} If the result of evaluating \code{x} belongs to other S3 classes, then the result of \code{timed(x, \dots)} also inherits these classes, and printing the object will display the appropriate information for these classes as well. } \value{ An object inheriting the class \code{"timed"}. } \examples{ timed(clarkevans(cells)) timed(Kest(cells)) answer <- timed(42, timetaken=4.1e17) answer } \seealso{ \code{\link{timeTaken}} to extract the time taken. } \author{ \spatstatAuthors. } \keyword{utilities} spatstat/man/latest.news.Rd0000644000176200001440000000254413160710621015430 0ustar liggesusers\name{latest.news} \alias{latest.news} \title{ Print News About Latest Version of Package } \description{ Prints the news documentation for the current version of \code{spatstat} or another specified package. } \usage{ latest.news(package = "spatstat", doBrowse=FALSE) } \arguments{ \item{package}{ Name of package for which the latest news should be printed. } \item{doBrowse}{ Logical value indicating whether to display the results in a browser window instead of printing them. } } \details{ By default, this function prints the news documentation about changes in the current installed version of the \pkg{spatstat} package. The function can be called simply by typing its name without parentheses (see the Examples). If \code{package} is given, then the function reads the news for the specified package from its \code{NEWS} file (if it has one) and prints only the entries that refer to the current version of the package. To see the news for all previous versions as well as the current version, use the \R utility \code{\link[utils]{news}}. See the Examples. } \value{ Null. 
} \author{ \adrian and \rolf } \seealso{ \code{\link[utils]{news}}, \code{\link{bugfixes}} } \examples{ if(interactive()) { # current news latest.news # all news news(package="spatstat") } } \keyword{documentation} spatstat/man/quantile.ewcdf.Rd0000644000176200001440000000406013160710621016065 0ustar liggesusers\name{quantile.ewcdf} \alias{quantile.ewcdf} \title{ Quantiles of Weighted Empirical Cumulative Distribution Function } \description{ Compute quantiles of a weighted empirical cumulative distribution function. } \usage{ \method{quantile}{ewcdf}(x, probs = seq(0, 1, 0.25), names = TRUE, \dots, normalise = TRUE, type=1) } \arguments{ \item{x}{ A weighted empirical cumulative distribution function (object of class \code{"ewcdf"}, produced by \code{\link{ewcdf}}) for which the quantiles are desired. } \item{probs}{ probabilities for which the quantiles are desired. A numeric vector of values between 0 and 1. } \item{names}{ Logical. If \code{TRUE}, the resulting vector of quantiles is annotated with names corresponding to \code{probs}. } \item{\dots}{ Ignored. } \item{normalise}{ Logical value indicating whether \code{x} should first be normalised so that it ranges between 0 and 1. } \item{type}{ Integer specifying the type of quantile to be calculated, as explained in \code{\link[stats]{quantile.default}}. Only types 1 and 2 are currently implemented. } } \details{ This is a method for the generic \code{\link[stats]{quantile}} function for the class \code{ewcdf} of empirical weighted cumulative distribution functions. The quantile for a probability \code{p} is computed as the right-continuous inverse of the cumulative distribution function \code{x} (assuming \code{type=1}, the default). If \code{normalise=TRUE} (the default), the weighted cumulative function \code{x} is first normalised to have total mass \code{1} so that it can be interpreted as a cumulative probability distribution function. 
} \value{ Numeric vector of quantiles, of the same length as \code{probs}. } \seealso{ \code{\link{ewcdf}}, \code{\link[stats]{quantile}} } \examples{ z <- rnorm(50) w <- runif(50) Fun <- ewcdf(z, w) quantile(Fun, c(0.95,0.99)) } \author{ \spatstatAuthors and Kevin Ummel. } \keyword{spatial} \keyword{nonparametric} spatstat/man/lpp.Rd0000644000176200001440000000727313160710621013760 0ustar liggesusers\name{lpp} \alias{lpp} \title{ Create Point Pattern on Linear Network } \description{ Creates an object of class \code{"lpp"} that represents a point pattern on a linear network. } \usage{ lpp(X, L, \dots) } \arguments{ \item{X}{ Locations of the points. A matrix or data frame of coordinates, or a point pattern object (of class \code{"ppp"}) or other data acceptable to \code{\link{as.ppp}}. } \item{L}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{ Ignored. } } \details{ This command creates an object of class \code{"lpp"} that represents a point pattern on a linear network. Normally \code{X} is a point pattern. The points of \code{X} should lie on the lines of \code{L}. Alternatively \code{X} may be a matrix or data frame containing at least two columns. \itemize{ \item Usually the first two columns of \code{X} will be interpreted as spatial coordinates, and any remaining columns as marks. \item An exception occurs if \code{X} is a data frame with columns named \code{x}, \code{y}, \code{seg} and \code{tp}. Then \code{x} and \code{y} will be interpreted as spatial coordinates, and \code{seg} and \code{tp} as local coordinates, with \code{seg} indicating which line segment of \code{L} the point lies on, and \code{tp} indicating how far along the segment the point lies (normalised to 1). Any remaining columns will be interpreted as marks. \item Another exception occurs if \code{X} is a data frame with columns named \code{seg} and \code{tp}. 
Then \code{seg} and \code{tp} will be interpreted as local coordinates, as above, and the spatial coordinates \code{x,y} will be computed from them. Any remaining columns will be interpreted as marks. } If \code{X} is missing or \code{NULL}, the result is an empty point pattern (i.e. containing no points). } \section{Note on changed format}{ The internal format of \code{"lpp"} objects was changed in \pkg{spatstat} version \code{1.28-0}. Objects in the old format are still handled correctly, but computations are faster in the new format. To convert an object \code{X} from the old format to the new format, use \code{X <- lpp(as.ppp(X), as.linnet(X))}. } \value{ An object of class \code{"lpp"}. Also inherits the class \code{"ppx"}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ Installed datasets which are \code{"lpp"} objects: \code{\link[spatstat.data]{chicago}}, \code{\link[spatstat.data]{dendrite}}, \code{\link[spatstat.data]{spiders}}. See \code{\link{as.lpp}} for converting data to an \code{lpp} object. See \code{\link{methods.lpp}} and \code{\link{methods.ppx}} for other methods applicable to \code{lpp} objects. Calculations on an \code{lpp} object: \code{\link{intensity.lpp}}, \code{\link{distfun.lpp}}, \code{\link{nndist.lpp}}, \code{\link{nnwhich.lpp}}, \code{\link{nncross.lpp}}, \code{\link{nnfun.lpp}}. Summary functions: \code{\link{linearK}}, \code{\link{linearKinhom}}, \code{\link{linearpcf}}, \code{\link{linearKdot}}, \code{\link{linearKcross}}, \code{\link{linearmarkconnect}}, etc. Random point patterns on a linear network can be generated by \code{\link{rpoislpp}} or \code{\link{runiflpp}}. See \code{\link{linnet}} for linear networks. 
} \examples{ # letter 'A' v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7)) edg <- cbind(1:4, 2:5) edg <- rbind(edg, c(2,4)) letterA <- linnet(v, edges=edg) # points on letter A xx <- list(x=c(-1.5,0,0.5,1.5), y=c(1.5,3,4.5,1.5)) X <- lpp(xx, letterA) plot(X) X summary(X) # empty pattern lpp(L=letterA) } \keyword{spatial} spatstat/man/print.owin.Rd0000644000176200001440000000155713160710621015273 0ustar liggesusers\name{print.owin} \alias{print.owin} \title{Print Brief Details of a Spatial Window} \description{ Prints a very brief description of a window object. } \usage{ \method{print}{owin}(x, \dots, prefix="window: ") } \arguments{ \item{x}{Window (object of class \code{"owin"}).} \item{\dots}{Ignored.} \item{prefix}{Character string to be printed at the start of the output.} } \details{ A very brief description of the window \code{x} is printed. This is a method for the generic function \code{\link{print}}. } \seealso{ \code{\link{print}}, \code{\link{print.ppp}}, \code{\link{summary.owin}} } \examples{ owin() # the unit square data(demopat) W <- Window(demopat) W # just says it is polygonal as.mask(W) # just says it is a binary image } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} spatstat/man/rpoispp.Rd0000644000176200001440000001373613160710621014662 0ustar liggesusers\name{rpoispp} \alias{rpoispp} \title{Generate Poisson Point Pattern} \description{ Generate a random point pattern using the (homogeneous or inhomogeneous) Poisson process. Includes CSR (complete spatial randomness). } \usage{ rpoispp(lambda, lmax=NULL, win=owin(), \dots, nsim=1, drop=TRUE, ex=NULL, warnwin=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. Either a single positive number, a \code{function(x,y, \dots)}, or a pixel image. } \item{lmax}{ Optional. An upper bound for the value of \code{lambda(x,y)}, if \code{lambda} is a function. } \item{win}{ Window in which to simulate the pattern. 
An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Ignored if \code{lambda} is a pixel image. } \item{\dots}{ Arguments passed to \code{lambda} if it is a function. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{ex}{ Optional. A point pattern to use as the example. If \code{ex} is given and \code{lambda,lmax,win} are missing, then \code{lambda} and \code{win} will be calculated from the point pattern \code{ex}. } \item{warnwin}{ Logical value specifying whether to issue a warning when \code{win} is ignored (which occurs when \code{lambda} is an image and \code{win} is present). } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ If \code{lambda} is a single number, then this algorithm generates a realisation of the uniform Poisson process (also known as Complete Spatial Randomness, CSR) inside the window \code{win} with intensity \code{lambda} (points per unit area). If \code{lambda} is a function, then this algorithm generates a realisation of the inhomogeneous Poisson process with intensity function \code{lambda(x,y,\dots)} at spatial location \code{(x,y)} inside the window \code{win}. The function \code{lambda} must work correctly with vectors \code{x} and \code{y}. If \code{lmax} is given, it must be an upper bound on the values of \code{lambda(x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. That is, we must have \code{lambda(x,y,\dots) <= lmax} for all locations \code{(x,y)}. If this is not true then the results of the algorithm will be incorrect. 
If \code{lmax} is missing or \code{NULL}, an approximate upper bound is computed by finding the maximum value of \code{lambda(x,y,\dots)} on a grid of locations \code{(x,y)} inside the window \code{win}, and adding a safety margin equal to 5 percent of the range of \code{lambda} values. This can be computationally intensive, so it is advisable to specify \code{lmax} if possible. If \code{lambda} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), this algorithm generates a realisation of the inhomogeneous Poisson process with intensity equal to the pixel values of the image. (The value of the intensity function at an arbitrary location is the pixel value of the nearest pixel.) The argument \code{win} is ignored; the window of the pixel image is used instead. It will be converted to a rectangle if possible, using \code{\link{rescue.rectangle}}. To generate an inhomogeneous Poisson process the algorithm uses ``thinning'': it first generates a uniform Poisson process of intensity \code{lmax}, then randomly deletes or retains each point, independently of other points, with retention probability \eqn{p(x,y) = \lambda(x,y)/\mbox{lmax}}{p(x,y) = lambda(x,y)/lmax}. For \emph{marked} point patterns, use \code{\link{rmpoispp}}. } \section{Warning}{ Note that \code{lambda} is the \bold{intensity}, that is, the expected number of points \bold{per unit area}. The total number of points in the simulated pattern will be random with expected value \code{mu = lambda * a} where \code{a} is the area of the window \code{win}. } \section{Reproducibility}{ The simulation algorithm, for the case where \code{lambda} is a pixel image, was changed in \pkg{spatstat} version \code{1.42-3}. Set \code{spatstat.options(fastpois=FALSE)} to use the previous, slower algorithm, if it is desired to reproduce results obtained with earlier versions. 
} \seealso{ \code{\link{rmpoispp}} for Poisson \emph{marked} point patterns, \code{\link{runifpoint}} for a fixed number of independent uniform random points; \code{\link{rpoint}}, \code{\link{rmpoint}} for a fixed number of independent random points with any distribution; \code{\link{rMaternI}}, \code{\link{rMaternII}}, \code{\link{rSSI}}, \code{\link{rStrauss}}, \code{\link{rstrat}} for random point processes with spatial inhibition or regularity; \code{\link{rThomas}}, \code{\link{rGaussPoisson}}, \code{\link{rMatClust}}, \code{\link{rcell}} for random point processes exhibiting clustering; \code{\link{rmh.default}} for Gibbs processes. See also \code{\link{ppp.object}}, \code{\link{owin.object}}. } \examples{ # uniform Poisson process with intensity 100 in the unit square pp <- rpoispp(100) # uniform Poisson process with intensity 1 in a 10 x 10 square pp <- rpoispp(1, win=owin(c(0,10),c(0,10))) # plots should look similar ! # inhomogeneous Poisson process in unit square # with intensity lambda(x,y) = 100 * exp(-3*x) # Intensity is bounded by 100 pp <- rpoispp(function(x,y) {100 * exp(-3*x)}, 100) # How to tune the coefficient of x lamb <- function(x,y,a) { 100 * exp( - a * x)} pp <- rpoispp(lamb, 100, a=3) # pixel image Z <- as.im(function(x,y){100 * sqrt(x+y)}, unit.square()) pp <- rpoispp(Z) # randomising an existing point pattern rpoispp(intensity(cells), win=Window(cells)) rpoispp(ex=cells) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{datagen} spatstat/man/Kmulti.inhom.Rd0000644000176200001440000002621613160710571015545 0ustar liggesusers\name{Kmulti.inhom} \alias{Kmulti.inhom} \title{ Inhomogeneous Marked K-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{K} function which counts the expected number of points of subset \eqn{J} within a given distance from a typical point in subset \code{I}, adjusted for spatially varying intensity. 
} \usage{ Kmulti.inhom(X, I, J, lambdaI=NULL, lambdaJ=NULL, \dots, r=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), lambdaIJ=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. See Details. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. See Details. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process \code{X[I]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[I]}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location, } \item{lambdaJ}{ Optional. Values of the estimated intensity of the sub-process \code{X[J]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[J]}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{Ignored.} \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. 
} \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{lambdaIJ}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdaJ} for each pair of points, the first point belonging to subset \code{I} and the second point to subset \code{J}. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdaJ}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdaJ} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{IJ}(r)}{KIJ(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)} obtained by the edge corrections named. } \details{ The function \code{Kmulti.inhom} is the counterpart, for spatially-inhomogeneous marked point patterns, of the multitype \eqn{K} function \code{\link{Kmulti}}. Suppose \eqn{X} is a marked point process, with marks of any kind. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are two sub-processes, possibly overlapping. Typically \eqn{X_I}{X[I]} would consist of those points of \eqn{X} whose marks lie in a specified range of mark values, and similarly for \eqn{X_J}{X[J]}. Suppose that \eqn{\lambda_I(u)}{lambdaI(u)}, \eqn{\lambda_J(u)}{lambdaJ(u)} are the spatially-varying intensity functions of \eqn{X_I}{X[I]} and \eqn{X_J}{X[J]} respectively. Consider all the pairs of points \eqn{(u,v)} in the point process \eqn{X} such that the first point \eqn{u} belongs to \eqn{X_I}{X[I]}, the second point \eqn{v} belongs to \eqn{X_J}{X[J]}, and the distance between \eqn{u} and \eqn{v} is less than a specified distance \eqn{r}. Give this pair \eqn{(u,v)} the numerical weight \eqn{1/(\lambda_I(u)\lambda_J(v))}{1/(lambdaI(u) lambdaJ(v))}. Calculate the sum of these weights over all pairs of points as described. This sum (after appropriate edge-correction and normalisation) is the estimated inhomogeneous multitype \eqn{K} function. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. 
The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process identified by index \code{I}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the intensity of \code{X[I]} at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the intensity of \code{X[I]} evaluated only at the data points of \code{X[I]}. The length of this vector must equal the number of points in \code{X[I]}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). 
The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly \code{lambdaJ} supplies the values of the intensity of the sub-process identified by index \code{J}. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdaJ} will be ignored. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Biases due to edge effects are treated in the same manner as in \code{\link{Kinhom}}. The edge corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is currently implemented only for rectangular windows. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } } The pair correlation function \code{\link{pcf}} can also be applied to the result of \code{Kmulti.inhom}. } \references{ Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. 
and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. } \seealso{ \code{\link{Kmulti}}, \code{\link{Kdot.inhom}}, \code{\link{Kcross.inhom}}, \code{\link{pcf}} } \examples{ # Finnish Pines data: marked by diameter and height plot(finpines, which.marks="height") II <- (marks(finpines)$height <= 2) JJ <- (marks(finpines)$height > 3) K <- Kmulti.inhom(finpines, II, JJ) plot(K) # functions determining subsets f1 <- function(X) { marks(X)$height <= 2 } f2 <- function(X) { marks(X)$height > 3 } K <- Kmulti.inhom(finpines, f1, f2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/pointsOnLines.Rd0000644000176200001440000000320113160710621015754 0ustar liggesusers\name{pointsOnLines} \alias{pointsOnLines} \title{Place Points Evenly Along Specified Lines} \description{ Given a line segment pattern, place a series of points at equal distances along each line segment. } \usage{ pointsOnLines(X, eps = NULL, np = 1000, shortok=TRUE) } \arguments{ \item{X}{A line segment pattern (object of class \code{"psp"}).} \item{eps}{Spacing between successive points.} \item{np}{Approximate total number of points (incompatible with \code{eps}).} \item{shortok}{ Logical. If \code{FALSE}, very short segments (of length shorter than \code{eps}) will not generate any points. If \code{TRUE}, a very short segment will be represented by its midpoint. } } \details{ For each line segment in the pattern \code{X}, a succession of points is placed along the line segment. These points are equally spaced at a distance \code{eps}, except for the first and last points in the sequence. The spacing \code{eps} is measured in coordinate units of \code{X}. If \code{eps} is not given, then it is determined by \code{eps = len/np} where \code{len} is the total length of the segments in \code{X}. The actual number of points will then be slightly larger than \code{np}. 
} \value{ A point pattern (object of class \code{"ppp"}) in the same window as \code{X}. } \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{runifpointOnLines}} } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) Y <- pointsOnLines(X, eps=0.05) plot(X, main="") plot(Y, add=TRUE, pch="+") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/marks.tess.Rd0000644000176200001440000000412513160710621015250 0ustar liggesusers\name{marks.tess} \alias{marks.tess} \alias{marks<-.tess} \alias{unmark.tess} \title{Marks of a Tessellation} \description{ Extract or change the marks attached to the tiles of a tessellation. } \usage{ \method{marks}{tess}(x, \dots) \method{marks}{tess}(x, \dots) <- value \method{unmark}{tess}(X) } \arguments{ \item{x,X}{ Tessellation (object of class \code{"tess"}). } \item{\dots}{ Ignored. } \item{value}{ Vector or data frame of mark values, or \code{NULL}. } } \value{ For \code{marks(x)}, the result is a vector, factor or data frame, containing the mark values attached to the tiles of \code{x}. If there are no marks, the result is \code{NULL}. For \code{unmark(x)}, the result is the tessellation without marks. For \code{marks(x) <- value}, the result is the updated tessellation \code{x} (with the side-effect that the dataset \code{x} is updated in the current environment). } \details{ These functions extract or change the marks attached to each of the tiles in the tessellation \code{x}. They are methods for the generic functions \code{\link{marks}} and \code{\link{marks<-}} for the class \code{"tess"} of tessellations. The expression \code{marks(x)} extracts the marks of \code{x}. The assignment \code{marks(x) <- value} assigns new marks to the dataset \code{x}, and updates the dataset \code{x} in the current environment. The marks can be a vector, a factor, or a data frame. 
For the assignment \code{marks(x) <- value}, the \code{value} should be a vector or factor of length equal to the number of tiles in \code{x}, or a data frame with as many rows as there are tiles in \code{x}. If \code{value} is a single value, or a data frame with one row, then it will be replicated so that the same marks will be attached to each tile. To remove marks, use \code{marks(x) <- NULL} or \code{unmark(x)}. } \seealso{ \code{\link{marks}}, \code{\link{marks<-}} } \examples{ D <- dirichlet(cells) marks(D) <- tile.areas(D) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/matchingdist.Rd0000644000176200001440000000751613160710621015643 0ustar liggesusers\name{matchingdist} \alias{matchingdist} \title{Distance for a Point Pattern Matching} \description{ Computes the distance associated with a matching between two point patterns. } \usage{ matchingdist(matching, type = NULL, cutoff = NULL, q = NULL) } \arguments{ \item{matching}{A point pattern matching (an object of class \code{"pppmatching"}).} \item{type}{ A character string giving the type of distance to be computed. One of \code{"spa"}, \code{"ace"} or \code{"mat"}. See details below. } \item{cutoff}{ The value \eqn{> 0} at which interpoint distances are cut off. } \item{q}{ The order of the average that is applied to the interpoint distances. May be \code{Inf}, in which case the maximum of the interpoint distances is taken. } } \details{ Computes the distance specified by \code{type}, \code{cutoff}, and \code{order} for a point matching. If any of these arguments are not provided, the function uses the corresponding elements of \code{matching} (if available). For the type \code{"spa"} (subpattern assignment) it is assumed that the points of the point pattern with the smaller cardinality \eqn{m} are matched to a \eqn{m}-point subpattern of the point pattern with the larger cardinality \eqn{n} in a 1-1 way. 
The distance is then given as the \code{q}-th order average of the \eqn{m} distances between matched points (minimum of Euclidean distance and \code{cutoff}) and \eqn{n-m} "penalty distances" of value \code{cutoff}. For the type \code{"ace"} (assignment only if cardinalities equal) the matching is assumed to be 1-1 if the cardinalities of the point patterns are the same, in which case the \code{q}-th order average of the matching distances (minimum of Euclidean distance and \code{cutoff}) is taken. If the cardinalities are different, the matching may be arbitrary and the distance returned is always equal to \code{cutoff}. For the type \code{mat} (mass transfer) it is assumed that each point of the point pattern with the smaller cardinality \eqn{m} has mass \eqn{1}, each point of the point pattern with the larger cardinality \eqn{n} has mass \eqn{m/n}, and fractions of these masses are matched in such a way that each point contributes exactly its mass. The distance is then given as the \code{q}-th order weighted average of all distances (minimum of Euclidean distance and \code{cutoff}) of (partially) matched points with weights equal to the fractional masses divided by \eqn{m}. If the cardinalities of the two point patterns are equal, \code{matchingdist(m, type, cutoff, q)} yields the same result no matter if \code{type} is \code{"spa"}, \code{"ace"} or \code{"mat"}. } \value{ Numeric value of the distance associated with the matching. 
} \author{ Dominic Schuhmacher \email{dominic.schuhmacher@stat.unibe.ch} \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{pppdist}} \code{\link{pppmatching.object}} } \examples{ # an optimal matching X <- runifpoint(20) Y <- runifpoint(20) m.opt <- pppdist(X, Y) summary(m.opt) matchingdist(m.opt) # is the same as the distance given by summary(m.opt) # sequential nearest neighbour matching # (go through all points of point pattern X in sequence # and match each point with the closest point of Y that is # still unmatched) am <- matrix(0, 20, 20) h <- matrix(c(1:20, rep(0,20)), 20, 2) h[1,2] = nncross(X[1],Y)[1,2] for (i in 2:20) { nn <- nncross(X[i],Y[-h[1:(i-1),2]])[1,2] h[i,2] <- ((1:20)[-h[1:(i-1),2]])[nn] } am[h] <- 1 m.nn <- pppmatching(X, Y, am) matchingdist(m.nn, type="spa", cutoff=1, q=1) # is >= the distance obtained for m.opt # in most cases strictly > \dontrun{ par(mfrow=c(1,2)) plot(m.opt) plot(m.nn) text(X$x, X$y, 1:20, pos=1, offset=0.3, cex=0.8) } } \keyword{spatial} \keyword{math} spatstat/man/diameter.box3.Rd0000644000176200001440000000363113160710571015627 0ustar liggesusers\name{diameter.box3} \Rdversion{1.1} \alias{diameter.box3} \alias{volume.box3} \alias{shortside.box3} \alias{sidelengths.box3} \alias{eroded.volumes.box3} \alias{shortside} \alias{sidelengths} \alias{eroded.volumes} \title{ Geometrical Calculations for Three-Dimensional Box } \description{ Calculates the volume, diameter, shortest side, side lengths, or eroded volume of a three-dimensional box. } \usage{ \method{diameter}{box3}(x) \method{volume}{box3}(x) shortside(x) sidelengths(x) eroded.volumes(x, r) \method{shortside}{box3}(x) \method{sidelengths}{box3}(x) \method{eroded.volumes}{box3}(x, r) } \arguments{ \item{x}{ Three-dimensional box (object of class \code{"box3"}). } \item{r}{ Numeric value or vector of numeric values for which eroded volumes should be calculated. } } \details{ \code{diameter.box3} computes the diameter of the box. 
\code{volume.box3} computes the volume of the box. \code{shortside.box3} finds the shortest of the three side lengths of the box. \code{sidelengths.box3} returns all three side lengths of the box. \code{eroded.volumes} computes, for each entry \code{r[i]}, the volume of the smaller box obtained by removing a slab of thickness \code{r[i]} from each face of the box. This smaller box is the subset consisting of points that lie at least \code{r[i]} units away from the boundary of the box. } \value{ For \code{diameter.box3}, \code{shortside.box3} and \code{volume.box3}, a single numeric value. For \code{sidelengths.box3}, a vector of three numbers. For \code{eroded.volumes}, a numeric vector of the same length as \code{r}. } \author{\adrian and \rolf } \seealso{ \code{\link{as.box3}} } \examples{ X <- box3(c(0,10),c(0,10),c(0,5)) diameter(X) volume(X) sidelengths(X) shortside(X) hd <- shortside(X)/2 eroded.volumes(X, seq(0,hd, length=10)) } \keyword{spatial} \keyword{math} spatstat/man/zclustermodel.Rd0000644000176200001440000000176413160710621016060 0ustar liggesusers\name{zclustermodel} \alias{zclustermodel} \title{ Cluster Point Process Model } \description{ Experimental code. Creates an object representing a cluster point process model. Typically used for theoretical calculations about such a model. } \usage{ zclustermodel(name = "Thomas", \dots, mu, kappa, scale) } \arguments{ \item{name}{ Name of the cluster process. One of \code{"Thomas"}, \code{"MatClust"}, \code{"VarGamma"} or \code{"Cauchy"}. } \item{\dots}{ Other arguments needed for the model. } \item{mu}{ Mean cluster size. A single number, or a pixel image. } \item{kappa}{ Parent intensity. A single number. } \item{scale}{ Cluster scale parameter of the model. } } \details{ Experimental. } \value{ Object of the experimental class \code{"zclustermodel"}. 
} \author{ \adrian } \seealso{ \code{\link{methods.zclustermodel}} } \examples{ m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1) } \keyword{spatial} \keyword{models} spatstat/man/rshift.splitppp.Rd0000644000176200001440000000412613160710621016330 0ustar liggesusers\name{rshift.splitppp} \alias{rshift.splitppp} \title{Randomly Shift a List of Point Patterns} \description{ Randomly shifts each point pattern in a list of point patterns. } \usage{ \method{rshift}{splitppp}(X, \dots, which=seq_along(X)) } \arguments{ \item{X}{ An object of class \code{"splitppp"}. Basically a list of point patterns. } \item{\dots}{ Parameters controlling the generation of the random shift vector and the handling of edge effects. See \code{\link{rshift.ppp}}. } \item{which}{ Optional. Identifies which patterns will be shifted, while other patterns are not shifted. Any valid subset index for \code{X}. } } \value{ Another object of class \code{"splitppp"}. } \details{ This operation applies a random shift to each of the point patterns in the list \code{X}. The function \code{\link{rshift}} is generic. This function \code{rshift.splitppp} is the method for objects of class \code{"splitppp"}, which are essentially lists of point patterns, created by the function \code{\link{split.ppp}}. By default, every pattern in the list \code{X} will be shifted. The argument \code{which} indicates that only some of the patterns should be shifted, while other groups should be left unchanged. \code{which} can be any valid subset index for \code{X}. Each point pattern in the list \code{X} (or each pattern in \code{X[which]}) is shifted by a random displacement vector. The shifting is performed by \code{\link{rshift.ppp}}. See the help page for \code{\link{rshift.ppp}} for details of the other arguments. 
} \seealso{ \code{\link{rshift}}, \code{\link{rshift.ppp}} } \examples{ data(amacrine) Y <- split(amacrine) # random toroidal shift # shift "on" and "off" points separately X <- rshift(Y) # shift "on" points and leave "off" points fixed X <- rshift(Y, which="on") # maximum displacement distance 0.1 units X <- rshift(Y, radius=0.1) # shift with erosion X <- rshift(Y, radius=0.1, edge="erode") } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/rMaternI.Rd0000644000176200001440000000470713160710621014705 0ustar liggesusers\name{rMaternI} \alias{rMaternI} \title{Simulate Matern Model I} \description{ Generate a random point pattern, a simulated realisation of the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model I inhibition process model. } \usage{ rMaternI(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, \dots, nsim=1, drop=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of proposal points. A single positive number. } \item{r}{ Inhibition distance. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Alternatively a higher-dimensional box of class \code{"box3"} or \code{"boxx"}. } \item{stationary}{ Logical. Whether to start with a stationary process of proposal points (\code{stationary=TRUE}) or to generate the proposal points only inside the window (\code{stationary=FALSE}). } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Each point pattern is normally an object of class \code{"ppp"}, but may be of class \code{"pp3"} or \code{"ppx"} depending on the window. 
} \details{ This algorithm generates one or more realisations of \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model I inhibition process inside the window \code{win}. The process is constructed by first generating a uniform Poisson point process of ``proposal'' points with intensity \code{kappa}. If \code{stationary = TRUE} (the default), the proposal points are generated in a window larger than \code{win} that effectively means the proposals are stationary. If \code{stationary=FALSE} then the proposal points are only generated inside the window \code{win}. A proposal point is then deleted if it lies within \code{r} units' distance of another proposal point. Otherwise it is retained. The retained points constitute \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model I. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}} } \examples{ X <- rMaternI(20, 0.05) Y <- rMaternI(20, 0.05, stationary=FALSE) } \author{ \adrian , Ute Hahn, \rolf and \ege } \keyword{spatial} \keyword{datagen} spatstat/man/domain.Rd0000644000176200001440000000707013160710571014433 0ustar liggesusers\name{domain} \alias{domain} \alias{domain.ppp} \alias{domain.psp} \alias{domain.im} \alias{domain.ppx} \alias{domain.pp3} \alias{domain.lpp} \alias{domain.ppm} \alias{domain.kppm} \alias{domain.dppm} \alias{domain.lpp} \alias{domain.lppm} \alias{domain.msr} \alias{domain.quad} \alias{domain.quadratcount} \alias{domain.quadrattest} \alias{domain.tess} \alias{domain.linfun} \alias{domain.lintess} \alias{domain.im} \alias{domain.layered} \alias{domain.distfun} \alias{domain.nnfun} \alias{domain.funxy} \alias{domain.rmhmodel} \alias{domain.leverage.ppm} \alias{domain.influence.ppm} \title{ Extract the Domain of any Spatial Object } \description{ Given a spatial object such as a point pattern, in any number of dimensions, this function extracts the spatial domain in which the object is defined. 
} \usage{ domain(X, \dots) \method{domain}{ppp}(X, \dots) \method{domain}{psp}(X, \dots) \method{domain}{im}(X, \dots) \method{domain}{ppx}(X, \dots) \method{domain}{pp3}(X, \dots) \method{domain}{lpp}(X, \dots) \method{domain}{ppm}(X, \dots, from=c("points", "covariates")) \method{domain}{kppm}(X, \dots, from=c("points", "covariates")) \method{domain}{dppm}(X, \dots, from=c("points", "covariates")) \method{domain}{lpp}(X, \dots) \method{domain}{lppm}(X, \dots) \method{domain}{msr}(X, \dots) \method{domain}{quad}(X, \dots) \method{domain}{quadratcount}(X, \dots) \method{domain}{quadrattest}(X, \dots) \method{domain}{tess}(X, \dots) \method{domain}{linfun}(X, \dots) \method{domain}{lintess}(X, \dots) \method{domain}{im}(X, \dots) \method{domain}{layered}(X, \dots) \method{domain}{distfun}(X, \dots) \method{domain}{nnfun}(X, \dots) \method{domain}{funxy}(X, \dots) \method{domain}{rmhmodel}(X, \dots) \method{domain}{leverage.ppm}(X, \dots) \method{domain}{influence.ppm}(X, \dots) } \arguments{ \item{X}{ A spatial object such as a point pattern (in any number of dimensions), line segment pattern or pixel image. } \item{\dots}{ Extra arguments. They are ignored by all the methods listed here. } \item{from}{Character string. See Details.} } \details{ The function \code{domain} is generic. For a spatial object \code{X} in any number of dimensions, \code{domain(X)} extracts the spatial domain in which \code{X} is defined. For a two-dimensional object \code{X}, typically \code{domain(X)} is the same as \code{Window(X)}. The exception is that, if \code{X} is a point pattern on a linear network (class \code{"lpp"}) or a point process model on a linear network (class \code{"lppm"}), then \code{domain(X)} is the linear network on which the points lie, while \code{Window(X)} is the two-dimensional window containing the linear network. The argument \code{from} applies when \code{X} is a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). 
If \code{from="points"} (the default), \code{domain} extracts the window of the original point pattern data to which the model was fitted. If \code{from="covariates"} then \code{domain} returns the window in which the spatial covariates of the model were provided. } \value{ A spatial object representing the domain of \code{X}. Typically a window (object of class \code{"owin"}), a three-dimensional box (\code{"box3"}), a multidimensional box (\code{"boxx"}) or a linear network (\code{"linnet"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{Window}}, \code{\link{Frame}} } \examples{ domain(cells) domain(bei.extra$elev) domain(chicago) } \keyword{spatial} \keyword{manip} spatstat/man/intensity.Rd0000644000176200001440000000247113160710621015206 0ustar liggesusers\name{intensity} \alias{intensity} \title{ Intensity of a Dataset or a Model } \description{ Generic function for computing the intensity of a spatial dataset or spatial point process model. } \usage{ intensity(X, ...) } \arguments{ \item{X}{ A spatial dataset or a spatial point process model. } \item{\dots}{ Further arguments depending on the class of \code{X}. } } \details{ This is a generic function for computing the intensity of a spatial dataset or spatial point process model. There are methods for point patterns (objects of class \code{"ppp"}) and fitted point process models (objects of class \code{"ppm"}). The empirical intensity of a dataset is the average density (the average amount of \sQuote{stuff} per unit area or volume). The empirical intensity of a point pattern is computed by the method \code{\link{intensity.ppp}}. The theoretical intensity of a stochastic model is the expected density (expected amount of \sQuote{stuff} per unit area or volume). The theoretical intensity of a fitted point process model is computed by the method \code{\link{intensity.ppm}}. } \value{ Usually a numeric value or vector. } \seealso{ \code{\link{intensity.ppp}}, \code{\link{intensity.ppm}}. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/rStraussHard.Rd0000644000176200001440000000731513160710621015607 0ustar liggesusers\name{rStraussHard} \alias{rStraussHard} \title{Perfect Simulation of the Strauss-Hardcore Process} \description{ Generate a random pattern of points, a simulated realisation of the Strauss-Hardcore process, using a perfect simulation algorithm. } \usage{ rStraussHard(beta, gamma = 1, R = 0, H = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ interaction parameter (a number between 0 and 1, inclusive). } \item{R}{ interaction radius (a non-negative number). } \item{H}{ hard core distance (a non-negative number smaller than \code{R}). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Strauss-Hardcore point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Strauss-Hardcore process is described in \code{\link{StraussHard}}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003). 
This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). A limitation of the perfect simulation algorithm is that the interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1}. To simulate a Strauss-hardcore process with \eqn{\gamma > 1}{gamma > 1}, use \code{\link{rmh}}. There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ Kasper Klitgaard Berthelsen and \adrian } \examples{ Z <- rStraussHard(100,0.7,0.05,0.02) } \seealso{ \code{\link{rmh}}, \code{\link{StraussHard}}. \code{\link{rHardcore}}, \code{\link{rStrauss}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}, \code{\link{rPenttinen}}. 
} \keyword{spatial} \keyword{datagen} spatstat/man/as.function.tess.Rd0000644000176200001440000000305413160710571016366 0ustar liggesusers\name{as.function.tess} \alias{as.function.tess} \title{ Convert a Tessellation to a Function } \description{ Convert a tessellation into a function of the \eqn{x} and \eqn{y} coordinates. The default function values are factor levels specifying which tile of the tessellation contains the point \eqn{(x,y)}. } \usage{ \method{as.function}{tess}(x,\dots,values=NULL) } \arguments{ \item{x}{ A tessellation (object of class \code{"tess"}). } \item{values}{ Optional. A vector giving the values of the function for each tile of \code{x}. } \item{\dots}{ Ignored. } } \details{ This command converts a tessellation (object of class \code{"tess"}) to a \code{function(x,y)} where the arguments \code{x} and \code{y} are (vectors of) spatial coordinates. The corresponding function values are factor levels identifying which tile of the tessellation contains each point. Values are \code{NA} if the corresponding point lies outside the tessellation. If the argument \code{values} is given, then it determines the value of the function in each tile of \code{x}. } \value{ A function in the \R language, also belonging to the class \code{"funxy"}. } \author{ \spatstatAuthors } \seealso{ \code{\link{tileindex}} for the low-level calculation of tile index. \code{\link{cut.ppp}} and \code{\link{split.ppp}} to divide up the points of a point pattern according to a tessellation. } \examples{ X <- runifpoint(7) V <- dirichlet(X) f <- as.function(V) f(0.1, 0.4) plot(f) } \keyword{spatial} \keyword{manip} spatstat/man/raster.x.Rd0000644000176200001440000000477113160710621014733 0ustar liggesusers\name{raster.x} \alias{raster.x} \alias{raster.y} \alias{raster.xy} \title{Cartesian Coordinates for a Pixel Raster} \description{ Return the \eqn{x} and \eqn{y} coordinates of each pixel in a pixel image or binary mask. 
} \usage{ raster.x(w, drop=FALSE) raster.y(w, drop=FALSE) raster.xy(w, drop=FALSE) } \arguments{ \item{w}{ A pixel image (object of class \code{"im"}) or a mask window (object of class \code{"owin"} of type \code{"mask"}). } \item{drop}{ Logical. If \code{TRUE}, then coordinates of pixels that lie outside the window are removed. If \code{FALSE} (the default) then the coordinates of every pixel in the containing rectangle are retained. } } \value{ \code{raster.xy} returns a list with components \code{x} and \code{y} which are numeric vectors of equal length containing the pixel coordinates. If \code{drop=FALSE}, \code{raster.x} and \code{raster.y} return a matrix of the same dimensions as the pixel grid in \code{w}, and giving the value of the \eqn{x} (or \eqn{y}) coordinate of each pixel in the raster. If \code{drop=TRUE}, \code{raster.x} and \code{raster.y} return numeric vectors. } \details{ The argument \code{w} should be either a pixel image (object of class \code{"im"}) or a mask window (an object of class \code{"owin"} of type \code{"mask"}). If \code{drop=FALSE} (the default), the functions \code{raster.x} and \code{raster.y} return a matrix of the same dimensions as the pixel image or mask itself, with entries giving the \eqn{x} coordinate (for \code{raster.x}) or \eqn{y} coordinate (for \code{raster.y}) of each pixel in the pixel grid. If \code{drop=TRUE}, pixels that lie outside the window \code{w} (or outside the domain of the image \code{w}) are removed, and \code{raster.x} and \code{raster.y} return numeric vectors containing the coordinates of the pixels that are inside the window \code{w}. The function \code{raster.xy} returns a list with components \code{x} and \code{y} which are numeric vectors of equal length containing the pixel coordinates. 
} \seealso{ \code{\link{owin}}, \code{\link{as.mask}}, \code{\link{pixelcentres}} } \examples{ u <- owin(c(-1,1),c(-1,1)) # square of side 2 w <- as.mask(u, eps=0.01) # 200 x 200 grid X <- raster.x(w) Y <- raster.y(w) disc <- owin(c(-1,1), c(-1,1), mask=(X^2 + Y^2 <= 1)) \dontrun{plot(disc)} # approximation to the unit disc } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/distmap.Rd0000644000176200001440000000273313160710571014626 0ustar liggesusers\name{distmap} \alias{distmap} \title{ Distance Map } \description{ Compute the distance map of an object, and return it as a pixel image. Generic. } \usage{ distmap(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}) or a line segment pattern (object of class \code{"psp"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control pixel resolution. } } \value{ A pixel image (object of class \code{"im"}) whose grey scale values are the values of the distance map. } \details{ The \dQuote{distance map} of a set of points \eqn{A} is the function \eqn{f} whose value \code{f(x)} is defined for any two-dimensional location \eqn{x} as the shortest distance from \eqn{x} to \eqn{A}. This function computes the distance map of the set \code{X} and returns the distance map as a pixel image. This is generic. Methods are provided for point patterns (\code{\link{distmap.ppp}}), line segment patterns (\code{\link{distmap.psp}}) and windows (\code{\link{distmap.owin}}). 
} \seealso{ \code{\link{distmap.ppp}}, \code{\link{distmap.psp}}, \code{\link{distmap.owin}}, \code{\link{distfun}} } \examples{ data(cells) U <- distmap(cells) data(letterR) V <- distmap(letterR) \dontrun{ plot(U) plot(V) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/bounding.box.xy.Rd0000644000176200001440000000243213160710571016214 0ustar liggesusers\name{bounding.box.xy} \alias{bounding.box.xy} \title{Bounding Box of Points} \description{ Computes the smallest rectangle containing a set of points. } \usage{ bounding.box.xy(x, y=NULL) } \arguments{ \item{x}{ vector of \code{x} coordinates of observed points, or a 2-column matrix giving \code{x,y} coordinates, or a list with components \code{x,y} giving coordinates (such as a point pattern object of class \code{"ppp"}.) } \item{y}{(optional) vector of \code{y} coordinates of observed points, if \code{x} is a vector.} } \value{ A window (an object of class \code{"owin"}). } \details{ Given an observed pattern of points with coordinates given by \code{x} and \code{y}, this function finds the smallest rectangle, with sides parallel to the coordinate axes, that contains all the points, and returns it as a window. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{convexhull.xy}}, \code{\link{ripras}} } \examples{ x <- runif(30) y <- runif(30) w <- bounding.box.xy(x,y) plot(owin(), main="bounding.box.xy(x,y)") plot(w, add=TRUE) points(x,y) X <- rpoispp(30) plot(X, main="bounding.box.xy(X)") plot(bounding.box.xy(X), add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/is.marked.ppm.Rd0000644000176200001440000000443413160710621015631 0ustar liggesusers\name{is.marked.ppm} \alias{is.marked.ppm} \alias{is.marked.lppm} \title{Test Whether A Point Process Model is Marked} \description{ Tests whether a fitted point process model involves ``marks'' attached to the points. 
} \usage{ \method{is.marked}{ppm}(X, \dots) \method{is.marked}{lppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model (object of class \code{"ppm"}) usually obtained from \code{\link{ppm}}. Alternatively, a model of class \code{"lppm"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a marked point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. The argument \code{X} is a fitted point process model (an object of class \code{"ppm"}) typically obtained by fitting a model to point pattern data using \code{\link{ppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a marked point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. whether the fitted model ignores the marks in the data). Currently we have not implemented a test for this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. 
} \seealso{ \code{\link{is.marked}}, \code{\link{is.marked.ppp}} } \examples{ X <- lansing # Multitype point pattern --- trees marked by species \testonly{ # Smaller dataset X <- amacrine } fit1 <- ppm(X, ~ marks, Poisson()) is.marked(fit1) # TRUE fit2 <- ppm(X, ~ 1, Poisson()) is.marked(fit2) # TRUE # Unmarked point pattern fit3 <- ppm(cells, ~ 1, Poisson()) is.marked(fit3) # FALSE } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/linearpcfdot.inhom.Rd0000644000176200001440000001041613160710621016741 0ustar liggesusers\name{linearpcfdot.inhom} \alias{linearpcfdot.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype pair correlation function from points of type \eqn{i} to points of any type. } \usage{ linearpcfdot.inhom(X, i, lambdaI, lambdadot, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdadot}{ Intensity values for all points of \code{X}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). 
} \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfdot.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. If \code{lambdaI} or \code{lambdadot} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. 
} \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross.inhom}}, \code{\link{linearpcfcross}}, \code{\link{pcfcross.inhom}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lam. <- function(x,y,const=sum(lam)){ rep(const, length(x)) } g <- linearpcfdot.inhom(chicago, "assault", lamI, lam.) \dontrun{ fit <- lppm(chicago, ~marks + x) linearpcfdot.inhom(chicago, "assault", fit, fit) } } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/profilepl.Rd0000644000176200001440000001460113165362536015167 0ustar liggesusers\name{profilepl} \alias{profilepl} \title{Fit Models by Profile Maximum Pseudolikelihood or AIC} \description{ Fits point process models by maximising the profile likelihood, profile pseudolikelihood, profile composite likelihood or AIC. } \usage{ profilepl(s, f, \dots, aic=FALSE, rbord=NULL, verbose = TRUE) } \arguments{ \item{s}{ Data frame containing values of the irregular parameters over which the criterion will be computed. } \item{f}{ Function (such as \code{\link{Strauss}}) that generates an interpoint interaction object, given values of the irregular parameters. } \item{\dots}{ Data passed to \code{\link{ppm}} to fit the model. } \item{aic}{ Logical value indicating whether to find the parameter values which minimise the AIC (\code{aic=TRUE}) or maximise the profile likelihood (\code{aic=FALSE}, the default). } \item{rbord}{ Radius for border correction (same for all models). If omitted, this will be computed from the interactions. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ The model-fitting function \code{\link{ppm}} fits point process models to point pattern data. 
However, only the \sQuote{regular} parameters of the model can be fitted by \code{\link{ppm}}. The model may also depend on \sQuote{irregular} parameters that must be fixed in any call to \code{\link{ppm}}. This function \code{profilepl} is a wrapper which finds the values of the irregular parameters that give the best fit. If \code{aic=FALSE} (the default), the best fit is the model which maximises the likelihood (if the models are Poisson processes) or maximises the pseudolikelihood or logistic likelihood. If \code{aic=TRUE} then the best fit is the model which minimises the Akaike Information Criterion \code{\link{AIC.ppm}}. The argument \code{s} must be a data frame whose columns contain values of the irregular parameters over which the maximisation is to be performed. An irregular parameter may affect either the interpoint interaction or the spatial trend. \describe{ \item{interaction parameters:}{ in a call to \code{\link{ppm}}, the argument \code{interaction} determines the interaction between points. It is usually a call to a function such as \code{\link{Strauss}}. The arguments of this call are irregular parameters. For example, the interaction radius parameter \eqn{r} of the Strauss process, determined by the argument \code{r} to the function \code{\link{Strauss}}, is an irregular parameter. } \item{trend parameters:}{ in a call to \code{\link{ppm}}, the spatial trend may depend on covariates, which are supplied by the argument \code{covariates}. These covariates may be functions written by the user, of the form \code{function(x,y,...)}, and the extra arguments \code{\dots} are irregular parameters. } } The argument \code{f} determines the interaction for each model to be fitted. 
It would typically be one of the functions \code{\link{Poisson}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{OrdThresh}}, \code{\link{Softcore}}, \code{\link{Strauss}} or \code{\link{StraussHard}}. Alternatively it could be a function written by the user. Columns of \code{s} which match the names of arguments of \code{f} will be interpreted as interaction parameters. Other columns will be interpreted as trend parameters. The data frame \code{s} must provide values for each argument of \code{f}, except for the optional arguments, which are those arguments of \code{f} that have the default value \code{NA}. To find the best fit, each row of \code{s} will be taken in turn. Interaction parameters in this row will be passed to \code{f}, resulting in an interaction object. Then \code{\link{ppm}} will be applied to the data \code{...} using this interaction. Any trend parameters will be passed to \code{\link{ppm}} through the argument \code{covfunargs}. This results in a fitted point process model. The value of the log pseudolikelihood or AIC from this model is stored. After all rows of \code{s} have been processed in this way, the row giving the maximum value of log pseudolikelihood will be found. The object returned by \code{profilepl} contains the profile pseudolikelihood (or profile AIC) function, the best fitting model, and other data. It can be plotted (yielding a plot of the log pseudolikelihood or AIC values against the irregular parameters) or printed (yielding information about the best fitting values of the irregular parameters). In general, \code{f} may be any function that will return an interaction object (object of class \code{"interact"}) that can be used in a call to \code{\link{ppm}}. Each argument of \code{f} must be a single value. } \value{ An object of class \code{"profilepl"}. 
There are methods for \code{\link[graphics]{plot}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[stats]{simulate}}, \code{\link{as.ppm}}, \code{\link{fitin}} and \code{\link{parameters}} for objects of this class. The components of the object include \item{fit}{Best-fitting model} \item{param}{The data frame \code{s}} \item{iopt}{Row index of the best-fitting parameters in \code{s}} To extract the best fitting model you can also use \code{\link{as.ppm}}. } \examples{ # one irregular parameter rr <- data.frame(r=seq(0.05,0.15, by=0.01)) \testonly{ rr <- data.frame(r=c(0.05,0.1,0.15)) } ps <- profilepl(rr, Strauss, cells) ps if(interactive()) plot(ps) # two irregular parameters rs <- expand.grid(r=seq(0.05,0.15, by=0.01),sat=1:3) \testonly{ rs <- expand.grid(r=c(0.07,0.12),sat=1:2) } pg <- profilepl(rs, Geyer, cells) pg if(interactive()) { plot(pg) as.ppm(pg) } # multitype pattern with a common interaction radius \dontrun{ RR <- data.frame(R=seq(0.03,0.05,by=0.01)) MS <- function(R) { MultiStrauss(radii=diag(c(R,R))) } pm <- profilepl(RR, MS, amacrine ~marks) } ## more information summary(pg) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/rppm.Rd0000644000176200001440000000365113160710621014137 0ustar liggesusers\name{rppm} \alias{rppm} \title{ Recursively Partitioned Point Process Model } \description{ Fits a recursive partition model to point pattern data. } \usage{ rppm(\dots, rpargs=list()) } \arguments{ \item{\dots}{ Arguments passed to \code{\link{ppm}} specifying the point pattern data and the explanatory covariates. } \item{rpargs}{ Optional list of arguments passed to \code{\link[rpart]{rpart}} controlling the recursive partitioning procedure. } } \details{ This function attempts to find a simple rule for predicting low and high intensity regions of points in a point pattern, using explanatory covariates. 
The arguments \code{\dots} specify the point pattern data and explanatory covariates in the same way as they would be in the function \code{\link{ppm}}. The recursive partitioning algorithm \code{\link[rpart]{rpart}} is then used to find a partitioning rule. } \value{ An object of class \code{"rppm"}. There are methods for \code{print}, \code{plot}, \code{fitted}, \code{predict} and \code{prune} for this class. } \references{ Breiman, L., Friedman, J. H., Olshen, R. A., and Stone, C. J. (1984) \emph{Classification and Regression Trees}. Wadsworth. } \author{ \spatstatAuthors. } \seealso{ \code{\link{plot.rppm}}, \code{\link{predict.rppm}}, \code{\link{prune.rppm}}. } \examples{ # New Zealand trees data: trees planted along border # Use covariates 'x', 'y' nzfit <- rppm(nztrees ~ x + y) nzfit prune(nzfit, cp=0.035) # Murchison gold data: numeric and logical covariates mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) # mfit <- rppm(gold ~ dfault + greenstone, data=mur) mfit # Gorillas data: factor covariates # (symbol '.' indicates 'all variables') gfit <- rppm(unmark(gorillas) ~ . , data=gorillas.extra) gfit } \keyword{spatial} \keyword{models} spatstat/man/clarkevans.test.Rd0000644000176200001440000000667113160710571016301 0ustar liggesusers\name{clarkevans.test} \alias{clarkevans.test} \title{Clark and Evans Test} \description{ Performs the Clark-Evans test of aggregation for a spatial point pattern. } \usage{ clarkevans.test(X, ..., correction="none", clipregion=NULL, alternative=c("two.sided", "less", "greater", "clustered", "regular"), nsim=999) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{correction}{ Character string. The type of edge correction to be applied. See \code{\link{clarkevans}} } \item{clipregion}{ Clipping region for the guard area correction. A window (object of class \code{"owin"}). 
See \code{\link{clarkevans}} } \item{alternative}{ String indicating the type of alternative for the hypothesis test. Partially matched. } \item{nsim}{ Number of Monte Carlo simulations to perform, if a Monte Carlo p-value is required. } } \details{ This command uses the Clark and Evans (1954) aggregation index \eqn{R} as the basis for a crude test of clustering or ordering of a point pattern. The Clark-Evans index is computed by the function \code{\link{clarkevans}}. See the help for \code{\link{clarkevans}} for information about the Clark-Evans index \eqn{R} and about the arguments \code{correction} and \code{clipregion}. This command performs a hypothesis test of clustering or ordering of the point pattern \code{X}. The null hypothesis is Complete Spatial Randomness, i.e.\ a uniform Poisson process. The alternative hypothesis is specified by the argument \code{alternative}: \itemize{ \item \code{alternative="less"} or \code{alternative="clustered"}: the alternative hypothesis is that \eqn{R < 1} corresponding to a clustered point pattern; \item \code{alternative="greater"} or \code{alternative="regular"}: the alternative hypothesis is that \eqn{R > 1} corresponding to a regular or ordered point pattern; \item \code{alternative="two.sided"}: the alternative hypothesis is that \eqn{R \neq 1}{R != 1} corresponding to a clustered or regular pattern. } The Clark-Evans index \eqn{R} is computed for the data as described in \code{\link{clarkevans}}. If \code{correction="none"} and \code{nsim} is missing, the \eqn{p}-value for the test is computed by standardising \eqn{R} as proposed by Clark and Evans (1954) and referring the statistic to the standard Normal distribution. Otherwise, the \eqn{p}-value for the test is computed by Monte Carlo simulation of \code{nsim} realisations of Complete Spatial Randomness conditional on the observed number of points. } \value{ An object of class \code{"htest"} representing the result of the test. } \references{ Clark, P.J. 
and Evans, F.C. (1954) Distance to nearest neighbour as a measure of spatial relationships in populations. \emph{Ecology} \bold{35}, 445--453. Donnelly, K. (1978) Simulations to determine the variance and edge-effect of total nearest neighbour distance. In \emph{Simulation methods in archaeology}, Cambridge University Press, pp 91--95. } \author{ \adrian } \seealso{ \code{\link{clarkevans}}, \code{\link{hopskel.test}} } \examples{ # Redwood data - clustered clarkevans.test(redwood) clarkevans.test(redwood, alternative="clustered") } \keyword{spatial} \keyword{nonparametric} \keyword{htest} spatstat/man/rotate.psp.Rd0000644000176200001440000000272013160710621015254 0ustar liggesusers\name{rotate.psp} \alias{rotate.psp} \title{Rotate a Line Segment Pattern} \description{ Rotates a line segment pattern } \usage{ \method{rotate}{psp}(X, angle=pi/2, \dots, centre=NULL) } \arguments{ \item{X}{A line segment pattern (object of class \code{"psp"}).} \item{angle}{Angle of rotation.} \item{\dots}{ Arguments passed to \code{\link{rotate.owin}} affecting the handling of the observation window, if it is a binary pixel mask. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } } \value{ Another object of class \code{"psp"} representing the rotated line segment pattern. } \details{ The line segments of the pattern, and the window of observation, are rotated about the origin by the angle specified. Angles are measured in radians, anticlockwise. The default is to rotate the pattern 90 degrees anticlockwise. If the line segments carry marks, these are preserved. 
} \seealso{ \code{\link{psp.object}}, \code{\link{rotate.owin}}, \code{\link{rotate.ppp}} } \examples{ oldpar <- par(mfrow=c(2,1)) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(X, main="original") Y <- rotate(X, pi/4) plot(Y, main="rotated") par(oldpar) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/pcfcross.inhom.Rd0000644000176200001440000001147013160710621016112 0ustar liggesusers\name{pcfcross.inhom} \alias{pcfcross.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Cross-Type) } \description{ Estimates the inhomogeneous cross-type pair correlation function for a multitype point pattern. } \usage{ pcfcross.inhom(X, i, j, lambdaI = NULL, lambdaJ = NULL, ..., r = NULL, breaks = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma = NULL, varcov = NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity function of the points of type \code{i}. Either a vector giving the intensity values at the points of type \code{i}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. 
Values of the estimated intensity function of the points of type \code{j}. A numeric vector, pixel image or \code{function(x,y)}. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g_{ij}(r)}{g[i,j](r)} should be evaluated. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdaJ} is estimated by kernel smoothing. } } \details{ The inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} is a summary of the dependence between two types of points in a multitype spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points, of types \eqn{i} and \eqn{j} respectively, at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda_i(x) \lambda_j(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i](x) * lambda[j](y) * g(r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity function of the process of points of type \eqn{i}. For a multitype Poisson point process, this probability is \eqn{p(r) = \lambda_i(x) \lambda_j(y)}{p(r) = lambda[i](x) * lambda[j](y)} so \eqn{g_{ij}(r) = 1}{g[i,j](r) = 1}. The command \code{pcfcross.inhom} estimates the inhomogeneous pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. 
If the arguments \code{lambdaI} and \code{lambdaJ} are missing or null, they are estimated from \code{X} by kernel smoothing using a leave-one-out estimator. } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{ij}(r)}{g[i,j](r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)} estimated by Ripley isotropic correction } as required. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link{pcfcross}}, \code{\link{pcfdot.inhom}} } \examples{ data(amacrine) plot(pcfcross.inhom(amacrine, "on", "off", stoyan=0.1), legendpos="bottom") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/pairdist.default.Rd0000644000176200001440000000564713160710621016432 0ustar liggesusers\name{pairdist.default} \alias{pairdist.default} \title{Pairwise distances} \description{ Computes the matrix of distances between all pairs of points in a set of points } \usage{ \method{pairdist}{default}(X, Y=NULL, \dots, period=NULL, method="C", squared=FALSE) } \arguments{ \item{X,Y}{ Arguments specifying the coordinates of a set of points. Typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components \code{x} and \code{y}, or a matrix with two columns. } \item{\dots}{ Ignored. } \item{period}{ Optional. Dimensions for periodic edge correction. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{squared}{ Logical. 
If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ Given the coordinates of a set of points, this function computes the Euclidean distances between all pairs of points, and returns the matrix of distances. It is a method for the generic function \code{pairdist}. The arguments \code{X} and \code{Y} must determine the coordinates of a set of points. Typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components named \code{x} and \code{y}, or a matrix or data frame with two columns. Alternatively if \code{period} is given, then the distances will be computed in the `periodic' sense (also known as `torus' distance). The points will be treated as if they are in a rectangle of width \code{period[1]} and height \code{period[2]}. Opposite edges of the rectangle are regarded as equivalent. If \code{squared=TRUE} then the \emph{squared} Euclidean distances \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}. The squared distances are faster to calculate, and are sufficient for many purposes (such as finding the nearest neighbour of a point). The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is somewhat faster. 
} \seealso{ \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{Kest}} } \examples{ x <- runif(100) y <- runif(100) d <- pairdist(x, y) d <- pairdist(cbind(x,y)) d <- pairdist(x, y, period=c(1,1)) d <- pairdist(x, y, squared=TRUE) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/plot.leverage.ppm.Rd0000644000176200001440000000455413164570360016536 0ustar liggesusers\name{plot.leverage.ppm} \alias{plot.leverage.ppm} \alias{persp.leverage.ppm} \title{ Plot Leverage Function } \description{ Generate a pixel image plot, or a perspective plot, of a leverage function that has been computed by \code{\link{leverage.ppm}}. } \usage{ \method{plot}{leverage.ppm}(x, \dots, showcut=TRUE, col.cut=par("fg"), args.contour=list(), multiplot=TRUE) \method{persp}{leverage.ppm}(x, \dots, main) } \arguments{ \item{x}{ Leverage function (object of class \code{"leverage.ppm"}) computed by \code{\link{leverage.ppm}}. } \item{\dots}{ Arguments passed to \code{\link{plot.im}} or \code{\link{persp.im}} controlling the plot. } \item{showcut}{ Logical. If \code{TRUE}, a contour line is plotted at the level equal to the theoretical mean of the leverage. } \item{col.cut}{ Optional colour for the contour line. } \item{args.contour}{ Optional list of arguments passed to \code{\link[graphics]{contour.default}} to control the plotting of the contour line. } \item{multiplot}{ Logical value indicating whether it is permissible to display several plot panels. } \item{main}{ Optional main title. } } \details{ These functions are the \code{plot} and \code{persp} methods for objects of class \code{"leverage.ppm"}. Such objects are computed by the command \code{\link{leverage.ppm}}. The \code{plot} method displays the leverage function as a colour pixel image using \code{\link{plot.im}}, and draws a single contour line at the mean leverage value using \code{\link{contour.default}}. 
Use the argument \code{clipwin} to restrict the plot to a subset of the full data. The \code{persp} method displays the leverage function as a surface in perspective view, using \code{\link{persp.im}}. } \value{ Same as for \code{\link{plot.im}} and \code{\link{persp.im}} respectively. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}. } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) lef <- leverage(fit) plot(lef) persp(lef) } \keyword{spatial} \keyword{models} spatstat/man/predict.slrm.Rd0000644000176200001440000000536713160710621015575 0ustar liggesusers\name{predict.slrm} \Rdversion{1.1} \alias{predict.slrm} \title{ Predicted or Fitted Values from Spatial Logistic Regression } \description{ Given a fitted Spatial Logistic Regression model, this function computes the fitted probabilities for each pixel, or the fitted point process intensity, or the values of the linear predictor in each pixel. } \usage{ \method{predict}{slrm}(object, ..., type = "intensity", newdata=NULL, window=NULL) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Optional arguments passed to \code{\link{pixellate}} determining the pixel resolution for the discretisation of the point pattern. } \item{type}{ Character string (partially) matching one of \code{"probabilities"}, \code{"intensity"} or \code{"link"}. } \item{newdata}{ Optional. List containing new covariate values for the prediction. See Details. } \item{window}{ Optional. New window in which to predict. An object of class \code{"owin"}. } } \details{ This is a method for \code{\link[stats]{predict}} for spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). 
If \code{type="intensity"}, the value of the point process intensity is computed at each pixel. If \code{type="probabilities"}, the probability of the presence of a random point in each pixel is computed.
\item{thresh}{ Numerical threshold relative to the cluster kernel value at the origin (parent location) determining when the cluster kernel will be considered negligible. A sensible default is provided. } \item{precision}{ Logical. If \code{precision=TRUE} the precision of the calculated range is returned as an attribute to the range. See details. } } \details{ Given a cluster model this function by default returns the effective range of the model with the given parameters as used in spatstat. For the Matern cluster model (see e.g. \code{\link{rMatClust}}) this is simply the finite radius of the offspring density given by the parameter \code{scale} irrespective of other options given to this function. The remaining models in spatstat have infinite theoretical range, and an effective finite value is given as follows: For the Thomas model (see e.g. \code{\link{rThomas}}) the default is \code{4*scale} where scale is the scale or standard deviation parameter of the model. If \code{thresh} is given the value is instead found as described for the other models below.
the true theoretical infinite range.
The methods for point patterns (classes \code{"ppp"} and \code{"ppx"}, embracing \code{"pp3"} and \code{"lpp"}) count the number of points in the pattern. The method for line segment patterns (class \code{"psp"}) counts the number of line segments in the pattern. The method for tessellations (class \code{"tess"}) counts the number of tiles of the tessellation. } \value{ A single integer. } \author{ \spatstatAuthors } \seealso{ \code{\link{npoints}} } \examples{ nobjects(redwood) nobjects(edges(letterR)) nobjects(dirichlet(cells)) } \keyword{spatial} \keyword{manip} spatstat/man/Smooth.ppp.Rd0000644000176200001440000001640313160710621015227 0ustar liggesusers\name{Smooth.ppp} \alias{Smooth.ppp} \alias{markmean} \alias{markvar} \title{Spatial smoothing of observations at irregular points} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations. Uses Gaussian kernel smoothing and least-squares cross-validated bandwidth selection. } \usage{ \method{Smooth}{ppp}(X, sigma=NULL, ..., weights = rep(1, npoints(X)), at="pixels", edge=TRUE, diggle=FALSE, geometric=FALSE) markmean(X, ...) markvar(X, sigma=NULL, ..., weights=NULL, varcov=NULL) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{sigma}{ Smoothing bandwidth. A single positive number, a numeric vector of length 2, or a function that selects the bandwidth automatically. See \code{\link{density.ppp}}. } \item{\dots}{ Further arguments passed to \code{\link{bw.smoothppp}} and \code{\link{density.ppp}} to control the kernel smoothing and the pixel resolution of the result. } \item{weights}{ Optional weights attached to the observations. A numeric vector, numeric matrix, an \code{expression} or a pixel image. See \code{\link{density.ppp}}. } \item{at}{ String specifying whether to compute the smoothed values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). 
} \item{edge,diggle}{ Arguments passed to \code{\link{density.ppp}} to determine the edge correction. } \item{varcov}{ Variance-covariance matrix. An alternative to \code{sigma}. See \code{\link{density.ppp}}. } \item{geometric}{ Logical value indicating whether to perform geometric mean smoothing instead of arithmetic mean smoothing. See Details. } } \details{ The function \code{Smooth.ppp} performs spatial smoothing of numeric values observed at a set of irregular locations. The functions \code{markmean} and \code{markvar} are wrappers for \code{Smooth.ppp} which compute the spatially-varying mean and variance of the marks of a point pattern. \code{Smooth.ppp} is a method for the generic function \code{\link{Smooth}} for the class \code{"ppp"} of point patterns. Thus you can type simply \code{Smooth(X)}. Smoothing is performed by Gaussian kernel weighting. If the observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]} at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively, then the smoothed value at a location \eqn{u} is (ignoring edge corrections) \deqn{ g(u) = \frac{\sum_i k(u-x_i) v_i}{\sum_i k(u-x_i)} }{ g(u) = (sum of k(u-x[i]) v[i])/(sum of k(u-x[i])) } where \eqn{k} is a Gaussian kernel. This is known as the Nadaraya-Watson smoother (Nadaraya, 1964, 1989; Watson, 1964). By default, the smoothing kernel bandwidth is chosen by least squares cross-validation (see below). The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The points of the pattern are taken to be the observation locations \eqn{x_i}{x[i]}, and the marks of the pattern are taken to be the numeric values \eqn{v_i}{v[i]} observed at these locations. The marks are allowed to be a data frame (in \code{Smooth.ppp} and \code{markmean}). Then the smoothing procedure is applied to each column of marks. The numerator and denominator are computed by \code{\link{density.ppp}}. 
The arguments \code{...} control the smoothing kernel parameters and determine whether edge correction is applied. The smoothing kernel bandwidth can be specified by either of the arguments \code{sigma} or \code{varcov} which are passed to \code{\link{density.ppp}}. If neither of these arguments is present, then by default the bandwidth is selected by least squares cross-validation, using \code{\link{bw.smoothppp}}. The optional argument \code{weights} allows numerical weights to be applied to the data. If a weight \eqn{w_i}{w[i]} is associated with location \eqn{x_i}{x[i]}, then the smoothed function is (ignoring edge corrections) \deqn{ g(u) = \frac{\sum_i k(u-x_i) v_i w_i}{\sum_i k(u-x_i) w_i} }{ g(u) = (sum of k(u-x[i]) v[i] w[i])/(sum of k(u-x[i]) w[i]) } If \code{geometric=TRUE} then geometric mean smoothing is performed instead of arithmetic mean smoothing. The mark values must be non-negative numbers. The logarithm of the mark values is computed; these logarithmic values are kernel-smoothed as described above; then the exponential function is applied to the smoothed values. An alternative to kernel smoothing is inverse-distance weighting, which is performed by \code{\link{idw}}. } \section{Very small bandwidth}{ If the chosen bandwidth \code{sigma} is very small, kernel smoothing is mathematically equivalent to nearest-neighbour interpolation; the result will be computed by \code{\link{nnmark}}. This is unless \code{at="points"} and \code{leaveoneout=FALSE}, when the original mark values are returned. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). Pixel values are values of the interpolated function. \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. Entries are values of the interpolated function at the points of \code{X}. 
} \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{"solist"}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } The return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. } \seealso{ \code{\link{Smooth}}, \code{\link{density.ppp}}, \code{\link{bw.smoothppp}}, \code{\link{nnmark}}, \code{\link{ppp.object}}, \code{\link{im.object}}. See \code{\link{idw}} for inverse-distance weighted smoothing. To perform interpolation, see also the \code{akima} package. } \examples{ # Longleaf data - tree locations, marked by tree diameter # Local smoothing of tree diameter (automatic bandwidth selection) Z <- Smooth(longleaf) # Kernel bandwidth sigma=5 plot(Smooth(longleaf, 5)) # mark variance plot(markvar(longleaf, sigma=5)) # data frame of marks: trees marked by diameter and height plot(Smooth(finpines, sigma=2)) } \author{ \spatstatAuthors. } \references{ Nadaraya, E.A. (1964) On estimating regression. \emph{Theory of Probability and its Applications} \bold{9}, 141--142. Nadaraya, E.A. (1989) \emph{Nonparametric estimation of probability densities and regression curves}. Kluwer, Dordrecht. Watson, G.S. (1964) Smooth regression analysis. \emph{Sankhya A} \bold{26}, 359--372. } \keyword{spatial} \keyword{methods} \keyword{smooth}

(6}b8 PlM4`@MAGŦ#bSБA) Dk!_ Q&D+ L-p%h}A) tde L`rZБA) tde :22D LAGQ&_9-8D6 eăe :22D LAGQ& :22DCd}Wplz3(@2DLL`B\I)$dde 222Ȇ2D !e2DCL!Q&px(aE8̼hWM0ʢ ;aEw})U¢)`tdh vD0ʢ ;aE[4,Cd+&f :2X4, M4¢ ق MA_xX4}aESM-(fCX4}a#ESБ)`tdiFESpl`N3, atdh :2X4, M4RâMAX4S`N#5,iE8}# `M! MfCX47,ޢ)`tdh vM4-â - M΢y-SˌS6)xZfLOLBE=iQ-]<-j3-uXo0,t;ujY<j3T'BmղxfZxZ+PogbmHf݃e|&VV4 f [6gb=DfLaްe|&V+XÈffLaP+|UA_2GPN=]#ǞTѕY>jC2GPove*,A!]僤ARҕY>Ho02Ive*ARK]僤 ɕY>H#+|TG AR0NWf AR0NWf i+|TAGFWf i+|TAGFWf sҮAR-]僤 [>HꁖZ@^[K-Ԃm @^[K-Zk;'Xjy`{=RK-,als+̕ JAG9%pzV TAN ? R $T}AN) tdS rJ`YБAN) tdS :2)䔂 rJAG9%_,8D6ԐSrʆS :2)䔂 rJAG9%o :2)䔂LH* Q DE _-[SHAޚE ֔(RБA l"Ü-Q$p%sDaΖ(8 ӕDk!_ Qo0"}A +"}-W7D*E :2"D*{0]I ka(؋"DCd(Rp :2"D H4B ǂ4BTgt aW o0}`W]QpB04î( vEAG#]QБ(`Wtd+avņ+ ("p`WN|UB_aC ӫ:|W(GBA? vB`+td :2 ;_!Q6uk͛Uff}tXWMoHpXY_UӛJàl6\kh7@;ج}t.$l֯ck%0^3( AIج/5pXZm/^gJafaJ~eW(?UBA 򖒯PlU-M*{AY 빠ÆuPa:(atذLA`WpM6;8=+pg@(g@Ap ;Pw<܁ @AGw#PM߄m=`8M_9z@`Rp{=#PБA(Ƞtd :2􀂎 z@`PWplN1 td :2􀂎 z@`PWБA(Ƞp?UaTU8̟SU}0Uԡ>p:TS U}S7 :2Ts0u;aPUWCd}U_p :2T U}`etd :2TiD37'xAfƿE6SpćTD_ ,/p苧3l%3֗[ xft.0O|8ד%6g6SI}ć~ć~OIj;`_>"P綾T+w@-_>bCV͗uCIUC :V͗ut*/PpqUc 1 ӭ1 ea|oY_>&!pxpa|c6|Mk5Y_>`CƗ;(ۄӅ 6aa|ida| "||@4||@"L+N+td`/8 o`/PБ||@ ӛ+pW\>pW\>pW\>pW\>pW\>pW>E [[[[[/x0;e}@}lYdز:2-P-k#c8D5pL-k#cز:2T/ز:2-Q-ղ:OΓ-kdidi|M&[@GƖ5Бe tdlY[@GƖӨ"5-@l/o݁v݁êvGxX Y߈ֽm/45ṀDxNuMî+n tw1d:?0^kZ%=-f}KzX=snØxѠk0^k4 ahP㳂0fnlxVР>?BjA UCT~JY7*Ԃ;:o|: ,ˇ/΂|hT}{X N@{XwÂ#C{XБ]Dj TZ 엀 BXUny% o;UeQnZ zβ4"`?DBX;D- `AZdi>mWC+ z!, 2 `AK %CXВ! -2  RO|bS1_A꩘/0+ 8sb/0+ 8?? eA`auPpXVe{7;:( ػɆA Z2d-2pX 0`d `A>,hɐd Z2d %CXВ!,hMkgZt>ӢӉ-0AXbAD=L-&cDbi2bAKh%CXВ!Z,h-&5M vy\>+ׇ<_]PyŶz_b[d.d*]lY4.Mg`VweaH/}Ga}ne:;_~_e-+7 ^l{0> ^l囌27u1xcy"xaya.>LYŖÇ)ra"/NFmE^l9|w[6|E^l99 YE^nHPKܐ8R]|woauxoA!ƍ1Ǎ^,'ŚO{, &Ś#X{&*Xs&ŨQbF}Y g1{}Y0v3XnkMb6`ø`m` w{N1>m-qݧ8YFmZika`ӧl1jԧŚMKaþi+Ql֥y!Q b_8m}_B/O[_Ч/l5n5ar$l6ބmB }B>m}GBВi -Y&6ֽx5]q 6TxP&lzP! 
waC5b^h7`&V>M8}NZ^h6a뵼b9 -vӾb9 -YӾВ9 -YӾВ9mky%sZ>}!%CNBJ 9 )rR2 )9maJENpR&T )9m>M8&lQã s} YӾpi_hi_h6aӾВ9 -YӾВ9 -֒Ӳ ɦeߴ,k-Y9В -Y9В -Y9# = }B>rN8-}pZ"9Ds5-kZ"_hNKd'lQMp *8^jG[wޖ7_&>YjULFR*/ &yUa La}X.-yF F Fe + 8cPԦ_Ơˆ /SPe 30>K-)^jQLR` ZSPԢ0K-Q#K-i߻Ԣ801^jQ\9^_P6b 'Mؿ8*L]l4PYbYb9E/6,z ?$g #bz3^l4Yb",z -F Yt@Kv ψ;_<#k q/6TbC{ -#ņ EbsZlF܋͍Ylnpq/67N8Mb3^lnpq/67~%cĽ8412^lnp89^lnBJ{qaJuĽؿb‌9^_pRq//~%cĽؿ8 #/0^lQpq/(~G# /(~G# 9bZ2F܋-s#/d[В1^lQpq/(~ "q/(N{EqBE܋-_hq/(N8tq/(~%cplZq?pZq!#N2#g;<'eFeFTВ1^*PbW{ =L/v~ Ů bWZ2ߋ]N'Ů/d̿Jd~ߋ*~`gXgzɺa՗Zϳaχl<x2>߿>{r)Y3֟ ua}n;GLɨrdy2aWo,=l{2>2d|>d|ԓq|"S?ٓq "SO1L=a}πL=ԓq!SOqL=2dgԓQ#d s2ÆaF;5B>6u )s(7/'Qn^MyA8i~E]Пwnz }]p}O v}SwA[=~H@]В!.hɐ~Veu5,x_HVSAPD^;쭦zA빆OzAP%C^В!T/hdHN+ӊ=`&H@w z$ ?xA[DnE4^x)whe  ROrSyaTnp5fMYSyaT4p8([iC4^ xA0DXx/8H6h%C4^В!/9^x8xAKh 4#G- xAKh<`d Z2D- xAiE4pZyh<"G- xAKh%f !7/ܼrCN+r<സ"7?yAKuM+r yAK<സ"7( vy>78_ib7d؍|ӓkɺ4LZuXM= a}6χ>}2>d]~xX%D~cî:O1Q0Q $HF0׭aC@2zd^aCaePKχZÆ sj :$D-A2A$D-A2jZÆ 5j Jɨj u{ÆZ%(P-AA>j r G߿ôn=0D!S(Q(DBA?yow?,ω}DAZLv87T3?0W5=4Q3QC5-j& Z2L68l?T3w Zkf;&`wi z`g]ڂ}((чRK[ВaRC{%C)FAKz@iEFAiH@EibDEI,8|ibDEAMT[DEAMT[DEAJgfN#0N#0N#0N@(**Q(7~C(Q(7S}R_$(I% @(d(Q(hPP%sJ)JaSBD%CBAaSBD%CBAK % ͯJ% -J Z2(d(Q(8H6-(Q8-(Q8% ͯJ% -J Z2(d(Q(hPp J D?J z DJN+JN+JN+JDBAKlMW(d(Q8% (Q(ؕ(K% Dfdv~~o{~>2G09g,_c0y7{&ggaeeO`_H² ${aSSv~cݼ_0=ܼ_0*QK9;_<0N)_00%nƱҍw9<7r8lXXq.dԈ7r8lXXq.d޼0nFXqÆ5e78lXXqd~a `Y; q| :nqذɨ:nqذUe޾!3n_pXy\q*߿:s]GaVw?޾_TsM5&Ve1T2Ъ,% ܾ$ۗk.}yIA2%//9e1//)hְqsYK Z2ܾ$`',JZ2ܾ$Df&eP߾#4i//(ʗ}| ȑʗe1/)8|asY++*b}eE[WVe~ݾ""WVTvVV%*o8LR.Q}+EA۷R*,Q}+Ear[) DDKTnJp؈D-KTnJp؈Dɦi%*o(hXrV%*o8lD]rV%*o8lD]rV*Q}+Eid[) z D=XrV۷Rd,Q}+Eiq]۷RWܾx?-KTn_<P%*/(ؗ¹DO߶ӣ\K8gzg-YquPkώZd|vԚ5IY9ctX3&r/Psd|gCIw:d|gCIw:dרCIq:}qMԡ$DJ2~Nԡ$XBa}c u(8Pc u(ɨPQס$FCIFPrذ>ްKFP%u( Pԡ6C9lXPrX &DJ2O~${AG2~}p \ Z2d(r g -\ RKAK" E.l Y-d(r g -v*r 8QU Z2d(r)hPR&\ DJި.1R} \c*=PRc*=PRzT %3Ԫc 8LRc 8LRF)j@F)ׇjF اiP'V}G> 8|U(o; va᛭}}GAK -;{0w; Z2wd(hPQplPp؃@w`wd(hPQВ%C}ǁ(@(1C8HwB}GA!wd(hPQВiq]⺧}}GAZw;]}UQhw&iyXya749L&& nNdgyIe 냸/ӱd?.ؼ_a,ؼ_%5 <6W9Lpo~dlޯr 
<6Wyإ@0~Nx9RƱ;bq,cTyGXyGaJ&69LwTbÆw&kTq5b1 <69lXWXyL2?cdؾ}$0ݹa,qxuȯK/5 8|S]?=prz$a~Ö%d,qؾ@?l_RpؾGz$ؾM_+'o 8lK\9}Lagʉd ʉdNs+'o)hX9}Licm2۷$>VN5Bal 5B@2jd|v 69(8lXsP6pذlaAaÚl ? vi [݂)/mu7I+?`#  R"/hd>/葀G¿ofo6  z$ࠠ%CAAKÎF= BAK -  Z2d(88ͷ(88ͷ(88ͷ(88ͷ(88ͷ(88-  Z2} ;%`d(8(hPpPp }AKo_Вࠠ%CAAKz߾%CAAK - ӢࠠMCԂ&(8(a&(8(a|*8(aLiQpPПiQSPП5 !uPMA uPMA@$<*1'Tb^+}b^cyA!$-ʫ<`dH Z2$- w>.8iCp{s}]#wA={dȽ Z2-r+U]ПwAZ;`{s]@9{o]7)6.HoZ6.Hgw btݼ0ew bt=)>)޼+a̽7JHF{oޕprͻSyWa:z^tؼ0כ&w^dnk3޼0כ<ݼ0כ&w^d)lk1޼0כw$1޼  3ͻ q :k_CԹv_(u>0Pn/H-nb}aSw}AK7%c>-6݀n@h7f݅?N]+.vd:=ow?vӔhw AKhw iJe݅?42_plRnw/[] Z2F]G]G] Z2F]^G] ` .-.e0ow(y hw AKh7%c?BYvG֓vGӬ v#~A 6 %s݀@- C[lryx@eK9nMn78,&&d&g Z2f]*~p8@9~]b3~]b3~p8@9~]В1~po?nw>pOv ~f-[g-*n@g-N/F .uG!LKd1}X;7ua9.Yw>7u;zc:YgL֟Y>Lf25BpyX\&F.Q#ɨdd2f;E8"LFmNɨ-dd2jp25B8!LFN&F?kɨdN&! ؇-6ɂdApF8Yb#,hNd'ɀrp2`.hNd' Z2-ɂ diJE8pRNT)diJE8y ɂ dAKp2`0d' vBa.hNd' sAa.hN悖 dAKp%C8YВ>,hNd' Z2YdApD8Yz",h=N ' RO)"Ƃ v>q.8|>b<)bA09"O RĂ )bAK%Cp"SĂ )bAK%CXВ! ,hɐdY`AK,%CXВ! ,hɐd Z2dih"(,AaA?'‚]P Aa.(|‚ vng߷ScNo!uG!DL}d|X:7uay+YwB>7u;z95B!b2kԇɨB15B!DLF"> aaM! LF&Fda`2j005B! LF&FQ#`2j005a`2>Æaa\0a.Gx0# | R'}ya(*+Qy_f)H=d Z2}y_AFWb#+h Zl}-z/ఢ+ ؟ Z2}-򾂖 y_AK%CpRT})y_iJEpR%CWВ! {-򾂝 Z2}-򾂃d\p Z2}{/%CWВ!+hɐd Z}-򾂖 ^>+hd Z2Dz-"L^AJH %SWП^ ;i샹& җU0WD\AxsG0p샹 \AK`@kⵀ}V/Z>^+x_<ⵂ~ x%CVВi _] % {+8a+-`쳷]@eo)^ߏ`ö3mIے&ے&3ö`a[d~ؖ0%=LGے`ö)-}ؖ45b0a[}ؖa >lz[&OVɑh2kl=Lۇ@ƀVɈa >lzL[懭@Qc懭@ƀV| >lǀV s2[>9M; u;'igaD9'igaP9'igAwd7 8lY}ܴ%cNq}ܗ3r(㾜}ܗ3r\Ga,h=}ܗ3: /gauq_΂Q}9+rd>p5}ܗ34k2 /gAidq_΂㾜-r>p]8 fAxZw (~]@Zw,@~]3\kTqẁ >P.=2*Gficfq̂V=2N33{d*ά>y cYΏYƒ)KfM'O7,wƒYǍ%KdL>n,y å8eqǀn.}1¥86>X_Kq 8M5 8M4B{Rh=PV@xZw @㮓h}u2pqɀ >:@Zw @㮓@k}? ]Bd0Zɺ-a33OO8i<=?$in3wXo&FY>Jf@+5BuXh%FQ#ZɨB*'Bd|뭳d|}"J*Ifd$5B2rX$FHFQ#$#ɨҏz% ҏz% ҏd|/H? HƱ#.ҏdG2jYIF~$Fk0_#5yRd|HM}"59l6HMk&>qn RDe*)?ܥ%PR; \CAK%CRВ!p)h/ eZQ́j ZOD5'QMA뉨DTSВ! 
8 jCAK%CTSВ!)hdjN&Ӭ&4k" 8͚jN&dj Z2D5%CSpx Zpx Z/iLy-藀4`gӘHc z")hɐL_2~ T % S)ȗL@$#d`쓑|N%#HF vid@$#}QG>( CGA!=~B7 z!(h=o7 #(|`o~7 "(|#`o*(Hɔod7~/8XuV~ZcO0ib?d_O+YVX징l,JFo,:Lb?,z~Zɨ~Z)X0}0X싕b_Ôo,zžX sžXɨžXɏX싕1XuزFK/ր5LFLw6Lw6Lw%FLw=웙)77`~܀T؁J@j@%`^(V?-w+H5,w 8|霙,w+hU,w 8lpÎi2myiBmz$0Ynqpf3YnqVc9r79r9r9rB[ȜfYaGfYAKƜfYa_fYifN 4S3YnqVwQ[di[Ii[ջܨz(fQYA?'Fe,7* (oQY@E1ˍdf+2^/Í~Yn, pc_Va;Xu >R4,*ȯ>RAr֓AbԁL!Hx ˭*hXnp4,*ǠaUT@ ˭*hXnU[Ed[E샆߉u;z~e8a% e!?'BdݦހJP;z*Yg@Pɺa!HFB$F}B2jOHFB$F!Q# g#t2>„d|> |q "LH10! „daa95B!LHf| s9„daB2j0!5Bp0#L8l&6OGȂßȂ.:`GJ r(i(Dp)L؟ #L(0_<„ê0!`>.h=&„0!`H@ Q&bÿ9}C!N_{=bJ1DAz%C QВ!(hCd! Z2-b"4#8!NszA- Ŗ^^ЯVy*/`7_~}p vnx /77z7`^s(7 %^ _l+,J^_]b;d؎(Nb;,#:Lb;d؎(YW[lG)_lGt̋S؎0=Nb;dԈNb;/#zŶB锷V0 &|/:l/J1H|a:-JF/:LNah/*JF/*:l/*:l/*JF蔯5k nr#Ŗ-(Lˍ |8̗v×S܎(Ų܎_< )c"%ܫ@˽ g ʘh7Ho^pf^TÄr7rfȆrrÖȆrrȆr4N{?RAi`o,G*hhx/G 8lx/G 8-HO{QAZzˍNK=FF9~~9is~0~ r+r[q[=_ B~"*8rCrF0L/7 (gz!L@9 a rCrC3jSٙkݚb4]u֮ 3];7#XJk݌bu35`Qt1j;ŬQLF5`5bԨwQO{=wDZb|ޙ.b|ޙ.1;8{g`L`5bԨwYa.`\wQޙ.Fzg:0ta.`\g:a_™~aN؞m^HL"Ù~asI8̗ &鄭Bޙ~_|LZ|~UZ|N8,0 5焭Bٛ/Uq{9ӗ7N_|N8}y{9z =Lz7_hz7_hz7 Lݛ 7N3uo>Z|~%焭BK֛ [5_Oۛ/l|ћ [+/B 秭v1/K]%Wnͼ1f^G2l]O/w zm_/m6uֶN/lla[~!%m}H~OsH~#V[`jKL;[mI}-I0 ڒ-IՖXUՖ5}-I0o% &KV[`:.jKr-[mIQ#Zڷڒ}-I0Fo)gm}ao 6uo)qFV`:j=RѶz$0_Ӷz5kַZFo 6״o 6״o9k[Ӿ{ޞ# ud{$L7{$-vHBb-?=Bxz7> > > /ׁ> 5LdtoI8}LB:7$toI8liLB:7$66voy%3}Ha{cgf> /$[:77Cny%3}Hacgf@fÎߤ|aGaCrBٰ#0BٰㅔooH( f]RV\@{+2%|8af2o6H(fÎo6H(fÎo6H(fÎo6xa/lBJffÎ=5|Nm[ dvXof$&e>Ygf/|0`>'̌H֙fFn7vXof$F0Q#ɬQo>'F85|XNF`>'F0Q#ɨѧ?'F0s`N 9?' dK0q,`>?%Xc s2k40d9dɨ Æ9aÜ aN\UsAj/` R9d0T{w+`n{w@`M\o6qa]MpXe]԰ zP&.h~%/[2x!\cr[9- z C.1%\ВC.h!0y{ ٰpC>rAK% R/W6`o[䋗+[/^lAxe[omA!X;/.04ow7tC =4a0켸\В`.h`0$}20{foA~Z9҂TEiA;G>Ϲُ"YoHOP^iUq^٫07{UeoWE٫"YKU[Uq*٫0#7{Uo(yF@ۭ5[Xۭ5 R2[MBnp5m!?Pr _*ɺޒH֭(qΒ8_`!'BNYa%$-dݶޒHF`!'FYBNfsm2j PBNF`!'FQ#Xɨ,dr2kc6q2~N9a֟q,&NƱ8ldKq,&NFaN96aÜ aNM|0' ;)V/`+Hr v[w vTp?d{@EXs~w^В`.h`0bXs`.h`0d0 Z2- 悃daiBK? 
sk9| C0 z `.1%p6`8ld0 悃dIsa$@- 悖 !ݐ|C C.7$ 5Nl>}.bMp z$/t} z.artޚ.ak 邖 tAKk ٰ5 R2YneM仕5]tnvHL6;G%yDfdglv8L+fd"YqpNFw#C23Zّ007;2fGdFhft2+FW_xnѥjqR!Aj;D23ϛ!a"5 5 .m."oq8 m} R)}NPvvFzCAJm9%Вl[R[Rv[R6[R[R[Rd[R6 8l?dZ2Z]'Ns-=Lh1ow8 N0żu"3żu"9żu ٰAżu 춘Nu6<v6<HGrt9>Pn@M;{;lbG dtttDw Fnq 66en(w F@ٽm0nPvv^vvvv3{YT۽, vwdݛ%/+`"8_M-~A"8-dsXo-֟S XOR Xuz5,dgXqbMF`&F蔌bMF`&FXQ#&F@jkrXHw k65d|װf]ÚMw k6CdM'5YUdn&6Dn! rXɚ,k` 8/Вk)f7=L`j0pXejc{S L͂ fAKS%YplXejd05 Z}NTВ,8lS@=L`j0YfAXv}V/Aف0&XA>L V#^&Xp}2 V/&Xkp w w zh!+CV YAK%CVВ!+8H6l\ld >+/`g'떜a+&d"vXt%NŇNauXU>)'gcuXMN֝7ɺacoqJ*5c}q2j*5cJFX%FpQ7&:wQ#8]ɨdNW2j+5c=a\{Gݻ\-\ .WAK ٰÐw Z2\7 ß>Ї?_=L?I~3ôgi4#4Ӯßi=ׇ?ha?/F Z2Cb44-1ZВ hdZ55p ((Gnr$Ywi,};çk'XIvve;}À>aAKvve;}À>a@N0q Z2N;*%ΠsXOJ֝3wIɺI֭;d9~UL֭^N2j/'5Q2j/'5IF$FrQ#x9ɨ<ɨ-|d>O2j aroIP2~x@ɺa~tXNƱ (Y6/]A>|K$2CK{c t2 rX*h`,c)}7 X*cÆIRa$c)0X 8Lk2&c)0X 8Mo0 Z2K- RAKc%m-ٚFƧ;*NfV*3+`of軇?Ϧ(?gEME0z+-`o>Poj/@XouV_/7.`oiWpxCiWpxCiWo]A`i7 Z2v-?i7 Z2vɆMLɴĸ,Kvviw{vxvz//K+..Y\]~{tvɨ;LۉKF^a/0b/0a/0yiwlè/q5Ox{7^a /zaw*-/q,''Æ>_%㸦Ox5}uˆgsAܦAv:钄TnS1)Ӑ? 8DvC/ GLy ? (7) hh^fA? ˿ ]h6J/F3టUz7=h^fafUz7-˿ 8J/F30*͂VhxZYВ* hpN?_iA!:npt`zPӁ H~ɿx0S}|C6v(" g/o{^)oaWcOy +h7v5_ ihU4_Ur@9͗PN_%|Wir/*9˿J.h4_Ur@9͗PN_%|WN_%dt8t8{^Ic炝dݲɺay$Yg= aa$YgEuzs;Ye;?$̮ZnC{Xon'F0Y~vXHF`n'F0Q#ɨdv2js;5}Xo)%F0d~ſ sNƱs;DÜSS~[ ?`od vG[ ?`o ~/h=ad,-HU~ k76蜪LBΩ ;U)2SU)S(H~>÷) q)rsQ"(A5rԿS5B}9 Z2-Bd}Qp9 Z2-B !G>(hrd9 Z9 Z2ɦ!Gi@q Bҁ#t ؇-6h_]M+_#}@s,I[}@> bd]$YzY1Yrv}@Q##}@S<02#b,9L> ]\Z\rC1wfdObod'.9Lfa\bÆybd~>9JAF.JAF.J\R'G.#,-we 8L\fEG.]Y 餻ܕ@F.]Y*rYP'݀r?eKA?'#~.]#~.#~.=4,s 8L\d\&G.\#~.-#~.r?r?  
y8~Do|ޒu{ÆaMdɺULVT1YAUd^ULV&>dɆO1Y[В!&+hd ZaSV/ZAdh^°Uv 2WZaVpoeh\ZaWp+C 8샕d Z2dh-2 ZAK %Cvg>܇?ҷCHgoҷWc`u5k켔 P/`ef0zPGB}WВ!+h6 ށ Zlz-3wf *,O8`قlAMŁ9d_oVO 1ER z!H,8H,8H-Ă AbAK 1`$d Z2-Ă잖{Z$$+"H 8-WNĀ}XВ!H,$.*Ln[&VNYlUx" Sت0) !dUa [& eUa:~,*LF$.*mO*]n{pj.=-viaqa۞da۞voa۞$S@mO*]n{P9rӂ9rӀBviAKvO =-ta۞:mON s嶧-s嶧9WDMm d]0䰇nT.8?%9ln;|XC%SQ]8aQ#ɬQ:Q#ɨrd9l2j65B!MF֧ɬQLF֛Tþ&r6?'r6'rd|aq#M1fW2yCώ69ln?s0'+O-ȧPZϡ< DyjA< mS v;<`3 8aSGւ yjAK<%CZВ!O ؟( % O-藀<_ԂKSDp*O 8̗S3ԀÜ<54k"O -Ԃd~VyjAK<%CZВ!O-hɐdSDdZiAD=LiAD=H9yj5*8<54_QAd{ר 2܂kp] nAK %C[pldp Z2d-=ma>u 8ma ނ&x r;OpaS4w ).9Osွc^В!.h d1BKf/^=aS} ~ #. ;0k*.8<0k*>YuA?'`Ȫ zN@V]CYu>.hɐUdȪ YuA>.hɐUdȪ Z2d%CV]В!.hɐUdȪ M+=Ȫ Z2dYuiEVϪ Z2d+ g\[||›}Ƙfd}toqdٷ8Y(ٷ8Y'[wn-NFs[|N7'Fo->L1;Ňٷ07fÔ[|R}Y#FKa|F࿌1a|o;>L1~>sw|bSta o;>FoB>Lڛdܷ{ 'u}parr}pAj(v(v,(vႃd:1| =c8N{ (v~ o8L1o.8a(v.Q-== Z2Fٷ{ vo8lZe1\В1ʾc8:0ʾc%L3۝N3۝ z$0ʾ)G۝ʓ)8쀖i`}rۭN sN sq 1Ĝ7ܷ[4ܷ[4d̹oh8-̹oh.hɘsnpZsn| s-N sߐsnp2ܹ s͝Ns_w`_8B0gR쟟a}!vۍsۍ r$8Ͽ8;0;Ͽ8;0;Ͽ8%cqvaw@-g BKJd Js+nw/hX ВvúJ 9Y p{xa]q%=4Y p{xAMVp8uvd=@VPT%-+nw8٬=%c%5lV~ +nw/hX p{x@U=)藀]zpXTp * *IAK -* Z2T<dx(8HGH⡠%CCAK -* 9x8x(DCAMT<DC⡠&* Z2T<$+ I@T<dx(hPx(hPPВ⡠%CCAK y~i2F@S_y ׷>Ydm6O[1M{&$k}7lb?l6O֧MɹaL7~a77CfÔo6?Lfzm6Of%l6?Lah):-Q#VFXAal6*F8&X%و0U l6?Lf#d(o6+66Ni}asڿ ߝiiv}A2ȴ}atڿ%c>v߾v @}AM-m8lto8uo/how/w}c|a}c|@%*yPvJ޷;}|A &NS*v=o?v;CL޷T& Z2&NS*vɦ)v;op}|iSp}|ANw8!Ɛ.1 RO󀖌y@KddC,}@at,}@A[ VA ׁ-cc Z2 f0W$S}%AAKX~J X~J|@Kp}bo_,Pӕ z1\߾X+ $S} ׷/p}b ׷/(ho_,pZyo_,PВ1\;)v:\nPvÔpÔpO5P\q[zfu5P\Ƃzf5!]h5Q>\/Fp5bԨ׋Q>\/Fp5`mT^B1kԆŨQF}^z1jԇ!y6$/Fm~_BhO;J$ =%턭BK'/d}BK'/d}B}ߜ+}қMzI =L&}B>}I&>M؞_8|iބT' [C}қ_1'/d}BK'/d}BK'/d}A(Oz_hOȳUOړʬʨ-@tK ,PDAD~hv0-}߾Yk穌ϼ+)?+)?O}Oz0waRso&0waRs^r_bd0%_s_2? 
Wr;7bI/ͫ`9wUI/K'/bd~I/K'xsXbOzA?~%_Oz_N%A=;~%_N_2?;~%_N%_N_2?;ϻ!?{_`9*ǣXBxxty1|Eq;o=÷[gc6?lw y;o㙳1kǣgcxX;=ñvq;cm#X;=ñvq;c4lw ֯Qgc8֎GcXFx4VQ==˱&^zzoׯnX;=ıvscMѳވ{eYo=x|O9z#_~yq<+Sγވ{扡rFܿxs{sg7XO9z#'~yqbd여.݃7wGgK`;=]zzֻtbP#гޥ{#sֻt޴c?=]zy[>bd8|lǑg/KV#z6zvy[>wulwuͮ'g/I=q<]zxֻ:bdzy78ο/Y=q<]{g`{wuYwuY7ػpg7/Gzop释g/GB?T<o PwXY7GB9ύP6|~|n여m여m여md7[m여wQN6]N6]N6֋6֫6֫6֫6R6KV6KV6KV@scxr@?n ޼\`9o^Au%+u%+u%+u%+u%+uo.A=`9/Y9/Y9/Y9/Y9/Y9,nc_6|9,nc_6ߕrY~Y~Y~Y\<_uٱlAF;AF;[6֟m;Xnԁ7Cr_^cN͘8fo4ь1cN,Lc~7f11?רkT5*xcCz!XFo^r5=֯kT5*xcCz!޲rP󃺱zAXn^rPƁd;h%8xc}øVbi;h%8*/Y9/Y9/Y9/Y9/Y9Pccd,'ul?rRlI-'uM:Iˑ*GjOwr*Gjmrjסw !7]N@rclmIס6~ʩ~ʩ~ʩƛKv3ک~ʩg_ z9 kcٶgNm; gN6֟m;X N6KVN6KVN@pcxN@?k ޼t`9o^: 'X%+'X%+'X%+'X%+'X%+'Xo.s =`9/Y9/Y9/Y9/Y9/Y9jcٖCg[6m9jcٖCסR~H_rH_rH_rH< /'X {*'Xka;Xkcv<^VO^cnṙrU{9ʿ}/}WGr_62^cӭWen}/cWemWen}/c}WekרeӭWz7_v*c}7*_ޖcՓWe|-ӭWƛg?zPnn}(~o~Q֫߇r%ko}(tC ӭW~֫߇[`dvKhC d|o5P~ɍP^E~ FzW~=yѫ߂o߫Hloo_z^7Rۛ~~#E7RoF?/z)n여ϋ^Fz^woU̫ W3~'׏g^N`{Z?y;n여ϼ6_=dy7Ϸ緱!˫y,~??6KVY^~~m?dym여'z!˫ocd=ICW~!˫{G%~o'TJ^z`;*y{Qɫ[lG%~o=yG%~o=6KVJ~ίv»'z9ywORo=IWz= ̫y'0~vɮc|;1Nc6c~:3fә1{W9iޣ̘5kOg5әzXFtf^r:3VQ9רΌkTNg5*')c')cYNRϳ՟g9I?r2V$%|9Iר,{h;J=7kߎR6Z/7N(X(e%G)lo~WQ G)%+G)%+G)%+G)%+G)%+G)/n;J7z/Y9~Xٖcg[56m9Xccٖ O.6|n',','_rrm׳\l여\l여\l여\ld7)N.6KVN.v/6P;XCac} 16P;Eom?r)F{ykrErx1Vêyz/kb;XcxoWhA?c%C7=6KV=6KV=6KV=6KV=6KV=_vZCߘxs7=ccٖCg[=m9~ٖCg[ND@?XlHDdcD6]ND6}b7즩ۉ~ʉHwt㒍ЎK6B;.R|;K3YR6ߕvaRRDdbJ9UʉF{s yn'"r"=o?={Gx? 
~_7+?c8??ޱn]N\?S߿?;?b?nW5]|ow녱3Gg+վw-g|gg^ul~+2R=ޯŮg\W*vKgaQt{?N.-=/z=2ob_>u{lh?s4u~m=xSsiNz֫r={?zP_FNǾ5????u}~~~g3??_A\_?SE>_~^IP7jOwyS~suZ>Ψ?=c}7c=5c'm R?/R^ cޟ C?뤭y]}NxO/{N83d~3?_V2?̏ynv};@oзClзv};e;@oз,, ++A`IGt t=AOGt t=%AODž-AO`IGt=AOGt t=AO`C:A`C3=%@83=@83 =@83=@ϸ`83=+_e0<cJ<cJ.+[V"/ȋybGvJ<cZ ,zx7zxxRjZz^jZ}͎~ZRKRT'IzRTRMW,ng1c =Ƃ%@O*Г*X =@O*Г =@O`I*Г =}@o}@o}2{x$40M2L.\yakiak1ϵia_\O-am)[ߖ{: zsjsjsjsjsjsWܠVܠVܠVܠV7z j j j jK j j jbbbbbbbbؠؠuz j j!!פl<9/1p0e}YyYz_c/+/z_c~P9V^c%ۉT=%@OST=@Oq-H}<CEzdGFEW>,r}\>g|\G1X$=@O"Г(X$n}?g~]>=zage zKagz^ZVziנ5z ^2owZz#A͠~~7X>{ϯ nz~wyzuk/sAC0= AC0X0= AC0X0v;^WcT#z5^W#z5.|~!a,az!y,yzށwyzKiGz-l,Hyz:IпdУ=:%:ANУ=:ANУ3X?{"HA/`)RЋ" "HA/RЋta+k"yƼHc^1/eHc^1/e8ZyƼVZyƼVc^JƼVc^1e-+AJ?j, zsޜ7gd%Y,Y zVged%Y zVge7Y zVKz,!qz!qzK,z4MGSDz4_)Dms =%@-s+Xr =@ϭ-kcɭVs+ZVr+܊؝oS}?6#W8ԇcy^`X\V19yN<'cJN<'c1e%@/`)-K =@)c =@`)c*Xb =@)c ==Ac5az؀69#XG=@O"Г$ $=@O"Г(_0_0@/-K+XJ @聾T940)Lc ӘyLaS~{`hZ @40 4L Gp x407L xyao+6z j^RZRZRZRvPKjPKjPKjPO;A/)KjPK ?e @yPciPciPciPc ی m&7ؠ6ؠ6ؠ6ؠ6 6 6 6 7j j j j j j jhhh}ZڠX!Z-xy<ޖxy7@ F s}n>7en>7@ P?Rg_}>T@*Ї C%X CP -}E@"oзHlз[-oyAA#A?e@?W |looooee|||||||,+'''''gY1111EZvbߣQ>ԗؗfU@*oN`OiЧS7 }eA߁@w ;,;}A߁`ف@w ; 2ADG"#H H}$}A߁@w ;0Xv ;}A߁AA#~qP?8AW'ye_|_|_.+222rYِ1ߐ1ߐ1ߐ1ߐ1ߐ1ߐ1ߐʆ\V`̷`̷`̷`̷ಲc}]9b^*, ,  }a0A_/L8m~>?AOgO' }~>?e~>?e~>?AO'O'3X' }D>"eD>"AHG$#,# }D>"A`HG$#  }D>"AHGdHG$#2Xv";N}'e'A߉Dw";,;N}'2}큾>ڢ[VWײbbbbbbb,///eeab󅵬-, +X +X }a@_X/`J }~}~>@_/+X+X }~>@_ }~}a@Q`Q(G# }Dˈ}D>@Q(G#*XF# }'N}'@I$w;)Xv; ,S)} >e >@BO!ЧS),S)} ~dRk|NEE,///////ee||||,++"+"+"wdޑe-З |l}l}>[@-g  v[}G@Q`Q(w;  T T}*>@JO%Ч6@JeB>!@2!@O'  }B>!@2!@O'  }B.迼m|>>@G}|>>e|>>@6>@e|`:>b>>0Lt|||`:>0LG̏7`:L0&L a+dPɠA&A&A&:Lu 0a2dP A&:L@&:Lu 0a2dPɠЇɠA&:Lu 0Ku j} j2ePWˠA]-Zu i݃݃Z֠5 zjjjhYsYIΘ'g̓3yr<9cJr<9c1Oe5v<9c1OΘ'g̓3ɹ%' zrA(=,= zޣ(= zޣh`Uc5XbX=VAUcX=VA`UcX=VAՠנ* zK* z* 777%sA`\37ozޣ(=,= zޣ(= zޣң(= zޣ~9bXL=bz#-L?+yσO}; z˃|<-z˃<-z˃O}_ }t}0X"A/`)rЋ"A/!:z:z:蹾:z:A4àΠ~,+ XV@?:+ }e@_W+,+ }%AX ~C?ϿJb>b>b>bM2b>{sBBBB²w>zw~t>zw>zKw>zw>?n|/ B}!e!l , B}!W_2@!X|}>>@ 
|}>>@!Xݎ@_/ , B}!@_/`Y/ . p«-////ee!|!|!|!|!,+ aYY1_1_1_1_1_1.XY1_z=;!!!!aY|3=A|3=A`|3=A|3=%A|3=A|3?X2?s?i@@} >@2@,@} >@m >@,@} u(~ w}w}w;@w#XvHi~Ax1111ʴX_XVfGgGgGgGgGgGg2 mgDz2;b ώώώώώemY,eY,@_/ Зe,e}Y,@_/ ЗE, Зe}Y,eY,@_/ Зe,e}Yx}<>@Cx}<>x}<>@Cx=QFzCAx|󪪟?YS}>u@:O`:OЧS}}>u`y_2Xb>Xb>Xb>Xb>X|7c&cfY3131313131313ʘ}c>t{h|||||,ks\}>W@+ϕ`+ s\}>We>W@+ s,s\}>W@+H@_$/`G}t>:@G6:@G}t>:@2:@GGGGG}t>:@2:@G}t}tSсtX`:,0 Lo8 b~!L 7t`:db>d02L C!鐉t`e12ò!t`:d02Uu Vԭ2[=555555A|{zP{zP{ԞԞԞԞԞAAAAAA.kc{Pc{Pc{Pc=5555555AAAAAAAmc{Pc{Pc{PzFjylRzAUпDux=^AWGBWx=^AW5X @/>Ћ)r؇۰X4VR1w}Ty*<c1Oe%c1OŘbS1T\VR1Ty*<c1Oe%c1OŘbS1橸,znzn}K^!Ez^R!E!Ez^!E,Ez^!Ez^a!Ez~ >\ =4AMC=4c`3 :ήGk~b Ɗyc-+Ɗycž qYi7V+捵4V+ƊycżVy+żbJ1oҲJ1oR[)VZZ V V@o%[ V@HQ#-=@O`I0 =@O0,Xla;@,+ @`,+ @,+ @G =%@O0S4-ނ%@7 x=@7 ԯwRv=(@y-+@y z$ɠG2 z$ɠG2, >:X A/c4 zA^Р4 zA]z@/h za+h A/h A/h A/h :X A/h A/h A/h :X A/h A/`)h A/h @y=fy=fy=fyi^Y^Y^Y^c7cccИ&%%%%(yzF덖-57Z^o{l{ u6Zojh>վѪ}UFV7Zjoj*%U%U*h_1ѾlPgq}3bPgFmFlfFmF h3bgFmFlfFmF h3b͈6#6ڌh3b͈6#uFlfFmFlfĠΈ6#6ڌhKa-u)lіF[ m)lіF[ 6RhKa?k>cm16hcm16ߐkݐkPoȵnȵnu\{L=1=1=1=Y7ѳ|ӛه߰6@R)ЇC !,C !}H%K 6 m/T,e}˲}ف@_v/;З.Xe}ف@_v/v,;З.Xe}ف@_v@_v/mف@_v/;Зe,e}ف@_v/`Jo`JGMG Q}Ԁ>j@52j@5G &XF Q}Ԁ>j@5AQF5`5f]I-///ee||,ӻZVPPPPPPвbv{1}uVn/}}@__`Y_/ }}}}u7YQ}ԁ>eԁ>@u:GQ,Q}ԁ>@u:G]`u:G]:GQ}ԁ>en}@m6w[6w n}ۂeۂe@m@m6w n}n}@mџڨ Q}ԁ>@u:G]:G.XFQތ:bbb?[V`````ಲcccccc555555Mbm)A_/EЗ"K1X"KR})A_A_/EЗ"K1X"KR R})A_/EЗb,EЗ"KR})A_e)A_A_/EЗ"KR eF># }F>#AHgdHg$3 }F>#ee@{={ }@{wnc0X c1} >e@{@{=~U---mYm1m1m1mllllll[Vf[g[g[g[g[g[g۲2b>b>b>b>b>b>ۖw`cXOAЧ S) )} >AOAЧ`LAЧ S) )} >eˀe@2o`22W@+2W@+ s\}6gmyܖ ЇC}>te>t@:ЇC,[&X [-}ˀe@2o`2oз [-}˖}ˀe@2oзLlз [&X [-m\}^`rOsŘee||-+K'K'K'K'K'KgYY:1_:1_:1_:1_:1_:1_:҉҉҉҉҉҉YVN̾ m̾0d̾0d̾0+**vkܲ& I$}>ɂe>@dO2' I, I$}>ɂeu@_]rp~\,=З^,`Yz/`Yz/=ЗK}遾e遾@_z/m遾@_z/=ЗK,K}遾@_z/=U,* I$}>ɂe>@dO2' I, I$}>ɂeu@_]@_]̜2@V;!xxZVSSӲl+*****jYV1V1V1V1V1V1VʰZVU̇U̇U̇U̇ղ'ѵ.G }t>et>@].G , }t>et>@]2@].G _=[}t}t>@].G , _7e-l E"}@_d/`Yd/2 E"}Ȃeg@#cfA_`Y@W s-Xs \}_7eeˁ@r@roy}:'gz=LC;{}Ӂ@t b:1DLb"Ӂ@t b:11DL"Ӂ@t | 
b:1DLbc:1GLcA:u<x88qP㠎A:u<>u<x8}8}8pPA݇uATrP%x=_^_/A],uYe9rP%rP.A]zA۠n`٤Iu&M:tP7)tP7.A],uY,A_,uYe 96slPkPנ.A]\/A]\,U:}T=9WFUGղ2b>b>b>QQQQQQQQeeT|T|T|T|T|T|T-+***jYN1N1N1N1Np}8>@N'Ї)X p}8>@N2@N'Ї)XQl#зm6}e@cȠ~V, U,*X p}8>@N`N'Ї p}8-l p}8>@N`N xA,qz܃}zT8~=c^JǼc^1 vXy~/c^JǼc^1/}>e_V>eyǼc^1/ec^1/}>e_V>ey/+zyǼޗzAwzA`wzAwzzAwzzzAwzA`wz~zA~P?9Q(Q,Q(}> @GAGQ(}> Q(}> @GA5P{|l?2D@"ЇHVAoH1ֈֈֈֈֈֈXVF̷F̷F̷F̷F̷F̷F̷Ʋ5b5b5b5b5b5b5󭱬looeeO|O|O|O,k{=}O'@` {=}O˞}O'@ qmO'@ DWU*} @X&!X&yN(d}2>%Aw|=|=Aw|=A`w:zm2>@ Aɠg`P30/@K)R /n}7@M&w) &wrݴ즘輦輦輦輦輦ee7|7|7|7|7|7|7|7-+))))))iYM1M1M1M1M1M1MnnZs9}>Ge>G@#s9,s9}>G@#2G@#H,}q8@_8@_8@_8@_/#X`aOЧi4}4}> @"Xv,v~x9}>G@#2G@#?,s9N/z?o||,+ `YY1_1_1_1_1_1_1_2222222XVA̗A̗A̗A̗A̗A̗w:zKw:zwzt:zw:,zwz8)z⠧8)zKR۠6 zmK3/l z3̠73 z3̠73 z3̠*O%`T2zw:V zăG<zăG<zKăIGLy @?zK/lz?z?z?zK?,z?z?,z=Q?.j_?iǼ1mLǴ1mL?ic?ic;ӎybXL;Jjj޸ڸڸڸڸڸڸ76666. zjjjƂY ::: zKjjjjjj^ZZZڛڛ-wu(:: /(1:1:>>*N3t8q8y/+q8yogc<^ A/` A/`  cAX;c?[K^(5 z^g' &*?Z^}R1/p 8p 8yǼ"y.?ZyƼVZ=0Z]ւ4X =HAҠ',z⁞x'`C8=AC8=%AC6<ͯCJгraC8=AC8 8=A`?@?/X/X=@O</_:zK^qWzŁ^qRqW_ K=@O6}>_/y-+y_Rg/3:8t ݱ;6tdž;<:6tdž;R5tHՙSuT9UgNՙSuTHՙSuT9U?;wHܙwĝ9qgvّ0t.e G.e Cފ:Cgcl'M'm:#8Cg 3tpC7e ݔ2tSnє2tSnL#8C/98gΙs98gώ98gΙ#8gΙ9Fgώ9Fgљct]Q:*GTg&=3㞙 V ݪqxtcn ݍ1t7G7n 7<.tÅn B7܇WÅn B7h B7(%6;zj枚fώf{j詙{j枚fώ>$]EhxTQ* }'UxxT:B'Sd L)t2NQEhxTQ* ]E(tJ:B'Sd:B':BBGH !#BBGH !#$t!#$t(_|84ß_5ןKq$SdʘL)c2eL4s2eLɔ1fNɔ12&Sd92&SdʘL3?G|Wzf̩95~W:22-=Ew.t=d=d=d=d=dm!|Ȃ{Ȃ ]pYpYpYpYpYpYpy{Ȃ{Ȃ{Ȃ ]pYp#!#!#!#!#!#-t=d=d=d=d=d=dБ_"篿OQp37s\p3Qp37sx쨻n溛fώf3A&sȌș_4g.͙K3_Яo.xp沝=h7ˡs9t.\ˡs9t.y|;B{xh txh 4t:<4ha4Ç|Cʕˡs9t.C?/ŸWKoxTx ]+]>s\3W>s\>s\3W>C gG\3>;s\>s\3gG\3̅>sv̅>s\>hBg. }Bُq\i"GCw;? 
[79=MN5Bo[#[#5Bo[cxУ (= BУ /HgO/;WOqC17Z?3zk zk!*}ȫJc^)WJz^)c^)WJz^)c^)WJz^)WX)WJz^)WJ?X)3W+e2Jy|vW+e2JyٱRf^kc1ژymxlƷg|ovg3y̼|>;gٱnf?xͼnf^73fuُOz-z4MGMGSz4MGSz4 z4|6ZgJ< =Bϭ=<6J2^B|e=cIKEOz>>Oz>!_2o3X7g?3N1}BO'' =} =CBϐ3$ =C!~?um7e2Fy̼Q>;6e2Fy̼Qf(>#;d2y̼Cf!3ώ2y̼Cf!3wgy|v쉙{b=1yO|??/kPEAzP ߇^Fzm zm^Fzm^XFI}SFkBߔ1 }IO~nJx\z 1z Ck=^cz=.vcwޝwgzwޝcwޝwgzwޝw /yCGiQz@>>N;xCs!߹5b XC/Ћ5b X7uW μug޺3oݙ[wٱKgޥ3ώ8Ny'μgމ;q8Ny'μgމ3ώٱf{3Ϯzmw[z Ç|:C/Ћ/ B/B/Ћ/+ B+ B+ B᱾B+ B++㎤Bл-n }uG]Qzԅu^. =BУ. =Sc^|_z^|c^|Яl|z`XVz` z`XVz` + =BKhSXBP%럏f^B3/Kh%ٱvf^;?s\]CWv]١+{xTv]١+;teЕ=<*;teҡC:tH!:C!:CtҡCzxtҡC:tH!:GH鐯 CWv]١o }O[|~8?t:??t:Cg)cc7AMzc7/ z0 Cz0 c0 Cz0 M0<6AMz!@.˘?cg?s6󌯁2}!C.\B/\r<"xE "xEW==\=r.<\xȹs!C΅s!C΅ 9r.<\=r.<\xȹYO|:2222222CGCFCFCFCFCFCF/xi=;QC!C94rh<=4rh3/gKf)mNosʼ>;Kci~g5ҚҨ5BC+C+ B/᱈BУgxУ't =]BO1]BO%t =]BOexL%t$ =@B>!faf0|$C/Ћhx,Ћ(" B/Ћ(" " B/Ћ(" B/Ez^DQEz^Dc^DQEz_o" B/Ћ(" " B/Ћ( 8yCހ[* Bo[* ǖ Bo[* Bo|E= B/oww3G#m1fi3|Ow3ώq7ЙnqٵBmxпhoo!qz q7;6mٱfa3ώ6ayͼfax[B_Azޅww]y7<]yzޅwciGZz ziciGiGZziGZ6aa1&y~vl3o™7̛G_cz0ezم^vcم~ `'X z z`ce^Yce^YWVze^YWXYWVze^YWVze ze^YWևר =jBУfxУ& =j_5jBУ& =jBУfxУ& =jBУ&& =j_o=0B#=0B10B#$>G쇎c?t쇎бKϫg{~{36s3̭?a0F̯Jgwj~M2&y|vl~ϼIf$_6$7IM2cgv*f33v4kGg wCݐq7d| ̻!nȸ2!ny7d?t8d?t?dNLLLЉ/ 89r<8xqz<8xq?u:o3(z; h4yٱf^G3huٵdB/Kf=-Nh!Zy5tɇ.%?tɇ.%C|%C|]#CzXc+ևG8<<:5tНSCwjNSCwНSCwjN ݩãSCw%'CAC?\8ta:שGυ(G3a3aqxj| s2tN9;rG畓399sN~v99sNΜ399sN~v99sN|7~v9ّ399sNΜ39ّ399sN~v<:+5Cf !:5GM59;:곫BwT Q;jxP ]CãBP xȆG 5B5BP ]Ck(t  ]CgxOm(t3<#t|'{GӄnMiB7MYFiq4MƦ-en-%cc#ccdl1uVdtT~?xM2I6&$c&$cdl|}CgCfCfMyȬyȬyȬyȬyKYYY:k2k2k2k2k2k2k2kBgHLL222:N2N2NB1~ȬyȬyȬy7NNM?&]~H!1sH 0s, 3̱0s, 3gG, 3:A͎9$f|! u>C롏8C$^`g A:GA:BA C4=t ju}iCЧhS4):UǾowxo7 }>}gggggggggY9Y9Y9Y9YqV|V~v ݙώrr~/m;f fX=COЧzS=>'Ϗ{ t!߰б:BBX:BBX c!k?3'glj???????q||||xYKg)}˜0f|쨏c>f㳫>BG]#t}QcxG]cxG]#t}1<#t}1<~d#CB0Cs(tΡ94Uy݆nqCޯ<:  }/t |Cc` 1t0` 1t0K0>0?<r#'gəsr朜9'gəsrəsr朜9'gəs#'gώ9'gəsr朜9'?;rr朜9>/tN:B_:?w%^] Cwa.] Cwa. ݅0t. ݅px Cwa. 
݅/>yL<|S+c:TCjP C5tP ]+v7tCo ݿãC>oxCgu#Cguա:tVY=<:tVY:CguYKVss f'WCz?c?!_:}k z%g%gIYz%gIYz Yz%gIYz%c /QD[{f=3y̼gf33ώ=3y̼gf33gǞy|v쒙w̻d]ٱ/>E}z_E}z_ }z_|Cx"/Ǿ/Bbx{b]졋=tݡ;twݡ;twGwٮݡ;twݡQ:CGy(塣|xDy(塣<dU>dU>dU>dU>dU>dUʇʇ|l@p|p|p|p|p UU9tp~xgNL3> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 315 0 obj << /Length 782 /Filter /FlateDecode >> stream xڭUKo0 WG0lVl:Jֱ)?l'MJ)#+"?e4,+g,M3Vn~,!di1:be` UFe;^ElIx ۪wk.n.u:$+3>lAnH{ Y H Dg8L+S|lZfqw;Ot;'ʯ.;ڎ[9.-cxi@ ]{S HKnڸԙ?،_w? c91%'1^^Il^}bΔG,X^c_HB* jjj+J{eBBv5a=Ʃ9!Iu͆zMXf_С#J&=sM qS02'ឞ Yu>p3O7a#Ss%Y]%lԮG'\&G\~*Ys蠩o)4>CۇLҲ&VVC.G눰jx&إƳqz6A,?=j2K)?y@SplSBH*3EE; endstream endobj 294 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-036.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 316 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 317 0 R/F2 318 0 R/F3 319 0 R>> /ExtGState << >>/ColorSpace << /sRGB 320 0 R >>>> /Length 4100 /Filter /FlateDecode >> stream x[I]Gݿ_qa"I,,c_"l:zk%_`;Ƿz:5t?|y_>?^{9w?߽oȿ)#~z'O=^Do2]Jnb>LEDZ/|_k>?sFTڎ,+Qdi.3ͣp|OUp#71y.#J/!QK: ogG.yٳ^ԁ?o~G6(!b=a=Y`&ỉ8KI>"8WO>7xjDvK/,4_*٤}a"ܯ~Dw !X)V/w/ԉy~Ql7=4):Q/ .SYCcON ?4t~e[ yV.hG'jٌ{e#e,!`q8GBUа#DZn#_L,0{VgLs{П 1?yAV|/~^c(g·3i ƞ.qWl5Her0]7H>6RGh7S?|~b`3)~Y9.WGxv[\қ(wlr-ދAT1X""}TN%,<ƶTc HhʛۣbK~ &`poOTyBJ&pqf2{Q{pO Fs9$0bA'/cvP [ j{N^y ~p$Êsl7ObeS`>76>|2k8qD?|te8J'#Qzc]';^>W 0\(.Ίgr嗀4x8?7%Zq[dq-A8t)A>WX#rpꍃ1)[L9.]m.;v7x:pa:n=\ww2L6e^w'!K7#͠ytW!U9)WA.AcUs MNtVv 㝭 p<%$v 
@N;/!";;?cһte8~3^.^@&wwn49ޏᐌ݌ŤQ )Lp, t/HG6|j]S&$u<0 0,=dex@n< T+R0a+}P@b<{-ʬğuCDShE%ayOֳQu\=Ρ:ʝt gP1ʣQcCR#1f/F f*frG*|*"dTv"o6߀dk9ԩd}b+9V&QI fYy[y= Vb*`ڿ_uZ].: eV^{`}mF~'n,\Ÿ%YEM guiQl„XNY; [yɮUtyePWҺ -=!s6X_Q+)UK*6,iγX7ba+?9b(b6V^-KBgbCVQI<{}"u 掋HuCb*'V P``No.R{x^rBz<oC+mXH,E~[B{ \Cd7G^ b+w{$pλ`+n!>+Zju2A5"s<%;&zzE*oaŁvܧ̰E^MbB$N6"w0_m{܁v3{X"=.D]V^w21CV; #Ρj}djT"m {Qy }nӮ =.Վ27ȫe01ew ٴV `CYgnpF/}vDjG6%ONEO/^Y4FOO `8K|{!9ML*:z/hud4*`++dkȕ{uFӎ#'kN&ȫF7%iQg;dW]H"QG#Gdj`8{Vdxx9fxA^s |e fO"W1IQ/򺊠W<-e8I`Cm0WRVlֲ8OF fm4O}@1yIuђf|_AALO/JTzx{Z>iDB7"o5rڪ;V+TWaV)`x?L<[olMFk|nLJx|䵈EBfF &m2%J+V1E~`F#Ui^+DUll姷; yj;-10["vGB& ZaZgfUq* #7 "9 .dSQesZ{^H5KfE^KFpҝV~\ hˆlۈ|e#"_Yk5)qϕ Ndc+o%3l_62}ѴL7ޡMUXŪQUll5Cc1FS `kOM7u(y6wx֯;x[ӏ{8 pZdA`+5۵v3w.V^o"g{2Wb}( z0}l/fԸ=yadԝ #qВy!F/B VwZyydc Q3baXFc[#bpR;MʫW+[xʎ_c`}?ǩ/FݬIܤy4届4㱊8NoqPlO"boSb`y|5$DbNMe+"?aa+UG4r\uGʰhՌy]-l_5y&YSnlu|(qQS՚ VDk'_?sV5SbQ_m^s. , OP_7b++GP"7m E'=qH~05AugW;9>&fA"h@`+?f՞{jE^砿Y )&K< p1川%*Pmc+Tw foʹV~vKh3N7 ^~D'޲ZV^Bsāk~joc-oK'G|਒olrYᣋgs𚇆oE[>_uG*ׯ+:^\-$Kqa:=%03_kERE$9ϣB)U3YÝV=; S]SȣbQ jJކ1Bx麟dg: ~ozsw!,{zo L endstream endobj 322 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 311 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-037.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 323 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 324 0 R/F2 325 0 R/F3 326 0 R>> 
/ExtGState << >>/ColorSpace << /sRGB 327 0 R >>>> /Length 4415 /Filter /FlateDecode >> stream xZK%G.ۛK[di4 #lu0sNDT5 ,ܟϭ̌׉̏/~~g?oRzͧB=c~x_4ioݟxx׿w_=ɱ0YӒjP~|⫟XW5~xˏ/oYϷ۳G9[*!Cu B->z||ZVSu0d9?w~ԙ+Q{`YuWEeobmY4Y|go03LxB$aX#+ jf-?ƃLm?S lP4V׳TIXy%ہMV֞jK;3rk!Q:7ӱ$JfVKZOnm*{<6FmikJ\%ǘ!|mc&wwpyscÒ>-wjnVzU^  F 6v{3ЋC/Q%2±I@14e _A$S$)7|Gz酦uٺK[Uo[/p_)-,sznĠ. l}qmܟ_ ͳw)bNbs(V2pA(wBY]N8s9tԨ":,6E<0&s{6P_llC!1çr3O&QMbi/E6xfV) $$ Ė_DAy̢]7lKC(Ns+}G!\9,رjQ4tY2L'OqzcXplhn oތkf ϽI8T.OԺA{>h8M&H_##SWh`d%6r$:iLN<21mNV [Y]Um룳')p$X)K4:A'K[MbʘK O̕c"Y/BD .{_h0GC{p㕾&Xo6Y`苁T4M@2Fa76Qܹsx̅ UJ$Bqr85S5@QplKց4c } x&udtdbYMm$[~!/7eohXCJ`G|2 e1r ئ%賉Aǘc؜\13Y/hPO D J5KuR~Q?u؂ja7UyKl=wLfϽ&_9͞;, 1UDc@cMb1*=i LOt1i٨j[dñ$`!iC ùF PFaC J^RPw}4Emdm4Ņ/FA^DXz&:6 DRk N;pOk+=0>Y9ޠX;6ڀGylȴG{2>ٖHc#'W}圴1b+a0r6FQd>1 cݳ;;2&&ѨRC6.KN%E:ca2)kC.cbsD_RҎm! ì>=>bG`hVa72А\k>kCcb8vUǎU>*Vp$ò$DEuh}RW*6zQT9*wE\(*6qljePb_1 {c\k1;Zk" ŢREl]$~kot/zA=>Ƿ^FU$U%c茎dF*?c:caR1[HZ0$T+d_ m%ͣs-Gb<`\Ls#}6bYgc u#"?mC6P0,({tp:Yh;X#>☵ BJNjDzc3/ {W+(N0l"ym Ĩ"+[qnyNtW(9ewv| ꝳ34b =ށMbqkra&4 \a%߹+s XѤ3kȦ*sF)־6ܯEԑW`wDE.:eCD`py>WEvG@0hzrWtHQ,a\&هP0рmzs~8€EN( GvUJ`XTb8FZH?@WQuNa؜@azyd[4tjz<;-‹Ȕ0 bw/1x.~$7~U99ͻu|ѼU@\|4}1V$V -=$`y0"J୓`x-[u;Ƭkx_ ~(q\K^%akwW9~=XT25~`(#rcJ B-`$dzG-j`fCrQ9ǑQkg`WͻaU-y ,W($Ff5MzMuyBhI1)I/KV>Qp19202?83v zb7lCqr6!C\/HgA%2l]|+-$,uIM9۱I8#:wr|WP؟yD8XAѤs$p^9޸0x0U<}a|k m N&BsZ!ۮsZ{C#b@W2q^1,i'aA , !=$ae{`_a%ڎ0"wdu{6 \Gm=\]F]CJ6K/ ݼzyagޏ:c]G5Y#lu6:km)/ҰvF !OؾZӁ_5PzNWp2as fTTrZE[ħmvi*|ʛU/NvdsR`w]Oye0"nk۶󆧰`w2HT؟0]r/cxIe;pS%|Kq7T[{Kg.\$@8Nkٮb_n {CL3l;*GHh$yCagcߐN%qC"vK0W rH봼VΧ{IL㞗D*⚟d1u}┰_nq݆dѮڤDG^%zGzøQ3\HU_%D#O0]xDﮝ??/a}xtU`^'܏Xul(VaJ,;$j곚fށHQ1!ao\b'Uvbr^*r$%cU۹mIQ-t^$[޸*(vܯV!a%O^EW??&?l-ת ?R5?6x ]7Fy SF/am/̮/-'bl(tecnF:ZM\?8ڒ.fLtS)`_'݊FBvb> stream xZMs6WXx2MCLv`,I}ʱjXL`q `дQ6z=.AQQ82Y`MF$WRD\Y%v: U6襠bq!p`dyi.iQd;caE G-,VEc͊K,UlW.[ dbo`"(ĎŘ,N99VXs9w.@@r#伄 n<\&#om2g奙5ϒKe C}@ ^)AAA ME@HPGvpChE(vA49X.VI~@,:I)T& mRK6u5V5-LF?ꦓvQuU\WVvl^wy;[.9ϲ˴I,>ׯe_tΛ鴼};}^7nOcR7 @'-X `r[7@tZ_-gg|y>κY[7[\M9_- F@U4/N 28 Dz)[yn^GP<=4O1wpA 
|4/JgQWCbtXĚnp<ׄOCvSd~O5w͚g̕YCsȳہxqI=x,p7\f\""9{8ߊѸqpqsy1&,x?fF dSy܊S>zwN ^|Q#?E Ot#W*NiưW|c\>7xO7(ɏĚnc,|\kOP"ok#y XGuf|AUa;GބkF}aͳoól<˖8φm,f2;\GD#o0/  endstream endobj 330 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 335 0 obj << /Length 472 /Filter /FlateDecode >> stream xڕSM0 WI9 `E{Aā043B{-*M???6(cmƗFSTv[aB]?I.6G囩xb¨WbsC 4/qö6 eۗաk5K}5ABE#XuG }rCj³wAW vVuȚ8Q|8 t:F:v$m&=!Ě4=Vy0qFPS$ _)Ǽ~ȁq1q!N.$} isBU.( 8V RM9yİJ9L)n%vxp=Icd0dڮ{źhYKl.> /ExtGState << >>/ColorSpace << /sRGB 339 0 R >>>> /Length 646 /Filter /FlateDecode >> stream xWMk1 ϯб8%]Btz(=t ]+xPB3ޙ7d隘vk<]>Cb :sM-0r@oҼ&}iCRhcَq'f=}FOLM,4JLJp q1뛕94mf87+DԞ^FK(:ף ]CGl+5q%[[Q@ݛt6vmz- \B|^b[E,iMQfMP4D/Akg1FBʍ9AQ1G#l\縳WKDe~Fj=U٪$TKҊq`Tp."Z17.$[)%D+Fڭ)voPաsK+wnD {"ye\Ղy$05{Q9%`ƍm"vh ĚpX cmͱ}ɸ.ViMjuX?zZ(b"guTtO/kZJ1G7/(Q.| 6)8ZA9- +r.u'8GkC"7Y endstream endobj 341 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream 
endobj 331 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-039.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 342 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 343 0 R/F2 344 0 R/F3 345 0 R>> /ExtGState << >>/ColorSpace << /sRGB 346 0 R >>>> /Length 45998 /Filter /FlateDecode >> stream xK56_(&,l x`x 54b+ ّL&wϿ߿_~?<.!u|o2g?O6}FG?|?u|Ͽ|Q1?ڳZԃ1wKS_߹;?Zxy}׿/.ug}l?ms=~7ǧ//.K7??߻j??w~^Y6}of~:?.mZ߽}rjʪk`5{wy÷Sڽoޞs7;nzy|^~jYuSe3W0v)=Dg~3uRz+vq.rͻ{w}hhE{|3{p"ίoX};eQ/_Iw/ߥzY{~/.nPXl+S\s/.Heׯ潷3>_TBWn7\o.o*dmfB_+2nׯ-bwL^|.:@(/,2Djpfr!o$-BͲ*4~=yudu릈U^ZCd͚m|%VU o6kYXŚNT5v3E39+V3bW>pw]o1SRǠDuolj<7.޽u)AR7jȨ_Ư>վP}]׺+f@]Vwٱ/_|>8g+>]\^%,4<wf|tn=\iW3_h|X6o Uwo6,e$`1C XoŢ4|՘oiJQYb+|({}ʚEo6,{ ~!^uũhQu2\g}wQ7Vy^=rbM(}/-PYW:hU8_s.F6.ï+~+B*aZTxy+R-ΈovwEr1ȷfQ3^xa%>GD0vW]C|B~YJP jܛ{5ef31^8D4Ľz%Rx] &bJ:{} pՔ\tRyD,W71"MYPx tVi%`"Ꮎ{W*V*/o^;D>T&b~"XX _fbwSGv}`qt;ļw|.va&{H C ӽ؞CALlWϞ^`A.ALj'{b/{+wa&{O|z qݮS +8stʷCfrb5W*w@?3+WڽKOYwL|’)`Ο8ۊߐHkx!{tbD/zNfA+6I 9’h{1M<8@8ӧ][m{6Lϯx!"\\a~jƙL8j\`4NRWƙ>`sZe-gzr Wܝ8s1b 8xch=gz~Mqd+3y`,3}nbIh1p G'"ZKXJb*%m]` 2ܳ2&՜!95s/Z:`N gzMQ1Kp9&2[~s+Dx ɣ*x;P~}z Y151[~s&dPk=-v8ӧSq"c8w| 6 dùYKϋ53䓲 ]Mx3=G#"j^qyT(`Cgz+LD=XPpQ0A!LϬ.,T/qf%<_?!#B/| y3ELor5WUW\gƙ޾[`ԈIgz;:x8eVD[L`;ELdaDhˢ@Bwƙ>`O>YP/Y3YͯpXc;~nU=*F8l`MS r`!9v/z;/j8D(W˪]qOjIQUc樫& 0,grO$_IXk^~>ҟ-Q9ZELϷ(iIl{b<yw΋]mx^8~'\=1ρUu~n=P#"p7{V+<ŗEϜB9?r>Y0Q[X0ꕛq3ɂA,qOZ03d|f{ONgVL{}ҹy0!8UIJ38׬x*}}OaIEgxdܹU=?5*/z@pu3TS;M(&5T!H8'9Qt9AX!G%zR>Y3A7g"Bkr,޾&jR>yUBzA=O3鏧LϠ gļ2b=`;!޸=#>0ވgT]Ͽz3}:k?1Zslyă%L6XR2 Wƙ# L*fFpG%\:GUu+I;$,(.$Mk~ZZQ "lq7f WB|)z6E4cZIqk"*pONI+a[[8ӛ>eE1.IӦ3J/z6ÕطI*7=Ly`8h(-@q'3Xl lgS>s{)铝bna3f-#Lo?w[T~:ArHgt c9qb!q7 3HD'X{O)6ݙ|f/<󱓗3 sKn']O='y m4f,l/5Q*dfgC֓bJ^/t"}W2ɛpOVw>"%cV3=G7S[M$qY./,Zz_ɗ[3WWd}WUAv2]+ᾐ$4}1 p޾N1 ],?&W/d 5I[PZb J8ӓWa Z8@_I獽I7"u/˺/S|\RϤJA>s*>I=qtnӹE(_*G*V9*pY$n XK/z``/"x R(ꓱ:Ҋ z]9M-@Q3}V)bVm1L,9hɉOKN™>'I ؽzsvNђgF:#Ini gdlhXmX1I48~g8FTGTGdD3*?֞q7Kdc zw/'Lo`Y∝(3_FxFaoFp¥(;]1eC2=GhΪ^9VЪW 
4p<`LԂwgz'gZ~EOfS63"˙ ;;e9ǙoQ덢nqmtP48~spw :͡8sF=jqں\9gzLͽ?Ō|`Y3#b?e},6cMϙ|LarO(WsN/hroc`,*3h=0n_ # jQ^vU;zEgzl:LTn /u񐻯D¨gzrLpy']1%%Ȫݒl+\ `vJLp5Ǚޞ;mpe[]|Y_Q>=G~# k>3W#,2O=$9aO,K?(X2a38[f8s*O(=${%B_Md{D/Y[kqgɣd'Ε!{{ {jƙ>sZ8i[QE9mLoV(6r6QzXQ3=ߢSwMS7KWdMoTKG@.vS#FlgMY˙߸=Acb4o,ޓ8/{Lϙ\r1HH)*q ޾&t+>S~>J Gn[ Z Lo2;jF)3,U"Q/WY)R{/zIa2*Fq7yRט5}& ytfOQ~ȅ8s%{Sc2%жG g,O5 'ш 'gzba{,9T W#+j6/D9vC~MF`~ѧY4n/_1y_#VwCȍ;~ѓ?! AP-Fƙ>q{'="T,qCbM,'̓1gz  "dDǁ m+GBi ӯ'W5="~+_3_A'bzQy,k!Nƙ>t.R,zy%LHJ ֖jUk>>Ɉ4#2Ʉ3=Ghy~bE\P7+_ 4-oV4}3} ( gzYR+z${\CENZzyILS/ޞ_y&;y^07M/ez`roM_IBNrTmfp7)h0 +bQ 4(%Z}PL8\Ix:. _a{CDqoqh1>[#3DFW8Ia8:o08L/˝ᮘv;WKBy,΋CLj J-AvnUT\2*L05KOt'(`"NRL9S!{N0mEV|p-eSߛLr7)jPԇLr/4)}h `LļZ X\O q yʣU9 Rsase8K2U]xC™OYA.}]Yp\Ǚ/aHd\Eo|XS^%ӳ:R;3Lg';(*0Ab>J8s odM,s8:B|_C[<9Ueכr$$JT] .!g;L/#TOtk*e q]l0j1G2Le9 Ĩ'G bfؾx\'~i8I\SF*pM0Mn hG#q[&b~waF!/NM p 7>^Vnԓl|C9Q%ISY{kOLQ^S.2Cu΢Nrf ص3=ߢ2ÿ Cc\, Egѭ>"WɶJzef|LokA=`1@3kFDSLٱmQn8'Nz%N%%N8sq3+S4A|m^K +5 gzaKp$4f̾-i̮lG*ZcF@v\(dU;ҫ~ѧW΂EJtg~΂&ΚCqEHz3pԂV^38-ٶ./y]GJ™#,5jTF@Ǚ>Il'Ey+ LJhgG픏{’EoN1q,)ʫB~*}sėܙˀë_i= |Xc=$_ooaEon*#&X++ i_ax6-{`sRgӚ5Oϴ&[t g@T0~s2"fIT(O/<g<$mb͏Uo]yRgzn*s g,yZ>˼**{~ѧ3ɽ3ǽEoR8MoNDUm]bͨ QHrӱ|M݈z s᠔Nr}h FLJI[WjgF+&*#֬k< dI3,2L=kL5ӄ3=G$&I`z^Tq[8~'B4gz o>uQ}CJAǙb0[ΒJg1h/&kؾ`_ISe{sN޲" %2pgzp,Y[u·aǙ>K4vfL4u ;˘Fѯ3=6vXgr5AI`,4g6|hIqt gz>3WOpE- e)鞝5g$ J inLFQ 7Y88i_q7;&֊B+U!=uhfB\e;~bk澻}ͭw$C|X5Kdoj=၌#YD_2aۭST[+5S#Ϋj6V1gt^'D ϋ2b"3)@AJ|elėp#t@g#t8g#i_xk^ZE™MAEb_a4%yg2 T'DSC}rjfGt$9q|"]k"$p3HsRz^EmRTT$ʚ iI <HؚDHx$-LO.+o-&ܥ[]kLlekHYUR%a&NQ|w{;ĉL-dj$&-JžtUUS,W1xTmJq~swX^bK^K=ca&m!l%a&6YKPnÈ0|˾&bIGjni LļZr蒪hMM@N}? dF%0]`&A.13[s1J0j10gͻ䊖$W4>G1Bc9Ѝ$Ko|e::y6]v_\w[o錖$UGSt(Cm|.= "95N™=ƴ gN qjyG[QQKbAb+R;Lo ^WO _ʕqQ]V6W#'L]% U*s#z Vxgz \2Zg1 g5?}Sk:~s_{; hJQ2A}ċt,ztE/zl{-VfB-zɈ3=w`C &\:[p0MԘ7 m&u\c\_Ә*3=A\KK5QŤw81Y gz̏ffL;"f8QU5AM!jbfgzrZeܣ> ԠEBhAԐpϻ{hm$R~N8sxvP@Z8a /zEv|)k ;i3=G4b(;(- Y'͚#vQ𴴈gƙZbI,hUgz[ÞC86"k$8\ef_L~0gixT NhErڄ3qڣ2QILI*GyVuMS ݌(/$ YA04pO+#*JzM^[S9m}ĤĤ]U_3=}Ꞿ c#8~ѧsSFcg9u/ދq퐣NEoXM1Yм?xIozJ5_8~<`/? 
O2f֌3=W^+ :V|}_I'dZd;Keڄ3qZЪ:0GvT8AK _Q`/dkDjl:DjmH8s MU$O5"pOVCPÑlg"nO81pRS䐒Ω=GuG ʪ> ՛eosH8-Xzh.:o/SNvL]\ :#fk=fo_2B)#5 =Vo>:~U gUU8s9R}M6vYL™#HJI>:|o8sMh6\=38) r*VLISbaL/ۃUCXuqņ~sEiL[6fS} 3=WútZu Xjkj1طr=GhB2^/c2b%/z[ y X1RmK>ږj9 u99+Z$,g>gzZ0vj+P{5pU=Z4dV/:|zzMYK5'gzY!O)!4cln9+6ͨגHA3 +6JE2+y3c3{o|LX"Aga\E~OOMϽYXIG^W`H%/z 2vLUpEgzMWcKwς8ӓG7`̇-gTMB8PEaEǙ &? ${Rzƙ#!1c;cK[P Ro#-?Hs/>n gz$;wB S{b 8ӓð/uD u@xz/z}]Ŋ6Eoo!M {qb=_|Ǚ>ϤU6JF63=G8Мޑ3 әERJ,ęEgX@񔨅y#&jP JGeԄQ$-L}1MNRNgƙoQK{a࿰iz%L{LwĚ{cK3qޢ>8t/zLޤհ{WTkƙ xz^H$ ́?8~uXE#a_h/z';,Nzm<^Ǚ JxP$ϖZ girZﰮ6L[$M[D5aZU~ĩf1s؛9p>YXsmpOҠ64* &3nKOՐīg$I4Y Y Te3(:oZX&Wc^g|Y*;.z_쮍 ;ˢ+Jc×7{hK}™>i^LEtڢ7=w0@JbgS~:0WkN&=3} x\$lg5v͕>݈`<+xv' 5TP*~۩'QtV(oJ[kCFA厥5qMoyN٣mfEM{/DUKi=DDCDp=$MGvZɻee$I%_&ĥO >&/ FˇZU T+T™>=})㗢>™+wDDyp&TL 9dZg ڋZcQ#p qZ<txIU_&w ^i䎻yKӜ6[ 铞>'RʝS)]D1QK>|;ќ绉$9%hhN,3\ݥҤL8[=x Ȱ W"'BOfJ=_H$w0`M# 9f7X*ڡo!ugq-_o8+_aVn8w'2QWO^/z;ְBbohEo3>`y$p LϋUZZ /zΤK`M3=Gz<ڤ~@kycjMqg$Qc=CX a!3$9mnmnm~g\ec~ri9{pU'N~sw32bp5#;<AV[fZ|t*k5*u"Y )̄_\QZڠ׊:q9k~t&'WEo2)5:* &ۋ\;#b脄5 }agM&|ԔamEN֊8Q#=c9Ғ9Lon;,?验M({+39~'y2fdɄ3=ߢ3"[#vWM-vfBEo?<#O Ԏ?|j M8-W<%xvjo"J&p{}7W{5v%#~LsG6dk>?$anz T~Zd*?$4X82`}q":ND^PI8s1 q589Q=s9Fy8')Ts)|RP™ /l38ɦx.Q=}U1>Yccfc5#jYk+q 2pO'W+G鉓py~&#c')LEgjzL@9_ɒ1D ,-9_?xw]C}=Je{ϩR>ޤ QI R(Y $1>V"U"8ӛD*#HUBrMsqO>I&kP_lou/zG^ˌ޽؋RgqZYU.5Tv 䙃*yJA_IG/V&_hf=b#HBgD8OjS gz[X3is3}^QVVU"E,Ɉ34.[^Eo# b_H$Ȇ_S+f_j/d+Nj+λj+N8'[1j؊[mA[q™t DyRY8$OL>GO>/{j祥{™#@AqRApw E@բyjJ8s)ݏ%Y];~'taih OƎ6Eo\NjjXڙ붓qOVV'%STA5L96㴁3}Y3ŷ[ȓp7O1,rF͘S?=L׺imkO[(V>E8yeE*N8SHzk^LϙwJfbyL<`3IelC*IMߡ\ 8ӛ-hfܷ ΋8ӛ4R{7H8gXb Tw7^!7Փ"fN'A/#01eP212&8c>:-ja=Q=[3pؠEk-k5V™#L] ʹp៟#cP7!C ~%-]ڊG`|!S,!Z_O0)~\/eH9~j%9y>q &'Ɔ(-Β=KR(7)M~YrdZMWwQh3S/ΖL_ R]++Oޟ| Q~vGZix!?ؙ(Ԋvr͸#)0GQ1=Y#x_4;GWţW>SgX*3DaR7H0 H Ԩ/sǀѤsB~?ڣzvP sݕi(<4d~ԫB}r!L˪RpKy.m4)XC m7  )LΘddŷc LxfglbR0rvh\[ƸDݙ$ҹT&&L)ƎĿ3=RXc\wv~ sdߏ"Pl KH{s1wQcx-1){['1/K$hMЉbS7*\5ݨgd{UIV-awb9r#/p:6ז@#[ik#m Ip(BۦXL[7V\&2$@*SppoccE~l!֤uÐ m\kzoVqaxkDʄ08 Ì愈wB.bY,'eqa_%ʁ`R *b qRE}f6J=YhULػ xu3 #LX/*+ .lzfMza]CNݪb1YdXI؛b3'kv=QYQVkB:o]X pa!DU 
LD*?̮LȂ'4򲔘3)r2e%˔ufdv#.;DT.#4Ӥa~Bh0U#o"LiUU=]?ab.oJȃrUbY֒UD?K! i9@VvmhvaP@4j8D͡?sT^Kߥ*ԺpvDwE:RYA`RIQ0):߳a |؊_4<$B̰ W@EB+ 'K偽h#QxHE, vTKLŊI!, k%m:/!z1uq(jLWXͼɺ|N !6lu4\PuJl҆l:y.p\*'1F`<Q4C|V FsG'B.*,-(ܮ.f.qiѨOpDLyIsg̝\q:?W⒚NGU;Y@\X+j` Iĥ!茛t`Х!~}/]_k+LKѕ;WLb(nMR҅ڥ!Kl>8ċ}X$LOJVI!+`.PD< Kq(lW`c[5)uhTxʏ?6~4pԱ:#cJB.EXo/I3!ޥmkҍAnY8hu"|=3GВp6gC e3OOvJ}-gPr1=֤pwS;+hX8?e&s _){փ*80Jy(NCVL %;sP5Ae CqzM@i^ s2k,q|LNm Fm8zm6nSOVj( oP7/MOlCOah fzz \CTlzJxx%.󰘪xɆbЬ~(Xo 35FiǨc cu,Ҵ.o(fL&1̰oc,vMW^xsq߲l֏arA.xv;kǻL_RUT qzOp$?b.7 a{nʙGҶ:[1O^B܁ؤ3]I&>cn$$=喊.!e97棎`xX83=M󱶝Q_w]n+i<_kZF;d4P&RޔjFmg0gQtւ1Ńҳ,;sbUl͸XwO vcY[;  nɶd-̀p +Nn1-owд9IۂfH1{{8͐rZxVa۔ָYnR:Xs6VUSOؚ<!P@hqO{L g6/oi$8wJp$в 3v;@LQ(Q\"QxcO:ט #mbI1$ճOzY n^~Ihʠ gD=ޅXA&QbnS+\Ge\EjzILn` Iq*fsaS|CôJmmq+R?>ōn9uŦW-,7#7-c&N!0ax\X4|Y,sbR&Iv=Ꞟ҅tkRn]C32 H^u X1{ ף&UoWgXZaX鷊~{>Yx@VQ 1u9N(3s: K؜:9mmBܠ98>gKX11.RAudǡ#Ee\]y(3]r$kŹb9Y rFz_~eAݑ\'֠< #9N:ўk}i+ }6bsBt=Nqa>,\Tr:;9bxN-Nra41)*AOs95e uvpŒǸqB!i(Kc;{_{6{8}i?-[Zhmr)6`m1(""ʨrf&k"#CY6YGSǵgUֆ`Z,zr."` 7N~a/yƐTZVXeIae!6>&5fXi'>Vc݌oU xsov`&~?=p/mGp:Es Ɩ w<;NO܅mr,8RU{_g%(xSo(w/Ee:wÖHB%X}bH@B^&K-)(@I}Bqg}b錕ff]Jq/ǖ;1i }R8Ae^i̯8yG(%jV|V@qIil2G6ᬣ91~' dtI_YQĽۗSA >!;(NCaYpئ7螓{Nqs(xV9KT^/Z7P(xaa)ԧ(/^,$g=Iq6D3 g=N%'Ź<+fФhhCL L'4|Zpm?rPe31Ϋx?0GhT麘HFlj2\(2vV{89=#URlaYmZ&۴%^h $Y!Ć26ڑ0n-̫Fé$qf6E"mNHhv^mvbR};'' FV&pYjHBY~аŕQX5Qpqkr ˱ǖ$DMP[ާ4tWZASum麬#"]WE+tLLg!xG v-Η4.p HᓻHsC\ω=,@ϝc9IRR,y3"0Uհmm>/i1dexk ;lv6?pw>l.8,Iƃ*twH !l.%|:YSImjSx|b\۬ʴv:iL1[5"L1[m9'LϏSrc1`4Y9!qIГ=kni 뗤UFx&h!h~=/o)Hz|;Z}4mV+/3KM&Vٷ=~CyNVr?*ݚ.\7 Ku~".\p9V, sX._@ S#RJ`$9~&<*VC!ORiQc<`Y+-%" -S{VEa*ƽSN_9Mxșr GN(OP%nB=fKM6)x2ssH9actKuo!0#Fp[4r8! T '~v={.`t/$\$%)N-G9mym1Mz Ɯ*s*:|`mdUpvz<yl%_I{zp:̺;۵Q, YWu[qKcd*;<(;(Ngg9It!w&ջN$VY OMId)|q~:Xn͟c\`y59&|>&|Li|(Nkӑ$9  &x3P[1(?l Oثa` $ziˆ8iĨx/1Wa#8s PCePӤn܍Wwz\`U|&! 
,8yߖm<_o|lcۣ o-HTGp܇+>f>HK˩8AX_ ~<&P4+3lc4)טhR1PƠ-v͋WQ/ZPG6#C1ờ N.֚,Ơ,AWYAuu;벙{Mq'oUoKb1-T8PjVőҽ\׮P}h8(_FIJf': *áXyBbݟAlw ;sH9y}0hzfV¤M̦᤿]TS%9 ?}NgcA鐊=1r߮c}t I$R+=:wWZޱW?*;9B(91u֤1uxcG{~Ӹ#0x,ׁfL6`%m2{m]k5lBԣ> be%ksƑP͔7d>zN[&  I pp|s㜋1d71kKc؎bcg{c÷D)HŤ8oz mрʌm!UL .lf-:*yI=ƞZnޱ[6g;B2"oKxޖB5T˦~| BBZT|\_|vR0 G:GIN> |TɆ]NE XwDF s>ϡm,ӳ­ ۬KasJ\#RFfBg.0Kar k0\X #X)R=EC W=a9$>"փN9$Gy֎~Ul<j![@D $y'k8(OveΘuXðCOgyv[JAAb{'^;Fǖ4j#•e+`!j"YVl]g1hz?Uz¨? >-5+>W(f:B/v"`*6dY*Me*حb>,2,w<8Idhƨɛ*&Eg V32\=' Y#=Geȟ8?9Y *r2+8Yr3N 8R:N*y R ܼqQ# 6XUl;~3ڄ+Pq{GKoF}<[xM[@w0zőmqcD[(NzXF*)DO(1}Ȫ==rX[#g^Iuzt*. k3xzUG7a}ֆA3UʱuYamxmt='U4(jhXj0fԋ_Y횢BE=jx]wTVծ5Dt? ~^  .47̃#2FJ&xq) zA<'g~` #K`ݿSыHF/{^3Wű5 6uk>iY> G>2{`LS8K?So<~i#p a aA0/ld>i z_'en;24sKadnz2ip:c߃bzll}Ȟ9%Z%т߷o0Z5ޟ0tU< Ɵ0?XW3a=:?T[a =^+oc.j]Htѩ`M]Ȃ#jh: 1Zx_Z+`[2?hl߭5pg+zweE/m!npF/0!'7"lsTA!\3o2TBlf*aViHA~t %)h=Ndqi^=[受ݳ, pX<WIOХʼ?a~vߠ0ͽ&ȲN7AV K8Gh5}n$BvRJA)v=i1%&ٟ$p$-:(cRgDeAYOQq&i8= Gw}->&V+҉pj<Ӊ z:ݠ6=# l1fнXAy*w0;% 1acI5g#q@7]S_bQ)N J*;b-/ _i'Ol K`U@TlD1;Cqr{04Ɛ4Sb<'\9_ !j8ZU0Us'WpIȂ\; .jgf\K,˵ܒ}ķޥdSJ|D[ {k+T*E*7{Q,Zέ) Gkޅ(R6UT# K;"tyk3f˜+{1.1acػtr 3=M6UܦCb28c,n*xuh'-ax>!a*"q |M粍Pvw'y߭=t~#C[?Iߥv&tj=POZJ0O؍ܾ0PiJ҅e2ZtxʫQ/AQ<5V1zvR/ASSԖu--]G,Ef(JqRXcE]{g~?ڄ[&+y8 $sD :FQ,=E[[Shr)V3 W!x)[2>SsNVe"v7V d'IA7 %[nKPTw!µ3U\&u)OVnzcwh9-xONu&X`1[F ^a_ufyF[?VȍV֛C{{@EpXO4H'9ϣGZ]Αzx\KW ^ Y4ׯnKAnF|Iv/WߧV߽]7\Ll~rqG4b SGĩ۩ǩ@%Y:ZbsMč`B3ե ^!G.O+1Zmbi-(E@^PfUD8VBB)x)Ĥ,Q̒Cb[ayhnM(<aKƤ?("QzV~D)/NW.R4 *!GRfaI+l{MmX1R %(xGA@<KP clb-fXxF`a#ՂWBcފP"VRTK;P ^!OʂVKG빧9{(]<#ZӖ0/$;{Ԕ{w'ΜX97߇vߟ(H)Fok/޴;9tyF$ =^ixA%ݢX?$? 
2A;a/Ȇi"mII.k!ҭ,J) L+Z'xpXJc/XDbmǪUuJų6Z;L9 )y`JNQGҪ%BFF,0~,6› ^0Kä wIͯY,al3 %(xE^B@9b*4) VZaRKP pHLJq%Wجd *U0|MA\b8vKt>]"ƸyV%OBB!爭QBBm${$qmJa }e{%Q܇- jpSܝ*&*_5|Yј6<HTTK<cOc̢,=EC.VOjf TK<Ϟ[Yn[jޅcQD: SBB5(n_jz ֹaSK0"h#}/+AY60ah!K_UX_ye{18l*K1*Dh: 8z{1T}F6}N:"=uXD#WaI#Sέ>uX61f}C"+D{O]&\m6 TCFop/w.{B)/h`ets׹D;xO]o.iC.?X}z׹r|Mk+`eƏ5x\2=u1vᵘOYNth~O]6Fo,1F<"'S y.*li*}4-\ԬxO]6v>~c^gek]gݍe] 3%#>z$:3U};NPrڰ8>(Uf+:t97nT8Vuiq0};ǬUчʚ21gE}Y$**LecLT/oG*N[CM|@~]ii m)}h&qng8oAjPW֋~DsK}Q_<46}EąXxiVB`5v!|v}z1r^<,^5R. x`!mV S0_Corh|T=JF}8 ˰شX7qhHa<^Ldp37vx<9d#!x[nZ ~7ꇾhgFrhT`h"ߐQ-Q`>-YvT*Z :\η3Q2>[d) Gt/omL'[@}xd8[6Tgd$o/}7(-"P fiWnvoY7>[DDjxhEŏ>[Dyhy4=NP ǡ%] ..)FסƣGx{#|?Ӽ6;ky-n0NyCӦV\iԲW\u?nyXq|7-֪󞌆6'v7γ`mZ)cr՗Xk7Jq'k!K;'v~< gPeps<½nedyNl2= sg_evN*1? 1S?-T75~xƮyb "_o @7>2SxoFɻ{hOe^ԇqd4o'm|=Vbb|ђpenC=LoQ_SoͨI / 'i&1(M`O?EXVZF1z,w{ߞ@(Pf3T7XW}̖1|zf,8393g RЇn9s(nS;&Ww*LHϠp9|Nx913^l/Cߑ t8F*qnx{ϴ(>ƞ)OCXly^ؙYv07ej%99boy5͙JhZ% Gs*G,X57}ҡsf( U(]Q(.,x =l2\{,=Y??pB0ܦ3)nS{˗տZ&A^?Ͼ6;ɜҎ@>C)ɼ8nGF(L#Pg<ܚKcpPS{9(8&sH(ƐFT&!C:^5ڊ`hUĂb}p+--NU⿛թ+ī6k }&l;oY?Pu3BjUSAn-Y7Y&%!7=YZݲ@Bcg:-CYWSŠ?C[xmb/-WͦqGo,ø@$'̺l10Y*ovmr/]g]o6)7gO2e#؜=YzuTS|Ti_EUIŹ.{pY޼.b+[ 凕n։B=uHmH 1Nm7Ȥ J~VWb!b%(SM[zIhn EJLuq:j>Zdj W|;xŽ57SiJ*"SQp>K0?"^˒ƊhQbo⡏; >̙աM;>zg_ߝwv>41J2q= :w|oU|+I(~|C-NFA5 pv Q^<}~u՟jnSӅEz6! B,붧6=Y hvU+*3 d7uKrsrNdža/*ehuEC }C|bD Ϸ&dMXY hMgOK*˅#ݼl5JsoYl]4o.Ҡ´MNc(ubu+&r\%qT-FđM*QLqt3 Äle*uv^Ϲ %@UHǧW҇DA疪Au9.*i7螺[t -3|-KpKY9/WV(K. ,:>YE@Xg:;t9 KY UypD8: -VYj-(ZꬒҭPM0X3sVKliQ uC` 14ȍuG έ(\Dj>lb[z]}v˺⫆ q7*f()HM }ۆ샬'CEva(mC"L+n{t4L؂`s~gh\QmNmoPn>Y~s>DXzY Zv'841גR*.8 ؝>g(ZN!!Vn Hp@拢B?u>>}=~񛏡EOv q9.S-M${Icxo>Edư+72G-[l%R_]xF' 'B@GK-Mo6h+/p( P _SǡE;JЁ`-#/;@U `/}_W fp![{ r \gs5:X1P[wo͡C?-! 
&cJ_?34~2-TZR@ܩ{e-n v `o=`[n>{Y^  {aZ׵Ȍ%j[q/KܡHLϙJ̸ϙCmnmS".K=i,1S_VCߍ1+yb^Dj7d8fcn6EGb1oX-MH y[BbAmX .b*vw~Wxg\V,kψ/Jw/C-d?4S[o״CdfݪCxx֝,̷dȇ e1೔Cߟq, ZBI(ua#7*Ck8<.Ɗ5ڞN}B%[U_PPs[ 5tY)>ԅXX;b_ ~aFخ"l(~x"]8$ٺl 9bn: NNk'S-27d\3(eh8C-Bͮ'EL+,# xxs<<i;BB"op,*C?PG S4zF(~4 )K~yn3סlT^Vd fKAb/Ť|h9tU4aU)&D+W)pDͿ>GKa3ZIMxB~FO}˃2BY['&6_,|{g?h`ruS gJ.5Ҙ\,zˡn>dik8lٓ\+l,V~ޕ4,Eݳ.<1лP ?Bp'ެ~VZgg/bʉ>COxOuGµ=YJ]lbXqa*}at"f5t/ jU]\ecc>Z3j}Fug֡dBMtT[^KEҊ}y袱XV<!/.QSlJ4p_J#n{Ku_ʡ.%7 !_/Rj6y?t#X*qqzob" ~|@@PU:D^m-8 gX_Ί:-^\y"{!`\g}+{1vJ7F =PGha4Ka% Qv"~'Mm,zSe[  8а1tYx;g %Aثb$oᇉvv?ٮ"*2YŏlSvyG|C[Ýb۝b \r8)>׭ɚL@0x{VzۺuwYPZ}Ň֭96ߺ-[o};balW{&˙l6:٬WaX 7?٢9;W^}Rh+[ a^@ 7v[-G0qFQܔggk*)Uyv>[\*Cޑk6hf#{[3[uC3U .WU֢d[;֪#o|S(HEd,p4 bz&8p-AhGwu)XR>p{rtS[4'"6>ktp"|kvT -"4SY*,Jgcb<#FwC= Bs*+r8Z.fC>FϤxv=uiogss<bwIm~=>[DXyo4η?[Uo}w=@*0FT %GPjRo;:7_@ێOZ֜|k\͉ 6Fkdh 2 N}k g}ix W$׬OKEuF0;k/Y& [h8t,! >oG9֪ϸXƟfٚŏg<;nR:Su9_*#|ށ28c\>{QRfg.}@7CGLjo܁g ~3>^ion`aG|G|FVao*s3a. >uN2ulJ|psos<V[t\ |նϾFT;Upf:9ΦVJ-v<#n ?M>o;cwx"z]^^ \I<`i<xg,{bmÌ+.ra2Ê7Uʬ;҉k+Kؾݜ }߹8Lx`ծl Np*YŶsmdSo6U$z[ݡR]{D_]^xd~]bkUAxZ].*w+NA Pj%78 B"aAVJ$$8N}5s"#9~TE)g\:GN.R׃T.cqI#ш:u$l(TGB|#qs$b/?$\O{ihKi^P-Bdu{f̖j;yj*#jX#i5oYyր+ogx^hţ4sq>YWē]Qn:ܧ~kGnZͺ|ad-n#TZ\1O]77-{JL]ht!kkK:t9>p{G!x5ql* AgCVMq_v]h9r ;'WʹvdhQx wcpvqq,w+gęgq(~Z,^`o[8hC7³oKs?,ŏgT6V "*xsơg*륍iFzgH*\}5D^:,*l*gTQUo"t(5$=>Od1C'z-4 1俛0g'$OJ΅f+.Z:6|XKrA|>9 zvEo wo݈b :|"KgWS#LƲ&jXw8>#_䉔^VieD.~l _WCoYQeӾSwM@Q|ys3l`+ξÎ|!GB-~>{HLe8V2ߺ"tpև XKzTMF40U0!ps(aF76Pcu>>OBUu% 'E>tD39^T͉j΄E=[w![uA[w}8 6Qm-ptqFg_;QrOJe"y/YGh9*9&ϷFT+Ӈ'qh/?O2q5yVԽP=~VpZe?-}P-҈ZH5il7~~[^k˦'7+VXlu=" ~`[3ͭ HwΡy;8URdy4to:֢vDs*YߜQHwu2 C3 hE}Y]m)U 5&2 Cі{ayʥ%2 CJyS+ECJG =,˧գ B6pdoȬr(8n&:sazSU6VEO>佅lAw?ܵD7MoyRs馳/.`"}'HbW|nZǏynEfͪ"[4,G@g7ۖg *nߍES!Guu[VQ,(sVe2ndnSz oƕZ^uNZcoD8':ˣ#dL(wDKb>in~sӖs3Y>M-'iD٢QQinđOg(nvٖ~QX0 P:nm{yony>P~(V4,?C>8*; q}v7ZbFYn0u[򼟃jcADV~iӇ-r/PuӇ{8!QD-b]0wiYzӵn]Z݃Ku#=iOy1#Ee^+Qs5>9lUU>ӂ͜qv%}ۇ0T-C7}-P Х &xnTKt`S8/Lay΋ 2bNTGItgo,beG fc26 _8>´hd,d8"#]7}ȳ7LduRn1Љ>,s*jg ԦyLJԼxu,N#ч8vvю<)߂%#[xY8?,` 
8Agy(lL|ΐh\ݻk-Ukmt"};z\@D,{ͻ`mlLgyEM]ཬbT93~!T/(/\YܽFEO-'l;G^Nt,]O}6,Qׇ'uDiP˜gtbv,؅|yo څeēT:$oH˭[ۭ->[#dʄM̪S5ꂁϱu.,ϑ&vUziK0֠]LYwgiezt/jʓ/,vzaNo(o5G -l::UFv E7}\#ydIv,~<[Yr v!n],o;P4y!32'zV8aAl!+;rܟ:xӁDgy#xoYjlKc/ǤM7L?c٘ Cx٠Wa$ _ j\. ~8ɬL>>vsCsgYw qVҾ3?>qI $]3&GHbrD&a*׶3q9s'2 5$UP]+k>CIؿgE WkCYf)}M&a7qZήdvz}UE b{N *LfS70>>Z*Ygd6ߟ>!vok%+ҹ6 pٴx[pdzaf&0/F|Lh%rmo&痃iq,)׎[ϓ,ֵ{3"7d:P6C%MXoq&o ]3mZIsac9zhXnr۵n2 0xD1bdA'YXU3֬)M&a~w)udk,߆'n<c GmoS_bMDn z oLԾu>+bQiӇ<׼ƈ`l<) +#:{4EҋUDgGtؒSk<[`>׺%N2 DElz& %߷J1c#Jt3`P!`QEDgy~Ɏ[t7AA6S_m"n> ֫*"Іܥ"$G MG`E8FΌӇ|ڇȌ2}6LMgy>|x0.bAz49kbns>3 fX+go}GDgyR&Of0aq!9yyQ2bӇ<մ>&|ՄZ>C,Fg;:1|1Bൠ ´nsOVR5tw@Y8.t3®)Ä$T}-p7] %7Agy8|j  YG/|8P/C-0KͽaڽfU1>96pc8YAO=`0oC@󗼊`%%L}ɠy@hܼ!lhY- ra16Ү~U*=g}pk Z dJ`oZ>}N)Z2Vn3L=ƚd9>9XxY႟IN>}<%+hUVT0yzyenm%զy&¨܍# M\p?߀jHtdi`?vݴ̃| gq8&lD]?FS21Z"\kT<kO&ADa/;$oOsRwBDgyάU!mTVkxMlp^e5aX2Ӯܺծgfᮗ,|(6jqbN%P1G*ДD\ZbcKK,Y>Ҩl/MiDPqDY:ⱊ⦳<&Ro졇Vݽ,usLƳ}:rug^qW ckӇ|yo2b ߼2Уo2ƻ U'!g@J4D;b-<w;&VtO~T0 /r?n2 U*WJ|\O|-Rpy< Zӧ<߀ydIo؇^2هؼ;%vLb1tO' O6crd,k' /( 9 +Bzq> K'8P,s٩\OQC,,@›L>>c9tn$ M&aE\]زykIXSEcaM&aC3l TW09y'kX*cit(d9h bm!vEĦ8}$UNܦFS뎽&0i1ېڼhţ]{^npE>+, g`h! 3,߮p) @?ׯ1C4$ mC@ 7z^/f>YM@X_Y>Z+">A,Xwѣ .00;ͷ,*W eҞZN>+Q OaY-"";BJs6d%:˳8-ƃ½L<[`%P^rWg`{Mgyɦlh†]b6P 7Kg|Y-T\ur"1߼C-YY6>g7Otg Ht㏅Ftg (uiDsd:˳ʤoMU9X .Y-cѽFtx Mgy.`lq3/>,gGkoG-Dg|d٩9eɩMgyj.6%: j DC@|PVsU}Ӈ<+5ZmNB:Ktg (U 7؊3Lgy@R^}7++_>62Ӈ`?;m3C|rD6}K1@ڽ&quXӇ<,l& $_k'^ӇS}+B&:˻`wL-MzF=PVvUtgm݋čyAgNou~gm b Ve;ǝ>pyU`Ha9`|l /Mܚ<+K1V8yɃ>ƒ<ƫ)Ռ: Jm\83DJ8lwR3ч<(MưfOmMg>XOnexL{vodY W9ZbRYb>.K40r@刌띒>ӎ^}8[M R8>ZѲ1,CW9C.DzRoT쇇Rotw8zJYfO,1$ґxB\NЇ[oHmPw׽#X0'E4|_sL2|Ul,ȜҲZZ#rCku<=R]5N,:;tFM{7^m:˧w&:˧, Vs`=66?פX?CZj\T0T\i0zrS-TV<o Bn"+o:s}X\{}}R40S 8Xn Dgy X1Rzli\#Yo1O:ePg;GeTUYπ]x*atg t)3,.ozBQ8ė[{Tf<[@P(Fؗ}3C# QY@l<Dgy kVU(XMgy` kk`Lgyﰊ; dFʮa\Tg (hX[5/b:o:K[Ҩa^*}ze:s<A.ZfWEԲq49E<[@[2[ѽBDgy 2t Ktg @]MVe.;V,tO3+@4C3+Y-`VE{h12A|&0xLUYٓC>ɡ7"7&z\t#f;Pmە,{Mw9, w2NV[.wMT;҃>>C7. 0.k,T+ *$:l^x7oxn 'n0k+)Қ7ʅy()*wu83DgyWƎ˙5q8l73 z$y_a[cTݪO",]>t&߄v9htg (̀JG l:˳Fxieg36s<  d<Ԡmrdb V#ZZt$ʃ>i@o>xj"hZZtw[`YwIL!,ss! 
[.dN#zv~р<޲sY8,Ñl2 `R0Xe2Ln?T8(ۺfM'q>lڎ_t̰V-ŧ(@ddSK7r~d=c?D*XWڈTt u VZ$:'hh;Y|> XpjxBWD&tzY_FlЂM&{}œAfah3!l)ʪa#Jt>]kihxl!;YO`g"4,q*pj=4-&QXi\r~}S8+P,Ϸ`[T ;sb⦳<[Y7v|K/PkӇ<Qa{6d5qDsGpha<3Ʊe\ljy0̅),G,JS1`{ #7}Njgw$KS(C_e&Fg:JzKTЇ<{^<9N|VV}.wcuMgy(=t5u¸Co1Y Iqb+U}MgyQ _ ϙi92Y0A|E*C.djoӇ zgXLmSXɿ~sL"#U위pRtϣ(zGu`jT};xx܀<͞ ~;$3q0Qhk,HY6K:y_l~N[,/2|4@6ih\%ч<$BckL^Ag:c^J=YZPMV4]h@d6×\V 0!7p67Mۮ@;o6YO-"Jy?n-I8/>ҩu3"5KHHhF>_B4$hǹ_Ncd :Mͦy{j\cv> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 353 0 obj << /Length 643 /Filter /FlateDecode >> stream xڥTYo@~ϯG@Edq)Bgfv;!E MޙfUOE.Yi":El&?'L~8B_Lm'j2v6R,REռkLYUc%<d-H 2W׹X'sY8Slj)B tP1`ˆN+\qs{pC%#j>zٿYT2x5p}x x挠Z /㑳?t7Nj7(|/{n2C~Yʕ|F-ÈSiθ 3ľr=]MmT?g]o endstream endobj 332 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-040.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 355 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 356 0 R/F3 357 0 R>> /ExtGState << >>/ColorSpace << /sRGB 358 0 R >>>> /Length 15505 /Filter /FlateDecode >> stream xݝKIqW|CDަnKH$0`6e߱"ݔZ6;N曗ȸXQ_?zׯ^߿ۿoR~o߯j?Xo^g?￾՗u [G^G'߲r}|4)]{i>G5'_/{?gc?2W?/?髼~~KaKث17_ze #/Ws\?sit-S_좕z}ikݕ8qRƭ=]رըxӚv>/Zq_:v?QAg~K&љ/c3ӛxCW|K=vpҹϏCUIfkc}8C=ݬ[o~h]ی ŒqGgŒwW*Z#D{'+l#M=MʨRdZX.^{]}Gօ TV(a/˼F>ˮs]ݦ !ܦܗqLD6îkP|z,2.ѕ3,+taʻY؈1?-FGqq#zwl-2t[g-h״|_ԪSWO(rr/u(xMm㖸FNݴQs侻ߎ4trDq.[274[~e,^XϮxOG'md/EN:3na}8rVy2V>\>^]'&&nWE$>[F(kơow[epq`OA'.btD7>Rm?urƐ]μ2\Ժ}Dhva9dDם\:yh3zƣ.SAV]g)f,x]}}Jōk#=E+*qзoVۦ"=geFΒEG}aO$>a 
G8`^'=Rěeaƒ/3V菪T8 ڜ[$xC+X. 2FbL)FwXb)e D3taWSkzt|8xpF&:uDCc;Oh^tx =ЏxEY:6E't(pU#B)$߆ 5EMQ(3Q;4Jd+\#hRœoYp|iys GW-CArbUH zhF74#D\tҹ;q ˳qYƊ]GJ =*!/ejPjЍS ^TxmǣW7sq\:m"o'y"}H3062O uѷg1Nl:u9zزҾwÊZHKIэ48^XF'26VojC~B c# -'*ƩBV5 h(iu7^O}Au1QQp2VQnbLx~fxH_ywY5t6ʟ!LTe`@ZGe_7#>vMk#[p(5N`5R|c YZ0}qj(rO]_t7|zƥ p.MDNۆڕ%0v]o,mַl,ѷL6?S=6cv?^ҧ΂+0 tXv Fo'b6q|X:CtE%h0T};Øel,(5r?^-W,$=BQ®7-XytX*s"^ !^"c~Z#`X2X\o=ߙ7Ԇ+iThsFNJYʥmע;:S"7[ruG1+QCV7|T1b4]u}0HeI UM"`r$yU`8B}\eEC2̻Qjdvݹ4Sg]XQTWV+P¾T)Yofd(PބQ8:t~&IUTy Aߍ$OEgQʻ w¶%ʖhDP䂅9th/뮮P%1`(8a OcBG=RQƢ$ϣ<$!ѱM-U7&i4DL/.6'H-ert fNEoDB |*]Np $ ##. DN,xsS'&d0Da# 6fHcr!m` .Q*ldϋ{-X\,Y33ox7IaY=6 Y F;Ui5(C rA5A5"DR݇3^FRJ2X*5n$^'q]`}P̈GLnD'yRg%s){eqc?,(}o" I\)4 C Mi!(iZPɛ&M=Z >hGYG7 |#`u3_/]Mow`]tHMWfe LeuWulXɬ-s5X^}^F_YFg{AYG!/0\'y-hHn(#qq}t`8zPսqp@a в̈|M aU +Јwh~FG0aH麁VH 8IbEw+HY+9hЅwЕ=iL@+SWAZu^STªfc<>c(M8]a#+$̏a:c Mƌ#q@ϭ'FtY0^HPċ5{Ҹȇc [hug؇>tL~vւ [!̇& sˮ}ăRx蔒ꓠ:Zk~2>ZBʤx1؆L  8c"EYD #=w3Oqϊ9ҝk7F=]ՄV~G\Hmމ4..(/O%xt㺅8rog.P#$ꦒ̠So'?RV"IqH8֮%90P=2^Hx)$,lƍ.A{ʉቕ3ȸr4Tz qU]7Ӳ)NjMФlĖ]OIZ̆@7[Ɏl|Ⱥ5nQrX:cf#ԔM₨S7Nڸq:: sh^a%VI? 2ʈZ"kL|Y-Ɓ~kBӐIҸc'cA,V刔`Z(f5F#!4qt](F=Malt!$14 mOc#b*Ygx ~7t<2@dY{=AK׳a 5u6܆Lg~X: ɷ\y)mh;?q EJ.+: 2G7+0SSo9PjԤo\7 K8=j`V%pnbNFd@!n񛾓UI ߕ 1YY.{u}Ҙ.)hQemD83;> [`): Rw2kPOĽ/RHӦuAuqyR5{awf۝<+!eE24r LBD%ta<Ava)tD6};;6ik8Z|!Nn Y"FZV 狫f F)D0td%S辆Oڱ٩*W~6]%5uN'`W``Q[,c2u)oWC@q®p*Q{4 >.2#z^fJd̚lZ2R3GÚq#'z~*P (+4O^rR3(>̇UȢv7Bfx'xq6[nV=q/G Lb(3r'.0*޿tO̓q^G _oO= $cDC $vmZQ?Bqu3üת<ԑyby]yCE *`2Yc➑MRee7IH 3/p`k ŭV}0G,:t+>WѾ# 5$2Y"j7q֦D3YWMDHʔݐ;i\ۈQmmq5N4y<37ء = D^8}&({B*g `>0wZԸ^f,쾯bu5e_ +Noz:Q〹)^y]ZU7hhĀX͟B@e)%$ׅ] ox_4-lvy#Zy^$3};*.0PEyX7&GDp*A+~*R `jX,U%TTZ=ۈ7L\0a^6dqlutidb>RBk!=SQӪLuc)liQ pAVn,p++(`=몿i/O|7~A oq L7G^߲r=Nm%_g0f@(jO7?_D?_o߾䧾,?R_?~ƿ_Fv[ݏU_g{6,h@S 1&XC"İXr!fD!-5Լ{F:={8z!iV>6ktVdL)* ҐK(a5汮{F1,1,$.T"$ >7n"ƣ.ցKgXAE;%tZt,MF? 
d/ XIʾ19{BE1 {80q-N.틳 ?χ+i?nzg6w,9:fit流wUXݸ1sM}#\aOmן}`v]]Ph3t2`M> 4 > 7<֍stXȇ]3%ש#)Ko@d} JL,T%=;#JYE1մ;ZʂN٢BO$:w>㢿#U¾za ȫ>g>[HY ÔUj}i26N2L(T>F7TZ7 )H.A K[*f%btlF2"Xc@(,E\-űQ JmQv`ކ>fh!.8'F4:Ҵl$t*.AGj6{E!= r};z3Fl\=O9FNJg|_8muG_I=KF|\P֬BLP"j&1!qHitX]|{ 4r߸V'&1}(L#,g%ǶiFFh.2#'"%ª9ZDG.pdn7n'yth)p788& ~Φ>;Y"EOLe뚼 kǾ*N}W8v=%Df==X' !|IR!d [ ' ecZr"Kl$<{FZ6=Pjء]qrǏ䨤qRtF"):'_:R.q?uxxx6QF,2t?5xr-;ByQafx!ՅSLljBsp$?'%|1fhd1'\MřOvK;E^ۆy%̨-AQ=Z&k?[KK uh/:S4ϕa]t#2?>pfY [ib<9i7+)=&<6Z !q9q>pQ0 xvJH1߉`2g>LUZ62X77MU3iXGAi1pч; U/('`R|&a>m%3 dF,+DQrCR'Rk8N~t1;Ϯ.D8f&4:I"τ@Vf0[m%QOq|߸V^vi$'>-΄^7Bhl6Q*qs9r-jqQ!P'IqqA֡~WY !Zg7 ENf.X= مWFFWyu(!ؙw>0þ]v^}C/M{"\.N*)00:ڸJbgJn"%^8zRɜoݸGFVo_5iU}Gp3A"Z` 4QF6:vP۴1Ƭ ɆX<݂6NBK˼#S]ܴXQJ+&o+b1rk {GG9tLY= zaK'4@EN,ާ/##vҪڀ]:f^P8#3ki̚ff 1~eqʸJab$LiZ¶1kJgL71| 3iq'm{z@x3B1SGUҸRAB.*i'm{?ܥe2-yQ6Ue>3\i<8>ޔsin" eGu4'6w™ŭzfE@h*%^%gVs* @#>Eѳ1ԋ`6iD;>z'Xѵg?yGϣ!~^ȴǀ\Pd–3NfݸB3B]3)mAEf„DJLמNnetӂ煅:cHl3GG-iҔuN|&+%Y,aؽS(쾍=(}3-C@͗N~*ϥuNʾbefKBX]!=6x3i4-͆Q|5m?bzIrA&̶c|KF"9̺8a%e#GEJ<,\H'@[&ㄖ+ghѺcZn#E-8.žݲĠ=ƽHϛ]omrWN*p$~f0d퐇ڴPz&1I(qQ8 2^M$|k3M@%h Ǖ+o2SU*tF(wth͈'ikߙIu> !] 
rэٶ}{ǯۿܻwGl-(9~u|4@e\HwxϠY?U"oϯ jS(J]YS{ E];j]aIRAk.6$]Z\wd|ydZ )%Zu6B%5=Dt+EHTɆfJUn(0Y])ᱻ%{FqFEl&~~%L x[9-~>T\DzI_ `]OÑ)9wI On+bjguGJn#>l:i!t[G=iuqu6UA9bt")hQuk8q-!j$VPulB8ukjeDU8wj+L*rkҡ+]辩bĵTvw[om2DaI f586)9S7 Td |Sj嫅8m 3.g9(&ZWH"qR~m}u n =TEWUA"4YD23`CGFgOfۭZz;aUPUwiy 1ѱMxD"Bts<&oԐJÙY, oh'LdDDAp8ln@Ck=u{k1Iɪ!XFOjcpkxs[IߖTIMlۭhA9@h6NTheU'Dkmf\<ψă$e15&xFVwwT ߪ3}.2L[olՌBTD%59u4/3\Cj{o<+4mB&}ehLgoPso8xB䑺=8.Ĉ^L(" ~h.qlqY[ qt{&Ou,J!MMZ/|ؑ }q4yu32x@4;6%pt4 MPl m2|B&d!wﻘ SiiDzGHQW^آT,̓rjtq Uy G:#oJ7ne/PU(Ntb(Ja}e\i%ILOgu<)EXi@11PڴƲtwfٕ!ZԤc0v,‚Ke,qmz]3'\ڱ:h|!i2辭h7<"r*\#B4 E v,1;YRVm.:\iiQlAz7.VxG6s`:q<.Jѡ K<[!1#ɮO5mZV@" fDIn1 'cVL+ I\4AH(.'xzcF$c3FtQ%Dqcwڄ:aY:5!WƲ^ _Ⱦƭe]fJxvT8J8FEs- GZAjƸ]YK1Թ@vָim2i` N C v.M?E;үVp7_}XSeT+QF&q(8c(9;~{1j'[r~#OO*^mR7t #F=&p wA;6, "GWUGntMls2k:fa=ИZq{x،f1&;H';tX2G1Ȭ^lǥ+r; 6 QE å}ΐPTȗUsKM9WFۻhX!rfBiulι텑KU^9fs{4+d1A}Y[dӑAiQ~QN|/1<=+OCH39F8[9Ui 1gS 7.ϼZ|($0g~|!W w<F$rM$6Vj B:j5+NWɆҎ!2%feN:7Q^w[r]J6cHQl*l"z-##K+vC1-tL>cYDBYG+mPqgNy5 يj|f^"7}0s)pǂ<:]z[X!VVZ0VI(6YsH!dL˨ Td;,Wl kp+lWn>ҚlPzCR/YL7Xf9i۴n9TTzK< =A y%"?vG5u#t{zS eP0*iwJ)-V%z6”t3Sy7Wdg-htZw.,Q7VwGFN9QB]EztCs7Y+ҩymlԅنۉOV6^-*R=I8+K^'CQ'Cdvy 'e.S&Xѡ͎]@*lJqRxU=-}U 9T2j+mO!5qoٰ3Ku iUQIƵ,fAA93F_؈( JiE:ȳjaڨeM쥬T%ӣqJ ìFyai!@67nDzttlPDNlO `ٖ|&|5'00~>YF5aaT'Kmf fM6%;5 xS&/"uɫwDiR6쬖kS'rk̯&`lw"S Zz2Ah\t ی 6"m{!boyGaO|7љ#hm&Y,]g ϕуFm1(l-봩'" tČw!+ ZRQrZjP@yE썫-fYH!9%0) D{V JWUp;:w5Zwȍb(F҆aP4 ,,fA_R##  c ~j Cf{`7oJ"`P&rmZ 6pUkFqCX[rš퉪+z56iD^eB薬F;,Ȋָ9KIf`M4u撮Qg"nAkU!?Y>|n53sVm-,Kږ !9թ\3JS"Rd!9>p_iԢ$ ח+'8=f,/%ҝjX$8 5RM@ "/n,OYD؉f@k([c^n0ֱiaT #A@F֨}W/@j(pm';5,դ}]u*C,I>_tZ:YtI̱5Gqi"? 
endstream endobj 360 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 349 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-041.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 361 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 362 0 R/F2 363 0 R/F3 364 0 R>> /ExtGState << >>/ColorSpace << /sRGB 365 0 R >>>> /Length 8168 /Filter /FlateDecode >> stream x]ˎ$qWޔ~l` `K5; S݂Ob#L&lj㷏_~g~W7?=%z_Gï>_?___"/_ҌVi>s~|Zyξ./KHz~owv簨V3gxVΤO92;jy6eG+g@?>V3ǰ!o .mvu^S̻] w~=׷>uޛOO1U؞'ϩf˵xfmsj /,鯶"c O3aWٞ9W}[[hْ}7qy븅wQ%7)%o>FRS^e|Hٟg^[ 3=SێޞTvQzlNxaRXy{R[=>4=qi諹նްD?~j^ҚO椪q4g^uFU{ꌥ.(Ɲl`\0u jR)oj/ث`e3njj8?XvoA*sbj bF[y1ԦjO/ߎmq(SójǩjT[|Hk&Um@)vP6sMO|QeXzb"t`oeoFe Z|Pȥg75 rV\GZi ?]d(]ϙv~=׷kxuo5rh2lҵgT6n[5xgYШ(3([Te+r/ D oDJe/371*`8J[=lc~7pL#o*h.TAM{ol1Un dG'bϦߛbbrWOLyScx}cZ.C~w۫mQqP PRH%1Rh0>$wFe0+Oݏ0${ĺ̶K#h;p/sʕ!T4M5i0uR:M\FeۈXU5')0*P'03b8x0e6Wl*!JKw FdTEP3dzFeKrTQV٦̠)_ @(4ˆ\&pabHou3;n#M :6`T6IhbvIT!ےVҰ XkidfUXSG<|E])Ϧ}Mbh.I4CEcs¥e`q0UY'[Im MÜM-.BgfP]zJ&8;k^Įa:N'dRmCEc$Yȩ و'#|h)NQ>4sh(PDIEo -2"6"Jj"5Nb. 
$>`iF2J#T+eE ZBFA:PW ʦqʩ0&f% Ae=UT;:1w{ Q("꠺` ^c6Q |{)3)oa-*XsPJJ~\G(fh-I1ىJm}iS{ dQI6i927 #0>nsҬdn`jYDZj`kwVޕ t$U-h/jnũbܖ"jT ٗ"K*b<4jaxnL pm plR@,TvK~RLjy>S '@ZWX1dBY_5"6z*;h.} ~ReRT $_@8M , GTM(fNU2Uw]' TmA| (~=lu}hf=52Mja0CD*^4<{谖(մέ iT:>ccm*fقUTfףkϹj5pIRbsE$#5e#S :R=,r=(2uN[7lĩ'Eq|Pcؐ*A쒄;-:;FA* ,v*'4\fROT6nӵ6OzˍtUb/ 3"H׏qMlOml|Bl妉q &0E vjatFtKb`֙j!ઌlg`$;^D LQnSu[|QN2@ڱPF4[{1p53p"gǤpi'lQo1-wm=ƫ,mϤtt/RLҹ#Kؚ1姫&yPg֭{Hf; :m=ʋ^:$Hph Ӌ p(Vk*` {p!LWqF"; Cg?caS5ҵu}6)EY!:ُ˻8iSقYlZ !6izzޏO55aK'o=ԚDC-h7GT"Fxmn:Z2]U # eeSe\t@/]Өfn;tgx}EoJ8Ձ]5E1> ="2HI sy$,Ϋ;,)yLyq1!{D'p*O]˚K8J CrE8r/)$9I7#vykO]}s=/Dbi шkߡNI\,02[<c-'H#8nd}!+=Na!{v7/dїַ2Ri56L>d&|FBV]у0wjK%KG#U35m\ &QKd#UK5h[D9VHTO;Bs9},l6"QM>طD&Đf oǨj&4B$ i6t>bJ (mrOBl^A [s .Un̢wt#HVEHT=Y=ѥkQrKSadBA4+jAArFq@ oKhW o{! nіG;w) *d@g! YA QFu{biΚK5ȋw$X!ܚGBþ$`V:pbǷEV"F>tt=cҞ?u|'=BnOL+L"frfsث"zZl zǼlYČez~(ɮlcQ]%W@eݍa ½|łktLZ-莚W'7ܽpMwsSl`{u2"H0>pZ|;L]Wsp.>:jhW: jCT ԋwXujkdVw|#{& ԗ +/#Wk_';mnW^qyn^6lS*o[5Xz҈<3{o -ѐ{2P: rZ=("h彵CG}/ׁVPK/?uJA62"xWZ^߯qqc5=(VO܏D_x:xj^pݶ_}_Zm/^RX!fFd`i0V~?^y1⑮= q+Ocrv[S>}{׸V˸O(t Se [XDY(yq"AEbޮYtu"6MfwY)>AgR xO\AȜ@8lEk)ѳ )L@M,ӱnIQPk "0?7cL]M;a-j] \5S*pU4=/DN=R,F}ϝFKwܛ3/AiU=i´wnS7 g^ Tɴ6>` L uGB>iƂIlHS#0E6gZ@LV}$jgaYjSGo" 굉Pz"UQ7q\q#0DTn!\ 4 pElx~OL}'~ D]yEn{?J=qP> =YD7ЗLb@. JFWcn|,{8t*` l:% Yrx ' Ҽ10RĽ7'r$0T{B m[$'; 0M.oCȸQs/LX؛~(SD`V" `{p\ 9o ລk#kʕٓgk|k,eӀ+;`PI b: 2Xm!ܨt 2~j(Ax_#O*к{BVmb{F -*;'cl:׫kUa(4\gnW^h/v4O{^v!/)qP7 L<Ӣ] Z%{5% #!+ow.<ddNwdI"lJ,@r/=QܡUz$B4%13EA$rXF U0PxDa&/{,^ݟXd㖍4KGU'c\Жͽs<<{Q uСGcEDx_ 0iKu*w)Pi(IboDX8OUo5Zr;;mTF Z̦t.mSE 2.oRfPX;1A ]MT 7˅u$3S/8ɵ".gJSC5u_IjjEޫUkpxO;);@P>$?W{D0R8yRzm]#$߈S)>-x:C*gμXaV9#(cV9FHq/ք@#龯!̾sj-;1Ӡ9jWUUq{+~na @Ear1V`cgN1b?ZpF;)fq'p'M̠['۫ >B5c,w\6[dj5I:&-CơHm+FPwjط?)}݋G{늿{׸V˸ߏH Y} G&J=*/<5bߛ6ӌT4PeU,xdx/D[o0n%O>uE҃(^<XZ=zjj^Y]š~DK-J"b!OrIJ @s]0mnV $ΰQ88 g !5|J nh*En_[t-J pQrM(9"5uȺi%t~ @H=ӥj\ 0f }m(+pM{;^&{DB,[LdAffmy8Mţơ}=2;,Gu'E$zj?'dNމP%Vu@]e"> X-@ts(Zln3T6Ӽf7j;闣oBN]1#w%;@A(MnH5.:2~l7 yяE"cjݞDk8Mˑ~/'&j᝛' H"C? 
vEȳEOs*j N]9g1!t./=UD^ o"Y ,Ǔnx Ju=)˗=:[qʔ g;0UB#ÆB%"H'Qi8Q?fRv?){tұ&Ieg (zir NL*wpN-os e, i|?%05MG~hMePJN-ƘޮVW'\B/{m;ϩ!۩\liEi7Dt/ zp,M㝂[''m!!嘘 Fw-c=ZNJ<zG=ު"Ω'sdKpֿ(psp1E(NڗSr/) i R( gJʅs|mF1QZ]K~K j*h*5P&*򁾙1 E,S D#?g׭[o vYRTSK/kCI'IpID&N]:$Ë:% iV;)*WP&BVxX#`u7&&ñ H Y|=r 4^zm{7tgdq|H_k+O=]HL@3E,~klSZrk6rهdž5Uʒ8A3n hOLo<> =KPveݟ p"v_y CvJkW8~mP\\wA}AFn)шt+oƿG)q+OZOo[9w=Oセ_Zm/~?M.3SY]>u4dUUiA]؅t~6@D hQK,P{^#CwzTar"twk ͭmB:I5qO׹o iGX/#ZC;4UeeѲbK! Y@\=M ke"bgŭTO'ߍzJSjzfRw>p%y_t~'5Z}&+uqKZ+< OUt]ٌ\Afj76bra~z}QT[?+q@* S=<{_/Oo|d[#{xA_Ln_S](ɿDDaݗݞ; endstream endobj 367 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 372 0 obj << /Length 938 /Filter /FlateDecode >> stream xڵVKo@WXG[wv@j"E87qܨIy[hF^| G.U.v*`|(kӄZqzϋDcQy6;X3ofG6O/r=ht4@(2Ӥd9jD&OW^L5cAnb~u4\(IëW[f3~W5aT0$ܢ쎐/ EnEp&r& %HRBU6~`IE-(Xw07!&@3Nrv:{ E|PZ*c(tsM ⩲%K,r5LrىTJMN9[ND,{XTU%Sw 5XJަP0+CS89 *!(\${<6V%)+UV<ڗob~ )gAP?) 
wߴo$7%!ưkeJl%鮸)VyE\kRwYk%^g^VY^;bu_wwaj{Fϩ}[wKދ߽Fq_7|2dzCKzj}fq$LoTeTy''np1FJsc9nDLl"=6Ӈ&2oPLc"orR i)8/ DxZ7 BW3 __0"n6Xڟ¿=M ꦵrcO}^n6\F&QJl endstream endobj 350 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-042.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 374 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 375 0 R/F2 376 0 R/F3 377 0 R>> /ExtGState << >>/ColorSpace << /sRGB 378 0 R >>>> /Length 21138 /Filter /FlateDecode >> stream xɮ-Kv8_0w%T>@BBKAU23YD yfM'}?o7/o?ӏyPO_"y~i}>?x}_O_PqG}YW|!C~wyuOy>G_[jW)?F__}lHbB&?sѾZ?`&L{%Z5_~xWʟ5wk+ZUwFλWߙ5{w&}>i؟O^)};v_۾Zߙg~_;?{OkJy&9ʊVz6̵}{}ܿi{;#oc߻n4gﮓ.}o<Ϩ_~{+S9Df5+֟>[wO얔 (7>|b}|g3q{}P}0m/vMҟ(u ;Mk s×ŐhyXol ܾAرe.cas4A۟XLۣnΖ,uS֒Xjtڷo|c^=͐7^^zƖ|Pwc@Z V-o,y! !cN$oje<_e@ha,N }{ۃ-UnҶw޼֐#AMR*s!lV:$ P} (w$Y٬ K{-~|&-=TzPK\2}So}r $joy|Oe-u'ͱ~s-b eboݰiy o;ƤRl5-= #ya s_~wӿ%=vM7jiAXSL! KsUO~(и}A"Ce0g[!|-,pӟsǓqyJ #FN &ߚ qAE`ED,,/ l=ca& 6֜ c|+d'?`$'*8k׺㈂{[4o+ ₀iUly,1m֙ľw'tMs7Vf=)K%œ | . :~o ̽F51y|~s`V|bR'䎭HiU6+> V`U oI&*̮ *H犭9*Ƽ"E{xqz8{VKx)g%7RK F ߌX!aqJp~#@]OVjWl!_P.G.gߓ~EAc+/{%(]'F_KG֕N͇oI!iJI?$c^WKZ0ye{kyvМ8gЯ3_πnÖ(ljJGp(o"]3:e?g2gy`M{ { 6۴AĺvPƇ-c6 Wtj-b W#g rxBY4`LC^:x>csL o( /f, K 0`%5结o)M2&>0[KTaXg6 &(nƦ5EdCz*ȳG Ne朆Fse %V|C$ϋpCBdR={Ni.X+OɈY$3r`Dx@^Ai&0LK HAih21ߒ(.isX¯s)$XӘBI $ 3)ȡ{v24ih) O0 8snQ.9s3X W؃%A[˒O;yoNyY|5l \.o Y{)d8oY |Лp>,'_9y 6Gvuca\@u%ucǼSx.OAk)xr\ m1r&aKҐYV0wJ9Q=-+ y61a# *Oac|$}㷃â2\ )]asDx*M-д&_Qm TI(8AsJq`~G2`FU#ZJ3:-g<3 .)"m3GGG/܊i͍@ VEb* [5ɦO [B9**%<@Y-}rƈ&` ),-a7 )8n}'ª l^7Gߟ@?>#(R[8T`C(Ș1L~%t&#g~.P2@MOذOxTc^ANxWQv+ǶIk=& S9=ܡlL5N9!q{l]:2T°uitqlqy6^lR:au<21G[a8l}⽇g ;c'X8oxX0ҽ2-6KJ~C?_cTm'(a%2-Ɛx˰v3=1")Gx?( AǕX;yI!V$c?cG-AeIzѶBW"3xe(Gz¼_zyN+{l&̯ cvk=?s/a%RVq-y£-1馸Tr>Z%8iK<^ \CYϊn=#Aa J;ӕPfxtAVka1H$<8 2֞ *dy5cֳ IJck`⼉DDaNgEQtMa/> `hI U#9Q.!. 
t 懘W0#Pcs >i [JL[(Q)¶L@(:(]'~!l<ɒ5>APG^p-fHMvMׄyEMJ!_T<'+" 4ZYԼ i?ch F_p/bK_ /_W#[>Z#0G6-+Wj Ү?j!둤&˘&hQB0mR6oh5-؇Z4񆱥-FQsS}V)|GeKl9Fz"2$Ed!¶]l(H07r6%>e}eS"g'Χ2nlrd\xۉ"i|=;ȫcN[{*Dûuwa#Уy<Ox/¼"LHn絶aJ0MɅr6̏ Me8h|;8D,F_r2-72;O&)$Ⱦn<^R>[A([yϭI^'~Wxۭĕ%[nRز*q=GC^-_vK}8y2DayE(ni)x;{Iq;>!G1"c :T"~ 2 EĢ>W\k! _VpF[gfB~C-TѥKS] l񩮈-Y 7JАr8 ؐ]WN; en=cDA*#m' 385fcLH4јq,|d0+i񌒍ɐsD3%RoiѷvlFlW~([70ag(ζ#GD%x8NUq\#q>l-1/Im)a^~|}CZUSr4z\}L磋b)+m1uϑxIOې61 CCWg,Ϗ `./VuTEsȢ"^Cl%#B6LI L=x0'i{/яA&ctg *|*E]z,݌E9k]QɪèQ"YmEtplӘo)aAnN~e+ZvXw0CU5EHEY|)QƩزzӤG`p)pʶD6EӟVIjHӫW WQ*Thmt@2,ٙ1~G~~]<|UruU_ NaOczaK|ɢID0jh2jSh@^H<^$3 {TH_TۖbRNt"0Le ;t},[U(r)c-"h/`~5g!JI 6[! iDBOvnT(\}mPk-6!ݎ}Sr4ˆFf֚aU.U i;&nGJ&A_!Qv1a/*{R9ne o-|/ S\ o1;Fyrlˍ6I&wkKt(uMFc|_*XC2B#=)ensOi.%NJշ "멞oz]WXp߲ߋ2$ЭMaIhƤc8ZbRk]zpQs@}*rS$LeOȖ&|OʶFË#ꔠkw]X,-B :W"cD*Jٌ\2jt6 F~)pwGiy0im|O4Q1,.<d?d$e A?2A0 ϘsL{X9MDޓt} VU\/D[^>QFv_"6M{yETAn]:L+;̩lK[WSsʝU,1 3aٙ pO+ 2=nOc,7N%)zʳxpdVr#z.L~ _~ `0RΠwP17U 6zD rؒ2ET _%yq=Enc0GֆV8Bb5Bʝ \-Uʸk'`c>ptT7\#s\FP}x.YgepϮSRNXUo OviVH [҇ܳpMN'_Ĉ OQI߅9Jd Erv>CJ|R^StJ{QZڏ"jnRpQͷ<\];UlYMؼy.kℙySv/ewOĕǸ^ BiE_ip{iD(ѕs06YMYT*1:|HT h1 EVMޑlvLUVIfyQ,d& P.؂D6o {T19]U<0k/u\I;D]9.ʵ'WvWRg#[ۃSsƖT@O%+/i':Y*VLbS<5۪`k-#7+>A2kI¼b2˼8KvmO&G>+C9Aad}W;$ CLcwnoE|j|S>ӌlj91[j8,8"+" -6:J @VDŰ./ k+Խ5E4u '/rAu-"RFl^#Oкl̉)E{dP7nW7voiw)G~$Œ$X G9:Y#[vd4Ruɧ= tUᘥX|V,Abk1G"DQy@MيW7ݔ dT8_T>{9Y;˵}ddԗ\L`u;ӐH7Lɶ~zWF֊ s ~Ѹ }RĻ._L Y3-"<sd9b0DöȴVItb(%J>^y>Fz}IŒY9ْtx2#N13(DO(l02Giu('l<\(|jA)ƽ^Uyã $|E!H˘W0,y$??Ng) p@BZSP1zEFmaLb^e"/C8'51fjbĭfV2rTSͬr_THYOW0&)p:jQ14WyZc qUVkc檭ͩ2޾FU>U-)Bݯ,LqLB6OZ>2TaWh-WBF'=)D=uBޣuL%Ixi!w LA}t  %JeICD>tł^h<{UIтh# i̧bE 5 v(]La!~ߤdIǴ2W,ht |9>) Bkb z|5ŚWd1`?B<,y /xi@#X)a^*hG, "ljMla!? >PLS09‚kj1{t[09ùkk۾>EzTDz-)ee3>ft_JRT<LWe 8TZo)07U2ꇐ63~ Oq22_8!X9L4HG"+8>tbcH.{8=L}f!o*+mEo~ioFGJ2@1{zBm+UK{3Ԥlzey,ҽ]R^qAeIWvEC/J2#CBB1N]mK33Dp=6U4)RH R.p=֢ⱅR/ 9B)JPZv,0)<ڍS"lYH#=*3O$-DJ'AR|B o!ٮ="x e G.ODVR ׵fqt~dNĄ9ɺ>f\NxrRᨺi]Dge)xHL`e zZi(Yעg,-!1+BʨQV ǪPkNcGb9.)f1HBo{"6!fPPyjTfq|vf#`FHe.a^AD%0>E. 
7Dԃ웬(ca^l|` )D3N Nafj׎"+J7S'9F{蓣0YM 赃*?qje_t#W!g5Uz$wEͪC`5]iW]g\O`E1a`-c,v`|)"kha=&ncgD-Ϻ8tI쏜4Y8Jf.|ꎊۃIKYU*SY !s) X' ;h0,̞)yaϔc #x'AٿwϢL矧'ЭHGg v̫Anl#ruh♎3{& F Μ*=Tx0D{r;7K{$'D/Gԭ8 [E/`pSY- ,WMK3݇\̊`\Wn6WMRla;K R(< ԔSqnjA#rb<Ƽ:?XxZбR Qo)g9RVv>g"zFM㣇8;9;5gnfyd\hp yd\;KbGJ1*Kiw4? B>OF<-+8_Y*GO&{bLr5^(=7sINk[xm(Q7 3tA;$+'k0,7=f*JIiy3Cc5wafQR;)k:4&]ʤ+a(kuw'Tt7 [kttf^a r+ ADvg" veKX֕`ە-G(LQ[kGdeN:YX!)Jd3yTkΩbj$zȼtştBy VYF*<]"KX%(n.<]uXxQggeW ~ 8>Ui82#}ˎ[QٴRS~v+={^Zc5>gf_DkRDe7UkqCh~b[6 =~K3o| vezGe3G@5ʷ>4)hD*Zǣ09Pl&CVĜdS#Em ƥ$Uy1Pff&1>YXKCzd@D1N'4-ڗI_@-l%YsorINedI] NFakWB!3:M5zJøWX?wFz!&)! h9n8q7:+pNjP[X' 1O;USm TLёNYt7W7綟.By]h_i8Țg]⩅]dLw7@=!w9WrPn>&ڭ4Xu{ʃ$@[WlaS^R-w1HYWW\EZ%g--[F6"5˵lNf?nLi& +g8+7=v]V~tg u[P)y_5`cZ(Bߴ2[;>MkQw.{^ꀯn r7+sjySGU%Q]͖YLLHHpxe=ٰ)E V?2ۡ2.!E$y=8㑂Aںٮ1̮;e\ƵF|2i+ë /k3@S4ь--W/k6X}^z穼祳cayn74泯^}(pVq;gЉ%.6cs~'l+CN\YUZ)J71[Uc/]bm>&M<5Yy`'ՁEx#Vz 0 Ř kt"IA2$N*dx:bNC+_"?*lagx&#FŲk9-\gWg 'iٳztC2nY˻W3"0>R_Ή`˕cq0[VGCq:=roZ8 ;82¨uyCaZ.nxhr -yjay &#> ѣjē!WsN+ָc/xb> )̂JXr tb>#HSeC|WDrdAryET){gU5!9 [v@yy7Dam#U祲v̥\qd֙꽮o<DʍȪd|:K2N,kԎA:ƿ]=bKB>GŽy”Àl]R֒z?uꏈE3eEYb{x^zc-Ms3}n_{dSa-lI(4W֭vzӧYv8/ue׎cFyK["OC c !S8(-d1LKغi#Fvp']ƅ=#g9<I!8P܇oa(.FzG^r@kz{>rEb0DQZ?"'v"+BY'7hC sTyu.-~2ؿrJ +DZEaB$LUQotejO霒(bm-ċtas 쩱 qtB^SޘO=,Uo!bGS!jU ^|}сב:/"]G- b>CMly 0gA?i!ryili?ra/?Jl%3-`՘W,7kϷd`CQ {$93v Ƽb1R _}gf©KwNMPm3㜝x%OYZ8`Qds]D8P$w8lϸ0V G"?F: !V-f2N'B:H5|]Yv-L3Hnki!H؊La^1Y+~XMsB7f:c7]j*͒=(7՟=3:2S3<٣[Gwjc'f(>EpH aƊ&[6fyDx~nY3L1~"xtE/["* HņnFSN/zM'dюE(F B=Jo6E-`7/-opiiC҆'L$,%8YO ! $lӟ]B'7x>ل@^ny!ż plΪ8]'=+}UW'0j\iM}צ,c^1"}b) 5ZP6f-/o'.1/{ (*SKg nKteXPǡbFp8` V1tVB88&16_ =LA$45Hd]7[`\S G# @a#=dua;2T}U@|<)Q'RekP;)BpO]%R}c37!NҁA3^i, IP;vA> ڐ R9F -l@q+8N,*3yȽ!fEQo@g.rw%h=$,l.q( o=*7djb{D0[fh0#'<̄u&-K;H봇@4( ؠYP| _f=qAg'-8N bC]]_.x~$M00<b%ittA^c}adxHnM7p(T/amlֿ*VoiݸK[xft-DHy_0r/`b,g!' 
׵pDcU [fAWapr|f% (DLKD/0rn,'{۪19]TՉZF >%4$쟬9^w݌\ *$ijKU[p^h5uTG?+}ڴp]R\(-IC`8ݧ-8,Rx, ksc)J$Ne\M'xВp;N v):MЗJl3B{7ډ٨e@h`|%*#F2U1uyD,x(#[=ԓpf+(̊ 9SUso9[:fw#iMwt^n+''vz|ՙm-ѷXkv|}x_*oZG_uWH?i1W_$=E84eW2_]L2wwy2w_)jFN4j [Q#A,vԉ343 deZ_rV-k6@.) H9,wl4xD f1Xeqa vp#Wiy)#y)ʹGQizrdDs,>QsȊ2.vnUAXCUUA8_ڌ@.tF\0i)+̸"Zu󲦱zbWS<#G:p6)E6ggT7 gMƑ#1]Hq<2e^83Wv^4郙`q`>[j¹ئY9-ae9a^.xH0d@^9V~uGEu5_z  @=E_l-^؃ 3dž3dž =*9im>kLGSZS.V9:q' /7ԑqљ][gϻ Ġ䕑V:&)])N–0" 1愿pmb|63*Mpnf~} o|pz=CwK̻`+sAx˱lT 1XF3(  3WNI̎n霗Xd8)L#bV7ZИUpěW Q2 7C_@s1|x}oI[1E8oQ)s-kvvx'<#F (NBg;J 9JlS|.a3,c=m'OشL'yld:_tB-]`׍>T*u$7PK%~ۑu^=/(òe^O_%8%,GrLӮk#X{lŽ\_352-9[59疸='#ØW#m&qB)/E0gRB1K$Ŷ(W~t-ҕ"Y\,-GWds:sBy#I]Y#PӃ%i!ʇmhJ57efvqqDL4sDwSCcLDaNW7FRd s*H3G % 4N¯XIq9sE|v\79~ho_K'2|+UakQѠѠtcjDsGe q-ḥK7CxDe#)%eR`(8=xScƯ=È*4xm{ap$q? Fiu㣭UkQ %G@0`>l`|&DH9[ fj8L+ yXh9>PKW)ܭ~GĦdeD㫱Z.EUhvvgYO) Y8ga.3D+APǿ*Do??~<ߐ>_ZA_K"#y ehs}CVO(Ͽw`5O|)*#}a?R?[ۂ1%a{W9?_m endstream endobj 380 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 368 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpkG7Hnk/Rbuild688f6c73bb66/spatstat/vignettes/datasets-043.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 381 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F3 382 0 R>> /ExtGState << >>/ColorSpace << /sRGB 383 0 R >>>> /Length 113597 /Filter /FlateDecode >> stream xlI&M [ dZ6«630-nUe291?G_~/o_?G?u]~s?ǿQ~o}/sE/-?6x^zZao?UpƏ8g<\?>Ӯ~>ߒxY~.|6sW?Kq}plKxCσǃϲgן_~>OGL 
\p.…x~?;Im='qGO?K X[)%|0x'ۿC۞~*\ T-BhQ)w-x+w>1 \_/)K~=w?r!~YpK`X {"\^|8•7_s4(]8O`Z}z*|ߖyp{s?ݚg}fz}Cl?ӚܽskwU^!n^Lz3|tgޑ ׳Vg[~l-?ӄs=j\/}h}Ot|m۬o_?)h)f?o\h=>8#Ƈqi?8#ƛs~c<|z߯ۗy̿gzOY6xyqg5lO!+ňbn g=baoiH[Gh;Ly3_]׃!wW?{wʛ)'G=8fs}+G빡gcy P$fߡpʯ,.~_E<'ᶷw}vDž@;W~~9?3u^%JN{}?#3?Mw'DN|b/OÀq{<'!?b~΂w?azV_~?ny3^W~C#x+?K. \GMɻ;%o`nn>x=̇ԟq<P|p?)f'|?NcNO:}~HR{=Y\?p6s ]bC?xlsڣx{XXk} o81q~;;y>a9XnO7P> 8)ۈ=@}y,[vfؒmgyqF`~yc!ϯlfNy`G`mOSQi<&zG}!Xn?멯xc}З|Iũ@?Qr|ӃS_:6Էεt`WG3>Q' Ĝ_>mI}1~)Du؞aVڟ>J} 4?nO>k{QگN ĺN{H|HO~-9G]pzl/{^`uXF϶=o&_Nn:;;wuJ7Wc>B)'ր3Q`yp}q|XhOp7`)¹>kq{a}hAh~:wG]s.h؏˖'Xk8%>/[/w8c\=UlOIg% 'hkP>xY=IM_Ly`SPg5[۝/|?fo}~c,7߷=ka+F{h:ު||}z~}?Q>0q밧w7p///nox~Z{`c /(ܿcA/‡aAF7p8wozzoK'7kWȻb{Qί?+ʯoI;sXꃋ\<* M X  z?ZxW]A [dž7=v_md$|ר'hԿ71%^<܁5*DьI=v}K6=A<y!/[0b`Xd,U'Ԯ_㦡X8<)'0Ombz2T`tG?Poyx玺Ops}O?71G'~(?N\)_v)?҂ڍ0#p֐Ty{v㨚߳㙴SI iM,w ` mGP=-XϣS>i艧Ny!1Jo ۃ:cc#H<0,Y sw.szR4Sb|?1sRI'ԣ\Uߟio-f4?-Ԑ[ 1ݾŻScO ,S>bq#uύ5ޅi>o#uby$8?-y=0QxIJ'l^GbQ -qq1|?`&V{CG|x. #zz<8~Hzpq#c|[/-hAM\(߸#'p5nx&Vۀo OGI(Ꟊ#'- %a- G/=?=rg[ `o5!_h_׹0Pԭ?-ܯV~;XZW[-Ge\9cxÈ#ǃ?oyolo֧s?G33nJO'ibm`8΅EgE$wqmI0EsgUne1ѹv 6x'y`,G6?xy}nB2ԅF=N uQ5 [훜1M'B0N8![s|DRy"9D"DYG+hp>?|F߄qp h \]t }"`#쓜<|ߍ)OPoj;Vw( : i|1cd;m}qLk7Plq.ؑpP=4 |aB9~9OT>ZucĨ <^'-w`bҢEaZlZde4"N~^ߚzjgtLhiꡄa'{%tی18PU2Wțp%V }c>ڀiӞfΰE;-M1WF |⵿w{m寮.mqszq}*>[LA Op)i_pe m)?Fs;>61i'FqkO{'S#F;v2fKgϥ3,|3u;ʊ'ؔ0̚V.F)mF n N|;jiWGQBmE9JXG+<\"3>j3*`J@bMcڸw#l{yh̃ތٟ2wPp 1L<>&-=BOUV_y%Fb~} V6SiX{N2{NQϜۓR疜sSV>]:h#D,9sU%q7HMbd3W8t$Fګw X;gSOל{80"8SoFZpxy84ٟ ߟkr I}#{p?_b_L_3T>ih'EB^ot>97weZ~&-SI]hw$^73=e@t{jA[+`;M?~nnF1Tܒ)a;MKOǖFO,Gvs[^;crSdm یSDjǖ[YD_T8rܳ>Wu;{v)FO۪߈}Iq[Ӷ6]Ww`XWYgu=eC;ɋ7ƷNt qu|HxwYTd:n#1R;>;R8>U޿{[21/*!V [J]d: -(\d\bj߅7ã0b"V"C#^< =~]I;J򛒕]b`7=JIIQ=szctSr:c06t$yCDY&ڷ,a/Y_7=\Cc>o-.}0qӢ1+"nHFQ14m}zQfY?ԗ̍Lqʶu$7mJ6(ǭ+v,I<἟"i`<]3цN K8:8:ꣶ<͙7Ҷ"9#sZy ޯv h~L/svgTfuGqץ"?H0Frw3e3c~y=+#q FOgAa~[8V)C k%_)q.)'G}+ QP_[QŰ~W"y7OFڿSYw3iq$OΟ};?*#Sa;㝶(SљZݑZ._h_Μ! 
:D~H㸒iY3/5?G鶊Wey=Sֿ8gap2C}=˶_'O=Ow}22)5ց 4<&;FnXG`.su!'W>\܃1|8#u\awJf L]oXrlplL /f7kWc'`fT[revtdߜ-io}:cZ.לMmk.Z;ޘq0;5v2Ф_:ܴeϜMv5P_X3ޓ:G8XWVl=f\5o,~wcz(=}l=8٭p6k׳!`gԏe0M^HX c]Slo]#ǧ_OW mf`W:x"kGK%Ǐ3@b ĒRF4{ҒlY>/_.=Eu;9ݒ͏k+RZhȗ3Сؾl'?CD -vɌx@0xFC(w90X֐x  ~b?4'>yu i޾>jpzp<"vS6v=|1{yG݃o^|xvmaxg&Vl.!s3[[s#1#q#,>ٹYvpPO܍>O׫+2iiO?(˙Izֻ[h|;Cp~D)|xt(%? 9Jϝ pE^/ӳxpXg`kbA7ޭW^8,[o(94ytK~FYo|y2%^;Tb/xÁ>:7.x:'x cq\b%\|h5b?i_Χ@~R\G$_p+0}(1?͘X!V7?}n.xvo0a'=nx3B9?v\_;^p}GuFiN1vȇed{ϛ |:{TG &BDm!q)jꞨy|ddFU>X\HOyX`yQ,x3{Ge{beg[8Z~H4Y-'cgCb4ѾhwE>Lݝ;?] FvGW?y.K("|? n6b|mSxS8ډ1*`L|d]y8sdi&#班)_~_>OE?#>8cTKj0G?LyvσW[](0y$Ѿ׏?ʫ/no>?,ʟ~[u^O׾T`ʏ{H}[•j7_0^0,E#"!?;)|kQ^X8Ǽ2\ayv>J\w¡!UC,?$IƅQ'nW^eyFK]WAS,Ok\ [`Mׯ_8s_}㍿@{L._y~o?AS[ַkgѣv~+ xr\G~n>|5|zurƧ"/n~xg@&΢,ܿǦ>Oj{F(vt>8#_oomľ>p>3|Gްqw޶_Q^n3.(.>8Y*WRpЈ# W$&ˍc]FILJ}BaYQAm -PznX엯|p/QmG(X j'/ROs| E7Z~$H5#M'/2&_8b {C+f䫛Ff{㨇eP1ߌK*bƦbbcvS}}!JU Ƥ A2Y u*.}b_|78Qe` Ff3w~/18{Q.T  gIT?F=`pmPq`p`{C0E@s/bByت`OuKn7U5qhU0ȩBe,[$:ҧfy0}ʪf,2תFo\J˰ɈocXh fć,3zFbof`٭,)ilW <fo >|?0?-J3֛ʃ(ZU"c?g#漈 ['dzd &bAT1+*4X98ۊɧIa#!X'0z^]ocayAQ9~` ޏFbq8=Sx{?t;X8&2")\K9x}1]:0/y˫>,ry~s>?Ta{+9o0fMy۞Uf dܮq>0&מȸ9._MV*-f-#s[}>Yd\_}^{&6286~__FF3mO~߃q'kWVַ3f:۷0B6ٲBI5c5G>" shcAۏ3BNۗAD=6W~l¼_~31 x }Οp :_|jN׆ga9|@EwIؿ1q=? Cqxɾ' /&.>H+$O}u|*yc1nWGF?__?뿀YPX7O"}1wB?z~c}Xs/ _OO}/d| C&~^Q_~i~r}{1?F2w\oL0GbbE#_Hi=f0YocB9/JulM@s)r>7-4~|[;RE.?1?]~ u+8+W17k𼤜Ɗh@9/yOtW~U*'GɼPfg)#<^~䯛x0 7LF0J:iCZ(YoyAn yG>ugB폱|??X"dx!}DĺN{|ی6}_1 G>Cy5c U1~V0a^{뚟L^h0; a^_ߗU番xVO;XA8{?Baw"U┧륫"8+~QMW,:;v*<%=S9/h/WS>WNT,]#n*Sa|ɼzUWSe|٬jyuke CUdWEڴ? 
h)ǢOm^V]}~f'y*{S_%ŜOReOgؿIފfR⋸pÏccs~mY?|N0;!7;Z4N/_sƻ_?;L>L+xx},/fr0^c˾IF_O_x<Nf+ ._.c_7 ZHMe⸜3_8ygU8(ˆ0b9?d_>}(7NEܯuL!h{+ta|r+"œ@/._4b0PƿF⌿#;,y/w 7SM*+| G"gsVƋV琉g9V7>i|v 㯇0ft~fv~f·QT/w}_ʧ|O||R_r3y}#'AKgI>#g|T[-ȗ'|g~_mXpj}69o&5'0}p[̯)‡ky7߈)[{s<[s!Ku:3)8џҰFxo#qAy Hb8xq҈+v*f/ߋ|lw>ɇތ.=ߦS\c3^,2faq=|_{ϓHʷ}?pO`?Yo(6R`zOO6p/4w|v^޳~,znW/f؞o><f)  ߰%.'~ Ӗ_UW@TqYfy}#< /WW$)~}/a}ć'I\?eOCi{uusp}au ?|_n4O"'Űja|z|gJ>^Q[p=|o&|0bO+7A sTH܍eV[ d<1g'{}/?ri1%KdBgu>clzK;ۇ)O?qhX^uUߜqm9?T&ppPz?o](O1|=_~\M~o*ʯN.Թ^^߹yJ>qO_{* ?OVpzo &t~f򿐟75'u|QD֯5fg'dM.EbD䘥/3쌰1fU5+c>k('DuǴUbqhBV1)&/a*hs:ͬ#t2j8W2UaDQ9q+*65T 2jV~_aT>|h%CP3JFnMj}8.W"y(s_A25sJyU&8-|'zV5;nlu`8O ntzY+Hu#{#9Ǥg,׃LbvD2Fu/z7lܲ3d"CVњT~#)9u̙p]w3$OXY/G2`2~TdWd%pd͌oIlOc$0*$gVdW5 F.OGu&UDqa)9ٯtS. fn6MVd#5gr27-J^ܡzJmʕd8skc+ckum_qEov87#ϯU{ d?;\rN[˝8ͥQ<#sM0sŹ37W!6ŠqʠJ\e*!)΅XjNT*c)Sձܛ빢rkOV`8UFnbuHXG|pnC0r-RWnK&pS.%F06$ /dL}Xl};aFiN%)1W\k4TXQ{X戮̦.ZYgE=]}3xVh q5Zs5E\F{8[7};[]%* ^Vא_ `vHe4 I'nvxm<= U'*uqu<Vfȏx&ݿĔmrQ|7Y窊fKj~}dh;1\g>BVY)s穚g%.?CKK0xP/a1XK*}WrE_2Tjc#0d)2Z09e*.;}3bu%2v̸Ҹo{gD(X~3pN8'7*`L81hvsBs6[ToNbΟxs[5uju uZ,z,4=P;vꊳC=)W((eRe\ع~!p1Oąyz3TYe7nz2/_qы{~𛷟q+[:A|̸\Gq;[8וq/lDWc4{9κk8v6.A?קA]v䡝TeJwszuPq7 m"_MX󽿒9~oc+38n+ʑƸ8F;@y_̋CVl;p9ytFX'g(n xBW$qj(3ԙUk`NA = 9stW?h4!cHX5I[4(֎+4C]:֮r){8VZ;7O_Mq)xX9:9ܛ5}=+w8;K{/~߸|}p8g刽'mٱ|iK˺W8ԏLWfnl"]:pr.z\xt.Tg.v?aE߈ SRfynBxu*m5'#3cjF7۞F(a8c$TOE#gsufHR o2øVd@ql?dvOt]6|oΖ/Խ:1"z@&L͌p|q\vAL{b~Txbt>b]|ȥr<$ּxԞq7ߪѡ9OV7(5<){}rp<^T7o9^g[UB4g֌ kUrƝ ]cjFQB?#:+G(5:+0=t$RJehۯ];odpD| Rɿ3\܊+2+gcr~o Et?r95禟wբ\x%KpVe5tm1r4bΚpO[:9=՜a y^ɑr*a%)QTip(@,W2pTqOQ)eXŽ5<ߧJҍM:R%?!e ^f=\vKQ s:q'owP \!9$h3ob.p0H/G12 /op{0yYk+ ם 8O.;MnGْ'Fb$9銣LF61X59L@`eyJ (]I|<(ف1"ĐcI!UB>/#bW2N̄c:ofčL<Y{?\՘SyS7pu{[iD1єY[KAьo_haW>t2m~Q&nR\s$aA&FCbi|-lF:}cYH}hE*J{r>pR;3!e\/G'x3FzV?s~0d>_oa( p$Vkr&#(3( 2a$6fK]7q Z(ԃzƙ QWl6RFOt1I5Ֆ ׼/ԄjrQق~f kZ;jםlY'1k1^u 8)0>4V>X)uXz'k>v.jUdxĝ\/djvD73BEGF8*)d&{QEA/.2}:@FVtf˼0#Ťڄ3TxG gDndE##V`#Cil!2d"C/^!/ubUafA3fFL3p #*PfoaYe2)5! G